diff --git a/qcom/opensource/camera-kernel/.gitignore b/qcom/opensource/camera-kernel/.gitignore
new file mode 100644
index 0000000000..23071c5472
--- /dev/null
+++ b/qcom/opensource/camera-kernel/.gitignore
@@ -0,0 +1,5 @@
+clangd*
+.cache
+compile_commands.json
+cscope*
+cam_generated_h
diff --git a/qcom/opensource/camera-kernel/Android.mk b/qcom/opensource/camera-kernel/Android.mk
new file mode 100644
index 0000000000..e2715e65b3
--- /dev/null
+++ b/qcom/opensource/camera-kernel/Android.mk
@@ -0,0 +1,98 @@
+CAMERA_DLKM_ENABLED := true
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+  ifeq ($(TARGET_KERNEL_DLKM_CAMERA_OVERRIDE), false)
+    CAMERA_DLKM_ENABLED := false
+  endif
+endif
+
+ifeq ($(CAMERA_DLKM_ENABLED),true)
+ifeq ($(call is-board-platform-in-list, $(TARGET_BOARD_PLATFORM)),true)
+
+# Make target to specify building the camera.ko from within Android build system.
+LOCAL_PATH := $(call my-dir)
+# Path to DLKM make scripts
+DLKM_DIR := $(TOP)/device/qcom/common/dlkm
+
+LOCAL_MODULE_DDK_BUILD := true
+
+ifneq ($(TARGET_BOARD_PLATFORM),)
+LOCAL_MODULE_DDK_EXTRA_ARGS := "--//vendor/qcom/opensource/camera-kernel:project_name=$(TARGET_BOARD_PLATFORM)"
+endif
+
+# List of board platforms for which MMRM driver API should be enabled
+MMRM_BOARDS := taro parrot kalama pineapple
+
+# List of board platforms for which Synx V2 vendor driver API should be enabled
+SYNX_VENDOR_BOARDS := pineapple sun
+
+# List of board platforms for which SMCINVOKE_DLKM driver API should be enabled
+SMCINVOKE_DLKM_BOARDS := pineapple sun
+
+# List of board platforms for which SMMU_PROXY_DLKM driver API should be enabled
+SMMU_PROXY_DLKM_BOARDS := pineapple sun
+
+CAMERA_SRC_FILES := \
+	$(addprefix $(LOCAL_PATH)/, $(call all-named-files-under,*.h,drivers dt-bindings include))\
+	$(addprefix $(LOCAL_PATH)/, $(call all-named-files-under,*.mk,config))\
+	$(addprefix $(LOCAL_PATH)/, $(call all-named-files-under,*.c,drivers))\
+	$(LOCAL_PATH)/dependency.mk \
+	$(LOCAL_PATH)/board.mk \
+	$(LOCAL_PATH)/product.mk \
+	$(LOCAL_PATH)/Kbuild
+
+# Target for pre-sil symbols
+ifeq ($(CONFIG_CAM_PRESIL), y)
+$(warning camera-kernel: Enabling Pre-Sil Kbuild Options!)
+KBUILD_OPTIONS := CONFIG_CAM_PRESIL=y
+include $(CLEAR_VARS)
+$(warning camera-kernel: Enabling Pre-Sil build, exporting symbols!)
+LOCAL_SRC_FILES := $(CAMERA_SRC_FILES)
+LOCAL_MODULE := camera-kernel-symvers
+LOCAL_MODULE_STEM := Module.symvers
+LOCAL_MODULE_KBUILD_NAME := Module.symvers
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+# Check build for optional dependencies
+include $(LOCAL_PATH)/dependency.mk
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif
+
+# Kbuild options
+KBUILD_OPTIONS := CAMERA_KERNEL_ROOT=$(TOP)/$(LOCAL_PATH)
+KBUILD_OPTIONS += KERNEL_ROOT=$(TOP)/kernel_platform/common
+KBUILD_OPTIONS += MODNAME=camera
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+
+# Clear shell environment variables from previous android module during build
+include $(CLEAR_VARS)
+# For incremental compilation support.
+LOCAL_SRC_FILES := $(CAMERA_SRC_FILES)
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+LOCAL_MODULE := camera.ko
+LOCAL_MODULE_TAGS := optional
+#LOCAL_MODULE_KBUILD_NAME := camera.ko
+#LOCAL_MODULE_DEBUG_ENABLE := true
+
+# Check build for optional dependencies
+include $(LOCAL_PATH)/dependency.mk
+
+# $(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
+# $(info intermediates mmrm symvers path = $(call intermediates-dir-for,DLKM,mmrm-module-symvers))
+# $(info CAMERA_EXTRA_SYMBOLS = $(CAMERA_EXTRA_SYMBOLS))
+# $(info CAMERA_EXTRA_CONFIGS = $(CAMERA_EXTRA_CONFIGS))
+# $(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES))
+# $(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES))
+# $(info DLKM_DIR = $(DLKM_DIR))
+$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS))
+
+ifeq ($(TARGET_BOARD_PLATFORM), lahaina)
+# Include Kernel DLKM Android.mk target to place generated .ko file in image
+include $(DLKM_DIR)/AndroidKernelModule.mk
+# Include Camera UAPI Android.mk target to copy headers
+include $(LOCAL_PATH)/include/uapi/Android.mk
+else
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif
+
+endif # End of check for board platform
+endif # ifeq ($(CAMERA_DLKM_ENABLED),true)
diff --git a/qcom/opensource/camera-kernel/BUILD.bazel b/qcom/opensource/camera-kernel/BUILD.bazel
new file mode 100644
index 0000000000..c03c1a2ff0
--- /dev/null
+++ b/qcom/opensource/camera-kernel/BUILD.bazel
@@ -0,0 +1,178 @@
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+)
+
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+
+ddk_headers(
+    name = "camera_uapi_headers",
+    hdrs = glob(["include/uapi/camera/**/*.h"]),
+    includes = [
+        "include/uapi/camera",
+        "include/uapi/camera/media",
+    ],
+)
+
+driver_header_globs = [
+    "drivers/**/*.h",
+    "drivers/camera_main.h",
+    "dt-bindings/msm-camera.h",
+]
+
+# Generated list with: find drivers -type f -name '*.h' -printf '"%h",\n' | sort -u
+driver_includes = [
+    "drivers",
+    "drivers/cam_cdm",
"drivers/cam_core", + "drivers/cam_cpas", + "drivers/cam_cpas/cpas_top", + "drivers/cam_cpas/include", + "drivers/cam_cre", + "drivers/cam_cre/cam_cre_hw_mgr", + "drivers/cam_cre/cam_cre_hw_mgr/cre_hw", + "drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd", + "drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr", + "drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include", + "drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top", + "drivers/cam_cust", + "drivers/cam_cust/cam_custom_hw_mgr", + "drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid", + "drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1", + "drivers/cam_cust/cam_custom_hw_mgr/include", + "drivers/cam_fd", + "drivers/cam_fd/fd_hw_mgr", + "drivers/cam_fd/fd_hw_mgr/fd_hw", + "drivers/cam_icp", + "drivers/cam_icp/fw_inc", + "drivers/cam_icp/icp_hw/bps_hw", + "drivers/cam_icp/icp_hw/icp_hw_mgr", + "drivers/cam_icp/icp_hw/icp_hw_mgr/include", + "drivers/cam_icp/icp_hw/icp_proc", + "drivers/cam_icp/icp_hw/icp_proc/icp_common", + "drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw", + "drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw", + "drivers/cam_icp/icp_hw/include", + "drivers/cam_icp/icp_hw/ipe_hw", + "drivers/cam_icp/icp_hw/ofe_hw", + "drivers/cam_isp", + "drivers/cam_isp/isp_hw_mgr", + "drivers/cam_isp/isp_hw_mgr/hw_utils/include", + "drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller", + "drivers/cam_isp/isp_hw_mgr/include", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw", + "drivers/cam_isp/isp_hw_mgr/isp_hw/include", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/include", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top", + "drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw", + "drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus", + 
"drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include", + "drivers/cam_jpeg", + "drivers/cam_jpeg/jpeg_hw", + "drivers/cam_jpeg/jpeg_hw/include", + "drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw", + "drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw", + "drivers/cam_lrme", + "drivers/cam_lrme/lrme_hw_mgr", + "drivers/cam_lrme/lrme_hw_mgr/lrme_hw", + "drivers/cam_ope", + "drivers/cam_ope/ope_hw_mgr", + "drivers/cam_ope/ope_hw_mgr/ope_hw", + "drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd", + "drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr", + "drivers/cam_ope/ope_hw_mgr/ope_hw/top", + "drivers/cam_presil/inc", + "drivers/cam_req_mgr", + "drivers/cam_sensor_module/cam_actuator", + "drivers/cam_sensor_module/cam_cci", + "drivers/cam_sensor_module/cam_csiphy", + "drivers/cam_sensor_module/cam_csiphy/include", + "drivers/cam_sensor_module/cam_eeprom", + "drivers/cam_sensor_module/cam_flash", + "drivers/cam_sensor_module/cam_ois", + "drivers/cam_sensor_module/cam_res_mgr", + "drivers/cam_sensor_module/cam_sensor", + "drivers/cam_sensor_module/cam_sensor_io", + "drivers/cam_sensor_module/cam_sensor_utils", + "drivers/cam_sensor_module/cam_tpg", + "drivers/cam_sensor_module/cam_tpg/tpg_hw", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4", + "drivers/cam_smmu", + "drivers/cam_sync", + "drivers/cam_utils", + "drivers/cam_vmrm", + "drivers/cam_vmrm/qrtr", +] + +ddk_headers( + name = "camera_src_headers", + hdrs = glob(driver_header_globs), + includes = driver_includes + [ + ".", + ], +) + +ddk_headers( + name = "camera_headers", + hdrs = [ + ":camera_src_headers", + ":camera_uapi_headers", + ], +) + +genrule( + name = "camera_banner_header", + srcs = [], + outs = ["cam_generated.h"], + cmd = """ + { + printf 
'#define CAMERA_COMPILE_TIME "%s"\n' "$$(date)" + printf '#define CAMERA_COMPILE_BY "%s"\n' "$$(whoami)" + printf '#define CAMERA_COMPILE_HOST "%s"\n' "$$(uname -n)" + } > "$@" + """ +) + +ddk_headers( + name = "camera_banner", + hdrs = [":camera_banner_header"], +) + +load(":camera_modules.bzl", "define_camera_module") + +define_camera_module() + +load("@bazel_skylib//rules:common_settings.bzl", "string_flag") +string_flag( + name = "project_name", + build_setting_default = "none", +) +config_setting( + name = "no_project", + flag_values = {":project_name": "none"}, +) +config_setting( + name = "pineapple", + flag_values = {":project_name": "pineapple"}, +) +config_setting( + name = "sun", + flag_values = {":project_name": "sun"}, +) +config_setting( + name = "canoe", + flag_values = {":project_name": "canoe"}, +) + diff --git a/qcom/opensource/camera-kernel/Kbuild b/qcom/opensource/camera-kernel/Kbuild new file mode 100644 index 0000000000..17b0659a5e --- /dev/null +++ b/qcom/opensource/camera-kernel/Kbuild @@ -0,0 +1,323 @@ +# SPDX-License-Identifier: GPL-2.0-only + +ifeq ($(CONFIG_QCOM_CAMERA_DEBUG), y) +$(info "CAMERA_KERNEL_ROOT is: $(CAMERA_KERNEL_ROOT)") +$(info "KERNEL_ROOT is: $(KERNEL_ROOT)") +endif + +# Include Architecture configurations +ifeq ($(CONFIG_ARCH_PINEAPPLE), y) +ifeq ($(CONFIG_ARCH_QTI_VM), y) +include $(CAMERA_KERNEL_ROOT)/config/pineapple_tuivm.mk +else +include $(CAMERA_KERNEL_ROOT)/config/pineapple.mk +endif +endif + +ifeq ($(CONFIG_ARCH_KALAMA), y) +include $(CAMERA_KERNEL_ROOT)/config/kalama.mk +endif + +ifeq ($(CONFIG_ARCH_WAIPIO), y) +include $(CAMERA_KERNEL_ROOT)/config/waipio.mk +endif + +ifeq ($(CONFIG_ARCH_LAHAINA), y) +include $(CAMERA_KERNEL_ROOT)/config/lahaina.mk +endif + +ifeq ($(CONFIG_ARCH_KONA), y) +include $(CAMERA_KERNEL_ROOT)/config/kona.mk +endif + +ifeq ($(CONFIG_ARCH_BENGAL), y) +include $(CAMERA_KERNEL_ROOT)/config/holi.mk +endif + +ifeq ($(CONFIG_ARCH_HOLI), y) +include $(CAMERA_KERNEL_ROOT)/config/holi.mk +endif 
+ +ifeq ($(CONFIG_ARCH_LITO), y) +include $(CAMERA_KERNEL_ROOT)/config/lito.mk +endif + +ifeq ($(CONFIG_ARCH_SHIMA), y) +include $(CAMERA_KERNEL_ROOT)/config/shima.mk +endif + +ifeq ($(CONFIG_ARCH_DIWALI), y) +include $(CAMERA_KERNEL_ROOT)/config/diwali.mk +endif + +ifeq ($(CONFIG_ARCH_CAPE), y) +include $(CAMERA_KERNEL_ROOT)/config/cape.mk +endif + +ifeq ($(CONFIG_ARCH_PARROT), y) +include $(CAMERA_KERNEL_ROOT)/config/parrot.mk +endif + +ifneq ($(KBUILD_EXTRA_CONFIGS),) +include $(KBUILD_EXTRA_CONFIGS) +endif + +# List of all camera-kernel headers +cam_include_dirs := $(shell dirname `find $(CAMERA_KERNEL_ROOT) -name '*.h'` | uniq) + +# Include UAPI headers +USERINCLUDE += \ + -I$(CAMERA_KERNEL_ROOT)/include/uapi/ +# Include Kernel headers +LINUXINCLUDE += \ + -I$(KERNEL_ROOT) \ + $(addprefix -I,$(cam_include_dirs)) \ + -I$(CAMERA_KERNEL_ROOT)/include/uapi/camera \ + -I$(CAMERA_KERNEL_ROOT)/ +# Optional include directories +SYNXVENDORDIR=$(CAMERA_KERNEL_ROOT)/../synx-kernel +ccflags-$(CONFIG_MSM_GLOBAL_SYNX) += -I$(KERNEL_ROOT)/drivers/media/platform/msm/synx +ccflags-$(TARGET_SYNX_ENABLE) += -I$(SYNXVENDORDIR)/include/uapi/synx/media +ccflags-$(TARGET_SYNX_ENABLE) += -I$(SYNXVENDORDIR)/msm/synx +ccflags-$(TARGET_SYNX_ENABLE) += -DCONFIG_TARGET_SYNX_ENABLE=1 +ccflags-y += -I$(CAMERA_KERNEL_ROOT)/../securemsm-kernel/ +ccflags-y += -I$(CAMERA_KERNEL_ROOT)/../securemsm-kernel/include/ + +# After creating lists, add content of 'ccflags-m' variable to 'ccflags-y' one. 
+ccflags-y += ${ccflags-m} + +camera-y := \ + drivers/cam_req_mgr/cam_req_mgr_core.o \ + drivers/cam_req_mgr/cam_req_mgr_dev.o \ + drivers/cam_req_mgr/cam_req_mgr_util.o \ + drivers/cam_req_mgr/cam_mem_mgr.o \ + drivers/cam_req_mgr/cam_req_mgr_workq.o \ + drivers/cam_req_mgr/cam_req_mgr_timer.o \ + drivers/cam_req_mgr/cam_req_mgr_debug.o \ + drivers/cam_utils/cam_soc_util.o \ + drivers/cam_utils/cam_packet_util.o \ + drivers/cam_utils/cam_debug_util.o \ + drivers/cam_utils/cam_trace.o \ + drivers/cam_utils/cam_common_util.o \ + drivers/cam_utils/cam_compat.o \ + drivers/cam_core/cam_context.o \ + drivers/cam_core/cam_context_utils.o \ + drivers/cam_core/cam_node.o \ + drivers/cam_core/cam_subdev.o \ + drivers/cam_smmu/cam_smmu_api.o \ + drivers/cam_sync/cam_sync.o \ + drivers/cam_sync/cam_sync_util.o \ + drivers/cam_sync/cam_sync_dma_fence.o \ + drivers/cam_cpas/cpas_top/cam_cpastop_hw.o \ + drivers/cam_cpas/camss_top/cam_camsstop_hw.o \ + drivers/cam_cpas/cam_cpas_soc.o \ + drivers/cam_cpas/cam_cpas_intf.o \ + drivers/cam_cpas/cam_cpas_hw.o \ + drivers/cam_cdm/cam_cdm_soc.o \ + drivers/cam_cdm/cam_cdm_util.o \ + drivers/cam_cdm/cam_cdm_intf.o \ + drivers/cam_cdm/cam_cdm_core_common.o \ + drivers/cam_cdm/cam_cdm_virtual_core.o \ + drivers/cam_cdm/cam_cdm_hw_core.o \ + drivers/cam_vmrm/cam_vmrm_interface.o + +ifeq (,$(filter $(CONFIG_CAM_PRESIL),y m)) + camera-y += drivers/cam_presil/stub/cam_presil_hw_access_stub.o + camera-y += drivers/cam_utils/cam_io_util.o +else + camera-y += drivers/cam_presil/presil/cam_presil_io_util.o + camera-y += drivers/cam_presil/presil/cam_presil_hw_access.o + camera-y += drivers/cam_presil/presil_framework_dev/cam_presil_framework_dev.o + ccflags-y += -DCONFIG_CAM_PRESIL=1 +endif + +camera-$(TARGET_SYNX_ENABLE) += drivers/cam_sync/cam_sync_synx.o +camera-$(CONFIG_QCOM_CX_IPEAK) += drivers/cam_utils/cam_cx_ipeak.o +camera-$(CONFIG_QCOM_BUS_SCALING) += drivers/cam_utils/cam_soc_bus.o +camera-$(CONFIG_INTERCONNECT_QCOM) += 
drivers/cam_utils/cam_soc_icc.o + +camera-$(CONFIG_SPECTRA_ISP) += \ + drivers/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.o \ + drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.o \ + drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_common.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver1.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_mod.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite_mod.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_soc.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_dev.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_core.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver4.o \ + 
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe.o \ + drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.o \ + drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.o \ + drivers/cam_isp/cam_isp_dev.o \ + drivers/cam_isp/cam_isp_context.o + +camera-$(CONFIG_SPECTRA_ICP) += \ + drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.o \ + drivers/cam_icp/icp_hw/ipe_hw/ipe_dev.o \ + drivers/cam_icp/icp_hw/ipe_hw/ipe_core.o \ + drivers/cam_icp/icp_hw/ipe_hw/ipe_soc.o \ + drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_dev.o \ + drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_core.o \ + drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_dev.o \ + drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_core.o \ + drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_proc_common.o \ + drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_soc_common.o \ + drivers/cam_icp/icp_hw/icp_proc/cam_icp_proc.o \ + drivers/cam_icp/icp_hw/bps_hw/bps_dev.o \ + drivers/cam_icp/icp_hw/bps_hw/bps_core.o \ + drivers/cam_icp/icp_hw/bps_hw/bps_soc.o \ + drivers/cam_icp/icp_hw/ofe_hw/ofe_dev.o \ + drivers/cam_icp/icp_hw/ofe_hw/ofe_core.o \ + drivers/cam_icp/icp_hw/ofe_hw/ofe_soc.o \ + drivers/cam_icp/cam_icp_subdev.o \ + drivers/cam_icp/cam_icp_context.o \ + drivers/cam_icp/hfi.o + +camera-$(CONFIG_SPECTRA_JPEG) += \ + drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.o \ + drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.o \ + drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.o \ + 
drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.o \ + drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.o \ + drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.o \ + drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.o \ + drivers/cam_jpeg/cam_jpeg_dev.o \ + drivers/cam_jpeg/cam_jpeg_context.o + +camera-$(CONFIG_SPECTRA_FD) += \ + drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.o \ + drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.o \ + drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.o \ + drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.o \ + drivers/cam_fd/cam_fd_dev.o \ + drivers/cam_fd/cam_fd_context.o + +camera-$(CONFIG_SPECTRA_LRME) += \ + drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.o \ + drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.o \ + drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.o \ + drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.o \ + drivers/cam_lrme/cam_lrme_dev.o \ + drivers/cam_lrme/cam_lrme_context.o + +camera-$(CONFIG_SPECTRA_SENSOR) += \ + drivers/cam_sensor_module/cam_actuator/cam_actuator_dev.o \ + drivers/cam_sensor_module/cam_actuator/cam_actuator_core.o \ + drivers/cam_sensor_module/cam_actuator/cam_actuator_soc.o \ + drivers/cam_sensor_module/cam_cci/cam_cci_dev.o \ + drivers/cam_sensor_module/cam_cci/cam_cci_core.o \ + drivers/cam_sensor_module/cam_cci/cam_cci_soc.o \ + drivers/cam_sensor_module/cam_tpg/cam_tpg_dev.o \ + drivers/cam_sensor_module/cam_tpg/cam_tpg_core.o \ + drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw.o \ + drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_common.o \ + drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0.o \ + drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2.o \ + drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3.o \ + drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4.o \ + drivers/cam_sensor_module/cam_csiphy/cam_csiphy_soc.o \ + drivers/cam_sensor_module/cam_csiphy/cam_csiphy_dev.o \ + drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.o 
\ + drivers/cam_sensor_module/cam_eeprom/cam_eeprom_dev.o \ + drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.o \ + drivers/cam_sensor_module/cam_eeprom/cam_eeprom_soc.o \ + drivers/cam_sensor_module/cam_ois/cam_ois_dev.o \ + drivers/cam_sensor_module/cam_ois/cam_ois_core.o \ + drivers/cam_sensor_module/cam_ois/cam_ois_soc.o \ + drivers/cam_sensor_module/cam_sensor/cam_sensor_dev.o \ + drivers/cam_sensor_module/cam_sensor/cam_sensor_core.o \ + drivers/cam_sensor_module/cam_sensor/cam_sensor_soc.o \ + drivers/cam_sensor_module/cam_sensor_io/cam_sensor_io.o \ + drivers/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.o \ + drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.o \ + drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i3c.o \ + drivers/cam_sensor_module/cam_sensor_io/cam_sensor_spi.o \ + drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_util.o \ + drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr.o \ + drivers/cam_sensor_module/cam_flash/cam_flash_dev.o \ + drivers/cam_sensor_module/cam_flash/cam_flash_core.o \ + drivers/cam_sensor_module/cam_flash/cam_flash_soc.o \ + drivers/cam_sensor_module/cam_sensor_module_debug.o + +camera-$(CONFIG_SPECTRA_CUSTOM) += \ + drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_soc.o \ + drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_dev.o \ + drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_core.o \ + drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid_dev.o \ + drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.o \ + drivers/cam_cust/cam_custom_dev.o \ + drivers/cam_cust/cam_custom_context.o + +camera-$(CONFIG_SPECTRA_OPE) += \ + drivers/cam_ope/cam_ope_subdev.o \ + drivers/cam_ope/cam_ope_context.o \ + drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.o \ + drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev.o \ + drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.o \ + drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.o \ + 
drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.o \ + drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.o\ + drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.o + +camera-$(CONFIG_SPECTRA_CRE) += \ + drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.o \ + drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.o \ + drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev.o \ + drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.o \ + drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.o \ + drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.o \ + drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.o \ + drivers/cam_cre/cam_cre_dev.o \ + drivers/cam_cre/cam_cre_context.o + +camera-$(CONFIG_SPECTRA_TFE) += \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_core.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_dev.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi100.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.o \ + drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid.o \ + drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.o + +camera-$(CONFIG_SPECTRA_VMRM) += \ + drivers/cam_vmrm/qrtr/cam_qrtr_comms.o \ + drivers/cam_vmrm/cam_vmrm.o \ + drivers/cam_vmrm/cam_vmrm_gh_wrapper.o + +camera-y += drivers/camera_main.o + +obj-m += camera.o +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/camera.ko diff --git a/qcom/opensource/camera-kernel/Kconfig b/qcom/opensource/camera-kernel/Kconfig new file mode 100644 index 0000000000..60a888c2cc --- /dev/null +++ b/qcom/opensource/camera-kernel/Kconfig @@ -0,0 +1,119 @@ +config 
SPECTRA_ISP + bool "enable camera ISP module" + help + This is enabling camera ISP module. + IFE camera driver file be included. + This will enable camera ISP driver to handle IFE driver. + Core camera driver to handle VFE HW. + +config SPECTRA_ICP + bool "enable camera ICP module" + help + This is enabling camera ICP module. + Camera ICP driver file be included. + This will enable camera ICP driver to interact with, + ICP FW. Which can control IPE and BPS HW. + +config SPECTRA_JPEG + bool "enable camera jpeg module" + help + This is enabling camera JPEG module. + camera jpeg module will be functional. + This module interact with jpeg HW for + snapshot processing. + +config SPECTRA_SENSOR + bool "enable camera sensor module" + help + This is enabling camera sensor module. + sensor module files will be included to enable the driver. + eeprom, flash, csiphy all other module will be included + to make the sensor functional. + +config SPECTRA_USE_CLK_CRM_API + bool "enable camera cesta clk feature" + help + This is enabling camera cesta feature. + kernel clk driver api will cache the clk value + send by camera hw and apply to CRM hw. + CRM hw will consolidate and apply the clk to camera HW. + +config SPECTRA_USE_RPMH_DRV_API + bool "enable camera new ICC BW api" + help + This is helping to set BW through kernel icc driver. + Kernel icc driver do caching the bw information. + icc driver will update the bw information RPMH, + to apply proper bw voting for camera to mmnoc + +config SPECTRA_LLCC_STALING + bool "enable camera new LLCC staling feature" + help + This will enable to call LLCC driver cache notification API + and increment count API. + Which will enable LLCC staling feature,based on counter + invalidate the cache slice. + +config TARGET_SYNX_ENABLE + bool "enable HW synx" + help + This is enabling HW syx feature. + synx hw signaling external to camera, + like EVA and other. + This is to avoid sw signaling latency. 
+ +config MSM_MMRM + bool "enable MSM MMRM" + help + This is enabling mmrm api access. + mmrm to cahce the clk voting. + mmrm knows about all clients' clock rates. + MMRM can decide whether new peak power usage is within allowed value + +config INTERCONNECT_QCOM + bool "enabling icc bus driver" + help + This is to access icc driver api. + To cache AB and IB vote will + use ICC kernel driver API. + So that ICC driver can consolidate the vote + +config DOMAIN_ID_SECURE_CAMERA + bool "enable domain ID based secure camera flow" + help + This is to enable domain ID based secure + camera flow on the hardware platforms with + domain ID based security architecture. + VC based security can be achieved with this. + +config DYNAMIC_FD_PORT_CONFIG + bool "enable dynamic FD port config feature" + help + This config enables dynamic FD port config + feature that allows the userspace to configure + the FD port to secure or non-secure based on + the FD solution in use in secure camera use cases. + +config SECURE_CAMERA_25 + bool "enable secure camera 2.5" + help + This config enables 2.5 secure camera + feature that allows the usage of non contiguous + buffers for the secure camera images by + lending the buffers to a trusted virtual machine. + +config SPECTRA_SECURE_DYN_PORT_CFG + bool "enable dynamic port config feature" + help + This config enables dynamic port config + feature that allows the userspace to configure + the output port(s) of a device to secure or + non-secure based the usecase requirements. + +config SPECTRA_SECURE_CAMNOC_REG_UPDATE + bool "enable secure camnoc reg update feature" + help + This config enables secure reg update + feature that allows the HLOS to configure + camnoc registers through HYP to avoid + potential security issues. 
diff --git a/qcom/opensource/camera-kernel/Makefile b/qcom/opensource/camera-kernel/Makefile
new file mode 100644
index 0000000000..b8abb53c95
--- /dev/null
+++ b/qcom/opensource/camera-kernel/Makefile
@@ -0,0 +1,27 @@
+# Makefile for use with Android's kernel/build system
+
+KBUILD_OPTIONS += CAMERA_KERNEL_ROOT=$(shell pwd)
+KBUILD_OPTIONS += KERNEL_ROOT=$(ROOT_DIR)/$(KERNEL_DIR)
+KBUILD_OPTIONS += MODNAME=camera
+
+all: modules
+
+CAMERA_COMPILE_TIME = $(shell date)
+CAMERA_COMPILE_BY = $(shell whoami | sed 's/\\/\\\\/g')
+CAMERA_COMPILE_HOST = $(shell uname -n)
+
+cam_generated_h: $(shell find . -iname "*.c") $(shell find . -iname "*.h") $(shell find . -iname "*.mk")
+	echo '#define CAMERA_COMPILE_TIME "$(CAMERA_COMPILE_TIME)"' > cam_generated_h
+	echo '#define CAMERA_COMPILE_BY "$(CAMERA_COMPILE_BY)"' >> cam_generated_h
+	echo '#define CAMERA_COMPILE_HOST "$(CAMERA_COMPILE_HOST)"' >> cam_generated_h
+
+modules: cam_generated_h
+
+modules dtbs:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
+
+clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) clean
diff --git a/qcom/opensource/camera-kernel/Makefile.am b/qcom/opensource/camera-kernel/Makefile.am
new file mode 100644
index 0000000000..1f2a040f0a
--- /dev/null
+++ b/qcom/opensource/camera-kernel/Makefile.am
@@ -0,0 +1,30 @@
+# Makefile for use with TVM camera's kernel/build system
+
+KBUILD_OPTIONS += CAMERA_KERNEL_ROOT=$(shell pwd)
+KBUILD_OPTIONS += KERNEL_ROOT=$(ROOT_DIR)/$(KERNEL_DIR)
+KBUILD_OPTIONS += MODNAME=camera
+
+all: modules
+
+CAMERA_COMPILE_TIME = $(shell date)
+CAMERA_COMPILE_BY = $(shell whoami | sed 's/\\/\\\\/g')
+CAMERA_COMPILE_HOST = $(shell uname -n)
+
+cam_generated_h: $(shell find . -iname "*.c") $(shell find . -iname "*.h") $(shell find . -iname "*.mk")
+	echo '#define CAMERA_COMPILE_TIME "$(CAMERA_COMPILE_TIME)"' > cam_generated_h
+	echo '#define CAMERA_COMPILE_BY "$(CAMERA_COMPILE_BY)"' >> cam_generated_h
+	echo '#define CAMERA_COMPILE_HOST "$(CAMERA_COMPILE_HOST)"' >> cam_generated_h
+
+modules: cam_generated_h
+
+modules dtbs:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
+
+clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) clean
+
+headers_install:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) headers_install $(KBUILD_OPTIONS)
diff --git a/qcom/opensource/camera-kernel/board.mk b/qcom/opensource/camera-kernel/board.mk
new file mode 100644
index 0000000000..a69ba64cb0
--- /dev/null
+++ b/qcom/opensource/camera-kernel/board.mk
@@ -0,0 +1,15 @@
+# Build camera kernel driver
+CAMERA_DLKM_ENABLED := true
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+  ifeq ($(TARGET_KERNEL_DLKM_CAMERA_OVERRIDE), false)
+    CAMERA_DLKM_ENABLED := false
+  endif
+endif
+
+ifeq ($(CAMERA_DLKM_ENABLED),true)
+ifneq ($(TARGET_BOARD_AUTO),true)
+ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/camera.ko
+endif
+endif
+endif
diff --git a/qcom/opensource/camera-kernel/camera_modules.bzl b/qcom/opensource/camera-kernel/camera_modules.bzl
new file mode 100644
index 0000000000..d4a65ed399
--- /dev/null
+++ b/qcom/opensource/camera-kernel/camera_modules.bzl
@@ -0,0 +1,268 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_module")
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+load("//msm-kernel:target_variants.bzl", "get_all_variants")
+load(":project_defconfig.bzl", "get_project_defconfig")
+
+def _define_module(target, variant):
+    tv = "{}_{}".format(target, variant)
+    deps = [
+        ":camera_headers",
+        ":camera_banner",
+        "//msm-kernel:all_headers",
+    ]
+
+    # Generate the defconfig file dynamically
+    native.genrule(
+        name = "{}_defconfig_generated".format(tv),
+        srcs = [
+ # Use the base target/variant defconfig to start + # and concatenate and project-specific config + #"{}_defconfig".format(tv), + get_project_defconfig(target, variant), + ], + outs = ["{}_defconfig.generated".format(tv)], + cmd = "cat $(SRCS) > $@", + ) + + if target == "pineapple": + deps.extend([ + "//vendor/qcom/opensource/synx-kernel:synx_headers", + "//vendor/qcom/opensource/synx-kernel:{}_modules".format(tv), + "//vendor/qcom/opensource/securemsm-kernel:smcinvoke_kernel_headers", + "//vendor/qcom/opensource/securemsm-kernel:smmu_proxy_headers", + "//vendor/qcom/opensource/securemsm-kernel:{}_smcinvoke_dlkm".format(tv), + "//vendor/qcom/opensource/securemsm-kernel:{}_smmu_proxy_dlkm".format(tv), + "//vendor/qcom/opensource/mmrm-driver:{}_mmrm_driver".format(tv), + ]) + if target == "sun": + deps.extend([ + "//vendor/qcom/opensource/synx-kernel:synx_headers", + "//vendor/qcom/opensource/synx-kernel:{}_modules".format(tv), + "//vendor/qcom/opensource/securemsm-kernel:smcinvoke_kernel_headers", + "//vendor/qcom/opensource/securemsm-kernel:smmu_proxy_headers", + "//vendor/qcom/opensource/securemsm-kernel:{}_smcinvoke_dlkm".format(tv), + "//vendor/qcom/opensource/securemsm-kernel:{}_smmu_proxy_dlkm".format(tv), + "//vendor/qcom/opensource/mmrm-driver:{}_mmrm_driver".format(tv), + ]) + ddk_module( + name = "{}_camera".format(tv), + out = "camera.ko", + srcs = [ + "drivers/cam_req_mgr/cam_req_mgr_core.c", + "drivers/cam_req_mgr/cam_req_mgr_dev.c", + "drivers/cam_req_mgr/cam_req_mgr_util.c", + "drivers/cam_req_mgr/cam_mem_mgr.c", + "drivers/cam_req_mgr/cam_req_mgr_workq.c", + "drivers/cam_req_mgr/cam_req_mgr_timer.c", + "drivers/cam_req_mgr/cam_req_mgr_debug.c", + "drivers/cam_utils/cam_soc_util.c", + "drivers/cam_utils/cam_packet_util.c", + "drivers/cam_utils/cam_debug_util.c", + "drivers/cam_utils/cam_trace.c", + "drivers/cam_utils/cam_common_util.c", + "drivers/cam_utils/cam_compat.c", + "drivers/cam_core/cam_context.c", + "drivers/cam_core/cam_context_utils.c", 
+ "drivers/cam_core/cam_node.c", + "drivers/cam_core/cam_subdev.c", + "drivers/cam_smmu/cam_smmu_api.c", + "drivers/cam_sync/cam_sync.c", + "drivers/cam_sync/cam_sync_util.c", + "drivers/cam_sync/cam_sync_dma_fence.c", + "drivers/cam_cpas/cpas_top/cam_cpastop_hw.c", + "drivers/cam_cpas/camss_top/cam_camsstop_hw.c", + "drivers/cam_cpas/cam_cpas_soc.c", + "drivers/cam_cpas/cam_cpas_intf.c", + "drivers/cam_cpas/cam_cpas_hw.c", + "drivers/cam_cdm/cam_cdm_soc.c", + "drivers/cam_cdm/cam_cdm_util.c", + "drivers/cam_cdm/cam_cdm_intf.c", + "drivers/cam_cdm/cam_cdm_core_common.c", + "drivers/cam_cdm/cam_cdm_virtual_core.c", + "drivers/cam_cdm/cam_cdm_hw_core.c", + "drivers/cam_utils/cam_soc_icc.c", + "drivers/cam_vmrm/cam_vmrm_interface.c", + "drivers/camera_main.c", + ], + conditional_srcs = { + "CONFIG_TARGET_SYNX_ENABLE": { + True: ["drivers/cam_sync/cam_sync_synx.c"], + }, + "CONFIG_QCOM_CX_IPEAK": { + True: ["drivers/cam_utils/cam_cx_ipeak.c"], + }, + "CONFIG_INTERCONNECT_QCOM": { + True: ["drivers/cam_utils/cam_soc_icc.c"], + }, + "CONFIG_SPECTRA_ISP": { + True: [ + "drivers/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c", + "drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c", + "drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_common.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver1.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_mod.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite_mod.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_soc.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_dev.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_core.c", + 
"drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver4.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c", + "drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe.c", + "drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c", + "drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c", + "drivers/cam_isp/cam_isp_dev.c", + "drivers/cam_isp/cam_isp_context.c", + ], + }, + "CONFIG_SPECTRA_ICP": { + True: [ + "drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c", + "drivers/cam_icp/icp_hw/ipe_hw/ipe_dev.c", + "drivers/cam_icp/icp_hw/ipe_hw/ipe_core.c", + 
"drivers/cam_icp/icp_hw/ipe_hw/ipe_soc.c", + "drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_dev.c", + "drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_core.c", + "drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_dev.c", + "drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_core.c", + "drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_proc_common.c", + "drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_soc_common.c", + "drivers/cam_icp/icp_hw/icp_proc/cam_icp_proc.c", + "drivers/cam_icp/icp_hw/bps_hw/bps_dev.c", + "drivers/cam_icp/icp_hw/bps_hw/bps_core.c", + "drivers/cam_icp/icp_hw/bps_hw/bps_soc.c", + "drivers/cam_icp/icp_hw/ofe_hw/ofe_dev.c", + "drivers/cam_icp/icp_hw/ofe_hw/ofe_core.c", + "drivers/cam_icp/icp_hw/ofe_hw/ofe_soc.c", + "drivers/cam_icp/cam_icp_subdev.c", + "drivers/cam_icp/cam_icp_context.c", + "drivers/cam_icp/hfi.c", + ], + }, + "CONFIG_SPECTRA_JPEG": { + True: [ + "drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c", + "drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c", + "drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c", + "drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c", + "drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c", + "drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c", + "drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c", + "drivers/cam_jpeg/cam_jpeg_dev.c", + "drivers/cam_jpeg/cam_jpeg_context.c", + ], + }, + "CONFIG_SPECTRA_SENSOR": { + True: [ + "drivers/cam_sensor_module/cam_actuator/cam_actuator_dev.c", + "drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c", + "drivers/cam_sensor_module/cam_actuator/cam_actuator_soc.c", + "drivers/cam_sensor_module/cam_cci/cam_cci_dev.c", + "drivers/cam_sensor_module/cam_cci/cam_cci_core.c", + "drivers/cam_sensor_module/cam_cci/cam_cci_soc.c", + "drivers/cam_sensor_module/cam_tpg/cam_tpg_dev.c", + "drivers/cam_sensor_module/cam_tpg/cam_tpg_core.c", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw.c", + 
"drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_common.c", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0.c", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2.c", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3.c", + "drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4.c", + "drivers/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c", + "drivers/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c", + "drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.c", + "drivers/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c", + "drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.c", + "drivers/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c", + "drivers/cam_sensor_module/cam_ois/cam_ois_dev.c", + "drivers/cam_sensor_module/cam_ois/cam_ois_core.c", + "drivers/cam_sensor_module/cam_ois/cam_ois_soc.c", + "drivers/cam_sensor_module/cam_sensor/cam_sensor_dev.c", + "drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c", + "drivers/cam_sensor_module/cam_sensor/cam_sensor_soc.c", + "drivers/cam_sensor_module/cam_sensor_io/cam_sensor_io.c", + "drivers/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c", + "drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c", + "drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i3c.c", + "drivers/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c", + "drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c", + "drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr.c", + "drivers/cam_sensor_module/cam_flash/cam_flash_dev.c", + "drivers/cam_sensor_module/cam_flash/cam_flash_core.c", + "drivers/cam_sensor_module/cam_flash/cam_flash_soc.c", + "drivers/cam_sensor_module/cam_sensor_module_debug.c", + ], + }, + "CONFIG_SPECTRA_CUSTOM": { + True: [ + "drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_soc.c", + "drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_dev.c", + 
"drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_core.c", + "drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid_dev.c", + "drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.c", + "drivers/cam_cust/cam_custom_dev.c", + "drivers/cam_cust/cam_custom_context.c", + ], + }, + "CONFIG_QCOM_BUS_SCALING": { + True: ["drivers/cam_utils/cam_soc_bus.c"], + }, + "CONFIG_CAM_PRESIL": { + # Sources need to be available to specify + # True: [ + # "drivers/cam_presil/presil/cam_presil_io_util.c", + # "drivers/cam_presil/presil/cam_presil_hw_access.c", + # "drivers/cam_presil/presil_framework_dev/cam_presil_framework_dev.c", + # ], + False: [ + "drivers/cam_presil/stub/cam_presil_hw_access_stub.c", + "drivers/cam_utils/cam_io_util.c", + ], + }, + "CONFIG_SPECTRA_VMRM": { + True: ["drivers/cam_vmrm/qrtr/cam_qrtr_comms.c"], + }, + }, + copts = ["-include", "$(location :camera_banner)"], + deps = deps, + kconfig = "Kconfig", + defconfig = "{}_defconfig_generated".format(tv), + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_camera_dist".format(tv), + data = [":{}_camera".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + ) + +def define_camera_module(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/qcom/opensource/camera-kernel/canoe_defconfig b/qcom/opensource/camera-kernel/canoe_defconfig new file mode 100644 index 0000000000..662aaaec52 --- /dev/null +++ b/qcom/opensource/camera-kernel/canoe_defconfig @@ -0,0 +1,13 @@ +CONFIG_SPECTRA_ISP=y +CONFIG_SPECTRA_ICP=y +CONFIG_SPECTRA_JPEG=y +CONFIG_SPECTRA_SENSOR=y +CONFIG_SPECTRA_USE_CLK_CRM_API=y +CONFIG_SPECTRA_USE_RPMH_DRV_API=y +CONFIG_SPECTRA_LLCC_STALING=y +#CONFIG_TARGET_SYNX_ENABLE=y +#CONFIG_MSM_MMRM=y +#CONFIG_INTERCONNECT_QCOM=y +#CONFIG_DOMAIN_ID_SECURE_CAMERA=y 
+#CONFIG_DYNAMIC_FD_PORT_CONFIG=y +#CONFIG_SECURE_CAMERA_25=y diff --git a/qcom/opensource/camera-kernel/config/cape.mk b/qcom/opensource/camera-kernel/config/cape.mk new file mode 100644 index 0000000000..84c346c350 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/cape.mk @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Settings for compiling cape camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_CUSTOM := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_CUSTOM=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 + +# External Dependencies +KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM=1 +ifeq ($(CONFIG_QCOM_VA_MINIDUMP), y) +KBUILD_CPPFLAGS += -DCONFIG_QCOM_VA_MINIDUMP=1 +endif diff --git a/qcom/opensource/camera-kernel/config/diwali.mk b/qcom/opensource/camera-kernel/config/diwali.mk new file mode 100644 index 0000000000..718011e9a3 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/diwali.mk @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Settings for compiling diwali camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 diff --git a/qcom/opensource/camera-kernel/config/holi.mk b/qcom/opensource/camera-kernel/config/holi.mk new file mode 100644 index 0000000000..0ffdaaca46 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/holi.mk @@ -0,0 +1,13 @@ +# Settings for compiling holi camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_OPE := y +CONFIG_SPECTRA_TFE 
:= y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_OPE=1 +ccflags-y += -DCONFIG_SPECTRA_TFE=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 diff --git a/qcom/opensource/camera-kernel/config/kalama.mk b/qcom/opensource/camera-kernel/config/kalama.mk new file mode 100644 index 0000000000..30c90c5dd6 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/kalama.mk @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_SENSOR := y +CONFIG_SPECTRA_USE_RPMH_DRV_API := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 +ccflags-y += -DCONFIG_SPECTRA_USE_RPMH_DRV_API=1 + +# External Dependencies +KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM=1 +ifeq ($(CONFIG_QCOM_VA_MINIDUMP), y) +KBUILD_CPPFLAGS += -DCONFIG_QCOM_VA_MINIDUMP=1 +endif diff --git a/qcom/opensource/camera-kernel/config/kona.mk b/qcom/opensource/camera-kernel/config/kona.mk new file mode 100644 index 0000000000..94ee92a9b6 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/kona.mk @@ -0,0 +1,15 @@ +# Settings for compiling kona camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_FD := y +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_FD=1 +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 diff --git a/qcom/opensource/camera-kernel/config/lahaina.mk b/qcom/opensource/camera-kernel/config/lahaina.mk new file mode 100644 index 0000000000..2962ea0fc3 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/lahaina.mk @@ -0,0 +1,15 
@@ +# Settings for compiling lahaina camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_CUSTOM := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_CUSTOM=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 diff --git a/qcom/opensource/camera-kernel/config/lito.mk b/qcom/opensource/camera-kernel/config/lito.mk new file mode 100644 index 0000000000..6ea1131258 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/lito.mk @@ -0,0 +1,17 @@ +# Settings for compiling lito camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_FD := y +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_LRME := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_FD=1 +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_LRME=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 diff --git a/qcom/opensource/camera-kernel/config/parrot.mk b/qcom/opensource/camera-kernel/config/parrot.mk new file mode 100644 index 0000000000..fa22a0de1e --- /dev/null +++ b/qcom/opensource/camera-kernel/config/parrot.mk @@ -0,0 +1,15 @@ +# Settings for compiling netrani camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_TFE := y +CONFIG_SPECTRA_CRE := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_TFE=1 +ccflags-y += -DCONFIG_SPECTRA_CRE=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 diff --git a/qcom/opensource/camera-kernel/config/pineapple.mk b/qcom/opensource/camera-kernel/config/pineapple.mk new file 
mode 100644 index 0000000000..479a1f71f2 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/pineapple.mk @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_SENSOR := y +CONFIG_SPECTRA_LLCC_STALING := y +CONFIG_SPECTRA_USE_RPMH_DRV_API := y +CONFIG_SPECTRA_USE_CLK_CRM_API := y +CONFIG_DOMAIN_ID_SECURE_CAMERA := y +CONFIG_DYNAMIC_FD_PORT_CONFIG := y +CONFIG_SECURE_CAMERA_25 := y +CONFIG_SPECTRA_SECURE_CAMNOC_REG_UPDATE := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 +ccflags-y += -DCONFIG_SPECTRA_LLCC_STALING=1 +ccflags-y += -DCONFIG_SPECTRA_USE_RPMH_DRV_API=1 +ccflags-y += -DCONFIG_SPECTRA_USE_CLK_CRM_API=1 +ccflags-y += -DCONFIG_DOMAIN_ID_SECURE_CAMERA=1 +ccflags-y += -DCONFIG_DYNAMIC_FD_PORT_CONFIG=1 +ccflags-y += -DCONFIG_SECURE_CAMERA_25=1 +ccflags-y += -DCONFIG_SPECTRA_SECURE_CAMNOC_REG_UPDATE=1 + +# External Dependencies +KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM=1 +ifeq ($(CONFIG_QCOM_VA_MINIDUMP), y) +KBUILD_CPPFLAGS += -DCONFIG_QCOM_VA_MINIDUMP=1 +endif diff --git a/qcom/opensource/camera-kernel/config/pineapple_tuivm.mk b/qcom/opensource/camera-kernel/config/pineapple_tuivm.mk new file mode 100644 index 0000000000..cb7db3cf81 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/pineapple_tuivm.mk @@ -0,0 +1,15 @@ +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +#CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_SENSOR := y +CONFIG_SPECTRA_LLCC_STALING := y +CONFIG_INTERCONNECT_QCOM := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +#ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 +ccflags-y += -DCONFIG_SPECTRA_LLCC_STALING=1 +ccflags-y += 
-DCONFIG_INTERCONNECT_QCOM=1 diff --git a/qcom/opensource/camera-kernel/config/shima.mk b/qcom/opensource/camera-kernel/config/shima.mk new file mode 100644 index 0000000000..c86bb31341 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/shima.mk @@ -0,0 +1,13 @@ +# Settings for compiling shima camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 diff --git a/qcom/opensource/camera-kernel/config/waipio.mk b/qcom/opensource/camera-kernel/config/waipio.mk new file mode 100644 index 0000000000..6a96551256 --- /dev/null +++ b/qcom/opensource/camera-kernel/config/waipio.mk @@ -0,0 +1,19 @@ +# Settings for compiling waipio camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 +ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 + +# External Dependencies +KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM=1 +ifeq ($(CONFIG_QCOM_VA_MINIDUMP), y) +KBUILD_CPPFLAGS += -DCONFIG_QCOM_VA_MINIDUMP=1 +endif diff --git a/qcom/opensource/camera-kernel/config/yupik.mk b/qcom/opensource/camera-kernel/config/yupik.mk new file mode 100644 index 0000000000..8c08e40cfd --- /dev/null +++ b/qcom/opensource/camera-kernel/config/yupik.mk @@ -0,0 +1,15 @@ +# Settings for compiling yupik camera architecture + +# Localized KCONFIG settings +CONFIG_SPECTRA_ISP := y +CONFIG_SPECTRA_ICP := y +CONFIG_SPECTRA_JPEG := y +CONFIG_SPECTRA_LRME := y +CONFIG_SPECTRA_SENSOR := y + +# Flags to pass into C preprocessor +ccflags-y += -DCONFIG_SPECTRA_ISP=1 +ccflags-y += -DCONFIG_SPECTRA_ICP=1 
+ccflags-y += -DCONFIG_SPECTRA_JPEG=1 +ccflags-y += -DCONFIG_SPECTRA_LRME=1 +ccflags-y += -DCONFIG_SPECTRA_SENSOR=1 diff --git a/qcom/opensource/camera-kernel/dependency.mk b/qcom/opensource/camera-kernel/dependency.mk new file mode 100644 index 0000000000..747bea077c --- /dev/null +++ b/qcom/opensource/camera-kernel/dependency.mk @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# Check if this board's product.mk finds msm-mmrm.ko driver +ifeq ($(findstring msm-mmrm.ko,$(BOARD_VENDOR_KERNEL_MODULES)), msm-mmrm.ko) +# Assume if msm-mmrm.ko driver is found, then symbols will be required! + +ifeq ($(call is-board-platform-in-list, $(MMRM_BOARDS)),true) +# Add MMRM driver symbols, requires absolute path +CAM_MMRM_EXTRA_SYMBOLS ?= $(realpath $(TOP))/$(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers +$(info camera-kernel: Found msm-mmrm driver, adding symbol dependency! $(CAM_MMRM_EXTRA_SYMBOLS)) +ifneq ($(TARGET_BOARD_PLATFORM), pineapple) +LOCAL_REQUIRED_MODULES := mmrm-module-symvers +endif # End of check lanai +CAM_MMRM_EXTRA_CONFIGS ?= $(realpath $(TOP))/vendor/qcom/opensource/mmrm-driver/config/waipiommrm.conf +LOCAL_ADDITIONAL_DEPENDENCIES := $(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers + +endif # End of check for board platform MMRM_BOARDS + +endif # End of find msm-mmrm driver + +# Check if this board's product.mk finds synx-driver.ko driver +ifeq ($(findstring synx-driver.ko,$(BOARD_VENDOR_KERNEL_MODULES)), synx-driver.ko) +# Assume if synx-driver.ko driver is found, then symbols will be required! + +ifeq ($(call is-board-platform-in-list, $(SYNX_VENDOR_BOARDS)),true) +# Add SYNX driver symbols, requires absolute path +CAM_SYNX_EXTRA_SYMBOLS ?= $(realpath $(TOP))/$(call intermediates-dir-for,DLKM,synx-driver-symvers)/synx-driver-symvers +$(info camera-kernel: Found synx driver, adding symbol dependency! 
$(CAM_SYNX_EXTRA_SYMBOLS)) +LOCAL_REQUIRED_MODULES := synx-driver-symvers +CAM_SYNX_EXTRA_CONFIGS ?= $(realpath $(TOP))/vendor/qcom/opensource/synx-kernel/config/pineapplesynx.conf +LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,synx-driver-symvers)/synx-driver-symvers + +endif # End of check for board platform SYNX_VENDOR_BOARDS + +endif # End of find synx driver + +# Check if this board's product.mk finds smcinvoke_dlkm.ko driver +ifeq ($(findstring smcinvoke_dlkm.ko, $(BOARD_VENDOR_KERNEL_MODULES)), smcinvoke_dlkm.ko) + +ifeq ($(call is-board-platform-in-list, $(SMCINVOKE_DLKM_BOARDS)),true) +SMCINVOKE_EXTRA_SYMBOLS ?= $(realpath $(TOP))/$(call intermediates-dir-for,DLKM,smcinvoke_dlkm.ko)/Module.symvers +$(info camera-kernel: Found smcinvoke driver, adding symbol dependency! $(SMCINVOKE_EXTRA_SYMBOLS)) +LOCAL_REQUIRED_MODULES += smcinvoke_dlkm.ko +CAM_SMCINOKE_EXTRA_CONFIGS ?= $(realpath $(TOP))/vendor/qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smcinvoke.conf +LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,smcinvoke_dlkm.ko)/Module.symvers + +endif # End of check for board platform SMCINVOKE_DLKM_BOARDS + +endif # End of find smcinvoke_dlkm driver + +# Check if this board's product.mk finds smmu_proxy_dlkm.ko driver +ifeq ($(findstring smmu_proxy_dlkm.ko, $(BOARD_VENDOR_KERNEL_MODULES)), smmu_proxy_dlkm.ko) + +ifeq ($(call is-board-platform-in-list, $(SMMU_PROXY_DLKM_BOARDS)),true) +SMMU_PROXY_EXTRA_SYMBOLS ?= $(realpath $(TOP))/$(call intermediates-dir-for,DLKM,smmu_proxy_dlkm.ko)/Module.symvers +$(info camera-kernel: Found smmu proxy driver, adding symbol dependency! 
$(SMMU_PROXY_EXTRA_SYMBOLS)) +LOCAL_REQUIRED_MODULES += smmu_proxy_dlkm.ko +CAM_SMMU_PROXY_EXTRA_CONFIGS ?= $(realpath $(TOP))/vendor/qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smmu_proxy.conf +LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,smmu_proxy_dlkm.ko)/Module.symvers + +endif # End of check for board platform SMMU_PROXY_DLKM_BOARDS + +endif # End of find smmu_proxy_dlkm driver + +KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS=$(CAM_MMRM_EXTRA_SYMBOLS) KBUILD_EXTRA_SYMBOLS+=$(CAM_SYNX_EXTRA_SYMBOLS) KBUILD_EXTRA_SYMBOLS+=$(SMCINVOKE_EXTRA_SYMBOLS) KBUILD_EXTRA_SYMBOLS+=$(SMMU_PROXY_EXTRA_SYMBOLS) +KBUILD_OPTIONS += KBUILD_EXTRA_CONFIGS=$(CAM_MMRM_EXTRA_CONFIGS) KBUILD_EXTRA_CONFIGS+=$(CAM_SYNX_EXTRA_CONFIGS) KBUILD_EXTRA_CONFIGS+=$(CAM_SMCINOKE_EXTRA_CONFIGS) KBUILD_EXTRA_CONFIGS+=$(CAM_SMMU_PROXY_EXTRA_CONFIGS) diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm.h new file mode 100644 index 0000000000..fcc6868357 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm.h @@ -0,0 +1,614 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CDM_H_ +#define _CAM_CDM_H_ + +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/random.h> +#include <linux/spinlock_types.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/completion.h> + +#include "cam_cdm_intf_api.h" +#include "cam_soc_util.h" +#include "cam_cpas_api.h" +#include "cam_hw_intf.h" +#include "cam_hw.h" +#include "cam_debug_util.h" + +#define CAM_MAX_SW_CDM_VERSION_SUPPORTED 1 +#define CAM_SW_CDM_INDEX 0 +#define CAM_CDM_INFLIGHT_WORKS 5 +#define CAM_CDM_HW_RESET_TIMEOUT 300 +#define CAM_CDM_PAUSE_CORE_US_TIMEOUT 10000 + +#define CAM_CDM_CAP_PAUSE_CORE BIT(0) + +/* + * Macros to get prepare and get information + * from client CDM handles. 
+ */ + +#define CAM_CDM_HW_ID_MASK 0xF +#define CAM_CDM_HW_ID_SHIFT 0x10 + +#define CAM_CDM_CLIENTS_ID_MASK 0xFF + +#define CAM_CDM_BL_FIFO_ID_MASK 0xF +#define CAM_CDM_BL_FIFO_ID_SHIFT 0x8 + +#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \ + CAM_CDM_HW_ID_MASK) + +#define CAM_CDM_GET_BLFIFO_IDX(x) (((x) >> CAM_CDM_BL_FIFO_ID_SHIFT) & \ + CAM_CDM_BL_FIFO_ID_MASK) + +#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, priority, client_idx) \ + ((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \ + (((priority) & CAM_CDM_BL_FIFO_ID_MASK) << CAM_CDM_BL_FIFO_ID_SHIFT)| \ + ((client_idx) & CAM_CDM_CLIENTS_ID_MASK)) +#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK) +#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1) +#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1) + +/* Number of FIFO supported on CDM */ +#define CAM_CDM_NUM_BL_FIFO 0x4 +#define CAM_CDM_NUM_TEST_BUS 16 + +/* Max number of register set for different CDM */ +#define CAM_CDM_BL_FIFO_REG_NUM 0x4 +#define CAM_CDM_BL_FIFO_IRQ_REG_NUM 0x4 +#define CAM_CDM_BL_FIFO_PENDING_REQ_REG_NUM 0x2 +#define CAM_CDM_SCRATCH_REG_NUM 0xc +#define CAM_CDM_COMP_WAIT_STATUS_REG_NUM 0x2 +#define CAM_CDM_PERF_MON_REG_NUM 0x2 + +/* BL_FIFO configurations*/ +#define CAM_CDM_BL_FIFO_LENGTH_MAX_DEFAULT 0x40 +#define CAM_CDM_BL_FIFO_LENGTH_CFG_SHIFT 0x10 +#define CAM_CDM_BL_FIFO_FLUSH_SHIFT 0x3 + +#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX 0x00 +#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX_DIV2 0x01 +#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX_DIV4 0x10 +#define CAM_CDM_BL_FIFO_REQ_SIZE_MAX_DIV8 0x11 + +/* CDM core status bitmap */ +#define CAM_CDM_HW_INIT_STATUS 0x0 +#define CAM_CDM_FIFO_0_BLDONE_STATUS 0x0 +#define CAM_CDM_FIFO_1_BLDONE_STATUS 0x1 +#define CAM_CDM_FIFO_2_BLDONE_STATUS 0x2 +#define CAM_CDM_FIFO_3_BLDONE_STATUS 0x3 +#define CAM_CDM_RESET_HW_STATUS 0x4 +#define CAM_CDM_ERROR_HW_STATUS 0x5 +#define CAM_CDM_FLUSH_HW_STATUS 0x6 +#define CAM_CDM_RESET_ERR_STATUS 0x7 
+#define CAM_CDM_PF_HW_STATUS 0x8 + +/* Curent used AHB masks and shifts */ +#define CAM_CDM_AHB_LOG_CID_SHIFT 28 +#define CAM_CDM_AHB_LOG_CID_MASK 0X30000000 +#define CAM_CDM_AHB_ADDR_MASK 0x00FFFFFF + +/* Invalid command status register's masks and shifts */ +#define CAM_CDM_ICL_STATUS_LAST_CID_SHIFT 4 +#define CAM_CDM_ICL_STATUS_LAST_CID_MASK 0X30 +#define CAM_CDM_ICL_STATUS_INV_CID_MASK 0x03 + +/* Core_cfg register's masks and shifts */ +#define CAM_CDM_CORE_CFG_PRIORITY_MASK 0XF00000 +#define CAM_CDM_CORE_CFG_PRIORITY_SHIFT 20 +#define CAM_CDM_CORE_CFG_IMPLICIT_WAIT_EN_MASK 0x20000 +#define CAM_CDM_CORE_CFG_ARB_SEL_RR_MASK 0x10000 +#define CAM_CDM_CORE_CFG_AHB_STOP_ON_ERR_MASK 0x100 +#define CAM_CDM_CORE_CFG_AHB_BURST_EN_MASK 0x10 +#define CAM_CDM_CORE_CFG_AHB_BURST_LEN_MASK 0x0F + +/* Core enable register masks and shifts */ +#define CAM_CDM_CORE_EN_MASK 0x1 +#define CAM_CDM_CORE_PAUSE_MASK 0X2 + +/* Core Debug register masks and shifts */ +#define CAM_CDM_CORE_DBG_TEST_BUS_EN_MASK 0X01 +#define CAM_CDM_CORE_DBG_TEST_BUS_SEL_MASK 0XF0 +#define CAM_CDM_CORE_DBG_TEST_BUS_SEL_SHIFT 4 +#define CAM_CDM_CORE_DBG_LOG_AHB_MASK 0X100 +#define CAM_CDM_CORE_DBG_LOG_SHIFT 8 +#define CAM_CDM_CORE_DBG_FIFO_RB_EN_MASK 0x10000 +#define CAM_CDM_CORE_DBG_FIFO_RB_EN_SHIFT 16 + + +/* Curent BL command masks and shifts */ +#define CAM_CDM_CURRENT_BL_LEN 0xFFFFF +#define CAM_CDM_CURRENT_BL_ARB 0x100000 +#define CAM_CDM_CURRENT_BL_FIFO 0xC00000 +#define CAM_CDM_CURRENT_BL_TAG 0xFF000000 + +#define CAM_CDM_CURRENT_BL_ARB_SHIFT 0x14 +#define CAM_CDM_CURRENT_BL_FIFO_SHIFT 0x16 +#define CAM_CDM_CURRENT_BL_TAG_SHIFT 0x18 + +/* IRQ bit-masks */ +#define CAM_CDM_IRQ_STATUS_RST_DONE_MASK 0x1 +#define CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK 0x2 +#define CAM_CDM_IRQ_STATUS_BL_DONE_MASK 0x4 +#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000 +#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000 +#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000 +#define CAM_CDM_IRQ_STATUS_USR_DATA_MASK 
0xFF + +#define CAM_CDM_IRQ_STATUS_ERRORS \ + (CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK | \ + CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK | \ + CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) + +/* Structure to store hw version info */ +struct cam_version_reg { + uint32_t hw_version; +}; + +/** + * struct cam_cdm_irq_regs - CDM IRQ registers + * + * @irq_mask: register offset for irq_mask + * @irq_clear: register offset for irq_clear + * @irq_clear_cmd: register offset to initiate irq clear + * @irq_set: register offset to set irq + * @irq_set_cmd: register offset to issue set_irq from irq_set + * @irq_status: register offset to look which irq is received + */ +struct cam_cdm_irq_regs { + uint32_t irq_mask; + uint32_t irq_clear; + uint32_t irq_clear_cmd; + uint32_t irq_set; + uint32_t irq_set_cmd; + uint32_t irq_status; +}; + +/** + * struct cam_cdm_bl_fifo_regs - BL_FIFO registers + * + * @bl_fifo_base: register offset to write bl_cmd base address + * @bl_fifo_len: register offset to write bl_cmd length + * @bl_fifo_store: register offset to commit the BL cmd + * @bl_fifo_cfg: register offset to config BL_FIFO depth, etc. + */ +struct cam_cdm_bl_fifo_regs { + uint32_t bl_fifo_base; + uint32_t bl_fifo_len; + uint32_t bl_fifo_store; + uint32_t bl_fifo_cfg; +}; + +/** + * struct cam_cdm_bl_pending_req_reg_params - BL_FIFO pending registers + * + * @rb_offset: register offset pending bl request in BL_FIFO + * @rb_mask: mask to get number of pending BLs in BL_FIFO + * @rb_num_fifo: number of BL_FIFO's information in the register + * @rb_next_fifo_shift: shift to get next fifo's pending BLs. 
+ */ +struct cam_cdm_bl_pending_req_reg_params { + uint32_t rb_offset; + uint32_t rb_mask; + uint32_t rb_num_fifo; + uint32_t rb_next_fifo_shift; +}; + +/** + * struct cam_cdm_scratch_reg - scratch register + * + * @scratch_reg: offset of scratch register + */ +struct cam_cdm_scratch_reg { + uint32_t scratch_reg; +}; + +/* struct cam_cdm_perf_mon_regs - perf_mon registers */ +struct cam_cdm_perf_mon_regs { + uint32_t perf_mon_ctrl; + uint32_t perf_mon_0; + uint32_t perf_mon_1; + uint32_t perf_mon_2; +}; + +/** + * struct cam_cdm_perf_mon_regs - perf mon counter's registers + * + * @count_cfg_0: register offset to configure perf measures + * @always_count_val: register offset for always count value + * @busy_count_val: register offset to get busy count + * @stall_axi_count_val: register offset to get axi stall counts + * @count_status: register offset to know if count status finished + * for stall, busy and always. + */ +struct cam_cdm_perf_regs { + uint32_t count_cfg_0; + uint32_t always_count_val; + uint32_t busy_count_val; + uint32_t stall_axi_count_val; + uint32_t count_status; +}; + +/** + * struct cam_cdm_icl_data_regs - CDM icl data registers + * + * @icl_last_data_0: register offset to log last known good command + * @icl_last_data_1: register offset to log last known good command 1 + * @icl_last_data_2: register offset to log last known good command 2 + * @icl_inv_data: register offset to log CDM cmd that triggered + * invalid command. + */ +struct cam_cdm_icl_data_regs { + uint32_t icl_last_data_0; + uint32_t icl_last_data_1; + uint32_t icl_last_data_2; + uint32_t icl_inv_data; +}; + +/** + * struct cam_cdm_icl_misc_regs - CDM icl misc registers + * + * @icl_inv_bl_addr: register offset to give address of bl_cmd that + * gave invalid command + * @icl_status: register offset for context that gave good BL + * command and invalid command. 
+ */ +struct cam_cdm_icl_misc_regs { + uint32_t icl_inv_bl_addr; + uint32_t icl_status; +}; + +/** + * struct cam_cdm_icl_regs - CDM icl registers + * + * @data_regs: structure with registers of all cdm good and invalid + * BL command information. + * @misc_regs: structure with registers for invalid command address + * and context + */ +struct cam_cdm_icl_regs { + struct cam_cdm_icl_data_regs *data_regs; + struct cam_cdm_icl_misc_regs *misc_regs; +}; + +/** + * struct cam_cdm_comp_wait_status - BL_FIFO comp_event status register + * + * @comp_wait_status: register offset to give information on whether the + * CDM is waiting for an event from another module + */ +struct cam_cdm_comp_wait_status { + uint32_t comp_wait_status; +}; + +/** + * struct cam_cdm_common_reg_data - structure for register data + * + * @num_bl_fifo: number of FIFO are there in CDM + * @num_bl_fifo_irq: number of FIFO irqs in CDM + * @num_bl_pending_req_reg: number of pending_requests register in CDM + * @num_scratch_reg: number of scratch registers in CDM + * @capabilities: If some newer features are supported on target + */ +struct cam_cdm_common_reg_data { + uint32_t num_bl_fifo; + uint32_t num_bl_fifo_irq; + uint32_t num_bl_pending_req_reg; + uint32_t num_scratch_reg; + uint32_t capabilities; +}; + +/** + * struct cam_cdm_common_regs - common structure to get common registers + * of CDM + * + * @cdm_hw_version: offset to read cdm_hw_version + * @cam_version: offset to read the camera Titan architecture version + * @rst_cmd: offset to reset the CDM + * @cgc_cfg: offset to configure CDM CGC logic + * @core_cfg: offset to configure CDM core with ARB_SEL, implicit + * wait, etc. 
+ * @core_en: offset to pause/enable CDM + * @fe_cfg: offset to configure CDM fetch engine + * @cdm_status: offset to read CDM status register, this register + * indicates if CDM is idle, and if a pause operation + * is successfully completed or not + * @irq_context_status offset to read back irq context status + * @bl_fifo_rb: offset to set BL_FIFO read back + * @bl_fifo_base_rb: offset to read back base address on offset set by + * bl_fifo_rb + * @bl_fifo_len_rb: offset to read back base len and tag on offset set by + * bl_fifo_rb + * @usr_data: offset to read user data from GEN_IRQ commands + * @wait_status: offset to read status for last WAIT command + * @last_ahb_addr: offset to read back last AHB address generated by CDM + * @last_ahb_data: offset to read back last AHB data generated by CDM + * @core_debug: offset to configure CDM debug bus and debug features + * @last_ahb_err_addr: offset to read back last AHB Error address generated + * by CDM + * @last_ahb_err_data: offset to read back last AHB Error data generated + * by CDM + * @current_bl_base: offset to read back current command buffer BASE address + * value out of BL_FIFO + * @current_bl_len: offset to read back current command buffer len, TAG, + * context ID ARB value out of BL_FIFO + * @current_used_ahb_base: offset to read back current base address used by + * CDM to access camera register + * @debug_status: offset to read back current CDM status + * @bus_misr_cfg0: offset to enable bus MISR and configure sampling mode + * @bus_misr_cfg1: offset to select from one of the six MISR's for reading + * signature value + * @bus_misr_rd_val: offset to read MISR signature + * @pending_req: registers to read pending request in FIFO + * @comp_wait: registers to read comp_event CDM is waiting for + * @perf_mon: registers to read perf_mon information + * @scratch: registers to read scratch register value + * @perf_reg: registers to read performance counters value + * @icl_reg: registers to read information 
related to good
+ *                  and invalid commands in FIFO
+ * @spare: spare register
+ * @priority_group_bit_offset: offset of priority group bits
+ * @pause_core_done_mask: Mask to test if pause core operation is done
+ * @pause_core_enable_mask: Mask to enable pause core operation
+ *
+ */
+struct cam_cdm_common_regs {
+	uint32_t cdm_hw_version;
+	const struct cam_version_reg *cam_version;
+	uint32_t rst_cmd;
+	uint32_t cgc_cfg;
+	uint32_t core_cfg;
+	uint32_t core_en;
+	uint32_t fe_cfg;
+	uint32_t cdm_status;
+	uint32_t irq_context_status;
+	uint32_t bl_fifo_rb;
+	uint32_t bl_fifo_base_rb;
+	uint32_t bl_fifo_len_rb;
+	uint32_t usr_data;
+	uint32_t wait_status;
+	uint32_t last_ahb_addr;
+	uint32_t last_ahb_data;
+	uint32_t core_debug;
+	uint32_t last_ahb_err_addr;
+	uint32_t last_ahb_err_data;
+	uint32_t current_bl_base;
+	uint32_t current_bl_len;
+	uint32_t current_used_ahb_base;
+	uint32_t debug_status;
+	uint32_t bus_misr_cfg0;
+	uint32_t bus_misr_cfg1;
+	uint32_t bus_misr_rd_val;
+	const struct cam_cdm_bl_pending_req_reg_params
+		*pending_req[CAM_CDM_BL_FIFO_PENDING_REQ_REG_NUM];
+	const struct cam_cdm_comp_wait_status
+		*comp_wait[CAM_CDM_COMP_WAIT_STATUS_REG_NUM];
+	const struct cam_cdm_perf_mon_regs
+		*perf_mon[CAM_CDM_PERF_MON_REG_NUM];
+	const struct cam_cdm_scratch_reg
+		*scratch[CAM_CDM_SCRATCH_REG_NUM];
+	const struct cam_cdm_perf_regs *perf_reg;
+	const struct cam_cdm_icl_regs *icl_reg;
+	uint32_t spare;
+	uint32_t priority_group_bit_offset;
+	uint32_t pause_core_done_mask;
+	uint32_t pause_core_enable_mask;
+};
+
+/**
+ * struct cam_cdm_hw_reg_offset - CDM hardware register offsets
+ *
+ * @cmn_reg: pointer to structure to get common registers of a CDM
+ * @bl_fifo_reg: pointer to structure to get BL_FIFO registers of a CDM
+ * @irq_reg: pointer to structure to get IRQ registers of a CDM
+ * @reg_data: pointer to structure with reg_data related to CDM
+ *            registers
+ */
+struct cam_cdm_hw_reg_offset {
+	const struct cam_cdm_common_regs *cmn_reg;
+	const
struct cam_cdm_bl_fifo_regs *bl_fifo_reg[CAM_CDM_BL_FIFO_REG_NUM]; + const struct cam_cdm_irq_regs *irq_reg[CAM_CDM_BL_FIFO_IRQ_REG_NUM]; + const struct cam_cdm_common_reg_data *reg_data; +}; + +/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/ +enum cam_cdm_hw_process_intf_cmd { + CAM_CDM_HW_INTF_CMD_ACQUIRE, + CAM_CDM_HW_INTF_CMD_RELEASE, + CAM_CDM_HW_INTF_CMD_SUBMIT_BL, + CAM_CDM_HW_INTF_CMD_RESET_HW, + CAM_CDM_HW_INTF_CMD_FLUSH_HW, + CAM_CDM_HW_INTF_CMD_HANDLE_ERROR, + CAM_CDM_HW_INTF_CMD_HANG_DETECT, + CAM_CDM_HW_INTF_DUMP_DBG_REGS, + CAM_CDM_HW_INTF_CMD_INVALID, +}; + +/* enum cam_cdm_flags - Bit fields for CDM flags used */ +enum cam_cdm_flags { + CAM_CDM_FLAG_SHARED_CDM, + CAM_CDM_FLAG_PRIVATE_CDM, +}; + +/* enum cam_cdm_type - Enum for possible CAM CDM types */ +enum cam_cdm_type { + CAM_VIRTUAL_CDM, + CAM_HW_CDM, +}; + +/* enum cam_cdm_mem_base_index - Enum for possible CAM CDM types */ +enum cam_cdm_mem_base_index { + CAM_HW_CDM_BASE_INDEX, + CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK, +}; + +/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */ +enum cam_cdm_bl_cb_type { + CAM_HW_CDM_BL_CB_CLIENT = 1, + CAM_HW_CDM_BL_CB_INTERNAL, +}; + +/* enum cam_cdm_arbitration - Enum type of arbitration */ +enum cam_cdm_arbitration { + CAM_CDM_ARBITRATION_NONE, + CAM_CDM_ARBITRATION_ROUND_ROBIN, + CAM_CDM_ARBITRATION_PRIORITY_BASED, + CAM_CDM_ARBITRATION_MAX, +}; + +enum cam_cdm_hw_version { + CAM_CDM_VERSION = 0, + CAM_CDM_VERSION_1_0 = 0x10000000, + CAM_CDM_VERSION_1_1 = 0x10010000, + CAM_CDM_VERSION_1_2 = 0x10020000, + CAM_CDM_VERSION_2_0 = 0x20000000, + CAM_CDM_VERSION_2_1 = 0x20010000, + CAM_CDM_VERSION_2_2 = 0x20020000, + CAM_CDM_VERSION_MAX, +}; + +/* struct cam_cdm_client - struct for cdm clients data.*/ +struct cam_cdm_client { + struct cam_cdm_acquire_data data; + void __iomem *changebase_addr; + uint32_t stream_on; + uint32_t refcount; + struct mutex lock; + uint32_t handle; +}; + +/* struct cam_cdm_work_payload - struct for 
cdm work payload data.*/ +struct cam_cdm_work_payload { + struct cam_hw_info *hw; + uint32_t irq_status; + uint32_t irq_data; + int fifo_idx; + ktime_t workq_scheduled_ts; + struct work_struct work; +}; + +/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/ +struct cam_cdm_bl_cb_request_entry { + uint8_t bl_tag; + enum cam_cdm_bl_cb_type request_type; + uint32_t client_hdl; + void *userdata; + uint32_t cookie; + struct list_head entry; +}; + +/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/ +struct cam_cdm_hw_intf_cmd_submit_bl { + uint32_t handle; + struct cam_cdm_bl_request *data; +}; + +/* struct cam_cdm_bl_fifo - CDM hw memory struct */ +struct cam_cdm_bl_fifo { + struct completion bl_complete; + struct workqueue_struct *work_queue; + struct list_head bl_request_list; + struct mutex fifo_lock; + uint8_t bl_tag; + uint32_t bl_depth; + uint8_t last_bl_tag_done; + atomic_t work_record; +}; + +/** + * struct cam_cdm - CDM hw device struct + * + * @index: index of CDM hardware + * @name: cdm_name + * @id: enum for possible CDM hardwares + * @flags: enum to tell if CDM is private of shared + * @reset_complete: completion event to make CDM wait for reset + * @work_queue: workqueue to schedule work for virtual CDM + * @bl_request_list: bl_request list for submitted commands in + * virtual CDM + * @version: CDM version with major, minor, incr and reserved + * @hw_version: CDM version as read from the cdm_version register + * @hw_family_version: version of hw family the CDM belongs to + * @iommu_hdl: CDM iommu handle + * @offsets: pointer to structure of CDM registers + * @ops: CDM ops for generating cdm commands + * @clients: CDM clients array currently active on CDM + * @bl_fifo: structure with per fifo related attributes + * @cdm_status: bitfield with bits assigned for different cdm status + * @bl_tag: slot value at which the next bl cmd will be written + * in case of virtual CDM + * @gen_irq: memory region in which 
gen_irq command will be written + * @cpas_handle: handle for cpas driver + * @arbitration: type of arbitration to be used for the CDM + * @num_active_clients: Number of currently active clients + */ +struct cam_cdm { + uint32_t index; + char name[128]; + enum cam_cdm_id id; + enum cam_cdm_flags flags; + struct completion reset_complete; + struct workqueue_struct *work_queue; + struct list_head bl_request_list; + struct cam_hw_version version; + uint32_t hw_version; + uint32_t hw_family_version; + struct cam_iommu_handle iommu_hdl; + struct cam_cdm_hw_reg_offset *offsets; + struct cam_cdm_utils_ops *ops; + struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS]; + struct cam_cdm_bl_fifo bl_fifo[CAM_CDM_BL_FIFO_MAX]; + unsigned long cdm_status; + uint8_t bl_tag; + uint32_t cpas_handle; + enum cam_cdm_arbitration arbitration; + uint8_t num_active_clients; +}; + +/* struct cam_cdm_private_dt_data - CDM hw custom dt data */ +struct cam_cdm_private_dt_data { + bool dt_cdm_shared; + bool config_fifo; + bool is_single_ctx_cdm; + uint8_t priority_group; + uint32_t fifo_depth[CAM_CDM_BL_FIFO_MAX]; + uint32_t dt_num_supported_clients; + uint32_t pid; + uint32_t mid; + const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS]; +}; + +/* struct cam_cdm_intf_devices - CDM mgr interface devices */ +struct cam_cdm_intf_devices { + struct mutex lock; + uint32_t refcount; + struct cam_hw_intf *device; + struct cam_cdm_private_dt_data *data; +}; + +/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */ +struct cam_cdm_intf_mgr { + bool probe_done; + uint32_t cdm_count; + uint32_t dt_supported_hw_cdm; + int32_t refcount; + struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM]; + struct dentry *dentry; +}; + +int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw, + struct cam_cdm_private_dt_data *data, enum cam_cdm_type type, + uint32_t *index); +int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw, + struct cam_cdm_private_dt_data 
*data, enum cam_cdm_type type, + uint32_t index); + +#endif /* _CAM_CDM_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_core_common.c b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_core_common.c new file mode 100644 index 0000000000..39b030e210 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_core_common.c @@ -0,0 +1,980 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "cam_soc_util.h" +#include "cam_smmu_api.h" +#include "cam_io_util.h" +#include "cam_cdm_intf_api.h" +#include "cam_cdm.h" +#include "cam_cdm_soc.h" +#include "cam_cdm_core_common.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" + +int cam_cdm_util_cpas_start(struct cam_hw_info *cdm_hw) +{ + struct cam_cdm *core = NULL; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + int rc = 0; + + if (!cdm_hw) { + CAM_ERR(CAM_CDM, "Invalid cdm hw"); + return -EINVAL; + } + + core = (struct cam_cdm *)cdm_hw->core_info; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 1; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_READ; + axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(core->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_CDM, "CDM[%d] CPAS start failed rc=%d", core->index, rc); + return rc; + } + + return rc; +} + +static void cam_cdm_get_client_refcount(struct cam_cdm_client *client) +{ + mutex_lock(&client->lock); + CAM_DBG(CAM_CDM, "CDM client get refcount=%d", + client->refcount); + 
client->refcount++; + mutex_unlock(&client->lock); +} + +static void cam_cdm_put_client_refcount(struct cam_cdm_client *client) +{ + mutex_lock(&client->lock); + CAM_DBG(CAM_CDM, "CDM client put refcount=%d", + client->refcount); + if (client->refcount > 0) { + client->refcount--; + } else { + CAM_ERR(CAM_CDM, "Refcount put when zero"); + WARN_ON(1); + } + mutex_unlock(&client->lock); +} + +bool cam_cdm_set_cam_hw_version( + uint32_t ver, struct cam_hw_version *cam_version) +{ + switch (ver) { + case CAM_CDM100_VERSION: + case CAM_CDM110_VERSION: + case CAM_CDM120_VERSION: + case CAM_CDM200_VERSION: + case CAM_CDM210_VERSION: + case CAM_CDM220_VERSION: + cam_version->major = (ver & 0xF0000000); + cam_version->minor = (ver & 0xFFF0000); + cam_version->incr = (ver & 0xFFFF); + cam_version->reserved = 0; + return true; + default: + CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver); + break; + } + return false; +} + +bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata, + struct cam_cpas_irq_data *irq_data) +{ + if (!irq_data) + return false; + + CAM_DBG(CAM_CDM, "CPAS error callback type=%d", irq_data->irq_type); + + return false; +} + +struct cam_cdm_utils_ops *cam_cdm_get_ops( + uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version) +{ + if (by_cam_version == false) { + switch (ver) { + case CAM_CDM100_VERSION: + case CAM_CDM110_VERSION: + case CAM_CDM120_VERSION: + case CAM_CDM200_VERSION: + case CAM_CDM210_VERSION: + case CAM_CDM220_VERSION: + return &CDM170_ops; + default: + CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", + ver); + } + } else if (cam_version) { + if (((cam_version->major == 1) && + (cam_version->minor == 0) && + (cam_version->incr == 0)) || + ((cam_version->major == 1) && + (cam_version->minor == 1) && + (cam_version->incr == 0)) || + ((cam_version->major == 1) && + (cam_version->minor == 2) && + (cam_version->incr == 0))) { + + CAM_DBG(CAM_CDM, + "cam_hw_version=%x:%x:%x supported", + 
cam_version->major, cam_version->minor, + cam_version->incr); + return &CDM170_ops; + } + + CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported", + cam_version->major, cam_version->minor, + cam_version->incr); + } + + return NULL; +} + +struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag( + uint32_t tag, struct list_head *bl_list) +{ + struct cam_cdm_bl_cb_request_entry *node; + + list_for_each_entry(node, bl_list, entry) { + if (node->bl_tag == tag) + return node; + } + CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag); + + return NULL; +} + +int cam_cdm_get_caps(void *hw_priv, + void *get_hw_cap_args, uint32_t arg_size) +{ + struct cam_hw_info *cdm_hw = hw_priv; + struct cam_cdm *cdm_core; + + if ((cdm_hw) && (cdm_hw->core_info) && (get_hw_cap_args) && + (sizeof(struct cam_iommu_handle) == arg_size)) { + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + *((struct cam_iommu_handle *)get_hw_cap_args) = + cdm_core->iommu_hdl; + return 0; + } + + return -EINVAL; +} + +int cam_cdm_find_free_client_slot(struct cam_cdm *hw) +{ + int i; + + for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) { + if (hw->clients[i] == NULL) { + CAM_DBG(CAM_CDM, "Found client slot %d", i); + return i; + } + } + CAM_ERR(CAM_CDM, "No more client slots"); + + return -EBUSY; +} + +static int cam_cdm_get_last_client_idx(struct cam_cdm *core) +{ + int i, last_client = 0; + + for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) { + if (core->clients[i]) + last_client = i; + } + + return last_client; +} + +void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw, + enum cam_cdm_cb_status status, void *data) +{ + int i; + struct cam_cdm *core = NULL; + struct cam_cdm_client *client = NULL; + struct cam_cdm_bl_cb_request_entry *node = NULL; + struct cam_hw_dump_pf_args pf_args = {0}; + int client_idx, last_client; + + if (!cdm_hw) { + CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info"); + return; + } + core = (struct cam_cdm *)cdm_hw->core_info; + + 
switch (status) { + case CAM_CDM_CB_STATUS_BL_SUCCESS: + node = (struct cam_cdm_bl_cb_request_entry *)data; + + client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl); + client = core->clients[client_idx]; + if ((!client) || (client->handle != node->client_hdl)) { + CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, + node->client_hdl); + return; + } + cam_cdm_get_client_refcount(client); + mutex_lock(&client->lock); + if (client->data.cam_cdm_callback) { + CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d", + client->data.identifier, node->cookie); + client->data.cam_cdm_callback(node->client_hdl, + node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS, + (void *)(&node->cookie)); + CAM_DBG(CAM_CDM, "Exit client cb cookie=%d", + node->cookie); + } else { + CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x", + node->client_hdl); + } + mutex_unlock(&client->lock); + cam_cdm_put_client_refcount(client); + break; + case CAM_CDM_CB_STATUS_HW_RESET_DONE: + case CAM_CDM_CB_STATUS_HW_FLUSH: + case CAM_CDM_CB_STATUS_HW_RESUBMIT: + case CAM_CDM_CB_STATUS_INVALID_BL_CMD: + case CAM_CDM_CB_STATUS_HW_ERROR: + node = (struct cam_cdm_bl_cb_request_entry *)data; + + client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl); + client = core->clients[client_idx]; + if ((!client) || (client->handle != node->client_hdl)) { + CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, + node->client_hdl); + return; + } + cam_cdm_get_client_refcount(client); + mutex_lock(&client->lock); + if (client->data.cam_cdm_callback) { + client->data.cam_cdm_callback( + client->handle, + client->data.userdata, + status, + (void *)(&node->cookie)); + } else { + CAM_ERR(CAM_CDM, + "No cb registered for client: name %s, hdl=%x", + client->data.identifier, client->handle); + } + mutex_unlock(&client->lock); + cam_cdm_put_client_refcount(client); + break; + case CAM_CDM_CB_STATUS_PAGEFAULT: + last_client = cam_cdm_get_last_client_idx(core); + pf_args.pf_smmu_info = (struct cam_smmu_pf_info *)data; + 
pf_args.handle_sec_pf = true; + for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) { + client = core->clients[i]; + if (!client) + continue; + cam_cdm_get_client_refcount(client); + if (client->data.cam_cdm_callback) { + if (i == last_client) + /* + * If the fault causing client is not found, + * make the last client of this CDM sends PF + * notification to userspace. This avoids multiple + * PF notifications and ensures at least one + * notification is sent. + */ + pf_args.pf_context_info.force_send_pf_evt = true; + mutex_lock(&client->lock); + CAM_DBG(CAM_CDM, "Found client slot %d name %s", + i, client->data.identifier); + client->data.cam_cdm_callback( + client->handle, + client->data.userdata, + status, + &pf_args); + if (pf_args.pf_context_info.ctx_found || + pf_args.pf_context_info.force_send_pf_evt) { + if (pf_args.pf_context_info.ctx_found) + CAM_ERR(CAM_CDM, + "Page Fault found on client: [%s][%u]", + client->data.identifier, + client->data.cell_index); + mutex_unlock(&client->lock); + cam_cdm_put_client_refcount(client); + break; + } + mutex_unlock(&client->lock); + } else { + CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x", + client->handle); + } + cam_cdm_put_client_refcount(client); + } + + break; + default: + CAM_ERR(CAM_CDM, "Invalid cdm cb status: %u", status); + } +} + +static int cam_cdm_stream_handle_init(void *hw_priv, bool init) +{ + int rc = -EPERM; + + if (init) { + rc = cam_hw_cdm_init(hw_priv, NULL, 0); + if (rc) { + CAM_ERR(CAM_CDM, "CDM HW init failed"); + return rc; + } + } else { + rc = cam_hw_cdm_deinit(hw_priv, NULL, 0); + if (rc) + CAM_ERR(CAM_CDM, "Deinit failed in streamoff"); + } + + return rc; +} + +int cam_cdm_stream_ops_internal(void *hw_priv, + void *start_args, bool operation) +{ + struct cam_hw_info *cdm_hw = hw_priv; + struct cam_cdm *core; + int rc = -EPERM, client_idx; + struct cam_cdm_client *client; + uint32_t *handle = start_args; + + if (!hw_priv) + return -EINVAL; + + core = (struct cam_cdm 
*)cdm_hw->core_info;
+
+	/*
+	 * If this CDM HW encounters Page Fault, block any further
+	 * stream on/off until this CDM gets released and acquired
+	 * again. CDM page fault handler will stream off the device.
+	 */
+	if (test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status)) {
+		CAM_WARN(CAM_CDM,
+			"Attempt to stream %s failed. %s%u has encountered a page fault",
+			operation ? "on" : "off",
+			core->name, core->id);
+		return -EAGAIN;
+	}
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+	client = core->clients[client_idx];
+	if (!client) {
+		CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		return -EINVAL;
+	}
+	cam_cdm_get_client_refcount(client);
+	if (*handle != client->handle) {
+		CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
+		rc = -EINVAL;
+		goto end;
+	}
+	if (operation == true) {
+		if (true == client->stream_on) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM client is already streamed ON");
+			goto end;
+		}
+	} else {
+		if (client->stream_on == false) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM client is already streamed Off");
+			goto end;
+		}
+	}
+
+	if (operation == true) {
+		if (!cdm_hw->open_count) {
+			rc = cam_cdm_util_cpas_start(cdm_hw);
+			if (rc != 0) {
+				CAM_ERR(CAM_CDM, "CPAS start failed");
+				goto end;
+			}
+			CAM_DBG(CAM_CDM, "CDM init first time");
+			if (core->id == CAM_CDM_VIRTUAL) {
+				CAM_DBG(CAM_CDM,
+					"Virtual CDM HW init first time");
+				rc = 0;
+			} else {
+				CAM_DBG(CAM_CDM, "CDM HW init first time");
+				rc = cam_cdm_stream_handle_init(hw_priv, true);
+			}
+			if (rc == 0) {
+				cdm_hw->open_count++;
+				client->stream_on = true;
+			} else {
+				if (cam_cpas_stop(core->cpas_handle))
+					CAM_ERR(CAM_CDM, "CPAS stop failed");
+			}
+		} else {
+			cdm_hw->open_count++;
+			CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
+				cdm_hw->open_count);
+			rc = 0;
+			client->stream_on = true;
+		}
+	} else {
+		if (cdm_hw->open_count) {
+			cdm_hw->open_count--;
+			CAM_DBG(CAM_CDM, "stream OFF CDM %d",
cdm_hw->open_count); + if (!cdm_hw->open_count) { + CAM_DBG(CAM_CDM, "CDM Deinit now"); + if (core->id == CAM_CDM_VIRTUAL) { + CAM_DBG(CAM_CDM, + "Virtual CDM HW Deinit"); + rc = 0; + } else { + CAM_DBG(CAM_CDM, "CDM HW Deinit now"); + rc = cam_cdm_stream_handle_init(hw_priv, + false); + } + if (rc == 0) { + client->stream_on = false; + rc = cam_cpas_stop(core->cpas_handle); + if (rc) + CAM_ERR(CAM_CDM, + "CPAS stop failed"); + } + } else { + client->stream_on = false; + rc = 0; + CAM_DBG(CAM_CDM, + "Client stream off success =%d", + cdm_hw->open_count); + } + } else { + CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d", + cdm_hw->open_count); + rc = -ENXIO; + } + } +end: + cam_cdm_put_client_refcount(client); + mutex_unlock(&cdm_hw->hw_mutex); + return rc; +} + +int cam_cdm_pf_stream_off_all_clients(struct cam_hw_info *cdm_hw) +{ + struct cam_cdm *core; + struct cam_cdm_client *client; + int i, rc; + + if (!cdm_hw) + return -EINVAL; + + core = cdm_hw->core_info; + + if (!cdm_hw->open_count) { + CAM_DBG(CAM_CDM, "%s%u already streamed off. 
Open count %d", + core->name, core->id, cdm_hw->open_count); + return -EPERM; + } + + CAM_DBG(CAM_CDM, "streaming off %s%u internally", + core->name, core->id); + + rc = cam_hw_cdm_pf_deinit(cdm_hw, NULL, 0); + if (rc) + CAM_ERR(CAM_CDM, "Deinit failed in stream off rc: %d", rc); + + for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) { + client = core->clients[i]; + if (!client) + continue; + + mutex_lock(&client->lock); + client->stream_on = false; + mutex_unlock(&client->lock); + } + + rc = cam_cpas_stop(core->cpas_handle); + if (rc) + CAM_ERR(CAM_CDM, "CPAS stop failed in stream off rc %d", rc); + + cdm_hw->open_count = 0; + + return rc; +} + +int cam_cdm_stream_start(void *hw_priv, + void *start_args, uint32_t size) +{ + int rc = 0; + + if (!hw_priv) + return -EINVAL; + + rc = cam_cdm_stream_ops_internal(hw_priv, start_args, true); + return rc; + +} + +int cam_cdm_stream_stop(void *hw_priv, + void *start_args, uint32_t size) +{ + int rc = 0; + + if (!hw_priv) + return -EINVAL; + + rc = cam_cdm_stream_ops_internal(hw_priv, start_args, false); + return rc; + +} + +int cam_cdm_process_cmd(void *hw_priv, + uint32_t cmd, void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *cdm_hw = hw_priv; + struct cam_cdm *core; + int rc = -EINVAL; + + if ((!hw_priv) || (!cmd_args) || + (cmd >= CAM_CDM_HW_INTF_CMD_INVALID)) + return rc; + + core = (struct cam_cdm *)cdm_hw->core_info; + + /* + * When CDM has encountered a page fault, other than release no + * other command will be serviced. PF handler notifies all clients + * on the error, clients are expected to handle it, and release + * its reference to the CDM core. 
+ */ + if (test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status) && + (cmd != CAM_CDM_HW_INTF_CMD_RELEASE)) { + CAM_ERR(CAM_CDM, + "%s%u has encountered a page fault, unable to service cmd %u", + core->name, core->id, cmd); + return -EAGAIN; + } + + switch (cmd) { + case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: { + struct cam_cdm_hw_intf_cmd_submit_bl *req; + int idx; + struct cam_cdm_client *client; + + if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) { + CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd, + arg_size); + break; + } + req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args; + if ((req->data->type < 0) || + (req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) { + CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d", + req->data->type); + break; + } + idx = CAM_CDM_GET_CLIENT_IDX(req->handle); + client = core->clients[idx]; + if ((!client) || (req->handle != client->handle)) { + CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, + req->handle); + break; + } + cam_cdm_get_client_refcount(client); + if (req->data->flag && + (!client->data.cam_cdm_callback)) { + CAM_ERR(CAM_CDM, + "CDM request cb without registering cb"); + cam_cdm_put_client_refcount(client); + break; + } + if (client->stream_on != true) { + CAM_ERR(CAM_CDM, + "Invalid CDM needs to be streamed ON first"); + cam_cdm_put_client_refcount(client); + break; + } + if (core->id == CAM_CDM_VIRTUAL) + rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client); + else + rc = cam_hw_cdm_submit_bl(cdm_hw, req, client); + + cam_cdm_put_client_refcount(client); + break; + } + case CAM_CDM_HW_INTF_CMD_ACQUIRE: { + struct cam_cdm_acquire_data *data; + int idx; + struct cam_cdm_client *client; + + if (sizeof(struct cam_cdm_acquire_data) != arg_size) { + CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd, + arg_size); + break; + } + + mutex_lock(&cdm_hw->hw_mutex); + data = (struct cam_cdm_acquire_data *)cmd_args; + CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d", + data->identifier, 
core->index); + + if (data->priority >= CAM_CDM_BL_FIFO_MAX) { + mutex_unlock(&cdm_hw->hw_mutex); + CAM_ERR(CAM_CDM, + "Invalid priority requested %d", + data->priority); + rc = -EINVAL; + break; + } + + if (core->id != CAM_CDM_VIRTUAL && + core->bl_fifo[data->priority].bl_depth == 0) { + mutex_unlock(&cdm_hw->hw_mutex); + CAM_ERR(CAM_CDM, + "FIFO %d not supported for core %d", + data->priority, + core->id); + rc = -EINVAL; + break; + } + + idx = cam_cdm_find_free_client_slot(core); + if ((idx < 0) || (core->clients[idx])) { + mutex_unlock(&cdm_hw->hw_mutex); + CAM_ERR(CAM_CDM, + "Fail to client slots, client=%s in hw idx=%d", + data->identifier, core->index); + break; + } + core->clients[idx] = CAM_MEM_ZALLOC(sizeof(struct cam_cdm_client), + GFP_KERNEL); + if (!core->clients[idx]) { + mutex_unlock(&cdm_hw->hw_mutex); + rc = -ENOMEM; + break; + } + + /* Acquire ownership */ + if ((core->id != CAM_CDM_VIRTUAL) && (core->num_active_clients == 0)) { + rc = cam_vmrm_soc_acquire_resources(cdm_hw->soc_info.hw_id); + if (rc) { + CAM_ERR(CAM_ISP, "CDM[%u] acquire ownership failed", + cdm_hw->soc_info.index); + CAM_MEM_FREE(core->clients[idx]); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + } + + core->num_active_clients++; + mutex_unlock(&cdm_hw->hw_mutex); + + client = core->clients[idx]; + mutex_init(&client->lock); + data->ops = core->ops; + if (core->id == CAM_CDM_VIRTUAL) { + data->cdm_version.major = 1; + data->cdm_version.minor = 0; + data->cdm_version.incr = 0; + data->cdm_version.reserved = 0; + data->ops = cam_cdm_get_ops(0, + &data->cdm_version, true); + if (!data->ops) { + mutex_destroy(&client->lock); + mutex_lock(&cdm_hw->hw_mutex); + CAM_MEM_FREE(core->clients[idx]); + core->clients[idx] = NULL; + core->num_active_clients--; + mutex_unlock( + &cdm_hw->hw_mutex); + rc = -EPERM; + CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm"); + break; + } + } else { + mutex_lock(&cdm_hw->hw_mutex); + data->cdm_version = core->version; + mutex_unlock(&cdm_hw->hw_mutex); + 
} + + cam_cdm_get_client_refcount(client); + mutex_lock(&client->lock); + memcpy(&client->data, data, + sizeof(struct cam_cdm_acquire_data)); + client->handle = CAM_CDM_CREATE_CLIENT_HANDLE( + core->index, + data->priority, + idx); + client->stream_on = false; + data->handle = client->handle; + CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d", + data->identifier, core->index); + mutex_unlock(&client->lock); + rc = 0; + break; + } + case CAM_CDM_HW_INTF_CMD_RELEASE: { + uint32_t *handle = cmd_args; + int idx; + struct cam_cdm_client *client; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=%x", + cmd, arg_size, *handle); + return -EINVAL; + } + idx = CAM_CDM_GET_CLIENT_IDX(*handle); + mutex_lock(&cdm_hw->hw_mutex); + client = core->clients[idx]; + if ((!client) || (*handle != client->handle)) { + CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", + client, *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + cam_cdm_put_client_refcount(client); + mutex_lock(&client->lock); + if (client->refcount != 0) { + CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d", + client->refcount); + rc = -EPERM; + mutex_unlock(&client->lock); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + core->clients[idx] = NULL; + mutex_unlock(&client->lock); + mutex_destroy(&client->lock); + CAM_MEM_FREE(client); + if (core->num_active_clients) + core->num_active_clients--; + else + CAM_ERR(CAM_CDM, + "Invalid active client decrement %u for %s%u", + core->num_active_clients, core->name, core->id); + if (!core->num_active_clients) { + CAM_DBG(CAM_CDM, "Clear cdm status bits for %s%u", + core->name, core->id); + core->cdm_status = 0; + } + + if ((core->id != CAM_CDM_VIRTUAL) && (core->num_active_clients == 0)) { + rc = cam_vmrm_soc_release_resources(cdm_hw->soc_info.hw_id); + if (rc) { + CAM_ERR(CAM_ISP, "CDM[%u] vmrm soc release resources failed", + cdm_hw->soc_info.index); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + } + + 
mutex_unlock(&cdm_hw->hw_mutex); + rc = 0; + break; + } + case CAM_CDM_HW_INTF_CMD_RESET_HW: { + uint32_t *handle = cmd_args; + int idx; + struct cam_cdm_client *client; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=%x", + cmd, arg_size, *handle); + return -EINVAL; + } + idx = CAM_CDM_GET_CLIENT_IDX(*handle); + mutex_lock(&cdm_hw->hw_mutex); + client = core->clients[idx]; + if (!client) { + CAM_ERR(CAM_CDM, + "Client not present for handle %d", + *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + if (*handle != client->handle) { + CAM_ERR(CAM_CDM, + "handle mismatch, client handle %d index %d received handle %d", + client->handle, idx, *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + rc = cam_hw_cdm_reset_hw(cdm_hw, *handle); + if (rc) { + CAM_ERR(CAM_CDM, + "CDM HW reset failed for handle 0x%x rc = %d", + *handle, rc); + } else { + CAM_INFO_RATE_LIMIT(CAM_CDM, + "CDM HW reset done for handle 0x%x", + *handle); + } + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + case CAM_CDM_HW_INTF_CMD_FLUSH_HW: { + uint32_t *handle = cmd_args; + int idx; + struct cam_cdm_client *client; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=%x", + cmd, arg_size, *handle); + return -EINVAL; + } + idx = CAM_CDM_GET_CLIENT_IDX(*handle); + mutex_lock(&cdm_hw->hw_mutex); + client = core->clients[idx]; + if (!client) { + CAM_ERR(CAM_CDM, + "Client not present for handle %d", + *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + if (*handle != client->handle) { + CAM_ERR(CAM_CDM, + "handle mismatch, client handle %d index %d received handle %d", + client->handle, idx, *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + rc = cam_hw_cdm_flush_hw(cdm_hw, *handle); + if (rc) { + CAM_ERR(CAM_CDM, + "CDM HW flush failed for handle 0x%x rc = %d", + *handle, rc); + } else { + CAM_INFO_RATE_LIMIT(CAM_CDM, + "CDM HW flush done for handle 0x%x", + *handle); 
+ } + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + case CAM_CDM_HW_INTF_CMD_HANDLE_ERROR: { + uint32_t *handle = cmd_args; + int idx; + struct cam_cdm_client *client; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=%x", + cmd, arg_size, *handle); + return -EINVAL; + } + + idx = CAM_CDM_GET_CLIENT_IDX(*handle); + mutex_lock(&cdm_hw->hw_mutex); + client = core->clients[idx]; + if (!client) { + CAM_ERR(CAM_CDM, + "Client not present for handle %d", + *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + if (*handle != client->handle) { + CAM_ERR(CAM_CDM, + "handle mismatch, client handle %d index %d received handle %d", + client->handle, idx, *handle); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + + rc = cam_hw_cdm_handle_error_info(cdm_hw, *handle); + if (rc) { + CAM_ERR(CAM_CDM, + "CDM HW handle error failed for handle 0x%x rc = %d", + *handle, rc); + } else { + CAM_INFO_RATE_LIMIT(CAM_CDM, + "CDM HW handle error done for handle 0x%x", + *handle); + } + + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + case CAM_CDM_HW_INTF_CMD_HANG_DETECT: { + uint32_t *handle = cmd_args; + int idx; + struct cam_cdm_client *client; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=%x", + cmd, arg_size, *handle); + return -EINVAL; + } + + idx = CAM_CDM_GET_CLIENT_IDX(*handle); + client = core->clients[idx]; + if (!client) { + CAM_ERR(CAM_CDM, + "Client not present for handle %d", + *handle); + break; + } + + if (*handle != client->handle) { + CAM_ERR(CAM_CDM, + "handle mismatch, client handle %d index %d received handle %d", + client->handle, idx, *handle); + break; + } + + rc = cam_hw_cdm_hang_detect(cdm_hw, *handle); + break; + } + case CAM_CDM_HW_INTF_DUMP_DBG_REGS: + { + uint32_t *handle = cmd_args; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CDM, + "Invalid CDM cmd %d size=%x for handle=0x%x", + cmd, arg_size, *handle); + return -EINVAL; + } + + 
mutex_lock(&cdm_hw->hw_mutex); + cam_hw_cdm_dump_core_debug_registers(cdm_hw, true); + mutex_unlock(&cdm_hw->hw_mutex); + break; + } + default: + CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd); + break; + } + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_core_common.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_core_common.h new file mode 100644 index 0000000000..0e345c820d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_core_common.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CDM_CORE_COMMON_H_ +#define _CAM_CDM_CORE_COMMON_H_ + +#include "cam_mem_mgr.h" + +#define CAM_CDM100_VERSION 0x10000000 +#define CAM_CDM110_VERSION 0x10010000 +#define CAM_CDM120_VERSION 0x10020000 +#define CAM_CDM200_VERSION 0x20000000 +#define CAM_CDM210_VERSION 0x20010000 +#define CAM_CDM220_VERSION 0x20020000 + +#define CAM_CDM_AHB_BURST_LEN_1 (BIT(1) - 1) +#define CAM_CDM_AHB_BURST_LEN_4 (BIT(2) - 1) +#define CAM_CDM_AHB_BURST_LEN_8 (BIT(3) - 1) +#define CAM_CDM_AHB_BURST_LEN_16 (BIT(4) - 1) +#define CAM_CDM_AHB_BURST_EN BIT(4) +#define CAM_CDM_AHB_STOP_ON_ERROR BIT(8) +#define CAM_CDM_ARB_SEL_RR BIT(16) +#define CAM_CDM_IMPLICIT_WAIT_EN BIT(17) + +extern struct cam_cdm_utils_ops CDM170_ops; + +int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size); +int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size); +int cam_hw_cdm_pf_deinit(void *hw_priv, void *init_hw_args, + uint32_t arg_size); +int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size); +int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args, + bool operation); +int cam_cdm_pf_stream_off_all_clients(struct cam_hw_info *cdm_hw); +int cam_cdm_stream_start(void *hw_priv, void 
*start_args, uint32_t size); +int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size); +int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args, + uint32_t arg_size); +bool cam_cdm_set_cam_hw_version( + uint32_t ver, struct cam_hw_version *cam_version); +bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata, + struct cam_cpas_irq_data *irq_data); +struct cam_cdm_utils_ops *cam_cdm_get_ops( + uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version); +int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw, + struct cam_cdm_hw_intf_cmd_submit_bl *req, + struct cam_cdm_client *client); +int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, + struct cam_cdm_hw_intf_cmd_submit_bl *req, + struct cam_cdm_client *client); +int cam_hw_cdm_reset_hw(struct cam_hw_info *cdm_hw, uint32_t handle); +int cam_hw_cdm_flush_hw(struct cam_hw_info *cdm_hw, uint32_t handle); +int cam_hw_cdm_hang_detect(struct cam_hw_info *cdm_hw, uint32_t handle); +int cam_hw_cdm_handle_error_info(struct cam_hw_info *cdm_hw, uint32_t handle); +struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag( + uint32_t tag, struct list_head *bl_list); +void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw, + enum cam_cdm_cb_status status, void *data); +void cam_hw_cdm_dump_core_debug_registers( + struct cam_hw_info *cdm_hw, bool pause_core); +int cam_cdm_util_cpas_start(struct cam_hw_info *cdm_hw); + +#endif /* _CAM_CDM_CORE_COMMON_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_core.c b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_core.c new file mode 100644 index 0000000000..7f33331807 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_core.c @@ -0,0 +1,2671 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include "cam_soc_util.h" +#include "cam_smmu_api.h" +#include "cam_cdm_intf_api.h" +#include "cam_cdm.h" +#include "cam_cdm_core_common.h" +#include "cam_cdm_soc.h" +#include "cam_io_util.h" +#include "cam_cdm_hw_reg_1_0.h" +#include "cam_cdm_hw_reg_1_1.h" +#include "cam_cdm_hw_reg_1_2.h" +#include "cam_cdm_hw_reg_2_0.h" +#include "cam_cdm_hw_reg_2_1.h" +#include "cam_cdm_hw_reg_2_2.h" +#include "camera_main.h" +#include "cam_trace.h" +#include "cam_req_mgr_workq.h" +#include "cam_common_util.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +#define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000 +#define CAM_CDM_DBG_GEN_IRQ_USR_DATA 0xff +#define CAM_CDM_MAX_BL_LENGTH 0x100000 +#define CAM_CDM_FIFO_LEN_REG_LEN_MASK 0xFFFFF +#define CAM_CDM_FIFO_LEN_REG_TAG_MASK 0xFF +#define CAM_CDM_FIFO_LEN_REG_TAG_SHIFT 24 +#define CAM_CDM_FIFO_LEN_REG_ARB_SHIFT 20 + +static void cam_hw_cdm_work(struct work_struct *work); + +/* DT match table entry for all CDM variants*/ +static const struct of_device_id msm_cam_hw_cdm_dt_match[] = { + { + .compatible = CAM_HW_CDM_CPAS_0_NAME, + .data = &cam_cdm_1_0_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_1_0, + .data = &cam_cdm_1_0_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_1_1, + .data = &cam_cdm_1_1_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_1_2, + .data = &cam_cdm_1_2_reg_offset, + }, + { + .compatible = CAM_HW_CDM_IFE_NAME_1_2, + .data = &cam_cdm_1_2_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_2_0, + .data = &cam_cdm_2_0_reg_offset, + }, + { + .compatible = CAM_HW_CDM_OPE_NAME_2_0, + .data = &cam_cdm_2_0_reg_offset, + }, + { + .compatible = CAM_HW_CDM_CPAS_NAME_2_1, + .data = &cam_cdm_2_1_reg_offset, + }, + { + .compatible = CAM_HW_CDM_RT_NAME_2_1, + .data = &cam_cdm_2_1_reg_offset, + }, + { + .compatible = CAM_HW_CDM_OPE_NAME_2_1, + .data = &cam_cdm_2_1_reg_offset, 
+ }, + { + .compatible = CAM_HW_CDM_RT_NAME_2_2, + .data = &cam_cdm_2_2_reg_offset, + }, + {}, +}; + +static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name) +{ + if (strnstr(name, CAM_HW_CDM_CPAS_0_NAME, + strlen(CAM_HW_CDM_CPAS_0_NAME))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_1_0, + strlen(CAM_HW_CDM_CPAS_NAME_1_0))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_1_1, + strlen(CAM_HW_CDM_CPAS_NAME_1_1))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_1_2, + strlen(CAM_HW_CDM_CPAS_NAME_1_2))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_IFE_NAME_1_2, + strlen(CAM_HW_CDM_IFE_NAME_1_2))) + return CAM_CDM_IFE; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_2_0, + strlen(CAM_HW_CDM_CPAS_NAME_2_0))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_OPE_NAME_2_0, + strlen(CAM_HW_CDM_OPE_NAME_2_0))) + return CAM_CDM_OPE; + if (strnstr(name, CAM_HW_CDM_CPAS_NAME_2_1, + strlen(CAM_HW_CDM_CPAS_NAME_2_1))) + return CAM_CDM_CPAS; + if (strnstr(name, CAM_HW_CDM_RT_NAME_2_1, + strlen(CAM_HW_CDM_RT_NAME_2_1))) + return CAM_CDM_RT; + if (strnstr(name, CAM_HW_CDM_OPE_NAME_2_1, + strlen(CAM_HW_CDM_OPE_NAME_2_1))) + return CAM_CDM_OPE; + if (strnstr(name, CAM_HW_CDM_RT_NAME_2_2, + strlen(CAM_HW_CDM_RT_NAME_2_2))) + return CAM_CDM_RT; + return CAM_CDM_MAX; +} + +static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw, + bool enable, uint32_t fifo_idx) +{ + int rc = -EIO; + uint32_t irq_mask = 0; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[fifo_idx]->irq_mask, + &irq_mask)) { + CAM_ERR(CAM_CDM, "Failed to read CDM IRQ mask"); + return rc; + } + + if (enable == true) { + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->irq_reg[fifo_idx]->irq_mask, + (irq_mask | 0x4))) { + CAM_ERR(CAM_CDM, "Write failed to enable BL done irq"); + } else { + set_bit(fifo_idx, &core->cdm_status); + rc = 0; + CAM_DBG(CAM_CDM, "BL done irq enabled 
=%d", + test_bit(fifo_idx, &core->cdm_status)); + } + } else { + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->irq_reg[fifo_idx]->irq_mask, + (irq_mask & 0x70003))) { + CAM_ERR(CAM_CDM, "Write failed to disable BL done irq"); + } else { + clear_bit(fifo_idx, &core->cdm_status); + rc = 0; + CAM_DBG(CAM_CDM, "BL done irq disable =%d", + test_bit(fifo_idx, &core->cdm_status)); + } + } + return rc; +} + +static int cam_hw_cdm_pause_core(struct cam_hw_info *cdm_hw, bool pause) +{ + int rc = 0; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + uint32_t val = 0x1, core_en_reg, cdm_status_reg; + bool pause_core_supported; + + if (pause) + val |= 0x2; + + pause_core_supported = core->offsets->reg_data->capabilities & + CAM_CDM_CAP_PAUSE_CORE; + + if (pause_core_supported) { + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_en, &core_en_reg); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->cdm_status, &cdm_status_reg); + + /* In both pause or resume, further action need not/cannot be taken */ + if ((core_en_reg & core->offsets->cmn_reg->pause_core_enable_mask) && + !(cdm_status_reg & core->offsets->cmn_reg->pause_core_done_mask)) { + if (!pause) + CAM_ERR(CAM_CDM, "Pause core not done yet, can't resume core"); + return -EAGAIN; + } + } + + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_en, val)) { + CAM_ERR(CAM_CDM, "Failed to Write core_en for %s%u", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + rc = -EIO; + } + + if (pause && pause_core_supported) { + if (cam_common_read_poll_timeout( + cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base + + core->offsets->cmn_reg->cdm_status, 0, + CAM_CDM_PAUSE_CORE_US_TIMEOUT, + core->offsets->cmn_reg->pause_core_done_mask, + core->offsets->cmn_reg->pause_core_done_mask, + &cdm_status_reg)) { + CAM_WARN(CAM_CDM, + "Pause core operation not successful: status=0x%08x", + cdm_status_reg); + } else + CAM_DBG(CAM_CDM, "Pause core CDM[%u] successful", core->index); + } + 
+ return rc; +} + +int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw, uint32_t value) +{ + int rc = 0; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + if (cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_debug, + value)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug"); + rc = -EIO; + } + + return rc; +} + +int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw) +{ + int rc = 0; + struct cam_cdm *cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->core_debug, 0)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug"); + rc = -EIO; + } + + return rc; +} + +void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw) +{ + uint32_t dump_reg = 0; + int i; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_en, &dump_reg); + CAM_ERR(CAM_CDM, "dump core en=%x", dump_reg); + + for (i = 0; i < core->offsets->reg_data->num_scratch_reg; i++) { + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->scratch[i]->scratch_reg, + &dump_reg); + CAM_ERR(CAM_CDM, "dump scratch%d=%x", i, dump_reg); + } +} + +int cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo( + struct cam_hw_info *cdm_hw, + uint32_t fifo_idx, + uint32_t *pending_bl_req) +{ + int rc = 0; + uint32_t fifo_reg; + uint32_t fifo_id; + + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + if (fifo_idx >= CAM_CDM_BL_FIFO_REG_NUM) { + CAM_ERR(CAM_CDM, + "BL_FIFO index is wrong. 
fifo_idx %d", + fifo_idx); + rc = -EINVAL; + goto end; + } + + fifo_reg = fifo_idx / 2; + fifo_id = fifo_idx % 2; + + if (core->offsets->cmn_reg->pending_req[fifo_reg]) { + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->pending_req + [fifo_reg]->rb_offset, + pending_bl_req)) { + CAM_ERR(CAM_CDM, "Error reading CDM register"); + rc = -EIO; + goto end; + } + + *pending_bl_req = (*pending_bl_req >> ( + core->offsets->cmn_reg->pending_req + [fifo_reg]->rb_next_fifo_shift * + fifo_id)) & core->offsets->cmn_reg->pending_req + [fifo_reg]->rb_mask; + rc = 0; + } + + CAM_DBG(CAM_CDM, "Number of pending bl entries:%d in fifo: %d", + *pending_bl_req, fifo_id); + +end: + return rc; +} + +static void cam_hw_cdm_dump_bl_fifo_data(struct cam_hw_info *cdm_hw) +{ + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + int i, j; + uint32_t num_pending_req = 0, dump_reg[2]; + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) { + cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo(cdm_hw, i, &num_pending_req); + + CAM_INFO(CAM_CDM, "Fifo:%d content dump. 
num_pending_BLs: %d", i, num_pending_req); + + if (!num_pending_req) + continue; + + for (j = 0; j < core->bl_fifo[i].bl_depth; j++) { + cam_cdm_write_hw_reg(cdm_hw, core->offsets->cmn_reg->bl_fifo_rb, j); + cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->bl_fifo_base_rb, + &dump_reg[0]); + cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->bl_fifo_len_rb, + &dump_reg[1]); + CAM_INFO(CAM_CDM, + "BL_entry:%d base_addr:0x%x, len:%d, ARB:%d, tag:%d", + j, dump_reg[0], + (dump_reg[1] & CAM_CDM_CURRENT_BL_LEN), + (dump_reg[1] & CAM_CDM_CURRENT_BL_ARB) >> + CAM_CDM_CURRENT_BL_ARB_SHIFT, + (dump_reg[1] & CAM_CDM_CURRENT_BL_TAG) >> + CAM_CDM_CURRENT_BL_TAG_SHIFT); + } + } +} + +void cam_hw_cdm_dump_core_debug_registers(struct cam_hw_info *cdm_hw, + bool pause_core) +{ + uint32_t dump_reg[4]; + uint32_t core_dbg = CAM_CDM_CORE_DBG_TEST_BUS_EN_MASK | + CAM_CDM_CORE_DBG_LOG_AHB_MASK | + CAM_CDM_CORE_DBG_FIFO_RB_EN_MASK; + uint32_t cdm_version = 0; + int i; + bool is_core_paused_already; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + const struct cam_cdm_icl_regs *inv_cmd_log = + core->offsets->cmn_reg->icl_reg; + + CAM_INFO(CAM_CDM, "Dumping debug data for %s%u", + cdm_hw->soc_info.label_name, cdm_hw->soc_info.index); + + + if (pause_core) + cam_hw_cdm_pause_core(cdm_hw, true); + + cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->cdm_hw_version, + &cdm_version); + + if (core_dbg & CAM_CDM_CORE_DBG_TEST_BUS_EN_MASK) { + for (i = 0; i < CAM_CDM_NUM_TEST_BUS; i++) { + core_dbg &= ~CAM_CDM_CORE_DBG_TEST_BUS_SEL_MASK; + core_dbg |= ((i << CAM_CDM_CORE_DBG_TEST_BUS_SEL_SHIFT) & + (CAM_CDM_CORE_DBG_TEST_BUS_SEL_MASK)); + cam_hw_cdm_enable_core_dbg(cdm_hw, core_dbg); + cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->debug_status, + &dump_reg[0]); + + CAM_INFO(CAM_CDM, "Core_dbg: 0x%x, Debug_status[%d]: 0x%x", + core_dbg, i, dump_reg[0]); + } + + core_dbg &= ~(CAM_CDM_CORE_DBG_TEST_BUS_EN_MASK | + CAM_CDM_CORE_DBG_TEST_BUS_SEL_MASK) | + 
CAM_CDM_CORE_DBG_LOG_AHB_MASK | + CAM_CDM_CORE_DBG_FIFO_RB_EN_MASK; + cam_hw_cdm_enable_core_dbg(cdm_hw, core_dbg); + } + + if (core_dbg & CAM_CDM_CORE_DBG_LOG_AHB_MASK || + core_dbg & CAM_CDM_CORE_DBG_FIFO_RB_EN_MASK){ + cam_hw_cdm_enable_core_dbg(cdm_hw, core_dbg); + + cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->debug_status, + &dump_reg[0]); + + CAM_INFO(CAM_CDM, "Debug_status: 0x%x", dump_reg[0]); + } + + cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->core_en, &dump_reg[0]); + cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->usr_data, &dump_reg[1]); + CAM_INFO(CAM_CDM, "Core_en: %u, Core_pause: %u User_data: 0x%x", + (dump_reg[0] & CAM_CDM_CORE_EN_MASK), + (bool)(dump_reg[0] & CAM_CDM_CORE_PAUSE_MASK), + dump_reg[1]); + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->current_used_ahb_base, &dump_reg[0]); + + if (cdm_version >= CAM_CDM_VERSION_2_0) + CAM_INFO(CAM_CDM, + "Current AHB base address: 0x%x set by change base cmd by fifo: %u", + dump_reg[0] & CAM_CDM_AHB_ADDR_MASK, + (dump_reg[0] & CAM_CDM_AHB_LOG_CID_MASK) >> + CAM_CDM_AHB_LOG_CID_SHIFT); + else + CAM_INFO(CAM_CDM, + "Current AHB base address: 0x%x set by change base cmd", + dump_reg[0] & CAM_CDM_AHB_ADDR_MASK); + + if (core_dbg & CAM_CDM_CORE_DBG_LOG_AHB_MASK) { + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->last_ahb_addr, + &dump_reg[0]); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->last_ahb_data, + &dump_reg[1]); + + if (cdm_version >= CAM_CDM_VERSION_2_0) + CAM_INFO(CAM_CDM, + "Last AHB addr: 0x%x, data: 0x%x that cdm sent out from fifo: %u", + (dump_reg[0] & CAM_CDM_AHB_ADDR_MASK), + dump_reg[1], + (dump_reg[0] & CAM_CDM_AHB_LOG_CID_MASK) >> + CAM_CDM_AHB_LOG_CID_SHIFT); + else + CAM_INFO(CAM_CDM, + "Last AHB addr: 0x%x, data: 0x%x that cdm sent out", + (dump_reg[0] & CAM_CDM_AHB_ADDR_MASK), + dump_reg[1]); + } else { + CAM_INFO(CAM_CDM, "CDM HW AHB dump not enabled"); + } + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->last_ahb_err_addr, + 
&dump_reg[0]); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->last_ahb_err_data, + &dump_reg[1]); + + if (cdm_version >= CAM_CDM_VERSION_2_0) + CAM_INFO(CAM_CDM, + "Last Bad AHB addr: 0x%x and data: 0x%x from fifo: %u", + (dump_reg[0] & CAM_CDM_AHB_ADDR_MASK), dump_reg[1], + (dump_reg[0] & CAM_CDM_AHB_LOG_CID_MASK) >> + CAM_CDM_AHB_LOG_CID_SHIFT); + else + CAM_INFO(CAM_CDM, "Last Bad AHB addr: 0x%x and data: 0x%x", + (dump_reg[0] & CAM_CDM_AHB_ADDR_MASK), dump_reg[1]); + + if (inv_cmd_log) { + if (inv_cmd_log->misc_regs) { + cam_cdm_read_hw_reg(cdm_hw, + inv_cmd_log->misc_regs->icl_status, + &dump_reg[0]); + CAM_INFO(CAM_CDM, + "ICL_Status: last_invalid_fifo: %u, last known good fifo: %u", + (dump_reg[0] & CAM_CDM_ICL_STATUS_INV_CID_MASK), + (dump_reg[0] & + CAM_CDM_ICL_STATUS_LAST_CID_MASK) >> + CAM_CDM_ICL_STATUS_LAST_CID_SHIFT); + cam_cdm_read_hw_reg(cdm_hw, + inv_cmd_log->misc_regs->icl_inv_bl_addr, + &dump_reg[0]); + CAM_INFO(CAM_CDM, + "Last Inv Command BL's base_addr: 0x%x", + dump_reg[0]); + } + if (inv_cmd_log->data_regs) { + cam_cdm_read_hw_reg(cdm_hw, + inv_cmd_log->data_regs->icl_inv_data, + &dump_reg[0]); + CAM_INFO(CAM_CDM, "First word of Last Inv cmd: 0x%x", + dump_reg[0]); + + cam_cdm_read_hw_reg(cdm_hw, + inv_cmd_log->data_regs->icl_last_data_0, + &dump_reg[0]); + cam_cdm_read_hw_reg(cdm_hw, + inv_cmd_log->data_regs->icl_last_data_1, + &dump_reg[1]); + cam_cdm_read_hw_reg(cdm_hw, + inv_cmd_log->data_regs->icl_last_data_2, + &dump_reg[2]); + + CAM_INFO(CAM_CDM, + "Last good cdm command's word[0]: 0x%x, word[1]: 0x%x, word[2]: 0x%x", + dump_reg[0], dump_reg[1], dump_reg[2]); + } + } + + if (core_dbg & CAM_CDM_CORE_DBG_FIFO_RB_EN_MASK) { + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_en, &dump_reg[0]); + is_core_paused_already = (bool)(dump_reg[0] & 0x20); + if (!is_core_paused_already) + cam_hw_cdm_pause_core(cdm_hw, true); + + cam_hw_cdm_dump_bl_fifo_data(cdm_hw); + + if (!is_core_paused_already) + 
cam_hw_cdm_pause_core(cdm_hw, false); + } + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_cfg, &dump_reg[0]); + + if (cdm_version >= CAM_CDM_VERSION_2_0) + CAM_INFO(CAM_CDM, + "Core cfg: AHB_Burst_Len: %u, AHB_Burst_En: %u, AHB_stop_on_err: %u, Priority: %s, Imp_Wait: %u, Pririty_mask: 0x%x", + dump_reg[0] & CAM_CDM_CORE_CFG_AHB_BURST_LEN_MASK, + (bool)(dump_reg[0] & + CAM_CDM_CORE_CFG_AHB_BURST_EN_MASK), + (bool)(dump_reg[0] & + CAM_CDM_CORE_CFG_AHB_STOP_ON_ERR_MASK), + (dump_reg[0] & CAM_CDM_CORE_CFG_ARB_SEL_RR_MASK) ? "RR": + "PRI", + (bool)(dump_reg[0] & + CAM_CDM_CORE_CFG_IMPLICIT_WAIT_EN_MASK), + (dump_reg[0] & CAM_CDM_CORE_CFG_PRIORITY_MASK) >> + CAM_CDM_CORE_CFG_PRIORITY_SHIFT); + else + CAM_INFO(CAM_CDM, + "Core cfg: AHB_Burst_Len: %u, AHB_Burst_En: %u, AHB_stop_on_err: %u", + dump_reg[0] & CAM_CDM_CORE_CFG_AHB_BURST_LEN_MASK, + (bool)(dump_reg[0] & + CAM_CDM_CORE_CFG_AHB_BURST_EN_MASK), + (bool)(dump_reg[0] & + CAM_CDM_CORE_CFG_AHB_STOP_ON_ERR_MASK)); + + if (cdm_version >= CAM_CDM_VERSION_2_1) { + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->irq_context_status, + &dump_reg[0]); + CAM_INFO(CAM_CDM, "irq_context_status: 0x%x", dump_reg[0]); + } + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo_irq; i++) { + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_status, &dump_reg[0]); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_set, &dump_reg[1]); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_mask, &dump_reg[2]); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_clear, &dump_reg[3]); + + CAM_INFO(CAM_CDM, + "cnt %d irq status 0x%x set 0x%x mask 0x%x clear 0x%x", + i, dump_reg[0], dump_reg[1], dump_reg[2], dump_reg[3]); + } + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->current_bl_base, &dump_reg[0]); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->current_bl_len, &dump_reg[1]); + + if (cdm_version >= CAM_CDM_VERSION_2_0) + CAM_INFO(CAM_CDM, + "Last 
fetched BL by cdm from fifo: %u has Base: 0x%x, len: %d ARB: %d tag: %d ", + (dump_reg[1] & CAM_CDM_CURRENT_BL_FIFO) >> + CAM_CDM_CURRENT_BL_FIFO_SHIFT, + dump_reg[0], + (dump_reg[1] & CAM_CDM_CURRENT_BL_LEN), + (dump_reg[1] & CAM_CDM_CURRENT_BL_ARB) >> + CAM_CDM_CURRENT_BL_ARB_SHIFT, + (dump_reg[1] & CAM_CDM_CURRENT_BL_TAG) >> + CAM_CDM_CURRENT_BL_TAG_SHIFT); + else + CAM_INFO(CAM_CDM, + "Last fetched BL by cdm has Base: 0x%x, len: %d tag: %d ", + dump_reg[0], + (dump_reg[1] & CAM_CDM_CURRENT_BL_LEN), + (dump_reg[1] & CAM_CDM_CURRENT_BL_TAG) >> + CAM_CDM_CURRENT_BL_TAG_SHIFT); + + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->wait_status, &dump_reg[0]); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->comp_wait[0]->comp_wait_status, + &dump_reg[1]); + cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->comp_wait[1]->comp_wait_status, + &dump_reg[2]); + CAM_INFO(CAM_CDM, "Wait status: 0x%x, Comp_wait_status0: 0x%x:, Comp_wait_status1: 0x%x", + dump_reg[0], dump_reg[1], dump_reg[2]); + + cam_hw_cdm_disable_core_dbg(cdm_hw); + if (pause_core) + cam_hw_cdm_pause_core(cdm_hw, false); +} + +enum cam_cdm_arbitration cam_cdm_get_arbitration_type( + uint32_t cdm_version, + enum cam_cdm_id id) +{ + enum cam_cdm_arbitration arbitration; + + if (cdm_version < CAM_CDM_VERSION_2_0) { + arbitration = CAM_CDM_ARBITRATION_NONE; + goto end; + } + switch (id) { + case CAM_CDM_CPAS: + arbitration = CAM_CDM_ARBITRATION_ROUND_ROBIN; + break; + default: + arbitration = CAM_CDM_ARBITRATION_PRIORITY_BASED; + break; + } +end: + return arbitration; +} + +int cam_hw_cdm_set_cdm_blfifo_cfg(struct cam_hw_info *cdm_hw) +{ + int rc = 0, i; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo_irq; i++) { + if (!core->bl_fifo[i].bl_depth) + continue; + + rc = cam_cdm_write_hw_reg(cdm_hw, + core->offsets->irq_reg[i]->irq_mask, 0x70003); + if (rc) { + CAM_ERR(CAM_CDM, + "Unable to write to %s%u irq mask register", + 
cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + rc = -EIO; + goto end; + } + } + + if (core->hw_version >= CAM_CDM_VERSION_2_0) { + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) { + rc = cam_cdm_write_hw_reg(cdm_hw, + core->offsets->bl_fifo_reg[i]->bl_fifo_cfg, + core->bl_fifo[i].bl_depth + << CAM_CDM_BL_FIFO_LENGTH_CFG_SHIFT); + if (rc) { + CAM_ERR(CAM_CDM, + "Unable to write to %s%u irq mask register", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + rc = -EIO; + goto end; + } + } + } +end: + return rc; +} + +int cam_hw_cdm_set_cdm_core_cfg(struct cam_hw_info *cdm_hw) +{ + uint32_t cdm_version; + uint32_t cfg_mask = 0; + int rc; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + struct cam_cdm_private_dt_data *pvt_data = + (struct cam_cdm_private_dt_data *)cdm_hw->soc_info.soc_private; + + cfg_mask = cfg_mask | + CAM_CDM_AHB_STOP_ON_ERROR| + CAM_CDM_AHB_BURST_EN| + CAM_CDM_AHB_BURST_LEN_16; + + /* use version from cdm_core structure. */ + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->cdm_hw_version, + &cdm_version)) { + CAM_ERR(CAM_CDM, "Error reading %s%u register", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + rc = -EIO; + goto end; + } + + if (cdm_version >= CAM_CDM_VERSION_2_0) { + if (core->id != CAM_CDM_CPAS && + (!pvt_data->is_single_ctx_cdm)) + cfg_mask = cfg_mask | CAM_CDM_IMPLICIT_WAIT_EN; + + if (core->arbitration == CAM_CDM_ARBITRATION_ROUND_ROBIN) + cfg_mask = cfg_mask | CAM_CDM_ARB_SEL_RR; + + } + + if (cdm_version >= CAM_CDM_VERSION_2_1) { + cfg_mask = cfg_mask | ((uint32_t)pvt_data->priority_group << + core->offsets->cmn_reg->priority_group_bit_offset); + } + + rc = cam_cdm_write_hw_reg(cdm_hw, + core->offsets->cmn_reg->core_cfg, cfg_mask); + if (rc) { + CAM_ERR(CAM_CDM, "Error writing %s%u core cfg", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + rc = -EIO; + goto end; + } + +end: + return rc; +} + +int cam_hw_cdm_wait_for_bl_fifo( + struct cam_hw_info *cdm_hw, 
+ uint32_t bl_count, + uint32_t fifo_idx) +{ + uint32_t pending_bl = 0; + int32_t available_bl_slots = 0; + int rc = -EIO; + long time_left; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + struct cam_cdm_bl_fifo *bl_fifo = NULL; + + if (fifo_idx >= CAM_CDM_BL_FIFO_MAX) { + rc = -EINVAL; + CAM_ERR(CAM_CDM, + "Invalid fifo index %d rc = %d", + fifo_idx, rc); + goto end; + } + + bl_fifo = &core->bl_fifo[fifo_idx]; + + do { + if (cam_hw_cdm_bl_fifo_pending_bl_rb_in_fifo(cdm_hw, fifo_idx, &pending_bl)) { + CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's"); + rc = -EIO; + break; + } + available_bl_slots = bl_fifo->bl_depth - pending_bl; + if (available_bl_slots < 0) { + CAM_ERR(CAM_CDM, "Invalid available slots %d:%d:%d", + available_bl_slots, bl_fifo->bl_depth, + pending_bl); + break; + } + if (0 == (available_bl_slots - 1)) { + reinit_completion(&core->bl_fifo[fifo_idx].bl_complete); + + rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, + true, fifo_idx); + if (rc) { + CAM_ERR(CAM_CDM, "Enable BL done irq failed"); + break; + } + time_left = cam_common_wait_for_completion_timeout( + &core->bl_fifo[fifo_idx].bl_complete, + msecs_to_jiffies( + CAM_CDM_BL_FIFO_WAIT_TIMEOUT)); + if (time_left <= 0) { + CAM_ERR(CAM_CDM, + "CDM HW BL Wait timed out failed"); + if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, + false, fifo_idx)) + CAM_ERR(CAM_CDM, + "Disable BL done irq failed"); + rc = -EIO; + break; + } + if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, + false, fifo_idx)) + CAM_ERR(CAM_CDM, "Disable BL done irq failed"); + rc = 1; + CAM_DBG(CAM_CDM, "CDM HW is ready for data"); + } else { + CAM_DBG(CAM_CDM, + "BL slot available_cnt=%d requested=%d", + (available_bl_slots - 1), bl_count); + rc = available_bl_slots - 1; + break; + } + } while (1); + +end: + + return rc; +} + +bool cam_hw_cdm_bl_write( + struct cam_hw_info *cdm_hw, uint32_t src, + uint32_t len, uint32_t tag, bool set_arb, + uint32_t fifo_idx) +{ + struct cam_cdm *cdm_core = (struct cam_cdm *)cdm_hw->core_info; + 
+ CAM_DBG(CAM_CDM, "%s%d Base: 0x%x, Len: %u, Tag: %u, set_arb: %u, fifo_idx: %u", + cdm_hw->soc_info.label_name, cdm_hw->soc_info.index, + src, len, tag, set_arb, fifo_idx); + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->bl_fifo_reg[fifo_idx]->bl_fifo_base, + src)) { + CAM_ERR(CAM_CDM, "Failed to write CDM base to BL base"); + return true; + } + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->bl_fifo_reg[fifo_idx]->bl_fifo_len, + ((len & CAM_CDM_FIFO_LEN_REG_LEN_MASK) | + ((tag & CAM_CDM_FIFO_LEN_REG_TAG_MASK) << CAM_CDM_FIFO_LEN_REG_TAG_SHIFT)) | + ((set_arb) ? (1 << CAM_CDM_FIFO_LEN_REG_ARB_SHIFT) : (0)))) { + CAM_ERR(CAM_CDM, "Failed to write CDM BL len"); + return true; + } + return false; +} + +bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw, uint32_t fifo_idx) +{ + struct cam_cdm *cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->bl_fifo_reg[fifo_idx]->bl_fifo_store, + 1)) { + CAM_ERR(CAM_CDM, "Failed to write CDM commit BL"); + return true; + } + return false; +} + +int cam_hw_cdm_submit_gen_irq( + struct cam_hw_info *cdm_hw, + struct cam_cdm_hw_intf_cmd_submit_bl *req, + uint32_t fifo_idx) +{ + struct cam_cdm_bl_cb_request_entry *node; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + struct cam_cdm_bl_request *cdm_cmd = req->data; + dma_addr_t hw_vaddr_ptr = 0; + int rc; + bool bit_wr_enable = false; + size_t len, genirq_cmd_size; + + if (core->bl_fifo[fifo_idx].bl_tag > (core->bl_fifo[fifo_idx].bl_depth - 1)) { + CAM_ERR(CAM_CDM, "Invalid BL Tag: %d, BL Depth: %d Fifo_idx: %d", + core->bl_fifo[fifo_idx].bl_tag, + core->bl_fifo[fifo_idx].bl_depth, + fifo_idx); + return -EINVAL; + } + + genirq_cmd_size = 4 * core->ops->cdm_required_size_genirq(); + + CAM_DBG(CAM_CDM, "Last Tag: 0x%x Total BLs: %d, Cookie: %d", + core->bl_fifo[fifo_idx].bl_tag, cdm_cmd->cmd_arrary_count, cdm_cmd->cookie); + + rc = cam_mem_get_io_buf(cdm_cmd->genirq_buff->handle, 
core->iommu_hdl.non_secure, + &hw_vaddr_ptr, &len, NULL, NULL); + if (rc) { + CAM_ERR(CAM_CDM, "Getting a hwva from mem_hdl failed. rc: %d", rc); + return -EINVAL; + } + + node = CAM_MEM_ZALLOC(sizeof(struct cam_cdm_bl_cb_request_entry), + GFP_KERNEL); + if (!node) { + CAM_ERR(CAM_CDM, "Failed while getting memory for the Node"); + return -ENOMEM; + } + + if (core->offsets->reg_data->num_bl_fifo > 1) + bit_wr_enable = true; + + node->request_type = CAM_HW_CDM_BL_CB_CLIENT; + node->client_hdl = req->handle; + node->cookie = cdm_cmd->cookie; + node->bl_tag = core->bl_fifo[fifo_idx].bl_tag; + node->userdata = cdm_cmd->userdata; + list_add_tail(&node->entry, &core->bl_fifo[fifo_idx].bl_request_list); + + core->ops->cdm_write_genirq( + cdm_cmd->genirq_buff->cpu_addr + (cdm_cmd->genirq_buff->used_bytes / 4), + core->bl_fifo[fifo_idx].bl_tag, bit_wr_enable, fifo_idx); + rc = cam_hw_cdm_bl_write(cdm_hw, + (uint32_t)(hw_vaddr_ptr + cdm_cmd->genirq_buff->offset), + genirq_cmd_size - 1, + core->bl_fifo[fifo_idx].bl_tag, cdm_cmd->gen_irq_arb, fifo_idx); + if (rc) { + CAM_ERR(CAM_CDM, "CDM hw bl write failed for gen irq bltag=%d", + core->bl_fifo[fifo_idx].bl_tag); + list_del_init(&node->entry); + CAM_MEM_FREE(node); + node = NULL; + return -EIO; + } + + CAM_DBG(CAM_CDM, "GenIRQ BL: First Word: 0x%llx Second Word: 0x%llx", + *(cdm_cmd->genirq_buff->cpu_addr + (cdm_cmd->genirq_buff->used_bytes / 4)), + *(cdm_cmd->genirq_buff->cpu_addr + (cdm_cmd->genirq_buff->used_bytes / 4) + 1)); + + cdm_cmd->genirq_buff->used_bytes += genirq_cmd_size; + cdm_cmd->genirq_buff->offset += genirq_cmd_size; + + if (cam_presil_mode_enabled()) { + CAM_DBG(CAM_PRESIL, + "Sending CDM gen irq cmd buffer:%d with iommu_hdl:%d", + cdm_cmd->genirq_buff->handle, core->iommu_hdl.non_secure); + + rc = cam_mem_mgr_send_buffer_to_presil(core->iommu_hdl.non_secure, + cdm_cmd->genirq_buff->handle); + if (rc) { + CAM_ERR(CAM_PRESIL, + "Failed to send CDM gen irq cmd buffer fifo_idx:%d mem_handle:%d rc:%d", + 
fifo_idx, cdm_cmd->genirq_buff->handle, rc); + return rc; + } + } + + if (cam_hw_cdm_commit_bl_write(cdm_hw, fifo_idx)) { + CAM_ERR(CAM_CDM, + "Cannot commit the genirq BL with Tag: %d", + core->bl_fifo[fifo_idx].bl_tag); + list_del_init(&node->entry); + CAM_MEM_FREE(node); + node = NULL; + rc = -EIO; + } + + trace_cam_log_event("CDM_START", "CDM_START_IRQ", cdm_cmd->cookie, 0); + + return rc; +} + +int cam_hw_cdm_submit_debug_gen_irq( + struct cam_hw_info *cdm_hw, + struct cam_cdm_hw_intf_cmd_submit_bl *req, + uint32_t fifo_idx) +{ + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + struct cam_cdm_bl_request *cdm_cmd = req->data; + dma_addr_t hw_vaddr_ptr = 0; + int rc; + bool bit_wr_enable = false; + size_t len, genirq_cmd_size; + + genirq_cmd_size = 4 * core->ops->cdm_required_size_genirq(); + + if (core->bl_fifo[fifo_idx].bl_tag > (core->bl_fifo[fifo_idx].bl_depth - 1)) { + CAM_ERR(CAM_CDM, "Invalid BL Tag: %d, BL Depth: %d Fifo_idx: %d", + core->bl_fifo[fifo_idx].bl_tag, + core->bl_fifo[fifo_idx].bl_depth, + fifo_idx); + return -EINVAL; + } + + if ((cdm_cmd->genirq_buff->size - cdm_cmd->genirq_buff->used_bytes) < genirq_cmd_size) { + CAM_ERR(CAM_CDM, "Insufficient memory for GenIRQ Command"); + return -ENOMEM; + } + + CAM_DBG(CAM_CDM, "Last Tag: 0x%x Total BLs: %d, Cookie: %d", + core->bl_fifo[fifo_idx].bl_tag, cdm_cmd->cmd_arrary_count, cdm_cmd->cookie); + + rc = cam_mem_get_io_buf(cdm_cmd->genirq_buff->handle, core->iommu_hdl.non_secure, + &hw_vaddr_ptr, &len, NULL, NULL); + if (rc) { + CAM_ERR(CAM_CDM, "Getting a hwva from mem_hdl failed. 
rc: %d", rc); + return -EINVAL; + } + + if (core->offsets->reg_data->num_bl_fifo > 1) + bit_wr_enable = true; + + core->ops->cdm_write_genirq( + cdm_cmd->genirq_buff->cpu_addr + (cdm_cmd->genirq_buff->used_bytes / 4), + CAM_CDM_DBG_GEN_IRQ_USR_DATA, bit_wr_enable, fifo_idx); + rc = cam_hw_cdm_bl_write(cdm_hw, + (uint32_t)(hw_vaddr_ptr + cdm_cmd->genirq_buff->offset), + genirq_cmd_size - 1, + core->bl_fifo[fifo_idx].bl_tag, + false, fifo_idx); + if (rc) { + CAM_ERR(CAM_CDM, "CDM hw bl write failed for Debug GenIRQ USRdata: %d, Tag: 0x%x", + CAM_CDM_DBG_GEN_IRQ_USR_DATA, core->bl_fifo[fifo_idx].bl_tag); + return -EIO; + } + + cdm_cmd->genirq_buff->used_bytes += genirq_cmd_size; + cdm_cmd->genirq_buff->offset += genirq_cmd_size; + + if (cam_hw_cdm_commit_bl_write(cdm_hw, fifo_idx)) { + CAM_ERR(CAM_CDM, "Cannot commit the Debug GenIRQ BL with Tag: 0x%x", + core->bl_fifo[fifo_idx].bl_tag); + return -EIO; + } + + return rc; +} + +int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw, + struct cam_cdm_hw_intf_cmd_submit_bl *req, + struct cam_cdm_client *client) +{ + unsigned int i; + int rc = 0; + struct cam_cdm_bl_request *cdm_cmd = req->data; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + struct cam_cdm_bl_fifo *bl_fifo = NULL; + uint32_t fifo_idx = 0; + int write_count = 0; + + fifo_idx = CAM_CDM_GET_BLFIFO_IDX(client->handle); + + CAM_DBG(CAM_CDM, "Submit bl to %s%u", cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + if (fifo_idx >= CAM_CDM_BL_FIFO_MAX) { + rc = -EINVAL; + CAM_ERR(CAM_CDM, "Invalid handle 0x%x, rc = %d", + client->handle, rc); + goto end; + } + + if ((cdm_cmd->genirq_buff->size - cdm_cmd->genirq_buff->used_bytes) < + (core->ops->cdm_required_size_genirq() * 4)) { + CAM_ERR(CAM_CDM, "Insufficient memory for GenIRQ Command"); + rc = -ENOMEM; + goto end; + } + + bl_fifo = &core->bl_fifo[fifo_idx]; + + if (req->data->cmd_arrary_count > bl_fifo->bl_depth) { + CAM_INFO(CAM_CDM, + "requested BL more than max size, cnt=%d max=%d", + 
req->data->cmd_arrary_count, + bl_fifo->bl_depth); + } + + mutex_lock(&core->bl_fifo[fifo_idx].fifo_lock); + mutex_lock(&client->lock); + + /* + * Check PF status bit to avoid submiting commands to CDM + */ + if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) || + test_bit(CAM_CDM_RESET_HW_STATUS, &core->cdm_status) || + test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status)) { + mutex_unlock(&client->lock); + mutex_unlock(&core->bl_fifo[fifo_idx].fifo_lock); + return -EAGAIN; + } + + for (i = 0; i < req->data->cmd_arrary_count ; i++) { + dma_addr_t hw_vaddr_ptr = 0; + size_t len = 0; + + if ((!cdm_cmd->cmd_flex[i].len) || + (cdm_cmd->cmd_flex[i].len > CAM_CDM_MAX_BL_LENGTH)) { + CAM_ERR(CAM_CDM, + "cmd len=: %d is invalid_ent: %d, num_cmd_ent: %d", + cdm_cmd->cmd_flex[i].len, i, + req->data->cmd_arrary_count); + rc = -EINVAL; + break; + } + + /* + * While commands submission is ongoing, if error/reset/PF occurs, prevent + * further command submission. + */ + if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) || + test_bit(CAM_CDM_RESET_HW_STATUS, &core->cdm_status) || + test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status)) { + CAM_ERR_RATE_LIMIT(CAM_CDM, + "In error/reset/PF state cnt=%d total cnt=%d cdm_status 0x%x", + i, req->data->cmd_arrary_count, + core->cdm_status); + rc = -EAGAIN; + break; + } + if (write_count == 0) { + write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw, + (req->data->cmd_arrary_count - i), fifo_idx); + if (write_count < 0) { + CAM_ERR(CAM_CDM, + "wait for bl fifo failed for ent: %u", i); + rc = -EIO; + break; + } + } + + if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) { + rc = cam_mem_get_io_buf(cdm_cmd->cmd_flex[i].bl_addr.mem_handle, + core->iommu_hdl.non_secure, &hw_vaddr_ptr, + &len, NULL, NULL); + if (rc) { + CAM_ERR(CAM_CDM, + "Getting a hwva from mem_hdl failed. 
rc: %d, cmd_ent: %u", + rc, i); + rc = -EINVAL; + break; + } + } else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) { + if (!cdm_cmd->cmd_flex[i].bl_addr.hw_iova) { + CAM_ERR(CAM_CDM, "hw_iova is null for ent: %d", i); + rc = -EINVAL; + break; + } + + hw_vaddr_ptr = (dma_addr_t)cdm_cmd->cmd_flex[i].bl_addr.hw_iova; + len = cdm_cmd->cmd_flex[i].len + cdm_cmd->cmd_flex[i].offset; + } else { + CAM_ERR(CAM_CDM, + "Only mem hdl/hw va type is supported %d", + req->data->type); + rc = -EINVAL; + break; + } + + if ((hw_vaddr_ptr) && (len) && (len >= cdm_cmd->cmd_flex[i].offset)) { + if ((len - cdm_cmd->cmd_flex[i].offset) < cdm_cmd->cmd_flex[i].len) { + CAM_ERR(CAM_CDM, + "Not enough buffer cmd offset: %u cmd length: %u", + cdm_cmd->cmd_flex[i].offset, + cdm_cmd->cmd_flex[i].len); + rc = -EINVAL; + break; + } + + CAM_DBG(CAM_CDM, "Got the hwva: %pK, type: %u", + hw_vaddr_ptr, req->data->type); + + rc = cam_hw_cdm_bl_write(cdm_hw, + ((uint32_t)hw_vaddr_ptr + cdm_cmd->cmd_flex[i].offset), + (cdm_cmd->cmd_flex[i].len - 1), + core->bl_fifo[fifo_idx].bl_tag, + cdm_cmd->cmd_flex[i].arbitrate, + fifo_idx); + if (rc) { + CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d", + i, req->data->cmd_arrary_count); + rc = -EIO; + break; + } + + if (cam_hw_cdm_commit_bl_write(cdm_hw, fifo_idx)) { + CAM_ERR(CAM_CDM, "Commit failed for BL: %d Tag: %u", + i, core->bl_fifo[fifo_idx].bl_tag); + rc = -EIO; + break; + } + + CAM_DBG(CAM_CDM, "Commit success for BL: %d of %d, Tag: %u", (i + 1), + req->data->cmd_arrary_count, + core->bl_fifo[fifo_idx].bl_tag); + + write_count--; + core->bl_fifo[fifo_idx].bl_tag++; + core->bl_fifo[fifo_idx].bl_tag %= (bl_fifo->bl_depth - 1); + + if (cdm_cmd->cmd_flex[i].enable_debug_gen_irq) { + if (write_count == 0) { + write_count = + cam_hw_cdm_wait_for_bl_fifo(cdm_hw, 1, fifo_idx); + if (write_count < 0) { + CAM_ERR(CAM_CDM, "wait for bl fifo failed %d:%d", + i, req->data->cmd_arrary_count); + rc = -EIO; + break; + } + } + + rc = 
cam_hw_cdm_submit_debug_gen_irq(cdm_hw, req, fifo_idx); + if (!rc) { + CAM_DBG(CAM_CDM, + "Commit success for Dbg_GenIRQ_BL, Tag: %d", + core->bl_fifo[fifo_idx].bl_tag); + write_count--; + core->bl_fifo[fifo_idx].bl_tag++; + core->bl_fifo[fifo_idx].bl_tag %= (bl_fifo->bl_depth - 1); + } else { + CAM_WARN(CAM_CDM, + "Failed in submitting the debug gen entry. rc: %d", + rc); + continue; + } + } + + if (req->data->flag && (i == (req->data->cmd_arrary_count - 1))) { + + if (write_count == 0) { + write_count = + cam_hw_cdm_wait_for_bl_fifo(cdm_hw, 1, fifo_idx); + if (write_count < 0) { + CAM_ERR(CAM_CDM, "wait for bl fifo failed %d:%d", + i, req->data->cmd_arrary_count); + rc = -EIO; + break; + } + } + + if (core->arbitration == CAM_CDM_ARBITRATION_PRIORITY_BASED) + cdm_cmd->gen_irq_arb = true; + else + cdm_cmd->gen_irq_arb = false; + + rc = cam_hw_cdm_submit_gen_irq(cdm_hw, req, fifo_idx); + if (!rc) { + CAM_DBG(CAM_CDM, "Commit success for GenIRQ_BL, Tag: %d", + core->bl_fifo[fifo_idx].bl_tag); + core->bl_fifo[fifo_idx].bl_tag++; + core->bl_fifo[fifo_idx].bl_tag %= (bl_fifo->bl_depth - 1); + } + } else if (!req->data->flag && (i == (req->data->cmd_arrary_count - 1))) { + core->bl_fifo[fifo_idx].last_bl_tag_done = -1; + } + } else { + CAM_ERR(CAM_CDM, + "Sanity check failed for cdm_cmd: %d, Hdl: 0x%x, len: %zu, offset: 0x%x, num_cmds: %d", + i, cdm_cmd->cmd_flex[i].bl_addr.mem_handle, len, + cdm_cmd->cmd_flex[i].offset, req->data->cmd_arrary_count); + rc = -EINVAL; + break; + } + } + mutex_unlock(&client->lock); + mutex_unlock(&core->bl_fifo[fifo_idx].fifo_lock); + +end: + return rc; + +} + +static void cam_hw_cdm_reset_cleanup( + struct cam_hw_info *cdm_hw, + uint32_t handle) +{ + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + int i; + struct cam_cdm_bl_cb_request_entry *node, *tnode; + bool flush_hw = false; + bool reset_err = false; + + if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) || + test_bit(CAM_CDM_FLUSH_HW_STATUS, &core->cdm_status)) 
+ flush_hw = true; + + if (test_bit(CAM_CDM_RESET_ERR_STATUS, &core->cdm_status)) + reset_err = true; + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) { + list_for_each_entry_safe(node, tnode, + &core->bl_fifo[i].bl_request_list, entry) { + if (node->request_type == + CAM_HW_CDM_BL_CB_CLIENT) { + CAM_DBG(CAM_CDM, + "Notifying client %d for tag %d", + node->client_hdl, node->bl_tag); + if (flush_hw) { + enum cam_cdm_cb_status status; + + status = reset_err ? + CAM_CDM_CB_STATUS_HW_ERROR : + CAM_CDM_CB_STATUS_HW_RESUBMIT; + + cam_cdm_notify_clients(cdm_hw, + (node->client_hdl == handle) ? + CAM_CDM_CB_STATUS_HW_FLUSH : + status, + (void *)node); + } + else + cam_cdm_notify_clients(cdm_hw, + CAM_CDM_CB_STATUS_HW_RESET_DONE, + (void *)node); + } + list_del_init(&node->entry); + CAM_MEM_FREE(node); + node = NULL; + } + core->bl_fifo[i].bl_tag = 0; + core->bl_fifo[i].last_bl_tag_done = -1; + atomic_set(&core->bl_fifo[i].work_record, 0); + } +} + +static void cam_hw_cdm_work(struct work_struct *work) +{ + struct cam_cdm_work_payload *payload; + struct cam_hw_info *cdm_hw; + struct cam_cdm *core; + int i, fifo_idx; + struct cam_cdm_bl_cb_request_entry *tnode = NULL; + struct cam_cdm_bl_cb_request_entry *node = NULL; + + payload = container_of(work, struct cam_cdm_work_payload, work); + if (!payload) { + CAM_ERR(CAM_CDM, "NULL payload"); + return; + } + + cdm_hw = payload->hw; + core = (struct cam_cdm *)cdm_hw->core_info; + fifo_idx = payload->fifo_idx; + if ((fifo_idx >= core->offsets->reg_data->num_bl_fifo) || + (!core->bl_fifo[fifo_idx].bl_depth)) { + CAM_ERR(CAM_CDM, "Invalid fifo idx %d", + fifo_idx); + CAM_MEM_FREE(payload); + payload = NULL; + return; + } + + cam_common_util_thread_switch_delay_detect( + "cam_cdm_workq", "schedule", cam_hw_cdm_work, + payload->workq_scheduled_ts, + CAM_WORKQ_SCHEDULE_TIME_THRESHOLD); + + CAM_DBG(CAM_CDM, "IRQ status=0x%x", payload->irq_status); + if (payload->irq_status & + CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) { + 
CAM_DBG(CAM_CDM, "inline IRQ data=0x%x last tag: 0x%x", + payload->irq_data, core->bl_fifo[payload->fifo_idx].last_bl_tag_done); + + if (payload->irq_data == 0xff) { + CAM_INFO(CAM_CDM, "%s%u Debug genirq received", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + CAM_MEM_FREE(payload); + payload = NULL; + return; + } + + mutex_lock(&cdm_hw->hw_mutex); + mutex_lock(&core->bl_fifo[fifo_idx].fifo_lock); + + if (atomic_read(&core->bl_fifo[fifo_idx].work_record)) + atomic_dec(&core->bl_fifo[fifo_idx].work_record); + + if (list_empty(&core->bl_fifo[fifo_idx].bl_request_list)) { + CAM_INFO(CAM_CDM, "Fifo list empty, idx %d tag %d arb %d", + fifo_idx, payload->irq_data, core->arbitration); + mutex_unlock(&core->bl_fifo[fifo_idx].fifo_lock); + mutex_unlock(&cdm_hw->hw_mutex); + CAM_MEM_FREE(payload); + payload = NULL; + return; + } + + if (core->bl_fifo[fifo_idx].last_bl_tag_done != payload->irq_data) { + core->bl_fifo[fifo_idx].last_bl_tag_done = payload->irq_data; + list_for_each_entry_safe(node, tnode, + &core->bl_fifo[fifo_idx].bl_request_list, entry) { + if (node->request_type == CAM_HW_CDM_BL_CB_CLIENT) { + cam_cdm_notify_clients(cdm_hw, + CAM_CDM_CB_STATUS_BL_SUCCESS, (void *)node); + } else if (node->request_type == CAM_HW_CDM_BL_CB_INTERNAL) { + CAM_ERR(CAM_CDM, "Invalid node=%pK %d", + node, node->request_type); + } + + list_del_init(&node->entry); + if (node->bl_tag == payload->irq_data) { + CAM_MEM_FREE(node); + node = NULL; + break; + } + + CAM_MEM_FREE(node); + node = NULL; + } + } else { + CAM_INFO(CAM_CDM, "Skip GenIRQ, tag 0x%x fifo %d", + payload->irq_data, payload->fifo_idx); + } + + mutex_unlock(&core->bl_fifo[payload->fifo_idx].fifo_lock); + mutex_unlock(&cdm_hw->hw_mutex); + } + + if (payload->irq_status & + CAM_CDM_IRQ_STATUS_BL_DONE_MASK) { + if (test_bit(payload->fifo_idx, &core->cdm_status)) { + CAM_DBG(CAM_CDM, "%s%u HW BL done IRQ", + cdm_hw->soc_info.label_name, cdm_hw->soc_info.index); + 
complete(&core->bl_fifo[payload->fifo_idx].bl_complete); + } + } + if (payload->irq_status & + CAM_CDM_IRQ_STATUS_ERRORS) { + int reset_hw_hdl = 0x0; + + CAM_ERR_RATE_LIMIT(CAM_CDM, "%s%u Error IRQ status %d\n", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index, payload->irq_status); + set_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status); + mutex_lock(&cdm_hw->hw_mutex); + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) + mutex_lock(&core->bl_fifo[i].fifo_lock); + + cam_hw_cdm_dump_core_debug_registers(cdm_hw, true); + + if (payload->irq_status & + CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) { + node = list_first_entry_or_null( + &core->bl_fifo[payload->fifo_idx].bl_request_list, + struct cam_cdm_bl_cb_request_entry, entry); + + if (node != NULL) { + if (node->request_type == + CAM_HW_CDM_BL_CB_CLIENT) { + cam_cdm_notify_clients(cdm_hw, + CAM_CDM_CB_STATUS_INVALID_BL_CMD, + (void *)node); + } else if (node->request_type == + CAM_HW_CDM_BL_CB_INTERNAL) { + CAM_ERR(CAM_CDM, + "Invalid node=%pK %d", node, + node->request_type); + } + list_del_init(&node->entry); + CAM_MEM_FREE(node); + } + } + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) + mutex_unlock(&core->bl_fifo[i].fifo_lock); + + if (payload->irq_status & + CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) + cam_hw_cdm_reset_hw(cdm_hw, reset_hw_hdl); + + mutex_unlock(&cdm_hw->hw_mutex); + if (!(payload->irq_status & + CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK)) + clear_bit(CAM_CDM_ERROR_HW_STATUS, + &core->cdm_status); + } + + CAM_MEM_FREE(payload); + payload = NULL; + +} + +static void cam_hw_cdm_iommu_fault_handler(struct cam_smmu_pf_info *pf_info) +{ + struct cam_hw_info *cdm_hw = NULL; + struct cam_cdm *core = NULL; + struct cam_cdm_private_dt_data *pvt_data; + int i, rc; + + if (!pf_info) { + CAM_ERR(CAM_CDM, "pf_info is null"); + return; + } + + if (pf_info->token) { + cdm_hw = (struct cam_hw_info *)pf_info->token; + core = (struct cam_cdm *)cdm_hw->core_info; + pvt_data = (struct 
cam_cdm_private_dt_data *) cdm_hw->soc_info.soc_private; + CAM_ERR_RATE_LIMIT(CAM_CDM, "Page fault iova addr %pK\n", + (void *)pf_info->iova); + + /* Check if the PID and MID are valid, if not handle the pf */ + if (((pf_info->pid == pvt_data->pid) && (pf_info->mid == pvt_data->mid))) + goto handle_cdm_pf; + else + return; + +handle_cdm_pf: + + CAM_ERR(CAM_CDM, "Page Fault on %s%u, flags: %u, status: %llu", + core->name, core->id, core->flags, core->cdm_status); + set_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status); + mutex_lock(&cdm_hw->hw_mutex); + /* Pausing CDM HW from doing any further memory transactions */ + cam_hw_cdm_pause_core(cdm_hw, true); + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) + mutex_lock(&core->bl_fifo[i].fifo_lock); + + if (cdm_hw->hw_state == CAM_HW_STATE_POWER_UP) { + cam_hw_cdm_dump_core_debug_registers(cdm_hw, true); + } else + CAM_INFO(CAM_CDM, "%s%u hw is power in off state", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + + for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) + mutex_unlock(&core->bl_fifo[i].fifo_lock); + + /* Notify clients to handle PF event */ + cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT, (void *)pf_info); + /* Stream off CDM completely */ + rc = cam_cdm_pf_stream_off_all_clients(cdm_hw); + if (rc) + CAM_ERR(CAM_CDM, "Stream off failed for %s%u rc: %d", + core->name, core->id, rc); + mutex_unlock(&cdm_hw->hw_mutex); + } else { + CAM_ERR(CAM_CDM, "Invalid token"); + } +} + +irqreturn_t cam_hw_cdm_irq(int irq_num, void *data) +{ + struct cam_hw_info *cdm_hw = data; + struct cam_hw_soc_info *soc_info = &cdm_hw->soc_info; + struct cam_cdm *cdm_core = cdm_hw->core_info; + struct cam_cdm_work_payload *payload[CAM_CDM_BL_FIFO_MAX] = {0}; + uint8_t rst_done_cnt = 0; + uint32_t user_data = 0; + uint32_t irq_status[CAM_CDM_BL_FIFO_MAX] = {0}; + uint32_t irq_context_summary = 0xF; + bool work_status; + int i; + + CAM_DBG(CAM_CDM, "Got irq hw_version 0x%x from %s%u", + 
cdm_core->hw_version, soc_info->label_name, + soc_info->index); + cam_hw_util_hw_lock(cdm_hw); + if (cdm_hw->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_DBG(CAM_CDM, "CDM is in power down state"); + cam_hw_util_hw_unlock(cdm_hw); + return IRQ_HANDLED; + } + if (cdm_core->hw_version >= CAM_CDM_VERSION_2_1) { + if (cam_cdm_read_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->irq_context_status, + &irq_context_summary)) { + CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ status"); + } + } + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo_irq; i++) { + if (!(BIT(i) & irq_context_summary)) { + continue; + } + if (cam_cdm_read_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_status, + &irq_status[i])) { + CAM_ERR(CAM_CDM, "Failed to read %s%u HW IRQ status", + soc_info->label_name, + soc_info->index); + } + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_clear, + irq_status[i])) { + CAM_ERR(CAM_CDM, "Failed to Write %s%u HW IRQ Clear", + soc_info->label_name, + soc_info->index); + } + } + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[0]->irq_clear_cmd, 0x01)) + CAM_ERR(CAM_CDM, "Failed to Write %s%u HW IRQ clr cmd", + soc_info->label_name, + soc_info->index); + if (cam_cdm_read_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->usr_data, + &user_data)) + CAM_ERR(CAM_CDM, "Failed to read %s%u HW IRQ data", + soc_info->label_name, + soc_info->index); + + cam_hw_util_hw_unlock(cdm_hw); + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo_irq; i++) { + if (!irq_status[i]) + continue; + + if (irq_status[i] & CAM_CDM_IRQ_STATUS_RST_DONE_MASK) { + rst_done_cnt++; + continue; + } + + payload[i] = CAM_MEM_ZALLOC(sizeof(struct cam_cdm_work_payload), + GFP_ATOMIC); + + if (!payload[i]) { + CAM_ERR(CAM_CDM, + "failed to allocate memory for fifo %d payload", + i); + continue; + } + + if (irq_status[i] & CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) { + + payload[i]->irq_data = (user_data >> (i * 0x8)) & + CAM_CDM_IRQ_STATUS_USR_DATA_MASK; + + if 
(payload[i]->irq_data == CAM_CDM_DBG_GEN_IRQ_USR_DATA) + CAM_INFO(CAM_CDM, "Debug gen_irq received"); + + atomic_inc(&cdm_core->bl_fifo[i].work_record); + } + + CAM_DBG(CAM_CDM, + "Rcvd of fifo %d userdata 0x%x tag 0x%x irq_stat 0x%x", + i, user_data, payload[i]->irq_data, irq_status[i]); + + payload[i]->fifo_idx = i; + payload[i]->irq_status = irq_status[i]; + payload[i]->hw = cdm_hw; + + INIT_WORK((struct work_struct *)&payload[i]->work, + cam_hw_cdm_work); + + trace_cam_log_event("CDM_DONE", "CDM_DONE_IRQ", + payload[i]->irq_status, + cdm_hw->soc_info.index); + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_clear, + payload[i]->irq_status)) { + CAM_ERR(CAM_CDM, "Failed to Write %s%u HW IRQ Clear", + soc_info->label_name, + soc_info->index); + CAM_MEM_FREE(payload[i]); + return IRQ_HANDLED; + } + + payload[i]->workq_scheduled_ts = ktime_get(); + + work_status = queue_work( + cdm_core->bl_fifo[i].work_queue, + &payload[i]->work); + + if (work_status == false) { + CAM_ERR(CAM_CDM, + "Failed to queue work for FIFO: %d irq=0x%x", + i, payload[i]->irq_status); + CAM_MEM_FREE(payload[i]); + payload[i] = NULL; + } + } + if (rst_done_cnt == cdm_core->offsets->reg_data->num_bl_fifo_irq) { + CAM_DBG(CAM_CDM, "%s%u HW reset done IRQ", + soc_info->label_name, + soc_info->index); + complete(&cdm_core->reset_complete); + } + if (rst_done_cnt && + (rst_done_cnt != cdm_core->offsets->reg_data->num_bl_fifo_irq)) + CAM_INFO(CAM_CDM, + "%s%u Reset IRQ received for %d fifos instead of %d", + soc_info->label_name, + soc_info->index, rst_done_cnt, + cdm_core->offsets->reg_data->num_bl_fifo_irq); + return IRQ_HANDLED; +} + +int cam_hw_cdm_reset_hw(struct cam_hw_info *cdm_hw, uint32_t handle) +{ + struct cam_cdm *cdm_core = NULL; + struct cam_hw_soc_info *soc_info = &cdm_hw->soc_info; + long time_left; + int i, rc = -EIO; + int reset_val = 1; + + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + 
mutex_lock(&cdm_core->bl_fifo[i].fifo_lock); + + set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + + reinit_completion(&cdm_core->reset_complete); + + cam_hw_cdm_pause_core(cdm_hw, true); + + if (cam_cdm_write_hw_reg(cdm_hw, cdm_core->offsets->cmn_reg->rst_cmd, reset_val)) { + CAM_ERR(CAM_CDM, "Failed to Write %s%u HW reset", + soc_info->label_name, + soc_info->index); + goto end; + } + + CAM_DBG(CAM_CDM, "Waiting for %s%u HW reset done", soc_info->label_name, soc_info->index); + time_left = cam_common_wait_for_completion_timeout( + &cdm_core->reset_complete, + msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT)); + + if (time_left <= 0) { + rc = -ETIMEDOUT; + + CAM_ERR(CAM_CDM, "%s%u HW reset Wait failed rc=%d", + soc_info->label_name, + soc_info->index, rc); + + cam_hw_cdm_dump_core_debug_registers(cdm_hw, false); + cam_hw_cdm_pause_core(cdm_hw, false); + goto end; + } + + rc = cam_hw_cdm_set_cdm_core_cfg(cdm_hw); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to configure %s%u rc=%d", + soc_info->label_name, + soc_info->index, rc); + goto end; + } + + rc = cam_hw_cdm_set_cdm_blfifo_cfg(cdm_hw); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to configure %s%u fifo rc=%d", + soc_info->label_name, + soc_info->index, rc); + goto end; + } + + cam_hw_cdm_reset_cleanup(cdm_hw, handle); +end: + clear_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock); + + return rc; +} + +int cam_hw_cdm_handle_error_info( + struct cam_hw_info *cdm_hw, + uint32_t handle) +{ + struct cam_cdm *cdm_core = NULL; + struct cam_cdm_bl_cb_request_entry *node = NULL; + long time_left; + int i, rc = -EIO, reset_hw_hdl = 0x0; + uint32_t current_bl_data = 0, current_fifo = 0, current_tag = 0; + int reset_val = 1; + struct cam_hw_soc_info *soc_info = &cdm_hw->soc_info; + + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + 
mutex_lock(&cdm_core->bl_fifo[i].fifo_lock); + + reinit_completion(&cdm_core->reset_complete); + set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + set_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status); + + if (cdm_hw->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_WARN(CAM_CDM, "CDM is in power down state"); + goto end; + } + + cam_hw_cdm_pause_core(cdm_hw, true); + + rc = cam_cdm_read_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->current_bl_len, + ¤t_bl_data); + + current_fifo = ((CAM_CDM_CURRENT_BL_FIFO & current_bl_data) + >> CAM_CDM_CURRENT_BL_FIFO_SHIFT); + current_tag = ((CAM_CDM_CURRENT_BL_TAG & current_bl_data) + >> CAM_CDM_CURRENT_BL_TAG_SHIFT); + + if (current_fifo >= CAM_CDM_BL_FIFO_MAX) { + rc = -EFAULT; + goto end; + } + + CAM_ERR(CAM_CDM, "Hang detected for %s%u's fifo %d with tag 0x%x", + soc_info->label_name, soc_info->index, + current_fifo, current_tag); + + /* dump cdm registers for further debug */ + cam_hw_cdm_dump_core_debug_registers(cdm_hw, false); + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { + if (!cdm_core->bl_fifo[i].bl_depth) + continue; + + reset_val = reset_val | + (1 << (i + CAM_CDM_BL_FIFO_FLUSH_SHIFT)); + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_mask, + 0x70003)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask"); + goto end; + } + } + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->rst_cmd, reset_val)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset"); + goto end; + } + + CAM_DBG(CAM_CDM, "Waiting for CDM HW resetdone"); + time_left = cam_common_wait_for_completion_timeout( + &cdm_core->reset_complete, + msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT)); + + if (time_left <= 0) { + rc = -ETIMEDOUT; + CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc); + set_bit(CAM_CDM_RESET_ERR_STATUS, &cdm_core->cdm_status); + } + + rc = cam_hw_cdm_set_cdm_core_cfg(cdm_hw); + + if (rc) { + CAM_ERR(CAM_CDM, "Failed to configure CDM rc=%d", rc); + goto end; + } + + 
rc = cam_hw_cdm_set_cdm_blfifo_cfg(cdm_hw);
+
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to configure CDM fifo rc=%d", rc);
+		goto end;
+	}
+
+	/* Notify the client whose BL was executing when the hang was detected */
+	node = list_first_entry_or_null(
+		&cdm_core->bl_fifo[current_fifo].bl_request_list,
+		struct cam_cdm_bl_cb_request_entry, entry);
+
+	if (node != NULL) {
+		if (node->request_type == CAM_HW_CDM_BL_CB_CLIENT) {
+			cam_cdm_notify_clients(cdm_hw,
+				CAM_CDM_CB_STATUS_HW_ERROR,
+				(void *)node);
+		} else if (node->request_type == CAM_HW_CDM_BL_CB_INTERNAL) {
+			CAM_ERR(CAM_CDM, "Invalid node=%pK %d", node,
+				node->request_type);
+		}
+		list_del_init(&node->entry);
+		CAM_MEM_FREE(node);
+		node = NULL;
+	}
+
+	cam_hw_cdm_reset_cleanup(cdm_hw, reset_hw_hdl);
+end:
+	/* Always clear transient status bits and release all fifo locks */
+	clear_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status);
+	clear_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
+	clear_bit(CAM_CDM_RESET_ERR_STATUS, &cdm_core->cdm_status);
+	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
+		mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock);
+
+	return rc;
+}
+
+/*
+ * cam_hw_cdm_flush_hw - Flush the CDM HW by issuing a full reset.
+ *
+ * Sets CAM_CDM_FLUSH_HW_STATUS around cam_hw_cdm_reset_hw() so the reset
+ * cleanup path (cam_hw_cdm_reset_cleanup) treats pending BLs as flushed
+ * rather than resubmittable.
+ *
+ * Returns the rc of cam_hw_cdm_reset_hw().
+ */
+int cam_hw_cdm_flush_hw(struct cam_hw_info *cdm_hw, uint32_t handle)
+{
+	struct cam_cdm *cdm_core = NULL;
+	int rc = 0;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+
+	set_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status);
+	rc = cam_hw_cdm_reset_hw(cdm_hw, handle);
+	clear_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status);
+
+	return rc;
+}
+
+/*
+ * cam_hw_cdm_hang_detect - Check whether workqueue processing is lagging.
+ *
+ * Returns 0 if any fifo still has a non-zero work_record (i.e. an IRQ work
+ * item was queued but has not yet run, suggesting a delayed workqueue),
+ * -1 otherwise.
+ */
+int cam_hw_cdm_hang_detect(
+	struct cam_hw_info *cdm_hw,
+	uint32_t handle)
+{
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_hw_soc_info *soc_info;
+	int i, rc = -1;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_info = &cdm_hw->soc_info;
+
+	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
+		if (atomic_read(&cdm_core->bl_fifo[i].work_record)) {
+			CAM_WARN(CAM_CDM,
+				"fifo: %d Workqueue got delayed for %s%u, work_record :%u",
+				i, soc_info->label_name, soc_info->index,
+				atomic_read(&cdm_core->bl_fifo[i].work_record));
+			rc = 0;
+			break;
+		}
+
+	return rc;
+}
+
+int 
cam_hw_cdm_get_cdm_config(struct cam_hw_info *cdm_hw) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cdm *core = NULL; + int rc = 0, ret = 0; + + core = (struct cam_cdm *)cdm_hw->core_info; + soc_info = &cdm_hw->soc_info; + + if (!cam_vmrm_no_register_read_on_bind()) { + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_info->lowest_clk_level, true); + if (rc) { + CAM_ERR(CAM_CDM, "Enable platform failed for dev %s", + soc_info->dev_name); + goto end; + } else { + CAM_DBG(CAM_CDM, "%s%u init success", + soc_info->label_name, soc_info->index); + cdm_hw->hw_state = CAM_HW_STATE_POWER_UP; + } + + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->cdm_hw_version, &core->hw_version)) { + CAM_ERR(CAM_CDM, "Failed to read HW Version for %s%u", + soc_info->label_name, soc_info->index); + rc = -EIO; + goto disable_platform_resource; + } + + if (core->offsets->cmn_reg->cam_version) { + if (cam_cdm_read_hw_reg(cdm_hw, + core->offsets->cmn_reg->cam_version->hw_version, + &core->hw_family_version)) { + CAM_ERR(CAM_CDM, "Failed to read %s%d family Version", + soc_info->label_name, soc_info->index); + rc = -EIO; + goto disable_platform_resource; + } + } + } else { + /* + * This is temporary workaround in TVM to avoid register read during probe/bind. 
+ */ + rc = of_property_read_u32(soc_info->pdev->dev.of_node, + "override-cdm-family", &core->hw_family_version); + if (rc) { + CAM_ERR(CAM_CDM, "no cdm family"); + return rc; + } + + rc = of_property_read_u32(soc_info->pdev->dev.of_node, + "override-cdm-version", &core->hw_version); + if (rc) { + CAM_ERR(CAM_CDM, "no cdm version"); + return rc; + } + } + + if (cam_presil_mode_enabled()) { + uint32_t override_family = 0; + uint32_t override_version = 0; + + rc = of_property_read_u32(soc_info->pdev->dev.of_node, + "override-cdm-family", &override_family); + if (rc) + CAM_INFO(CAM_CDM, + "no cdm family override,using current hw family 0x%x", + core->hw_family_version); + else + core->hw_family_version = override_family; + + rc = of_property_read_u32(soc_info->pdev->dev.of_node, + "override-cdm-version", &override_version); + if (rc) { + CAM_INFO(CAM_CDM, + "no cdm version override,using current hw version 0x%x", + core->hw_version); + rc = 0; + } else { + core->hw_version = override_version; + } + } + + CAM_DBG(CAM_CDM, + "%s%d Hw version read success family =%x hw =%x", + soc_info->label_name, soc_info->index, + core->hw_family_version, core->hw_version); + + core->ops = cam_cdm_get_ops(core->hw_version, NULL, + false); + + if (!core->ops) { + CAM_ERR(CAM_CDM, "Failed to util ops for cdm hw name %s", + core->name); + rc = -EINVAL; + goto disable_platform_resource; + } + +disable_platform_resource: + if (cam_vmrm_no_register_read_on_bind()) + return rc; + ret = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (ret) { + CAM_ERR(CAM_CDM, "disable platform failed for dev %s", + soc_info->dev_name); + } else { + CAM_DBG(CAM_CDM, "%s%d Deinit success", + soc_info->label_name, soc_info->index); + cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + } +end: + return rc; +} + +int cam_hw_cdm_init(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *cdm_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + 
struct cam_cdm *cdm_core = NULL; + int rc, i, reset_hw_hdl = 0x0; + unsigned long flags = 0; + + if (!hw_priv) + return -EINVAL; + + soc_info = &cdm_hw->soc_info; + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_info->lowest_clk_level, true); + if (rc) { + CAM_ERR(CAM_CDM, "Enable platform failed for %s%d", + soc_info->label_name, soc_info->index); + goto end; + } + flags = cam_hw_util_hw_lock_irqsave(cdm_hw); + cdm_hw->hw_state = CAM_HW_STATE_POWER_UP; + cam_hw_util_hw_unlock_irqrestore(cdm_hw, flags); + + CAM_DBG(CAM_CDM, "Enable soc done for %s%d", + soc_info->label_name, soc_info->index); + +/* Before triggering the reset to HW, clear the reset complete */ + clear_bit(CAM_CDM_ERROR_HW_STATUS, &cdm_core->cdm_status); + + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + clear_bit(i, &cdm_core->cdm_status); + reinit_completion(&cdm_core->bl_fifo[i].bl_complete); + } + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { + cdm_core->bl_fifo[i].last_bl_tag_done = -1; + atomic_set(&cdm_core->bl_fifo[i].work_record, 0); + } + + rc = cam_hw_cdm_reset_hw(cdm_hw, reset_hw_hdl); + if (rc) { + CAM_ERR(CAM_CDM, "%s%u HW reset Wait failed rc=%d", + soc_info->label_name, + soc_info->index, rc); + goto disable_return; + } + + CAM_DBG(CAM_CDM, "%s%u Init success", soc_info->label_name, soc_info->index); + return 0; + +disable_return: + rc = -EIO; + flags = cam_hw_util_hw_lock_irqsave(cdm_hw); + cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + cam_hw_util_hw_unlock_irqrestore(cdm_hw, flags); + cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); +end: + return rc; +} + +static inline void cam_hw_cdm_clear_bl_requests(struct cam_cdm *cdm_core) +{ + struct cam_cdm_bl_cb_request_entry *node, *tnode; + int i; + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { + list_for_each_entry_safe(node, tnode, + 
&cdm_core->bl_fifo[i].bl_request_list, entry) { + list_del_init(&node->entry); + CAM_MEM_FREE(node); + node = NULL; + } + } +} + +int cam_hw_cdm_pf_deinit(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *cdm_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cdm *cdm_core = NULL; + int i, rc; + unsigned long flags = 0; + + if (!hw_priv) + return -EINVAL; + + soc_info = &cdm_hw->soc_info; + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_lock(&cdm_core->bl_fifo[i].fifo_lock); + + /* clear bl request */ + cam_hw_cdm_clear_bl_requests(cdm_core); + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock); + + flags = cam_hw_util_hw_lock_irqsave(cdm_hw); + cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + cam_hw_util_hw_unlock_irqrestore(cdm_hw, flags); + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) + CAM_ERR(CAM_CDM, "disable platform failed for %s%u", + soc_info->label_name, soc_info->index); + else + CAM_DBG(CAM_CDM, "%s%u Deinit success", + soc_info->label_name, soc_info->index); + + return rc; +} + +int cam_hw_cdm_deinit(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *cdm_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cdm *cdm_core = NULL; + int rc = 0, i; + uint32_t reset_val = 1; + long time_left; + unsigned long flags = 0; + + if (!hw_priv) + return -EINVAL; + + soc_info = &cdm_hw->soc_info; + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_lock(&cdm_core->bl_fifo[i].fifo_lock); + + /*clear bl request */ + cam_hw_cdm_clear_bl_requests(cdm_core); + + set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + reinit_completion(&cdm_core->reset_complete); + + cam_hw_cdm_pause_core(cdm_hw, true); + + for (i 
= 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) { + if (!cdm_core->bl_fifo[i].bl_depth) + continue; + + reset_val = reset_val | + (1 << (i + CAM_CDM_BL_FIFO_FLUSH_SHIFT)); + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->irq_reg[i]->irq_mask, + 0x70003)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask"); + } + } + + if (cam_cdm_write_hw_reg(cdm_hw, + cdm_core->offsets->cmn_reg->rst_cmd, reset_val)) { + CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset"); + } + + CAM_DBG(CAM_CDM, "Waiting for %s%u HW reset done", + soc_info->label_name, soc_info->index); + time_left = cam_common_wait_for_completion_timeout( + &cdm_core->reset_complete, + msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT)); + + if (time_left <= 0) { + rc = -ETIMEDOUT; + CAM_ERR(CAM_CDM, "%s%u HW reset Wait failed rc=%d", + soc_info->label_name, soc_info->index, rc); + } + + clear_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status); + for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) + mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock); + + flags = cam_hw_util_hw_lock_irqsave(cdm_hw); + cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + cam_hw_util_hw_unlock_irqrestore(cdm_hw, flags); + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) { + CAM_ERR(CAM_CDM, "disable platform failed for %s%u", + soc_info->label_name, soc_info->index); + } else { + CAM_DBG(CAM_CDM, "%s%u Deinit success", + soc_info->label_name, soc_info->index); + } + + return rc; +} + +#ifdef CONFIG_CAM_TEST_IRQ_LINE +static int cam_cdm_test_irq_line(void *hw_priv) +{ + struct cam_hw_info *cdm_hw = hw_priv; + struct cam_cdm *core = NULL; + int rc = 0; + + if (!cdm_hw) { + CAM_ERR(CAM_CDM, "Invalid cdm hw"); + return -EINVAL; + } + + core = (struct cam_cdm *)cdm_hw->core_info; + + rc = cam_cdm_util_cpas_start(cdm_hw); + if (rc) { + CAM_ERR(CAM_CDM, "CDM[%d] Failed in cpas start rc: %d", core->index, rc); + goto done; + } + + rc = cam_hw_cdm_init(cdm_hw, NULL, 0); + if (rc) { + 
CAM_ERR(CAM_CDM, "CDM[%d] Failed in cdm init rc: %d", core->index, rc); + goto cpas_stop; + } + + rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0); + if (rc) { + CAM_ERR(CAM_CDM, "CDM[%d] Failed in cdm deinit rc: %d", core->index, rc); + goto cpas_stop; + } + +cpas_stop: + rc = cam_cpas_stop(core->cpas_handle); + if (rc) + CAM_ERR(CAM_CDM, "CDM[%d] Failed in cpas stop rc: %d", core->index, rc); +done: + return rc; +} +#else +static int cam_cdm_test_irq_line(void *hw_priv) +{ + return -EPERM; +} +#endif + + +#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE)) +static int cam_cdm_test_irq_line_at_probe(struct cam_hw_info *cdm_hw) +{ + return cam_cdm_test_irq_line(cdm_hw); +} +#else +static int cam_cdm_test_irq_line_at_probe(struct cam_hw_info *cdm_hw) +{ + return -EPERM; +} +#endif + +static int cam_hw_cdm_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc, len = 0, i, j; + struct cam_hw_info *cdm_hw = NULL; + struct cam_hw_intf *cdm_hw_intf = NULL; + struct cam_cdm *cdm_core = NULL; + struct cam_cdm_private_dt_data *soc_private = NULL; + struct cam_cpas_register_params cpas_parms; + char cdm_name[128], work_q_name[128]; + struct platform_device *pdev = to_platform_device(dev); + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + cdm_hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!cdm_hw_intf) + return -ENOMEM; + + cdm_hw = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!cdm_hw) { + CAM_MEM_FREE(cdm_hw_intf); + cdm_hw_intf = NULL; + return -ENOMEM; + } + + cdm_hw->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_cdm), GFP_KERNEL); + if (!cdm_hw->core_info) { + CAM_MEM_FREE(cdm_hw); + cdm_hw = NULL; + CAM_MEM_FREE(cdm_hw_intf); + cdm_hw_intf = NULL; + return -ENOMEM; + } + + cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + cdm_hw->soc_info.pdev = pdev; + cdm_hw->soc_info.dev = &pdev->dev; + 
cdm_hw->soc_info.dev_name = pdev->name; + cdm_hw_intf->hw_type = CAM_HW_CDM; + cdm_hw->open_count = 0; + mutex_init(&cdm_hw->hw_mutex); + cam_hw_util_init_hw_lock(cdm_hw); + init_completion(&cdm_hw->hw_complete); + + rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to get dt properties"); + goto release_mem; + } + cdm_hw_intf->hw_idx = cdm_hw->soc_info.index; + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + soc_private = (struct cam_cdm_private_dt_data *) + cdm_hw->soc_info.soc_private; + if (soc_private->dt_cdm_shared == true) + cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM; + else + cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM; + + cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name); + + CAM_DBG(CAM_CDM, "cdm_name %s", cdm_core->name); + + if (cdm_core->id >= CAM_CDM_MAX) { + CAM_ERR(CAM_CDM, "Failed to get CDM HW name for %s", + cdm_core->name); + /* rc is still 0 here; without this, bind reports success after teardown */ + rc = -EINVAL; + goto release_private_mem; + } + + init_completion(&cdm_core->reset_complete); + cdm_hw_intf->hw_priv = cdm_hw; + cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps; + cdm_hw_intf->hw_ops.init = cam_hw_cdm_init; + cdm_hw_intf->hw_ops.deinit = cam_hw_cdm_deinit; + cdm_hw_intf->hw_ops.start = cam_cdm_stream_start; + cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop; + cdm_hw_intf->hw_ops.read = NULL; + cdm_hw_intf->hw_ops.write = NULL; + cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd; + cdm_hw_intf->hw_ops.test_irq_line = cam_cdm_test_irq_line; + mutex_lock(&cdm_hw->hw_mutex); + + CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type, + cdm_hw_intf->hw_idx); + + platform_set_drvdata(pdev, cdm_hw_intf); + + snprintf(cdm_name, sizeof(cdm_name), "%s", cdm_hw->soc_info.label_name); + + rc = cam_smmu_get_handle(cdm_name, &cdm_core->iommu_hdl.non_secure); + if (rc < 0) { + if (rc != -EALREADY) { + CAM_ERR(CAM_CDM, + "%s get iommu handle failed, rc = %d", + cdm_name, rc); + goto unlock_release_mem; + } + rc = 0; + } + + 
cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, + cam_hw_cdm_iommu_fault_handler, cdm_hw); + + cdm_core->iommu_hdl.secure = -1; + + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + INIT_LIST_HEAD(&cdm_core->bl_fifo[i].bl_request_list); + + mutex_init(&cdm_core->bl_fifo[i].fifo_lock); + + init_completion(&cdm_core->bl_fifo[i].bl_complete); + + len = strscpy(work_q_name, cdm_hw->soc_info.label_name, + sizeof(work_q_name)); + /* strscpy returns -E2BIG on truncation; clamp so the offset math below stays in bounds */ + if (len < 0) + len = 0; + snprintf(work_q_name + len, sizeof(work_q_name) - len, "%d_%d", cdm_hw->soc_info.index, i); + cdm_core->bl_fifo[i].work_queue = alloc_workqueue(work_q_name, + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, + CAM_CDM_INFLIGHT_WORKS); + if (!cdm_core->bl_fifo[i].work_queue) { + CAM_ERR(CAM_CDM, + "Workqueue allocation failed for FIFO %d, cdm %s", + i, cdm_core->name); + goto failed_workq_create; + } + + CAM_DBG(CAM_CDM, "wq %s", work_q_name); + } + + for (i = 0; i < cdm_hw->soc_info.irq_count; i++) + irq_data[i] = cdm_hw; + + rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info, + cam_hw_cdm_irq, &(irq_data[0])); + if (rc) { + CAM_ERR(CAM_CDM, + "Failed to request platform resource for %s%u", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + /* destroy_non_secure_hdl skips the workqueue cleanup label; release queues here */ + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + if (cdm_core->bl_fifo[i].work_queue) { + destroy_workqueue(cdm_core->bl_fifo[i].work_queue); + cdm_core->bl_fifo[i].work_queue = NULL; + } + } + goto destroy_non_secure_hdl; + } + + cdm_hw->soc_info.hw_id = CAM_HW_ID_CDM0 + cdm_hw->soc_info.index; + rc = cam_vmvm_populate_hw_instance_info(&cdm_hw->soc_info, NULL, NULL); + if (rc) { + CAM_ERR(CAM_CDM, " cdm %d hw instance populate failed: %d", + cdm_hw->soc_info.index, rc); + goto release_platform_resource; + } + + cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb; + cpas_parms.cell_index = cdm_hw->soc_info.index; + cpas_parms.dev = &pdev->dev; + cpas_parms.userdata = cdm_hw_intf; + strscpy(cpas_parms.identifier, cdm_hw->soc_info.label_name, + CAM_HW_IDENTIFIER_LENGTH); + rc = cam_cpas_register_client(&cpas_parms); + if (rc) { + CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed"); + goto release_platform_resource; + } + CAM_DBG(CAM_CDM, "CPAS registration successful 
handle=%d", + cpas_parms.client_handle); + cdm_core->cpas_handle = cpas_parms.client_handle; + + if (!cam_vmrm_no_register_read_on_bind()) { + rc = cam_cdm_util_cpas_start(cdm_hw); + if (rc) { + CAM_ERR(CAM_CDM, "CPAS start failed"); + goto cpas_unregister; + } + } + + rc = cam_hw_cdm_get_cdm_config(cdm_hw); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to get cdm configuration rc = %d", rc); + goto cpas_stop; + } + + if (cdm_core->hw_version < CAM_CDM_VERSION_2_0) { + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + cdm_core->bl_fifo[i].bl_depth = + CAM_CDM_BL_FIFO_LENGTH_MAX_DEFAULT; + CAM_DBG(CAM_CDM, "Setting FIFO%d length to %d", + i, cdm_core->bl_fifo[i].bl_depth); + } + } else { + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + cdm_core->bl_fifo[i].bl_depth = + soc_private->fifo_depth[i]; + CAM_DBG(CAM_CDM, "Setting FIFO%d length to %d", + i, cdm_core->bl_fifo[i].bl_depth); + } + } + + cdm_core->arbitration = cam_cdm_get_arbitration_type( + cdm_core->hw_version, cdm_core->id); + + cdm_core->cdm_status = CAM_CDM_HW_INIT_STATUS; + + cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL, + false); + + if (!cdm_core->ops) { + CAM_ERR(CAM_CDM, "Failed to util ops for %s%u HW", + cdm_hw->soc_info.label_name, + cdm_hw->soc_info.index); + rc = -EINVAL; + goto cpas_stop; + } + + if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version, + &cdm_core->version)) { + CAM_ERR(CAM_CDM, "Failed to set cam hw version for hw"); + rc = -EINVAL; + goto cpas_stop; + } + + if (!cam_vmrm_no_register_read_on_bind()) { + rc = cam_cpas_stop(cdm_core->cpas_handle); + if (rc) { + CAM_ERR(CAM_CDM, "CPAS stop failed"); + goto cpas_unregister; + } + } + + rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf, + soc_private, CAM_HW_CDM, &cdm_core->index); + if (rc) { + CAM_ERR(CAM_CDM, "HW CDM Interface registration failed"); + goto cpas_unregister; + } + + cam_cdm_test_irq_line_at_probe(cdm_hw); + mutex_unlock(&cdm_hw->hw_mutex); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, 
ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + CAM_DBG(CAM_CDM, "%s component bound successfully", cdm_core->name); + + return rc; + +cpas_stop: + if (!cam_vmrm_no_register_read_on_bind()) { + if (cam_cpas_stop(cdm_core->cpas_handle)) + CAM_ERR(CAM_CDM, "CPAS stop failed"); + } +cpas_unregister: + if (cam_cpas_unregister_client(cdm_core->cpas_handle)) + CAM_ERR(CAM_CDM, "CPAS unregister failed"); +release_platform_resource: + if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info)) + CAM_ERR(CAM_CDM, "Release platform resource failed"); +failed_workq_create: + /* + * 'i' does not index FIFOs on fall-through from the labels above (it was + * last used as an irq/loop counter), so scan every FIFO slot and skip the + * ones whose queue was never allocated (core_info is zero-allocated). + */ + for (j = 0; j < CAM_CDM_BL_FIFO_MAX; j++) { + if (!cdm_core->bl_fifo[j].work_queue) + continue; + flush_workqueue(cdm_core->bl_fifo[j].work_queue); + destroy_workqueue(cdm_core->bl_fifo[j].work_queue); + } +destroy_non_secure_hdl: + cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, + NULL, cdm_hw); + if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure)) + CAM_ERR(CAM_CDM, "Release iommu secure hdl failed"); +unlock_release_mem: + mutex_unlock(&cdm_hw->hw_mutex); +release_private_mem: + CAM_MEM_FREE(cdm_hw->soc_info.soc_private); + cdm_hw->soc_info.soc_private = NULL; +release_mem: + mutex_destroy(&cdm_hw->hw_mutex); + CAM_MEM_FREE(cdm_hw_intf); + cdm_hw_intf = NULL; + CAM_MEM_FREE(cdm_hw->core_info); + cdm_hw->core_info = NULL; + CAM_MEM_FREE(cdm_hw); + cdm_hw = NULL; + return rc; +} + +static void cam_hw_cdm_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = -EBUSY, i; + struct cam_hw_info *cdm_hw = NULL; + struct cam_hw_intf *cdm_hw_intf = NULL; + struct cam_cdm *cdm_core = NULL; + struct platform_device *pdev = to_platform_device(dev); + + cdm_hw_intf = platform_get_drvdata(pdev); + if (!cdm_hw_intf) { + CAM_ERR(CAM_CDM, "Failed to get dev private data"); + return; + } + + cdm_hw = cdm_hw_intf->hw_priv; + if (!cdm_hw) { + CAM_ERR(CAM_CDM, + "Failed to get hw private data for type=%d idx=%d", + cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx); + return; + } + + cdm_core = 
cdm_hw->core_info; + if (!cdm_core) { + CAM_ERR(CAM_CDM, + "Failed to get hw core data for type=%d idx=%d", + cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx); + return; + } + + if (cdm_hw->open_count != 0) { + CAM_ERR(CAM_CDM, "Hw open count invalid type=%d idx=%d cnt=%d", + cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx, + cdm_hw->open_count); + return; + } + + if (cdm_hw->hw_state == CAM_HW_STATE_POWER_UP) { + rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0); + if (rc) { + CAM_ERR(CAM_CDM, "Deinit failed for hw"); + return; + } + } + + rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf, + cdm_hw->soc_info.soc_private, CAM_HW_CDM, cdm_core->index); + if (rc) { + CAM_ERR(CAM_CDM, + "HW_CDM interface deregistration failed: rc: %d", rc); + } + + rc = cam_cpas_unregister_client(cdm_core->cpas_handle); + if (rc) { + CAM_ERR(CAM_CDM, "CPAS unregister failed"); + return; + } + + if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info)) + CAM_ERR(CAM_CDM, "Release platform resource failed"); + + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + flush_workqueue(cdm_core->bl_fifo[i].work_queue); + destroy_workqueue(cdm_core->bl_fifo[i].work_queue); + } + + cam_smmu_unset_client_page_fault_handler( + cdm_core->iommu_hdl.non_secure, cdm_hw); + if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure)) + CAM_ERR(CAM_CDM, "Release iommu secure hdl failed"); + + mutex_destroy(&cdm_hw->hw_mutex); + CAM_MEM_FREE(cdm_hw->soc_info.soc_private); + cdm_hw->soc_info.soc_private = NULL; + CAM_MEM_FREE(cdm_hw_intf); + cdm_hw_intf = NULL; + CAM_MEM_FREE(cdm_hw->core_info); + cdm_hw->core_info = NULL; + CAM_MEM_FREE(cdm_hw); + cdm_hw = NULL; +} + +static const struct component_ops cam_hw_cdm_component_ops = { + .bind = cam_hw_cdm_component_bind, + .unbind = cam_hw_cdm_component_unbind, +}; + +int cam_hw_cdm_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CDM, "Adding HW CDM component"); + rc = component_add(&pdev->dev, &cam_hw_cdm_component_ops); + if (rc) + CAM_ERR(CAM_CDM, "failed to 
add component rc: %d", rc); + + return rc; +} + +int cam_hw_cdm_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_hw_cdm_component_ops); + return 0; +} + +struct platform_driver cam_hw_cdm_driver = { + .probe = cam_hw_cdm_probe, + .remove = cam_hw_cdm_remove, + .driver = { + .name = "msm_cam_cdm", + .owner = THIS_MODULE, + .of_match_table = msm_cam_hw_cdm_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_hw_cdm_init_module(void) +{ + return platform_driver_register(&cam_hw_cdm_driver); +} + +void cam_hw_cdm_exit_module(void) +{ + platform_driver_unregister(&cam_hw_cdm_driver); +} + +MODULE_DESCRIPTION("MSM Camera HW CDM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_0.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_0.h new file mode 100644 index 0000000000..fd34dda935 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_0.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_cdm.h" + +static struct cam_version_reg cdm_hw_1_0_titan_version = { + .hw_version = 0x4, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_1_0_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x7F, + .rb_num_fifo = 0x1, + .rb_next_fifo_shift = 0x0, +}; + +static struct cam_cdm_irq_regs cdm_hw_1_0_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_1_0_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_1_0_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_perf_mon_regs cdm_1_0_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_comp_wait_status cdm_1_0_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_1_0_comp_wait_status1 = { + .comp_wait_status = 0x8c, +}; + +static struct cam_cdm_common_regs cdm_hw_1_0_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = &cdm_hw_1_0_titan_version, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .cdm_status = 0x0, + .irq_context_status = 0x0, + 
.bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_1_0_bl_pending_req0, + NULL, + }, + .comp_wait = { + &cdm_1_0_comp_wait_status0, + &cdm_1_0_comp_wait_status1, + }, + .perf_mon = { + &cdm_1_0_perf_mon0, + NULL, + }, + .scratch = { + &cdm_1_0_scratch_reg0, + &cdm_1_0_scratch_reg1, + &cdm_1_0_scratch_reg2, + &cdm_1_0_scratch_reg3, + &cdm_1_0_scratch_reg4, + &cdm_1_0_scratch_reg5, + &cdm_1_0_scratch_reg6, + &cdm_1_0_scratch_reg7, + NULL, + NULL, + NULL, + NULL, + }, + .perf_reg = NULL, + .icl_reg = NULL, + .spare = 0x200, +}; + +static struct cam_cdm_common_reg_data cdm_hw_1_0_cmn_reg_data = { + .num_bl_fifo = 0x1, + .num_bl_fifo_irq = 0x1, + .num_bl_pending_req_reg = 0x1, + .num_scratch_reg = 0x8, +}; + +static struct cam_cdm_hw_reg_offset cam_cdm_1_0_reg_offset = { + .cmn_reg = &cdm_hw_1_0_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_1_0_bl_fifo0, + NULL, + NULL, + NULL, + }, + .irq_reg = { + &cdm_hw_1_0_irq0, + NULL, + NULL, + NULL, + }, + .reg_data = &cdm_hw_1_0_cmn_reg_data, +}; diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_1.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_1.h new file mode 100644 index 0000000000..2f33ae2aba --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_1.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_cdm.h" + +static struct cam_version_reg cdm_hw_1_1_titan_version = { + .hw_version = 0x4, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_1_1_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x7f, + .rb_num_fifo = 0x1, + .rb_next_fifo_shift = 0x0, +}; + +static struct cam_cdm_irq_regs cdm_hw_1_1_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_1_1_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_1_1_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_perf_mon_regs cdm_1_1_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_comp_wait_status cdm_1_1_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_1_1_comp_wait_status1 = { + .comp_wait_status = 0x8c, +}; + +static struct cam_cdm_common_regs cdm_hw_1_1_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = &cdm_hw_1_1_titan_version, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .cdm_status = 0x0, + .irq_context_status = 0x0, + 
.bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_1_1_bl_pending_req0, + NULL, + }, + .comp_wait = { + &cdm_1_1_comp_wait_status0, + &cdm_1_1_comp_wait_status1, + }, + .perf_mon = { + &cdm_1_1_perf_mon0, + NULL, + }, + .scratch = { + &cdm_1_1_scratch_reg0, + &cdm_1_1_scratch_reg1, + &cdm_1_1_scratch_reg2, + &cdm_1_1_scratch_reg3, + &cdm_1_1_scratch_reg4, + &cdm_1_1_scratch_reg5, + &cdm_1_1_scratch_reg6, + &cdm_1_1_scratch_reg7, + NULL, + NULL, + NULL, + NULL, + }, + .perf_reg = NULL, + .icl_reg = NULL, + .spare = 0x1fc, +}; + +static struct cam_cdm_common_reg_data cdm_hw_1_1_cmn_reg_data = { + .num_bl_fifo = 0x1, + .num_bl_fifo_irq = 0x1, + .num_bl_pending_req_reg = 0x1, + .num_scratch_reg = 0x8, +}; + +struct cam_cdm_hw_reg_offset cam_cdm_1_1_reg_offset = { + .cmn_reg = &cdm_hw_1_1_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_1_1_bl_fifo0, + NULL, + NULL, + NULL, + }, + .irq_reg = { + &cdm_hw_1_1_irq0, + NULL, + NULL, + NULL, + }, + .reg_data = &cdm_hw_1_1_cmn_reg_data, +}; diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_2.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_2.h new file mode 100644 index 0000000000..78996c1a8a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_1_2.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_cdm.h" + +static struct cam_version_reg cdm_hw_1_2_titan_version = { + .hw_version = 0x4, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_1_2_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x7f, + .rb_num_fifo = 0x1, + .rb_next_fifo_shift = 0x0, +}; + +static struct cam_cdm_irq_regs cdm_hw_1_2_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_1_2_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_1_2_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_perf_mon_regs cdm_1_2_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_comp_wait_status cdm_1_2_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_1_2_comp_wait_status1 = { + .comp_wait_status = 0x8c, +}; + +static struct cam_cdm_perf_regs cdm_1_2_perf = { + .count_cfg_0 = 0x180, + .always_count_val = 0x184, + .busy_count_val = 0x188, + .stall_axi_count_val = 0x18c, + .count_status = 0x190, +}; + +static struct cam_cdm_icl_data_regs cdm_1_2_icl_data = { + .icl_last_data_0 = 0x1c0, + 
.icl_last_data_1 = 0x1c4, + .icl_last_data_2 = 0x1c8, + .icl_inv_data = 0x1cc, +}; + +static struct cam_cdm_icl_regs cdm_1_2_icl = { + .data_regs = &cdm_1_2_icl_data, + .misc_regs = NULL, +}; + +static struct cam_cdm_common_regs cdm_hw_1_2_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = &cdm_hw_1_2_titan_version, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .cdm_status = 0x0, + .irq_context_status = 0x0, + .bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_1_2_bl_pending_req0, + NULL, + }, + .comp_wait = { + &cdm_1_2_comp_wait_status0, + &cdm_1_2_comp_wait_status1, + }, + .perf_mon = { + &cdm_1_2_perf_mon0, + NULL, + }, + .scratch = { + &cdm_1_2_scratch_reg0, + &cdm_1_2_scratch_reg1, + &cdm_1_2_scratch_reg2, + &cdm_1_2_scratch_reg3, + &cdm_1_2_scratch_reg4, + &cdm_1_2_scratch_reg5, + &cdm_1_2_scratch_reg6, + &cdm_1_2_scratch_reg7, + NULL, + NULL, + NULL, + NULL, + }, + .perf_reg = &cdm_1_2_perf, + .icl_reg = &cdm_1_2_icl, + .spare = 0x1fc, +}; + +static struct cam_cdm_common_reg_data cdm_hw_1_2_cmn_reg_data = { + .num_bl_fifo = 0x1, + .num_bl_fifo_irq = 0x1, + .num_bl_pending_req_reg = 0x1, + .num_scratch_reg = 0x8, +}; + +struct cam_cdm_hw_reg_offset cam_cdm_1_2_reg_offset = { + .cmn_reg = &cdm_hw_1_2_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_1_2_bl_fifo0, + NULL, + NULL, + NULL, + }, + .irq_reg = { + &cdm_hw_1_2_irq0, + NULL, + NULL, + NULL, + }, + .reg_data = &cdm_hw_1_2_cmn_reg_data, +}; diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_0.h 
b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_0.h new file mode 100644 index 0000000000..000ac420a8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_0.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_cdm.h" + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_2_0_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x1ff, + .rb_num_fifo = 0x2, + .rb_next_fifo_shift = 0x10, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_2_0_bl_pending_req1 = { + .rb_offset = 0x70, + .rb_mask = 0x1ff, + .rb_num_fifo = 0x2, + .rb_next_fifo_shift = 0x10, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_0_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_0_irq1 = { + .irq_mask = 0x130, + .irq_clear = 0x134, + .irq_clear_cmd = 0x138, + .irq_set = 0x13c, + .irq_set_cmd = 0x140, + .irq_status = 0x144, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_0_irq2 = { + .irq_mask = 0x230, + .irq_clear = 0x234, + .irq_clear_cmd = 0x238, + .irq_set = 0x23c, + .irq_set_cmd = 0x240, + .irq_status = 0x244, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_0_irq3 = { + .irq_mask = 0x330, + .irq_clear = 0x334, + .irq_clear_cmd = 0x338, + .irq_set = 0x33c, + .irq_set_cmd = 0x340, + .irq_status = 0x344, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_0_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_0_bl_fifo1 = { + .bl_fifo_base = 0x150, + .bl_fifo_len = 0x154, + .bl_fifo_store = 0x158, + .bl_fifo_cfg = 0x15c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_0_bl_fifo2 = { + .bl_fifo_base = 0x250, + .bl_fifo_len = 0x254, + 
.bl_fifo_store = 0x258, + .bl_fifo_cfg = 0x25c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_0_bl_fifo3 = { + .bl_fifo_base = 0x350, + .bl_fifo_len = 0x354, + .bl_fifo_store = 0x358, + .bl_fifo_cfg = 0x35c, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg8 = { + .scratch_reg = 0xb0, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg9 = { + .scratch_reg = 0xb4, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg10 = { + .scratch_reg = 0xb8, +}; + +static struct cam_cdm_scratch_reg cdm_2_0_scratch_reg11 = { + .scratch_reg = 0xbc, +}; + +static struct cam_cdm_perf_mon_regs cdm_2_0_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_perf_mon_regs cdm_2_0_perf_mon1 = { + .perf_mon_ctrl = 0x120, + .perf_mon_0 = 0x124, + .perf_mon_1 = 0x128, + .perf_mon_2 = 0x12c, +}; + +static struct cam_cdm_comp_wait_status cdm_2_0_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_2_0_comp_wait_status1 = { + .comp_wait_status = 0x8c, +}; + +static struct cam_cdm_icl_data_regs cdm_2_0_icl_data = { + .icl_last_data_0 = 0x1c0, + .icl_last_data_1 = 0x1c4, + .icl_last_data_2 = 0x1c8, + .icl_inv_data = 0x1cc, +}; + +static 
struct cam_cdm_icl_misc_regs cdm_2_0_icl_misc = { + .icl_inv_bl_addr = 0x1d0, + .icl_status = 0x1d4, +}; + +static struct cam_cdm_icl_regs cdm_2_0_icl = { + .data_regs = &cdm_2_0_icl_data, + .misc_regs = &cdm_2_0_icl_misc, +}; + +static struct cam_cdm_common_regs cdm_hw_2_0_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = NULL, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .cdm_status = 0x0, + .irq_context_status = 0x0, + .bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_2_0_bl_pending_req0, + &cdm_hw_2_0_bl_pending_req1, + }, + .comp_wait = { + &cdm_2_0_comp_wait_status0, + &cdm_2_0_comp_wait_status1, + }, + .perf_mon = { + &cdm_2_0_perf_mon0, + &cdm_2_0_perf_mon1, + }, + .scratch = { + &cdm_2_0_scratch_reg0, + &cdm_2_0_scratch_reg1, + &cdm_2_0_scratch_reg2, + &cdm_2_0_scratch_reg3, + &cdm_2_0_scratch_reg4, + &cdm_2_0_scratch_reg5, + &cdm_2_0_scratch_reg6, + &cdm_2_0_scratch_reg7, + &cdm_2_0_scratch_reg8, + &cdm_2_0_scratch_reg9, + &cdm_2_0_scratch_reg10, + &cdm_2_0_scratch_reg11, + }, + .perf_reg = NULL, + .icl_reg = &cdm_2_0_icl, + .spare = 0x1fc, +}; + +static struct cam_cdm_common_reg_data cdm_hw_2_0_cmn_reg_data = { + .num_bl_fifo = 0x4, + .num_bl_fifo_irq = 0x4, + .num_bl_pending_req_reg = 0x2, + .num_scratch_reg = 0xc, +}; + +struct cam_cdm_hw_reg_offset cam_cdm_2_0_reg_offset = { + .cmn_reg = &cdm_hw_2_0_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_2_0_bl_fifo0, + &cdm_hw_2_0_bl_fifo1, + &cdm_hw_2_0_bl_fifo2, + &cdm_hw_2_0_bl_fifo3, + }, + .irq_reg = { + &cdm_hw_2_0_irq0, + 
&cdm_hw_2_0_irq1, + &cdm_hw_2_0_irq2, + &cdm_hw_2_0_irq3, + }, + .reg_data = &cdm_hw_2_0_cmn_reg_data, +}; diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_1.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_1.h new file mode 100644 index 0000000000..2740a168bb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_1.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_cdm.h" + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_2_1_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x1ff, + .rb_num_fifo = 0x2, + .rb_next_fifo_shift = 0x10, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_2_1_bl_pending_req1 = { + .rb_offset = 0x70, + .rb_mask = 0x1ff, + .rb_num_fifo = 0x2, + .rb_next_fifo_shift = 0x10, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_1_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_1_irq1 = { + .irq_mask = 0x130, + .irq_clear = 0x134, + .irq_clear_cmd = 0x138, + .irq_set = 0x13c, + .irq_set_cmd = 0x140, + .irq_status = 0x144, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_1_irq2 = { + .irq_mask = 0x230, + .irq_clear = 0x234, + .irq_clear_cmd = 0x238, + .irq_set = 0x23c, + .irq_set_cmd = 0x240, + .irq_status = 0x244, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_1_irq3 = { + .irq_mask = 0x330, + .irq_clear = 0x334, + .irq_clear_cmd = 0x338, + .irq_set = 0x33c, + .irq_set_cmd = 0x340, + .irq_status = 0x344, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_1_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_1_bl_fifo1 = { + .bl_fifo_base = 0x150, + 
.bl_fifo_len = 0x154, + .bl_fifo_store = 0x158, + .bl_fifo_cfg = 0x15c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_1_bl_fifo2 = { + .bl_fifo_base = 0x250, + .bl_fifo_len = 0x254, + .bl_fifo_store = 0x258, + .bl_fifo_cfg = 0x25c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_1_bl_fifo3 = { + .bl_fifo_base = 0x350, + .bl_fifo_len = 0x354, + .bl_fifo_store = 0x358, + .bl_fifo_cfg = 0x35c, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg8 = { + .scratch_reg = 0xb0, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg9 = { + .scratch_reg = 0xb4, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg10 = { + .scratch_reg = 0xb8, +}; + +static struct cam_cdm_scratch_reg cdm_2_1_scratch_reg11 = { + .scratch_reg = 0xbc, +}; + +static struct cam_cdm_perf_mon_regs cdm_2_1_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_perf_mon_regs cdm_2_1_perf_mon1 = { + .perf_mon_ctrl = 0x120, + .perf_mon_0 = 0x124, + .perf_mon_1 = 0x128, + .perf_mon_2 = 0x12c, +}; + +static struct cam_cdm_comp_wait_status cdm_2_1_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_2_1_comp_wait_status1 = { + .comp_wait_status = 0x8c, 
+}; + +static struct cam_cdm_icl_data_regs cdm_2_1_icl_data = { + .icl_last_data_0 = 0x1c0, + .icl_last_data_1 = 0x1c4, + .icl_last_data_2 = 0x1c8, + .icl_inv_data = 0x1cc, +}; + +static struct cam_cdm_icl_misc_regs cdm_2_1_icl_misc = { + .icl_inv_bl_addr = 0x1d0, + .icl_status = 0x1d8, +}; + +static struct cam_cdm_icl_regs cdm_2_1_icl = { + .data_regs = &cdm_2_1_icl_data, + .misc_regs = &cdm_2_1_icl_misc, +}; + +static struct cam_cdm_common_regs cdm_hw_2_1_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = NULL, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .cdm_status = 0x0, + .irq_context_status = 0x2c, + .bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_2_1_bl_pending_req0, + &cdm_hw_2_1_bl_pending_req1, + }, + .comp_wait = { + &cdm_2_1_comp_wait_status0, + &cdm_2_1_comp_wait_status1, + }, + .perf_mon = { + &cdm_2_1_perf_mon0, + &cdm_2_1_perf_mon1, + }, + .scratch = { + &cdm_2_1_scratch_reg0, + &cdm_2_1_scratch_reg1, + &cdm_2_1_scratch_reg2, + &cdm_2_1_scratch_reg3, + &cdm_2_1_scratch_reg4, + &cdm_2_1_scratch_reg5, + &cdm_2_1_scratch_reg6, + &cdm_2_1_scratch_reg7, + &cdm_2_1_scratch_reg8, + &cdm_2_1_scratch_reg9, + &cdm_2_1_scratch_reg10, + &cdm_2_1_scratch_reg11, + }, + .perf_reg = NULL, + .icl_reg = &cdm_2_1_icl, + .spare = 0x3fc, + .priority_group_bit_offset = 20, +}; + +static struct cam_cdm_common_reg_data cdm_hw_2_1_cmn_reg_data = { + .num_bl_fifo = 0x4, + .num_bl_fifo_irq = 0x4, + .num_bl_pending_req_reg = 0x2, + .num_scratch_reg = 0xc, +}; + +struct cam_cdm_hw_reg_offset 
cam_cdm_2_1_reg_offset = { + .cmn_reg = &cdm_hw_2_1_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_2_1_bl_fifo0, + &cdm_hw_2_1_bl_fifo1, + &cdm_hw_2_1_bl_fifo2, + &cdm_hw_2_1_bl_fifo3, + }, + .irq_reg = { + &cdm_hw_2_1_irq0, + &cdm_hw_2_1_irq1, + &cdm_hw_2_1_irq2, + &cdm_hw_2_1_irq3, + }, + .reg_data = &cdm_hw_2_1_cmn_reg_data, +}; diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_2.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_2.h new file mode 100644 index 0000000000..823a48bad2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_hw_reg_2_2.h @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_cdm.h" + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_2_2_bl_pending_req0 = { + .rb_offset = 0x6c, + .rb_mask = 0x1ff, + .rb_num_fifo = 0x2, + .rb_next_fifo_shift = 0x10, +}; + +struct cam_cdm_bl_pending_req_reg_params cdm_hw_2_2_bl_pending_req1 = { + .rb_offset = 0x70, + .rb_mask = 0x1ff, + .rb_num_fifo = 0x2, + .rb_next_fifo_shift = 0x10, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_2_irq0 = { + .irq_mask = 0x30, + .irq_clear = 0x34, + .irq_clear_cmd = 0x38, + .irq_set = 0x3c, + .irq_set_cmd = 0x40, + .irq_status = 0x44, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_2_irq1 = { + .irq_mask = 0x130, + .irq_clear = 0x134, + .irq_clear_cmd = 0x138, + .irq_set = 0x13c, + .irq_set_cmd = 0x140, + .irq_status = 0x144, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_2_irq2 = { + .irq_mask = 0x230, + .irq_clear = 0x234, + .irq_clear_cmd = 0x238, + .irq_set = 0x23c, + .irq_set_cmd = 0x240, + .irq_status = 0x244, +}; + +static struct cam_cdm_irq_regs cdm_hw_2_2_irq3 = { + .irq_mask = 0x330, + .irq_clear = 0x334, + .irq_clear_cmd = 0x338, + .irq_set = 0x33c, + .irq_set_cmd = 0x340, + .irq_status = 0x344, +}; + +static struct 
cam_cdm_bl_fifo_regs cdm_hw_2_2_bl_fifo0 = { + .bl_fifo_base = 0x50, + .bl_fifo_len = 0x54, + .bl_fifo_store = 0x58, + .bl_fifo_cfg = 0x5c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_2_bl_fifo1 = { + .bl_fifo_base = 0x150, + .bl_fifo_len = 0x154, + .bl_fifo_store = 0x158, + .bl_fifo_cfg = 0x15c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_2_bl_fifo2 = { + .bl_fifo_base = 0x250, + .bl_fifo_len = 0x254, + .bl_fifo_store = 0x258, + .bl_fifo_cfg = 0x25c, +}; + +static struct cam_cdm_bl_fifo_regs cdm_hw_2_2_bl_fifo3 = { + .bl_fifo_base = 0x350, + .bl_fifo_len = 0x354, + .bl_fifo_store = 0x358, + .bl_fifo_cfg = 0x35c, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg0 = { + .scratch_reg = 0x90, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg1 = { + .scratch_reg = 0x94, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg2 = { + .scratch_reg = 0x98, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg3 = { + .scratch_reg = 0x9c, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg4 = { + .scratch_reg = 0xa0, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg5 = { + .scratch_reg = 0xa4, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg6 = { + .scratch_reg = 0xa8, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg7 = { + .scratch_reg = 0xac, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg8 = { + .scratch_reg = 0xb0, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg9 = { + .scratch_reg = 0xb4, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg10 = { + .scratch_reg = 0xb8, +}; + +static struct cam_cdm_scratch_reg cdm_2_2_scratch_reg11 = { + .scratch_reg = 0xbc, +}; + +static struct cam_cdm_perf_mon_regs cdm_2_2_perf_mon0 = { + .perf_mon_ctrl = 0x110, + .perf_mon_0 = 0x114, + .perf_mon_1 = 0x118, + .perf_mon_2 = 0x11c, +}; + +static struct cam_cdm_perf_mon_regs cdm_2_2_perf_mon1 = { + .perf_mon_ctrl = 0x120, + .perf_mon_0 = 0x124, + .perf_mon_1 = 
0x128, + .perf_mon_2 = 0x12c, +}; + +static struct cam_cdm_comp_wait_status cdm_2_2_comp_wait_status0 = { + .comp_wait_status = 0x88, +}; + +static struct cam_cdm_comp_wait_status cdm_2_2_comp_wait_status1 = { + .comp_wait_status = 0x8c, +}; + +static struct cam_cdm_icl_data_regs cdm_2_2_icl_data = { + .icl_last_data_0 = 0x1c0, + .icl_last_data_1 = 0x1c4, + .icl_last_data_2 = 0x1c8, + .icl_inv_data = 0x1cc, +}; + +static struct cam_cdm_icl_misc_regs cdm_2_2_icl_misc = { + .icl_inv_bl_addr = 0x1d0, + .icl_status = 0x1d8, +}; + +static struct cam_cdm_icl_regs cdm_2_2_icl = { + .data_regs = &cdm_2_2_icl_data, + .misc_regs = &cdm_2_2_icl_misc, +}; + +static struct cam_cdm_common_regs cdm_hw_2_2_cmn_reg_offset = { + .cdm_hw_version = 0x0, + .cam_version = NULL, + .rst_cmd = 0x10, + .cgc_cfg = 0x14, + .core_cfg = 0x18, + .core_en = 0x1c, + .fe_cfg = 0x20, + .cdm_status = 0x28, + .irq_context_status = 0x2c, + .bl_fifo_rb = 0x60, + .bl_fifo_base_rb = 0x64, + .bl_fifo_len_rb = 0x68, + .usr_data = 0x80, + .wait_status = 0x84, + .last_ahb_addr = 0xd0, + .last_ahb_data = 0xd4, + .core_debug = 0xd8, + .last_ahb_err_addr = 0xe0, + .last_ahb_err_data = 0xe4, + .current_bl_base = 0xe8, + .current_bl_len = 0xec, + .current_used_ahb_base = 0xf0, + .debug_status = 0xf4, + .bus_misr_cfg0 = 0x100, + .bus_misr_cfg1 = 0x104, + .bus_misr_rd_val = 0x108, + .pending_req = { + &cdm_hw_2_2_bl_pending_req0, + &cdm_hw_2_2_bl_pending_req1, + }, + .comp_wait = { + &cdm_2_2_comp_wait_status0, + &cdm_2_2_comp_wait_status1, + }, + .perf_mon = { + &cdm_2_2_perf_mon0, + &cdm_2_2_perf_mon1, + }, + .scratch = { + &cdm_2_2_scratch_reg0, + &cdm_2_2_scratch_reg1, + &cdm_2_2_scratch_reg2, + &cdm_2_2_scratch_reg3, + &cdm_2_2_scratch_reg4, + &cdm_2_2_scratch_reg5, + &cdm_2_2_scratch_reg6, + &cdm_2_2_scratch_reg7, + &cdm_2_2_scratch_reg8, + &cdm_2_2_scratch_reg9, + &cdm_2_2_scratch_reg10, + &cdm_2_2_scratch_reg11, + }, + .perf_reg = NULL, + .icl_reg = &cdm_2_2_icl, + .spare = 0x3fc, + 
.priority_group_bit_offset = 20, + .pause_core_done_mask = 0x2, + .pause_core_enable_mask = 0x2, +}; + +static struct cam_cdm_common_reg_data cdm_hw_2_2_cmn_reg_data = { + .num_bl_fifo = 0x4, + .num_bl_fifo_irq = 0x4, + .num_bl_pending_req_reg = 0x2, + .num_scratch_reg = 0xc, + .capabilities = CAM_CDM_CAP_PAUSE_CORE, +}; + +struct cam_cdm_hw_reg_offset cam_cdm_2_2_reg_offset = { + .cmn_reg = &cdm_hw_2_2_cmn_reg_offset, + .bl_fifo_reg = { + &cdm_hw_2_2_bl_fifo0, + &cdm_hw_2_2_bl_fifo1, + &cdm_hw_2_2_bl_fifo2, + &cdm_hw_2_2_bl_fifo3, + }, + .irq_reg = { + &cdm_hw_2_2_irq0, + &cdm_hw_2_2_irq1, + &cdm_hw_2_2_irq2, + &cdm_hw_2_2_irq3, + }, + .reg_data = &cdm_hw_2_2_cmn_reg_data, +}; diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_intf.c b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_intf.c new file mode 100644 index 0000000000..119b47cb7d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_intf.c @@ -0,0 +1,832 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "cam_cdm_intf_api.h" +#include "cam_cdm.h" +#include "cam_cdm_virtual.h" +#include "cam_soc_util.h" +#include "cam_cdm_soc.h" +#include "cam_cdm_core_common.h" +#include "camera_main.h" + +static struct cam_cdm_intf_mgr cdm_mgr; +static DEFINE_MUTEX(cam_cdm_mgr_lock); + +static const struct of_device_id msm_cam_cdm_intf_dt_match[] = { + { .compatible = "qcom,cam-cdm-intf", }, + {} +}; + +static int get_cdm_mgr_refcount(void) +{ + int rc = 0; + + mutex_lock(&cam_cdm_mgr_lock); + if (cdm_mgr.probe_done == false) { + CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet"); + rc = -EPERM; + } else { + CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d", + cdm_mgr.refcount); + cdm_mgr.refcount++; + } + mutex_unlock(&cam_cdm_mgr_lock); + return rc; +} + +static void put_cdm_mgr_refcount(void) +{ + mutex_lock(&cam_cdm_mgr_lock); + if (cdm_mgr.probe_done == false) { + CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet"); + } else { + CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d", + cdm_mgr.refcount); + if (cdm_mgr.refcount > 0) { + cdm_mgr.refcount--; + } else { + CAM_ERR(CAM_CDM, "Refcount put when zero"); + WARN_ON(1); + } + } + mutex_unlock(&cam_cdm_mgr_lock); +} + +static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles, + uint32_t hw_idx) +{ + int rc = -EPERM; + struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device; + + if (hw->hw_ops.get_hw_caps) { + rc = hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles, + sizeof(struct cam_iommu_handle)); + } + + return rc; +} + +static int get_cdm_index_by_id(char *identifier, + uint32_t cell_index, uint32_t *hw_index) +{ + int rc = -EPERM, i, j; + char client_name[128], name_index[160]; + + snprintf(client_name, sizeof(client_name), "%s", identifier); + snprintf(name_index, sizeof(name_index), "%s%d", + identifier, cell_index); + + CAM_DBG(CAM_CDM, + "Looking for HW id of =%s or %s and index=%d cdm_count %d", + identifier, name_index, cell_index, 
cdm_mgr.cdm_count); + mutex_lock(&cam_cdm_mgr_lock); + for (i = 0; i < cdm_mgr.cdm_count; i++) { + mutex_lock(&cdm_mgr.nodes[i].lock); + CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d", + cdm_mgr.nodes[i].data->dt_num_supported_clients); + + for (j = 0; j < + cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) { + CAM_DBG(CAM_CDM, "client name:%s dev Index: %d", + cdm_mgr.nodes[i].data->dt_cdm_client_name[j], + i); + if (!strcmp( + cdm_mgr.nodes[i].data->dt_cdm_client_name[j], + client_name) || !strcmp( + cdm_mgr.nodes[i].data->dt_cdm_client_name[j], + name_index)) { + rc = 0; + *hw_index = i; + break; + } + } + mutex_unlock(&cdm_mgr.nodes[i].lock); + if (rc == 0) + break; + } + mutex_unlock(&cam_cdm_mgr_lock); + + return rc; +} + +int cam_cdm_get_iommu_handle(char *identifier, + struct cam_iommu_handle *cdm_handles) +{ + int i, j, rc = -EPERM; + + if ((!identifier) || (!cdm_handles)) + return -EINVAL; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + return rc; + } + CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier); + + mutex_lock(&cam_cdm_mgr_lock); + for (i = 0; i < cdm_mgr.cdm_count; i++) { + mutex_lock(&cdm_mgr.nodes[i].lock); + if (!cdm_mgr.nodes[i].data) { + mutex_unlock(&cdm_mgr.nodes[i].lock); + continue; + } + CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d", + cdm_mgr.nodes[i].data->dt_num_supported_clients); + for (j = 0; j < + cdm_mgr.nodes[i].data->dt_num_supported_clients; + j++) { + CAM_DBG(CAM_CDM, "client name:%s dev Index: %d", + cdm_mgr.nodes[i].data->dt_cdm_client_name[j], + i); + if (!strcmp( + cdm_mgr.nodes[i].data->dt_cdm_client_name[j], + identifier)) { + rc = get_cdm_iommu_handle(cdm_handles, i); + break; + } + } + mutex_unlock(&cdm_mgr.nodes[i].lock); + if (rc == 0) + break; + } + mutex_unlock(&cam_cdm_mgr_lock); + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_get_iommu_handle); + +int cam_cdm_acquire(struct cam_cdm_acquire_data *data) +{ + int rc = -EPERM; + struct 
cam_hw_intf *hw; + struct cam_hw_info *cdm_hw; + struct cam_cdm *core = NULL; + uint32_t hw_index = 0; + + if ((!data) || (!data->base_array_cnt)) + return -EINVAL; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + return rc; + } + + if (data->id > CAM_CDM_HW_ANY) { + CAM_ERR(CAM_CDM, + "only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported"); + rc = -EPERM; + goto end; + } + rc = get_cdm_index_by_id(data->identifier, data->cell_index, + &hw_index); + if ((rc < 0) && (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) { + CAM_ERR(CAM_CDM, "Failed to identify associated hw id"); + goto end; + } else { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) { + cdm_hw = hw->hw_priv; + core = (struct cam_cdm *)cdm_hw->core_info; + data->id = core->id; + data->hw_idx = hw->hw_idx; + CAM_DBG(CAM_CDM, + "Device = %s, hw_index = %d, CDM id = %d", + data->identifier, hw_index, data->id); + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_ACQUIRE, data, + sizeof(struct cam_cdm_acquire_data)); + if (rc < 0) { + CAM_ERR(CAM_CDM, "CDM hw acquire failed"); + goto end; + } + } else { + CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops", + hw_index); + rc = -EPERM; + } + } +end: + if (rc < 0) { + CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d", + data->id, data->identifier, data->cell_index); + put_cdm_mgr_refcount(); + } + return rc; +} +EXPORT_SYMBOL(cam_cdm_acquire); + +struct cam_cdm_utils_ops *cam_cdm_publish_ops(void) +{ + struct cam_hw_version cdm_version; + + cdm_version.major = 1; + cdm_version.minor = 0; + cdm_version.incr = 0; + cdm_version.reserved = 0; + + return cam_cdm_get_ops(0, &cdm_version, true); +} +EXPORT_SYMBOL(cam_cdm_publish_ops); + +int cam_cdm_release(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EPERM; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + return rc; + } + + hw_index = 
CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) { + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_RELEASE, &handle, + sizeof(handle)); + if (rc < 0) + CAM_ERR(CAM_CDM, + "hw release failed for handle=%x", + handle); + } else + CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops", + hw_index); + } + put_cdm_mgr_refcount(); + if (rc == 0) + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_release); + + +int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (!data) + return rc; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + struct cam_cdm_hw_intf_cmd_submit_bl req; + + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) { + req.data = data; + req.handle = handle; + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req, + sizeof(struct cam_cdm_hw_intf_cmd_submit_bl)); + if (rc < 0) + CAM_ERR(CAM_CDM, + "hw submit bl failed for handle=%x", + handle); + } else { + CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops", + hw_index); + } + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_submit_bls); + +int cam_cdm_stream_on(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.start) { + rc = hw->hw_ops.start(hw->hw_priv, &handle, + sizeof(uint32_t)); + if (rc < 0) + CAM_ERR(CAM_CDM, + "hw 
start failed handle=%x", + handle); + } else { + CAM_ERR(CAM_CDM, + "hw idx %d doesn't have start ops", + hw_index); + } + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_stream_on); + +int cam_cdm_stream_off(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.stop) { + rc = hw->hw_ops.stop(hw->hw_priv, &handle, + sizeof(uint32_t)); + if (rc < 0) + CAM_ERR(CAM_CDM, "hw stop failed handle=%x", + handle); + } else { + CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops", + hw_index); + } + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_stream_off); + +int cam_cdm_reset_hw(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) { + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_RESET_HW, &handle, + sizeof(handle)); + if (rc < 0) + CAM_ERR(CAM_CDM, + "CDM hw release failed for handle=%x", + handle); + } else { + CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops", + hw_index); + } + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_reset_hw); + +int cam_cdm_flush_hw(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < 
CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) { + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_FLUSH_HW, &handle, + sizeof(handle)); + if (rc < 0) + CAM_ERR(CAM_CDM, + "CDM hw release failed for handle=%x", + handle); + } else { + CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops", + hw_index); + } + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_flush_hw); + +int cam_cdm_handle_error(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) { + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_HANDLE_ERROR, + &handle, + sizeof(handle)); + if (rc < 0) + CAM_ERR(CAM_CDM, + "CDM hw release failed for handle=%x", + handle); + } else { + CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops", + hw_index); + } + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_handle_error); + +int cam_cdm_detect_hang_error(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_CMD_HANG_DETECT, + &handle, + sizeof(handle)); + } + put_cdm_mgr_refcount(); + + return rc; +} +EXPORT_SYMBOL(cam_cdm_detect_hang_error); + +int cam_cdm_dump_debug_registers(uint32_t handle) +{ + uint32_t hw_index; + int rc = -EINVAL; + struct cam_hw_intf *hw; + + if 
(get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + hw_index = CAM_CDM_GET_HW_IDX(handle); + if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) { + hw = cdm_mgr.nodes[hw_index].device; + if (hw && hw->hw_ops.process_cmd) + rc = hw->hw_ops.process_cmd(hw->hw_priv, + CAM_CDM_HW_INTF_DUMP_DBG_REGS, + &handle, + sizeof(handle)); + } + put_cdm_mgr_refcount(); + + return rc; +} + +int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw, + struct cam_cdm_private_dt_data *data, enum cam_cdm_type type, + uint32_t *index) +{ + int rc = -EINVAL; + + if ((!hw) || (!data) || (!index)) + return rc; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + return rc; + } + + mutex_lock(&cam_cdm_mgr_lock); + if ((type == CAM_VIRTUAL_CDM) && + (!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) { + mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock); + cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw; + cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data; + *index = cdm_mgr.cdm_count; + mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock); + cdm_mgr.cdm_count++; + rc = 0; + } else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0)) { + mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock); + cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw; + cdm_mgr.nodes[cdm_mgr.cdm_count].data = data; + *index = cdm_mgr.cdm_count; + mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock); + cdm_mgr.cdm_count++; + rc = 0; + } else { + CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d", + type, cdm_mgr.cdm_count); + } + mutex_unlock(&cam_cdm_mgr_lock); + put_cdm_mgr_refcount(); + + return rc; +} + +int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw, + struct cam_cdm_private_dt_data *data, enum cam_cdm_type type, + uint32_t index) +{ + int rc = -EINVAL; + + if ((!hw) || (!data)) + return rc; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + rc = -EPERM; + return rc; + } + + 
mutex_lock(&cam_cdm_mgr_lock); + if ((type == CAM_VIRTUAL_CDM) && + (hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) && + (index == CAM_SW_CDM_INDEX)) { + mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock); + cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL; + cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL; + mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock); + rc = 0; + } else if ((type == CAM_HW_CDM) && + (hw == cdm_mgr.nodes[index].device)) { + mutex_lock(&cdm_mgr.nodes[index].lock); + cdm_mgr.nodes[index].device = NULL; + cdm_mgr.nodes[index].data = NULL; + mutex_unlock(&cdm_mgr.nodes[index].lock); + cdm_mgr.cdm_count--; + rc = 0; + } else { + CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d", + type, index); + } + mutex_unlock(&cam_cdm_mgr_lock); + put_cdm_mgr_refcount(); + + return rc; +} + +static int cam_cdm_set_irq_line_test(void *data, u64 val) +{ + int i, rc = 0; + struct cam_hw_intf *hw_intf; + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + return rc; + } + mutex_lock(&cam_cdm_mgr_lock); + + for (i = 0 ; i < cdm_mgr.cdm_count; i++) { + if (!cdm_mgr.nodes[i].device || !cdm_mgr.nodes[i].data) { + CAM_ERR(CAM_CDM, "invalid node present in index=%d", i); + continue; + } + + hw_intf = cdm_mgr.nodes[i].device; + + if (hw_intf->hw_ops.test_irq_line) { + CAM_DBG(CAM_CDM, "Testing irq line for CDM at index %d", i); + + rc = hw_intf->hw_ops.test_irq_line(hw_intf->hw_priv); + if (rc) + CAM_ERR(CAM_CDM, + "[%d] : CDM%d type %d - irq line test failed rc %d", + i, hw_intf->hw_idx, hw_intf->hw_type, rc); + else + CAM_INFO(CAM_CDM, + "[%d] : CDM%d type %d - irq line test passed", + i, hw_intf->hw_idx, hw_intf->hw_type); + } else { + CAM_WARN(CAM_CDM, "test irq line interface not present for cdm at index %d", + i); + } + } + + mutex_unlock(&cam_cdm_mgr_lock); + put_cdm_mgr_refcount(); + + return rc; +} + +static int cam_cdm_get_irq_line_test(void *data, u64 *val) +{ + return 0; +} + + 
+DEFINE_DEBUGFS_ATTRIBUTE(cam_cdm_irq_line_test, cam_cdm_get_irq_line_test, + cam_cdm_set_irq_line_test, "%16llu"); + +int cam_cdm_debugfs_init(struct cam_cdm_intf_mgr *mgr) +{ + struct dentry *dbgfileptr = NULL; + int rc; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("cdm", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_CDM, "DebugFS could not create directory!"); + return rc; + } + + mgr->dentry = dbgfileptr; + + debugfs_create_file("test_irq_line", 0644, + mgr->dentry, NULL, &cam_cdm_irq_line_test); + + return 0; +} + +static int cam_cdm_intf_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int i, rc; + struct platform_device *pdev = to_platform_device(dev); + + rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to get dt properties"); + return rc; + } + mutex_lock(&cam_cdm_mgr_lock); + for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) { + mutex_init(&cdm_mgr.nodes[i].lock); + cdm_mgr.nodes[i].device = NULL; + cdm_mgr.nodes[i].data = NULL; + cdm_mgr.nodes[i].refcount = 0; + } + cdm_mgr.probe_done = true; + cdm_mgr.refcount = 0; + mutex_unlock(&cam_cdm_mgr_lock); + rc = cam_virtual_cdm_probe(pdev); + if (rc) { + mutex_lock(&cam_cdm_mgr_lock); + cdm_mgr.probe_done = false; + for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) { + if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data || + (cdm_mgr.nodes[i].refcount != 0)) + CAM_ERR(CAM_CDM, + "Valid node present in index=%d", i); + mutex_destroy(&cdm_mgr.nodes[i].lock); + cdm_mgr.nodes[i].device = NULL; + cdm_mgr.nodes[i].data = NULL; + cdm_mgr.nodes[i].refcount = 0; + } + mutex_unlock(&cam_cdm_mgr_lock); + } + + cam_cdm_debugfs_init(&cdm_mgr); + + CAM_DBG(CAM_CDM, "CDM Intf component bound successfully"); + + return rc; +} + +static void cam_cdm_intf_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + struct platform_device *pdev = 
to_platform_device(dev); + + if (get_cdm_mgr_refcount()) { + CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed"); + return; + } + + if (cam_virtual_cdm_remove(pdev)) { + CAM_ERR(CAM_CDM, "Virtual CDM remove failed"); + return; + } + put_cdm_mgr_refcount(); + + mutex_lock(&cam_cdm_mgr_lock); + if (cdm_mgr.refcount != 0) { + CAM_ERR(CAM_CDM, "cdm manger refcount not zero %d", + cdm_mgr.refcount); + goto end; + } + + for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) { + if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data || + (cdm_mgr.nodes[i].refcount != 0)) { + CAM_ERR(CAM_CDM, "Valid node present in index=%d", i); + goto end; + } + mutex_lock(&cdm_mgr.nodes[i].lock); + cdm_mgr.nodes[i].device = NULL; + cdm_mgr.nodes[i].data = NULL; + cdm_mgr.nodes[i].refcount = 0; + mutex_unlock(&cdm_mgr.nodes[i].lock); + mutex_destroy(&cdm_mgr.nodes[i].lock); + } + + cdm_mgr.probe_done = false; + +end: + mutex_unlock(&cam_cdm_mgr_lock); +} + +const static struct component_ops cam_cdm_intf_component_ops = { + .bind = cam_cdm_intf_component_bind, + .unbind = cam_cdm_intf_component_unbind, +}; + +static int cam_cdm_intf_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CDM, "Adding CDM INTF component"); + rc = component_add(&pdev->dev, &cam_cdm_intf_component_ops); + if (rc) + CAM_ERR(CAM_CDM, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_cdm_intf_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_cdm_intf_component_ops); + return 0; +} + +struct platform_driver cam_cdm_intf_driver = { + .probe = cam_cdm_intf_probe, + .remove = cam_cdm_intf_remove, + .driver = { + .name = "msm_cam_cdm_intf", + .owner = THIS_MODULE, + .of_match_table = msm_cam_cdm_intf_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_cdm_intf_init_module(void) +{ + return platform_driver_register(&cam_cdm_intf_driver); +} + +void cam_cdm_intf_exit_module(void) +{ + platform_driver_unregister(&cam_cdm_intf_driver); +} + 
+MODULE_DESCRIPTION("MSM Camera CDM Intf driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_intf_api.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_intf_api.h new file mode 100644 index 0000000000..97c767b1f4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_intf_api.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CDM_API_H_ +#define _CAM_CDM_API_H_ + +#include +#include "cam_cdm_util.h" +#include "cam_soc_util.h" +#include "cam_packet_util.h" + +#define CAM_CDM_BL_CMD_MAX 25 + +/* enum cam_cdm_id - Enum for possible CAM CDM hardwares */ +enum cam_cdm_id { + CAM_CDM_VIRTUAL, + CAM_CDM_HW_ANY, + CAM_CDM_CPAS, + CAM_CDM_IFE, + CAM_CDM_TFE, + CAM_CDM_OPE, + CAM_CDM_IPE0, + CAM_CDM_IPE1, + CAM_CDM_BPS, + CAM_CDM_VFE, + CAM_CDM_RT, + CAM_CDM_MAX +}; + +/* enum cam_cdm_cb_status - Enum for possible CAM CDM callback */ +enum cam_cdm_cb_status { + CAM_CDM_CB_STATUS_BL_SUCCESS, + CAM_CDM_CB_STATUS_INVALID_BL_CMD, + CAM_CDM_CB_STATUS_PAGEFAULT, + CAM_CDM_CB_STATUS_HW_RESET_ONGOING, + CAM_CDM_CB_STATUS_HW_RESET_DONE, + CAM_CDM_CB_STATUS_HW_FLUSH, + CAM_CDM_CB_STATUS_HW_RESUBMIT, + CAM_CDM_CB_STATUS_HW_ERROR, + CAM_CDM_CB_STATUS_UNKNOWN_ERROR, +}; + +/* enum cam_cdm_bl_cmd_addr_type - Enum for possible CDM bl cmd addr types */ +enum cam_cdm_bl_cmd_addr_type { + CAM_CDM_BL_CMD_TYPE_MEM_HANDLE, + CAM_CDM_BL_CMD_TYPE_HW_IOVA, + CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA, +}; + +/* enum cam_cdm_bl_fifo - interface commands.*/ +enum cam_cdm_bl_fifo_queue { + CAM_CDM_BL_FIFO_0, + CAM_CDM_BL_FIFO_1, + CAM_CDM_BL_FIFO_2, + CAM_CDM_BL_FIFO_3, + CAM_CDM_BL_FIFO_MAX, +}; + +/** + * struct cam_cdm_acquire_data - Cam CDM acquire data structure + * + * @identifier : Input identifier string which is the device label from dt 
+ * like vfe, ife, jpeg etc + * @cell_index : Input integer identifier pointing to the cell index from dt + * of the device. This can be used to form a unique string + * with @identifier like vfe0, ife1, jpeg0 etc + * @id : ID of a specific or any CDM HW which needs to be acquired. + * @userdata : Input private data which will be returned as part + * of callback. + * @cam_cdm_callback : Input callback pointer for triggering the + * callbacks from CDM driver + * @handle : CDM Client handle + * @userdata : Private data given at the time of acquire + * @status : Callback status + * @cookie : Cookie if the callback is gen irq status or + * pf_args if it is page fault + * @base_array_cnt : Input number of ioremapped address pair pointing + * in base_array, needed only if selected cdm is a virtual. + * @base_array : Input pointer to ioremapped address pair array + * needed only if selected cdm is a virtual. + * @priority : Priority of the client. + * @cdm_version : CDM version is output while acquiring HW cdm and + * it is Input while acquiring virtual cdm. + * Currently fixing it to one version below + * acquire API. + * @ops : Output pointer updated by cdm driver to the CDM + * util ops for this HW version of CDM acquired. 
+ * @handle : Output Unique handle generated for this acquire + * @hw_idx : The physical CDM acquired + * + */ +struct cam_cdm_acquire_data { + char identifier[128]; + uint32_t cell_index; + enum cam_cdm_id id; + void *userdata; + void (*cam_cdm_callback)(uint32_t handle, void *userdata, + enum cam_cdm_cb_status status, void *cookie); + uint32_t base_array_cnt; + struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK]; + enum cam_cdm_bl_fifo_queue priority; + struct cam_hw_version cdm_version; + struct cam_cdm_utils_ops *ops; + uint32_t handle; + uint32_t hw_idx; +}; + +/** + * struct cam_cdm_bl_cmd - Cam CDM HW bl command + * + * @bl_addr : Union of all three type for CDM BL commands + * @mem_handle : Input mem handle of bl cmd + * @offset : Input offset of the actual bl cmd in the memory pointed + * by mem_handle + * @len : Input length of the BL command, Cannot be more than 1MB and + * this is will be validated with offset+size of the memory pointed + * by mem_handle + * @enable_debug_gen_irq : bool flag to submit extra gen_irq afteR bl_command + * @arbitrate : bool flag to arbitrate on submitted BL boundary + */ +struct cam_cdm_bl_cmd { + union { + int32_t mem_handle; + uint32_t *hw_iova; + uintptr_t kernel_iova; + } bl_addr; + uint32_t offset; + uint32_t len; + bool enable_debug_gen_irq; + bool arbitrate; +}; + +/** + * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request + * + * @flag : 1 for callback needed and 0 for no callback when this BL + * request is done + * @gen_irq_arb : enum for setting arbitration in gen_irq + * @userdata :Input private data which will be returned as part + * of callback if request for this bl request in flags. + * @type : type of the submitted bl cmd address. 
+ * @cmd_arrary_count : Input number of BL commands to be submitted to CDM + * @cookie : Cookie if the callback is gen irq status + * @avail_buff_size: Available buffer size in bytes + * @bl_cmd_array : Input payload holding the BL cmd's arrary + * to be sumbitted. + * + */ +struct cam_cdm_bl_request { + bool flag; + bool gen_irq_arb; + void *userdata; + enum cam_cdm_bl_cmd_addr_type type; + uint32_t cmd_arrary_count; + struct cam_kmd_buf_info *genirq_buff; + uint64_t cookie; + union { + struct cam_cdm_bl_cmd cmd[1]; + __DECLARE_FLEX_ARRAY(struct cam_cdm_bl_cmd, cmd_flex); + }; +}; + +/** + * struct cam_cdm_bl_data - last submiited CDM BL data + * + * @mem_handle : Input mem handle of bl cmd + * @hw_addr : Hw address of submitted Bl command + * @offset : Input offset of the actual bl cmd in the memory pointed + * by mem_handle + * @len : length of submitted Bl command to CDM. + * @input_len : Input length of the BL command, Cannot be more than 1MB and + * this is will be validated with offset+size of the memory pointed + * by mem_handle + * @type : CDM bl cmd addr types. + */ +struct cam_cdm_bl_data { + int32_t mem_handle; + dma_addr_t hw_addr; + uint32_t offset; + size_t len; + uint32_t input_len; + enum cam_cdm_bl_cmd_addr_type type; +}; + +/** + * struct cam_cdm_bl_info + * + * @bl_count : No. of Bl commands submiited to CDM. + * @cmd : payload holding the BL cmd's arrary + * that is sumbitted. 
+ * + */ +struct cam_cdm_bl_info { + int32_t bl_count; + struct cam_cdm_bl_data cmd[CAM_CDM_BL_CMD_MAX]; +}; + +/** + * @brief : API to get the CDM capabilities for a camera device type + * + * @identifier : Input pointer to a string which is the device label from dt + * like vfe, ife, jpeg etc, We do not need cell index + * assuming all devices of a single type maps to one SMMU + * client + * @cdm_handles : Input iommu handle memory pointer to update handles + * + * @return 0 on success + */ +int cam_cdm_get_iommu_handle(char *identifier, + struct cam_iommu_handle *cdm_handles); + +/** + * @brief : API to acquire a CDM + * + * @data : Input data for the CDM to be acquired + * + * @return 0 on success + */ +int cam_cdm_acquire(struct cam_cdm_acquire_data *data); + +/** + * @brief : API to release a previously acquired CDM + * + * @handle : Input handle for the CDM to be released + * + * @return 0 on success + */ +int cam_cdm_release(uint32_t handle); + +/** + * @brief : API to submit the base & length (BL's) for acquired CDM + * + * @handle : Input cdm handle to which the BL's needs to be sumbitted. + * @data : Input pointer to the BL's to be sumbitted + * + * @return 0 on success + */ +int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data); + +/** + * @brief : API to stream ON a previously acquired CDM, + * during this we turn on/off clocks/power based on active clients. + * + * @handle : Input handle for the CDM to be released + * + * @return 0 on success + */ +int cam_cdm_stream_on(uint32_t handle); + +/** + * @brief : API to stream OFF a previously acquired CDM, + * during this we turn on/off clocks/power based on active clients. + * + * @handle : Input handle for the CDM to be released + * + * @return 0 on success + */ +int cam_cdm_stream_off(uint32_t handle); + +/** + * @brief : API to reset previously acquired CDM, + * this should be only performed only if the CDM is private. 
+ * + * @handle : Input handle of the CDM to reset + * + * @return 0 on success + */ +int cam_cdm_reset_hw(uint32_t handle); + +/** + * @brief : API to publish CDM ops to HW blocks like IFE + * + * @return : CDM operations + * + */ +struct cam_cdm_utils_ops *cam_cdm_publish_ops(void); + +/** + * @brief : API to register CDM hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_hw_cdm_init_module(void); + +/** + * @brief : API to register CDM interface to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_cdm_intf_init_module(void); + +/** + * @brief : API to remove CDM interface from platform framework. + */ +void cam_cdm_intf_exit_module(void); + +/** + * @brief : API to remove CDM hw from platform framework. + */ +void cam_hw_cdm_exit_module(void); + +/** + * @brief : API to flush previously acquired CDM, + * this should be only performed only if the CDM is private. + * + * @handle : Input handle of the CDM to reset + * + * @return 0 on success + */ +int cam_cdm_flush_hw(uint32_t handle); + +/** + * @brief : API to detect culprit bl_tag in previously acquired CDM, + * this should be only performed only if the CDM is private. + * + * @handle : Input handle of the CDM to reset + * + * @return 0 on success + */ +int cam_cdm_handle_error(uint32_t handle); + +/** + * @brief : API get CDM ops + * + * @return : CDM operations + * + */ +struct cam_cdm_utils_ops *cam_cdm_publish_ops(void); + +/** + * @brief : API to detect hang in previously acquired CDM, + * this should be only performed only if the CDM is private. 
+ * + * @handle : Input handle of the CDM to detect hang + * + * @return 0 on success + */ +int cam_cdm_detect_hang_error(uint32_t handle); + +/** + * @brief : API to dump the CDM Debug registers + * + * @handle : Input handle of the CDM to dump the registers + * + * @return 0 on success + */ +int cam_cdm_dump_debug_registers(uint32_t handle); +#endif /* _CAM_CDM_API_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_soc.c b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_soc.c new file mode 100644 index 0000000000..1fe5f14463 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_soc.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "cam_soc_util.h" +#include "cam_smmu_api.h" +#include "cam_cdm.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_cdm_soc.h" +#include "cam_mem_mgr_api.h" + +#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset) +#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute) + +bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw, + uint32_t reg, uint32_t *value) +{ + void __iomem *reg_addr; + void __iomem *base = + cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base; + resource_size_t mem_len = + cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size; + + CAM_DBG(CAM_CDM, "E: b=%pK blen=%d off=%x", (void __iomem *)base, + (int)mem_len, reg); + + reg_addr = (base + reg); + if (reg_addr > (base + mem_len)) { + CAM_ERR_RATE_LIMIT(CAM_CDM, + "Invalid mapped region %d", reg); + goto permission_error; + } + *value = cam_io_r_mb(reg_addr); + CAM_DBG(CAM_CDM, "X b=%pK off=%x val=%x", + (void __iomem *)base, reg, + *value); + return false; + +permission_error: + *value = 0; + return true; + +} + +bool 
cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw, + uint32_t reg, uint32_t value) +{ + void __iomem *reg_addr; + void __iomem *base = + cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base; + resource_size_t mem_len = + cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size; + + CAM_DBG(CAM_CDM, "E: b=%pK off=%x val=%x", (void __iomem *)base, + reg, value); + + reg_addr = (base + reg); + if (reg_addr > (base + mem_len)) { + CAM_ERR_RATE_LIMIT(CAM_CDM, + "Accessing invalid region:%d\n", + reg); + goto permission_error; + } + cam_io_w_mb(value, reg_addr); + return false; + +permission_error: + return true; + +} + +int cam_cdm_soc_load_dt_private(struct platform_device *pdev, + struct cam_cdm_private_dt_data *cdm_pvt_data) +{ + int i, rc = -EINVAL, num_fifo_entries = 0, num_clients = 0; + + num_clients = of_property_count_strings( + pdev->dev.of_node, "cdm-client-names"); + if ((num_clients <= 0) || + (num_clients > CAM_PER_CDM_MAX_REGISTERED_CLIENTS)) { + CAM_ERR(CAM_CDM, "Invalid count of client names count=%d", + num_clients); + + rc = -EINVAL; + goto end; + } + + cdm_pvt_data->dt_num_supported_clients = (uint32_t)num_clients; + CAM_DBG(CAM_CDM, "Num supported cdm_client = %u", + cdm_pvt_data->dt_num_supported_clients); + + cdm_pvt_data->dt_cdm_shared = true; + + for (i = 0; i < cdm_pvt_data->dt_num_supported_clients; i++) { + rc = of_property_read_string_index(pdev->dev.of_node, + "cdm-client-names", i, + &(cdm_pvt_data->dt_cdm_client_name[i])); + CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s", i, + cdm_pvt_data->dt_cdm_client_name[i]); + if (rc < 0) { + CAM_ERR(CAM_CDM, + "Reading cdm-client-names failed for client: %d", + i); + goto end; + } + + } + + cdm_pvt_data->is_single_ctx_cdm = + of_property_read_bool(pdev->dev.of_node, "single-context-cdm"); + + rc = of_property_read_u32(pdev->dev.of_node, "cam_hw_pid", &cdm_pvt_data->pid); + if (rc) + cdm_pvt_data->pid = -1; + + rc = of_property_read_u32(pdev->dev.of_node, "cam-hw-mid", &cdm_pvt_data->mid); + if (rc) + 
cdm_pvt_data->mid = -1; + + rc = of_property_read_u8(pdev->dev.of_node, "cdm-priority-group", + &cdm_pvt_data->priority_group); + if (rc < 0) { + cdm_pvt_data->priority_group = 0; + rc = 0; + } + + cdm_pvt_data->config_fifo = of_property_read_bool(pdev->dev.of_node, + "config-fifo"); + if (cdm_pvt_data->config_fifo) { + num_fifo_entries = of_property_count_u32_elems( + pdev->dev.of_node, + "fifo-depths"); + if (num_fifo_entries != CAM_CDM_NUM_BL_FIFO) { + CAM_ERR(CAM_CDM, + "Wrong number of configurable FIFOs %d", + num_fifo_entries); + rc = -EINVAL; + goto end; + } + for (i = 0; i < num_fifo_entries; i++) { + rc = of_property_read_u32_index(pdev->dev.of_node, + "fifo-depths", i, &cdm_pvt_data->fifo_depth[i]); + if (rc < 0) { + CAM_ERR(CAM_CDM, + "Unable to read fifo-depth rc %d", + rc); + goto end; + } + CAM_DBG(CAM_CDM, "FIFO%d depth is %d", + i, cdm_pvt_data->fifo_depth[i]); + } + } else { + for (i = 0; i < CAM_CDM_BL_FIFO_MAX; i++) { + cdm_pvt_data->fifo_depth[i] = + CAM_CDM_BL_FIFO_LENGTH_MAX_DEFAULT; + CAM_DBG(CAM_CDM, "FIFO%d depth is %d", + i, cdm_pvt_data->fifo_depth[i]); + } + } +end: + return rc; +} + +int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw, + const struct of_device_id *table) +{ + int rc; + struct cam_hw_soc_info *soc_ptr; + const struct of_device_id *id; + struct cam_cdm *cdm_core = NULL; + + if (!cdm_hw || (cdm_hw->soc_info.soc_private) + || !(cdm_hw->soc_info.pdev)) + return -EINVAL; + + cdm_core = cdm_hw->core_info; + soc_ptr = &cdm_hw->soc_info; + + rc = cam_soc_util_get_dt_properties(soc_ptr); + if (rc != 0) { + CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties"); + goto end; + } + + soc_ptr->soc_private = CAM_MEM_ZALLOC( + sizeof(struct cam_cdm_private_dt_data), + GFP_KERNEL); + if (!soc_ptr->soc_private) + return -ENOMEM; + + rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev, + soc_ptr->soc_private); + if (rc != 0) { + CAM_ERR(CAM_CDM, "Failed to load CDM dt private data"); + goto error; + } + + id = 
of_match_node(table, soc_ptr->pdev->dev.of_node); + if ((!id) || !(id->data)) { + CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table"); + goto error; + } + cdm_core->offsets = + (struct cam_cdm_hw_reg_offset *)id->data; + + CAM_DBG(CAM_CDM, "name %s", cdm_core->name); + + snprintf(cdm_core->name, sizeof(cdm_core->name), "%s%d", + id->compatible, soc_ptr->index); + + CAM_DBG(CAM_CDM, "name %s", cdm_core->name); + + goto end; + +error: + rc = -EINVAL; + CAM_MEM_FREE(soc_ptr->soc_private); + soc_ptr->soc_private = NULL; +end: + return rc; +} + +int cam_cdm_intf_mgr_soc_get_dt_properties( + struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr) +{ + int rc; + + rc = of_property_read_u32(pdev->dev.of_node, + "num-hw-cdm", &mgr->dt_supported_hw_cdm); + CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d", + mgr->dt_supported_hw_cdm); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_soc.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_soc.h new file mode 100644 index 0000000000..9f650554d7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_soc.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CDM_SOC_H_ +#define _CAM_CDM_SOC_H_ + +#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0" +#define CAM_HW_CDM_CPAS_NAME_1_0 "qcom,cam-cpas-cdm1_0" +#define CAM_HW_CDM_CPAS_NAME_1_1 "qcom,cam-cpas-cdm1_1" +#define CAM_HW_CDM_CPAS_NAME_1_2 "qcom,cam-cpas-cdm1_2" +#define CAM_HW_CDM_IFE_NAME_1_2 "qcom,cam-ife-cdm1_2" +#define CAM_HW_CDM_CPAS_NAME_2_0 "qcom,cam-cpas-cdm2_0" +#define CAM_HW_CDM_OPE_NAME_2_0 "qcom,cam-ope-cdm2_0" +#define CAM_HW_CDM_CPAS_NAME_2_1 "qcom,cam-cpas-cdm2_1" +#define CAM_HW_CDM_RT_NAME_2_1 "qcom,cam-rt-cdm2_1" +#define CAM_HW_CDM_OPE_NAME_2_1 "qcom,cam-ope-cdm2_1" +#define CAM_HW_CDM_RT_NAME_2_2 "qcom,cam-rt-cdm2_2" + +int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw, + const struct of_device_id *table); +bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw, + uint32_t reg, uint32_t *value); +bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw, + uint32_t reg, uint32_t value); +int cam_cdm_intf_mgr_soc_get_dt_properties( + struct platform_device *pdev, + struct cam_cdm_intf_mgr *mgr); +int cam_cdm_soc_load_dt_private(struct platform_device *pdev, + struct cam_cdm_private_dt_data *ptr); + +#endif /* _CAM_CDM_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_util.c b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_util.c new file mode 100644 index 0000000000..ed3a6a090d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_util.c @@ -0,0 +1,1343 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "cam_cdm_intf_api.h" +#include "cam_cdm_util.h" +#include "cam_cdm.h" +#include "cam_io_util.h" +#include "cam_mem_mgr_api.h" + +#define CAM_CDM_DWORD 4 + +#define CAM_CDM_SW_CMD_COUNT 2 +#define CAM_CMD_LENGTH_MASK 0xFFFF +#define CAM_CDM_REG_OFFSET_MASK 0x00FFFFFF + +#define CAM_CDM_DMI_DATA_HI_OFFSET 8 +#define CAM_CDM_DMI_DATA_OFFSET 8 +#define CAM_CDM_DMI_DATA_LO_OFFSET 12 +#define CAM_CDM_REG_RANDOM_CMD_WORDS 2 + +static unsigned int CDMCmdHeaderSizes[ + CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = { + 0, /* UNUSED*/ + 3, /* DMI*/ + 0, /* UNUSED*/ + 2, /* RegContinuous*/ + 1, /* RegRandom*/ + 2, /* BUFFER_INDIREC*/ + 2, /* GenerateIRQ*/ + 3, /* WaitForEvent*/ + 1, /* ChangeBase*/ + 1, /* PERF_CONTROL*/ + 3, /* DMI32*/ + 3, /* DMI64*/ + 3, /* WaitCompEvent*/ + 3, /* ClearCompEvent*/ + 3, /* WaitPrefetchDisable*/ +}; + +/** + * struct cdm_regrandom_cmd - Definition for CDM random register command. + * @count: Number of register writes + * @reserved: reserved bits + * @cmd: Command ID (CDMCmd) + */ +struct cdm_regrandom_cmd { + unsigned int count : 16; + unsigned int reserved : 8; + unsigned int cmd : 8; +} __attribute__((__packed__)); + +/** + * struct cdm_regcontinuous_cmd - Definition for a CDM register range command. + * @count: Number of register writes + * @reserved0: reserved bits + * @cmd: Command ID (CDMCmd) + * @offset: Start address of the range of registers + * @reserved1: reserved bits + */ +struct cdm_regcontinuous_cmd { + unsigned int count : 16; + unsigned int reserved0 : 8; + unsigned int cmd : 8; + unsigned int offset : 24; + unsigned int reserved1 : 8; +} __attribute__((__packed__)); + +/** + * struct cdm_dmi_cmd - Definition for a CDM DMI command. 
+ * @length: Number of bytes in LUT - 1 + * @reserved: reserved bits + * @cmd: Command ID (CDMCmd) + * @addr: Address of the LUT in memory + * @DMIAddr: Address of the target DMI config register + * @DMISel: DMI identifier + */ +struct cdm_dmi_cmd { + unsigned int length : 16; + unsigned int reserved : 8; + unsigned int cmd : 8; + unsigned int addr; + unsigned int DMIAddr : 24; + unsigned int DMISel : 8; +} __attribute__((__packed__)); + +/** + * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command. + * @length: Number of bytes in buffer - 1 + * @reserved: reserved bits + * @cmd: Command ID (CDMCmd) + * @addr: Device address of the indirect buffer + */ +struct cdm_indirect_cmd { + unsigned int length : 16; + unsigned int reserved : 8; + unsigned int cmd : 8; + unsigned int addr; +} __attribute__((__packed__)); + +/** + * struct cdm_changebase_cmd - Definition for CDM base address change command. + * @base: Base address to be changed to + * @cmd:Command ID (CDMCmd) + */ +struct cdm_changebase_cmd { + unsigned int base : 24; + unsigned int cmd : 8; +} __attribute__((__packed__)); + +/** + * struct cdm_wait_event_cmd - Definition for a CDM Gen IRQ command. + * @mask: Mask for the events + * @id: ID to read back for debug + * @iw_reserved: reserved bits + * @iw: iw AHB write bit + * @cmd:Command ID (CDMCmd) + * @offset: Offset to where data is written + * @offset_reserved: reserved bits + * @data: data returned in IRQ_USR_DATA + */ +struct cdm_wait_event_cmd { + unsigned int mask : 8; + unsigned int id : 8; + unsigned int iw_reserved : 7; + unsigned int iw : 1; + unsigned int cmd : 8; + unsigned int offset : 24; + unsigned int offset_reserved : 8; + unsigned int data; +} __attribute__((__packed__)); + +/** + * struct cdm_genirq_cmd - Definition for a CDM Wait event command. 
+ * @reserved: reserved bits + * @cmd:Command ID (CDMCmd) + * @userdata: userdata returned in IRQ_USR_DATA + */ +struct cdm_genirq_cmd { + unsigned int reserved : 24; + unsigned int cmd : 8; + unsigned int userdata; +} __attribute__((__packed__)); + +/** + * struct cdm_perf_ctrl_cmd_t - Definition for CDM perf control command. + * @perf: perf command + * @reserved: reserved bits + * @cmd:Command ID (CDMCmd) + */ +struct cdm_perf_ctrl_cmd { + unsigned int perf : 2; + unsigned int reserved : 22; + unsigned int cmd : 8; +} __attribute__((__packed__)); + +struct cdm_wait_comp_event_cmd { + unsigned int reserved : 8; + unsigned int id : 8; + unsigned int id_reserved: 8; + unsigned int cmd : 8; + unsigned int mask1; + unsigned int mask2; +} __attribute__((__packed__)); + +struct cdm_clear_comp_event_cmd { + unsigned int reserved : 8; + unsigned int id : 8; + unsigned int id_reserved: 8; + unsigned int cmd : 8; + unsigned int mask1; + unsigned int mask2; +} __attribute__((__packed__)); + +struct cdm_prefetch_disable_event_cmd { + unsigned int reserved : 8; + unsigned int id : 8; + unsigned int id_reserved: 8; + unsigned int cmd : 8; + unsigned int mask1; + unsigned int mask2; +} __attribute__((__packed__)); + +uint32_t cam_cdm_get_cmd_header_size(unsigned int command) +{ + return CDMCmdHeaderSizes[command]; +} + +uint32_t cam_cdm_required_size_dmi(void) +{ + return cam_cdm_get_cmd_header_size(CAM_CDM_CMD_DMI); +} + +uint32_t cam_cdm_required_size_reg_continuous(uint32_t numVals) +{ + if (!numVals) { + CAM_WARN(CAM_CDM, "numVals cant be 0"); + return 0; + } + + return cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals; +} + +uint32_t cam_cdm_required_size_reg_random(uint32_t numRegVals) +{ + return cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) + + (2 * numRegVals); +} + +uint32_t cam_cdm_required_size_indirect(void) +{ + return cam_cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT); +} + +uint32_t cam_cdm_required_size_genirq(void) +{ + return 
cam_cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ); +} + +uint32_t cam_cdm_required_size_wait_event(void) +{ + return cam_cdm_get_cmd_header_size(CAM_CDM_CMD_WAIT_EVENT); +} + +uint32_t cam_cdm_required_size_changebase(void) +{ + return cam_cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE); +} + +uint32_t cam_cdm_required_size_comp_wait(void) +{ + return cam_cdm_get_cmd_header_size(CAM_CDM_CMD_COMP_WAIT); +} + +uint32_t cam_cdm_required_size_clear_comp_event(void) +{ + return cam_cdm_get_cmd_header_size(CAM_CDM_CLEAR_COMP_WAIT); +} + +uint32_t cam_cdm_required_size_prefetch_disable(void) +{ + return cam_cdm_get_cmd_header_size(CAM_CDM_WAIT_PREFETCH_DISABLE); +} + +uint32_t cam_cdm_offsetof_dmi_addr(void) +{ + return offsetof(struct cdm_dmi_cmd, addr); +} + +uint32_t cam_cdm_offsetof_indirect_addr(void) +{ + return offsetof(struct cdm_indirect_cmd, addr); +} + +uint32_t *cam_cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd, + uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr, + uint32_t length) +{ + struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer; + + pHeader->cmd = CAM_CDM_CMD_DMI; + pHeader->addr = dmiBufferAddr; + pHeader->length = length; + pHeader->DMIAddr = DMIAddr; + pHeader->DMISel = DMISel; + + pCmdBuffer += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_DMI); + + return pCmdBuffer; +} + +uint32_t *cam_cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg, + uint32_t numVals, uint32_t *pVals) +{ + uint32_t i; + struct cdm_regcontinuous_cmd *pHeader = + (struct cdm_regcontinuous_cmd *)pCmdBuffer; + + pHeader->count = numVals; + pHeader->cmd = CAM_CDM_CMD_REG_CONT; + pHeader->reserved0 = 0; + pHeader->reserved1 = 0; + pHeader->offset = reg; + + pCmdBuffer += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT); + + for (i = 0; i < numVals; i++) + (((uint32_t *)pCmdBuffer)[i]) = (((uint32_t *)pVals)[i]); + + pCmdBuffer += numVals; + + return pCmdBuffer; +} + +uint32_t *cam_cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals, + 
uint32_t *pRegVals) +{ + uint32_t i; + uint32_t *dst, *src; + struct cdm_regrandom_cmd *pHeader = + (struct cdm_regrandom_cmd *)pCmdBuffer; + + if (!numRegVals) { + CAM_ERR(CAM_CDM, "Number of reg-val pairs can not be 0"); + return pCmdBuffer; + } + + pHeader->count = numRegVals; + pHeader->cmd = CAM_CDM_CMD_REG_RANDOM; + pHeader->reserved = 0; + + pCmdBuffer += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM); + dst = pCmdBuffer; + src = pRegVals; + for (i = 0; i < numRegVals; i++) { + *dst++ = *src++; + *dst++ = *src++; + } + + return dst; +} + +uint32_t *cam_cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr, + uint32_t length) +{ + struct cdm_indirect_cmd *pHeader = + (struct cdm_indirect_cmd *)pCmdBuffer; + + pHeader->cmd = CAM_CDM_CMD_BUFF_INDIRECT; + pHeader->addr = indirectBufAddr; + pHeader->length = length - 1; + + pCmdBuffer += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT); + + return pCmdBuffer; +} + +void cam_cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata, + bool bit_wr_enable, uint32_t fifo_idx) +{ + struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer; + + CAM_DBG(CAM_CDM, "userdata 0x%x, fifo_idx %d", + userdata, fifo_idx); + + if (bit_wr_enable) + pHeader->reserved = (unsigned int)((fifo_idx << 1) + | (unsigned int)(bit_wr_enable)); + + pHeader->cmd = CAM_CDM_CMD_GEN_IRQ; + pHeader->userdata = (userdata << (8 * fifo_idx)); +} + +uint32_t *cam_cdm_write_wait_event(uint32_t *pcmdbuffer, uint32_t iw, + uint32_t id, uint32_t mask, + uint32_t offset, uint32_t data) +{ + struct cdm_wait_event_cmd *pheader = + (struct cdm_wait_event_cmd *)pcmdbuffer; + + pheader->cmd = CAM_CDM_CMD_WAIT_EVENT; + pheader->mask = mask; + pheader->data = data; + pheader->id = id; + pheader->iw = iw; + pheader->offset = offset; + pheader->iw_reserved = 0; + pheader->offset_reserved = 0; + + pcmdbuffer += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_WAIT_EVENT); + + return pcmdbuffer; +} + +uint32_t 
*cam_cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base) +{ + struct cdm_changebase_cmd *pHeader = + (struct cdm_changebase_cmd *)pCmdBuffer; + + CAM_DBG(CAM_CDM, "Change to base 0x%x", base); + + pHeader->cmd = CAM_CDM_CMD_CHANGE_BASE; + pHeader->base = base; + pCmdBuffer += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE); + + return pCmdBuffer; +} + +uint32_t *cam_cdm_write_wait_comp_event( + uint32_t *pCmdBuffer, uint32_t mask1, uint32_t mask2) +{ + struct cdm_wait_comp_event_cmd *pHeader = + (struct cdm_wait_comp_event_cmd *)pCmdBuffer; + + pHeader->cmd = CAM_CDM_CMD_COMP_WAIT; + pHeader->mask1 = mask1; + pHeader->mask2 = mask2; + + pCmdBuffer += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_COMP_WAIT); + + return pCmdBuffer; +} + +uint32_t *cam_cdm_write_clear_comp_event( + uint32_t *pCmdBuffer, uint32_t mask1, uint32_t mask2) +{ + struct cdm_clear_comp_event_cmd *pHeader = + (struct cdm_clear_comp_event_cmd *)pCmdBuffer; + + pHeader->cmd = CAM_CDM_CLEAR_COMP_WAIT; + pHeader->mask1 = mask1; + pHeader->mask2 = mask2; + + pCmdBuffer += cam_cdm_get_cmd_header_size(CAM_CDM_CLEAR_COMP_WAIT); + + return pCmdBuffer; +} + +uint32_t *cam_cdm_write_wait_prefetch_disable( + uint32_t *pCmdBuffer, + uint32_t id, + uint32_t mask1, + uint32_t mask2) +{ + struct cdm_prefetch_disable_event_cmd *pHeader = + (struct cdm_prefetch_disable_event_cmd *)pCmdBuffer; + + pHeader->cmd = CAM_CDM_WAIT_PREFETCH_DISABLE; + pHeader->id = id; + pHeader->mask1 = mask1; + pHeader->mask2 = mask2; + + pCmdBuffer += cam_cdm_get_cmd_header_size(CAM_CDM_WAIT_PREFETCH_DISABLE); + + return pCmdBuffer; +} + + +struct cam_cdm_utils_ops CDM170_ops = { + .cdm_get_cmd_header_size = cam_cdm_get_cmd_header_size, + .cdm_required_size_dmi = cam_cdm_required_size_dmi, + .cdm_required_size_reg_continuous = cam_cdm_required_size_reg_continuous, + .cdm_required_size_reg_random = cam_cdm_required_size_reg_random, + .cdm_required_size_indirect = cam_cdm_required_size_indirect, + .cdm_required_size_genirq = 
cam_cdm_required_size_genirq, + .cdm_required_size_wait_event = cam_cdm_required_size_wait_event, + .cdm_required_size_changebase = cam_cdm_required_size_changebase, + .cdm_required_size_comp_wait = cam_cdm_required_size_comp_wait, + .cdm_required_size_clear_comp_event = cam_cdm_required_size_clear_comp_event, + .cdm_required_size_prefetch_disable = cam_cdm_required_size_prefetch_disable, + .cdm_offsetof_dmi_addr = cam_cdm_offsetof_dmi_addr, + .cdm_offsetof_indirect_addr = cam_cdm_offsetof_indirect_addr, + .cdm_write_dmi = cam_cdm_write_dmi, + .cdm_write_regcontinuous = cam_cdm_write_regcontinuous, + .cdm_write_regrandom = cam_cdm_write_regrandom, + .cdm_write_indirect = cam_cdm_write_indirect, + .cdm_write_genirq = cam_cdm_write_genirq, + .cdm_write_wait_event = cam_cdm_write_wait_event, + .cdm_write_changebase = cam_cdm_write_changebase, + .cdm_write_wait_comp_event = cam_cdm_write_wait_comp_event, + .cdm_write_clear_comp_event = cam_cdm_write_clear_comp_event, + .cdm_write_wait_prefetch_disable = cam_cdm_write_wait_prefetch_disable, +}; + +int cam_cdm_get_ioremap_from_base(uint32_t hw_base, + uint32_t base_array_size, + struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK], + void __iomem **device_base) +{ + int ret = -EINVAL, i; + + for (i = 0; i < base_array_size; i++) { + if (base_table[i]) + CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x", + i, (base_table[i])->mem_cam_base, hw_base); + if ((base_table[i]) && + ((base_table[i])->mem_cam_base == hw_base)) { + *device_base = (base_table[i])->mem_base; + ret = 0; + break; + } + } + + return ret; +} + +static int cam_cdm_util_cmd_buf_validation(void __iomem *base_addr, + uint32_t base_array_size, + struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK], + uint32_t cmd_buf_size, void *buf, + resource_size_t *size, + enum cam_cdm_command cmd_type) +{ + + int i, ret = 0; + + if (!base_addr) { + CAM_ERR(CAM_CDM, "invalid base address"); + return -EINVAL; + } + + for (i = 0; i < base_array_size; i++) { + if 
((base_table[i]) && + ((base_table[i])->mem_base == base_addr)) { + *size = (base_table[i])->size; + break; + } + } + + if (*size == 0) { + CAM_ERR(CAM_CDM, "Could not retrieve ioremap size, address not mapped!"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_CDM_CMD_REG_RANDOM: { + struct cdm_regrandom_cmd *reg_random = (struct cdm_regrandom_cmd *)buf; + uint32_t *data, offset; + + if ((!reg_random->count) || + (((reg_random->count * (sizeof(uint32_t) * CAM_CDM_REG_RANDOM_CMD_WORDS)) + + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) > cmd_buf_size)) { + CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d", + reg_random->count, cmd_buf_size); + return -EINVAL; + } + + data = (uint32_t *)buf + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM); + + for (i = 0; i < reg_random->count; i++) { + offset = data[0]; + if (offset > (*size - sizeof(uint32_t))) { + CAM_ERR(CAM_CDM, "Offset out of mapped range, size:%llu offset:%u", + *size, offset); + return -EINVAL; + } + + data += CAM_CDM_REG_RANDOM_CMD_WORDS; + } + + } + break; + case CAM_CDM_CMD_REG_CONT: { + struct cdm_regcontinuous_cmd *reg_cont = (struct cdm_regcontinuous_cmd *) buf; + + if ((!reg_cont->count) || (((reg_cont->count * sizeof(uint32_t)) + + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) > cmd_buf_size)) { + CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d", + cmd_buf_size, reg_cont->count); + return -EINVAL; + } + + if ((reg_cont->offset > (*size - sizeof(uint32_t))) || ((reg_cont->offset + + (reg_cont->count * sizeof(uint32_t))) > *size)) { + CAM_ERR(CAM_CDM, "Offset out of mapped range! 
size: %lu, offset: %u", + *size, reg_cont->offset); + return -EINVAL; + } + + } + break; + case CAM_CDM_CMD_SWD_DMI_64: { + struct cdm_dmi_cmd *swd_dmi = (struct cdm_dmi_cmd *) buf; + uint32_t count = 0, low_off = 0, high_off = 0; + + if (cmd_buf_size < (cam_cdm_required_size_dmi() + swd_dmi->length + 1)) { + CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d", swd_dmi->length + 1); + return -EINVAL; + } + + count = (swd_dmi->length + 1)/8; + low_off = swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET; + high_off = swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET; + + if ((low_off + (count * sizeof(uint64_t)) - sizeof(uint32_t) > *size) || + (high_off + (count * sizeof(uint64_t)) > *size)) { + CAM_ERR(CAM_CDM, + "Offset out of mapped range! size:%llu lo_offset:%u hi_offset:%u", + *size, low_off, high_off); + return -EINVAL; + } + + } + break; + case CAM_CDM_CMD_SWD_DMI_32: { + struct cdm_dmi_cmd *swd_dmi = (struct cdm_dmi_cmd *) buf; + uint32_t count = 0, low_off = 0; + + if (cmd_buf_size < (cam_cdm_required_size_dmi() + swd_dmi->length + 1)) { + CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d", swd_dmi->length + 1); + return -EINVAL; + } + + count = (swd_dmi->length + 1)/4; + low_off = swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET; + + if (low_off + (count * sizeof(uint32_t)) > *size) { + CAM_ERR(CAM_CDM, "Offset out of mapped range! size:%llu lo_offset:%u", + *size, low_off); + return -EINVAL; + } + + } + break; + case CAM_CDM_CMD_DMI: { + struct cdm_dmi_cmd *swd_dmi = (struct cdm_dmi_cmd *) buf; + uint32_t count = 0, data_off = 0; + + if (cmd_buf_size < (cam_cdm_required_size_dmi() + swd_dmi->length + 1)) { + CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d", swd_dmi->length + 1); + return -EINVAL; + } + + count = (swd_dmi->length + 1)/4; + data_off = swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_OFFSET; + + if (data_off + (count * sizeof(uint32_t)) > *size) { + CAM_ERR(CAM_CDM, "Offset out of mapped range! 
size:%llu offset:%u", + *size, data_off); + return -EINVAL; + } + + } + break; + default: + CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x", cmd_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static int cam_cdm_util_reg_cont_write(void __iomem *base_addr, + uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes, + uint32_t base_array_size, struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK]) +{ + int rc = 0; + uint32_t *data; + struct cdm_regcontinuous_cmd reg_cont; + resource_size_t size = 0; + + memcpy(®_cont, cmd_buf, sizeof(struct cdm_regcontinuous_cmd)); + rc = cam_cdm_util_cmd_buf_validation(base_addr, base_array_size, base_table, + cmd_buf_size, (void *)®_cont, &size, CAM_CDM_CMD_REG_CONT); + if (rc) { + CAM_ERR(CAM_CDM, "Validation failed! rc=%d", rc); + return rc; + } + + data = cmd_buf + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT); + + cam_io_memcpy(base_addr + reg_cont.offset, data, + reg_cont.count * sizeof(uint32_t)); + *used_bytes = (reg_cont.count * sizeof(uint32_t)) + + (4 * cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)); + + return rc; +} + +static int cam_cdm_util_reg_random_write(void __iomem *base_addr, + uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes, + uint32_t base_array_size, struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK]) +{ + int i, rc = 0; + struct cdm_regrandom_cmd *reg_random; + void *kbuf = NULL; + uint32_t *data; + resource_size_t size = 0; + + kbuf = CAM_MEM_ZALLOC(cmd_buf_size, GFP_KERNEL); + if (!kbuf) { + CAM_ERR(CAM_CDM, "Mem alloc failed! rc=%d", -ENOMEM); + return -ENOMEM; + } + + memcpy(kbuf, cmd_buf, cmd_buf_size); + rc = cam_cdm_util_cmd_buf_validation(base_addr, base_array_size, base_table, + cmd_buf_size, kbuf, &size, CAM_CDM_CMD_REG_RANDOM); + if (rc) { + CAM_ERR(CAM_CDM, "Validation failed! 
rc=%d", rc); + goto end; + } + + data = (uint32_t *)kbuf + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM); + reg_random = (struct cdm_regrandom_cmd *)kbuf; + + for (i = 0; i < reg_random->count; i++) { + CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x", + ((void __iomem *)(base_addr + data[0])), + data[1]); + cam_io_w(data[1], base_addr + data[0]); + data += CAM_CDM_REG_RANDOM_CMD_WORDS; + } + + *used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) + + (4 * cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM))); +end: + CAM_MEM_FREE(kbuf); + return rc; +} + +static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type, + void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size, + uint32_t *used_bytes, uint32_t base_array_size, + struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK]) +{ + int i, rc = 0; + struct cdm_dmi_cmd swd_dmi; + uint32_t *data; + resource_size_t size = 0; + + memcpy(&swd_dmi, cmd_buf, sizeof(struct cdm_dmi_cmd)); + rc = cam_cdm_util_cmd_buf_validation(base_addr, base_array_size, base_table, + cmd_buf_size, (void *)&swd_dmi, &size, cdm_cmd_type); + if (rc) { + CAM_ERR(CAM_CDM, "Validation failed! 
rc=%d", rc); + return rc; + } + + data = cmd_buf + cam_cdm_required_size_dmi(); + + if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) { + for (i = 0; i < (swd_dmi.length + 1)/8; i++) { + cam_io_w_mb(data[0], base_addr + + swd_dmi.DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET); + cam_io_w_mb(data[1], base_addr + + swd_dmi.DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET); + data += 2; + } + } else if (cdm_cmd_type == CAM_CDM_CMD_DMI) { + for (i = 0; i < (swd_dmi.length + 1)/4; i++) { + cam_io_w_mb(data[0], base_addr + + swd_dmi.DMIAddr + CAM_CDM_DMI_DATA_OFFSET); + data += 1; + } + } else { + for (i = 0; i < (swd_dmi.length + 1)/4; i++) { + cam_io_w_mb(data[0], base_addr + + swd_dmi.DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET); + data += 1; + } + } + + *used_bytes = (4 * cam_cdm_required_size_dmi()) + swd_dmi.length + 1; + + return rc; +} + +int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base, + uint32_t *cmd_buf, uint32_t cmd_buf_size, + struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK], + uint32_t base_array_size, uint8_t bl_tag) +{ + int ret = 0; + uint32_t cdm_cmd_type = 0; + uint32_t used_bytes = 0; + + while (cmd_buf_size > 0) { + CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf); + cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET); + switch (cdm_cmd_type) { + case CAM_CDM_CMD_REG_CONT: { + ret = cam_cdm_util_reg_cont_write(*current_device_base, + cmd_buf, cmd_buf_size, &used_bytes, + base_array_size, base_table); + if (ret) + break; + + if (used_bytes > 0) { + cmd_buf_size -= used_bytes; + cmd_buf += used_bytes/4; + } + } + break; + case CAM_CDM_CMD_REG_RANDOM: { + ret = cam_cdm_util_reg_random_write( + *current_device_base, cmd_buf, cmd_buf_size, + &used_bytes, base_array_size, base_table); + if (ret) + break; + + if (used_bytes > 0) { + cmd_buf_size -= used_bytes; + cmd_buf += used_bytes / 4; + } + } + break; + case CAM_CDM_CMD_DMI: + case CAM_CDM_CMD_SWD_DMI_32: + case CAM_CDM_CMD_SWD_DMI_64: { + if (*current_device_base == 0) { + CAM_ERR(CAM_CDM, + "Got SWI DMI cmd =%d for 
invalid hw", + cdm_cmd_type); + ret = -EINVAL; + break; + } + ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type, + *current_device_base, cmd_buf, cmd_buf_size, + &used_bytes, base_array_size, base_table); + if (ret) + break; + + if (used_bytes > 0) { + cmd_buf_size -= used_bytes; + cmd_buf += used_bytes / 4; + } + } + break; + case CAM_CDM_CMD_CHANGE_BASE: { + struct cdm_changebase_cmd *change_base_cmd = + (struct cdm_changebase_cmd *)cmd_buf; + + ret = cam_cdm_get_ioremap_from_base( + change_base_cmd->base, base_array_size, + base_table, current_device_base); + if (ret != 0) { + CAM_ERR(CAM_CDM, + "Get ioremap change base failed %x", + change_base_cmd->base); + break; + } + CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK", + change_base_cmd->base, + current_device_base); + cmd_buf_size -= (4 * + cam_cdm_required_size_changebase()); + cmd_buf += cam_cdm_required_size_changebase(); + } + break; + default: + CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x", + cdm_cmd_type); + ret = -EINVAL; + break; + } + + if (ret < 0) + break; + } + + return ret; +} + +static long cam_cdm_util_dump_dmi_cmd(uint32_t *cmd_buf_addr, + uint32_t *cmd_buf_addr_end) +{ + long ret = 0; + struct cdm_dmi_cmd *p_dmi_cmd; + uint32_t *temp_ptr = cmd_buf_addr; + + p_dmi_cmd = (struct cdm_dmi_cmd *)cmd_buf_addr; + temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI]; + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI]; + + if (temp_ptr > cmd_buf_addr_end) + CAM_ERR(CAM_CDM, + "Invalid cmd start addr:%pK end addr:%pK", + temp_ptr, cmd_buf_addr_end); + + CAM_INFO(CAM_CDM, + "DMI: LEN: %u DMIAddr: 0x%X DMISel: 0x%X LUT_addr: 0x%X", + p_dmi_cmd->length, p_dmi_cmd->DMIAddr, + p_dmi_cmd->DMISel, p_dmi_cmd->addr); + return ret; +} + +static long cam_cdm_util_dump_buff_indirect(uint32_t *cmd_buf_addr, + uint32_t *cmd_buf_addr_end) +{ + long ret = 0; + struct cdm_indirect_cmd *p_indirect_cmd; + uint32_t *temp_ptr = cmd_buf_addr; + + p_indirect_cmd = (struct cdm_indirect_cmd *)cmd_buf_addr; + temp_ptr += 
CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT]; + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT]; + + if (temp_ptr > cmd_buf_addr_end) + CAM_ERR(CAM_CDM, + "Invalid cmd start addr:%pK end addr:%pK", + temp_ptr, cmd_buf_addr_end); + + CAM_INFO(CAM_CDM, + "Buff Indirect: LEN: %u addr: 0x%X", + p_indirect_cmd->length, p_indirect_cmd->addr); + return ret; +} + +static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr, + uint32_t *cmd_buf_addr_end) +{ + long ret = 0; + struct cdm_regcontinuous_cmd *p_regcont_cmd; + uint32_t *temp_ptr = cmd_buf_addr; + int i = 0; + + p_regcont_cmd = (struct cdm_regcontinuous_cmd *)temp_ptr; + temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT]; + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT]; + + CAM_INFO(CAM_CDM, "REG_CONT: COUNT: %u OFFSET: 0x%X", + p_regcont_cmd->count, p_regcont_cmd->offset); + + for (i = 0; i < p_regcont_cmd->count; i++) { + if (temp_ptr > cmd_buf_addr_end) { + CAM_ERR(CAM_CDM, + "Invalid cmd(%d) start addr:%pK end addr:%pK", + i, temp_ptr, cmd_buf_addr_end); + break; + } + CAM_INFO(CAM_CDM, "DATA_%d: 0x%X", i, + *temp_ptr); + temp_ptr++; + ret++; + } + + return ret; +} + +static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr, + uint32_t *cmd_buf_addr_end) +{ + struct cdm_regrandom_cmd *p_regrand_cmd; + uint32_t *temp_ptr = cmd_buf_addr; + long ret = 0; + int i = 0; + + p_regrand_cmd = (struct cdm_regrandom_cmd *)temp_ptr; + temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM]; + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM]; + + CAM_INFO(CAM_CDM, "REG_RAND: COUNT: %u", + p_regrand_cmd->count); + + for (i = 0; i < p_regrand_cmd->count; i++) { + if (temp_ptr > cmd_buf_addr_end) { + CAM_ERR(CAM_CDM, + "Invalid cmd(%d) start addr:%pK end addr:%pK", + i, temp_ptr, cmd_buf_addr_end); + break; + } + CAM_INFO(CAM_CDM, "OFFSET_%d: 0x%X DATA_%d: 0x%X", + i, *temp_ptr & CAM_CDM_REG_OFFSET_MASK, i, + *(temp_ptr + 1)); + temp_ptr += 2; + ret += 2; + } + + return ret; +} + +static long 
cam_cdm_util_dump_gen_irq_cmd(uint32_t *cmd_buf_addr) +{ + long ret = 0; + + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_GEN_IRQ]; + + CAM_INFO(CAM_CDM, "GEN_IRQ"); + + return ret; +} + +static long cam_cdm_util_dump_wait_event_cmd(uint32_t *cmd_buf_addr) +{ + long ret = 0; + + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_WAIT_EVENT]; + + CAM_INFO(CAM_CDM, "WAIT_EVENT"); + + return ret; +} + +static long cam_cdm_util_dump_change_base_cmd(uint32_t *cmd_buf_addr, + uint32_t *cmd_buf_addr_end) +{ + long ret = 0; + struct cdm_changebase_cmd *p_cbase_cmd; + uint32_t *temp_ptr = cmd_buf_addr; + + if (temp_ptr > cmd_buf_addr_end) { + CAM_ERR(CAM_CDM, + "Invalid cmd start addr:%pK end addr:%pK", + temp_ptr, cmd_buf_addr_end); + + return 0; + } + + p_cbase_cmd = (struct cdm_changebase_cmd *)temp_ptr; + temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE]; + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE]; + + CAM_INFO(CAM_CDM, "CHANGE_BASE: 0x%X, curr cmd addr: %pK", + p_cbase_cmd->base, temp_ptr); + + return ret; +} + +static long cam_cdm_util_dump_comp_wait_event_cmd(uint32_t *cmd_buf_addr) +{ + long ret = 0; + + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_COMP_WAIT]; + + CAM_INFO(CAM_CDM, "WAIT_EVENT"); + + return ret; +} + +static long cam_cdm_util_dump_perf_ctrl_cmd(uint32_t *cmd_buf_addr) +{ + long ret = 0; + + ret += CDMCmdHeaderSizes[CAM_CDM_CMD_PERF_CTRL]; + + CAM_INFO(CAM_CDM, "PERF_CTRL"); + + return ret; +} + +bool cam_cdm_util_validate_cmd_buf( + uint32_t *cmd_buf_start, uint32_t *cmd_buf_end) +{ + uint32_t *buf_now = cmd_buf_start; + uint32_t *buf_end = cmd_buf_end; + uint32_t cmd = 0; + int i = 0; + struct cdm_regcontinuous_cmd *p_regcont_cmd = NULL; + struct cdm_regrandom_cmd *p_regrand_cmd = NULL; + + if (!cmd_buf_start || !cmd_buf_end) { + CAM_ERR(CAM_CDM, "Invalid args"); + return true; + } + + do { + cmd = *buf_now; + cmd = cmd >> CAM_CDM_COMMAND_OFFSET; + + switch (cmd) { + case CAM_CDM_CMD_DMI: + case CAM_CDM_CMD_DMI_32: + case CAM_CDM_CMD_DMI_64: + if (buf_now > 
buf_end) + return true; + + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI]; + break; + case CAM_CDM_CMD_REG_CONT: + p_regcont_cmd = (struct cdm_regcontinuous_cmd *)buf_now; + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT]; + for (i = 0; i < p_regcont_cmd->count; i++) { + if (buf_now > buf_end) + return true; + + buf_now++; + } + break; + case CAM_CDM_CMD_REG_RANDOM: + p_regrand_cmd = (struct cdm_regrandom_cmd *)buf_now; + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM]; + for (i = 0; i < p_regrand_cmd->count; i++) { + if (buf_now > buf_end) + return true; + + buf_now += 2; + } + break; + case CAM_CDM_CMD_BUFF_INDIRECT: + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT]; + if (buf_now > buf_end) + return true; + + break; + case CAM_CDM_CMD_GEN_IRQ: + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_GEN_IRQ]; + break; + case CAM_CDM_CMD_WAIT_EVENT: + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_WAIT_EVENT]; + break; + case CAM_CDM_CMD_CHANGE_BASE: + if (buf_now > buf_end) + return true; + + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE]; + break; + case CAM_CDM_CMD_PERF_CTRL: + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_PERF_CTRL]; + break; + case CAM_CDM_CMD_COMP_WAIT: + buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_COMP_WAIT]; + break; + default: + CAM_ERR(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x", + cmd, *buf_now); + return true; + } + } while (buf_now < cmd_buf_end); + + return false; +} + +void cam_cdm_util_dump_cmd_buf( + uint32_t *cmd_buf_start, uint32_t *cmd_buf_end) +{ + uint32_t *buf_now = cmd_buf_start; + uint32_t *buf_end = cmd_buf_end; + uint32_t cmd = 0; + + if (!cmd_buf_start || !cmd_buf_end) { + CAM_ERR(CAM_CDM, "Invalid args"); + return; + } + + do { + cmd = *buf_now; + cmd = cmd >> CAM_CDM_COMMAND_OFFSET; + + switch (cmd) { + case CAM_CDM_CMD_DMI: + case CAM_CDM_CMD_DMI_32: + case CAM_CDM_CMD_DMI_64: + buf_now += cam_cdm_util_dump_dmi_cmd(buf_now, + buf_end); + break; + case CAM_CDM_CMD_REG_CONT: + buf_now += cam_cdm_util_dump_reg_cont_cmd(buf_now, + 
buf_end); + break; + case CAM_CDM_CMD_REG_RANDOM: + buf_now += cam_cdm_util_dump_reg_random_cmd(buf_now, + buf_end); + break; + case CAM_CDM_CMD_BUFF_INDIRECT: + buf_now += cam_cdm_util_dump_buff_indirect(buf_now, + buf_end); + break; + case CAM_CDM_CMD_GEN_IRQ: + buf_now += cam_cdm_util_dump_gen_irq_cmd(buf_now); + break; + case CAM_CDM_CMD_WAIT_EVENT: + buf_now += cam_cdm_util_dump_wait_event_cmd(buf_now); + break; + case CAM_CDM_CMD_CHANGE_BASE: + buf_now += cam_cdm_util_dump_change_base_cmd(buf_now, + buf_end); + break; + case CAM_CDM_CMD_PERF_CTRL: + buf_now += cam_cdm_util_dump_perf_ctrl_cmd(buf_now); + break; + case CAM_CDM_CMD_COMP_WAIT: + buf_now += + cam_cdm_util_dump_comp_wait_event_cmd(buf_now); + break; + default: + CAM_ERR(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x", + cmd, *buf_now); + buf_now++; + break; + } + } while (buf_now < cmd_buf_end); +} + +static uint32_t cam_cdm_util_dump_reg_cont_cmd_v2( + uint32_t *cmd_buf_addr, + struct cam_cdm_cmd_buf_dump_info *dump_info) +{ + int i; + uint32_t ret; + uint8_t *dst; + size_t remain_len; + uint32_t *temp_ptr = cmd_buf_addr; + uint32_t *addr, *start; + uint32_t min_len; + struct cdm_regcontinuous_cmd *p_regcont_cmd; + struct cam_cdm_cmd_dump_header *hdr; + + p_regcont_cmd = (struct cdm_regcontinuous_cmd *)temp_ptr; + temp_ptr += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT); + ret = cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT); + + min_len = (sizeof(uint32_t) * p_regcont_cmd->count) + + sizeof(struct cam_cdm_cmd_dump_header) + + (2 * sizeof(uint32_t)); + remain_len = dump_info->dst_max_size - dump_info->dst_offset; + + if (remain_len < min_len) { + CAM_WARN_RATE_LIMIT(CAM_CDM, + "Dump buffer exhaust remain %zu min %u", + remain_len, min_len); + return ret; + } + + dst = (char *)dump_info->dst_start + dump_info->dst_offset; + hdr = (struct cam_cdm_cmd_dump_header *)dst; + scnprintf(hdr->tag, CAM_CDM_CMD_TAG_MAX_LEN, "CDM_REG_CONT:"); + hdr->word_size = sizeof(uint32_t); + addr = (uint32_t *)(dst + 
sizeof(struct cam_cdm_cmd_dump_header)); + start = addr; + *addr++ = p_regcont_cmd->offset; + *addr++ = p_regcont_cmd->count; + for (i = 0; i < p_regcont_cmd->count; i++) { + *addr = *temp_ptr; + temp_ptr++; + addr++; + ret++; + } + hdr->size = hdr->word_size * (addr - start); + dump_info->dst_offset += hdr->size + + sizeof(struct cam_cdm_cmd_dump_header); + + return ret; +} + +static uint32_t cam_cdm_util_dump_reg_random_cmd_v2( + uint32_t *cmd_buf_addr, + struct cam_cdm_cmd_buf_dump_info *dump_info) +{ + int i; + uint32_t ret; + uint8_t *dst; + uint32_t *temp_ptr = cmd_buf_addr; + uint32_t *addr, *start; + size_t remain_len; + uint32_t min_len; + struct cdm_regrandom_cmd *p_regrand_cmd; + struct cam_cdm_cmd_dump_header *hdr; + + p_regrand_cmd = (struct cdm_regrandom_cmd *)temp_ptr; + temp_ptr += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM); + ret = cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM); + + min_len = (2 * sizeof(uint32_t) * p_regrand_cmd->count) + + sizeof(struct cam_cdm_cmd_dump_header) + sizeof(uint32_t); + remain_len = dump_info->dst_max_size - dump_info->dst_offset; + + if (remain_len < min_len) { + CAM_WARN_RATE_LIMIT(CAM_CDM, + "Dump buffer exhaust remain %zu min %u", + remain_len, min_len); + return ret; + } + + dst = (char *)dump_info->dst_start + dump_info->dst_offset; + hdr = (struct cam_cdm_cmd_dump_header *)dst; + scnprintf(hdr->tag, CAM_CDM_CMD_TAG_MAX_LEN, "CDM_REG_RANDOM:"); + hdr->word_size = sizeof(uint32_t); + addr = (uint32_t *)(dst + sizeof(struct cam_cdm_cmd_dump_header)); + start = addr; + *addr++ = p_regrand_cmd->count; + for (i = 0; i < p_regrand_cmd->count; i++) { + addr[0] = temp_ptr[0] & CAM_CDM_REG_OFFSET_MASK; + addr[1] = temp_ptr[1]; + temp_ptr += 2; + addr += 2; + ret += 2; + } + hdr->size = hdr->word_size * (addr - start); + dump_info->dst_offset += hdr->size + + sizeof(struct cam_cdm_cmd_dump_header); + return ret; +} + +int cam_cdm_util_dump_cmd_bufs_v2( + struct cam_cdm_cmd_buf_dump_info *dump_info) +{ + 
uint32_t cmd; + uint32_t *buf_now; + int rc = 0; + + if (!dump_info || !dump_info->src_start || !dump_info->src_end || + !dump_info->dst_start) { + CAM_INFO(CAM_CDM, "Invalid args"); + return -EINVAL; + } + + buf_now = dump_info->src_start; + do { + if (dump_info->dst_offset >= dump_info->dst_max_size) { + CAM_WARN(CAM_CDM, + "Dump overshoot offset %zu size %zu", + dump_info->dst_offset, + dump_info->dst_max_size); + return -ENOSPC; + } + cmd = *buf_now; + cmd = cmd >> CAM_CDM_COMMAND_OFFSET; + + switch (cmd) { + case CAM_CDM_CMD_DMI: + case CAM_CDM_CMD_DMI_32: + case CAM_CDM_CMD_DMI_64: + buf_now += cam_cdm_get_cmd_header_size(CAM_CDM_CMD_DMI); + break; + case CAM_CDM_CMD_REG_CONT: + buf_now += cam_cdm_util_dump_reg_cont_cmd_v2(buf_now, + dump_info); + break; + case CAM_CDM_CMD_REG_RANDOM: + buf_now += cam_cdm_util_dump_reg_random_cmd_v2(buf_now, + dump_info); + break; + case CAM_CDM_CMD_BUFF_INDIRECT: + buf_now += cam_cdm_get_cmd_header_size( + CAM_CDM_CMD_BUFF_INDIRECT); + break; + case CAM_CDM_CMD_GEN_IRQ: + buf_now += cam_cdm_get_cmd_header_size( + CAM_CDM_CMD_GEN_IRQ); + break; + case CAM_CDM_CMD_WAIT_EVENT: + buf_now += cam_cdm_get_cmd_header_size( + CAM_CDM_CMD_WAIT_EVENT); + break; + case CAM_CDM_CMD_CHANGE_BASE: + buf_now += cam_cdm_get_cmd_header_size( + CAM_CDM_CMD_CHANGE_BASE); + break; + case CAM_CDM_CMD_PERF_CTRL: + buf_now += cam_cdm_get_cmd_header_size( + CAM_CDM_CMD_PERF_CTRL); + break; + case CAM_CDM_CMD_COMP_WAIT: + buf_now += cam_cdm_get_cmd_header_size( + CAM_CDM_CMD_COMP_WAIT); + break; + default: + CAM_ERR(CAM_CDM, "Invalid CMD: 0x%x", cmd); + buf_now++; + break; + } + } while (buf_now < dump_info->src_end); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_util.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_util.h new file mode 100644 index 0000000000..ecdb228e39 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_util.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: 
GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CDM_UTIL_H_ +#define _CAM_CDM_UTIL_H_ + +/* Max len for tag name for header while dumping cmd buffer*/ +#define CAM_CDM_CMD_TAG_MAX_LEN 128 +#define CAM_CDM_COMMAND_OFFSET 24 + +#include + +enum cam_cdm_command { + CAM_CDM_CMD_UNUSED = 0x0, + CAM_CDM_CMD_DMI = 0x1, + CAM_CDM_CMD_NOT_DEFINED = 0x2, + CAM_CDM_CMD_REG_CONT = 0x3, + CAM_CDM_CMD_REG_RANDOM = 0x4, + CAM_CDM_CMD_BUFF_INDIRECT = 0x5, + CAM_CDM_CMD_GEN_IRQ = 0x6, + CAM_CDM_CMD_WAIT_EVENT = 0x7, + CAM_CDM_CMD_CHANGE_BASE = 0x8, + CAM_CDM_CMD_PERF_CTRL = 0x9, + CAM_CDM_CMD_DMI_32 = 0xa, + CAM_CDM_CMD_DMI_64 = 0xb, + CAM_CDM_CMD_COMP_WAIT = 0xc, + CAM_CDM_CLEAR_COMP_WAIT = 0xd, + CAM_CDM_WAIT_PREFETCH_DISABLE = 0xe, + CAM_CDM_CMD_PRIVATE_BASE = 0xf, + CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64), + CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65), + CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F, +}; + +/** + * struct cam_cdm_utils_ops - Camera CDM util ops + * + * @cdm_get_cmd_header_size: Returns the size of the given command header + * in DWORDs. + * @command Command ID + * @return Size of the command in DWORDs + * + * @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous + * command in dwords. + * @numVals Number of continuous values + * @return Size in dwords + * + * @cdm_required_size_reg_random: Calculates the size of a reg-random command + * in dwords. + * @numRegVals Number of register/value pairs + * @return Size in dwords + * + * @cdm_required_size_dmi: Calculates the size of a DMI command in dwords. + * @return Size in dwords + * + * @cdm_required_size_genirq: Calculates size of a Genirq command in dwords. + * @return Size in dwords + * + * @cdm_required_size_indirect: Calculates the size of an indirect command + * in dwords. 
+ * @return Size in dwords + * + * @cdm_required_size_comp_wait: Calculates the size of a comp-wait command + * in dwords. + * @return Size in dwords + * + * @cdm_required_size_clear_comp_event: Calculates the size of clear-comp-event + * command in dwords. + * @return Size in dwords + * + * @cdm_required_size_changebase: Calculates the size of a change-base command + * in dwords. + * @return Size in dwords + * + * @cdm_offsetof_dmi_addr: Returns the offset of address field in the DMI + * command header. + * @return Offset of addr field + * + * @cdm_offsetof_indirect_addr: Returns the offset of address field in the + * indirect command header. + * @return Offset of addr field + * + * @cdm_write_regcontinuous: Writes a command into the command buffer. + * @pCmdBuffer: Pointer to command buffer + * @reg: Beginning of the register address range where + * values will be written. + * @numVals: Number of values (registers) that will be written + * @pVals : An array of values that will be written + * @return Pointer in command buffer pointing past the written commands + * + * @cdm_write_regrandom: Writes a command into the command buffer in + * register/value pairs. + * @pCmdBuffer: Pointer to command buffer + * @numRegVals: Number of register/value pairs that will be written + * @pRegVals: An array of register/value pairs that will be written + * The even indices are registers and the odd indices + * arevalues, e.g., {reg1, val1, reg2, val2, ...}. + * @return Pointer in command buffer pointing past the written commands + * + * @cdm_write_dmi: Writes a DMI command into the command bufferM. + * @pCmdBuffer: Pointer to command buffer + * @dmiCmd: DMI command + * @DMIAddr: Address of the DMI + * @DMISel: Selected bank that the DMI will write to + * @length: Size of data in bytes + * @return Pointer in command buffer pointing past the written commands + * + * @cdm_write_indirect: Writes a indirect command into the command buffer. 
+ * @pCmdBuffer: Pointer to command buffer + * @indirectBufferAddr: Device address of the indirect cmd buffer. + * @length: Size of data in bytes + * @return Pointer in command buffer pointing past the written commands + * + * @cdm_write_changebase: Writes a changing CDM (address) base command into + * the command buffer. + * @pCmdBuffer: Pointer to command buffer + * @base: New base (device) address + * @return Pointer in command buffer pointing past the written commands + * + * @cdm_write_genirq: Writes a gen irq command into the command buffer. + * @pCmdBuffer: Pointer to command buffer + * @userdata: userdata or cookie return by hardware during irq. + * + * @cdm_write_wait_comp_event: Writes a wait comp event cmd into the + * command buffer. + * @pCmdBuffer: Pointer to command buffer + * @mask1: This value decides which comp events to wait (0 - 31). + * @mask2: This value decides which comp events to wait (32 - 65). + * + * @cdm_write_clear_comp_event: Writes a clear comp event cmd into the + * command buffer. + * @pCmdBuffer: Pointer to command buffer + * @mask1: This value decides which comp events to clear (0 - 31). + * @mask2: This value decides which comp events to clear (32 - 65). 
+ */ +struct cam_cdm_utils_ops { +uint32_t (*cdm_get_cmd_header_size)(unsigned int command); +uint32_t (*cdm_required_size_dmi)(void); +uint32_t (*cdm_required_size_reg_continuous)(uint32_t numVals); +uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals); +uint32_t (*cdm_required_size_indirect)(void); +uint32_t (*cdm_required_size_genirq)(void); +uint32_t (*cdm_required_size_wait_event)(void); +uint32_t (*cdm_required_size_changebase)(void); +uint32_t (*cdm_required_size_comp_wait)(void); +uint32_t (*cdm_required_size_clear_comp_event)(void); +uint32_t (*cdm_required_size_prefetch_disable)(void); +uint32_t (*cdm_offsetof_dmi_addr)(void); +uint32_t (*cdm_offsetof_indirect_addr)(void); +uint32_t *(*cdm_write_dmi)( + uint32_t *pCmdBuffer, + uint8_t dmiCmd, + uint32_t DMIAddr, + uint8_t DMISel, + uint32_t dmiBufferAddr, + uint32_t length); +uint32_t* (*cdm_write_regcontinuous)( + uint32_t *pCmdBuffer, + uint32_t reg, + uint32_t numVals, + uint32_t *pVals); +uint32_t *(*cdm_write_regrandom)( + uint32_t *pCmdBuffer, + uint32_t numRegVals, + uint32_t *pRegVals); +uint32_t *(*cdm_write_indirect)( + uint32_t *pCmdBuffer, + uint32_t indirectBufferAddr, + uint32_t length); +void (*cdm_write_genirq)( + uint32_t *pCmdBuffer, + uint32_t userdata, + bool bit_wr_enable, + uint32_t fifo_idx); +uint32_t *(*cdm_write_wait_event)( + uint32_t *pCmdBuffer, + uint32_t iw, + uint32_t id, + uint32_t mask, + uint32_t offset, + uint32_t data); +uint32_t *(*cdm_write_changebase)( + uint32_t *pCmdBuffer, + uint32_t base); +uint32_t *(*cdm_write_wait_comp_event)( + uint32_t *pCmdBuffer, + uint32_t mask1, + uint32_t mask2); +uint32_t *(*cdm_write_clear_comp_event)( + uint32_t *pCmdBuffer, + uint32_t mask1, + uint32_t mask2); +uint32_t *(*cdm_write_wait_prefetch_disable)( + uint32_t *pCmdBuffer, + uint32_t id, + uint32_t mask1, + uint32_t mask2); +}; + +/** + * struct cam_cdm_cmd_buf_dump_info; - Camera CDM dump info + * @dst_offset: dst offset + * @dst_max_size max size of destination 
buffer + * @src_start: source start address + * @src_end: source end address + * @dst_start: dst start address + */ +struct cam_cdm_cmd_buf_dump_info { + size_t dst_offset; + size_t dst_max_size; + uint32_t *src_start; + uint32_t *src_end; + uintptr_t dst_start; +}; + +/** + * struct cam_cdm_cmd_dump_header- Camera CDM dump header + * @tag: tag name for header + * @size: size of data + * @word_size: size of each word + */ +struct cam_cdm_cmd_dump_header { + uint8_t tag[CAM_CDM_CMD_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +/** + * cam_cdm_util_validate_cmd_buf() + * + * @brief: Util function to validate cdm command buffers + * + * @cmd_buffer_start: Pointer to start of cmd buffer + * @cmd_buffer_end: Pointer to end of cmd buffer + * + * return true if invalid cmd found, otherwise false + * + */ +bool cam_cdm_util_validate_cmd_buf( + uint32_t *cmd_buffer_start, uint32_t *cmd_buffer_end); + +/** + * cam_cdm_util_log_cmd_bufs() + * + * @brief: Util function to log cdm command buffers + * + * @cmd_buffer_start: Pointer to start of cmd buffer + * @cmd_buffer_end: Pointer to end of cmd buffer + * + */ +void cam_cdm_util_dump_cmd_buf( + uint32_t *cmd_buffer_start, uint32_t *cmd_buffer_end); + +/** + * cam_cdm_util_dump_cmd_bufs_v2() + * + * @brief: Util function to cdm command buffers + * to a buffer + * + * @dump_info: Information about source and destination buffers + * + * return SUCCESS/FAILURE + */ +int cam_cdm_util_dump_cmd_bufs_v2( + struct cam_cdm_cmd_buf_dump_info *dump_info); + + +#endif /* _CAM_CDM_UTIL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_virtual.h b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_virtual.h new file mode 100644 index 0000000000..193e01be47 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_virtual.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_CDM_VIRTUAL_H_ +#define _CAM_CDM_VIRTUAL_H_ + +#include "cam_cdm_intf_api.h" + +int cam_virtual_cdm_probe(struct platform_device *pdev); +int cam_virtual_cdm_remove(struct platform_device *pdev); +int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base, + uint32_t *cmd_buf, uint32_t cmd_buf_size, + struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK], + uint32_t base_array_size, uint8_t bl_tag); + +#endif /* _CAM_CDM_VIRTUAL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_virtual_core.c b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_virtual_core.c new file mode 100644 index 0000000000..e28556fa5b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cdm/cam_cdm_virtual_core.c @@ -0,0 +1,410 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "cam_soc_util.h" +#include "cam_smmu_api.h" +#include "cam_cdm_intf_api.h" +#include "cam_cdm.h" +#include "cam_cdm_util.h" +#include "cam_cdm_virtual.h" +#include "cam_cdm_core_common.h" +#include "cam_cdm_soc.h" +#include "cam_io_util.h" +#include "cam_req_mgr_workq.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm" + +static void cam_virtual_cdm_work(struct work_struct *work) +{ + struct cam_cdm_work_payload *payload; + struct cam_hw_info *cdm_hw; + struct cam_cdm *core; + + payload = container_of(work, struct cam_cdm_work_payload, work); + if (payload) { + cdm_hw = payload->hw; + core = (struct cam_cdm *)cdm_hw->core_info; + + cam_common_util_thread_switch_delay_detect( + "virtual_cdm_workq", "schedule", cam_virtual_cdm_work, + payload->workq_scheduled_ts, + CAM_WORKQ_SCHEDULE_TIME_THRESHOLD); + + if (payload->irq_status & 0x2) { + struct 
cam_cdm_bl_cb_request_entry *node; + + CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x", + payload->irq_data); + mutex_lock(&cdm_hw->hw_mutex); + node = cam_cdm_find_request_by_bl_tag( + payload->irq_data, + &core->bl_request_list); + if (node) { + if (node->request_type == + CAM_HW_CDM_BL_CB_CLIENT) { + cam_cdm_notify_clients(cdm_hw, + CAM_CDM_CB_STATUS_BL_SUCCESS, + (void *)node); + } else if (node->request_type == + CAM_HW_CDM_BL_CB_INTERNAL) { + CAM_ERR(CAM_CDM, "Invalid node=%pK %d", + node, node->request_type); + } + list_del_init(&node->entry); + CAM_MEM_FREE(node); + } else { + CAM_ERR(CAM_CDM, "Invalid node for inline irq"); + } + mutex_unlock(&cdm_hw->hw_mutex); + } + if (payload->irq_status & 0x1) { + CAM_DBG(CAM_CDM, "CDM HW reset done IRQ"); + complete(&core->reset_complete); + } + CAM_MEM_FREE(payload); + } + +} + +int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw, + struct cam_cdm_hw_intf_cmd_submit_bl *req, + struct cam_cdm_client *client) +{ + int i, rc = -EINVAL; + struct cam_cdm_bl_request *cdm_cmd = req->data; + struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info; + + mutex_lock(&client->lock); + for (i = 0; i < req->data->cmd_arrary_count ; i++) { + uintptr_t vaddr_ptr = 0; + size_t len = 0; + + if ((!cdm_cmd->cmd_flex[i].len) && + (cdm_cmd->cmd_flex[i].len > 0x100000)) { + CAM_ERR(CAM_CDM, + "len(%d) is invalid count=%d total cnt=%d", + cdm_cmd->cmd_flex[i].len, i, + req->data->cmd_arrary_count); + rc = -EINVAL; + break; + } + if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) { + rc = cam_mem_get_cpu_buf( + cdm_cmd->cmd_flex[i].bl_addr.mem_handle, &vaddr_ptr, + &len); + if (rc) { + CAM_ERR(CAM_CDM, + "Falied to get CPU addr_i[%d] req_type %d", i, + req->data->type); + } + } else if (req->data->type == + CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) { + rc = 0; + vaddr_ptr = cdm_cmd->cmd_flex[i].bl_addr.kernel_iova; + len = cdm_cmd->cmd_flex[i].offset + cdm_cmd->cmd_flex[i].len; + } else { + CAM_ERR(CAM_CDM, + "Only mem hdl/Kernel va 
type is supported %d", + req->data->type); + rc = -EINVAL; + } + + if ((!rc) && (vaddr_ptr) && (len) && + (len >= cdm_cmd->cmd_flex[i].offset)) { + + + if ((len - cdm_cmd->cmd_flex[i].offset) < + cdm_cmd->cmd_flex[i].len) { + CAM_ERR(CAM_CDM, "Not enough buffer"); + rc = -EINVAL; + goto put_cpu_buf; + } + CAM_DBG(CAM_CDM, + "hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu", + cdm_cmd->cmd_flex[i].bl_addr.mem_handle, + (void *)vaddr_ptr, cdm_cmd->cmd_flex[i].offset, + cdm_cmd->cmd_flex[i].len, len); + rc = cam_cdm_util_cmd_buf_write( + &client->changebase_addr, + ((uint32_t *)vaddr_ptr + + ((cdm_cmd->cmd_flex[i].offset)/4)), + cdm_cmd->cmd_flex[i].len, client->data.base_array, + client->data.base_array_cnt, core->bl_tag); + if (rc) { + CAM_ERR(CAM_CDM, + "write failed for cnt=%d:%d len %u", + i, req->data->cmd_arrary_count, + cdm_cmd->cmd_flex[i].len); + goto put_cpu_buf; + } + } else { + CAM_ERR(CAM_CDM, + "Sanity check failed for hdl=%x len=%zu:%d", + cdm_cmd->cmd_flex[i].bl_addr.mem_handle, len, + cdm_cmd->cmd_flex[i].offset); + CAM_ERR(CAM_CDM, + "Sanity check failed for cmd_count=%d cnt=%d", + i, req->data->cmd_arrary_count); + rc = -EINVAL; + goto err; + } + if (!rc) { + struct cam_cdm_work_payload *payload; + + CAM_DBG(CAM_CDM, + "write BL success for cnt=%d with tag=%d", + i, core->bl_tag); + if (req->data->flag && (i == req->data->cmd_arrary_count)) { + struct cam_cdm_bl_cb_request_entry *node; + + node = CAM_MEM_ZALLOC(sizeof( + struct cam_cdm_bl_cb_request_entry), + GFP_KERNEL); + if (!node) { + rc = -ENOMEM; + goto err; + } + node->request_type = CAM_HW_CDM_BL_CB_CLIENT; + node->client_hdl = req->handle; + node->cookie = req->data->cookie; + node->bl_tag = core->bl_tag; + node->userdata = req->data->userdata; + mutex_lock(&cdm_hw->hw_mutex); + list_add_tail(&node->entry, + &core->bl_request_list); + mutex_unlock(&cdm_hw->hw_mutex); + + payload = CAM_MEM_ZALLOC(sizeof( + struct cam_cdm_work_payload), + GFP_ATOMIC); + if (payload) { + payload->irq_status = 0x2; + 
payload->irq_data = core->bl_tag; + payload->hw = cdm_hw; + INIT_WORK((struct work_struct *) + &payload->work, + cam_virtual_cdm_work); + payload->workq_scheduled_ts = + ktime_get(); + queue_work(core->work_queue, + &payload->work); + } + } + core->bl_tag++; + CAM_DBG(CAM_CDM, + "Now commit the BL nothing for virtual"); + if (!rc && (core->bl_tag == 63)) + core->bl_tag = 0; + + if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) + cam_mem_put_cpu_buf(cdm_cmd->cmd_flex[i].bl_addr.mem_handle); + } + + } + mutex_unlock(&client->lock); + return rc; + +put_cpu_buf: + if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) + cam_mem_put_cpu_buf(cdm_cmd->cmd_flex[i].bl_addr.mem_handle); + +err: + mutex_unlock(&client->lock); + return rc; + +} + +int cam_virtual_cdm_probe(struct platform_device *pdev) +{ + struct cam_hw_info *cdm_hw = NULL; + struct cam_hw_intf *cdm_hw_intf = NULL; + struct cam_cdm *cdm_core = NULL; + struct cam_cdm_private_dt_data *soc_private = NULL; + int rc; + struct cam_cpas_register_params cpas_parms; + + cdm_hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!cdm_hw_intf) + return -ENOMEM; + + cdm_hw = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!cdm_hw) { + CAM_MEM_FREE(cdm_hw_intf); + return -ENOMEM; + } + + cdm_hw->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_cdm), GFP_KERNEL); + if (!cdm_hw->core_info) { + CAM_MEM_FREE(cdm_hw); + CAM_MEM_FREE(cdm_hw_intf); + return -ENOMEM; + } + cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + cdm_hw->soc_info.pdev = pdev; + cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM; + cdm_hw->soc_info.soc_private = CAM_MEM_ZALLOC( + sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL); + if (!cdm_hw->soc_info.soc_private) { + rc = -ENOMEM; + goto soc_load_failed; + } + + rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private); + if (rc) { + CAM_ERR(CAM_CDM, "Failed to load CDM dt private data"); + CAM_MEM_FREE(cdm_hw->soc_info.soc_private); + cdm_hw->soc_info.soc_private = NULL; + 
goto soc_load_failed; + } + + cdm_core = (struct cam_cdm *)cdm_hw->core_info; + soc_private = (struct cam_cdm_private_dt_data *) + cdm_hw->soc_info.soc_private; + if (soc_private->dt_cdm_shared == true) + cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM; + else + cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM; + + cdm_core->bl_tag = 0; + INIT_LIST_HEAD(&cdm_core->bl_request_list); + init_completion(&cdm_core->reset_complete); + cdm_hw_intf->hw_priv = cdm_hw; + cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps; + cdm_hw_intf->hw_ops.init = NULL; + cdm_hw_intf->hw_ops.deinit = NULL; + cdm_hw_intf->hw_ops.start = cam_cdm_stream_start; + cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop; + cdm_hw_intf->hw_ops.read = NULL; + cdm_hw_intf->hw_ops.write = NULL; + cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd; + + CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type, + cdm_hw_intf->hw_idx); + + platform_set_drvdata(pdev, cdm_hw_intf); + + cdm_hw->open_count = 0; + cdm_core->iommu_hdl.non_secure = -1; + cdm_core->iommu_hdl.secure = -1; + mutex_init(&cdm_hw->hw_mutex); + spin_lock_init(&cdm_hw->hw_lock); + init_completion(&cdm_hw->hw_complete); + mutex_lock(&cdm_hw->hw_mutex); + cdm_core->id = CAM_CDM_VIRTUAL; + memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME, + sizeof(CAM_CDM_VIRTUAL_NAME)); + cdm_core->work_queue = alloc_workqueue(cdm_core->name, + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, + CAM_CDM_INFLIGHT_WORKS); + cdm_core->ops = NULL; + + cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb; + cpas_parms.cell_index = cdm_hw->soc_info.index; + cpas_parms.dev = &pdev->dev; + cpas_parms.userdata = cdm_hw_intf; + strscpy(cpas_parms.identifier, "cam-cdm-intf", + CAM_HW_IDENTIFIER_LENGTH); + rc = cam_cpas_register_client(&cpas_parms); + if (rc) { + CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed"); + goto cpas_registration_failed; + } + CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d", + cpas_parms.client_handle); + cdm_core->cpas_handle = cpas_parms.client_handle; + + 
CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx); + + rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf, + soc_private, CAM_VIRTUAL_CDM, &cdm_core->index); + if (rc) { + CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed"); + goto intf_registration_failed; + } + CAM_DBG(CAM_CDM, "CDM%d registered to intf successful", + cdm_hw_intf->hw_idx); + mutex_unlock(&cdm_hw->hw_mutex); + + return 0; +intf_registration_failed: + cam_cpas_unregister_client(cdm_core->cpas_handle); +cpas_registration_failed: + CAM_MEM_FREE(cdm_hw->soc_info.soc_private); + flush_workqueue(cdm_core->work_queue); + destroy_workqueue(cdm_core->work_queue); + mutex_unlock(&cdm_hw->hw_mutex); + mutex_destroy(&cdm_hw->hw_mutex); +soc_load_failed: + CAM_MEM_FREE(cdm_hw->core_info); + CAM_MEM_FREE(cdm_hw); + CAM_MEM_FREE(cdm_hw_intf); + return rc; +} + +int cam_virtual_cdm_remove(struct platform_device *pdev) +{ + struct cam_hw_info *cdm_hw = NULL; + struct cam_hw_intf *cdm_hw_intf = NULL; + struct cam_cdm *cdm_core = NULL; + int rc = -EBUSY; + + cdm_hw_intf = platform_get_drvdata(pdev); + if (!cdm_hw_intf) { + CAM_ERR(CAM_CDM, "Failed to get dev private data"); + return rc; + } + + cdm_hw = cdm_hw_intf->hw_priv; + if (!cdm_hw) { + CAM_ERR(CAM_CDM, + "Failed to get virtual private data for type=%d idx=%d", + cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx); + return rc; + } + + cdm_core = cdm_hw->core_info; + if (!cdm_core) { + CAM_ERR(CAM_CDM, + "Failed to get virtual core data for type=%d idx=%d", + cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx); + return rc; + } + + rc = cam_cpas_unregister_client(cdm_core->cpas_handle); + if (rc) { + CAM_ERR(CAM_CDM, "CPAS unregister failed"); + return rc; + } + + rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf, + cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM, + cdm_core->index); + if (rc) { + CAM_ERR(CAM_CDM, + "Virtual CDM Interface de-registration failed"); + return rc; + } + + flush_workqueue(cdm_core->work_queue); + 
destroy_workqueue(cdm_core->work_queue); + mutex_destroy(&cdm_hw->hw_mutex); + CAM_MEM_FREE(cdm_hw->soc_info.soc_private); + CAM_MEM_FREE(cdm_hw->core_info); + CAM_MEM_FREE(cdm_hw); + CAM_MEM_FREE(cdm_hw_intf); + rc = 0; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_context.c b/qcom/opensource/camera-kernel/drivers/cam_core/cam_context.c new file mode 100644 index 0000000000..e795f4b849 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_context.c @@ -0,0 +1,840 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include + +#include "cam_context.h" +#include "cam_debug_util.h" +#include "cam_node.h" +#include "cam_context_utils.h" + +static int cam_context_handle_hw_event(void *context, uint32_t evt_id, + void *evt_data) +{ + int rc = 0; + struct cam_context *ctx = (struct cam_context *)context; + + if (!ctx || !ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (ctx->state_machine[ctx->state].irq_ops) + rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id, + evt_data); + else + CAM_DBG(CAM_CORE, + "No function to handle event %d in dev %d, state %d", + evt_id, ctx->dev_hdl, ctx->state); + return rc; +} + +int cam_context_shutdown(struct cam_context *ctx) +{ + int rc = 0; + struct cam_release_dev_cmd cmd; + + if (ctx->state > CAM_CTX_AVAILABLE && ctx->state < CAM_CTX_STATE_MAX) { + cmd.session_handle = ctx->session_hdl; + cmd.dev_handle = ctx->dev_hdl; + rc = cam_context_handle_release_dev(ctx, &cmd); + if (rc) + CAM_ERR(CAM_CORE, + "context release failed for dev_name %s", + ctx->dev_name); + else + cam_context_putref(ctx); + } else { + CAM_WARN(CAM_CORE, + "dev %s context id %u state %d invalid to release hdl", + ctx->dev_name, ctx->ctx_id, ctx->state); + rc = -EINVAL; + } + + if 
(ctx->dev_hdl != -1) { + rc = cam_destroy_device_hdl(ctx->dev_hdl); + if (rc) + CAM_ERR(CAM_CORE, + "destroy device hdl failed for node %s", + ctx->dev_name); + else + ctx->dev_hdl = -1; + } + + return rc; +} + +int cam_context_handle_crm_get_dev_info(struct cam_context *ctx, + struct cam_req_mgr_device_info *info) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!info) { + CAM_ERR(CAM_CORE, "Invalid get device info payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].crm_ops.get_dev_info) { + rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info( + ctx, info); + } else { + CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d", + ctx->dev_hdl, ctx->state); + rc = -EPROTO; + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_crm_link(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *link) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!link) { + CAM_ERR(CAM_CORE, "Invalid link payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].crm_ops.link) { + rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link); + } else { + CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d", + ctx->dev_hdl, ctx->state); + rc = -EPROTO; + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_crm_unlink(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!unlink) { + CAM_ERR(CAM_CORE, "Invalid unlink payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].crm_ops.unlink) { + rc = ctx->state_machine[ctx->state].crm_ops.unlink( + ctx, unlink); + } else { + CAM_ERR(CAM_CORE, "No crm unlink in 
dev %d, name %s, state %d", + ctx->dev_hdl, ctx->dev_name, ctx->state); + rc = -EPROTO; + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_crm_apply_req(struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!apply) { + CAM_ERR(CAM_CORE, "Invalid apply request payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].crm_ops.apply_req) { + rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx, + apply); + } else { + CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d", + ctx->dev_hdl, ctx->state); + rc = -EPROTO; + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_crm_notify_frame_skip( + struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!apply) { + CAM_ERR(CAM_CORE, "Invalid apply request payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].crm_ops.notify_frame_skip) + rc = ctx->state_machine[ctx->state].crm_ops.notify_frame_skip( + ctx, apply); + else + CAM_DBG(CAM_CORE, + "No crm notify_frame_skip in dev %d, state %d", + ctx->dev_hdl, ctx->state); + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_crm_flush_req(struct cam_context *ctx, + struct cam_req_mgr_flush_request *flush) +{ + int rc = 0; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].crm_ops.flush_req) { + rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx, + flush); + } else { + CAM_INFO(CAM_CORE, "No crm flush req in dev %d, state %d, name %s", + ctx->dev_hdl, ctx->state, ctx->dev_name); + rc = -EPROTO; + } + 
mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_crm_process_evt(struct cam_context *ctx, + struct cam_req_mgr_link_evt_data *process_evt) +{ + int rc = 0; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].crm_ops.process_evt) { + rc = ctx->state_machine[ctx->state].crm_ops.process_evt(ctx, + process_evt); + } else { + /* handling of this message is optional */ + CAM_DBG(CAM_CORE, "No crm process evt in dev %d, state %d", + ctx->dev_hdl, ctx->state); + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_crm_dump_req(struct cam_context *ctx, + struct cam_req_mgr_dump_info *dump) +{ + int rc = 0; + + if (!ctx) { + CAM_ERR(CAM_CORE, "Invalid Context"); + return -EINVAL; + } + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context %s ctx_id %d is not ready", + ctx->dev_name, ctx->ctx_id); + return -EINVAL; + } + mutex_lock(&ctx->ctx_mutex); + + if (ctx->state_machine[ctx->state].crm_ops.dump_req) + rc = ctx->state_machine[ctx->state].crm_ops.dump_req(ctx, + dump); + else + CAM_ERR(CAM_CORE, "No crm dump req for %s dev %d, state %d", + ctx->dev_name, ctx->dev_hdl, ctx->state); + + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_mini_dump_from_hw(struct cam_context *ctx, + void *args) +{ + int rc = 0; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context [id %d name:%s] is not ready", ctx->ctx_id, + ctx->dev_name); + return -EINVAL; + } + + if ((ctx->state >= CAM_CTX_AVAILABLE) && (ctx->state < CAM_CTX_STATE_MAX)) { + if (ctx->state_machine[ctx->state].mini_dump_ops) + rc = ctx->state_machine[ctx->state].mini_dump_ops(ctx, args); + else + CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d", + ctx->dev_hdl, ctx->state); + } + return rc; +} + +int cam_context_dump_pf_info(void *data, void *args) +{ + struct cam_context *ctx = data; + struct cam_hw_dump_pf_args *pf_args = args; 
+ int rc = 0; + + if (!pf_args) { + CAM_ERR(CAM_CORE, "PF args is NULL"); + return -EINVAL; + } + + if (!ctx) { + CAM_ERR(CAM_CORE, "Context is NULL"); + if (pf_args->pf_context_info.force_send_pf_evt) { + rc = cam_context_send_pf_evt(ctx, pf_args); + if (rc) + CAM_ERR(CAM_CORE, + "Failed to notify PF event to userspace rc: %d", rc); + } + return -EINVAL; + } + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if ((ctx->state > CAM_CTX_AVAILABLE) && + (ctx->state < CAM_CTX_STATE_MAX)) { + if (ctx->state_machine[ctx->state].pagefault_ops) { + rc = ctx->state_machine[ctx->state].pagefault_ops( + ctx, pf_args); + } else { + CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d", + ctx->dev_hdl, ctx->state); + } + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_message(struct cam_context *ctx, + uint32_t msg_type, void *data) +{ + int rc = 0; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if ((ctx->state > CAM_CTX_AVAILABLE) && + (ctx->state < CAM_CTX_STATE_MAX)) { + if (ctx->state_machine[ctx->state].msg_cb_ops) { + rc = ctx->state_machine[ctx->state].msg_cb_ops( + ctx, msg_type, data); + } else { + CAM_WARN(CAM_CORE, + "No message handler for ctx %d, state %d msg_type :%d", + ctx->dev_hdl, ctx->state, msg_type); + } + } + + return rc; +} + +int cam_context_handle_acquire_dev(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *cmd) +{ + int rc; + int i; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_CORE, "Invalid acquire device command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev) { + rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev( + ctx, cmd); + } else { + CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d", + 
cmd->dev_handle, ctx->state); + rc = -EPROTO; + } + + INIT_LIST_HEAD(&ctx->active_req_list); + INIT_LIST_HEAD(&ctx->wait_req_list); + INIT_LIST_HEAD(&ctx->pending_req_list); + INIT_LIST_HEAD(&ctx->free_req_list); + + for (i = 0; i < ctx->req_size; i++) { + INIT_LIST_HEAD(&ctx->req_list[i].list); + list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list); + } + + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_acquire_hw(struct cam_context *ctx, + void *args) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!args) { + CAM_ERR(CAM_CORE, "Invalid acquire device hw command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.acquire_hw) { + rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_hw( + ctx, args); + } else { + CAM_ERR(CAM_CORE, "No acquire hw for dev %s, state %d", + ctx->dev_name, ctx->state); + rc = -EPROTO; + } + + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_release_dev(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_CORE, "Invalid release device command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.release_dev) { + rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev( + ctx, cmd); + } else { + CAM_ERR(CAM_CORE, "No release device in dev %d, state %d", + ctx->dev_hdl, ctx->state); + rc = -EPROTO; + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_release_hw(struct cam_context *ctx, + void *args) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!args) { + CAM_ERR(CAM_CORE, "Invalid release HW command payload"); + return -EINVAL; + } + + 
mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.release_hw) { + rc = ctx->state_machine[ctx->state].ioctl_ops.release_hw( + ctx, args); + } else { + CAM_ERR(CAM_CORE, "No release hw for dev %s, state %d", + ctx->dev_name, ctx->state); + rc = -EPROTO; + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_flush_dev(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd) +{ + int rc = 0; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_CORE, "Invalid flush device command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.flush_dev) { + rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev( + ctx, cmd); + } else { + CAM_WARN(CAM_CORE, "No flush device in dev %d, state %d", + ctx->dev_hdl, ctx->state); + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_config_dev(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "context is not ready"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_CORE, "Invalid config device command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.config_dev) { + rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev( + ctx, cmd); + } else { + CAM_INFO(CAM_CORE, "No config device in dev %d, state %d", + ctx->dev_hdl, ctx->state); + rc = -EPROTO; + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_start_dev(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc = 0; + + if (!ctx || !ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_CORE, "Invalid start device command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if 
(ctx->state_machine[ctx->state].ioctl_ops.start_dev) + rc = ctx->state_machine[ctx->state].ioctl_ops.start_dev( + ctx, cmd); + else + /* start device can be optional for some driver */ + CAM_DBG(CAM_CORE, "No start device in dev %d, state %d", + ctx->dev_hdl, ctx->state); + + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_stop_dev(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc = 0; + + if (!ctx || !ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_CORE, "Invalid stop device command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev) + rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev( + ctx, cmd); + else + /* stop device can be optional for some driver */ + CAM_WARN(CAM_CORE, "No stop device in dev %d, name %s state %d", + ctx->dev_hdl, ctx->dev_name, ctx->state); + + ctx->last_flush_req = 0; + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_info_dump(void *context, + enum cam_context_dump_id id) +{ + int rc = 0; + struct cam_context *ctx = (struct cam_context *)context; + + if (!ctx || !ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].dumpinfo_ops) + rc = ctx->state_machine[ctx->state].dumpinfo_ops(ctx, + id); + mutex_unlock(&ctx->ctx_mutex); + + if (rc) + CAM_WARN(CAM_CORE, + "Dump for id %u failed on ctx_id %u name %s state %d", + id, ctx->ctx_id, ctx->dev_name, ctx->state); + + return rc; +} + +int cam_context_handle_dump_dev(struct cam_context *ctx, + struct cam_dump_req_cmd *cmd) +{ + int rc = 0; + + if (!ctx) { + CAM_ERR(CAM_CORE, "Invalid Context"); + return -EINVAL; + } + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context %s ctx_id %d is not ready", + ctx->dev_name, ctx->ctx_id); + return -EINVAL; + } + + if (!cmd) { 
+ CAM_ERR(CAM_CORE, + "Context %s ctx_id %d Invalid dump command payload", + ctx->dev_name, ctx->ctx_id); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + CAM_DBG(CAM_CORE, "dump device in dev %d, name %s state %d", + ctx->dev_hdl, ctx->dev_name, ctx->state); + if (ctx->state_machine[ctx->state].ioctl_ops.dump_dev) + rc = ctx->state_machine[ctx->state].ioctl_ops.dump_dev( + ctx, cmd); + else + CAM_WARN(CAM_CORE, "No dump device in dev %d, name %s state %d", + ctx->dev_hdl, ctx->dev_name, ctx->state); + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + +int cam_context_handle_hw_recovery(void *priv, void *data) +{ + struct cam_context *ctx = priv; + int rc = 0; + + if (!ctx) { + CAM_ERR(CAM_CORE, "null context"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state != CAM_CTX_ACTIVATED) { + CAM_DBG(CAM_CORE, "skipping recovery for ctx:%d dev:%s in state:%d", ctx->ctx_id, + ctx->dev_name, ctx->state); + goto end; + } + CAM_DBG(CAM_CORE, "try hw recovery for ctx:%d dev:%s", ctx->ctx_id, ctx->dev_name); + if (ctx->state_machine[ctx->state].recovery_ops) + rc = ctx->state_machine[ctx->state].recovery_ops(priv, data); + else + CAM_WARN(CAM_CORE, "no recovery op in state:%d for ctx:%d dev:%s", + ctx->state, ctx->ctx_id, ctx->dev_name); +end: + mutex_unlock(&ctx->ctx_mutex); + return rc; +} + +int cam_context_init(struct cam_context *ctx, + const char *dev_name, + uint64_t dev_id, + uint32_t ctx_id, + struct cam_req_mgr_kmd_ops *crm_node_intf, + struct cam_hw_mgr_intf *hw_mgr_intf, + struct cam_ctx_request *req_list, + uint32_t req_size, int img_iommu_hdl) +{ + int i; + + /* crm_node_intf is optinal */ + if (!ctx || !hw_mgr_intf || !req_list) { + CAM_ERR(CAM_CORE, "Invalid input parameters"); + return -EINVAL; + } + + memset(ctx, 0, sizeof(*ctx)); + ctx->dev_hdl = -1; + ctx->link_hdl = -1; + ctx->session_hdl = -1; + INIT_LIST_HEAD(&ctx->list); + mutex_init(&ctx->ctx_mutex); + mutex_init(&ctx->sync_mutex); + spin_lock_init(&ctx->lock); + + 
strscpy(ctx->dev_name, dev_name, CAM_CTX_DEV_NAME_MAX_LENGTH); + ctx->dev_id = dev_id; + ctx->ctx_id = ctx_id; + ctx->last_flush_req = 0; + ctx->ctx_crm_intf = NULL; + ctx->crm_ctx_intf = crm_node_intf; + ctx->hw_mgr_intf = hw_mgr_intf; + ctx->irq_cb_intf = cam_context_handle_hw_event; + + INIT_LIST_HEAD(&ctx->active_req_list); + INIT_LIST_HEAD(&ctx->wait_req_list); + INIT_LIST_HEAD(&ctx->pending_req_list); + INIT_LIST_HEAD(&ctx->free_req_list); + ctx->req_list = req_list; + ctx->req_size = req_size; + for (i = 0; i < req_size; i++) { + INIT_LIST_HEAD(&ctx->req_list[i].list); + list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list); + ctx->req_list[i].ctx = ctx; + ctx->req_list[i].index = i; + } + ctx->state = CAM_CTX_AVAILABLE; + ctx->state_machine = NULL; + ctx->ctx_priv = NULL; + ctx->img_iommu_hdl = img_iommu_hdl; + + return 0; +} + +int cam_context_deinit(struct cam_context *ctx) +{ + if (!ctx) + return -EINVAL; + + /** + * This is called from platform device remove. + * Everyting should be released at this moment. 
+ * so we just free the memory for the context + */ + if (ctx->state != CAM_CTX_AVAILABLE) + CAM_ERR(CAM_CORE, "Device did not shutdown cleanly"); + + memset(ctx, 0, sizeof(*ctx)); + + return 0; +} + +void cam_context_putref(struct cam_context *ctx) +{ + if (kref_read(&ctx->refcount)) + kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list); + else + WARN(1, "ctx %s %d state %d devhdl %X\n", ctx->dev_name, + ctx->ctx_id, ctx->state, ctx->dev_hdl); + + CAM_DBG(CAM_CORE, + "ctx device hdl %ld, ref count %d, dev_name %s", + ctx->dev_hdl, refcount_read(&(ctx->refcount.refcount)), + ctx->dev_name); +} + +void cam_context_getref(struct cam_context *ctx) +{ + if (kref_get_unless_zero(&ctx->refcount) == 0) { + /* should never happen */ + WARN(1, "%s fail\n", __func__); + } + CAM_DBG(CAM_CORE, + "ctx device hdl %ld, ref count %d, dev_name %s", + ctx->dev_hdl, refcount_read(&(ctx->refcount.refcount)), + ctx->dev_name); +} + +int cam_context_add_evt_inject(struct cam_context *ctx, void *evt_args) +{ + int rc = 0; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if ((ctx->state > CAM_CTX_AVAILABLE) && + (ctx->state < CAM_CTX_STATE_MAX)) { + if (ctx->state_machine[ctx->state].evt_inject_ops) { + rc = ctx->state_machine[ctx->state].evt_inject_ops( + ctx, evt_args); + } else { + CAM_WARN(CAM_CORE, "No evt inject ops in dev %d, state %d", + ctx->dev_hdl, ctx->state); + } + } else { + rc = -EINVAL; + } + + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_context.h b/qcom/opensource/camera-kernel/drivers/cam_core/cam_context.h new file mode 100644 index 0000000000..35c7cafd0f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_context.h @@ -0,0 +1,670 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CONTEXT_H_ +#define _CAM_CONTEXT_H_ + +#include +#include +#include +#include +#include "cam_req_mgr_interface.h" +#include "cam_hw_mgr_intf.h" +#include "cam_smmu_api.h" +#include "cam_common_util.h" + +/* Forward declarations */ +struct cam_context; + +/* max device name string length*/ +#define CAM_CTX_DEV_NAME_MAX_LENGTH 20 + +/* max request number */ +#define CAM_CTX_REQ_MAX 20 +#define CAM_CTX_ICP_REQ_MAX 40 +#define CAM_CTX_CFG_MAX 20 +#define CAM_CTX_RES_MAX 20 + +/* max tag dump header string length*/ +#define CAM_CTXT_DUMP_TAG_MAX_LEN 128 + +/* Number of words to be dumped for context*/ +#define CAM_CTXT_DUMP_NUM_WORDS 10 + +/** + * enum cam_ctx_state - context top level states + * + */ +enum cam_context_state { + CAM_CTX_UNINIT = 0, + CAM_CTX_AVAILABLE = 1, + CAM_CTX_ACQUIRED = 2, + CAM_CTX_READY = 3, + CAM_CTX_FLUSHED = 4, + CAM_CTX_ACTIVATED = 5, + CAM_CTX_STATE_MAX = 6, +}; + +/** + * struct cam_ctx_request - Common request structure for the context + * + * @list: Link list entry + * @status: Request status + * @request_id: Request id + * @req_priv: Derived request object + * @hw_update_entries: Hardware update entries + * @num_hw_update_entries: Number of hardware update entries + * @in_map_entries: Entries for in fences + * @num_in_map_entries: Number of in map entries + * @out_map_entries: Entries for out fences + * @num_out_map_entries: Number of out map entries + * @num_in_acked: Number of in fence acked + * @num_out_acked: Number of out fence acked + * @index: Index of request in the list + * @flushed: Request is flushed + * @ctx: The context to which this request belongs + * @buf_tracker: List of buffers we want to keep ref counts on + * used by the HW block for a particular req + * @pf_data page fault debug data + * @packet pointer to packet + * + */ +struct cam_ctx_request { + struct list_head list; + uint32_t status; + uint64_t request_id; 
+ void *req_priv; + struct cam_hw_update_entry *hw_update_entries; + uint32_t num_hw_update_entries; + struct cam_hw_fence_map_entry *in_map_entries; + uint32_t num_in_map_entries; + struct cam_hw_fence_map_entry *out_map_entries; + uint32_t num_out_map_entries; + atomic_t num_in_acked; + uint32_t num_out_acked; + uint32_t index; + int flushed; + struct cam_context *ctx; + struct list_head buf_tracker; + struct cam_hw_mgr_pf_request_info pf_data; + void *packet; +}; + +/** + * struct cam_ctx_ioctl_ops - Function table for handling IOCTL calls + * + * @acquire_dev: Function pointer for acquire device + * @release_dev: Function pointer for release device + * @config_dev: Function pointer for config device + * @start_dev: Function pointer for start device + * @stop_dev: Function pointer for stop device + * @flush_dev: Function pointer for flush device + * @acquire_hw: Function pointer for acquire hw + * @release_hw: Function pointer for release hw + * @dump_dev: Function pointer for dump dev + * + */ +struct cam_ctx_ioctl_ops { + int (*acquire_dev)(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *cmd); + int (*release_dev)(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd); + int (*config_dev)(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd); + int (*start_dev)(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd); + int (*stop_dev)(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd); + int (*flush_dev)(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd); + int (*acquire_hw)(struct cam_context *ctx, void *args); + int (*release_hw)(struct cam_context *ctx, void *args); + int (*dump_dev)(struct cam_context *ctx, + struct cam_dump_req_cmd *cmd); +}; + +/** + * struct cam_ctx_crm_ops - Function table for handling CRM to context calls + * + * @get_dev_info: Get device informaiton + * @link: Link the context + * @unlink: Unlink the context + * @apply_req: Apply setting for the context + * @notify_frame_skip: Notify 
device that a frame is skipped + * @flush_req: Flush request to remove request ids + * @process_evt: Handle event notification from CRM.(optional) + * @dump_req: Dump information for the issue request + * + */ +struct cam_ctx_crm_ops { + int (*get_dev_info)(struct cam_context *ctx, + struct cam_req_mgr_device_info *device_info); + int (*link)(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *link); + int (*unlink)(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink); + int (*apply_req)(struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply); + int (*notify_frame_skip)(struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply); + int (*flush_req)(struct cam_context *ctx, + struct cam_req_mgr_flush_request *flush); + int (*process_evt)(struct cam_context *ctx, + struct cam_req_mgr_link_evt_data *evt_data); + int (*dump_req)(struct cam_context *ctx, + struct cam_req_mgr_dump_info *dump); +}; + + +/** + * struct cam_ctx_ops - Collection of the interface funciton tables + * + * @ioctl_ops: Ioctl funciton table + * @crm_ops: CRM to context interface function table + * @irq_ops: Hardware event handle function + * @pagefault_ops: Function to be called on page fault + * @dumpinfo_ops: Function to be invoked for dumping any + * context info + * @recovery_ops: Function to be invoked to try hardware recovery + * @mini_dump_ops: Function for mini dump + * @err_inject_ops: Function for error injection + * @msg_cb_ops: Function to be called on any message from + * other subdev notifications + * + */ +struct cam_ctx_ops { + struct cam_ctx_ioctl_ops ioctl_ops; + struct cam_ctx_crm_ops crm_ops; + cam_hw_event_cb_func irq_ops; + cam_hw_pagefault_cb_func pagefault_ops; + cam_ctx_info_dump_cb_func dumpinfo_ops; + cam_ctx_recovery_cb_func recovery_ops; + cam_ctx_mini_dump_cb_func mini_dump_ops; + cam_ctx_err_inject_cb_func evt_inject_ops; + cam_ctx_message_cb_func msg_cb_ops; +}; + + +/** + * struct cam_context - camera context 
object for the subdevice node + * + * @dev_name: String giving name of device associated + * @dev_id: ID of device associated + * @ctx_id: ID for this context + * @list: Link list entry + * @sessoin_hdl: Session handle + * @dev_hdl: Device handle + * @link_hdl: Link handle + * @ctx_mutex: Mutex for ioctl calls + * @lock: Spin lock + * @active_req_list: Requests pending for done event + * @pending_req_list: Requests pending for reg upd event + * @wait_req_list: Requests waiting for apply + * @free_req_list: Requests that are free + * @req_list: Reference to the request storage + * @req_size: Size of the request storage + * @hw_mgr_intf: Context to HW interface + * @ctx_crm_intf: Context to CRM interface + * @crm_ctx_intf: CRM to context interface + * @irq_cb_intf: HW to context callback interface + * @state: Current state for top level state machine + * @state_machine: Top level state machine + * @ctx_priv: Private context pointer + * @ctxt_to_hw_map: Context to hardware mapping pointer + * @hw_mgr_ctx_id: Hw Mgr context id returned from hw mgr + * @ctx_id_string: Context id string constructed with dev type, + * ctx id, hw mgr ctx id, hw id + * @refcount: Context object refcount + * @node: The main node to which this context belongs + * @sync_mutex: mutex to sync with sync cb thread + * @last_flush_req: Last request to flush + * @max_hw_update_entries: Max hw update entries + * @max_in_map_entries: Max in map entries + * @max_out_map_entries: Max out in map entries + * @hw_updater_entry: Hw update entry + * @in_map_entries: In map update entry + * @out_map_entries: Out map entry + * @mini dump cb: Mini dump cb + * @img_iommu_hdl: Image IOMMU handle + * @cdm_done_ts: CDM callback done timestamp + */ +struct cam_context { + char dev_name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + uint64_t dev_id; + uint32_t ctx_id; + struct list_head list; + int32_t session_hdl; + int32_t dev_hdl; + int32_t link_hdl; + + struct mutex ctx_mutex; + spinlock_t lock; + + struct list_head 
active_req_list; + struct list_head pending_req_list; + struct list_head wait_req_list; + struct list_head free_req_list; + struct cam_ctx_request *req_list; + uint32_t req_size; + + struct cam_hw_mgr_intf *hw_mgr_intf; + struct cam_req_mgr_crm_cb *ctx_crm_intf; + struct cam_req_mgr_kmd_ops *crm_ctx_intf; + cam_hw_event_cb_func irq_cb_intf; + + enum cam_context_state state; + struct cam_ctx_ops *state_machine; + + void *ctx_priv; + void *ctxt_to_hw_map; + uint32_t hw_mgr_ctx_id; + char ctx_id_string[128]; + + struct kref refcount; + void *node; + struct mutex sync_mutex; + uint32_t last_flush_req; + uint32_t max_hw_update_entries; + uint32_t max_in_map_entries; + uint32_t max_out_map_entries; + struct cam_hw_update_entry **hw_update_entry; + struct cam_hw_fence_map_entry **in_map_entries; + struct cam_hw_fence_map_entry **out_map_entries; + cam_ctx_mini_dump_cb_func mini_dump_cb; + int img_iommu_hdl; + struct timespec64 cdm_done_ts; +}; + +/** + * struct cam_context_stream_dump - camera context stream information + * + * @hw_mgr_ctx_id: Hw Mgr context id returned from hw mgr + * @dev_id: ID of device associated + * @dev_hdl: Device handle + * @link_hdl: Link handle + * @sessoin_hdl: Session handle + * @refcount: Context object refcount + * @last_flush_req: Last request to flush + * @state: Current state for top level state machine + */ +struct cam_context_stream_dump { + uint32_t hw_mgr_ctx_id; + uint32_t dev_id; + uint32_t dev_hdl; + uint32_t link_hdl; + uint32_t session_hdl; + uint32_t refcount; + uint32_t last_flush_req; + enum cam_context_state state; +}; + +/** + * struct cam_context_each_req_info - camera each request information + * + * @request_id: request id + */ +struct cam_context_each_req_info { + uint64_t request_id; +}; + +/** + * struct cam_context_dump_header - Function for context dump header + * + * @tag : Tag for context dump header + * @size : Size of data + * @word_size : Word size of data + */ +struct cam_context_dump_header { + uint8_t 
tag[CAM_CTXT_DUMP_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +/** + * cam_context_shutdown() + * + * @brief: Calls while device close or shutdown + * + * @ctx: Object pointer for cam_context + * + */ +int cam_context_shutdown(struct cam_context *ctx); + +/** + * cam_context_handle_crm_get_dev_info() + * + * @brief: Handle get device information command + * + * @ctx: Object pointer for cam_context + * @info: Device information returned + * + */ +int cam_context_handle_crm_get_dev_info(struct cam_context *ctx, + struct cam_req_mgr_device_info *info); + +/** + * cam_context_handle_crm_link() + * + * @brief: Handle link command + * + * @ctx: Object pointer for cam_context + * @link: Link command payload + * + */ +int cam_context_handle_crm_link(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *link); + +/** + * cam_context_handle_crm_unlink() + * + * @brief: Handle unlink command + * + * @ctx: Object pointer for cam_context + * @unlink: Unlink command payload + * + */ +int cam_context_handle_crm_unlink(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink); + +/** + * cam_context_handle_crm_apply_req() + * + * @brief: Handle apply request command + * + * @ctx: Object pointer for cam_context + * @apply: Apply request command payload + * + */ +int cam_context_handle_crm_apply_req(struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply); + +/** + * cam_context_handle_crm_notify_frame_skip() + * + * @brief: Handle notify frame skip command + * + * @ctx: Object pointer for cam_context + * @apply: Notify frame skip command payload + * + */ +int cam_context_handle_crm_notify_frame_skip( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply); + +/** + * cam_context_handle_crm_flush_req() + * + * @brief: Handle flush request command + * + * @ctx: Object pointer for cam_context + * @apply: Flush request command payload + * + */ +int cam_context_handle_crm_flush_req(struct cam_context *ctx, + struct 
cam_req_mgr_flush_request *apply); + +/** + * cam_context_handle_crm_process_evt() + * + * @brief: Handle process event command + * + * @ctx: Object pointer for cam_context + * @process_evt: process event command payload + * + */ +int cam_context_handle_crm_process_evt(struct cam_context *ctx, + struct cam_req_mgr_link_evt_data *process_evt); + +/** + * cam_context_handle_crm_dump_req() + * + * @brief: Handle CRM dump request + * + * @ctx: Object pointer for cam_context + * @dump: Dump request command payload + * + */ +int cam_context_handle_crm_dump_req(struct cam_context *ctx, + struct cam_req_mgr_dump_info *dump); + +/** + * cam_context_mini_dump_from_hw() + * + * @brief: Handle mini dump request command + * + * @ctx: Object pointer for cam_context + * @args: Args to be passed + * + */ +int cam_context_mini_dump_from_hw(struct cam_context *ctx, + void *args); + +/** + * cam_context_dump_pf_info() + * + * @brief: Handle dump active request request command + * + * @ctx: Object pointer for cam_context + * @pf_args: pf args to dump pf info to hw + * + */ +int cam_context_dump_pf_info(void *ctx, + void *pf_args); + +/** + * cam_context_handle_message() + * + * @brief: Handle message callback command + * + * @ctx: Object pointer for cam_context + * @msg_type: message type sent from other subdev + * @data: data from other subdev + * + */ +int cam_context_handle_message(struct cam_context *ctx, + uint32_t msg_type, void *data); + +/** + * cam_context_handle_acquire_dev() + * + * @brief: Handle acquire device command + * + * @ctx: Object pointer for cam_context + * @cmd: Acquire device command payload + * + */ +int cam_context_handle_acquire_dev(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *cmd); + +/** + * cam_context_handle_acquire_hw() + * + * @brief: Handle acquire HW command + * + * @ctx: Object pointer for cam_context + * @cmd: Acquire HW command payload + * + */ +int cam_context_handle_acquire_hw(struct cam_context *ctx, + void *cmd); + +/** + * 
cam_context_handle_release_dev() + * + * @brief: Handle release device command + * + * @ctx: Object pointer for cam_context + * @cmd: Release device command payload + * + */ +int cam_context_handle_release_dev(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd); + +/** + * cam_context_handle_release_hw() + * + * @brief: Handle release HW command + * + * @ctx: Object pointer for cam_context + * @cmd: Release HW command payload + * + */ +int cam_context_handle_release_hw(struct cam_context *ctx, + void *cmd); + +/** + * cam_context_handle_config_dev() + * + * @brief: Handle config device command + * + * @ctx: Object pointer for cam_context + * @cmd: Config device command payload + * + */ +int cam_context_handle_config_dev(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd); + +/** + * cam_context_handle_flush_dev() + * + * @brief: Handle flush device command + * + * @ctx: Object pointer for cam_context + * @cmd: Flush device command payload + * + */ +int cam_context_handle_flush_dev(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd); + +/** + * cam_context_handle_start_dev() + * + * @brief: Handle start device command + * + * @ctx: Object pointer for cam_context + * @cmd: Start device command payload + * + */ +int cam_context_handle_start_dev(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd); + +/** + * cam_context_handle_stop_dev() + * + * @brief: Handle stop device command + * + * @ctx: Object pointer for cam_context + * @cmd: Stop device command payload + * + */ +int cam_context_handle_stop_dev(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd); + +/** + * cam_context_handle_dump_dev() + * + * @brief: Handle dump device command + * + * @ctx: Object pointer for cam_context + * @cmd: Dump device command payload + * + */ +int cam_context_handle_dump_dev(struct cam_context *ctx, + struct cam_dump_req_cmd *cmd); + +/** + * cam_context_handle_info_dump() + * + * @brief: Handle any dump info for the context + * + * @ctx: 
Object pointer for cam_context + * @id: To indicate which info pertaining + * to that ctx needs to be dumped + * + */ +int cam_context_handle_info_dump(void *context, + enum cam_context_dump_id id); + +/** + * cam_context_handle_hw_recovery() + * + * @brief: Handle hardware recovery. This function can be scheduled in + * cam_req_mgr_workq. + * + * @context: Object pointer for cam_context + * @data: Recovery data that is to be passsed to hw mgr + * + */ +int cam_context_handle_hw_recovery(void *context, void *data); + +/** + * cam_context_deinit() + * + * @brief: Camera context deinitialize function + * + * @ctx: Object pointer for cam_context + * + */ +int cam_context_deinit(struct cam_context *ctx); + +/** + * cam_context_init() + * + * @brief: Camera context initialize function + * + * @ctx: Object pointer for cam_context + * @dev_name: String giving name of device associated + * @dev_id: ID of the device associated + * @ctx_id: ID for this context + * @crm_node_intf: Function table for crm to context interface + * @hw_mgr_intf: Function table for context to hw interface + * @req_list: Requests storage + * @req_size: Size of the request storage + * @img_iommu_hdl: IOMMU Handle for image buffers + * + */ +int cam_context_init(struct cam_context *ctx, + const char *dev_name, + uint64_t dev_id, + uint32_t ctx_id, + struct cam_req_mgr_kmd_ops *crm_node_intf, + struct cam_hw_mgr_intf *hw_mgr_intf, + struct cam_ctx_request *req_list, + uint32_t req_size, int img_iommu_hdl); + +/** + * cam_context_putref() + * + * @brief: Put back context reference. + * + * @ctx: Context for which ref is returned + * + */ +void cam_context_putref(struct cam_context *ctx); + +/** + * cam_context_getref() + * + * @brief: Get back context reference. + * + * @ctx: Context for which ref is taken + * + */ +void cam_context_getref(struct cam_context *ctx); + +/** + * cam_context_add_evt_inject() + * + * @brief: Add error inject parameters through evt_inject_ops. 
+ * + * @ctx: Context for which error is to be injected + * + * @inject_args: Event injection parameters + * + */ +int cam_context_add_evt_inject(struct cam_context *ctx, + void *inject_args); + +#endif /* _CAM_CONTEXT_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_context_utils.c b/qcom/opensource/camera-kernel/drivers/cam_core/cam_context_utils.c new file mode 100644 index 0000000000..909107ea47 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_context_utils.c @@ -0,0 +1,2170 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "cam_context.h" +#include "cam_context_utils.h" +#include "cam_mem_mgr.h" +#include "cam_node.h" +#include "cam_req_mgr_util.h" +#include "cam_req_mgr_dev.h" +#include "cam_sync_api.h" +#include "cam_trace.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_packet_util.h" +#include "cam_mem_mgr_api.h" + +static uint cam_debug_ctx_req_list; +module_param(cam_debug_ctx_req_list, uint, 0644); + +static inline int cam_context_validate_thread(void) +{ + if (in_interrupt()) { + WARN(1, "Invalid execution context\n"); + return -EINVAL; + } + return 0; +} + +static void cam_context_free_mem_hw_entries(struct cam_context *ctx) +{ + int i; + + if (ctx->out_map_entries) { + for (i = 0; i < ctx->req_size; i++) { + CAM_MEM_FREE(ctx->out_map_entries[i]); + ctx->out_map_entries[i] = NULL; + } + + CAM_MEM_FREE(ctx->out_map_entries); + ctx->out_map_entries = NULL; + } + + if (ctx->in_map_entries) { + for (i = 0; i < ctx->req_size; i++) { + CAM_MEM_FREE(ctx->in_map_entries[i]); + ctx->in_map_entries[i] = NULL; + } + + CAM_MEM_FREE(ctx->in_map_entries); + ctx->in_map_entries = NULL; + } + + if (ctx->hw_update_entry) { + for (i = 0; i < ctx->req_size; i++) { + 
CAM_MEM_FREE(ctx->hw_update_entry[i]); + ctx->hw_update_entry[i] = NULL; + } + + CAM_MEM_FREE(ctx->hw_update_entry); + ctx->hw_update_entry = NULL; + } +} + +static int cam_context_allocate_mem_hw_entries(struct cam_context *ctx) +{ + int rc = 0; + unsigned int i; + struct cam_ctx_request *req; + struct cam_ctx_request *temp_req; + + CAM_DBG(CAM_CTXT, + "%s[%d] num: max_hw %u in_map %u out_map %u req %u", + ctx->dev_name, + ctx->ctx_id, + ctx->max_hw_update_entries, + ctx->max_in_map_entries, + ctx->max_out_map_entries, + ctx->req_size); + + ctx->hw_update_entry = CAM_MEM_ZALLOC_ARRAY(ctx->req_size, + sizeof(struct cam_hw_update_entry *), GFP_KERNEL); + + if (!ctx->hw_update_entry) { + CAM_ERR(CAM_CTXT, "%s[%d] no memory for hw_update_entry", + ctx->dev_name, ctx->ctx_id); + return -ENOMEM; + } + + for (i = 0; i < ctx->req_size; i++) { + ctx->hw_update_entry[i] = CAM_MEM_ZALLOC_ARRAY(ctx->max_hw_update_entries, + sizeof(struct cam_hw_update_entry), GFP_KERNEL); + + if (!ctx->hw_update_entry[i]) { + CAM_ERR(CAM_CTXT, "%s[%d] no memory for hw_update_entry: %u", + ctx->dev_name, ctx->ctx_id, i); + rc = -ENOMEM; + goto free_mem; + } + } + + ctx->in_map_entries = CAM_MEM_ZALLOC_ARRAY(ctx->req_size, + sizeof(struct cam_hw_fence_map_entry *), + GFP_KERNEL); + + if (!ctx->in_map_entries) { + CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries", + ctx->dev_name, ctx->ctx_id); + rc = -ENOMEM; + goto free_mem; + } + + for (i = 0; i < ctx->req_size; i++) { + ctx->in_map_entries[i] = CAM_MEM_ZALLOC_ARRAY(ctx->max_in_map_entries, + sizeof(struct cam_hw_fence_map_entry), GFP_KERNEL); + + if (!ctx->in_map_entries[i]) { + CAM_ERR(CAM_CTXT, "%s[%d] no memory for in_map_entries: %u", + ctx->dev_name, ctx->ctx_id, i); + rc = -ENOMEM; + goto free_mem; + } + } + + ctx->out_map_entries = CAM_MEM_ZALLOC_ARRAY(ctx->req_size, + sizeof(struct cam_hw_fence_map_entry *), + GFP_KERNEL); + + if (!ctx->out_map_entries) { + CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries", + 
ctx->dev_name, ctx->ctx_id); + rc = -ENOMEM; + goto free_mem; + } + + for (i = 0; i < ctx->req_size; i++) { + ctx->out_map_entries[i] = CAM_MEM_ZALLOC_ARRAY(ctx->max_out_map_entries, + sizeof(struct cam_hw_fence_map_entry), GFP_KERNEL); + + if (!ctx->out_map_entries[i]) { + CAM_ERR(CAM_CTXT, "%s[%d] no memory for out_map_entries: %u", + ctx->dev_name, ctx->ctx_id, i); + rc = -ENOMEM; + goto free_mem; + } + } + + list_for_each_entry_safe(req, temp_req, &ctx->free_req_list, list) { + req->hw_update_entries = ctx->hw_update_entry[req->index]; + req->in_map_entries = ctx->in_map_entries[req->index]; + req->out_map_entries = ctx->out_map_entries[req->index]; + } + + return rc; + +free_mem: + cam_context_free_mem_hw_entries(ctx); + return rc; +} + +int cam_context_buf_done_from_hw(struct cam_context *ctx, + void *done_event_data, uint32_t evt_id) +{ + int j, result, rc; + struct cam_ctx_request *req; + struct cam_hw_done_event_data *done = + (struct cam_hw_done_event_data *)done_event_data; + + if (!ctx || !done) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done); + return -EINVAL; + } + + rc = cam_context_validate_thread(); + if (rc) + return rc; + + spin_lock(&ctx->lock); + if (list_empty(&ctx->active_req_list)) { + CAM_ERR(CAM_CTXT, "[%s][%d] no active request", + ctx->dev_name, ctx->ctx_id); + spin_unlock(&ctx->lock); + return -EIO; + } + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + + trace_cam_buf_done("UTILS", ctx, req); + + if (done->request_id != req->request_id) { + CAM_ERR(CAM_CTXT, + "[%s][%d] mismatch: done req[%lld], active req[%lld]", + ctx->dev_name, ctx->ctx_id, + done->request_id, req->request_id); + spin_unlock(&ctx->lock); + return -EIO; + } + + if (!req->num_out_map_entries) { + CAM_DBG(CAM_CTXT, "[%s][%d] no output fence to signal", + ctx->dev_name, ctx->ctx_id); + list_del_init(&req->list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + if (req->packet) { + cam_common_mem_free(req->packet); + 
req->packet = NULL; + } + req->ctx = NULL; + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock(&ctx->lock); + return -EIO; + } + + /* + * since another thread may be adding/removing from active + * list, so hold the lock + */ + list_del_init(&req->list); + spin_unlock(&ctx->lock); + + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + + if (evt_id == CAM_CTX_EVT_ID_SUCCESS) + result = CAM_SYNC_STATE_SIGNALED_SUCCESS; + else if (evt_id == CAM_CTX_EVT_ID_CANCEL) + result = CAM_SYNC_STATE_SIGNALED_CANCEL; + else + result = CAM_SYNC_STATE_SIGNALED_ERROR; + + CAM_DBG(CAM_REQ, + "[%s][ctx_id %d] : req[%llu] : Signaling %d", + ctx->dev_name, ctx->ctx_id, req->request_id, result); + + for (j = 0; j < req->num_out_map_entries; j++) { + /* Get buf handles from packet and retrieve them from presil framework */ + if (cam_presil_mode_enabled()) { + rc = cam_presil_retrieve_buffers_from_packet(req->packet, + ctx->img_iommu_hdl, req->out_map_entries[j].resource_handle); + if (rc) { + CAM_ERR(CAM_CTXT, "Failed to retrieve image buffers rc:%d", rc); + return rc; + } + } + + CAM_DBG(CAM_REQ, "fence %d signal with %d", + req->out_map_entries[j].sync_id, result); + cam_sync_signal(req->out_map_entries[j].sync_id, result, + done->evt_param); + req->out_map_entries[j].sync_id = -1; + } + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving req[%llu] from active_list to free_list", + ctx->dev_name, ctx->ctx_id, req->request_id); + + cam_cpas_notify_event(ctx->ctx_id_string, req->request_id); + + if (req->packet) { + cam_common_mem_free(req->packet); + req->packet = NULL; + } + req->ctx = NULL; + + /* + * another thread may be adding/removing from free list, + * so hold the lock + */ + spin_lock(&ctx->lock); + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock(&ctx->lock); + + return 0; +} + +static int cam_context_apply_req_to_hw(struct cam_ctx_request *req, + struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct 
cam_context *ctx = req->ctx; + struct cam_hw_config_args cfg; + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto end; + } + + spin_lock(&ctx->lock); + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + spin_unlock(&ctx->lock); + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving req[%llu] from pending_list to active_list", + ctx->dev_name, ctx->ctx_id, req->request_id); + + cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + cfg.request_id = req->request_id; + cfg.hw_update_entries = req->hw_update_entries; + cfg.num_hw_update_entries = req->num_hw_update_entries; + cfg.out_map_entries = req->out_map_entries; + cfg.num_out_map_entries = req->num_out_map_entries; + cfg.priv = req->req_priv; + + rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (rc) { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + + spin_lock(&ctx->lock); + list_del_init(&req->list); + spin_unlock(&ctx->lock); + + if (req->packet) { + cam_common_mem_free(req->packet); + req->packet = NULL; + } + + spin_lock(&ctx->lock); + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock(&ctx->lock); + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving req[%llu] from active_list to free_list", + ctx->dev_name, ctx->ctx_id, req->request_id); + } + +end: + return rc; +} + +static void cam_context_sync_callback(int32_t sync_obj, int status, void *data) +{ + struct cam_ctx_request *req = data; + struct cam_context *ctx = NULL; + struct cam_context_utils_flush_args flush_args; + struct cam_flush_dev_cmd flush_cmd; + struct cam_req_mgr_apply_request apply; + int rc; + + if (!req) { + CAM_ERR(CAM_CTXT, "Invalid input param"); + return; + } + rc = cam_context_validate_thread(); + if (rc) + return; + + ctx = req->ctx; + if (!ctx) { + CAM_ERR(CAM_CTXT, "Invalid ctx for req %llu", req->request_id); + 
return; + } + + if (atomic_inc_return(&req->num_in_acked) == req->num_in_map_entries) { + apply.request_id = req->request_id; + /* + * take mutex to ensure that another thread does + * not flush the request while this + * thread is submitting it to h/w. The submit to + * h/w and adding to the active list should happen + * in a critical section which is provided by this + * mutex. + */ + if ((status == CAM_SYNC_STATE_SIGNALED_ERROR) || + (status == CAM_SYNC_STATE_SIGNALED_CANCEL)) { + CAM_DBG(CAM_CTXT, "fence error: %d on obj %d", + status, sync_obj); + flush_cmd.req_id = req->request_id; + flush_args.cmd = &flush_cmd; + flush_args.flush_active_req = false; + cam_context_flush_req_to_hw(ctx, &flush_args); + } + + mutex_lock(&ctx->sync_mutex); + if (!req->flushed) { + cam_context_apply_req_to_hw(req, &apply); + mutex_unlock(&ctx->sync_mutex); + } else { + req->flushed = 0; + req->ctx = NULL; + mutex_unlock(&ctx->sync_mutex); + + spin_lock(&ctx->lock); + list_del_init(&req->list); + spin_unlock(&ctx->lock); + + if (req->packet) { + cam_common_mem_free(req->packet); + req->packet = NULL; + } + + spin_lock(&ctx->lock); + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock(&ctx->lock); + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving req[%llu] from pending_list to free_list", + ctx->dev_name, ctx->ctx_id, + req->request_id); + } + } + cam_context_putref(ctx); +} + +int32_t cam_context_release_dev_to_hw(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + struct cam_hw_release_args arg; + + if (!ctx) { + CAM_ERR(CAM_CTXT, "Invalid input param"); + return -EINVAL; + } + + if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + return -EINVAL; + } + + arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + arg.active_req = false; + + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg); + 
cam_context_free_mem_hw_entries(ctx); + ctx->ctxt_to_hw_map = NULL; + + ctx->session_hdl = -1; + ctx->dev_hdl = -1; + ctx->link_hdl = -1; + + return 0; +} + +int32_t cam_context_config_dev_to_hw( + struct cam_context *ctx, struct cam_config_dev_cmd *cmd) +{ + int rc = 0; + size_t len; + struct cam_hw_stream_setttings cfg; + uintptr_t packet_addr; + struct cam_packet *packet_u; + struct cam_packet *packet = NULL; + size_t remain_len = 0; + + if (!ctx || !cmd) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd); + return -EINVAL; + } + + if (!ctx->hw_mgr_intf->hw_config_stream_settings) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + return rc; + } + + rc = cam_context_validate_thread(); + if (rc) { + CAM_ERR(CAM_CTXT, + "Not executing in the right context"); + return rc; + } + + rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle, + &packet_addr, &len); + if (rc) { + CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address", + ctx->dev_name, ctx->ctx_id); + rc = -EINVAL; + return rc; + } + + packet_u = (struct cam_packet *) ((uint8_t *)packet_addr + + (uint32_t)cmd->offset); + remain_len = len - (uint32_t)cmd->offset; + + rc = cam_packet_util_copy_pkt_to_kmd(packet_u, &packet, remain_len); + if (rc) { + CAM_ERR(CAM_CTXT, "Copying packet to KMD failed"); + goto put_ref; + } + + cfg.packet = packet; + cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + cfg.priv = NULL; + + CAM_DBG(CAM_CTXT, "Processing config settings"); + rc = ctx->hw_mgr_intf->hw_config_stream_settings( + ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (rc) { + CAM_ERR(CAM_CTXT, + "[%s][%d] Config failed stream settings", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + } + + cam_common_mem_free(packet); + packet = NULL; +put_ref: + cam_mem_put_cpu_buf((int32_t) cmd->packet_handle); + return rc; +} + +int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc = 0; + struct cam_ctx_request 
*req = NULL; + struct cam_hw_prepare_update_args cfg; + struct cam_packet *packet = NULL; + size_t remain_len = 0; + int32_t i = 0, j = 0; + + if (!ctx || !cmd) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd); + return -EINVAL; + } + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + return -EFAULT; + } + rc = cam_context_validate_thread(); + if (rc) + return rc; + + spin_lock(&ctx->lock); + if (!list_empty(&ctx->free_req_list)) { + req = list_first_entry(&ctx->free_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + } + spin_unlock(&ctx->lock); + + if (!req) { + CAM_ERR(CAM_CTXT, "[%s][%d] No more request obj free", + ctx->dev_name, ctx->ctx_id); + return -ENOMEM; + } + + if (req->packet) { + CAM_WARN(CAM_CTXT, "[%s][%d] Missing free request local packet", + ctx->dev_name, ctx->ctx_id); + /* Remove buffer ref counts if any */ + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + cam_common_mem_free(req->packet); + req->packet = NULL; + } + + INIT_LIST_HEAD(&req->buf_tracker); + INIT_LIST_HEAD(&req->list); + req->ctx = ctx; + req->num_hw_update_entries = 0; + req->num_in_map_entries = 0; + req->num_out_map_entries = 0; + req->num_out_acked = 0; + req->flushed = 0; + atomic_set(&req->num_in_acked, 0); + + remain_len = cam_context_parse_config_cmd(ctx, cmd, &packet); + if (IS_ERR_OR_NULL(packet)) { + rc = PTR_ERR(packet); + goto free_req; + } + + if (cam_packet_util_validate_packet(packet, remain_len)) { + CAM_ERR(CAM_CTXT, "Invalid packet params"); + rc = -EINVAL; + goto free_packet; + } + + if (packet->header.request_id <= ctx->last_flush_req) { + CAM_INFO(CAM_CORE, + "request %lld has been flushed, reject packet", + packet->header.request_id); + rc = -EBADR; + goto free_packet; + } + + if (packet->header.request_id > ctx->last_flush_req) + ctx->last_flush_req = 0; + + /* preprocess the configuration */ + cfg.packet = packet; + cfg.remain_len = remain_len; + 
cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + cfg.max_hw_update_entries = ctx->max_hw_update_entries; + cfg.num_hw_update_entries = req->num_hw_update_entries; + cfg.hw_update_entries = req->hw_update_entries; + cfg.max_out_map_entries = ctx->max_out_map_entries; + cfg.out_map_entries = req->out_map_entries; + cfg.max_in_map_entries = ctx->max_in_map_entries; + cfg.in_map_entries = req->in_map_entries; + cfg.pf_data = &(req->pf_data); + cfg.priv = req->req_priv; + cfg.num_in_map_entries = 0; + cfg.num_out_map_entries = 0; + cfg.buf_tracker = &req->buf_tracker; + memset(req->out_map_entries, 0, sizeof(struct cam_hw_fence_map_entry) + * ctx->max_out_map_entries); + + rc = ctx->hw_mgr_intf->hw_prepare_update( + ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (rc != 0) { + CAM_ERR(CAM_CTXT, + "[%s][%d] Prepare config packet failed in HW layer", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto free_packet; + } + + req->num_hw_update_entries = cfg.num_hw_update_entries; + req->num_out_map_entries = cfg.num_out_map_entries; + req->num_in_map_entries = cfg.num_in_map_entries; + atomic_set(&req->num_in_acked, 0); + req->request_id = packet->header.request_id; + req->status = 1; + req->req_priv = cfg.priv; + req->pf_data.req = req; + req->packet = packet; + + for (i = 0; i < req->num_out_map_entries; i++) { + rc = cam_sync_get_obj_ref(req->out_map_entries[i].sync_id); + if (rc) { + CAM_ERR(CAM_CTXT, "Can't get ref for sync %d", + req->out_map_entries[i].sync_id); + goto put_ref; + } + } + + spin_lock(&ctx->lock); + list_add_tail(&req->list, &ctx->pending_req_list); + spin_unlock(&ctx->lock); + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving req[%llu] from free_list to pending_list", + ctx->dev_name, ctx->ctx_id, req->request_id); + + if (req->num_in_map_entries > 0) { + for (j = 0; j < req->num_in_map_entries; j++) { + rc = cam_sync_check_valid( + req->in_map_entries[j].sync_id); + if (rc) { + spin_lock(&ctx->lock); + 
list_del_init(&req->list); + spin_unlock(&ctx->lock); + CAM_ERR(CAM_CTXT, + "invalid in map sync object %d", + req->in_map_entries[j].sync_id); + goto put_ref; + } + } + + for (j = 0; j < req->num_in_map_entries; j++) { + cam_context_getref(ctx); + rc = cam_sync_register_callback( + cam_context_sync_callback, + (void *)req, + req->in_map_entries[j].sync_id); + if (rc) { + CAM_ERR(CAM_CTXT, + "[%s][%d] Failed register fence cb: %d ret = %d", + ctx->dev_name, ctx->ctx_id, + req->in_map_entries[j].sync_id, rc); + spin_lock(&ctx->lock); + list_del_init(&req->list); + spin_unlock(&ctx->lock); + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving req[%llu] from pending_list to free_list", + ctx->dev_name, ctx->ctx_id, + req->request_id); + + cam_context_putref(ctx); + goto put_ref; + } + CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d", + req->in_map_entries[j].sync_id, rc); + } + } else { + struct cam_req_mgr_apply_request apply; + + /* If there are no input fences submit request immediately */ + apply.request_id = req->request_id; + mutex_lock(&ctx->sync_mutex); + rc = cam_context_apply_req_to_hw(req, &apply); + mutex_unlock(&ctx->sync_mutex); + if (rc) + CAM_ERR(CAM_CTXT, + "[%s][%d] : Failed to apply req: %llu with no input dependencies", + ctx->dev_name, ctx->ctx_id, req->request_id); + } + + return rc; +put_ref: + for (--i; i >= 0; i--) { + if (cam_sync_put_obj_ref(req->out_map_entries[i].sync_id)) + CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d", + req->out_map_entries[i].sync_id); + } +free_packet: + cam_common_mem_free(packet); +free_req: + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req->packet = NULL; + req->ctx = NULL; + + spin_lock(&ctx->lock); + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock(&ctx->lock); + + return rc; +} + +int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *cmd) +{ + int rc; + struct cam_hw_acquire_args param; + struct 
cam_create_dev_hdl req_hdl_param; + struct cam_hw_release_args release; + + if (!ctx || !cmd) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd); + rc = -EINVAL; + goto end; + } + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto end; + } + + CAM_DBG(CAM_CTXT, "ses hdl: %x, num_res: %d, type: %d, res: %lld", + cmd->session_handle, cmd->num_resources, cmd->handle_type, + cmd->resource_hdl); + + if (cmd->num_resources > CAM_CTX_RES_MAX) { + CAM_ERR(CAM_CTXT, "[%s][%d] resource[%d] limit exceeded", + ctx->dev_name, ctx->ctx_id, cmd->num_resources); + rc = -ENOMEM; + goto end; + } + + /* for now we only support user pointer */ + if (cmd->handle_type != 1) { + CAM_ERR(CAM_CTXT, "[%s][%d] Only user pointer is supported", + ctx->dev_name, ctx->ctx_id); + rc = -EINVAL; + goto end; + } + + /* fill in parameters */ + param.context_data = ctx; + param.ctx_id = ctx->ctx_id; + param.event_cb = ctx->irq_cb_intf; + param.mini_dump_cb = ctx->mini_dump_cb; + param.num_acq = cmd->num_resources; + param.acquire_info = cmd->resource_hdl; + param.api_version = cmd->struct_version; + + /* Allocate memory for hw and map entries */ + rc = cam_context_allocate_mem_hw_entries(ctx); + + if (rc != 0) { + CAM_ERR(CAM_CTXT, "[%s][%d] Alloc entries failed", + ctx->dev_name, ctx->ctx_id); + goto end; + } + + /* call HW manager to reserve the resource */ + rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv, + ¶m); + if (rc != 0) { + CAM_ERR(CAM_CTXT, + "[%s][%d] Acquire device failed session hdl: 0x%x dev hdl: 0x%x", + ctx->dev_name, ctx->ctx_id, ctx->session_hdl, ctx->dev_hdl); + goto end; + } + + ctx->ctxt_to_hw_map = param.ctxt_to_hw_map; + ctx->hw_mgr_ctx_id = param.hw_mgr_ctx_id; + + snprintf(ctx->ctx_id_string, sizeof(ctx->ctx_id_string), + "%s_ctx[%d]_hwmgrctx[%d]_Done", + ctx->dev_name, + ctx->ctx_id, + ctx->hw_mgr_ctx_id); + + /* if hw resource acquire successful, acquire dev 
handle */ + req_hdl_param.session_hdl = cmd->session_handle; + /* bridge is not ready for these flags. so false for now */ + req_hdl_param.v4l2_sub_dev_flag = 0; + req_hdl_param.media_entity_flag = 0; + req_hdl_param.priv = ctx; + req_hdl_param.ops = ctx->crm_ctx_intf; + req_hdl_param.dev_id = ctx->dev_id; + ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param); + if (ctx->dev_hdl <= 0) { + rc = -EFAULT; + CAM_ERR(CAM_CTXT, "[%s][%d] Can not create device handle", + ctx->dev_name, ctx->ctx_id); + goto free_hw; + } + cmd->dev_handle = ctx->dev_hdl; + + /* store session information */ + ctx->session_hdl = cmd->session_handle; + + CAM_TRACE(CAM_CTXT, "Ctx[%s]: session_hdl 0x%x, dev_hdl 0x%x", + ctx->ctx_id_string, ctx->session_hdl, ctx->dev_hdl); + + return rc; + +free_hw: + release.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release); + ctx->ctxt_to_hw_map = NULL; + ctx->dev_hdl = -1; +end: + return rc; +} + +int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx) +{ + struct cam_hw_flush_args flush_args = {0}; + struct list_head temp_list; + struct list_head *list; + struct cam_ctx_request *req; + uint32_t i, num_entries = 0; + int rc = 0; + bool free_req; + + CAM_DBG(CAM_CTXT, "[%s] E: NRT flush ctx", ctx->dev_name); + + /* + * flush pending requests, take the sync lock to synchronize with the + * sync callback thread so that the sync cb thread does not try to + * submit request to h/w while the request is being flushed + */ + mutex_lock(&ctx->sync_mutex); + INIT_LIST_HEAD(&temp_list); + spin_lock(&ctx->lock); + list_splice_init(&ctx->pending_req_list, &temp_list); + spin_unlock(&ctx->lock); + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving all pending requests from pending_list to temp_list", + ctx->dev_name, ctx->ctx_id); + + flush_args.last_flush_req = ctx->last_flush_req; + list_for_each(list, &temp_list) { + num_entries++; + } + if (num_entries) { + 
flush_args.flush_req_pending = + CAM_MEM_ZALLOC_ARRAY(num_entries, sizeof(void *), GFP_KERNEL); + if (!flush_args.flush_req_pending) { + CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail", + ctx->dev_name, ctx->ctx_id); + mutex_unlock(&ctx->sync_mutex); + rc = -ENOMEM; + goto end; + } + } + + while (num_entries) { + + if (list_empty(&temp_list)) + break; + + req = list_first_entry(&temp_list, + struct cam_ctx_request, list); + + list_del_init(&req->list); + req->flushed = 1; + + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + flush_args.flush_req_pending[flush_args.num_req_pending++] = + req->req_priv; + + free_req = false; + for (i = 0; i < req->num_in_map_entries; i++) { + rc = cam_sync_deregister_callback( + cam_context_sync_callback, + (void *)req, + req->in_map_entries[i].sync_id); + if (!rc) { + cam_context_putref(ctx); + if (atomic_inc_return(&req->num_in_acked) == + req->num_in_map_entries) + free_req = true; + } + } + + for (i = 0; i < req->num_out_map_entries; i++) { + if (req->out_map_entries[i].sync_id != -1) { + rc = cam_sync_signal( + req->out_map_entries[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_COMMON_EVENT_FLUSH); + if (rc == -EALREADY) { + CAM_ERR(CAM_CTXT, + "Req: %llu already signalled, sync_id:%d", + req->request_id, + req->out_map_entries[i].sync_id); + break; + } + } + } + + /* + * If we have deregistered the last sync callback, req will + * not be put on the free list. 
So put it on the free list here + */ + if (free_req) { + req->ctx = NULL; + if (req->packet) { + cam_common_mem_free(req->packet); + req->packet = NULL; + } + + spin_lock(&ctx->lock); + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock(&ctx->lock); + } + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Deleting req[%llu] from temp_list", + ctx->dev_name, ctx->ctx_id, req->request_id); + } + mutex_unlock(&ctx->sync_mutex); + + if (ctx->hw_mgr_intf->hw_flush) { + num_entries = 0; + + /* + * Hold the ctx lock till the active requests are populated for + * flush, allocate memory in atomic context. We want to ensure the + * allocated memory is sufficient for the requests in the active list. + * The active list should not be reinitialized since it is possible that + * any buf done's from HW is serviced before the flush + * makes it to the HW layer + */ + spin_lock(&ctx->lock); + list_for_each(list, &ctx->active_req_list) + num_entries++; + + if (num_entries) { + flush_args.flush_req_active = + CAM_MEM_ZALLOC_ARRAY(num_entries, sizeof(void *), GFP_ATOMIC); + if (!flush_args.flush_req_active) { + spin_unlock(&ctx->lock); + CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail", + ctx->dev_name, ctx->ctx_id); + rc = -ENOMEM; + goto end; + } + + list_for_each_entry(req, &ctx->active_req_list, list) { + flush_args.flush_req_active[flush_args.num_req_active++] = + req->req_priv; + } + } + spin_unlock(&ctx->lock); + + if (flush_args.num_req_pending || flush_args.num_req_active) { + flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + flush_args.flush_type = CAM_FLUSH_TYPE_ALL; + ctx->hw_mgr_intf->hw_flush( + ctx->hw_mgr_intf->hw_mgr_priv, &flush_args); + } + } + + INIT_LIST_HEAD(&temp_list); + spin_lock(&ctx->lock); + list_splice_init(&ctx->active_req_list, &temp_list); + spin_unlock(&ctx->lock); + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving all requests from active_list to temp_list", + 
ctx->dev_name, ctx->ctx_id); + + while (num_entries) { + + if (list_empty(&temp_list)) + break; + + req = list_first_entry(&temp_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + for (i = 0; i < req->num_out_map_entries; i++) { + if (req->out_map_entries[i].sync_id != -1) { + rc = cam_sync_signal( + req->out_map_entries[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_COMMON_EVENT_FLUSH); + if (rc == -EALREADY) { + CAM_ERR(CAM_CTXT, + "Req: %llu already signalled ctx: %pK dev_name: %s dev_handle: %d ctx_state: %d", + req->request_id, req->ctx, + req->ctx->dev_name, + req->ctx->dev_hdl, + req->ctx->state); + break; + } + } + } + + if (req->packet) { + cam_common_mem_free(req->packet); + req->packet = NULL; + } + req->ctx = NULL; + + spin_lock(&ctx->lock); + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock(&ctx->lock); + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving req[%llu] from temp_list to free_list", + ctx->dev_name, ctx->ctx_id, req->request_id); + } + + rc = 0; + CAM_DBG(CAM_CTXT, "[%s] X: NRT flush ctx", ctx->dev_name); + +end: + CAM_MEM_FREE(flush_args.flush_req_active); + CAM_MEM_FREE(flush_args.flush_req_pending); + return rc; +} + +int32_t cam_context_flush_req_to_hw(struct cam_context *ctx, + struct cam_context_utils_flush_args *args) +{ + struct cam_ctx_request *req = NULL; + struct cam_hw_flush_args flush_args = {0}; + struct cam_flush_dev_cmd *cmd = args->cmd; + uint32_t i = 0; + int32_t sync_id = 0; + int rc = 0; + bool free_req = false; + + CAM_DBG(CAM_CTXT, "[%s] E: NRT flush req", ctx->dev_name); + + flush_args.flush_req_pending = CAM_MEM_ZALLOC(sizeof(void *), GFP_KERNEL); + if (!flush_args.flush_req_pending) { + CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail", + ctx->dev_name, ctx->ctx_id); + rc = -ENOMEM; + goto end; + } + mutex_lock(&ctx->sync_mutex); + spin_lock(&ctx->lock); + 
list_for_each_entry(req, &ctx->pending_req_list, list) { + if (req->request_id != cmd->req_id) + continue; + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Deleting req[%llu] from pending_list", + ctx->dev_name, ctx->ctx_id, req->request_id); + + list_del_init(&req->list); + req->flushed = 1; + + flush_args.flush_req_pending[flush_args.num_req_pending++] = + req->req_priv; + break; + } + spin_unlock(&ctx->lock); + mutex_unlock(&ctx->sync_mutex); + + if (ctx->hw_mgr_intf->hw_flush) { + if (!flush_args.num_req_pending) { + flush_args.flush_req_active = CAM_MEM_ZALLOC(sizeof(void *), GFP_KERNEL); + if (!flush_args.flush_req_active) { + CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail", + ctx->dev_name, ctx->ctx_id); + rc = -ENOMEM; + goto end; + } + + spin_lock(&ctx->lock); + list_for_each_entry(req, &ctx->active_req_list, list) { + if (req->request_id != cmd->req_id) + continue; + + if (!args->flush_active_req) { + CAM_ERR(CAM_CTXT, + "[%s][%d] : Flushing active request id: %llu is not supported", + ctx->dev_name, ctx->ctx_id, req->request_id); + rc = -EPERM; + spin_unlock(&ctx->lock); + goto end; + } + + list_del_init(&req->list); + flush_args.flush_req_active[ + flush_args.num_req_active++] = + req->req_priv; + break; + } + spin_unlock(&ctx->lock); + } + + if (flush_args.num_req_pending || flush_args.num_req_active) { + flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + flush_args.flush_type = CAM_FLUSH_TYPE_REQ; + ctx->hw_mgr_intf->hw_flush( + ctx->hw_mgr_intf->hw_mgr_priv, &flush_args); + } + } + + if (req) { + if (flush_args.num_req_pending) { + for (i = 0; i < req->num_in_map_entries; i++) { + rc = cam_sync_deregister_callback( + cam_context_sync_callback, + (void *)req, + req->in_map_entries[i].sync_id); + if (rc) + continue; + + cam_context_putref(ctx); + if (atomic_inc_return(&req->num_in_acked) == + req->num_in_map_entries) + free_req = true; + } + } + + if (flush_args.num_req_pending || flush_args.num_req_active) 
{ + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + + for (i = 0; i < req->num_out_map_entries; i++) { + sync_id = + req->out_map_entries[i].sync_id; + if (sync_id != -1) { + rc = cam_sync_signal(sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_COMMON_EVENT_FLUSH); + if (rc == -EALREADY) { + CAM_ERR(CAM_CTXT, + "Req: %llu already signalled, sync_id:%d", + req->request_id, sync_id); + break; + } + } + } + if (flush_args.num_req_active || free_req) { + req->ctx = NULL; + if (req->packet) { + cam_common_mem_free(req->packet); + req->packet = NULL; + } + + spin_lock(&ctx->lock); + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock(&ctx->lock); + + if (cam_debug_ctx_req_list & ctx->dev_id) + CAM_INFO(CAM_CTXT, + "[%s][%d] : Moving req[%llu] from %s to free_list", + ctx->dev_name, ctx->ctx_id, + req->request_id, + flush_args.num_req_active ? + "active_list" : + "pending_list"); + } + } + + rc = 0; + } else { + CAM_ERR(CAM_CTXT, + "[%s][%d] : No pending or active request to flush for req id: %llu", + ctx->dev_name, ctx->ctx_id, cmd->req_id); + rc = -EINVAL; + } + + CAM_DBG(CAM_CTXT, "[%s] X: NRT flush req", ctx->dev_name); + +end: + CAM_MEM_FREE(flush_args.flush_req_active); + CAM_MEM_FREE(flush_args.flush_req_pending); + return rc; +} + +int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx, + struct cam_context_utils_flush_args *flush_args) +{ + int rc = 0; + + if (!ctx || !flush_args) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, flush_args); + rc = -EINVAL; + goto end; + } + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto end; + } + + if (flush_args->cmd->flush_type == CAM_FLUSH_TYPE_ALL) { + ctx->last_flush_req = flush_args->cmd->req_id; + rc = cam_context_flush_ctx_to_hw(ctx); + } else if (flush_args->cmd->flush_type == CAM_FLUSH_TYPE_REQ) + rc = cam_context_flush_req_to_hw(ctx, flush_args); + else { + rc = -EINVAL; + CAM_ERR(CAM_CORE, 
"[%s][%d] Invalid flush type %d", + ctx->dev_name, ctx->ctx_id, flush_args->cmd->flush_type); + } + +end: + return rc; +} + +int32_t cam_context_start_dev_to_hw(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc = 0; + struct cam_hw_start_args arg; + + if (!ctx || !cmd) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd); + rc = -EINVAL; + goto end; + } + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto end; + } + + if ((cmd->session_handle != ctx->session_hdl) || + (cmd->dev_handle != ctx->dev_hdl)) { + CAM_ERR(CAM_CTXT, + "[%s][%d] Invalid session hdl[%d], dev_handle[%d]", + ctx->dev_name, ctx->ctx_id, + cmd->session_handle, cmd->dev_handle); + rc = -EPERM; + goto end; + } + + if (ctx->hw_mgr_intf->hw_start) { + arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, + &arg); + if (rc) { + /* HW failure. user need to clean up the resource */ + CAM_ERR(CAM_CTXT, "[%s][%d] Start HW failed", + ctx->dev_name, ctx->ctx_id); + goto end; + } + } + +end: + return rc; +} + +int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx) +{ + int rc = 0; + struct cam_hw_stop_args stop; + + if (!ctx) { + CAM_ERR(CAM_CTXT, "Invalid input param"); + rc = -EINVAL; + goto end; + } + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto end; + } + + rc = cam_context_validate_thread(); + if (rc) + goto end; + + rc = cam_context_flush_ctx_to_hw(ctx); + if (rc) + goto end; + + /* stop hw first */ + if (ctx->hw_mgr_intf->hw_stop) { + stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv, + &stop); + } + +end: + return rc; +} + +static void cam_context_util_translate_pf_info_to_string(struct cam_req_mgr_pf_err_msg *pf_msg, + char **pf_evt_str, char **pf_type_str, char **pf_stage_str) 
+{ + switch (pf_msg->pf_evt) { + case CAM_REQ_MGR_PF_EVT_BUF_NOT_FOUND: + *pf_evt_str = "PF Evt: faulting buffer not found"; + break; + case CAM_REQ_MGR_PF_EVT_BUF_FOUND_IO_CFG: + *pf_evt_str = "PF Evt: faulting IO Config buffer"; + break; + case CAM_REQ_MGR_PF_EVT_BUF_FOUND_REF_BUF: + *pf_evt_str = "PF Evt: faulting Ref buffer"; + break; + case CAM_REQ_MGR_PF_EVT_BUF_FOUND_CDM: + *pf_evt_str = "PF Evt: faulting CDM buffer"; + break; + case CAM_REQ_MGR_PF_EVT_BUF_FOUND_SHARED: + *pf_evt_str = "PF Evt: faulting shared memory buffer"; + break; + default: + CAM_ERR(CAM_CTXT, "Invalid PF Evt %u", pf_msg->pf_evt); + *pf_evt_str = "PF Evt: Unknown"; + } + + switch (pf_msg->pf_type) { + case CAM_REQ_MGR_PF_TYPE_NULL: + *pf_type_str = "PF Type: faulting addr is NULL"; + break; + case CAM_REQ_MGR_PF_TYPE_OUT_OF_BOUND: + *pf_type_str = "PF Type: faulting addr out of bounds"; + break; + case CAM_REQ_MGR_PF_TYPE_MAPPED_REGION: + *pf_type_str = "PF Type: faulting addr within mapped region"; + break; + default: + CAM_ERR(CAM_CTXT, "Invalid PF Type %u", pf_msg->pf_type); + *pf_type_str = "PF Type: Unknown"; + } + + switch (pf_msg->pf_stage) { + case CAM_REQ_MGR_STAGE1_FAULT: + *pf_stage_str = "PF Stage: stage 1 (non-secure)"; + break; + case CAM_REQ_MGR_STAGE2_FAULT: + *pf_stage_str = "PF Stage: stage 2 (secure)"; + break; + default: + CAM_ERR(CAM_CTXT, "Invalid PF Stage %u", pf_msg->pf_stage); + *pf_stage_str = "PF Stage: Unknown"; + } +} + +static void cam_context_get_pf_evt(struct cam_context *ctx, + struct cam_context_pf_info *pf_context_info, struct cam_req_mgr_pf_err_msg *pf_msg, + char *log_info, size_t *len, uint32_t buf_size) +{ + if (pf_context_info->mem_type != CAM_FAULT_BUF_NOT_FOUND) { + pf_msg->buf_hdl = pf_context_info->buf_hdl; + pf_msg->offset = pf_context_info->offset; + pf_msg->far_delta = pf_context_info->delta; + pf_msg->req_id = pf_context_info->req_id; + + CAM_ERR_BUF(CAM_CTXT, log_info, buf_size, len, + "buf_hdl: 0x%x, offset: 0x%x, req_id: %llu, delta: 
0x%llx", + pf_msg->buf_hdl, pf_msg->offset, pf_msg->req_id, + pf_msg->far_delta); + + if (pf_context_info->mem_type == CAM_FAULT_PATCH_BUF) { + pf_msg->patch_id = pf_context_info->patch_idx; + if (pf_context_info->mem_flag & CAM_MEM_FLAG_CMD_BUF_TYPE) + pf_msg->pf_evt = CAM_REQ_MGR_PF_EVT_BUF_FOUND_CDM; + else if (pf_context_info->mem_flag & CAM_MEM_FLAG_HW_SHARED_ACCESS) + pf_msg->pf_evt = CAM_REQ_MGR_PF_EVT_BUF_FOUND_SHARED; + else + pf_msg->pf_evt = CAM_REQ_MGR_PF_EVT_BUF_FOUND_REF_BUF; + + CAM_ERR_BUF(CAM_CTXT, log_info, buf_size, len, + "Faulted patch found in patch index: %u", + pf_msg->patch_id); + } else + pf_msg->pf_evt = CAM_REQ_MGR_PF_EVT_BUF_FOUND_IO_CFG; + + } else + pf_msg->pf_evt = CAM_REQ_MGR_PF_EVT_BUF_NOT_FOUND; + +} + +int32_t cam_context_send_pf_evt(struct cam_context *ctx, + struct cam_hw_dump_pf_args *pf_args) +{ + struct cam_req_mgr_message req_msg = {0}; + struct cam_req_mgr_pf_err_msg *pf_msg = &req_msg.u.pf_err_msg; + struct cam_context_pf_info *pf_context_info; + struct cam_smmu_pf_info *pf_smmu_info; + char log_info[512]; + int rc = 0; + size_t buf_size = 0, len = 0; + char *pf_evt_str, *pf_type_str, *pf_stage_str; + + if (!pf_args) { + CAM_ERR(CAM_CTXT, "Invalid input PF args"); + return -EINVAL; + } + + buf_size = sizeof(log_info); + pf_msg->pf_evt = CAM_REQ_MGR_PF_EVT_BUF_NOT_FOUND; + pf_context_info = &(pf_args->pf_context_info); + pf_smmu_info = pf_args->pf_smmu_info; + + if (ctx) { + req_msg.session_hdl = ctx->session_hdl; + pf_msg->device_hdl = ctx->dev_hdl; + pf_msg->link_hdl = ctx->link_hdl; + pf_msg->port_id = pf_context_info->resource_type; + + CAM_ERR_BUF(CAM_CTXT, log_info, buf_size, &len, + "Faulted ctx: [%s][%d] session_hdl: 0x%x, device_hdl: 0x%x, link_hdl: 0x%x, port_id: 0x%x", + ctx->dev_name, ctx->ctx_id, req_msg.session_hdl, pf_msg->device_hdl, + pf_msg->link_hdl, pf_msg->port_id); + + cam_context_get_pf_evt(ctx, pf_context_info, pf_msg, log_info, &len, buf_size); + } else + CAM_ERR_BUF(CAM_CTXT, log_info, buf_size, 
&len, "Faulted ctx NOT found"); + + if (pf_smmu_info->iova == 0) + pf_msg->pf_type = CAM_REQ_MGR_PF_TYPE_NULL; + else if (pf_smmu_info->in_map_region) + pf_msg->pf_type = CAM_REQ_MGR_PF_TYPE_MAPPED_REGION; + else + pf_msg->pf_type = CAM_REQ_MGR_PF_TYPE_OUT_OF_BOUND; + + pf_msg->pf_stage = pf_smmu_info->is_secure; + pf_msg->bid = pf_smmu_info->bid; + pf_msg->pid = pf_smmu_info->pid; + pf_msg->mid = pf_smmu_info->mid; + + cam_context_util_translate_pf_info_to_string(pf_msg, &pf_evt_str, + &pf_type_str, &pf_stage_str); + + CAM_ERR_BUF(CAM_CTXT, log_info, buf_size, &len, + "%s, %s, %s", + pf_evt_str, pf_type_str, pf_stage_str); + + CAM_ERR_BUF(CAM_CTXT, log_info, buf_size, &len, + "bid: %u, pid: %u, mid: %u", + pf_msg->bid, pf_msg->pid, pf_msg->mid); + + CAM_ERR(CAM_CTXT, "PF INFO: %s", log_info); + + rc = cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_PF_ERROR, + V4L_EVENT_CAM_REQ_MGR_EVENT); + + return rc; +} + +int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx, + struct cam_hw_dump_pf_args *pf_args, + struct cam_hw_mgr_pf_request_info *pf_req_info) +{ + int rc = 0; + struct cam_hw_cmd_args cmd_args; + struct cam_hw_cmd_pf_args pf_cmd_args; + + if (!ctx) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK ", ctx); + rc = -EINVAL; + goto end; + } + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto end; + } + + if (ctx->hw_mgr_intf->hw_cmd) { + pf_cmd_args.pf_args = pf_args; + pf_cmd_args.pf_req_info = pf_req_info; + cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO; + cmd_args.u.pf_cmd_args = &pf_cmd_args; + ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &cmd_args); + } + +end: + return rc; +} + +int32_t cam_context_dump_hw_acq_info(struct cam_context *ctx) +{ + int rc = 0; + struct cam_hw_cmd_args cmd_args; + + if (!ctx) { + CAM_ERR(CAM_CTXT, "Invalid input params"); + rc = -EINVAL; + goto end; + } + 
+ if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto end; + } + + if (ctx->hw_mgr_intf->hw_cmd) { + cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_ACQ_INFO; + ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &cmd_args); + } + +end: + return rc; +} + +static int cam_context_dump_data_validaion(void *src, void *dest, + uint32_t base_len, uint32_t actual_len, uint32_t bytes_required) +{ + if (base_len + bytes_required >= actual_len) { + CAM_ERR(CAM_CTXT, "actual len %pK base len %pK", + actual_len, base_len); + return -ENOSPC; + } + memcpy(dest, src, bytes_required); + return 0; +} + +static int cam_context_stream_dump_validation(struct cam_context *ctx, + uint64_t *addr, uint32_t local_len, uint32_t buf_len) +{ + struct cam_context_stream_dump stream_dump; + + stream_dump.hw_mgr_ctx_id = ctx->hw_mgr_ctx_id; + stream_dump.dev_id = ctx->dev_id; + stream_dump.dev_hdl = ctx->dev_hdl; + stream_dump.link_hdl = ctx->link_hdl; + stream_dump.session_hdl = ctx->session_hdl; + stream_dump.refcount = refcount_read(&(ctx->refcount.refcount)); + stream_dump.last_flush_req = ctx->last_flush_req; + stream_dump.state = ctx->state; + if (cam_context_dump_data_validaion(&stream_dump, addr, + local_len, buf_len, + sizeof(struct cam_context_stream_dump))) { + CAM_WARN(CAM_CTXT, "failed to copy the stream info"); + return -ENOSPC; + } + return 0; +} + +static int cam_context_user_dump(struct cam_context *ctx, + struct cam_hw_dump_args *dump_args) +{ + int rc = 0, i; + struct cam_ctx_request *req = NULL, *req_temp; + struct cam_context_dump_header *hdr; + uint8_t *dst; + uint64_t *addr, *start; + size_t buf_len, remain_len; + uintptr_t cpu_addr; + uint32_t local_len; + + if (!ctx || !dump_args) { + CAM_ERR(CAM_CORE, "Invalid parameters %pK %pK", + ctx, dump_args); + return -EINVAL; + } + + rc = cam_mem_get_cpu_buf(dump_args->buf_handle, + &cpu_addr, 
&buf_len); + + if (rc) { + CAM_ERR(CAM_CTXT, "Invalid hdl %u rc %d", + dump_args->buf_handle, rc); + return rc; + } + + if (dump_args->offset >= buf_len) { + CAM_WARN(CAM_CTXT, "dump buffer overshoot offset %zu len %zu", + dump_args->offset, buf_len); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return -ENOSPC; + } + + /* Dump context info */ + remain_len = buf_len - dump_args->offset; + if (remain_len < sizeof(struct cam_context_dump_header)) { + CAM_WARN(CAM_CTXT, + "No sufficient space in dump buffer for headers, remain buf size: %d, header size: %d", + remain_len, sizeof(struct cam_context_dump_header)); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return -ENOSPC; + } + + dst = (uint8_t *)cpu_addr + dump_args->offset; + hdr = (struct cam_context_dump_header *)dst; + local_len = + (dump_args->offset + sizeof(struct cam_context_dump_header)); + scnprintf(hdr->tag, CAM_CTXT_DUMP_TAG_MAX_LEN, + "%s_CTX_INFO:", ctx->dev_name); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_context_dump_header)); + start = addr; + if (cam_context_stream_dump_validation(ctx, addr, local_len, buf_len)) { + CAM_WARN(CAM_CTXT, "%s_CTX_INFO failed to copy the stream info ", ctx->dev_name); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return -ENOSPC; + } + addr = (uint64_t *)((uint8_t *)addr + sizeof(struct cam_context_stream_dump)); + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_context_dump_header); + + spin_lock(&ctx->lock); + /* Dump waiting requests */ + if (!list_empty(&ctx->wait_req_list)) { + list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) { + for (i = 0; i < req->num_out_map_entries; i++) { + remain_len = buf_len - dump_args->offset; + if (remain_len < sizeof(struct cam_context_dump_header)) { + CAM_WARN(CAM_CTXT, + "No sufficient space in dump buffer for headers, remain buf size: %d, header size: %d", + remain_len, sizeof(struct cam_context_dump_header)); + rc = 
-ENOSPC; + goto cleanup; + } + + dst = (uint8_t *)cpu_addr + dump_args->offset; + hdr = (struct cam_context_dump_header *)dst; + local_len = dump_args->offset + + sizeof(struct cam_context_dump_header); + scnprintf(hdr->tag, CAM_CTXT_DUMP_TAG_MAX_LEN, + "%s_OUT_FENCE_REQUEST_APPLIED.%d.%pad.%d:", + ctx->dev_name, + req->out_map_entries[i].resource_handle, + &(req->out_map_entries[i].image_buf_addr), + req->out_map_entries[i].sync_id); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_context_dump_header)); + start = addr; + if (cam_context_dump_data_validaion(&req->request_id, addr, + local_len, buf_len, + sizeof(struct cam_context_each_req_info))) { + CAM_WARN(CAM_CTXT, "%s_CTX_INFO waiting_req: failed to copy the request info", + ctx->dev_name); + goto cleanup; + } + addr = (uint64_t *)((uint8_t *)addr + + sizeof(struct cam_context_each_req_info)); + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_context_dump_header); + } + } + } + + /* Dump pending requests */ + if (!list_empty(&ctx->pending_req_list)) { + list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) { + for (i = 0; i < req->num_out_map_entries; i++) { + remain_len = buf_len - dump_args->offset; + if (remain_len < sizeof(struct cam_context_dump_header)) { + CAM_WARN(CAM_CTXT, + "No sufficient space in dump buffer for headers, remain buf size: %d, header size: %d", + remain_len, sizeof(struct cam_context_dump_header)); + rc = -ENOSPC; + goto cleanup; + } + + dst = (uint8_t *)cpu_addr + dump_args->offset; + hdr = (struct cam_context_dump_header *)dst; + local_len = dump_args->offset + + sizeof(struct cam_context_dump_header); + scnprintf(hdr->tag, CAM_CTXT_DUMP_TAG_MAX_LEN, + "%s_OUT_FENCE_REQUEST_PENDING.%d.%pad.%d:", + ctx->dev_name, + req->out_map_entries[i].resource_handle, + &(req->out_map_entries[i].image_buf_addr), + req->out_map_entries[i].sync_id); + hdr->word_size = sizeof(uint64_t); + addr = 
(uint64_t *)(dst + sizeof(struct cam_context_dump_header)); + start = addr; + if (cam_context_dump_data_validaion(&req->request_id, addr, + local_len, buf_len, + sizeof(struct cam_context_each_req_info))) { + CAM_WARN(CAM_CTXT, "%s_CTX_INFO pending_req: failed to copy the request info", + ctx->dev_name); + goto cleanup; + } + addr = (uint64_t *)((uint8_t *)addr + + sizeof(struct cam_context_each_req_info)); + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_context_dump_header); + } + } + } + + /* Dump active requests */ + if (!list_empty(&ctx->active_req_list)) { + list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) { + for (i = 0; i < req->num_out_map_entries; i++) { + remain_len = buf_len - dump_args->offset; + if (remain_len < sizeof(struct cam_context_dump_header)) { + CAM_WARN(CAM_CTXT, + "No sufficient space in dump buffer for headers, remain buf size: %d, header size: %d", + remain_len, sizeof(struct cam_context_dump_header)); + rc = -ENOSPC; + goto cleanup; + } + + dst = (uint8_t *)cpu_addr + dump_args->offset; + hdr = (struct cam_context_dump_header *)dst; + local_len = dump_args->offset + + sizeof(struct cam_context_dump_header); + scnprintf(hdr->tag, CAM_CTXT_DUMP_TAG_MAX_LEN, + "%s_OUT_FENCE_REQUEST_ACTIVE.%d.%pad.%d:", + ctx->dev_name, + req->out_map_entries[i].resource_handle, + &(req->out_map_entries[i].image_buf_addr), + req->out_map_entries[i].sync_id); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_context_dump_header)); + start = addr; + if (cam_context_dump_data_validaion(&req->request_id, addr, + local_len, buf_len, + sizeof(struct cam_context_each_req_info))) { + CAM_WARN(CAM_CTXT, "%s_CTX_INFO active_req: failed to copy the request info", + ctx->dev_name); + goto cleanup; + } + addr = (uint64_t *)((uint8_t *)addr + + sizeof(struct cam_context_each_req_info)); + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + 
+ sizeof(struct cam_context_dump_header); + } + } + } +cleanup: + spin_unlock(&ctx->lock); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; +} + +int32_t cam_context_dump_dev_to_hw(struct cam_context *ctx, + struct cam_dump_req_cmd *cmd) +{ + int rc = 0; + struct cam_hw_dump_args dump_args; + + if (!ctx || !cmd) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd); + return -EINVAL; + } + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + return -EFAULT; + } + + if (ctx->hw_mgr_intf->hw_dump) { + dump_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + dump_args.buf_handle = cmd->buf_handle; + dump_args.offset = cmd->offset; + dump_args.request_id = cmd->issue_req_id; + dump_args.error_type = cmd->error_type; + + rc = cam_context_user_dump(ctx, &dump_args); + if (rc) { + CAM_ERR(CAM_CTXT, "[%s][%d] handle[%u] failed", + ctx->dev_name, ctx->ctx_id, dump_args.buf_handle); + return rc; + } + + rc = ctx->hw_mgr_intf->hw_dump( + ctx->hw_mgr_intf->hw_mgr_priv, + &dump_args); + if (rc) { + CAM_ERR(CAM_CTXT, "[%s][%d] handle[%u] failed", + ctx->dev_name, ctx->ctx_id, dump_args.buf_handle); + return rc; + } + + + if (dump_args.offset > cmd->offset) { + cmd->offset = dump_args.offset; + } + } else { + CAM_DBG(CAM_CTXT, "%s hw dump not registered", ctx->dev_name); + } + return rc; +} + +size_t cam_context_parse_config_cmd(struct cam_context *ctx, struct cam_config_dev_cmd *cmd, + struct cam_packet **packet) +{ + size_t len; + uintptr_t packet_addr; + int rc = 0; + struct cam_packet *packet_u; + size_t packet_len = 0; + + if (!ctx || !cmd || !packet) { + CAM_ERR(CAM_CTXT, "invalid args"); + rc = -EINVAL; + goto err; + } + + /* for config dev, only memory handle is supported */ + /* map packet from the memhandle */ + rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle, &packet_addr, &len); + if (rc != 0) { + CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address for handle:%llx", + ctx->dev_name, 
ctx->ctx_id, cmd->packet_handle); + rc = -EINVAL; + goto err; + } + + if ((len < sizeof(struct cam_packet)) || + ((size_t)cmd->offset >= len - sizeof(struct cam_packet))) { + CAM_ERR(CAM_CTXT, "invalid buff length: %zu or offset: %zu", len, + (size_t)cmd->offset); + rc = -EINVAL; + goto put_cpu_buf; + } + + packet_u = (struct cam_packet *) ((uint8_t *)packet_addr + (uint32_t)cmd->offset); + if (IS_ERR_OR_NULL(packet_u)) { + rc = PTR_ERR(packet_u); + goto put_cpu_buf; + } + + packet_len = len - (size_t)cmd->offset; + rc = cam_packet_util_copy_pkt_to_kmd(packet_u, packet, packet_len); + if (rc) { + CAM_ERR(CAM_CTXT, "Copying packet to KMD failed"); + goto put_cpu_buf; + } + + CAM_DBG(CAM_CTXT, + "handle:%llx, addr:0x%zx, offset:%0xllx, len:%zu, req:%llu, size:%u, opcode:0x%x", + cmd->packet_handle, packet_addr, cmd->offset, len, (*packet)->header.request_id, + (*packet)->header.size, (*packet)->header.op_code); + + cam_mem_put_cpu_buf((int32_t) cmd->packet_handle); + return packet_len; + +put_cpu_buf: + if (cmd) + cam_mem_put_cpu_buf((int32_t) cmd->packet_handle); +err: + if (packet) + *packet = ERR_PTR(rc); + return 0; +} + +static void __cam_context_req_mini_dump(struct cam_ctx_request *req, + uint8_t *start_addr, uint8_t *end_addr, + unsigned long *bytes_updated) +{ + struct cam_hw_req_mini_dump *req_md; + struct cam_buf_io_cfg *io_cfg; + struct cam_packet *packet = NULL; + unsigned long bytes_written = 0; + unsigned long bytes_required = 0; + + bytes_required = sizeof(*req_md); + if (start_addr + bytes_written + bytes_required > end_addr) + goto end; + + req_md = (struct cam_hw_req_mini_dump *)start_addr; + + req_md->num_fence_map_in = req->num_in_map_entries; + req_md->num_fence_map_out = req->num_out_map_entries; + req_md->num_in_acked = atomic_read(&req->num_in_acked); + req_md->num_out_acked = req->num_out_acked; + req_md->request_id = req->request_id; + bytes_written += bytes_required; + + if (req->num_out_map_entries) { + bytes_required = sizeof(struct 
cam_hw_fence_map_entry) * + req->num_out_map_entries; + if (start_addr + bytes_written + bytes_required > end_addr) + goto end; + + req_md->fence_map_out = (struct cam_hw_fence_map_entry *) + (start_addr + bytes_written); + memcpy(req_md->fence_map_out, req->out_map_entries, bytes_required); + req_md->num_fence_map_out = req->num_out_map_entries; + bytes_written += bytes_required; + } + + if (req->num_in_map_entries) { + bytes_required = sizeof(struct cam_hw_fence_map_entry) * + req->num_in_map_entries; + if (start_addr + bytes_written + bytes_required > end_addr) + goto end; + + req_md->fence_map_in = (struct cam_hw_fence_map_entry *) + (start_addr + bytes_written); + memcpy(req_md->fence_map_in, req->in_map_entries, bytes_required); + req_md->num_fence_map_in = req->num_in_map_entries; + bytes_written += bytes_required; + } + + packet = (struct cam_packet *)req->packet; + if (packet && packet->num_io_configs) { + bytes_required = packet->num_io_configs * sizeof(struct cam_buf_io_cfg); + if (start_addr + bytes_written + bytes_required > end_addr) + goto end; + + io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload_flex + + packet->io_configs_offset / 4); + req_md->io_cfg = (struct cam_buf_io_cfg *)(start_addr + bytes_written); + memcpy(req_md->io_cfg, io_cfg, bytes_required); + bytes_written += bytes_required; + req_md->num_io_cfg = packet->num_io_configs; + } + +end: + *bytes_updated = bytes_written; +} + +static int cam_context_apply_buf_done_err_injection(struct cam_context *ctx, + struct cam_common_evt_inject_data *inject_evt) +{ + struct cam_hw_done_event_data buf_done_data = {0}; + int rc; + + buf_done_data.request_id = inject_evt->evt_params->req_id; + buf_done_data.evt_param = inject_evt->evt_params->u.buf_err_evt.sync_error; + rc = cam_context_buf_done_from_hw(ctx, &buf_done_data, + CAM_CTX_EVT_ID_ERROR); + if (rc) + CAM_ERR(CAM_CTXT, + "Fail to apply buf done error injection with req id: %llu ctx id: %u rc: %d", + buf_done_data.request_id, 
ctx->ctx_id, rc); + + return rc; +} + +static int cam_context_apply_pf_evt_injection(struct cam_context *ctx, + struct cam_hw_inject_evt_param *evt_params) +{ + struct cam_hw_dump_pf_args pf_args = {0}; + struct cam_smmu_pf_info pf_info = {0}; + int rc; + + pf_args.pf_context_info.req_id = evt_params->req_id; + pf_args.pf_context_info.ctx_found = evt_params->u.evt_notify.u.pf_evt_params.ctx_found; + pf_args.pf_smmu_info = &pf_info; + + rc = cam_context_send_pf_evt(ctx, &pf_args); + if (rc) + CAM_ERR(CAM_CTXT, + "Fail to apply Page Fault event injection with req id: %llu ctx id: %u rc: %d", + evt_params->req_id, ctx->ctx_id, rc); + + return rc; +} + +static int cam_context_apply_node_event_injection(struct cam_context *ctx, + struct cam_hw_inject_evt_param *evt_params) +{ + struct cam_hw_inject_node_evt_param *node_evt_params = + &evt_params->u.evt_notify.u.node_evt_params; + struct cam_req_mgr_message req_msg = {0}; + int rc; + + req_msg.u.node_msg.request_id = evt_params->req_id; + req_msg.u.node_msg.link_hdl = ctx->link_hdl; + req_msg.u.node_msg.device_hdl = ctx->dev_hdl; + req_msg.u.node_msg.event_type = node_evt_params->event_type; + req_msg.u.node_msg.event_cause = node_evt_params->event_cause; + req_msg.session_hdl = ctx->session_hdl; + + rc = cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_ERROR, V4L_EVENT_CAM_REQ_MGR_EVENT); + if (rc) + CAM_ERR(CAM_CTXT, + "Fail to apply error event injection with req id: %llu ctx id: %u rc: %d", + evt_params->req_id, ctx->ctx_id, rc); + + return rc; +} + +static int cam_context_apply_error_event_injection(struct cam_context *ctx, + struct cam_hw_inject_evt_param *evt_params) +{ + struct cam_hw_inject_err_evt_param *err_evt_params = + &evt_params->u.evt_notify.u.err_evt_params; + struct cam_req_mgr_message req_msg = {0}; + int rc; + + req_msg.u.err_msg.device_hdl = ctx->dev_hdl; + req_msg.u.err_msg.error_type = err_evt_params->err_type; + req_msg.u.err_msg.link_hdl = ctx->link_hdl; + 
req_msg.u.err_msg.request_id = evt_params->req_id; + req_msg.u.err_msg.resource_size = 0x0; + req_msg.u.err_msg.error_code = err_evt_params->err_code; + req_msg.session_hdl = ctx->session_hdl; + + rc = cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_ERROR, V4L_EVENT_CAM_REQ_MGR_EVENT); + if (rc) + CAM_ERR(CAM_CTXT, + "Fail to apply V4L2 Node event injection with req id: %llu ctx id: %u rc: %d", + evt_params->req_id, ctx->ctx_id, rc); + + return rc; +} + +int cam_context_apply_evt_injection(struct cam_context *ctx, void *inject_evt_arg) +{ + struct cam_common_evt_inject_data *inject_evt; + struct cam_hw_inject_evt_param *evt_params; + uint32_t evt_notify_type; + int rc = -EINVAL; + + if (!ctx || !inject_evt_arg) { + CAM_ERR(CAM_CTXT, "Invalid parameters ctx %s inject evt args %s", + CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(inject_evt_arg)); + return -EINVAL; + } + + inject_evt = inject_evt_arg; + evt_params = inject_evt->evt_params; + + switch (evt_params->inject_id) { + case CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE: + rc = cam_context_apply_buf_done_err_injection(ctx, inject_evt); + break; + case CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE: + evt_notify_type = evt_params->u.evt_notify.evt_notify_type; + switch (evt_notify_type) { + case V4L_EVENT_CAM_REQ_MGR_ERROR: + rc = cam_context_apply_error_event_injection(ctx, evt_params); + break; + case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: + rc = cam_context_apply_pf_evt_injection(ctx, evt_params); + break; + case V4L_EVENT_CAM_REQ_MGR_NODE_EVENT: + rc = cam_context_apply_node_event_injection(ctx, evt_params); + break; + default: + CAM_ERR(CAM_CTXT, "Invalid notify type %u", evt_notify_type); + } + break; + default: + CAM_ERR(CAM_CTXT, "Invalid inject id %u", evt_params->inject_id); + } + + return rc; +} + +int cam_context_mini_dump(struct cam_context *ctx, void *args) +{ + struct cam_hw_mini_dump_info *md; + struct cam_ctx_request *req, *req_temp; + struct cam_hw_mini_dump_args *md_args; + uint8_t *start_addr; + 
uint8_t *end_addr; + unsigned long bytes_written = 0; + unsigned long bytes_updated = 0; + + if (!ctx || !args) { + CAM_ERR(CAM_CTXT, "invalid params"); + return -EINVAL; + } + + md_args = (struct cam_hw_mini_dump_args *)args; + if (md_args->len < sizeof(*md)) { + md_args->bytes_written = 0; + CAM_ERR(CAM_CTXT, "Insufficient len %lu, bytes_written %lu", md_args->len, + md_args->bytes_written); + return 0; + } + + start_addr = (uint8_t *)md_args->start_addr; + end_addr = start_addr + md_args->len; + md = (struct cam_hw_mini_dump_info *)md_args->start_addr; + md->ctx_id = ctx->ctx_id; + md->last_flush_req = ctx->last_flush_req; + md->hw_mgr_ctx_id = ctx->hw_mgr_ctx_id; + md->dev_id = ctx->dev_id; + md->link_hdl = ctx->link_hdl; + md->state = ctx->state; + md->session_hdl = ctx->session_hdl; + md->dev_hdl = ctx->dev_hdl; + scnprintf(md->name, CAM_HW_MINI_DUMP_DEV_NAME_LEN, ctx->dev_name); + bytes_written += sizeof(*md); + + if (!list_empty(&ctx->active_req_list)) { + md->active_list = (struct cam_hw_req_mini_dump *) + (start_addr + bytes_written); + list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) { + bytes_updated = 0; + __cam_context_req_mini_dump(req, + (uint8_t *)&md->active_list[md->active_cnt++], + end_addr, &bytes_updated); + if ((start_addr + bytes_written + bytes_updated >= end_addr)) + goto end; + bytes_written += bytes_updated; + } + } + + if (!list_empty(&ctx->wait_req_list)) { + md->wait_list = (struct cam_hw_req_mini_dump *) + (start_addr + bytes_written); + list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) { + bytes_updated = 0; + __cam_context_req_mini_dump(req, + (uint8_t *)&md->wait_list[md->wait_cnt++], + end_addr, &bytes_updated); + if ((start_addr + bytes_written + bytes_updated >= end_addr)) + goto end; + bytes_written += bytes_updated; + } + } + + if (!list_empty(&ctx->pending_req_list)) { + md->pending_list = (struct cam_hw_req_mini_dump *) + (start_addr + bytes_written); + list_for_each_entry_safe(req, 
req_temp, &ctx->pending_req_list, list) { + bytes_updated = 0; + __cam_context_req_mini_dump(req, + (uint8_t *)&md->pending_list[md->pending_cnt++], + end_addr, &bytes_updated); + if ((start_addr + bytes_written + bytes_updated >= end_addr)) + goto end; + bytes_written += bytes_updated; + } + } +end: + md_args->bytes_written = bytes_written; + CAM_INFO(CAM_CTXT, "Ctx %s bytes_written %lu", ctx->dev_name, md_args->bytes_written); + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_context_utils.h b/qcom/opensource/camera-kernel/drivers/cam_core/cam_context_utils.h new file mode 100644 index 0000000000..ff6e6f6d18 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_context_utils.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CONTEXT_UTILS_H_ +#define _CAM_CONTEXT_UTILS_H_ + +#include +#include "cam_smmu_api.h" + +/** + * struct cam_context_utils_flush_args - arguments for flush context util + * + * @cmd: flush dev command from userland + * @flush_active_req: flag to indicate if the device supports flushing a particular + * active request or not + */ +struct cam_context_utils_flush_args { + struct cam_flush_dev_cmd *cmd; + bool flush_active_req; +}; + +int cam_context_buf_done_from_hw(struct cam_context *ctx, + void *done_event_data, uint32_t evt_id); +int32_t cam_context_release_dev_to_hw(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd); +int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd); +int32_t cam_context_config_dev_to_hw( + struct cam_context *ctx, struct cam_config_dev_cmd *cmd); +int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *cmd); +int32_t cam_context_start_dev_to_hw(struct cam_context *ctx, + struct 
cam_start_stop_dev_cmd *cmd); +int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx); +int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx, + struct cam_context_utils_flush_args *args); +int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx); +int32_t cam_context_flush_req_to_hw(struct cam_context *ctx, + struct cam_context_utils_flush_args *args); +int32_t cam_context_send_pf_evt(struct cam_context *ctx, + struct cam_hw_dump_pf_args *pf_args); +int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx, + struct cam_hw_dump_pf_args *pf_args, + struct cam_hw_mgr_pf_request_info *pf_req_info); +int32_t cam_context_dump_hw_acq_info(struct cam_context *ctx); +int32_t cam_context_dump_dev_to_hw(struct cam_context *ctx, + struct cam_dump_req_cmd *cmd); +size_t cam_context_parse_config_cmd(struct cam_context *ctx, struct cam_config_dev_cmd *cmd, + struct cam_packet **packet); +int cam_context_mini_dump(struct cam_context *ctx, void *args); +int cam_context_apply_evt_injection(struct cam_context *ctx, void *inject_evt_arg); +#endif /* _CAM_CONTEXT_UTILS_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw.h b/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw.h new file mode 100644 index 0000000000..5105c7198f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_HW_H_ +#define _CAM_HW_H_ + +#include "cam_soc_util.h" + +/* + * This file declares Enums, Structures and APIs to be used as template + * when writing any HW driver in the camera subsystem. 
+ */ + +/* Hardware state enum */ +enum cam_hw_state { + CAM_HW_STATE_POWER_DOWN, + CAM_HW_STATE_POWER_UP, +}; + +/** + * struct cam_hw_info - Common hardware information + * + * @hw_mutex: Hardware mutex + * @hw_lock: Hardware spinlock + * @hw_complete: Hardware Completion + * @open_count: Count to track the HW enable from the client + * @hw_state: Hardware state + * @soc_info: Platform SOC properties for hardware + * @node_info: Private HW data related to nodes + * @core_info: Private HW data related to core logic + * @presil_hw_lock: Mutex lock used for presil in place of hw_lock, + * for drivers like CDM + * + */ +struct cam_hw_info { + struct mutex hw_mutex; + spinlock_t hw_lock; + struct completion hw_complete; + uint32_t open_count; + enum cam_hw_state hw_state; + struct cam_hw_soc_info soc_info; + void *node_info; + void *core_info; + +#ifdef CONFIG_CAM_PRESIL + struct mutex presil_hw_lock; +#endif +}; + +#endif /* _CAM_HW_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw_intf.h new file mode 100644 index 0000000000..e458575b87 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw_intf.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_HW_INTF_H_ +#define _CAM_HW_INTF_H_ + +#include +#include "cam_hw.h" + +/* + * This file declares Constants, Enums, Structures and APIs to be used as + * Interface between HW driver and HW Manager. 
+ */ + +#define CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ 6 + +enum cam_clk_bw_state { + CAM_CLK_BW_STATE_INIT, + CAM_CLK_BW_STATE_UNCHANGED, + CAM_CLK_BW_STATE_INCREASE, + CAM_CLK_BW_STATE_DECREASE, +}; + +/** + * struct cam_hw_ops - Hardware layer interface functions + * + * @get_hw_caps: Function pointer for get hw caps + * @init: Function poniter for initialize hardware + * @deinit: Function pointer for deinitialize hardware + * @reset: Function pointer for reset hardware + * @reserve: Function pointer for reserve hardware + * @release: Function pointer for release hardware + * @start: Function pointer for start hardware + * @stop: Function pointer for stop hardware + * @read: Function pointer for read hardware registers + * @write: Function pointer for Write hardware registers + * @process_cmd: Function pointer for additional hardware controls + * @flush_cmd: Function pointer for flush requests + * @test_irq_line: Function pointer for testing irq line + * + */ +struct cam_hw_ops { + int (*get_hw_caps)(void *hw_priv, + void *get_hw_cap_args, uint32_t arg_size); + int (*init)(void *hw_priv, + void *init_hw_args, uint32_t arg_size); + int (*deinit)(void *hw_priv, + void *init_hw_args, uint32_t arg_size); + int (*reset)(void *hw_priv, + void *reset_core_args, uint32_t arg_size); + int (*reserve)(void *hw_priv, + void *reserve_args, uint32_t arg_size); + int (*release)(void *hw_priv, + void *release_args, uint32_t arg_size); + int (*start)(void *hw_priv, + void *start_args, uint32_t arg_size); + int (*stop)(void *hw_priv, + void *stop_args, uint32_t arg_size); + int (*read)(void *hw_priv, + void *read_args, uint32_t arg_size); + int (*write)(void *hw_priv, + void *write_args, uint32_t arg_size); + int (*process_cmd)(void *hw_priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size); + int (*flush)(void *hw_priv, + void *flush_args, uint32_t arg_size); + int (*test_irq_line)(void *hw_priv); +}; + +/** + * struct cam_hw_intf - Common hardware node + * + * @hw_type: 
Hardware type + * @hw_idx: Hardware ID + * @hw_ops: Hardware interface function table + * @hw_priv: Private hardware node pointer + * + */ +struct cam_hw_intf { + uint32_t hw_type; + uint32_t hw_idx; + struct cam_hw_ops hw_ops; + void *hw_priv; +}; + +/* hardware event callback function type */ +typedef int (*cam_hw_mgr_event_cb_func)(void *priv, uint32_t evt_id, + void *evt_data); + +#ifdef CONFIG_CAM_PRESIL +static inline void cam_hw_util_init_hw_lock(struct cam_hw_info *hw_info) +{ + mutex_init(&hw_info->presil_hw_lock); +} + +static inline unsigned long cam_hw_util_hw_lock_irqsave(struct cam_hw_info *hw_info) +{ + mutex_lock(&hw_info->presil_hw_lock); + + return 0; +} + +static inline void cam_hw_util_hw_unlock_irqrestore(struct cam_hw_info *hw_info, + unsigned long flags) +{ + mutex_unlock(&hw_info->presil_hw_lock); +} + +static inline void cam_hw_util_hw_lock(struct cam_hw_info *hw_info) +{ + mutex_lock(&hw_info->presil_hw_lock); +} + +static inline void cam_hw_util_hw_unlock(struct cam_hw_info *hw_info) +{ + mutex_unlock(&hw_info->presil_hw_lock); +} +#else +static inline void cam_hw_util_init_hw_lock(struct cam_hw_info *hw_info) +{ + spin_lock_init(&hw_info->hw_lock); +} + +static inline unsigned long cam_hw_util_hw_lock_irqsave(struct cam_hw_info *hw_info) +{ + unsigned long flags = 0; + + if (!in_irq()) + spin_lock_irqsave(&hw_info->hw_lock, flags); + + return flags; +} + +static inline void cam_hw_util_hw_unlock_irqrestore(struct cam_hw_info *hw_info, + unsigned long flags) +{ + if (!in_irq()) + spin_unlock_irqrestore(&hw_info->hw_lock, flags); +} + +static inline void cam_hw_util_hw_lock(struct cam_hw_info *hw_info) +{ + spin_lock(&hw_info->hw_lock); +} + +static inline void cam_hw_util_hw_unlock(struct cam_hw_info *hw_info) +{ + spin_unlock(&hw_info->hw_lock); +} +#endif /* CONFIG_CAM_PRESIL */ + +#endif /* _CAM_HW_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw_mgr_intf.h 
b/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw_mgr_intf.h new file mode 100644 index 0000000000..563f18e98a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_hw_mgr_intf.h @@ -0,0 +1,787 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_HW_MGR_INTF_H_ +#define _CAM_HW_MGR_INTF_H_ + +#include +#include +#include +#include "cam_smmu_api.h" + +/* + * This file declares Constants, Enums, Structures and APIs to be used as + * Interface between HW Manager and Context. + */ + + +/* maximum context numbers */ +#define CAM_CTX_MAX 8 + +/* maximum buf done irqs, multiplied by 3 hw context entries per resource */ +#define CAM_NUM_OUT_PER_COMP_IRQ_MAX 36 + +/* Maximum reg dump cmd buffer entries in a context */ +#define CAM_REG_DUMP_MAX_BUF_ENTRIES 10 + +/* max device name string length*/ +#define CAM_HW_MINI_DUMP_DEV_NAME_LEN 20 + +/** + * enum cam_context_dump_id - + * context dump type + * + */ +enum cam_context_dump_id { + CAM_CTX_DUMP_TYPE_NONE, + CAM_CTX_DUMP_ACQ_INFO, + CAM_CTX_DUMP_TYPE_MAX, +}; + +/** + * enum cam_faulted_mem_type - + * page fault buffer type found in packet + * + */ +enum cam_faulted_mem_type { + CAM_FAULT_BUF_NOT_FOUND, + CAM_FAULT_IO_CFG_BUF, + CAM_FAULT_PATCH_BUF +}; + +/** + * enum cam_pf_pid_found_status - + * Status of PID found for a hardware node + * + */ +enum cam_pf_pid_found_status { + CAM_PF_PID_FOUND_PENDING, + CAM_PF_PID_FOUND_FAILURE, + CAM_PF_PID_FOUND_SUCCESS +}; + +#define CAM_CTX_EVT_ID_SUCCESS 0 +#define CAM_CTX_EVT_ID_ERROR 1 +#define CAM_CTX_EVT_ID_CANCEL 2 + +/* hardware event callback function type */ +typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id, + void *evt_data); + +/* hardware page fault callback function type */ +typedef int (*cam_hw_pagefault_cb_func)(void *context, + void *pf_args); + +/* ctx 
dump callback function type */ +typedef int (*cam_ctx_info_dump_cb_func)(void *context, + enum cam_context_dump_id dump_id); + +/* ctx recovery callback function type */ +typedef int (*cam_ctx_recovery_cb_func)(void *context, + void *recovery_data); + +/* ctx mini dump callback function type */ +typedef int (*cam_ctx_mini_dump_cb_func)(void *context, + void *args); + +/* ctx error inject callback function type */ +typedef int (*cam_ctx_err_inject_cb_func)(void *context, + void *args); + +/* message callback function type */ +typedef int (*cam_ctx_message_cb_func)(void *context, + uint32_t message_type, void *data); + +/** + * struct cam_hw_update_entry - Entry for hardware config + * + * @handle: Memory handle for the configuration + * @offset: Memory offset + * @len: Size of the configuration + * @flags: Flags for the config entry(eg. DMI) + * @addr: Address of hardware update entry + * @single_apply_only: If set entry is configured to cdm only once + * + */ +struct cam_hw_update_entry { + int handle; + uint32_t offset; + uint32_t len; + uint32_t flags; + uintptr_t addr; + bool single_apply_only; +}; + +/** + * struct cam_hw_fence_map_entry - Entry for the resource to sync id map + * + * @resrouce_handle: Resource port id for the buffer + * @hw_ctxt_id: Hw ctxt id for multi-context capable targets + * @sync_id: Sync id + * @sync_id: Early sync id + * @image_buf_addr: Image buffer address array + * @buffer_tracker: Some buffers with fences have buf dones come + * separately from each out port, and signalled + * independently. Ref counting needs to be handled + * independently as well corresponding to individual + * buf dones. 
+ * + */ +struct cam_hw_fence_map_entry { + uint32_t resource_handle; + uint32_t hw_ctxt_id; + int32_t sync_id; + int32_t early_sync_id; + dma_addr_t image_buf_addr[CAM_PACKET_MAX_PLANES]; + struct cam_smmu_buffer_tracker *buffer_tracker; +}; + +/** + * struct cam_hw_done_event_data - Payload for hw done event + * + * @num_handles: number of handles in the event + * @resource_handle: list of the resource handle + * @timestamp: time stamp + * @request_id: request identifier + * @evt_param: event parameter + * + */ +struct cam_hw_done_event_data { + uint32_t num_handles; + uint32_t resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX]; + struct timespec64 timestamp; + uint64_t request_id; + uint32_t evt_param; +}; + +/** + * struct cam_hw_acquire_stream_caps - Any HW caps info from HW mgr to ctx + * Params to be interpreted by the + * respective drivers + * @fcg_caps : SFE/IFE/MC_TFE FCG capabilities during hw acquire + * @num_valid_params : Number of valid params + * @param_list : List of params interpreted by the driver + * + */ +struct cam_hw_acquire_stream_caps { + void *fcg_caps; + uint32_t num_valid_params; + uint32_t param_list[4]; +}; + +/** + * struct cam_hw_acquire_args - Payload for acquire command + * + * @context_data: Context data pointer for the callback function + * @ctx_id: Core context id + * @event_cb: Callback function array + * @sec_pf_evt_cb: Callback function for secondary page fault from HW to ctx. + * It's the callback that pertains to a PF not directly on
But a different block to which this HW is + * associated or is a client of + * @num_acq: Total number of acquire in the payload + * @acquire_info: Acquired resource array pointer + * @ctxt_to_hw_map: HW context (returned) + * @hw_mgr_ctx_id: HWMgr context id(returned) + * @op_flags: Used as bitwise params from hw_mgr to ctx + * See xxx_hw_mgr_intf.h for definitions + * @acquired_hw_id: Acquired hardware mask + * @acquired_hw_path: Acquired path mask for an input + * if input splits into multiple paths, + * its updated per hardware + * @valid_acquired_hw: Valid num of acquired hardware + * @total_ports_acq: Total acquired ports + * @op_params: OP Params from hw_mgr to ctx + * @mini_dump_cb: Mini dump callback function + * @api_version: Version of the acquire API + * + */ +struct cam_hw_acquire_args { + void *context_data; + uint32_t ctx_id; + cam_hw_event_cb_func event_cb; + cam_hw_pagefault_cb_func sec_pf_evt_cb; + uint32_t num_acq; + uint32_t acquire_info_size; + uintptr_t acquire_info; + void *ctxt_to_hw_map; + uint32_t hw_mgr_ctx_id; + uint32_t op_flags; + uint32_t acquired_hw_id[CAM_MAX_ACQ_RES]; + uint32_t acquired_hw_path[CAM_MAX_ACQ_RES][CAM_MAX_HW_SPLIT]; + uint32_t valid_acquired_hw; + uint32_t total_ports_acq; + struct cam_hw_acquire_stream_caps op_params; + cam_ctx_mini_dump_cb_func mini_dump_cb; + uint32_t api_version; +}; + +/** + * struct cam_hw_release_args - Payload for release command + * + * @ctxt_to_hw_map: HW context from the acquire + * @active_req: Active request flag + * + */ +struct cam_hw_release_args { + void *ctxt_to_hw_map; + bool active_req; +}; + +/** + * struct cam_hw_start_args - Payload for start command + * + * @ctxt_to_hw_map: HW context from the acquire + * @num_hw_update_entries: Number of Hardware configuration + * @hw_update_entries: Hardware configuration list + * + */ +struct cam_hw_start_args { + void *ctxt_to_hw_map; + uint32_t num_hw_update_entries; + struct cam_hw_update_entry *hw_update_entries; +}; + +/** + * struct 
cam_hw_stop_args - Payload for stop command + * + * @ctxt_to_hw_map: HW context from the acquire + * @args: Arguments to pass for stop + * + */ +struct cam_hw_stop_args { + void *ctxt_to_hw_map; + void *args; +}; + +/** + * struct cam_hw_mgr_pf_request_info - page fault debug data + * + * @req: pointer to req (HW specific) + */ +struct cam_hw_mgr_pf_request_info { + void *req; +}; + +/** + * struct cam_hw_prepare_update_args - Payload for prepare command + * + * @packet: CSL packet from user mode driver + * @remain_len Remaining length of CPU buffer after config offset + * @ctxt_to_hw_map: HW context from the acquire + * @max_hw_update_entries: Maximum hardware update entries supported + * @hw_update_entries: Actual hardware update configuration (returned) + * @num_hw_update_entries: Number of actual hardware update entries (returned) + * @max_out_map_entries: Maximum output fence mapping supported + * @out_map_entries: Actual output fence mapping list (returned) + * @num_out_map_entries: Number of actual output fence mapping (returned) + * @max_in_map_entries: Maximum input fence mapping supported + * @in_map_entries: Actual input fence mapping list (returned) + * @num_in_map_entries: Number of acutal input fence mapping (returned) + * @reg_dump_buf_desc: cmd buffer descriptors for reg dump + * @num_reg_dump_buf: Count of descriptors in reg_dump_buf_desc + * @priv: Private pointer of hw update + * @buf_tracker: Ptr to list of buffers we want to keep ref counts on + * @pf_data: Debug data for page fault + * + */ +struct cam_hw_prepare_update_args { + struct cam_packet *packet; + size_t remain_len; + void *ctxt_to_hw_map; + uint32_t max_hw_update_entries; + struct cam_hw_update_entry *hw_update_entries; + uint32_t num_hw_update_entries; + uint32_t max_out_map_entries; + struct cam_hw_fence_map_entry *out_map_entries; + uint32_t num_out_map_entries; + uint32_t max_in_map_entries; + struct cam_hw_fence_map_entry *in_map_entries; + uint32_t num_in_map_entries; + struct 
cam_cmd_buf_desc reg_dump_buf_desc[ + CAM_REG_DUMP_MAX_BUF_ENTRIES]; + uint32_t num_reg_dump_buf; + void *priv; + struct list_head *buf_tracker; + struct cam_hw_mgr_pf_request_info *pf_data; +}; + +/** + * struct cam_hw_stream_setttings - Payload for config stream command + * + * @packet: CSL packet from user mode driver + * @ctxt_to_hw_map: HW context from the acquire + * @priv: Private pointer of hw update + * + */ +struct cam_hw_stream_setttings { + struct cam_packet *packet; + void *ctxt_to_hw_map; + void *priv; +}; + +/** + * enum cam_hw_config_reapply_type + */ +enum cam_hw_config_reapply_type { + CAM_CONFIG_REAPPLY_NONE, + CAM_CONFIG_REAPPLY_IQ, + CAM_CONFIG_REAPPLY_IO, +}; + +/** + * struct cam_hw_config_args - Payload for config command + * + * @ctxt_to_hw_map: HW context from the acquire + * @num_hw_update_entries: Number of hardware update entries + * @hw_update_entries: Hardware update list + * @out_map_entries: Out map info + * @num_out_map_entries: Number of out map entries + * @priv: Private pointer + * @request_id: Request ID + * @reapply_type: On reapply determines what is to be applied + * @init_packet: Set if INIT packet + * @cdm_reset_before_apply: True is need to reset CDM before re-apply bubble + * request + * + */ +struct cam_hw_config_args { + void *ctxt_to_hw_map; + uint32_t num_hw_update_entries; + struct cam_hw_update_entry *hw_update_entries; + struct cam_hw_fence_map_entry *out_map_entries; + uint32_t num_out_map_entries; + void *priv; + uint64_t request_id; + enum cam_hw_config_reapply_type reapply_type; + bool init_packet; + bool cdm_reset_before_apply; +}; + +/** + * struct cam_hw_flush_args - Flush arguments + * + * @ctxt_to_hw_map: HW context from the acquire + * @num_req_pending: Num request to flush, valid when flush type is REQ + * @flush_req_pending: Request pending pointers to flush + * @num_req_active: Num request to flush, valid when flush type is REQ + * @flush_req_active: Request active pointers to flush + * @flush_type: 
The flush type + * @last_flush_req: last flush req_id notified to hw_mgr for the + * given stream + * + */ +struct cam_hw_flush_args { + void *ctxt_to_hw_map; + uint32_t num_req_pending; + void **flush_req_pending; + uint32_t num_req_active; + void **flush_req_active; + enum flush_type_t flush_type; + uint32_t last_flush_req; +}; + +/** + * struct cam_context_pf_info - Page Fault related info to the faulted context + * + * @mem_type: Faulted memory type found in packet + * @resource_type: Resource type of the port which caused page fault + * @buf_hdl: Faulted memory handle + * @offset: Offset for faulted buf_hdl + * @req_id: request id for the faulted request + * @delta: Delta size between faulted address and buffer + * Or closest-mapped buffer + * (if faulted addr isn't found to be in any buffer) + * @patch_idx: Index to which patch in the packet is faulted + * @mem_flag: Memory flag of the faulted buffer + * @ctx_found: If fault pid found in context acquired hardware + * @force_send_pf_evt: Must send page fault notification to UMD even if + * current ctx is not the faulted ctx + */ +struct cam_context_pf_info { + enum cam_faulted_mem_type mem_type; + uint32_t resource_type; + int32_t buf_hdl; + uint32_t offset; + unsigned long delta; + uint32_t patch_idx; + uint32_t mem_flag; + uint64_t req_id; + bool ctx_found; + bool force_send_pf_evt; +}; + +/** + * struct cam_hw_dump_pf_args - General payload contains all PF relateed info. + * + * @pf_smmu_info: Page fault info from SMMU driver + * @pf_context_info: Page fault info related to faulted context or + * faulted request. 
+ * @handle_sec_pf: Indicates if this PF args comes from HW level + * @check_pid: Indicates if simply checking error client by pid without dumping ctx + * or active requests + * @pf_pid_found_status: Indicates if client with the same pid is found for the PF issue + */ +struct cam_hw_dump_pf_args { + struct cam_smmu_pf_info *pf_smmu_info; + struct cam_context_pf_info pf_context_info; + bool handle_sec_pf; + bool check_pid; + enum cam_pf_pid_found_status pf_pid_found_status; +}; + +/** + * struct cam_hw_cmd_pf_args - page fault command payload to hw manager. + * @pf_args: Page Fault related info. + * @cmd_pf_req_info: Command payload related to request info. Used to + * submit to HW for PF processing. + * + */ +struct cam_hw_cmd_pf_args { + struct cam_hw_dump_pf_args *pf_args; + struct cam_hw_mgr_pf_request_info *pf_req_info; +}; + +/** + * struct cam_hw_reset_args -hw reset arguments + * + * @ctxt_to_hw_map: HW context from the acquire + * + */ +struct cam_hw_reset_args { + void *ctxt_to_hw_map; +}; + +/** + * struct cam_hw_dump_args - Dump arguments + * + * @request_id: request_id + * @offset: Buffer offset. This is updated by the drivers. 
+ * @buf_handle: Buffer handle + * @error_type: Error type, to be used to extend dump information + * @ctxt_to_hw_map: HW context from the acquire + */ +struct cam_hw_dump_args { + uint64_t request_id; + size_t offset; + uint32_t buf_handle; + uint32_t error_type; + void *ctxt_to_hw_map; +}; + +/* enum cam_hw_mgr_command - Hardware manager command type */ +enum cam_hw_mgr_command { + CAM_HW_MGR_CMD_INTERNAL, + CAM_HW_MGR_CMD_DUMP_PF_INFO, + CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH, + CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, + CAM_HW_MGR_CMD_REG_DUMP_PER_REQ, + CAM_HW_MGR_CMD_DUMP_ACQ_INFO, +}; + +/** + * struct cam_hw_cmd_args - Payload for hw manager command + * + * @ctxt_to_hw_map: HW context from the acquire + * @cmd_type HW command type + * @internal_args Arguments for internal command + * @pf_cmd_args Arguments for Dump PF info command + * @hw_update_data HW update data for register dump + * + */ +struct cam_hw_cmd_args { + void *ctxt_to_hw_map; + uint32_t cmd_type; + union { + void *internal_args; + struct cam_hw_cmd_pf_args *pf_cmd_args; + struct cam_isp_prepare_hw_update_data *hw_update_data; + } u; +}; + +/** + * struct cam_hw_mini_dump_args - Mini Dump arguments + * + * @start_addr: Start address of buffer + * @len: Len of Buffer + * @bytes_written: Bytes written + */ +struct cam_hw_mini_dump_args { + void *start_addr; + unsigned long len; + unsigned long bytes_written; +}; + +/** + * struct cam_hw_req_mini_dump - Mini dump context req + * + * @fence_map_out: Fence map out array + * @fence_map_in: Fence map in array + * @io_cfg: IO cfg. 
+ * @request_id: Request id + * @num_fence_map_in: num of fence map in + * @num_fence_map_in: num of fence map out + * @num_io_cfg: num of io cfg + * @num_in_acked: num in acked + * @num_out_acked: num out acked + * @status: Status + * @flushed: whether request is flushed + * + */ +struct cam_hw_req_mini_dump { + struct cam_hw_fence_map_entry *fence_map_out; + struct cam_hw_fence_map_entry *fence_map_in; + struct cam_buf_io_cfg *io_cfg; + uint64_t request_id; + uint16_t num_fence_map_in; + uint16_t num_fence_map_out; + uint16_t num_io_cfg; + uint16_t num_in_acked; + uint16_t num_out_acked; + uint8_t status; + uint8_t flushed; +}; + +/** + * struct cam_hw_mini_dump_info - Mini dump context info + * + * @active_list: array of active req in context + * @wait_list: array of wait req in context + * @pending_list: array of pending req in context + * @name: name of context + * @dev_id: dev id. + * @last_flush_req: last flushed request id + * @refcount: kernel ref count + * @ctx_id: Context id + * @session_hdl: Session handle + * @link_hdl: link handle + * @dev_hdl: Dev hdl + * @state: State of context + * @hw_mgr_ctx_id: ctx id with hw mgr + * + */ +struct cam_hw_mini_dump_info { + struct cam_hw_req_mini_dump *active_list; + struct cam_hw_req_mini_dump *wait_list; + struct cam_hw_req_mini_dump *pending_list; + char name[CAM_HW_MINI_DUMP_DEV_NAME_LEN]; + uint64_t dev_id; + uint32_t last_flush_req; + uint32_t refcount; + uint32_t ctx_id; + uint32_t active_cnt; + uint32_t pending_cnt; + uint32_t wait_cnt; + int32_t session_hdl; + int32_t link_hdl; + int32_t dev_hdl; + uint8_t state; + uint8_t hw_mgr_ctx_id; +}; + +/** + * cam_hw_inject_err_evt_param - error evt injection parameters + * + * @err_type: error type for the injected err evt + * @err_code: error code for the injected err evt + */ +struct cam_hw_inject_err_evt_param { + uint32_t err_type; + uint32_t err_code; +}; + +/** + * cam_hw_inject_node_evt_param - node evt injection parameters + * @event_type: event type for 
the injected node evt + * @event_cause: event cause for the injected node evt + */ +struct cam_hw_inject_node_evt_param { + uint32_t event_type; + uint32_t event_cause; +}; + +/** + * cam_hw_inject_pf_evt_param - pf evt injection parameters + * + * @ctx_found: flag to indicate if page fault notification sent with ctx or not + */ +struct cam_hw_inject_pf_evt_param { + bool ctx_found; +}; + +/** + * cam_hw_inject_evt_notification_param - notify event parameters + * + * @evt_notify_type: type of event notification + * @u: union which can be either error event/Page Fault event/Node event + */ + +struct cam_hw_inject_evt_notification_param { + uint32_t evt_notify_type; + union { + struct cam_hw_inject_err_evt_param err_evt_params; + struct cam_hw_inject_pf_evt_param pf_evt_params; + struct cam_hw_inject_node_evt_param node_evt_params; + } u; +}; + +/** + * cam_hw_inject_buffer_error_param - buffer error injection parameters + * + * @sync_error: sync error code + */ +struct cam_hw_inject_buffer_error_param { + uint32_t sync_error; +}; + +/** + * cam_hw_inject_evt_param - CRM event injection parameters + * + * @inject_id: generic inject identifier + * @req_id: Req Id for which the event is injected + * @u: union which can be either buffer done error/event notification + * @is_valid: bool flag to indicate if event injection is enabled for a context + */ +struct cam_hw_inject_evt_param { + uint8_t inject_id; + uint64_t req_id; + union { + struct cam_hw_inject_buffer_error_param buf_err_evt; + struct cam_hw_inject_evt_notification_param evt_notify; + } u; + bool is_valid; +}; + +/** + * struct cam_acquire_dev_cmd_unified - Unified payload for acquire devices + * + * @struct_version: API version of the acquire command + * @session_handle: Session handle for the acquire command + * @dev_handle: Device handle to be returned + * @handle_type: Resource handle type: + * 1 = user pointer, 2 = mem handle + * @num_resources: Number of the resources to be acquired + * @resources_hdl: 
Resource handle that refers to the actual + * resource array. Each item in this + * array is device specific resource structure + * + */ +struct cam_acquire_dev_cmd_unified { + __u32 struct_version; + __s32 session_handle; + __s32 dev_handle; + __u32 handle_type; + __u32 num_resources; + __u64 resource_hdl; +}; + +/** + * struct cam_icp_res_info_unified - ICP output resource info + * + * @format: format of the resource + * @width: width in pixels + * @height: height in lines + * @fps: fps + * @port_id: ID of the out resource + * @is_secure: whether the port is secure + */ +struct cam_icp_res_info_unified { + __u32 format; + __u32 width; + __u32 height; + __u32 fps; + __u32 port_id; + __u32 is_secure; +}; + +/** + * struct cam_icp_acquire_dev_info_unified - An ICP device info + * + * @scratch_mem_size: Output param - size of scratch memory + * @dev_type: device type (IPE_RT/IPE_NON_RT/BPS) + * @io_config_cmd_size: size of IO config command + * @io_config_cmd_handle: IO config command for each acquire + * @secure_mode: camera mode (secure/non secure) + * @chain_info: chaining info of FW device handles + * @in_res: resource info used for clock and bandwidth calculation + * @num_out_res: number of output resources + * @out_res_flex: output resource + */ +struct cam_icp_acquire_dev_info_unified { + __u32 scratch_mem_size; + __u32 dev_type; + __u32 io_config_cmd_size; + __s32 io_config_cmd_handle; + __u32 secure_mode; + __s32 chain_info; + struct cam_icp_res_info_unified in_res; + __u32 num_out_res; + __DECLARE_FLEX_ARRAY(struct cam_icp_res_info_unified, out_res_flex); +} __attribute__((__packed__)); + +/** + * cam_hw_mgr_intf - HW manager interface + * + * @hw_mgr_priv: HW manager object + * @hw_get_caps: Function pointer for get hw caps + * args = cam_query_cap_cmd + * @hw_get_caps_v2: Function pointer for get hw caps v2 + * args = cam_query_cap_cmd_v2 + * @hw_acquire: Function poniter for acquire hw resources + * args = cam_hw_acquire_args + * @hw_release: Function 
pointer for release hw device resource + * args = cam_hw_release_args + * @hw_start: Function pointer for start hw devices + * args = cam_hw_start_args + * @hw_stop: Function pointer for stop hw devices + * args = cam_hw_stop_args + * @hw_prepare_update: Function pointer for prepare hw update for hw + * devices args = cam_hw_prepare_update_args + * @hw_config_stream_settings: Function pointer for configure stream for hw + * devices args = cam_hw_stream_setttings + * @hw_config: Function pointer for configure hw devices + * args = cam_hw_config_args + * @hw_read: Function pointer for read hardware registers + * @hw_write: Function pointer for Write hardware registers + * @hw_cmd: Function pointer for any customized commands for + * the hardware manager + * @hw_open: Function pointer for HW init + * @hw_close: Function pointer for HW deinit + * @hw_flush: Function pointer for HW flush + * @hw_reset: Function pointer for HW reset + * @hw_dump: Function pointer for HW dump + * @hw_recovery: Function pointer for HW recovery callback + * @hw_inject_evt: Function pointer for HW event injection callback + * @synx_trigger: Function pointer for synx test trigger + * + */ +struct cam_hw_mgr_intf { + void *hw_mgr_priv; + + int (*hw_get_caps)(void *hw_priv, void *hw_caps_args); + int (*hw_get_caps_v2)(void *hw_priv, void *hw_caps_args); + int (*hw_acquire)(void *hw_priv, void *hw_acquire_args); + int (*hw_release)(void *hw_priv, void *hw_release_args); + int (*hw_start)(void *hw_priv, void *hw_start_args); + int (*hw_stop)(void *hw_priv, void *hw_stop_args); + int (*hw_prepare_update)(void *hw_priv, void *hw_prepare_update_args); + int (*hw_config_stream_settings)(void *hw_priv, + void *hw_stream_settings); + int (*hw_config)(void *hw_priv, void *hw_config_args); + int (*hw_read)(void *hw_priv, void *read_args); + int (*hw_write)(void *hw_priv, void *write_args); + int (*hw_cmd)(void *hw_priv, void *write_args); + int (*hw_open)(void *hw_priv, void *fw_download_args); + int 
(*hw_close)(void *hw_priv, void *hw_close_args); + int (*hw_flush)(void *hw_priv, void *hw_flush_args); + int (*hw_reset)(void *hw_priv, void *hw_reset_args); + int (*hw_dump)(void *hw_priv, void *hw_dump_args); + int (*hw_recovery)(void *hw_priv, void *hw_recovery_args); + void (*hw_inject_evt)(void *hw_priv, void *evt_args); + int (*synx_trigger)(void *hw_priv, void *synx_params); +}; + +#endif /* _CAM_HW_MGR_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_node.c b/qcom/opensource/camera-kernel/drivers/cam_core/cam_node.c new file mode 100644 index 0000000000..9f24cdaf2f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_node.c @@ -0,0 +1,1223 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include + +#include "cam_node.h" +#include "cam_trace.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +static void cam_node_print_ctx_state( + struct cam_node *node) +{ + int i; + struct cam_context *ctx; + + CAM_INFO(CAM_CORE, "[%s] state=%d, ctx_size %d", + node->name, node->state, node->ctx_size); + + mutex_lock(&node->list_mutex); + for (i = 0; i < node->ctx_size; i++) { + ctx = &node->ctx_list[i]; + + spin_lock_bh(&ctx->lock); + CAM_INFO(CAM_CORE, + "[%s][%d] : state=%d, refcount=%d, active_req_list=%d, pending_req_list=%d, wait_req_list=%d, free_req_list=%d", + ctx->dev_name, + i, ctx->state, + atomic_read(&(ctx->refcount.refcount.refs)), + list_empty(&ctx->active_req_list), + list_empty(&ctx->pending_req_list), + list_empty(&ctx->wait_req_list), + list_empty(&ctx->free_req_list)); + spin_unlock_bh(&ctx->lock); + } + mutex_unlock(&node->list_mutex); +} + +static struct cam_context *cam_node_get_ctxt_from_free_list( + struct cam_node *node) +{ + struct cam_context *ctx = NULL; + + mutex_lock(&node->list_mutex); + if 
(!list_empty(&node->free_ctx_list)) { + ctx = list_first_entry(&node->free_ctx_list, + struct cam_context, list); + list_del_init(&ctx->list); + } + mutex_unlock(&node->list_mutex); + if (ctx) + kref_init(&ctx->refcount); + return ctx; +} + +void cam_node_put_ctxt_to_free_list(struct kref *ref) +{ + struct cam_context *ctx = + container_of(ref, struct cam_context, refcount); + struct cam_node *node = ctx->node; + + mutex_lock(&node->list_mutex); + list_add_tail(&ctx->list, &node->free_ctx_list); + mutex_unlock(&node->list_mutex); +} + +static int __cam_node_handle_query_cap(uint32_t version, + struct cam_node *node, struct cam_query_cap_cmd *query) +{ + struct cam_hw_mgr_intf *hw_mgr_intf = &node->hw_mgr_intf; + int rc = 0; + + if (!query) { + CAM_ERR(CAM_CORE, "Invalid params"); + return -EINVAL; + } + + switch (version) { + case CAM_QUERY_CAP: + if (!hw_mgr_intf->hw_get_caps) { + CAM_ERR(CAM_CORE, "Node %s query cap version: %u get hw cap intf is NULL", + node->name, version); + return -EINVAL; + } + rc = hw_mgr_intf->hw_get_caps(hw_mgr_intf->hw_mgr_priv, query); + break; + case CAM_QUERY_CAP_V2: + if (!hw_mgr_intf->hw_get_caps_v2) { + CAM_ERR(CAM_CORE, "Node %s query cap version: %u get hw cap intf is NULL", + node->name, version); + return -EINVAL; + } + rc = hw_mgr_intf->hw_get_caps_v2(hw_mgr_intf->hw_mgr_priv, query); + break; + default: + CAM_ERR(CAM_CORE, "Invalid version number %u", version); + return -EINVAL; + } + + return rc; +} + +static int __cam_node_handle_acquire_dev(struct cam_node *node, + struct cam_acquire_dev_cmd_unified *acquire) +{ + int rc = 0; + struct cam_context *ctx = NULL; + + if (!acquire) + return -EINVAL; + + ctx = cam_node_get_ctxt_from_free_list(node); + if (!ctx) { + CAM_ERR(CAM_CORE, + "No free ctx in free list node %s with size:%d", + node->name, node->ctx_size); + cam_node_print_ctx_state(node); + + rc = -ENOMEM; + goto err; + } + + ctx->last_flush_req = 0; + + rc = cam_handle_validate(acquire->session_handle, 
acquire->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for acquire dev"); + goto free_ctx; + } + + rc = cam_context_handle_acquire_dev(ctx, acquire); + if (rc) { + CAM_ERR(CAM_CORE, "Acquire device failed for node %s", + node->name); + goto free_ctx; + } + + CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d", + node->name, ctx->ctx_id); + + return 0; +free_ctx: + cam_context_putref(ctx); +err: + return rc; +} + +static void __cam_node_handle_acquired_hw_dump( + struct cam_node *node) +{ + int i; + + for (i = 0; i < node->ctx_size; i++) + cam_context_handle_info_dump(&(node->ctx_list[i]), + CAM_CTX_DUMP_ACQ_INFO); +} + +static int __cam_node_handle_acquire_hw_v1(struct cam_node *node, + struct cam_acquire_hw_cmd_v1 *acquire) +{ + int rc = 0; + struct cam_context *ctx = NULL; + + if (!acquire) + return -EINVAL; + + rc = cam_handle_validate(acquire->session_handle, acquire->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = cam_handle_validate(acquire->session_handle, acquire->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(acquire->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + acquire->dev_handle); + return -EINVAL; + } + + if (strcmp(node->name, ctx->dev_name)) { + CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching", + node->name, ctx->dev_name); + return -EINVAL; + } + + rc = cam_context_handle_acquire_hw(ctx, acquire); + if (rc) { + CAM_ERR(CAM_CORE, "Acquire device failed for node %s", + node->name); + __cam_node_handle_acquired_hw_dump(node); + return rc; + } + + CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d", + node->name, ctx->ctx_id); + + return 0; +} + +static int __cam_node_handle_acquire_hw_v2(struct cam_node *node, + struct cam_acquire_hw_cmd_v2 *acquire) +{ + int rc = 0; + struct cam_context *ctx = NULL; + + if (!acquire) + return 
-EINVAL; + + rc = cam_handle_validate(acquire->session_handle, acquire->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = cam_handle_validate(acquire->session_handle, acquire->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(acquire->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + acquire->dev_handle); + return -EINVAL; + } + + if (strcmp(node->name, ctx->dev_name)) { + CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching", + node->name, ctx->dev_name); + return -EINVAL; + } + + rc = cam_context_handle_acquire_hw(ctx, acquire); + if (rc) { + CAM_ERR(CAM_CORE, "Acquire device failed for node %s", + node->name); + __cam_node_handle_acquired_hw_dump(node); + return rc; + } + + CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d", + node->name, ctx->ctx_id); + + return 0; +} + +static int __cam_node_handle_start_dev(struct cam_node *node, + struct cam_start_stop_dev_cmd *start) +{ + struct cam_context *ctx = NULL; + int rc; + + if (!start) + return -EINVAL; + + rc = cam_handle_validate(start->session_handle, start->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = cam_handle_validate(start->session_handle, start->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + start->dev_handle); + return -EINVAL; + } + + if (strcmp(node->name, ctx->dev_name)) { + CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching", + node->name, ctx->dev_name); + return -EINVAL; + } + + rc = cam_context_handle_start_dev(ctx, start); + if (rc) + CAM_ERR(CAM_CORE, "Start failure for node %s", node->name); + + return rc; +} + +static int 
__cam_node_handle_stop_dev(struct cam_node *node, + struct cam_start_stop_dev_cmd *stop) +{ + struct cam_context *ctx = NULL; + int rc; + + if (!stop) + return -EINVAL; + + rc = cam_handle_validate(stop->session_handle, stop->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = cam_handle_validate(stop->session_handle, stop->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + stop->dev_handle); + return -EINVAL; + } + + if (strcmp(node->name, ctx->dev_name)) { + CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching", + node->name, ctx->dev_name); + return -EINVAL; + } + + rc = cam_context_handle_stop_dev(ctx, stop); + if (rc) + CAM_ERR(CAM_CORE, "Stop failure for node %s", node->name); + + return rc; +} + +static int __cam_node_handle_config_dev(struct cam_node *node, + struct cam_config_dev_cmd *config) +{ + struct cam_context *ctx = NULL; + int rc; + + if (!config) + return -EINVAL; + + rc = cam_handle_validate(config->session_handle, config->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = cam_handle_validate(config->session_handle, config->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + config->dev_handle); + return -EINVAL; + } + + if (strcmp(node->name, ctx->dev_name)) { + CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching", + node->name, ctx->dev_name); + return -EINVAL; + } + + rc = cam_context_handle_config_dev(ctx, config); + if (rc) { + if (ctx->state == CAM_CTX_FLUSHED) + CAM_INFO(CAM_CORE, + "Config failure for node %s, it has been 
flushed", + node->name); + else + CAM_ERR(CAM_CORE, "Config failure for node %s", node->name); + } + return rc; +} + +static int __cam_node_handle_flush_dev(struct cam_node *node, + struct cam_flush_dev_cmd *flush) +{ + struct cam_context *ctx = NULL; + int rc; + + if (!flush) + return -EINVAL; + + rc = cam_handle_validate(flush->session_handle, flush->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = cam_handle_validate(flush->session_handle, flush->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(flush->dev_handle); + if (!ctx) { + CAM_ERR_RATE_LIMIT(CAM_CORE, + "Can not get context for handle %d", + flush->dev_handle); + return -EINVAL; + } + + if (strcmp(node->name, ctx->dev_name)) { + CAM_ERR_RATE_LIMIT(CAM_CORE, + "node name %s dev name:%s not matching", + node->name, ctx->dev_name); + return -EINVAL; + } + + rc = cam_context_handle_flush_dev(ctx, flush); + if (rc) + CAM_ERR_RATE_LIMIT(CAM_CORE, + "Flush failure for node %s", node->name); + + return rc; +} + +static int __cam_node_handle_release_dev(struct cam_node *node, + struct cam_release_dev_cmd *release) +{ + int rc = 0; + struct cam_context *ctx = NULL; + + if (!release) + return -EINVAL; + + rc = cam_handle_validate(release->session_handle, release->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = cam_handle_validate(release->session_handle, release->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s", + release->dev_handle, node->name); + return -EINVAL; + } + + if (strcmp(node->name, ctx->dev_name)) { + CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching", + node->name, 
ctx->dev_name); + return -EINVAL; + } + + if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) { + rc = cam_context_handle_release_dev(ctx, release); + if (rc) + CAM_ERR(CAM_CORE, "context release failed for node %s", + node->name); + } else { + CAM_WARN(CAM_CORE, + "node %s context id %u state %d invalid to release hdl", + node->name, ctx->ctx_id, ctx->state); + goto destroy_dev_hdl; + } + + cam_context_putref(ctx); + +destroy_dev_hdl: + rc = cam_destroy_device_hdl(release->dev_handle); + if (rc) + CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s", + node->name); + else + ctx->dev_hdl = -1; + + CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d", + node->name, ctx->ctx_id, + atomic_read(&(ctx->refcount.refcount.refs))); + + return rc; +} + +static int __cam_node_handle_dump_dev(struct cam_node *node, + struct cam_dump_req_cmd *dump) +{ + int rc; + struct cam_context *ctx = NULL; + + if (!dump) + return -EINVAL; + + rc = cam_handle_validate(dump->session_handle, dump->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = cam_handle_validate(dump->session_handle, dump->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(dump->dev_handle); + if (!ctx) { + CAM_ERR_RATE_LIMIT(CAM_CORE, + "Can not get context for handle %d", + dump->dev_handle); + return -EINVAL; + } + + rc = cam_context_handle_dump_dev(ctx, dump); + if (rc) + CAM_ERR_RATE_LIMIT(CAM_CORE, + "Dump failure for node %s", node->name); + + return rc; +} + +static int __cam_node_handle_release_hw_v1(struct cam_node *node, + struct cam_release_hw_cmd_v1 *release) +{ + int rc = 0; + struct cam_context *ctx = NULL; + + if (!release) + return -EINVAL; + + rc = cam_handle_validate(release->session_handle, release->session_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return rc; + } + + rc = 
cam_handle_validate(release->session_handle, release->dev_handle); + if (rc) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return rc; + } + + ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s", + release->dev_handle, node->name); + return -EINVAL; + } + + if (strcmp(node->name, ctx->dev_name)) { + CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching", + node->name, ctx->dev_name); + return -EINVAL; + } + + rc = cam_context_handle_release_hw(ctx, release); + if (rc) + CAM_ERR(CAM_CORE, "context release failed node %s", node->name); + + CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d", + node->name, ctx->ctx_id, + atomic_read(&(ctx->refcount.refcount.refs))); + + return rc; +} + +static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info) +{ + struct cam_context *ctx = NULL; + + if (!info) + return -EINVAL; + + ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + info->dev_hdl); + return -EINVAL; + } + return cam_context_handle_crm_get_dev_info(ctx, info); +} + +static int __cam_node_crm_link_setup( + struct cam_req_mgr_core_dev_link_setup *setup) +{ + int rc; + struct cam_context *ctx = NULL; + + if (!setup) + return -EINVAL; + + ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + setup->dev_hdl); + return -EINVAL; + } + + if (setup->link_enable) + rc = cam_context_handle_crm_link(ctx, setup); + else + rc = cam_context_handle_crm_unlink(ctx, setup); + + return rc; +} + +static int __cam_node_crm_apply_req(struct cam_req_mgr_apply_request *apply) +{ + struct cam_context *ctx = NULL; + + if (!apply) + return -EINVAL; + + ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + 
apply->dev_hdl); + return -EINVAL; + } + + trace_cam_apply_req("Node", ctx->ctx_id, apply->request_id, apply->link_hdl); + + return cam_context_handle_crm_apply_req(ctx, apply); +} + +static int __cam_node_crm_notify_frame_skip( + struct cam_req_mgr_apply_request *apply) +{ + struct cam_context *ctx = NULL; + + if (!apply) + return -EINVAL; + + ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + apply->dev_hdl); + return -EINVAL; + } + + trace_cam_apply_req("Node", ctx->ctx_id, apply->request_id, apply->link_hdl); + + return cam_context_handle_crm_notify_frame_skip(ctx, apply); +} + +static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush) +{ + struct cam_context *ctx = NULL; + + if (!flush) { + CAM_ERR(CAM_CORE, "Invalid flush request payload"); + return -EINVAL; + } + + ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + flush->dev_hdl); + return -EINVAL; + } + + return cam_context_handle_crm_flush_req(ctx, flush); +} + +static int __cam_node_crm_process_evt( + struct cam_req_mgr_link_evt_data *evt_data) +{ + struct cam_context *ctx = NULL; + + if (!evt_data) { + CAM_ERR(CAM_CORE, "Invalid process event request payload"); + return -EINVAL; + } + + ctx = (struct cam_context *) cam_get_device_priv(evt_data->dev_hdl); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + evt_data->dev_hdl); + return -EINVAL; + } + return cam_context_handle_crm_process_evt(ctx, evt_data); +} + +static int __cam_node_crm_dump_req(struct cam_req_mgr_dump_info *dump) +{ + struct cam_context *ctx = NULL; + + if (!dump) { + CAM_ERR(CAM_CORE, "Invalid dump request payload"); + return -EINVAL; + } + + ctx = (struct cam_context *) cam_get_device_priv(dump->dev_hdl); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + dump->dev_hdl); + return -EINVAL; + } + + 
return cam_context_handle_crm_dump_req(ctx, dump); +} + +int cam_node_deinit(struct cam_node *node) +{ + if (node) + memset(node, 0, sizeof(*node)); + + CAM_DBG(CAM_CORE, "deinit complete"); + + return 0; +} + +int cam_node_shutdown(struct cam_node *node) +{ + int i = 0; + int rc = 0; + + if (!node) + return -EINVAL; + + for (i = 0; i < node->ctx_size; i++) { + if (node->ctx_list[i].dev_hdl > 0) { + rc = cam_context_shutdown(&(node->ctx_list[i])); + CAM_DBG(CAM_CORE, + "Node [%s] invoking shutdown on context [%d], rc %d", + node->name, i, rc); + } + } + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_ICP, "PRESIL-ICP-B2B-HFI-INIT No Shutdown No Deinit No HFIfreeMem"); + return 0; + } + + if (node->hw_mgr_intf.hw_close) + node->hw_mgr_intf.hw_close(node->hw_mgr_intf.hw_mgr_priv, + NULL); + + return 0; +} + +static int __cam_node_handle_synx_test( + struct cam_node *node, void *params) +{ + int i, rc = -EINVAL; + + for (i = 0; i < node->ctx_size; i++) { + if (node->ctx_list[i].dev_hdl > 0) { + CAM_ERR(CAM_CORE, "Node [%s] has active context [%d]", + node->name, i); + return -EAGAIN; + } + } + + if (node->hw_mgr_intf.synx_trigger) + rc = node->hw_mgr_intf.synx_trigger( + node->hw_mgr_intf.hw_mgr_priv, params); + + return rc; +} + +int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf, + struct cam_context *ctx_list, uint32_t ctx_size, char *name) +{ + int rc = 0; + int i; + + if (!node || !hw_mgr_intf || + sizeof(node->hw_mgr_intf) != sizeof(*hw_mgr_intf)) { + return -EINVAL; + } + + strscpy(node->name, name, sizeof(node->name)); + + memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf)); + node->crm_node_intf.apply_req = __cam_node_crm_apply_req; + node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info; + node->crm_node_intf.link_setup = __cam_node_crm_link_setup; + node->crm_node_intf.flush_req = __cam_node_crm_flush_req; + node->crm_node_intf.process_evt = __cam_node_crm_process_evt; + node->crm_node_intf.dump_req = 
__cam_node_crm_dump_req; + node->crm_node_intf.notify_frame_skip = + __cam_node_crm_notify_frame_skip; + + mutex_init(&node->list_mutex); + INIT_LIST_HEAD(&node->free_ctx_list); + node->ctx_list = ctx_list; + node->ctx_size = ctx_size; + for (i = 0; i < ctx_size; i++) { + if (!ctx_list[i].state_machine) { + CAM_ERR(CAM_CORE, + "camera context %d is not initialized", i); + rc = -1; + goto err; + } + INIT_LIST_HEAD(&ctx_list[i].list); + list_add_tail(&ctx_list[i].list, &node->free_ctx_list); + ctx_list[i].node = node; + } + + node->state = CAM_NODE_STATE_INIT; +err: + CAM_DBG(CAM_CORE, "Exit. (rc = %d)", rc); + return rc; +} + +static int cam_node_translate_acquire_dev_from_v1 ( + struct cam_acquire_dev_cmd *acquire, + struct cam_acquire_dev_cmd_unified *acquire_unified) { + + acquire_unified->struct_version = CAM_ACQUIRE_DEV_STRUCT_VERSION_1; + acquire_unified->session_handle = acquire->session_handle; + acquire_unified->dev_handle = acquire->dev_handle; + acquire_unified->handle_type = acquire->handle_type; + acquire_unified->num_resources = acquire->num_resources; + acquire_unified->resource_hdl = acquire->resource_hdl; + + return 0; +} +static int cam_node_translate_acquire_dev_from_v2 ( + struct cam_acquire_dev_cmd_v2 *acquire, + struct cam_acquire_dev_cmd_unified *acquire_unified) { + + acquire_unified->struct_version = acquire->struct_version; + acquire_unified->session_handle = acquire->session_handle; + acquire_unified->dev_handle = acquire->dev_handle; + acquire_unified->handle_type = acquire->handle_type; + acquire_unified->num_resources = acquire->num_resources; + acquire_unified->resource_hdl = acquire->resource_hdl; + + return 0; +} + +static int cam_node_translate_acquire_dev_to_v1 ( + struct cam_acquire_dev_cmd *acquire, + struct cam_acquire_dev_cmd_unified *acquire_unified) { + + acquire->session_handle = acquire_unified->session_handle; + acquire->dev_handle = acquire_unified->dev_handle; + acquire->handle_type = acquire_unified->handle_type; + 
acquire->num_resources = acquire_unified->num_resources; + acquire->resource_hdl = acquire_unified->resource_hdl; + + return 0; +} +static int cam_node_translate_acquire_dev_to_v2 ( + struct cam_acquire_dev_cmd_v2 *acquire, + struct cam_acquire_dev_cmd_unified *acquire_unified) { + + acquire->struct_version = acquire_unified->struct_version; + acquire->session_handle = acquire_unified->session_handle; + acquire->dev_handle = acquire_unified->dev_handle; + acquire->handle_type = acquire_unified->handle_type; + acquire->num_resources = acquire_unified->num_resources; + acquire->resource_hdl = acquire_unified->resource_hdl; + + return 0; +} + + +int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd) +{ + int rc = 0; + + if (!cmd) + return -EINVAL; + + CAM_DBG(CAM_CORE, "handle cmd %d", cmd->op_code); + + switch (cmd->op_code) { + case CAM_QUERY_CAP: + fallthrough; + case CAM_QUERY_CAP_V2: { + struct cam_query_cap_cmd query; + + if (copy_from_user(&query, u64_to_user_ptr(cmd->handle), + sizeof(query))) { + rc = -EFAULT; + break; + } + + rc = __cam_node_handle_query_cap(cmd->op_code, node, &query); + if (rc) { + CAM_ERR(CAM_CORE, "querycap is failed(rc = %d)", + rc); + break; + } + + if (copy_to_user(u64_to_user_ptr(cmd->handle), &query, sizeof(query))) + rc = -EFAULT; + + break; + } + case CAM_ACQUIRE_DEV: { + struct cam_acquire_dev_cmd acquire; + struct cam_acquire_dev_cmd_unified acquire_unified; + + if (copy_from_user(&acquire, u64_to_user_ptr(cmd->handle), + sizeof(acquire))) { + rc = -EFAULT; + break; + } + + rc = cam_node_translate_acquire_dev_from_v1(&acquire, &acquire_unified); + if (rc) { + CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)", + rc); + break; + } + + rc = __cam_node_handle_acquire_dev(node, &acquire_unified); + if (rc) { + CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)", + rc); + break; + } + + rc = cam_node_translate_acquire_dev_to_v1(&acquire, &acquire_unified); + if (rc) { + CAM_ERR(CAM_CORE, "acquire device failed(rc = 
%d)", + rc); + break; + } + + if (copy_to_user(u64_to_user_ptr(cmd->handle), &acquire, + sizeof(acquire))) + rc = -EFAULT; + break; + } + case CAM_ACQUIRE_DEV_V2: { + uint32_t api_version; + void *acquire_ptr = NULL; + size_t acquire_size; + struct cam_acquire_dev_cmd_unified acquire_unified; + + if (copy_from_user(&api_version, (void __user *)cmd->handle, + sizeof(api_version))) { + rc = -EFAULT; + break; + } + + if (api_version == 2) { + acquire_size = sizeof(struct cam_acquire_dev_cmd_v2); + } else { + CAM_ERR(CAM_CORE, "Unsupported api version %d", + api_version); + rc = -EINVAL; + break; + } + + acquire_ptr = CAM_MEM_ZALLOC(acquire_size, GFP_KERNEL); + if (!acquire_ptr) { + CAM_ERR(CAM_CORE, "No memory for acquire HW"); + rc = -ENOMEM; + break; + } + + if (copy_from_user(acquire_ptr, (void __user *)cmd->handle, + acquire_size)) { + rc = -EFAULT; + goto acquire_dev_free; + } + + rc = cam_node_translate_acquire_dev_from_v2(acquire_ptr, + &acquire_unified); + if (rc) { + CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)", + rc); + goto acquire_dev_free; + } + + /* Calling into the core acquire_dev with the API version skipped */ + rc = __cam_node_handle_acquire_dev(node, &acquire_unified); + if (rc) { + CAM_ERR(CAM_CORE, + "acquire hw failed(rc = %d)", rc); + goto acquire_dev_free; + } + + rc = cam_node_translate_acquire_dev_to_v2(acquire_ptr, + &acquire_unified); + if (rc) { + CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)", + rc); + goto acquire_dev_free;; + } + + if (copy_to_user((void __user *)cmd->handle, acquire_ptr, + acquire_size)) + rc = -EFAULT; + +acquire_dev_free: + CAM_MEM_FREE(acquire_ptr); + break; + } + case CAM_ACQUIRE_HW: { + uint32_t api_version; + void *acquire_ptr = NULL; + size_t acquire_size; + + if (copy_from_user(&api_version, (void __user *)cmd->handle, + sizeof(api_version))) { + rc = -EFAULT; + break; + } + + if (api_version == 1) { + acquire_size = sizeof(struct cam_acquire_hw_cmd_v1); + } else if (api_version == 2) { + 
acquire_size = sizeof(struct cam_acquire_hw_cmd_v2); + } else { + CAM_ERR(CAM_CORE, "Unsupported api version %d", + api_version); + rc = -EINVAL; + break; + } + + acquire_ptr = CAM_MEM_ZALLOC(acquire_size, GFP_KERNEL); + if (!acquire_ptr) { + CAM_ERR(CAM_CORE, "No memory for acquire HW"); + rc = -ENOMEM; + break; + } + + if (copy_from_user(acquire_ptr, (void __user *)cmd->handle, + acquire_size)) { + rc = -EFAULT; + goto acquire_free; + } + + if (api_version == 1) { + rc = __cam_node_handle_acquire_hw_v1(node, acquire_ptr); + if (rc) { + CAM_ERR(CAM_CORE, + "acquire hw failed(rc = %d)", rc); + goto acquire_free; + } + } else if (api_version == 2) { + rc = __cam_node_handle_acquire_hw_v2(node, acquire_ptr); + if (rc) { + CAM_ERR(CAM_CORE, + "acquire hw failed(rc = %d)", rc); + goto acquire_free; + } + } + + if (copy_to_user((void __user *)cmd->handle, acquire_ptr, + acquire_size)) + rc = -EFAULT; + +acquire_free: + CAM_MEM_FREE(acquire_ptr); + break; + } + case CAM_START_DEV: { + struct cam_start_stop_dev_cmd start; + + if (copy_from_user(&start, u64_to_user_ptr(cmd->handle), + sizeof(start))) + rc = -EFAULT; + else { + rc = __cam_node_handle_start_dev(node, &start); + if (rc) + CAM_ERR(CAM_CORE, + "start device failed(rc = %d)", rc); + } + break; + } + case CAM_STOP_DEV: { + struct cam_start_stop_dev_cmd stop; + + if (copy_from_user(&stop, u64_to_user_ptr(cmd->handle), + sizeof(stop))) + rc = -EFAULT; + else { + rc = __cam_node_handle_stop_dev(node, &stop); + if (rc) + CAM_ERR(CAM_CORE, + "stop device failed(rc = %d)", rc); + } + break; + } + case CAM_CONFIG_DEV: { + struct cam_config_dev_cmd config; + + if (copy_from_user(&config, u64_to_user_ptr(cmd->handle), + sizeof(config))) + rc = -EFAULT; + else { + rc = __cam_node_handle_config_dev(node, &config); + if (rc) + CAM_ERR(CAM_CORE, + "config device failed(rc = %d)", rc); + } + break; + } + case CAM_RELEASE_DEV: { + struct cam_release_dev_cmd release; + + if (copy_from_user(&release, u64_to_user_ptr(cmd->handle), 
+ sizeof(release))) + rc = -EFAULT; + else { + rc = __cam_node_handle_release_dev(node, &release); + if (rc) + CAM_ERR(CAM_CORE, + "release device failed(rc = %d)", rc); + } + break; + } + case CAM_RELEASE_HW: { + uint32_t api_version; + size_t release_size; + void *release_ptr = NULL; + + if (copy_from_user(&api_version, (void __user *)cmd->handle, + sizeof(api_version))) { + rc = -EFAULT; + break; + } + + if (api_version == 1) { + release_size = sizeof(struct cam_release_hw_cmd_v1); + } else { + CAM_ERR(CAM_CORE, "Unsupported api version %d", + api_version); + rc = -EINVAL; + break; + } + + release_ptr = CAM_MEM_ZALLOC(release_size, GFP_KERNEL); + if (!release_ptr) { + CAM_ERR(CAM_CORE, "No memory for release HW"); + rc = -ENOMEM; + break; + } + + if (copy_from_user(release_ptr, (void __user *)cmd->handle, + release_size)) { + rc = -EFAULT; + goto release_free; + } + + if (api_version == 1) { + rc = __cam_node_handle_release_hw_v1(node, release_ptr); + if (rc) + CAM_ERR(CAM_CORE, + "release device failed(rc = %d)", rc); + } + +release_free: + CAM_MEM_FREE(release_ptr); + break; + } + case CAM_FLUSH_REQ: { + struct cam_flush_dev_cmd flush; + + if (copy_from_user(&flush, u64_to_user_ptr(cmd->handle), + sizeof(flush))) + rc = -EFAULT; + else { + rc = __cam_node_handle_flush_dev(node, &flush); + if (rc) + CAM_ERR(CAM_CORE, + "flush device failed(rc = %d)", rc); + } + break; + } + case CAM_DUMP_REQ: { + struct cam_dump_req_cmd dump; + + if (copy_from_user(&dump, u64_to_user_ptr(cmd->handle), + sizeof(dump))) { + rc = -EFAULT; + break; + } + rc = __cam_node_handle_dump_dev(node, &dump); + if (rc) { + CAM_ERR(CAM_CORE, + "Dump device %s failed(rc = %d) ", + node->name, rc); + break; + } + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &dump, sizeof(dump))) { + CAM_ERR(CAM_CORE, + "Dump device %s copy_to_user fail", + node->name); + rc = -EFAULT; + } + break; + } + case CAM_SYNX_TEST_TRIGGER: { + struct cam_synx_test_params synx_params; + + if 
(copy_from_user(&synx_params, u64_to_user_ptr(cmd->handle), + sizeof(synx_params))) { + rc = -EFAULT; + break; + } + + rc = __cam_node_handle_synx_test(node, &synx_params); + if (rc) + CAM_ERR(CAM_CORE, "Synx test on %s failed(rc = %d)", + node->name, rc); + break; + } + default: + CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code); + rc = -EINVAL; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_node.h b/qcom/opensource/camera-kernel/drivers/cam_core/cam_node.h new file mode 100644 index 0000000000..d567b4b409 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_node.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_NODE_H_ +#define _CAM_NODE_H_ + +#include +#include "cam_context.h" +#include "cam_hw_mgr_intf.h" +#include "cam_req_mgr_interface.h" + + +#define CAM_NODE_STATE_UNINIT 0 +#define CAM_NODE_STATE_INIT 1 + +/** + * struct cam_node - Singleton Node for camera HW devices + * + * @name: Name for struct cam_node + * @state: Node state: + * 0 = uninitialized, 1 = initialized + * @device_idx: Index to identify which device this node belongs to + * @list_mutex: Mutex for the context pool + * @free_ctx_list: Free context pool list + * @ctx_list: Context list + * @ctx_size: Context list size + * @hw_mgr_intf: Interface for cam_node to HW + * @crm_node_intf: Interface for the CRM to cam_node + * @sd_handler: Shutdown handler for this subdev + * + */ +struct cam_node { + char name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + uint32_t state; + uint32_t device_idx; + + /* context pool */ + struct mutex list_mutex; + struct list_head free_ctx_list; + struct cam_context *ctx_list; + uint32_t ctx_size; + + /* interfaces */ + struct cam_hw_mgr_intf hw_mgr_intf; + struct cam_req_mgr_kmd_ops crm_node_intf; + + int (*sd_handler)(struct 
v4l2_subdev *sd, + struct v4l2_subdev_fh *fh); +}; + +/** + * cam_node_handle_ioctl() + * + * @brief: Handle ioctl commands + * + * @node: Node handle + * @cmd: IOCTL command + * + */ +int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd); + +/** + * cam_node_deinit() + * + * @brief: Deinitialization function for the Node interface + * + * @node: Node handle + * + */ +int cam_node_deinit(struct cam_node *node); + +/** + * cam_node_shutdown() + * + * @brief: Shutdowns/Closes the cam node. + * + * @node: Cam_node pointer + * + */ +int cam_node_shutdown(struct cam_node *node); + +/** + * cam_node_init() + * + * @brief: Initialization function for the Node interface. + * + * @node: Cam_node pointer + * @hw_mgr_intf: HW manager interface blob + * @ctx_list: List of cam_contexts to be added + * @ctx_size: Size of the cam_context + * @name: Name for the node + * + */ +int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf, + struct cam_context *ctx_list, uint32_t ctx_size, char *name); + +/** + * cam_node_put_ctxt_to_free_list() + * + * @brief: Put context in node free list. + * + * @ref: Context's kref object + * + */ +void cam_node_put_ctxt_to_free_list(struct kref *ref); + +/** + * cam_get_dev_handle_info() + * + * @brief: provides the active dev index. + * + * @handle: pointer to struct v4l2_dev + * @ctx: pointer to struct cam_context + * @dev_index: dev index + * + */ +int32_t cam_get_dev_handle_info(uint64_t handle, + struct cam_context **ctx, int32_t dev_index); + +#endif /* _CAM_NODE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_core/cam_subdev.c b/qcom/opensource/camera-kernel/drivers/cam_core/cam_subdev.c new file mode 100644 index 0000000000..45eb0947d7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_core/cam_subdev.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2018, 2021 The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_subdev.h" +#include "cam_node.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +/** + * cam_subdev_subscribe_event() + * + * @brief: function to subscribe to v4l2 events + * + * @sd: Pointer to struct v4l2_subdev. + * @fh: Pointer to struct v4l2_fh. + * @sub: Pointer to struct v4l2_event_subscription. + */ +static int cam_subdev_subscribe_event(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL); +} + +/** + * cam_subdev_unsubscribe_event() + * + * @brief: function to unsubscribe from v4l2 events + * + * @sd: Pointer to struct v4l2_subdev. + * @fh: Pointer to struct v4l2_fh. + * @sub: Pointer to struct v4l2_event_subscription. + */ +static int cam_subdev_unsubscribe_event(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return v4l2_event_unsubscribe(fh, sub); +} + +static long cam_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, + void *arg) +{ + long rc; + struct cam_node *node = + (struct cam_node *) v4l2_get_subdevdata(sd); + struct v4l2_subdev_fh *fh = (struct v4l2_subdev_fh *)arg; + + if (!node || node->state == CAM_NODE_STATE_UNINIT) { + rc = -EINVAL; + goto end; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_LOCK); + rc = cam_node_handle_ioctl(node, + (struct cam_control *) arg); + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + break; + case CAM_SD_SHUTDOWN: + if (!cam_req_mgr_is_shutdown()) { + CAM_WARN(CAM_CORE, "SD shouldn't come from user space"); + return 0; + } + + if (!node->sd_handler) { + CAM_ERR(CAM_CORE, + "No shutdown routine for %s", node->name); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_CORE, "Shutdown for %s from media device", node->name); + rc = node->sd_handler(sd, fh); + if (rc) + CAM_ERR(CAM_CORE, + "shutdown device 
failed(rc = %d) for %s", + rc, node->name); + break; + default: + CAM_ERR(CAM_CORE, "Invalid command %d for %s", cmd, + node->name); + rc = -EINVAL; + } +end: + return rc; +} + +#ifdef CONFIG_COMPAT +static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + struct cam_control cmd_data; + int rc; + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_CORE, "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } + rc = cam_subdev_ioctl(sd, cmd, &cmd_data); + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { + CAM_ERR(CAM_CORE, + "Failed to copy to user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + + return rc; +} +#endif + +const struct v4l2_subdev_core_ops cam_subdev_core_ops = { + .ioctl = cam_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_subdev_compat_ioctl, +#endif + .subscribe_event = cam_subdev_subscribe_event, + .unsubscribe_event = cam_subdev_unsubscribe_event, +}; + +static const struct v4l2_subdev_ops cam_subdev_ops = { + .core = &cam_subdev_core_ops, +}; + +int cam_subdev_remove(struct cam_subdev *sd) +{ + if (!sd) + return -EINVAL; + + cam_unregister_subdev(sd); + cam_node_deinit((struct cam_node *)sd->token); + CAM_MEM_FREE(sd->token); + memset(sd, 0, sizeof(struct cam_subdev)); + + return 0; +} + +int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev, + char *name, uint32_t dev_type) +{ + int rc; + struct cam_node *node = NULL; + + if (!sd || !pdev || !name) + return -EINVAL; + + node = CAM_MEM_ZALLOC(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + /* Setup camera v4l2 subdevice */ + sd->pdev = pdev; + sd->name = name; + sd->ops = &cam_subdev_ops; + sd->token = node; + sd->sd_flags = + V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; + sd->ent_function = dev_type; + + rc = 
cam_register_subdev(sd); + if (rc) { + CAM_ERR(CAM_CORE, "cam_register_subdev() failed for dev: %s", + sd->name); + goto err; + } + platform_set_drvdata(sd->pdev, sd); + return rc; +err: + CAM_MEM_FREE(node); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw.c b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw.c new file mode 100644 index 0000000000..85da1c3024 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw.c @@ -0,0 +1,5406 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "cam_cpas_hw.h" +#include "cam_cpas_hw_intf.h" +#include "cam_cpas_soc.h" +#include "cam_req_mgr_dev.h" +#include "cam_smmu_api.h" +#include "cam_compat.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_interface.h" +#include "cam_vmrm_interface.h" + +#define CAM_CPAS_LOG_BUF_LEN 512 +#define CAM_CPAS_APPLY_TYPE_START 1 +#define CAM_CPAS_APPLY_TYPE_STOP 2 +#define CAM_CPAS_APPLY_TYPE_UPDATE 3 + +static uint cam_min_camnoc_ib_bw; +module_param(cam_min_camnoc_ib_bw, uint, 0644); + +static void cam_cpas_update_monitor_array(struct cam_hw_info *cpas_hw, + const char *identifier_string, int32_t identifier_value); +static void cam_cpas_dump_monitor_array( + struct cam_hw_info *cpas_hw); +static int cam_cpas_log_vote(struct cam_hw_info *cpas_hw, bool ddr_only); + +static void cam_cpas_process_drv_bw_overrides( + struct cam_cpas_bus_client *bus_client, uint64_t *high_ab, uint64_t *high_ib, + uint64_t *low_ab, uint64_t *low_ib, const struct cam_cpas_debug_settings *cpas_settings) +{ + uint64_t curr_ab_high = *high_ab; + uint64_t curr_ib_high = *high_ib; + uint64_t curr_ab_low = *low_ab; + uint64_t curr_ib_low = *low_ib; + size_t name_len = strlen(bus_client->common_data.name); + + if 
(!cpas_settings) { + CAM_ERR(CAM_CPAS, "Invalid cpas debug settings"); + return; + } + + if (strnstr(bus_client->common_data.name, "cam_ife_0_drv", + name_len)) { + if (cpas_settings->cam_ife_0_drv_ab_high_bw) + *high_ab = cpas_settings->cam_ife_0_drv_ab_high_bw; + if (cpas_settings->cam_ife_0_drv_ib_high_bw) + *high_ib = cpas_settings->cam_ife_0_drv_ib_high_bw; + if (cpas_settings->cam_ife_0_drv_ab_low_bw) + *low_ab = cpas_settings->cam_ife_0_drv_ab_low_bw; + if (cpas_settings->cam_ife_0_drv_ib_low_bw) + *low_ib = cpas_settings->cam_ife_0_drv_ib_low_bw; + if (cpas_settings->cam_ife_0_drv_low_set_zero) { + *low_ab = 0; + *low_ib = 0; + } + } else if (strnstr(bus_client->common_data.name, "cam_ife_1_drv", + name_len)) { + if (cpas_settings->cam_ife_1_drv_ab_high_bw) + *high_ab = cpas_settings->cam_ife_1_drv_ab_high_bw; + if (cpas_settings->cam_ife_1_drv_ib_high_bw) + *high_ib = cpas_settings->cam_ife_1_drv_ib_high_bw; + if (cpas_settings->cam_ife_1_drv_ab_low_bw) + *low_ab = cpas_settings->cam_ife_1_drv_ab_low_bw; + if (cpas_settings->cam_ife_1_drv_ib_low_bw) + *low_ib = cpas_settings->cam_ife_1_drv_ib_low_bw; + if (cpas_settings->cam_ife_1_drv_low_set_zero) { + *low_ab = 0; + *low_ib = 0; + } + } else if (strnstr(bus_client->common_data.name, "cam_ife_2_drv", + name_len)) { + if (cpas_settings->cam_ife_2_drv_ab_high_bw) + *high_ab = cpas_settings->cam_ife_2_drv_ab_high_bw; + if (cpas_settings->cam_ife_2_drv_ib_high_bw) + *high_ib = cpas_settings->cam_ife_2_drv_ib_high_bw; + if (cpas_settings->cam_ife_2_drv_ab_low_bw) + *low_ab = cpas_settings->cam_ife_2_drv_ab_low_bw; + if (cpas_settings->cam_ife_2_drv_ib_low_bw) + *low_ib = cpas_settings->cam_ife_2_drv_ib_low_bw; + if (cpas_settings->cam_ife_2_drv_low_set_zero) { + *low_ab = 0; + *low_ib = 0; + } + } else { + CAM_ERR(CAM_CPAS, "unknown mnoc port: %s, bw override failed", + bus_client->common_data.name); + return; + } + + CAM_INFO(CAM_CPAS, + "Overriding mnoc bw for: %s with [AB IB] high: [%llu %llu], low: [%llu 
%llu], curr high: [%llu %llu], curr low: [%llu %llu]", + bus_client->common_data.name, *high_ab, *high_ib, *low_ab, *low_ib, + curr_ab_high, curr_ib_high, curr_ab_low, curr_ib_low); +} + +static void cam_cpas_process_bw_overrides( + struct cam_cpas_bus_client *bus_client, uint64_t *ab, uint64_t *ib, + const struct cam_cpas_debug_settings *cpas_settings) +{ + uint64_t curr_ab = *ab; + uint64_t curr_ib = *ib; + size_t name_len = strlen(bus_client->common_data.name); + + if (!cpas_settings) { + CAM_ERR(CAM_CPAS, "Invalid cpas debug settings"); + return; + } + + if (strnstr(bus_client->common_data.name, "cam_hf_0", name_len)) { + if (cpas_settings->mnoc_hf_0_ab_bw) + *ab = cpas_settings->mnoc_hf_0_ab_bw; + if (cpas_settings->mnoc_hf_0_ib_bw) + *ib = cpas_settings->mnoc_hf_0_ib_bw; + } else if (strnstr(bus_client->common_data.name, "cam_hf_1", + name_len)) { + if (cpas_settings->mnoc_hf_1_ab_bw) + *ab = cpas_settings->mnoc_hf_1_ab_bw; + if (cpas_settings->mnoc_hf_1_ib_bw) + *ib = cpas_settings->mnoc_hf_1_ib_bw; + } else if (strnstr(bus_client->common_data.name, "cam_sf_0", + name_len)) { + if (cpas_settings->mnoc_sf_0_ab_bw) + *ab = cpas_settings->mnoc_sf_0_ab_bw; + if (cpas_settings->mnoc_sf_0_ib_bw) + *ib = cpas_settings->mnoc_sf_0_ib_bw; + } else if (strnstr(bus_client->common_data.name, "cam_sf_1", + name_len)) { + if (cpas_settings->mnoc_sf_1_ab_bw) + *ab = cpas_settings->mnoc_sf_1_ab_bw; + if (cpas_settings->mnoc_sf_1_ib_bw) + *ib = cpas_settings->mnoc_sf_1_ib_bw; + } else if (strnstr(bus_client->common_data.name, "cam_sf_icp", + name_len)) { + if (cpas_settings->mnoc_sf_icp_ab_bw) + *ab = cpas_settings->mnoc_sf_icp_ab_bw; + if (cpas_settings->mnoc_sf_icp_ib_bw) + *ib = cpas_settings->mnoc_sf_icp_ib_bw; + } else { + CAM_ERR(CAM_CPAS, "unknown mnoc port: %s, bw override failed", + bus_client->common_data.name); + return; + } + + CAM_INFO(CAM_CPAS, + "Overriding mnoc bw for: %s with ab: %llu, ib: %llu, curr_ab: %llu, curr_ib: %llu", + bus_client->common_data.name, 
*ab, *ib, curr_ab, curr_ib); +} + +int cam_cpas_util_reg_read(struct cam_hw_info *cpas_hw, + enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + uint32_t value; + int reg_base_index; + + if (!reg_info->enable) + return 0; + + reg_base_index = cpas_core->regbase_index[reg_base]; + if (reg_base_index == -1) + return -EINVAL; + + value = cam_io_r_mb( + soc_info->reg_map[reg_base_index].mem_base + reg_info->offset); + + CAM_INFO(CAM_CPAS, "Base[%d] Offset[0x%08x] Value[0x%08x]", + reg_base, reg_info->offset, value); + + return 0; +} + +int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw, + enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + uint32_t value; + int reg_base_index; + + if (reg_info->enable == false) + return 0; + + reg_base_index = cpas_core->regbase_index[reg_base]; + if (reg_base_index == -1) + return -EINVAL; + + if (reg_info->masked_value) { + value = cam_io_r_mb( + soc_info->reg_map[reg_base_index].mem_base + + reg_info->offset); + value = value & (~reg_info->mask); + value = value | (reg_info->value << reg_info->shift); + } else { + value = reg_info->value; + } + + CAM_DBG(CAM_CPAS, "Base[%d]:[0x%08x] Offset[0x%08x] Value[0x%08x]", + reg_base, soc_info->reg_map[reg_base_index].mem_base, reg_info->offset, value); + + cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base + + reg_info->offset); + + return 0; +} + +static int cam_cpas_util_vote_bus_client_level( + struct cam_cpas_bus_client *bus_client, unsigned int level) +{ + int rc = 0; + + if (!bus_client->valid) { + CAM_ERR(CAM_CPAS, "bus client not valid"); + rc = -EINVAL; + goto end; + } + + if (level >= CAM_MAX_VOTE) { + CAM_ERR(CAM_CPAS, + "Invalid votelevel=%d,usecases=%d,Bus client=[%s]", + 
level, bus_client->common_data.num_usecases, + bus_client->common_data.name); + return -EINVAL; + } + + if (level == bus_client->curr_vote_level) + goto end; + + rc = cam_soc_bus_client_update_request(bus_client->soc_bus_client, + level); + if (rc) { + CAM_ERR(CAM_CPAS, "Client: %s update request failed rc: %d", + bus_client->common_data.name, rc); + goto end; + } + bus_client->curr_vote_level = level; + +end: + return rc; +} + +static inline void cam_cpas_util_validate_rt_bw(struct cam_cpas_private_soc *soc_private, + struct cam_cpas *cpas_core, struct cam_cpas_bus_client *bus_client, + struct cam_cpas_axi_bw_info *curr_vote) +{ + int i; + uint64_t total_rt_ab_bw = 0, total_rt_ib_bw = 0; + const struct camera_debug_settings *cam_debug = NULL; + struct cam_cpas_axi_port *curr_axi_port = + container_of(bus_client, struct cam_cpas_axi_port, bus_client); + + cam_debug = cam_debug_get_settings(); + if ((!curr_axi_port->is_rt) || (!soc_private->cam_max_rt_axi_bw) || + debug_disable_rt_clk_bw_limit || (cam_debug && cam_debug->cpas_settings.is_updated)) + return; + + if (curr_vote->vote_type == CAM_CPAS_VOTE_TYPE_HLOS) { + total_rt_ab_bw = curr_vote->hlos_vote.ab; + total_rt_ib_bw = curr_vote->hlos_vote.ib; + } else { + total_rt_ab_bw = curr_vote->drv_vote.high.ab; + total_rt_ib_bw = curr_vote->drv_vote.high.ib; + } + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if ((!cpas_core->axi_port[i].is_rt) || + (cpas_core->axi_port[i].bus_client.soc_bus_client == + bus_client->soc_bus_client)) + continue; + + if (cpas_core->axi_port[i].applied_bw.vote_type == CAM_CPAS_VOTE_TYPE_HLOS) { + total_rt_ab_bw += cpas_core->axi_port[i].applied_bw.hlos_vote.ab; + total_rt_ib_bw += cpas_core->axi_port[i].applied_bw.hlos_vote.ib; + } else { + total_rt_ab_bw += cpas_core->axi_port[i].applied_bw.drv_vote.high.ab; + total_rt_ib_bw += cpas_core->axi_port[i].applied_bw.drv_vote.high.ib; + } + } + + if ((total_rt_ab_bw > soc_private->cam_max_rt_axi_bw) || + (total_rt_ib_bw > 
soc_private->cam_max_rt_axi_bw)) + CAM_WARN(CAM_CPAS, + "Requested RT BW[AB IB]: [%llu %llu] exceeds max supported value: %llu curr client: %s", + total_rt_ab_bw, total_rt_ib_bw, soc_private->cam_max_rt_axi_bw, + bus_client->common_data.name); + +} + +static int cam_cpas_util_vote_drv_bus_client_bw(struct cam_cpas_private_soc *soc_private, + struct cam_cpas *cpas_core, struct cam_cpas_bus_client *bus_client, + struct cam_cpas_axi_bw_info *curr_vote, struct cam_cpas_axi_bw_info *applied_vote) +{ + int rc = 0; + const struct camera_debug_settings *cam_debug = NULL; + + if (!bus_client->valid) { + CAM_ERR(CAM_CPAS, "bus client: %s not valid", + bus_client->common_data.name); + rc = -EINVAL; + goto end; + } + + mutex_lock(&bus_client->lock); + if ((curr_vote->drv_vote.high.ab > 0) && + (curr_vote->drv_vote.high.ab < CAM_CPAS_AXI_MIN_MNOC_AB_BW)) + curr_vote->drv_vote.high.ab = CAM_CPAS_AXI_MIN_MNOC_AB_BW; + + if ((curr_vote->drv_vote.high.ib > 0) && + (curr_vote->drv_vote.high.ib < CAM_CPAS_AXI_MIN_MNOC_IB_BW)) + curr_vote->drv_vote.high.ib = CAM_CPAS_AXI_MIN_MNOC_IB_BW; + + if ((curr_vote->drv_vote.low.ab > 0) && + (curr_vote->drv_vote.low.ab < CAM_CPAS_AXI_MIN_MNOC_AB_BW)) + curr_vote->drv_vote.low.ab = CAM_CPAS_AXI_MIN_MNOC_AB_BW; + + if ((curr_vote->drv_vote.low.ib > 0) && + (curr_vote->drv_vote.low.ib < CAM_CPAS_AXI_MIN_MNOC_IB_BW)) + curr_vote->drv_vote.low.ib = CAM_CPAS_AXI_MIN_MNOC_IB_BW; + + cam_debug = cam_debug_get_settings(); + + if ((curr_vote->drv_vote.high.ab || curr_vote->drv_vote.high.ib || + curr_vote->drv_vote.low.ab || curr_vote->drv_vote.low.ib) && + cam_debug && cam_debug->cpas_settings.is_updated) + cam_cpas_process_drv_bw_overrides(bus_client, &curr_vote->drv_vote.high.ab, + &curr_vote->drv_vote.high.ib, &curr_vote->drv_vote.low.ab, + &curr_vote->drv_vote.low.ib, &cam_debug->cpas_settings); + + if (debug_drv) + CAM_INFO(CAM_CPAS, "Bus_client: %s, DRV vote high=[%llu %llu] low=[%llu %llu]", + bus_client->common_data.name, 
curr_vote->drv_vote.high.ab, + curr_vote->drv_vote.high.ib, curr_vote->drv_vote.low.ab, + curr_vote->drv_vote.low.ib); + + CAM_DBG(CAM_CPAS, "Bus_client: %s, DRV vote high=[%llu %llu] low=[%llu %llu]", + bus_client->common_data.name, curr_vote->drv_vote.high.ab, + curr_vote->drv_vote.high.ib, curr_vote->drv_vote.low.ab, + curr_vote->drv_vote.low.ib); + + cam_cpas_util_validate_rt_bw(soc_private, cpas_core, bus_client, curr_vote); + + rc = cam_soc_bus_client_update_bw(bus_client->soc_bus_client, curr_vote->drv_vote.high.ab, + curr_vote->drv_vote.high.ib, CAM_SOC_BUS_PATH_DATA_DRV_HIGH); + if (rc) { + CAM_ERR(CAM_CPAS, "Update bw failed, Bus path: %s ab[%llu] ib[%llu]", + cam_soc_bus_path_data_to_str(CAM_SOC_BUS_PATH_DATA_DRV_HIGH), + curr_vote->drv_vote.high.ab, curr_vote->drv_vote.high.ib); + goto unlock_client; + } + + rc = cam_soc_bus_client_update_bw(bus_client->soc_bus_client, curr_vote->drv_vote.low.ab, + curr_vote->drv_vote.low.ib, CAM_SOC_BUS_PATH_DATA_DRV_LOW); + if (rc) { + CAM_ERR(CAM_CPAS, "Update bw failed, Bus path: %s ab[%llu] ib[%llu]", + cam_soc_bus_path_data_to_str(CAM_SOC_BUS_PATH_DATA_DRV_LOW), + curr_vote->drv_vote.low.ab, curr_vote->drv_vote.low.ib); + goto unlock_client; + } + + if (applied_vote) + memcpy(applied_vote, curr_vote, sizeof(struct cam_cpas_axi_bw_info)); + +unlock_client: + mutex_unlock(&bus_client->lock); +end: + return rc; +} + +static int cam_cpas_util_vote_hlos_bus_client_bw( + struct cam_cpas_private_soc *soc_private, struct cam_cpas *cpas_core, + struct cam_cpas_bus_client *bus_client, bool is_camnoc_bw, + struct cam_cpas_axi_bw_info *curr_vote, struct cam_cpas_axi_bw_info *applied_vote) +{ + int rc = 0; + uint64_t min_camnoc_ib_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW; + const struct camera_debug_settings *cam_debug = NULL; + + if (!bus_client->valid) { + CAM_ERR(CAM_CPAS, "bus client: %s not valid", + bus_client->common_data.name); + rc = -EINVAL; + goto end; + } + + if (cam_min_camnoc_ib_bw > 0) + min_camnoc_ib_bw = 
(uint64_t)cam_min_camnoc_ib_bw * 1000000L; + + CAM_DBG(CAM_CPAS, + "Bus_client: %s, cam_min_camnoc_ib_bw = %d, min_camnoc_ib_bw=%llu", + bus_client->common_data.name, cam_min_camnoc_ib_bw, + min_camnoc_ib_bw); + + mutex_lock(&bus_client->lock); + if (is_camnoc_bw) { + if ((curr_vote->hlos_vote.ab > 0) && + (curr_vote->hlos_vote.ab < CAM_CPAS_AXI_MIN_CAMNOC_AB_BW)) + curr_vote->hlos_vote.ab = CAM_CPAS_AXI_MIN_CAMNOC_AB_BW; + + if ((curr_vote->hlos_vote.ib > 0) && (curr_vote->hlos_vote.ib < min_camnoc_ib_bw)) + curr_vote->hlos_vote.ib = min_camnoc_ib_bw; + } else { + if ((curr_vote->hlos_vote.ab > 0) && + (curr_vote->hlos_vote.ab < CAM_CPAS_AXI_MIN_MNOC_AB_BW)) + curr_vote->hlos_vote.ab = CAM_CPAS_AXI_MIN_MNOC_AB_BW; + + if ((curr_vote->hlos_vote.ib > 0) && + (curr_vote->hlos_vote.ib < CAM_CPAS_AXI_MIN_MNOC_IB_BW)) + curr_vote->hlos_vote.ib = CAM_CPAS_AXI_MIN_MNOC_IB_BW; + } + + cam_debug = cam_debug_get_settings(); + + if ((curr_vote->hlos_vote.ab || curr_vote->hlos_vote.ib) && cam_debug && + cam_debug->cpas_settings.is_updated) + cam_cpas_process_bw_overrides(bus_client, &curr_vote->hlos_vote.ab, + &curr_vote->hlos_vote.ib, &cam_debug->cpas_settings); + + CAM_DBG(CAM_CPAS, "Bus_client: %s, HLOS vote [%llu %llu] is_camnoc_bw: %s", + bus_client->common_data.name, + curr_vote->hlos_vote.ab, curr_vote->hlos_vote.ib, CAM_BOOL_TO_YESNO(is_camnoc_bw)); + + cam_cpas_util_validate_rt_bw(soc_private, cpas_core, bus_client, curr_vote); + + rc = cam_soc_bus_client_update_bw(bus_client->soc_bus_client, curr_vote->hlos_vote.ab, + curr_vote->hlos_vote.ib, CAM_SOC_BUS_PATH_DATA_HLOS); + if (rc) { + CAM_ERR(CAM_CPAS, "Update bw failed, Bus path %s ab[%llu] ib[%llu]", + cam_soc_bus_path_data_to_str(CAM_SOC_BUS_PATH_DATA_HLOS), + curr_vote->hlos_vote.ab, curr_vote->hlos_vote.ib); + goto unlock_client; + } + + if (applied_vote) + memcpy(applied_vote, curr_vote, sizeof(struct cam_cpas_axi_bw_info)); + + +unlock_client: + mutex_unlock(&bus_client->lock); +end: + return rc; +} + +static 
int cam_cpas_util_register_bus_client( + struct cam_hw_soc_info *soc_info, struct device_node *dev_node, + struct cam_cpas_bus_client *bus_client) +{ + int rc = 0; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) soc_info->soc_private; + + rc = cam_soc_bus_client_register(soc_info->pdev, dev_node, + &bus_client->soc_bus_client, &bus_client->common_data, + soc_private->use_cam_icc_path_str); + if (rc) { + CAM_ERR(CAM_CPAS, "Bus client: %s registertion failed ,rc: %d", + bus_client->common_data.name, rc); + return rc; + } + + bus_client->curr_vote_level = 0; + bus_client->valid = true; + mutex_init(&bus_client->lock); + + return 0; +} + +static int cam_cpas_util_unregister_bus_client( + struct cam_cpas_bus_client *bus_client) +{ + if (!bus_client->valid) { + CAM_ERR(CAM_CPAS, "bus client not valid"); + return -EINVAL; + } + + cam_soc_bus_client_unregister(&bus_client->soc_bus_client); + bus_client->curr_vote_level = 0; + bus_client->valid = false; + mutex_destroy(&bus_client->lock); + + return 0; +} + +static int cam_cpas_util_axi_cleanup(struct cam_cpas *cpas_core, + struct cam_hw_soc_info *soc_info) +{ + int i = 0; + + if (cpas_core->num_axi_ports > CAM_CPAS_MAX_AXI_PORTS) { + CAM_ERR(CAM_CPAS, "Invalid num_axi_ports: %d", + cpas_core->num_axi_ports); + return -EINVAL; + } + + if (cpas_core->num_camnoc_axi_ports > CAM_CPAS_MAX_AXI_PORTS) { + CAM_ERR(CAM_CPAS, "Invalid num_camnoc_axi_ports: %d", + cpas_core->num_camnoc_axi_ports); + return -EINVAL; + } + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + cam_cpas_util_unregister_bus_client( + &cpas_core->axi_port[i].bus_client); + of_node_put(cpas_core->axi_port[i].axi_port_node); + cpas_core->axi_port[i].axi_port_node = NULL; + } + + for (i = 0; i < cpas_core->num_camnoc_axi_ports; i++) { + cam_cpas_util_unregister_bus_client( + &cpas_core->camnoc_axi_port[i].bus_client); + of_node_put(cpas_core->camnoc_axi_port[i].axi_port_node); + cpas_core->camnoc_axi_port[i].axi_port_node = NULL; 
+ } + + return 0; +} + +static int cam_cpas_util_axi_setup(struct cam_cpas *cpas_core, + struct cam_hw_soc_info *soc_info) +{ + int i = 0, rc = 0; + struct device_node *axi_port_mnoc_node = NULL; + struct device_node *axi_port_camnoc_node = NULL; + + if (cpas_core->num_axi_ports > CAM_CPAS_MAX_AXI_PORTS) { + CAM_ERR(CAM_CPAS, "Invalid num_axi_ports: %d", + cpas_core->num_axi_ports); + return -EINVAL; + } + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + axi_port_mnoc_node = cpas_core->axi_port[i].axi_port_node; + rc = cam_cpas_util_register_bus_client(soc_info, + axi_port_mnoc_node, &cpas_core->axi_port[i].bus_client); + if (rc) + goto bus_register_fail; + } + for (i = 0; i < cpas_core->num_camnoc_axi_ports; i++) { + axi_port_camnoc_node = + cpas_core->camnoc_axi_port[i].axi_port_node; + rc = cam_cpas_util_register_bus_client(soc_info, + axi_port_camnoc_node, + &cpas_core->camnoc_axi_port[i].bus_client); + if (rc) + goto bus_register_fail; + } + + return 0; +bus_register_fail: + of_node_put(cpas_core->axi_port[i].axi_port_node); + return rc; +} + +int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw, + int enable) +{ + int rc, i = 0; + struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info; + uint64_t ab_bw, ib_bw; + + rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client, + (enable == true) ? 
CAM_LOWSVS_D1_VOTE : CAM_SUSPEND_VOTE); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in AHB vote, enable=%d, rc=%d", + enable, rc); + return rc; + } + + if (enable) { + ab_bw = CAM_CPAS_DEFAULT_AXI_BW; + ib_bw = CAM_CPAS_DEFAULT_AXI_BW; + } else { + ab_bw = 0; + ib_bw = 0; + } + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if (cpas_core->axi_port[i].bus_client.common_data.is_drv_port) + continue; + + cpas_core->axi_port[i].curr_bw.hlos_vote.ab = ab_bw; + cpas_core->axi_port[i].curr_bw.hlos_vote.ib = ib_bw; + rc = cam_cpas_util_vote_hlos_bus_client_bw(cpas_hw->soc_info.soc_private, + cpas_core, &cpas_core->axi_port[i].bus_client, false, + &cpas_core->axi_port[i].curr_bw, &cpas_core->axi_port[i].applied_bw); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in mnoc vote, enable=%d, rc=%d", + enable, rc); + goto remove_ahb_vote; + } + } + + return 0; +remove_ahb_vote: + cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client, + CAM_SUSPEND_VOTE); + return rc; +} + +static int cam_cpas_hw_reg_write(struct cam_hw_info *cpas_hw, + uint32_t client_handle, enum cam_cpas_reg_base reg_base, + uint32_t offset, bool mb, uint32_t value) +{ + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_client *cpas_client = NULL; + int reg_base_index = cpas_core->regbase_index[reg_base]; + uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle); + int rc = 0; + + if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) { + CAM_ERR(CAM_CPAS, + "Invalid reg_base=%d, reg_base_index=%d, num_map=%d", + reg_base, reg_base_index, soc_info->num_reg_map); + return -EINVAL; + } + + if (!CAM_CPAS_CLIENT_VALID(client_indx)) + return -EINVAL; + + mutex_lock(&cpas_core->client_mutex[client_indx]); + cpas_client = cpas_core->cpas_client[client_indx]; + + if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started", + client_indx, 
cpas_client->data.identifier, + cpas_client->data.cell_index); + rc = -EPERM; + goto unlock_client; + } + + if (mb) + cam_io_w_mb(value, + soc_info->reg_map[reg_base_index].mem_base + offset); + else + cam_io_w(value, + soc_info->reg_map[reg_base_index].mem_base + offset); + +unlock_client: + mutex_unlock(&cpas_core->client_mutex[client_indx]); + return rc; +} + +static int cam_cpas_hw_set_addr_trans(struct cam_hw_info *cpas_hw, + struct cam_cpas_hw_addr_trans_data *cmd_addr_trans) +{ + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + uint32_t client_handle = cmd_addr_trans->client_handle; + struct cam_cpas_addr_trans_data *addr_trans_data = cmd_addr_trans->addr_trans_data; + uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle); + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + int reg_base_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC_NRT]; + int camnoc_info_idx = cpas_core->camnoc_info_idx[CAM_CAMNOC_HW_NRT]; + struct cam_camnoc_info *camnoc_info; + struct cam_camnoc_addr_trans_info *addr_trans_info; + struct cam_cpas_client *cpas_client; + struct cam_camnoc_addr_trans_client_info *client_info; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) soc_info->soc_private; + char client_name[CAM_HW_IDENTIFIER_LENGTH + 3]; + void __iomem *cam_noc_base; + int rc = 0, i; + bool found = false; + + if (camnoc_info_idx < 0) { + CAM_ERR(CAM_CPAS, "Setting address translator is only supported after CPAS v980"); + return -EINVAL; + } + + camnoc_info = cpas_core->camnoc_info[camnoc_info_idx]; + if (!camnoc_info) { + CAM_ERR(CAM_CPAS, "Invalid cam noc info"); + return -EINVAL; + } + + addr_trans_info = camnoc_info->addr_trans_info; + if (!addr_trans_info) { + CAM_ERR(CAM_CPAS, "Invalid address translator information, camnoc name: %s", + camnoc_info->camnoc_name); + return -EINVAL; + } + + if (!CAM_CPAS_CLIENT_VALID(client_indx)) + return -EINVAL; + + mutex_lock(&cpas_core->client_mutex[client_indx]); + 
cpas_client = cpas_core->cpas_client[client_indx]; + + if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index); + rc = -EPERM; + goto unlock_client; + } + + if (soc_private->client_id_based) + snprintf(client_name, sizeof(client_name), "%s%d", + cpas_client->data.identifier, + cpas_client->data.cell_index); + else + snprintf(client_name, sizeof(client_name), "%s", + cpas_client->data.identifier); + + for (i = 0; i < addr_trans_info->num_supported_clients; i++) { + client_info = &addr_trans_info->addr_trans_client_info[i]; + if (!strnstr(client_name, client_info->client_name, strlen(client_name))) + continue; + + CAM_DBG(CAM_CPAS, "Found corresponding client %s that supports address translate", + client_name); + cam_noc_base = soc_info->reg_map[reg_base_index].mem_base; + + if (addr_trans_data->enable) { + cam_io_w_mb(0x1, cam_noc_base + client_info->reg_enable); + + /* Mapped (0 - 64MB) to (128MB - 192MB) */ + cam_io_w_mb(addr_trans_data->val_offset0, + cam_noc_base + client_info->reg_offset0); + cam_io_w_mb(addr_trans_data->val_base1, + cam_noc_base + client_info->reg_base1); + + /* Avoid address translator touching other space */ + cam_io_w_mb(addr_trans_data->val_offset1, + cam_noc_base + client_info->reg_offset1); + cam_io_w_mb(addr_trans_data->val_base2, + cam_noc_base + client_info->reg_base2); + cam_io_w_mb(addr_trans_data->val_offset2, + cam_noc_base + client_info->reg_offset2); + cam_io_w_mb(addr_trans_data->val_base3, + cam_noc_base + client_info->reg_base3); + cam_io_w_mb(addr_trans_data->val_offset3, + cam_noc_base + client_info->reg_offset3); + + CAM_DBG(CAM_CPAS, "Enabled address translator for %s", client_name); + } else { + cam_io_w_mb(0x0, cam_noc_base + client_info->reg_enable); + + CAM_DBG(CAM_CPAS, "Disabled address translator for %s", client_name); + } + found = true; + break; + } + + if (!found) { + 
CAM_ERR(CAM_CPAS, "No address translator support for this client: %s", + client_name); + rc = -EINVAL; + } + +unlock_client: + mutex_unlock(&cpas_core->client_mutex[client_indx]); + return rc; +} + + +static int cam_cpas_hw_reg_read(struct cam_hw_info *cpas_hw, + uint32_t client_handle, enum cam_cpas_reg_base reg_base, + uint32_t offset, bool mb, uint32_t *value) +{ + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_client *cpas_client = NULL; + int reg_base_index = cpas_core->regbase_index[reg_base]; + uint32_t reg_value; + uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle); + int rc = 0; + + if (!value) + return -EINVAL; + + if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) { + CAM_ERR(CAM_CPAS, + "Invalid reg_base=%d, reg_base_index=%d, num_map=%d", + reg_base, reg_base_index, soc_info->num_reg_map); + return -EINVAL; + } + + if (!CAM_CPAS_CLIENT_VALID(client_indx)) + return -EINVAL; + + cpas_client = cpas_core->cpas_client[client_indx]; + + if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index); + return -EPERM; + } + + if (mb) + reg_value = cam_io_r_mb( + soc_info->reg_map[reg_base_index].mem_base + offset); + else + reg_value = cam_io_r( + soc_info->reg_map[reg_base_index].mem_base + offset); + + *value = reg_value; + + return rc; +} + +static int cam_cpas_hw_dump_camnoc_buff_fill_info( + struct cam_hw_info *cpas_hw, + uint32_t client_handle) +{ + int rc = 0, i, camnoc_idx; + uint32_t val = 0, client_idx = CAM_CPAS_GET_CLIENT_IDX(client_handle); + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_camnoc_info *camnoc_info; + char log_buf[CAM_CPAS_LOG_BUF_LEN]; + size_t len; + + if (!CAM_CPAS_CLIENT_VALID(client_idx)) { + CAM_ERR(CAM_CPAS, "Invalid client idx: %u", 
client_idx); + return -EPERM; + } + + /* log buffer fill level of both RT/NRT NIU */ + for (camnoc_idx = 0; camnoc_idx < cpas_core->num_valid_camnoc; camnoc_idx++) { + log_buf[0] = '\0'; + len = 0; + camnoc_info = cpas_core->camnoc_info[camnoc_idx]; + + for (i = 0; i < camnoc_info->specific_size; i++) { + if ((!camnoc_info->specific[i].enable) || + (!camnoc_info->specific[i].maxwr_low.enable)) + continue; + + rc = cam_cpas_hw_reg_read(cpas_hw, client_handle, + camnoc_info->reg_base, + camnoc_info->specific[i].maxwr_low.offset, true, &val); + if (rc) + break; + + len += scnprintf((log_buf + len), (CAM_CPAS_LOG_BUF_LEN - len), + " %s:[%d %d]", camnoc_info->specific[i].port_name, + (val & 0x7FF), (val & 0x7F0000) >> 16); + } + + CAM_INFO(CAM_CPAS, "%s Fill level [Queued Pending] %s", + camnoc_info->camnoc_name, log_buf); + } + + return rc; +} + +static void cam_cpas_print_smart_qos_priority( + struct cam_hw_info *cpas_hw) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_camnoc_info *camnoc_info = NULL; + struct cam_cpas_tree_node *niu_node; + uint8_t i; + int32_t reg_indx; + char log_buf[CAM_CPAS_LOG_BUF_LEN] = {0}; + size_t len = 0; + uint32_t val_low = 0, val_high = 0; + + /* Smart QOS only apply to CPAS RT nius */ + camnoc_info = cpas_core->camnoc_info[cpas_core->camnoc_rt_idx]; + reg_indx = cpas_core->regbase_index[camnoc_info->reg_base]; + + for (i = 0; i < soc_private->smart_qos_info->num_rt_wr_nius; i++) { + niu_node = soc_private->smart_qos_info->rt_wr_niu_node[i]; + + val_high = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + + niu_node->pri_lut_high_offset); + + val_low = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + + niu_node->pri_lut_low_offset); + + len += scnprintf((log_buf + len), (CAM_CPAS_LOG_BUF_LEN - len), + " [%s:high 0x%x low 0x%x]", 
niu_node->node_name, + val_high, val_low); + } + + CAM_INFO(CAM_CPAS, "%s SmartQoS [Node Pri_lut] %s", camnoc_info->camnoc_name, log_buf); +} + +static bool cam_cpas_is_new_rt_bw_lower( + const struct cam_hw_info *cpas_hw) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + int i; + struct cam_cpas_axi_port *temp_axi_port = NULL; + uint64_t applied_total = 0, new_total = 0; + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + temp_axi_port = &cpas_core->axi_port[i]; + + if (!temp_axi_port->is_rt) + continue; + + if (temp_axi_port->bus_client.common_data.is_drv_port) { + CAM_DBG(CAM_PERF, "Port %s DRV ab applied [%llu %llu] new [%llu %llu]", + temp_axi_port->axi_port_name, + temp_axi_port->applied_bw.drv_vote.high.ab, + temp_axi_port->applied_bw.drv_vote.low.ab, + temp_axi_port->curr_bw.drv_vote.high.ab, + temp_axi_port->curr_bw.drv_vote.low.ab); + + applied_total += temp_axi_port->applied_bw.drv_vote.high.ab; + new_total += temp_axi_port->curr_bw.drv_vote.high.ab; + } else { + CAM_DBG(CAM_PERF, "Port %s HLOS ab applied %llu new %llu", + temp_axi_port->axi_port_name, + temp_axi_port->applied_bw.hlos_vote.ab, + temp_axi_port->curr_bw.hlos_vote.ab); + + applied_total += temp_axi_port->applied_bw.hlos_vote.ab; + new_total += temp_axi_port->curr_bw.hlos_vote.ab; + } + } + + return (new_total < applied_total) ? 
true : false; +} + +static void cam_cpas_reset_niu_priorities( + struct cam_hw_info *cpas_hw) +{ + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + uint8_t i; + + for (i = 0; i < soc_private->smart_qos_info->num_rt_wr_nius; i++) { + soc_private->smart_qos_info->rt_wr_niu_node[i]->applied_priority_low = 0x0; + soc_private->smart_qos_info->rt_wr_niu_node[i]->applied_priority_high = 0x0; + } +} + +static bool cam_cpas_calculate_smart_qos( + struct cam_hw_info *cpas_hw) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas_tree_node *niu_node; + uint8_t i; + bool needs_update = false; + uint64_t bw_per_kb, total_camnoc_bw, max_bw_per_kb = 0, remainder, ramp_val; + uint64_t total_bw_per_kb = 0, total_bw_ramp_val = 0; + int8_t pos; + uint64_t priority; + uint8_t val, clamp_threshold; + + for (i = 0; i < soc_private->smart_qos_info->num_rt_wr_nius; i++) { + niu_node = soc_private->smart_qos_info->rt_wr_niu_node[i]; + + bw_per_kb = niu_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc; + + if (soc_private->enable_cam_clk_drv) + bw_per_kb += niu_node->bw_info[CAM_CPAS_PORT_DRV_0].drv_vote.high.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_0].drv_vote.low.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_1].drv_vote.high.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_1].drv_vote.low.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_2].drv_vote.high.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_2].drv_vote.low.camnoc; + + total_camnoc_bw = bw_per_kb; + remainder = do_div(bw_per_kb, niu_node->niu_size); + total_bw_per_kb += bw_per_kb; + + if (max_bw_per_kb < bw_per_kb) + max_bw_per_kb = bw_per_kb; + + CAM_DBG(CAM_PERF, + "NIU[%d][%s]camnoc_bw %llu, niu_size %u, init_bw_per_kb %lld, remainder %lld, max_bw_per_kb %lld, total_bw_per_kb %lld", + i, niu_node->node_name, 
total_camnoc_bw, niu_node->niu_size, + bw_per_kb, remainder, max_bw_per_kb, total_bw_per_kb); + } + + if (!max_bw_per_kb) { + CAM_DBG(CAM_PERF, "No valid bw on NIU nodes"); + return false; + } + + for (i = 0; i < soc_private->smart_qos_info->num_rt_wr_nius; i++) { + niu_node = soc_private->smart_qos_info->rt_wr_niu_node[i]; + + bw_per_kb = niu_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc; + + if (soc_private->enable_cam_clk_drv) + bw_per_kb += niu_node->bw_info[CAM_CPAS_PORT_DRV_0].drv_vote.high.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_0].drv_vote.low.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_1].drv_vote.high.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_1].drv_vote.low.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_2].drv_vote.high.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_2].drv_vote.low.camnoc; + + do_div(bw_per_kb, niu_node->niu_size); + + if ((bw_per_kb * CAM_CPAS_MAX_STRESS_INDICATOR) > + (total_bw_per_kb * + soc_private->smart_qos_info->highstress_indicator_th)) { + clamp_threshold = soc_private->smart_qos_info->moststressed_clamp_th; + CAM_DBG(CAM_PERF, "Current niu clamp_threshold=%d", + clamp_threshold); + } else { + ramp_val = soc_private->smart_qos_info->bw_ratio_scale_factor * + bw_per_kb; + ramp_val = ramp_val * + (soc_private->smart_qos_info->leaststressed_clamp_th - + soc_private->smart_qos_info->moststressed_clamp_th); + + /* + * Stress indicator threshold may have a float type value + * such as 0.5 according max stress indicator value 1, + * we take in percentages to avoid float type calcaulate. 
+ */ + total_bw_ramp_val = total_bw_per_kb * + (soc_private->smart_qos_info->highstress_indicator_th - + soc_private->smart_qos_info->lowstress_indicator_th) / + CAM_CPAS_MAX_STRESS_INDICATOR; + + CAM_DBG(CAM_PERF, "ramp_val=%lld, total_bw_ramp_val=%lld", + ramp_val, total_bw_ramp_val); + + remainder = do_div(ramp_val, total_bw_ramp_val); + /* round the value */ + if ((remainder * 2) >= total_bw_ramp_val) + ramp_val += 1; + + val = (uint8_t)(ramp_val); + clamp_threshold = + soc_private->smart_qos_info->leaststressed_clamp_th - val; + + CAM_DBG(CAM_PERF, "Current niu clamp_threshold=%d, val=%d", + clamp_threshold, val); + } + + priority = 0; + + for (pos = 15; pos >= clamp_threshold; pos--) { + val = soc_private->smart_qos_info->rt_wr_priority_clamp; + priority = priority << 4; + priority |= val; + + CAM_DBG(CAM_PERF, "pos=%d, val=0x%x, priority=0x%llx", pos, val, priority); + } + + for (pos = clamp_threshold - 1; pos >= 0; pos--) { + if (pos == 0) { + val = soc_private->smart_qos_info->rt_wr_priority_min; + } else { + ramp_val = pos * bw_per_kb; + /* + * Slope factor may have a float type value such as 0.7 + * according max slope factor value 1, + * we take in percentages to avoid float type calcaulate. 
+ */ + ramp_val = ramp_val * + soc_private->smart_qos_info->rt_wr_slope_factor / + CAM_CPAS_MAX_SLOPE_FACTOR; + remainder = do_div(ramp_val, max_bw_per_kb); + + CAM_DBG(CAM_PERF, + "pos=%d, bw_per_kb=%lld, pos*bw_per_kb=%lld, ramp_val=%lld, remainder=%lld, max_bw_per_kb=%lld", + pos, bw_per_kb, pos * bw_per_kb, ramp_val, remainder, + max_bw_per_kb); + + /* round the value */ + if ((remainder * 2) >= max_bw_per_kb) + ramp_val += 1; + + val = (uint8_t)(ramp_val); + val += soc_private->smart_qos_info->rt_wr_priority_min; + val = min(val, soc_private->smart_qos_info->rt_wr_priority_max); + } + + priority = priority << 4; + priority |= val; + + CAM_DBG(CAM_PERF, "pos=%d, val=0x%x, priority=0x%llx", pos, val, priority); + } + + niu_node->curr_priority_low = (uint32_t)(priority & 0xFFFFFFFF); + niu_node->curr_priority_high = (uint32_t)((priority >> 32) & 0xFFFFFFFF); + + if ((niu_node->curr_priority_low != niu_node->applied_priority_low) || + (niu_node->curr_priority_high != niu_node->applied_priority_high)) + needs_update = true; + + CAM_DBG(CAM_PERF, + "Node[%d][%s]Priority applied high 0x%x low 0x%x, new high 0x%x low 0x%x, needs_update %d", + i, niu_node->node_name, + niu_node->applied_priority_high, niu_node->applied_priority_low, + niu_node->curr_priority_high, niu_node->curr_priority_low, + needs_update); + } + + if (cpas_core->smart_qos_dump && needs_update) { + uint64_t total_camnoc; + for (i = 0; i < soc_private->smart_qos_info->num_rt_wr_nius; i++) { + niu_node = soc_private->smart_qos_info->rt_wr_niu_node[i]; + + total_camnoc = niu_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc; + + if (soc_private->enable_cam_clk_drv) + total_camnoc += + niu_node->bw_info[CAM_CPAS_PORT_DRV_0].drv_vote.high.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_0].drv_vote.low.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_1].drv_vote.high.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_1].drv_vote.low.camnoc + + niu_node->bw_info[CAM_CPAS_PORT_DRV_2].drv_vote.high.camnoc + + 
niu_node->bw_info[CAM_CPAS_PORT_DRV_2].drv_vote.low.camnoc; + + CAM_INFO(CAM_PERF, + "Node[%d][%s] camnoc_bw=%lld, niu_size=%d, offset high 0x%x, low 0x%x, Priority new high 0x%x low 0x%x, applied high 0x%x low 0x%x", + i, niu_node->node_name, total_camnoc, niu_node->niu_size, + niu_node->pri_lut_high_offset, niu_node->pri_lut_low_offset, + niu_node->curr_priority_high, niu_node->curr_priority_low, + niu_node->applied_priority_high, niu_node->applied_priority_low); + } + } + + return needs_update; +} + +static int cam_cpas_apply_smart_qos( + struct cam_hw_info *cpas_hw) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas_tree_node *niu_node; + struct cam_camnoc_info *camnoc_info; + uint8_t i; + int32_t reg_indx, cam_qos_cnt = 0, ret = 0; + uint32_t reg_base_mask; + struct qcom_scm_camera_qos scm_buf[QCOM_SCM_CAMERA_MAX_QOS_CNT] = {0}; + + if (cpas_core->smart_qos_dump) { + CAM_INFO(CAM_PERF, "Printing SmartQoS values before update"); + cam_cpas_print_smart_qos_priority(cpas_hw); + } + + /* Smart QOS only apply to CPAS RT nius */ + camnoc_info = cpas_core->camnoc_info[cpas_core->camnoc_rt_idx]; + reg_indx = cpas_core->regbase_index[camnoc_info->reg_base]; + + switch (camnoc_info->reg_base) { + case CAM_CPAS_REG_CAMNOC: + reg_base_mask = (CAM_CAMNOC_HW_COMBINED_MASK << CAM_CAMNOC_HW_TYPE_SHIFT); + break; + case CAM_CPAS_REG_CAMNOC_RT: + reg_base_mask = (CAM_CAMNOC_HW_RT_MASK << CAM_CAMNOC_HW_TYPE_SHIFT); + break; + case CAM_CPAS_REG_CAMNOC_NRT: + reg_base_mask = (CAM_CAMNOC_HW_NRT_MASK << CAM_CAMNOC_HW_TYPE_SHIFT); + break; + default: + CAM_ERR(CAM_CPAS, "Reg base %d is not supported in updating smart QoS", + camnoc_info->reg_base); + return -EINVAL; + } + + for (i = 0; i < soc_private->smart_qos_info->num_rt_wr_nius; i++) { + niu_node = 
soc_private->smart_qos_info->rt_wr_niu_node[i]; + + if (niu_node->curr_priority_high != niu_node->applied_priority_high) { + if (!soc_private->enable_secure_qos_update) { + cam_io_w_mb(niu_node->curr_priority_high, + soc_info->reg_map[reg_indx].mem_base + + niu_node->pri_lut_high_offset); + } else { + scm_buf[cam_qos_cnt].offset = + niu_node->pri_lut_high_offset | reg_base_mask; + scm_buf[cam_qos_cnt].val = niu_node->curr_priority_high; + cam_qos_cnt++; + } + + niu_node->applied_priority_high = niu_node->curr_priority_high; + } + + if (niu_node->curr_priority_low != niu_node->applied_priority_low) { + if (!soc_private->enable_secure_qos_update) { + cam_io_w_mb(niu_node->curr_priority_low, + soc_info->reg_map[reg_indx].mem_base + + niu_node->pri_lut_low_offset); + } else { + scm_buf[cam_qos_cnt].offset = + niu_node->pri_lut_low_offset | reg_base_mask; + scm_buf[cam_qos_cnt].val = niu_node->curr_priority_low; + cam_qos_cnt++; + } + + niu_node->applied_priority_low = niu_node->curr_priority_low; + } + } + + if (soc_private->enable_secure_qos_update && cam_qos_cnt) { + CAM_DBG(CAM_PERF, "Updating secure camera smartQoS count: %d", cam_qos_cnt); + ret = cam_update_camnoc_qos_settings(CAM_QOS_UPDATE_TYPE_SMART, + cam_qos_cnt, scm_buf); + if (ret) { + CAM_ERR(CAM_PERF, "Secure camera smartQoS update failed: %d", ret); + return ret; + } + CAM_DBG(CAM_PERF, "Updated secure camera smartQoS"); + } + + if (cpas_core->smart_qos_dump) { + CAM_INFO(CAM_PERF, "Printing SmartQoS values after update"); + cam_cpas_print_smart_qos_priority(cpas_hw); + } + + return 0; +} + +static int cam_cpas_util_camnoc_drv_idx_to_cesta_hw_client_idx(int camnoc_drv_idx) +{ + int hw_client = -1; + + switch (camnoc_drv_idx) { + case CAM_CPAS_PORT_HLOS_DRV: + hw_client = -1; + break; + case CAM_CPAS_PORT_DRV_0: + hw_client = 0; + break; + case CAM_CPAS_PORT_DRV_1: + hw_client = 1; + break; + case CAM_CPAS_PORT_DRV_2: + hw_client = 2; + break; + default: + CAM_WARN(CAM_CPAS, "Invalid drv idx %d", 
camnoc_drv_idx); + break; + } + + return hw_client; +} + +static inline void cam_cpas_util_validate_rt_camnoc_clk_rate(struct cam_cpas *cpas_core, + struct cam_hw_soc_info *soc_info, struct cam_soc_util_clk_rates rt_clk_rates, + int cesta_idx) +{ + int i; + unsigned long rt_hlos_clk_rate = 0; + unsigned long total_hw_client_rate = 0; + const struct camera_debug_settings *cam_debug = NULL; + + if (!cpas_core || !soc_info) { + CAM_ERR(CAM_CPAS, "Invalid params for validating rt camnoc clock rate"); + return; + } + + cam_debug = cam_debug_get_settings(); + if (debug_disable_rt_clk_bw_limit || (cam_debug && cam_debug->cpas_settings.camnoc_bw)) + return; + + if (cesta_idx == -1) + rt_hlos_clk_rate = rt_clk_rates.sw_client; + else + rt_hlos_clk_rate = cpas_core->applied_hlos_rt_camnoc_axi_rate; + + for (i = 0; i < CAM_CESTA_MAX_CLIENTS; i++) { + if ((cesta_idx != -1) && (cesta_idx == i)) + total_hw_client_rate += rt_clk_rates.hw_client[i].high; + else + total_hw_client_rate += + cpas_core->applied_camnoc_axi_rate.hw_client[i].high; + } + + if ((rt_hlos_clk_rate + total_hw_client_rate) > + (soc_info->clk_rate[soc_info->highest_clk_level][soc_info->src_clk_idx])) + CAM_WARN(CAM_CPAS, + "Requested clk rate[SW HW Total]: [%lld %lld %lld] exceeds max supported value: %d", + rt_hlos_clk_rate, total_hw_client_rate, (rt_hlos_clk_rate + + total_hw_client_rate), + soc_info->clk_rate[soc_info->highest_clk_level][soc_info->src_clk_idx]); + +} + +static int cam_cpas_util_set_camnoc_axi_drv_clk_rate(struct cam_hw_soc_info *soc_info, + struct cam_cpas_private_soc *soc_private, struct cam_cpas *cpas_core, int cesta_drv_idx) +{ + struct cam_cpas_tree_node *tree_node = NULL; + uint64_t req_drv_high_camnoc_bw = 0, intermediate_drv_high_result = 0, + req_drv_low_camnoc_bw = 0, intermediate_drv_low_result = 0; + int64_t drv_high_clk_rate = 0, drv_low_clk_rate = 0; + struct cam_soc_util_clk_rates rt_clk_rates = {0}; + int i, rc = 0; + + if (!soc_private->enable_cam_clk_drv) { + 
CAM_ERR(CAM_CPAS, "Clk DRV not enabled, can't set clk rates through cesta APIs"); + return -EINVAL; + } + + for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) { + tree_node = soc_private->tree_node[i]; + if (!tree_node || + !tree_node->camnoc_max_needed) + continue; + + if (req_drv_high_camnoc_bw < + (tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc * + tree_node->bus_width_factor)) + req_drv_high_camnoc_bw = + (tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc * + tree_node->bus_width_factor); + + if (req_drv_low_camnoc_bw < + (tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc * + tree_node->bus_width_factor)) + req_drv_low_camnoc_bw = + (tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc * + tree_node->bus_width_factor); + } + + intermediate_drv_high_result = req_drv_high_camnoc_bw * + soc_private->camnoc_axi_clk_bw_margin; + intermediate_drv_low_result = req_drv_low_camnoc_bw * + soc_private->camnoc_axi_clk_bw_margin; + do_div(intermediate_drv_high_result, 100); + do_div(intermediate_drv_low_result, 100); + req_drv_high_camnoc_bw += intermediate_drv_high_result; + req_drv_low_camnoc_bw += intermediate_drv_low_result; + + /* + * Since all low votes are considered as part of high votes as well, add low camnoc bw + * to final requested high camnoc bw value. 
+ */ + req_drv_high_camnoc_bw += req_drv_low_camnoc_bw; + + intermediate_drv_high_result = req_drv_high_camnoc_bw; + intermediate_drv_low_result = req_drv_low_camnoc_bw; + do_div(intermediate_drv_high_result, soc_private->camnoc_bus_width); + do_div(intermediate_drv_low_result, soc_private->camnoc_bus_width); + drv_high_clk_rate = intermediate_drv_high_result; + drv_low_clk_rate = intermediate_drv_low_result; + + if (cpas_core->streamon_clients) { + int hw_client_idx; + + /* + * cesta_drv_idx is based on enum we set in dtsi properties which is +1 of actual + * corresponding hw client index + */ + hw_client_idx = cam_cpas_util_camnoc_drv_idx_to_cesta_hw_client_idx(cesta_drv_idx); + if (hw_client_idx == -1) { + CAM_ERR(CAM_CPAS, "Invalid hw client idx %d, cesta_drv_idx %d", + hw_client_idx, cesta_drv_idx); + return rc; + } + + if (debug_drv) + CAM_INFO(CAM_PERF, + "Setting camnoc axi cesta clk rate[BW Clk] : High [%llu %lld] Low [%llu %lld] cesta/hw_client_idx:[%d][%d]", + req_drv_high_camnoc_bw, drv_high_clk_rate, req_drv_low_camnoc_bw, + drv_low_clk_rate, cesta_drv_idx, hw_client_idx); + else + CAM_DBG(CAM_PERF, + "Setting camnoc axi cesta clk rate[BW Clk] : High [%llu %lld] Low [%llu %lld] cesta/hw_client_idx:[%d][%d]", + req_drv_high_camnoc_bw, drv_high_clk_rate, req_drv_low_camnoc_bw, + drv_low_clk_rate, cesta_drv_idx, hw_client_idx); + + rt_clk_rates.hw_client[hw_client_idx].high = (unsigned long) drv_high_clk_rate; + cam_cpas_util_validate_rt_camnoc_clk_rate(cpas_core, soc_info, rt_clk_rates, + hw_client_idx); + + rc = cam_soc_util_set_src_clk_rate(soc_info, hw_client_idx, + drv_high_clk_rate, drv_low_clk_rate); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in setting camnoc cesta clk rates[high low]:[%ld %ld] client_idx:%d rc:%d", + drv_high_clk_rate, drv_low_clk_rate, hw_client_idx, + rc); + return rc; + } + + cpas_core->applied_camnoc_axi_rate.hw_client[hw_client_idx].high = + drv_high_clk_rate; + cpas_core->applied_camnoc_axi_rate.hw_client[hw_client_idx].low 
= + drv_low_clk_rate; + + if (debug_drv) + CAM_INFO(CAM_PERF, "Triggering channel switch for cesta client %d", + hw_client_idx); + else + CAM_DBG(CAM_PERF, "Triggering channel switch for cesta client %d", + hw_client_idx); + + rc = cam_soc_util_cesta_channel_switch(hw_client_idx, "cpas_update"); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed to apply power states for cesta client:%d rc:%d", + hw_client_idx, rc); + return rc; + } + } + + return rc; +} + +static int cam_cpas_util_set_max_camnoc_axi_clk_rate(struct cam_cpas *cpas_core, + struct cam_hw_soc_info *soc_info) +{ + int rc, highest_level = 0; + int64_t applied_rate = 0; + const struct camera_debug_settings *cam_debug = NULL; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) soc_info->soc_private; + + CAM_DBG(CAM_CPAS, "Finding max of hlos axi floor lvl: %d and hlos axi lvl: %d", + cpas_core->hlos_axi_floor_lvl, cpas_core->hlos_axi_bw_calc_lvl); + + highest_level = max(cpas_core->hlos_axi_floor_lvl, cpas_core->hlos_axi_bw_calc_lvl); + rc = cam_soc_util_get_valid_clk_rate(soc_info, highest_level, &applied_rate); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in getting valid clk rate to apply rc: %d", rc); + return rc; + } + + cam_debug = cam_debug_get_settings(); + if (cam_debug && cam_debug->cpas_settings.camnoc_bw) { + uint64_t intermediate_hlos_result = 0; + + intermediate_hlos_result = cam_debug->cpas_settings.camnoc_bw; + do_div(intermediate_hlos_result, soc_private->camnoc_bus_width); + applied_rate = intermediate_hlos_result; + } + + CAM_DBG(CAM_CPAS, "Highest valid lvl: %d, applying corresponding rate %lld", + highest_level, applied_rate); + + rc = cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, applied_rate, 0); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in setting camnoc axi clk applied rate:[%lld] rc:%d", + applied_rate, rc); + return rc; + } + + cpas_core->applied_camnoc_axi_rate.sw_client = applied_rate; + + return rc; +} + +static int 
cam_cpas_util_set_camnoc_axi_hlos_clk_rate(struct cam_hw_soc_info *soc_info,
	struct cam_cpas_private_soc *soc_private, struct cam_cpas *cpas_core)
{
	/*
	 * Compute the HLOS camnoc clk rate from the maximum per-node camnoc BW
	 * (and, separately, the maximum across RT nodes only), add the
	 * configured margin, then apply the resulting rate on the SW client.
	 */
	struct cam_cpas_tree_node *tree_node = NULL;
	uint64_t req_hlos_camnoc_bw = 0, req_rt_hlos_camnoc_bw = 0,
		intermediate_hlos_result = 0, intermediate_rt_hlos_result = 0;
	int64_t hlos_clk_rate = 0, rt_hlos_clk_rate = 0;
	struct cam_soc_util_clk_rates rt_clk_rates = {0};
	int32_t clk_lvl = 0;
	int i, rc = 0;
	const struct camera_debug_settings *cam_debug = NULL;

	/* Track the max bus-width-scaled camnoc BW over all camnoc-max-needed nodes */
	for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) {
		tree_node = soc_private->tree_node[i];
		if (!tree_node || !tree_node->camnoc_max_needed)
			continue;

		if (req_hlos_camnoc_bw <
			(tree_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc *
			tree_node->bus_width_factor)) {
			req_hlos_camnoc_bw =
				(tree_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc *
				tree_node->bus_width_factor);
		}

		/* RT-only maximum tracked separately for the RT rate validation */
		if (tree_node->is_rt_node && (req_rt_hlos_camnoc_bw <
			(tree_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc *
			tree_node->bus_width_factor))) {
			req_rt_hlos_camnoc_bw =
				(tree_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc *
				tree_node->bus_width_factor);
		}
	}

	/* Add camnoc_axi_clk_bw_margin percent headroom to both requests */
	intermediate_hlos_result = req_hlos_camnoc_bw * soc_private->camnoc_axi_clk_bw_margin;
	do_div(intermediate_hlos_result, 100);
	req_hlos_camnoc_bw += intermediate_hlos_result;

	intermediate_rt_hlos_result = req_rt_hlos_camnoc_bw * soc_private->camnoc_axi_clk_bw_margin;
	do_div(intermediate_rt_hlos_result, 100);
	req_rt_hlos_camnoc_bw += intermediate_rt_hlos_result;

	if (cpas_core->streamon_clients && (req_hlos_camnoc_bw == 0)) {
		CAM_DBG(CAM_CPAS,
			"Set min vote if streamon_clients is non-zero : streamon_clients=%d",
			cpas_core->streamon_clients);
		req_hlos_camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
	}

	/* Clamp a non-zero request up to the minimum IB BW */
	if ((req_hlos_camnoc_bw > 0) && (req_hlos_camnoc_bw < soc_private->camnoc_axi_min_ib_bw))
		req_hlos_camnoc_bw = soc_private->camnoc_axi_min_ib_bw;

	/* Debugfs override of camnoc BW, floored at one bus-width worth of BW */
	cam_debug = cam_debug_get_settings();
	if (cam_debug && cam_debug->cpas_settings.camnoc_bw) {
		if (cam_debug->cpas_settings.camnoc_bw < soc_private->camnoc_bus_width)
			req_hlos_camnoc_bw = soc_private->camnoc_bus_width;

		else
			req_hlos_camnoc_bw = cam_debug->cpas_settings.camnoc_bw;

		CAM_INFO(CAM_CPAS, "Overriding camnoc bw: %llu", req_hlos_camnoc_bw);
	}

	/* BW (bytes/s) -> clk rate via the camnoc bus width */
	intermediate_hlos_result = req_hlos_camnoc_bw;
	do_div(intermediate_hlos_result, soc_private->camnoc_bus_width);
	hlos_clk_rate = intermediate_hlos_result;

	intermediate_rt_hlos_result = req_rt_hlos_camnoc_bw;
	do_div(intermediate_rt_hlos_result, soc_private->camnoc_bus_width);
	rt_hlos_clk_rate = intermediate_rt_hlos_result;

	CAM_DBG(CAM_PERF, "Setting camnoc axi HLOS clk rate[BW Clk RT_only_Clk] : [%llu %lld %lld]",
		req_hlos_camnoc_bw, hlos_clk_rate, rt_hlos_clk_rate);

	rt_clk_rates.sw_client = (unsigned long) rt_hlos_clk_rate;
	cam_cpas_util_validate_rt_camnoc_clk_rate(cpas_core, soc_info, rt_clk_rates, -1);

	rc = cam_soc_util_get_clk_level(soc_info, hlos_clk_rate,
		soc_info->src_clk_idx, &clk_lvl);
	if (rc) {
		CAM_ERR(CAM_CPAS, "failed to get clk lvl: %d, src clk idx: %d, hlos clk rate %lld",
			rc, soc_info->src_clk_idx, hlos_clk_rate);
		return rc;
	}

	cpas_core->hlos_axi_bw_calc_lvl = clk_lvl;

	/*
	 * CPAS hw is not powered on for the first client.
	 * Also, clk_rate will be overwritten with default
	 * value while power on. So, skipping this for first
	 * client.
	 */

	if (cpas_core->streamon_clients) {
		if (cam_vmrm_proxy_clk_rgl_voting_enable()) {
			/* Proxy-voting path: apply through vmrm instead of soc-util */
			rc = cam_vmrm_set_clk_rate_level(soc_info->hw_id, CAM_CLK_SW_CLIENT_IDX,
				cpas_core->hlos_axi_floor_lvl, 0, false, hlos_clk_rate);
			if (rc) {
				CAM_ERR(CAM_CPAS,
					"Failed in setting camnoc axi clk applied rate:[%lld] rc:%d",
					hlos_clk_rate, rc);
				return rc;
			}
			/*
			 * Since actual clk frequencies are not available,
			 * saving hlos_clk_rate here.
+ */ + cpas_core->applied_camnoc_axi_rate.sw_client = hlos_clk_rate; + } else { + rc = cam_cpas_util_set_max_camnoc_axi_clk_rate(cpas_core, soc_info); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in setting camnoc axi clk [BW Clk]:[%llu %lld] rc:%d", + req_hlos_camnoc_bw, hlos_clk_rate, rc); + return rc; + } + } + + cpas_core->applied_hlos_rt_camnoc_axi_rate = (unsigned long) rt_hlos_clk_rate; + } + + return rc; +} + +static int cam_cpas_util_set_camnoc_axi_clk_rate(struct cam_hw_info *cpas_hw, + int cesta_drv_idx) +{ + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int rc = 0; + + CAM_DBG(CAM_CPAS, "control_camnoc_axi_clk=%d", soc_private->control_camnoc_axi_clk); + + if (cesta_drv_idx == CAM_CPAS_PORT_HLOS_DRV) { + rc = cam_cpas_util_set_camnoc_axi_hlos_clk_rate(soc_info, + soc_private, cpas_core); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in setting hlos clk rate rc: %d", + rc); + goto end; + } + } else { + rc = cam_cpas_util_set_camnoc_axi_drv_clk_rate(soc_info, + soc_private, cpas_core, cesta_drv_idx); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in setting drv clk rate drv_idx:%d rc: %d", + cesta_drv_idx, rc); + goto end; + } + } + +end: + return rc; +} + +static int cam_cpas_util_translate_client_paths( + struct cam_axi_vote *axi_vote) +{ + int i; + uint32_t *path_data_type = NULL; + + if (!axi_vote) + return -EINVAL; + + for (i = 0; i < axi_vote->num_paths; i++) { + path_data_type = &axi_vote->axi_path[i].path_data_type; + /* Update path_data_type from UAPI value to internal value */ + if (*path_data_type >= CAM_CPAS_PATH_DATA_CONSO_OFFSET) + *path_data_type = CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT + + (*path_data_type % + CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT); + else + *path_data_type %= CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT; + + if (*path_data_type >= CAM_CPAS_PATH_DATA_MAX) { + 
			CAM_ERR(CAM_CPAS, "index Invalid: %d", *path_data_type);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Fold one granular axi path vote into the client's consolidated vote list,
 * when a summing tree node exists whose constituent paths include this path.
 * *path_found reports whether any consolidated path matched.
 */
static void cam_cpas_axi_util_find_consolidate_path(struct cam_cpas_client *cpas_client,
	struct cam_cpas_axi_per_path_bw_vote *axi_path, bool *path_found)
{
	int i, j;
	bool cons_entry_found;
	uint32_t transac_type;
	uint32_t path_data_type;
	struct cam_cpas_tree_node *sum_tree_node = NULL;
	struct cam_cpas_axi_consolidate_per_path_bw_vote *cons_axi_path = NULL;
	struct cam_axi_consolidate_vote *con_axi_vote = &cpas_client->cons_axi_vote;

	path_data_type = axi_path->path_data_type;
	transac_type = axi_path->transac_type;
	for (i = 0; i < CAM_CPAS_PATH_DATA_MAX; i++) {
		sum_tree_node = cpas_client->tree_node[i][transac_type];

		if (!sum_tree_node)
			continue;

		if (sum_tree_node->constituent_paths[path_data_type]) {
			*path_found = true;
			/*
			 * Check if corresponding consolidated path
			 * entry is already added into consolidated list
			 */
			cons_entry_found = false;
			for (j = 0; j < con_axi_vote->num_paths; j++) {
				if ((con_axi_vote->axi_path[j].path_data_type == i) &&
					(con_axi_vote->axi_path[j].transac_type == transac_type)) {
					cons_axi_path = &con_axi_vote->axi_path[j];
					cons_entry_found = true;
					/* Accumulate into the high or low bucket per vote level */
					if (axi_path->vote_level == CAM_CPAS_VOTE_LEVEL_HIGH) {
						cons_axi_path->drv_vote.high.camnoc +=
							axi_path->camnoc_bw;
						cons_axi_path->drv_vote.high.ab +=
							axi_path->mnoc_ab_bw;
						cons_axi_path->drv_vote.high.ib +=
							axi_path->mnoc_ib_bw;
					} else {
						cons_axi_path->drv_vote.low.camnoc +=
							axi_path->camnoc_bw;
						cons_axi_path->drv_vote.low.ab +=
							axi_path->mnoc_ab_bw;
						cons_axi_path->drv_vote.low.ib +=
							axi_path->mnoc_ib_bw;
					}

					break;
				}
			}

			/* If not found, add a new entry */
			/*
			 * NOTE(review): no bound check on num_paths before this
			 * append -- confirm axi_path[] is sized to hold every
			 * possible consolidated path.
			 */
			if (!cons_entry_found) {
				cons_axi_path = &con_axi_vote->axi_path[con_axi_vote->num_paths];
				cons_axi_path->path_data_type = i;
				cons_axi_path->transac_type = transac_type;
				if (axi_path->vote_level == CAM_CPAS_VOTE_LEVEL_HIGH) {
					cons_axi_path->drv_vote.high.camnoc = axi_path->camnoc_bw;
					cons_axi_path->drv_vote.high.ab = axi_path->mnoc_ab_bw;
					cons_axi_path->drv_vote.high.ib = axi_path->mnoc_ib_bw;
				} else {
					cons_axi_path->drv_vote.low.camnoc = axi_path->camnoc_bw;
					cons_axi_path->drv_vote.low.ab = axi_path->mnoc_ab_bw;
					cons_axi_path->drv_vote.low.ib = axi_path->mnoc_ib_bw;
				}

				con_axi_vote->num_paths++;
			}

			break;
		}
	}
}

/*
 * Rebuild the client's consolidated vote list from a raw axi vote.
 * Paths that have their own tree node are copied directly; the rest are
 * folded into a matching consolidated path (error if none matches).
 * Returns 0 on success, -EINVAL on invalid or unmatched paths.
 */
static int cam_cpas_axi_consolidate_path_votes(
	struct cam_cpas_client *cpas_client,
	struct cam_axi_vote *axi_vote)
{
	int rc = 0, i;
	bool path_found = false;
	uint32_t transac_type;
	uint32_t path_data_type;
	struct cam_cpas_tree_node *curr_tree_node = NULL;
	struct cam_cpas_axi_consolidate_per_path_bw_vote *cons_axi_path;
	struct cam_axi_consolidate_vote *con_axi_vote = &cpas_client->cons_axi_vote;

	memset(con_axi_vote, 0x0, sizeof(struct cam_axi_consolidate_vote));
	for (i = 0; i < axi_vote->num_paths; i++) {
		path_found = false;
		path_data_type = axi_vote->axi_path[i].path_data_type;
		transac_type = axi_vote->axi_path[i].transac_type;

		if ((path_data_type >= CAM_CPAS_PATH_DATA_MAX) ||
			(transac_type >= CAM_CPAS_TRANSACTION_MAX)) {
			CAM_ERR(CAM_CPAS, "Invalid path or transac type: %d %d",
				path_data_type, transac_type);
			return -EINVAL;
		}

		cons_axi_path = &con_axi_vote->axi_path[con_axi_vote->num_paths];
		curr_tree_node = cpas_client->tree_node[path_data_type][transac_type];
		if (curr_tree_node) {
			/* Direct copy when this exact path has its own tree node */
			cons_axi_path->transac_type = axi_vote->axi_path[i].transac_type;
			cons_axi_path->path_data_type = axi_vote->axi_path[i].path_data_type;
			if (axi_vote->axi_path[i].vote_level == CAM_CPAS_VOTE_LEVEL_HIGH) {
				cons_axi_path->drv_vote.high.camnoc =
					axi_vote->axi_path[i].camnoc_bw;
				cons_axi_path->drv_vote.high.ab = axi_vote->axi_path[i].mnoc_ab_bw;
				cons_axi_path->drv_vote.high.ib = axi_vote->axi_path[i].mnoc_ib_bw;
			} else {
				cons_axi_path->drv_vote.low.camnoc =
					axi_vote->axi_path[i].camnoc_bw;
				cons_axi_path->drv_vote.low.ab = axi_vote->axi_path[i].mnoc_ab_bw;
				cons_axi_path->drv_vote.low.ib = axi_vote->axi_path[i].mnoc_ib_bw;
			}

			con_axi_vote->num_paths++;
			continue;
		}

		cam_cpas_axi_util_find_consolidate_path(cpas_client, &axi_vote->axi_path[i],
			&path_found);
		if (!path_found) {
			CAM_ERR(CAM_CPAS,
				"Client [%s][%d] i=%d num_paths=%d Consolidated path not found for path=%d, transac=%d",
				cpas_client->data.identifier, cpas_client->data.cell_index, i,
				axi_vote->num_paths, path_data_type, transac_type);
			return -EINVAL;
		}
	}

	return rc;
}

/*
 * Push a root tree node's accumulated BW onto its mnoc axi port (and its
 * camnoc axi port when camnoc clk is not directly controlled), marking in
 * the two updated[] arrays which ports changed so the caller can re-vote
 * only those. Returns 0 on success or -EINVAL on a bad port index.
 */
static int cam_cpas_update_axi_vote_bw(
	struct cam_hw_info *cpas_hw,
	struct cam_cpas_tree_node *cpas_tree_node,
	int ddr_drv_idx, int cesta_drv_idx,
	bool *mnoc_axi_port_updated,
	bool *camnoc_axi_port_updated)
{
	int axi_port_idx = -1;
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	struct cam_cpas_private_soc *soc_private =
		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;

	/* Debug knob: skip ICC voting entirely */
	if (debug_bypass_drivers & CAM_BYPASS_ICC) {
		CAM_WARN(CAM_UTIL, "Bypass update axi vote bw");
		return 0;
	}

	axi_port_idx = cpas_tree_node->axi_port_idx_arr[ddr_drv_idx];

	if ((axi_port_idx < 0) || (axi_port_idx >= CAM_CPAS_MAX_AXI_PORTS)) {
		CAM_ERR(CAM_CPAS, "Invalid axi_port_idx: %d drv_idx: %d", axi_port_idx,
			ddr_drv_idx);
		return -EINVAL;
	}

	memcpy(&cpas_core->axi_port[axi_port_idx].curr_bw, &cpas_tree_node->bw_info[ddr_drv_idx],
		sizeof(struct cam_cpas_axi_bw_info));

	/* Add low value to high for drv */
	if (ddr_drv_idx > CAM_CPAS_PORT_HLOS_DRV) {
		cpas_core->axi_port[axi_port_idx].curr_bw.drv_vote.high.ab +=
			cpas_core->axi_port[axi_port_idx].curr_bw.drv_vote.low.ab;
		cpas_core->axi_port[axi_port_idx].curr_bw.drv_vote.high.ib +=
			cpas_core->axi_port[axi_port_idx].curr_bw.drv_vote.low.ib;
	}

	mnoc_axi_port_updated[axi_port_idx] = true;

	/* camnoc is handled through direct clk rate control, not a BW vote */
	if (soc_private->control_camnoc_axi_clk)
		return 0;

	if (cesta_drv_idx >
CAM_CPAS_PORT_HLOS_DRV)
		/*
		 * DRV vote: fold high+low camnoc into the hlos camnoc port entry.
		 * NOTE(review): BW is written to
		 * camnoc_axi_port[axi_port_idx_arr[HLOS]] but the updated flag is
		 * set on camnoc_axi_port_idx below -- confirm these indices
		 * always agree.
		 */
		cpas_core->camnoc_axi_port[cpas_tree_node->axi_port_idx_arr[CAM_CPAS_PORT_HLOS_DRV]]
			.curr_bw.hlos_vote.camnoc =
			cpas_tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc +
			cpas_tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc;
	else
		cpas_core->camnoc_axi_port[cpas_tree_node->axi_port_idx_arr[CAM_CPAS_PORT_HLOS_DRV]]
			.curr_bw.hlos_vote.camnoc =
			cpas_tree_node->bw_info[cesta_drv_idx].hlos_vote.camnoc;

	camnoc_axi_port_updated[cpas_tree_node->camnoc_axi_port_idx] = true;
	return 0;
}

/*
 * Re-vote BW on each camnoc axi port flagged in camnoc_axi_port_updated[].
 * Used only when camnoc is voted via a bus client instead of direct clk
 * rate control. Returns 0 on success or the first failing vote's errno.
 */
static int cam_cpas_camnoc_set_bw_vote(struct cam_hw_info *cpas_hw,
	bool *camnoc_axi_port_updated)
{
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	int i;
	int rc = 0;
	struct cam_cpas_axi_port *camnoc_axi_port = NULL;
	uint64_t camnoc_bw;

	/* Below code is executed if we just vote and do not set the clk rate
	 * for camnoc
	 */

	if (cpas_core->num_camnoc_axi_ports > CAM_CPAS_MAX_AXI_PORTS) {
		CAM_ERR(CAM_CPAS, "Invalid num_camnoc_axi_ports: %d",
			cpas_core->num_camnoc_axi_ports);
		return -EINVAL;
	}

	for (i = 0; i < cpas_core->num_camnoc_axi_ports; i++) {
		if (camnoc_axi_port_updated[i])
			camnoc_axi_port = &cpas_core->camnoc_axi_port[i];
		else
			continue;

		CAM_DBG(CAM_PERF, "Port[%s] : camnoc_bw=%lld",
			camnoc_axi_port->axi_port_name,
			camnoc_axi_port->curr_bw.hlos_vote.camnoc);

		/* Fall back to additional BW, then a default BW while streaming */
		if (camnoc_axi_port->curr_bw.hlos_vote.camnoc)
			camnoc_bw = camnoc_axi_port->curr_bw.hlos_vote.camnoc;
		else if (camnoc_axi_port->additional_bw)
			camnoc_bw = camnoc_axi_port->additional_bw;
		else if (cpas_core->streamon_clients)
			camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
		else
			camnoc_bw = 0;

		camnoc_axi_port->curr_bw.hlos_vote.ib = camnoc_bw;
		rc = cam_cpas_util_vote_hlos_bus_client_bw(cpas_hw->soc_info.soc_private, cpas_core,
			&camnoc_axi_port->bus_client, true, &camnoc_axi_port->curr_bw,
			&camnoc_axi_port->applied_bw);

		CAM_DBG(CAM_CPAS, "camnoc vote camnoc_bw[%llu] rc=%d %s",
			camnoc_bw,
rc, camnoc_axi_port->axi_port_name); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in camnoc vote camnoc_bw[%llu] rc=%d", + camnoc_bw, rc); + break; + } + } + return rc; +} + +static int cam_cpas_util_apply_client_axi_vote( + struct cam_hw_info *cpas_hw, + struct cam_cpas_client *cpas_client, + struct cam_axi_vote *axi_vote, uint32_t apply_type) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_axi_consolidate_vote *con_axi_vote = NULL; + struct cam_cpas_axi_port *mnoc_axi_port = NULL; + struct cam_cpas_tree_node *curr_tree_node = NULL; + struct cam_cpas_tree_node *par_tree_node = NULL; + uint32_t transac_type; + uint32_t path_data_type; + bool mnoc_axi_port_updated[CAM_CPAS_MAX_AXI_PORTS] = {false}; + bool camnoc_axi_port_updated[CAM_CPAS_MAX_AXI_PORTS] = {false}; + struct cam_cpas_axi_bw_info curr_mnoc_old = {0}, par_mnoc_old = {0}, curr_camnoc_old = {0}, + par_camnoc_old = {0}, curr_port_bw = {0}, applied_port_bw = {0}; + int rc = 0, i = 0, ddr_drv_idx, merge_type_factor = 1; + bool apply_smart_qos = false; + bool rt_bw_updated = false; + bool camnoc_unchanged; + int cesta_drv_idx = CAM_CPAS_PORT_HLOS_DRV; + int first_ddr_drv_idx = -1, first_cesta_drv_idx = -1; + + mutex_lock(&cpas_core->tree_lock); + if (!cpas_client->tree_node_valid) { + /* + * This is by assuming apply_client_axi_vote is called + * for these clients from only cpas_start, cpas_stop. 
+ * not called from hw_update_axi_vote + */ + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if (cpas_core->axi_port[i].bus_client.common_data.is_drv_port) + continue; + + if (axi_vote->axi_path[0].mnoc_ab_bw) { + /* start case */ + cpas_core->axi_port[i].additional_bw += + CAM_CPAS_DEFAULT_AXI_BW; + } else { + /* stop case */ + cpas_core->axi_port[i].additional_bw -= + CAM_CPAS_DEFAULT_AXI_BW; + } + mnoc_axi_port_updated[i] = true; + } + + for (i = 0; i < cpas_core->num_camnoc_axi_ports; i++) { + if (axi_vote->axi_path[0].camnoc_bw) { + /* start case */ + cpas_core->camnoc_axi_port[i].additional_bw += + CAM_CPAS_DEFAULT_AXI_BW; + } else { + /* stop case */ + cpas_core->camnoc_axi_port[i].additional_bw -= + CAM_CPAS_DEFAULT_AXI_BW; + } + camnoc_axi_port_updated[i] = true; + } + + goto vote_start_clients; + } + + rc = cam_cpas_axi_consolidate_path_votes(cpas_client, axi_vote); + if (rc) { + CAM_ERR(CAM_PERF, "Failed in bw consolidation, Client [%s][%d]", + cpas_client->data.identifier, + cpas_client->data.cell_index); + goto unlock_tree; + } + + con_axi_vote = &cpas_client->cons_axi_vote; + + cam_cpas_dump_cons_axi_vote_info(cpas_client, "Consolidated Vote", con_axi_vote); + cam_cpas_dump_full_tree_state(cpas_hw, "BeforeClientVoteUpdate"); + + /* Traverse through node tree and update bw vote values */ + for (i = 0; i < con_axi_vote->num_paths; i++) { + camnoc_unchanged = false; + path_data_type = con_axi_vote->axi_path[i].path_data_type; + transac_type = con_axi_vote->axi_path[i].transac_type; + curr_tree_node = cpas_client->tree_node[path_data_type][transac_type]; + ddr_drv_idx = curr_tree_node->drv_voting_idx; + cesta_drv_idx = curr_tree_node->drv_voting_idx; + + if (!soc_private->enable_cam_ddr_drv || cpas_core->force_hlos_drv) { + ddr_drv_idx = CAM_CPAS_PORT_HLOS_DRV; + cesta_drv_idx = CAM_CPAS_PORT_HLOS_DRV; + } else if (!soc_private->enable_cam_clk_drv || cpas_core->force_cesta_sw_client) { + cesta_drv_idx = CAM_CPAS_PORT_HLOS_DRV; + } + + if ((ddr_drv_idx 
< 0) || (ddr_drv_idx > CAM_CPAS_PORT_DRV_2) || + (cesta_drv_idx < 0) || (cesta_drv_idx > CAM_CPAS_PORT_DRV_2)) { + CAM_ERR(CAM_CPAS, "Invalid drv idx : ddr_drv_idx=%d, cesta_drv_idx=%d", + ddr_drv_idx, cesta_drv_idx); + goto unlock_tree; + } + + if (i == 0) { + first_ddr_drv_idx = ddr_drv_idx; + first_cesta_drv_idx = cesta_drv_idx; + } else if ((first_ddr_drv_idx != ddr_drv_idx) || + (first_cesta_drv_idx != cesta_drv_idx)) { + /* + * drv indices won't change for a given client for different paths in a + * given axi vote update + */ + CAM_WARN(CAM_CPAS, "DRV indices different : DDR: %d, %d, CESTA %d %d", + first_ddr_drv_idx, ddr_drv_idx, first_cesta_drv_idx, cesta_drv_idx); + } + + memcpy(&curr_camnoc_old, &curr_tree_node->bw_info[cesta_drv_idx], + sizeof(struct cam_cpas_axi_bw_info)); + memcpy(&curr_mnoc_old, &curr_tree_node->bw_info[ddr_drv_idx], + sizeof(struct cam_cpas_axi_bw_info)); + cam_cpas_dump_tree_vote_info(cpas_hw, curr_tree_node, "Level0 before update", + ddr_drv_idx, cesta_drv_idx); + + /* Check and update camnoc bw first */ + if (cesta_drv_idx > CAM_CPAS_PORT_HLOS_DRV) { + if ((apply_type != CAM_CPAS_APPLY_TYPE_STOP) && + (curr_tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc + == con_axi_vote->axi_path[i].drv_vote.high.camnoc) && + (curr_tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc + == con_axi_vote->axi_path[i].drv_vote.low.camnoc)) { + camnoc_unchanged = true; + goto update_l0_mnoc; + } + + curr_tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc = + con_axi_vote->axi_path[i].drv_vote.high.camnoc; + curr_tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc = + con_axi_vote->axi_path[i].drv_vote.low.camnoc; + } else { + if (curr_tree_node->bw_info[cesta_drv_idx].hlos_vote.camnoc == + (con_axi_vote->axi_path[i].drv_vote.high.camnoc + + con_axi_vote->axi_path[i].drv_vote.low.camnoc)) { + camnoc_unchanged = true; + goto update_l0_mnoc; + } + + curr_tree_node->bw_info[cesta_drv_idx].hlos_vote.camnoc = + 
(con_axi_vote->axi_path[i].drv_vote.high.camnoc + + con_axi_vote->axi_path[i].drv_vote.low.camnoc); + } + +update_l0_mnoc: + /* Check and update mnoc ab and ib */ + if (cesta_drv_idx > CAM_CPAS_PORT_HLOS_DRV) { + if ((apply_type != CAM_CPAS_APPLY_TYPE_STOP) && camnoc_unchanged && + (curr_tree_node->bw_info[cesta_drv_idx].drv_vote.high.ab + == con_axi_vote->axi_path[i].drv_vote.high.ab) && + (curr_tree_node->bw_info[cesta_drv_idx].drv_vote.low.ab + == con_axi_vote->axi_path[i].drv_vote.low.ab) && + (curr_tree_node->bw_info[cesta_drv_idx].drv_vote.high.ib + == con_axi_vote->axi_path[i].drv_vote.high.ib) && + (curr_tree_node->bw_info[cesta_drv_idx].drv_vote.low.ib + == con_axi_vote->axi_path[i].drv_vote.low.ib)) { + continue; + } + + curr_tree_node->bw_info[cesta_drv_idx].drv_vote.high.ab = + con_axi_vote->axi_path[i].drv_vote.high.ab; + curr_tree_node->bw_info[cesta_drv_idx].drv_vote.low.ab = + con_axi_vote->axi_path[i].drv_vote.low.ab; + curr_tree_node->bw_info[cesta_drv_idx].drv_vote.high.ib = + con_axi_vote->axi_path[i].drv_vote.high.ib; + curr_tree_node->bw_info[cesta_drv_idx].drv_vote.low.ib = + con_axi_vote->axi_path[i].drv_vote.low.ib; + } else { + if (camnoc_unchanged && + (curr_tree_node->bw_info[cesta_drv_idx].hlos_vote.ab == + (con_axi_vote->axi_path[i].drv_vote.high.ab + + con_axi_vote->axi_path[i].drv_vote.low.ab)) && + (curr_tree_node->bw_info[cesta_drv_idx].hlos_vote.ib == + (con_axi_vote->axi_path[i].drv_vote.high.ib + + con_axi_vote->axi_path[i].drv_vote.low.ib))) { + continue; + } + + curr_tree_node->bw_info[cesta_drv_idx].hlos_vote.ab = + (con_axi_vote->axi_path[i].drv_vote.high.ab + + con_axi_vote->axi_path[i].drv_vote.low.ab); + curr_tree_node->bw_info[cesta_drv_idx].hlos_vote.ib = + (con_axi_vote->axi_path[i].drv_vote.high.ib + + con_axi_vote->axi_path[i].drv_vote.low.ib); + } + + cam_cpas_dump_tree_vote_info(cpas_hw, curr_tree_node, "Level0 after update", + ddr_drv_idx, cesta_drv_idx); + + while (curr_tree_node->parent_node) { + par_tree_node = 
curr_tree_node->parent_node; + memcpy(&par_camnoc_old, &par_tree_node->bw_info[cesta_drv_idx], + sizeof(struct cam_cpas_axi_bw_info)); + memcpy(&par_mnoc_old, &par_tree_node->bw_info[ddr_drv_idx], + sizeof(struct cam_cpas_axi_bw_info)); + cam_cpas_dump_tree_vote_info(cpas_hw, par_tree_node, "Parent before update", + ddr_drv_idx, cesta_drv_idx); + + if (par_tree_node->merge_type == CAM_CPAS_TRAFFIC_MERGE_SUM) { + merge_type_factor = 1; + } else if (par_tree_node->merge_type == + CAM_CPAS_TRAFFIC_MERGE_SUM_INTERLEAVE) { + merge_type_factor = 2; + } else { + CAM_ERR(CAM_CPAS, "Invalid Merge type"); + rc = -EINVAL; + goto unlock_tree; + } + + /* + * Remove contribution of current node old camnoc bw from parent, + * then add new camnoc bw of current level to the parent + */ + if (cesta_drv_idx > CAM_CPAS_PORT_HLOS_DRV) { + par_tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc -= + (curr_camnoc_old.drv_vote.high.camnoc/merge_type_factor); + par_tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc -= + (curr_camnoc_old.drv_vote.low.camnoc/merge_type_factor); + + par_tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc += + (curr_tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc/ + merge_type_factor); + par_tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc += + (curr_tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc/ + merge_type_factor); + } else { + par_tree_node->bw_info[cesta_drv_idx].hlos_vote.camnoc -= + (curr_camnoc_old.hlos_vote.camnoc/merge_type_factor); + + par_tree_node->bw_info[cesta_drv_idx].hlos_vote.camnoc += + (curr_tree_node->bw_info[cesta_drv_idx].hlos_vote.camnoc/ + merge_type_factor); + } + + /* + * Remove contribution of current node old mnoc bw from parent, + * then add new mnoc bw of current level to the parent + */ + if (ddr_drv_idx > CAM_CPAS_PORT_HLOS_DRV) { + par_tree_node->bw_info[ddr_drv_idx].drv_vote.high.ab -= + curr_mnoc_old.drv_vote.high.ab; + par_tree_node->bw_info[ddr_drv_idx].drv_vote.high.ib -= + 
curr_mnoc_old.drv_vote.high.ib; + par_tree_node->bw_info[ddr_drv_idx].drv_vote.low.ab -= + curr_mnoc_old.drv_vote.low.ab; + par_tree_node->bw_info[ddr_drv_idx].drv_vote.low.ib -= + curr_mnoc_old.drv_vote.low.ib; + + par_tree_node->bw_info[ddr_drv_idx].drv_vote.high.ab += + curr_tree_node->bw_info[ddr_drv_idx].drv_vote.high.ab; + par_tree_node->bw_info[ddr_drv_idx].drv_vote.high.ib += + curr_tree_node->bw_info[ddr_drv_idx].drv_vote.high.ib; + par_tree_node->bw_info[ddr_drv_idx].drv_vote.low.ab += + curr_tree_node->bw_info[ddr_drv_idx].drv_vote.low.ab; + par_tree_node->bw_info[ddr_drv_idx].drv_vote.low.ib += + curr_tree_node->bw_info[ddr_drv_idx].drv_vote.low.ib; + } else { + par_tree_node->bw_info[ddr_drv_idx].hlos_vote.ab -= + curr_mnoc_old.hlos_vote.ab; + par_tree_node->bw_info[ddr_drv_idx].hlos_vote.ib -= + curr_mnoc_old.hlos_vote.ib; + + par_tree_node->bw_info[ddr_drv_idx].hlos_vote.ab += + curr_tree_node->bw_info[ddr_drv_idx].hlos_vote.ab; + par_tree_node->bw_info[ddr_drv_idx].hlos_vote.ib += + curr_tree_node->bw_info[ddr_drv_idx].hlos_vote.ib; + } + + cam_cpas_dump_tree_vote_info(cpas_hw, par_tree_node, "Parent after update", + ddr_drv_idx, cesta_drv_idx); + + if (!par_tree_node->parent_node) { + rc = cam_cpas_update_axi_vote_bw(cpas_hw, par_tree_node, + ddr_drv_idx, cesta_drv_idx, mnoc_axi_port_updated, + camnoc_axi_port_updated); + if (rc) { + CAM_ERR(CAM_CPAS, "Update Vote failed"); + goto unlock_tree; + } + } + + curr_tree_node = par_tree_node; + memcpy(&curr_camnoc_old, &par_camnoc_old, + sizeof(struct cam_cpas_axi_bw_info)); + memcpy(&curr_mnoc_old, &par_mnoc_old, sizeof(struct cam_cpas_axi_bw_info)); + } + } + + cam_cpas_dump_full_tree_state(cpas_hw, "AfterClientVoteUpdate"); + + if (!par_tree_node) { + CAM_DBG(CAM_CPAS, "No change in BW for all paths"); + rc = 0; + goto unlock_tree; + } + + if (soc_private->enable_smart_qos) { + CAM_DBG(CAM_PERF, "Start QoS update for client[%s][%d]", + cpas_client->data.identifier, cpas_client->data.cell_index); + for 
(i = 0; i < cpas_core->num_axi_ports; i++) { + if (mnoc_axi_port_updated[i] && cpas_core->axi_port[i].is_rt) { + rt_bw_updated = true; + break; + } + } + + if (rt_bw_updated) { + apply_smart_qos = cam_cpas_calculate_smart_qos(cpas_hw); + + if (apply_smart_qos && cam_cpas_is_new_rt_bw_lower(cpas_hw)) { + /* + * If new BW is low, apply QoS first and then vote, + * otherwise vote first and then apply QoS + */ + CAM_DBG(CAM_PERF, "Apply Smart QoS first"); + rc = cam_cpas_apply_smart_qos(cpas_hw); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in Smart QoS rc=%d", rc); + goto unlock_tree; + } + + apply_smart_qos = false; + } + } + } + +vote_start_clients: + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if (mnoc_axi_port_updated[i]) + mnoc_axi_port = &cpas_core->axi_port[i]; + else + continue; + + memcpy(&curr_port_bw, &mnoc_axi_port->curr_bw, sizeof(struct cam_cpas_axi_bw_info)); + + if (mnoc_axi_port->bus_client.common_data.is_drv_port) { + CAM_DBG(CAM_PERF, + "Port[%s] :DRV high [%lld %lld] low [%lld %lld] streamon_clients=%d", + mnoc_axi_port->axi_port_name, + mnoc_axi_port->curr_bw.drv_vote.high.ab, + mnoc_axi_port->curr_bw.drv_vote.high.ib, + mnoc_axi_port->curr_bw.drv_vote.low.ab, + mnoc_axi_port->curr_bw.drv_vote.low.ib, + cpas_core->streamon_clients); + + if (!mnoc_axi_port->ib_bw_voting_needed) { + curr_port_bw.drv_vote.high.ib = 0; + curr_port_bw.drv_vote.low.ib = 0; + } + + /* Vote bw on appropriate bus id */ + rc = cam_cpas_util_vote_drv_bus_client_bw(soc_private, cpas_core, + &mnoc_axi_port->bus_client, + &curr_port_bw, &applied_port_bw); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in mnoc vote for %s rc=%d", + mnoc_axi_port->axi_port_name, rc); + goto unlock_tree; + } + + /* Do start/stop/channel switch based on apply type */ + if ((apply_type == CAM_CPAS_APPLY_TYPE_START) && + !mnoc_axi_port->is_drv_started) { + rc = cam_cpas_start_drv_for_dev(mnoc_axi_port->cam_rsc_dev); + if (rc) { + CAM_ERR(CAM_CPAS, "Port[%s] failed in DRV start rc:%d", + 
mnoc_axi_port->axi_port_name, rc); + goto unlock_tree; + } + + if (debug_drv) + CAM_INFO(CAM_CPAS, "Started rsc dev %s mnoc port:%s", + dev_name(mnoc_axi_port->cam_rsc_dev), + mnoc_axi_port->axi_port_name); + + CAM_DBG(CAM_CPAS, "Started rsc dev %s mnoc port:%s", + dev_name(mnoc_axi_port->cam_rsc_dev), + mnoc_axi_port->axi_port_name); + mnoc_axi_port->is_drv_started = true; + } else if ((apply_type == CAM_CPAS_APPLY_TYPE_STOP) && + mnoc_axi_port->is_drv_started && + (applied_port_bw.drv_vote.high.ab == 0) && + (applied_port_bw.drv_vote.high.ib == 0) && + (applied_port_bw.drv_vote.low.ab == 0) && + (applied_port_bw.drv_vote.low.ib == 0)) { + rc = cam_cpas_stop_drv_for_dev(mnoc_axi_port->cam_rsc_dev); + if (rc) { + CAM_ERR(CAM_CPAS, "Port[%s] failed in DRV stop rc:%d", + mnoc_axi_port->axi_port_name, rc); + goto unlock_tree; + } + + if (debug_drv) + CAM_INFO(CAM_CPAS, "Stopped rsc dev %s mnoc port:%s", + dev_name(mnoc_axi_port->cam_rsc_dev), + mnoc_axi_port->axi_port_name); + + CAM_DBG(CAM_CPAS, "Stopped rsc dev %s mnoc port:%s", + dev_name(mnoc_axi_port->cam_rsc_dev), + mnoc_axi_port->axi_port_name); + mnoc_axi_port->is_drv_started = false; + } else { + if (mnoc_axi_port->is_drv_started) { + rc = cam_cpas_drv_channel_switch_for_dev( + mnoc_axi_port->cam_rsc_dev); + if (rc) { + CAM_ERR(CAM_CPAS, + "Port[%s] failed in channel switch rc:%d", + mnoc_axi_port->axi_port_name, rc); + goto unlock_tree; + } + + if (debug_drv) + CAM_INFO(CAM_CPAS, + "Channel switch for rsc dev %s mnoc port:%s", + dev_name(mnoc_axi_port->cam_rsc_dev), + mnoc_axi_port->axi_port_name); + + CAM_DBG(CAM_CPAS, + "Channel switch for rsc dev %s mnoc port:%s", + dev_name(mnoc_axi_port->cam_rsc_dev), + mnoc_axi_port->axi_port_name); + } + } + + } else { + CAM_DBG(CAM_PERF, + "Port[%s] :HLOS ab=%lld ib=%lld additional=%lld, streamon_clients=%d", + mnoc_axi_port->axi_port_name, mnoc_axi_port->curr_bw.hlos_vote.ab, + mnoc_axi_port->curr_bw.hlos_vote.ib, mnoc_axi_port->additional_bw, + 
cpas_core->streamon_clients); + + if (!mnoc_axi_port->curr_bw.hlos_vote.ab) { + if (mnoc_axi_port->additional_bw) + curr_port_bw.hlos_vote.ab = mnoc_axi_port->additional_bw; + else if (cpas_core->streamon_clients) + curr_port_bw.hlos_vote.ab = CAM_CPAS_DEFAULT_AXI_BW; + else + curr_port_bw.hlos_vote.ab = 0; + } + + if (!mnoc_axi_port->ib_bw_voting_needed) + curr_port_bw.hlos_vote.ib = 0; + + rc = cam_cpas_util_vote_hlos_bus_client_bw(soc_private, cpas_core, + &mnoc_axi_port->bus_client, false, &curr_port_bw, &applied_port_bw); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in mnoc vote for %s rc=%d", + mnoc_axi_port->axi_port_name, rc); + goto unlock_tree; + } + } + + memcpy(&mnoc_axi_port->applied_bw, &applied_port_bw, + sizeof(struct cam_cpas_axi_bw_info)); + } + + if (soc_private->control_camnoc_axi_clk) { + rc = cam_cpas_util_set_camnoc_axi_clk_rate(cpas_hw, cesta_drv_idx); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in setting axi clk rate rc=%d", rc); + goto unlock_tree; + } + } else { + rc = cam_cpas_camnoc_set_bw_vote(cpas_hw, camnoc_axi_port_updated); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in setting camnoc bw vote rc=%d", rc); + goto unlock_tree; + } + } + + if (soc_private->enable_smart_qos && apply_smart_qos) { + CAM_DBG(CAM_PERF, "Apply Smart QoS after bw votes"); + + rc = cam_cpas_apply_smart_qos(cpas_hw); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in Smart QoS rc=%d", rc); + goto unlock_tree; + } + } + +unlock_tree: + mutex_unlock(&cpas_core->tree_lock); + return rc; +} + +static int cam_cpas_util_apply_default_axi_vote( + struct cam_hw_info *cpas_hw, bool enable) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_axi_port *axi_port = NULL; + uint64_t mnoc_ab_bw = 0, mnoc_ib_bw = 0; + int rc = 0, i = 0; + + mutex_lock(&cpas_core->tree_lock); + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if ((!cpas_core->axi_port[i].bus_client.common_data.is_drv_port) && + (!cpas_core->axi_port[i].curr_bw.hlos_vote.ab || + 
			!cpas_core->axi_port[i].curr_bw.hlos_vote.ib))
			axi_port = &cpas_core->axi_port[i];
		else
			continue;

		if (enable)
			mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW;
		else
			mnoc_ib_bw = 0;

		CAM_DBG(CAM_CPAS, "Port=[%s] :ab[%llu] ib[%llu]",
			axi_port->axi_port_name, mnoc_ab_bw, mnoc_ib_bw);

		axi_port->curr_bw.hlos_vote.ab = mnoc_ab_bw;
		axi_port->curr_bw.hlos_vote.ib = mnoc_ib_bw;
		rc = cam_cpas_util_vote_hlos_bus_client_bw(cpas_hw->soc_info.soc_private, cpas_core,
			&axi_port->bus_client, false, &axi_port->curr_bw, &axi_port->applied_bw);
		if (rc) {
			CAM_ERR(CAM_CPAS, "Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
				mnoc_ab_bw, mnoc_ib_bw, rc);
			goto unlock_tree;
		}
	}

unlock_tree:
	mutex_unlock(&cpas_core->tree_lock);
	return rc;
}

/*
 * HW-manager entry point: validate and translate a client's incoming axi
 * vote, then apply it through the consolidation path. Takes hw_mutex and
 * the per-client mutex; works on a private copy of the vote.
 */
static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
	uint32_t client_handle, struct cam_axi_vote *client_axi_vote)
{
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	struct cam_cpas_client *cpas_client = NULL;
	struct cam_axi_vote *axi_vote = NULL;
	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
	int rc = 0;

	if (!client_axi_vote) {
		CAM_ERR(CAM_CPAS, "Invalid arg, client_handle=%d",
			client_handle);
		return -EINVAL;
	}

	if (!CAM_CPAS_CLIENT_VALID(client_indx))
		return -EINVAL;

	mutex_lock(&cpas_hw->hw_mutex);
	mutex_lock(&cpas_core->client_mutex[client_indx]);

	/* Private copy so path translation does not mutate the caller's vote */
	axi_vote = kmemdup(client_axi_vote, sizeof(struct cam_axi_vote),
		GFP_KERNEL);
	if (!axi_vote) {
		CAM_ERR(CAM_CPAS, "Out of memory");
		mutex_unlock(&cpas_core->client_mutex[client_indx]);
		mutex_unlock(&cpas_hw->hw_mutex);
		return -ENOMEM;
	}

	cam_cpas_dump_axi_vote_info(cpas_core->cpas_client[client_indx],
		"Incoming Vote", axi_vote);

	cpas_client = cpas_core->cpas_client[client_indx];

	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
			client_indx, cpas_client->data.identifier,
cpas_client->data.cell_index); + rc = -EPERM; + goto unlock_client; + } + + rc = cam_cpas_util_translate_client_paths(axi_vote); + if (rc) { + CAM_ERR(CAM_CPAS, + "Unable to translate per path votes rc: %d", rc); + goto unlock_client; + } + + cam_cpas_dump_axi_vote_info(cpas_core->cpas_client[client_indx], + "Translated Vote", axi_vote); + + rc = cam_cpas_util_apply_client_axi_vote(cpas_hw, + cpas_core->cpas_client[client_indx], axi_vote, CAM_CPAS_APPLY_TYPE_UPDATE); + + /* Log an entry whenever there is an AXI update - after updating */ + cam_cpas_update_monitor_array(cpas_hw, "CPAS AXI post-update", + client_indx); +unlock_client: + cam_free_clear((void *)axi_vote); + axi_vote = NULL; + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + return rc; +} + +static int cam_cpas_hw_update_axi_floor_lvl(struct cam_hw_info *cpas_hw, + uint32_t client_handle, enum cam_vote_level floor_lvl, bool locked) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle); + struct cam_cpas_client *cpas_client = cpas_core->cpas_client[client_indx]; + enum cam_vote_level required_level = floor_lvl; + enum cam_vote_level highest_level; + int i, rc = 0; + + if (!locked) { + mutex_lock(&cpas_hw->hw_mutex); + mutex_lock(&cpas_core->client_mutex[client_indx]); + } + + if (cpas_client->axi_level == required_level) + goto end; + + CAM_DBG(CAM_CPAS, "CPAS client: %s curr lvl: %d required lvl: %d", + cpas_client->data.identifier, cpas_client->axi_level, + required_level); + + cpas_client->axi_level = required_level; + + highest_level = required_level; + for (i = 0; i < cpas_core->num_clients; i++) { + if (cpas_core->cpas_client[i] && + (highest_level < cpas_core->cpas_client[i]->axi_level)) + highest_level = cpas_core->cpas_client[i]->axi_level; + } + + CAM_DBG(CAM_CPAS, "Required highest_level[%d]", highest_level); + cpas_core->hlos_axi_floor_lvl = highest_level; + + rc = 
cam_cpas_util_set_max_camnoc_axi_clk_rate(cpas_core, + &cpas_hw->soc_info); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in scaling clock rate level %d for AXI", + highest_level); + } + +end: + if (!locked) { + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + } + return rc; +} + +static int cam_cpas_util_get_ahb_level(struct cam_hw_info *cpas_hw, + struct device *dev, unsigned long freq, enum cam_vote_level *req_level) +{ + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct dev_pm_opp *opp; + unsigned int corner; + enum cam_vote_level level = CAM_LOWSVS_D1_VOTE; + unsigned long corner_freq = freq; + int i; + + if (!dev || !req_level) { + CAM_ERR(CAM_CPAS, "Invalid params %pK, %pK", dev, req_level); + return -EINVAL; + } + + opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq); + if (IS_ERR(opp)) { + CAM_DBG(CAM_CPAS, "OPP Ceil not available for freq :%ld, %pK", + corner_freq, opp); + *req_level = CAM_TURBO_VOTE; + return 0; + } + + corner = dev_pm_opp_get_voltage(opp); + + for (i = 0; i < soc_private->num_vdd_ahb_mapping; i++) + if (corner == soc_private->vdd_ahb[i].vdd_corner) + level = soc_private->vdd_ahb[i].ahb_level; + + CAM_DBG(CAM_CPAS, + "From OPP table : freq=[%ld][%ld], corner=%d, level=%d", + freq, corner_freq, corner, level); + + *req_level = level; + + return 0; +} + +static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw, + struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote, + enum cam_vote_level *applied_level) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client; + enum cam_vote_level required_level; + enum cam_vote_level highest_level; + int i, rc = 0; + + if (!ahb_bus_client->valid) { + CAM_ERR(CAM_CPAS, "AHB Bus client not valid"); + return -EINVAL; + } + + if (ahb_vote->type == CAM_VOTE_DYNAMIC) { + rc = 
cam_cpas_util_get_ahb_level(cpas_hw, cpas_client->data.dev, + ahb_vote->vote.freq, &required_level); + if (rc) + return rc; + } else { + required_level = ahb_vote->vote.level; + } + + if (cpas_client->ahb_level == required_level) + return 0; + + mutex_lock(&ahb_bus_client->lock); + cpas_client->ahb_level = required_level; + + CAM_DBG(CAM_CPAS, "Client[%s] required level[%d], curr_level[%d]", + ahb_bus_client->common_data.name, required_level, + ahb_bus_client->curr_vote_level); + + if (required_level == ahb_bus_client->curr_vote_level) + goto unlock_bus_client; + + highest_level = required_level; + for (i = 0; i < cpas_core->num_clients; i++) { + if (cpas_core->cpas_client[i] && (highest_level < + cpas_core->cpas_client[i]->ahb_level)) + highest_level = cpas_core->cpas_client[i]->ahb_level; + } + + CAM_DBG(CAM_CPAS, "Required highest_level[%d]", highest_level); + + if (!cpas_core->ahb_bus_scaling_disable) { + rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client, + highest_level); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in ahb vote, level=%d, rc=%d", + highest_level, rc); + goto unlock_bus_client; + } + } + + if (cpas_core->streamon_clients) { + rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, CAM_CLK_SW_CLIENT_IDX, + highest_level, 0, true); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in scaling clock rate level %d for AHB", + highest_level); + goto unlock_bus_client; + } + } + + if (applied_level) + *applied_level = highest_level; + +unlock_bus_client: + mutex_unlock(&ahb_bus_client->lock); + return rc; +} + +static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw, + uint32_t client_handle, struct cam_ahb_vote *client_ahb_vote) +{ + struct cam_ahb_vote ahb_vote; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_client *cpas_client = NULL; + uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle); + int rc = 0; + + if (!client_ahb_vote) { + CAM_ERR(CAM_CPAS, "Invalid input arg"); + return -EINVAL; + } 
+ + ahb_vote = *client_ahb_vote; + + if (ahb_vote.vote.level == 0) { + CAM_DBG(CAM_CPAS, "0 ahb vote from client %d", + client_handle); + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + } + + if (!CAM_CPAS_CLIENT_VALID(client_indx)) + return -EINVAL; + + mutex_lock(&cpas_hw->hw_mutex); + mutex_lock(&cpas_core->client_mutex[client_indx]); + cpas_client = cpas_core->cpas_client[client_indx]; + + if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index); + rc = -EPERM; + goto unlock_client; + } + + CAM_DBG(CAM_PERF, + "client=[%d][%s][%d] : type[%d], level[%d], freq[%ld], applied[%d]", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index, ahb_vote.type, + ahb_vote.vote.level, ahb_vote.vote.freq, + cpas_core->cpas_client[client_indx]->ahb_level); + + rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, + cpas_core->cpas_client[client_indx], &ahb_vote, NULL); + +unlock_client: + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + return rc; +} + +static int cam_cpas_util_create_vote_all_paths( + struct cam_cpas_client *cpas_client, + struct cam_axi_vote *axi_vote, bool is_start_vote) +{ + int i, j; + uint64_t camnoc_bw, mnoc_ab_bw, mnoc_ib_bw; + struct cam_cpas_axi_per_path_bw_vote *axi_path; + + if (!cpas_client || !axi_vote) + return -EINVAL; + + camnoc_bw = axi_vote->axi_path[0].camnoc_bw; + mnoc_ab_bw = axi_vote->axi_path[0].mnoc_ab_bw; + mnoc_ib_bw = axi_vote->axi_path[0].mnoc_ib_bw; + + axi_vote->num_paths = 0; + + for (i = 0; i < CAM_CPAS_TRANSACTION_MAX; i++) { + for (j = 0; j < CAM_CPAS_PATH_DATA_MAX; j++) { + if (cpas_client->tree_node[j][i]) { + axi_path = &axi_vote->axi_path[axi_vote->num_paths]; + + axi_path->path_data_type = j; + axi_path->transac_type = i; + axi_path->camnoc_bw = camnoc_bw; + axi_path->mnoc_ab_bw = mnoc_ab_bw; + 
axi_path->mnoc_ib_bw = mnoc_ib_bw; + if (cpas_client->tree_node[j][i]->drv_voting_idx > + CAM_CPAS_PORT_HLOS_DRV) + axi_path->vote_level = CAM_CPAS_VOTE_LEVEL_LOW; + + axi_vote->num_paths++; + + if (is_start_vote) + return 0; + } + } + } + + return 0; +} + +static int cam_cpas_hw_start(void *hw_priv, void *start_args, + uint32_t arg_size) +{ + struct cam_hw_info *cpas_hw; + struct cam_cpas *cpas_core; + uint32_t client_indx; + struct cam_cpas_hw_cmd_start *cmd_hw_start; + struct cam_cpas_client *cpas_client; + struct cam_ahb_vote *ahb_vote; + struct cam_ahb_vote remove_ahb; + struct cam_axi_vote axi_vote = {0}; + enum cam_vote_level applied_level = CAM_LOWSVS_D1_VOTE; + int rc, i = 0, err_val = 0; + struct cam_cpas_private_soc *soc_private = NULL; + bool invalid_start = true; + int count; + + if (!hw_priv || !start_args) { + CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK", + hw_priv, start_args); + return -EINVAL; + } + + if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) { + CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d", + sizeof(struct cam_cpas_hw_cmd_start), arg_size); + return -EINVAL; + } + + cpas_hw = (struct cam_hw_info *)hw_priv; + cpas_core = (struct cam_cpas *) cpas_hw->core_info; + soc_private = (struct cam_cpas_private_soc *) + cpas_hw->soc_info.soc_private; + cmd_hw_start = (struct cam_cpas_hw_cmd_start *)start_args; + client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_start->client_handle); + ahb_vote = cmd_hw_start->ahb_vote; + + if (!ahb_vote || !cmd_hw_start->axi_vote) + return -EINVAL; + + if (ahb_vote->vote.level == CAM_SUSPEND_VOTE) { + CAM_ERR(CAM_CPAS, "Invalid vote ahb[%d]", + ahb_vote->vote.level); + return -EINVAL; + } + + memcpy(&axi_vote, cmd_hw_start->axi_vote, sizeof(struct cam_axi_vote)); + for (i = 0; i < axi_vote.num_paths; i++) { + if ((axi_vote.axi_path[i].camnoc_bw != 0) || + (axi_vote.axi_path[i].mnoc_ab_bw != 0) || + (axi_vote.axi_path[i].mnoc_ib_bw != 0)) { + invalid_start = false; + break; + } + } + + if (invalid_start) { + 
CAM_ERR(CAM_CPAS, "Zero start vote"); + return -EINVAL; + } + + if (!CAM_CPAS_CLIENT_VALID(client_indx)) + return -EINVAL; + + mutex_lock(&cpas_hw->hw_mutex); + mutex_lock(&cpas_core->client_mutex[client_indx]); + cpas_client = cpas_core->cpas_client[client_indx]; + + if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "client=[%d] is not registered", + client_indx); + rc = -EPERM; + goto error; + } + + if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] is in start state", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index); + rc = -EPERM; + goto error; + } + + CAM_DBG(CAM_CPAS, + "AHB :client=[%d][%s][%d] type[%d], level[%d], applied[%d]", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index, + ahb_vote->type, ahb_vote->vote.level, cpas_client->ahb_level); + rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client, + ahb_vote, &applied_level); + if (rc) + goto error; + + cam_cpas_dump_axi_vote_info(cpas_client, "CPAS Start Vote", &axi_vote); + + /* + * If client has indicated start bw to be applied on all paths + * of client, apply that otherwise apply whatever the client supplies + * for specific paths + */ + if (axi_vote.axi_path[0].path_data_type == CAM_CPAS_API_PATH_DATA_STD_START) + rc = cam_cpas_util_create_vote_all_paths(cpas_client, &axi_vote, true); + else + rc = cam_cpas_util_translate_client_paths(&axi_vote); + + if (rc) { + CAM_ERR(CAM_CPAS, "Unable to create or translate paths rc: %d", + rc); + goto remove_ahb_vote; + } + + cam_cpas_dump_axi_vote_info(cpas_client, "CPAS Start Translated Vote", &axi_vote); + + if (cpas_core->streamon_clients == 0) { + if (cpas_core->force_hlos_drv) { + soc_private->enable_cam_ddr_drv = false; + soc_private->enable_cam_clk_drv = false; + } + + if (cpas_core->force_cesta_sw_client) + soc_private->enable_cam_clk_drv = false; + + if (debug_drv) + CAM_INFO(CAM_CPAS, "DRV enable[DDR CLK]:[%s %s]", + 
CAM_BOOL_TO_YESNO(soc_private->enable_cam_ddr_drv), + CAM_BOOL_TO_YESNO(soc_private->enable_cam_clk_drv)); + + rc = cam_cpas_util_apply_default_axi_vote(cpas_hw, true); + if (rc) + goto remove_ahb_vote; + + atomic_set(&cpas_core->soc_access_count, 1); + + count = cam_soc_util_regulators_enabled(&cpas_hw->soc_info); + if (count > 0) + CAM_DBG(CAM_CPAS, "Regulators already enabled %d", count); + + rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, + applied_level); + if (rc) { + atomic_set(&cpas_core->soc_access_count, 0); + CAM_ERR(CAM_CPAS, "enable_resorce failed, rc=%d", rc); + goto remove_ahb_vote; + } + + if (cpas_core->internal_ops.handle_reset_res_control) { + rc = cpas_core->internal_ops.handle_reset_res_control(cpas_hw); + if (rc) { + CAM_WARN(CAM_CPAS, "failed in reset resource control rc=%d", rc); + /* Do not return error, passthrough */ + } + } + + if (cpas_core->internal_ops.qchannel_handshake) { + rc = cpas_core->internal_ops.qchannel_handshake(cpas_hw, true, false); + if (rc) { + CAM_WARN(CAM_CPAS, "failed in qchannel_handshake rc=%d", rc); + /* Do not return error, passthrough */ + + rc = cpas_core->internal_ops.qchannel_handshake(cpas_hw, + true, true); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed in qchannel_handshake, hw blocks may not work rc=%d", + rc); + /* Do not return error, passthrough */ + } + } + } + + if (cpas_core->internal_ops.power_on) { + rc = cpas_core->internal_ops.power_on(cpas_hw); + if (rc) { + atomic_set(&cpas_core->soc_access_count, 0); + cam_cpas_soc_disable_resources( + &cpas_hw->soc_info, true, true); + CAM_ERR(CAM_CPAS, + "failed in power_on settings rc=%d", + rc); + goto remove_ahb_vote; + } + } + CAM_DBG(CAM_CPAS, "soc_access_count=%d\n", + atomic_read(&cpas_core->soc_access_count)); + + if (soc_private->enable_smart_qos) + cam_cpas_reset_niu_priorities(cpas_hw); + + cam_smmu_reset_cb_page_fault_cnt(); + cpas_hw->hw_state = CAM_HW_STATE_POWER_UP; + } + + /* + * Need to apply axi vote after we enable clocks, since we need 
certain clocks enabled for + * drv channel switch + */ + rc = cam_cpas_util_apply_client_axi_vote(cpas_hw, cpas_client, &axi_vote, + CAM_CPAS_APPLY_TYPE_START); + if (rc) + goto remove_ahb_vote; + + cpas_client->started = true; + cpas_core->streamon_clients++; + + if (debug_drv && (cpas_core->streamon_clients == 1)) { + cam_cpas_log_vote(cpas_hw, false); + cam_cpas_dump_full_tree_state(cpas_hw, "StartFirstClient"); + } + + CAM_DBG(CAM_CPAS, "client=[%d][%s][%d] streamon_clients=%d", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index, cpas_core->streamon_clients); + + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + return rc; + +remove_ahb_vote: + remove_ahb.type = CAM_VOTE_ABSOLUTE; + remove_ahb.vote.level = CAM_SUSPEND_VOTE; + err_val = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client, + &remove_ahb, NULL); + if (err_val) + CAM_ERR(CAM_CPAS, "Removing AHB vote failed, rc=%d", err_val); + +error: + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + return rc; +} + +static int _check_soc_access_count(struct cam_cpas *cpas_core) +{ + return (atomic_read(&cpas_core->soc_access_count) > 0) ? 
0 : 1; +} + +static int cam_cpas_util_validate_stop_bw(struct cam_cpas_private_soc *soc_private, + struct cam_cpas *cpas_core) +{ + int i; + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if (soc_private->enable_cam_ddr_drv && + (cpas_core->axi_port[i].bus_client.common_data.is_drv_port)) { + + if ((cpas_core->axi_port[i].applied_bw.drv_vote.high.ab) || + (cpas_core->axi_port[i].applied_bw.drv_vote.high.ib) || + (cpas_core->axi_port[i].applied_bw.drv_vote.low.ab) || + (cpas_core->axi_port[i].applied_bw.drv_vote.low.ib)) { + CAM_ERR(CAM_CPAS, + "port:%s Non zero DRV applied BW high[%llu %llu] low[%llu %llu]", + cpas_core->axi_port[i].axi_port_name, + cpas_core->axi_port[i].applied_bw.drv_vote.high.ab, + cpas_core->axi_port[i].applied_bw.drv_vote.high.ib, + cpas_core->axi_port[i].applied_bw.drv_vote.low.ab, + cpas_core->axi_port[i].applied_bw.drv_vote.low.ib); + return -EINVAL; + } + } else { + if (cpas_core->axi_port[i].bus_client.common_data.is_drv_port) + continue; + + if ((cpas_core->axi_port[i].applied_bw.hlos_vote.ab) || + (cpas_core->axi_port[i].applied_bw.hlos_vote.ib)) { + CAM_ERR(CAM_CPAS, + "port:%s Non zero HLOS applied BW [%llu %llu]", + cpas_core->axi_port[i].axi_port_name, + cpas_core->axi_port[i].applied_bw.hlos_vote.ab, + cpas_core->axi_port[i].applied_bw.hlos_vote.ib); + return -EINVAL; + } + } + } + + return 0; +} + +static int cam_cpas_hw_stop(void *hw_priv, void *stop_args, + uint32_t arg_size) +{ + struct cam_hw_info *cpas_hw; + struct cam_cpas *cpas_core; + uint32_t client_indx; + struct cam_cpas_hw_cmd_stop *cmd_hw_stop; + struct cam_cpas_client *cpas_client; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + struct cam_cpas_private_soc *soc_private = NULL; + int rc = 0, count; + long result; + int retry_camnoc_idle = 0; + + if (!hw_priv || !stop_args) { + CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK", + hw_priv, stop_args); + return -EINVAL; + } + + if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) { + 
CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d", + sizeof(struct cam_cpas_hw_cmd_stop), arg_size); + return -EINVAL; + } + + cpas_hw = (struct cam_hw_info *)hw_priv; + cpas_core = (struct cam_cpas *) cpas_hw->core_info; + soc_private = (struct cam_cpas_private_soc *) + cpas_hw->soc_info.soc_private; + cmd_hw_stop = (struct cam_cpas_hw_cmd_stop *)stop_args; + client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_stop->client_handle); + + if (!CAM_CPAS_CLIENT_VALID(client_indx)) + return -EINVAL; + + mutex_lock(&cpas_hw->hw_mutex); + mutex_lock(&cpas_core->client_mutex[client_indx]); + cpas_client = cpas_core->cpas_client[client_indx]; + + CAM_DBG(CAM_CPAS, "Client=[%d][%s][%d] streamon_clients=%d", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index, cpas_core->streamon_clients); + + if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not started", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index); + rc = -EPERM; + goto done; + } + + rc = cam_cpas_util_create_vote_all_paths(cpas_client, &axi_vote, false); + if (rc) { + CAM_ERR(CAM_CPAS, "Unable to create per path votes rc: %d", rc); + goto done; + } + + cam_cpas_dump_axi_vote_info(cpas_client, "CPAS Stop Vote", &axi_vote); + + rc = cam_cpas_util_apply_client_axi_vote(cpas_hw, cpas_client, &axi_vote, + CAM_CPAS_APPLY_TYPE_STOP); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in apply client axi vote rc: %d", rc); + goto done; + } + + rc = cam_cpas_hw_update_axi_floor_lvl(cpas_hw, cmd_hw_stop->client_handle, + 0, true); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in update AXI floor lvl rc: %d", rc); + goto done; + } + cpas_client->started = false; + + if (debug_drv && (cpas_core->streamon_clients == 1)) { + cam_cpas_log_vote(cpas_hw, false); + cam_cpas_dump_full_tree_state(cpas_hw, "StopLastClient"); + } + + cpas_core->streamon_clients--; + + if (cpas_core->streamon_clients == 0) { + if (cpas_core->internal_ops.power_off) { + rc = 
cpas_core->internal_ops.power_off(cpas_hw); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed in power_off settings rc=%d", + rc); + /* Do not return error, passthrough */ + } + } + + if (cpas_core->internal_ops.qchannel_handshake) { + rc = cpas_core->internal_ops.qchannel_handshake(cpas_hw, false, false); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in qchannel_handshake rc=%d", rc); + retry_camnoc_idle = 1; + /* Do not return error, passthrough */ + } + } + + rc = cam_cpas_soc_disable_irq(&cpas_hw->soc_info); + if (rc) { + CAM_ERR(CAM_CPAS, "disable_irq failed, rc=%d", rc); + goto done; + } + + /* Wait for any IRQs still being handled */ + atomic_dec(&cpas_core->soc_access_count); + result = wait_event_timeout(cpas_core->soc_access_count_wq, + _check_soc_access_count(cpas_core), HZ); + if (result == 0) { + CAM_ERR(CAM_CPAS, "Wait failed: soc_access_count=%d", + atomic_read(&cpas_core->soc_access_count)); + } + + /* try again incase camnoc is still not idle */ + if (cpas_core->internal_ops.qchannel_handshake && + retry_camnoc_idle) { + rc = cpas_core->internal_ops.qchannel_handshake(cpas_hw, false, true); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in qchannel_handshake rc=%d", rc); + /* Do not return error, passthrough */ + } + } + + rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, + true, false); + if (rc) { + CAM_ERR(CAM_CPAS, "disable_resorce failed, rc=%d", rc); + goto done; + } + CAM_DBG(CAM_CPAS, "Disabled all the resources: soc_access_count=%d", + atomic_read(&cpas_core->soc_access_count)); + + count = cam_soc_util_regulators_enabled(&cpas_hw->soc_info); + if (count > 0) + CAM_WARN(CAM_CPAS, + "Client=[%d][%s][%d] qchannel shut down while top gdsc is still on %d", + client_indx, cpas_client->data.identifier, + cpas_client->data.cell_index, count); + + rc = cam_cpas_util_apply_default_axi_vote(cpas_hw, false); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in power off default vote rc: %d", rc); + + rc = cam_cpas_util_validate_stop_bw(soc_private, cpas_core); + if (rc) + 
CAM_ERR(CAM_CPAS, "Invalid applied bw at stop rc: %d", rc); + + CAM_DBG(CAM_CPAS, "hlos axi floor lvl: %d, hlos axi clk lvl: %d", + cpas_core->hlos_axi_floor_lvl, cpas_core->hlos_axi_bw_calc_lvl); + + cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + } + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_SUSPEND_VOTE; + rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client, + &ahb_vote, NULL); + if (rc) + goto done; + +done: + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + return rc; +} + +static int cam_cpas_hw_init(void *hw_priv, void *init_hw_args, + uint32_t arg_size) +{ + struct cam_hw_info *cpas_hw; + struct cam_cpas *cpas_core; + int rc = 0; + + if (!hw_priv || !init_hw_args) { + CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK", + hw_priv, init_hw_args); + return -EINVAL; + } + + if (sizeof(struct cam_cpas_hw_caps) != arg_size) { + CAM_ERR(CAM_CPAS, "INIT HW size mismatch %zd %d", + sizeof(struct cam_cpas_hw_caps), arg_size); + return -EINVAL; + } + + cpas_hw = (struct cam_hw_info *)hw_priv; + cpas_core = (struct cam_cpas *)cpas_hw->core_info; + + if (cpas_core->internal_ops.init_hw_version) { + rc = cpas_core->internal_ops.init_hw_version(cpas_hw, + (struct cam_cpas_hw_caps *)init_hw_args); + } + + return rc; +} + +static int cam_cpas_hw_register_client(struct cam_hw_info *cpas_hw, + struct cam_cpas_register_params *register_params) +{ + int rc; + char client_name[CAM_HW_IDENTIFIER_LENGTH + 3]; + int32_t client_indx = -1; + struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + + if ((!register_params) || + (strlen(register_params->identifier) < 1)) { + CAM_ERR(CAM_CPAS, "Invalid cpas client identifier"); + return -EINVAL; + } + + CAM_DBG(CAM_CPAS, "Register params : identifier=%s, cell_index=%d", + register_params->identifier, register_params->cell_index); + + if 
(soc_private->client_id_based) + snprintf(client_name, sizeof(client_name), "%s%d", + register_params->identifier, + register_params->cell_index); + else + snprintf(client_name, sizeof(client_name), "%s", + register_params->identifier); + + mutex_lock(&cpas_hw->hw_mutex); + + rc = cam_common_util_get_string_index(soc_private->client_name, + soc_private->num_clients, client_name, &client_indx); + if (rc) { + CAM_ERR(CAM_CPAS, "Client %s is not found in CPAS client list rc=%d", + client_name, rc); + mutex_unlock(&cpas_hw->hw_mutex); + return -ENODEV; + } + + mutex_lock(&cpas_core->client_mutex[client_indx]); + + if (rc || !CAM_CPAS_CLIENT_VALID(client_indx) || + CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, + "Inval client %s %d : %d %d %pK %d", + register_params->identifier, + register_params->cell_index, + CAM_CPAS_CLIENT_VALID(client_indx), + CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx), + cpas_core->cpas_client[client_indx], rc); + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + return -EPERM; + } + + register_params->client_handle = + CAM_CPAS_GET_CLIENT_HANDLE(client_indx); + memcpy(&cpas_core->cpas_client[client_indx]->data, register_params, + sizeof(struct cam_cpas_register_params)); + cpas_core->registered_clients++; + cpas_core->cpas_client[client_indx]->registered = true; + + CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d", + client_indx, + cpas_core->cpas_client[client_indx]->data.identifier, + cpas_core->cpas_client[client_indx]->data.cell_index, + cpas_core->registered_clients); + + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + + return 0; +} + +static int cam_cpas_hw_unregister_client(struct cam_hw_info *cpas_hw, + uint32_t client_handle) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle); + int rc = 0; + + if 
(!CAM_CPAS_CLIENT_VALID(client_indx)) + return -EINVAL; + + mutex_lock(&cpas_hw->hw_mutex); + mutex_lock(&cpas_core->client_mutex[client_indx]); + + if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] not registered", + client_indx, + cpas_core->cpas_client[client_indx]->data.identifier, + cpas_core->cpas_client[client_indx]->data.cell_index); + rc = -EPERM; + goto done; + } + + if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) { + CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not stopped", + client_indx, + cpas_core->cpas_client[client_indx]->data.identifier, + cpas_core->cpas_client[client_indx]->data.cell_index); + + rc = -EPERM; + goto done; + } + + CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d", + client_indx, + cpas_core->cpas_client[client_indx]->data.identifier, + cpas_core->cpas_client[client_indx]->data.cell_index, + cpas_core->registered_clients); + + cpas_core->cpas_client[client_indx]->registered = false; + cpas_core->registered_clients--; +done: + mutex_unlock(&cpas_core->client_mutex[client_indx]); + mutex_unlock(&cpas_hw->hw_mutex); + return rc; +} + +static int cam_cpas_hw_get_hw_info(void *hw_priv, + void *get_hw_cap_args, uint32_t arg_size) +{ + struct cam_hw_info *cpas_hw; + struct cam_cpas *cpas_core; + struct cam_cpas_hw_caps *hw_caps; + struct cam_cpas_private_soc *soc_private; + + if (!hw_priv || !get_hw_cap_args) { + CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK", + hw_priv, get_hw_cap_args); + return -EINVAL; + } + + if (sizeof(struct cam_cpas_hw_caps) != arg_size) { + CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d", + sizeof(struct cam_cpas_hw_caps), arg_size); + return -EINVAL; + } + + cpas_hw = (struct cam_hw_info *)hw_priv; + cpas_core = (struct cam_cpas *) cpas_hw->core_info; + hw_caps = (struct cam_cpas_hw_caps *)get_hw_cap_args; + *hw_caps = cpas_core->hw_caps; + + /*Extract Fuse Info*/ + soc_private = (struct cam_cpas_private_soc *) + cpas_hw->soc_info.soc_private; + + 
hw_caps->fuse_info = soc_private->fuse_info; + CAM_DBG(CAM_CPAS, "fuse info->num_fuses %d", hw_caps->fuse_info.num_fuses); + + return 0; +} + +static int cam_cpas_log_vote(struct cam_hw_info *cpas_hw, bool ddr_only) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + uint32_t i, vcd_idx; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + struct cam_cpas_cesta_info *cesta_info = + (struct cam_cpas_cesta_info *)cpas_core->cesta_info; + + if ((cpas_core->streamon_clients > 0) && soc_private->enable_smart_qos && !ddr_only) + cam_cpas_print_smart_qos_priority(cpas_hw); + + + /* + * First print rpmh registers as early as possible to catch nearest + * state of rpmh after an issue (overflow) occurs. + */ + if ((cpas_core->streamon_clients > 0) && + (cpas_core->regbase_index[CAM_CPAS_REG_RPMH] != -1)) { + int reg_base_index = + cpas_core->regbase_index[CAM_CPAS_REG_RPMH]; + void __iomem *rpmh_base = + soc_info->reg_map[reg_base_index].mem_base; + uint32_t offset_fe, offset_be; + uint32_t fe_val, be_val; + uint32_t *rpmh_info = &soc_private->rpmh_info[0]; + uint32_t ddr_bcm_index = + soc_private->rpmh_info[CAM_RPMH_BCM_DDR_INDEX]; + uint32_t mnoc_bcm_index = + soc_private->rpmh_info[CAM_RPMH_BCM_MNOC_INDEX]; + + /* + * print 12 registers from 0x4, 0x800 offsets - + * this will give ddr, mmnoc and other BCM info. + * i=0 for DDR, i=4 for mnoc, but double check for each chipset. 
+ */ + for (i = 0; i < rpmh_info[CAM_RPMH_NUMBER_OF_BCMS]; i++) { + if ((!cpas_core->full_state_dump) && + (i != ddr_bcm_index) && + (i != mnoc_bcm_index)) + continue; + + offset_fe = rpmh_info[CAM_RPMH_BCM_FE_OFFSET] + + (i * 0x4); + offset_be = rpmh_info[CAM_RPMH_BCM_BE_OFFSET] + + (i * 0x4); + + fe_val = cam_io_r_mb(rpmh_base + offset_fe); + be_val = cam_io_r_mb(rpmh_base + offset_be); + + CAM_INFO(CAM_CPAS, + "i=%d, FE[offset=0x%x, value=0x%x] BE[offset=0x%x, value=0x%x]", + i, offset_fe, fe_val, offset_be, be_val); + } + } + + if ((cpas_core->streamon_clients > 0) && + cpas_core->regbase_index[CAM_CPAS_REG_CESTA] != -1) { + int reg_base_index = + cpas_core->regbase_index[CAM_CPAS_REG_CESTA]; + void __iomem *cesta_base = + soc_info->reg_map[reg_base_index].mem_base; + uint32_t vcd_base_inc = + cesta_info->cesta_reg_info->vcd_currol.vcd_base_inc; + uint32_t num_vcds = cesta_info->num_vcds; + uint32_t vcd_curr_lvl_base = + cesta_info->cesta_reg_info->vcd_currol.reg_offset; + uint32_t cesta_vcd_curr_perfol_offset, cesta_vcd_curr_perfol_val; + + if (!atomic_inc_not_zero(&cpas_core->soc_access_count)) + goto skip_vcd_dump; + + for (i = 0; i < num_vcds; i++) { + vcd_idx = cesta_info->vcd_info[i].index; + cesta_vcd_curr_perfol_offset = vcd_curr_lvl_base + + (vcd_base_inc * vcd_idx); + cesta_vcd_curr_perfol_val = + cam_io_r_mb(cesta_base + cesta_vcd_curr_perfol_offset); + + CAM_INFO(CAM_CPAS, + "i=%d, VCD[index=%d, type=%d, name=%s] [offset=0x%x, value=0x%x]", + i, cesta_info->vcd_info[i].index, + cesta_info->vcd_info[i].type, + cesta_info->vcd_info[i].clk, + cesta_vcd_curr_perfol_offset, + cesta_vcd_curr_perfol_val); + } + + atomic_dec(&cpas_core->soc_access_count); + wake_up(&cpas_core->soc_access_count_wq); + } + +skip_vcd_dump: + if (ddr_only) + return 0; + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if (cpas_core->axi_port[i].bus_client.common_data.is_drv_port) { + CAM_INFO(CAM_PERF, + "[%s] DRV applied: high [%llu %llu] low[%llu %llu] new: high [%llu 
%llu] low [%llu %llu]", + cpas_core->axi_port[i].axi_port_name, + cpas_core->axi_port[i].applied_bw.drv_vote.high.ab, + cpas_core->axi_port[i].applied_bw.drv_vote.high.ib, + cpas_core->axi_port[i].applied_bw.drv_vote.low.ab, + cpas_core->axi_port[i].applied_bw.drv_vote.low.ib, + cpas_core->axi_port[i].curr_bw.drv_vote.high.ab, + cpas_core->axi_port[i].curr_bw.drv_vote.high.ib, + cpas_core->axi_port[i].curr_bw.drv_vote.low.ab, + cpas_core->axi_port[i].curr_bw.drv_vote.low.ib); + } else { + CAM_INFO(CAM_PERF, "Port %s HLOS applied [%llu %llu] new [%llu %llu]", + cpas_core->axi_port[i].axi_port_name, + cpas_core->axi_port[i].applied_bw.hlos_vote.ab, + cpas_core->axi_port[i].applied_bw.hlos_vote.ib, + cpas_core->axi_port[i].curr_bw.hlos_vote.ab, + cpas_core->axi_port[i].curr_bw.hlos_vote.ib); + } + } + + if (soc_private->control_camnoc_axi_clk) { + CAM_INFO(CAM_CPAS, "applied camnoc axi clk sw_client[%lld]", + cpas_core->applied_camnoc_axi_rate.sw_client); + + if (soc_private->enable_cam_clk_drv) + CAM_INFO(CAM_CPAS, + "applied camnoc axi clk hw_client[high low] cesta_idx0:[%lld %lld] cesta_idx1:[%lld %lld] cesta_idx2:[%lld %lld]", + cpas_core->applied_camnoc_axi_rate.hw_client[0].high, + cpas_core->applied_camnoc_axi_rate.hw_client[0].low, + cpas_core->applied_camnoc_axi_rate.hw_client[1].high, + cpas_core->applied_camnoc_axi_rate.hw_client[1].low, + cpas_core->applied_camnoc_axi_rate.hw_client[2].high, + cpas_core->applied_camnoc_axi_rate.hw_client[2].low); + } else { + for (i = 0; i < cpas_core->num_camnoc_axi_ports; i++) { + CAM_INFO(CAM_CPAS, + "[%s] ab_bw[%lld] ib_bw[%lld] additional_bw[%lld] applied_ab[%lld] applied_ib[%lld]", + cpas_core->camnoc_axi_port[i].axi_port_name, + cpas_core->camnoc_axi_port[i].curr_bw.hlos_vote.ab, + cpas_core->camnoc_axi_port[i].curr_bw.hlos_vote.ib, + cpas_core->camnoc_axi_port[i].additional_bw, + cpas_core->camnoc_axi_port[i].applied_bw.hlos_vote.ab, + cpas_core->camnoc_axi_port[i].applied_bw.hlos_vote.ib); + } + } + + 
	CAM_INFO(CAM_CPAS, "ahb client curr vote level[%d]",
		cpas_core->ahb_bus_client.curr_vote_level);

	if (!cpas_core->full_state_dump) {
		CAM_DBG(CAM_CPAS, "CPAS full state dump not enabled");
		return 0;
	}

	/* This will traverse through all nodes in the tree and print stats */
	cam_cpas_dump_full_tree_state(cpas_hw, "state_dump_on_error");

	cam_cpas_dump_monitor_array(cpas_hw);

	if (cpas_core->internal_ops.print_poweron_settings)
		cpas_core->internal_ops.print_poweron_settings(cpas_hw);
	else
		CAM_DBG(CAM_CPAS, "No ops for print_poweron_settings");

	return 0;
}

/*
 * cam_cpas_update_monitor_array() - capture a snapshot of current CPAS state
 * into the next slot of the circular monitor array.
 *
 * Snapshot contents (all read from live core/soc state): per-AXI-port current
 * and applied BW votes, applied camnoc clk rates, AHB vote level, RPMH BCM
 * FE/BE register values, CESTA VCD current-level registers, CAMNOC fill-level
 * registers, and smart-QoS NIU priority LUTs.
 *
 * @cpas_hw: CPAS hw info; @identifier_string/@identifier_value tag the entry
 * so the later dump can attribute it to the event that triggered it.
 */
static void cam_cpas_update_monitor_array(struct cam_hw_info *cpas_hw,
	const char *identifier_string, int32_t identifier_value)
{
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	struct cam_camnoc_info *camnoc_info = NULL;
	struct cam_cpas_cesta_info *cesta_info = cpas_core->cesta_info;
	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
	struct cam_cpas_private_soc *soc_private =
		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
	struct cam_cpas_monitor *entry;
	int iterator, i, j = 0, vcd_idx, camnoc_reg_idx;
	uint32_t val = 0, camnoc_idx;

	/* Advance the circular monitor head; iterator is the slot to fill
	 * (macro defined elsewhere in this file).
	 */
	CAM_CPAS_INC_MONITOR_HEAD(&cpas_core->monitor_head, &iterator);

	entry = &cpas_core->monitor_entries[iterator];
	entry->cpas_hw = cpas_hw;

	CAM_GET_TIMESTAMP(entry->timestamp);
	strscpy(entry->identifier_string, identifier_string,
		sizeof(entry->identifier_string));

	entry->identifier_value = identifier_value;

	/* Record current vs applied BW votes for every mnoc AXI port */
	for (i = 0; i < cpas_core->num_axi_ports; i++) {
		entry->axi_info[i].axi_port_name = cpas_core->axi_port[i].axi_port_name;
		memcpy(&entry->axi_info[i].curr_bw, &cpas_core->axi_port[i].curr_bw,
			sizeof(struct cam_cpas_axi_bw_info));

		/* camnoc bw value not applicable for mnoc ports */
		entry->axi_info[i].camnoc_bw = 0;
		memcpy(&entry->axi_info[i].applied_bw, &cpas_core->axi_port[i].applied_bw,
			sizeof(struct cam_cpas_axi_bw_info));
		entry->axi_info[i].is_drv_started = cpas_core->axi_port[i].is_drv_started;
	}

	memcpy(&entry->applied_camnoc_clk, &cpas_core->applied_camnoc_axi_rate,
		sizeof(struct cam_soc_util_clk_rates));
	entry->applied_ahb_level = cpas_core->ahb_bus_client.curr_vote_level;

	/*
	 * Read back RPMH BCM front-end/back-end registers, but only while at
	 * least one client is streaming and an RPMH reg base was mapped.
	 */
	if ((cpas_core->streamon_clients > 0) &&
		(cpas_core->regbase_index[CAM_CPAS_REG_RPMH] != -1) &&
		soc_private->rpmh_info[CAM_RPMH_NUMBER_OF_BCMS]) {
		int reg_base_index =
			cpas_core->regbase_index[CAM_CPAS_REG_RPMH];
		void __iomem *rpmh_base =
			soc_info->reg_map[reg_base_index].mem_base;
		/* Each BCM occupies one 32-bit word after the FE/BE base */
		uint32_t fe_ddr_offset =
			soc_private->rpmh_info[CAM_RPMH_BCM_FE_OFFSET] +
			(0x4 * soc_private->rpmh_info[CAM_RPMH_BCM_DDR_INDEX]);
		uint32_t fe_mnoc_offset =
			soc_private->rpmh_info[CAM_RPMH_BCM_FE_OFFSET] +
			(0x4 * soc_private->rpmh_info[CAM_RPMH_BCM_MNOC_INDEX]);
		uint32_t be_ddr_offset =
			soc_private->rpmh_info[CAM_RPMH_BCM_BE_OFFSET] +
			(0x4 * soc_private->rpmh_info[CAM_RPMH_BCM_DDR_INDEX]);
		uint32_t be_mnoc_offset =
			soc_private->rpmh_info[CAM_RPMH_BCM_BE_OFFSET] +
			(0x4 * soc_private->rpmh_info[CAM_RPMH_BCM_MNOC_INDEX]);
		uint32_t be_shub_offset =
			soc_private->rpmh_info[CAM_RPMH_BCM_BE_OFFSET] +
			(0x4 * 1); /* i=1 for SHUB, hardcode for now */

		/*
		 * 0x4, 0x800 - DDR
		 * 0x800, 0x810 - mmnoc
		 */
		entry->fe_ddr = cam_io_r_mb(rpmh_base + fe_ddr_offset);
		entry->fe_mnoc = cam_io_r_mb(rpmh_base + fe_mnoc_offset);
		entry->be_ddr = cam_io_r_mb(rpmh_base + be_ddr_offset);
		entry->be_mnoc = cam_io_r_mb(rpmh_base + be_mnoc_offset);
		entry->be_shub = cam_io_r_mb(rpmh_base + be_shub_offset);

		CAM_DBG(CAM_CPAS,
			"fe_ddr=0x%x, fe_mnoc=0x%x, be_ddr=0x%x, be_mnoc=0x%x",
			entry->fe_ddr, entry->fe_mnoc, entry->be_ddr,
			entry->be_mnoc);
	}

	/* Snapshot CESTA VCD current-level registers, if CESTA base mapped */
	if ((cpas_core->streamon_clients > 0) &&
		cpas_core->regbase_index[CAM_CPAS_REG_CESTA] != -1) {
		int reg_base_index =
			cpas_core->regbase_index[CAM_CPAS_REG_CESTA];
		void __iomem *cesta_base =
			soc_info->reg_map[reg_base_index].mem_base;
		uint32_t vcd_base_inc = cesta_info->cesta_reg_info->vcd_currol.vcd_base_inc;
		uint32_t num_vcds = cesta_info->num_vcds;
		uint32_t vcd_curr_lvl_base = cesta_info->cesta_reg_info->vcd_currol.reg_offset;
		uint32_t cesta_vcd_curr_perfol_offset, cesta_vcd_curr_perfol_val;

		/*
		 * Only touch the registers while soc_access_count is non-zero
		 * (i.e. register access is currently permitted); the matching
		 * dec/wake_up releases waiters such as a power-down path.
		 */
		if (atomic_inc_not_zero(&cpas_core->soc_access_count)) {
			for (i = 0; i < num_vcds; i++) {
				vcd_idx = cesta_info->vcd_info[i].index;

				cesta_vcd_curr_perfol_offset = vcd_curr_lvl_base +
					(vcd_base_inc * vcd_idx);
				cesta_vcd_curr_perfol_val =
					cam_io_r_mb(cesta_base +
						cesta_vcd_curr_perfol_offset);
				entry->vcd_reg_debug_info.vcd_curr_lvl_debug_info[i].index =
					cesta_info->vcd_info[i].index;
				entry->vcd_reg_debug_info.vcd_curr_lvl_debug_info[i]
					.reg_value = cesta_vcd_curr_perfol_val;
			}
			atomic_dec(&cpas_core->soc_access_count);
			wake_up(&cpas_core->soc_access_count_wq);
		}
	}

	/* Snapshot CAMNOC maxwr fill-level registers for enabled ports */
	for (camnoc_idx = 0; camnoc_idx < cpas_core->num_valid_camnoc; camnoc_idx++) {

		camnoc_info = cpas_core->camnoc_info[camnoc_idx];
		camnoc_reg_idx = cpas_core->regbase_index[camnoc_info->reg_base];

		for (i = 0, j = 0; i < camnoc_info->specific_size; i++) {
			if ((!camnoc_info->specific[i].enable) ||
				(!camnoc_info->specific[i].maxwr_low.enable))
				continue;

			if (j >= CAM_CAMNOC_FILL_LVL_REG_INFO_MAX) {
				CAM_WARN(CAM_CPAS,
					"CPAS monitor reg info buffer full, max : %d",
					j);
				break;
			}

			entry->camnoc_port_name[camnoc_idx][j] =
				camnoc_info->specific[i].port_name;
			val = cam_io_r_mb(soc_info->reg_map[camnoc_reg_idx].mem_base +
				camnoc_info->specific[i].maxwr_low.offset);
			entry->camnoc_fill_level[camnoc_idx][j] = val;
			j++;
		}

		entry->num_camnoc_lvl_regs[camnoc_idx] = j;
	}

	/* Smart-QoS: capture RT write NIU priority LUTs from the RT camnoc */
	if (soc_private->enable_smart_qos) {

		camnoc_info = cpas_core->camnoc_info[cpas_core->camnoc_rt_idx];
		camnoc_reg_idx = cpas_core->regbase_index[camnoc_info->reg_base];

		for (i = 0; i < soc_private->smart_qos_info->num_rt_wr_nius; i++) {
			struct cam_cpas_tree_node *niu_node =
				soc_private->smart_qos_info->rt_wr_niu_node[i];

			entry->rt_wr_niu_pri_lut_high[i] =
				cam_io_r_mb(soc_info->reg_map[camnoc_reg_idx].mem_base +
					niu_node->pri_lut_high_offset);

			entry->rt_wr_niu_pri_lut_low[i] =
				cam_io_r_mb(soc_info->reg_map[camnoc_reg_idx].mem_base +
					niu_node->pri_lut_low_offset);
		}
	}
}

/*
 * cam_cpas_dump_monitor_array() - print every valid entry of the circular
 * monitor array (oldest first) via CAM_INFO. No-op unless full_state_dump
 * is enabled.
 */
static void cam_cpas_dump_monitor_array(
	struct cam_hw_info *cpas_hw)
{
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	struct cam_cpas_private_soc *soc_private =
		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
	int i = 0, k = 0;
	int64_t state_head = 0;
	uint32_t index, num_entries, oldest_entry, camnoc_idx, j;
	uint64_t ms, hrs, min, sec;
	struct cam_cpas_monitor *entry;
	struct timespec64 curr_timestamp;
	char log_buf[CAM_CPAS_LOG_BUF_LEN];
	size_t len;
	uint8_t vcd_index;
	struct cam_cpas_cesta_info *cesta_info = cpas_core->cesta_info;
	struct cam_camnoc_info *camnoc_info;

	if (!cpas_core->full_state_dump)
		return;

	state_head = atomic64_read(&cpas_core->monitor_head);

	/* head == -1 means no entry was ever written; before wrap-around the
	 * valid entries are [0, head); after wrap-around the oldest entry is
	 * (head + 1) modulo the ring size.
	 */
	if (state_head == -1) {
		CAM_WARN(CAM_CPAS, "No valid entries in cpas monitor array");
		return;
	} else if (state_head < CAM_CPAS_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_CPAS_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_CPAS_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_GET_TIMESTAMP(curr_timestamp);
	CAM_CONVERT_TIMESTAMP_FORMAT(curr_timestamp, hrs, min, sec, ms);

	CAM_INFO(CAM_CPAS,
		"**** %llu:%llu:%llu.%llu : ======== Dumping monitor information ===========",
		hrs, min, sec, ms);

	index = oldest_entry;

	for (i = 0; i < num_entries; i++) {
		entry = &cpas_core->monitor_entries[index];
		CAM_CONVERT_TIMESTAMP_FORMAT(entry->timestamp, hrs, min, sec, ms);
		log_buf[0] = '\0';

		CAM_INFO(CAM_CPAS,
			"**** %llu:%llu:%llu.%llu : Index[%d] Identifier[%s][%d] camnoc=sw : %ld, hw clients [%ld %ld][%ld %ld][%ld %ld], ahb=%d",
hrs, min, sec, ms, + index, + entry->identifier_string, entry->identifier_value, + entry->applied_camnoc_clk.sw_client, + entry->applied_camnoc_clk.hw_client[0].high, + entry->applied_camnoc_clk.hw_client[0].low, + entry->applied_camnoc_clk.hw_client[1].high, + entry->applied_camnoc_clk.hw_client[1].low, + entry->applied_camnoc_clk.hw_client[2].high, + entry->applied_camnoc_clk.hw_client[2].low, + entry->applied_ahb_level); + + for (j = 0; j < cpas_core->num_axi_ports; j++) { + if ((entry->axi_info[j].applied_bw.vote_type == CAM_CPAS_VOTE_TYPE_DRV) && + !cpas_core->force_hlos_drv) + CAM_INFO(CAM_CPAS, + "BW [%s] : DRV started:%s high=[%lld %lld], low=[%lld %lld]", + entry->axi_info[j].axi_port_name, + CAM_BOOL_TO_YESNO(entry->axi_info[j].is_drv_started), + entry->axi_info[j].applied_bw.drv_vote.high.ab, + entry->axi_info[j].applied_bw.drv_vote.high.ib, + entry->axi_info[j].applied_bw.drv_vote.low.ab, + entry->axi_info[j].applied_bw.drv_vote.low.ib); + + else + CAM_INFO(CAM_CPAS, + "BW [%s] : HLOS ab=%lld, ib=%lld, DRV high_ab=%lld, high_ib=%lld, low_ab=%lld, low_ib=%lld", + entry->axi_info[j].axi_port_name, + entry->axi_info[j].applied_bw.hlos_vote.ab, + entry->axi_info[j].applied_bw.hlos_vote.ib, + entry->axi_info[j].applied_bw.drv_vote.high.ab, + entry->axi_info[j].applied_bw.drv_vote.high.ib, + entry->axi_info[j].applied_bw.drv_vote.low.ab, + entry->axi_info[j].applied_bw.drv_vote.low.ib); + } + + if (cpas_core->regbase_index[CAM_CPAS_REG_RPMH] != -1) { + CAM_INFO(CAM_CPAS, + "fe_ddr=0x%x, fe_mnoc=0x%x, be_ddr=0x%x, be_mnoc=0x%x, be_shub=0x%x", + entry->fe_ddr, entry->fe_mnoc, + entry->be_ddr, entry->be_mnoc, entry->be_shub); + } + + if (cpas_core->regbase_index[CAM_CPAS_REG_CESTA] != -1) { + uint32_t vcd_base_inc = + cesta_info->cesta_reg_info->vcd_currol.vcd_base_inc; + uint32_t vcd_curr_lvl_base = + cesta_info->cesta_reg_info->vcd_currol.reg_offset; + uint32_t reg_offset; + uint32_t num_vcds = cesta_info->num_vcds; + + for (k = 0; k < num_vcds; k++) { + 
vcd_index = + entry->vcd_reg_debug_info.vcd_curr_lvl_debug_info[k].index; + reg_offset = vcd_curr_lvl_base + (vcd_base_inc * vcd_index); + CAM_INFO(CAM_CPAS, + "VCD[index=%d, type=%d, name=%s] [offset=0x%x, value=0x%x]", + vcd_index, + cesta_info->vcd_info[k].type, + cesta_info->vcd_info[k].clk, + reg_offset, + entry->vcd_reg_debug_info.vcd_curr_lvl_debug_info[k] + .reg_value); + } + } + + for (camnoc_idx = 0; camnoc_idx < cpas_core->num_valid_camnoc; camnoc_idx++) { + + camnoc_info = cpas_core->camnoc_info[camnoc_idx]; + log_buf[0] = '\0'; + len = 0; + + for (j = 0; j < entry->num_camnoc_lvl_regs[camnoc_idx]; j++) { + len += scnprintf((log_buf + len), + (CAM_CPAS_LOG_BUF_LEN - len), " %s:[%d %d]", + entry->camnoc_port_name[camnoc_idx][j], + (entry->camnoc_fill_level[camnoc_idx][j] & 0x7FF), + (entry->camnoc_fill_level[camnoc_idx][j] & 0x7F0000) + >> 16); + } + + CAM_INFO(CAM_CPAS, "%s REG[Queued Pending] %s", + camnoc_info->camnoc_name, log_buf); + } + + if (soc_private->enable_smart_qos) { + len = 0; + for (j = 0; j < soc_private->smart_qos_info->num_rt_wr_nius; j++) { + struct cam_cpas_tree_node *niu_node = + soc_private->smart_qos_info->rt_wr_niu_node[j]; + + len += scnprintf((log_buf + len), + (CAM_CPAS_LOG_BUF_LEN - len), " [%s: high 0x%x low 0x%x]", + niu_node->node_name, + entry->rt_wr_niu_pri_lut_high[j], + entry->rt_wr_niu_pri_lut_low[j]); + } + CAM_INFO(CAM_CPAS, "SmartQoS [Node: Pri_lut] %s", log_buf); + } + + index = (index + 1) % CAM_CPAS_MONITOR_MAX_ENTRIES; + } +} + +static void *cam_cpas_user_dump_state_monitor_array_info( + void *dump_struct, uint8_t *addr_ptr) +{ + uint64_t *addr; + struct cam_common_hw_dump_header *hdr; + struct cam_cpas_monitor *monitor = (struct cam_cpas_monitor *)dump_struct; + struct cam_cpas_axi_port_debug_info *axi_info = NULL; + struct cam_cpas_cesta_vcd_reg_debug_info *vcd_reg_debug_info = NULL; + struct cam_hw_info *cpas_hw = (struct cam_hw_info *) monitor->cpas_hw; + struct cam_cpas_private_soc *soc_private = + (struct 
cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_tree_node *niu_node; + uint8_t *dst; + uint32_t num_vcds = CAM_CPAS_MAX_CESTA_VCD_NUM, camnoc_idx, i; + + addr = (uint64_t *)addr_ptr; + + *addr++ = monitor->timestamp.tv_sec; + *addr++ = monitor->timestamp.tv_nsec / NSEC_PER_USEC; + + *addr++ = monitor->identifier_value; + *addr++ = monitor->applied_camnoc_clk.sw_client, + *addr++ = monitor->applied_camnoc_clk.hw_client[0].high, + *addr++ = monitor->applied_camnoc_clk.hw_client[0].low, + *addr++ = monitor->applied_camnoc_clk.hw_client[1].high, + *addr++ = monitor->applied_camnoc_clk.hw_client[1].low, + *addr++ = monitor->applied_camnoc_clk.hw_client[2].high, + *addr++ = monitor->applied_camnoc_clk.hw_client[2].low, + *addr++ = monitor->applied_ahb_level; + *addr++ = cpas_core->num_valid_camnoc; + *addr++ = soc_private->smart_qos_info->num_rt_wr_nius; + *addr++ = num_vcds; + *addr++ = cpas_core->num_axi_ports; + + *addr++ = monitor->fe_ddr; + *addr++ = monitor->be_ddr; + *addr++ = monitor->fe_mnoc; + *addr++ = monitor->be_mnoc; + *addr++ = monitor->be_shub; + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + axi_info = &monitor->axi_info[i]; + dst = (uint8_t *)addr; + hdr = (struct cam_common_hw_dump_header *)dst; + + if (axi_info->applied_bw.vote_type == CAM_CPAS_VOTE_TYPE_DRV) { + scnprintf(hdr->tag, CAM_COMMON_HW_DUMP_TAG_MAX_LEN, "%s.%s.%s:", + axi_info->axi_port_name, "DRV", + CAM_BOOL_TO_YESNO(axi_info->is_drv_started)); + addr = (uint64_t *)(dst + sizeof(struct cam_common_hw_dump_header)); + *addr++ = axi_info->applied_bw.drv_vote.high.ab; + *addr++ = axi_info->applied_bw.drv_vote.high.ib; + *addr++ = axi_info->applied_bw.drv_vote.low.ab; + *addr++ = axi_info->applied_bw.drv_vote.low.ib; + } else { + scnprintf(hdr->tag, CAM_COMMON_HW_DUMP_TAG_MAX_LEN, "%s.%s.%s:", + axi_info->axi_port_name, "HLOS", + CAM_BOOL_TO_YESNO(axi_info->is_drv_started)); + addr = (uint64_t 
*)(dst + sizeof(struct cam_common_hw_dump_header)); + *addr++ = axi_info->applied_bw.hlos_vote.ab; + *addr++ = axi_info->applied_bw.hlos_vote.ib; + } + } + + for (camnoc_idx = 0; camnoc_idx < cpas_core->num_valid_camnoc; camnoc_idx++) { + *addr++ = monitor->num_camnoc_lvl_regs[camnoc_idx]; + for (i = 0; i < monitor->num_camnoc_lvl_regs[camnoc_idx]; i++) { + dst = (uint8_t *)addr; + hdr = (struct cam_common_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_COMMON_HW_DUMP_TAG_MAX_LEN, "%s:[%d %d].", + monitor->camnoc_port_name[camnoc_idx][i], + monitor->camnoc_fill_level[camnoc_idx][i] & 0x7FF, + (monitor->camnoc_fill_level[camnoc_idx][i] & 0x7F0000) >> 16); + addr = (uint64_t *)(dst + sizeof(struct cam_common_hw_dump_header)); + } + } + + for (i = 0; i < soc_private->smart_qos_info->num_rt_wr_nius; i++) { + niu_node = soc_private->smart_qos_info->rt_wr_niu_node[i]; + dst = (uint8_t *)addr; + hdr = (struct cam_common_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_COMMON_HW_DUMP_TAG_MAX_LEN, "%s:", niu_node->node_name); + addr = (uint64_t *)(dst + sizeof(struct cam_common_hw_dump_header)); + *addr++ = monitor->rt_wr_niu_pri_lut_high[i]; + *addr++ = monitor->rt_wr_niu_pri_lut_low[i]; + } + + vcd_reg_debug_info = &monitor->vcd_reg_debug_info; + + for (i = 0; i < num_vcds; i++) { + *addr++ = vcd_reg_debug_info->vcd_curr_lvl_debug_info[i].index; + *addr++ = vcd_reg_debug_info->vcd_curr_lvl_debug_info[i].reg_value; + } + + return addr; +} + +/** + * cam_cpas_dump_state_monitor_array_info() + * + * @brief : dump the state monitor array info, dump from monitor_head + * to save state information in time order. 
+ * @cpas_hw : hardware information + * @dump_info : dump payload + */ +static int cam_cpas_dump_state_monitor_array_info( + struct cam_hw_info *cpas_hw, + struct cam_req_mgr_dump_info *dump_info) +{ + int rc = 0; + int i, j; + struct cam_common_hw_dump_args dump_args; + size_t buf_len; + size_t remain_len; + uint32_t min_len = 0, camnoc_idx; + uintptr_t cpu_addr; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + int64_t state_head = 0; + uint32_t index, num_entries, oldest_entry; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas_monitor *entry; + uint32_t monitor_idx; + + state_head = atomic64_read(&cpas_core->monitor_head); + if (state_head == -1) { + CAM_WARN(CAM_CPAS, "No valid entries in cpas monitor array"); + return 0; + } else if (state_head < CAM_CPAS_MONITOR_MAX_ENTRIES) { + num_entries = state_head; + oldest_entry = 0; + } else { + num_entries = CAM_CPAS_MONITOR_MAX_ENTRIES; + div_u64_rem(state_head + 1, + CAM_CPAS_MONITOR_MAX_ENTRIES, &oldest_entry); + } + + monitor_idx = index = oldest_entry; + + rc = cam_mem_get_cpu_buf(dump_info->buf_handle, &cpu_addr, &buf_len); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid handle %u rc %d", + dump_info->buf_handle, rc); + return rc; + } + + if (buf_len <= dump_info->offset) { + CAM_WARN(CAM_CPAS, "Dump buffer overshoot len %zu offset %zu", + buf_len, dump_info->offset); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return -ENOSPC; + } + + remain_len = buf_len - dump_info->offset; + for (i = 0; i < num_entries; i++) { + min_len += sizeof(struct cam_common_hw_dump_header) + + CAM_CPAS_DUMP_NUM_WORDS_COMM * sizeof(uint64_t); + entry = &cpas_core->monitor_entries[monitor_idx]; + for (j = 0; j < cpas_core->num_axi_ports; j++) { + if (entry->axi_info[j].applied_bw.vote_type == + CAM_CPAS_VOTE_TYPE_DRV) { + min_len += sizeof(struct cam_common_hw_dump_header) + + CAM_CPAS_DUMP_NUM_WORDS_VOTE_TYEP_DRV * sizeof(uint64_t); + } 
else { + min_len += sizeof(struct cam_common_hw_dump_header) + + CAM_CPAS_DUMP_NUM_WORDS_VOTE_TYEP_HLOS * sizeof(uint64_t); + } + } + + for (camnoc_idx = 0; camnoc_idx < cpas_core->num_valid_camnoc; camnoc_idx++) { + min_len += sizeof(uint64_t); + for (j = 0; j < entry->num_camnoc_lvl_regs[camnoc_idx]; j++) + min_len += sizeof(struct cam_common_hw_dump_header); + } + + for (j = 0; j < soc_private->smart_qos_info->num_rt_wr_nius; j++) + min_len += sizeof(struct cam_common_hw_dump_header) + + CAM_CPAS_DUMP_NUM_WORDS_RT_WR_NIUS * sizeof(uint64_t); + + for (j = 0; j < CAM_CPAS_MAX_CESTA_VCD_NUM; j++) + min_len += CAM_CPAS_DUMP_NUM_WORDS_VCD_CURR_LVL * sizeof(uint64_t); + + monitor_idx = (monitor_idx + 1) % CAM_CPAS_MONITOR_MAX_ENTRIES; + } + + if (remain_len < min_len) { + CAM_WARN(CAM_CPAS, "Dump buffer exhaust remain %zu min %u", + remain_len, min_len); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return -ENOSPC; + } + + dump_args.req_id = dump_info->req_id; + dump_args.cpu_addr = cpu_addr; + dump_args.buf_len = buf_len; + dump_args.offset = dump_info->offset; + dump_args.ctxt_to_hw_map = NULL; + for (i = 0; i < num_entries; i++) { + rc = cam_common_user_dump_helper(&dump_args, + cam_cpas_user_dump_state_monitor_array_info, + &cpas_core->monitor_entries[index], + sizeof(uint64_t), "CPAS_MONITOR.%d.%s:", index, + &cpas_core->monitor_entries[index].identifier_string); + if (rc) { + CAM_ERR(CAM_CPAS, "Dump state info failed, rc: %d", rc); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return rc; + } + + index = (index + 1) % CAM_CPAS_MONITOR_MAX_ENTRIES; + } + + dump_info->offset = dump_args.offset; + cam_mem_put_cpu_buf(dump_info->buf_handle); + + return rc; +} + +static int cam_cpas_log_event(struct cam_hw_info *cpas_hw, + const char *identifier_string, int32_t identifier_value) +{ + cam_cpas_update_monitor_array(cpas_hw, identifier_string, + identifier_value); + + return 0; +} + +static int cam_cpas_select_qos(struct cam_hw_info *cpas_hw, + uint32_t 
	selection_mask)
{
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	int rc = 0;

	mutex_lock(&cpas_hw->hw_mutex);

	/* QoS settings may only change while the hw is powered down */
	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
		CAM_ERR(CAM_CPAS,
			"Hw already in power up state, can't change QoS settings");
		rc = -EINVAL;
		goto done;
	}

	if (cpas_core->internal_ops.setup_qos_settings) {
		rc = cpas_core->internal_ops.setup_qos_settings(cpas_hw,
			selection_mask);
		if (rc)
			CAM_ERR(CAM_CPAS, "Failed in changing QoS %d", rc);
	} else {
		CAM_WARN(CAM_CPAS, "No ops for qos_settings");
	}

done:
	mutex_unlock(&cpas_hw->hw_mutex);
	return rc;
}

/*
 * cam_cpas_hw_enable_tpg_mux_sel() - select the TPG mux via the
 * target-specific internal op; -EINVAL if the op is not provided.
 */
static int cam_cpas_hw_enable_tpg_mux_sel(struct cam_hw_info *cpas_hw,
	uint32_t tpg_mux)
{
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	int rc = 0;

	mutex_lock(&cpas_hw->hw_mutex);

	if (cpas_core->internal_ops.set_tpg_mux_sel) {
		rc = cpas_core->internal_ops.set_tpg_mux_sel(
			cpas_hw, tpg_mux);
		if (rc) {
			CAM_ERR(CAM_CPAS,
				"failed in tpg mux selection rc=%d",
				rc);
		}
	} else {
		CAM_ERR(CAM_CPAS,
			"CPAS tpg mux sel not enabled");
		rc = -EINVAL;
	}

	mutex_unlock(&cpas_hw->hw_mutex);
	return rc;
}

/*
 * cam_cpas_activate_cache() - ref-counted activation of an LLCC slice.
 * Only the first activation (ref_cnt 0 -> 1) touches the hardware via
 * llcc_slice_activate(); on failure the ref count is rolled back.
 */
static int cam_cpas_activate_cache(
	struct cam_hw_info *cpas_hw,
	struct cam_sys_cache_info *cache_info)
{
	int rc = 0;

	mutex_lock(&cpas_hw->hw_mutex);
	cache_info->ref_cnt++;
	if (cache_info->ref_cnt > 1) {
		/* Already active for another user; nothing to do */
		mutex_unlock(&cpas_hw->hw_mutex);
		CAM_DBG(CAM_CPAS, "Cache: %s has already been activated cnt: %d",
			cache_info->name, cache_info->ref_cnt);
		return rc;
	}

	rc = llcc_slice_activate(cache_info->slic_desc);
	if (rc) {
		CAM_ERR(CAM_CPAS, "Failed to activate cache:%s",
			cache_info->name);
		goto end;
	}

	mutex_unlock(&cpas_hw->hw_mutex);
	CAM_DBG(CAM_CPAS, "Activated cache:%s", cache_info->name);
	return rc;

end:
	/* Undo the speculative ref count taken above */
	cache_info->ref_cnt--;
	mutex_unlock(&cpas_hw->hw_mutex);
	return rc;
}

/*
 * cam_cpas_deactivate_cache() - ref-counted deactivation of an LLCC slice.
 * Only the last user (ref_cnt -> 0) deactivates the slice in hardware;
 * an unbalanced call returns -EFAULT.
 */
static int cam_cpas_deactivate_cache(
	struct cam_hw_info *cpas_hw,
	struct cam_sys_cache_info *cache_info)
{
	int rc = 0;

	mutex_lock(&cpas_hw->hw_mutex);
	if (!cache_info->ref_cnt) {
		mutex_unlock(&cpas_hw->hw_mutex);
		CAM_ERR(CAM_CPAS, "Unbalanced deactivate");
		return -EFAULT;
	}

	cache_info->ref_cnt--;
	if (cache_info->ref_cnt) {
		/* Other users still hold the slice active */
		mutex_unlock(&cpas_hw->hw_mutex);
		CAM_DBG(CAM_CPAS, "activate cnt for: %s non-zero: %d",
			cache_info->name, cache_info->ref_cnt);
		return rc;
	}

	rc = llcc_slice_deactivate(cache_info->slic_desc);
	if (rc)
		CAM_ERR(CAM_CPAS, "Failed to deactivate cache:%s",
			cache_info->name);

	mutex_unlock(&cpas_hw->hw_mutex);
	CAM_DBG(CAM_CPAS, "De-activated cache:%s", cache_info->name);
	return rc;
}

#if IS_ENABLED(CONFIG_SPECTRA_LLCC_STALING)
/*
 * cam_cpas_configure_staling_cache() - translate CPAS staling mode/op_type
 * into llcc_staling_mode_params and program them on the slice. On success
 * the applied values are mirrored back into cache_info.
 */
static int cam_cpas_configure_staling_cache(
	struct cam_hw_info *cpas_hw,
	struct cam_sys_cache_info *cache_info,
	struct cam_sys_cache_local_info *sys_cache_info)
{
	int rc = 0;
	struct llcc_staling_mode_params staling_params;

	mutex_lock(&cpas_hw->hw_mutex);
	switch (sys_cache_info->mode) {
	case CAM_LLCC_STALING_MODE_CAPACITY: {
		staling_params.staling_mode = LLCC_STALING_MODE_CAPACITY;
		break;
	}
	case CAM_LLCC_STALING_MODE_NOTIFY: {
		staling_params.staling_mode = LLCC_STALING_MODE_NOTIFY;
		break;
	}
	default:
		/* NOTE(review): invalid mode only logs; staling_mode is left
		 * uninitialized in this case — confirm callers validate mode.
		 */
		CAM_ERR(CAM_CPAS, "CPAS LLCC sys cache mode is not valid =%d"
			, sys_cache_info->mode);
		break;
	}

	switch (sys_cache_info->op_type) {
	case CAM_LLCC_NOTIFY_STALING_EVICT: {
		staling_params.notify_params.op = LLCC_NOTIFY_STALING_WRITEBACK;
		break;
	}
#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE
	/* NO_WRITEBACK op exists only on kernels >= 6.2 */
	case CAM_LLCC_NOTIFY_STALING_FORGET: {
		staling_params.notify_params.op = LLCC_NOTIFY_STALING_NO_WRITEBACK;
		break;
	}
#endif
	default:
		CAM_ERR(CAM_CPAS, "CPAS LLCC sys cache op_type is not valid =%d"
			, sys_cache_info->op_type);
		break;
	}
	staling_params.notify_params.staling_distance
		= sys_cache_info->staling_distance;
	rc = llcc_configure_staling_mode(cache_info->slic_desc,
		&staling_params);
	if (!rc) {
		/* Commit the successfully applied staling configuration */
		cache_info->staling_distance = sys_cache_info->staling_distance;
		cache_info->mode = sys_cache_info->mode;
		cache_info->op_type = sys_cache_info->op_type;
	} else if (rc == -EOPNOTSUPP) {
		CAM_ERR(CAM_CPAS, "llcc staling feature is not supported cache:%s",
			cache_info->name);
	} else if (rc) {
		CAM_ERR(CAM_CPAS, "Failed to enable llcc notif cache:%s",
			cache_info->name);
	}

	mutex_unlock(&cpas_hw->hw_mutex);
	CAM_DBG(CAM_CPAS,
		"llcc notif cache scid = %d, name:%s staling_distance %d cache mode :%d cache op_type :%d",
		cache_info->scid, cache_info->name, cache_info->staling_distance,
		cache_info->mode, cache_info->op_type);
	return rc;
}

/*
 * cam_cpas_notif_stalling_inc_cache() - bump the LLCC staling-notify frame
 * counter for the given slice (per-frame trigger).
 */
static int cam_cpas_notif_stalling_inc_cache(
	struct cam_hw_info *cpas_hw,
	struct cam_sys_cache_info *cache_info)
{
	int rc = 0;

	mutex_lock(&cpas_hw->hw_mutex);
	rc = llcc_notif_staling_inc_counter(cache_info->slic_desc);
	if (rc == -EOPNOTSUPP)
		CAM_ERR(CAM_CPAS, "llcc notif stalling inc not supported: %s",
			cache_info->name);
	else if (rc)
		CAM_ERR(CAM_CPAS, "Failed to llcc staling frame trigger:%s",
			cache_info->name);

	mutex_unlock(&cpas_hw->hw_mutex);
	CAM_DBG(CAM_CPAS, "llcc staling frame triggered cache:%s",
		cache_info->name);
	return rc;
}
#endif

/*
 * cam_cpas_validate_cache_type() - 0 if @type is a known LLCC cache type
 * and at least one cache is present; -EINVAL otherwise.
 */
static inline int cam_cpas_validate_cache_type(
	uint32_t num_caches, uint32_t type)
{
	int32_t rc;

	if ((!num_caches))
		return -EINVAL;

	switch (type) {
	case CAM_LLCC_SMALL_1:
	case CAM_LLCC_SMALL_2:
	case CAM_LLCC_LARGE_1:
	case CAM_LLCC_LARGE_2:
	case CAM_LLCC_LARGE_3:
	case CAM_LLCC_LARGE_4:
	case CAM_LLCC_OFE_IP:
	case CAM_LLCC_IPE_RT_IP:
	case CAM_LLCC_IPE_SRT_IP:
	case CAM_LLCC_IPE_RT_RF:
	case CAM_LLCC_IPE_SRT_RF:
		rc = 0;
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

/*
 * cam_cpas_get_slice_id() - look up the LLCC slice id (scid) for a cache
 * type; returns -1 when the type is invalid or not present on this target.
 */
static int cam_cpas_get_slice_id(
	struct cam_hw_info *cpas_hw,
	uint32_t type)
{
	struct cam_cpas_private_soc *soc_private =
		(struct cam_cpas_private_soc
		*)cpas_hw->soc_info.soc_private;
	uint32_t num_caches = soc_private->num_caches;
	int scid = -1, i;

	if (cam_cpas_validate_cache_type(num_caches, type))
		goto end;

	for (i = 0; i < num_caches; i++) {
		if (type == soc_private->llcc_info[i].type) {
			scid = soc_private->llcc_info[i].scid;
			CAM_DBG(CAM_CPAS, "Cache:%s type:%d scid:%d",
				soc_private->llcc_info[i].name, type, scid);
			break;
		}
	}

end:
	return scid;
}

/*
 * cam_cpas_activate_cache_slice() - activate the LLCC slice matching @type
 * (ref-counted via cam_cpas_activate_cache). Unknown types are a no-op.
 */
static int cam_cpas_activate_cache_slice(
	struct cam_hw_info *cpas_hw,
	uint32_t type)
{
	struct cam_cpas_private_soc *soc_private =
		(struct cam_cpas_private_soc *)cpas_hw->soc_info.soc_private;
	uint32_t num_caches = soc_private->num_caches;
	int rc = 0, i;

	CAM_DBG(CAM_CPAS, "Activate type: %d", type);
	if (cam_cpas_validate_cache_type(num_caches, type))
		goto end;

	for (i = 0; i < num_caches; i++) {
		if (type == soc_private->llcc_info[i].type)
			rc = cam_cpas_activate_cache(cpas_hw,
				&soc_private->llcc_info[i]);
	}

end:
	return rc;
}

/*
 * cam_cpas_deactivate_cache_slice() - counterpart of
 * cam_cpas_activate_cache_slice(); drops one activation ref on the slice
 * matching @type.
 */
static int cam_cpas_deactivate_cache_slice(
	struct cam_hw_info *cpas_hw,
	uint32_t type)
{
	struct cam_cpas_private_soc *soc_private =
		(struct cam_cpas_private_soc *)cpas_hw->soc_info.soc_private;
	uint32_t num_caches = soc_private->num_caches;
	int rc = 0, i;

	CAM_DBG(CAM_CPAS, "De-activate type: %d", type);
	if (cam_cpas_validate_cache_type(num_caches, type))
		goto end;

	for (i = 0; i < num_caches; i++) {
		if (type == soc_private->llcc_info[i].type)
			rc = cam_cpas_deactivate_cache(cpas_hw,
				&soc_private->llcc_info[i]);
	}

end:
	return rc;
}

#if IS_ENABLED(CONFIG_SPECTRA_LLCC_STALING)
/*
 * cam_cpas_is_configuration_allowed() - a staling re-configuration is
 * rejected while the slice is in use with *different* mode and op_type;
 * it is allowed when the slice is idle, or in use with identical
 * parameters and concurrency permitted.
 */
static bool cam_cpas_is_configuration_allowed(
	struct cam_hw_info *cpas_hw,
	struct cam_sys_cache_info *cache_info,
	struct cam_sys_cache_local_info *sys_cache_info)
{
	bool allow = true;

	mutex_lock(&cpas_hw->hw_mutex);
	if ((cache_info->ref_cnt > 0) &&
		(cache_info->mode != sys_cache_info->mode) &&
		(cache_info->op_type != sys_cache_info->op_type)) {
CAM_ERR(CAM_CPAS, + "Can not be configured in middle of usage, ref_cnt = %d old param: scid %d mode: %d op_type: %d new param: scid %d mode: %d op_type: %d", + cache_info->ref_cnt, sys_cache_info->type, sys_cache_info->mode, + sys_cache_info->op_type, cache_info->type, + cache_info->mode, cache_info->op_type); + allow = false; + } else if (((cache_info->ref_cnt > 0) && + (cache_info->mode == sys_cache_info->mode) && + (cache_info->op_type == sys_cache_info->op_type) && + cache_info->concur) || cache_info->ref_cnt == 0) { + CAM_DBG(CAM_ICP, "scid_id: %d ref_cnt: %d mode: %d op_type: %d", + cache_info->type, cache_info->ref_cnt, + cache_info->mode, + cache_info->op_type); + + allow = true; + } + mutex_unlock(&cpas_hw->hw_mutex); + + return allow; +} + +static int cam_cpas_configure_staling_cache_slice( + struct cam_hw_info *cpas_hw, + struct cam_sys_cache_local_info sys_cache_info) +{ + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *)cpas_hw->soc_info.soc_private; + uint32_t num_caches = soc_private->num_caches; + int rc = 0, i; + bool allow; + + CAM_DBG(CAM_CPAS, "configuring cache type: %d", sys_cache_info.type); + if (cam_cpas_validate_cache_type(num_caches, sys_cache_info.type)) + goto end; + + for (i = 0; i < num_caches; i++) { + if (sys_cache_info.type == soc_private->llcc_info[i].type) { + allow = cam_cpas_is_configuration_allowed(cpas_hw, + &soc_private->llcc_info[i], &sys_cache_info); + if (allow) { + rc = cam_cpas_configure_staling_cache(cpas_hw, + &soc_private->llcc_info[i], &sys_cache_info); + if (rc) { + CAM_ERR(CAM_CPAS, "cache type %d config failed, rc: %d", + soc_private->llcc_info[i].type, rc); + } + } + break; + } + } + +end: + return rc; +} + +static int cam_cpas_notif_stalling_inc_cache_slice( + struct cam_hw_info *cpas_hw, + uint32_t type) +{ + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *)cpas_hw->soc_info.soc_private; + uint32_t num_caches = soc_private->num_caches; + int rc = 0, i; + + 
CAM_DBG(CAM_CPAS, "notification cache type: %d", type); + if (cam_cpas_validate_cache_type(num_caches, type)) + goto end; + + for (i = 0; i < num_caches; i++) { + if (type == soc_private->llcc_info[i].type) + rc = cam_cpas_notif_stalling_inc_cache(cpas_hw, + &soc_private->llcc_info[i]); + } + +end: + return rc; +} + +#else +static int cam_cpas_configure_staling_cache_slice( + struct cam_hw_info *cpas_hw, + struct cam_sys_cache_local_info sys_cache_info) +{ + return -EOPNOTSUPP; +} + +static int cam_cpas_notif_stalling_inc_cache_slice( + struct cam_hw_info *cpas_hw, + uint32_t type) +{ + return -EOPNOTSUPP; +} +#endif + +static int cam_cpas_hw_csid_input_core_info_update(struct cam_hw_info *cpas_hw, + int csid_idx, int sfe_idx, bool set_port) +{ + int i, j, rc = 0; + char client_name[CAM_HW_IDENTIFIER_LENGTH + 3]; + int32_t client_indx = -1; + + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_tree_node *tree_node = NULL; + + if (!soc_private->enable_cam_ddr_drv || cpas_core->force_hlos_drv) + return 0; + + if ((csid_idx < 0) || (sfe_idx < 0)) { + CAM_ERR(CAM_CPAS, "Invalid core info csid:%d sfe:%d", csid_idx, sfe_idx); + return -EINVAL; + } + + snprintf(client_name, sizeof(client_name), "%s%d", "sfe", sfe_idx); + + rc = cam_common_util_get_string_index(soc_private->client_name, + soc_private->num_clients, client_name, &client_indx); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in getting correct client index"); + return -EINVAL; + } + + if (!cpas_core->cpas_client[client_indx]->is_drv_dyn) + return 0; + + for (i = 0; i < CAM_CPAS_PATH_DATA_MAX; i++) { + for (j = 0; j < CAM_CPAS_TRANSACTION_MAX; j++) { + tree_node = cpas_core->cpas_client[client_indx]->tree_node[i][j]; + if (!tree_node) + continue; + + if (set_port) + tree_node->drv_voting_idx = CAM_CPAS_PORT_DRV_0 + csid_idx; + else + tree_node->drv_voting_idx = 
CAM_CPAS_PORT_DRV_DYN; + } + } + + return rc; +} + +static int cam_cpas_hw_enable_domain_id_clks(struct cam_hw_info *cpas_hw, + bool enable) +{ + int rc = 0, i; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas_domain_id_support_clks *domain_id_clks = + soc_private->domain_id_clks; + + if (!soc_private->domain_id_info.domain_id_supported) { + CAM_DBG(CAM_CPAS, "Domain-id not supported on target"); + return -EINVAL; + } + + if (enable) { + for (i = 0; i < domain_id_clks->number_clks; i++) { + rc = cam_soc_util_clk_enable(&cpas_hw->soc_info, CAM_CLK_SW_CLIENT_IDX, + true, domain_id_clks->clk_idx[i], 0); + if (rc) { + CAM_ERR(CAM_CPAS, "Domain-id clk %s enable failed, rc: %d", + domain_id_clks->clk_names[i], i); + goto clean_up; + } + } + CAM_DBG(CAM_CPAS, "Domain-id clks enable success"); + } else { + for (i = 0; i < domain_id_clks->number_clks; i++) { + rc = cam_soc_util_clk_disable(&cpas_hw->soc_info, CAM_CLK_SW_CLIENT_IDX, + true, domain_id_clks->clk_idx[i]); + if (rc) + CAM_WARN(CAM_CPAS, "Domain-id clk %s disable failed, rc: %d", + domain_id_clks->clk_names[i], rc); + } + if (!rc) + CAM_DBG(CAM_CPAS, "Domain-id clks disable success"); + } + + return rc; + +clean_up: + for (--i; i >= 0; i--) + cam_soc_util_clk_disable(&cpas_hw->soc_info, CAM_CLK_SW_CLIENT_IDX, true, + domain_id_clks->clk_idx[i]); + + return rc; +} + +static int cam_cpas_hw_csid_process_resume(struct cam_hw_info *cpas_hw, uint32_t csid_idx) +{ + int i, rc = 0; + + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + + if (!soc_private->enable_cam_ddr_drv) + return 0; + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if (!cpas_core->axi_port[i].bus_client.common_data.is_drv_port || + !cpas_core->axi_port[i].is_drv_started || + (cpas_core->axi_port[i].drv_idx != (CAM_CPAS_PORT_DRV_0 + 
csid_idx))) + continue; + + /* Apply last applied bw again to applicable DRV port */ + rc = cam_cpas_util_vote_drv_bus_client_bw(soc_private, cpas_core, + &cpas_core->axi_port[i].bus_client, + &cpas_core->axi_port[i].applied_bw, &cpas_core->axi_port[i].applied_bw); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in BW update on resume rc:%d", rc); + goto end; + } + + /* Trigger channel switch for RSC dev */ + rc = cam_cpas_drv_channel_switch_for_dev(cpas_core->axi_port[i].cam_rsc_dev); + if (rc) { + CAM_ERR(CAM_CPAS, + "Port[%s] failed in channel switch during resume rc:%d", + cpas_core->axi_port[i].axi_port_name, rc); + goto end; + } + } + +end: + return rc; +} + +static int cam_cpas_hw_process_cmd(void *hw_priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + + if (!hw_priv || !cmd_args || + (cmd_type >= CAM_CPAS_HW_CMD_INVALID)) { + CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK %d", + hw_priv, cmd_args, cmd_type); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_CPAS_HW_CMD_REGISTER_CLIENT: { + struct cam_cpas_register_params *register_params; + + if (sizeof(struct cam_cpas_register_params) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + register_params = (struct cam_cpas_register_params *)cmd_args; + rc = cam_cpas_hw_register_client(hw_priv, register_params); + break; + } + case CAM_CPAS_HW_CMD_UNREGISTER_CLIENT: { + uint32_t *client_handle; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + client_handle = (uint32_t *)cmd_args; + rc = cam_cpas_hw_unregister_client(hw_priv, *client_handle); + break; + } + case CAM_CPAS_HW_CMD_SET_ADDR_TRANS: { + struct cam_cpas_hw_addr_trans_data *cmd_addr_trans; + + if (sizeof(struct cam_cpas_hw_addr_trans_data) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + cmd_addr_trans = (struct 
cam_cpas_hw_addr_trans_data *)cmd_args; + rc = cam_cpas_hw_set_addr_trans(hw_priv, cmd_addr_trans); + break; + } + case CAM_CPAS_HW_CMD_REG_WRITE: { + struct cam_cpas_hw_cmd_reg_read_write *reg_write; + + if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) != + arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + reg_write = + (struct cam_cpas_hw_cmd_reg_read_write *)cmd_args; + rc = cam_cpas_hw_reg_write(hw_priv, reg_write->client_handle, + reg_write->reg_base, reg_write->offset, reg_write->mb, + reg_write->value); + break; + } + case CAM_CPAS_HW_CMD_REG_READ: { + struct cam_cpas_hw_cmd_reg_read_write *reg_read; + + if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) != + arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + reg_read = + (struct cam_cpas_hw_cmd_reg_read_write *)cmd_args; + rc = cam_cpas_hw_reg_read(hw_priv, + reg_read->client_handle, reg_read->reg_base, + reg_read->offset, reg_read->mb, ®_read->value); + + break; + } + case CAM_CPAS_HW_CMD_AHB_VOTE: { + struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote; + + if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + cmd_ahb_vote = (struct cam_cpas_hw_cmd_ahb_vote *)cmd_args; + rc = cam_cpas_hw_update_ahb_vote(hw_priv, + cmd_ahb_vote->client_handle, cmd_ahb_vote->ahb_vote); + break; + } + case CAM_CPAS_HW_CMD_AXI_VOTE: { + struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote; + + if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + cmd_axi_vote = (struct cam_cpas_hw_cmd_axi_vote *)cmd_args; + rc = cam_cpas_hw_update_axi_vote(hw_priv, + cmd_axi_vote->client_handle, cmd_axi_vote->axi_vote); + break; + } + case CAM_CPAS_HW_AXI_FLOOR_LVL: { + struct cam_cpas_hw_axi_floor_lvl *floor_lvl_info; + + if (sizeof(struct 
cam_cpas_hw_axi_floor_lvl) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + floor_lvl_info = (struct cam_cpas_hw_axi_floor_lvl *)cmd_args; + rc = cam_cpas_hw_update_axi_floor_lvl(hw_priv, + floor_lvl_info->client_handle, floor_lvl_info->floor_lvl, + false); + break; + } + case CAM_CPAS_HW_CMD_LOG_VOTE: { + bool *ddr_only; + + if (sizeof(bool) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + ddr_only = (bool *) cmd_args; + rc = cam_cpas_log_vote(hw_priv, *ddr_only); + break; + } + + case CAM_CPAS_HW_CMD_LOG_EVENT: { + struct cam_cpas_hw_cmd_notify_event *event; + + if (sizeof(struct cam_cpas_hw_cmd_notify_event) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + event = (struct cam_cpas_hw_cmd_notify_event *)cmd_args; + + rc = cam_cpas_log_event(hw_priv, event->identifier_string, + event->identifier_value); + break; + } + + case CAM_CPAS_HW_CMD_SELECT_QOS: { + uint32_t *selection_mask; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + selection_mask = (uint32_t *)cmd_args; + rc = cam_cpas_select_qos(hw_priv, *selection_mask); + break; + } + case CAM_CPAS_HW_CMD_GET_SCID: { + uint32_t type; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + type = *((uint32_t *) cmd_args); + rc = cam_cpas_get_slice_id(hw_priv, type); + } + break; + case CAM_CPAS_HW_CMD_ACTIVATE_LLC: { + uint32_t type; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + type = *((uint32_t *) cmd_args); + rc = cam_cpas_activate_cache_slice(hw_priv, type); + } + break; + case CAM_CPAS_HW_CMD_DEACTIVATE_LLC: { + uint32_t type; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type 
%d, size mismatch %d", + cmd_type, arg_size); + break; + } + type = *((uint32_t *) cmd_args); + rc = cam_cpas_deactivate_cache_slice(hw_priv, type); + } + break; + case CAM_CPAS_HW_CMD_CONFIGURE_STALING_LLC: { + struct cam_sys_cache_local_info sys_cache_info; + + if (sizeof(struct cam_sys_cache_local_info) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + sys_cache_info = + *((struct cam_sys_cache_local_info *) cmd_args); + rc = cam_cpas_configure_staling_cache_slice(hw_priv, sys_cache_info); + } + break; + case CAM_CPAS_HW_CMD_NOTIF_STALL_INC_LLC: { + uint32_t type; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + type = *((uint32_t *) cmd_args); + rc = cam_cpas_notif_stalling_inc_cache_slice(hw_priv, type); + } + break; + case CAM_CPAS_HW_CMD_DUMP_BUFF_FILL_INFO: { + uint32_t *client_handle; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + client_handle = (uint32_t *)cmd_args; + rc = cam_cpas_hw_dump_camnoc_buff_fill_info(hw_priv, + *client_handle); + break; + } + case CAM_CPAS_HW_CMD_CSID_INPUT_CORE_INFO_UPDATE: { + struct cam_cpas_hw_cmd_csid_input_core_info_update *core_info_update; + + if (sizeof(struct cam_cpas_hw_cmd_csid_input_core_info_update) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", cmd_type, arg_size); + break; + } + + core_info_update = (struct cam_cpas_hw_cmd_csid_input_core_info_update *)cmd_args; + rc = cam_cpas_hw_csid_input_core_info_update(hw_priv, core_info_update->csid_idx, + core_info_update->sfe_idx, core_info_update->set_port); + break; + } + case CAM_CPAS_HW_CMD_CSID_PROCESS_RESUME: { + uint32_t *csid_idx; + + if (sizeof(uint32_t) != arg_size) { + CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + csid_idx = (uint32_t *)cmd_args; + rc = 
cam_cpas_hw_csid_process_resume(hw_priv, *csid_idx);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_TPG_MUX_SEL: {
+		uint32_t *tpg_mux_sel;
+
+		if (sizeof(uint32_t) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		tpg_mux_sel = (uint32_t *)cmd_args;
+		rc = cam_cpas_hw_enable_tpg_mux_sel(hw_priv, *tpg_mux_sel);
+		break;
+
+	}
+	case CAM_CPAS_HW_CMD_ENABLE_DISABLE_DOMAIN_ID_CLK: {
+		bool *enable;
+
+		if (sizeof(bool) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		enable = (bool *)cmd_args;
+		rc = cam_cpas_hw_enable_domain_id_clks(hw_priv, *enable);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_DUMP_STATE_MONITOR_INFO: {
+		struct cam_req_mgr_dump_info *info;
+
+		if (sizeof(struct cam_req_mgr_dump_info) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		info = (struct cam_req_mgr_dump_info *)cmd_args;
+		rc = cam_cpas_dump_state_monitor_array_info(hw_priv, info);
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CPAS, "CPAS HW command not valid =%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+/* Initialize the per-client mutexes; called once during hw probe. */
+static int cam_cpas_util_client_setup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
+		mutex_init(&cpas_core->client_mutex[i]);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cpas_util_client_cleanup()
+ *
+ * Unregister any still-registered client, free every client slot and
+ * destroy the per-client mutexes.  Counterpart of
+ * cam_cpas_util_client_setup(); used on probe error paths and on remove.
+ */
+int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
+		if (cpas_core->cpas_client[i] &&
+			cpas_core->cpas_client[i]->registered) {
+			cam_cpas_hw_unregister_client(cpas_hw, i);
+		}
+		CAM_MEM_FREE(cpas_core->cpas_client[i]);
+		cpas_core->cpas_client[i] = NULL;
+		mutex_destroy(&cpas_core->client_mutex[i]);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cpas_util_get_internal_ops()
+ *
+ * Select the internal ops table from the "arch-compat" DT property:
+ * "camss_top" -> camsstop ops, "cpas_top" -> cpastop ops; also sets
+ * hw_intf->hw_type to match.  Returns -EINVAL for a missing property
+ * or an unrecognized compat string.
+ */
+static int cam_cpas_util_get_internal_ops(struct platform_device *pdev,
+	struct cam_hw_intf *hw_intf, struct cam_cpas_internal_ops *internal_ops)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed to get arch-compat rc=%d", rc);
+		return -EINVAL;
+	}
+
+	if (strnstr(compat_str, "camss_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CAMSSTOP;
+		rc = cam_camsstop_get_internal_ops(internal_ops);
+	} else if (strnstr(compat_str, "cpas_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CPASTOP;
+		rc = cam_cpastop_get_internal_ops(internal_ops);
+	} else {
+		CAM_ERR(CAM_CPAS, "arch-compat %s not supported", compat_str);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cpas_util_create_debugfs()
+ *
+ * Create the "cpas" debugfs directory and its boolean knobs.  Returns 0
+ * silently when debugfs is unavailable; -ENOENT if the directory cannot
+ * be created.
+ */
+static int cam_cpas_util_create_debugfs(struct cam_cpas *cpas_core)
+{
+	int rc = 0;
+	struct dentry *dbgfileptr = NULL;
+
+	if (!cam_debugfs_available())
+		return 0;
+
+	rc = cam_debugfs_create_subdir("cpas", &dbgfileptr);
+	if (rc) {
+		CAM_ERR(CAM_CPAS,"DebugFS could not create directory!");
+		rc = -ENOENT;
+		goto end;
+	}
+	/* Store parent inode for cleanup in caller */
+	cpas_core->dentry = dbgfileptr;
+
+	debugfs_create_bool("ahb_bus_scaling_disable", 0644,
+		cpas_core->dentry, &cpas_core->ahb_bus_scaling_disable);
+
+	debugfs_create_bool("full_state_dump", 0644,
+		cpas_core->dentry, &cpas_core->full_state_dump);
+
+	debugfs_create_bool("smart_qos_dump", 0644,
+		cpas_core->dentry, &cpas_core->smart_qos_dump);
+
+	debugfs_create_bool("force_hlos_drv", 0644,
+		cpas_core->dentry, &cpas_core->force_hlos_drv);
+
+	debugfs_create_bool("force_cesta_sw_client", 0644,
+		cpas_core->dentry, &cpas_core->force_cesta_sw_client);
+
+	debugfs_create_bool("force_core_secure", 0644,
+		cpas_core->dentry, &cpas_core->force_core_secure);
+
+end:
+	return rc;
+}
+
+/* Map the sysfs kobject back to the owning cam_hw_info. */
+static struct cam_hw_info *cam_cpas_kobj_to_cpas_hw(struct kobject *kobj)
+{
+	return container_of(kobj, struct cam_cpas_kobj_map, base_kobj)->cpas_hw;
+}
+
+static ssize_t
cam_cpas_sysfs_get_subparts_info(struct kobject *kobj, struct kobj_attribute *attr,
+	char *buf)
+{
+	int len = 0;
+	struct cam_hw_info *cpas_hw = cam_cpas_kobj_to_cpas_hw(kobj);
+	struct cam_cpas_private_soc *soc_private = NULL;
+	struct cam_cpas_sysfs_info *sysfs_info = NULL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	soc_private = (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+	sysfs_info = &soc_private->sysfs_info;
+
+	/* Each line reports <available>, <functional> counts for a HW class */
+	len += scnprintf(buf, PAGE_SIZE, "num_ifes: 0x%x, 0x%x\nnum_ife_lites: 0x%x, 0x%x\n"
+		"num_sfes: 0x%x, 0x%x\nnum_custom: 0x%x, 0x%x\n",
+		sysfs_info->num_ifes[CAM_CPAS_AVAILABLE_NUM_SUBPARTS],
+		sysfs_info->num_ifes[CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS],
+		sysfs_info->num_ife_lites[CAM_CPAS_AVAILABLE_NUM_SUBPARTS],
+		sysfs_info->num_ife_lites[CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS],
+		sysfs_info->num_sfes[CAM_CPAS_AVAILABLE_NUM_SUBPARTS],
+		sysfs_info->num_sfes[CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS],
+		sysfs_info->num_custom[CAM_CPAS_AVAILABLE_NUM_SUBPARTS],
+		sysfs_info->num_custom[CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS]);
+	/*
+	 * subparts_info sysfs string looks like below.
+	 * num_ifes: 0x3, 0x3 (If all IFEs are available)/0x2 (If 1 IFE is unavailable)
+	 * num_ife_lites: 0x2, 0x2
+	 * num_sfes: 0x3, 0x3 (If all SFEs are available)/0x2 (If 1 SFE is unavailable)
+	 * num_custom: 0x0, 0x0
+	 */
+
+	/* NOTE(review): scnprintf never returns >= PAGE_SIZE, so this
+	 * truncation branch looks unreachable as written — confirm whether
+	 * snprintf was intended for truncation detection. */
+	if (len >= PAGE_SIZE) {
+		CAM_ERR(CAM_CPAS, "camera subparts info sysfs string is truncated, len: %d", len);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EOVERFLOW;
+	}
+
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return len;
+}
+
+/* Read-only /sys/kernel/camera/subparts_info attribute */
+static struct kobj_attribute cam_subparts_info_attribute = __ATTR(subparts_info, 0444,
+	cam_cpas_sysfs_get_subparts_info, NULL);
+
+/* kobject release: frees the wrapper allocated in cam_cpas_create_sysfs() */
+static void cam_cpas_hw_kobj_release(struct kobject *kobj)
+{
+	CAM_DBG(CAM_CPAS, "Release kobj");
+	CAM_MEM_FREE(container_of(kobj, struct cam_cpas_kobj_map, base_kobj));
+}
+
+static struct kobj_type kobj_cam_cpas_hw_type = {
+	.release = cam_cpas_hw_kobj_release,
+	.sysfs_ops = &kobj_sysfs_ops
+};
+
+/* Remove subparts_info and drop the last ref on the camera kobject. */
+static void cam_cpas_remove_sysfs(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas_private_soc *soc_private = NULL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	soc_private = (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	sysfs_remove_file(soc_private->sysfs_info.kobj, &cam_subparts_info_attribute.attr);
+	kobject_put(soc_private->sysfs_info.kobj);
+	mutex_unlock(&cpas_hw->hw_mutex);
+}
+
+/*
+ * cam_cpas_create_sysfs()
+ *
+ * Allocate the kobject wrapper, register /sys/kernel/camera and create the
+ * subparts_info file under it.  On failure after kobject_init, the single
+ * kobject_put releases the wrapper via cam_cpas_hw_kobj_release().
+ */
+static int cam_cpas_create_sysfs(struct cam_hw_info *cpas_hw)
+{
+	int rc = 0;
+	struct cam_cpas_kobj_map *kobj_camera = NULL;
+	struct cam_cpas_private_soc *soc_private = NULL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	soc_private = (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	kobj_camera = CAM_MEM_ZALLOC(sizeof(*kobj_camera), GFP_KERNEL);
+	if (!kobj_camera) {
+		CAM_ERR(CAM_CPAS, "failed to allocate memory for kobj_camera");
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -ENOMEM;
+	}
+
+	kobject_init(&kobj_camera->base_kobj, &kobj_cam_cpas_hw_type);
+	kobj_camera->cpas_hw = cpas_hw;
+	soc_private->sysfs_info.kobj = &kobj_camera->base_kobj;
+
+	rc = kobject_add(&kobj_camera->base_kobj, kernel_kobj, "%s", "camera");
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed to add camera entry in sysfs");
+		goto end;
+	}
+
+	/* sysfs file is created in /sys/kernel/camera */
+	rc = sysfs_create_file(&kobj_camera->base_kobj, &cam_subparts_info_attribute.attr);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed to create subparts_info file, rc: %d", rc);
+		goto end;
+	}
+
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return 0;
+end:
+	kobject_put(&kobj_camera->base_kobj);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+/*
+ * cam_cpas_hw_probe()
+ *
+ * Allocate and wire up the CPAS hw interface, core and soc resources,
+ * register bus clients, read hw caps, then release the default vote.
+ * On any failure the labelled unwind path below tears down in reverse
+ * order of construction.
+ */
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_info *cpas_hw = NULL;
+	struct cam_hw_intf *cpas_hw_intf = NULL;
+	struct cam_cpas *cpas_core = NULL;
+	struct cam_cpas_private_soc *soc_private;
+	struct cam_cpas_internal_ops *internal_ops;
+
+	cpas_hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cpas_hw_intf)
+		return -ENOMEM;
+
+	cpas_hw = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cpas_hw) {
+		CAM_MEM_FREE(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	cpas_core = CAM_MEM_ZALLOC(sizeof(struct cam_cpas), GFP_KERNEL);
+	if (!cpas_core) {
+		CAM_MEM_FREE(cpas_hw);
+		CAM_MEM_FREE(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	/* Mark all register bases unresolved until setup_regbase runs */
+	for (i = 0; i < CAM_CPAS_REG_MAX; i++)
+		cpas_core->regbase_index[i] = -1;
+
+	cpas_hw_intf->hw_priv = cpas_hw;
+	cpas_hw->core_info = cpas_core;
+
+	cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cpas_hw->soc_info.pdev = pdev;
+	cpas_hw->soc_info.dev = &pdev->dev;
+	cpas_hw->soc_info.dev_name = pdev->name;
+	cpas_hw->open_count = 0;
+	cpas_core->ahb_bus_scaling_disable = false;
+	cpas_core->full_state_dump = false;
+	cpas_core->smart_qos_dump = false;
+	cpas_core->force_core_secure = false;
+
+	atomic64_set(&cpas_core->monitor_head, -1);
+
+	mutex_init(&cpas_hw->hw_mutex);
+	spin_lock_init(&cpas_hw->hw_lock);
+	init_completion(&cpas_hw->hw_complete);
+
+	cpas_hw_intf->hw_ops.get_hw_caps =
cam_cpas_hw_get_hw_info; + cpas_hw_intf->hw_ops.init = cam_cpas_hw_init; + cpas_hw_intf->hw_ops.deinit = NULL; + cpas_hw_intf->hw_ops.reset = NULL; + cpas_hw_intf->hw_ops.reserve = NULL; + cpas_hw_intf->hw_ops.release = NULL; + cpas_hw_intf->hw_ops.start = cam_cpas_hw_start; + cpas_hw_intf->hw_ops.stop = cam_cpas_hw_stop; + cpas_hw_intf->hw_ops.read = NULL; + cpas_hw_intf->hw_ops.write = NULL; + cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd; + + cpas_core->work_queue = alloc_workqueue(CAM_CPAS_WORKQUEUE_NAME, + WQ_UNBOUND | WQ_MEM_RECLAIM, CAM_CPAS_INFLIGHT_WORKS); + if (!cpas_core->work_queue) { + rc = -ENOMEM; + goto release_mem; + } + + internal_ops = &cpas_core->internal_ops; + rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops); + if (rc) + goto release_workq; + + rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info, + internal_ops->handle_irq, cpas_hw); + if (rc) + goto release_workq; + + soc_private = (struct cam_cpas_private_soc *) + cpas_hw->soc_info.soc_private; + + rc = cam_cpas_create_sysfs(cpas_hw); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed to create sysfs entries, rc: %d", rc); + goto sysfs_fail; + } + + cpas_core->num_clients = soc_private->num_clients; + atomic_set(&cpas_core->soc_access_count, 0); + init_waitqueue_head(&cpas_core->soc_access_count_wq); + + if (internal_ops->setup_regbase) { + rc = internal_ops->setup_regbase(&cpas_hw->soc_info, + cpas_core->regbase_index, CAM_CPAS_REG_MAX); + if (rc) + goto deinit_platform_res; + } + + rc = cam_cpas_util_client_setup(cpas_hw); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in client setup, rc=%d", rc); + goto deinit_platform_res; + } + + rc = cam_cpas_util_register_bus_client(&cpas_hw->soc_info, + cpas_hw->soc_info.pdev->dev.of_node, + &cpas_core->ahb_bus_client); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in ahb setup, rc=%d", rc); + goto client_cleanup; + } + + rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in axi setup, 
rc=%d", rc); + goto ahb_cleanup; + } + + if (!cam_vmrm_no_register_read_on_bind()) { + /* Need to vote first before enabling clocks */ + rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true); + if (rc) + goto axi_cleanup; + + rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, + cpas_hw->soc_info.lowest_clk_level); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc); + goto remove_default_vote; + } + } + + if (internal_ops->get_hw_info) { + rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in get_hw_info, rc=%d", rc); + goto disable_soc_res; + } + } else { + CAM_ERR(CAM_CPAS, "Invalid get_hw_info"); + goto disable_soc_res; + } + + rc = cam_cpas_hw_init(cpas_hw_intf->hw_priv, + &cpas_core->hw_caps, sizeof(struct cam_cpas_hw_caps)); + if (rc) + goto disable_soc_res; + + + rc = cam_get_subpart_info(&soc_private->part_info, CAM_CPAS_CAMERA_INSTANCES); + if (rc) { +#ifndef CONFIG_ARCH_QTI_VM + CAM_ERR(CAM_CPAS, "Failed to get subpart_info, rc = %d", rc); + goto disable_soc_res; +#else + CAM_WARN(CAM_CPAS, "subparts info is not available"); + cpas_core->cam_subpart_info = NULL; + rc = 0; +#endif + } + + if (!cam_vmrm_no_register_read_on_bind()) { + rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc); + goto remove_default_vote; + } + + rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false); + if (rc) + goto axi_cleanup; + } + + rc = cam_cpas_util_create_debugfs(cpas_core); + if (unlikely(rc)) + CAM_WARN(CAM_CPAS, "failed to create cpas debugfs rc: %d", rc); + + *hw_intf = cpas_hw_intf; + return 0; + +disable_soc_res: + if (!cam_vmrm_no_register_read_on_bind()) + cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true); +remove_default_vote: + if (!cam_vmrm_no_register_read_on_bind()) + cam_cpas_util_vote_default_ahb_axi(cpas_hw, false); +axi_cleanup: + cam_cpas_util_axi_cleanup(cpas_core, 
&cpas_hw->soc_info);
+ahb_cleanup:
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+client_cleanup:
+	cam_cpas_util_client_cleanup(cpas_hw);
+	cam_cpas_node_tree_cleanup(cpas_core, cpas_hw->soc_info.soc_private);
+deinit_platform_res:
+	cam_cpas_remove_sysfs(cpas_hw);
+sysfs_fail:
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_workq:
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
+release_mem:
+	mutex_destroy(&cpas_hw->hw_mutex);
+	CAM_MEM_FREE(cpas_core);
+	CAM_MEM_FREE(cpas_hw);
+	CAM_MEM_FREE(cpas_hw_intf);
+	CAM_ERR(CAM_CPAS, "failed in hw probe");
+	return rc;
+}
+
+/*
+ * cam_cpas_hw_remove()
+ *
+ * Tear down everything cam_cpas_hw_probe() built, in reverse order.
+ * Refuses (-EINVAL) when the interface pointer is NULL or the hw is
+ * still powered up.
+ */
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+
+	if (!cpas_hw_intf) {
+		CAM_ERR(CAM_CPAS, "cpas interface not initialized");
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)cpas_hw_intf->hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_CPAS, "cpas hw is in power up state");
+		return -EINVAL;
+	}
+
+	cam_cpas_remove_sysfs(cpas_hw);
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+	cam_cpas_node_tree_cleanup(cpas_core, cpas_hw->soc_info.soc_private);
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+	cam_cpas_util_client_cleanup(cpas_hw);
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	cpas_core->dentry = NULL;
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
+	mutex_destroy(&cpas_hw->hw_mutex);
+	CAM_MEM_FREE(cpas_core);
+	CAM_MEM_FREE(cpas_hw);
+	CAM_MEM_FREE(cpas_hw_intf);
+
+	return 0;
+}
diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw.h
new file mode 100644
index 0000000000..5f6e1fd4f9
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw.h
@@ -0,0 +1,511 @@
+/*
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CPAS_HW_H_ +#define _CAM_CPAS_HW_H_ + +#include + +#include "cam_cpas_api.h" +#include "cam_cpas_hw_intf.h" +#include "cam_common_util.h" +#include "cam_soc_bus.h" + +#define CAM_CPAS_INFLIGHT_WORKS 5 +#define CAM_CPAS_MAX_CLIENTS 43 +#define CAM_CPAS_MAX_AXI_PORTS 6 +#define CAM_CPAS_MAX_DRV_PORTS 4 +#define CAM_CPAS_MAX_TREE_LEVELS 4 +#define CAM_CPAS_MAX_RT_WR_NIU_NODES 10 +#define CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT 32 +#define CAM_CPAS_PATH_DATA_MAX 42 +#define CAM_CPAS_TRANSACTION_MAX 2 +#define CAM_CAMNOC_FILL_LVL_REG_INFO_MAX 6 +#define CAM_CPAS_MAX_SLOPE_FACTOR 100 +#define CAM_CPAS_MAX_STRESS_INDICATOR 100 + +/* Number of camera (CAM_SS) instances */ +#define CAM_CPAS_CAMERA_INSTANCES 1 + +#define CAM_CPAS_AXI_MIN_MNOC_AB_BW (2048 * 1024) +#define CAM_CPAS_AXI_MIN_MNOC_IB_BW (2048 * 1024) +#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024) +#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000UL) + +#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle) +#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx) + +#define CAM_CPAS_WORKQUEUE_NAME "cam-cpas" + +#define CAM_CPAS_CLIENT_VALID(indx) \ + ((indx >= 0) && (indx < CAM_CPAS_MAX_CLIENTS)) +#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx) \ + ((CAM_CPAS_CLIENT_VALID(indx)) && \ + (cpas_core->cpas_client[indx]->registered)) +#define CAM_CPAS_CLIENT_STARTED(cpas_core, indx) \ + ((CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)) && \ + (cpas_core->cpas_client[indx]->started)) + +/* Array indices to represent corresponding RPMH BCM info */ +#define CAM_RPMH_NUMBER_OF_BCMS 0 +#define CAM_RPMH_BCM_FE_OFFSET 1 +#define CAM_RPMH_BCM_BE_OFFSET 2 +#define CAM_RPMH_BCM_DDR_INDEX 3 +#define CAM_RPMH_BCM_MNOC_INDEX 4 +#define CAM_RPMH_BCM_INFO_MAX 5 + +#define CAM_CPAS_MONITOR_MAX_ENTRIES 100 +#define 
CAM_CPAS_INC_MONITOR_HEAD(head, ret) \ + div_u64_rem(atomic64_add_return(1, head),\ + CAM_CPAS_MONITOR_MAX_ENTRIES, (ret)) +#define CAM_CPAS_MAX_CESTA_VCD_NUM 9 + +#define CAM_CPAS_DUMP_NUM_WORDS_COMM 20 +#define CAM_CPAS_DUMP_NUM_WORDS_VOTE_TYEP_DRV 4 +#define CAM_CPAS_DUMP_NUM_WORDS_VOTE_TYEP_HLOS 2 +#define CAM_CPAS_DUMP_NUM_WORDS_RT_WR_NIUS 2 +#define CAM_CPAS_DUMP_NUM_WORDS_VCD_CURR_LVL 2 + +/* MAX number of qchannel*/ +#define CAM_CAMNOC_QCHANNEL_MAX 3 + +/** + * enum cam_camnoc_domain_type - Enum for different camnoc domains + * @CAM_CAMNOC_HW_COMBINED: refer to legacy camnoc info that combines RT/NRT HW + * @CAM_CAMNOC_HW_RT: type for camnoc RT info + * @CAM_CAMNOC_HW_NRT: type for camnoc NRT info + * @CAM_CAMNOC_HW_TYPE_MAX: camnoc info maximum type + */ +enum cam_camnoc_hw_type { + CAM_CAMNOC_HW_COMBINED, + CAM_CAMNOC_HW_RT, + CAM_CAMNOC_HW_NRT, + CAM_CAMNOC_HW_TYPE_MAX, +}; + +/** + * enum cam_cpas_access_type - Enum for Register access type + */ +enum cam_cpas_access_type { + CAM_REG_TYPE_READ, + CAM_REG_TYPE_WRITE, + CAM_REG_TYPE_READ_WRITE, +}; + +/** + * struct cam_cpas_vdd_ahb_mapping : Voltage to ahb level mapping + * + * @vdd_corner : Voltage corner value + * @ahb_level : AHB vote level corresponds to this vdd_corner + * + */ +struct cam_cpas_vdd_ahb_mapping { + unsigned int vdd_corner; + enum cam_vote_level ahb_level; +}; + +/** + * struct cam_cpas_bw_vote : AXI bw vote + * + * @ab: AB bw value + * @ib: IB bw value + * @camnoc: CAMNOC bw value + * + */ +struct cam_cpas_bw_vote { + uint64_t ab; + uint64_t ib; + uint64_t camnoc; +}; + +/** + * struct cam_cpas_drv_vote : DRV bw vote + * + * @high: Active bw values + * @low: Sleep bw values + * + */ +struct cam_cpas_drv_vote { + struct cam_cpas_bw_vote high; + struct cam_cpas_bw_vote low; +}; + +/** + * struct cam_cpas_axi_bw_info : AXI bw info + * + * @vote_type: HLOS or DRV vote type + * @hlos_vote: HLOS bw values + * @drv_vote: DRV bw values + * + */ +struct cam_cpas_axi_bw_info { + enum 
cam_cpas_vote_type vote_type; + union { + struct cam_cpas_bw_vote hlos_vote; + struct cam_cpas_drv_vote drv_vote; + }; +}; + +/** + * struct cam_cpas_axi_consolidate_per_path_bw_vote - Internal per path bandwidth vote after + * consolidation. Consolidation is agnostic + * of actual vote that will be done (HLOS or DRV) + * + * @transac_type: Transaction type on the path (read/write) + * @path_data_type: Path for which vote is given (video, display, rdi) + * @drv_vote: Consolidated BW values for high and low level vote. + * Low level also contains VOTE_LEVEL_NONE for backward + * compatibility. + */ +struct cam_cpas_axi_consolidate_per_path_bw_vote { + uint32_t transac_type; + uint32_t path_data_type; + struct cam_cpas_drv_vote drv_vote; + +}; + +/** + * struct cam_axi_consolidate_vote : Consolidated AXI vote + * + * @num_paths: Number of paths on which BW vote is to be applied after consolidation + * @axi_path: Per path consolidate BW vote info + * + */ +struct cam_axi_consolidate_vote { + uint32_t num_paths; + struct cam_cpas_axi_consolidate_per_path_bw_vote axi_path[CAM_CPAS_MAX_PATHS_PER_CLIENT]; +}; + +/** + * struct cam_cpas_kobj_map: wrapper structure for base kobject + * and cam cpas private soc info + * @base_kobj: kernel object for camera sysfs + * @cpas_hw: pointer to cam_hw_info structure + */ +struct cam_cpas_kobj_map { + struct kobject base_kobj; + struct cam_hw_info *cpas_hw; +}; + +/** + * struct cam_cpas_internal_ops - CPAS Hardware layer internal ops + * + * @get_hw_info: Function pointer for get hw info + * @init_hw_version: Function pointer for hw init based on version + * @handle_irq: Function poniter for irq handling + * @setup_regbase: Function pointer for setup rebase indices + * @power_on: Function pointer for hw core specific power on settings + * @power_off: Function pointer for hw core specific power off settings + * @setup_qos_settings: Function pointer for hw to select a specific qos header + * @print_poweron_settings: Function pointer 
for hw to print poweron settings + * @qchannel_handshake: Function pointer for hw core specific qchannel + * handshake settings + * @set_tpg_mux_sel: Set tpg mux select on CPAS TOP register + * @handle_reset_res_control: handle reset resource control + * + */ +struct cam_cpas_internal_ops { + int (*get_hw_info)(struct cam_hw_info *cpas_hw, + struct cam_cpas_hw_caps *hw_caps); + int (*init_hw_version)(struct cam_hw_info *cpas_hw, + struct cam_cpas_hw_caps *hw_caps); + irqreturn_t (*handle_irq)(int irq_num, void *data); + int (*setup_regbase)(struct cam_hw_soc_info *soc_info, + int32_t regbase_index[], int32_t num_reg_map); + int (*power_on)(struct cam_hw_info *cpas_hw); + int (*power_off)(struct cam_hw_info *cpas_hw); + int (*setup_qos_settings)(struct cam_hw_info *cpas_hw, + uint32_t selection_mask); + int (*print_poweron_settings)(struct cam_hw_info *cpas_hw); + int (*qchannel_handshake)(struct cam_hw_info *cpas_hw, bool power_on, bool force_on); + int (*set_tpg_mux_sel)(struct cam_hw_info *cpas_hw, uint32_t tpg_num); + int (*handle_reset_res_control)(struct cam_hw_info *cpas_hw); +}; + +/** + * struct cam_cpas_reg : CPAS register info + * + * @enable: Whether this reg info need to be enabled + * @access_type: Register access type + * @masked_value: Whether this register write/read is based on mask, shift + * @mask: Mask for this register value + * @shift: Shift for this register value + * @value: Register value + * + */ +struct cam_cpas_reg { + bool enable; + enum cam_cpas_access_type access_type; + bool masked_value; + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; +}; + +/** + * struct cam_cpas_client : CPAS Client structure info + * + * @data: Client register params + * @registered: Whether client has registered with cpas + * @started: Whether client has streamed on + * @tree_node_valid: Indicates whether tree node has at least one valid node + * @is_drv_dyn: Indicates whether this client is DRV dynamic voting client + * @ahb_level: 
Determined/Applied ahb level for the client + * @axi_level: Determined/Applied axi level for the client + * @axi_vote: Determined/Applied consolidate axi vote for the client + * @axi_port: Client's parent axi port + * @tree_node: All granular path voting nodes for the client + * + */ +struct cam_cpas_client { + struct cam_cpas_register_params data; + bool registered; + bool started; + bool tree_node_valid; + bool is_drv_dyn; + enum cam_vote_level ahb_level; + enum cam_vote_level axi_level; + struct cam_axi_consolidate_vote cons_axi_vote; + struct cam_cpas_axi_port *axi_port; + struct cam_cpas_tree_node *tree_node[CAM_CPAS_PATH_DATA_MAX] + [CAM_CPAS_TRANSACTION_MAX]; +}; + +/** + * struct cam_cpas_bus_client : Bus client information + * + * @valid: Whether bus client is valid + * @name: Name of the bus client + * @lock: Mutex lock used while voting on this client + * @curr_vote_level: current voted index + * @common_data: Common data fields for bus client + * @soc_bus_client: Bus client private information + */ +struct cam_cpas_bus_client { + bool valid; + struct mutex lock; + unsigned int curr_vote_level; + struct cam_soc_bus_client_common_data common_data; + void *soc_bus_client; +}; + +/** + * struct cam_cpas_axi_port : AXI port information + * + * @axi_port_name: Name of this AXI port + * @bus_client: bus client info for this port + * @ib_bw_voting_needed: if this port can update ib bw dynamically + * @is_rt: if this port represents a real time axi port + * @axi_port_node: Node representing AXI Port info in device tree + * @drv_idx: DRV index for axi port node + * @cam_rsc_dev: Cam RSC device for DRV + * @is_drv_started: Indicates if DRV started for RSC device corresponding to port + * @curr_bw: Current voted bw after cpas consolidation + * @additional_bw: Additional bandwidth to cover non-hw cpas clients + * @applied_bw: Actual applied bw to port + */ +struct cam_cpas_axi_port { + const char *axi_port_name; + struct cam_cpas_bus_client bus_client; + bool 
ib_bw_voting_needed; + bool is_rt; + struct device_node *axi_port_node; + uint32_t drv_idx; + const struct device *cam_rsc_dev; + bool is_drv_started; + struct cam_cpas_axi_bw_info curr_bw; + uint64_t additional_bw; + struct cam_cpas_axi_bw_info applied_bw; +}; + +/** + * struct cam_cpas_axi_port_debug_info : AXI port information + * + * @axi_port_name: Name of this AXI port + * @curr_bw: Current voted bw after cpas consolidation + * @camnoc_bw: CAMNOC bw value for this port + * @applied_bw: Actual applied bw to port + * @is_drv_started: Indicates if DRV started for RSC device corresponding to port + */ +struct cam_cpas_axi_port_debug_info { + const char *axi_port_name; + struct cam_cpas_axi_bw_info curr_bw; + uint64_t camnoc_bw; + struct cam_cpas_axi_bw_info applied_bw; + bool is_drv_started; +}; + +struct cam_cpas_cesta_vcd_curr_lvl_debug_info { + uint8_t index; + uint32_t reg_value; +}; + +/** + * struct cam_cpas_cesta_vcd_reg_debug_info : to hold all cesta register information + * + * @vcd_currol: vcd control reg info + * + */ +struct cam_cpas_cesta_vcd_reg_debug_info { + struct cam_cpas_cesta_vcd_curr_lvl_debug_info + vcd_curr_lvl_debug_info[CAM_CPAS_MAX_CESTA_VCD_NUM]; +}; + + +/** + * struct cam_cpas_monitor : CPAS monitor array + * + * @timestamp: Timestamp at which this monitor entry is saved + * @identifier_string: String passed by caller + * @identifier_value: Identifier value passed by caller + * @axi_info: AXI port information + * @applied_camnoc_clk: Applied camnoc axi clock rate with sw, hw clients + * @applied_ahb_level: Applied camcc ahb level + * @fe_ddr: RPMH DDR BCM FE (front-end) status register value. + * This indicates requested clock plan + * @be_ddr: RPMH DDR BCM BE (back-end) status register value. + * This indicates actual current clock plan + * @fe_mnoc: RPMH MNOC BCM FE (front-end) status register value. + * This indicates requested clock plan + * @be_mnoc: RPMH MNOC BCM BE (back-end) status register value. 
+ * This indicates actual current clock plan + * @be_shub: RPMH SHUB BCM BE (back-end) status register value. + * This indicates actual current clock plan + * @num_camnoc_lvl_regs: Number of enabled camnoc fill level + * monitoring registers + * @camnoc_port_name: Camnoc port names + * @camnoc_fill_level: Camnoc fill level register info + * @rt_wr_niu_pri_lut_low: priority lut low values of RT Wr NIUs + * @rt_wr_niu_pri_lut_high: priority lut high values of RT Wr NIUs + * @vcd_reg_debug_info: vcd reg debug information + */ +struct cam_cpas_monitor { + struct timespec64 timestamp; + char identifier_string[128]; + int32_t identifier_value; + struct cam_cpas_axi_port_debug_info axi_info[CAM_CPAS_MAX_AXI_PORTS]; + struct cam_soc_util_clk_rates applied_camnoc_clk; + unsigned int applied_ahb_level; + uint32_t fe_ddr; + uint32_t be_ddr; + uint32_t fe_mnoc; + uint32_t be_mnoc; + uint32_t be_shub; + uint32_t num_camnoc_lvl_regs[CAM_CAMNOC_HW_TYPE_MAX]; + const char *camnoc_port_name[CAM_CAMNOC_HW_TYPE_MAX] + [CAM_CAMNOC_FILL_LVL_REG_INFO_MAX]; + uint32_t camnoc_fill_level[CAM_CAMNOC_HW_TYPE_MAX] + [CAM_CAMNOC_FILL_LVL_REG_INFO_MAX]; + uint32_t rt_wr_niu_pri_lut_low[CAM_CPAS_MAX_RT_WR_NIU_NODES]; + uint32_t rt_wr_niu_pri_lut_high[CAM_CPAS_MAX_RT_WR_NIU_NODES]; + struct cam_cpas_cesta_vcd_reg_debug_info vcd_reg_debug_info; + struct cam_hw_info *cpas_hw; +}; + +/** + * struct cam_cpas : CPAS core data structure info + * + * @hw_caps: CPAS hw capabilities + * @cpas_client: Array of pointers to CPAS clients info + * @client_mutex: Mutex for accessing client info + * @tree_lock: Mutex lock for accessing CPAS node tree + * @num_clients: Total number of clients that CPAS supports + * @num_axi_ports: Total number of axi ports found in device tree + * @num_camnoc_axi_ports: Total number of camnoc axi ports found in device tree + * @registered_clients: Number of Clients registered currently + * @streamon_clients: Number of Clients that are in start state currently + * 
@slave_err_irq_idx: Index of slave error in irq error data structure, + * avoids iterating the entire structure to find this + * idx in irq th + * @regbase_index: Register base indices for CPAS register base IDs + * @ahb_bus_client: AHB Bus client info + * @axi_port: AXI port info for a specific axi index + * @camnoc_axi_port: CAMNOC AXI port info for a specific camnoc axi index + * @cam_subpart_info: camera subparts fuse description + * @internal_ops: CPAS HW internal ops + * @work_queue: Work queue handle + * @soc_access_count: atomic soc_access_count count + * @soc_access_count_wq: wait variable to ensure CPAS is not stop, + * while accessing hw through CPAS + * @dentry: debugfs file entry + * @ahb_bus_scaling_disable: ahb scaling based on src clk corner for bus + * @applied_camnoc_axi_rate: applied camnoc axi clock rate through sw, hw clients + * @applied_hlos_rt_camnoc_axi_rate: applied hlos RT camnoc axi clock rate + * @monitor_head: Monitor array head + * @monitor_entries: cpas monitor array + * @camnoc_info: array of camnoc info pointer + * @cesta_info: Pointer to cesta header info + * @num_valid_camnoc: number of valid camnoc info + * @camnoc_rt_idx: index to real time camnoc info array + * @camnoc_info_idx: map camnoc hw type to index used for camnoc_info array indexing + * @full_state_dump: Whether to enable full cpas state dump or not + * @smart_qos_dump: Whether to dump smart qos information on update + * @slave_err_irq_en: Whether slave error irq is enabled to detect memory + * config issues + * @smmu_fault_handled: Handled address decode error, on fault at SMMU + * @force_hlos_drv: Whether to force disable DRV voting + * @force_cesta_sw_client: Whether to force voting through cesta sw client + * @force_core_secure: Whether to force ife and cdm core to secure access mode + * @hlos_axi_floor_lvl: Determined hlos axi floor level across all clients + * @hlos_axi_clk_lvl: Determined/applied hlos axi clk level + */ +struct cam_cpas { + struct 
cam_cpas_hw_caps hw_caps; + struct cam_cpas_client *cpas_client[CAM_CPAS_MAX_CLIENTS]; + struct mutex client_mutex[CAM_CPAS_MAX_CLIENTS]; + struct mutex tree_lock; + uint32_t num_clients; + uint32_t num_axi_ports; + uint32_t num_camnoc_axi_ports; + uint32_t registered_clients; + uint32_t streamon_clients; + uint32_t slave_err_irq_idx[CAM_CAMNOC_HW_TYPE_MAX]; + int32_t regbase_index[CAM_CPAS_REG_MAX]; + struct cam_cpas_bus_client ahb_bus_client; + struct cam_cpas_axi_port axi_port[CAM_CPAS_MAX_AXI_PORTS]; + struct cam_cpas_axi_port camnoc_axi_port[CAM_CPAS_MAX_AXI_PORTS]; + struct cam_cpas_subpart_info *cam_subpart_info; + struct cam_cpas_internal_ops internal_ops; + struct workqueue_struct *work_queue; + atomic_t soc_access_count; + wait_queue_head_t soc_access_count_wq; + struct dentry *dentry; + bool ahb_bus_scaling_disable; + struct cam_soc_util_clk_rates applied_camnoc_axi_rate; + unsigned long applied_hlos_rt_camnoc_axi_rate; + atomic64_t monitor_head; + struct cam_cpas_monitor monitor_entries[CAM_CPAS_MONITOR_MAX_ENTRIES]; + void *camnoc_info[CAM_CAMNOC_HW_TYPE_MAX]; + void *cesta_info; + uint8_t num_valid_camnoc; + int8_t camnoc_rt_idx; + int8_t camnoc_info_idx[CAM_CAMNOC_HW_TYPE_MAX]; + bool full_state_dump; + bool smart_qos_dump; + bool slave_err_irq_en[CAM_CAMNOC_HW_TYPE_MAX]; + bool smmu_fault_handled; + bool force_hlos_drv; + bool force_cesta_sw_client; + bool force_core_secure; + enum cam_vote_level hlos_axi_floor_lvl; + enum cam_vote_level hlos_axi_bw_calc_lvl; +}; + +int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops); +int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops); + +int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw, + enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info); +int cam_cpas_util_reg_read(struct cam_hw_info *cpas_hw, + enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info); + +int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw); + +int 
cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw, + int enable); + +#endif /* _CAM_CPAS_HW_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw_intf.h new file mode 100644 index 0000000000..463428068a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_hw_intf.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CPAS_HW_INTF_H_ +#define _CAM_CPAS_HW_INTF_H_ + +#include + +#include "cam_cpas_api.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_debug_util.h" + +/* Number of times to retry while polling */ +#define CAM_CPAS_POLL_RETRY_CNT 5 +/* Minimum usecs to sleep while polling */ +#define CAM_CPAS_POLL_MIN_USECS 200 +/* Maximum usecs to sleep while polling */ +#define CAM_CPAS_POLL_MAX_USECS 250 +/* Number of times to retry while polling */ +#define CAM_CPAS_POLL_QH_RETRY_CNT 5 + +/* Number of CPAS hw caps registers */ +#define CAM_CPAS_MAX_CAPS_REGS 2 + +/** + * enum cam_cpas_hw_type - Enum for CPAS HW type + */ +enum cam_cpas_hw_type { + CAM_HW_CPASTOP, + CAM_HW_CAMSSTOP, +}; + +/** + * enum cam_cpas_reg_base - Enum for register base identifier. These + * are the identifiers used in generic register + * write/read APIs provided by cpas driver. 
+ */ +enum cam_cpas_reg_base { + CAM_CPAS_REG_CPASTOP, + CAM_CPAS_REG_CAMNOC, + CAM_CPAS_REG_CAMSS, + CAM_CPAS_REG_RPMH, + CAM_CPAS_REG_CESTA, + CAM_CPAS_REG_CAMNOC_RT, + CAM_CPAS_REG_CAMNOC_NRT, + CAM_CPAS_REG_SECURE, + CAM_CPAS_REG_MAX +}; + +/** + * enum cam_cpas_hw_cmd_process - Enum for CPAS HW process command type + */ +enum cam_cpas_hw_cmd_process { + CAM_CPAS_HW_CMD_REGISTER_CLIENT, + CAM_CPAS_HW_CMD_UNREGISTER_CLIENT, + CAM_CPAS_HW_CMD_SET_ADDR_TRANS, + CAM_CPAS_HW_CMD_REG_WRITE, + CAM_CPAS_HW_CMD_REG_READ, + CAM_CPAS_HW_CMD_AHB_VOTE, + CAM_CPAS_HW_CMD_AXI_VOTE, + CAM_CPAS_HW_AXI_FLOOR_LVL, + CAM_CPAS_HW_CMD_LOG_VOTE, + CAM_CPAS_HW_CMD_SELECT_QOS, + CAM_CPAS_HW_CMD_LOG_EVENT, + CAM_CPAS_HW_CMD_GET_SCID, + CAM_CPAS_HW_CMD_ACTIVATE_LLC, + CAM_CPAS_HW_CMD_DEACTIVATE_LLC, + CAM_CPAS_HW_CMD_CONFIGURE_STALING_LLC, + CAM_CPAS_HW_CMD_NOTIF_STALL_INC_LLC, + CAM_CPAS_HW_CMD_DUMP_BUFF_FILL_INFO, + CAM_CPAS_HW_CMD_CSID_INPUT_CORE_INFO_UPDATE, + CAM_CPAS_HW_CMD_CSID_PROCESS_RESUME, + CAM_CPAS_HW_CMD_ENABLE_DISABLE_DOMAIN_ID_CLK, + CAM_CPAS_HW_CMD_TPG_MUX_SEL, + CAM_CPAS_HW_CMD_DUMP_STATE_MONITOR_INFO, + CAM_CPAS_HW_CMD_INVALID, +}; + +/** + * struct cam_cpas_hw_cmd_csid_input_core_info_update : CPAS cmd struct for updating acquired + * csid core info to cpas + * + * @csid_idx: CSID core index + * @sfe_idx: SFE core index corresponding to CSID core + * @set_port: Indicates whether to set or reset port for given client + * + */ +struct cam_cpas_hw_cmd_csid_input_core_info_update { + int csid_idx; + int sfe_idx; + bool set_port; +}; + +/** + * struct cam_cpas_hw_cmd_reg_read_write : CPAS cmd struct for reg read, write + * + * @client_handle: Client handle + * @reg_base: Register base type + * @offset: Register offset + * @value: Register value + * @mb: Whether to do operation with memory barrier + * + */ +struct cam_cpas_hw_cmd_reg_read_write { + uint32_t client_handle; + enum cam_cpas_reg_base reg_base; + uint32_t offset; + uint32_t value; + bool mb; +}; + +/** + * 
struct cam_cpas_hw_cmd_ahb_vote : CPAS cmd struct for AHB vote + * + * @client_handle: Client handle + * @ahb_vote: AHB voting info + * + */ +struct cam_cpas_hw_cmd_ahb_vote { + uint32_t client_handle; + struct cam_ahb_vote *ahb_vote; +}; + +/** + * struct cam_cpas_hw_cmd_axi_vote : CPAS cmd struct for AXI vote + * + * @client_handle: Client handle + * @axi_vote: axi bandwidth vote + * + */ +struct cam_cpas_hw_cmd_axi_vote { + uint32_t client_handle; + struct cam_axi_vote *axi_vote; +}; + +/** + * struct cam_cpas_hw_axi_floor_lvl : CPAS struct for AXI floor level + * + * @client_handle: Client handle + * @floor_lvl: axi floor level + * + */ +struct cam_cpas_hw_axi_floor_lvl { + uint32_t client_handle; + enum cam_vote_level floor_lvl; +}; + +/** + * struct cam_cpas_hw_cmd_start : CPAS cmd struct for start + * + * @client_handle: Client handle + * + */ +struct cam_cpas_hw_cmd_start { + uint32_t client_handle; + struct cam_ahb_vote *ahb_vote; + struct cam_axi_vote *axi_vote; +}; + +/** + * struct cam_cpas_hw_cmd_stop : CPAS cmd struct for stop + * + * @client_handle: Client handle + * + */ +struct cam_cpas_hw_cmd_stop { + uint32_t client_handle; +}; + +/** + * struct cam_cpas_hw_addr_trans_data : CPAS cmd struct for programming address translator + * + * @client_handle: Client handle + * @addr_trans_data: Register values to be programmed for address translator + * + */ +struct cam_cpas_hw_addr_trans_data { + uint32_t client_handle; + struct cam_cpas_addr_trans_data *addr_trans_data; +}; + +/** + * struct cam_cpas_hw_cmd_notify_event : CPAS cmd struct for notify event + * + * @identifier_string: Identifier string passed by caller + * @identifier_value: Identifier value passed by caller + * + */ +struct cam_cpas_hw_cmd_notify_event { + const char *identifier_string; + int32_t identifier_value; +}; + +/** + * struct cam_cpas_hw_caps : CPAS HW capabilities + * + * @camera_family: Camera family type + * @camera_version: Camera version + * @cpas_version: CPAS version + * 
@camera_capability: array of camera hw capabilities + * @num_capability_reg: Number of camera hw capabilities registers + * @fuse_info: Fuse information + * + */ +struct cam_cpas_hw_caps { + uint32_t camera_family; + struct cam_hw_version camera_version; + struct cam_hw_version cpas_version; + uint32_t camera_capability[CAM_CPAS_MAX_CAPS_REGS]; + uint32_t num_capability_reg; + struct cam_cpas_fuse_info fuse_info; +}; + +int cam_cpas_hw_probe(struct platform_device *pdev, + struct cam_hw_intf **hw_intf); +int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf); + +/** + * @brief : API to register CPAS hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_cpas_dev_init_module(void); + +/** + * @brief : API to remove CPAS interface from platform framework. + */ +void cam_cpas_dev_exit_module(void); + +/** + * @brief : API to select TPG mux select. + */ +int cam_cpas_enable_tpg_mux_sel(uint32_t tpg_mux_sel); + +#endif /* _CAM_CPAS_HW_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_intf.c b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_intf.c new file mode 100644 index 0000000000..0b370a5a43 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_intf.c @@ -0,0 +1,2169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "cam_subdev.h" +#include "cam_cpas_hw_intf.h" +#include "cam_cpas_soc.h" +#include "cam_cpastop_hw.h" +#include "camera_main.h" +#include "cam_cpas_hw.h" +#include "cam_packet_util.h" + +#include +#include "cam_req_mgr_interface.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +#if defined(CONFIG_DYNAMIC_FD_PORT_CONFIG) || defined(CONFIG_SPECTRA_SECURE_DYN_PORT_CFG) +#include +#include +#include +#define CAM_CPAS_MIN_DPC_LEN 1 +#endif + +#define CAM_CPAS_DEV_NAME "cam-cpas" +#define CAM_CPAS_INTF_INITIALIZED() (g_cpas_intf && g_cpas_intf->probe_done) + +/** + * struct cam_cpas_intf : CPAS interface + * + * @pdev: Platform device + * @subdev: Subdev info + * @hw_intf: CPAS HW interface + * @hw_caps: CPAS HW capabilities + * @intf_lock: CPAS interface mutex + * @open_cnt: CPAS subdev open count + * @probe_done: Whether CPAS prove completed + * + */ +struct cam_cpas_intf { + struct platform_device *pdev; + struct cam_subdev subdev; + struct cam_hw_intf *hw_intf; + struct cam_cpas_hw_caps hw_caps; + struct mutex intf_lock; + uint32_t open_cnt; + bool probe_done; +}; + +static struct cam_cpas_intf *g_cpas_intf; + +const char *cam_cpas_axi_util_path_type_to_string( + uint32_t path_data_type) +{ + switch (path_data_type) { + /* IFE Paths */ + case CAM_AXI_PATH_DATA_IFE_LINEAR: + return "IFE_LINEAR"; + case CAM_AXI_PATH_DATA_IFE_VID: + return "IFE_VID"; + case CAM_AXI_PATH_DATA_IFE_DISP: + return "IFE_DISP"; + case CAM_AXI_PATH_DATA_IFE_STATS: + return "IFE_STATS"; + case CAM_AXI_PATH_DATA_IFE_RDI0: + return "IFE_RDI0"; + case CAM_AXI_PATH_DATA_IFE_RDI1: + return "IFE_RDI1"; + case CAM_AXI_PATH_DATA_IFE_RDI2: + return "IFE_RDI2"; + case CAM_AXI_PATH_DATA_IFE_RDI3: + return "IFE_RDI3"; + case CAM_AXI_PATH_DATA_IFE_PDAF: + return "IFE_PDAF"; + case CAM_AXI_PATH_DATA_IFE_PIXEL_RAW: + return "IFE_PIXEL_RAW"; 
+ case CAM_AXI_PATH_DATA_IFE_FULL: + return "IFE_FULL"; + case CAM_AXI_PATH_DATA_IFE_DS2: + return "IFE_DS2"; + case CAM_AXI_PATH_DATA_IFE_DS4: + return "IFE_DS4"; + case CAM_AXI_PATH_DATA_IFE_DS16: + return "IFE_DS16"; + case CAM_AXI_PATH_DATA_IFE_RDI4: + return "IFE_RDI4"; + case CAM_AXI_PATH_DATA_IFE_PDAF_1: + return "IFE_PDAF_1"; + case CAM_AXI_PATH_DATA_IFE_PDAF_2: + return "IFE_PDAF_2"; + case CAM_AXI_PATH_DATA_IFE_PDAF_3: + return "IFE_PDAF_3"; + case CAM_AXI_PATH_DATA_IFE_IR: + return "IFE_IR"; + case CAM_AXI_PATH_DATA_IFE_FD: + return "IFE_FD"; + + /* IPE Paths */ + case CAM_AXI_PATH_DATA_IPE_RD_IN: + return "IPE_RD_IN"; + case CAM_AXI_PATH_DATA_IPE_RD_REF: + return "IPE_RD_REF"; + case CAM_AXI_PATH_DATA_IPE_WR_VID: + return "IPE_WR_VID"; + case CAM_AXI_PATH_DATA_IPE_WR_DISP: + return "IPE_WR_DISP"; + case CAM_AXI_PATH_DATA_IPE_WR_REF: + return "IPE_WR_REF"; + case CAM_AXI_PATH_DATA_IPE_WR_APP: + return "IPE_WR_APP"; + + /* OPE Paths */ + case CAM_AXI_PATH_DATA_OPE_RD_IN: + return "OPE_RD_IN"; + case CAM_AXI_PATH_DATA_OPE_RD_REF: + return "OPE_RD_REF"; + case CAM_AXI_PATH_DATA_OPE_WR_VID: + return "OPE_WR_VID"; + case CAM_AXI_PATH_DATA_OPE_WR_DISP: + return "OPE_WR_DISP"; + case CAM_AXI_PATH_DATA_OPE_WR_REF: + return "OPE_WR_REF"; + + /* SFE Paths */ + case CAM_AXI_PATH_DATA_SFE_NRDI: + return "SFE_NRDI"; + case CAM_AXI_PATH_DATA_SFE_RDI0: + return "SFE_RDI0"; + case CAM_AXI_PATH_DATA_SFE_RDI1: + return "SFE_RDI1"; + case CAM_AXI_PATH_DATA_SFE_RDI2: + return "SFE_RDI2"; + case CAM_AXI_PATH_DATA_SFE_RDI3: + return "SFE_RDI3"; + case CAM_AXI_PATH_DATA_SFE_RDI4: + return "SFE_RDI4"; + case CAM_AXI_PATH_DATA_SFE_STATS: + return "SFE_STATS"; + case CAM_AXI_PATH_DATA_CRE_RD_IN: + return "CRE_RD_IN"; + case CAM_AXI_PATH_DATA_CRE_WR_OUT: + return "CRE_WR_OUT"; + + /* OFE Paths */ + case CAM_AXI_PATH_DATA_OFE_RD_EXT: + return "OFE_RD_EXT"; + case CAM_AXI_PATH_DATA_OFE_RD_INT_PDI: + return "OFE_RD_INT_PDI"; + case CAM_AXI_PATH_DATA_OFE_RD_INT_HDR: + return 
"OFE_RD_INT_HDR"; + case CAM_AXI_PATH_DATA_OFE_WR_VID: + return "OFE_WR_VID"; + case CAM_AXI_PATH_DATA_OFE_WR_DISP: + return "OFE_WR_DISP"; + case CAM_AXI_PATH_DATA_OFE_WR_IR: + return "OFE_WR_IR"; + case CAM_AXI_PATH_DATA_OFE_WR_HDR_LTM: + return "OFE_WR_HDR_LTM"; + case CAM_AXI_PATH_DATA_OFE_WR_DC4: + return "OFE_WR_DC4"; + case CAM_AXI_PATH_DATA_OFE_WR_AI: + return "OFE_WR_AI"; + case CAM_AXI_PATH_DATA_OFE_WR_PDI: + return "OFE_WR_PDI"; + case CAM_AXI_PATH_DATA_OFE_WR_IDEALRAW: + return "OFE_WR_IDEALRAW"; + case CAM_AXI_PATH_DATA_OFE_WR_STATS: + return "OFE_WR_STATS"; + + /* Common Paths */ + case CAM_AXI_PATH_DATA_ALL: + return "DATA_ALL"; + default: + return "CPAS_PATH_INVALID"; + } +} +EXPORT_SYMBOL(cam_cpas_axi_util_path_type_to_string); + +const char *cam_cpas_axi_util_trans_type_to_string( + uint32_t transac_type) +{ + switch (transac_type) { + case CAM_AXI_TRANSACTION_READ: + return "TRANSAC_READ"; + case CAM_AXI_TRANSACTION_WRITE: + return "TRANSAC_WRITE"; + default: + return "TRANSAC_INVALID"; + } +} +EXPORT_SYMBOL(cam_cpas_axi_util_trans_type_to_string); + +const char *cam_cpas_axi_util_drv_vote_lvl_to_string( + uint32_t vote_lvl) +{ + switch (vote_lvl) { + case CAM_CPAS_VOTE_LEVEL_LOW: + return "VOTE_LVL_LOW"; + case CAM_CPAS_VOTE_LEVEL_HIGH: + return "VOTE_LVL_HIGH"; + default: + return "VOTE_LVL_INVALID"; + } +} +EXPORT_SYMBOL(cam_cpas_axi_util_drv_vote_lvl_to_string); + +const char *cam_cpas_util_vote_type_to_string(enum cam_cpas_vote_type vote_type) +{ + switch (vote_type) { + case CAM_CPAS_VOTE_TYPE_HLOS: + return "VOTE_TYPE_HLOS"; + case CAM_CPAS_VOTE_TYPE_DRV: + return "VOTE_TYPE_DRV"; + default: + return "VOTE_TYPE_INVALID"; + } +} +EXPORT_SYMBOL(cam_cpas_util_vote_type_to_string); + +int cam_cpas_query_drv_enable(bool *is_ddr_drv_enabled, bool *is_clk_drv_enabled) +{ + struct cam_hw_info *cpas_hw = NULL; + struct cam_cpas_private_soc *soc_private = NULL; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); 
+ return -ENODEV; + } + + if (!is_ddr_drv_enabled && !is_clk_drv_enabled) { + CAM_ERR(CAM_CPAS, "invalid input ddr: %pK clk: %pK", is_ddr_drv_enabled, + is_clk_drv_enabled); + return -EINVAL; + } + + cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv; + soc_private = (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + + if (is_ddr_drv_enabled) + *is_ddr_drv_enabled = soc_private->enable_cam_ddr_drv; + + if (is_clk_drv_enabled) + *is_clk_drv_enabled = soc_private->enable_cam_clk_drv; + + return 0; +} +EXPORT_SYMBOL(cam_cpas_query_drv_enable); + +int cam_cpas_csid_process_resume(uint32_t csid_idx) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_CSID_PROCESS_RESUME, &csid_idx, + sizeof(uint32_t)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_csid_process_resume); + + +int cam_cpas_csid_input_core_info_update(int csid_idx, int sfe_idx, bool set_port) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + struct cam_cpas_hw_cmd_csid_input_core_info_update core_info_update; + + core_info_update.csid_idx = csid_idx; + core_info_update.sfe_idx = sfe_idx; + core_info_update.set_port = set_port; + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_CSID_INPUT_CORE_INFO_UPDATE, &core_info_update, + sizeof(core_info_update)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} 
+EXPORT_SYMBOL(cam_cpas_csid_input_core_info_update); + +int cam_cpas_dump_camnoc_buff_fill_info(uint32_t client_handle) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_DUMP_BUFF_FILL_INFO, &client_handle, + sizeof(uint32_t)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_dump_camnoc_buff_fill_info); + +bool cam_cpas_is_part_supported(uint32_t flag, uint32_t hw_map, uint32_t part_info) +{ + int32_t i; + struct cam_hw_info *cpas_hw = g_cpas_intf->hw_intf->hw_priv; + struct cam_cpas *cpas_core = NULL; + struct cam_cpas_subpart_info *cam_subpart_info = NULL; + + mutex_lock(&cpas_hw->hw_mutex); + cpas_core = cpas_hw->core_info; + cam_subpart_info = cpas_core->cam_subpart_info; + + if (!cam_subpart_info) { + CAM_DBG(CAM_CPAS, "Invalid address of cam_subpart_info"); + mutex_unlock(&cpas_hw->hw_mutex); + return true; + } + + for (i = 0; i < cam_subpart_info->num_bits; i++) { + if ((cam_subpart_info->hw_bitmap_mask[i][0] == flag) && + (cam_subpart_info->hw_bitmap_mask[i][1] == hw_map)) { + CAM_DBG(CAM_CPAS, "flag: %u hw_map: %u part_info:0x%x", + flag, hw_map, part_info); + mutex_unlock(&cpas_hw->hw_mutex); + return ((part_info & BIT(i)) == 0); + } + } + + mutex_unlock(&cpas_hw->hw_mutex); + return true; +} + +bool cam_cpas_is_feature_supported(uint32_t flag, uint32_t hw_map, + uint32_t *fuse_val) +{ + struct cam_hw_info *cpas_hw = NULL; + struct cam_cpas_private_soc *soc_private = NULL; + bool supported = true; + int32_t i; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return false; + } + + cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv; + 
soc_private = + (struct cam_cpas_private_soc *)cpas_hw->soc_info.soc_private; + + if (flag >= CAM_CPAS_FUSE_FEATURE_MAX) { + CAM_ERR(CAM_CPAS, "Unknown feature flag %x", flag); + return false; + } + + supported = cam_cpas_is_part_supported(flag, hw_map, soc_private->part_info); + + for (i = 0; i < soc_private->num_feature_info; i++) + if (soc_private->feature_info[i].feature == flag) + break; + + if (i == soc_private->num_feature_info) + goto end; + + if (soc_private->feature_info[i].type == CAM_CPAS_FEATURE_TYPE_DISABLE + || (soc_private->feature_info[i].type == + CAM_CPAS_FEATURE_TYPE_ENABLE)) { + if ((soc_private->feature_info[i].hw_map & hw_map) == hw_map) { + if (!(supported && soc_private->feature_info[i].enable)) + supported = false; + } + } else { + if (!fuse_val) { + CAM_ERR(CAM_CPAS, + "Invalid arg fuse_val"); + } else { + *fuse_val = soc_private->feature_info[i].value; + } + } + +end: + return supported; +} +EXPORT_SYMBOL(cam_cpas_is_feature_supported); + +int cam_cpas_get_cpas_hw_version(uint32_t *hw_version) +{ + struct cam_hw_info *cpas_hw = NULL; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (!hw_version) { + CAM_ERR(CAM_CPAS, "invalid input %pK", hw_version); + return -EINVAL; + } + + cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv; + + *hw_version = cpas_hw->soc_info.hw_version; + + if (*hw_version == CAM_CPAS_TITAN_NONE) { + CAM_DBG(CAM_CPAS, "Didn't find a valid HW Version %d", + *hw_version); + } + + return 0; +} + +int cam_cpas_get_hw_info(uint32_t *camera_family, + struct cam_hw_version *camera_version, + struct cam_hw_version *cpas_version, + uint32_t **cam_caps, uint32_t *num_cap_mask, + struct cam_cpas_fuse_info *cam_fuse_info, + struct cam_cpas_domain_id_caps *domain_id_info) +{ + struct cam_hw_info *cpas_hw; + struct cam_cpas_private_soc *soc_private; + struct cam_cpas_domain_id_info cpas_domain_id; + int i; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + 
CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (!camera_family || !camera_version || !cpas_version || !cam_caps || !num_cap_mask) { + CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK %pK %pK", + camera_family, camera_version, cpas_version, cam_caps, num_cap_mask); + return -EINVAL; + } + + cpas_hw = g_cpas_intf->hw_intf->hw_priv; + soc_private = (struct cam_cpas_private_soc *) + cpas_hw->soc_info.soc_private; + + *camera_family = g_cpas_intf->hw_caps.camera_family; + *camera_version = g_cpas_intf->hw_caps.camera_version; + *cpas_version = g_cpas_intf->hw_caps.cpas_version; + *cam_caps = g_cpas_intf->hw_caps.camera_capability; + *num_cap_mask = g_cpas_intf->hw_caps.num_capability_reg; + + if (cam_fuse_info) + *cam_fuse_info = g_cpas_intf->hw_caps.fuse_info; + if (domain_id_info) { + cpas_domain_id = soc_private->domain_id_info; + + if (!soc_private->domain_id_info.domain_id_supported) { + domain_id_info->num_mapping = 0; + domain_id_info->is_supported = 0; + } else { + domain_id_info->is_supported = 1; + domain_id_info->num_mapping = + soc_private->domain_id_info.num_domain_ids; + + for (i = 0; i < domain_id_info->num_mapping; i++) { + domain_id_info->entries[i].domain_type = + cpas_domain_id.domain_id_entries[i].domain_type; + domain_id_info->entries[i].mapping_id = + cpas_domain_id.domain_id_entries[i].mapping_id; + } + } + } + + CAM_DBG(CAM_CPAS, "Family %d, version %d.%d cam_caps %d, domain_id: %s", + *camera_family, camera_version->major, + camera_version->minor, *cam_caps, + CAM_BOOL_TO_YESNO(soc_private->domain_id_info.domain_id_supported)); + + return 0; +} +EXPORT_SYMBOL(cam_cpas_get_hw_info); + +static inline enum cam_cpas_reg_base __cam_cpas_get_internal_reg_base( + enum cam_cpas_regbase_types reg_base) +{ + switch (reg_base) { + case CAM_CPAS_REGBASE_CPASTOP: + return CAM_CPAS_REG_CPASTOP; + default: + return CAM_CPAS_REG_MAX; + } +} + +int cam_cpas_set_addr_trans(uint32_t client_handle, + struct cam_cpas_addr_trans_data 
*addr_trans_data) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (!addr_trans_data) { + CAM_ERR(CAM_CPAS, "Invalid addr_trans_data"); + return -EINVAL; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + struct cam_cpas_hw_addr_trans_data cmd_add_trans; + + cmd_add_trans.client_handle = client_handle; + cmd_add_trans.addr_trans_data = addr_trans_data; + + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_SET_ADDR_TRANS, &cmd_add_trans, + sizeof(struct cam_cpas_hw_addr_trans_data)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_set_addr_trans); + +int cam_cpas_reg_write(uint32_t client_handle, enum cam_cpas_regbase_types reg_base, + uint32_t offset, bool mb, uint32_t value) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + struct cam_cpas_hw_cmd_reg_read_write cmd_reg_write; + enum cam_cpas_reg_base internal_reg_base; + + internal_reg_base = __cam_cpas_get_internal_reg_base(reg_base); + if (internal_reg_base >= CAM_CPAS_REG_MAX) { + CAM_ERR(CAM_CPAS, "Invalid reg base: %d for write ops client: %u", + reg_base, client_handle); + return -EINVAL; + } + + cmd_reg_write.client_handle = client_handle; + cmd_reg_write.reg_base = internal_reg_base; + cmd_reg_write.offset = offset; + cmd_reg_write.value = value; + cmd_reg_write.mb = mb; + + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write, + sizeof(struct cam_cpas_hw_cmd_reg_read_write)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} 
+EXPORT_SYMBOL(cam_cpas_reg_write); + +int cam_cpas_reg_read(uint32_t client_handle, enum cam_cpas_regbase_types reg_base, + uint32_t offset, bool mb, uint32_t *value) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (!value) { + CAM_ERR(CAM_CPAS, "Invalid arg value"); + return -EINVAL; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + struct cam_cpas_hw_cmd_reg_read_write cmd_reg_read; + enum cam_cpas_reg_base internal_reg_base; + + internal_reg_base = __cam_cpas_get_internal_reg_base(reg_base); + if (internal_reg_base >= CAM_CPAS_REG_MAX) { + CAM_ERR(CAM_CPAS, "Invalid reg base: %d for read ops client: %u", + reg_base, client_handle); + return -EINVAL; + } + + cmd_reg_read.client_handle = client_handle; + cmd_reg_read.reg_base = internal_reg_base; + cmd_reg_read.offset = offset; + cmd_reg_read.mb = mb; + cmd_reg_read.value = 0; + + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read, + sizeof(struct cam_cpas_hw_cmd_reg_read_write)); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + return rc; + } + + *value = cmd_reg_read.value; + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_reg_read); + +int cam_cpas_update_axi_floor_lvl(uint32_t client_handle, + int32_t axi_floor_lvl) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + struct cam_cpas_hw_axi_floor_lvl floor_lvl_info; + + floor_lvl_info.client_handle = client_handle; + floor_lvl_info.floor_lvl = axi_floor_lvl; + + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_AXI_FLOOR_LVL, &floor_lvl_info, + sizeof(struct cam_cpas_hw_axi_floor_lvl)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd 
CAM_CPAS_HW_AXI_FLOOR_LVL, rc=%d", + rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; + +} + +int cam_cpas_update_axi_vote(uint32_t client_handle, + struct cam_axi_vote *axi_vote) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (!axi_vote) { + CAM_ERR(CAM_CPAS, "NULL axi vote"); + return -EINVAL; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + struct cam_cpas_hw_cmd_axi_vote cmd_axi_vote; + + cmd_axi_vote.client_handle = client_handle; + cmd_axi_vote.axi_vote = axi_vote; + + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote, + sizeof(struct cam_cpas_hw_cmd_axi_vote)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_update_axi_vote); + +int cam_cpas_update_ahb_vote(uint32_t client_handle, + struct cam_ahb_vote *ahb_vote) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + struct cam_cpas_hw_cmd_ahb_vote cmd_ahb_vote; + + cmd_ahb_vote.client_handle = client_handle; + cmd_ahb_vote.ahb_vote = ahb_vote; + + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote, + sizeof(struct cam_cpas_hw_cmd_ahb_vote)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_update_ahb_vote); + +int cam_cpas_stop(uint32_t client_handle) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.stop) { + struct 
cam_cpas_hw_cmd_stop cmd_hw_stop; + + cmd_hw_stop.client_handle = client_handle; + + rc = g_cpas_intf->hw_intf->hw_ops.stop( + g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop, + sizeof(struct cam_cpas_hw_cmd_stop)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in stop, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid stop ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_stop); + +int cam_cpas_start(uint32_t client_handle, + struct cam_ahb_vote *ahb_vote, struct cam_axi_vote *axi_vote) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (!axi_vote) { + CAM_ERR(CAM_CPAS, "NULL axi vote"); + return -EINVAL; + } + + if (g_cpas_intf->hw_intf->hw_ops.start) { + struct cam_cpas_hw_cmd_start cmd_hw_start; + + cmd_hw_start.client_handle = client_handle; + cmd_hw_start.ahb_vote = ahb_vote; + cmd_hw_start.axi_vote = axi_vote; + + rc = g_cpas_intf->hw_intf->hw_ops.start( + g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start, + sizeof(struct cam_cpas_hw_cmd_start)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in start, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid start ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_start); + +void cam_cpas_log_votes(bool ddr_only) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_LOG_VOTE, &ddr_only, + sizeof(ddr_only)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + } + +} +EXPORT_SYMBOL(cam_cpas_log_votes); + +int cam_cpas_select_qos_settings(uint32_t selection_mask) +{ + int rc = 0; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -EBADR; + } + + if 
(g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_SELECT_QOS, &selection_mask, + sizeof(selection_mask)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EBADR; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_select_qos_settings); + +int cam_cpas_enable_tpg_mux_sel(uint32_t tpg_mux_sel) +{ + int rc = 0; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -EBADR; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_TPG_MUX_SEL, &tpg_mux_sel, + sizeof(tpg_mux_sel)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EBADR; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_enable_tpg_mux_sel); + +int cam_cpas_notify_event(const char *identifier_string, + int32_t identifier_value) +{ + int rc = 0; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -EBADR; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + struct cam_cpas_hw_cmd_notify_event event = { 0 }; + + event.identifier_string = identifier_string; + event.identifier_value = identifier_value; + + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_LOG_EVENT, &event, + sizeof(event)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EBADR; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_notify_event); + +int cam_cpas_unregister_client(uint32_t client_handle) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + 
rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_UNREGISTER_CLIENT, + &client_handle, sizeof(uint32_t)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_unregister_client); + +int cam_cpas_register_client( + struct cam_cpas_register_params *register_params) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params, + sizeof(struct cam_cpas_register_params)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_register_client); + +int cam_cpas_get_scid( + uint32_t type) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_GET_SCID, &type, + sizeof(type)); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_get_scid); + +int cam_cpas_prepare_subpart_info(enum cam_subparts_index idx, uint32_t num_subpart_available, + uint32_t num_subpart_functional) +{ + struct cam_hw_info *cpas_hw = NULL; + struct cam_cpas_private_soc *soc_private = NULL; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv; + + mutex_lock(&cpas_hw->hw_mutex); + soc_private = (struct cam_cpas_private_soc 
*)cpas_hw->soc_info.soc_private; + + if (!soc_private) { + CAM_ERR(CAM_CPAS, "Invalid soc_private: 0x%x", soc_private); + mutex_unlock(&cpas_hw->hw_mutex); + return -EINVAL; + } + + switch (idx) { + case CAM_IFE_HW_IDX: + soc_private->sysfs_info.num_ifes[CAM_CPAS_AVAILABLE_NUM_SUBPARTS] = + num_subpart_available; + soc_private->sysfs_info.num_ifes[CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS] = + num_subpart_functional; + break; + case CAM_IFE_LITE_HW_IDX: + soc_private->sysfs_info.num_ife_lites[CAM_CPAS_AVAILABLE_NUM_SUBPARTS] = + num_subpart_available; + soc_private->sysfs_info.num_ife_lites[CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS] = + num_subpart_functional; + break; + case CAM_SFE_HW_IDX: + soc_private->sysfs_info.num_sfes[CAM_CPAS_AVAILABLE_NUM_SUBPARTS] = + num_subpart_available; + soc_private->sysfs_info.num_sfes[CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS] = + num_subpart_functional; + break; + case CAM_CUSTOM_HW_IDX: + soc_private->sysfs_info.num_custom[CAM_CPAS_AVAILABLE_NUM_SUBPARTS] = + num_subpart_available; + soc_private->sysfs_info.num_custom[CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS] = + num_subpart_functional; + break; + default: + CAM_ERR(CAM_CPAS, "Invalid camera subpart index : %d", idx); + mutex_unlock(&cpas_hw->hw_mutex); + return -EINVAL; + } + + mutex_unlock(&cpas_hw->hw_mutex); + return 0; +} +EXPORT_SYMBOL(cam_cpas_prepare_subpart_info); + +int cam_cpas_activate_llcc(uint32_t type) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_ACTIVATE_LLC, &type, + sizeof(type)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_activate_llcc); + +int cam_cpas_deactivate_llcc(uint32_t type) +{ + int rc; + + if 
(!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_DEACTIVATE_LLC, &type, + sizeof(type)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_deactivate_llcc); + +int cam_cpas_configure_staling_llcc( + uint32_t type, uint32_t mode_param, + uint32_t operation_type, + uint32_t staling_distance) +{ + int rc; + struct cam_sys_cache_local_info sys_cache_info; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + if (!cam_cpas_is_notif_staling_supported()) + return -EOPNOTSUPP; + + sys_cache_info.mode = mode_param; + sys_cache_info.op_type = operation_type; + sys_cache_info.staling_distance + = staling_distance; + sys_cache_info.type = type; + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_CONFIGURE_STALING_LLC, &sys_cache_info, + sizeof(struct cam_sys_cache_local_info)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_configure_staling_llcc); + +int cam_cpas_notif_increment_staling_counter( + uint32_t type) +{ + int rc; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + if (!cam_cpas_is_notif_staling_supported()) + return -EOPNOTSUPP; + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_NOTIF_STALL_INC_LLC, &type, + sizeof(type)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in 
process_cmd, rc=%d", rc); + } else { + CAM_ERR(CAM_CPAS, "Invalid process_cmd ops"); + rc = -EINVAL; + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_notif_increment_staling_counter); + +bool cam_cpas_is_notif_staling_supported(void) +{ + #if IS_ENABLED(CONFIG_SPECTRA_LLCC_STALING) + return true; + #else + return false; + #endif +} +EXPORT_SYMBOL(cam_cpas_is_notif_staling_supported); + +bool cam_cpas_is_fw_based_sys_caching_supported(void) +{ + struct cam_hw_info *cpas_hw = NULL; + struct cam_cpas_private_soc *soc_private = NULL; + int i; + bool supported = false; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return false; + } + + cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv; + soc_private = + (struct cam_cpas_private_soc *)cpas_hw->soc_info.soc_private; + for (i = 0; i < soc_private->num_caches; i++) { + switch (soc_private->llcc_info[i].type) { + case CAM_LLCC_OFE_IP: + case CAM_LLCC_IPE_RT_IP: + case CAM_LLCC_IPE_SRT_IP: + case CAM_LLCC_IPE_RT_RF: + case CAM_LLCC_IPE_SRT_RF: + supported = true; + break; + default: + supported = false; + break; + } + } + + return supported; +} +EXPORT_SYMBOL(cam_cpas_is_fw_based_sys_caching_supported); + + +bool cam_cpas_query_domain_id_security_support(void) +{ + struct cam_hw_info *cpas_hw = NULL; + struct cam_cpas_private_soc *soc_private = NULL; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return false; + } + + cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv; + soc_private = + (struct cam_cpas_private_soc *)cpas_hw->soc_info.soc_private; + + return soc_private->domain_id_info.domain_id_supported; +} +EXPORT_SYMBOL(cam_cpas_query_domain_id_security_support); + +int cam_cpas_enable_clks_for_domain_id(bool enable) +{ + int rc = 0; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = 
g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_ENABLE_DISABLE_DOMAIN_ID_CLK, &enable, + sizeof(enable)); + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_enable_clks_for_domain_id); + +int cam_cpas_dump_state_monitor_info(struct cam_req_mgr_dump_info *info) +{ + int rc = 0; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + if (g_cpas_intf->hw_intf->hw_ops.process_cmd) { + rc = g_cpas_intf->hw_intf->hw_ops.process_cmd( + g_cpas_intf->hw_intf->hw_priv, + CAM_CPAS_HW_CMD_DUMP_STATE_MONITOR_INFO, info, + sizeof(*info)); + } + + return rc; +} +EXPORT_SYMBOL(cam_cpas_dump_state_monitor_info); + +#ifdef CONFIG_DYNAMIC_FD_PORT_CONFIG +static int cam_cpas_handle_fd_port_config(uint32_t is_secure) +{ + int rc = 0; + struct Object client_env, sc_object; + struct cam_hw_info *cpas_hw = NULL; + struct cam_cpas *cpas_core; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -EINVAL; + } + + cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv; + if (cpas_hw) { + cpas_core = (struct cam_cpas *) cpas_hw->core_info; + mutex_lock(&cpas_hw->hw_mutex); + if (cpas_core->streamon_clients > 0) { + CAM_ERR(CAM_CPAS, + "FD port config can not be updated during the session"); + mutex_unlock(&cpas_hw->hw_mutex); + return -EINVAL; + } + } else { + CAM_ERR(CAM_CPAS, "cpas_hw handle not initialized"); + return -EINVAL; + } + + /* Need to vote first before enabling clocks */ + rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to vote for the default ahb/axi clock, rc=%d", rc); + goto release_mutex; + } + + rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, + cpas_hw->soc_info.lowest_clk_level); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc); + goto remove_default_vote; + } + + rc = get_client_env_object(&client_env); + if (rc) { + CAM_ERR(CAM_CPAS, 
"Failed getting mink env object, rc: %d", rc); + goto disable_resources; + } + + rc = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed getting mink sc_object, rc: %d", rc); + goto client_release; + } + + rc = ITrustedCameraDriver_dynamicConfigureFDPort(sc_object, is_secure); + if (rc) { + if (rc == ITRUSTEDCAMERADRIVER_ERROR_NOT_ALLOWED) { + CAM_ERR(CAM_CPAS, "Dynamic FD port config not allowed"); + rc = -EPERM; + } else { + CAM_ERR(CAM_CPAS, "Mink secure call failed, rc: %d", rc); + rc = -EINVAL; + } + goto obj_release; + } + + rc = Object_release(sc_object); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed releasing secure camera object, rc: %d", rc); + goto client_release; + } + + rc = Object_release(client_env); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed releasing mink env object, rc: %d", rc); + goto disable_resources; + } + + rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc); + goto remove_default_vote; + } + + rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false); + if (rc) + CAM_ERR(CAM_CPAS, + "failed remove the vote on ahb/axi clock, rc=%d", rc); + + mutex_unlock(&cpas_hw->hw_mutex); + return rc; + +obj_release: + Object_release(sc_object); +client_release: + Object_release(client_env); +disable_resources: + cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true); +remove_default_vote: + cam_cpas_util_vote_default_ahb_axi(cpas_hw, false); +release_mutex: + mutex_unlock(&cpas_hw->hw_mutex); + return rc; +} +#endif + +static int cam_cpas_handle_custom_config_cmd(struct cam_cpas_intf *cpas_intf, + struct cam_custom_cmd *cmd) +{ + int32_t rc = 0; + + if (!cmd) { + CAM_ERR(CAM_CPAS, "Invalid input cmd"); + return -EINVAL; + } + + switch (cmd->cmd_type) { +#ifdef CONFIG_DYNAMIC_FD_PORT_CONFIG + case CAM_CPAS_CUSTOM_CMD_FD_PORT_CFG: { + struct cam_cpas_fd_port_config cfg; + + if (cmd->size < sizeof(cfg)) + 
return -EINVAL; + + rc = copy_from_user(&cfg, u64_to_user_ptr(cmd->handle), + sizeof(cfg)); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d", + rc); + rc = -EINVAL; + break; + } + + rc = cam_cpas_handle_fd_port_config(cfg.is_secure); + break; + } +#endif + default: + CAM_ERR(CAM_CPAS, "Invalid custom command %d for CPAS", cmd->cmd_type); + rc = -EINVAL; + break; + + } + + return rc; +} + +#ifdef CONFIG_SPECTRA_SECURE_DYN_PORT_CFG +static int cam_cpas_get_scm_device_type(enum cam_device_type device_type, + uint32_t *scm_dev_type) +{ + int rc = 0; + + switch (device_type) { + /* Add support for more devices as per the requirements */ + case CAM_CPAS_HW_TYPE_IPE: + *scm_dev_type = ITRUSTEDCAMERADRIVER_IPE; + break; + default: + CAM_ERR(CAM_CPAS, "unsupported dev type for SCM call", + device_type); + rc = -EINVAL; + } + return rc; +} + +static int cam_cpas_get_scm_port_info(struct cam_cpas_cp_mapping_config_info *config, + struct PortInfo *port_info) +{ + int rc = 0; + int i; + + switch (config->device_type) { + /* Add support for more devices as per the requirements */ + case CAM_CPAS_HW_TYPE_IPE: + for (i = 0; i < config->num_ports; i++) { + /* Add support for more ports as per the requirements */ + if (config->port_ids[i] == CAM_CPAS_IPE_OUTPUT_IMAGE_DISPLAY) { + port_info->port_id[port_info->num_ports++] = IPE_DISP_C; + port_info->port_id[port_info->num_ports++] = IPE_DISP_Y; + } else { + CAM_ERR(CAM_CPAS, "unsupported port for DCP call", + config->port_ids[i]); + rc = -EINVAL; + break; + } + } + break; + default: + CAM_ERR(CAM_CPAS, "unsupported dev type for SCM call", + config->device_type); + rc = -EINVAL; + } + return rc; +} + +int cam_cpas_config_cp_mapping_ctrl( + struct cam_cpas_cp_mapping_config_info *config) +{ + int rc = 0; + struct Object client_env, sc_object; + struct cam_hw_info *cpas_hw = NULL; + struct cam_cpas *cpas_core; + struct PortInfo port_info = {0}; + int scm_dev_type; + + if (!config) { + CAM_ERR(CAM_CPAS, "Invalid CP 
mapping config"); + return -EINVAL; + } + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -EINVAL; + } + + cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv; + if (!cpas_hw) { + CAM_ERR(CAM_CPAS, "cpas_hw handle not initialized"); + return -EINVAL; + } + + cpas_core = (struct cam_cpas *) cpas_hw->core_info; + mutex_lock(&cpas_hw->hw_mutex); + + if (cpas_core->streamon_clients == 0) { + /* Need to vote first before enabling clocks */ + rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to vote for the default ahb/axi clock, rc=%d", rc); + goto release_mutex; + } + + rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, + cpas_hw->soc_info.lowest_clk_level); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc); + goto remove_default_vote; + } + } + + rc = get_client_env_object(&client_env); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed getting mink env object, rc: %d", rc); + goto disable_resources; + } + + rc = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed getting mink sc_object, rc: %d", rc); + goto client_release; + } + + rc = cam_cpas_get_scm_device_type(config->device_type, &scm_dev_type); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed to get SCM device type, rc: %d", rc); + goto obj_release; + } + + port_info.hw_type = scm_dev_type; + port_info.hw_id_mask = config->hw_instance_id_mask; + if (config->protect) + port_info.protect = PROTECT_PORT; + else + port_info.protect = UNPROTECT_PORT; + + rc = cam_cpas_get_scm_port_info(config, &port_info); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed to get SCM port info, rc: %d", rc); + goto obj_release; + } + + rc = ITrustedCameraDriver_dynamicConfigurePortsV2(sc_object, &port_info, + CAM_CPAS_MIN_DPC_LEN); + if (rc) { + if (rc == ITRUSTEDCAMERADRIVER_ERROR_NOT_ALLOWED) { + CAM_ERR(CAM_CPAS, "Dynamic port config not allowed"); + rc = -EPERM; + } 
else { + CAM_ERR(CAM_CPAS, "Dynamic port config failed, rc: %d", rc); + rc = -EINVAL; + } + goto obj_release; + } + + rc = Object_release(sc_object); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed releasing secure camera object, rc: %d", rc); + goto client_release; + } + + rc = Object_release(client_env); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed releasing mink env object, rc: %d", rc); + goto disable_resources; + } + + if (cpas_core->streamon_clients == 0) { + rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc); + goto remove_default_vote; + } + + rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false); + if (rc) + CAM_ERR(CAM_CPAS, + "failed remove the vote on ahb/axi clock, rc=%d", rc); + } + mutex_unlock(&cpas_hw->hw_mutex); + return rc; + +obj_release: + Object_release(sc_object); +client_release: + Object_release(client_env); +disable_resources: + if (cpas_core->streamon_clients == 0) + cam_cpas_soc_disable_resources(&cpas_hw->soc_info, + true, true); +remove_default_vote: + if (cpas_core->streamon_clients == 0) + cam_cpas_util_vote_default_ahb_axi(cpas_hw, false); +release_mutex: + mutex_unlock(&cpas_hw->hw_mutex); + return rc; +} +#else +int cam_cpas_config_cp_mapping_ctrl( + struct cam_cpas_cp_mapping_config_info *config) +{ + CAM_ERR(CAM_CPAS, "Dynamic port config not supported on this platform"); + return -EINVAL; +} +#endif + +static int cam_cpas_sys_cache_cap_populate( + struct cam_cpas_sys_cache_query *sys_cache_query_cap) +{ + int rc = 0, i; + struct cam_hw_info *cpas_hw; + struct cam_cpas_private_soc *soc_private; + + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return -ENODEV; + } + + cpas_hw = g_cpas_intf->hw_intf->hw_priv; + soc_private = (struct cam_cpas_private_soc *) + cpas_hw->soc_info.soc_private; + sys_cache_query_cap->num_cache = soc_private->num_caches; + + for (i = 0; i < soc_private->num_caches; i++) { + 
sys_cache_query_cap->sys_cache_cap[i].scid_id + = soc_private->llcc_info[i].type; + sys_cache_query_cap->sys_cache_cap[i].scid_num + = soc_private->llcc_info[i].scid; + sys_cache_query_cap->sys_cache_cap[i].concur_usage + = soc_private->llcc_info[i].concur; + } + + return rc; +} + +static int cam_cpas_handle_generic_query_blob( + void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + int rc = 0; + uint32_t *camera_capability, num_cap_mask; + + CAM_DBG(CAM_CPAS, "blob_type=%d, blob_size=%d", + blob_type, blob_size); + + switch (blob_type) { + case CAM_CPAS_QUERY_BLOB_V3: { + struct cam_cpas_query_cap_v3 *query; + + if (blob_size < sizeof(struct cam_cpas_query_cap_v3)) { + CAM_ERR(CAM_CPAS, "Invalid blob size %u, blob_type=%d for query cap v3", + blob_size, blob_type); + return -EINVAL; + } + query = (struct cam_cpas_query_cap_v3 *)blob_data; + rc = cam_cpas_get_hw_info(&query->camera_family, + &query->camera_version, &query->cpas_version, + &camera_capability, &num_cap_mask, &query->fuse_info, + &query->domain_id_info); + if (rc) + break; + + query->camera_caps = camera_capability[0]; + break; + } + case CAM_CPAS_QUERY_BLOB_SYSCACHE: { + struct cam_cpas_sys_cache_query *sys_cache_query_cap = NULL; + + if (blob_size < sizeof(struct cam_cpas_sys_cache_query)) { + CAM_ERR(CAM_CPAS, "Invalid blob size %u, blob_type=%d for sys cache", + blob_size, blob_type); + return -EINVAL; + } + sys_cache_query_cap = (struct cam_cpas_sys_cache_query *)blob_data; + rc = cam_cpas_sys_cache_cap_populate(sys_cache_query_cap); + break; + } + default: + CAM_WARN(CAM_CPAS, "Unknown op code %d for CPAS", blob_type); + rc = 0; + break; + } + + return rc; +} + +int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf, + struct cam_control *cmd) +{ + int rc = 0; + uint32_t *camera_capability, num_cap_mask; + + if (!cmd) { + CAM_ERR(CAM_CPAS, "Invalid input cmd"); + return -EINVAL; + } + + switch (cmd->op_code) { + case CAM_QUERY_CAP: { + struct cam_cpas_query_cap 
query; + + rc = copy_from_user(&query, u64_to_user_ptr(cmd->handle), + sizeof(query)); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d", + rc); + break; + } + + rc = cam_cpas_get_hw_info(&query.camera_family, + &query.camera_version, &query.cpas_version, + &camera_capability, &num_cap_mask, NULL, NULL); + if (rc) + break; + + query.reserved = camera_capability[0]; + + rc = copy_to_user(u64_to_user_ptr(cmd->handle), &query, + sizeof(query)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc); + + break; + } + case CAM_QUERY_CAP_V2: { + struct cam_cpas_query_cap_v2 query; + + rc = copy_from_user(&query, u64_to_user_ptr(cmd->handle), + sizeof(query)); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d", + rc); + break; + } + + rc = cam_cpas_get_hw_info(&query.camera_family, + &query.camera_version, &query.cpas_version, + &camera_capability, &num_cap_mask, + &query.fuse_info, NULL); + if (rc) + break; + + query.reserved = camera_capability[0]; + + rc = copy_to_user(u64_to_user_ptr(cmd->handle), &query, + sizeof(query)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc); + + break; + } + case CAM_QUERY_CAP_V3: { + struct cam_cpas_query_cap_v3 query; + + rc = copy_from_user(&query, u64_to_user_ptr(cmd->handle), + sizeof(query)); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d", + rc); + break; + } + + rc = cam_cpas_get_hw_info(&query.camera_family, + &query.camera_version, &query.cpas_version, + &camera_capability, &num_cap_mask, &query.fuse_info, + &query.domain_id_info); + if (rc) + break; + + query.camera_caps = camera_capability[0]; + + rc = copy_to_user(u64_to_user_ptr(cmd->handle), &query, + sizeof(query)); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc); + + break; + } + case CAM_CUSTOM_DEV_CONFIG: { + struct cam_custom_cmd custom_cmd; + + rc = copy_from_user(&custom_cmd, u64_to_user_ptr(cmd->handle), + sizeof(custom_cmd)); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in 
copy from user, rc=%d", + rc); + break; + } + rc = cam_cpas_handle_custom_config_cmd(cpas_intf, &custom_cmd); + break; + } + case CAM_QUERY_CAP_GENERIC_BLOB: { + void *blob_data = NULL; + + if (!cmd->size) { + CAM_ERR(CAM_CPAS, "Invalid cmd size from user, size=%d", cmd->size); + rc = -EINVAL; + break; + } + + blob_data = CAM_MEM_ZALLOC(cmd->size, GFP_KERNEL); + + if (blob_data) { + rc = copy_from_user(blob_data, u64_to_user_ptr(cmd->handle), + cmd->size); + if (rc) { + CAM_MEM_FREE(blob_data); + CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d", + rc); + break; + } + + rc = cam_packet_util_process_generic_blob(cmd->size, blob_data, + cam_cpas_handle_generic_query_blob, NULL); + if (rc) { + CAM_MEM_FREE(blob_data); + break; + } + rc = copy_to_user(u64_to_user_ptr(cmd->handle), blob_data, + cmd->size); + if (rc) + CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc); + CAM_MEM_FREE(blob_data); + } else { + rc = -ENOMEM; + CAM_ERR(CAM_CPAS, "memory allocation is failed rc = %d", rc); + } + break; + } + case CAM_SD_SHUTDOWN: + break; + default: + CAM_ERR(CAM_CPAS, "Unknown op code %d for CPAS", cmd->op_code); + rc = -EINVAL; + break; + } + + return rc; +} + +static int cam_cpas_subdev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd); + + if (!cpas_intf || !cpas_intf->probe_done) { + CAM_ERR(CAM_CPAS, "CPAS not initialized"); + return -ENODEV; + } + + mutex_lock(&cpas_intf->intf_lock); + cpas_intf->open_cnt++; + CAM_DBG(CAM_CPAS, "CPAS Subdev open count %d", cpas_intf->open_cnt); + mutex_unlock(&cpas_intf->intf_lock); + + return 0; +} + +static int __cam_cpas_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd); + + if (!cpas_intf || !cpas_intf->probe_done) { + CAM_ERR(CAM_CPAS, "CPAS not initialized"); + return -ENODEV; + } + + mutex_lock(&cpas_intf->intf_lock); + if (cpas_intf->open_cnt <= 0) { + CAM_WARN(CAM_CPAS, 
"device already closed, open_cnt: %d", cpas_intf->open_cnt); + mutex_unlock(&cpas_intf->intf_lock); + return 0; + } + cpas_intf->open_cnt--; + CAM_DBG(CAM_CPAS, "CPAS Subdev close count %d", cpas_intf->open_cnt); + mutex_unlock(&cpas_intf->intf_lock); + + return 0; +} + +static int cam_cpas_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_CPAS, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return __cam_cpas_subdev_close(sd, fh); +} + +static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + int32_t rc; + struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd); + + if (!cpas_intf || !cpas_intf->probe_done) { + CAM_ERR(CAM_CPAS, "CPAS not initialized"); + return -ENODEV; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg); + break; + case CAM_SD_SHUTDOWN: + rc = __cam_cpas_subdev_close(sd, NULL); + break; + default: + CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd); + rc = -EINVAL; + break; + } + + return rc; +} + +#ifdef CONFIG_COMPAT +static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + struct cam_control cmd_data; + int32_t rc; + struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd); + + if (!cpas_intf || !cpas_intf->probe_done) { + CAM_ERR(CAM_CPAS, "CPAS not initialized"); + return -ENODEV; + } + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_CPAS, "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data); + break; + default: + CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd); + rc = -EINVAL; + break; + } + + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { 
+ CAM_ERR(CAM_CPAS, + "Failed to copy to user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + + return rc; +} +#endif + +static struct v4l2_subdev_core_ops cpas_subdev_core_ops = { + .ioctl = cam_cpas_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_cpas_subdev_compat_ioctl, +#endif +}; + +static const struct v4l2_subdev_ops cpas_subdev_ops = { + .core = &cpas_subdev_core_ops, +}; + +static const struct v4l2_subdev_internal_ops cpas_subdev_intern_ops = { + .open = cam_cpas_subdev_open, + .close = cam_cpas_subdev_close, +}; + +static int cam_cpas_subdev_register(struct platform_device *pdev) +{ + int rc; + struct cam_subdev *subdev; + + if (!g_cpas_intf) + return -EINVAL; + + subdev = &g_cpas_intf->subdev; + + subdev->name = CAM_CPAS_DEV_NAME; + subdev->pdev = pdev; + subdev->ops = &cpas_subdev_ops; + subdev->internal_ops = &cpas_subdev_intern_ops; + subdev->token = g_cpas_intf; + subdev->sd_flags = + V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; + subdev->ent_function = CAM_CPAS_DEVICE_TYPE; + subdev->close_seq_prior = CAM_SD_CLOSE_LOW_PRIORITY; + + rc = cam_register_subdev(subdev); + if (rc) { + CAM_ERR(CAM_CPAS, "failed register subdev: %s!", + CAM_CPAS_DEV_NAME); + return rc; + } + + platform_set_drvdata(g_cpas_intf->pdev, g_cpas_intf); + return rc; +} + +static int cam_cpas_dev_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_cpas_hw_caps *hw_caps; + struct cam_hw_intf *hw_intf; + int rc; + struct platform_device *pdev = to_platform_device(dev); + struct cam_hw_info *cpas_hw = NULL; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + if (g_cpas_intf) { + CAM_ERR(CAM_CPAS, "cpas component already binded"); + return -EALREADY; + } + + g_cpas_intf = CAM_MEM_ZALLOC(sizeof(*g_cpas_intf), GFP_KERNEL); + if (!g_cpas_intf) + return -ENOMEM; + + mutex_init(&g_cpas_intf->intf_lock); + g_cpas_intf->pdev = pdev; + + rc = 
cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf); + if (rc || (g_cpas_intf->hw_intf == NULL)) { + CAM_ERR(CAM_CPAS, "Failed in hw probe, rc=%d", rc); + goto error_destroy_mem; + } + + hw_intf = g_cpas_intf->hw_intf; + hw_caps = &g_cpas_intf->hw_caps; + cpas_hw = hw_intf->hw_priv; + + cpas_hw->soc_info.hw_id = CAM_HW_ID_CPAS + cpas_hw->soc_info.index; + rc = cam_vmvm_populate_hw_instance_info( + &cpas_hw->soc_info, cam_cpas_vmrm_callback_handler, hw_intf); + if (rc) { + CAM_ERR(CAM_CPAS, "hw instance populate failed: %d", rc); + goto error_hw_remove; + } + + if (hw_intf->hw_ops.get_hw_caps) { + rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv, + hw_caps, sizeof(struct cam_cpas_hw_caps)); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed in get_hw_caps, rc=%d", rc); + goto error_hw_remove; + } + } else { + CAM_ERR(CAM_CPAS, "Invalid get_hw_caps ops"); + goto error_hw_remove; + } + + rc = cam_cpas_subdev_register(pdev); + if (rc) + goto error_hw_remove; + + g_cpas_intf->probe_done = true; + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + CAM_DBG(CAM_CPAS, + "Component bound successfully %d, %d.%d.%d, %d.%d.%d, 0x%x", + hw_caps->camera_family, hw_caps->camera_version.major, + hw_caps->camera_version.minor, hw_caps->camera_version.incr, + hw_caps->cpas_version.major, hw_caps->cpas_version.minor, + hw_caps->cpas_version.incr, hw_caps->camera_capability); + + return rc; + +error_hw_remove: + cam_cpas_hw_remove(g_cpas_intf->hw_intf); +error_destroy_mem: + mutex_destroy(&g_cpas_intf->intf_lock); + CAM_MEM_FREE(g_cpas_intf); + g_cpas_intf = NULL; + CAM_ERR(CAM_CPAS, "CPAS component bind failed"); + return rc; +} + +static void cam_cpas_dev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + if (!CAM_CPAS_INTF_INITIALIZED()) { + CAM_ERR(CAM_CPAS, "cpas intf not initialized"); + return; + } + + mutex_lock(&g_cpas_intf->intf_lock); + g_cpas_intf->probe_done = false; 
+ cam_unregister_subdev(&g_cpas_intf->subdev); + cam_cpas_hw_remove(g_cpas_intf->hw_intf); + mutex_unlock(&g_cpas_intf->intf_lock); + mutex_destroy(&g_cpas_intf->intf_lock); + CAM_MEM_FREE(g_cpas_intf); + g_cpas_intf = NULL; +} + +const static struct component_ops cam_cpas_dev_component_ops = { + .bind = cam_cpas_dev_component_bind, + .unbind = cam_cpas_dev_component_unbind, +}; + +static int cam_cpas_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CPAS, "Adding CPAS INTF component"); + rc = component_add(&pdev->dev, &cam_cpas_dev_component_ops); + if (rc) + CAM_ERR(CAM_CPAS, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_cpas_dev_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_cpas_dev_component_ops); + return 0; +} + +static const struct of_device_id cam_cpas_dt_match[] = { + {.compatible = "qcom,cam-cpas"}, + {} +}; + +struct platform_driver cam_cpas_driver = { + .probe = cam_cpas_dev_probe, + .remove = cam_cpas_dev_remove, + .driver = { + .name = CAM_CPAS_DEV_NAME, + .owner = THIS_MODULE, + .of_match_table = cam_cpas_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_cpas_dev_init_module(void) +{ + return platform_driver_register(&cam_cpas_driver); +} + +void cam_cpas_dev_exit_module(void) +{ + platform_driver_unregister(&cam_cpas_driver); +} + +MODULE_DESCRIPTION("MSM CPAS driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_soc.c b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_soc.c new file mode 100644 index 0000000000..9040d76250 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_soc.c @@ -0,0 +1,2229 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "cam_cpas_api.h" +#include "cam_cpas_hw_intf.h" +#include "cam_cpas_hw.h" +#include "cam_cpas_soc.h" +#include "cam_compat.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" + +static uint cpas_dump; +module_param(cpas_dump, uint, 0644); + +#define CAM_ICP_CLK_NAME "cam_icp_clk" + +void cam_cpas_dump_tree_vote_info(struct cam_hw_info *cpas_hw, + const struct cam_cpas_tree_node *tree_node, + const char *identifier, int ddr_drv_idx, int cesta_drv_idx) +{ + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + + if ((cpas_dump & BIT(0)) == 0) + return; + + if (cesta_drv_idx > CAM_CPAS_PORT_HLOS_DRV) + CAM_INFO(CAM_PERF, + "%s node:%s lvl:%d cesta_drv_idx:%d DRV BW camnoc[%llu %llu]", + identifier, tree_node->node_name, tree_node->level_idx, cesta_drv_idx, + tree_node->bw_info[cesta_drv_idx].drv_vote.high.camnoc, + tree_node->bw_info[cesta_drv_idx].drv_vote.low.camnoc); + else + CAM_INFO(CAM_PERF, + "%s node:%s lvl:%d cesta_drv_idx:%d HLOS BW camnoc[%llu]", + identifier, tree_node->node_name, tree_node->level_idx, cesta_drv_idx, + tree_node->bw_info[cesta_drv_idx].hlos_vote.camnoc); + + if (ddr_drv_idx > CAM_CPAS_PORT_HLOS_DRV) + CAM_INFO(CAM_PERF, + "%s node:%s lvl:%d ddr_drv_idx:%d DRV BW ab[%llu %llu] ib[%llu %llu]", + identifier, tree_node->node_name, tree_node->level_idx, ddr_drv_idx, + tree_node->bw_info[ddr_drv_idx].drv_vote.high.ab, + tree_node->bw_info[ddr_drv_idx].drv_vote.low.ab, + tree_node->bw_info[ddr_drv_idx].drv_vote.high.ib, + tree_node->bw_info[ddr_drv_idx].drv_vote.low.ib); + else + CAM_INFO(CAM_PERF, + "%s node:%s lvl:%d ddr_drv_idx:%d HLOS BW ab[%llu] ib[%llu]", + identifier, tree_node->node_name, tree_node->level_idx, ddr_drv_idx, + tree_node->bw_info[ddr_drv_idx].hlos_vote.ab, + tree_node->bw_info[ddr_drv_idx].hlos_vote.ib); + + if (soc_private->enable_cam_ddr_drv) { + int i; + + 
CAM_INFO(CAM_PERF, + "%s node:%s lvl:%d drv_idx:%d cesta_drv_idx:%d ==== printing full node state", + identifier, tree_node->node_name, tree_node->level_idx, + ddr_drv_idx, cesta_drv_idx); + + for (i = 0; i < CAM_CPAS_MAX_DRV_PORTS; i++) { + + if (i == CAM_CPAS_PORT_HLOS_DRV) + CAM_INFO(CAM_PERF, + "idx[%d] HLOS camnoc[%llu], DDR ab[%llu] ib[%llu]", + i, + tree_node->bw_info[i].hlos_vote.camnoc, + tree_node->bw_info[i].hlos_vote.ab, + tree_node->bw_info[i].hlos_vote.ib); + else + CAM_INFO(CAM_PERF, + "idx[%d] DRV camnoc[%llu %llu], DDR ab[%llu %llu] ib[%llu %llu]", + i, + tree_node->bw_info[i].drv_vote.high.camnoc, + tree_node->bw_info[i].drv_vote.low.camnoc, + tree_node->bw_info[i].drv_vote.high.ab, + tree_node->bw_info[i].drv_vote.low.ab, + tree_node->bw_info[i].drv_vote.high.ib, + tree_node->bw_info[i].drv_vote.low.ib); + } + } + +} + +void cam_cpas_dump_full_tree_state(struct cam_hw_info *cpas_hw, const char *identifier) +{ + int j; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas_tree_node *curr_node; + + if ((cpas_dump & BIT(1)) == 0) + return; + + CAM_INFO(CAM_CPAS, "Dumping cpas tree full state start ============== %s", identifier); + + /* This will traverse through all nodes in the tree and print stats*/ + for (j = 0; j < CAM_CPAS_MAX_TREE_NODES; j++) { + if (!soc_private->tree_node[j]) + continue; + + curr_node = soc_private->tree_node[j]; + + if (soc_private->enable_cam_ddr_drv) { + int i; + + CAM_INFO(CAM_PERF, + "Identifier[%s] node:[%s] cell:%d lvl:%d PortIdx mnoc[%d %d %d %d] camnoc[%d] camnoc_max %d, bus_width:%d, drv_idx:%d", + identifier, curr_node->node_name, curr_node->cell_idx, + curr_node->level_idx, + curr_node->axi_port_idx_arr[CAM_CPAS_PORT_HLOS_DRV], + curr_node->axi_port_idx_arr[CAM_CPAS_PORT_DRV_0], + curr_node->axi_port_idx_arr[CAM_CPAS_PORT_DRV_1], + curr_node->axi_port_idx_arr[CAM_CPAS_PORT_DRV_2], + curr_node->camnoc_axi_port_idx, + 
curr_node->camnoc_max_needed, + curr_node->bus_width_factor, + curr_node->drv_voting_idx); + + for (i = 0; i < CAM_CPAS_MAX_DRV_PORTS; i++) { + if (i == CAM_CPAS_PORT_HLOS_DRV) + CAM_INFO(CAM_PERF, + " idx[%d] HLOS camnoc[%llu], DDR ab[%llu] ib[%llu]", + i, + curr_node->bw_info[i].hlos_vote.camnoc, + curr_node->bw_info[i].hlos_vote.ab, + curr_node->bw_info[i].hlos_vote.ib); + else + CAM_INFO(CAM_PERF, + " idx[%d] DRV camnoc[%llu %llu], DDR ab[%llu %llu] ib[%llu %llu]", + i, + curr_node->bw_info[i].drv_vote.high.camnoc, + curr_node->bw_info[i].drv_vote.low.camnoc, + curr_node->bw_info[i].drv_vote.high.ab, + curr_node->bw_info[i].drv_vote.low.ab, + curr_node->bw_info[i].drv_vote.high.ib, + curr_node->bw_info[i].drv_vote.low.ib); + } + } else { + CAM_INFO(CAM_CPAS, + "[%s] Cell[%d] level[%d] PortIdx[%d][%d] camnoc_bw[%d %d %lld %lld] mnoc_bw[%lld %lld]", + curr_node->node_name, curr_node->cell_idx, + curr_node->level_idx, + curr_node->axi_port_idx_arr[CAM_CPAS_PORT_HLOS_DRV], + curr_node->camnoc_axi_port_idx, + curr_node->camnoc_max_needed, + curr_node->bus_width_factor, + curr_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc, + curr_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.camnoc * + curr_node->bus_width_factor, + curr_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.ab, + curr_node->bw_info[CAM_CPAS_PORT_HLOS_DRV].hlos_vote.ib); + } + } + + CAM_INFO(CAM_CPAS, "Dumping cpas tree full state end ============== %s", identifier); +} + +void cam_cpas_dump_cons_axi_vote_info( + const struct cam_cpas_client *cpas_client, + const char *identifier, + struct cam_axi_consolidate_vote *axi_vote) +{ + int i; + + if ((cpas_dump & BIT(0)) == 0) + return; + + if (!axi_vote || (axi_vote->num_paths > + CAM_CPAS_MAX_PATHS_PER_CLIENT)) { + CAM_ERR(CAM_PERF, "Invalid num_paths %d", + axi_vote ? 
axi_vote->num_paths : -1); + return; + } + + for (i = 0; i < axi_vote->num_paths; i++) { + CAM_INFO(CAM_PERF, + "Client [%s][%d] : [%s], Path=[%d] [%d],[HIGH LOW] camnoc[%llu %llu], mnoc_ab[%llu %llu], mnoc_ib[%llu %llu]", + cpas_client->data.identifier, cpas_client->data.cell_index, + identifier, + axi_vote->axi_path[i].path_data_type, + axi_vote->axi_path[i].transac_type, + axi_vote->axi_path[i].drv_vote.high.camnoc, + axi_vote->axi_path[i].drv_vote.low.camnoc, + axi_vote->axi_path[i].drv_vote.high.ab, + axi_vote->axi_path[i].drv_vote.low.ab, + axi_vote->axi_path[i].drv_vote.high.ib, + axi_vote->axi_path[i].drv_vote.low.ib); + } + +} + +void cam_cpas_dump_axi_vote_info( + const struct cam_cpas_client *cpas_client, + const char *identifier, + struct cam_axi_vote *axi_vote) +{ + int i; + + if ((cpas_dump & BIT(0)) == 0) + return; + + if (!axi_vote || (axi_vote->num_paths > + CAM_CPAS_MAX_PATHS_PER_CLIENT)) { + CAM_ERR(CAM_PERF, "Invalid num_paths %d", + axi_vote ? axi_vote->num_paths : -1); + return; + } + + for (i = 0; i < axi_vote->num_paths; i++) { + CAM_INFO(CAM_PERF, + "Client [%s][%d] : [%s], Path=[%d] [%d], [%s], camnoc[%llu], mnoc_ab[%llu], mnoc_ib[%llu]", + cpas_client->data.identifier, cpas_client->data.cell_index, + identifier, + axi_vote->axi_path[i].path_data_type, + axi_vote->axi_path[i].transac_type, + cam_cpas_axi_util_drv_vote_lvl_to_string(axi_vote->axi_path[i].vote_level), + axi_vote->axi_path[i].camnoc_bw, + axi_vote->axi_path[i].mnoc_ab_bw, + axi_vote->axi_path[i].mnoc_ib_bw); + } + +} + +void cam_cpas_util_debug_parse_data( + struct cam_cpas_private_soc *soc_private) +{ + int i, j; + struct cam_cpas_tree_node *curr_node = NULL; + + if ((cpas_dump & BIT(0)) == 0) + return; + + for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) { + if (!soc_private->tree_node[i]) + break; + + curr_node = soc_private->tree_node[i]; + CAM_INFO(CAM_CPAS, + "NODE cell_idx: %d, level: %d, name: %s, axi_port_idx: %d, merge_type: %d, parent_name: %s camnoc_max_needed: %d", 
+ curr_node->cell_idx, curr_node->level_idx, + curr_node->node_name, curr_node->axi_port_idx_arr[CAM_CPAS_PORT_HLOS_DRV], + curr_node->merge_type, curr_node->parent_node ? + curr_node->parent_node->node_name : "no parent", + curr_node->camnoc_max_needed); + + if (curr_node->level_idx) + continue; + + CAM_INFO(CAM_CPAS, "path_type: %d, transac_type: %s drv_voting_idx:%d", + curr_node->path_data_type, + cam_cpas_axi_util_trans_type_to_string( + curr_node->path_trans_type), curr_node->drv_voting_idx); + + for (j = 0; j < CAM_CPAS_PATH_DATA_MAX; j++) { + if (curr_node->constituent_paths[j]) + CAM_INFO(CAM_CPAS, "Constituent path: %d", j); + } + } + + CAM_INFO(CAM_CPAS, "NUMBER OF NODES PARSED: %d", i); +} + +int cam_cpas_node_tree_cleanup(struct cam_cpas *cpas_core, + struct cam_cpas_private_soc *soc_private) +{ + int i = 0; + + for (i = 0; i < CAM_CPAS_MAX_TREE_NODES; i++) { + if (soc_private->tree_node[i]) { + CAM_MEM_FREE(soc_private->tree_node[i]->bw_info); + CAM_MEM_FREE(soc_private->tree_node[i]->axi_port_idx_arr); + soc_private->tree_node[i]->bw_info = NULL; + soc_private->tree_node[i]->axi_port_idx_arr = NULL; + of_node_put(soc_private->tree_node[i]->tree_dev_node); + CAM_MEM_FREE(soc_private->tree_node[i]); + soc_private->tree_node[i] = NULL; + } + } + + for (i = 0; i < CAM_CPAS_MAX_TREE_LEVELS; i++) { + if (soc_private->level_node[i]) { + of_node_put(soc_private->level_node[i]); + soc_private->level_node[i] = NULL; + } + } + + if (soc_private->camera_bus_node) { + of_node_put(soc_private->camera_bus_node); + soc_private->camera_bus_node = NULL; + } + + mutex_destroy(&cpas_core->tree_lock); + + return 0; +} + +static int cam_cpas_util_path_type_to_idx(uint32_t *path_data_type) +{ + if (*path_data_type >= CAM_CPAS_PATH_DATA_CONSO_OFFSET) { + *path_data_type = CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT + + (*path_data_type % CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT); + } + else { + *path_data_type %= CAM_CPAS_MAX_GRAN_PATHS_PER_CLIENT; + } + + if (*path_data_type >= 
CAM_CPAS_PATH_DATA_MAX) { + CAM_ERR(CAM_CPAS, "index Invalid: %u", *path_data_type); + return -EINVAL; + } + + return 0; +} + +static int cam_cpas_update_camnoc_node(struct cam_cpas *cpas_core, + struct device_node *curr_node, + struct cam_cpas_tree_node *cpas_node_ptr, + int *camnoc_idx) + +{ + struct device_node *camnoc_node; + int rc; + + camnoc_node = of_find_node_by_name(curr_node, + "qcom,axi-port-camnoc"); + if (camnoc_node) { + + if (*camnoc_idx >= + CAM_CPAS_MAX_AXI_PORTS) { + CAM_ERR(CAM_CPAS, "CAMNOC axi index overshoot %d", + *camnoc_idx); + return -EINVAL; + } + + cpas_core->camnoc_axi_port[*camnoc_idx] + .axi_port_node = camnoc_node; + rc = of_property_read_string( + curr_node, + "qcom,axi-port-name", + &cpas_core->camnoc_axi_port[*camnoc_idx] + .axi_port_name); + + if (rc) { + CAM_ERR(CAM_CPAS, + "fail to read camnoc-port-name rc=%d", + rc); + return rc; + } + cpas_node_ptr->camnoc_axi_port_idx = *camnoc_idx; + cpas_core->num_camnoc_axi_ports++; + (*camnoc_idx)++; + } + return 0; +} + +static int cam_cpas_parse_mnoc_node(struct cam_cpas *cpas_core, + struct cam_cpas_private_soc *soc_private, struct cam_cpas_tree_node *curr_node_ptr, + struct device_node *mnoc_node, int *mnoc_idx) +{ + int rc = 0, count, i; + bool ib_voting_needed = false; + struct of_phandle_args src_args = {0}, dst_args = {0}; + struct cam_cpas_axi_port *curr_axi_port; + + ib_voting_needed = of_property_read_bool(curr_node_ptr->tree_dev_node, + "ib-bw-voting-needed"); + curr_node_ptr->is_rt_node = of_property_read_bool(curr_node_ptr->tree_dev_node, + "rt-axi-port"); + + if (soc_private->bus_icc_based) { + if (soc_private->use_cam_icc_path_str) + count = of_property_count_strings(mnoc_node, "cam-icc-path-names"); + else + count = of_property_count_strings(mnoc_node, "interconnect-names"); + + if (count <= 0) { + CAM_ERR(CAM_ISP, "Missing icc info for camera bus nodes count: %d", count); + return -EINVAL; + } else if (count > CAM_CPAS_MAX_DRV_PORTS) { + CAM_ERR(CAM_CPAS, "Number of 
interconnects %d greater than max ports %d", + count, CAM_CPAS_MAX_DRV_PORTS); + return -EINVAL; + } + + for (i = 0; i < count; i++) { + if ((i > 0) && !soc_private->enable_cam_ddr_drv) + break; + + if (*mnoc_idx >= CAM_CPAS_MAX_AXI_PORTS) { + CAM_ERR(CAM_CPAS, "Invalid mnoc index: %d", *mnoc_idx); + return -EINVAL; + } + + curr_axi_port = &cpas_core->axi_port[*mnoc_idx]; + curr_axi_port->axi_port_node = mnoc_node; + + if (soc_private->use_cam_icc_path_str) { + rc = of_property_read_string_index(mnoc_node, "cam-icc-path-names", + i, &curr_axi_port->bus_client.common_data.name); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to read cam icc name strings mnoc_node: %s i: %d rc=%d", + curr_node_ptr->node_name, i, rc); + return rc; + } + } else { + rc = of_property_read_string_index(mnoc_node, "interconnect-names", + i, &curr_axi_port->bus_client.common_data.name); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to read interconnect-names rc=%d", rc); + return rc; + } + + /* interconnect is not supported on tvm */ + if (!cam_vmrm_proxy_icc_voting_enable()) { + rc = of_parse_phandle_with_args(mnoc_node, "interconnects", + "#interconnect-cells", (2 * i), &src_args); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to read axi bus src info rc=%d", + rc); + return -EINVAL; + } + + of_node_put(src_args.np); + if (src_args.args_count != 1) { + CAM_ERR(CAM_CPAS, + "Invalid number of axi src args: %d", + src_args.args_count); + return -EINVAL; + } + + curr_axi_port->bus_client.common_data.src_id = + src_args.args[0]; + + rc = of_parse_phandle_with_args(mnoc_node, "interconnects", + "#interconnect-cells", ((2 * i) + 1), &dst_args); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to read axi bus dst info rc=%d", + rc); + return -EINVAL; + } + + of_node_put(dst_args.np); + if (dst_args.args_count != 1) { + CAM_ERR(CAM_CPAS, + "Invalid number of axi dst args: %d", + dst_args.args_count); + return -EINVAL; + } + + curr_axi_port->bus_client.common_data.dst_id = + dst_args.args[0]; + } + } + + 
curr_axi_port->bus_client.common_data.num_usecases = 2; + curr_axi_port->axi_port_name = curr_axi_port->bus_client.common_data.name; + curr_axi_port->drv_idx = i; + + if (i > CAM_CPAS_PORT_HLOS_DRV) { + curr_axi_port->bus_client.common_data.is_drv_port = true; + curr_axi_port->curr_bw.vote_type = CAM_CPAS_VOTE_TYPE_DRV; + curr_axi_port->applied_bw.vote_type = CAM_CPAS_VOTE_TYPE_DRV; + curr_axi_port->cam_rsc_dev = + cam_cpas_get_rsc_dev_for_drv(i - CAM_CPAS_PORT_DRV_0); + if (!curr_axi_port->cam_rsc_dev) { + CAM_ERR(CAM_CPAS, + "Port[%s][%d] Failed to get rsc device drv_idx:%d", + curr_axi_port->axi_port_name, *mnoc_idx, i); + rc = -ENODEV; + goto err; + } + } + + /* + * The indexes of axi_port_idx_arr map to drv_voting_idx, + * with 0 pointing to hlos drv bus ID + */ + curr_node_ptr->axi_port_idx_arr[i] = *mnoc_idx; + curr_axi_port->ib_bw_voting_needed = ib_voting_needed; + curr_axi_port->is_rt = curr_node_ptr->is_rt_node; + CAM_DBG(CAM_PERF, "Adding Bus Client=[%s] : src=%d, dst=%d mnoc_idx:%d", + curr_axi_port->bus_client.common_data.name, + curr_axi_port->bus_client.common_data.src_id, + curr_axi_port->bus_client.common_data.dst_id, + *mnoc_idx); + (*mnoc_idx)++; + cpas_core->num_axi_ports++; + } + } else { + if (soc_private->enable_cam_ddr_drv) { + CAM_ERR(CAM_CPAS, "DRV not supported for old bus scaling clients"); + return -EPERM; + } + + if (*mnoc_idx >= CAM_CPAS_MAX_AXI_PORTS) { + CAM_ERR(CAM_CPAS, "Invalid mnoc index: %d", *mnoc_idx); + return -EINVAL; + } + + curr_axi_port = &cpas_core->axi_port[*mnoc_idx]; + curr_axi_port->axi_port_node = mnoc_node; + rc = of_property_read_string(curr_node_ptr->tree_dev_node, "qcom,axi-port-name", + &curr_axi_port->bus_client.common_data.name); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to read mnoc-port-name rc=%d", + rc); + return rc; + } + + curr_axi_port->axi_port_name = + curr_axi_port->bus_client.common_data.name; + curr_node_ptr->axi_port_idx_arr[0] = *mnoc_idx; + curr_axi_port->ib_bw_voting_needed = 
ib_voting_needed; + curr_axi_port->is_rt = curr_node_ptr->is_rt_node; + (*mnoc_idx)++; + cpas_core->num_axi_ports++; + } + +err: + return rc; +} + +static int cam_cpas_parse_node_tree(struct cam_cpas *cpas_core, + struct device_node *of_node, struct cam_cpas_private_soc *soc_private) +{ + struct device_node *camera_bus_node; + struct device_node *level_node; + struct device_node *curr_node; + struct device_node *parent_node; + struct device_node *mnoc_node; + int mnoc_idx = 0, camnoc_idx = 0, level_idx = 0; + uint32_t path_idx; + bool camnoc_max_needed = false; + struct cam_cpas_tree_node *curr_node_ptr = NULL; + struct cam_cpas_client *curr_client = NULL; + const char *client_name = NULL; + uint32_t client_idx = 0, cell_idx = 0; + uint32_t auto_path_incr[CAM_CPAS_MAX_CLIENTS] = {0}; + uint8_t niu_idx = 0; + int rc = 0, count = 0, i, j, num_drv_ports; + + camera_bus_node = of_get_child_by_name(of_node, "camera-bus-nodes"); + if (!camera_bus_node) { + CAM_ERR(CAM_CPAS, "Camera Bus node not found in cpas DT node"); + return -EINVAL; + } + + soc_private->camera_bus_node = camera_bus_node; + + for_each_available_child_of_node(camera_bus_node, level_node) { + rc = of_property_read_u32(level_node, "level-index", &level_idx); + if (rc) { + CAM_ERR(CAM_CPAS, "Error reading level idx rc: %d", rc); + return rc; + } + if (level_idx >= CAM_CPAS_MAX_TREE_LEVELS) { + CAM_ERR(CAM_CPAS, "Invalid level idx: %d", level_idx); + return -EINVAL; + } + + soc_private->level_node[level_idx] = level_node; + } + + if (soc_private->enable_smart_qos) + soc_private->smart_qos_info->num_rt_wr_nius = 0; + + if (soc_private->enable_cam_ddr_drv) + num_drv_ports = CAM_CPAS_MAX_DRV_PORTS; + else + num_drv_ports = 1; + + for (level_idx = (CAM_CPAS_MAX_TREE_LEVELS - 1); level_idx >= 0; level_idx--) { + level_node = soc_private->level_node[level_idx]; + if (!level_node) + continue; + + CAM_DBG(CAM_CPAS, "Parsing level %d nodes", level_idx); + + camnoc_max_needed = of_property_read_bool(level_node, 
"camnoc-max-needed"); + for_each_available_child_of_node(level_node, curr_node) { + curr_node_ptr = CAM_MEM_ZALLOC(sizeof(struct cam_cpas_tree_node), + GFP_KERNEL); + if (!curr_node_ptr) + return -ENOMEM; + + curr_node_ptr->tree_dev_node = curr_node; + rc = of_property_read_u32(curr_node, "cell-index", + &curr_node_ptr->cell_idx); + if (rc) { + CAM_ERR(CAM_CPAS, "Node index not found"); + return rc; + } + + CAM_DBG(CAM_CPAS, "Parsing Node with cell index %d", + curr_node_ptr->cell_idx); + + if (curr_node_ptr->cell_idx >= + CAM_CPAS_MAX_TREE_NODES) { + CAM_ERR(CAM_CPAS, "Invalid cell idx: %d", curr_node_ptr->cell_idx); + return -EINVAL; + } + + soc_private->tree_node[curr_node_ptr->cell_idx] = curr_node_ptr; + curr_node_ptr->level_idx = level_idx; + + rc = of_property_read_string(curr_node, "node-name", + &curr_node_ptr->node_name); + if (rc) { + CAM_ERR(CAM_CPAS, "failed to read node-name rc=%d", rc); + return rc; + } + + curr_node_ptr->bw_info = + CAM_MEM_ZALLOC((sizeof(struct cam_cpas_axi_bw_info) * + num_drv_ports), GFP_KERNEL); + if (!curr_node_ptr->bw_info) { + CAM_ERR(CAM_CPAS, "Failed in allocating memory for bw info"); + return -ENOMEM; + } + + curr_node_ptr->axi_port_idx_arr = + CAM_MEM_ZALLOC((sizeof(int) * num_drv_ports), + GFP_KERNEL); + if (!curr_node_ptr->axi_port_idx_arr) { + CAM_ERR(CAM_CPAS, "Failed in allocating memory for port indices"); + return -ENOMEM; + } + + if (soc_private->enable_smart_qos && (level_idx == 1) && + of_property_read_bool(curr_node, "rt-wr-niu")) { + + rc = of_property_read_u32(curr_node, "priority-lut-low-offset", + &curr_node_ptr->pri_lut_low_offset); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid priority low offset rc %d", rc); + return rc; + } + + rc = of_property_read_u32(curr_node, "priority-lut-high-offset", + &curr_node_ptr->pri_lut_high_offset); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid priority high offset rc %d", rc); + return rc; + } + + rc = of_property_read_u32(curr_node, "niu-size", + &curr_node_ptr->niu_size); + if 
(rc || !curr_node_ptr->niu_size) { + CAM_ERR(CAM_CPAS, "Invalid niu size rc %d", rc); + return rc; + } + + niu_idx = soc_private->smart_qos_info->num_rt_wr_nius; + if (niu_idx >= CAM_CPAS_MAX_RT_WR_NIU_NODES) { + CAM_ERR(CAM_CPAS, "Invalid number of level1 nodes %d", + soc_private->smart_qos_info->num_rt_wr_nius); + return -EINVAL; + } + + soc_private->smart_qos_info->rt_wr_niu_node[niu_idx] = + curr_node_ptr; + soc_private->smart_qos_info->num_rt_wr_nius++; + + CAM_DBG(CAM_CPAS, + "level1[%d] : Node %s idx %d priority offset 0x%x, NIU size %dKB", + niu_idx, curr_node_ptr->node_name, curr_node_ptr->cell_idx, + curr_node_ptr->pri_lut_low_offset, curr_node_ptr->niu_size); + } + + curr_node_ptr->camnoc_max_needed = camnoc_max_needed; + rc = of_property_read_u32(curr_node, "bus-width-factor", + &curr_node_ptr->bus_width_factor); + if (rc) + curr_node_ptr->bus_width_factor = 1; + + rc = of_property_read_u32(curr_node, "traffic-merge-type", + &curr_node_ptr->merge_type); + if (rc) + curr_node_ptr->merge_type = CAM_CPAS_TRAFFIC_MERGE_SUM; + + for (j = 0; j < num_drv_ports; j++) + curr_node_ptr->axi_port_idx_arr[j] = -1; + + mnoc_node = of_get_child_by_name(curr_node, "qcom,axi-port-mnoc"); + if (mnoc_node) { + rc = cam_cpas_parse_mnoc_node(cpas_core, soc_private, curr_node_ptr, + mnoc_node, &mnoc_idx); + if (rc) { + CAM_ERR(CAM_CPAS, "failed to parse mnoc node info rc=%d", + rc); + return rc; + } + } + + if (!soc_private->control_camnoc_axi_clk) { + rc = cam_cpas_update_camnoc_node(cpas_core, curr_node, + curr_node_ptr, &camnoc_idx); + if (rc) { + CAM_ERR(CAM_CPAS, "failed to parse camnoc node info rc=%d", + rc); + return rc; + } + } + + if (level_idx == 0) { + bool traffic_data_found = false, constituent_path_found = false; + + traffic_data_found = + (of_find_property(curr_node, "traffic-data", + NULL) ? true : false); + constituent_path_found = + (of_find_property(curr_node, "constituent-paths", + NULL) ? 
true : false); + + if (!traffic_data_found && !constituent_path_found) { + CAM_ERR(CAM_CPAS, + "Essential path identifier property missing, traffic-data:%s constituent-paths:%s", + CAM_BOOL_TO_YESNO(traffic_data_found), + CAM_BOOL_TO_YESNO(constituent_path_found)); + return -EINVAL; + } + + rc = of_property_read_string(curr_node, "client-name", + &client_name); + if (rc) { + CAM_ERR(CAM_CPAS, "Client name not found"); + return -EINVAL; + } + + rc = cam_common_util_get_string_index(soc_private->client_name, + soc_private->num_clients, client_name, &client_idx); + if (rc) { + CAM_ERR(CAM_CPAS, "client name not found in list: %s", + client_name); + return rc; + } + + if (client_idx >= CAM_CPAS_MAX_CLIENTS) { + CAM_ERR(CAM_ISP, "Client idx:%d exceeds max:%d", client_idx, + CAM_CPAS_MAX_CLIENTS); + return -EINVAL; + } + + rc = of_property_read_u32(curr_node, "traffic-data", + &curr_node_ptr->path_data_type); + if (rc && !constituent_path_found) { + CAM_ERR(CAM_CPAS, "Path Data type not found"); + return rc; + } + + /* + * If constituent path found for a node, update path_data_type such + * that it always has a unique path type value for a particular + * client + */ + if (constituent_path_found) + curr_node_ptr->path_data_type = + (CAM_CPAS_PATH_DATA_CONSO_OFFSET + + (++auto_path_incr[client_idx])); + + rc = cam_cpas_util_path_type_to_idx(&curr_node_ptr->path_data_type); + if (rc) { + CAM_ERR(CAM_CPAS, "Incorrect path type for client: %s", + client_name); + return rc; + } + + rc = of_property_read_u32(curr_node, "traffic-transaction-type", + &curr_node_ptr->path_trans_type); + if (rc) { + CAM_ERR(CAM_CPAS, "Path Transac type not found"); + return rc; + } + + if (curr_node_ptr->path_trans_type >= CAM_CPAS_TRANSACTION_MAX) { + CAM_ERR(CAM_CPAS, "Invalid transac type: %d", + curr_node_ptr->path_trans_type); + return -EINVAL; + } + + count = of_property_count_u32_elems(curr_node, "constituent-paths"); + for (i = 0; i < count; i++) { + rc = 
of_property_read_u32_index(curr_node, + "constituent-paths", i, &path_idx); + if (rc) { + CAM_ERR(CAM_CPAS, "No constituent path at %d", i); + return rc; + } + + rc = cam_cpas_util_path_type_to_idx(&path_idx); + if (rc) + return rc; + + curr_node_ptr->constituent_paths[path_idx] = true; + } + + curr_client = cpas_core->cpas_client[client_idx]; + curr_client->tree_node_valid = true; + curr_client->tree_node[curr_node_ptr->path_data_type] + [curr_node_ptr->path_trans_type] = curr_node_ptr; + + if (soc_private->enable_cam_ddr_drv) { + rc = of_property_read_u32(curr_node, "drv-voting-index", + &curr_node_ptr->drv_voting_idx); + if (rc) + curr_node_ptr->merge_type = CAM_CPAS_PORT_HLOS_DRV; + + if (curr_node_ptr->drv_voting_idx == CAM_CPAS_PORT_DRV_DYN) + curr_client->is_drv_dyn = true; + + if (curr_client->is_drv_dyn && + (curr_node_ptr->drv_voting_idx != + CAM_CPAS_PORT_DRV_DYN)) + CAM_ERR(CAM_CPAS, + "Invalid config for drv dyn client: %s drv_idx: %d", + client_name, curr_node_ptr->drv_voting_idx); + } + + CAM_DBG(CAM_CPAS, + "Node Added: Client[%s] DataType[%d] TransType[%d] DRV idx[%d]", + client_name, + curr_node_ptr->path_data_type, + curr_node_ptr->path_trans_type, + curr_node_ptr->drv_voting_idx); + } + + if (soc_private->enable_cam_ddr_drv) + for (j = CAM_CPAS_PORT_DRV_0; j < num_drv_ports; j++) + curr_node_ptr->bw_info[j].vote_type = + CAM_CPAS_VOTE_TYPE_DRV; + + parent_node = of_parse_phandle(curr_node, "parent-node", 0); + if (parent_node) { + of_property_read_u32(parent_node, "cell-index", &cell_idx); + curr_node_ptr->parent_node = soc_private->tree_node[cell_idx]; + curr_node_ptr->is_rt_node = curr_node_ptr->parent_node->is_rt_node; + } else { + CAM_DBG(CAM_CPAS, "no parent node at this level"); + } + } + } + + mutex_init(&cpas_core->tree_lock); + cam_cpas_util_debug_parse_data(soc_private); + + return 0; +} + +int cam_cpas_get_hw_features(struct platform_device *pdev, + struct cam_cpas_private_soc *soc_private) +{ + struct device_node *of_node; + void 
*fuse; + uint32_t fuse_addr, fuse_mask, fuse_shift; + uint32_t val = 0, fuse_val = 0, feature; + uint32_t enable_type = 0, hw_map = 0; + int count = 0, i = 0, j = 0, num_feature = 0, num_fuse = 0; + struct cam_cpas_feature_info *feature_info; + + of_node = pdev->dev.of_node; + count = of_property_count_u32_elems(of_node, "cam_hw_fuse"); + + CAM_DBG(CAM_CPAS, "fuse info elements count %d", count); + + if (count <= 0) { + goto end; + } else if (count%5 != 0) { + CAM_INFO(CAM_CPAS, "fuse entries should be multiple of 5 %d", + count); + goto end; + } + + for (i = 0; (i + 5) <= count; i = i + 5) { + of_property_read_u32_index(of_node, "cam_hw_fuse", i, + &feature); + of_property_read_u32_index(of_node, "cam_hw_fuse", i + 1, + &fuse_addr); + of_property_read_u32_index(of_node, "cam_hw_fuse", i + 2, + &fuse_mask); + of_property_read_u32_index(of_node, "cam_hw_fuse", i + 3, + &enable_type); + of_property_read_u32_index(of_node, "cam_hw_fuse", i + 4, + &hw_map); + val = ffs(fuse_mask); + if (val == 0) { + CAM_ERR(CAM_CPAS, "fuse_mask not valid 0x%x", + fuse_mask); + fuse_shift = 0; + } else { + fuse_shift = val - 1; + } + CAM_INFO(CAM_CPAS, + "feature 0x%x addr 0x%x, mask 0x%x, shift 0x%x type 0x%x hw_map 0x%x", + feature, fuse_addr, fuse_mask, fuse_shift, enable_type, + hw_map); + + fuse = ioremap(fuse_addr, 4); + if (fuse) { + fuse_val = cam_io_r(fuse); + for (j = 0; (j < num_fuse) && (j < CAM_CPAS_FUSES_MAX); + j++) { + if (soc_private->fuse_info.fuse_val[j].fuse_id + == fuse_addr) + break; + } + if (j >= CAM_CPAS_FUSES_MAX) { + CAM_ERR(CAM_CPAS, + "fuse_info array overflow! 
%d", j); + goto end; + } + if (j == num_fuse) { + soc_private->fuse_info.fuse_val[j].fuse_id = + fuse_addr; + soc_private->fuse_info.fuse_val[j].fuse_val = + fuse_val; + CAM_INFO(CAM_CPAS, + "fuse_addr 0x%x, fuse_val %x", + fuse_addr, fuse_val); + num_fuse++; + } + } else { + /* if fuse ioremap is failed, disable the feature */ + CAM_ERR(CAM_CPAS, + "fuse register io remap failed fuse_addr:0x%x feature0x%x ", + fuse_addr, feature); + + if (enable_type == CAM_CPAS_FEATURE_TYPE_ENABLE || + enable_type == CAM_CPAS_FEATURE_TYPE_DISABLE) + fuse_val = (enable_type) ? ~fuse_mask : + fuse_mask; + else + fuse_val = 0; + } + + if (num_feature >= CAM_CPAS_MAX_FUSE_FEATURE) { + CAM_ERR(CAM_CPAS, "feature_info array overflow %d", + num_feature); + goto end; + } + + soc_private->feature_info[num_feature].feature = + feature; + soc_private->feature_info[num_feature].hw_map = hw_map; + soc_private->feature_info[num_feature].type = enable_type; + feature_info = &soc_private->feature_info[num_feature]; + + if (enable_type != CAM_CPAS_FEATURE_TYPE_VALUE) { + if (enable_type == CAM_CPAS_FEATURE_TYPE_ENABLE) { + /* + * fuse is for enable feature + * if fust bit is set means feature is enabled + * or HW is enabled + */ + if (fuse_val & fuse_mask) + feature_info->enable = true; + else + feature_info->enable = false; + } else if (enable_type == + CAM_CPAS_FEATURE_TYPE_DISABLE){ + /* + * fuse is for disable feature + * if fust bit is set means feature is disabled + * or HW is disabled + */ + if (fuse_val & fuse_mask) + feature_info->enable = false; + else + feature_info->enable = true; + } else { + CAM_ERR(CAM_CPAS, + "Feature type not valid, type: %d", + enable_type); + goto end; + } + CAM_INFO(CAM_CPAS, + "feature 0x%x enable=%d hw_map=0x%x", + feature_info->feature, feature_info->enable, + feature_info->hw_map); + } else { + feature_info->value = + (fuse_val & fuse_mask) >> fuse_shift; + CAM_INFO(CAM_CPAS, + "feature 0x%x value=0x%x hw_map=0x%x", + feature_info->feature, 
feature_info->value, + feature_info->hw_map); + } + num_feature++; + iounmap(fuse); + } + +end: + soc_private->fuse_info.num_fuses = num_fuse; + soc_private->num_feature_info = num_feature; + return 0; +} + +static inline int + cam_cpas_find_type_from_string( + const char *cache_name) +{ + if (strcmp(cache_name, "small-1") == 0) + return CAM_LLCC_SMALL_1; + else if (strcmp(cache_name, "small-2") == 0) + return CAM_LLCC_SMALL_2; + else if (strcmp(cache_name, "large-1") == 0) + return CAM_LLCC_LARGE_1; + else if (strcmp(cache_name, "large-2") == 0) + return CAM_LLCC_LARGE_2; + else if (strcmp(cache_name, "large-3") == 0) + return CAM_LLCC_LARGE_3; + else if (strcmp(cache_name, "large-4") == 0) + return CAM_LLCC_LARGE_4; + else if (strcmp(cache_name, "ofe_ip") == 0) + return CAM_LLCC_OFE_IP; + else if (strcmp(cache_name, "ipe_rt_ip") == 0) + return CAM_LLCC_IPE_RT_IP; + else if (strcmp(cache_name, "ipe_srt_ip") == 0) + return CAM_LLCC_IPE_SRT_IP; + else if (strcmp(cache_name, "ipe_rt_rf") == 0) + return CAM_LLCC_IPE_RT_RF; + else if (strcmp(cache_name, "ipe_srt_rf") == 0) + return CAM_LLCC_IPE_SRT_RF; + else + return -1; +} + +static inline bool cam_cpas_is_valid_fw_based_scid( + uint32_t type) +{ + bool rc; + + switch (type) { + case CAM_LLCC_OFE_IP: + case CAM_LLCC_IPE_RT_IP: + case CAM_LLCC_IPE_SRT_IP: + case CAM_LLCC_IPE_RT_RF: + case CAM_LLCC_IPE_SRT_RF: + rc = true; + break; + default: + rc = false; + break; + } + + return rc; +} + +static int cam_cpas_parse_sys_cache_uids( + struct device_node *of_node, + struct cam_cpas_private_soc *soc_private) +{ + int type = -1; + int num_caches, i, rc; + uint32_t scid; + + soc_private->llcc_info = NULL; + soc_private->num_caches = 0; + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_CPAS, "PRESIL parse syscache uids is DISABLED"); + return 0; + } + + num_caches = of_property_count_strings(of_node, "sys-cache-names"); + if (num_caches <= 0) { + CAM_DBG(CAM_CPAS, "no cache-names found"); + return 0; + } + + 
soc_private->llcc_info = CAM_MEM_ZALLOC_ARRAY(num_caches, + sizeof(struct cam_sys_cache_info), GFP_KERNEL); + if (!soc_private->llcc_info) + return -ENOMEM; + + for (i = 0; i < num_caches; i++) { + rc = of_property_read_string_index(of_node, "sys-cache-names", i, + &soc_private->llcc_info[i].name); + if (rc) { + CAM_ERR(CAM_CPAS, "failed to read cache-names at %d", i); + goto end; + } + + type = cam_cpas_find_type_from_string( + soc_private->llcc_info[i].name); + if (type < 0) { + CAM_ERR(CAM_CPAS, "Unsupported cache found: %s", + soc_private->llcc_info[i].name); + rc = -EINVAL; + goto end; + } + + soc_private->llcc_info[i].type = type; + rc = of_property_read_u32_index(of_node, + "sys-cache-uids", i, + &soc_private->llcc_info[i].uid); + if (rc < 0) { + CAM_ERR(CAM_CPAS, + "unable to read sys cache uid at index %d", i); + goto end; + } + rc = of_property_read_u32_index(of_node, + "sys-cache-concur", i, + &soc_private->llcc_info[i].concur); + if ((rc < 0) && cam_cpas_is_valid_fw_based_scid(type)) { + CAM_ERR(CAM_CPAS, + "unable to read sys cache concur at index %d cache type = %d", + i, type); + goto end; + } else if (rc < 0) { + /* This to make SFE sHDR exposure scid also concurrent type */ + soc_private->llcc_info[i].concur = 1; + CAM_DBG(CAM_CPAS, + "read sys cache concur at index %d cache type = %d", + i, type); + } + + soc_private->llcc_info[i].slic_desc = + llcc_slice_getd(soc_private->llcc_info[i].uid); + + if (IS_ERR_OR_NULL(soc_private->llcc_info[i].slic_desc)) { + CAM_ERR(CAM_CPAS, + "Failed to get slice desc for uid %u", + soc_private->llcc_info[i].uid); + rc = -EINVAL; + goto end; + } + + scid = llcc_get_slice_id(soc_private->llcc_info[i].slic_desc); + soc_private->llcc_info[i].scid = scid; + soc_private->llcc_info[i].size = + llcc_get_slice_size(soc_private->llcc_info[i].slic_desc); + soc_private->llcc_info[i].staling_distance = 0; + soc_private->llcc_info[i].mode = CAM_LLCC_STALING_MODE_CAPACITY; + soc_private->llcc_info[i].op_type = 
CAM_LLCC_NOTIFY_STALING_EVICT; + soc_private->num_caches++; + + CAM_DBG(CAM_CPAS, + "Cache: %s uid: %u scid: %d size: %zukb", + soc_private->llcc_info[i].name, + soc_private->llcc_info[i].uid, scid, + soc_private->llcc_info[i].size); + } + + return 0; + +end: + CAM_MEM_FREE(soc_private->llcc_info); + soc_private->llcc_info = NULL; + return rc; +} + +#ifdef CONFIG_DOMAIN_ID_SECURE_CAMERA +static int cam_cpas_parse_domain_id_mapping(struct device_node *of_node, + struct cam_cpas_private_soc *soc_private) +{ + int count, i, rc = 0; + + soc_private->domain_id_info.num_domain_ids = 0; + soc_private->domain_id_info.domain_id_supported = false; + soc_private->domain_id_info.domain_id_entries = NULL; + count = of_property_count_u32_elems(of_node, "domain-id"); + if (count <= 0) { + CAM_DBG(CAM_CPAS, "No domain-id mapping found, count: %d", count); + return rc; + } + + /* This property will always be specified in pairs */ + if (count % 2) { + CAM_ERR(CAM_CPAS, + "Mismatch in domain-id mapping, found %d number of entries", count); + rc = -EINVAL; + goto err; + } + + soc_private->domain_id_info.num_domain_ids = count / 2; + + if (soc_private->domain_id_info.num_domain_ids > CAM_CPAS_DOMAIN_ID_MAX) { + CAM_ERR(CAM_CPAS, + "Number of domain id types: %u more than supported: %d", + soc_private->domain_id_info.num_domain_ids, CAM_CPAS_DOMAIN_ID_MAX); + rc = -EINVAL; + goto err; + } + + soc_private->domain_id_info.domain_id_entries = + CAM_MEM_ZALLOC_ARRAY(soc_private->domain_id_info.num_domain_ids, + sizeof(struct cam_cpas_domain_id_mapping), GFP_KERNEL); + if (!soc_private->domain_id_info.domain_id_entries) { + CAM_ERR(CAM_CPAS, + "Error allocating memory for %u domain-id mapping(s)", + soc_private->domain_id_info.num_domain_ids); + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < soc_private->domain_id_info.num_domain_ids; i++) { + struct cam_cpas_domain_id_mapping *domain_id_entry = + &soc_private->domain_id_info.domain_id_entries[i]; + + rc = 
of_property_read_u32_index(of_node, "domain-id", + (i * 2), &domain_id_entry->domain_type); + if (rc) { + CAM_ERR(CAM_CPAS, "Error reading domain-id type entry at pos %d", i); + rc = -EINVAL; + goto err; + } + + if (domain_id_entry->domain_type > CAM_CPAS_SECURE_DOMAIN) { + CAM_ERR(CAM_CPAS, "Unexpected domain id type: %u", + domain_id_entry->domain_type); + rc = -EINVAL; + goto err; + } + + rc = of_property_read_u32_index(of_node, "domain-id", + ((i * 2) + 1), &domain_id_entry->mapping_id); + if (rc) { + CAM_ERR(CAM_CPAS, "Error reading domain-id mapping id at pos %d", i); + rc = -EINVAL; + goto err; + } + + CAM_DBG(CAM_CPAS, "Domain-id type: %u, mapping: %u at pos: %d", + domain_id_entry->domain_type, domain_id_entry->mapping_id, i); + } + + soc_private->domain_id_info.domain_id_supported = true; + + return rc; + +err: + soc_private->domain_id_info.num_domain_ids = 0; + CAM_MEM_FREE(soc_private->domain_id_info.domain_id_entries); + soc_private->domain_id_info.domain_id_entries = NULL; + return rc; +} +#endif + +static int cam_cpas_get_domain_id_support_clks(struct device_node *of_node, + struct cam_hw_soc_info *soc_info, struct cam_cpas_private_soc *soc_private) +{ + int rc = 0, count, i; + struct cam_cpas_domain_id_support_clks *domain_id_clks; + + soc_private->domain_id_clks = CAM_MEM_ZALLOC(sizeof(struct cam_cpas_domain_id_support_clks), + GFP_KERNEL); + if (!soc_private->domain_id_clks) { + CAM_ERR(CAM_CPAS, "Failed in allocating memory for domain_id_clk"); + return -ENOMEM; + } + + domain_id_clks = soc_private->domain_id_clks; + + count = of_property_count_strings(of_node, "domain-id-support-clks"); + CAM_DBG(CAM_CPAS, "Domain-id clk count: %d", count); + if (count > CAM_SOC_MAX_OPT_CLK) { + CAM_ERR(CAM_CPAS, "Invalid cnt of clocks, count: %d", count); + rc = -EINVAL; + goto err; + } + if (count <= 0) { + CAM_ERR(CAM_CPAS, "No domain-id clk found"); + rc = -EINVAL; + goto err; + } + + domain_id_clks->number_clks = count; + + for (i = 0; i < count; i++) { + 
rc = of_property_read_string_index(of_node, "domain-id-support-clks", + i, &domain_id_clks->clk_names[i]); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed reading domain-id clk name at i: %d, total clks: %d", + i, count); + rc = -EINVAL; + goto err; + } + + rc = cam_soc_util_get_option_clk_by_name(soc_info, domain_id_clks->clk_names[i], + &domain_id_clks->clk_idx[i]); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed reading domain-id clk %s at i: %d, total clks; %d", + domain_id_clks->clk_names[i], i, count); + rc = -EINVAL; + goto err; + } + + CAM_DBG(CAM_CPAS, "Domain-id-clk %s with clk index %d", + domain_id_clks->clk_names[i], domain_id_clks->clk_idx[i]); + + } + + return rc; + +err: + CAM_MEM_FREE(domain_id_clks); + return rc; +} + +int cam_cpas_get_custom_dt_info(struct cam_hw_info *cpas_hw, + struct platform_device *pdev, struct cam_cpas_private_soc *soc_private) +{ + struct device_node *of_node; + struct of_phandle_args src_args = {0}, dst_args = {0}; + int count = 0, i = 0, rc = 0, num_bw_values = 0, num_levels = 0; + uint32_t cam_drv_en_mask_val = 0; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + uint32_t ahb_bus_client_ab = 0, ahb_bus_client_ib = 0; + + if (!soc_private || !pdev) { + CAM_ERR(CAM_CPAS, "invalid input arg %pK %pK", + soc_private, pdev); + return -EINVAL; + } + + of_node = pdev->dev.of_node; + + rc = of_property_read_string(of_node, "arch-compat", + &soc_private->arch_compat); + if (rc) { + CAM_ERR(CAM_CPAS, "device %s failed to read arch-compat", + pdev->name); + return rc; + } + + cam_cpas_get_hw_features(pdev, soc_private); + +#ifdef CONFIG_DOMAIN_ID_SECURE_CAMERA + /* get domain id mapping info */ + rc = cam_cpas_parse_domain_id_mapping(of_node, soc_private); + if (rc) + return rc; + /* check if the domain ID configuration is available in the DTSI */ + if (soc_private->domain_id_info.domain_id_supported == false) { + CAM_ERR(CAM_CPAS, "Domain ID configuration is expected for this target"); + return -EINVAL; + } +#endif + + 
soc_private->camnoc_axi_min_ib_bw = 0; + rc = of_property_read_u64(of_node, "camnoc-axi-min-ib-bw", + &soc_private->camnoc_axi_min_ib_bw); + if (rc == -EOVERFLOW) { + soc_private->camnoc_axi_min_ib_bw = 0; + rc = of_property_read_u32(of_node, "camnoc-axi-min-ib-bw", + (u32 *)&soc_private->camnoc_axi_min_ib_bw); + } + + if (rc) { + CAM_DBG(CAM_CPAS, "failed to read camnoc-axi-min-ib-bw rc:%d", rc); + soc_private->camnoc_axi_min_ib_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW; + } + + CAM_DBG(CAM_CPAS, "camnoc-axi-min-ib-bw = %llu", soc_private->camnoc_axi_min_ib_bw); + + rc = of_property_read_u64(of_node, "cam-max-rt-axi-bw", &soc_private->cam_max_rt_axi_bw); + if (rc) { + CAM_DBG(CAM_CPAS, "failed to read cam-max-rt-axi-bw rc:%d", rc); + soc_private->cam_max_rt_axi_bw = 0; + } + + CAM_DBG(CAM_CPAS, "cam-max-rt-axi-bw = %llu", soc_private->cam_max_rt_axi_bw); + + soc_private->client_id_based = of_property_read_bool(of_node, "client-id-based"); + soc_private->bus_icc_based = of_property_read_bool(of_node, "interconnect-names"); + + if (soc_private->bus_icc_based) { + soc_private->use_cam_icc_path_str = of_property_read_bool(of_node, + "cam-icc-path-names"); + if (soc_private->use_cam_icc_path_str) { + rc = of_property_read_string(of_node, "cam-icc-path-names", + &cpas_core->ahb_bus_client.common_data.name); + if (rc) { + CAM_ERR(CAM_CPAS, "device %s failed to read ahb cam-icc-path-names", + pdev->name); + return rc; + } + + goto parse_ahb_table; + } + + rc = of_property_read_string(of_node, "interconnect-names", + &cpas_core->ahb_bus_client.common_data.name); + if (rc) { + CAM_ERR(CAM_CPAS, + "device %s failed to read ahb interconnect-names", pdev->name); + return rc; + } + + /* interconnect is not supported on tvm */ + if (!cam_vmrm_proxy_icc_voting_enable()) { + rc = of_parse_phandle_with_args(of_node, "interconnects", + "#interconnect-cells", 0, &src_args); + if (rc) { + CAM_ERR(CAM_CPAS, "device %s failed to read ahb bus src info", + pdev->name); + return rc; + } + + 
of_node_put(src_args.np); + if (src_args.args_count != 1) { + CAM_ERR(CAM_CPAS, "Invalid number of ahb src args: %d", + src_args.args_count); + return -EINVAL; + } + + cpas_core->ahb_bus_client.common_data.src_id = src_args.args[0]; + + rc = of_parse_phandle_with_args(of_node, "interconnects", + "#interconnect-cells", 1, &dst_args); + if (rc) { + CAM_ERR(CAM_CPAS, "device %s failed to read ahb bus dst info", + pdev->name); + return rc; + } + + of_node_put(dst_args.np); + if (dst_args.args_count != 1) { + CAM_ERR(CAM_CPAS, "Invalid number of ahb dst args: %d", + dst_args.args_count); + return -EINVAL; + } + + cpas_core->ahb_bus_client.common_data.dst_id = dst_args.args[0]; + } + +parse_ahb_table: + rc = of_property_read_u32(of_node, "cam-ahb-num-cases", + &cpas_core->ahb_bus_client.common_data.num_usecases); + if (rc) { + CAM_ERR(CAM_CPAS, + "device %s failed to read ahb num usecases", + pdev->name); + return rc; + } + + if (cpas_core->ahb_bus_client.common_data.num_usecases > + CAM_SOC_BUS_MAX_NUM_USECASES) { + CAM_ERR(CAM_UTIL, "Invalid number of usecases: %d", + cpas_core->ahb_bus_client.common_data.num_usecases); + return -EINVAL; + } + + num_bw_values = of_property_count_u32_elems(of_node, "cam-ahb-bw-KBps"); + if (num_bw_values <= 0) { + CAM_ERR(CAM_UTIL, "Error counting ahb bw values"); + return -EINVAL; + } + + CAM_DBG(CAM_CPAS, "AHB: num bw values %d", num_bw_values); + num_levels = (num_bw_values / 2); + + if (num_levels != + cpas_core->ahb_bus_client.common_data.num_usecases) { + CAM_ERR(CAM_UTIL, "Invalid number of levels: %d", num_bw_values/2); + return -EINVAL; + } + + for (i = 0; i < num_levels; i++) { + rc = of_property_read_u32_index(of_node, "cam-ahb-bw-KBps", (i * 2), + &ahb_bus_client_ab); + if (rc) { + CAM_ERR(CAM_UTIL, "Error reading ab bw value, rc=%d", rc); + return rc; + } + + cpas_core->ahb_bus_client.common_data.bw_pair[i].ab = ahb_bus_client_ab; + + rc = of_property_read_u32_index(of_node, "cam-ahb-bw-KBps", ((i * 2) + 1), + 
&ahb_bus_client_ib); + if (rc) { + CAM_ERR(CAM_UTIL, "Error reading ib bw value, rc=%d", rc); + return rc; + } + + cpas_core->ahb_bus_client.common_data.bw_pair[i].ib = ahb_bus_client_ib; + + CAM_DBG(CAM_CPAS, "AHB: Level: %d, ab_value %llu, ib_value: %llu", + i, cpas_core->ahb_bus_client.common_data.bw_pair[i].ab, + cpas_core->ahb_bus_client.common_data.bw_pair[i].ib); + } + } + + count = of_property_count_strings(of_node, "client-names"); + if (count <= 0) { + CAM_ERR(CAM_CPAS, "no client-names found"); + return -EINVAL; + } else if (count > CAM_CPAS_MAX_CLIENTS) { + CAM_ERR(CAM_CPAS, "Number of clients %d greater than max %d", + count, CAM_CPAS_MAX_CLIENTS); + return -EINVAL; + } + + soc_private->num_clients = count; + CAM_DBG(CAM_CPAS, + "arch-compat=%s, client_id_based = %d, num_clients=%d", + soc_private->arch_compat, soc_private->client_id_based, + soc_private->num_clients); + + for (i = 0; i < soc_private->num_clients; i++) { + rc = of_property_read_string_index(of_node, + "client-names", i, &soc_private->client_name[i]); + if (rc) { + CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i); + return -EINVAL; + } + + cpas_core->cpas_client[i] = + CAM_MEM_ZALLOC(sizeof(struct cam_cpas_client), GFP_KERNEL); + if (!cpas_core->cpas_client[i]) { + rc = -ENOMEM; + goto cleanup_clients; + } + + CAM_DBG(CAM_CPAS, "Client[%d] : %s", i, + soc_private->client_name[i]); + } + + soc_private->control_camnoc_axi_clk = of_property_read_bool(of_node, + "control-camnoc-axi-clk"); + + if (soc_private->control_camnoc_axi_clk == true) { + rc = of_property_read_u32(of_node, "camnoc-bus-width", + &soc_private->camnoc_bus_width); + if (rc || (soc_private->camnoc_bus_width == 0)) { + CAM_ERR(CAM_CPAS, "Bus width not found rc=%d, %d", + rc, soc_private->camnoc_bus_width); + goto cleanup_clients; + } + + if (of_property_read_u32(of_node, + "camnoc-axi-clk-bw-margin-perc", + &soc_private->camnoc_axi_clk_bw_margin)) { + + /* this is not fatal, overwrite to 0 */ + 
soc_private->camnoc_axi_clk_bw_margin = 0; + } + } + + CAM_DBG(CAM_CPAS, + "control_camnoc_axi_clk=%d, width=%d, margin=%d", + soc_private->control_camnoc_axi_clk, + soc_private->camnoc_bus_width, + soc_private->camnoc_axi_clk_bw_margin); + + count = of_property_count_u32_elems(of_node, "vdd-corners"); + if ((count > 0) && (count <= CAM_REGULATOR_LEVEL_MAX) && + (of_property_count_strings(of_node, "vdd-corner-ahb-mapping") == + count)) { + const char *ahb_string; + + for (i = 0; i < count; i++) { + rc = of_property_read_u32_index(of_node, "vdd-corners", + i, &soc_private->vdd_ahb[i].vdd_corner); + if (rc) { + CAM_ERR(CAM_CPAS, + "vdd-corners failed at index=%d", i); + rc = -ENODEV; + goto cleanup_clients; + } + + rc = of_property_read_string_index(of_node, + "vdd-corner-ahb-mapping", i, &ahb_string); + if (rc) { + CAM_ERR(CAM_CPAS, + "no ahb-mapping at index=%d", i); + rc = -ENODEV; + goto cleanup_clients; + } + + rc = cam_soc_util_get_level_from_string(ahb_string, + &soc_private->vdd_ahb[i].ahb_level); + if (rc) { + CAM_ERR(CAM_CPAS, + "invalid ahb-string at index=%d", i); + rc = -EINVAL; + goto cleanup_clients; + } + + CAM_DBG(CAM_CPAS, + "Vdd-AHB mapping [%d] : [%d] [%s] [%d]", i, + soc_private->vdd_ahb[i].vdd_corner, + ahb_string, soc_private->vdd_ahb[i].ahb_level); + } + + soc_private->num_vdd_ahb_mapping = count; + } + + soc_private->enable_secure_qos_update = of_property_read_bool(of_node, + "enable-secure-qos-update"); + CAM_DBG(CAM_CPAS, "Enable secure qos update: %s", + CAM_BOOL_TO_YESNO(soc_private->enable_secure_qos_update)); + + soc_private->enable_smart_qos = of_property_read_bool(of_node, + "enable-smart-qos"); + + if (soc_private->enable_smart_qos) { + uint32_t value; + + soc_private->smart_qos_info = CAM_MEM_ZALLOC(sizeof(struct cam_cpas_smart_qos_info), + GFP_KERNEL); + if (!soc_private->smart_qos_info) { + rc = -ENOMEM; + goto cleanup_clients; + } + + /* + * If enabled, we expect min and max priority values, + * clamp priority value, slope 
factor, least and most + * stressed clamp threshold values, high and low stress + * indicator threshold values, bw ratio scale factor value, + * so treat as fatal error if not available. + */ + rc = of_property_read_u32(of_node, "rt-wr-priority-min", + &value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid min Qos priority rc %d", rc); + goto cleanup_clients; + } + soc_private->smart_qos_info->rt_wr_priority_min = (uint8_t)value; + + rc = of_property_read_u32(of_node, "rt-wr-priority-max", + &value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid max Qos priority rc %d", rc); + goto cleanup_clients; + } + soc_private->smart_qos_info->rt_wr_priority_max = (uint8_t)value; + + rc = of_property_read_u32(of_node, "rt-wr-priority-clamp", + &value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid clamp Qos priority rc %d", rc); + goto cleanup_clients; + } + soc_private->smart_qos_info->rt_wr_priority_clamp = (uint8_t)value; + + rc = of_property_read_u32(of_node, "rt-wr-slope-factor", + &value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid slope factor rc %d", rc); + goto cleanup_clients; + } + + if (value > CAM_CPAS_MAX_SLOPE_FACTOR) { + CAM_ERR(CAM_UTIL, "Invalid slope factor value %d", value); + rc = -EINVAL; + goto cleanup_clients; + } else + soc_private->smart_qos_info->rt_wr_slope_factor = (uint8_t)value; + + CAM_DBG(CAM_CPAS, + "SmartQoS enabled, priority min=%u, max=%u, clamp=%u, slope factor=%u", + (uint32_t)soc_private->smart_qos_info->rt_wr_priority_min, + (uint32_t)soc_private->smart_qos_info->rt_wr_priority_max, + (uint32_t)soc_private->smart_qos_info->rt_wr_priority_clamp, + (uint32_t)soc_private->smart_qos_info->rt_wr_slope_factor); + + rc = of_property_read_u32(of_node, "rt-wr-leaststressed-clamp-threshold", + &value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid leaststressed clamp threshold rc %d", rc); + goto cleanup_clients; + } + soc_private->smart_qos_info->leaststressed_clamp_th = (uint8_t)value; + + rc = of_property_read_u32(of_node, "rt-wr-moststressed-clamp-threshold", + 
&value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid moststressed clamp threshold rc %d", rc); + goto cleanup_clients; + } + soc_private->smart_qos_info->moststressed_clamp_th = (uint8_t)value; + + CAM_DBG(CAM_CPAS, + "leaststressed_clamp_th=%u, moststressed_clamp_th=%u", + (uint32_t)soc_private->smart_qos_info->leaststressed_clamp_th, + (uint32_t)soc_private->smart_qos_info->moststressed_clamp_th); + + rc = of_property_read_u32(of_node, "rt-wr-highstress-indicator-threshold", + &value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid highstress indicator threshold rc %d", rc); + goto cleanup_clients; + } + + if (value > CAM_CPAS_MAX_STRESS_INDICATOR) { + CAM_ERR(CAM_UTIL, "Invalid highstress indicator threshold value %d", value); + rc = -EINVAL; + goto cleanup_clients; + } else + soc_private->smart_qos_info->highstress_indicator_th = (uint8_t)value; + + rc = of_property_read_u32(of_node, "rt-wr-lowstress-indicator-threshold", + &value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid lowstress indicator threshold rc %d", rc); + goto cleanup_clients; + } + + if (value > CAM_CPAS_MAX_STRESS_INDICATOR) { + CAM_ERR(CAM_UTIL, "Invalid lowstress indicator threshold value %d", value); + rc = -EINVAL; + goto cleanup_clients; + } else + soc_private->smart_qos_info->lowstress_indicator_th = (uint8_t)value; + + rc = of_property_read_u32(of_node, "rt-wr-bw-ratio-scale-factor", + &value); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid bw ratio scale factor rc %d", rc); + goto cleanup_clients; + } + soc_private->smart_qos_info->bw_ratio_scale_factor = (uint8_t)value; + + CAM_DBG(CAM_CPAS, + "highstress_indicator_th=%u, lowstress_indicator_th=%u, scale factor=%u", + (uint32_t)soc_private->smart_qos_info->highstress_indicator_th, + (uint32_t)soc_private->smart_qos_info->lowstress_indicator_th, + (uint32_t)soc_private->smart_qos_info->bw_ratio_scale_factor); + } else { + CAM_DBG(CAM_CPAS, "SmartQoS not enabled, use static settings"); + soc_private->smart_qos_info = NULL; + } + + rc = 
of_property_read_u32(of_node, "enable-cam-drv", &cam_drv_en_mask_val); + + if (!rc) { + if (cam_drv_en_mask_val & CAM_DDR_DRV) + soc_private->enable_cam_ddr_drv = true; + + if (cam_drv_en_mask_val & CAM_CLK_DRV) { + if (!soc_private->enable_cam_ddr_drv) { + CAM_ERR(CAM_CPAS, "DDR DRV needs to be enabled for Clock DRV"); + rc = -EPERM; + goto cleanup_clients; + } + + soc_private->enable_cam_clk_drv = true; + rc = cam_soc_util_cesta_populate_crm_device(); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed to populate cam cesta crm device rc %d", + rc); + goto cleanup_clients; + } + } + } + + CAM_DBG(CAM_CPAS, "enable_cam_ddr_drv %d enable_cam_clk_drv %d cam_drv_en_mask_val %d", + soc_private->enable_cam_ddr_drv, soc_private->enable_cam_clk_drv, + cam_drv_en_mask_val); + + rc = cam_cpas_parse_node_tree(cpas_core, of_node, soc_private); + if (rc) { + CAM_ERR(CAM_CPAS, "Node tree parsing failed rc: %d", rc); + goto cleanup_tree; + } + + /* If SmartQoS is enabled, we expect few tags in dtsi, validate */ + if (soc_private->enable_smart_qos) { + int port_idx; + bool rt_port_exists = false; + + if ((soc_private->smart_qos_info->num_rt_wr_nius == 0) || + (soc_private->smart_qos_info->num_rt_wr_nius > + CAM_CPAS_MAX_RT_WR_NIU_NODES)) { + CAM_ERR(CAM_CPAS, "Invalid number of level1 nodes %d", + soc_private->smart_qos_info->num_rt_wr_nius); + rc = -EINVAL; + goto cleanup_tree; + } + + for (port_idx = 0; port_idx < cpas_core->num_axi_ports; + port_idx++) { + CAM_DBG(CAM_CPAS, "[%d] : Port[%s] is_rt=%d", + port_idx, cpas_core->axi_port[port_idx].axi_port_name, + cpas_core->axi_port[port_idx].is_rt); + if (cpas_core->axi_port[port_idx].is_rt) { + rt_port_exists = true; + break; + } + } + + if (!rt_port_exists) { + CAM_ERR(CAM_CPAS, + "RT AXI port not tagged, num ports %d", + cpas_core->num_axi_ports); + rc = -EINVAL; + goto cleanup_tree; + } + } + + /* Optional rpmh bcm info */ + count = of_property_count_u32_elems(of_node, "rpmh-bcm-info"); + /* + * We expect count=5(CAM_RPMH_BCM_INFO_MAX) 
if valid rpmh bcm info + * is available. + * 0 - Total number of BCMs + * 1 - First BCM FE (front-end) register offset. + * These represent requested clk plan by sw + * 2 - First BCM BE (back-end) register offset. + * These represent actual clk plan at hw + * 3 - DDR BCM index + * 4 - MMNOC BCM index + */ + if (count == CAM_RPMH_BCM_INFO_MAX) { + for (i = 0; i < count; i++) { + rc = of_property_read_u32_index(of_node, + "rpmh-bcm-info", i, &soc_private->rpmh_info[i]); + if (rc) { + CAM_ERR(CAM_CPAS, + "Incorrect rpmh info at %d, count=%d", + i, count); + break; + } + CAM_DBG(CAM_CPAS, "RPMH BCM Info [%d]=0x%x", + i, soc_private->rpmh_info[i]); + } + + if (rc) + soc_private->rpmh_info[CAM_RPMH_NUMBER_OF_BCMS] = 0; + } else { + CAM_DBG(CAM_CPAS, "RPMH BCM info not available in DT, count=%d", + count); + } + + /* check cache info */ + rc = cam_cpas_parse_sys_cache_uids(of_node, soc_private); + if (rc) + goto cache_parse_fail; + + return 0; + +cache_parse_fail: + soc_private->rpmh_info[CAM_RPMH_NUMBER_OF_BCMS] = 0; +cleanup_tree: + cam_cpas_node_tree_cleanup(cpas_core, soc_private); +cleanup_clients: + cam_cpas_util_client_cleanup(cpas_hw); + return rc; +} + +static int cam_cpas_soc_fill_irq_data(struct cam_hw_info *cpas_hw, + struct cam_hw_soc_info *soc_info, void **irq_data) +{ + struct cam_cpas_private_soc *soc_private = soc_info->soc_private; + int i; + + for (i = 0; i < soc_info->irq_count; i++) { + soc_private->irq_data[i].cpas_hw = cpas_hw; + + if (!strcmp(soc_info->irq_name[i], "cpas_camnoc")) + soc_private->irq_data[i].camnoc_type = CAM_CAMNOC_HW_COMBINED; + else if (!strcmp(soc_info->irq_name[i], "cpas_camnoc_rt")) + soc_private->irq_data[i].camnoc_type = CAM_CAMNOC_HW_RT; + else if (!strcmp(soc_info->irq_name[i], "cpas_camnoc_nrt")) + soc_private->irq_data[i].camnoc_type = CAM_CAMNOC_HW_NRT; + else { + CAM_ERR(CAM_CPAS, "Unable to identify interrupt name: %s", + soc_info->irq_name[i]); + return -EINVAL; + } + + irq_data[i] = &soc_private->irq_data[i]; + } + 
+ return 0; +} + +int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler, struct cam_hw_info *cpas_hw) +{ + int rc = 0; + struct cam_cpas_private_soc *soc_private; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in get_dt_properties, rc=%d", rc); + return rc; + } + + if (soc_info->irq_count > 0 && !irq_handler) { + CAM_ERR(CAM_CPAS, "Invalid IRQ handler"); + return -EINVAL; + } + + soc_info->soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_cpas_private_soc), + GFP_KERNEL); + if (!soc_info->soc_private) { + CAM_ERR(CAM_CPAS, "Failed to allocate soc private"); + return -ENOMEM; + } + soc_private = (struct cam_cpas_private_soc *)soc_info->soc_private; + + rc = cam_cpas_get_custom_dt_info(cpas_hw, soc_info->pdev, soc_private); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in get_custom_info, rc=%d", rc); + goto free_soc_private; + } + + soc_private->irq_data = CAM_MEM_ZALLOC_ARRAY(soc_info->irq_count, + sizeof(struct cam_cpas_soc_irq_data), + GFP_KERNEL); + if (!soc_private->irq_data) { + CAM_ERR(CAM_CPAS, "Failed to allocate irq data"); + rc = -ENOMEM; + goto free_soc_private; + } + + rc = cam_cpas_soc_fill_irq_data(cpas_hw, soc_info, &(irq_data[0])); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed to fill irq data rc=%d", rc); + goto free_irq_data; + } + + soc_info->is_clk_drv_en = soc_private->enable_cam_clk_drv; + + rc = cam_soc_util_request_platform_resource(soc_info, irq_handler, &(irq_data[0])); + if (rc) { + CAM_ERR(CAM_CPAS, "failed in request_platform_resource, rc=%d", rc); + goto free_irq_data; + } + + rc = cam_soc_util_get_option_clk_by_name(soc_info, CAM_ICP_CLK_NAME, + &soc_private->icp_clk_index); + if (rc) { + CAM_DBG(CAM_CPAS, "ICP option clk get failed with rc %d", rc); + soc_private->icp_clk_index = -1; + rc = 0; + } else { + CAM_DBG(CAM_CPAS, "ICP option clk get success index %d", + soc_private->icp_clk_index); + } + + if 
(soc_private->domain_id_info.domain_id_supported) { + rc = cam_cpas_get_domain_id_support_clks(soc_info->pdev->dev.of_node, + soc_info, soc_private); + if (rc) + goto release_res; + } + + rc = cam_soc_util_get_reset_resource(soc_info); + if (rc) { + CAM_DBG(CAM_CPAS, "Reset resource get failed rc %d", rc); + rc = 0; + } + + return rc; + +release_res: + cam_soc_util_release_platform_resource(soc_info); +free_irq_data: + CAM_MEM_FREE(soc_private->irq_data); +free_soc_private: + CAM_MEM_FREE(soc_private->llcc_info); + CAM_MEM_FREE(soc_private->smart_qos_info); + CAM_MEM_FREE(soc_info->soc_private); + soc_info->soc_private = NULL; + return rc; +} + +int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info) +{ + int rc, i; + struct cam_cpas_private_soc *soc_private = soc_info->soc_private; + + for (i = 0; i < soc_private->num_caches; i++) + llcc_slice_putd(soc_private->llcc_info[i].slic_desc); + + if (soc_private->icp_clk_index != -1) { + rc = cam_soc_util_put_optional_clk(soc_info, soc_private->icp_clk_index); + if (rc) + CAM_ERR(CAM_CPAS, "Error Put optional clk failed rc=%d", rc); + } + + CAM_MEM_FREE(soc_private->domain_id_info.domain_id_entries); + + CAM_MEM_FREE(soc_private->domain_id_clks); + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_ERR(CAM_CPAS, "release platform failed, rc=%d", rc); + + rc = cam_soc_util_put_reset_resource(soc_info); + if (rc) + CAM_ERR(CAM_CPAS, "release put reset failed, rc=%d", rc); + + CAM_MEM_FREE(soc_private->irq_data); + CAM_MEM_FREE(soc_private->llcc_info); + CAM_MEM_FREE(soc_private->smart_qos_info); + CAM_MEM_FREE(soc_info->soc_private); + soc_info->soc_private = NULL; + + return rc; +} + +int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info, + enum cam_vote_level default_level) +{ + struct cam_cpas_private_soc *soc_private = soc_info->soc_private; + int rc = 0; + + /* set this everytime in order to support debugfs to disable clk drv between runs */ + soc_info->is_clk_drv_en = 
soc_private->enable_cam_clk_drv; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + default_level, true); + if (rc) + CAM_ERR(CAM_CPAS, "enable platform resource failed, rc=%d", rc); + + return rc; +} + +int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info, + bool disable_clocks, bool disable_irq) +{ + int rc = 0; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, + disable_clocks, disable_irq); + if (rc) + CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc); + + return rc; +} + +int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + rc = cam_soc_util_irq_disable(soc_info); + if (rc) + CAM_ERR(CAM_CPAS, "disable irq failed, rc=%d", rc); + + return rc; +} + +int cam_cpas_vmrm_callback_handler(void *cb_data, void *msg, uint32_t size) +{ + int rc = 0, i; + uint32_t msg_type; + struct cam_hw_intf *cpas_hw_intf = NULL; + struct cam_hw_info *cpas_hw = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cpas *cpas_core = NULL; + struct cam_soc_bus_client *bus_client = NULL; + struct cam_vmrm_msg *cpas_msg = NULL; + unsigned long cesta_clk_rate_high = 0, cesta_clk_rate_low = 0; + int32_t clk_level_high, clk_level_low, clk_lvl, highest_level; + bool do_not_set_src_clk; + int cesta_client_idx = -1; + int64_t applied_rate = 0; + struct cam_msg_set_clk_rate *msg_set_clk_rate = NULL; + struct cam_msg_set_clk_rate_level *msg_set_clk_rate_level = NULL; + struct cam_msg_icc_vote *msg_icc_vote = NULL; + char name[CAM_ICC_CLIENT_NAME_MAX]; + uint64_t ab, ib; + + cpas_hw_intf = (struct cam_hw_intf *)cb_data; + cpas_hw = cpas_hw_intf->hw_priv; + soc_info = &cpas_hw->soc_info; + cpas_core = cpas_hw->core_info; + + cpas_msg = msg; + msg_type = cpas_msg->msg_type; + + CAM_DBG(CAM_CPAS, "msg type %d", msg_type); + + switch (msg_type) { + case CAM_SOC_ENABLE_RESOURCE: + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + 
cpas_hw->soc_info.lowest_clk_level, false); + if (rc) + CAM_ERR(CAM_CPAS, "cpas soc enable resource failed %d", rc); + break; + + case CAM_SOC_DISABLE_RESOURCE: + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, + true, false); + if (rc) + CAM_ERR(CAM_CPAS, "cpas soc disable resource failed %d", rc); + break; + + case CAM_CLK_SET_RATE: + msg_set_clk_rate = (struct cam_msg_set_clk_rate *)&cpas_msg->data[0]; + cesta_client_idx = msg_set_clk_rate->cesta_client_idx; + cesta_clk_rate_high = msg_set_clk_rate->clk_rate_high; + cesta_clk_rate_low = msg_set_clk_rate->clk_rate_low; + + CAM_DBG(CAM_CPAS, "set %s, cesta_client_idx %d rate [%ld %ld] dev_name %s", + soc_info->clk_name[soc_info->src_clk_idx], cesta_client_idx, + cesta_clk_rate_high, cesta_clk_rate_low, soc_info->dev_name); + + rc = cam_soc_util_set_src_clk_rate(soc_info, cesta_client_idx, + cesta_clk_rate_high, cesta_clk_rate_low); + if (rc) + CAM_ERR(CAM_CPAS, "set %s, cesta_client_idx %d rate [%ld %ld] dev_name %s", + soc_info->clk_name[soc_info->src_clk_idx], cesta_client_idx, + cesta_clk_rate_high, cesta_clk_rate_low, soc_info->dev_name); + break; + + case CAM_CLK_SET_RATE_LEVEL: + msg_set_clk_rate_level = (struct cam_msg_set_clk_rate_level *)&cpas_msg->data[0]; + cesta_client_idx = msg_set_clk_rate_level->cesta_client_idx; + clk_level_high = msg_set_clk_rate_level->clk_level_high; + clk_level_low = msg_set_clk_rate_level->clk_level_low; + cesta_clk_rate_high = msg_set_clk_rate_level->clk_rate; + do_not_set_src_clk = msg_set_clk_rate_level->do_not_set_src_clk; + + CAM_DBG(CAM_CPAS, + "set %s, cesta_client_idx %d level [%d %d] rate %ld dev_name %s do_not_set_src_clk %d", + soc_info->clk_name[soc_info->src_clk_idx], cesta_client_idx, + clk_level_high, clk_level_low, cesta_clk_rate_high, soc_info->dev_name); + + if (cesta_clk_rate_high) { + rc = cam_soc_util_get_clk_level(soc_info, cesta_clk_rate_high, + soc_info->src_clk_idx, &clk_lvl); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to 
get clk lvl: %d, src clk idx: %d, hlos clk rate %lld", + rc, soc_info->src_clk_idx, cesta_clk_rate_high); + return rc; + } + + highest_level = max(clk_level_high, clk_lvl); + rc = cam_soc_util_get_valid_clk_rate(soc_info, highest_level, + &applied_rate); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in getting valid clk rate to apply rc: %d", rc); + return rc; + } + + CAM_DBG(CAM_CPAS, + "Highest valid lvl: %d, applying corresponding rate %lld", + highest_level, applied_rate); + rc = cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, + applied_rate, 0); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in setting camnoc axi clk applied rate:[%lld] rc:%d", + applied_rate, rc); + return rc; + } + } else { + rc = cam_soc_util_set_clk_rate_level(soc_info, cesta_client_idx, + clk_level_high, clk_level_low, do_not_set_src_clk); + if (rc) { + CAM_ERR(CAM_CPAS, + "Failed in scaling clock rate level %d for cpas", + clk_level_high); + return rc; + } + } + break; + + case CAM_ICC_VOTE: + msg_icc_vote = (struct cam_msg_icc_vote *)&cpas_msg->data[0]; + scnprintf(name, sizeof(name), "%s", msg_icc_vote->name); + ab = msg_icc_vote->ab; + ib = msg_icc_vote->ib; + + CAM_DBG(CAM_CPAS, "name %s, ab %llu ib %llu", name, ab, ib); + + if (!strcmp(name, cpas_core->ahb_bus_client.common_data.name)) { + bus_client = (struct cam_soc_bus_client *) + cpas_core->ahb_bus_client.soc_bus_client; + goto update_bw; + } + + for (i = 0; i < cpas_core->num_axi_ports; i++) { + if (!strcmp(name, cpas_core->axi_port[i].bus_client.common_data.name)) { + bus_client = (struct cam_soc_bus_client *) + cpas_core->axi_port[i].bus_client.soc_bus_client; + goto update_bw; + } + } + + for (i = 0; i < cpas_core->num_camnoc_axi_ports; i++) { + if (!strcmp(name, + cpas_core->camnoc_axi_port[i].bus_client.common_data.name)) { + bus_client = (struct cam_soc_bus_client *) + cpas_core->camnoc_axi_port[i].bus_client.soc_bus_client; + break; + } + } + +update_bw: + if (bus_client == NULL) { + CAM_ERR(CAM_CPAS, "Invalid bus 
client name %s", name); + return -EINVAL; + } + + rc = cam_soc_bus_client_update_bw(bus_client, ab, ib, CAM_SOC_BUS_PATH_DATA_HLOS); + if (rc) + CAM_ERR(CAM_CPAS, "bus client %s ab %llu ib %llu update bw failed %d", + name, ab, ib, rc); + break; + + default: + rc = -EINVAL; + CAM_ERR(CAM_CPAS, "Invalid msg type:%d", msg_type); + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_soc.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_soc.h new file mode 100644 index 0000000000..9b7fdee485 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cam_cpas_soc.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CPAS_SOC_H_ +#define _CAM_CPAS_SOC_H_ + +#include +#include "cam_soc_util.h" +#include "cam_cpas_hw.h" + +#define CAM_REGULATOR_LEVEL_MAX 16 +#define CAM_CPAS_MAX_TREE_NODES 63 +#define CAM_CPAS_MAX_FUSE_FEATURE 10 + +/** + * enum cam_cpas_num_subparts_types - Enum for types of number of camera subparts + */ +enum cam_cpas_num_subparts_types { + CAM_CPAS_AVAILABLE_NUM_SUBPARTS, + CAM_CPAS_FUNCTIONAL_NUM_SUBPARTS, + CAM_CPAS_NUM_SUBPARTS_MAX_TYPES, +}; + +/** + * struct cpas_tree_node: Generic cpas tree node for BW voting + * + * @cell_idx: Index to identify node from device tree and its parent + * @level_idx: Index to identify at what level the node is present + * @axi_port_idx_arr: Index to identify which axi port to vote the consolidated bw. + * It can point to multiple indexes in case of camera DRV + * @drv_voting_idx: Specifies the index to which the child node would finally vote. 
+ * @camnoc_axi_port_idx: Index to find which axi port to vote consolidated bw + * @path_data_type: Traffic type info from device tree (ife-vid, ife-disp etc) + * @path_trans_type: Transaction type info from device tree (rd, wr) + * @merge_type: Traffic merge type (calculation info) from device tree + * @bus_width_factor: Factor for accounting bus width in CAMNOC bw calculation + * @bw_info: AXI BW info for all drv ports + * @is_rt_node: Indicates if a tree node is representing a RT bus node/port + * @camnoc_max_needed: If node is needed for CAMNOC BW calculation then true + * @constituent_paths: Constituent paths presence info from device tree + * Ex: For CAM_CPAS_PATH_DATA_IFE_UBWC_STATS, index corresponding to + * CAM_CPAS_PATH_DATA_IFE_VID, CAM_CPAS_PATH_DATA_IFE_DISP and + * CAM_CPAS_PATH_DATA_IFE_STATS + * @tree_dev_node: Device node from devicetree for current tree node + * @parent_node: Pointer to node one or more level above the current level + * (starting from end node of cpas client) + * @pri_lut_low_offset: Register offset value for priority lut low. + * Valid only for level1 nodes (representing NIUs) + * @pri_lut_high_offset: Register offset value for priority lut high. + * Valid only for level1 nodes (representing NIUs) + * @niu_size: Size of NIU that this node represents. 
Size in KB + * @curr_priority_low: New calculated priority lut low values + * @curr_priority_high: New calculated priority lut high values + * @applied_priority_low: Currently applied priority lut low values + * @applied_priority_high: Currently applied priority lut high values + * + */ +struct cam_cpas_tree_node { + uint32_t cell_idx; + int level_idx; + int *axi_port_idx_arr; + int drv_voting_idx; + int camnoc_axi_port_idx; + const char *node_name; + uint32_t path_data_type; + uint32_t path_trans_type; + uint32_t merge_type; + uint32_t bus_width_factor; + struct cam_cpas_axi_bw_info *bw_info; + bool is_rt_node; + bool camnoc_max_needed; + bool constituent_paths[CAM_CPAS_PATH_DATA_MAX]; + struct device_node *tree_dev_node; + struct cam_cpas_tree_node *parent_node; + uint32_t pri_lut_low_offset; + uint32_t pri_lut_high_offset; + uint32_t niu_size; + uint32_t curr_priority_low; + uint32_t curr_priority_high; + uint32_t applied_priority_low; + uint32_t applied_priority_high; +}; + +/** + * struct cam_cpas_feature_info : CPAS fuse feature info + * @feature: Identifier for feature + * @type: Type of feature + * @value: Fuse value + * @enable: Feature enable or disable + * @hw_map: Each bit position indicates if the hw_id for the feature + */ + +struct cam_cpas_feature_info { + uint32_t feature; + uint32_t type; + uint32_t value; + bool enable; + uint32_t hw_map; +}; + +/** + * struct cam_sys_cache_local_info : camera cache info saving locally + * + * @type: Cache type, For example cache types are + * CAM_LLCC_SMALL_1/CAM_LLCC_SMALL_2/CAM_LLCC_LARGE_1/ .... 
+ * CAM_LLCC_IPE_SRT_IP/CAM_LLCC_IPE_RT_RF + * @staling_distance: staling_distance + * @mode: llcc staling mode params, possible allowed values + * CAM_LLCC_STALING_MODE_CAPACITY/CAM_LLCC_STALING_MODE_NOTIFY + * @op_type: cache operation type, possible allowed values are + * CAM_LLCC_NOTIFY_STALING_EVICT/CAM_LLCC_NOTIFY_STALING_FORGET + */ +struct cam_sys_cache_local_info { + uint32_t type; + uint32_t staling_distance; + uint32_t mode; + uint32_t op_type; +}; + +/** + * struct cam_sys_cache_info : Last level camera cache info + * + * @ref_cnt: Ref cnt activate/deactivate cache + * @type: Cache type, For example cache types are + * CAM_LLCC_SMALL_1/CAM_LLCC_SMALL_2/CAM_LLCC_LARGE_1/ .... + * CAM_LLCC_IPE_SRT_IP/CAM_LLCC_IPE_RT_RF + * @uid: Client user ID + * @size: Cache size + * @scid: Slice ID + * @slic_desc: Slice descriptor + * @staling_distance: staling_distance + * @mode: camera llcc's staling mode params, possible allowed values + * CAM_LLCC_STALING_MODE_CAPACITY/CAM_LLCC_STALING_MODE_NOTIFY + * @op_type: cache operation type, possible allowed values are + * CAM_LLCC_NOTIFY_STALING_EVICT/CAM_LLCC_NOTIFY_STALING_FORGET + * @concur concurrent usage is supported or not for a scid + */ +struct cam_sys_cache_info { + uint32_t ref_cnt; + uint32_t type; + uint32_t uid; + size_t size; + int32_t scid; + const char *name; + struct llcc_slice_desc *slic_desc; + uint32_t staling_distance; + int32_t mode; + int32_t op_type; + uint32_t concur; +}; + + +/** + * struct cam_cpas_smart_qos_info : Smart QOS info + * + * @rt_wr_priority_min: Minimum priority value for rt write nius + * @rt_wr_priority_max: Maximum priority value for rt write nius + * @rt_wr_priority_clamp: Clamp priority value for rt write nius + * @rt_wr_slope_factor: Slope factor value for rt write nius + * @leaststressed_clamp_th: Leaststressed clamp threshold value for rt write nius + * @moststressed_clamp_th: Moststressed clamp threshold value for rt write nius + * @highstress_indicator_th: Highstress 
indicator threshold value for rt write nius + * @lowstress_indicator_th: Lowstress indicator threshold value for rt write nius + * @bw_ratio_scale_factor: BW ratio scale factor value for rt write nius + * @num_rt_wr_nius: Number of rt write nius + * @rt_wr_niu_node: List of level1 nodes representing rt write nius + */ +struct cam_cpas_smart_qos_info { + uint8_t rt_wr_priority_min; + uint8_t rt_wr_priority_max; + uint8_t rt_wr_priority_clamp; + uint8_t rt_wr_slope_factor; + uint8_t leaststressed_clamp_th; + uint8_t moststressed_clamp_th; + uint8_t highstress_indicator_th; + uint8_t lowstress_indicator_th; + uint8_t bw_ratio_scale_factor; + uint8_t num_rt_wr_nius; + struct cam_cpas_tree_node *rt_wr_niu_node[CAM_CPAS_MAX_RT_WR_NIU_NODES]; +}; + +/** + * struct cam_cpas_domain_id_mapping : Domain id mapping + * + * @domain_type: Domain type, currently defined as two, + * secure/non-secure. This will be expanded + * later to more types, and correspnding ID + * @mapping_id: ID of domain type + */ +struct cam_cpas_domain_id_mapping { + uint32_t domain_type; + uint32_t mapping_id; +}; + +/** + * struct cam_cpas_domain_id_info : Stores all information related + * to domain-id support + * @domain_id_entries: Stores mapping between domain types and their IDs + * @num_domain_ids: Num of domain id types found from dtsi + * @domain_id_supported: Whether domain id is supported + */ +struct cam_cpas_domain_id_info { + struct cam_cpas_domain_id_mapping *domain_id_entries; + uint32_t num_domain_ids; + bool domain_id_supported; +}; + +/** + * struct cam_cpas_domain_id_support_clks : Stores all information + * related to clocks + * needed to turn on SWIs + * for domain id programming + * @clk_names: Clock names as declared in DT + * @clk_idx: Corresponding clk index as declared in DT + * @number_clks: Number of clocks declared to turn all CSIDs + */ +struct cam_cpas_domain_id_support_clks { + const char *clk_names[CAM_SOC_MAX_OPT_CLK]; + int32_t clk_idx[CAM_SOC_MAX_OPT_CLK]; + int 
number_clks; +}; + +/** + * struct cam_cpas_soc_irq_data: irq data to be passed in irq handler from ISR + * + * @cpas_hw: cpas hw info + * @camnoc_type: type of camnoc associated with the irq + * + */ +struct cam_cpas_soc_irq_data { + struct cam_hw_info *cpas_hw; + enum cam_camnoc_hw_type camnoc_type; +}; + +/** + * struct cam_cpas_sysfs_info - cpas sysfs info + * + * @kobj: Kobj for camera directory + * @num_ifes: Number of available and functional IFEs + * @num_ife_lites: Number of available and functional IFE-LITEs + * @num_sfes: Number of available and functional SFEs + * @num_custom: Number of available and functional CUSTOM + */ +struct cam_cpas_sysfs_info { + struct kobject *kobj; + uint32_t num_ifes[CAM_CPAS_NUM_SUBPARTS_MAX_TYPES]; + uint32_t num_ife_lites[CAM_CPAS_NUM_SUBPARTS_MAX_TYPES]; + uint32_t num_sfes[CAM_CPAS_NUM_SUBPARTS_MAX_TYPES]; + uint32_t num_custom[CAM_CPAS_NUM_SUBPARTS_MAX_TYPES]; +}; + +/** + * struct cam_cpas_private_soc : CPAS private DT info + * + * @arch_compat: ARCH compatible string + * @client_id_based: Whether clients are id based + * @bus_icc_based: Interconnect based bus interaction + * @use_cam_icc_path_str: Indicates if AXI path from interconnect is taken based on + * cam specified property + * @num_clients: Number of clients supported + * @client_name: Client names + * @tree_node: Array of pointers to all tree nodes required to calculate + * axi bw, arranged with help of cell index in device tree + * @camera_bus_node: Device tree node from cpas node + * @level_node: Device tree node for each level in camera_bus_node + * @num_vdd_ahb_mapping : Number of vdd to ahb level mapping supported + * @vdd_ahb : AHB level mapping info for the supported vdd levels + * @control_camnoc_axi_clk : Whether CPAS driver need to set camnoc axi clk freq + * @camnoc_bus_width : CAMNOC Bus width + * @camnoc_axi_clk_bw_margin : BW Margin in percentage to add while calculating + * camnoc axi clock + * @camnoc_axi_min_ib_bw: Min camnoc BW which varies 
based on target + * @cam_max_rt_axi_bw: Max axi BW in bytes which varies based on target + * @fuse_info: fuse information + * @sysfs_info: Camera subparts sysfs information + * @rpmh_info: RPMH BCM info + * @num_feature_info: number of feature_info entries + * @feature_info: Structure for storing feature information + * @num_caches: Number of last level caches + * @part_info: Camera Hw subpart info + * @llcc_info: Cache info + * @enable_secure_qos_update: whether to program QoS securely on current chipset + * @enable_smart_qos: Whether to enable Smart QoS mechanism on current chipset + * @enable_cam_ddr_drv: Whether to enable Camera DDR DRV on current chipset + * @enable_cam_clk_drv: Whether to enable Camera Clk DRV on current chipset + * @smart_qos_info: Pointer to smart qos info + * @icp_clk_index: Index of optional icp clk + * @domain_id_info: Stores all information related to domain id support + * @domain_id_clks: All clock related information for domain id support + * @irq_data: array of data for each irq line to be passed in irq handler + */ +struct cam_cpas_private_soc { + const char *arch_compat; + bool client_id_based; + bool bus_icc_based; + bool use_cam_icc_path_str; + uint32_t num_clients; + const char *client_name[CAM_CPAS_MAX_CLIENTS]; + struct cam_cpas_tree_node *tree_node[CAM_CPAS_MAX_TREE_NODES]; + struct device_node *camera_bus_node; + struct device_node *level_node[CAM_CPAS_MAX_TREE_LEVELS]; + uint32_t num_vdd_ahb_mapping; + struct cam_cpas_vdd_ahb_mapping vdd_ahb[CAM_REGULATOR_LEVEL_MAX]; + bool control_camnoc_axi_clk; + uint32_t camnoc_bus_width; + uint32_t camnoc_axi_clk_bw_margin; + uint64_t camnoc_axi_min_ib_bw; + uint64_t cam_max_rt_axi_bw; + struct cam_cpas_fuse_info fuse_info; + struct cam_cpas_sysfs_info sysfs_info; + uint32_t rpmh_info[CAM_RPMH_BCM_INFO_MAX]; + uint32_t num_feature_info; + struct cam_cpas_feature_info feature_info[CAM_CPAS_MAX_FUSE_FEATURE]; + uint32_t num_caches; + uint32_t part_info; + struct cam_sys_cache_info 
*llcc_info; + bool enable_smart_qos; + bool enable_cam_ddr_drv; + bool enable_cam_clk_drv; + bool enable_secure_qos_update; + struct cam_cpas_smart_qos_info *smart_qos_info; + int32_t icp_clk_index; + struct cam_cpas_domain_id_info domain_id_info; + struct cam_cpas_domain_id_support_clks *domain_id_clks; + struct cam_cpas_soc_irq_data *irq_data; +}; + +void cam_cpas_dump_tree_vote_info(struct cam_hw_info *cpas_hw, + const struct cam_cpas_tree_node *tree_node, + const char *identifier, int ddr_drv_idx, int cesta_drv_idx); +void cam_cpas_dump_full_tree_state(struct cam_hw_info *cpas_hw, const char *identifier); + +void cam_cpas_util_debug_parse_data(struct cam_cpas_private_soc *soc_private); +void cam_cpas_dump_cons_axi_vote_info( + const struct cam_cpas_client *cpas_client, + const char *identifier, + struct cam_axi_consolidate_vote *axi_vote); +void cam_cpas_dump_axi_vote_info( + const struct cam_cpas_client *cpas_client, + const char *identifier, + struct cam_axi_vote *axi_vote); +int cam_cpas_node_tree_cleanup(struct cam_cpas *cpas_core, + struct cam_cpas_private_soc *soc_private); +int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t vfe_irq_handler, struct cam_hw_info *cpas_hw); +int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info); +int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info, + enum cam_vote_level default_level); +int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info, + bool disable_clocks, bool disable_irq); +int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info); +int cam_cpas_vmrm_callback_handler(void *cb_data, void *msg, uint32_t size); +#endif /* _CAM_CPAS_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/camss_top/cam_camsstop_hw.c b/qcom/opensource/camera-kernel/drivers/cam_cpas/camss_top/cam_camsstop_hw.c new file mode 100644 index 0000000000..1c20d079fc --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_cpas/camss_top/cam_camsstop_hw.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_cpas_hw_intf.h" +#include "cam_cpas_hw.h" +#include "cam_cpas_soc.h" + +int cam_camsstop_get_hw_info(struct cam_hw_info *cpas_hw, + struct cam_cpas_hw_caps *hw_caps) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CAMSS]; + uint32_t reg_value; + + if (reg_indx == -1) + return -EINVAL; + + hw_caps->camera_family = CAM_FAMILY_CAMERA_SS; + + reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0); + hw_caps->camera_version.major = + CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c); + hw_caps->camera_version.minor = + CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10); + hw_caps->camera_version.incr = + CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0); + + CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d", + hw_caps->camera_family, hw_caps->camera_version.major, + hw_caps->camera_version.minor, hw_caps->camera_version.incr); + + return 0; +} + +int cam_camsstop_setup_regbase_indices(struct cam_hw_soc_info *soc_info, + int32_t regbase_index[], int32_t num_reg_map) +{ + uint32_t index; + int rc; + + if (num_reg_map > CAM_CPAS_REG_MAX) { + CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map); + return -EINVAL; + } + + if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) { + CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d", + soc_info->num_mem_block); + return -EINVAL; + } + + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "cam_camss", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[CAM_CPAS_REG_CAMSS] = index; + } else { + CAM_ERR(CAM_CPAS, "regbase not 
found for CAM_CPAS_REG_CAMSS"); + return -EINVAL; + } + + return 0; +} + +int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops) +{ + if (!internal_ops) { + CAM_ERR(CAM_CPAS, "invalid NULL param"); + return -EINVAL; + } + + internal_ops->get_hw_info = cam_camsstop_get_hw_info; + internal_ops->init_hw_version = NULL; + internal_ops->handle_irq = NULL; + internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices; + internal_ops->power_on = NULL; + internal_ops->power_off = NULL; + internal_ops->setup_qos_settings = NULL; + internal_ops->print_poweron_settings = NULL; + internal_ops->qchannel_handshake = NULL; + internal_ops->set_tpg_mux_sel = NULL; + internal_ops->handle_reset_res_control = NULL; + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cam_cpastop_hw.c b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cam_cpastop_hw.c new file mode 100644 index 0000000000..e2e32b3661 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cam_cpastop_hw.c @@ -0,0 +1,1740 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include + +#include "cam_compat.h" +#include "cam_cpas_hw_intf.h" +#include "cam_cpas_hw.h" +#include "cam_cpastop_hw.h" +#include "cam_io_util.h" +#include "cam_cpas_soc.h" +#include "cpastop100.h" +#include "cpastop_v150_100.h" +#include "cpastop_v170_200.h" +#include "cpastop_v170_110.h" +#include "cpastop_v175_100.h" +#include "cpastop_v175_101.h" +#include "cpastop_v175_120.h" +#include "cpastop_v175_130.h" +#include "cpastop_v480_100.h" +#include "cpastop_v480_custom.h" +#include "cpastop_v580_100.h" +#include "cpastop_v580_custom.h" +#include "cpastop_v540_100.h" +#include "cpastop_v520_100.h" +#include "cpastop_v545_100.h" +#include "cpastop_v570_100.h" +#include "cpastop_v570_200.h" +#include "cpastop_v680_100.h" +#include "cpastop_v680_110.h" +#include "cpastop_v165_100.h" +#include "cpastop_v780_100.h" +#include "cpastop_v640_200.h" +#include "cpastop_v880_100.h" +#include "cpastop_v975_100.h" +#include "cpastop_v970_110.h" +#include "cpastop_v980_100.h" +#include "cpastop_v1080_100.h" +#include "cam_req_mgr_workq.h" +#include "cam_common_util.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" + +struct cam_camnoc_info *camnoc_info[CAM_CAMNOC_HW_TYPE_MAX]; +struct cam_cpas_info *cpas_info; +struct cam_cpas_camnoc_qchannel *qchannel_info; +struct cam_cpas_top_regs *cpas_top_info; + +#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE)) + struct completion test_irq_hw_complete[CAM_CAMNOC_HW_TYPE_MAX]; +#endif + +#define CAMNOC_SLAVE_MAX_ERR_CODE 7 +static const char * const camnoc_slave_err_code[] = { + "Target Error", /* err code 0 */ + "Address decode error", /* err code 1 */ + "Unsupported request", /* err code 2 */ + "Disconnected target", /* err code 3 */ + "Security violation", /* err code 4 */ + "Hidden security violation", /* err code 5 */ + "Timeout Error", /* err code 6 */ + "Unknown Error", /* unknown err code */ +}; + +static const uint32_t cam_cpas_hw_version_map + 
[CAM_CPAS_CAMERA_VERSION_ID_MAX][CAM_CPAS_VERSION_ID_MAX] = { + /* for camera_150 */ + { + CAM_CPAS_TITAN_150_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_170 */ + { + CAM_CPAS_TITAN_170_V100, + 0, + CAM_CPAS_TITAN_170_V110, + CAM_CPAS_TITAN_170_V120, + 0, + CAM_CPAS_TITAN_170_V200, + }, + /* for camera_175 */ + { + CAM_CPAS_TITAN_175_V100, + CAM_CPAS_TITAN_175_V101, + 0, + CAM_CPAS_TITAN_175_V120, + CAM_CPAS_TITAN_175_V130, + 0, + }, + /* for camera_480 */ + { + CAM_CPAS_TITAN_480_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_580 */ + { + CAM_CPAS_TITAN_580_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_520 */ + { + CAM_CPAS_TITAN_520_V100, + 0, + 0, + 0, + 0, + 0, + + }, + /* for camera_540 */ + { + CAM_CPAS_TITAN_540_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_545 */ + { + CAM_CPAS_TITAN_545_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_570 */ + { + CAM_CPAS_TITAN_570_V100, + 0, + 0, + 0, + 0, + CAM_CPAS_TITAN_570_V200, + }, + /* for camera_680 */ + { + CAM_CPAS_TITAN_680_V100, + 0, + CAM_CPAS_TITAN_680_V110, + 0, + 0, + 0, + }, + /* for camera_165 */ + { + CAM_CPAS_TITAN_165_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_780 */ + { + CAM_CPAS_TITAN_780_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_640 */ + { + 0, + 0, + 0, + 0, + 0, + CAM_CPAS_TITAN_640_V200, + }, + /* for camera_880 */ + { + CAM_CPAS_TITAN_880_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_980 */ + { + CAM_CPAS_TITAN_980_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_1080 */ + { + CAM_CPAS_TITAN_1080_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_975 */ + { + CAM_CPAS_TITAN_975_V100, + 0, + 0, + 0, + 0, + 0, + }, + /* for camera_970 */ + { + 0, + 0, + CAM_CPAS_TITAN_970_V110, + 0, + 0, + 0, + }, +}; + +static char *cam_cpastop_get_camnoc_name(enum cam_camnoc_hw_type type) +{ + switch (type) { + case CAM_CAMNOC_HW_COMBINED: + return "CAMNOC_COMBINED"; + case CAM_CAMNOC_HW_RT: + return "CAMNOC_RT"; + case CAM_CAMNOC_HW_NRT: + return 
"CAMNOC_NRT"; + default: + return "Invalid CAMNOC"; + } +} + +static int cam_cpas_translate_camera_cpas_version_id( + uint32_t cam_version, + uint32_t cpas_version, + uint32_t *cam_version_id, + uint32_t *cpas_version_id) +{ + + switch (cam_version) { + + case CAM_CPAS_CAMERA_VERSION_150: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_150; + break; + + case CAM_CPAS_CAMERA_VERSION_170: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_170; + break; + + case CAM_CPAS_CAMERA_VERSION_175: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_175; + break; + + case CAM_CPAS_CAMERA_VERSION_480: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_480; + break; + + case CAM_CPAS_CAMERA_VERSION_520: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_520; + break; + + case CAM_CPAS_CAMERA_VERSION_540: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_540; + break; + + case CAM_CPAS_CAMERA_VERSION_580: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_580; + break; + + case CAM_CPAS_CAMERA_VERSION_545: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_545; + break; + + case CAM_CPAS_CAMERA_VERSION_570: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_570; + break; + + case CAM_CPAS_CAMERA_VERSION_680: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_680; + break; + + case CAM_CPAS_CAMERA_VERSION_165: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_165; + break; + + case CAM_CPAS_CAMERA_VERSION_780: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_780; + break; + case CAM_CPAS_CAMERA_VERSION_640: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_640; + break; + case CAM_CPAS_CAMERA_VERSION_880: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_880; + break; + case CAM_CPAS_CAMERA_VERSION_975: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_975; + break; + case CAM_CPAS_CAMERA_VERSION_970: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_970; + break; + case CAM_CPAS_CAMERA_VERSION_980: + *cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_980; + break; + case CAM_CPAS_CAMERA_VERSION_1080: + *cam_version_id = 
CAM_CPAS_CAMERA_VERSION_ID_1080; + break; + default: + CAM_ERR(CAM_CPAS, "Invalid cam version %u", + cam_version); + return -EINVAL; + } + + switch (cpas_version) { + + case CAM_CPAS_VERSION_100: + *cpas_version_id = CAM_CPAS_VERSION_ID_100; + break; + + case CAM_CPAS_VERSION_101: + *cpas_version_id = CAM_CPAS_VERSION_ID_101; + break; + case CAM_CPAS_VERSION_110: + *cpas_version_id = CAM_CPAS_VERSION_ID_110; + break; + + case CAM_CPAS_VERSION_120: + *cpas_version_id = CAM_CPAS_VERSION_ID_120; + break; + + case CAM_CPAS_VERSION_130: + *cpas_version_id = CAM_CPAS_VERSION_ID_130; + break; + + case CAM_CPAS_VERSION_200: + *cpas_version_id = CAM_CPAS_VERSION_ID_200; + break; + + default: + CAM_ERR(CAM_CPAS, "Invalid cpas version %u", + cpas_version); + return -EINVAL; + } + return 0; +} + +static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw, + struct cam_cpas_hw_caps *hw_caps) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP]; + uint32_t cam_version, cpas_version; + uint32_t cam_version_id, cpas_version_id; + int rc; + + if (reg_indx == -1) { + CAM_ERR(CAM_CPAS, "Invalid arguments"); + return -EINVAL; + } + + hw_caps->camera_family = CAM_FAMILY_CPAS_SS; + + if (!cam_vmrm_no_register_read_on_bind()) { + cam_version = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0); + } else { + rc = of_property_read_u32(soc_info->pdev->dev.of_node, + "cam-version", &cam_version); + if (rc) { + CAM_ERR(CAM_CPAS, "no cam version"); + return rc; + } + } + + hw_caps->camera_version.major = + CAM_BITS_MASK_SHIFT(cam_version, 0xff0000, 0x10); + hw_caps->camera_version.minor = + CAM_BITS_MASK_SHIFT(cam_version, 0xff00, 0x8); + hw_caps->camera_version.incr = + CAM_BITS_MASK_SHIFT(cam_version, 0xff, 0x0); + + if (!cam_vmrm_no_register_read_on_bind()) { + cpas_version = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4); + } 
else { + rc = of_property_read_u32(soc_info->pdev->dev.of_node, + "cpas-version", &cpas_version); + if (rc) { + CAM_ERR(CAM_CPAS, "no cpas version"); + return rc; + } + } + + hw_caps->cpas_version.major = + CAM_BITS_MASK_SHIFT(cpas_version, 0xf0000000, 0x1c); + hw_caps->cpas_version.minor = + CAM_BITS_MASK_SHIFT(cpas_version, 0xfff0000, 0x10); + hw_caps->cpas_version.incr = + CAM_BITS_MASK_SHIFT(cpas_version, 0xffff, 0x0); + + CAM_DBG(CAM_CPAS, "Family %d, version %d.%d.%d, cpas %d.%d.%d", + hw_caps->camera_family, hw_caps->camera_version.major, + hw_caps->camera_version.minor, hw_caps->camera_version.incr, + hw_caps->cpas_version.major, hw_caps->cpas_version.minor, + hw_caps->cpas_version.incr); + + soc_info->hw_version = CAM_CPAS_TITAN_NONE; + rc = cam_cpas_translate_camera_cpas_version_id(cam_version, + cpas_version, &cam_version_id, &cpas_version_id); + if (rc) { + CAM_ERR(CAM_CPAS, "Invalid Version, Camera: 0x%x CPAS: 0x%x", + cam_version, cpas_version); + return -EINVAL; + } + + soc_info->hw_version = + cam_cpas_hw_version_map[cam_version_id][cpas_version_id]; + + CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_info->hw_version); + + return 0; +} + +static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info, + int32_t regbase_index[], int32_t num_reg_map) +{ + uint32_t index; + int rc; + + if (num_reg_map > CAM_CPAS_REG_MAX) { + CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map); + return -EINVAL; + } + + if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) { + CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d", + soc_info->num_mem_block); + return -EINVAL; + } + + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "cam_cpas_top", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[CAM_CPAS_REG_CPASTOP] = index; + } else if (rc) { + CAM_ERR(CAM_CPAS, "failed to get index for CPASTOP rc=%d", rc); + return rc; + } else { + CAM_ERR(CAM_CPAS, "regbase not found for CPASTOP, rc=%d, %d %d", + rc, 
index, num_reg_map); + return -EINVAL; + } + + /* + * camnoc reg base represents reg base for legacy camnoc, this becomes optional since + * some targets have camnoc split into RT/NRT + */ + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "cam_camnoc", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[CAM_CPAS_REG_CAMNOC] = index; + CAM_DBG(CAM_CPAS, "regbase found for CAMNOC, rc=%d, %d %d", + rc, index, num_reg_map); + } else { + CAM_DBG(CAM_CPAS, "regbase not found for CAMNOC, rc=%d, %d %d", + rc, index, num_reg_map); + regbase_index[CAM_CPAS_REG_CAMNOC] = -1; + } + + /* optional - reg base for a target where camnoc reg is split into two domains: rt/nrt */ + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "cam_camnoc_rt", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[CAM_CPAS_REG_CAMNOC_RT] = index; + CAM_DBG(CAM_CPAS, "regbase found for CAMNOC RT, rc=%d, %d %d", + rc, index, num_reg_map); + } else { + CAM_DBG(CAM_CPAS, "regbase not found for CAMNOC RT, rc=%d, %d %d", + rc, index, num_reg_map); + regbase_index[CAM_CPAS_REG_CAMNOC_RT] = -1; + } + + /* optional - reg base for a target where camnoc reg is split into two domains: rt/nrt */ + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "cam_camnoc_nrt", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[CAM_CPAS_REG_CAMNOC_NRT] = index; + CAM_DBG(CAM_CPAS, "regbase found for CAMNOC NRT, rc=%d, %d %d", + rc, index, num_reg_map); + } else { + CAM_DBG(CAM_CPAS, "regbase not found for CAMNOC NRT, rc=%d, %d %d", + rc, index, num_reg_map); + regbase_index[CAM_CPAS_REG_CAMNOC_NRT] = -1; + } + + /* optional - rpmh register map */ + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "cam_rpmh", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[CAM_CPAS_REG_RPMH] = index; + 
CAM_DBG(CAM_CPAS, "regbase found for RPMH, rc=%d, %d %d", + rc, index, num_reg_map); + } else { + CAM_DBG(CAM_CPAS, "regbase not found for RPMH, rc=%d, %d %d", + rc, index, num_reg_map); + regbase_index[CAM_CPAS_REG_RPMH] = -1; + } + + /* optional - cesta register map */ + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "cam_cesta", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[CAM_CPAS_REG_CESTA] = index; + CAM_DBG(CAM_CPAS, "regbase found for cesta, rc=%d, %d %d", + rc, index, num_reg_map); + } else { + CAM_DBG(CAM_CPAS, "regbase not found for cesta, rc=%d, %d %d", + rc, index, num_reg_map); + regbase_index[CAM_CPAS_REG_CESTA] = -1; + } + + /* optional - secure register map */ + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "cam_cpas_secure", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[CAM_CPAS_REG_SECURE] = index; + CAM_DBG(CAM_CPAS, "regbase found for secure, rc=%d, %d %d", + rc, index, num_reg_map); + } else { + CAM_DBG(CAM_CPAS, "regbase not found for secure, rc=%d, %d %d", + rc, index, num_reg_map); + regbase_index[CAM_CPAS_REG_SECURE] = -1; + } + + return 0; +} + +static int cam_cpastop_handle_errlogger(int camnoc_idx, + struct cam_cpas *cpas_core, + struct cam_hw_soc_info *soc_info, + struct cam_camnoc_irq_slave_err_data *slave_err) +{ + uint8_t log_buffer[512]; + size_t buf_len = 0; + int regbase_idx = cpas_core->regbase_index[camnoc_info[camnoc_idx]->reg_base]; + int err_code_index = 0; + + if (!camnoc_info[camnoc_idx]->err_logger) { + CAM_ERR_RATE_LIMIT(CAM_CPAS, "Invalid err logger info"); + return -EINVAL; + } + + slave_err->mainctrl.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->mainctrl); + + slave_err->errvld.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errvld); + + slave_err->errlog0_low.value = 
cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog0_low); + + slave_err->errlog0_high.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog0_high); + + slave_err->errlog1_low.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog1_low); + + slave_err->errlog1_high.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog1_high); + + slave_err->errlog2_low.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog2_low); + + slave_err->errlog2_high.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog2_high); + + slave_err->errlog3_low.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog3_low); + + slave_err->errlog3_high.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog3_high); + + err_code_index = slave_err->errlog0_low.err_code; + if (err_code_index > CAMNOC_SLAVE_MAX_ERR_CODE) + err_code_index = CAMNOC_SLAVE_MAX_ERR_CODE; + + CAM_ERR_BUF(CAM_CPAS, log_buffer, 512, &buf_len, + "%s NoC Error Info: %s, MAINCTL_LOW = 0x%x, ERRVLD_LOW = 0x%x", + camnoc_slave_err_code[err_code_index], + camnoc_info[camnoc_idx]->camnoc_name, slave_err->mainctrl.value, + slave_err->errvld.value, log_buffer); + + CAM_ERR_BUF(CAM_CPAS, log_buffer, 512, &buf_len, + "ERRLOG0_LOW = 0x%x, ERRLOG0_HIGH = 0x%x, ERRLOG1_LOW = 0x%x, ERRLOG1_HIGH = 0x%x, ERRLOG2_LOW = 0x%x, ERRLOG2_HIGH = 0x%x, ERRLOG3_LOW = 0x%x, ERRLOG3_HIGH = 0x%x", + slave_err->errlog0_low.value, slave_err->errlog0_high.value, + slave_err->errlog1_low.value, slave_err->errlog1_high.value, + slave_err->errlog2_low.value, slave_err->errlog2_high.value, + slave_err->errlog3_low.value, 
slave_err->errlog3_high.value); + + CAM_ERR(CAM_CPAS, "%s", log_buffer); + + return 0; +} + +static int cam_cpastop_handle_ubwc_enc_err(int camnoc_idx, + struct cam_cpas *cpas_core, struct cam_hw_soc_info *soc_info, int i, + struct cam_camnoc_irq_ubwc_enc_data *enc_err) +{ + int regbase_idx = cpas_core->regbase_index[camnoc_info[camnoc_idx]->reg_base]; + + enc_err->encerr_status.value = + cam_io_r_mb(soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->irq_err[i].err_status.offset); + + /* Let clients handle the UBWC errors */ + CAM_DBG(CAM_CPAS, + "[%s] ubwc enc err [%d]: offset[0x%x] value[0x%x]", + camnoc_info[camnoc_idx]->camnoc_name, i, + camnoc_info[camnoc_idx]->irq_err[i].err_status.offset, + enc_err->encerr_status.value); + + return 0; +} + +static int cam_cpastop_handle_ubwc_dec_err(int camnoc_idx, + struct cam_cpas *cpas_core, struct cam_hw_soc_info *soc_info, int i, + struct cam_camnoc_irq_ubwc_dec_data *dec_err) +{ + int regbase_idx = cpas_core->regbase_index[camnoc_info[camnoc_idx]->reg_base]; + + dec_err->decerr_status.value = + cam_io_r_mb(soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->irq_err[i].err_status.offset); + + /* Let clients handle the UBWC errors */ + CAM_DBG(CAM_CPAS, + "[%s] ubwc dec err status [%d]: offset[0x%x] value[0x%x] thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d", + camnoc_info[camnoc_idx]->camnoc_name, i, + camnoc_info[camnoc_idx]->irq_err[i].err_status.offset, dec_err->decerr_status.value, + dec_err->decerr_status.thr_err, dec_err->decerr_status.fcl_err, + dec_err->decerr_status.len_md_err, dec_err->decerr_status.format_err); + + return 0; +} + +static int cam_cpastop_handle_ahb_timeout_err(int camnoc_idx, + struct cam_hw_info *cpas_hw, struct cam_camnoc_irq_ahb_timeout_data *ahb_err) +{ + CAM_ERR_RATE_LIMIT(CAM_CPAS, "[%s] ahb timeout error", + camnoc_info[camnoc_idx]->camnoc_name); + + return 0; +} + +#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && 
defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE)) +static int cam_cpastop_enable_test_irq(int camnoc_idx, + struct cam_hw_info *cpas_hw) +{ + int i; + + camnoc_info[camnoc_idx]->irq_sbm->sbm_enable.value |= + camnoc_info[camnoc_idx]->test_irq_info.sbm_enable_mask; + camnoc_info[camnoc_idx]->irq_sbm->sbm_clear.value |= + camnoc_info[camnoc_idx]->test_irq_info.sbm_clear_mask; + + for (i = 0; i < camnoc_info[camnoc_idx]->irq_err_size; i++) { + if (camnoc_info[camnoc_idx]->irq_err[i].irq_type == + CAM_CAMNOC_HW_IRQ_CAMNOC_TEST) + camnoc_info[camnoc_idx]->irq_err[i].enable = true; + } + + return 0; +} + +static int cam_cpastop_disable_test_irq(int camnoc_idx, + struct cam_hw_info *cpas_hw) +{ + int i; + + camnoc_info[camnoc_idx]->irq_sbm->sbm_enable.value &= + ~camnoc_info[camnoc_idx]->test_irq_info.sbm_enable_mask; + camnoc_info[camnoc_idx]->irq_sbm->sbm_clear.value &= + ~camnoc_info[camnoc_idx]->test_irq_info.sbm_clear_mask; + + for (i = 0; i < camnoc_info[camnoc_idx]->irq_err_size; i++) { + if (camnoc_info[camnoc_idx]->irq_err[i].irq_type == + CAM_CAMNOC_HW_IRQ_CAMNOC_TEST) + camnoc_info[camnoc_idx]->irq_err[i].enable = false; + } + + return 0; +} + +static void cam_cpastop_check_test_irq(int camnoc_idx, + struct cam_hw_info *cpas_hw, uint32_t irq_status) +{ + int i; + + for (i = 0; i < camnoc_info[camnoc_idx]->irq_err_size; i++) { + if ((camnoc_info[camnoc_idx]->irq_err[i].irq_type == + CAM_CAMNOC_HW_IRQ_CAMNOC_TEST) && + (irq_status & camnoc_info[camnoc_idx]->irq_err[i].sbm_port) && + (camnoc_info[camnoc_idx]->irq_err[i].enable)) { + complete(&test_irq_hw_complete[camnoc_idx]); + CAM_INFO(CAM_CPAS, "[%s] Test IRQ triggerred", + camnoc_info[camnoc_idx]->camnoc_name); + } + } +} +#endif + +static void cam_cpastop_enable_camnoc_irqs( + struct cam_hw_info *cpas_hw, int camnoc_idx) +{ + int i; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + + cpas_core->smmu_fault_handled = false; + + /* clear and enable all irq errors */ + for (i = 0; i < 
camnoc_info[camnoc_idx]->irq_err_size; i++) { + if (camnoc_info[camnoc_idx]->irq_err[i].enable) { + cam_cpas_util_reg_update(cpas_hw, camnoc_info[camnoc_idx]->reg_base, + &camnoc_info[camnoc_idx]->irq_err[i].err_clear); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[camnoc_idx]->reg_base, + &camnoc_info[camnoc_idx]->irq_err[i].err_enable); + } + } + + /* On poweron reset enable all error irqs applicable for the target */ + cam_cpas_util_reg_update(cpas_hw, camnoc_info[camnoc_idx]->reg_base, + &camnoc_info[camnoc_idx]->irq_sbm->sbm_enable); +} + +static void cam_cpastop_handle_camnoc_irqs(uint32_t irq_status, + struct cam_hw_info *cpas_hw, int camnoc_idx) +{ + int i, regbase_idx; + struct cam_cpas *cpas_core = cpas_hw->core_info; + uint32_t updated_sbm_mask = 0; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + enum cam_cpas_reg_base reg_base; + + reg_base = camnoc_info[camnoc_idx]->reg_base; + regbase_idx = cpas_core->regbase_index[reg_base]; + + for (i = 0; i < camnoc_info[camnoc_idx]->irq_err_size; i++) { + if ((camnoc_info[camnoc_idx]->irq_err[i].enable) && + (camnoc_info[camnoc_idx]->irq_err[i].sbm_port & irq_status)) { + /* Clear the error status */ + cam_cpas_util_reg_update(cpas_hw, reg_base, + &camnoc_info[camnoc_idx]->irq_err[i].err_clear); + updated_sbm_mask |= camnoc_info[camnoc_idx]->irq_err[i].sbm_port; + } + } + + /* Disable all serviced irqs, all disabled irqs are enabled only when CPAS restarts */ + cam_io_w(((~updated_sbm_mask) & (camnoc_info[camnoc_idx]->irq_sbm->sbm_enable.value)), + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->irq_sbm->sbm_enable.offset); +} + +static int cam_cpastop_reset_irq(uint32_t irq_status, + struct cam_hw_info *cpas_hw, int camnoc_idx) +{ +#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE)) + static int counter[CAM_CAMNOC_HW_TYPE_MAX] = {0}; + bool wait_for_irq = false; + + if (counter[camnoc_idx] == 0) { + CAM_INFO(CAM_CPAS, "Enabling %s test irq", + 
camnoc_info[camnoc_idx]->camnoc_name); + cam_cpastop_enable_test_irq(camnoc_idx, cpas_hw); + wait_for_irq = true; + init_completion(&test_irq_hw_complete[camnoc_idx]); + counter[camnoc_idx] = 1; + } else if (counter[camnoc_idx] == 1) { + CAM_INFO(CAM_CPAS, "Disabling %s test irq", + camnoc_info[camnoc_idx]->camnoc_name); + cam_cpastop_disable_test_irq(camnoc_idx, cpas_hw); + counter[camnoc_idx] = 2; + } +#endif + + if (!camnoc_info[camnoc_idx]->irq_sbm->sbm_enable.enable) + return 0; + + cam_cpas_util_reg_update(cpas_hw, camnoc_info[camnoc_idx]->reg_base, + &camnoc_info[camnoc_idx]->irq_sbm->sbm_clear); + + if (irq_status) + cam_cpastop_handle_camnoc_irqs(irq_status, cpas_hw, camnoc_idx); + else + /* poweron reset */ + cam_cpastop_enable_camnoc_irqs(cpas_hw, camnoc_idx); + +#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE)) + if (wait_for_irq) { + if (!cam_common_wait_for_completion_timeout(&test_irq_hw_complete[camnoc_idx], + msecs_to_jiffies(2000))) + CAM_ERR(CAM_CPAS, "[%s] CAMNOC Test IRQ line verification timed out", + camnoc_info[camnoc_idx]->camnoc_name); + CAM_INFO(CAM_CPAS, "[%s] IRQ test Success", + camnoc_info[camnoc_idx]->camnoc_name); + } +#endif + + return 0; +} + +static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core, + struct cam_cpas_irq_data *irq_data, bool force_notify) +{ + int i; + struct cam_cpas_client *cpas_client; + bool error_handled = false; + + CAM_DBG(CAM_CPAS, + "Notify CB : num_clients=%d, registered=%d, started=%d", + cpas_core->num_clients, cpas_core->registered_clients, + cpas_core->streamon_clients); + + for (i = 0; i < cpas_core->num_clients; i++) { + if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) { + cpas_client = cpas_core->cpas_client[i]; + if (cpas_client->data.cam_cpas_client_cb) { + CAM_DBG(CAM_CPAS, + "Calling client CB %d : %d", + i, irq_data->irq_type); + error_handled = + cpas_client->data.cam_cpas_client_cb( + cpas_client->data.client_handle, + cpas_client->data.userdata, + 
irq_data); + if (error_handled && !force_notify) + break; + } + } + } +} + +static void cam_cpastop_work(struct work_struct *work) +{ + struct cam_cpas_work_payload *payload; + struct cam_hw_info *cpas_hw; + struct cam_cpas *cpas_core; + struct cam_hw_soc_info *soc_info; + int i; + enum cam_camnoc_hw_irq_type irq_type; + struct cam_cpas_irq_data irq_data; + int camnoc_idx; + + payload = container_of(work, struct cam_cpas_work_payload, work); + if (!payload) { + CAM_ERR(CAM_CPAS, "NULL payload"); + return; + } + + camnoc_idx = payload->camnoc_idx; + cam_common_util_thread_switch_delay_detect( + "cam_cpas_workq", "schedule", cam_cpastop_work, + payload->workq_scheduled_ts, + CAM_WORKQ_SCHEDULE_TIME_THRESHOLD); + + cpas_hw = payload->hw; + cpas_core = (struct cam_cpas *) cpas_hw->core_info; + soc_info = &cpas_hw->soc_info; + + if (!atomic_inc_not_zero(&cpas_core->soc_access_count)) { + CAM_ERR(CAM_CPAS, "CPAS off"); + return; + } + + for (i = 0; i < camnoc_info[camnoc_idx]->irq_err_size; i++) { + if ((payload->irq_status & camnoc_info[camnoc_idx]->irq_err[i].sbm_port) && + (camnoc_info[camnoc_idx]->irq_err[i].enable)) { + irq_type = camnoc_info[camnoc_idx]->irq_err[i].irq_type; + CAM_ERR_RATE_LIMIT(CAM_CPAS, + "Error occurred, irq type=%d", irq_type); + memset(&irq_data, 0x0, sizeof(irq_data)); + irq_data.irq_type = (enum cam_camnoc_irq_type)irq_type; + + switch (irq_type) { + case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR: + cam_cpastop_handle_errlogger(camnoc_idx, cpas_core, soc_info, + &irq_data.u.slave_err); + break; + case CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR: + case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR: + case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR: + case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: + case CAM_CAMNOC_HW_IRQ_OFE_UBWC_WRITE_ENCODE_ERROR: + case CAM_CAMNOC_HW_IRQ_TFE_UBWC_ENCODE_ERROR: + cam_cpastop_handle_ubwc_enc_err(camnoc_idx, + cpas_core, soc_info, i, + &irq_data.u.enc_err); + break; + case CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR: + 
case CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR: + case CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR: + case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: + case CAM_CAMNOC_HW_IRQ_OFE_UBWC_READ_DECODE_ERROR: + cam_cpastop_handle_ubwc_dec_err(camnoc_idx, + cpas_core, soc_info, i, + &irq_data.u.dec_err); + break; + case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT: + cam_cpastop_handle_ahb_timeout_err(camnoc_idx, + cpas_hw, &irq_data.u.ahb_err); + break; + case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST: + CAM_INFO(CAM_CPAS, "TEST IRQ for %s", + camnoc_info[camnoc_idx]->camnoc_name); + break; + default: + CAM_ERR(CAM_CPAS, "Invalid IRQ type: %u", irq_type); + break; + } + + cam_cpastop_notify_clients(cpas_core, &irq_data, false); + + payload->irq_status &= + ~camnoc_info[camnoc_idx]->irq_err[i].sbm_port; + } + } + atomic_dec(&cpas_core->soc_access_count); + wake_up(&cpas_core->soc_access_count_wq); + CAM_DBG(CAM_CPAS, "soc_access_count=%d\n", atomic_read(&cpas_core->soc_access_count)); + + if (payload->irq_status) + CAM_ERR(CAM_CPAS, "%s IRQ not handled irq_status=0x%x", + camnoc_info[camnoc_idx]->camnoc_name, payload->irq_status); + + CAM_MEM_FREE(payload); +} + +static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data) +{ + struct cam_cpas_soc_irq_data *soc_irq_data = data; + struct cam_hw_info *cpas_hw = soc_irq_data->cpas_hw; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int regbase_idx, slave_err_irq_idx, camnoc_idx; + struct cam_cpas_work_payload *payload; + struct cam_cpas_irq_data irq_data; + enum cam_camnoc_hw_type camnoc_type; + + if (!atomic_inc_not_zero(&cpas_core->soc_access_count)) { + CAM_ERR(CAM_CPAS, "CPAS off"); + return IRQ_HANDLED; + } + + camnoc_type = soc_irq_data->camnoc_type; + if ((camnoc_type < 0) || (camnoc_type >= CAM_CAMNOC_HW_TYPE_MAX)) { + CAM_ERR(CAM_CPAS, "Invalid camnoc type: %d", camnoc_type); + goto done; + } + + camnoc_idx = cpas_core->camnoc_info_idx[camnoc_type]; + if 
((camnoc_idx < 0) || (camnoc_idx >= cpas_core->num_valid_camnoc)) { + CAM_ERR(CAM_CPAS, "Invalid camnoc idx: %d", camnoc_idx); + goto done; + } + + payload = CAM_MEM_ZALLOC(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC); + if (!payload) + goto done; + + regbase_idx = cpas_core->regbase_index[camnoc_info[camnoc_idx]->reg_base]; + + payload->irq_status = cam_io_r_mb(soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->irq_sbm->sbm_status.offset); + payload->camnoc_idx = camnoc_idx; + + CAM_DBG(CAM_CPAS, "IRQ callback of %s irq_status=0x%x", + camnoc_info[camnoc_idx]->camnoc_name, payload->irq_status); + +#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE)) + cam_cpastop_check_test_irq(camnoc_idx, cpas_hw, payload->irq_status); +#endif + + /* Clear irq */ + cam_cpastop_reset_irq(payload->irq_status, cpas_hw, camnoc_idx); + + slave_err_irq_idx = cpas_core->slave_err_irq_idx[camnoc_idx]; + + /* Check for slave error irq */ + if ((cpas_core->slave_err_irq_en[camnoc_idx]) && (payload->irq_status & + camnoc_info[camnoc_idx]->irq_err[slave_err_irq_idx].sbm_port)) { + struct cam_camnoc_irq_slave_err_data *slave_err = &irq_data.u.slave_err; + + irq_data.irq_type = (enum cam_camnoc_irq_type) + camnoc_info[camnoc_idx]->irq_err[slave_err_irq_idx].irq_type; + slave_err->errlog0_low.value = cam_io_r_mb( + soc_info->reg_map[regbase_idx].mem_base + + camnoc_info[camnoc_idx]->err_logger->errlog0_low); + + /* Validate address decode error */ + if (slave_err->errlog0_low.err_code == CAM_CAMNOC_ADDRESS_DECODE_ERROR) { + /* Notify clients about potential page fault */ + if (!cpas_core->smmu_fault_handled) { + /* Dump error logger */ + cam_cpastop_handle_errlogger(camnoc_idx, cpas_core, soc_info, + &irq_data.u.slave_err); + cam_cpastop_notify_clients(cpas_core, &irq_data, true); + } + cpas_core->smmu_fault_handled = true; + + /* Skip bh if no other irq is set */ + payload->irq_status &= + 
~camnoc_info[camnoc_idx]->irq_err[slave_err_irq_idx].sbm_port; + if (!payload->irq_status) { + CAM_MEM_FREE(payload); + goto done; + } + } + } + + payload->hw = cpas_hw; + INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work); + + payload->workq_scheduled_ts = ktime_get(); + queue_work(cpas_core->work_queue, &payload->work); + +done: + atomic_dec(&cpas_core->soc_access_count); + wake_up(&cpas_core->soc_access_count_wq); + + return IRQ_HANDLED; +} + +static int cam_cpastop_print_poweron_settings(struct cam_hw_info *cpas_hw) +{ + int i, j; + enum cam_cpas_reg_base reg_base; + struct cam_cpas *cpas_core = cpas_hw->core_info; + + for (i = 0; i < cpas_core->num_valid_camnoc; i++) { + CAM_INFO(CAM_CPAS, "QOS settings for %s :", + camnoc_info[i]->camnoc_name); + for (j = 0; j < camnoc_info[i]->specific_size; j++) { + if (camnoc_info[i]->specific[j].enable) { + CAM_INFO(CAM_CPAS, + "Reading QoS settings port: %d port name: %s", + camnoc_info[i]->specific[j].port_type, + camnoc_info[i]->specific[j].port_name); + reg_base = camnoc_info[i]->reg_base; + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].priority_lut_low); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].priority_lut_high); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].urgency); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].danger_lut); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].safe_lut); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].ubwc_ctl); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].flag_out_set0_low); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].dynattr_mainctl); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].qosgen_mainctl); + cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].qosgen_shaping_low); + 
cam_cpas_util_reg_read(cpas_hw, reg_base, + &camnoc_info[i]->specific[j].qosgen_shaping_high); + } + } + } + + return 0; +} + +static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw) +{ + int i, j, rc = 0; + struct cam_cpas_hw_errata_wa_list *errata_wa_list; + struct cam_cpas_hw_errata_wa *errata_wa; + struct cam_cpas *cpas_core = cpas_hw->core_info; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + bool errata_enabled = false; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int index; + + for (i = 0; i < cpas_core->num_valid_camnoc; i++) + cam_cpastop_reset_irq(0x0, cpas_hw, i); + + if (!soc_private->enable_secure_qos_update) { + for (i = 0; i < cpas_core->num_valid_camnoc; i++) { + CAM_DBG(CAM_CPAS, "QOS settings for %s :", + camnoc_info[i]->camnoc_name); + for (j = 0; j < camnoc_info[i]->specific_size; j++) { + if (camnoc_info[i]->specific[j].enable) { + CAM_DBG(CAM_CPAS, + "Updating QoS settings port: %d prot name: %s", + camnoc_info[i]->specific[j].port_type, + camnoc_info[i]->specific[j].port_name); + + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].priority_lut_low); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].priority_lut_high); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].urgency); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].danger_lut); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].safe_lut); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].ubwc_ctl); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].flag_out_set0_low); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].dynattr_mainctl); + 
cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].qosgen_mainctl); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].qosgen_shaping_low); + cam_cpas_util_reg_update(cpas_hw, camnoc_info[i]->reg_base, + &camnoc_info[i]->specific[j].qosgen_shaping_high); + } + } + + if (!errata_enabled) { + errata_wa_list = camnoc_info[i]->errata_wa_list; + if (errata_wa_list) { + errata_wa = &errata_wa_list->tcsr_camera_hf_sf_ares_glitch; + if (errata_wa->enable) { + cam_cpastop_scm_write(errata_wa); + errata_enabled = true; + } + } + } + } + } else { + CAM_DBG(CAM_CPAS, "Updating secure camera static QoS settings"); + rc = cam_update_camnoc_qos_settings(CAM_QOS_UPDATE_TYPE_STATIC, 0, NULL); + if (rc) { + CAM_ERR(CAM_CPAS, "Secure camera static OoS update failed: %d", rc); + return rc; + } + CAM_DBG(CAM_CPAS, "Updated secure camera static QoS settings"); + } + + /* + * Force set ife and cdm core to secure mode, used for debug only, + * register access is restricted in normal builds. 
+ */ + if (cpas_core->force_core_secure) { + index = cpas_core->regbase_index[CAM_CPAS_REG_SECURE]; + + if (index != -1) { + CAM_DBG(CAM_CPAS, + "Set reg offset: 0x%x value: 0x%x with regbase index: %d for secure", + cpas_info->hw_caps_secure_info->secure_access_ctrl_offset, + cpas_info->hw_caps_secure_info->secure_access_ctrl_value, + index); + + cam_io_w_mb(cpas_info->hw_caps_secure_info->secure_access_ctrl_value, + soc_info->reg_map[index].mem_base + + cpas_info->hw_caps_secure_info->secure_access_ctrl_offset); + } else { + CAM_WARN(CAM_CPAS, "Invalid CPAS secure regbase index: %d", + index); + } + } + + return 0; +} + +static int cam_cpastop_poweroff(struct cam_hw_info *cpas_hw) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int camnoc_index, rc = 0; + struct cam_cpas_hw_errata_wa_list *errata_wa_list; + struct cam_cpas_hw_errata_wa *errata_wa; + + errata_wa_list = camnoc_info[0]->errata_wa_list; + if (!errata_wa_list) + return 0; + + if (errata_wa_list->camnoc_flush_slave_pending_trans.enable) { + errata_wa = &errata_wa_list->camnoc_flush_slave_pending_trans; + camnoc_index = cpas_core->regbase_index[camnoc_info[0]->reg_base]; + + rc = cam_io_poll_value_wmask( + soc_info->reg_map[camnoc_index].mem_base + + errata_wa->data.reg_info.offset, + errata_wa->data.reg_info.value, + errata_wa->data.reg_info.mask, + CAM_CPAS_POLL_RETRY_CNT, + CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS); + if (rc) { + CAM_DBG(CAM_CPAS, + "camnoc flush slave pending trans failed"); + /* Do not return error, passthrough */ + rc = 0; + } + } + + return rc; +} + +static int cam_cpastop_qchannel_handshake(struct cam_hw_info *cpas_hw, + bool power_on, bool force) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP]; + uint32_t mask = 0; + uint32_t wait_data, 
qchannel_status, qbusy; + int rc = 0, ret = 0, i; + struct cam_cpas_private_soc *soc_private = + (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private; + struct cam_cpas_hw_errata_wa_list *errata_wa_list; + bool icp_clk_enabled = false; + struct cam_cpas_camnoc_qchannel *qchannel_info; + uint32_t busy_mask; + + if (reg_indx == -1) + return -EINVAL; + + for (i = 0; i < cpas_info->num_qchannel; i++) { + qchannel_info = cpas_info->qchannel_info[i]; + + if (!icp_clk_enabled) { + errata_wa_list = camnoc_info[i]->errata_wa_list; + if (errata_wa_list && errata_wa_list->enable_icp_clk_for_qchannel.enable) { + CAM_DBG(CAM_CPAS, "Enabling ICP clk for qchannel handshake"); + + if (soc_private->icp_clk_index == -1) { + CAM_ERR(CAM_CPAS, + "ICP clock not added as optional clk, qchannel handshake will fail"); + } else { + rc = cam_soc_util_clk_enable(soc_info, + CAM_CLK_SW_CLIENT_IDX, true, + soc_private->icp_clk_index, -1); + if (rc) + CAM_ERR(CAM_CPAS, + "Error enable icp clk failed rc=%d", rc); + else + icp_clk_enabled = true; + } + } + } + + if (power_on) { + if (force) { + cam_io_w_mb(0x1, + soc_info->reg_map[reg_indx].mem_base + + qchannel_info->qchannel_ctrl); + CAM_DBG(CAM_CPAS, "Force qchannel on for %s", + camnoc_info[i]->camnoc_name); + } + /* wait for QACCEPTN in QCHANNEL status*/ + mask = BIT(0); + wait_data = 1; + } else { + if (force) { + cam_io_w_mb(0x1, + soc_info->reg_map[reg_indx].mem_base + + qchannel_info->qchannel_ctrl); + CAM_DBG(CAM_CPAS, "Force qchannel on for %s now sleep for 1us", + camnoc_info[i]->camnoc_name); + usleep_range(1, 2); + } + /* Clear the quiecience request in QCHANNEL ctrl*/ + cam_io_w_mb(0, soc_info->reg_map[reg_indx].mem_base + + qchannel_info->qchannel_ctrl); + mask = BIT(0); + wait_data = 0; + } + + busy_mask = BIT(0); + rc = cam_io_poll_value_wmask( + soc_info->reg_map[reg_indx].mem_base + + qchannel_info->qchannel_status, + wait_data, mask, CAM_CPAS_POLL_QH_RETRY_CNT, + CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS); + 
if (rc) { + CAM_ERR(CAM_CPAS, + "CPAS_%s %s idle sequence failed, qstat 0x%x", + power_on ? "START" : "STOP", camnoc_info[i]->camnoc_name, + cam_io_r(soc_info->reg_map[reg_indx].mem_base + + qchannel_info->qchannel_status)); + ret = rc; + /* Do not return error, passthrough */ + } + + /* check if accept bit is set */ + qchannel_status = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + + qchannel_info->qchannel_status); + CAM_DBG(CAM_CPAS, + "CPAS_%s %s : qchannel status 0x%x", power_on ? "START" : "STOP", + camnoc_info[i]->camnoc_name, qchannel_status); + + qbusy = (qchannel_status & busy_mask); + if (!power_on && qbusy) + ret = -EBUSY; + else if (power_on && !qbusy) + ret = -EAGAIN; + } + + if (icp_clk_enabled) { + rc = cam_soc_util_clk_disable(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_private->icp_clk_index); + if (rc) + CAM_ERR(CAM_CPAS, "Error disable icp clk failed rc=%d", rc); + } + + return ret; +} + +static int cam_cpastop_set_up_camnoc_info(struct cam_cpas *cpas_core, + struct cam_camnoc_info **alloc_camnoc) +{ + int i, j, camnoc_cnt = 0; + + for (i = 0; i < CAM_CAMNOC_HW_TYPE_MAX; i++) { + if (alloc_camnoc[i]) { + alloc_camnoc[i]->camnoc_name = cam_cpastop_get_camnoc_name(i); + camnoc_info[camnoc_cnt] = alloc_camnoc[i]; + cpas_core->camnoc_info[camnoc_cnt] = alloc_camnoc[i]; + cpas_core->camnoc_info_idx[i] = camnoc_cnt; + + switch (i) { + case CAM_CAMNOC_HW_COMBINED: + camnoc_info[camnoc_cnt]->reg_base = CAM_CPAS_REG_CAMNOC; + break; + case CAM_CAMNOC_HW_RT: + camnoc_info[camnoc_cnt]->reg_base = CAM_CPAS_REG_CAMNOC_RT; + break; + case CAM_CAMNOC_HW_NRT: + camnoc_info[camnoc_cnt]->reg_base = CAM_CPAS_REG_CAMNOC_NRT; + break; + default: + CAM_ERR(CAM_CPAS, "Invalid camnoc type %u", i); + return -EINVAL; + } + + + if (cpas_core->regbase_index[camnoc_info[camnoc_cnt]->reg_base] == -1) { + CAM_ERR(CAM_CPAS, "Regbase not set up for %s", + camnoc_info[i]->camnoc_name); + return -EINVAL; + } + camnoc_cnt++; + } else { + cpas_core->camnoc_info_idx[i] = -1; + 
} + } + + if (camnoc_cnt == 0) { + CAM_ERR(CAM_CPAS, "No available camnoc header for binding"); + return -EINVAL; + } + + if (cpas_info->num_qchannel && cpas_info->num_qchannel != camnoc_cnt) { + CAM_ERR(CAM_CPAS, "Invalid number of qchannel: %u number of camnoc: %u", + cpas_info->num_qchannel, camnoc_cnt); + return -EINVAL; + } + + cpas_core->num_valid_camnoc = camnoc_cnt; + + if (cpas_core->camnoc_info_idx[CAM_CAMNOC_HW_COMBINED] >= 0) + cpas_core->camnoc_rt_idx = cpas_core->camnoc_info_idx[CAM_CAMNOC_HW_COMBINED]; + else if (cpas_core->camnoc_info_idx[CAM_CAMNOC_HW_RT] >= 0) + cpas_core->camnoc_rt_idx = cpas_core->camnoc_info_idx[CAM_CAMNOC_HW_RT]; + else { + cpas_core->camnoc_rt_idx = -1; + CAM_ERR(CAM_CPAS, "No CAMNOC RT idx found"); + return -EINVAL; + } + + /* Check if slave error irq is enabled */ + for (i = 0; i < cpas_core->num_valid_camnoc; i++) { + for (j = 0; j < camnoc_info[i]->irq_err_size; j++) { + if (camnoc_info[i]->irq_err[j].irq_type == + CAM_CAMNOC_HW_IRQ_SLAVE_ERROR) { + if (camnoc_info[i]->irq_err[j].enable) { + cpas_core->slave_err_irq_en[i] = true; + cpas_core->slave_err_irq_idx[i] = j; + break; + } + } + } + } + + return 0; +} + +static int cam_cpastop_get_hw_capability(struct cam_hw_info *cpas_hw) +{ + int i, reg_idx, rc; + struct cam_cpas *cpas_core = cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + struct cam_cpas_hw_cap_info *hw_caps_info; + struct cam_cpas_hw_caps *hw_caps = &cpas_core->hw_caps; + + hw_caps_info = &cpas_info->hw_caps_info; + reg_idx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP]; + + /* At least one hw caps register must be present */ + if (!hw_caps_info->num_caps_registers || + hw_caps_info->num_caps_registers > CAM_CPAS_MAX_CAPS_REGS) { + CAM_ERR(CAM_CPAS, + "Invalid number of populated caps registers: %u", + hw_caps_info->num_caps_registers); + return -EINVAL; + } + + hw_caps->num_capability_reg = hw_caps_info->num_caps_registers; + + if (!cam_vmrm_no_register_read_on_bind()) { + for 
(i = 0; i < hw_caps_info->num_caps_registers; i++) { + hw_caps->camera_capability[i] = + cam_io_r_mb(soc_info->reg_map[reg_idx].mem_base + + hw_caps_info->hw_caps_offsets[i]); + CAM_DBG(CAM_CPAS, "camera_caps_%d = 0x%x", + i, hw_caps->camera_capability[i]); + } + } else { + if (hw_caps_info->num_caps_registers > 0) { + rc = of_property_read_u32_array(soc_info->pdev->dev.of_node, + "camera-capability", hw_caps->camera_capability, + hw_caps_info->num_caps_registers); + if (rc) { + CAM_ERR(CAM_CPAS, "no camera capability"); + return rc; + } + } + for (i = 0; i < hw_caps_info->num_caps_registers; i++) { + CAM_DBG(CAM_CPAS, "camera_caps_%d = 0x%x", i, + hw_caps->camera_capability[i]); + } + } + + return 0; +} + +static int cam_cpastop_set_tpg_mux_sel(struct cam_hw_info *cpas_hw, + uint32_t tpg_mux) +{ + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int reg_cpas_top; + uint32_t curr_tpg_mux = 0; + + reg_cpas_top = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP]; + + if (cpas_top_info == NULL) + return 0; + + if (!cpas_top_info->tpg_mux_sel_enabled) + return 0; + + curr_tpg_mux = cam_io_r_mb(soc_info->reg_map[reg_cpas_top].mem_base + + cpas_top_info->tpg_mux_sel); + + curr_tpg_mux = curr_tpg_mux | ((1 << tpg_mux) << cpas_top_info->tpg_mux_sel_shift); + cam_io_w_mb(curr_tpg_mux, soc_info->reg_map[reg_cpas_top].mem_base + + cpas_top_info->tpg_mux_sel); + CAM_DBG(CAM_CPAS, "SET TPG MUX to 0x%x", curr_tpg_mux); + + return 0; +} + +static int cam_cpastop_handle_reset_res_control(struct cam_hw_info *cpas_hw) +{ + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + int rc = 0; + + switch (soc_info->hw_version) { + case CAM_CPAS_TITAN_970_V110: + rc = cam_soc_util_reset_control(soc_info); + if (rc) { + CAM_ERR(CAM_UTIL, "resource control assert/de-assert failed"); + return rc; + } + break; + default: + break; + } + + return 0; +} + +static int cam_cpastop_init_hw_version(struct cam_hw_info 
*cpas_hw, + struct cam_cpas_hw_caps *hw_caps) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info; + struct cam_cpas_cesta_info *cesta_info = NULL; + struct cam_camnoc_info *alloc_camnoc_info[CAM_CAMNOC_HW_TYPE_MAX] = {0}; + + qchannel_info = NULL; + cpas_top_info = NULL; + + CAM_DBG(CAM_CPAS, + "hw_version=0x%x Camera Version %d.%d.%d, cpas version %d.%d.%d", + soc_info->hw_version, + hw_caps->camera_version.major, + hw_caps->camera_version.minor, + hw_caps->camera_version.incr, + hw_caps->cpas_version.major, + hw_caps->cpas_version.minor, + hw_caps->cpas_version.incr); + + switch (soc_info->hw_version) { + case CAM_CPAS_TITAN_170_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam170_cpas100_camnoc_info; + cpas_info = &cam170_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_170_V110: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam170_cpas110_camnoc_info; + cpas_info = &cam170_cpas110_cpas_info; + break; + case CAM_CPAS_TITAN_170_V200: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam170_cpas200_camnoc_info; + cpas_info = &cam170_cpas200_cpas_info; + break; + case CAM_CPAS_TITAN_175_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam175_cpas100_camnoc_info; + cpas_info = &cam175_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_175_V101: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam175_cpas101_camnoc_info; + cpas_info = &cam175_cpas101_cpas_info; + break; + case CAM_CPAS_TITAN_175_V120: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam175_cpas120_camnoc_info; + cpas_info = &cam175_cpas120_cpas_info; + break; + case CAM_CPAS_TITAN_175_V130: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam175_cpas130_camnoc_info; + cpas_info = &cam175_cpas130_cpas_info; + break; + case CAM_CPAS_TITAN_150_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam150_cpas100_camnoc_info; + cpas_info = &cam150_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_480_V100: + 
alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam480_cpas100_camnoc_info; + cpas_info = &cam480_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_580_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam580_cpas100_camnoc_info; + cpas_info = &cam580_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_540_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam540_cpas100_camnoc_info; + cpas_info = &cam540_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_520_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam520_cpas100_camnoc_info; + cpas_info = &cam520_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_545_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam545_cpas100_camnoc_info; + cpas_info = &cam545_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_570_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam570_cpas100_camnoc_info; + cpas_info = &cam570_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_570_V200: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam570_cpas200_camnoc_info; + cpas_info = &cam570_cpas200_cpas_info; + break; + case CAM_CPAS_TITAN_680_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam680_cpas100_camnoc_info; + cpas_info = &cam680_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_680_V110: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam680_cpas110_camnoc_info; + cpas_info = &cam680_cpas110_cpas_info; + break; + case CAM_CPAS_TITAN_165_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam165_cpas100_camnoc_info; + cpas_info = &cam165_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_780_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam780_cpas100_camnoc_info; + cpas_info = &cam780_cpas100_cpas_info; + break; + case CAM_CPAS_TITAN_640_V200: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam640_cpas200_camnoc_info; + cpas_info = &cam640_cpas200_cpas_info; + cpas_top_info = &cam640_cpas200_cpas_top_info; + break; + case CAM_CPAS_TITAN_880_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_COMBINED] = 
&cam880_cpas100_camnoc_info; + cpas_info = &cam880_cpas100_cpas_info; + cesta_info = &cam_v880_cesta_info; + break; + case CAM_CPAS_TITAN_975_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_RT] = &cam975_cpas100_camnoc_info_rt; + alloc_camnoc_info[CAM_CAMNOC_HW_NRT] = &cam975_cpas100_camnoc_info_nrt; + cpas_info = &cam975_cpas100_cpas_info; + cesta_info = &cam_v975_cesta_info; + break; + case CAM_CPAS_TITAN_970_V110: + alloc_camnoc_info[CAM_CAMNOC_HW_RT] = &cam970_cpas110_camnoc_info_rt; + alloc_camnoc_info[CAM_CAMNOC_HW_NRT] = &cam970_cpas110_camnoc_info_nrt; + cpas_info = &cam970_cpas110_cpas_info; + break; + case CAM_CPAS_TITAN_980_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_RT] = &cam980_cpas100_camnoc_info_rt; + alloc_camnoc_info[CAM_CAMNOC_HW_NRT] = &cam980_cpas100_camnoc_info_nrt; + cpas_info = &cam980_cpas100_cpas_info; + cesta_info = &cam_v980_cesta_info; + break; + case CAM_CPAS_TITAN_1080_V100: + alloc_camnoc_info[CAM_CAMNOC_HW_RT] = &cam1080_cpas100_camnoc_info_rt; + alloc_camnoc_info[CAM_CAMNOC_HW_NRT] = &cam1080_cpas100_camnoc_info_nrt; + cpas_info = &cam1080_cpas100_cpas_info; + cesta_info = &cam_v1080_cesta_info; + break; + default: + CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d", + hw_caps->camera_version.major, + hw_caps->camera_version.minor, + hw_caps->camera_version.incr); + return -EINVAL; + } + + cpas_core->cesta_info = cesta_info; + cpas_core->cam_subpart_info = cpas_info->subpart_info; + + rc = cam_cpastop_set_up_camnoc_info(cpas_core, alloc_camnoc_info); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed to set up camnoc info rc=%d", rc); + return rc; + } + + rc = cam_cpastop_get_hw_capability(cpas_hw); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed to get titan hw capability rc=%d", rc); + return rc; + } + + return rc; +} + +static int cam_cpastop_setup_qos_settings(struct cam_hw_info *cpas_hw, + uint32_t selection_mask) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info; + + CAM_DBG(CAM_CPAS, + "QoS selection : hw_version=0x%x 
selection_mask 0x%x", + soc_info->hw_version, + selection_mask); + + switch (soc_info->hw_version) { + case CAM_CPAS_TITAN_480_V100: + if (selection_mask & CAM_CPAS_QOS_CUSTOM_SETTINGS_MASK) + camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam480_custom_camnoc_info; + else if (selection_mask & CAM_CPAS_QOS_DEFAULT_SETTINGS_MASK) + camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam480_cpas100_camnoc_info; + else + CAM_ERR(CAM_CPAS, "Invalid selection mask 0x%x", + selection_mask); + break; + case CAM_CPAS_TITAN_580_V100: + if (selection_mask & CAM_CPAS_QOS_CUSTOM_SETTINGS_MASK) + camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam580_custom_camnoc_info; + else if (selection_mask & CAM_CPAS_QOS_DEFAULT_SETTINGS_MASK) + camnoc_info[CAM_CAMNOC_HW_COMBINED] = &cam580_cpas100_camnoc_info; + else + CAM_ERR(CAM_CPAS, + "Invalid selection mask 0x%x for hw 0x%x", + selection_mask, soc_info->hw_version); + break; + default: + CAM_WARN(CAM_CPAS, "QoS selection not supported for 0x%x", + soc_info->hw_version); + rc = -EINVAL; + break; + } + + return rc; +} + +int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops) +{ + if (!internal_ops) { + CAM_ERR(CAM_CPAS, "invalid NULL param"); + return -EINVAL; + } + + internal_ops->get_hw_info = cam_cpastop_get_hw_info; + internal_ops->init_hw_version = cam_cpastop_init_hw_version; + internal_ops->handle_irq = cam_cpastop_handle_irq; + internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices; + internal_ops->power_on = cam_cpastop_poweron; + internal_ops->power_off = cam_cpastop_poweroff; + internal_ops->setup_qos_settings = cam_cpastop_setup_qos_settings; + internal_ops->print_poweron_settings = + cam_cpastop_print_poweron_settings; + internal_ops->qchannel_handshake = cam_cpastop_qchannel_handshake; + internal_ops->set_tpg_mux_sel = cam_cpastop_set_tpg_mux_sel; + internal_ops->handle_reset_res_control = cam_cpastop_handle_reset_res_control; + + return 0; +} diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cam_cpastop_hw.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cam_cpastop_hw.h new file mode 100644 index 0000000000..f68b72075f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cam_cpastop_hw.h @@ -0,0 +1,614 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CPASTOP_HW_H_ +#define _CAM_CPASTOP_HW_H_ + +#include "cam_cpas_api.h" +#include "cam_cpas_hw.h" + +/* Camera Hw parts array indices */ +#define CAM_CPAS_PART_MAX_FUSE_BITS 8 +#define CAM_CPAS_PART_MAX_FUSE_BIT_INFO 2 + +/** + * enum cam_camnoc_hw_irq_type - Enum for camnoc error types + * + * @CAM_CAMNOC_HW_IRQ_SLAVE_ERROR : Each slave port in CAMNOC + * (3 QSB ports and 1 QHB port) + * has an error logger. The + * error observed at any slave + * port is logged into the + * error logger register and + * an IRQ is triggered + * @CAM_CAMNOC_HW_IRQ_IFE_UBWC_ENCODE_ERROR : Triggered if any error + * detected in the IFE UBWC + * encoder instance + * @CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR : Triggered if any error + * detected in the IFE UBWC- + * Stats encoder instance + * @CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_1_ENCODE_ERROR: Triggered if any error + * detected in the IFE UBWC- + * Stats 1 encoder instance + * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR : Triggered if any error + * detected in the IFE0 UBWC + * encoder instance + * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR : Triggered if any error + * detected in the IFE1 or + * IFE3 UBWC encoder instance + * @CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR : Triggered if any UBWC error + * is detected in IFE0 write + * path + * @CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR : Triggered if any UBWC error + * is detected in IFE1 write + * path slave times out after + * 4000 AHB cycles + * 
@CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR : Triggered if any error + * detected in the IPE + * UBWC encoder instance + * @CAM_CAMNOC_HW_IRQ_BPS_UBWC_ENCODE_ERROR : Triggered if any error + * detected in the BPS + * UBWC encoder instance + * @CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR : Triggered if any error + * detected in the IPE1/BPS + * read path decoder instance + * @CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR : Triggered if any error + * detected in the IPE0 read + * path decoder instance + * @CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR : Triggered if any error + * detected in the IPE1 read + * path decoder instance + * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR : Triggered if any error + * detected in the IPE/BPS + * UBWC decoder instance + * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR : Triggered if any error + * detected in the IPE/BPS + * UBWC encoder instance + * @CAM_CAMNOC_HW_IRQ_OFE_UBWC_WRITE_ENCODE_ERROR : Triggered if any error + * detected in the OFE write + * path encoder instance + * @CAM_CAMNOC_HW_IRQ_OFE_UBWC_READ_DECODE_ERROR : Triggered if any error + * detected in the OFE read + * path encoder instance + * @CAM_CAMNOC_HW_IRQ_TFE_UBWC_ENCODE_ERROR : Triggered if any error + * detected in the TFE + * UBWC encoder instance + * @CAM_CAMNOC_HW_IRQ_TFE_UBWC_1_ENCODE_ERROR : Triggered if any error + * detected in the TFE + * UBWC encoder instance + * @CAM_CAMNOC_HW_IRQ_RESERVED1 : Reserved + * @CAM_CAMNOC_HW_IRQ_RESERVED2 : Reserved + * @CAM_CAMNOC_HW_IRQ_CAMNOC_TEST : To test the IRQ logic + */ +enum cam_camnoc_hw_irq_type { + CAM_CAMNOC_HW_IRQ_SLAVE_ERROR = + CAM_CAMNOC_IRQ_SLAVE_ERROR, + CAM_CAMNOC_HW_IRQ_IFE_UBWC_ENCODE_ERROR = + CAM_CAMNOC_IRQ_IFE_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR = + CAM_CAMNOC_IRQ_IFE_UBWC_STATS_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_1_ENCODE_ERROR = + CAM_CAMNOC_IRQ_IFE_UBWC_STATS_1_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR = + 
CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR = + CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR = + CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR = + CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR = + CAM_CAMNOC_IRQ_IPE_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_BPS_UBWC_ENCODE_ERROR = + CAM_CAMNOC_IRQ_BPS_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR = + CAM_CAMNOC_IRQ_IPE1_BPS_UBWC_DECODE_ERROR, + CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR = + CAM_CAMNOC_IRQ_IPE0_UBWC_DECODE_ERROR, + CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR = + CAM_CAMNOC_IRQ_IPE1_UBWC_DECODE_ERROR, + CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR = + CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR = + CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_OFE_UBWC_WRITE_ENCODE_ERROR = + CAM_CAMNOC_IRQ_OFE_WR_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_OFE_UBWC_READ_DECODE_ERROR = + CAM_CAMNOC_IRQ_OFE_RD_UBWC_DECODE_ERROR, + CAM_CAMNOC_HW_IRQ_TFE_UBWC_ENCODE_ERROR = + CAM_CAMNOC_IRQ_TFE_UBWC_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_TFE_UBWC_1_ENCODE_ERROR = + CAM_CAMNOC_IRQ_TFE_UBWC_1_ENCODE_ERROR, + CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT = + CAM_CAMNOC_IRQ_AHB_TIMEOUT, + CAM_CAMNOC_HW_IRQ_RESERVED1, + CAM_CAMNOC_HW_IRQ_RESERVED2, + CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, +}; + +/** + * enum cam_camnoc_port_type - Enum for different camnoc hw ports. All CAMNOC + * settings like QoS, LUT mappings need to be configured for + * each of these ports. 
+ * + * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc + * @CAM_CAMNOC_SFE_RD: Indicates read data from all SFEs to camnoc + * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc + * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc + * @CAM_CAMNOC_IFE_LITE: Indicates all IFE lites connection to camnoc + * @CAM_CAMNOC_IFE_LINEAR: Indicates linear data from all IFEs to camnoc + * @CAM_CAMNOC_IFE_LINEAR_STATS: Indicates linear and stats data from certain + * IFEs to camnoc + * @CAM_CAMNOC_IFE_LINEAR_STATS_1: Indicates linear and stats data from certain + * IFEs to camnoc + * @CAM_CAMNOC_IFE_PDAF: Indicates pdaf data from all IFEs to camnoc + * @CAM_CAMNOC_IFE_UBWC: Indicates ubwc from all IFEs to camnoc + * @CAM_CAMNOC_IFE_UBWC_STATS: Indicates ubwc+stats from certain IFEs to camnoc + * @CAM_CAMNOC_IFE_UBWC_STATS_1: Indicates ubwc+stats from certain + * IFEs to camnoc + * @CAM_CAMNOC_IFE_RDI_WR: Indicates RDI write data from certain IFEs to camnoc + * @CAM_CAMNOC_IFE_RDI_WR_1: Indicates RDI write data from certain + * IFEs to camnoc + * @CAM_CAMNOC_IFE_RDI_RD: Indicates RDI read data from all IFEs to camnoc + * @CAM_CAMNOC_IFE0123_RDI_WRITE: RDI write only for all IFEx + * @CAM_CAMNOC_IFE0_NRDI_WRITE: IFE0 non-RDI write + * @CAM_CAMNOC_IFE01_RDI_READ: IFE0/1 RDI READ + * @CAM_CAMNOC_IFE1_NRDI_WRITE: IFE1 non-RDI write + * @CAM_CAMNOC_IPE_BPS_LRME_READ: Indicates IPE, BPS, LRME Read HW + * connection to camnoc + * @CAM_CAMNOC_IPE_BPS_LRME_WRITE: Indicates IPE, BPS, LRME Write HW + * connection to camnoc + * @CAM_CAMNOC_IPE_VID_DISP_WRITE: Indicates IPE's VID/DISP Write HW + * connection to camnoc + * @CAM_CAMNOC_IPE_WR: Indicates IPE HW's write connection to camnoc + * @CAM_CAMNOC_IPE0_RD: Indicates IPE's Read0 HW connection to camnoc + * @CAM_CAMNOC_IPE1_RD: Indicates IPE's Read1 HW connection to camnoc + * @CAM_CAMNOC_IPE1_BPS_RD: Indicates IPE's Read1 + BPS Read HW connection + * to camnoc + * @CAM_CAMNOC_IPE_BPS_WR: 
Indicates IPE+BPS Write HW connection to camnoc + * @CAM_CAMNOC_BPS_WR: Indicates BPS HW's write connection to camnoc + * @CAM_CAMNOC_BPS_RD: Indicates BPS HW's read connection to camnoc + * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc + * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc + * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc + * @CAM_CAMNOC_TFE: Indicates TFE0 HW connection to camnoc + * @CAM_CAMNOC_TFE_1: Indicates TFE1 HW connection to camnoc + * @CAM_CAMNOC_TFE_2: Indicates TFE2 HW connection to camnoc + * @CAM_CAMNOC_OPE: Indicates OPE HW connection to camnoc + */ + /* Deprecated, do not use this anymore. port_name serves the purpose instead of this */ +enum cam_camnoc_port_type { + CAM_CAMNOC_CDM, + CAM_CAMNOC_SFE_RD, + CAM_CAMNOC_IFE02, + CAM_CAMNOC_IFE13, + CAM_CAMNOC_IFE_LITE, + CAM_CAMNOC_IFE_LINEAR, + CAM_CAMNOC_IFE_LINEAR_STATS, + CAM_CAMNOC_IFE_LINEAR_STATS_1, + CAM_CAMNOC_IFE_PDAF, + CAM_CAMNOC_IFE_UBWC, + CAM_CAMNOC_IFE_UBWC_STATS, + CAM_CAMNOC_IFE_UBWC_STATS_1, + CAM_CAMNOC_IFE_RDI_WR, + CAM_CAMNOC_IFE_RDI_WR_1, + CAM_CAMNOC_IFE_RDI_RD, + CAM_CAMNOC_IFE0123_RDI_WRITE, + CAM_CAMNOC_IFE0_NRDI_WRITE, + CAM_CAMNOC_IFE01_RDI_READ, + CAM_CAMNOC_IFE1_NRDI_WRITE, + CAM_CAMNOC_IPE_BPS_LRME_READ, + CAM_CAMNOC_IPE_BPS_LRME_WRITE, + CAM_CAMNOC_IPE_VID_DISP_WRITE, + CAM_CAMNOC_IPE_WR, + CAM_CAMNOC_IPE0_RD, + CAM_CAMNOC_IPE1_RD, + CAM_CAMNOC_IPE1_BPS_RD, + CAM_CAMNOC_IPE_BPS_WR, + CAM_CAMNOC_BPS_WR, + CAM_CAMNOC_BPS_RD, + CAM_CAMNOC_JPEG, + CAM_CAMNOC_FD, + CAM_CAMNOC_ICP, + CAM_CAMNOC_TFE_BAYER_STATS, + CAM_CAMNOC_TFE_BAYER_STATS_1, + CAM_CAMNOC_TFE_BAYER_STATS_2, + CAM_CAMNOC_TFE_RAW, + CAM_CAMNOC_TFE_RAW_1, + CAM_CAMNOC_TFE_RAW_2, + CAM_CAMNOC_TFE, + CAM_CAMNOC_TFE_1, + CAM_CAMNOC_TFE_2, + CAM_CAMNOC_OPE, + CAM_CAMNOC_OPE_BPS_WR, + CAM_CAMNOC_OPE_BPS_CDM_RD, + CAM_CAMNOC_CRE, + CAM_CAMNOC_IFE01234_RDI_WRITE, + CAM_CAMNOC_IFE01_NRDI_WRITE, + CAM_CAMNOC_IFE2_NRDI_WRITE, +}; + +/** + * struct cam_camnoc_specific : CPAS camnoc 
specific settings + * + * @port_type: Port type + * @port_name: Port name + * @enable: Whether to enable settings for this connection + * @priority_lut_low: Priority Low LUT mapping for this connection + * @priority_lut_high: Priority High LUT mapping for this connection + * @urgency: Urgency (QoS) settings for this connection + * @danger_lut: Danger LUT mapping for this connection + * @safe_lut: Safe LUT mapping for this connection + * @ubwc_ctl: UBWC control settings for this connection + * @qosgen_mainctl: qosgen shaping control configuration for this connection + * @qosgen_shaping_low: qosgen shaping low configuration for this connection + * @qosgen_shaping_high: qosgen shaping high configuration for this connection + * @maxwr_low: maxwr low configuration for this connection + * @maxrd_low: maxrd low configuration for this connection + * @dynattr_mainctl: Dynamic attribute main control register for this connection + * + */ +struct cam_camnoc_specific { + enum cam_camnoc_port_type port_type; + const char *port_name; + bool enable; + struct cam_cpas_reg priority_lut_low; + struct cam_cpas_reg priority_lut_high; + struct cam_cpas_reg urgency; + struct cam_cpas_reg danger_lut; + struct cam_cpas_reg safe_lut; + struct cam_cpas_reg ubwc_ctl; + struct cam_cpas_reg flag_out_set0_low; + struct cam_cpas_reg qosgen_mainctl; + struct cam_cpas_reg qosgen_shaping_low; + struct cam_cpas_reg qosgen_shaping_high; + struct cam_cpas_reg maxwr_low; + struct cam_cpas_reg maxrd_low; + struct cam_cpas_reg dynattr_mainctl; +}; + +/** + * struct cam_camnoc_irq_sbm : Sideband manager settings for all CAMNOC IRQs + * + * @sbm_enable: SBM settings for IRQ enable + * @sbm_status: SBM settings for IRQ status + * @sbm_clear: SBM settings for IRQ clear + * + */ +struct cam_camnoc_irq_sbm { + struct cam_cpas_reg sbm_enable; + struct cam_cpas_reg sbm_status; + struct cam_cpas_reg sbm_clear; +}; + +/** + * struct cam_camnoc_irq_err : Error settings specific to each CAMNOC IRQ + * + * @irq_type: 
Type of IRQ + * @enable: Whether to enable error settings for this IRQ + * @sbm_port: Corresponding SBM port for this IRQ + * @err_enable: Error enable settings for this IRQ + * @err_status: Error status settings for this IRQ + * @err_clear: Error clear settings for this IRQ + * + */ +struct cam_camnoc_irq_err { + enum cam_camnoc_hw_irq_type irq_type; + bool enable; + uint32_t sbm_port; + struct cam_cpas_reg err_enable; + struct cam_cpas_reg err_status; + struct cam_cpas_reg err_clear; +}; + +/** + * struct cam_cpas_hw_errata_wa : Struct for HW errata workaround info + * + * @enable: Whether to enable this errata workaround + * @data: HW Errata workaround data + * + */ +struct cam_cpas_hw_errata_wa { + bool enable; + union { + struct cam_cpas_reg reg_info; + } data; +}; + +/** + * struct cam_cpas_subpart_info : Struct for camera Hw parts info + * + * @num_bits: Number of entries in hw_bitmap_mask + * @hw_bitmap_mask: Contains Fuse flag and hw_map info + * + */ +struct cam_cpas_subpart_info { + uint32_t num_bits; + uint32_t hw_bitmap_mask[CAM_CPAS_PART_MAX_FUSE_BITS][CAM_CPAS_PART_MAX_FUSE_BIT_INFO]; +}; + +/** + * struct cam_cpas_hw_errata_wa_list : List of HW Errata workaround info + * + * @camnoc_flush_slave_pending_trans: Errata workaround info for flushing + * camnoc slave pending transactions before turning off CPAS_TOP gdsc + * @tcsr_camera_hf_sf_ares_glitch: Errata workaround info for ignoring + * erroneous signals at camera start + * @enable_icp_clk_for_qchannel: Need to enable ICP clk while qchannel handshake + */ +struct cam_cpas_hw_errata_wa_list { + struct cam_cpas_hw_errata_wa camnoc_flush_slave_pending_trans; + struct cam_cpas_hw_errata_wa tcsr_camera_hf_sf_ares_glitch; + struct cam_cpas_hw_errata_wa enable_icp_clk_for_qchannel; +}; + +/** + * struct cam_camnoc_err_logger_info : CAMNOC error logger register offsets + * + * @mainctrl: Register offset for mainctrl + * @errvld: Register offset for errvld + * @errlog0_low: Register offset for errlog0_low 
+ * @errlog0_high: Register offset for errlog0_high + * @errlog1_low: Register offset for errlog1_low + * @errlog1_high: Register offset for errlog1_high + * @errlog2_low: Register offset for errlog2_low + * @errlog2_high: Register offset for errlog2_high + * @errlog3_low: Register offset for errlog3_low + * @errlog3_high: Register offset for errlog3_high + * + */ +struct cam_camnoc_err_logger_info { + uint32_t mainctrl; + uint32_t errvld; + uint32_t errlog0_low; + uint32_t errlog0_high; + uint32_t errlog1_low; + uint32_t errlog1_high; + uint32_t errlog2_low; + uint32_t errlog2_high; + uint32_t errlog3_low; + uint32_t errlog3_high; +}; + +/** + * struct cam_cpas_test_irq_info : CAMNOC Test IRQ mask information + * + * @sbm_enable_mask: sbm mask to enable camnoc test irq + * @sbm_clear_mask: sbm mask to clear camnoc test irq + * + */ +struct cam_cpas_test_irq_info { + uint32_t sbm_enable_mask; + uint32_t sbm_clear_mask; +}; + +/** + * enum cam_cpas_cesta_crm_type : CESTA crm type information + * + * @CAM_CESTA_CRMB: CRM for bandwidth + * @CAM_CESTA_CRMC: CRM for clocks + * + */ +enum cam_cpas_cesta_crm_type { + CAM_CESTA_CRMB = 0, + CAM_CESTA_CRMC, +}; + +/** + * struct cam_cpas_vcd_info : cpas vcd(virtual clk domain) information + * + * @index: vcd number of each clk + * @type: type of clk domain CESTA_CRMB, CESTA_CRMC + * @clk: name of each vcd clk, e.g. cam_cc_ife_0_clk_src + * + */ +struct cam_cpas_vcd_info { + uint8_t index; + enum cam_cpas_cesta_crm_type type; + const char *clk; +}; + +/** + * struct cam_cpas_cesta_vcd_curr_lvl : cesta vcd operating level information + * + * @reg_offset: register offset + * @vcd_base_inc: each vcd base addr offset + * @num_vcds: number of vcds + * + */ +struct cam_cpas_cesta_vcd_curr_lvl { + uint32_t reg_offset; + uint32_t vcd_base_inc; + uint8_t num_vcds; +}; + +/** + * struct cam_cpas_cesta_vcd_reg_info : to hold all cesta register information + * + * @vcd_currol: vcd current perf level reg info + * + */ +struct 
cam_cpas_cesta_vcd_reg_info { + struct cam_cpas_cesta_vcd_curr_lvl vcd_currol; +}; + +/** + * struct cam_cpas_cesta_info : to hold all cesta register information + * + * @vcd_info: vcd info + * @num_vcds: number of vcds + * @cesta_reg_info: cesta vcds reg info + * + */ +struct cam_cpas_cesta_info { + struct cam_cpas_vcd_info *vcd_info; + int num_vcds; + struct cam_cpas_cesta_vcd_reg_info *cesta_reg_info; +}; + +/** + * struct cam_cpas_hw_cap_info : CPAS Hardware capability information + * + * @num_caps_registers: number of hw capability registers + * @hw_caps_offsets: array of hw cap register offsets + * + */ +struct cam_cpas_hw_cap_info { + uint32_t num_caps_registers; + uint32_t hw_caps_offsets[CAM_CPAS_MAX_CAPS_REGS]; +}; + +/** + * struct cam_camnoc_addr_trans_client_reg_info : CPAS Address translator supported client + * register information + * + * @client_name: Name of the client + * @reg_enable: Register offset to enable address translator + * @reg_offset0: Register offset for offset 0 + * @reg_base1: Register offset for base 1 + * @reg_offset1: Register offset for offset 1 + * @reg_base2: Register offset for base 2 + * @reg_offset2: Register offset for offset 2 + * @reg_base3: Register offset for base 3 + * @reg_offset3: Register offset for offset 3 + * + */ +struct cam_camnoc_addr_trans_client_info { + const char *client_name; + uint32_t reg_enable; + uint32_t reg_offset0; + uint32_t reg_base1; + uint32_t reg_offset1; + uint32_t reg_base2; + uint32_t reg_offset2; + uint32_t reg_base3; + uint32_t reg_offset3; +}; + +/** + * struct cam_camnoc_addr_trans_info : CPAS Address translator generic info + * + * @num_supported_clients: Number of clients that support address translator + * @addr_trans_client_info: Client information with address translator supported + * + */ +struct cam_camnoc_addr_trans_info { + uint8_t num_supported_clients; + struct cam_camnoc_addr_trans_client_info *addr_trans_client_info; +}; + +/** + * struct cam_camnoc_info : Overall CAMNOC 
settings info + * + * @camnoc_type: type of camnoc (RT/NRT/COMBINED) + * @camnoc_name: name of camnoc (CAMNOC_RT/CAMNOC_NRT/CAMNOC_COMBINED) + * @reg_base: register base for camnoc RT/NRT/COMBINED register space + * @specific: Pointer to CAMNOC SPECIFIC settings + * @specific_size: Array size of SPECIFIC settings + * @irq_sbm: Pointer to CAMNOC IRQ SBM settings + * @irq_err: Pointer to CAMNOC IRQ Error settings + * @irq_err_size: Array size of IRQ Error settings + * @err_logger: Pointer to CAMNOC IRQ Error logger read registers + * @errata_wa_list: HW Errata workaround info + * @test_irq_info: CAMNOC Test IRQ info + * @cesta_info: cpas cesta reg info + * @addr_trans_info: CAMNOC address translator info + * + */ +struct cam_camnoc_info { + /* Below fields populated at probe on camera version */ + enum cam_camnoc_hw_type camnoc_type; + char *camnoc_name; + enum cam_cpas_reg_base reg_base; + + /* Below fields populated from the cpas header */ + struct cam_camnoc_specific *specific; + int specific_size; + struct cam_camnoc_irq_sbm *irq_sbm; + struct cam_camnoc_irq_err *irq_err; + int irq_err_size; + struct cam_camnoc_err_logger_info *err_logger; + struct cam_cpas_hw_errata_wa_list *errata_wa_list; + struct cam_cpas_test_irq_info test_irq_info; + struct cam_cpas_cesta_info *cesta_info; + struct cam_camnoc_addr_trans_info *addr_trans_info; +}; + +/** + * struct cam_cpas_work_payload : Struct for cpas work payload data + * + * @camnoc_idx: index to camnoc info array + * @hw: Pointer to HW info + * @irq_status: IRQ status value + * @irq_data: IRQ data + * @workq_scheduled_ts: workqueue scheduled timestamp + * @work: Work handle + * + */ +struct cam_cpas_work_payload { + int8_t camnoc_idx; + struct cam_hw_info *hw; + uint32_t irq_status; + uint32_t irq_data; + ktime_t workq_scheduled_ts; + struct work_struct work; +}; + +/** + * struct cam_cpas_camnoc_qchannel : Cpas camnoc qchannel info + * + * @qchannel_ctrl: offset to configure to control camnoc qchannel 
interface + * @qchannel_status: offset to read camnoc qchannel interface status + * + */ +struct cam_cpas_camnoc_qchannel { + uint32_t qchannel_ctrl; + uint32_t qchannel_status; +}; + +/** + * struct cam_cpas_secure_info : Cpas secure info + * + * @secure_access_ctrl_offset: Offset value of secure access ctrl register + * @secure_access_ctrl_value: Value to set for core secure access + * + */ +struct cam_cpas_secure_info { + uint32_t secure_access_ctrl_offset; + uint32_t secure_access_ctrl_value; +}; + +/** + * struct cam_cpas_info: CPAS information + * + * @qchannel_info: CPAS qchannel info + * @hw_caps_info: CPAS Hardware capability information + * @hw_caps_secure_info: CPAS Hardware secure information + * @num_qchannel: Number of qchannel + * @subpart_info: Subpart info + */ +struct cam_cpas_info { + struct cam_cpas_camnoc_qchannel *qchannel_info[CAM_CAMNOC_QCHANNEL_MAX]; + struct cam_cpas_hw_cap_info hw_caps_info; + uint8_t num_qchannel; + struct cam_cpas_secure_info *hw_caps_secure_info; + struct cam_cpas_subpart_info *subpart_info; +}; + +/** + * struct cam_cpas_top_regs : CPAS Top registers + * @tpg_mux_sel_shift: TPG mux select shift value + * @tpg_mux_sel: For selecting TPG + * @tpg_mux_sel_enabled: TPG mux select enabled or not + * + */ +struct cam_cpas_top_regs { + uint32_t tpg_mux_sel_shift; + uint32_t tpg_mux_sel; + bool tpg_mux_sel_enabled; +}; + +#endif /* _CAM_CPASTOP_HW_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop100.h new file mode 100644 index 0000000000..959b4a7db4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop100.h @@ -0,0 +1,539 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP100_H_ +#define _CPASTOP100_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2040, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + 
.enable = true, + .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1190, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? true : false, + .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */ + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */ + 
.value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE02, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */ + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */ + .value = 0x3, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IFE13, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */ + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, 
+ .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */ + .value = 0x3, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */ + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + 
}, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */ + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = 
false, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .enable = false, + }, + { + .port_type = CAM_CAMNOC_ICP, + .enable = false, + } +}; + +static struct cam_camnoc_err_logger_info cam170_cpas100_err_logger_offsets = { + .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = true, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2100, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +struct cam_camnoc_info cam170_cpas100_camnoc_info = { + .specific = &cam_cpas100_camnoc_specific[0], + .specific_size = sizeof(cam_cpas100_camnoc_specific) / + sizeof(cam_cpas100_camnoc_specific[0]), + .irq_sbm = &cam_cpas100_irq_sbm, + .irq_err = &cam_cpas100_irq_err[0], + .irq_err_size = sizeof(cam_cpas100_irq_err) / + sizeof(cam_cpas100_irq_err[0]), + .err_logger = &cam170_cpas100_err_logger_offsets, + .errata_wa_list = &cam170_cpas100_errata_wa_list, +}; + +static struct cam_cpas_info cam170_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v1080_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v1080_100.h new file mode 100644 index 0000000000..79e1fe1b99 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v1080_100.h @@ -0,0 +1,1632 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V1080_100_H_ +#define _CPASTOP_V1080_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v1080_100_irq_sbm_rt = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_RT_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* RT_SBM_FAULTINEN0_LOW_PORT0_MASK - Slave error IRQ */ + 0x02 | /* RT_SBM_FAULTINEN0_LOW_PORT1_MASK - TFE UBWC-0 Encoder Error IRQ */ + 0x04, /* RT_SBM_FAULTINEN0_LOW_PORT2_MASK - TFE UBWC-1 Encoder Error IRQ */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_RT_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_RT_SBM_FLAGOUTCLR0_LOW */ + .value = 0x0, + } +}; + +static struct cam_camnoc_irq_sbm cam_cpas_v1080_100_irq_sbm_nrt = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_NRT_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* NRT_SBM_FAULTINEN0_LOW_PORT0_MASK - Slave Error */ + 0x02 | /* NRT_SBM_FAULTINEN0_LOW_PORT1_MASK - IPE WR UBWC En */ + 0x04 | /* NRT_SBM_FAULTINEN0_LOW_PORT2_MASK - OFE WR UBWC En */ + 0x08 | /* NRT_SBM_FAULTINEN0_LOW_PORT3_MASK - OFE RD UBWC De */ + 0x10 | /* NRT_SBM_FAULTINEN0_LOW_PORT4_MASK - IPE RD UBWC En */ + 0x20 | /* NRT_SBM_FAULTINEN0_LOW_PORT5_MASK - IPE RD UBWC En */ + 0x40, /* NRT_SBM_FAULTINEN0_LOW_PORT6_MASK - AHB Bus Timeout En */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_NRT_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 
0x280, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v1080_100_irq_err_rt[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* RT_SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_RT_ERL_MAINCTL_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_RT_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x18, /* CAM_NOC_RT_ERL_ERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_TFE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* RT_SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2DA0, /* TFE_UBWC : RT_0_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2D90, /* IFE_UBWC : RT_0_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2D98, /* IFE_UBWC : RT_0_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_TFE_UBWC_1_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x04, /* RT_SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2FA0, /* TFE_UBWC : RT_1_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2F90, /* IFE_UBWC : RT_1_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2F98, /* IFE_UBWC : RT_1_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x8, /* 
RT_SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* RT_CAM_NOC_RT_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_RT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_RT_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_irq_err + cam_cpas_v1080_100_irq_err_nrt[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_NRT_ERL_MAINCTL_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_NRT_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x18, /* CAM_NOC_NRT_ERL_ERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x59A0, /* IPE_WR : NRT_1_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5990, /* IPE_WR : NRT_1_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5998, /* IPE_WR : NRT_1_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_OFE_UBWC_WRITE_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x63A0, /* OFE_WR : NRT_3_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = 
CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6390, /* OFE_WR : NRT_3_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6398, /* OFE_WR : NRT_3_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_OFE_UBWC_READ_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6520, /* OFE_RD : NRT_4_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6510, /* OFE_RD : NRT_4_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6518, /* OFE_RD : NRT_4_NIU_DECERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6B20, /* IPE_1_RD : NRT_7_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6B10, /* IPE_1_RD : NRT_7_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6B18, /* IPE_1_RD : NRT_7_NIU_DECERRCLR_LOW */ + .value = 0xFF, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x20, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6D20, /* IPE_0_RD : NRT_8_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6D10, /* IPE_0_RD : NRT_8_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6D18, 
/* IPE_0_RD : NRT_8_NIU_DECERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x40, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_NRT_SBM_FLAGOUTSET0_LOW */ + .value = 0xE, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_NRT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x80, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT10_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_NRT_SBM_FLAGOUTSET0_LOW */ + .value = 0x2, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_NRT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v1080_100_camnoc_specific_rt[] = { + /* RT ports */ + { + .port_name = "RT0-TFE_DS16_FULL_RDI_RAW_PDAF", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C30, /* NOC_RT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x66655554, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C34, /* NOC_RT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C38, /* NOC_RT_0_NIU_URGENCY_LOW */ + .value = 0x00001E40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C40, /* NOC_RT_0_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + 
.safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C48, /* NOC_RT_0_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3608, /* NOC_RT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2808, /* NOC_RT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2820, /* NOC_RT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2824, /* NOC_RT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2C20, /* NOC_RT_0_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT1-TFE_DS_PDAF_FD_AND_IFE_LITE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* NOC_RT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x55544444, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* NOC_RT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666665, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E38, /* NOC_RT_1_NIU_URGENCY_LOW */ + .value = 0x00001C40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* 
NOC_RT_1_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* NOC_RT_1_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3688, /* NOC_RT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2888, /* NOC_RT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x28A0, /* NOC_RT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x28A4, /* NOC_RT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2E20, /* NOC_RT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT2-TFE_PDAF_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3030, /* NOC_RT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x44444444, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3034, /* NOC_RT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666644, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3038, /* NOC_RT_2_NIU_URGENCY_LOW */ + .value = 0x00001C40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + 
.masked_value = 0, + .offset = 0x3040, /* NOC_RT_2_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3048, /* NOC_RT_2_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3708, /* NOC_RT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2908, /* NOC_RT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2920, /* NOC_RT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2924, /* NOC_RT_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x3020, /* NOC_RT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT3-RT_CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3230, /* NOC_RT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3234, /* NOC_RT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3238, /* NOC_RT_3_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3240, /* NOC_RT_3_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3248, /* NOC_RT_3_NIU_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3788, /* NOC_RT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2988, /* NOC_RT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x29A0, /* NOC_RT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x29A4, /* NOC_RT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v1080_100_camnoc_specific_nrt[] = { + /* NRT ports */ + { + .port_name = "NRT0-IPE_WR_1", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5630, /* IPE_WR_1 : NOC_NRT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5634, /* IPE_WR_1 : NOC_NRT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5638, /* IPE_WR_1 : NOC_NRT_0_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5640, /* IPE_WR_1 : NOC_NRT_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5648, /* IPE_WR_1 : NOC_NRT_0_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7008, /* IPE_WR_1 : NOC_NRT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4808, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4820, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4824, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT1-IPE_WR_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5830, /* IPE_WR_0 : NOC_NRT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5834, /* IPE_WR_0 : NOC_NRT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5838, /* IPE_WR_0 : NOC_NRT_1_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5840, /* IPE_WR_0 : NOC_NRT_1_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5848, /* IPE_WR_0 : NOC_NRT_1_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7088, /* IPE_WR_0 : NOC_NRT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4888, /* IPE_WR_0 : NOC_NRT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48A0, /* IPE_WR_0 : NOC_NRT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48A4, /* IPE_WR_0 : NOC_NRT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5820, /* IPE_WR_0 : NOC_NRT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT2-OFE_Linear_WR_1-CRE_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6030, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6034, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6038, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6040, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + 
.enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6048, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7108, /* OFE_WR_1-CRE_WR : NOC_NRT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4908, /* OFE_WR_1-CRE_WR : NOC_NRT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4920, /* OFE_WR_1-CRE_WR : NOC_NRT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4924, /* OFE_WR_1-CRE_WR:NOC_NRT_2_BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6020, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT3-OFE_UBWC_WR_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6230, /* OFE_WR_0 : NOC_NRT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6234, /* OFE_WR_0 : NOC_NRT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6238, /* OFE_WR_0 : NOC_NRT_3_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6240, /* OFE_WR_0 : NOC_NRT_3_NIU_DANGERLUT_LOW 
*/ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6248, /* OFE_WR_0 : NOC_NRT_3_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7188, /* OFE_WR_0 : NOC_NRT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4988, /* OFE_WR_0 : NOC_NRT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x49A0, /* OFE_WR_0 : NOC_NRT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x49A4, /* OFE_WR_0 : NOC_NRT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT4-OFE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6430, /* OFE_RD : NOC_NRT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6434, /* OFE_RD : NOC_NRT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6438, /* OFE_RD : NOC_NRT_4_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6440, /* OFE_RD : NOC_NRT_4_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6448, /* OFE_RD : NOC_NRT_4_NIU_SAFELUT_LOW */ + .value = 
0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6508, /* OFE_RD : NOC_NRT_4_NIU_DECCTL_LOW */ + .value = 1, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7208, /* OFE_RD : NOC_NRT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A08, /* OFE_RD : NOC_NRT_4_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A20, /* OFE_RD : NOC_NRT_4_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A24, /* OFE_RD : NOC_NRT_4_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6410, /* OFE_RD : NOC_NRT_4_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT5-CRE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6630, /* CRE_RD : NOC_NRT_5_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6634, /* CRE_RD : NOC_NRT_5_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6638, /* CRE_RD : NOC_NRT_5_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6640, /* CRE_RD : NOC_NRT_5_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6648, /* CRE_RD : NOC_NRT_5_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7288, /* CRE_RD : NOC_NRT_5_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A88, /* CRE_RD : NOC_NRT_5_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4AA0, /* CRE_RD : NOC_NRT_5_QOSGEN_SHAPING_LOW */ + .value = 0x14141414, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4AA4, /* CRE_RD : NOC_NRT_5_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6610, /* CRE_RD : NOC_NRT_5_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT6-JPEG0,1,2,3_RD_WR_ENC_RD_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6830, /* JPEG_RD_WR : NOC_NRT_6_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6834, /* JPEG_RD_WR : NOC_NRT_6_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6838, /* JPEG_RD_WR : NOC_NRT_6_NIU_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6840, /* JPEG_RD_WR : NOC_NRT_6_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6848, /* JPEG_RD_WR : NOC_NRT_6_NIU_SAFELUT_LOW */ + .value = 0xFFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7308, /* JPEG_RD_WR : NOC_NRT_6_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B08, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B20, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_SHAPING_LOW */ + .value = 0x10101010, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B24, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6820, /* JPEG_RD_WR : NOC_NRT_6_NIU_MAXWR_LOW */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6810, /* JPEG_RD_WR : NOC_NRT_6_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT7-IPE_RD_1", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A30, /* IPE_RD_1 : NOC_NRT_7_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A34, /* IPE_RD_1 : NOC_NRT_7_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A38, /* IPE_RD_1 : NOC_NRT_7_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A40, /* IPE_RD_1 : NOC_NRT_7_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A48, /* IPE_RD_1 : NOC_NRT_7_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7388, /* IPE_RD_1 : NOC_NRT_7_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B88, /* IPE_RD_1 : NOC_NRT_7_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4BA0, /* IPE_RD_1 : NOC_NRT_7_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4BA4, /* IPE_RD_1 : NOC_NRT_7_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6A10, /* IPE_RD_1 : NOC_NRT_7_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT8-IPE_RD_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C30, /* IPE_RD_0 : NOC_NRT_8_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C34, /* IPE_RD_0 : NOC_NRT_8_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C38, /* IPE_RD_0 : NOC_NRT_8_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type 
= CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C40, /* IPE_RD_0 : NOC_NRT_8_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C48, /* IPE_RD_0 : NOC_NRT_8_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7408, /* IPE_RD_0 : NOC_NRT_8_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C08, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C20, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C24, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6C10, /* IPE_RD_0 : NOC_NRT_8_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT9-CDM_IPE_OFE", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E30, /* CDM_IPE_OFE : NOC_NRT_9_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E34, /* CDM_IPE_OFE : NOC_NRT_9_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E38, /* CDM_IPE_OFE : NOC_NRT_9_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type 
= CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E40, /* CDM_IPE_OFE : NOC_NRT_9_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E48, /* CDM_IPE_OFE : NOC_NRT_9_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7488, /* CDM_IPE_OFE : NOC_NRT_9_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C88, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4CA0, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4CA4, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6E10, /* CDM_IPE_OFE : NOC_NRT_9_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_0_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7508, /* ICP_RD_WR : NOC_XM_ICP_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D08, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D20, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable 
= false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D24, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_1_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7588, /* ICP_RD_WR : NOC_XM_ICP_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D88, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4DA0, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4DA4, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam1080_cpas100_err_logger_offsets = { + .mainctrl = 0x08, /* NOC_ERL_MAINCTL_LOW */ + .errvld = 0x10, /* NOC_ERL_ERRVLD_LOW */ + .errlog0_low = 0x20, /* NOC_ERL_ERRLOG0_LOW */ + .errlog0_high = 0x24, /* NOC_ERL_ERRLOG0_HIGH */ + .errlog1_low = 0x28, /* NOC_ERL_ERRLOG1_LOW */ + .errlog1_high = 0x2C, /* NOC_ERL_ERRLOG1_HIGH */ + .errlog2_low = 0x30, /* NOC_ERL_ERRLOG2_LOW */ + .errlog2_high = 0x34, /* NOC_ERL_ERRLOG2_HIGH */ + .errlog3_low = 0x38, /* NOC_ERL_ERRLOG3_LOW */ + .errlog3_high = 0x3C, /* NOC_ERL_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam1080_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, + .enable_icp_clk_for_qchannel = { + .enable = false, + }, +}; + +static struct 
cam_cpas_cesta_vcd_reg_info cam_cpas_v1080_100_cesta_reg_info = { + .vcd_currol = { + .reg_offset = 0x266C, + .vcd_base_inc = 0x210, + .num_vcds = 8, + }, +}; + +static struct cam_cpas_vcd_info cam_v1080_100_vcd_info[] = { + { + .index = 0, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_0_clk_src", + }, + { + .index = 1, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_1_clk_src", + }, + { + .index = 2, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_2_clk_src", + }, + { + .index = 6, .type = CAM_CESTA_CRMC, .clk = "cam_cc_csid_clk_src", + }, + { + .index = 7, .type = CAM_CESTA_CRMC, .clk = "cam_cc_cphy_rx_clk_src", + }, + { + .index = 8, .type = CAM_CESTA_CRMB, .clk = "cam_cc_camnoc_axi_rt_clk_src", + }, +}; + +static struct cam_cpas_cesta_info cam_v1080_cesta_info = { + .vcd_info = &cam_v1080_100_vcd_info[0], + .num_vcds = ARRAY_SIZE(cam_v1080_100_vcd_info), + .cesta_reg_info = &cam_cpas_v1080_100_cesta_reg_info, +}; + +static struct cam_camnoc_addr_trans_client_info + cam1080_cpas100_addr_trans_client_info[] = { + { + .client_name = "icp1", + .reg_enable = 0x8088, + .reg_offset0 = 0x8098, + .reg_base1 = 0x80A0, + .reg_offset1 = 0x80A8, + .reg_base2 = 0x80B0, + .reg_offset2 = 0x80B8, + .reg_base3 = 0x80C0, + .reg_offset3 = 0x80C8, + }, +}; + +static struct cam_camnoc_addr_trans_info cam1080_cpas100_addr_trans_info = { + .num_supported_clients = ARRAY_SIZE(cam1080_cpas100_addr_trans_client_info), + .addr_trans_client_info = &cam1080_cpas100_addr_trans_client_info[0], +}; + +static struct cam_camnoc_info cam1080_cpas100_camnoc_info_rt = { + .specific = &cam_cpas_v1080_100_camnoc_specific_rt[0], + .specific_size = ARRAY_SIZE(cam_cpas_v1080_100_camnoc_specific_rt), + .irq_sbm = &cam_cpas_v1080_100_irq_sbm_rt, + .irq_err = &cam_cpas_v1080_100_irq_err_rt[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v1080_100_irq_err_rt), + .err_logger = &cam1080_cpas100_err_logger_offsets, + .errata_wa_list = &cam1080_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x8, + 
.sbm_clear_mask = 0x1, + }, +}; + +static struct cam_camnoc_info cam1080_cpas100_camnoc_info_nrt = { + .specific = &cam_cpas_v1080_100_camnoc_specific_nrt[0], + .specific_size = ARRAY_SIZE(cam_cpas_v1080_100_camnoc_specific_nrt), + .irq_sbm = &cam_cpas_v1080_100_irq_sbm_nrt, + .irq_err = &cam_cpas_v1080_100_irq_err_nrt[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v1080_100_irq_err_nrt), + .err_logger = &cam1080_cpas100_err_logger_offsets, + .errata_wa_list = &cam1080_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x80, + .sbm_clear_mask = 0x1, + }, + .addr_trans_info = &cam1080_cpas100_addr_trans_info, +}; + +static struct cam_cpas_camnoc_qchannel cam1080_cpas100_qchannel_info_rt = { + .qchannel_ctrl = 0xEC, + .qchannel_status = 0xF0, +}; + +static struct cam_cpas_camnoc_qchannel cam1080_cpas100_qchannel_info_nrt = { + .qchannel_ctrl = 0xF4, + .qchannel_status = 0xF8, +}; + +static struct cam_cpas_camnoc_qchannel cam1080_cpas100_qchannel_info_pdx = { + .qchannel_ctrl = 0x130, + .qchannel_status = 0x12C, +}; + +/* + * struct cam_cpas_secure_info: CPAS secure information are used only for + * debug purpose, Register access is restricted in normal builds. 
+ * + */ +static struct cam_cpas_secure_info cam1080_cpas100_secure_info = { + .secure_access_ctrl_offset = 0x1C, + .secure_access_ctrl_value = 0xFFFFFFFF, +}; + +static struct cam_cpas_info cam1080_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 2, + .hw_caps_offsets = {0x8, 0xDC}, + }, + .qchannel_info = {&cam1080_cpas100_qchannel_info_rt, + &cam1080_cpas100_qchannel_info_nrt, + &cam1080_cpas100_qchannel_info_pdx}, + .num_qchannel = 3, + .hw_caps_secure_info = &cam1080_cpas100_secure_info, +}; + +#endif /* _CPASTOP_V1080_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v150_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v150_100.h new file mode 100644 index 0000000000..4d9dcaeac0 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v150_100.h @@ -0,0 +1,538 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V150_100_H_ +#define _CPASTOP_V150_100_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v150_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2040, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 
0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v150_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset 
= 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1190, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? 
true : false, + .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v150_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */ + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0x2, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE02, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */ + .value = 0x66666666, + 
}, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */ + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE13, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */ + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + 
.priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */ + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */ + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 0x5, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .enable = false, + }, + { + .port_type = CAM_CAMNOC_ICP, + .enable = true, + .flag_out_set0_low = { + .enable = true, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2088, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam150_cpas100_err_logger_offsets = { + .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x2730, /* 
ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam150_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2100, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam150_cpas100_camnoc_info = { + .specific = &cam_cpas_v150_100_camnoc_specific[0], + .specific_size = sizeof(cam_cpas_v150_100_camnoc_specific) / + sizeof(cam_cpas_v150_100_camnoc_specific[0]), + .irq_sbm = &cam_cpas_v150_100_irq_sbm, + .irq_err = &cam_cpas_v150_100_irq_err[0], + .irq_err_size = sizeof(cam_cpas_v150_100_irq_err) / + sizeof(cam_cpas_v150_100_irq_err[0]), + .err_logger = &cam150_cpas100_err_logger_offsets, + .errata_wa_list = &cam150_cpas100_errata_wa_list, +}; + +static struct cam_cpas_info cam150_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V150_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v165_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v165_100.h new file mode 100644 index 0000000000..79dafe45b8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v165_100.h @@ -0,0 +1,719 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + #ifndef _CPASTOP_V165_100_H_ + #define _CPASTOP_V165_100_H_ + + #define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v165_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2240, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2248, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2280, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v165_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x4F08, /* ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x4F10, /* ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x4F18, /* ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3BA0, /* SPECIFIC_IFE0_MAIN_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3B90, + /* SPECIFIC_IFE0_MAIN_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 
0x3B98, /* SPECIFIC_IFE0_MAIN_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x55A0, /* SPECIFIC_IFE1_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5590, /* SPECIFIC_IFE1_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5598, /* SPECIFIC_IFE1_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2F20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2F10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2F18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2BA0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2B90, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2B98, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? true : false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v165_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4230, /* CDM_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4234, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, /* CDM_Urgency_Low */ + .offset = 0x4238, + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4240, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4248, /* CDM_SAFELUT_LOW */ + .value = 0xFFFF, + 
}, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE01234_RDI_WRITE, + .port_name = "IFE01234_RDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3630, /* IFE01234_RDI_PRIORITYLUT_LOW */ + .value = 0x55554433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3634, /* IFE01234_RDI_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3638, /* IFE01234_RDI_LINEAR_URGENCY_LOW */ + .mask = 0x1FF0, + .shift = 0x4, + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3640, /* IFE01234_RDI_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3648, /* IFE01234_RDI_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x3620, /* IFE01234_RDI_MAXWR_LOW */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4808, /* IFE01234_RDI_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4820, /* IFE01234_RDI_QOSGEN_SHAPING_LOW */ + .value = 0x07070707, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4824, /* IFE01234_RDI_QOSGEN_SHAPING_HIGH */ + .value = 0x07070707, + }, + }, + { + .port_type = CAM_CAMNOC_IFE01_NRDI_WRITE, + .port_name = "IFE01_NRDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3A30, /* IFE01_NRDI_PRIORITYLUT_LOW */ + .value = 0x55554433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3A34, /* IFE01_NRDI_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x3A38, /* IFE01_NRDI_URGENCY_LOW */ + /* IFE01_NRDI_URGENCY_LOW_WRITE_MASK */ + .mask = 0x1FF0, + /* IFE01_NRDI_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3A40, /* IFE01_NRDI_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3A48, /* IFE01_NRDI_SAFELUT_LOW */ + .value = 0xF, + }, + /* no reg for this */ + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3B88, /* IFE01_NRDI_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x3A20, /* IFE01_MAXWR_LOW */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4708, /* IFE01_NRDI_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4720, /* IFE01_NRDI_QOSGEN_SHAPING_LOW */ + .value = 0x07070707, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4724, /* IFE01_NRDI_QOSGEN_SHAPING_HIGH */ + .value = 0x07070707, + }, + }, + { + .port_type = CAM_CAMNOC_IFE2_NRDI_WRITE, + .port_name = "IFE2_NRDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5430, /* IFE2_NDRI_PRIORITYLUT_LOW */ + .value = 0x55554433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* IFE2_NRDI_PRIORITYLUT_HIGH */ + .offset = 0x5434, + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x5438, /* IFE2_NRDI_URGENCY_LOW */ + /* IFE2_NRDI_URGENCY_LOW_WRITE_MASK */ + .mask = 0x1FF0, + /* IFE2_NRDI_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x5440, /* IFE2_NRDI_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x5448, /* IFE2_NRDI_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. 
+ * Power on default values are taking care of required + * register settings. + */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5588, /* IFE2_NRDI_ENCCTL_LOW */ + .value = 0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5420, /* IFE2_MAXWR_LOW */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5188, /* IFE2_NRDI_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x51A0, /* IFE2_NRDI_QOSGEN_SHAPING_LOW */ + .value = 0x07070707, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x51A4, /* IFE2_NRDI_QOSGEN_SHAPING_HIGH */ + .value = 0x07070707, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .port_name = "IPE_BPS_LRME_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* IPE_BPS_LRME_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, /*Value is 0 in excel sheet */ + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* IPE_BPS_LRME_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, /*Value is 0 in excel sheet */ + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x2E38, /* IPE_BPS_LRME_RD_URGENCY_LOW */ + /* IPE_BPS_LRME_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* IPE_BPS_LRME_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* IPE_BPS_LRME_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = 
false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* IPE_BPS_LRME_RD_SAFELUT_LOW */ + .value = 0xFFFF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2F08, /* IPE_BPS_LRME_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .port_name = "IPE_BPS_LRME_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A30, /* IPE_BPS_LRME_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A34, /* IPE_BPS_LRME_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x2A38, /* IPE_BPS_LRME_WR_URGENCY_LOW */ + /* IPE_BPS_LRME_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* IPE_BPS_LRME_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A40, /* IPE_BPS_LRME_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A48, /* IPE_BPS_LRME_WR_SAFELUT_LOW */ + .value = 0xFFFF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2B88, /* IPE_BPS_LRME_WR_ENCCTL_LOW */ + .value = 0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2A20, /* IBL_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2630, /* JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2634, /* JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x2638, /* JPEG_URGENCY_LOW */ + .mask = 0x3F, + .shift = 0x0, + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2640, /* JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2648, /* JPEG_SAFELUT_LOW */ + .value = 0xFFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2620, /* JPEG_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + /*SidebandManager_main_SidebandManager_FlagOutSet0_Low*/ + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = true, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x5088, + .value = 0x50, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam165_cpas100_err_logger_offsets = { + .mainctrl = 0x4F08, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x4F10, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x4F20, /* ERRLOGGER_ERRLOG0_LOW 
*/ + .errlog0_high = 0x4F24, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x4F28, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x4F2C, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x4F30, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x4F34, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x4F38, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x4F3C, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam165_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam165_cpas100_camnoc_info = { + .specific = &cam_cpas_v165_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v165_100_camnoc_specific), + .irq_sbm = &cam_cpas_v165_100_irq_sbm, + .irq_err = &cam_cpas_v165_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v165_100_irq_err), + .err_logger = &cam165_cpas100_err_logger_offsets, + .errata_wa_list = &cam165_cpas100_errata_wa_list, +}; + +static struct cam_cpas_info cam165_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V165_100_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v170_110.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v170_110.h new file mode 100644 index 0000000000..5eaa8ba3ee --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v170_110.h @@ -0,0 +1,546 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V170_110_H_ +#define _CPASTOP_V170_110_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas110_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2040, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas110_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = 
CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1190, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? true : false, + .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas110_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */ + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0x2, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* 
SPECIFIC_CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE02, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */ + .value = 0x66666543, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */ + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IFE13, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */ + .value = 0x66666543, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */ + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */ + 
.shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */ + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* 
SPECIFIC_IBL_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */ + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1448, /* 
SPECIFIC_JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .enable = false, + }, + { + .port_type = CAM_CAMNOC_ICP, + .enable = true, + .flag_out_set0_low = { + .enable = true, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2088, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam170_cpas110_err_logger_offsets = { + .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2100, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam170_cpas110_camnoc_info = { + .specific = &cam_cpas110_camnoc_specific[0], + .specific_size = sizeof(cam_cpas110_camnoc_specific) / + sizeof(cam_cpas110_camnoc_specific[0]), + .irq_sbm = &cam_cpas110_irq_sbm, + .irq_err = &cam_cpas110_irq_err[0], + .irq_err_size = sizeof(cam_cpas110_irq_err) / + sizeof(cam_cpas110_irq_err[0]), + .err_logger = &cam170_cpas110_err_logger_offsets, + .errata_wa_list = &cam170_cpas110_errata_wa_list, +}; + +static struct cam_cpas_info cam170_cpas110_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V170_110_H_ */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v170_200.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v170_200.h new file mode 100644 index 0000000000..24abb9f09c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v170_200.h @@ -0,0 +1,582 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V170_200_H_ +#define _CPASTOP_V170_200_H_ + +#define TEST_IRQ_ENABLE 0 +#define TCSR_CONN_RESET 0x0 +#define TCSR_CONN_SET 0x3 + +static struct cam_camnoc_irq_sbm cam_cpas_v170_200_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2040, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 
0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v170_200_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset 
= 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1190, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? 
true : false, + .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v170_200_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */ + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0x2, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE02, + .port_name = "IFE02", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */ + .value = 0x66666543, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* 
SPECIFIC_IFE02_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */ + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2420, /* IFE02_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE13, + .port_name = "IFE13", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */ + .value = 0x66666543, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */ + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x848, /* 
SPECIFIC_IFE13_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2620, /* IFE13_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .port_name = "IPE_BPS_LRME_RD", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */ + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .port_name = "IPE_BPS_LRME_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + 
.enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */ + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2A20, /* IPE_BPS_LRME_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2C20, /* IPE_BPS_LRME_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .port_name = "FD", + .enable = false, + }, + { + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = true, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2088, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam170_cpas200_err_logger_offsets = { + .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam170_cpas200_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2100, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam170_cpas200_camnoc_info = { + .specific = &cam_cpas_v170_200_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v170_200_camnoc_specific), + .irq_sbm = &cam_cpas_v170_200_irq_sbm, + .irq_err = &cam_cpas_v170_200_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v170_200_irq_err), + .err_logger = &cam170_cpas200_err_logger_offsets, + 
.errata_wa_list = &cam170_cpas200_errata_wa_list, +}; + +static struct cam_cpas_info cam170_cpas200_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V170_200_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_100.h new file mode 100644 index 0000000000..6995f896e5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_100.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V175_100_H_ +#define _CPASTOP_V175_100_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v175_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2040, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 
0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v175_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset 
= 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1190, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? 
true : false, + .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v175_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */ + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0x2, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE02, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */ + .value = 0x66666543, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */ + .value = 0x66666666, + 
}, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */ + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IFE13, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */ + .value = 0x66666543, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */ + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + /* + * Do not explicitly set 
ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */ + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */ + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .enable = false, + }, + { + .port_type = CAM_CAMNOC_ICP, + .enable = true, + .flag_out_set0_low = { + .enable = true, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2088, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam175_cpas100_err_logger_offsets = { + .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 
0x273c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam175_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2100, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam175_cpas100_camnoc_info = { + .specific = &cam_cpas_v175_100_camnoc_specific[0], + .specific_size = sizeof(cam_cpas_v175_100_camnoc_specific) / + sizeof(cam_cpas_v175_100_camnoc_specific[0]), + .irq_sbm = &cam_cpas_v175_100_irq_sbm, + .irq_err = &cam_cpas_v175_100_irq_err[0], + .irq_err_size = sizeof(cam_cpas_v175_100_irq_err) / + sizeof(cam_cpas_v175_100_irq_err[0]), + .err_logger = &cam175_cpas100_err_logger_offsets, + .errata_wa_list = &cam175_cpas100_errata_wa_list, +}; + +static struct cam_cpas_info cam175_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V175_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_101.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_101.h new file mode 100644 index 0000000000..42cc5100a8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_101.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V175_101_H_ +#define _CPASTOP_V175_101_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v175_101_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2040, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v175_101_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = 
CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1190, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? true : false, + .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v175_101_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */ + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0x2, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* 
SPECIFIC_CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE02, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */ + .value = 0x66666543, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */ + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IFE13, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */ + .value = 0x66666543, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */ + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */ + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */ + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .enable = false, + }, + { + .port_type = CAM_CAMNOC_ICP, + .enable = true, + .flag_out_set0_low = { + .enable = true, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2088, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam175_cpas101_err_logger_offsets = { + .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 
0x273c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam175_cpas101_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2100, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam175_cpas101_camnoc_info = { + .specific = &cam_cpas_v175_101_camnoc_specific[0], + .specific_size = sizeof(cam_cpas_v175_101_camnoc_specific) / + sizeof(cam_cpas_v175_101_camnoc_specific[0]), + .irq_sbm = &cam_cpas_v175_101_irq_sbm, + .irq_err = &cam_cpas_v175_101_irq_err[0], + .irq_err_size = sizeof(cam_cpas_v175_101_irq_err) / + sizeof(cam_cpas_v175_101_irq_err[0]), + .err_logger = &cam175_cpas101_err_logger_offsets, + .errata_wa_list = &cam175_cpas101_errata_wa_list, +}; + +static struct cam_cpas_info cam175_cpas101_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V175_101_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_120.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_120.h new file mode 100644 index 0000000000..a7077cd580 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_120.h @@ -0,0 +1,768 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V175_120_H_ +#define _CPASTOP_V175_120_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v175_120_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2240, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2248, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2280, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v175_120_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x4F08, /* ERRORLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x4F10, /* ERRORLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x4F18, /* ERRORLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3BA0, /* SPECIFIC_IFE02_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3B90, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type 
= CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x3B98, /* SPECIFIC_IFE02_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x55a0, /* SPECIFIC_IFE13_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5590, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5598, /* SPECIFIC_IFE13_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2F20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2F10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2F18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2Ba0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2B90, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2B98, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + 
.err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? true : false, + .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v175_120_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4230, /* SPECIFIC_CDM_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4234, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + /* cdm_main_SpecificToNttpTr_Urgency_Low */ + .offset = 0x4238, + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0x2, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4240, /* SPECIFIC_CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + 
.masked_value = 0, + .offset = 0x4248, /* SPECIFIC_CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE0123_RDI_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IFE0123_PRIORITYLUT_LOW */ + .offset = 0x3630, + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IFE0123_PRIORITYLUT_HIGH */ + .offset = 0x3634, + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x3638, /* SPECIFIC_IFE0123_URGENCY_LOW */ + /* SPECIFIC_IFE0123_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE0123_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3640, /* SPECIFIC_IFE0123_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3648, /* SPECIFIC_IFE0123_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE0_NRDI_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3A30, /* SPECIFIC_IFE0_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3A34, /* SPECIFIC_IFE0_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x3A38, /* SPECIFIC_IFE0_URGENCY_LOW */ + /* SPECIFIC_IFE0_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE0_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3A40, /* SPECIFIC_IFE0_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3A48, /* SPECIFIC_IFE0_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3B88, /* SPECIFIC_IFE0_ENCCTL_LOW */ + .value = 1, + }, + }, + { + /* IFE0/1 RDI READ PATH */ + .port_type = CAM_CAMNOC_IFE01_RDI_READ, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3230, /* SPECIFIC_IFE1_PRIORITYLUT_LOW */ + .value = 0x44443333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3234, /* SPECIFIC_IFE1_PRIORITYLUT_HIGH */ + .value = 0x66665555, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x3238, /* SPECIFIC_IFE1_URGENCY_LOW */ + /* SPECIFIC_IFE1_URGENCY_LOW_WRITE_MASK */ + .mask = 0x7, + /* SPECIFIC_IFE1_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x0, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3240, /* SPECIFIC_IFE1_DANGERLUT_LOW */ + .value = 0x00000000, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3248, /* SPECIFIC_IFE1_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE1_NRDI_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5430, /* SPECIFIC_IFE1_WR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IFE1_WR_PRIORITYLUT_HIGH */ + .offset = 0x5434, + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x5438, /* SPECIFIC_IFE1_WR_URGENCY_LOW */ + /* SPECIFIC_IFE1_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE1_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x5440, /* SPECIFIC_IFE1_WR_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x5448, /* SPECIFIC_IFE1_WR_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5588, /* SPECIFIC_IFE1_WR_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x2E38, /* SPECIFIC_IBL_RD_URGENCY_LOW */ + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2F08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A30, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A34, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x2A38, /* SPECIFIC_IBL_WR_URGENCY_LOW */ + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A40, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A48, /* SPECIFIC_IBL_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2B88, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_VID_DISP_WRITE, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IPE_VID_DISP_PRIORITYLUT_LOW */ + .offset = 0x5E30, + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IPE_VID_DISP_PRIORITYLUT_HIGH */ + .offset = 0x5E34, + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + /* SPECIFIC_IPE_VID_DISP_URGENCY_LOW */ + .offset = 0x5E38, + /* SPECIFIC_IPE_VID_DISP_URGENCY_LOW_READ_MASK */ + .mask = 0x70, + /* SPECIFIC_IPE_VID_DISP_URGENCY_LOW_READ_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC__IPE_VID_DISP_DANGERLUT_LOW */ + .offset = 0x5E40, + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IPE_VID_DISP_SAFELUT_LOW */ + .offset = 0x5E48, + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5F88, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 1, + }, + }, + + { + .port_type = CAM_CAMNOC_JPEG, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2630, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2634, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2638, /* SPECIFIC_JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2640, /* SPECIFIC_JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2648, /* SPECIFIC_JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E30, /* SPECIFIC_FD_PRIORITYLUT_LOW */ + .value = 0x44444444, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E34, /* SPECIFIC_FD_PRIORITYLUT_HIGH */ + .value = 0x44444444, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E38, /* SPECIFIC_FD_URGENCY_LOW */ + .value = 0x44, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E40, /* SPECIFIC_FD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E48, /* SPECIFIC_FD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + + }, + { + /*SidebandManager_main_SidebandManager_FlagOutSet0_Low*/ + .port_type = CAM_CAMNOC_ICP, + .enable = true, + .flag_out_set0_low = { + .enable = true, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2288, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam175_cpas120_err_logger_offsets = { + .mainctrl = 0x4F08, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x4F10, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x4F20, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x4F24, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x4F28, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x4F2c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x4F30, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x4F34, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x4F38, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x4F3c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam175_cpas120_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2300, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam175_cpas120_camnoc_info = { + .specific = &cam_cpas_v175_120_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v175_120_camnoc_specific), + .irq_sbm = &cam_cpas_v175_120_irq_sbm, + .irq_err = &cam_cpas_v175_120_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v175_120_irq_err), + .err_logger = &cam175_cpas120_err_logger_offsets, + .errata_wa_list = &cam175_cpas120_errata_wa_list, +}; + +static struct cam_cpas_info cam175_cpas120_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V175_120_H_ 
*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_130.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_130.h new file mode 100644 index 0000000000..4307314923 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v175_130.h @@ -0,0 +1,840 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V175_130_H_ +#define _CPASTOP_V175_130_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v175_130_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2240, /* SBM_FAULTINEN0_LOW */ + .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2248, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2280, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 
0x6 : 0x2, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v175_130_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x4F08, /* ERRORLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x4F10, /* ERRORLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x4F18, /* ERRORLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3BA0, /* SPECIFIC_IFE0_MAIN_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + /* SPECIFIC_IFE0_MAIN_ENCERRSTATUS_LOW */ + .offset = 0x3B90, + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x3B98, /* SPECIFIC_IFE0_MAIN_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x55A0, /* SPECIFIC_IFE1_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + /* SPECIFIC_IFE1_WR_ENCERRSTATUS_LOW */ + .offset = 0x5590, + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5598, /* SPECIFIC_IFE1_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2F20, /* SPECIFIC_IBL_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2F10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2F18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2BA0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2B90, + /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2B98, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? 
true : false, + .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v175_130_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4230, /* SPECIFIC_CDM_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4234, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + /* cdm_main_SpecificToNttpTr_Urgency_Low */ + .offset = 0x4238, + .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */ + .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */ + .value = 0x2, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4240, /* SPECIFIC_CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4248, /* SPECIFIC_CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE0123_RDI_WRITE, + .port_name = "IFE0123_RDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IFE0123_PRIORITYLUT_LOW */ + .offset = 0x3630, + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + 
.masked_value = 0, + /* SPECIFIC_IFE0123_PRIORITYLUT_HIGH */ + .offset = 0x3634, + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x3638, /* SPECIFIC_IFE0123_URGENCY_LOW */ + /* SPECIFIC_IFE0123_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE0123_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3640, /* SPECIFIC_IFE0123_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3648, /* SPECIFIC_IFE0123_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x3620, /* IFE0123_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE0_NRDI_WRITE, + .port_name = "IFE0_NRDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3A30, /* SPECIFIC_IFE0_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3A34, /* SPECIFIC_IFE0_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x3A38, /* SPECIFIC_IFE0_URGENCY_LOW */ + /* SPECIFIC_IFE0_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE0_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3A40, /* SPECIFIC_IFE0_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut 
= { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3A48, /* SPECIFIC_IFE0_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3B88, /* SPECIFIC_IFE0_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x3A20, /* IFE0_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + /* IFE0/1 RDI READ PATH */ + .port_type = CAM_CAMNOC_IFE01_RDI_READ, + .port_name = "IFE01_RDI_RD", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3230, /* SPECIFIC_IFE1_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3234, /* SPECIFIC_IFE1_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x3238, /* SPECIFIC_IFE1_URGENCY_LOW */ + /* SPECIFIC_IFE1_URGENCY_LOW_WRITE_MASK */ + .mask = 0x7, + /* SPECIFIC_IFE1_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x0, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3240, /* SPECIFIC_IFE1_DANGERLUT_LOW */ + .value = 0x00000000, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x3248, /* SPECIFIC_IFE1_SAFELUT_LOW */ + .value = 0xFFFFFFFF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE1_NRDI_WRITE, + .port_name = "IFE1_NRDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5430, /* SPECIFIC_IFE1_WR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IFE1_WR_PRIORITYLUT_HIGH */ + .offset = 0x5434, + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x5438, /* SPECIFIC_IFE1_WR_URGENCY_LOW */ + /* SPECIFIC_IFE1_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IFE1_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x5440, /* SPECIFIC_IFE1_WR_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x5448, /* SPECIFIC_IFE1_WR_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5588, /* SPECIFIC_IFE1_WR_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5420, /* IFE1_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ, + .port_name = "IPE_BPS_LRME_RD", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x2E38, /* SPECIFIC_IBL_RD_URGENCY_LOW */ + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */ + .mask = 0x7, + /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */ + .shift = 0x0, + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2F08, /* SPECIFIC_IBL_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE, + .port_name = "IPE_BPS_LRME_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A30, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A34, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + .offset = 0x2A38, /* SPECIFIC_IBL_WR_URGENCY_LOW */ + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */ + .mask = 0x70, + /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */ + .shift = 0x4, + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A40, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2A48, /* SPECIFIC_IBL_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2B88, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2A20, /* IBL_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_VID_DISP_WRITE, + .port_name = "IPE_VID_DISP_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IPE_VID_DISP_PRIORITYLUT_LOW */ + .offset = 0x5E30, + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IPE_VID_DISP_PRIORITYLUT_HIGH */ + .offset = 0x5E34, + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 1, + /* SPECIFIC_IPE_VID_DISP_URGENCY_LOW */ + .offset = 0x5E38, + /* SPECIFIC_IPE_VID_DISP_URGENCY_LOW_READ_MASK */ + .mask = 0x70, + /* SPECIFIC_IPE_VID_DISP_URGENCY_LOW_READ_SHIFT */ + .shift = 0x4, + .value = 3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC__IPE_VID_DISP_DANGERLUT_LOW */ + .offset = 0x5E40, + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* SPECIFIC_IPE_VID_DISP_SAFELUT_LOW */ + .offset = 0x5E48, + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5F88, /* SPECIFIC_IBL_WR_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5E20, /* IPE_VID_DISP_MAXWR_LOW */ + .value = 0x0, + }, + }, + + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2630, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2634, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2638, /* SPECIFIC_JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2640, /* SPECIFIC_JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2648, /* SPECIFIC_JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2620, /* JPEG_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .port_name = "FD", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E30, /* SPECIFIC_FD_PRIORITYLUT_LOW */ + .value = 0x44444444, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E34, /* SPECIFIC_FD_PRIORITYLUT_HIGH */ + .value = 0x44444444, + }, + .urgency = { + .enable = true, + .access_type 
= CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E38, /* SPECIFIC_FD_URGENCY_LOW */ + .value = 0x44, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E40, /* SPECIFIC_FD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3E48, /* SPECIFIC_FD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x3E20, /* FD_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + /*SidebandManager_main_SidebandManager_FlagOutSet0_Low*/ + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = true, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2288, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam175_cpas130_err_logger_offsets = { + .mainctrl = 0x4F08, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x4F10, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x4F20, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x4F24, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x4F28, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x4F2c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x4F30, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x4F34, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x4F38, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x4F3c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam175_cpas130_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2300, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, + /* TZ owned register */ + .tcsr_camera_hf_sf_ares_glitch = { + .enable = true, + .data.reg_info 
= { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + /* TCSR_CAMERA_HF_SF_ARES_GLITCH_MASK */ + .offset = 0x01FCA08C, + .value = 0x4, /* set bit[2] to 1 */ + }, + }, +}; + +static struct cam_camnoc_info cam175_cpas130_camnoc_info = { + .specific = &cam_cpas_v175_130_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v175_130_camnoc_specific), + .irq_sbm = &cam_cpas_v175_130_irq_sbm, + .irq_err = &cam_cpas_v175_130_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v175_130_irq_err), + .err_logger = &cam175_cpas130_err_logger_offsets, + .errata_wa_list = &cam175_cpas130_errata_wa_list, +}; + +static struct cam_cpas_info cam175_cpas130_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V175_130_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v480_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v480_100.h new file mode 100644 index 0000000000..f1540daf4f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v480_100.h @@ -0,0 +1,772 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V480_100_H_ +#define _CPASTOP_V480_100_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v480_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3840, /* SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + (TEST_IRQ_ENABLE ? 
+ 0x40 : /* SBM_FAULTINEN0_LOW_PORT6_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3848, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x3880, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 0x5 : 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v480_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7008, /* ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7010, /* ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7018, /* ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1BA0, /* IFE_UBWC_STATS_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1B90, /* IFE_UBWC_STATS_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1B98, /* IFE_UBWC_STATS_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2520, /* IPE1_BPS_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2510, /* IPE1_BPS_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + 
.enable = true, + .offset = 0x2518, /* IPE1_BPS_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1F20, /* IPE0_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1F10, /* IPE0_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1F18, /* IPE0_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x29A0, /* IPE_BPS_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2990, + /* IPE_BPS_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2998, /* IPE_BPS_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? 
true : false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v480_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* CDM_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* CDM_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .port_name = "FD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x630, /* FD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x634, /* FD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x638, /* FD_URGENCY_LOW */ + .value = 0x33, + }, + 
.danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x640, /* FD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x648, /* FD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x620, /* FD_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR, + .port_name = "IFE_LINEAR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA30, /* IFE_LINEAR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA34, /* IFE_LINEAR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA38, /* IFE_LINEAR_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA40, /* IFE_LINEAR_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA48, /* IFE_LINEAR_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0xA20, /* IFE_LINEAR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_RD, + .port_name = "IFE_RDI_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* IFE_RDI_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* IFE_RDI_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1038, /* IFE_RDI_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1040, /* IFE_RDI_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1048, /* IFE_RDI_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR, + .port_name = "IFE_RDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* IFE_RDI_WR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* IFE_RDI_WR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* IFE_RDI_WR_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1440, /* IFE_RDI_WR_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1448, /* IFE_RDI_WR_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x1420, /* IFE_RDI_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_UBWC_STATS, + .port_name = "IFE_UBWC_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A30, /* IFE_UBWC_STATS_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A34, /* IFE_UBWC_STATS_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A38, /* IFE_UBWC_STATS_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A40, /* IFE_UBWC_STATS_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A48, /* IFE_UBWC_STATS_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1B88, /* IFE_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x1A20, /* IFE_UBWC_STATS_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE0_RD, + .port_name = "IPE0_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E30, /* IPE0_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E34, /* IPE0_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E38, /* IPE0_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E40, /* IPE0_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E48, /* IPE0_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1F08, /* IPE0_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE1_BPS_RD, + .port_name = "IPE1_BPS_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2430, /* IPE1_BPS_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2434, /* IPE1_BPS_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2438, /* IPE1_BPS_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2440, /* IPE1_BPS_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2448, /* IPE1_BPS_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2508, /* IPE1_BPS_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_WR, + .port_name = "IPE_BPS_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2830, /* IPE_BPS_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2834, /* IPE_BPS_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2838, /* IPE_BPS_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2840, /* IPE_BPS_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2848, /* IPE_BPS_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2988, /* IPE_BPS_WR_ENCCTL_LOW */ + .value = 1, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2820, /* IPE_BPS_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E38, /* JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2E20, /* JPEG_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2088, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam480_cpas100_err_logger_offsets = { + .mainctrl = 0x7008, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x7010, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x7020, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x7024, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x7028, /* 
ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x702c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x7030, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x7034, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x7038, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x703c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam480_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2100, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam480_cpas100_camnoc_info = { + .specific = &cam_cpas_v480_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v480_100_camnoc_specific), + .irq_sbm = &cam_cpas_v480_100_irq_sbm, + .irq_err = &cam_cpas_v480_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v480_100_irq_err), + .err_logger = &cam480_cpas100_err_logger_offsets, + .errata_wa_list = &cam480_cpas100_errata_wa_list, +}; + +static struct cam_cpas_info cam480_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V480_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v480_custom.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v480_custom.h new file mode 100644 index 0000000000..6b8637c4b4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v480_custom.h @@ -0,0 +1,719 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V480_CUSTOM_H_ +#define _CPASTOP_V480_CUSTOM_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v480_custom_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3840, /* SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + (TEST_IRQ_ENABLE ? + 0x40 : /* SBM_FAULTINEN0_LOW_PORT6_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3848, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x3880, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 0x5 : 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v480_custom_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7008, /* ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7010, /* ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7018, /* ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1BA0, /* IFE_UBWC_STATS_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1B90, /* IFE_UBWC_STATS_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1B98, /* IFE_UBWC_STATS_ENCERRCLR_LOW */ 
+ .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2520, /* IPE1_BPS_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2510, /* IPE1_BPS_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2518, /* IPE1_BPS_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1F20, /* IPE0_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1F10, /* IPE0_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1F18, /* IPE0_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x29A0, /* IPE_BPS_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2990, + /* IPE_BPS_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2998, /* IPE_BPS_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + 
.err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? true : false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v480_custom_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* CDM_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* CDM_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_FD, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x630, /* FD_PRIORITYLUT_LOW */ + .value 
= 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x634, /* FD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x638, /* FD_URGENCY_LOW */ + .value = 0x33, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x640, /* FD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x648, /* FD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA30, /* IFE_LINEAR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA34, /* IFE_LINEAR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA38, /* IFE_LINEAR_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA40, /* IFE_LINEAR_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA48, /* IFE_LINEAR_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_RD, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* IFE_RDI_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* IFE_RDI_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1038, /* IFE_RDI_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1040, /* IFE_RDI_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1048, /* IFE_RDI_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* IFE_RDI_WR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* IFE_RDI_WR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* IFE_RDI_WR_URGENCY_LOW */ + .value = 0x1070, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1440, /* IFE_RDI_WR_DANGERLUT_LOW */ + .value = 0xFFFFFFF0, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1448, /* IFE_RDI_WR_SAFELUT_LOW */ + .value = 0x1, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_UBWC_STATS, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A30, /* IFE_UBWC_STATS_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A34, /* IFE_UBWC_STATS_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A38, /* IFE_UBWC_STATS_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A40, /* IFE_UBWC_STATS_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A48, /* IFE_UBWC_STATS_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1B88, /* IFE_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE0_RD, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E30, /* IPE0_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E34, /* IPE0_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E38, /* IPE0_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E40, /* IPE0_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E48, /* IPE0_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1F08, /* IPE0_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE1_BPS_RD, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2430, /* IPE1_BPS_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2434, /* IPE1_BPS_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2438, /* IPE1_BPS_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2440, /* IPE1_BPS_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2448, /* IPE1_BPS_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2508, /* IPE1_BPS_RD_DECCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_WR, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2830, /* IPE_BPS_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2834, /* IPE_BPS_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2838, /* IPE_BPS_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2840, /* IPE_BPS_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2848, /* IPE_BPS_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2988, /* IPE_BPS_WR_ENCCTL_LOW */ + .value = 1, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E38, /* JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .enable = true, + .flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x2088, + .value = 0x100000, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam480_custom_err_logger_offsets = { + .mainctrl = 0x7008, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x7010, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x7020, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x7024, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x7028, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x702c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x7030, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x7034, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x7038, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x703c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list 
cam480_custom_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2100, /* SidebandManager_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam480_custom_camnoc_info = { + .specific = &cam_cpas_v480_custom_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v480_custom_camnoc_specific), + .irq_sbm = &cam_cpas_v480_custom_irq_sbm, + .irq_err = &cam_cpas_v480_custom_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v480_custom_irq_err), + .err_logger = &cam480_custom_err_logger_offsets, + .errata_wa_list = &cam480_cpas100_errata_wa_list, +}; + +static struct cam_cpas_info cam480_custom_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V480_CUSTOM_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v520_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v520_100.h new file mode 100644 index 0000000000..2db08b27b1 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v520_100.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V520_100_H_ +#define _CPASTOP_V520_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v520_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xA40, /* SBM_FAULTINEN0_LOW */ + .value = 0x1, /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xA48, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xA80, /* SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v520_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xD08, /* ERRORLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xD10, /* ERRORLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xD18, /* ERRORLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xA88, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xA90, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + + +static struct cam_camnoc_specific + cam_cpas_v520_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE30, /* CDM_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + 
.enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE34, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE38, /* CDM_URGENCY_LOW */ + .value = 0x00000003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE40, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE48, /* CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_TFE, + .port_name = "TFE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_LOW */ + .offset = 0x30, + .value = 0x44443333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_HIGH */ + .offset = 0x34, + .value = 0x66665555, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* TFE_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x40, /* TFE_DANGERLUT_LOW */ + .value = 0xffff0000, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x48, /* TFE_SAFELUT_LOW */ + .value = 0x00000003, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x20, /* TFE_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_OPE, + .port_name = "OPE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* OPE_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* OPE_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x438, /* OPE_URGENCY_LOW */ + .value = 0x00000033, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* OPE_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* OPE_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x420, /* OPE_MAXWR_LOW */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam520_cpas100_err_logger_offsets = { + .mainctrl = 0xD08, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0xD10, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0xD20, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0xD24, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0xD28, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0xD2C, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0xD30, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0xD34, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0xD38, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0xD3C, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_camnoc_info cam520_cpas100_camnoc_info = { + .specific = &cam_cpas_v520_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v520_100_camnoc_specific), + .irq_sbm = &cam_cpas_v520_100_irq_sbm, + .irq_err = &cam_cpas_v520_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v520_100_irq_err), + .err_logger = &cam520_cpas100_err_logger_offsets, + .errata_wa_list = NULL, + .test_irq_info = { + .sbm_enable_mask = 0x2, + .sbm_clear_mask = 0x2, + } +}; + +static struct cam_cpas_camnoc_qchannel cam520_cpas100_qchannel_info = { + .qchannel_ctrl = 0x14, + .qchannel_status = 0x18, +}; + +static struct cam_cpas_info cam520_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam520_cpas100_qchannel_info}, + .num_qchannel = 1, +}; + +#endif /* _CPASTOP_V520_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v540_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v540_100.h new file mode 100644 index 0000000000..c24bfbde5e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v540_100.h @@ -0,0 +1,272 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V540_100_H_ +#define _CPASTOP_V540_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v540_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xA40, /* SBM_FAULTINEN0_LOW */ + .value = 0x1, /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xA48, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xA80, /* SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v540_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xD08, /* ERRORLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xD10, /* ERRORLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xD18, /* ERRORLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xA88, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xA90, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + + +// TODO: Need to update cam_cpas_v540_100_camnoc_specific values based on QoS +static struct cam_camnoc_specific + cam_cpas_v540_100_camnoc_specific[] = { + { + 
.port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE30, /* CDM_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE34, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE38, /* CDM_URGENCY_LOW */ + .value = 0x00000003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE40, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE48, /* CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_TFE, + .port_name = "TFE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_LOW */ + .offset = 0x30, + .value = 0x44443333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_HIGH */ + .offset = 0x34, + .value = 0x66665555, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* TFE_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x40, /* TFE_DANGERLUT_LOW */ + .value = 0xffff0000, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x48, /* TFE_SAFELUT_LOW */ + .value = 0x00000003, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x20, /* TFE_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_OPE, + .port_name = "OPE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* OPE_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* OPE_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x438, /* OPE_URGENCY_LOW */ + .value = 0x00000033, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* OPE_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* OPE_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x420, /* OPE_MAXWR_LOW */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam540_cpas100_err_logger_offsets = { + .mainctrl = 0xD08, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0xD10, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0xD20, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0xD24, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0xD28, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0xD2C, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0xD30, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0xD34, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0xD38, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0xD3C, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_camnoc_info cam540_cpas100_camnoc_info = { + .specific = &cam_cpas_v540_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v540_100_camnoc_specific), + .irq_sbm = &cam_cpas_v540_100_irq_sbm, + .irq_err = &cam_cpas_v540_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v540_100_irq_err), + .err_logger = &cam540_cpas100_err_logger_offsets, + .errata_wa_list = NULL, + .test_irq_info = { + .sbm_enable_mask = 0x2, + .sbm_clear_mask = 0x2, + } +}; + +static struct cam_cpas_camnoc_qchannel cam540_cpas100_qchannel_info = { + .qchannel_ctrl = 0x14, + .qchannel_status = 0x18, +}; + +static struct cam_cpas_info cam540_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam540_cpas100_qchannel_info}, + .num_qchannel = 1, +}; + +#endif /* _CPASTOP_V540_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v545_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v545_100.h new file mode 100644 index 0000000000..f436f4fbfc --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v545_100.h @@ -0,0 +1,365 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V545_100_H_ +#define _CPASTOP_V545_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v545_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xA40, /* SBM_FAULTINEN0_LOW */ + .value = 0x1, /* SBM_FAULTINEN0_LOW_PORT0_MASK*/ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xA48, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xA80, /* SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v545_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xD08, /* ERRORLOGGER_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xD10, /* ERRORLOGGER_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0xD18, /* ERRORLOGGER_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0xA88, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0xA90, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + + +static struct cam_camnoc_specific + cam_cpas_v545_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + 
.priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE30, /* CDM_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE34, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE38, /* CDM_URGENCY_LOW */ + .value = 0x00000003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE40, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE48, /* CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + }, + { + .port_type = CAM_CAMNOC_TFE, + .port_name = "TFE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_LOW */ + .offset = 0x30, + .value = 0x55554433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_HIGH */ + .offset = 0x34, + .value = 0x66666655, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* TFE_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x40, /* TFE_DANGERLUT_LOW */ + .value = 0xffff0000, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x48, /* TFE_SAFELUT_LOW */ + .value = 0x00000003, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x20, /* TFE_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_TFE_1, + .port_name = "TFE_1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_LOW */ + .offset = 0x4030, + .value = 0x55554433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_HIGH */ + .offset = 0x4034, + .value = 0x66666655, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4038, /* TFE_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x4040, /* TFE_DANGERLUT_LOW */ + .value = 0xffff0000, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x4048, /* TFE_SAFELUT_LOW */ + .value = 0x00000003, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4020, /* TFE_1_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_TFE_2, + .port_name = "TFE_2", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_LOW */ + .offset = 0x5030, + .value = 0x55554433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + /* TFE_PRIORITYLUT_HIGH */ + .offset = 0x5034, + .value = 0x66666655, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5038, /* TFE_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x5040, /* TFE_DANGERLUT_LOW */ + .value = 
0xffff0000, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x5048, /* TFE_SAFELUT_LOW */ + .value = 0x00000003, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5020, /* TFE_2_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_OPE, + .port_name = "OPE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x430, /* OPE_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x434, /* OPE_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x438, /* OPE_URGENCY_LOW */ + .value = 0x00000033, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x440, /* OPE_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x448, /* OPE_SAFELUT_LOW */ + .value = 0xF, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x420, /* OPE_MAXWR_LOW */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam545_cpas100_err_logger_offsets = { + .mainctrl = 0xD08, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0xD10, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0xD20, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0xD24, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0xD28, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0xD2C, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0xD30, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0xD34, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0xD38, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0xD3C, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_camnoc_info cam545_cpas100_camnoc_info = { + .specific = &cam_cpas_v545_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v545_100_camnoc_specific), + .irq_sbm = &cam_cpas_v545_100_irq_sbm, + .irq_err = &cam_cpas_v545_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v545_100_irq_err), + .err_logger = &cam545_cpas100_err_logger_offsets, + .errata_wa_list = NULL, + .test_irq_info = { + .sbm_enable_mask = 0x2, + .sbm_clear_mask = 0x2, + } +}; + +static struct cam_cpas_camnoc_qchannel cam545_cpas100_qchannel_info = { + .qchannel_ctrl = 0x14, + .qchannel_status = 0x18, +}; + +static struct cam_cpas_info cam545_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam545_cpas100_qchannel_info}, + .num_qchannel = 1, +}; + +#endif /* _CPASTOP_V545_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v570_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v570_100.h new file mode 100644 index 0000000000..9522f3c14d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v570_100.h @@ -0,0 +1,939 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V570_100_H_ +#define _CPASTOP_V570_100_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v570_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7A40, /* SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x1000 | /* SBM_FAULTINEN0_LOW_PORT12_MASK */ + (TEST_IRQ_ENABLE ? + 0x40 : /* SBM_FAULTINEN0_LOW_PORT6_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7A48, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7A80, /* SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 
0x5 : 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v570_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7908, /* ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7910, /* ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7918, /* ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x63A0, /* IFE_NIU_0_NIU_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6390, + /* IFE_NIU_0_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6398, /* IFE_NIU_0_NIU_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6B20, /* NRT_NIU_0_NIU_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6B10, /* NRT_NIU_0_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6B18, /* NRT_NIU_0_NIU_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6F20, 
/* NRT_NIU_2_NIU_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6F10, /* NRT_NIU_2_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6F18, /* NRT_NIU_2_NIU_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6DA0, /* NRT_NIU_1_NIU_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6D90, + /* NRT_NIU_1_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6D98, /* NRT_NIU_1_NIU_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7A88, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7A90, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? 
true : false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7A88, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7A90, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v570_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6030, /* CDM_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6034, /* CDM_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6038, /* CDM_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6040, /* CDM_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6048, /* CDM_NIU_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5008, /* CDM_QOSGEN_MAINCTL_LOW */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5020, /* CDM_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5024, /* CDM_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = 
CAM_CAMNOC_IFE_LINEAR, + .port_name = "IFE_LINEAR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6430, /* IFE_NIU_1_NIU_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6434, /* IFE_NIU_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6438, /* IFE_NIU_1_NIU_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x6440, /* IFE_NIU_1_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x6448, /* IFE_NIU_1_NIU_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5108, /* IFE_NIU_1_NIU_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5120, /* IFE_NIU_1_NIU_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5124, /* IFE_NIU_1_NIU_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6420, /* IFE_NIU_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_RD, + .port_name = "IFE_RDI_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6630, /* IFE_NIU_2_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6634, /* IFE_NIU_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6638, /* IFE_NIU_2_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x6640, /* IFE_NIU_2_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x6648, /* IFE_NIU_2_NIU_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5188, /* IFE_NIU_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x51A0, /* IFE_NIU_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x51A4, /* IFE_NIU_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR, + .port_name = "IFE_RDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6830, /* IFE_NIU_3_NIU_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6834, /* IFE_NIU_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6838, /* IFE_NIU_3_NIU_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x6840, /* IFE_NIU_3_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x6848, /* IFE_NIU_3_NIU_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5208, /* IFE_NIU_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5220, /* IFE_NIU_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5224, /* IFE_NIU_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6820, /* IFE_NIU_3_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_UBWC_STATS, + .port_name = "IFE_UBWC_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6230, /* IFE_NIU_0_NIU_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6234, + /* IFE_NIU_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6238, /* IFE_NIU_0_NIU_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x6240, /* IFE_NIU_0_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x6248, /*IFE_NIU_0_NIU_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6388, /* IFE_NIU_0_NIU_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5088, /* IFE_NIU_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x50A0, + /* IFE_NIU_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x50A4, + /* IFE_NIU_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6220, /* IFE_NIU_0_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE0_RD, + .port_name = "IPE0_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E30, /* NRT_NIU_2_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E34, /* NRT_NIU_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E38, /* NRT_NIU_2_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E40, /* NRT_NIU_2_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E48, /* NRT_NIU_2_NIU_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. 
+ * Power on default values are taking care of required + * register settings. + */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6F08, /* NRT_NIU_2_NIU_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5388, /* NRT_NIU_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x53A0, /* NRT_NIU_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x53A4, /* NRT_NIU_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE1_BPS_RD, + .port_name = "IPE1_BPS_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A30, /* NRT_NIU_0_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A34, /* NRT_NIU_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A38, /* NRT_NIU_0_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A40, /* NRT_NIU_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A48, /* NRT_NIU_0_NIU_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6B08, /* NRT_NIU_0_NIU_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5288, /* NRT_NIU_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + // TITAN_A_CAMNOC_cam_noc_amm_NRT_NIU_0_qosgen_Shaping_Low + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x52A0, /* NRT_NIU_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x52A4, /* NRT_NIU_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_WR, + .port_name = "IPE_BPS_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C30, /* NRT_NIU_1_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C34, /* NRT_NIU_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C38, /* NRT_NIU_1_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C40, /* NRT_NIU_1_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C48, /* NRT_NIU_1_NIU_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6D88, /* NRT_NIU_1_NIU_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5308, /* NRT_NIU_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5320, /* NRT_NIU_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5324, /* NRT_NIU_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6C20, /* NRT_NIU_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7030, /* NRT_NIU_3_NIU_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7034, /* NRT_NIU_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7038, /* NRT_NIU_3_NIU_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7040, /* NRT_NIU_3_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7048, /* NRT_NIU_3_NIU_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 
0x5408, /* NRT_NIU_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5420, /* NRT_NIU_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5424, /* NRT_NIU_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7020, /* NRT_NIU_3_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x7A88, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x100000, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5488, /* ICP_QOSGEN_MAINCTL_LOW */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x54A0, /* ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x54A4, /* ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam570_cpas100_err_logger_offsets = { + .mainctrl = 0x7908, /* ERL_MAINCTL_LOW */ + .errvld = 0x7910, /* ERL_ERRVLD_LOW */ + .errlog0_low = 0x7920, /* ERL_ERRLOG0_LOW */ + .errlog0_high = 0x7924, /* ERL_ERRLOG0_HIGH */ + .errlog1_low = 0x7928, /* ERL_ERRLOG1_LOW */ + .errlog1_high = 0x792c, /* ERL_ERRLOG1_HIGH */ + .errlog2_low = 0x7930, /* ERL_ERRLOG2_LOW */ + .errlog2_high = 0x7934, /* ERL_ERRLOG2_HIGH */ + .errlog3_low = 0x7938, /* ERL_ERRLOG3_LOW */ + .errlog3_high = 0x793c, /* ERL_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list 
cam570_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + }, +}; + +static struct cam_camnoc_info cam570_cpas100_camnoc_info = { + .specific = &cam_cpas_v570_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v570_100_camnoc_specific), + .irq_sbm = &cam_cpas_v570_100_irq_sbm, + .irq_err = &cam_cpas_v570_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v570_100_irq_err), + .err_logger = &cam570_cpas100_err_logger_offsets, + .errata_wa_list = &cam570_cpas100_errata_wa_list, +}; + +static struct cam_cpas_camnoc_qchannel cam570_cpas100_qchannel_info = { + .qchannel_ctrl = 0x5C, + .qchannel_status = 0x60, +}; + +static struct cam_cpas_info cam570_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam570_cpas100_qchannel_info}, + .num_qchannel = 1, +}; + +#endif /* _CPASTOP_V570_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v570_200.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v570_200.h new file mode 100644 index 0000000000..15d5f78ecd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v570_200.h @@ -0,0 +1,944 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V570_200_H_ +#define _CPASTOP_V570_200_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v570_200_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3840, /* SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x1000, /* SBM_FAULTINEN0_LOW_PORT12_MASK */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3848, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x3880, /* SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v570_200_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7008, /* ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7010, /* ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7018, /* ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1BA0, /* IFE_UBWC_STATS_0_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1B90, + /* IFE_UBWC_STATS_0_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1B98, /* IFE_UBWC_STATS_0_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = 
CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2520, /* IPE1_BPS_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2510, /* IPE1_BPS_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2518, /* IPE1_BPS_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1F20, /* IPE0_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1F10, /* IPE0_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1F18, /* IPE0_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x29A0, /* IPE_BPS_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2990, + /* IPE_BPS_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2998, /* IPE_BPS_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, 
+ .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v570_200_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* CDM_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* CDM_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x408, /* CDM_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 
0, + .offset = 0x420, /* CDM_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x424, /* CDM_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR, + .port_name = "IFE_LINEAR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA30, /* IFE_LINEAR_PRIORITYLUT_LOW */ + .value = 0x66666666, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA34, /* IFE_LINEAR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA38, /* IFE_LINEAR_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA40, /* IFE_LINEAR_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA48, /* IFE_LINEAR_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE08, /* IFE_LINEAR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE20, /* IFE_LINEAR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE24, /* IFE_LINEAR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0xA20, /* IFE_LINEAR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_RD, + .port_name = "IFE_RDI_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* IFE_RDI_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* IFE_RDI_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1038, /* IFE_RDI_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1040, /* IFE_RDI_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1048, /* IFE_RDI_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF08, /* IFE_RDI_RD_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF20, /* IFE_RDI_RD_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF24, /* IFE_RDI_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR, + .port_name = "IFE_RDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* IFE_RDI_WR_0_PRIORITYLUT_LOW */ + .value = 0x66666666, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* IFE_RDI_WR_0_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* IFE_RDI_WR_0_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1440, /* IFE_RDI_WR_0_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1448, /* IFE_RDI_WR_0_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1808, /* IFE_RDI_WR_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1820, /* IFE_RDI_WR_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1824, /* IFE_RDI_WR_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x1420, /* IFE_RDI_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_UBWC_STATS, + .port_name = "IFE_UBWC_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A30, /* IFE_UBWC_STATS_0_PRIORITYLUT_LOW */ + .value = 0x66666666, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A34, + /* IFE_UBWC_STATS_0_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A38, /* IFE_UBWC_STATS_0_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A40, /* IFE_UBWC_STATS_0_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A48, /* IFE_UBWC_STATS_0_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1B88, /* IFE_UBWC_STATS_0_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1908, /* IFE_UBWC_STATS_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1920, + /* IFE_UBWC_STATS_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1924, + /* IFE_UBWC_STATS_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x1A20, /* IFE_UBWC_STATS_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE0_RD, + .port_name = "IPE0_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E30, /* IPE0_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E34, /* IPE0_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E38, /* IPE0_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E40, /* IPE0_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E48, /* IPE0_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1F08, /* IPE0_RD_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2208, /* IPE0_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2220, /* IPE0_RD_QOSGEN_SHAPING_LOW */ + .value = 0x12121212, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2224, /* IPE0_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x12121212, + }, + }, + { + .port_type = CAM_CAMNOC_IPE1_BPS_RD, + .port_name = "IPE1_BPS_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2430, /* IPE1_BPS_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2434, /* IPE1_BPS_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2438, /* IPE1_BPS_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2440, /* IPE1_BPS_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2448, /* IPE1_BPS_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2508, /* IPE1_BPS_RD_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2308, /* IPE1_BPS_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + // TITAN_A_CAMNOC_cam_noc_amm_nrt_niu_0_qosgen_Shaping_Low + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2320, /* IPE1_BPS_RD_QOSGEN_SHAPING_LOW */ + .value = 0x23232323, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2324, /* IPE1_BPS_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x23232323, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_WR, + .port_name = "IPE_BPS_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2830, /* IPE_BPS_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2834, /* IPE_BPS_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2838, /* IPE_BPS_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2840, /* IPE_BPS_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2848, /* IPE_BPS_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2988, /* IPE_BPS_WR_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C08, /* IPE_BPS_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C20, /* IPE_BPS_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C24, /* IPE_BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2820, /* IFE_IPE_BPS_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E38, /* JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D08, /* JPEG_QOSGEN_MAINCTL */ + .value 
= 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D20, /* JPEG_QOSGEN_SHAPING_LOW */ + .value = 0xA0A0A0A, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D24, /* JPEG_QOSGEN_SHAPING_HIGH */ + .value = 0xA0A0A0A, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2E20, /* IFE_JPEG_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x3888, + .value = 0x100000, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3488, /* ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34A0, /* ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34A4, /* ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam570_cpas200_err_logger_offsets = { + .mainctrl = 0x7008, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x7010, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x7020, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x7024, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x7028, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x702c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x7030, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x7034, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x7038, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x703c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam570_cpas200_errata_wa_list = { 
+ .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x3900, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam570_cpas200_camnoc_info = { + .specific = &cam_cpas_v570_200_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v570_200_camnoc_specific), + .irq_sbm = &cam_cpas_v570_200_irq_sbm, + .irq_err = &cam_cpas_v570_200_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v570_200_irq_err), + .err_logger = &cam570_cpas200_err_logger_offsets, + .errata_wa_list = &cam570_cpas200_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x40, + .sbm_clear_mask = 0x4, + } +}; + +static struct cam_cpas_camnoc_qchannel cam570_cpas200_qchannel_info = { + .qchannel_ctrl = 0x5C, + .qchannel_status = 0x60, +}; + +static struct cam_cpas_info cam570_cpas200_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam570_cpas200_qchannel_info}, + .num_qchannel = 1, +}; + +#endif /* _CPASTOP_V570_200_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v580_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v580_100.h new file mode 100644 index 0000000000..52267b87f8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v580_100.h @@ -0,0 +1,1122 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V580_100_H_ +#define _CPASTOP_V580_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v580_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3840, /* SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x1000, /* SBM_FAULTINEN0_LOW_PORT12_MASK */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3848, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x3880, /* SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v580_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7008, /* ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7010, /* ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7018, /* ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1BA0, /* IFE_UBWC_STATS_0_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1B90, + /* IFE_UBWC_STATS_0_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1B98, /* IFE_UBWC_STATS_0_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = 
CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2520, /* IPE1_BPS_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2510, /* IPE1_BPS_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2518, /* IPE1_BPS_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1F20, /* IPE0_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1F10, /* IPE0_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1F18, /* IPE0_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x29A0, /* IPE_BPS_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2990, + /* IPE_BPS_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2998, /* IPE_BPS_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, 
+ .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_1_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x1000, /* SBM_FAULTINSTATUS0_LOW_PORT12_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x77A0, /* IFE_UBWC_STATS_1_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7790, + /* IFE_UBWC_STATS_1_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7798, /* IFE_UBWC_STATS_1_ENCERRCLR_LOW */ + .value = 1, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v580_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* CDM_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* CDM_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x408, /* CDM_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x420, /* CDM_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x424, /* CDM_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR, + .port_name = "IFE_LINEAR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA30, /* IFE_LINEAR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA34, /* IFE_LINEAR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA38, /* IFE_LINEAR_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA40, /* IFE_LINEAR_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA48, /* IFE_LINEAR_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE08, /* IFE_LINEAR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE20, /* IFE_LINEAR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE24, /* IFE_LINEAR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0xA20, /* IFE_LINEAR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_RD, + .port_name = "IFE_RDI_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* IFE_RDI_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* IFE_RDI_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1038, /* IFE_RDI_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1040, /* IFE_RDI_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1048, /* IFE_RDI_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF08, /* IFE_RDI_RD_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF20, /* IFE_RDI_RD_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF24, /* IFE_RDI_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR, + .port_name = "IFE_RDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* IFE_RDI_WR_0_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* IFE_RDI_WR_0_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* IFE_RDI_WR_0_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1440, /* IFE_RDI_WR_0_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1448, /* IFE_RDI_WR_0_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1808, /* IFE_RDI_WR_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1820, /* IFE_RDI_WR_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1824, /* IFE_RDI_WR_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x1420, /* IFE_RDI_WR_0_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR_1, + .port_name = "IFE_RDI_WR_1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7430, /* IFE_RDI_WR_1_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7434, /* IFE_RDI_WR_1_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7438, /* IFE_RDI_WR_1_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x7440, /* IFE_RDI_WR_1_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x7448, /* IFE_RDI_WR_1_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C08, /* IFE_RDI_WR_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C20, /* IFE_RDI_WR_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C24, /* IFE_RDI_WR_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7420, /* IFE_RDI_WR_1_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_UBWC_STATS, + .port_name = "IFE_UBWC_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A30, /* IFE_UBWC_STATS_0_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A34, + /* IFE_UBWC_STATS_0_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A38, /* IFE_UBWC_STATS_0_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A40, /* IFE_UBWC_STATS_0_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A48, /* IFE_UBWC_STATS_0_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1B88, /* IFE_UBWC_STATS_0_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1908, /* IFE_UBWC_STATS_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1920, + /* IFE_UBWC_STATS_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1924, + /* IFE_UBWC_STATS_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x1A20, /* IFE_UBWC_STATS_0_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_UBWC_STATS_1, + .port_name = "IFE_UBWC_STATS_1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7630, /* IFE_UBWC_STATS_1_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7634, + /* IFE_UBWC_STATS_1_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7638, /* IFE_UBWC_STATS_1_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x7640, /* IFE_UBWC_STATS_1_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x7648, /* IFE_UBWC_STATS_1_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. 
+ * Power on default values are taking care of required + * register settings. + */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7788, /* IFE_UBWC_STATS_1_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C88, /* IFE_UBWC_STATS_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7CA0, + /* IFE_UBWC_STATS_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7CA4, + /* IFE_UBWC_STATS_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7620, /* IFE_UBWC_STATS_1_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE0_RD, + .port_name = "IPE0_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E30, /* IPE0_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E34, /* IPE0_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E38, /* IPE0_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E40, /* IPE0_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E48, /* IPE0_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. 
+ * Power on default values are taking care of required + * register settings. + */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1F08, /* IPE0_RD_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2208, /* IPE0_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2220, /* IPE0_RD_QOSGEN_SHAPING_LOW */ + .value = 0x13131313, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2224, /* IPE0_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x13131313, + }, + }, + { + .port_type = CAM_CAMNOC_IPE1_BPS_RD, + .port_name = "IPE1_BPS_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2430, /* IPE1_BPS_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2434, /* IPE1_BPS_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2438, /* IPE1_BPS_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2440, /* IPE1_BPS_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2448, /* IPE1_BPS_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2508, /* IPE1_BPS_RD_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2308, /* IPE1_BPS_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2320, /* IPE1_BPS_RD_QOSGEN_SHAPING_LOW */ + .value = 0x24242424, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2324, /* IPE1_BPS_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x24242424, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_WR, + .port_name = "IPE_BPS_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2830, /* IPE_BPS_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2834, /* IPE_BPS_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2838, /* IPE_BPS_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2840, /* IPE_BPS_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2848, /* IPE_BPS_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2988, /* IPE_BPS_WR_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C08, /* IPE_BPS_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C20, /* IPE_BPS_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C24, /* IPE_BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2820, /* IFE_IPE_BPS_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E38, /* JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D08, /* JPEG_QOSGEN_MAINCTL */ + .value 
= 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D20, /* JPEG_QOSGEN_SHAPING_LOW */ + .value = 0x10101010, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D24, /* JPEG_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x2E20, /* IFE_JPEG_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x3888, + .value = 0x100000, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3488, /* ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34A0, /* ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34A4, /* ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam580_cpas100_err_logger_offsets = { + .mainctrl = 0x7008, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x7010, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x7020, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x7024, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x7028, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x702c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x7030, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x7034, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x7038, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x703c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam580_cpas100_errata_wa_list = 
{ + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x3900, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam580_cpas100_camnoc_info = { + .specific = &cam_cpas_v580_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v580_100_camnoc_specific), + .irq_sbm = &cam_cpas_v580_100_irq_sbm, + .irq_err = &cam_cpas_v580_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v580_100_irq_err), + .err_logger = &cam580_cpas100_err_logger_offsets, + .errata_wa_list = &cam580_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x40, + .sbm_clear_mask = 0x4, + } +}; + +static struct cam_cpas_camnoc_qchannel cam580_cpas100_qchannel_info = { + .qchannel_ctrl = 0x5C, + .qchannel_status = 0x60, +}; + +static struct cam_cpas_info cam580_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam580_cpas100_qchannel_info}, + .num_qchannel = 1, +}; + +#endif /* _CPASTOP_V580_100_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v580_custom.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v580_custom.h new file mode 100644 index 0000000000..a289006131 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v580_custom.h @@ -0,0 +1,1054 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V580_CUSTOM_H_ +#define _CPASTOP_V580_CUSTOM_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v580_custom_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3840, /* SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x1000, /* SBM_FAULTINEN0_LOW_PORT12_MASK */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3848, /* SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x3880, /* SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v580_custom_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7008, /* ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7010, /* ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7018, /* ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1BA0, /* IFE_UBWC_STATS_0_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1B90, + /* IFE_UBWC_STATS_0_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1B98, /* IFE_UBWC_STATS_0_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = 
CAM_CAMNOC_HW_IRQ_IPE1_BPS_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2520, /* IPE1_BPS_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2510, /* IPE1_BPS_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2518, /* IPE1_BPS_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x1F20, /* IPE0_RD_DECERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x1F10, /* IPE0_RD_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x1F18, /* IPE0_RD_DECERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x29A0, /* IPE_BPS_WR_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2990, + /* IPE_BPS_WR_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2998, /* IPE_BPS_WR_ENCERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, 
+ .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x3888, /* SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x3890, /* SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_STATS_1_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x1000, /* SBM_FAULTINSTATUS0_LOW_PORT12_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x77A0, /* IFE_UBWC_STATS_1_ENCERREN_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7790, + /* IFE_UBWC_STATS_1_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7798, /* IFE_UBWC_STATS_1_ENCERRCLR_LOW */ + .value = 1, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v580_custom_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_CDM, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* CDM_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* CDM_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* CDM_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x408, /* CDM_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x420, /* CDM_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x424, /* CDM_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA30, /* IFE_LINEAR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA34, /* IFE_LINEAR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA38, /* IFE_LINEAR_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA40, /* IFE_LINEAR_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0xA48, /* IFE_LINEAR_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE08, /* IFE_LINEAR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE20, /* IFE_LINEAR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xE24, /* IFE_LINEAR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_RD, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1030, /* IFE_RDI_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1034, /* IFE_RDI_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1038, /* IFE_RDI_RD_URGENCY_LOW */ + .value = 0x2, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1040, /* IFE_RDI_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1048, /* IFE_RDI_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF08, /* IFE_RDI_RD_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF20, /* IFE_RDI_RD_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xF24, /* IFE_RDI_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1430, /* IFE_RDI_WR_0_PRIORITYLUT_LOW */ + .value = 0x66666666, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1434, /* IFE_RDI_WR_0_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1438, /* IFE_RDI_WR_0_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1440, /* IFE_RDI_WR_0_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1448, /* IFE_RDI_WR_0_SAFELUT_LOW */ + .value = 0x0000, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1808, /* IFE_RDI_WR_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1820, /* IFE_RDI_WR_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1824, /* IFE_RDI_WR_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR_1, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7430, /* IFE_RDI_WR_1_PRIORITYLUT_LOW */ + .value = 0x66666666, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7434, /* IFE_RDI_WR_1_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7438, /* IFE_RDI_WR_1_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x7440, /* IFE_RDI_WR_1_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x7448, /* IFE_RDI_WR_1_SAFELUT_LOW */ + .value = 0x0000, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C08, /* IFE_RDI_WR_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C20, /* IFE_RDI_WR_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C24, /* IFE_RDI_WR_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_UBWC_STATS, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A30, /* IFE_UBWC_STATS_0_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A34, + /* IFE_UBWC_STATS_0_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1A38, /* IFE_UBWC_STATS_0_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A40, /* IFE_UBWC_STATS_0_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x1A48, /* IFE_UBWC_STATS_0_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1B88, /* IFE_UBWC_STATS_0_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1908, /* IFE_UBWC_STATS_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1920, + /* IFE_UBWC_STATS_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1924, + /* IFE_UBWC_STATS_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_UBWC_STATS_1, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7630, /* IFE_UBWC_STATS_1_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7634, + /* IFE_UBWC_STATS_1_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7638, /* IFE_UBWC_STATS_1_URGENCY_LOW */ + .value = 0x1030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x7640, /* IFE_UBWC_STATS_1_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .offset = 0x7648, /* IFE_UBWC_STATS_1_SAFELUT_LOW */ + .value = 0x000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7788, /* IFE_UBWC_STATS_1_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C88, /* IFE_UBWC_STATS_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7CA0, + /* IFE_UBWC_STATS_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7CA4, + /* IFE_UBWC_STATS_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE0_RD, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E30, /* IPE0_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E34, /* IPE0_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E38, /* IPE0_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E40, /* IPE0_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1E48, /* IPE0_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x1F08, /* IPE0_RD_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2208, /* IPE0_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2220, /* IPE0_RD_QOSGEN_SHAPING_LOW */ + .value = 0x13131313, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2224, /* IPE0_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x13131313, + }, + }, + { + .port_type = CAM_CAMNOC_IPE1_BPS_RD, + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2430, /* IPE1_BPS_RD_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2434, /* IPE1_BPS_RD_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2438, /* IPE1_BPS_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2440, /* IPE1_BPS_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2448, /* IPE1_BPS_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2508, /* IPE1_BPS_RD_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2308, /* IPE1_BPS_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2320, /* IPE1_BPS_RD_QOSGEN_SHAPING_LOW */ + .value = 0x24242424, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2324, /* IPE1_BPS_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x24242424, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_BPS_WR, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2830, /* IPE_BPS_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2834, /* IPE_BPS_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2838, /* IPE_BPS_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2840, /* IPE_BPS_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2848, /* IPE_BPS_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2988, /* IPE_BPS_WR_ENCCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C08, /* IPE_BPS_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C20, /* IPE_BPS_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2C24, /* IPE_BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E30, /* JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E34, /* JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E38, /* JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E40, /* JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2E48, /* JPEG_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D08, /* JPEG_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D20, /* JPEG_QOSGEN_SHAPING_LOW */ + .value = 
0x10101010, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x2D24, /* JPEG_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .enable = true, + .flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x3888, + .value = 0x100000, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3488, /* ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34A0, /* ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34A4, /* ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam580_custom_err_logger_offsets = { + .mainctrl = 0x7008, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x7010, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x7020, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x7024, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x7028, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x702c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x7030, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x7034, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x7038, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x703c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam580_custom_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x3900, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam580_custom_camnoc_info = { + .specific = 
&cam_cpas_v580_custom_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v580_custom_camnoc_specific), + .irq_sbm = &cam_cpas_v580_custom_irq_sbm, + .irq_err = &cam_cpas_v580_custom_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v580_custom_irq_err), + .err_logger = &cam580_custom_err_logger_offsets, + .errata_wa_list = &cam580_custom_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x40, + .sbm_clear_mask = 0x4, + } +}; + +static struct cam_cpas_info cam580_custom_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, +}; + +#endif /* _CPASTOP_V580_CUSTOM_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v640_200.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v640_200.h new file mode 100644 index 0000000000..df91ed4bbd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v640_200.h @@ -0,0 +1,583 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V640_200_H_ +#define _CPASTOP_V640_200_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v640_200_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6840, /* CAM_NOC_SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x04 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x08 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? 
+ 0x80 : /* SBM_FAULTINEN0_LOW_PORT7_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6848, /* CAM_NOC_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6880, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 0x5 : 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v640_200_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6608, /* CAM_NOC_ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6610, /* CAM_NOC_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6618, /* CAM_NOC_ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5DA0, /* WR_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5D90, /* WR_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5D98, /* WR_NIU_ENCERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5F20, /* CAM_NOC_IPE_0_RD_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5F10, /* CAM_NOC_IPE_0_RD_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = 
CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5F18, /* CAM_NOC_IPE_0_RD_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6888, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6890, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v640_200_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_TFE_BAYER_STATS, + .port_name = "TFE_BAYER", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5830, /*PRIORITYLUT_LOW */ + .value = 0x55554433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5834, /* PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5838, /* URGENCY_LOW */ + .value = 0x00000030, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5840, /* DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5848, /* SAFELUT_LOW */ + .value = 0x0000000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4208, /* QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4220, /* QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4224, /* QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5820, /* UBWC_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_TFE_RAW, + .port_name = "TFE_RDI_RAW", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A30, /* PRIORITYLUT_LOW */ + .value = 0x55554433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A34, /* PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A38, /* URGENCY_LOW */ + .value = 0x00000003, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A40, /* DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A48, /* SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4408, /* QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4420, /* QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4424, /* QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5A20, /* STATS_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_OPE_BPS_WR, + .port_name = "OPE_BPS_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5C30, /* PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5C34, /* PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5C38, /* URGENCY_LOW */ + .value = 0x00000030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5C40, /* DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5C48, /* SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4608, /* QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4620, /* QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4624, /* QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5C20, /* MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_OPE_BPS_CDM_RD, + .port_name = "OPE_BPS_CDM_RD", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E30, /* IPE_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E34, /* IPE_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E38, /* IPE_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E40, /* IPE_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E48, /* IPE_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4808, /* IPE_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4820, 
/* IPE_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4824, /* IPE_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_CRE, + .port_name = "CRE_RD_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6030, /* BPS_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6034, /* BPS_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6038, /* BPS_WR_URGENCY_LOW */ + .value = 0x03, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6040, /* BPS_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6048, /* BPS_WR_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A08, /* BPS_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A20, /* BPS_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A24, /* BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6020, /* BPS_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + 
.flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, + .offset = 0x6888, + .value = 0x100000, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4008, /* ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4020, /* ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4024, /* ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam640_cpas200_err_logger_offsets = { + .mainctrl = 0x6608, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x6610, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x6620, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x6624, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x6628, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x662c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x6630, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x6634, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x6638, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x663c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam640_cpas200_errata_wa_list = { +}; + +static struct cam_camnoc_info cam640_cpas200_camnoc_info = { + .specific = &cam_cpas_v640_200_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v640_200_camnoc_specific), + .irq_sbm = &cam_cpas_v640_200_irq_sbm, + .irq_err = &cam_cpas_v640_200_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v640_200_irq_err), + .err_logger = &cam640_cpas200_err_logger_offsets, + .errata_wa_list = &cam640_cpas200_errata_wa_list, +}; + +static struct cam_cpas_camnoc_qchannel cam640_cpas200_qchannel_info = { + .qchannel_ctrl = 0x5C, + .qchannel_status = 0x60, +}; + +static struct cam_cpas_info cam640_cpas200_cpas_info 
= { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam640_cpas200_qchannel_info}, + .num_qchannel = 1, +}; + +static struct cam_cpas_top_regs cam640_cpas200_cpas_top_info = { + .tpg_mux_sel_enabled = true, + .tpg_mux_sel_shift = 0x0, + .tpg_mux_sel = 0x1C, +}; + +#endif /* _CPASTOP_V640_200_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v680_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v680_100.h new file mode 100644 index 0000000000..166b7f16c9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v680_100.h @@ -0,0 +1,1305 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V680_100_H_ +#define _CPASTOP_V680_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v680_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2240, /* CAM_NOC_SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x04 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x08 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20, /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2248, /* CAM_NOC_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2280, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v680_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2008, /* CAM_NOC_ERL_MAINCTL_LOW */ + .value 
= 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2010, /* CAM_NOC_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2018, /* CAM_NOC_ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x59A0, /* IFE_UBWC_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5990, /* IFE_UBWC_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5998, /* IFE_UBWC_NIU_ENCERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7A0, /* CAM_NOC_BPS_WR_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x790, /* CAM_NOC_BPS_WR_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x798, /* CAM_NOC_BPS_WR_NIU_ENCERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x5F20, /* CAM_NOC_IPE_0_RD_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5F10, /* CAM_NOC_IPE_0_RD_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5F18, /* 
CAM_NOC_IPE_0_RD_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6520, /* CAM_NOC_IPE_1_RD_NIU_DECERREN_LOW */ + .value = 0XFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6510, /* CAM_NOC_IPE_1_RD_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6518, /* CAM_NOC_IPE_1_RD_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6BA0, /* CAM_NOC_IPE_WR_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6B90, /* CAM_NOC_IPE_WR_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6B98, /* CAM_NOC_IPE_WR_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x80, /* 
SBM_FAULTINSTATUS0_LOW_PORT7_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v680_100_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_IFE_UBWC, + .port_name = "IFE_UBWC", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5830, /* IFE_UBWC_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5834, /* IFE_UBWC_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5838, /* IFE_UBWC_URGENCY_LOW */ + .value = 0x1E30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5840, /* IFE_UBWC_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5848, /* IFE_UBWC_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A08, /* IFE_UBWC_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A20, /* IFE_UBWC_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A24, /* IFE_UBWC_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5820, /* IFE_UBWC_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR, + .port_name = "IFE_RDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5230, /* IFE_RDI_WR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5234, /* IFE_RDI_WR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5238, /* IFE_RDI_WR_URGENCY_LOW */ + .value = 0x1E30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5240, /* IFE_RDI_WR_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5248, /* IFE_RDI_WR_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5408, /* IFE_RDI_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5420, /* IFE_RDI_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5424, /* IFE_RDI_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5220, /* IFE_RDI_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_PDAF, + .port_name = "IFE_PDAF", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c30, /* IFE_PDAF_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c34, /* IFE_PDAF_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c38, /* IFE_PDAF_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c40, /* IFE_PDAF_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c48, /* IFE_PDAF_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4e08, /* IFE_PDAF_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4e20, /* IFE_PDAF_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4e24, /* IFE_PDAF_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4C20, /* IFE_PDAF_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR_STATS, + .port_name = "IFE_LINEAR_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4030, /* IFE_LINEAR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4034, /* IFE_LINEAR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4038, /* IFE_LINEAR_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4040, /* IFE_LINEAR_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4048, /* IFE_LINEAR_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4208, /* IFE_LINEAR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4220, /* IFE_LINEAR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4224, /* IFE_LINEAR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4020, /* IFE_LINEAR_STATS_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR_STATS_1, + .port_name = "IFE_LINEAR_STATS_1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8230, /* IFE_LINEAR_1_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8234, /* IFE_LINEAR_1_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8238, /* IFE_LINEAR_1_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8240, /* IFE_LINEAR_1_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8248, /* IFE_LINEAR_1_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8408, /* IFE_LINEAR_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8420, /* IFE_LINEAR_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8424, /* IFE_LINEAR_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x8220, /* IFE_LINEAR_STATS_1_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LITE, + .port_name = "IFE_LITE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4630, /* IFE_LITE_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4634, /* IFE_LITE_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4638, /* IFE_LITE_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4640, /* IFE_LITE_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4648, /* IFE_LITE_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4808, /* IFE_LITE_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4820, /* IFE_LITE_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4824, /* IFE_LITE_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4620, /* IFE_LITE_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_SFE_RD, + .port_name = "SFE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7030, /* SFE_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7034, /* SFE_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7038, /* SFE_RD_URGENCY_LOW */ + .value = 0x4, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7040, /* SFE_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7048, /* SFE_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7208, /* SFE_RD_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7220, /* SFE_RD_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7224, /* SFE_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_WR, + .port_name = "IPE_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a30, /* IPE_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a34, /* IPE_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a38, /* IPE_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a40, /* IPE_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a48, /* IPE_WR_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6c08, /* IPE_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6c20, /* IPE_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + 
.masked_value = 0, + .offset = 0x6c24, /* IPE_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6A20, /* IPE_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_BPS_WR, + .port_name = "BPS_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x630, /* BPS_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x634, /* BPS_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x638, /* BPS_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x640, /* BPS_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x648, /* BPS_WR_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x808, /* BPS_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x820, /* BPS_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x824, /* BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x620, /* BPS_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_BPS_RD, + .port_name = "BPS_RD", + 
.enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* BPS_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* BPS_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* BPS_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* BPS_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* BPS_RD_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x208, /* BPS_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x220, /* BPS_RD_QOSGEN_SHAPING_LOW */ + .value = 0x14141414, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x224, /* BPS_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7c30, /* JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7c34, /* JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 
0x7c38, /* JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7c40, /* JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7c48, /* JPEG_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7e08, /* JPEG_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7e20, /* JPEG_QOSGEN_SHAPING_LOW */ + .value = 0x10101010, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7e24, /* JPEG_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7C20, /* JPEG_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE0_RD, + .port_name = "IPE0_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E30, /* IPE0_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E34, /* IPE0_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E38, /* IPE0_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E40, /* IPE0_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + 
.offset = 0x5E48, /* IPE0_RD_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5F08, /* IPE0_RD_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6008, /* IPE0_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6020, /* IPE0_RD_QOSGEN_SHAPING_LOW */ + .value = 0x29292929, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6024, /* IPE0_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x29292929, + }, + }, + { + .port_type = CAM_CAMNOC_IPE1_RD, + .port_name = "IPE1_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6430, /* IPE1_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6434, /* IPE1_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6438, /* IPE1_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6440, /* IPE1_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6448, /* IPE1_RD_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6608, /* IPE1_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = false, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6620, /* IPE1_RD_QOSGEN_SHAPING_LOW */ + .value = 0x29292929, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6624, /* IPE1_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x29292929, + }, + }, + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3830, /* CDM_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3834, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3838, /* CDM_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3840, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3848, /* CDM_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3a08, /* CDM_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3a20, /* CDM_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3a24, /* CDM_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, 
+ .offset = 0x2288, + .value = 0x100000, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7688, /* ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x76A0, /* ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x76A4, /* ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam680_cpas100_err_logger_offsets = { + .mainctrl = 0x2008, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x2010, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x2020, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x2024, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x2028, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x202c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x2030, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x2034, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x2038, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x203c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam680_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam680_cpas100_camnoc_info = { + .specific = &cam_cpas_v680_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v680_100_camnoc_specific), + .irq_sbm = &cam_cpas_v680_100_irq_sbm, + .irq_err = &cam_cpas_v680_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v680_100_irq_err), + .err_logger = &cam680_cpas100_err_logger_offsets, + .errata_wa_list = &cam680_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 
0x80, + .sbm_clear_mask = 0x4, + } +}; + +static struct cam_cpas_camnoc_qchannel cam680_cpas100_qchannel_info = { + .qchannel_ctrl = 0x5C, + .qchannel_status = 0x60, +}; + +static struct cam_cpas_info cam680_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam680_cpas100_qchannel_info}, + .num_qchannel = 1, +}; + +#endif /* _CPASTOP_V680_100_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v680_110.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v680_110.h new file mode 100644 index 0000000000..576bfbb611 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v680_110.h @@ -0,0 +1,1306 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V680_110_H_ +#define _CPASTOP_V680_110_H_ + +#define TEST_IRQ_ENABLE 0 + +static struct cam_camnoc_irq_sbm cam_cpas_v680_110_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2240, /* CAM_NOC_SBM_FAULTINEN0_LOW */ + .value = 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x04 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x08 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + (TEST_IRQ_ENABLE ? + 0x80 : /* SBM_FAULTINEN0_LOW_PORT7_MASK */ + 0x0), + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2248, /* CAM_NOC_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2280, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + .value = TEST_IRQ_ENABLE ? 
0x5 : 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v680_110_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = false, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2008, /* CAM_NOC_ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2010, /* CAM_NOC_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x2018, /* CAM_NOC_ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x59A0, /* IFE_UBWC_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5990, /* IFE_UBWC_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5998, /* IFE_UBWC_NIU_ENCERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7A0, /* CAM_NOC_BPS_WR_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x790, /* CAM_NOC_BPS_WR_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x798, /* CAM_NOC_BPS_WR_NIU_ENCERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable 
= true, + .offset = 0x5F20, /* CAM_NOC_IPE_0_RD_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5F10, /* CAM_NOC_IPE_0_RD_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5F18, /* CAM_NOC_IPE_0_RD_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6520, /* CAM_NOC_IPE_1_RD_NIU_DECERREN_LOW */ + .value = 0XFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6510, /* CAM_NOC_IPE_1_RD_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6518, /* CAM_NOC_IPE_1_RD_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6BA0, /* CAM_NOC_IPE_WR_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6B90, /* CAM_NOC_IPE_WR_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6B98, /* CAM_NOC_IPE_WR_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + 
.err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = TEST_IRQ_ENABLE ? true : false, + .sbm_port = 0x80, /* SBM_FAULTINSTATUS0_LOW_PORT7_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x2288, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x5, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x2290, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v680_110_camnoc_specific[] = { + { + .port_type = CAM_CAMNOC_IFE_UBWC, + .port_name = "IFE_UBWC", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5830, /* IFE_UBWC_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5834, /* IFE_UBWC_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5838, /* IFE_UBWC_URGENCY_LOW */ + .value = 0x1E30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5840, /* IFE_UBWC_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5848, /* IFE_UBWC_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A08, /* IFE_UBWC_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A20, /* IFE_UBWC_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5A24, /* IFE_UBWC_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5820, /* IFE_UBWC_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_RDI_WR, + .port_name = "IFE_RDI_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5230, /* IFE_RDI_WR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5234, /* IFE_RDI_WR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5238, /* IFE_RDI_WR_URGENCY_LOW */ + .value = 0x1E30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5240, /* IFE_RDI_WR_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5248, /* IFE_RDI_WR_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5408, /* IFE_RDI_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5420, /* IFE_RDI_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5424, /* IFE_RDI_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5220, /* IFE_RDI_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_PDAF, + .port_name = "IFE_PDAF", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c30, /* IFE_PDAF_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c34, /* IFE_PDAF_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c38, /* IFE_PDAF_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c40, /* IFE_PDAF_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4c48, /* IFE_PDAF_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4e08, /* IFE_PDAF_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4e20, /* IFE_PDAF_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4e24, /* IFE_PDAF_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4C20, /* IFE_PDAF_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR_STATS, + .port_name = "IFE_LINEAR_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4030, /* IFE_LINEAR_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4034, /* IFE_LINEAR_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4038, /* IFE_LINEAR_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4040, /* IFE_LINEAR_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4048, /* IFE_LINEAR_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4208, /* IFE_LINEAR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4220, /* IFE_LINEAR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4224, /* IFE_LINEAR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4020, /* IFE_LINEAR_STATS_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LINEAR_STATS_1, + .port_name = "IFE_LINEAR_STATS_1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8230, /* IFE_LINEAR_1_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8234, /* IFE_LINEAR_1_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8238, /* IFE_LINEAR_1_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8240, /* IFE_LINEAR_1_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8248, /* IFE_LINEAR_1_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8408, /* IFE_LINEAR_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8420, /* IFE_LINEAR_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8424, /* IFE_LINEAR_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x8220, /* IFE_LINEAR_STATS_1_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IFE_LITE, + .port_name = "IFE_LITE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4630, /* IFE_LITE_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4634, /* IFE_LITE_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4638, /* IFE_LITE_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4640, /* IFE_LITE_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4648, /* IFE_LITE_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4808, /* IFE_LITE_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4820, /* IFE_LITE_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4824, /* IFE_LITE_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4620, /* IFE_LITE_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_SFE_RD, + .port_name = "SFE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7030, /* SFE_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7034, /* SFE_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7038, /* SFE_RD_URGENCY_LOW */ + .value = 0x4, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7040, /* SFE_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7048, /* SFE_RD_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7208, /* SFE_RD_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7220, /* SFE_RD_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7224, /* SFE_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE_WR, + .port_name = "IPE_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a30, /* IPE_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a34, /* IPE_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a38, /* IPE_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a40, /* IPE_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6a48, /* IPE_WR_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6c08, /* IPE_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6c20, /* IPE_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + 
.masked_value = 0, + .offset = 0x6c24, /* IPE_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6A20, /* IPE_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_BPS_WR, + .port_name = "BPS_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x630, /* BPS_WR_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x634, /* BPS_WR_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x638, /* BPS_WR_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x640, /* BPS_WR_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x648, /* BPS_WR_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x808, /* BPS_WR_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x820, /* BPS_WR_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x824, /* BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x620, /* BPS_WR_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_BPS_RD, + .port_name = "BPS_RD", + 
.enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x30, /* BPS_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x34, /* BPS_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x38, /* BPS_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40, /* BPS_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48, /* BPS_RD_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x208, /* BPS_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x220, /* BPS_RD_QOSGEN_SHAPING_LOW */ + .value = 0x14141414, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x224, /* BPS_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + }, + { + .port_type = CAM_CAMNOC_JPEG, + .port_name = "JPEG", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7c30, /* JPEG_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7c34, /* JPEG_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 
0x7c38, /* JPEG_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7c40, /* JPEG_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7c48, /* JPEG_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7e08, /* JPEG_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7e20, /* JPEG_QOSGEN_SHAPING_LOW */ + .value = 0x10101010, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7e24, /* JPEG_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7C20, /* JPEG_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_IPE0_RD, + .port_name = "IPE0_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E30, /* IPE0_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E34, /* IPE0_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E38, /* IPE0_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5E40, /* IPE0_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + 
.offset = 0x5E48, /* IPE0_RD_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5F08, /* IPE0_RD_DECCTL_LOW */ + .value = 1, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6008, /* IPE0_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6020, /* IPE0_RD_QOSGEN_SHAPING_LOW */ + .value = 0x29292929, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6024, /* IPE0_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x29292929, + }, + }, + { + .port_type = CAM_CAMNOC_IPE1_RD, + .port_name = "IPE1_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6430, /* IPE1_RD_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6434, /* IPE1_RD_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6438, /* IPE1_RD_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6440, /* IPE1_RD_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6448, /* IPE1_RD_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6608, /* IPE1_RD_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = false, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6620, /* IPE1_RD_QOSGEN_SHAPING_LOW */ + .value = 0x29292929, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6624, /* IPE1_RD_QOSGEN_SHAPING_HIGH */ + .value = 0x29292929, + }, + }, + { + .port_type = CAM_CAMNOC_CDM, + .port_name = "CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3830, /* CDM_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3834, /* CDM_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3838, /* CDM_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3840, /* CDM_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3848, /* CDM_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3a08, /* CDM_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3a20, /* CDM_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x3a24, /* CDM_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_type = CAM_CAMNOC_ICP, + .port_name = "ICP", + .enable = true, + .flag_out_set0_low = { + .enable = false, + .access_type = CAM_REG_TYPE_WRITE, + .masked_value = 0, 
+ .offset = 0x2288, + .value = 0x100000, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7688, /* ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x76A0, /* ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x76A4, /* ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam680_cpas110_err_logger_offsets = { + .mainctrl = 0x2008, /* ERRLOGGER_MAINCTL_LOW */ + .errvld = 0x2010, /* ERRLOGGER_ERRVLD_LOW */ + .errlog0_low = 0x2020, /* ERRLOGGER_ERRLOG0_LOW */ + .errlog0_high = 0x2024, /* ERRLOGGER_ERRLOG0_HIGH */ + .errlog1_low = 0x2028, /* ERRLOGGER_ERRLOG1_LOW */ + .errlog1_high = 0x202c, /* ERRLOGGER_ERRLOG1_HIGH */ + .errlog2_low = 0x2030, /* ERRLOGGER_ERRLOG2_LOW */ + .errlog2_high = 0x2034, /* ERRLOGGER_ERRLOG2_HIGH */ + .errlog3_low = 0x2038, /* ERRLOGGER_ERRLOG3_LOW */ + .errlog3_high = 0x203c, /* ERRLOGGER_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam680_cpas110_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x2300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, +}; + +static struct cam_camnoc_info cam680_cpas110_camnoc_info = { + .specific = &cam_cpas_v680_110_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v680_110_camnoc_specific), + .irq_sbm = &cam_cpas_v680_110_irq_sbm, + .irq_err = &cam_cpas_v680_110_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v680_110_irq_err), + .err_logger = &cam680_cpas110_err_logger_offsets, + .errata_wa_list = &cam680_cpas110_errata_wa_list, +}; + +static struct 
cam_cpas_camnoc_qchannel cam680_cpas110_qchannel_info = { + .qchannel_ctrl = 0x5C, + .qchannel_status = 0x60, +}; + +static struct cam_cpas_info cam680_cpas110_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam680_cpas110_qchannel_info}, + .num_qchannel = 1, +}; + +#endif /* _CPASTOP_V680_110_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v780_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v780_100.h new file mode 100644 index 0000000000..3779cfe5d2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v780_100.h @@ -0,0 +1,1248 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CPASTOP_V780_100_H_ +#define _CPASTOP_V780_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v780_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */ + 0x02 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x04 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x08 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20, /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + .value = 0x1, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v780_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = 
true, + .offset = 0x8, /* CAM_NOC_ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x18, /* CAM_NOC_ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x77A0, /* IFE_UBWC : RT_1_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7790, /* IFE_UBWC : RT_1_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7798, /* IFE_UBWC : RT_1_NIU_ENCERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6BA0, /* BPS_WR : NRT_2_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6B90, /* BPS_WR : NRT_2_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6B98, /* BPS_WR : NRT_2_NIU_ENCERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6F20, /* IPE_0_RD : NRT_4_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6F10, /* IPE_0_RD : NRT_4_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = 
CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6F18, /* IPE_0_RD : NRT_4_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x7120, /* IPE_1_RD : NRT_5_NIU_DECERREN_LOW */ + .value = 0XFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7110, /* IPE_1_RD : NRT_5_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7118, /* IPE_1_RD : NRT_5_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x73A0, /* IPE_WR : NRT_6_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x7390, /* IPE_WR : NRT_6_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x7398, /* IPE_WR : NRT_6_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + 
.enable = false, + .sbm_port = 0x80, /* SBM_FAULTINSTATUS0_LOW_PORT7_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x3, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v780_100_camnoc_specific[] = { + /* RT ports */ + { + .port_name = "RT0-SFE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7430, /* SFE_RD : NOC_RT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7434, /* SFE_RD : NOC_RT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7438, /* SFE_RD : NOC_RT_0_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7440, /* SFE_RD : NOC_RT_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7448, /* SFE_RD : NOC_RT_0_NIU_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8188, /* SFE_RD : NOC_RT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5388, /* SFE_RD : NOC_RT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x53A0, /* SFE_RD : NOC_RT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x53A4, /* SFE_RD : NOC_RT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "RT1-IFE_UBWC_LINEAR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7630, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7634, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7638, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7640, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7648, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8208, /* IFE_UBWC_LINEAR : NOC_RT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5408, /* IFE_UBWC_LINEAR : NOC_RT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5420, /* IFE_UBWC_LINEAR : NOC_RT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5424, /* IFE_UBWC_LINEAR : NOC_RT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7620, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT2-IFE_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7830, /* IFE_STATS : NOC_RT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7834, /* IFE_STATS : NOC_RT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7838, /* IFE_STATS : NOC_RT_2_NIU_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7840, /* IFE_STATS : NOC_RT_2_NIU_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7848, /* IFE_STATS : NOC_RT_2_NIU_SAFELUT_LOW */ + 
.value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8288, /* IFE_STATS : NOC_RT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5488, /* IFE_STATS : NOC_RT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x54A0, /* IFE_STATS : NOC_RT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x54A4, /* IFE_STATS : NOC_RT_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7820, /* IFE_STATS : NOC_RT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT3-IFE_PDAF_IFELITE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7A30, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7A34, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7A38, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_URGENCY_LOW */ + .value = 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7A40, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_DANGERLUT_LOW */ + .value = 
0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7A48, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8308, /* IFE_PDAF_IFELITE : NOC_RT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5508, /* IFE_PDAF_IFELITE : NOC_RT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5520, /* IFE_PDAF_IFELITE : NOC_RT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5524, /* IFE_PDAF_IFELITE : NOC_RT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7A20, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT4-IFE_RDI_SFE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C30, /* IFE_RDI_SFE : NOC_RT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x66665433, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C34, /* IFE_RDI_SFE : NOC_RT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C38, /* IFE_RDI_SFE : NOC_RT_4_NIU_URGENCY_LOW */ + .value 
= 0x1B30, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C40, /* IFE_RDI_SFE : NOC_RT_4_NIU_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7C48, /* IFE_RDI_SFE : NOC_RT_4_NIU_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8388, /* IFE_RDI_SFE : NOC_RT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5588, /* IFE_RDI_SFE : NOC_RT_4_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x55A0, /* IFE_RDI_SFE : NOC_RT_4_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x55A4, /* IFE_RDI_SFE : NOC_RT_4_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7C20, /* IFE_RDI_SFE : NOC_RT_4_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + /* NRT ports */ + { + .port_name = "NRT0-CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6630, /* CDM : NOC_NRT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6634, /* CDM : NOC_NRT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + 
.enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6638, /* CDM : NOC_NRT_0_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6640, /* CDM : NOC_NRT_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6648, /* CDM : NOC_NRT_0_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7E08, /* CDM : NOC_NRT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5008, /* CDM : NOC_NRT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5020, /* CDM : NOC_NRT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5024, /* CDM : NOC_NRT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT1-JPEG_RD_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6830, /* JPEG : NOC_NRT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6834, /* JPEG : NOC_NRT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6838, /* JPEG : NOC_NRT_1_NIU_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6840, /* JPEG : NOC_NRT_1_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6848, /* JPEG : NOC_NRT_1_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7E88, /* JPEG : NOC_NRT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5088, /* JPEG : NOC_NRT_1_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x50A0, /* JPEG : NOC_NRT_1_QOSGEN_SHAPING_LOW */ + .value = 0x10101010, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x50A4, /* JPEG : NOC_NRT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6820, /* JPEG : NOC_NRT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT2-BPS_CRE_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A30, /* BPS_CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A34, /* BPS_CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A38, /* BPS_CRE_WR : NOC_NRT_2_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A40, /* BPS_CRE_WR : NOC_NRT_2_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A48, /* BPS_CRE_WR : NOC_NRT_2_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7F08, /* BPS_CRE_WR : NOC_NRT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5108, /* BPS_CRE_WR : NOC_NRT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5120, /* BPS_CRE_WR : NOC_NRT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5124, /* BPS_CRE_WR : NOC_NRT_2_BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6A20, /* BPS_CRE_WR : NOC_NRT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT3-BPS_CRE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C30, /* BPS_CRE_RD : NOC_NRT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C34, /* BPS_CRE_RD : NOC_NRT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C38, /* BPS_CRE_RD : NOC_NRT_3_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C40, /* BPS_CRE_RD : NOC_NRT_3_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C48, /* BPS_CRE_RD : NOC_NRT_3_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7F88, /* BPS_CRE_RD : NOC_NRT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5188, /* BPS_CRE_RD : NOC_NRT_3_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x51A0, /* BPS_CRE_RD : NOC_NRT_3_QOSGEN_SHAPING_LOW */ + .value = 0x14141414, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x51A4, /* BPS_CRE_RD : NOC_NRT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + }, + { + .port_name = "NRT4-IPE_0_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E30, /* IPE_0_RD : NOC_NRT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E34, /* IPE_0_RD : NOC_NRT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E38, /* IPE_0_RD : NOC_NRT_4_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E40, /* IPE_0_RD : NOC_NRT_4_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + 
.enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E48, /* IPE_0_RD : NOC_NRT_4_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6F08, /* IPE_0_RD : NOC_NRT_4_NIU_DECCTL_LOW */ + .value = 1, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8008, /* IPE_0_RD : NOC_NRT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5208, /* IPE_0_RD : NOC_NRT_4_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5220, /* IPE_0_RD : NOC_NRT_4_QOSGEN_SHAPING_LOW */ + .value = 0x29292929, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5224, /* IPE_0_RD : NOC_NRT_4_QOSGEN_SHAPING_HIGH */ + .value = 0x29292929, + }, + }, + { + .port_name = "NRT5-IPE_1_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7030, /* IPE_1_RD : NOC_NRT_5_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7034, /* IPE_1_RD : NOC_NRT_5_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7038, /* IPE_1_RD : NOC_NRT_5_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7040, /* IPE_1_RD : NOC_NRT_5_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7048, /* IPE_1_RD : NOC_NRT_5_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8088, /* IPE_1_RD : NOC_NRT_5_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5288, /* IPE_1_RD : NOC_NRT_5_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x52A0, /* IPE_1_RD : NOC_NRT_5_QOSGEN_SHAPING_LOW */ + .value = 0x29292929, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x52A4, /* IPE_1_RD : NOC_NRT_5_QOSGEN_SHAPING_HIGH */ + .value = 0x29292929, + }, + }, + { + .port_name = "NRT6-IPE_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7230, /* IPE_WR : NOC_NRT_6_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7234, /* IPE_WR : NOC_NRT_6_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7238, /* IPE_WR : NOC_NRT_6_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7240, /* IPE_WR : NOC_NRT_6_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7248, /* IPE_WR : NOC_NRT_6_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + 
.dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8108, /* IPE_WR : NOC_NRT_6_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5308, /* IPE_WR : NOC_NRT_6_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5320, /* IPE_WR : NOC_NRT_6_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5324, /* IPE_WR : NOC_NRT_6_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x7220, /* IPE_WR : NOC_NRT_6_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8408, /* ICP_RD_WR : NOC_XM_ICP_DYNATTR_MAINCTL */ + .value = 0x10, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5608, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_MAINCTL */ + .value = 0x40, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5620, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5624, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam780_cpas100_err_logger_offsets = { + .mainctrl = 0x08, /* NOC_ERL_MAINCTL_LOW */ + .errvld = 0x10, /* NOC_ERL_ERRVLD_LOW */ + .errlog0_low = 0x20, /* NOC_ERL_ERRLOG0_LOW */ + 
.errlog0_high = 0x24, /* NOC_ERL_ERRLOG0_HIGH */ + .errlog1_low = 0x28, /* NOC_ERL_ERRLOG1_LOW */ + .errlog1_high = 0x2C, /* NOC_ERL_ERRLOG1_HIGH */ + .errlog2_low = 0x30, /* NOC_ERL_ERRLOG2_LOW */ + .errlog2_high = 0x34, /* NOC_ERL_ERRLOG2_HIGH */ + .errlog3_low = 0x38, /* NOC_ERL_ERRLOG3_LOW */ + .errlog3_high = 0x3C, /* NOC_ERL_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam780_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, + .enable_icp_clk_for_qchannel = { + .enable = true, + }, +}; + +static struct cam_camnoc_info cam780_cpas100_camnoc_info = { + .specific = &cam_cpas_v780_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v780_100_camnoc_specific), + .irq_sbm = &cam_cpas_v780_100_irq_sbm, + .irq_err = &cam_cpas_v780_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v780_100_irq_err), + .err_logger = &cam780_cpas100_err_logger_offsets, + .errata_wa_list = &cam780_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x80, + .sbm_clear_mask = 0x2, + } +}; + +static struct cam_cpas_camnoc_qchannel cam780_cpas100_qchannel_info = { + .qchannel_ctrl = 0x5C, + .qchannel_status = 0x60, +}; + +static struct cam_cpas_subpart_info cam780_cpas_camera_subpart_info = { + .num_bits = 8, + /* + * Below fuse indexing is based on software fuse definition which is in SMEM and provided + * by XBL team. 
+ */ + .hw_bitmap_mask = { + {CAM_CPAS_CAM_FUSE, BIT(0)}, + {CAM_CPAS_ISP_FUSE, BIT(1)}, + {CAM_CPAS_ISP_FUSE, BIT(2)}, + {CAM_CPAS_ISP_FUSE, BIT(0)}, + {CAM_CPAS_SFE_FUSE, BIT(0)}, + {CAM_CPAS_SFE_FUSE, BIT(1)}, + {CAM_CPAS_SFE_FUSE, BIT(2)}, + {CAM_CPAS_CUSTOM_FUSE, BIT(0)}, + } +}; + +static struct cam_cpas_info cam780_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 1, + .hw_caps_offsets = {0x8}, + }, + .qchannel_info = {&cam780_cpas100_qchannel_info}, + .num_qchannel = 1, + .subpart_info = &cam780_cpas_camera_subpart_info, +}; + +#endif /* _CPASTOP_V780_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v880_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v880_100.h new file mode 100644 index 0000000000..5815a200f1 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v880_100.h @@ -0,0 +1,1381 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V880_100_H_ +#define _CPASTOP_V880_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v880_100_irq_sbm = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_SBM_FAULTINEN0_LOW */ + .value = 0x01 |/* SBM_FAULTINEN0_LOW_PORT0_MASK */ + 0x02 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */ + 0x04 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */ + 0x08 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */ + 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */ + 0x20, /* SBM_FAULTINEN0_LOW_PORT5_MASK */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + .value = 0xE, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v880_100_irq_err[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_ERL_MAINCTL_LOW */ + .value = 1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x18, /* CAM_NOC_ERL_ERRCLR_LOW */ + .value = 1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IFE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x93A0, /* IFE_UBWC : RT_1_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x9390, /* IFE_UBWC : RT_1_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x9398, /* IFE_UBWC : RT_1_NIU_ENCERRCLR_LOW 
*/ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_BPS_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x87A0, /* BPS_WR : NRT_2_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x8790, /* BPS_WR : NRT_2_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x8798, /* BPS_WR : NRT_2_NIU_ENCERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8B20, /* IPE_0_RD : NRT_4_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x8B10, /* IPE_0_RD : NRT_4_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x8B18, /* IPE_0_RD : NRT_4_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8D20, /* IPE_1_RD : NRT_5_NIU_DECERREN_LOW */ + .value = 0XFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x8D10, /* IPE_1_RD : NRT_5_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x8D18, /* IPE_1_RD : NRT_5_NIU_DECERRCLR_LOW */ + .value = 0X1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8FA0, /* IPE_WR : NRT_6_NIU_ENCERREN_LOW */ + .value = 0XF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x8F90, /* IPE_WR : NRT_6_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x8F98, /* IPE_WR : NRT_6_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x80, /* SBM_FAULTINSTATUS0_LOW_PORT7_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_SBM_FLAGOUTSET0_LOW */ + .value = 0x3, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v880_100_camnoc_specific[] = { + /* RT ports */ + { + .port_name = "RT0-SFE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9030, /* SFE_RD : NOC_RT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + 
.offset = 0x9034, /* SFE_RD : NOC_RT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9038, /* SFE_RD : NOC_RT_0_NIU_URGENCY_LOW */ + .value = 0x4, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9040, /* SFE_RD : NOC_RT_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9048, /* SFE_RD : NOC_RT_0_NIU_SAFELUT_LOW */ + .value = 0x0, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9D88, /* SFE_RD : NOC_RT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7388, /* SFE_RD : NOC_RT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x73A0, /* SFE_RD : NOC_RT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x73A4, /* SFE_RD : NOC_RT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "RT1-IFE_UBWC", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9230, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9234, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_PRIORITYLUT_HIGH */ 
+ .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9238, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_URGENCY_LOW */ + .value = 0x1E40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9240, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9248, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9E08, /* IFE_UBWC_LINEAR : NOC_RT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7408, /* IFE_UBWC_LINEAR : NOC_RT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7420, /* IFE_UBWC_LINEAR : NOC_RT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7424, /* IFE_UBWC_LINEAR : NOC_RT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x9220, /* IFE_UBWC_LINEAR : NOC_RT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT2-IFE_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9430, /* IFE_STATS : NOC_RT_2_NIU_PRIORITYLUT_LOW */ + .value = 
0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9434, /* IFE_STATS : NOC_RT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9438, /* IFE_STATS : NOC_RT_2_NIU_URGENCY_LOW */ + .value = 0x1C40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9440, /* IFE_STATS : NOC_RT_2_NIU_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9448, /* IFE_STATS : NOC_RT_2_NIU_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9E88, /* IFE_STATS : NOC_RT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7488, /* IFE_STATS : NOC_RT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x74A0, /* IFE_STATS : NOC_RT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x74A4, /* IFE_STATS : NOC_RT_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x9420, /* IFE_STATS : NOC_RT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT3-IFE_PDAF_LINEAR_IFELITE", + .enable = true, + .priority_lut_low = { + 
.enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9630, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9634, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9638, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_URGENCY_LOW */ + .value = 0x1C40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9640, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9648, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9F08, /* IFE_PDAF_IFELITE : NOC_RT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7508, /* IFE_PDAF_IFELITE : NOC_RT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7520, /* IFE_PDAF_IFELITE : NOC_RT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7524, /* IFE_PDAF_IFELITE : NOC_RT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x9620, /* IFE_PDAF_IFELITE : NOC_RT_3_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT4-IFE_RDI_SFE", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9830, /* IFE_RDI_SFE : NOC_RT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9834, /* IFE_RDI_SFE : NOC_RT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9838, /* IFE_RDI_SFE : NOC_RT_4_NIU_URGENCY_LOW */ + .value = 0x1E40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9840, /* IFE_RDI_SFE : NOC_RT_4_NIU_DANGERLUT_LOW */ + .value = 0xffffff00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9848, /* IFE_RDI_SFE : 
NOC_RT_4_NIU_SAFELUT_LOW */ + .value = 0x000f, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9F88, /* IFE_RDI_SFE : NOC_RT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7588, /* IFE_RDI_SFE : NOC_RT_4_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x75A0, /* IFE_RDI_SFE : NOC_RT_4_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x75A4, /* IFE_RDI_SFE : NOC_RT_4_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x9820, /* IFE_RDI_SFE : NOC_RT_4_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + /* NRT ports */ + { + .port_name = "NRT0-CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8230, /* CDM : NOC_NRT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8234, /* CDM : NOC_NRT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8238, /* CDM : NOC_NRT_0_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8240, /* CDM : NOC_NRT_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + 
.safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8248, /* CDM : NOC_NRT_0_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9A08, /* CDM : NOC_NRT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7008, /* CDM : NOC_NRT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7020, /* CDM : NOC_NRT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7024, /* CDM : NOC_NRT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT1-JPEG_RD_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8430, /* JPEG : NOC_NRT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8434, /* JPEG : NOC_NRT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8438, /* JPEG : NOC_NRT_1_NIU_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8440, /* JPEG : NOC_NRT_1_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8448, /* JPEG : NOC_NRT_1_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + 
}, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9A88, /* JPEG : NOC_NRT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7088, /* JPEG : NOC_NRT_1_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x70A0, /* JPEG : NOC_NRT_1_QOSGEN_SHAPING_LOW */ + .value = 0x10101010, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x70A4, /* JPEG : NOC_NRT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x8420, /* JPEG : NOC_NRT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT2-BPS_CRE_WR", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8630, /* BPS_CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8634, /* BPS_CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8638, /* BPS_CRE_WR : NOC_NRT_2_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8640, /* BPS_CRE_WR : NOC_NRT_2_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8648, /* BPS_CRE_WR : NOC_NRT_2_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + 
}, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9B08, /* BPS_CRE_WR : NOC_NRT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7108, /* BPS_CRE_WR : NOC_NRT_2_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7120, /* BPS_CRE_WR : NOC_NRT_2_QOSGEN_SHAPING_LOW */ + .value = 0x14141414, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7124, /* BPS_CRE_WR : NOC_NRT_2_BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x8620, /* BPS_CRE_WR : NOC_NRT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT3-BPS_CRE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8830, /* BPS_CRE_RD : NOC_NRT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8834, /* BPS_CRE_RD : NOC_NRT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8838, /* BPS_CRE_RD : NOC_NRT_3_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8840, /* BPS_CRE_RD : NOC_NRT_3_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8848, /* BPS_CRE_RD : NOC_NRT_3_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = 
{ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9B88, /* BPS_CRE_RD : NOC_NRT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7188, /* BPS_CRE_RD : NOC_NRT_3_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x71A0, /* BPS_CRE_RD : NOC_NRT_3_QOSGEN_SHAPING_LOW */ + .value = 0x14141414, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x71A4, /* BPS_CRE_RD : NOC_NRT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + }, + { + .port_name = "NRT4-IPE_0_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8A30, /* IPE_0_RD : NOC_NRT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8A34, /* IPE_0_RD : NOC_NRT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8A38, /* IPE_0_RD : NOC_NRT_4_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8A40, /* IPE_0_RD : NOC_NRT_4_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8A48, /* IPE_0_RD : NOC_NRT_4_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8B08, /* IPE_0_RD : NOC_NRT_4_NIU_DECCTL_LOW */ + .value = 1, + }, + 
.dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9C08, /* IPE_0_RD : NOC_NRT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7208, /* IPE_0_RD : NOC_NRT_4_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7220, /* IPE_0_RD : NOC_NRT_4_QOSGEN_SHAPING_LOW */ + .value = 0x2E2E2E2E, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7224, /* IPE_0_RD : NOC_NRT_4_QOSGEN_SHAPING_HIGH */ + .value = 0x2E2E2E2E, + }, + }, + { + .port_name = "NRT5-IPE_1_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8C30, /* IPE_1_RD : NOC_NRT_5_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8C34, /* IPE_1_RD : NOC_NRT_5_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8C38, /* IPE_1_RD : NOC_NRT_5_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8C40, /* IPE_1_RD : NOC_NRT_5_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8C48, /* IPE_1_RD : NOC_NRT_5_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9C88, /* IPE_1_RD : NOC_NRT_5_DYNATTR_MAINCTL */ + .value = 
0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7288, /* IPE_1_RD : NOC_NRT_5_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x72A0, /* IPE_1_RD : NOC_NRT_5_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x72A4, /* IPE_1_RD : NOC_NRT_5_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT6-IPE_WR_0", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8E30, /* IPE_WR_0 : NOC_NRT_6_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8E34, /* IPE_WR_0 : NOC_NRT_6_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8E38, /* IPE_WR_0 : NOC_NRT_6_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8E40, /* IPE_WR_0 : NOC_NRT_6_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x8E48, /* IPE_WR_0 : NOC_NRT_6_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x9D08, /* IPE_WR_0 : NOC_NRT_6_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7308, /* IPE_WR_0 : NOC_NRT_6_QOSGEN_MAINCTL */ + 
.value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7320, /* IPE_WR_0 : NOC_NRT_6_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7324, /* IPE_WR_0 : NOC_NRT_6_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x8E20, /* IPE_WR_0 : NOC_NRT_6_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT7-IPE_WR_1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA430, /* IPE_WR_1 : NOC_NRT_7_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA434, /* IPE_WR_1 : NOC_NRT_7_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA438, /* IPE_WR_1 : NOC_NRT_7_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA440, /* IPE_WR_1 : NOC_NRT_7_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA448, /* IPE_WR_1 : NOC_NRT_7_NIU_SAFELUT_LOW */ + .value = 0xffff, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA608, /* IPE_WR_1 : NOC_NRT_7_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA688, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_MAINCTL */ + 
.value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA6A0, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA6A4, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0xA420, /* IPE_WR_1 : NOC_NRT_7_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0xA008, /* ICP_RD_WR : NOC_XM_ICP_DYNATTR_MAINCTL */ + .value = 0x10, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7608, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_MAINCTL */ + .value = 0x50, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7620, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7624, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam880_cpas100_err_logger_offsets = { + .mainctrl = 0x08, /* NOC_ERL_MAINCTL_LOW */ + .errvld = 0x10, /* NOC_ERL_ERRVLD_LOW */ + .errlog0_low = 0x20, /* NOC_ERL_ERRLOG0_LOW */ + .errlog0_high = 0x24, /* NOC_ERL_ERRLOG0_HIGH */ + .errlog1_low = 0x28, /* NOC_ERL_ERRLOG1_LOW */ + .errlog1_high = 0x2C, /* NOC_ERL_ERRLOG1_HIGH */ + .errlog2_low = 0x30, /* NOC_ERL_ERRLOG2_LOW */ + .errlog2_high = 0x34, /* NOC_ERL_ERRLOG2_HIGH */ + .errlog3_low = 0x38, /* NOC_ERL_ERRLOG3_LOW */ + .errlog3_high = 0x3C, /* NOC_ERL_ERRLOG3_HIGH */ 
+}; + +static struct cam_cpas_hw_errata_wa_list cam880_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, + .enable_icp_clk_for_qchannel = { + .enable = true, + }, +}; + +static struct cam_cpas_cesta_vcd_reg_info cam_cpas_v880_100_cesta_reg_info = { + .vcd_currol = { + .reg_offset = 0x300c, + .vcd_base_inc = 0x200, + .num_vcds = 8, + }, + +}; + +static struct cam_cpas_vcd_info cam_v880_100_vcd_info[] = { + { + .index = 0, .type = CAM_CESTA_CRMC, .clk = "cam_cc_ife_0_clk_src", + }, + { + .index = 1, .type = CAM_CESTA_CRMC, .clk = "cam_cc_ife_1_clk_src", + }, + { + .index = 2, .type = CAM_CESTA_CRMC, .clk = "cam_cc_ife_2_clk_src", + }, + { + .index = 3, .type = CAM_CESTA_CRMC, .clk = "cam_cc_sfe_0_clk_src", + }, + { + .index = 4, .type = CAM_CESTA_CRMC, .clk = "cam_cc_sfe_1_clk_src", + }, + { + .index = 5, .type = CAM_CESTA_CRMC, .clk = "cam_cc_sfe_2_clk_src", + }, + { + .index = 6, .type = CAM_CESTA_CRMC, .clk = "cam_cc_csid_clk_src", + }, + { + .index = 7, .type = CAM_CESTA_CRMC, .clk = "cam_cc_cphy_rx_clk_src", + }, + { + .index = 8, .type = CAM_CESTA_CRMB, .clk = "cam_cc_camnoc_axi_rt_clk_src", + }, +}; + +static struct cam_cpas_cesta_info cam_v880_cesta_info = { + .vcd_info = &cam_v880_100_vcd_info[0], + .num_vcds = ARRAY_SIZE(cam_v880_100_vcd_info), + .cesta_reg_info = &cam_cpas_v880_100_cesta_reg_info, +}; + +static struct cam_camnoc_info cam880_cpas100_camnoc_info = { + .specific = &cam_cpas_v880_100_camnoc_specific[0], + .specific_size = ARRAY_SIZE(cam_cpas_v880_100_camnoc_specific), + .irq_sbm = &cam_cpas_v880_100_irq_sbm, + .irq_err = &cam_cpas_v880_100_irq_err[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v880_100_irq_err), + .err_logger = &cam880_cpas100_err_logger_offsets, + .errata_wa_list = &cam880_cpas100_errata_wa_list, + 
.test_irq_info = { + .sbm_enable_mask = 0x80, + .sbm_clear_mask = 0x4, + }, +}; + +static struct cam_cpas_camnoc_qchannel cam880_cpas100_qchannel_info = { + .qchannel_ctrl = 0x5C, + .qchannel_status = 0x60, +}; + +/* + * struct cam_cpas_secure_info: CPAS secure information are used only for + * debug purpose, Register access is restricted in normal builds. + * + */ +static struct cam_cpas_secure_info cam880_cpas100_secure_info = { + .secure_access_ctrl_offset = 0x1C, + .secure_access_ctrl_value = 0xFFFFFFFF, +}; + +static struct cam_cpas_subpart_info cam880_cpas_camera_subpart_info = { + .num_bits = 8, + /* + * Below fuse indexing is based on software fuse definition which is in SMEM and provided + * by XBL team. + */ + .hw_bitmap_mask = { + {CAM_CPAS_CAM_FUSE, BIT(0)}, + {CAM_CPAS_ISP_FUSE, BIT(0)}, + {CAM_CPAS_ISP_FUSE, BIT(1)}, + {CAM_CPAS_ISP_FUSE, BIT(2)}, + {CAM_CPAS_SFE_FUSE, BIT(0)}, + {CAM_CPAS_SFE_FUSE, BIT(1)}, + {CAM_CPAS_SFE_FUSE, BIT(2)}, + {CAM_CPAS_CUSTOM_FUSE, BIT(0)}, + } +}; + +static struct cam_cpas_info cam880_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 2, + .hw_caps_offsets = {0x8, 0xDC}, + }, + .qchannel_info = {&cam880_cpas100_qchannel_info}, + .num_qchannel = 1, + .hw_caps_secure_info = &cam880_cpas100_secure_info, + .subpart_info = &cam880_cpas_camera_subpart_info, +}; + +#endif /* _CPASTOP_V880_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v970_110.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v970_110.h new file mode 100644 index 0000000000..ba1408df29 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v970_110.h @@ -0,0 +1,1557 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V970_110_H_ +#define _CPASTOP_V970_110_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v970_110_irq_sbm_rt = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_RT_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* RT_SBM_FAULTINEN0_LOW_PORT0_MASK - Slave error IRQ */ + 0x02, /* RT_SBM_FAULTINEN0_LOW_PORT1_MASK - TFE UBWC Encoder Error IRQ */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_RT_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_RT_SBM_FLAGOUTCLR0_LOW */ + .value = 0x7, + } +}; + +static struct cam_camnoc_irq_sbm cam_cpas_v970_110_irq_sbm_nrt = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_NRT_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* NRT_SBM_FAULTINEN0_LOW_PORT0_MASK - Slave Error */ + 0x02 | /* NRT_SBM_FAULTINEN0_LOW_PORT1_MASK - IPE WR UBWC En */ + 0x04 | /* NRT_SBM_FAULTINEN0_LOW_PORT2_MASK - OFE WR UBWC En */ + 0x08 | /* NRT_SBM_FAULTINEN0_LOW_PORT3_MASK - OFE RD UBWC De */ + 0x10, /* NRT_SBM_FAULTINEN0_LOW_PORT4_MASK - IPE RD UBWC En */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_NRT_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + .value = 0x7, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v970_110_irq_err_rt[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* RT_SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_RT_ERL_MAINCTL_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* 
CAM_NOC_RT_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x18, /* CAM_NOC_RT_ERL_ERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_TFE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* RT_SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x47A0, /* TFE_UBWC : RT_0_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x4790, /* TFE_UBWC : RT_0_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x4798, /* TFE_UBWC : RT_0_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x20, /* RT_SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_RT_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_RT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_RT_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_irq_err + cam_cpas_v970_110_irq_err_nrt[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_NRT_ERL_MAINCTL_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_NRT_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 
0x18, /* CAM_NOC_NRT_ERL_ERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_OFE_UBWC_WRITE_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x63A0, /* OFE_WR : NRT_3_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6390, /* OFE_WR : NRT_3_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6398, /* OFE_WR : NRT_3_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_OFE_UBWC_READ_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6520, /* OFE_RD : NRT_4_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6510, /* OFE_RD : NRT_4_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6518, /* OFE_RD : NRT_4_NIU_DECERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x59A0, /* IPE_WR : NRT_1_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5990, /* IPE_WR : NRT_1_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5998, /* IPE_WR : NRT_1_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + 
.err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6B20, /* IPE_1_RD : NRT_7_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6B10, /* IPE_1_RD : NRT_7_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6B18, /* IPE_1_RD : NRT_7_NIU_DECERRCLR_LOW */ + .value = 0xFF, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x40, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_NRT_SBM_FLAGOUTSET0_LOW */ + .value = 0xE, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_NRT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x400, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT10_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_NRT_SBM_FLAGOUTSET0_LOW */ + .value = 0x2, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_NRT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v970_110_camnoc_specific_rt[] = { + /* RT ports */ + { + .port_name = "RT0-TFE0,1_FULL_DS2_PDAF1_TFE2_RDI_IR_DS4_DS16", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4630, /* NOC_RT_RT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + 
.priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4634, /* NOC_RT_RT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4638, /* NOC_RT_RT_0_NIU_URGENCY_LOW */ + .value = 0x00001e40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4640, /* NOC_RT_RT_0_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4648, /* NOC_RT_RT_0_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4E08, /* NOC_RT_RT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4008, /* NOC_RT_RT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4020, /* NOC_RT_RT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4024, /* NOC_RT_RT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4620, /* NOC_RT_RT_0_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT1-TFE0,1,2_PDAF_FD_IFE_LITE_0,1_RDI_GAMMA_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + 
.offset = 0x4830, /* NOC_RT_RT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4834, /* NOC_RT_RT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4838, /* NOC_RT_RT_1_NIU_URGENCY_LOW */ + .value = 0x00001c40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4840, /* NOC_RT_RT_1_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4848, /* NOC_RT_RT_1_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4E88, /* NOC_RT_RT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4088, /* NOC_RT_RT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40A0, /* NOC_RT_RT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40A4, /* NOC_RT_RT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4820, /* NOC_RT_RT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT2-TFE0,1,2_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A30, /* NOC_RT_RT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A34, /* NOC_RT_RT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A38, /* NOC_RT_RT_2_NIU_URGENCY_LOW */ + .value = 0x00001c40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A40, /* NOC_RT_RT_2_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A48, /* NOC_RT_RT_2_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4F08, /* NOC_RT_RT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4108, /* NOC_RT_RT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4120, /* NOC_RT_RT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4124, /* NOC_RT_RT_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4A20, /* NOC_RT_RT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT3-TFE0,1,2_IFE_LITE0,1_CDM", + .enable = true, 
+ .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C30, /* NOC_RT_RT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C34, /* NOC_RT_RT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C38, /* NOC_RT_RT_3_NIU_URGENCY_LOW */ + .value = 0x00001000, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C40, /* NOC_RT_RT_3_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C48, /* NOC_RT_RT_3_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4F88, /* NOC_RT_RT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4188, /* NOC_RT_RT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x41A0, /* NOC_RT_RT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x41A4, /* NOC_RT_RT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "RT4-TFE0,1_RDI_IR_DS4_DS16_TFE2_FULL_DS2_PDAF1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5830, /* NOC_RT_RT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5834, /* NOC_RT_RT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5838, /* NOC_RT_RT_4_NIU_URGENCY_LOW */ + .value = 0x00001e40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5840, /* NOC_RT_RT_4_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5848, /* NOC_RT_RT_4_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5208, /* NOC_RT_RT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5820, /* NOC_RT_RT_4_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v970_110_camnoc_specific_nrt[] = { + /* NRT ports */ + { + .port_name = "NRT0-IPE_WR_1", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5630, /* IPE_WR_1 : NOC_NRT_NRT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5634, /* IPE_WR_1 : NOC_NRT_NRT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5638, /* IPE_WR_1 : NOC_NRT_NRT_0_NIU_URGENCY_LOW */ + .value = 0x00000030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5640, /* IPE_WR_1 : NOC_NRT_NRT_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5648, /* IPE_WR_1 : NOC_NRT_NRT_0_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7008, /* IPE_WR_1 : NOC_NRT_NRT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4808, /* IPE_WR_1 : NOC_NRT_NRT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4820, /* IPE_WR_1 : NOC_NRT_NRT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4824, /* IPE_WR_1 : NOC_NRT_NRT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT1-IPE_WR_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5830, /* IPE_WR_0 : NOC_NRT_NRT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5834, /* IPE_WR_0 : NOC_NRT_NRT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5838, /* IPE_WR_0 : NOC_NRT_NRT_1_NIU_URGENCY_LOW */ + .value = 0x00000030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5840, /* IPE_WR_0 : NOC_NRT_NRT_1_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5848, /* IPE_WR_0 : NOC_NRT_NRT_1_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7088, /* IPE_WR_0 : NOC_NRT_NRT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4888, /* IPE_WR_0 : NOC_NRT_NRT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48A0, /* IPE_WR_0 : NOC_NRT_NRT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + 
.qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48A4, /* IPE_WR_0 : NOC_NRT_NRT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5820, /* IPE_WR_0 : NOC_NRT_NRT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT2-OFE_WR_1-CRE_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6030, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6034, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6038, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_NIU_URGENCY_LOW */ + .value = 0x00000030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6040, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6048, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7108, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4908, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + 
.offset = 0x4920, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4924, /* OFE_WR_1-CRE_WR:NOC_NRT_NRT_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6020, /* OFE_WR_1-CRE_WR : NOC_NRT_NRT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT3-OFE_WR_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6230, /* OFE_WR_0 : NOC_NRT_NRT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6234, /* OFE_WR_0 : NOC_NRT_NRT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6238, /* OFE_WR_0 : NOC_NRT_NRT_3_NIU_URGENCY_LOW */ + .value = 0x00000030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6240, /* OFE_WR_0 : NOC_NRT_NRT_3_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6248, /* OFE_WR_0 : NOC_NRT_NRT_3_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7188, /* OFE_WR_0 : NOC_NRT_NRT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4988, /* OFE_WR_0 : NOC_NRT_NRT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x49A0, /* OFE_WR_0 : NOC_NRT_NRT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x49A4, /* OFE_WR_0 : NOC_NRT_NRT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT4-OFE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6430, /* OFE_RD : NOC_NRT_NRT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6434, /* OFE_RD : NOC_NRT_NRT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6438, /* OFE_RD : NOC_NRT_NRT_4_NIU_URGENCY_LOW */ + .value = 0x00000003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6440, /* OFE_RD : NOC_NRT_NRT_4_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6448, /* OFE_RD : NOC_NRT_NRT_4_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6508, /* OFE_RD : NOC_NRT_NRT_4_NIU_DECCTL_LOW */ + .value = 1, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7208, /* OFE_RD : NOC_NRT_NRT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A08, /* OFE_RD : NOC_NRT_NRT_4_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A20, /* OFE_RD : NOC_NRT_NRT_4_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A24, /* OFE_RD : NOC_NRT_NRT_4_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6410, /* OFE_RD : NOC_NRT_NRT_4_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT5-CRE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6630, /* CRE_RD : NOC_NRT_NRT_5_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6634, /* CRE_RD : NOC_NRT_NRT_5_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6638, /* CRE_RD : NOC_NRT_NRT_5_NIU_URGENCY_LOW */ + .value = 0x00000003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6640, /* CRE_RD : NOC_NRT_NRT_5_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6648, /* CRE_RD : NOC_NRT_NRT_5_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7288, /* CRE_RD : NOC_NRT_NRT_5_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A88, /* CRE_RD : NOC_NRT_NRT_5_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + 
.access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4AA0, /* CRE_RD : NOC_NRT_NRT_5_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4AA4, /* CRE_RD : NOC_NRT_NRT_5_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6610, /* CRE_RD : NOC_NRT_NRT_5_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT6-JPEG0,1,2,3_RD_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6830, /* JPEG_RD_WR : NOC_NRT_NRT_6_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6834, /* JPEG_RD_WR : NOC_NRT_NRT_6_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6838, /* JPEG_RD_WR : NOC_NRT_NRT_6_NIU_URGENCY_LOW */ + .value = 0x00000022, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6840, /* JPEG_RD_WR : NOC_NRT_NRT_6_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6848, /* JPEG_RD_WR : NOC_NRT_NRT_6_NIU_SAFELUT_LOW */ + .value = 0xFFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7308, /* JPEG_RD_WR : NOC_NRT_NRT_6_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B08, /* JPEG_RD_WR : NOC_NRT_NRT_6_QOSGEN_MAINCTL */ + .value = 
0x0, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B20, /* JPEG_RD_WR : NOC_NRT_NRT_6_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B24, /* JPEG_RD_WR : NOC_NRT_NRT_6_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6820, /* JPEG_RD_WR : NOC_NRT_NRT_6_NIU_MAXWR_LOW */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6810, /* JPEG_RD_WR : NOC_NRT_NRT_6_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT7-IPE_RD_1", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A30, /* IPE_WR_1 : NOC_NRT_NRT_7_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A34, /* IPE_WR_1 : NOC_NRT_NRT_7_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A38, /* IPE_WR_1 : NOC_NRT_NRT_7_NIU_URGENCY_LOW */ + .value = 0x00000003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A40, /* IPE_WR_1 : NOC_NRT_NRT_7_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A48, /* IPE_WR_1 : NOC_NRT_NRT_7_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7388, /* IPE_WR_1 : 
NOC_NRT_NRT_7_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B88, /* IPE_WR_1 : NOC_NRT_NRT_7_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4BA0, /* IPE_WR_1 : NOC_NRT_NRT_7_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4BA4, /* IPE_WR_1 : NOC_NRT_NRT_7_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6A10, /* IPE_WR_1 : NOC_NRT_NRT_7_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT9-CDM_IPE_OFE", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E30, /* CDM_IPE_OFE : NOC_NRT_NRT_9_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E34, /* CDM_IPE_OFE : NOC_NRT_NRT_9_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E38, /* CDM_IPE_OFE : NOC_NRT_NRT_9_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E40, /* CDM_IPE_OFE : NOC_NRT_NRT_9_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E48, /* CDM_IPE_OFE : NOC_NRT_NRT_9_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + 
.masked_value = 0, + .offset = 0x7488, /* CDM_IPE_OFE : NOC_NRT_NRT_9_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C88, /* CDM_IPE_OFE : NOC_NRT_NRT_9_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4CA0, /* CDM_IPE_OFE : NOC_NRT_NRT_9_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4CA4, /* CDM_IPE_OFE : NOC_NRT_NRT_9_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6E10, /* CDM_IPE_OFE : NOC_NRT_NRT_9_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_0_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7508, /* ICP_RD_WR : NOC_NRT_XM_ICP_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D08, /* ICP_RD_WR : NOC_NRT_XM_ICP_0_QOSGEN_MAINCTL */ + .value = 0x00000040, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D20, /* ICP_RD_WR : NOC_NRT_XM_ICP_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D24, /* ICP_RD_WR : NOC_NRT_XM_ICP_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_1_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7588, /* ICP_RD_WR : NOC_NRT_XM_ICP_1_DYNATTR_MAINCTL */ + .value 
= 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D88, /* ICP_RD_WR : NOC_NRT_XM_ICP_1_QOSGEN_MAINCTL */ + .value = 0x00000040, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4DA0, /* ICP_RD_WR : NOC_NRT_XM_ICP_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4DA4, /* ICP_RD_WR : NOC_NRT_XM_ICP_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam970_cpas110_err_logger_offsets = { + .mainctrl = 0x08, /* NOC_ERL_MAINCTL_LOW */ + .errvld = 0x10, /* NOC_ERL_ERRVLD_LOW */ + .errlog0_low = 0x20, /* NOC_ERL_ERRLOG0_LOW */ + .errlog0_high = 0x24, /* NOC_ERL_ERRLOG0_HIGH */ + .errlog1_low = 0x28, /* NOC_ERL_ERRLOG1_LOW */ + .errlog1_high = 0x2C, /* NOC_ERL_ERRLOG1_HIGH */ + .errlog2_low = 0x30, /* NOC_ERL_ERRLOG2_LOW */ + .errlog2_high = 0x34, /* NOC_ERL_ERRLOG2_HIGH */ + .errlog3_low = 0x38, /* NOC_ERL_ERRLOG3_LOW */ + .errlog3_high = 0x3C, /* NOC_ERL_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam970_cpas110_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, + .enable_icp_clk_for_qchannel = { + .enable = false, + }, +}; + +static struct cam_camnoc_addr_trans_client_info + cam970_cpas110_addr_trans_client_info[] = { + { + .client_name = "icp1", + .reg_enable = 0x5508, + .reg_offset0 = 0x5518, + .reg_base1 = 0x5520, + .reg_offset1 = 0x5528, + .reg_base2 = 0x5530, + .reg_offset2 = 0x5538, + .reg_base3 = 0x5540, + .reg_offset3 = 0x5548, + }, +}; + +static struct cam_camnoc_addr_trans_info cam970_cpas110_addr_trans_info = { 
+ .num_supported_clients = ARRAY_SIZE(cam970_cpas110_addr_trans_client_info), + .addr_trans_client_info = &cam970_cpas110_addr_trans_client_info[0], +}; + +static struct cam_camnoc_info cam970_cpas110_camnoc_info_rt = { + .specific = &cam_cpas_v970_110_camnoc_specific_rt[0], + .specific_size = ARRAY_SIZE(cam_cpas_v970_110_camnoc_specific_rt), + .irq_sbm = &cam_cpas_v970_110_irq_sbm_rt, + .irq_err = &cam_cpas_v970_110_irq_err_rt[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v970_110_irq_err_rt), + .err_logger = &cam970_cpas110_err_logger_offsets, + .errata_wa_list = &cam970_cpas110_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x20, + .sbm_clear_mask = 0x4, + }, +}; + +static struct cam_camnoc_info cam970_cpas110_camnoc_info_nrt = { + .specific = &cam_cpas_v970_110_camnoc_specific_nrt[0], + .specific_size = ARRAY_SIZE(cam_cpas_v970_110_camnoc_specific_nrt), + .irq_sbm = &cam_cpas_v970_110_irq_sbm_nrt, + .irq_err = &cam_cpas_v970_110_irq_err_nrt[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v970_110_irq_err_nrt), + .err_logger = &cam970_cpas110_err_logger_offsets, + .errata_wa_list = &cam970_cpas110_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x400, + .sbm_clear_mask = 0x1, + }, + .addr_trans_info = &cam970_cpas110_addr_trans_info, +}; + +static struct cam_cpas_camnoc_qchannel cam970_cpas110_qchannel_info_rt = { + .qchannel_ctrl = 0xEC, + .qchannel_status = 0xF0, +}; + +static struct cam_cpas_camnoc_qchannel cam970_cpas110_qchannel_info_nrt = { + .qchannel_ctrl = 0xF4, + .qchannel_status = 0xF8, +}; + +/* + * struct cam_cpas_secure_info: CPAS secure information are used only for + * debug purpose, Register access is restricted in normal builds. 
+ * + */ +static struct cam_cpas_secure_info cam970_cpas110_secure_info = { + .secure_access_ctrl_offset = 0x1C, + .secure_access_ctrl_value = 0xFFFFFFFF, +}; + +static struct cam_cpas_subpart_info cam970_cpas_camera_subpart_info = { + .num_bits = 3, + /* + * Below fuse indexing is based on software fuse definition which is in SMEM and provided + * by XBL team. + */ + .hw_bitmap_mask = { + {CAM_CPAS_ISP_FUSE, BIT(0)}, + {CAM_CPAS_ISP_FUSE, BIT(1)}, + {CAM_CPAS_ISP_FUSE, BIT(2)}, + } +}; + +static struct cam_cpas_info cam970_cpas110_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 2, + .hw_caps_offsets = {0x8, 0xDC}, + }, + .qchannel_info = {&cam970_cpas110_qchannel_info_rt, + &cam970_cpas110_qchannel_info_nrt}, + .num_qchannel = 2, + .hw_caps_secure_info = &cam970_cpas110_secure_info, + .subpart_info = &cam970_cpas_camera_subpart_info, +}; + +#endif /* _CPASTOP_V970_110_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v975_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v975_100.h new file mode 100644 index 0000000000..d9464681b2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v975_100.h @@ -0,0 +1,1692 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V975_100_H_ +#define _CPASTOP_V975_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v975_100_irq_sbm_rt = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_RT_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* RT_SBM_FAULTINEN0_LOW_PORT0_MASK - Slave error IRQ */ + 0x02, /* RT_SBM_FAULTINEN0_LOW_PORT1_MASK - TFE UBWC Encoder Error IRQ */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_RT_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_RT_SBM_FLAGOUTCLR0_LOW */ + .value = 0x7, + } +}; + +static struct cam_camnoc_irq_sbm cam_cpas_v975_100_irq_sbm_nrt = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_NRT_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* NRT_SBM_FAULTINEN0_LOW_PORT0_MASK - Slave Error */ + 0x02 | /* NRT_SBM_FAULTINEN0_LOW_PORT1_MASK - IPE WR UBWC En */ + 0x04 | /* NRT_SBM_FAULTINEN0_LOW_PORT2_MASK - OFE WR UBWC En */ + 0x08 | /* NRT_SBM_FAULTINEN0_LOW_PORT3_MASK - OFE RD UBWC De */ + 0x10 | /* NRT_SBM_FAULTINEN0_LOW_PORT4_MASK - IPE RD UBWC En */ + 0x20, /* NRT_SBM_FAULTINEN0_LOW_PORT5_MASK - IPE RD UBWC En */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_NRT_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + .value = 0x7, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v975_100_irq_err_rt[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* RT_SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_RT_ERL_MAINCTL_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = 
CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_RT_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x18, /* CAM_NOC_RT_ERL_ERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_TFE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* RT_SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x47A0, /* TFE_UBWC : RT_0_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x4790, /* IFE_UBWC : RT_0_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x4798, /* IFE_UBWC : RT_0_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x20, /* RT_SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* RT_CAM_NOC_RT_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_RT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_RT_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_irq_err + cam_cpas_v975_100_irq_err_nrt[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_NRT_ERL_MAINCTL_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_NRT_ERL_ERRVLD_LOW */ + }, + .err_clear = { + 
.access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x18, /* CAM_NOC_NRT_ERL_ERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_OFE_UBWC_WRITE_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x4, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT2_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x63A0, /* OFE_WR : NRT_3_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6390, /* OFE_WR : NRT_3_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6398, /* OFE_WR : NRT_3_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_OFE_UBWC_READ_DECODE_ERROR, + .enable = true, + .sbm_port = 0x8, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT3_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6520, /* OFE_RD : NRT_4_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6510, /* OFE_RD : NRT_4_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6518, /* OFE_RD : NRT_4_NIU_DECERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR, + .enable = true, + .sbm_port = 0x2, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x59A0, /* IPE_WR : NRT_1_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x5990, /* IPE_WR : NRT_1_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x5998, /* IPE_WR : NRT_1_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR, + .enable = true, 
+ .sbm_port = 0x20, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6D20, /* IPE_0_RD : NRT_8_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6D10, /* IPE_0_RD : NRT_8_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6D18, /* IPE_0_RD : NRT_8_NIU_DECERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR, + .enable = true, + .sbm_port = 0x10, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT4_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x6B20, /* IPE_1_RD : NRT_7_NIU_DECERREN_LOW */ + .value = 0xFF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x6B10, /* IPE_1_RD : NRT_7_NIU_DECERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x6B18, /* IPE_1_RD : NRT_7_NIU_DECERRCLR_LOW */ + .value = 0xFF, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT, + .enable = false, + .sbm_port = 0x40, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT6_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* CAM_NOC_NRT_SBM_FLAGOUTSET0_LOW */ + .value = 0xE, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_NRT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x400, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT10_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset 
= 0x288, /* CAM_NOC_NRT_SBM_FLAGOUTSET0_LOW */ + .value = 0x2, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_NRT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v975_100_camnoc_specific_rt[] = { + /* RT ports */ + { + .port_name = "RT0-TFE0,1_FULL_DS2_PDAF1_TFE2_RDI_IR_DS4_DS16", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4630, /* NOC_RT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4634, /* NOC_RT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4638, /* NOC_RT_0_NIU_URGENCY_LOW */ + .value = 0x00001E40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4640, /* NOC_RT_0_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4648, /* NOC_RT_0_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4E08, /* NOC_RT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4008, /* NOC_RT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4020, /* NOC_RT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4024, /* NOC_RT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4620, /* NOC_RT_0_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT1-TFE0,1,2_PDAF_FD_IFE_LITE_0,1_RDI_GAMMA_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4830, /* NOC_RT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4834, /* NOC_RT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4838, /* NOC_RT_1_NIU_URGENCY_LOW */ + .value = 0x00001C40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4840, /* NOC_RT_1_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4848, /* NOC_RT_1_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. 
+ * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4E88, /* NOC_RT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4088, /* NOC_RT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40A0, /* NOC_RT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40A4, /* NOC_RT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4820, /* NOC_RT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT2-TFE0,1,2_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A30, /* NOC_RT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A34, /* NOC_RT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A38, /* NOC_RT_2_NIU_URGENCY_LOW */ + .value = 0x00001C40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A40, /* NOC_RT_2_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A48, /* NOC_RT_2_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not 
explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4F08, /* NOC_RT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4108, /* NOC_RT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4120, /* NOC_RT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4124, /* NOC_RT_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4A20, /* NOC_RT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT3-TFE0,1,2_IFE_LITE0,1_CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C30, /* NOC_RT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C34, /* NOC_RT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C38, /* NOC_RT_3_NIU_URGENCY_LOW */ + .value = 0x00001000, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C40, /* NOC_RT_3_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C48, /* NOC_RT_3_NIU_SAFELUT_LOW */ + .value = 
0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4F88, /* NOC_RT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4188, /* NOC_RT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x41A0, /* NOC_RT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x41A4, /* NOC_RT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "RT4-TFE0,1_RDI_IR_DS4_DS16_TFE2_FULL_DS2_PDAF1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5230, /* NOC_RT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5234, /* NOC_RT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5238, /* NOC_RT_4_NIU_URGENCY_LOW */ + .value = 0x00001E40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5240, /* NOC_RT_4_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5248, /* NOC_RT_4_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. 
+ * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5408, /* NOC_RT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5220, /* NOC_RT_4_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v975_100_camnoc_specific_nrt[] = { + /* NRT ports */ + { + .port_name = "NRT0-IPE_WR_1", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5630, /* IPE_WR_1 : NOC_NRT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5634, /* IPE_WR_1 : NOC_NRT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5638, /* IPE_WR_1 : NOC_NRT_0_NIU_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5640, /* IPE_WR_1 : NOC_NRT_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5648, /* IPE_WR_1 : NOC_NRT_0_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7008, /* IPE_WR_1 : NOC_NRT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4808, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + 
.qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4820, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4824, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT1-IPE_WR_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5830, /* IPE_WR_0 : NOC_NRT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5834, /* IPE_WR_0 : NOC_NRT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5838, /* IPE_WR_0 : NOC_NRT_1_NIU_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5840, /* IPE_WR_0 : NOC_NRT_1_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5848, /* IPE_WR_0 : NOC_NRT_1_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7088, /* IPE_WR_0 : NOC_NRT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4888, /* IPE_WR_0 : NOC_NRT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48A0, /* IPE_WR_0 : 
NOC_NRT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48A4, /* IPE_WR_0 : NOC_NRT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5820, /* IPE_WR_0 : NOC_NRT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT2-OFE_WR_1-CRE_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6030, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6034, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6038, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6040, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6048, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7108, /* OFE_WR_1-CRE_WR : NOC_NRT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4908, /* OFE_WR_1-CRE_WR : NOC_NRT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4920, /* OFE_WR_1-CRE_WR : NOC_NRT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4924, /* OFE_WR_1-CRE_WR:NOC_NRT_2_BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6020, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT3-OFE_WR_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6230, /* OFE_WR_0 : NOC_NRT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6234, /* OFE_WR_0 : NOC_NRT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6238, /* OFE_WR_0 : NOC_NRT_3_NIU_URGENCY_LOW */ + .value = 0x00001030, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6240, /* OFE_WR_0 : NOC_NRT_3_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6248, /* OFE_WR_0 : NOC_NRT_3_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7188, /* OFE_WR_0 : NOC_NRT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4988, /* OFE_WR_0 : NOC_NRT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + 
.enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x49A0, /* OFE_WR_0 : NOC_NRT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x49A4, /* OFE_WR_0 : NOC_NRT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT4-OFE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6430, /* OFE_RD : NOC_NRT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6434, /* OFE_RD : NOC_NRT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6438, /* OFE_RD : NOC_NRT_4_NIU_URGENCY_LOW */ + .value = 0x00001003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6440, /* OFE_RD : NOC_NRT_4_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6448, /* OFE_RD : NOC_NRT_4_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6508, /* OFE_RD : NOC_NRT_4_NIU_DECCTL_LOW */ + .value = 1, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7208, /* OFE_RD : NOC_NRT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A08, /* OFE_RD : NOC_NRT_4_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A20, /* OFE_RD : NOC_NRT_4_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A24, /* OFE_RD : NOC_NRT_4_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6410, /* OFE_RD : NOC_NRT_4_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT5-CRE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6630, /* CRE_RD : NOC_NRT_5_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6634, /* CRE_RD : NOC_NRT_5_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6638, /* CRE_RD : NOC_NRT_5_NIU_URGENCY_LOW */ + .value = 0x00001003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6640, /* CRE_RD : NOC_NRT_5_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6648, /* CRE_RD : NOC_NRT_5_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7288, /* CRE_RD : NOC_NRT_5_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A88, /* CRE_RD : NOC_NRT_5_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = 
CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4AA0, /* CRE_RD : NOC_NRT_5_QOSGEN_SHAPING_LOW */ + .value = 0x14141414, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4AA4, /* CRE_RD : NOC_NRT_5_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6610, /* CRE_RD : NOC_NRT_5_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT6-JPEG0,1,2,3_RD_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6830, /* JPEG_RD_WR : NOC_NRT_6_NIU_PRIORITYLUT_LOW */ + .value = 0x22222222, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6834, /* JPEG_RD_WR : NOC_NRT_6_NIU_PRIORITYLUT_HIGH */ + .value = 0x22222222, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6838, /* JPEG_RD_WR : NOC_NRT_6_NIU_URGENCY_LOW */ + .value = 0x00001022, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6840, /* JPEG_RD_WR : NOC_NRT_6_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6848, /* JPEG_RD_WR : NOC_NRT_6_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7308, /* JPEG_RD_WR : NOC_NRT_6_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B08, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = 
{ + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B20, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_SHAPING_LOW */ + .value = 0x10101010, + }, + .qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B24, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6820, /* JPEG_RD_WR : NOC_NRT_6_NIU_MAXWR_LOW */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6810, /* JPEG_RD_WR : NOC_NRT_6_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT7-IPE_RD_1", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A30, /* IPE_WR_1 : NOC_NRT_7_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A34, /* IPE_WR_1 : NOC_NRT_7_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A38, /* IPE_WR_1 : NOC_NRT_7_NIU_URGENCY_LOW */ + .value = 0x00001003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A40, /* IPE_WR_1 : NOC_NRT_7_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A48, /* IPE_WR_1 : NOC_NRT_7_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7388, /* IPE_WR_1 : NOC_NRT_7_DYNATTR_MAINCTL */ + .value = 0x0, + }, + 
.qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B88, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4BA0, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4BA4, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6A10, /* IPE_WR_1 : NOC_NRT_7_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT8-IPE_RD_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C30, /* IPE_RD_0 : NOC_NRT_8_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C34, /* IPE_RD_0 : NOC_NRT_8_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C38, /* IPE_RD_0 : NOC_NRT_8_NIU_URGENCY_LOW */ + .value = 0x00001003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C40, /* IPE_RD_0 : NOC_NRT_8_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C48, /* IPE_RD_0 : NOC_NRT_8_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7408, /* IPE_RD_0 : NOC_NRT_8_DYNATTR_MAINCTL */ + .value = 
0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C08, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C20, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C24, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6C10, /* IPE_RD_0 : NOC_NRT_8_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT9-CDM_IPE_OFE", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E30, /* CDM_IPE_OFE : NOC_NRT_9_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E34, /* CDM_IPE_OFE : NOC_NRT_9_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E38, /* CDM_IPE_OFE : NOC_NRT_9_NIU_URGENCY_LOW */ + .value = 0x00001003, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E40, /* CDM_IPE_OFE : NOC_NRT_9_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E48, /* CDM_IPE_OFE : NOC_NRT_9_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7488, /* CDM_IPE_OFE : 
NOC_NRT_9_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C88, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4CA0, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4CA4, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6E10, /* CDM_IPE_OFE : NOC_NRT_10_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_0_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7508, /* ICP_RD_WR : NOC_XM_ICP_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D08, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_MAINCTL */ + .value = 0x00000040, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D20, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D24, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_1_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7588, /* ICP_RD_WR : NOC_XM_ICP_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + 
.masked_value = 0, + .offset = 0x4D88, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_MAINCTL */ + .value = 0x00000040, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4DA0, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4DA4, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam975_cpas100_err_logger_offsets = { + .mainctrl = 0x08, /* NOC_ERL_MAINCTL_LOW */ + .errvld = 0x10, /* NOC_ERL_ERRVLD_LOW */ + .errlog0_low = 0x20, /* NOC_ERL_ERRLOG0_LOW */ + .errlog0_high = 0x24, /* NOC_ERL_ERRLOG0_HIGH */ + .errlog1_low = 0x28, /* NOC_ERL_ERRLOG1_LOW */ + .errlog1_high = 0x2C, /* NOC_ERL_ERRLOG1_HIGH */ + .errlog2_low = 0x30, /* NOC_ERL_ERRLOG2_LOW */ + .errlog2_high = 0x34, /* NOC_ERL_ERRLOG2_HIGH */ + .errlog3_low = 0x38, /* NOC_ERL_ERRLOG3_LOW */ + .errlog3_high = 0x3C, /* NOC_ERL_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam975_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, + .enable_icp_clk_for_qchannel = { + .enable = false, + }, +}; + +static struct cam_cpas_cesta_vcd_reg_info cam_cpas_v975_100_cesta_reg_info = { + .vcd_currol = { + .reg_offset = 0x266C, + .vcd_base_inc = 0x210, + .num_vcds = 8, + }, +}; + +static struct cam_cpas_vcd_info cam_v975_100_vcd_info[] = { + { + .index = 0, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_0_clk_src", + }, + { + .index = 1, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_1_clk_src", + }, + { + .index = 2, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_2_clk_src", + }, + { + .index = 6, .type = CAM_CESTA_CRMC, .clk = 
"cam_cc_csid_clk_src", + }, + { + .index = 7, .type = CAM_CESTA_CRMC, .clk = "cam_cc_cphy_rx_clk_src", + }, + { + .index = 8, .type = CAM_CESTA_CRMB, .clk = "cam_cc_camnoc_axi_rt_clk_src", + }, +}; + +static struct cam_cpas_cesta_info cam_v975_cesta_info = { + .vcd_info = &cam_v975_100_vcd_info[0], + .num_vcds = ARRAY_SIZE(cam_v975_100_vcd_info), + .cesta_reg_info = &cam_cpas_v975_100_cesta_reg_info, +}; + +static struct cam_camnoc_addr_trans_client_info + cam975_cpas100_addr_trans_client_info[] = { + { + .client_name = "icp1", + .reg_enable = 0x5508, + .reg_offset0 = 0x5518, + .reg_base1 = 0x5520, + .reg_offset1 = 0x5528, + .reg_base2 = 0x5530, + .reg_offset2 = 0x5538, + .reg_base3 = 0x5540, + .reg_offset3 = 0x5548, + }, +}; + +static struct cam_camnoc_addr_trans_info cam975_cpas100_addr_trans_info = { + .num_supported_clients = ARRAY_SIZE(cam975_cpas100_addr_trans_client_info), + .addr_trans_client_info = &cam975_cpas100_addr_trans_client_info[0], +}; + +static struct cam_camnoc_info cam975_cpas100_camnoc_info_rt = { + .specific = &cam_cpas_v975_100_camnoc_specific_rt[0], + .specific_size = ARRAY_SIZE(cam_cpas_v975_100_camnoc_specific_rt), + .irq_sbm = &cam_cpas_v975_100_irq_sbm_rt, + .irq_err = &cam_cpas_v975_100_irq_err_rt[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v975_100_irq_err_rt), + .err_logger = &cam975_cpas100_err_logger_offsets, + .errata_wa_list = &cam975_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x20, + .sbm_clear_mask = 0x4, + }, +}; + +static struct cam_camnoc_info cam975_cpas100_camnoc_info_nrt = { + .specific = &cam_cpas_v975_100_camnoc_specific_nrt[0], + .specific_size = ARRAY_SIZE(cam_cpas_v975_100_camnoc_specific_nrt), + .irq_sbm = &cam_cpas_v975_100_irq_sbm_nrt, + .irq_err = &cam_cpas_v975_100_irq_err_nrt[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v975_100_irq_err_nrt), + .err_logger = &cam975_cpas100_err_logger_offsets, + .errata_wa_list = &cam975_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x400, 
+ .sbm_clear_mask = 0x1, + }, + .addr_trans_info = &cam975_cpas100_addr_trans_info, +}; + +static struct cam_cpas_camnoc_qchannel cam975_cpas100_qchannel_info_rt = { + .qchannel_ctrl = 0xEC, + .qchannel_status = 0xF0, +}; + +static struct cam_cpas_camnoc_qchannel cam975_cpas100_qchannel_info_nrt = { + .qchannel_ctrl = 0xF4, + .qchannel_status = 0xF8, +}; + +/* + * struct cam_cpas_secure_info: CPAS secure information are used only for + * debug purpose, Register access is restricted in normal builds. + * + */ +static struct cam_cpas_secure_info cam975_cpas100_secure_info = { + .secure_access_ctrl_offset = 0x1C, + .secure_access_ctrl_value = 0xFFFFFFFF, +}; + +static struct cam_cpas_subpart_info cam975_cpas_camera_subpart_info = { + .num_bits = 3, + /* + * Below fuse indexing is based on software fuse definition which is in SMEM and provided + * by XBL team. + */ + .hw_bitmap_mask = { + {CAM_CPAS_ISP_FUSE, BIT(0)}, + {CAM_CPAS_ISP_FUSE, BIT(1)}, + {CAM_CPAS_ISP_FUSE, BIT(2)}, + } +}; + +static struct cam_cpas_info cam975_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 2, + .hw_caps_offsets = {0x8, 0xDC}, + }, + .qchannel_info = {&cam975_cpas100_qchannel_info_rt, + &cam975_cpas100_qchannel_info_nrt}, + .num_qchannel = 2, + .hw_caps_secure_info = &cam975_cpas100_secure_info, + .subpart_info = &cam975_cpas_camera_subpart_info, +}; + +#endif /* _CPASTOP_V975_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v980_100.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v980_100.h new file mode 100644 index 0000000000..c6d4a8bce9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/cpas_top/cpastop_v980_100.h @@ -0,0 +1,1693 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CPASTOP_V980_100_H_ +#define _CPASTOP_V980_100_H_ + +static struct cam_camnoc_irq_sbm cam_cpas_v980_100_irq_sbm_rt = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_RT_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* RT_SBM_FAULTINEN0_LOW_PORT0_MASK - Slave error IRQ */ + 0x02, /* RT_SBM_FAULTINEN0_LOW_PORT1_MASK - TFE UBWC Encoder Error IRQ */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_RT_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_RT_SBM_FLAGOUTCLR0_LOW */ + .value = 0x7, + } +}; + +static struct cam_camnoc_irq_sbm cam_cpas_v980_100_irq_sbm_nrt = { + .sbm_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x240, /* CAM_NOC_NRT_SBM_FAULTINEN0_LOW */ + .value = 0x01 | /* NRT_SBM_FAULTINEN0_LOW_PORT0_MASK - Slave Error */ + 0x02 | /* NRT_SBM_FAULTINEN0_LOW_PORT1_MASK - IPE WR UBWC En */ + 0x04 | /* NRT_SBM_FAULTINEN0_LOW_PORT2_MASK - OFE WR UBWC En */ + 0x08 | /* NRT_SBM_FAULTINEN0_LOW_PORT3_MASK - OFE RD UBWC De */ + 0x10 | /* NRT_SBM_FAULTINEN0_LOW_PORT4_MASK - IPE RD UBWC En */ + 0x20, /* NRT_SBM_FAULTINEN0_LOW_PORT5_MASK - IPE RD UBWC En */ + }, + .sbm_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x248, /* CAM_NOC_NRT_SBM_FAULTINSTATUS0_LOW */ + }, + .sbm_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x280, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + .value = 0x7, + } +}; + +static struct cam_camnoc_irq_err + cam_cpas_v980_100_irq_err_rt[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* RT_SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_RT_ERL_MAINCTL_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = 
CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_RT_ERL_ERRVLD_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = true, + .offset = 0x18, /* CAM_NOC_RT_ERL_ERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_TFE_UBWC_ENCODE_ERROR, + .enable = false, + .sbm_port = 0x2, /* RT_SBM_FAULTINSTATUS0_LOW_PORT1_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = false, + .offset = 0x47A0, /* TFE_UBWC : RT_0_NIU_ENCERREN_LOW */ + .value = 0xF, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = false, + .offset = 0x4790, /* IFE_UBWC : RT_0_NIU_ENCERRSTATUS_LOW */ + }, + .err_clear = { + .access_type = CAM_REG_TYPE_WRITE, + .enable = false, + .offset = 0x4798, /* IFE_UBWC : RT_0_NIU_ENCERRCLR_LOW */ + .value = 0x1, + }, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2, + .enable = false, + }, + { + .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST, + .enable = false, + .sbm_port = 0x20, /* RT_SBM_FAULTINSTATUS0_LOW_PORT5_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x288, /* RT_CAM_NOC_RT_SBM_FLAGOUTSET0_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_RT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_RT_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_irq_err + cam_cpas_v980_100_irq_err_nrt[] = { + { + .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR, + .enable = true, + .sbm_port = 0x1, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT0_MASK */ + .err_enable = { + .access_type = CAM_REG_TYPE_READ_WRITE, + .enable = true, + .offset = 0x8, /* CAM_NOC_NRT_ERL_MAINCTL_LOW */ + .value = 0x1, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x10, /* CAM_NOC_NRT_ERL_ERRVLD_LOW */ + }, + .err_clear = { + 
.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x18, /* CAM_NOC_NRT_ERL_ERRCLR_LOW */
			.value = 0x1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_OFE_UBWC_WRITE_ENCODE_ERROR,
		.enable = true,
		.sbm_port = 0x4, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x63A0, /* OFE_WR : NRT_3_NIU_ENCERREN_LOW */
			.value = 0xF,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x6390, /* OFE_WR : NRT_3_NIU_ENCERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x6398, /* OFE_WR : NRT_3_NIU_ENCERRCLR_LOW */
			.value = 0x1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_OFE_UBWC_READ_DECODE_ERROR,
		.enable = true,
		.sbm_port = 0x8, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x6520, /* OFE_RD : NRT_4_NIU_DECERREN_LOW */
			.value = 0xFF,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x6510, /* OFE_RD : NRT_4_NIU_DECERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x6518, /* OFE_RD : NRT_4_NIU_DECERRCLR_LOW */
			.value = 0x1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_UBWC_ENCODE_ERROR,
		.enable = true,
		.sbm_port = 0x2, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x59A0, /* IPE_WR : NRT_1_NIU_ENCERREN_LOW */
			.value = 0xF,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x5990, /* IPE_WR : NRT_1_NIU_ENCERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x5998, /* IPE_WR : NRT_1_NIU_ENCERRCLR_LOW */
			.value = 0x1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IPE0_UBWC_DECODE_ERROR,
		.enable = true,
		.sbm_port = 0x20, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x6D20, /* IPE_0_RD : NRT_8_NIU_DECERREN_LOW */
			.value = 0xFF,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x6D10, /* IPE_0_RD : NRT_8_NIU_DECERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x6D18, /* IPE_0_RD : NRT_8_NIU_DECERRCLR_LOW */
			.value = 0x1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IPE1_UBWC_DECODE_ERROR,
		.enable = true,
		.sbm_port = 0x10, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x6B20, /* IPE_1_RD : NRT_7_NIU_DECERREN_LOW */
			.value = 0xFF,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x6B10, /* IPE_1_RD : NRT_7_NIU_DECERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x6B18, /* IPE_1_RD : NRT_7_NIU_DECERRCLR_LOW */
			/*
			 * NOTE(review): every sibling err_clear in this table writes
			 * 0x1; 0xFF here looks like a copy of the DECERREN mask —
			 * confirm against the NIU register spec before changing.
			 */
			.value = 0xFF,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
		.enable = false,
		.sbm_port = 0x40, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT6_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x288, /* CAM_NOC_NRT_SBM_FLAGOUTSET0_LOW */
			.value = 0xE,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x290, /* CAM_NOC_NRT_SBM_FLAGOUTSTATUS0_LOW */
		},
		.err_clear = {
			.enable = false, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
		.enable = false,
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
		.enable = false,
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
		.enable = false,
		.sbm_port = 0x400, /* NRT_SBM_FAULTINSTATUS0_LOW_PORT10_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset
= 0x288, /* CAM_NOC_NRT_SBM_FLAGOUTSET0_LOW */ + .value = 0x2, + }, + .err_status = { + .access_type = CAM_REG_TYPE_READ, + .enable = true, + .offset = 0x290, /* CAM_NOC_NRT_SBM_FLAGOUTSTATUS0_LOW */ + }, + .err_clear = { + .enable = false, /* CAM_NOC_NRT_SBM_FLAGOUTCLR0_LOW */ + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v980_100_camnoc_specific_rt[] = { + /* RT ports */ + { + .port_name = "RT0-TFE0,1_FULL_DS2_PDAF1_TFE2_RDI_IR_DS4_DS16", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4630, /* NOC_RT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x66655554, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4634, /* NOC_RT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4638, /* NOC_RT_0_NIU_URGENCY_LOW */ + .value = 0x00001E40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4640, /* NOC_RT_0_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4648, /* NOC_RT_0_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. 
+ */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4E08, /* NOC_RT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4008, /* NOC_RT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4020, /* NOC_RT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4024, /* NOC_RT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4620, /* NOC_RT_0_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT1-TFE0,1,2_PDAF_FD_IFE_LITE_0,1_RDI_GAMMA_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4830, /* NOC_RT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x55544444, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4834, /* NOC_RT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666665, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4838, /* NOC_RT_1_NIU_URGENCY_LOW */ + .value = 0x00001C40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4840, /* NOC_RT_1_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4848, /* NOC_RT_1_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. 
+ * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4E88, /* NOC_RT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4088, /* NOC_RT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40A0, /* NOC_RT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x40A4, /* NOC_RT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4820, /* NOC_RT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT2-TFE0,1,2_STATS", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A30, /* NOC_RT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x55554444, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A34, /* NOC_RT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666665, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A38, /* NOC_RT_2_NIU_URGENCY_LOW */ + .value = 0x00001C40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A40, /* NOC_RT_2_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A48, /* NOC_RT_2_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not 
explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4F08, /* NOC_RT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4108, /* NOC_RT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4120, /* NOC_RT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4124, /* NOC_RT_2_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x4A20, /* NOC_RT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "RT3-TFE0,1,2_IFE_LITE0,1_CDM", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C30, /* NOC_RT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x33333333, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C34, /* NOC_RT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x33333333, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C38, /* NOC_RT_3_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C40, /* NOC_RT_3_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C48, /* NOC_RT_3_NIU_SAFELUT_LOW */ + .value = 0xF, + }, + 
.ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. + * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4F88, /* NOC_RT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4188, /* NOC_RT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x41A0, /* NOC_RT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x41A4, /* NOC_RT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "RT4-TFE0,1_RDI_IR_DS4_DS16_TFE2_FULL_DS2_PDAF1", + .enable = true, + .priority_lut_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5230, /* NOC_RT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x65555544, + }, + .priority_lut_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5234, /* NOC_RT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x66666666, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5238, /* NOC_RT_4_NIU_URGENCY_LOW */ + .value = 0x00001E40, + }, + .danger_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5240, /* NOC_RT_4_NIU_DANGERLUT_LOW */ + .value = 0xFFFFFF00, + }, + .safe_lut = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5248, /* NOC_RT_4_NIU_SAFELUT_LOW */ + .value = 0x0000000F, + }, + .ubwc_ctl = { + /* + * Do not explicitly set ubwc config register. 
+ * Power on default values are taking care of required + * register settings. + */ + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5408, /* NOC_RT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .maxwr_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5220, /* NOC_RT_4_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_specific + cam_cpas_v980_100_camnoc_specific_nrt[] = { + /* NRT ports */ + { + .port_name = "NRT0-IPE_WR_1", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5630, /* IPE_WR_1 : NOC_NRT_0_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5634, /* IPE_WR_1 : NOC_NRT_0_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5638, /* IPE_WR_1 : NOC_NRT_0_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5640, /* IPE_WR_1 : NOC_NRT_0_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5648, /* IPE_WR_1 : NOC_NRT_0_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7008, /* IPE_WR_1 : NOC_NRT_0_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4808, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { 
+ .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4820, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4824, /* IPE_WR_1 : NOC_NRT_0_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT1-IPE_WR_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5830, /* IPE_WR_0 : NOC_NRT_1_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5834, /* IPE_WR_0 : NOC_NRT_1_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5838, /* IPE_WR_0 : NOC_NRT_1_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5840, /* IPE_WR_0 : NOC_NRT_1_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x5848, /* IPE_WR_0 : NOC_NRT_1_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7088, /* IPE_WR_0 : NOC_NRT_1_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4888, /* IPE_WR_0 : NOC_NRT_1_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48A0, /* IPE_WR_0 : NOC_NRT_1_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + 
.qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x48A4, /* IPE_WR_0 : NOC_NRT_1_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x5820, /* IPE_WR_0 : NOC_NRT_1_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT2-OFE_WR_1-CRE_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6030, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6034, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6038, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6040, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6048, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7108, /* OFE_WR_1-CRE_WR : NOC_NRT_2_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4908, /* OFE_WR_1-CRE_WR : NOC_NRT_2_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4920, /* OFE_WR_1-CRE_WR : 
NOC_NRT_2_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4924, /* OFE_WR_1-CRE_WR:NOC_NRT_2_BPS_WR_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6020, /* OFE_WR_1-CRE_WR : NOC_NRT_2_NIU_MAXWR_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT3-OFE_WR_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6230, /* OFE_WR_0 : NOC_NRT_3_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6234, /* OFE_WR_0 : NOC_NRT_3_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6238, /* OFE_WR_0 : NOC_NRT_3_NIU_URGENCY_LOW */ + .value = 0x30, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6240, /* OFE_WR_0 : NOC_NRT_3_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6248, /* OFE_WR_0 : NOC_NRT_3_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7188, /* OFE_WR_0 : NOC_NRT_3_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4988, /* OFE_WR_0 : NOC_NRT_3_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x49A0, /* 
OFE_WR_0 : NOC_NRT_3_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x49A4, /* OFE_WR_0 : NOC_NRT_3_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "NRT4-OFE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6430, /* OFE_RD : NOC_NRT_4_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6434, /* OFE_RD : NOC_NRT_4_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6438, /* OFE_RD : NOC_NRT_4_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6440, /* OFE_RD : NOC_NRT_4_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6448, /* OFE_RD : NOC_NRT_4_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6508, /* OFE_RD : NOC_NRT_4_NIU_DECCTL_LOW */ + .value = 1, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7208, /* OFE_RD : NOC_NRT_4_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A08, /* OFE_RD : NOC_NRT_4_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A20, /* OFE_RD : NOC_NRT_4_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + 
.qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A24, /* OFE_RD : NOC_NRT_4_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6410, /* OFE_RD : NOC_NRT_4_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT5-CRE_RD", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6630, /* CRE_RD : NOC_NRT_5_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6634, /* CRE_RD : NOC_NRT_5_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6638, /* CRE_RD : NOC_NRT_5_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6640, /* CRE_RD : NOC_NRT_5_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6648, /* CRE_RD : NOC_NRT_5_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7288, /* CRE_RD : NOC_NRT_5_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4A88, /* CRE_RD : NOC_NRT_5_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4AA0, /* CRE_RD : NOC_NRT_5_QOSGEN_SHAPING_LOW */ + .value = 0x14141414, + }, + .qosgen_shaping_high = { + 
.enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4AA4, /* CRE_RD : NOC_NRT_5_QOSGEN_SHAPING_HIGH */ + .value = 0x14141414, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6610, /* CRE_RD : NOC_NRT_5_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT6-JPEG0,1,2,3_RD_WR", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6830, /* JPEG_RD_WR : NOC_NRT_6_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6834, /* JPEG_RD_WR : NOC_NRT_6_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6838, /* JPEG_RD_WR : NOC_NRT_6_NIU_URGENCY_LOW */ + .value = 0x22, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6840, /* JPEG_RD_WR : NOC_NRT_6_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6848, /* JPEG_RD_WR : NOC_NRT_6_NIU_SAFELUT_LOW */ + .value = 0xFFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7308, /* JPEG_RD_WR : NOC_NRT_6_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B08, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_MAINCTL */ + .value = 0x2, + }, + .qosgen_shaping_low = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B20, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_SHAPING_LOW */ + .value = 0x10101010, + }, + 
.qosgen_shaping_high = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B24, /* JPEG_RD_WR : NOC_NRT_6_QOSGEN_SHAPING_HIGH */ + .value = 0x10101010, + }, + .maxwr_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6820, /* JPEG_RD_WR : NOC_NRT_6_NIU_MAXWR_LOW */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6810, /* JPEG_RD_WR : NOC_NRT_6_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT7-IPE_RD_1", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A30, /* IPE_WR_1 : NOC_NRT_7_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A34, /* IPE_WR_1 : NOC_NRT_7_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A38, /* IPE_WR_1 : NOC_NRT_7_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A40, /* IPE_WR_1 : NOC_NRT_7_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6A48, /* IPE_WR_1 : NOC_NRT_7_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7388, /* IPE_WR_1 : NOC_NRT_7_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4B88, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low 
= { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4BA0, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4BA4, /* IPE_WR_1 : NOC_NRT_7_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6A10, /* IPE_WR_1 : NOC_NRT_7_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT8-IPE_RD_0", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C30, /* IPE_RD_0 : NOC_NRT_8_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C34, /* IPE_RD_0 : NOC_NRT_8_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C38, /* IPE_RD_0 : NOC_NRT_8_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C40, /* IPE_RD_0 : NOC_NRT_8_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6C48, /* IPE_RD_0 : NOC_NRT_8_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7408, /* IPE_RD_0 : NOC_NRT_8_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C08, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + 
.enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C20, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C24, /* IPE_RD_0 : NOC_NRT_8_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6C10, /* IPE_RD_0 : NOC_NRT_8_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "NRT9-CDM_IPE_OFE", + .enable = true, + .priority_lut_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E30, /* CDM_IPE_OFE : NOC_NRT_9_NIU_PRIORITYLUT_LOW */ + .value = 0x0, + }, + .priority_lut_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E34, /* CDM_IPE_OFE : NOC_NRT_9_NIU_PRIORITYLUT_HIGH */ + .value = 0x0, + }, + .urgency = { + .enable = true, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E38, /* CDM_IPE_OFE : NOC_NRT_9_NIU_URGENCY_LOW */ + .value = 0x3, + }, + .danger_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E40, /* CDM_IPE_OFE : NOC_NRT_9_NIU_DANGERLUT_LOW */ + .value = 0x0, + }, + .safe_lut = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x6E48, /* CDM_IPE_OFE : NOC_NRT_9_NIU_SAFELUT_LOW */ + .value = 0x0000FFFF, + }, + .ubwc_ctl = { + .enable = false, + }, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7488, /* CDM_IPE_OFE : NOC_NRT_9_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4C88, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_MAINCTL */ + .value = 0x0, + }, + 
.qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4CA0, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4CA4, /* CDM_IPE_OFE : NOC_NRT_9_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + .maxrd_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ, + .masked_value = 0, + .offset = 0x6E10, /* CDM_IPE_OFE : NOC_NRT_9_NIU_MAXRD_LOW */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_0_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7508, /* ICP_RD_WR : NOC_XM_ICP_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D08, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D20, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D24, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, + { + .port_name = "ICP_1_RD_WR", + .enable = false, + .dynattr_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x7588, /* ICP_RD_WR : NOC_XM_ICP_DYNATTR_MAINCTL */ + .value = 0x0, + }, + .qosgen_mainctl = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4D88, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_MAINCTL */ + .value = 0x0, + }, + .qosgen_shaping_low = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4DA0, /* ICP_RD_WR : 
NOC_XM_ICP_QOSGEN_SHAPING_LOW */ + .value = 0x0, + }, + .qosgen_shaping_high = { + .enable = false, + .access_type = CAM_REG_TYPE_READ_WRITE, + .masked_value = 0, + .offset = 0x4DA4, /* ICP_RD_WR : NOC_XM_ICP_QOSGEN_SHAPING_HIGH */ + .value = 0x0, + }, + }, +}; + +static struct cam_camnoc_err_logger_info cam980_cpas100_err_logger_offsets = { + .mainctrl = 0x08, /* NOC_ERL_MAINCTL_LOW */ + .errvld = 0x10, /* NOC_ERL_ERRVLD_LOW */ + .errlog0_low = 0x20, /* NOC_ERL_ERRLOG0_LOW */ + .errlog0_high = 0x24, /* NOC_ERL_ERRLOG0_HIGH */ + .errlog1_low = 0x28, /* NOC_ERL_ERRLOG1_LOW */ + .errlog1_high = 0x2C, /* NOC_ERL_ERRLOG1_HIGH */ + .errlog2_low = 0x30, /* NOC_ERL_ERRLOG2_LOW */ + .errlog2_high = 0x34, /* NOC_ERL_ERRLOG2_HIGH */ + .errlog3_low = 0x38, /* NOC_ERL_ERRLOG3_LOW */ + .errlog3_high = 0x3C, /* NOC_ERL_ERRLOG3_HIGH */ +}; + +static struct cam_cpas_hw_errata_wa_list cam980_cpas100_errata_wa_list = { + .camnoc_flush_slave_pending_trans = { + .enable = false, + .data.reg_info = { + .access_type = CAM_REG_TYPE_READ, + .offset = 0x300, /* sbm_SenseIn0_Low */ + .mask = 0xE0000, /* Bits 17, 18, 19 */ + .value = 0, /* expected to be 0 */ + }, + }, + .enable_icp_clk_for_qchannel = { + .enable = false, + }, +}; + +static struct cam_cpas_cesta_vcd_reg_info cam_cpas_v980_100_cesta_reg_info = { + .vcd_currol = { + .reg_offset = 0x266C, + .vcd_base_inc = 0x210, + .num_vcds = 8, + }, +}; + +static struct cam_cpas_vcd_info cam_v980_100_vcd_info[] = { + { + .index = 0, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_0_clk_src", + }, + { + .index = 1, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_1_clk_src", + }, + { + .index = 2, .type = CAM_CESTA_CRMC, .clk = "cam_cc_tfe_2_clk_src", + }, + { + .index = 6, .type = CAM_CESTA_CRMC, .clk = "cam_cc_csid_clk_src", + }, + { + .index = 7, .type = CAM_CESTA_CRMC, .clk = "cam_cc_cphy_rx_clk_src", + }, + { + .index = 8, .type = CAM_CESTA_CRMB, .clk = "cam_cc_camnoc_axi_rt_clk_src", + }, +}; + +static struct cam_cpas_cesta_info cam_v980_cesta_info 
= { + .vcd_info = &cam_v980_100_vcd_info[0], + .num_vcds = ARRAY_SIZE(cam_v980_100_vcd_info), + .cesta_reg_info = &cam_cpas_v980_100_cesta_reg_info, +}; + +static struct cam_camnoc_addr_trans_client_info + cam980_cpas100_addr_trans_client_info[] = { + { + .client_name = "icp1", + .reg_enable = 0x5508, + .reg_offset0 = 0x5518, + .reg_base1 = 0x5520, + .reg_offset1 = 0x5528, + .reg_base2 = 0x5530, + .reg_offset2 = 0x5538, + .reg_base3 = 0x5540, + .reg_offset3 = 0x5548, + }, +}; + +static struct cam_camnoc_addr_trans_info cam980_cpas100_addr_trans_info = { + .num_supported_clients = ARRAY_SIZE(cam980_cpas100_addr_trans_client_info), + .addr_trans_client_info = &cam980_cpas100_addr_trans_client_info[0], +}; + +static struct cam_camnoc_info cam980_cpas100_camnoc_info_rt = { + .specific = &cam_cpas_v980_100_camnoc_specific_rt[0], + .specific_size = ARRAY_SIZE(cam_cpas_v980_100_camnoc_specific_rt), + .irq_sbm = &cam_cpas_v980_100_irq_sbm_rt, + .irq_err = &cam_cpas_v980_100_irq_err_rt[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v980_100_irq_err_rt), + .err_logger = &cam980_cpas100_err_logger_offsets, + .errata_wa_list = &cam980_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x20, + .sbm_clear_mask = 0x4, + }, +}; + +static struct cam_camnoc_info cam980_cpas100_camnoc_info_nrt = { + .specific = &cam_cpas_v980_100_camnoc_specific_nrt[0], + .specific_size = ARRAY_SIZE(cam_cpas_v980_100_camnoc_specific_nrt), + .irq_sbm = &cam_cpas_v980_100_irq_sbm_nrt, + .irq_err = &cam_cpas_v980_100_irq_err_nrt[0], + .irq_err_size = ARRAY_SIZE(cam_cpas_v980_100_irq_err_nrt), + .err_logger = &cam980_cpas100_err_logger_offsets, + .errata_wa_list = &cam980_cpas100_errata_wa_list, + .test_irq_info = { + .sbm_enable_mask = 0x400, + .sbm_clear_mask = 0x1, + }, + .addr_trans_info = &cam980_cpas100_addr_trans_info, +}; + +static struct cam_cpas_camnoc_qchannel cam980_cpas100_qchannel_info_rt = { + .qchannel_ctrl = 0xEC, + .qchannel_status = 0xF0, +}; + +static struct 
cam_cpas_camnoc_qchannel cam980_cpas100_qchannel_info_nrt = { + .qchannel_ctrl = 0xF4, + .qchannel_status = 0xF8, +}; + +/* + * struct cam_cpas_secure_info: CPAS secure information are used only for + * debug purpose, Register access is restricted in normal builds. + * + */ +static struct cam_cpas_secure_info cam980_cpas100_secure_info = { + .secure_access_ctrl_offset = 0x1C, + .secure_access_ctrl_value = 0xFFFFFFFF, +}; + +static struct cam_cpas_subpart_info cam980_cpas_camera_subpart_info = { + .num_bits = 3, + /* + * Below fuse indexing is based on software fuse definition which is in SMEM and provided + * by XBL team. + */ + .hw_bitmap_mask = { + {CAM_CPAS_ISP_FUSE, BIT(0)}, + {CAM_CPAS_ISP_FUSE, BIT(1)}, + {CAM_CPAS_ISP_FUSE, BIT(2)}, + } +}; + +static struct cam_cpas_info cam980_cpas100_cpas_info = { + .hw_caps_info = { + .num_caps_registers = 2, + .hw_caps_offsets = {0x8, 0xDC}, + }, + .qchannel_info = {&cam980_cpas100_qchannel_info_rt, + &cam980_cpas100_qchannel_info_nrt}, + .num_qchannel = 2, + .hw_caps_secure_info = &cam980_cpas100_secure_info, + .subpart_info = &cam980_cpas_camera_subpart_info, +}; + +#endif /* _CPASTOP_V980_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cpas/include/cam_cpas_api.h b/qcom/opensource/camera-kernel/drivers/cam_cpas/include/cam_cpas_api.h new file mode 100644 index 0000000000..2219d75978 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cpas/include/cam_cpas_api.h @@ -0,0 +1,1176 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CPAS_API_H_ +#define _CAM_CPAS_API_H_ + +#include +#include + +#include +#include "cam_soc_util.h" +#include "cam_req_mgr_interface.h" + +#define CAM_HW_IDENTIFIER_LENGTH 128 + +/* Default AXI Bandwidth vote */ +#define CAM_CPAS_DEFAULT_AXI_BW 1024 + +/* Default RT AXI Bandwidth vote */ +#define CAM_CPAS_DEFAULT_RT_AXI_BW 2000000000L + +#define CAM_CPAS_MAX_PATHS_PER_CLIENT 15 +#define CAM_CPAS_API_PATH_DATA_STD_START 512 + +#define CAM_CPAS_VOTE_LEVEL_NONE 0 +#define CAM_CPAS_VOTE_LEVEL_MAX 3 + +/* QoS Selection mask */ +#define CAM_CPAS_QOS_DEFAULT_SETTINGS_MASK 0x1 +#define CAM_CPAS_QOS_CUSTOM_SETTINGS_MASK 0x2 + +#define CAM_CPAS_SHDR_SYS_CACHE_SHDR_MAX_ID(type) (type + 1) + +/* Reg base type for Smart QoS update */ +#define CAM_CAMNOC_HW_RT_MASK 0x1 +#define CAM_CAMNOC_HW_NRT_MASK 0x2 +#define CAM_CAMNOC_HW_COMBINED_MASK 0x4 +#define CAM_CAMNOC_HW_TYPE_SHIFT 28 + +/** + * Secure camera QoS update id - Enum for identify QOS settings update type + */ +enum secure_camera_qos_update_type { + CAM_QOS_UPDATE_TYPE_STATIC = 0x0, + CAM_QOS_UPDATE_TYPE_SMART = 0x1, + CAM_QOS_UPDATE_TYPE_MAX, +}; + +/** + * enum cam_cpas_regbase_types - Enum for cpas regbase available for clients + * to read/write + */ +enum cam_cpas_regbase_types { + CAM_CPAS_REGBASE_CPASTOP, + CAM_CPAS_REGBASE_MAX +}; + +/** + * enum cam_cpas_vote_type - Enum for cpas vote type + */ +enum cam_cpas_vote_type { + CAM_CPAS_VOTE_TYPE_HLOS, + CAM_CPAS_VOTE_TYPE_DRV, + CAM_CPAS_VOTE_TYPE_MAX, +}; + +/** + * enum cam_cpas_hw_index - Enum for identify HW index + */ +enum cam_cpas_hw_index { + CAM_CPAS_HW_IDX_ANY = 0, + CAM_CPAS_HW_IDX_0 = 1<<0, + CAM_CPAS_HW_IDX_1 = 1<<1, + CAM_CPAS_HW_IDX_2 = 1<<2, + CAM_CPAS_HW_IDX_3 = 1<<3, + CAM_CPAS_HW_IDX_4 = 1<<4, + CAM_CPAS_HW_IDX_5 = 1<<5, + CAM_CPAS_HW_IDX_6 = 1<<6, + CAM_CPAS_HW_IDX_7 = 1<<7, + CAM_CPAS_HW_IDX_MAX = 1<<8 +}; + +/** + * enum cam_cpas_camera_version Enum for Titan Camera Versions + */ +enum cam_cpas_camera_version { + 
CAM_CPAS_CAMERA_VERSION_NONE = 0, + CAM_CPAS_CAMERA_VERSION_150 = 0x00010500, + CAM_CPAS_CAMERA_VERSION_170 = 0x00010700, + CAM_CPAS_CAMERA_VERSION_175 = 0x00010705, + CAM_CPAS_CAMERA_VERSION_480 = 0x00040800, + CAM_CPAS_CAMERA_VERSION_520 = 0x00050200, + CAM_CPAS_CAMERA_VERSION_540 = 0x00050400, + CAM_CPAS_CAMERA_VERSION_580 = 0x00050800, + CAM_CPAS_CAMERA_VERSION_545 = 0x00050405, + CAM_CPAS_CAMERA_VERSION_570 = 0x00050700, + CAM_CPAS_CAMERA_VERSION_680 = 0x00060800, + CAM_CPAS_CAMERA_VERSION_165 = 0x00010605, + CAM_CPAS_CAMERA_VERSION_780 = 0x00070800, + CAM_CPAS_CAMERA_VERSION_640 = 0x00060400, + CAM_CPAS_CAMERA_VERSION_880 = 0x00080800, + CAM_CPAS_CAMERA_VERSION_975 = 0x00090705, + CAM_CPAS_CAMERA_VERSION_970 = 0x00090700, + CAM_CPAS_CAMERA_VERSION_980 = 0x00090800, + CAM_CPAS_CAMERA_VERSION_1080 = 0x000100800, + CAM_CPAS_CAMERA_VERSION_MAX +}; + +/** + * enum cam_cpas_version Enum for Titan CPAS Versions + */ +enum cam_cpas_version { + CAM_CPAS_VERSION_NONE = 0, + CAM_CPAS_VERSION_100 = 0x10000000, + CAM_CPAS_VERSION_101 = 0x10000001, + CAM_CPAS_VERSION_110 = 0x10010000, + CAM_CPAS_VERSION_120 = 0x10020000, + CAM_CPAS_VERSION_130 = 0x10030000, + CAM_CPAS_VERSION_200 = 0x20000000, + CAM_CPAS_VERSION_MAX +}; + +/** + * enum cam_cpas_camera_version_map_id Enum for camera version map id + * This enum is mapped with cam_cpas_camera_version + */ +enum cam_cpas_camera_version_map_id { + CAM_CPAS_CAMERA_VERSION_ID_150 = 0x0, + CAM_CPAS_CAMERA_VERSION_ID_170 = 0x1, + CAM_CPAS_CAMERA_VERSION_ID_175 = 0x2, + CAM_CPAS_CAMERA_VERSION_ID_480 = 0x3, + CAM_CPAS_CAMERA_VERSION_ID_580 = 0x4, + CAM_CPAS_CAMERA_VERSION_ID_520 = 0x5, + CAM_CPAS_CAMERA_VERSION_ID_540 = 0x6, + CAM_CPAS_CAMERA_VERSION_ID_545 = 0x7, + CAM_CPAS_CAMERA_VERSION_ID_570 = 0x8, + CAM_CPAS_CAMERA_VERSION_ID_680 = 0x9, + CAM_CPAS_CAMERA_VERSION_ID_165 = 0xA, + CAM_CPAS_CAMERA_VERSION_ID_780 = 0xB, + CAM_CPAS_CAMERA_VERSION_ID_640 = 0xC, + CAM_CPAS_CAMERA_VERSION_ID_880 = 0xD, + CAM_CPAS_CAMERA_VERSION_ID_980 
= 0xE, + CAM_CPAS_CAMERA_VERSION_ID_1080 = 0xF, + CAM_CPAS_CAMERA_VERSION_ID_975 = 0x10, + CAM_CPAS_CAMERA_VERSION_ID_970 = 0x11, + CAM_CPAS_CAMERA_VERSION_ID_MAX +}; + +/** + * enum cam_cpas_version_map_id Enum for cpas version map id + * This enum is mapped with cam_cpas_version + */ +enum cam_cpas_version_map_id { + CAM_CPAS_VERSION_ID_100 = 0x0, + CAM_CPAS_VERSION_ID_101 = 0x1, + CAM_CPAS_VERSION_ID_110 = 0x2, + CAM_CPAS_VERSION_ID_120 = 0x3, + CAM_CPAS_VERSION_ID_130 = 0x4, + CAM_CPAS_VERSION_ID_200 = 0x5, + CAM_CPAS_VERSION_ID_MAX +}; + +/** + * enum cam_cpas_hw_version - Enum for Titan CPAS HW Versions + */ +enum cam_cpas_hw_version { + CAM_CPAS_TITAN_NONE = 0, + CAM_CPAS_TITAN_150_V100 = 0x150100, + CAM_CPAS_TITAN_165_V100 = 0x165100, + CAM_CPAS_TITAN_170_V100 = 0x170100, + CAM_CPAS_TITAN_170_V110 = 0x170110, + CAM_CPAS_TITAN_170_V120 = 0x170120, + CAM_CPAS_TITAN_170_V200 = 0x170200, + CAM_CPAS_TITAN_175_V100 = 0x175100, + CAM_CPAS_TITAN_175_V101 = 0x175101, + CAM_CPAS_TITAN_175_V120 = 0x175120, + CAM_CPAS_TITAN_175_V130 = 0x175130, + CAM_CPAS_TITAN_480_V100 = 0x480100, + CAM_CPAS_TITAN_580_V100 = 0x580100, + CAM_CPAS_TITAN_540_V100 = 0x540100, + CAM_CPAS_TITAN_520_V100 = 0x520100, + CAM_CPAS_TITAN_545_V100 = 0x545100, + CAM_CPAS_TITAN_570_V100 = 0x570100, + CAM_CPAS_TITAN_570_V200 = 0x570200, + CAM_CPAS_TITAN_680_V100 = 0x680100, + CAM_CPAS_TITAN_680_V110 = 0x680110, + CAM_CPAS_TITAN_780_V100 = 0x780100, + CAM_CPAS_TITAN_640_V200 = 0x640200, + CAM_CPAS_TITAN_880_V100 = 0x880100, + CAM_CPAS_TITAN_975_V100 = 0x975100, + CAM_CPAS_TITAN_970_V110 = 0x970110, + CAM_CPAS_TITAN_980_V100 = 0x980100, + CAM_CPAS_TITAN_1080_V100 = 0x1080100, + CAM_CPAS_TITAN_MAX +}; + +/** + * enum cam_camnoc_slave_error_codes - Enum for camnoc slave error codes + * + */ +enum cam_camnoc_slave_error_codes { + CAM_CAMNOC_TARGET_ERROR, + CAM_CAMNOC_ADDRESS_DECODE_ERROR, + CAM_CAMNOC_UNSUPPORTED_REQUEST_ERROR, + CAM_CAMNOC_DISCONNECTED_TARGET_ERROR, + CAM_CAMNOC_SECURITY_VIOLATION, + 
CAM_CAMNOC_HIDDEN_SECURITY_VIOLATION, + CAM_CAMNOC_TIME_OUT, + CAM_CAMNOC_UNUSED, +}; + +/** + * enum cam_camnoc_irq_type - Enum for camnoc irq types + * + * @CAM_CAMNOC_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and + * 1 QHB port) has an error logger. The error + * observed at any slave port is logged into + * the error logger register and an IRQ is + * triggered + * @CAM_CAMNOC_IRQ_IFE_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the IFE UBWC encoder instance + * @CAM_CAMNOC_IRQ_IFE_UBWC_STATS_ENCODE_ERROR: Triggered if any error detected + * in the IFE UBWC-Stats encoder + * instance + * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the IFE0 UBWC encoder instance + * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the IFE1 or IFE3 UBWC encoder + * instance + * @CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the IFE0 UBWC encoder instance + * @CAM_CAMNOC_IRQ_IFE1_WR_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the IFE1 UBWC encoder + * instance + * @CAM_CAMNOC_IRQ_IPE_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the IPE write path encoder + * instance + * @CAM_CAMNOC_IRQ_BPS_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the BPS write path encoder + * instance + * @CAM_CAMNOC_IRQ_IPE1_BPS_UBWC_DECODE_ERROR: Triggered if any error detected + * in the IPE1/BPS read path decoder + * instance + * @CAM_CAMNOC_IRQ_IPE0_UBWC_DECODE_ERROR : Triggered if any error detected + * in the IPE0 read path decoder + * instance + * @CAM_CAMNOC_IRQ_IPE1_UBWC_DECODE_ERROR : Triggered if any error detected + * in the IPE1 read path decoder + * instance + * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error detected + * in the IPE/BPS UBWC decoder + * instance + * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error detected + * in the IPE/BPS UBWC encoder + * instance + * 
@CAM_CAMNOC_IRQ_OFE_WR_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the OFE write UBWC encoder + * instance + * @CAM_CAMNOC_IRQ_OFE_RD_UBWC_DECODE_ERROR : Triggered if any error detected + * in the OFE read UBWC decoder + * instance + * @CAM_CAMNOC_IRQ_TFE_UBWC_ENCODE_ERROR : Triggered if any error detected + * in the TFE UBWC encoder + * instance + * @CAM_CAMNOC_IRQ_TFE_UBWC_1_ENCODE_ERROR : Triggered if any error detected + * in the TFE UBWC encoder + * instance + * @CAM_CAMNOC_IRQ_AHB_TIMEOUT : Triggered when the QHS_ICP slave + * times out after 4000 AHB cycles + */ +enum cam_camnoc_irq_type { + CAM_CAMNOC_IRQ_SLAVE_ERROR, + CAM_CAMNOC_IRQ_IFE_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_IFE_UBWC_STATS_ENCODE_ERROR, + CAM_CAMNOC_IRQ_IFE_UBWC_STATS_1_ENCODE_ERROR, + CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_IPE_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_BPS_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_IPE1_BPS_UBWC_DECODE_ERROR, + CAM_CAMNOC_IRQ_IPE0_UBWC_DECODE_ERROR, + CAM_CAMNOC_IRQ_IPE1_UBWC_DECODE_ERROR, + CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR, + CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_OFE_WR_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_OFE_RD_UBWC_DECODE_ERROR, + CAM_CAMNOC_IRQ_TFE_UBWC_ENCODE_ERROR, + CAM_CAMNOC_IRQ_TFE_UBWC_1_ENCODE_ERROR, + CAM_CAMNOC_IRQ_AHB_TIMEOUT, +}; + +/** + * enum cam_subparts_index - Enum for camera subparts indices + */ +enum cam_subparts_index { + CAM_IFE_HW_IDX, + CAM_IFE_LITE_HW_IDX, + CAM_SFE_HW_IDX, + CAM_CUSTOM_HW_IDX +}; + +/** + * struct cam_camnoc_irq_slave_err_data : Data for Slave error. 
+ * + * @mainctrl : Err logger mainctrl info + * @errvld : Err logger errvld info + * @errlog0_low : Err logger errlog0_low info + * @errlog0_high : Err logger errlog0_high info + * @errlog1_low : Err logger errlog1_low info + * @errlog1_high : Err logger errlog1_high info + * @errlog2_low : Err logger errlog2_low info + * @errlog2_high : Err logger errlog2_high info + * @errlog3_low : Err logger errlog3_low info + * @errlog3_high : Err logger errlog3_high info + * + */ +struct cam_camnoc_irq_slave_err_data { + union { + struct { + uint32_t stall_en : 1; /* bit 0 */ + uint32_t fault_en : 1; /* bit 1 */ + uint32_t rsv : 30; /* bits 2-31 */ + }; + uint32_t value; + } mainctrl; + union { + struct { + uint32_t err_vld : 1; /* bit 0 */ + uint32_t rsv : 31; /* bits 1-31 */ + }; + uint32_t value; + } errvld; + union { + struct { + uint32_t loginfo_vld : 1; /* bit 0 */ + uint32_t word_error : 1; /* bit 1 */ + uint32_t non_secure : 1; /* bit 2 */ + uint32_t device : 1; /* bit 3 */ + uint32_t opc : 3; /* bits 4 - 6 */ + uint32_t rsv0 : 1; /* bit 7 */ + uint32_t err_code : 3; /* bits 8 - 10 */ + uint32_t sizef : 3; /* bits 11 - 13 */ + uint32_t rsv1 : 2; /* bits 14 - 15 */ + uint32_t addr_space : 6; /* bits 16 - 21 */ + uint32_t rsv2 : 10; /* bits 22 - 31 */ + }; + uint32_t value; + } errlog0_low; + union { + struct { + uint32_t len1 : 10; /* bits 0 - 9 */ + uint32_t rsv : 22; /* bits 10 - 31 */ + }; + uint32_t value; + } errlog0_high; + union { + struct { + uint32_t path : 16; /* bits 0 - 15 */ + uint32_t rsv : 16; /* bits 16 - 31 */ + }; + uint32_t value; + } errlog1_low; + union { + struct { + uint32_t extid : 18; /* bits 0 - 17 */ + uint32_t rsv : 14; /* bits 18 - 31 */ + }; + uint32_t value; + } errlog1_high; + union { + struct { + uint32_t errlog2_lsb : 32; /* bits 0 - 31 */ + }; + uint32_t value; + } errlog2_low; + union { + struct { + uint32_t errlog2_msb : 16; /* bits 0 - 16 */ + uint32_t rsv : 16; /* bits 16 - 31 */ + }; + uint32_t value; + } errlog2_high; + union { 
+ struct { + uint32_t errlog3_lsb : 32; /* bits 0 - 31 */ + }; + uint32_t value; + } errlog3_low; + union { + struct { + uint32_t errlog3_msb : 32; /* bits 0 - 31 */ + }; + uint32_t value; + } errlog3_high; +}; + +/** + * struct cam_camnoc_irq_ubwc_enc_data : Data for UBWC Encode error. + * + * @encerr_status : Encode error status + * + */ +struct cam_camnoc_irq_ubwc_enc_data { + union { + struct { + uint32_t encerrstatus : 3; /* bits 0 - 2 */ + uint32_t rsv : 29; /* bits 3 - 31 */ + }; + uint32_t value; + } encerr_status; +}; + +/** + * struct cam_camnoc_irq_ubwc_dec_data : Data for UBWC Decode error. + * + * @decerr_status : Decoder error status + * @thr_err : Set to 1 if + * At least one of the bflc_len fields in the bit steam exceeds + * its threshold value. This error is possible only for + * RGBA1010102, TP10, and RGB565 formats + * @fcl_err : Set to 1 if + * Fast clear with a legal non-RGB format + * @len_md_err : Set to 1 if + * The calculated burst length does not match burst length + * specified by the metadata value + * @format_err : Set to 1 if + * Illegal format + * 1. bad format :2,3,6 + * 2. For 32B MAL, metadata=6 + * 3. For 32B MAL RGB565, Metadata != 0,1,7 + * 4. For 64B MAL RGB565, metadata[3:1] == 1,2 + * + */ +struct cam_camnoc_irq_ubwc_dec_data { + union { + struct { + uint32_t thr_err : 1; /* bit 0 */ + uint32_t fcl_err : 1; /* bit 1 */ + uint32_t len_md_err : 1; /* bit 2 */ + uint32_t format_err : 1; /* bit 3 */ + uint32_t rsv : 28; /* bits 4 - 31 */ + }; + uint32_t value; + } decerr_status; +}; + +struct cam_camnoc_irq_ahb_timeout_data { + uint32_t data; +}; + +/** + * struct cam_cpas_irq_data : CAMNOC IRQ data + * + * @irq_type : To identify the type of IRQ + * @u : Union of irq err data information + * @slave_err : Data for Slave error. + * Valid if type is CAM_CAMNOC_IRQ_SLAVE_ERROR + * @enc_err : Data for UBWC Encode error. 
+ * Valid if type is one of below: + * CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR + * CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR + * CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR + * @dec_err : Data for UBWC Decode error. + * Valid if type is CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR + * @ahb_err : Data for AHB timeout error. + * Valid if type is CAM_CAMNOC_IRQ_AHB_TIMEOUT + * + */ +struct cam_cpas_irq_data { + enum cam_camnoc_irq_type irq_type; + union { + struct cam_camnoc_irq_slave_err_data slave_err; + struct cam_camnoc_irq_ubwc_enc_data enc_err; + struct cam_camnoc_irq_ubwc_dec_data dec_err; + struct cam_camnoc_irq_ahb_timeout_data ahb_err; + } u; +}; + +/* + * CPAS client callback + * + * @client_handle : CPAS client handle + * @userdata : User data given at the time of register + * @irq_data : Event data + */ +typedef bool (*cam_cpas_client_cb_func)( + uint32_t client_handle, void *userdata, + struct cam_cpas_irq_data *irq_data); + +/** + * struct cam_cpas_register_params : Register params for cpas client + * + * @identifier : Input identifier string which is the device label + * from dt like vfe, ife, jpeg etc + * @cell_index : Input integer identifier pointing to the cell index + * from dt of the device. This can be used to form a + * unique string with @identifier like vfe0, ife1, + * jpeg0, etc + * @dev : device handle + * @userdata : Input private data which will be passed as + * an argument while callback. + * @cam_cpas_client_cb : Input callback pointer for triggering the + * callbacks from CPAS driver. 
+ * @client_handle : Output Unique handle generated for this register + * + */ +struct cam_cpas_register_params { + char identifier[CAM_HW_IDENTIFIER_LENGTH]; + uint32_t cell_index; + struct device *dev; + void *userdata; + cam_cpas_client_cb_func cam_cpas_client_cb; + uint32_t client_handle; +}; + +/** + * enum cam_vote_type - Enum for voting type + * + * @CAM_VOTE_ABSOLUTE : Absolute vote + * @CAM_VOTE_DYNAMIC : Dynamic vote + */ +enum cam_vote_type { + CAM_VOTE_ABSOLUTE, + CAM_VOTE_DYNAMIC, +}; + +/** + * struct cam_ahb_vote : AHB vote + * + * @type : AHB voting type. + * CAM_VOTE_ABSOLUTE : vote based on the value 'level' is set + * CAM_VOTE_DYNAMIC : vote calculated dynamically using 'freq' + * and 'dev' handle is set + * @level : AHB vote level + * @freq : AHB vote dynamic frequency + * + */ +struct cam_ahb_vote { + enum cam_vote_type type; + union { + enum cam_vote_level level; + unsigned long freq; + } vote; +}; + +/** + * struct cam_cpas_axi_per_path_bw_vote - Internal per path bandwidth vote information + * + * @usage_data: client usage data (left/right/rdi) + * @transac_type: Transaction type on the path (read/write) + * @path_data_type: Path for which vote is given (video, display, rdi) + * @vote_level: Vote level for this path + * @camnoc_bw: CAMNOC bw for this path + * @mnoc_ab_bw: MNOC AB bw for this path + * @mnoc_ib_bw: MNOC IB bw for this path + */ +struct cam_cpas_axi_per_path_bw_vote { + uint32_t usage_data; + uint32_t transac_type; + uint32_t path_data_type; + uint32_t vote_level; + uint64_t camnoc_bw; + uint64_t mnoc_ab_bw; + uint64_t mnoc_ib_bw; +}; + +/** + * struct cam_axi_vote : AXI vote + * + * @num_paths: Number of paths on which BW vote is sent to CPAS + * @axi_path: Per path BW vote info + * + */ +struct cam_axi_vote { + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_CPAS_MAX_PATHS_PER_CLIENT]; +}; + +/** + * struct cam_cpas_addr_trans_data : Register value to be programmed for address translator + * + * 
@enable: Indicate whether to enable address translator + * @val_offset0: Address delta for 0 to base1 + * @val_base1: Address from 0 to base1 is shifted by offset0 + * @val_offset1: Address delta for base1 to base2 + * @val_base2: Address from base1 to base2 is shifted by offset1 + * @val_offset2: Address delta for base2 to base3 + * @val_base3: Address from base2 to base3 is shifted by offset2 + * @val_offset3: Address delta for the rest memory region + * + */ +struct cam_cpas_addr_trans_data { + bool enable; + uint32_t val_offset0; + uint32_t val_base1; + uint32_t val_offset1; + uint32_t val_base2; + uint32_t val_offset2; + uint32_t val_base3; + uint32_t val_offset3; +}; + +/** + * enum cam_device_type - Enum for camera HW device types + */ +enum cam_device_type { + CAM_CPAS_HW_TYPE_IFE, + CAM_CPAS_HW_TYPE_TFE, + CAM_CPAS_HW_TYPE_IFE_LITE, + CAM_CPAS_HW_TYPE_IPE, + CAM_CPAS_HW_TYPE_BPS, + CAM_CPAS_HW_TYPE_OFE, + CAM_CPAS_HW_TYPE_MAX +}; + +#define CAM_CPAS_IS_VALID_CAM_DEV_TYPE(type) \ +({ \ + ((type) >= CAM_CPAS_HW_TYPE_IFE) && ((type) < CAM_CPAS_HW_TYPE_MAX); \ +}) +#define CAM_MAX_OUTPUT_PORTS_PER_DEVICE 65 + +enum cam_ipe_out_port_type { + CAM_CPAS_IPE_OUTPUT_IMAGE_DISPLAY, + CAM_CPAS_IPE_OUTPUT_IMAGE_VIDEO, + CAM_CPAS_IPE_OUTPUT_IMAGE_FULL_REF, + CAM_CPAS_IPE_OUTPUT_IMAGE_DS4_REF, + CAM_CPAS_IPE_OUTPUT_IMAGE_DS16_REF, + CAM_CPAS_IPE_OUTPUT_IMAGE_DS64_REF, + CAM_CPAS_IPE_OUTPUT_IMAGE_FD, + CAM_CPAS_IPE_OUTPUT_IMAGE_STATS_IHIST, + CAM_CPAS_IPE_OUTPUT_MAX +}; + +/** + * struct cam_cpas_cp_mapping_config_info : CP CTRL config info + * + * @device_type: HW device type (IFE, IPE, OFE etc.) 
+ * @hw_instance_id_mask: Mask of the Indices of the devices to configure + * @protect: Whether to configure the ports as secure or non secure + * @phy_id: Index of the PHY that is used for this stream + * Applicable for only real time devices + * @num_ports: Number of ports to be configured + * @port_ids: IDs of the ports to be configured + * + */ +struct cam_cpas_cp_mapping_config_info { + enum cam_device_type device_type; + uint32_t hw_instance_id_mask; + bool protect; + uint32_t phy_id; + uint32_t num_ports; + uint32_t port_ids[CAM_MAX_OUTPUT_PORTS_PER_DEVICE]; +}; + +/** + * cam_cpas_prepare_subpart_info() + * + * @brief: API to update the number of ifes, ife_lites, sfes and custom + * in the struct cam_cpas_private_soc. + * + * @idx : Camera subpart index + * @num_subpart_available : Number of available subparts + * @num_subpart_functional: Number of functional subparts + * + * @returns 0 on success & -EINVAL when @subpart_type is invalid. + * + */ +int cam_cpas_prepare_subpart_info( + enum cam_subparts_index idx, uint32_t num_subpart_available, + uint32_t num_subpart_functional); + +/** + * cam_cpas_register_client() + * + * @brief: API to register cpas client + * + * @register_params: Input params to register as a client to CPAS + * + * @return 0 on success. + * + */ +int cam_cpas_register_client( + struct cam_cpas_register_params *register_params); + +/** + * cam_cpas_unregister_client() + * + * @brief: API to unregister cpas client + * + * @client_handle: Client handle to be unregistered + * + * @return 0 on success. + * + */ +int cam_cpas_unregister_client(uint32_t client_handle); + +/** + * cam_cpas_start() + * + * @brief: API to start cpas client hw. Clients have to vote for minimal + * bandwidth requirements for AHB, AXI. Use cam_cpas_update_ahb_vote + * to scale bandwidth after start. 
+ * + * @client_handle: client cpas handle + * @ahb_vote : Pointer to ahb vote info + * @axi_vote : Pointer to axi bandwidth vote info + * + * If AXI vote is not applicable to a particular client, use the value exposed + * by CAM_CPAS_DEFAULT_AXI_BW as the default vote request. + * + * @return 0 on success. + * + */ +int cam_cpas_start( + uint32_t client_handle, + struct cam_ahb_vote *ahb_vote, + struct cam_axi_vote *axi_vote); + +/** + * cam_cpas_stop() + * + * @brief: API to stop cpas client hw. Bandwidth for AHB, AXI votes + * would be removed for this client on this call. Clients should not + * use cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote + * to remove their bandwidth vote. + * + * @client_handle: client cpas handle + * + * @return 0 on success. + * + */ +int cam_cpas_stop(uint32_t client_handle); + +/** + * cam_cpas_update_ahb_vote() + * + * @brief: API to update AHB vote requirement. Use this function only + * between cam_cpas_start and cam_cpas_stop in case clients wants + * to scale to different vote level. Do not use this function to de-vote, + * removing client's vote is implicit on cam_cpas_stop + * + * @client_handle : Client cpas handle + * @ahb_vote : Pointer to ahb vote info + * + * @return 0 on success. + * + */ +int cam_cpas_update_ahb_vote( + uint32_t client_handle, + struct cam_ahb_vote *ahb_vote); + +/** + * cam_cpas_update_axi_vote() + * + * @brief: API to update AXI vote requirement. Use this function only + * between cam_cpas_start and cam_cpas_stop in case clients wants + * to scale to different vote level. Do not use this function to de-vote, + * removing client's vote is implicit on cam_cpas_stop + * + * @client_handle : Client cpas handle + * @axi_vote : Pointer to axi bandwidth vote info + * + * @return 0 on success. + * + */ +int cam_cpas_update_axi_vote( + uint32_t client_handle, + struct cam_axi_vote *axi_vote); + +/** + * cam_cpas_update_axi_floor_lvl() + * + * @brief: API to update AXI vote requirement from all clients. 
+ * + * + * @client_handle : Client cpas handle + * @axi_floor_lvl : Core clock level + * + * @return 0 on success. + * + */ +int cam_cpas_update_axi_floor_lvl(uint32_t client_handle, int32_t axi_floor_lvl); + +/** + * cam_cpas_reg_write() + * + * @brief: API to write a register value in CPAS register space + * + * @client_handle : Client cpas handle + * @reg_base : Register base identifier + * @offset : Offset from the register base address + * @mb : Whether to do reg write with memory barrier + * @value : Value to be written in register + * + * @return 0 on success. + * + */ +int cam_cpas_reg_write( + uint32_t client_handle, + enum cam_cpas_regbase_types reg_base, + uint32_t offset, + bool mb, + uint32_t value); + +/** + * cam_cpas_set_addr_trans() + * + * @brief: API to program ICP address translator registers + * + * @client_handle : Client cpas handle + * @addr_trans_data : Register values to be programmed for address translator + * + * @return 0 on success. + * + */ +int cam_cpas_set_addr_trans( + uint32_t client_handle, + struct cam_cpas_addr_trans_data *addr_trans_data); + +/** + * cam_cpas_reg_read() + * + * @brief: API to read a register value from CPAS register space + * + * @client_handle : Client cpas handle + * @reg_base : Register base identifier + * @offset : Offset from the register base address + * @mb : Whether to do reg read with memory barrier + * @value : Value to be read from register + * + * @return 0 on success. + * + */ +int cam_cpas_reg_read( + uint32_t client_handle, + enum cam_cpas_regbase_types reg_base, + uint32_t offset, + bool mb, + uint32_t *value); + +/** + * cam_cpas_get_hw_info() + * + * @brief: API to get camera hw information + * + * @camera_family : Camera family type. 
One of + * CAM_FAMILY_CAMERA_SS + * CAM_FAMILY_CPAS_SS + * @camera_version : Camera platform version + * @cpas_version : Camera cpas version + * @cam_caps : Camera capability array + * @num_cap_mask : number of capability masks + * @cam_fuse_info : Camera fuse info + * @domain_id_info : Domain id info + * + * @return 0 on success. + * + */ +int cam_cpas_get_hw_info( + uint32_t *camera_family, + struct cam_hw_version *camera_version, + struct cam_hw_version *cpas_version, + uint32_t **cam_caps, + uint32_t *num_cap_mask, + struct cam_cpas_fuse_info *cam_fuse_info, + struct cam_cpas_domain_id_caps *domain_id_info); + +/** + * cam_cpas_get_cpas_hw_version() + * + * @brief: API to get camera cpas hw version + * + * @hw_version : Camera cpas hw version + * + * @return 0 on success. + * + */ +int cam_cpas_get_cpas_hw_version(uint32_t *hw_version); + +/** + * cam_cpas_is_feature_supported() + * + * @brief: API to get camera features + * + * @flag : Camera hw features to check + * + * @hw_map : To indicate which HWs are supported + * + * @fuse_val : Return fuse value in case of value type feature + * + * @return 1 if feature is supported + * + */ +bool cam_cpas_is_feature_supported(uint32_t flag, uint32_t hw_map, + uint32_t *fuse_val); + +/** + * cam_cpas_axi_util_path_type_to_string() + * + * @brief: API to get string for given path type + * + * @path_data_type : Path type + * + * @return string. + * + */ +const char *cam_cpas_axi_util_path_type_to_string( + uint32_t path_data_type); + +/** + * cam_cpas_axi_util_trans_type_to_string() + * + * @brief: API to get string for given transaction type + * + * @path_data_type : Transaction type + * + * @return string. + * + */ +const char *cam_cpas_axi_util_trans_type_to_string( + uint32_t path_data_type); + +/** + * cam_cpas_axi_util_drv_vote_lvl_to_string() + * + * @brief: API to get string for given DRV vote level + * + * @vote_lvl : DRV vote level + * + * @return string. 
+ *
+ */
+const char *cam_cpas_axi_util_drv_vote_lvl_to_string(
+	uint32_t vote_lvl);
+
+/**
+ * cam_cpas_util_vote_type_to_string()
+ *
+ * @brief: API to get string for given vote type
+ *
+ * @vote_type : Vote type
+ *
+ * @return string.
+ *
+ */
+const char *cam_cpas_util_vote_type_to_string(enum cam_cpas_vote_type vote_type);
+
+/**
+ * cam_cpas_log_votes()
+ *
+ * @brief: API to print all the bw votes of axi client. It also prints the
+ *	applied camnoc axi clock vote value and ahb vote value
+ *
+ * @ddr_only: Print only DDR info
+ *
+ * @return 0 on success.
+ *
+ */
+void cam_cpas_log_votes(bool ddr_only);
+
+/**
+ * cam_cpas_select_qos_settings()
+ *
+ * @brief: API to select specific qos settings based on usecase requirements
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_select_qos_settings(uint32_t selection_mask);
+
+/**
+ * cam_cpas_notify_event()
+ *
+ * @brief: API that clients can notify about their events. CPAS saves the event
+ *	and any other useful information related to this event. This will
+ *	be printed while cpas state dump - cam_cpas_log_votes.
+ *	One such example is IFE notifying SOF or EPOCH to cpas and cpas
+ *	saving axi clock information (camnoc_axi, mnoc_hf) at that point
+ *	and printing latest history on IFE overflow.
+ *
+ * @identifier_string: Identifier string passed by caller
+ * @identifier_value: Identifier value passed by caller
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_notify_event(const char *identifier_string,
+	int32_t identifier_value);
+
+/**
+ * cam_cpas_get_scid()
+ *
+ * @brief: API to obtain slice id for the given type
+ *
+ * @type: Cache type
+ *
+ * @return slice id, -1 for invalid id.
+ *
+ */
+int cam_cpas_get_scid(uint32_t type);
+
+/**
+ * cam_cpas_activate_llcc()
+ *
+ * @brief: API to activate system cache
+ *
+ * @type: Cache type
+ *
+ * @return 0 for success. 
+ *
+ */
+int cam_cpas_activate_llcc(uint32_t type);
+
+/**
+ * cam_cpas_deactivate_llcc()
+ *
+ * @brief: API to de-activate system cache
+ *
+ * @type: Cache type
+ *
+ * @return 0 for success.
+ *
+ */
+int cam_cpas_deactivate_llcc(uint32_t type);
+
+/**
+ * cam_cpas_configure_staling_llcc()
+ *
+ * @brief: Configure cache staling mode by setting the
+ *	staling_mode and corresponding params
+ *
+ * @type: Cache type, For example cache types are
+ *	CAM_LLCC_SMALL_1/CAM_LLCC_SMALL_2/CAM_LLCC_LARGE_1/ ....
+ *	CAM_LLCC_IPE_SRT_IP/CAM_LLCC_IPE_RT_RF
+ * @mode_param: camera llcc's stalling mode params, possible allowed values
+ *	CAM_LLCC_STALING_MODE_CAPACITY/CAM_LLCC_STALING_MODE_NOTIFY
+ * @operation_type: cache operation type, possible allowed values are
+ *	CAM_LLCC_NOTIFY_STALING_EVICT/CAM_LLCC_NOTIFY_STALING_FORGET
+ * @staling_distance: llcc sys cache staling distance
+ *
+ * @return 0 for success.
+ *
+ */
+int cam_cpas_configure_staling_llcc(
+	uint32_t type,
+	uint32_t mode_param,
+	uint32_t operation_type,
+	uint32_t staling_distance);
+
+/**
+ * cam_cpas_notif_increment_staling_counter()
+ *
+ * @brief: This will increment the staling counter
+ *	depending on what operation it does.
+ *	The operation mode is the one set up earlier via
+ *	cam_cpas_configure_staling_llcc().
+ *
+ * @type: Cache type, For example cache types are
+ *	CAM_LLCC_SMALL_1/CAM_LLCC_SMALL_2/CAM_LLCC_LARGE_1/ ....
+ *	CAM_LLCC_IPE_SRT_IP/CAM_LLCC_IPE_RT_RF
+ *
+ * @return 0 for success. 
+ * + */ +int cam_cpas_notif_increment_staling_counter( + uint32_t type); + +/** + * cam_cpas_dump_camnoc_buff_fill_info() + * + * @brief: API to dump camnoc buffer fill level info + * + * @client_handle : Client cpas handle + * + * @return 0 on success + * + */ +int cam_cpas_dump_camnoc_buff_fill_info(uint32_t client_handle); + +/** + * cam_cpas_csid_input_core_info_update() + * + * @brief: API to communicate csid input core info to cpas + * + * @csid_idx: csid hw index connected to particular sfe + * @sfe_idx: sfe idx to be connected to particular DRV path + * @set_port: Indicates whether to set or reset DRV port info in dynamic client + * + * @return 0 on success + * + */ +int cam_cpas_csid_input_core_info_update(int csid_idx, int sfe_idx, bool set_port); + +/** + * cam_cpas_csid_process_resume() + * + * @brief: API to process csid resume in cpas + * @csid_idx: CSID idx to notify resume for + * + * @return 0 on success + * + */ +int cam_cpas_csid_process_resume(uint32_t csid_idx); + +/** + * cam_cpas_query_drv_enable() + * + * @brief: API to indicate DRV enabled on hw or not + * @is_ddr_drv_enabled: If DDR DRV enabled + * @is_clk_drv_enabled: If Clock Cesta DRV enabled + * + * @return 0 on success + * + */ +int cam_cpas_query_drv_enable(bool *is_ddr_drv_enabled, bool *is_clk_drv_enabled); + +/** + * cam_cpas_query_domain_id_security_support() + * @brief: API to determine if target supports domain id feature + * This information is determined by cpas during probe + * + * @return true if there's support, false otherwise + */ +bool cam_cpas_query_domain_id_security_support(void); + +/** + * cam_cpas_enable_clks_for_domain_id() + * + * @brief: API to enable/disable clocks for domain id support. + * All CSIDs including those not in use for a ctxt + * needs to be programmed in a secure session. + * @enable: True to turn on, false otherwise. 
+ * @return 0 on success
+ */
+int cam_cpas_enable_clks_for_domain_id(bool enable);
+
+/**
+ * cam_cpas_is_notif_staling_supported()
+ *
+ * @brief: API to check stalling feature is supported or not
+ *
+ * @return true if supported
+ */
+bool cam_cpas_is_notif_staling_supported(void);
+
+/**
+ * cam_cpas_dump_state_monitor_info()
+ *
+ * @brief: API to dump the state monitor info of cpas.
+ * @info: Dump information.
+ * @return 0 on success
+ */
+int cam_cpas_dump_state_monitor_info(struct cam_req_mgr_dump_info *info);
+
+/**
+ * cam_cpas_is_fw_based_sys_caching_supported()
+ *
+ * @brief: API to return true if feature is supported
+ * @return true or false
+ */
+bool cam_cpas_is_fw_based_sys_caching_supported(void);
+
+/**
+ * cam_cpas_config_cp_mapping_ctrl()
+ *
+ * @config: CP Mapping CTRL config info
+ * @return 0 on success
+ */
+int cam_cpas_config_cp_mapping_ctrl(
+	struct cam_cpas_cp_mapping_config_info *config);
+
+#endif /* _CAM_CPAS_API_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_context.c b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_context.c
new file mode 100644
index 0000000000..9bd6b9ddd9
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_context.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "cam_trace.h" +#include "cam_mem_mgr.h" +#include "cam_cre_context.h" +#include "cam_context_utils.h" +#include "cam_debug_util.h" +#include "cam_packet_util.h" +#include "cam_context.h" + +static const char cre_dev_name[] = "cam-cre"; + +static int __cam_cre_start_dev_in_acquired(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_start_dev_to_hw(ctx, cmd); + if (!rc) { + ctx->state = CAM_CTX_READY; + trace_cam_context_state("CRE", ctx); + } + + return rc; +} + + +static int __cam_cre_ctx_flush_dev_in_ready(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd) +{ + int rc; + struct cam_context_utils_flush_args flush_args; + + flush_args.cmd = cmd; + flush_args.flush_active_req = false; + + rc = cam_context_flush_dev_to_hw(ctx, cmd, &flush_args); + if (rc) + CAM_ERR(CAM_CRE, "Failed to flush device"); + + return rc; +} + +static int __cam_cre_ctx_dump_dev_in_ready(struct cam_context *ctx, + struct cam_dump_req_cmd *cmd) +{ + int rc; + + rc = cam_context_dump_dev_to_hw(ctx, cmd); + if (rc) + CAM_ERR(CAM_CRE, "Failed to dump device"); + + return rc; +} + +static int __cam_cre_ctx_config_dev_in_ready(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc; + size_t len; + uintptr_t packet_addr; + + rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle, + &packet_addr, &len); + if (rc) { + CAM_ERR(CAM_CRE, "[%s][%d] Can not get packet address", + ctx->dev_name, ctx->ctx_id); + rc = -EINVAL; + return rc; + } + + rc = cam_context_prepare_dev_to_hw(ctx, cmd); + + if (rc) + CAM_ERR(CAM_CRE, "Failed to prepare device"); + + cam_mem_put_cpu_buf((int32_t) cmd->packet_handle); + return rc; +} + +static int __cam_cre_ctx_stop_dev_in_ready(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_stop_dev_to_hw(ctx); + if (rc) + CAM_ERR(CAM_CRE, "Failed to stop device"); + + ctx->state = CAM_CTX_ACQUIRED; + 
trace_cam_context_state("CRE", ctx); + return rc; +} + +static int __cam_cre_ctx_release_dev_in_acquired(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_release_dev_to_hw(ctx, cmd); + if (rc) + CAM_ERR(CAM_CRE, "Unable to release device %d", rc); + + ctx->state = CAM_CTX_AVAILABLE; + + return rc; +} + +static int __cam_cre_ctx_release_dev_in_ready(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + rc = __cam_cre_ctx_stop_dev_in_ready(ctx, NULL); + if (rc) + CAM_ERR(CAM_CRE, "Failed to stop device"); + + rc = __cam_cre_ctx_release_dev_in_acquired(ctx, cmd); + if (rc) + CAM_ERR(CAM_CRE, "Failed to release device"); + + return rc; +} + +static int __cam_cre_ctx_handle_buf_done_in_ready(void *ctx, + uint32_t evt_id, void *done) +{ + return cam_context_buf_done_from_hw(ctx, done, evt_id); +} + +static int cam_cre_context_dump_active_request(void *data, void *args) +{ + + struct cam_context *ctx = (struct cam_context *)data; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args; + int rc = 0; + + if (!ctx || !pf_args) { + CAM_ERR(CAM_CRE, "Invalid ctx %pK or pf arguments %pK", + ctx, pf_args); + return -EINVAL; + } + + CAM_INFO(CAM_CRE, "iommu fault for cre ctx %d state %d", + ctx->ctx_id, ctx->state); + + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + + CAM_INFO(CAM_CRE, "Active req_id: %llu ctx_id: %u", + req->request_id, ctx->ctx_id); + + rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data); + if (rc) + CAM_ERR(CAM_CRE, "Failed to dump pf info ctx_id: %u state: %d", + ctx->ctx_id, ctx->state); + } + + if (pf_args->pf_context_info.ctx_found) { + /* Send PF notification to UMD if PF found on current CTX */ + rc = cam_context_send_pf_evt(ctx, pf_args); + if (rc) + CAM_ERR(CAM_CRE, + "Failed to notify PF event to userspace rc: %d", rc); + } + + return rc; +} + 
+static int __cam_cre_ctx_acquire_dev_in_available(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *args) +{ + int rc; + + rc = cam_context_acquire_dev_to_hw(ctx, args); + if (rc) + CAM_ERR(CAM_CRE, "Unable to Acquire device %d", rc); + else + ctx->state = CAM_CTX_ACQUIRED; + + return rc; +} + +/* top state machine */ +static struct cam_ctx_ops + cam_cre_ctx_state_machine[CAM_CTX_STATE_MAX] = { + /* Uninit */ + { + .ioctl_ops = { }, + .crm_ops = { }, + .irq_ops = NULL, + }, + /* Available */ + { + .ioctl_ops = { + .acquire_dev = __cam_cre_ctx_acquire_dev_in_available, + }, + .crm_ops = { }, + .irq_ops = NULL, + }, + /* Acquired */ + { + .ioctl_ops = { + .release_dev = __cam_cre_ctx_release_dev_in_acquired, + .start_dev = __cam_cre_start_dev_in_acquired, + .config_dev = __cam_cre_ctx_config_dev_in_ready, + .flush_dev = __cam_cre_ctx_flush_dev_in_ready, + .dump_dev = __cam_cre_ctx_dump_dev_in_ready, + }, + .crm_ops = { }, + .irq_ops = __cam_cre_ctx_handle_buf_done_in_ready, + .pagefault_ops = cam_cre_context_dump_active_request, + }, + /* Ready */ + { + .ioctl_ops = { + .stop_dev = __cam_cre_ctx_stop_dev_in_ready, + .release_dev = __cam_cre_ctx_release_dev_in_ready, + .config_dev = __cam_cre_ctx_config_dev_in_ready, + .flush_dev = __cam_cre_ctx_flush_dev_in_ready, + .dump_dev = __cam_cre_ctx_dump_dev_in_ready, + }, + .crm_ops = {}, + .irq_ops = __cam_cre_ctx_handle_buf_done_in_ready, + .pagefault_ops = cam_cre_context_dump_active_request, + }, + /* Activated */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + .pagefault_ops = cam_cre_context_dump_active_request, + }, +}; + +int cam_cre_context_init(struct cam_cre_context *ctx, + struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, int img_iommu_hdl) +{ + int rc; + int i; + + if (!ctx || !ctx->base) { + CAM_ERR(CAM_CRE, "Invalid Context"); + rc = -EFAULT; + goto err; + } + + for (i = 0; i < CAM_CTX_REQ_MAX; i++) + ctx->req_base[i].req_priv = ctx; + + rc = cam_context_init(ctx->base, 
cre_dev_name, CAM_CRE, ctx_id, + NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX, img_iommu_hdl); + if (rc) { + CAM_ERR(CAM_CRE, "Camera Context Base init failed"); + goto err; + } + + ctx->base->state_machine = cam_cre_ctx_state_machine; + ctx->base->ctx_priv = ctx; + + ctx->base->max_hw_update_entries = CAM_CTX_CFG_MAX; + ctx->base->max_in_map_entries = CAM_CTX_CFG_MAX; + ctx->base->max_out_map_entries = CAM_CTX_CFG_MAX; +err: + return rc; +} + +int cam_cre_context_deinit(struct cam_cre_context *ctx) +{ + if (!ctx || !ctx->base) { + CAM_ERR(CAM_CRE, "Invalid params: %pK", ctx); + return -EINVAL; + } + + cam_context_deinit(ctx->base); + + memset(ctx, 0, sizeof(*ctx)); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_context.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_context.h new file mode 100644 index 0000000000..c3e5e3f646 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_context.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CRE_CONTEXT_H_ +#define _CAM_CRE_CONTEXT_H_ + +#include + +#include "cam_context.h" +#include "cam_cre_hw_mgr_intf.h" + +#define CAM_CRE_HW_EVENT_MAX 20 + +/** + * struct cam_cre_context - CRE context + * @base: Base cre cam context object + * @req_base: Common request structure + */ +struct cam_cre_context { + struct cam_context *base; + struct cam_ctx_request req_base[CAM_CTX_REQ_MAX]; +}; + +/* cam cre context irq handling function type */ +typedef int (*cam_cre_hw_event_cb_func)( + struct cam_cre_context *ctx_cre, + void *evt_data); + +/** + * struct cam_cre_ctx_irq_ops - Function table for handling IRQ callbacks + * + * @irq_ops: Array of handle function pointers. 
+ * + */ +struct cam_cre_ctx_irq_ops { + cam_cre_hw_event_cb_func irq_ops[CAM_CRE_HW_EVENT_MAX]; +}; + +/** + * cam_cre_context_init() + * + * @brief: Initialization function for the CRE context + * + * @ctx: CRE context obj to be initialized + * @hw_intf: CRE hw manager interface + * @ctx_id: ID for this context + * @img_iommu_hdl: IOMMU HDL for image buffers + * + */ +int cam_cre_context_init(struct cam_cre_context *ctx, + struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, + int img_iommu_hdl); + +/** + * cam_cre_context_deinit() + * + * @brief: Deinitialize function for the CRE context + * + * @ctx: CRE context obj to be deinitialized + * + */ +int cam_cre_context_deinit(struct cam_cre_context *ctx); + +#endif /* __CAM_CRE_CONTEXT_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_dev.c b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_dev.c new file mode 100644 index 0000000000..c6f72f8b4c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_dev.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "cam_node.h" +#include "cam_hw_mgr_intf.h" +#include "cam_cre_hw_mgr.h" +#include "cam_cre_dev.h" +#include "cam_debug_util.h" +#include "cam_smmu_api.h" +#include "camera_main.h" +#include "cam_context_utils.h" +#include "cam_mem_mgr_api.h" + +#define CAM_CRE_DEV_NAME "cam-cre" + +struct cam_cre_subdev { + struct cam_subdev sd; + struct cam_node *node; + struct cam_context ctx[CRE_CTX_MAX]; + struct cam_cre_context ctx_cre[CRE_CTX_MAX]; + struct mutex cre_lock; + int32_t open_cnt; + int32_t reserved; +}; +static struct cam_cre_subdev g_cre_dev; + +static void cam_cre_dev_iommu_fault_handler( + struct cam_smmu_pf_info *pf_smmu_info) +{ + int i, rc; + struct cam_node *node = NULL; + struct cam_hw_dump_pf_args pf_args = {0}; + + if (!pf_smmu_info || !pf_smmu_info->token) { + CAM_ERR(CAM_CRE, "invalid token in page handler cb"); + return; + } + + node = (struct cam_node *)pf_smmu_info->token; + + pf_args.pf_smmu_info = pf_smmu_info; + + for (i = 0; i < node->ctx_size; i++) { + cam_context_dump_pf_info(&(node->ctx_list[i]), &pf_args); + if (pf_args.pf_context_info.ctx_found) + /* found ctx and packet of the faulted address */ + break; + } + + if (i == node->ctx_size) { + /* Faulted ctx not found. 
Report PF to userspace */ + rc = cam_context_send_pf_evt(NULL, &pf_args); + if (rc) + CAM_ERR(CAM_CRE, + "Failed to notify PF event to userspace rc: %d", rc); + } +} + +static int cam_cre_subdev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + + mutex_lock(&g_cre_dev.cre_lock); + g_cre_dev.open_cnt++; + mutex_unlock(&g_cre_dev.cre_lock); + + return 0; +} + +static int cam_cre_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + int rc = 0; + struct cam_node *node = v4l2_get_subdevdata(sd); + + + mutex_lock(&g_cre_dev.cre_lock); + if (g_cre_dev.open_cnt <= 0) { + CAM_DBG(CAM_CRE, "CRE subdev is already closed"); + rc = -EINVAL; + goto end; + } + + g_cre_dev.open_cnt--; + + if (!node) { + CAM_ERR(CAM_CRE, "Node ptr is NULL"); + rc = -EINVAL; + goto end; + } + + if (g_cre_dev.open_cnt == 0) + cam_node_shutdown(node); + +end: + mutex_unlock(&g_cre_dev.cre_lock); + return rc; +} + +static int cam_cre_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_ICP, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_cre_subdev_close_internal(sd, fh); +} + +static const struct v4l2_subdev_internal_ops cam_cre_subdev_internal_ops = { + .close = cam_cre_subdev_close, + .open = cam_cre_subdev_open, +}; + +static int cam_cre_subdev_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + int rc = 0; + struct cam_hw_mgr_intf *hw_mgr_intf; + struct cam_node *node; + int iommu_hdl = -1; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + g_cre_dev.sd.pdev = pdev; + g_cre_dev.sd.internal_ops = &cam_cre_subdev_internal_ops; + rc = cam_subdev_probe(&g_cre_dev.sd, pdev, CAM_CRE_DEV_NAME, + CAM_CRE_DEVICE_TYPE); + if (rc) { + CAM_ERR(CAM_CRE, "CRE cam_subdev_probe failed %d", rc); + goto err; + } + node 
= (struct cam_node *)g_cre_dev.sd.token; + + hw_mgr_intf = CAM_MEM_ZALLOC(sizeof(*hw_mgr_intf), GFP_KERNEL); + if (!hw_mgr_intf) { + CAM_ERR(CAM_CRE, "Error allocating memory"); + rc = -ENOMEM; + goto hw_alloc_fail; + } + + rc = cam_cre_hw_mgr_init(pdev->dev.of_node, hw_mgr_intf, + &iommu_hdl); + if (rc) { + CAM_ERR(CAM_CRE, "Can not initialize CRE HWmanager %d", rc); + goto hw_init_fail; + } + + memset(g_cre_dev.ctx_cre, 0, sizeof(g_cre_dev.ctx_cre)); + for (i = 0; i < CAM_CRE_CTX_MAX; i++) { + g_cre_dev.ctx_cre[i].base = &g_cre_dev.ctx[i]; + rc = cam_cre_context_init(&g_cre_dev.ctx_cre[i], + hw_mgr_intf, i, iommu_hdl); + if (rc) { + CAM_ERR(CAM_CRE, "CRE context init failed %d %d", + i, rc); + goto ctx_init_fail; + } + } + + rc = cam_node_init(node, hw_mgr_intf, g_cre_dev.ctx, + CAM_CRE_CTX_MAX, CAM_CRE_DEV_NAME); + if (rc) { + CAM_ERR(CAM_CRE, "CRE node init failed %d", rc); + goto ctx_init_fail; + } + + node->sd_handler = cam_cre_subdev_close_internal; + cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_cre_dev_iommu_fault_handler, node); + + g_cre_dev.open_cnt = 0; + mutex_init(&g_cre_dev.cre_lock); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + CAM_DBG(CAM_CRE, "Component bound successfully"); + + return rc; + +ctx_init_fail: + for (--i; i >= 0; i--) + if (cam_cre_context_deinit(&g_cre_dev.ctx_cre[i])) + CAM_ERR(CAM_CRE, "deinit fail %d %d", i, rc); +hw_init_fail: + CAM_MEM_FREE(hw_mgr_intf); +hw_alloc_fail: + if (cam_subdev_remove(&g_cre_dev.sd)) + CAM_ERR(CAM_CRE, "remove fail %d", rc); +err: + return rc; +} + +static void cam_cre_subdev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + + for (i = 0; i < CRE_CTX_MAX; i++) + cam_cre_context_deinit(&g_cre_dev.ctx_cre[i]); + + cam_node_deinit(g_cre_dev.node); + cam_subdev_remove(&g_cre_dev.sd); + mutex_destroy(&g_cre_dev.cre_lock); +} + +const static struct 
component_ops cam_cre_subdev_component_ops = { + .bind = cam_cre_subdev_component_bind, + .unbind = cam_cre_subdev_component_unbind, +}; + +static int cam_cre_subdev_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_cre_subdev_component_ops); + return 0; +} + +static int cam_cre_subdev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CRE, "Adding CRE sub component"); + rc = component_add(&pdev->dev, &cam_cre_subdev_component_ops); + if (rc) + CAM_ERR(CAM_CRE, "failed to add component rc: %d", rc); + return rc; +} + +static const struct of_device_id cam_cre_subdev_dt_match[] = { + { + .compatible = "qcom,cam-cre", + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_cre_subdev_dt_match); + +struct platform_driver cam_cre_subdev_driver = { + .probe = cam_cre_subdev_probe, + .remove = cam_cre_subdev_remove, + .driver = { + .name = "cam_cre", + .of_match_table = cam_cre_subdev_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_cre_subdev_init_module(void) +{ + return platform_driver_register(&cam_cre_subdev_driver); +} + +void cam_cre_subdev_exit_module(void) +{ + platform_driver_unregister(&cam_cre_subdev_driver); +} + +MODULE_DESCRIPTION("MSM CRE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_dev.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_dev.h new file mode 100644 index 0000000000..2892024dba --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_dev.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_CRE_DEV_H_ +#define _CAM_CRE_DEV_H_ + +#include "cam_subdev.h" +#include "cam_hw_mgr_intf.h" +#include "cam_context.h" +#include "cam_cre_context.h" + +/** + * struct cam_cre_dev - Camera CRE V4l2 device node + * + * @sd: Commone camera subdevice node + * @node: Pointer to cre subdevice + * @ctx: CRE base context storage + * @ctx_cre: CRE private context storage + * @cre_mutex: CRE dev mutex + * @open_cnt: Open device count + */ +struct cam_cre_dev { + struct cam_subdev sd; + struct cam_node *node; + struct cam_context ctx[CAM_CRE_CTX_MAX]; + struct cam_cre_context ctx_cre[CAM_CRE_CTX_MAX]; + struct mutex cre_mutex; + int32_t open_cnt; +}; + +/** + * @brief : API to register CRE dev to platform framework. + * @return struct platform_device pointer on success, or ERR_PTR() on error. + */ +int cam_cre_dev_init_module(void); + +/** + * @brief : API to remove CRE dev from platform framework. + */ +void cam_cre_dev_exit_module(void); +#endif /* __CAM_CRE_DEV_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.c new file mode 100644 index 0000000000..775824d04a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.c @@ -0,0 +1,3007 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cam_sync_api.h" +#include "cam_packet_util.h" +#include "cam_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_cre_hw_mgr_intf.h" +#include "cam_cre_hw_mgr.h" +#include "cre_hw.h" +#include "cam_smmu_api.h" +#include "cam_mem_mgr.h" +#include "cam_req_mgr_workq.h" +#include "cam_mem_mgr.h" +#include "cam_debug_util.h" +#include "cam_soc_util.h" +#include "cam_cpas_api.h" +#include "cam_common_util.h" +#include "cre_dev_intf.h" +#include "cam_compat.h" + +static struct cam_cre_hw_mgr *cre_hw_mgr; + +static bool cam_cre_debug_clk_update(struct cam_cre_clk_info *hw_mgr_clk_info) +{ + if (cre_hw_mgr->cre_debug_clk && + cre_hw_mgr->cre_debug_clk != hw_mgr_clk_info->curr_clk) { + hw_mgr_clk_info->base_clk = cre_hw_mgr->cre_debug_clk; + hw_mgr_clk_info->curr_clk = cre_hw_mgr->cre_debug_clk; + hw_mgr_clk_info->uncompressed_bw = cre_hw_mgr->cre_debug_clk; + hw_mgr_clk_info->compressed_bw = cre_hw_mgr->cre_debug_clk; + CAM_DBG(CAM_PERF, "bc = %d cc = %d ub %d cb %d", + hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk, + hw_mgr_clk_info->uncompressed_bw, + hw_mgr_clk_info->compressed_bw); + return true; + } + + return false; +} + +static struct cam_cre_io_buf_info *cam_cre_mgr_get_rsc( + struct cam_cre_ctx *ctx_data, + struct cam_buf_io_cfg *in_io_buf) +{ + int k = 0; + + if (in_io_buf->direction == CAM_BUF_INPUT) { + for (k = 0; k < CRE_MAX_IN_RES; k++) { + if (ctx_data->cre_acquire.in_res[k].res_id == + in_io_buf->resource_type) + return &ctx_data->cre_acquire.in_res[k]; + } + if (k == CRE_MAX_IN_RES) { + CAM_ERR(CAM_CRE, "Invalid res_id %d", + in_io_buf->resource_type); + goto end; + } + } else if (in_io_buf->direction == CAM_BUF_OUTPUT) { + for (k = 0; k < CRE_MAX_OUT_RES; k++) { + if (ctx_data->cre_acquire.out_res[k].res_id == + in_io_buf->resource_type) + return &ctx_data->cre_acquire.out_res[k]; + } + if (k == CRE_MAX_OUT_RES) { + 
CAM_ERR(CAM_CRE, "Invalid res_id %d", + in_io_buf->resource_type); + goto end; + } + } + +end: + return NULL; +} + +static int cam_cre_mgr_update_reg_set(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_request *cre_req, int batch_index) +{ + struct cam_cre_dev_reg_set_update reg_set_upd_cmd; + int i; + + reg_set_upd_cmd.cre_reg_buf = cre_req->cre_reg_buf[batch_index]; + + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, + CRE_HW_REG_SET_UPDATE, + ®_set_upd_cmd, sizeof(reg_set_upd_cmd)); + } + + return 0; +} + +static void cam_cre_free_io_config(struct cam_cre_request *req) +{ + int i, j; + + for (i = 0; i < CRE_MAX_BATCH_SIZE; i++) { + for (j = 0; j < CRE_MAX_IO_BUFS; j++) { + if (req->io_buf[i][j]) { + CAM_MEM_ZFREE(req->io_buf[i][j], + sizeof(struct cre_io_buf)); + req->io_buf[i][j] = NULL; + } + } + } +} + +static int cam_cre_mgr_process_cmd_io_buf_req(struct cam_cre_hw_mgr *hw_mgr, + struct cam_packet *packet, struct cam_cre_ctx *ctx_data, + uint32_t req_idx, struct list_head *buf_tracker) +{ + int rc = 0; + int i, j, k; + dma_addr_t iova_addr; + + size_t len; + struct cam_cre_request *cre_request; + struct cre_io_buf *io_buf; + struct plane_info *plane_info; + + uint32_t alignment; + bool is_secure; + struct cam_buf_io_cfg *io_cfg_ptr = NULL; + struct cam_cre_io_buf_info *acq_io_buf; + + io_cfg_ptr = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload_flex + + packet->io_configs_offset / 4); + + cre_request = ctx_data->req_list[req_idx]; + cre_request->num_batch = ctx_data->cre_acquire.batch_size; + CAM_DBG(CAM_CRE, "num_io_configs %d", packet->num_io_configs); + + for (i = 0; i < cre_request->num_batch; i++) { + for (j = 0; j < packet->num_io_configs; j++) { + cre_request->num_io_bufs[i]++; + acq_io_buf = cam_cre_mgr_get_rsc(ctx_data, &io_cfg_ptr[j]); + if (!acq_io_buf) { + CAM_ERR(CAM_CRE, "get rsc failed"); + return -EINVAL; + } + + cre_request->io_buf[i][j] = + 
CAM_MEM_ZALLOC(sizeof(struct cre_io_buf), GFP_KERNEL); + if (!cre_request->io_buf[i][j]) { + CAM_ERR(CAM_CRE, + "IO config allocation failure"); + cam_cre_free_io_config(cre_request); + return -ENOMEM; + } + + io_buf = cre_request->io_buf[i][j]; + io_buf->num_planes = acq_io_buf->num_planes; + io_buf->resource_type = acq_io_buf->res_id; + io_buf->direction = acq_io_buf->direction; + io_buf->format = acq_io_buf->format; + + alignment = acq_io_buf->alignment; + io_buf->fence = io_cfg_ptr[j].fence; + + CAM_DBG(CAM_CRE, + "i %d j %d Number of planes %d res_type %d dir %d, fence %d format %d align %d", + i, j, io_buf->num_planes, io_buf->resource_type, + io_buf->direction, io_buf->fence, io_buf->format, alignment); + + for (k = 0; k < io_buf->num_planes; k++) { + is_secure = cam_mem_is_secure_buf( + io_cfg_ptr[j].mem_handle[k]); + + if (is_secure) + rc = cam_mem_get_io_buf( + io_cfg_ptr[j].mem_handle[k], + hw_mgr->iommu_sec_hdl, + &iova_addr, &len, NULL, buf_tracker); + else + rc = cam_mem_get_io_buf( + io_cfg_ptr[j].mem_handle[k], + hw_mgr->iommu_hdl, + &iova_addr, &len, NULL, buf_tracker); + + if (rc) { + CAM_ERR(CAM_CRE, "get buf failed: %d", + rc); + return -EINVAL; + } + iova_addr += io_cfg_ptr[j].offsets[k]; + plane_info = &io_buf->p_info[k]; + + plane_info->offset = io_cfg_ptr[j].offsets[k]; + plane_info->format = acq_io_buf->format; + plane_info->iova_addr = iova_addr + + ((io_cfg_ptr[j].planes[k].plane_stride * + io_cfg_ptr[j].planes[k].slice_height) * k); + + plane_info->stride = + io_cfg_ptr[j].planes[k].plane_stride; + + /* Width for WE has to be updated in number of pixels */ + if (acq_io_buf->direction == CAM_BUF_OUTPUT) { + /* PLAIN 128/8 = 16 Bytes per pixel */ + plane_info->width = + io_cfg_ptr[j].planes[k].plane_stride/16; + } else { + /* FE width should be in bytes */ + plane_info->width = + io_cfg_ptr[j].planes[k].plane_stride; + } + plane_info->height = + io_cfg_ptr[j].planes[k].height; + plane_info->len = len; + plane_info->alignment = alignment; + 
} + } + } + + return rc; +} + +static void cam_cre_device_timer_reset(struct cam_cre_hw_mgr *hw_mgr) +{ + + if (hw_mgr->clk_info.watch_dog) { + CAM_DBG(CAM_CRE, "reset timer"); + crm_timer_reset(hw_mgr->clk_info.watch_dog); + hw_mgr->clk_info.watch_dog_reset_counter++; + } +} + +static int cam_cre_mgr_reset_hw(void) +{ + struct cam_cre_hw_mgr *hw_mgr = cre_hw_mgr; + int i, rc = 0; + + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_RESET, + NULL, 0); + if (rc) { + CAM_ERR(CAM_CRE, "CRE Reset failed: %d", rc); + return rc; + } + } + + return rc; +} + + +static void cam_cre_ctx_wait_for_idle_irq(struct cam_cre_ctx *ctx) +{ + int rc; + + if (ctx->ctx_state != CRE_CTX_STATE_ACQUIRED) { + CAM_ERR(CAM_CRE, "ctx %u is in %d state", + ctx->ctx_id, ctx->ctx_state); + return; + } + + rc = cam_common_wait_for_completion_timeout( + &ctx->cre_top->idle_done, + msecs_to_jiffies(CAM_CRE_RESPONSE_TIME_THRESHOLD)); + if (!rc) { + cam_cre_device_timer_reset(cre_hw_mgr); + } else { + CAM_DBG(CAM_CRE, "IDLE done for req idx %d", + ctx->last_req_idx); + } +} + +static int cam_cre_mgr_create_cre_reg_buf(struct cam_cre_hw_mgr *hw_mgr, + struct cam_packet *packet, + struct cam_hw_prepare_update_args *prepare_args, + struct cam_cre_ctx *ctx_data, uint32_t req_idx) +{ + int i, rc = 0; + struct cam_cre_dev_prepare_req prepare_req; + + prepare_req.ctx_data = ctx_data; + prepare_req.hw_mgr = hw_mgr; + prepare_req.packet = packet; + prepare_req.prepare_args = prepare_args; + prepare_req.req_idx = req_idx; + + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, + CRE_HW_PREPARE, &prepare_req, sizeof(prepare_req)); + if (rc) { + CAM_ERR(CAM_CRE, "CRE Dev prepare failed: %d", rc); + goto end; + } + } + +end: + return rc; +} + +static int cam_cre_mgr_calculate_num_path( + struct cam_cre_clk_bw_req_internal_v2 *clk_info, + struct 
cam_cre_ctx *ctx_data) +{ + int i, path_index = 0; + + for (i = 0; i < CAM_CRE_MAX_PER_PATH_VOTES; i++) { + if ((clk_info->axi_path[i].path_data_type < + CAM_AXI_PATH_DATA_CRE_START_OFFSET) || + (clk_info->axi_path[i].path_data_type > + CAM_AXI_PATH_DATA_CRE_MAX_OFFSET) || + ((clk_info->axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_CRE_START_OFFSET) >= + CAM_CRE_MAX_PER_PATH_VOTES)) { + CAM_DBG(CAM_CRE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i].path_data_type, + CAM_AXI_PATH_DATA_CRE_START_OFFSET, + CAM_CRE_MAX_PER_PATH_VOTES); + continue; + } + + path_index = clk_info->axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_CRE_START_OFFSET; + + CAM_DBG(CAM_CRE, + "clk_info: i[%d]: [%s %s] bw [%lld %lld] num_path: %d", + i, + cam_cpas_axi_util_trans_type_to_string( + clk_info->axi_path[i].transac_type), + cam_cpas_axi_util_path_type_to_string( + clk_info->axi_path[i].path_data_type), + clk_info->axi_path[i].camnoc_bw, + clk_info->axi_path[i].mnoc_ab_bw, + clk_info->num_paths); + } + return 0; +} + +static int cam_cre_update_cpas_vote(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data) +{ + int i = 0; + struct cam_cre_clk_info *clk_info; + struct cam_cre_dev_bw_update bw_update = {{0}, {0}, 0, 0}; + + clk_info = &hw_mgr->clk_info; + + bw_update.ahb_vote.type = CAM_VOTE_DYNAMIC; + bw_update.ahb_vote.vote.freq = 0; + bw_update.ahb_vote_valid = false; + + bw_update.axi_vote.num_paths = clk_info->num_paths; + memcpy(&bw_update.axi_vote.axi_path[0], + &clk_info->axi_path[0], + bw_update.axi_vote.num_paths * + sizeof(struct cam_cpas_axi_per_path_bw_vote)); + + bw_update.axi_vote_valid = true; + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, + CRE_HW_BW_UPDATE, + &bw_update, sizeof(bw_update)); + } + return 0; +} + +static int cam_cre_mgr_remove_bw(struct cam_cre_hw_mgr *hw_mgr, int ctx_id) +{ + int path_index, i, rc = 0; + struct cam_cre_ctx 
*ctx_data = NULL; + struct cam_cre_clk_info *hw_mgr_clk_info; + + ctx_data = &hw_mgr->ctx[ctx_id]; + hw_mgr_clk_info = &hw_mgr->clk_info; + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = + ctx_data->clk_info.axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_CRE_START_OFFSET; + if (path_index >= CAM_CRE_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_CRE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i].path_data_type, + CAM_AXI_PATH_DATA_CRE_START_OFFSET, + CAM_CRE_MAX_PER_PATH_VOTES); + continue; + } + + hw_mgr_clk_info->axi_path[path_index].camnoc_bw -= + ctx_data->clk_info.axi_path[i].camnoc_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw -= + ctx_data->clk_info.axi_path[i].ddr_ab_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw -= + ctx_data->clk_info.axi_path[i].ddr_ib_bw; + } + + rc = cam_cre_update_cpas_vote(hw_mgr, ctx_data); + + return rc; +} + +static bool cam_cre_update_bw_v2(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, + struct cam_cre_clk_info *hw_mgr_clk_info, + struct cam_cre_clk_bw_req_internal_v2 *clk_info, + bool busy) +{ + int i, path_index; + bool update_required = true; + + for (i = 0; i < clk_info->num_paths; i++) + CAM_DBG(CAM_CRE, "clk_info camnoc = %lld busy = %d", + clk_info->axi_path[i].camnoc_bw, busy); + + if (clk_info->num_paths == ctx_data->clk_info.num_paths) { + update_required = false; + for (i = 0; i < clk_info->num_paths; i++) { + if ((clk_info->axi_path[i].transac_type == + ctx_data->clk_info.axi_path[i].transac_type) && + (clk_info->axi_path[i].path_data_type == + ctx_data->clk_info.axi_path[i].path_data_type) && + (clk_info->axi_path[i].camnoc_bw == + ctx_data->clk_info.axi_path[i].camnoc_bw) && + (clk_info->axi_path[i].mnoc_ab_bw == + 
ctx_data->clk_info.axi_path[i].mnoc_ab_bw)) { + continue; + } else { + update_required = true; + break; + } + } + } + if (!update_required) { + CAM_DBG(CAM_CRE, + "Incoming BW hasn't changed, no update required"); + return false; + } + + /* + * Remove previous vote of this context from hw mgr first. + * hw_mgr_clk_info has all valid paths, with each path in its own index + */ + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = + ctx_data->clk_info.axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_CRE_START_OFFSET; + + if (path_index >= CAM_CRE_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_CRE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i].path_data_type, + CAM_AXI_PATH_DATA_CRE_START_OFFSET, + CAM_CRE_MAX_PER_PATH_VOTES); + continue; + } + + hw_mgr_clk_info->axi_path[path_index].camnoc_bw -= + ctx_data->clk_info.axi_path[i].camnoc_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw -= + ctx_data->clk_info.axi_path[i].ddr_ab_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw -= + ctx_data->clk_info.axi_path[i].ddr_ib_bw; + } + + ctx_data->clk_info.num_paths = + cam_cre_mgr_calculate_num_path(clk_info, ctx_data); + + memcpy(&ctx_data->clk_info.axi_path[0], + &clk_info->axi_path[0], + clk_info->num_paths * sizeof(struct cam_cpas_axi_per_path_bw_vote)); + + /* + * Add new vote of this context in hw mgr. 
+ * hw_mgr_clk_info has all paths, with each path in its own index + */ + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = + ctx_data->clk_info.axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_CRE_START_OFFSET; + + if (path_index >= CAM_CRE_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_CRE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i].path_data_type, + CAM_AXI_PATH_DATA_CRE_START_OFFSET, + CAM_CRE_MAX_PER_PATH_VOTES); + continue; + } + + hw_mgr_clk_info->axi_path[path_index].path_data_type = + ctx_data->clk_info.axi_path[i].path_data_type; + hw_mgr_clk_info->axi_path[path_index].transac_type = + ctx_data->clk_info.axi_path[i].transac_type; + hw_mgr_clk_info->axi_path[path_index].camnoc_bw += + ctx_data->clk_info.axi_path[i].camnoc_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw += + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw += + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw += + ctx_data->clk_info.axi_path[i].ddr_ab_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw += + ctx_data->clk_info.axi_path[i].ddr_ib_bw; + CAM_DBG(CAM_CRE, + "Consolidate Path Vote : Dev[%s] i[%d] path_idx[%d] : [%s %s] [%lld %lld]", + ctx_data->cre_acquire.dev_name, + i, path_index, + cam_cpas_axi_util_trans_type_to_string( + hw_mgr_clk_info->axi_path[path_index].transac_type), + cam_cpas_axi_util_path_type_to_string( + hw_mgr_clk_info->axi_path[path_index].path_data_type), + hw_mgr_clk_info->axi_path[path_index].camnoc_bw, + hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw); + } + + if (hw_mgr_clk_info->num_paths < ctx_data->clk_info.num_paths) + hw_mgr_clk_info->num_paths = ctx_data->clk_info.num_paths; + + return true; +} + +static bool cam_cre_check_bw_update(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, int idx) +{ + bool busy = false, bw_updated = false; + int i; + struct cam_cre_clk_bw_req_internal_v2 *clk_info_v2; + 
struct cam_cre_clk_info *hw_mgr_clk_info; + uint64_t req_id; + + hw_mgr_clk_info = &hw_mgr->clk_info; + req_id = ctx_data->req_list[idx]->request_id; + if (ctx_data->req_cnt > 1) + busy = true; + + clk_info_v2 = &ctx_data->req_list[idx]->clk_info_v2; + + bw_updated = cam_cre_update_bw_v2(hw_mgr, ctx_data, + hw_mgr_clk_info, clk_info_v2, busy); + for (i = 0; i < hw_mgr_clk_info->num_paths; i++) { + CAM_DBG(CAM_CRE, + "Final path_type: %s, transac_type: %s, camnoc_bw = %lld mnoc_ab_bw = %lld, mnoc_ib_bw = %lld, device: %s", + cam_cpas_axi_util_path_type_to_string( + hw_mgr_clk_info->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + hw_mgr_clk_info->axi_path[i].transac_type), + hw_mgr_clk_info->axi_path[i].camnoc_bw, + hw_mgr_clk_info->axi_path[i].mnoc_ab_bw, + hw_mgr_clk_info->axi_path[i].mnoc_ib_bw, + ctx_data->cre_acquire.dev_name); + } + + return bw_updated; +} + +static int cam_cre_mgr_handle_config_err( + struct cam_hw_config_args *config_args, + struct cam_cre_ctx *ctx_data) +{ + struct cam_hw_done_event_data err_data; + struct cam_cre_request *cre_req; + uint32_t req_idx; + + cre_req = config_args->priv; + + err_data.request_id = cre_req->request_id; + err_data.evt_param = CAM_SYNC_CRE_EVENT_CONFIG_ERR; + ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_CTX_EVT_ID_ERROR, + &err_data); + + req_idx = cre_req->req_idx; + cre_req->request_id = 0; + cam_cre_free_io_config(ctx_data->req_list[req_idx]); + CAM_MEM_ZFREE(ctx_data->req_list[req_idx], sizeof(struct cam_cre_request)); + ctx_data->req_list[req_idx] = NULL; + clear_bit(req_idx, ctx_data->bitmap); + return 0; +} + +static bool cam_cre_is_pending_request(struct cam_cre_ctx *ctx_data) +{ + return !bitmap_empty(ctx_data->bitmap, CAM_CTX_REQ_MAX); +} + +static int cam_cre_supported_clk_rates(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data) +{ + int i; + struct cam_hw_soc_info *soc_info; + struct cam_hw_intf *dev_intf = NULL; + struct cam_hw_info *dev = NULL; + + dev_intf = 
hw_mgr->cre_dev_intf[0]; + if (!dev_intf) { + CAM_ERR(CAM_CRE, "dev_intf is invalid"); + return -EINVAL; + } + + dev = (struct cam_hw_info *)dev_intf->hw_priv; + soc_info = &dev->soc_info; + + for (i = 0; i < CAM_MAX_VOTE; i++) { + ctx_data->clk_info.clk_rate[i] = + soc_info->clk_rate[i][soc_info->src_clk_idx]; + CAM_DBG(CAM_CRE, "clk_info[%d] = %d", + i, ctx_data->clk_info.clk_rate[i]); + } + + return 0; +} + +static int cam_cre_ctx_clk_info_init(struct cam_cre_ctx *ctx_data) +{ + int i; + + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + + for (i = 0; i < CAM_CRE_MAX_PER_PATH_VOTES; i++) { + ctx_data->clk_info.axi_path[i].camnoc_bw = 0; + ctx_data->clk_info.axi_path[i].mnoc_ab_bw = 0; + ctx_data->clk_info.axi_path[i].mnoc_ib_bw = 0; + } + + cam_cre_supported_clk_rates(cre_hw_mgr, ctx_data); + + return 0; +} + +static int32_t cam_cre_deinit_idle_clk(void *priv, void *data) +{ + struct cam_cre_hw_mgr *hw_mgr = (struct cam_cre_hw_mgr *)priv; + struct cre_clk_work_data *task_data = (struct cre_clk_work_data *)data; + struct cam_cre_clk_info *clk_info = + (struct cam_cre_clk_info *)task_data->data; + uint32_t id; + uint32_t i; + struct cam_cre_ctx *ctx_data; + struct cam_hw_intf *dev_intf = NULL; + int rc = 0; + bool busy = false; + + clk_info->base_clk = 0; + clk_info->curr_clk = 0; + clk_info->over_clked = 0; + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + for (i = 0; i < CRE_CTX_MAX; i++) { + ctx_data = &hw_mgr->ctx[i]; + mutex_lock(&ctx_data->ctx_mutex); + if (ctx_data->ctx_state == CRE_CTX_STATE_ACQUIRED) { + busy = cam_cre_is_pending_request(ctx_data); + if (busy) { + mutex_unlock(&ctx_data->ctx_mutex); + break; + } + cam_cre_ctx_clk_info_init(ctx_data); + } + mutex_unlock(&ctx_data->ctx_mutex); + } + + if (busy) { + cam_cre_device_timer_reset(hw_mgr); + rc = -EBUSY; + goto done; + } + + dev_intf = hw_mgr->cre_dev_intf[0]; + id = CRE_HW_CLK_DISABLE; + + CAM_DBG(CAM_CRE, "Disable %d", clk_info->hw_type); + + 
dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id, NULL, 0); + +done: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static void cam_cre_device_timer_cb(struct timer_list *timer_data) +{ + unsigned long flags; + struct crm_workq_task *task; + struct cre_clk_work_data *task_data; + struct cam_req_mgr_timer *timer = + container_of(timer_data, struct cam_req_mgr_timer, sys_timer); + + spin_lock_irqsave(&cre_hw_mgr->hw_mgr_lock, flags); + task = cam_req_mgr_workq_get_task(cre_hw_mgr->timer_work); + if (!task) { + CAM_ERR(CAM_CRE, "no empty task"); + spin_unlock_irqrestore(&cre_hw_mgr->hw_mgr_lock, flags); + return; + } + + task_data = (struct cre_clk_work_data *)task->payload; + task_data->data = timer->parent; + task_data->type = CRE_WORKQ_TASK_MSG_TYPE; + task->process_cb = cam_cre_deinit_idle_clk; + cam_req_mgr_workq_enqueue_task(task, cre_hw_mgr, + CRM_TASK_PRIORITY_0); + spin_unlock_irqrestore(&cre_hw_mgr->hw_mgr_lock, flags); +} + +static int cam_cre_device_timer_start(struct cam_cre_hw_mgr *hw_mgr) +{ + int rc = 0; + + if (!hw_mgr->clk_info.watch_dog) { + rc = crm_timer_init(&hw_mgr->clk_info.watch_dog, + CRE_DEVICE_IDLE_TIMEOUT, &hw_mgr->clk_info, + &cam_cre_device_timer_cb); + if (rc) + CAM_ERR(CAM_CRE, "Failed to start timer"); + hw_mgr->clk_info.watch_dog_reset_counter = 0; + } + + return rc; +} + +static void cam_cre_device_timer_stop(struct cam_cre_hw_mgr *hw_mgr) +{ + if (hw_mgr->clk_info.watch_dog) { + hw_mgr->clk_info.watch_dog_reset_counter = 0; + crm_timer_exit(&hw_mgr->clk_info.watch_dog); + hw_mgr->clk_info.watch_dog = NULL; + } +} + +static int cam_cre_mgr_process_cmd(void *priv, void *data) +{ + int rc = 0, i = 0; + struct cre_cmd_work_data *task_data = NULL; + struct cam_cre_ctx *ctx_data; + struct cam_cre_request *cre_req; + struct cam_cre_hw_mgr *hw_mgr = cre_hw_mgr; + uint32_t num_batch; + + if (!data || !priv) { + CAM_ERR(CAM_CRE, "Invalid params%pK %pK", data, priv); + return -EINVAL; + } + + ctx_data = priv; + task_data = 
(struct cre_cmd_work_data *)data; + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + if (ctx_data->ctx_state != CRE_CTX_STATE_ACQUIRED) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_CRE, "ctx id :%u is not in use", + ctx_data->ctx_id); + return -EINVAL; + } + + if (task_data->req_idx >= CAM_CTX_REQ_MAX) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_CRE, "Invalid reqIdx = %llu", + task_data->req_idx); + return -EINVAL; + } + + cre_req = ctx_data->req_list[task_data->req_idx]; + if (cre_req->request_id > ctx_data->last_flush_req) + ctx_data->last_flush_req = 0; + + if (cre_req->request_id <= ctx_data->last_flush_req) { + CAM_WARN(CAM_CRE, + "request %lld has been flushed, reject packet", + cre_req->request_id, ctx_data->last_flush_req); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + if (!cam_cre_is_pending_request(ctx_data)) { + CAM_WARN(CAM_CRE, "no pending req, req %lld last flush %lld", + cre_req->request_id, ctx_data->last_flush_req); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + hw_mgr = task_data->data; + num_batch = cre_req->num_batch; + + CAM_DBG(CAM_CRE, + "Going to configure cre for req %d, req_idx %d num_batch %d", + cre_req->request_id, cre_req->req_idx, num_batch); + + for (i = 0; i < num_batch; i++) { + if (i != 0) { + rc = cam_common_wait_for_completion_timeout( + &ctx_data->cre_top->bufdone, + msecs_to_jiffies(100)); + if (!rc) { + cam_cre_device_timer_reset(cre_hw_mgr); + CAM_ERR(CAM_CRE, + "Timedout waiting for bufdone on last frame"); + return -ETIMEDOUT; + } else { + reinit_completion(&ctx_data->cre_top->bufdone); + CAM_INFO(CAM_CRE, + "done for frame %d in batch of %d", + i-1, num_batch); + } + } + + cam_cre_mgr_update_reg_set(hw_mgr, cre_req, i); + cam_cre_ctx_wait_for_idle_irq(ctx_data); + } + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +} + +static int cam_get_valid_ctx_id(void) +{ + struct cam_cre_hw_mgr *hw_mgr = cre_hw_mgr; + int i; + + for (i = 0; i < CRE_CTX_MAX; i++) { + if 
(hw_mgr->ctx[i].ctx_state == CRE_CTX_STATE_ACQUIRED) + break; + } + + if (i == CRE_CTX_MAX) + return -EINVAL; + + return i; +} + +static int32_t cam_cre_mgr_process_msg(void *priv, void *data) +{ + struct cre_msg_work_data *task_data; + struct cam_hw_done_event_data buf_data; + struct cam_cre_hw_mgr *hw_mgr; + struct cam_cre_ctx *ctx; + struct cam_cre_request *active_req; + struct cam_cre_irq_data irq_data; + int32_t ctx_id; + uint32_t evt_id; + uint32_t active_req_idx; + int rc = 0; + + if (!data || !priv) { + CAM_ERR(CAM_CRE, "Invalid data"); + return -EINVAL; + } + + task_data = data; + hw_mgr = priv; + ctx_id = cam_get_valid_ctx_id(); + if (ctx_id < 0) { + CAM_ERR(CAM_CRE, "No valid context to handle error"); + return ctx_id; + } + + ctx = &hw_mgr->ctx[ctx_id]; + + mutex_lock(&ctx->ctx_mutex); + irq_data = task_data->irq_data; + if (ctx->ctx_state != CRE_CTX_STATE_ACQUIRED) { + CAM_DBG(CAM_CRE, "ctx id: %d not in right state: %d", + ctx_id, ctx->ctx_state); + mutex_unlock(&ctx->ctx_mutex); + return -EINVAL; + } + + active_req_idx = find_next_bit(ctx->bitmap, ctx->bits, ctx->last_done_req_idx); + CAM_DBG(CAM_CRE, "active_req_idx %d last_done_req_idx %d", + active_req_idx, ctx->last_done_req_idx); + + active_req = ctx->req_list[active_req_idx]; + if (!active_req) + CAM_ERR(CAM_CRE, "Active req cannot be null"); + + if (irq_data.error) { + evt_id = CAM_CTX_EVT_ID_ERROR; + buf_data.evt_param = CAM_SYNC_CRE_EVENT_HW_ERR; + buf_data.request_id = active_req->request_id; + ctx->ctxt_event_cb(ctx->context_priv, evt_id, &buf_data); + rc = cam_cre_mgr_reset_hw(); + clear_bit(active_req_idx, ctx->bitmap); + cam_cre_free_io_config(active_req); + CAM_MEM_ZFREE((void *)active_req, sizeof(struct cam_cre_request)); + ctx->req_cnt--; + ctx->req_list[active_req_idx] = NULL; + } else if (irq_data.wr_buf_done) { + /* Signal Buf done */ + active_req->frames_done++; + CAM_DBG(CAM_CRE, "Received frames_done %d num_batch %d req id %d", + active_req->frames_done, active_req->num_batch, 
+ active_req->request_id); + complete(&ctx->cre_top->bufdone); + if (active_req->frames_done == active_req->num_batch) { + ctx->last_done_req_idx = active_req_idx; + CAM_DBG(CAM_CRE, "signaling buff done for req %d", + active_req->request_id); + evt_id = CAM_CTX_EVT_ID_SUCCESS; + buf_data.evt_param = CAM_SYNC_COMMON_EVENT_SUCCESS; + buf_data.request_id = active_req->request_id; + ctx->ctxt_event_cb(ctx->context_priv, evt_id, &buf_data); + clear_bit(active_req_idx, ctx->bitmap); + cam_cre_free_io_config(active_req); + CAM_MEM_ZFREE((void *)active_req, sizeof(struct cam_cre_request)); + ctx->req_cnt--; + ctx->req_list[active_req_idx] = NULL; + } + } + mutex_unlock(&ctx->ctx_mutex); + return rc; +} + +static int cam_cre_get_actual_clk_rate_idx( + struct cam_cre_ctx *ctx_data, uint32_t base_clk) +{ + int i; + + for (i = 0; i < CAM_MAX_VOTE; i++) + if (ctx_data->clk_info.clk_rate[i] >= base_clk) + return i; + + /* + * Caller has to ensure returned index is within array + * size bounds while accessing that index. 
+ */ + + return i; +} + +static bool cam_cre_is_over_clk(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, + struct cam_cre_clk_info *hw_mgr_clk_info) +{ + int base_clk_idx; + int curr_clk_idx; + + base_clk_idx = cam_cre_get_actual_clk_rate_idx(ctx_data, + hw_mgr_clk_info->base_clk); + + curr_clk_idx = cam_cre_get_actual_clk_rate_idx(ctx_data, + hw_mgr_clk_info->curr_clk); + + CAM_DBG(CAM_CRE, "bc_idx = %d cc_idx = %d %d %d", + base_clk_idx, curr_clk_idx, hw_mgr_clk_info->base_clk, + hw_mgr_clk_info->curr_clk); + + if (curr_clk_idx > base_clk_idx) + return true; + + return false; +} + +static int cam_cre_get_lower_clk_rate(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, uint32_t base_clk) +{ + int i; + + i = cam_cre_get_actual_clk_rate_idx(ctx_data, base_clk); + + while (i > 0) { + if (ctx_data->clk_info.clk_rate[i - 1]) + return ctx_data->clk_info.clk_rate[i - 1]; + i--; + } + + CAM_DBG(CAM_CRE, "Already clk at lower level"); + + return base_clk; +} + +static int cam_cre_get_next_clk_rate(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, uint32_t base_clk) +{ + int i; + + i = cam_cre_get_actual_clk_rate_idx(ctx_data, base_clk); + + while (i < CAM_MAX_VOTE - 1) { + if (ctx_data->clk_info.clk_rate[i + 1]) + return ctx_data->clk_info.clk_rate[i + 1]; + i++; + } + + CAM_DBG(CAM_CRE, "Already clk at higher level"); + return base_clk; +} + +static bool cam_cre_update_clk_overclk_free(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, + struct cam_cre_clk_info *hw_mgr_clk_info, + struct cam_cre_clk_bw_request *clk_info, + uint32_t base_clk) +{ + int rc = false; + + /* + * In caseof no pending packets case + * 1. In caseof overclk cnt is less than threshold, increase + * overclk count and no update in the clock rate + * 2. In caseof overclk cnt is greater than or equal to threshold + * then lower clock rate by one level and update hw_mgr current + * clock value. + * a. 
In case of new clock rate greater than sum of clock + * rates, reset overclk count value to zero if it is + * overclock + * b. if it is less than sum of base clocks then go to next + * level of clock and make overclk count to zero + * c. if it is same as sum of base clock rates update overclock + * cnt to 0 + */ + if (hw_mgr_clk_info->over_clked < hw_mgr_clk_info->threshold) { + hw_mgr_clk_info->over_clked++; + rc = false; + } else { + hw_mgr_clk_info->curr_clk = + cam_cre_get_lower_clk_rate(hw_mgr, ctx_data, + hw_mgr_clk_info->curr_clk); + if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) { + if (cam_cre_is_over_clk(hw_mgr, ctx_data, + hw_mgr_clk_info)) + hw_mgr_clk_info->over_clked = 0; + } else if (hw_mgr_clk_info->curr_clk < + hw_mgr_clk_info->base_clk) { + hw_mgr_clk_info->curr_clk = + cam_cre_get_next_clk_rate(hw_mgr, ctx_data, + hw_mgr_clk_info->curr_clk); + hw_mgr_clk_info->over_clked = 0; + } else if (hw_mgr_clk_info->curr_clk == + hw_mgr_clk_info->base_clk) { + hw_mgr_clk_info->over_clked = 0; + } + rc = true; + } + + return rc; +} + +static int cam_cre_calc_total_clk(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_clk_info *hw_mgr_clk_info, uint32_t dev_type) +{ + int i; + struct cam_cre_ctx *ctx_data; + + hw_mgr_clk_info->base_clk = 0; + for (i = 0; i < CRE_CTX_MAX; i++) { + ctx_data = &hw_mgr->ctx[i]; + if (ctx_data->ctx_state == CRE_CTX_STATE_ACQUIRED) + hw_mgr_clk_info->base_clk += + ctx_data->clk_info.base_clk; + } + + return 0; +} + +static int cam_cre_get_actual_clk_rate(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, uint32_t base_clk) +{ + int i; + + for (i = 0; i < CAM_MAX_VOTE; i++) + if (ctx_data->clk_info.clk_rate[i] >= base_clk) + return ctx_data->clk_info.clk_rate[i]; + + return base_clk; +} + +static bool cam_cre_update_clk_busy(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, + struct cam_cre_clk_info *hw_mgr_clk_info, + struct cam_cre_clk_bw_request *clk_info, + uint32_t base_clk) +{ + uint32_t 
next_clk_level; + uint32_t actual_clk; + bool rc = false; + + /* 1. if current request frame cycles(fc) are more than previous + * frame fc + * Calculate the new base clock. + * if sum of base clocks are more than next available clk level + * Update clock rate, change curr_clk_rate to sum of base clock + * rates and make over_clked to zero + * else + * Update clock rate to next level, update curr_clk_rate and make + * overclked cnt to zero + * 2. if current fc is less than or equal to previous frame fc + * Still Bump up the clock to next available level + * if it is available, then update clock, make overclk cnt to + * zero. If the clock is already at highest clock rate then + * no need to update the clock + */ + ctx_data->clk_info.base_clk = base_clk; + hw_mgr_clk_info->over_clked = 0; + if (clk_info->frame_cycles > ctx_data->clk_info.curr_fc) { + cam_cre_calc_total_clk(hw_mgr, hw_mgr_clk_info, + ctx_data->cre_acquire.dev_type); + actual_clk = cam_cre_get_actual_clk_rate(hw_mgr, + ctx_data, base_clk); + if (hw_mgr_clk_info->base_clk > actual_clk) { + hw_mgr_clk_info->curr_clk = hw_mgr_clk_info->base_clk; + } else { + next_clk_level = cam_cre_get_next_clk_rate(hw_mgr, + ctx_data, hw_mgr_clk_info->curr_clk); + hw_mgr_clk_info->curr_clk = next_clk_level; + } + rc = true; + } else { + next_clk_level = + cam_cre_get_next_clk_rate(hw_mgr, ctx_data, + hw_mgr_clk_info->curr_clk); + if (hw_mgr_clk_info->curr_clk < next_clk_level) { + hw_mgr_clk_info->curr_clk = next_clk_level; + rc = true; + } + } + ctx_data->clk_info.curr_fc = clk_info->frame_cycles; + + return rc; +} + +static bool cam_cre_update_clk_free(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, + struct cam_cre_clk_info *hw_mgr_clk_info, + struct cam_cre_clk_bw_request *clk_info, + uint32_t base_clk) +{ + int rc = false; + bool over_clocked = false; + + ctx_data->clk_info.curr_fc = clk_info->frame_cycles; + ctx_data->clk_info.base_clk = base_clk; + cam_cre_calc_total_clk(hw_mgr, hw_mgr_clk_info, + 
ctx_data->cre_acquire.dev_type); + + /* + * Current clock is not always sum of base clocks, due to + * clock scales update to next higher or lower levels, it + * equals to one of discrete clock values supported by hardware. + * So even current clock is higher than sum of base clocks, we + * can not consider it is over clocked. if it is greater than + * discrete clock level then only it is considered as over clock. + * 1. Handle over clock case + * 2. If current clock is less than sum of base clocks + * update current clock + * 3. If current clock is same as sum of base clocks no action + */ + + over_clocked = cam_cre_is_over_clk(hw_mgr, ctx_data, + hw_mgr_clk_info); + + if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk && + over_clocked) { + rc = cam_cre_update_clk_overclk_free(hw_mgr, ctx_data, + hw_mgr_clk_info, clk_info, base_clk); + } else if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) { + hw_mgr_clk_info->over_clked = 0; + rc = false; + } else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) { + hw_mgr_clk_info->curr_clk = cam_cre_get_actual_clk_rate(hw_mgr, + ctx_data, hw_mgr_clk_info->base_clk); + rc = true; + } + + return rc; +} + +static uint32_t cam_cre_mgr_calc_base_clk(uint32_t frame_cycles, + uint64_t budget) +{ + uint64_t mul = 1000000000; + uint64_t base_clk = frame_cycles * mul; + + do_div(base_clk, budget); + + CAM_DBG(CAM_CRE, "budget = %lld fc = %d ib = %lld base_clk = %lld", + budget, frame_cycles, + (long long)(frame_cycles * mul), base_clk); + + return base_clk; +} + +static bool cam_cre_check_clk_update(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, int idx) +{ + bool busy = false, rc = false; + uint64_t base_clk; + struct cam_cre_clk_bw_request *clk_info; + uint64_t req_id; + struct cam_cre_clk_info *hw_mgr_clk_info; + + cam_cre_device_timer_reset(hw_mgr); + hw_mgr_clk_info = &hw_mgr->clk_info; + req_id = ctx_data->req_list[idx]->request_id; + if (ctx_data->req_cnt > 1) + busy = true; + + 
CAM_DBG(CAM_CRE, "busy = %d req_id = %lld", busy, req_id); + + clk_info = &ctx_data->req_list[idx]->clk_info; + + /* Calculate base clk rate */ + base_clk = cam_cre_mgr_calc_base_clk( + clk_info->frame_cycles, clk_info->budget_ns); + ctx_data->clk_info.rt_flag = clk_info->rt_flag; + + if (cre_hw_mgr->cre_debug_clk) + return cam_cre_debug_clk_update(hw_mgr_clk_info); + + if (busy) + rc = cam_cre_update_clk_busy(hw_mgr, ctx_data, + hw_mgr_clk_info, clk_info, base_clk); + else + rc = cam_cre_update_clk_free(hw_mgr, ctx_data, + hw_mgr_clk_info, clk_info, base_clk); + + CAM_DBG(CAM_CRE, "bc = %d cc = %d busy = %d overclk = %d uc = %d", + hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk, + busy, hw_mgr_clk_info->over_clked, rc); + + return rc; +} + +static int cam_cre_mgr_update_clk_rate(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data) +{ + struct cam_cre_dev_clk_update clk_upd_cmd; + int i; + + clk_upd_cmd.clk_rate = hw_mgr->clk_info.curr_clk; + + CAM_DBG(CAM_PERF, "clk_rate %u for dev_type %d", clk_upd_cmd.clk_rate, + ctx_data->cre_acquire.dev_type); + + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, + CRE_HW_CLK_UPDATE, + &clk_upd_cmd, sizeof(clk_upd_cmd)); + } + + return 0; +} + +static int cam_cre_mgr_cre_clk_remove(struct cam_cre_hw_mgr *hw_mgr, int ctx_id) +{ + struct cam_cre_ctx *ctx_data = NULL; + struct cam_cre_clk_info *hw_mgr_clk_info; + + ctx_data = &hw_mgr->ctx[ctx_id]; + hw_mgr_clk_info = &hw_mgr->clk_info; + + if (hw_mgr_clk_info->base_clk >= ctx_data->clk_info.base_clk) + hw_mgr_clk_info->base_clk -= ctx_data->clk_info.base_clk; + + /* reset clock info */ + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + return 0; +} + + +static int cam_cre_mgr_cre_clk_update(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, int idx) +{ + int rc = 0; + + if (cam_cre_check_clk_update(hw_mgr, ctx_data, idx)) + rc = 
cam_cre_mgr_update_clk_rate(hw_mgr, ctx_data); + + if (cam_cre_check_bw_update(hw_mgr, ctx_data, idx)) + rc |= cam_cre_update_cpas_vote(hw_mgr, ctx_data); + + return rc; +} + +int32_t cam_cre_hw_mgr_cb(void *irq_data, int32_t result_size, void *data) +{ + int32_t rc = 0; + unsigned long flags; + struct cam_cre_hw_mgr *hw_mgr = data; + struct crm_workq_task *task; + struct cre_msg_work_data *task_data; + struct cam_cre_irq_data *local_irq_data = irq_data; + + if (!data) { + CAM_ERR(CAM_CRE, "irq cb data is NULL"); + return rc; + } + + spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags); + task = cam_req_mgr_workq_get_task(cre_hw_mgr->msg_work); + if (!task) { + CAM_ERR(CAM_CRE, "no empty task"); + spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags); + return -ENOMEM; + } + + task_data = (struct cre_msg_work_data *)task->payload; + task_data->data = hw_mgr; + task_data->irq_data = *local_irq_data; + task_data->type = CRE_WORKQ_TASK_MSG_TYPE; + task->process_cb = cam_cre_mgr_process_msg; + rc = cam_req_mgr_workq_enqueue_task(task, cre_hw_mgr, + CRM_TASK_PRIORITY_0); + spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags); + + return rc; +} + +static int cam_cre_mgr_process_io_cfg(struct cam_cre_hw_mgr *hw_mgr, + struct cam_packet *packet, + struct cam_cre_ctx *ctx_data, + uint32_t req_idx, + struct cam_hw_prepare_update_args *prep_arg) +{ + int i, j = 0, k = 0, l, rc = 0; + struct cre_io_buf *io_buf; + int32_t sync_in_obj[CRE_MAX_IN_RES]; + int32_t merged_sync_in_obj; + struct cam_cre_request *cre_request; + + rc = cam_cre_mgr_process_cmd_io_buf_req(hw_mgr, packet, ctx_data, + req_idx, prep_arg->buf_tracker); + if (rc) { + CAM_ERR(CAM_CRE, "Process CRE cmd io request is failed: %d", + rc); + goto end; + } + + cre_request = ctx_data->req_list[req_idx]; + prep_arg->num_out_map_entries = 0; + prep_arg->num_in_map_entries = 0; + + CAM_DBG(CAM_CRE, "E: req_idx = %u %x num batch%d", + req_idx, packet, cre_request->num_batch); + + for (i = 0; i < cre_request->num_batch; i++) { + 
for (l = 0; l < cre_request->num_io_bufs[i]; l++) { + io_buf = cre_request->io_buf[i][l]; + if (io_buf->direction == CAM_BUF_INPUT) { + if (io_buf->fence != -1) { + if (j < CRE_MAX_IN_RES) { + sync_in_obj[j++] = + io_buf->fence; + prep_arg->num_in_map_entries++; + } else { + CAM_ERR(CAM_CRE, + "reached max in_res %d %d", + io_buf->resource_type, + cre_request->request_id); + } + } else { + CAM_ERR(CAM_CRE, "Invalid fence %d %d", + io_buf->resource_type, + cre_request->request_id); + } + } else { + if (io_buf->fence != -1) { + prep_arg->out_map_entries[k].sync_id = + io_buf->fence; + k++; + prep_arg->num_out_map_entries++; + } + } + CAM_DBG(CAM_CRE, + "ctx_id: %u req_id: %llu dir[%d] %u, fence: %d", + ctx_data->ctx_id, packet->header.request_id, i, + io_buf->direction, io_buf->fence); + } + } + + if (prep_arg->num_in_map_entries > 1 && + prep_arg->num_in_map_entries <= CRE_MAX_IN_RES) + prep_arg->num_in_map_entries = + cam_common_util_remove_duplicate_arr( + sync_in_obj, prep_arg->num_in_map_entries); + + if (prep_arg->num_in_map_entries > 1 && + prep_arg->num_in_map_entries <= CRE_MAX_IN_RES) { + rc = cam_sync_merge(&sync_in_obj[0], + prep_arg->num_in_map_entries, &merged_sync_in_obj); + if (rc) { + prep_arg->num_out_map_entries = 0; + prep_arg->num_in_map_entries = 0; + return rc; + } + + cre_request->in_resource = merged_sync_in_obj; + + prep_arg->in_map_entries[0].sync_id = merged_sync_in_obj; + prep_arg->num_in_map_entries = 1; + CAM_DBG(CAM_CRE, "ctx_id: %u req_id: %llu Merged Sync obj: %d", + ctx_data->ctx_id, packet->header.request_id, + merged_sync_in_obj); + } else if (prep_arg->num_in_map_entries == 1) { + prep_arg->in_map_entries[0].sync_id = sync_in_obj[0]; + prep_arg->num_in_map_entries = 1; + cre_request->in_resource = 0; + } else { + CAM_ERR(CAM_CRE, + "Invalid count of input fences, count: %d", + prep_arg->num_in_map_entries); + prep_arg->num_in_map_entries = 0; + cre_request->in_resource = 0; + rc = -EINVAL; + } +end: + return rc; +} + +static bool 
cam_cre_mgr_is_valid_inconfig(struct cam_packet *packet) +{ + int i, num_in_map_entries = 0; + bool in_config_valid = false; + struct cam_buf_io_cfg *io_cfg_ptr = NULL; + + io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload_flex + + packet->io_configs_offset/4); + + for (i = 0 ; i < packet->num_io_configs; i++) + if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) + num_in_map_entries++; + + if (num_in_map_entries <= CRE_MAX_IN_RES) { + in_config_valid = true; + } else { + CAM_ERR(CAM_CRE, "In config entries(%u) more than allowed(%u)", + num_in_map_entries, CRE_MAX_IN_RES); + } + + CAM_DBG(CAM_CRE, "number of in_config info: %u %u %u %u", + packet->num_io_configs, CRE_MAX_IO_BUFS, + num_in_map_entries, CRE_MAX_IN_RES); + + return in_config_valid; +} + +static bool cam_cre_mgr_is_valid_outconfig(struct cam_packet *packet) +{ + int i, num_out_map_entries = 0; + bool out_config_valid = false; + struct cam_buf_io_cfg *io_cfg_ptr = NULL; + + io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload_flex + + packet->io_configs_offset/4); + + for (i = 0 ; i < packet->num_io_configs; i++) + if (io_cfg_ptr[i].direction == CAM_BUF_OUTPUT) + num_out_map_entries++; + + if (num_out_map_entries <= CRE_MAX_OUT_RES) { + out_config_valid = true; + } else { + CAM_ERR(CAM_CRE, "Out config entries(%u) more than allowed(%u)", + num_out_map_entries, CRE_MAX_OUT_RES); + } + + CAM_DBG(CAM_CRE, "number of out_config info: %u %u %u %u", + packet->num_io_configs, CRE_MAX_IO_BUFS, + num_out_map_entries, CRE_MAX_OUT_RES); + + return out_config_valid; +} + +static int cam_cre_mgr_pkt_validation(struct cam_packet *packet) +{ + if ((packet->header.op_code & 0xff) != + CAM_CRE_OPCODE_CONFIG) { + CAM_ERR(CAM_CRE, "Invalid Opcode in pkt: %d", + packet->header.op_code & 0xff); + return -EINVAL; + } + + if (!packet->num_io_configs || + packet->num_io_configs > CRE_MAX_IO_BUFS) { + CAM_ERR(CAM_CRE, "Invalid number of io configs: %d %d", + CRE_MAX_IO_BUFS, 
packet->num_io_configs); + return -EINVAL; + } + + if (packet->num_cmd_buf > CRE_PACKET_MAX_CMD_BUFS) { + CAM_ERR(CAM_CRE, "Invalid number of cmd buffers: %d %d", + CRE_PACKET_MAX_CMD_BUFS, packet->num_cmd_buf); + return -EINVAL; + } + + if (!cam_cre_mgr_is_valid_inconfig(packet) || + !cam_cre_mgr_is_valid_outconfig(packet)) { + return -EINVAL; + } + + CAM_DBG(CAM_CRE, "number of cmd/patch info: %u %u %u %u", + packet->num_cmd_buf, + packet->num_io_configs, CRE_MAX_IO_BUFS, + packet->num_patches); + return 0; +} + +static int cam_cre_validate_acquire_res_info( + struct cam_cre_acquire_dev_info *cre_acquire) +{ + int i; + + if (cre_acquire->num_out_res > CRE_MAX_OUT_RES) { + CAM_ERR(CAM_CRE, "num of out resources exceeding : %u", + cre_acquire->num_out_res); + return -EINVAL; + } + + if (cre_acquire->num_in_res > CRE_MAX_IN_RES) { + CAM_ERR(CAM_CRE, "num of in resources exceeding : %u", + cre_acquire->num_in_res); + return -EINVAL; + } + + if (cre_acquire->dev_type >= CAM_CRE_DEV_TYPE_MAX) { + CAM_ERR(CAM_CRE, "Invalid device type: %d", + cre_acquire->dev_type); + return -EINVAL; + } + + for (i = 0; i < cre_acquire->num_in_res; i++) { + switch (cre_acquire->in_res[i].format) { + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_PLAIN128: + break; + default: + CAM_ERR(CAM_CRE, "Invalid input format %d", + cre_acquire->in_res[i].format); + return -EINVAL; + } + } + + for (i = 0; i < cre_acquire->num_out_res; i++) { + switch (cre_acquire->out_res[i].format) { + case CAM_FORMAT_PLAIN16_8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN32_20: + case CAM_FORMAT_PLAIN32: + case CAM_FORMAT_PLAIN128: + break; + default: + CAM_ERR(CAM_CRE, "Invalid output format %d", + cre_acquire->out_res[i].format); + return -EINVAL; + } + } + + return 0; +} + +static int cam_cre_get_acquire_info(struct 
cam_cre_hw_mgr *hw_mgr, + struct cam_hw_acquire_args *args, + struct cam_cre_ctx *ctx) +{ + int i = 0; + + if (args->num_acq > CRE_DEV_MAX) { + CAM_ERR(CAM_CRE, "Invalid number of resources: %d", + args->num_acq); + return -EINVAL; + } + + if (copy_from_user(&ctx->cre_acquire, + (void __user *)args->acquire_info, + sizeof(struct cam_cre_acquire_dev_info))) { + CAM_ERR(CAM_CRE, "Failed in acquire"); + return -EFAULT; + } + + CAM_DBG(CAM_CRE, "top: %u %s %u %u %u", + ctx->cre_acquire.dev_type, + ctx->cre_acquire.dev_name, + ctx->cre_acquire.secure_mode, + ctx->cre_acquire.num_in_res, ctx->cre_acquire.num_out_res); + + for (i = 0; i < ctx->cre_acquire.num_in_res; i++) { + CAM_DBG(CAM_CRE, "IN: %u %u %u %u", + ctx->cre_acquire.in_res[i].res_id, + ctx->cre_acquire.in_res[i].width, + ctx->cre_acquire.in_res[i].height, + ctx->cre_acquire.in_res[i].format); + } + + for (i = 0; i < ctx->cre_acquire.num_out_res; i++) { + CAM_DBG(CAM_CRE, "OUT: %u %u %u %u", + ctx->cre_acquire.out_res[i].res_id, + ctx->cre_acquire.out_res[i].width, + ctx->cre_acquire.out_res[i].height, + ctx->cre_acquire.out_res[i].format); + } + + if (cam_cre_validate_acquire_res_info(&ctx->cre_acquire)) + return -EINVAL; + + return 0; +} + +static int cam_cre_get_free_ctx(struct cam_cre_hw_mgr *hw_mgr) +{ + int i; + + i = find_first_zero_bit(hw_mgr->ctx_bitmap, hw_mgr->ctx_bits); + if (i >= CRE_CTX_MAX || i < 0) { + CAM_ERR(CAM_CRE, "Invalid ctx id = %d", i); + return -EINVAL; + } + + mutex_lock(&hw_mgr->ctx[i].ctx_mutex); + if (hw_mgr->ctx[i].ctx_state != CRE_CTX_STATE_FREE) { + CAM_ERR(CAM_CRE, "Invalid ctx %d state %d", + i, hw_mgr->ctx[i].ctx_state); + mutex_unlock(&hw_mgr->ctx[i].ctx_mutex); + return -EINVAL; + } + set_bit(i, hw_mgr->ctx_bitmap); + mutex_unlock(&hw_mgr->ctx[i].ctx_mutex); + + return i; +} + + +static int cam_cre_put_free_ctx(struct cam_cre_hw_mgr *hw_mgr, uint32_t ctx_id) +{ + if (ctx_id >= CRE_CTX_MAX) { + CAM_ERR(CAM_CRE, "Invalid ctx_id: %d", ctx_id); + return 0; + } + + 
hw_mgr->ctx[ctx_id].ctx_state = CRE_CTX_STATE_FREE; + clear_bit(ctx_id, hw_mgr->ctx_bitmap); + + return 0; +} + +static int cam_cre_mgr_get_hw_caps(void *hw_priv, void *hw_caps_args) +{ + struct cam_cre_hw_mgr *hw_mgr; + struct cam_query_cap_cmd *query_cap = hw_caps_args; + struct cam_cre_hw_ver hw_ver; + int rc = 0, i; + + if (!hw_priv || !hw_caps_args) { + CAM_ERR(CAM_CRE, "Invalid args: %x %x", hw_priv, hw_caps_args); + return -EINVAL; + } + + if (sizeof(struct cam_cre_query_cap_cmd) != query_cap->size) { + CAM_ERR(CAM_CRE, + "Input query cap size:%u does not match expected query cap size: %u", + query_cap->size, sizeof(struct cam_cre_query_cap_cmd)); + return -EFAULT; + } + + hw_mgr = hw_priv; + mutex_lock(&hw_mgr->hw_mgr_mutex); + if (copy_from_user(&hw_mgr->cre_caps, + u64_to_user_ptr(query_cap->caps_handle), + sizeof(struct cam_cre_query_cap_cmd))) { + CAM_ERR(CAM_CRE, "copy_from_user failed: size = %d", + sizeof(struct cam_cre_query_cap_cmd)); + rc = -EFAULT; + goto end; + } + + for (i = 0; i < hw_mgr->num_cre; i++) { + rc = hw_mgr->cre_dev_intf[i]->hw_ops.get_hw_caps( + hw_mgr->cre_dev_intf[i]->hw_priv, + &hw_ver, sizeof(hw_ver)); + if (rc) + goto end; + + hw_mgr->cre_caps.dev_ver[i] = hw_ver; + } + + hw_mgr->cre_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl; + hw_mgr->cre_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl; + + CAM_DBG(CAM_CRE, "iommu sec %d iommu ns %d", + hw_mgr->cre_caps.dev_iommu_handle.secure, + hw_mgr->cre_caps.dev_iommu_handle.non_secure); + + if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle), + &hw_mgr->cre_caps, sizeof(struct cam_cre_query_cap_cmd))) { + CAM_ERR(CAM_CRE, "copy_to_user failed: size = %d", + sizeof(struct cam_cre_query_cap_cmd)); + rc = -EFAULT; + } + +end: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static int cam_cre_mgr_acquire_hw(void *hw_priv, void *hw_acquire_args) +{ + int rc = 0, i; + int ctx_id; + struct cam_cre_hw_mgr *hw_mgr = hw_priv; + struct cam_cre_ctx *ctx; + struct 
cam_hw_acquire_args *args = hw_acquire_args; + struct cam_cre_dev_acquire cre_dev_acquire; + struct cam_cre_dev_release cre_dev_release; + struct cam_cre_dev_init init; + struct cam_cre_dev_clk_update clk_update; + struct cam_cre_dev_bw_update *bw_update; + struct cam_cre_set_irq_cb irq_cb; + struct cam_hw_info *dev = NULL; + struct cam_hw_soc_info *soc_info = NULL; + int32_t idx; + + if ((!hw_priv) || (!hw_acquire_args)) { + CAM_ERR(CAM_CRE, "Invalid args: %x %x", + hw_priv, hw_acquire_args); + return -EINVAL; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + ctx_id = cam_cre_get_free_ctx(hw_mgr); + if (ctx_id < 0) { + CAM_ERR(CAM_CRE, "No free ctx"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return ctx_id; + } + + ctx = &hw_mgr->ctx[ctx_id]; + ctx->ctx_id = ctx_id; + mutex_lock(&ctx->ctx_mutex); + rc = cam_cre_get_acquire_info(hw_mgr, args, ctx); + if (rc < 0) { + CAM_ERR(CAM_CRE, "get_acquire info failed: %d", rc); + goto end; + } + + if (!hw_mgr->cre_ctx_cnt) { + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + rc = hw_mgr->cre_dev_intf[i]->hw_ops.init( + hw_mgr->cre_dev_intf[i]->hw_priv, &init, + sizeof(init)); + if (rc) { + CAM_ERR(CAM_CRE, "CRE Dev init failed: %d", rc); + goto end; + } + } + + /* Install IRQ CB */ + irq_cb.cre_hw_mgr_cb = cam_cre_hw_mgr_cb; + irq_cb.data = hw_mgr; + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, + CRE_HW_SET_IRQ_CB, + &irq_cb, sizeof(irq_cb)); + if (rc) { + CAM_ERR(CAM_CRE, "CRE Dev init failed: %d", rc); + goto cre_irq_set_failed; + } + } + + dev = (struct cam_hw_info *)hw_mgr->cre_dev_intf[0]->hw_priv; + soc_info = &dev->soc_info; + idx = soc_info->src_clk_idx; + + hw_mgr->clk_info.base_clk = + soc_info->clk_rate[CAM_TURBO_VOTE][idx]; + hw_mgr->clk_info.threshold = 5; + hw_mgr->clk_info.over_clked = 0; + + for (i = 0; i < CAM_CRE_MAX_PER_PATH_VOTES; i++) { + hw_mgr->clk_info.axi_path[i].camnoc_bw = 0; + hw_mgr->clk_info.axi_path[i].mnoc_ab_bw = 0; 
+ hw_mgr->clk_info.axi_path[i].mnoc_ib_bw = 0; + hw_mgr->clk_info.axi_path[i].ddr_ab_bw = 0; + hw_mgr->clk_info.axi_path[i].ddr_ib_bw = 0; + } + } + + cre_dev_acquire.ctx_id = ctx_id; + cre_dev_acquire.cre_acquire = &ctx->cre_acquire; + + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_ACQUIRE, + &cre_dev_acquire, sizeof(cre_dev_acquire)); + if (rc) { + CAM_ERR(CAM_CRE, "CRE Dev acquire failed: %d", rc); + goto cre_dev_acquire_failed; + } + } + + ctx->cre_top = cre_dev_acquire.cre_top; + + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + dev = (struct cam_hw_info *)hw_mgr->cre_dev_intf[i]->hw_priv; + soc_info = &dev->soc_info; + idx = soc_info->src_clk_idx; + clk_update.clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][idx]; + hw_mgr->clk_info.curr_clk = + soc_info->clk_rate[CAM_TURBO_VOTE][idx]; + + rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_CLK_UPDATE, + &clk_update, sizeof(clk_update)); + if (rc) { + CAM_ERR(CAM_CRE, "CRE Dev clk update failed: %d", rc); + goto cre_clk_update_failed; + } + } + + bw_update = CAM_MEM_ZALLOC(sizeof(struct cam_cre_dev_bw_update), GFP_KERNEL); + if (!bw_update) { + CAM_ERR(CAM_ISP, "Out of memory"); + goto cre_clk_update_failed; + } + bw_update->ahb_vote_valid = false; + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + bw_update->axi_vote.num_paths = 1; + bw_update->axi_vote_valid = true; + bw_update->axi_vote.axi_path[0].camnoc_bw = 600000000; + bw_update->axi_vote.axi_path[0].mnoc_ab_bw = 600000000; + bw_update->axi_vote.axi_path[0].mnoc_ib_bw = 600000000; + bw_update->axi_vote.axi_path[0].ddr_ab_bw = 600000000; + bw_update->axi_vote.axi_path[0].ddr_ib_bw = 600000000; + bw_update->axi_vote.axi_path[0].transac_type = + CAM_AXI_TRANSACTION_WRITE; + bw_update->axi_vote.axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_ALL; + rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + 
hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_BW_UPDATE, + bw_update, sizeof(*bw_update)); + if (rc) { + CAM_ERR(CAM_CRE, "CRE Dev clk update failed: %d", rc); + goto free_bw_update; + } + } + + cam_cre_device_timer_start(hw_mgr); + hw_mgr->cre_ctx_cnt++; + ctx->context_priv = args->context_data; + args->ctxt_to_hw_map = ctx; + ctx->ctxt_event_cb = args->event_cb; + cam_cre_ctx_clk_info_init(ctx); + ctx->ctx_state = CRE_CTX_STATE_ACQUIRED; + CAM_MEM_ZFREE(bw_update, sizeof(struct cam_cre_dev_bw_update)); + bw_update = NULL; + + mutex_unlock(&ctx->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + CAM_INFO(CAM_CRE, "CRE: %d acquire succesfull rc %d", ctx_id, rc); + return rc; + +free_bw_update: + CAM_MEM_ZFREE(bw_update, sizeof(struct cam_cre_dev_bw_update)); + bw_update = NULL; +cre_clk_update_failed: + cre_dev_release.ctx_id = ctx_id; + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + if (hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_RELEASE, + &cre_dev_release, sizeof(cre_dev_release))) + CAM_ERR(CAM_CRE, "CRE Dev release failed"); + } +cre_dev_acquire_failed: + if (!hw_mgr->cre_ctx_cnt) { + irq_cb.cre_hw_mgr_cb = NULL; + irq_cb.data = hw_mgr; + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + if (hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, + CRE_HW_SET_IRQ_CB, + &irq_cb, sizeof(irq_cb))) + CAM_ERR(CAM_CRE, + "CRE IRQ de register failed"); + } + } +cre_irq_set_failed: + if (!hw_mgr->cre_ctx_cnt) { + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + if (hw_mgr->cre_dev_intf[i]->hw_ops.deinit( + hw_mgr->cre_dev_intf[i]->hw_priv, NULL, 0)) + CAM_ERR(CAM_CRE, "CRE deinit fail"); + } + } +end: + args->ctxt_to_hw_map = NULL; + cam_cre_put_free_ctx(hw_mgr, ctx_id); + mutex_unlock(&ctx->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static int cam_cre_mgr_release_ctx(struct cam_cre_hw_mgr *hw_mgr, int ctx_id) +{ + int i = 0, rc = 0; + struct cam_cre_dev_release cre_dev_release; + + 
if (ctx_id >= CRE_CTX_MAX) { + CAM_ERR(CAM_CRE, "ctx_id is wrong: %d", ctx_id); + return -EINVAL; + } + + mutex_lock(&hw_mgr->ctx[ctx_id].ctx_mutex); + if (hw_mgr->ctx[ctx_id].ctx_state != + CRE_CTX_STATE_ACQUIRED) { + mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex); + CAM_DBG(CAM_CRE, "ctx id: %d not in right state: %d", + ctx_id, hw_mgr->ctx[ctx_id].ctx_state); + return 0; + } + + hw_mgr->ctx[ctx_id].ctx_state = CRE_CTX_STATE_RELEASE; + + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + cre_dev_release.ctx_id = ctx_id; + rc = hw_mgr->cre_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, CRE_HW_RELEASE, + &cre_dev_release, sizeof(cre_dev_release)); + if (rc) + CAM_ERR(CAM_CRE, "CRE Dev release failed: %d", rc); + } + + for (i = 0; i < CAM_CTX_REQ_MAX; i++) { + if (!hw_mgr->ctx[ctx_id].req_list[i]) + continue; + cam_cre_free_io_config(hw_mgr->ctx[ctx_id].req_list[i]); + CAM_MEM_ZFREE(hw_mgr->ctx[ctx_id].req_list[i], + sizeof(struct cam_cre_request)); + hw_mgr->ctx[ctx_id].req_list[i] = NULL; + clear_bit(i, hw_mgr->ctx[ctx_id].bitmap); + } + + hw_mgr->ctx[ctx_id].req_cnt = 0; + hw_mgr->ctx[ctx_id].last_flush_req = 0; + hw_mgr->ctx[ctx_id].last_req_idx = 0; + hw_mgr->ctx[ctx_id].last_done_req_idx = 0; + cam_cre_put_free_ctx(hw_mgr, ctx_id); + + rc = cam_cre_mgr_cre_clk_remove(hw_mgr, ctx_id); + if (rc) + CAM_ERR(CAM_CRE, "CRE clk update failed: %d", rc); + + hw_mgr->cre_ctx_cnt--; + mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex); + CAM_DBG(CAM_CRE, "X: ctx_id = %d", ctx_id); + + return 0; +} + +static int cam_cre_mgr_release_hw(void *hw_priv, void *hw_release_args) +{ + int i, rc = 0; + int ctx_id = 0; + struct cam_hw_release_args *release_hw = hw_release_args; + struct cam_cre_hw_mgr *hw_mgr = hw_priv; + struct cam_cre_ctx *ctx_data = NULL; + struct cam_cre_set_irq_cb irq_cb; + struct cam_hw_intf *dev_intf; + + if (!release_hw || !hw_mgr) { + CAM_ERR(CAM_CRE, "Invalid args: %pK %pK", release_hw, hw_mgr); + return -EINVAL; + } + + ctx_data = 
release_hw->ctxt_to_hw_map; + if (!ctx_data) { + CAM_ERR(CAM_CRE, "NULL ctx data"); + return -EINVAL; + } + + ctx_id = ctx_data->ctx_id; + if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX) { + CAM_ERR(CAM_CRE, "Invalid ctx id: %d", ctx_id); + return -EINVAL; + } + + mutex_lock(&hw_mgr->ctx[ctx_id].ctx_mutex); + if (hw_mgr->ctx[ctx_id].ctx_state != CRE_CTX_STATE_ACQUIRED) { + CAM_DBG(CAM_CRE, "ctx is not in use: %d", ctx_id); + mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex); + return -EINVAL; + } + mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex); + + mutex_lock(&hw_mgr->hw_mgr_mutex); + rc = cam_cre_mgr_release_ctx(hw_mgr, ctx_id); + if (!hw_mgr->cre_ctx_cnt) { + CAM_DBG(CAM_CRE, "Last Release"); + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + dev_intf = hw_mgr->cre_dev_intf[i]; + irq_cb.cre_hw_mgr_cb = NULL; + irq_cb.data = NULL; + rc = dev_intf->hw_ops.process_cmd( + hw_mgr->cre_dev_intf[i]->hw_priv, + CRE_HW_SET_IRQ_CB, + &irq_cb, sizeof(irq_cb)); + if (rc) + CAM_ERR(CAM_CRE, "IRQ dereg failed: %d", rc); + } + for (i = 0; i < cre_hw_mgr->num_cre; i++) { + dev_intf = hw_mgr->cre_dev_intf[i]; + rc = dev_intf->hw_ops.deinit( + hw_mgr->cre_dev_intf[i]->hw_priv, + NULL, 0); + if (rc) + CAM_ERR(CAM_CRE, "deinit failed: %d", rc); + } + cam_cre_device_timer_stop(hw_mgr); + } + + rc = cam_cre_mgr_remove_bw(hw_mgr, ctx_id); + if (rc) + CAM_ERR(CAM_CRE, "CRE remove bw failed: %d", rc); + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + CAM_DBG(CAM_CRE, "Release done for ctx_id %d", ctx_id); + return rc; +} + +static int cam_cre_packet_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + struct cam_cre_clk_bw_request *clk_info; + struct cam_cre_clk_bw_req_internal_v2 *clk_info_v2; + struct cre_clk_bw_request_v2 *soc_req; + + struct cre_cmd_generic_blob *blob; + struct cam_cre_ctx *ctx_data; + uint32_t index; + size_t clk_update_size = 0; + int rc = 0, i; + + if (!blob_data || (blob_size == 0)) { + CAM_ERR(CAM_CRE, "Invalid blob info %pK %d", 
blob_data, + blob_size); + return -EINVAL; + } + + blob = (struct cre_cmd_generic_blob *)user_data; + ctx_data = blob->ctx; + index = blob->req_idx; + + switch (blob_type) { + case CAM_CRE_CMD_GENERIC_BLOB_CLK_V2: + if (blob_size < sizeof(struct cre_clk_bw_request_v2)) { + CAM_ERR(CAM_CRE, "Mismatch blob size %d expected %lu", + blob_size, + sizeof(struct cre_clk_bw_request_v2)); + return -EINVAL; + } + + soc_req = (struct cre_clk_bw_request_v2 *)blob_data; + if (soc_req->num_paths > CAM_CRE_MAX_PER_PATH_VOTES) { + CAM_ERR(CAM_CRE, "Invalid num paths: %d", + soc_req->num_paths); + return -EINVAL; + } + + /* Check for integer overflow */ + if (soc_req->num_paths != 1) { + if (sizeof(struct cam_axi_per_path_bw_vote) > + ((UINT_MAX - + sizeof(struct cre_clk_bw_request_v2)) / + (soc_req->num_paths - 1))) { + CAM_ERR(CAM_CRE, + "Size exceeds limit paths:%u size per path:%lu", + soc_req->num_paths - 1, + sizeof( + struct cam_axi_per_path_bw_vote)); + return -EINVAL; + } + } + + clk_update_size = sizeof(struct cre_clk_bw_request_v2) + + ((soc_req->num_paths - 1) * + sizeof(struct cam_axi_per_path_bw_vote)); + if (blob_size < clk_update_size) { + CAM_ERR(CAM_CRE, "Invalid blob size: %u", + blob_size); + return -EINVAL; + } + + clk_info = &ctx_data->req_list[index]->clk_info; + clk_info_v2 = &ctx_data->req_list[index]->clk_info_v2; + clk_info_v2.budget_ns = soc_req->budget_ns; + clk_info_v2.frame_cycles = soc_req->frame_cycles; + clk_info_v2.rt_flag = soc_req->rt_flag; + clk_info_v2.num_paths = soc_req->num_paths; + + for (i = 0; i < soc_req->num_paths; i++) { + clk_info_v2.axi_path[i].usage_data = soc_req->axi_path_flex[i].usage_data; + clk_info_v2.axi_path[i].transac_type = + soc_req->axi_path_flex[i].transac_type; + clk_info_v2.axi_path[i].path_data_type = + soc_req->axi_path_flex[i].path_data_type; + clk_info_v2.axi_path[i].vote_level = 0; + clk_info_v2.axi_path[i].camnoc_bw = soc_req->axi_path_flex[i].camnoc_bw; + clk_info_v2.axi_path[i].mnoc_ab_bw = 
soc_req->axi_path_flex[i].mnoc_ab_bw; + clk_info_v2.axi_path[i].mnoc_ib_bw = soc_req->axi_path_flex[i].mnoc_ib_bw; + } + + /* Use v1 structure for clk fields */ + clk_info->budget_ns = clk_info_v2->budget_ns; + clk_info->frame_cycles = clk_info_v2->frame_cycles; + clk_info->rt_flag = clk_info_v2->rt_flag; + + CAM_DBG(CAM_CRE, "budget=%llu, frame_cycle=%llu, rt_flag=%d", + clk_info_v2->budget_ns, clk_info_v2->frame_cycles, + clk_info_v2->rt_flag); + break; + + default: + CAM_WARN(CAM_CRE, "Invalid blob type %d", blob_type); + break; + } + return rc; +} + +static int cam_cre_process_generic_cmd_buffer( + struct cam_packet *packet, + struct cam_cre_ctx *ctx_data, + int32_t index, + uint64_t *io_buf_addr) +{ + int i, rc = 0; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cre_cmd_generic_blob cmd_generic_blob; + + cmd_generic_blob.ctx = ctx_data; + cmd_generic_blob.req_idx = index; + cmd_generic_blob.io_buf_addr = io_buf_addr; + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *) &packet->payload_flex + packet->cmd_buf_offset/4); + + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (!cmd_desc[i].length) + continue; + + if (cmd_desc[i].meta_data != CAM_CRE_CMD_META_GENERIC_BLOB) + continue; + + rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i], + cam_cre_packet_generic_blob_handler, &cmd_generic_blob); + if (rc) + CAM_ERR(CAM_CRE, "Failed in processing blobs %d", rc); + } + + return rc; +} + +static int cam_cre_mgr_prepare_hw_update(void *hw_priv, + void *hw_prepare_update_args) +{ + int rc = 0; + struct cam_packet *packet = NULL; + struct cam_cre_hw_mgr *hw_mgr = hw_priv; + struct cam_hw_prepare_update_args *prepare_args = + hw_prepare_update_args; + struct cam_cre_ctx *ctx_data = NULL; + uint32_t request_idx = 0; + struct cam_cre_request *cre_req; + struct timespec64 ts; + + if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) { + CAM_ERR(CAM_CRE, "Invalid 
args: %x %x", + prepare_args, hw_mgr); + return -EINVAL; + } + + ctx_data = prepare_args->ctxt_to_hw_map; + if (!ctx_data) { + CAM_ERR(CAM_CRE, "Invalid Context"); + return -EINVAL; + } + + mutex_lock(&ctx_data->ctx_mutex); + if (ctx_data->ctx_state != CRE_CTX_STATE_ACQUIRED) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_CRE, "ctx id %u is not acquired state: %d", + ctx_data->ctx_id, ctx_data->ctx_state); + return -EINVAL; + } + + packet = prepare_args->packet; + rc = cam_packet_util_validate_packet(packet, prepare_args->remain_len); + if (rc) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_CRE, + "packet validation failed: %d req_id: %d ctx: %d", + rc, packet->header.request_id, ctx_data->ctx_id); + return rc; + } + + rc = cam_cre_mgr_pkt_validation(packet); + if (rc) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_CRE, + "cre packet validation failed: %d req_id: %d ctx: %d", + rc, packet->header.request_id, ctx_data->ctx_id); + return -EINVAL; + } + + rc = cam_packet_util_process_patches(packet, prepare_args->buf_tracker, + hw_mgr->iommu_hdl, hw_mgr->iommu_sec_hdl, true); + if (rc) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_CRE, "Patch processing failed %d", rc); + return rc; + } + + request_idx = find_next_zero_bit(ctx_data->bitmap, + ctx_data->bits, ctx_data->last_req_idx); + if (request_idx >= CAM_CTX_REQ_MAX || request_idx < 0) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_CRE, "Invalid ctx req slot = %d", request_idx); + return -EINVAL; + } + CAM_DBG(CAM_CRE, "req_idx %d last_req_idx %d bitmap size in bits %d", + request_idx, ctx_data->last_req_idx, ctx_data->bits); + ctx_data->last_req_idx = request_idx; + + ctx_data->req_list[request_idx] = + CAM_MEM_ZALLOC(sizeof(struct cam_cre_request), GFP_KERNEL); + if (!ctx_data->req_list[request_idx]) { + CAM_ERR(CAM_CRE, "mem allocation failed ctx:%d req_idx:%d", + ctx_data->ctx_id, request_idx); + rc = -ENOMEM; + goto req_mem_alloc_failed; + } + 
memset(ctx_data->req_list[request_idx], 0, + sizeof(struct cam_cre_request)); + + cre_req = ctx_data->req_list[request_idx]; + cre_req->request_id = packet->header.request_id; + cre_req->frames_done = 0; + cre_req->req_idx = request_idx; + + rc = cam_cre_mgr_process_io_cfg(hw_mgr, packet, ctx_data, + request_idx, prepare_args); + if (rc) { + CAM_ERR(CAM_CRE, + "IO cfg processing failed: %d ctx: %d req_id:%d", + rc, ctx_data->ctx_id, packet->header.request_id); + goto end; + } + + rc = cam_cre_mgr_create_cre_reg_buf(hw_mgr, packet, prepare_args, + ctx_data, request_idx); + if (rc) { + CAM_ERR(CAM_CRE, + "create kmd buf failed: %d ctx: %d request_id:%d", + rc, ctx_data->ctx_id, packet->header.request_id); + goto end; + } + + rc = cam_cre_process_generic_cmd_buffer(packet, ctx_data, + request_idx, NULL); + if (rc) { + CAM_ERR(CAM_CRE, "Failed: %d ctx: %d req_id: %d req_idx: %d", + rc, ctx_data->ctx_id, packet->header.request_id, + request_idx); + goto end; + } + + prepare_args->num_hw_update_entries = 1; + prepare_args->priv = ctx_data->req_list[request_idx]; + cre_req->hang_data.packet = packet; + ktime_get_boottime_ts64(&ts); + ctx_data->last_req_time = (uint64_t)((ts.tv_sec * 1000000000) + + ts.tv_nsec); + CAM_DBG(CAM_REQ, "req_id= %llu ctx_id= %d lrt=%llu", + packet->header.request_id, ctx_data->ctx_id, + ctx_data->last_req_time); + set_bit(request_idx, ctx_data->bitmap); + mutex_unlock(&ctx_data->ctx_mutex); + + CAM_DBG(CAM_REQ, "Prepare Hw update Successful request_id: %d ctx: %d", + packet->header.request_id, ctx_data->ctx_id); + return rc; + +end: + CAM_MEM_ZFREE((void *)ctx_data->req_list[request_idx], + sizeof(struct cam_cre_request)); + ctx_data->req_list[request_idx] = NULL; +req_mem_alloc_failed: + clear_bit(request_idx, ctx_data->bitmap); + mutex_unlock(&ctx_data->ctx_mutex); + return rc; +} + +static int cam_cre_mgr_enqueue_config(struct cam_cre_hw_mgr *hw_mgr, + struct cam_cre_ctx *ctx_data, + struct cam_hw_config_args *config_args) +{ + int rc = 0; + 
uint64_t request_id = 0; + struct crm_workq_task *task; + struct cre_cmd_work_data *task_data; + struct cam_hw_update_entry *hw_update_entries; + struct cam_cre_request *cre_req = NULL; + + cre_req = config_args->priv; + request_id = config_args->request_id; + hw_update_entries = config_args->hw_update_entries; + + CAM_DBG(CAM_CRE, "req_id = %lld %pK", request_id, config_args->priv); + + task = cam_req_mgr_workq_get_task(cre_hw_mgr->cmd_work); + if (!task) { + CAM_ERR(CAM_CRE, "no empty task"); + return -ENOMEM; + } + + task_data = (struct cre_cmd_work_data *)task->payload; + task_data->data = (void *)hw_mgr; + task_data->req_idx = cre_req->req_idx; + task_data->type = CRE_WORKQ_TASK_CMD_TYPE; + task->process_cb = cam_cre_mgr_process_cmd; + + if (ctx_data->cre_acquire.dev_type == CAM_CRE_DEV_TYPE_RT) + rc = cam_req_mgr_workq_enqueue_task(task, ctx_data, + CRM_TASK_PRIORITY_0); + else + rc = cam_req_mgr_workq_enqueue_task(task, ctx_data, + CRM_TASK_PRIORITY_1); + + return rc; +} + +static int cam_cre_mgr_config_hw(void *hw_priv, void *hw_config_args) +{ + int rc = 0; + struct cam_cre_hw_mgr *hw_mgr = hw_priv; + struct cam_hw_config_args *config_args = hw_config_args; + struct cam_cre_ctx *ctx_data = NULL; + struct cam_cre_request *cre_req = NULL; + + CAM_DBG(CAM_CRE, "E"); + if (!hw_mgr || !config_args) { + CAM_ERR(CAM_CRE, "Invalid arguments %pK %pK", + hw_mgr, config_args); + return -EINVAL; + } + + if (!config_args->num_hw_update_entries) { + CAM_ERR(CAM_CRE, "No hw update enteries are available"); + return -EINVAL; + } + + ctx_data = config_args->ctxt_to_hw_map; + mutex_lock(&hw_mgr->hw_mgr_mutex); + mutex_lock(&ctx_data->ctx_mutex); + if (ctx_data->ctx_state != CRE_CTX_STATE_ACQUIRED) { + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_CRE, "ctx id :%u is not in use", + ctx_data->ctx_id); + return -EINVAL; + } + + cre_req = config_args->priv; + + cam_cre_mgr_cre_clk_update(hw_mgr, ctx_data, cre_req->req_idx); + 
ctx_data->req_list[cre_req->req_idx]->submit_timestamp = ktime_get(); + + if (cre_req->request_id <= ctx_data->last_flush_req) + CAM_WARN(CAM_CRE, + "Anomaly submitting flushed req %llu [last_flush %llu] in ctx %u", + cre_req->request_id, ctx_data->last_flush_req, + ctx_data->ctx_id); + + rc = cam_cre_mgr_enqueue_config(hw_mgr, ctx_data, config_args); + if (rc) + goto config_err; + + CAM_DBG(CAM_REQ, "req_id %llu, ctx_id %u io config", + cre_req->request_id, ctx_data->ctx_id); + + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +config_err: + cam_cre_mgr_handle_config_err(config_args, ctx_data); + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static void cam_cre_mgr_dump_pf_data(struct cam_cre_hw_mgr *hw_mgr, + struct cam_hw_cmd_pf_args *pf_cmd_args) +{ + struct cam_packet *packet; + struct cam_hw_dump_pf_args *pf_args; + struct cam_ctx_request *req_pf; + + req_pf = (struct cam_ctx_request *) + pf_cmd_args->pf_req_info->req; + packet = (struct cam_packet *)req_pf->packet; + pf_args = pf_cmd_args->pf_args; + + cam_packet_util_dump_io_bufs(packet, hw_mgr->iommu_hdl, + hw_mgr->iommu_sec_hdl, pf_args, false); + + cam_packet_util_dump_patch_info(packet, hw_mgr->iommu_hdl, + hw_mgr->iommu_sec_hdl, pf_args); + + cam_mem_put_cpu_buf(pf_cmd_args->pf_req_info->packet_handle); +} + +static int cam_cre_mgr_cmd(void *hw_mgr_priv, void *cmd_args) +{ + int rc = 0; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_cre_hw_mgr *hw_mgr = hw_mgr_priv; + + if (!hw_mgr_priv || !cmd_args) { + CAM_ERR(CAM_CRE, "Invalid arguments"); + return -EINVAL; + } + + switch (hw_cmd_args->cmd_type) { + case CAM_HW_MGR_CMD_DUMP_PF_INFO: + cam_cre_mgr_dump_pf_data(hw_mgr, hw_cmd_args->u.pf_cmd_args); + break; + default: + CAM_ERR(CAM_CRE, "Invalid cmd"); + } + + return rc; +} + +static int cam_cre_mgr_flush_req(struct cam_cre_ctx *ctx_data, + struct cam_hw_flush_args *flush_args) +{ + int idx; + 
int64_t request_id; + uint32_t evt_id; + struct cam_hw_done_event_data buf_data; + + request_id = *(int64_t *)flush_args->flush_req_pending[0]; + for (idx = 0; idx < CAM_CTX_REQ_MAX; idx++) { + if (!ctx_data->req_list[idx]) + continue; + + if (ctx_data->req_list[idx]->request_id != request_id) + continue; + + evt_id = CAM_CTX_EVT_ID_ERROR; + buf_data.evt_param = CAM_SYNC_CRE_EVENT_HW_ERR; + buf_data.request_id = ctx_data->req_list[idx]->request_id; + ctx_data->ctxt_event_cb(ctx_data->context_priv, evt_id, &buf_data); + ctx_data->req_list[idx]->request_id = 0; + cam_cre_free_io_config(ctx_data->req_list[idx]); + CAM_MEM_ZFREE(ctx_data->req_list[idx], sizeof(struct cam_cre_request)); + ctx_data->req_list[idx] = NULL; + clear_bit(idx, ctx_data->bitmap); + } + + return 0; +} + +static int cam_cre_mgr_flush_all(struct cam_cre_ctx *ctx_data, + struct cam_hw_flush_args *flush_args) +{ + int i, rc; + uint32_t evt_id; + struct cam_hw_done_event_data buf_data; + + mutex_lock(&ctx_data->ctx_mutex); + rc = cam_cre_mgr_reset_hw(); + + for (i = 0; i < CAM_CTX_REQ_MAX; i++) { + if (!ctx_data->req_list[i]) + continue; + + evt_id = CAM_CTX_EVT_ID_ERROR; + buf_data.evt_param = CAM_SYNC_CRE_EVENT_HW_ERR; + buf_data.request_id = ctx_data->req_list[i]->request_id; + ctx_data->ctxt_event_cb(ctx_data->context_priv, evt_id, &buf_data); + ctx_data->req_list[i]->request_id = 0; + cam_cre_free_io_config(ctx_data->req_list[i]); + CAM_MEM_ZFREE(ctx_data->req_list[i], sizeof(struct cam_cre_request)); + ctx_data->req_list[i] = NULL; + clear_bit(i, ctx_data->bitmap); + } + mutex_unlock(&ctx_data->ctx_mutex); + + return rc; +} + +static int cam_cre_mgr_hw_dump(void *hw_priv, void *hw_dump_args) +{ + struct cam_cre_ctx *ctx_data; + struct cam_cre_hw_mgr *hw_mgr = hw_priv; + struct cam_hw_dump_args *dump_args; + int idx; + ktime_t cur_time; + struct timespec64 cur_ts, req_ts; + uint64_t diff; + + if ((!hw_priv) || (!hw_dump_args)) { + CAM_ERR(CAM_CRE, "Invalid params %pK %pK", + hw_priv, 
hw_dump_args); + return -EINVAL; + } + + dump_args = (struct cam_hw_dump_args *)hw_dump_args; + ctx_data = dump_args->ctxt_to_hw_map; + + if (!ctx_data) { + CAM_ERR(CAM_CRE, "Invalid context"); + return -EINVAL; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + mutex_lock(&ctx_data->ctx_mutex); + + CAM_INFO(CAM_CRE, "Req %lld", dump_args->request_id); + for (idx = 0; idx < CAM_CTX_REQ_MAX; idx++) { + if (!ctx_data->req_list[idx]) + continue; + + if (ctx_data->req_list[idx]->request_id == + dump_args->request_id) + break; + } + + /* no matching request found */ + if (idx == CAM_CTX_REQ_MAX) { + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 0; + } + + cur_time = ktime_get(); + diff = ktime_us_delta(cur_time, + ctx_data->req_list[idx]->submit_timestamp); + cur_ts = ktime_to_timespec64(cur_time); + req_ts = ktime_to_timespec64(ctx_data->req_list[idx]->submit_timestamp); + + if (diff < (CRE_REQUEST_TIMEOUT * 1000)) { + CAM_INFO(CAM_CRE, + "No Error req %llu req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 0; + } + + CAM_ERR(CAM_CRE, "Error req %llu req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 0; +} + +static int cam_cre_mgr_hw_flush(void *hw_priv, void *hw_flush_args) +{ + struct cam_hw_flush_args *flush_args = hw_flush_args; + struct cam_cre_ctx *ctx_data; + struct cam_cre_hw_mgr *hw_mgr = cre_hw_mgr; + + if ((!hw_priv) || (!hw_flush_args)) { + CAM_ERR(CAM_CRE, "Input params are Null"); + return -EINVAL; + } + + ctx_data = flush_args->ctxt_to_hw_map; + if (!ctx_data) { + CAM_ERR(CAM_CRE, "Ctx data 
is NULL"); + return -EINVAL; + } + + if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) || + (flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) { + CAM_ERR(CAM_CRE, "Invalid flush type: %d", + flush_args->flush_type); + return -EINVAL; + } + + switch (flush_args->flush_type) { + case CAM_FLUSH_TYPE_ALL: + mutex_lock(&hw_mgr->hw_mgr_mutex); + ctx_data->last_flush_req = flush_args->last_flush_req; + + CAM_DBG(CAM_REQ, "ctx_id %d Flush type %d last_flush_req %u", + ctx_data->ctx_id, flush_args->flush_type, + ctx_data->last_flush_req); + + cam_cre_mgr_flush_all(ctx_data, flush_args); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + break; + case CAM_FLUSH_TYPE_REQ: + mutex_lock(&ctx_data->ctx_mutex); + if (flush_args->num_req_active) { + CAM_ERR(CAM_CRE, "Flush request is not supported"); + mutex_unlock(&ctx_data->ctx_mutex); + return -EINVAL; + } + if (flush_args->num_req_pending) + cam_cre_mgr_flush_req(ctx_data, flush_args); + mutex_unlock(&ctx_data->ctx_mutex); + break; + default: + CAM_ERR(CAM_CRE, "Invalid flush type: %d", + flush_args->flush_type); + return -EINVAL; + } + + return 0; +} + +static int cam_cre_mgr_alloc_devs(struct device_node *of_node) +{ + int rc; + uint32_t num_dev; + + rc = of_property_read_u32(of_node, "num-cre", &num_dev); + if (rc) { + CAM_ERR(CAM_CRE, "getting num of cre failed: %d", rc); + return -EINVAL; + } + + cre_hw_mgr->devices[CRE_DEV_CRE] = CAM_MEM_ZALLOC( + sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL); + if (!cre_hw_mgr->devices[CRE_DEV_CRE]) + return -ENOMEM; + + return 0; +} + +static int cam_cre_mgr_init_devs(struct device_node *of_node) +{ + int rc = 0; + int count, i; + const char *name = NULL; + struct device_node *child_node = NULL; + struct platform_device *child_pdev = NULL; + struct cam_hw_intf *child_dev_intf = NULL; + struct cam_hw_info *cre_dev; + struct cam_hw_soc_info *soc_info = NULL; + + rc = cam_cre_mgr_alloc_devs(of_node); + if (rc) + return rc; + + count = of_property_count_strings(of_node, "compat-hw-name"); + if 
(!count) { + CAM_ERR(CAM_CRE, "no compat hw found in dev tree, cnt = %d", + count); + rc = -EINVAL; + goto compat_hw_name_failed; + } + + for (i = 0; i < count; i++) { + rc = of_property_read_string_index(of_node, "compat-hw-name", + i, &name); + if (rc) { + CAM_ERR(CAM_CRE, "getting dev object name failed"); + goto compat_hw_name_failed; + } + + child_node = of_find_node_by_name(NULL, name); + if (!child_node) { + CAM_ERR(CAM_CRE, "Cannot find node in dtsi %s", name); + rc = -ENODEV; + goto compat_hw_name_failed; + } + + child_pdev = of_find_device_by_node(child_node); + if (!child_pdev) { + CAM_ERR(CAM_CRE, "failed to find device on bus %s", + child_node->name); + rc = -ENODEV; + of_node_put(child_node); + goto compat_hw_name_failed; + } + + child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata( + child_pdev); + if (!child_dev_intf) { + CAM_ERR(CAM_CRE, "no child device"); + of_node_put(child_node); + goto compat_hw_name_failed; + } + cre_hw_mgr->devices[child_dev_intf->hw_type] + [child_dev_intf->hw_idx] = child_dev_intf; + + if (!child_dev_intf->hw_ops.process_cmd) + goto compat_hw_name_failed; + + of_node_put(child_node); + } + + cre_hw_mgr->num_cre = count; + for (i = 0; i < count; i++) { + cre_hw_mgr->cre_dev_intf[i] = + cre_hw_mgr->devices[CRE_DEV_CRE][i]; + cre_dev = cre_hw_mgr->cre_dev_intf[i]->hw_priv; + soc_info = &cre_dev->soc_info; + } + + return 0; +compat_hw_name_failed: + CAM_MEM_FREE(cre_hw_mgr->devices[CRE_DEV_CRE]); + cre_hw_mgr->devices[CRE_DEV_CRE] = NULL; + return rc; +} + +static void cam_req_mgr_process_cre_command_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static void cam_req_mgr_process_cre_msg_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static void cam_req_mgr_process_cre_timer_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static int cam_cre_mgr_create_wq(void) +{ + + int rc; + int i; + + rc = cam_req_mgr_workq_create("cre_command_queue", 
CRE_WORKQ_NUM_TASK, + &cre_hw_mgr->cmd_work, CRM_WORKQ_USAGE_NON_IRQ, + 0, cam_req_mgr_process_cre_command_queue); + if (rc) { + CAM_ERR(CAM_CRE, "unable to create a command worker"); + goto cmd_work_failed; + } + + rc = cam_req_mgr_workq_create("cre_message_queue", CRE_WORKQ_NUM_TASK, + &cre_hw_mgr->msg_work, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_cre_msg_queue); + if (rc) { + CAM_ERR(CAM_CRE, "unable to create a message worker"); + goto msg_work_failed; + } + + rc = cam_req_mgr_workq_create("cre_timer_queue", CRE_WORKQ_NUM_TASK, + &cre_hw_mgr->timer_work, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_cre_timer_queue); + if (rc) { + CAM_ERR(CAM_CRE, "unable to create a timer worker"); + goto timer_work_failed; + } + + cre_hw_mgr->cmd_work_data = + CAM_MEM_ZALLOC(sizeof(struct cre_cmd_work_data) * CRE_WORKQ_NUM_TASK, + GFP_KERNEL); + if (!cre_hw_mgr->cmd_work_data) { + rc = -ENOMEM; + goto cmd_work_data_failed; + } + + cre_hw_mgr->msg_work_data = + CAM_MEM_ZALLOC(sizeof(struct cre_msg_work_data) * CRE_WORKQ_NUM_TASK, + GFP_KERNEL); + if (!cre_hw_mgr->msg_work_data) { + rc = -ENOMEM; + goto msg_work_data_failed; + } + + cre_hw_mgr->timer_work_data = + CAM_MEM_ZALLOC(sizeof(struct cre_clk_work_data) * CRE_WORKQ_NUM_TASK, + GFP_KERNEL); + if (!cre_hw_mgr->timer_work_data) { + rc = -ENOMEM; + goto timer_work_data_failed; + } + + for (i = 0; i < CRE_WORKQ_NUM_TASK; i++) + cre_hw_mgr->msg_work->task.pool[i].payload = + &cre_hw_mgr->msg_work_data[i]; + + for (i = 0; i < CRE_WORKQ_NUM_TASK; i++) + cre_hw_mgr->cmd_work->task.pool[i].payload = + &cre_hw_mgr->cmd_work_data[i]; + + for (i = 0; i < CRE_WORKQ_NUM_TASK; i++) + cre_hw_mgr->timer_work->task.pool[i].payload = + &cre_hw_mgr->timer_work_data[i]; + return 0; + +timer_work_data_failed: + CAM_MEM_FREE(cre_hw_mgr->msg_work_data); +msg_work_data_failed: + CAM_MEM_FREE(cre_hw_mgr->cmd_work_data); +cmd_work_data_failed: + cam_req_mgr_workq_destroy(&cre_hw_mgr->timer_work); +timer_work_failed: + 
cam_req_mgr_workq_destroy(&cre_hw_mgr->msg_work); +msg_work_failed: + cam_req_mgr_workq_destroy(&cre_hw_mgr->cmd_work); +cmd_work_failed: + return rc; +} + +static int cam_cre_set_dbg_default_clk(void *data, u64 val) +{ + cre_hw_mgr->cre_debug_clk = val; + return 0; +} + +static int cam_cre_get_dbg_default_clk(void *data, u64 *val) +{ + *val = cre_hw_mgr->cre_debug_clk; + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(cam_cre_debug_default_clk, + cam_cre_get_dbg_default_clk, + cam_cre_set_dbg_default_clk, "%16llu"); + +static int cam_cre_create_debug_fs(void) +{ + if (!cam_debugfs_available()) + return 0; + + cam_debugfs_create_subdir("cre", &cre_hw_mgr->dentry); + if (!cre_hw_mgr->dentry) { + CAM_ERR(CAM_CRE, "failed to create dentry"); + return -ENOMEM; + } + + debugfs_create_bool("dump_req_data_enable", 0644, cre_hw_mgr->dentry, + &cre_hw_mgr->dump_req_data_enable); + + debugfs_create_file("cre_debug_clk", 0644, cre_hw_mgr->dentry, + NULL, &cam_cre_debug_default_clk); + + return 0; +} + +int cam_cre_hw_mgr_init(struct device_node *of_node, void *hw_mgr, + int *iommu_hdl) +{ + int i, rc = 0, j; + struct cam_hw_mgr_intf *hw_mgr_intf; + + if (!of_node || !hw_mgr) { + CAM_ERR(CAM_CRE, "Invalid args of_node %pK hw_mgr %pK", + of_node, hw_mgr); + return -EINVAL; + } + hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr; + + cre_hw_mgr = CAM_MEM_ZALLOC(sizeof(struct cam_cre_hw_mgr), GFP_KERNEL); + if (!cre_hw_mgr) { + CAM_ERR(CAM_CRE, "Unable to allocate mem for: size = %d", + sizeof(struct cam_cre_hw_mgr)); + return -ENOMEM; + } + + hw_mgr_intf->hw_mgr_priv = cre_hw_mgr; + hw_mgr_intf->hw_get_caps = cam_cre_mgr_get_hw_caps; + hw_mgr_intf->hw_acquire = cam_cre_mgr_acquire_hw; + hw_mgr_intf->hw_release = cam_cre_mgr_release_hw; + hw_mgr_intf->hw_start = NULL; + hw_mgr_intf->hw_stop = NULL; + hw_mgr_intf->hw_prepare_update = cam_cre_mgr_prepare_hw_update; + hw_mgr_intf->hw_config_stream_settings = NULL; + hw_mgr_intf->hw_config = cam_cre_mgr_config_hw; + hw_mgr_intf->hw_read = NULL; 
+ hw_mgr_intf->hw_write = NULL; + hw_mgr_intf->hw_cmd = cam_cre_mgr_cmd; + hw_mgr_intf->hw_flush = cam_cre_mgr_hw_flush; + hw_mgr_intf->hw_dump = cam_cre_mgr_hw_dump; + + cre_hw_mgr->secure_mode = false; + mutex_init(&cre_hw_mgr->hw_mgr_mutex); + spin_lock_init(&cre_hw_mgr->hw_mgr_lock); + + for (i = 0; i < CRE_CTX_MAX; i++) { + cre_hw_mgr->ctx[i].bitmap_size = + BITS_TO_LONGS(CAM_CTX_REQ_MAX) * + sizeof(long); + cre_hw_mgr->ctx[i].bitmap = CAM_MEM_ZALLOC( + cre_hw_mgr->ctx[i].bitmap_size, GFP_KERNEL); + if (!cre_hw_mgr->ctx[i].bitmap) { + CAM_ERR(CAM_CRE, "bitmap allocation failed: size = %d", + cre_hw_mgr->ctx[i].bitmap_size); + rc = -ENOMEM; + goto cre_ctx_bitmap_failed; + } + cre_hw_mgr->ctx[i].bits = cre_hw_mgr->ctx[i].bitmap_size * + BITS_PER_BYTE; + mutex_init(&cre_hw_mgr->ctx[i].ctx_mutex); + } + + rc = cam_cre_mgr_init_devs(of_node); + if (rc) + goto dev_init_failed; + + cre_hw_mgr->ctx_bitmap_size = + BITS_TO_LONGS(CRE_CTX_MAX) * sizeof(long); + cre_hw_mgr->ctx_bitmap = CAM_MEM_ZALLOC(cre_hw_mgr->ctx_bitmap_size, + GFP_KERNEL); + if (!cre_hw_mgr->ctx_bitmap) { + rc = -ENOMEM; + goto ctx_bitmap_alloc_failed; + } + + cre_hw_mgr->ctx_bits = cre_hw_mgr->ctx_bitmap_size * + BITS_PER_BYTE; + + rc = cam_smmu_get_handle("cre", &cre_hw_mgr->iommu_hdl); + if (rc) { + CAM_ERR(CAM_CRE, "get mmu handle failed: %d", rc); + goto cre_get_hdl_failed; + } + + rc = cam_smmu_get_handle("cam-secure", &cre_hw_mgr->iommu_sec_hdl); + if (rc) { + CAM_ERR(CAM_CRE, "get secure mmu handle failed: %d", rc); + goto secure_hdl_failed; + } + + rc = cam_cre_mgr_create_wq(); + if (rc) + goto cre_wq_create_failed; + + cam_cre_create_debug_fs(); + + if (iommu_hdl) + *iommu_hdl = cre_hw_mgr->iommu_hdl; + + return rc; + +cre_wq_create_failed: + cam_smmu_destroy_handle(cre_hw_mgr->iommu_sec_hdl); + cre_hw_mgr->iommu_sec_hdl = -1; +secure_hdl_failed: + cam_smmu_destroy_handle(cre_hw_mgr->iommu_hdl); + cre_hw_mgr->iommu_hdl = -1; +cre_get_hdl_failed: + CAM_MEM_ZFREE(cre_hw_mgr->ctx_bitmap, 
cre_hw_mgr->ctx_bitmap_size); + cre_hw_mgr->ctx_bitmap = NULL; + cre_hw_mgr->ctx_bitmap_size = 0; + cre_hw_mgr->ctx_bits = 0; +ctx_bitmap_alloc_failed: + CAM_MEM_ZFREE(cre_hw_mgr->devices[CRE_DEV_CRE], + (cre_hw_mgr->num_cre) * (sizeof(struct cam_hw_intf *))); + cre_hw_mgr->devices[CRE_DEV_CRE] = NULL; +dev_init_failed: +cre_ctx_bitmap_failed: + mutex_destroy(&cre_hw_mgr->hw_mgr_mutex); + for (j = i - 1; j >= 0; j--) { + mutex_destroy(&cre_hw_mgr->ctx[j].ctx_mutex); + CAM_MEM_ZFREE(cre_hw_mgr->ctx[j].bitmap, + cre_hw_mgr->ctx[j].bitmap_size); + cre_hw_mgr->ctx[j].bitmap = NULL; + cre_hw_mgr->ctx[j].bitmap_size = 0; + cre_hw_mgr->ctx[j].bits = 0; + } + CAM_MEM_ZFREE(cre_hw_mgr, sizeof(struct cam_cre_hw_mgr)); + cre_hw_mgr = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.h new file mode 100644 index 0000000000..dcaff5da1f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.h @@ -0,0 +1,421 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_CRE_HW_MGR_H +#define CAM_CRE_HW_MGR_H + +#include +#include +#include + +#include "cam_cre_hw_intf.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_req_mgr_workq.h" +#include "cam_mem_mgr.h" +#include "cam_context.h" +#include "cre_top.h" + +#define CRE_CTX_MAX 32 + +#define CRE_WORKQ_NUM_TASK 64 +#define CRE_WORKQ_TASK_CMD_TYPE 1 +#define CRE_WORKQ_TASK_MSG_TYPE 2 + +#define CRE_PACKET_MAX_CMD_BUFS 1 + +#define CRE_CTX_STATE_FREE 0 +#define CRE_CTX_STATE_IN_USE 1 +#define CRE_CTX_STATE_ACQUIRED 2 +#define CRE_CTX_STATE_RELEASE 3 + +#define CRE_MAX_IN_RES 2 +#define CRE_MAX_OUT_RES 2 +#define CRE_MAX_IO_BUFS 3 + +#define CAM_CRE_BW_CONFIG_UNKNOWN 0 +#define CAM_CRE_BW_CONFIG_V2 2 + +#define CRE_DEVICE_IDLE_TIMEOUT 400 +#define CRE_REQUEST_TIMEOUT 200 + +#define CAM_CRE_MAX_PER_PATH_VOTES 2 +#define CAM_CRE_MAX_REG_SET 32 + +#define CAM_CRE_MAX_ACTIVE 8 +/* + * Response time threshold in ms beyond which a request is not expected + * to be with CRE hw + */ +#define CAM_CRE_RESPONSE_TIME_THRESHOLD 300 + +/* + * struct cam_cre_irq_data + * + * @error: IRQ error + * @top_irq_status: CRE top irq status + * @wr_buf_done: write engine buf done + */ +struct cam_cre_irq_data { + uint32_t error; + uint32_t top_irq_status; + uint32_t wr_buf_done; +}; + + +/** + * struct cam_cre_hw_intf_data - CRE hw intf data + * + * @Brief: cre hw intf pointer and pid list data + * + * @devices: cre hw intf pointer + * @num_devices: Number of CRE devices + * @num_hw_pid: Number of pids for this hw + * @hw_pid: cre hw pid values + * + */ +struct cam_cre_hw_intf_data { + struct cam_hw_intf *hw_intf; + uint32_t num_hw_pid; + uint32_t hw_pid[CRE_DEV_MAX]; +}; + +/** + * struct cam_cre_ctx_clk_info + * @curr_fc: Context latest request frame cycles + * @rt_flag: Flag to indicate real time request + * @base_clk: Base clock to process the request + * @reserved: Reserved field + * @clk_rate: Supported clock rates for the context + * @num_paths: Number of 
valid AXI paths + * @axi_path: ctx based per path bw vote + */ +struct cam_cre_ctx_clk_info { + uint32_t curr_fc; + uint32_t rt_flag; + uint32_t base_clk; + uint32_t reserved; + int32_t clk_rate[CAM_MAX_VOTE]; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_CRE_MAX_PER_PATH_VOTES]; +}; + +/** + * struct cre_cmd_generic_blob + * @ctx: Current context info + * @req_info_idx: Index used for request + * @io_buf_addr: pointer to io buffer address + */ +struct cre_cmd_generic_blob { + struct cam_cre_ctx *ctx; + uint32_t req_idx; + uint64_t *io_buf_addr; +}; + +/** + * struct cam_cre_clk_info + * @base_clk: Base clock to process request + * @curr_clk: Current clock of hadrware + * @threshold: Threshold for overclk count + * @over_clked: Over clock count + * @num_paths: Number of AXI vote paths + * @axi_path: Current per path bw vote info + * @hw_type: IPE/BPS device type + * @watch_dog: watchdog timer handle + * @watch_dog_reset_counter: Counter for watch dog reset + * @uncompressed_bw: uncompressed BW + * @compressed_bw: compressed BW + */ +struct cam_cre_clk_info { + uint32_t base_clk; + uint32_t curr_clk; + uint32_t threshold; + uint32_t over_clked; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_CRE_MAX_PER_PATH_VOTES]; + uint32_t hw_type; + struct cam_req_mgr_timer *watch_dog; + uint32_t watch_dog_reset_counter; + uint64_t uncompressed_bw; + uint64_t compressed_bw; +}; + +/** + * struct cre_cmd_work_data + * + * @type: Type of work data + * @data: Private data + * @req_id: Request Idx + */ +struct cre_cmd_work_data { + uint32_t type; + void *data; + int64_t req_idx; +}; + +/** + * struct cre_msg_work_data + * + * @type: Type of work data + * @data: Private data + * @irq_status: IRQ status + */ +struct cre_msg_work_data { + uint32_t type; + void *data; + struct cam_cre_irq_data irq_data; +}; + +/** + * struct cre_clk_work_data + * + * @type: Type of work data + * @data: Private data + */ +struct cre_clk_work_data { + 
uint32_t type; + void *data; +}; + +struct plane_info { + uintptr_t cpu_addr; + dma_addr_t iova_addr; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t format; + uint32_t alignment; + uint32_t offset; + size_t len; +}; + +/** + * struct cre_io_buf + * + * @direction: Direction of a buffer + * @resource_type: Resource type of IO Buffer + * @format: Format + * @fence: Fence + * @num_planes: Number of planes + * p_info: per plane info + */ +struct cre_io_buf { + uint32_t direction; + uint32_t resource_type; + uint32_t format; + uint32_t fence; + uint32_t num_planes; + struct plane_info p_info[CAM_CRE_MAX_PLANES]; +}; + +struct cre_reg_set { + uint32_t offset; + uint32_t value; +}; + +struct cre_reg_buffer { + uint32_t num_rd_reg_set; + uint32_t num_wr_reg_set; + struct cre_reg_set rd_reg_set[CAM_CRE_MAX_REG_SET]; + struct cre_reg_set wr_reg_set[CAM_CRE_MAX_REG_SET]; +}; + +/** + * struct cam_cre_clk_bw_request + * @budget_ns: Time required to process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @uncompressed_bw: Bandwidth required to process frame + * @compressed_bw: Compressed bandwidth to process frame + */ +struct cam_cre_clk_bw_request { + uint64_t budget_ns; + uint32_t frame_cycles; + uint32_t rt_flag; + uint64_t uncompressed_bw; + uint64_t compressed_bw; +}; + +/** + * struct cam_cre_clk_bw_req_internal_v2 + * @budget_ns: Time required to process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @reserved: Reserved for future use + * @num_paths: Number of paths for per path bw vote + * @axi_path: Per path vote info for CRE + */ +struct cam_cre_clk_bw_req_internal_v2 { + uint64_t budget_ns; + uint32_t frame_cycles; + uint32_t rt_flag; + uint32_t reserved; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_CRE_MAX_PER_PATH_VOTES]; +}; + +/** + * struct cam_cre_request + * + * 
@request_id: Request Id + * @req_idx: Index in request list + * @state: Request state + * @num_batch: Number of batches + * @num_frame_bufs: Number of frame buffers + * @num_pass_bufs: Number of pass Buffers + * @num_io_bufs: Number of IO Buffers + * @in_resource: Input resource + * @cre_debug_buf: Debug buffer + * @io_buf: IO config info of a request + * @clk_info: Clock Info V1 + * @clk_info_v2: Clock Info V2 + * @hang_data: Debug data for HW error + * @submit_timestamp: Submit timestamp to hw + */ +struct cam_cre_request { + uint64_t request_id; + uint32_t req_idx; + uint32_t state; + uint32_t num_batch; + uint32_t frames_done; + uint32_t num_frame_bufs; + uint32_t num_pass_bufs; + uint32_t num_io_bufs[CRE_MAX_BATCH_SIZE]; + uint32_t in_resource; + struct cre_reg_buffer cre_reg_buf[CRE_MAX_BATCH_SIZE]; + struct cre_io_buf *io_buf[CRE_MAX_BATCH_SIZE][CRE_MAX_IO_BUFS]; + struct cam_cre_clk_bw_request clk_info; + struct cam_cre_clk_bw_req_internal_v2 clk_info_v2; + struct cam_hw_mgr_pf_request_info hang_data; + ktime_t submit_timestamp; +}; + +/** + * struct cam_cre_ctx + * + * @ctx_id: Context ID + * @ctx_state: State of a context + * @req_cnt: Requests count + * @last_flush_req: last flush req for this ctx + * @last_req_time: Timestamp of last request + * @last_req_idx: Last submitted req index + * @last_done_req_idx: Last done req index + * @bitmap: Context bit map + * @bitmap_size: Context bit map size + * @bits: Context bit map bits + * @context_priv: Private data of context + * @iommu_hdl: smmu handle + * @ctx_mutex: Mutex for context + * @acquire_dev_cmd: Cam acquire command + * @cre_acquire: CRE acquire command + * @clk_info: CRE Ctx clock info + * @packet: Current packet to process + * @cre_top: Pointer to CRE top data structure + * @req_list: Request List + * @ctxt_event_cb: Callback of a context + */ +struct cam_cre_ctx { + uint32_t ctx_id; + uint32_t ctx_state; + uint32_t req_cnt; + uint64_t last_flush_req; + uint64_t last_req_time; + uint64_t 
last_req_idx; + uint64_t last_done_req_idx; + void *bitmap; + size_t bitmap_size; + size_t bits; + void *context_priv; + int iommu_hdl; + + struct mutex ctx_mutex; + struct cam_acquire_dev_cmd acquire_dev_cmd; + struct cam_cre_acquire_dev_info cre_acquire; + struct cam_cre_ctx_clk_info clk_info; + struct cre_top *cre_top; + struct cam_packet *packet; + struct cam_cre_request *req_list[CAM_CTX_REQ_MAX]; + cam_hw_event_cb_func ctxt_event_cb; +}; + +/** + * struct cam_cre_hw_mgr + * + * @cre_ctx_cnt: Open context count + * @hw_mgr_mutex: Mutex for HW manager + * @hw_mgr_lock: Spinlock for HW manager + * @iommu_hdl: CRE Handle + * @iommu_sec_hdl: CRE Handle for secure + * @num_cre: Number of CRE + * @secure_mode: Mode of CRE creration + * @ctx_bitmap: Context bit map + * @ctx_bitmap_size: Context bit map size + * @ctx_bits: Context bit map bits + * @ctx: CRE context + * @devices: CRE devices + * @cre_caps: CRE capabilities + * @cmd_work: Command work + * @msg_work: Message work + * @timer_work: Timer work + * @cmd_work_data: Command work data + * @msg_work_data: Message work data + * @timer_work_data: Timer work data + * @cre_dev_intf: CRE device interface + * @clk_info: CRE clock Info for HW manager + * @dentry: Pointer to CRE debugfs directory + * @dump_req_data_enable: CRE hang dump enablement + */ +struct cam_cre_hw_mgr { + uint32_t cre_ctx_cnt; + struct mutex hw_mgr_mutex; + spinlock_t hw_mgr_lock; + int32_t iommu_hdl; + int32_t iommu_sec_hdl; + uint32_t num_cre; + uint64_t cre_debug_clk; + bool secure_mode; + void *ctx_bitmap; + size_t ctx_bitmap_size; + size_t ctx_bits; + struct cam_cre_ctx ctx[CRE_CTX_MAX]; + struct cam_hw_intf **devices[CRE_DEV_MAX]; + struct cam_cre_query_cap_cmd cre_caps; + + struct cam_req_mgr_core_workq *cmd_work; + struct cam_req_mgr_core_workq *msg_work; + struct cam_req_mgr_core_workq *timer_work; + struct cre_cmd_work_data *cmd_work_data; + struct cre_msg_work_data *msg_work_data; + struct cre_clk_work_data *timer_work_data; + struct 
cam_hw_intf *cre_dev_intf[CRE_DEV_MAX]; + struct cam_soc_reg_map *reg_map[CRE_DEV_MAX][CRE_BASE_MAX]; + struct cam_cre_clk_info clk_info; + struct dentry *dentry; + bool dump_req_data_enable; +}; + +/** + * struct cam_cre_hw_ctx_data + * + * @context_priv: Context private data, cam_context from + * acquire. + * @ctx_mutex: Mutex for context + * @cre_dev_acquire_info: Acquire device info + * @ctxt_event_cb: Context callback function + * @in_use: Flag for context usage + * @wait_complete: Completion info + * @last_flush_req: req id which was flushed last. + */ +struct cam_cre_hw_ctx_data { + void *context_priv; + struct mutex ctx_mutex; + struct cam_cre_acquire_dev_info cre_dev_acquire_info; + cam_hw_event_cb_func ctxt_event_cb; + bool in_use; + struct completion wait_complete; + uint64_t last_flush_req; +}; +#endif /* CAM_CRE_HW_MGR_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.c b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.c new file mode 100644 index 0000000000..bdc21e00f4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.c @@ -0,0 +1,546 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#include +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cre_core.h" +#include "cre_hw.h" +#include "cre_dev_intf.h" +#include "cre_bus_rd.h" +#include "cam_mem_mgr_api.h" +#include + +static struct cre_bus_rd *bus_rd; + +#define update_cre_reg_set(cre_reg_buf, off, val) \ + do { \ + cre_reg_buf->rd_reg_set[cre_reg_buf->num_rd_reg_set].offset = (off); \ + cre_reg_buf->rd_reg_set[cre_reg_buf->num_rd_reg_set].value = (val); \ + cre_reg_buf->num_rd_reg_set++; \ + } while (0) + +static int cam_cre_bus_rd_in_port_idx(uint32_t input_port_id) +{ + int i; + + for (i = 0; i < CRE_MAX_IN_RES; i++) + if (bus_rd->in_port_to_rm[i].input_port_id == + input_port_id) + return i; + + return -EINVAL; +} + +static void cam_cre_update_read_reg_val(struct plane_info p_info, + struct cam_cre_bus_rd_client_reg_val *rd_client_reg_val) +{ + switch (p_info.format) { + case CAM_FORMAT_MIPI_RAW_10: + rd_client_reg_val->format = 0xd; + break; + case CAM_FORMAT_MIPI_RAW_12: + rd_client_reg_val->format = 0xe; + break; + case CAM_FORMAT_MIPI_RAW_14: + rd_client_reg_val->format = 0xf; + break; + case CAM_FORMAT_MIPI_RAW_20: + rd_client_reg_val->format = 0x13; + break; + case CAM_FORMAT_PLAIN128: + rd_client_reg_val->format = 0x0; + break; + default: + CAM_ERR(CAM_CRE, "Unsupported read format"); + return; + } + + CAM_DBG(CAM_CRE, + "format %d width(in bytes) %d height %d stride(in byte) %d", + p_info.format, p_info.width, p_info.height, p_info.stride); + CAM_DBG(CAM_CRE, "alignment 0x%x", + p_info.alignment); + + /* Fetch engine width has to be updated in number of bytes */ + rd_client_reg_val->img_width = p_info.stride; + rd_client_reg_val->stride = p_info.stride; + rd_client_reg_val->img_height = p_info.height; + rd_client_reg_val->alignment = p_info.alignment; +} + +static int cam_cre_bus_rd_release(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + if (ctx_id < 0 || ctx_id 
>= CRE_CTX_MAX) { + CAM_ERR(CAM_CRE, "Invalid data: %d", ctx_id); + return -EINVAL; + } + + vfree(bus_rd->bus_rd_ctx[ctx_id]); + bus_rd->bus_rd_ctx[ctx_id] = NULL; + + return 0; +} + +static int cam_cre_bus_rd_update(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, struct cre_reg_buffer *cre_reg_buf, int batch_idx, + int io_idx, struct cam_cre_dev_prepare_req *prepare) +{ + int k, in_port_idx; + uint32_t req_idx, val; + uint32_t iova_base, iova_offset; + struct cam_hw_prepare_update_args *prepare_args; + struct cam_cre_ctx *ctx_data; + struct cam_cre_request *cre_request; + struct cre_io_buf *io_buf; + struct cam_cre_bus_rd_reg *rd_reg; + struct cam_cre_bus_rd_client_reg *rd_reg_client; + struct cam_cre_bus_rd_reg_val *rd_reg_val; + struct cam_cre_bus_rd_client_reg_val *rd_client_reg_val; + + if (ctx_id < 0 || !prepare) { + CAM_ERR(CAM_CRE, "Invalid data: %d %x", ctx_id, prepare); + return -EINVAL; + } + + if (batch_idx >= CRE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_CRE, "Invalid batch idx: %d", batch_idx); + return -EINVAL; + } + + if (io_idx >= CRE_MAX_IO_BUFS) { + CAM_ERR(CAM_CRE, "Invalid IO idx: %d", io_idx); + return -EINVAL; + } + + prepare_args = prepare->prepare_args; + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + + cre_request = ctx_data->req_list[req_idx]; + CAM_DBG(CAM_CRE, "req_idx = %d req_id = %lld", + req_idx, cre_request->request_id); + rd_reg = cam_cre_hw_info->bus_rd_reg_offset; + rd_reg_val = cam_cre_hw_info->bus_rd_reg_val; + io_buf = cre_request->io_buf[batch_idx][io_idx]; + + CAM_DBG(CAM_CRE, + "req_idx = %d req_id = %lld rsc %d", + req_idx, cre_request->request_id, + io_buf->resource_type); + CAM_DBG(CAM_CRE, "batch:%d iobuf:%d direction:%d", + batch_idx, io_idx, io_buf->direction); + + in_port_idx = + cam_cre_bus_rd_in_port_idx(io_buf->resource_type); + + CAM_DBG(CAM_CRE, "in_port_idx %d", in_port_idx); + for (k = 0; k < io_buf->num_planes; k++) { + rd_reg_client = &rd_reg->rd_clients[in_port_idx]; + rd_client_reg_val = 
&rd_reg_val->rd_clients[in_port_idx]; + + /* security cfg */ + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg->security_cfg, + ctx_data->cre_acquire.secure_mode & 0x1); + + /* enable client */ + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg_client->core_cfg, + 1); + + /* ccif meta data */ + update_cre_reg_set(cre_reg_buf, + (rd_reg->offset + rd_reg_client->ccif_meta_data), + 0); + /* + * As CRE have 36 Bit addressing support Image Address + * register will have 28 bit MSB of 36 bit iova. + * and addr_config will have 8 bit byte offset. + */ + iova_base = CAM_36BIT_INTF_GET_IOVA_BASE( + io_buf->p_info[k].iova_addr); + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg_client->img_addr, + iova_base); + iova_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET( + io_buf->p_info[k].iova_addr); + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg_client->addr_cfg, + iova_offset); + + cam_cre_update_read_reg_val(io_buf->p_info[k], + rd_client_reg_val); + + /* Buffer size */ + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg_client->rd_width, + rd_client_reg_val->img_width); + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg_client->rd_height, + rd_client_reg_val->img_height); + + /* stride */ + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg_client->rd_stride, + rd_client_reg_val->stride); + + val = 0; + val |= (rd_client_reg_val->format & + rd_client_reg_val->format_mask) << + rd_client_reg_val->format_shift; + val |= (rd_client_reg_val->alignment & + rd_client_reg_val->alignment_mask) << + rd_client_reg_val->alignment_shift; + /* unpacker cfg : format and alignment */ + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg_client->unpacker_cfg, + val); + + /* Enable Debug cfg */ + val = 0xFFFF; + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg_client->debug_status_cfg, + val); + } + + return 0; +} + +static int cam_cre_bus_rd_prepare(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + 
int rc = 0; + int i, j; + uint32_t req_idx; + struct cam_cre_dev_prepare_req *prepare; + struct cam_cre_ctx *ctx_data; + struct cam_cre_request *cre_request; + struct cre_io_buf *io_buf; + struct cam_cre_bus_rd_reg *rd_reg; + struct cam_cre_bus_rd_reg_val *rd_reg_val; + struct cre_reg_buffer *cre_reg_buf; + + int val; + + if (ctx_id < 0 || !data) { + CAM_ERR(CAM_CRE, "Invalid data: %d %x", ctx_id, data); + return -EINVAL; + } + prepare = data; + + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + + cre_request = ctx_data->req_list[req_idx]; + + CAM_DBG(CAM_CRE, "req_idx = %d req_id = %lld", + req_idx, cre_request->request_id); + rd_reg = cam_cre_hw_info->bus_rd_reg_offset; + rd_reg_val = cam_cre_hw_info->bus_rd_reg_val; + + for (i = 0; i < cre_request->num_batch; i++) { + cre_reg_buf = &cre_request->cre_reg_buf[i]; + for (j = 0; j < cre_request->num_io_bufs[i]; j++) { + io_buf = cre_request->io_buf[i][j]; + if (io_buf->direction != CAM_BUF_INPUT) + continue; + + CAM_DBG(CAM_CRE, "batch:%d iobuf:%d direction:%d", + i, j, io_buf->direction); + + rc = cam_cre_bus_rd_update(cam_cre_hw_info, + ctx_id, cre_reg_buf, i, j, prepare); + if (rc) + goto end; + } + + /* Go command */ + val = 0; + val |= rd_reg_val->go_cmd; + val |= rd_reg_val->static_prg & rd_reg_val->static_prg_mask; + update_cre_reg_set(cre_reg_buf, + rd_reg->offset + rd_reg->input_if_cmd, + val); + } + + for (i = 0; i < cre_reg_buf->num_rd_reg_set; i++) { + CAM_DBG(CAM_CRE, "CRE value 0x%x offset 0x%x", + cre_reg_buf->rd_reg_set[i].value, + cre_reg_buf->rd_reg_set[i].offset); + } +end: + return 0; +} + +static int cam_cre_bus_rd_acquire(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0, i; + struct cam_cre_acquire_dev_info *in_acquire; + struct cre_bus_rd_ctx *bus_rd_ctx; + struct cre_bus_in_port_to_rm *in_port_to_rm; + struct cam_cre_bus_rd_reg_val *bus_rd_reg_val; + int in_port_idx; + + if (ctx_id < 0 || !data || !cam_cre_hw_info || ctx_id >= CRE_CTX_MAX) { + 
CAM_ERR(CAM_CRE, "Invalid data: %d %x %x", + ctx_id, data, cam_cre_hw_info); + return -EINVAL; + } + + bus_rd->bus_rd_ctx[ctx_id] = vzalloc(sizeof(struct cre_bus_rd_ctx)); + if (!bus_rd->bus_rd_ctx[ctx_id]) { + CAM_ERR(CAM_CRE, "Out of memory"); + return -ENOMEM; + } + + bus_rd->bus_rd_ctx[ctx_id]->cre_acquire = data; + in_acquire = data; + bus_rd_ctx = bus_rd->bus_rd_ctx[ctx_id]; + bus_rd_ctx->num_in_ports = in_acquire->num_in_res; + bus_rd_ctx->security_flag = in_acquire->secure_mode; + bus_rd_reg_val = cam_cre_hw_info->bus_rd_reg_val; + + for (i = 0; i < in_acquire->num_in_res; i++) { + if (!in_acquire->in_res[i].width) + continue; + + CAM_DBG(CAM_CRE, "i = %d format = %u width = 0x%x height = 0x%x res id %d", + i, in_acquire->in_res[i].format, + in_acquire->in_res[i].width, + in_acquire->in_res[i].height, + in_acquire->in_res[i].res_id); + + in_port_idx = + cam_cre_bus_rd_in_port_idx(in_acquire->in_res[i].res_id); + if (in_port_idx < 0) { + CAM_ERR(CAM_CRE, "Invalid in_port_idx: %d", i + 1); + rc = -EINVAL; + goto end; + } + + in_port_to_rm = &bus_rd->in_port_to_rm[in_port_idx]; + if (!in_port_to_rm->num_rm) { + CAM_ERR(CAM_CRE, "Invalid format for Input port"); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_CRE, "i:%d port_id = %u format %u", + i, in_acquire->in_res[i].res_id, + in_acquire->in_res[i].format); + } + +end: + return rc; +} + +static int cam_cre_bus_rd_reg_set_update(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + int i; + uint32_t num_reg_set; + struct cre_reg_set *rd_reg_set; + struct cam_cre_dev_reg_set_update *reg_set_upd_cmd = + (struct cam_cre_dev_reg_set_update *)data; + + num_reg_set = reg_set_upd_cmd->cre_reg_buf.num_rd_reg_set; + rd_reg_set = reg_set_upd_cmd->cre_reg_buf.rd_reg_set; + + for (i = 0; i < num_reg_set; i++) { + CAM_DBG(CAM_CRE, "base 0x%x CRE value 0x%x offset 0x%x", + cam_cre_hw_info->bus_rd_reg_offset->base, + rd_reg_set[i].value, rd_reg_set[i].offset); + cam_io_w_mb(rd_reg_set[i].value, + 
cam_cre_hw_info->bus_rd_reg_offset->base + rd_reg_set[i].offset); + } + return 0; +} + +static int cam_cre_bus_rd_init(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + struct cam_cre_bus_rd_reg_val *bus_rd_reg_val; + struct cam_cre_bus_rd_reg *bus_rd_reg; + struct cam_cre_dev_init *dev_init = data; + + if (!cam_cre_hw_info) { + CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info"); + return -EINVAL; + } + + bus_rd_reg_val = cam_cre_hw_info->bus_rd_reg_val; + bus_rd_reg = cam_cre_hw_info->bus_rd_reg_offset; + bus_rd_reg->base = + dev_init->core_info->cre_hw_info->cre_hw->bus_rd_reg_offset->base; + + /* enable interrupt mask */ + cam_io_w_mb(bus_rd_reg_val->irq_mask, + cam_cre_hw_info->bus_rd_reg_offset->base + bus_rd_reg->irq_mask); + + return 0; +} + +static int cam_cre_bus_rd_probe(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + int i, k, rm_idx; + struct cam_cre_bus_rd_reg_val *bus_rd_reg_val; + struct cam_cre_bus_rd_reg *bus_rd_reg; + struct cre_bus_in_port_to_rm *in_port_to_rm; + uint32_t input_port_idx; + + if (!cam_cre_hw_info) { + CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info"); + return -EINVAL; + } + + bus_rd = CAM_MEM_ZALLOC(sizeof(struct cre_bus_rd), GFP_KERNEL); + if (!bus_rd) { + CAM_ERR(CAM_CRE, "Out of memory"); + return -ENOMEM; + } + bus_rd->cre_hw_info = cam_cre_hw_info; + bus_rd_reg_val = cam_cre_hw_info->bus_rd_reg_val; + bus_rd_reg = cam_cre_hw_info->bus_rd_reg_offset; + + for (i = 0; i < bus_rd_reg_val->num_clients; i++) { + input_port_idx = + bus_rd_reg_val->rd_clients[i].rm_port_id; + in_port_to_rm = &bus_rd->in_port_to_rm[input_port_idx]; + + rm_idx = in_port_to_rm->num_rm; + in_port_to_rm->input_port_id = + bus_rd_reg_val->rd_clients[i].input_port_id; + in_port_to_rm->rm_port_id[rm_idx] = + bus_rd_reg_val->rd_clients[i].rm_port_id; + in_port_to_rm->num_rm++; + } + + for (i = 0; i < CRE_MAX_IN_RES; i++) { + in_port_to_rm = &bus_rd->in_port_to_rm[i]; + CAM_DBG(CAM_CRE, "input port id = %d", + 
in_port_to_rm->input_port_id); + CAM_DBG(CAM_CRE, "num_rms = %d", + in_port_to_rm->num_rm); + for (k = 0; k < in_port_to_rm->num_rm; k++) { + CAM_DBG(CAM_CRE, "rm port id = %d", + in_port_to_rm->rm_port_id[k]); + } + } + + return 0; +} + +static int cam_cre_bus_rd_isr(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + uint32_t irq_status; + uint32_t violation_status; + uint32_t debug_status_0; + uint32_t debug_status_1; + struct cam_cre_bus_rd_reg *bus_rd_reg; + struct cam_cre_bus_rd_reg_val *bus_rd_reg_val; + struct cam_cre_irq_data *irq_data = data; + + if (!cam_cre_hw_info) { + CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info"); + return -EINVAL; + } + + bus_rd_reg = cam_cre_hw_info->bus_rd_reg_offset; + bus_rd_reg_val = cam_cre_hw_info->bus_rd_reg_val; + + /* Read and Clear Top Interrupt status */ + irq_status = cam_io_r_mb(bus_rd_reg->base + bus_rd_reg->irq_status); + cam_io_w_mb(irq_status, + bus_rd_reg->base + bus_rd_reg->irq_clear); + + cam_io_w_mb(bus_rd_reg_val->irq_cmd_clear, + bus_rd_reg->base + bus_rd_reg->irq_cmd); + + if (irq_status & bus_rd_reg_val->rup_done) + CAM_DBG(CAM_CRE, "CRE Read Bus RUP done"); + + if (irq_status & bus_rd_reg_val->rd_buf_done) + CAM_DBG(CAM_CRE, "CRE Read Bus Buff done"); + + if (irq_status & bus_rd_reg_val->cons_violation) { + irq_data->error = 1; + violation_status = cam_io_r_mb(bus_rd_reg->base + + bus_rd_reg->rd_clients[0].cons_violation_status); + debug_status_0 = cam_io_r_mb(bus_rd_reg->base + + bus_rd_reg->rd_clients[0].debug_status_0); + debug_status_1 = cam_io_r_mb(bus_rd_reg->base + + bus_rd_reg->rd_clients[0].debug_status_1); + CAM_DBG(CAM_CRE, "CRE Read Bus Violation"); + CAM_DBG(CAM_CRE, + "violation status 0x%x debug status 0/1 0x%x/0x%x", + violation_status, debug_status_0, debug_status_1); + } + + return 0; +} + +int cam_cre_bus_rd_process(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data) +{ + int rc = -EINVAL; + + switch (cmd_id) { + case CRE_HW_PROBE: + 
CAM_DBG(CAM_CRE, "CRE_HW_PROBE: E"); + rc = cam_cre_bus_rd_probe(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_PROBE: X"); + break; + case CRE_HW_INIT: + CAM_DBG(CAM_CRE, "CRE_HW_INIT: E"); + rc = cam_cre_bus_rd_init(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_INIT: X"); + break; + case CRE_HW_ACQUIRE: + CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: E"); + rc = cam_cre_bus_rd_acquire(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: X"); + break; + case CRE_HW_RELEASE: + CAM_DBG(CAM_CRE, "CRE_HW_RELEASE: E"); + rc = cam_cre_bus_rd_release(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_RELEASE: X"); + break; + case CRE_HW_PREPARE: + CAM_DBG(CAM_CRE, "CRE_HW_PREPARE: E"); + rc = cam_cre_bus_rd_prepare(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_PREPARE: X"); + break; + case CRE_HW_ISR: + rc = cam_cre_bus_rd_isr(cam_cre_hw_info, 0, data); + break; + case CRE_HW_REG_SET_UPDATE: + rc = cam_cre_bus_rd_reg_set_update(cam_cre_hw_info, 0, data); + break; + case CRE_HW_DEINIT: + case CRE_HW_START: + case CRE_HW_STOP: + case CRE_HW_FLUSH: + case CRE_HW_CLK_UPDATE: + case CRE_HW_BW_UPDATE: + case CRE_HW_RESET: + case CRE_HW_SET_IRQ_CB: + rc = 0; + CAM_DBG(CAM_CRE, "Unhandled cmds: %d", cmd_id); + break; + default: + CAM_ERR(CAM_CRE, "Unsupported cmd: %d", cmd_id); + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.h new file mode 100644 index 0000000000..0af995d03c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_rd/cre_bus_rd.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef CRE_BUS_RD_H +#define CRE_BUS_RD_H + +#include +#include +#include +#include "cre_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_soc_util.h" +#include "cam_cre_hw_mgr.h" + +/** + * struct cre_bus_rd_ctx + * + * @cre_acquire: CRE acquire structure + * @security_flag: security flag + * @num_in_ports: Number of in ports + */ +struct cre_bus_rd_ctx { + struct cam_cre_acquire_dev_info *cre_acquire; + bool security_flag; + uint32_t num_in_ports; +}; + +/** + * struct cre_bus_in_port_to_rm + * + * @input_port_id: Intput port ID + * @num_rm: Number of RMs + * @rm_port_id: RM port Id + */ +struct cre_bus_in_port_to_rm { + uint32_t input_port_id; + uint32_t num_rm; + uint32_t rm_port_id[CRE_MAX_IN_RES]; +}; + +/** + * struct cre_bus_rd + * + * @cre_hw_info: CRE hardware info + * @in_port_to_rm: IO port to RM mapping + * @bus_rd_ctx: RM context + */ +struct cre_bus_rd { + struct cam_cre_hw *cre_hw_info; + struct cre_bus_in_port_to_rm in_port_to_rm[CRE_MAX_IN_RES]; + struct cre_bus_rd_ctx *bus_rd_ctx[CRE_CTX_MAX]; + struct completion reset_complete; +}; +#endif /* CRE_BUS_RD_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.c b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.c new file mode 100644 index 0000000000..e4a01af20b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.c @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#include +#include "cam_io_util.h" +#include "cam_hw_intf.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cre_core.h" +#include "cre_hw.h" +#include "cre_dev_intf.h" +#include "cre_bus_wr.h" +#include "cam_mem_mgr_api.h" +#include + +static struct cre_bus_wr *wr_info; + +#define update_cre_reg_set(cre_reg_buf, off, val) \ + do { \ + cre_reg_buf->wr_reg_set[cre_reg_buf->num_wr_reg_set].offset = (off); \ + cre_reg_buf->wr_reg_set[cre_reg_buf->num_wr_reg_set].value = (val); \ + cre_reg_buf->num_wr_reg_set++; \ + } while (0) + +static int cam_cre_translate_write_format(struct plane_info p_info, + struct cam_cre_bus_wr_client_reg_val *wr_client_reg_val) +{ + CAM_DBG(CAM_CRE, "width 0x%x, height 0x%x stride 0x%x alignment 0x%x", + p_info.width, p_info.height, p_info.stride, p_info.alignment); + + /* Number of output pixels */ + wr_client_reg_val->width = p_info.width; + /* Number of output bytes */ + wr_client_reg_val->stride = p_info.stride; + /* Number of output lines */ + wr_client_reg_val->height = p_info.height; + wr_client_reg_val->alignment = p_info.alignment; + + /* + * Update packer format to zero irrespective of output format + * This is as per the recomendation from CRE HW team for CRE 1.0 + * This logic has to be updated for CRE 1.1 + */ + wr_client_reg_val->format = 0; + + return 0; +} + +static int cam_cre_bus_wr_out_port_idx(uint32_t output_port_id) +{ + int i; + + for (i = 0; i < CRE_MAX_OUT_RES; i++) + if (wr_info->out_port_to_wm[i].output_port_id == output_port_id) + return i; + + return -EINVAL; +} + +static int cam_cre_bus_wr_reg_set_update(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + int i; + uint32_t num_reg_set; + struct cre_reg_set *wr_reg_set; + struct cam_cre_dev_reg_set_update *reg_set_upd_cmd = + (struct cam_cre_dev_reg_set_update *)data; + + num_reg_set = reg_set_upd_cmd->cre_reg_buf.num_wr_reg_set; + wr_reg_set = reg_set_upd_cmd->cre_reg_buf.wr_reg_set; + + for (i = 0; i < num_reg_set; 
i++) { + CAM_DBG(CAM_CRE, "base 0x%x CRE value 0x%x offset 0x%x", + cam_cre_hw_info->bus_wr_reg_offset->base, + wr_reg_set[i].value, wr_reg_set[i].offset); + cam_io_w_mb(wr_reg_set[i].value, + cam_cre_hw_info->bus_wr_reg_offset->base + wr_reg_set[i].offset); + } + return 0; +} + +static int cam_cre_bus_wr_release(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + if (ctx_id < 0 || ctx_id >= CRE_CTX_MAX) { + CAM_ERR(CAM_CRE, "Invalid data: %d", ctx_id); + return -EINVAL; + } + + vfree(wr_info->bus_wr_ctx[ctx_id]); + wr_info->bus_wr_ctx[ctx_id] = NULL; + + return 0; +} + +static int cam_cre_bus_wr_update(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, struct cam_cre_dev_prepare_req *prepare, + int batch_idx, int io_idx, + struct cre_reg_buffer *cre_reg_buf) +{ + int rc, k, out_port_idx; + uint32_t req_idx; + uint32_t val = 0; + uint32_t iova_base, iova_offset; + struct cam_hw_prepare_update_args *prepare_args; + struct cam_cre_ctx *ctx_data; + struct cam_cre_request *cre_request; + struct cre_io_buf *io_buf; + struct cam_cre_bus_wr_reg *wr_reg; + struct cam_cre_bus_wr_client_reg *wr_reg_client; + struct cam_cre_bus_wr_reg_val *wr_reg_val; + struct cam_cre_bus_wr_client_reg_val *wr_client_reg_val; + + if (ctx_id < 0 || !prepare) { + CAM_ERR(CAM_CRE, "Invalid data: %d %x", ctx_id, prepare); + return -EINVAL; + } + + if (batch_idx >= CRE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_CRE, "Invalid batch idx: %d", batch_idx); + return -EINVAL; + } + + if (io_idx >= CRE_MAX_IO_BUFS) { + CAM_ERR(CAM_CRE, "Invalid IO idx: %d", io_idx); + return -EINVAL; + } + + prepare_args = prepare->prepare_args; + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + + cre_request = ctx_data->req_list[req_idx]; + wr_reg = cam_cre_hw_info->bus_wr_reg_offset; + wr_reg_val = cam_cre_hw_info->bus_wr_reg_val; + + CAM_DBG(CAM_CRE, "req_idx = %d req_id = %lld", + req_idx, cre_request->request_id); + + io_buf = cre_request->io_buf[batch_idx][io_idx]; + CAM_DBG(CAM_CRE, "batch 
= %d io buf num = %d dir = %d rsc %d", + batch_idx, io_idx, io_buf->direction, io_buf->resource_type); + + out_port_idx = + cam_cre_bus_wr_out_port_idx(io_buf->resource_type); + if (out_port_idx < 0) { + CAM_ERR(CAM_CRE, "Invalid idx for rsc type: %d", + io_buf->resource_type); + return -EINVAL; + } + + CAM_DBG(CAM_CRE, "out_port_idx = %d", out_port_idx); + + for (k = 0; k < io_buf->num_planes; k++) { + wr_reg_client = &wr_reg->wr_clients[out_port_idx]; + wr_client_reg_val = &wr_reg_val->wr_clients[out_port_idx]; + + CAM_DBG(CAM_CRE, "wr_reg_client %x wr_client_reg_val %x", + wr_reg_client, wr_client_reg_val, wr_client_reg_val); + + /* Core cfg: enable, Mode */ + val = 0; + val |= ((wr_client_reg_val->mode & + wr_client_reg_val->mode_mask) << + wr_client_reg_val->mode_shift); + val |= wr_client_reg_val->client_en; + + update_cre_reg_set(cre_reg_buf, + wr_reg->offset + wr_reg_client->client_cfg, + val); + + /* + * As CRE have 36 Bit addressing support Image Address + * register will have 28 bit MSB of 36 bit iova. + * and addr_config will have 8 bit byte offset. 
+ */ + iova_base = CAM_36BIT_INTF_GET_IOVA_BASE(io_buf->p_info[k].iova_addr); + update_cre_reg_set(cre_reg_buf, + wr_reg->offset + wr_reg_client->img_addr, + iova_base); + iova_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET(io_buf->p_info[k].iova_addr); + update_cre_reg_set(cre_reg_buf, + wr_reg->offset + wr_reg_client->addr_cfg, + iova_offset); + + rc = cam_cre_translate_write_format(io_buf->p_info[k], + wr_client_reg_val); + if (rc < 0) + return -EINVAL; + + /* Buffer size */ + val = 0; + val = wr_client_reg_val->width; + val |= (wr_client_reg_val->height & + wr_client_reg_val->height_mask) << + wr_client_reg_val->height_shift; + update_cre_reg_set(cre_reg_buf, + wr_reg->offset + wr_reg_client->img_cfg_0, + val); + + /* stride */ + update_cre_reg_set(cre_reg_buf, + wr_reg->offset + wr_reg_client->img_cfg_2, + wr_client_reg_val->stride); + + val = 0; + val |= ((wr_client_reg_val->format & + wr_client_reg_val->format_mask) << + wr_client_reg_val->format_shift); + val |= ((wr_client_reg_val->alignment & + wr_client_reg_val->alignment_mask) << + wr_client_reg_val->alignment_shift); + /* pack cfg : Format and alignment */ + update_cre_reg_set(cre_reg_buf, + wr_reg->offset + wr_reg_client->packer_cfg, + val); + + /* Upadte debug status CFG*/ + val = 0xFFFF; + update_cre_reg_set(cre_reg_buf, + wr_reg->offset + wr_reg_client->debug_status_cfg, + val); + } + + return 0; +} + +static int cam_cre_bus_wr_prepare(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + int i, j = 0; + uint32_t req_idx; + struct cam_cre_dev_prepare_req *prepare; + struct cam_cre_ctx *ctx_data; + struct cam_cre_request *cre_request; + struct cre_io_buf *io_buf; + struct cre_reg_buffer *cre_reg_buf; + + if (ctx_id < 0 || !data) { + CAM_ERR(CAM_CRE, "Invalid data: %d %x", ctx_id, data); + return -EINVAL; + } + prepare = data; + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + + cre_request = ctx_data->req_list[req_idx]; + + CAM_DBG(CAM_CRE, "req_idx = %d req_id = 
%lld num_io_bufs = %d", + req_idx, cre_request->request_id, cre_request->num_io_bufs[0]); + + for (i = 0; i < cre_request->num_batch; i++) { + cre_reg_buf = &cre_request->cre_reg_buf[i]; + for (j = 0; j < cre_request->num_io_bufs[i]; j++) { + io_buf = cre_request->io_buf[i][j]; + CAM_DBG(CAM_CRE, "batch = %d io buf num = %d", + i, j); + if (io_buf->direction != CAM_BUF_OUTPUT) + continue; + + rc = cam_cre_bus_wr_update(cam_cre_hw_info, + ctx_id, prepare, i, j, + cre_reg_buf); + if (rc) + goto end; + } + } + +end: + return rc; +} + +static int cam_cre_bus_wr_acquire(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0, i; + struct cam_cre_acquire_dev_info *in_acquire; + struct cre_bus_wr_ctx *bus_wr_ctx; + struct cre_bus_out_port_to_wm *out_port_to_wr; + int out_port_idx; + + if (ctx_id < 0 || !data || ctx_id >= CRE_CTX_MAX) { + CAM_ERR(CAM_CRE, "Invalid data: %d %x", ctx_id, data); + return -EINVAL; + } + + wr_info->bus_wr_ctx[ctx_id] = vzalloc(sizeof(struct cre_bus_wr_ctx)); + if (!wr_info->bus_wr_ctx[ctx_id]) { + CAM_ERR(CAM_CRE, "Out of memory"); + return -ENOMEM; + } + + wr_info->bus_wr_ctx[ctx_id]->cre_acquire = data; + in_acquire = data; + bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id]; + bus_wr_ctx->num_out_ports = in_acquire->num_out_res; + bus_wr_ctx->security_flag = in_acquire->secure_mode; + + for (i = 0; i < in_acquire->num_out_res; i++) { + if (!in_acquire->out_res[i].width) + continue; + + CAM_DBG(CAM_CRE, "i = %d format = %u width = 0x%x height = 0x%x res_id %d", + i, in_acquire->out_res[i].format, + in_acquire->out_res[i].width, + in_acquire->out_res[i].height, + in_acquire->in_res[i].res_id); + + out_port_idx = + cam_cre_bus_wr_out_port_idx(in_acquire->out_res[i].res_id); + if (out_port_idx < 0) { + CAM_DBG(CAM_CRE, "Invalid out_port_idx: %d", + in_acquire->out_res[i].res_id); + rc = -EINVAL; + goto end; + } + CAM_DBG(CAM_CRE, "out_port_idx %d", out_port_idx); + out_port_to_wr = &wr_info->out_port_to_wm[out_port_idx]; + if 
(!out_port_to_wr->num_wm) { + CAM_DBG(CAM_CRE, "Invalid format for Input port"); + rc = -EINVAL; + goto end; + } + } + +end: + return rc; +} + +static int cam_cre_bus_wr_init(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + struct cam_cre_bus_wr_reg_val *bus_wr_reg_val; + struct cam_cre_bus_wr_reg *bus_wr_reg; + struct cam_cre_dev_init *dev_init = data; + + if (!cam_cre_hw_info) { + CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info"); + return -EINVAL; + } + + wr_info->cre_hw_info = cam_cre_hw_info; + bus_wr_reg_val = cam_cre_hw_info->bus_wr_reg_val; + bus_wr_reg = cam_cre_hw_info->bus_wr_reg_offset; + bus_wr_reg->base = dev_init->core_info->cre_hw_info->cre_bus_wr_base; + + CAM_DBG(CAM_CRE, "bus_wr_reg->base 0x%x", bus_wr_reg->base); + + cam_io_w_mb(bus_wr_reg_val->irq_mask_0, + bus_wr_reg->base + bus_wr_reg->irq_mask_0); + cam_io_w_mb(bus_wr_reg_val->irq_mask_1, + bus_wr_reg->base + bus_wr_reg->irq_mask_1); + + return 0; +} + +static int cam_cre_bus_wr_probe(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + int i, k; + struct cam_cre_bus_wr_reg_val *bus_wr_reg_val; + struct cre_bus_out_port_to_wm *out_port_to_wm; + uint32_t output_port_idx; + uint32_t wm_idx; + + if (!cam_cre_hw_info) { + CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info"); + return -EINVAL; + } + wr_info = CAM_MEM_ZALLOC(sizeof(struct cre_bus_wr), GFP_KERNEL); + if (!wr_info) { + CAM_ERR(CAM_CRE, "Out of memory"); + return -ENOMEM; + } + + wr_info->cre_hw_info = cam_cre_hw_info; + bus_wr_reg_val = cam_cre_hw_info->bus_wr_reg_val; + + for (i = 0; i < bus_wr_reg_val->num_clients; i++) { + output_port_idx = + bus_wr_reg_val->wr_clients[i].wm_port_id; + out_port_to_wm = &wr_info->out_port_to_wm[output_port_idx]; + wm_idx = out_port_to_wm->num_wm; + out_port_to_wm->output_port_id = + bus_wr_reg_val->wr_clients[i].output_port_id; + out_port_to_wm->wm_port_id[wm_idx] = + bus_wr_reg_val->wr_clients[i].wm_port_id; + out_port_to_wm->num_wm++; + } + + for (i = 0; i < 
CRE_MAX_OUT_RES; i++) { + out_port_to_wm = &wr_info->out_port_to_wm[i]; + CAM_DBG(CAM_CRE, "output port id = %d", + out_port_to_wm->output_port_id); + CAM_DBG(CAM_CRE, "num_wms = %d", + out_port_to_wm->num_wm); + for (k = 0; k < out_port_to_wm->num_wm; k++) { + CAM_DBG(CAM_CRE, "wm port id = %d", + out_port_to_wm->wm_port_id[k]); + } + } + + return 0; +} + +static int cam_cre_bus_wr_isr(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, void *data) +{ + uint32_t irq_status_0, irq_status_1; + struct cam_cre_bus_wr_reg *bus_wr_reg; + struct cam_cre_bus_wr_reg_val *bus_wr_reg_val; + struct cam_cre_irq_data *irq_data = data; + uint32_t debug_status_0; + uint32_t debug_status_1; + uint32_t img_violation_status; + uint32_t violation_status; + int i; + + if (!cam_cre_hw_info || !irq_data) { + CAM_ERR(CAM_CRE, "Invalid cam_cre_hw_info"); + return -EINVAL; + } + + bus_wr_reg = cam_cre_hw_info->bus_wr_reg_offset; + bus_wr_reg_val = cam_cre_hw_info->bus_wr_reg_val; + + /* Read and Clear Top Interrupt status */ + irq_status_0 = cam_io_r_mb(bus_wr_reg->base + bus_wr_reg->irq_status_0); + irq_status_1 = cam_io_r_mb(bus_wr_reg->base + bus_wr_reg->irq_status_1); + + CAM_DBG(CAM_CRE, "BUS irq_status_0 0x%x irq_status_1 0x%x", + irq_status_0, irq_status_1); + + cam_io_w_mb(irq_status_0, + bus_wr_reg->base + bus_wr_reg->irq_clear_0); + cam_io_w_mb(irq_status_1, + bus_wr_reg->base + bus_wr_reg->irq_clear_1); + + cam_io_w_mb(bus_wr_reg_val->irq_cmd_clear, + bus_wr_reg->base + bus_wr_reg->irq_cmd); + + if (irq_status_0 & bus_wr_reg_val->cons_violation) { + irq_data->error = 1; + CAM_ERR(CAM_CRE, "cre bus wr cons_violation"); + } + + if ((irq_status_0 & bus_wr_reg_val->violation) || + (irq_status_0 & bus_wr_reg_val->img_size_violation) || + (irq_status_0 & bus_wr_reg_val->cons_violation)) { + irq_data->error = 1; + img_violation_status = cam_io_r_mb(bus_wr_reg->base + + bus_wr_reg->image_size_violation_status); + violation_status = cam_io_r_mb(bus_wr_reg->base + + 
bus_wr_reg->violation_status); + + debug_status_0 = cam_io_r_mb(bus_wr_reg->base + + bus_wr_reg->wr_clients[0].debug_status_0); + debug_status_1 = cam_io_r_mb(bus_wr_reg->base + + bus_wr_reg->wr_clients[0].debug_status_1); + CAM_ERR(CAM_CRE, + "violation status 0x%x 0x%x debug status 0/1 0x%x/0x%x", + violation_status, img_violation_status, + debug_status_0, debug_status_1); + } + + if (irq_status_0 & bus_wr_reg_val->comp_buf_done) { + for (i = 0; i < bus_wr_reg_val->num_clients; i++) { + if (irq_status_1 & bus_wr_reg_val-> + wr_clients[i].client_buf_done) + CAM_INFO(CAM_CRE, "Cleint %d Buff done", i); + irq_data->wr_buf_done = 1 << i; + } + } + + return 0; +} + +int cam_cre_bus_wr_process(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data) +{ + int rc = 0; + + switch (cmd_id) { + case CRE_HW_PROBE: + CAM_DBG(CAM_CRE, "CRE_HW_PROBE: E"); + rc = cam_cre_bus_wr_probe(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_PROBE: X"); + break; + case CRE_HW_INIT: + CAM_DBG(CAM_CRE, "CRE_HW_INIT: E"); + rc = cam_cre_bus_wr_init(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_INIT: X"); + break; + case CRE_HW_ACQUIRE: + CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: E"); + rc = cam_cre_bus_wr_acquire(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: X"); + break; + case CRE_HW_RELEASE: + CAM_DBG(CAM_CRE, "CRE_HW_RELEASE: E"); + rc = cam_cre_bus_wr_release(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_RELEASE: X"); + break; + case CRE_HW_PREPARE: + CAM_DBG(CAM_CRE, "CRE_HW_PREPARE: E"); + rc = cam_cre_bus_wr_prepare(cam_cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_PREPARE: X"); + break; + case CRE_HW_REG_SET_UPDATE: + rc = cam_cre_bus_wr_reg_set_update(cam_cre_hw_info, 0, data); + break; + case CRE_HW_DEINIT: + case CRE_HW_START: + case CRE_HW_STOP: + case CRE_HW_FLUSH: + case CRE_HW_CLK_UPDATE: + case CRE_HW_BW_UPDATE: + case CRE_HW_RESET: + case CRE_HW_SET_IRQ_CB: + rc = 0; + CAM_DBG(CAM_CRE, 
"Unhandled cmds: %d", cmd_id); + break; + case CRE_HW_ISR: + rc = cam_cre_bus_wr_isr(cam_cre_hw_info, 0, data); + break; + default: + CAM_ERR(CAM_CRE, "Unsupported cmd: %d", cmd_id); + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.h new file mode 100644 index 0000000000..1d9ae2441f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/bus_wr/cre_bus_wr.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef CRE_BUS_WR_H +#define CRE_BUS_WR_H + +#include +#include +#include +#include "cre_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_soc_util.h" +#include "cam_cre_hw_mgr.h" + +/** + * struct cre_bus_out_port_to_wm + * + * @output_port_id: Output port ID + * @num_combos: Number of combos + * @num_wm: Number of WMs + * @wm_port_id: WM port Id + */ +struct cre_bus_out_port_to_wm { + uint32_t output_port_id; + uint32_t num_wm; + uint32_t wm_port_id[CRE_MAX_OUT_RES]; +}; + +/** + * struct cre_bus_wr_ctx + * + * @cre_acquire: CRE acquire structure + * @security_flag: security flag + * @num_out_ports: Number of out ports + */ +struct cre_bus_wr_ctx { + struct cam_cre_acquire_dev_info *cre_acquire; + bool security_flag; + uint32_t num_out_ports; +}; + +/** + * struct cre_bus_wr + * + * @cre_hw_info: CRE hardware info + * @out_port_to_wm: IO port to WM mapping + * @bus_wr_ctx: WM context + */ +struct cre_bus_wr { + struct cam_cre_hw *cre_hw_info; + struct cre_bus_out_port_to_wm out_port_to_wm[CRE_MAX_OUT_RES]; + struct cre_bus_wr_ctx *bus_wr_ctx[CRE_CTX_MAX]; +}; +#endif /* CRE_BUS_WR_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.c 
b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.c new file mode 100644 index 0000000000..80077439d7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.c @@ -0,0 +1,576 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cre_core.h" +#include "cre_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_cre_hw_intf.h" +#include "cam_cre_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cre_dev_intf.h" +#include "cam_compat.h" +#include "cre_bus_wr.h" +#include "cre_bus_rd.h" + +#define CAM_CRE_RESET_TIMEOUT msecs_to_jiffies(500) + +struct cam_cre_irq_data irq_data; + +static int cam_cre_caps_vote(struct cam_cre_device_core_info *core_info, + struct cam_cre_dev_bw_update *cpas_vote) +{ + int rc = 0; + + if (cpas_vote->ahb_vote_valid) + rc = cam_cpas_update_ahb_vote(core_info->cpas_handle, + &cpas_vote->ahb_vote); + if (cpas_vote->axi_vote_valid) + rc = cam_cpas_update_axi_vote(core_info->cpas_handle, + &cpas_vote->axi_vote); + if (rc) + CAM_ERR(CAM_CRE, "cpas vote is failed: %d", rc); + + return rc; +} + +int cam_cre_get_hw_caps(void *hw_priv, void *get_hw_cap_args, + uint32_t arg_size) +{ + struct cam_hw_info *cre_dev = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cre_device_core_info *core_info = NULL; + struct cam_cre_hw_ver *cre_hw_ver; + struct cam_cre_top_reg_val *top_reg_val; + + if (!hw_priv) { + CAM_ERR(CAM_CRE, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &cre_dev->soc_info; + core_info = (struct cam_cre_device_core_info *)cre_dev->core_info; + + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_CRE, "soc_info = %x 
core_info = %x", + soc_info, core_info); + return -EINVAL; + } + + if (!get_hw_cap_args) { + CAM_ERR(CAM_CRE, "Invalid caps"); + return -EINVAL; + } + + top_reg_val = core_info->cre_hw_info->cre_hw->top_reg_val; + cre_hw_ver = get_hw_cap_args; + cre_hw_ver->hw_ver.major = + (core_info->hw_version & top_reg_val->major_mask) >> + top_reg_val->major_shift; + cre_hw_ver->hw_ver.minor = + (core_info->hw_version & top_reg_val->minor_mask) >> + top_reg_val->minor_shift; + + return 0; +} + +static int cam_cre_dev_process_init(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + int rc = 0; + + rc = cam_cre_top_process(cre_hw, 0, CRE_HW_INIT, cmd_args); + if (rc) + goto top_init_fail; + + rc = cam_cre_bus_wr_process(cre_hw, 0, CRE_HW_INIT, cmd_args); + if (rc) + goto bus_wr_init_fail; + + rc = cam_cre_bus_rd_process(cre_hw, 0, CRE_HW_INIT, cmd_args); + if (rc) + goto bus_rd_init_fail; + + return rc; + +bus_rd_init_fail: + rc = cam_cre_bus_wr_process(cre_hw, 0, + CRE_HW_DEINIT, NULL); +bus_wr_init_fail: + rc = cam_cre_top_process(cre_hw, 0, + CRE_HW_DEINIT, NULL); +top_init_fail: + return rc; +} + +static int cam_cre_process_init(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + return cam_cre_dev_process_init(cre_hw, cmd_args); +} + +int cam_cre_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *cre_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cre_device_core_info *core_info = NULL; + struct cam_cre_cpas_vote *cpas_vote; + int rc = 0; + struct cam_cre_dev_init *init; + struct cam_cre_hw *cre_hw; + + if (!device_priv) { + CAM_ERR(CAM_CRE, "Invalid cam_dev_info"); + rc = -EINVAL; + goto end; + } + + soc_info = &cre_dev->soc_info; + core_info = (struct cam_cre_device_core_info *)cre_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_CRE, "soc_info = %pK core_info = %pK", + soc_info, core_info); + rc = -EINVAL; + goto end; + } + cre_hw = core_info->cre_hw_info->cre_hw; + + cpas_vote = 
CAM_MEM_ZALLOC(sizeof(struct cam_cre_cpas_vote), GFP_KERNEL); + if (!cpas_vote) { + CAM_ERR(CAM_ISP, "Out of memory"); + rc = -ENOMEM; + goto end; + } + + cpas_vote->ahb_vote.type = CAM_VOTE_ABSOLUTE; + cpas_vote->ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + cpas_vote->axi_vote.num_paths = 1; + cpas_vote->axi_vote.axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_ALL; + cpas_vote->axi_vote.axi_path[0].transac_type = + CAM_AXI_TRANSACTION_WRITE; + cpas_vote->axi_vote.axi_path[0].camnoc_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote->axi_vote.axi_path[0].mnoc_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote->axi_vote.axi_path[0].mnoc_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote->axi_vote.axi_path[0].ddr_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote->axi_vote.axi_path[0].ddr_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote->ahb_vote, &cpas_vote->axi_vote); + if (rc) { + CAM_ERR(CAM_CRE, "cpass start failed: %d", rc); + goto free_cpas_vote; + } + core_info->cpas_start = true; + + rc = cam_cre_enable_soc_resources(soc_info); + if (rc) + goto enable_soc_resource_failed; + else + core_info->clk_enable = true; + + init = init_hw_args; + + init->core_info = core_info; + rc = cam_cre_process_init(cre_hw, init_hw_args); + if (rc) + goto process_init_failed; + else + goto free_cpas_vote; + +process_init_failed: + if (cam_cre_disable_soc_resources(soc_info)) + CAM_ERR(CAM_CRE, "disable soc resource failed"); +enable_soc_resource_failed: + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_CRE, "cpas stop is failed"); + else + core_info->cpas_start = false; +free_cpas_vote: + CAM_MEM_ZFREE((void *)cpas_vote, sizeof(struct cam_cre_cpas_vote)); + cpas_vote = NULL; +end: + return rc; +} + +int cam_cre_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *cre_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cre_device_core_info *core_info = NULL; + int rc = 0; + + if 
(!device_priv) { + CAM_ERR(CAM_CRE, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &cre_dev->soc_info; + core_info = (struct cam_cre_device_core_info *)cre_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_CRE, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + rc = cam_cre_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_CRE, "soc disable is failed : %d", rc); + core_info->clk_enable = false; + + return rc; +} + +static int cam_cre_dev_process_dump_debug_reg(struct cam_cre_hw *cre_hw) +{ + int rc = 0; + + rc = cam_cre_top_process(cre_hw, -1, + CRE_HW_DUMP_DEBUG, NULL); + + return rc; +} + +static int cam_cre_dev_process_reset(struct cam_cre_hw *cre_hw, void *cmd_args) +{ + int rc = 0; + + rc = cam_cre_top_process(cre_hw, -1, + CRE_HW_RESET, NULL); + + return rc; +} + +static int cam_cre_dev_process_release(struct cam_cre_hw *cre_hw, void *cmd_args) +{ + int rc = 0; + struct cam_cre_dev_release *cre_dev_release; + + cre_dev_release = cmd_args; + rc = cam_cre_top_process(cre_hw, cre_dev_release->ctx_id, + CRE_HW_RELEASE, NULL); + + rc |= cam_cre_bus_wr_process(cre_hw, cre_dev_release->ctx_id, + CRE_HW_RELEASE, NULL); + + rc |= cam_cre_bus_rd_process(cre_hw, cre_dev_release->ctx_id, + CRE_HW_RELEASE, NULL); + + return rc; +} + +static int cam_cre_dev_process_acquire(struct cam_cre_hw *cre_hw, void *cmd_args) +{ + int rc = 0; + struct cam_cre_dev_acquire *cre_dev_acquire; + + if (!cmd_args || !cre_hw) { + CAM_ERR(CAM_CRE, "Invalid arguments: %pK %pK", + cmd_args, cre_hw); + return -EINVAL; + } + + cre_dev_acquire = cmd_args; + rc = cam_cre_top_process(cre_hw, cre_dev_acquire->ctx_id, + CRE_HW_ACQUIRE, cre_dev_acquire); + if (rc) + goto top_acquire_fail; + + rc = cam_cre_bus_wr_process(cre_hw, cre_dev_acquire->ctx_id, + CRE_HW_ACQUIRE, cre_dev_acquire->cre_acquire); + if (rc) + goto bus_wr_acquire_fail; + + rc = cam_cre_bus_rd_process(cre_hw, cre_dev_acquire->ctx_id, + CRE_HW_ACQUIRE, 
cre_dev_acquire->cre_acquire); + if (rc) + goto bus_rd_acquire_fail; + + return 0; + +bus_rd_acquire_fail: + cam_cre_bus_wr_process(cre_hw, cre_dev_acquire->ctx_id, + CRE_HW_RELEASE, cre_dev_acquire->cre_acquire); +bus_wr_acquire_fail: + cam_cre_top_process(cre_hw, cre_dev_acquire->ctx_id, + CRE_HW_RELEASE, cre_dev_acquire->cre_acquire); +top_acquire_fail: + return rc; +} + +static int cam_cre_dev_process_reg_set_update(struct cam_cre_hw *cre_hw, void *cmd_args) +{ + int rc = 0; + struct cam_cre_dev_reg_set_update *reg_set_update; + + reg_set_update = cmd_args; + + rc = cam_cre_top_process(cre_hw, 0, + CRE_HW_REG_SET_UPDATE, reg_set_update); + if (rc) + goto end; + + rc = cam_cre_bus_wr_process(cre_hw, 0, + CRE_HW_REG_SET_UPDATE, reg_set_update); + if (rc) + goto end; + + rc = cam_cre_bus_rd_process(cre_hw, 0, + CRE_HW_REG_SET_UPDATE, reg_set_update); + if (rc) + goto end; + +end: + return rc; +} + +static int cam_cre_dev_process_prepare(struct cam_cre_hw *cre_hw, void *cmd_args) +{ + int rc = 0; + struct cam_cre_dev_prepare_req *cre_dev_prepare_req; + + cre_dev_prepare_req = cmd_args; + + rc = cam_cre_top_process(cre_hw, cre_dev_prepare_req->ctx_data->ctx_id, + CRE_HW_PREPARE, cre_dev_prepare_req); + if (rc) + goto end; + + rc = cam_cre_bus_wr_process(cre_hw, + cre_dev_prepare_req->ctx_data->ctx_id, + CRE_HW_PREPARE, cre_dev_prepare_req); + if (rc) + goto end; + + rc = cam_cre_bus_rd_process(cre_hw, + cre_dev_prepare_req->ctx_data->ctx_id, + CRE_HW_PREPARE, cre_dev_prepare_req); + if (rc) + goto end; + +end: + return rc; +} + +static int cam_cre_dev_process_probe(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + cam_cre_top_process(cre_hw, -1, CRE_HW_PROBE, NULL); + cam_cre_bus_wr_process(cre_hw, -1, CRE_HW_PROBE, NULL); + cam_cre_bus_rd_process(cre_hw, -1, CRE_HW_PROBE, NULL); + + return 0; +} + +static int cam_cre_process_probe(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + return cam_cre_dev_process_probe(cre_hw, cmd_args); +} + +static int 
cam_cre_process_dump_debug_reg(struct cam_cre_hw *cre_hw) +{ + return cam_cre_dev_process_dump_debug_reg(cre_hw); +} + +static int cam_cre_process_reset(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + return cam_cre_dev_process_reset(cre_hw, cmd_args); +} + +static int cam_cre_process_release(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + return cam_cre_dev_process_release(cre_hw, cmd_args); +} + +static int cam_cre_process_acquire(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + return cam_cre_dev_process_acquire(cre_hw, cmd_args); +} + +static int cam_cre_process_prepare(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + return cam_cre_dev_process_prepare(cre_hw, cmd_args); +} + +static int cam_cre_process_reg_set_update(struct cam_cre_hw *cre_hw, + void *cmd_args) +{ + return cam_cre_dev_process_reg_set_update(cre_hw, cmd_args); +} + +int cam_cre_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_hw_info *cre_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_cre_device_core_info *core_info = NULL; + struct cam_cre_hw *cre_hw; + unsigned long flags; + + if (!device_priv) { + CAM_ERR(CAM_CRE, "Invalid args %x for cmd %u", + device_priv, cmd_type); + return -EINVAL; + } + + soc_info = &cre_dev->soc_info; + core_info = (struct cam_cre_device_core_info *)cre_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_CRE, "soc_info = %x core_info = %x", + soc_info, core_info); + return -EINVAL; + } + + cre_hw = core_info->cre_hw_info->cre_hw; + if (!cre_hw) { + CAM_ERR(CAM_CRE, "Invalid cre hw info"); + return -EINVAL; + } + + switch (cmd_type) { + case CRE_HW_PROBE: + rc = cam_cre_process_probe(cre_hw, cmd_args); + break; + case CRE_HW_ACQUIRE: + rc = cam_cre_process_acquire(cre_hw, cmd_args); + break; + case CRE_HW_RELEASE: + rc = cam_cre_process_release(cre_hw, cmd_args); + break; + case CRE_HW_PREPARE: + rc = cam_cre_process_prepare(cre_hw, cmd_args); + break; + 
case CRE_HW_START: + break; + case CRE_HW_STOP: + break; + case CRE_HW_FLUSH: + break; + case CRE_HW_RESET: + rc = cam_cre_process_reset(cre_hw, cmd_args); + break; + case CRE_HW_CLK_UPDATE: { + struct cam_cre_dev_clk_update *clk_upd_cmd = + (struct cam_cre_dev_clk_update *)cmd_args; + + if (!core_info->clk_enable) { + rc = cam_soc_util_clk_enable_default(soc_info, CAM_CLK_SW_CLIENT_IDX, + soc_info->lowest_clk_level); + if (rc) { + CAM_ERR(CAM_CRE, "Clock enable is failed"); + return rc; + } + core_info->clk_enable = true; + } + + rc = cam_cre_update_clk_rate(soc_info, clk_upd_cmd->clk_rate); + if (rc) + CAM_ERR(CAM_CRE, "Failed to update clk: %d", rc); + } + break; + case CRE_HW_CLK_DISABLE: { + if (core_info->clk_enable) + cam_soc_util_clk_disable_default(soc_info, CAM_CLK_SW_CLIENT_IDX); + + core_info->clk_enable = false; + } + break; + case CRE_HW_BW_UPDATE: { + struct cam_cre_dev_bw_update *cpas_vote = cmd_args; + + if (!cmd_args) + return -EINVAL; + + rc = cam_cre_caps_vote(core_info, cpas_vote); + if (rc) + CAM_ERR(CAM_CRE, "failed to update bw: %d", rc); + } + break; + case CRE_HW_SET_IRQ_CB: { + struct cam_cre_set_irq_cb *irq_cb = cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_CRE, "cmd args NULL"); + return -EINVAL; + } + + spin_lock_irqsave(&cre_dev->hw_lock, flags); + core_info->irq_cb.cre_hw_mgr_cb = irq_cb->cre_hw_mgr_cb; + core_info->irq_cb.data = irq_cb->data; + spin_unlock_irqrestore(&cre_dev->hw_lock, flags); + } + break; + case CRE_HW_REG_SET_UPDATE: + rc = cam_cre_process_reg_set_update(cre_hw, cmd_args); + break; + case CRE_HW_DUMP_DEBUG: + rc = cam_cre_process_dump_debug_reg(cre_hw); + break; + default: + break; + } + + return rc; +} + +irqreturn_t cam_cre_irq(int irq_num, void *data) +{ + struct cam_hw_info *cre_dev = data; + struct cam_cre_device_core_info *core_info = NULL; + struct cam_cre_hw *cre_hw; + + if (!data) { + CAM_ERR(CAM_CRE, "Invalid cam_dev_info or query_cap args"); + return IRQ_HANDLED; + } + + core_info = (struct 
cam_cre_device_core_info *)cre_dev->core_info; + cre_hw = core_info->cre_hw_info->cre_hw; + + irq_data.error = 0; + irq_data.wr_buf_done = 0; + + cam_cre_top_process(cre_hw, 0, CRE_HW_ISR, &irq_data); + + if (irq_data.top_irq_status & CAM_CRE_WE_IRQ) + cam_cre_bus_wr_process(cre_hw, 0, CRE_HW_ISR, &irq_data); + if (irq_data.top_irq_status & CAM_CRE_FE_IRQ) + cam_cre_bus_rd_process(cre_hw, 0, CRE_HW_ISR, &irq_data); + + spin_lock(&cre_dev->hw_lock); + CAM_DBG(CAM_CRE, "core_info->irq_cb.cre_hw_mgr_cb %x core_info->irq_cb.data %x", + core_info->irq_cb.cre_hw_mgr_cb, core_info->irq_cb.data); + if (core_info->irq_cb.cre_hw_mgr_cb && core_info->irq_cb.data) + if (irq_data.error || irq_data.wr_buf_done) + core_info->irq_cb.cre_hw_mgr_cb(&irq_data, + sizeof(struct cam_hw_info), + core_info->irq_cb.data); + spin_unlock(&cre_dev->hw_lock); + + return IRQ_HANDLED; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.h new file mode 100644 index 0000000000..ffbfc7cc68 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_core.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef CAM_CRE_CORE_H +#define CAM_CRE_CORE_H + +#include +#include +#include +#include +#include +#include +#include "cam_cpas_api.h" +#include "cre_hw.h" +#include "cam_cre_hw_intf.h" + +#define CAM_CRE_IDLE_IRQ 0x8 +#define CAM_CRE_FE_IRQ 0x4 +#define CAM_CRE_WE_IRQ 0x2 +#define CAM_CRE_RESET_IRQ 0x1 + +/** + * struct cam_cre_cpas_vote + * @ahb_vote: AHB vote info + * @axi_vote: AXI vote info + * @ahb_vote_valid: Flag for ahb vote data + * @axi_vote_valid: flag for axi vote data + */ +struct cam_cre_cpas_vote { + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote; + uint32_t ahb_vote_valid; + uint32_t axi_vote_valid; +}; + + +struct cam_cre_device_hw_info { + struct cam_cre_hw *cre_hw; + uint32_t hw_idx; + void *cre_top_base; + void *cre_qos_base; + void *cre_pp_base; + void *cre_bus_rd_base; + void *cre_bus_wr_base; + uint32_t reserved; +}; + +struct cam_cre_device_core_info { + struct cam_cre_device_hw_info *cre_hw_info; + uint32_t hw_version; + uint32_t hw_idx; + uint32_t hw_type; + uint32_t cpas_handle; + bool cpas_start; + bool clk_enable; + struct cam_cre_set_irq_cb irq_cb; +}; + +int cam_cre_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_cre_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_cre_start_hw(void *device_priv, + void *start_hw_args, uint32_t arg_size); +int cam_cre_stop_hw(void *device_priv, + void *stop_hw_args, uint32_t arg_size); +int cam_cre_reset_hw(void *device_priv, + void *reset_hw_args, uint32_t arg_size); +int cam_cre_flush_hw(void *device_priv, + void *flush_args, uint32_t arg_size); +int cam_cre_get_hw_caps(void *device_priv, + void *get_hw_cap_args, uint32_t arg_size); +int cam_cre_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +irqreturn_t cam_cre_irq(int irq_num, void *data); + +/** + * @brief : API to register CRE hw to platform framework. 
+ * @return struct platform_device pointer on success, or ERR_PTR() on error. + */ +int cam_cre_init_module(void); + +/** + * @brief : API to remove CRE Hw from platform framework. + */ +void cam_cre_exit_module(void); +#endif /* CAM_CRE_CORE_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev.c b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev.c new file mode 100644 index 0000000000..8b10a198d5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev.c @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include +#include +#include + +#include "cam_node.h" +#include "cre_core.h" +#include "cre_soc.h" +#include "cam_hw.h" +#include "cre_hw.h" +#include "cam_hw_intf.h" +#include "cam_io_util.h" +#include "cam_cre_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cre_hw_100.h" +#include "cre_dev_intf.h" +#include "cam_smmu_api.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" + +static struct cam_cre_hw_intf_data cam_cre_dev_list[CRE_DEV_MAX]; +static struct cam_cre_device_hw_info cre_hw_info; +static struct cam_cre_soc_private cre_soc_info; + +static char cre_dev_name[8]; + +static struct cre_hw_version_reg cre_hw_version_reg = { + .hw_ver = 0x0, +}; + +static int cam_cre_init_hw_version(struct cam_hw_soc_info *soc_info, + struct cam_cre_device_core_info *core_info) +{ + int rc = 0; + + CAM_DBG(CAM_CRE, "soc_info = %x core_info = %x", + soc_info, core_info); + CAM_DBG(CAM_CRE, "TOP: %x RD: %x WR: %x", + soc_info->reg_map[CRE_TOP_BASE].mem_base, + soc_info->reg_map[CRE_BUS_RD].mem_base, + soc_info->reg_map[CRE_BUS_WR].mem_base); + + core_info->cre_hw_info->cre_top_base = + soc_info->reg_map[CRE_TOP_BASE].mem_base; + core_info->cre_hw_info->cre_bus_rd_base 
= + soc_info->reg_map[CRE_BUS_RD].mem_base; + core_info->cre_hw_info->cre_bus_wr_base = + soc_info->reg_map[CRE_BUS_WR].mem_base; + + core_info->hw_version = cam_io_r_mb( + core_info->cre_hw_info->cre_top_base + + cre_hw_version_reg.hw_ver); + + switch (core_info->hw_version) { + case CRE_HW_VER_1_0_0: + core_info->cre_hw_info->cre_hw = &cre_hw_100; + break; + default: + CAM_ERR(CAM_CRE, "Unsupported version : %u", + core_info->hw_version); + rc = -EINVAL; + break; + } + + cre_hw_100.top_reg_offset->base = core_info->cre_hw_info->cre_top_base; + cre_hw_100.bus_rd_reg_offset->base = core_info->cre_hw_info->cre_bus_rd_base; + cre_hw_100.bus_wr_reg_offset->base = core_info->cre_hw_info->cre_bus_wr_base; + + return rc; +} + +int cam_cre_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_cre_device_core_info *core_info, + uint32_t hw_idx) +{ + struct cam_cpas_register_params cpas_register_params; + int rc; + + cpas_register_params.dev = &soc_info->pdev->dev; + memcpy(cpas_register_params.identifier, "cre", sizeof("cre")); + cpas_register_params.cam_cpas_client_cb = NULL; + cpas_register_params.cell_index = hw_idx; + cpas_register_params.userdata = NULL; + + rc = cam_cpas_register_client(&cpas_register_params); + if (rc < 0) { + CAM_ERR(CAM_CRE, "failed: %d", rc); + return rc; + } + core_info->cpas_handle = cpas_register_params.client_handle; + + return rc; +} + +static int cam_cre_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_intf *cre_dev_intf = NULL; + struct cam_hw_info *cre_dev = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_cre_device_core_info *core_info = NULL; + struct cam_cre_dev_probe cre_probe; + struct cam_cre_cpas_vote cpas_vote; + struct cam_cre_soc_private *soc_private; + int i; + uint32_t hw_idx; + int rc = 0; + + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + 
of_property_read_u32(pdev->dev.of_node, + "cell-index", &hw_idx); + + cre_dev_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!cre_dev_intf) + return -ENOMEM; + + cre_dev_intf->hw_idx = hw_idx; + cre_dev_intf->hw_type = CRE_DEV_CRE; + cre_dev = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!cre_dev) { + rc = -ENOMEM; + goto cre_dev_alloc_failed; + } + + memset(cre_dev_name, 0, sizeof(cre_dev_name)); + snprintf(cre_dev_name, sizeof(cre_dev_name), + "cre%1u", cre_dev_intf->hw_idx); + + cre_dev->soc_info.pdev = pdev; + cre_dev->soc_info.dev = &pdev->dev; + cre_dev->soc_info.dev_name = cre_dev_name; + cre_dev_intf->hw_priv = cre_dev; + cre_dev_intf->hw_ops.init = cam_cre_init_hw; + cre_dev_intf->hw_ops.deinit = cam_cre_deinit_hw; + cre_dev_intf->hw_ops.get_hw_caps = cam_cre_get_hw_caps; + cre_dev_intf->hw_ops.process_cmd = cam_cre_process_cmd; + + CAM_DBG(CAM_CRE, "type %d index %d", + cre_dev_intf->hw_type, + cre_dev_intf->hw_idx); + + if (cre_dev_intf->hw_idx < CRE_DEV_MAX) + cam_cre_dev_list[cre_dev_intf->hw_idx].hw_intf = + cre_dev_intf; + + platform_set_drvdata(pdev, cre_dev_intf); + + + cre_dev->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_cre_device_core_info), + GFP_KERNEL); + if (!cre_dev->core_info) { + rc = -ENOMEM; + goto cre_core_alloc_failed; + } + core_info = (struct cam_cre_device_core_info *)cre_dev->core_info; + core_info->cre_hw_info = &cre_hw_info; + cre_dev->soc_info.soc_private = &cre_soc_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + rc = -EINVAL; + CAM_DBG(CAM_CRE, "No cre hardware info"); + goto cre_match_dev_failed; + } + + rc = cam_cre_init_soc_resources(&cre_dev->soc_info, cam_cre_irq, + cre_dev); + if (rc < 0) { + CAM_ERR(CAM_CRE, "failed to init_soc"); + goto init_soc_failed; + } + core_info->hw_type = CRE_DEV_CRE; + core_info->hw_idx = hw_idx; + rc = cam_cre_register_cpas(&cre_dev->soc_info, + core_info, cre_dev_intf->hw_idx); + if (rc < 0) 
+ goto register_cpas_failed; + + rc = cam_cre_enable_soc_resources(&cre_dev->soc_info); + if (rc < 0) { + CAM_ERR(CAM_CRE, "enable soc resorce failed: %d", rc); + goto enable_soc_failed; + } + cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE; + cpas_vote.ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + cpas_vote.axi_vote.num_paths = 1; + cpas_vote.axi_vote.axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_CRE_WR_OUT; + cpas_vote.axi_vote.axi_path[0].transac_type = + CAM_AXI_TRANSACTION_WRITE; + cpas_vote.axi_vote.axi_path[0].camnoc_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].ddr_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].ddr_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote.ahb_vote, &cpas_vote.axi_vote); + + rc = cam_cre_init_hw_version(&cre_dev->soc_info, cre_dev->core_info); + if (rc) + goto init_hw_failure; + + cam_cre_disable_soc_resources(&cre_dev->soc_info); + cam_cpas_stop(core_info->cpas_handle); + cre_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + + cam_cre_process_cmd(cre_dev, CRE_HW_PROBE, + &cre_probe, sizeof(cre_probe)); + mutex_init(&cre_dev->hw_mutex); + spin_lock_init(&cre_dev->hw_lock); + init_completion(&cre_dev->hw_complete); + + CAM_DBG(CAM_CRE, "CRE:%d component bound successfully", + cre_dev_intf->hw_idx); + soc_private = cre_dev->soc_info.soc_private; + cam_cre_dev_list[cre_dev_intf->hw_idx].num_hw_pid = + soc_private->num_pid; + + for (i = 0; i < soc_private->num_pid; i++) + cam_cre_dev_list[cre_dev_intf->hw_idx].hw_pid[i] = + soc_private->pid[i]; + + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return rc; + +init_hw_failure: +enable_soc_failed: +register_cpas_failed: +init_soc_failed: +cre_match_dev_failed: + 
CAM_MEM_FREE(cre_dev->core_info); +cre_core_alloc_failed: + CAM_MEM_FREE(cre_dev); +cre_dev_alloc_failed: + CAM_MEM_FREE(cre_dev_intf); + return rc; +} + +static void cam_cre_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + CAM_DBG(CAM_CRE, "Unbinding component: %s", pdev->name); +} + +int cam_cre_hw_init(struct cam_cre_hw_intf_data **cre_hw_intf_data, + uint32_t hw_idx) +{ + int rc = 0; + + if (cam_cre_dev_list[hw_idx].hw_intf) { + *cre_hw_intf_data = &cam_cre_dev_list[hw_idx]; + rc = 0; + } else { + CAM_ERR(CAM_CRE, "inval param"); + *cre_hw_intf_data = NULL; + rc = -ENODEV; + } + return rc; +} + +const static struct component_ops cam_cre_component_ops = { + .bind = cam_cre_component_bind, + .unbind = cam_cre_component_unbind, +}; + +int cam_cre_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CRE, "Adding CRE component"); + rc = component_add(&pdev->dev, &cam_cre_component_ops); + if (rc) + CAM_ERR(CAM_CRE, "failed to add component rc: %d", rc); + + return rc; +} + +static const struct of_device_id cam_cre_dt_match[] = { + { + .compatible = "qcom,cre", + .data = &cre_hw_version_reg, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_cre_dt_match); + +struct platform_driver cam_cre_driver = { + .probe = cam_cre_probe, + .driver = { + .name = "cre", + .of_match_table = cam_cre_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_cre_init_module(void) +{ + return platform_driver_register(&cam_cre_driver); +} + +void cam_cre_exit_module(void) +{ + platform_driver_unregister(&cam_cre_driver); +} + +MODULE_DESCRIPTION("CAM CRE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev_intf.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev_intf.h new file mode 100644 index 0000000000..69263dc079 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_dev_intf.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ +#ifndef CAM_CRE_DEV_INTF_H +#define CAM_CRE_DEV_INTF_H + +#include +#include "cam_cre_hw_mgr.h" +#include "cam_cpas_api.h" +#include "cre_top.h" + +#define CRE_HW_INIT 0x1 +#define CRE_HW_DEINIT 0x2 +#define CRE_HW_ACQUIRE 0x3 +#define CRE_HW_RELEASE 0x4 +#define CRE_HW_START 0x5 +#define CRE_HW_STOP 0x6 +#define CRE_HW_FLUSH 0x7 +#define CRE_HW_PREPARE 0x8 +#define CRE_HW_ISR 0x9 +#define CRE_HW_PROBE 0xA +#define CRE_HW_CLK_UPDATE 0xB +#define CRE_HW_BW_UPDATE 0xC +#define CRE_HW_RESET 0xD +#define CRE_HW_SET_IRQ_CB 0xE +#define CRE_HW_CLK_DISABLE 0xF +#define CRE_HW_CLK_ENABLE 0x10 +#define CRE_HW_DUMP_DEBUG 0x11 +#define CRE_HW_REG_SET_UPDATE 0x12 + +/** + * struct cam_cre_dev_probe + * + * @reserved: Reserved field for future use + */ +struct cam_cre_dev_probe { + uint32_t reserved; +}; + +/** + * struct cam_cre_dev_init + * + * @core_info: CRE core info + */ +struct cam_cre_dev_init { + struct cam_cre_device_core_info *core_info; +}; + +/** + * struct cam_cre_dev_clk_update + * + * @clk_rate: Clock rate + */ +struct cam_cre_dev_clk_update { + uint32_t clk_rate; +}; + +struct cam_cre_dev_reg_set_update { + struct cre_reg_buffer cre_reg_buf; +}; + +/** + * struct cam_cre_dev_bw_update + * + * @ahb_vote: AHB vote info + * @axi_vote: AXI vote info + * @ahb_vote_valid: Flag for ahb vote + * @axi_vote_valid: Flag for axi vote + */ +struct cam_cre_dev_bw_update { + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote; + uint32_t ahb_vote_valid; + uint32_t axi_vote_valid; +}; + +/** + * struct cam_cre_dev_acquire + * + * @ctx_id: Context id + * @cre_acquire: CRE acquire info + * @bus_wr_ctx: Bus Write context + * @bus_rd_ctx: Bus Read context + */ +struct cam_cre_dev_acquire { + uint32_t ctx_id; + struct cre_top *cre_top; + struct 
cam_cre_acquire_dev_info *cre_acquire; + struct cre_bus_wr_ctx *bus_wr_ctx; + struct cre_bus_rd_ctx *bus_rd_ctx; +}; + +/** + * struct cam_cre_dev_release + * + * @ctx_id: Context id + * @bus_wr_ctx: Bus Write context + * @bus_rd_ctx: Bus Read context + */ +struct cam_cre_dev_release { + uint32_t ctx_id; + struct cre_bus_wr_ctx *bus_wr_ctx; + struct cre_bus_rd_ctx *bus_rd_ctx; +}; + +/** + * struct cam_cre_dev_prepare_req + * + * @hw_mgr: CRE hardware manager + * @packet: Packet + * @prepare_args: Prepare request args + * @ctx_data: Context data + * @frame_process: Frame process command + * @req_idx: Request Index + */ +struct cam_cre_dev_prepare_req { + struct cam_cre_hw_mgr *hw_mgr; + struct cam_packet *packet; + struct cam_hw_prepare_update_args *prepare_args; + struct cam_cre_ctx *ctx_data; + uint32_t req_idx; +}; + +int cam_cre_subdev_init_module(void); +void cam_cre_subdev_exit_module(void); + +int cam_cre_top_process(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data); + +int cam_cre_bus_rd_process(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data); + +int cam_cre_bus_wr_process(struct cam_cre_hw *cam_cre_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data); + +#endif /* CAM_CRE_DEV_INTF_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw.h new file mode 100644 index 0000000000..e359edeb0c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef CAM_CRE_HW_H +#define CAM_CRE_HW_H + +#define CRE_HW_VER_1_0_0 0x10000000 + +#define CRE_DEV_CRE 0 +#define CRE_DEV_MAX 1 + +#define MAX_CRE_RD_CLIENTS 1 +#define MAX_CRE_WR_CLIENTS 1 + +#define CRE_TOP_BASE 0x0 +#define CRE_BUS_RD 0x1 +#define CRE_BUS_WR 0x2 +#define CRE_BASE_MAX 0x3 + +#define CRE_WAIT_BUS_WR_RUP 0x1 +#define CRE_WAIT_BUS_WR_DONE 0x2 +#define CRE_WAIT_BUS_RD_DONE 0x3 +#define CRE_WAIT_IDLE_IRQ 0x4 + +struct cam_cre_top_reg { + void *base; + uint32_t offset; + uint32_t hw_version; + uint32_t hw_cap; + uint32_t debug_0; + uint32_t debug_1; + uint32_t debug_cfg; + uint32_t testbus_ctrl; + uint32_t scratch_0; + uint32_t irq_status; + uint32_t irq_mask; + uint32_t irq_clear; + uint32_t irq_set; + uint32_t irq_cmd; + uint32_t reset_cmd; + uint32_t core_clk_cfg_ctrl_0; + uint32_t core_clk_cfg_ctrl_1; + uint32_t top_spare; +}; + +struct cam_cre_top_reg_val { + uint32_t hw_version; + uint32_t hw_cap; + uint32_t major_mask; + uint32_t major_shift; + uint32_t minor_mask; + uint32_t minor_shift; + uint32_t irq_mask; + uint32_t irq_set; + uint32_t irq_clear; + uint32_t irq_cmd_set; + uint32_t irq_cmd_clear; + uint32_t idle; + uint32_t fe_done; + uint32_t we_done; + uint32_t rst_done; + uint32_t sw_reset_cmd; + uint32_t hw_reset_cmd; + uint32_t core_clk_cfg_ctrl_0; + uint32_t core_clk_cfg_ctrl_1; + uint32_t top_override; + uint32_t we_override; + uint32_t fe_override; + uint32_t ahb_override; +}; + +struct cam_cre_bus_rd_client_reg { + uint32_t bus_ctrl; + uint32_t spare; + uint32_t cons_violation; + uint32_t cons_violation_status; + uint32_t core_cfg; + uint32_t ccif_meta_data; + uint32_t img_addr; + uint32_t rd_width; + uint32_t rd_height; + uint32_t rd_stride; + uint32_t unpacker_cfg; + uint32_t latency_buf_allocation; + uint32_t misr_cfg_0; + uint32_t misr_cfg_1; + uint32_t misr_rd_val; + uint32_t debug_status_cfg; + uint32_t debug_status_0; + uint32_t debug_status_1; + uint32_t read_buff_cfg; + uint32_t addr_cfg; +}; + +struct 
cam_cre_bus_rd_reg { + void *base; + uint32_t offset; + uint32_t hw_version; + uint32_t irq_mask; + uint32_t irq_clear; + uint32_t irq_cmd; + uint32_t irq_status; + uint32_t input_if_cmd; + uint32_t irq_set; + uint32_t misr_reset; + uint32_t security_cfg; + uint32_t iso_cfg; + uint32_t iso_seed; + uint32_t test_bus_ctrl; + + uint32_t num_clients; + struct cam_cre_bus_rd_client_reg rd_clients[MAX_CRE_RD_CLIENTS]; +}; + +struct cam_cre_bus_rd_client_reg_val { + uint32_t client_en; + uint32_t ai_en; + uint32_t ai_en_mask; + uint32_t ai_en_shift; + uint32_t pix_pattern; + uint32_t pix_pattern_mask; + uint32_t pix_pattern_shift; + uint32_t stripe_location; + uint32_t stripe_location_mask; + uint32_t stripe_location_shift; + uint32_t img_addr; + uint32_t img_width; + uint32_t img_height; + uint32_t stride; + uint32_t alignment; + uint32_t alignment_mask; + uint32_t alignment_shift; + uint32_t format; + uint32_t format_mask; + uint32_t format_shift; + uint32_t latency_buf_size; + uint32_t latency_buf_size_mask; + uint32_t misr_cfg_en; + uint32_t misr_cfg_en_mask; + uint32_t misr_cfg_samp_mode; + uint32_t misr_cfg_samp_mode_mask; + uint32_t misr_cfg_1; + uint32_t misr_rd_val; + uint32_t x_int; + uint32_t x_int_mask; + uint32_t byte_offset; + uint32_t byte_offset_mask; + uint32_t input_port_id; + uint32_t rm_port_id; +}; + +struct cam_cre_bus_rd_reg_val { + uint32_t hw_version; + uint32_t cgc_override; + uint32_t irq_mask; + uint32_t irq_status; + uint32_t irq_cmd_set; + uint32_t irq_cmd_clear; + uint32_t rup_done; + uint32_t rd_buf_done; + uint32_t cons_violation; + uint32_t static_prg; + uint32_t static_prg_mask; + uint32_t ica_en; + uint32_t ica_en_mask; + uint32_t go_cmd; + uint32_t go_cmd_mask; + uint32_t irq_set; + uint32_t irq_clear; + uint32_t misr_reset; + uint32_t security_cfg; + uint32_t iso_bpp_select; + uint32_t iso_bpp_select_mask; + uint32_t iso_pattern_select; + uint32_t iso_pattern_select_mask; + uint32_t iso_en; + uint32_t iso_en_mask; + uint32_t iso_seed; 
+ uint32_t bus_ctrl; + uint32_t spare; + uint32_t num_clients; + struct cam_cre_bus_rd_client_reg_val rd_clients[MAX_CRE_RD_CLIENTS]; +}; + +struct cam_cre_bus_wr_client_reg { + uint32_t client_cfg; + uint32_t img_addr; + uint32_t img_cfg_0; + uint32_t img_cfg_1; + uint32_t img_cfg_2; + uint32_t bw_limit; + uint32_t packer_cfg; + uint32_t addr_cfg; + uint32_t debug_status_cfg; + uint32_t debug_status_0; + uint32_t debug_status_1; +}; + +struct cam_cre_bus_wr_reg { + void *base; + uint32_t offset; + uint32_t hw_version; + uint32_t cgc_override; + uint32_t irq_mask_0; + uint32_t irq_mask_1; + uint32_t irq_clear_0; + uint32_t irq_clear_1; + uint32_t irq_status_0; + uint32_t irq_status_1; + uint32_t irq_cmd; + uint32_t frame_header_cfg_0; + uint32_t local_frame_header_cfg_0; + uint32_t irq_set_0; + uint32_t irq_set_1; + uint32_t iso_cfg; + uint32_t violation_status; + uint32_t image_size_violation_status; + uint32_t perf_count_cfg_0; + uint32_t perf_count_cfg_1; + uint32_t perf_count_cfg_2; + uint32_t perf_count_cfg_3; + uint32_t perf_count_val_0; + uint32_t perf_count_val_1; + uint32_t perf_count_val_2; + uint32_t perf_count_val_3; + uint32_t perf_count_status; + uint32_t misr_cfg_0; + uint32_t misr_cfg_1; + uint32_t misr_rd_sel; + uint32_t misr_reset; + uint32_t misr_val; + + uint32_t num_clients; + struct cam_cre_bus_wr_client_reg wr_clients[MAX_CRE_WR_CLIENTS]; +}; + +struct cam_cre_bus_wr_client_reg_val { + uint32_t client_en; + uint32_t client_en_mask; + uint32_t client_en_shift; + uint32_t auto_recovery_en; + uint32_t auto_recovery_en_mask; + uint32_t auto_recovery_en_shift; + uint32_t mode; + uint32_t mode_mask; + uint32_t mode_shift; + uint32_t img_addr; + uint32_t width; + uint32_t width_mask; + uint32_t width_shift; + uint32_t height; + uint32_t height_mask; + uint32_t height_shift; + uint32_t x_init; + uint32_t x_init_mask; + uint32_t stride; + uint32_t stride_mask; + uint32_t client_buf_done; + uint32_t format; + uint32_t format_mask; + uint32_t 
format_shift; + uint32_t alignment; + uint32_t alignment_mask; + uint32_t alignment_shift; + uint32_t bw_limit_en; + uint32_t bw_limit_en_mask; + uint32_t bw_limit_counter; + uint32_t bw_limit_counter_mask; + uint32_t frame_header_addr; + uint32_t output_port_id; + uint32_t wm_port_id; +}; + +struct cam_cre_bus_wr_reg_val { + uint32_t hw_version; + uint32_t cgc_override; + uint32_t irq_mask_0; + uint32_t irq_set_0; + uint32_t irq_clear_0; + uint32_t irq_status_0; + uint32_t img_size_violation; + uint32_t violation; + uint32_t cons_violation; + uint32_t comp_buf_done; + uint32_t comp_rup_done; + uint32_t irq_mask_1; + uint32_t irq_set_1; + uint32_t irq_clear_1; + uint32_t irq_status_1; + uint32_t irq_cmd_set; + uint32_t irq_cmd_clear; + uint32_t frame_header_cfg_0; + uint32_t local_frame_header_cfg_0; + uint32_t iso_en; + uint32_t iso_en_mask; + uint32_t violation_status; + uint32_t img_size_violation_status; + uint32_t misr_0_en; + uint32_t misr_0_en_mask; + uint32_t misr_0_samp_mode; + uint32_t misr_0_samp_mode_mask; + uint32_t misr_0_id; + uint32_t misr_0_id_mask; + uint32_t misr_rd_misr_sel; + uint32_t misr_rd_misr_sel_mask; + uint32_t misr_rd_word_sel; + uint32_t misr_rd_word_sel_mask; + uint32_t misr_reset; + uint32_t misr_val; + + uint32_t num_clients; + struct cam_cre_bus_wr_client_reg_val wr_clients[MAX_CRE_WR_CLIENTS]; +}; + +struct cam_cre_debug_register { + uint32_t offset; +}; + +struct cam_cre_hw { + struct cam_cre_top_reg *top_reg_offset; + struct cam_cre_top_reg_val *top_reg_val; + + struct cam_cre_bus_rd_reg *bus_rd_reg_offset; + struct cam_cre_bus_rd_reg_val *bus_rd_reg_val; + + struct cam_cre_bus_wr_reg *bus_wr_reg_offset; + struct cam_cre_bus_wr_reg_val *bus_wr_reg_val; + + struct cam_cre_common *common; +}; + +struct cre_hw_version_reg { + uint32_t hw_ver; + uint32_t reserved; +}; + +#endif /* CAM_CRE_HW_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw_100.h 
b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw_100.h new file mode 100644 index 0000000000..e1bb5e0304 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_hw_100.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef CAM_CRE_HW_100_H +#define CAM_CRE_HW_100_H + +#include "cre_hw.h" + +#define CRE_BUS_RD_TYPE 0x1 +#define CRE_BUS_WR_TYPE 0x2 + +static struct cam_cre_top_reg top_reg = { + .hw_version = 0x000, + .hw_cap = 0x004, + .debug_0 = 0x080, + .debug_1 = 0x084, + .debug_cfg = 0x0DC, + .testbus_ctrl = 0x1F4, + .scratch_0 = 0x1F8, + .irq_status = 0x00C, + .irq_mask = 0x010, + .irq_clear = 0x014, + .irq_set = 0x018, + .irq_cmd = 0x01C, + .reset_cmd = 0x008, + .core_clk_cfg_ctrl_0 = 0x020, + .core_clk_cfg_ctrl_1 = 0x024, + .top_spare = 0x1FC, +}; + +struct cam_cre_top_reg_val top_reg_value = { + .hw_version = 0x10000000, + .hw_cap = 0x4000, + .irq_mask = 0xf, + .irq_clear = 0xf, + .irq_set = 0xf, + .irq_cmd_set = 0xf, + .irq_cmd_clear = 0xf, + .idle = 0x8, + .fe_done = 0x4, + .we_done = 0x2, + .rst_done = 0x1, + .sw_reset_cmd = 0x2, + .hw_reset_cmd = 0x1, +}; + +struct cam_cre_bus_rd_reg bus_rd_reg = { + .hw_version = 0x00, + .irq_mask = 0x04, + .irq_clear = 0x08, + .irq_cmd = 0x0C, + .irq_status = 0x10, + .input_if_cmd = 0x14, + .irq_set = 0x18, + .misr_reset = 0x1C, + .security_cfg = 0x20, + .iso_cfg = 0x24, + .iso_seed = 0x28, + .test_bus_ctrl = 0x2C, + .num_clients = 1, + .rd_clients[0] = { + .core_cfg = 0x50, + .ccif_meta_data = 0x54, + .img_addr = 0x58, + .rd_width = 0x5C, + .rd_height = 0x60, + .rd_stride = 0x64, + .unpacker_cfg = 0x68, + .latency_buf_allocation = 0x7C, + .misr_cfg_0 = 0x84, + .misr_cfg_1 = 0x88, + .misr_rd_val = 0x8C, + .debug_status_cfg = 0x90, + .debug_status_0 = 0x94, + .debug_status_1 = 0x98, + .read_buff_cfg = 0xA0, + .addr_cfg = 0xA4, + .spare = 0x30, + .cons_violation = 
0x34, + }, +}; + +struct cam_cre_bus_wr_reg_val bus_wr_reg_value = { + .hw_version = 0x30000000, + .cgc_override = 0x1, + .irq_mask_0 = 0xd0000101, + .irq_set_0 = 0xd0000101, + .irq_clear_0 = 0xd0000101, + .img_size_violation = 0x80000000, + .violation = 0x40000000, + .cons_violation = 0x10000000, + .comp_buf_done = 0x100, + .comp_rup_done = 0x1, + .irq_mask_1 = 0x1, + .irq_set_1 = 0x1, + .irq_clear_1 = 0x1, + .irq_status_1 = 0x1, + .irq_cmd_set = 0x10, + .irq_cmd_clear = 0x1, + .iso_en = 0x1, + .iso_en_mask = 0x1, + .misr_0_en = 0x1, + .misr_0_en_mask = 0x1, + .misr_0_samp_mode = 0x1, + .misr_0_samp_mode_mask = 0x10000, + .misr_0_id_mask = 0xff, + .misr_rd_misr_sel_mask = 0xf, + .misr_rd_word_sel_mask = 0x70, + .num_clients = 1, + .wr_clients[0] = { + .client_en = 0x1, + .client_en_mask = 0x1, + .client_en_shift = 0x0, + .auto_recovery_en = 0x1, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + .mode_shift = 0x10, + .width_mask = 0xffff, + .width_shift = 0x0, + .height_mask = 0xffff, + .height_shift = 0x10, + .x_init_mask = 0xff, + .stride_mask = 0xffffff, + .format_mask = 0xf, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x5, + .bw_limit_en = 0x1, + .bw_limit_en_mask = 0x1, + .bw_limit_counter_mask = 0x1fe, + .client_buf_done = 0x1, + .output_port_id = CAM_CRE_OUTPUT_IMAGE, + .wm_port_id = 0, + }, +}; + +struct cam_cre_bus_rd_reg_val bus_rd_reg_value = { + .hw_version = 0x30000000, + .irq_mask = 0x1, /* INFO_CONS_VIOLATION */ + .rd_buf_done = 0x4, + .rup_done = 0x2, + .cons_violation = 0x1, + .irq_cmd_set = 0x10, + .irq_cmd_clear = 0x1, + .static_prg = 0x8, + .static_prg_mask = 0x8, + .ica_en = 0x1, + .ica_en_mask = 0x2, + .go_cmd = 0x1, + .go_cmd_mask = 0x1, + .irq_set = 0x7, + .irq_clear = 0x7, + .misr_reset = 0x1, + .security_cfg = 0x1, + .iso_bpp_select_mask = 0x60, + .iso_pattern_select_mask = 0x6, + .iso_en = 0x1, + .iso_en_mask = 0x1, + .num_clients = 1, + .rd_clients[0] = { + .client_en = 0x1, + 
.ai_en = 0x1, + .ai_en_mask = 0x1000, + .ai_en_shift = 0xc, + .pix_pattern_mask = 0xfc, + .pix_pattern_shift = 0x2, + .stripe_location_mask = 0x3, + .stripe_location_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x5, + .format_mask = 0x1f, + .format_shift = 0x0, + .latency_buf_size_mask = 0xffff, + .misr_cfg_en_mask = 0x4, + .misr_cfg_samp_mode_mask = 0x3, + .x_int_mask = 0xffff, + .byte_offset_mask = 0xff, + .input_port_id = CAM_CRE_INPUT_IMAGE, + .rm_port_id = 0x0, + }, +}; + +struct cam_cre_bus_wr_reg bus_wr_reg = { + .hw_version = 0x00, + .cgc_override = 0x08, + .irq_mask_0 = 0x18, + .irq_mask_1 = 0x1C, + .irq_clear_0 = 0x20, + .irq_clear_1 = 0x24, + .irq_status_0 = 0x28, + .irq_status_1 = 0x2C, + .irq_cmd = 0x30, + .frame_header_cfg_0 = 0x0, + .local_frame_header_cfg_0 = 0x4C, + .irq_set_0 = 0x50, + .irq_set_1 = 0x54, + .iso_cfg = 0x5C, + .violation_status = 0x64, + .image_size_violation_status = 0x70, + .perf_count_cfg_0 = 0x74, + .perf_count_cfg_1 = 0x78, + .perf_count_cfg_2 = 0x7C, + .perf_count_cfg_3 = 0x80, + .perf_count_val_0 = 0x94, + .perf_count_val_1 = 0x98, + .perf_count_val_2 = 0x9C, + .perf_count_val_3 = 0xA0, + .perf_count_status = 0xB4, + .misr_cfg_0 = 0xB8, + .misr_cfg_1 = 0xBC, + .misr_rd_sel = 0xC8, + .misr_reset = 0xCC, + .misr_val = 0xD0, + .wr_clients[0] = { + .client_cfg = 0x200, + .img_addr = 0x204, + .img_cfg_0 = 0x20C, + .img_cfg_1 = 0x210, + .img_cfg_2 = 0x214, + .packer_cfg = 0x218, + .bw_limit = 0x21C, + .addr_cfg = 0x270, + .debug_status_cfg = 0x284, + .debug_status_0 = 0x288, + .debug_status_1 = 0x28C, + }, +}; + +static struct cam_cre_hw cre_hw_100 = { + .top_reg_offset = &top_reg, + .top_reg_val = &top_reg_value, + .bus_wr_reg_offset = &bus_wr_reg, + .bus_wr_reg_val = &bus_wr_reg_value, + .bus_rd_reg_offset = &bus_rd_reg, + .bus_rd_reg_val = &bus_rd_reg_value, +}; +#endif // CAM_CRE_HW_100_H diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.c 
b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.c new file mode 100644 index 0000000000..65abe24908 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "cre_soc.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" + +int cam_cre_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t cre_irq_handler, void *data) +{ + int rc, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) + return rc; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, + cre_irq_handler, &(irq_data[0])); + if (rc) + CAM_ERR(CAM_CRE, "init soc failed %d", rc); + + return rc; +} + +int cam_cre_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_info->lowest_clk_level, true); + if (rc) + CAM_ERR(CAM_CRE, "enable platform failed %d", rc); + + return rc; +} + +int cam_cre_disable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) + CAM_ERR(CAM_CRE, "disable platform failed %d", rc); + + return rc; +} + +int cam_cre_update_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t clk_rate) +{ + int32_t src_clk_idx; + + if (!soc_info) { + CAM_ERR(CAM_CRE, "Invalid soc info"); + return -EINVAL; + } + + src_clk_idx = soc_info->src_clk_idx; + + CAM_DBG(CAM_CRE, "clk_rate = %u src_clk_index = %d", + clk_rate, src_clk_idx); + if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) && + 
(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) && + (clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) { + CAM_DBG(CAM_CRE, "clk_rate %d greater than max, reset to %d", + clk_rate, + soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]); + clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]; + } + + CAM_DBG(CAM_CRE, "clk_rate = %u src_clk_index = %d", + clk_rate, src_clk_idx); + return cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, clk_rate, 0); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.h new file mode 100644 index 0000000000..59a31a93eb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/cre_soc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CRE_SOC_H_ +#define _CAM_CRE_SOC_H_ + +#include "cam_soc_util.h" + +#define CAM_CRE_HW_MAX_NUM_PID 2 + +/** + * struct cam_cre_soc_private + * + * @num_pid: CRE number of pids + * @pid: CRE pid value list + */ +struct cam_cre_soc_private { + uint32_t num_pid; + uint32_t pid[CAM_CRE_HW_MAX_NUM_PID]; +}; + +int cam_cre_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t cre_irq_handler, void *irq_data); + +int cam_cre_enable_soc_resources(struct cam_hw_soc_info *soc_info); + +int cam_cre_disable_soc_resources(struct cam_hw_soc_info *soc_info); + +int cam_cre_update_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t clk_rate); +#endif /* _CAM_CRE_SOC_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_intf.h new file mode 100644 index 0000000000..80a377e84a --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_intf.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_CRE_HW_INTF_H +#define CAM_CRE_HW_INTF_H + +#include "cam_cpas_api.h" + +#define CAM_CRE_DEV_PER_TYPE_MAX 1 + +#define CAM_CRE_CMD_BUF_MAX_SIZE 128 +#define CAM_CRE_MSG_BUF_MAX_SIZE CAM_CRE_CMD_BUF_MAX_SIZE + +#define CRE_VOTE 640000000 + +#define CAM_CRE_HW_DUMP_TAG_MAX_LEN 128 +#define CAM_CRE_HW_DUMP_NUM_WORDS 5 + +struct cam_cre_set_irq_cb { + int32_t (*cre_hw_mgr_cb)(void *irq_data, + int32_t result_size, void *data); + void *data; + uint32_t b_set_cb; +}; + +struct cam_cre_hw_dump_args { + uint64_t request_id; + uintptr_t cpu_addr; + size_t offset; + size_t buf_len; +}; + +struct cam_cre_hw_dump_header { + uint8_t tag[CAM_CRE_HW_DUMP_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +enum cam_cre_cmd_type { + CAM_CRE_CMD_CFG, + CAM_CRE_CMD_SET_IRQ_CB, + CAM_CRE_CMD_HW_DUMP, + CAM_CRE_CMD_RESET_HW, + CAM_CRE_CMD_MAX, +}; + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_mgr_intf.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_mgr_intf.h new file mode 100644 index 0000000000..4149bc797f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/include/cam_cre_hw_mgr_intf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef CAM_CRE_HW_MGR_INTF_H +#define CAM_CRE_HW_MGR_INTF_H + +#include +#include +#include + +#define CAM_CRE_CTX_MAX 16 + +int cam_cre_hw_mgr_init(struct device_node *of_node, + void *hw_mgr, int *iommu_hdl); + +#endif /* CAM_CRE_HW_MGR_INTF_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.c b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.c new file mode 100644 index 0000000000..4e339fa169 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ +#include +#include +#include +#include +#include +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cre_core.h" +#include "cre_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cre_hw.h" +#include "cre_dev_intf.h" +#include "cre_top.h" + +static struct cre_top cre_top_info; + +static int cam_cre_top_reset(struct cam_cre_hw *cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + struct cam_cre_top_reg *top_reg; + struct cam_cre_top_reg_val *top_reg_val; + uint32_t irq_mask, irq_status; + unsigned long flags; + + if (!cre_hw_info) { + CAM_ERR(CAM_CRE, "Invalid cre_hw_info"); + return -EINVAL; + } + + top_reg = cre_hw_info->top_reg_offset; + top_reg_val = cre_hw_info->top_reg_val; + + mutex_lock(&cre_top_info.cre_hw_mutex); + reinit_completion(&cre_top_info.reset_complete); + reinit_completion(&cre_top_info.idle_done); + + /* enable interrupt mask */ + cam_io_w_mb(top_reg_val->irq_mask, + cre_hw_info->top_reg_offset->base + top_reg->irq_mask); + + /* CRE SW RESET */ + cam_io_w_mb(top_reg_val->sw_reset_cmd, + cre_hw_info->top_reg_offset->base + top_reg->reset_cmd); + + rc = wait_for_completion_timeout( + &cre_top_info.reset_complete, + msecs_to_jiffies(60)); + + if 
(!rc || rc < 0) { + spin_lock_irqsave(&cre_top_info.hw_lock, flags); + if (!completion_done(&cre_top_info.reset_complete)) { + CAM_DBG(CAM_CRE, + "IRQ delayed, checking the status registers"); + irq_mask = cam_io_r_mb(cre_hw_info->top_reg_offset->base + + top_reg->irq_mask); + irq_status = cam_io_r_mb(cre_hw_info->top_reg_offset->base + + top_reg->irq_status); + if (irq_status & top_reg_val->rst_done) { + CAM_DBG(CAM_CRE, "cre reset done"); + cam_io_w_mb(irq_status, + top_reg->base + top_reg->irq_clear); + cam_io_w_mb(top_reg_val->irq_cmd_clear, + top_reg->base + top_reg->irq_cmd); + } else { + CAM_ERR(CAM_CRE, + "irq mask 0x%x irq status 0x%x", + irq_mask, irq_status); + rc = -ETIMEDOUT; + } + } else { + rc = 0; + } + spin_unlock_irqrestore(&cre_top_info.hw_lock, flags); + } else { + rc = 0; + } + + /* enable interrupt mask */ + cam_io_w_mb(top_reg_val->irq_mask, + cre_hw_info->top_reg_offset->base + top_reg->irq_mask); + + mutex_unlock(&cre_top_info.cre_hw_mutex); + return rc; +} + +static int cam_cre_top_release(struct cam_cre_hw *cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + + if (ctx_id < 0) { + CAM_ERR(CAM_CRE, "Invalid data: %d", ctx_id); + return -EINVAL; + } + + cre_top_info.top_ctx[ctx_id].cre_acquire = NULL; + + return rc; +} + +static int cam_cre_top_acquire(struct cam_cre_hw *cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + struct cam_cre_dev_acquire *cre_dev_acquire = data; + + if (ctx_id < 0 || !data) { + CAM_ERR(CAM_CRE, "Invalid data: %d %x", ctx_id, data); + return -EINVAL; + } + + cre_top_info.top_ctx[ctx_id].cre_acquire = cre_dev_acquire->cre_acquire; + cre_dev_acquire->cre_top = &cre_top_info; + + return rc; +} + +static int cam_cre_top_init(struct cam_cre_hw *cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + struct cam_cre_top_reg *top_reg; + struct cam_cre_top_reg_val *top_reg_val; + struct cam_cre_dev_init *dev_init = data; + uint32_t irq_mask, irq_status; + unsigned long flags; + + if (!cre_hw_info) 
{ + CAM_ERR(CAM_CRE, "Invalid cre_hw_info"); + return -EINVAL; + } + + top_reg = cre_hw_info->top_reg_offset; + top_reg_val = cre_hw_info->top_reg_val; + + top_reg->base = dev_init->core_info->cre_hw_info->cre_top_base; + + mutex_init(&cre_top_info.cre_hw_mutex); + /* CRE SW RESET */ + init_completion(&cre_top_info.reset_complete); + init_completion(&cre_top_info.idle_done); + init_completion(&cre_top_info.bufdone); + + /* enable interrupt mask */ + cam_io_w_mb(top_reg_val->irq_mask, + cre_hw_info->top_reg_offset->base + top_reg->irq_mask); + cam_io_w_mb(top_reg_val->sw_reset_cmd, + cre_hw_info->top_reg_offset->base + top_reg->reset_cmd); + + rc = wait_for_completion_timeout( + &cre_top_info.reset_complete, + msecs_to_jiffies(60)); + + if (!rc || rc < 0) { + spin_lock_irqsave(&cre_top_info.hw_lock, flags); + if (!completion_done(&cre_top_info.reset_complete)) { + CAM_DBG(CAM_CRE, + "IRQ delayed, checking the status registers"); + irq_mask = cam_io_r_mb(cre_hw_info->top_reg_offset->base + + top_reg->irq_mask); + irq_status = cam_io_r_mb(cre_hw_info->top_reg_offset->base + + top_reg->irq_status); + if (irq_status & top_reg_val->rst_done) { + CAM_DBG(CAM_CRE, "cre reset done"); + cam_io_w_mb(irq_status, + top_reg->base + top_reg->irq_clear); + cam_io_w_mb(top_reg_val->irq_cmd_clear, + top_reg->base + top_reg->irq_cmd); + } else { + CAM_ERR(CAM_CRE, + "irq mask 0x%x irq status 0x%x", + irq_mask, irq_status); + rc = -ETIMEDOUT; + } + } else { + CAM_DBG(CAM_CRE, "reset done"); + rc = 0; + } + spin_unlock_irqrestore(&cre_top_info.hw_lock, flags); + } else { + rc = 0; + } + /* enable interrupt mask */ + cam_io_w_mb(top_reg_val->irq_mask, + cre_hw_info->top_reg_offset->base + top_reg->irq_mask); + return rc; +} + +static int cam_cre_top_probe(struct cam_cre_hw *cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + + if (!cre_hw_info) { + CAM_ERR(CAM_CRE, "Invalid cre_hw_info"); + return -EINVAL; + } + + cre_top_info.cre_hw_info = cre_hw_info; + 
spin_lock_init(&cre_top_info.hw_lock); + + return rc; +} + +static int cam_cre_top_isr(struct cam_cre_hw *cre_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + uint32_t irq_status; + struct cam_cre_top_reg *top_reg; + struct cam_cre_top_reg_val *top_reg_val; + struct cam_cre_irq_data *irq_data = data; + + if (!cre_hw_info) { + CAM_ERR(CAM_CRE, "Invalid cre_hw_info"); + return -EINVAL; + } + + top_reg = cre_hw_info->top_reg_offset; + top_reg_val = cre_hw_info->top_reg_val; + + spin_lock(&cre_top_info.hw_lock); + /* Read and Clear Top Interrupt status */ + irq_status = cam_io_r_mb(top_reg->base + top_reg->irq_status); + cam_io_w_mb(irq_status, + top_reg->base + top_reg->irq_clear); + + cam_io_w_mb(top_reg_val->irq_cmd_clear, + top_reg->base + top_reg->irq_cmd); + + if (irq_status & top_reg_val->rst_done) { + CAM_DBG(CAM_CRE, "cre reset done"); + complete(&cre_top_info.reset_complete); + } + + if (irq_status & top_reg_val->idle) { + CAM_DBG(CAM_CRE, "cre idle IRQ, can configure new settings"); + complete(&cre_top_info.idle_done); + } + + if (irq_status & top_reg_val->we_done) + CAM_DBG(CAM_CRE, "Received Write Engine IRQ"); + + if (irq_status & top_reg_val->fe_done) + CAM_DBG(CAM_CRE, "Received Fetch Engine IRQ"); + + irq_data->top_irq_status = irq_status; + spin_unlock(&cre_top_info.hw_lock); + + return rc; +} + +int cam_cre_top_process(struct cam_cre_hw *cre_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data) +{ + int rc = 0; + + switch (cmd_id) { + case CRE_HW_PROBE: + CAM_DBG(CAM_CRE, "CRE_HW_PROBE: E"); + rc = cam_cre_top_probe(cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_PROBE: X"); + break; + case CRE_HW_INIT: + CAM_DBG(CAM_CRE, "CRE_HW_INIT: E"); + rc = cam_cre_top_init(cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_INIT: X"); + break; + case CRE_HW_DEINIT: + break; + case CRE_HW_ACQUIRE: + CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: E"); + rc = cam_cre_top_acquire(cre_hw_info, ctx_id, data); + CAM_DBG(CAM_CRE, "CRE_HW_ACQUIRE: X"); + 
break; + case CRE_HW_PREPARE: + break; + case CRE_HW_RELEASE: + rc = cam_cre_top_release(cre_hw_info, ctx_id, data); + break; + case CRE_HW_REG_SET_UPDATE: + break; + case CRE_HW_START: + break; + case CRE_HW_STOP: + break; + case CRE_HW_FLUSH: + break; + case CRE_HW_ISR: + rc = cam_cre_top_isr(cre_hw_info, ctx_id, data); + break; + case CRE_HW_RESET: + rc = cam_cre_top_reset(cre_hw_info, ctx_id, 0); + break; + default: + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.h b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.h new file mode 100644 index 0000000000..30728cff8e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cre/cam_cre_hw_mgr/cre_hw/top/cre_top.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef CRE_TOP_H +#define CRE_TOP_H + +#include +#include +#include +#include "cre_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_soc_util.h" +#include "cam_context.h" +#include "cam_cre_context.h" +#include "cam_cre_hw_mgr.h" + +/** + * struct cre_top_ctx + * + * @cre_acquire: CRE acquire info + */ +struct cre_top_ctx { + struct cam_cre_acquire_dev_info *cre_acquire; +}; + +/** + * struct cre_top + * + * @cre_hw_info: CRE hardware info + * @top_ctx: CRE top context + * @reset_complete: Reset complete flag + * @cre_mutex: CRE hardware mutex + * @hw_lock: CRE hardware spinlock + */ +struct cre_top { + struct cam_cre_hw *cre_hw_info; + struct cre_top_ctx top_ctx[CAM_CRE_CTX_MAX]; + struct completion reset_complete; + struct completion idle_done; + struct completion bufdone; + struct mutex cre_hw_mutex; + spinlock_t hw_lock; +}; +#endif /* CRE_TOP_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_context.c b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_context.c new file mode 100644 index 
0000000000..4f352f9869 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_context.c @@ -0,0 +1,1734 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "cam_mem_mgr.h" +#include "cam_sync_api.h" +#include "cam_req_mgr_dev.h" +#include "cam_trace.h" +#include "cam_debug_util.h" +#include "cam_packet_util.h" +#include "cam_context_utils.h" +#include "cam_custom_context.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +static const char custom_dev_name[] = "cam-custom"; + +static int __cam_custom_ctx_handle_irq_in_activated( + void *context, uint32_t evt_id, void *evt_data); + +static int __cam_custom_ctx_start_dev_in_ready( + struct cam_context *ctx, struct cam_start_stop_dev_cmd *cmd); + +static int __cam_custom_ctx_apply_req_in_activated_state( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply, + uint32_t next_state); + +static int __cam_custom_ctx_apply_default_settings( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply); + +static int __cam_custom_ctx_apply_req_in_activated( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_custom_context *custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + + rc = __cam_custom_ctx_apply_req_in_activated_state( + ctx, apply, CAM_CUSTOM_CTX_ACTIVATED_APPLIED); + CAM_DBG(CAM_CUSTOM, "new substate %d", custom_ctx->substate_activated); + + if (rc) + CAM_ERR(CAM_CUSTOM, "Apply failed in state %d rc %d", + custom_ctx->substate_activated, rc); + + return rc; +} + +static int __cam_custom_ctx_handle_error( + struct cam_custom_context *custom_ctx, void *evt_data) +{ + /* + * Handle any HW error scenerios here, all the + * requests in all the lists can be signaled error. 
+ * Notify UMD about this error if needed. + */ + + return 0; +} + +static int __cam_custom_ctx_reg_upd_in_sof( + struct cam_custom_context *custom_ctx, void *evt_data) +{ + struct cam_ctx_request *req = NULL; + struct cam_custom_dev_ctx_req *req_custom; + struct cam_context *ctx = custom_ctx->base; + + custom_ctx->frame_id++; + + /* + * This is for the first update before streamon. + * The initial setting will cause the reg_upd in the + * first frame. + */ + if (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + if (req_custom->num_fence_map_out == req_custom->num_acked) { + list_add_tail(&req->list, &ctx->free_req_list); + } else { + list_add_tail(&req->list, &ctx->active_req_list); + custom_ctx->active_req_cnt++; + CAM_DBG(CAM_REQ, + "move request %lld to active list(cnt = %d), ctx %u", + req->request_id, custom_ctx->active_req_cnt, + ctx->ctx_id); + } + } + + return 0; +} + +static int __cam_custom_ctx_reg_upd_in_applied_state( + struct cam_custom_context *custom_ctx, void *evt_data) +{ + struct cam_ctx_request *req; + struct cam_context *ctx = custom_ctx->base; + struct cam_custom_dev_ctx_req *req_custom; + + custom_ctx->frame_id++; + if (list_empty(&ctx->wait_req_list)) { + CAM_ERR(CAM_CUSTOM, + "Reg upd ack with no waiting request"); + goto end; + } + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + if (req_custom->num_fence_map_out != 0) { + list_add_tail(&req->list, &ctx->active_req_list); + custom_ctx->active_req_cnt++; + CAM_DBG(CAM_REQ, + "move request %lld to active list(cnt = %d), ctx %u", + req->request_id, custom_ctx->active_req_cnt, + ctx->ctx_id); + } else { + /* no io config, so the request is completed. 
*/ + list_add_tail(&req->list, &ctx->free_req_list); + CAM_DBG(CAM_ISP, + "move active request %lld to free list(cnt = %d), ctx %u", + req->request_id, custom_ctx->active_req_cnt, + ctx->ctx_id); + } + + custom_ctx->substate_activated = CAM_CUSTOM_CTX_ACTIVATED_SOF; + CAM_DBG(CAM_CUSTOM, "next substate %d", custom_ctx->substate_activated); + +end: + return 0; +} + +static int __cam_custom_ctx_frame_done( + struct cam_custom_context *custom_ctx, void *evt_data) +{ + int rc = 0, i, j; + uint64_t frame_done_req_id; + struct cam_ctx_request *req; + struct cam_custom_dev_ctx_req *req_custom; + struct cam_context *ctx = custom_ctx->base; + struct cam_custom_hw_done_event_data *done_data = + (struct cam_custom_hw_done_event_data *)evt_data; + + if (list_empty(&ctx->active_req_list)) { + CAM_DBG(CAM_CUSTOM, "Frame done with no active request"); + return 0; + } + + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + req_custom = req->req_priv; + + for (i = 0; i < done_data->num_handles; i++) { + for (j = 0; j < req_custom->num_fence_map_out; j++) { + if (done_data->resource_handle[i] == + req_custom->fence_map_out[j].resource_handle) + break; + } + + if (j == req_custom->num_fence_map_out) { + CAM_ERR(CAM_CUSTOM, + "Can not find matching rsrc handle 0x%x!", + done_data->resource_handle[i]); + rc = -EINVAL; + continue; + } + + if (req_custom->fence_map_out[j].sync_id == -1) { + CAM_WARN(CAM_CUSTOM, + "Duplicate frame done for req %lld", + req->request_id); + continue; + } + + if (!req_custom->bubble_detected) { + rc = cam_sync_signal( + req_custom->fence_map_out[j].sync_id, + CAM_SYNC_STATE_SIGNALED_SUCCESS, + CAM_SYNC_COMMON_EVENT_SUCCESS); + if (rc) + CAM_ERR(CAM_CUSTOM, + "Sync failed with rc = %d", rc); + } else if (!req_custom->bubble_report) { + rc = cam_sync_signal( + req_custom->fence_map_out[j].sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + if (rc) + CAM_ERR(CAM_CUSTOM, + "Sync failed with rc = %d", rc); + } 
else { + req_custom->num_acked++; + CAM_DBG(CAM_CUSTOM, "frame done with bubble for %llu", + req->request_id); + continue; + } + + req_custom->num_acked++; + req_custom->fence_map_out[j].sync_id = -1; + } + + if (req_custom->num_acked > req_custom->num_fence_map_out) { + CAM_ERR(CAM_CUSTOM, + "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u", + req->request_id, req_custom->num_acked, + req_custom->num_fence_map_out, ctx->ctx_id); + } + + if (req_custom->num_acked != req_custom->num_fence_map_out) + return rc; + + custom_ctx->active_req_cnt--; + frame_done_req_id = req->request_id; + if (req_custom->bubble_detected && req_custom->bubble_report) { + req_custom->num_acked = 0; + req_custom->bubble_detected = false; + list_del_init(&req->list); + if (frame_done_req_id <= ctx->last_flush_req) { + for (i = 0; i < req_custom->num_fence_map_out; i++) + rc = cam_sync_signal( + req_custom->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + + list_add_tail(&req->list, &ctx->free_req_list); + atomic_set(&custom_ctx->process_bubble, 0); + CAM_DBG(CAM_REQ, + "Move active request %lld to free list(cnt = %d) [flushed], ctx %u", + frame_done_req_id, custom_ctx->active_req_cnt, + ctx->ctx_id); + } else { + list_add(&req->list, &ctx->pending_req_list); + atomic_set(&custom_ctx->process_bubble, 0); + CAM_DBG(CAM_REQ, + "Move active request %lld to pending list in ctx %u", + frame_done_req_id, ctx->ctx_id); + } + } else { + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->free_req_list); + CAM_DBG(CAM_REQ, + "Move active request %lld to free list(cnt = %d) [all fences done], ctx %u", + frame_done_req_id, + custom_ctx->active_req_cnt, + ctx->ctx_id); + } + + return rc; +} + +static int __cam_custom_ctx_handle_bubble( + struct cam_context *ctx, uint64_t req_id) +{ + int rc = -EINVAL; + bool found = false; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp; + struct cam_custom_dev_ctx_req *req_custom; + + 
list_for_each_entry_safe(req, req_temp, + &ctx->wait_req_list, list) { + if (req->request_id == req_id) { + req_custom = + (struct cam_custom_dev_ctx_req *)req->req_priv; + if (!req_custom->bubble_report) { + CAM_DBG(CAM_CUSTOM, + "Skip bubble recovery for %llu", + req_id); + goto end; + } + + req_custom->bubble_detected = true; + found = true; + CAM_DBG(CAM_CUSTOM, + "Found bubbled req %llu in wait list", + req_id); + } + } + + if (found) { + rc = 0; + goto end; + } + + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + if (req->request_id == req_id) { + req_custom = + (struct cam_custom_dev_ctx_req *)req->req_priv; + if (!req_custom->bubble_report) { + CAM_DBG(CAM_CUSTOM, + "Skip bubble recovery for %llu", + req_id); + goto end; + } + + req_custom->bubble_detected = true; + found = true; + CAM_DBG(CAM_CUSTOM, + "Found bubbled req %llu in active list", + req_id); + } + } + + if (found) + rc = 0; + else + CAM_ERR(CAM_CUSTOM, + "req %llu not found in wait or active list bubble recovery failed ctx: %u", + req_id, ctx->ctx_id); + +end: + return rc; +} + +static int __cam_custom_ctx_handle_evt( + struct cam_context *ctx, + struct cam_req_mgr_link_evt_data *evt_data) +{ + int rc = -1; + struct cam_custom_context *custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + + if (evt_data->u.error == CRM_KMD_ERR_BUBBLE) { + rc = __cam_custom_ctx_handle_bubble(ctx, evt_data->req_id); + if (rc) + return rc; + } else { + CAM_WARN(CAM_CUSTOM, "Unsupported error type %d", + evt_data->u.error); + } + + CAM_DBG(CAM_CUSTOM, "Set bubble flag for req %llu in ctx %u", + evt_data->req_id, ctx->ctx_id); + atomic_set(&custom_ctx->process_bubble, 1); + return 0; +} + +static struct cam_ctx_ops + cam_custom_ctx_activated_state_machine + [CAM_CUSTOM_CTX_ACTIVATED_MAX] = { + /* SOF */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_custom_ctx_apply_req_in_activated, + .notify_frame_skip = + __cam_custom_ctx_apply_default_settings, + }, + .irq_ops = 
NULL, + }, + /* APPLIED */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_custom_ctx_apply_req_in_activated, + .notify_frame_skip = + __cam_custom_ctx_apply_default_settings, + }, + .irq_ops = NULL, + }, + /* HW ERROR */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* HALT */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, +}; + +static struct cam_custom_ctx_irq_ops + cam_custom_ctx_activated_state_machine_irq + [CAM_CUSTOM_CTX_ACTIVATED_MAX] = { + /* SOF */ + { + .irq_ops = { + __cam_custom_ctx_handle_error, + __cam_custom_ctx_reg_upd_in_sof, + __cam_custom_ctx_frame_done, + }, + }, + /* APPLIED */ + { + .irq_ops = { + __cam_custom_ctx_handle_error, + __cam_custom_ctx_reg_upd_in_applied_state, + __cam_custom_ctx_frame_done, + }, + }, + /* HW ERROR */ + { + .irq_ops = { + NULL, + NULL, + NULL, + }, + }, + /* HALT */ + { + }, +}; + +static int __cam_custom_ctx_enqueue_request_in_order( + struct cam_context *ctx, struct cam_ctx_request *req) +{ + struct cam_ctx_request *req_current; + struct cam_ctx_request *req_prev; + struct list_head temp_list; + + INIT_LIST_HEAD(&temp_list); + spin_lock_bh(&ctx->lock); + if (list_empty(&ctx->pending_req_list)) { + list_add_tail(&req->list, &ctx->pending_req_list); + } else { + list_for_each_entry_safe_reverse( + req_current, req_prev, &ctx->pending_req_list, list) { + if (req->request_id < req_current->request_id) { + list_del_init(&req_current->list); + list_add(&req_current->list, &temp_list); + continue; + } else if (req->request_id == req_current->request_id) { + CAM_WARN(CAM_CUSTOM, + "Received duplicated request %lld", + req->request_id); + } + break; + } + list_add_tail(&req->list, &ctx->pending_req_list); + + if (!list_empty(&temp_list)) { + list_for_each_entry_safe( + req_current, req_prev, &temp_list, list) { + list_del_init(&req_current->list); + list_add_tail(&req_current->list, + &ctx->pending_req_list); + } + } + } + spin_unlock_bh(&ctx->lock); + return 0; +} + 
+static int __cam_custom_ctx_flush_req(struct cam_context *ctx, + struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req) +{ + int i, rc; + uint32_t cancel_req_id_found = 0; + struct cam_ctx_request *req; + struct cam_ctx_request *req_temp; + struct cam_custom_dev_ctx_req *req_custom; + struct list_head flush_list; + + INIT_LIST_HEAD(&flush_list); + if (list_empty(req_list)) { + CAM_DBG(CAM_CUSTOM, "request list is empty"); + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + CAM_INFO(CAM_CUSTOM, "no request to cancel (req:%lld)", flush_req->req_id); + return -EINVAL; + } else { + return 0; + } + } + + CAM_DBG(CAM_CUSTOM, "Flush [%u] in progress for req_id %llu", + flush_req->type, flush_req->req_id); + list_for_each_entry_safe(req, req_temp, req_list, list) { + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + if (req->request_id != flush_req->req_id) { + continue; + } else { + list_del_init(&req->list); + list_add_tail(&req->list, &flush_list); + cancel_req_id_found = 1; + break; + } + } + list_del_init(&req->list); + list_add_tail(&req->list, &flush_list); + } + + list_for_each_entry_safe(req, req_temp, &flush_list, list) { + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + for (i = 0; i < req_custom->num_fence_map_out; i++) { + if (req_custom->fence_map_out[i].sync_id != -1) { + CAM_DBG(CAM_CUSTOM, + "Flush req 0x%llx, fence %d", + req->request_id, + req_custom->fence_map_out[i].sync_id); + rc = cam_sync_signal( + req_custom->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_COMMON_EVENT_FLUSH); + if (rc) + CAM_ERR_RATE_LIMIT(CAM_CUSTOM, + "signal fence failed\n"); + req_custom->fence_map_out[i].sync_id = -1; + } + } + list_add_tail(&req->list, &ctx->free_req_list); + } + + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ && + !cancel_req_id_found) + CAM_DBG(CAM_CUSTOM, + "Flush request id:%lld is not found in the list", + flush_req->req_id); + + return 0; +} + +static int 
__cam_custom_ctx_unlink_in_acquired(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink) +{ + ctx->link_hdl = -1; + ctx->ctx_crm_intf = NULL; + + return 0; +} + +static int __cam_custom_ctx_unlink_in_ready(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink) +{ + ctx->link_hdl = -1; + ctx->ctx_crm_intf = NULL; + ctx->state = CAM_CTX_ACQUIRED; + + return 0; +} + +static int __cam_custom_ctx_get_dev_info_in_acquired(struct cam_context *ctx, + struct cam_req_mgr_device_info *dev_info) +{ + dev_info->dev_hdl = ctx->dev_hdl; + strscpy(dev_info->name, CAM_CUSTOM_DEV_NAME, sizeof(dev_info->name)); + dev_info->dev_id = CAM_REQ_MGR_DEVICE_CUSTOM_HW; + dev_info->p_delay = 1; + dev_info->trigger = CAM_TRIGGER_POINT_SOF; + + return 0; +} + +static int __cam_custom_ctx_flush_req_in_top_state( + struct cam_context *ctx, + struct cam_req_mgr_flush_request *flush_req) +{ + int rc = 0; + struct cam_custom_context *custom_ctx; + struct cam_hw_reset_args reset_args; + struct cam_hw_stop_args stop_args; + struct cam_custom_stop_args custom_stop; + + custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + + CAM_DBG(CAM_CUSTOM, "Flushing pending list"); + spin_lock_bh(&ctx->lock); + __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req); + spin_unlock_bh(&ctx->lock); + + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { + if (ctx->state <= CAM_CTX_READY) { + ctx->state = CAM_CTX_ACQUIRED; + goto end; + } + + spin_lock_bh(&ctx->lock); + ctx->state = CAM_CTX_FLUSHED; + spin_unlock_bh(&ctx->lock); + + CAM_INFO(CAM_CUSTOM, "Last request id to flush is %lld", + flush_req->req_id); + ctx->last_flush_req = flush_req->req_id; + + /* stop hw first */ + if (ctx->hw_mgr_intf->hw_stop) { + custom_stop.stop_only = true; + stop_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + stop_args.args = (void *) &custom_stop; + rc = ctx->hw_mgr_intf->hw_stop( + ctx->hw_mgr_intf->hw_mgr_priv, &stop_args); + if (rc) + CAM_ERR(CAM_CUSTOM, + "HW stop 
failed in flush rc %d", rc); + } + + spin_lock_bh(&ctx->lock); + if (!list_empty(&ctx->wait_req_list)) + __cam_custom_ctx_flush_req(ctx, &ctx->wait_req_list, + flush_req); + + if (!list_empty(&ctx->active_req_list)) + __cam_custom_ctx_flush_req(ctx, &ctx->active_req_list, + flush_req); + + custom_ctx->active_req_cnt = 0; + spin_unlock_bh(&ctx->lock); + + reset_args.ctxt_to_hw_map = custom_ctx->hw_ctx; + rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv, + &reset_args); + if (rc) + CAM_ERR(CAM_CUSTOM, + "Reset HW failed in flush rc %d", rc); + + custom_ctx->init_received = false; + } + +end: + atomic_set(&custom_ctx->process_bubble, 0); + return rc; +} + +static int __cam_custom_ctx_flush_req_in_ready( + struct cam_context *ctx, + struct cam_req_mgr_flush_request *flush_req) +{ + int rc = 0; + struct cam_custom_context *custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + + CAM_DBG(CAM_CUSTOM, "try to flush pending list"); + spin_lock_bh(&ctx->lock); + rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req); + + /* if nothing is in pending req list, change state to acquire */ + if (list_empty(&ctx->pending_req_list)) + ctx->state = CAM_CTX_ACQUIRED; + spin_unlock_bh(&ctx->lock); + + atomic_set(&custom_ctx->process_bubble, 0); + CAM_DBG(CAM_CUSTOM, "Flush request in ready state. 
next state %d", + ctx->state); + return rc; +} + +static int __cam_custom_stop_dev_core( + struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd) +{ + int rc = 0; + uint32_t i; + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *) ctx->ctx_priv; + struct cam_ctx_request *req; + struct cam_custom_dev_ctx_req *req_custom; + struct cam_hw_stop_args stop; + struct cam_custom_stop_args custom_stop; + + if ((ctx->state != CAM_CTX_FLUSHED) && (ctx_custom->hw_ctx) && + (ctx->hw_mgr_intf->hw_stop)) { + custom_stop.stop_only = false; + stop.ctxt_to_hw_map = ctx_custom->hw_ctx; + stop.args = (void *) &custom_stop; + rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv, + &stop); + if (rc) + CAM_ERR(CAM_CUSTOM, "HW stop failed rc %d", rc); + } + + while (!list_empty(&ctx->pending_req_list)) { + req = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + CAM_DBG(CAM_CUSTOM, + "signal fence in pending list. fence num %d", + req_custom->num_fence_map_out); + for (i = 0; i < req_custom->num_fence_map_out; i++) + if (req_custom->fence_map_out[i].sync_id != -1) { + cam_sync_signal( + req_custom->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_COMMON_EVENT_STOP); + } + list_add_tail(&req->list, &ctx->free_req_list); + } + + while (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + CAM_DBG(CAM_CUSTOM, "signal fence in wait list. 
fence num %d", + req_custom->num_fence_map_out); + for (i = 0; i < req_custom->num_fence_map_out; i++) + if (req_custom->fence_map_out[i].sync_id != -1) { + cam_sync_signal( + req_custom->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_COMMON_EVENT_STOP); + } + list_add_tail(&req->list, &ctx->free_req_list); + } + + while (!list_empty(&ctx->active_req_list)) { + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + CAM_DBG(CAM_CUSTOM, "signal fence in active list. fence num %d", + req_custom->num_fence_map_out); + for (i = 0; i < req_custom->num_fence_map_out; i++) + if (req_custom->fence_map_out[i].sync_id != -1) { + cam_sync_signal( + req_custom->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_COMMON_EVENT_STOP); + } + list_add_tail(&req->list, &ctx->free_req_list); + } + ctx_custom->frame_id = 0; + ctx_custom->active_req_cnt = 0; + + CAM_DBG(CAM_CUSTOM, "Stop device success next state %d on ctx %u", + ctx->state, ctx->ctx_id); + + if (!stop_cmd) { + rc = __cam_custom_ctx_unlink_in_ready(ctx, NULL); + if (rc) + CAM_ERR(CAM_CUSTOM, "Unlink failed rc=%d", rc); + } + return rc; +} + +static int __cam_custom_stop_dev_in_activated(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *)ctx->ctx_priv; + + __cam_custom_stop_dev_core(ctx, cmd); + ctx_custom->init_received = false; + ctx->state = CAM_CTX_ACQUIRED; + + return 0; +} + +static int __cam_custom_ctx_release_hw_in_top_state( + struct cam_context *ctx, void *cmd) +{ + int rc = 0; + struct cam_hw_release_args rel_arg; + struct cam_req_mgr_flush_request flush_req; + struct cam_custom_context *custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + + if (custom_ctx->hw_ctx) { + rel_arg.ctxt_to_hw_map = custom_ctx->hw_ctx; + rc = 
ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, + &rel_arg); + custom_ctx->hw_ctx = NULL; + if (rc) + CAM_ERR(CAM_CUSTOM, + "Failed to release HW for ctx:%u", ctx->ctx_id); + } else { + CAM_ERR(CAM_CUSTOM, "No HW resources acquired for this ctx"); + } + + ctx->last_flush_req = 0; + custom_ctx->frame_id = 0; + custom_ctx->active_req_cnt = 0; + custom_ctx->hw_acquired = false; + custom_ctx->init_received = false; + + /* check for active requests as well */ + flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL; + flush_req.link_hdl = ctx->link_hdl; + flush_req.dev_hdl = ctx->dev_hdl; + flush_req.req_id = 0; + + CAM_DBG(CAM_CUSTOM, "try to flush pending list"); + spin_lock_bh(&ctx->lock); + rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, + &flush_req); + spin_unlock_bh(&ctx->lock); + ctx->state = CAM_CTX_ACQUIRED; + + CAM_DBG(CAM_CUSTOM, "Release HW success[%u] next state %d", + ctx->ctx_id, ctx->state); + + return rc; +} + +static int __cam_custom_ctx_release_hw_in_activated_state( + struct cam_context *ctx, void *cmd) +{ + int rc = 0; + + rc = __cam_custom_stop_dev_in_activated(ctx, NULL); + if (rc) + CAM_ERR(CAM_CUSTOM, "Stop device failed rc=%d", rc); + + rc = __cam_custom_ctx_release_hw_in_top_state(ctx, cmd); + if (rc) + CAM_ERR(CAM_CUSTOM, "Release hw failed rc=%d", rc); + + return rc; +} + +static int __cam_custom_release_dev_in_acquired(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *) ctx->ctx_priv; + struct cam_req_mgr_flush_request flush_req; + + if (cmd && ctx_custom->hw_ctx) { + CAM_ERR(CAM_CUSTOM, "releasing hw"); + __cam_custom_ctx_release_hw_in_top_state(ctx, NULL); + } + + ctx->ctx_crm_intf = NULL; + ctx->last_flush_req = 0; + ctx_custom->frame_id = 0; + ctx_custom->active_req_cnt = 0; + ctx_custom->hw_acquired = false; + ctx_custom->init_received = false; + + if (!list_empty(&ctx->active_req_list)) + CAM_ERR(CAM_CUSTOM, "Active list is 
not empty"); + + /* Flush all the pending request list */ + flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL; + flush_req.link_hdl = ctx->link_hdl; + flush_req.dev_hdl = ctx->dev_hdl; + flush_req.req_id = 0; + + CAM_DBG(CAM_CUSTOM, "try to flush pending list"); + spin_lock_bh(&ctx->lock); + rc = __cam_custom_ctx_flush_req(ctx, &ctx->pending_req_list, + &flush_req); + spin_unlock_bh(&ctx->lock); + ctx->state = CAM_CTX_AVAILABLE; + + CAM_DBG(CAM_CUSTOM, "Release device success[%u] next state %d", + ctx->ctx_id, ctx->state); + + return rc; +} + +static int __cam_custom_ctx_apply_default_settings( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_custom_context *custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_custom_hw_cmd_args custom_hw_cmd_args; + + hw_cmd_args.ctxt_to_hw_map = custom_ctx->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + custom_hw_cmd_args.cmd_type = + CAM_CUSTOM_HW_MGR_PROG_DEFAULT_CONFIG; + hw_cmd_args.u.internal_args = (void *)&custom_hw_cmd_args; + + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) + CAM_ERR(CAM_CUSTOM, + "Failed to apply default settings rc %d", rc); + else + CAM_DBG(CAM_CUSTOM, "Applied default settings rc %d", rc); + + return rc; +} + +static int __cam_custom_ctx_apply_req_in_activated_state( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply, + uint32_t next_state) +{ + int rc = 0; + struct cam_ctx_request *req; + struct cam_custom_dev_ctx_req *req_custom; + struct cam_custom_context *custom_ctx = NULL; + struct cam_hw_config_args cfg; + + if (atomic_read(&custom_ctx->process_bubble)) { + CAM_WARN(CAM_CUSTOM, + "ctx_id:%d Processing bubble cannot apply Request Id %llu", + ctx->ctx_id, apply->request_id); + rc = -EAGAIN; + goto end; + } + + if (list_empty(&ctx->pending_req_list)) { + CAM_ERR(CAM_CUSTOM, "No available request for Apply id %lld", + 
apply->request_id); + rc = -EFAULT; + goto end; + } + + if (!list_empty(&ctx->wait_req_list)) + CAM_WARN(CAM_CUSTOM, "Apply invoked with a req in wait list"); + + custom_ctx = (struct cam_custom_context *) ctx->ctx_priv; + spin_lock_bh(&ctx->lock); + req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, + list); + spin_unlock_bh(&ctx->lock); + + /* + * Check whether the request id is matching the tip + */ + if (req->request_id != apply->request_id) { + CAM_ERR_RATE_LIMIT(CAM_CUSTOM, + "Invalid Request Id asking %llu existing %llu", + apply->request_id, req->request_id); + rc = -EFAULT; + goto end; + } + + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + req_custom->bubble_report = apply->report_if_bubble; + cfg.ctxt_to_hw_map = custom_ctx->hw_ctx; + cfg.request_id = req->request_id; + cfg.hw_update_entries = req_custom->cfg; + cfg.num_hw_update_entries = req_custom->num_cfg; + cfg.priv = &req_custom->hw_update_data; + cfg.init_packet = 0; + + rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_CUSTOM, + "Can not apply the configuration"); + } else { + spin_lock_bh(&ctx->lock); + custom_ctx->substate_activated = next_state; + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->wait_req_list); + spin_unlock_bh(&ctx->lock); + } + +end: + return rc; +} + +static int __cam_custom_ctx_acquire_hw_v1( + struct cam_context *ctx, void *args) +{ + int rc = 0; + struct cam_acquire_hw_cmd_v1 *cmd = + (struct cam_acquire_hw_cmd_v1 *)args; + struct cam_hw_acquire_args param; + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *) ctx->ctx_priv; + struct cam_custom_acquire_hw_info *acquire_hw_info = NULL; + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CUSTOM, "HW interface is not ready"); + rc = -EFAULT; + goto end; + } + + CAM_DBG(CAM_CUSTOM, + "session_hdl 0x%x, hdl type %d, res %lld", + cmd->session_handle, cmd->handle_type, cmd->resource_hdl); + + if (cmd->handle_type 
!= 1) { + CAM_ERR(CAM_CUSTOM, "Only user pointer is supported"); + rc = -EINVAL; + goto end; + } + + if (cmd->data_size < sizeof(*acquire_hw_info)) { + CAM_ERR(CAM_CUSTOM, "data_size is not a valid value"); + goto end; + } + + acquire_hw_info = CAM_MEM_ZALLOC(cmd->data_size, GFP_KERNEL); + if (!acquire_hw_info) { + rc = -ENOMEM; + goto end; + } + + CAM_DBG(CAM_CUSTOM, "start copy resources from user"); + + if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl, + cmd->data_size)) { + rc = -EFAULT; + goto free_res; + } + + memset(¶m, 0, sizeof(param)); + param.context_data = ctx; + param.event_cb = ctx->irq_cb_intf; + param.acquire_info_size = cmd->data_size; + param.acquire_info = (uint64_t) acquire_hw_info; + + /* call HW manager to reserve the resource */ + rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv, + ¶m); + if (rc != 0) { + CAM_ERR(CAM_CUSTOM, "Acquire HW failed"); + goto free_res; + } + + ctx_custom->substate_machine_irq = + cam_custom_ctx_activated_state_machine_irq; + ctx_custom->substate_machine = + cam_custom_ctx_activated_state_machine; + ctx_custom->hw_ctx = param.ctxt_to_hw_map; + ctx_custom->hw_acquired = true; + ctx->ctxt_to_hw_map = param.ctxt_to_hw_map; + + CAM_DBG(CAM_CUSTOM, + "Acquire HW success on session_hdl 0x%xs for ctx_id %u", + ctx->session_hdl, ctx->ctx_id); + + CAM_MEM_FREE(acquire_hw_info); + return rc; + +free_res: + CAM_MEM_FREE(acquire_hw_info); +end: + return rc; +} + +static int __cam_custom_ctx_acquire_dev_in_available( + struct cam_context *ctx, struct cam_acquire_dev_cmd *cmd) +{ + int rc = 0; + struct cam_create_dev_hdl req_hdl_param; + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CUSTOM, "HW interface is not ready"); + rc = -EFAULT; + return rc; + } + + CAM_DBG(CAM_CUSTOM, + "session_hdl 0x%x, num_resources %d, hdl type %d, res %lld", + cmd->session_handle, cmd->num_resources, + cmd->handle_type, cmd->resource_hdl); + + if (cmd->num_resources != CAM_API_COMPAT_CONSTANT) { + CAM_ERR(CAM_CUSTOM, 
"Invalid num_resources 0x%x", + cmd->num_resources); + return -EINVAL; + } + + req_hdl_param.session_hdl = cmd->session_handle; + req_hdl_param.v4l2_sub_dev_flag = 0; + req_hdl_param.media_entity_flag = 0; + req_hdl_param.ops = ctx->crm_ctx_intf; + req_hdl_param.priv = ctx; + + CAM_DBG(CAM_CUSTOM, "get device handle from bridge"); + ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param); + if (ctx->dev_hdl <= 0) { + rc = -EFAULT; + CAM_ERR(CAM_CUSTOM, "Can not create device handle"); + return rc; + } + + cmd->dev_handle = ctx->dev_hdl; + ctx->session_hdl = cmd->session_handle; + ctx->state = CAM_CTX_ACQUIRED; + + CAM_DBG(CAM_CUSTOM, + "Acquire dev success on session_hdl 0x%x for ctx %u", + cmd->session_handle, ctx->ctx_id); + + return rc; +} + +static int __cam_custom_ctx_enqueue_init_request( + struct cam_context *ctx, struct cam_ctx_request *req) +{ + int rc = 0; + struct cam_ctx_request *req_old; + struct cam_custom_dev_ctx_req *req_custom_old; + struct cam_custom_dev_ctx_req *req_custom_new; + + spin_lock_bh(&ctx->lock); + if (list_empty(&ctx->pending_req_list)) { + list_add_tail(&req->list, &ctx->pending_req_list); + goto end; + } + + req_old = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + req_custom_old = (struct cam_custom_dev_ctx_req *) req_old->req_priv; + req_custom_new = (struct cam_custom_dev_ctx_req *) req->req_priv; + if (req_custom_old->hw_update_data.packet_opcode_type == + CAM_CUSTOM_PACKET_INIT_DEV) { + if ((req_custom_old->num_cfg + req_custom_new->num_cfg) >= + CAM_CUSTOM_CTX_CFG_MAX) { + CAM_WARN(CAM_CUSTOM, "Can not merge INIT pkt"); + rc = -ENOMEM; + } + + if (req_custom_old->num_fence_map_out != 0 || + req_custom_old->num_fence_map_in != 0) { + CAM_WARN(CAM_CUSTOM, "Invalid INIT pkt sequence"); + rc = -EINVAL; + } + + if (!rc) { + memcpy(req_custom_old->fence_map_out, + req_custom_new->fence_map_out, + sizeof(req_custom_new->fence_map_out[0])* + req_custom_new->num_fence_map_out); + 
req_custom_old->num_fence_map_out = + req_custom_new->num_fence_map_out; + + memcpy(req_custom_old->fence_map_in, + req_custom_new->fence_map_in, + sizeof(req_custom_new->fence_map_in[0])* + req_custom_new->num_fence_map_in); + req_custom_old->num_fence_map_in = + req_custom_new->num_fence_map_in; + + memcpy(&req_custom_old->cfg[req_custom_old->num_cfg], + req_custom_new->cfg, + sizeof(req_custom_new->cfg[0])* + req_custom_new->num_cfg); + req_custom_old->num_cfg += req_custom_new->num_cfg; + + req_old->request_id = req->request_id; + + list_add_tail(&req->list, &ctx->free_req_list); + } + } else { + CAM_WARN(CAM_CUSTOM, + "Received Update pkt before INIT pkt. req_id= %lld", + req->request_id); + rc = -EINVAL; + } +end: + spin_unlock_bh(&ctx->lock); + return rc; +} + +static int __cam_custom_ctx_config_dev(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc = 0, i; + struct cam_ctx_request *req = NULL; + struct cam_custom_dev_ctx_req *req_custom; + struct cam_packet *packet; + struct cam_hw_prepare_update_args cfg; + struct cam_req_mgr_add_request add_req; + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *) ctx->ctx_priv; + + /* get free request */ + spin_lock_bh(&ctx->lock); + if (!list_empty(&ctx->free_req_list)) { + req = list_first_entry(&ctx->free_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + } + spin_unlock_bh(&ctx->lock); + + if (!req) { + CAM_ERR(CAM_CUSTOM, "No more request obj free"); + return -ENOMEM; + } + + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + + cam_context_parse_config_cmd(ctx, cmd, &packet); + if (IS_ERR(packet)) { + rc = PTR_ERR(packet); + goto free_req; + } + + if ((((packet->header.op_code) & 0xF) == + CAM_CUSTOM_PACKET_UPDATE_DEV) + && (packet->header.request_id <= ctx->last_flush_req)) { + CAM_DBG(CAM_CUSTOM, + "request %lld has been flushed, reject packet", + packet->header.request_id); + rc = -EINVAL; + goto free_req; + } + + /* preprocess the 
configuration */ + memset(&cfg, 0, sizeof(cfg)); + cfg.packet = packet; + cfg.ctxt_to_hw_map = ctx_custom->hw_ctx; + cfg.out_map_entries = req_custom->fence_map_out; + cfg.max_out_map_entries = CAM_CUSTOM_DEV_CTX_RES_MAX; + cfg.in_map_entries = req_custom->fence_map_in; + cfg.max_in_map_entries = CAM_CUSTOM_DEV_CTX_RES_MAX; + cfg.priv = &req_custom->hw_update_data; + cfg.pf_data = &(req->pf_data); + + rc = ctx->hw_mgr_intf->hw_prepare_update( + ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (rc != 0) { + CAM_ERR(CAM_CUSTOM, "Prepare config packet failed in HW layer"); + rc = -EFAULT; + goto free_req; + } + + req_custom->num_cfg = cfg.num_hw_update_entries; + req_custom->num_fence_map_out = cfg.num_out_map_entries; + req_custom->num_fence_map_in = cfg.num_in_map_entries; + req_custom->num_acked = 0; + req_custom->hw_update_data.num_cfg = cfg.num_out_map_entries; + + for (i = 0; i < req_custom->num_fence_map_out; i++) { + rc = cam_sync_get_obj_ref(req_custom->fence_map_out[i].sync_id); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Can't get ref for fence %d", + req_custom->fence_map_out[i].sync_id); + goto put_ref; + } + } + + CAM_DBG(CAM_CUSTOM, + "num_entry: %d, num fence out: %d, num fence in: %d", + req_custom->num_cfg, req_custom->num_fence_map_out, + req_custom->num_fence_map_in); + + req->request_id = packet->header.request_id; + req->status = 1; + + CAM_DBG(CAM_CUSTOM, "Packet request id %lld packet opcode:%d", + packet->header.request_id, + req_custom->hw_update_data.packet_opcode_type); + + if (req_custom->hw_update_data.packet_opcode_type == + CAM_CUSTOM_PACKET_INIT_DEV) { + if (ctx->state < CAM_CTX_ACTIVATED) { + rc = __cam_custom_ctx_enqueue_init_request(ctx, req); + if (rc) + CAM_ERR(CAM_CUSTOM, "Enqueue INIT pkt failed"); + ctx_custom->init_received = true; + } else { + rc = -EINVAL; + CAM_ERR(CAM_CUSTOM, "Recevied INIT pkt in wrong state"); + } + } else { + if ((ctx->state != CAM_CTX_FLUSHED) && + (ctx->state >= CAM_CTX_READY) && + (ctx->ctx_crm_intf->add_req)) { + 
memset(&add_req, 0, sizeof(add_req)); + add_req.link_hdl = ctx->link_hdl; + add_req.dev_hdl = ctx->dev_hdl; + add_req.req_id = req->request_id; + rc = ctx->ctx_crm_intf->add_req(&add_req); + if (rc) { + CAM_ERR(CAM_CUSTOM, + "Add req failed: req id=%llu", + req->request_id); + } else { + __cam_custom_ctx_enqueue_request_in_order( + ctx, req); + } + } else { + rc = -EINVAL; + CAM_ERR(CAM_CUSTOM, "Recevied Update in wrong state"); + } + } + + if (rc) + goto put_ref; + + CAM_DBG(CAM_CUSTOM, + "Preprocessing Config req_id %lld successful on ctx %u", + req->request_id, ctx->ctx_id); + + return rc; + +put_ref: + for (--i; i >= 0; i--) { + if (cam_sync_put_obj_ref(req_custom->fence_map_out[i].sync_id)) + CAM_ERR(CAM_CUSTOM, "Failed to put ref of fence %d", + req_custom->fence_map_out[i].sync_id); + } +free_req: + spin_lock_bh(&ctx->lock); + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock_bh(&ctx->lock); + + return rc; + +} + +static int __cam_custom_ctx_config_dev_in_flushed(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc = 0; + struct cam_start_stop_dev_cmd start_cmd; + struct cam_custom_context *custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + + if (!custom_ctx->hw_acquired) { + CAM_ERR(CAM_CUSTOM, "HW is not acquired, reject packet"); + rc = -EINVAL; + goto end; + } + + rc = __cam_custom_ctx_config_dev(ctx, cmd); + if (rc) + goto end; + + if (!custom_ctx->init_received) { + CAM_WARN(CAM_CUSTOM, + "Received update packet in flushed state, skip start"); + goto end; + } + + start_cmd.dev_handle = cmd->dev_handle; + start_cmd.session_handle = cmd->session_handle; + rc = __cam_custom_ctx_start_dev_in_ready(ctx, &start_cmd); + if (rc) + CAM_ERR(CAM_CUSTOM, + "Failed to re-start HW after flush rc: %d", rc); + else + CAM_INFO(CAM_CUSTOM, + "Received init after flush. 
Re-start HW complete."); + +end: + return rc; +} + +static int __cam_custom_ctx_config_dev_in_acquired(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc = 0; + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *) ctx->ctx_priv; + + if (!ctx_custom->hw_acquired) { + CAM_ERR(CAM_CUSTOM, "HW not acquired, reject config packet"); + return -EAGAIN; + } + + rc = __cam_custom_ctx_config_dev(ctx, cmd); + + if (!rc && (ctx->link_hdl >= 0)) + ctx->state = CAM_CTX_READY; + + return rc; +} + +static int __cam_custom_ctx_link_in_acquired(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *link) +{ + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *) ctx->ctx_priv; + + ctx->link_hdl = link->link_hdl; + ctx->ctx_crm_intf = link->crm_cb; + + /* change state only if we had the init config */ + if (ctx_custom->init_received) + ctx->state = CAM_CTX_READY; + + CAM_DBG(CAM_CUSTOM, "next state %d", ctx->state); + + return 0; +} + +static int __cam_custom_ctx_start_dev_in_ready(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc = 0; + struct cam_custom_start_args custom_start; + struct cam_ctx_request *req; + struct cam_custom_dev_ctx_req *req_custom; + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *) ctx->ctx_priv; + + if (cmd->session_handle != ctx->session_hdl || + cmd->dev_handle != ctx->dev_hdl) { + rc = -EPERM; + goto end; + } + + if (list_empty(&ctx->pending_req_list)) { + /* should never happen */ + CAM_ERR(CAM_CUSTOM, "Start device with empty configuration"); + rc = -EFAULT; + goto end; + } else { + req = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + } + req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv; + + if (!ctx_custom->hw_ctx) { + CAM_ERR(CAM_CUSTOM, "Wrong hw context pointer."); + rc = -EFAULT; + goto end; + } + + custom_start.hw_config.ctxt_to_hw_map = ctx_custom->hw_ctx; + custom_start.hw_config.request_id = 
req->request_id; + custom_start.hw_config.hw_update_entries = req_custom->cfg; + custom_start.hw_config.num_hw_update_entries = req_custom->num_cfg; + custom_start.hw_config.priv = &req_custom->hw_update_data; + custom_start.hw_config.init_packet = 1; + if (ctx->state == CAM_CTX_FLUSHED) + custom_start.start_only = true; + else + custom_start.start_only = false; + + ctx_custom->frame_id = 0; + ctx_custom->active_req_cnt = 0; + atomic_set(&ctx_custom->process_bubble, 0); + ctx_custom->substate_activated = + (req_custom->num_fence_map_out) ? + CAM_CUSTOM_CTX_ACTIVATED_APPLIED : + CAM_CUSTOM_CTX_ACTIVATED_SOF; + + ctx->state = CAM_CTX_ACTIVATED; + rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, + &custom_start); + if (rc) { + /* HW failure. User need to clean up the resource */ + CAM_ERR(CAM_CUSTOM, "Start HW failed"); + ctx->state = CAM_CTX_READY; + goto end; + } + + CAM_DBG(CAM_CUSTOM, "start device success ctx %u", + ctx->ctx_id); + + spin_lock_bh(&ctx->lock); + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->wait_req_list); + spin_unlock_bh(&ctx->lock); + +end: + return rc; +} + +static int __cam_custom_ctx_release_dev_in_activated(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc = 0; + + rc = __cam_custom_stop_dev_core(ctx, NULL); + if (rc) + CAM_ERR(CAM_CUSTOM, "Stop device failed rc=%d", rc); + + rc = __cam_custom_release_dev_in_acquired(ctx, cmd); + if (rc) + CAM_ERR(CAM_CUSTOM, "Release device failed rc=%d", rc); + + return rc; +} + +static int __cam_custom_ctx_unlink_in_activated(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink) +{ + int rc = 0; + + CAM_WARN(CAM_CUSTOM, + "Received unlink in activated state. 
It's unexpected"); + + rc = __cam_custom_stop_dev_in_activated(ctx, NULL); + if (rc) + CAM_WARN(CAM_CUSTOM, "Stop device failed rc=%d", rc); + + rc = __cam_custom_ctx_unlink_in_ready(ctx, unlink); + if (rc) + CAM_ERR(CAM_CUSTOM, "Unlink failed rc=%d", rc); + + return rc; +} + +static int __cam_custom_ctx_handle_irq_in_activated(void *context, + uint32_t evt_id, void *evt_data) +{ + int rc = 0; + struct cam_custom_ctx_irq_ops *custom_irq_ops = NULL; + struct cam_context *ctx = (struct cam_context *)context; + struct cam_custom_context *ctx_custom = + (struct cam_custom_context *)ctx->ctx_priv; + + spin_lock(&ctx->lock); + CAM_DBG(CAM_CUSTOM, "Enter: State %d, Substate %d, evt id %d", + ctx->state, ctx_custom->substate_activated, evt_id); + custom_irq_ops = &ctx_custom->substate_machine_irq[ + ctx_custom->substate_activated]; + if (custom_irq_ops->irq_ops[evt_id]) + rc = custom_irq_ops->irq_ops[evt_id](ctx_custom, + evt_data); + else + CAM_DBG(CAM_CUSTOM, "No handle function for substate %d", + ctx_custom->substate_activated); + + CAM_DBG(CAM_CUSTOM, "Exit: State %d Substate %d", + ctx->state, ctx_custom->substate_activated); + + spin_unlock(&ctx->lock); + return rc; +} + +static int __cam_custom_ctx_acquire_hw_in_acquired( + struct cam_context *ctx, void *args) +{ + int rc = -EINVAL; + uint32_t api_version; + + if (!ctx || !args) { + CAM_ERR(CAM_CUSTOM, "Invalid input pointer"); + return rc; + } + + api_version = *((uint32_t *)args); + if (api_version == 1) + rc = __cam_custom_ctx_acquire_hw_v1(ctx, args); + else + CAM_ERR(CAM_CUSTOM, "Unsupported api version %d", + api_version); + + return rc; +} + +static int __cam_custom_ctx_apply_req(struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_ctx_ops *ctx_ops = NULL; + struct cam_custom_context *custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + + CAM_DBG(CAM_CUSTOM, + "Enter: apply req in Substate %d request _id:%lld", + custom_ctx->substate_activated, 
apply->request_id); + + ctx_ops = &custom_ctx->substate_machine[ + custom_ctx->substate_activated]; + if (ctx_ops->crm_ops.apply_req) { + rc = ctx_ops->crm_ops.apply_req(ctx, apply); + } else { + CAM_WARN_RATE_LIMIT(CAM_CUSTOM, + "No handle function in activated substate %d", + custom_ctx->substate_activated); + rc = -EFAULT; + } + + if (rc) + CAM_WARN_RATE_LIMIT(CAM_CUSTOM, + "Apply failed in active substate %d rc %d", + custom_ctx->substate_activated, rc); + return rc; +} + +static int __cam_custom_ctx_apply_default_req( + struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_ctx_ops *ctx_ops = NULL; + struct cam_custom_context *custom_ctx = + (struct cam_custom_context *) ctx->ctx_priv; + + CAM_DBG(CAM_CUSTOM, + "Enter: apply req in Substate %d request _id:%lld", + custom_ctx->substate_activated, apply->request_id); + + ctx_ops = &custom_ctx->substate_machine[ + custom_ctx->substate_activated]; + if (ctx_ops->crm_ops.notify_frame_skip) { + rc = ctx_ops->crm_ops.notify_frame_skip(ctx, apply); + } else { + CAM_WARN_RATE_LIMIT(CAM_CUSTOM, + "No handle function in activated substate %d", + custom_ctx->substate_activated); + rc = -EFAULT; + } + + if (rc) + CAM_WARN_RATE_LIMIT(CAM_CUSTOM, + "Apply default failed in active substate %d rc %d", + custom_ctx->substate_activated, rc); + return rc; +} + +/* top state machine */ +static struct cam_ctx_ops + cam_custom_dev_ctx_top_state_machine[CAM_CTX_STATE_MAX] = { + /* Uninit */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Available */ + { + .ioctl_ops = { + .acquire_dev = + __cam_custom_ctx_acquire_dev_in_available, + }, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Acquired */ + { + .ioctl_ops = { + .acquire_hw = __cam_custom_ctx_acquire_hw_in_acquired, + .release_dev = __cam_custom_release_dev_in_acquired, + .config_dev = __cam_custom_ctx_config_dev_in_acquired, + .release_hw = __cam_custom_ctx_release_hw_in_top_state, + }, + .crm_ops = { + .link = 
__cam_custom_ctx_link_in_acquired, + .unlink = __cam_custom_ctx_unlink_in_acquired, + .get_dev_info = + __cam_custom_ctx_get_dev_info_in_acquired, + .flush_req = __cam_custom_ctx_flush_req_in_top_state, + }, + .irq_ops = NULL, + .pagefault_ops = NULL, + }, + /* Ready */ + { + .ioctl_ops = { + .start_dev = __cam_custom_ctx_start_dev_in_ready, + .release_dev = __cam_custom_release_dev_in_acquired, + .config_dev = __cam_custom_ctx_config_dev, + .release_hw = __cam_custom_ctx_release_hw_in_top_state, + }, + .crm_ops = { + .unlink = __cam_custom_ctx_unlink_in_ready, + .flush_req = __cam_custom_ctx_flush_req_in_ready, + }, + .irq_ops = NULL, + .pagefault_ops = NULL, + }, + /* Flushed */ + { + .ioctl_ops = { + .stop_dev = __cam_custom_stop_dev_in_activated, + .release_dev = + __cam_custom_ctx_release_dev_in_activated, + .config_dev = __cam_custom_ctx_config_dev_in_flushed, + .release_hw = + __cam_custom_ctx_release_hw_in_activated_state, + }, + .crm_ops = { + .unlink = __cam_custom_ctx_unlink_in_ready, + .process_evt = __cam_custom_ctx_handle_evt, + }, + .irq_ops = NULL, + }, + /* Activated */ + { + .ioctl_ops = { + .stop_dev = __cam_custom_stop_dev_in_activated, + .release_dev = + __cam_custom_ctx_release_dev_in_activated, + .config_dev = __cam_custom_ctx_config_dev, + .release_hw = + __cam_custom_ctx_release_hw_in_activated_state, + }, + .crm_ops = { + .unlink = __cam_custom_ctx_unlink_in_activated, + .apply_req = __cam_custom_ctx_apply_req, + .notify_frame_skip = + __cam_custom_ctx_apply_default_req, + .flush_req = __cam_custom_ctx_flush_req_in_top_state, + .process_evt = __cam_custom_ctx_handle_evt, + }, + .irq_ops = __cam_custom_ctx_handle_irq_in_activated, + .pagefault_ops = NULL, + }, +}; + +int cam_custom_dev_context_init(struct cam_custom_context *ctx, + struct cam_context *ctx_base, + struct cam_req_mgr_kmd_ops *crm_node_intf, + struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, int img_iommu_hdl) +{ + int rc = -1, i = 0; + + if (!ctx || !ctx_base) { + 
CAM_ERR(CAM_CUSTOM, "Invalid Context"); + return -EINVAL; + } + + /* Custom HW context setup */ + memset(ctx, 0, sizeof(*ctx)); + + ctx->base = ctx_base; + ctx->frame_id = 0; + ctx->active_req_cnt = 0; + ctx->hw_ctx = NULL; + + for (i = 0; i < CAM_CTX_REQ_MAX; i++) { + ctx->req_base[i].req_priv = &ctx->req_custom[i]; + ctx->req_custom[i].base = &ctx->req_base[i]; + } + + /* camera context setup */ + rc = cam_context_init(ctx_base, custom_dev_name, CAM_CUSTOM, ctx_id, + crm_node_intf, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX, img_iommu_hdl); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Camera Context Base init failed"); + return rc; + } + + ctx_base->max_hw_update_entries = CAM_CTX_CFG_MAX; + ctx_base->max_in_map_entries = CAM_CTX_CFG_MAX; + ctx_base->max_out_map_entries = CAM_CTX_CFG_MAX; + /* link camera context with custom HW context */ + ctx_base->state_machine = cam_custom_dev_ctx_top_state_machine; + ctx_base->ctx_priv = ctx; + + return rc; +} + +int cam_custom_dev_context_deinit(struct cam_custom_context *ctx) +{ + if (ctx->base) + cam_context_deinit(ctx->base); + + memset(ctx, 0, sizeof(*ctx)); + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_context.h b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_context.h new file mode 100644 index 0000000000..054fb3815f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_context.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CUSTOM_CONTEXT_H_ +#define _CAM_CUSTOM_CONTEXT_H_ + +#include + +#include +#include + +#include "cam_context.h" +#include "cam_custom_hw_mgr_intf.h" + +/* + * Maximum hw resource - This number is based on the maximum + * output port resource. The current maximum resource number + * is 2. 
+ */ +#define CAM_CUSTOM_DEV_CTX_RES_MAX 1 + +#define CAM_CUSTOM_CTX_CFG_MAX 8 + +/* forward declaration */ +struct cam_custom_context; + +/* cam custom context irq handling function type */ +typedef int (*cam_custom_hw_event_cb_func)( + struct cam_custom_context *custom_ctx, void *evt_data); + +/** + * enum cam_custom_ctx_activated_substate - sub states for activated + * + */ +enum cam_custom_ctx_activated_substate { + CAM_CUSTOM_CTX_ACTIVATED_SOF, + CAM_CUSTOM_CTX_ACTIVATED_APPLIED, + CAM_CUSTOM_CTX_ACTIVATED_HW_ERROR, + CAM_CUSTOM_CTX_ACTIVATED_HALT, + CAM_CUSTOM_CTX_ACTIVATED_MAX, +}; + +/** + * struct cam_custom_ctx_irq_ops - Function table for handling IRQ callbacks + * + * @irq_ops: Array of handle function pointers. + * + */ +struct cam_custom_ctx_irq_ops { + cam_custom_hw_event_cb_func irq_ops[CAM_CUSTOM_HW_EVENT_MAX]; +}; + +/** + * struct cam_custom_dev_ctx_req - Custom context request object + * + * @base: Common request object pointer + * @cfg: Custom hardware configuration array + * @num_cfg: Number of custom hardware configuration entries + * @fence_map_out: Output fence mapping array + * @num_fence_map_out: Number of the output fence map + * @fence_map_in: Input fence mapping array + * @num_fence_map_in: Number of input fence map + * @num_acked: Count to track acked entried for output. + * If count equals the number of fence out, it means + * the request has been completed. 
+ * @bubble_report: If bubble recovery is needed + * @bubble_detected: request has bubbled + * @hw_update_data: HW update data for this request + * + */ +struct cam_custom_dev_ctx_req { + struct cam_ctx_request *base; + struct cam_hw_update_entry cfg + [CAM_CUSTOM_CTX_CFG_MAX]; + uint32_t num_cfg; + struct cam_hw_fence_map_entry fence_map_out + [CAM_CUSTOM_DEV_CTX_RES_MAX]; + uint32_t num_fence_map_out; + struct cam_hw_fence_map_entry fence_map_in + [CAM_CUSTOM_DEV_CTX_RES_MAX]; + uint32_t num_fence_map_in; + uint32_t num_acked; + int32_t bubble_report; + bool bubble_detected; + struct cam_custom_prepare_hw_update_data hw_update_data; +}; + +/** + * struct cam_custom_context - Custom device context + * @base: custom device context object + * @state_machine: state machine for Custom device context + * @state: Common context state + * @hw_ctx: HW object returned by the acquire device command + * @init_received: Indicate whether init config packet is received + * @active_req_cnt: Counter for the active request + * @frame_id: Frame id tracking for the custom context + * @hw_acquired: Flag to indicate if HW is acquired for this context + * @process_bubble: If ctx currently processing bubble + * @substate_actiavted: Current substate for the activated state. 
+ * @substate_machine: Custom substate machine for external interface + * @substate_machine_irq: Custom substate machine for irq handling + * @req_base: common request structure + * @req_custom: custom request structure + * + */ +struct cam_custom_context { + struct cam_context *base; + struct cam_ctx_ops *state_machine; + uint32_t state; + void *hw_ctx; + bool init_received; + uint32_t active_req_cnt; + int64_t frame_id; + bool hw_acquired; + uint32_t substate_activated; + atomic_t process_bubble; + struct cam_ctx_ops *substate_machine; + struct cam_custom_ctx_irq_ops *substate_machine_irq; + struct cam_ctx_request req_base[CAM_CTX_REQ_MAX]; + struct cam_custom_dev_ctx_req req_custom[CAM_CTX_REQ_MAX]; +}; + + +/** + * cam_custom_dev_context_init() + * + * @brief: Initialization function for the custom context + * + * @ctx: Custom context obj to be initialized + * @bridge_ops: Bridge call back funciton + * @hw_intf: Cust hw manager interface + * @ctx_id: ID for this context + * @img_iommu_hdl: IOMMU HDL for Image buffers + * + */ +int cam_custom_dev_context_init(struct cam_custom_context *ctx, + struct cam_context *ctx_base, + struct cam_req_mgr_kmd_ops *bridge_ops, + struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, int img_iommu_hdl); + +/** + * cam_custom_dev_context_deinit() + * + * @brief: Deinitialize function for the Custom context + * + * @ctx: Custom context obj to be deinitialized + * + */ +int cam_custom_dev_context_deinit(struct cam_custom_context *ctx); + +#endif /* _CAM_CUSTOM_CONTEXT_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_dev.c b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_dev.c new file mode 100644 index 0000000000..969077c89e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_dev.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "cam_custom_dev.h" +#include "cam_hw_mgr_intf.h" +#include "cam_custom_hw_mgr_intf.h" +#include "cam_node.h" +#include "cam_debug_util.h" +#include "cam_smmu_api.h" +#include "cam_compat.h" +#include "camera_main.h" + +static struct cam_custom_dev g_custom_dev; +static uint32_t g_num_custom_hws; + +static void cam_custom_dev_iommu_fault_handler( + struct cam_smmu_pf_info *pf_info) +{ + int i = 0; + struct cam_node *node = NULL; + + if (!pf_info || !pf_info->token) { + CAM_ERR(CAM_CUSTOM, "invalid token in page handler cb"); + return; + } + + node = (struct cam_node *)pf_info->token; + + for (i = 0; i < node->ctx_size; i++) + cam_context_dump_pf_info(&(node->ctx_list[i]), pf_info); +} + +static const struct of_device_id cam_custom_dt_match[] = { + { + .compatible = "qcom,cam-custom" + }, + {} +}; + +static int cam_custom_subdev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + mutex_lock(&g_custom_dev.custom_dev_mutex); + g_custom_dev.open_cnt++; + mutex_unlock(&g_custom_dev.custom_dev_mutex); + + return 0; +} + +static int cam_custom_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + int rc = 0; + struct cam_node *node = v4l2_get_subdevdata(sd); + + mutex_lock(&g_custom_dev.custom_dev_mutex); + if (g_custom_dev.open_cnt <= 0) { + CAM_DBG(CAM_CUSTOM, "Custom subdev is already closed"); + rc = -EINVAL; + goto end; + } + + g_custom_dev.open_cnt--; + if (!node) { + CAM_ERR(CAM_CUSTOM, "Node ptr is NULL"); + rc = -EINVAL; + goto end; + } + + if (g_custom_dev.open_cnt == 0) + cam_node_shutdown(node); + +end: + mutex_unlock(&g_custom_dev.custom_dev_mutex); + return rc; +} + +static int cam_custom_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + 
CAM_DBG(CAM_CUSTOM, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_custom_subdev_close_internal(sd, fh); +} + +static const struct v4l2_subdev_internal_ops cam_custom_subdev_internal_ops = { + .close = cam_custom_subdev_close, + .open = cam_custom_subdev_open, +}; + +static int cam_custom_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = -EINVAL; + int i; + struct cam_hw_mgr_intf hw_mgr_intf; + struct cam_node *node; + int iommu_hdl = -1; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + g_custom_dev.sd.internal_ops = &cam_custom_subdev_internal_ops; + g_custom_dev.sd.close_seq_prior = CAM_SD_CLOSE_HIGH_PRIORITY; + + if (!cam_cpas_is_feature_supported(CAM_CPAS_CUSTOM_FUSE, BIT(0), NULL)) { + CAM_DBG(CAM_CUSTOM, "CUSTOM:0 is not supported"); + goto err; + } + + rc = cam_subdev_probe(&g_custom_dev.sd, pdev, CAM_CUSTOM_DEV_NAME, + CAM_CUSTOM_DEVICE_TYPE); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Custom device cam_subdev_probe failed!"); + goto err; + } + node = (struct cam_node *) g_custom_dev.sd.token; + + memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf)); + rc = cam_custom_hw_mgr_init(pdev->dev.of_node, + &hw_mgr_intf, &iommu_hdl); + if (rc != 0) { + CAM_ERR(CAM_CUSTOM, "Can not initialized Custom HW manager!"); + goto unregister; + } + + for (i = 0; i < CAM_CUSTOM_HW_MAX_INSTANCES; i++) { + rc = cam_custom_dev_context_init(&g_custom_dev.ctx_custom[i], + &g_custom_dev.ctx[i], + &node->crm_node_intf, + &node->hw_mgr_intf, + i, iommu_hdl); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Custom context init failed!"); + goto unregister; + } + } + + rc = cam_node_init(node, &hw_mgr_intf, g_custom_dev.ctx, + CAM_CUSTOM_HW_MAX_INSTANCES, CAM_CUSTOM_DEV_NAME); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Custom HW node init failed!"); + goto unregister; + } + + node->sd_handler = cam_custom_subdev_close_internal; + 
cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_custom_dev_iommu_fault_handler, node); + + mutex_init(&g_custom_dev.custom_dev_mutex); + + CAM_DBG(CAM_CUSTOM, "%s component bound successfully", pdev->name); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return 0; +unregister: + rc = cam_subdev_remove(&g_custom_dev.sd); +err: + return rc; +} + +void cam_custom_get_num_hws(uint32_t *custom_num) +{ + if (custom_num) + *custom_num = g_num_custom_hws; + else + CAM_ERR(CAM_CUSTOM, "Invalid argument, g_num_custom_hws: %u", g_num_custom_hws); +} + +static void cam_custom_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + int i; + + /* clean up resources */ + for (i = 0; i < CAM_CUSTOM_HW_MAX_INSTANCES; i++) { + rc = cam_custom_dev_context_deinit(&g_custom_dev.ctx_custom[i]); + if (rc) + CAM_ERR(CAM_CUSTOM, + "Custom context %d deinit failed", i); + } + + rc = cam_subdev_remove(&g_custom_dev.sd); + if (rc) + CAM_ERR(CAM_CUSTOM, "Unregister failed"); + + memset(&g_custom_dev, 0, sizeof(g_custom_dev)); +} + +const static struct component_ops cam_custom_component_ops = { + .bind = cam_custom_component_bind, + .unbind = cam_custom_component_unbind, +}; + +static int cam_custom_dev_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_custom_component_ops); + return 0; +} + +static int cam_custom_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CUSTOM, "Adding Custom HW component"); + g_num_custom_hws++; + + rc = component_add(&pdev->dev, &cam_custom_component_ops); + if (rc) + CAM_ERR(CAM_CUSTOM, "failed to add component rc: %d", rc); + + return rc; +} + +struct platform_driver custom_driver = { + .probe = cam_custom_dev_probe, + .remove = cam_custom_dev_remove, + .driver = { + .name = "cam_custom", + .of_match_table = cam_custom_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int 
cam_custom_dev_init_module(void) +{ + return platform_driver_register(&custom_driver); +} + +void cam_custom_dev_exit_module(void) +{ + platform_driver_unregister(&custom_driver); +} + +MODULE_DESCRIPTION("MSM CUSTOM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_dev.h b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_dev.h new file mode 100644 index 0000000000..7f8fa98004 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_dev.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CUSTOM_DEV_H_ +#define _CAM_CUSTOM_DEV_H_ + +#include "cam_subdev.h" +#include "cam_hw_mgr_intf.h" +#include "cam_custom_hw_mgr.h" +#include "cam_context.h" +#include "cam_custom_context.h" + +#define CAM_CUSTOM_HW_MAX_INSTANCES 3 + +/** + * struct cam_custom_dev - Camera Custom V4l2 device node + * + * @sd: Common camera subdevice node + * @ctx: Custom base context storage + * @ctx_custom: Custom private context storage + * @custom_dev_mutex: Custom dev mutex + * @open_cnt: Open device count + */ +struct cam_custom_dev { + struct cam_subdev sd; + struct cam_context ctx[CAM_CUSTOM_HW_MAX_INSTANCES]; + struct cam_custom_context ctx_custom[CAM_CUSTOM_HW_MAX_INSTANCES]; + struct mutex custom_dev_mutex; + int32_t open_cnt; +}; + +/** + * @brief : API to register Custom hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_custom_dev_init_module(void); + +/** + * @brief : API to remove Custom hw interface from platform framework. 
+ */ +void cam_custom_dev_exit_module(void); +#endif /* _CAM_CUSTOM_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid480.h b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid480.h new file mode 100644 index 0000000000..44cdced35d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid480.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CUSTOM_CSID_480_H_ +#define _CAM_CUSTOM_CSID_480_H_ + +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" + +#define CAM_CSID_VERSION_V480 0x40080000 + +static struct cam_ife_csid_ver1_path_reg_info + cam_custom_csid_480_udi_0_reg_offset = { + .irq_status_addr = 0x30, + .irq_mask_addr = 0x34, + .irq_clear_addr = 0x38, + .irq_set_addr = 0x3c, + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .ctrl_addr = 0x208, + .frm_drop_pattern_addr = 0x20c, + .frm_drop_period_addr = 0x210, + .irq_subsample_pattern_addr = 0x214, + .irq_subsample_period_addr = 0x218, + .hcrop_addr = 0x21c, + .vcrop_addr = 0x220, + .pix_drop_pattern_addr = 0x224, + .pix_drop_period_addr = 0x228, + .line_drop_pattern_addr = 0x22c, + .line_drop_period_addr = 0x230, + .rst_strobes_addr = 0x240, + .status_addr = 0x250, + .misr_val0_addr = 0x254, + .misr_val1_addr = 0x258, + .misr_val2_addr = 0x25c, + .misr_val3_addr = 0x260, + .format_measure_cfg0_addr = 0x270, + .format_measure_cfg1_addr = 0x274, + .format_measure0_addr = 0x278, + .format_measure1_addr = 0x27c, + .format_measure2_addr = 0x280, + .timestamp_curr0_sof_addr = 0x290, + .timestamp_curr1_sof_addr = 0x294, + .timestamp_prev0_sof_addr = 0x298, + .timestamp_prev1_sof_addr = 0x29c, + .timestamp_curr0_eof_addr = 0x2a0, + .timestamp_curr1_eof_addr = 0x2a4, + .timestamp_prev0_eof_addr = 0x2a8, + 
.timestamp_prev1_eof_addr = 0x2ac, + .err_recovery_cfg0_addr = 0x2b0, + .err_recovery_cfg1_addr = 0x2b4, + .err_recovery_cfg2_addr = 0x2b8, + .multi_vcdt_cfg0_addr = 0x2bc, + .byte_cntr_ping_addr = 0x2e0, + .byte_cntr_pong_addr = 0x2e4, + /* configurations */ + .plain_fmt_shift_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .ccif_violation_en = 1, + .overflow_ctrl_en = 0, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_custom_csid_480_udi_1_reg_offset = { + .irq_status_addr = 0x40, + .irq_mask_addr = 0x44, + .irq_clear_addr = 0x48, + .irq_set_addr = 0x4c, + .cfg0_addr = 0x300, + .cfg1_addr = 0x304, + .ctrl_addr = 0x308, + .frm_drop_pattern_addr = 0x30c, + .frm_drop_period_addr = 0x310, + .irq_subsample_pattern_addr = 0x314, + .irq_subsample_period_addr = 0x318, + .hcrop_addr = 0x31c, + .vcrop_addr = 0x320, + .pix_drop_pattern_addr = 0x324, + .pix_drop_period_addr = 0x328, + .line_drop_pattern_addr = 0x32c, + .line_drop_period_addr = 0x330, + .rst_strobes_addr = 0x340, + .status_addr = 0x350, + .misr_val0_addr = 0x354, + .misr_val1_addr = 0x358, + .misr_val2_addr = 0x35c, + .misr_val3_addr = 0x360, + .format_measure_cfg0_addr = 0x370, + .format_measure_cfg1_addr = 0x374, + .format_measure0_addr = 0x378, + .format_measure1_addr = 0x37c, + .format_measure2_addr = 0x380, + .timestamp_curr0_sof_addr = 0x390, + .timestamp_curr1_sof_addr = 0x394, + .timestamp_prev0_sof_addr = 0x398, + .timestamp_prev1_sof_addr = 0x39c, + .timestamp_curr0_eof_addr = 0x3a0, + .timestamp_curr1_eof_addr = 0x3a4, + .timestamp_prev0_eof_addr = 0x3a8, + .timestamp_prev1_eof_addr = 0x3ac, + .err_recovery_cfg0_addr = 0x3b0, + .err_recovery_cfg1_addr = 0x3b4, + .err_recovery_cfg2_addr = 0x3b8, + .multi_vcdt_cfg0_addr = 0x3bc, + .byte_cntr_ping_addr = 0x3e0, + .byte_cntr_pong_addr = 0x3e4, + /* configurations */ + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 30, + .plain_fmt_shift_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + 
.drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .timestamp_en_shift_val = 2, + .ccif_violation_en = 1, + .format_measure_en_shift_val = 1, + .overflow_ctrl_en = 0, + .non_fatal_err_mask = 0x2e000, + .fatal_err_mask = 0x4, + .overflow_ctrl_mode_val = 0x8, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_custom_csid_480_udi_2_reg_offset = { + .irq_status_addr = 0x50, + .irq_mask_addr = 0x54, + .irq_clear_addr = 0x58, + .irq_set_addr = 0x5c, + .cfg0_addr = 0x400, + .cfg1_addr = 0x404, + .ctrl_addr = 0x408, + .frm_drop_pattern_addr = 0x40c, + .frm_drop_period_addr = 0x410, + .irq_subsample_pattern_addr = 0x414, + .irq_subsample_period_addr = 0x418, + .hcrop_addr = 0x41c, + .vcrop_addr = 0x420, + .pix_drop_pattern_addr = 0x424, + .pix_drop_period_addr = 0x428, + .line_drop_pattern_addr = 0x42c, + .line_drop_period_addr = 0x430, + .yuv_chroma_conversion_addr = 0x434, + .rst_strobes_addr = 0x440, + .status_addr = 0x450, + .misr_val0_addr = 0x454, + .misr_val1_addr = 0x458, + .misr_val2_addr = 0x45c, + .misr_val3_addr = 0x460, + .format_measure_cfg0_addr = 0x470, + .format_measure_cfg1_addr = 0x474, + .format_measure0_addr = 0x478, + .format_measure1_addr = 0x47c, + .format_measure2_addr = 0x480, + .timestamp_curr0_sof_addr = 0x490, + .timestamp_curr1_sof_addr = 0x494, + .timestamp_prev0_sof_addr = 0x498, + .timestamp_prev1_sof_addr = 0x49c, + .timestamp_curr0_eof_addr = 0x4a0, + .timestamp_curr1_eof_addr = 0x4a4, + .timestamp_prev0_eof_addr = 0x4a8, + .timestamp_prev1_eof_addr = 0x4ac, + .err_recovery_cfg0_addr = 0x4b0, + .err_recovery_cfg1_addr = 0x4b4, + .err_recovery_cfg2_addr = 0x4b8, + .multi_vcdt_cfg0_addr = 0x4bc, + .byte_cntr_ping_addr = 0x4e0, + .byte_cntr_pong_addr = 0x4e4, + /* configurations */ + .plain_fmt_shift_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .ccif_violation_en = 1, + .overflow_ctrl_en = 0, +}; + +static struct cam_ife_csid_csi2_rx_reg_info + cam_custom_csid_480_csi2_reg_info = { + .irq_status_addr = 
0x20, + .irq_mask_addr = 0x24, + .irq_clear_addr = 0x28, + .irq_set_addr = 0x2c, + + /*CSI2 rx control */ + .cfg0_addr = 0x100, + .cfg1_addr = 0x104, + .capture_ctrl_addr = 0x108, + .rst_strobes_addr = 0x110, + .de_scramble_cfg0_addr = 0x114, + .de_scramble_cfg1_addr = 0x118, + .cap_unmap_long_pkt_hdr_0_addr = 0x120, + .cap_unmap_long_pkt_hdr_1_addr = 0x124, + .captured_short_pkt_0_addr = 0x128, + .captured_short_pkt_1_addr = 0x12c, + .captured_long_pkt_0_addr = 0x130, + .captured_long_pkt_1_addr = 0x134, + .captured_long_pkt_ftr_addr = 0x138, + .captured_cphy_pkt_hdr_addr = 0x13c, + .lane0_misr_addr = 0x150, + .lane1_misr_addr = 0x154, + .lane2_misr_addr = 0x158, + .lane3_misr_addr = 0x15c, + .total_pkts_rcvd_addr = 0x160, + .stats_ecc_addr = 0x164, + .total_crc_err_addr = 0x168, + + .rst_srb_all = 0x3FFF, + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0x3, + .fatal_err_mask = 0x78000, + .part_fatal_err_mask = 0x1801800, +}; + +static struct cam_ife_csid_ver1_common_reg_info + cam_custom_csid_480_cmn_reg_offset = { + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .ctrl_addr = 0x8, + .reset_addr = 0xc, + .rst_strobes_addr = 0x10, + + .test_bus_ctrl_addr = 0x14, + .top_irq_status_addr = 0x70, + .top_irq_mask_addr = 0x74, + .top_irq_clear_addr = 0x78, + .top_irq_set_addr = 0x7c, + .irq_cmd_addr = 0x80, + + /*configurations */ + .major_version = 1, + .minor_version = 7, + .version_incr = 0, + .num_udis = 3, + .num_rdis = 0, + .num_pix = 0, + .num_ppp = 0, + .rst_sw_reg_stb = 1, + .rst_hw_reg_stb = 0x1e, + .rst_sw_hw_reg_stb = 0x1f, + .path_rst_stb_all = 0x7f, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + 
.dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .fmt_shift_val = 12, + .ipp_irq_mask_all = 0, + .rdi_irq_mask_all = 0, + .ppp_irq_mask_all = 0, + .udi_irq_mask_all = 0x7FFF, + .measure_en_hbi_vbi_cnt_mask = 0xC, + .num_bytes_out_shift_val = 3, +}; + +static struct cam_ife_csid_ver1_reg_info cam_custom_csid_480_reg_offset = { + .cmn_reg = &cam_custom_csid_480_cmn_reg_offset, + .csi2_reg = &cam_custom_csid_480_csi2_reg_info, + .ipp_reg = NULL, + .ppp_reg = NULL, + .rdi_reg = { + NULL, + NULL, + NULL, + NULL, + }, + .udi_reg = { + &cam_custom_csid_480_udi_0_reg_offset, + &cam_custom_csid_480_udi_1_reg_offset, + &cam_custom_csid_480_udi_2_reg_offset, + }, + .tpg_reg = NULL, +}; + +#endif /*_CAM_IFE_CSID_480_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid_dev.c b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid_dev.c new file mode 100644 index 0000000000..e4fe4ca659 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid_dev.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "linux/module.h" +#include "cam_custom_csid_dev.h" +#include "cam_ife_csid_common.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_custom_csid480.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +#define CAM_CUSTOM_CSID_DRV_NAME "custom_csid" + +static struct cam_hw_intf *cam_custom_csid_hw_list[CAM_IFE_CSID_HW_NUM_MAX] = { + 0, 0, 0, 0}; + +static struct cam_ife_csid_core_info cam_custom_csid480_hw_info = { + .csid_reg = &cam_custom_csid_480_reg_offset, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static int cam_custom_csid_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_intf *csid_hw_intf; + struct cam_hw_info *csid_hw_info; + const struct of_device_id *match_dev = NULL; + struct cam_ife_csid_core_info *csid_core_info = NULL; + uint32_t csid_dev_idx; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + csid_hw_intf = CAM_MEM_ZALLOC(sizeof(*csid_hw_intf), GFP_KERNEL); + if (!csid_hw_intf) { + rc = -ENOMEM; + goto err; + } + + csid_hw_info = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + + if (!csid_hw_info) { + rc = -ENOMEM; + goto free_hw_intf; + } + + /* get custom csid hw index */ + of_property_read_u32(pdev->dev.of_node, "cell-index", &csid_dev_idx); + /* get custom csid hw information */ + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_CUSTOM, + "No matching table for the CUSTOM CSID HW!"); + rc = -EINVAL; + goto free_hw_info; + } + + csid_hw_intf->hw_idx = csid_dev_idx; + csid_hw_intf->hw_type = CAM_ISP_HW_TYPE_IFE_CSID; + csid_hw_intf->hw_priv = csid_hw_info; + + csid_hw_info->soc_info.pdev = pdev; + csid_hw_info->soc_info.dev = &pdev->dev; + csid_hw_info->soc_info.dev_name = 
pdev->name; + csid_hw_info->soc_info.index = csid_dev_idx; + + csid_core_info = (struct cam_ife_csid_core_info *)match_dev->data; + + /* call the driver init and fill csid_hw_info->core_info */ + rc = cam_ife_csid_hw_probe_init(csid_hw_intf, csid_core_info, true); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%d] probe init failed", + csid_dev_idx); + goto free_hw_info; + } + + platform_set_drvdata(pdev, csid_hw_intf); + CAM_DBG(CAM_ISP, "CSID:%d component bound successfully", + csid_hw_intf->hw_idx); + + if (csid_hw_intf->hw_idx < CAM_IFE_CSID_HW_NUM_MAX) + cam_custom_csid_hw_list[csid_hw_intf->hw_idx] = csid_hw_intf; + else + goto free_hw_info; + + CAM_DBG(CAM_CUSTOM, "CSID:%d component bound successfully", + csid_hw_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return 0; + +free_hw_info: + CAM_MEM_FREE(csid_hw_info); +free_hw_intf: + CAM_MEM_FREE(csid_hw_intf); +err: + return rc; +} + +static void cam_custom_csid_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_intf *csid_hw_intf = NULL; + struct cam_hw_info *csid_hw_info; + struct cam_ife_csid_core_info *core_info = NULL; + struct platform_device *pdev = to_platform_device(dev); + const struct of_device_id *match_dev = NULL; + + csid_hw_intf = (struct cam_hw_intf *)platform_get_drvdata(pdev); + + if (!csid_hw_intf) { + CAM_ERR(CAM_CUSTOM, "ERROR No data in csid_hw_intf"); + return; + } + + csid_hw_info = csid_hw_intf->hw_priv; + core_info = csid_hw_info->core_info; + + CAM_DBG(CAM_CUSTOM, "CSID:%d component unbind", + csid_hw_intf->hw_idx); + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + + if (!match_dev) { + CAM_ERR(CAM_ISP, "No matching table for the IFE CSID HW!"); + goto free_mem; + } + + cam_ife_csid_hw_deinit(csid_hw_intf, core_info); + +free_mem: + /*release the csid device memory */ + CAM_MEM_FREE(csid_hw_info); + 
CAM_MEM_FREE(csid_hw_intf); +} + +const static struct component_ops cam_custom_csid_component_ops = { + .bind = cam_custom_csid_component_bind, + .unbind = cam_custom_csid_component_unbind, +}; + +static int cam_custom_csid_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CUSTOM, "Adding Custom CSID component"); + rc = component_add(&pdev->dev, &cam_custom_csid_component_ops); + if (rc) + CAM_ERR(CAM_CUSTOM, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_custom_csid_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_custom_csid_component_ops); + return 0; +} + +static const struct of_device_id cam_custom_csid_dt_match[] = { + { + .compatible = "qcom,csid-custom480", + .data = &cam_custom_csid480_hw_info + }, + { + .compatible = "qcom,csid-custom580", + .data = &cam_custom_csid480_hw_info + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, cam_custom_csid_dt_match); + +struct platform_driver cam_custom_csid_driver = { + .probe = cam_custom_csid_probe, + .driver = { + .name = "qcom,custom-csid", + .of_match_table = cam_custom_csid_dt_match, + .suppress_bind_attrs = true, + }, + .remove = cam_custom_csid_remove, +}; + +int cam_custom_csid_driver_init(void) +{ + int32_t rc = 0; + + rc = platform_driver_register(&cam_custom_csid_driver); + if (rc < 0) + CAM_ERR(CAM_CUSTOM, "platform_driver_register Failed: rc = %d", + rc); + + return rc; +} + +int cam_custom_csid_hw_init(struct cam_hw_intf **custom_csid_hw, + uint32_t hw_idx) +{ + int rc = 0; + + if (cam_custom_csid_hw_list[hw_idx]) { + *custom_csid_hw = cam_custom_csid_hw_list[hw_idx]; + } else { + *custom_csid_hw = NULL; + rc = -1; + } + + return rc; +} + +void cam_custom_csid_driver_exit(void) +{ + platform_driver_unregister(&cam_custom_csid_driver); +} + +MODULE_DESCRIPTION("cam_custom_csid_driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid_dev.h 
b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid_dev.h new file mode 100644 index 0000000000..506469f765 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_csid/cam_custom_csid_dev.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CUSTOM_CSID_DEV_H_ +#define _CAM_CUSTOM_CSID_DEV_H_ + +#include "cam_debug_util.h" +#include "cam_custom_hw_mgr_intf.h" + +/** + * @brief : API to register Custom CSID hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_custom_csid_driver_init(void); + +/** + * @brief : API to remove Custom CSID hw interface from platform framework. + */ +void cam_custom_csid_driver_exit(void); + +#endif /*_CAM_CUSTOM_CSID_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_core.c b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_core.c new file mode 100644 index 0000000000..88ee77bc4c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_core.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "cam_custom_sub_mod_core.h" + +int cam_custom_hw_sub_mod_get_hw_caps(void *hw_priv, + void *get_hw_cap_args, uint32_t arg_size) +{ + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + /* Add HW Capabilities to be published */ + return rc; +} + +int cam_custom_hw_sub_mod_init_hw(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *custom_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_custom_resource_node *custom_res = NULL; + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + mutex_lock(&custom_hw->hw_mutex); + custom_hw->open_count++; + if (custom_hw->open_count > 1) { + mutex_unlock(&custom_hw->hw_mutex); + CAM_DBG(CAM_CUSTOM, + "Cam Custom has already been initialized cnt %d", + custom_hw->open_count); + return 0; + } + mutex_unlock(&custom_hw->hw_mutex); + + soc_info = &custom_hw->soc_info; + + /* Turn ON Regulators, Clocks and other SOC resources */ + rc = cam_custom_hw_sub_mod_enable_soc_resources(soc_info); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Enable SOC failed"); + rc = -EFAULT; + goto decrement_open_cnt; + } + + custom_res = (struct cam_custom_resource_node *)init_hw_args; + if (custom_res && custom_res->init) { + rc = custom_res->init(custom_res, NULL, 0); + if (rc) { + CAM_ERR(CAM_CUSTOM, "init Failed rc=%d", rc); + goto decrement_open_cnt; + } + } + + rc = cam_custom_hw_sub_mod_reset(hw_priv, NULL, 0); + if (rc < 0) { + CAM_ERR(CAM_CUSTOM, "Custom HW reset failed : %d", rc); + goto decrement_open_cnt; + } + /* Initialize all resources here */ + custom_hw->hw_state = CAM_HW_STATE_POWER_UP; + return rc; + +decrement_open_cnt: + mutex_lock(&custom_hw->hw_mutex); + custom_hw->open_count--; + mutex_unlock(&custom_hw->hw_mutex); + return rc; +} + +int cam_custom_hw_sub_mod_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size) +{ + 
struct cam_hw_info *custom_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_custom_resource_node *custom_res = NULL; + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + mutex_lock(&custom_hw->hw_mutex); + if (!custom_hw->open_count) { + mutex_unlock(&custom_hw->hw_mutex); + CAM_ERR(CAM_CUSTOM, "Error! Unbalanced deinit"); + return -EFAULT; + } + custom_hw->open_count--; + if (custom_hw->open_count) { + mutex_unlock(&custom_hw->hw_mutex); + CAM_DBG(CAM_CUSTOM, + "open_cnt non-zero =%d", custom_hw->open_count); + return 0; + } + mutex_unlock(&custom_hw->hw_mutex); + + soc_info = &custom_hw->soc_info; + + custom_res = (struct cam_custom_resource_node *)deinit_hw_args; + if (custom_res && custom_res->deinit) { + rc = custom_res->deinit(custom_res, NULL, 0); + if (rc) + CAM_ERR(CAM_CUSTOM, "deinit failed"); + } + + rc = cam_custom_hw_sub_mod_reset(hw_priv, NULL, 0); + if (rc) + CAM_WARN(CAM_CUSTOM, "Reset of HW failed, rc: %d", rc); + + /* Turn OFF Regulators, Clocks and other SOC resources */ + CAM_DBG(CAM_CUSTOM, "Disable SOC resource"); + rc = cam_custom_hw_sub_mod_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_CUSTOM, "Disable SOC failed"); + + custom_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + + return rc; +} + +int cam_custom_hw_sub_mod_reset(void *hw_priv, + void *reserve_args, uint32_t arg_size) +{ + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_CUSTOM, "Invalid input arguments"); + return -EINVAL; + } + + /* Do Reset of HW */ + return rc; +} + +int cam_custom_hw_sub_mod_reserve(void *hw_priv, + void *reserve_args, uint32_t arg_size) +{ + int rc = 0; + + if (!hw_priv || !reserve_args) { + CAM_ERR(CAM_CUSTOM, "Invalid input arguments"); + return -EINVAL; + } + + /*Reserve Args */ + return rc; +} + + +int cam_custom_hw_sub_mod_release(void *hw_priv, + void *release_args, uint32_t arg_size) +{ + struct cam_hw_info *custom_hw = hw_priv; + int rc = 0; + + if (!hw_priv || !release_args) { 
+ CAM_ERR(CAM_CUSTOM, "Invalid input arguments"); + return -EINVAL; + } + + mutex_lock(&custom_hw->hw_mutex); + /* Release Resources */ + mutex_unlock(&custom_hw->hw_mutex); + + return rc; +} + + +int cam_custom_hw_sub_mod_start(void *hw_priv, + void *start_args, uint32_t arg_size) +{ + struct cam_hw_info *custom_hw = hw_priv; + int rc = 0; + + if (!hw_priv || !start_args) { + CAM_ERR(CAM_CUSTOM, "Invalid input arguments"); + return -EINVAL; + } + + mutex_lock(&custom_hw->hw_mutex); + /* Start HW -- Stream On*/ + mutex_unlock(&custom_hw->hw_mutex); + + return rc; +} + +int cam_custom_hw_sub_mod_stop(void *hw_priv, + void *stop_args, uint32_t arg_size) +{ + struct cam_hw_info *custom_hw = hw_priv; + int rc = 0; + + if (!hw_priv || !stop_args) { + CAM_ERR(CAM_CUSTOM, "Invalid input arguments"); + return -EINVAL; + } + + mutex_lock(&custom_hw->hw_mutex); + /* Stop HW */ + mutex_unlock(&custom_hw->hw_mutex); + + return rc; +} + +int cam_custom_hw_sub_mod_read(void *hw_priv, + void *read_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_custom_hw_sub_mod_write(void *hw_priv, + void *write_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_custom_hw_submit_req(void *hw_priv, void *hw_submit_args, + uint32_t arg_size) +{ + struct cam_hw_info *custom_dev = hw_priv; + struct cam_custom_sub_mod_req_to_dev *submit_req = + (struct cam_custom_sub_mod_req_to_dev *)hw_submit_args; + struct cam_custom_sub_mod_core_info *core_info = NULL; + + core_info = + (struct cam_custom_sub_mod_core_info *)custom_dev->core_info; + + spin_lock(&custom_dev->hw_lock); + if (core_info->curr_req) { + CAM_WARN(CAM_CUSTOM, "Req %lld still processed by %s", + core_info->curr_req->req_id, + custom_dev->soc_info.dev_name); + spin_unlock(&custom_dev->hw_lock); + return -EAGAIN; + } + + core_info->curr_req = submit_req; + spin_unlock(&custom_dev->hw_lock); + + /* Do other submit procedures */ + return 0; +} + +irqreturn_t cam_custom_hw_sub_mod_irq(int irq_num, void *data) +{ + struct 
cam_hw_info *custom_dev = data; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_custom_hw_cb_args cb_args; + struct cam_custom_sub_mod_core_info *core_info = NULL; + uint32_t irq_status = 0; + + if (!data) { + CAM_ERR(CAM_CUSTOM, "Invalid custom_dev_info"); + return IRQ_HANDLED; + } + + soc_info = &custom_dev->soc_info; + core_info = + (struct cam_custom_sub_mod_core_info *)custom_dev->core_info; + + irq_status = cam_io_r_mb(soc_info->reg_map[0].mem_base + + core_info->device_hw_info->irq_status); + + cam_io_w_mb(irq_status, + soc_info->reg_map[0].mem_base + + core_info->device_hw_info->irq_clear); + + spin_lock(&custom_dev->hw_lock); + core_info->curr_req = NULL; + if (core_info->irq_cb.custom_hw_mgr_cb) + core_info->irq_cb.custom_hw_mgr_cb( + core_info->irq_cb.data, &cb_args); + spin_unlock(&custom_dev->hw_lock); + + return IRQ_HANDLED; +} + +int cam_custom_hw_sub_mod_process_cmd(void *hw_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *hw = hw_priv; + struct cam_custom_sub_mod_core_info *core_info = NULL; + unsigned long flag = 0; + int rc = 0; + + if (!hw_priv || !cmd_args) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + core_info = hw->core_info; + /* Handle any custom process cmds */ + + switch (cmd_type) { + case CAM_CUSTOM_SET_IRQ_CB: { + struct cam_custom_sub_mod_set_irq_cb *irq_cb = cmd_args; + /* This can be deprecated */ + CAM_DBG(CAM_CUSTOM, "Setting irq cb"); + spin_lock_irqsave(&hw->hw_lock, flag); + core_info->irq_cb.custom_hw_mgr_cb = irq_cb->custom_hw_mgr_cb; + core_info->irq_cb.data = irq_cb->data; + spin_unlock_irqrestore(&hw->hw_lock, flag); + break; + } + case CAM_CUSTOM_SUBMIT_REQ: { + rc = cam_custom_hw_submit_req(hw_priv, cmd_args, arg_size); + break; + } + default: + break; + } + + return rc; +} + + diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_core.h 
b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_core.h new file mode 100644 index 0000000000..88a5ad6619 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_core.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CUSTOM_SUB_MOD_CORE_H_ +#define _CAM_CUSTOM_SUB_MOD_CORE_H_ + +#include "cam_debug_util.h" +#include "cam_custom_hw.h" +#include "cam_custom_sub_mod_soc.h" +#include "cam_custom_hw_mgr_intf.h" + +struct cam_custom_sub_mod_set_irq_cb { + int32_t (*custom_hw_mgr_cb)(void *data, + struct cam_custom_hw_cb_args *cb_args); + void *data; +}; + +struct cam_custom_sub_mod_req_to_dev { + uint64_t req_id; + uint32_t ctx_idx; + uint32_t dev_idx; +}; + +struct cam_custom_device_hw_info { + uint32_t hw_ver; + uint32_t irq_status; + uint32_t irq_mask; + uint32_t irq_clear; +}; + +struct cam_custom_sub_mod_core_info { + uint32_t cpas_handle; + bool cpas_start; + bool clk_enable; + struct cam_custom_sub_mod_set_irq_cb irq_cb; + struct cam_custom_sub_mod_req_to_dev *curr_req; + struct cam_custom_device_hw_info *device_hw_info; + struct cam_hw_info *custom_hw_info; +}; + +enum cam_custom_hw_resource_type { + CAM_CUSTOM_HW_RESOURCE_UNINT, + CAM_CUSTOM_HW_RESOURCE_SRC, + CAM_CUSTOM_HW_RESOURCE_MAX, +}; + +struct cam_custom_sub_mod_acq { + enum cam_custom_hw_resource_type rsrc_type; + int32_t acq; + cam_hw_mgr_event_cb_func event_cb; + void *priv; + struct cam_custom_resource_node *rsrc_node; +}; + +int cam_custom_hw_sub_mod_get_hw_caps(void *hw_priv, + void *get_hw_cap_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_init_hw(void *hw_priv, + void *init_hw_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_reset(void *hw_priv, + void *deinit_hw_args, uint32_t 
arg_size); +int cam_custom_hw_sub_mod_reserve(void *hw_priv, + void *reserve_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_release(void *hw_priv, + void *release_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_start(void *hw_priv, + void *start_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_stop(void *hw_priv, + void *stop_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_read(void *hw_priv, + void *read_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_write(void *hw_priv, + void *write_args, uint32_t arg_size); +int cam_custom_hw_sub_mod_process_cmd(void *hw_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +irqreturn_t cam_custom_hw_sub_mod_irq(int irq_num, void *data); + +#endif /* _CAM_CUSTOM_SUB_MOD_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_dev.c b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_dev.c new file mode 100644 index 0000000000..2e5f757eec --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_dev.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "cam_custom_sub_mod_dev.h" +#include "cam_custom_sub_mod_core.h" +#include "cam_custom_sub_mod_soc.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static struct cam_hw_intf *cam_custom_hw_sub_mod_list + [CAM_CUSTOM_SUB_MOD_MAX_INSTANCES] = {0, 0}; + +struct cam_custom_device_hw_info cam_custom_hw_info = { + .hw_ver = 0x0, + .irq_status = 0x0, + .irq_mask = 0x0, + .irq_clear = 0x0, +}; +EXPORT_SYMBOL(cam_custom_hw_info); + +int cam_custom_hw_sub_mod_init(struct cam_hw_intf **custom_hw, uint32_t hw_idx) +{ + int rc = 0; + + if (cam_custom_hw_sub_mod_list[hw_idx]) { + *custom_hw = cam_custom_hw_sub_mod_list[hw_idx]; + } else { + *custom_hw = NULL; + rc = -ENODEV; + } + return rc; +} + +static int cam_custom_hw_sub_mod_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *hw = NULL; + struct cam_hw_intf *hw_intf = NULL; + struct cam_custom_sub_mod_core_info *core_info = NULL; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!hw_intf) + return -ENOMEM; + + of_property_read_u32(pdev->dev.of_node, + "cell-index", &hw_intf->hw_idx); + + hw = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!hw) { + rc = -ENOMEM; + goto free_hw_intf; + } + + hw->soc_info.pdev = pdev; + hw->soc_info.dev = &pdev->dev; + hw->soc_info.dev_name = pdev->name; + hw_intf->hw_priv = hw; + hw_intf->hw_ops.get_hw_caps = cam_custom_hw_sub_mod_get_hw_caps; + hw_intf->hw_ops.init = cam_custom_hw_sub_mod_init_hw; + hw_intf->hw_ops.deinit = cam_custom_hw_sub_mod_deinit_hw; + hw_intf->hw_ops.reset = cam_custom_hw_sub_mod_reset; + hw_intf->hw_ops.reserve = cam_custom_hw_sub_mod_reserve; + hw_intf->hw_ops.release = 
cam_custom_hw_sub_mod_release; + hw_intf->hw_ops.start = cam_custom_hw_sub_mod_start; + hw_intf->hw_ops.stop = cam_custom_hw_sub_mod_stop; + hw_intf->hw_ops.read = cam_custom_hw_sub_mod_read; + hw_intf->hw_ops.write = cam_custom_hw_sub_mod_write; + hw_intf->hw_ops.process_cmd = cam_custom_hw_sub_mod_process_cmd; + hw_intf->hw_type = CAM_CUSTOM_HW_TYPE_1; + + platform_set_drvdata(pdev, hw_intf); + + hw->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_custom_sub_mod_core_info), + GFP_KERNEL); + if (!hw->core_info) { + CAM_DBG(CAM_CUSTOM, "Failed to alloc for core"); + rc = -ENOMEM; + goto free_hw; + } + core_info = (struct cam_custom_sub_mod_core_info *)hw->core_info; + + core_info->custom_hw_info = hw; + + rc = cam_custom_hw_sub_mod_init_soc_resources(&hw->soc_info, + cam_custom_hw_sub_mod_irq, hw); + if (rc < 0) { + CAM_ERR(CAM_CUSTOM, "Failed to init soc rc=%d", rc); + goto free_core_info; + } + + /* Initialize HW */ + + hw->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&hw->hw_mutex); + spin_lock_init(&hw->hw_lock); + init_completion(&hw->hw_complete); + + if (hw_intf->hw_idx < CAM_CUSTOM_HW_SUB_MOD_MAX) + cam_custom_hw_sub_mod_list[hw_intf->hw_idx] = hw_intf; + + /* needs to be invoked when custom hw is in place */ + //cam_custom_hw_sub_mod_init_hw(hw, NULL, 0); + + CAM_DBG(CAM_CUSTOM, "HW idx:%d component bound successfully", + hw_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return rc; + +free_core_info: + CAM_MEM_FREE(hw->core_info); +free_hw: + CAM_MEM_FREE(hw); +free_hw_intf: + CAM_MEM_FREE(hw_intf); + return rc; +} + +static void cam_custom_hw_sub_mod_component_unbind( + struct device *dev, struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + CAM_DBG(CAM_CUSTOM, "Unbinding component: %s", pdev->name); +} + +const static struct component_ops cam_custom_hw_sub_mod_component_ops = { + .bind = 
cam_custom_hw_sub_mod_component_bind, + .unbind = cam_custom_hw_sub_mod_component_unbind, +}; + +int cam_custom_hw_sub_mod_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CUSTOM, "Adding Custom HW sub module component"); + rc = component_add(&pdev->dev, &cam_custom_hw_sub_mod_component_ops); + if (rc) + CAM_ERR(CAM_CUSTOM, "failed to add component rc: %d", rc); + + return rc; +} + +static const struct of_device_id cam_custom_hw_sub_mod_dt_match[] = { + { + .compatible = "qcom,cam_custom_hw_sub_mod", + .data = &cam_custom_hw_info, + }, + {} +}; + +MODULE_DEVICE_TABLE(of, cam_custom_hw_sub_mod_dt_match); + +struct platform_driver cam_custom_hw_sub_mod_driver = { + .probe = cam_custom_hw_sub_mod_probe, + .driver = { + .name = CAM_CUSTOM_SUB_MOD_NAME, + .of_match_table = cam_custom_hw_sub_mod_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_custom_hw_sub_module_init(void) +{ + return platform_driver_register(&cam_custom_hw_sub_mod_driver); +} + +void cam_custom_hw_sub_module_exit(void) +{ + platform_driver_unregister(&cam_custom_hw_sub_mod_driver); +} + +MODULE_DESCRIPTION("CAM CUSTOM HW SUB MODULE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_dev.h b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_dev.h new file mode 100644 index 0000000000..4b9af3656c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_dev.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_CUSTOM_SUB_MOD_DEV_H_ +#define _CAM_CUSTOM_SUB_MOD_DEV_H_ + +#include "cam_custom_hw_mgr_intf.h" + +#define CAM_CUSTOM_SUB_MOD_NAME "cam_custom_sub_mod" + +#define CAM_CUSTOM_SUB_MOD_MAX_INSTANCES 2 + +/** + * @brief : API to register Custom submodule to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_custom_hw_sub_module_init(void); + +/** + * @brief : API to remove Custom submodule interface from platform framework. + */ +void cam_custom_hw_sub_module_exit(void); +#endif /* _CAM_CUSTOM_SUB_MOD_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_soc.c b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_soc.c new file mode 100644 index 0000000000..c3692dffba --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_soc.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_cpas_api.h" +#include "cam_custom_sub_mod_soc.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +int cam_custom_hw_sub_mod_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler, void *data) +{ + int rc = 0, i; + struct cam_custom_hw_soc_private *soc_private = NULL; + struct cam_cpas_register_params cpas_register_param; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_CUSTOM, + "Error! Get DT properties failed rc=%d", rc); + /* For Test Purposes */ + return 0; + } + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_custom_hw_soc_private), + GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_CUSTOM, "Error! 
soc_private Alloc Failed"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, irq_handler, &(irq_data[0])); + if (rc < 0) { + CAM_ERR(CAM_CUSTOM, + "Error! Request platform resources failed rc=%d", rc); + return rc; + } + + memset(&cpas_register_param, 0, sizeof(cpas_register_param)); + + strscpy(cpas_register_param.identifier, "custom", + CAM_HW_IDENTIFIER_LENGTH); + cpas_register_param.cell_index = soc_info->index; + cpas_register_param.dev = soc_info->dev; + cpas_register_param.cam_cpas_client_cb = NULL; + cpas_register_param.userdata = soc_info; + + rc = cam_cpas_register_client(&cpas_register_param); + if (rc < 0) + goto release_soc; + + soc_private->cpas_handle = + cpas_register_param.client_handle; + + rc = cam_cpas_select_qos_settings(CAM_CPAS_QOS_CUSTOM_SETTINGS_MASK); + if (rc) { + CAM_ERR(CAM_CPAS, "Custom QoS selection failed %d", rc); + goto unregister_cpas; + } + + return rc; + +unregister_cpas: + if (cam_cpas_unregister_client(soc_private->cpas_handle)) + CAM_ERR(CAM_CUSTOM, "CPAS0 unregistration failed"); +release_soc: + if (cam_soc_util_release_platform_resource(soc_info)) + CAM_ERR(CAM_CUSTOM, "CPAS0 release resource failed"); + + return rc; +} + +int cam_custom_hw_sub_mod_deinit_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_custom_hw_soc_private *soc_private = NULL; + + if (!soc_info) { + CAM_ERR(CAM_CUSTOM, "Error! soc_info NULL"); + return -ENODEV; + } + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_CUSTOM, "Error! soc_private NULL"); + return -ENODEV; + } + rc = cam_cpas_unregister_client(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_CUSTOM, "CPAS0 unregistration failed rc=%d", rc); + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc < 0) + CAM_ERR(CAM_CUSTOM, + "Error! 
Release platform resources failed rc=%d", rc); + + CAM_MEM_FREE(soc_private); + + return rc; +} + +int cam_custom_hw_sub_mod_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_custom_hw_soc_private *soc_private = soc_info->soc_private; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 2; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_READ; + axi_vote.axi_path[0].camnoc_bw = 7200000; + axi_vote.axi_path[0].mnoc_ab_bw = 7200000; + axi_vote.axi_path[0].mnoc_ib_bw = 7200000; + axi_vote.axi_path[1].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[1].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[1].camnoc_bw = 512000000; + axi_vote.axi_path[1].mnoc_ab_bw = 512000000; + axi_vote.axi_path[1].mnoc_ib_bw = 512000000; + + rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Error! CPAS0 start failed rc=%d", rc); + rc = -EFAULT; + goto end; + } + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_info->lowest_clk_level, true); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Error! enable platform failed rc=%d", rc); + goto stop_cpas; + } + + return 0; + +stop_cpas: + cam_cpas_stop(soc_private->cpas_handle); +end: + return rc; +} + +int cam_custom_hw_sub_mod_disable_soc_resources( + struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_custom_hw_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_CUSTOM, "Error! 
Invalid params"); + rc = -EINVAL; + return rc; + } + soc_private = soc_info->soc_private; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Disable platform failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_stop(soc_private->cpas_handle); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Error! CPAS stop failed rc=%d", rc); + return rc; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_soc.h b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_soc.h new file mode 100644 index 0000000000..e9c95d43d3 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw1/cam_custom_sub_mod_soc.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CUSTOM_HW_SUB_MOD_SOC_H_ +#define _CAM_CUSTOM_HW_SUB_MOD_SOC_H_ + +#include "cam_soc_util.h" +/* + * struct cam_custom_hw_soc_private: + * + * @Brief: Private SOC data specific to Custom HW Driver + * + * @cpas_handle: Handle returned on registering with CPAS driver. + * This handle is used for all further interface + * with CPAS. 
+ */ +struct cam_custom_hw_soc_private { + uint32_t cpas_handle; +}; + +int cam_custom_hw_sub_mod_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler, void *irq_data); + +int cam_custom_hw_sub_mod_deinit_soc_resources( + struct cam_hw_soc_info *soc_info); + +int cam_custom_hw_sub_mod_enable_soc_resources( + struct cam_hw_soc_info *soc_info); + +int cam_custom_hw_sub_mod_disable_soc_resources( + struct cam_hw_soc_info *soc_info); + +#endif /* _CAM_CUSTOM_HW_SUB_MOD_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.c new file mode 100644 index 0000000000..9b3adbf7b0 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.c @@ -0,0 +1,1576 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include +#include + +#include "cam_compat.h" +#include "cam_sync_api.h" +#include "cam_smmu_api.h" +#include "cam_req_mgr_workq.h" +#include "cam_custom_hw_mgr.h" +#include "cam_packet_util.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_mem_mgr_api.h" +#include "cam_common_util.h" +#include "cam_hw.h" + +static struct cam_custom_hw_mgr g_custom_hw_mgr; + +static int cam_custom_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args) +{ + int rc = 0; + struct cam_custom_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_query_cap_cmd *query = hw_caps_args; + struct cam_custom_query_cap_cmd custom_hw_cap; + + if (sizeof(struct cam_custom_query_cap_cmd) != query->size) { + CAM_ERR(CAM_CUSTOM, + "Input query cap size:%u does not match expected query cap size: %u", + query->size, sizeof(struct cam_custom_query_cap_cmd)); + return -EFAULT; + } + CAM_DBG(CAM_CUSTOM, "enter"); + + if (query->handle_type != CAM_HANDLE_USER_POINTER) + CAM_ERR(CAM_CUSTOM, "Wrong Args"); + + if (copy_from_user(&custom_hw_cap, + u64_to_user_ptr(query->caps_handle), + sizeof(struct cam_custom_query_cap_cmd))) { + rc = -EFAULT; + return rc; + } + + custom_hw_cap.device_iommu.non_secure = hw_mgr->img_iommu_hdl; + custom_hw_cap.device_iommu.secure = -1; + + /* Initializing cdm handles to -1 */ + custom_hw_cap.cdm_iommu.non_secure = -1; + custom_hw_cap.cdm_iommu.secure = -1; + + custom_hw_cap.num_dev = 1; + custom_hw_cap.dev_caps[0].hw_type = 0; + custom_hw_cap.dev_caps[0].hw_version = 0; + + if (copy_to_user(u64_to_user_ptr(query->caps_handle), + &custom_hw_cap, sizeof(struct cam_custom_query_cap_cmd))) + rc = -EFAULT; + + CAM_DBG(CAM_CUSTOM, "exit rc :%d", rc); + return rc; +} + +enum cam_custom_hw_resource_state + cam_custom_hw_mgr_get_custom_res_state( + uint32_t in_rsrc_state) +{ + enum cam_custom_hw_resource_state rsrc_state; + + CAM_DBG(CAM_CUSTOM, "rsrc_state %x", in_rsrc_state); + + switch (in_rsrc_state) { + case 
CAM_ISP_RESOURCE_STATE_UNAVAILABLE: + rsrc_state = CAM_CUSTOM_HW_RESOURCE_STATE_UNAVAILABLE; + break; + case CAM_ISP_RESOURCE_STATE_AVAILABLE: + rsrc_state = CAM_CUSTOM_HW_RESOURCE_STATE_AVAILABLE; + break; + case CAM_ISP_RESOURCE_STATE_RESERVED: + rsrc_state = CAM_CUSTOM_HW_RESOURCE_STATE_RESERVED; + break; + case CAM_ISP_RESOURCE_STATE_INIT_HW: + rsrc_state = CAM_CUSTOM_HW_RESOURCE_STATE_INIT_HW; + break; + case CAM_ISP_RESOURCE_STATE_STREAMING: + rsrc_state = CAM_CUSTOM_HW_RESOURCE_STATE_STREAMING; + break; + default: + rsrc_state = CAM_CUSTOM_HW_RESOURCE_STATE_UNAVAILABLE; + CAM_DBG(CAM_CUSTOM, "invalid rsrc type"); + break; + } + + return rsrc_state; +} + +enum cam_isp_resource_state + cam_custom_hw_mgr_get_isp_res_state( + uint32_t in_rsrc_state) +{ + enum cam_isp_resource_state rsrc_state; + + CAM_DBG(CAM_CUSTOM, "rsrc_state %x", in_rsrc_state); + + switch (in_rsrc_state) { + case CAM_CUSTOM_HW_RESOURCE_STATE_UNAVAILABLE: + rsrc_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + break; + case CAM_CUSTOM_HW_RESOURCE_STATE_AVAILABLE: + rsrc_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + break; + case CAM_CUSTOM_HW_RESOURCE_STATE_RESERVED: + rsrc_state = CAM_ISP_RESOURCE_STATE_RESERVED; + break; + case CAM_CUSTOM_HW_RESOURCE_STATE_INIT_HW: + rsrc_state = CAM_ISP_RESOURCE_STATE_INIT_HW; + break; + case CAM_CUSTOM_HW_RESOURCE_STATE_STREAMING: + rsrc_state = CAM_ISP_RESOURCE_STATE_STREAMING; + break; + default: + rsrc_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + CAM_DBG(CAM_CUSTOM, "invalid rsrc type"); + break; + } + + return rsrc_state; +} + +enum cam_isp_resource_type + cam_custom_hw_mgr_get_isp_res_type( + enum cam_custom_hw_mgr_res_type res_type) +{ + switch (res_type) { + case CAM_CUSTOM_CID_HW: + return CAM_ISP_RESOURCE_CID; + case CAM_CUSTOM_CSID_HW: + return CAM_ISP_RESOURCE_PIX_PATH; + default: + return CAM_ISP_RESOURCE_MAX; + } +} + +static void cam_custom_mgr_count_functional_hws(uint32_t *num_custom_functional) +{ + int i; + + for (i = 0; i < 
CAM_CUSTOM_CSID_HW_MAX; i++) { + if (g_custom_hw_mgr.custom_hw[i]) + (*num_custom_functional)++; + } +} + +static int cam_custom_hw_mgr_deinit_hw_res( + struct cam_custom_hw_mgr_res *hw_mgr_res) +{ + int rc = -1; + struct cam_isp_resource_node *isp_rsrc_node = NULL; + struct cam_hw_intf *hw_intf = NULL; + + isp_rsrc_node = + (struct cam_isp_resource_node *)hw_mgr_res->rsrc_node; + if (!isp_rsrc_node) { + CAM_ERR(CAM_CUSTOM, "Invalid args"); + return -EINVAL; + } + + hw_intf = isp_rsrc_node->hw_intf; + if (hw_intf->hw_ops.deinit) { + CAM_DBG(CAM_CUSTOM, "DEINIT HW for res_id:%u", + hw_mgr_res->res_id); + rc = hw_intf->hw_ops.deinit(hw_intf->hw_priv, + isp_rsrc_node, sizeof(struct cam_isp_resource_node)); + if (rc) + goto err; + } + + return 0; + +err: + CAM_DBG(CAM_CUSTOM, "DEINIT HW failed for res_id:%u", + hw_mgr_res->res_id); + return rc; +} + +static int cam_custom_hw_mgr_stop_hw_res( + struct cam_custom_hw_mgr_res *hw_mgr_res) +{ + int rc = -1; + struct cam_csid_hw_stop_args stop_cmd; + struct cam_isp_resource_node *isp_rsrc_node = NULL; + struct cam_hw_intf *hw_intf = NULL; + + isp_rsrc_node = + (struct cam_isp_resource_node *)hw_mgr_res->rsrc_node; + if (!isp_rsrc_node) { + CAM_ERR(CAM_CUSTOM, "Invalid args"); + return -EINVAL; + } + + hw_intf = isp_rsrc_node->hw_intf; + if (hw_intf->hw_ops.stop) { + CAM_DBG(CAM_CUSTOM, "STOP HW for res_id:%u", + hw_mgr_res->res_id); + stop_cmd.num_res = 1; + stop_cmd.node_res = &isp_rsrc_node; + stop_cmd.stop_cmd = CAM_CSID_HALT_AT_FRAME_BOUNDARY; + rc = hw_intf->hw_ops.stop(hw_intf->hw_priv, + &stop_cmd, sizeof(struct cam_csid_hw_stop_args)); + if (rc) + goto err; + } + + return 0; + +err: + CAM_DBG(CAM_CUSTOM, "STOP HW failed for res_id:%u", + hw_mgr_res->res_id); + return rc; +} + +static int cam_custom_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args) +{ + int rc = 0; + struct cam_custom_stop_args *custom_args; + struct cam_hw_stop_args *stop_args = stop_hw_args; + struct cam_custom_hw_mgr_res *hw_mgr_res; + struct 
cam_custom_hw_mgr_ctx *ctx; + + if (!hw_mgr_priv || !stop_hw_args) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + custom_args = (struct cam_custom_stop_args *)stop_args->args; + ctx = (struct cam_custom_hw_mgr_ctx *) + stop_args->ctxt_to_hw_map; + + if (!ctx || !ctx->ctx_in_use) { + CAM_ERR(CAM_CUSTOM, "Invalid context is used"); + return -EPERM; + } + + CAM_DBG(CAM_CUSTOM, " Enter...ctx id:%d", ctx->ctx_index); + + /* Stop custom cid here */ + list_for_each_entry(hw_mgr_res, + &ctx->res_list_custom_cid, list) { + rc = cam_custom_hw_mgr_stop_hw_res(hw_mgr_res); + if (rc) + CAM_ERR(CAM_CUSTOM, "failed to stop hw %d", + hw_mgr_res->res_id); + } + + /* Stop custom csid here */ + list_for_each_entry(hw_mgr_res, + &ctx->res_list_custom_csid, list) { + rc = cam_custom_hw_mgr_stop_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, "failed to stop hw %d", + hw_mgr_res->res_id); + } + } + + + /* stop custom hw here */ + + if (custom_args->stop_only) + goto end; + + /* Deinit custom cid here */ + list_for_each_entry(hw_mgr_res, + &ctx->res_list_custom_cid, list) { + rc = cam_custom_hw_mgr_deinit_hw_res(hw_mgr_res); + if (rc) + CAM_ERR(CAM_CUSTOM, "failed to stop hw %d", + hw_mgr_res->res_id); + } + + /* Deinit custom csid here */ + list_for_each_entry(hw_mgr_res, + &ctx->res_list_custom_csid, list) { + rc = cam_custom_hw_mgr_deinit_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, "failed to stop hw %d", + hw_mgr_res->res_id); + } + } + + /* deinit custom rsrc */ + +end: + return rc; +} + +static int cam_custom_hw_mgr_init_hw_res( + struct cam_custom_hw_mgr_res *hw_mgr_res) +{ + int rc = -1; + struct cam_isp_resource_node *isp_rsrc_node = NULL; + struct cam_hw_intf *hw_intf = NULL; + + isp_rsrc_node = + (struct cam_isp_resource_node *)hw_mgr_res->rsrc_node; + if (!isp_rsrc_node) { + CAM_ERR(CAM_CUSTOM, "Invalid args"); + return -EINVAL; + } + + hw_intf = isp_rsrc_node->hw_intf; + if (hw_intf->hw_ops.init) { + CAM_DBG(CAM_CUSTOM, "INIT HW for 
res_id:%u", + hw_mgr_res->res_id); + rc = hw_intf->hw_ops.init(hw_intf->hw_priv, + isp_rsrc_node, sizeof(struct cam_isp_resource_node)); + if (rc) + goto err; + } + + return 0; + +err: + CAM_DBG(CAM_CUSTOM, "INIT HW failed for res_id:%u", + hw_mgr_res->res_id); + return rc; +} + +static int cam_custom_hw_mgr_start_hw_res( + struct cam_custom_hw_mgr_res *hw_mgr_res) +{ + int rc = -1; + struct cam_isp_resource_node *isp_rsrc_node = NULL; + struct cam_hw_intf *hw_intf = NULL; + + isp_rsrc_node = + (struct cam_isp_resource_node *)hw_mgr_res->rsrc_node; + if (!isp_rsrc_node) { + CAM_ERR(CAM_CUSTOM, "Invalid args"); + return -EINVAL; + } + + hw_intf = isp_rsrc_node->hw_intf; + if (hw_intf->hw_ops.start) { + CAM_DBG(CAM_CUSTOM, "Start HW for res_id:%u", + hw_mgr_res->res_id); + rc = hw_intf->hw_ops.start(hw_intf->hw_priv, + isp_rsrc_node, sizeof(struct cam_isp_resource_node)); + if (rc) + goto err; + } + + return 0; + +err: + CAM_DBG(CAM_CUSTOM, "START HW failed for res_id:%u", + hw_mgr_res->res_id); + return rc; +} + +static int cam_custom_mgr_start_hw(void *hw_mgr_priv, + void *start_hw_args) +{ + int rc = 0; + struct cam_hw_config_args *hw_config; + struct cam_hw_stop_args stop_args; + struct cam_custom_hw_mgr_res *hw_mgr_res; + struct cam_custom_hw_mgr_ctx *ctx; + struct cam_custom_stop_args custom_stop_args; + struct cam_custom_start_args *custom_args; + + if (!hw_mgr_priv || !start_hw_args) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + custom_args = + (struct cam_custom_start_args *)start_hw_args; + + hw_config = (struct cam_hw_config_args *) + &custom_args->hw_config; + + ctx = (struct cam_custom_hw_mgr_ctx *) + hw_config->ctxt_to_hw_map; + if (!ctx || !ctx->ctx_in_use) { + CAM_ERR(CAM_CUSTOM, "Invalid context is used"); + return -EPERM; + } + + CAM_DBG(CAM_CUSTOM, "Enter... 
ctx id:%d", + ctx->ctx_index); + + if (custom_args->start_only) + goto start_only; + + /* Init custom cid */ + list_for_each_entry(hw_mgr_res, + &ctx->res_list_custom_cid, list) { + rc = cam_custom_hw_mgr_init_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Can not INIT CID(id :%d)", + hw_mgr_res->res_id); + goto deinit_hw; + } + } + + /* Init custom csid */ + list_for_each_entry(hw_mgr_res, + &ctx->res_list_custom_csid, list) { + rc = cam_custom_hw_mgr_init_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Can not INIT CSID(id :%d)", + hw_mgr_res->res_id); + goto deinit_hw; + } + } + + + /* Init custom hw here */ + + /* Apply init config */ + +start_only: + + /* Start custom csid */ + list_for_each_entry(hw_mgr_res, + &ctx->res_list_custom_csid, list) { + rc = cam_custom_hw_mgr_start_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Can not START CSID(id :%d)", + hw_mgr_res->res_id); + goto err; + } + } + + /* Start custom cid */ + list_for_each_entry(hw_mgr_res, + &ctx->res_list_custom_cid, list) { + rc = cam_custom_hw_mgr_start_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Can not START CID(id :%d)", + hw_mgr_res->res_id); + goto err; + } + } + + CAM_DBG(CAM_CUSTOM, "Start success for ctx id:%d", ctx->ctx_index); + return 0; + +err: + custom_stop_args.stop_only = false; + stop_args.args = (void *) &custom_stop_args; + stop_args.ctxt_to_hw_map = hw_config->ctxt_to_hw_map; + cam_custom_mgr_stop_hw(hw_mgr_priv, &stop_args); +deinit_hw: + /* deinit the hw previously initialized */ + CAM_DBG(CAM_CUSTOM, "Exit...(rc=%d)", rc); + return rc; +} + +static int cam_custom_mgr_read(void *hw_mgr_priv, void *read_args) +{ + return -EPERM; +} + +static int cam_custom_mgr_write(void *hw_mgr_priv, void *write_args) +{ + return -EPERM; +} + +static int cam_custom_hw_mgr_notify( + uint32_t evt_id, + struct cam_custom_hw_mgr_ctx *custom_ctx, + struct cam_custom_hw_event_info *evt_info) +{ + int rc = 0; + struct cam_custom_hw_reg_update_event_data 
reg_upd_data; + struct cam_custom_hw_done_event_data done_evt_data; + struct cam_custom_hw_error_event_data err_evt_data; + + switch (evt_id) { + case CAM_CUSTOM_HW_EVENT_RUP_DONE: + CAM_DBG(CAM_CUSTOM, "Notify RUP for ctx %u", + custom_ctx->ctx_index); + /* fill the evt data struct */ + custom_ctx->event_cb(custom_ctx->cb_priv, + CAM_CUSTOM_HW_EVENT_RUP_DONE, ®_upd_data); + break; + case CAM_CUSTOM_HW_EVENT_FRAME_DONE: + CAM_DBG(CAM_CUSTOM, "Notify FRAME DONE for ctx %u", + custom_ctx->ctx_index); + /* fill the evt data struct */ + done_evt_data.num_handles = 1; + custom_ctx->event_cb(custom_ctx->cb_priv, + CAM_CUSTOM_HW_EVENT_RUP_DONE, &done_evt_data); + break; + case CAM_CUSTOM_HW_EVENT_ERROR: + CAM_DBG(CAM_CUSTOM, "Notify ERROR for ctx %u", + custom_ctx->ctx_index); + /* fill the evt data struct */ + custom_ctx->event_cb(custom_ctx->cb_priv, + CAM_CUSTOM_HW_EVENT_RUP_DONE, &err_evt_data); + break; + default: + CAM_ERR(CAM_CUSTOM, "Invalid evt_id %u", evt_id); + rc = -EINVAL; + break; + } + + return rc; +} + +static int cam_custom_hw_mgr_evt_handler( + void *priv, uint32_t evt_id, void *hw_evt_info) +{ + int rc = 0; + struct cam_custom_hw_mgr_ctx *custom_ctx = NULL; + struct cam_custom_hw_mgr_ctx *custom_ctx_tmp = NULL; + struct cam_custom_hw_event_info *evt_info = NULL; + struct cam_custom_hw_mgr *hw_mgr = &g_custom_hw_mgr; + bool ctx_found = false; + + if (!hw_evt_info) { + CAM_ERR(CAM_CUSTOM, "invalid evt info"); + return -EINVAL; + } + + CAM_DBG(CAM_CUSTOM, "Invoked for HW Event ID 0x%x", + evt_id); + evt_info = (struct cam_custom_hw_event_info *)hw_evt_info; + + list_for_each_entry_safe(custom_ctx, custom_ctx_tmp, + &hw_mgr->used_ctx_list, list) { + if (custom_ctx->task_type == evt_info->task_type) { + ctx_found = true; + rc = cam_custom_hw_mgr_notify(evt_id, custom_ctx, + evt_info); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Failed to notify"); + return rc; + } + break; + } + } + + if (!ctx_found) + CAM_DBG(CAM_CUSTOM, "Failed to find affected ctx"); + + return 
rc;
}

/*
 * cam_custom_hw_mgr_put_ctx()
 *
 * Return a context to the given list (normally the free or used list)
 * and clear the caller's pointer. Always returns 0.
 * NOTE(review): caller is presumed to hold ctx_mutex where needed —
 * no locking is done here.
 */
static int cam_custom_hw_mgr_put_ctx(
	struct list_head *src_list,
	struct cam_custom_hw_mgr_ctx **custom_ctx)
{
	struct cam_custom_hw_mgr_ctx *ctx_ptr = NULL;

	ctx_ptr = *custom_ctx;
	if (ctx_ptr)
		list_add_tail(&ctx_ptr->list, src_list);
	*custom_ctx = NULL;
	return 0;
}

/*
 * cam_custom_hw_mgr_get_ctx()
 *
 * Pop the first context off src_list into *custom_ctx and zero its
 * sub_hw_list array. Returns -EINVAL (with *custom_ctx untouched) if
 * the list is empty.
 */
static int cam_custom_hw_mgr_get_ctx(
	struct list_head *src_list,
	struct cam_custom_hw_mgr_ctx **custom_ctx)
{
	struct cam_custom_hw_mgr_ctx *ctx_ptr = NULL;

	if (!list_empty(src_list)) {
		ctx_ptr = list_first_entry(src_list,
			struct cam_custom_hw_mgr_ctx, list);
		list_del_init(&ctx_ptr->list);
	} else {
		CAM_ERR(CAM_CUSTOM, "No more free custom hw mgr ctx");
		return -EINVAL;
	}
	*custom_ctx = ctx_ptr;
	/* fresh context: wipe any stale sub-module resource bookkeeping */
	memset(ctx_ptr->sub_hw_list, 0,
		sizeof(struct cam_custom_hw_mgr_res) *
		CAM_CUSTOM_HW_RES_MAX);

	return 0;
}

/*
 * cam_custom_hw_mgr_put_res()
 *
 * Append a resource node back onto src_list. Unlike put_ctx, the
 * caller's pointer is NOT cleared. Always returns 0.
 */
static int cam_custom_hw_mgr_put_res(
	struct list_head *src_list,
	struct cam_custom_hw_mgr_res **res)
{
	struct cam_custom_hw_mgr_res *res_ptr = NULL;

	res_ptr = *res;
	if (res_ptr)
		list_add_tail(&res_ptr->list, src_list);

	return 0;
}

/*
 * cam_custom_hw_mgr_get_res()
 *
 * Pop the first resource node off src_list into *res. On an empty
 * list, *res is set to NULL and -1 is returned.
 */
static int cam_custom_hw_mgr_get_res(
	struct list_head *src_list,
	struct cam_custom_hw_mgr_res **res)
{
	int rc = 0;
	struct cam_custom_hw_mgr_res *res_ptr = NULL;

	if (!list_empty(src_list)) {
		res_ptr = list_first_entry(src_list,
			struct cam_custom_hw_mgr_res, list);
		list_del_init(&res_ptr->list);
	} else {
		CAM_ERR(CAM_CUSTOM, "No more free custom ctx rsrc");
		rc = -1;
	}
	/* may assign NULL on failure — callers must check rc first */
	*res = res_ptr;

	return rc;
}

/*
 * cam_custom_hw_mgr_get_csid_res_type()
 *
 * Map a custom out-port type (UDI 0/1/2) to the CSID pixel-path
 * resource id, and record the context's event task type for UDI_1/2.
 * NOTE(review): UDI_0 does not set task_type here — presumably the
 * context default covers it; confirm against ctx initialization.
 * Returns CAM_IFE_PIX_PATH_RES_MAX for an unknown port type.
 */
static enum cam_ife_pix_path_res_id
	cam_custom_hw_mgr_get_csid_res_type(
	uint32_t out_port_type,
	struct cam_custom_hw_mgr_ctx *custom_ctx)
{
	enum cam_ife_pix_path_res_id path_id;

	CAM_DBG(CAM_CUSTOM, "out_port_type %x", out_port_type);

	switch (out_port_type) {
	case CAM_CUSTOM_OUT_RES_UDI_0:
		path_id = CAM_IFE_PIX_PATH_RES_UDI_0;
		break;
	case CAM_CUSTOM_OUT_RES_UDI_1:
		path_id = CAM_IFE_PIX_PATH_RES_UDI_1;
		custom_ctx->task_type =
CAM_CUSTOM_EVENT_TASK2; + break; + case CAM_CUSTOM_OUT_RES_UDI_2: + path_id = CAM_IFE_PIX_PATH_RES_UDI_2; + custom_ctx->task_type = CAM_CUSTOM_EVENT_TASK3; + break; + default: + path_id = CAM_IFE_PIX_PATH_RES_MAX; + CAM_DBG(CAM_CUSTOM, "maximum rdi output type exceeded"); + break; + } + + CAM_DBG(CAM_CUSTOM, "out_port %x path_id %d", out_port_type, path_id); + + return path_id; +} + +static int cam_custom_hw_mgr_acquire_cid_res( + struct cam_custom_hw_mgr_ctx *custom_ctx, + struct cam_isp_in_port_generic_info *in_port, + struct cam_custom_hw_mgr_res **cid_res, + enum cam_ife_pix_path_res_id path_res_id, + struct cam_isp_resource_node **cid_rsrc_node) +{ + int rc = -1; + int i; + struct cam_custom_hw_mgr *custom_hw_mgr; + struct cam_hw_intf *hw_intf; + struct cam_custom_hw_mgr_res *cid_res_temp; + struct cam_csid_hw_reserve_resource_args csid_acquire; + struct cam_isp_resource_node *isp_rsrc_node; + + custom_hw_mgr = custom_ctx->hw_mgr; + *cid_res = NULL; + + rc = cam_custom_hw_mgr_get_res( + &custom_ctx->free_res_list, cid_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, "No more free hw mgr resource"); + goto end; + } + + memset(&csid_acquire, 0, sizeof(csid_acquire)); + cid_res_temp = *cid_res; + csid_acquire.res_type = CAM_ISP_RESOURCE_CID; + csid_acquire.in_port = in_port; + csid_acquire.res_id = path_res_id; + csid_acquire.node_res = NULL; + CAM_DBG(CAM_CUSTOM, "path_res_id %d", path_res_id); + + for (i = 0; i < CAM_CUSTOM_CSID_HW_MAX; i++) { + if (!custom_hw_mgr->csid_devices[i]) + continue; + + hw_intf = custom_hw_mgr->csid_devices[i]; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &csid_acquire, sizeof(csid_acquire)); + /* since there is a need of 1 cid at this stage */ + if (rc) + continue; + else + break; + + } + + if (!csid_acquire.node_res) { + CAM_ERR(CAM_CUSTOM, + "Can not acquire custom cid resource for path %d", + path_res_id); + rc = -EAGAIN; + goto put_res; + } + + *cid_rsrc_node = csid_acquire.node_res; + isp_rsrc_node = csid_acquire.node_res; + 
cid_res_temp->rsrc_node = isp_rsrc_node; + cid_res_temp->res_type = CAM_CUSTOM_CID_HW; + cid_res_temp->res_id = isp_rsrc_node->res_id; + cam_custom_hw_mgr_put_res(&custom_ctx->res_list_custom_cid, + &cid_res_temp); + + CAM_DBG(CAM_CUSTOM, "CID acquired successfully %u", + isp_rsrc_node->res_id); + + return 0; + +put_res: + cam_custom_hw_mgr_put_res(&custom_ctx->free_res_list, cid_res); +end: + return rc; + +} + +static int cam_custom_hw_mgr_acquire_csid_res( + struct cam_custom_hw_mgr_ctx *custom_ctx, + struct cam_isp_in_port_generic_info *in_port_info) +{ + int rc = 0, i = 0; + struct cam_isp_out_port_generic_info *out_port; + struct cam_custom_hw_mgr_res *custom_csid_res; + struct cam_custom_hw_mgr_res *custom_cid_res; + struct cam_hw_intf *hw_intf; + struct cam_csid_hw_reserve_resource_args custom_csid_acquire; + enum cam_ife_pix_path_res_id path_res_id; + struct cam_isp_resource_node *isp_rsrc_node; + struct cam_isp_resource_node *cid_rsrc_node = NULL; + + for (i = 0; i < in_port_info->num_out_res; i++) { + out_port = &in_port_info->data[i]; + path_res_id = cam_custom_hw_mgr_get_csid_res_type( + out_port->res_type, custom_ctx); + + if (path_res_id == CAM_IFE_PIX_PATH_RES_MAX) { + CAM_WARN(CAM_CUSTOM, "Invalid out port res_type %u", + out_port->res_type); + continue; + } + + rc = cam_custom_hw_mgr_acquire_cid_res(custom_ctx, in_port_info, + &custom_cid_res, path_res_id, &cid_rsrc_node); + if (rc) { + CAM_ERR(CAM_CUSTOM, "No free cid rsrc %d", rc); + goto end; + } + + rc = cam_custom_hw_mgr_get_res(&custom_ctx->free_res_list, + &custom_csid_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, "No more free hw mgr rsrc"); + goto end; + } + + memset(&custom_csid_acquire, 0, sizeof(custom_csid_acquire)); + custom_csid_acquire.res_id = path_res_id; + custom_csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH; + custom_csid_acquire.in_port = in_port_info; + custom_csid_acquire.out_port = out_port; + custom_csid_acquire.sync_mode = 0; + custom_csid_acquire.node_res = NULL; + + 
hw_intf = cid_rsrc_node->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &custom_csid_acquire, sizeof(custom_csid_acquire)); + if (rc) { + CAM_ERR(CAM_CUSTOM, + "Custom csid acquire failed for hw_idx %u rc %d", + hw_intf->hw_idx, rc); + goto put_res; + } + + if (custom_csid_acquire.node_res == NULL) { + CAM_ERR(CAM_CUSTOM, "Acquire custom csid failed"); + rc = -EAGAIN; + goto put_res; + } + + isp_rsrc_node = custom_csid_acquire.node_res; + custom_csid_res->rsrc_node = isp_rsrc_node; + custom_csid_res->res_type = CAM_CUSTOM_CSID_HW; + custom_csid_res->res_id = custom_csid_acquire.res_id; + cam_custom_hw_mgr_put_res( + &custom_ctx->res_list_custom_csid, + &custom_csid_res); + CAM_DBG(CAM_CUSTOM, "Custom CSID acquired for path %d", + path_res_id); + } + + return 0; + +put_res: + cam_custom_hw_mgr_put_res(&custom_ctx->free_res_list, + &custom_csid_res); +end: + return rc; +} + +static int cam_custom_hw_mgr_free_hw_res( + struct cam_custom_hw_mgr_res *hw_mgr_res) +{ + int rc = 0; + struct cam_isp_resource_node *isp_rsrc_node = NULL; + struct cam_hw_intf *hw_intf = NULL; + + isp_rsrc_node = + (struct cam_isp_resource_node *)hw_mgr_res->rsrc_node; + if (!isp_rsrc_node) { + CAM_ERR(CAM_CUSTOM, "Invalid args"); + return -EINVAL; + } + + hw_intf = isp_rsrc_node->hw_intf; + if (hw_intf->hw_ops.release) { + CAM_DBG(CAM_CUSTOM, "RELEASE HW for res_id:%u", + hw_mgr_res->res_id); + rc = hw_intf->hw_ops.release(hw_intf->hw_priv, + isp_rsrc_node, sizeof(struct cam_isp_resource_node)); + if (rc) + CAM_ERR(CAM_CUSTOM, + "Release HW failed for hw_idx %d", + hw_intf->hw_idx); + } + + /* caller should make sure the resource is in a list */ + list_del_init(&hw_mgr_res->list); + memset(hw_mgr_res, 0, sizeof(*hw_mgr_res)); + INIT_LIST_HEAD(&hw_mgr_res->list); + + return 0; +} + +static int cam_custom_hw_mgr_release_hw_for_ctx( + struct cam_custom_hw_mgr_ctx *custom_ctx) +{ + int rc = -1; + struct cam_custom_hw_mgr_res *hw_mgr_res; + struct cam_custom_hw_mgr_res 
*hw_mgr_res_temp; + + /* Release custom cid */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &custom_ctx->res_list_custom_cid, list) { + rc = cam_custom_hw_mgr_free_hw_res(hw_mgr_res); + if (rc) + CAM_ERR(CAM_CUSTOM, "Can not release CID(id :%d)", + hw_mgr_res->res_id); + cam_custom_hw_mgr_put_res( + &custom_ctx->free_res_list, &hw_mgr_res); + } + + /* Release custom csid */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &custom_ctx->res_list_custom_csid, list) { + rc = cam_custom_hw_mgr_free_hw_res(hw_mgr_res); + if (rc) + CAM_ERR(CAM_CUSTOM, "Can not release CSID(id :%d)", + hw_mgr_res->res_id); + cam_custom_hw_mgr_put_res( + &custom_ctx->free_res_list, &hw_mgr_res); + } + + /* Release custom HW Here */ + + return 0; +} +static int cam_custom_mgr_release_hw(void *hw_mgr_priv, + void *release_hw_args) +{ + int rc = 0; + struct cam_hw_release_args *release_args = release_hw_args; + struct cam_custom_hw_mgr_ctx *custom_ctx; + + if (!hw_mgr_priv || !release_hw_args) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + custom_ctx = + (struct cam_custom_hw_mgr_ctx *)release_args->ctxt_to_hw_map; + if (!custom_ctx || !custom_ctx->ctx_in_use) { + CAM_ERR(CAM_CUSTOM, "Invalid context is used"); + return -EPERM; + } + + CAM_DBG(CAM_CUSTOM, "Enter...ctx id:%d", + custom_ctx->ctx_index); + + cam_custom_hw_mgr_release_hw_for_ctx(custom_ctx); + list_del_init(&custom_ctx->list); + custom_ctx->scratch_buffer_addr = 0; + custom_ctx->ctx_in_use = 0; + custom_ctx->task_type = CAM_CUSTOM_EVENT_INVALID; + cam_custom_hw_mgr_put_ctx(&g_custom_hw_mgr.free_ctx_list, &custom_ctx); + CAM_DBG(CAM_CUSTOM, "Release Exit.."); + return rc; +} + +static int cam_custom_hw_mgr_acquire_get_unified_dev_str( + struct cam_custom_acquire_hw_info *acquire_hw_info, + uint32_t *input_size, + struct cam_isp_in_port_generic_info **gen_port_info) +{ + int32_t rc = 0, i; + uint32_t in_port_length = 0; + struct cam_custom_in_port_info *in = NULL; + struct 
cam_isp_in_port_generic_info *port_info = NULL; + + in = (struct cam_custom_in_port_info *) + ((uint8_t *)&acquire_hw_info->data + + acquire_hw_info->input_info_offset + *input_size); + + in_port_length = sizeof(struct cam_custom_in_port_info) + + (in->num_out_res - 1) * + sizeof(struct cam_custom_out_port_info); + + *input_size += in_port_length; + + if ((*input_size) > acquire_hw_info->input_info_size) { + CAM_ERR(CAM_CUSTOM, "Input is not proper"); + rc = -EINVAL; + return rc; + } + + port_info = CAM_MEM_ZALLOC( + sizeof(struct cam_isp_in_port_generic_info), GFP_KERNEL); + + if (!port_info) + return -ENOMEM; + + port_info->res_type = in->res_type + + CAM_ISP_IFE_IN_RES_BASE - CAM_CUSTOM_IN_RES_BASE; + port_info->lane_type = in->lane_type; + port_info->lane_num = in->lane_num; + port_info->lane_cfg = in->lane_cfg; + port_info->vc[0] = in->vc[0]; + port_info->dt[0] = in->dt[0]; + port_info->num_valid_vc_dt = in->num_valid_vc_dt; + port_info->format[0] = in->format; + port_info->test_pattern = in->test_pattern; + port_info->usage_type = in->usage_type; + port_info->left_start = in->left_start; + port_info->left_stop = in->left_stop; + port_info->left_width = in->left_width; + port_info->right_start = in->right_start; + port_info->right_stop = in->right_stop; + port_info->right_width = in->right_width; + port_info->line_start = in->line_start; + port_info->line_stop = in->line_stop; + port_info->height = in->height; + port_info->pixel_clk = in->pixel_clk; + port_info->cust_node = 1; + port_info->num_out_res = in->num_out_res; + port_info->num_bytes_out = in->num_bytes_out; + + port_info->data = CAM_MEM_ZALLOC_ARRAY(in->num_out_res, + sizeof(struct cam_isp_out_port_generic_info), + GFP_KERNEL); + if (port_info->data == NULL) { + rc = -ENOMEM; + goto release_port_mem; + } + + for (i = 0; i < in->num_out_res; i++) { + port_info->data[i].res_type = in->data_flex[i].res_type; + port_info->data[i].format = in->data_flex[i].format; + } + + *gen_port_info = port_info; + 
return 0; + +release_port_mem: + CAM_MEM_FREE(port_info); + return rc; +} + +static int cam_custom_mgr_acquire_hw_for_ctx( + struct cam_custom_hw_mgr_ctx *custom_ctx, + struct cam_isp_in_port_generic_info *in_port_info, + uint32_t *acquired_hw_id, uint32_t *acquired_hw_path) +{ + int rc = 0, i = 0; + struct cam_hw_intf *hw_intf; + struct cam_custom_hw_mgr *custom_hw_mgr; + struct cam_custom_sub_mod_acq acq; + + custom_hw_mgr = custom_ctx->hw_mgr; + + /* Acquire custom csid */ + rc = cam_custom_hw_mgr_acquire_csid_res(custom_ctx, in_port_info); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Custom csid acquire failed rc %d"); + goto err; + } + + /* Acquire custom hw */ + for (i = 0; i < CAM_CUSTOM_HW_SUB_MOD_MAX; i++) { + hw_intf = custom_hw_mgr->custom_hw[i]; + if (!hw_intf) + continue; + + acq.event_cb = cam_custom_hw_mgr_evt_handler; + acq.priv = custom_ctx; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &acq, sizeof(acq)); + if (rc) { + CAM_DBG(CAM_CUSTOM, + "No custom resource from hw %d", + hw_intf->hw_idx); + continue; + } + /* need to be set in reserve based on HW being acquired */ + //custom_ctx->sub_hw_list[i].hw_res = acq.rsrc_node; + //custom_ctx->sub_hw_list[i].res_type = + //custom_ctx->sub_hw_list[i].res_id = ; + break; + } + +err: + return rc; +} + +static int cam_custom_mgr_acquire_hw( + void *hw_mgr_priv, + void *acquire_hw_args) +{ + int rc = -1, i; + uint32_t input_size = 0; + struct cam_custom_hw_mgr_ctx *custom_ctx; + struct cam_custom_hw_mgr *custom_hw_mgr; + struct cam_custom_acquire_hw_info *acquire_hw_info = NULL; + struct cam_isp_in_port_generic_info *gen_port_info = NULL; + struct cam_hw_acquire_args *acquire_args = + (struct cam_hw_acquire_args *)acquire_hw_args; + + if (!hw_mgr_priv || !acquire_args) { + CAM_ERR(CAM_CUSTOM, "Invalid params"); + return -EINVAL; + } + + custom_hw_mgr = (struct cam_custom_hw_mgr *) hw_mgr_priv; + mutex_lock(&g_custom_hw_mgr.ctx_mutex); + rc = cam_custom_hw_mgr_get_ctx( + &custom_hw_mgr->free_ctx_list, 
&custom_ctx); + if (rc || !custom_ctx) { + CAM_ERR(CAM_CUSTOM, "Get custom hw context failed"); + mutex_unlock(&g_custom_hw_mgr.ctx_mutex); + goto err; + } + mutex_unlock(&g_custom_hw_mgr.ctx_mutex); + + custom_ctx->hw_mgr = custom_hw_mgr; + custom_ctx->cb_priv = acquire_args->context_data; + custom_ctx->event_cb = acquire_args->event_cb; + + acquire_hw_info = + (struct cam_custom_acquire_hw_info *)acquire_args->acquire_info; + + for (i = 0; i < acquire_hw_info->num_inputs; i++) { + rc = cam_custom_hw_mgr_acquire_get_unified_dev_str( + acquire_hw_info, &input_size, &gen_port_info); + + if (rc < 0) { + CAM_ERR(CAM_CUSTOM, "Failed in parsing: %d", rc); + goto free_res; + } + + CAM_DBG(CAM_CUSTOM, "in_res_type %x", gen_port_info->res_type); + rc = cam_custom_mgr_acquire_hw_for_ctx(custom_ctx, + gen_port_info, &acquire_args->acquired_hw_id[i], + acquire_args->acquired_hw_path[i]); + if (rc) { + CAM_ERR(CAM_CUSTOM, "can not acquire resource"); + goto free_mem; + } + + CAM_MEM_FREE(gen_port_info->data); + CAM_MEM_FREE(gen_port_info); + gen_port_info = NULL; + } + + custom_ctx->ctx_in_use = 1; + custom_ctx->scratch_buffer_addr = 0; + acquire_args->ctxt_to_hw_map = custom_ctx; + cam_custom_hw_mgr_put_ctx(&custom_hw_mgr->used_ctx_list, &custom_ctx); + CAM_DBG(CAM_CUSTOM, "Exit...(success)"); + return 0; + +free_mem: + CAM_MEM_FREE(gen_port_info->data); + CAM_MEM_FREE(gen_port_info); +free_res: + cam_custom_hw_mgr_release_hw_for_ctx(custom_ctx); + cam_custom_hw_mgr_put_ctx(&custom_hw_mgr->free_ctx_list, &custom_ctx); +err: + CAM_DBG(CAM_CUSTOM, "Exit...(rc=%d)", rc); + return rc; +} + +static int cam_custom_add_io_buffers( + int iommu_hdl, + struct cam_hw_prepare_update_args *prepare) +{ + int rc = 0, i = 0, num_out_buf = 0; + int32_t hdl; + uint32_t plane_id; + size_t size; + struct cam_buf_io_cfg *io_cfg; + struct cam_hw_fence_map_entry *out_map_entries; + struct cam_custom_prepare_hw_update_data *prepare_hw_data; + bool is_buf_secure; + + io_cfg = (struct cam_buf_io_cfg 
*)((uint8_t *) + &prepare->packet->payload_flex + + prepare->packet->io_configs_offset); + prepare_hw_data = + (struct cam_custom_prepare_hw_update_data *) + prepare->priv; + + /* Validate hw update entries */ + for (i = 0; i < prepare->packet->num_io_configs; i++) { + CAM_DBG(CAM_CUSTOM, "======= io config idx %d ============", i); + CAM_DBG(CAM_CUSTOM, + "i %d req_id %llu resource_type:%d fence:%d direction %d", + i, prepare->packet->header.request_id, + io_cfg[i].resource_type, io_cfg[i].fence, + io_cfg[i].direction); + + CAM_DBG(CAM_CUSTOM, "format: %d", io_cfg[i].format); + + if (io_cfg[i].direction == CAM_BUF_OUTPUT) { + CAM_DBG(CAM_CUSTOM, + "output fence 0x%x", io_cfg[i].fence); + out_map_entries = + &prepare->out_map_entries[num_out_buf]; + if (num_out_buf < prepare->max_out_map_entries) { + out_map_entries->resource_handle = + io_cfg[i].resource_type; + out_map_entries->sync_id = + io_cfg[i].fence; + num_out_buf++; + } else { + CAM_ERR(CAM_CUSTOM, "out: %d max: %d", + num_out_buf, + prepare->max_out_map_entries); + return -EINVAL; + } + } else if (io_cfg[i].direction == CAM_BUF_INPUT) { + CAM_DBG(CAM_CUSTOM, + "input fence 0x%x", io_cfg[i].fence); + return -EINVAL; + } else { + CAM_ERR(CAM_CUSTOM, "Invalid io config direction :%d", + io_cfg[i].direction); + return -EINVAL; + } + + if (io_cfg[i].direction == CAM_BUF_OUTPUT) { + for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES; + plane_id++) { + /* for custom HW it's one plane only */ + if (!io_cfg[i].mem_handle[plane_id]) + continue; + + hdl = io_cfg[i].mem_handle[plane_id]; + is_buf_secure = cam_mem_is_secure_buf(hdl); + if (is_buf_secure) { + CAM_ERR(CAM_CUSTOM, + "secure buffer not supported"); + return -EINVAL; + } + + rc = cam_mem_get_io_buf( + io_cfg[i].mem_handle[plane_id], + iommu_hdl, + &prepare_hw_data->io_addr[plane_id], + &size, NULL, NULL); + if (rc) { + CAM_ERR(CAM_CUSTOM, + "No io addr for plane: %d", + plane_id); + return -EINVAL; + } + + prepare_hw_data->io_addr[plane_id] += + 
io_cfg[i].offsets[plane_id]; + CAM_DBG(CAM_CUSTOM, + "handle 0x%x for plane %d addr %pK", + hdl, plane_id, + prepare_hw_data->io_addr[plane_id]); + } + } + } + + prepare->num_out_map_entries = num_out_buf; + prepare->num_in_map_entries = 0; + return rc; +} + +static int cam_custom_mgr_prepare_hw_update(void *hw_mgr_priv, + void *prepare_hw_update_args) +{ + int rc = 0; + struct cam_hw_prepare_update_args *prepare; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_custom_prepare_hw_update_data *prepare_hw_data; + struct cam_custom_hw_mgr *hw_mgr; + struct cam_custom_hw_mgr_ctx *ctx = NULL; + uint32_t *ptr; + size_t len; + struct cam_custom_cmd_buf_type_1 *custom_buf_type1; + + if (!hw_mgr_priv || !prepare_hw_update_args) { + CAM_ERR(CAM_CUSTOM, "Invalid args"); + return -EINVAL; + } + + hw_mgr = (struct cam_custom_hw_mgr *) hw_mgr_priv; + prepare = + (struct cam_hw_prepare_update_args *) prepare_hw_update_args; + + CAM_DBG(CAM_CUSTOM, "Enter for req_id %lld", + prepare->packet->header.request_id); + + /* Prepare packet */ + prepare_hw_data = + (struct cam_custom_prepare_hw_update_data *)prepare->priv; + prepare_hw_data->packet_opcode_type = + (prepare->packet->header.op_code & 0xFFF); + ctx = (struct cam_custom_hw_mgr_ctx *) prepare->ctxt_to_hw_map; + + prepare->num_hw_update_entries = 0; + prepare->num_in_map_entries = 0; + prepare->num_out_map_entries = 0; + + /* Test purposes-check the data in cmd buffer */ + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint8_t *)&prepare->packet->payload_flex + + prepare->packet->cmd_buf_offset); + rc = cam_packet_util_get_cmd_mem_addr( + cmd_desc->mem_handle, &ptr, &len); + if (rc) { + CAM_ERR(CAM_CUSTOM, "Failed to get CPU addr handle 0x%x rc=%d", + cmd_desc->mem_handle, rc); + return rc; + } + + if (!rc) { + if (((size_t)cmd_desc->offset >= len) || + ((size_t)cmd_desc->size > (len - (size_t)cmd_desc->offset))) { + CAM_ERR(CAM_UTIL, "invalid mem len:%zd cmd desc size:%u off:%u hdl:0x%x", + len, cmd_desc->size, 
cmd_desc->offset, cmd_desc->mem_handle); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return -EINVAL; + } + + ptr += (cmd_desc->offset / 4); + custom_buf_type1 = + (struct cam_custom_cmd_buf_type_1 *)ptr; + CAM_DBG(CAM_CUSTOM, "frame num %u", + custom_buf_type1->custom_info); + } + + /* + * Populate scratch buffer addr here based on INIT + */ + ctx->scratch_buffer_addr = 0x0; + prepare_hw_data->num_cfg = 0; + cam_custom_add_io_buffers(hw_mgr->img_iommu_hdl, prepare); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return 0; +} + +static int cam_custom_hw_mgr_reset_csid_res( + struct cam_custom_hw_mgr_res *hw_mgr_res) +{ + int rc = -1; + struct cam_csid_reset_cfg_args csid_reset_args; + struct cam_isp_resource_node *custom_rsrc_node = NULL; + struct cam_hw_intf *hw_intf = NULL; + + custom_rsrc_node = + (struct cam_isp_resource_node *)hw_mgr_res->rsrc_node; + if (!custom_rsrc_node) { + CAM_ERR(CAM_CUSTOM, "Invalid args"); + return -EINVAL; + } + + csid_reset_args.reset_type = CAM_IFE_CSID_RESET_PATH; + csid_reset_args.node_res = custom_rsrc_node; + hw_intf = custom_rsrc_node->hw_intf; + if (hw_intf->hw_ops.reset) { + CAM_DBG(CAM_CUSTOM, "RESET HW for res_id:%u", + hw_mgr_res->res_id); + rc = hw_intf->hw_ops.reset(hw_intf->hw_priv, + &csid_reset_args, + sizeof(struct cam_csid_reset_cfg_args)); + if (rc) + goto err; + } + + return 0; + +err: + CAM_ERR(CAM_CUSTOM, + "RESET HW failed for res_id:%u", + hw_mgr_res->res_id); + return rc; +} + +static int cam_custom_hw_mgr_reset( + void *hw_mgr_priv, void *hw_reset_args) +{ + struct cam_hw_reset_args *reset_args = + hw_reset_args; + struct cam_custom_hw_mgr_ctx *ctx; + struct cam_custom_hw_mgr_res *hw_mgr_res; + int rc = 0; + + if (!hw_mgr_priv || !hw_reset_args) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + ctx = (struct cam_custom_hw_mgr_ctx *) + reset_args->ctxt_to_hw_map; + if (!ctx || !ctx->ctx_in_use) { + CAM_ERR(CAM_CUSTOM, "Invalid context is used"); + return -EPERM; + } + + 
CAM_DBG(CAM_CUSTOM, "Reset SBI CSID and SBI core"); + list_for_each_entry(hw_mgr_res, &ctx->res_list_custom_csid, list) { + rc = cam_custom_hw_mgr_reset_csid_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_CUSTOM, + "Failed to reset CSID:%d rc: %d", + hw_mgr_res->res_id, rc); + goto end; + } + } + + /* Reset SBI HW */ + +end: + return rc; +} + +static int cam_custom_mgr_config_hw(void *hw_mgr_priv, + void *hw_config_args) +{ + int rc = 0; + int i = 0; + struct cam_custom_hw_mgr_ctx *custom_ctx; + struct cam_custom_hw_mgr_res *res; + struct cam_hw_config_args *cfg; + struct cam_hw_intf *hw_intf = NULL; + struct cam_custom_prepare_hw_update_data *prepare_hw_data; + + CAM_DBG(CAM_CUSTOM, "Enter"); + if (!hw_mgr_priv || !hw_config_args) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + cfg = + (struct cam_hw_config_args *)hw_config_args; + custom_ctx = cfg->ctxt_to_hw_map; + + if (!custom_ctx->ctx_in_use) { + CAM_ERR(CAM_CUSTOM, "Invalid context parameters"); + return -EPERM; + } + + prepare_hw_data = + (struct cam_custom_prepare_hw_update_data *) + cfg->priv; + for (i = 0; i < prepare_hw_data->num_cfg; i++) { + CAM_DBG(CAM_CUSTOM, "plane %d io_addr %pK", + i, prepare_hw_data->io_addr[i]); + } + + prepare_hw_data = + (struct cam_custom_prepare_hw_update_data *)cfg->priv; + for (i = 0; i < prepare_hw_data->num_cfg; i++) { + CAM_DBG(CAM_CUSTOM, "plane %d io_addr %p cfg %u", i, + prepare_hw_data->io_addr[i], + prepare_hw_data->num_cfg); + /* this will be ZSLBuffer addr */ + } + + for (i = 0; i < CAM_CUSTOM_HW_SUB_MOD_MAX; i++) { + res = &custom_ctx->sub_hw_list[i]; + if (res->hw_res) { + hw_intf = res->hw_res->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + struct cam_custom_sub_mod_req_to_dev req_to_dev; + + req_to_dev.ctx_idx = custom_ctx->ctx_index; + req_to_dev.dev_idx = i; + req_to_dev.req_id = cfg->request_id; + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_CUSTOM_SUBMIT_REQ, + &req_to_dev, sizeof(req_to_dev)); + } + } + } + + return 
rc; +} + +static int cam_custom_mgr_cmd(void *hw_mgr_priv, void *cmd_args) +{ + int rc = 0; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_custom_hw_cmd_args *custom_hw_cmd_args; + struct cam_custom_hw_mgr_ctx *custom_ctx = NULL; + + if (!hw_mgr_priv || !cmd_args) { + CAM_ERR(CAM_CUSTOM, "Invalid arguments"); + return -EINVAL; + } + + custom_ctx = + (struct cam_custom_hw_mgr_ctx *) + hw_cmd_args->ctxt_to_hw_map; + + if (!custom_ctx || !custom_ctx->ctx_in_use) { + CAM_ERR(CAM_CUSTOM, "Fatal: Invalid context is used"); + return -EPERM; + } + + switch (hw_cmd_args->cmd_type) { + case CAM_HW_MGR_CMD_INTERNAL: + if (!hw_cmd_args->u.internal_args) { + CAM_ERR(CAM_CUSTOM, "Invalid cmd arguments"); + return -EINVAL; + } + + custom_hw_cmd_args = (struct cam_custom_hw_cmd_args *) + hw_cmd_args->u.internal_args; + + switch (custom_hw_cmd_args->cmd_type) { + case CAM_CUSTOM_HW_MGR_PROG_DEFAULT_CONFIG: + CAM_DBG(CAM_CUSTOM, "configure RUP and scratch buffer"); + //use custom_ctx->scratch_buffer_addr + break; + default: + CAM_ERR(CAM_CUSTOM, "Invalid HW mgr command:0x%x", + hw_cmd_args->cmd_type); + rc = -EINVAL; + break; + } + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int cam_custom_hw_mgr_init(struct device_node *of_node, + struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl) +{ + int rc = 0; + int i, j; + uint32_t num_custom_available, num_custom_functional = 0; + + memset(&g_custom_hw_mgr, 0, sizeof(g_custom_hw_mgr)); + mutex_init(&g_custom_hw_mgr.ctx_mutex); + + /* fill custom hw intf information */ + for (i = 0; i < CAM_CUSTOM_HW_SUB_MOD_MAX; i++) { + /* Initialize sub modules */ + rc = cam_custom_hw_sub_mod_init( + &g_custom_hw_mgr.custom_hw[i], i); + CAM_DBG(CAM_CUSTOM, "sub module initialized. rc:%d", rc); + } + + for (i = 0; i < CAM_CUSTOM_CSID_HW_MAX; i++) { + /* Initialize csid custom modules */ + rc = cam_custom_csid_hw_init( + &g_custom_hw_mgr.csid_devices[i], i); + CAM_DBG(CAM_CUSTOM, "csid custom module initialized. 
rc:%d", rc); + } + + INIT_LIST_HEAD(&g_custom_hw_mgr.free_ctx_list); + INIT_LIST_HEAD(&g_custom_hw_mgr.used_ctx_list); + + /* + * for now, we only support one iommu handle. later + * we will need to setup more iommu handle for other + * use cases. + * Also, we have to release them once we have the + * deinit support + */ + if (cam_smmu_get_handle("custom", + &g_custom_hw_mgr.img_iommu_hdl)) { + CAM_ERR(CAM_CUSTOM, "Can not get iommu handle"); + return -EINVAL; + } + + for (i = 0; i < CAM_CTX_MAX; i++) { + memset(&g_custom_hw_mgr.ctx_pool[i], 0, + sizeof(g_custom_hw_mgr.ctx_pool[i])); + INIT_LIST_HEAD(&g_custom_hw_mgr.ctx_pool[i].list); + + /* init context pool */ + INIT_LIST_HEAD(&g_custom_hw_mgr.ctx_pool[i].free_res_list); + INIT_LIST_HEAD( + &g_custom_hw_mgr.ctx_pool[i].res_list_custom_csid); + INIT_LIST_HEAD( + &g_custom_hw_mgr.ctx_pool[i].res_list_custom_cid); + for (j = 0; j < CAM_CUSTOM_HW_RES_MAX; j++) { + INIT_LIST_HEAD( + &g_custom_hw_mgr.ctx_pool[i].res_pool[j].list); + list_add_tail( + &g_custom_hw_mgr.ctx_pool[i].res_pool[j].list, + &g_custom_hw_mgr.ctx_pool[i].free_res_list); + } + + g_custom_hw_mgr.ctx_pool[i].ctx_index = i; + g_custom_hw_mgr.ctx_pool[i].hw_mgr = &g_custom_hw_mgr; + + list_add_tail(&g_custom_hw_mgr.ctx_pool[i].list, + &g_custom_hw_mgr.free_ctx_list); + } + + /* fill return structure */ + hw_mgr_intf->hw_mgr_priv = &g_custom_hw_mgr; + hw_mgr_intf->hw_get_caps = cam_custom_mgr_get_hw_caps; + hw_mgr_intf->hw_acquire = cam_custom_mgr_acquire_hw; + hw_mgr_intf->hw_start = cam_custom_mgr_start_hw; + hw_mgr_intf->hw_stop = cam_custom_mgr_stop_hw; + hw_mgr_intf->hw_read = cam_custom_mgr_read; + hw_mgr_intf->hw_write = cam_custom_mgr_write; + hw_mgr_intf->hw_release = cam_custom_mgr_release_hw; + hw_mgr_intf->hw_prepare_update = cam_custom_mgr_prepare_hw_update; + hw_mgr_intf->hw_config = cam_custom_mgr_config_hw; + hw_mgr_intf->hw_reset = cam_custom_hw_mgr_reset; + hw_mgr_intf->hw_cmd = cam_custom_mgr_cmd; + + if (iommu_hdl) + *iommu_hdl = 
g_custom_hw_mgr.img_iommu_hdl; + + cam_custom_get_num_hws(&num_custom_available); + cam_custom_mgr_count_functional_hws(&num_custom_functional); + rc = cam_cpas_prepare_subpart_info(CAM_CUSTOM_HW_IDX, num_custom_available, + num_custom_functional); + if (rc) + CAM_ERR(CAM_CUSTOM, "Failed to populate num_custom, rc: %d", rc); + + CAM_DBG(CAM_CUSTOM, "HW manager initialized"); + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.h new file mode 100644 index 0000000000..f5add5d149 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CUSTOM_HW_MGR_H_ +#define _CAM_CUSTOM_HW_MGR_H_ + +#include +#include "cam_custom_hw_mgr_intf.h" +#include "cam_custom_sub_mod_core.h" +#include "cam_ife_csid_hw_intf.h" +#include "cam_isp_hw.h" +#include "cam_custom_hw.h" + +#include +#include + +enum cam_custom_hw_mgr_res_type { + CAM_CUSTOM_HW_SUB_MODULE, + CAM_CUSTOM_CID_HW, + CAM_CUSTOM_CSID_HW, + CAM_CUSTOM_HW_MAX, +}; + +/* Needs to be suitably defined */ +#define CAM_CUSTOM_HW_OUT_RES_MAX 1 + +/** + * struct cam_custom_hw_mgr_res - HW resources for the Custom manager + * + * @list: used by the resource list + * @res_type: Custom manager resource type + * @res_id: resource id based on the resource type for root or + * leaf resource, it matches the KMD interface port id. + * For branch resource, it is defined by the Custom HW + * layer + * @rsrc_node: isp hw layer resource for csid/cid + * @hw_res: hw layer resource array. 
+ */ +struct cam_custom_hw_mgr_res { + struct list_head list; + enum cam_custom_hw_mgr_res_type res_type; + uint32_t res_id; + void *rsrc_node; + struct cam_custom_resource_node *hw_res; +}; + + +/** + * struct ctx_base_info - Base hardware information for the context + * + * @idx: Base resource index + * + */ +struct ctx_base_info { + uint32_t idx; +}; + + +/** + * struct cam_custom_hw_mgr_ctx - Custom HW manager ctx object + * + * @list: used by the ctx list. + * @ctx_index: acquired context id. + * @hw_mgr: Custom hw mgr which owns this context + * @ctx_in_use: flag to tell whether context is active + * @res_list_custom_csid: custom csid modules for this context + * @res_list_custom_cid: custom cid modules for this context + * @sub_hw_list: HW submodules for this context + * @free_res_list: Free resources list for the branch node + * @res_pool: memory storage for the free resource list + * @base: device base index array contain the all + * Custom HW instance associated with this ctx. 
+ * @num_base: number of valid base data in the base array + * @init_done: indicate whether init hw is done + * @event_cb: event_cb to ctx + * @scratch_buffer_addr: scratch buffer address + * @task_type: Custom HW task type + * @cb_priv: data sent back with event_cb + * + */ +struct cam_custom_hw_mgr_ctx { + struct list_head list; + + uint32_t ctx_index; + struct cam_custom_hw_mgr *hw_mgr; + uint32_t ctx_in_use; + + struct list_head res_list_custom_csid; + struct list_head res_list_custom_cid; + struct cam_custom_hw_mgr_res sub_hw_list[ + CAM_CUSTOM_HW_RES_MAX]; + + struct list_head free_res_list; + struct cam_custom_hw_mgr_res res_pool[CAM_CUSTOM_HW_RES_MAX]; + struct ctx_base_info base[CAM_CUSTOM_HW_SUB_MOD_MAX]; + uint32_t num_base; + bool init_done; + cam_hw_event_cb_func event_cb; + uint64_t scratch_buffer_addr; + enum cam_custom_hw_task_type task_type; + void *cb_priv; +}; + +/** + * struct cam_custom_hw_mgr - Custom HW Manager + * + * @img_iommu_hdl: iommu handle + * @custom_hw: Custom device instances array. This will be filled by + * HW layer during initialization + * @csid_devices: Custom csid device instance array + * @ctx_mutex: mutex for the hw context pool + * @free_ctx_list: free hw context list + * @used_ctx_list: used hw context list + * @ctx_pool: context storage + * + */ +struct cam_custom_hw_mgr { + int img_iommu_hdl; + struct cam_hw_intf *custom_hw[CAM_CUSTOM_HW_SUB_MOD_MAX]; + struct cam_hw_intf *csid_devices[CAM_CUSTOM_CSID_HW_MAX]; + struct mutex ctx_mutex; + struct list_head free_ctx_list; + struct list_head used_ctx_list; + struct cam_custom_hw_mgr_ctx ctx_pool[CAM_CTX_MAX]; +}; + +/** + * cam_custom_get_num_hws() + * + * @brief : Populates number of custom. + * + * @num_custom : Fills number of custom hws in the address passed. + * + */ +void cam_custom_get_num_hws(uint32_t *num_custom); + +/** + * cam_custom_hw_mgr_init() + * + * @brief: Initialize the Custom hardware manger. This is the + * etnry functinon for the Cust HW manager. 
+ * + * @of_node: Device node + * @hw_mgr_intf: Custom hardware manager object returned + * @iommu_hdl: Iommu handle to be returned + * + */ +int cam_custom_hw_mgr_init(struct device_node *of_node, + struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl); + + +/* Utility APIs */ + +/** + * cam_custom_hw_mgr_get_custom_res_state() + * + * @brief: Obtain equivalent custom rsrc state + * from isp rsrc state + * + * @in_rsrc_state: isp rsrc state + * + */ +enum cam_custom_hw_resource_state + cam_custom_hw_mgr_get_custom_res_state( + uint32_t in_rsrc_state); + +/** + * cam_custom_hw_mgr_get_isp_res_state() + * + * @brief: Obtain equivalent isp rsrc state + * from custom rsrc state + * + * @in_rsrc_state: custom rsrc state + * + */ +enum cam_isp_resource_state + cam_custom_hw_mgr_get_isp_res_state( + uint32_t in_rsrc_state); + +/** + * cam_custom_hw_mgr_get_isp_res_type() + * + * @brief: Obtain equivalent isp rsrc type + * from custom rsrc type + * + * @res_type: custom rsrc type + * + */ +enum cam_isp_resource_type + cam_custom_hw_mgr_get_isp_res_type( + enum cam_custom_hw_mgr_res_type res_type); + +#endif /* _CAM_CUSTOM_HW_MGR_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/include/cam_custom_hw.h b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/include/cam_custom_hw.h new file mode 100644 index 0000000000..33c0ebff77 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/include/cam_custom_hw.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_CUSTOM_HW_H_ +#define _CAM_CUSTOM_HW_H_ + +#include +#include +#include + +#include + +enum cam_custom_hw_resource_state { + CAM_CUSTOM_HW_RESOURCE_STATE_UNAVAILABLE = 0, + CAM_CUSTOM_HW_RESOURCE_STATE_AVAILABLE = 1, + CAM_CUSTOM_HW_RESOURCE_STATE_RESERVED = 2, + CAM_CUSTOM_HW_RESOURCE_STATE_INIT_HW = 3, + CAM_CUSTOM_HW_RESOURCE_STATE_STREAMING = 4, +}; + +enum cam_custom_hw_task_type { + CAM_CUSTOM_EVENT_INVALID, + CAM_CUSTOM_EVENT_TASK1, + CAM_CUSTOM_EVENT_TASK2, + CAM_CUSTOM_EVENT_TASK3, + CAM_CUSTOM_EVENT_MAX, +}; + +/* + * struct cam_custom_hw_event_info: + * + * @Brief: Structure to pass event details to hw mgr + * + * @task_type: Type of CUSTOM HW Task + * @err_type: Error type if any + * + */ +struct cam_custom_hw_event_info { + enum cam_custom_hw_task_type task_type; + uint32_t err_type; +}; + +/* + * struct cam_custom_resource_node: + * + * @Brief: Structure representing HW resource object + * + * @res_id: Unique resource ID within res_type objects + * for a particular HW + * @res_state: State of the resource + * @hw_intf: HW Interface of HW to which this resource + * belongs + * @res_priv: Private data of the resource + * @irq_handle: handle returned on subscribing for IRQ event + * @init: function pointer to init the HW resource + * @deinit: function pointer to deinit the HW resource + * @start: function pointer to start the HW resource + * @stop: function pointer to stop the HW resource + * @process_cmd: function pointer for processing commands + * specific to the resource + */ +struct cam_custom_resource_node { + uint32_t res_id; + enum cam_custom_hw_resource_state res_state; + struct cam_hw_intf *hw_intf; + void *res_priv; + int irq_handle; + + int (*init)(struct cam_custom_resource_node *rsrc_node, + void *init_args, uint32_t arg_size); + int (*deinit)(struct cam_custom_resource_node *rsrc_node, + void *deinit_args, uint32_t arg_size); + int (*start)(struct cam_custom_resource_node *rsrc_node); + int (*stop)(struct 
cam_custom_resource_node *rsrc_node); + int (*process_cmd)(struct cam_custom_resource_node *rsrc_node, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size); +}; +#endif /* _CAM_CUSTOM_HW_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/include/cam_custom_hw_mgr_intf.h b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/include/cam_custom_hw_mgr_intf.h new file mode 100644 index 0000000000..673735d523 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_cust/cam_custom_hw_mgr/include/cam_custom_hw_mgr_intf.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CUSTOM_HW_MGR_INTF_H_ +#define _CAM_CUSTOM_HW_MGR_INTF_H_ + +#include +#include +#include +#include +#include "cam_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_custom_hw.h" + +#define CAM_CUSTOM_HW_TYPE_1 1 + +#define CAM_CUSTOM_HW_RES_MAX 32 + +#define CAM_CUSTOM_HW_SUB_MOD_MAX 1 +#define CAM_CUSTOM_CSID_HW_MAX 1 + +enum cam_custom_hw_event_type { + CAM_CUSTOM_HW_EVENT_ERROR, + CAM_CUSTOM_HW_EVENT_RUP_DONE, + CAM_CUSTOM_HW_EVENT_FRAME_DONE, + CAM_CUSTOM_HW_EVENT_MAX +}; + +enum cam_custom_cmd_types { + CAM_CUSTOM_CMD_NONE, + CAM_CUSTOM_SET_IRQ_CB, + CAM_CUSTOM_SUBMIT_REQ, +}; + +enum cam_custom_hw_mgr_cmd { + CAM_CUSTOM_HW_MGR_CMD_NONE, + CAM_CUSTOM_HW_MGR_PROG_DEFAULT_CONFIG, +}; + +/** + * struct cam_custom_hw_cmd_args - Payload for hw manager command + * + * @cmd_type HW command type + * @reserved any other required data + */ +struct cam_custom_hw_cmd_args { + uint32_t cmd_type; + uint32_t reserved; +}; + +/** + * struct cam_custom_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF + * + * @timestamp: Time stamp for the sof event + * @boot_time: Boot time stamp for the sof event + * + */ +struct cam_custom_hw_sof_event_data { + uint64_t timestamp; + uint64_t boot_time; +}; + +/** + * struct 
cam_custom_hw_reg_update_event_data - Event payload for + * CAM_HW_EVENT_REG_UPDATE + * + * @timestamp: Time stamp for the reg update event + * + */ +struct cam_custom_hw_reg_update_event_data { + uint64_t timestamp; +}; + +/** + * struct cam_custom_hw_done_event_data - Event payload for CAM_HW_EVENT_DONE + * + * @num_handles: Number of resource handeles + * @resource_handle: Resource handle array + * + */ +struct cam_custom_hw_done_event_data { + uint32_t num_handles; + uint32_t resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX]; +}; + +/** + * struct cam_custom_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR + * + * @error_type: Error type for the error event + * @timestamp: Timestamp for the error event + */ +struct cam_custom_hw_error_event_data { + uint32_t error_type; + uint64_t timestamp; +}; + +/** + * struct cam_custom_stop_args - hardware stop arguments + * + * @stop_only Send stop only to hw drivers. No Deinit to be + * done. + */ +struct cam_custom_stop_args { + bool stop_only; +}; + +/** + * struct cam_custom_start_args - custom hardware start arguments + * + * @hw_config: Hardware configuration commands. + * @start_only Send start only to hw drivers. No init to + * be done. 
+ * + */ +struct cam_custom_start_args { + struct cam_hw_config_args hw_config; + bool start_only; +}; + +/** + * struct cam_custom_prepare_hw_update_data - hw prepare data + * + * @packet_opcode_type: Packet header opcode in the packet header + * this opcode defines, packet is init packet or + * update packet + * @buffer_addr: IO Buffer address + * + */ +struct cam_custom_prepare_hw_update_data { + uint32_t packet_opcode_type; + uint32_t num_cfg; + uint64_t io_addr[CAM_PACKET_MAX_PLANES]; +}; + +/** + * struct cam_custom_hw_cb_args : HW manager callback args + * + * @res_type : resource type + * @err_type : error type + */ +struct cam_custom_hw_cb_args { + uint32_t res_type; + uint32_t err_type; +}; + +/** + * cam_custom_hw_sub_mod_init() + * + * @Brief: Initialize Custom HW device + * + * @custom_hw: cust_hw interface to fill in and return on + * successful initialization + * @hw_idx: Index of Custom HW + */ +int cam_custom_hw_sub_mod_init(struct cam_hw_intf **custom_hw, uint32_t hw_idx); + +/** + * cam_custom_csid_hw_init() + * + * @Brief: Initialize Custom HW device + * + * @custom_hw: cust_hw interface to fill in and return on + * successful initialization + * @hw_idx: Index of Custom HW + */ +int cam_custom_csid_hw_init( + struct cam_hw_intf **custom_hw, uint32_t hw_idx); + +#endif /* _CAM_CUSTOM_HW_MGR_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_context.c b/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_context.c new file mode 100644 index 0000000000..39aff07797 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_context.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include + +#include "cam_debug_util.h" +#include "cam_fd_context.h" +#include "cam_trace.h" + +static const char fd_dev_name[] = "cam-fd"; + +/* Functions in Available state */ +static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *args) +{ + int rc; + + rc = cam_context_acquire_dev_to_hw(ctx, args); + if (rc) { + CAM_ERR(CAM_FD, "Failed in Acquire dev, rc=%d", rc); + return rc; + } + + ctx->state = CAM_CTX_ACQUIRED; + trace_cam_context_state("FD", ctx); + + return rc; +} + +/* Functions in Acquired state */ +static int __cam_fd_ctx_release_dev_in_acquired(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_release_dev_to_hw(ctx, cmd); + if (rc) { + CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc); + return rc; + } + + ctx->state = CAM_CTX_AVAILABLE; + trace_cam_context_state("FD", ctx); + + return rc; +} + +static int __cam_fd_ctx_config_dev_in_acquired(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_prepare_dev_to_hw(ctx, cmd); + if (rc) { + CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc); + return rc; + } + + return rc; +} + +static int __cam_fd_ctx_start_dev_in_acquired(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_start_dev_to_hw(ctx, cmd); + if (rc) { + CAM_ERR(CAM_FD, "Failed in Start dev, rc=%d", rc); + return rc; + } + + ctx->state = CAM_CTX_ACTIVATED; + trace_cam_context_state("FD", ctx); + + return rc; +} + +/* Functions in Activated state */ +static int __cam_fd_ctx_stop_dev_in_activated(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_stop_dev_to_hw(ctx); + if (rc) { + CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc); + return rc; + } + + ctx->state = CAM_CTX_ACQUIRED; + trace_cam_context_state("FD", ctx); + + return rc; +} + +static int __cam_fd_ctx_release_dev_in_activated(struct 
cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + rc = __cam_fd_ctx_stop_dev_in_activated(ctx, NULL); + if (rc) { + CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc); + return rc; + } + + rc = __cam_fd_ctx_release_dev_in_acquired(ctx, cmd); + if (rc) { + CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc); + return rc; + } + + return rc; +} + +static int __cam_fd_ctx_dump_dev_in_activated( + struct cam_context *ctx, + struct cam_dump_req_cmd *cmd) +{ + int rc; + + rc = cam_context_dump_dev_to_hw(ctx, cmd); + if (rc) + CAM_ERR(CAM_FD, "Failed to dump device, rc=%d", rc); + + return rc; +} + +static int __cam_fd_ctx_flush_dev_in_activated(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd) +{ + int rc; + struct cam_context_utils_flush_args flush_args; + + flush_args.cmd = cmd; + flush_args.flush_active_req = true; + + rc = cam_context_flush_dev_to_hw(ctx, &flush_args); + if (rc) + CAM_ERR(CAM_ICP, "Failed to flush device, rc=%d", rc); + + return rc; +} +static int __cam_fd_ctx_config_dev_in_activated( + struct cam_context *ctx, struct cam_config_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_prepare_dev_to_hw(ctx, cmd); + if (rc) { + CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc); + return rc; + } + + return rc; +} + +static int __cam_fd_ctx_handle_irq_in_activated(void *context, + uint32_t evt_id, void *evt_data) +{ + int rc; + + rc = cam_context_buf_done_from_hw(context, evt_data, evt_id); + if (rc) { + CAM_ERR(CAM_FD, "Failed in buf done, rc=%d", rc); + return rc; + } + + return rc; +} + +/* top state machine */ +static struct cam_ctx_ops + cam_fd_ctx_state_machine[CAM_CTX_STATE_MAX] = { + /* Uninit */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Available */ + { + .ioctl_ops = { + .acquire_dev = __cam_fd_ctx_acquire_dev_in_available, + }, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Acquired */ + { + .ioctl_ops = { + .release_dev = __cam_fd_ctx_release_dev_in_acquired, + .config_dev = 
__cam_fd_ctx_config_dev_in_acquired, + .start_dev = __cam_fd_ctx_start_dev_in_acquired, + }, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Ready */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Flushed */ + { + .ioctl_ops = {}, + }, + /* Activated */ + { + .ioctl_ops = { + .stop_dev = __cam_fd_ctx_stop_dev_in_activated, + .release_dev = __cam_fd_ctx_release_dev_in_activated, + .config_dev = __cam_fd_ctx_config_dev_in_activated, + .flush_dev = __cam_fd_ctx_flush_dev_in_activated, + .dump_dev = __cam_fd_ctx_dump_dev_in_activated, + }, + .crm_ops = {}, + .irq_ops = __cam_fd_ctx_handle_irq_in_activated, + }, +}; + + +int cam_fd_context_init(struct cam_fd_context *fd_ctx, + struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, int img_iommu_hdl) +{ + int rc; + + if (!base_ctx || !fd_ctx) { + CAM_ERR(CAM_FD, "Invalid Context %pK %pK", base_ctx, fd_ctx); + return -EINVAL; + } + + memset(fd_ctx, 0, sizeof(*fd_ctx)); + + rc = cam_context_init(base_ctx, fd_dev_name, CAM_FD, ctx_id, + NULL, hw_intf, fd_ctx->req_base, CAM_CTX_REQ_MAX, img_iommu_hdl); + if (rc) { + CAM_ERR(CAM_FD, "Camera Context Base init failed, rc=%d", rc); + return rc; + } + + fd_ctx->base = base_ctx; + base_ctx->ctx_priv = fd_ctx; + base_ctx->state_machine = cam_fd_ctx_state_machine; + base_ctx->max_hw_update_entries = CAM_CTX_CFG_MAX; + base_ctx->max_in_map_entries = CAM_CTX_CFG_MAX; + base_ctx->max_out_map_entries = CAM_CTX_CFG_MAX; + + return rc; +} + +int cam_fd_context_deinit(struct cam_fd_context *fd_ctx) +{ + int rc = 0; + + if (!fd_ctx || !fd_ctx->base) { + CAM_ERR(CAM_FD, "Invalid inputs %pK", fd_ctx); + return -EINVAL; + } + + rc = cam_context_deinit(fd_ctx->base); + if (rc) + CAM_ERR(CAM_FD, "Error in base deinit, rc=%d", rc); + + memset(fd_ctx, 0, sizeof(*fd_ctx)); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_context.h b/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_context.h new file mode 100644 
index 0000000000..1f4e846093 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_context.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_FD_CONTEXT_H_ +#define _CAM_FD_CONTEXT_H_ + +#include "cam_context.h" +#include "cam_context_utils.h" +#include "cam_hw_mgr_intf.h" +#include "cam_req_mgr_interface.h" + +/** + * struct cam_fd_context - Face Detection context information + * + * @base : Base context pointer for this FD context + * @req_base : List of base requests for this FD context + */ +struct cam_fd_context { + struct cam_context *base; + struct cam_ctx_request req_base[CAM_CTX_REQ_MAX]; +}; + +int cam_fd_context_init(struct cam_fd_context *fd_ctx, + struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, int img_iommu_hdl); +int cam_fd_context_deinit(struct cam_fd_context *ctx); + +#endif /* _CAM_FD_CONTEXT_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_dev.c b/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_dev.c new file mode 100644 index 0000000000..fb441675c4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_dev.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "cam_subdev.h" +#include "cam_node.h" +#include "cam_fd_context.h" +#include "cam_fd_hw_mgr.h" +#include "cam_fd_hw_mgr_intf.h" +#include "camera_main.h" +#include "cam_req_mgr_dev.h" + +#define CAM_FD_DEV_NAME "cam-fd" + +/** + * struct cam_fd_dev - FD device information + * + * @sd: Subdev information + * @base_ctx: List of base contexts + * @fd_ctx: List of FD contexts + * @lock: Mutex handle + * @open_cnt: FD subdev open count + * @probe_done: Whether FD probe is completed + */ +struct cam_fd_dev { + struct cam_subdev sd; + struct cam_context base_ctx[CAM_CTX_MAX]; + struct cam_fd_context fd_ctx[CAM_CTX_MAX]; + struct mutex lock; + uint32_t open_cnt; + bool probe_done; +}; + +static struct cam_fd_dev g_fd_dev; + +static int cam_fd_dev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_fd_dev *fd_dev = &g_fd_dev; + + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_LOCK); + + if (!fd_dev->probe_done) { + CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev); + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + return -ENODEV; + } + + mutex_lock(&fd_dev->lock); + fd_dev->open_cnt++; + CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt); + mutex_unlock(&fd_dev->lock); + + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + + return 0; +} + +static int cam_fd_dev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_fd_dev *fd_dev = &g_fd_dev; + struct cam_node *node = v4l2_get_subdevdata(sd); + + if (!fd_dev->probe_done) { + CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev); + return -ENODEV; + } + + mutex_lock(&fd_dev->lock); + if (fd_dev->open_cnt == 0) { + CAM_WARN(CAM_FD, "device already closed"); + mutex_unlock(&fd_dev->lock); + return 0; + } + fd_dev->open_cnt--; + CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt); + mutex_unlock(&fd_dev->lock); + + if (!node) { + CAM_ERR(CAM_FD, "Node ptr is NULL"); + return -EINVAL; 
+ } + + cam_node_shutdown(node); + + return 0; +} + +static int cam_fd_dev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_FD, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_fd_dev_close_internal(sd, fh); +} + +static const struct v4l2_subdev_internal_ops cam_fd_subdev_internal_ops = { + .open = cam_fd_dev_open, + .close = cam_fd_dev_close, +}; + +static int cam_fd_dev_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc; + int i; + struct cam_hw_mgr_intf hw_mgr_intf; + struct cam_node *node; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + g_fd_dev.sd.internal_ops = &cam_fd_subdev_internal_ops; + g_fd_dev.sd.close_seq_prior = CAM_SD_CLOSE_MEDIUM_PRIORITY; + + /* Initialize the v4l2 subdevice first. (create cam_node) */ + rc = cam_subdev_probe(&g_fd_dev.sd, pdev, CAM_FD_DEV_NAME, + CAM_FD_DEVICE_TYPE); + if (rc) { + CAM_ERR(CAM_FD, "FD cam_subdev_probe failed, rc=%d", rc); + return rc; + } + node = (struct cam_node *) g_fd_dev.sd.token; + + rc = cam_fd_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf); + if (rc) { + CAM_ERR(CAM_FD, "Failed in initializing FD HW manager, rc=%d", + rc); + goto unregister_subdev; + } + + for (i = 0; i < CAM_CTX_MAX; i++) { + rc = cam_fd_context_init(&g_fd_dev.fd_ctx[i], + &g_fd_dev.base_ctx[i], &node->hw_mgr_intf, i, -1); + if (rc) { + CAM_ERR(CAM_FD, "FD context init failed i=%d, rc=%d", + i, rc); + goto deinit_ctx; + } + } + + rc = cam_node_init(node, &hw_mgr_intf, g_fd_dev.base_ctx, CAM_CTX_MAX, + CAM_FD_DEV_NAME); + if (rc) { + CAM_ERR(CAM_FD, "FD node init failed, rc=%d", rc); + goto deinit_ctx; + } + + node->sd_handler = cam_fd_dev_close_internal; + mutex_init(&g_fd_dev.lock); + g_fd_dev.probe_done = true; + CAM_DBG(CAM_FD, "Component bound successfully"); + 
CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return 0; + +deinit_ctx: + for (--i; i >= 0; i--) { + if (cam_fd_context_deinit(&g_fd_dev.fd_ctx[i])) + CAM_ERR(CAM_FD, "FD context %d deinit failed", i); + } +unregister_subdev: + if (cam_subdev_remove(&g_fd_dev.sd)) + CAM_ERR(CAM_FD, "Failed in subdev remove"); + + return rc; +} + +static void cam_fd_dev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i, rc; + struct platform_device *pdev = to_platform_device(dev); + + for (i = 0; i < CAM_CTX_MAX; i++) { + rc = cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]); + if (rc) + CAM_ERR(CAM_FD, "FD context %d deinit failed, rc=%d", + i, rc); + } + + rc = cam_fd_hw_mgr_deinit(pdev->dev.of_node); + if (rc) + CAM_ERR(CAM_FD, "Failed in hw mgr deinit, rc=%d", rc); + + rc = cam_subdev_remove(&g_fd_dev.sd); + if (rc) + CAM_ERR(CAM_FD, "Unregister failed, rc=%d", rc); + + mutex_destroy(&g_fd_dev.lock); + g_fd_dev.probe_done = false; +} + +const static struct component_ops cam_fd_dev_component_ops = { + .bind = cam_fd_dev_component_bind, + .unbind = cam_fd_dev_component_unbind, +}; + +static int cam_fd_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_FD, "Adding FD dev component"); + rc = component_add(&pdev->dev, &cam_fd_dev_component_ops); + if (rc) + CAM_ERR(CAM_FD, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_fd_dev_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_fd_dev_component_ops); + return 0; +} + +static const struct of_device_id cam_fd_dt_match[] = { + { + .compatible = "qcom,cam-fd" + }, + {} +}; + +struct platform_driver cam_fd_driver = { + .probe = cam_fd_dev_probe, + .remove = cam_fd_dev_remove, + .driver = { + .name = "cam_fd", + .owner = THIS_MODULE, + .of_match_table = cam_fd_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_fd_dev_init_module(void) 
+{ + return platform_driver_register(&cam_fd_driver); +} + +void cam_fd_dev_exit_module(void) +{ + platform_driver_unregister(&cam_fd_driver); +} + +MODULE_DESCRIPTION("MSM FD driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_dev.h b/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_dev.h new file mode 100644 index 0000000000..74db41b8dc --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/cam_fd_dev.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_FD_DEV_H_ +#define _CAM_FD_DEV_H_ + +/** + * @brief : API to register FD Dev to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_fd_dev_init_module(void); + +/** + * @brief : API to remove FD Dev interface from platform framework. + */ +void cam_fd_dev_exit_module(void); + +#endif /* _CAM_FD_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c new file mode 100644 index 0000000000..c17c36ed3b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c @@ -0,0 +1,2210 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_soc_util.h" +#include "cam_mem_mgr_api.h" +#include "cam_smmu_api.h" +#include "cam_packet_util.h" +#include "cam_fd_context.h" +#include "cam_fd_hw_intf.h" +#include "cam_fd_hw_core.h" +#include "cam_fd_hw_soc.h" +#include "cam_fd_hw_mgr_intf.h" +#include "cam_fd_hw_mgr.h" +#include "cam_trace.h" + +static struct cam_fd_hw_mgr g_fd_hw_mgr; + +static int cam_fd_mgr_util_packet_validate(struct cam_packet *packet, + size_t remain_len) +{ + struct cam_cmd_buf_desc *cmd_desc = NULL; + int i, rc; + + if (!packet) + return -EINVAL; + + CAM_DBG(CAM_FD, "Packet request=%d, op_code=0x%x, size=%d, flags=%d", + packet->header.request_id, packet->header.op_code, + packet->header.size, packet->header.flags); + CAM_DBG(CAM_FD, + "Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)", + packet->cmd_buf_offset, packet->num_cmd_buf, + packet->io_configs_offset, packet->num_io_configs); + CAM_DBG(CAM_FD, + "Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)", + packet->patch_offset, packet->num_patches, + packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index); + + if (cam_packet_util_validate_packet(packet, remain_len)) { + CAM_ERR(CAM_FD, "invalid packet:%d %d %d %d %d", + packet->kmd_cmd_buf_index, + packet->num_cmd_buf, packet->cmd_buf_offset, + packet->io_configs_offset, packet->header.size); + return -EINVAL; + } + + /* All buffers must come through io config, do not support patching */ + if (packet->num_patches || !packet->num_io_configs || !packet->num_cmd_buf) { + CAM_ERR(CAM_FD, "wrong number of cmd/patch info: %u %u", + packet->num_cmd_buf, packet->num_patches); + return -EINVAL; + } + + /* KMD Buf index can never be greater than or equal to num cmd bufs */ + if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf) { + CAM_ERR(CAM_FD, "Invalid kmd index %d (%d)", + packet->kmd_cmd_buf_index, packet->num_cmd_buf); + return -EINVAL; + } + + if ((packet->header.op_code & 
0xff) != + CAM_PACKET_OPCODES_FD_FRAME_UPDATE) { + CAM_ERR(CAM_FD, "Invalid op_code %u", + packet->header.op_code & 0xff); + return -EINVAL; + } + + cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload_flex + + packet->cmd_buf_offset); + + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + /* + * We can allow 0 length cmd buffer. This can happen in case + * umd gives an empty cmd buffer as kmd buffer + */ + if (!cmd_desc[i].length) + continue; + + if ((cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_GENERIC) && + (cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)) { + CAM_ERR(CAM_FD, "Invalid meta_data [%d] %u", i, + cmd_desc[i].meta_data); + return -EINVAL; + } + + CAM_DBG(CAM_FD, + "CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d", + i, + cmd_desc[i].mem_handle, cmd_desc[i].offset, + cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type, + cmd_desc[i].meta_data); + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) { + CAM_ERR(CAM_FD, "Invalid cmd buffer %d", i); + return rc; + } + } + + return 0; +} + +static int cam_fd_mgr_util_put_ctx( + struct list_head *src_list, + struct cam_fd_hw_mgr_ctx **fd_ctx) +{ + int rc = 0; + struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL; + + mutex_lock(&g_fd_hw_mgr.ctx_mutex); + ctx_ptr = *fd_ctx; + if (ctx_ptr) + list_add_tail(&ctx_ptr->list, src_list); + *fd_ctx = NULL; + mutex_unlock(&g_fd_hw_mgr.ctx_mutex); + + return rc; +} + +static int cam_fd_mgr_util_get_ctx( + struct list_head *src_list, + struct cam_fd_hw_mgr_ctx **fd_ctx) +{ + int rc = 0; + struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL; + + mutex_lock(&g_fd_hw_mgr.ctx_mutex); + if (!list_empty(src_list)) { + ctx_ptr = list_first_entry(src_list, + struct cam_fd_hw_mgr_ctx, list); + list_del_init(&ctx_ptr->list); + } else { + CAM_ERR(CAM_FD, "No more free fd hw mgr ctx"); + rc = -1; + } + *fd_ctx = ctx_ptr; + mutex_unlock(&g_fd_hw_mgr.ctx_mutex); + + return 
rc; +} + +static int cam_fd_mgr_util_put_frame_req( + struct list_head *src_list, + struct cam_fd_mgr_frame_request **frame_req, bool free_buffer) +{ + int rc = 0; + struct cam_fd_mgr_frame_request *req_ptr = NULL; + + mutex_lock(&g_fd_hw_mgr.frame_req_mutex); + req_ptr = *frame_req; + if (req_ptr) { + if (free_buffer) + cam_mem_put_cpu_buf(frame_req->hw_update_entries[0]->handle); + + list_del_init(&req_ptr->list); + list_add_tail(&req_ptr->list, src_list); + } + *frame_req = NULL; + mutex_unlock(&g_fd_hw_mgr.frame_req_mutex); + + return rc; +} + +static int cam_fd_mgr_util_get_frame_req( + struct list_head *src_list, + struct cam_fd_mgr_frame_request **frame_req) +{ + int rc = 0; + struct cam_fd_mgr_frame_request *req_ptr = NULL; + + mutex_lock(&g_fd_hw_mgr.frame_req_mutex); + if (!list_empty(src_list)) { + req_ptr = list_first_entry(src_list, + struct cam_fd_mgr_frame_request, list); + list_del_init(&req_ptr->list); + } else { + CAM_DBG(CAM_FD, "Frame req not available"); + rc = -EPERM; + } + *frame_req = req_ptr; + mutex_unlock(&g_fd_hw_mgr.frame_req_mutex); + + return rc; +} + +static int cam_fd_mgr_util_get_device(struct cam_fd_hw_mgr *hw_mgr, + struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_fd_device **hw_device) +{ + if (!hw_mgr || !hw_ctx || !hw_device) { + CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx, + hw_device); + return -EINVAL; + } + + if ((hw_ctx->device_index < 0) || + (hw_ctx->device_index >= CAM_FD_HW_MAX)) { + CAM_ERR(CAM_FD, "Invalid device indx %d", hw_ctx->device_index); + return -EINVAL; + } + + CAM_DBG(CAM_FD, "ctx_index=%u, hw_ctx=%d", hw_ctx->ctx_index, + hw_ctx->device_index); + + *hw_device = &hw_mgr->hw_device[hw_ctx->device_index]; + + return 0; +} + +static int cam_fd_mgr_util_release_device(struct cam_fd_hw_mgr *hw_mgr, + struct cam_fd_hw_mgr_ctx *hw_ctx) +{ + struct cam_fd_device *hw_device; + struct cam_fd_hw_release_args hw_release_args; + int rc; + + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + if 
(rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + return rc; + } + + if (hw_device->hw_intf->hw_ops.release) { + hw_release_args.hw_ctx = hw_ctx; + hw_release_args.ctx_hw_private = hw_ctx->ctx_hw_private; + rc = hw_device->hw_intf->hw_ops.release( + hw_device->hw_intf->hw_priv, &hw_release_args, + sizeof(hw_release_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in HW release %d", rc); + return rc; + } + } else { + CAM_ERR(CAM_FD, "Invalid release function"); + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + hw_device->num_ctxts--; + + if (!hw_device->num_ctxts) { + mutex_lock(&hw_device->lock); + hw_device->ready_to_process = true; + hw_device->req_id = -1; + hw_device->cur_hw_ctx = NULL; + mutex_unlock(&hw_device->lock); + } + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + hw_ctx->device_index = -1; + + return rc; +} + +static int cam_fd_mgr_util_select_device(struct cam_fd_hw_mgr *hw_mgr, + struct cam_fd_hw_mgr_ctx *hw_ctx, + struct cam_fd_acquire_dev_info *fd_acquire_args) +{ + int i, rc; + struct cam_fd_hw_reserve_args hw_reserve_args; + struct cam_fd_device *hw_device = NULL; + + if (!hw_mgr || !hw_ctx || !fd_acquire_args) { + CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx, + fd_acquire_args); + return -EINVAL; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + /* Check if a device is free which can satisfy the requirements */ + for (i = 0; i < hw_mgr->num_devices; i++) { + hw_device = &hw_mgr->hw_device[i]; + CAM_DBG(CAM_FD, + "[%d] : num_ctxts=%d, modes=%d, raw_results=%d", + i, hw_device->num_ctxts, + hw_device->hw_caps.supported_modes, + hw_device->hw_caps.raw_results_available); + if ((hw_device->num_ctxts == 0) && + (fd_acquire_args->mode & + hw_device->hw_caps.supported_modes) && + (!fd_acquire_args->get_raw_results || + hw_device->hw_caps.raw_results_available)) { + CAM_DBG(CAM_FD, "Found dedicated HW Index=%d", i); + mutex_lock(&hw_device->lock); + hw_device->ready_to_process = true; + hw_device->req_id = -1; + hw_device->cur_hw_ctx = NULL; 
+ mutex_unlock(&hw_device->lock); + hw_device->num_ctxts++; + break; + } + } + + /* + * We couldn't find a free HW which meets requirement, now check if + * there is a HW which meets acquire requirements + */ + if (i == hw_mgr->num_devices) { + for (i = 0; i < hw_mgr->num_devices; i++) { + hw_device = &hw_mgr->hw_device[i]; + if ((fd_acquire_args->mode & + hw_device->hw_caps.supported_modes) && + (!fd_acquire_args->get_raw_results || + hw_device->hw_caps.raw_results_available)) { + hw_device->num_ctxts++; + CAM_DBG(CAM_FD, + "Found sharing HW Index=%d, num_ctxts=%d", + i, hw_device->num_ctxts); + break; + } + } + } + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + if ((i == hw_mgr->num_devices) || !hw_device) { + CAM_ERR(CAM_FD, "Couldn't acquire HW %d %d", + fd_acquire_args->mode, + fd_acquire_args->get_raw_results); + return -EBUSY; + } + + CAM_DBG(CAM_FD, "Device index %d selected for this acquire", i); + + /* Check if we can reserve this HW */ + if (hw_device->hw_intf->hw_ops.reserve) { + hw_reserve_args.hw_ctx = hw_ctx; + hw_reserve_args.mode = fd_acquire_args->mode; + rc = hw_device->hw_intf->hw_ops.reserve( + hw_device->hw_intf->hw_priv, &hw_reserve_args, + sizeof(hw_reserve_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in HW reserve %d", rc); + return rc; + } + hw_ctx->ctx_hw_private = hw_reserve_args.ctx_hw_private; + } else { + CAM_ERR(CAM_FD, "Invalid reserve function"); + return -EPERM; + } + + /* Update required info in hw context */ + hw_ctx->device_index = i; + + CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index, + hw_ctx->device_index); + + return 0; +} + +static int cam_fd_mgr_util_pdev_get_hw_intf(struct device_node *of_node, + int i, struct cam_hw_intf **device_hw_intf) +{ + struct device_node *device_node = NULL; + struct platform_device *child_pdev = NULL; + struct cam_hw_intf *hw_intf = NULL; + const char *name = NULL; + int rc; + + rc = of_property_read_string_index(of_node, "compat-hw-name", i, &name); + if (rc) { + 
CAM_ERR(CAM_FD, "Getting dev object name failed %d %d", i, rc); + goto put_node; + } + + device_node = of_find_node_by_name(NULL, name); + if (!device_node) { + CAM_ERR(CAM_FD, "Cannot find node in dtsi %s", name); + return -ENODEV; + } + + child_pdev = of_find_device_by_node(device_node); + if (!child_pdev) { + CAM_ERR(CAM_FD, "Failed to find device on bus %s", + device_node->name); + rc = -ENODEV; + goto put_node; + } + + hw_intf = (struct cam_hw_intf *)platform_get_drvdata(child_pdev); + if (!hw_intf) { + CAM_ERR(CAM_FD, "No driver data for child device"); + rc = -ENODEV; + goto put_node; + } + + CAM_DBG(CAM_FD, "child type %d index %d child_intf %pK", + hw_intf->hw_type, hw_intf->hw_idx, hw_intf); + + if (hw_intf->hw_idx >= CAM_FD_HW_MAX) { + CAM_ERR(CAM_FD, "hw_idx invalid %d", hw_intf->hw_idx); + rc = -ENODEV; + goto put_node; + } + + rc = 0; + *device_hw_intf = hw_intf; + +put_node: + of_node_put(device_node); + + return rc; +} + +static int cam_fd_packet_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + struct cam_fd_hw_cmd_prestart_args *prestart_args = + (struct cam_fd_hw_cmd_prestart_args *)user_data; + + if (!blob_data || (blob_size == 0)) { + CAM_ERR(CAM_FD, "Invalid blob info %pK %u", blob_data, + blob_size); + return -EINVAL; + } + + if (!prestart_args) { + CAM_ERR(CAM_FD, "Invalid user data"); + return -EINVAL; + } + + switch (blob_type) { + case CAM_FD_BLOB_TYPE_RAW_RESULTS_REQUIRED: { + uint32_t *get_raw_results = (uint32_t *)blob_data; + + if (sizeof(uint32_t) != blob_size) { + CAM_ERR(CAM_FD, "Invalid blob size %zu %u", + sizeof(uint32_t), blob_size); + return -EINVAL; + } + + prestart_args->get_raw_results = *get_raw_results; + break; + } + case CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST: { + struct cam_fd_soc_clock_bw_request *clk_req = + (struct cam_fd_soc_clock_bw_request *)blob_data; + + if (sizeof(struct cam_fd_soc_clock_bw_request) != blob_size) { + CAM_ERR(CAM_FD, "Invalid blob size %zu 
%u", + sizeof(struct cam_fd_soc_clock_bw_request), + blob_size); + return -EINVAL; + } + + CAM_DBG(CAM_FD, "SOC Clk Request clock=%lld, bw=%lld", + clk_req->clock_rate, clk_req->bandwidth); + + break; + } + default: + CAM_WARN(CAM_FD, "Unknown blob type %d", blob_type); + break; + } + + return 0; +} + +static int cam_fd_mgr_util_parse_generic_cmd_buffer( + struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_packet *packet, + struct cam_fd_hw_cmd_prestart_args *prestart_args) +{ + struct cam_cmd_buf_desc *cmd_desc = NULL; + int i, rc = 0; + + cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload_flex + + packet->cmd_buf_offset); + + for (i = 0; i < packet->num_cmd_buf; i++) { + if (!cmd_desc[i].length) + continue; + + if (cmd_desc[i].meta_data == CAM_FD_CMD_BUFFER_ID_CDM) + continue; + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i], + cam_fd_packet_generic_blob_handler, prestart_args); + if (rc) + CAM_ERR(CAM_FD, "Failed in processing blobs %d", rc); + + break; + } + + return rc; +} + +static int cam_fd_mgr_util_get_buf_map_requirement(uint32_t direction, + uint32_t resource_type, bool *need_io_map, bool *need_cpu_map) +{ + if (!need_io_map || !need_cpu_map) { + CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK", need_io_map, + need_cpu_map); + return -EINVAL; + } + + if (direction == CAM_BUF_INPUT) { + switch (resource_type) { + case CAM_FD_INPUT_PORT_ID_IMAGE: + *need_io_map = true; + *need_cpu_map = false; + break; + default: + CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d", + direction, resource_type); + return -EINVAL; + } + } else if (direction == CAM_BUF_OUTPUT) { + switch (resource_type) { + case CAM_FD_OUTPUT_PORT_ID_RESULTS: + *need_io_map = true; + *need_cpu_map = true; + break; + case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS: + *need_io_map = true; + *need_cpu_map = true; + break; + case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER: + *need_io_map = true; + *need_cpu_map 
= false; + break; + default: + CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d", + direction, resource_type); + return -EINVAL; + } + } else { + CAM_WARN(CAM_FD, "Invalid direction %d", direction); + return -EINVAL; + } + + return 0; +} + +static int cam_fd_mgr_put_cpu_buf(struct cam_hw_prepare_update_args *prepare) +{ + int i, rc; + uint32_t plane; + bool need_io_map, need_cpu_map; + struct cam_buf_io_cfg *io_cfg; + + io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *) + &prepare->packet->payload_flex + prepare->packet->io_configs_offset); + + if (!io_cfg) + return -EINVAL; + + for (i = 0; i < prepare->packet->num_io_configs; i++) { + rc = cam_fd_mgr_util_get_buf_map_requirement( + io_cfg[i].direction, io_cfg[i].resource_type, + &need_io_map, &need_cpu_map); + + if (rc || !need_cpu_map) + continue; + + for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) + cam_mem_put_cpu_buf(io_cfg[i].mem_handle[plane]); + } + return 0; +} + +static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl, + struct cam_hw_prepare_update_args *prepare, + struct cam_fd_hw_io_buffer *input_buf, + struct cam_fd_hw_io_buffer *output_buf, uint32_t io_buf_size) +{ + int rc = -EINVAL; + uint32_t plane, num_out_buf, num_in_buf; + int i, j; + struct cam_buf_io_cfg *io_cfg; + dma_addr_t io_addr[CAM_PACKET_MAX_PLANES]; + uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES]; + size_t size; + bool need_io_map, need_cpu_map; + + /* Get IO Buf information */ + num_out_buf = 0; + num_in_buf = 0; + io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *) + &prepare->packet->payload_flex + prepare->packet->io_configs_offset); + + for (i = 0; i < prepare->packet->num_io_configs; i++) { + CAM_DBG(CAM_FD, + "IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]", + i, io_cfg[i].mem_handle[0], io_cfg[i].direction, + io_cfg[i].resource_type, + io_cfg[i].fence, io_cfg[i].format); + + if ((num_in_buf >= io_buf_size) || + (num_out_buf >= io_buf_size)) { + CAM_ERR(CAM_FD, "Invalid number of buffers %d %d %d", + 
num_in_buf, num_out_buf, io_buf_size); + return -EINVAL; + } + + rc = cam_fd_mgr_util_get_buf_map_requirement( + io_cfg[i].direction, io_cfg[i].resource_type, + &need_io_map, &need_cpu_map); + if (rc) { + CAM_WARN(CAM_FD, "Invalid io buff [%d] : %d %d %d", + i, io_cfg[i].direction, + io_cfg[i].resource_type, rc); + continue; + } + + memset(io_addr, 0x0, sizeof(io_addr)); + for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) { + if (!io_cfg[i].mem_handle[plane]) + break; + + io_addr[plane] = 0x0; + cpu_addr[plane] = 0x0; + + if (need_io_map) { + rc = cam_mem_get_io_buf( + io_cfg[i].mem_handle[plane], + iommu_hdl, &io_addr[plane], &size, NULL, + prepare->buf_tracker); + if (rc) { + CAM_ERR(CAM_FD, + "Failed to get io buf %u %u %u %d", + io_cfg[i].direction, + io_cfg[i].resource_type, plane, + rc); + return -ENOMEM; + } + + if (io_cfg[i].offsets[plane] >= size) { + CAM_ERR(CAM_FD, + "Invalid io buf %u %u %u %d %u %zu", + io_cfg[i].direction, + io_cfg[i].resource_type, plane, + i, io_cfg[i].offsets[plane], + size); + return -EINVAL; + } + + io_addr[plane] += io_cfg[i].offsets[plane]; + } + + if (need_cpu_map) { + rc = cam_mem_get_cpu_buf( + io_cfg[i].mem_handle[plane], + &cpu_addr[plane], &size); + if (rc || ((io_addr[plane] & 0xFFFFFFFF) + != io_addr[plane])) { + rc = -ENOSPC; + CAM_ERR(CAM_FD, + "Invalid cpu buf %d %d %d %d", + io_cfg[i].direction, + io_cfg[i].resource_type, plane, + rc); + return rc; + } + if (io_cfg[i].offsets[plane] >= size) { + CAM_ERR(CAM_FD, + "Invalid cpu buf %d %d %d", + io_cfg[i].direction, + io_cfg[i].resource_type, plane); + cam_mem_put_cpu_buf(io_cfg[i].mem_handle[plane]); + rc = -EINVAL; + return rc; + } + cpu_addr[plane] += io_cfg[i].offsets[plane]; + } + + CAM_DBG(CAM_FD, "IO Address[%d][%d] : %pK, %pK", + io_cfg[i].direction, plane, io_addr[plane], + cpu_addr[plane]); + } + + switch (io_cfg[i].direction) { + case CAM_BUF_INPUT: { + prepare->in_map_entries[num_in_buf].resource_handle = + io_cfg[i].resource_type; + 
prepare->in_map_entries[num_in_buf].sync_id = + io_cfg[i].fence; + + input_buf[num_in_buf].valid = true; + for (j = 0; j < plane; j++) { + input_buf[num_in_buf].io_addr[j] = io_addr[j]; + input_buf[num_in_buf].cpu_addr[j] = cpu_addr[j]; + } + input_buf[num_in_buf].num_buf = plane; + input_buf[num_in_buf].io_cfg = &io_cfg[i]; + + num_in_buf++; + break; + } + case CAM_BUF_OUTPUT: { + prepare->out_map_entries[num_out_buf].resource_handle = + io_cfg[i].resource_type; + prepare->out_map_entries[num_out_buf].sync_id = + io_cfg[i].fence; + + output_buf[num_out_buf].valid = true; + for (j = 0; j < plane; j++) { + output_buf[num_out_buf].io_addr[j] = io_addr[j]; + output_buf[num_out_buf].cpu_addr[j] = + cpu_addr[j]; + } + output_buf[num_out_buf].num_buf = plane; + output_buf[num_out_buf].io_cfg = &io_cfg[i]; + + num_out_buf++; + break; + } + default: + CAM_ERR(CAM_FD, "Unsupported io direction %d", + io_cfg[i].direction); + rc = -EINVAL; + break; + } + } + + prepare->num_in_map_entries = num_in_buf; + prepare->num_out_map_entries = num_out_buf; + return rc; +} + +static int cam_fd_mgr_util_prepare_hw_update_entries( + struct cam_fd_hw_mgr *hw_mgr, + struct cam_hw_prepare_update_args *prepare, + struct cam_fd_hw_cmd_prestart_args *prestart_args, + struct cam_kmd_buf_info *kmd_buf_info) +{ + int i, rc; + struct cam_hw_update_entry *hw_entry; + uint32_t num_ent; + struct cam_fd_hw_mgr_ctx *hw_ctx = + (struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map; + struct cam_fd_device *hw_device; + uint32_t kmd_buf_max_size, kmd_buf_used_bytes = 0; + uint32_t *kmd_buf_addr; + struct cam_cmd_buf_desc *cmd_desc = NULL; + + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + if (rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + return rc; + } + + kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes); + kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes; + + prestart_args->cmd_buf_addr = kmd_buf_addr; + 
prestart_args->size = kmd_buf_max_size; + prestart_args->pre_config_buf_size = 0; + prestart_args->post_config_buf_size = 0; + + if (hw_device->hw_intf->hw_ops.process_cmd) { + rc = hw_device->hw_intf->hw_ops.process_cmd( + hw_device->hw_intf->hw_priv, CAM_FD_HW_CMD_PRESTART, + prestart_args, + sizeof(struct cam_fd_hw_cmd_prestart_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d", rc); + return rc; + } + } + + kmd_buf_used_bytes += prestart_args->pre_config_buf_size; + kmd_buf_used_bytes += prestart_args->post_config_buf_size; + + /* HW layer is expected to add commands */ + if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) { + CAM_ERR(CAM_FD, "Invalid kmd used bytes %d (%d)", + kmd_buf_used_bytes, kmd_buf_max_size); + return -ENOMEM; + } + + hw_entry = prepare->hw_update_entries; + num_ent = 0; + + if (prestart_args->pre_config_buf_size) { + if ((num_ent + 1) >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d", + num_ent, prepare->max_hw_update_entries); + return -EINVAL; + } + + hw_entry[num_ent].handle = kmd_buf_info->handle; + hw_entry[num_ent].len = prestart_args->pre_config_buf_size; + hw_entry[num_ent].offset = kmd_buf_info->offset; + + kmd_buf_info->used_bytes += prestart_args->pre_config_buf_size; + kmd_buf_info->offset += prestart_args->pre_config_buf_size; + num_ent++; + } + + /* + * set the cmd_desc to point the first command descriptor in the + * packet and update hw entries with CDM command buffers + */ + cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *) + &prepare->packet->payload_flex + prepare->packet->cmd_buf_offset); + + for (i = 0; i < prepare->packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (!cmd_desc[i].length) + continue; + + if (cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM) + continue; + + if (num_ent + 1 >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d", + 
num_ent, prepare->max_hw_update_entries); + return -EINVAL; + } + + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].offset = cmd_desc[i].offset; + num_ent++; + } + + if (prestart_args->post_config_buf_size) { + if (num_ent + 1 >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d", + num_ent, prepare->max_hw_update_entries); + return -EINVAL; + } + + hw_entry[num_ent].handle = kmd_buf_info->handle; + hw_entry[num_ent].len = prestart_args->post_config_buf_size; + hw_entry[num_ent].offset = kmd_buf_info->offset; + + kmd_buf_info->used_bytes += prestart_args->post_config_buf_size; + kmd_buf_info->offset += prestart_args->post_config_buf_size; + + num_ent++; + } + + prepare->num_hw_update_entries = num_ent; + + CAM_DBG(CAM_FD, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)", + prepare->num_hw_update_entries, prepare->num_in_map_entries, + prepare->num_out_map_entries); + + return rc; +} + +static int cam_fd_mgr_util_submit_frame(void *priv, void *data) +{ + struct cam_fd_device *hw_device; + struct cam_fd_hw_mgr *hw_mgr; + struct cam_fd_mgr_frame_request *frame_req; + struct cam_fd_hw_mgr_ctx *hw_ctx; + struct cam_fd_hw_cmd_start_args start_args; + int rc; + + if (!priv) { + CAM_ERR(CAM_FD, "Invalid data"); + return -EINVAL; + } + + hw_mgr = (struct cam_fd_hw_mgr *)priv; + mutex_lock(&hw_mgr->frame_req_mutex); + + /* Check if we have any frames pending in high priority list */ + if (!list_empty(&hw_mgr->frame_pending_list_high)) { + CAM_DBG(CAM_FD, "Pending frames in high priority list"); + frame_req = list_first_entry(&hw_mgr->frame_pending_list_high, + struct cam_fd_mgr_frame_request, list); + } else if (!list_empty(&hw_mgr->frame_pending_list_normal)) { + CAM_DBG(CAM_FD, "Pending frames in normal priority list"); + frame_req = list_first_entry(&hw_mgr->frame_pending_list_normal, + struct cam_fd_mgr_frame_request, list); + } else { + 
mutex_unlock(&hw_mgr->frame_req_mutex); + CAM_DBG(CAM_FD, "No pending frames"); + return 0; + } + + CAM_DBG(CAM_FD, "FrameSubmit : Frame[%lld]", frame_req->request_id); + hw_ctx = frame_req->hw_ctx; + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + if (rc) { + mutex_unlock(&hw_mgr->frame_req_mutex); + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + return rc; + } + + mutex_lock(&hw_device->lock); + if (hw_device->ready_to_process == false) { + if (hw_mgr->num_pending_frames > 6) { + CAM_WARN(CAM_FD, + "Device busy for longer time with cur_hw_ctx=%pK, ReqId=%lld", + hw_device->cur_hw_ctx, hw_device->req_id); + } + mutex_unlock(&hw_device->lock); + mutex_unlock(&hw_mgr->frame_req_mutex); + return -EBUSY; + } + + trace_cam_submit_to_hw("FD", frame_req->request_id); + + list_del_init(&frame_req->list); + hw_mgr->num_pending_frames--; + list_add_tail(&frame_req->list, &hw_mgr->frame_processing_list); + + if (hw_device->hw_intf->hw_ops.start) { + start_args.hw_ctx = hw_ctx; + start_args.ctx_hw_private = hw_ctx->ctx_hw_private; + start_args.hw_req_private = &frame_req->hw_req_private; + start_args.hw_update_entries = frame_req->hw_update_entries; + start_args.num_hw_update_entries = + frame_req->num_hw_update_entries; + + rc = hw_device->hw_intf->hw_ops.start( + hw_device->hw_intf->hw_priv, &start_args, + sizeof(start_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in HW Start %d", rc); + list_del_init(&frame_req->list); + mutex_unlock(&hw_device->lock); + mutex_unlock(&hw_mgr->frame_req_mutex); + goto put_req_into_free_list; + } + } else { + CAM_ERR(CAM_FD, "Invalid hw_ops.start"); + list_del_init(&frame_req->list); + mutex_unlock(&hw_device->lock); + mutex_unlock(&hw_mgr->frame_req_mutex); + rc = -EPERM; + goto put_req_into_free_list; + } + + hw_device->ready_to_process = false; + hw_device->cur_hw_ctx = hw_ctx; + hw_device->req_id = frame_req->request_id; + mutex_unlock(&hw_device->lock); + mutex_unlock(&hw_mgr->frame_req_mutex); + + return rc; 
+put_req_into_free_list: + cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, &frame_req, true); + + return rc; +} + +static int cam_fd_mgr_util_schedule_frame_worker_task( + struct cam_fd_hw_mgr *hw_mgr) +{ + int32_t rc = 0; + struct crm_workq_task *task; + struct cam_fd_mgr_work_data *work_data; + + task = cam_req_mgr_workq_get_task(hw_mgr->work); + if (!task) { + CAM_ERR(CAM_FD, "no empty task available"); + return -ENOMEM; + } + + work_data = (struct cam_fd_mgr_work_data *)task->payload; + work_data->type = CAM_FD_WORK_FRAME; + + task->process_cb = cam_fd_mgr_util_submit_frame; + rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0); + + return rc; +} + +static int32_t cam_fd_mgr_workq_irq_cb(void *priv, void *data) +{ + struct cam_fd_device *hw_device = NULL; + struct cam_fd_hw_mgr *hw_mgr; + struct cam_fd_mgr_work_data *work_data; + struct cam_fd_mgr_frame_request *frame_req = NULL; + enum cam_fd_hw_irq_type irq_type; + uint32_t evt_id = CAM_CTX_EVT_ID_ERROR; + int rc; + + if (!data || !priv) { + CAM_ERR(CAM_FD, "Invalid data %pK %pK", data, priv); + return -EINVAL; + } + + hw_mgr = (struct cam_fd_hw_mgr *)priv; + work_data = (struct cam_fd_mgr_work_data *)data; + irq_type = work_data->irq_type; + + CAM_DBG(CAM_FD, "FD IRQ type=%d", irq_type); + + if (irq_type == CAM_FD_IRQ_HALT_DONE) { + /* HALT would be followed by a RESET, ignore this */ + CAM_DBG(CAM_FD, "HALT IRQ callback"); + return 0; + } + + /* Get the frame from processing list */ + rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_processing_list, + &frame_req); + if (rc || !frame_req) { + /* + * This can happen if reset is triggered while no frames + * were pending, so not an error, just continue to check if + * there are any pending frames and submit + */ + CAM_DBG(CAM_FD, "No Frame in processing list, rc=%d", rc); + goto submit_next_frame; + } + + if (!frame_req->hw_ctx) { + CAM_ERR(CAM_FD, "Invalid Frame request %lld", + frame_req->request_id); + goto put_req_in_free_list; 
+ } + + rc = cam_fd_mgr_util_get_device(hw_mgr, frame_req->hw_ctx, &hw_device); + if (rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + goto put_req_in_free_list; + } + + /* Read frame results first */ + if (irq_type == CAM_FD_IRQ_FRAME_DONE) { + struct cam_fd_hw_frame_done_args frame_done_args; + + CAM_DBG(CAM_FD, "FrameDone : Frame[%lld]", + frame_req->request_id); + + frame_done_args.hw_ctx = frame_req->hw_ctx; + frame_done_args.ctx_hw_private = + frame_req->hw_ctx->ctx_hw_private; + frame_done_args.request_id = frame_req->request_id; + frame_done_args.hw_req_private = &frame_req->hw_req_private; + + if (hw_device->hw_intf->hw_ops.process_cmd) { + rc = hw_device->hw_intf->hw_ops.process_cmd( + hw_device->hw_intf->hw_priv, + CAM_FD_HW_CMD_FRAME_DONE, + &frame_done_args, sizeof(frame_done_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d", + rc); + goto notify_context; + } + } + + evt_id = CAM_CTX_EVT_ID_SUCCESS; + } + + trace_cam_irq_handled("FD", irq_type); + +notify_context: + /* Do a callback to inform frame done or stop done */ + if (frame_req->hw_ctx->event_cb) { + struct cam_hw_done_event_data buf_data; + + CAM_DBG(CAM_FD, "FrameHALT : Frame[%lld]", + frame_req->request_id); + + buf_data.num_handles = frame_req->num_hw_update_entries; + buf_data.request_id = frame_req->request_id; + buf_data.evt_param = (irq_type == CAM_FD_IRQ_FRAME_DONE) ? + CAM_SYNC_FD_EVENT_IRQ_FRAME_DONE : + CAM_SYNC_FD_EVENT_IRQ_RESET_DONE; + + rc = frame_req->hw_ctx->event_cb(frame_req->hw_ctx->cb_priv, + evt_id, &buf_data); + if (rc) + CAM_ERR(CAM_FD, "Error in event cb handling %d", rc); + } + + /* + * Now we can set hw device is free to process further frames. + * Note - Do not change state to IDLE until we read the frame results, + * Otherwise, other thread may schedule frame processing before + * reading current frame's results. 
Also, we need to set to IDLE state + * in case some error happens after getting this irq callback + */ + mutex_lock(&hw_device->lock); + hw_device->ready_to_process = true; + hw_device->req_id = -1; + hw_device->cur_hw_ctx = NULL; + CAM_DBG(CAM_FD, "ready_to_process=%d", hw_device->ready_to_process); + mutex_unlock(&hw_device->lock); + +put_req_in_free_list: + rc = cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, + &frame_req, true); + if (rc) { + CAM_ERR(CAM_FD, "Failed in putting frame req in free list"); + /* continue */ + } + +submit_next_frame: + /* Check if there are any frames pending for processing and submit */ + rc = cam_fd_mgr_util_submit_frame(hw_mgr, NULL); + if (rc) { + CAM_ERR(CAM_FD, "Error while submit frame, rc=%d", rc); + return rc; + } + + return rc; +} + +static int cam_fd_mgr_irq_cb(void *data, enum cam_fd_hw_irq_type irq_type) +{ + struct cam_fd_hw_mgr *hw_mgr = &g_fd_hw_mgr; + int rc = 0; + unsigned long flags; + struct crm_workq_task *task; + struct cam_fd_mgr_work_data *work_data; + + spin_lock_irqsave(&hw_mgr->hw_mgr_slock, flags); + task = cam_req_mgr_workq_get_task(hw_mgr->work); + if (!task) { + CAM_ERR(CAM_FD, "no empty task available"); + spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags); + return -ENOMEM; + } + + work_data = (struct cam_fd_mgr_work_data *)task->payload; + work_data->type = CAM_FD_WORK_IRQ; + work_data->irq_type = irq_type; + + task->process_cb = cam_fd_mgr_workq_irq_cb; + rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0); + if (rc) + CAM_ERR(CAM_FD, "Failed in enqueue work task, rc=%d", rc); + + spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags); + + return rc; +} + +static int cam_fd_mgr_hw_get_caps(void *hw_mgr_priv, void *hw_get_caps_args) +{ + int rc = 0; + struct cam_fd_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_query_cap_cmd *query = hw_get_caps_args; + struct cam_fd_query_cap_cmd query_fd; + void __user *caps_handle = + u64_to_user_ptr(query->caps_handle); + + if 
(sizeof(struct cam_fd_query_cap_cmd) != query->size) { + CAM_ERR(CAM_FD, + "Input query cap size:%u does not match expected query cap size: %u", + query->size, sizeof(struct cam_fd_query_cap_cmd)); + return -EFAULT; + } + + if (copy_from_user(&query_fd, caps_handle, + sizeof(struct cam_fd_query_cap_cmd))) { + CAM_ERR(CAM_FD, "Failed in copy from user, rc=%d", rc); + return -EFAULT; + } + + query_fd = hw_mgr->fd_caps; + + CAM_DBG(CAM_FD, + "IOMMU device(%d, %d), CDM(%d, %d), versions %d.%d, %d.%d", + query_fd.device_iommu.secure, query_fd.device_iommu.non_secure, + query_fd.cdm_iommu.secure, query_fd.cdm_iommu.non_secure, + query_fd.hw_caps.core_version.major, + query_fd.hw_caps.core_version.minor, + query_fd.hw_caps.wrapper_version.major, + query_fd.hw_caps.wrapper_version.minor); + + if (copy_to_user(caps_handle, &query_fd, + sizeof(struct cam_fd_query_cap_cmd))) + rc = -EFAULT; + + return rc; +} + +static int cam_fd_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args) +{ + struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + struct cam_hw_acquire_args *acquire_args = + (struct cam_hw_acquire_args *)hw_acquire_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + struct cam_fd_acquire_dev_info fd_acquire_args; + int rc; + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_FD, "Invalid acquire args %pK", acquire_args); + return -EINVAL; + } + + if (copy_from_user(&fd_acquire_args, + (void __user *)acquire_args->acquire_info, + sizeof(struct cam_fd_acquire_dev_info))) { + CAM_ERR(CAM_FD, "Copy from user failed"); + return -EFAULT; + } + + CAM_DBG(CAM_FD, "Acquire : mode=%d, get_raw_results=%d, priority=%d", + fd_acquire_args.mode, fd_acquire_args.get_raw_results, + fd_acquire_args.priority); + + /* get a free fd hw mgr ctx */ + rc = cam_fd_mgr_util_get_ctx(&hw_mgr->free_ctx_list, &hw_ctx); + if (rc || !hw_ctx) { + CAM_ERR(CAM_FD, "Get hw context failed, rc=%d, hw_ctx=%pK", + rc, hw_ctx); + return -EINVAL; + } + + if 
(fd_acquire_args.get_raw_results && !hw_mgr->raw_results_available) { + CAM_ERR(CAM_FD, "HW cannot support raw results %d (%d)", + fd_acquire_args.get_raw_results, + hw_mgr->raw_results_available); + goto put_ctx; + } + + if (!(fd_acquire_args.mode & hw_mgr->supported_modes)) { + CAM_ERR(CAM_FD, "HW cannot support requested mode 0x%x (0x%x)", + fd_acquire_args.mode, hw_mgr->supported_modes); + rc = -EPERM; + goto put_ctx; + } + + rc = cam_fd_mgr_util_select_device(hw_mgr, hw_ctx, &fd_acquire_args); + if (rc) { + CAM_ERR(CAM_FD, "Failed in selecting device, rc=%d", rc); + goto put_ctx; + } + + hw_ctx->ctx_in_use = true; + hw_ctx->hw_mgr = hw_mgr; + hw_ctx->get_raw_results = fd_acquire_args.get_raw_results; + hw_ctx->mode = fd_acquire_args.mode; + + /* Save incoming cam core info into hw ctx*/ + hw_ctx->cb_priv = acquire_args->context_data; + hw_ctx->event_cb = acquire_args->event_cb; + + /* Update out args */ + acquire_args->ctxt_to_hw_map = hw_ctx; + + cam_fd_mgr_util_put_ctx(&hw_mgr->used_ctx_list, &hw_ctx); + + return 0; +put_ctx: + list_del_init(&hw_ctx->list); + cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx); + return rc; +} + +static int cam_fd_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args) +{ + struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + struct cam_hw_release_args *release_args = hw_release_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + int rc; + + if (!hw_mgr_priv || !hw_release_args) { + CAM_ERR(CAM_FD, "Invalid arguments %pK, %pK", + hw_mgr_priv, hw_release_args); + return -EINVAL; + } + + hw_ctx = (struct cam_fd_hw_mgr_ctx *)release_args->ctxt_to_hw_map; + if (!hw_ctx || !hw_ctx->ctx_in_use) { + CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx); + return -EPERM; + } + + rc = cam_fd_mgr_util_release_device(hw_mgr, hw_ctx); + if (rc) + CAM_ERR(CAM_FD, "Failed in release device, rc=%d", rc); + + hw_ctx->ctx_in_use = false; + list_del_init(&hw_ctx->list); + cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, 
&hw_ctx); + + return 0; +} + +static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args) +{ + int rc = 0; + struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + struct cam_hw_start_args *hw_mgr_start_args = + (struct cam_hw_start_args *)mgr_start_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + struct cam_fd_device *hw_device; + struct cam_fd_hw_init_args hw_init_args; + struct cam_hw_info *fd_hw; + struct cam_fd_core *fd_core; + + if (!hw_mgr_priv || !hw_mgr_start_args) { + CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", + hw_mgr_priv, hw_mgr_start_args); + return -EINVAL; + } + + hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_start_args->ctxt_to_hw_map; + if (!hw_ctx || !hw_ctx->ctx_in_use) { + CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx); + return -EPERM; + } + + CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index, + hw_ctx->device_index); + + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + if (rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + return rc; + } + + fd_hw = (struct cam_hw_info *)hw_device->hw_intf->hw_priv; + fd_core = (struct cam_fd_core *)fd_hw->core_info; + + if (hw_device->hw_intf->hw_ops.init) { + hw_init_args.hw_ctx = hw_ctx; + hw_init_args.ctx_hw_private = hw_ctx->ctx_hw_private; + hw_init_args.is_hw_reset = false; + if (fd_core->hw_static_info->enable_errata_wa.skip_reset) + hw_init_args.reset_required = false; + else + hw_init_args.reset_required = true; + + rc = hw_device->hw_intf->hw_ops.init( + hw_device->hw_intf->hw_priv, &hw_init_args, + sizeof(hw_init_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in HW Init %d", rc); + return rc; + } + + if (hw_init_args.is_hw_reset) { + mutex_lock(&hw_device->lock); + hw_device->ready_to_process = true; + hw_device->req_id = -1; + hw_device->cur_hw_ctx = NULL; + mutex_unlock(&hw_device->lock); + } + } else { + CAM_ERR(CAM_FD, "Invalid init function"); + return -EINVAL; + } + + return rc; +} + +static int 
cam_fd_mgr_hw_flush_req(void *hw_mgr_priv, + struct cam_hw_flush_args *flush_args) +{ + int rc = 0; + struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req; + struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + struct cam_fd_device *hw_device; + struct cam_fd_hw_stop_args hw_stop_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + uint32_t i = 0; + + hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map; + + if (!hw_ctx || !hw_ctx->ctx_in_use) { + CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx); + return -EPERM; + } + CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index, + hw_ctx->device_index); + + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + if (rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + return rc; + } + + mutex_lock(&hw_mgr->frame_req_mutex); + for (i = 0; i < flush_args->num_req_active; i++) { + flush_req = (struct cam_fd_mgr_frame_request *) + flush_args->flush_req_active[i]; + + list_for_each_entry_safe(frame_req, req_temp, + &hw_mgr->frame_pending_list_high, list) { + if (frame_req->hw_ctx != hw_ctx) + continue; + + if (frame_req->request_id != flush_req->request_id) + continue; + + hw_mgr->num_pending_frames--; + list_del_init(&frame_req->list); + break; + } + + list_for_each_entry_safe(frame_req, req_temp, + &hw_mgr->frame_pending_list_normal, list) { + if (frame_req->hw_ctx != hw_ctx) + continue; + + if (frame_req->request_id != flush_req->request_id) + continue; + + hw_mgr->num_pending_frames--; + list_del_init(&frame_req->list); + break; + } + + list_for_each_entry_safe(frame_req, req_temp, + &hw_mgr->frame_processing_list, list) { + if (frame_req->hw_ctx != hw_ctx) + continue; + + if (frame_req->request_id != flush_req->request_id) + continue; + + list_del_init(&frame_req->list); + + mutex_lock(&hw_device->lock); + if ((hw_device->ready_to_process == true) || + (hw_device->cur_hw_ctx != hw_ctx)) + goto unlock_dev_flush_req; + + if 
(hw_device->hw_intf->hw_ops.stop) { + hw_stop_args.hw_ctx = hw_ctx; + rc = hw_device->hw_intf->hw_ops.stop( + hw_device->hw_intf->hw_priv, + &hw_stop_args, + sizeof(hw_stop_args)); + if (rc) { + CAM_ERR(CAM_FD, + "Failed in HW Stop %d", rc); + goto unlock_dev_flush_req; + } + hw_device->ready_to_process = true; + } + +unlock_dev_flush_req: + mutex_unlock(&hw_device->lock); + break; + } + } + mutex_unlock(&hw_mgr->frame_req_mutex); + + for (i = 0; i < flush_args->num_req_pending; i++) { + flush_req = (struct cam_fd_mgr_frame_request *) + flush_args->flush_req_pending[i]; + cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, + &flush_req, true); + } + + return rc; +} + +static int cam_fd_mgr_hw_flush_ctx(void *hw_mgr_priv, + struct cam_hw_flush_args *flush_args) +{ + int rc = 0; + struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req; + struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + struct cam_fd_device *hw_device; + struct cam_fd_hw_stop_args hw_stop_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + uint32_t i = 0; + + hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map; + + if (!hw_ctx || !hw_ctx->ctx_in_use) { + CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx); + return -EPERM; + } + CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index, + hw_ctx->device_index); + + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + if (rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + return rc; + } + + mutex_lock(&hw_mgr->frame_req_mutex); + list_for_each_entry_safe(frame_req, req_temp, + &hw_mgr->frame_pending_list_high, list) { + if (frame_req->hw_ctx != hw_ctx) + continue; + + hw_mgr->num_pending_frames--; + list_del_init(&frame_req->list); + } + + list_for_each_entry_safe(frame_req, req_temp, + &hw_mgr->frame_pending_list_normal, list) { + if (frame_req->hw_ctx != hw_ctx) + continue; + + hw_mgr->num_pending_frames--; + list_del_init(&frame_req->list); + } + + 
list_for_each_entry_safe(frame_req, req_temp, + &hw_mgr->frame_processing_list, list) { + if (frame_req->hw_ctx != hw_ctx) + continue; + + list_del_init(&frame_req->list); + mutex_lock(&hw_device->lock); + if ((hw_device->ready_to_process == true) || + (hw_device->cur_hw_ctx != hw_ctx)) + goto unlock_dev_flush_ctx; + + if (hw_device->hw_intf->hw_ops.stop) { + hw_stop_args.hw_ctx = hw_ctx; + rc = hw_device->hw_intf->hw_ops.stop( + hw_device->hw_intf->hw_priv, &hw_stop_args, + sizeof(hw_stop_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc); + goto unlock_dev_flush_ctx; + } + hw_device->ready_to_process = true; + } + +unlock_dev_flush_ctx: + mutex_unlock(&hw_device->lock); + } + mutex_unlock(&hw_mgr->frame_req_mutex); + + for (i = 0; i < flush_args->num_req_pending; i++) { + flush_req = (struct cam_fd_mgr_frame_request *) + flush_args->flush_req_pending[i]; + CAM_DBG(CAM_FD, "flush pending req %llu", + flush_req->request_id); + cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, + &flush_req, true); + } + + for (i = 0; i < flush_args->num_req_active; i++) { + flush_req = (struct cam_fd_mgr_frame_request *) + flush_args->flush_req_active[i]; + CAM_DBG(CAM_FD, "flush active req %llu", flush_req->request_id); + cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, + &flush_req, true); + } + + return rc; +} + +static int cam_fd_mgr_hw_flush(void *hw_mgr_priv, + void *hw_flush_args) +{ + int rc = 0; + struct cam_hw_flush_args *flush_args = + (struct cam_hw_flush_args *)hw_flush_args; + + if (!hw_mgr_priv || !hw_flush_args) { + CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", + hw_mgr_priv, hw_flush_args); + return -EINVAL; + } + + switch (flush_args->flush_type) { + case CAM_FLUSH_TYPE_REQ: + rc = cam_fd_mgr_hw_flush_req(hw_mgr_priv, flush_args); + break; + case CAM_FLUSH_TYPE_ALL: + rc = cam_fd_mgr_hw_flush_ctx(hw_mgr_priv, flush_args); + break; + default: + rc = -EINVAL; + CAM_ERR(CAM_FD, "Invalid flush type %d", + flush_args->flush_type); + break; + } 
+ return rc; +} + +static int cam_fd_mgr_hw_dump( + void *hw_mgr_priv, + void *hw_dump_args) +{ + int rc; + uint8_t *dst; + ktime_t cur_time; + size_t remain_len; + uint32_t min_len; + uint64_t diff; + uint64_t *addr, *start; + struct timespec64 cur_ts; + struct timespec64 req_ts; + struct cam_fd_hw_mgr *hw_mgr; + struct cam_hw_dump_args *dump_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + struct cam_fd_device *hw_device; + struct cam_fd_hw_dump_args fd_dump_args; + struct cam_fd_hw_dump_header *hdr; + struct cam_fd_mgr_frame_request *frame_req, *req_temp; + + hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + dump_args = (struct cam_hw_dump_args *)hw_dump_args; + if (!hw_mgr || !dump_args) { + CAM_ERR(CAM_FD, "Invalid args %pK %pK", + hw_mgr, dump_args); + return -EINVAL; + } + + hw_ctx = (struct cam_fd_hw_mgr_ctx *)dump_args->ctxt_to_hw_map; + + if (!hw_ctx) { + CAM_ERR(CAM_FD, "Invalid ctx"); + return -EINVAL; + } + + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + + if (rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + return rc; + } + + list_for_each_entry_safe(frame_req, req_temp, + &hw_mgr->frame_processing_list, list) { + if (frame_req->request_id == dump_args->request_id) + goto hw_dump; + } + + CAM_DBG(CAM_FD, "fd dump cannot find req %llu", + dump_args->request_id); + return rc; +hw_dump: + cur_time = ktime_get(); + diff = ktime_us_delta(frame_req->submit_timestamp, cur_time); + cur_ts = ktime_to_timespec64(cur_time); + req_ts = ktime_to_timespec64(frame_req->submit_timestamp); + if (diff < CAM_FD_RESPONSE_TIME_THRESHOLD) { + CAM_INFO(CAM_FD, + "No Error req %lld req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + return 0; + } + CAM_INFO(CAM_FD, "Error req %lld req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + 
cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + rc = cam_mem_get_cpu_buf(dump_args->buf_handle, + &fd_dump_args.cpu_addr, &fd_dump_args.buf_len); + if (rc) { + CAM_ERR(CAM_FD, "Invalid handle %u rc %d", + dump_args->buf_handle, rc); + return rc; + } + if (fd_dump_args.buf_len <= dump_args->offset) { + CAM_WARN(CAM_FD, "dump offset overshoot len %zu offset %zu", + fd_dump_args.buf_len, dump_args->offset); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return -ENOSPC; + } + remain_len = fd_dump_args.buf_len - dump_args->offset; + min_len = sizeof(struct cam_fd_hw_dump_header) + + (CAM_FD_HW_DUMP_NUM_WORDS * sizeof(uint64_t)); + + if (remain_len < min_len) { + CAM_WARN(CAM_FD, "dump buffer exhaust remain %zu min %u", + remain_len, min_len); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return -ENOSPC; + } + + dst = (uint8_t *)fd_dump_args.cpu_addr + dump_args->offset; + hdr = (struct cam_fd_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_FD_HW_DUMP_TAG_MAX_LEN, + "FD_REQ:"); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_fd_hw_dump_header)); + start = addr; + *addr++ = frame_req->request_id; + *addr++ = req_ts.tv_sec; + *addr++ = req_ts.tv_nsec/NSEC_PER_USEC; + *addr++ = cur_ts.tv_sec; + *addr++ = cur_ts.tv_nsec/NSEC_PER_USEC; + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_fd_hw_dump_header); + + fd_dump_args.request_id = dump_args->request_id; + fd_dump_args.offset = dump_args->offset; + if (hw_device->hw_intf->hw_ops.process_cmd) { + rc = hw_device->hw_intf->hw_ops.process_cmd( + hw_device->hw_intf->hw_priv, + CAM_FD_HW_CMD_HW_DUMP, + &fd_dump_args, + sizeof(struct + cam_fd_hw_dump_args)); + if (rc) { + CAM_ERR(CAM_FD, "Hw Dump cmd fails req %lld rc %d", + frame_req->request_id, rc); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; + } + } + CAM_DBG(CAM_FD, "Offset before %zu after %zu", + dump_args->offset, fd_dump_args.offset); + dump_args->offset = 
fd_dump_args.offset; + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; +} + +static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args) +{ + struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + struct cam_hw_stop_args *hw_mgr_stop_args = + (struct cam_hw_stop_args *)mgr_stop_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + struct cam_fd_device *hw_device; + struct cam_fd_hw_deinit_args hw_deinit_args; + int rc = 0; + + if (!hw_mgr_priv || !hw_mgr_stop_args) { + CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", + hw_mgr_priv, hw_mgr_stop_args); + return -EINVAL; + } + + hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_stop_args->ctxt_to_hw_map; + if (!hw_ctx || !hw_ctx->ctx_in_use) { + CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx); + return -EPERM; + } + CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index, + hw_ctx->device_index); + + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + if (rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + return rc; + } + + CAM_DBG(CAM_FD, "FD Device ready_to_process = %d", + hw_device->ready_to_process); + + if (hw_device->hw_intf->hw_ops.deinit) { + hw_deinit_args.hw_ctx = hw_ctx; + hw_deinit_args.ctx_hw_private = hw_ctx->ctx_hw_private; + rc = hw_device->hw_intf->hw_ops.deinit( + hw_device->hw_intf->hw_priv, &hw_deinit_args, + sizeof(hw_deinit_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in HW DeInit %d", rc); + return rc; + } + } + + return rc; +} + +static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv, + void *hw_prepare_update_args) +{ + struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + struct cam_hw_prepare_update_args *prepare = + (struct cam_hw_prepare_update_args *) hw_prepare_update_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + struct cam_fd_device *hw_device; + struct cam_kmd_buf_info kmd_buf; + int rc; + struct cam_fd_hw_cmd_prestart_args prestart_args; + struct cam_fd_mgr_frame_request *frame_req; + + if (!hw_mgr_priv || 
!hw_prepare_update_args) { + CAM_ERR(CAM_FD, "Invalid args %pK %pK", + hw_mgr_priv, hw_prepare_update_args); + return -EINVAL; + } + + hw_ctx = (struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map; + if (!hw_ctx || !hw_ctx->ctx_in_use) { + CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx); + return -EPERM; + } + + rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device); + if (rc) { + CAM_ERR(CAM_FD, "Error in getting device %d", rc); + goto error; + } + + rc = cam_fd_mgr_util_packet_validate(prepare->packet, + prepare->remain_len); + if (rc) { + CAM_ERR(CAM_FD, "Error in packet validation %d", rc); + goto error; + } + + rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf); + if (rc) { + CAM_ERR(CAM_FD, "Error in get kmd buf buffer %d", rc); + goto error; + } + + CAM_DBG(CAM_FD, + "KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d", + kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset, + kmd_buf.size, kmd_buf.used_bytes); + + /* We do not expect any patching, but just do it anyway */ + rc = cam_packet_util_process_patches(prepare->packet, prepare->buf_tracker, + hw_mgr->device_iommu.non_secure, -1, false); + if (rc) { + CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc); + return rc; + } + + memset(&prestart_args, 0x0, sizeof(prestart_args)); + prestart_args.ctx_hw_private = hw_ctx->ctx_hw_private; + prestart_args.hw_ctx = hw_ctx; + prestart_args.request_id = prepare->packet->header.request_id; + + rc = cam_fd_mgr_util_parse_generic_cmd_buffer(hw_ctx, prepare->packet, + &prestart_args); + if (rc) { + CAM_ERR(CAM_FD, "Error in parsing gerneric cmd buffer %d", rc); + goto error; + } + + rc = cam_fd_mgr_util_prepare_io_buf_info( + hw_mgr->device_iommu.non_secure, prepare, + prestart_args.input_buf, prestart_args.output_buf, + CAM_FD_MAX_IO_BUFFERS); + + if (rc) { + CAM_ERR(CAM_FD, "Error in prepare IO Buf %d", rc); + + if (rc == -ENOSPC) + goto error; + goto put_cpu_buf; + } + + rc = cam_fd_mgr_util_prepare_hw_update_entries(hw_mgr, 
prepare, + &prestart_args, &kmd_buf); + if (rc) { + CAM_ERR(CAM_FD, "Error in hw update entries %d", rc); + goto put_cpu_buf; + } + + /* get a free frame req from free list */ + rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_free_list, + &frame_req); + if (rc || !frame_req) { + CAM_ERR(CAM_FD, "Get frame_req failed, rc=%d, hw_ctx=%pK", + rc, hw_ctx); + rc = -ENOMEM; + goto put_cpu_buf; + } + + /* Setup frame request info and queue to pending list */ + frame_req->hw_ctx = hw_ctx; + frame_req->request_id = prepare->packet->header.request_id; + /* This has to be passed to HW while calling hw_ops->start */ + frame_req->hw_req_private = prestart_args.hw_req_private; + + /* + * Save the current frame_req into priv, + * this will come as priv while hw_config + */ + prepare->priv = frame_req; + + cam_fd_mgr_put_cpu_buf(prepare); + CAM_DBG(CAM_FD, "FramePrepare : Frame[%lld]", frame_req->request_id); + + return 0; + +put_cpu_buf: + cam_fd_mgr_put_cpu_buf(prepare); +error: + return rc; +} + +static int cam_fd_mgr_hw_config(void *hw_mgr_priv, void *hw_config_args) +{ + struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv; + struct cam_hw_config_args *config = + (struct cam_hw_config_args *) hw_config_args; + struct cam_fd_hw_mgr_ctx *hw_ctx; + struct cam_fd_mgr_frame_request *frame_req; + int rc; + int i; + uint64_t req_id; + + if (!hw_mgr || !config) { + CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", hw_mgr, config); + return -EINVAL; + } + + if (!config->num_hw_update_entries) { + CAM_ERR(CAM_FD, "No hw update enteries are available"); + return -EINVAL; + } + + hw_ctx = (struct cam_fd_hw_mgr_ctx *)config->ctxt_to_hw_map; + if (!hw_ctx || !hw_ctx->ctx_in_use) { + CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx); + return -EPERM; + } + + frame_req = config->priv; + req_id = frame_req->request_id; + + trace_cam_apply_req("FD", hw_ctx->ctx_index, frame_req->request_id, 0); + CAM_DBG(CAM_FD, "FrameHWConfig : Frame[%lld]", frame_req->request_id); + + 
frame_req->num_hw_update_entries = config->num_hw_update_entries; + for (i = 0; i < config->num_hw_update_entries; i++) { + frame_req->hw_update_entries[i] = config->hw_update_entries[i]; + CAM_DBG(CAM_FD, "PreStart HWEntry[%d] : %d %d %d %d %pK", + frame_req->hw_update_entries[i].handle, + frame_req->hw_update_entries[i].offset, + frame_req->hw_update_entries[i].len, + frame_req->hw_update_entries[i].flags, + frame_req->hw_update_entries[i].addr); + } + + if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) { + CAM_DBG(CAM_FD, "Insert frame into prio0 queue"); + rc = cam_fd_mgr_util_put_frame_req( + &hw_mgr->frame_pending_list_high, &frame_req, false); + } else { + CAM_DBG(CAM_FD, "Insert frame into prio1 queue"); + rc = cam_fd_mgr_util_put_frame_req( + &hw_mgr->frame_pending_list_normal, &frame_req, false); + } + if (rc) { + CAM_ERR(CAM_FD, "Failed in queuing frame req, rc=%d", rc); + goto put_free_list; + } + + mutex_lock(&g_fd_hw_mgr.frame_req_mutex); + hw_mgr->num_pending_frames++; + CAM_DBG(CAM_FD, + "Adding ctx[%pK] Req[%llu] : Total number of pending frames %d", + hw_ctx, req_id, hw_mgr->num_pending_frames); + mutex_unlock(&g_fd_hw_mgr.frame_req_mutex); + + rc = cam_fd_mgr_util_schedule_frame_worker_task(hw_mgr); + if (rc) { + CAM_ERR(CAM_FD, "Worker task scheduling failed %d", rc); + goto remove_and_put_free_list; + } + + return 0; + +remove_and_put_free_list: + + if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) { + CAM_DBG(CAM_FD, "Removing frame into prio0 queue"); + cam_fd_mgr_util_get_frame_req( + &hw_mgr->frame_pending_list_high, &frame_req); + } else { + CAM_DBG(CAM_FD, "Removing frame into prio1 queue"); + cam_fd_mgr_util_get_frame_req( + &hw_mgr->frame_pending_list_normal, &frame_req); + } + + mutex_lock(&g_fd_hw_mgr.frame_req_mutex); + hw_mgr->num_pending_frames--; + mutex_unlock(&g_fd_hw_mgr.frame_req_mutex); +put_free_list: + cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, + &frame_req, true); + + return rc; +} + +int cam_fd_hw_mgr_deinit(struct 
device_node *of_node) +{ + CAM_DBG(CAM_FD, "HW Mgr Deinit"); + + cam_req_mgr_workq_destroy(&g_fd_hw_mgr.work); + + cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure); + g_fd_hw_mgr.device_iommu.non_secure = -1; + + mutex_destroy(&g_fd_hw_mgr.ctx_mutex); + mutex_destroy(&g_fd_hw_mgr.frame_req_mutex); + mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex); + + return 0; +} + +static void cam_req_mgr_process_workq_cam_fd_worker(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +int cam_fd_hw_mgr_init(struct device_node *of_node, + struct cam_hw_mgr_intf *hw_mgr_intf) +{ + int count, i, rc = 0; + struct cam_hw_intf *hw_intf = NULL; + struct cam_fd_hw_mgr_ctx *hw_mgr_ctx; + struct cam_fd_device *hw_device; + struct cam_fd_mgr_frame_request *frame_req; + + if (!of_node || !hw_mgr_intf) { + CAM_ERR(CAM_FD, "Invalid args of_node %pK hw_mgr_intf %pK", + of_node, hw_mgr_intf); + return -EINVAL; + } + + memset(&g_fd_hw_mgr, 0x0, sizeof(g_fd_hw_mgr)); + memset(hw_mgr_intf, 0x0, sizeof(*hw_mgr_intf)); + + mutex_init(&g_fd_hw_mgr.ctx_mutex); + mutex_init(&g_fd_hw_mgr.frame_req_mutex); + mutex_init(&g_fd_hw_mgr.hw_mgr_mutex); + spin_lock_init(&g_fd_hw_mgr.hw_mgr_slock); + + count = of_property_count_strings(of_node, "compat-hw-name"); + if (!count || (count > CAM_FD_HW_MAX)) { + CAM_ERR(CAM_FD, "Invalid compat names in dev tree %d", count); + return -EINVAL; + } + g_fd_hw_mgr.num_devices = count; + + g_fd_hw_mgr.raw_results_available = false; + g_fd_hw_mgr.supported_modes = 0; + + for (i = 0; i < count; i++) { + hw_device = &g_fd_hw_mgr.hw_device[i]; + + rc = cam_fd_mgr_util_pdev_get_hw_intf(of_node, i, &hw_intf); + if (rc) { + CAM_ERR(CAM_FD, "hw intf from pdev failed, rc=%d", rc); + return rc; + } + + mutex_init(&hw_device->lock); + + hw_device->valid = true; + hw_device->hw_intf = hw_intf; + hw_device->ready_to_process = true; + hw_device->req_id = -1; + hw_device->cur_hw_ctx = NULL; + + if (hw_device->hw_intf->hw_ops.process_cmd) { + struct cam_fd_hw_cmd_set_irq_cb 
irq_cb_args; + + irq_cb_args.cam_fd_hw_mgr_cb = cam_fd_mgr_irq_cb; + irq_cb_args.data = hw_device; + + rc = hw_device->hw_intf->hw_ops.process_cmd( + hw_device->hw_intf->hw_priv, + CAM_FD_HW_CMD_REGISTER_CALLBACK, + &irq_cb_args, sizeof(irq_cb_args)); + if (rc) { + CAM_ERR(CAM_FD, + "Failed in REGISTER_CALLBACK %d", rc); + return rc; + } + } + + if (hw_device->hw_intf->hw_ops.get_hw_caps) { + rc = hw_device->hw_intf->hw_ops.get_hw_caps( + hw_intf->hw_priv, &hw_device->hw_caps, + sizeof(hw_device->hw_caps)); + if (rc) { + CAM_ERR(CAM_FD, "Failed in get_hw_caps %d", rc); + return rc; + } + + g_fd_hw_mgr.raw_results_available |= + hw_device->hw_caps.raw_results_available; + g_fd_hw_mgr.supported_modes |= + hw_device->hw_caps.supported_modes; + + CAM_DBG(CAM_FD, + "Device[mode=%d, raw=%d], Mgr[mode=%d, raw=%d]", + hw_device->hw_caps.supported_modes, + hw_device->hw_caps.raw_results_available, + g_fd_hw_mgr.supported_modes, + g_fd_hw_mgr.raw_results_available); + } + } + + INIT_LIST_HEAD(&g_fd_hw_mgr.free_ctx_list); + INIT_LIST_HEAD(&g_fd_hw_mgr.used_ctx_list); + INIT_LIST_HEAD(&g_fd_hw_mgr.frame_free_list); + INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_high); + INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_normal); + INIT_LIST_HEAD(&g_fd_hw_mgr.frame_processing_list); + + g_fd_hw_mgr.device_iommu.non_secure = -1; + g_fd_hw_mgr.device_iommu.secure = -1; + g_fd_hw_mgr.cdm_iommu.non_secure = -1; + g_fd_hw_mgr.cdm_iommu.secure = -1; + g_fd_hw_mgr.num_pending_frames = 0; + + rc = cam_smmu_get_handle("fd", + &g_fd_hw_mgr.device_iommu.non_secure); + if (rc) { + CAM_ERR(CAM_FD, "Get iommu handle failed, rc=%d", rc); + goto destroy_mutex; + } + + rc = cam_cdm_get_iommu_handle("fd", &g_fd_hw_mgr.cdm_iommu); + if (rc) + CAM_DBG(CAM_FD, "Failed to acquire the CDM iommu handles"); + + CAM_DBG(CAM_FD, "iommu handles : device(%d, %d), cdm(%d, %d)", + g_fd_hw_mgr.device_iommu.non_secure, + g_fd_hw_mgr.device_iommu.secure, + g_fd_hw_mgr.cdm_iommu.non_secure, + 
g_fd_hw_mgr.cdm_iommu.secure); + + /* Init hw mgr contexts and add to free list */ + for (i = 0; i < CAM_CTX_MAX; i++) { + hw_mgr_ctx = &g_fd_hw_mgr.ctx_pool[i]; + + memset(hw_mgr_ctx, 0x0, sizeof(*hw_mgr_ctx)); + INIT_LIST_HEAD(&hw_mgr_ctx->list); + + hw_mgr_ctx->ctx_index = i; + hw_mgr_ctx->device_index = -1; + hw_mgr_ctx->hw_mgr = &g_fd_hw_mgr; + + list_add_tail(&hw_mgr_ctx->list, &g_fd_hw_mgr.free_ctx_list); + } + + /* Init hw mgr frame requests and add to free list */ + for (i = 0; i < CAM_CTX_REQ_MAX; i++) { + frame_req = &g_fd_hw_mgr.frame_req[i]; + + memset(frame_req, 0x0, sizeof(*frame_req)); + INIT_LIST_HEAD(&frame_req->list); + + list_add_tail(&frame_req->list, &g_fd_hw_mgr.frame_free_list); + } + + rc = cam_req_mgr_workq_create("cam_fd_worker", CAM_FD_WORKQ_NUM_TASK, + &g_fd_hw_mgr.work, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_workq_cam_fd_worker); + if (rc) { + CAM_ERR(CAM_FD, "Unable to create a worker, rc=%d", rc); + goto detach_smmu; + } + + g_fd_hw_mgr.work_data = CAM_MEM_ZALLOC_ARRAY(CAM_FD_WORKQ_NUM_TASK, + sizeof(struct cam_fd_mgr_work_data), GFP_KERNEL); + + for (i = 0; i < CAM_FD_WORKQ_NUM_TASK; i++) + g_fd_hw_mgr.work->task.pool[i].payload = + &g_fd_hw_mgr.work_data[i]; + + /* Setup hw cap so that we can just return the info when requested */ + memset(&g_fd_hw_mgr.fd_caps, 0, sizeof(g_fd_hw_mgr.fd_caps)); + g_fd_hw_mgr.fd_caps.device_iommu = g_fd_hw_mgr.device_iommu; + g_fd_hw_mgr.fd_caps.cdm_iommu = g_fd_hw_mgr.cdm_iommu; + g_fd_hw_mgr.fd_caps.hw_caps = g_fd_hw_mgr.hw_device[0].hw_caps; + + CAM_DBG(CAM_FD, + "IOMMU device(%d, %d), CDM(%d, %d) versions core[%d.%d], wrapper[%d.%d]", + g_fd_hw_mgr.fd_caps.device_iommu.secure, + g_fd_hw_mgr.fd_caps.device_iommu.non_secure, + g_fd_hw_mgr.fd_caps.cdm_iommu.secure, + g_fd_hw_mgr.fd_caps.cdm_iommu.non_secure, + g_fd_hw_mgr.fd_caps.hw_caps.core_version.major, + g_fd_hw_mgr.fd_caps.hw_caps.core_version.minor, + g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.major, + 
g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.minor); + + hw_mgr_intf->hw_mgr_priv = &g_fd_hw_mgr; + hw_mgr_intf->hw_get_caps = cam_fd_mgr_hw_get_caps; + hw_mgr_intf->hw_acquire = cam_fd_mgr_hw_acquire; + hw_mgr_intf->hw_release = cam_fd_mgr_hw_release; + hw_mgr_intf->hw_start = cam_fd_mgr_hw_start; + hw_mgr_intf->hw_stop = cam_fd_mgr_hw_stop; + hw_mgr_intf->hw_prepare_update = cam_fd_mgr_hw_prepare_update; + hw_mgr_intf->hw_config = cam_fd_mgr_hw_config; + hw_mgr_intf->hw_read = NULL; + hw_mgr_intf->hw_write = NULL; + hw_mgr_intf->hw_close = NULL; + hw_mgr_intf->hw_flush = cam_fd_mgr_hw_flush; + hw_mgr_intf->hw_dump = cam_fd_mgr_hw_dump; + + return rc; + +detach_smmu: + cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure); + g_fd_hw_mgr.device_iommu.non_secure = -1; +destroy_mutex: + mutex_destroy(&g_fd_hw_mgr.ctx_mutex); + mutex_destroy(&g_fd_hw_mgr.frame_req_mutex); + mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h new file mode 100644 index 0000000000..d453923b09 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_FD_HW_MGR_H_ +#define _CAM_FD_HW_MGR_H_ + +#include +#include + +#include +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_hw_mgr_intf.h" +#include "cam_req_mgr_workq.h" +#include "cam_fd_hw_intf.h" + +#define CAM_FD_HW_MAX 1 +#define CAM_FD_WORKQ_NUM_TASK 10 + +/* + * Response time threshold in microseconds beyond which a request is not expected to be + * with FD hw + */ +#define CAM_FD_RESPONSE_TIME_THRESHOLD 100000 + +struct cam_fd_hw_mgr; + +/** + * enum cam_fd_mgr_work_type - Type of worker task + * + * @CAM_FD_WORK_FRAME : Work type indicating frame task + * @CAM_FD_WORK_IRQ : Work type indicating irq task + */ +enum cam_fd_mgr_work_type { + CAM_FD_WORK_FRAME, + CAM_FD_WORK_IRQ, +}; + +/** + * struct cam_fd_hw_mgr_ctx : FD HW Mgr context + * + * @list : List pointer used to maintain this context + * in free, used list + * @ctx_index : Index of this context + * @ctx_in_use : Whether this context is in use + * @event_cb : Event callback pointer to notify cam core context + * @cb_priv : Event callback private pointer + * @hw_mgr : Pointer to hw manager + * @get_raw_results : Whether this context needs raw results + * @mode : Mode in which this context runs + * @device_index : HW Device used by this context + * @ctx_hw_private : HW layer's private context pointer for this context + * @priority : Priority of this context + */ +struct cam_fd_hw_mgr_ctx { + struct list_head list; + uint32_t ctx_index; + bool ctx_in_use; + cam_hw_event_cb_func event_cb; + void *cb_priv; + struct cam_fd_hw_mgr *hw_mgr; + bool get_raw_results; + enum cam_fd_hw_mode mode; + int32_t device_index; + void *ctx_hw_private; + uint32_t priority; +}; + +/** + * struct cam_fd_device : FD HW Device + * + * @hw_caps : This FD device's capabilities + * @hw_intf : FD device's interface information + * @ready_to_process : Whether this device is ready to process next frame + * @num_ctxts : Number of contexts currently 
running on this device + @valid : Whether this device is valid + @lock : Lock used for protecting device state + @cur_hw_ctx : current hw context running in the device + @req_id : current processing req id + */ +struct cam_fd_device { + struct cam_fd_hw_caps hw_caps; + struct cam_hw_intf *hw_intf; + bool ready_to_process; + uint32_t num_ctxts; + bool valid; + struct mutex lock; + struct cam_fd_hw_mgr_ctx *cur_hw_ctx; + int64_t req_id; +}; + +/** + * struct cam_fd_mgr_frame_request : Frame request information maintained + * in HW Mgr layer + * + * @list : List pointer used to maintain this request in + * free, pending, processing request lists + * @request_id : Request ID corresponding to this request + * @hw_ctx : HW context from which this request is coming + * @hw_req_private : HW layer's private information specific to + * this request + * @hw_update_entries : HW update entries corresponding to this request + * which needs to be submitted to HW through CDM + * @num_hw_update_entries : Number of HW update entries + * @submit_timestamp : Time stamp for submit req with hw + */ +struct cam_fd_mgr_frame_request { + struct list_head list; + uint64_t request_id; + struct cam_fd_hw_mgr_ctx *hw_ctx; + struct cam_fd_hw_req_private hw_req_private; + struct cam_hw_update_entry hw_update_entries[CAM_FD_MAX_HW_ENTRIES]; + uint32_t num_hw_update_entries; + ktime_t submit_timestamp; +}; + +/** + * struct cam_fd_mgr_work_data : HW Mgr work data information + * + * @type : Type of work + * @irq_type : IRQ type when this work is queued because of irq callback + */ +struct cam_fd_mgr_work_data { + enum cam_fd_mgr_work_type type; + enum cam_fd_hw_irq_type irq_type; +}; + +/** + * struct cam_fd_hw_mgr : FD HW Mgr information + * + * @free_ctx_list : List of free contexts available for acquire + * @used_ctx_list : List of contexts that are acquired + * @frame_free_list : List of free frame requests available + * @frame_pending_list_high : List of high priority frame requests pending + * for 
processing + * @frame_pending_list_normal : List of normal priority frame requests pending + * for processing + * @frame_processing_list : List of frame requests currently being + * processed currently. Generally maximum one + * request would be present in this list + * @hw_mgr_mutex : Mutex to protect hw mgr data when accessed + * from multiple threads + * @hw_mgr_slock : Spin lock to protect hw mgr data when accessed + * from multiple threads + * @ctx_mutex : Mutex to protect context list + * @frame_req_mutex : Mutex to protect frame request list + * @device_iommu : Device IOMMU information + * @cdm_iommu : CDM IOMMU information + * @hw_device : Underlying HW device information + * @num_devices : Number of HW devices available + * @raw_results_available : Whether raw results available in this driver + * @supported_modes : Supported modes by this driver + * @ctx_pool : List of context + * @frame_req : List of frame requests + * @work : Worker handle + * @work_data : Worker data + * @fd_caps : FD driver capabilities + * @num_pending_frames : Number of total frames pending for processing + * across contexts + */ +struct cam_fd_hw_mgr { + struct list_head free_ctx_list; + struct list_head used_ctx_list; + struct list_head frame_free_list; + struct list_head frame_pending_list_high; + struct list_head frame_pending_list_normal; + struct list_head frame_processing_list; + struct mutex hw_mgr_mutex; + spinlock_t hw_mgr_slock; + struct mutex ctx_mutex; + struct mutex frame_req_mutex; + struct cam_iommu_handle device_iommu; + struct cam_iommu_handle cdm_iommu; + struct cam_fd_device hw_device[CAM_FD_HW_MAX]; + uint32_t num_devices; + bool raw_results_available; + uint32_t supported_modes; + struct cam_fd_hw_mgr_ctx ctx_pool[CAM_CTX_MAX]; + struct cam_fd_mgr_frame_request frame_req[CAM_CTX_REQ_MAX]; + struct cam_req_mgr_core_workq *work; + struct cam_fd_mgr_work_data *work_data; + struct cam_fd_query_cap_cmd fd_caps; + uint32_t num_pending_frames; +}; + +#endif /* 
_CAM_FD_HW_MGR_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h new file mode 100644 index 0000000000..81ed464e40 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_FD_HW_MGR_INTF_H_ +#define _CAM_FD_HW_MGR_INTF_H_ + +#include + +#include "cam_debug_util.h" +#include "cam_hw_mgr_intf.h" + +int cam_fd_hw_mgr_init(struct device_node *of_node, + struct cam_hw_mgr_intf *hw_mgr_intf); +int cam_fd_hw_mgr_deinit(struct device_node *of_node); + +#endif /* _CAM_FD_HW_MGR_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c new file mode 100644 index 0000000000..ffb85a37a5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c @@ -0,0 +1,1281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_fd_hw_core.h" +#include "cam_fd_hw_soc.h" +#include "cam_trace.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +#define CAM_FD_REG_VAL_PAIR_SIZE 256 + +static uint32_t cam_fd_cdm_write_reg_val_pair(uint32_t *buffer, + uint32_t index, uint32_t reg_offset, uint32_t reg_value) +{ + buffer[index++] = reg_offset; + buffer[index++] = reg_value; + + CAM_DBG(CAM_FD, "FD_CDM_CMD: Base[FD_CORE] Offset[0x%8x] Value[0x%8x]", + reg_offset, reg_value); + + return index; +} + +static void cam_fd_hw_util_cdm_callback(uint32_t handle, void *userdata, + enum cam_cdm_cb_status status, void *cookie_arg) +{ + uint64_t cookie = 0; + + if (status != CAM_CDM_CB_STATUS_PAGEFAULT) + cookie = *(uint64_t *) cookie_arg; + + trace_cam_cdm_cb("FD", status); + CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%llu", + handle, userdata, status, cookie); +} + +static void cam_fd_hw_util_enable_power_on_settings(struct cam_hw_info *fd_hw) +{ + struct cam_hw_soc_info *soc_info = &fd_hw->soc_info; + struct cam_fd_hw_static_info *hw_static_info = + ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info; + + if (hw_static_info->enable_errata_wa.single_irq_only == false) { + /* Enable IRQs here */ + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.irq_mask, + hw_static_info->irq_mask); + } + + /* QoS settings */ + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.vbif_req_priority, + hw_static_info->qos_priority); + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.vbif_priority_level, + hw_static_info->qos_priority_level); +} + +int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw, + struct cam_fd_hw_caps *hw_caps) +{ + struct cam_hw_soc_info *soc_info = &fd_hw->soc_info; + struct cam_fd_hw_static_info *hw_static_info = + ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info; + uint32_t reg_value; + + if (!hw_static_info) { + 
CAM_ERR(CAM_FD, "Invalid hw info data"); + return -EINVAL; + } + + reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_CORE, + hw_static_info->core_regs.version); + hw_caps->core_version.major = + CAM_BITS_MASK_SHIFT(reg_value, 0xf00, 0x8); + hw_caps->core_version.minor = + CAM_BITS_MASK_SHIFT(reg_value, 0xf0, 0x4); + hw_caps->core_version.incr = + CAM_BITS_MASK_SHIFT(reg_value, 0xf, 0x0); + + reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.wrapper_version); + hw_caps->wrapper_version.major = + CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c); + hw_caps->wrapper_version.minor = + CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10); + hw_caps->wrapper_version.incr = + CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0); + + hw_caps->raw_results_available = + hw_static_info->results.raw_results_available; + hw_caps->supported_modes = hw_static_info->supported_modes; + + CAM_DBG(CAM_FD, "core:%d.%d.%d wrapper:%d.%d.%d intermediate:%d", + hw_caps->core_version.major, hw_caps->core_version.minor, + hw_caps->core_version.incr, hw_caps->wrapper_version.major, + hw_caps->wrapper_version.minor, hw_caps->wrapper_version.incr, + hw_caps->raw_results_available); + + return 0; +} + +static int cam_fd_hw_util_fdwrapper_sync_reset(struct cam_hw_info *fd_hw) +{ + struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info; + struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info; + struct cam_hw_soc_info *soc_info = &fd_hw->soc_info; + long time_left; + + /* Before triggering reset to HW, clear the reset complete */ + reinit_completion(&fd_core->reset_complete); + + cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE, + hw_static_info->core_regs.control, 0x1); + + if (hw_static_info->enable_errata_wa.single_irq_only) { + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.irq_mask, + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE)); + } + + cam_fd_soc_register_write(soc_info, 
CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.sw_reset, 0x1); + + time_left = cam_common_wait_for_completion_timeout( + &fd_core->reset_complete, + msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT)); + if (time_left <= 0) + CAM_WARN(CAM_FD, "HW reset timeout time_left=%ld", time_left); + + CAM_DBG(CAM_FD, "FD Wrapper SW Sync Reset complete"); + + return 0; +} + + +static int cam_fd_hw_util_fdwrapper_halt(struct cam_hw_info *fd_hw) +{ + struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info; + struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info; + struct cam_hw_soc_info *soc_info = &fd_hw->soc_info; + long time_left; + + /* Before triggering halt to HW, clear halt complete */ + reinit_completion(&fd_core->halt_complete); + + if (hw_static_info->enable_errata_wa.single_irq_only) { + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.irq_mask, + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE)); + } + + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.hw_stop, 0x1); + + time_left = cam_common_wait_for_completion_timeout( + &fd_core->halt_complete, + msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT)); + if (time_left <= 0) + CAM_WARN(CAM_FD, "HW halt timeout time_left=%ld", time_left); + + CAM_DBG(CAM_FD, "FD Wrapper Halt complete"); + + return 0; +} + +static int cam_fd_hw_util_processcmd_prestart(struct cam_hw_info *fd_hw, + struct cam_fd_hw_cmd_prestart_args *prestart_args) +{ + struct cam_hw_soc_info *soc_info = &fd_hw->soc_info; + struct cam_fd_hw_static_info *hw_static_info = + ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info; + struct cam_fd_ctx_hw_private *ctx_hw_private = + prestart_args->ctx_hw_private; + uint32_t size, size_required = 0; + uint32_t mem_base; + uint32_t *cmd_buf_addr = prestart_args->cmd_buf_addr; + uint32_t reg_val_pair[CAM_FD_REG_VAL_PAIR_SIZE]; + uint32_t num_cmds = 0; + uint32_t num_regval_pairs = 0; + int i; + struct 
cam_fd_hw_io_buffer *io_buf; + struct cam_fd_hw_req_private *req_private; + uint32_t available_size = prestart_args->size; + bool work_buffer_configured = false; + + if (!ctx_hw_private || !cmd_buf_addr) { + CAM_ERR(CAM_FD, "Invalid input prestart args %pK %pK", + ctx_hw_private, cmd_buf_addr); + return -EINVAL; + } + + if (prestart_args->get_raw_results && + !hw_static_info->results.raw_results_available) { + CAM_ERR(CAM_FD, "Raw results not supported %d %d", + prestart_args->get_raw_results, + hw_static_info->results.raw_results_available); + return -EINVAL; + } + + req_private = &prestart_args->hw_req_private; + req_private->ctx_hw_private = prestart_args->ctx_hw_private; + req_private->request_id = prestart_args->request_id; + req_private->get_raw_results = prestart_args->get_raw_results; + req_private->fd_results = NULL; + req_private->raw_results = NULL; + + /* Start preparing CDM register values that KMD has to insert */ + num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds, + hw_static_info->core_regs.control, 0x1); + num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds, + hw_static_info->core_regs.control, 0x0); + + for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) { + io_buf = &prestart_args->input_buf[i]; + + if (io_buf->valid == false) + break; + + if (io_buf->io_cfg->direction != CAM_BUF_INPUT) { + CAM_ERR(CAM_FD, "Incorrect direction %d %d", + io_buf->io_cfg->direction, CAM_BUF_INPUT); + return -EINVAL; + } + + switch (io_buf->io_cfg->resource_type) { + case CAM_FD_INPUT_PORT_ID_IMAGE: { + if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) { + CAM_ERR(CAM_FD, + "Invalid reg_val pair size %d, %d", + num_cmds, CAM_FD_REG_VAL_PAIR_SIZE); + return -EINVAL; + } + + num_cmds = cam_fd_cdm_write_reg_val_pair( + reg_val_pair, num_cmds, + hw_static_info->core_regs.image_addr, + io_buf->io_addr[0]); + break; + } + default: + CAM_ERR(CAM_FD, "Invalid resource type %d", + io_buf->io_cfg->resource_type); + return -EINVAL; + } + } + + for (i = 0; i < 
CAM_FD_MAX_IO_BUFFERS; i++) {
		io_buf = &prestart_args->output_buf[i];

		if (io_buf->valid == false)
			break;

		if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
			/*
			 * Fix: log the expected direction CAM_BUF_OUTPUT;
			 * the original message printed CAM_BUF_INPUT, a
			 * copy-paste from the input-buffer loop above.
			 */
			CAM_ERR(CAM_FD, "Incorrect direction %d %d",
				io_buf->io_cfg->direction, CAM_BUF_OUTPUT);
			return -EINVAL;
		}

		switch (io_buf->io_cfg->resource_type) {
		case CAM_FD_OUTPUT_PORT_ID_RESULTS: {
			uint32_t face_results_offset;

			/* Results plane must hold max_faces entries of
			 * per_face_entries 32-bit words each.
			 */
			size_required = hw_static_info->results.max_faces *
				hw_static_info->results.per_face_entries * 4;

			if (io_buf->io_cfg->planes[0].plane_stride <
				size_required) {
				CAM_ERR(CAM_FD, "Invalid results size %d %d",
					io_buf->io_cfg->planes[0].plane_stride,
					size_required);
				return -EINVAL;
			}

			req_private->fd_results =
				(struct cam_fd_results *)io_buf->cpu_addr[0];

			face_results_offset =
				(uint8_t *)&req_private->fd_results->faces[0] -
				(uint8_t *)req_private->fd_results;

			if (hw_static_info->ro_mode_supported) {
				if ((num_cmds + 4) > CAM_FD_REG_VAL_PAIR_SIZE) {
					CAM_ERR(CAM_FD,
						"Invalid reg_val size %d, %d",
						num_cmds,
						CAM_FD_REG_VAL_PAIR_SIZE);
					return -EINVAL;
				}
				/*
				 * Face data actually starts at an offset into
				 * the io buffer; see struct cam_fd_results.
+ */ + num_cmds = cam_fd_cdm_write_reg_val_pair( + reg_val_pair, num_cmds, + hw_static_info->core_regs.result_addr, + io_buf->io_addr[0] + + face_results_offset); + num_cmds = cam_fd_cdm_write_reg_val_pair( + reg_val_pair, num_cmds, + hw_static_info->core_regs.ro_mode, + 0x1); + + req_private->ro_mode_enabled = true; + } else { + req_private->ro_mode_enabled = false; + } + break; + } + case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS: { + size_required = + hw_static_info->results.raw_results_entries * + sizeof(uint32_t); + + if (io_buf->io_cfg->planes[0].plane_stride < + size_required) { + CAM_ERR(CAM_FD, "Invalid results size %d %d", + io_buf->io_cfg->planes[0].plane_stride, + size_required); + return -EINVAL; + } + + req_private->raw_results = + (uint32_t *)io_buf->cpu_addr[0]; + break; + } + case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER: { + if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) { + CAM_ERR(CAM_FD, + "Invalid reg_val pair size %d, %d", + num_cmds, CAM_FD_REG_VAL_PAIR_SIZE); + return -EINVAL; + } + + num_cmds = cam_fd_cdm_write_reg_val_pair( + reg_val_pair, num_cmds, + hw_static_info->core_regs.work_addr, + io_buf->io_addr[0]); + + work_buffer_configured = true; + break; + } + default: + CAM_ERR(CAM_FD, "Invalid resource type %d", + io_buf->io_cfg->resource_type); + return -EINVAL; + } + } + + if (!req_private->fd_results || !work_buffer_configured) { + CAM_ERR(CAM_FD, "Invalid IO Buffers results=%pK work=%d", + req_private->fd_results, work_buffer_configured); + return -EINVAL; + } + + /* First insert CHANGE_BASE command */ + size = ctx_hw_private->cdm_ops->cdm_required_size_changebase(); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > available_size) { + CAM_ERR(CAM_FD, "buf size:%d is not sufficient, expected: %d", + prestart_args->size, size); + return -EINVAL; + } + + mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info, + ((struct cam_fd_soc_private *)soc_info->soc_private)-> + regbase_index[CAM_FD_REG_CORE]); + + if (mem_base == 
-1) { + CAM_ERR(CAM_FD, "failed to get mem_base, index: %d num_reg_map: %u", + ((struct cam_fd_soc_private *)soc_info->soc_private)-> + regbase_index[CAM_FD_REG_CORE], soc_info->num_reg_map); + return -EINVAL; + } + + ctx_hw_private->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base); + cmd_buf_addr += size; + available_size -= (size * 4); + num_regval_pairs = num_cmds / 2; + + if (num_regval_pairs) { + size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random( + num_regval_pairs); + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > available_size) { + CAM_ERR(CAM_FD, + "Insufficient size:%d , expected size:%d", + available_size, size); + return -ENOMEM; + } + ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, + num_regval_pairs, reg_val_pair); + cmd_buf_addr += size; + available_size -= (size * 4); + } else { + CAM_DBG(CAM_FD, "No reg val pairs"); + } + + /* Update pre_config_buf_size in bytes */ + prestart_args->pre_config_buf_size = + prestart_args->size - available_size; + + /* Insert start trigger command into CDM as post config commands. 
*/ + num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, 0, + hw_static_info->core_regs.control, 0x2); + + num_regval_pairs = num_cmds / 2; + + if (num_regval_pairs) { + size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random( + num_regval_pairs); + if ((size * 4) > available_size) { + CAM_ERR(CAM_FD, + "Insufficient size:%d , expected size:%d", + available_size, size); + return -ENOMEM; + } + ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, + num_regval_pairs, reg_val_pair); + cmd_buf_addr += size; + available_size -= (size * 4); + prestart_args->post_config_buf_size = size * 4; + } else { + CAM_DBG(CAM_FD, "No reg val pairs"); + prestart_args->post_config_buf_size = 0; + } + + CAM_DBG(CAM_FD, "PreConfig [%pK %d], PostConfig[%pK %d]", + prestart_args->cmd_buf_addr, prestart_args->pre_config_buf_size, + cmd_buf_addr, prestart_args->post_config_buf_size); + + for (i = 0; i < (prestart_args->pre_config_buf_size + + prestart_args->post_config_buf_size) / 4; i++) + CAM_DBG(CAM_FD, "CDM KMD Commands [%d] : [%pK] [0x%x]", i, + &prestart_args->cmd_buf_addr[i], + prestart_args->cmd_buf_addr[i]); + + return 0; +} + +static int cam_fd_hw_util_processcmd_frame_done(struct cam_hw_info *fd_hw, + struct cam_fd_hw_frame_done_args *frame_done_args) +{ + struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info; + struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info; + struct cam_fd_hw_req_private *req_private; + uint32_t base, face_cnt; + uint32_t *buffer; + unsigned long flags; + int i; + + mutex_lock(&fd_hw->hw_mutex); + spin_lock_irqsave(&fd_core->spin_lock, flags); + if ((fd_core->core_state != CAM_FD_CORE_STATE_IDLE) || + (fd_core->results_valid == false) || + !fd_core->hw_req_private) { + CAM_ERR(CAM_FD, + "Invalid state for results state=%d, results=%d %pK", + fd_core->core_state, fd_core->results_valid, + fd_core->hw_req_private); + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + mutex_unlock(&fd_hw->hw_mutex); + return 
-EINVAL;
	}
	fd_core->core_state = CAM_FD_CORE_STATE_READING_RESULTS;
	req_private = fd_core->hw_req_private;
	spin_unlock_irqrestore(&fd_core->spin_lock, flags);

	/*
	 * Copy the register value as is into output buffers.
	 * Whether we are copying the output data by reading registers or
	 * programming output buffer directly to HW must be transparent to UMD.
	 * In case HW supports writing face count value directly into
	 * DDR memory in future, these values should match.
	 */
	req_private->fd_results->face_count =
		cam_fd_soc_register_read(&fd_hw->soc_info, CAM_FD_REG_CORE,
			hw_static_info->core_regs.result_cnt);

	/* Only the low 6 bits of the result count register hold the face
	 * count — presumably the upper bits carry status; TODO confirm
	 * against the FD core register spec.
	 */
	face_cnt = req_private->fd_results->face_count & 0x3F;

	/* Clamp to the driver's advertised maximum so the copy loop below
	 * never overruns the results buffer sized from max_faces.
	 */
	if (face_cnt > hw_static_info->results.max_faces) {
		CAM_WARN(CAM_FD, "Face count greater than max %d %d",
			face_cnt, hw_static_info->results.max_faces);
		face_cnt = hw_static_info->results.max_faces;
	}

	CAM_DBG(CAM_FD, "ReqID[%lld] Faces Detected = %d",
		req_private->request_id, face_cnt);

	/*
	 * We need to read the face data information from registers only
	 * if one of below is true
	 * 1. RO mode is not set. i.e FD HW doesn't write face data into
	 *    DDR memory
	 * 2. On the current chipset, results written into DDR memory by FD HW
	 *    are not guaranteed to be correct
	 */
	if (!req_private->ro_mode_enabled ||
		hw_static_info->enable_errata_wa.ro_mode_results_invalid) {
		buffer = (uint32_t *)&req_private->fd_results->faces[0];
		base = hw_static_info->core_regs.results_reg_base;

		/*
		 * Write register values as is into face data buffer. It's the
		 * UMD driver's responsibility to interpret the data and
		 * extract face properties from the output buffer. If the
		 * output buffer were directly programmed to HW, KMD would
		 * have no control to extract the face properties and UMD
		 * would have to extract them anyway. So we follow the same
		 * approach and keep this transparent to UMD.
+ */ + for (i = 0; + i < (face_cnt * + hw_static_info->results.per_face_entries); i++) { + *buffer = cam_fd_soc_register_read(&fd_hw->soc_info, + CAM_FD_REG_CORE, base + (i * 0x4)); + CAM_DBG(CAM_FD, "FaceData[%d] : 0x%x", i / 4, *buffer); + buffer++; + } + } + + if (req_private->get_raw_results && + req_private->raw_results && + hw_static_info->results.raw_results_available) { + buffer = req_private->raw_results; + base = hw_static_info->core_regs.raw_results_reg_base; + + for (i = 0; + i < hw_static_info->results.raw_results_entries; + i++) { + *buffer = cam_fd_soc_register_read(&fd_hw->soc_info, + CAM_FD_REG_CORE, base + (i * 0x4)); + CAM_DBG(CAM_FD, "RawData[%d] : 0x%x", i, *buffer); + buffer++; + } + } + + spin_lock_irqsave(&fd_core->spin_lock, flags); + fd_core->hw_req_private = NULL; + fd_core->core_state = CAM_FD_CORE_STATE_IDLE; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + mutex_unlock(&fd_hw->hw_mutex); + + return 0; +} + +static int cam_fd_hw_util_processcmd_hw_dump( + struct cam_hw_info *fd_hw, + void *args) +{ + int i, j; + uint8_t *dst; + uint32_t *addr, *start; + uint32_t num_reg, min_len; + uint64_t remain_len; + struct cam_hw_soc_info *soc_info; + struct cam_fd_hw_dump_header *hdr; + struct cam_fd_hw_dump_args *dump_args; + + if (!fd_hw || !args) { + CAM_ERR(CAM_FD, "Invalid args %pK %pK", + fd_hw, args); + return -EINVAL; + } + + mutex_lock(&fd_hw->hw_mutex); + + if (fd_hw->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_INFO(CAM_FD, "power off state"); + mutex_unlock(&fd_hw->hw_mutex); + return 0; + } + + dump_args = (struct cam_fd_hw_dump_args *)args; + soc_info = &fd_hw->soc_info; + + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_FD, "dump offset overshoot len %zu offset %zu", + dump_args->buf_len, dump_args->offset); + mutex_unlock(&fd_hw->hw_mutex); + return -ENOSPC; + } + + remain_len = dump_args->buf_len - dump_args->offset; + min_len = sizeof(struct cam_fd_hw_dump_header) + + soc_info->reg_map[0].size + 
sizeof(uint32_t); + + if (remain_len < min_len) { + CAM_WARN(CAM_FD, "dump buffer exhaust remain %zu min %u", + remain_len, min_len); + mutex_unlock(&fd_hw->hw_mutex); + return -ENOSPC; + } + + dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_fd_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_FD_HW_DUMP_TAG_MAX_LEN, + "FD_REG:"); + hdr->word_size = sizeof(uint32_t); + addr = (uint32_t *)(dst + sizeof(struct cam_fd_hw_dump_header)); + start = addr; + *addr++ = soc_info->index; + + for (j = 0; j < soc_info->num_reg_map; j++) { + num_reg = soc_info->reg_map[j].size/4; + for (i = 0; i < num_reg; i++) { + *addr++ = soc_info->mem_block[j]->start + i*4; + *addr++ = cam_io_r(soc_info->reg_map[j].mem_base + + (i*4)); + } + } + + mutex_unlock(&fd_hw->hw_mutex); + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_fd_hw_dump_header); + CAM_DBG(CAM_FD, "%zu", dump_args->offset); + return 0; +} + +irqreturn_t cam_fd_hw_irq(int irq_num, void *data) +{ + struct cam_hw_info *fd_hw = (struct cam_hw_info *)data; + struct cam_fd_core *fd_core; + struct cam_hw_soc_info *soc_info; + struct cam_fd_hw_static_info *hw_static_info; + uint32_t reg_value; + enum cam_fd_hw_irq_type irq_type = CAM_FD_IRQ_FRAME_DONE; + uint32_t num_irqs = 0; + + if (!fd_hw) { + CAM_ERR(CAM_FD, "Invalid data in IRQ callback"); + return IRQ_NONE; + } + + fd_core = (struct cam_fd_core *) fd_hw->core_info; + soc_info = &fd_hw->soc_info; + hw_static_info = fd_core->hw_static_info; + + reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.irq_status); + + CAM_DBG(CAM_FD, "FD IRQ status 0x%x", reg_value); + + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.irq_clear, + reg_value); + + if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE)) { + complete_all(&fd_core->halt_complete); + irq_type = CAM_FD_IRQ_HALT_DONE; + num_irqs++; + } + + if (reg_value & 
CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE)) { + complete_all(&fd_core->reset_complete); + irq_type = CAM_FD_IRQ_RESET_DONE; + num_irqs++; + } + + if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE)) { + complete_all(&fd_core->processing_complete); + irq_type = CAM_FD_IRQ_FRAME_DONE; + num_irqs++; + } + + /* + * We should never get an IRQ callback with no or more than one mask. + * Validate first to make sure nothing going wrong. + */ + if (num_irqs != 1) { + CAM_ERR(CAM_FD, + "Invalid number of IRQs, value=0x%x, num_irqs=%d", + reg_value, num_irqs); + return IRQ_NONE; + } + + trace_cam_irq_activated("FD", irq_type); + + if (irq_type == CAM_FD_IRQ_HALT_DONE) { + /* + * Do not send HALT IRQ callback to Hw Mgr, + * a reset would always follow + */ + return IRQ_HANDLED; + } + + spin_lock(&fd_core->spin_lock); + /* Do not change state to IDLE on HALT IRQ. Reset must follow halt */ + if ((irq_type == CAM_FD_IRQ_RESET_DONE) || + (irq_type == CAM_FD_IRQ_FRAME_DONE)) { + + fd_core->core_state = CAM_FD_CORE_STATE_IDLE; + if (irq_type == CAM_FD_IRQ_FRAME_DONE) + fd_core->results_valid = true; + + CAM_DBG(CAM_FD, "FD IRQ type %d, state=%d", + irq_type, fd_core->core_state); + } + spin_unlock(&fd_core->spin_lock); + + if (fd_core->irq_cb.cam_fd_hw_mgr_cb) + fd_core->irq_cb.cam_fd_hw_mgr_cb(fd_core->irq_cb.data, + irq_type); + + return IRQ_HANDLED; +} + +int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args, + uint32_t arg_size) +{ + struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv; + struct cam_fd_core *fd_core; + struct cam_fd_hw_caps *fd_hw_caps = + (struct cam_fd_hw_caps *)get_hw_cap_args; + + if (!hw_priv || !get_hw_cap_args) { + CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK", + hw_priv, get_hw_cap_args); + return -EINVAL; + } + + fd_core = (struct cam_fd_core *)fd_hw->core_info; + *fd_hw_caps = fd_core->hw_caps; + + CAM_DBG(CAM_FD, "core:%d.%d wrapper:%d.%d mode:%d, raw:%d", + fd_hw_caps->core_version.major, + fd_hw_caps->core_version.minor, + 
fd_hw_caps->wrapper_version.major, + fd_hw_caps->wrapper_version.minor, + fd_hw_caps->supported_modes, + fd_hw_caps->raw_results_available); + + return 0; +} + +int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv; + struct cam_fd_core *fd_core; + struct cam_fd_hw_init_args *init_args = + (struct cam_fd_hw_init_args *)init_hw_args; + int rc = 0; + unsigned long flags; + + if (!fd_hw || !init_args) { + CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_fd_hw_init_args)) { + CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size, + sizeof(struct cam_fd_hw_init_args)); + return -EINVAL; + } + + fd_core = (struct cam_fd_core *)fd_hw->core_info; + + mutex_lock(&fd_hw->hw_mutex); + CAM_DBG(CAM_FD, "FD HW Init ref count before %d", fd_hw->open_count); + + if (fd_hw->open_count > 0) { + rc = 0; + goto cdm_streamon; + } + + rc = cam_fd_soc_enable_resources(&fd_hw->soc_info); + if (rc) { + CAM_ERR(CAM_FD, "Enable SOC failed, rc=%d", rc); + goto unlock_return; + } + + spin_lock_irqsave(&fd_core->spin_lock, flags); + fd_hw->hw_state = CAM_HW_STATE_POWER_UP; + fd_core->core_state = CAM_FD_CORE_STATE_IDLE; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + + if (init_args->reset_required) { + rc = cam_fd_hw_reset(hw_priv, NULL, 0); + if (rc) { + CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc); + goto disable_soc; + } + init_args->is_hw_reset = true; + } + + cam_fd_hw_util_enable_power_on_settings(fd_hw); + +cdm_streamon: + fd_hw->open_count++; + CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count); + + if (init_args->ctx_hw_private) { + struct cam_fd_ctx_hw_private *ctx_hw_private = + init_args->ctx_hw_private; + + rc = cam_cdm_stream_on(ctx_hw_private->cdm_handle); + if (rc) { + CAM_ERR(CAM_FD, "CDM StreamOn fail :handle=0x%x, rc=%d", + ctx_hw_private->cdm_handle, rc); + fd_hw->open_count--; + if 
(!fd_hw->open_count) + goto disable_soc; + } + } + + mutex_unlock(&fd_hw->hw_mutex); + + return rc; + +disable_soc: + if (cam_fd_soc_disable_resources(&fd_hw->soc_info)) + CAM_ERR(CAM_FD, "Error in disable soc resources"); + + spin_lock_irqsave(&fd_core->spin_lock, flags); + fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); +unlock_return: + mutex_unlock(&fd_hw->hw_mutex); + return rc; +} + +int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *fd_hw = hw_priv; + struct cam_fd_core *fd_core = NULL; + struct cam_fd_hw_deinit_args *deinit_args = + (struct cam_fd_hw_deinit_args *)deinit_hw_args; + int rc = 0; + unsigned long flags; + + if (!fd_hw || !deinit_hw_args) { + CAM_ERR(CAM_FD, "Invalid argument"); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) { + CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size, + sizeof(struct cam_fd_hw_deinit_args)); + return -EINVAL; + } + + mutex_lock(&fd_hw->hw_mutex); + if (fd_hw->open_count == 0) { + mutex_unlock(&fd_hw->hw_mutex); + CAM_ERR(CAM_FD, "Error Unbalanced deinit"); + return -EFAULT; + } + + fd_hw->open_count--; + CAM_DBG(CAM_FD, "FD HW ref count=%d", fd_hw->open_count); + + if (fd_hw->open_count > 0) { + rc = 0; + goto positive_ref_cnt; + } + + rc = cam_fd_soc_disable_resources(&fd_hw->soc_info); + if (rc) + CAM_ERR(CAM_FD, "Failed in Disable SOC, rc=%d", rc); + + fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + fd_core = (struct cam_fd_core *)fd_hw->core_info; + + /* With the ref_cnt correct, this should never happen */ + WARN_ON(!fd_core); + + spin_lock_irqsave(&fd_core->spin_lock, flags); + fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); +positive_ref_cnt: + if (deinit_args->ctx_hw_private) { + struct cam_fd_ctx_hw_private *ctx_hw_private = + deinit_args->ctx_hw_private; + + rc = 
cam_cdm_stream_off(ctx_hw_private->cdm_handle); + if (rc) { + CAM_ERR(CAM_FD, + "Failed in CDM StreamOff, handle=0x%x, rc=%d", + ctx_hw_private->cdm_handle, rc); + } + } + + mutex_unlock(&fd_hw->hw_mutex); + return rc; +} + +int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size) +{ + struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv; + struct cam_fd_core *fd_core; + struct cam_fd_hw_static_info *hw_static_info; + struct cam_hw_soc_info *soc_info; + unsigned long flags; + int rc; + + if (!fd_hw) { + CAM_ERR(CAM_FD, "Invalid input handle"); + return -EINVAL; + } + + fd_core = (struct cam_fd_core *)fd_hw->core_info; + hw_static_info = fd_core->hw_static_info; + soc_info = &fd_hw->soc_info; + + spin_lock_irqsave(&fd_core->spin_lock, flags); + if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) || + (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) { + CAM_ERR(CAM_FD, "Reset not allowed in %d state", + fd_core->core_state); + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + return -EINVAL; + } + + fd_core->results_valid = false; + fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.cgc_disable, 0x1); + + rc = cam_fd_hw_util_fdwrapper_halt(fd_hw); + if (rc) { + CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc); + return rc; + } + + rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw); + if (rc) { + CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc); + return rc; + } + + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.cgc_disable, 0x0); + + spin_lock_irqsave(&fd_core->spin_lock, flags); + fd_core->core_state = CAM_FD_CORE_STATE_IDLE; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + + return rc; +} + +int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size) +{ + struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv; + struct 
cam_fd_core *fd_core; + struct cam_fd_hw_static_info *hw_static_info; + struct cam_fd_hw_cmd_start_args *start_args = + (struct cam_fd_hw_cmd_start_args *)hw_start_args; + struct cam_fd_ctx_hw_private *ctx_hw_private; + unsigned long flags; + int rc; + + if (!hw_priv || !start_args) { + CAM_ERR(CAM_FD, "Invalid input args %pK %pK", hw_priv, + start_args); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) { + CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size, + sizeof(struct cam_fd_hw_cmd_start_args)); + return -EINVAL; + } + + fd_core = (struct cam_fd_core *)fd_hw->core_info; + hw_static_info = fd_core->hw_static_info; + + spin_lock_irqsave(&fd_core->spin_lock, flags); + if (fd_core->core_state != CAM_FD_CORE_STATE_IDLE) { + CAM_ERR(CAM_FD, "Cannot start in %d state", + fd_core->core_state); + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + return -EINVAL; + } + + /* + * We are about to start FD HW processing, save the request + * private data which is being processed by HW. Once the frame + * processing is finished, process_cmd(FRAME_DONE) should be called + * with same hw_req_private as input. 
+ */ + fd_core->hw_req_private = start_args->hw_req_private; + fd_core->core_state = CAM_FD_CORE_STATE_PROCESSING; + fd_core->results_valid = false; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + + ctx_hw_private = start_args->ctx_hw_private; + + /* Before starting HW process, clear processing complete */ + reinit_completion(&fd_core->processing_complete); + + if (hw_static_info->enable_errata_wa.single_irq_only) { + cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.irq_mask, + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE)); + } + + if (start_args->num_hw_update_entries > 0) { + struct cam_cdm_bl_request *cdm_cmd = ctx_hw_private->cdm_cmd; + struct cam_hw_update_entry *cmd; + int i; + + cdm_cmd->cmd_arrary_count = start_args->num_hw_update_entries; + cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE; + cdm_cmd->flag = false; + cdm_cmd->userdata = NULL; + cdm_cmd->cookie = 0; + cdm_cmd->gen_irq_arb = false; + + for (i = 0 ; i <= start_args->num_hw_update_entries; i++) { + cmd = (start_args->hw_update_entries + i); + cdm_cmd->cmd_flex[i].bl_addr.mem_handle = cmd->handle; + cdm_cmd->cmd_flex[i].offset = cmd->offset; + cdm_cmd->cmd_flex[i].len = cmd->len; + cdm_cmd->cmd_flex[i].arbitrate = false; + } + + rc = cam_cdm_submit_bls(ctx_hw_private->cdm_handle, cdm_cmd); + if (rc) { + CAM_ERR(CAM_FD, + "Failed to submit cdm commands, rc=%d", rc); + goto error; + } + } else { + CAM_ERR(CAM_FD, "Invalid number of hw update entries"); + rc = -EINVAL; + goto error; + } + + return 0; +error: + spin_lock_irqsave(&fd_core->spin_lock, flags); + fd_core->core_state = CAM_FD_CORE_STATE_IDLE; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + + return rc; +} + +int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size) +{ + struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv; + struct cam_fd_core *fd_core; + struct cam_fd_hw_static_info *hw_static_info; + struct cam_hw_soc_info *soc_info; + unsigned long flags; 
+ int rc; + + if (!fd_hw) { + CAM_ERR(CAM_FD, "Invalid input handle"); + return -EINVAL; + } + + fd_core = (struct cam_fd_core *)fd_hw->core_info; + hw_static_info = fd_core->hw_static_info; + soc_info = &fd_hw->soc_info; + + spin_lock_irqsave(&fd_core->spin_lock, flags); + if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) || + (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) { + CAM_ERR(CAM_FD, "Reset not allowed in %d state", + fd_core->core_state); + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + return -EINVAL; + } + + fd_core->results_valid = false; + fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.cgc_disable, 0x1); + + rc = cam_fd_hw_util_fdwrapper_halt(fd_hw); + if (rc) { + CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc); + return rc; + } + + /* HALT must be followed by RESET */ + rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw); + if (rc) { + CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc); + return rc; + } + + cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER, + hw_static_info->wrapper_regs.cgc_disable, 0x0); + + spin_lock_irqsave(&fd_core->spin_lock, flags); + fd_core->core_state = CAM_FD_CORE_STATE_IDLE; + spin_unlock_irqrestore(&fd_core->spin_lock, flags); + + return rc; +} + +int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size) +{ + struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv; + int rc = -EINVAL; + struct cam_fd_ctx_hw_private *ctx_hw_private; + struct cam_fd_hw_reserve_args *reserve_args = + (struct cam_fd_hw_reserve_args *)hw_reserve_args; + struct cam_cdm_acquire_data cdm_acquire; + struct cam_cdm_bl_request *cdm_cmd; + int i; + + if (!fd_hw || !reserve_args) { + CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, reserve_args); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) { + CAM_ERR(CAM_FD, "Invalid arg 
size %u, %zu", arg_size, + sizeof(struct cam_fd_hw_reserve_args)); + return -EINVAL; + } + + cdm_cmd = CAM_MEM_ZALLOC(((sizeof(struct cam_cdm_bl_request)) + + ((CAM_FD_MAX_HW_ENTRIES - 1) * + sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL); + if (!cdm_cmd) + return -ENOMEM; + + ctx_hw_private = CAM_MEM_ZALLOC(sizeof(struct cam_fd_ctx_hw_private), + GFP_KERNEL); + if (!ctx_hw_private) { + CAM_MEM_FREE(cdm_cmd); + return -ENOMEM; + } + + memset(&cdm_acquire, 0, sizeof(cdm_acquire)); + strscpy(cdm_acquire.identifier, "fd", sizeof("fd")); + cdm_acquire.cell_index = fd_hw->soc_info.index; + cdm_acquire.handle = 0; + cdm_acquire.userdata = ctx_hw_private; + cdm_acquire.cam_cdm_callback = cam_fd_hw_util_cdm_callback; + cdm_acquire.id = CAM_CDM_VIRTUAL; + cdm_acquire.base_array_cnt = fd_hw->soc_info.num_reg_map; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; + for (i = 0; i < fd_hw->soc_info.num_reg_map; i++) + cdm_acquire.base_array[i] = &fd_hw->soc_info.reg_map[i]; + + rc = cam_cdm_acquire(&cdm_acquire); + if (rc) { + CAM_ERR(CAM_FD, "Failed to acquire the CDM HW"); + goto error; + } + + ctx_hw_private->hw_ctx = reserve_args->hw_ctx; + ctx_hw_private->fd_hw = fd_hw; + ctx_hw_private->mode = reserve_args->mode; + ctx_hw_private->cdm_handle = cdm_acquire.handle; + ctx_hw_private->cdm_ops = cdm_acquire.ops; + ctx_hw_private->cdm_cmd = cdm_cmd; + + reserve_args->ctx_hw_private = ctx_hw_private; + + CAM_DBG(CAM_FD, "private=%pK, hw_ctx=%pK, mode=%d, cdm_handle=0x%x", + ctx_hw_private, ctx_hw_private->hw_ctx, ctx_hw_private->mode, + ctx_hw_private->cdm_handle); + + return 0; +error: + CAM_MEM_FREE(ctx_hw_private); + CAM_MEM_FREE(cdm_cmd); + return rc; +} + +int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size) +{ + struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv; + int rc = -EINVAL; + struct cam_fd_ctx_hw_private *ctx_hw_private; + struct cam_fd_hw_release_args *release_args = + (struct cam_fd_hw_release_args *)hw_release_args; + + if (!fd_hw || 
+ !release_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, release_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_release_args));
+		return -EINVAL;
+	}
+
+	ctx_hw_private =
+		(struct cam_fd_ctx_hw_private *)release_args->ctx_hw_private;
+
+	/*
+	 * Guard against a context that was never reserved (or was already
+	 * released): ctx_hw_private may be NULL and was dereferenced
+	 * unconditionally below.
+	 */
+	if (!ctx_hw_private) {
+		CAM_ERR(CAM_FD, "Invalid ctx_hw_private");
+		return -EINVAL;
+	}
+
+	rc = cam_cdm_release(ctx_hw_private->cdm_handle);
+	if (rc)
+		CAM_ERR(CAM_FD, "Release cdm handle failed, handle=0x%x, rc=%d",
+			ctx_hw_private->cdm_handle, rc);
+
+	CAM_MEM_FREE(ctx_hw_private->cdm_cmd);
+	CAM_MEM_FREE(ctx_hw_private);
+	release_args->ctx_hw_private = NULL;
+
+	return 0;
+}
+
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_FD_HW_CMD_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK %d", hw_priv,
+			cmd_args, cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_FD_HW_CMD_REGISTER_CALLBACK: {
+		struct cam_fd_hw_cmd_set_irq_cb *irq_cb_args;
+		struct cam_fd_core *fd_core =
+			(struct cam_fd_core *)fd_hw->core_info;
+
+		if (sizeof(struct cam_fd_hw_cmd_set_irq_cb) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		irq_cb_args = (struct cam_fd_hw_cmd_set_irq_cb *)cmd_args;
+		fd_core->irq_cb.cam_fd_hw_mgr_cb =
+			irq_cb_args->cam_fd_hw_mgr_cb;
+		fd_core->irq_cb.data = irq_cb_args->data;
+		rc = 0;
+		break;
+	}
+	case CAM_FD_HW_CMD_PRESTART: {
+		struct cam_fd_hw_cmd_prestart_args *prestart_args;
+
+		if (sizeof(struct cam_fd_hw_cmd_prestart_args) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		prestart_args = (struct cam_fd_hw_cmd_prestart_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_prestart(fd_hw, prestart_args);
+		break;
+	}
+	case CAM_FD_HW_CMD_FRAME_DONE: {
+		struct
cam_fd_hw_frame_done_args *cmd_frame_results; + + if (sizeof(struct cam_fd_hw_frame_done_args) != + arg_size) { + CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d", + cmd_type, arg_size); + break; + } + + cmd_frame_results = + (struct cam_fd_hw_frame_done_args *)cmd_args; + rc = cam_fd_hw_util_processcmd_frame_done(fd_hw, + cmd_frame_results); + break; + } + case CAM_FD_HW_CMD_HW_DUMP: { + rc = cam_fd_hw_util_processcmd_hw_dump(fd_hw, + cmd_args); + break; + } + default: + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h new file mode 100644 index 0000000000..22bcdf6fc7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_FD_HW_CORE_H_ +#define _CAM_FD_HW_CORE_H_ + +#include +#include +#include +#include +#include + +#include "cam_common_util.h" +#include "cam_debug_util.h" +#include "cam_io_util.h" +#include "cam_cpas_api.h" +#include "cam_cdm_intf_api.h" +#include "cam_fd_hw_intf.h" +#include "cam_fd_hw_soc.h" + +#define CAM_FD_IRQ_TO_MASK(irq) (1 << (irq)) +#define CAM_FD_MASK_TO_IRQ(mask, irq) ((mask) >> (irq)) + +#define CAM_FD_HW_HALT_RESET_TIMEOUT 750 + +/** + * enum cam_fd_core_state - FD Core internal states + * + * @CAM_FD_CORE_STATE_POWERDOWN : Indicates FD core is powered down + * @CAM_FD_CORE_STATE_IDLE : Indicates FD HW is in idle state. 
+ * Core can be in this state when it is + * ready to process frames or when + * processing is finished and results are + * available + * @CAM_FD_CORE_STATE_PROCESSING : Indicates FD core is processing frame + * @CAM_FD_CORE_STATE_READING_RESULTS : Indicates results are being read from + * FD core + * @CAM_FD_CORE_STATE_RESET_PROGRESS : Indicates FD Core is in reset state + */ +enum cam_fd_core_state { + CAM_FD_CORE_STATE_POWERDOWN, + CAM_FD_CORE_STATE_IDLE, + CAM_FD_CORE_STATE_PROCESSING, + CAM_FD_CORE_STATE_READING_RESULTS, + CAM_FD_CORE_STATE_RESET_PROGRESS, +}; + +/** + * struct cam_fd_ctx_hw_private : HW private information for a specific hw ctx. + * This information is populated by HW layer on + * reserve() and given back to HW Mgr as private + * data for the hw context. This private_data + * has to be passed by HW Mgr layer while + * further HW layer calls + * + * @hw_ctx : Corresponding hw_ctx pointer + * @fd_hw : FD HW info pointer + * @cdm_handle : CDM Handle for this context + * @cdm_ops : CDM Ops + * @cdm_cmd : CDM command pointer + * @mode : Mode this context is running + * @curr_req_private : Current Request information + * + */ +struct cam_fd_ctx_hw_private { + void *hw_ctx; + struct cam_hw_info *fd_hw; + uint32_t cdm_handle; + struct cam_cdm_utils_ops *cdm_ops; + struct cam_cdm_bl_request *cdm_cmd; + enum cam_fd_hw_mode mode; + struct cam_fd_hw_req_private *curr_req_private; +}; + +/** + * struct cam_fd_core_regs : FD HW Core register offsets info + * + * @version : Offset of version register + * @control : Offset of control register + * @result_cnt : Offset of result count register + * @result_addr : Offset of results address register + * @image_addr : Offset of image address register + * @work_addr : Offset of work address register + * @ro_mode : Offset of ro_mode register + * @results_reg_base : Offset of results_reg_base register + * @raw_results_reg_base : Offset of raw_results_reg_base register + * + */ +struct cam_fd_core_regs { + uint32_t 
version;
+	uint32_t control;
+	uint32_t result_cnt;
+	uint32_t result_addr;
+	uint32_t image_addr;
+	uint32_t work_addr;
+	uint32_t ro_mode;
+	uint32_t results_reg_base;
+	uint32_t raw_results_reg_base;
+};
+
+/**
+ * struct cam_fd_wrapper_regs : FD HW Wrapper register offsets info
+ *
+ * @wrapper_version : Offset of wrapper_version register
+ * @cgc_disable : Offset of cgc_disable register
+ * @hw_stop : Offset of hw_stop register
+ * @sw_reset : Offset of sw_reset register
+ * @vbif_req_priority : Offset of vbif_req_priority register
+ * @vbif_priority_level : Offset of vbif_priority_level register
+ * @vbif_done_status : Offset of vbif_done_status register
+ * @irq_mask : Offset of irq mask register
+ * @irq_status : Offset of irq status register
+ * @irq_clear : Offset of irq clear register
+ *
+ */
+struct cam_fd_wrapper_regs {
+	uint32_t wrapper_version;
+	uint32_t cgc_disable;
+	uint32_t hw_stop;
+	uint32_t sw_reset;
+	uint32_t vbif_req_priority;
+	uint32_t vbif_priority_level;
+	uint32_t vbif_done_status;
+	uint32_t irq_mask;
+	uint32_t irq_status;
+	uint32_t irq_clear;
+};
+
+/**
+ * struct cam_fd_hw_errata_wa : FD HW Errata workaround enable/disable info
+ *
+ * @single_irq_only : Whether to enable only one irq at any time
+ * @ro_mode_enable_always : Whether to enable ro mode always
+ * @ro_mode_results_invalid : Whether results written directly into output
+ * memory by HW are valid or not
+ * @skip_reset : Whether to skip reset during init
+ */
+struct cam_fd_hw_errata_wa {
+	bool single_irq_only;
+	bool ro_mode_enable_always;
+	bool ro_mode_results_invalid;
+	bool skip_reset;
+};
+
+/**
+ * struct cam_fd_hw_results_prop : FD HW Results properties
+ *
+ * @max_faces : Maximum number of faces supported
+ * @per_face_entries : Number of register with properties for each face
+ * @raw_results_entries : Number of raw results entries for the full search
+ * @raw_results_available : Whether raw results available on this HW
+ *
+ */
+struct
cam_fd_hw_results_prop { + uint32_t max_faces; + uint32_t per_face_entries; + uint32_t raw_results_entries; + bool raw_results_available; +}; + +/** + * struct cam_fd_hw_static_info : FD HW information based on HW version + * + * @core_version : Core version of FD HW + * @wrapper_version : Wrapper version of FD HW + * @core_regs : Register offset information for core registers + * @wrapper_regs : Register offset information for wrapper registers + * @results : Information about results available on this HW + * @enable_errata_wa : Errata workaround information + * @irq_mask : IRQ mask to enable + * @qos_priority : QoS priority setting for this chipset + * @qos_priority_level : QoS priority level setting for this chipset + * @supported_modes : Supported HW modes on this HW version + * @ro_mode_supported : Whether RO mode is supported on this HW + * + */ +struct cam_fd_hw_static_info { + struct cam_hw_version core_version; + struct cam_hw_version wrapper_version; + struct cam_fd_core_regs core_regs; + struct cam_fd_wrapper_regs wrapper_regs; + struct cam_fd_hw_results_prop results; + struct cam_fd_hw_errata_wa enable_errata_wa; + uint32_t irq_mask; + uint32_t qos_priority; + uint32_t qos_priority_level; + uint32_t supported_modes; + bool ro_mode_supported; +}; + +/** + * struct cam_fd_core : FD HW core data structure + * + * @hw_static_info : HW information specific to version + * @hw_caps : HW capabilities + * @core_state : Current HW state + * @processing_complete : Whether processing is complete + * @reset_complete : Whether reset is complete + * @halt_complete : Whether halt is complete + * @hw_req_private : Request that is being currently processed by HW + * @results_valid : Whether HW frame results are available to get + * @spin_lock : Mutex to protect shared data in hw layer + * @irq_cb : HW Manager callback information + * + */ +struct cam_fd_core { + struct cam_fd_hw_static_info *hw_static_info; + struct cam_fd_hw_caps hw_caps; + enum cam_fd_core_state 
core_state; + struct completion processing_complete; + struct completion reset_complete; + struct completion halt_complete; + struct cam_fd_hw_req_private *hw_req_private; + bool results_valid; + spinlock_t spin_lock; + struct cam_fd_hw_cmd_set_irq_cb irq_cb; +}; + +int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw, + struct cam_fd_hw_caps *hw_caps); +irqreturn_t cam_fd_hw_irq(int irq_num, void *data); + +int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args, + uint32_t arg_size); +int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size); +int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size); +int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size); +int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size); +int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size); +int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size); +int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size); +int cam_fd_hw_read(void *hw_priv, void *read_args, uint32_t arg_size); +int cam_fd_hw_write(void *hw_priv, void *write_args, uint32_t arg_size); +int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); + +#endif /* _CAM_FD_HW_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c new file mode 100644 index 0000000000..58af20cf8d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "cam_subdev.h" +#include "cam_fd_hw_intf.h" +#include "cam_fd_hw_core.h" +#include "cam_fd_hw_soc.h" +#include "cam_fd_hw_v41.h" +#include "cam_fd_hw_v501.h" +#include "cam_fd_hw_v600.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static int cam_fd_hw_dev_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *fd_hw; + struct cam_hw_intf *fd_hw_intf; + struct cam_fd_core *fd_core; + const struct of_device_id *match_dev = NULL; + struct cam_fd_hw_static_info *hw_static_info = NULL; + int rc = 0; + uint32_t hw_idx; + struct cam_fd_hw_init_args init_args; + struct cam_fd_hw_deinit_args deinit_args; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + fd_hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!fd_hw_intf) + return -ENOMEM; + + fd_hw = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!fd_hw) { + CAM_MEM_FREE(fd_hw_intf); + return -ENOMEM; + } + + fd_core = CAM_MEM_ZALLOC(sizeof(struct cam_fd_core), GFP_KERNEL); + if (!fd_core) { + CAM_MEM_FREE(fd_hw); + CAM_MEM_FREE(fd_hw_intf); + return -ENOMEM; + } + of_property_read_u32(pdev->dev.of_node, + "cell-index", &hw_idx); + + fd_hw_intf->hw_priv = fd_hw; + fd_hw->core_info = fd_core; + fd_hw_intf->hw_idx = hw_idx; + + fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + fd_hw->soc_info.pdev = pdev; + fd_hw->soc_info.dev = &pdev->dev; + fd_hw->soc_info.dev_name = pdev->name; + fd_hw->open_count = 0; + mutex_init(&fd_hw->hw_mutex); + spin_lock_init(&fd_hw->hw_lock); + init_completion(&fd_hw->hw_complete); + + spin_lock_init(&fd_core->spin_lock); + init_completion(&fd_core->processing_complete); + init_completion(&fd_core->halt_complete); + init_completion(&fd_core->reset_complete); + + fd_hw_intf->hw_ops.get_hw_caps = 
cam_fd_hw_get_hw_caps; + fd_hw_intf->hw_ops.init = cam_fd_hw_init; + fd_hw_intf->hw_ops.deinit = cam_fd_hw_deinit; + fd_hw_intf->hw_ops.reset = cam_fd_hw_reset; + fd_hw_intf->hw_ops.reserve = cam_fd_hw_reserve; + fd_hw_intf->hw_ops.release = cam_fd_hw_release; + fd_hw_intf->hw_ops.start = cam_fd_hw_start; + fd_hw_intf->hw_ops.stop = cam_fd_hw_halt_reset; + fd_hw_intf->hw_ops.read = NULL; + fd_hw_intf->hw_ops.write = NULL; + fd_hw_intf->hw_ops.process_cmd = cam_fd_hw_process_cmd; + fd_hw_intf->hw_type = CAM_HW_FD; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev || !match_dev->data) { + CAM_ERR(CAM_FD, "No Of_match data, %pK", match_dev); + rc = -EINVAL; + goto free_memory; + } + hw_static_info = (struct cam_fd_hw_static_info *)match_dev->data; + fd_core->hw_static_info = hw_static_info; + + CAM_DBG(CAM_FD, "HW Static Info : version core[%d.%d] wrapper[%d.%d]", + hw_static_info->core_version.major, + hw_static_info->core_version.minor, + hw_static_info->wrapper_version.major, + hw_static_info->wrapper_version.minor); + + rc = cam_fd_soc_init_resources(&fd_hw->soc_info, cam_fd_hw_irq, fd_hw); + if (rc) { + CAM_ERR(CAM_FD, "Failed to init soc, rc=%d", rc); + goto free_memory; + } + + memset(&init_args, 0x0, sizeof(init_args)); + memset(&deinit_args, 0x0, sizeof(deinit_args)); + init_args.reset_required = true; + rc = cam_fd_hw_init(fd_hw, &init_args, sizeof(init_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed to hw init, rc=%d", rc); + goto deinit_platform_res; + } + + rc = cam_fd_hw_util_get_hw_caps(fd_hw, &fd_core->hw_caps); + if (rc) { + CAM_ERR(CAM_FD, "Failed to get hw caps, rc=%d", rc); + goto deinit_hw; + } + + rc = cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args)); + if (rc) { + CAM_ERR(CAM_FD, "Failed to deinit hw, rc=%d", rc); + goto deinit_platform_res; + } + + platform_set_drvdata(pdev, fd_hw_intf); + CAM_DBG(CAM_FD, "FD:%d component bound successfully", + fd_hw_intf->hw_idx); + 
CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +deinit_hw: + if (cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args))) + CAM_ERR(CAM_FD, "Failed in hw deinit"); +deinit_platform_res: + if (cam_fd_soc_deinit_resources(&fd_hw->soc_info)) + CAM_ERR(CAM_FD, "Failed in soc deinit"); + mutex_destroy(&fd_hw->hw_mutex); +free_memory: + CAM_MEM_FREE(fd_hw); + CAM_MEM_FREE(fd_hw_intf); + CAM_MEM_FREE(fd_core); + + return rc; +} + +static void cam_fd_hw_dev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct cam_hw_intf *fd_hw_intf; + struct cam_hw_info *fd_hw; + struct cam_fd_core *fd_core; + struct platform_device *pdev = to_platform_device(dev); + + fd_hw_intf = platform_get_drvdata(pdev); + if (!fd_hw_intf) { + CAM_ERR(CAM_FD, "Invalid fd_hw_intf from pdev"); + return; + } + + fd_hw = fd_hw_intf->hw_priv; + if (!fd_hw) { + CAM_ERR(CAM_FD, "Invalid fd_hw from fd_hw_intf"); + goto free_fd_hw_intf; + } + + fd_core = (struct cam_fd_core *)fd_hw->core_info; + if (!fd_core) { + CAM_ERR(CAM_FD, "Invalid fd_core from fd_hw"); + goto deinit_platform_res; + } + + CAM_MEM_FREE(fd_core); + +deinit_platform_res: + rc = cam_fd_soc_deinit_resources(&fd_hw->soc_info); + if (rc) + CAM_ERR(CAM_FD, "Error in FD soc deinit, rc=%d", rc); + + mutex_destroy(&fd_hw->hw_mutex); + CAM_MEM_FREE(fd_hw); + +free_fd_hw_intf: + CAM_MEM_FREE(fd_hw_intf); +} + +const static struct component_ops cam_fd_hw_dev_component_ops = { + .bind = cam_fd_hw_dev_component_bind, + .unbind = cam_fd_hw_dev_component_unbind, +}; + +static int cam_fd_hw_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_FD, "Adding FD HW dev component"); + rc = component_add(&pdev->dev, &cam_fd_hw_dev_component_ops); + if (rc) + CAM_ERR(CAM_FD, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_fd_hw_dev_remove(struct 
platform_device *pdev) +{ + component_del(&pdev->dev, &cam_fd_hw_dev_component_ops); + return 0; +} + +static const struct of_device_id cam_fd_hw_dt_match[] = { + { + .compatible = "qcom,fd41", + .data = &cam_fd_wrapper120_core410_info, + }, + { + .compatible = "qcom,fd501", + .data = &cam_fd_wrapper200_core501_info, + }, + { + .compatible = "qcom,fd600", + .data = &cam_fd_wrapper200_core600_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_fd_hw_dt_match); + +struct platform_driver cam_fd_hw_driver = { + .probe = cam_fd_hw_dev_probe, + .remove = cam_fd_hw_dev_remove, + .driver = { + .name = "cam_fd_hw", + .owner = THIS_MODULE, + .of_match_table = cam_fd_hw_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_fd_hw_init_module(void) +{ + return platform_driver_register(&cam_fd_hw_driver); +} + +void cam_fd_hw_exit_module(void) +{ + platform_driver_unregister(&cam_fd_hw_driver); +} + +MODULE_DESCRIPTION("CAM FD HW driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h new file mode 100644 index 0000000000..e7a7dea81b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_FD_HW_INTF_H_ +#define _CAM_FD_HW_INTF_H_ + +#include +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_soc_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_subdev.h" +#include "cam_cpas_api.h" +#include "cam_hw_mgr_intf.h" +#include "cam_debug_util.h" + +#define CAM_FD_MAX_IO_BUFFERS 5 +#define CAM_FD_MAX_HW_ENTRIES 5 +#define CAM_FD_HW_DUMP_TAG_MAX_LEN 128 +#define CAM_FD_HW_DUMP_NUM_WORDS 5 + +/** + * enum cam_fd_hw_type - Enum for FD HW type + * + * @CAM_HW_FD : FaceDetection HW type + */ +enum cam_fd_hw_type { + CAM_HW_FD, +}; + +/** + * enum cam_fd_hw_mode - Mode in which HW can run + * + * @CAM_FD_MODE_FACEDETECTION : Face Detection mode in which face search + * is done on the given frame + * @CAM_FD_MODE_PYRAMID : Pyramid mode where a pyramid image is generated + * from an input image + */ +enum cam_fd_hw_mode { + CAM_FD_MODE_FACEDETECTION = 0x1, + CAM_FD_MODE_PYRAMID = 0x2, +}; + +/** + * enum cam_fd_priority - FD priority levels + * + * @CAM_FD_PRIORITY_HIGH : Indicates high priority client, driver prioritizes + * frame requests coming from contexts with HIGH + * priority compared to context with normal priority + * @CAM_FD_PRIORITY_NORMAL : Indicates normal priority client + */ +enum cam_fd_priority { + CAM_FD_PRIORITY_HIGH = 0x0, + CAM_FD_PRIORITY_NORMAL, +}; + +/** + * enum cam_fd_hw_irq_type - FD HW IRQ types + * + * @CAM_FD_IRQ_FRAME_DONE : Indicates frame processing is finished + * @CAM_FD_IRQ_HALT_DONE : Indicates HW halt is finished + * @CAM_FD_IRQ_RESET_DONE : Indicates HW reset is finished + */ +enum cam_fd_hw_irq_type { + CAM_FD_IRQ_FRAME_DONE, + CAM_FD_IRQ_HALT_DONE, + CAM_FD_IRQ_RESET_DONE, +}; + +/** + * enum cam_fd_hw_cmd_type - FD HW layer custom commands + * + * @CAM_FD_HW_CMD_PRESTART : Command to process pre-start settings + * @CAM_FD_HW_CMD_FRAME_DONE : Command to process frame done settings + * @CAM_FD_HW_CMD_UPDATE_SOC : Command to process soc update 
+ * @CAM_FD_HW_CMD_REGISTER_CALLBACK : Command to set hw mgr callback + * @CAM_FD_HW_CMD_MAX : Indicates max cmd + * @CAM_FD_HW_CMD_HW_DUMP : Command to dump fd hw information + */ +enum cam_fd_hw_cmd_type { + CAM_FD_HW_CMD_PRESTART, + CAM_FD_HW_CMD_FRAME_DONE, + CAM_FD_HW_CMD_UPDATE_SOC, + CAM_FD_HW_CMD_REGISTER_CALLBACK, + CAM_FD_HW_CMD_HW_DUMP, + CAM_FD_HW_CMD_MAX, +}; + +/** + * struct cam_fd_hw_io_buffer : FD HW IO Buffer information + * + * @valid : Whether this IO Buf configuration is valid + * @io_cfg : IO Configuration information + * @num_buf : Number planes in io_addr, cpu_addr array + * @io_addr : Array of IO address information for planes + * @cpu_addr : Array of CPU address information for planes + */ +struct cam_fd_hw_io_buffer { + bool valid; + struct cam_buf_io_cfg *io_cfg; + uint32_t num_buf; + uint64_t io_addr[CAM_PACKET_MAX_PLANES]; + uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES]; +}; + +/** + * struct cam_fd_hw_req_private : FD HW layer's private information + * specific to a request + * + * @ctx_hw_private : FD HW layer's ctx specific private data + * @request_id : Request ID corresponding to this private information + * @get_raw_results : Whether to get raw results for this request + * @ro_mode_enabled : Whether RO mode is enabled for this request + * @fd_results : Pointer to save face detection results + * @raw_results : Pointer to save face detection raw results + */ +struct cam_fd_hw_req_private { + void *ctx_hw_private; + uint64_t request_id; + bool get_raw_results; + bool ro_mode_enabled; + struct cam_fd_results *fd_results; + uint32_t *raw_results; +}; + +/** + * struct cam_fd_hw_reserve_args : Reserve args for this HW context + * + * @hw_ctx : HW context for which reserve is requested + * @mode : Mode for which this reserve is requested + * @ctx_hw_private : Pointer to save HW layer's private information specific + * to this hw context. 
This has to be passed while calling + * further HW layer calls + */ +struct cam_fd_hw_reserve_args { + void *hw_ctx; + enum cam_fd_hw_mode mode; + void *ctx_hw_private; +}; + +/** + * struct cam_fd_hw_release_args : Release args for this HW context + * + * @hw_ctx : HW context for which release is requested + * @ctx_hw_private : HW layer's private information specific to this hw context + */ +struct cam_fd_hw_release_args { + void *hw_ctx; + void *ctx_hw_private; +}; + +/** + * struct cam_fd_hw_init_args : Init args for this HW context + * + * @hw_ctx : HW context for which init is requested + * @ctx_hw_private : HW layer's private information specific to this hw context + * @reset_required : Indicates if the reset is required during init or not + * @is_hw_reset : Output from hw layer, whether hw is reset on this init + */ +struct cam_fd_hw_init_args { + void *hw_ctx; + void *ctx_hw_private; + bool reset_required; + bool is_hw_reset; +}; + +/** + * struct cam_fd_hw_deinit_args : Deinit args for this HW context + * + * @hw_ctx : HW context for which deinit is requested + * @ctx_hw_private : HW layer's private information specific to this hw context + */ +struct cam_fd_hw_deinit_args { + void *hw_ctx; + void *ctx_hw_private; +}; + +/** + * struct cam_fd_hw_cmd_prestart_args : Prestart command args + * + * @hw_ctx : HW context which submitted this prestart + * @ctx_hw_private : HW layer's private information specific to + * this hw context + * @request_id : Request ID corresponds to this pre-start command + * @get_raw_results : Whether to get raw results for this request + * @input_buf : Input IO Buffer information for this request + * @output_buf : Output IO Buffer information for this request + * @cmd_buf_addr : Command buffer address to fill kmd commands + * @size : Size available in command buffer + * @pre_config_buf_size : Buffer size filled with commands by KMD that has + * to be inserted before umd commands + * @post_config_buf_size : Buffer size filled with 
commands by KMD that has + * to be inserted after umd commands + * @hw_req_private : HW layer's private information specific to + * this request + */ +struct cam_fd_hw_cmd_prestart_args { + void *hw_ctx; + void *ctx_hw_private; + uint64_t request_id; + bool get_raw_results; + struct cam_fd_hw_io_buffer input_buf[CAM_FD_MAX_IO_BUFFERS]; + struct cam_fd_hw_io_buffer output_buf[CAM_FD_MAX_IO_BUFFERS]; + uint32_t *cmd_buf_addr; + uint32_t size; + uint32_t pre_config_buf_size; + uint32_t post_config_buf_size; + struct cam_fd_hw_req_private hw_req_private; +}; + +/** + * struct cam_fd_hw_cmd_start_args : Start command args + * + * @hw_ctx : HW context which submitting start command + * @ctx_hw_private : HW layer's private information specific to + * this hw context + * @hw_req_private : HW layer's private information specific to + * this request + * @hw_update_entries : HW update entries corresponds to this request + * @num_hw_update_entries : Number of hw update entries + */ +struct cam_fd_hw_cmd_start_args { + void *hw_ctx; + void *ctx_hw_private; + struct cam_fd_hw_req_private *hw_req_private; + struct cam_hw_update_entry *hw_update_entries; + uint32_t num_hw_update_entries; +}; + +/** + * struct cam_fd_hw_stop_args : Stop command args + * + * @hw_ctx : HW context which submitting stop command + * @ctx_hw_private : HW layer's private information specific to this hw context + * @request_id : Request ID that need to be stopped + * @hw_req_private : HW layer's private information specific to this request + */ +struct cam_fd_hw_stop_args { + void *hw_ctx; + void *ctx_hw_private; + uint64_t request_id; + struct cam_fd_hw_req_private *hw_req_private; +}; + +/** + * struct cam_fd_hw_frame_done_args : Frame done command args + * + * @hw_ctx : HW context which submitting frame done request + * @ctx_hw_private : HW layer's private information specific to this hw context + * @request_id : Request ID that need to be stopped + * @hw_req_private : HW layer's private information 
specific to this request
+ */
+struct cam_fd_hw_frame_done_args {
+	void *hw_ctx;
+	void *ctx_hw_private;
+	uint64_t request_id;
+	struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_reset_args : Reset command args
+ *
+ * @hw_ctx : HW context which submitting reset command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_reset_args {
+	void *hw_ctx;
+	void *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_set_irq_cb : Set IRQ callback command args
+ *
+ * @cam_fd_hw_mgr_cb : HW Mgr's callback pointer
+ * @data : HW Mgr's private data
+ */
+struct cam_fd_hw_cmd_set_irq_cb {
+	int (*cam_fd_hw_mgr_cb)(void *data, enum cam_fd_hw_irq_type irq_type);
+	void *data;
+};
+
+/**
+ * struct cam_fd_hw_dump_args : Args for dump request
+ *
+ * @request_id : Issue request id
+ * @offset : offset of the buffer
+ * @buf_len : Length of target buffer
+ * @cpu_addr : start address of the target buffer
+ */
+struct cam_fd_hw_dump_args {
+	uint64_t request_id;
+	size_t offset;
+	size_t buf_len;
+	uintptr_t cpu_addr;
+};
+
+/**
+ * struct cam_fd_hw_dump_header : fd hw dump header
+ *
+ * @tag : fd hw dump header tag
+ * @size : Size of data
+ * @word_size : size of each word
+ */
+struct cam_fd_hw_dump_header {
+	uint8_t tag[CAM_FD_HW_DUMP_TAG_MAX_LEN];
+	uint64_t size;
+	uint32_t word_size;
+};
+
+/**
+ * @brief : API to register FD Hw to platform framework.
+ * @return 0 on success, or a negative error code on failure.
+ */
+int cam_fd_hw_init_module(void);
+
+/**
+ * @brief : API to remove FD Hw interface from platform framework.
+ */ +void cam_fd_hw_exit_module(void); + +#endif /* _CAM_FD_HW_INTF_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c new file mode 100644 index 0000000000..33001cbf64 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cam_fd_hw_core.h" +#include "cam_fd_hw_soc.h" +#include "cam_mem_mgr_api.h" + +static bool cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata, + struct cam_cpas_irq_data *irq_data) +{ + if (!irq_data) + return false; + + CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, irq_type=%d", + handle, userdata, irq_data->irq_type); + + return false; +} + +static int cam_fd_hw_soc_util_setup_regbase_indices( + struct cam_hw_soc_info *soc_info) +{ + struct cam_fd_soc_private *soc_private = + (struct cam_fd_soc_private *)soc_info->soc_private; + uint32_t index; + int rc, i; + + for (i = 0; i < CAM_FD_REG_MAX; i++) + soc_private->regbase_index[i] = -1; + + if ((soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) || + (soc_info->num_mem_block != CAM_FD_REG_MAX)) { + CAM_ERR(CAM_FD, "Invalid num_mem_block=%d", + soc_info->num_mem_block); + return -EINVAL; + } + + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "fd_core", &index); + if ((rc == 0) && (index < CAM_FD_REG_MAX)) { + soc_private->regbase_index[CAM_FD_REG_CORE] = index; + } else { + CAM_ERR(CAM_FD, "regbase not found for FD_CORE, rc=%d, %d %d", + rc, index, CAM_FD_REG_MAX); + return -EINVAL; + } + + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, 
"fd_wrapper", &index); + if ((rc == 0) && (index < CAM_FD_REG_MAX)) { + soc_private->regbase_index[CAM_FD_REG_WRAPPER] = index; + } else { + CAM_ERR(CAM_FD, "regbase not found FD_WRAPPER, rc=%d, %d %d", + rc, index, CAM_FD_REG_MAX); + return -EINVAL; + } + + CAM_DBG(CAM_FD, "Reg indices : CORE=%d, WRAPPER=%d", + soc_private->regbase_index[CAM_FD_REG_CORE], + soc_private->regbase_index[CAM_FD_REG_WRAPPER]); + + return 0; +} + +void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info, + enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value) +{ + struct cam_fd_soc_private *soc_private = + (struct cam_fd_soc_private *)soc_info->soc_private; + int32_t reg_index = soc_private->regbase_index[reg_base]; + + CAM_DBG(CAM_FD, "FD_REG_WRITE: Base[%d] Offset[0x%8x] Value[0x%8x]", + reg_base, reg_offset, reg_value); + + cam_io_w_mb(reg_value, + soc_info->reg_map[reg_index].mem_base + reg_offset); +} + +uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info, + enum cam_fd_reg_base reg_base, uint32_t reg_offset) +{ + struct cam_fd_soc_private *soc_private = + (struct cam_fd_soc_private *)soc_info->soc_private; + int32_t reg_index = soc_private->regbase_index[reg_base]; + uint32_t reg_value; + + reg_value = cam_io_r_mb( + soc_info->reg_map[reg_index].mem_base + reg_offset); + + CAM_DBG(CAM_FD, "FD_REG_READ: Base[%d] Offset[0x%8x] Value[0x%8x]", + reg_base, reg_offset, reg_value); + + return reg_value; +} + +int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info) +{ + struct cam_fd_soc_private *soc_private = soc_info->soc_private; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + int rc; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 2; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_READ; + axi_vote.axi_path[0].camnoc_bw = 7200000; + axi_vote.axi_path[0].mnoc_ab_bw = 7200000; + 
axi_vote.axi_path[0].mnoc_ib_bw = 7200000; + axi_vote.axi_path[1].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[1].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[1].camnoc_bw = 7200000; + axi_vote.axi_path[1].mnoc_ab_bw = 7200000; + axi_vote.axi_path[1].mnoc_ib_bw = 7200000; + + + rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_FD, "Error in CPAS START, rc=%d", rc); + return -EFAULT; + } + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_info->lowest_clk_level, true); + if (rc) { + CAM_ERR(CAM_FD, "Error enable platform failed, rc=%d", rc); + goto stop_cpas; + } + + return rc; + +stop_cpas: + if (cam_cpas_stop(soc_private->cpas_handle)) + CAM_ERR(CAM_FD, "Error in CPAS STOP"); + + return rc; +} + + +int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info) +{ + struct cam_fd_soc_private *soc_private; + int rc = 0; + + if (!soc_info) { + CAM_ERR(CAM_FD, "Invalid soc_info param"); + return -EINVAL; + } + soc_private = soc_info->soc_private; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) { + CAM_ERR(CAM_FD, "disable platform resources failed, rc=%d", rc); + return rc; + } + + rc = cam_cpas_stop(soc_private->cpas_handle); + if (rc) { + CAM_ERR(CAM_FD, "Error in CPAS STOP, handle=0x%x, rc=%d", + soc_private->cpas_handle, rc); + return rc; + } + + return rc; +} + +int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler, void *private_data) +{ + struct cam_fd_soc_private *soc_private; + struct cam_cpas_register_params cpas_register_param; + int rc, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_FD, "Failed in get_dt_properties, rc=%d", rc); + return rc; + } + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = private_data; + + rc = 
cam_soc_util_request_platform_resource(soc_info, irq_handler, &(irq_data[0])); + if (rc) { + CAM_ERR(CAM_FD, "Failed in request_platform_resource rc=%d", + rc); + return rc; + } + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_fd_soc_private), GFP_KERNEL); + if (!soc_private) { + rc = -ENOMEM; + goto release_res; + } + soc_info->soc_private = soc_private; + + rc = cam_fd_hw_soc_util_setup_regbase_indices(soc_info); + if (rc) { + CAM_ERR(CAM_FD, "Failed in setup regbase, rc=%d", rc); + goto free_soc_private; + } + + memset(&cpas_register_param, 0, sizeof(cpas_register_param)); + strscpy(cpas_register_param.identifier, "fd", CAM_HW_IDENTIFIER_LENGTH); + cpas_register_param.cell_index = soc_info->index; + cpas_register_param.dev = &soc_info->pdev->dev; + cpas_register_param.userdata = private_data; + cpas_register_param.cam_cpas_client_cb = cam_fd_hw_util_cpas_callback; + + rc = cam_cpas_register_client(&cpas_register_param); + if (rc) { + CAM_ERR(CAM_FD, "CPAS registration failed"); + goto free_soc_private; + } + soc_private->cpas_handle = cpas_register_param.client_handle; + CAM_DBG(CAM_FD, "CPAS handle=%d", soc_private->cpas_handle); + + return rc; + +free_soc_private: + CAM_MEM_FREE(soc_info->soc_private); + soc_info->soc_private = NULL; +release_res: + cam_soc_util_release_platform_resource(soc_info); + + return rc; +} + +int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info) +{ + struct cam_fd_soc_private *soc_private = + (struct cam_fd_soc_private *)soc_info->soc_private; + int rc; + + rc = cam_cpas_unregister_client(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_FD, "Unregister cpas failed, handle=%d, rc=%d", + soc_private->cpas_handle, rc); + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_ERR(CAM_FD, "release platform failed, rc=%d", rc); + + CAM_MEM_FREE(soc_info->soc_private); + soc_info->soc_private = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h 
b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h new file mode 100644 index 0000000000..b27ce09e36 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_FD_HW_SOC_H_ +#define _CAM_FD_HW_SOC_H_ + +#include "cam_soc_util.h" + +/** + * enum cam_fd_reg_base - Enum for FD register sets + * + * @CAM_FD_REG_CORE : Indicates FD Core register space + * @CAM_FD_REG_WRAPPER : Indicates FD Wrapper register space + * @CAM_FD_REG_MAX : Max number of register sets supported + */ +enum cam_fd_reg_base { + CAM_FD_REG_CORE, + CAM_FD_REG_WRAPPER, + CAM_FD_REG_MAX +}; + +/** + * struct cam_fd_soc_private : FD private SOC information + * + * @regbase_index : Mapping between Register base enum to register index in SOC + * @cpas_handle : CPAS handle + * + */ +struct cam_fd_soc_private { + int32_t regbase_index[CAM_FD_REG_MAX]; + uint32_t cpas_handle; +}; + +int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler, void *private_data); +int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info); +int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info); +int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info); +uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info, + enum cam_fd_reg_base reg_base, uint32_t reg_offset); +void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info, + enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value); + +#endif /* _CAM_FD_HW_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h new file mode 100644 index 0000000000..ad1fb0bc57 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_FD_HW_V41_H_ +#define _CAM_FD_HW_V41_H_ + +static struct cam_fd_hw_static_info cam_fd_wrapper120_core410_info = { + .core_version = { + .major = 4, + .minor = 1, + .incr = 0, + }, + .wrapper_version = { + .major = 1, + .minor = 2, + .incr = 0, + }, + .core_regs = { + .version = 0x38, + .control = 0x0, + .result_cnt = 0x4, + .result_addr = 0x20, + .image_addr = 0x24, + .work_addr = 0x28, + .ro_mode = 0x34, + .results_reg_base = 0x400, + .raw_results_reg_base = 0x800, + }, + .wrapper_regs = { + .wrapper_version = 0x0, + .cgc_disable = 0x4, + .hw_stop = 0x8, + .sw_reset = 0x10, + .vbif_req_priority = 0x20, + .vbif_priority_level = 0x24, + .vbif_done_status = 0x34, + .irq_mask = 0x50, + .irq_status = 0x54, + .irq_clear = 0x58, + }, + .results = { + .max_faces = 35, + .per_face_entries = 4, + .raw_results_available = true, + .raw_results_entries = 512, + }, + .enable_errata_wa = { + .single_irq_only = true, + .ro_mode_enable_always = true, + .ro_mode_results_invalid = true, + }, + .irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) | + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) | + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE), + .qos_priority = 4, + .qos_priority_level = 4, + .supported_modes = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID, + .ro_mode_supported = true, +}; + +#endif /* _CAM_FD_HW_V41_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h new file mode 100644 index 0000000000..d8b78d6e44 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. 
All rights reserved. + */ + +#ifndef _CAM_FD_HW_V501_H_ +#define _CAM_FD_HW_V501_H_ + +static struct cam_fd_hw_static_info cam_fd_wrapper200_core501_info = { + .core_version = { + .major = 5, + .minor = 0, + .incr = 1, + }, + .wrapper_version = { + .major = 2, + .minor = 0, + .incr = 0, + }, + .core_regs = { + .version = 0x38, + .control = 0x0, + .result_cnt = 0x4, + .result_addr = 0x20, + .image_addr = 0x24, + .work_addr = 0x28, + .ro_mode = 0x34, + .results_reg_base = 0x400, + .raw_results_reg_base = 0x800, + }, + .wrapper_regs = { + .wrapper_version = 0x0, + .cgc_disable = 0x4, + .hw_stop = 0x8, + .sw_reset = 0x10, + .vbif_req_priority = 0x20, + .vbif_priority_level = 0x24, + .vbif_done_status = 0x34, + .irq_mask = 0x50, + .irq_status = 0x54, + .irq_clear = 0x58, + }, + .results = { + .max_faces = 35, + .per_face_entries = 4, + .raw_results_available = true, + .raw_results_entries = 512, + }, + .enable_errata_wa = { + .single_irq_only = true, + .ro_mode_enable_always = true, + .ro_mode_results_invalid = true, + .skip_reset = true, + }, + .irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) | + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) | + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE), + .qos_priority = 4, + .qos_priority_level = 4, + .supported_modes = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID, + .ro_mode_supported = true, +}; + +#endif /* _CAM_FD_HW_V501_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v600.h b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v600.h new file mode 100644 index 0000000000..0c81cb6429 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v600.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_FD_HW_V600_H_ +#define _CAM_FD_HW_V600_H_ + +static struct cam_fd_hw_static_info cam_fd_wrapper200_core600_info = { + .core_version = { + .major = 6, + .minor = 0, + .incr = 0, + }, + .wrapper_version = { + .major = 2, + .minor = 0, + .incr = 0, + }, + .core_regs = { + .version = 0x38, + .control = 0x0, + .result_cnt = 0x4, + .result_addr = 0x20, + .image_addr = 0x24, + .work_addr = 0x28, + .ro_mode = 0x34, + .results_reg_base = 0x400, + }, + .wrapper_regs = { + .wrapper_version = 0x0, + .cgc_disable = 0x4, + .hw_stop = 0x8, + .sw_reset = 0x10, + .vbif_req_priority = 0x20, + .vbif_priority_level = 0x24, + .vbif_done_status = 0x34, + .irq_mask = 0x50, + .irq_status = 0x54, + .irq_clear = 0x58, + }, + .results = { + .max_faces = 35, + .per_face_entries = 4, + .raw_results_available = false, + }, + .enable_errata_wa = { + .single_irq_only = true, + .ro_mode_enable_always = true, + .ro_mode_results_invalid = true, + }, + .irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) | + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) | + CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE), + .qos_priority = 4, + .qos_priority_level = 4, + .supported_modes = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID, + .ro_mode_supported = true, +}; + +#endif /* _CAM_FD_HW_V600_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_context.c b/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_context.c new file mode 100644 index 0000000000..eb50ade1ec --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_context.c @@ -0,0 +1,582 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "cam_node.h" +#include "cam_context.h" +#include "cam_context_utils.h" +#include "cam_icp_context.h" +#include "cam_req_mgr_util.h" +#include "cam_mem_mgr.h" +#include "cam_trace.h" +#include "cam_debug_util.h" +#include "cam_packet_util.h" +#include "cam_req_mgr_dev.h" +#include "cam_icp_hw_mgr_intf.h" + +static int cam_icp_context_dump_active_request(void *data, void *args) +{ + struct cam_context *ctx = (struct cam_context *)data; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args; + int rc = 0; + + if (!ctx || !pf_args) { + CAM_ERR(CAM_ICP, "Invalid ctx %pK or pf args %pK", + ctx, pf_args); + return -EINVAL; + } + + if (pf_args->check_pid) { + rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, NULL); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Failed to check PID info", + ctx->dev_name); + if (pf_args->pf_pid_found_status == CAM_PF_PID_FOUND_FAILURE) + CAM_INFO(CAM_ICP, + "[%s] Client with the issue PID is not detected, stop dumping or notifying to the userspace, wait for the next handler to check", + ctx->dev_name); + return 0; + } + + CAM_INFO(CAM_ICP, "[%s] iommu fault for icp ctx %d state %d", + ctx->dev_name, ctx->ctx_id, ctx->state); + + list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) { + CAM_INFO(CAM_ICP, "[%s] ctx[%u]: Active req_id: %llu", + ctx->dev_name, ctx->ctx_id, req->request_id); + + rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data); + if (rc) + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to dump pf info.ctx->state: %d", + ctx->dev_name, ctx->ctx_id, ctx->state); + } + + /* + * Faulted ctx found. 
Since IPE/BPS instances are shared among contexts, + * faulted ctx is found if and only if the context contains + * faulted buffer + */ + if (pf_args->pf_context_info.ctx_found) { + /* Send PF notification to UMD if PF found on current CTX */ + rc = cam_context_send_pf_evt(ctx, pf_args); + if (rc) + CAM_ERR(CAM_ICP, + "[%s] ctx[%u]: Failed to notify PF event to userspace rc: %d", + ctx->dev_name, ctx->ctx_id, rc); + } + + return rc; +} + +static int cam_icp_context_mini_dump(void *priv, void *args) +{ + int rc; + struct cam_context *ctx; + + if (!priv || !args) { + CAM_ERR(CAM_ICP, "Invalid priv %pK args %pK", priv, args); + return -EINVAL; + } + + ctx = (struct cam_context *)priv; + rc = cam_context_mini_dump(ctx, args); + if (rc) + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Mini Dump failed rc %d", + ctx->dev_name, ctx->ctx_id, rc); + + return rc; +} + +static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *args) +{ + int rc; + + rc = cam_context_acquire_dev_to_hw(ctx, args); + if (!rc) { + ctx->state = CAM_CTX_ACQUIRED; + trace_cam_context_state(ctx->dev_name, ctx); + } + + return rc; +} + +static int __cam_icp_release_dev_in_acquired(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + cam_common_release_evt_params(ctx->dev_hdl); + + rc = cam_context_release_dev_to_hw(ctx, cmd); + if (rc) + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Unable to release device", + ctx->dev_name, ctx->ctx_id); + + ctx->state = CAM_CTX_AVAILABLE; + trace_cam_context_state(ctx->dev_name, ctx); + return rc; +} + +static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_start_dev_to_hw(ctx, cmd); + if (!rc) { + ctx->state = CAM_CTX_READY; + trace_cam_context_state(ctx->dev_name, ctx); + } + + return rc; +} + +static int __cam_icp_dump_dev_in_ready( + struct cam_context *ctx, + struct cam_dump_req_cmd *cmd) +{ + int rc; + + rc = 
cam_context_dump_dev_to_hw(ctx, cmd); + if (rc) + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to dump device", + ctx->dev_name, ctx->ctx_id); + + return rc; +} + +static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd) +{ + int rc; + struct cam_context_utils_flush_args flush_args; + + flush_args.cmd = cmd; + flush_args.flush_active_req = false; + + rc = cam_context_flush_dev_to_hw(ctx, &flush_args); + if (rc) + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to flush device", + ctx->dev_name, ctx->ctx_id); + + return rc; +} + +static int __cam_icp_config_dev_in_ready(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc; + size_t len; + uintptr_t packet_addr; + struct cam_packet *packet; + struct cam_packet *packet_u; + size_t remain_len = 0; + + rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle, + &packet_addr, &len); + if (rc) { + CAM_ERR(CAM_ICP, "[%s][%d] Can not get packet address", + ctx->dev_name, ctx->ctx_id); + rc = -EINVAL; + return rc; + } + + remain_len = len; + if ((len < sizeof(struct cam_packet)) || + (cmd->offset >= (len - sizeof(struct cam_packet)))) { + CAM_ERR(CAM_CTXT, + "[%s] ctx[%u]: Invalid offset, len: %zu cmd offset: %llu sizeof packet: %zu", + ctx->dev_name, ctx->ctx_id, + len, cmd->offset, sizeof(struct cam_packet)); + goto put_cpu_buf; + } + + remain_len -= (size_t)cmd->offset; + packet_u = (struct cam_packet *) ((uint8_t *)packet_addr + + (uint32_t)cmd->offset); + rc = cam_packet_util_copy_pkt_to_kmd(packet_u, &packet, remain_len); + if (rc) { + CAM_ERR(CAM_ICP, "copying packet to kmd failed"); + goto put_cpu_buf; + } + + if (((packet->header.op_code & 0xff) == + CAM_ICP_OPCODE_IPE_SETTINGS) || + ((packet->header.op_code & 0xff) == + CAM_ICP_OPCODE_BPS_SETTINGS) || + ((packet->header.op_code & 0xff) == + CAM_ICP_OPCODE_OFE_SETTINGS)) + rc = cam_context_config_dev_to_hw(ctx, cmd); + else + rc = cam_context_prepare_dev_to_hw(ctx, cmd); + + if (rc) + CAM_ERR(CAM_ICP, "[%s] ctx[%u]:Failed to 
prepare device", + ctx->dev_name, ctx->ctx_id); + + cam_common_mem_free(packet); +put_cpu_buf: + cam_mem_put_cpu_buf((int32_t) cmd->packet_handle); + return rc; +} + +static int __cam_icp_stop_dev_in_ready(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_stop_dev_to_hw(ctx); + if (rc) + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to stop device", + ctx->dev_name, ctx->ctx_id); + + ctx->state = CAM_CTX_ACQUIRED; + trace_cam_context_state(ctx->dev_name, ctx); + return rc; +} + +static int __cam_icp_release_dev_in_ready(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + rc = __cam_icp_stop_dev_in_ready(ctx, NULL); + if (rc) + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Failed to stop device", + ctx->dev_name, ctx->ctx_id); + + rc = __cam_icp_release_dev_in_acquired(ctx, cmd); + if (rc) + CAM_ERR(CAM_ICP, "Failed to release device"); + + return rc; +} + +static uint32_t cam_icp_context_get_error_code(uint32_t err_type) +{ + switch (err_type) { + case CAM_ICP_HW_ERROR_NO_MEM: + return CAM_REQ_MGR_ICP_NO_MEMORY; + case CAM_ICP_HW_ERROR_SYSTEM_FAILURE: + return CAM_REQ_MGR_ICP_SYSTEM_FAILURE; + default: + return 0; + } +} + +static int __cam_icp_notify_v4l2_err_evt(struct cam_context *ctx, + uint32_t err_type, uint32_t err_code, uint64_t request_id) +{ + struct cam_req_mgr_message req_msg = {0}; + int rc; + + req_msg.session_hdl = ctx->session_hdl; + req_msg.u.err_msg.device_hdl = ctx->dev_hdl; + req_msg.u.err_msg.error_type = err_type; + req_msg.u.err_msg.link_hdl = ctx->link_hdl; + req_msg.u.err_msg.request_id = request_id; + req_msg.u.err_msg.resource_size = 0x0; + req_msg.u.err_msg.error_code = err_code; + + rc = cam_req_mgr_notify_message(&req_msg, V4L_EVENT_CAM_REQ_MGR_ERROR, + V4L_EVENT_CAM_REQ_MGR_EVENT); + if (rc) + CAM_ERR(CAM_ICP, + "[%s] ctx[%u]: Error in notifying the error time for req id:%lld", + ctx->dev_name, ctx->ctx_id, request_id); + + CAM_INFO(CAM_ICP, + "[%s] ctx[%u]: notifying error to 
userspace err type: %d, err code: %u, req id: %llu", + ctx->dev_name, ctx->ctx_id, err_type, err_code, request_id); + + return rc; +} + +static int cam_icp_ctx_handle_fatal_error(void *ctx, void *err_evt_data) +{ + struct cam_icp_hw_error_evt_data *err_evt; + uint32_t err_code = 0; + int rc; + + err_evt = (struct cam_icp_hw_error_evt_data *)err_evt_data; + err_code = cam_icp_context_get_error_code(err_evt->err_type); + + rc = __cam_icp_notify_v4l2_err_evt(ctx, CAM_REQ_MGR_ERROR_TYPE_RECOVERY, + err_code, err_evt->req_id); + + return rc; +} + +static int cam_icp_ctx_handle_buf_done_in_ready(void *ctx, void *done_evt_data) +{ + struct cam_icp_hw_buf_done_evt_data *buf_done; + + buf_done = (struct cam_icp_hw_buf_done_evt_data *)done_evt_data; + return cam_context_buf_done_from_hw(ctx, buf_done->buf_done_data, buf_done->evt_id); +} + +static int cam_icp_ctx_handle_error_inducement(void *ctx, void *inject_evt_arg) +{ + return cam_context_apply_evt_injection(ctx, inject_evt_arg); +} + +static int __cam_icp_ctx_handle_hw_event(void *ctx, + uint32_t evt_id, void *evt_data) +{ + int rc; + + if (!ctx || !evt_data) { + CAM_ERR(CAM_ICP, "Invalid ctx %s and event data %s", + CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_data)); + return -EINVAL; + } + + switch (evt_id) { + case CAM_ICP_EVT_ID_BUF_DONE: + rc = cam_icp_ctx_handle_buf_done_in_ready(ctx, evt_data); + break; + case CAM_ICP_EVT_ID_ERROR: + rc = cam_icp_ctx_handle_fatal_error(ctx, evt_data); + break; + case CAM_ICP_EVT_ID_INJECT_EVENT: + rc = cam_icp_ctx_handle_error_inducement(ctx, evt_data); + break; + default: + CAM_ERR(CAM_ICP, "Invalid event id: %u", evt_id); + rc = -EINVAL; + } + + return rc; +} + +static int cam_icp_context_validate_event_notify_injection(struct cam_context *ctx, + struct cam_hw_inject_evt_param *evt_params) +{ + int rc = 0; + uint32_t evt_type; + uint64_t req_id; + + req_id = evt_params->req_id; + evt_type = evt_params->u.evt_notify.evt_notify_type; + + switch (evt_type) { + case 
V4L_EVENT_CAM_REQ_MGR_ERROR: { + struct cam_hw_inject_err_evt_param *err_evt_params = + &evt_params->u.evt_notify.u.err_evt_params; + + switch (err_evt_params->err_type) { + case CAM_REQ_MGR_ERROR_TYPE_RECOVERY: + case CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY: + break; + default: + CAM_ERR(CAM_ICP, + "[%s] ctx[%u]: Invalid error type: %u for error event injection err code: %u req id: %llu dev hdl: %d", + ctx->dev_name, ctx->ctx_id, err_evt_params->err_type, + err_evt_params->err_code, req_id, ctx->dev_hdl); + return -EINVAL; + } + + CAM_INFO(CAM_ICP, + "[%s] ctx[%u]: Inject ERR evt: err code: %u err type: %u req id: %llu dev hdl: %d", + ctx->dev_name, ctx->ctx_id, err_evt_params->err_code, + err_evt_params->err_type, req_id, ctx->dev_hdl); + break; + } + case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: { + struct cam_hw_inject_pf_evt_param *pf_evt_params = + &evt_params->u.evt_notify.u.pf_evt_params; + bool non_fatal_en; + + rc = cam_smmu_is_cb_non_fatal_fault_en(ctx->img_iommu_hdl, &non_fatal_en); + if (rc) { + CAM_ERR(CAM_ICP, + "[%s] ctx[%u]: Fail to query whether device's cb has non-fatal enabled rc: %d", + ctx->dev_name, ctx->ctx_id, rc); + return rc; + } + + if (!non_fatal_en) { + CAM_ERR(CAM_ICP, + "[%s] ctx[%u]: Fail to inject page fault event notification. 
Page fault is fatal for ICP", + ctx->dev_name, ctx->ctx_id); + return -EINVAL; + } + + CAM_INFO(CAM_ICP, + "[%s] ctx[%u]: Inject PF evt: req_id: %llu dev hdl: %d ctx found: %hhu", + ctx->dev_name, ctx->ctx_id, + req_id, ctx->dev_hdl, pf_evt_params->ctx_found); + break; + } + default: + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Event notification type not supported: %u", + ctx->dev_name, ctx->ctx_id, evt_type); + rc = -EINVAL; + } + + return rc; +} + +static int cam_icp_context_inject_evt(void *context, void *evt_args) +{ + struct cam_context *ctx = context; + struct cam_hw_inject_evt_param *evt_params = NULL; + struct cam_hw_inject_buffer_error_param *buf_err_params = NULL; + int rc = 0; + + if (!ctx || !evt_args) { + CAM_ERR(CAM_ICP, + "invalid params ctx %s event args %s", + CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_args)); + return -EINVAL; + } + + evt_params = (struct cam_hw_inject_evt_param *)evt_args; + + if (evt_params->inject_id == CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE) { + buf_err_params = &evt_params->u.buf_err_evt; + if (buf_err_params->sync_error > CAM_SYNC_ICP_EVENT_START || + buf_err_params->sync_error < CAM_SYNC_ICP_EVENT_END) { + CAM_INFO(CAM_ICP, "[%s] ctx[%u]: Inject buffer sync error %u req id %llu", + ctx->dev_name, ctx->ctx_id, buf_err_params->sync_error, + evt_params->req_id); + } else { + CAM_ERR(CAM_ICP, "[%s] ctx[%u]: Invalid buffer sync error %u req id %llu", + ctx->dev_name, ctx->ctx_id, buf_err_params->sync_error, + evt_params->req_id); + return -EINVAL; + } + } else { + rc = cam_icp_context_validate_event_notify_injection(ctx, evt_params); + if (rc) { + CAM_ERR(CAM_ICP, + "[%s] ctx[%u]: Event notification injection failed validation rc: %d", + ctx->dev_name, ctx->ctx_id, rc); + return -EINVAL; + } + } + + if (ctx->hw_mgr_intf->hw_inject_evt) + ctx->hw_mgr_intf->hw_inject_evt(ctx->ctxt_to_hw_map, evt_args); + + return rc; +} + +static struct cam_ctx_ops + cam_icp_ctx_state_machine[CAM_CTX_STATE_MAX] = { + /* Uninit */ + { + .ioctl_ops = 
{}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Available */ + { + .ioctl_ops = { + .acquire_dev = __cam_icp_acquire_dev_in_available, + }, + .crm_ops = {}, + .irq_ops = NULL, + .mini_dump_ops = cam_icp_context_mini_dump, + }, + /* Acquired */ + { + .ioctl_ops = { + .release_dev = __cam_icp_release_dev_in_acquired, + .start_dev = __cam_icp_start_dev_in_acquired, + .config_dev = __cam_icp_config_dev_in_ready, + .flush_dev = __cam_icp_flush_dev_in_ready, + .dump_dev = __cam_icp_dump_dev_in_ready, + }, + .crm_ops = {}, + .irq_ops = __cam_icp_ctx_handle_hw_event, + .pagefault_ops = cam_icp_context_dump_active_request, + .mini_dump_ops = cam_icp_context_mini_dump, + .evt_inject_ops = cam_icp_context_inject_evt, + }, + /* Ready */ + { + .ioctl_ops = { + .stop_dev = __cam_icp_stop_dev_in_ready, + .release_dev = __cam_icp_release_dev_in_ready, + .config_dev = __cam_icp_config_dev_in_ready, + .flush_dev = __cam_icp_flush_dev_in_ready, + .dump_dev = __cam_icp_dump_dev_in_ready, + }, + .crm_ops = {}, + .irq_ops = __cam_icp_ctx_handle_hw_event, + .pagefault_ops = cam_icp_context_dump_active_request, + .mini_dump_ops = cam_icp_context_mini_dump, + .evt_inject_ops = cam_icp_context_inject_evt, + }, + /* Flushed */ + { + .ioctl_ops = {}, + }, + /* Activated */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + .pagefault_ops = cam_icp_context_dump_active_request, + .mini_dump_ops = cam_icp_context_mini_dump, + .evt_inject_ops = cam_icp_context_inject_evt, + }, +}; + +int cam_icp_context_init(struct cam_icp_context *ctx, struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, int img_iommu_hdl, const char *icp_dev_name) +{ + int rc; + + if ((!ctx) || (!ctx->base) || (!hw_intf) || (!icp_dev_name)) { + CAM_ERR(CAM_ICP, + "Invalid params: ctx: %s hw intf: %s dev name: %s", + CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(hw_intf), + CAM_IS_NULL_TO_STR(icp_dev_name)); + rc = -EINVAL; + goto err; + } + + rc = cam_context_init(ctx->base, icp_dev_name, CAM_ICP, ctx_id, + NULL, 
hw_intf, ctx->req_base, CAM_CTX_ICP_REQ_MAX, img_iommu_hdl); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Camera Context Base init failed", icp_dev_name); + goto err; + } + + ctx->base->state_machine = cam_icp_ctx_state_machine; + ctx->base->ctx_priv = ctx; + ctx->base->max_hw_update_entries = CAM_CTX_CFG_MAX; + ctx->base->max_in_map_entries = CAM_CTX_CFG_MAX; + ctx->base->max_out_map_entries = CAM_CTX_CFG_MAX; + ctx->ctxt_to_hw_map = NULL; + +err: + return rc; +} + +int cam_icp_context_deinit(struct cam_icp_context *ctx) +{ + if ((!ctx) || (!ctx->base)) { + CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx); + return -EINVAL; + } + + cam_context_deinit(ctx->base); + memset(ctx, 0, sizeof(*ctx)); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_context.h b/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_context.h new file mode 100644 index 0000000000..0ff7560560 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_context.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_ICP_CONTEXT_H_ +#define _CAM_ICP_CONTEXT_H_ + +#include "cam_context.h" + +/** + * struct cam_icp_context - icp context + * @base: icp context object + * @state_machine: state machine for ICP context + * @req_base: common request structure + * @state: icp context state + * @ctxt_to_hw_map: context to FW handle mapping + */ +struct cam_icp_context { + struct cam_context *base; + struct cam_ctx_ops *state_machine; + struct cam_ctx_request req_base[CAM_CTX_ICP_REQ_MAX]; + uint32_t state; + void *ctxt_to_hw_map; +}; + +/** + * cam_icp_context_init() - ICP context init + * @ctx: Pointer to context + * @hw_intf: Pointer to ICP hardware interface + * @ctx_id: ID for this context + * @img_iommu_hdl: IOMMU HDL for image buffers + * @icp_dev_name: name of the icp subdevice + */ +int cam_icp_context_init(struct cam_icp_context *ctx, + struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id, + int img_iommu_hdl, const char *icp_dev_name); + +/** + * cam_icp_context_deinit() - ICP context deinit + * @ctx: Pointer to context + */ +int cam_icp_context_deinit(struct cam_icp_context *ctx); + +#endif /* _CAM_ICP_CONTEXT_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_subdev.c b/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_subdev.c new file mode 100644 index 0000000000..f4762e5963 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_subdev.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_req_mgr_dev.h" +#include "cam_subdev.h" +#include "cam_node.h" +#include "cam_context.h" +#include "cam_icp_context.h" +#include "cam_hw_mgr_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_debug_util.h" +#include "cam_smmu_api.h" +#include "camera_main.h" +#include "cam_common_util.h" +#include "cam_context_utils.h" +#include "cam_mem_mgr_api.h" + +#define CAM_ICP_IS_DEV_IDX_INVALID(dev_idx) \ +({ \ + ((dev_idx) >= CAM_ICP_SUBDEV_MAX); \ +}) + +struct cam_icp_subdev { + struct cam_subdev sd; + struct cam_node *node; + struct cam_context ctx[CAM_ICP_CTX_MAX]; + struct cam_icp_context ctx_icp[CAM_ICP_CTX_MAX]; + struct mutex icp_lock; + int32_t open_cnt; + int32_t reserved; +}; + +static DEFINE_MUTEX(g_dev_lock); +static struct cam_icp_subdev *g_icp_dev[CAM_ICP_SUBDEV_MAX]; + +static char cam_icp_subdev_name_arr[CAM_ICP_SUBDEV_MAX][CAM_ICP_SUBDEV_NAME_LEN] = { + "cam-icp0", + "cam-icp1", +}; + +static const struct of_device_id cam_icp_dt_match[] = { + {.compatible = "qcom,cam-icp"}, + {.compatible = "qcom,cam-icp0"}, + {.compatible = "qcom,cam-icp1"}, + {} +}; + +static int cam_icp_dev_evt_inject_cb(void *inject_args) +{ + struct cam_common_inject_evt_param *inject_params = inject_args; + struct cam_icp_subdev *icp_dev; + int i; + + /* Event Injection currently supported for only a single instance of ICP */ + icp_dev = g_icp_dev[0]; + + for (i = 0; i < CAM_ICP_CTX_MAX; i++) { + if (icp_dev->ctx[i].dev_hdl == inject_params->dev_hdl) { + cam_context_add_evt_inject(&icp_dev->ctx[i], + &inject_params->evt_params); + return 0; + } + } + + CAM_ERR(CAM_ICP, "No dev hdl found %d", inject_params->dev_hdl); + return -ENODEV; +} + +static void cam_icp_dev_iommu_fault_handler(struct cam_smmu_pf_info *pf_smmu_info) +{ + int i, rc; + struct cam_node *node = NULL; + struct 
cam_hw_dump_pf_args pf_args = {0}; + + if (!pf_smmu_info || !pf_smmu_info->token) { + CAM_ERR(CAM_ICP, "invalid token in page handler cb"); + return; + } + + node = (struct cam_node *)pf_smmu_info->token; + + pf_args.pf_smmu_info = pf_smmu_info; + + for (i = 0; i < node->ctx_size; i++) { + CAM_DBG(CAM_ICP, "Node name %s ctx_idx %d", node->name, i); + pf_args.check_pid = true; + cam_context_dump_pf_info(&(node->ctx_list[i]), &pf_args); + if (pf_args.pf_pid_found_status == CAM_PF_PID_FOUND_PENDING) { + continue; + } else if (pf_args.pf_pid_found_status == CAM_PF_PID_FOUND_FAILURE) { + CAM_INFO(CAM_ICP, "pid %d was not found for %s", + pf_args.pf_smmu_info->pid, node->name); + return; + } else + break; + } + + if (i == node->ctx_size) { + CAM_INFO(CAM_ICP, "All contexts are inactive. PID %d was not found for %s", + pf_args.pf_smmu_info->pid, node->name); + return; + } + + for (i = 0; i < node->ctx_size; i++) { + pf_args.check_pid = false; + cam_context_dump_pf_info(&(node->ctx_list[i]), &pf_args); + if (pf_args.pf_context_info.ctx_found) + /* found ctx and packet of the faulted address */ + break; + } + + if (i == node->ctx_size) { + /* Faulted ctx not found. 
Report PF to userspace */ + rc = cam_context_send_pf_evt(NULL, &pf_args); + if (rc) + CAM_ERR(CAM_ICP, + "Failed to notify PF event to userspace rc: %d", rc); + } +} + +static void cam_icp_dev_mini_dump_cb(void *priv, void *args) +{ + struct cam_context *ctx = NULL; + + if (!priv || !args) { + CAM_ERR(CAM_ICP, "Invalid params: priv: %pK args %pK", priv, args); + return; + } + + ctx = (struct cam_context *)priv; + cam_context_mini_dump_from_hw(ctx, args); +} + +static int cam_icp_subdev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_hw_mgr_intf *hw_mgr_intf = NULL; + struct cam_node *node = v4l2_get_subdevdata(sd); + struct cam_icp_subdev *icp_dev = NULL; + int rc = 0; + + if (!node) { + CAM_ERR(CAM_ICP, "Invalid params: Node is NULL"); + return -EINVAL; + } + + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_LOCK); + CAM_DBG(CAM_ICP, "Enter device open for %s[%u]", + sd->name, node->device_idx); + + if (CAM_ICP_IS_DEV_IDX_INVALID(node->device_idx)) { + CAM_ERR(CAM_ICP, "Invalid device idx: %u for device: %s", + node->device_idx, sd->name); + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + return -ENODEV; + } + icp_dev = g_icp_dev[node->device_idx]; + + mutex_lock(&icp_dev->icp_lock); + if (icp_dev->open_cnt >= 1) { + CAM_ERR(CAM_ICP, "device[%s] is already opened, open count: %u", + sd->name, icp_dev->open_cnt); + rc = -EALREADY; + goto end; + } + + hw_mgr_intf = &node->hw_mgr_intf; + rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL); + if (rc < 0) { + CAM_ERR(CAM_ICP, "FW download failed for device [%s]", sd->name); + goto end; + } + + icp_dev->open_cnt++; + +end: + mutex_unlock(&icp_dev->icp_lock); + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + return rc; +} + +static int cam_icp_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + int rc = 0; + struct cam_hw_mgr_intf *hw_mgr_intf = NULL; + struct cam_node *node = v4l2_get_subdevdata(sd); + struct cam_icp_subdev *icp_dev = NULL; + + if (!node) { + 
CAM_ERR(CAM_ICP, "device[%s] Invalid params: node is NULL", + sd->name); + rc = -EINVAL; + goto end; + } + + if (CAM_ICP_IS_DEV_IDX_INVALID(node->device_idx)) { + CAM_ERR(CAM_ICP, "Invalid device idx: %u for device: %s", + node->device_idx, node->name); + return -ENODEV; + } + icp_dev = g_icp_dev[node->device_idx]; + + mutex_lock(&icp_dev->icp_lock); + if (icp_dev->open_cnt <= 0) { + CAM_DBG(CAM_ICP, "device[%s] is already closed", + sd->name); + goto end; + } + + icp_dev->open_cnt--; + + hw_mgr_intf = &node->hw_mgr_intf; + if (!hw_mgr_intf) { + CAM_ERR(CAM_ICP, "device[%s] hw_mgr_intf is not initialized", + sd->name); + rc = -EINVAL; + goto end; + } + + rc = cam_node_shutdown(node); + if (rc < 0) { + CAM_ERR(CAM_ICP, "device[%s] HW close failed", + sd->name); + goto end; + } + +end: + mutex_unlock(&icp_dev->icp_lock); + return rc; +} + +static int cam_icp_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_ICP, + "CRM is ACTIVE, close should be from CRM for device[%s]", + sd->name); + return 0; + } + + return cam_icp_subdev_close_internal(sd, fh); +} + +const struct v4l2_subdev_internal_ops cam_icp_subdev_internal_ops = { + .open = cam_icp_subdev_open, + .close = cam_icp_subdev_close, +}; + +static inline void cam_icp_subdev_clean_up(uint32_t device_idx) +{ + CAM_MEM_FREE(g_icp_dev[device_idx]); + g_icp_dev[device_idx] = NULL; +} + +static int cam_icp_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0, i = 0; + struct cam_node *node; + struct cam_hw_mgr_intf hw_mgr_intf; + int iommu_hdl = -1; + struct platform_device *pdev = to_platform_device(dev); + struct cam_icp_subdev *icp_dev; + char *subdev_name; + uint32_t device_idx; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + if (!pdev) { + CAM_ERR(CAM_ICP, "Invalid params: pdev is %s", + CAM_IS_NULL_TO_STR(pdev)); + return -EINVAL; + } 
+ + rc = of_property_read_u32(dev->of_node, "cell-index", &device_idx); + if (rc) + device_idx = 0; + + if (CAM_ICP_IS_DEV_IDX_INVALID(device_idx)) { + CAM_ERR(CAM_ICP, "Invalid device idx: %u exceeds subdev max: %u", + device_idx, CAM_ICP_SUBDEV_MAX); + return -EINVAL; + } + + /* + * For targets where only one subdevice exists, cell-index property is not listed + * in the DT node, so the default name and device index are "cam-icp" and 0 respectively + */ + if (rc) + subdev_name = "cam-icp"; + else + subdev_name = cam_icp_subdev_name_arr[device_idx]; + + + mutex_lock(&g_dev_lock); + if (g_icp_dev[device_idx]) { + CAM_ERR(CAM_ICP, + "Invalid device index: %u for pdev: %s, ICP device for this idx is already bound", + device_idx, pdev->name); + rc = -EBADSLT; + mutex_unlock(&g_dev_lock); + goto probe_fail; + } + mutex_unlock(&g_dev_lock); + + icp_dev = CAM_MEM_ZALLOC(sizeof(struct cam_icp_subdev), GFP_KERNEL); + if (!icp_dev) { + CAM_ERR(CAM_ICP, + "Unable to allocate memory for icp device:%s size:%llu", + pdev->name, sizeof(struct cam_icp_subdev)); + return -ENOMEM; + } + + mutex_lock(&g_dev_lock); + g_icp_dev[device_idx] = icp_dev; + mutex_unlock(&g_dev_lock); + + icp_dev->sd.internal_ops = &cam_icp_subdev_internal_ops; + icp_dev->sd.close_seq_prior = CAM_SD_CLOSE_MEDIUM_PRIORITY; + rc = cam_subdev_probe(&icp_dev->sd, pdev, subdev_name, + CAM_ICP_DEVICE_TYPE); + if (rc) { + CAM_ERR(CAM_ICP, "device[%s] probe failed", subdev_name); + goto probe_fail; + } + + node = (struct cam_node *) icp_dev->sd.token; + node->sd_handler = cam_icp_subdev_close_internal; + node->device_idx = device_idx; + mutex_init(&icp_dev->icp_lock); + + rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)(&hw_mgr_intf), + &iommu_hdl, cam_icp_dev_mini_dump_cb, device_idx); + if (rc) { + CAM_ERR(CAM_ICP, "device[%s] HW manager init failed: %d", subdev_name, rc); + goto hw_init_fail; + } + + for (i = 0; i < CAM_ICP_CTX_MAX; i++) { + icp_dev->ctx_icp[i].base = &icp_dev->ctx[i]; + rc = 
cam_icp_context_init(&icp_dev->ctx_icp[i], + &node->hw_mgr_intf, i, iommu_hdl, subdev_name); + if (rc) { + CAM_ERR(CAM_ICP, "device[%s] context init failed", subdev_name); + goto ctx_fail; + } + } + + rc = cam_node_init(node, &hw_mgr_intf, icp_dev->ctx, + CAM_ICP_CTX_MAX, subdev_name); + if (rc) { + CAM_ERR(CAM_ICP, "device[%s] node init failed", subdev_name); + goto ctx_fail; + } + + cam_common_register_evt_inject_cb(cam_icp_dev_evt_inject_cb, + CAM_COMMON_EVT_INJECT_HW_ICP); + + cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_icp_dev_iommu_fault_handler, node); + + icp_dev->open_cnt = 0; + + CAM_DBG(CAM_ICP, "device[%s] id: %u component bound successfully", + subdev_name, device_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +ctx_fail: + for (--i; i >= 0; i--) + cam_icp_context_deinit(&icp_dev->ctx_icp[i]); + cam_icp_hw_mgr_deinit(device_idx); +hw_init_fail: + cam_node_deinit(icp_dev->node); + cam_subdev_remove(&icp_dev->sd); +probe_fail: + cam_icp_subdev_clean_up(device_idx); + return rc; +} + +static void cam_icp_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i, rc; + struct platform_device *pdev = to_platform_device(dev); + struct v4l2_subdev *sd; + struct cam_icp_subdev *icp_dev; + uint32_t device_idx; + + if (!pdev) { + CAM_ERR(CAM_ICP, "pdev is NULL"); + return; + } + + sd = platform_get_drvdata(pdev); + if (!sd) { + CAM_ERR(CAM_ICP, + "V4l2 subdev is NULL for pdev: %s", pdev->name); + return; + } + + rc = of_property_read_u32(dev->of_node, "cell-index", &device_idx); + if (rc) + device_idx = 0; + + if (CAM_ICP_IS_DEV_IDX_INVALID(device_idx)) { + CAM_ERR(CAM_ICP, "Invalid device idx: %u exceeds subdev max: %u", + device_idx, CAM_ICP_SUBDEV_MAX); + return; + } + + icp_dev = g_icp_dev[device_idx]; + + for (i = 0; i < CAM_ICP_CTX_MAX; i++) + cam_icp_context_deinit(&icp_dev->ctx_icp[i]); + + 
cam_icp_hw_mgr_deinit(device_idx); + cam_node_deinit(icp_dev->node); + cam_subdev_remove(&icp_dev->sd); + mutex_destroy(&icp_dev->icp_lock); + cam_icp_subdev_clean_up(device_idx); + + CAM_DBG(CAM_ICP, "device[%s] component unbinded successfully", pdev->name); +} + +const static struct component_ops cam_icp_component_ops = { + .bind = cam_icp_component_bind, + .unbind = cam_icp_component_unbind, +}; + +static int cam_icp_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ICP, "%s Adding ICP component", pdev->name); + rc = component_add(&pdev->dev, &cam_icp_component_ops); + if (rc) + CAM_ERR(CAM_ICP, "%s failed to add component rc: %d", pdev->name, rc); + + return rc; +} + +static int cam_icp_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_icp_component_ops); + return 0; +} + +struct platform_driver cam_icp_driver = { + .probe = cam_icp_probe, + .remove = cam_icp_remove, + .driver = { + .name = "cam_icp", + .owner = THIS_MODULE, + .of_match_table = cam_icp_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_icp_init_module(void) +{ + return platform_driver_register(&cam_icp_driver); +} + +void cam_icp_exit_module(void) +{ + platform_driver_unregister(&cam_icp_driver); +} + +MODULE_DESCRIPTION("MSM ICP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_subdev.h b/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_subdev.h new file mode 100644 index 0000000000..af38e26a24 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/cam_icp_subdev.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_ICP_SUBDEV_H_ +#define _CAM_ICP_SUBDEV_H_ + +/** + * @brief : API to register ICP subdev to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. 
+ */ +int cam_icp_init_module(void); + +/** + * @brief : API to remove ICP subdev from platform framework. + */ +void cam_icp_exit_module(void); + +#endif /* _CAM_ICP_SUBDEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_intf.h b/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_intf.h new file mode 100644 index 0000000000..79dfeb56c1 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_intf.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _HFI_INTF_H_ +#define _HFI_INTF_H_ + +#include + +#define HFI_CMD_Q_MINI_DUMP_SIZE_IN_BYTES 4096 +#define HFI_MSG_Q_MINI_DUMP_SIZE_IN_BYTES 4096 + +#define HFI_NUM_MAX 2 +#define HFI_HANDLE_INIT_VALUE HFI_NUM_MAX + +#define HFI_CLIENT_NAME_LEN 32 + +/** + * struct hfi_mem + * @len: length of memory + * @kva: kernel virtual address + * @iova: IO virtual address + * @reserved: reserved field + */ +struct hfi_mem { + uint64_t len; + uintptr_t kva; + uint32_t iova; + uint32_t reserved; +}; + +/** + * struct hfi_mem_info + * @qtbl: qtable hfi memory + * @cmd_q: command queue hfi memory for host to firmware communication + * @msg_q: message queue hfi memory for firmware to host communication + * @dbg_q: debug queue hfi memory for firmware debug information + * @sfr_buf: buffer for subsystem failure reason[SFR] + * @sec_heap: secondary heap hfi memory for firmware + * @qdss: qdss mapped memory for fw + * @io_mem: io memory info + * @io_mem2: 2nd io memory info + * @fw_uncached: FW uncached region info + * @global_sync: Global sync mem for IPC + * @device_mem: device memory + */ +struct hfi_mem_info { + struct hfi_mem qtbl; + struct hfi_mem cmd_q; + struct hfi_mem msg_q; + struct hfi_mem dbg_q; + struct hfi_mem sfr_buf; + struct hfi_mem sec_heap; + struct hfi_mem shmem; + struct hfi_mem qdss; 
+ struct hfi_mem io_mem; + struct hfi_mem io_mem2; + struct hfi_mem fw_uncached; + struct hfi_mem device_mem; +}; + +/** + * struct hfi_ops + * @irq_raise: called to raise H2ICP interrupt + * @irq_enable: called to enable interrupts from ICP + * @iface_addr: called to get interface registers base address + */ +struct hfi_ops { + void (*irq_raise)(void *data); + void (*irq_enable)(void *data); + void __iomem *(*iface_addr)(void *data); +}; + +/** + * struct hfi_mini_dump_info + * @cmd_q: cmd queue + * @msg_q: msg queue + * @msg_q_state: msg queue state + * @cmd_q_state: cmd queue state + * @dbg_q_state: dbg queue state + */ +struct hfi_mini_dump_info { + uint32_t cmd_q[HFI_CMD_Q_MINI_DUMP_SIZE_IN_BYTES]; + uint32_t msg_q[HFI_MSG_Q_MINI_DUMP_SIZE_IN_BYTES]; + bool msg_q_state; + bool cmd_q_state; + bool dbg_q_state; +}; +/** + * hfi_write_cmd() - function for hfi write + * @client_handle: client handle + * @cmd_ptr: pointer to command data for hfi write + * + * Returns success(zero)/failure(non zero) + */ +int hfi_write_cmd(int client_handle, void *cmd_ptr); + +/** + * hfi_read_message() - function for hfi read + * @client_handle: client handle + * @pmsg: buffer to place read message for hfi queue + * @q_id: Queue to read from + * @buf_words_size: size in words of the input buffer + * @words_read: total number of words read from the queue + * returned as output to the caller + * + * Returns success(zero)/failure(non zero) + */ +int hfi_read_message(int client_handle, uint32_t *pmsg, uint8_t q_id, + size_t buf_words_size, uint32_t *words_read); + +/** + * hfi_init() - function initialize hfi after firmware download + * @client_handle: client handle + * @hfi_mem: hfi memory info + * @hfi_ops: processor-specific hfi ops + * @priv: device private data + * @event_driven_mode: event mode + * + * Returns success(zero)/failure(non zero) + */ +int cam_hfi_init(int client_handle, struct hfi_mem_info *hfi_mem, + const struct hfi_ops *hfi_ops, void *priv, uint8_t 
event_driven_modem); + +/** + * cam_hfi_register() - function to register user as hfi client and retrieve handle + * @client_handle: client handle to be retrieved + * @client_name: Name of the client to be registered + * + * Returns success(zero)/failure(non zero) + */ +int cam_hfi_register(int *client_handle, const char *client_name); + +/** + * cam_hfi_unregister() - function to unregister hfi client + * @client_handle: client handle + * + * Returns success(zero)/failure(non zero) + */ +int cam_hfi_unregister(int *client_handle); + +/** + * hfi_get_hw_caps() - hardware capabilities from firmware + * @query_caps: holds query information from hfi + * + * Returns success(zero)/failure(non zero) + */ +int hfi_get_hw_caps(void *query_caps); + +/** + * hfi_get_hw_caps_v2() - hardware capabilities from firmware, used for v2 query cap + * @client_handle: client handle + * @query_caps: holds query information from hfi + * + * Returns success(zero)/failure(non zero) + */ +int hfi_get_hw_caps_v2(int client_handle, void *query_caps); + +/** + * hfi_send_system_cmd() - send hfi system command to firmware + * @client_handle: client handle + * @type: type of system command + * @data: command data + * @size: size of command data + * + * Returns success(zero)/failure(non zero) + */ +int hfi_send_system_cmd(int client_handle, uint32_t type, uint64_t data, uint32_t size); + +/** + * cam_hfi_deinit() - cleanup HFI + * @client_handle: client handle to be retrieved + */ +void cam_hfi_deinit(int client_handle); +/** + * hfi_set_debug_level() - set debug level + * @client_handle: client handle to be retrieved + * @icp_dbg_type: 1 for debug_q & 2 for qdss + * @lvl: FW debug message level + * + * Returns success(zero)/failure(non zero) + */ +int hfi_set_debug_level(int client_handle, u64 icp_dbg_type, uint32_t lvl); + +/** + * hfi_set_fw_dump_levels() - set firmware hang dump/ramdump levels + * @client_handle: client handle to be retrieved + * @hang_dump_lvl : level of firmware hang dump 
+ * @ram_dump_lvl : level of firmware ram dump + * + * Returns success(zero)/failure(non zero) + */ +int hfi_set_fw_dump_levels(int client_handle, + uint32_t hang_dump_lvl, uint32_t ram_dump_lvl); + +/** + * hfi_send_freq_info() - set firmware dump level + * @client_handle: client handle to be retrieved + * @freq: icp freq + * + * Returns success(zero)/failure(non zero) + */ +int hfi_send_freq_info(int client_handle, int32_t freq); + +/** + * hfi_cmd_ubwc_config_ext() - UBWC configuration to firmware + * @client_handle: client handle to be retrieved + * @ubwc_ipe_cfg: UBWC ipe fetch/write configuration params + * @ubwc_bps_cfg: UBWC bps fetch/write configuration params + * @ubwc_ofe_cfg: UBWC ofe fetch/write configuration params + * + * Returns success(zero)/failure(non zero) + */ +int hfi_cmd_ubwc_config_ext(int client_handle, uint32_t *ubwc_ipe_cfg, + uint32_t *ubwc_bps_cfg, uint32_t *ubwc_ofe_cfg); + +/** + * hfi_cmd_ubwc_config() - UBWC configuration to firmware + * for older targets + * @client_handle: client handle to be retrieved + * @ubwc_cfg: UBWC configuration parameters + * + * Returns success(zero)/failure(non zero) + */ +int hfi_cmd_ubwc_config(int client_handle, uint32_t *ubwc_cfg); + +/** + * cam_hfi_resume() - function to resume + * @client_handle: client handle to be retrieved + * + * Returns success(zero)/failure(non zero) + */ +int cam_hfi_resume(int client_handle); + +/** + * cam_hfi_queue_dump() - utility function to dump hfi queues + * @client_handle: client handle to be retrieved + * @dump_queue_data: if set dumps queue contents + */ +void cam_hfi_queue_dump(int client_handle, bool dump_queue_data); + +/** + * cam_hfi_mini_dump() - utility function for mini dump + * @client_handle: client handle to be retrieved + * @dst: memory destination + */ +void cam_hfi_mini_dump(int client_handle, struct hfi_mini_dump_info *dst); +#endif /* _HFI_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_reg.h 
b/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_reg.h new file mode 100644 index 0000000000..6ab6b6e9e8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_reg.h @@ -0,0 +1,310 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_HFI_REG_H_ +#define _CAM_HFI_REG_H_ + +#include +#include "hfi_intf.h" + +/* general purpose registers */ +#define GEN_PURPOSE_REG(n) (n << 2) + +#define HFI_REG_FW_VERSION GEN_PURPOSE_REG(1) +#define HFI_REG_HOST_ICP_INIT_REQUEST GEN_PURPOSE_REG(2) +#define HFI_REG_ICP_HOST_INIT_RESPONSE GEN_PURPOSE_REG(3) +#define HFI_REG_SHARED_MEM_PTR GEN_PURPOSE_REG(4) +#define HFI_REG_SHARED_MEM_SIZE GEN_PURPOSE_REG(5) +#define HFI_REG_QTBL_PTR GEN_PURPOSE_REG(6) +#define HFI_REG_SECONDARY_HEAP_PTR GEN_PURPOSE_REG(7) +#define HFI_REG_SECONDARY_HEAP_SIZE GEN_PURPOSE_REG(8) +#define HFI_REG_SFR_PTR GEN_PURPOSE_REG(10) +#define HFI_REG_QDSS_IOVA GEN_PURPOSE_REG(11) +#define HFI_REG_QDSS_IOVA_SIZE GEN_PURPOSE_REG(12) +#define HFI_REG_IO_REGION_IOVA GEN_PURPOSE_REG(13) +#define HFI_REG_IO_REGION_SIZE GEN_PURPOSE_REG(14) +#define HFI_REG_IO2_REGION_IOVA GEN_PURPOSE_REG(15) +#define HFI_REG_IO2_REGION_SIZE GEN_PURPOSE_REG(16) +#define HFI_REG_FWUNCACHED_REGION_IOVA GEN_PURPOSE_REG(17) +#define HFI_REG_FWUNCACHED_REGION_SIZE GEN_PURPOSE_REG(18) +#define HFI_REG_DEVICE_REGION_IOVA GEN_PURPOSE_REG(19) +#define HFI_REG_DEVICE_REGION_IOVA_SIZE GEN_PURPOSE_REG(20) + +/* start of Queue table and queues */ +#define MAX_ICP_HFI_QUEUES 4 +#define ICP_QHDR_TX_TYPE_MASK 0xFF000000 +#define ICP_QHDR_RX_TYPE_MASK 0x00FF0000 +#define ICP_QHDR_PRI_TYPE_MASK 0x0000FF00 +#define ICP_QHDR_Q_ID_MASK 0x000000FF +#define ICP_QTBL_SIZE_IN_BYTES sizeof(struct hfi_qtbl) + +#define ICP_CMD_Q_SIZE_IN_BYTES 8192 +#define ICP_MSG_Q_SIZE_IN_BYTES 8192 +#define 
ICP_DBG_Q_SIZE_IN_BYTES 102400 +#define ICP_MSG_SFR_SIZE_IN_BYTES 4096 +#define ICP_SEC_HEAP_SIZE_IN_BYTES 1048576 + +#define ICP_HFI_QTBL_HOSTID1 0x01000000 +#define ICP_HFI_QTBL_STATUS_ENABLED 0x00000001 +#define ICP_HFI_NUMBER_OF_QS 3 +#define ICP_HFI_NUMBER_OF_ACTIVE_QS 3 +#define ICP_HFI_QTBL_OFFSET 0 +#define ICP_HFI_VAR_SIZE_PKT 0 +#define ICP_HFI_MAX_MSG_SIZE_IN_WORDS 128 + + +/* Queue Header type masks. Use these to access bitfields in qhdr_type */ +#define HFI_MASK_QHDR_TX_TYPE 0xFF000000 +#define HFI_MASK_QHDR_RX_TYPE 0x00FF0000 +#define HFI_MASK_QHDR_PRI_TYPE 0x0000FF00 +#define HFI_MASK_QHDR_Q_ID_TYPE 0x000000FF + + +#define TX_EVENT_DRIVEN_MODE_1 0 +#define RX_EVENT_DRIVEN_MODE_1 0 +#define TX_EVENT_DRIVEN_MODE_2 0x01000000 +#define RX_EVENT_DRIVEN_MODE_2 0x00010000 +#define TX_EVENT_POLL_MODE_2 0x02000000 +#define RX_EVENT_POLL_MODE_2 0x00020000 +#define U32_OFFSET 0x1 +#define BYTE_WORD_SHIFT 2 + +#define HFI_GET_CLIENT_HANDLE(idx) (idx) +#define HFI_GET_INDEX(client_handle) (client_handle) +#define IS_VALID_HFI_INDEX(idx) ((idx) < HFI_NUM_MAX) + +/** + * @INVALID: Invalid state + * @HFI_DEINIT: HFI is not initialized yet + * @HFI_INIT: HFI is initialized + * @HFI_READY: HFI is ready to send/receive commands/messages + */ +enum hfi_state { + HFI_DEINIT, + HFI_INIT, + HFI_READY +}; + +/** + * @RESET: init success + * @SET: init failed + */ +enum reg_settings { + RESET, + SET, + SET_WM = 1024 +}; + +/** + * @ICP_INIT_RESP_RESET: reset init state + * @ICP_INIT_RESP_SUCCESS: init success + * @ICP_INIT_RESP_FAILED: init failed + */ +enum host_init_resp { + ICP_INIT_RESP_RESET, + ICP_INIT_RESP_SUCCESS, + ICP_INIT_RESP_FAILED +}; + +/** + * @ICP_INIT_REQUEST_RESET: reset init request + * @ICP_INIT_REQUEST_SET: set init request + */ +enum host_init_request { + ICP_INIT_REQUEST_RESET, + ICP_INIT_REQUEST_SET +}; + +/** + * @QHDR_INACTIVE: Queue is inactive + * @QHDR_ACTIVE: Queue is active + */ +enum qhdr_status { + QHDR_INACTIVE, + QHDR_ACTIVE +}; + +/** + * 
@INTR_MODE: event driven mode 1, each send and receive generates interrupt + * @WM_MODE: event driven mode 2, interrupts based on watermark mechanism + * @POLL_MODE: poll method + */ +enum qhdr_event_drv_type { + INTR_MODE, + WM_MODE, + POLL_MODE +}; + +/** + * @TX_INT: event driven mode 1, each send and receive generates interrupt + * @TX_INT_WM: event driven mode 2, interrupts based on watermark mechanism + * @TX_POLL: poll method + * @ICP_QHDR_TX_TYPE_MASK defines position in qhdr_type + */ +enum qhdr_tx_type { + TX_INT, + TX_INT_WM, + TX_POLL +}; + +/** + * @RX_INT: event driven mode 1, each send and receive generates interrupt + * @RX_INT_WM: event driven mode 2, interrupts based on watermark mechanism + * @RX_POLL: poll method + * @ICP_QHDR_RX_TYPE_MASK defines position in qhdr_type + */ +enum qhdr_rx_type { + RX_INT, + RX_INT_WM, + RX_POLL +}; + +/** + * @Q_CMD: Host to FW command queue + * @Q_MSG: FW to Host message queue + * @Q_DEBUG: FW to Host debug queue + * @ICP_QHDR_Q_ID_MASK defines position in qhdr_type + */ +enum qhdr_q_id { + Q_CMD, + Q_MSG, + Q_DBG +}; + +/** + * struct hfi_qtbl_hdr + * @qtbl_version: Queue table version number + * Higher 16 bits: Major version + * Lower 16 bits: Minor version + * @qtbl_size: Queue table size from version to last parametr in qhdr entry + * @qtbl_qhdr0_offset: Offset to the start of first qhdr + * @qtbl_qhdr_size: Queue header size in bytes + * @qtbl_num_q: Total number of queues in Queue table + * @qtbl_num_active_q: Total number of active queues + */ +struct hfi_qtbl_hdr { + uint32_t qtbl_version; + uint32_t qtbl_size; + uint32_t qtbl_qhdr0_offset; + uint32_t qtbl_qhdr_size; + uint32_t qtbl_num_q; + uint32_t qtbl_num_active_q; +} __packed; + +/** + * struct hfi_q_hdr + * @qhdr_status: Queue status, qhdr_state define possible status + * @qhdr_start_addr: Queue start address in non cached memory + * @qhdr_type: qhdr_tx, qhdr_rx, qhdr_q_id and priority defines qhdr type + * @qhdr_q_size: Queue size + * Number of 
queue packets if qhdr_pkt_size is non-zero + * Queue size in bytes if qhdr_pkt_size is zero + * @qhdr_pkt_size: Size of queue packet entries + * 0x0: variable queue packet size + * non zero: size of queue packet entry, fixed + * @qhdr_pkt_drop_cnt: Number of packets dropped by sender + * @qhdr_rx_wm: Receiver watermark, applicable in event driven mode + * @qhdr_tx_wm: Sender watermark, applicable in event driven mode + * @qhdr_rx_req: Receiver sets this bit if queue is empty + * @qhdr_tx_req: Sender sets this bit if queue is full + * @qhdr_rx_irq_status: Receiver sets this bit and triggers an interrupt to + * the sender after packets are dequeued. Sender clears this bit + * @qhdr_tx_irq_status: Sender sets this bit and triggers an interrupt to + * the receiver after packets are queued. Receiver clears this bit + * @qhdr_read_idx: Read index + * @qhdr_write_idx: Write index + */ +struct hfi_q_hdr { + uint32_t dummy[15]; + uint32_t qhdr_status; + uint32_t dummy1[15]; + uint32_t qhdr_start_addr; + uint32_t dummy2[15]; + uint32_t qhdr_type; + uint32_t dummy3[15]; + uint32_t qhdr_q_size; + uint32_t dummy4[15]; + uint32_t qhdr_pkt_size; + uint32_t dummy5[15]; + uint32_t qhdr_pkt_drop_cnt; + uint32_t dummy6[15]; + uint32_t qhdr_rx_wm; + uint32_t dummy7[15]; + uint32_t qhdr_tx_wm; + uint32_t dummy8[15]; + uint32_t qhdr_rx_req; + uint32_t dummy9[15]; + uint32_t qhdr_tx_req; + uint32_t dummy10[15]; + uint32_t qhdr_rx_irq_status; + uint32_t dummy11[15]; + uint32_t qhdr_tx_irq_status; + uint32_t dummy12[15]; + uint32_t qhdr_read_idx; + uint32_t dummy13[15]; + uint32_t qhdr_write_idx; + uint32_t dummy14[15]; +}; + +/** + * struct sfr_buf + * @size: Number of characters + * @msg : Subsystem failure reason + */ +struct sfr_buf { + uint32_t size; + char msg[ICP_MSG_SFR_SIZE_IN_BYTES]; +}; + +/** + * struct hfi_q_tbl + * @q_tbl_hdr: Queue table header + * @q_hdr: Queue header info, it holds info of cmd, msg and debug queues + */ +struct hfi_qtbl { + struct hfi_qtbl_hdr q_tbl_hdr; + 
struct hfi_q_hdr q_hdr[MAX_ICP_HFI_QUEUES]; +}; + +/** + * struct hfi_info + * @map: Hfi shared memory info + * @ops: processor-specific ops + * @smem_size: Shared memory size + * @uncachedheap_size: uncached heap size + * @msgpacket_buf: message buffer + * @cmd_q_lock: Lock for command queue + * @msg_q_lock: Lock for message queue + * @dbg_q_lock: Lock for debug queue + * @hfi_state: State machine for hfi + * @priv: device private data + * @dbg_lvl: debug level set to FW + * @fw_version: firmware version + * @client_name: hfi client's name + * @cmd_q_state: State of command queue + * @msg_q_state: State of message queue + * @dbg_q_state: State of debug queue + */ +struct hfi_info { + struct hfi_mem_info map; + struct hfi_ops ops; + uint32_t smem_size; + uint32_t uncachedheap_size; + uint32_t msgpacket_buf[ICP_HFI_MAX_MSG_SIZE_IN_WORDS]; + struct mutex cmd_q_lock; + struct mutex msg_q_lock; + struct mutex dbg_q_lock; + uint8_t hfi_state; + void *priv; + u64 dbg_lvl; + uint32_t fw_version; + char client_name[HFI_CLIENT_NAME_LEN]; + bool cmd_q_state; + bool msg_q_state; + bool dbg_q_state; +}; + +#endif /* _CAM_HFI_REG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_session_defs.h b/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_session_defs.h new file mode 100644 index 0000000000..b924f81923 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_session_defs.h @@ -0,0 +1,632 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_HFI_SESSION_DEFS_H +#define _CAM_HFI_SESSION_DEFS_H + +#include + +#define HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO 0x1 +#define HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS 0x2 +#define HFI_IPEBPS_CMD_OPCODE_BPS_ABORT 0x3 +#define HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY 0x4 + +#define HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO 0x5 +#define HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS 0x6 +#define HFI_IPEBPS_CMD_OPCODE_IPE_ABORT 0x7 +#define HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY 0x8 + +#define HFI_OFE_CMD_OPCODE_CONFIG_IO 0x1 +#define HFI_OFE_CMD_OPCODE_FRAME_PROCESS 0x2 +#define HFI_OFE_CMD_OPCODE_ABORT 0x3 +#define HFI_OFE_CMD_OPCODE_DESTROY 0x4 + +#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_IPE 0x9 +#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_BPS 0xa +#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_BPS 0xb +#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_IPE 0xc + +#define HFI_IPEBPS_CMD_OPCODE_MEM_MAP 0xe +#define HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP 0xf + +#define HFI_IPEBPS_HANDLE_TYPE_BPS_NON_RT 0x1 +#define HFI_IPEBPS_HANDLE_TYPE_IPE_RT 0x2 +#define HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT 0x3 +#define HFI_IPEBPS_HANDLE_TYPE_IPE_SEMI_RT 0x4 +#define HFI_IPEBPS_HANDLE_TYPE_BPS_RT 0x5 +#define HFI_IPEBPS_HANDLE_TYPE_BPS_SEMI_RT 0x6 + +#define HFI_OFE_HANDLE_TYPE_OFE_RT 0x1 +#define HFI_OFE_HANDLE_TYPE_OFE_NON_RT 0x2 +#define HFI_OFE_HANDLE_TYPE_OFE_SEMI_RT 0x3 + +/** + * struct mem_map_region_data + * @start_addr: cmd buffer region start addr + * @len : length of the region + * + * create mem_map_region_data + */ +struct mem_map_region_data { + uint32_t start_addr; + uint32_t len; +}; + +/** + * struct hfi_cmd_ipe_bps_map + * @user_data : user arg + * @mem_map_request_num: number of mappings + * @mem_map_region_sets: array of all map/unmap info + * + * create hfi_cmd_ipe_bps_map + */ +struct hfi_cmd_ipe_bps_map { + uint64_t user_data; + uint32_t mem_map_request_num; + union { + struct mem_map_region_data mem_map_region_sets[1]; + __DECLARE_FLEX_ARRAY(struct mem_map_region_data, 
mem_map_region_sets_flex); + }; +} __packed; + +/** + * struct hfi_cmd_ipe_bps_map_ack + * @rc : Async return code + * @user_data: user_arg + * + * create hfi_cmd_ipe_bps_map_ack + */ +struct hfi_cmd_ipe_bps_map_ack { + uint32_t rc; + uint64_t user_data; +}; + +/** + * struct abort_data + * @num_req_ids: Number of req ids + * @num_req_id: point to specific req id + * + * create abort data + */ +struct abort_data { + uint32_t num_req_ids; + union { + uint32_t num_req_id[1]; + __DECLARE_FLEX_ARRAY(uint32_t, num_req_id_flex); + }; +}; + +/** + * struct hfi_cmd_data + * @abort: abort data + * @user data: user supplied argument + * + * create session abort data + */ +struct hfi_cmd_abort { + struct abort_data abort; + uint64_t user_data; +} __packed; + +/** + * struct hfi_cmd_destroy + * @user_data: user supplied data + * + * IPE/BPS/OFE destroy command + * @HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY + * @HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY + * @HFI_OFE_CMD_OPCODE_OFE_DESTROY + */ +struct hfi_cmd_destroy { + uint64_t user_data; +} __packed; + +/** + * struct hfi_cmd_chaining_ops + * @wait_hdl: current session handle waits on wait_hdl to complete operation + * @user_data: user supplied argument + * + * this structure for chaining opcodes + * BPS_WAITS_FOR_IPE + * BPS_WAITS_FOR_BPS + * IPE_WAITS_FOR_BPS + * IPE_WAITS_FOR_IPE + */ +struct hfi_cmd_chaining_ops { + uint32_t wait_hdl; + uint64_t user_data; +} __packed; + +/** + * struct hfi_cmd_create_handle + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @handle_type: IPE/BPS/OFE firmware session handle type + * @user_data1: caller provided data1 + * @user_data2: caller provided data2 + * + * create firmware session handle + */ +struct hfi_cmd_create_handle { + uint32_t size; + uint32_t pkt_type; + uint32_t handle_type; + uint64_t user_data1; + uint64_t user_data2; +} __packed; + +/** + * struct hfi_cmd_dev_async + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @opcode: opcode for devices I/O 
async operation + * CONFIG_IO: configures I/O for device handle + * FRAME_PROCESS: image frame to be processed by device + * ABORT: abort all processing frames of device handle + * DESTROY: destroy earlier created device handle + * BPS_WAITS_FOR_IPE: sync for BPS to wait for IPE + * BPS_WAITS_FOR_BPS: sync for BPS to wait for BPS + * IPE_WAITS_FOR_IPE: sync for IPE to wait for IPE + * IPE_WAITS_FOR_BPS: sync for IPE to wait for BPS + * @num_fw_handles: number of device firmware handles in fw_handles array + * @fw_handles: device handles array + * @payload: command payload for device opcodes + * @direct: points to actual payload + * @indirect: points to address of payload + * + * sends async command to the earlier created device handle + * for asynchronous operation. + */ +struct hfi_cmd_dev_async { + uint32_t size; + uint32_t pkt_type; + uint32_t opcode; + uint64_t user_data1; + uint64_t user_data2; + uint32_t num_fw_handles; + union { + uint32_t fw_handles[1]; + __DECLARE_FLEX_ARRAY(uint32_t, fw_handles_flex); + }; + union { + union { + uint32_t direct[1]; + __DECLARE_FLEX_ARRAY(uint32_t, direct_flex); + }; + uint32_t indirect; + } payload; +} __packed; + +/** + * struct hfi_msg_create_handle_ack + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @err_type: error code + * @fw_handle: output param for device handle + * @user_data1: user provided data1 + * @user_data2: user provided data2 + * + * ack for create handle command of ICP's controlled HW + * @HFI_MSG_IPEBPS_CREATE_HANDLE_ACK + */ +struct hfi_msg_create_handle_ack { + uint32_t size; + uint32_t pkt_type; + uint32_t err_type; + uint32_t fw_handle; + uint64_t user_data1; + uint64_t user_data2; +} __packed; + +/** + * struct hfi_msg_dev_async_ack + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @opcode: opcode of the device async operation + * @user_data1: user provided data1 + * @user_data2: user provided data2 + * @err_type: error code + * @msg_data: device async done 
message data + * + * result of device async command + * @HFI_MSG_IPEBPS_ASYNC_COMMAND_ACK + */ +struct hfi_msg_dev_async_ack { + uint32_t size; + uint32_t pkt_type; + uint32_t opcode; + uint64_t user_data1; + uint64_t user_data2; + uint32_t err_type; + union { + uint32_t msg_data[1]; + __DECLARE_FLEX_ARRAY(uint32_t, msg_data_flex); + }; +} __packed; + +/** + * struct hfi_msg_frame_process_done + * @result: result of frame process command + * @scratch_buffer_address: address of scratch buffer + */ +struct hfi_msg_frame_process_done { + uint32_t result; + uint32_t scratch_buffer_address; +}; + +/** + * struct hfi_msg_chaining_op + * @status: return status + * @user_data: user data provided as part of chaining ops + * + * Device wait response + */ +struct hfi_msg_chaining_op { + uint32_t status; + uint64_t user_data; +} __packed; + +/** + * struct hfi_msg_abort_destroy + * @status: return status + * @user_data: user data provided as part of abort/destroy ops + * + * Device abort/destroy response + */ +struct hfi_msg_abort_destroy { + uint32_t status; + uint64_t user_data; +} __packed; + +#define MAX_NUM_OF_IMAGE_PLANES 2 +#define MAX_HFR_GROUP 16 + +enum hfi_ipe_io_images { + IPE_INPUT_IMAGE_FULL, + IPE_INPUT_IMAGE_DS4, + IPE_INPUT_IMAGE_DS16, + IPE_INPUT_IMAGE_DS64, + IPE_INPUT_IMAGE_FULL_REF, + IPE_INPUT_IMAGE_DS4_REF, + IPE_INPUT_IMAGE_DS16_REF, + IPE_INPUT_IMAGE_DS64_REF, + IPE_OUTPUT_IMAGE_DISPLAY, + IPE_OUTPUT_IMAGE_VIDEO, + IPE_OUTPUT_IMAGE_FULL_REF, + IPE_OUTPUT_IMAGE_DS4_REF, + IPE_OUTPUT_IMAGE_DS16_REF, + IPE_OUTPUT_IMAGE_DS64_REF, + IPE_INPUT2_IMAGE_FULL, + IPE_INPUT2_IMAGE_DSX, + IPE_INPUT_OUTPUT_SCRATCHBUFFER, + IPE_INPUT_IMAGE_FIRST = IPE_INPUT_IMAGE_FULL, + IPE_INPUT_IMAGE_LAST = IPE_INPUT_IMAGE_DS64_REF, + IPE_INPUT_IMAGE_REF_FIRST = IPE_INPUT_IMAGE_FULL_REF, + IPE_INPUT_IMAGE_REF_LAST = IPE_INPUT_IMAGE_DS64_REF, + IPE_OUTPUT_IMAGE_FIRST = IPE_OUTPUT_IMAGE_DISPLAY, + IPE_OUTPUT_IMAGE_LAST = IPE_OUTPUT_IMAGE_DS64_REF, + IPE_OUTPUT_IMAGE_REF_FIRST = 
IPE_OUTPUT_IMAGE_FULL_REF, + IPE_OUTPUT_IMAGE_REF_LAST = IPE_OUTPUT_IMAGE_DS64_REF, + IPE_INPUT2_IMAGE_FIRST = IPE_INPUT2_IMAGE_FULL, + IPE_INPUT2_IMAGE_LAST = IPE_INPUT2_IMAGE_DSX, + IPE_INPUT_OUTPUT_IMAGE_LAST = IPE_INPUT_OUTPUT_SCRATCHBUFFER, + IPE_IO_IMAGES_MAX +}; + +enum bps_io_images { + BPS_INPUT_IMAGE, + BPS_OUTPUT_IMAGE_FULL, + BPS_OUTPUT_IMAGE_DS4, + BPS_OUTPUT_IMAGE_DS16, + BPS_OUTPUT_IMAGE_DS64, + BPS_OUTPUT_IMAGE_STATS_BG, + BPS_OUTPUT_IMAGE_STATS_BHIST, + BPS_OUTPUT_IMAGE_REG1, + BPS_OUTPUT_IMAGE_REG2, + BPS_OUTPUT_IMAGE_FIRST = BPS_OUTPUT_IMAGE_FULL, + BPS_OUTPUT_IMAGE_LAST = BPS_OUTPUT_IMAGE_REG2, + BPS_IO_IMAGES_MAX +}; + +struct frame_buffer { + uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES]; + uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES]; +} __packed; + +struct bps_frame_process_data { + uint32_t max_num_cores; + uint32_t target_time; + uint32_t ubwc_stats_buffer_addr; + uint32_t ubwc_stats_buffer_size; + uint32_t cdm_buffer_addr; + uint32_t cdm_buffer_size; + uint32_t iq_settings_addr; + uint32_t strip_lib_out_addr; + uint32_t cdm_prog_addr; + uint32_t request_id; + struct frame_buffer buffers[BPS_IO_IMAGES_MAX]; +}; + +enum hfi_ipe_image_format { + IMAGE_FORMAT_INVALID, + IMAGE_FORMAT_MIPI_8, + IMAGE_FORMAT_MIPI_10, + IMAGE_FORMAT_MIPI_12, + IMAGE_FORMAT_MIPI_14, + IMAGE_FORMAT_BAYER_8, + IMAGE_FORMAT_BAYER_10, + IMAGE_FORMAT_BAYER_12, + IMAGE_FORMAT_BAYER_14, + IMAGE_FORMAT_PDI_10, + IMAGE_FORMAT_PD_10, + IMAGE_FORMAT_PD_8, + IMAGE_FORMAT_INDICATIONS, + IMAGE_FORMAT_REFINEMENT, + IMAGE_FORMAT_UBWC_TP_10, + IMAGE_FORMAT_UBWC_NV_12, + IMAGE_FORMAT_UBWC_NV12_4R, + IMAGE_FORMAT_UBWC_P010, + IMAGE_FORMAT_LINEAR_TP_10, + IMAGE_FORMAT_LINEAR_P010, + IMAGE_FORMAT_LINEAR_NV12, + IMAGE_FORMAT_LINEAR_PLAIN_16, + IMAGE_FORMAT_YUV422_8, + IMAGE_FORMAT_YUV422_10, + IMAGE_FORMAT_STATISTICS_BAYER_GRID, + IMAGE_FORMAT_STATISTICS_BAYER_HISTOGRAM, + IMAGE_FORMAT_MAX +}; + +enum hfi_ipe_plane_format { + PLANE_FORMAT_INVALID = 0, + PLANE_FORMAT_MIPI_8, + 
PLANE_FORMAT_MIPI_10, + PLANE_FORMAT_MIPI_12, + PLANE_FORMAT_MIPI_14, + PLANE_FORMAT_BAYER_8, + PLANE_FORMAT_BAYER_10, + PLANE_FORMAT_BAYER_12, + PLANE_FORMAT_BAYER_14, + PLANE_FORMAT_PDI_10, + PLANE_FORMAT_PD_10, + PLANE_FORMAT_PD_8, + PLANE_FORMAT_INDICATIONS, + PLANE_FORMAT_REFINEMENT, + PLANE_FORMAT_UBWC_TP_10_Y, + PLANE_FORMAT_UBWC_TP_10_C, + PLANE_FORMAT_UBWC_NV_12_Y, + PLANE_FORMAT_UBWC_NV_12_C, + PLANE_FORMAT_UBWC_NV_12_4R_Y, + PLANE_FORMAT_UBWC_NV_12_4R_C, + PLANE_FORMAT_UBWC_P010_Y, + PLANE_FORMAT_UBWC_P010_C, + PLANE_FORMAT_LINEAR_TP_10_Y, + PLANE_FORMAT_LINEAR_TP_10_C, + PLANE_FORMAT_LINEAR_P010_Y, + PLANE_FORMAT_LINEAR_P010_C, + PLANE_FORMAT_LINEAR_NV12_Y, + PLANE_FORMAT_LINEAR_NV12_C, + PLANE_FORMAT_LINEAR_PLAIN_16_Y, + PLANE_FORMAT_LINEAR_PLAIN_16_C, + PLANE_FORMAT_YUV422_8, + PLANE_FORMAT_YUV422_10, + PLANE_FORMAT_STATISTICS_BAYER_GRID, + PLANE_FORMAT_STATISTICS_BAYER_HISTOGRAM, + PLANE_FORMAT_MAX +}; + +enum hfi_ipe_bayer_pixel_order { + FIRST_PIXEL_R, + FIRST_PIXEL_GR, + FIRST_PIXEL_B, + FIRST_PIXEL_GB, + FIRST_PIXEL_MAX +}; + +enum hfi_ipe_pixel_pack_alignment { + PIXEL_LSB_ALIGNED, + PIXEL_MSB_ALIGNED, +}; + +enum hfi_ipe_yuv_422_order { + PIXEL_ORDER_Y_U_Y_V, + PIXEL_ORDER_Y_V_Y_U, + PIXEL_ORDER_U_Y_V_Y, + PIXEL_ORDER_V_Y_U_Y, + PIXEL_ORDER_YUV422_MAX +}; + +enum ubwc_write_client { + IPE_WR_CLIENT_0 = 0, + IPE_WR_CLIENT_1, + IPE_WR_CLIENT_5, + IPE_WR_CLIENT_6, + IPE_WR_CLIENT_7, + IPE_WR_CLIENT_8, + IPE_WR_CLIENT_MAX +}; + +/** + * struct image_info + * @format: image format + * @img_width: image width + * @img_height: image height + * @bayer_order: pixel order + * @pix_align: alignment + * @yuv422_order: YUV order + * @byte_swap: byte swap + */ +struct image_info { + enum hfi_ipe_image_format format; + uint32_t img_width; + uint32_t img_height; + enum hfi_ipe_bayer_pixel_order bayer_order; + enum hfi_ipe_pixel_pack_alignment pix_align; + enum hfi_ipe_yuv_422_order yuv422_order; + uint32_t byte_swap; +} __packed; + +/** + * struct 
buffer_layout + * @buf_stride: buffer stride + * @buf_height: buffer height + */ +struct buffer_layout { + uint32_t buf_stride; + uint32_t buf_height; +} __packed; + +/** + * struct image_desc + * @info: image info + * @buf_layout: buffer layout + * @meta_buf_layout: meta buffer layout + */ +struct image_desc { + struct image_info info; + struct buffer_layout buf_layout[MAX_NUM_OF_IMAGE_PLANES]; + struct buffer_layout meta_buf_layout[MAX_NUM_OF_IMAGE_PLANES]; +} __packed; + +struct ica_stab_coeff { + uint32_t coeffs[8]; +} __packed; + +struct ica_stab_params { + uint32_t mode; + struct ica_stab_coeff transforms[3]; +} __packed; + +struct frame_set { + struct frame_buffer buffers[IPE_IO_IMAGES_MAX]; + uint32_t cdm_ica1_addr; + uint32_t cdm_ica2_addr; +} __packed; + +struct ipe_frame_process_data { + uint32_t strip_lib_out_addr; + uint32_t iq_settings_addr; + uint32_t scratch_buffer_addr; + uint32_t scratch_buffer_size; + uint32_t ubwc_stats_buffer_addr; + uint32_t ubwc_stats_buffer_size; + uint32_t cdm_buffer_addr; + uint32_t cdm_buffer_size; + uint32_t max_num_cores; + uint32_t target_time; + uint32_t cdm_prog_base; + uint32_t cdm_pre_ltm; + uint32_t cdm_post_ltm; + uint32_t cdm_anr_full_pass; + uint32_t cdm_anr_ds4; + uint32_t cdm_anr_ds16; + uint32_t cdm_anr_ds64; + uint32_t cdm_tf_full_pass; + uint32_t cdm_tf_ds4; + uint32_t cdm_tf_ds16; + uint32_t cdm_tf_ds64; + uint32_t cdm_dsx_dc4; + uint32_t cdm_dsx_dc16; + uint32_t cdm_dsz_dc64; + uint32_t cdm_mfhdr_full_pass; + uint32_t cdm_mfhdr_dcx; + uint32_t request_id; + uint32_t frames_in_batch; + struct frame_set framesets[MAX_HFR_GROUP]; +} __packed; + +/** + * struct hfi_cmd_ipe_config + * @images: images descreptions + * @user_data: user supplied data + * + * payload for IPE async command + */ +struct hfi_cmd_ipe_config { + struct image_desc images[IPE_IO_IMAGES_MAX]; + uint64_t user_data; +} __packed; + +/** + * struct frame_buffers + * @buf_ptr: buffer pointers for all planes + * @meta_buf_ptr: meta buffer 
pointers for all planes
+ */
+struct frame_buffers {
+	uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+	uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+/**
+ * struct hfi_msg_ipe_config
+ * @rc: result of ipe config command
+ * @scratch_mem_size: scratch mem size for a config
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_config {
+	uint32_t rc;
+	uint32_t scratch_mem_size;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_bps_config
+ * @rc: result of bps config command
+ * @user_data: user data
+ */
+struct hfi_msg_bps_config {
+	uint32_t rc;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_ofe_config
+ * @rc: result of ofe config command
+ */
+struct hfi_msg_ofe_config {
+	uint32_t rc;
+} __packed;
+
+/**
+ * struct icp_dev_destroy
+ * @userdata: user data
+ */
+struct icp_dev_destroy {
+	uint64_t userdata;
+};
+
+/**
+ * struct hfi_msg_ipe_frame_process
+ * @status: result of ipe frame process command
+ * @scratch_buf_addr: address of scratch buffer
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_frame_process {
+	uint32_t status;
+	uint32_t scratch_buf_addr;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_synx_test_payload
+ * Same struct used for cmd and response
+ *
+ * @size: Size of the pkt
+ * @pkt_type: pkt type (cmd/response)
+ * @input_size: Input buffer size
+ * @input_iova: Input buffer address
+ * @output_size: Output buffer size
+ * @output_iova: Output buffer address
+ */
+struct hfi_cmd_synx_test_payload {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t input_size;
+	uint32_t input_iova;
+	uint32_t output_size;
+	uint32_t output_iova;
+} __packed;
+#endif /* _CAM_HFI_SESSION_DEFS_H */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_sys_defs.h b/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_sys_defs.h
new file mode 100644
index 0000000000..b79d1e0ecb
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_icp/fw_inc/hfi_sys_defs.h
@@ -0,0 +1,690 @@
+/*
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _HFI_DEFS_H_ +#define _HFI_DEFS_H_ + +#include + +/* + * Following base acts as common starting points + * for all enumerations. + */ +#define HFI_COMMON_BASE 0x0 + +/* HFI Domain base offset for commands and messages */ +#define HFI_DOMAIN_SHFT (24) +#define HFI_DOMAIN_BMSK (0x7 << HFI_DOMAIN_SHFT) +#define HFI_DOMAIN_BASE_ICP (0x0 << HFI_DOMAIN_SHFT) +#define HFI_DOMAIN_BASE_IPE_BPS (0x1 << HFI_DOMAIN_SHFT) +#define HFI_DOMAIN_BASE_CDM (0x2 << HFI_DOMAIN_SHFT) +#define HFI_DOMAIN_BASE_DBG (0x3 << HFI_DOMAIN_SHFT) +#define HFI_DOMAIN_BASE_OFE (0x4 << HFI_DOMAIN_SHFT) + +/* Command base offset for commands */ +#define HFI_CMD_START_OFFSET 0x10000 + +/* Command base offset for messages */ +#define HFI_MSG_START_OFFSET 0x20000 + +/* System Level Error types */ +#define HFI_ERR_SYS_NONE (HFI_COMMON_BASE) +#define HFI_ERR_SYS_FATAL (HFI_COMMON_BASE + 0x1) +#define HFI_ERR_SYS_VERSION_MISMATCH (HFI_COMMON_BASE + 0x2) +#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN (HFI_COMMON_BASE + 0x3) +#define HFI_ERR_SYS_UNSUPPORT_CMD (HFI_COMMON_BASE + 0x4) +#define HFI_ERR_SYS_CMDFAILED (HFI_COMMON_BASE + 0x5) +#define HFI_ERR_SYS_CMDSIZE (HFI_COMMON_BASE + 0x6) + +/* System Level Event types */ +#define HFI_EVENT_SYS_ERROR (HFI_COMMON_BASE + 0x1) +#define HFI_EVENT_ICP_ERROR (HFI_COMMON_BASE + 0x2) +#define HFI_EVENT_IPE_BPS_ERROR (HFI_COMMON_BASE + 0x3) +#define HFI_EVENT_CDM_ERROR (HFI_COMMON_BASE + 0x4) +#define HFI_EVENT_DBG_ERROR (HFI_COMMON_BASE + 0x5) +#define HFI_EVENT_OFE_ERROR (HFI_COMMON_BASE + 0x6) + +/* Core level start Ranges for errors */ +#define HFI_ERR_ICP_START (HFI_COMMON_BASE + 0x64) +#define HFI_ERR_IPE_BPS_START (HFI_ERR_ICP_START + 0x64) +#define HFI_ERR_CDM_START (HFI_ERR_IPE_BPS_START + 0x64) +#define HFI_ERR_DBG_START (HFI_ERR_CDM_START + 0x64) 
+#define HFI_ERR_OFE_START (HFI_ERR_DBG_START + 0x64) + +/*ICP Core level error messages */ +#define HFI_ERR_NO_RES (HFI_ERR_ICP_START + 0x1) +#define HFI_ERR_UNSUPPORTED_RES (HFI_ERR_ICP_START + 0x2) +#define HFI_ERR_UNSUPPORTED_PROP (HFI_ERR_ICP_START + 0x3) +#define HFI_ERR_INIT_EXPECTED (HFI_ERR_ICP_START + 0x4) +#define HFI_ERR_INIT_IGNORED (HFI_ERR_ICP_START + 0x5) + +/* System level commands */ +#define HFI_CMD_COMMON_START \ + (HFI_DOMAIN_BASE_ICP + HFI_CMD_START_OFFSET + 0x0) +#define HFI_CMD_SYS_INIT (HFI_CMD_COMMON_START + 0x1) +#define HFI_CMD_SYS_PC_PREP (HFI_CMD_COMMON_START + 0x2) +#define HFI_CMD_SYS_SET_PROPERTY (HFI_CMD_COMMON_START + 0x3) +#define HFI_CMD_SYS_GET_PROPERTY (HFI_CMD_COMMON_START + 0x4) +#define HFI_CMD_SYS_PING (HFI_CMD_COMMON_START + 0x5) +#define HFI_CMD_SYS_RESET (HFI_CMD_COMMON_START + 0x6) + +/* General Frame process errors */ +#define CAMERAICP_SUCCESS 0 +#define CAMERAICP_EFAILED 1 +#define CAMERAICP_ENOMEMORY 2 +#define CAMERAICP_EBADSTATE 3 +#define CAMERAICP_EBADPARM 4 +#define CAMERAICP_EBADITEM 5 +#define CAMERAICP_EINVALIDFORMAT 6 +#define CAMERAICP_EUNSUPPORTED 7 +#define CAMERAICP_EOUTOFBOUND 8 +#define CAMERAICP_ETIMEDOUT 9 +#define CAMERAICP_EABORTED 10 +#define CAMERAICP_EHWVIOLATION 11 +#define CAMERAICP_ECDMERROR 12 + +/* HFI Specific errors. 
*/ +#define CAMERAICP_HFI_ERR_COMMAND_SIZE 1000 +#define CAMERAICP_HFI_ERR_MESSAGE_SIZE 1001 +#define CAMERAICP_HFI_QUEUE_EMPTY 1002 +#define CAMERAICP_HFI_QUEUE_FULL 1003 + +/* Core level commands */ +/* IPE/BPS core Commands */ +#define HFI_CMD_IPE_BPS_COMMON_START \ + (HFI_DOMAIN_BASE_IPE_BPS + HFI_CMD_START_OFFSET + 0x0) +#define HFI_CMD_IPEBPS_CREATE_HANDLE \ + (HFI_CMD_IPE_BPS_COMMON_START + 0x8) +#define HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT \ + (HFI_CMD_IPE_BPS_COMMON_START + 0xa) +#define HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT \ + (HFI_CMD_IPE_BPS_COMMON_START + 0xe) + +/* Core level commands */ +/* OFE core Commands */ +#define HFI_CMD_OFE_COMMON_START \ + (HFI_DOMAIN_BASE_OFE + HFI_CMD_START_OFFSET + 0x0) +#define HFI_CMD_OFE_CREATE_HANDLE \ + (HFI_CMD_OFE_COMMON_START + 0x08) +#define HFI_CMD_OFE_ASYNC_COMMAND_DIRECT \ + (HFI_CMD_OFE_COMMON_START + 0x0a) +#define HFI_CMD_OFE_ASYNC_COMMAND_INDIRECT \ + (HFI_CMD_OFE_COMMON_START + 0x0e) + +/* CDM core Commands */ +#define HFI_CMD_CDM_COMMON_START \ + (HFI_DOMAIN_BASE_CDM + HFI_CMD_START_OFFSET + 0x0) +#define HFI_CMD_CDM_TEST_START (HFI_CMD_CDM_COMMON_START + 0x800) +#define HFI_CMD_CDM_END (HFI_CMD_CDM_COMMON_START + 0xFFF) + +/* Debug/Test Commands */ +#define HFI_CMD_DBG_COMMON_START \ + (HFI_DOMAIN_BASE_DBG + HFI_CMD_START_OFFSET + 0x0) +#define HFI_CMD_DBG_TEST_START (HFI_CMD_DBG_COMMON_START + 0x800) +#define HFI_CMD_DBG_SYNX_TEST (HFI_CMD_DBG_TEST_START + 0x1) +#define HFI_CMD_DBG_END (HFI_CMD_DBG_COMMON_START + 0xFFF) + +/* System level messages */ +#define HFI_MSG_ICP_COMMON_START \ + (HFI_DOMAIN_BASE_ICP + HFI_MSG_START_OFFSET + 0x0) +#define HFI_MSG_SYS_INIT_DONE (HFI_MSG_ICP_COMMON_START + 0x1) +#define HFI_MSG_SYS_PC_PREP_DONE (HFI_MSG_ICP_COMMON_START + 0x2) +#define HFI_MSG_SYS_DEBUG (HFI_MSG_ICP_COMMON_START + 0x3) +#define HFI_MSG_SYS_IDLE (HFI_MSG_ICP_COMMON_START + 0x4) +#define HFI_MSG_SYS_PROPERTY_INFO (HFI_MSG_ICP_COMMON_START + 0x5) +#define HFI_MSG_SYS_PING_ACK 
(HFI_MSG_ICP_COMMON_START + 0x6) +#define HFI_MSG_SYS_RESET_ACK (HFI_MSG_ICP_COMMON_START + 0x7) +#define HFI_MSG_EVENT_NOTIFY (HFI_MSG_ICP_COMMON_START + 0x8) + +/* Core level Messages */ +/* IPE/BPS core Messages */ +#define HFI_MSG_IPE_BPS_COMMON_START \ + (HFI_DOMAIN_BASE_IPE_BPS + HFI_MSG_START_OFFSET + 0x0) +#define HFI_MSG_IPEBPS_CREATE_HANDLE_ACK \ + (HFI_MSG_IPE_BPS_COMMON_START + 0x08) +#define HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK \ + (HFI_MSG_IPE_BPS_COMMON_START + 0x0a) +#define HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK \ + (HFI_MSG_IPE_BPS_COMMON_START + 0x0e) +#define HFI_MSG_IPE_BPS_TEST_START \ + (HFI_MSG_IPE_BPS_COMMON_START + 0x800) +#define HFI_MSG_IPE_BPS_END \ + (HFI_MSG_IPE_BPS_COMMON_START + 0xFFF) + +/* Core level Messages */ +/* OFE core Messages */ +#define HFI_MSG_OFE_COMMON_START \ + (HFI_DOMAIN_BASE_OFE + HFI_MSG_START_OFFSET + 0x0) +#define HFI_MSG_OFE_CREATE_HANDLE_ACK \ + (HFI_MSG_OFE_COMMON_START + 0X08) +#define HFI_MSG_OFE_ASYNC_COMMAND_DIRECT_ACK \ + (HFI_MSG_OFE_COMMON_START + 0x0a) +#define HFI_MSG_OFE_ASYNC_COMMAND_INDIRECT_ACK \ + (HFI_MSG_OFE_COMMON_START + 0x0e) +#define HFI_MSG_OFE_TEST_START \ + (HFI_MSG_OFE_COMMON_START + 0x0800) +#define HFI_MSG_OFE_END \ + (HFI_MSG_OFE_COMMON_START + 0x0FFF) + +/* CDM core Messages */ +#define HFI_MSG_CDM_COMMON_START \ + (HFI_DOMAIN_BASE_CDM + HFI_MSG_START_OFFSET + 0x0) +#define HFI_MSG_PRI_CDM_PAYLOAD_ACK (HFI_MSG_CDM_COMMON_START + 0xa) +#define HFI_MSG_PRI_LLD_PAYLOAD_ACK (HFI_MSG_CDM_COMMON_START + 0xb) +#define HFI_MSG_CDM_TEST_START (HFI_MSG_CDM_COMMON_START + 0x800) +#define HFI_MSG_CDM_END (HFI_MSG_CDM_COMMON_START + 0xFFF) + +/* core level test command ranges */ +/* ICP core level test command range */ +#define HFI_CMD_ICP_TEST_START (HFI_CMD_ICP_COMMON_START + 0x800) +#define HFI_CMD_ICP_END (HFI_CMD_ICP_COMMON_START + 0xFFF) + +/* IPE/BPS core level test command range */ +#define HFI_CMD_IPE_BPS_TEST_START \ + (HFI_CMD_IPE_BPS_COMMON_START + 0x800) +#define 
HFI_CMD_IPE_BPS_END (HFI_CMD_IPE_BPS_COMMON_START + 0xFFF) + +/* OFE core level test command range */ +#define HFI_CMD_OFE_TEST_START (HFI_CMD_OFE_COMMON_START + 0x0800) +#define HFI_CMD_OFE_END (HFI_CMD_OFE_COMMON_START + 0x0FFF) + +/* ICP core level test message range */ +#define HFI_MSG_ICP_TEST_START (HFI_MSG_ICP_COMMON_START + 0x800) +#define HFI_MSG_ICP_END (HFI_MSG_ICP_COMMON_START + 0xFFF) + +/* ICP core level Debug test message range */ +#define HFI_MSG_DBG_COMMON_START \ + (HFI_DOMAIN_BASE_DBG + HFI_MSG_START_OFFSET + 0x0) + +#define HFI_MSG_DBG_TEST_START (HFI_MSG_DBG_COMMON_START + 0x800) +#define HFI_MSG_DBG_SYNX_TEST (HFI_MSG_DBG_TEST_START + 0x1) +#define HFI_MSG_DBG_END (HFI_MSG_DBG_COMMON_START + 0xFFF) + +/* System level property base offset */ +#define HFI_PROPERTY_ICP_COMMON_START (HFI_DOMAIN_BASE_ICP + 0x0) + +#define HFI_PROP_SYS_DEBUG_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x1) +#define HFI_PROP_SYS_UBWC_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x2) +#define HFI_PROP_SYS_IMAGE_VER (HFI_PROPERTY_ICP_COMMON_START + 0x3) +#define HFI_PROP_SYS_SUPPORTED (HFI_PROPERTY_ICP_COMMON_START + 0x4) +#define HFI_PROP_SYS_IPEBPS_PC (HFI_PROPERTY_ICP_COMMON_START + 0x5) +#define HFI_PROP_SYS_FW_DUMP_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x8) +#define HFI_PROP_SYS_UBWC_CONFIG_EX (HFI_PROPERTY_ICP_COMMON_START + 0x9) +#define HFI_PROP_SYS_ICP_HW_FREQUENCY (HFI_PROPERTY_ICP_COMMON_START + 0xa) +#define HFI_PROP_SYS_ICP_RAMDUMP_MODE (HFI_PROPERTY_ICP_COMMON_START + 0xb) +#define HFI_PROP_SYS_OFE_PC (HFI_PROPERTY_ICP_COMMON_START + 0xc) +#define HFI_PROP_SYS_MEM_REGIONS (HFI_PROPERTY_ICP_COMMON_START + 0xe) + +/* Capabilities reported at sys init */ +#define HFI_CAPS_PLACEHOLDER_1 (HFI_COMMON_BASE + 0x1) +#define HFI_CAPS_PLACEHOLDER_2 (HFI_COMMON_BASE + 0x2) + +/* Section describes different debug levels (HFI_DEBUG_MSG_X) + * available for debug messages from FW + */ +#define HFI_DEBUG_MSG_LOW 0x00000001 +#define HFI_DEBUG_MSG_MEDIUM 0x00000002 +#define 
HFI_DEBUG_MSG_HIGH 0x00000004 +#define HFI_DEBUG_MSG_ERROR 0x00000008 +#define HFI_DEBUG_MSG_FATAL 0x00000010 +/* Messages containing performance data */ +#define HFI_DEBUG_MSG_PERF 0x00000020 +/* Disable ARM9 WFI in low power mode. */ +#define HFI_DEBUG_CFG_WFI 0x01000000 +/* Disable ARM9 watchdog. */ +#define HFI_DEBUG_CFG_ARM9WD 0x10000000 + + +/* + * HFI_FW_DUMP levels + * HFI_FW_DUMP_xx + */ +#define HFI_FW_DUMP_DISABLED 0x00000000 +#define HFI_FW_DUMP_ON_FAILURE 0x00000001 +#define HFI_FW_DUMP_ALWAYS 0x00000002 + +/* Number of available dump levels. */ +#define NUM_HFI_DUMP_LVL 0x00000003 + +/* Number of available ramdump levels. */ +#define HFI_FW_RAMDUMP_DISABLED 0x00000000 +#define HFI_FW_RAMDUMP_ENABLED 0x00000001 + +/* Number of available ramdump levels. */ +#define NUM_HFI_RAMDUMP_LVLS 0x00000002 + +/* Debug Msg Communication types: + * Section describes different modes (HFI_DEBUG_MODE_X) + * available to communicate the debug messages + */ + /* Debug message output through the interface debug queue. */ +#define HFI_DEBUG_MODE_QUEUE 0x00000001 + /* Debug message output through QDSS. */ +#define HFI_DEBUG_MODE_QDSS 0x00000002 + /* Number of debug modes available. 
*/ +#define NUM_HFI_DEBUG_MODE 0x00000002 + +#define HFI_DEBUG_MSG_LOW 0x00000001 +#define HFI_DEBUG_MSG_MEDIUM 0x00000002 +#define HFI_DEBUG_MSG_HIGH 0x00000004 +#define HFI_DEBUG_MSG_ERROR 0x00000008 +#define HFI_DEBUG_MSG_FATAL 0x00000010 +#define HFI_DEBUG_MSG_PERF 0x00000020 +#define HFI_DEBUG_CFG_WFI 0x01000000 +#define HFI_DEBUG_CFG_ARM9WD 0x10000000 + +#define HFI_DEV_VERSION_MAX 0x6 + +#define ICP_PWR_CLP_BPS 0x00000001 +#define ICP_PWR_CLP_IPE0 0x00010000 +#define ICP_PWR_CLP_IPE1 0x00020000 +#define ICP_PWR_CLP_OFE 0x00000001 + +/* New mem region definitions */ +#define HFI_MEM_REGION_ID_IPCLITE_SHARED_MEM 0 +#define HFI_MEM_REGION_ID_SYNX_HW_MUTEX 1 +#define HFI_MEM_REGION_ID_GLOBAL_ATOMIC_HW_MUTEX 2 +#define HFI_MEM_REGION_ID_GLOBAL_CNTR 3 +#define HFI_MEM_REGION_ID_LLCC_REGISTER 4 + +/* Type of the new regions */ +#define HFI_MEM_REGION_TYPE_CACHED 0 +#define HFI_MEM_REGION_TYPE_UNCACHED 1 +#define HFI_MEM_REGION_TYPE_DEVICE 2 + +/** + * start of sys command packet types + * These commands are used to get system level information + * from firmware + */ + +/** + * struct hfi_caps_support + * payload to report caps through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED + * @type: capability type + * @min: minimum supported value for the capability + * @max: maximum supported value for the capability + * @step_size: supported steps between min-max + */ +struct hfi_caps_support { + uint32_t type; + uint32_t min; + uint32_t max; + uint32_t step_size; +} __packed; + +/** + * struct hfi_caps_support_info + * capability report through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED + * @num_caps: number of capabilities listed + * @caps_data: capabilities info array + */ +struct hfi_caps_support_info { + uint32_t num_caps; + union { + struct hfi_caps_support caps_data[1]; + __DECLARE_FLEX_ARRAY(struct hfi_caps_support, caps_data_flex); + }; +} __packed; + +/** + * struct hfi_debug + * payload structure to configure HFI_PROPERTY_SYS_DEBUG_CONFIG + * @debug_config: it is a 
result of HFI_DEBUG_MSG_X values that + * are OR-ed together to specify the debug message types + * to output + * @debug_mode: debug message output through debug queue/qdss + * @HFI_PROPERTY_SYS_DEBUG_CONFIG + */ +struct hfi_debug { + uint32_t debug_config; + uint32_t debug_mode; +} __packed; + +/** + * struct hfi_dev_pc + * payload structure to configure HFI_PROPERTY_SYS_DEV_PC + * @enable: Flag to enable IPE, BPS interfrane power collapse + * @core_info: Core information to firmware + */ +struct hfi_dev_pc { + uint32_t enable; + uint32_t core_info; +} __packed; + +/** + * struct hfi_cmd_ubwc_cfg + * Payload structure to configure HFI_PROP_SYS_UBWC_CFG + * @ubwc_fetch_cfg: UBWC configuration for fecth + * @ubwc_write_cfg: UBWC configuration for write + */ +struct hfi_cmd_ubwc_cfg { + uint32_t ubwc_fetch_cfg; + uint32_t ubwc_write_cfg; +} __packed; + +/** + * struct hfi_cmd_ubwc_cfg_ext + * Payload structure to configure HFI_UBWC_CFG_TYPE_EXT + * @bps: UBWC configuration for bps + * @ipe: UBWC configuration for ipe + * @ofe: UBWC configuration for ofe + */ +struct hfi_cmd_ubwc_cfg_ext { + struct hfi_cmd_ubwc_cfg bps; + struct hfi_cmd_ubwc_cfg ipe; + struct hfi_cmd_ubwc_cfg ofe; +} __packed; + +/** + * struct hfi_cmd_sys_init + * command to initialization of system session + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @HFI_CMD_SYS_INIT + */ +struct hfi_cmd_sys_init { + uint32_t size; + uint32_t pkt_type; +} __packed; + +/** + * struct hfi_cmd_pc_prep + * command to firmware to prepare for power collapse + * @eize: packet size in bytes + * @pkt_type: opcode of a packet + * @HFI_CMD_SYS_PC_PREP + */ +struct hfi_cmd_pc_prep { + uint32_t size; + uint32_t pkt_type; +} __packed; + +/** + * struct hfi_cmd_prop + * command to get/set properties of firmware + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @num_prop: number of properties queried/set + * @prop_data: array of property IDs being queried. 
size depends on num_prop + * array of property IDs and associated structure pairs in set + * @HFI_CMD_SYS_GET_PROPERTY + * @HFI_CMD_SYS_SET_PROPERTY + */ +struct hfi_cmd_prop { + uint32_t size; + uint32_t pkt_type; + uint32_t num_prop; + union { + uint32_t prop_data[1]; + __DECLARE_FLEX_ARRAY(uint32_t, prop_data_flex); + }; +} __packed; + +/** + * struct hfi_cmd_ping_pkt + * ping command pings the firmware to confirm whether + * it is alive. + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @user_data: client data, firmware returns this data + * as part of HFI_MSG_SYS_PING_ACK + * @HFI_CMD_SYS_PING + */ +struct hfi_cmd_ping_pkt { + uint32_t size; + uint32_t pkt_type; + uint64_t user_data; +} __packed; + +/** + * struct hfi_cmd_sys_reset_pkt + * sends the reset command to FW. FW responds in the same type + * of packet. so can be used for reset_ack_pkt type also + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @user_data: client data, firmware returns this data + * as part of HFI_MSG_SYS_RESET_ACK + * @HFI_CMD_SYS_RESET + */ +struct hfi_cmd_sys_reset_pkt { + uint32_t size; + uint32_t pkt_type; + uint64_t user_data; +} __packed; + +/** + * struct hfi_cmd_mem_region_info + * Payload structure to configure HFI_PROP_SYS_MEM_REGIONS + * @region_id: region id (HW mutex/synx global mem/...) + * @region_type: Type of the region (cached/uncached/device/..) 
+ * @start_addr: va to the start of this region + * @size: size of this region + */ +struct hfi_cmd_mem_region_info { + uint32_t region_id; + uint32_t region_type; + uint64_t start_addr; + uint32_t size; +} __packed; + +/** + * struct hfi_cmd_mem_region_info + * Payload structure to configure HFI_PROP_SYS_MEM_REGIONS + * @num_valid_regions: Valid number of regions configured to FW + * @region_info: Region specific info + */ +struct hfi_cmd_config_mem_regions { + uint32_t num_valid_regions; + union { + struct hfi_cmd_mem_region_info region_info[1]; + __DECLARE_FLEX_ARRAY(struct hfi_cmd_mem_region_info, region_info_flex); + }; +} __packed; + +/* end of sys command packet types */ + +/* start of sys message packet types */ + +/** + * struct hfi_prop + * structure to report maximum supported features of firmware. + * @api_ver: Firmware API version + * @dev_ver: Device version + * @num_icp_hw: Number of ICP hardware information + * @dev_hw_ver: Supported hardware version information + * @force_ipe_8bpp: flag to indicate if 8bpp ipe fuse is blown + * @reserved: Reserved field + */ +struct hfi_sys_support { + uint32_t api_ver; + uint32_t dev_ver; + uint32_t num_icp_hw; + uint32_t dev_hw_ver[HFI_DEV_VERSION_MAX]; + uint32_t force_ipe_8bpp; + uint32_t reserved; +} __packed; + +/** + * struct hfi_supported_prop + * structure to report HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED + * for a session + * @num_prop: number of properties supported + * @prop_data: array of supported property IDs + */ +struct hfi_supported_prop { + uint32_t num_prop; + union { + uint32_t prop_data[1]; + __DECLARE_FLEX_ARRAY(uint32_t, prop_data_flex); + }; +} __packed; + +/** + * struct hfi_image_version + * system image version + * @major: major version number + * @minor: minor version number + * @ver_name_size: size of version name + * @ver_name: image version name + */ +struct hfi_image_version { + uint32_t major; + uint32_t minor; + uint32_t ver_name_size; + union { + uint8_t ver_name[1]; + 
__DECLARE_FLEX_ARRAY(uint8_t, ver_name_flex); + }; +} __packed; + +/** + * struct hfi_msg_init_done_data + * @api_ver: Firmware API version + * @dev_ver: Device version + * @num_icp_hw: Number of ICP hardware information + * @dev_hw_ver: Supported hardware version information + * @reserved: Reserved field + */ +struct hfi_msg_init_done_data { + uint32_t api_ver; + uint32_t dev_ver; + uint32_t num_icp_hw; + uint32_t dev_hw_ver[HFI_DEV_VERSION_MAX]; + uint32_t reserved; +}; + +/** + * struct hfi_msg_init_done + * system init done message from firmware. Many system level properties + * are returned with the packet + * @size: Packet size in bytes + * @pkt_type: Opcode of a packet + * @err_type: Error code associated with response + * @num_prop: Number of default capability info + * @prop_data: Array of property ids and corresponding structure pairs + */ +struct hfi_msg_init_done { + uint32_t size; + uint32_t pkt_type; + uint32_t err_type; + uint32_t num_prop; + union { + uint32_t prop_data[1]; + __DECLARE_FLEX_ARRAY(uint32_t, prop_data_flex); + }; +} __packed; + +/** + * struct hfi_msg_pc_prep_done + * system power collapse preparation done message + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @err_type: error code associated with the response + */ +struct hfi_msg_pc_prep_done { + uint32_t size; + uint32_t pkt_type; + uint32_t err_type; +} __packed; + +/** + * struct hfi_msg_prop + * system property info from firmware + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @num_prop: number of property info structures + * @prop_data: array of property IDs and associated structure pairs + */ +struct hfi_msg_prop { + uint32_t size; + uint32_t pkt_type; + uint32_t num_prop; + union { + uint32_t prop_data[1]; + __DECLARE_FLEX_ARRAY(uint32_t, prop_data_flex); + }; +} __packed; + +/** + * struct hfi_msg_idle + * system idle message from firmware + * @size: packet size in bytes + * @pkt_type: opcode of a packet + */ +struct hfi_msg_idle { 
+ uint32_t size; + uint32_t pkt_type; +} __packed; + +/** + * struct hfi_msg_ping_ack + * system ping ack message + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @user_data: this data is sent as part of ping command from host + */ +struct hfi_msg_ping_ack { + uint32_t size; + uint32_t pkt_type; + uint64_t user_data; +} __packed; + +/** + * struct hfi_msg_debug + * system debug message defination + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @msg_type: debug message type + * @msg_size: size of debug message in bytes + * @timestamp_hi: most significant 32 bits of the 64 bit timestamp field. + * timestamp shall be interpreted as a signed 64-bit value + * representing microseconds. + * @timestamp_lo: least significant 32 bits of the 64 bit timestamp field. + * timestamp shall be interpreted as a signed 64-bit value + * representing microseconds. + * @msg_data: message data in string form + */ +struct hfi_msg_debug { + uint32_t size; + uint32_t pkt_type; + uint32_t msg_type; + uint32_t msg_size; + uint32_t timestamp_hi; + uint32_t timestamp_lo; + union { + uint8_t msg_data[1]; + __DECLARE_FLEX_ARRAY(uint8_t, msg_data_flex); + }; +} __packed; +/** + * struct hfi_msg_event_notify + * event notify message + * @size: packet size in bytes + * @pkt_type: opcode of a packet + * @fw_handle: firmware session handle + * @event_id: session event id + * @event_data1: event data corresponding to event ID + * @event_data2: event data corresponding to event ID + * @ext_event_data: info array, interpreted based on event_data1 + * and event_data2 + */ +struct hfi_msg_event_notify { + uint32_t size; + uint32_t pkt_type; + uint32_t fw_handle; + uint32_t event_id; + uint32_t event_data1; + uint32_t event_data2; + union { + uint32_t ext_event_data[1]; + __DECLARE_FLEX_ARRAY(uint32_t, ext_event_data_flex); + }; +} __packed; +/** + * end of sys message packet types + */ +#endif /* _HFI_DEFS_H_ */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_icp/hfi.c b/qcom/opensource/camera-kernel/drivers/cam_icp/hfi.c new file mode 100644 index 0000000000..3f5387603c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/hfi.c @@ -0,0 +1,1432 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cam_presil_hw_access.h" +#include "cam_io_util.h" +#include "hfi_reg.h" +#include "hfi_sys_defs.h" +#include "hfi_session_defs.h" +#include "hfi_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_debug_util.h" +#include "cam_compat.h" +#include "cam_soc_util.h" +#include "cam_mem_mgr_api.h" + +#define HFI_VERSION_INFO_MAJOR_VAL 1 +#define HFI_VERSION_INFO_MINOR_VAL 1 +#define HFI_VERSION_INFO_STEP_VAL 0 +#define HFI_VERSION_INFO_MAJOR_BMSK 0xFF000000 +#define HFI_VERSION_INFO_MAJOR_SHFT 24 +#define HFI_VERSION_INFO_MINOR_BMSK 0xFFFF00 +#define HFI_VERSION_INFO_MINOR_SHFT 8 +#define HFI_VERSION_INFO_STEP_BMSK 0xFF +#define HFI_VERSION_INFO_STEP_SHFT 0 + +/* TO DO Lower timeout value */ +#define HFI_POLL_DELAY_US 10 +#define HFI_POLL_TIMEOUT_US 1500000 + +struct hfi_top_info { + uint32_t num_hfi; + struct hfi_info *hfi[HFI_NUM_MAX]; +}; + +struct hfi_top_info g_hfi; +static DEFINE_MUTEX(g_hfi_lock); + + +static int cam_hfi_presil_setup(struct hfi_mem_info *hfi_mem); +static int cam_hfi_presil_set_init_request(void); + +#ifndef CONFIG_CAM_PRESIL +static void hfi_irq_raise(struct hfi_info *hfi) +{ + if (hfi->ops.irq_raise) + hfi->ops.irq_raise(hfi->priv); +} +#endif + +static void hfi_irq_enable(struct hfi_info *hfi) +{ + if (hfi->ops.irq_enable) + hfi->ops.irq_enable(hfi->priv); +} + +static void __iomem *hfi_iface_addr(struct hfi_info *hfi) +{ + void __iomem *ret = NULL; + + if (hfi->ops.iface_addr) + ret = 
hfi->ops.iface_addr(hfi->priv); + + return IS_ERR_OR_NULL(ret) ? NULL : ret; +} + +static inline int hfi_get_client_info(int client_handle, struct hfi_info **hfi) +{ + uint32_t idx; + + idx = HFI_GET_INDEX(client_handle); + if (!IS_VALID_HFI_INDEX(idx)) { + CAM_ERR(CAM_HFI, "Invalid HFI index: %u from hdl:%d", + idx, client_handle); + return -EINVAL; + } + + *hfi = g_hfi.hfi[idx]; + if (!g_hfi.hfi[idx]) { + CAM_ERR(CAM_HFI, "[%s] HFI interface not setup for client hdl: %d", + g_hfi.hfi[idx]->client_name, client_handle); + return -ENODEV; + } + + return 0; +} + +static void hfi_queue_dump(uint32_t *dwords, int count) +{ + int i; + int rows; + int remaining; + + rows = count / 4; + remaining = count % 4; + + for (i = 0; i < rows; i++, dwords += 4) + CAM_DBG(CAM_HFI, + "word[%04d]: 0x%08x 0x%08x 0x%08x 0x%08x", + i * 4, dwords[0], dwords[1], dwords[2], dwords[3]); + + if (remaining == 1) + CAM_DBG(CAM_HFI, "word[%04d]: 0x%08x", rows * 4, dwords[0]); + else if (remaining == 2) + CAM_DBG(CAM_HFI, "word[%04d]: 0x%08x 0x%08x", + rows * 4, dwords[0], dwords[1]); + else if (remaining == 3) + CAM_DBG(CAM_HFI, "word[%04d]: 0x%08x 0x%08x 0x%08x", + rows * 4, dwords[0], dwords[1], dwords[2]); +} + +void cam_hfi_mini_dump(int client_handle, struct hfi_mini_dump_info *dst) +{ + struct hfi_info *hfi; + struct hfi_mem_info *hfi_mem; + uint32_t *dwords; + int rc; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return; + } + + hfi_mem = &hfi->map; + if (!hfi_mem) { + CAM_ERR(CAM_HFI, "[%s] hfi mem info NULL... 
unable to dump queues for hdl: %d", + hfi->client_name, client_handle); + return; + } + + dwords = (uint32_t *)hfi_mem->cmd_q.kva; + memcpy(dst->cmd_q, dwords, ICP_CMD_Q_SIZE_IN_BYTES); + + dwords = (uint32_t *)hfi_mem->msg_q.kva; + memcpy(dst->msg_q, dwords, ICP_CMD_Q_SIZE_IN_BYTES); + dst->msg_q_state = hfi->msg_q_state; + dst->cmd_q_state = hfi->cmd_q_state; + dst->dbg_q_state = hfi->dbg_q_state; +} + +void cam_hfi_queue_dump(int client_handle, bool dump_queue_data) +{ + struct hfi_info *hfi; + struct hfi_mem_info *hfi_mem; + struct hfi_qtbl *qtbl; + struct hfi_q_hdr *q_hdr; + uint32_t *dwords; + int num_dwords, rc; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc:%d for hdl:%d", + rc, client_handle); + return; + } + + hfi_mem = &hfi->map; + if (!hfi_mem) { + CAM_ERR(CAM_HFI, "[%s] mem info NULL... unable to dump queues for hdl: %d", + hfi->client_name, client_handle); + return; + } + + qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva; + CAM_INFO(CAM_HFI, + "[%s] hfi hdl: %u qtbl header: version=0x%08x tbl_size=%u numq=%u qhdr_size=%u", + hfi->client_name, client_handle, qtbl->q_tbl_hdr.qtbl_version, + qtbl->q_tbl_hdr.qtbl_size, qtbl->q_tbl_hdr.qtbl_num_q, + qtbl->q_tbl_hdr.qtbl_qhdr_size); + + q_hdr = &qtbl->q_hdr[Q_CMD]; + CAM_INFO(CAM_HFI, + "cmd_q: addr=0x%08x size=%u read_idx=%u write_idx=%u", + hfi_mem->cmd_q.iova, + q_hdr->qhdr_q_size, + q_hdr->qhdr_read_idx, + q_hdr->qhdr_write_idx); + + dwords = (uint32_t *)hfi_mem->cmd_q.kva; + num_dwords = ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; + + if (dump_queue_data) + hfi_queue_dump(dwords, num_dwords); + + q_hdr = &qtbl->q_hdr[Q_MSG]; + CAM_INFO(CAM_HFI, + "msg_q: addr=0x%08x size=%u read_idx=%u write_idx=%u", + hfi_mem->msg_q.iova, + q_hdr->qhdr_q_size, + q_hdr->qhdr_read_idx, + q_hdr->qhdr_write_idx); + + dwords = (uint32_t *)hfi_mem->msg_q.kva; + num_dwords = ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; + + if (dump_queue_data) + hfi_queue_dump(dwords, 
num_dwords); +} + +#ifndef CONFIG_CAM_PRESIL +int hfi_write_cmd(int client_handle, void *cmd_ptr) +{ + uint32_t size_in_words, empty_space, new_write_idx, read_idx, temp; + uint32_t *write_q, *write_ptr; + struct hfi_info *hfi; + struct hfi_qtbl *q_tbl; + struct hfi_q_hdr *q; + int rc = 0; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return rc; + } + + if (!cmd_ptr) { + CAM_ERR(CAM_HFI, "[%s] command is null for hfi hdl: %d", + hfi->client_name, client_handle); + return -EINVAL; + } + + mutex_lock(&hfi->cmd_q_lock); + if (hfi->hfi_state != HFI_READY || + !hfi->cmd_q_state) { + CAM_ERR(CAM_HFI, "[%s] Invalid hfi state: %u cmd q state: %u hfi hdl: %d", + hfi->client_name, hfi->hfi_state, + hfi->cmd_q_state, client_handle); + rc = -ENODEV; + goto err; + } + + q_tbl = (struct hfi_qtbl *)hfi->map.qtbl.kva; + q = &q_tbl->q_hdr[Q_CMD]; + + write_q = (uint32_t *)hfi->map.cmd_q.kva; + + size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT; + if (!size_in_words) { + CAM_DBG(CAM_HFI, "[%s] hfi hdl: %u word size is NULL", + hfi->client_name, client_handle); + rc = -EINVAL; + goto err; + } + + read_idx = q->qhdr_read_idx; + empty_space = (q->qhdr_write_idx >= read_idx) ? 
+ (q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) : + (read_idx - q->qhdr_write_idx); + if (empty_space <= size_in_words) { + CAM_ERR(CAM_HFI, "[%s] hfi hdl: %u failed: empty space %u, size_in_words %u", + hfi->client_name, client_handle, empty_space, size_in_words); + rc = -EIO; + goto err; + } + + new_write_idx = q->qhdr_write_idx + size_in_words; + write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx); + + if (new_write_idx < q->qhdr_q_size) { + memcpy(write_ptr, (uint8_t *)cmd_ptr, + size_in_words << BYTE_WORD_SHIFT); + } else { + new_write_idx -= q->qhdr_q_size; + temp = (size_in_words - new_write_idx) << BYTE_WORD_SHIFT; + memcpy(write_ptr, (uint8_t *)cmd_ptr, temp); + memcpy(write_q, (uint8_t *)cmd_ptr + temp, + new_write_idx << BYTE_WORD_SHIFT); + } + + /* + * To make sure command data in a command queue before + * updating write index + */ + wmb(); + + q->qhdr_write_idx = new_write_idx; + + /* + * Before raising interrupt make sure command data is ready for + * firmware to process + */ + wmb(); + hfi_irq_raise(hfi); + + /* Ensure HOST2ICP trigger is received by FW */ + wmb(); +err: + mutex_unlock(&hfi->cmd_q_lock); + return rc; +} + +int hfi_read_message(int client_handle, uint32_t *pmsg, uint8_t q_id, + size_t buf_words_size, uint32_t *words_read) +{ + struct hfi_info *hfi; + struct hfi_qtbl *q_tbl_ptr; + struct hfi_q_hdr *q; + uint32_t new_read_idx, size_in_words, word_diff, temp; + uint32_t *read_q, *read_ptr, *write_ptr; + struct mutex *q_lock; + int rc = 0; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl:%d", + rc, client_handle); + return rc; + } + + if (!pmsg) { + CAM_ERR(CAM_HFI, "[%s] client hdl: %d Invalid msg", + hfi->client_name, client_handle); + return -EINVAL; + } + + switch (q_id) { + case Q_MSG: + q_lock = &hfi->msg_q_lock; + break; + case Q_DBG: + q_lock = &hfi->dbg_q_lock; + break; + default: + CAM_ERR(CAM_HFI, "Invalid q_id: %u for read", q_id); + return -EINVAL; 
+ } + + mutex_lock(q_lock); + if (hfi->hfi_state != HFI_READY || + !hfi->msg_q_state) { + CAM_ERR(CAM_HFI, "[%s] Invalid hfi state:%u msg q state: %u hfi hdl: %d", + hfi->client_name, hfi->hfi_state, hfi->msg_q_state, + client_handle); + rc = -ENODEV; + goto err; + } + + q_tbl_ptr = (struct hfi_qtbl *)hfi->map.qtbl.kva; + q = &q_tbl_ptr->q_hdr[q_id]; + + if (q_id == Q_MSG) + read_q = (uint32_t *)hfi->map.msg_q.kva; + else + read_q = (uint32_t *)hfi->map.dbg_q.kva; + + read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx); + write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx); + + if (write_ptr >= read_ptr) + size_in_words = write_ptr - read_ptr; + else { + word_diff = read_ptr - write_ptr; + size_in_words = q->qhdr_q_size - word_diff; + } + + if (size_in_words == 0) { + CAM_DBG(CAM_HFI, "[%s] hfi hdl: %d Q is empty, state:%u, r idx:%u, w idx:%u", + hfi->client_name, client_handle, hfi->hfi_state, + q->qhdr_read_idx, q->qhdr_write_idx); + rc = -ENOMSG; + goto err; + } else if (size_in_words > q->qhdr_q_size) { + CAM_ERR(CAM_HFI, "[%s] Invalid HFI message packet size - 0x%08x hfi hdl:%d", + hfi->client_name, size_in_words << BYTE_WORD_SHIFT, + client_handle); + q->qhdr_read_idx = q->qhdr_write_idx; + rc = -EIO; + goto err; + } + + /* size to read from q is bounded by size of buffer */ + if (size_in_words > buf_words_size) + size_in_words = buf_words_size; + + new_read_idx = q->qhdr_read_idx + size_in_words; + + if (new_read_idx < q->qhdr_q_size) { + memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT); + } else { + new_read_idx -= q->qhdr_q_size; + temp = (size_in_words - new_read_idx) << BYTE_WORD_SHIFT; + memcpy(pmsg, read_ptr, temp); + memcpy((uint8_t *)pmsg + temp, read_q, + new_read_idx << BYTE_WORD_SHIFT); + } + + q->qhdr_read_idx = new_read_idx; + *words_read = size_in_words; + /* Memory Barrier to make sure message + * queue parameters are updated after read + */ + wmb(); +err: + mutex_unlock(q_lock); + return rc; +} + +#endif /* #ifndef CONFIG_CAM_PRESIL 
*/ + +int hfi_cmd_ubwc_config(int client_handle, uint32_t *ubwc_cfg) +{ + uint8_t *prop; + struct hfi_cmd_prop *dbg_prop; + struct hfi_info *hfi; + uint32_t size = 0; + int rc; + uint32_t *prop_ref_data; + + size = sizeof(struct hfi_cmd_prop) + + sizeof(struct hfi_cmd_ubwc_cfg); + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl:%d", + rc, client_handle); + return rc; + } + + CAM_DBG(CAM_HFI, + "[%s] hfi hdl: %d size of ubwc %u, ubwc_cfg [rd-0x%x,wr-0x%x]", + hfi->client_name, client_handle, size, ubwc_cfg[0], ubwc_cfg[1]); + + prop = CAM_MEM_ZALLOC(size, GFP_KERNEL); + if (!prop) + return -ENOMEM; + + dbg_prop = (struct hfi_cmd_prop *)prop; + dbg_prop->size = size; + dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY; + dbg_prop->num_prop = 1; + prop_ref_data = &dbg_prop->prop_data_flex[0]; + prop_ref_data[0] = HFI_PROP_SYS_UBWC_CFG; + prop_ref_data[1] = ubwc_cfg[0]; + prop_ref_data[2] = ubwc_cfg[1]; + + hfi_write_cmd(client_handle, prop); + CAM_MEM_FREE(prop); + + return 0; +} + +int hfi_cmd_ubwc_config_ext(int client_handle, uint32_t *ubwc_ipe_cfg, + uint32_t *ubwc_bps_cfg, uint32_t *ubwc_ofe_cfg) +{ + uint8_t *prop; + struct hfi_cmd_prop *dbg_prop; + struct hfi_info *hfi; + uint32_t size = 0; + int rc; + uint32_t *prop_ref_data; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl:%d", + rc, client_handle); + return rc; + } + + size = sizeof(struct hfi_cmd_prop) + + sizeof(struct hfi_cmd_ubwc_cfg_ext); + + CAM_DBG(CAM_HFI, + "[%s] hfi hdl: %d size of ubwc %u, ubwc_ipe_cfg[rd-0x%x,wr-0x%x] ubwc_bps_cfg[rd-0x%x,wr-0x%x] ubwc_ofe_cfg[rd-0x%x,wr-0x%x]", + hfi->client_name, client_handle, size, + ubwc_ipe_cfg[0], ubwc_ipe_cfg[1], ubwc_bps_cfg[0], + ubwc_bps_cfg[1], ubwc_ofe_cfg[0], ubwc_ofe_cfg[1]); + + prop = CAM_MEM_ZALLOC(size, GFP_KERNEL); + if (!prop) + return -ENOMEM; + + dbg_prop = (struct hfi_cmd_prop *)prop; + dbg_prop->size = 
size; + dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY; + dbg_prop->num_prop = 1; + prop_ref_data = &dbg_prop->prop_data_flex[0]; + prop_ref_data[0] = HFI_PROP_SYS_UBWC_CONFIG_EX; + prop_ref_data[1] = ubwc_bps_cfg[0]; + prop_ref_data[2] = ubwc_bps_cfg[1]; + prop_ref_data[3] = ubwc_ipe_cfg[0]; + prop_ref_data[4] = ubwc_ipe_cfg[1]; + prop_ref_data[5] = ubwc_ofe_cfg[0]; + prop_ref_data[6] = ubwc_ofe_cfg[1]; + hfi_write_cmd(client_handle, prop); + CAM_MEM_FREE(prop); + + return 0; +} + +int hfi_set_debug_level(int client_handle, u64 icp_dbg_type, uint32_t lvl) +{ + uint8_t *prop; + struct hfi_info *hfi; + struct hfi_cmd_prop *dbg_prop; + uint32_t size = 0, val; + int rc; + uint32_t *prop_ref_data; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return rc; + } + + val = HFI_DEBUG_MSG_LOW | + HFI_DEBUG_MSG_MEDIUM | + HFI_DEBUG_MSG_HIGH | + HFI_DEBUG_MSG_ERROR | + HFI_DEBUG_MSG_FATAL | + HFI_DEBUG_MSG_PERF | + HFI_DEBUG_CFG_WFI | + HFI_DEBUG_CFG_ARM9WD; + + if (lvl > val) + return -EINVAL; + + if (hfi) + hfi->dbg_lvl = lvl; + + size = sizeof(struct hfi_cmd_prop) + + sizeof(struct hfi_debug); + + prop = CAM_MEM_ZALLOC(size, GFP_KERNEL); + if (!prop) + return -ENOMEM; + + dbg_prop = (struct hfi_cmd_prop *)prop; + dbg_prop->size = size; + dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY; + dbg_prop->num_prop = 1; + prop_ref_data = &dbg_prop->prop_data_flex[0]; + prop_ref_data[0] = HFI_PROP_SYS_DEBUG_CFG; + prop_ref_data[1] = lvl; + prop_ref_data[2] = icp_dbg_type; + hfi_write_cmd(client_handle, prop); + + CAM_MEM_FREE(prop); + + return 0; +} + +int hfi_set_fw_dump_levels(int client_handle, uint32_t hang_dump_lvl, + uint32_t ram_dump_lvl) +{ + uint8_t *prop = NULL; + struct hfi_info *hfi; + struct hfi_cmd_prop *fw_dump_level_switch_prop = NULL; + uint32_t size = 0; + int rc; + uint32_t *prop_ref_data; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + 
CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return rc; + } + + CAM_DBG(CAM_HFI, "[%s] hfi hdl: %d fw dump ENTER", + hfi->client_name, client_handle); + + size = sizeof(struct hfi_cmd_prop) + sizeof(uint32_t); + prop = CAM_MEM_ZALLOC(size, GFP_KERNEL); + if (!prop) + return -ENOMEM; + + fw_dump_level_switch_prop = (struct hfi_cmd_prop *)prop; + fw_dump_level_switch_prop->size = size; + fw_dump_level_switch_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY; + fw_dump_level_switch_prop->num_prop = 1; + prop_ref_data = &fw_dump_level_switch_prop->prop_data_flex[0]; + prop_ref_data[0] = HFI_PROP_SYS_FW_DUMP_CFG; + prop_ref_data[1] = hang_dump_lvl; + + /* Write hang dump level */ + hfi_write_cmd(client_handle, prop); + + /* Update and write ramdump level */ + prop_ref_data[0] = HFI_PROP_SYS_ICP_RAMDUMP_MODE; + prop_ref_data[1] = ram_dump_lvl; + + hfi_write_cmd(client_handle, prop); + CAM_DBG(CAM_HFI, + "[%s] hfi hdl: %d prop->size = %d prop->pkt_type = %d prop->num_prop = %d hang_dump_lvl = %u ram_dump_lvl = %u", + hfi->client_name, client_handle, fw_dump_level_switch_prop->size, + fw_dump_level_switch_prop->pkt_type, fw_dump_level_switch_prop->num_prop, + hang_dump_lvl, ram_dump_lvl); + + CAM_MEM_FREE(prop); + return 0; +} + +int hfi_send_freq_info(int client_handle, int32_t freq) +{ + uint8_t *prop = NULL; + struct hfi_info *hfi; + struct hfi_cmd_prop *dbg_prop = NULL; + uint32_t size = 0; + int rc; + uint32_t *prof_ref_data; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return rc; + } + + if (!(hfi->dbg_lvl & HFI_DEBUG_MSG_PERF)) + return -EINVAL; + + size = sizeof(struct hfi_cmd_prop) + sizeof(freq); + prop = CAM_MEM_ZALLOC(size, GFP_KERNEL); + if (!prop) + return -ENOMEM; + + dbg_prop = (struct hfi_cmd_prop *)prop; + dbg_prop->size = size; + dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY; + dbg_prop->num_prop = 1; + prof_ref_data 
= &dbg_prop->prop_data_flex[0]; + prof_ref_data[0] = HFI_PROP_SYS_ICP_HW_FREQUENCY; + prof_ref_data[1] = freq; + + CAM_DBG(CAM_HFI, + "[%s] hfi hdl: %d\n" + "prop->size = %d\n" + "prop->pkt_type = %d\n" + "prop->num_prop = %d\n" + "prop->prop_data[0] = %d\n" + "prop->prop_data[1] = %d\n" + "dbg_lvl = 0x%x\n", + hfi->client_name, + client_handle, + dbg_prop->size, + dbg_prop->pkt_type, + dbg_prop->num_prop, + prof_ref_data[0], + prof_ref_data[1], + hfi->dbg_lvl); + + hfi_write_cmd(client_handle, prop); + CAM_MEM_FREE(prop); + return 0; +} + +int hfi_send_system_cmd(int client_handle, uint32_t type, uint64_t data, uint32_t size) +{ + int rc = 0; + struct hfi_info *hfi; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return rc; + } + + switch (type) { + case HFI_CMD_SYS_INIT: { + struct hfi_cmd_sys_init init; + + init.size = sizeof(struct hfi_cmd_sys_init); + init.pkt_type = type; + rc = hfi_write_cmd(client_handle, &init); + } + break; + case HFI_CMD_SYS_PC_PREP: { + struct hfi_cmd_pc_prep prep; + + prep.size = sizeof(struct hfi_cmd_pc_prep); + prep.pkt_type = type; + rc = hfi_write_cmd(client_handle, &prep); + } + break; + case HFI_CMD_SYS_SET_PROPERTY: { + struct hfi_cmd_prop prop; + + if ((uint32_t)data == (uint32_t)HFI_PROP_SYS_DEBUG_CFG) { + prop.size = sizeof(struct hfi_cmd_prop); + prop.pkt_type = type; + prop.num_prop = 1; + prop.prop_data_flex[0] = HFI_PROP_SYS_DEBUG_CFG; + rc = hfi_write_cmd(client_handle, &prop); + } + } + break; + case HFI_CMD_SYS_GET_PROPERTY: + break; + case HFI_CMD_SYS_PING: { + struct hfi_cmd_ping_pkt ping; + + ping.size = sizeof(struct hfi_cmd_ping_pkt); + ping.pkt_type = type; + ping.user_data = (uint64_t)data; + rc = hfi_write_cmd(client_handle, &ping); + } + break; + case HFI_CMD_SYS_RESET: { + struct hfi_cmd_sys_reset_pkt reset; + + reset.size = sizeof(struct hfi_cmd_sys_reset_pkt); + reset.pkt_type = type; + reset.user_data = 
(uint64_t)data; + rc = hfi_write_cmd(client_handle, &reset); + } + break; + case HFI_CMD_IPEBPS_CREATE_HANDLE: { + struct hfi_cmd_create_handle handle; + + handle.size = sizeof(struct hfi_cmd_create_handle); + handle.pkt_type = type; + handle.handle_type = (uint32_t)data; + handle.user_data1 = 0; + rc = hfi_write_cmd(client_handle, &handle); + } + break; + case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT: + break; + default: + CAM_ERR(CAM_HFI, "[%s] command not supported: %u client handle: %d", + hfi->client_name, type, client_handle); + break; + } + + return rc; +} + + +int hfi_get_hw_caps(void *query_buf) +{ + int i = 0; + struct cam_icp_query_cap_cmd *query_cmd = NULL; + + if (!query_buf) { + CAM_ERR(CAM_HFI, "query buf is NULL"); + return -EINVAL; + } + + query_cmd = (struct cam_icp_query_cap_cmd *)query_buf; + query_cmd->fw_version.major = 0x12; + query_cmd->fw_version.minor = 0x12; + query_cmd->fw_version.revision = 0x12; + + query_cmd->api_version.major = 0x13; + query_cmd->api_version.minor = 0x13; + query_cmd->api_version.revision = 0x13; + + query_cmd->num_ipe = 2; + query_cmd->num_bps = 1; + + for (i = 0; i < CAM_ICP_MAX_NUM_OF_DEV_TYPES; i++) { + query_cmd->dev_ver[i].dev_type = i; + query_cmd->dev_ver[i].hw_ver.major = 0x34 + i; + query_cmd->dev_ver[i].hw_ver.minor = 0x34 + i; + query_cmd->dev_ver[i].hw_ver.incr = 0x34 + i; + } + return 0; +} + +int hfi_get_hw_caps_v2(int client_handle, void *query_buf) +{ + struct cam_icp_query_cap_cmd_v2 *query_cmd = NULL; + struct hfi_info *hfi; + int rc = 0; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return rc; + } + + if (!query_buf) { + CAM_ERR(CAM_HFI, "[%s] query cap buf is NULL", hfi->client_name); + return -EINVAL; + } + + query_cmd = (struct cam_icp_query_cap_cmd_v2 *)query_buf; + query_cmd->fw_version.major = (hfi->fw_version & 0xFF000000) >> 24; + query_cmd->fw_version.minor = (hfi->fw_version & 
0x00FF0000) >> 16; + query_cmd->fw_version.revision = (hfi->fw_version & 0xFFFF); + + return 0; +} + +int cam_hfi_resume(int client_handle) +{ + int rc = 0; + struct hfi_info *hfi; + struct hfi_mem_info *hfi_mem; + uint32_t status = 0; + void __iomem *icp_base = NULL; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl:%d", + rc, client_handle); + return rc; + } + + icp_base = hfi_iface_addr(hfi); + + if (!icp_base) { + CAM_ERR(CAM_HFI, "[%s] Invalid HFI interface address for hdl:%d", + hfi->client_name, client_handle); + return -EINVAL; + } + + hfi_mem = &hfi->map; + + cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR); + cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova, + icp_base + HFI_REG_SFR_PTR); + cam_io_w_mb((uint32_t)hfi_mem->shmem.iova, + icp_base + HFI_REG_SHARED_MEM_PTR); + cam_io_w_mb((uint32_t)hfi_mem->shmem.len, + icp_base + HFI_REG_SHARED_MEM_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova, + icp_base + HFI_REG_SECONDARY_HEAP_PTR); + cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len, + icp_base + HFI_REG_SECONDARY_HEAP_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->qdss.iova, + icp_base + HFI_REG_QDSS_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->qdss.len, + icp_base + HFI_REG_QDSS_IOVA_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->io_mem.iova, + icp_base + HFI_REG_IO_REGION_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->io_mem.len, + icp_base + HFI_REG_IO_REGION_SIZE); + + cam_io_w_mb((uint32_t)hfi_mem->io_mem2.iova, + icp_base + HFI_REG_IO2_REGION_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->io_mem2.len, + icp_base + HFI_REG_IO2_REGION_SIZE); + + cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.iova, + icp_base + HFI_REG_FWUNCACHED_REGION_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.len, + icp_base + HFI_REG_FWUNCACHED_REGION_SIZE); + + cam_io_w_mb((uint32_t)hfi_mem->device_mem.iova, + icp_base + HFI_REG_DEVICE_REGION_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->device_mem.len, + icp_base + 
HFI_REG_DEVICE_REGION_IOVA_SIZE); + + cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET, + icp_base + HFI_REG_HOST_ICP_INIT_REQUEST); + + CAM_DBG(CAM_HFI, "IO1 : [0x%x 0x%x] IO2 [0x%x 0x%x]", + hfi_mem->io_mem.iova, hfi_mem->io_mem.len, + hfi_mem->io_mem2.iova, hfi_mem->io_mem2.len); + + CAM_DBG(CAM_HFI, "FwUncached : [0x%x 0x%x] Shared [0x%x 0x%x]", + hfi_mem->fw_uncached.iova, hfi_mem->fw_uncached.len, + hfi_mem->shmem.iova, hfi_mem->shmem.len); + + CAM_DBG(CAM_HFI, "SecHeap : [0x%x 0x%x] QDSS [0x%x 0x%x]", + hfi_mem->sec_heap.iova, hfi_mem->sec_heap.len, + hfi_mem->qdss.iova, hfi_mem->qdss.len); + + CAM_DBG(CAM_HFI, "QTbl : [0x%x 0x%x] Sfr [0x%x 0x%x] Device [0x%x 0x%x]", + hfi_mem->qtbl.iova, hfi_mem->qtbl.len, + hfi_mem->sfr_buf.iova, hfi_mem->sfr_buf.len, + hfi_mem->device_mem.iova, hfi_mem->device_mem.len); + + if (cam_common_read_poll_timeout(icp_base + + HFI_REG_ICP_HOST_INIT_RESPONSE, + HFI_POLL_DELAY_US, HFI_POLL_TIMEOUT_US, + (uint32_t)UINT_MAX, ICP_INIT_RESP_SUCCESS, &status)) { + CAM_ERR(CAM_HFI, "[%s] response poll timed out: status=0x%08x hfi hdl: %d", + hfi->client_name, status, client_handle); + return -ETIMEDOUT; + } + + hfi_irq_enable(hfi); + + CAM_DBG(CAM_HFI, "[%s] hfi hdl: %d fw version : [0x%x]", + hfi->client_name, client_handle, hfi->fw_version); + + return rc; +} + +int cam_hfi_init(int client_handle, struct hfi_mem_info *hfi_mem, + const struct hfi_ops *hfi_ops, + void *priv, uint8_t event_driven_mode) +{ + int rc = 0; + uint32_t status = 0; + struct hfi_info *hfi = NULL; + struct hfi_qtbl *qtbl; + struct hfi_qtbl_hdr *qtbl_hdr; + struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr; + struct sfr_buf *sfr_buffer; + void __iomem *icp_base; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl %d", + rc, client_handle); + return rc; + } + + if (!hfi_mem || !hfi_ops || !priv) { + CAM_ERR(CAM_HFI, + "[%s] Invalid arg: hfi_mem=%pK hfi_ops=%pK priv=%pK hfi hdl:%d", + hfi->client_name, 
hfi_mem, hfi_ops, priv, client_handle); + return -EINVAL; + } + + mutex_lock(&hfi->cmd_q_lock); + mutex_lock(&hfi->msg_q_lock); + mutex_lock(&hfi->dbg_q_lock); + + hfi->hfi_state = HFI_INIT; + memcpy(&hfi->map, hfi_mem, sizeof(hfi->map)); + + qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva; + qtbl_hdr = &qtbl->q_tbl_hdr; + qtbl_hdr->qtbl_version = 0xFFFFFFFF; + qtbl_hdr->qtbl_size = sizeof(struct hfi_qtbl); + qtbl_hdr->qtbl_qhdr0_offset = offsetof(struct hfi_qtbl, q_hdr); + qtbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_q_hdr); + qtbl_hdr->qtbl_num_q = ICP_HFI_NUMBER_OF_QS; + qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS; + + /* setup host-to-firmware command queue */ + cmd_q_hdr = &qtbl->q_hdr[Q_CMD]; + cmd_q_hdr->qhdr_status = QHDR_ACTIVE; + cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova; + cmd_q_hdr->qhdr_q_size = ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; + cmd_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT; + cmd_q_hdr->qhdr_pkt_drop_cnt = RESET; + cmd_q_hdr->qhdr_read_idx = RESET; + cmd_q_hdr->qhdr_write_idx = RESET; + + /* setup firmware-to-Host message queue */ + msg_q_hdr = &qtbl->q_hdr[Q_MSG]; + msg_q_hdr->qhdr_status = QHDR_ACTIVE; + msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova; + msg_q_hdr->qhdr_q_size = ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; + msg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT; + msg_q_hdr->qhdr_pkt_drop_cnt = RESET; + msg_q_hdr->qhdr_read_idx = RESET; + msg_q_hdr->qhdr_write_idx = RESET; + + /* setup firmware-to-Host message queue */ + dbg_q_hdr = &qtbl->q_hdr[Q_DBG]; + dbg_q_hdr->qhdr_status = QHDR_ACTIVE; + dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova; + dbg_q_hdr->qhdr_q_size = ICP_DBG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; + dbg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT; + dbg_q_hdr->qhdr_pkt_drop_cnt = RESET; + dbg_q_hdr->qhdr_read_idx = RESET; + dbg_q_hdr->qhdr_write_idx = RESET; + + sfr_buffer = (struct sfr_buf *)hfi_mem->sfr_buf.kva; + sfr_buffer->size = ICP_MSG_SFR_SIZE_IN_BYTES; + + switch (event_driven_mode) { + 
case INTR_MODE: + cmd_q_hdr->qhdr_type = Q_CMD; + cmd_q_hdr->qhdr_rx_wm = SET; + cmd_q_hdr->qhdr_tx_wm = SET; + cmd_q_hdr->qhdr_rx_req = SET; + cmd_q_hdr->qhdr_tx_req = RESET; + cmd_q_hdr->qhdr_rx_irq_status = RESET; + cmd_q_hdr->qhdr_tx_irq_status = RESET; + + msg_q_hdr->qhdr_type = Q_MSG; + msg_q_hdr->qhdr_rx_wm = SET; + msg_q_hdr->qhdr_tx_wm = SET; + msg_q_hdr->qhdr_rx_req = SET; + msg_q_hdr->qhdr_tx_req = RESET; + msg_q_hdr->qhdr_rx_irq_status = RESET; + msg_q_hdr->qhdr_tx_irq_status = RESET; + + dbg_q_hdr->qhdr_type = Q_DBG; + dbg_q_hdr->qhdr_rx_wm = SET; + dbg_q_hdr->qhdr_tx_wm = SET_WM; + dbg_q_hdr->qhdr_rx_req = RESET; + dbg_q_hdr->qhdr_tx_req = RESET; + dbg_q_hdr->qhdr_rx_irq_status = RESET; + dbg_q_hdr->qhdr_tx_irq_status = RESET; + + break; + + case POLL_MODE: + cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_POLL_MODE_2 | + RX_EVENT_POLL_MODE_2; + msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_POLL_MODE_2 | + RX_EVENT_POLL_MODE_2; + dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_POLL_MODE_2 | + RX_EVENT_POLL_MODE_2; + break; + + case WM_MODE: + cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_DRIVEN_MODE_2 | + RX_EVENT_DRIVEN_MODE_2; + cmd_q_hdr->qhdr_rx_wm = SET; + cmd_q_hdr->qhdr_tx_wm = SET; + cmd_q_hdr->qhdr_rx_req = RESET; + cmd_q_hdr->qhdr_tx_req = SET; + cmd_q_hdr->qhdr_rx_irq_status = RESET; + cmd_q_hdr->qhdr_tx_irq_status = RESET; + + msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_DRIVEN_MODE_2 | + RX_EVENT_DRIVEN_MODE_2; + msg_q_hdr->qhdr_rx_wm = SET; + msg_q_hdr->qhdr_tx_wm = SET; + msg_q_hdr->qhdr_rx_req = SET; + msg_q_hdr->qhdr_tx_req = RESET; + msg_q_hdr->qhdr_rx_irq_status = RESET; + msg_q_hdr->qhdr_tx_irq_status = RESET; + + dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_DRIVEN_MODE_2 | + RX_EVENT_DRIVEN_MODE_2; + dbg_q_hdr->qhdr_rx_wm = SET; + dbg_q_hdr->qhdr_tx_wm = SET_WM; + dbg_q_hdr->qhdr_rx_req = RESET; + dbg_q_hdr->qhdr_tx_req = RESET; + dbg_q_hdr->qhdr_rx_irq_status = RESET; + dbg_q_hdr->qhdr_tx_irq_status = RESET; + break; + + default: + CAM_ERR(CAM_HFI, "[%s] Invalid 
event driven mode :%u for hdl:%d", + hfi->client_name, event_driven_mode, client_handle); + break; + } + + hfi->ops = *hfi_ops; + hfi->priv = priv; + + icp_base = hfi_iface_addr(hfi); + if (!icp_base) { + CAM_ERR(CAM_HFI, "[%s] Invalid HFI interface address for hdl: %d", + hfi->client_name, client_handle); + rc = -EINVAL; + goto regions_fail; + } + + cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, + icp_base + HFI_REG_QTBL_PTR); + cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova, + icp_base + HFI_REG_SFR_PTR); + cam_io_w_mb((uint32_t)hfi_mem->shmem.iova, + icp_base + HFI_REG_SHARED_MEM_PTR); + cam_io_w_mb((uint32_t)hfi_mem->shmem.len, + icp_base + HFI_REG_SHARED_MEM_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova, + icp_base + HFI_REG_SECONDARY_HEAP_PTR); + cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len, + icp_base + HFI_REG_SECONDARY_HEAP_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->qdss.iova, + icp_base + HFI_REG_QDSS_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->qdss.len, + icp_base + HFI_REG_QDSS_IOVA_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->io_mem.iova, + icp_base + HFI_REG_IO_REGION_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->io_mem.len, + icp_base + HFI_REG_IO_REGION_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->io_mem2.iova, + icp_base + HFI_REG_IO2_REGION_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->io_mem2.len, + icp_base + HFI_REG_IO2_REGION_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.iova, + icp_base + HFI_REG_FWUNCACHED_REGION_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.len, + icp_base + HFI_REG_FWUNCACHED_REGION_SIZE); + cam_io_w_mb((uint32_t)hfi_mem->device_mem.iova, + icp_base + HFI_REG_DEVICE_REGION_IOVA); + cam_io_w_mb((uint32_t)hfi_mem->device_mem.len, + icp_base + HFI_REG_DEVICE_REGION_IOVA_SIZE); + + CAM_DBG(CAM_HFI, "[%s] HFI handle: %d", + hfi->client_name, client_handle); + + CAM_DBG(CAM_HFI, "IO1 : [0x%x 0x%x] IO2 [0x%x 0x%x]", + hfi_mem->io_mem.iova, hfi_mem->io_mem.len, + hfi_mem->io_mem2.iova, hfi_mem->io_mem2.len); + + CAM_DBG(CAM_HFI, "FwUncached : [0x%x 
0x%x] Shared [0x%x 0x%x]", + hfi_mem->fw_uncached.iova, hfi_mem->fw_uncached.len, + hfi_mem->shmem.iova, hfi_mem->shmem.len); + + CAM_DBG(CAM_HFI, "SecHeap : [0x%x 0x%x] QDSS [0x%x 0x%x]", + hfi_mem->sec_heap.iova, hfi_mem->sec_heap.len, + hfi_mem->qdss.iova, hfi_mem->qdss.len); + + CAM_DBG(CAM_HFI, "QTbl : [0x%x 0x%x] Sfr [0x%x 0x%x] Device [0x%x 0x%x]", + hfi_mem->qtbl.iova, hfi_mem->qtbl.len, + hfi_mem->sfr_buf.iova, hfi_mem->sfr_buf.len, + hfi_mem->device_mem.iova, hfi_mem->device_mem.len); + + if (cam_presil_mode_enabled()) + cam_hfi_presil_setup(hfi_mem); + + cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET, + icp_base + HFI_REG_HOST_ICP_INIT_REQUEST); + + if (cam_presil_mode_enabled()) + cam_hfi_presil_set_init_request(); + + if (cam_common_read_poll_timeout(icp_base + + HFI_REG_ICP_HOST_INIT_RESPONSE, + HFI_POLL_DELAY_US, HFI_POLL_TIMEOUT_US, + (uint32_t)UINT_MAX, ICP_INIT_RESP_SUCCESS, &status)) { + CAM_ERR(CAM_HFI, "[%s] hfi hdl:%u response poll timed out: status=0x%08x", + hfi->client_name, client_handle, status); + rc = -ETIMEDOUT; + goto regions_fail; + } + + hfi->fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION); + CAM_DBG(CAM_HFI, "[%s] ICP fw version: 0x%x", + hfi->client_name, hfi->fw_version); + + hfi->cmd_q_state = true; + hfi->msg_q_state = true; + hfi->dbg_q_state = true; + hfi->hfi_state = HFI_READY; + + hfi_irq_enable(hfi); + + mutex_unlock(&hfi->dbg_q_lock); + mutex_unlock(&hfi->msg_q_lock); + mutex_unlock(&hfi->cmd_q_lock); + + return rc; + +regions_fail: + mutex_unlock(&hfi->dbg_q_lock); + mutex_unlock(&hfi->msg_q_lock); + mutex_unlock(&hfi->cmd_q_lock); + return rc; +} + +void cam_hfi_deinit(int client_handle) +{ + struct hfi_info *hfi; + int rc; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return; + } + + if (cam_presil_mode_enabled()) { + CAM_DBG(CAM_HFI, + "[%s] HFI hdl: %d SYS_RESET Needed in presil for back to back hfi_init 
success", + hfi->client_name, client_handle); + hfi_send_system_cmd(client_handle, HFI_CMD_SYS_RESET, 0, 0); + } + + mutex_lock(&hfi->cmd_q_lock); + mutex_lock(&hfi->msg_q_lock); + mutex_lock(&hfi->dbg_q_lock); + + hfi->hfi_state = HFI_DEINIT; + hfi->cmd_q_state = false; + hfi->msg_q_state = false; + hfi->dbg_q_state = false; + + mutex_unlock(&hfi->dbg_q_lock); + mutex_unlock(&hfi->cmd_q_lock); + mutex_unlock(&hfi->msg_q_lock); + + memset(&hfi->map, 0, sizeof(struct hfi_mem_info)); + memset(&hfi->ops, 0, sizeof(struct hfi_ops)); + hfi->smem_size = 0; + hfi->uncachedheap_size = 0; + memset(hfi->msgpacket_buf, 0, sizeof(ICP_HFI_MAX_MSG_SIZE_IN_WORDS)); + hfi->priv = NULL; + hfi->dbg_lvl = 0; +} + +static int hfi_get_free_index(uint32_t *free_index) +{ + int i; + + for (i = 0; i < HFI_NUM_MAX; i++) { + if (!g_hfi.hfi[i]) { + *free_index = i; + return 0; + } + } + + return -EUSERS; +} + +int cam_hfi_register(int *client_handle, const char *client_name) +{ + struct hfi_info *hfi = NULL; + int hfi_index, rc = 0; + + if (!client_handle) { + CAM_ERR(CAM_HFI, "Client handle is NULL"); + return -EINVAL; + } + + mutex_lock(&g_hfi_lock); + if (IS_VALID_HFI_INDEX(*client_handle)) { + rc = hfi_get_client_info(*client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Unable to retrieve existing hfi info for handle:%d", + *client_handle); + rc = -EINVAL; + goto failed_hfi_register; + } + + CAM_ERR(CAM_HFI, "[%s] HFI client handle:%d is already established", + hfi->client_name, *client_handle); + rc = -EINVAL; + goto failed_hfi_register; + } + + rc = hfi_get_free_index(&hfi_index); + if (rc) { + CAM_ERR(CAM_HFI, "No available hfi slots rc:%d", rc); + goto failed_hfi_register; + } + + hfi = CAM_MEM_ZALLOC(sizeof(struct hfi_info), GFP_KERNEL); + if (!hfi) { + rc = -ENOMEM; + goto failed_hfi_register; + } + + if (hfi->hfi_state != HFI_DEINIT) { + CAM_ERR(CAM_HFI, "hfi_init: invalid state: %u hfi idx: %u", + hfi->hfi_state, hfi_index); + rc = -EINVAL; + goto hfi_failed_state; + } + + 
g_hfi.hfi[hfi_index] = hfi; + g_hfi.num_hfi++; + *client_handle = HFI_GET_CLIENT_HANDLE(hfi_index); + memcpy(hfi->client_name, client_name, HFI_CLIENT_NAME_LEN); + mutex_unlock(&g_hfi_lock); + + mutex_init(&hfi->cmd_q_lock); + mutex_init(&hfi->msg_q_lock); + mutex_init(&hfi->dbg_q_lock); + + return rc; + +hfi_failed_state: + CAM_MEM_FREE(hfi); +failed_hfi_register: + mutex_unlock(&g_hfi_lock); + return rc; +} + +int cam_hfi_unregister(int *client_handle) +{ + struct hfi_info *hfi; + uint32_t idx; + int rc; + + rc = hfi_get_client_info(*client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return rc; + } + + mutex_lock(&g_hfi_lock); + mutex_destroy(&hfi->dbg_q_lock); + mutex_destroy(&hfi->msg_q_lock); + mutex_destroy(&hfi->cmd_q_lock); + CAM_MEM_ZFREE((void *)hfi, sizeof(struct hfi_info)); + idx = HFI_GET_INDEX(*client_handle); + g_hfi.hfi[idx] = NULL; + g_hfi.num_hfi--; + mutex_unlock(&g_hfi_lock); + + *client_handle = HFI_HANDLE_INIT_VALUE; + + return 0; +} + + +#ifdef CONFIG_CAM_PRESIL +static int cam_hfi_presil_setup(struct hfi_mem_info *hfi_mem) +{ + /** + * The pchost maintains its own set of queue structures and + * needs additional info to accomplish this. Use the set of + * dummy registers to pass along this info. + */ + /** + * IOVA region length for each queue is currently hardcoded in + * pchost (except for SFR). No need to send for now. 
+ */ + cam_presil_send_event(CAM_PRESIL_EVENT_HFI_REG_CMD_Q_IOVA, hfi_mem->cmd_q.iova); + cam_presil_send_event(CAM_PRESIL_EVENT_HFI_REG_MSG_Q_IOVA, hfi_mem->msg_q.iova); + cam_presil_send_event(CAM_PRESIL_EVENT_HFI_REG_DBG_Q_IOVA, hfi_mem->dbg_q.iova); + cam_presil_send_event(CAM_PRESIL_EVENT_HFI_REG_SFR_LEN, hfi_mem->sfr_buf.len); + + return 0; +} + +static int cam_hfi_presil_set_init_request(void) +{ + CAM_DBG(CAM_PRESIL, "notifying pchost to start HFI init..."); + cam_presil_send_event(CAM_PRESIL_EVENT_HFI_REG_ICP_V1_HW_VERSION_TO_START_HFI_INIT, 0xFF); + CAM_DBG(CAM_PRESIL, "got done with PCHOST HFI init..."); + + return 0; +} + +int hfi_write_cmd(int client_handle, void *cmd_ptr) +{ + struct hfi_info *hfi; + int presil_rc = CAM_PRESIL_BLOCKED; + int rc = 0; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl:%d", + rc, client_handle); + return rc; + } + + if (!cmd_ptr) { + CAM_ERR(CAM_HFI, "[%s] command is null for hfi hdl:%d", + hfi->client_name, client_handle); + return -EINVAL; + } + + mutex_lock(&hfi->cmd_q_lock); + + presil_rc = cam_presil_hfi_write_cmd(cmd_ptr, (*(uint32_t *)cmd_ptr), + CAM_PRESIL_CLIENT_ID_CAMERA); + + if ((presil_rc != CAM_PRESIL_SUCCESS) && (presil_rc != CAM_PRESIL_BLOCKED)) { + CAM_ERR(CAM_HFI, "[%s] hfi hdl: %d failed presil rc %d", + hfi->client_name, client_handle, presil_rc); + rc = -EINVAL; + } else { + CAM_DBG(CAM_HFI, "[%s] hfi hdl: %d presil rc %d", + hfi->client_name, client_handle, presil_rc); + } + + mutex_unlock(&hfi->cmd_q_lock); + return rc; +} + +int hfi_read_message(int client_handle, uint32_t *pmsg, uint8_t q_id, + size_t buf_words_size, uint32_t *words_read) +{ + struct hfi_info *hfi; + int presil_rc = CAM_PRESIL_BLOCKED; + struct mutex *q_lock = NULL; + int rc = 0; + + rc = hfi_get_client_info(client_handle, &hfi); + if (rc) { + CAM_ERR(CAM_HFI, "Failed to get hfi info rc: %d for hdl: %d", + rc, client_handle); + return rc; + } + + if (!pmsg) { + 
CAM_ERR(CAM_HFI, "[%s] Invalid msg for hdl: %d", + hfi->client_name, client_handle); + return -EINVAL; + } + + switch (q_id) { + case Q_MSG: + q_lock = &hfi->msg_q_lock; + break; + case Q_DBG: + q_lock = &hfi->dbg_q_lock; + break; + default: + CAM_ERR(CAM_HFI, "Invalid q_id: %u for read", q_id); + return -EINVAL; + } + + mutex_lock(q_lock); + + memset(pmsg, 0x0, sizeof(uint32_t) * 256 /* ICP_MSG_BUF_SIZE */); + *words_read = 0; + + presil_rc = cam_presil_hfi_read_message(pmsg, q_id, words_read, + CAM_PRESIL_CLIENT_ID_CAMERA); + + if ((presil_rc != CAM_PRESIL_SUCCESS) && (presil_rc != CAM_PRESIL_BLOCKED)) { + CAM_ERR(CAM_HFI, "[%s] hfi hdl: %d failed presil rc %d", + hfi->client_name, client_handle, presil_rc); + rc = -EINVAL; + } else { + CAM_DBG(CAM_HFI, "[%s] hfi hdl: %d presil rc %d", + hfi->client_name, client_handle, presil_rc); + } + + mutex_unlock(q_lock); + return rc; +} +#else +/* when presil mode not enabled */ +static int cam_hfi_presil_setup(struct hfi_mem_info *hfi_mem) +{ + return 0; +} + +static int cam_hfi_presil_set_init_request(void) +{ + return 0; +} +#endif /* #ifdef CONFIG_CAM_PRESIL */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_core.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_core.c new file mode 100644 index 0000000000..b5e14c5ffb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_core.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "bps_core.h" +#include "bps_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_bps_hw_intf.h" +#include "cam_icp_hw_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "hfi_reg.h" +#include "cam_common_util.h" + +static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info, + struct cam_icp_cpas_vote *cpas_vote) +{ + int rc = 0; + + if (cpas_vote->ahb_vote_valid) + rc = cam_cpas_update_ahb_vote(core_info->cpas_handle, + &cpas_vote->ahb_vote); + if (cpas_vote->axi_vote_valid) + rc = cam_cpas_update_axi_vote(core_info->cpas_handle, + &cpas_vote->axi_vote); + + if (rc < 0) + CAM_ERR(CAM_PERF, "cpas vote is failed: %d", rc); + + return rc; +} + + +int cam_bps_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *bps_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_bps_device_core_info *core_info = NULL; + struct cam_icp_cpas_vote cpas_vote; + unsigned long flags; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &bps_dev->soc_info; + core_info = (struct cam_bps_device_core_info *)bps_dev->core_info; + + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + spin_lock_irqsave(&bps_dev->hw_lock, flags); + if (bps_dev->hw_state == CAM_HW_STATE_POWER_UP) { + core_info->power_on_cnt++; + spin_unlock_irqrestore(&bps_dev->hw_lock, flags); + CAM_DBG(CAM_ICP, "BPS%u powered on (refcnt: %u)", + soc_info->index, core_info->power_on_cnt); + return 0; + } + spin_unlock_irqrestore(&bps_dev->hw_lock, flags); + + rc = cam_vmrm_soc_acquire_resources(CAM_HW_ID_BPS + core_info->bps_hw_info->hw_idx); + if (rc) { + 
CAM_ERR(CAM_ICP, "BPS hw id %x acquire ownership failed", + CAM_HW_ID_BPS + core_info->bps_hw_info->hw_idx); + return rc; + } + + cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE; + cpas_vote.ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + cpas_vote.axi_vote.num_paths = 1; + cpas_vote.axi_vote.axi_path[0].path_data_type = + CAM_BPS_DEFAULT_AXI_PATH; + cpas_vote.axi_vote.axi_path[0].transac_type = + CAM_BPS_DEFAULT_AXI_TRANSAC; + cpas_vote.axi_vote.axi_path[0].camnoc_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote.ahb_vote, &cpas_vote.axi_vote); + if (rc) { + CAM_ERR(CAM_ICP, "cpas start failed: %d", rc); + goto error; + } + core_info->cpas_start = true; + + rc = cam_bps_enable_soc_resources(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc); + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_ICP, "cpas stop is failed"); + else + core_info->cpas_start = false; + } else { + core_info->clk_enable = true; + } + + spin_lock_irqsave(&bps_dev->hw_lock, flags); + bps_dev->hw_state = CAM_HW_STATE_POWER_UP; + core_info->power_on_cnt++; + spin_unlock_irqrestore(&bps_dev->hw_lock, flags); + CAM_DBG(CAM_ICP, "BPS%u powered on (refcnt: %u)", + soc_info->index, core_info->power_on_cnt); + +error: + return rc; +} + +int cam_bps_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *bps_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_bps_device_core_info *core_info = NULL; + unsigned long flags; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &bps_dev->soc_info; + core_info = (struct cam_bps_device_core_info *)bps_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK", + soc_info, 
core_info); + return -EINVAL; + } + + spin_lock_irqsave(&bps_dev->hw_lock, flags); + if (bps_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + spin_unlock_irqrestore(&bps_dev->hw_lock, flags); + return 0; + } + + core_info->power_on_cnt--; + if (core_info->power_on_cnt) { + spin_unlock_irqrestore(&bps_dev->hw_lock, flags); + CAM_DBG(CAM_ICP, "BPS%u power on reference still held %u", + soc_info->index, core_info->power_on_cnt); + return 0; + } + spin_unlock_irqrestore(&bps_dev->hw_lock, flags); + + rc = cam_bps_disable_soc_resources(soc_info, core_info->clk_enable); + if (rc) + CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc); + core_info->clk_enable = false; + + if (core_info->cpas_start) { + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_ICP, "cpas stop is failed"); + else + core_info->cpas_start = false; + } + + spin_lock_irqsave(&bps_dev->hw_lock, flags); + bps_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + spin_unlock_irqrestore(&bps_dev->hw_lock, flags); + CAM_DBG(CAM_ICP, "BPS%u powered off (refcnt: %u)", + soc_info->index, core_info->power_on_cnt); + + rc = cam_vmrm_soc_release_resources(CAM_HW_ID_BPS + core_info->bps_hw_info->hw_idx); + if (rc) { + CAM_ERR(CAM_ICP, "BPS hw id %x release ownership failed", + CAM_HW_ID_BPS + core_info->bps_hw_info->hw_idx); + } + + + return rc; +} + +static int cam_bps_handle_pc(struct cam_hw_info *bps_dev) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_bps_device_core_info *core_info = NULL; + struct cam_bps_device_hw_info *hw_info = NULL; + int rc = 0; + int pwr_ctrl; + int pwr_status; + + soc_info = &bps_dev->soc_info; + core_info = (struct cam_bps_device_core_info *)bps_dev->core_info; + hw_info = core_info->bps_hw_info; + + if (!core_info->cpas_start) { + CAM_DBG(CAM_ICP, "CPAS BPS client not started"); + return 0; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + 
return rc; + } + + if (!(pwr_ctrl & BPS_COLLAPSE_MASK)) { + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0x1); + + if ((pwr_status >> BPS_PWR_ON_MASK)) + CAM_WARN(CAM_PERF, "BPS: pwr_status(%x):pwr_ctrl(%x)", + pwr_status, pwr_ctrl); + } + + rc = cam_bps_get_gdsc_control(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "failed to get gdsc control rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + CAM_DBG(CAM_PERF, "pwr_ctrl=%x pwr_status=%x", pwr_ctrl, pwr_status); + + return 0; +} + +static int cam_bps_handle_resume(struct cam_hw_info *bps_dev) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_bps_device_core_info *core_info = NULL; + struct cam_bps_device_hw_info *hw_info = NULL; + int pwr_ctrl; + int pwr_status; + int rc = 0; + + soc_info = &bps_dev->soc_info; + core_info = (struct cam_bps_device_core_info *)bps_dev->core_info; + hw_info = core_info->bps_hw_info; + + if (!core_info->cpas_start) { + CAM_DBG(CAM_ICP, "CPAS BPS client not started"); + return 0; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + if (pwr_ctrl & BPS_COLLAPSE_MASK) { + CAM_DBG(CAM_PERF, "BPS: pwr_ctrl set(%x)", pwr_ctrl); + cam_cpas_reg_write(core_info->cpas_handle, + 
CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0); + } + + rc = cam_bps_transfer_gdsc_control(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "failed to transfer gdsc control rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + CAM_DBG(CAM_PERF, "pwr_ctrl=%x pwr_status=%x", pwr_ctrl, pwr_status); + + return rc; +} + +int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *bps_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_bps_device_core_info *core_info = NULL; + struct cam_bps_device_hw_info *hw_info = NULL; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid arguments"); + return -EINVAL; + } + + if (cmd_type >= CAM_ICP_DEV_CMD_MAX) { + CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type); + return -EINVAL; + } + + soc_info = &bps_dev->soc_info; + core_info = (struct cam_bps_device_core_info *)bps_dev->core_info; + hw_info = core_info->bps_hw_info; + + switch (cmd_type) { + case CAM_ICP_DEV_CMD_VOTE_CPAS: { + struct cam_icp_cpas_vote *cpas_vote = cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_ICP, "cmd args NULL"); + return -EINVAL; + } + + cam_bps_cpas_vote(core_info, cpas_vote); + break; + } + + case CAM_ICP_DEV_CMD_CPAS_START: { + struct cam_icp_cpas_vote *cpas_vote = cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_ICP, "cmd args NULL"); + return -EINVAL; + } + + if (!core_info->cpas_start) { + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote->ahb_vote, + &cpas_vote->axi_vote); + core_info->cpas_start = true; + } + break; + } + + case CAM_ICP_DEV_CMD_CPAS_STOP: + if 
(core_info->cpas_start) { + cam_cpas_stop(core_info->cpas_handle); + core_info->cpas_start = false; + } + break; + case CAM_ICP_DEV_CMD_POWER_COLLAPSE: + rc = cam_bps_handle_pc(bps_dev); + break; + case CAM_ICP_DEV_CMD_POWER_RESUME: + rc = cam_bps_handle_resume(bps_dev); + break; + case CAM_ICP_DEV_CMD_UPDATE_CLK: { + struct cam_icp_dev_clk_update_cmd *clk_upd_cmd = cmd_args; + struct cam_ahb_vote ahb_vote; + uint32_t clk_rate = clk_upd_cmd->curr_clk_rate; + int32_t clk_level = 0, err = 0; + + CAM_DBG(CAM_PERF, "bps_src_clk rate = %d", (int)clk_rate); + + if (!core_info->clk_enable) { + if (clk_upd_cmd->dev_pc_enable) { + cam_bps_handle_pc(bps_dev); + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0x0); + } + rc = cam_bps_toggle_clk(soc_info, true); + if (rc) + CAM_ERR(CAM_ICP, "Enable failed"); + else + core_info->clk_enable = true; + if (clk_upd_cmd->dev_pc_enable) { + rc = cam_bps_handle_resume(bps_dev); + if (rc) + CAM_ERR(CAM_ICP, "BPS resume failed"); + } + } + CAM_DBG(CAM_PERF, "clock rate %d", clk_rate); + rc = cam_bps_update_clk_rate(soc_info, clk_rate); + if (rc) + CAM_ERR(CAM_PERF, "Failed to update clk %d", clk_rate); + + err = cam_soc_util_get_clk_level(soc_info, + clk_rate, soc_info->src_clk_idx, + &clk_level); + + if (!err) { + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = clk_level; + + rc = cam_cpas_update_ahb_vote(core_info->cpas_handle, &ahb_vote); + if (rc) { + CAM_ERR(CAM_PERF, "failed at updating ahb vote level rc: %d", + rc); + return rc; + } + + rc = cam_cpas_update_axi_floor_lvl(core_info->cpas_handle, + clk_level); + if (rc) { + CAM_ERR(CAM_PERF, "failed at updating axi vote level clk_level: %d rc: %d", + clk_level, rc); + return rc; + } + } + break; + } + case CAM_ICP_DEV_CMD_DISABLE_CLK: + if (core_info->clk_enable == true) + cam_bps_toggle_clk(soc_info, false); + core_info->clk_enable = false; + break; + default: + CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type); + rc = 
-EINVAL; + break; + } + return rc; +} + +irqreturn_t cam_bps_irq(int irq_num, void *data) +{ + return IRQ_HANDLED; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_core.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_core.h new file mode 100644 index 0000000000..572052cc0c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_core.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_BPS_CORE_H +#define CAM_BPS_CORE_H + +#include +#include +#include +#include + +#define BPS_COLLAPSE_MASK 0x1 +#define BPS_PWR_ON_MASK 0x2 + +struct cam_bps_device_hw_info { + uint32_t hw_idx; + uint32_t pwr_ctrl; + uint32_t pwr_status; +}; + +struct cam_bps_device_core_info { + struct cam_bps_device_hw_info *bps_hw_info; + uint32_t cpas_handle; + uint32_t power_on_cnt; + bool cpas_start; + bool clk_enable; +}; + +int cam_bps_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_bps_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); + +irqreturn_t cam_bps_irq(int irq_num, void *data); + +/** + * @brief : API to register BPS hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_bps_init_module(void); + +/** + * @brief : API to remove BPS Hw from platform framework. 
+ */ +void cam_bps_exit_module(void); +#endif /* CAM_BPS_CORE_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_dev.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_dev.c new file mode 100644 index 0000000000..ef21f59daf --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_dev.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "bps_core.h" +#include "bps_soc.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_io_util.h" +#include "cam_icp_hw_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static struct cam_icp_hw_intf_data cam_bps_hw_list[CAM_BPS_HW_NUM_MAX]; + +static struct cam_bps_device_hw_info cam_bps_hw_info = { + .hw_idx = 0, + .pwr_ctrl = 0x48, + .pwr_status = 0x44, +}; + +static struct cam_bps_device_hw_info cam_bps680_hw_info = { + .hw_idx = 0, + .pwr_ctrl = 0x48, + .pwr_status = 0x44, +}; + +static struct cam_bps_device_hw_info cam_bps880_hw_info = { + .hw_idx = 0, + .pwr_ctrl = 0x48, + .pwr_status = 0x44, +}; + +static bool cam_bps_cpas_cb(uint32_t client_handle, void *userdata, + struct cam_cpas_irq_data *irq_data) +{ + bool error_handled = false; + + if (!irq_data) + return error_handled; + + switch (irq_data->irq_type) { + case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ICP, + "IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d", + irq_data->irq_type, + irq_data->u.dec_err.decerr_status.value, + irq_data->u.dec_err.decerr_status.thr_err, + irq_data->u.dec_err.decerr_status.fcl_err, + 
irq_data->u.dec_err.decerr_status.len_md_err, + irq_data->u.dec_err.decerr_status.format_err); + error_handled = true; + break; + case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ICP, + "IPE/BPS UBWC Encode error type=%d status=%x", + irq_data->irq_type, + irq_data->u.enc_err.encerr_status.value); + error_handled = true; + break; + default: + break; + } + + return error_handled; +} + +int cam_bps_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_bps_device_core_info *core_info, + uint32_t hw_idx) +{ + struct cam_cpas_register_params cpas_register_params; + int rc; + + cpas_register_params.dev = &soc_info->pdev->dev; + memcpy(cpas_register_params.identifier, "bps", sizeof("bps")); + cpas_register_params.cam_cpas_client_cb = cam_bps_cpas_cb; + cpas_register_params.cell_index = hw_idx; + cpas_register_params.userdata = NULL; + + rc = cam_cpas_register_client(&cpas_register_params); + if (rc < 0) { + CAM_ERR(CAM_ICP, "failed: %d", rc); + return rc; + } + core_info->cpas_handle = cpas_register_params.client_handle; + + return rc; +} + +static int cam_bps_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *bps_dev = NULL; + struct cam_hw_intf *bps_dev_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_bps_device_core_info *core_info = NULL; + struct cam_bps_device_hw_info *hw_info = NULL; + int rc = 0, i; + uint32_t hw_idx; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + struct cam_bps_soc_private *bps_soc_priv; + + CAM_GET_TIMESTAMP(ts_start); + bps_dev_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!bps_dev_intf) + return -ENOMEM; + + of_property_read_u32(pdev->dev.of_node, + "cell-index", &hw_idx); + bps_dev_intf->hw_idx = hw_idx; + + bps_dev = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!bps_dev) { + rc = -ENOMEM; + goto free_dev_intf; + } + + 
bps_dev->soc_info.pdev = pdev; + bps_dev->soc_info.dev = &pdev->dev; + bps_dev->soc_info.dev_name = pdev->name; + bps_dev_intf->hw_priv = bps_dev; + bps_dev_intf->hw_ops.init = cam_bps_init_hw; + bps_dev_intf->hw_ops.deinit = cam_bps_deinit_hw; + bps_dev_intf->hw_ops.process_cmd = cam_bps_process_cmd; + bps_dev_intf->hw_type = CAM_ICP_DEV_BPS; + platform_set_drvdata(pdev, &cam_bps_hw_list[hw_idx]); + bps_dev->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_bps_device_core_info), + GFP_KERNEL); + if (!bps_dev->core_info) { + rc = -ENOMEM; + goto free_dev; + } + + core_info = (struct cam_bps_device_core_info *)bps_dev->core_info; + match_dev = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_ICP, "No bps hardware info"); + rc = -EINVAL; + goto free_core_info; + } + + hw_info = (struct cam_bps_device_hw_info *)match_dev->data; + core_info->bps_hw_info = hw_info; + + rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq, + bps_dev); + if (rc < 0) { + CAM_ERR(CAM_ICP, "failed to init_soc"); + goto free_core_info; + } + + bps_dev->soc_info.hw_id = CAM_HW_ID_BPS + bps_dev->soc_info.index; + CAM_DBG(CAM_ICP, "soc info : %pK", + (void *)&bps_dev->soc_info); + + rc = cam_vmvm_populate_hw_instance_info(&bps_dev->soc_info, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ICP, " hw instance populate failed: %d", rc); + goto free_soc_resources; + } + + rc = cam_bps_register_cpas(&bps_dev->soc_info, + core_info, bps_dev_intf->hw_idx); + if (rc < 0) + goto free_soc_resources; + + bps_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + bps_soc_priv = bps_dev->soc_info.soc_private; + cam_bps_hw_list[hw_idx].pid = + CAM_MEM_ZALLOC_ARRAY(bps_soc_priv->num_pid, sizeof(uint32_t), GFP_KERNEL); + if (!cam_bps_hw_list[hw_idx].pid) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for bps hw list"); + rc = -ENOMEM; + goto free_soc_resources; + } + + cam_bps_hw_list[hw_idx].hw_intf = bps_dev_intf; + cam_bps_hw_list[hw_idx].num_pid = bps_soc_priv->num_pid; 
+ for (i = 0; i < bps_soc_priv->num_pid; i++) + cam_bps_hw_list[hw_idx].pid[i] = bps_soc_priv->pid[i]; + + mutex_init(&bps_dev->hw_mutex); + spin_lock_init(&bps_dev->hw_lock); + init_completion(&bps_dev->hw_complete); + CAM_DBG(CAM_ICP, "BPS:%d component bound successfully", + bps_dev_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +free_soc_resources: + cam_bps_deinit_soc_resources(&bps_dev->soc_info); +free_core_info: + CAM_MEM_FREE(bps_dev->core_info); +free_dev: + CAM_MEM_FREE(bps_dev); +free_dev_intf: + CAM_MEM_FREE(bps_dev_intf); + return rc; +} + +static void cam_bps_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *bps_dev = NULL; + struct cam_hw_intf *bps_dev_intf = NULL; + struct cam_bps_device_core_info *core_info = NULL; + struct platform_device *pdev = to_platform_device(dev); + struct cam_icp_hw_intf_data *bps_intf_data; + + CAM_DBG(CAM_ICP, "Unbinding component: %s", pdev->name); + bps_intf_data = platform_get_drvdata(pdev); + if (!bps_intf_data) { + CAM_ERR(CAM_ICP, "Error No data in pdev"); + return; + } + + bps_dev_intf = bps_intf_data->hw_intf; + if (!bps_dev_intf) { + CAM_ERR(CAM_ICP, "Error No BPS dev interface"); + return; + } + + bps_dev = bps_dev_intf->hw_priv; + core_info = (struct cam_bps_device_core_info *)bps_dev->core_info; + cam_cpas_unregister_client(core_info->cpas_handle); + cam_bps_deinit_soc_resources(&bps_dev->soc_info); + + CAM_MEM_FREE(bps_dev->core_info); + CAM_MEM_FREE(bps_dev); + CAM_MEM_FREE(bps_dev_intf); +} + +const static struct component_ops cam_bps_component_ops = { + .bind = cam_bps_component_bind, + .unbind = cam_bps_component_unbind, +}; + +int cam_bps_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ICP, "Adding BPS component"); + rc = component_add(&pdev->dev, &cam_bps_component_ops); + if (rc) + CAM_ERR(CAM_ICP, 
"failed to add component rc: %d", rc); + + return rc; +} + +static int cam_bps_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_bps_component_ops); + return 0; +} + +static const struct of_device_id cam_bps_dt_match[] = { + { + .compatible = "qcom,cam-bps", + .data = &cam_bps_hw_info, + }, + { + .compatible = "qcom,cam-bps680", + .data = &cam_bps680_hw_info, + }, + { + .compatible = "qcom,cam-bps880", + .data = &cam_bps880_hw_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_bps_dt_match); + +struct platform_driver cam_bps_driver = { + .probe = cam_bps_probe, + .remove = cam_bps_remove, + .driver = { + .name = "cam-bps", + .owner = THIS_MODULE, + .of_match_table = cam_bps_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_bps_init_module(void) +{ + return platform_driver_register(&cam_bps_driver); +} + +void cam_bps_exit_module(void) +{ + platform_driver_unregister(&cam_bps_driver); +} + +MODULE_DESCRIPTION("CAM BPS driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_soc.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_soc.c new file mode 100644 index 0000000000..49f78cc9d9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_soc.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "bps_soc.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info) +{ + int rc = 0, num_pid, i; + struct platform_device *pdev = soc_info->pdev; + struct device_node *of_node = pdev->dev.of_node; + struct cam_bps_soc_private *bps_soc_private = soc_info->soc_private; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_ICP, "get bps dt prop is failed"); + goto end; + } + + num_pid = of_property_count_u32_elems(of_node, "cam_hw_pid"); + CAM_DBG(CAM_ICP, "BPS pid count: %d", num_pid); + + if (num_pid <= 0) + goto end; + + bps_soc_private->pid = CAM_MEM_ZALLOC_ARRAY(num_pid, sizeof(uint32_t), GFP_KERNEL); + if (!bps_soc_private->pid) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for BPS hw pids"); + rc = -ENOMEM; + goto end; + } + + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(of_node, "cam_hw_pid", i, &bps_soc_private->pid[i]); + bps_soc_private->num_pid = num_pid; + +end: + return rc; +} + +static int cam_bps_request_platform_resource( + struct cam_hw_soc_info *soc_info, + irq_handler_t bps_irq_handler, void *data) +{ + int rc = 0, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, bps_irq_handler, &(irq_data[0])); + + return rc; +} + +int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t bps_irq_handler, void *irq_data) +{ + struct cam_bps_soc_private *soc_private; + int rc = 0; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_bps_soc_private), GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_ICP, "Failed at allocating BPS soc_private"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + + rc = cam_bps_get_dt_properties(soc_info); + if (rc < 0) + return rc; + + rc = 
cam_bps_request_platform_resource(soc_info, bps_irq_handler,
		irq_data);
	if (rc < 0)
		return rc;

	return rc;
}

/*
 * cam_bps_deinit_soc_resources() - free soc_private (and its pid list)
 * and release the platform resources claimed at init time.
 */
void cam_bps_deinit_soc_resources(struct cam_hw_soc_info *soc_info)
{
	int rc = 0;
	struct cam_bps_soc_private *soc_private;

	soc_private = soc_info->soc_private;
	if (soc_private) {
		if (soc_private->pid) {
			CAM_MEM_FREE(soc_private->pid);
			soc_private->pid = NULL;
		}

		CAM_MEM_FREE(soc_private);
		soc_private = NULL;
	}

	rc = cam_soc_util_release_platform_resource(soc_info);
	if (rc)
		CAM_WARN(CAM_ICP, "release platform resources fail");
}

/* Power up the BPS: enable platform resources at the SVS clock vote. */
int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
	int rc = 0;

	rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true,
		CAM_SVS_VOTE, false);
	if (rc)
		CAM_ERR(CAM_ICP, "enable platform failed");

	return rc;
}

/* Power down the BPS; clocks are gated only when @disable_clk is true. */
int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
	bool disable_clk)
{
	int rc = 0;

	rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, disable_clk,
		false);
	if (rc)
		CAM_ERR(CAM_ICP, "disable platform failed");

	return rc;
}

/*
 * cam_bps_transfer_gdsc_control() - switch every BPS regulator to FAST mode.
 * On any failure, regulators already switched are rolled back to NORMAL.
 */
int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info)
{
	int i;
	int rc;

	for (i = 0; i < soc_info->num_rgltr; i++) {
		rc = cam_wrapper_regulator_set_mode(soc_info->rgltr[i],
			REGULATOR_MODE_FAST, soc_info->rgltr_name[i]);
		if (rc) {
			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
				soc_info->rgltr_name[i]);
			goto rgltr_set_mode_failed;
		}
	}
	return 0;

rgltr_set_mode_failed:
	/* unwind: restore NORMAL mode on regulators already switched */
	for (i = i - 1; i >= 0; i--)
		if (soc_info->rgltr[i])
			cam_wrapper_regulator_set_mode(soc_info->rgltr[i],
				REGULATOR_MODE_NORMAL, soc_info->rgltr_name[i]);

	return rc;
}

/*
 * cam_bps_get_gdsc_control() - switch every BPS regulator back to NORMAL
 * mode; mirror of cam_bps_transfer_gdsc_control(), rolling back to FAST
 * mode on failure.
 */
int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info)
{
	int i;
	int rc;

	for (i = 0; i < soc_info->num_rgltr; i++) {
		rc = cam_wrapper_regulator_set_mode(soc_info->rgltr[i],
			REGULATOR_MODE_NORMAL, soc_info->rgltr_name[i]);
		if (rc) {
CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
				soc_info->rgltr_name[i]);
			goto rgltr_set_mode_failed;
		}
	}
	return 0;

rgltr_set_mode_failed:
	/* unwind: restore FAST mode on regulators already switched */
	for (i = i - 1; i >= 0; i--)
		if (soc_info->rgltr[i])
			cam_wrapper_regulator_set_mode(soc_info->rgltr[i],
				REGULATOR_MODE_FAST, soc_info->rgltr_name[i]);

	return rc;
}

/*
 * cam_bps_update_clk_rate() - program the BPS source clock to @clk_rate.
 *
 * The requested rate is clamped to the TURBO corner whenever a valid,
 * non-zero TURBO rate is known for the source clock.
 * Returns the result of cam_soc_util_set_src_clk_rate().
 */
int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
	uint32_t clk_rate)
{
	int32_t src_clk_idx;

	if (!soc_info)
		return -EINVAL;

	src_clk_idx = soc_info->src_clk_idx;

	/* never program the source clock above the TURBO corner */
	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
		CAM_DBG(CAM_PERF, "clk_rate %d greater than max, reset to %d",
			clk_rate,
			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
	}

	return cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, clk_rate, 0);
}

/*
 * cam_bps_toggle_clk() - gate or ungate the BPS clocks.
 * Enable errors are propagated; the disable path returns 0 unconditionally.
 */
int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
{
	int rc = 0;

	if (clk_enable)
		rc = cam_soc_util_clk_enable_default(soc_info, CAM_CLK_SW_CLIENT_IDX, CAM_SVS_VOTE);
	else
		cam_soc_util_clk_disable_default(soc_info, CAM_CLK_SW_CLIENT_IDX);

	CAM_DBG(CAM_ICP, "%s BPS clock", clk_enable ? "Enable" : "Disable");

	return rc;
}
diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_soc.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_soc.h
new file mode 100644
index 0000000000..b58583c1b4
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/bps_hw/bps_soc.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _CAM_BPS_SOC_H_
#define _CAM_BPS_SOC_H_

#include "cam_soc_util.h"

/*
 * struct cam_bps_soc_private - BPS-specific SOC data.
 * @num_pid: number of entries in @pid
 * @pid:     HW PID list parsed from the "cam_hw_pid" DT property
 */
struct cam_bps_soc_private {
	uint32_t num_pid;
	uint32_t *pid;
};

/* Allocate soc_private, parse DT and request platform resources. */
int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t bps_irq_handler, void *irq_data);

/* Enable platform resources at the SVS clock vote. */
int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info);

/* Disable platform resources; clocks gated only when @disable_clk is true. */
int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
	bool disable_clk);

/* Switch all regulators back to NORMAL mode (unwinds to FAST on failure). */
int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info);

/* Switch all regulators to FAST mode (unwinds to NORMAL on failure). */
int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);

/* Program the BPS source clock, clamped at the TURBO corner. */
int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
	uint32_t clk_rate);
/* Gate/ungate the BPS clocks. */
int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
/* Free soc_private and release platform resources. */
void cam_bps_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
#endif /* _CAM_BPS_SOC_H_*/
diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
new file mode 100644
index 0000000000..123d5fdec3
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cam_sync_api.h" +#include "cam_packet_util.h" +#include "cam_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_icp_hw_mgr.h" +#include "cam_bps_hw_intf.h" +#include "cam_ipe_hw_intf.h" +#include "cam_smmu_api.h" +#include "cam_mem_mgr.h" +#include "hfi_intf.h" +#include "hfi_reg.h" +#include "hfi_session_defs.h" +#include "hfi_sys_defs.h" +#include "cam_req_mgr_workq.h" +#include "hfi_sys_defs.h" +#include "cam_debug_util.h" +#include "cam_soc_util.h" +#include "cam_trace.h" +#include "cam_cpas_api.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" +#include "cam_presil_hw_access.h" +#include "cam_icp_proc.h" + +#define ICP_WORKQ_TASK_CMD_TYPE 1 +#define ICP_WORKQ_TASK_MSG_TYPE 2 + +#define ICP_DEVICE_IDLE_TIMEOUT 400 + +/* + * If synx fencing is enabled, send FW memory mapping + * for synx hw_mutex, ipc hw_mutex, synx global mem + * and global cntr for qtimer + */ +#define ICP_NUM_MEM_REGIONS_FOR_SYNX 4 + +#define ICP_NUM_MEM_REGIONS_FOR_LLCC 1 + +DECLARE_RWSEM(frame_in_process_sem); + +static int cam_icp_vm_send_msg(struct cam_icp_hw_mgr *hw_mgr, uint32_t dest_vm, + uint32_t msg_type, bool need_ack); + +static struct cam_icp_hw_mgr *g_icp_hw_mgr[CAM_ICP_SUBDEV_MAX]; + +uint32_t icp_cpas_mask[CAM_ICP_SUBDEV_MAX] = {CPAS_ICP_BIT, CPAS_ICP1_BIT}; + +static void cam_icp_mgr_process_dbg_buf(struct cam_icp_hw_mgr *hw_mgr); +static int cam_icp_mgr_restart_icp(struct cam_icp_hw_mgr *hw_mgr); + +static int cam_icp_dump_io_cfg(struct cam_icp_hw_ctx_data *ctx_data, + int32_t buf_handle, uint32_t size) +{ + uintptr_t vaddr_ptr; + uint32_t *ptr; + uint32_t io_size; + size_t len; + int rc, i; + char buf[512]; + int used = 0; + + rc = cam_mem_get_cpu_buf(buf_handle, &vaddr_ptr, &len); + if (rc) { + CAM_ERR(CAM_ICP, "%s: Unable to get io_cfg buf 
address", + ctx_data->ctx_id_string); + return rc; + } + + io_size = size / sizeof(uint32_t); + ptr = (uint32_t *)vaddr_ptr; + for (i = 0; i < io_size; i++) { + used += snprintf(buf + used, + sizeof(buf) - used, "0X%08X-", ptr[i]); + if (!(i % 8)) { + CAM_DBG(CAM_ICP, "%s: %s", __func__, buf); + used = 0; + } + } + cam_mem_put_cpu_buf(buf_handle); + return rc; +} + +static const char *cam_icp_dev_type_to_name(uint32_t dev_type) +{ + switch (dev_type) { + case CAM_ICP_RES_TYPE_BPS: + return "BPS"; + case CAM_ICP_RES_TYPE_BPS_RT: + return "BPS_RT"; + case CAM_ICP_RES_TYPE_BPS_SEMI_RT: + return "BPS_SEMI_RT"; + case CAM_ICP_RES_TYPE_IPE: + return "IPE"; + case CAM_ICP_RES_TYPE_IPE_RT: + return "IPE_RT"; + case CAM_ICP_RES_TYPE_IPE_SEMI_RT: + return "IPE_SEMI_RT"; + case CAM_ICP_RES_TYPE_OFE: + return "OFE"; + case CAM_ICP_RES_TYPE_OFE_RT: + return "OFE_RT"; + case CAM_ICP_RES_TYPE_OFE_SEMI_RT: + return "OFE_SEMI_RT"; + default: + return "Invalid dev type"; + } +} + +static inline enum cam_device_type cam_icp_get_camera_device_type( + enum cam_icp_hw_type hw_type) +{ + switch (hw_type) { + case CAM_ICP_DEV_IPE: + return CAM_CPAS_HW_TYPE_IPE; + case CAM_ICP_DEV_BPS: + return CAM_CPAS_HW_TYPE_BPS; + case CAM_ICP_DEV_OFE: + return CAM_CPAS_HW_TYPE_OFE; + default: + CAM_ERR(CAM_ICP, "Invalid hardware type: %u", hw_type); + } + + return CAM_CPAS_HW_TYPE_MAX; +} + +static inline int cam_get_cpas_out_port_id(enum cam_icp_hw_type hw_dev_type, + uint32_t port_id, enum cam_ipe_out_port_type *cpas_port_id) +{ + int rc = 0; + + if (hw_dev_type == CAM_ICP_DEV_IPE) { + switch (port_id) { + case CAM_ICP_IPE_OUTPUT_IMAGE_DISPLAY: + *cpas_port_id = CAM_CPAS_IPE_OUTPUT_IMAGE_DISPLAY; + break; + case CAM_ICP_IPE_OUTPUT_IMAGE_VIDEO: + *cpas_port_id = CAM_CPAS_IPE_OUTPUT_IMAGE_VIDEO; + break; + case CAM_ICP_IPE_OUTPUT_IMAGE_FULL_REF: + *cpas_port_id = CAM_CPAS_IPE_OUTPUT_IMAGE_FULL_REF; + break; + case CAM_ICP_IPE_OUTPUT_IMAGE_DS4_REF: + *cpas_port_id = CAM_CPAS_IPE_OUTPUT_IMAGE_DS4_REF; + 
break; + case CAM_ICP_IPE_OUTPUT_IMAGE_DS16_REF: + *cpas_port_id = CAM_CPAS_IPE_OUTPUT_IMAGE_DS16_REF; + break; + case CAM_ICP_IPE_OUTPUT_IMAGE_DS64_REF: + *cpas_port_id = CAM_CPAS_IPE_OUTPUT_IMAGE_DS64_REF; + break; + case CAM_ICP_IPE_OUTPUT_IMAGE_FD: + *cpas_port_id = CAM_CPAS_IPE_OUTPUT_IMAGE_FD; + break; + case CAM_ICP_IPE_OUTPUT_IMAGE_STATS_IHIST: + *cpas_port_id = CAM_CPAS_IPE_OUTPUT_IMAGE_STATS_IHIST; + break; + default: + CAM_ERR(CAM_ICP, "Invalid IPE output port ID: %u", port_id); + rc = -EINVAL; + } + } else { + CAM_ERR(CAM_ICP, "Dynamic port config is not supported on this device %d" + , hw_dev_type); + rc = -EINVAL; + } + + return rc; +} + +static inline void cam_icp_dump_debug_info(struct cam_icp_hw_mgr *hw_mgr, + bool skip_dump) +{ + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + uint32_t dump_type; + int rc; + + if (skip_dump) + return; + + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is NULL", + hw_mgr->hw_mgr_name); + return; + } + + dump_type = CAM_ICP_DUMP_STATUS_REGISTERS; + cam_icp_mgr_process_dbg_buf(hw_mgr); + cam_hfi_queue_dump(hw_mgr->hfi_handle, false); + + rc = icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv, + CAM_ICP_CMD_HW_REG_DUMP, &dump_type, sizeof(dump_type)); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Fail to dump debug info", hw_mgr->hw_mgr_name); +} + +static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr) +{ + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + struct cam_icp_ubwc_cfg_cmd ubwc_cmd; + int rc; + + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is NULL", + hw_mgr->hw_mgr_name); + return -EINVAL; + } + + ubwc_cmd.ubwc_cfg_dev_mask = hw_mgr->hw_cap_mask; + ubwc_cmd.disable_ubwc_comp = hw_mgr->disable_ubwc_comp; + + rc = icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv, CAM_ICP_CMD_UBWC_CFG, + &ubwc_cmd, sizeof(ubwc_cmd)); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Fail to submit UBWC config", hw_mgr->hw_mgr_name); + + return rc; +} + +static void 
cam_icp_hw_mgr_clk_info_update(struct cam_icp_hw_ctx_data *ctx_data) +{ + struct cam_icp_clk_info *dev_clk_info; + + dev_clk_info = &ctx_data->device_info->clk_info; + + if (dev_clk_info->base_clk >= ctx_data->clk_info.base_clk) + dev_clk_info->base_clk -= ctx_data->clk_info.base_clk; +} + +static void cam_icp_hw_mgr_reset_clk_info(struct cam_icp_hw_mgr *hw_mgr) +{ + struct cam_icp_clk_info *clk_info; + int i; + + for (i = 0; i < hw_mgr->num_dev_info; i++) { + clk_info = &hw_mgr->dev_info[i].clk_info; + + clk_info->base_clk = 0; + clk_info->curr_clk = hw_mgr->icp_svs_clk; + clk_info->threshold = ICP_OVER_CLK_THRESHOLD; + clk_info->over_clked = 0; + clk_info->uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW; + clk_info->compressed_bw = CAM_CPAS_DEFAULT_AXI_BW; + } + + hw_mgr->icp_default_clk = hw_mgr->icp_svs_clk; +} + +static int cam_icp_get_actual_clk_rate_idx( + struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk) +{ + int i; + + for (i = 0; i < CAM_MAX_VOTE; i++) + if (ctx_data->clk_info.clk_rate[i] >= base_clk) + return i; + + /* + * Caller has to ensure returned index is within array + * size bounds while accessing that index. 
+ */ + + return i; +} + +static bool cam_icp_is_over_clk(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data, + struct cam_icp_clk_info *hw_mgr_clk_info) +{ + int base_clk_idx; + int curr_clk_idx; + + base_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data, + hw_mgr_clk_info->base_clk); + + curr_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data, + hw_mgr_clk_info->curr_clk); + + CAM_DBG(CAM_PERF, "%s: bc_idx = %d cc_idx = %d %d %d", + ctx_data->ctx_id_string, base_clk_idx, curr_clk_idx, + hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk); + + if (curr_clk_idx > base_clk_idx) + return true; + + return false; +} + +static int cam_icp_get_lower_clk_rate(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk) +{ + int i; + + i = cam_icp_get_actual_clk_rate_idx(ctx_data, base_clk); + + if (i > 0) + return ctx_data->clk_info.clk_rate[i - 1]; + + CAM_DBG(CAM_PERF, "%s: Already clk at lower level", ctx_data->ctx_id_string); + return base_clk; +} + +static int cam_icp_get_next_clk_rate(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk) +{ + int i; + + i = cam_icp_get_actual_clk_rate_idx(ctx_data, base_clk); + + if (i < CAM_MAX_VOTE - 1) + return ctx_data->clk_info.clk_rate[i + 1]; + + CAM_DBG(CAM_PERF, "%s: Already clk at higher level", ctx_data->ctx_id_string); + + return base_clk; +} + +static int cam_icp_get_actual_clk_rate(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk) +{ + int i; + + for (i = 0; i < CAM_MAX_VOTE; i++) + if (ctx_data->clk_info.clk_rate[i] >= base_clk) + return ctx_data->clk_info.clk_rate[i]; + + return base_clk; +} + +static int cam_icp_get_supported_clk_rates(struct cam_icp_hw_ctx_data *ctx_data) +{ + int i; + struct cam_hw_soc_info *soc_info; + struct cam_hw_intf *dev_intf = NULL; + struct cam_hw_info *dev = NULL; + + dev_intf = ctx_data->device_info->dev_intf[0]; + if (!dev_intf) { + CAM_ERR(CAM_ICP, "%s Invalid 
device intf for %s", + ctx_data->ctx_id_string, ctx_data->device_info->dev_name); + return -EINVAL; + } + + dev = (struct cam_hw_info *)dev_intf->hw_priv; + soc_info = &dev->soc_info; + + for (i = 0; i < CAM_MAX_VOTE; i++) { + ctx_data->clk_info.clk_rate[i] = + soc_info->clk_rate[i][soc_info->src_clk_idx]; + CAM_DBG(CAM_PERF, "%s: clk_info[%d] = %d", + ctx_data->ctx_id_string, i, ctx_data->clk_info.clk_rate[i]); + } + + return 0; +} + +static int cam_icp_get_frame_process_idx_from_req_id(struct cam_icp_hw_ctx_data *ctx_data, + uint64_t req_id) +{ + struct hfi_frame_process_info *frame_process; + int i; + + frame_process = &ctx_data->hfi_frame_process; + + for (i = 0; i < CAM_FRAME_CMD_MAX; i++) + if (frame_process->request_id[i] == req_id) + break; + + return i; +} + +static int cam_icp_ctx_clk_info_init(struct cam_icp_hw_ctx_data *ctx_data) +{ + int i; + + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + ctx_data->clk_info.uncompressed_bw = 0; + ctx_data->clk_info.compressed_bw = 0; + for (i = 0; i < CAM_ICP_MAX_PER_PATH_VOTES; i++) { + ctx_data->clk_info.axi_path[i].camnoc_bw = 0; + ctx_data->clk_info.axi_path[i].mnoc_ab_bw = 0; + ctx_data->clk_info.axi_path[i].mnoc_ib_bw = 0; + } + + cam_icp_get_supported_clk_rates(ctx_data); + + return 0; +} + +static bool cam_icp_frame_pending(struct cam_icp_hw_ctx_data *ctx_data) +{ + return !bitmap_empty(ctx_data->hfi_frame_process.bitmap, + CAM_FRAME_CMD_MAX); +} + +static int cam_icp_ctx_timer_reset(struct cam_icp_hw_ctx_data *ctx_data) +{ + if (ctx_data && ctx_data->watch_dog) { + ctx_data->watch_dog_reset_counter++; + CAM_DBG(CAM_PERF, "%s: reset timer : counter=%d", + ctx_data->ctx_id_string, ctx_data->watch_dog_reset_counter); + crm_timer_reset(ctx_data->watch_dog); + } + + return 0; +} + +static void cam_icp_device_timer_reset(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_device_info *dev_info) +{ + struct cam_icp_clk_info *clk_info = &dev_info->clk_info; + + if (clk_info->watch_dog) { + 
CAM_DBG(CAM_PERF, "[%s] reset timer for device: %s", + hw_mgr->hw_mgr_name, dev_info->dev_name); + crm_timer_reset(clk_info->watch_dog); + clk_info->watch_dog_reset_counter++; + } +} + +static int32_t cam_icp_deinit_idle_clk(void *priv, void *data) +{ + struct cam_icp_hw_mgr *hw_mgr = (struct cam_icp_hw_mgr *)priv; + struct clk_work_data *task_data = (struct clk_work_data *)data; + struct cam_icp_hw_device_info *dev_info = task_data->data; + struct cam_icp_clk_info *clk_info = &dev_info->clk_info; + struct cam_icp_hw_ctx_data *ctx_data = NULL; + struct cam_hw_intf *dev_intf = NULL; + struct cam_icp_dev_clk_update_cmd clk_upd_cmd; + int rc = 0, i; + bool busy = false; + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + clk_info->base_clk = 0; + clk_info->curr_clk = 0; + clk_info->over_clked = 0; + + list_for_each_entry(ctx_data, &hw_mgr->active_ctx_info.active_ctx_list, list) { + mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + if (ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED) { + if (ctx_data->device_info->hw_dev_type == dev_info->hw_dev_type) { + busy = cam_icp_frame_pending(ctx_data); + if (busy) { + mutex_unlock( + &hw_mgr->ctx_mutex[ctx_data->ctx_id]); + break; + } + cam_icp_ctx_clk_info_init(ctx_data); + } + } + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + } + + if (busy) { + cam_icp_device_timer_reset(hw_mgr, dev_info); + rc = -EBUSY; + goto done; + } + + if (!ctx_data) { + CAM_ERR(CAM_ICP, "[%s] No acquired ctx data found", hw_mgr->hw_mgr_name); + rc = -EFAULT; + goto done; + } + + clk_upd_cmd.dev_pc_enable = hw_mgr->dev_pc_flag; + + CAM_DBG(CAM_PERF, "[%s] Disable %d", + hw_mgr->hw_mgr_name, dev_info->dev_name); + + for (i = 0; i < dev_info->hw_dev_cnt; i++) { + dev_intf = dev_info->dev_intf[i]; + if (!dev_intf) { + CAM_ERR(CAM_ICP, "[%s] Device intf for %s[%u] is NULL", + hw_mgr->hw_mgr_name, dev_info->dev_name, i); + rc = -EINVAL; + goto done; + } + dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, CAM_ICP_DEV_CMD_DISABLE_CLK, + &clk_upd_cmd, 
sizeof(clk_upd_cmd)); + } + +done: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static inline bool cam_icp_validate_bw_path_idx( + int path_idx, uint32_t path_data_type) +{ + if (path_idx < 0) { + return true; + } else if (path_idx >= CAM_ICP_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_PERF, + "Invalid path: %u IPE start offset: %d, OFE start offset: %d max: %d", + path_data_type, + CAM_AXI_PATH_DATA_IPE_START_OFFSET, + CAM_AXI_PATH_DATA_OFE_START_OFFSET, + CAM_ICP_MAX_PER_PATH_VOTES); + return true; + } else { + return false; + } +} + +static inline int cam_icp_get_axi_path_index(struct cam_cpas_axi_per_path_bw_vote *axi_path, + enum cam_icp_hw_type hw_dev_type) +{ + switch (hw_dev_type) { + case CAM_ICP_DEV_BPS: + /* By assuming BPS has Read-All, Write-All votes only */ + return axi_path->transac_type - CAM_AXI_TRANSACTION_READ; + case CAM_ICP_DEV_IPE: + return axi_path->path_data_type - CAM_AXI_PATH_DATA_IPE_START_OFFSET; + case CAM_ICP_DEV_OFE: + return axi_path->path_data_type - CAM_AXI_PATH_DATA_OFE_START_OFFSET; + default: + CAM_ERR(CAM_ICP, "Invalid hw dev type not supported: %u", + hw_dev_type); + return -EINVAL; + } +} + +static inline int cam_icp_get_bw_device_share_ratio(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_device_info *dev_info) +{ + uint32_t device_share_ratio = 1, num_dev; + + /* + * Since there are 2 devices, we assume the load is evenly shared + * between HWs and corresponding AXI paths. 
So divide total bw by half + * to vote on each device + */ + + num_dev = dev_info->hw_dev_cnt; + if (num_dev > 1) { + device_share_ratio = ICP_TWO_DEV_BW_SHARE_RATIO; + if (num_dev > 2) { + CAM_ERR(CAM_ICP, + "[%s] Number of devices %u not supported for geting bw device share ratio", + hw_mgr->hw_mgr_name, num_dev); + return -EINVAL; + } + } + + return device_share_ratio; +} + +static int cam_icp_remove_ctx_bw(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data) +{ + int rc = 0; + struct cam_icp_hw_device_info *dev_info = NULL; + struct cam_hw_intf *dev_intf = NULL; + uint64_t temp, total_ab_bw = 0; + struct cam_icp_clk_info *clk_info; + struct cam_icp_cpas_vote clk_update; + int i = 0, device_share_ratio; + enum cam_icp_hw_type hw_dev_type; + + if (!ctx_data->icp_dev_acquire_info) { + CAM_WARN(CAM_ICP, "%s: NULL acquire info", ctx_data->ctx_id_string); + return -EINVAL; + } + + CAM_DBG(CAM_PERF, + "%s: ubw = %lld cbw = %lld curr_fc = %u bc = %u", + ctx_data->ctx_id_string, + ctx_data->clk_info.uncompressed_bw, + ctx_data->clk_info.compressed_bw, + ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk); + + if (!ctx_data->clk_info.bw_included) { + CAM_DBG(CAM_PERF, "%s: BW vote already removed", + ctx_data->ctx_id_string); + return 0; + } + + dev_info = ctx_data->device_info; + clk_info = &dev_info->clk_info; + hw_dev_type = dev_info->hw_dev_type; + + device_share_ratio = cam_icp_get_bw_device_share_ratio(hw_mgr, dev_info); + if (device_share_ratio < 0) { + CAM_ERR(CAM_ICP, "%s: Fail to get device share ratio", + ctx_data->ctx_id_string); + return -EINVAL; + } + + if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_V1) { + clk_update.axi_vote.num_paths = 1; + if (hw_dev_type == CAM_ICP_DEV_BPS) { + clk_update.axi_vote.axi_path[0].path_data_type = + CAM_BPS_DEFAULT_AXI_PATH; + clk_update.axi_vote.axi_path[0].transac_type = + CAM_BPS_DEFAULT_AXI_TRANSAC; + } else if (hw_dev_type == CAM_ICP_DEV_IPE) { + 
clk_update.axi_vote.axi_path[0].path_data_type = + CAM_IPE_DEFAULT_AXI_PATH; + clk_update.axi_vote.axi_path[0].transac_type = + CAM_IPE_DEFAULT_AXI_TRANSAC; + } else { + clk_update.axi_vote.axi_path[0].path_data_type = + CAM_OFE_DEFAULT_AXI_PATH; + clk_update.axi_vote.axi_path[0].transac_type = + CAM_OFE_DEFAULT_AXI_TRANSAC; + } + + clk_info->compressed_bw -= ctx_data->clk_info.compressed_bw; + clk_info->uncompressed_bw -= ctx_data->clk_info.uncompressed_bw; + + total_ab_bw = clk_info->compressed_bw; + + ctx_data->clk_info.uncompressed_bw = 0; + ctx_data->clk_info.compressed_bw = 0; + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + + clk_update.axi_vote.num_paths = 1; + + temp = clk_info->uncompressed_bw; + do_div(temp, device_share_ratio); + clk_update.axi_vote.axi_path[0].camnoc_bw = temp; + + temp = clk_info->compressed_bw; + do_div(temp, device_share_ratio); + clk_update.axi_vote.axi_path[0].mnoc_ab_bw = temp; + clk_update.axi_vote.axi_path[0].mnoc_ib_bw = temp; + } else { + int path_index; + + /* + * Remove previous vote of this context from hw mgr first. + * hw_mgr_clk_info has all valid paths, with each path in its + * own index. 
BW that we wanted to vote now is after removing + * current context's vote from hw mgr consolidated vote + */ + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = cam_icp_get_axi_path_index(&ctx_data->clk_info.axi_path[i], + hw_dev_type); + + if (cam_icp_validate_bw_path_idx(path_index, + ctx_data->clk_info.axi_path[i].path_data_type)) + continue; + + clk_info->axi_path[path_index].camnoc_bw -= + ctx_data->clk_info.axi_path[i].camnoc_bw; + clk_info->axi_path[path_index].mnoc_ab_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + clk_info->axi_path[path_index].mnoc_ib_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + + total_ab_bw += + clk_info->axi_path[path_index].mnoc_ab_bw; + + CAM_DBG(CAM_PERF, + "%s: Removing ctx bw from path_type: %s, transac_type: %s, camnoc_bw = %lld mnoc_ab_bw = %lld, mnoc_ib_bw = %lld", + ctx_data->ctx_id_string, + cam_cpas_axi_util_path_type_to_string( + ctx_data->clk_info.axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + ctx_data->clk_info.axi_path[i].transac_type), + ctx_data->clk_info.axi_path[i].camnoc_bw, + ctx_data->clk_info.axi_path[i].mnoc_ab_bw, + ctx_data->clk_info.axi_path[i].mnoc_ib_bw); + + CAM_DBG(CAM_PERF, + "%s: Final HW bw for path_type: %s, transac_type: %s, camnoc_bw = %lld mnoc_ab_bw = %lld, mnoc_ib_bw = %lld", + ctx_data->ctx_id_string, + cam_cpas_axi_util_path_type_to_string( + clk_info->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + clk_info->axi_path[i].transac_type), + clk_info->axi_path[i].camnoc_bw, + clk_info->axi_path[i].mnoc_ab_bw, + clk_info->axi_path[i].mnoc_ib_bw); + } + + memset(&ctx_data->clk_info.axi_path[0], 0, + CAM_ICP_MAX_PER_PATH_VOTES * + sizeof(struct cam_cpas_axi_per_path_bw_vote)); + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + + clk_update.axi_vote.num_paths = clk_info->num_paths; + memcpy(&clk_update.axi_vote.axi_path[0], + &clk_info->axi_path[0], + clk_update.axi_vote.num_paths * + sizeof(struct 
cam_cpas_axi_per_path_bw_vote)); + + if (device_share_ratio > 1) { + for (i = 0; i < clk_update.axi_vote.num_paths; i++) { + do_div( + clk_update.axi_vote.axi_path[i].camnoc_bw, + device_share_ratio); + do_div( + clk_update.axi_vote.axi_path[i].mnoc_ab_bw, + device_share_ratio); + do_div( + clk_update.axi_vote.axi_path[i].mnoc_ib_bw, + device_share_ratio); + } + } + } + + clk_update.axi_vote_valid = true; + + if (total_ab_bw == 0) { + /* If no more contexts are active, reduce AHB vote to minimum */ + clk_update.ahb_vote.type = CAM_VOTE_ABSOLUTE; + clk_update.ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + clk_update.ahb_vote_valid = true; + } else { + clk_update.ahb_vote_valid = false; + } + + /* + * If two devices, vote half bandwidth each on both devices. + * Total bw at mnoc - CPAS will take care of adding up. + * camnoc clk calculate is more accurate this way. + */ + + for (i = 0; i < dev_info->hw_dev_cnt; i++) { + dev_intf = dev_info->dev_intf[i]; + if (!dev_intf) { + CAM_ERR(CAM_ICP, "%s: Device intf for %s[%u] is NULL", + ctx_data->ctx_id_string, dev_info->dev_name, i); + return -EINVAL; + } + rc = dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, + CAM_ICP_DEV_CMD_VOTE_CPAS, &clk_update, sizeof(clk_update)); + if (rc) { + CAM_ERR(CAM_PERF, + "%s: Failed in updating cpas vote for %s cell idx: %u, rc=%d", + ctx_data->ctx_id_string, dev_info->dev_name, i, rc); + } + } + + ctx_data->clk_info.bw_included = false; + + CAM_DBG(CAM_PERF, "%s: X :curr_fc = %u bc = %u", ctx_data->ctx_id_string, + ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk); + + return rc; +} + + +static int32_t cam_icp_ctx_timer(void *priv, void *data) +{ + struct clk_work_data *task_data = (struct clk_work_data *)data; + struct cam_icp_hw_mgr *hw_mgr = priv; + struct cam_icp_hw_ctx_info *ctx_info = + (struct cam_icp_hw_ctx_info *)task_data->data; + struct cam_icp_hw_ctx_data *ctx_data; + int rc = 0, ctx_id; + + if (!ctx_info) { + CAM_ERR(CAM_ICP, "Null ICP ctx info"); + return -EINVAL; + } + 
+ ctx_id = ctx_info->ctx_id; + ctx_data = ctx_info->ctx_data; + if (!ctx_data) { + CAM_ERR(CAM_ICP, "ctx_data is NULL, failed to update clk"); + return -EINVAL; + } + + mutex_lock(&hw_mgr->ctx_mutex[ctx_id]); + if ((!test_bit(ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap)) || + (ctx_info->ctx_acquired_timestamp != + hw_mgr->ctx_acquired_timestamp[ctx_id])) { + CAM_WARN(CAM_ICP, "ctx data is released before accessing it, ctx_id: %u", + ctx_id); + goto end; + } + + CAM_DBG(CAM_PERF, + "%s: ubw = %lld cbw = %lld curr_fc = %u bc = %u", + ctx_data->ctx_id_string, + ctx_data->clk_info.uncompressed_bw, + ctx_data->clk_info.compressed_bw, + ctx_data->clk_info.curr_fc, + ctx_data->clk_info.base_clk); + + if ((ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) || + (ctx_data->watch_dog_reset_counter == 0)) { + CAM_DBG(CAM_PERF, "%s: state %d, counter=%d", + ctx_data->ctx_id_string, ctx_data->state, + ctx_data->watch_dog_reset_counter); + goto end; + } + + if (cam_icp_frame_pending(ctx_data)) { + cam_icp_ctx_timer_reset(ctx_data); + rc = -EBUSY; + goto end; + } + + cam_icp_remove_ctx_bw(hw_mgr, ctx_data); + +end: + mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]); + CAM_MEM_FREE(ctx_info); + return rc; +} + +static void cam_icp_ctx_timer_cb(struct timer_list *timer_data) +{ + int rc; + unsigned long flags; + struct crm_workq_task *task; + struct clk_work_data *task_data; + struct cam_req_mgr_timer *timer = + container_of(timer_data, struct cam_req_mgr_timer, sys_timer); + struct cam_icp_hw_ctx_data *ctx_data = timer->parent; + struct cam_icp_hw_ctx_info *ctx_info; + struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv; + + ctx_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_info), GFP_ATOMIC); + if (!ctx_info) { + CAM_ERR(CAM_ICP, "Failed in allocating ICP ctx info"); + return; + } + + ctx_info->ctx_data = ctx_data; + ctx_info->ctx_id = ctx_data->ctx_id; + ctx_info->ctx_acquired_timestamp = hw_mgr->ctx_acquired_timestamp[ctx_data->ctx_id]; + + 
spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
	task = cam_req_mgr_workq_get_task(hw_mgr->timer_work);
	if (!task) {
		CAM_ERR(CAM_ICP, "%s: empty task", ctx_data->ctx_id_string);
		/* No workq task available: free the payload here to avoid a leak */
		CAM_MEM_FREE(ctx_info);
		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
		return;
	}

	task_data = (struct clk_work_data *)task->payload;
	task_data->data = ctx_info;
	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
	task->process_cb = cam_icp_ctx_timer;
	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr,
		CRM_TASK_PRIORITY_0);
	if (rc) {
		CAM_ERR(CAM_ICP, "Failed at enqueuing task to workq, ctx_id: %d", ctx_info->ctx_id);
		/* Enqueue failed, so the worker will never free ctx_info; do it now */
		CAM_MEM_FREE(ctx_info);
	}

	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
}

/*
 * Per-device idle watchdog timeout callback.
 *
 * Runs in timer (atomic) context, hence the irqsave spinlock and the
 * deferral of real work: a workq task is queued with
 * cam_icp_deinit_idle_clk as its handler (presumably idle-clock
 * ramp-down — name-based, confirm against the handler).
 */
static void cam_icp_device_timer_cb(struct timer_list *timer_data)
{
	unsigned long flags;
	struct crm_workq_task *task;
	struct clk_work_data *task_data;
	struct cam_req_mgr_timer *timer =
		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);
	struct cam_icp_hw_device_info *dev_info = timer->parent;
	struct cam_icp_hw_mgr *hw_mgr = dev_info->clk_info.timeout_cb_data;

	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
	task = cam_req_mgr_workq_get_task(hw_mgr->timer_work);
	if (!task) {
		CAM_ERR(CAM_ICP, "[%s] empty task", hw_mgr->hw_mgr_name);
		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
		return;
	}

	task_data = (struct clk_work_data *)task->payload;
	task_data->data = timer->parent;
	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
	task->process_cb = cam_icp_deinit_idle_clk;
	/* NOTE(review): enqueue result deliberately ignored here? confirm */
	cam_req_mgr_workq_enqueue_task(task, hw_mgr,
		CRM_TASK_PRIORITY_0);
	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
}

/*
 * Derive the manager's SVS clock rate from the SoC clock table of the
 * first available device, preferring IPE over OFE. Falls back to
 * ICP_CLK_SVS_HZ when the table entry is unset/invalid.
 *
 * Returns 0 on success, -EINVAL when the chosen device interface is
 * missing, -ENODEV when neither IPE nor OFE exists.
 */
static int cam_icp_get_svs_clk_info(struct cam_icp_hw_mgr *hw_mgr)
{
	int32_t src_clk_idx;
	struct cam_hw_soc_info *soc_info;
	struct cam_hw_intf *dev_intf = NULL;
	struct cam_hw_info *dev = NULL;

	if (CAM_ICP_IS_DEV_HW_EXIST(hw_mgr->hw_cap_mask, CAM_ICP_DEV_IPE)) {
		dev_intf =
			hw_mgr->dev_info[hw_mgr->dev_info_idx[CAM_ICP_DEV_IPE]].dev_intf[0];
		if (!dev_intf) {
			CAM_ERR(CAM_ICP, "[%s] IPE dev intf is invalid", hw_mgr->hw_mgr_name);
			return -EINVAL;
		}
	} else if (CAM_ICP_IS_DEV_HW_EXIST(hw_mgr->hw_cap_mask, CAM_ICP_DEV_OFE)) {
		dev_intf = hw_mgr->dev_info[hw_mgr->dev_info_idx[CAM_ICP_DEV_OFE]].dev_intf[0];
		if (!dev_intf) {
			CAM_ERR(CAM_ICP, "[%s] OFE dev inf is invalid", hw_mgr->hw_mgr_name);
			return -EINVAL;
		}
	} else {
		CAM_ERR(CAM_ICP, "[%s] No supported device to get svs clock info",
			hw_mgr->hw_mgr_name);
		return -ENODEV;
	}

	dev = (struct cam_hw_info *)dev_intf->hw_priv;
	soc_info = &dev->soc_info;
	src_clk_idx = soc_info->src_clk_idx;
	hw_mgr->icp_svs_clk = soc_info->clk_rate[CAM_SVS_VOTE][src_clk_idx];

	/* Guard against an absent/zero table entry */
	if (hw_mgr->icp_svs_clk <= 0)
		hw_mgr->icp_svs_clk = ICP_CLK_SVS_HZ;

	CAM_DBG(CAM_PERF, "[%s] icp_svs_clk = %lld", hw_mgr->hw_mgr_name, hw_mgr->icp_svs_clk);
	return 0;
}

/*
 * Reset every device's clock bookkeeping to the SVS rate and default
 * AXI bandwidth, clear all cached per-path votes, and seed
 * icp_default_clk from the SVS rate. Always returns 0.
 */
static int cam_icp_clk_info_init(struct cam_icp_hw_mgr *hw_mgr)
{
	int i;
	struct cam_icp_clk_info *clk_info;

	for (i = 0; i < hw_mgr->num_dev_info; i++) {
		clk_info = &hw_mgr->dev_info[i].clk_info;

		clk_info->base_clk = hw_mgr->icp_svs_clk;
		clk_info->curr_clk = hw_mgr->icp_svs_clk;
		clk_info->threshold = ICP_OVER_CLK_THRESHOLD;
		clk_info->over_clked = 0;
		clk_info->uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
		clk_info->compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
		memset(clk_info->axi_path, 0,
			CAM_ICP_MAX_PER_PATH_VOTES * sizeof(struct cam_cpas_axi_per_path_bw_vote));

		clk_info->watch_dog_reset_counter = 0;
	}

	hw_mgr->icp_default_clk = hw_mgr->icp_svs_clk;

	return 0;
}

/*
 * Arm the per-context watchdog with cam_icp_ctx_timer_cb and clear its
 * reset counter. The period is 200 (presumably ms — confirm
 * crm_timer_init() units). Returns the crm_timer_init() result; the
 * counter is cleared and the debug print emitted even on failure.
 */
static int cam_icp_ctx_timer_start(struct cam_icp_hw_ctx_data *ctx_data)
{
	int rc = 0;

	rc = crm_timer_init(&ctx_data->watch_dog,
		200, ctx_data, &cam_icp_ctx_timer_cb);
	if (rc)
		CAM_ERR(CAM_ICP, "%s: Failed to start timer", ctx_data->ctx_id_string);

	ctx_data->watch_dog_reset_counter = 0;

	CAM_DBG(CAM_PERF, "%s: start timer", ctx_data->ctx_id_string);
	return rc;
}

/*
 * Arm an idle watchdog (ICP_DEVICE_IDLE_TIMEOUT) for every device that
 * does not already have one, routed to cam_icp_device_timer_cb.
 * Returns the last crm_timer_init() failure, if any (earlier failures
 * are only logged).
 */
static int cam_icp_device_timer_start(struct cam_icp_hw_mgr *hw_mgr)
{
	int rc = 0, i;
	struct cam_icp_clk_info *clk_info = NULL;

	for (i = 0; i < hw_mgr->num_dev_info; i++) {
		clk_info = &hw_mgr->dev_info[i].clk_info;
		if (!clk_info->watch_dog) {
			clk_info->timeout_cb_data = hw_mgr;
			rc = crm_timer_init(&clk_info->watch_dog,
				ICP_DEVICE_IDLE_TIMEOUT, &hw_mgr->dev_info[i],
				&cam_icp_device_timer_cb);
			if (rc)
				CAM_ERR(CAM_ICP, "[%s] Failed to start %s timer",
					hw_mgr->hw_mgr_name, hw_mgr->dev_info[i].dev_name);
			clk_info->watch_dog_reset_counter = 0;
		}
	}

	return rc;
}

/* Tear down the per-context watchdog, if armed. Always returns 0. */
static int cam_icp_ctx_timer_stop(struct cam_icp_hw_ctx_data *ctx_data)
{
	if (ctx_data->watch_dog) {
		CAM_DBG(CAM_PERF, "%s: stop timer", ctx_data->ctx_id_string);
		ctx_data->watch_dog_reset_counter = 0;
		crm_timer_exit(&ctx_data->watch_dog);
		ctx_data->watch_dog = NULL;
	}

	return 0;
}

/*
 * Tear down the idle watchdog of every device that currently has no
 * active contexts (dev_ctxt_cnt == 0); devices still in use keep
 * their timers.
 */
static void cam_icp_device_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
{
	struct cam_icp_hw_device_info *dev_info = NULL;
	struct cam_icp_clk_info *clk_info = NULL;
	int i;

	for (i = 0; i < hw_mgr->num_dev_info; i++) {
		dev_info = &hw_mgr->dev_info[i];
		clk_info = &dev_info->clk_info;
		if (!dev_info->dev_ctx_info.dev_ctxt_cnt && clk_info->watch_dog) {
			clk_info->watch_dog_reset_counter = 0;
			crm_timer_exit(&clk_info->watch_dog);
			clk_info->watch_dog = NULL;
		}
	}
}

/*
 * base_clk (Hz) = frame_cycles / budget-in-seconds, computed as
 * frame_cycles * 1e9 / budget (budget is in ns — see the caller
 * passing budget_ns). do_div() is used for 64-bit division.
 */
static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
	uint64_t budget)
{
	uint64_t base_clk;
	uint64_t mul = 1000000000;

	base_clk = frame_cycles * mul;
	do_div(base_clk, budget);

	CAM_DBG(CAM_PERF, "budget = %lld fc = %d ib = %lld base_clk = %lld",
		budget, frame_cycles,
		(long long)(frame_cycles * mul), base_clk);

	return base_clk;
}

/*
 * Return true when more than one request of this context is still
 * flagged as being processed by firmware, i.e. the current request is
 * not the only one in flight.
 */
static bool cam_icp_busy_prev_reqs(struct hfi_frame_process_info *frm_process,
	uint64_t req_id)
{
	int i;
	int cnt;

	for (i = 0, cnt = 0; i < CAM_FRAME_CMD_MAX; i++) {
if (frm_process->request_id[i] && frm_process->fw_process_flag[i]) {
			CAM_DBG(CAM_PERF, "r id = %lld busy = %d",
				frm_process->request_id[i],
				frm_process->fw_process_flag[i]);
			cnt++;
		}
	}
	/* "busy" means some request beyond the current one is in flight */
	if (cnt > 1)
		return true;

	return false;
}

/*
 * Recompute the device's aggregate base clock as the sum of the base
 * clocks of every ACQUIRED context on the same hw device type.
 * Always returns 0.
 */
static int cam_icp_calc_total_clk(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_clk_info *dev_clk_info, enum cam_icp_hw_type hw_dev_type)
{
	struct cam_icp_hw_ctx_data *ctx_data;

	dev_clk_info->base_clk = 0;
	list_for_each_entry(ctx_data, &hw_mgr->active_ctx_info.active_ctx_list, list) {
		if (ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED &&
			(ctx_data->device_info->hw_dev_type == hw_dev_type))
			dev_clk_info->base_clk += ctx_data->clk_info.base_clk;
	}

	return 0;
}

/*
 * Clock re-evaluation for the "busy" case (previous requests still in
 * flight). Returns true when dev_clk_info->curr_clk was changed and
 * must be pushed to hardware.
 */
static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_icp_clk_info *dev_clk_info,
	struct cam_icp_clk_bw_request *request_clk_info,
	uint32_t base_clk)
{
	uint32_t next_clk_level;
	uint32_t actual_clk;
	bool rc = false;

	/*
	 * 1. If the current request's frame cycles (fc) exceed the previous
	 *    frame's fc, recalculate the base clock:
	 *    - if the sum of base clocks exceeds the next available clock
	 *      level, set curr_clk to the sum of base clocks and clear the
	 *      over-clocked count;
	 *    - otherwise bump curr_clk to the next level and clear the
	 *      over-clocked count.
	 * 2. If the current fc is less than or equal to the previous frame's
	 *    fc, still bump the clock to the next available level when one
	 *    exists; if already at the highest rate, leave it unchanged.
	 */
	ctx_data->clk_info.base_clk = base_clk;
	dev_clk_info->over_clked = 0;
	if (request_clk_info->frame_cycles > ctx_data->clk_info.curr_fc) {
		cam_icp_calc_total_clk(hw_mgr, dev_clk_info,
			ctx_data->device_info->hw_dev_type);
		actual_clk = cam_icp_get_actual_clk_rate(hw_mgr,
			ctx_data, base_clk);
		if (dev_clk_info->base_clk > actual_clk) {
			dev_clk_info->curr_clk = dev_clk_info->base_clk;
		} else {
			next_clk_level = cam_icp_get_next_clk_rate(hw_mgr,
				ctx_data, dev_clk_info->curr_clk);
			dev_clk_info->curr_clk = next_clk_level;
		}
		rc = true;
	} else {
		next_clk_level =
			cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
				dev_clk_info->curr_clk);
		if (dev_clk_info->curr_clk < next_clk_level) {
			dev_clk_info->curr_clk = next_clk_level;
			rc = true;
		}
	}
	/* Remember this frame's cycle count for the next comparison */
	ctx_data->clk_info.curr_fc = request_clk_info->frame_cycles;

	return rc;
}

/*
 * Handle the over-clocked-while-free case: tolerate `threshold`
 * consecutive free frames before stepping the clock down one level and
 * re-balancing against the summed base clocks. Returns true when
 * curr_clk was adjusted.
 */
static bool cam_icp_update_clk_overclk_free(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data, struct cam_icp_clk_info *dev_clk_info,
	struct cam_icp_clk_bw_request *request_clk_info, uint32_t base_clk)
{
	int rc = false;

	/*
	 * No-pending-packets case:
	 * 1. While the over-clock count is below the threshold, just
	 *    increment it; no clock change.
	 * 2. Once the count reaches the threshold, lower the clock one
	 *    level and compare with the sum of base clocks:
	 *    a. still above the sum and genuinely over-clocked -> clear
	 *       the count;
	 *    b. below the sum -> step back up one level and clear the
	 *       count;
	 *    c. equal to the sum -> clear the count.
	 */
	if (dev_clk_info->over_clked < dev_clk_info->threshold) {
		dev_clk_info->over_clked++;
		rc = false;
	} else {
		dev_clk_info->curr_clk =
			cam_icp_get_lower_clk_rate(hw_mgr, ctx_data,
				dev_clk_info->curr_clk);
		if (dev_clk_info->curr_clk > dev_clk_info->base_clk) {
			if (cam_icp_is_over_clk(hw_mgr, ctx_data,
				dev_clk_info))
				dev_clk_info->over_clked = 0;
		} else if (dev_clk_info->curr_clk <
			dev_clk_info->base_clk) {
			dev_clk_info->curr_clk =
				cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
					dev_clk_info->curr_clk);
			dev_clk_info->over_clked = 0;
		} else if (dev_clk_info->curr_clk ==
			dev_clk_info->base_clk) {
			dev_clk_info->over_clked = 0;
		}
		rc = true;
	}

	return rc;
}

/*
 * Clock re-evaluation for the "free" case (no other request pending
 * on this context). Returns true when curr_clk changed.
 */
static bool cam_icp_update_clk_free(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_icp_clk_info *hw_mgr_clk_info,
	struct cam_icp_clk_bw_request *clk_info,
	uint32_t base_clk)
{
	int rc = false;
	bool over_clocked = false;

	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
	ctx_data->clk_info.base_clk = base_clk;
	cam_icp_calc_total_clk(hw_mgr, hw_mgr_clk_info, ctx_data->device_info->hw_dev_type);

	/*
	 * Current clock is not always sum of base clocks, due to
	 * clock scales update to next higher or lower levels, it
	 * equals to one of discrete clock values supported by hardware.
	 * So even current clock is higher than sum of base clocks, we
	 * can not consider it is over clocked. if it is greater than
	 * discrete clock level then only it is considered as over clock.
	 * 1. Handle over clock case
	 * 2. If current clock is less than sum of base clocks
	 * update current clock
	 * 3. 
If current clock is same as sum of base clocks no action
	 */

	over_clocked = cam_icp_is_over_clk(hw_mgr, ctx_data,
		hw_mgr_clk_info);

	if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk &&
		over_clocked) {
		rc = cam_icp_update_clk_overclk_free(hw_mgr, ctx_data,
			hw_mgr_clk_info, clk_info, base_clk);
	} else if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
		/* Higher than the sum but not above a discrete level: leave it */
		hw_mgr_clk_info->over_clked = 0;
		rc = false;
	} else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) {
		hw_mgr_clk_info->curr_clk = cam_icp_get_actual_clk_rate(hw_mgr,
			ctx_data, hw_mgr_clk_info->base_clk);
		rc = true;
	}

	return rc;
}

/*
 * Apply the debugfs-provided clock override when it is set and differs
 * from the current clock (bandwidth fields are overridden with the
 * same value). Returns true when an update was applied.
 */
static bool cam_icp_debug_clk_update(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_clk_info *hw_mgr_clk_info)
{
	if (hw_mgr->icp_debug_clk &&
		hw_mgr->icp_debug_clk != hw_mgr_clk_info->curr_clk) {
		hw_mgr_clk_info->base_clk = hw_mgr->icp_debug_clk;
		hw_mgr_clk_info->curr_clk = hw_mgr->icp_debug_clk;
		hw_mgr_clk_info->uncompressed_bw = hw_mgr->icp_debug_clk;
		hw_mgr_clk_info->compressed_bw = hw_mgr->icp_debug_clk;
		CAM_DBG(CAM_PERF, "[%s] bc = %d cc = %d",
			hw_mgr->hw_mgr_name, hw_mgr_clk_info->base_clk,
			hw_mgr_clk_info->curr_clk);
		return true;
	}

	return false;
}

/*
 * Reset clocks/bandwidth to the default (SVS-derived) rate when the
 * current clock differs. Returns true when an update was applied.
 */
static bool cam_icp_default_clk_update(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_clk_info *hw_mgr_clk_info)
{
	if (hw_mgr->icp_default_clk != hw_mgr_clk_info->curr_clk) {
		hw_mgr_clk_info->base_clk = hw_mgr->icp_default_clk;
		hw_mgr_clk_info->curr_clk = hw_mgr->icp_default_clk;
		hw_mgr_clk_info->uncompressed_bw = hw_mgr->icp_default_clk;
		hw_mgr_clk_info->compressed_bw = hw_mgr->icp_default_clk;
		CAM_DBG(CAM_PERF, "[%s] bc = %d cc = %d",
			hw_mgr->hw_mgr_name, hw_mgr_clk_info->base_clk,
			hw_mgr_clk_info->curr_clk);
		return true;
	}

	return false;
}

/*
 * Bandwidth-vote consolidation for BW config v2 (per-AXI-path votes).
 * Compares the incoming vote against the context's cached vote,
 * subtracts the old per-path contributions from the device aggregate,
 * caches the new vote, and adds it back in. Returns true when the
 * aggregate changed and a CPAS vote must be sent.
 */
static bool cam_icp_update_bw_v2(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_icp_clk_info *hw_mgr_clk_info,
	struct cam_icp_clk_bw_req_internal_v2 *clk_info,
	bool busy)
{
	int i, path_index;
	bool update_required = true;

	/*
	 * If current request bandwidth is different from previous frames, then
	 * recalculate bandwidth of all contexts of same hardware and update
	 * voting of bandwidth
	 */

	for (i = 0; i < clk_info->num_paths; i++)
		CAM_DBG(CAM_PERF, "%s: clk_info camnoc = %lld busy = %d",
			ctx_data->ctx_id_string, clk_info->axi_path[i].camnoc_bw, busy);

	/* Skip the whole rebalance when every path vote is unchanged */
	if (clk_info->num_paths == ctx_data->clk_info.num_paths) {
		update_required = false;
		for (i = 0; i < clk_info->num_paths; i++) {
			if ((clk_info->axi_path[i].transac_type ==
				ctx_data->clk_info.axi_path[i].transac_type) &&
				(clk_info->axi_path[i].path_data_type ==
				ctx_data->clk_info.axi_path[i].path_data_type) &&
				(clk_info->axi_path[i].camnoc_bw ==
				ctx_data->clk_info.axi_path[i].camnoc_bw) &&
				(clk_info->axi_path[i].mnoc_ab_bw ==
				ctx_data->clk_info.axi_path[i].mnoc_ab_bw)) {
				continue;
			} else {
				update_required = true;
				break;
			}
		}
	}

	if (!update_required) {
		CAM_DBG(CAM_PERF,
			"%s: Incoming BW hasn't changed, no update required, num_paths=%d",
			ctx_data->ctx_id_string, clk_info->num_paths);
		return false;
	}

	/* While busy, never lower an existing camnoc vote */
	if (busy) {
		for (i = 0; i < clk_info->num_paths; i++) {
			if (ctx_data->clk_info.axi_path[i].camnoc_bw >
				clk_info->axi_path[i].camnoc_bw)
				return false;
		}
	}

	/*
	 * Remove previous vote of this context from hw mgr first.
	 * hw_mgr_clk_info has all valid paths, with each path in its own index
	 */
	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {

		path_index = cam_icp_get_axi_path_index(&ctx_data->clk_info.axi_path[i],
			ctx_data->device_info->hw_dev_type);

		if (cam_icp_validate_bw_path_idx(path_index,
			ctx_data->clk_info.axi_path[i].path_data_type))
			continue;

		hw_mgr_clk_info->axi_path[path_index].camnoc_bw -=
			ctx_data->clk_info.axi_path[i].camnoc_bw;
		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -=
			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -=
			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
	}

	/* Cache the new request as this context's current vote */
	ctx_data->clk_info.num_paths = clk_info->num_paths;

	memcpy(&ctx_data->clk_info.axi_path[0],
		&clk_info->axi_path[0],
		clk_info->num_paths * sizeof(struct cam_cpas_axi_per_path_bw_vote));

	/*
	 * Add new vote of this context in hw mgr.
	 * hw_mgr_clk_info has all paths, with each path in its own index
	 */
	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
		path_index = cam_icp_get_axi_path_index(&ctx_data->clk_info.axi_path[i],
			ctx_data->device_info->hw_dev_type);

		if (cam_icp_validate_bw_path_idx(path_index,
			ctx_data->clk_info.axi_path[i].path_data_type))
			continue;

		hw_mgr_clk_info->axi_path[path_index].path_data_type =
			ctx_data->clk_info.axi_path[i].path_data_type;
		hw_mgr_clk_info->axi_path[path_index].transac_type =
			ctx_data->clk_info.axi_path[i].transac_type;
		hw_mgr_clk_info->axi_path[path_index].camnoc_bw +=
			ctx_data->clk_info.axi_path[i].camnoc_bw;
		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw +=
			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw +=
			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
		CAM_DBG(CAM_PERF,
			"%s: Consolidate Path Vote: i[%d] path_idx[%d] : [%s %s] [%lld %lld]",
			ctx_data->ctx_id_string,
			i, path_index,
			cam_cpas_axi_util_trans_type_to_string(
				hw_mgr_clk_info->axi_path[path_index].transac_type),
			cam_cpas_axi_util_path_type_to_string(
				hw_mgr_clk_info->axi_path[path_index].path_data_type),
			hw_mgr_clk_info->axi_path[path_index].camnoc_bw,
			hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw);
	}

	ctx_data->clk_info.bw_included = true;

	/* Aggregate path count only ever grows */
	if (hw_mgr_clk_info->num_paths < ctx_data->clk_info.num_paths)
		hw_mgr_clk_info->num_paths = ctx_data->clk_info.num_paths;

	return true;
}

/*
 * Bandwidth-vote consolidation for BW config v1 (single
 * compressed/uncompressed pair). Caches the new request on the context
 * and recomputes the device aggregate across all acquired contexts of
 * the same hw device type. Returns true when the aggregate changed.
 */
static bool cam_icp_update_bw(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_icp_clk_info *hw_mgr_clk_info,
	struct cam_icp_clk_bw_request *clk_info,
	bool busy)
{
	struct cam_icp_hw_ctx_data *ctx;

	/*
	 * If current request bandwidth is different from previous frames, then
	 * recalculate bandwidth of all contexts of same hardware and update
	 * voting of bandwidth
	 */
	CAM_DBG(CAM_PERF, "%s: ubw ctx = %lld clk_info ubw = %lld busy = %d",
		ctx_data->ctx_id_string, ctx_data->clk_info.uncompressed_bw,
		clk_info->uncompressed_bw, busy);

	if ((clk_info->uncompressed_bw == ctx_data->clk_info.uncompressed_bw) &&
		(ctx_data->clk_info.uncompressed_bw ==
		hw_mgr_clk_info->uncompressed_bw)) {
		CAM_DBG(CAM_PERF, "%s: Update not required bw=%lld",
			ctx_data->ctx_id_string, ctx_data->clk_info.uncompressed_bw);
		return false;
	}

	/* While busy, never lower the existing vote */
	if (busy &&
		(ctx_data->clk_info.uncompressed_bw >
		clk_info->uncompressed_bw)) {
		CAM_DBG(CAM_PERF,
			"%s: Busy, Update not required existing=%lld, new=%lld",
			ctx_data->ctx_id_string,
			ctx_data->clk_info.uncompressed_bw,
			clk_info->uncompressed_bw);
		return false;
	}

	ctx_data->clk_info.uncompressed_bw = clk_info->uncompressed_bw;
	ctx_data->clk_info.compressed_bw = clk_info->compressed_bw;
	hw_mgr_clk_info->uncompressed_bw = 0;
	hw_mgr_clk_info->compressed_bw = 0;

	list_for_each_entry(ctx, &hw_mgr->active_ctx_info.active_ctx_list, list) {
		if (ctx->state == CAM_ICP_CTX_STATE_ACQUIRED &&
			(ctx_data->device_info->hw_dev_type == ctx->device_info->hw_dev_type)) {
hw_mgr_clk_info->uncompressed_bw +=
				ctx->clk_info.uncompressed_bw;
			hw_mgr_clk_info->compressed_bw +=
				ctx->clk_info.compressed_bw;
			CAM_DBG(CAM_PERF,
				"%s: Current context=[%lld %lld] Total=[%lld %lld]",
				ctx_data->ctx_id_string,
				ctx->clk_info.uncompressed_bw,
				ctx->clk_info.compressed_bw,
				hw_mgr_clk_info->uncompressed_bw,
				hw_mgr_clk_info->compressed_bw);
		}
	}

	ctx_data->clk_info.bw_included = true;

	return true;
}

/*
 * Re-evaluate the device clock for the request in frame-process slot
 * `idx`. Kicks the context and device watchdog reset helpers, then
 * chooses the debug-override / default / busy / free update path.
 * Returns true when curr_clk changed and must be pushed to hardware.
 */
static bool cam_icp_check_clk_update(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data, int idx)
{
	bool busy, rc = false;
	uint32_t base_clk;
	struct cam_icp_clk_bw_request *request_clk_info;
	struct hfi_frame_process_info *frame_info;
	uint64_t req_id;
	struct cam_icp_hw_device_info *dev_info = ctx_data->device_info;
	struct cam_icp_clk_info *dev_clk_info;

	cam_icp_ctx_timer_reset(ctx_data);
	cam_icp_device_timer_reset(hw_mgr, dev_info);
	CAM_DBG(CAM_ICP, "Reset %s device timer", dev_info->dev_name);

	dev_clk_info = &dev_info->clk_info;

	/* debugfs override, when set, wins over everything else */
	if (hw_mgr->icp_debug_clk)
		return cam_icp_debug_clk_update(hw_mgr, dev_clk_info);

	/* Check is there any pending frames in this context */
	frame_info = &ctx_data->hfi_frame_process;
	req_id = frame_info->request_id[idx];
	busy = cam_icp_busy_prev_reqs(frame_info, req_id);
	CAM_DBG(CAM_PERF, "%s: busy = %d req_id = %lld",
		ctx_data->ctx_id_string, busy, req_id);

	/* No per-frame cycle budget supplied: fall back to the default clk */
	request_clk_info = &ctx_data->hfi_frame_process.clk_info[idx];
	if (!request_clk_info->frame_cycles)
		return cam_icp_default_clk_update(hw_mgr, dev_clk_info);

	ctx_data->clk_info.rt_flag = request_clk_info->rt_flag;

	/* Override base clock to max or calculate base clk rate */
	if (!ctx_data->clk_info.rt_flag &&
		(dev_info->hw_dev_type != CAM_ICP_DEV_BPS))
		base_clk = ctx_data->clk_info.clk_rate[CAM_MAX_VOTE-1];
	else
		base_clk = cam_icp_mgr_calc_base_clk(request_clk_info->frame_cycles,
			request_clk_info->budget_ns);

	if (busy)
		rc = cam_icp_update_clk_busy(hw_mgr, ctx_data,
			dev_clk_info, request_clk_info, base_clk);
	else
		rc = cam_icp_update_clk_free(hw_mgr, ctx_data,
			dev_clk_info, request_clk_info, base_clk);

	CAM_DBG(CAM_PERF, "%s: bc = %d cc = %d busy = %d overclk = %d uc = %d",
		ctx_data->ctx_id_string, dev_clk_info->base_clk, dev_clk_info->curr_clk,
		busy, dev_clk_info->over_clked, rc);

	return rc;
}

/*
 * Re-evaluate the bandwidth vote for the request in slot `idx`,
 * dispatching to the v1 or v2 consolidation routine according to the
 * context's bw_config_version. Returns true when the device aggregate
 * changed; false on no-change or on an invalid version/path count.
 */
static bool cam_icp_check_bw_update(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data, int idx)
{
	bool busy, bw_updated = false;
	int i;
	struct cam_icp_clk_bw_request *request_clk_info;
	struct cam_icp_clk_bw_req_internal_v2 *request_clk_info_v2;
	struct cam_icp_clk_info *dev_clk_info;
	struct hfi_frame_process_info *frame_info;
	uint64_t req_id;

	dev_clk_info = &ctx_data->device_info->clk_info;
	frame_info = &ctx_data->hfi_frame_process;
	req_id = frame_info->request_id[idx];
	busy = cam_icp_busy_prev_reqs(frame_info, req_id);

	if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_V1) {
		request_clk_info = &ctx_data->hfi_frame_process.clk_info[idx];

		CAM_DBG(CAM_PERF, "%s: Req[%lld] Current camno=%lld, mnoc=%lld",
			ctx_data->ctx_id_string, req_id, dev_clk_info->uncompressed_bw,
			dev_clk_info->compressed_bw);

		bw_updated = cam_icp_update_bw(hw_mgr, ctx_data,
			dev_clk_info, request_clk_info, busy);
	} else if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_V2) {
		request_clk_info_v2 = &ctx_data->hfi_frame_process.clk_info_v2[idx];

		/* Bounds-check both path counts before touching the arrays */
		if (request_clk_info_v2->num_paths > CAM_ICP_MAX_PER_PATH_VOTES ||
			dev_clk_info->num_paths > CAM_ICP_MAX_PER_PATH_VOTES) {
			CAM_ERR(CAM_ICP, "Invalid num of clk path req num path:%d, dev num path:%d"
				, request_clk_info_v2->num_paths, dev_clk_info->num_paths);
			return false;
		}

		CAM_DBG(CAM_PERF, "%s: index=%d, num_paths=%d, ctx_data=%pK",
			ctx_data->ctx_id_string, idx, request_clk_info_v2->num_paths, ctx_data);

		bw_updated = cam_icp_update_bw_v2(hw_mgr, ctx_data,
			dev_clk_info, request_clk_info_v2, busy);

		for (i = 0; i < dev_clk_info->num_paths; i++) {
			CAM_DBG(CAM_PERF,
				"%s: Final path_type: %s, transac_type: %s, camnoc_bw = %lld mnoc_ab_bw = %lld, mnoc_ib_bw = %lld",
				ctx_data->ctx_id_string,
				cam_cpas_axi_util_path_type_to_string(
					dev_clk_info->axi_path[i].path_data_type),
				cam_cpas_axi_util_trans_type_to_string(
					dev_clk_info->axi_path[i].transac_type),
				dev_clk_info->axi_path[i].camnoc_bw,
				dev_clk_info->axi_path[i].mnoc_ab_bw,
				dev_clk_info->axi_path[i].mnoc_ib_bw);
		}
	} else {
		CAM_ERR(CAM_PERF, "%s: Invalid bw config version: %d",
			hw_mgr->hw_mgr_name, ctx_data->bw_config_version);
		return false;
	}

	return bw_updated;
}

/*
 * Push `curr_clk_rate` to every cell of the context's device and, for
 * non-BPS devices, mirror the resulting clock level (filled in by the
 * device via clk_upd_cmd.clk_level) to the ICP processor. Returns 0 on
 * success, -EINVAL on a missing device interface.
 */
static int cam_icp_update_clk_util(
	uint32_t curr_clk_rate,
	struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data)
{
	int i;
	struct cam_icp_dev_clk_update_cmd clk_upd_cmd;
	struct cam_hw_intf *dev_intf = NULL;

	clk_upd_cmd.curr_clk_rate = curr_clk_rate;
	clk_upd_cmd.dev_pc_enable = hw_mgr->dev_pc_flag;
	clk_upd_cmd.clk_level = -1;

	for (i = 0; i < ctx_data->device_info->hw_dev_cnt; i++) {
		dev_intf = ctx_data->device_info->dev_intf[i];
		if (!dev_intf) {
			CAM_ERR(CAM_ICP, "Device intf for %s[%u] is NULL",
				ctx_data->device_info->dev_name, i);
			return -EINVAL;
		}
		dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, CAM_ICP_DEV_CMD_UPDATE_CLK,
			&clk_upd_cmd, sizeof(struct cam_icp_dev_clk_update_cmd));
	}

	/* Scale ICP clock to IPE clk rate or OFE clk rate */
	if (ctx_data->device_info->hw_dev_type != CAM_ICP_DEV_BPS) {
		/* update ICP Proc clock */
		CAM_DBG(CAM_PERF, "%s: Update ICP clk to level [%d]",
			ctx_data->ctx_id_string, clk_upd_cmd.clk_level);
		dev_intf = hw_mgr->icp_dev_intf;
		if (!dev_intf) {
			CAM_ERR(CAM_ICP, "Device interface is NULL");
			return -EINVAL;
		}
		dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, CAM_ICP_CMD_CLK_UPDATE,
			&clk_upd_cmd.clk_level, sizeof(clk_upd_cmd.clk_level));
	}

	return 0;
}

static int cam_icp_update_clk_rate(struct 
cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data)
{
	uint32_t curr_clk_rate;
	struct cam_icp_clk_info *dev_clk_info = NULL;
	char tmp_buff[64];

	dev_clk_info = &ctx_data->device_info->clk_info;
	/* Record the before/after rates with CPAS for profiling */
	scnprintf(tmp_buff, sizeof(tmp_buff), "%s Before clk update rate=%u",
		ctx_data->ctx_id_string,
		dev_clk_info->prev_clk);
	cam_cpas_notify_event(tmp_buff, dev_clk_info->prev_clk);

	curr_clk_rate = dev_clk_info->curr_clk;
	dev_clk_info->prev_clk = curr_clk_rate;
	scnprintf(tmp_buff, sizeof(tmp_buff), "%s After clk update rate=%u",
		ctx_data->ctx_id_string,
		curr_clk_rate);
	cam_cpas_notify_event(tmp_buff, curr_clk_rate);

	CAM_DBG(CAM_PERF, "%s: clk_rate %u",
		ctx_data->ctx_id_string, curr_clk_rate);

	/* Skip the actual hw update while an abort is in flight */
	if (atomic_read(&hw_mgr->abort_in_process))
		return 0;

	return cam_icp_update_clk_util(curr_clk_rate, hw_mgr, ctx_data);
}

/*
 * Build and send the consolidated CPAS (AXI bandwidth) vote for the
 * context's device: a single default path for BW config v1, or a copy
 * of the per-path aggregate for v2, each divided by the device share
 * ratio, then sent to every cell of the device. Returns the last
 * process_cmd result, or -EINVAL on setup failure.
 */
static int cam_icp_update_cpas_vote(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data)
{
	int rc = 0, i = 0, device_share_ratio;
	uint64_t temp;
	struct cam_hw_intf *dev_intf = NULL;
	struct cam_icp_clk_info *dev_clk_info;
	struct cam_icp_cpas_vote clk_update = {0};
	enum cam_icp_hw_type hw_dev_type;

	dev_clk_info = &ctx_data->device_info->clk_info;
	hw_dev_type = ctx_data->device_info->hw_dev_type;

	device_share_ratio = cam_icp_get_bw_device_share_ratio(hw_mgr,
		ctx_data->device_info);
	if (device_share_ratio < 0) {
		CAM_ERR(CAM_ICP, "%s: Fail to get device share ratio",
			ctx_data->ctx_id_string);
		return -EINVAL;
	}

	/* AHB vote is carried but not asserted here */
	clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
	clk_update.ahb_vote.vote.freq = 0;
	clk_update.ahb_vote_valid = false;

	if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_V1) {
		/* v1: one default path keyed by the device type */
		clk_update.axi_vote.num_paths = 1;
		if (hw_dev_type == CAM_ICP_DEV_BPS) {
			clk_update.axi_vote.axi_path[0].path_data_type =
				CAM_BPS_DEFAULT_AXI_PATH;
			clk_update.axi_vote.axi_path[0].transac_type =
				CAM_BPS_DEFAULT_AXI_TRANSAC;
		} else if (hw_dev_type == CAM_ICP_DEV_IPE) {
			clk_update.axi_vote.axi_path[0].path_data_type =
				CAM_IPE_DEFAULT_AXI_PATH;
			clk_update.axi_vote.axi_path[0].transac_type =
				CAM_IPE_DEFAULT_AXI_TRANSAC;
		} else {
			clk_update.axi_vote.axi_path[0].path_data_type =
				CAM_OFE_DEFAULT_AXI_PATH;
			clk_update.axi_vote.axi_path[0].transac_type =
				CAM_OFE_DEFAULT_AXI_TRANSAC;
		}

		temp = dev_clk_info->uncompressed_bw;
		do_div(temp, device_share_ratio);
		clk_update.axi_vote.axi_path[0].camnoc_bw = temp;

		temp = dev_clk_info->compressed_bw;
		do_div(temp, device_share_ratio);
		clk_update.axi_vote.axi_path[0].mnoc_ab_bw = temp;
		clk_update.axi_vote.axi_path[0].mnoc_ib_bw = temp;
	} else {
		/* v2: copy the per-path aggregate, then split per cell */
		clk_update.axi_vote.num_paths = dev_clk_info->num_paths;
		memcpy(&clk_update.axi_vote.axi_path[0],
			&dev_clk_info->axi_path[0],
			clk_update.axi_vote.num_paths *
			sizeof(struct cam_cpas_axi_per_path_bw_vote));

		if (device_share_ratio > 1) {
			for (i = 0; i < clk_update.axi_vote.num_paths; i++) {
				do_div(
					clk_update.axi_vote.axi_path[i].camnoc_bw,
					device_share_ratio);
				do_div(
					clk_update.axi_vote.axi_path[i].mnoc_ab_bw,
					device_share_ratio);
				do_div(
					clk_update.axi_vote.axi_path[i].mnoc_ib_bw,
					device_share_ratio);
			}
		}
	}

	clk_update.axi_vote_valid = true;

	/*
	 * If two devices, vote half bandwidth each on both devices.
	 * Total bw at mnoc - CPAS will take care of adding up.
	 * camnoc clk calculate is more accurate this way.
	 */

	for (i = 0; i < ctx_data->device_info->hw_dev_cnt; i++) {
		dev_intf = ctx_data->device_info->dev_intf[i];
		if (!dev_intf) {
			CAM_ERR(CAM_ICP, "Device intf for %s[%u] is NULL",
				ctx_data->device_info->dev_name, i);
			return -EINVAL;
		}
		rc = dev_intf->hw_ops.process_cmd(dev_intf->hw_priv,
			CAM_ICP_DEV_CMD_VOTE_CPAS, &clk_update, sizeof(clk_update));
		if (rc) {
			CAM_ERR(CAM_PERF,
				"%s: Failed in updating cpas vote for %s cell idx: %u, rc=%d",
				ctx_data->ctx_id_string, ctx_data->device_info->dev_name, i, rc);
		}
	}

	return rc;
}

/*
 * Zero out all cached system-cache (LLCC SCID) configuration for the
 * context; logs an error for any entry still marked activated (it
 * should have been deactivated first).
 */
static void cam_icp_cpas_reset_sys_cache(
	struct cam_icp_hw_ctx_data *ctx_data)
{
	int i;
	struct cam_icp_sys_cache_cfg *sys_cache_cfg;

	sys_cache_cfg = &ctx_data->sys_cache_cfg;
	for (i = 0; i < sys_cache_cfg->num; i++) {
		if (sys_cache_cfg->scid_cfg[i].activated)
			CAM_ERR(CAM_ICP, "probably scid = %d deactivation failed",
				sys_cache_cfg->scid_cfg[i].scid_id);

		sys_cache_cfg->scid_cfg[i].scid_id = 0;
		sys_cache_cfg->scid_cfg[i].staling_distance = 0;
		sys_cache_cfg->scid_cfg[i].llcc_staling_mode = 0;
		sys_cache_cfg->scid_cfg[i].llcc_staling_op_type = 0;
		sys_cache_cfg->scid_cfg[i].activated = false;
	}

	CAM_DBG(CAM_ICP, "resetting completed of total number of sys cache %d",
		sys_cache_cfg->num);
	sys_cache_cfg->num = 0;
}

/*
 * Deactivate every activated LLCC SCID of the context, then reset the
 * cached configuration. Keeps going after individual failures so the
 * remaining SCIDs are still deactivated; returns the last deactivation
 * error seen (0 when all succeed).
 */
static int cam_icp_cpas_deactivate_llcc(
	struct cam_icp_hw_ctx_data *ctx_data)
{
	int i, rc = 0;
	int rc1 = 0;
	struct cam_icp_sys_cache_cfg *sys_cache_cfg;

	sys_cache_cfg = &ctx_data->sys_cache_cfg;
	for (i = 0; i < sys_cache_cfg->num; i++) {
		if (sys_cache_cfg->scid_cfg[i].activated) {
			rc = cam_cpas_deactivate_llcc(sys_cache_cfg->scid_cfg[i].scid_id);
			if (rc) {
				CAM_ERR(CAM_ICP,
					"llcc staling activation is failing cache: %d rc = %d",
					sys_cache_cfg->scid_cfg[i].scid_id, rc);
				rc1 = rc;
			} else {
				sys_cache_cfg->scid_cfg[i].activated = false;
			}
		}
	}

	cam_icp_cpas_reset_sys_cache(ctx_data);
	return rc1;
}

static int 
cam_icp_mgr_dev_clk_update(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data, int idx)
{
	int rc = 0;

	/* Clock and bandwidth updates are independent; results are OR-ed */
	if (cam_icp_check_clk_update(hw_mgr, ctx_data, idx))
		rc = cam_icp_update_clk_rate(hw_mgr, ctx_data);

	if (cam_icp_check_bw_update(hw_mgr, ctx_data, idx))
		rc |= cam_icp_update_cpas_vote(hw_mgr, ctx_data);

	return rc;
}

/*
 * Map a hw device type and cell count to its power-collapse core-info
 * bitmask. At most 2 cells are supported (only IPE uses the second
 * cell bit). Returns 0 on success, -EINVAL for an unknown type or an
 * unsupported cell count.
 */
static inline int cam_icp_mgr_get_core_info_mask(enum cam_icp_hw_type hw_dev_type,
	uint32_t dev_num, uint32_t *core_info_mask)
{
	if (dev_num > 2) {
		CAM_ERR(CAM_ICP, "%u devices core info mask is not supported", dev_num);
		return -EINVAL;
	}

	switch (hw_dev_type) {
	case CAM_ICP_DEV_BPS:
		*core_info_mask = ICP_PWR_CLP_BPS;
		break;
	case CAM_ICP_DEV_IPE:
		*core_info_mask = ICP_PWR_CLP_IPE0;
		if (dev_num > 1)
			*core_info_mask |= ICP_PWR_CLP_IPE1;
		break;
	case CAM_ICP_DEV_OFE:
		*core_info_mask = ICP_PWR_CLP_OFE;
		break;
	default:
		CAM_ERR(CAM_ICP, "Invalid hw device type: %u",
			hw_dev_type);
		return -EINVAL;
	}

	return 0;
}

/*
 * Power up the context's device on first use (ref-counted via
 * dev_ctxt_cnt): init every cell, issue a power-resume command when
 * device power-collapse is enabled, and send an HFI SET_PROPERTY
 * describing the power-collapse configuration to firmware.
 */
static int cam_icp_mgr_device_resume(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data)
{
	struct cam_icp_hw_device_info *dev_info = NULL;
	struct cam_hw_intf *dev_intf = NULL;
	struct hfi_cmd_prop *dbg_prop = NULL;
	uint32_t core_info_mask = 0, size;
	int rc = 0, i;
	enum cam_icp_hw_type hw_dev_type;
	uint32_t *prop_ref_data;

	hw_dev_type = ctx_data->device_info->hw_dev_type;
	dev_info = ctx_data->device_info;

	/* Only the first context on this device performs the resume */
	if (dev_info->dev_ctx_info.dev_ctxt_cnt++)
		goto end;

	for (i = 0; i < dev_info->hw_dev_cnt; i++) {
		dev_intf = dev_info->dev_intf[i];
		if (!dev_intf) {
			CAM_ERR(CAM_ICP, "Device intf for %s[%u] is NULL",
				dev_info->dev_name, i);
			rc = -EINVAL;
			goto end;
		}
		/* NOTE(review): init failure is logged but processing continues — confirm intended */
		rc = dev_intf->hw_ops.init(dev_intf->hw_priv, NULL, 0);
		if (rc) {
			CAM_ERR(CAM_ICP, "Fail to resume device %s[%u]",
				dev_info->dev_name, i);
		}
		if (hw_mgr->dev_pc_flag) {
			dev_intf->hw_ops.process_cmd(dev_intf->hw_priv,
				CAM_ICP_DEV_CMD_POWER_RESUME, NULL, 0);
		}
	}

	rc = cam_icp_mgr_get_core_info_mask(hw_dev_type, dev_info->hw_dev_cnt, &core_info_mask);
	if (rc) {
		CAM_ERR(CAM_ICP,
			"%s Fail to get core info mask for hw dev: %s ctx id: %u rc:%d",
			ctx_data->ctx_id_string, dev_info->dev_name, ctx_data->ctx_id, rc);
	}

	CAM_DBG(CAM_PERF, "%s core_info 0x%x",
		ctx_data->ctx_id_string, core_info_mask);

	size = sizeof(struct hfi_cmd_prop) + sizeof(struct hfi_dev_pc);
	dbg_prop = CAM_MEM_ZALLOC(size, GFP_KERNEL);
	if (!dbg_prop) {
		CAM_ERR(CAM_ICP, "%s Allocate command prop failed",
			ctx_data->ctx_id_string);
		rc = -ENOMEM;
		goto end;
	}

	dbg_prop->size = size;
	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
	dbg_prop->num_prop = 1;
	prop_ref_data = &dbg_prop->prop_data_flex[0];

	switch (hw_dev_type) {
	case CAM_ICP_DEV_IPE:
		fallthrough;
	case CAM_ICP_DEV_BPS:
		prop_ref_data[0] = HFI_PROP_SYS_IPEBPS_PC;
		break;
	case CAM_ICP_DEV_OFE:
		prop_ref_data[0] = HFI_PROP_SYS_OFE_PC;
		break;
	default:
		CAM_ERR(CAM_ICP, "%s Invalid hw dev type: %u",
			ctx_data->ctx_id_string, hw_dev_type);
		rc = -EINVAL;
		goto free_dbg_prop;
	}

	/* Payload layout: [0]=property id, [1]=pc enable flag, [2]=core mask */
	prop_ref_data[1] = hw_mgr->dev_pc_flag;
	prop_ref_data[2] = core_info_mask;

	hfi_write_cmd(hw_mgr->hfi_handle, dbg_prop);

free_dbg_prop:
	CAM_MEM_FREE(dbg_prop);

end:
	return rc;
}

/*
 * Drop one context reference on the device; when the last reference
 * goes away, power-collapse each cell (if enabled and not in
 * recovery), deinit it, then release the VM resource-manager
 * ownership for the device's hw id.
 */
static int cam_icp_mgr_dev_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_icp_hw_ctx_data *ctx_data, int dev_type)
{
	int rc = 0, i;
	struct cam_icp_hw_device_info *dev_info = NULL;
	struct cam_hw_intf *dev_intf = NULL;
	enum cam_icp_hw_type hw_dev_type;
	uint32_t hw_id;

	if (!ctx_data) {
		CAM_ERR(CAM_ICP, "Invalid ctx data is NULL");
		return -EINVAL;
	}

	dev_info = ctx_data->device_info;

	CAM_DBG(CAM_PERF, "%s: device %s ctx cnt: %u",
		ctx_data->ctx_id_string, dev_info->dev_name,
		dev_info->dev_ctx_info.dev_ctxt_cnt);

	/* Only the last context on this device powers it down */
	if (--dev_info->dev_ctx_info.dev_ctxt_cnt)
		goto end;

	for (i = 0; i < dev_info->hw_dev_cnt; i++) {
		dev_intf = dev_info->dev_intf[i];
		if (!dev_intf) {
			CAM_ERR(CAM_ICP, "%s Device intf for %s[%u] is NULL",
				ctx_data->ctx_id_string, dev_info->dev_name, i);
			return -EINVAL;
		}
		if (hw_mgr->dev_pc_flag && !atomic_read(&hw_mgr->recovery)) {
			dev_intf->hw_ops.process_cmd(dev_intf->hw_priv,
				CAM_ICP_DEV_CMD_POWER_COLLAPSE, NULL, 0);
		}
		dev_intf->hw_ops.deinit(dev_intf->hw_priv, NULL, 0);
	}

	hw_dev_type = dev_info->hw_dev_type;
	switch (hw_dev_type) {
	case CAM_ICP_DEV_BPS:
		hw_id = CAM_HW_ID_BPS;
		break;
	case CAM_ICP_DEV_IPE:
		hw_id = CAM_HW_ID_IPE;
		break;
	case CAM_ICP_DEV_OFE:
		hw_id = CAM_HW_ID_OFE;
		break;
	default:
		CAM_ERR(CAM_ICP, "Invalid hw dev type: %d", hw_dev_type);
		return -EINVAL;
	}

	rc = cam_vmrm_soc_release_resources(hw_id);
	if (rc) {
		CAM_ERR(CAM_ICP, "hw id %x vmrm soc release resources failed", hw_id);
		return rc;
	}

end:
	return rc;
}

/*
 * Send a power-collapse command to every cell of every device. No-op
 * when device power collapse is disabled. Returns the last
 * process_cmd result, or -EINVAL on a missing device interface.
 */
static int cam_icp_mgr_dev_get_gdsc_control(
	struct cam_icp_hw_mgr *hw_mgr)
{
	int rc = 0, i, j;
	struct cam_hw_intf *dev_intf;

	if (!hw_mgr->dev_pc_flag)
		return rc;

	for (i = 0; i < hw_mgr->num_dev_info; i++) {
		for (j = 0; j < hw_mgr->dev_info[i].hw_dev_cnt; j++) {
			dev_intf = hw_mgr->dev_info[i].dev_intf[j];
			if (!dev_intf) {
				CAM_ERR(CAM_ICP, "Device intf for %s[%u] is NULL",
					hw_mgr->dev_info[i].dev_name, j);
				return -EINVAL;
			}
			rc = dev_intf->hw_ops.process_cmd(dev_intf->hw_priv,
				CAM_ICP_DEV_CMD_POWER_COLLAPSE, NULL, 0);
		}
	}

	return rc;
}

/* debugfs setter for the clock override (0 disables the override) */
static int cam_icp_set_dbg_default_clk(void *data, u64 val)
{
	struct cam_icp_hw_mgr *hw_mgr = data;

	hw_mgr->icp_debug_clk = val;
	return 0;
}

/* debugfs getter for the clock override */
static int cam_icp_get_dbg_default_clk(void *data, u64 *val)
{
	struct cam_icp_hw_mgr *hw_mgr = data;

	*val = hw_mgr->icp_debug_clk;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_default_clk,
	cam_icp_get_dbg_default_clk,
	cam_icp_set_dbg_default_clk, "%16llu");

static int 
cam_icp_set_icp_dbg_lvl(void *data, u64 val) +{ + struct cam_icp_hw_mgr *hw_mgr = data; + + hw_mgr->icp_dbg_lvl = val; + return 0; +} + +static int cam_icp_get_icp_dbg_lvl(void *data, u64 *val) +{ + struct cam_icp_hw_mgr *hw_mgr = data; + + *val = hw_mgr->icp_dbg_lvl; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_fs, cam_icp_get_icp_dbg_lvl, + cam_icp_set_icp_dbg_lvl, "%08llu"); + +static int cam_icp_set_icp_dbg_type(void *data, u64 val) +{ + struct cam_icp_hw_mgr *hw_mgr = data; + + if (val <= NUM_HFI_DEBUG_MODE) + hw_mgr->icp_debug_type = val; + + return 0; +} + +static int cam_icp_get_icp_dbg_type(void *data, u64 *val) +{ + struct cam_icp_hw_mgr *hw_mgr = data; + + *val = hw_mgr->icp_debug_type; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_type_fs, cam_icp_get_icp_dbg_type, + cam_icp_set_icp_dbg_type, "%08llu"); + +static int cam_icp_set_icp_fw_hang_dump_lvl(void *data, u64 val) +{ + struct cam_icp_hw_mgr *hw_mgr = data; + + if (val < NUM_HFI_DUMP_LVL) + hw_mgr->icp_fw_dump_lvl = val; + + return 0; +} + +static int cam_icp_get_icp_fw_hang_dump_lvl(void *data, u64 *val) +{ + struct cam_icp_hw_mgr *hw_mgr = data; + + *val = hw_mgr->icp_fw_dump_lvl; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_icp_debug_fw_dump, cam_icp_get_icp_fw_hang_dump_lvl, + cam_icp_set_icp_fw_hang_dump_lvl, "%08llu"); + +static int cam_icp_set_icp_fw_ramdump_lvl(void *data, u64 val) +{ + struct cam_icp_hw_mgr *hw_mgr = data; + + if (val < NUM_HFI_RAMDUMP_LVLS) + hw_mgr->icp_fw_ramdump_lvl = (uint32_t)val; + return 0; +} + +static int cam_icp_get_icp_fw_ramdump_lvl(void *data, u64 *val) +{ + struct cam_icp_hw_mgr *hw_mgr = data; + + *val = hw_mgr->icp_fw_ramdump_lvl; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_icp_debug_fw_ramdump, cam_icp_get_icp_fw_ramdump_lvl, + cam_icp_set_icp_fw_ramdump_lvl, "%08llu"); + +#ifdef CONFIG_CAM_TEST_ICP_FW_DOWNLOAD +static ssize_t cam_icp_mgr_get_icp_status(struct file *file, char __user *ubuf, size_t size, + loff_t *loff_t) +{ + 
struct cam_icp_hw_mgr *hw_mgr = (struct cam_icp_hw_mgr *) file->private_data; + + if (!hw_mgr) { + CAM_ERR(CAM_ICP, "hw_mgr is NULL"); + return -EINVAL; + } + CAM_INFO(CAM_ICP, "ICP operating in %s mode, %s state, hfi_init is %s, ICP is %s", + ((CAM_IS_SECONDARY_VM()) ? "TVM" : "PVM"), + ((hw_mgr->icp_booted) ? "booted up" : "shutdown"), + ((hw_mgr->hfi_init_done) ? "done" : "not done"), + ((hw_mgr->icp_resumed) ? "ICP resumed" : "ICP in power collapse")); + + return 0; +} + +static ssize_t cam_icp_hw_mgr_fw_load_unload( + struct file *file, const char __user *ubuf, + size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[16]; + struct cam_icp_hw_mgr *hw_mgr = file->private_data; + struct cam_icp_mgr_hw_args power_args = {0}; + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) + return -EFAULT; + + if (!hw_mgr) { + CAM_ERR(CAM_ICP, "hw_mgr is NULL"); + return -EINVAL; + } + + if (!hw_mgr->icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is invalid", + hw_mgr->hw_mgr_name); + return -EINVAL; + } + + power_args.hfi_setup = true; + power_args.use_proxy_boot_up = CAM_IS_SECONDARY_VM(); + power_args.icp_pc = hw_mgr->icp_pc_flag; + + CAM_DBG(CAM_ICP, "%s command:%s", hw_mgr->hw_mgr_name, input_buf); + + if (strcmp(input_buf, "load\n") == 0) { + rc = cam_mem_mgr_init(); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] memmgr init failed rc: %d", + hw_mgr->hw_mgr_name, rc); + goto end; + } + cam_icp_mgr_hw_open(hw_mgr, &power_args); + } else if (strcmp(input_buf, "unload\n") == 0) { + cam_icp_mgr_hw_close(hw_mgr, &power_args); + cam_mem_mgr_deinit(); + } else if (strcmp(input_buf, "resume\n") == 0) { + cam_icp_mgr_icp_resume(hw_mgr, &power_args); + } else if (strcmp(input_buf, "suspend\n") == 0) { + cam_icp_mgr_icp_power_collapse(hw_mgr, &power_args); + } else { +#ifndef CONFIG_ARCH_QTI_VM +#ifndef CONFIG_SPECTRA_VMRM + power_args.hfi_setup = false; + if (strcmp(input_buf, "proxy_load\n") == 0) { + rc = cam_mem_mgr_init(); + if (rc) { + CAM_ERR(CAM_ICP, 
"[%s] memmgr init failed rc: %d",
+					hw_mgr->hw_mgr_name, rc);
+				goto end;
+			}
+			cam_icp_mgr_hw_open(hw_mgr, &power_args);
+		} else if (strcmp(input_buf, "proxy_unload\n") == 0) {
+			cam_icp_mgr_hw_close(hw_mgr, &power_args);
+			cam_mem_mgr_deinit();
+		} else if (strcmp(input_buf, "proxy_resume\n") == 0) {
+			cam_icp_mgr_icp_resume(hw_mgr, &power_args);
+		} else if (strcmp(input_buf, "proxy_suspend\n") == 0) {
+			cam_icp_mgr_icp_power_collapse(hw_mgr, &power_args);
+		} else
+#endif
+#endif
+		{
+			CAM_WARN(CAM_ICP, "[%s] Invalid input: %s",
+				hw_mgr->hw_mgr_name, input_buf);
+		}
+	}
+
+end:
+	return size;
+}
+
+/* fops for the icp_fw_load_unload debugfs node (test builds only) */
+static const struct file_operations cam_icp_hw_mgr_fw_load_options = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = cam_icp_hw_mgr_fw_load_unload,
+	.read = cam_icp_mgr_get_icp_status,
+};
+#endif
+
+#ifdef CONFIG_CAM_TEST_IRQ_LINE
+
+/* Exercise the ICP IRQ line via the device's test_irq_line hook; the outcome
+ * is logged only. */
+static int cam_icp_test_irq_line(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = -EINVAL;
+	struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf;
+
+	if (!icp_dev_intf) {
+		CAM_ERR(CAM_ICP, "ICP device interface is NULL");
+		return -EINVAL;
+	}
+
+	if (icp_dev_intf->hw_ops.test_irq_line)
+		rc = icp_dev_intf->hw_ops.test_irq_line(icp_dev_intf->hw_priv);
+
+	if (rc)
+		CAM_ERR(CAM_ICP, "[%s] failed to verify IRQ line", hw_mgr->hw_mgr_name);
+	else
+		CAM_INFO(CAM_ICP, "[%s] successfully verified IRQ line", hw_mgr->hw_mgr_name);
+
+	/* NOTE(review): 0 is returned even when verification failed — confirm
+	 * callers intentionally ignore the result.
+	 */
+	return 0;
+}
+
+#else
+
+/* Stub when IRQ line testing is compiled out */
+static int cam_icp_test_irq_line(struct cam_icp_hw_mgr *hw_mgr)
+{
+	CAM_ERR(CAM_ICP, "[%s] IRQ line verification disabled", hw_mgr->hw_mgr_name);
+	return -EPERM;
+}
+
+#endif
+
+#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE))
+
+/* Run the IRQ line test during probe when both config options are enabled */
+static int cam_icp_test_irq_line_at_probe(struct cam_icp_hw_mgr *hw_mgr)
+{
+	return cam_icp_test_irq_line(hw_mgr);
+}
+
+#else
+
+static int cam_icp_test_irq_line_at_probe(struct cam_icp_hw_mgr *hw_mgr)
+{
+	return 0;
+}
+
+#endif
+
+/* debugfs write: any value triggers an IRQ line test */
+static int cam_icp_set_irq_line_test(void *data, u64 val)
+{
+	
cam_icp_test_irq_line(data);
+	return 0;
+}
+
+/* debugfs read: nothing to report for the IRQ line test node */
+static int cam_icp_get_irq_line_test(void *data, u64 *val)
+{
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_irq_line_test, cam_icp_get_irq_line_test,
+	cam_icp_set_irq_line_test, "%08llu");
+
+/*
+ * Create the per-HW-mgr debugfs directory and all tuning/test nodes.
+ * Also initializes the default FW dump and ramdump levels; note these
+ * defaults are set even when directory creation fails (the end label sits
+ * before them).
+ */
+static int cam_icp_hw_mgr_create_debugfs_entry(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct dentry *dbgfileptr = NULL;
+
+	if (!cam_debugfs_available())
+		return 0;
+
+	rc = cam_debugfs_create_subdir(hw_mgr->hw_mgr_name, &dbgfileptr);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "[%s] DebugFS could not create directory", hw_mgr->hw_mgr_name);
+		rc = -ENOENT;
+		goto end;
+	}
+
+	/* Store parent inode for cleanup in caller */
+	hw_mgr->dentry = dbgfileptr;
+
+	debugfs_create_bool("icp_pc", 0644, hw_mgr->dentry,
+		&hw_mgr->icp_pc_flag);
+
+	debugfs_create_bool("dev_interframe_pc", 0644, hw_mgr->dentry,
+		&hw_mgr->dev_pc_flag);
+
+	debugfs_create_file("icp_debug_clk", 0644,
+		hw_mgr->dentry, hw_mgr, &cam_icp_debug_default_clk);
+
+	debugfs_create_bool("icp_jtag_debug", 0644,
+		hw_mgr->dentry, &hw_mgr->icp_jtag_debug);
+
+	debugfs_create_file("icp_debug_type", 0644,
+		hw_mgr->dentry, hw_mgr, &cam_icp_debug_type_fs);
+
+	debugfs_create_file("icp_debug_lvl", 0644,
+		hw_mgr->dentry, hw_mgr, &cam_icp_debug_fs);
+
+	debugfs_create_file("icp_fw_dump_lvl", 0644,
+		hw_mgr->dentry, hw_mgr, &cam_icp_debug_fw_dump);
+
+	debugfs_create_file("icp_fw_ramdump_lvl", 0644,
+		hw_mgr->dentry, hw_mgr, &cam_icp_debug_fw_ramdump);
+
+	debugfs_create_bool("disable_ubwc_comp", 0644,
+		hw_mgr->dentry, &hw_mgr->disable_ubwc_comp);
+
+#ifdef CONFIG_CAM_TEST_ICP_FW_DOWNLOAD
+	debugfs_create_file("icp_fw_load_unload", 0644,
+		hw_mgr->dentry, hw_mgr, &cam_icp_hw_mgr_fw_load_options);
+#endif
+	debugfs_create_file("test_irq_line", 0644,
+		hw_mgr->dentry, hw_mgr, &cam_icp_irq_line_test);
+
+end:
+
+	/* Set default hang dump lvl */
+	hw_mgr->icp_fw_dump_lvl = HFI_FW_DUMP_ON_FAILURE;
+
+	/* By default, FW ramdump is disabled on both ICPs to avoid potential perf 
issue */
+	hw_mgr->icp_fw_ramdump_lvl = HFI_FW_RAMDUMP_DISABLED;
+
+	return rc;
+}
+
+/*
+ * Worker callback: push one queued HFI command packet to the command queue.
+ * Rejected with -EAGAIN while SSR recovery is in progress.
+ */
+static int cam_icp_mgr_process_cmd(void *priv, void *data)
+{
+	int rc;
+	struct hfi_cmd_work_data *task_data = NULL;
+	struct cam_icp_hw_mgr *hw_mgr;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	hw_mgr = priv;
+
+	/* Block any HFI writes if SSR is in progress */
+	if (atomic_read(&hw_mgr->recovery))
+		return -EAGAIN;
+
+	task_data = (struct hfi_cmd_work_data *)data;
+
+	rc = hfi_write_cmd(hw_mgr->hfi_handle, task_data->data);
+
+	return rc;
+}
+
+/*
+ * Flush a context's in-flight frame bookkeeping: signal every pending request
+ * back as an error buf-done, destroy merged input sync objects, and clear the
+ * free-resource list.
+ */
+static int cam_icp_mgr_cleanup_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	struct hfi_frame_process_info *hfi_frame_process;
+	struct cam_icp_hw_buf_done_evt_data icp_evt_data;
+	struct cam_hw_done_event_data buf_data = {0};
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (!hfi_frame_process->request_id[i])
+			continue;
+		buf_data.request_id = hfi_frame_process->request_id[i];
+		icp_evt_data.evt_id = CAM_CTX_EVT_ID_ERROR;
+		icp_evt_data.buf_done_data = &buf_data;
+		ctx_data->ctxt_event_cb(ctx_data->context_priv,
+			CAM_ICP_EVT_ID_BUF_DONE, &icp_evt_data);
+		hfi_frame_process->request_id[i] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[i] > 0) {
+			CAM_DBG(CAM_ICP, "%s: Delete merged sync in object: %d",
+				ctx_data->ctx_id_string,
+				ctx_data->hfi_frame_process.in_resource[i]);
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[i]);
+			ctx_data->hfi_frame_process.in_resource[i] = 0;
+		}
+		hfi_frame_process->fw_process_flag[i] = false;
+		clear_bit(i, ctx_data->hfi_frame_process.bitmap);
+	}
+
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (!hfi_frame_process->in_free_resource[i])
+			continue;
+
+		CAM_DBG(CAM_ICP, "%s: Delete merged sync in object: %d",
+			ctx_data->ctx_id_string,
+			ctx_data->hfi_frame_process.in_free_resource[i]);
+		cam_sync_destroy(
+			
ctx_data->hfi_frame_process.in_free_resource[i]);
+		ctx_data->hfi_frame_process.in_free_resource[i] = 0;
+	}
+
+	ctx_data->abort_timed_out = false;
+	return 0;
+}
+
+/* Translate a CAMERAICP_* FW error code into the CAM_SYNC_ICP_EVENT_* value
+ * reported to userspace via the sync event param. Unknown codes map to
+ * CAM_SYNC_ICP_EVENT_UNKNOWN. */
+static uint32_t cam_icp_handle_err_type_to_evt_param(uint32_t error_type)
+{
+	switch (error_type) {
+	case CAMERAICP_ENOMEMORY:
+		return CAM_SYNC_ICP_EVENT_NO_MEMORY;
+	case CAMERAICP_EFAILED:
+		return CAM_SYNC_ICP_EVENT_FRAME_PROCESS_FAILURE;
+	case CAMERAICP_EBADSTATE:
+		return CAM_SYNC_ICP_EVENT_BAD_STATE;
+	case CAMERAICP_EBADPARM:
+		return CAM_SYNC_ICP_EVENT_BAD_PARAM;
+	case CAMERAICP_EBADITEM:
+		return CAM_SYNC_ICP_EVENT_BAD_ITEM;
+	case CAMERAICP_EINVALIDFORMAT:
+		return CAM_SYNC_ICP_EVENT_INVALID_FORMAT;
+	case CAMERAICP_EUNSUPPORTED:
+		return CAM_SYNC_ICP_EVENT_UNSUPPORTED;
+	case CAMERAICP_EOUTOFBOUND:
+		return CAM_SYNC_ICP_EVENT_OUT_OF_BOUND;
+	case CAMERAICP_ETIMEDOUT:
+		return CAM_SYNC_ICP_EVENT_TIME_OUT;
+	case CAMERAICP_EABORTED:
+		return CAM_SYNC_ICP_EVENT_ABORTED;
+	case CAMERAICP_EHWVIOLATION:
+		return CAM_SYNC_ICP_EVENT_HW_VIOLATION;
+	case CAMERAICP_ECDMERROR:
+		return CAM_SYNC_ICP_EVENT_CMD_ERROR;
+	case CAMERAICP_HFI_ERR_COMMAND_SIZE:
+		return CAM_SYNC_ICP_EVENT_HFI_ERR_COMMAND_SIZE;
+	case CAMERAICP_HFI_ERR_MESSAGE_SIZE:
+		return CAM_SYNC_ICP_EVENT_HFI_ERR_MESSAGE_SIZE;
+	case CAMERAICP_HFI_QUEUE_EMPTY:
+		return CAM_SYNC_ICP_EVENT_HFI_ERR_QUEUE_EMPTY;
+	case CAMERAICP_HFI_QUEUE_FULL:
+		return CAM_SYNC_ICP_EVENT_HFI_ERR_QUEUE_FULL;
+	default:
+		return CAM_SYNC_ICP_EVENT_UNKNOWN;
+	}
+}
+
+/* Map a CAMERAICP_* error handle to a printable name; NULL when unknown
+ * (callers pass the result straight to log format strings). */
+static const char *cam_icp_error_handle_id_to_type(
+	uint32_t error_handle)
+{
+	const char *name = NULL;
+
+	switch (error_handle) {
+	case CAMERAICP_SUCCESS:
+		name = "SUCCESS";
+		break;
+	case CAMERAICP_EFAILED:
+		name = "EFAILED";
+		break;
+	case CAMERAICP_ENOMEMORY:
+		name = "ENOMEMORY";
+		break;
+	case CAMERAICP_EBADSTATE:
+		name = "EBADSTATE";
+		break;
+	case CAMERAICP_EBADPARM:
+		name = "EBADPARM";
+		break;
+	case CAMERAICP_EBADITEM:
+		name = "EBADITEM";
+		break;
+	case 
CAMERAICP_EINVALIDFORMAT:
+		name = "EINVALIDFORMAT";
+		break;
+	case CAMERAICP_EUNSUPPORTED:
+		name = "EUNSUPPORTED";
+		break;
+	case CAMERAICP_EOUTOFBOUND:
+		name = "EOUTOFBOUND";
+		break;
+	case CAMERAICP_ETIMEDOUT:
+		name = "ETIMEDOUT";
+		break;
+	case CAMERAICP_EABORTED:
+		name = "EABORTED";
+		break;
+	case CAMERAICP_EHWVIOLATION:
+		name = "EHWVIOLATION";
+		break;
+	case CAMERAICP_ECDMERROR:
+		name = "ECDMERROR";
+		break;
+	case CAMERAICP_HFI_ERR_COMMAND_SIZE:
+		name = "HFI_ERR_COMMAND_SIZE";
+		break;
+	case CAMERAICP_HFI_ERR_MESSAGE_SIZE:
+		name = "HFI_ERR_MESSAGE_SIZE";
+		break;
+	case CAMERAICP_HFI_QUEUE_EMPTY:
+		name = "HFI_QUEUE_EMPTY";
+		break;
+	case CAMERAICP_HFI_QUEUE_FULL:
+		name = "HFI_QUEUE_FULL";
+		break;
+	default:
+		name = NULL;
+		break;
+	}
+	return name;
+}
+
+/*
+ * Forward a pending injected event to the context callback and, for a
+ * buffer-error injection, tell the caller to skip signalling the real fence.
+ * Clears the injection's is_valid flag so it fires only once.
+ */
+static inline void cam_icp_mgr_apply_evt_injection(struct cam_hw_done_event_data *buf_done_data,
+	struct cam_icp_hw_ctx_data *ctx_data, bool *signal_fence_buffer)
+{
+	struct cam_hw_inject_evt_param *evt_params = &ctx_data->evt_inject_params;
+	struct cam_common_evt_inject_data inject_evt;
+
+	inject_evt.buf_done_data = buf_done_data;
+	inject_evt.evt_params = evt_params;
+
+	if (ctx_data->ctxt_event_cb)
+		ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_ICP_EVT_ID_INJECT_EVENT,
+			&inject_evt);
+
+	if (ctx_data->evt_inject_params.inject_id ==
+		CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE)
+		*signal_fence_buffer = false;
+
+	evt_params->is_valid = false;
+}
+
+/*
+ * Log every outstanding request id of every acquired context, plus totals.
+ * Takes hw_mgr_mutex and each per-context mutex while walking the active
+ * context list.
+ */
+static void cam_icp_mgr_dump_active_req_info(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	char log_info[256];
+	size_t buf_size, len;
+	uint32_t total_active_streams = 0, total_active_requests = 0;
+	int j;
+
+	buf_size = sizeof(log_info);
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	list_for_each_entry(ctx_data, &hw_mgr->active_ctx_info.active_ctx_list, list) {
+		mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
+		if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+			
mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
+			continue;
+		}
+
+		len = 0;
+		for (j = 0; j < CAM_FRAME_CMD_MAX; j++) {
+			if (!ctx_data->hfi_frame_process.request_id[j])
+				continue;
+
+			len += scnprintf(log_info + len, (buf_size - len), " %llu",
+				ctx_data->hfi_frame_process.request_id[j]);
+			total_active_requests++;
+		}
+		total_active_streams++;
+		CAM_INFO(CAM_ICP, "%s: Active Requests IDs: %s",
+			ctx_data->ctx_id_string, len ? log_info : " None");
+		mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_INFO(CAM_ICP, "%s: Total Active streams: %u, Total active requests: %u",
+		hw_mgr->hw_mgr_name, total_active_streams, total_active_requests);
+}
+
+/*
+ * Accumulate submit-to-ack latency (ms) for the request at @request_idx into
+ * the context's running perf stats and log the running average.
+ */
+static void cam_icp_mgr_compute_fw_avg_response_time(struct cam_icp_hw_ctx_data *ctx_data,
+	uint32_t request_idx)
+{
+	struct cam_icp_ctx_perf_stats *perf_stats;
+	uint64_t delta;
+
+	delta = ktime_ms_delta(ktime_get(),
+		ctx_data->hfi_frame_process.submit_timestamp[request_idx]);
+
+	perf_stats = &ctx_data->perf_stats;
+	perf_stats->total_resp_time += delta;
+	perf_stats->total_requests++;
+
+	CAM_DBG(CAM_PERF,
+		"%s: Avg response time: current_req: %llu total_processed_requests: %llu avg_time: %llums",
+		ctx_data->ctx_id_string,
+		ctx_data->hfi_frame_process.request_id[request_idx], perf_stats->total_requests,
+		(perf_stats->total_resp_time / perf_stats->total_requests));
+}
+
+/*
+ * Handle a frame-process ack from FW: locate the request slot, signal the
+ * buf-done (success, cancel for aborts, or error), release merged sync
+ * objects, and free the ctx_info carried in the ack's user_data1.
+ * @flag is ICP_FRAME_PROCESS_SUCCESS or ICP_FRAME_PROCESS_FAILURE.
+ */
+static int cam_icp_mgr_handle_frame_process(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr, int flag)
+{
+	int i, rc = 0, ctx_id;
+	uint32_t idx, event_id;
+	uint64_t request_id;
+	struct cam_icp_hw_ctx_info *ctx_info;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_dev_async_ack *ioconfig_ack = NULL;
+	struct hfi_frame_process_info *hfi_frame_process;
+	struct cam_hw_done_event_data buf_data = {0};
+	struct cam_icp_hw_buf_done_evt_data icp_done_evt;
+	struct cam_icp_hw_error_evt_data icp_err_evt = {0};
+	struct cam_hangdump_mem_regions *mem_regions = NULL;
+	bool 
signal_fence_buffer = true;
+
+	ioconfig_ack = (struct hfi_msg_dev_async_ack *)msg_ptr;
+	request_id = ioconfig_ack->user_data2;
+	ctx_info = (struct cam_icp_hw_ctx_info *)
+		U64_TO_PTR(ioconfig_ack->user_data1);
+	if (!ctx_info) {
+		CAM_ERR(CAM_ICP, "Invalid Context req %llu", request_id);
+		return -EINVAL;
+	}
+
+	ctx_data = ctx_info->ctx_data;
+	ctx_id = ctx_info->ctx_id;
+	mutex_lock(&hw_mgr->ctx_mutex[ctx_id]);
+	/* Context may have been released between FW ack and this handler */
+	if (!test_bit(ctx_info->ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap)) {
+		CAM_WARN(CAM_ICP, "ctx data is released before accessing it, ctx_id: %u",
+			ctx_id);
+		mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
+		goto end;
+	}
+
+	cam_icp_ctx_timer_reset(ctx_data);
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_ICP, "%s: is in %d state",
+			ctx_data->ctx_id_string, ctx_data->state);
+		mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
+		goto end;
+	}
+
+	CAM_DBG(CAM_REQ,
+		"%s: request_id: %lld",
+		ctx_data->ctx_id_string, request_id);
+
+	cam_icp_device_timer_reset(hw_mgr, ctx_data->device_info);
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+
+	idx = cam_icp_get_frame_process_idx_from_req_id(ctx_data, request_id);
+	if (idx >= CAM_FRAME_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "%s: pkt not found for req_id =%lld",
+			ctx_data->ctx_id_string, request_id);
+		rc = -EINVAL;
+		mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
+		goto end;
+	}
+
+	cam_icp_mgr_compute_fw_avg_response_time(ctx_data, idx);
+
+	if (flag == ICP_FRAME_PROCESS_FAILURE) {
+		/* Aborted (flushed) requests are cancellations, not errors */
+		if (ioconfig_ack->err_type == CAMERAICP_EABORTED) {
+			CAM_WARN(CAM_ICP,
+				"%s: req %llu has been aborted[flushed]",
+				ctx_data->ctx_id_string, request_id);
+			event_id = CAM_CTX_EVT_ID_CANCEL;
+		} else {
+			CAM_ERR(CAM_ICP,
+				"%s: Done with error: %u err_type= [%s] for req %llu",
+				ctx_data->ctx_id_string, ioconfig_ack->err_type,
+				cam_icp_error_handle_id_to_type(ioconfig_ack->err_type),
+				request_id);
+			event_id = CAM_CTX_EVT_ID_ERROR;
+		}
+		buf_data.evt_param = 
cam_icp_handle_err_type_to_evt_param(ioconfig_ack->err_type);
+	} else {
+		event_id = CAM_CTX_EVT_ID_SUCCESS;
+	}
+
+	/* In pre-sil mode, pull the hang-dump regions back from the simulator */
+	if (cam_presil_mode_enabled()) {
+		mem_regions = &hfi_frame_process->hangdump_mem_regions[idx];
+		CAM_INFO(CAM_ICP, "Hangdump Num Regions %d",
+			mem_regions->num_mem_regions);
+		for (i = 0; i < mem_regions->num_mem_regions; i++) {
+			CAM_INFO(CAM_PRESIL, "Hangdump Mem %d handle 0x%08x offset 0x%08x len %u",
+				i, mem_regions->mem_info_array[i].mem_handle,
+				mem_regions->mem_info_array[i].offset,
+				mem_regions->mem_info_array[i].size);
+			cam_mem_mgr_retrieve_buffer_from_presil(
+				mem_regions->mem_info_array[i].mem_handle,
+				mem_regions->mem_info_array[i].size,
+				mem_regions->mem_info_array[i].offset,
+				hw_mgr->iommu_hdl);
+		}
+	}
+
+	CAM_TRACE(CAM_ICP,
+		"%s: BufDone Req %llu event_id %d",
+		ctx_data->ctx_id_string, hfi_frame_process->request_id[idx], event_id);
+
+	buf_data.request_id = hfi_frame_process->request_id[idx];
+	/* An injected buffer error suppresses the real fence signal */
+	if ((ctx_data->evt_inject_params.is_valid) &&
+		(ctx_data->evt_inject_params.req_id == request_id))
+		cam_icp_mgr_apply_evt_injection(&buf_data, ctx_data, &signal_fence_buffer);
+
+	if (signal_fence_buffer) {
+		icp_done_evt.evt_id = event_id;
+		icp_done_evt.buf_done_data = &buf_data;
+		if (ctx_data->ctxt_event_cb)
+			ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_ICP_EVT_ID_BUF_DONE,
+				&icp_done_evt);
+	}
+
+	hfi_frame_process->request_id[idx] = 0;
+	if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+		CAM_DBG(CAM_ICP, "%s: Delete merged sync in object: %d",
+			ctx_data->ctx_id_string,
+			ctx_data->hfi_frame_process.in_resource[idx]);
+		cam_sync_destroy(ctx_data->hfi_frame_process.in_resource[idx]);
+		ctx_data->hfi_frame_process.in_resource[idx] = 0;
+	}
+	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	hfi_frame_process->fw_process_flag[idx] = false;
+	mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
+
+	/* report recovery to userspace if FW encounters no memory */
+	if (ioconfig_ack->err_type == CAMERAICP_ENOMEMORY) {
+		
cam_icp_mgr_dump_active_req_info(hw_mgr);
+
+		icp_err_evt.err_type = CAM_ICP_HW_ERROR_NO_MEM;
+		icp_err_evt.req_id = request_id;
+
+		mutex_lock(&hw_mgr->ctx_mutex[ctx_id]);
+		if (ctx_data->ctxt_event_cb)
+			ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_ICP_EVT_ID_ERROR,
+				&icp_err_evt);
+		mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
+	}
+
+	/* Pre-sil: release the single-frame-in-flight gate */
+	if (cam_presil_mode_enabled()) {
+		if (!atomic_read(&hw_mgr->frame_in_process)) {
+			CAM_ERR(CAM_PRESIL, "%s: presil: frame_in_process not set",
+				ctx_data->ctx_id_string);
+		} else {
+			hw_mgr->frame_in_process_ctx_id = -1;
+			atomic_set(&hw_mgr->frame_in_process, 0);
+			up_write(&frame_in_process_sem);
+			CAM_DBG(CAM_PRESIL, "%s: presil: unlocked frame_in_process",
+				ctx_data->ctx_id_string);
+		}
+	}
+
+end:
+	CAM_MEM_FREE(ctx_info);
+	return rc;
+}
+
+/*
+ * Classify a frame-process ack as success or failure and forward it to
+ * cam_icp_mgr_handle_frame_process(). EABORTED is treated as a clean flush
+ * (rc 0); any other FW error yields -EIO.
+ */
+static int cam_icp_mgr_process_msg_frame_process(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	struct hfi_msg_dev_async_ack *ioconfig_ack = NULL;
+	struct hfi_msg_frame_process_done *frame_done;
+	int rc = 0, flag = ICP_FRAME_PROCESS_SUCCESS;
+
+	if (!msg_ptr) {
+		CAM_ERR(CAM_ICP, "msg ptr is NULL");
+		return -EINVAL;
+	}
+
+	ioconfig_ack = (struct hfi_msg_dev_async_ack *)msg_ptr;
+	if (ioconfig_ack->err_type != CAMERAICP_SUCCESS) {
+		flag = ICP_FRAME_PROCESS_FAILURE;
+		if (ioconfig_ack->err_type == CAMERAICP_EABORTED)
+			rc = 0;
+		else
+			rc = -EIO;
+		goto end;
+	}
+
+	frame_done =
+		(struct hfi_msg_frame_process_done *)ioconfig_ack->msg_data_flex;
+	if (!frame_done) {
+		flag = ICP_FRAME_PROCESS_FAILURE;
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (frame_done->result)
+		flag = ICP_FRAME_PROCESS_FAILURE;
+
+end:
+	/* Always deliver the buf-done, even on failure paths */
+	cam_icp_mgr_handle_frame_process(hw_mgr, msg_ptr, flag);
+	return rc;
+}
+
+/*
+ * Handle an IPE/BPS CONFIG_IO ack: validate the per-device payload, record
+ * the IPE scratch memory size, and wake the waiter on success.
+ */
+static int cam_icp_mgr_process_msg_config_io(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct cam_icp_hw_ctx_info *ctx_info;
+	struct hfi_msg_dev_async_ack *ioconfig_ack;
+	int rc = 0, ctx_id;
+
+	if (!msg_ptr) {
+		CAM_ERR(CAM_ICP, 
"msg ptr is NULL");
+		return -EINVAL;
+	}
+
+	ioconfig_ack = (struct hfi_msg_dev_async_ack *)msg_ptr;
+
+	ctx_info = (struct cam_icp_hw_ctx_info *)
+		U64_TO_PTR(ioconfig_ack->user_data1);
+	if (!ctx_info) {
+		CAM_ERR(CAM_ICP, "wrong ctx info from IPE/BPS config io response");
+		return -EINVAL;
+	}
+
+	ctx_id = ctx_info->ctx_id;
+	/* need_lock distinguishes callers that already hold the ctx mutex */
+	if (ctx_info->need_lock)
+		mutex_lock(&hw_mgr->ctx_mutex[ctx_id]);
+
+	if (!test_bit(ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap)) {
+		CAM_WARN(CAM_ICP, "ctx data is released before accessing it, ctx_id: %u",
+			ctx_id);
+		goto end;
+	}
+
+	if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO) {
+		struct hfi_msg_ipe_config *ipe_config_ack = NULL;
+
+		ipe_config_ack =
+			(struct hfi_msg_ipe_config *)(ioconfig_ack->msg_data_flex);
+		if (ipe_config_ack->rc) {
+			CAM_ERR(CAM_ICP, "rc = %d failed with\n"
+				"err_no = [%u] err_type = [%s]",
+				ipe_config_ack->rc,
+				ioconfig_ack->err_type,
+				cam_icp_error_handle_id_to_type(
+				ioconfig_ack->err_type));
+			rc = -EIO;
+			goto end;
+		}
+
+		ctx_data = ctx_info->ctx_data;
+		if (!ctx_data) {
+			CAM_ERR(CAM_ICP, "wrong ctx data from IPE config io response");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		CAM_DBG(CAM_ICP, "%s: received IPE config io response",
+			ctx_data->ctx_id_string);
+		/* IPE reports the scratch buffer size FW wants for this ctx */
+		ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
+	} else if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO) {
+		struct hfi_msg_bps_config *bps_config_ack = NULL;
+
+		bps_config_ack =
+			(struct hfi_msg_bps_config *)(ioconfig_ack->msg_data_flex);
+		if (bps_config_ack->rc) {
+			CAM_ERR(CAM_ICP, "rc : %u, opcode :%u",
+				bps_config_ack->rc, ioconfig_ack->opcode);
+			rc = -EIO;
+			goto end;
+		}
+
+		ctx_data = ctx_info->ctx_data;
+		if (!ctx_data) {
+			CAM_ERR(CAM_ICP, "wrong ctx data from BPS config io response");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		CAM_DBG(CAM_ICP, "%s: received BPS config io response",
+			ctx_data->ctx_id_string);
+	} else {
+		CAM_ERR(CAM_ICP, "Invalid OPCODE: %u", 
ioconfig_ack->opcode);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Wake the thread blocked on this CONFIG_IO command */
+	complete(&ctx_data->wait_complete);
+end:
+	if (ctx_info->need_lock)
+		mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
+	CAM_MEM_FREE(ctx_info);
+	return rc;
+}
+
+/*
+ * Handle a CREATE_HANDLE ack: store the FW handle on the context when it is
+ * still in use, otherwise drop the handle and report -EPERM. Always wakes
+ * the waiter and frees the ctx_info carried in user_data1.
+ */
+static int cam_icp_mgr_process_msg_create_handle(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	struct hfi_msg_create_handle_ack *create_handle_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	int rc = 0;
+	struct cam_icp_hw_ctx_info *ctx_info;
+
+	create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
+	if (!create_handle_ack) {
+		CAM_ERR(CAM_ICP, "Invalid create_handle_ack");
+		return -EINVAL;
+	}
+
+	ctx_info = (struct cam_icp_hw_ctx_info *)(uintptr_t)
+		create_handle_ack->user_data1;
+	if (!ctx_info) {
+		CAM_ERR(CAM_ICP, "Invalid ctx_info");
+		return -EINVAL;
+	}
+
+	if (!test_bit(ctx_info->ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap)) {
+		CAM_WARN(CAM_ICP, "ctx data is released before accessing it, ctx_id: %u",
+			ctx_info->ctx_id);
+		goto end;
+	}
+
+	ctx_data = ctx_info->ctx_data;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Invalid ctx_data, ctx_id: %d", ctx_info->ctx_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE) {
+		ctx_data->fw_handle = create_handle_ack->fw_handle;
+		CAM_DBG(CAM_ICP, "%s: fw_handle = %x",
+			ctx_data->ctx_id_string, ctx_data->fw_handle);
+	} else {
+		CAM_WARN(CAM_ICP,
+			"%s: This ctx is no longer in use current state: %d",
+			ctx_data->ctx_id_string, ctx_data->state);
+		ctx_data->fw_handle = 0;
+		rc = -EPERM;
+	}
+	complete(&ctx_data->wait_complete);
+
+end:
+	CAM_MEM_FREE(ctx_info);
+	return rc;
+}
+
+/* Handle a PING ack: simply wake the waiter if the ctx is still in use */
+static int cam_icp_mgr_process_msg_ping_ack(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	struct hfi_msg_ping_ack *ping_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_icp_hw_ctx_info *ctx_info;
+	int rc = 0;
+
+	ping_ack = (struct hfi_msg_ping_ack *)msg_ptr;
+	if (!ping_ack) {
+		CAM_ERR(CAM_ICP, "Empty ping ack 
message");
+		return -EINVAL;
+	}
+
+	ctx_info = (struct cam_icp_hw_ctx_info *)
+		U64_TO_PTR(ping_ack->user_data);
+	if (!ctx_info) {
+		CAM_ERR(CAM_ICP, "Invalid ctx_info");
+		return -EINVAL;
+	}
+
+	if (!test_bit(ctx_info->ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap)) {
+		CAM_WARN(CAM_ICP, "ctx data is released before accessing it, ctx_id: %u",
+			ctx_info->ctx_id);
+		goto end;
+	}
+
+	ctx_data = ctx_info->ctx_data;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Invalid ctx_data, ctx_id: %d", ctx_info->ctx_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)
+		complete(&ctx_data->wait_complete);
+
+end:
+	CAM_MEM_FREE(ctx_info);
+	return rc;
+}
+
+/* Dispatch IPE/BPS indirect acks (CONFIG_IO, FRAME_PROCESS) by opcode */
+static int cam_icp_mgr_process_ipebps_indirect_ack_msg(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	int rc = 0;
+
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
+		CAM_DBG(CAM_ICP, "received IPE/BPS_CONFIG_IO:");
+		rc = cam_icp_mgr_process_msg_config_io(hw_mgr, msg_ptr);
+		if (rc)
+			return rc;
+		break;
+	case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
+		CAM_DBG(CAM_ICP, "received IPE/BPS_FRAME_PROCESS:");
+		rc = cam_icp_mgr_process_msg_frame_process(hw_mgr, msg_ptr);
+		if (rc)
+			return rc;
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/* Handle an OFE CONFIG_IO ack: validate the payload and wake the waiter */
+static inline int cam_icp_mgr_process_msg_ofe_config_io(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	struct hfi_msg_dev_async_ack *ioconfig_ack =
+		(struct hfi_msg_dev_async_ack *)msg_ptr;
+	struct hfi_msg_ofe_config *ofe_config_ack =
+		(struct hfi_msg_ofe_config *)(ioconfig_ack->msg_data_flex);
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct cam_icp_hw_ctx_info *ctx_info;
+	int rc = 0, ctx_id;
+
+	ctx_info = (struct cam_icp_hw_ctx_info *)
+		U64_TO_PTR(ioconfig_ack->user_data1);
+	if (!ctx_info) {
+		CAM_ERR(CAM_ICP, 
"wrong ctx info from OFE config io response");
+		return -EINVAL;
+	}
+
+	ctx_id = ctx_info->ctx_id;
+	ctx_data = ctx_info->ctx_data;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "wrong ctx data from OFE config io response, ctx_id: %u",
+			ctx_id);
+		return -EINVAL;
+	}
+
+	if (ctx_info->need_lock)
+		mutex_lock(&hw_mgr->ctx_mutex[ctx_id]);
+
+	if (!test_bit(ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap)) {
+		CAM_WARN(CAM_ICP, "ctx data is released before accessing it, ctx_id: %u",
+			ctx_id);
+		goto end;
+	}
+
+	if (ofe_config_ack->rc) {
+		CAM_ERR(CAM_ICP, "rc : %u, error type: %u error: [%s] opcode :%u",
+			ofe_config_ack->rc, ioconfig_ack->err_type,
+			cam_icp_error_handle_id_to_type(ioconfig_ack->err_type),
+			ioconfig_ack->opcode);
+		rc = -EIO;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ICP, "%s: received OFE config io response",
+		ctx_data->ctx_id_string);
+	complete(&ctx_data->wait_complete);
+
+end:
+	if (ctx_info->need_lock)
+		mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
+	CAM_MEM_FREE(ctx_info);
+	return rc;
+}
+
+/*
+ * Handle OFE direct acks (ABORT/DESTROY). Here the ctx id travels in
+ * user_data2 and the ctx pointer in user_data1; bails with -EFAULT when the
+ * ctx is no longer on the active bitmap.
+ */
+static int cam_icp_mgr_process_ofe_direct_ack_msg(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	int rc = 0;
+	uint32_t ctx_id;
+
+	struct hfi_msg_dev_async_ack *ioconfig_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+	ioconfig_ack = (struct hfi_msg_dev_async_ack *)msg_ptr;
+
+	ctx_id = (uint32_t)ioconfig_ack->user_data2;
+	if (!test_bit(ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap)) {
+		CAM_WARN(CAM_ICP, "ctx data is released before accessing it, ctx_id: %u",
+			ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	ctx_data = U64_TO_PTR(ioconfig_ack->user_data1);
+
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_OFE_CMD_OPCODE_ABORT: {
+		/* Pre-sil: release the frame gate if this ctx held it */
+		if (cam_presil_mode_enabled()) {
+			if (atomic_read(&hw_mgr->frame_in_process)) {
+				if (hw_mgr->frame_in_process_ctx_id == ctx_data->ctx_id) {
+					CAM_DBG(CAM_PRESIL, "presil: frame process abort ctx %d",
+						hw_mgr->frame_in_process_ctx_id);
+					hw_mgr->frame_in_process_ctx_id = -1;
+					
atomic_set(&hw_mgr->frame_in_process, 0);
+					up_write(&frame_in_process_sem);
+				} else {
+					CAM_WARN(CAM_PRESIL, "presil: abort mismatch %d %d",
+						hw_mgr->frame_in_process_ctx_id,
+						ctx_data->ctx_id);
+				}
+			}
+		}
+
+		CAM_DBG(CAM_ICP, "received OFE Abort done msg ctx_state: %u",
+			ctx_data->state);
+		complete(&ctx_data->wait_complete);
+		break;
+	}
+	case HFI_OFE_CMD_OPCODE_DESTROY: {
+		CAM_DBG(CAM_ICP, "received OFE destroy done msg: %u", ctx_data->state);
+		/* Only wake waiters in states that can legitimately wait on destroy */
+		if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
+			(ctx_data->state == CAM_ICP_CTX_STATE_IN_USE))
+			complete(&ctx_data->wait_complete);
+		break;
+	}
+	default:
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		return -EINVAL;
+	}
+
+end:
+	return rc;
+}
+
+/* Dispatch OFE indirect acks (CONFIG_IO, FRAME_PROCESS) by opcode */
+static int cam_icp_mgr_process_ofe_indirect_ack_msg(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	int rc = 0;
+
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_OFE_CMD_OPCODE_CONFIG_IO: {
+		rc = cam_icp_mgr_process_msg_ofe_config_io(hw_mgr, msg_ptr);
+		if (rc)
+			return rc;
+		break;
+	}
+	case HFI_OFE_CMD_OPCODE_FRAME_PROCESS:
+		CAM_DBG(CAM_ICP, "received OFE_FRAME_PROCESS:");
+		rc = cam_icp_mgr_process_msg_frame_process(hw_mgr, msg_ptr);
+		if (rc)
+			return rc;
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Handle IPE/BPS direct acks (ABORT/DESTROY/MEM_MAP/MEM_UNMAP): recover the
+ * ctx pointer from user_data1 and wake the corresponding waiter.
+ */
+static int cam_icp_mgr_process_direct_ack_msg(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_dev_async_ack *ioconfig_ack = NULL;
+	int rc = 0;
+
+	if (!msg_ptr)
+		return -EINVAL;
+
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
+		ioconfig_ack = (struct hfi_msg_dev_async_ack *)msg_ptr;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if (cam_presil_mode_enabled()) {
+			if (atomic_read(&hw_mgr->frame_in_process)) {
+				if 
(hw_mgr->frame_in_process_ctx_id == ctx_data->ctx_id) {
+					CAM_DBG(CAM_PRESIL, "%s: presil: frame process abort",
+						ctx_data->ctx_id_string);
+					hw_mgr->frame_in_process_ctx_id = -1;
+					atomic_set(&hw_mgr->frame_in_process, 0);
+					up_write(&frame_in_process_sem);
+				} else {
+					CAM_WARN(CAM_PRESIL,
+						"%s: presil: abort mismatch frame_in_process_ctx_id: %d",
+						ctx_data->ctx_id_string,
+						hw_mgr->frame_in_process_ctx_id);
+				}
+			}
+		}
+
+		CAM_DBG(CAM_ICP, "%s: received ABORT: ctx_state =%d",
+			ctx_data->ctx_id_string, ctx_data->state);
+		complete(&ctx_data->wait_complete);
+		break;
+	case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
+		ioconfig_ack = (struct hfi_msg_dev_async_ack *)msg_ptr;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		CAM_DBG(CAM_ICP, "%s: received DESTROY: ctx_state =%d",
+			ctx_data->ctx_id_string, ctx_data->state);
+		/* Only wake waiters in states that can legitimately wait on destroy */
+		if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
+			(ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)) {
+			complete(&ctx_data->wait_complete);
+		}
+		break;
+	case HFI_IPEBPS_CMD_OPCODE_MEM_MAP:
+		ioconfig_ack = (struct hfi_msg_dev_async_ack *)msg_ptr;
+		ctx_data =
+			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+
+		CAM_DBG(CAM_ICP, "%s: received\n"
+			"MAP ACK:ctx_state =%d\n"
+			"failed with err_no = [%u] err_type = [%s]",
+			ctx_data->ctx_id_string, ctx_data->state, ioconfig_ack->err_type,
+			cam_icp_error_handle_id_to_type(ioconfig_ack->err_type));
+		complete(&ctx_data->wait_complete);
+		break;
+	case HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP:
+		ioconfig_ack = (struct hfi_msg_dev_async_ack *)msg_ptr;
+		ctx_data =
+			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+
+		CAM_DBG(CAM_ICP,
+			"%s: received UNMAP ACK:ctx_state =%d\n"
+			"failed with err_no = [%u] err_type = [%s]",
+			ctx_data->ctx_id_string, ctx_data->state, ioconfig_ack->err_type,
+			cam_icp_error_handle_id_to_type(ioconfig_ack->err_type));
+		complete(&ctx_data->wait_complete);
+		break;
+	default:
+		
CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * Kick off SSR recovery after an FW fatal error: take GDSC control back,
+ * dump the SFR buffer and debug info, report the failure on one active
+ * context, then restart ICP (unless a cold boot is still in flight).
+ */
+static int cam_icp_mgr_trigger_recovery(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	bool found_active = false;
+	struct sfr_buf *sfr_buffer = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct cam_icp_hw_error_evt_data icp_err_evt = {0};
+
+	CAM_DBG(CAM_ICP, "[%s] Enter", hw_mgr->hw_mgr_name);
+
+	/* A recovery already in progress is not restarted */
+	if (atomic_read(&hw_mgr->recovery)) {
+		CAM_ERR(CAM_ICP, "%s SSR is set", hw_mgr->hw_mgr_name);
+		return rc;
+	}
+
+	atomic_set(&hw_mgr->recovery, 1);
+	cam_icp_mgr_dev_get_gdsc_control(hw_mgr);
+
+	sfr_buffer = (struct sfr_buf *)hw_mgr->hfi_mem.sfr_buf.kva;
+	CAM_WARN(CAM_ICP, "[%s] hw_mgr[%u] SFR:%s", hw_mgr->hw_mgr_name,
+		hw_mgr->hw_mgr_id, sfr_buffer->msg);
+
+	cam_icp_dump_debug_info(hw_mgr, false);
+	cam_icp_mgr_dump_active_req_info(hw_mgr);
+
+	/* Find any active context and notify userspace of system failure */
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	list_for_each_entry(ctx_data, &hw_mgr->active_ctx_info.active_ctx_list, list) {
+		mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
+		/* NOTE(review): compares against CAM_CTX_ACQUIRED while sibling
+		 * code in this file uses CAM_ICP_CTX_STATE_ACQUIRED — confirm
+		 * the intended enum.
+		 */
+		if (ctx_data->state != CAM_CTX_ACQUIRED) {
+			mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
+			continue;
+		}
+
+		icp_err_evt.err_type = CAM_ICP_HW_ERROR_SYSTEM_FAILURE;
+		ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_ICP_EVT_ID_ERROR,
+			&icp_err_evt);
+		mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
+		found_active = true;
+		break;
+	}
+	if (!found_active)
+		CAM_ERR(CAM_ICP,
+			"[%s] Fail to report system failure to userspace due to no active ctx",
+			hw_mgr->hw_mgr_name);
+
+	/*
+	 * Restart only if ICP has been booted up successfully
+	 * If the cold boot is failing, retrying loading is futile
+	 */
+	if (!atomic_read(&hw_mgr->load_in_process) &&
+		atomic_read(&hw_mgr->recovery)) {
+		rc = cam_icp_mgr_restart_icp(hw_mgr);
+		if (!rc)
+			atomic_set(&hw_mgr->recovery, 0);
+
+		CAM_DBG(CAM_ICP, "[%s] recovery success: %s",
+			hw_mgr->hw_mgr_name,
+			
			CAM_BOOL_TO_YESNO(!atomic_read(&hw_mgr->recovery)));
	}

	mutex_unlock(&hw_mgr->hw_mgr_mutex);
	return rc;
}

/*
 * cam_icp_mgr_process_fatal_error - handle an HFI event-notify message.
 *
 * @hw_mgr:  ICP hw manager owning the msg queue.
 * @msg_ptr: raw message words, reinterpreted as hfi_msg_event_notify.
 *
 * On HFI_EVENT_SYS_ERROR the FW reported a system error: for
 * HFI_ERR_SYS_FATAL the code deliberately panics via BUG() (debug aid to
 * capture the FW state); otherwise it kicks off ICP recovery.
 * Returns 0 or the recovery rc; -EINVAL on a NULL message.
 */
static int cam_icp_mgr_process_fatal_error(
	struct cam_icp_hw_mgr *hw_mgr, uint32_t *msg_ptr)
{
	struct hfi_msg_event_notify *event_notify;
	int rc = 0;

	CAM_DBG(CAM_ICP, "%s: Enter", hw_mgr->hw_mgr_name);

	event_notify = (struct hfi_msg_event_notify *)msg_ptr;
	if (!event_notify) {
		CAM_ERR(CAM_ICP, "[%s] Empty event message", hw_mgr->hw_mgr_name);
		return -EINVAL;
	}

	CAM_DBG(CAM_ICP, "[%s] evt_id: %u evt_data1: %u evt_data2: %u",
		hw_mgr->hw_mgr_name, event_notify->event_id, event_notify->event_data1,
		event_notify->event_data2);

	if (event_notify->event_id == HFI_EVENT_SYS_ERROR) {
		CAM_INFO(CAM_ICP, "[%s] received HFI_EVENT_SYS_ERROR",
			hw_mgr->hw_mgr_name);
		if (event_notify->event_data1 == HFI_ERR_SYS_FATAL) {
			CAM_ERR(CAM_ICP, "[%s] received HFI_ERR_SYS_FATAL",
				hw_mgr->hw_mgr_name);
			/* Intentional panic: fatal FW error, halt to preserve state */
			BUG();
		}
		rc = cam_icp_mgr_trigger_recovery(hw_mgr);
	}

	return rc;
}

/*
 * cam_icp_mgr_process_dbg_buf - drain the FW debug queue (Q_DBG) and emit
 * each HFI_MSG_SYS_DEBUG packet to the trace log (and kernel log when
 * icp_dbg_lvl is 0). Packets split across two reads are stitched by copying
 * the partial tail to the start of dbg_buf and reading again.
 */
static void cam_icp_mgr_process_dbg_buf(struct cam_icp_hw_mgr *hw_mgr)
{
	uint32_t *msg_ptr = NULL, *pkt_ptr = NULL;
	struct hfi_msg_debug *dbg_msg = NULL;
	uint32_t read_in_words = 0;
	uint32_t remain_len, pre_remain_len = 0;
	uint32_t buf_word_size = ICP_DBG_BUF_SIZE_IN_WORDS;
	uint32_t pre_buf_word_size = 0;
	uint64_t timestamp = 0;
	char *msg_data;
	int rc = 0;

	if (!hw_mgr) {
		CAM_ERR(CAM_ICP, "Invalid data");
		return;
	}

	do {
		/* Append after any partial packet carried over from last read */
		rc = hfi_read_message(hw_mgr->hfi_handle,
			hw_mgr->dbg_buf + (pre_remain_len >> BYTE_WORD_SHIFT),
			Q_DBG, buf_word_size, &read_in_words);
		if (rc)
			break;

		remain_len = pre_remain_len + (read_in_words << BYTE_WORD_SHIFT);
		pre_remain_len = 0;
		pre_buf_word_size = buf_word_size;
		msg_ptr = (uint32_t *)hw_mgr->dbg_buf;
		buf_word_size = ICP_DBG_BUF_SIZE_IN_WORDS;

		while (remain_len) {
			pkt_ptr = msg_ptr;

			/* Sanity: in-bounds pointer and expected packet type */
			if (remain_len >= (ICP_DBG_BUF_SIZE_IN_WORDS << BYTE_WORD_SHIFT) ||
				(pkt_ptr >=
hw_mgr->dbg_buf + ICP_DBG_BUF_SIZE_IN_WORDS) || + (pkt_ptr[ICP_PACKET_TYPE] != HFI_MSG_SYS_DEBUG)) { + CAM_WARN(CAM_ICP, + "Error message: remain_len:%u, dbg_buf:%p pkt_ptr:%p pkt_size:%u pkt_type:0x%x read_in_words:%d", + remain_len, hw_mgr->dbg_buf, pkt_ptr, + pkt_ptr[ICP_PACKET_SIZE], pkt_ptr[ICP_PACKET_TYPE], + read_in_words); + return; + } + + if (remain_len < pkt_ptr[ICP_PACKET_SIZE]) { + /* + * MSG is broken into two parts, need to read from dbg q again + * to complete the msg and get the remaining packets. Copy + * the remain data to start of buffer and shift buffer ptr to + * after the remaining data ends to read from queue. + */ + memcpy(hw_mgr->dbg_buf, msg_ptr, remain_len); + pre_remain_len = remain_len; + buf_word_size -= (pre_remain_len >> BYTE_WORD_SHIFT); + break; + } + + if (pkt_ptr[ICP_PACKET_TYPE] == HFI_MSG_SYS_DEBUG) { + dbg_msg = (struct hfi_msg_debug *)pkt_ptr; + msg_data = (char *)&dbg_msg->msg_data_flex; + timestamp = ((((uint64_t)(dbg_msg->timestamp_hi) << 32) + | dbg_msg->timestamp_lo) >> 16); + trace_cam_icp_fw_dbg(msg_data, timestamp/2, + hw_mgr->hw_mgr_name); + if (!hw_mgr->icp_dbg_lvl) + CAM_INFO(CAM_ICP, "[%s]: FW_DBG:%s", + hw_mgr->hw_mgr_name, msg_data); + } + + remain_len -= pkt_ptr[ICP_PACKET_SIZE]; + if (remain_len > 0) + msg_ptr += (pkt_ptr[ICP_PACKET_SIZE] >> BYTE_WORD_SHIFT); + else + break; + } + + /* Repeat reading if drain buffer is insufficient to read all MSGs at once */ + } while (read_in_words >= pre_buf_word_size); +} + +static int cam_icp_process_msg_pkt_type( + struct cam_icp_hw_mgr *hw_mgr, + uint32_t *msg_ptr) +{ + int rc = 0; + + switch (msg_ptr[ICP_PACKET_TYPE]) { + case HFI_MSG_SYS_INIT_DONE: + CAM_DBG(CAM_ICP, "[%s] received SYS_INIT_DONE", hw_mgr->hw_mgr_name); + complete(&hw_mgr->icp_complete); + break; + + case HFI_MSG_SYS_PC_PREP_DONE: + CAM_DBG(CAM_ICP, "[%s] HFI_MSG_SYS_PC_PREP_DONE is received\n", + hw_mgr->hw_mgr_name); + complete(&hw_mgr->icp_complete); + break; + + case HFI_MSG_SYS_PING_ACK: + 
CAM_DBG(CAM_ICP, "[%s] received SYS_PING_ACK", hw_mgr->hw_mgr_name); + rc = cam_icp_mgr_process_msg_ping_ack(hw_mgr, msg_ptr); + break; + + case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK: + case HFI_MSG_OFE_CREATE_HANDLE_ACK: + CAM_DBG(CAM_ICP, "[%s] received IPE/BPS/OFE CREATE_HANDLE_ACK", + hw_mgr->hw_mgr_name); + rc = cam_icp_mgr_process_msg_create_handle(hw_mgr, msg_ptr); + break; + + case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK: + CAM_DBG(CAM_ICP, "[%s] received IPE/BPS ASYNC_INDIRECT_ACK", + hw_mgr->hw_mgr_name); + rc = cam_icp_mgr_process_ipebps_indirect_ack_msg(hw_mgr, msg_ptr); + break; + + case HFI_MSG_OFE_ASYNC_COMMAND_DIRECT_ACK: + CAM_DBG(CAM_ICP, "[%s] received OFE ASYNC DIRECT COMMAND ACK", + hw_mgr->hw_mgr_name); + rc = cam_icp_mgr_process_ofe_direct_ack_msg(hw_mgr, msg_ptr); + break; + + case HFI_MSG_OFE_ASYNC_COMMAND_INDIRECT_ACK: + CAM_DBG(CAM_ICP, "[%s] received OFE ASYNC INDIRECT COMMAND ACK", + hw_mgr->hw_mgr_name); + rc = cam_icp_mgr_process_ofe_indirect_ack_msg(hw_mgr, msg_ptr); + break; + + case HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK: + CAM_DBG(CAM_ICP, "[%s] received ASYNC_DIRECT_ACK", hw_mgr->hw_mgr_name); + rc = cam_icp_mgr_process_direct_ack_msg(hw_mgr, msg_ptr); + break; + + case HFI_MSG_EVENT_NOTIFY: + CAM_DBG(CAM_ICP, "[%s] received EVENT_NOTIFY", hw_mgr->hw_mgr_name); + rc = cam_icp_mgr_process_fatal_error(hw_mgr, msg_ptr); + if (rc) + CAM_ERR(CAM_ICP, "[%s] failed in processing evt notify", + hw_mgr->hw_mgr_name); + break; + + case HFI_MSG_DBG_SYNX_TEST: + CAM_DBG(CAM_ICP, "received DBG_SYNX_TEST"); + complete(&hw_mgr->icp_complete); + break; + + default: + CAM_ERR(CAM_ICP, "[%s] invalid msg : %u", + hw_mgr->hw_mgr_name, msg_ptr[ICP_PACKET_TYPE]); + rc = -EINVAL; + } + + return rc; +} + +static int cam_icp_mgr_process_msg(struct cam_icp_hw_mgr *hw_mgr) +{ + uint32_t read_in_words = 0; + uint32_t remain_len, pre_remain_len = 0; + uint32_t *msg_ptr = NULL; + uint32_t buf_word_size = ICP_MSG_BUF_SIZE_IN_WORDS; + uint32_t pre_buf_word_size = 
0; + int rc = 0; + + if (!hw_mgr) { + CAM_ERR(CAM_ICP, "Invalid data"); + return -EINVAL; + } + + do { + rc = hfi_read_message(hw_mgr->hfi_handle, + hw_mgr->msg_buf + (pre_remain_len >> BYTE_WORD_SHIFT), + Q_MSG, buf_word_size, &read_in_words); + if (rc) { + if (rc != -ENOMSG) + CAM_DBG(CAM_ICP, "Unable to read msg q rc %d", rc); + break; + } + + remain_len = pre_remain_len + (read_in_words << BYTE_WORD_SHIFT); + pre_remain_len = 0; + pre_buf_word_size = buf_word_size; + msg_ptr = (uint32_t *)hw_mgr->msg_buf; + buf_word_size = ICP_MSG_BUF_SIZE_IN_WORDS; + + while (remain_len) { + if (remain_len < msg_ptr[ICP_PACKET_SIZE]) { + /* + * MSG is broken into two parts, need to read from msg q again + * to complete the msg and get the remaining packets. Copy + * the remain data to start of buffer and shift buffer ptr to + * after the remaining data ends to read from queue. + */ + memcpy(hw_mgr->msg_buf, msg_ptr, remain_len); + pre_remain_len = remain_len; + buf_word_size -= (pre_remain_len >> BYTE_WORD_SHIFT); + break; + } + + rc = cam_icp_process_msg_pkt_type(hw_mgr, msg_ptr); + if (rc) + CAM_ERR(CAM_ICP, "Failed to process MSG"); + + remain_len -= msg_ptr[ICP_PACKET_SIZE]; + if (remain_len > 0) + msg_ptr += (msg_ptr[ICP_PACKET_SIZE] >> BYTE_WORD_SHIFT); + else + break; + } + /* Repeat reading if drain buffer is insufficient to read all MSGs at once */ + } while (read_in_words >= pre_buf_word_size); + + return rc; +} + +static int32_t cam_icp_mgr_process_cb(void *priv, void *data) +{ + struct hfi_msg_work_data *task_data; + struct cam_icp_hw_mgr *hw_mgr; + int rc = 0; + + if (!data || !priv) { + CAM_ERR(CAM_ICP, "Invalid data"); + return -EINVAL; + } + + task_data = data; + hw_mgr = priv; + + rc = cam_icp_mgr_process_msg(hw_mgr); + if (rc && (rc != -ENOMSG)) + CAM_ERR(CAM_ICP, "Failed to process MSG"); + + cam_icp_mgr_process_dbg_buf(hw_mgr); + + if (task_data->recover) { + CAM_ERR_RATE_LIMIT(CAM_ICP, "issuing device recovery..."); + rc = 
		cam_icp_mgr_trigger_recovery(hw_mgr);
	}

	return rc;
}

/*
 * cam_icp_hw_mgr_cb - IRQ-side callback from the ICP device.
 *
 * @data:    the cam_icp_hw_mgr (opaque cb cookie).
 * @recover: true when the caller wants device recovery after the message
 *           work runs (propagated via task_data->recover).
 *
 * Grabs a workq task under hw_mgr_lock (irqsave — this can run in interrupt
 * context) and enqueues cam_icp_mgr_process_cb at top priority.
 * NOTE(review): returns rc (0) when data is NULL, so the caller cannot
 * distinguish that error — verify intentional.
 */
static int32_t cam_icp_hw_mgr_cb(void *data, bool recover)
{
	int rc = 0;
	unsigned long flags;
	struct cam_icp_hw_mgr *hw_mgr = data;
	struct crm_workq_task *task;
	struct hfi_msg_work_data *task_data;

	if (!data) {
		CAM_ERR(CAM_ICP, "irq cb data is NULL");
		return rc;
	}

	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
	task = cam_req_mgr_workq_get_task(hw_mgr->msg_work);
	if (!task) {
		CAM_ERR(CAM_ICP, "no empty task");
		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
		return -ENOMEM;
	}

	task_data = (struct hfi_msg_work_data *)task->payload;
	task_data->data = hw_mgr;
	task_data->recover = recover;
	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
	task->process_cb = cam_icp_mgr_process_cb;
	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr,
		CRM_TASK_PRIORITY_0);
	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);

	return rc;
}

/*
 * cam_icp_free_fw_mem - release FW memory allocated via
 * cam_smmu_alloc_firmware(). No-op when PIL owns the FW memory
 * (icp_use_pil) or in presil mode, mirroring cam_icp_allocate_fw_mem.
 */
static void cam_icp_free_fw_mem(struct cam_icp_hw_mgr *hw_mgr)
{
	/* Skip freeing FW memory if not allocated */
	if (hw_mgr->icp_use_pil)
		return;

	if (cam_presil_mode_enabled()) {
		CAM_INFO(CAM_ICP, "PRESIL-ICP-B2B-HFI-INIT No Shutdown No Deinit No HFIfreeMem");
		return;
	}

	cam_smmu_dealloc_firmware(hw_mgr->iommu_hdl);
}

/*
 * cam_icp_free_hfi_mem - tear down all HFI memory, inverse of
 * cam_icp_allocate_hfi_mem. Two layouts exist: a single reserved
 * FW-uncached region, or individually allocated queues + sec heap.
 */
static void cam_icp_free_hfi_mem(struct cam_icp_hw_mgr *hw_mgr)
{
	int rc;

	cam_icp_free_fw_mem(hw_mgr);

	if (hw_mgr->hfi_mem.fw_uncached_region) {
		/* Queues/SFR/sec-heap were carved from one reserved region */
		rc = cam_mem_mgr_free_memory_region(
			&hw_mgr->hfi_mem.fw_uncached_generic);
		if (rc)
			CAM_ERR(CAM_ICP,
				"[%s] failed to unreserve fwuncached region", hw_mgr->hw_mgr_name);
		hw_mgr->hfi_mem.fw_uncached_region = false;
	} else {
		rc = cam_mem_mgr_free_memory_region(
			&hw_mgr->hfi_mem.sec_heap);
		if (rc)
			CAM_ERR(CAM_ICP, "[%s] failed to unreserve sec heap", hw_mgr->hw_mgr_name);

		cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.qtbl);
		cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.cmd_q);
		cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.msg_q);
		cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.dbg_q);
		cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.sfr_buf);
	}

	cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_QDSS, 0);

	if (hw_mgr->fw_based_sys_caching)
		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_DEVICE,
			CAM_SMMU_SUBREGION_LLCC_REGISTER);

	/* Skip freeing if not mapped */
	if (hw_mgr->synx_signaling_en) {
		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_FWUNCACHED,
			CAM_SMMU_SUBREGION_GLOBAL_SYNC_MEM);
		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_DEVICE,
			CAM_SMMU_SUBREGION_SYNX_HWMUTEX);
		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_DEVICE,
			CAM_SMMU_SUBREGION_IPC_HWMUTEX);
		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_DEVICE,
			CAM_SMMU_SUBREGION_GLOBAL_CNTR);
	}
}

/*
 * cam_icp_alloc_secheap_mem - reserve the whole SECHEAP SMMU region as the
 * FW secondary heap.
 *
 * @hw_mgr:  ICP hw manager (supplies iommu_hdl).
 * @secheap: out descriptor, filled on success.
 *
 * Sizes the reservation from cam_smmu_get_region_info(); returns 0 or a
 * negative errno from the SMMU/mem-mgr layers.
 */
static int cam_icp_alloc_secheap_mem(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_mem_mgr_memory_desc *secheap)
{
	int rc;
	struct cam_mem_mgr_request_desc alloc;
	struct cam_mem_mgr_memory_desc out;
	struct cam_smmu_region_info secheap_info;

	rc = cam_smmu_get_region_info(hw_mgr->iommu_hdl,
		CAM_SMMU_REGION_SECHEAP, &secheap_info);
	if (rc) {
		CAM_ERR(CAM_ICP, "[%s] Unable to get secheap memory info", hw_mgr->hw_mgr_name);
		return rc;
	}

	/* Reserve the full region length; no extra alignment/flags needed */
	alloc.size = secheap_info.iova_len;
	alloc.align = 0;
	alloc.flags = 0;
	alloc.smmu_hdl = hw_mgr->iommu_hdl;
	rc = cam_mem_mgr_reserve_memory_region(&alloc,
		CAM_SMMU_REGION_SECHEAP,
		&out);
	if (rc) {
		CAM_ERR(CAM_ICP, "[%s] Unable to reserve secheap memory", hw_mgr->hw_mgr_name);
		return rc;
	}

	*secheap = out;
	CAM_DBG(CAM_ICP, "[%s] kva: %llX, iova: %x, hdl: %x, len: %lld",
		hw_mgr->hw_mgr_name, out.kva, out.iova, out.mem_handle, out.len);

	return rc;
}

/*
 * cam_icp_alloc_shared_mem - allocate HW-shared memory of the requested
 * size for an HFI queue/buffer; fills @alloc_out on success.
 */
static int cam_icp_alloc_shared_mem(
	struct cam_icp_hw_mgr *hw_mgr, size_t size_requested,
	struct cam_mem_mgr_memory_desc *alloc_out)
{
	int rc;
	struct cam_mem_mgr_request_desc
alloc; + struct cam_mem_mgr_memory_desc out; + + alloc.size = size_requested; + alloc.align = 0; + alloc.flags = CAM_MEM_FLAG_HW_READ_WRITE | + CAM_MEM_FLAG_HW_SHARED_ACCESS; + alloc.smmu_hdl = hw_mgr->iommu_hdl; + rc = cam_mem_mgr_request_mem(&alloc, &out); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in alloc shared mem rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + *alloc_out = out; + CAM_DBG(CAM_ICP, "[%s] kva: %llX, iova: %x, hdl: %x, len: %lld", + hw_mgr->hw_mgr_name, + out.kva, out.iova, out.mem_handle, out.len); + + return rc; +} + +static int cam_icp_allocate_fw_mem(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + uintptr_t kvaddr; + size_t len; + dma_addr_t iova; + + if (hw_mgr->icp_use_pil) + return 0; + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_ICP, "PRESIL-ICP-B2B-HFI-INIT No Shutdown No Deinit No HFIfreeMem"); + return 0; + } + + rc = cam_smmu_alloc_firmware(hw_mgr->iommu_hdl, + &iova, &kvaddr, &len); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in alloc firmware rc %d", + hw_mgr->hw_mgr_name, rc); + return -ENOMEM; + } + + hw_mgr->hfi_mem.fw_buf.len = len; + hw_mgr->hfi_mem.fw_buf.kva = kvaddr; + hw_mgr->hfi_mem.fw_buf.iova = iova; + hw_mgr->hfi_mem.fw_buf.smmu_hdl = hw_mgr->iommu_hdl; + + CAM_DBG(CAM_ICP, "[%s], kva: %zX, iova: %llx, len: %zu", + hw_mgr->hw_mgr_name, + kvaddr, iova, len); + + return rc; +} + +static int cam_icp_allocate_qdss_mem(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + size_t len; + dma_addr_t iova; + + rc = cam_smmu_map_phy_mem_region(hw_mgr->iommu_hdl, + CAM_SMMU_REGION_QDSS, 0, &iova, &len); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in alloc qdss mem rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + hw_mgr->hfi_mem.qdss_buf.len = len; + hw_mgr->hfi_mem.qdss_buf.iova = iova; + hw_mgr->hfi_mem.qdss_buf.smmu_hdl = hw_mgr->iommu_hdl; + + CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu", + hw_mgr->hw_mgr_name, iova, len); + + return rc; +} + +static int cam_icp_allocate_llcc_register_mem(struct cam_icp_hw_mgr 
*hw_mgr) +{ + int rc; + size_t len; + dma_addr_t iova; + + rc = cam_smmu_map_phy_mem_region(hw_mgr->iommu_hdl, + CAM_SMMU_REGION_DEVICE, + CAM_SMMU_SUBREGION_LLCC_REGISTER, &iova, &len); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in alloc llcc mem rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + hw_mgr->hfi_mem.llcc_reg.len = len; + hw_mgr->hfi_mem.llcc_reg.iova = iova; + hw_mgr->hfi_mem.llcc_reg.smmu_hdl = hw_mgr->iommu_hdl; + + CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu", + hw_mgr->hw_mgr_name, iova, len); + + return rc; +} + +static int cam_icp_allocate_global_sync_mem(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + size_t len; + dma_addr_t iova; + + rc = cam_smmu_map_phy_mem_region(hw_mgr->iommu_hdl, + CAM_SMMU_REGION_FWUNCACHED, + CAM_SMMU_SUBREGION_GLOBAL_SYNC_MEM, &iova, &len); + if (rc) { + CAM_ERR(CAM_ICP, + "[%s] Failed in allocating global sync ipc mem rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + hw_mgr->hfi_mem.fw_uncached_global_sync.len = len; + hw_mgr->hfi_mem.fw_uncached_global_sync.iova = iova; + hw_mgr->hfi_mem.fw_uncached_global_sync.smmu_hdl = hw_mgr->iommu_hdl; + + CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu", + hw_mgr->hw_mgr_name, iova, len); + + return rc; +} + +static int cam_icp_allocate_device_synx_hwmutex_mem( + struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + size_t len; + dma_addr_t iova; + + rc = cam_smmu_map_phy_mem_region(hw_mgr->iommu_hdl, + CAM_SMMU_REGION_DEVICE, CAM_SMMU_SUBREGION_SYNX_HWMUTEX, &iova, &len); + if (rc) { + CAM_ERR(CAM_ICP, + "Failed in allocating hwmutex mem rc %d", rc); + return rc; + } + + hw_mgr->hfi_mem.synx_hwmutex.len = len; + hw_mgr->hfi_mem.synx_hwmutex.iova = iova; + hw_mgr->hfi_mem.synx_hwmutex.smmu_hdl = hw_mgr->iommu_hdl; + + CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu", + hw_mgr->hw_mgr_name, iova, len); + + return rc; +} + +static int cam_icp_allocate_device_global_cnt_mem( + struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + size_t len; + dma_addr_t iova; + + rc = 
cam_smmu_map_phy_mem_region(hw_mgr->iommu_hdl,
		CAM_SMMU_REGION_DEVICE, CAM_SMMU_SUBREGION_GLOBAL_CNTR,
		&iova, &len);
	if (rc) {
		CAM_ERR(CAM_ICP,
			"Failed in allocating global cntr mem rc %d", rc);
		return rc;
	}

	hw_mgr->hfi_mem.global_cntr.len = len;
	hw_mgr->hfi_mem.global_cntr.iova = iova;
	hw_mgr->hfi_mem.global_cntr.smmu_hdl = hw_mgr->iommu_hdl;

	CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu",
		hw_mgr->hw_mgr_name, iova, len);

	return rc;
}

/*
 * cam_icp_allocate_device_ipc_hwmutex_mem - map the IPC HW-mutex device
 * subregion and record it in hfi_mem.ipc_hwmutex. Returns 0 or the SMMU rc.
 */
static int cam_icp_allocate_device_ipc_hwmutex_mem(
	struct cam_icp_hw_mgr *hw_mgr)
{
	int rc;
	size_t len;
	dma_addr_t iova;

	rc = cam_smmu_map_phy_mem_region(hw_mgr->iommu_hdl,
		CAM_SMMU_REGION_DEVICE, CAM_SMMU_SUBREGION_IPC_HWMUTEX,
		&iova, &len);
	if (rc) {
		CAM_ERR(CAM_ICP,
			"Failed in allocating hwmutex mem rc %d", rc);
		return rc;
	}

	hw_mgr->hfi_mem.ipc_hwmutex.len = len;
	hw_mgr->hfi_mem.ipc_hwmutex.iova = iova;
	hw_mgr->hfi_mem.ipc_hwmutex.smmu_hdl = hw_mgr->iommu_hdl;

	CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu",
		hw_mgr->hw_mgr_name, iova, len);

	return rc;
}

/*
 * cam_icp_allocate_mem_for_fence_signaling - map all four regions needed
 * for HW fence signaling (global sync, synx hwmutex, ipc hwmutex, global
 * counter) in order, unwinding earlier mappings on any failure.
 *
 * Returns 0 on success; on error every region mapped so far is unmapped
 * via the goto chain below and the failing rc is returned.
 */
static int cam_icp_allocate_mem_for_fence_signaling(
	struct cam_icp_hw_mgr *hw_mgr)
{
	int rc;

	rc = cam_icp_allocate_global_sync_mem(hw_mgr);
	if (rc)
		return rc;

	rc = cam_icp_allocate_device_synx_hwmutex_mem(hw_mgr);
	if (rc)
		goto unmap_global_sync;

	rc = cam_icp_allocate_device_ipc_hwmutex_mem(hw_mgr);
	if (rc)
		goto unmap_synx_hwmutex;

	rc = cam_icp_allocate_device_global_cnt_mem(hw_mgr);
	if (rc)
		goto unmap_ipc_mutex;

	return 0;

	/* Error unwind: reverse order of the successful mappings above */
unmap_ipc_mutex:
	cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl,
		CAM_SMMU_REGION_DEVICE,
		CAM_SMMU_SUBREGION_IPC_HWMUTEX);
unmap_synx_hwmutex:
	cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl,
		CAM_SMMU_REGION_DEVICE,
		CAM_SMMU_SUBREGION_SYNX_HWMUTEX);
unmap_global_sync:
	cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl,
		CAM_SMMU_REGION_FWUNCACHED,
		CAM_SMMU_SUBREGION_GLOBAL_SYNC_MEM);

	return rc;
}

static int
cam_icp_get_io_mem_info(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + size_t len, discard_iova_len; + dma_addr_t iova, discard_iova_start; + + rc = cam_smmu_get_io_region_info(hw_mgr->iommu_hdl, + &iova, &len, &discard_iova_start, &discard_iova_len); + if (rc) + return rc; + + hw_mgr->hfi_mem.io_mem.iova_len = len; + hw_mgr->hfi_mem.io_mem.iova_start = iova; + hw_mgr->hfi_mem.io_mem.discard_iova_start = discard_iova_start; + hw_mgr->hfi_mem.io_mem.discard_iova_len = discard_iova_len; + + CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu discard iova %llx len %llx", + hw_mgr->hw_mgr_name, iova, len, discard_iova_start, discard_iova_len); + + return rc; +} + +static int cam_icp_allocate_hfi_mem(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + struct cam_smmu_region_info fwuncached_region_info = {0}; + bool fwuncached_region_exists = false; + size_t qtbl_size, cmdq_size, msgq_size, dbgq_size, sfr_size, sec_heap_size; + + rc = cam_smmu_get_region_info(hw_mgr->iommu_hdl, + CAM_SMMU_REGION_SHARED, &hw_mgr->hfi_mem.shmem); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to get shared memory info rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + rc = cam_icp_allocate_fw_mem(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to allocate FW memory rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + rc = cam_icp_allocate_qdss_mem(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to allocate qdss memory rc %d", + hw_mgr->hw_mgr_name, rc); + goto fw_alloc_failed; + } + + /* + * Compute sizes aligned to page size, and add a padding + * of a page between regions + */ + qtbl_size = ALIGN(ICP_QTBL_SIZE_IN_BYTES, PAGE_SIZE) + PAGE_SIZE; + cmdq_size = ALIGN(ICP_CMD_Q_SIZE_IN_BYTES, PAGE_SIZE) + PAGE_SIZE; + msgq_size = ALIGN(ICP_MSG_Q_SIZE_IN_BYTES, PAGE_SIZE) + PAGE_SIZE; + dbgq_size = ALIGN(ICP_DBG_Q_SIZE_IN_BYTES, PAGE_SIZE) + PAGE_SIZE; + sfr_size = ALIGN(ICP_MSG_SFR_SIZE_IN_BYTES, PAGE_SIZE) + PAGE_SIZE; + sec_heap_size = ALIGN(ICP_SEC_HEAP_SIZE_IN_BYTES, PAGE_SIZE) + 
PAGE_SIZE; + + rc = cam_smmu_get_region_info(hw_mgr->iommu_hdl, + CAM_SMMU_REGION_FWUNCACHED, &fwuncached_region_info); + if (!rc) + fwuncached_region_exists = true; + + if (fwuncached_region_exists) { + struct cam_mem_mgr_request_desc alloc; + struct cam_mem_mgr_memory_desc out; + uint32_t offset; + + /* + * FW uncached consists of the qtbl, HFI queues, SFR buffer + * and secondary heap + */ + alloc.size = qtbl_size + cmdq_size + msgq_size + dbgq_size + + sfr_size + sec_heap_size; + alloc.align = 0; + alloc.flags = CAM_MEM_FLAG_KMD_ACCESS; + alloc.smmu_hdl = hw_mgr->iommu_hdl; + rc = cam_mem_mgr_reserve_memory_region(&alloc, + CAM_SMMU_REGION_FWUNCACHED, &out); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to reserve fw uncached memory rc %d", + hw_mgr->hw_mgr_name, rc); + goto qtbl_alloc_failed; + } + + hw_mgr->hfi_mem.fw_uncached_region = true; + /* + * If the FW uncached region is carved into different subregions + * send the whole region to FW, resulting in one ICP MMU entry. + * If reserving for FW uncached the SMMU will map the FW uncached + * generic, and return the appropriate va + * + */ + hw_mgr->hfi_mem.fw_uncached = fwuncached_region_info; + hw_mgr->hfi_mem.fw_uncached_generic = out; + + offset = 0; + + hw_mgr->hfi_mem.sec_heap.iova = out.iova + offset; + hw_mgr->hfi_mem.sec_heap.kva = out.kva + offset; + hw_mgr->hfi_mem.sec_heap.len = sec_heap_size; + hw_mgr->hfi_mem.sec_heap.smmu_hdl = out.smmu_hdl; + hw_mgr->hfi_mem.sec_heap.mem_handle = out.mem_handle; + hw_mgr->hfi_mem.sec_heap.region = out.region; + offset += (uint32_t)sec_heap_size; + + hw_mgr->hfi_mem.qtbl.iova = out.iova + offset; + hw_mgr->hfi_mem.qtbl.kva = out.kva + offset; + hw_mgr->hfi_mem.qtbl.len = qtbl_size; + hw_mgr->hfi_mem.qtbl.smmu_hdl = out.smmu_hdl; + hw_mgr->hfi_mem.qtbl.mem_handle = out.mem_handle; + hw_mgr->hfi_mem.qtbl.region = out.region; + offset += (uint32_t)qtbl_size; + + hw_mgr->hfi_mem.cmd_q.iova = out.iova + offset; + hw_mgr->hfi_mem.cmd_q.kva = out.kva + offset; + 
hw_mgr->hfi_mem.cmd_q.len = cmdq_size; + hw_mgr->hfi_mem.cmd_q.smmu_hdl = out.smmu_hdl; + hw_mgr->hfi_mem.cmd_q.mem_handle = out.mem_handle; + hw_mgr->hfi_mem.cmd_q.region = out.region; + offset += (uint32_t)cmdq_size; + + hw_mgr->hfi_mem.msg_q.iova = out.iova + offset; + hw_mgr->hfi_mem.msg_q.kva = out.kva + offset; + hw_mgr->hfi_mem.msg_q.len = msgq_size; + hw_mgr->hfi_mem.msg_q.smmu_hdl = out.smmu_hdl; + hw_mgr->hfi_mem.msg_q.mem_handle = out.mem_handle; + hw_mgr->hfi_mem.msg_q.region = out.region; + offset += (uint32_t)msgq_size; + + hw_mgr->hfi_mem.dbg_q.iova = out.iova + offset; + hw_mgr->hfi_mem.dbg_q.kva = out.kva + offset; + hw_mgr->hfi_mem.dbg_q.len = dbgq_size; + hw_mgr->hfi_mem.dbg_q.smmu_hdl = out.smmu_hdl; + hw_mgr->hfi_mem.dbg_q.mem_handle = out.mem_handle; + hw_mgr->hfi_mem.dbg_q.region = out.region; + offset += (uint32_t)dbgq_size; + + hw_mgr->hfi_mem.sfr_buf.iova = out.iova + offset; + hw_mgr->hfi_mem.sfr_buf.kva = out.kva + offset; + hw_mgr->hfi_mem.sfr_buf.len = sfr_size; + hw_mgr->hfi_mem.sfr_buf.smmu_hdl = out.smmu_hdl; + hw_mgr->hfi_mem.sfr_buf.mem_handle = out.mem_handle; + hw_mgr->hfi_mem.sfr_buf.region = out.region; + offset += (uint32_t)sfr_size; + + if (offset > out.len) { + CAM_ERR(CAM_ICP, + "[%s] FW uncached region size %lld not enough, required %lld", + hw_mgr->hw_mgr_name, offset, out.len); + cam_mem_mgr_free_memory_region(&hw_mgr->hfi_mem.fw_uncached_generic); + goto qtbl_alloc_failed; + } + } else { + rc = cam_icp_alloc_shared_mem(hw_mgr, qtbl_size, &hw_mgr->hfi_mem.qtbl); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to allocate qtbl memory, rc %d", + hw_mgr->hw_mgr_name, rc); + goto qtbl_alloc_failed; + } + + rc = cam_icp_alloc_shared_mem(hw_mgr, cmdq_size, &hw_mgr->hfi_mem.cmd_q); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to allocate cmd q memory rc %d", + hw_mgr->hw_mgr_name, rc); + goto cmd_q_alloc_failed; + } + + rc = cam_icp_alloc_shared_mem(hw_mgr, msgq_size, &hw_mgr->hfi_mem.msg_q); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] 
Unable to allocate msg q memory rc %d", + hw_mgr->hw_mgr_name, rc); + goto msg_q_alloc_failed; + } + + rc = cam_icp_alloc_shared_mem(hw_mgr, dbgq_size, &hw_mgr->hfi_mem.dbg_q); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to allocate dbg q memory rc %d", + hw_mgr->hw_mgr_name, rc); + goto dbg_q_alloc_failed; + } + + rc = cam_icp_alloc_shared_mem(hw_mgr, sfr_size, &hw_mgr->hfi_mem.sfr_buf); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to allocate sfr buffer rc %d", + hw_mgr->hw_mgr_name, rc); + goto sfr_buf_alloc_failed; + } + + rc = cam_icp_alloc_secheap_mem(hw_mgr, &hw_mgr->hfi_mem.sec_heap); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to allocate sec heap memory rc %d", + hw_mgr->hw_mgr_name, rc); + goto sec_heap_alloc_failed; + } + } + + CAM_DBG(CAM_ICP, "[%s] Shared Region [0x%x %lld] FW Uncached nested Region [0x%x %lld]", + hw_mgr->hw_mgr_name, + hw_mgr->hfi_mem.shmem.iova_start, + hw_mgr->hfi_mem.shmem.iova_len, + fwuncached_region_info.iova_start, + fwuncached_region_info.iova_len); + + CAM_DBG(CAM_ICP, + "[%s] FwUncached[0x%x %lld] FwUncached_Generic[0x%x %p %lld] QTbl[0x%x %p %lld] CmdQ[0x%x %p %lld] MsgQ[0x%x %p %lld]", + hw_mgr->hw_mgr_name, + hw_mgr->hfi_mem.fw_uncached.iova_start, + hw_mgr->hfi_mem.fw_uncached.iova_len, + hw_mgr->hfi_mem.fw_uncached_generic.iova, + hw_mgr->hfi_mem.fw_uncached_generic.kva, + hw_mgr->hfi_mem.fw_uncached_generic.len, + hw_mgr->hfi_mem.qtbl.iova, + hw_mgr->hfi_mem.qtbl.kva, + hw_mgr->hfi_mem.qtbl.len, + hw_mgr->hfi_mem.cmd_q.iova, + hw_mgr->hfi_mem.cmd_q.kva, + hw_mgr->hfi_mem.cmd_q.len, + hw_mgr->hfi_mem.msg_q.iova, + hw_mgr->hfi_mem.msg_q.kva, + hw_mgr->hfi_mem.msg_q.len); + + CAM_DBG(CAM_ICP, + "[%s] DbgQ[0x%x %p %lld] SFR[0x%x %p %lld] SecHeap[0x%x %p %lld]", + hw_mgr->hw_mgr_name, + hw_mgr->hfi_mem.dbg_q.iova, + hw_mgr->hfi_mem.dbg_q.kva, + hw_mgr->hfi_mem.dbg_q.len, + hw_mgr->hfi_mem.sfr_buf.iova, + hw_mgr->hfi_mem.sfr_buf.kva, + hw_mgr->hfi_mem.sfr_buf.len, + hw_mgr->hfi_mem.sec_heap.iova, + 
hw_mgr->hfi_mem.sec_heap.kva, + hw_mgr->hfi_mem.sec_heap.len); + + rc = cam_icp_get_io_mem_info(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to get I/O region info rc %d", + hw_mgr->hw_mgr_name, rc); + if (fwuncached_region_exists) { + cam_mem_mgr_free_memory_region( + &hw_mgr->hfi_mem.fw_uncached_generic); + goto qtbl_alloc_failed; + } else { + goto get_io_mem_failed; + } + } + + if (hw_mgr->fw_based_sys_caching || hw_mgr->synx_signaling_en) { + rc = cam_smmu_get_region_info(hw_mgr->iommu_hdl, + CAM_SMMU_REGION_DEVICE, &hw_mgr->hfi_mem.device); + if (!rc) { + if (hw_mgr->fw_based_sys_caching) { + rc = cam_icp_allocate_llcc_register_mem(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Unable to allocate llcc memory rc %d", + hw_mgr->hw_mgr_name, rc); + if (fwuncached_region_exists) { + cam_mem_mgr_free_memory_region( + &hw_mgr->hfi_mem.fw_uncached_generic); + goto qtbl_alloc_failed; + } else { + goto get_io_mem_failed; + } + } + } + + /* Allocate sync global mem & hwmutex for IPC */ + if (hw_mgr->synx_signaling_en) { + rc = cam_icp_allocate_mem_for_fence_signaling(hw_mgr); + if (rc) { + if (hw_mgr->fw_based_sys_caching) { + cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, + CAM_SMMU_REGION_DEVICE, + CAM_SMMU_SUBREGION_LLCC_REGISTER); + } + if (fwuncached_region_exists) { + cam_mem_mgr_free_memory_region( + &hw_mgr->hfi_mem.fw_uncached_generic); + goto qtbl_alloc_failed; + } else { + goto get_io_mem_failed; + } + } + } + } else { + CAM_ERR(CAM_ICP, "[%s] Unable to get device memory info rc %d", + hw_mgr->hw_mgr_name, rc); + if (fwuncached_region_exists) { + cam_mem_mgr_free_memory_region( + &hw_mgr->hfi_mem.fw_uncached_generic); + goto qtbl_alloc_failed; + } else { + goto get_io_mem_failed; + } + } + } + + return rc; + +get_io_mem_failed: + cam_mem_mgr_free_memory_region(&hw_mgr->hfi_mem.sec_heap); +sec_heap_alloc_failed: + cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.sfr_buf); +sfr_buf_alloc_failed: + cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.dbg_q); 
+dbg_q_alloc_failed: + cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.msg_q); +msg_q_alloc_failed: + cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.cmd_q); +cmd_q_alloc_failed: + cam_mem_mgr_release_mem(&hw_mgr->hfi_mem.qtbl); +qtbl_alloc_failed: + cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_QDSS, 0); +fw_alloc_failed: + cam_icp_free_fw_mem(hw_mgr); + return rc; +} + +static int cam_icp_mgr_allocate_ctx( + struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data **icp_ctx_data, + uint32_t *icp_ctx_id) +{ + struct cam_icp_hw_ctx_data *cur_ctx_data; + struct cam_icp_hw_ctx_data *ctx_data; + struct list_head *next_list_head = &hw_mgr->active_ctx_info.active_ctx_list; + uint32_t i = 0, size; + + ctx_data = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_data), GFP_KERNEL); + if (!ctx_data) { + CAM_ERR(CAM_ICP, "Failed to allocate ctx data in the queue"); + return -ENOMEM; + } + + *icp_ctx_data = ctx_data; + if (list_empty(&hw_mgr->active_ctx_info.active_ctx_list)) + goto add_ctx_data; + + /* Context data queue should be in ascending order */ + list_for_each_entry(cur_ctx_data, + &hw_mgr->active_ctx_info.active_ctx_list, list) { + if (i != cur_ctx_data->ctx_id) { + next_list_head = &cur_ctx_data->list; + goto add_ctx_data; + } + i += 1; + } + +add_ctx_data: + *icp_ctx_id = i; + ctx_data->ctx_id = i; + ctx_data->state = CAM_ICP_CTX_STATE_IN_USE; + if (cam_presil_mode_enabled()) { + size = CAM_FRAME_CMD_MAX * sizeof(struct cam_hangdump_mem_regions); + ctx_data->hfi_frame_process.hangdump_mem_regions = + CAM_MEM_ZALLOC(size, GFP_KERNEL); + } + list_add_tail(&ctx_data->list, next_list_head); + + hw_mgr->ctx_acquired_timestamp[i] = ktime_get_boottime_ns(); + + set_bit(i, hw_mgr->active_ctx_info.active_ctx_bitmap); + return 0; +} + +static inline void cam_icp_mgr_put_ctx( + struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data) +{ + /* Reset bit for that ctx data to indicate its unavailable now */ + clear_bit(ctx_data->ctx_id, 
		hw_mgr->active_ctx_info.active_ctx_bitmap);

	if (cam_presil_mode_enabled()) {
		CAM_MEM_FREE(ctx_data->hfi_frame_process.hangdump_mem_regions);
		ctx_data->hfi_frame_process.hangdump_mem_regions = NULL;
	}
	list_del(&ctx_data->list);
	CAM_MEM_FREE(ctx_data);
	ctx_data = NULL;
}

/*
 * cam_icp_mgr_send_pc_prep - ask the ICP FW to prepare for power collapse.
 *
 * Sends CAM_ICP_CMD_PC_PREP to the device, then waits (up to 5 s) for the
 * FW's HFI_MSG_SYS_PC_PREP_DONE to complete icp_complete.
 * Returns 0 on ack, -ETIMEDOUT on timeout, or the process_cmd rc.
 */
static int cam_icp_mgr_send_pc_prep(struct cam_icp_hw_mgr *hw_mgr)
{
	int rc;
	struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf;
	unsigned long rem_jiffies;
	int timeout = 5000;	/* ms to wait for PC_PREP_DONE */

	if (!icp_dev_intf) {
		CAM_ERR(CAM_ICP, "[%s] device interface is NULL",
			hw_mgr->hw_mgr_name);
		return -EINVAL;
	}

	/* Re-arm before sending so the DONE message cannot be missed */
	reinit_completion(&hw_mgr->icp_complete);

	CAM_DBG(CAM_ICP, "[%s] Sending pc prep command", hw_mgr->hw_mgr_name);
	rc = icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv,
		CAM_ICP_CMD_PC_PREP, NULL, 0);
	if (rc)
		return rc;

	CAM_DBG(CAM_ICP, "[%s] Wait for PC_PREP_DONE Message\n", hw_mgr->hw_mgr_name);
	rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG(
		&hw_mgr->icp_complete,
		msecs_to_jiffies((timeout)), CAM_ICP,
		"[%s] FW response timeout for PC PREP handle command", hw_mgr->hw_mgr_name);
	if (!rem_jiffies)
		rc = -ETIMEDOUT;
	CAM_DBG(CAM_ICP, "[%s] Done Waiting for PC_PREP Message\n", hw_mgr->hw_mgr_name);

	return rc;
}

/*
 * cam_icp_device_deint - deinit every HW device instance registered with
 * this hw mgr (NOTE(review): name looks like a typo of "deinit"; renaming
 * would break callers, so it is only flagged here).
 *
 * Continues past per-device deinit failures (logging each) and returns the
 * last rc; returns -EINVAL immediately on a NULL device interface.
 */
static int cam_icp_device_deint(struct cam_icp_hw_mgr *hw_mgr)
{
	struct cam_icp_hw_device_info *dev_info = NULL;
	struct cam_hw_intf *dev_intf = NULL;
	int rc = 0, i, j;

	for (i = 0; i < hw_mgr->num_dev_info; i++) {
		dev_info = &hw_mgr->dev_info[i];
		for (j = 0; j < dev_info->hw_dev_cnt; j++) {
			dev_intf = dev_info->dev_intf[j];
			if (!dev_intf) {
				CAM_ERR(CAM_ICP, "[%s] Device intf for %s[%u] is NULL",
					hw_mgr->hw_mgr_name, dev_info->dev_name, j);
				return -EINVAL;
			}
			rc = dev_intf->hw_ops.deinit(dev_intf->hw_priv, NULL, 0);
			if (rc) {
				CAM_ERR(CAM_ICP, "[%s] %s[%u] failed to deinit rc: %d",
					hw_mgr->hw_mgr_name, dev_info->dev_name, j, rc);
			}
		}
	}

	return rc;
}

static int
cam_icp_mgr_hw_close_u(void *hw_priv, void *hw_close_args)
{
	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
	struct cam_icp_mgr_hw_args hw_args = {0};
	struct cam_icp_mgr_hw_args *close_args = hw_close_args;
	int rc = 0;

	/*
	 * UMD-initiated close: takes hw_mgr_mutex, builds default close args
	 * when none are supplied, and clears the recovery flag afterwards.
	 */
	if (!hw_mgr) {
		CAM_ERR(CAM_ICP, "Null hw mgr");
		return 0;
	}

	mutex_lock(&hw_mgr->hw_mgr_mutex);
	if (!close_args) {
		/* Default args: mirror current hw mgr state */
		hw_args.hfi_setup = hw_mgr->hfi_init_done;
		hw_args.use_proxy_boot_up = CAM_IS_SECONDARY_VM();
		hw_args.icp_pc = hw_mgr->icp_pc_flag;
		close_args = &hw_args;
	}

	CAM_DBG(CAM_ICP, "[%s] UMD calls close", hw_mgr->hw_mgr_name);

	rc = cam_icp_mgr_hw_close(hw_mgr, close_args);
	atomic_set(&hw_mgr->recovery, 0);
	mutex_unlock(&hw_mgr->hw_mgr_mutex);

	return rc;
}

/*
 * cam_icp_mgr_hw_close_k - kernel-initiated close. Same as the UMD variant
 * but takes no hw_mgr_mutex and does not clear the recovery flag
 * (NOTE(review): callers are presumed to already hold the mutex — verify).
 */
static int cam_icp_mgr_hw_close_k(void *hw_priv, void *hw_close_args)
{
	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
	struct cam_icp_mgr_hw_args hw_args;
	struct cam_icp_mgr_hw_args *close_args = hw_close_args;

	if (!hw_mgr) {
		CAM_ERR(CAM_ICP, "Null hw mgr");
		return 0;
	}

	if (!close_args) {
		hw_args.hfi_setup = hw_mgr->hfi_init_done;
		hw_args.use_proxy_boot_up = CAM_IS_SECONDARY_VM();
		hw_args.icp_pc = hw_mgr->icp_pc_flag;
		close_args = &hw_args;
	}

	CAM_DBG(CAM_ICP, "[%s] KMD calls close", hw_mgr->hw_mgr_name);

	return cam_icp_mgr_hw_close(hw_mgr, close_args);

}

/*
 * cam_icp_mgr_proc_resume - power up the ICP processor, either directly via
 * CAM_ICP_CMD_POWER_RESUME or (when use_proxy_boot_up) by messaging the PVM.
 * Sets icp_resumed on success.
 */
static int cam_icp_mgr_proc_resume(struct cam_icp_hw_mgr *hw_mgr, bool use_proxy_boot_up)
{
	struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf;
	int rc = 0;

	if (!icp_dev_intf) {
		CAM_ERR(CAM_ICP, "ICP device interface is NULL");
		return -EINVAL;
	}

	CAM_DBG(CAM_ICP, "%s use_proxy_boot_up:%s", hw_mgr->hw_mgr_name,
		CAM_BOOL_TO_YESNO(use_proxy_boot_up));

	if (!use_proxy_boot_up) {
		rc = icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv,
			CAM_ICP_CMD_POWER_RESUME, &hw_mgr->icp_jtag_debug,
			sizeof(hw_mgr->icp_jtag_debug));
	} else {
		rc = cam_icp_vm_send_msg(hw_mgr, CAM_PVM, CAM_ICP_POWER_RESUME, true);
		if (rc) {
CAM_ERR(CAM_ICP, "[%s] Failed in sending icp resume command to PVM rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + } + + if (!rc) + hw_mgr->icp_resumed = true; + + return rc; +} + +static void cam_icp_mgr_proc_suspend(struct cam_icp_hw_mgr *hw_mgr, bool use_proxy_boot_up) +{ + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + int rc = 0; + + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "ICP device interface is NULL"); + return; + } + + CAM_DBG(CAM_ICP, "%s use_proxy_boot_up:%d", hw_mgr->hw_mgr_name, + CAM_BOOL_TO_YESNO(use_proxy_boot_up)); + + if (!use_proxy_boot_up) { + rc = icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv, + CAM_ICP_CMD_POWER_COLLAPSE, NULL, 0); + } else { + rc = cam_icp_vm_send_msg(hw_mgr, CAM_PVM, CAM_ICP_POWER_COLLAPSE, + true); + if (rc) { + CAM_ERR(CAM_ICP, + "[%s] Failed in sending icp power_collapse command to PVM rc %d", + hw_mgr->hw_mgr_name, rc); + } + } + + if (rc) + CAM_ERR(CAM_ICP, "[%s] Fail to suspend processor rc %d", + hw_mgr->hw_mgr_name, rc); + + hw_mgr->icp_resumed = false; +} + +static int __power_collapse(struct cam_icp_hw_mgr *hw_mgr, struct cam_icp_mgr_hw_args *pc_args) +{ + int rc = 0; + + if (!pc_args) { + CAM_ERR(CAM_ICP, "pc_args cannot be NULL"); + return -EINVAL; + } + + if ((!pc_args->use_proxy_boot_up) && + (!hw_mgr->icp_pc_flag || atomic_read(&hw_mgr->recovery))) { + cam_icp_mgr_proc_suspend(hw_mgr, pc_args->use_proxy_boot_up); + + rc = cam_icp_mgr_hw_close_k(hw_mgr, pc_args); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Failed in hw close rc %d", + hw_mgr->hw_mgr_name, rc); + } else { + CAM_DBG(CAM_PERF, "[%s] Sending PC prep ICP PC enabled", + hw_mgr->hw_mgr_name); + + if (pc_args->hfi_setup) { + rc = cam_icp_mgr_send_pc_prep(hw_mgr); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Failed in send pc prep rc %d", + hw_mgr->hw_mgr_name, rc); + } + + cam_icp_mgr_proc_suspend(hw_mgr, pc_args->use_proxy_boot_up); + } + + return rc; +} + +static int cam_icp_mgr_icp_power_collapse( + struct cam_icp_hw_mgr *hw_mgr, + void 
*args) +{ + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + struct cam_icp_mgr_hw_args *pc_args = (struct cam_icp_mgr_hw_args *) args; + int rc; + bool send_freq_info = true; + + CAM_DBG(CAM_PERF, "[%s] ENTER", hw_mgr->hw_mgr_name); + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is NULL", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + rc = __power_collapse(hw_mgr, pc_args); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Fail to power collapse ICP rc: %d", + hw_mgr->hw_mgr_name, rc); + + if (!pc_args->skip_icp_init) { + rc = icp_dev_intf->hw_ops.deinit(icp_dev_intf->hw_priv, &send_freq_info, + sizeof(send_freq_info)); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Fail to deinit ICP", hw_mgr->hw_mgr_name); + } + + CAM_DBG(CAM_PERF, "[%s] EXIT", hw_mgr->hw_mgr_name); + + return rc; +} + +static int cam_icp_mgr_proc_boot(struct cam_icp_hw_mgr *hw_mgr, bool use_proxy_boot_up) +{ + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + struct cam_icp_boot_args args; + int rc = 0; + enum cam_icp_cmd_type boot_cmd = ((use_proxy_boot_up) ? 
+ CAM_ICP_CMD_PREP_BOOT : CAM_ICP_CMD_PROC_BOOT); + + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] invalid device interface", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + if (!hw_mgr->icp_use_pil) { + /* We handle the iommu mapping */ + args.firmware.iova = hw_mgr->hfi_mem.fw_buf.iova; + args.firmware.kva = hw_mgr->hfi_mem.fw_buf.kva; + args.firmware.len = hw_mgr->hfi_mem.fw_buf.len; + args.use_sec_pil = false; + } else { + args.use_sec_pil = true; + } + + args.irq_cb.data = hw_mgr; + args.irq_cb.cb = cam_icp_hw_mgr_cb; + args.debug_enabled = hw_mgr->icp_jtag_debug; + + rc = icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv, boot_cmd, &args, + sizeof(args)); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] processor boot failed rc=%d", hw_mgr->hw_mgr_name, rc); + return rc; + } + + if (use_proxy_boot_up) { + rc = cam_icp_vm_send_msg(hw_mgr, CAM_PVM, CAM_ICP_POWER_RESUME, true); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in sending icp boot up command to PVM rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + } + + hw_mgr->icp_resumed = true; + + return rc; +} + +static void cam_icp_mgr_proc_shutdown(struct cam_icp_hw_mgr *hw_mgr, bool use_proxy_boot_up, + bool skip_icp_init) +{ + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + bool send_freq_info = false, fw_dump = atomic_read(&hw_mgr->recovery); + enum cam_icp_cmd_type shutdown_cmd = ((use_proxy_boot_up) ? 
+ CAM_ICP_CMD_PREP_SHUTDOWN : CAM_ICP_CMD_PROC_SHUTDOWN); + int rc; + + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is NULL", hw_mgr->hw_mgr_name); + return; + } + if (!skip_icp_init) + icp_dev_intf->hw_ops.init(icp_dev_intf->hw_priv, &send_freq_info, + sizeof(send_freq_info)); + + icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv, shutdown_cmd, &fw_dump, + sizeof(bool)); + + if (use_proxy_boot_up) { + rc = cam_icp_vm_send_msg(hw_mgr, CAM_PVM, CAM_ICP_SHUTDOWN, true); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in sending icp boot up command to PVM rc %d", + hw_mgr->hw_mgr_name, rc); + } + } + + if (!skip_icp_init) + icp_dev_intf->hw_ops.deinit(icp_dev_intf->hw_priv, &send_freq_info, + sizeof(send_freq_info)); + + if (hw_mgr->synx_signaling_en) + cam_sync_synx_core_recovery(hw_mgr->synx_core_id); + + hw_mgr->icp_resumed = false; +} + +static void cam_icp_mgr_populate_hfi_mem_info(struct cam_icp_hw_mgr *hw_mgr, + struct hfi_mem_info *hfi_mem) +{ + hfi_mem->qtbl.kva = hw_mgr->hfi_mem.qtbl.kva; + hfi_mem->qtbl.iova = hw_mgr->hfi_mem.qtbl.iova; + hfi_mem->qtbl.len = hw_mgr->hfi_mem.qtbl.len; + + CAM_DBG(CAM_ICP, "[%s] qtbl kva = %llx IOVA = %x length = %lld\n", + hw_mgr->hw_mgr_name, hfi_mem->qtbl.kva, hfi_mem->qtbl.iova, hfi_mem->qtbl.len); + + hfi_mem->cmd_q.kva = hw_mgr->hfi_mem.cmd_q.kva; + hfi_mem->cmd_q.iova = hw_mgr->hfi_mem.cmd_q.iova; + hfi_mem->cmd_q.len = hw_mgr->hfi_mem.cmd_q.len; + CAM_DBG(CAM_ICP, "[%s] cmd_q kva = %llx IOVA = %x length = %lld\n", + hw_mgr->hw_mgr_name, hfi_mem->cmd_q.kva, hfi_mem->cmd_q.iova, hfi_mem->cmd_q.len); + + hfi_mem->msg_q.kva = hw_mgr->hfi_mem.msg_q.kva; + hfi_mem->msg_q.iova = hw_mgr->hfi_mem.msg_q.iova; + hfi_mem->msg_q.len = hw_mgr->hfi_mem.msg_q.len; + CAM_DBG(CAM_ICP, "[%s] msg_q kva = %llx IOVA = %x length = %lld\n", + hw_mgr->hw_mgr_name, hfi_mem->msg_q.kva, hfi_mem->msg_q.iova, hfi_mem->msg_q.len); + + hfi_mem->dbg_q.kva = hw_mgr->hfi_mem.dbg_q.kva; + hfi_mem->dbg_q.iova = 
hw_mgr->hfi_mem.dbg_q.iova; + hfi_mem->dbg_q.len = hw_mgr->hfi_mem.dbg_q.len; + CAM_DBG(CAM_ICP, "[%s] dbg_q kva = %llx IOVA = %x length = %lld\n", + hw_mgr->hw_mgr_name, hfi_mem->dbg_q.kva, hfi_mem->dbg_q.iova, hfi_mem->dbg_q.len); + + hfi_mem->sfr_buf.kva = hw_mgr->hfi_mem.sfr_buf.kva; + hfi_mem->sfr_buf.iova = hw_mgr->hfi_mem.sfr_buf.iova; + hfi_mem->sfr_buf.len = hw_mgr->hfi_mem.sfr_buf.len; + CAM_DBG(CAM_ICP, "[%s] sfr kva = %llx IOVA = %x length = %lld\n", + hw_mgr->hw_mgr_name, hfi_mem->sfr_buf.kva, hfi_mem->sfr_buf.iova, + hfi_mem->sfr_buf.len); + + hfi_mem->sec_heap.kva = hw_mgr->hfi_mem.sec_heap.kva; + hfi_mem->sec_heap.iova = hw_mgr->hfi_mem.sec_heap.iova; + hfi_mem->sec_heap.len = hw_mgr->hfi_mem.sec_heap.len; + + hfi_mem->shmem.iova = hw_mgr->hfi_mem.shmem.iova_start; + hfi_mem->shmem.len = hw_mgr->hfi_mem.shmem.iova_len; + + hfi_mem->qdss.iova = hw_mgr->hfi_mem.qdss_buf.iova; + hfi_mem->qdss.len = hw_mgr->hfi_mem.qdss_buf.len; + + if (hw_mgr->synx_signaling_en || hw_mgr->fw_based_sys_caching) { + hfi_mem->device_mem.iova = hw_mgr->hfi_mem.device.iova_start; + hfi_mem->device_mem.len = hw_mgr->hfi_mem.device.iova_len; + CAM_DBG(CAM_ICP, + "device memory [iova = 0x%llx len = 0x%llx]", + hfi_mem->device_mem.iova, hfi_mem->device_mem.len); + } + + if (hw_mgr->hfi_mem.io_mem.discard_iova_start && + hw_mgr->hfi_mem.io_mem.discard_iova_len) { + /* IO Region 1 */ + hfi_mem->io_mem.iova = hw_mgr->hfi_mem.io_mem.iova_start; + hfi_mem->io_mem.len = + hw_mgr->hfi_mem.io_mem.discard_iova_start - + hw_mgr->hfi_mem.io_mem.iova_start; + + /* IO Region 2 */ + hfi_mem->io_mem2.iova = + hw_mgr->hfi_mem.io_mem.discard_iova_start + + hw_mgr->hfi_mem.io_mem.discard_iova_len; + hfi_mem->io_mem2.len = + hw_mgr->hfi_mem.io_mem.iova_start + + hw_mgr->hfi_mem.io_mem.iova_len - + hfi_mem->io_mem2.iova; + } else { + /* IO Region 1 */ + hfi_mem->io_mem.iova = hw_mgr->hfi_mem.io_mem.iova_start; + hfi_mem->io_mem.len = hw_mgr->hfi_mem.io_mem.iova_len; + + /* IO Region 2 */ + 
hfi_mem->io_mem2.iova = 0x0; + hfi_mem->io_mem2.len = 0x0; + } + + hfi_mem->fw_uncached.iova = hw_mgr->hfi_mem.fw_uncached.iova_start; + hfi_mem->fw_uncached.len = hw_mgr->hfi_mem.fw_uncached.iova_len; + + CAM_DBG(CAM_ICP, + "[%s] IO region1 IOVA = %X length = %lld, IO region2 IOVA = %X length = %lld", + hw_mgr->hw_mgr_name, + hfi_mem->io_mem.iova, + hfi_mem->io_mem.len, + hfi_mem->io_mem2.iova, + hfi_mem->io_mem2.len); +} + +static int cam_icp_mgr_hfi_resume(struct cam_icp_hw_mgr *hw_mgr) +{ + return cam_hfi_resume(hw_mgr->hfi_handle); +} + +static int cam_icp_mgr_populate_abort_cmd(struct cam_icp_hw_ctx_data *ctx_data, + struct hfi_cmd_dev_async **abort_cmd_ptr) +{ + struct hfi_cmd_dev_async *abort_cmd = NULL; + uint32_t opcode, pkt_type; + size_t packet_size; + + packet_size = sizeof(struct hfi_cmd_dev_async); + + switch (ctx_data->device_info->hw_dev_type) { + case CAM_ICP_DEV_BPS: + pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT; + packet_size = packet_size + sizeof(struct hfi_cmd_abort) - + sizeof(((struct hfi_cmd_dev_async *)0)->payload.direct); + opcode = HFI_IPEBPS_CMD_OPCODE_BPS_ABORT; + break; + case CAM_ICP_DEV_IPE: + pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT; + packet_size = packet_size + sizeof(struct hfi_cmd_abort) - + sizeof(((struct hfi_cmd_dev_async *)0)->payload.direct); + opcode = HFI_IPEBPS_CMD_OPCODE_IPE_ABORT; + break; + case CAM_ICP_DEV_OFE: + pkt_type = HFI_CMD_OFE_ASYNC_COMMAND_DIRECT; + packet_size = packet_size + sizeof(struct hfi_cmd_abort) - + sizeof(((struct hfi_cmd_dev_async *)0)->payload.direct); + opcode = HFI_OFE_CMD_OPCODE_ABORT; + break; + default: + CAM_ERR(CAM_ICP, + "%s: Invalid device type not supported: %u", + ctx_data->ctx_id_string, ctx_data->device_info->hw_dev_type); + return -EINVAL; + } + + abort_cmd = CAM_MEM_ZALLOC(packet_size, GFP_KERNEL); + if (!abort_cmd) + return -ENOMEM; + + CAM_DBG(CAM_ICP, "%s: abort pkt size = %d", + ctx_data->ctx_id_string, (int)packet_size); + + abort_cmd->size = packet_size; + 
abort_cmd->pkt_type = pkt_type; + abort_cmd->opcode = opcode; + abort_cmd->num_fw_handles = 1; + abort_cmd->fw_handles_flex[0] = ctx_data->fw_handle; + abort_cmd->user_data1 = PTR_TO_U64(ctx_data); + abort_cmd->user_data2 = (uint64_t)ctx_data->ctx_id; + + *abort_cmd_ptr = abort_cmd; + + return 0; +} + +static int cam_icp_mgr_abort_handle_wq( + void *priv, void *data) +{ + int rc = 0; + struct cam_icp_hw_mgr *hw_mgr; + struct hfi_cmd_work_data *task_data = NULL; + struct cam_icp_hw_ctx_data *ctx_data; + struct hfi_cmd_dev_async *abort_cmd; + struct cam_icp_hw_ctx_info *ctx_info; + + if (!data || !priv) { + CAM_ERR(CAM_ICP, "Invalid params %pK %pK", data, priv); + return -EINVAL; + } + + task_data = (struct hfi_cmd_work_data *)data; + + ctx_info = (struct cam_icp_hw_ctx_info *)task_data->data; + if (!ctx_info) { + CAM_ERR(CAM_ICP, "Invalid ctx_info"); + return -EINVAL; + } + + hw_mgr = ctx_info->hw_mgr; + if (!test_bit(ctx_info->ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap)) { + CAM_WARN(CAM_ICP, "ctx data is released before accessing it, ctx_id: %u", + ctx_info->ctx_id); + goto free_ctx_info; + } + + ctx_data = ctx_info->ctx_data; + if (!ctx_data) { + CAM_ERR(CAM_ICP, "Invalid ctx_data, ctx_id: %d", ctx_info->ctx_id); + rc = -EINVAL; + goto free_ctx_info; + } + + rc = cam_icp_mgr_populate_abort_cmd(ctx_data, &abort_cmd); + if (rc) + goto free_ctx_info; + + rc = hfi_write_cmd(hw_mgr->hfi_handle, abort_cmd); + if (rc) + goto free_abort_cmd; + + CAM_DBG(CAM_ICP, "%s: fw_handle = 0x%x ctx_data = %pK", + ctx_data->ctx_id_string, ctx_data->fw_handle, ctx_data); + +free_abort_cmd: + CAM_MEM_FREE(abort_cmd); +free_ctx_info: + CAM_MEM_FREE(ctx_info); + return rc; +} + +static int cam_icp_mgr_abort_handle(struct cam_icp_hw_ctx_data *ctx_data) +{ + int rc = 0; + unsigned long rem_jiffies = 0; + int timeout = 2000; + struct hfi_cmd_dev_async *abort_cmd; + struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv; + + if (atomic_read(&hw_mgr->recovery)) + return 0; + + rc = 
cam_icp_mgr_populate_abort_cmd(ctx_data, &abort_cmd); + if (rc) + return rc; + + reinit_completion(&ctx_data->wait_complete); + + rc = hfi_write_cmd(hw_mgr->hfi_handle, abort_cmd); + if (rc) { + CAM_MEM_FREE(abort_cmd); + return rc; + } + CAM_DBG(CAM_ICP, "%s: fw_handle = 0x%x ctx_data = %pK", + ctx_data->ctx_id_string, ctx_data->fw_handle, ctx_data); + rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG( + &ctx_data->wait_complete, + msecs_to_jiffies(timeout), CAM_ICP, + "%s: FW response timeout for Abort handle command", + ctx_data->ctx_id_string); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + cam_icp_dump_debug_info(ctx_data->hw_mgr_priv, false); + ctx_data->abort_timed_out = true; + } + + CAM_MEM_FREE(abort_cmd); + return rc; +} + +static int cam_icp_mgr_destroy_handle( + struct cam_icp_hw_ctx_data *ctx_data) +{ + int rc = 0, opcode, pkt_type; + int timeout = 1000; + unsigned long rem_jiffies; + size_t packet_size; + struct hfi_cmd_dev_async *destroy_cmd; + struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv; + + if (atomic_read(&hw_mgr->recovery)) + return 0; + + packet_size = sizeof(struct hfi_cmd_dev_async); + + switch (ctx_data->device_info->hw_dev_type) { + case CAM_ICP_DEV_BPS: + pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT; + packet_size = packet_size + sizeof(struct hfi_cmd_destroy) - + sizeof(((struct hfi_cmd_dev_async *)0)->payload.direct); + opcode = HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY; + break; + case CAM_ICP_DEV_IPE: + pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT; + packet_size = packet_size + sizeof(struct hfi_cmd_destroy) - + sizeof(((struct hfi_cmd_dev_async *)0)->payload.direct); + opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY; + break; + case CAM_ICP_DEV_OFE: + pkt_type = HFI_CMD_OFE_ASYNC_COMMAND_DIRECT; + packet_size = packet_size + sizeof(struct hfi_cmd_destroy) - + sizeof(((struct hfi_cmd_dev_async *)0)->payload.direct); + opcode = HFI_OFE_CMD_OPCODE_DESTROY; + break; + default: + CAM_ERR(CAM_ICP, "%s: Invalid hw dev type not 
supported: %u", + ctx_data->ctx_id_string, ctx_data->device_info->hw_dev_type); + return -EINVAL; + } + + destroy_cmd = CAM_MEM_ZALLOC(packet_size, GFP_KERNEL); + if (!destroy_cmd) { + rc = -ENOMEM; + return rc; + } + + destroy_cmd->size = packet_size; + destroy_cmd->pkt_type = pkt_type; + destroy_cmd->opcode = opcode; + destroy_cmd->num_fw_handles = 1; + destroy_cmd->fw_handles_flex[0] = ctx_data->fw_handle; + destroy_cmd->user_data1 = PTR_TO_U64(ctx_data); + destroy_cmd->user_data2 = (uint64_t)ctx_data->ctx_id; + + reinit_completion(&ctx_data->wait_complete); + + rc = hfi_write_cmd(hw_mgr->hfi_handle, destroy_cmd); + if (rc) { + CAM_MEM_FREE(destroy_cmd); + return rc; + } + CAM_DBG(CAM_ICP, "%s: fw_handle = 0x%x ctx_data = %pK", + ctx_data->ctx_id_string, ctx_data->fw_handle, ctx_data); + rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG( + &ctx_data->wait_complete, + msecs_to_jiffies(timeout), CAM_ICP, + "%s: FW response timeout for Destroy handle command", + ctx_data->ctx_id_string); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + cam_icp_dump_debug_info(hw_mgr, ctx_data->abort_timed_out); + } + CAM_MEM_FREE(destroy_cmd); + return rc; +} + +static int cam_icp_handle_secure_port_config( + struct cam_icp_hw_ctx_data *ctx_data, bool protect) +{ + struct cam_cpas_cp_mapping_config_info cp_config = {0}; + int i, j; + bool has_non_secure_ports = false; + int rc; + + if (ctx_data->acquire_dev_api_version == CAM_ACQUIRE_DEV_STRUCT_VERSION_1) { + CAM_DBG(CAM_ICP, "%s: Legacy acquire with all secure ports: %d", + ctx_data->ctx_id_string, ctx_data->state); + return 0; + } + + cp_config.device_type = cam_icp_get_camera_device_type( + ctx_data->device_info->hw_dev_type); + + if (!CAM_CPAS_IS_VALID_CAM_DEV_TYPE(cp_config.device_type)) { + CAM_ERR(CAM_ICP, + "Invalid CPAS camera device type %d", cp_config.device_type); + return -EINVAL; + } + + /* Configure all instances of an offline device type at once as + * it is not in the control of the software driver, rather with 
ICP, + * to decide which instance is used in the runtime. + */ + for (i = 0; i < ctx_data->device_info->hw_dev_cnt; i++) { + cp_config.hw_instance_id_mask |= 1 << i; + } + + cp_config.protect = protect; + for (i = 0, j = 0; i < CAM_MAX_OUTPUT_PORTS_PER_DEVICE; i++) { + enum cam_ipe_out_port_type cpas_port_id; + if (!ctx_data->port_security_map[i]) { + rc = cam_get_cpas_out_port_id( + ctx_data->device_info->hw_dev_type, i, &cpas_port_id); + if (!rc) { + cp_config.port_ids[j] = cpas_port_id; + CAM_DBG(CAM_ICP, "%s: Secure usecase on device %d has non-secure port %d", + ctx_data->ctx_id_string, + ctx_data->device_info->hw_dev_type, i); + j++; + } + else { + CAM_ERR(CAM_ICP, "%s: Invalid port for the device type requested - %d", + ctx_data->ctx_id_string, ctx_data->state); + return rc; + } + has_non_secure_ports = true; + } + } + cp_config.num_ports = j; + + if (has_non_secure_ports) { + rc = cam_cpas_config_cp_mapping_ctrl(&cp_config); + if (rc) { + CAM_ERR(CAM_ICP,"%s: Unable to configure the output ports for the usecase:%d", + ctx_data->ctx_id_string, ctx_data->state); + return rc; + } + } + + return 0; +} + + +static int cam_icp_mgr_release_ctx( + struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data) +{ + struct cam_icp_ctx_perf_stats *perf_stats; + int i = 0, ctx_id; + + ctx_id = ctx_data->ctx_id; + mutex_lock(&hw_mgr->ctx_mutex[ctx_id]); + perf_stats = &ctx_data->perf_stats; + CAM_DBG(CAM_PERF, + "Avg response time on %s: total_processed_requests: %llu avg_time: %llums", + ctx_data->ctx_id_string, perf_stats->total_requests, + perf_stats->total_requests ? 
+ (perf_stats->total_resp_time / perf_stats->total_requests) : 0); + + memset(&ctx_data->evt_inject_params, 0, + sizeof(struct cam_hw_inject_evt_param)); + cam_icp_remove_ctx_bw(hw_mgr, ctx_data); + if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) { + mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]); + CAM_DBG(CAM_ICP, + "%s: Not in right state to release: %d", + ctx_data->ctx_id_string, + ctx_data->state); + return 0; + } + cam_icp_mgr_dev_power_collapse(hw_mgr, + ctx_data, 0); + ctx_data->state = CAM_ICP_CTX_STATE_RELEASE; + CAM_DBG(CAM_ICP, "%s: E: Recovery: %s", + ctx_data->ctx_id_string, + CAM_BOOL_TO_YESNO(atomic_read(&hw_mgr->recovery))); + + cam_icp_mgr_abort_handle(ctx_data); + cam_icp_mgr_destroy_handle(ctx_data); + + if (ctx_data->icp_dev_acquire_info->secure_mode + == CAM_SECURE_MODE_SECURE) { + /* If this is the last secure context for the device, reset the + * port security configuration. + */ + if (!(--hw_mgr->num_secure_contexts[ctx_data->device_info->hw_dev_type])) + cam_icp_handle_secure_port_config(ctx_data, true); + } + + cam_icp_mgr_cleanup_ctx(ctx_data); + cam_icp_cpas_deactivate_llcc(ctx_data); + + ctx_data->fw_handle = 0; + ctx_data->scratch_mem_size = 0; + ctx_data->last_flush_req = 0; + for (i = 0; i < CAM_FRAME_CMD_MAX; i++) + clear_bit(i, ctx_data->hfi_frame_process.bitmap); + CAM_MEM_FREE(ctx_data->hfi_frame_process.bitmap); + ctx_data->hfi_frame_process.bitmap = NULL; + cam_icp_hw_mgr_clk_info_update(ctx_data); + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + hw_mgr->ctxt_cnt--; + CAM_MEM_FREE(ctx_data->icp_dev_acquire_info); + ctx_data->icp_dev_acquire_info = NULL; + cam_icp_ctx_timer_stop(ctx_data); + ctx_data->hw_mgr_priv = NULL; + + CAM_DBG(CAM_ICP, "[%s] X: ctx_id = %d", hw_mgr->hw_mgr_name, ctx_data->ctx_id); + + hw_mgr->ctx_acquired_timestamp[ctx_id] = 0; + + /* Free ctx data in the queue */ + cam_icp_mgr_put_ctx(hw_mgr, ctx_data); + mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]); + return 0; +} + +static unsigned long 
cam_icp_hw_mgr_mini_dump_cb(void *dst, unsigned long len, + void *priv_data) +{ + struct cam_icp_hw_mini_dump_info *md; + struct cam_icp_hw_ctx_mini_dump *ctx_md; + struct cam_icp_hw_ctx_data *ctx; + struct cam_icp_hw_mgr *hw_mgr = NULL; + struct cam_hw_mini_dump_args hw_dump_args; + struct cam_icp_hw_dump_args icp_dump_args; + struct cam_hw_intf *icp_dev_intf = NULL; + uint32_t i = 0, j = 0; + unsigned long dumped_len = 0; + unsigned long remain_len = len; + int rc = 0; + uint32_t hw_mgr_idx; + + if (!dst || len < sizeof(*md)) { + CAM_ERR(CAM_ICP, "Invalid params dst %pk len %lu", dst, len); + return 0; + } + + hw_mgr_idx = *((uint32_t *)priv_data); + if (hw_mgr_idx >= CAM_ICP_SUBDEV_MAX) { + CAM_ERR(CAM_ICP, "Invalid index to hw mgr: %u", hw_mgr_idx); + return 0; + } + + hw_mgr = g_icp_hw_mgr[hw_mgr_idx]; + if (!hw_mgr) { + CAM_ERR(CAM_ICP, "Uninitialized hw mgr for subdev: %u", hw_mgr_idx); + return 0; + } + + md = (struct cam_icp_hw_mini_dump_info *)dst; + md->num_context = 0; + cam_hfi_mini_dump(hw_mgr->hfi_handle, &md->hfi_info); + memcpy(md->hw_mgr_name, hw_mgr->hw_mgr_name, strlen(md->hw_mgr_name)); + memcpy(&md->hfi_mem_info, &hw_mgr->hfi_mem, + sizeof(struct icp_hfi_mem_info)); + memcpy(&md->dev_info, hw_mgr->dev_info, + hw_mgr->num_dev_info * sizeof(struct cam_icp_hw_device_info)); + md->num_device_info = hw_mgr->num_dev_info; + md->recovery = atomic_read(&hw_mgr->recovery); + md->icp_booted = hw_mgr->icp_booted; + md->icp_resumed = hw_mgr->icp_resumed; + md->disable_ubwc_comp = hw_mgr->disable_ubwc_comp; + md->icp_pc_flag = hw_mgr->icp_pc_flag; + md->icp_use_pil = hw_mgr->icp_use_pil; + + dumped_len += sizeof(*md); + remain_len = len - dumped_len; + list_for_each_entry(ctx, &hw_mgr->active_ctx_info.active_ctx_list, list) { + if (i >= CAM_ICP_CTX_MAX) { + CAM_WARN(CAM_ICP, "Mini dump can not hold that much ctx data"); + break; + } + + if (ctx->state == CAM_ICP_CTX_STATE_RELEASE) + continue; + + if (remain_len < sizeof(*ctx_md)) + goto end; + + 
md->num_context++; + ctx_md = (struct cam_icp_hw_ctx_mini_dump *) + ((uint8_t *)dst + dumped_len); + md->ctx[i] = ctx_md; + ctx_md->state = ctx->state; + ctx_md->ctx_id = ctx->ctx_id; + memcpy(ctx_md->ctx_id_string, ctx->ctx_id_string, + strlen(ctx->ctx_id_string)); + if (ctx->icp_dev_acquire_info) { + ctx_md->acquire.acquire_dev_api_version = ctx->acquire_dev_api_version; + ctx_md->acquire.secure_mode = + ctx->icp_dev_acquire_info->secure_mode; + ctx_md->acquire.dev_type = + ctx->icp_dev_acquire_info->dev_type; + ctx_md->acquire.num_out_res = + ctx->icp_dev_acquire_info->num_out_res; + memcpy(&ctx_md->acquire.in_res, + &ctx->icp_dev_acquire_info->in_res, + sizeof(struct cam_icp_res_info_unified)); + memcpy(ctx_md->acquire.out_res, + ctx->icp_dev_acquire_info->out_res_flex, + (sizeof(struct cam_icp_res_info_unified) * + ctx->icp_dev_acquire_info->num_out_res)); + } else { + memset(&ctx_md->acquire, 0, + sizeof(struct cam_icp_mini_dump_acquire_info)); + } + + memcpy(ctx_md->hfi_frame.request_id, ctx->hfi_frame_process.request_id, + sizeof(ctx->hfi_frame_process.request_id)); + memcpy(ctx_md->hfi_frame.in_resource, ctx->hfi_frame_process.in_resource, + sizeof(ctx->hfi_frame_process.in_resource)); + memcpy(ctx_md->hfi_frame.submit_timestamp, + ctx->hfi_frame_process.submit_timestamp, + sizeof(ctx->hfi_frame_process.submit_timestamp)); + memcpy(ctx_md->hfi_frame.num_out_res, ctx->hfi_frame_process.num_out_resources, + sizeof(ctx->hfi_frame_process.num_out_resources)); + memcpy(ctx_md->hfi_frame.out_res, ctx->hfi_frame_process.out_resource, + sizeof(ctx->hfi_frame_process.out_resource)); + for (j = 0; j < CAM_FRAME_CMD_MAX; j++) { + ctx_md->hfi_frame.fw_process_flag[j] = + ctx->hfi_frame_process.fw_process_flag[j]; + } + + dumped_len += sizeof(*ctx_md); + remain_len = len - dumped_len; + hw_dump_args.len = remain_len; + hw_dump_args.bytes_written = 0; + hw_dump_args.start_addr = (void *)((uint8_t *)dst + dumped_len); + hw_mgr->mini_dump_cb(ctx->context_priv, 
&hw_dump_args); + if (dumped_len + hw_dump_args.bytes_written >= len) + goto end; + + ctx_md->hw_ctx = hw_dump_args.start_addr; + dumped_len += hw_dump_args.bytes_written; + remain_len = len - dumped_len; + i += 1; + } + + /* Dump fw image */ + if (!hw_mgr->icp_use_pil) { + icp_dump_args.cpu_addr = (uintptr_t)((uint8_t *)dst + dumped_len); + icp_dump_args.offset = 0; + icp_dump_args.buf_len = remain_len; + icp_dev_intf = hw_mgr->icp_dev_intf; + rc = icp_dev_intf->hw_ops.process_cmd( + icp_dev_intf->hw_priv, + CAM_ICP_CMD_HW_MINI_DUMP, &icp_dump_args, + sizeof(struct cam_icp_hw_dump_args)); + if (rc) + goto end; + + dumped_len += icp_dump_args.offset; + md->fw_img = (void *)icp_dump_args.cpu_addr; + } +end: + return dumped_len; +} + +static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr) +{ + struct cam_icp_hw_device_info *dev_info = NULL; + struct cam_hw_intf *dev_intf = NULL; + int i, j; + bool send_freq_info = false; + + for (i = 0; i < hw_mgr->num_dev_info; i++) { + dev_info = &hw_mgr->dev_info[i]; + for (j = 0; j < dev_info->hw_dev_cnt; j++) { + dev_intf = dev_info->dev_intf[j]; + if (!dev_intf) { + CAM_ERR(CAM_ICP, "[%s] Device intf for %s[%u] is NULL", + hw_mgr->hw_mgr_name, dev_info->dev_name, j); + return; + } + dev_intf->hw_ops.deinit(dev_intf->hw_priv, NULL, 0); + } + } + + dev_intf = hw_mgr->icp_dev_intf; + if (!dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is NULL", + hw_mgr->hw_mgr_name); + return; + } + dev_intf->hw_ops.deinit(dev_intf->hw_priv, &send_freq_info, sizeof(send_freq_info)); +} + +static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args) +{ + struct cam_icp_hw_mgr *hw_mgr = hw_priv; + struct cam_icp_mgr_hw_args *close_args = (struct cam_icp_mgr_hw_args *) hw_close_args; + int rc = 0; + + if (!close_args) { + CAM_ERR(CAM_ICP, "close args can't be NULL"); + return -EINVAL; + } + + CAM_DBG(CAM_ICP, "[%s] Enter", hw_mgr->hw_mgr_name); + if (!hw_mgr->icp_booted) { + CAM_DBG(CAM_ICP, "[%s] hw mgr is already 
closed", hw_mgr->hw_mgr_name); + return 0; + } + + if (!hw_mgr->icp_dev_intf) { + CAM_DBG(CAM_ICP, "[%s] ICP device interface is NULL", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_ICP, "PRESIL-ICP-B2B-HFI-INIT No Shutdown No Deinit No HFIfreeMem"); + return 0; + } + + cam_icp_mgr_proc_shutdown(hw_mgr, close_args->use_proxy_boot_up, + close_args->skip_icp_init); + + if (close_args->hfi_setup) { + CAM_DBG(CAM_ICP, + "[%s] hw mgr is freeing hfi mem", hw_mgr->hw_mgr_name); + cam_hfi_deinit(hw_mgr->hfi_handle); + cam_icp_free_hfi_mem(hw_mgr); + hw_mgr->hfi_init_done = false; + } + + hw_mgr->icp_booted = false; + + CAM_DBG(CAM_ICP, "[%s] Exit", hw_mgr->hw_mgr_name); + return rc; +} + +static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc = 0, i, j; + struct cam_icp_hw_device_info *dev_info; + struct cam_hw_intf *dev_intf = NULL; + + for (i = 0; i < hw_mgr->num_dev_info; i++) { + dev_info = &hw_mgr->dev_info[i]; + for (j = 0; j < dev_info->hw_dev_cnt; j++) { + dev_intf = dev_info->dev_intf[j]; + if (!dev_intf) { + CAM_ERR(CAM_ICP, "Device intf for %s[%u] is NULL", + dev_info->dev_name, j); + rc = -EINVAL; + goto hw_dev_deinit; + } + + rc = dev_intf->hw_ops.init(dev_intf->hw_priv, NULL, 0); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed to init %s[%u]", + hw_mgr->hw_mgr_name, dev_info->dev_name, j); + goto hw_dev_deinit; + } + } + } + + return rc; + +hw_dev_deinit: + for (; i >= 0; i--) { + dev_info = &hw_mgr->dev_info[i]; + j = (j == -1) ? 
(dev_info->hw_dev_cnt - 1) : (j - 1); + for (; j >= 0; j--) { + dev_intf = dev_info->dev_intf[j]; + dev_intf->hw_ops.deinit(dev_intf->hw_priv, NULL, 0); + } + } + + return rc; +} + +static int cam_icp_mgr_init_all_cores(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + struct cam_hw_intf *dev_intf = NULL; + bool send_freq_info = false; + + dev_intf = hw_mgr->icp_dev_intf; + if (!dev_intf) { + CAM_ERR(CAM_ICP, "Invalid ICP device interface"); + return -EINVAL; + } + rc = dev_intf->hw_ops.init(dev_intf->hw_priv, &send_freq_info, sizeof(send_freq_info)); + if (rc) { + CAM_ERR(CAM_ICP, "Failed in ICP init rc=%d", rc); + return rc; + } + + rc = cam_icp_mgr_device_init(hw_mgr); + if (rc) + goto deinit_icp; + + return rc; + +deinit_icp: + dev_intf->hw_ops.deinit(dev_intf->hw_priv, &send_freq_info, sizeof(send_freq_info)); + + return rc; +} + +static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr) +{ + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + struct hfi_mem_info hfi_mem = {0}; + const struct hfi_ops *hfi_ops; + int rc; + + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is invalid", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + cam_icp_mgr_populate_hfi_mem_info(hw_mgr, &hfi_mem); + rc = cam_icp_get_hfi_device_ops(icp_dev_intf->hw_type, &hfi_ops); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Fail to get HFI device ops rc: %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + if (hw_mgr->synx_signaling_en) { + /* Expect global sync to be at the start of FW uncached region */ + if (hw_mgr->hfi_mem.fw_uncached_global_sync.iova >= + hw_mgr->hfi_mem.fw_uncached_generic.iova) { + CAM_ERR(CAM_ICP, + "global sync memory [start: 0x%x] expected to be at the start of FW uncached [uncached_generic start: 0x%x]", + hw_mgr->hfi_mem.fw_uncached_global_sync.iova, + hw_mgr->hfi_mem.fw_uncached_generic.iova); + return -EINVAL; + } + + /* + * Global sync memory is part of FW uncached region, but to FW remove this entry + * from FW uncached to avoid it being 
mapped with FW uncached. Global sync + * mem will be mapped with sharable attributes for IPC access, and hence + * an independent mapping of it's own. + */ + hfi_mem.fw_uncached.iova += hw_mgr->hfi_mem.fw_uncached_global_sync.len; + hfi_mem.fw_uncached.len -= hw_mgr->hfi_mem.fw_uncached_global_sync.len; + } + + rc = cam_hfi_init(hw_mgr->hfi_handle, &hfi_mem, hfi_ops, + icp_dev_intf->hw_priv, 0); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed to init HFI rc=%d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + CAM_DBG(CAM_ICP, "[%s] hw mgr succeed hfi init with hfi handle: %d", + hw_mgr->hw_mgr_name, hw_mgr->hfi_handle); + + return rc; +} + +static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + unsigned long rem_jiffies; + int timeout = 5000; + + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "%s: ICP device interface is NULL", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + reinit_completion(&hw_mgr->icp_complete); + + CAM_DBG(CAM_ICP, "[%s] Sending HFI init command", hw_mgr->hw_mgr_name); + rc = icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv, + CAM_ICP_SEND_INIT, NULL, 0); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in sending HFI init command rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG( + &hw_mgr->icp_complete, + msecs_to_jiffies(timeout), CAM_ICP, + "[%s] FW response timeout for FW init handle command", + hw_mgr->hw_mgr_name); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + cam_icp_dump_debug_info(hw_mgr, false); + } + CAM_DBG(CAM_ICP, "[%s] Done Waiting for INIT DONE Message", + hw_mgr->hw_mgr_name); + + return rc; +} + +static int cam_icp_mgr_send_memory_region_info( + struct cam_icp_hw_mgr *hw_mgr) +{ + struct hfi_cmd_prop *set_prop = NULL; + struct hfi_cmd_config_mem_regions *region_info = NULL; + uint32_t num_regions = 0; + size_t payload_size; + + if (hw_mgr->synx_signaling_en) + num_regions += 
ICP_NUM_MEM_REGIONS_FOR_SYNX; + + if (hw_mgr->fw_based_sys_caching) + num_regions += ICP_NUM_MEM_REGIONS_FOR_LLCC; + + if (!num_regions) + return 0; + + payload_size = sizeof(struct hfi_cmd_prop) + + (sizeof(struct hfi_cmd_config_mem_regions)) + + (sizeof(struct hfi_cmd_mem_region_info) * (num_regions - 1)); + + set_prop = CAM_MEM_ZALLOC(payload_size, GFP_KERNEL); + if (!set_prop) + return -ENOMEM; + + set_prop->size = payload_size; + set_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY; + set_prop->num_prop = 1; + set_prop->prop_data_flex[0] = HFI_PROP_SYS_MEM_REGIONS; + + region_info = (struct hfi_cmd_config_mem_regions *)&set_prop->prop_data_flex[1]; + if (hw_mgr->synx_signaling_en) { + /* Update synx global mem */ + region_info->region_info_flex[region_info->num_valid_regions].region_id = + HFI_MEM_REGION_ID_IPCLITE_SHARED_MEM; + region_info->region_info_flex[region_info->num_valid_regions].region_type = + HFI_MEM_REGION_TYPE_UNCACHED; + region_info->region_info_flex[region_info->num_valid_regions].start_addr = + hw_mgr->hfi_mem.fw_uncached_global_sync.iova; + region_info->region_info_flex[region_info->num_valid_regions].size = + hw_mgr->hfi_mem.fw_uncached_global_sync.len; + + region_info->num_valid_regions++; + + /* Update synx hw_mutex mem */ + region_info->region_info_flex[region_info->num_valid_regions].region_id = + HFI_MEM_REGION_ID_SYNX_HW_MUTEX; + region_info->region_info_flex[region_info->num_valid_regions].region_type = + HFI_MEM_REGION_TYPE_DEVICE; + region_info->region_info_flex[region_info->num_valid_regions].start_addr = + hw_mgr->hfi_mem.synx_hwmutex.iova; + region_info->region_info_flex[region_info->num_valid_regions].size = + hw_mgr->hfi_mem.synx_hwmutex.len; + + region_info->num_valid_regions++; + + /* Update ipc hw_mutex mem */ + region_info->region_info_flex[region_info->num_valid_regions].region_id = + HFI_MEM_REGION_ID_GLOBAL_ATOMIC_HW_MUTEX; + region_info->region_info_flex[region_info->num_valid_regions].region_type = + 
HFI_MEM_REGION_TYPE_DEVICE; + region_info->region_info_flex[region_info->num_valid_regions].start_addr = + hw_mgr->hfi_mem.ipc_hwmutex.iova; + region_info->region_info_flex[region_info->num_valid_regions].size = + hw_mgr->hfi_mem.ipc_hwmutex.len; + + region_info->num_valid_regions++; + + /* Update global cntr mem */ + region_info->region_info_flex[region_info->num_valid_regions].region_id = + HFI_MEM_REGION_ID_GLOBAL_CNTR; + region_info->region_info_flex[region_info->num_valid_regions].region_type = + HFI_MEM_REGION_TYPE_DEVICE; + region_info->region_info_flex[region_info->num_valid_regions].start_addr = + hw_mgr->hfi_mem.global_cntr.iova; + region_info->region_info_flex[region_info->num_valid_regions].size = + hw_mgr->hfi_mem.global_cntr.len; + + region_info->num_valid_regions++; + CAM_DBG(CAM_ICP, + "Synx mem regions global_sync[0x%x:0x%x] synx_hw_mutex[0x%x:0x%x] ipc_hw_mutex[0x%x:0x%x] global_cntr[0x%x:0x%x]", + hw_mgr->hfi_mem.fw_uncached_global_sync.iova, + hw_mgr->hfi_mem.fw_uncached_global_sync.len, + hw_mgr->hfi_mem.synx_hwmutex.iova, hw_mgr->hfi_mem.synx_hwmutex.len, + hw_mgr->hfi_mem.ipc_hwmutex.iova, hw_mgr->hfi_mem.ipc_hwmutex.len, + hw_mgr->hfi_mem.global_cntr.iova, hw_mgr->hfi_mem.global_cntr.len); + } + + if (hw_mgr->fw_based_sys_caching) { + /* Update ipc llcc mem */ + region_info->region_info_flex[region_info->num_valid_regions].region_id = + HFI_MEM_REGION_ID_LLCC_REGISTER; + region_info->region_info_flex[region_info->num_valid_regions].region_type = + HFI_MEM_REGION_TYPE_DEVICE; + region_info->region_info_flex[region_info->num_valid_regions].start_addr = + hw_mgr->hfi_mem.llcc_reg.iova; + region_info->region_info_flex[region_info->num_valid_regions].size = + hw_mgr->hfi_mem.llcc_reg.len; + + region_info->num_valid_regions++; + + CAM_DBG(CAM_ICP, + "LLCC mem regions llcc_reg[0x%x:0x%x]", + hw_mgr->hfi_mem.llcc_reg.iova, hw_mgr->hfi_mem.llcc_reg.len); + } + + CAM_DBG(CAM_ICP, + "Mem region property payload size: %zu num_regions: %u", + 
payload_size, region_info->num_valid_regions); + + hfi_write_cmd(hw_mgr->hfi_handle, set_prop); + CAM_MEM_FREE(set_prop); + + return 0; +} + +static int cam_icp_mgr_hw_open_u(void *hw_mgr_priv, void *download_fw_args) +{ + struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv; + int rc = 0; + struct cam_icp_mgr_hw_args open_args = {0}; + + if (!hw_mgr) { + CAM_ERR(CAM_ICP, "Null hw mgr"); + return 0; + } + + open_args.hfi_setup = true; + open_args.use_proxy_boot_up = CAM_IS_SECONDARY_VM(); + open_args.icp_pc = hw_mgr->icp_pc_flag; + + if (download_fw_args) + open_args.icp_pc = !(*((bool *)download_fw_args)); + + + if (cam_presil_mode_enabled()) { + CAM_DBG(CAM_PRESIL, "[%s] hw_open from umd skipped for presil", + hw_mgr->hw_mgr_name); + return 0; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + rc = cam_icp_mgr_hw_open(hw_mgr, &open_args); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +} + +static int cam_icp_mgr_icp_resume( + struct cam_icp_hw_mgr *hw_mgr, + void *resume_args) +{ + int rc = 0; + struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf; + bool send_freq_info; + struct cam_icp_mgr_hw_args *res_args = (struct cam_icp_mgr_hw_args *) resume_args; + + CAM_DBG(CAM_ICP, "[%s] Enter", hw_mgr->hw_mgr_name); + + if (!resume_args) { + CAM_ERR(CAM_ICP, "resume_args can't be NULL"); + return -EINVAL; + } + + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is NULL", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + if (!hw_mgr->icp_booted) { + res_args->icp_pc = false; + CAM_DBG(CAM_ICP, "[%s] booting ICP processor", hw_mgr->hw_mgr_name); + rc = cam_icp_mgr_hw_open(hw_mgr, res_args); + CAM_DBG(CAM_ICP, "[%s] hw_open from kmd %s %d", hw_mgr->hw_mgr_name, + (rc ? 
"failed":"success"), rc); + return rc; + } + + send_freq_info = true; + + if (!res_args->skip_icp_init) { + rc = icp_dev_intf->hw_ops.init(icp_dev_intf->hw_priv, &send_freq_info, + sizeof(send_freq_info)); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed to init ICP hw rc: %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + } + + rc = cam_icp_mgr_proc_resume(hw_mgr, res_args->use_proxy_boot_up); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed to resume proc rc: %d", hw_mgr->hw_mgr_name, rc); + goto hw_deinit; + } + + if (res_args->hfi_setup) { + rc = cam_icp_mgr_hfi_resume(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed to resume HFI rc: %d", hw_mgr->hw_mgr_name, + rc); + goto power_collapse; + } + } + + CAM_DBG(CAM_ICP, "[%s] Exit", hw_mgr->hw_mgr_name); + return rc; + +power_collapse: + __power_collapse(hw_mgr, res_args); +hw_deinit: + send_freq_info = false; + icp_dev_intf->hw_ops.deinit(icp_dev_intf->hw_priv, &send_freq_info, + sizeof(send_freq_info)); + + return rc; +} + +static int cam_icp_mgr_hw_open( + void *hw_mgr_priv, + void *hw_args) +{ + struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv; + uint32_t dump_type; + int rc = 0; + struct cam_icp_mgr_hw_args *open_args = + (struct cam_icp_mgr_hw_args *) hw_args; + + if (!hw_mgr) { + CAM_ERR(CAM_ICP, "hw_mgr is NULL"); + return -EINVAL; + } + + if (hw_mgr->icp_booted && hw_mgr->hfi_init_done) { + CAM_DBG(CAM_ICP, "[%s] ICP already booted", hw_mgr->hw_mgr_name); + return rc; + } + + if (!hw_mgr->icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is invalid", + hw_mgr->hw_mgr_name); + return -EINVAL; + } + + if (!open_args) { + CAM_ERR(CAM_ICP, "open args can't be NULL"); + return -EINVAL; + } + + if (atomic_read(&hw_mgr->recovery)) { + CAM_WARN(CAM_ICP, "[%s] recovery in progress", hw_mgr->hw_mgr_name); + return -EAGAIN; + } + + CAM_DBG(CAM_ICP, "[%s] Start icp hw open", hw_mgr->hw_mgr_name); + atomic_set(&hw_mgr->load_in_process, 1); + + if (open_args->hfi_setup) { + rc = cam_icp_allocate_hfi_mem(hw_mgr); + 
if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in alloc hfi mem, rc %d", + hw_mgr->hw_mgr_name, rc); + goto alloc_hfi_mem_failed; + } + } + + if (!open_args->skip_icp_init) { + rc = cam_icp_mgr_init_all_cores(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in device init, rc %d", + hw_mgr->hw_mgr_name, rc); + goto dev_init_fail; + } + } + + if (!hw_mgr->icp_booted) { + rc = cam_icp_mgr_proc_boot(hw_mgr, open_args->use_proxy_boot_up); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in proc boot, rc %d", + hw_mgr->hw_mgr_name, rc); + goto boot_failed; + } + } else if (!hw_mgr->icp_resumed) { + rc = cam_icp_mgr_proc_resume(hw_mgr, open_args->use_proxy_boot_up); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in proc resume, rc %d", + hw_mgr->hw_mgr_name, rc); + goto boot_failed; + } + } + + if (open_args->hfi_setup) { + rc = cam_icp_mgr_hfi_init(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in hfi init, rc %d", + hw_mgr->hw_mgr_name, rc); + dump_type = (CAM_ICP_DUMP_STATUS_REGISTERS | CAM_ICP_DUMP_CSR_REGISTERS); + hw_mgr->icp_dev_intf->hw_ops.process_cmd(hw_mgr->icp_dev_intf->hw_priv, + CAM_ICP_CMD_HW_REG_DUMP, &dump_type, sizeof(dump_type)); + goto hfi_init_failed; + } + + rc = cam_icp_mgr_send_fw_init(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in sending fw init, rc %d", + hw_mgr->hw_mgr_name, rc); + goto fw_init_failed; + } + + rc = cam_icp_mgr_send_memory_region_info(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in sending mem region info, rc %d", + hw_mgr->hw_mgr_name, rc); + goto fw_init_failed; + } + + hw_mgr->hfi_init_done = true; + } + + hw_mgr->ctxt_cnt = 0; + hw_mgr->icp_booted = true; + atomic_set(&hw_mgr->recovery, 0); + atomic_set(&hw_mgr->load_in_process, 0); + + CAM_INFO(CAM_ICP, "[%s] FW download done successfully", hw_mgr->hw_mgr_name); + + if (!open_args->skip_icp_init) { + rc = cam_icp_device_deint(hw_mgr); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Failed in device deinit rc %d", + hw_mgr->hw_mgr_name, rc); + } + + if 
(!(open_args->icp_pc)) + return rc; + + rc = cam_icp_mgr_icp_power_collapse(hw_mgr, open_args); + if (rc) + CAM_ERR(CAM_ICP, "[%s] Failed in icp power collapse rc %d", + hw_mgr->hw_mgr_name, rc); + + CAM_DBG(CAM_ICP, "[%s] deinit all clocks at boot up", hw_mgr->hw_mgr_name); + return rc; + +fw_init_failed: + cam_hfi_deinit(hw_mgr->hfi_handle); + hw_mgr->hfi_init_done = false; +hfi_init_failed: + cam_icp_mgr_proc_shutdown(hw_mgr, open_args->use_proxy_boot_up, open_args->skip_icp_init); +boot_failed: + cam_icp_mgr_device_deinit(hw_mgr); +dev_init_fail: + cam_icp_free_hfi_mem(hw_mgr); +alloc_hfi_mem_failed: + atomic_set(&hw_mgr->load_in_process, 0); + return rc; +} + +static int cam_icp_mgr_hfi_init_util(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + uint32_t dump_type; + + rc = cam_icp_mgr_hfi_init(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in hfi init, rc %d", + hw_mgr->hw_mgr_name, rc); + dump_type = (CAM_ICP_DUMP_STATUS_REGISTERS | CAM_ICP_DUMP_CSR_REGISTERS); + hw_mgr->icp_dev_intf->hw_ops.process_cmd(hw_mgr->icp_dev_intf->hw_priv, + CAM_ICP_CMD_HW_REG_DUMP, &dump_type, sizeof(dump_type)); + goto end; + } + + rc = cam_icp_mgr_send_memory_region_info(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in sending mem region info, rc %d", + hw_mgr->hw_mgr_name, rc); + goto end; + } + + hw_mgr->hfi_init_done = true; + +end: + return rc; +} + +static int cam_icp_mgr_restart_icp(struct cam_icp_hw_mgr *hw_mgr) +{ + int rc; + bool use_proxy_boot_up = CAM_IS_SECONDARY_VM(); + + /* Shutdown processor */ + if (hw_mgr->icp_booted) + cam_icp_mgr_proc_shutdown(hw_mgr, use_proxy_boot_up, !hw_mgr->hfi_init_done); + + /* power on all cores */ + rc = cam_icp_mgr_init_all_cores(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in device init, rc %d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + /* reload and reset ICP */ + rc = cam_icp_mgr_proc_boot(hw_mgr, use_proxy_boot_up); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in proc boot, rc %d", + hw_mgr->hw_mgr_name, 
rc);
		goto end;
	}

	/* initialize HFI */
	if (hw_mgr->hfi_init_done) {
		/* HFI memory already exists: tear down and re-init on top of it */
		cam_hfi_deinit(hw_mgr->hfi_handle);

		rc = cam_icp_mgr_hfi_init_util(hw_mgr);
		if (rc)
			goto end;
	} else {
		/* First-time HFI bring-up: allocate backing memory first */
		rc = cam_icp_allocate_hfi_mem(hw_mgr);
		if (rc) {
			CAM_ERR(CAM_ICP, "[%s] Failed in alloc hfi mem, rc %d",
				hw_mgr->hw_mgr_name, rc);
			goto end;
		}

		rc = cam_icp_mgr_hfi_init_util(hw_mgr);
		if (rc) {
			/* Free only what this branch allocated */
			cam_icp_free_hfi_mem(hw_mgr);
			goto end;
		}
	}

	hw_mgr->icp_booted = true;
	CAM_INFO(CAM_ICP, "[%s] FW download done successfully",
		hw_mgr->hw_mgr_name);
end:
	/* power down cores */
	cam_icp_mgr_device_deinit(hw_mgr);
	return rc;
}

/*
 * cam_icp_mgr_handle_config_err - report a failed config submission.
 *
 * Notifies the context layer of a CONFIG_ERR buf-done event for the request
 * carried in config_args->priv, then releases frame-process slot @idx
 * (clears request id, fw flag and the slot's bitmap bit).
 *
 * NOTE(review): priv is read as a bare uint64_t request id here, while
 * cam_icp_mgr_enqueue_config() treats priv as struct icp_frame_info * —
 * this assumes request_id is the first field of icp_frame_info; confirm.
 *
 * Always returns 0.
 */
static int cam_icp_mgr_handle_config_err(
	struct cam_hw_config_args *config_args,
	struct cam_icp_hw_ctx_data *ctx_data,
	int idx)
{
	struct cam_icp_hw_buf_done_evt_data icp_evt_data;
	struct cam_hw_done_event_data buf_data = {0};

	buf_data.request_id = *(uint64_t *)config_args->priv;
	buf_data.evt_param = CAM_SYNC_ICP_EVENT_CONFIG_ERR;
	icp_evt_data.evt_id = CAM_CTX_EVT_ID_ERROR;
	icp_evt_data.buf_done_data = &buf_data;
	/* Deliver the error as a buf-done event through the ctx callback */
	ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_ICP_EVT_ID_BUF_DONE,
		&icp_evt_data);

	/* Free the frame-process tracking slot */
	ctx_data->hfi_frame_process.request_id[idx] = 0;
	ctx_data->hfi_frame_process.fw_process_flag[idx] = false;
	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);

	return 0;
}

/*
 * cam_icp_mgr_enqueue_config - queue a frame-process command to the cmd workq.
 * Rejects new work while SSR recovery is in progress. Body continues past
 * this view.
 */
static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_hw_config_args *config_args)
{
	int rc = 0;
	uint64_t request_id = 0;
	struct crm_workq_task *task;
	struct hfi_cmd_work_data *task_data;
	struct cam_hw_update_entry *hw_update_entries;
	struct icp_frame_info *frame_info = NULL;

	/* Block any frame process cmds if SSR has been triggered */
	if (atomic_read(&hw_mgr->recovery))
		return -EAGAIN;

	frame_info = (struct icp_frame_info *)config_args->priv;
	request_id = frame_info->request_id;
	hw_update_entries = config_args->hw_update_entries;
	CAM_DBG(CAM_ICP, "[%s] req_id = 
%lld %pK", + hw_mgr->hw_mgr_name, request_id, config_args->priv); + + task = cam_req_mgr_workq_get_task(hw_mgr->cmd_work); + if (!task) { + CAM_ERR(CAM_ICP, "[%s] no empty task", hw_mgr->hw_mgr_name); + return -ENOMEM; + } + + task_data = (struct hfi_cmd_work_data *)task->payload; + task_data->data = (void *)hw_update_entries->addr; + task_data->request_id = request_id; + task_data->type = ICP_WORKQ_TASK_CMD_TYPE; + task->process_cb = cam_icp_mgr_process_cmd; + rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, + CRM_TASK_PRIORITY_0); + + return rc; +} + +static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data, + uint32_t io_buf_addr) +{ + int rc = 0; + struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv; + struct hfi_cmd_work_data *task_data; + struct hfi_cmd_dev_async ioconfig_cmd; + unsigned long rem_jiffies; + int timeout = 5000; + struct crm_workq_task *task; + uint32_t size_in_words; + struct cam_icp_hw_ctx_info *ctx_info; + + task = cam_req_mgr_workq_get_task(hw_mgr->cmd_work); + if (!task) { + CAM_ERR_RATE_LIMIT(CAM_ICP, "%s: No free cmd task", ctx_data->ctx_id_string); + return -ENOMEM; + } + + switch (ctx_data->device_info->hw_dev_type) { + case CAM_ICP_DEV_BPS: + ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO; + ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT; + break; + case CAM_ICP_DEV_IPE: + ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO; + ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT; + break; + case CAM_ICP_DEV_OFE: + ioconfig_cmd.opcode = HFI_OFE_CMD_OPCODE_CONFIG_IO; + ioconfig_cmd.pkt_type = HFI_CMD_OFE_ASYNC_COMMAND_INDIRECT; + break; + default: + CAM_ERR(CAM_ICP, "%s Invalid hw dev type not supported: %u", + ctx_data->ctx_id_string, ctx_data->device_info->hw_dev_type); + return -EINVAL; + } + + reinit_completion(&ctx_data->wait_complete); + + ctx_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_info), GFP_ATOMIC); + if (!ctx_info) { + CAM_ERR(CAM_ICP, "Failed in 
allocating memory for ICP ctx info"); + return -ENOMEM; + } + + ctx_info->ctx_id = ctx_data->ctx_id; + ctx_info->ctx_data = ctx_data; + ctx_info->need_lock = false; + + ioconfig_cmd.size = sizeof(struct hfi_cmd_dev_async); + ioconfig_cmd.num_fw_handles = 1; + ioconfig_cmd.fw_handles_flex[0] = ctx_data->fw_handle; + ioconfig_cmd.payload.indirect = io_buf_addr; + ioconfig_cmd.user_data1 = PTR_TO_U64(ctx_info); + ioconfig_cmd.user_data2 = (uint64_t)0x0; + task_data = (struct hfi_cmd_work_data *)task->payload; + task_data->data = (void *)&ioconfig_cmd; + task_data->request_id = 0; + task_data->type = ICP_WORKQ_TASK_MSG_TYPE; + task->process_cb = cam_icp_mgr_process_cmd; + size_in_words = (*(uint32_t *)task_data->data) >> 2; + CAM_DBG(CAM_ICP, "%s: size_in_words %u", + ctx_data->ctx_id_string, size_in_words); + rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, + CRM_TASK_PRIORITY_0); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ICP, "%s: Failed to enqueue io config task", + ctx_data->ctx_id_string); + CAM_MEM_FREE(ctx_info); + return rc; + } + + rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG( + &ctx_data->wait_complete, + msecs_to_jiffies(timeout), CAM_ICP, + "%s: FW response timeout for send IO cfg handle command on", + ctx_data->ctx_id_string); + if (!rem_jiffies) { + /* send specific error for io config failure */ + rc = -EREMOTEIO; + cam_icp_dump_debug_info(hw_mgr, false); + } + + return rc; +} + +static int cam_icp_mgr_send_recfg_io(struct cam_icp_hw_ctx_data *ctx_data, + struct hfi_cmd_dev_async *ioconfig_cmd, uint64_t req_id) +{ + int rc = 0; + struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv; + struct hfi_cmd_work_data *task_data; + struct crm_workq_task *task; + + task = cam_req_mgr_workq_get_task(hw_mgr->cmd_work); + if (!task) + return -ENOMEM; + + task_data = (struct hfi_cmd_work_data *)task->payload; + task_data->data = (void *)ioconfig_cmd; + task_data->request_id = req_id; + task_data->type = ICP_WORKQ_TASK_CMD_TYPE; + task->process_cb = 
cam_icp_mgr_process_cmd; + rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, + CRM_TASK_PRIORITY_0); + return rc; +} + +static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args) +{ + int rc = 0; + int idx; + uint64_t req_id; + struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_config_args *config_args = config_hw_args; + struct cam_icp_hw_ctx_data *ctx_data = NULL; + struct icp_frame_info *frame_info = NULL; + + if (!hw_mgr || !config_args) { + CAM_ERR(CAM_ICP, "Invalid arguments %pK %pK", + hw_mgr, config_args); + return -EINVAL; + } + + ctx_data = config_args->ctxt_to_hw_map; + + if (!config_args->num_hw_update_entries) { + CAM_ERR(CAM_ICP, "%s: No hw update enteries are available", + ctx_data->ctx_id_string); + return -EINVAL; + } + + if (cam_presil_mode_enabled()) { + CAM_DBG(CAM_PRESIL, "%s: presil: locking frame_in_process %d req id %llu", + ctx_data->ctx_id_string, atomic_read(&hw_mgr->frame_in_process), + config_args->request_id); + down_write(&frame_in_process_sem); + atomic_set(&hw_mgr->frame_in_process, 1); + hw_mgr->frame_in_process_ctx_id = ctx_data->ctx_id; + CAM_DBG(CAM_PRESIL, "%s: presil: locked frame_in_process req id %llu ctx_id %d", + ctx_data->ctx_id_string, config_args->request_id, + hw_mgr->frame_in_process_ctx_id); + msleep(100); + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) { + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_ICP, "%s: is not in use", ctx_data->ctx_id_string); + return -EINVAL; + } + + frame_info = (struct icp_frame_info *)config_args->priv; + req_id = frame_info->request_id; + idx = cam_icp_get_frame_process_idx_from_req_id(ctx_data, req_id); + if (idx >= CAM_FRAME_CMD_MAX) { + CAM_ERR(CAM_ICP, "%s: frame process index not found for req_id: %llu", + ctx_data->ctx_id_string, req_id); + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + 
mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_ICP, "%s: Sending relevant buffers for request: %llu to presil", + ctx_data->ctx_id_string, config_args->request_id); + cam_presil_send_buffers_from_packet(frame_info->pkt, hw_mgr->iommu_hdl, + hw_mgr->iommu_hdl); + } + + cam_icp_mgr_dev_clk_update(hw_mgr, ctx_data, idx); + ctx_data->hfi_frame_process.fw_process_flag[idx] = true; + ctx_data->hfi_frame_process.submit_timestamp[idx] = ktime_get(); + + CAM_DBG(CAM_ICP, "%s: req_id %llu, io config %llu", + ctx_data->ctx_id_string, req_id, frame_info->io_config); + + if (frame_info->io_config != 0) { + CAM_INFO(CAM_ICP, "%s: Send recfg io", ctx_data->ctx_id_string); + rc = cam_icp_mgr_send_recfg_io(ctx_data, + &frame_info->hfi_cfg_io_cmd, req_id); + if (rc) + CAM_ERR(CAM_ICP, "%s: Fail to send reconfig io cmd", + ctx_data->ctx_id_string); + } + + if (req_id <= ctx_data->last_flush_req) + CAM_WARN(CAM_ICP, + "%s: Anomaly submitting flushed req %llu [last_flush %llu]", + ctx_data->ctx_id_string, req_id, ctx_data->last_flush_req); + + cam_cpas_notify_event(ctx_data->ctx_id_string, req_id); + + rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args); + if (rc) + goto config_err; + + CAM_DBG(CAM_REQ, "%s: req_id = %lld queued to FW", + ctx_data->ctx_id_string, req_id); + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return 0; +config_err: + cam_icp_mgr_handle_config_err(config_args, ctx_data, idx); + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static int cam_icp_mgr_prepare_frame_process_cmd( + struct cam_icp_hw_ctx_data *ctx_data, + struct hfi_cmd_dev_async *hfi_cmd, + uint64_t request_id, + uint32_t fw_cmd_buf_iova_addr) +{ + struct cam_icp_hw_ctx_info *ctx_info; + + ctx_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_info), GFP_ATOMIC); + if (!ctx_info) { + CAM_ERR(CAM_ICP, "Failed in allocating 
memory for ICP ctx info"); + return -ENOMEM; + } + + ctx_info->ctx_id = ctx_data->ctx_id; + ctx_info->ctx_data = ctx_data; + + switch (ctx_data->device_info->hw_dev_type) { + case CAM_ICP_DEV_BPS: + hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS; + hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT; + break; + case CAM_ICP_DEV_IPE: + hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS; + hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT; + break; + case CAM_ICP_DEV_OFE: + hfi_cmd->opcode = HFI_OFE_CMD_OPCODE_FRAME_PROCESS; + hfi_cmd->pkt_type = HFI_CMD_OFE_ASYNC_COMMAND_INDIRECT; + break; + default: + CAM_ERR(CAM_ICP, "%s: Invalid hw dev type not supported: %u", + ctx_data->ctx_id_string, ctx_data->device_info->hw_dev_type); + return -EINVAL; + } + + hfi_cmd->size = sizeof(struct hfi_cmd_dev_async); + hfi_cmd->num_fw_handles = 1; + hfi_cmd->fw_handles_flex[0] = ctx_data->fw_handle; + hfi_cmd->payload.indirect = fw_cmd_buf_iova_addr; + hfi_cmd->user_data1 = PTR_TO_U64(ctx_info); + hfi_cmd->user_data2 = request_id; + + CAM_DBG(CAM_ICP, "%s: ctx_data : %pK, request_id :%lld cmd_buf %x", + ctx_data->ctx_id_string, (void *)ctx_data->context_priv, request_id, + fw_cmd_buf_iova_addr); + + return 0; +} + +static bool cam_icp_mgr_is_valid_inconfig(struct cam_packet *packet) +{ + int i, num_in_map_entries = 0; + bool in_config_valid = false; + struct cam_buf_io_cfg *io_cfg_ptr = NULL; + + io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload_flex + + packet->io_configs_offset/4); + + for (i = 0 ; i < packet->num_io_configs; i++) + if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) + num_in_map_entries++; + + if (num_in_map_entries <= CAM_MAX_IN_RES) + in_config_valid = true; + else + CAM_ERR(CAM_ICP, "In config entries(%u) more than allowed(%u)", + num_in_map_entries, CAM_MAX_IN_RES); + + CAM_DBG(CAM_ICP, + "number of io config: %u in config: %u max in res: %u", + packet->num_io_configs, + num_in_map_entries, CAM_MAX_IN_RES); + + 
return in_config_valid;
}

/*
 * cam_icp_mgr_is_valid_outconfig - validate output io-config count.
 *
 * Counts io configs whose direction is CAM_BUF_OUTPUT or CAM_BUF_IN_OUT and
 * returns true iff that count is within CAM_MAX_OUT_RES. Logs an error when
 * the limit is exceeded.
 */
static bool cam_icp_mgr_is_valid_outconfig(struct cam_packet *packet)
{
	int i, num_out_map_entries = 0;
	bool out_config_valid = false;
	struct cam_buf_io_cfg *io_cfg_ptr = NULL;

	/* io configs live at io_configs_offset (bytes) into the packet payload */
	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload_flex +
		packet->io_configs_offset/4);

	for (i = 0 ; i < packet->num_io_configs; i++)
		if ((io_cfg_ptr[i].direction == CAM_BUF_OUTPUT) ||
			(io_cfg_ptr[i].direction == CAM_BUF_IN_OUT))
			num_out_map_entries++;

	if (num_out_map_entries <= CAM_MAX_OUT_RES) {
		out_config_valid = true;
	} else {
		CAM_ERR(CAM_ICP, "Out config entries(%u) more than allowed(%u)",
			num_out_map_entries, CAM_MAX_OUT_RES);
	}

	CAM_DBG(CAM_ICP,
		"number of io config: %u out config: %u max out res: %u",
		packet->num_io_configs,
		num_out_map_entries, CAM_MAX_OUT_RES);

	return out_config_valid;
}

/*
 * cam_icp_mgr_pkt_validation - sanity-check a packet for this context.
 *
 * Rejects a packet whose opcode does not match the context's device type
 * (IPE/BPS/OFE update opcodes respectively), one with zero io configs, or
 * one with a cmd-buffer count of zero or above CAM_ICP_CTX_MAX_CMD_BUFFERS.
 * Validation continues past this view (in/out config checks).
 */
static int cam_icp_mgr_pkt_validation(struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_packet *packet)
{
	uint32_t op_code;
	enum cam_icp_hw_type hw_dev_type;

	/* Only the low byte of the header op_code selects the update type */
	op_code = packet->header.op_code & 0xff;
	hw_dev_type = ctx_data->device_info->hw_dev_type;

	if (((op_code != CAM_ICP_OPCODE_IPE_UPDATE) &&
		hw_dev_type == CAM_ICP_DEV_IPE) ||
		((op_code != CAM_ICP_OPCODE_BPS_UPDATE) &&
		hw_dev_type == CAM_ICP_DEV_BPS) ||
		((op_code != CAM_ICP_OPCODE_OFE_UPDATE) &&
		hw_dev_type == CAM_ICP_DEV_OFE)) {
		CAM_ERR(CAM_ICP, "%s: Invalid Opcode in pkt: %d device: %s",
			ctx_data->ctx_id_string, op_code, ctx_data->device_info->dev_name);
		return -EINVAL;
	}

	if (!packet->num_io_configs) {
		CAM_ERR(CAM_ICP, "%s: Invalid number of io configs: %d",
			ctx_data->ctx_id_string, packet->num_io_configs);
		return -EINVAL;
	}

	if (!packet->num_cmd_buf || packet->num_cmd_buf > CAM_ICP_CTX_MAX_CMD_BUFFERS) {
		CAM_ERR(CAM_ICP, "%s: Invalid number of cmd buffers: %d max cmd buf: %d",
			ctx_data->ctx_id_string, packet->num_cmd_buf,
			CAM_ICP_CTX_MAX_CMD_BUFFERS);
		return -EINVAL;
	}

	if 
(!cam_icp_mgr_is_valid_inconfig(packet) || + !cam_icp_mgr_is_valid_outconfig(packet)) { + return -EINVAL; + } + + CAM_DBG(CAM_ICP, "%s: number of cmd/patch info: %u %u %u", + ctx_data->ctx_id_string, packet->num_cmd_buf, packet->num_io_configs, + packet->num_patches); + + return 0; +} + +static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr, + struct cam_packet *packet, struct cam_icp_hw_ctx_data *ctx_data, + uint32_t *fw_cmd_buf_iova_addr, struct list_head *buf_tracker) +{ + int rc = 0; + int i; + dma_addr_t addr; + size_t len; + struct cam_cmd_buf_desc *cmd_desc = NULL; + uintptr_t cpu_addr = 0; + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *) &packet->payload_flex + packet->cmd_buf_offset/4); + rc = cam_packet_util_validate_cmd_desc(cmd_desc); + if (rc) + return rc; + + *fw_cmd_buf_iova_addr = 0; + for (i = 0; i < packet->num_cmd_buf; i++) { + if (cmd_desc[i].type == CAM_CMD_BUF_FW) { + rc = cam_mem_get_io_buf(cmd_desc[i].mem_handle, + hw_mgr->iommu_hdl, &addr, &len, NULL, buf_tracker); + if (rc) { + CAM_ERR(CAM_ICP, "%s: get cmd buf failed %x", + ctx_data->ctx_id_string, hw_mgr->iommu_hdl); + return rc; + } + + /* FW buffers are expected to be within 32-bit address range */ + *fw_cmd_buf_iova_addr = addr; + + if ((cmd_desc[i].offset >= len) || + ((len - cmd_desc[i].offset) < + cmd_desc[i].size)) { + CAM_ERR(CAM_ICP, + "%s: Invalid offset, i: %d offset: %u len: %zu size: %zu", + ctx_data->ctx_id_string, i, cmd_desc[i].offset, + len, cmd_desc[i].size); + return -EINVAL; + } + + *fw_cmd_buf_iova_addr = + (*fw_cmd_buf_iova_addr + cmd_desc[i].offset); + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + &cpu_addr, &len); + if (rc || !cpu_addr) { + CAM_ERR(CAM_ICP, "%s: get cmd buf failed %x", + ctx_data->ctx_id_string, hw_mgr->iommu_hdl); + *fw_cmd_buf_iova_addr = 0; + return rc; + } + if ((len <= cmd_desc[i].offset) || + (cmd_desc[i].size < cmd_desc[i].length) || + ((len - cmd_desc[i].offset) < + cmd_desc[i].length)) { + CAM_ERR(CAM_ICP, 
"%s: Invalid offset or length", + ctx_data->ctx_id_string); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + return -EINVAL; + } + cpu_addr = cpu_addr + cmd_desc[i].offset; + + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + } + } + + if (!cpu_addr) { + CAM_ERR(CAM_ICP, "%s: invalid number of cmd buf", ctx_data->ctx_id_string); + return -EINVAL; + } + + return rc; +} + +static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data, + struct cam_packet *packet, + struct cam_hw_prepare_update_args *prepare_args, + int32_t index) +{ + int i, j, k, rc = 0; + struct cam_buf_io_cfg *io_cfg_ptr = NULL; + int32_t sync_in_obj[CAM_MAX_IN_RES]; + int32_t merged_sync_in_obj; + + io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload_flex + + packet->io_configs_offset/4); + prepare_args->num_out_map_entries = 0; + prepare_args->num_in_map_entries = 0; + + for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) { + if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) { + sync_in_obj[j++] = io_cfg_ptr[i].fence; + prepare_args->num_in_map_entries++; + } else if ((io_cfg_ptr[i].direction == CAM_BUF_OUTPUT) || + (io_cfg_ptr[i].direction == CAM_BUF_IN_OUT)) { + prepare_args->out_map_entries[k].sync_id = + io_cfg_ptr[i].fence; + prepare_args->out_map_entries[k].resource_handle = + io_cfg_ptr[i].resource_type; + k++; + prepare_args->num_out_map_entries++; + } else { + CAM_ERR(CAM_ICP, "dir: %d, max_out:%u, out %u", + io_cfg_ptr[i].direction, + prepare_args->max_out_map_entries, + prepare_args->num_out_map_entries); + return -EINVAL; + } + + CAM_DBG(CAM_REQ, + "%s: req_id: %llu dir[%d]: %u, fence: %u resource_type = %u memh %x", + ctx_data->ctx_id_string, packet->header.request_id, i, + io_cfg_ptr[i].direction, io_cfg_ptr[i].fence, + io_cfg_ptr[i].resource_type, + io_cfg_ptr[i].mem_handle[0]); + } + + if (prepare_args->num_in_map_entries > 1) + prepare_args->num_in_map_entries = + cam_common_util_remove_duplicate_arr( + 
sync_in_obj, prepare_args->num_in_map_entries); + + if (prepare_args->num_in_map_entries > 1) { + rc = cam_sync_merge(&sync_in_obj[0], + prepare_args->num_in_map_entries, &merged_sync_in_obj); + if (rc) { + prepare_args->num_out_map_entries = 0; + prepare_args->num_in_map_entries = 0; + return rc; + } + + ctx_data->hfi_frame_process.in_resource[index] = + merged_sync_in_obj; + prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj; + prepare_args->num_in_map_entries = 1; + CAM_DBG(CAM_REQ, "%s: req_id: %llu Merged Sync obj: %d", + ctx_data->ctx_id_string, packet->header.request_id, + merged_sync_in_obj); + } else if (prepare_args->num_in_map_entries == 1) { + prepare_args->in_map_entries[0].sync_id = sync_in_obj[0]; + prepare_args->num_in_map_entries = 1; + ctx_data->hfi_frame_process.in_resource[index] = 0; + } else { + CAM_DBG(CAM_ICP, "%s: No input fences for req id: %llu", + ctx_data->ctx_id_string, packet->header.request_id); + prepare_args->num_in_map_entries = 0; + ctx_data->hfi_frame_process.in_resource[index] = 0; + } + + return rc; +} + +static int cam_icp_process_stream_settings( + struct cam_icp_hw_ctx_data *ctx_data, + struct cam_cmd_mem_regions *cmd_mem_regions, + bool map_unmap) +{ + int rc = 0, i = 0; + size_t packet_size, map_cmd_size, len; + dma_addr_t iova; + unsigned long rem_jiffies; + int timeout = 5000; + struct hfi_cmd_ipe_bps_map *map_cmd; + struct hfi_cmd_dev_async *async_direct; + struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv; + + if (ctx_data->device_info->hw_dev_type == CAM_ICP_DEV_OFE) { + CAM_DBG(CAM_ICP, "%s OFE FW does not support map/unmap operations", + ctx_data->ctx_id_string); + return 0; + } + + map_cmd_size = + sizeof(struct hfi_cmd_ipe_bps_map) + + ((cmd_mem_regions->num_regions - 1) * + sizeof(struct mem_map_region_data)); + + map_cmd = CAM_MEM_ZALLOC(map_cmd_size, GFP_KERNEL); + if (!map_cmd) + return -ENOMEM; + + for (i = 0; i < cmd_mem_regions->num_regions; i++) { + rc = cam_mem_get_io_buf( + 
cmd_mem_regions->map_info_array_flex[i].mem_handle, + hw_mgr->iommu_hdl, &iova, &len, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ICP, + "%s: Failed to get cmd region iova for handle %u", + ctx_data->ctx_id_string, + cmd_mem_regions->map_info_array_flex[i].mem_handle); + CAM_MEM_FREE(map_cmd); + return -EINVAL; + } + + /* FW/CDM buffers are expected to be mapped in 32-bit address range */ + map_cmd->mem_map_region_sets_flex[i].start_addr = (uint32_t)iova + + (cmd_mem_regions->map_info_array_flex[i].offset); + map_cmd->mem_map_region_sets_flex[i].len = (uint32_t) len; + + CAM_DBG(CAM_ICP, "%s: Region %u mem_handle %d iova %pK len %u", + ctx_data->ctx_id_string, (i+1), + cmd_mem_regions->map_info_array_flex[i].mem_handle, + (uint32_t)iova, (uint32_t)len); + } + + map_cmd->mem_map_request_num = cmd_mem_regions->num_regions; + map_cmd->user_data = 0; + + packet_size = + sizeof(struct hfi_cmd_dev_async) + + (sizeof(struct hfi_cmd_ipe_bps_map) + + ((cmd_mem_regions->num_regions - 1) * + sizeof(struct mem_map_region_data))) - + sizeof(((struct hfi_cmd_dev_async *)0)->payload.direct); + + async_direct = CAM_MEM_ZALLOC(packet_size, GFP_KERNEL); + if (!async_direct) { + CAM_MEM_FREE(map_cmd); + return -ENOMEM; + } + + async_direct->size = packet_size; + async_direct->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT; + if (map_unmap) + async_direct->opcode = HFI_IPEBPS_CMD_OPCODE_MEM_MAP; + else + async_direct->opcode = HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP; + async_direct->num_fw_handles = 1; + async_direct->fw_handles_flex[0] = ctx_data->fw_handle; + async_direct->user_data1 = (uint64_t)ctx_data; + async_direct->user_data2 = (uint64_t)0x0; + memcpy(async_direct->payload.direct_flex, map_cmd, + map_cmd_size); + + reinit_completion(&ctx_data->wait_complete); + rc = hfi_write_cmd(hw_mgr->hfi_handle, async_direct); + if (rc) { + CAM_ERR(CAM_ICP, "%s: hfi write failed rc %d", + ctx_data->ctx_id_string, rc); + goto end; + } + + CAM_DBG(CAM_ICP, "%s: Sent FW %s cmd", ctx_data->ctx_id_string, + 
map_unmap ? "Map" : "Unmap"); + + rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG( + &ctx_data->wait_complete, + msecs_to_jiffies(timeout), CAM_ICP, + "%s: FW response timeout for process stream setting handle command", + ctx_data->ctx_id_string); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + cam_icp_dump_debug_info(ctx_data->hw_mgr_priv, false); + } + +end: + CAM_MEM_FREE(map_cmd); + CAM_MEM_FREE(async_direct); + return rc; +} + +static int cam_icp_process_presil_hangdump_info( + struct cam_icp_hw_ctx_data *ctx_data, + struct cam_cmd_mem_regions *cmd_mem_regions, + uint32_t index) +{ + int i = 0; + struct cam_hangdump_mem_regions *mem_regions = NULL; + + if (!ctx_data || !cmd_mem_regions) { + CAM_ERR(CAM_ICP, "Invalid hangdump info blob ctx %pK mem_region %pK", + ctx_data, cmd_mem_regions); + return -EINVAL; + } + + if ((cmd_mem_regions->num_regions == 0) || + (cmd_mem_regions->num_regions > HANG_DUMP_REGIONS_MAX)) { + CAM_ERR(CAM_ICP, "%s Invalid num hangdump mem regions %d ", + ctx_data->ctx_id_string, cmd_mem_regions->num_regions); + return -EINVAL; + } + + mem_regions = &ctx_data->hfi_frame_process.hangdump_mem_regions[index]; + CAM_INFO(CAM_ICP, "%s Hangdump Mem Num Regions %d index %d mem_regions 0x%pK", + ctx_data->ctx_id_string, cmd_mem_regions->num_regions, index, mem_regions); + + for (i = 0; i < cmd_mem_regions->num_regions; i++) { + mem_regions->mem_info_array[i].mem_handle = + cmd_mem_regions->map_info_array_flex[i].mem_handle; + mem_regions->mem_info_array[i].offset = + cmd_mem_regions->map_info_array_flex[i].offset; + mem_regions->mem_info_array[i].size = + cmd_mem_regions->map_info_array_flex[i].size; + CAM_INFO(CAM_ICP, "%s Hangdump Mem Region %u mem_handle 0x%08x iova 0x%08x len %u", + ctx_data->ctx_id_string, i, + cmd_mem_regions->map_info_array_flex[i].mem_handle, + (uint32_t)cmd_mem_regions->map_info_array_flex[i].offset, + (uint32_t)cmd_mem_regions->map_info_array_flex[i].size); + } + mem_regions->num_mem_regions = 
		cmd_mem_regions->num_regions;

	return 0;
}

/*
 * cam_icp_sys_cache_scid_params_changed - compare cached SCID staling params
 * against the incoming blob; true if any of distance/op-type/mode differ.
 */
static bool cam_icp_sys_cache_scid_params_changed(
	struct cam_icp_scid_cfg *current_info,
	struct cam_sys_cache_config *blob_info)
{
	if ((current_info->staling_distance != blob_info->staling_distance) ||
		(current_info->llcc_staling_op_type != blob_info->llcc_staling_op_type) ||
		(current_info->llcc_staling_mode != blob_info->llcc_staling_mode))
		return true;
	else
		return false;
}

/*
 * cam_icp_llcc_sys_cache_config_util - apply one LLCC system-cache config
 * request: register a new SCID entry (when @scid_match is false), then
 * honor the blob's deactivate / change_params / activate flags in order.
 *
 * Returns 0 on success or the first failing cam_cpas_* return code.
 */
static int cam_icp_llcc_sys_cache_config_util(
	struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_icp_scid_cfg *current_info,
	struct cam_sys_cache_config *blob_info, bool scid_match)
{
	int rc = 0;

	/* New SCID for this context: seed the cached entry from the blob. */
	if (scid_match == false) {
		current_info->scid_id = blob_info->scid_id;
		current_info->staling_distance = blob_info->staling_distance;
		current_info->llcc_staling_mode = blob_info->llcc_staling_mode;
		current_info->llcc_staling_op_type = blob_info->llcc_staling_op_type;
		current_info->activated = false;
		ctx_data->sys_cache_cfg.num++;
	}

	CAM_DBG(CAM_ICP,
		"scid_match = %d, current_info details scid_id = %d staling_distance = %d staling_mode = %d op_type = %d activated = %d",
		scid_match, current_info->scid_id, current_info->staling_distance,
		current_info->llcc_staling_mode, current_info->llcc_staling_op_type,
		current_info->activated);

	if (blob_info->deactivate) {
		if (current_info->activated) {
			rc = cam_cpas_deactivate_llcc(current_info->scid_id);
			if (rc) {
				CAM_ERR(CAM_ICP,
					"llcc activation is failing cache: %d rc = %d",
					current_info->scid_id, rc);
				goto end;
			}
			current_info->activated = false;
			CAM_DBG(CAM_ICP, "llcc deactivate is success activated = %d",
				current_info->activated);
		} else {
			CAM_ERR(CAM_ICP, "scid = %d already in deactivated state",
				current_info->scid_id);
		}
	}

	/* Staling params may only be (re)programmed while deactivated. */
	if (blob_info->change_params) {
		if (current_info->activated == false) {
			current_info->scid_id = blob_info->scid_id;
			current_info->staling_distance = blob_info->staling_distance;
current_info->llcc_staling_mode = blob_info->llcc_staling_mode; + current_info->llcc_staling_op_type = + blob_info->llcc_staling_op_type; + rc = cam_cpas_configure_staling_llcc(current_info->scid_id, + current_info->llcc_staling_mode, + current_info->llcc_staling_op_type, + current_info->staling_distance); + if (rc) { + CAM_ERR(CAM_ICP, + "llcc staling configuration is failing cache: %d mode %d op_type %d staling_distance %d", + current_info->scid_id, current_info->llcc_staling_mode, + current_info->llcc_staling_op_type, + current_info->staling_distance); + goto end; + } + CAM_DBG(CAM_ICP, "llcc configuration is success rc = %d change_params = %d", + rc, blob_info->change_params); + } else if (cam_icp_sys_cache_scid_params_changed(current_info, blob_info)) { + CAM_ERR(CAM_ICP, "configuration of llcc cache is failed scid = %d", + current_info->scid_id); + } + } + + if (blob_info->activate && current_info->activated == false) { + rc = cam_cpas_activate_llcc(current_info->scid_id); + if (rc) { + CAM_ERR(CAM_ICP, + "llcc staling activation is failing cache: %d", + current_info->scid_id); + goto end; + } + current_info->activated = true; + CAM_DBG(CAM_ICP, "llcc activation is success rc = %d activated = %d", + rc, current_info->activated); + } + +end: + return rc; + +} + +static int cam_icp_packet_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + struct cam_icp_clk_bw_request *soc_req; + struct cam_icp_clk_bw_request *clk_info; + struct cam_icp_clk_bw_request_v2 *soc_req_v2; + struct cam_icp_clk_bw_req_internal_v2 *clk_info_v2; + struct cam_cmd_mem_regions *cmd_mem_regions; + struct icp_cmd_generic_blob *blob; + struct cam_icp_hw_ctx_data *ctx_data; + struct cam_icp_hw_mgr *hw_mgr; + struct cam_icp_acquire_dev_info dev_io_info; + struct cam_sys_cache_config_request *sys_cache_blob_info; + struct cam_icp_sys_cache_cfg *sys_cache_cfg; + uint32_t index; + size_t io_buf_size, clk_update_size; + int rc = 0; + uintptr_t 
		pResource;
	uint32_t i = 0, j;
	bool scid_match;
	size_t scid_blob_size;

	if (!blob_data || (blob_size == 0)) {
		CAM_ERR(CAM_ICP, "Invalid blob info %pK %d", blob_data,
			blob_size);
		return -EINVAL;
	}

	blob = (struct icp_cmd_generic_blob *)user_data;
	ctx_data = blob->ctx;
	hw_mgr = ctx_data->hw_mgr_priv;
	/*
	 * NOTE(review): frame_info_idx may be -1 (stream-settings path); the
	 * "index < 0" guards below only work if index is a signed type —
	 * confirm the declaration above is int32_t, not uint32_t.
	 */
	index = blob->frame_info_idx;

	switch (blob_type) {
	case CAM_ICP_CMD_GENERIC_BLOB_CLK:
		if (index < 0) {
			CAM_ERR(CAM_ICP, "Invalid index %d", index);
			return -EINVAL;
		}

		/* Legacy v1 clock/BW vote: fixed-size blob, copied wholesale. */
		CAM_WARN_RATE_LIMIT_CUSTOM(CAM_PERF, 300, 1,
			"Using deprecated blob type GENERIC_BLOB_CLK");
		if (blob_size != sizeof(struct cam_icp_clk_bw_request)) {
			CAM_ERR(CAM_ICP, "%s: Mismatch blob size %d expected %lu",
				ctx_data->ctx_id_string, blob_size,
				sizeof(struct cam_icp_clk_bw_request));
			return -EINVAL;
		}

		/* A context must not mix v1 and v2 BW blob versions. */
		if (ctx_data->bw_config_version == CAM_ICP_BW_CONFIG_UNKNOWN) {
			ctx_data->bw_config_version = CAM_ICP_BW_CONFIG_V1;
		} else if (ctx_data->bw_config_version !=
			CAM_ICP_BW_CONFIG_V1) {
			CAM_ERR(CAM_ICP,
				"%s: Mismatch blob versions %d expected v1 %d, blob_type=%d",
				ctx_data->ctx_id_string, ctx_data->bw_config_version,
				CAM_ICP_BW_CONFIG_V1, blob_type);
			return -EINVAL;
		}

		clk_info = &ctx_data->hfi_frame_process.clk_info[index];

		soc_req = (struct cam_icp_clk_bw_request *)blob_data;
		*clk_info = *soc_req;
		CAM_DBG(CAM_PERF, "%s: budget:%llu fc: %llu %d BW %lld %lld",
			ctx_data->ctx_id_string, clk_info->budget_ns, clk_info->frame_cycles,
			clk_info->rt_flag, clk_info->uncompressed_bw,
			clk_info->compressed_bw);
		break;

	case CAM_ICP_CMD_GENERIC_BLOB_CLK_V2:
		if (index < 0) {
			CAM_ERR(CAM_ICP, "Invalid index %d", index);
			return -EINVAL;
		}

		/* v2 blob is variable length: header + num_paths axi votes. */
		if (blob_size < sizeof(struct cam_icp_clk_bw_request_v2)) {
			CAM_ERR(CAM_ICP, "%s: Mismatch blob size %d expected %lu",
				ctx_data->ctx_id_string,
				blob_size,
				sizeof(struct cam_icp_clk_bw_request_v2));
			return -EINVAL;
		}

		if (ctx_data->bw_config_version ==
			CAM_ICP_BW_CONFIG_UNKNOWN) {
			ctx_data->bw_config_version = CAM_ICP_BW_CONFIG_V2;
		} else if (ctx_data->bw_config_version !=
			CAM_ICP_BW_CONFIG_V2) {
			CAM_ERR(CAM_ICP,
				"%s: Mismatch blob versions %d expected v2 %d, blob_type=%d",
				ctx_data->ctx_id_string, ctx_data->bw_config_version,
				CAM_ICP_BW_CONFIG_V2, blob_type);
			return -EINVAL;
		}

		soc_req_v2 = (struct cam_icp_clk_bw_request_v2 *)blob_data;
		if (soc_req_v2->num_paths > CAM_ICP_MAX_PER_PATH_VOTES) {
			CAM_ERR(CAM_PERF, "%s: Invalid num paths: %d",
				ctx_data->ctx_id_string, soc_req_v2->num_paths);
			return -EINVAL;
		}

		/* Check for integer overflow */
		/*
		 * Guards (num_paths - 1) * sizeof(vote) against UINT_MAX wrap
		 * before the size computation below. num_paths == 1 needs no
		 * extra votes, hence the != 1 gate.
		 */
		if (soc_req_v2->num_paths != 1) {
			if (sizeof(struct cam_axi_per_path_bw_vote) >
				((UINT_MAX -
				sizeof(struct cam_icp_clk_bw_request_v2)) /
				(soc_req_v2->num_paths - 1))) {
				CAM_ERR(CAM_ICP,
					"%s: Size exceeds limit paths:%u size per path:%lu",
					ctx_data->ctx_id_string, soc_req_v2->num_paths - 1,
					sizeof(struct cam_axi_per_path_bw_vote));
				return -EINVAL;
			}
		}

		clk_update_size = sizeof(struct cam_icp_clk_bw_request_v2) +
			((soc_req_v2->num_paths - 1) * sizeof(struct cam_axi_per_path_bw_vote));
		if (blob_size < clk_update_size) {
			CAM_ERR(CAM_ICP, "%s: Invalid blob size: %u",
				ctx_data->ctx_id_string, blob_size);
			return -EINVAL;
		}

		clk_info = &ctx_data->hfi_frame_process.clk_info[index];
		clk_info_v2 = &ctx_data->hfi_frame_process.clk_info_v2[index];
		clk_info_v2->budget_ns = soc_req_v2->budget_ns;
		clk_info_v2->frame_cycles = soc_req_v2->frame_cycles;
		clk_info_v2->rt_flag = soc_req_v2->rt_flag;
		clk_info_v2->num_paths = soc_req_v2->num_paths;

		/* Copy per-path votes out of the flex array into ctx storage. */
		for (i = 0; i < soc_req_v2->num_paths; i++) {
			clk_info_v2->axi_path[i].usage_data =
				soc_req_v2->axi_path_flex[i].usage_data;
			clk_info_v2->axi_path[i].transac_type =
				soc_req_v2->axi_path_flex[i].transac_type;
			clk_info_v2->axi_path[i].path_data_type =
				soc_req_v2->axi_path_flex[i].path_data_type;
			clk_info_v2->axi_path[i].vote_level = 0;
			clk_info_v2->axi_path[i].camnoc_bw = soc_req_v2->axi_path_flex[i].camnoc_bw;
			clk_info_v2->axi_path[i].mnoc_ab_bw =
				soc_req_v2->axi_path_flex[i].mnoc_ab_bw;
			clk_info_v2->axi_path[i].mnoc_ib_bw =
				soc_req_v2->axi_path_flex[i].mnoc_ib_bw;
		}

		/* Use v1 structure for clk fields */
		clk_info->budget_ns = clk_info_v2->budget_ns;
		clk_info->frame_cycles = clk_info_v2->frame_cycles;
		clk_info->rt_flag = clk_info_v2->rt_flag;

		CAM_DBG(CAM_PERF,
			"%s: budget=%llu, frame_cycle=%llu, rt_flag=%d, num_paths=%d, index=%d, ctx_data=%pK",
			ctx_data->ctx_id_string, clk_info_v2->budget_ns,
			clk_info_v2->frame_cycles, clk_info_v2->rt_flag, clk_info_v2->num_paths,
			index, ctx_data);

		for (i = 0; i < clk_info_v2->num_paths; i++) {
			CAM_DBG(CAM_PERF,
				"%s: [%d] : path_type=%d, trans_type=%d, camnoc=%lld, mnoc_ab=%lld, mnoc_ib=%lld",
				ctx_data->ctx_id_string,
				i,
				clk_info_v2->axi_path[i].path_data_type,
				clk_info_v2->axi_path[i].transac_type,
				clk_info_v2->axi_path[i].camnoc_bw,
				clk_info_v2->axi_path[i].mnoc_ab_bw,
				clk_info_v2->axi_path[i].mnoc_ib_bw);
		}

		break;

	case CAM_ICP_CMD_GENERIC_BLOB_CFG_IO:
		CAM_DBG(CAM_ICP, "%s: CAM_ICP_CMD_GENERIC_BLOB_CFG_IO", ctx_data->ctx_id_string);
		/*
		 * NOTE(review): the user pointer is read through a uint32_t
		 * load from the blob — assumes the handle fits 32 bits; confirm
		 * against the UAPI layout of this blob.
		 */
		pResource = *((uint32_t *)blob_data);
		if (copy_from_user(&dev_io_info,
			(void __user *)pResource,
			sizeof(struct cam_icp_acquire_dev_info))) {
			CAM_ERR(CAM_ICP, "%s: Failed in copy from user", ctx_data->ctx_id_string);
			return -EFAULT;
		}
		CAM_DBG(CAM_ICP, "%s: buf handle %d", ctx_data->ctx_id_string,
			dev_io_info.io_config_cmd_handle);
		rc = cam_mem_get_io_buf(dev_io_info.io_config_cmd_handle, hw_mgr->iommu_hdl,
			blob->io_buf_addr, &io_buf_size, NULL, NULL);
		if (rc)
			CAM_ERR(CAM_ICP, "%s: Failed in blob update", ctx_data->ctx_id_string);
		else
			CAM_DBG(CAM_ICP, "%s: io buf addr %llu",
				ctx_data->ctx_id_string, *blob->io_buf_addr);
		break;

	case CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_MAP:
		cmd_mem_regions =
			(struct cam_cmd_mem_regions *)blob_data;
		/*
		 * NOTE(review): num_regions is read before blob_size is checked
		 * against the flex-array length implied by it — confirm the
		 * generic-blob framework validates blob_size upstream.
		 */
		if (cmd_mem_regions->num_regions <= 0) {
			rc = -EINVAL;
			CAM_ERR(CAM_ICP,
				"%s: Invalid number of regions for FW map %u",
				ctx_data->ctx_id_string, cmd_mem_regions->num_regions);
		} else {
			CAM_DBG(CAM_ICP,
				"%s: Processing blob for mapping %u regions",
				ctx_data->ctx_id_string, cmd_mem_regions->num_regions);
			rc = cam_icp_process_stream_settings(ctx_data,
				cmd_mem_regions, true);
		}
		break;

	case CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_UNMAP:
		cmd_mem_regions =
			(struct cam_cmd_mem_regions *)blob_data;
		if (cmd_mem_regions->num_regions <= 0) {
			rc = -EINVAL;
			CAM_ERR(CAM_ICP,
				"%s: Invalid number of regions for FW unmap %u",
				ctx_data->ctx_id_string, cmd_mem_regions->num_regions);
		} else {
			CAM_DBG(CAM_ICP,
				"%s: Processing blob for unmapping %u regions",
				ctx_data->ctx_id_string, cmd_mem_regions->num_regions);
			rc = cam_icp_process_stream_settings(ctx_data,
				cmd_mem_regions, false);
		}
		break;

	case CAM_ICP_CMD_GENERIC_BLOB_PRESIL_HANGDUMP:
		if (index < 0) {
			CAM_ERR(CAM_ICP, "Invalid index %d", index);
			return -EINVAL;
		}

		/* Only meaningful on pre-silicon platforms; no-op otherwise. */
		if (cam_presil_mode_enabled()) {
			cmd_mem_regions = (struct cam_cmd_mem_regions *)blob_data;
			if (cmd_mem_regions->num_regions <= 0) {
				CAM_INFO(CAM_ICP, "%s: Pre-sil Hangdump disabled %u",
					ctx_data->ctx_id_string, cmd_mem_regions->num_regions);
			} else {
				CAM_INFO(CAM_ICP,
					"%s: Pre-sil Hangdump enabled %u entries index %d",
					ctx_data->ctx_id_string, cmd_mem_regions->num_regions,
					index);
				rc = cam_icp_process_presil_hangdump_info(ctx_data,
					cmd_mem_regions, index);
			}
		}
		break;

	case CAM_ICP_CMD_GENERIC_BLOB_SYSCACHE_CONFIG:
		if (blob_size < sizeof(struct cam_sys_cache_config_request)) {
			CAM_ERR(CAM_ICP, "%s: Mismatch blob size %d expected %lu",
				ctx_data->ctx_id_string, blob_size,
				sizeof(struct cam_sys_cache_config_request));
			return -EINVAL;
		}

		sys_cache_blob_info = (struct cam_sys_cache_config_request *)blob_data;
		scid_blob_size = sizeof(struct
			cam_sys_cache_config_request) +
			((sys_cache_blob_info->num - 1) *
			sizeof(struct cam_sys_cache_config));

		if (blob_size < scid_blob_size) {
			CAM_ERR(CAM_ICP, "%s: Invalid blob size: %u",
				ctx_data->ctx_id_string, blob_size);
			return -EINVAL;
		}

		sys_cache_cfg = &ctx_data->sys_cache_cfg;

		CAM_DBG(CAM_ICP, "num of blob cache config = %d sys cache config = %d",
			sys_cache_blob_info->num, sys_cache_cfg->num);

		/* Match each blob entry to an existing SCID slot, else append. */
		for (j = 0; j < sys_cache_blob_info->num; j++) {
			scid_match = false;
			for (i = 0; i < sys_cache_cfg->num; i++) {
				if (sys_cache_cfg->scid_cfg[i].scid_id ==
					sys_cache_blob_info->sys_cache_config_flex[j].scid_id) {
					scid_match = true;
					CAM_DBG(CAM_ICP, "matched scid = %d, old param: i = %d, op_type: %d mode: %d staling distance = %d",
						sys_cache_cfg->scid_cfg[i].scid_id, i,
						sys_cache_cfg->scid_cfg[i].llcc_staling_op_type,
						sys_cache_cfg->scid_cfg[i].llcc_staling_mode,
						sys_cache_cfg->scid_cfg[i].staling_distance);
					CAM_DBG(CAM_ICP, "new param: j = %d, op_type: %d mode: %d staling distance = %d",
						j, sys_cache_blob_info->sys_cache_config_flex[j]
						.llcc_staling_op_type,
						sys_cache_blob_info->sys_cache_config_flex[j]
						.llcc_staling_mode,
						sys_cache_blob_info->sys_cache_config_flex[j]
						.staling_distance);
					break;
				}
			}

			/*
			 * NOTE(review): when no match, i == sys_cache_cfg->num
			 * and scid_cfg[i] appends a new slot; capacity of
			 * scid_cfg[] is not checked here — confirm num is
			 * bounded elsewhere before the append.
			 */
			rc = cam_icp_llcc_sys_cache_config_util(
				ctx_data, &sys_cache_cfg->scid_cfg[i],
				&sys_cache_blob_info->sys_cache_config_flex[j], scid_match);
			if (rc) {
				CAM_ERR(CAM_ICP, "%d: llcc cache configuration failed %d",
					sys_cache_blob_info->sys_cache_config_flex[j].scid_id, rc);
			}
		}
		break;

	default:
		CAM_WARN(CAM_ICP, "%s: Invalid blob type %d", ctx_data->ctx_id_string,
			blob_type);
		break;
	}

	return rc;
}

/*
 * cam_icp_process_generic_cmd_buffer - walk a packet's cmd descriptors and
 * run the generic blob handler on each GENERIC_BLOB cmd buffer.
 * @index: frame slot for per-frame blobs, or -1 for stream settings.
 */
static int cam_icp_process_generic_cmd_buffer(
	struct cam_packet *packet,
	struct cam_icp_hw_ctx_data *ctx_data,
	int32_t index,
	dma_addr_t *io_buf_addr)
{
	int i, rc = 0;
	struct cam_cmd_buf_desc *cmd_desc = NULL;
	struct icp_cmd_generic_blob cmd_generic_blob;

cmd_generic_blob.ctx = ctx_data; + cmd_generic_blob.frame_info_idx = index; + cmd_generic_blob.io_buf_addr = io_buf_addr; + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *) &packet->payload_flex + packet->cmd_buf_offset/4); + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (!cmd_desc[i].length) + continue; + + if (cmd_desc[i].meta_data != CAM_ICP_CMD_META_GENERIC_BLOB) + continue; + + rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i], + cam_icp_packet_generic_blob_handler, &cmd_generic_blob); + if (rc) + CAM_ERR(CAM_ICP, "Failed in processing blobs %d", rc); + } + + return rc; +} + +static int cam_icp_mgr_process_cfg_io_cmd( + struct cam_icp_hw_ctx_data *ctx_data, + struct hfi_cmd_dev_async *ioconfig_cmd, + uint64_t request_id, + uint64_t io_config) +{ + struct cam_icp_hw_ctx_info *ctx_info; + + ctx_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_info), GFP_ATOMIC); + if (!ctx_info) { + CAM_ERR(CAM_ICP, "Failed in allocating memory for ICP ctx info"); + return -ENOMEM; + } + + ctx_info->ctx_id = ctx_data->ctx_id; + ctx_info->ctx_data = ctx_data; + + switch (ctx_data->device_info->hw_dev_type) { + case CAM_ICP_DEV_BPS: + ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO; + ioconfig_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT; + break; + case CAM_ICP_DEV_IPE: + ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO; + ioconfig_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT; + break; + case CAM_ICP_DEV_OFE: + ioconfig_cmd->opcode = HFI_OFE_CMD_OPCODE_CONFIG_IO; + ioconfig_cmd->pkt_type = HFI_CMD_OFE_ASYNC_COMMAND_INDIRECT; + break; + default: + CAM_ERR(CAM_ICP, "%s: Invalid device type %u not supported", + ctx_data->ctx_id_string, ctx_data->device_info->hw_dev_type); + return -EINVAL; + } + + ioconfig_cmd->size = sizeof(struct hfi_cmd_dev_async); + ioconfig_cmd->num_fw_handles = 1; + ioconfig_cmd->fw_handles_flex[0] = 
		ctx_data->fw_handle;
	ioconfig_cmd->payload.indirect = io_config;
	/* ctx_info ownership transfers to the async command via user_data1. */
	ioconfig_cmd->user_data1 = PTR_TO_U64(ctx_info);
	ioconfig_cmd->user_data2 = request_id;

	return 0;
}

/*
 * cam_icp_mgr_update_hfi_frame_process - claim a free frame slot, process the
 * packet's generic blobs into it, and (if an IO config blob was present)
 * build the CONFIG_IO command for that slot.
 *
 * On success *idx holds the claimed slot. On failure the slot is released.
 */
static int cam_icp_mgr_update_hfi_frame_process(
	struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_packet *packet,
	struct cam_hw_prepare_update_args *prepare_args,
	int32_t *idx)
{
	int32_t index, rc;
	struct hfi_cmd_dev_async *hfi_cmd = NULL;

	index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
		ctx_data->hfi_frame_process.bits);
	if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
		CAM_ERR(CAM_ICP, "%s request idx is wrong: %d",
			ctx_data->ctx_id_string, index);
		return -EINVAL;
	}
	set_bit(index, ctx_data->hfi_frame_process.bitmap);

	ctx_data->hfi_frame_process.request_id[index] =
		packet->header.request_id;
	ctx_data->hfi_frame_process.frame_info[index].request_id =
		packet->header.request_id;
	ctx_data->hfi_frame_process.frame_info[index].io_config = 0;
	ctx_data->hfi_frame_process.frame_info[index].pkt = packet;
	rc = cam_icp_process_generic_cmd_buffer(packet, ctx_data, index,
		&ctx_data->hfi_frame_process.frame_info[index].io_config);
	if (rc) {
		/* Roll back the slot claim on blob processing failure. */
		clear_bit(index, ctx_data->hfi_frame_process.bitmap);
		ctx_data->hfi_frame_process.request_id[index] = -1;
		return rc;
	}

	if (ctx_data->hfi_frame_process.frame_info[index].io_config) {
		hfi_cmd = (struct hfi_cmd_dev_async *)
			&ctx_data->hfi_frame_process.frame_info[index].hfi_cfg_io_cmd;
		rc = cam_icp_mgr_process_cfg_io_cmd(ctx_data, hfi_cmd,
			packet->header.request_id,
			ctx_data->hfi_frame_process.frame_info[index].io_config);
	}
	*idx = index;

	return rc;
}

/*
 * cam_icp_mgr_config_stream_settings - process the stream-level generic blob
 * (first cmd descriptor only) for a context, e.g. FW map/unmap regions.
 */
static int cam_icp_mgr_config_stream_settings(
	void *hw_mgr_priv, void *hw_stream_settings)
{
	int rc = 0;
	struct cam_icp_hw_ctx_data *ctx_data = NULL;
	struct cam_packet *packet = NULL;
	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
	struct cam_cmd_buf_desc *cmd_desc = NULL;
	struct icp_cmd_generic_blob
		cmd_generic_blob;
	struct cam_hw_stream_setttings *config_args =
		hw_stream_settings;

	if ((!hw_stream_settings) ||
		(!hw_mgr) || (!config_args->packet)) {
		CAM_ERR(CAM_ICP, "Invalid input arguments");
		return -EINVAL;
	}

	ctx_data = config_args->ctxt_to_hw_map;
	mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
	packet = config_args->packet;

	/* frame_info_idx = -1 marks this as a stream (not per-frame) blob. */
	cmd_generic_blob.ctx = ctx_data;
	cmd_generic_blob.frame_info_idx = -1;
	cmd_generic_blob.io_buf_addr = NULL;

	cmd_desc = (struct cam_cmd_buf_desc *)
		((uint32_t *) &packet->payload_flex + packet->cmd_buf_offset/4);

	rc = cam_packet_util_validate_cmd_desc(cmd_desc);
	if (rc)
		goto end;

	if (!cmd_desc[0].length ||
		cmd_desc[0].meta_data != CAM_ICP_CMD_META_GENERIC_BLOB) {
		CAM_ERR(CAM_ICP, "%s: Invalid cmd buffer length/metadata",
			ctx_data->ctx_id_string);
		rc = -EINVAL;
		goto end;
	}

	rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[0],
		cam_icp_packet_generic_blob_handler, &cmd_generic_blob);
	if (rc)
		CAM_ERR(CAM_ICP, "%s: Failed in processing cmd mem blob %d",
			ctx_data->ctx_id_string, rc);

end:
	mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
	return rc;
}

/*
 * cam_icp_mgr_prepare_hw_update - validate and translate a request packet
 * into an HFI frame-process command: patch buffers, claim a frame slot,
 * process IO config, and publish the HW update entry.
 */
static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
	void *prepare_hw_update_args)
{
	int rc = 0;
	int32_t idx;
	uint32_t fw_cmd_buf_iova_addr;
	struct cam_icp_hw_ctx_data *ctx_data = NULL;
	struct cam_packet *packet = NULL;
	struct hfi_cmd_dev_async *hfi_cmd = NULL;
	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
	struct cam_hw_prepare_update_args *prepare_args =
		prepare_hw_update_args;

	if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
		CAM_ERR(CAM_ICP, "Invalid args");
		return -EINVAL;
	}

	ctx_data = prepare_args->ctxt_to_hw_map;
	mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
		CAM_ERR(CAM_ICP, "%s: is not in use", ctx_data->ctx_id_string);
		rc = -EINVAL;
		goto end;
	}

	packet =
prepare_args->packet; + + if (cam_packet_util_validate_packet(packet, prepare_args->remain_len)) { + mutex_unlock(&ctx_data->ctx_mutex); + return -EINVAL; + } + + rc = cam_icp_mgr_pkt_validation(ctx_data, packet); + if (rc) + goto end; + + rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet, + ctx_data, &fw_cmd_buf_iova_addr, prepare_args->buf_tracker); + if (rc) + goto end; + + CAM_DBG(CAM_REQ, "%s: req id = %lld", ctx_data->ctx_id_string, + packet->header.request_id); + /* Update Buffer Address from handles and patch information */ + rc = cam_packet_util_process_patches(packet, prepare_args->buf_tracker, + hw_mgr->iommu_hdl, hw_mgr->iommu_sec_hdl, true); + if (rc) + goto end; + + rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet, + prepare_args, &idx); + if (rc) + goto end; + + rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data, + packet, prepare_args, idx); + if (rc) { + if (ctx_data->hfi_frame_process.in_resource[idx] > 0) + cam_sync_destroy( + ctx_data->hfi_frame_process.in_resource[idx]); + clear_bit(idx, ctx_data->hfi_frame_process.bitmap); + ctx_data->hfi_frame_process.request_id[idx] = -1; + goto end; + } + + hfi_cmd = (struct hfi_cmd_dev_async *) + &ctx_data->hfi_frame_process.hfi_frame_cmd[idx]; + cam_icp_mgr_prepare_frame_process_cmd( + ctx_data, hfi_cmd, packet->header.request_id, + fw_cmd_buf_iova_addr); + + prepare_args->num_hw_update_entries = 1; + prepare_args->hw_update_entries[0].addr = (uintptr_t)hfi_cmd; + prepare_args->priv = &ctx_data->hfi_frame_process.frame_info[idx]; + + CAM_DBG(CAM_ICP, "%s: X: req id = %lld", ctx_data->ctx_id_string, + packet->header.request_id); + +end: + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + return rc; +} + +static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data) +{ + struct hfi_frame_process_info *hfi_frame_process; + struct cam_icp_hw_buf_done_evt_data icp_evt_data; + struct cam_hw_done_event_data buf_data = {0}; + struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv; + int 
		idx;

	mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
	hfi_frame_process = &ctx_data->hfi_frame_process;
	buf_data.evt_param = CAM_SYNC_ICP_EVENT_ABORTED;
	icp_evt_data.evt_id = CAM_CTX_EVT_ID_CANCEL;
	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
		if (!hfi_frame_process->request_id[idx])
			continue;

		buf_data.request_id = hfi_frame_process->request_id[idx];
		icp_evt_data.buf_done_data = &buf_data;
		/* Notify context layer that this request was cancelled. */
		ctx_data->ctxt_event_cb(ctx_data->context_priv,
			CAM_ICP_EVT_ID_BUF_DONE,
			&icp_evt_data);

		/* now release memory for hfi frame process command */
		hfi_frame_process->request_id[idx] = 0;
		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
			CAM_DBG(CAM_ICP, "%s: Delete merged sync in object: %d",
				ctx_data->ctx_id_string,
				ctx_data->hfi_frame_process.in_resource[idx]);
			cam_sync_destroy(
				ctx_data->hfi_frame_process.in_resource[idx]);
			ctx_data->hfi_frame_process.in_resource[idx] = 0;
		}
		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
	}
	mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]);
	return 0;
}

/*
 * cam_icp_mgr_delete_sync - workqueue callback that drops deferred
 * in_free_resource sync objects for a context, if the context is still
 * active. Frees the cam_icp_hw_ctx_info payload in all paths.
 */
static int cam_icp_mgr_delete_sync(void *priv, void *data)
{
	struct hfi_cmd_work_data *task_data = NULL;
	struct cam_icp_hw_ctx_info *ctx_info;
	struct cam_icp_hw_ctx_data *ctx_data;
	struct hfi_frame_process_info *hfi_frame_process;
	struct cam_icp_hw_mgr *hw_mgr = priv;
	int idx, ctx_id;

	if (!data || !priv) {
		CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
		return -EINVAL;
	}

	task_data = (struct hfi_cmd_work_data *)data;
	ctx_info = task_data->data;
	if (!ctx_info) {
		CAM_ERR(CAM_ICP, "Null ICP ctx info");
		CAM_MEM_FREE(task_data->data);
		task_data->data = NULL;
		return -EINVAL;
	}

	ctx_id = ctx_info->ctx_id;
	ctx_data = ctx_info->ctx_data;
	if (!ctx_data) {
		CAM_ERR(CAM_ICP, "Null Context");
		CAM_MEM_FREE(task_data->data);
		task_data->data = NULL;
		return -EINVAL;
	}

	/* Guard against the context being released before this work runs. */
	mutex_lock(&hw_mgr->ctx_mutex[ctx_id]);
	if (!test_bit(ctx_id, hw_mgr->active_ctx_info.active_ctx_bitmap))
	{
		CAM_DBG(CAM_ICP, "ctx data is already released before accessing, ctx_id: %u",
			ctx_id);
		goto end;
	}

	hfi_frame_process = &ctx_data->hfi_frame_process;
	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
		if (!hfi_frame_process->in_free_resource[idx])
			continue;
		/*
		 * NOTE(review): the actual cam_sync_destroy() is commented out
		 * and the loop clears in_resource[] while keying on
		 * in_free_resource[], which is never cleared — presumably
		 * intentional (destroy handled elsewhere?) but confirm; as
		 * written the deferred sync objects are never destroyed here.
		 */
		//cam_sync_destroy(
		//ctx_data->hfi_frame_process.in_free_resource[idx]);
		ctx_data->hfi_frame_process.in_resource[idx] = 0;
	}

end:
	mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
	CAM_MEM_FREE(task_data->data);
	task_data->data = NULL;
	return 0;
}

/*
 * cam_icp_mgr_delete_sync_obj - enqueue cam_icp_mgr_delete_sync on the cmd
 * workqueue for @ctx_data. The allocated ctx_info is owned by the worker
 * on success, freed here on enqueue failure.
 * NOTE(review): if CAM_MEM_ZALLOC fails, the workq task already obtained is
 * not returned to the pool — confirm whether the workq API requires that.
 */
static int cam_icp_mgr_delete_sync_obj(struct cam_icp_hw_ctx_data *ctx_data)
{
	int rc = 0;
	struct crm_workq_task *task;
	struct hfi_cmd_work_data *task_data;
	struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv;
	struct cam_icp_hw_ctx_info *ctx_info;

	task = cam_req_mgr_workq_get_task(hw_mgr->cmd_work);
	if (!task) {
		CAM_ERR(CAM_ICP, "%s: no empty task", ctx_data->ctx_id_string);
		return -ENOMEM;
	}

	ctx_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_info), GFP_ATOMIC);
	if (!ctx_info) {
		CAM_ERR(CAM_ICP, "Failed in allocating ICP ctx info");
		return -ENOMEM;
	}

	ctx_info->ctx_data = ctx_data;
	ctx_info->ctx_id = ctx_data->ctx_id;

	task_data = (struct hfi_cmd_work_data *)task->payload;
	task_data->data = (void *)ctx_info;
	task_data->request_id = 0;
	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
	task->process_cb = cam_icp_mgr_delete_sync;
	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr,
		CRM_TASK_PRIORITY_0);
	if (rc) {
		CAM_ERR(CAM_ICP, "Failed at enqueuing task to workq, ctx_id: %d", ctx_info->ctx_id);
		CAM_MEM_FREE(ctx_info);
	}

	return rc;
}

/*
 * cam_icp_mgr_flush_all - drop every pending frame slot of a context,
 * deferring sync-object destruction to the delete-sync worker.
 */
static int cam_icp_mgr_flush_all(struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_hw_flush_args *flush_args)
{
	struct hfi_frame_process_info *hfi_frame_process;
	int idx;
	bool clear_in_resource = false;

	hfi_frame_process = &ctx_data->hfi_frame_process;
	cam_icp_cpas_deactivate_llcc(ctx_data);
	for (idx = 0; idx < CAM_FRAME_CMD_MAX;
		idx++) {
		if (!hfi_frame_process->request_id[idx])
			continue;

		/* now release memory for hfi frame process command */
		hfi_frame_process->request_id[idx] = 0;
		/* Park the sync obj for deferred destruction by the worker. */
		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
			ctx_data->hfi_frame_process.in_free_resource[idx] =
				ctx_data->hfi_frame_process.in_resource[idx];
			ctx_data->hfi_frame_process.in_resource[idx] = 0;
		}
		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
		clear_in_resource = true;
	}

	if (clear_in_resource)
		cam_icp_mgr_delete_sync_obj(ctx_data);

	return 0;
}

/*
 * cam_icp_mgr_flush_req - flush a single pending request (matched by id)
 * from a context's frame slots; sync destruction is deferred as in
 * flush_all.
 * NOTE(review): flush_req_pending[0] is dereferenced without checking
 * num_req_pending — assumes the caller guarantees at least one entry.
 */
static int cam_icp_mgr_flush_req(struct cam_icp_hw_ctx_data *ctx_data,
	struct cam_hw_flush_args *flush_args)
{
	int64_t request_id;
	struct hfi_frame_process_info *hfi_frame_process;
	int idx;
	bool clear_in_resource = false;

	hfi_frame_process = &ctx_data->hfi_frame_process;
	request_id = *(int64_t *)flush_args->flush_req_pending[0];
	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
		if (!hfi_frame_process->request_id[idx])
			continue;

		if (hfi_frame_process->request_id[idx] != request_id)
			continue;

		hfi_frame_process->request_id[idx] = 0;
		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
			ctx_data->hfi_frame_process.in_free_resource[idx] =
				ctx_data->hfi_frame_process.in_resource[idx];
			ctx_data->hfi_frame_process.in_resource[idx] = 0;
		}
		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
		clear_in_resource = true;
	}

	if (clear_in_resource)
		cam_icp_mgr_delete_sync_obj(ctx_data);

	return 0;
}

/*
 * cam_icp_mgr_flush_info_dump - debug-log the active and pending request
 * ids being flushed for a context.
 */
static void cam_icp_mgr_flush_info_dump(
	struct cam_hw_flush_args *flush_args, uint32_t ctx_id)
{
	int i;

	for (i = 0; i < flush_args->num_req_active; i++) {
		CAM_DBG(CAM_ICP, "Flushing active request %lld in ctx %u",
			*(int64_t *)flush_args->flush_req_active[i],
			ctx_id);
	}

	for (i = 0; i < flush_args->num_req_pending; i++) {
		CAM_DBG(CAM_ICP, "Flushing pending request %lld in ctx %u",
			*(int64_t *)flush_args->flush_req_pending[i],
			ctx_id);
	}
}

static int
cam_icp_mgr_enqueue_abort(
	struct cam_icp_hw_ctx_data *ctx_data)
{
	/*
	 * Enqueue an abort command for the context on the cmd workqueue and
	 * synchronously wait (2s) for the FW ack. Device clocks are bumped
	 * to TURBO while any abort is in flight and restored by the last
	 * abort to finish.
	 */
	int timeout = 2000, rc;
	unsigned long rem_jiffies = 0;
	struct cam_icp_hw_mgr *hw_mgr = ctx_data->hw_mgr_priv;
	struct hfi_cmd_work_data *task_data;
	struct crm_workq_task *task;
	struct cam_icp_hw_ctx_info *ctx_info;
	struct cam_icp_clk_info *dev_clk_info = NULL;

	task = cam_req_mgr_workq_get_task(hw_mgr->cmd_work);
	if (!task) {
		CAM_ERR(CAM_ICP, "%s: no empty task", ctx_data->ctx_id_string);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): on this failure the task obtained above is not
	 * returned to the workq pool — confirm whether that leaks a task.
	 */
	ctx_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_info), GFP_KERNEL);
	if (!ctx_info) {
		CAM_ERR(CAM_ICP, "Failed in allocating memory for ICP ctx info");
		return -ENOMEM;
	}

	ctx_info->ctx_id = ctx_data->ctx_id;
	ctx_info->ctx_data = ctx_data;
	ctx_info->hw_mgr = hw_mgr;

	reinit_completion(&ctx_data->wait_complete);
	task_data = (struct hfi_cmd_work_data *)task->payload;
	task_data->data = (void *)ctx_info;
	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
	task->process_cb = cam_icp_mgr_abort_handle_wq;

	atomic_inc(&hw_mgr->abort_in_process);
	cam_icp_update_clk_util(ctx_data->clk_info.clk_rate[CAM_TURBO_VOTE],
		hw_mgr, ctx_data);
	CAM_DBG(CAM_ICP, "[%s] voting device to %u rate",
		hw_mgr->hw_mgr_name, ctx_data->clk_info.clk_rate[CAM_TURBO_VOTE]);

	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr,
		CRM_TASK_PRIORITY_0);
	if (rc) {
		CAM_ERR(CAM_ICP, "Failed at enqueuing task to workq, ctx_id: %d", ctx_info->ctx_id);
		CAM_MEM_FREE(ctx_info);
		goto end;
	}

	rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG(
		&ctx_data->wait_complete,
		msecs_to_jiffies(timeout), CAM_ICP,
		"%s FW response timeout for Abort handle command",
		ctx_data->ctx_id_string);
	if (!rem_jiffies) {
		rc = -ETIMEDOUT;
		cam_icp_dump_debug_info(hw_mgr, false);
		ctx_data->abort_timed_out = true;
		goto end;
	}

	rc = 0;
	CAM_DBG(CAM_ICP, "%s: Abort after flush is success", ctx_data->ctx_id_string);

end:
	atomic_dec(&hw_mgr->abort_in_process);
	/* Last abort out restores the device's normal clock vote. */
	if (!atomic_read(&hw_mgr->abort_in_process)) {
		dev_clk_info = &ctx_data->device_info->clk_info;

		cam_icp_update_clk_util(dev_clk_info->curr_clk, hw_mgr, ctx_data);
		CAM_DBG(CAM_ICP, "[%s] voting device back to %u rate",
			hw_mgr->hw_mgr_name, dev_clk_info->curr_clk);
	}
	return rc;
}

/*
 * cam_icp_mgr_hw_dump - serialize clock/BW state, hw-mgr flags, request
 * timestamps, and the FW image into the user-supplied dump buffer.
 */
static int cam_icp_mgr_hw_dump(void *hw_priv, void *hw_dump_args)
{
	int rc;
	int i, j;
	size_t remain_len;
	uint8_t *dst;
	uint32_t min_len;
	uint64_t *addr, *start;
	uint64_t *clk_addr, *clk_start;
	uint32_t *mgr_addr, *mgr_start;
	struct timespec64 cur_ts;
	struct timespec64 req_ts;
	ktime_t cur_time;
	struct cam_hw_intf *icp_dev_intf;
	struct cam_icp_hw_mgr *hw_mgr;
	struct cam_hw_dump_args *dump_args;
	struct cam_icp_hw_ctx_data *ctx_data;
	struct cam_icp_dump_header *hdr, *inner_hdr;
	struct cam_icp_hw_dump_args icp_dump_args;
	struct hfi_frame_process_info *frm_process;
	int frm_idx = -1;

	if ((!hw_priv) || (!hw_dump_args)) {
		CAM_ERR(CAM_ICP, "Invalid params %pK %pK",
			hw_priv, hw_dump_args);
		return -EINVAL;
	}

	memset(&req_ts, 0, sizeof(struct timespec64));

	dump_args = (struct cam_hw_dump_args *)hw_dump_args;
	hw_mgr = hw_priv;
	ctx_data = dump_args->ctxt_to_hw_map;
	CAM_DBG(CAM_ICP, "[%s] Req %lld",
		hw_mgr->hw_mgr_name,
		dump_args->request_id);
	/* Locate the frame slot for the faulting request, if FW had it. */
	frm_process = &ctx_data->hfi_frame_process;
	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
		if ((frm_process->request_id[i] ==
			dump_args->request_id) &&
			frm_process->fw_process_flag[i]) {
			frm_idx = i;
			break;
		}
	}

	cur_time = ktime_get();
	cur_ts = ktime_to_timespec64(cur_time);
	if (frm_idx >= 0) {
		req_ts = ktime_to_timespec64(frm_process->submit_timestamp[frm_idx]);
	}

	CAM_INFO(CAM_ICP,
		"[%s] Error req %lld req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]",
		hw_mgr->hw_mgr_name, dump_args->request_id,
		req_ts.tv_sec,
		req_ts.tv_nsec/NSEC_PER_USEC,
		cur_ts.tv_sec,
		cur_ts.tv_nsec/NSEC_PER_USEC);
	rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
		&icp_dump_args.cpu_addr, &icp_dump_args.buf_len);
	if (rc) {
		CAM_ERR(CAM_ICP, "[%s] Invalid addr %u rc %d",
			hw_mgr->hw_mgr_name, dump_args->buf_handle, rc);
		return rc;
	}
	if (icp_dump_args.buf_len <= dump_args->offset) {
		CAM_WARN(CAM_ICP, "[%s] dump buffer overshoot len %zu offset %zu",
			hw_mgr->hw_mgr_name, icp_dump_args.buf_len, dump_args->offset);
		rc = -ENOSPC;
		goto put_cpu_buf;
	}

	remain_len = icp_dump_args.buf_len - dump_args->offset;
	min_len = sizeof(struct cam_icp_dump_header) +
		(CAM_ICP_DUMP_NUM_WORDS_CLK * sizeof(uint64_t)) +
		sizeof(struct cam_icp_dump_header) *
		(hw_mgr->num_dev_info + ctx_data->clk_info.num_paths);
	if (remain_len < min_len) {
		CAM_WARN(CAM_ICP, "[%s] dump buffer exhaust remain %zu min %u",
			hw_mgr->hw_mgr_name, remain_len, min_len);
		rc = -ENOSPC;
		goto put_cpu_buf;
	}

	/* Dumping clock and bandwidth info */
	dst = (uint8_t *)icp_dump_args.cpu_addr + dump_args->offset;
	hdr = (struct cam_icp_dump_header *)dst;
	scnprintf(hdr->tag, CAM_ICP_DUMP_TAG_MAX_LEN, "ICP_HW_CLK:");
	hdr->word_size = sizeof(uint64_t);
	clk_addr = (uint64_t *)(dst + sizeof(struct cam_icp_dump_header));
	clk_start = clk_addr;
	*clk_addr++ = hw_mgr->num_dev_info;
	*clk_addr++ = ctx_data->clk_info.num_paths;
	dst = (uint8_t *)clk_addr;
	/* One nested header per device, carrying prev/curr clk in its tag. */
	for (i = 0; i < hw_mgr->num_dev_info; i++) {
		inner_hdr = (struct cam_icp_dump_header *)dst;
		scnprintf(inner_hdr->tag, CAM_ICP_DUMP_TAG_MAX_LEN,
			"dev[%s] prev_clk = 0x%x curr_clk = 0x%x:",
			hw_mgr->dev_info[i].dev_name,
			hw_mgr->dev_info[i].clk_info.prev_clk,
			hw_mgr->dev_info[i].clk_info.curr_clk);
		dst += sizeof(struct cam_icp_dump_header);
	}
	/* One nested header per AXI path with its bandwidth votes. */
	for (j = 0; j < ctx_data->clk_info.num_paths; j++) {
		inner_hdr = (struct cam_icp_dump_header *)dst;
		scnprintf(inner_hdr->tag, CAM_ICP_DUMP_TAG_MAX_LEN,
			"camnoc_bw %lld ab_bw = %lld ib_bw = %lld:",
			ctx_data->clk_info.axi_path[j].camnoc_bw,
			ctx_data->clk_info.axi_path[j].mnoc_ab_bw,
			ctx_data->clk_info.axi_path[j].mnoc_ib_bw);
+ dst += sizeof(struct cam_icp_dump_header); + } + hdr->size = dst - (uint8_t *)clk_start; + dump_args->offset += (hdr->size + sizeof(struct cam_icp_dump_header)); + + remain_len = icp_dump_args.buf_len - dump_args->offset; + min_len = sizeof(struct cam_icp_dump_header) + + (CAM_ICP_DUMP_NUM_WORDS_MGR * sizeof(uint32_t)); + /* Dumping hw mgr info */ + dst = (uint8_t *)icp_dump_args.cpu_addr + dump_args->offset; + hdr = (struct cam_icp_dump_header *)dst; + scnprintf(hdr->tag, CAM_ICP_DUMP_TAG_MAX_LEN, "ICP_HW_MGR.%s:", + cam_icp_dev_type_to_name(ctx_data->icp_dev_acquire_info->dev_type)); + hdr->word_size = sizeof(uint32_t); + mgr_addr = (uint32_t *)(dst + sizeof(struct cam_icp_dump_header)); + mgr_start = mgr_addr; + *mgr_addr++ = atomic_read(&hw_mgr->recovery); + *mgr_addr++ = hw_mgr->icp_booted; + *mgr_addr++ = hw_mgr->icp_resumed; + *mgr_addr++ = hw_mgr->disable_ubwc_comp; + *mgr_addr++ = hw_mgr->icp_pc_flag; + *mgr_addr++ = hw_mgr->dev_pc_flag; + *mgr_addr++ = hw_mgr->icp_use_pil; + hdr->size = hdr->word_size * (mgr_addr - mgr_start); + dump_args->offset += (hdr->size + sizeof(struct cam_icp_dump_header)); + + remain_len = icp_dump_args.buf_len - dump_args->offset; + min_len = sizeof(struct cam_icp_dump_header) + + (CAM_ICP_DUMP_NUM_WORDS_REQ * sizeof(uint64_t)); + /* Dumping time info */ + dst = (uint8_t *)icp_dump_args.cpu_addr + dump_args->offset; + hdr = (struct cam_icp_dump_header *)dst; + scnprintf(hdr->tag, CAM_ICP_DUMP_TAG_MAX_LEN, "ICP_REQ:"); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_icp_dump_header)); + start = addr; + *addr++ = dump_args->request_id; + *addr++ = req_ts.tv_sec; + *addr++ = req_ts.tv_nsec / NSEC_PER_USEC; + *addr++ = cur_ts.tv_sec; + *addr++ = cur_ts.tv_nsec / NSEC_PER_USEC; + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += (hdr->size + sizeof(struct cam_icp_dump_header)); + + /* Dumping the fw image*/ + icp_dump_args.offset = dump_args->offset; + icp_dev_intf = 
hw_mgr->icp_dev_intf; + if (!icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] ICP device interface is NULL", + hw_mgr->hw_mgr_name); + rc = -EINVAL; + goto put_cpu_buf; + } + rc = icp_dev_intf->hw_ops.process_cmd( + icp_dev_intf->hw_priv, + CAM_ICP_CMD_HW_DUMP, &icp_dump_args, + sizeof(struct cam_icp_hw_dump_args)); + CAM_DBG(CAM_ICP, "[%s] Offset before %zu after %zu", + hw_mgr->hw_mgr_name, dump_args->offset, icp_dump_args.offset); + dump_args->offset = icp_dump_args.offset; + +put_cpu_buf: + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; +} + +static int cam_icp_mgr_synx_core_control( + struct cam_icp_hw_mgr *hw_mgr, + struct cam_synx_core_control *synx_core_ctrl) +{ + int rc = 0; + struct cam_icp_mgr_hw_args pc_args = {0}; + + pc_args.hfi_setup = true; + pc_args.use_proxy_boot_up = CAM_IS_SECONDARY_VM(); + + if (synx_core_ctrl->core_control) { + rc = cam_icp_mgr_icp_resume(hw_mgr, &pc_args); + if (!rc) + /* Set FW log level for synx */ + if (hw_mgr->icp_debug_type) + hfi_set_debug_level(hw_mgr->hfi_handle, + hw_mgr->icp_debug_type, hw_mgr->icp_dbg_lvl); + } else { + rc = cam_icp_mgr_icp_power_collapse(hw_mgr, &pc_args); + } + + if (rc) + CAM_ERR(CAM_ICP, "[%s] Failed to process core control resume: %s", + hw_mgr->hw_mgr_name, CAM_BOOL_TO_YESNO(synx_core_ctrl->core_control)); + + CAM_INFO(CAM_ICP, "Synx test core control: %s done rc: %d", + CAM_BOOL_TO_YESNO(synx_core_ctrl->core_control), rc); + return rc; +} + +static int cam_icp_mgr_synx_send_test_cmd( + struct cam_icp_hw_mgr *hw_mgr, + struct cam_synx_test_cmd *synx_test_params) +{ + int rc = 0; + size_t size; + dma_addr_t iova; + struct hfi_cmd_synx_test_payload synx_test_cmd; + unsigned long rem_jiffies; + int timeout = 5000; + struct cam_icp_mgr_hw_args resume_args = {0}; + + resume_args.hfi_setup = true; + resume_args.use_proxy_boot_up = CAM_IS_SECONDARY_VM(); + + if (!hw_mgr->icp_resumed) { + rc = cam_icp_mgr_icp_resume(hw_mgr, &resume_args); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to resume ICP rc: 
%d", rc); + goto end; + } + + /* Set FW log level for synx */ + if (hw_mgr->icp_debug_type) + hfi_set_debug_level(hw_mgr->hfi_handle, + hw_mgr->icp_debug_type, hw_mgr->icp_dbg_lvl); + } + + synx_test_cmd.pkt_type = HFI_CMD_DBG_SYNX_TEST; + synx_test_cmd.size = sizeof(synx_test_cmd); + + rc = cam_mem_get_io_buf(synx_test_params->ip_mem_hdl, hw_mgr->iommu_hdl, + &iova, &size, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to get buf for hdl: %d rc: %d", + synx_test_params->ip_mem_hdl, rc); + goto end; + } + + synx_test_cmd.input_iova = (uint32_t)iova; + synx_test_cmd.input_size = (uint32_t)size; + + rc = cam_mem_get_io_buf(synx_test_params->op_mem_hdl, hw_mgr->iommu_hdl, + &iova, &size, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to get buf for hdl: %d rc: %d", + synx_test_params->ip_mem_hdl, rc); + goto end; + } + + synx_test_cmd.output_iova = (uint32_t)iova; + synx_test_cmd.output_size = (uint32_t)size; + + CAM_DBG(CAM_ICP, + "Input (hdl: 0x%x iova: 0x%x size: 0x%x) output (hdl: 0x%x iova: 0x%x size: 0x%x)", + synx_test_params->ip_mem_hdl, synx_test_cmd.input_iova, synx_test_cmd.input_size, + synx_test_params->op_mem_hdl, synx_test_cmd.output_iova, synx_test_cmd.output_size); + + reinit_completion(&hw_mgr->icp_complete); + rc = hfi_write_cmd(hw_mgr->hfi_handle, &synx_test_cmd); + if (rc) + goto end; + + rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG( + &hw_mgr->icp_complete, msecs_to_jiffies(timeout), CAM_ICP, + "FW response timeout for synx test cmd"); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + goto end; + } + + CAM_INFO(CAM_ICP, "Synx test cmd done rc: %d", rc); + +end: + return rc; +} + +static int cam_icp_mgr_service_synx_test_cmds(void *hw_priv, void *synx_args) +{ + int rc; + struct cam_icp_hw_mgr *hw_mgr = hw_priv; + struct cam_synx_test_params *synx_params; + + if ((!hw_priv) || (!synx_args)) { + CAM_ERR(CAM_ICP, "Input params are Null:"); + return -EINVAL; + } + + synx_params = (struct cam_synx_test_params *)synx_args; + 
mutex_lock(&hw_mgr->hw_mgr_mutex); + switch (synx_params->cmd_type) { + case CAM_SYNX_TEST_CMD_TYPE_CORE_CTRL: { + rc = cam_icp_mgr_synx_core_control(hw_mgr, &synx_params->u.core_ctrl); + } + break; + case CAM_SYNX_TEST_CMD_TYPE_SYNX_CMD: { + rc = cam_icp_mgr_synx_send_test_cmd(hw_mgr, &synx_params->u.test_cmd); + } + break; + default: + rc = -EINVAL; + goto end; + } + +end: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static int cam_icp_mgr_hw_flush(void *hw_priv, void *hw_flush_args) +{ + struct cam_hw_flush_args *flush_args = hw_flush_args; + struct cam_icp_hw_ctx_data *ctx_data; + struct cam_icp_hw_mgr *hw_mgr = hw_priv; + + if ((!hw_priv) || (!hw_flush_args)) { + CAM_ERR(CAM_ICP, "Input params are Null:"); + return -EINVAL; + } + + ctx_data = flush_args->ctxt_to_hw_map; + if (!ctx_data) { + CAM_ERR(CAM_ICP, "[%s] ctx data is NULL", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) || + (flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) { + CAM_ERR(CAM_ICP, "%s: Invalid lush type: %d", + ctx_data->ctx_id_string, flush_args->flush_type); + return -EINVAL; + } + + ctx_data->last_flush_req = flush_args->last_flush_req; + CAM_DBG(CAM_REQ, "%s: Flush type %d last_flush_req %u", + ctx_data->ctx_id_string, flush_args->flush_type, + ctx_data->last_flush_req); + switch (flush_args->flush_type) { + case CAM_FLUSH_TYPE_ALL: + mutex_lock(&hw_mgr->hw_mgr_mutex); + if (!atomic_read(&hw_mgr->recovery) + && flush_args->num_req_active) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + cam_icp_mgr_flush_info_dump(flush_args, + ctx_data->ctx_id); + cam_icp_mgr_enqueue_abort(ctx_data); + } else { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + } + mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + cam_icp_mgr_flush_all(ctx_data, flush_args); + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + break; + case CAM_FLUSH_TYPE_REQ: + mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + if (flush_args->num_req_active) { + CAM_ERR(CAM_ICP, + 
"%s: Flush a specific active request id: %lld is not supported", + ctx_data->ctx_id_string, + *(int64_t *)flush_args->flush_req_active[0]); + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + return -EINVAL; + } + if (flush_args->num_req_pending) + cam_icp_mgr_flush_req(ctx_data, flush_args); + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + break; + default: + CAM_ERR(CAM_ICP, "%s: Invalid flush type: %d", + ctx_data->ctx_id_string, flush_args->flush_type); + return -EINVAL; + } + + return 0; +} + +static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args) +{ + int rc = 0, i; + struct cam_hw_release_args *release_hw = release_hw_args; + struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_icp_hw_ctx_data *ctx_data = NULL; + struct cam_icp_mgr_hw_args hw_args = {0}; + + if (!release_hw || !hw_mgr) { + CAM_ERR(CAM_ICP, "Invalid args: %pK %pK", release_hw, hw_mgr); + return -EINVAL; + } + + hw_args.hfi_setup = true; + hw_args.use_proxy_boot_up = CAM_IS_SECONDARY_VM(); + + ctx_data = release_hw->ctxt_to_hw_map; + + if (!ctx_data) { + CAM_ERR(CAM_ICP, "NULL ctx data"); + return -EINVAL; + } + + CAM_DBG(CAM_ICP, "%s: Enter recovery set %d", + ctx_data->ctx_id_string, atomic_read(&hw_mgr->recovery)); + + mutex_lock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) { + CAM_DBG(CAM_ICP, "%s: is not acquired", + ctx_data->ctx_id_string); + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + return -EINVAL; + } + mutex_unlock(&hw_mgr->ctx_mutex[ctx_data->ctx_id]); + + mutex_lock(&hw_mgr->hw_mgr_mutex); + if (!atomic_read(&hw_mgr->recovery) && release_hw->active_req) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + cam_icp_mgr_abort_handle(ctx_data); + cam_icp_mgr_send_abort_status(ctx_data); + } else { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_data); + if (!hw_mgr->ctxt_cnt) { + /* Clear SSR flag on last release */ + 
atomic_set(&hw_mgr->abort_in_process, 0); + CAM_DBG(CAM_ICP, "[%s] Last Release", hw_mgr->hw_mgr_name); + cam_icp_mgr_icp_power_collapse(hw_mgr, &hw_args); + cam_icp_hw_mgr_reset_clk_info(hw_mgr); + rc = cam_icp_device_deint(hw_mgr); + } + + for (i = 0; i < hw_mgr->num_dev_info; i++) { + if (hw_mgr->dev_info[i].dev_ctx_info.dev_ctxt_cnt) + break; + } + + if (i == hw_mgr->num_dev_info) + cam_icp_device_timer_stop(hw_mgr); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + CAM_DBG(CAM_ICP, "[%s] Release done.", hw_mgr->hw_mgr_name); + return rc; +} + +static int cam_icp_mgr_create_handle(struct cam_icp_hw_mgr *hw_mgr, + uint32_t dev_type, struct cam_icp_hw_ctx_data *ctx_data) +{ + struct hfi_cmd_create_handle create_handle; + struct hfi_cmd_work_data *task_data; + unsigned long rem_jiffies; + int timeout = 5000; + struct crm_workq_task *task; + int rc = 0; + uint32_t handle_type; + struct cam_icp_hw_ctx_info *ctx_info; + + if (ctx_data->device_info->hw_dev_type == CAM_ICP_DEV_OFE) { + create_handle.pkt_type = HFI_CMD_OFE_CREATE_HANDLE; + switch (dev_type) { + case CAM_ICP_RES_TYPE_OFE_RT: + handle_type = HFI_OFE_HANDLE_TYPE_OFE_RT; + break; + case CAM_ICP_RES_TYPE_OFE: + handle_type = HFI_OFE_HANDLE_TYPE_OFE_NON_RT; + break; + case CAM_ICP_RES_TYPE_OFE_SEMI_RT: + handle_type = HFI_OFE_HANDLE_TYPE_OFE_SEMI_RT; + break; + default: + CAM_ERR(CAM_ICP, "Invalid OFE stream type: %u", dev_type); + return -EINVAL; + } + } else { + create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE; + switch (dev_type) { + case CAM_ICP_RES_TYPE_BPS: + handle_type = HFI_IPEBPS_HANDLE_TYPE_BPS_NON_RT; + break; + case CAM_ICP_RES_TYPE_IPE_RT: + handle_type = HFI_IPEBPS_HANDLE_TYPE_IPE_RT; + break; + case CAM_ICP_RES_TYPE_IPE: + handle_type = HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT; + break; + case CAM_ICP_RES_TYPE_IPE_SEMI_RT: + handle_type = HFI_IPEBPS_HANDLE_TYPE_IPE_SEMI_RT; + break; + case CAM_ICP_RES_TYPE_BPS_RT: + handle_type = HFI_IPEBPS_HANDLE_TYPE_BPS_RT; + break; + case 
CAM_ICP_RES_TYPE_BPS_SEMI_RT: + handle_type = HFI_IPEBPS_HANDLE_TYPE_BPS_SEMI_RT; + break; + default: + CAM_ERR(CAM_ICP, "Invalid IPE/BPS stream type: %u", dev_type); + return -EINVAL; + } + } + task = cam_req_mgr_workq_get_task(hw_mgr->cmd_work); + if (!task) + return -ENOMEM; + + ctx_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_info), GFP_KERNEL); + if (!ctx_info) { + CAM_ERR(CAM_ICP, "Failed in allocating memory for ICP ctx info"); + return -ENOMEM; + } + + ctx_info->ctx_id = ctx_data->ctx_id; + ctx_info->ctx_data = ctx_data; + + create_handle.size = sizeof(struct hfi_cmd_create_handle); + create_handle.handle_type = handle_type; + create_handle.user_data1 = PTR_TO_U64(ctx_info); + reinit_completion(&ctx_data->wait_complete); + task_data = (struct hfi_cmd_work_data *)task->payload; + task_data->data = (void *)&create_handle; + task_data->request_id = 0; + task_data->type = ICP_WORKQ_TASK_CMD_TYPE; + task->process_cb = cam_icp_mgr_process_cmd; + rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, + CRM_TASK_PRIORITY_0); + if (rc) { + CAM_ERR(CAM_ICP, "Failed at enqueuing task to workq, ctx_id: %d", ctx_info->ctx_id); + CAM_MEM_FREE(ctx_info); + return rc; + } + + rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG( + &ctx_data->wait_complete, + msecs_to_jiffies(timeout), CAM_ICP, + "%s: FW response timeout for create handle command", + ctx_data->ctx_id_string); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + cam_icp_dump_debug_info(hw_mgr, false); + } + + if (ctx_data->fw_handle == 0) { + CAM_ERR(CAM_ICP, "%s: Invalid handle created", ctx_data->ctx_id_string); + rc = -EINVAL; + } + + return rc; +} + +static int cam_icp_mgr_send_ping(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data) +{ + struct hfi_cmd_ping_pkt ping_pkt; + struct hfi_cmd_work_data *task_data; + unsigned long rem_jiffies; + int timeout = 5000; + struct crm_workq_task *task; + int rc = 0; + struct cam_icp_hw_ctx_info *ctx_info; + + task = 
cam_req_mgr_workq_get_task(hw_mgr->cmd_work); + if (!task) { + CAM_ERR(CAM_ICP, "%s: No free task to send ping command", + ctx_data->ctx_id_string); + return -ENOMEM; + } + + ctx_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_ctx_info), GFP_KERNEL); + if (!ctx_info) { + CAM_ERR(CAM_ICP, "Failed in allocating memory for ICP ctx info"); + return -ENOMEM; + } + + ctx_info->ctx_id = ctx_data->ctx_id; + ctx_info->ctx_data = ctx_data; + + ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt); + ping_pkt.pkt_type = HFI_CMD_SYS_PING; + ping_pkt.user_data = PTR_TO_U64(ctx_info); + init_completion(&ctx_data->wait_complete); + task_data = (struct hfi_cmd_work_data *)task->payload; + task_data->data = (void *)&ping_pkt; + task_data->request_id = 0; + task_data->type = ICP_WORKQ_TASK_CMD_TYPE; + task->process_cb = cam_icp_mgr_process_cmd; + + rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, + CRM_TASK_PRIORITY_0); + if (rc) { + CAM_ERR(CAM_ICP, "Failed at enqueuing task to workq, ctx_id: %d", ctx_info->ctx_id); + CAM_MEM_FREE(ctx_info); + return rc; + } + + rem_jiffies = CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG( + &ctx_data->wait_complete, + msecs_to_jiffies(timeout), CAM_ICP, + "%s: FW response timeout for Ping handle command", + ctx_data->ctx_id_string); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + cam_icp_dump_debug_info(hw_mgr, false); + } + + return rc; +} + +static int cam_icp_get_acquire_info_v1(struct cam_icp_hw_mgr *hw_mgr, + struct cam_hw_acquire_args *args, + struct cam_icp_hw_ctx_data *ctx_data) +{ + int i; + int acquire_size, acquire_size_unified; + struct cam_icp_acquire_dev_info icp_dev_acquire_info; + struct cam_icp_acquire_dev_info *acquire_info; + struct cam_icp_res_info_unified *p_ctx_icp_out = NULL; + + if (copy_from_user(&icp_dev_acquire_info, + (void __user *)args->acquire_info, + sizeof(struct cam_icp_acquire_dev_info))) { + CAM_ERR(CAM_ICP, "%s: Failed in acquire", ctx_data->ctx_id_string); + return -EFAULT; + } + + if (icp_dev_acquire_info.secure_mode > 
CAM_SECURE_MODE_SECURE) { + CAM_ERR(CAM_ICP, "%s: Invalid mode: %d", + ctx_data->ctx_id_string, icp_dev_acquire_info.secure_mode); + return -EINVAL; + } + + if (icp_dev_acquire_info.secure_mode == CAM_SECURE_MODE_SECURE) { + /* Initialize the port security map to the default all secure */ + for (i = 0; i < CAM_MAX_OUTPUT_PORTS_PER_DEVICE; i++) + ctx_data->port_security_map[i] = true; + } + + if ((icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) || + (icp_dev_acquire_info.num_out_res <= 0)) { + CAM_ERR(CAM_ICP, "%s: Invalid num of out resources: %u", + ctx_data->ctx_id_string, icp_dev_acquire_info.num_out_res); + return -EINVAL; + } + + if (icp_dev_acquire_info.dev_type < CAM_ICP_RES_TYPE_BPS || + icp_dev_acquire_info.dev_type > CAM_ICP_RES_TYPE_OFE_SEMI_RT) { + CAM_ERR(CAM_ICP, "%s Invalid device type", + ctx_data->ctx_id_string); + return -EFAULT; + } + + acquire_size = sizeof(struct cam_icp_acquire_dev_info) + + ((icp_dev_acquire_info.num_out_res - 1) * + sizeof(struct cam_icp_res_info)); + acquire_info = CAM_MEM_ZALLOC(acquire_size, GFP_KERNEL); + if (!acquire_info) + return -ENOMEM; + + if (copy_from_user(acquire_info, + (void __user *)args->acquire_info, acquire_size)) { + CAM_ERR(CAM_ICP, "%s: Failed in acquire: size = %d", + ctx_data->ctx_id_string, acquire_size); + CAM_MEM_FREE(acquire_info); + return -EFAULT; + } + + acquire_size_unified = sizeof(struct cam_icp_acquire_dev_info_unified) + + ((icp_dev_acquire_info.num_out_res) * + sizeof(struct cam_icp_res_info_unified)); + ctx_data->icp_dev_acquire_info = CAM_MEM_ZALLOC(acquire_size_unified, + GFP_KERNEL); + if (!ctx_data->icp_dev_acquire_info) { + CAM_MEM_FREE(acquire_info); + return -ENOMEM; + } + + ctx_data->icp_dev_acquire_info->scratch_mem_size = + acquire_info->scratch_mem_size; + ctx_data->icp_dev_acquire_info->dev_type = + acquire_info->dev_type; + ctx_data->icp_dev_acquire_info->io_config_cmd_size = + acquire_info->io_config_cmd_size; + 
ctx_data->icp_dev_acquire_info->io_config_cmd_handle = + acquire_info->io_config_cmd_handle; + ctx_data->icp_dev_acquire_info->secure_mode = + acquire_info->secure_mode; + ctx_data->icp_dev_acquire_info->chain_info = + acquire_info->chain_info; + ctx_data->icp_dev_acquire_info->in_res.format = + acquire_info->in_res.format; + ctx_data->icp_dev_acquire_info->in_res.width = + acquire_info->in_res.width; + ctx_data->icp_dev_acquire_info->in_res.height = + acquire_info->in_res.height; + ctx_data->icp_dev_acquire_info->in_res.fps = + acquire_info->in_res.fps; + ctx_data->icp_dev_acquire_info->in_res.port_id = + CAM_ICP_INVALID_IN_OUT_PORT_ID; + ctx_data->icp_dev_acquire_info->in_res.is_secure = 0; + ctx_data->icp_dev_acquire_info->num_out_res = + acquire_info->num_out_res; + + p_ctx_icp_out = ctx_data->icp_dev_acquire_info->out_res_flex; + for (i = 0; i < icp_dev_acquire_info.num_out_res; i++) { + p_ctx_icp_out[i].format = + acquire_info->out_res_flex[i].format; + p_ctx_icp_out[i].width = + acquire_info->out_res_flex[i].width; + p_ctx_icp_out[i].height = + acquire_info->out_res_flex[i].height; + p_ctx_icp_out[i].fps = + acquire_info->out_res_flex[i].fps; + p_ctx_icp_out[i].port_id = + CAM_ICP_INVALID_IN_OUT_PORT_ID; + p_ctx_icp_out[i].is_secure = CAM_SECURE_MODE_NON_SECURE; + } + + CAM_DBG(CAM_ICP, "%s: %x %x %x %x %x %x", + ctx_data->ctx_id_string, + ctx_data->icp_dev_acquire_info->in_res.format, + ctx_data->icp_dev_acquire_info->in_res.width, + ctx_data->icp_dev_acquire_info->in_res.height, + ctx_data->icp_dev_acquire_info->in_res.fps, + ctx_data->icp_dev_acquire_info->num_out_res, + ctx_data->icp_dev_acquire_info->scratch_mem_size); + + for (i = 0; i < icp_dev_acquire_info.num_out_res; i++) + CAM_DBG(CAM_ICP, "%s: out[i] %x %x %x %x", + ctx_data->ctx_id_string, + p_ctx_icp_out[i].format, + p_ctx_icp_out[i].width, + p_ctx_icp_out[i].height, + p_ctx_icp_out[i].fps); + + CAM_MEM_FREE(acquire_info); + return 0; +} + +static int cam_icp_get_acquire_info_v2(struct 
cam_icp_hw_mgr *hw_mgr, + struct cam_hw_acquire_args *args, + struct cam_icp_hw_ctx_data *ctx_data) +{ + int i; + int acquire_size, acquire_size_unified; + struct cam_icp_acquire_dev_info_v2 icp_dev_acquire_info_v2; + struct cam_icp_acquire_dev_info_v2 *acquire_info; + struct cam_icp_res_info_unified *p_ctx_icp_out = NULL; + + if (copy_from_user(&icp_dev_acquire_info_v2, + (void __user *)args->acquire_info, + sizeof(struct cam_icp_acquire_dev_info_v2))) { + CAM_ERR(CAM_ICP, "%s: Failed in acquire", ctx_data->ctx_id_string); + return -EFAULT; + } + + if (icp_dev_acquire_info_v2.secure_mode > CAM_SECURE_MODE_SECURE) { + CAM_ERR(CAM_ICP, "%s: Invalid mode: %d", + ctx_data->ctx_id_string, icp_dev_acquire_info_v2.secure_mode); + return -EINVAL; + } + + if (icp_dev_acquire_info_v2.secure_mode == CAM_SECURE_MODE_SECURE) { + /* Initialize the port security map to the default all secure */ + for (i = 0; i < CAM_MAX_OUTPUT_PORTS_PER_DEVICE; i++) + ctx_data->port_security_map[i] = true; + } + + if ((icp_dev_acquire_info_v2.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) || + (icp_dev_acquire_info_v2.num_out_res <= 0)) { + CAM_ERR(CAM_ICP, "%s: Invalid num of out resources: %u", + ctx_data->ctx_id_string, icp_dev_acquire_info_v2.num_out_res); + return -EINVAL; + } + + if (icp_dev_acquire_info_v2.dev_type < CAM_ICP_RES_TYPE_BPS || + icp_dev_acquire_info_v2.dev_type > CAM_ICP_RES_TYPE_OFE_SEMI_RT) { + CAM_ERR(CAM_ICP, "%s Invalid device type", + ctx_data->ctx_id_string); + return -EFAULT; + } + + acquire_size = sizeof(struct cam_icp_acquire_dev_info_v2) + + ((icp_dev_acquire_info_v2.num_out_res) * + sizeof(struct cam_icp_res_info_v2)); + acquire_info = CAM_MEM_ZALLOC(acquire_size, GFP_KERNEL); + if (!acquire_info) + return -ENOMEM; + + if (copy_from_user(acquire_info, + (void __user *)args->acquire_info, acquire_size)) { + CAM_ERR(CAM_ICP, "%s: Failed in acquire: size = %d", + ctx_data->ctx_id_string, acquire_size); + CAM_MEM_FREE(acquire_info); + return -EFAULT; + } + + 
acquire_size_unified = sizeof(struct cam_icp_acquire_dev_info_unified) + + ((icp_dev_acquire_info_v2.num_out_res) * + sizeof(struct cam_icp_res_info_unified)); + ctx_data->icp_dev_acquire_info = CAM_MEM_ZALLOC(acquire_size_unified, + GFP_KERNEL); + if (!ctx_data->icp_dev_acquire_info) { + CAM_MEM_FREE(acquire_info); + return -ENOMEM; + } + + ctx_data->icp_dev_acquire_info->scratch_mem_size = + acquire_info->scratch_mem_size; + ctx_data->icp_dev_acquire_info->dev_type = + acquire_info->dev_type; + ctx_data->icp_dev_acquire_info->io_config_cmd_size = + acquire_info->io_config_cmd_size; + ctx_data->icp_dev_acquire_info->io_config_cmd_handle = + acquire_info->io_config_cmd_handle; + ctx_data->icp_dev_acquire_info->secure_mode = + acquire_info->secure_mode; + ctx_data->icp_dev_acquire_info->chain_info = + acquire_info->chain_info; + ctx_data->icp_dev_acquire_info->in_res.format = + acquire_info->in_res.format; + ctx_data->icp_dev_acquire_info->in_res.width = + acquire_info->in_res.width; + ctx_data->icp_dev_acquire_info->in_res.height = + acquire_info->in_res.height; + ctx_data->icp_dev_acquire_info->in_res.fps = + acquire_info->in_res.fps; + ctx_data->icp_dev_acquire_info->in_res.port_id = + acquire_info->in_res.port_id; + ctx_data->icp_dev_acquire_info->in_res.is_secure = + acquire_info->in_res.is_secure; + ctx_data->icp_dev_acquire_info->num_out_res = + acquire_info->num_out_res; + + p_ctx_icp_out = ctx_data->icp_dev_acquire_info->out_res_flex; + for (i = 0; i < icp_dev_acquire_info_v2.num_out_res; i++) { + p_ctx_icp_out[i].format = + acquire_info->out_res_flex[i].format; + p_ctx_icp_out[i].width = + acquire_info->out_res_flex[i].width; + p_ctx_icp_out[i].height = + acquire_info->out_res_flex[i].height; + p_ctx_icp_out[i].fps = + acquire_info->out_res_flex[i].fps; + p_ctx_icp_out[i].port_id = + acquire_info->out_res_flex[i].port_id; + p_ctx_icp_out[i].is_secure = + acquire_info->out_res_flex[i].is_secure; + } + + if (icp_dev_acquire_info_v2.secure_mode == 
CAM_SECURE_MODE_SECURE) { + for (i = 0; i < icp_dev_acquire_info_v2.num_out_res; i++) { + if (p_ctx_icp_out[i].is_secure != CAM_SECURE_MODE_SECURE) { + if (p_ctx_icp_out[i].port_id >= CAM_MAX_OUTPUT_PORTS_PER_DEVICE) { + CAM_DBG(CAM_ICP, "%s: Invalid out port ID: %d", + ctx_data->ctx_id_string, p_ctx_icp_out[i].port_id); + CAM_MEM_FREE(acquire_info); + CAM_MEM_FREE(ctx_data->icp_dev_acquire_info); + return -EINVAL; + } + ctx_data->port_security_map[p_ctx_icp_out[i].port_id] = false; + } + } + } + + CAM_DBG(CAM_ICP, "%s: %x %x %x %x %x %x", + ctx_data->ctx_id_string, + ctx_data->icp_dev_acquire_info->in_res.format, + ctx_data->icp_dev_acquire_info->in_res.width, + ctx_data->icp_dev_acquire_info->in_res.height, + ctx_data->icp_dev_acquire_info->in_res.fps, + ctx_data->icp_dev_acquire_info->num_out_res, + ctx_data->icp_dev_acquire_info->scratch_mem_size); + + for (i = 0; i < icp_dev_acquire_info_v2.num_out_res; i++) + CAM_DBG(CAM_ICP, "%s: out[i] %x %x %x %x %d", + ctx_data->ctx_id_string, + p_ctx_icp_out[i].format, + p_ctx_icp_out[i].width, + p_ctx_icp_out[i].height, + p_ctx_icp_out[i].fps, + p_ctx_icp_out[i].is_secure); + + CAM_MEM_FREE(acquire_info); + return 0; +} + +static int cam_icp_put_acquire_info_v1( + uintptr_t user_acquire_info, + struct cam_icp_acquire_dev_info_unified *acquire_info_unified) +{ + struct cam_icp_acquire_dev_info acquire_info; + + acquire_info.scratch_mem_size = acquire_info_unified->scratch_mem_size; + acquire_info.dev_type = acquire_info_unified->dev_type; + acquire_info.io_config_cmd_size = acquire_info_unified->io_config_cmd_size; + acquire_info.io_config_cmd_handle = acquire_info_unified->io_config_cmd_handle; + acquire_info.secure_mode = acquire_info_unified->secure_mode; + acquire_info.chain_info = acquire_info_unified->chain_info; + acquire_info.in_res.format = acquire_info_unified->in_res.format; + acquire_info.in_res.width = acquire_info_unified->in_res.width; + acquire_info.in_res.height = acquire_info_unified->in_res.height; + 
acquire_info.in_res.fps = acquire_info_unified->in_res.fps; + acquire_info.num_out_res = acquire_info_unified->num_out_res; + + if (copy_to_user((void __user *)user_acquire_info, + &acquire_info, + sizeof(struct cam_icp_acquire_dev_info))) + return -EINVAL; + + return 0; +} + +static int cam_icp_put_acquire_info_v2( + uintptr_t user_acquire_info, + struct cam_icp_acquire_dev_info_unified *acquire_info_unified) +{ + struct cam_icp_acquire_dev_info_v2 acquire_info; + + acquire_info.scratch_mem_size = acquire_info_unified->scratch_mem_size; + acquire_info.dev_type = acquire_info_unified->dev_type; + acquire_info.io_config_cmd_size = acquire_info_unified->io_config_cmd_size; + acquire_info.io_config_cmd_handle = acquire_info_unified->io_config_cmd_handle; + acquire_info.secure_mode = acquire_info_unified->secure_mode; + acquire_info.chain_info = acquire_info_unified->chain_info; + acquire_info.in_res.format = acquire_info_unified->in_res.format; + acquire_info.in_res.width = acquire_info_unified->in_res.width; + acquire_info.in_res.height = acquire_info_unified->in_res.height; + acquire_info.in_res.fps = acquire_info_unified->in_res.fps; + acquire_info.in_res.port_id = acquire_info_unified->in_res.port_id; + acquire_info.in_res.is_secure = acquire_info_unified->in_res.is_secure; + acquire_info.num_out_res = acquire_info_unified->num_out_res; + + if (copy_to_user((void __user *)user_acquire_info, + &acquire_info, + sizeof(struct cam_icp_acquire_dev_info_v2))) + return -EINVAL; + + return 0; +} + +static inline enum cam_icp_hw_type cam_icp_get_hw_dev_type(uint32_t dev_type) +{ + switch (dev_type) { + case CAM_ICP_RES_TYPE_BPS: + case CAM_ICP_RES_TYPE_BPS_RT: + case CAM_ICP_RES_TYPE_BPS_SEMI_RT: + return CAM_ICP_DEV_BPS; + case CAM_ICP_RES_TYPE_IPE: + case CAM_ICP_RES_TYPE_IPE_RT: + case CAM_ICP_RES_TYPE_IPE_SEMI_RT: + return CAM_ICP_DEV_IPE; + case CAM_ICP_RES_TYPE_OFE: + case CAM_ICP_RES_TYPE_OFE_RT: + case CAM_ICP_RES_TYPE_OFE_SEMI_RT: + return CAM_ICP_DEV_OFE; + 
default: + CAM_ERR(CAM_ICP, "Invalid resource type: %u", dev_type); + } + + return CAM_ICP_HW_MAX; +} + +static int cam_icp_validate_secure_port_config(struct cam_icp_hw_mgr *hw_mgr, + struct cam_icp_hw_ctx_data *ctx_data) +{ + struct cam_icp_hw_ctx_data *cur_ctx_data; + int i; + + list_for_each_entry(cur_ctx_data, + &hw_mgr->active_ctx_info.active_ctx_list, list) { + if (cur_ctx_data->icp_dev_acquire_info->secure_mode + == CAM_SECURE_MODE_SECURE) { + for (i = 0; i < CAM_MAX_OUTPUT_PORTS_PER_DEVICE; i++) { + if (cur_ctx_data->port_security_map[i] != + ctx_data->port_security_map[i]) { + CAM_ERR(CAM_ICP, + "%s: port security map mismatch %d prev: 0x%x, curr: 0x%x", + ctx_data->ctx_id_string, i, + cur_ctx_data->port_security_map[i], + ctx_data->port_security_map[i]); + return -EINVAL; + } + } + /* If it doesn't conflict with one, it shouldn't conflict with others as well */ + break; + } + } + + return 0; +} + +static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args) +{ + struct cam_hw_acquire_args *args = acquire_hw_args; + struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv; + int rc = 0, bitmap_size = 0, i, ctx_id; + dma_addr_t io_buf_addr; + size_t io_buf_size; + struct cam_icp_hw_ctx_data *ctx_data = NULL; + struct cam_icp_acquire_dev_info_unified *icp_dev_acquire_info; + struct cam_cmd_mem_regions cmd_mem_region; + enum cam_icp_hw_type hw_dev_type; + struct cam_icp_res_info_unified *icp_ref_res_info; + struct cam_icp_mgr_hw_args hw_args = {0}; + + if ((!hw_mgr_priv) || (!acquire_hw_args)) { + CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", hw_mgr_priv, + acquire_hw_args); + return -EINVAL; + } + + if (args->num_acq > 1) { + CAM_ERR(CAM_ICP, "[%s] number of resources are wrong: %u", + hw_mgr->hw_mgr_name, args->num_acq); + return -EINVAL; + } + + CAM_DBG(CAM_ICP, "[%s] ENTER Api = %d", hw_mgr->hw_mgr_name, args->api_version); + + hw_args.hfi_setup = true; + hw_args.use_proxy_boot_up = CAM_IS_SECONDARY_VM(); + + mutex_lock(&hw_mgr->hw_mgr_mutex); + rc = 
cam_icp_mgr_allocate_ctx(hw_mgr, &ctx_data, &ctx_id); + if (rc) + goto end; + + ctx_data->hw_mgr_priv = hw_mgr_priv; + ctx_data->acquire_dev_api_version = args->api_version; + + mutex_lock(&hw_mgr->ctx_mutex[ctx_id]); + if (args->api_version == CAM_ACQUIRE_DEV_STRUCT_VERSION_1) { + rc = cam_icp_get_acquire_info_v1(hw_mgr, args, ctx_data); + if (rc) + goto acquire_info_failed; + } else { + rc = cam_icp_get_acquire_info_v2(hw_mgr, args, ctx_data); + if (rc) + goto acquire_info_failed; + } + + icp_dev_acquire_info = ctx_data->icp_dev_acquire_info; + hw_dev_type = cam_icp_get_hw_dev_type(icp_dev_acquire_info->dev_type); + if (!CAM_ICP_IS_VALID_HW_DEV_TYPE(hw_dev_type)) { + CAM_ERR(CAM_ICP, "[%s] Wrong hw device type in acquire: %u", + hw_mgr->hw_mgr_name, icp_dev_acquire_info->dev_type); + rc = -EINVAL; + goto get_io_buf_failed; + } + + if (!CAM_ICP_IS_DEV_HW_EXIST(hw_mgr->hw_cap_mask, hw_dev_type)) { + CAM_ERR(CAM_ICP, + "Attempt to acquire device %s not supported by [%s]", + cam_icp_dev_type_to_name(icp_dev_acquire_info->dev_type), + hw_mgr->hw_mgr_name); + rc = -EINVAL; + goto get_io_buf_failed; + } + + ctx_data->device_info = &hw_mgr->dev_info[hw_mgr->dev_info_idx[hw_dev_type]]; + + scnprintf(ctx_data->ctx_id_string, sizeof(ctx_data->ctx_id_string), + "[%s]_%s_ctx[%d]_hwmgr_ctx[%d]", + hw_mgr->hw_mgr_name, + cam_icp_dev_type_to_name( + ctx_data->icp_dev_acquire_info->dev_type), + args->ctx_id, ctx_data->ctx_id); + + CAM_DBG(CAM_ICP, "%s: acquire io buf handle %d", + ctx_data->ctx_id_string, icp_dev_acquire_info->io_config_cmd_handle); + /* FW/CDM buffers are expected to be mapped in 32-bit address range */ + rc = cam_mem_get_io_buf( + icp_dev_acquire_info->io_config_cmd_handle, + hw_mgr->iommu_hdl, + &io_buf_addr, &io_buf_size, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ICP, "%s: unable to get src buf info from io desc", + ctx_data->ctx_id_string); + goto get_io_buf_failed; + } + + CAM_DBG(CAM_ICP, "%s: hdl: %d, addr: %pK, size: %zu", + ctx_data->ctx_id_string, 
icp_dev_acquire_info->io_config_cmd_handle, + (void *)io_buf_addr, io_buf_size); + + if (!hw_mgr->ctxt_cnt) { + rc = cam_icp_clk_info_init(hw_mgr); + if (rc) + goto get_io_buf_failed; + + rc = cam_icp_mgr_icp_resume(hw_mgr, &hw_args); + if (rc) + goto get_io_buf_failed; + + if (hw_mgr->icp_debug_type) + hfi_set_debug_level(hw_mgr->hfi_handle, hw_mgr->icp_debug_type, + hw_mgr->icp_dbg_lvl); + + hfi_set_fw_dump_levels(hw_mgr->hfi_handle, hw_mgr->icp_fw_dump_lvl, + hw_mgr->icp_fw_ramdump_lvl); + + rc = cam_icp_send_ubwc_cfg(hw_mgr); + if (rc) + goto ubwc_cfg_failed; + } + + if (icp_dev_acquire_info->secure_mode == CAM_SECURE_MODE_SECURE) { + /* If the device is being acquired in the secure mode, make sure that + * this doesn't conflict with previous secure sessions if any, both + * in terms of acquire API version and the port security configuration. + */ + CAM_DBG(CAM_ICP, "%s: number of existing secure contexts = %d", + ctx_data->ctx_id_string, hw_mgr->num_secure_contexts[hw_dev_type]); + if (hw_mgr->num_secure_contexts[hw_dev_type]) { + rc = cam_icp_validate_secure_port_config (hw_mgr, ctx_data); + if (rc) + goto secure_check_failed; + } else { + /* Based on the port level security configuration call into TZ + * using the CPAS API to configure some ports to non secure, if + * required. By default all pixel ports are secure. Need not repeat + * this if the previous secure session, if any, already did this. 
+ */ + rc = cam_icp_handle_secure_port_config(ctx_data, false); + if (rc) + goto secure_check_failed; + } + hw_mgr->num_secure_contexts[hw_dev_type]++; + } + + rc = cam_icp_mgr_device_resume(hw_mgr, ctx_data); + if (rc) + goto icp_dev_resume_failed; + + rc = cam_icp_mgr_send_ping(hw_mgr, ctx_data); + if (rc) { + CAM_ERR(CAM_ICP, "%s: ping ack not received", ctx_data->ctx_id_string); + goto send_ping_failed; + } + CAM_DBG(CAM_ICP, "%s: ping ack received", ctx_data->ctx_id_string); + + rc = cam_icp_mgr_create_handle(hw_mgr, icp_dev_acquire_info->dev_type, + ctx_data); + if (rc) { + CAM_ERR(CAM_ICP, "%s: create handle failed", ctx_data->ctx_id_string); + goto create_handle_failed; + } + + CAM_DBG(CAM_ICP, + "%s: created stream handle", + ctx_data->ctx_id_string); + + cmd_mem_region.num_regions = 1; + cmd_mem_region.map_info_array_flex[0].mem_handle = + icp_dev_acquire_info->io_config_cmd_handle; + cmd_mem_region.map_info_array_flex[0].offset = 0; + cmd_mem_region.map_info_array_flex[0].size = + icp_dev_acquire_info->io_config_cmd_size; + cmd_mem_region.map_info_array_flex[0].flags = 0; + + rc = cam_icp_process_stream_settings(ctx_data, + &cmd_mem_region, true); + if (rc) { + CAM_ERR(CAM_ICP, + "%s: sending config io mapping failed rc %d", + ctx_data->ctx_id_string, rc); + goto send_map_info_failed; + } + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_PRESIL, "%s: Sending IO Config buffers to presil: FD %d ", + ctx_data->ctx_id_string, + (icp_dev_acquire_info->io_config_cmd_handle >> 16)); + cam_mem_mgr_send_buffer_to_presil(hw_mgr->iommu_hdl, + icp_dev_acquire_info->io_config_cmd_handle); + } + + rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ICP, + "%s: IO Config command failed %d size:%d", + ctx_data->ctx_id_string, rc, icp_dev_acquire_info->io_config_cmd_size); + cam_icp_dump_io_cfg(ctx_data, + icp_dev_acquire_info->io_config_cmd_handle, + icp_dev_acquire_info->io_config_cmd_size); + goto ioconfig_failed; + } + + rc 
= cam_icp_process_stream_settings(ctx_data, + &cmd_mem_region, false); + if (rc) { + CAM_ERR(CAM_ICP, + "%s: sending config io unmapping failed %d", + ctx_data->ctx_id_string, rc); + goto send_map_info_failed; + } + + ctx_data->context_priv = args->context_data; + args->ctxt_to_hw_map = ctx_data; + args->hw_mgr_ctx_id = ctx_data->ctx_id; + + bitmap_size = BITS_TO_LONGS(CAM_FRAME_CMD_MAX) * sizeof(long); + ctx_data->hfi_frame_process.bitmap = + CAM_MEM_ZALLOC(bitmap_size, GFP_KERNEL); + if (!ctx_data->hfi_frame_process.bitmap) { + CAM_ERR_RATE_LIMIT(CAM_ICP, + "%s: failed to allocate hfi frame bitmap", ctx_data->ctx_id_string); + rc = -ENOMEM; + goto ioconfig_failed; + } + + ctx_data->perf_stats.total_resp_time = 0; + ctx_data->perf_stats.total_requests = 0; + + ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE; + ctx_data->ctxt_event_cb = args->event_cb; + icp_dev_acquire_info->scratch_mem_size = ctx_data->scratch_mem_size; + + if (args->api_version == CAM_ACQUIRE_DEV_STRUCT_VERSION_1) { + rc = cam_icp_put_acquire_info_v1(args->acquire_info, icp_dev_acquire_info); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ICP, + "%s: copy to user failed", ctx_data->ctx_id_string); + goto copy_to_user_failed; + } + } else { + rc = cam_icp_put_acquire_info_v2(args->acquire_info, icp_dev_acquire_info); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ICP, + "%s: copy to user failed", ctx_data->ctx_id_string); + goto copy_to_user_failed; + } + } + + cam_icp_ctx_clk_info_init(ctx_data); + ctx_data->state = CAM_ICP_CTX_STATE_ACQUIRED; + mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]); + CAM_DBG(CAM_ICP, "%s: scratch size = %x fw_handle = %x", + ctx_data->ctx_id_string, + (unsigned int)icp_dev_acquire_info->scratch_mem_size, + (unsigned int)ctx_data->fw_handle); + + /* Start device timer*/ + for (i = 0; i < hw_mgr->num_dev_info; i++) { + if (hw_mgr->dev_info[i].dev_ctx_info.dev_ctxt_cnt == 1) + break; + } + + if (i != hw_mgr->num_dev_info) + cam_icp_device_timer_start(hw_mgr); + + /* Start context 
timer */ + cam_icp_ctx_timer_start(ctx_data); + hw_mgr->ctxt_cnt++; + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + CAM_DBG(CAM_ICP, "%s: Acquire Done", + ctx_data->ctx_id_string); + + CAM_TRACE(CAM_ICP, + "%s: Acquired, in_res : format=%d, widht=%d, height=%d, fps=%d", + ctx_data->ctx_id_string, + ctx_data->icp_dev_acquire_info->in_res.format, + ctx_data->icp_dev_acquire_info->in_res.width, + ctx_data->icp_dev_acquire_info->in_res.height, + ctx_data->icp_dev_acquire_info->in_res.fps); + + icp_ref_res_info = ctx_data->icp_dev_acquire_info->out_res_flex; + if (ctx_data->icp_dev_acquire_info->num_out_res > 0) { + CAM_TRACE(CAM_ICP, + "%s: Acquired, out_res[0] : format=%d, widht=%d, height=%d, fps=%d", + ctx_data->ctx_id_string, + icp_ref_res_info[0].format, icp_ref_res_info[0].width, + icp_ref_res_info[0].height, icp_ref_res_info[0].fps); + } + + if (ctx_data->icp_dev_acquire_info->num_out_res > 1) { + CAM_TRACE(CAM_ICP, + "%s: Acquired, out_res[1] : format=%d, widht=%d, height=%d, fps=%d", + ctx_data->ctx_id_string, + icp_ref_res_info[1].format, icp_ref_res_info[1].width, + icp_ref_res_info[1].height, icp_ref_res_info[1].fps); + } + + return 0; + +copy_to_user_failed: + CAM_MEM_FREE(ctx_data->hfi_frame_process.bitmap); + ctx_data->hfi_frame_process.bitmap = NULL; +ioconfig_failed: + cam_icp_process_stream_settings(ctx_data, + &cmd_mem_region, false); +send_map_info_failed: + cam_icp_mgr_destroy_handle(ctx_data); +create_handle_failed: +send_ping_failed: + cam_icp_mgr_dev_power_collapse(hw_mgr, ctx_data, 0); +icp_dev_resume_failed: + if (icp_dev_acquire_info->secure_mode == CAM_SECURE_MODE_SECURE) { + if(!(--hw_mgr->num_secure_contexts[hw_dev_type])) + cam_icp_handle_secure_port_config(ctx_data, true); + } +secure_check_failed: +ubwc_cfg_failed: + if (!hw_mgr->ctxt_cnt) + cam_icp_mgr_icp_power_collapse(hw_mgr, &hw_args); +get_io_buf_failed: + CAM_MEM_FREE(ctx_data->icp_dev_acquire_info); + ctx_data->icp_dev_acquire_info = NULL; + ctx_data->device_info = NULL; 
acquire_info_failed:
	/* Flush any pending FW debug messages to aid triage of the failed acquire */
	cam_icp_mgr_process_dbg_buf(hw_mgr);
	/* Release the ctx slot reserved by cam_icp_mgr_allocate_ctx() */
	cam_icp_mgr_put_ctx(hw_mgr, ctx_data);
	mutex_unlock(&hw_mgr->ctx_mutex[ctx_id]);
end:
	mutex_unlock(&hw_mgr->hw_mgr_mutex);
	return rc;
}

#ifdef CONFIG_SPECTRA_VMRM
/*
 * Power collapse the ICP processor on the PVM in response to an
 * inter-VM request. The ICP must already be booted.
 * Returns 0 on success, negative errno otherwise.
 */
static inline int cam_icp_mgr_pvm_inter_vm_proc_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
{
	int rc = 0;
	struct cam_icp_mgr_hw_args pc_args = {0};

	/* Plain power collapse: no HFI setup, no proxy boot, skip ICP init */
	pc_args.hfi_setup = false;
	pc_args.use_proxy_boot_up = false;
	pc_args.icp_pc = true;
	pc_args.skip_icp_init = true;

	if (!hw_mgr->icp_booted) {
		CAM_ERR(CAM_ICP, "%s icp is not booted up", hw_mgr->hw_mgr_name);
		return -EINVAL;
	}

	rc = cam_icp_mgr_icp_power_collapse(hw_mgr, &pc_args);
	if (rc) {
		CAM_ERR(CAM_ICP, "%s icp power_collapse failed", hw_mgr->hw_mgr_name);
		return rc;
	}

	return rc;
}

/*
 * Fully close the ICP hw on the PVM in response to an inter-VM shutdown
 * request. hfi_setup mirrors whether HFI init had completed so that
 * cam_icp_mgr_hw_close() tears down only what was set up.
 * Returns 0 on success, negative errno otherwise.
 */
static inline int cam_icp_mgr_pvm_inter_vm_proc_shutdown(struct cam_icp_hw_mgr *hw_mgr)
{
	int rc = 0;
	struct cam_icp_mgr_hw_args pc_args = {0};

	pc_args.hfi_setup = hw_mgr->hfi_init_done;
	pc_args.use_proxy_boot_up = false;
	pc_args.icp_pc = true;
	pc_args.skip_icp_init = true;

	if (!hw_mgr->icp_booted) {
		CAM_ERR(CAM_ICP, "%s icp is already closed", hw_mgr->hw_mgr_name);
		return -EINVAL;
	}

	rc = cam_icp_mgr_hw_close(hw_mgr, &pc_args);
	if (rc) {
		CAM_ERR(CAM_ICP, "%s icp close failed", hw_mgr->hw_mgr_name);
		return rc;
	}

	return rc;
}

/*
 * Resume the ICP processor on the PVM in response to an inter-VM
 * request: full hw open if the ICP was never booted, otherwise a
 * power-collapse resume.
 * Returns 0 on success, negative errno otherwise.
 */
static inline int cam_icp_mgr_pvm_inter_vm_proc_resume(struct cam_icp_hw_mgr *hw_mgr)
{
	int rc = 0;
	/*
	 * NOTE(review): unlike pc_args in the sibling helpers, hw_args is
	 * not zero-initialized; any fields beyond the four assigned below
	 * are indeterminate — confirm this is intentional.
	 */
	struct cam_icp_mgr_hw_args hw_args;

	hw_args.icp_pc = false;
	hw_args.use_proxy_boot_up = false;
	hw_args.hfi_setup = false;
	hw_args.skip_icp_init = true;

	if (!hw_mgr->icp_booted) {
		rc = cam_icp_mgr_hw_open(hw_mgr, &hw_args);
		if (rc)
			CAM_ERR(CAM_ICP, "%s icp boot up failed", hw_mgr->hw_mgr_name);
	} else {
		/* Already booted: resume out of power collapse instead of re-opening */
		hw_args.icp_pc = true;
		rc = cam_icp_mgr_icp_resume(hw_mgr, &hw_args);
		if (rc)
			CAM_ERR(CAM_ICP, "%s icp power_resume failed", hw_mgr->hw_mgr_name);
	}

	return
rc; +} + +static int cam_icp_tvm_rcv_msg(struct cam_icp_hw_mgr *hw_mgr, uint32_t msg_type) +{ + CAM_ERR(CAM_ICP, "%s Not Supported", hw_mgr->hw_mgr_name); + return -EINVAL; +} + +static int cam_icp_pvm_rcv_msg(struct cam_icp_hw_mgr *hw_mgr, uint32_t msg_type) +{ + int rc = 0; + + switch (msg_type) { + case CAM_ICP_POWER_COLLAPSE: + rc = cam_icp_mgr_pvm_inter_vm_proc_power_collapse(hw_mgr); + if (rc) + CAM_ERR(CAM_ICP, "%s PVM ICP power collapse failed", hw_mgr->hw_mgr_name); + break; + case CAM_ICP_POWER_RESUME: + rc = cam_icp_mgr_pvm_inter_vm_proc_resume(hw_mgr); + if (rc) + CAM_ERR(CAM_ICP, "%s PVM ICP resume failed", hw_mgr->hw_mgr_name); + break; + case CAM_ICP_SHUTDOWN: + rc = cam_icp_mgr_pvm_inter_vm_proc_shutdown(hw_mgr); + if (rc) + CAM_ERR(CAM_ICP, "%s PVM ICP shutdown failed", hw_mgr->hw_mgr_name); + break; + default: + CAM_ERR(CAM_ICP, "%s Invalid destination PVM cmd:%d", hw_mgr->hw_mgr_name, + msg_type); + rc = -EINVAL; + } + + return rc; +} + +static int cam_icp_vm_rcv_msg(void *hw_mgr_priv, void *msg, uint32_t size) +{ + struct cam_icp_hw_mgr *hw_mgr = (struct cam_icp_hw_mgr *) hw_mgr_priv; + struct cam_vmrm_msg *vm_msg = (struct cam_vmrm_msg *) msg; + int rc = 0; + + if (!hw_mgr) { + CAM_ERR(CAM_ICP, "%s invalid hw_mgr_priv", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + if (!icp_vm_msg) { + CAM_ERR(CAM_ICP, "%s invalid icp_vm_data", hw_mgr->hw_mgr_name); + return -EINVAL; + } + + switch (vm_msg->des_vmid) { + case CAM_PVM: + rc = cam_icp_pvm_rcv_msg(hw_mgr, vm_msg->msg_type); + break; + case CAM_SVM1: + rc = cam_icp_tvm_rcv_msg(hw_mgr, vm_msg->msg_type); + break; + default: + CAM_ERR(CAM_ICP, "%s invalid destination vm dest_vm:%d current_vm:%d", + hw_mgr->hw_mgr_name, vm_msg->des_vmid, cam_vmrm_intf_get_vmid()); + return -EINVAL; + } + + return rc; +} + +static int cam_icp_vm_send_msg(struct cam_icp_hw_mgr *hw_mgr, uint32_t dest_vm, + uint32_t vmrm_msg_type, bool need_ack) +{ + struct cam_icp_inter_vm_msg icp_vm_msg = {0}; + int rc = 0; + + 
icp_vm_msg.skip_icp_init = true; + + rc = cam_vmrm_icp_send_msg(dest_vm, hw_mgr->hw_mgr_id, vmrm_msg_type, need_ack, + NULL, 1, CAM_ICP_INTER_VM_COMM_TIMEOUT_US); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Failed in sending icp %d command to PVM rc %d", + hw_mgr->hw_mgr_name, vmrm_msg_type, rc); + return rc; + } + + CAM_DBG(CAM_ICP, "ICP%d sent cmd%d to VM:%d", hw_mgr->hw_mgr_id, vmrm_msg_type, dest_vm); + + return rc; +} +#else +static int cam_icp_vm_send_msg(struct cam_icp_hw_mgr *hw_mgr, uint32_t dest_vm, + uint32_t vmrm_msg_type, bool need_ack) +{ + return -EINVAL; +} +#endif + +static int cam_icp_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args) +{ + int rc = 0; + struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_query_cap_cmd *query_cap = hw_caps_args; + struct cam_icp_query_cap_cmd icp_caps; + + if ((!hw_mgr_priv) || (!hw_caps_args)) { + CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", hw_mgr_priv, hw_caps_args); + return -EINVAL; + } + + if (sizeof(struct cam_icp_query_cap_cmd) != query_cap->size) { + CAM_ERR(CAM_ICP, + "[%s] Input query cap size:%u does not match expected query cap size: %u", + hw_mgr->hw_mgr_name, query_cap->size, + sizeof(struct cam_icp_query_cap_cmd)); + return -EFAULT; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + if (copy_from_user(&icp_caps, + u64_to_user_ptr(query_cap->caps_handle), + sizeof(struct cam_icp_query_cap_cmd))) { + CAM_ERR(CAM_ICP, "[%s] copy from_user failed", hw_mgr->hw_mgr_name); + rc = -EFAULT; + goto end; + } + + rc = hfi_get_hw_caps(&icp_caps); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Fail to get hfi caps", hw_mgr->hw_mgr_name); + goto end; + } + + icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl; + icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl; + + if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle), + &icp_caps, sizeof(struct cam_icp_query_cap_cmd))) { + CAM_ERR(CAM_ICP, "[%s] copy_to_user failed", hw_mgr->hw_mgr_name); + rc = -EFAULT; + } +end: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 
rc; +} + +static int cam_icp_mgr_get_hw_caps_v2(void *hw_mgr_priv, void *hw_caps_args) +{ + int rc = 0; + struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_icp_hw_device_info *dev_info; + struct cam_query_cap_cmd *query_cap = hw_caps_args; + struct cam_icp_query_cap_cmd_v2 query_cmd; + uint32_t supported_hw_dev, num_supported_device = 0; + int i; + bool is_ipe_supported = FALSE; + + if ((!hw_mgr_priv) || (!hw_caps_args)) { + CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", + hw_mgr_priv, hw_caps_args); + return -EINVAL; + } + + if (sizeof(struct cam_icp_query_cap_cmd_v2) != query_cap->size) { + CAM_ERR(CAM_ICP, + "[%s] Input query cap size:%u does not match expected query cap size: %ud", + hw_mgr->hw_mgr_name, query_cap->size, + sizeof(struct cam_icp_query_cap_cmd_v2)); + return -EFAULT; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + if (copy_from_user(&query_cmd, u64_to_user_ptr(query_cap->caps_handle), + sizeof(struct cam_icp_query_cap_cmd_v2))) { + CAM_ERR(CAM_ICP, "[%s] copy from user failed", hw_mgr->hw_mgr_name); + rc = -EFAULT; + goto end; + } + + query_cmd.num_valid_params = 0; + memset(&query_cmd.dev_info, 0, + (CAM_ICP_MAX_NUM_OF_DEV_TYPES * sizeof(struct cam_icp_device_info))); + + for (i = 0; i < CAM_ICP_MAX_ICP_HW_TYPE; i++) { + if (!CAM_ICP_IS_DEV_HW_EXIST(hw_mgr->hw_cap_mask, i)) + continue; + + query_cmd.dev_info[num_supported_device].dev_type = CAM_ICP_DEV_TYPE_ICP; + query_cmd.dev_info[num_supported_device].num_devices = CAM_ICP_MAX_ICP_PROC_PER_DEV; + num_supported_device++; + break; + } + + if (!num_supported_device) { + CAM_ERR(CAM_ICP, "No ICP HW binded to %s", hw_mgr->hw_mgr_name); + rc = -ENODEV; + goto end; + } + + for (i = 0; i < hw_mgr->num_dev_info; i++) { + dev_info = &hw_mgr->dev_info[i]; + + switch (dev_info->hw_dev_type) { + case CAM_ICP_DEV_IPE: + supported_hw_dev = CAM_ICP_DEV_TYPE_IPE; + is_ipe_supported = TRUE; + break; + case CAM_ICP_DEV_BPS: + supported_hw_dev = CAM_ICP_DEV_TYPE_BPS; + break; + case CAM_ICP_DEV_OFE: + 
supported_hw_dev = CAM_ICP_DEV_TYPE_OFE; + break; + default: + CAM_ERR(CAM_ICP, "[%s] Invalid icp hw type: %u", + hw_mgr->hw_mgr_name, i); + rc = -EBADSLT; + goto end; + } + + if (num_supported_device >= CAM_ICP_MAX_NUM_OF_DEV_TYPES) { + CAM_ERR(CAM_ICP, + "[%s] number of supported hw devices:%u exceeds maximum number of devices supported by query cap struct: %u", + hw_mgr->hw_mgr_name, num_supported_device, + CAM_ICP_MAX_NUM_OF_DEV_TYPES); + rc = -EFAULT; + goto end; + } + + query_cmd.dev_info[num_supported_device].dev_type = supported_hw_dev; + query_cmd.dev_info[num_supported_device].num_devices = dev_info->hw_dev_cnt; + + num_supported_device++; + } + + query_cmd.num_dev_info = num_supported_device; + + rc = hfi_get_hw_caps_v2(hw_mgr->hfi_handle, &query_cmd); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Fail to get hfi caps rc:%d", + hw_mgr->hw_mgr_name, rc); + goto end; + } + + query_cmd.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl; + query_cmd.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl; + if (is_ipe_supported) { + query_cmd.num_valid_params++; + query_cmd.valid_param_mask |= (1 << CAM_ICP_QUERY_MAX_ACQUIRE_DEV_VER_SUPPORTED_INDEX); + query_cmd.params[CAM_ICP_QUERY_MAX_ACQUIRE_DEV_VER_SUPPORTED_INDEX] = + CAM_ACQUIRE_DEV_STRUCT_VERSION_2; + } + if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle), + &query_cmd, sizeof(struct cam_icp_query_cap_cmd_v2))) { + CAM_ERR(CAM_ICP, "[%s] copy_to_user failed", hw_mgr->hw_mgr_name); + rc = -EFAULT; + } +end: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static void cam_icp_mgr_free_hw_devs(struct cam_icp_hw_mgr *hw_mgr) +{ + int i; + + for (i = 0; i < hw_mgr->num_dev_info; i++) + CAM_MEM_FREE(hw_mgr->dev_info[i].dev_intf); + + CAM_MEM_FREE(hw_mgr->dev_info); + hw_mgr->dev_info = NULL; + hw_mgr->icp_dev_intf = NULL; +} + +static int cam_icp_mgr_verify_hw_caps(struct cam_icp_hw_mgr *hw_mgr, uint32_t *icp_dev_mask, + uint32_t num_icp_dev_mask) +{ + struct cam_cpas_query_cap query; + int rc, i; + uint32_t 
*cam_caps, num_cpas_cap_mask; + + rc = cam_cpas_get_hw_info(&query.camera_family, &query.camera_version, + &query.cpas_version, &cam_caps, &num_cpas_cap_mask, + NULL, NULL); + if (rc) + return rc; + + if (num_icp_dev_mask > num_cpas_cap_mask) { + CAM_ERR(CAM_ICP, + "[%s] Number of found icp device caps mask %u exceeds cpas cap mask %u", + hw_mgr->hw_mgr_name, num_icp_dev_mask, num_cpas_cap_mask); + return -EINVAL; + } + + for (i = 0; i < num_icp_dev_mask; i++) { + if ((icp_dev_mask[i] & cam_caps[i]) != icp_dev_mask[i]) { + CAM_ERR(CAM_ICP, + "[%s] Found unsupported HW, cpas caps mask: %u icp device mask: %u", + hw_mgr->hw_mgr_name, cam_caps[i], icp_dev_mask[i]); + return -ENODEV; + } + } + + return 0; +} + +static int cam_icp_mgr_alloc_devs(struct device_node *np, struct cam_icp_hw_mgr *hw_mgr, + struct cam_hw_intf ***devices, uint32_t *hw_dev_cnt) +{ + struct cam_hw_intf **alloc_devices = NULL; + int rc, i; + enum cam_icp_hw_type icp_hw_type; + uint32_t num = 0, num_cpas_mask = 0, cpas_hw_mask[MAX_HW_CAPS_MASK] = {0}; + + rc = cam_icp_alloc_processor_devs(np, &icp_hw_type, &alloc_devices, hw_dev_cnt); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] proc devices allocation failed rc=%d", + hw_mgr->hw_mgr_name, rc); + return rc; + } + + if (!CAM_ICP_IS_VALID_HW_DEV_TYPE(icp_hw_type)) { + CAM_ERR(CAM_ICP, "[%s] Invalid hw dev type: %u", + hw_mgr->hw_mgr_name, icp_hw_type); + rc = -EINVAL; + CAM_MEM_FREE(devices); + CAM_MEM_FREE(alloc_devices); + return rc; + } + + if (hw_dev_cnt[icp_hw_type] > CAM_ICP_MAX_ICP_PROC_PER_DEV) { + CAM_ERR(CAM_ICP, + "Invalid number of ICP device: %u allocated exceeds ICP device supported per hw mgr: %u", + hw_dev_cnt[icp_hw_type], CAM_ICP_MAX_ICP_PROC_PER_DEV); + rc = -EINVAL; + goto free_devs; + } + + devices[icp_hw_type] = alloc_devices; + hw_mgr->hw_cap_mask |= BIT(icp_hw_type); + num_cpas_mask = max(num_cpas_mask, (uint32_t)(ICP_CAPS_MASK_IDX + 1)); + cpas_hw_mask[ICP_CAPS_MASK_IDX] |= icp_cpas_mask[hw_mgr->hw_mgr_id]; + + rc = 
of_property_read_u32(np, "num-ipe", &num); + if (!rc) { + alloc_devices = CAM_MEM_ZALLOC_ARRAY(num, sizeof(*alloc_devices), GFP_KERNEL); + if (!alloc_devices) { + CAM_ERR(CAM_ICP, "[%s] ipe device allocation failed", + hw_mgr->hw_mgr_name); + goto free_devs; + } + + hw_dev_cnt[CAM_ICP_DEV_IPE] = num; + devices[CAM_ICP_DEV_IPE] = alloc_devices; + hw_mgr->hw_cap_mask |= BIT(CAM_ICP_DEV_IPE); + num_cpas_mask = max(num_cpas_mask, (uint32_t)(IPE_CAPS_MASK_IDX + 1)); + cpas_hw_mask[IPE_CAPS_MASK_IDX] |= CPAS_TITAN_IPE0_CAP_BIT; + } + + rc = of_property_read_u32(np, "num-bps", &num); + if (!rc) { + alloc_devices = CAM_MEM_ZALLOC_ARRAY(num, sizeof(*alloc_devices), GFP_KERNEL); + if (!alloc_devices) { + CAM_ERR(CAM_ICP, "[%s] bps device allocation failed", + hw_mgr->hw_mgr_name); + goto free_devs; + } + + hw_dev_cnt[CAM_ICP_DEV_BPS] = num; + devices[CAM_ICP_DEV_BPS] = alloc_devices; + hw_mgr->hw_cap_mask |= BIT(CAM_ICP_DEV_BPS); + num_cpas_mask = max(num_cpas_mask, (uint32_t)(BPS_CAPS_MASK_IDX + 1)); + cpas_hw_mask[BPS_CAPS_MASK_IDX] |= CPAS_BPS_BIT; + } + + rc = of_property_read_u32(np, "num-ofe", &num); + if (!rc) { + alloc_devices = CAM_MEM_ZALLOC_ARRAY(num, sizeof(*alloc_devices), GFP_KERNEL); + if (!alloc_devices) { + CAM_ERR(CAM_ICP, "[%s] OFE device allocation failed", + hw_mgr->hw_mgr_name); + goto free_devs; + } + + hw_dev_cnt[CAM_ICP_DEV_OFE] = num; + devices[CAM_ICP_DEV_OFE] = alloc_devices; + hw_mgr->hw_cap_mask |= BIT(CAM_ICP_DEV_OFE); + num_cpas_mask = max(num_cpas_mask, (uint32_t)(OFE_CAPS_MASK_IDX + 1)); + cpas_hw_mask[OFE_CAPS_MASK_IDX] |= CPAS_OFE_BIT; + } + + rc = cam_icp_mgr_verify_hw_caps(hw_mgr, cpas_hw_mask, num_cpas_mask); + if (rc) { + CAM_ERR(CAM_ICP, "CPAS ICP HW capability verification fails rc=%d", rc); + goto free_devs; + } + + hw_mgr->dev_pc_flag = of_property_read_bool(np, "ipe_bps_pc_en"); + hw_mgr->icp_pc_flag = of_property_read_bool(np, "icp_pc_en"); + hw_mgr->icp_use_pil = of_property_read_bool(np, "icp_use_pil"); + 
hw_mgr->synx_signaling_en = of_property_read_bool(np, "synx_signaling_en"); + hw_mgr->fw_based_sys_caching = cam_cpas_is_fw_based_sys_caching_supported(); + + return 0; + +free_devs: + CAM_MEM_FREE(alloc_devices); + for (i = 0; i < CAM_ICP_HW_MAX; i++) + CAM_MEM_FREE(devices[i]); + + return rc; +} + +static char *cam_icp_hw_dev_type_to_name(enum cam_icp_hw_type hw_dev_type) +{ + switch (hw_dev_type) { + case CAM_ICP_HW_ICP_V1: + return "ICP_V1"; + case CAM_ICP_HW_ICP_V2: + return "ICP_V2"; + case CAM_ICP_DEV_IPE: + return "IPE"; + case CAM_ICP_DEV_BPS: + return "BPS"; + case CAM_ICP_DEV_OFE: + return "OFE"; + default: + return "Invalid hw dev type"; + } +} + +static int cam_icp_mgr_set_up_dev_info(struct cam_icp_hw_mgr *hw_mgr, + struct cam_hw_intf ***devices, uint32_t *hw_dev_cnt) +{ + int i, j; + + hw_mgr->icp_dev_intf = CAM_ICP_GET_PROC_DEV_INTF(devices); + if (!hw_mgr->icp_dev_intf) { + CAM_ERR(CAM_ICP, "[%s] Invalid ICP dev interface is NULL", + hw_mgr->hw_mgr_name); + return -EINVAL; + } + + for (i = CAM_ICP_DEV_START_IDX; i < CAM_ICP_HW_MAX; i++) { + if (hw_dev_cnt[i] > 0) + hw_mgr->num_dev_info++; + } + + hw_mgr->dev_info = CAM_MEM_ZALLOC_ARRAY(hw_mgr->num_dev_info, + sizeof(struct cam_icp_hw_device_info), + GFP_KERNEL); + if (!hw_mgr->dev_info) + return -ENOMEM; + + for (i = CAM_ICP_DEV_START_IDX, j = 0; i < CAM_ICP_HW_MAX; i++) { + if (hw_dev_cnt[i] > 0) { + hw_mgr->dev_info_idx[i] = j; + hw_mgr->dev_info[j].dev_intf = devices[i]; + hw_mgr->dev_info[j].dev_name = cam_icp_hw_dev_type_to_name(i); + hw_mgr->dev_info[j].hw_dev_type = i; + hw_mgr->dev_info[j].hw_dev_cnt = hw_dev_cnt[i]; + j++; + } else { + hw_mgr->dev_info_idx[i] = -1; + } + } + + return 0; +} + +static int cam_icp_mgr_init_devs(struct device_node *np, struct cam_icp_hw_mgr *hw_mgr) +{ + int rc, i, j, count, num_hw; + struct cam_hw_intf **devices[CAM_ICP_HW_MAX] = {0}; + uint32_t hw_dev_cnt[CAM_ICP_HW_MAX] = {0}; + + rc = cam_icp_mgr_alloc_devs(np, hw_mgr, devices, hw_dev_cnt); + if (rc) { + 
 CAM_ERR(CAM_ICP, "[%s] devices allocation failed rc=%d",
			hw_mgr->hw_mgr_name, rc);
		return rc;
	}

	/* One "compat-hw-name" DT string per ICP-related platform device */
	count = of_property_count_strings(np, "compat-hw-name");
	if (count < 0) {
		CAM_ERR(CAM_ICP, "[%s] Invalid compat-hw-name count=%d", hw_mgr->hw_mgr_name,
			count);
		rc = count;
		goto free_devices;
	}

	for (i = 0; i < count; i++) {
		const char *name = NULL;
		struct platform_device *pdev;
		struct device_node *node;
		struct cam_hw_intf *iface;
		struct cam_icp_hw_intf_data *dev_intf_data;

		rc = of_property_read_string_index(np, "compat-hw-name",
			i, &name);
		if (rc) {
			CAM_ERR(CAM_ICP,
				"[%s] unable to get property name: idx=%d rc=%d",
				hw_mgr->hw_mgr_name, i, rc);
			goto free_devices;
		}

		/* Resolve the named DT node, then its bound platform device */
		node = of_find_node_by_name(NULL, name);
		if (!node) {
			CAM_ERR(CAM_ICP, "[%s] missing node %s",
				hw_mgr->hw_mgr_name, name);
			rc = -ENODEV;
			goto free_devices;
		}

		pdev = of_find_device_by_node(node);
		/* Node reference no longer needed once the pdev is resolved */
		of_node_put(node);
		if (!pdev) {
			CAM_ERR(CAM_ICP,
				"[%s] platform device not found for %s",
				hw_mgr->hw_mgr_name, name);
			rc = -ENODEV;
			goto free_devices;
		}

		/* Each device driver stashes its hw interface data in drvdata */
		dev_intf_data = platform_get_drvdata(pdev);
		if (!dev_intf_data) {
			CAM_ERR(CAM_ICP, "[%s] invalid drvdata in ICP related devices",
				hw_mgr->hw_mgr_name);
			rc = -EINVAL;
			goto free_devices;
		}

		iface = dev_intf_data->hw_intf;
		if (!iface || !iface->hw_ops.process_cmd) {
			CAM_ERR(CAM_ICP,
				"[%s] invalid interface: iface=%pK process_cmd=%pK",
				hw_mgr->hw_mgr_name, iface,
				(iface ?
iface->hw_ops.process_cmd : NULL)); + + rc = -EINVAL; + goto free_devices; + } + + if (!CAM_ICP_IS_VALID_HW_DEV_TYPE(iface->hw_type)) { + CAM_ERR(CAM_ICP, "[%s] Invalid HW type:%u", + hw_mgr->hw_mgr_name, iface->hw_type); + rc = -EINVAL; + goto free_devices; + } + + num_hw = iface->hw_idx + 1; + for (j = 0; j < num_hw; j++) { + if (!devices[iface->hw_type][j]) { + devices[iface->hw_type][j] = iface; + break; + } + } + + /* Stash all cam_hw_pid that linked to this ICP hw mgr */ + if (hw_mgr->num_pid + dev_intf_data->num_pid > CAM_ICP_PID_NUM_MAX) { + CAM_ERR(CAM_ICP, + "Num of PIDs may go beyond the threshold, current num_pid: %d, num_pid in the dev: %d, threshold: %d", + hw_mgr->num_pid, dev_intf_data->num_pid, CAM_ICP_PID_NUM_MAX); + rc = -EINVAL; + goto free_devices; + } + + for (j = 0; j < dev_intf_data->num_pid; j++) { + hw_mgr->pid[hw_mgr->num_pid] = dev_intf_data->pid[j]; + hw_mgr->num_pid++; + } + } + + rc = cam_icp_mgr_set_up_dev_info(hw_mgr, devices, hw_dev_cnt); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to set up hw device info rc=%d", rc); + goto free_devices; + } + + return 0; + +free_devices: + for (i = 0; i < CAM_ICP_HW_MAX; i++) + CAM_MEM_FREE(devices[i]); + + return rc; +} + +static void cam_req_mgr_process_workq_icp_command_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static void cam_req_mgr_process_workq_icp_message_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static void cam_req_mgr_process_workq_icp_timer_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static int cam_icp_mgr_create_wq(struct cam_icp_hw_mgr *hw_mgr) +{ + char q_name[32]; + int rc, i; + + scnprintf(q_name, sizeof(q_name), "%s_command_queue", hw_mgr->hw_mgr_name); + rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK, + &hw_mgr->cmd_work, CRM_WORKQ_USAGE_NON_IRQ, 0, + cam_req_mgr_process_workq_icp_command_queue); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] unable to create a command worker for %s", + 
hw_mgr->hw_mgr_name, q_name); + goto cmd_work_failed; + } + + scnprintf(q_name, sizeof(q_name), "%s_message_queue", hw_mgr->hw_mgr_name); + rc = cam_req_mgr_workq_create(q_name, ICP_WORKQ_NUM_TASK, + &hw_mgr->msg_work, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_workq_icp_message_queue); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] unable to create a message worker for %s", + hw_mgr->hw_mgr_name, q_name); + goto msg_work_failed; + } + + scnprintf(q_name, sizeof(q_name), "%s_timer_queue", hw_mgr->hw_mgr_name); + rc = cam_req_mgr_workq_create(q_name, ICP_WORKQ_NUM_TASK, + &hw_mgr->timer_work, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_workq_icp_timer_queue); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] unable to create a timer worker for %s", + hw_mgr->hw_mgr_name, q_name); + goto timer_work_failed; + } + + hw_mgr->cmd_work_data = + CAM_MEM_ZALLOC(sizeof(struct hfi_cmd_work_data) * ICP_WORKQ_NUM_TASK, GFP_KERNEL); + if (!hw_mgr->cmd_work_data) { + CAM_ERR(CAM_ICP, "[%s] Mem reservation fail for cmd_work_data", + hw_mgr->hw_mgr_name); + goto cmd_work_data_failed; + } + hw_mgr->msg_work_data = + CAM_MEM_ZALLOC(sizeof(struct hfi_msg_work_data) * ICP_WORKQ_NUM_TASK, GFP_KERNEL); + if (!hw_mgr->msg_work_data) { + CAM_ERR(CAM_ICP, "[%s] Mem reservation fail for msg_work_data", + hw_mgr->hw_mgr_name); + goto msg_work_data_failed; + } + + hw_mgr->timer_work_data = + CAM_MEM_ZALLOC(sizeof(struct hfi_msg_work_data) * ICP_WORKQ_NUM_TASK, GFP_KERNEL); + if (!hw_mgr->timer_work_data) { + CAM_ERR(CAM_ICP, "[%s] Mem reservation fail for timer_work_data", + hw_mgr->hw_mgr_name); + goto timer_work_data_failed; + } + + for (i = 0; i < ICP_WORKQ_NUM_TASK; i++) + hw_mgr->msg_work->task.pool[i].payload = + &hw_mgr->msg_work_data[i]; + + for (i = 0; i < ICP_WORKQ_NUM_TASK; i++) + hw_mgr->cmd_work->task.pool[i].payload = + &hw_mgr->cmd_work_data[i]; + + for (i = 0; i < ICP_WORKQ_NUM_TASK; i++) + hw_mgr->timer_work->task.pool[i].payload = + &hw_mgr->timer_work_data[i]; + return 0; + 
+timer_work_data_failed: + CAM_MEM_FREE(hw_mgr->msg_work_data); +msg_work_data_failed: + CAM_MEM_FREE(hw_mgr->cmd_work_data); +cmd_work_data_failed: + cam_req_mgr_workq_destroy(&hw_mgr->timer_work); +timer_work_failed: + cam_req_mgr_workq_destroy(&hw_mgr->msg_work); +msg_work_failed: + cam_req_mgr_workq_destroy(&hw_mgr->cmd_work); +cmd_work_failed: + return rc; +} + +void cam_icp_mgr_destroy_wq(struct cam_icp_hw_mgr *hw_mgr) +{ + cam_req_mgr_workq_destroy(&hw_mgr->timer_work); + cam_req_mgr_workq_destroy(&hw_mgr->msg_work); + cam_req_mgr_workq_destroy(&hw_mgr->cmd_work); +} + +static void cam_icp_mgr_dump_pf_data(struct cam_icp_hw_mgr *hw_mgr, + struct cam_hw_cmd_pf_args *pf_cmd_args) +{ + struct cam_packet *packet; + struct cam_hw_dump_pf_args *pf_args; + struct cam_ctx_request *req_pf; + + req_pf = (struct cam_ctx_request *) + pf_cmd_args->pf_req_info->req; + packet = (struct cam_packet *)req_pf->packet; + pf_args = pf_cmd_args->pf_args; + + /* + * res_id_support is false since ICP doesn't have knowledge + * of res_id. 
FW submits packet to HW
	 */
	cam_packet_util_dump_io_bufs(packet, hw_mgr->iommu_hdl,
		hw_mgr->iommu_sec_hdl, pf_args, false);

	cam_packet_util_dump_patch_info(packet, hw_mgr->iommu_hdl,
		hw_mgr->iommu_sec_hdl, pf_args);

}

/*
 * Generic hw mgr command entry point. Only CAM_HW_MGR_CMD_DUMP_PF_INFO
 * is handled: either check whether the faulting SMMU pid belongs to
 * this hw mgr, or dump the page-fault buffer/patch details.
 * Returns 0 (unknown commands are only logged).
 */
static int cam_icp_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
{
	int rc = 0;
	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;

	if (!hw_mgr_priv || !cmd_args) {
		CAM_ERR(CAM_ICP, "Invalid arguments");
		return -EINVAL;
	}

	switch (hw_cmd_args->cmd_type) {
	case CAM_HW_MGR_CMD_DUMP_PF_INFO: {
		struct cam_hw_cmd_pf_args *pf_cmd_args;
		int i;

		pf_cmd_args = hw_cmd_args->u.pf_cmd_args;
		pf_cmd_args->pf_args->pf_pid_found_status = CAM_PF_PID_FOUND_FAILURE;
		if (pf_cmd_args->pf_args->check_pid) {
			/* Match the faulting pid against the pids stashed at init */
			for (i = 0; i < hw_mgr->num_pid; i++) {
				if (pf_cmd_args->pf_args->pf_smmu_info->pid == hw_mgr->pid[i]) {
					pf_cmd_args->pf_args->pf_pid_found_status
						= CAM_PF_PID_FOUND_SUCCESS;
					break;
				}
			}

			/*
			 * No dump for pf data if simply checking whether culprit
			 * hw pid linked to the current icp hw mgr
			 */
			break;
		}

		cam_icp_mgr_dump_pf_data(hw_mgr, pf_cmd_args);
	}
	break;
	default:
		CAM_ERR(CAM_ICP, "[%s] Invalid cmd", hw_mgr->hw_mgr_name);
	}

	return rc;
}

/*
 * Stash an event-injection request on the context and mark it valid;
 * the stored parameters are consumed later by the event path.
 */
static void cam_icp_mgr_inject_evt(void *hw_mgr_priv, void *evt_args)
{
	struct cam_icp_hw_ctx_data *ctx_data = hw_mgr_priv;
	struct cam_hw_inject_evt_param *evt_params = evt_args;

	if (!ctx_data || !evt_params) {
		CAM_ERR(CAM_ICP, "Invalid params ctx data %s event params %s",
			CAM_IS_NULL_TO_STR(ctx_data), CAM_IS_NULL_TO_STR(evt_params));
		return;
	}

	memcpy(&ctx_data->evt_inject_params, evt_params, sizeof(struct cam_hw_inject_evt_param));

	ctx_data->evt_inject_params.is_valid = true;
}

/*
 * Register this hw mgr as an HFI client and share the resulting HFI
 * handle with the ICP core device. On failure the registration is
 * rolled back. Returns 0 on success, negative errno otherwise.
 */
static int cam_icp_mgr_register_hfi_client(struct cam_icp_hw_mgr *hw_mgr)
{

	struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf;
	int hfi_handle;
	int rc;

	if (!icp_dev_intf) {
 CAM_ERR(CAM_ICP, "[%s] ICP device interface is NULL",
			hw_mgr->hw_mgr_name);
		return -EINVAL;
	}

	hw_mgr->hfi_handle = HFI_HANDLE_INIT_VALUE;
	rc = cam_hfi_register(&hw_mgr->hfi_handle, hw_mgr->hw_mgr_name);
	if (rc) {
		CAM_ERR(CAM_ICP, "[%s] Fail to register hw mgr as hfi client rc=%d",
			hw_mgr->hw_mgr_name, rc);
		return rc;
	}

	/* Hand the obtained HFI handle to the ICP core device */
	hfi_handle = hw_mgr->hfi_handle;
	rc = icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv, CAM_ICP_CMD_SET_HFI_HANDLE,
		&hfi_handle, sizeof(hfi_handle));
	if (rc) {
		CAM_ERR(CAM_ICP, "[%s] Fail to share hfi handle to ICP core rc=%d hfi hdl: %d",
			hw_mgr->hw_mgr_name, rc, hfi_handle);
		/* Roll back the registration so state stays consistent */
		cam_hfi_unregister(&hw_mgr->hfi_handle);
		return rc;
	}

	CAM_DBG(CAM_ICP, "[%s] successfully registered as hfi client with handle: %d",
		hw_mgr->hw_mgr_name, hfi_handle);

	return 0;
}

/*
 * Unregister this hw mgr from HFI and propagate the post-unregister
 * handle value back to the ICP core device.
 */
static void cam_icp_mgr_unregister_hfi_client(struct cam_icp_hw_mgr *hw_mgr)
{
	struct cam_hw_intf *icp_dev_intf = hw_mgr->icp_dev_intf;
	int rc;

	rc = cam_hfi_unregister(&hw_mgr->hfi_handle);
	if (rc) {
		CAM_ERR(CAM_ICP, "[%s] Failed to unregister hfi client hdl: %d rc: %d",
			hw_mgr->hw_mgr_name, hw_mgr->hfi_handle, rc);
		return;
	}

	if (!icp_dev_intf) {
		CAM_ERR(CAM_ICP, "[%s] ICP dev intf is NULL", hw_mgr->hw_mgr_name);
		return;
	}

	/*
	 * NOTE(review): presumably this pushes the now-invalidated handle to
	 * the core so it stops using the old session — confirm against
	 * cam_hfi_unregister()'s handling of hfi_handle.
	 */
	icp_dev_intf->hw_ops.process_cmd(icp_dev_intf->hw_priv, CAM_ICP_CMD_SET_HFI_HANDLE,
		&hw_mgr->hfi_handle, sizeof(hw_mgr->hfi_handle));
}

/*
 * Compose the hw mgr name: "icp" for device index 0, "icp<N>" for
 * any other index. Returns 0 on success, -EINVAL on NULL output buffer.
 */
static int cam_icp_mgr_get_hw_mgr_name(uint32_t device_idx, char *hw_mgr_name)
{
	if (!hw_mgr_name) {
		CAM_ERR(CAM_ICP, "Invalid output parameter hw mgr id NULL");
		return -EINVAL;
	}

	if (device_idx)
		scnprintf(hw_mgr_name, CAM_ICP_HW_MGR_NAME_SIZE, "icp%u", device_idx);
	else
		strscpy(hw_mgr_name, "icp", CAM_ICP_HW_MGR_NAME_SIZE);

	return 0;
}

int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
	int *iommu_hdl, cam_icp_mini_dump_cb mini_dump_cb, int device_idx)
{
	int i, rc = 0;
	struct cam_icp_hw_mgr
*hw_mgr = NULL; + struct cam_hw_mgr_intf *hw_mgr_intf; +#ifdef CONFIG_SPECTRA_VMRM + struct cam_driver_node icp_driver_node; +#endif + + hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl; + if (!of_node || !hw_mgr_intf) { + CAM_ERR(CAM_ICP, "Invalid args of_node %pK hw_mgr %pK", + of_node, hw_mgr_intf); + return -EINVAL; + } + + if (g_icp_hw_mgr[device_idx]) { + CAM_ERR(CAM_ICP, "HW mgr for device idx: %u already initialized", device_idx); + return -EPERM; + } + + memset(hw_mgr_intf, 0, sizeof(struct cam_hw_mgr_intf)); + + hw_mgr = CAM_MEM_ZALLOC(sizeof(struct cam_icp_hw_mgr), GFP_KERNEL); + if (!hw_mgr) + return -ENOMEM; + + /* Init linked list for context data */ + INIT_LIST_HEAD(&hw_mgr->active_ctx_info.active_ctx_list); + hw_mgr->ctx_mutex = CAM_MEM_ZALLOC(sizeof(struct mutex) * CAM_ICP_CTX_MAX, + GFP_KERNEL); + if (!hw_mgr->ctx_mutex) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for mutex of each ctx"); + return -ENOMEM; + } + + for (i = 0; i < CAM_ICP_CTX_MAX; i++) + mutex_init(&hw_mgr->ctx_mutex[i]); + + hw_mgr->hw_mgr_id = device_idx; + + rc = cam_icp_mgr_get_hw_mgr_name(device_idx, hw_mgr->hw_mgr_name); + if (rc) { + CAM_ERR(CAM_ICP, "Fail to get hw mgr name rc: %d for icp dev[%u]", + rc, device_idx); + goto free_hw_mgr; + } + + CAM_DBG(CAM_ICP, "Initailize hw mgr[%u] with name: %s", + device_idx, hw_mgr->hw_mgr_name); + + hw_mgr_intf->hw_mgr_priv = hw_mgr; + hw_mgr_intf->hw_get_caps = cam_icp_mgr_get_hw_caps; + hw_mgr_intf->hw_get_caps_v2 = cam_icp_mgr_get_hw_caps_v2; + hw_mgr_intf->hw_acquire = cam_icp_mgr_acquire_hw; + hw_mgr_intf->hw_release = cam_icp_mgr_release_hw; + hw_mgr_intf->hw_prepare_update = cam_icp_mgr_prepare_hw_update; + hw_mgr_intf->hw_config_stream_settings = + cam_icp_mgr_config_stream_settings; + hw_mgr_intf->hw_config = cam_icp_mgr_config_hw; + hw_mgr_intf->hw_open = cam_icp_mgr_hw_open_u; + hw_mgr_intf->hw_close = cam_icp_mgr_hw_close_u; + hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush; + hw_mgr_intf->hw_cmd = cam_icp_mgr_cmd; + 
hw_mgr_intf->hw_dump = cam_icp_mgr_hw_dump; + hw_mgr_intf->hw_inject_evt = cam_icp_mgr_inject_evt; + hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE; + hw_mgr->mini_dump_cb = mini_dump_cb; + hw_mgr_intf->synx_trigger = cam_icp_mgr_service_synx_test_cmds; + hw_mgr->hfi_init_done = false; + +#ifdef CONFIG_SPECTRA_VMRM + icp_driver_node.driver_id = hw_mgr->hw_mgr_id + CAM_DRIVER_ID_ICP; + scnprintf(icp_driver_node.driver_name, sizeof(icp_driver_node.driver_name), + "%s", hw_mgr->hw_mgr_name); + icp_driver_node.driver_msg_callback = cam_icp_vm_rcv_msg; + icp_driver_node.driver_msg_callback_data = hw_mgr; + + rc = cam_vmrm_populate_driver_node_info(&icp_driver_node); + if (rc) { + CAM_ERR(CAM_ICP, "%s sw node populate failed: %d", hw_mgr->hw_mgr_name, rc); + goto free_hw_mgr; + } +#endif + mutex_init(&hw_mgr->hw_mgr_mutex); + spin_lock_init(&hw_mgr->hw_mgr_lock); + + atomic_set(&hw_mgr->frame_in_process, 0); + hw_mgr->frame_in_process_ctx_id = -1; + + rc = cam_icp_mgr_init_devs(of_node, hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] cam_icp_mgr_init_devs fail: rc: %d", + hw_mgr->hw_mgr_name, rc); + goto destroy_mutex; + } + + rc = cam_smmu_get_handle(hw_mgr->hw_mgr_name, &hw_mgr->iommu_hdl); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] get mmu handle failed: %d", + hw_mgr->hw_mgr_name, rc); + goto icp_get_hdl_failed; + } + + rc = cam_smmu_get_handle("cam-secure", &hw_mgr->iommu_sec_hdl); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] get secure mmu handle failed: %d", + hw_mgr->hw_mgr_name, rc); + goto secure_hdl_failed; + } + + rc = cam_icp_mgr_create_wq(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] cam_icp_mgr_create_wq fail: rc=%d", + hw_mgr->hw_mgr_name, rc); + goto icp_wq_create_failed; + } + + rc = cam_icp_hw_mgr_create_debugfs_entry(hw_mgr); + if (rc) { + CAM_ERR(CAM_ICP, "[%s] Fail to create debugfs entry", + hw_mgr->hw_mgr_name); + goto icp_debugfs_create_failed; + } + + if (iommu_hdl) + *iommu_hdl = hw_mgr->iommu_hdl; + + rc = cam_icp_mgr_register_hfi_client(hw_mgr); + if (rc) 
{ + CAM_ERR(CAM_ICP, "[%s] Fail to register hw mgr as hfi handle", + hw_mgr->hw_mgr_name); + goto icp_hfi_register_failed; + } + + init_completion(&hw_mgr->icp_complete); + cam_common_register_mini_dump_cb(cam_icp_hw_mgr_mini_dump_cb, hw_mgr->hw_mgr_name, + &hw_mgr->hw_mgr_id); + cam_icp_test_irq_line_at_probe(hw_mgr); + + rc = cam_icp_get_svs_clk_info(hw_mgr); + if (rc) + goto icp_get_svs_clk_failed; + + if (hw_mgr->synx_signaling_en) { + switch (hw_mgr->hw_mgr_id) { + case 0: + hw_mgr->synx_core_id = CAM_ICP_0_SYNX_CORE; + break; + case 1: + hw_mgr->synx_core_id = CAM_ICP_1_SYNX_CORE; + break; + default: + hw_mgr->synx_core_id = CAM_INVALID_SYNX_CORE; + break; + } + } + + g_icp_hw_mgr[device_idx] = hw_mgr; + atomic_set(&hw_mgr->abort_in_process, 0); + + CAM_DBG(CAM_ICP, "Done hw mgr[%u] init: icp name:%s", + device_idx, hw_mgr->hw_mgr_name); + + return rc; + +icp_get_svs_clk_failed: + cam_hfi_unregister(&hw_mgr->hfi_handle); +icp_hfi_register_failed: + debugfs_remove_recursive(hw_mgr->dentry); + hw_mgr->dentry = NULL; +icp_debugfs_create_failed: + cam_icp_mgr_destroy_wq(hw_mgr); +icp_wq_create_failed: + cam_smmu_destroy_handle(hw_mgr->iommu_sec_hdl); + hw_mgr->iommu_sec_hdl = -1; +secure_hdl_failed: + cam_smmu_destroy_handle(hw_mgr->iommu_hdl); + hw_mgr->iommu_hdl = -1; +icp_get_hdl_failed: + cam_icp_mgr_free_hw_devs(hw_mgr); +destroy_mutex: + mutex_destroy(&hw_mgr->hw_mgr_mutex); +free_hw_mgr: + for (i = 0; i < CAM_ICP_CTX_MAX; i++) + mutex_destroy(&hw_mgr->ctx_mutex[i]); + CAM_MEM_FREE(hw_mgr->ctx_mutex); + CAM_MEM_FREE(hw_mgr); + return rc; +} + +void cam_icp_hw_mgr_deinit(int device_idx) +{ + struct cam_icp_hw_mgr *hw_mgr = NULL; + struct cam_icp_hw_ctx_data *ctx_data, *temp; + int i; + + hw_mgr = g_icp_hw_mgr[device_idx]; + if (!hw_mgr) { + CAM_ERR(CAM_ICP, "Uninitialized hw mgr for subdev: %u", device_idx); + return; + } + + CAM_DBG(CAM_ICP, "hw mgr deinit: %u icp name: %s", device_idx, hw_mgr->hw_mgr_name); + + hw_mgr->dentry = NULL; + + 
cam_icp_mgr_unregister_hfi_client(hw_mgr); + cam_icp_mgr_destroy_wq(hw_mgr); + cam_smmu_destroy_handle(hw_mgr->iommu_hdl); + cam_icp_mgr_free_hw_devs(hw_mgr); + for (i = 0; i < CAM_ICP_CTX_MAX; i++) + mutex_destroy(&hw_mgr->ctx_mutex[i]); + mutex_destroy(&hw_mgr->hw_mgr_mutex); + if (unlikely(!list_empty(&hw_mgr->active_ctx_info.active_ctx_list))) { + list_for_each_entry_safe(ctx_data, temp, + &hw_mgr->active_ctx_info.active_ctx_list, list) { + cam_icp_cpas_deactivate_llcc(ctx_data); + if (cam_presil_mode_enabled()) + CAM_MEM_FREE(ctx_data->hfi_frame_process.hangdump_mem_regions); + list_del(&ctx_data->list); + CAM_MEM_FREE(ctx_data); + ctx_data = NULL; + } + } + + CAM_MEM_FREE(hw_mgr->ctx_mutex); + CAM_MEM_FREE(hw_mgr); + g_icp_hw_mgr[device_idx] = NULL; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h new file mode 100644 index 0000000000..e2eeda8afb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h @@ -0,0 +1,718 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_ICP_HW_MGR_H +#define CAM_ICP_HW_MGR_H + +#include +#include +#include +#include +#include "cam_icp_hw_intf.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "hfi_session_defs.h" +#include "hfi_intf.h" +#include "cam_req_mgr_workq.h" +#include "cam_mem_mgr.h" +#include "cam_smmu_api.h" +#include "cam_soc_util.h" +#include "cam_req_mgr_timer.h" + +#define CAM_ICP_ROLE_PARENT 1 +#define CAM_ICP_ROLE_CHILD 2 + +#define CAM_FRAME_CMD_MAX 40 + +#define CAM_MAX_OUT_RES 14 +#define CAM_MAX_IN_RES 16 + +#define ICP_WORKQ_NUM_TASK 100 +#define ICP_WORKQ_TASK_CMD_TYPE 1 +#define ICP_WORKQ_TASK_MSG_TYPE 2 + +#define ICP_PACKET_SIZE 0 +#define ICP_PACKET_TYPE 1 +#define ICP_PACKET_OPCODE 2 +#define ICP_MAX_OUTPUT_SUPPORTED 15 + +#define ICP_FRAME_PROCESS_SUCCESS 0 +#define ICP_FRAME_PROCESS_FAILURE 1 + +/* size of buffer to drain from msg/dbq queue */ +#define ICP_MSG_BUF_SIZE_IN_WORDS 512 +#define ICP_DBG_BUF_SIZE_IN_WORDS 10240 + +#define ICP_OVER_CLK_THRESHOLD 5 +#define ICP_TWO_DEV_BW_SHARE_RATIO 2 + +#define CPAS_IPE0_BIT 0x1000 +#define CPAS_IPE1_BIT 0x2000 +#define CPAS_BPS_BIT 0x400 +#define CPAS_ICP_BIT 0x1 +#define CPAS_ICP1_BIT 0x4 +#define CPAS_OFE_BIT 0x10 + +/* Used for targets >= 480 and its variants */ +#define CPAS_TITAN_IPE0_CAP_BIT 0x800 + +#define CAM_ICP_CTX_STATE_IN_USE 0x1 +#define CAM_ICP_CTX_STATE_ACQUIRED 0x2 +#define CAM_ICP_CTX_STATE_RELEASE 0x3 + +#define CAM_ICP_CTX_MAX_CMD_BUFFERS 0x2 + +#define CAM_ICP_SYS_CACHE_TYPE_MAX 5 + +/* Current appliacble vote paths, based on number of UAPI definitions */ +#define CAM_ICP_MAX_PER_PATH_VOTES 12 + +#define CAM_ICP_HW_MGR_NAME_SIZE 32 + +#define CAM_ICP_IS_DEV_HW_EXIST(hw_cap_mask, hw_dev_type) \ +({ \ + (hw_cap_mask) & BIT((hw_dev_type)); \ +}) + +#define CAM_ICP_IS_VALID_HW_DEV_TYPE(type) \ +({ \ + ((type) < CAM_ICP_HW_MAX); \ +}) + +#define CAM_ICP_MAX_ICP_PROC_PER_DEV 1 + +/* The value used for invalid port IDs in the kernel */ +#define 
CAM_ICP_INVALID_IN_OUT_PORT_ID 0xFFFFFFFF; + +struct hfi_mini_dump_info; + +/** + * struct icp_hfi_mem_info + * @qtbl: Memory info of queue table + * @cmd_q: Memory info of command queue + * @msg_q: Memory info of message queue + * @dbg_q: Memory info of debug queue + * @sec_heap: Memory info of secondary heap + * @fw_buf: Memory info of firmware + * @qdss_buf: Memory info of qdss + * @sfr_buf: Memory info for sfr buffer + * @fw_uncached_generic: Memory info for fw uncached region + * @fw_uncached_global_sync: Memory info for global sync, in fw uncached region + * @synx_hwmutex: Memory info for synx hwmutex region mapped as device memory + * @ipc_hwmutex: Memory info for ipc hwmutex region mapped as device memory + * @global_cntr: Memory info for global cntr region mapped as device memory + * @llcc_reg: Memory info for llcc cache + * @shmem: Memory info for shared region + * @io_mem: Memory info for io region + * @fw_uncached: Memory info for fw uncached nested region + * @device: Memory info for the device region + * @fw_uncached_region: region support for fw uncached + */ +struct icp_hfi_mem_info { + struct cam_mem_mgr_memory_desc qtbl; + struct cam_mem_mgr_memory_desc cmd_q; + struct cam_mem_mgr_memory_desc msg_q; + struct cam_mem_mgr_memory_desc dbg_q; + struct cam_mem_mgr_memory_desc sec_heap; + struct cam_mem_mgr_memory_desc fw_buf; + struct cam_mem_mgr_memory_desc qdss_buf; + struct cam_mem_mgr_memory_desc sfr_buf; + struct cam_mem_mgr_memory_desc fw_uncached_generic; + struct cam_mem_mgr_memory_desc fw_uncached_global_sync; + struct cam_mem_mgr_memory_desc synx_hwmutex; + struct cam_mem_mgr_memory_desc ipc_hwmutex; + struct cam_mem_mgr_memory_desc global_cntr; + struct cam_mem_mgr_memory_desc llcc_reg; + struct cam_smmu_region_info shmem; + struct cam_smmu_region_info io_mem; + struct cam_smmu_region_info fw_uncached; + struct cam_smmu_region_info device; + bool fw_uncached_region; +}; + +/** + * struct hfi_cmd_work_data + * @type: Task type + * @data: 
Pointer to command data + * @request_id: Request id + */ +struct hfi_cmd_work_data { + uint32_t type; + void *data; + int32_t request_id; +}; + +/** + * struct hfi_msg_work_data + * @type: Task type + * @data: Pointer to message data + * @recover: Device needs recovery + */ +struct hfi_msg_work_data { + uint32_t type; + void *data; + bool recover; +}; + +/** + * struct clk_work_data + * @type: Task type + * @data: Pointer to clock info + */ +struct clk_work_data { + uint32_t type; + void *data; +}; + +/* + * struct icp_frame_info + * @request_id: request id + * @io_config: the address of io config + * @hfi_cfg_io_cmd: command struct to be sent to hfi + * @pkt: pointer to the packet header of current request + */ +struct icp_frame_info { + uint64_t request_id; + dma_addr_t io_config; + struct hfi_cmd_dev_async hfi_cfg_io_cmd; + struct cam_packet *pkt; +}; + +/** + * struct cam_icp_clk_bw_request_v2 + * + * @budget_ns: Time required to process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @reserved: Reserved filed. 
+ * @num_paths: Number of paths for per path bw vote + * @axi_path: Per path vote info for IPE/BPS + */ +struct cam_icp_clk_bw_req_internal_v2 { + uint64_t budget_ns; + uint32_t frame_cycles; + uint32_t rt_flag; + uint32_t reserved; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_ICP_MAX_PER_PATH_VOTES]; +}; + +#define HANG_DUMP_REGIONS_MAX 10 + +/** + * struct cam_hangdump_mem_regions - + * List of multiple memory descriptors of different + * regions + * + * @num_regions : Number of regions + * @map_info_array : Array of all the regions + */ +struct cam_hangdump_mem_regions { + uint32_t num_mem_regions; + struct cam_cmd_mem_region_info mem_info_array[HANG_DUMP_REGIONS_MAX]; +}; + +/** + * struct cam_icp_ctx_perf_stats - + * ICP general Perf stats per ctx + * + * @total_resp_time: accumulative FW response time + * @total_requests : accumulative submitted requests + */ +struct cam_icp_ctx_perf_stats { + uint64_t total_resp_time; + uint64_t total_requests; +}; + +/** + * struct cam_icp_hw_ctx_dev_info - + * Info of ICP devices (IPE/BPS/OFE) that can be attached to a context + * + * @dev_ctxt_cnt : device context count + */ +struct cam_icp_hw_ctx_dev_info { + uint32_t dev_ctxt_cnt; +}; + +/** + * struct cam_icp_scid_cfg - + * sys cache config information + * + * @scid_id cache scid id + * @staling_distance staling distance used for notification + * @llcc_staling_mode staling mode evict/forget + * @llcc_staling_op_type operation type capacity/notify + * @activated maintain the state of scid + */ +struct cam_icp_scid_cfg { + uint32_t scid_id; + uint32_t staling_distance; + uint32_t llcc_staling_mode; + uint32_t llcc_staling_op_type; + bool activated; +}; + +/** + * struct cam_icp_sys_cache_cfg - + * sys cache config request information + * + * @num num of cache need to configure + * @scid_cfg cache config information + */ +struct cam_icp_sys_cache_cfg { + uint32_t num; + struct cam_icp_scid_cfg scid_cfg[CAM_ICP_SYS_CACHE_TYPE_MAX]; +}; + + +/** 
+ * struct hfi_frame_process_info + * @hfi_frame_cmd: Frame process command info + * @bitmap: Bitmap for hfi_frame_cmd + * @bits: Used in hfi_frame_cmd bitmap + * @lock: Lock for hfi_frame_cmd + * @request_id: Request id list + * @num_out_resources: Number of out syncs + * @out_resource: Out sync info + * @fw_process_flag: Frame process flag + * @clk_info: Clock information for a request + * @clk_info_v2: Clock info for AXI bw voting v2 + * @frame_info: information needed to process request + * @submit_timestamp: Submit timestamp to hw + * @hangdump_mem_regions: Mem regions for hangdump + */ +struct hfi_frame_process_info { + struct hfi_cmd_dev_async hfi_frame_cmd[CAM_FRAME_CMD_MAX]; + void *bitmap; + size_t bits; + struct mutex lock; + uint64_t request_id[CAM_FRAME_CMD_MAX]; + uint32_t num_out_resources[CAM_FRAME_CMD_MAX]; + uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES]; + uint32_t in_resource[CAM_FRAME_CMD_MAX]; + uint32_t in_free_resource[CAM_FRAME_CMD_MAX]; + bool fw_process_flag[CAM_FRAME_CMD_MAX]; + struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX]; + struct cam_icp_clk_bw_req_internal_v2 clk_info_v2[CAM_FRAME_CMD_MAX]; + struct icp_frame_info frame_info[CAM_FRAME_CMD_MAX]; + ktime_t submit_timestamp[CAM_FRAME_CMD_MAX]; + struct cam_hangdump_mem_regions *hangdump_mem_regions; +}; + +/** + * struct cam_ctx_clk_info + * @curr_fc: Context latest request frame cycles + * @rt_flag: Flag to indicate real time request + * @base_clk: Base clock to process the request + * @reserved: Reserved field + * #uncompressed_bw: Current bandwidth voting + * @compressed_bw: Current compressed bandwidth voting + * @clk_rate: Supported clock rates for the context + * @num_paths: Number of valid AXI paths + * @axi_path: ctx based per path bw vote + * @bw_included: Whether bw of this context is included in overal voting + */ +struct cam_ctx_clk_info { + uint32_t curr_fc; + uint32_t rt_flag; + uint32_t base_clk; + uint32_t reserved; + uint64_t uncompressed_bw; + 
uint64_t compressed_bw; + int32_t clk_rate[CAM_MAX_VOTE]; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_ICP_MAX_PER_PATH_VOTES]; + bool bw_included; +}; + +/** + * struct icp_cmd_generic_blob + * @ctx: Current context info + * @frame_info_idx: Index used for frame process info + * @io_buf_addr: pointer to io buffer address + */ +struct icp_cmd_generic_blob { + struct cam_icp_hw_ctx_data *ctx; + uint32_t frame_info_idx; + dma_addr_t *io_buf_addr; +}; + +/** + * struct cam_icp_clk_info + * @base_clk: Base clock to process request + * @curr_clk: Current clock of hadrware + * @prev_clk: Previous clock of hadrware + * @threshold: Threshold for overclk count + * @over_clked: Over clock count + * @uncompressed_bw: Current bandwidth voting + * @compressed_bw: Current compressed bandwidth voting + * @num_paths: Number of AXI vote paths + * @axi_path: Current per path bw vote info + * @watch_dog: watchdog timer handle + * @watch_dog_reset_counter: Counter for watch dog reset + * @timeout_cb_data: private cb data to be used when device timeouts + */ +struct cam_icp_clk_info { + uint32_t base_clk; + uint32_t curr_clk; + uint32_t prev_clk; + uint32_t threshold; + uint32_t over_clked; + uint64_t uncompressed_bw; + uint64_t compressed_bw; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_ICP_MAX_PER_PATH_VOTES]; + struct cam_req_mgr_timer *watch_dog; + uint32_t watch_dog_reset_counter; + void *timeout_cb_data; +}; + +/** + * struct cam_icp_hw_device_info + * @dev_name: name of IPE/BPS/OFE device + * @hw_dev_type: type of IPE/BPS/OFE device + * @dev_intf: interface to device hardware layer + * @clk_info: clock info for the device + * @dev_ctx_info: context related info for the device + * @hw_dev_cnt: number of devices for this particular device type + * Exa - for IPE device - IPE0 and IPE1 + */ +struct cam_icp_hw_device_info { + char *dev_name; + enum cam_icp_hw_type hw_dev_type; + struct cam_hw_intf **dev_intf; + struct 
cam_icp_clk_info clk_info; + struct cam_icp_hw_ctx_dev_info dev_ctx_info; + uint32_t hw_dev_cnt; +}; + +/** + * struct cam_icp_hw_ctx_info + * @need_lock: Indicate whether it's needed to acquire ctx mutex + * @ctx_id: Index of ctx data in active ctx list + * @ctx_acquired_timestamp: ctx acquired timestamp + * @hw_mgr: HW MGR of the context + * @ctx_data: Point to the exact ctx data + */ +struct cam_icp_hw_ctx_info { + bool need_lock; + uint32_t ctx_id; + uint64_t ctx_acquired_timestamp; + struct cam_icp_hw_mgr *hw_mgr; + struct cam_icp_hw_ctx_data *ctx_data; +}; + +/** + * struct cam_icp_hw_ctx_data + * @list: List member used to append this node to a linked list + * @context_priv: Context private data + * @hw_mgr_priv: HW MGR of the context + * @device_info: device info associated with this ctx + * @fw_handle: Firmware handle + * @scratch_mem_size: Scratch memory size + * @icp_dev_acquire_info: Acquire device info + * @acquire_dev_api_version: API version of this acquire + * @ctxt_event_cb: Context callback function + * @state: context state + * @role: Role of a context in case of chaining + * @chain_ctx: Peer context + * @hfi_frame_process: Frame process command + * @wait_complete: Completion info + * @ctx_id: Context Id + * @bw_config_version: BW config version indicator + * @clk_info: Current clock info of a context + * @watch_dog: watchdog timer handle + * @watch_dog_reset_counter: Counter for watch dog reset + * @last_flush_req: last flush req for this ctx + * @ctx_id_string: string helps to identify context (primarily used for logging) + * @perf_stats: performance statistics info + * @evt_inject_params: Event injection data for hw_mgr_ctx + * @abort_timed_out: Indicates if abort timed out + * @sys_cache_cfg: sys cache config information + * @port_security_map: security status per port in a secure usecase + */ +struct cam_icp_hw_ctx_data { + struct list_head list; + void *context_priv; + void *hw_mgr_priv; + struct cam_icp_hw_device_info *device_info; + 
struct mutex ctx_mutex; + uint32_t fw_handle; + uint32_t scratch_mem_size; + struct cam_icp_acquire_dev_info_unified *icp_dev_acquire_info; + uint32_t acquire_dev_api_version; + cam_hw_event_cb_func ctxt_event_cb; + uint32_t state; + uint32_t role; + struct cam_icp_hw_ctx_data *chain_ctx; + struct hfi_frame_process_info hfi_frame_process; + struct completion wait_complete; + uint32_t ctx_id; + uint32_t bw_config_version; + struct cam_ctx_clk_info clk_info; + struct cam_req_mgr_timer *watch_dog; + uint32_t watch_dog_reset_counter; + uint64_t last_flush_req; + char ctx_id_string[128]; + struct cam_icp_ctx_perf_stats perf_stats; + struct cam_hw_inject_evt_param evt_inject_params; + bool abort_timed_out; + struct cam_icp_sys_cache_cfg sys_cache_cfg; + bool port_security_map[CAM_MAX_OUTPUT_PORTS_PER_DEVICE]; +}; + +/** + * struct cam_icp_hw_active_ctx_info + * @active_ctx_list: Linked list for allocated active ctx + * @active_ctx_bitmap: Indicate which ctx data is available + */ +struct cam_icp_hw_active_ctx_info { + struct list_head active_ctx_list; + DECLARE_BITMAP(active_ctx_bitmap, CAM_ICP_CTX_MAX); +}; + +/** + * struct cam_icp_hw_mgr + * @hw_mgr_mutex: Mutex for ICP hardware manager + * @hw_mgr_lock: Spinlock for ICP hardware manager + * @dev_info: array of available device info (IPE/BPS/OFE) + * for the hw mgr + * @num_dev_info: number of device info for available device for the hw mgr + * @dev_info_idx: map hw dev type to index for device info array indexing + * @ctx_acquired_timestamp: ctx acquired timestamp array + * @icp_dev_intf: ICP device interface + * @ctx_mutex: Mutex for all possbile ctx data + * @active_ctx_info: Active context info + * @mini_dump_cb: Mini dump cb + * @hw_mgr_name: name of the hw mgr + * @hw_mgr_id: ID of the hw mgr, equivalent to hw mgr index + * @iommu_hdl: Non secure IOMMU handle + * @iommu_sec_hdl: Secure IOMMU handle + * @hfi_handle: hfi handle for this ICP hw mgr + * @synx_core_id: Synx core ID if applicable + * @hfi_mem: Memory 
for hfi + * @cmd_work: Work queue for hfi commands + * @msg_work: Work queue for hfi messages + * @timer_work: Work queue for timer watchdog + * @msg_buf: Drain Buffer for message data from firmware + * Buffer is an array of type __u32, total size + * would be sizeof(_u32) * queue_size + * @dbg_buf: Drain Buffer for debug data from firmware + * @icp_complete: Completion info + * @cmd_work_data: Pointer to command work queue task + * @msg_work_data: Pointer to message work queue task + * @timer_work_data: Pointer to timer work queue task + * @ctxt_cnt: Active context count + * @dentry: Debugfs entry + * @icp_debug_clk: Set clock based on debug value + * @icp_default_clk: Set this clok if user doesn't supply + * @secure_mode: Flag to enable/disable secure camera + * @icp_debug_type : entry to enable FW debug message/qdss + * @icp_dbg_lvl : debug level set to FW. + * @icp_fw_dump_lvl : level set for dumping the FW data + * @icp_fw_ramdump_lvl : level set for FW ram dumps + * and ICP was reinitialized + * @recovery: Flag to validate if in previous session FW + * reported a fatal error or wdt. If set FW is + * re-downloaded for new camera session. This + * would be set only if SSR also failed to reload ICP. + * @load_in_process: To indicate if FW is being loaded (cold boot) + * @frame_in_process: Counter for frames in process + * @frame_in_process_ctx_id: Contxt id processing frame + * @abort_in_process: Abort is in progress + * @hw_cap_mask: device capability mask to indicate which devices type + * are available in this hw mgr + * @num_pid: Number of cam hw pids linked to this icp hw mgr + * @pid: Exact pid values linked to this icp hw mgr + * @icp_booted: Processor is booted i.e. 
firmware loaded + * @icp_resumed: Processor is powered on + * @icp_pc_flag: Flag to enable/disable power collapse + * @dev_pc_flag: Flag to enable/disable + * power collapse for ipe & bps + * @icp_use_pil: Flag to indicate usage of PIL framework + * @icp_jtag_debug: entry to enable ICP JTAG debugging + * @disable_ubwc_comp: Disable UBWC compression + * @synx_signaling_en: core to core fencing is enabled + * using synx + * @fw_based_sys_caching: to check llcc cache feature is enabled or not + * @hfi_init_done: hfi initialisation is done + * @num_secure_contexts: Number of the existing secure contexts + */ +struct cam_icp_hw_mgr { + struct mutex hw_mgr_mutex; + spinlock_t hw_mgr_lock; + struct cam_icp_hw_device_info *dev_info; + uint32_t num_dev_info; + int8_t dev_info_idx[CAM_ICP_HW_MAX]; + uint64_t ctx_acquired_timestamp[CAM_ICP_CTX_MAX]; + struct cam_hw_intf *icp_dev_intf; + struct mutex *ctx_mutex; + struct cam_icp_hw_active_ctx_info active_ctx_info; + cam_icp_mini_dump_cb mini_dump_cb; + char hw_mgr_name[CAM_ICP_HW_MGR_NAME_SIZE]; + uint32_t hw_mgr_id; + + int32_t iommu_hdl; + int32_t iommu_sec_hdl; + int32_t hfi_handle; + enum cam_sync_synx_supported_cores synx_core_id; + struct icp_hfi_mem_info hfi_mem; + struct cam_req_mgr_core_workq *cmd_work; + struct cam_req_mgr_core_workq *msg_work; + struct cam_req_mgr_core_workq *timer_work; + uint32_t msg_buf[ICP_MSG_BUF_SIZE_IN_WORDS]; + uint32_t dbg_buf[ICP_DBG_BUF_SIZE_IN_WORDS]; + struct completion icp_complete; + struct hfi_cmd_work_data *cmd_work_data; + struct hfi_msg_work_data *msg_work_data; + struct hfi_msg_work_data *timer_work_data; + uint32_t ctxt_cnt; + struct dentry *dentry; + uint64_t icp_debug_clk; + uint64_t icp_default_clk; + uint32_t secure_mode; + u64 icp_debug_type; + u64 icp_dbg_lvl; + u64 icp_fw_dump_lvl; + u32 icp_fw_ramdump_lvl; + atomic_t recovery; + atomic_t load_in_process; + uint64_t icp_svs_clk; + atomic_t frame_in_process; + int frame_in_process_ctx_id; + atomic_t abort_in_process; + 
uint32_t hw_cap_mask; + uint32_t num_pid; + uint32_t pid[CAM_ICP_PID_NUM_MAX]; + bool icp_booted; + bool icp_resumed; + bool icp_pc_flag; + bool dev_pc_flag; + bool icp_use_pil; + bool icp_jtag_debug; + bool disable_ubwc_comp; + bool synx_signaling_en; + bool fw_based_sys_caching; + bool hfi_init_done; + uint32_t num_secure_contexts[CAM_ICP_MAX_NUM_OF_DEV_TYPES]; +}; + +/** + * struct cam_icp_mini_dump_acquire_info - ICP mini dump device info + * + * @in_res: intput resource info + * @out_res: output resource info + * @num_out_res: number of output resources + * @dev_type: device type (IPE_RT/IPE_NON_RT/BPS) + * @secure_mode: camera mode (secure/non secure) + * @acquire_dev_api_version: API version of this acquire + */ +struct cam_icp_mini_dump_acquire_info { + struct cam_icp_res_info_unified in_res; + struct cam_icp_res_info_unified out_res[ICP_MAX_OUTPUT_SUPPORTED]; + uint16_t num_out_res; + uint8_t dev_type; + uint8_t secure_mode; + uint32_t acquire_dev_api_version; +}; + +/** + * struct hfi_frame_process_info + * @request_id: Request id list + * @num_out_res: Number of out syncs + * @out_res: Out sync info + * @in_resource: In sync info + * @submit_timestamp: Submit timestamp to hw + * @fw_process_flag: Frame process flag + */ +struct hfi_frame_mini_dump_info { + uint64_t request_id[CAM_FRAME_CMD_MAX]; + uint32_t num_out_res[CAM_FRAME_CMD_MAX]; + uint32_t out_res[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES]; + uint32_t in_resource[CAM_FRAME_CMD_MAX]; + ktime_t submit_timestamp[CAM_FRAME_CMD_MAX]; + uint8_t fw_process_flag[CAM_FRAME_CMD_MAX]; +}; + +/** + * struct cam_icp_hw_ctx_mini_dump + * @acquire: Acquire device info + * @hfi_frame: Frame command + * @hw_ctx: Context private data + * @state: context state + * @ctx_id_string: Context id string + * @ctx_id: Context Id + */ +struct cam_icp_hw_ctx_mini_dump { + struct cam_icp_mini_dump_acquire_info acquire; + struct hfi_frame_mini_dump_info hfi_frame; + void *hw_ctx; + char ctx_id_string[128]; + uint8_t state; + uint8_t 
ctx_id; +}; + +/** + * struct cam_icp_hw_mini_dump_info + * + * @hw_mgr_name: name of the hw mgr that is dumped + * @ctx: Context for minidump + * @hfi_info: hfi info + * @hfi_mem_info: hfi mem info + * @dev_info: array of device info + * @fw_img: FW image + * @recovery: To indicate if recovery is on + * @num_context: number of active context + * @num_device_info: number of available device info + * @icp_booted: Indicate if ICP is booted + * @icp_resumed: Indicate if ICP is resumed + * @disable_ubwc_comp: Indicate if ubws comp is disabled + * @icp_pc_flag: Is ICP PC enabled + * @dev_pc_flag: Is IPE BPS PC enabled + * @icp_use_pil: Is PIL used + */ +struct cam_icp_hw_mini_dump_info { + char hw_mgr_name[CAM_ICP_HW_MGR_NAME_SIZE]; + struct cam_icp_hw_ctx_mini_dump *ctx[CAM_ICP_CTX_MAX]; + struct hfi_mini_dump_info hfi_info; + struct icp_hfi_mem_info hfi_mem_info; + struct cam_icp_hw_device_info dev_info[CAM_ICP_DEV_NUM]; + void *fw_img; + uint32_t recovery; + uint32_t num_context; + uint32_t num_device_info; + bool icp_booted; + bool icp_resumed; + bool disable_ubwc_comp; + bool icp_pc_flag; + bool dev_pc_flag; + bool icp_use_pil; +}; + +/** + * struct cam_icp_mgr_hw_args + * + * @icp_pc: Indicate if we must enter power collapse, set true + * if we should enter power collapse. + * @hfi_setup: Indicate if we should hfi initialization is done or needs to be done, + * set true if hfi init needs to be done or was done. + * @use_proxy_boot_up: Indicate if proxy fw loading is needed, + * set true if proxy firmware loading is needed. + * + * @skip_icp_init: Indicates if the icp init needs to be done. 
+ */ +struct cam_icp_mgr_hw_args { + bool icp_pc; + bool hfi_setup; + bool use_proxy_boot_up; + bool skip_icp_init; +}; + +static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args); +static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args); +static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr, void *resume_args); +static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr, void *args); +#endif /* CAM_ICP_HW_MGR_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h new file mode 100644 index 0000000000..c1fe96a9d7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_BPS_HW_INTF_H +#define CAM_BPS_HW_INTF_H + +#include +#include +#include "cam_hw_mgr_intf.h" +#include "cam_icp_hw_intf.h" + + +#endif /* CAM_BPS_HW_INTF_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h new file mode 100644 index 0000000000..cb7b5f62be --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_ICP_HW_INTF_H +#define CAM_ICP_HW_INTF_H + +#include "cam_vmrm_interface.h" + +#define CAM_ICP_CMD_BUF_MAX_SIZE 128 +#define CAM_ICP_MSG_BUF_MAX_SIZE CAM_ICP_CMD_BUF_MAX_SIZE + +#define CAM_ICP_BW_CONFIG_UNKNOWN 0 +#define CAM_ICP_BW_CONFIG_V1 1 +#define CAM_ICP_BW_CONFIG_V2 2 + +#define CAM_ICP_UBWC_COMP_EN BIT(1) + +#define HFI_MAX_POLL_TRY 5 +#define PC_POLL_DELAY_US 100 +#define PC_POLL_TIMEOUT_US 10000 + +#define CAM_ICP_DUMP_STATUS_REGISTERS BIT(0) +#define CAM_ICP_DUMP_CSR_REGISTERS BIT(1) + +#define CAM_ICP_MAX_ICP_HW_TYPE 2 +#define CAM_ICP_DEV_START_IDX CAM_ICP_MAX_ICP_HW_TYPE + +#define ICP_CAPS_MASK_IDX 0 +#define IPE_CAPS_MASK_IDX 0 +#define BPS_CAPS_MASK_IDX 0 +#define OFE_CAPS_MASK_IDX 1 + +/* max caps mask is max value of all device caps mask index added by 1 */ +#define MAX_HW_CAPS_MASK 2 + +/* + * icp inter vm commnication timeout must be higher, + * as it has to wait for completion of icp power related operations + */ +#define CAM_ICP_INTER_VM_COMM_TIMEOUT_US 10000 + +#define CAM_BPS_HW_NUM_MAX 1 +#define CAM_OFE_HW_NUM_MAX 1 +#define CAM_IPE_HW_NUM_MAX 1 +#define CAM_ICP_HW_NUM_MAX 2 + +#define CAM_ICP_PID_NUM_MAX 10 + +enum cam_icp_hw_type { + CAM_ICP_HW_ICP_V1, + CAM_ICP_HW_ICP_V2, + CAM_ICP_DEV_IPE, + CAM_ICP_DEV_BPS, + CAM_ICP_DEV_OFE, + CAM_ICP_HW_MAX, +}; + +#define CAM_ICP_DEV_NUM (CAM_ICP_HW_MAX - CAM_ICP_DEV_START_IDX) + +enum cam_icp_dev_cmd_type { + CAM_ICP_DEV_CMD_POWER_COLLAPSE, + CAM_ICP_DEV_CMD_POWER_RESUME, + CAM_ICP_DEV_CMD_SET_FW_BUF, + CAM_ICP_DEV_CMD_VOTE_CPAS, + CAM_ICP_DEV_CMD_CPAS_START, + CAM_ICP_DEV_CMD_CPAS_STOP, + CAM_ICP_DEV_CMD_UPDATE_CLK, + CAM_ICP_DEV_CMD_DISABLE_CLK, + CAM_ICP_DEV_CMD_RESET, + CAM_ICP_DEV_CMD_MAX +}; + +enum cam_icp_cmd_type { + CAM_ICP_CMD_PROC_SHUTDOWN, + CAM_ICP_CMD_PROC_BOOT, + CAM_ICP_CMD_POWER_COLLAPSE, + CAM_ICP_CMD_POWER_RESUME, + CAM_ICP_CMD_ACQUIRE, + CAM_ICP_TEST_IRQ, + CAM_ICP_SEND_INIT, + CAM_ICP_CMD_VOTE_CPAS, + CAM_ICP_CMD_CPAS_START, + CAM_ICP_CMD_CPAS_STOP, + 
CAM_ICP_CMD_UBWC_CFG, + CAM_ICP_CMD_PC_PREP, + CAM_ICP_CMD_CLK_UPDATE, + CAM_ICP_CMD_HW_DUMP, + CAM_ICP_CMD_HW_MINI_DUMP, + CAM_ICP_CMD_HW_REG_DUMP, + CAM_ICP_CMD_SET_HFI_HANDLE, + CAM_ICP_CMD_PREP_BOOT, + CAM_ICP_CMD_PREP_SHUTDOWN, + CAM_ICP_CMD_MAX, +}; + +struct cam_icp_irq_cb { + int32_t (*cb)(void *data, bool recover); + void *data; +}; + +/** + * struct cam_icp_boot_args - Boot arguments for ICP processor + * + * @firmware.iova: device vaddr to firmware region + * @firmware.kva: kernel vaddr to firmware region + * @firmware.len: length of firmware region + * @irq_cb: irq callback + * @debug_enabled: processor will be booted with debug enabled + * @use_sec_pil: If set to true, use secure PIL for load + */ +struct cam_icp_boot_args { + struct { + uint32_t iova; + uint64_t kva; + uint64_t len; + } firmware; + + struct cam_icp_irq_cb irq_cb; + bool debug_enabled; + bool use_sec_pil; +}; + +/** + * struct cam_icp_dev_clk_update_cmd - Payload for hw manager command + * + * @curr_clk_rate: clk rate to HW + * @clk_level: clk level corresponding to the clk rate + * populated as output while the clk is being + * updated to the given rate + * @dev_pc_enable: power collpase enable flag + */ +struct cam_icp_dev_clk_update_cmd { + uint32_t curr_clk_rate; + int32_t clk_level; + bool dev_pc_enable; +}; + +/** + * struct cam_icp_ubwc_cfg_cmd - ubwc cmd to send + * + * @ubwc_cfg_dev_mask: mask to indicate which device ubwc cfg to send to fw + * @disable_ubwc_comp: flag to force disable ubwc + */ +struct cam_icp_ubwc_cfg_cmd { + uint32_t ubwc_cfg_dev_mask; + bool disable_ubwc_comp; +}; + +/** + * struct cam_icp_hw_intf_data - ICP hw intf data + * + * @Brief: ICP hw intf pointer and pid list data + * + * @icp_hw_intf: ICP hw intf pointer + * @num_pid: Number of pids for given hw + * @pid: ICP hw pid values + */ +struct cam_icp_hw_intf_data { + struct cam_hw_intf *hw_intf; + uint32_t num_pid; + uint32_t *pid; +}; + +#endif diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h new file mode 100644 index 0000000000..9e42206853 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_IPE_HW_INTF_H +#define CAM_IPE_HW_INTF_H + +#include +#include +#include "cam_hw_mgr_intf.h" +#include "cam_icp_hw_intf.h" + + +#endif /* CAM_IPE_HW_INTF_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_ofe_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_ofe_hw_intf.h new file mode 100644 index 0000000000..b81c0df9f6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_hw_mgr/include/cam_ofe_hw_intf.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_OFE_HW_INTF_H +#define CAM_OFE_HW_INTF_H + +#include +#include +#include "cam_hw_mgr_intf.h" +#include "cam_icp_hw_intf.h" + + +#endif /* CAM_OFE_HW_INTF_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/cam_icp_proc.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/cam_icp_proc.c new file mode 100644 index 0000000000..713d206fe3 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/cam_icp_proc.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_debug_util.h" +#include "cam_icp_proc.h" +#include "cam_mem_mgr_api.h" + +uint32_t icp_request_cnt[CAM_ICP_MAX_ICP_HW_TYPE]; + +static int cam_icp_get_device_num(enum cam_icp_hw_type dev_type, uint32_t *num_dev) +{ + int rc = 0; + + switch (dev_type) { + case CAM_ICP_HW_ICP_V1: + *num_dev = cam_icp_v1_get_device_num(); + break; + case CAM_ICP_HW_ICP_V2: + *num_dev = cam_icp_v2_get_device_num(); + break; + default: + CAM_ERR(CAM_ICP, "Invalid dev type: %d", dev_type); + rc = -EINVAL; + } + + return rc; +} + +int cam_icp_alloc_processor_devs(struct device_node *np, enum cam_icp_hw_type *icp_hw_type, + struct cam_hw_intf ***devices, uint32_t *hw_dev_cnt) +{ + uint32_t num_icp_found = 0, num_icp_listed; + int rc, i; + + if (!np) { + CAM_ERR(CAM_ICP, "Invalid device node"); + return -EINVAL; + } + + rc = of_property_read_u32(np, "num-icp", &num_icp_listed); + if (rc) { + CAM_ERR(CAM_ICP, "read num-icp failed rc=%d", rc); + return -ENODEV; + } + + if (!num_icp_listed) { + CAM_ERR(CAM_ICP, "No ICP device %d", num_icp_listed); + return -EINVAL; + } + + /* Only one version of ICP processor supported per ICP subdevice */ + for (i = 0; i < CAM_ICP_MAX_ICP_HW_TYPE; i++) { + rc = cam_icp_get_device_num(i, &num_icp_found); + if (rc) + return rc; + + if (num_icp_found) { + *icp_hw_type = i; + break; + } + } + + if (i == CAM_ICP_MAX_ICP_HW_TYPE) { + CAM_ERR(CAM_ICP, "No ICP device probed"); + return -ENODEV; + } + + icp_request_cnt[i] += num_icp_listed; + + if (icp_request_cnt[i] > num_icp_found) { + CAM_ERR(CAM_ICP, + "number of ICP_V%u total requested: %u exceeds number of icp hw available: %u", + i+1, icp_request_cnt[i], num_icp_found); + return -EINVAL; + } + + *devices = CAM_MEM_ZALLOC_ARRAY(num_icp_listed, sizeof(**devices), GFP_KERNEL); + if (!(*devices)) { + CAM_ERR(CAM_ICP, + "ICP device memory allocation failed. 
Num devices: %u", + num_icp_listed); + return -ENOMEM; + } + + hw_dev_cnt[i] = num_icp_listed; + + CAM_DBG(CAM_ICP, "allocated device iface for %s", + i == CAM_ICP_HW_ICP_V1 ? "ICP_V1" : "ICP_V2"); + + return rc; +} + +int cam_icp_get_hfi_device_ops(uint32_t hw_type, const struct hfi_ops **hfi_proc_ops) +{ + int rc = 0; + + switch (hw_type) { + case CAM_ICP_HW_ICP_V1: + cam_icp_v1_populate_hfi_ops(hfi_proc_ops); + break; + case CAM_ICP_HW_ICP_V2: + cam_icp_v2_populate_hfi_ops(hfi_proc_ops); + break; + default: + rc = -EINVAL; + CAM_ERR(CAM_ICP, "Invalid ICP device type: %u", hw_type); + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/cam_icp_proc.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/cam_icp_proc.h new file mode 100644 index 0000000000..20fb7ceae2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/cam_icp_proc.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_icp_v1_core.h" +#include "cam_icp_v2_core.h" +#include "hfi_intf.h" + +#define CAM_ICP_GET_PROC_DEV_INTF(devices) \ + (devices[CAM_ICP_HW_ICP_V1] ? devices[CAM_ICP_HW_ICP_V1][0] : \ + devices[CAM_ICP_HW_ICP_V2][0]) + +/** + * @brief : Get ICP device type (ICP_V1/ICP_V2/...) 
+ */ +int cam_icp_alloc_processor_devs(struct device_node *np, enum cam_icp_hw_type *icp_hw_type, + struct cam_hw_intf ***devices, uint32_t *hw_dev_cnt); + +/** + * @brief : Get device operations per device type + */ +int cam_icp_get_hfi_device_ops(uint32_t hw_type, const struct hfi_ops **hif_ops); + +/** + * @brief : Get number of icp_v2 hw instances + */ +uint32_t cam_icp_v2_get_device_num(void); + +/** + * @brief : Get number of icp_v1 hw instances + */ +uint32_t cam_icp_v1_get_device_num(void); + diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_proc_common.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_proc_common.c new file mode 100644 index 0000000000..b7728529d7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_proc_common.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_icp_proc_common.h" +#include "cam_compat.h" +#include "cam_icp_hw_intf.h" +#include "hfi_intf.h" + +int32_t cam_icp_validate_fw(const uint8_t *elf, + uint32_t machine_id) +{ + struct elf32_hdr *elf_hdr; + + if (!elf) { + CAM_ERR(CAM_ICP, "Invalid params"); + return -EINVAL; + } + + elf_hdr = (struct elf32_hdr *)elf; + + if (memcmp(elf_hdr->e_ident, ELFMAG, SELFMAG)) { + CAM_ERR(CAM_ICP, "ICP elf identifier is failed"); + return -EINVAL; + } + + /* check architecture */ + if (elf_hdr->e_machine != machine_id) { + CAM_ERR(CAM_ICP, "unsupported arch: 0x%x", elf_hdr->e_machine); + return -EINVAL; + } + + /* check elf bit format */ + if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) { + CAM_ERR(CAM_ICP, "elf doesn't support 32 bit format"); + return -EINVAL; + } + + return 0; +} + +int32_t cam_icp_get_fw_size( + const uint8_t *elf, uint32_t *fw_size) +{ + int32_t rc = 0; + int32_t i = 0; + uint32_t num_prg_hdrs; + unsigned char *icp_prg_hdr_tbl; + uint32_t seg_mem_size = 0; + struct elf32_hdr *elf_hdr; + struct elf32_phdr *prg_hdr; + + if (!elf || !fw_size) { + CAM_ERR(CAM_ICP, "invalid args"); + return -EINVAL; + } + + *fw_size = 0; + + elf_hdr = (struct elf32_hdr *)elf; + num_prg_hdrs = elf_hdr->e_phnum; + icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff; + prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0]; + + if (!prg_hdr) { + CAM_ERR(CAM_ICP, "failed to get elf program header attr"); + return -EINVAL; + } + + CAM_DBG(CAM_ICP, "num_prg_hdrs = %d", num_prg_hdrs); + for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) { + if (prg_hdr->p_flags == 0) + continue; + + seg_mem_size = (prg_hdr->p_memsz + prg_hdr->p_align - 1) & + ~(prg_hdr->p_align - 1); + seg_mem_size += prg_hdr->p_paddr; + CAM_DBG(CAM_ICP, "memsz:%x align:%x addr:%x seg_mem_size:%x", + (int)prg_hdr->p_memsz, (int)prg_hdr->p_align, + (int)prg_hdr->p_paddr, (int)seg_mem_size); + if (*fw_size < seg_mem_size) + *fw_size = seg_mem_size; + + } + + if (*fw_size == 0) { + CAM_ERR(CAM_ICP, 
"invalid elf fw file"); + return -EINVAL; + } + + return rc; +} + +int32_t cam_icp_program_fw(const uint8_t *elf, + uintptr_t fw_kva_addr) +{ + int32_t rc = 0; + uint32_t num_prg_hdrs; + unsigned char *icp_prg_hdr_tbl; + int32_t i = 0; + u8 *dest; + u8 *src; + struct elf32_hdr *elf_hdr; + struct elf32_phdr *prg_hdr; + + elf_hdr = (struct elf32_hdr *)elf; + num_prg_hdrs = elf_hdr->e_phnum; + icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff; + prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0]; + + if (!prg_hdr) { + CAM_ERR(CAM_ICP, "failed to get elf program header attr"); + return -EINVAL; + } + + for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) { + if (prg_hdr->p_flags == 0) + continue; + + CAM_DBG(CAM_ICP, "Loading FW header size: %u paddr: %pK", + prg_hdr->p_filesz, prg_hdr->p_paddr); + if (prg_hdr->p_filesz != 0) { + src = (u8 *)((u8 *)elf + prg_hdr->p_offset); + dest = (u8 *)(((u8 *)fw_kva_addr) + + prg_hdr->p_paddr); + + memcpy_toio(dest, src, prg_hdr->p_filesz); + } + } + + return rc; +} + +int cam_icp_proc_cpas_vote(uint32_t cpas_handle, + struct cam_icp_cpas_vote *vote) +{ + int rc; + + if (!vote) + return -EINVAL; + + if (vote->ahb_vote_valid) { + rc = cam_cpas_update_ahb_vote(cpas_handle, &vote->ahb_vote); + if (rc) { + CAM_ERR(CAM_ICP, "AHB vote update failed rc=%d", rc); + return rc; + } + } + + if (vote->axi_vote_valid) { + rc = cam_cpas_update_axi_vote(cpas_handle, &vote->axi_vote); + if (rc) { + CAM_ERR(CAM_ICP, "AXI vote update failed rc=%d", rc); + return rc; + } + } + + return 0; +} + +int cam_icp_proc_mini_dump(struct cam_icp_hw_dump_args *args, + uintptr_t fw_kva_addr, uint64_t fw_buf_len) +{ + u8 *dest; + u8 *src; + struct cam_icp_hw_dump_args *dump_args = args; + + if (!dump_args) { + CAM_ERR(CAM_ICP, "Invalid param %pK", dump_args); + return -EINVAL; + } + + if (!fw_kva_addr || !dump_args->cpu_addr) { + CAM_ERR(CAM_ICP, "invalid params %pK, 0x%zx", + fw_kva_addr, dump_args->cpu_addr); + return -EINVAL; + } + + if (dump_args->buf_len < 
fw_buf_len) { + CAM_WARN(CAM_ICP, "Insufficient Len %lu fw_len %llu", + dump_args->buf_len, fw_buf_len); + return -ENOSPC; + } + + dest = (u8 *)dump_args->cpu_addr; + src = (u8 *)fw_kva_addr; + memcpy_fromio(dest, src, fw_buf_len); + dump_args->offset = fw_buf_len; + + return 0; +} + +static int cam_icp_proc_validate_ubwc_cfg(struct cam_icp_ubwc_cfg *ubwc_cfg, + uint32_t ubwc_cfg_dev_mask) +{ + uint32_t found_ubwc_cfg_mask = ubwc_cfg->found_ubwc_cfg_mask; + + if ((ubwc_cfg_dev_mask & BIT(CAM_ICP_DEV_IPE)) && + !(found_ubwc_cfg_mask & BIT(CAM_ICP_DEV_IPE))) { + CAM_ERR(CAM_ICP, "IPE does not have UBWC cfg value"); + return -ENODATA; + } + + if ((ubwc_cfg_dev_mask & BIT(CAM_ICP_DEV_BPS)) && + !(found_ubwc_cfg_mask & BIT(CAM_ICP_DEV_BPS))) { + CAM_ERR(CAM_ICP, "BPS does not have UBWC cfg value"); + return -ENODATA; + } + + if ((ubwc_cfg_dev_mask & BIT(CAM_ICP_DEV_OFE)) && + !(found_ubwc_cfg_mask & BIT(CAM_ICP_DEV_OFE))) { + CAM_ERR(CAM_ICP, "OFE does not have UBWC cfg value"); + return -ENODATA; + } + + return 0; +} + +int cam_icp_proc_ubwc_configure(struct cam_icp_proc_ubwc_cfg_cmd *ubwc_cfg_cmd, + bool force_disable_ubwc, int hfi_handle) +{ + struct cam_icp_ubwc_cfg *ubwc_cfg; + int i = 0, ddr_type, rc; + uint32_t ipe_ubwc_cfg[ICP_UBWC_CFG_MAX] = {0}; + uint32_t bps_ubwc_cfg[ICP_UBWC_CFG_MAX] = {0}; + uint32_t ofe_ubwc_cfg[ICP_UBWC_CFG_MAX] = {0}; + + if (!ubwc_cfg_cmd) { + CAM_ERR(CAM_ICP, "ubwc config command is NULL"); + return -EINVAL; + } + + ubwc_cfg = ubwc_cfg_cmd->ubwc_cfg; + rc = cam_icp_proc_validate_ubwc_cfg(ubwc_cfg, ubwc_cfg_cmd->ubwc_cfg_dev_mask); + if (rc) { + CAM_ERR(CAM_ICP, "UBWC config failed validation rc:%d", rc); + return rc; + } + + ddr_type = cam_get_ddr_type(); + + if (ddr_type == DDR_TYPE_LPDDR5 || ddr_type == DDR_TYPE_LPDDR5X) + i = 1; + + ipe_ubwc_cfg[0] = ubwc_cfg->ipe_fetch[i]; + ipe_ubwc_cfg[1] = ubwc_cfg->ipe_write[i]; + + bps_ubwc_cfg[0] = ubwc_cfg->bps_fetch[i]; + bps_ubwc_cfg[1] = ubwc_cfg->bps_write[i]; + + ofe_ubwc_cfg[0] = 
ubwc_cfg->ofe_fetch[i]; + ofe_ubwc_cfg[1] = ubwc_cfg->ofe_write[i]; + + if (force_disable_ubwc) { + ipe_ubwc_cfg[1] &= ~CAM_ICP_UBWC_COMP_EN; + bps_ubwc_cfg[1] &= ~CAM_ICP_UBWC_COMP_EN; + ofe_ubwc_cfg[1] &= ~CAM_ICP_UBWC_COMP_EN; + CAM_DBG(CAM_ICP, + "Force disable UBWC compression, ipe_ubwc_cfg: 0x%x, bps_ubwc_cfg: 0x%x ofe_ubwc_cfg: 0x%x", + ipe_ubwc_cfg[1], bps_ubwc_cfg[1], ofe_ubwc_cfg[1]); + } + + rc = hfi_cmd_ubwc_config_ext(hfi_handle, ipe_ubwc_cfg, bps_ubwc_cfg, ofe_ubwc_cfg); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to write UBWC configure rc=%d", rc); + return rc; + } + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_proc_common.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_proc_common.h new file mode 100644 index 0000000000..fb06b6f236 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_proc_common.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_ICP_UTILS_H_ +#define _CAM_ICP_UTILS_H_ + +#include +#include +#include + +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_icp_soc_common.h" + +#define ICP_FW_NAME_MAX_SIZE 32 +#define PC_POLL_DELAY_US 100 +#define PC_POLL_TIMEOUT_US 10000 + +/** + * struct cam_icp_proc_ubwc_cfg_cmd + * @ubwc_cfg: UBWC cfg info parsed from DT + * @ubwc_cfg_dev_mask: mask to indicate which device + * ubwc cfg to send to fw + */ +struct cam_icp_proc_ubwc_cfg_cmd { + struct cam_icp_ubwc_cfg *ubwc_cfg; + uint32_t ubwc_cfg_dev_mask; +}; + +/** + * @brief : Validate FW elf image + */ +int32_t cam_icp_validate_fw(const uint8_t *elf, uint32_t machine_id); + +/** + * @brief : Get FW elf size + */ +int32_t cam_icp_get_fw_size(const uint8_t *elf, uint32_t *fw_size); + +/** + * @brief : Program FW memory + */ +int32_t cam_icp_program_fw(const uint8_t *elf, + uintptr_t fw_kva_addr); + +/** + * @brief : Update ahb and axi votes + */ +int cam_icp_proc_cpas_vote(uint32_t cpas_handle, + struct cam_icp_cpas_vote *vote); + +/** + * @brief : dump FW memory into mini dump + */ +int cam_icp_proc_mini_dump(struct cam_icp_hw_dump_args *args, + uintptr_t fw_kva_addr, uint64_t fw_buf_len); + +/** + * @brief : Update UBWC configuration for IPE and BPS + */ +int cam_icp_proc_ubwc_configure(struct cam_icp_proc_ubwc_cfg_cmd *ubwc_cfg_cmd, + bool force_disable_ubwc, int hfi_handle); + +#endif /* _CAM_ICP_UTILS_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_soc_common.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_soc_common.c new file mode 100644 index 0000000000..84023725ce --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_soc_common.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include + +#include "cam_debug_util.h" +#include "cam_soc_util.h" +#include "hfi_intf.h" +#include "cam_icp_soc_common.h" +#include "cam_mem_mgr_api.h" + +static int __ubwc_config_get(struct device_node *np, char *name, uint32_t *cfg, bool *ubwc_listed) +{ + int nconfig, i, rc; + + nconfig = of_property_count_u32_elems(np, name); + if (nconfig < 0) { + CAM_DBG(CAM_ICP, "prop: %s is not listed", name); + *ubwc_listed = false; + return 0; + } + *ubwc_listed = true; + + if (nconfig > ICP_UBWC_CFG_MAX) { + CAM_ERR(CAM_ICP, "Invalid number of UBWC configs[=%d] MAX UBWC=%d", + nconfig, ICP_UBWC_CFG_MAX); + return -EINVAL; + } + + for (i = 0; i < nconfig; i++) { + rc = of_property_read_u32_index(np, name, i, &cfg[i]); + if (rc) { + CAM_ERR(CAM_ICP, + "Node %pOF has no valid %s prop at index=%d", + np, name, i); + return rc; + } + } + + return 0; +} + +static int cam_icp_soc_ubwc_config_get(struct device_node *np, + struct cam_icp_soc_info *icp_soc_info) +{ + struct cam_icp_ubwc_cfg *ubwc_cfg_ext; + int rc; + uint32_t dev_type; + bool ubwc_listed, ubwc_fetch_found, ubwc_write_found; + + dev_type = icp_soc_info->dev_type; + + rc = __ubwc_config_get(np, "ubwc-cfg", icp_soc_info->uconfig.ubwc_cfg, + &ubwc_listed); + if (ubwc_listed) { + if (dev_type == CAM_ICP_HW_ICP_V1) + icp_soc_info->is_ubwc_cfg = true; + return rc; + } + + ubwc_cfg_ext = &icp_soc_info->uconfig.ubwc_cfg_ext; + rc = __ubwc_config_get(np, "ubwc-ipe-fetch-cfg", + ubwc_cfg_ext->ipe_fetch, &ubwc_fetch_found); + if (rc) + return rc; + + rc = __ubwc_config_get(np, "ubwc-ipe-write-cfg", + ubwc_cfg_ext->ipe_write, &ubwc_write_found); + if (rc) + return rc; + + if (ubwc_fetch_found && ubwc_write_found) + ubwc_cfg_ext->found_ubwc_cfg_mask |= BIT(CAM_ICP_DEV_IPE); + else { + if (ubwc_fetch_found ^ ubwc_write_found) { + CAM_ERR(CAM_ICP, "Missing %s ipe ubwc config", + ubwc_fetch_found ? 
"write" : "fetch"); + return -EINVAL; + } + } + + rc = __ubwc_config_get(np, "ubwc-bps-fetch-cfg", + ubwc_cfg_ext->bps_fetch, &ubwc_fetch_found); + if (rc) + return rc; + + rc = __ubwc_config_get(np, "ubwc-bps-write-cfg", + ubwc_cfg_ext->bps_write, &ubwc_write_found); + if (rc) + return rc; + + if (ubwc_fetch_found && ubwc_write_found) + ubwc_cfg_ext->found_ubwc_cfg_mask |= BIT(CAM_ICP_DEV_BPS); + else { + if (ubwc_fetch_found ^ ubwc_write_found) { + CAM_ERR(CAM_ICP, "Missing %s bps ubwc config", + ubwc_fetch_found ? "write" : "fetch"); + return -EINVAL; + } + } + + rc = __ubwc_config_get(np, "ubwc-ofe-fetch-cfg", + ubwc_cfg_ext->ofe_fetch, &ubwc_fetch_found); + if (rc) + return rc; + + rc = __ubwc_config_get(np, "ubwc-ofe-write-cfg", + ubwc_cfg_ext->ofe_write, &ubwc_write_found); + if (rc) + return rc; + + if (ubwc_fetch_found && ubwc_write_found) + ubwc_cfg_ext->found_ubwc_cfg_mask |= BIT(CAM_ICP_DEV_OFE); + else { + if (ubwc_fetch_found ^ ubwc_write_found) { + CAM_ERR(CAM_ICP, "Missing %s ofe ubwc config", + ubwc_fetch_found ? 
"write" : "fetch"); + return -EINVAL; + } + } + + return 0; +} + +static inline void cam_icp_soc_qos_get(struct device_node *np, + struct cam_icp_soc_info *icp_soc_info) +{ + if (of_property_read_u32(np, "qos-val", &icp_soc_info->qos_val)) { + CAM_WARN(CAM_ICP, "QoS not set for device: %d", + icp_soc_info->dev_type); + icp_soc_info->qos_val = 0; + } +} + +static int cam_icp_soc_get_hw_version(struct device_node *np, + struct cam_icp_soc_info *icp_soc_info) +{ + int rc; + uint32_t version; + + rc = of_property_read_u32(np, "icp-version", &version); + if (rc) { + CAM_ERR(CAM_ICP, "read icp-version failed rc=%d", rc); + return -ENODEV; + } + + switch (version) { + case CAM_ICP_V1_VERSION: + case CAM_ICP_V2_VERSION: + case CAM_ICP_V2_1_VERSION: + icp_soc_info->hw_version = version; + break; + default: + CAM_ERR(CAM_ICP, "Invalid ICP version: %u", version); + rc = -ENODEV; + } + return rc; +} + +static void cam_icp_soc_get_fw_pas_id(struct device_node *np, + struct cam_icp_soc_info *icp_soc_info) +{ + if (of_property_read_u32(np, "fw-pas-id", &icp_soc_info->fw_pas_id)) { + CAM_WARN(CAM_ICP, "PAS_ID is not passed from DTSI and use default value: %d", + CAM_FW_PAS_ID_DEFAULT); + icp_soc_info->fw_pas_id = CAM_FW_PAS_ID_DEFAULT; + } +} + +static int cam_icp_soc_dt_properties_get(struct cam_hw_soc_info *soc_info) +{ + struct cam_icp_soc_info *icp_soc_info; + struct device_node *np; + int rc, num_pid, i; + + if (!soc_info->soc_private) { + CAM_ERR(CAM_ICP, "soc private is NULL"); + rc = -EINVAL; + goto end; + } + + icp_soc_info = (struct cam_icp_soc_info *)soc_info->soc_private; + np = soc_info->pdev->dev.of_node; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "failed to get DT properties rc=%d", rc); + goto end; + } + + rc = cam_icp_soc_ubwc_config_get(np, icp_soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "failed to get UBWC config props rc=%d", rc); + goto end; + } + + cam_icp_soc_qos_get(np, icp_soc_info); + + cam_icp_soc_get_fw_pas_id(np, 
icp_soc_info); + + rc = cam_icp_soc_get_hw_version(np, icp_soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "Get ICP HW version failed"); + goto end; + } + + num_pid = of_property_count_u32_elems(np, "cam_hw_pid"); + CAM_DBG(CAM_ICP, "ICP pid count: %d", num_pid); + + if (num_pid <= 0) + goto end; + + icp_soc_info->pid = CAM_MEM_ZALLOC_ARRAY(num_pid, sizeof(uint32_t), GFP_KERNEL); + if (!icp_soc_info->pid) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for OFE hw pids"); + rc = -ENOMEM; + return rc; + } + + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(np, "cam_hw_pid", i, &icp_soc_info->pid[i]); + icp_soc_info->num_pid = num_pid; + +end: + return 0; +} + +int cam_icp_soc_resources_init(struct cam_hw_soc_info *soc_info, + irq_handler_t handler, void *data) +{ + int rc, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + rc = cam_icp_soc_dt_properties_get(soc_info); + if (rc) + return rc; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, handler, &(irq_data[0])); + if (rc) { + CAM_ERR(CAM_ICP, + "request for soc platform resource failed rc=%d", rc); + return rc; + } + + return 0; +} + +int cam_icp_soc_resources_deinit(struct cam_hw_soc_info *soc_info) +{ + int rc; + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_ERR(CAM_ICP, + "release of soc platform resource failed rc=%d", rc); + + return rc; +} + +int cam_icp_soc_resources_enable(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_info->lowest_clk_level, true); + if (rc) + CAM_ERR(CAM_ICP, "failed to enable soc resources rc=%d", rc); + + return rc; +} + +int cam_icp_soc_resources_disable(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) + CAM_ERR(CAM_ICP, "failed to disable soc resources rc=%d", rc); + 
+ return rc; +} + +int cam_icp_soc_update_clk_rate(struct cam_hw_soc_info *soc_info, + int32_t clk_level, int hfi_handle) +{ + int32_t src_clk_idx = 0; + int32_t clk_rate = 0; + int rc = 0; + + if (!soc_info) { + CAM_ERR(CAM_ICP, "Invalid soc_info"); + return -EINVAL; + } + + if ((clk_level < 0) || (clk_level >= CAM_MAX_VOTE)) { + CAM_ERR(CAM_ICP, "clock level %d is not valid", + clk_level); + return -EINVAL; + } + + if (!soc_info->clk_level_valid[clk_level]) { + CAM_ERR(CAM_ICP, + "Clock level %d not supported", + clk_level); + return -EINVAL; + } + + src_clk_idx = soc_info->src_clk_idx; + if ((src_clk_idx < 0) || (src_clk_idx >= CAM_SOC_MAX_CLK)) { + CAM_WARN(CAM_ICP, "src_clk not defined for %s", + soc_info->dev_name); + return -EINVAL; + } + + clk_rate = soc_info->clk_rate[clk_level][src_clk_idx]; + if ((soc_info->clk_level_valid[CAM_TURBO_VOTE]) && + (soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) && + (clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) { + CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d", + clk_rate, + soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]); + clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]; + } + + rc = cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, clk_rate, 0); + if (rc) + return rc; + + hfi_send_freq_info(hfi_handle, clk_rate); + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_soc_common.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_soc_common.h new file mode 100644 index 0000000000..f5895fe36a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_common/cam_icp_soc_common.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_ICP_SOC_COMMON_H +#define CAM_ICP_SOC_COMMON_H + +#include "cam_soc_util.h" +#include "cam_icp_hw_intf.h" + +#define ICP_UBWC_CFG_MAX 2 +#define CAM_ICP_V1_VERSION 0x0100 +#define CAM_ICP_V2_VERSION 0x0200 +#define CAM_ICP_V2_1_VERSION 0x0201 + +#define CAM_FW_PAS_ID_DEFAULT 33 + +/** + * struct cam_icp_ubwc_cfg - ICP ubwc cfg + * @ipe_fetch: UBWC configuration for IPE fetch. + * @ipe_write: UBWC configuration for IPE write. + * @bps_fetch: UBWC configuration for BPS fetch. + * @bps_write: UBWC configuration for BPS write. + * @ofe_fetch: UBWC configuration for ofe fetch. + * @ofe_write: UBWC configuration for ofe write. + * @found_ubwc_cfg_mask: mask to indicate which + * ubwc dev cfg found from DT + */ +struct cam_icp_ubwc_cfg { + uint32_t ipe_fetch[ICP_UBWC_CFG_MAX]; + uint32_t ipe_write[ICP_UBWC_CFG_MAX]; + uint32_t bps_fetch[ICP_UBWC_CFG_MAX]; + uint32_t bps_write[ICP_UBWC_CFG_MAX]; + uint32_t ofe_fetch[ICP_UBWC_CFG_MAX]; + uint32_t ofe_write[ICP_UBWC_CFG_MAX]; + uint32_t found_ubwc_cfg_mask; +}; + +/** + * struct cam_icp_soc_info - ICP soc info + * @dev_type: Device type. + * @qos_val: QOS value. + * @hw_version: hw version. + * @fw_pas_id: Firmware pas id + * @uconfig: union of ubwc_cfg_ext and ubwc_cfg. ubwc_cfg may + * be used in icp_v1 for older chipsets. icp_v2 only + * uses ubwc_cfg_ext. + * @is_ubwc_cfg: indicate if ubwc_cfg is used. 
+ * @num_pid: Number of PID + * @pid: Exact PID values + */ +struct cam_icp_soc_info { + enum cam_icp_hw_type dev_type; + uint32_t qos_val; + uint32_t hw_version; + uint32_t fw_pas_id; + union { + uint32_t ubwc_cfg[ICP_UBWC_CFG_MAX]; + struct cam_icp_ubwc_cfg ubwc_cfg_ext; + } uconfig; + bool is_ubwc_cfg; + uint32_t num_pid; + uint32_t *pid; +}; + +int cam_icp_soc_resources_init(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler, void *irq_data); + +int cam_icp_soc_resources_deinit(struct cam_hw_soc_info *soc_info); + +int cam_icp_soc_resources_enable(struct cam_hw_soc_info *soc_info); + +int cam_icp_soc_resources_disable(struct cam_hw_soc_info *soc_info); + +int cam_icp_soc_update_clk_rate(struct cam_hw_soc_info *soc_info, + int32_t clk_level, int hfi_handle); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_core.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_core.c new file mode 100644 index 0000000000..1d477898d4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_core.c @@ -0,0 +1,784 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_icp_v1_core.h" +#include "cam_icp_v1_reg.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "hfi_sys_defs.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_icp_proc_common.h" +#include "cam_common_util.h" +#include "cam_compat.h" +#include "cam_icp_soc_common.h" + +static const struct hfi_ops hfi_icp_v1_ops = { + .irq_raise = cam_icp_v1_irq_raise, + .irq_enable = cam_icp_v1_irq_enable, + .iface_addr = cam_icp_v1_iface_addr, +}; + +static int cam_icp_v1_cpas_vote(struct cam_icp_v1_device_core_info *core_info, + struct cam_icp_cpas_vote *cpas_vote) +{ + if (!core_info) + return -EINVAL; + + return cam_icp_proc_cpas_vote(core_info->cpas_handle, cpas_vote); +} + +static int32_t cam_icp_v1_download_fw(void *device_priv) +{ + int32_t rc = 0; + uint32_t fw_size; + const uint8_t *fw_start = NULL; + struct cam_hw_info *icp_v1_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_icp_v1_device_core_info *core_info = NULL; + struct platform_device *pdev = NULL; + const char *firmware_name = NULL; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &icp_v1_dev->soc_info; + core_info = (struct cam_icp_v1_device_core_info *)icp_v1_dev->core_info; + pdev = soc_info->pdev; + + rc = of_property_read_string(pdev->dev.of_node, "fw_name", &firmware_name); + if (rc) { + CAM_INFO(CAM_ICP, + "FW image name not found. 
Use default name: CAMERA_ICP.elf"); + rc = firmware_request_nowarn(&core_info->fw_elf, + "CAMERA_ICP.elf", &pdev->dev); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc); + return rc; + } + } else { + CAM_INFO(CAM_ICP, "Downloading firmware %s", + firmware_name); + rc = firmware_request_nowarn(&core_info->fw_elf, + firmware_name, &pdev->dev); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc); + return rc; + } + } + + if (!core_info->fw_elf) { + CAM_ERR(CAM_ICP, "Invalid elf size"); + rc = -EINVAL; + goto fw_download_failed; + } + + fw_start = core_info->fw_elf->data; + rc = cam_icp_validate_fw(fw_start, EM_ARM); + if (rc) { + CAM_ERR(CAM_ICP, "fw elf validation failed"); + goto fw_download_failed; + } + + rc = cam_icp_get_fw_size(fw_start, &fw_size); + if (rc) { + CAM_ERR(CAM_ICP, "unable to get fw size"); + goto fw_download_failed; + } + + if (core_info->fw_buf_len < fw_size) { + CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu", + fw_size, core_info->fw_buf_len); + rc = -EINVAL; + goto fw_download_failed; + } + + rc = cam_icp_program_fw(fw_start, core_info->fw_kva_addr); + if (rc) { + CAM_ERR(CAM_ICP, "fw program is failed"); + goto fw_download_failed; + } + +fw_download_failed: + release_firmware(core_info->fw_elf); + return rc; +} + +static int cam_icp_v1_fw_dump( + struct cam_icp_hw_dump_args *dump_args, + struct cam_icp_v1_device_core_info *core_info) +{ + u8 *dest; + u8 *src; + uint64_t size_required; + struct cam_icp_dump_header *hdr; + + if (!core_info || !dump_args) { + CAM_ERR(CAM_ICP, "invalid params %pK %pK", + core_info, dump_args); + return -EINVAL; + } + if (!core_info->fw_kva_addr || !dump_args->cpu_addr) { + CAM_ERR(CAM_ICP, "invalid params %pK, 0x%zx", + core_info->fw_kva_addr, dump_args->cpu_addr); + return -EINVAL; + } + + size_required = core_info->fw_buf_len + + sizeof(struct cam_icp_dump_header); + + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_ICP, "Dump offset overshoot len %zu offset %zu", + 
dump_args->buf_len, dump_args->offset); + return -ENOSPC; + } + + if ((dump_args->buf_len - dump_args->offset) < size_required) { + CAM_WARN(CAM_ICP, "Dump buffer exhaust required %llu len %llu", + size_required, core_info->fw_buf_len); + return -ENOSPC; + } + + dest = (u8 *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_icp_dump_header *)dest; + scnprintf(hdr->tag, CAM_ICP_DUMP_TAG_MAX_LEN, "ICP_FW:"); + hdr->word_size = sizeof(u8); + hdr->size = core_info->fw_buf_len; + src = (u8 *)core_info->fw_kva_addr; + dest = (u8 *)dest + sizeof(struct cam_icp_dump_header); + memcpy_fromio(dest, src, core_info->fw_buf_len); + dump_args->offset += hdr->size + sizeof(struct cam_icp_dump_header); + return 0; +} + +static int cam_icp_v1_fw_mini_dump(struct cam_icp_hw_dump_args *dump_args, + struct cam_icp_v1_device_core_info *core_info) +{ + if (!core_info) { + CAM_ERR(CAM_ICP, "Invalid param %pK", core_info); + return -EINVAL; + } + + return cam_icp_proc_mini_dump(dump_args, core_info->fw_kva_addr, + core_info->fw_buf_len); +} + +int cam_icp_v1_init_hw(void *device_priv, void *args, + uint32_t arg_size) +{ + struct cam_hw_info *icp_v1_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_icp_v1_device_core_info *core_info = NULL; + struct cam_icp_soc_info *icp_v1_soc_info; + struct cam_icp_cpas_vote cpas_vote; + unsigned long flags; + int rc = 0; + bool send_freq_info; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &icp_v1_dev->soc_info; + core_info = (struct cam_icp_v1_device_core_info *)icp_v1_dev->core_info; + + if ((!soc_info) || (!core_info) || (!args)) { + CAM_ERR(CAM_ICP, "soc_info: %pK core_info: %pK args: %pK", + soc_info, core_info, args); + return -EINVAL; + } + + send_freq_info = *((bool *)args); + + spin_lock_irqsave(&icp_v1_dev->hw_lock, flags); + if (icp_v1_dev->hw_state == CAM_HW_STATE_POWER_UP) { + spin_unlock_irqrestore(&icp_v1_dev->hw_lock, flags); + return 0; + } + 
spin_unlock_irqrestore(&icp_v1_dev->hw_lock, flags); + + icp_v1_soc_info = soc_info->soc_private; + + cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE; + cpas_vote.ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + cpas_vote.axi_vote.num_paths = 1; + cpas_vote.axi_vote.axi_path[0].path_data_type = + CAM_ICP_DEFAULT_AXI_PATH; + cpas_vote.axi_vote.axi_path[0].transac_type = + CAM_ICP_DEFAULT_AXI_TRANSAC; + cpas_vote.axi_vote.axi_path[0].camnoc_bw = + CAM_ICP_BW_BYTES_VOTE; + cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw = + CAM_ICP_BW_BYTES_VOTE; + cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw = + CAM_ICP_BW_BYTES_VOTE; + + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote.ahb_vote, &cpas_vote.axi_vote); + if (rc) { + CAM_ERR(CAM_ICP, "cpas start failed: %d", rc); + goto error; + } + core_info->cpas_start = true; + + rc = cam_icp_soc_resources_enable(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc); + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_ICP, "cpas stop is failed"); + else + core_info->cpas_start = false; + } else { + CAM_DBG(CAM_ICP, "icp_v1_qos %d", icp_v1_soc_info->qos_val); + if (icp_v1_soc_info->qos_val) + cam_io_w_mb(icp_v1_soc_info->qos_val, + soc_info->reg_map[ICP_V1_BASE].mem_base + + ICP_V1_CSR_ACCESS); + if (send_freq_info) { + int32_t clk_rate = 0; + + clk_rate = cam_wrapper_clk_get_rate(soc_info->clk[soc_info->src_clk_idx], + soc_info->clk_name[soc_info->src_clk_idx]); + hfi_send_freq_info(core_info->hfi_handle, clk_rate); + } + } + + spin_lock_irqsave(&icp_v1_dev->hw_lock, flags); + icp_v1_dev->hw_state = CAM_HW_STATE_POWER_UP; + spin_unlock_irqrestore(&icp_v1_dev->hw_lock, flags); + +error: + return rc; +} + +int cam_icp_v1_deinit_hw(void *device_priv, + void *args, uint32_t arg_size) +{ + struct cam_hw_info *icp_v1_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_icp_v1_device_core_info *core_info = NULL; + unsigned long flags; + int rc = 0; + bool send_freq_info; + + if (!device_priv || !args) 
{ + CAM_ERR(CAM_ICP, "Invalid icp hw info is %s args is %s", + CAM_IS_NULL_TO_STR(icp_v1_dev), CAM_IS_NULL_TO_STR(args)); + return -EINVAL; + } + + send_freq_info = *((bool *)args); + soc_info = &icp_v1_dev->soc_info; + core_info = (struct cam_icp_v1_device_core_info *)icp_v1_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + spin_lock_irqsave(&icp_v1_dev->hw_lock, flags); + if (icp_v1_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + spin_unlock_irqrestore(&icp_v1_dev->hw_lock, flags); + return 0; + } + spin_unlock_irqrestore(&icp_v1_dev->hw_lock, flags); + + if (send_freq_info) + hfi_send_freq_info(core_info->hfi_handle, 0); + + rc = cam_icp_soc_resources_disable(soc_info); + + if (rc) + CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc); + + if (core_info->cpas_start) { + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_ICP, "cpas stop is failed"); + else + core_info->cpas_start = false; + } + + spin_lock_irqsave(&icp_v1_dev->hw_lock, flags); + icp_v1_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + spin_unlock_irqrestore(&icp_v1_dev->hw_lock, flags); + + return rc; +} + +static int cam_icp_v1_power_resume(struct cam_hw_info *icp_v1_info, bool debug_enabled) +{ + uint32_t val = ICP_V1_CSR_FULL_CPU_EN; + void __iomem *base; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_icp_soc_info *icp_v1_soc_info; + + if (!icp_v1_info) { + CAM_ERR(CAM_ICP, "Invalid ICP device info"); + return -EINVAL; + } + + base = icp_v1_info->soc_info.reg_map[ICP_V1_BASE].mem_base; + soc_info = &icp_v1_info->soc_info; + icp_v1_soc_info = soc_info->soc_private; + + cam_io_w_mb(ICP_V1_CSR_CPU_EN, base + ICP_V1_CSR_CONTROL); + cam_io_w_mb(ICP_V1_CSR_FUNC_RESET, base + ICP_V1_CSR_NSEC_RESET); + + if (debug_enabled) + val |= ICP_V1_CSR_FULL_DBG_EN; + + cam_io_w_mb(val, base + ICP_V1_CSR_CONTROL); + cam_io_w_mb(icp_v1_soc_info->qos_val, + base + ICP_V1_CSR_ACCESS); + + CAM_DBG(CAM_ICP, 
"icp_v1 qos-val : 0x%x", + cam_io_r_mb(base + ICP_V1_CSR_ACCESS)); + + return 0; +} + +static int cam_icp_v1_power_collapse(struct cam_hw_info *icp_v1_info) +{ + uint32_t val, status = 0; + void __iomem *base; + + if (!icp_v1_info) { + CAM_ERR(CAM_ICP, "invalid ICP device info"); + return -EINVAL; + } + + base = icp_v1_info->soc_info.reg_map[ICP_V1_BASE].mem_base; + + /** + * Need to poll here to confirm that FW has triggered WFI + * and Host can then proceed. No interrupt is expected + * from FW at this time. + */ + + if (cam_common_read_poll_timeout(base + + ICP_V1_CSR_STATUS, + PC_POLL_DELAY_US, PC_POLL_TIMEOUT_US, + ICP_V1_CSR_STANDBYWFI, + ICP_V1_CSR_STANDBYWFI, &status)) { + CAM_ERR(CAM_ICP, "WFI poll timed out: status=0x%08x", status); + return -ETIMEDOUT; + } + + val = cam_io_r(base + ICP_V1_CSR_CONTROL); + val &= ~(ICP_V1_CSR_CPU_EN | ICP_V1_CSR_WAKE_UP_EN); + cam_io_w_mb(val, base + ICP_V1_CSR_CONTROL); + + return 0; +} + +static void prepare_boot(struct cam_hw_info *icp_v1_dev, + struct cam_icp_boot_args *args) +{ + struct cam_icp_v1_device_core_info *core_info = icp_v1_dev->core_info; + unsigned long flags; + + core_info->fw_buf = args->firmware.iova; + core_info->fw_kva_addr = args->firmware.kva; + core_info->fw_buf_len = args->firmware.len; + + spin_lock_irqsave(&icp_v1_dev->hw_lock, flags); + core_info->irq_cb.data = args->irq_cb.data; + core_info->irq_cb.cb = args->irq_cb.cb; + spin_unlock_irqrestore(&icp_v1_dev->hw_lock, flags); +} + +static void prepare_shutdown(struct cam_hw_info *icp_v1_dev) +{ + struct cam_icp_v1_device_core_info *core_info = icp_v1_dev->core_info; + unsigned long flags; + + core_info->fw_buf = 0; + core_info->fw_kva_addr = 0; + core_info->fw_buf_len = 0; + + spin_lock_irqsave(&icp_v1_dev->hw_lock, flags); + core_info->irq_cb.data = NULL; + core_info->irq_cb.cb = NULL; + spin_unlock_irqrestore(&icp_v1_dev->hw_lock, flags); +} + +static int cam_icp_v1_boot(struct cam_hw_info *icp_v1_dev, + struct cam_icp_boot_args *args, size_t 
arg_size) +{ + int rc; + + if (!icp_v1_dev || !args) { + CAM_ERR(CAM_ICP, + "Invalid args: icp_v1_dev=%pK args=%pK", + icp_v1_dev, args); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_icp_boot_args)) { + CAM_ERR(CAM_ICP, "Invalid boot args size"); + return -EINVAL; + } + + prepare_boot(icp_v1_dev, args); + + rc = cam_icp_v1_download_fw(icp_v1_dev); + if (rc) { + CAM_ERR(CAM_ICP, "firmware download failed rc=%d", rc); + goto err; + } + + rc = cam_icp_v1_power_resume(icp_v1_dev, args->debug_enabled); + if (rc) { + CAM_ERR(CAM_ICP, "ICP boot failed rc=%d", rc); + goto err; + } + + return 0; +err: + prepare_shutdown(icp_v1_dev); + return rc; +} + +static int cam_icp_v1_shutdown(struct cam_hw_info *icp_v1_dev) +{ + if (!icp_v1_dev) { + CAM_ERR(CAM_ICP, "invalid ICP device info"); + return -EINVAL; + } + + prepare_shutdown(icp_v1_dev); + cam_icp_v1_power_collapse(icp_v1_dev); + return 0; +} + +irqreturn_t cam_icp_v1_irq(int irq_num, void *data) +{ + struct cam_hw_info *icp_v1_dev = data; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_icp_v1_device_core_info *core_info = NULL; + uint32_t irq_status = 0; + bool recover = false; + + if (!data) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info or query_cap args"); + return IRQ_HANDLED; + } + + spin_lock(&icp_v1_dev->hw_lock); + if (icp_v1_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_WARN(CAM_ICP, "ICP HW powered off"); + spin_unlock(&icp_v1_dev->hw_lock); + return IRQ_HANDLED; + } + spin_unlock(&icp_v1_dev->hw_lock); + + soc_info = &icp_v1_dev->soc_info; + core_info = (struct cam_icp_v1_device_core_info *)icp_v1_dev->core_info; + + irq_status = cam_io_r_mb(soc_info->reg_map[ICP_V1_BASE].mem_base + + ICP_V1_HOST_INT_STATUS); + + cam_io_w_mb(irq_status, + soc_info->reg_map[ICP_V1_BASE].mem_base + ICP_V1_HOST_INT_CLR); + + if ((irq_status & ICP_V1_WDT_0) || + (irq_status & ICP_V1_WDT_1)) { + CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from ICP"); + recover = true; + } + + 
spin_lock(&icp_v1_dev->hw_lock); + if (core_info->irq_cb.cb) + core_info->irq_cb.cb(core_info->irq_cb.data, recover); + spin_unlock(&icp_v1_dev->hw_lock); + + return IRQ_HANDLED; +} + +void cam_icp_v1_irq_raise(void *priv) +{ + struct cam_hw_info *icp_v1_info = priv; + + if (!icp_v1_info) { + CAM_ERR(CAM_ICP, "invalid ICP device info"); + return; + } + + cam_io_w_mb(ICP_V1_HOSTINT, + icp_v1_info->soc_info.reg_map[ICP_V1_BASE].mem_base + + ICP_V1_CSR_HOST2ICPINT); +} + +void cam_icp_v1_irq_enable(void *priv) +{ + struct cam_hw_info *icp_v1_info = priv; + + if (!icp_v1_info) { + CAM_ERR(CAM_ICP, "invalid ICP device info"); + return; + } + + cam_io_w_mb(ICP_V1_WDT_WS0EN | ICP_V1_A2HOSTINTEN, + icp_v1_info->soc_info.reg_map[ICP_V1_BASE].mem_base + + ICP_V1_CSR_A2HOSTINTEN); +} + +void __iomem *cam_icp_v1_iface_addr(void *priv) +{ + struct cam_hw_info *icp_v1_info = priv; + void __iomem *base; + + if (!icp_v1_info) { + CAM_ERR(CAM_ICP, "invalid ICP device info"); + return ERR_PTR(-EINVAL); + } + + base = icp_v1_info->soc_info.reg_map[ICP_V1_BASE].mem_base; + + return base + ICP_V1_GEN_PURPOSE_REG_OFFSET; +} + +void cam_icp_v1_populate_hfi_ops(const struct hfi_ops **hfi_proc_ops) +{ + if (!hfi_proc_ops) { + CAM_ERR(CAM_ICP, "Invalid hfi ops argument"); + return; + } + + *hfi_proc_ops = &hfi_icp_v1_ops; +} + +static int cam_icp_v1_send_fw_init(struct cam_icp_v1_device_core_info *core_info) +{ + int rc; + + if (!core_info) { + CAM_ERR(CAM_ICP, "Invalid core info is NULL"); + return -EINVAL; + } + + rc = hfi_send_system_cmd(core_info->hfi_handle, HFI_CMD_SYS_INIT, 0, 0); + if (rc) { + CAM_ERR(CAM_ICP, "Fail to send sys init command for hfi handle: %d", + core_info->hfi_handle); + return rc; + } + + return 0; +} + +static int cam_icp_v1_pc_prep(struct cam_icp_v1_device_core_info *core_info) +{ + int rc; + + if (!core_info) { + CAM_ERR(CAM_ICP, "Invalid ICP core info is NULL"); + return -EINVAL; + } + + rc = hfi_send_system_cmd(core_info->hfi_handle, HFI_CMD_SYS_PC_PREP, 0, 
0); + if (rc) { + CAM_ERR(CAM_ICP, "Fail to send PC collapse command for hfi handle: %d", + core_info->hfi_handle); + return rc; + } + + return 0; +} + +int cam_icp_v1_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *icp_v1_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_icp_v1_device_core_info *core_info = NULL; + struct cam_icp_soc_info *icp_soc_info = NULL; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid arguments"); + return -EINVAL; + } + + if (cmd_type >= CAM_ICP_CMD_MAX) { + CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type); + return -EINVAL; + } + + soc_info = &icp_v1_dev->soc_info; + core_info = (struct cam_icp_v1_device_core_info *)icp_v1_dev->core_info; + + switch (cmd_type) { + case CAM_ICP_CMD_PROC_SHUTDOWN: + rc = cam_icp_v1_shutdown(device_priv); + break; + case CAM_ICP_CMD_PROC_BOOT: + rc = cam_icp_v1_boot(device_priv, cmd_args, arg_size); + break; + case CAM_ICP_CMD_POWER_COLLAPSE: + rc = cam_icp_v1_power_collapse(icp_v1_dev); + break; + case CAM_ICP_CMD_POWER_RESUME: + rc = cam_icp_v1_power_resume(icp_v1_dev, *((bool *)cmd_args)); + break; + case CAM_ICP_SEND_INIT: + rc = cam_icp_v1_send_fw_init(core_info); + break; + case CAM_ICP_CMD_PC_PREP: + rc = cam_icp_v1_pc_prep(core_info); + break; + case CAM_ICP_CMD_VOTE_CPAS: { + struct cam_icp_cpas_vote *cpas_vote = cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_ICP, "cmd args NULL"); + return -EINVAL; + } + + cam_icp_v1_cpas_vote(core_info, cpas_vote); + break; + } + + case CAM_ICP_CMD_SET_HFI_HANDLE: { + if (!core_info || !cmd_args) { + CAM_ERR(CAM_ICP, "Core info is %s and args is %s", + CAM_IS_NULL_TO_STR(core_info), CAM_IS_NULL_TO_STR(cmd_args)); + return -EINVAL; + } + + if (arg_size != sizeof(int)) { + CAM_ERR(CAM_ICP, "Invalid set hfi handle command arg size:%u", + arg_size); + return -EINVAL; + } + + core_info->hfi_handle = *((int *)cmd_args); + break; + } + + case CAM_ICP_CMD_CPAS_START: 
{ + struct cam_icp_cpas_vote *cpas_vote = cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_ICP, "cmd args NULL"); + return -EINVAL; + } + + if (!core_info->cpas_start) { + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote->ahb_vote, + &cpas_vote->axi_vote); + core_info->cpas_start = true; + } + break; + } + + case CAM_ICP_CMD_CPAS_STOP: + if (core_info->cpas_start) { + cam_cpas_stop(core_info->cpas_handle); + core_info->cpas_start = false; + } + break; + case CAM_ICP_CMD_UBWC_CFG: { + struct cam_icp_ubwc_cfg_cmd *ubwc_cmd = cmd_args; + struct cam_icp_proc_ubwc_cfg_cmd ubwc_proc_cmd; + + icp_soc_info = soc_info->soc_private; + if (!icp_soc_info) { + CAM_ERR(CAM_ICP, "ICP private soc info is NULL"); + return -EINVAL; + } + + if (!cmd_args) { + CAM_ERR(CAM_ICP, "Invalid ubwc cmd args is NULL"); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_icp_ubwc_cfg_cmd)) { + CAM_ERR(CAM_ICP, "Invalid ubwc cmd size:%u", arg_size); + return -EINVAL; + } + + if (icp_soc_info->is_ubwc_cfg) + rc = hfi_cmd_ubwc_config(core_info->hfi_handle, + icp_soc_info->uconfig.ubwc_cfg); + else { + ubwc_proc_cmd.ubwc_cfg = &icp_soc_info->uconfig.ubwc_cfg_ext; + rc = cam_icp_proc_ubwc_configure(&ubwc_proc_cmd, + ubwc_cmd->disable_ubwc_comp, core_info->hfi_handle); + } + break; + } + case CAM_ICP_CMD_CLK_UPDATE: { + int32_t clk_level = 0; + struct cam_ahb_vote ahb_vote; + + if (!cmd_args || !core_info) { + CAM_ERR(CAM_ICP, "Invalid args: core info is %s cmd args is %s", + CAM_IS_NULL_TO_STR(core_info), CAM_IS_NULL_TO_STR(cmd_args)); + return -EINVAL; + } + + if (arg_size != sizeof(int32_t)) { + CAM_ERR(CAM_ICP, "Invalid icp clock update command"); + return -EINVAL; + } + + clk_level = *((int32_t *)cmd_args); + CAM_DBG(CAM_ICP, + "Update ICP clock to level [%d]", clk_level); + rc = cam_icp_soc_update_clk_rate(soc_info, clk_level, core_info->hfi_handle); + if (rc) + CAM_ERR(CAM_ICP, + "Failed to update clk to level: %d rc: %d", + clk_level, rc); + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + 
ahb_vote.vote.level = clk_level; + cam_cpas_update_ahb_vote( + core_info->cpas_handle, &ahb_vote); + break; + } + case CAM_ICP_CMD_HW_DUMP: { + struct cam_icp_hw_dump_args *dump_args = cmd_args; + + rc = cam_icp_v1_fw_dump(dump_args, core_info); + break; + } + + case CAM_ICP_CMD_HW_MINI_DUMP: { + struct cam_icp_hw_dump_args *dump_args = cmd_args; + + rc = cam_icp_v1_fw_mini_dump(dump_args, core_info); + break; + } + case CAM_ICP_CMD_HW_REG_DUMP: { + /* reg dump not supported */ + break; + } + default: + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_core.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_core.h new file mode 100644 index 0000000000..296d4b8962 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_core.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_ICP_V1_CORE_H +#define CAM_ICP_V1_CORE_H + +#include +#include +#include +#include + +#include "cam_icp_hw_intf.h" +#include "hfi_intf.h" + +#define ICP_V1_QGIC_BASE 0 +#define ICP_V1_BASE 1 +#define ICP_V1_CSR_BASE 2 + +#define ICP_V1_HOST_INT 0x1 +#define ICP_V1_WDT_0 0x2 +#define ICP_V1_WDT_1 0x4 + +#define ICP_V1_CSR_ACCESS 0x3C + +#define ELF_GUARD_PAGE (2 * 1024 * 1024) + +/** + * struct cam_icp_v1_device_hw_info + * @icp_v1_hw_info: ICP_V1 hardware info + * @fw_elf: start address of fw start with elf header + * @fw: start address of fw blob + * @fw_buf: smmu alloc/mapped fw buffer + * @fw_buf_len: fw buffer length + * @query_cap: ICP_V1 query info from firmware + * @icp_v1_acquire: Acquire information of ICP_V1 + * @irq_cb: IRQ callback + * @cpas_handle: CPAS handle for ICP_V1 + * @hfi_handle: hfi handle for ICP V1 + * @hw_version: hw version of icp v1 processor + * @cpas_start: state variable for cpas + */ +struct cam_icp_v1_device_core_info { + const struct firmware *fw_elf; + void *fw; + uint32_t fw_buf; + uintptr_t fw_kva_addr; + uint64_t fw_buf_len; + struct cam_icp_irq_cb irq_cb; + uint32_t cpas_handle; + int32_t hfi_handle; + bool cpas_start; +}; + +int cam_icp_v1_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_icp_v1_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_icp_v1_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); + +irqreturn_t cam_icp_v1_irq(int irq_num, void *data); + +void cam_icp_v1_irq_raise(void *priv); +void cam_icp_v1_irq_enable(void *priv); +void __iomem *cam_icp_v1_iface_addr(void *priv); +void cam_icp_v1_populate_hfi_ops(const struct hfi_ops **hfi_proc_ops); + +#endif /* CAM_ICP_V1_CORE_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_dev.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_dev.c new file mode 100644 index 
0000000000..b8d071ac0f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_dev.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "cam_icp_v1_core.h" +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_icp_soc_common.h" +#include "cam_icp_v1_dev.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static int max_icp_v1_hw_idx = -1; + +uint32_t cam_icp_v1_get_device_num(void) +{ + return max_icp_v1_hw_idx + 1; +} + +static bool cam_icp_v1_cpas_cb(uint32_t client_handle, void *userdata, + struct cam_cpas_irq_data *irq_data) +{ + bool error_handled = false; + + if (!irq_data) + return error_handled; + + switch (irq_data->irq_type) { + case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ICP, + "IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d", + irq_data->irq_type, + irq_data->u.dec_err.decerr_status.value, + irq_data->u.dec_err.decerr_status.thr_err, + irq_data->u.dec_err.decerr_status.fcl_err, + irq_data->u.dec_err.decerr_status.len_md_err, + irq_data->u.dec_err.decerr_status.format_err); + error_handled = true; + break; + case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ICP, + "IPE/BPS UBWC Encode error type=%d status=%x", + irq_data->irq_type, + irq_data->u.enc_err.encerr_status.value); + error_handled = true; + break; + default: + break; + } + + return error_handled; +} + +int cam_icp_v1_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_icp_v1_device_core_info *core_info, uint32_t hw_idx) +{ + struct 
cam_cpas_register_params cpas_register_params; + int rc; + + cpas_register_params.dev = &soc_info->pdev->dev; + memcpy(cpas_register_params.identifier, "icp", sizeof("icp")); + cpas_register_params.cam_cpas_client_cb = cam_icp_v1_cpas_cb; + cpas_register_params.cell_index = hw_idx; + cpas_register_params.userdata = NULL; + + rc = cam_cpas_register_client(&cpas_register_params); + if (rc < 0) { + CAM_ERR(CAM_ICP, "failed: %d", rc); + return rc; + } + + core_info->cpas_handle = cpas_register_params.client_handle; + return rc; +} + +static inline void cam_icp_v1_soc_info_deinit(struct cam_hw_soc_info *soc_info) +{ + CAM_MEM_FREE(soc_info->soc_private); +} + +static int cam_icp_v1_soc_info_init(struct cam_hw_soc_info *soc_info, + struct platform_device *pdev) +{ + struct cam_icp_soc_info *icp_soc_info = NULL; + + icp_soc_info = CAM_MEM_ZALLOC(sizeof(*icp_soc_info), GFP_KERNEL); + if (!icp_soc_info) + return -ENOMEM; + + soc_info->pdev = pdev; + soc_info->dev = &pdev->dev; + soc_info->dev_name = pdev->name; + soc_info->soc_private = icp_soc_info; + + icp_soc_info->dev_type = CAM_ICP_HW_ICP_V1; + + return 0; +} + +static int cam_icp_v1_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct cam_hw_info *icp_v1_dev = NULL; + struct cam_hw_intf *icp_v1_dev_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_icp_v1_device_core_info *core_info = NULL; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + icp_v1_dev_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!icp_v1_dev_intf) + return -ENOMEM; + + of_property_read_u32(pdev->dev.of_node, + "cell-index", &icp_v1_dev_intf->hw_idx); + + icp_v1_dev = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!icp_v1_dev) { + rc = -ENOMEM; + goto icp_v1_dev_alloc_failure; + } + + icp_v1_dev_intf->hw_priv = icp_v1_dev; + 
icp_v1_dev_intf->hw_ops.init = cam_icp_v1_init_hw; + icp_v1_dev_intf->hw_ops.deinit = cam_icp_v1_deinit_hw; + icp_v1_dev_intf->hw_ops.process_cmd = cam_icp_v1_process_cmd; + icp_v1_dev_intf->hw_type = CAM_ICP_HW_ICP_V1; + + CAM_DBG(CAM_ICP, "type %d index %d", + icp_v1_dev_intf->hw_type, + icp_v1_dev_intf->hw_idx); + + platform_set_drvdata(pdev, icp_v1_dev_intf); + + core_info = CAM_MEM_ZALLOC(sizeof(struct cam_icp_v1_device_core_info), + GFP_KERNEL); + if (!core_info) { + rc = -ENOMEM; + goto core_info_alloc_failure; + } + icp_v1_dev->core_info = core_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_ICP, "No icp_v1 hardware info"); + rc = -EINVAL; + goto match_err; + } + + rc = cam_icp_v1_soc_info_init(&icp_v1_dev->soc_info, pdev); + if (rc) + goto init_soc_failure; + + rc = cam_icp_soc_resources_init(&icp_v1_dev->soc_info, cam_icp_v1_irq, + icp_v1_dev); + if (rc < 0) { + CAM_ERR(CAM_ICP, "failed to init_soc"); + goto init_soc_failure; + } + + rc = cam_icp_v1_register_cpas(&icp_v1_dev->soc_info, + core_info, icp_v1_dev_intf->hw_idx); + if (rc < 0) { + CAM_ERR(CAM_ICP, "icp_v1 cpas registration failed"); + goto cpas_reg_failed; + } + core_info->hfi_handle = HFI_HANDLE_INIT_VALUE; + icp_v1_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&icp_v1_dev->hw_mutex); + spin_lock_init(&icp_v1_dev->hw_lock); + init_completion(&icp_v1_dev->hw_complete); + + if ((int)(icp_v1_dev_intf->hw_idx) > max_icp_v1_hw_idx) + max_icp_v1_hw_idx = icp_v1_dev_intf->hw_idx; + + CAM_DBG(CAM_ICP, "ICP_V1:%u component bound successfully", + icp_v1_dev_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return 0; + +cpas_reg_failed: + cam_icp_soc_resources_deinit(&icp_v1_dev->soc_info); +init_soc_failure: + cam_icp_v1_soc_info_deinit(&icp_v1_dev->soc_info); +match_err: + CAM_MEM_FREE(core_info); 
+core_info_alloc_failure: + CAM_MEM_FREE(icp_v1_dev); +icp_v1_dev_alloc_failure: + CAM_MEM_FREE(icp_v1_dev_intf); + + return rc; +} + +static void cam_icp_v1_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *icp_v1_dev = NULL; + struct cam_hw_intf *icp_v1_dev_intf = NULL; + struct cam_icp_v1_device_core_info *core_info = NULL; + struct platform_device *pdev = to_platform_device(dev); + + icp_v1_dev_intf = platform_get_drvdata(pdev); + + if (!icp_v1_dev_intf) { + CAM_ERR(CAM_ICP, "Error No data in pdev"); + return; + } + + icp_v1_dev = icp_v1_dev_intf->hw_priv; + core_info = (struct cam_icp_v1_device_core_info *)icp_v1_dev->core_info; + + cam_cpas_unregister_client(core_info->cpas_handle); + cam_icp_soc_resources_deinit(&icp_v1_dev->soc_info); + cam_icp_v1_soc_info_deinit(&icp_v1_dev->soc_info); + + max_icp_v1_hw_idx = -1; + + CAM_MEM_FREE(icp_v1_dev->core_info); + CAM_MEM_FREE(icp_v1_dev); + CAM_MEM_FREE(icp_v1_dev_intf); +} + +static const struct component_ops cam_icp_v1_component_ops = { + .bind = cam_icp_v1_component_bind, + .unbind = cam_icp_v1_component_unbind, +}; + +int cam_icp_v1_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ICP, "Adding ICP_V1 component"); + rc = component_add(&pdev->dev, &cam_icp_v1_component_ops); + if (rc) + CAM_ERR(CAM_ICP, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_icp_v1_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_icp_v1_component_ops); + return 0; +} + +static const struct of_device_id cam_icp_v1_dt_match[] = { + {.compatible = "qcom,cam-icp_v1",}, + {}, +}; +MODULE_DEVICE_TABLE(of, cam_icp_v1_dt_match); + +struct platform_driver cam_icp_v1_driver = { + .probe = cam_icp_v1_probe, + .remove = cam_icp_v1_remove, + .driver = { + .name = "cam-icp_v1", + .owner = THIS_MODULE, + .of_match_table = cam_icp_v1_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_icp_v1_init_module(void) +{ + return 
platform_driver_register(&cam_icp_v1_driver); +} + +void cam_icp_v1_exit_module(void) +{ + platform_driver_unregister(&cam_icp_v1_driver); +} + +MODULE_DESCRIPTION("CAM ICP_V1 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_dev.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_dev.h new file mode 100644 index 0000000000..e05fad7aed --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_dev.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_ICP_V1_DEV_H_ +#define _CAM_ICP_V1_DEV_H_ + +/** + * @brief : API to register icp_v1 hw to platform framework. + * @return struct platform_device pointer on success, or ERR_PTR() on error. + */ +int cam_icp_v1_init_module(void); + +/** + * @brief : API to remove icp_v1 hw from platform framework. + */ +void cam_icp_v1_exit_module(void); + +#endif /* _CAM_ICP_V1_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_reg.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_reg.h new file mode 100644 index 0000000000..343a869fa9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v1_hw/cam_icp_v1_reg.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_ICP_V1_REG_H_ +#define _CAM_ICP_V1_REG_H_ + +#define ICP_V1_CSR_NSEC_RESET 0x4 +#define ICP_V1_GEN_PURPOSE_REG_OFFSET 0x40 +#define ICP_V1_CSR_FUNC_RESET (1 << 4) +#define ICP_V1_CSR_DBG_RESET (1 << 3) +#define ICP_V1_CSR_CPU_RESET (1 << 2) + +#define ICP_V1_CSR_CONTROL 0x8 +#define ICP_V1_CSR_DBGSWENABLE (1 << 22) +#define ICP_V1_CSR_EDBGRQ (1 << 14) +#define ICP_V1_CSR_EN_CLKGATE_WFI (1 << 12) +#define ICP_V1_CSR_CPU_EN (1 << 9) +#define ICP_V1_CSR_WAKE_UP_EN (1 << 4) + +#define ICP_V1_CSR_FULL_DBG_EN (ICP_V1_CSR_DBGSWENABLE | ICP_V1_CSR_EDBGRQ) +#define ICP_V1_CSR_FULL_CPU_EN (ICP_V1_CSR_CPU_EN | \ + ICP_V1_CSR_WAKE_UP_EN | \ + ICP_V1_CSR_EN_CLKGATE_WFI) + +#define ICP_V1_CSR_A2HOSTINTEN 0x10 +#define ICP_V1_WDT_WS1EN (1 << 2) +#define ICP_V1_WDT_WS0EN (1 << 1) +#define ICP_V1_A2HOSTINTEN (1 << 0) + +#define ICP_V1_CSR_HOST2ICPINT 0x30 +#define ICP_V1_HOSTINT (1 << 0) +#define ICP_V1_HOST_INT_CLR 0x18 +#define ICP_V1_HOST_INT_STATUS 0x1c +#define ICP_V1_HOST_INT_SET 0x20 + +#define ICP_V1_CSR_STATUS 0x200 +#define ICP_V1_CSR_STANDBYWFI (1 << 7) + +#define ICP_V1_INIT_REQ 0x48 +#define ICP_V1_INIT_RESPONSE 0x4c +#define ICP_V1_SHARED_MEM_PTR 0x50 +#define ICP_V1_SHARED_MEM_SIZE 0x54 +#define ICP_V1_QTBL_PTR 0x58 +#define ICP_V1_UNCACHED_HEAP_PTR 0x5c +#define ICP_V1_UNCACHED_HEAP_SIZE 0x60 + +#endif /* _CAM_ICP_V1_REG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_core.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_core.c new file mode 100644 index 0000000000..bdc88e00c7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_core.c @@ -0,0 +1,1496 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include + +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_icp_hw_intf.h" +#include "hfi_intf.h" +#include "hfi_sys_defs.h" +#include "cam_icp_proc_common.h" +#include "cam_icp_v2_core.h" +#include "cam_common_util.h" +#include "cam_compat.h" +#include "cam_presil_hw_access.h" +#include "cam_icp_soc_common.h" + +#define TZ_STATE_SUSPEND 0 +#define TZ_STATE_RESUME 1 + +#define ICP_FW_NAME_MAX_SIZE 32 + +#define ICP_V2_IRQ_TEST_TIMEOUT 1000 + +static const struct hfi_ops hfi_icp_v2_ops = { + .irq_raise = cam_icp_v2_irq_raise, + .irq_enable = cam_icp_v2_irq_enable, + .iface_addr = cam_icp_v2_iface_addr, +}; + +static void cam_icp_v2_fw_coredump(struct platform_device *pdev) +{ + int rc = 0; + struct device_node *node = NULL; + struct resource res = {0}; + phys_addr_t phys_mem = 0; + size_t res_size = 0; + void *cpu_addr = NULL, *data = NULL; + + node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); + if (!node) + return; + + rc = of_address_to_resource(node, 0, &res); + of_node_put(node); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to get firmware resource address rc=%d", rc); + return; + } + + phys_mem = res.start; + res_size = (size_t)resource_size(&res); + + cpu_addr = memremap(phys_mem, res_size, MEMREMAP_WC); + if (!cpu_addr) { + CAM_ERR(CAM_ICP, "Unable to map firmware carve out"); + return; + } + + data = vmalloc(res_size); + if (!data) { + CAM_ERR(CAM_ICP, "Failed to dynamically allocate memory of size: %llu", + res_size); + goto unmap_iomem; + } + + memcpy(data, cpu_addr, res_size); + dev_coredumpv(&pdev->dev, data, res_size, GFP_KERNEL); + +unmap_iomem: + memunmap(cpu_addr); +} + +static int cam_icp_v2_ubwc_configure(struct cam_hw_soc_info *soc_info, + struct cam_icp_v2_core_info *core_info, void *args, uint32_t arg_size) +{ + struct cam_icp_soc_info *soc_priv; + struct cam_icp_ubwc_cfg_cmd *ubwc_cmd = args; + struct 
cam_icp_proc_ubwc_cfg_cmd ubwc_proc_cmd; + + if (!soc_info || !core_info || !args) { + CAM_ERR(CAM_ICP, + "Invalid args: soc info is %s core info is %s cmd args is %s", + CAM_IS_NULL_TO_STR(soc_info), CAM_IS_NULL_TO_STR(core_info), + CAM_IS_NULL_TO_STR(args)); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_icp_ubwc_cfg_cmd)) { + CAM_ERR(CAM_ICP, "Invalid ubwc cfg arg size: %u", + arg_size); + return -EINVAL; + } + + soc_priv = soc_info->soc_private; + + ubwc_proc_cmd.ubwc_cfg = &soc_priv->uconfig.ubwc_cfg_ext; + ubwc_proc_cmd.ubwc_cfg_dev_mask = ubwc_cmd->ubwc_cfg_dev_mask; + + return cam_icp_proc_ubwc_configure(&ubwc_proc_cmd, + ubwc_cmd->disable_ubwc_comp, core_info->hfi_handle); +} + +static int cam_icp_v2_cpas_vote(struct cam_icp_v2_core_info *core_info, + struct cam_icp_cpas_vote *vote) +{ + if (!core_info) + return -EINVAL; + + return cam_icp_proc_cpas_vote(core_info->cpas_handle, vote); +} + +static bool cam_icp_v2_cpas_cb(uint32_t handle, void *user_data, + struct cam_cpas_irq_data *irq_data) +{ + bool ret = false; + (void)user_data; + + if (!irq_data) + return false; + + switch (irq_data->irq_type) { + case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ICP, + "IPE/BPS UBWC decode error status=0x%08x", + irq_data->u.dec_err.decerr_status.value); + ret = true; + break; + case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ICP, + "IPE/BPS UBWC encode error status=0x%08x", + irq_data->u.enc_err.encerr_status.value); + ret = true; + break; + default: + break; + } + + return ret; +} + +int cam_icp_v2_cpas_register(struct cam_hw_intf *icp_v2_intf) +{ + struct cam_cpas_register_params params; + struct cam_hw_info *icp_v2_info; + struct cam_icp_v2_core_info *core_info; + int rc; + + if (!icp_v2_intf) + return -EINVAL; + + icp_v2_info = icp_v2_intf->hw_priv; + + params.dev = icp_v2_info->soc_info.dev; + params.cell_index = icp_v2_intf->hw_idx; + params.cam_cpas_client_cb = cam_icp_v2_cpas_cb; + params.userdata = 
NULL; + + strscpy(params.identifier, "icp", CAM_HW_IDENTIFIER_LENGTH); + + rc = cam_cpas_register_client(¶ms); + if (rc) + return rc; + + core_info = icp_v2_info->core_info; + core_info->cpas_handle = params.client_handle; + + return rc; +} + +int cam_icp_v2_cpas_unregister(struct cam_hw_intf *icp_v2_intf) +{ + struct cam_hw_info *icp_v2_info; + struct cam_icp_v2_core_info *core_info; + + if (!icp_v2_intf) + return -EINVAL; + + icp_v2_info = icp_v2_intf->hw_priv; + core_info = icp_v2_info->core_info; + + return cam_cpas_unregister_client(core_info->cpas_handle); +} + +static int __icp_v2_cpas_start(struct cam_icp_v2_core_info *core_info, + struct cam_icp_cpas_vote *vote) +{ + int rc; + + if (!core_info || core_info->cpas_start) + return -EINVAL; + + rc = cam_cpas_start(core_info->cpas_handle, + &vote->ahb_vote, &vote->axi_vote); + if (rc) { + CAM_ERR(CAM_ICP, "failed to start cpas rc=%d", rc); + return rc; + } + + core_info->cpas_start = true; + + return 0; +} + +static int cam_icp_v2_cpas_start(struct cam_icp_v2_core_info *core_info) +{ + struct cam_icp_cpas_vote vote; + + vote.ahb_vote.type = CAM_VOTE_ABSOLUTE; + vote.ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + vote.axi_vote.num_paths = 1; + + vote.axi_vote.axi_path[0].path_data_type = CAM_ICP_DEFAULT_AXI_PATH; + vote.axi_vote.axi_path[0].transac_type = CAM_ICP_DEFAULT_AXI_TRANSAC; + vote.axi_vote.axi_path[0].camnoc_bw = CAM_ICP_BW_BYTES_VOTE; + vote.axi_vote.axi_path[0].mnoc_ab_bw = CAM_ICP_BW_BYTES_VOTE; + vote.axi_vote.axi_path[0].mnoc_ib_bw = CAM_ICP_BW_BYTES_VOTE; + + return __icp_v2_cpas_start(core_info, &vote); +} + +static int cam_icp_v2_cpas_stop(struct cam_icp_v2_core_info *core_info) +{ + int rc; + + if (!core_info || !core_info->cpas_start) + return -EINVAL; + + rc = cam_cpas_stop(core_info->cpas_handle); + if (rc) { + CAM_ERR(CAM_ICP, "failed to stop cpas rc=%d", rc); + return rc; + } + + core_info->cpas_start = false; + + return 0; +} + +int cam_icp_v2_hw_init(void *priv, void *args, uint32_t 
arg_size) +{ + struct cam_hw_info *icp_v2 = priv; + struct cam_icp_v2_core_info *core_info; + unsigned long flags; + int rc; + bool send_freq_info; + + if (!icp_v2 || !args) { + CAM_ERR(CAM_ICP, + "Invalid parameters: icp hw info is %s args is %s", + CAM_IS_NULL_TO_STR(icp_v2), CAM_IS_NULL_TO_STR(args)); + return -EINVAL; + } + + if (arg_size != sizeof(bool)) { + CAM_ERR(CAM_ICP, "Invalid hw init cmd args"); + return -EINVAL; + } + + send_freq_info = *((bool *)args); + core_info = icp_v2->core_info; + + spin_lock_irqsave(&icp_v2->hw_lock, flags); + if (icp_v2->hw_state == CAM_HW_STATE_POWER_UP) { + core_info->power_on_cnt++; + spin_unlock_irqrestore(&icp_v2->hw_lock, flags); + return 0; + } + spin_unlock_irqrestore(&icp_v2->hw_lock, flags); + + rc = cam_vmrm_soc_acquire_resources(CAM_HW_ID_ICP + icp_v2->soc_info.index); + if (rc) { + CAM_ERR(CAM_ICP, "ICP hw id %x acquire ownership failed", + CAM_HW_ID_ICP + icp_v2->soc_info.index); + return rc; + } + + rc = cam_icp_v2_cpas_start(icp_v2->core_info); + if (rc) + return rc; + + rc = cam_icp_soc_resources_enable(&icp_v2->soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "failed to enable soc resources rc=%d", rc); + goto soc_fail; + } else { + if (send_freq_info) { + int32_t clk_rate = 0; + + clk_rate = cam_wrapper_clk_get_rate( + icp_v2->soc_info.clk[icp_v2->soc_info.src_clk_idx], + icp_v2->soc_info.clk_name[icp_v2->soc_info.src_clk_idx]); + hfi_send_freq_info(core_info->hfi_handle, clk_rate); + } + } + + spin_lock_irqsave(&icp_v2->hw_lock, flags); + icp_v2->hw_state = CAM_HW_STATE_POWER_UP; + core_info->power_on_cnt++; + spin_unlock_irqrestore(&icp_v2->hw_lock, flags); + CAM_DBG(CAM_ICP, "ICP%u powered on", icp_v2->soc_info.index); + + return 0; + +soc_fail: + cam_icp_v2_cpas_stop(icp_v2->core_info); + return rc; +} + +int cam_icp_v2_hw_deinit(void *priv, void *args, + uint32_t arg_size) +{ + struct cam_hw_info *icp_v2_info = priv; + struct cam_icp_v2_core_info *core_info; + unsigned long flags; + int rc; + bool 
send_freq_info; + + if (!icp_v2_info || !args) { + CAM_ERR(CAM_ICP, "ICP device info is %s cmd args is %s", + CAM_IS_NULL_TO_STR(icp_v2_info), CAM_IS_NULL_TO_STR(args)); + return -EINVAL; + } + + if (arg_size != sizeof(bool)) { + CAM_ERR(CAM_ICP, "Invalid hw deinit cmd args"); + return -EINVAL; + } + + send_freq_info = *((bool *)args); + core_info = icp_v2_info->core_info; + + spin_lock_irqsave(&icp_v2_info->hw_lock, flags); + if (icp_v2_info->hw_state == CAM_HW_STATE_POWER_DOWN) { + spin_unlock_irqrestore(&icp_v2_info->hw_lock, flags); + return 0; + } + + core_info->power_on_cnt--; + if (core_info->power_on_cnt) { + spin_unlock_irqrestore(&icp_v2_info->hw_lock, flags); + CAM_DBG(CAM_ICP, "power on reference still held %u", + core_info->power_on_cnt); + return 0; + } + spin_unlock_irqrestore(&icp_v2_info->hw_lock, flags); + + if (send_freq_info) + hfi_send_freq_info(core_info->hfi_handle, 0); + + rc = cam_icp_soc_resources_disable(&icp_v2_info->soc_info); + if (rc) + CAM_WARN(CAM_ICP, + "failed to disable soc resources rc=%d", rc); + + rc = cam_icp_v2_cpas_stop(icp_v2_info->core_info); + if (rc) + CAM_WARN(CAM_ICP, "cpas stop failed rc=%d", rc); + + spin_lock_irqsave(&icp_v2_info->hw_lock, flags); + icp_v2_info->hw_state = CAM_HW_STATE_POWER_DOWN; + spin_unlock_irqrestore(&icp_v2_info->hw_lock, flags); + + rc = cam_vmrm_soc_release_resources(CAM_HW_ID_ICP + icp_v2_info->soc_info.index); + if (rc) { + CAM_ERR(CAM_ICP, "ICP hw id %x release ownership failed", + CAM_HW_ID_ICP + icp_v2_info->soc_info.index); + return rc; + } + + CAM_DBG(CAM_ICP, "ICP%u powered off", icp_v2_info->soc_info.index); + return rc; +} +static int prepare_boot(struct cam_hw_info *icp_v2_info, struct cam_icp_boot_args *args) +{ + struct cam_icp_v2_core_info *core_info = icp_v2_info->core_info; + unsigned long flags; + + if (!core_info) { + CAM_ERR(CAM_ICP, "invalid args: core_info is NULL icp_v2_info=%pK args=%pK", + icp_v2_info, args); + return -EINVAL; + } + + if (!args->use_sec_pil) { + 
core_info->fw_params.fw_buf = args->firmware.iova; + core_info->fw_params.fw_kva_addr = args->firmware.kva; + core_info->fw_params.fw_buf_len = args->firmware.len; + } + + spin_lock_irqsave(&icp_v2_info->hw_lock, flags); + core_info->irq_cb.data = args->irq_cb.data; + core_info->irq_cb.cb = args->irq_cb.cb; + spin_unlock_irqrestore(&icp_v2_info->hw_lock, flags); + + return 0; +} + +static int cam_icp_v2_prepare_boot(struct cam_hw_info *icp_v2_info, + struct cam_icp_boot_args *args, uint32_t arg_size) +{ + int rc; + + if (!icp_v2_info || !args) { + CAM_ERR(CAM_ICP, "invalid args: icp_v2_info=%pK args=%pK", icp_v2_info, args); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_icp_boot_args)) { + CAM_ERR(CAM_ICP, "invalid boot args size"); + return -EINVAL; + } + + rc = prepare_boot(icp_v2_info, args); + if (rc) { + CAM_ERR(CAM_ICP, "prepare boot failed"); + return rc; + } + + ((struct cam_icp_v2_core_info *)(icp_v2_info->core_info))->use_sec_pil = true; + return 0; + +} + +static int prepare_shutdown(struct cam_hw_info *icp_v2_info) +{ + struct cam_icp_v2_core_info *core_info = icp_v2_info->core_info; + unsigned long flags; + + if (!core_info) { + CAM_ERR(CAM_ICP, "invalid args:core_info is NULL icp_v2_info=%pK", icp_v2_info); + return -EINVAL; + } + + core_info->fw_params.fw_buf = 0x0; + core_info->fw_params.fw_kva_addr = 0x0; + core_info->fw_params.fw_buf_len = 0x0; + + spin_lock_irqsave(&icp_v2_info->hw_lock, flags); + core_info->irq_cb.data = NULL; + core_info->irq_cb.cb = NULL; + spin_unlock_irqrestore(&icp_v2_info->hw_lock, flags); + return 0; +} + +static int cam_icp_v2_prepare_shutdown(struct cam_hw_info *icp_v2_info) +{ + int rc; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "invalid args: icp_v2_info is NULL"); + return -EINVAL; + } + + rc = prepare_shutdown(icp_v2_info); + if (rc) { + CAM_ERR(CAM_ICP, "prepare shutdown failed"); + return rc; + } + + ((struct cam_icp_v2_core_info *)(icp_v2_info->core_info))->use_sec_pil = false; + return 0; +} + +/* 
Used if ICP_SYS is not protected */ +static int __cam_icp_v2_power_collapse(struct cam_hw_info *icp_v2_info) +{ + int32_t sys_base_idx; + uint32_t status = 0; + void __iomem *base; + struct cam_icp_v2_core_info *core_info = NULL; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "invalid ICP dev info"); + return -EINVAL; + } + + core_info = icp_v2_info->core_info; + sys_base_idx = core_info->reg_base_idx[ICP_V2_SYS_BASE]; + + if (sys_base_idx < 0) { + CAM_ERR(CAM_ICP, "No reg base idx found for ICP_SYS: %d", + sys_base_idx); + return -EINVAL; + } + + base = icp_v2_info->soc_info.reg_map[sys_base_idx].mem_base; + + /** + * Need to poll here to confirm that FW has triggered WFI + * and Host can then proceed. No interrupt is expected + * from FW at this time. + */ + if (cam_common_read_poll_timeout(base + ICP_V2_SYS_STATUS, + PC_POLL_DELAY_US, PC_POLL_TIMEOUT_US, + ICP_V2_STANDBYWFI, ICP_V2_STANDBYWFI, + &status)) { + CAM_ERR(CAM_ICP, "WFI poll timed out: status=0x%08x", status); + return -ETIMEDOUT; + } + + cam_io_w_mb(0x0, base + ICP_V2_SYS_CONTROL); + return 0; +} + +/* Used if ICP_SYS and ICP_DOM_MASK are not protected */ +static int __cam_icp_v2_power_resume(struct cam_hw_info *icp_v2_info) +{ + int32_t sys_base_idx, dom_mask_base_idx; + void __iomem *sys_base, *dom_mask_base; + struct cam_icp_soc_info *soc_priv; + struct cam_hw_soc_info *soc_info; + struct cam_icp_v2_core_info *core_info = NULL; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "invalid ICP dev info"); + return -EINVAL; + } + + soc_info = &icp_v2_info->soc_info; + core_info = icp_v2_info->core_info; + soc_priv = soc_info->soc_private; + sys_base_idx = core_info->reg_base_idx[ICP_V2_SYS_BASE]; + dom_mask_base_idx = core_info->reg_base_idx[ICP_V2_DOM_MASK_BASE]; + + if (sys_base_idx < 0) { + CAM_ERR(CAM_ICP, "No reg base idx found for ICP_SYS: %d", + sys_base_idx); + return -EINVAL; + } + + sys_base = icp_v2_info->soc_info.reg_map[sys_base_idx].mem_base; + + cam_io_w_mb(ICP_V2_FUNC_RESET, + sys_base + 
ICP_V2_SYS_RESET); + + /* + * ICP0 fw starts at 0x0 as before, while ICP1 fw in non-secure loading + * starts at 128MB in order to avoid conflicts with ICP0 fw or AHB space + */ + if (soc_info->index == 1) { + struct cam_cpas_addr_trans_data addr_trans_data; + + addr_trans_data.enable = true; + /* Mapped (0 - 64MB) to (128MB - 192MB) */ + addr_trans_data.val_offset0 = 0x08000000; + addr_trans_data.val_base1 = 0x04000000; + + /* Avoid address translator touching other space */ + addr_trans_data.val_offset1 = 0x0; + addr_trans_data.val_base2 = 0xfc000000; + addr_trans_data.val_offset2 = 0x0; + addr_trans_data.val_base3 = 0xfc000000; + addr_trans_data.val_offset3 = 0x0; + + cam_cpas_set_addr_trans(core_info->cpas_handle, &addr_trans_data); + } + + if (soc_priv->qos_val) + cam_io_w_mb(soc_priv->qos_val, sys_base + ICP_V2_SYS_ACCESS); + + /* Program domain ID reg mask values if reg base is available */ + if (dom_mask_base_idx >= 0) { + dom_mask_base = icp_v2_info->soc_info.reg_map[dom_mask_base_idx].mem_base; + + CAM_DBG(CAM_ICP, "domain_cfg0, offset: 0x%x: 0x%x, domain_cfg1, offset: 0x%x: 0x%x", + ICP_V2_DOM_0_CFG_OFFSET, ICP_V2_DOMAIN_MASK_CFG_0, + ICP_V2_DOM_1_CFG_OFFSET, ICP_V2_DOMAIN_MASK_CFG_1); + + cam_io_w_mb(ICP_V2_DOMAIN_MASK_CFG_0, + dom_mask_base + ICP_V2_DOM_0_CFG_OFFSET); + cam_io_w_mb(ICP_V2_DOMAIN_MASK_CFG_1, + dom_mask_base + ICP_V2_DOM_1_CFG_OFFSET); + } + + cam_io_w_mb(ICP_V2_EN_CPU, + sys_base + ICP_V2_SYS_CONTROL); + + return 0; +} + +#ifndef CONFIG_CAM_PRESIL +/* Used for non secure FW load */ +static int32_t __cam_non_sec_load_fw(void *device_priv) +{ + int32_t rc = 0; + uint32_t fw_size; + char firmware_name[ICP_FW_NAME_MAX_SIZE] = {0}; + const char *fw_name; + const uint8_t *fw_start = NULL; + struct cam_hw_info *icp_v2_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_icp_v2_core_info *core_info = NULL; + struct platform_device *pdev = NULL; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return 
-EINVAL; + } + + soc_info = &icp_v2_dev->soc_info; + core_info = (struct cam_icp_v2_core_info *)icp_v2_dev->core_info; + pdev = soc_info->pdev; + + /** + * Use paddr to map 0xE0400000 and 0xE0420000 as these + * addresses are routed internally by the core. These segments + * are used by the firmware to make use of the rom packing feature. + */ + + rc = of_property_read_string(pdev->dev.of_node, "fw_name", + &fw_name); + if (rc) { + CAM_ERR(CAM_ICP, "FW image name not found"); + return rc; + } + + /* Account for ".elf" size [4 characters] */ + if (strlen(fw_name) >= (ICP_FW_NAME_MAX_SIZE - 4)) { + CAM_ERR(CAM_ICP, "Invalid fw name %s", fw_name); + return -EINVAL; + } + + scnprintf(firmware_name, ARRAY_SIZE(firmware_name), + "%s.elf", fw_name); + + rc = firmware_request_nowarn(&core_info->fw_params.fw_elf, + firmware_name, &pdev->dev); + if (rc) { + CAM_ERR(CAM_ICP, "Failed to locate %s fw: %d", + firmware_name, rc); + return rc; + } + + if (!core_info->fw_params.fw_elf) { + CAM_ERR(CAM_ICP, "Invalid elf size"); + rc = -EINVAL; + goto fw_download_failed; + } + + fw_start = core_info->fw_params.fw_elf->data; + + rc = cam_icp_validate_fw(fw_start, EM_XTENSA); + if (rc) { + CAM_ERR(CAM_ICP, "fw elf validation failed"); + goto fw_download_failed; + } + + rc = cam_icp_get_fw_size(fw_start, &fw_size); + if (rc) { + CAM_ERR(CAM_ICP, "unable to get fw size"); + goto fw_download_failed; + } + + if (core_info->fw_params.fw_buf_len < fw_size) { + CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu", + fw_size, core_info->fw_params.fw_buf_len); + rc = -EINVAL; + goto fw_download_failed; + } + + rc = cam_icp_program_fw(fw_start, + core_info->fw_params.fw_kva_addr); + if (rc) { + CAM_ERR(CAM_ICP, "fw program is failed"); + goto fw_download_failed; + } + +fw_download_failed: + release_firmware(core_info->fw_params.fw_elf); + return rc; +} +#else /* #ifndef CONFIG_CAM_PRESIL */ +static int __cam_non_sec_load_fw(struct cam_hw_info *icp_v2_info) +{ + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, 
"Invalid ICP dev info"); + return -EINVAL; + } + + cam_presil_send_event(CAM_PRESIL_EVENT_HFI_REG_ON_FIRST_REG_START_FW_DOWNLOAD, 0xFF); + + return 0; +} +#endif /* #ifndef CONFIG_CAM_PRESIL */ + +/* Used for non secure FW load */ +static int cam_icp_v2_non_sec_boot( + struct cam_hw_info *icp_v2_info, + struct cam_icp_boot_args *args, + uint32_t arg_size) +{ + int rc; + struct cam_icp_soc_info *soc_priv; + + if (!icp_v2_info || !args) { + CAM_ERR(CAM_ICP, + "invalid args: icp_v2_dev=%pK args=%pK", + icp_v2_info, args); + return -EINVAL; + } + + if (arg_size != sizeof(struct cam_icp_boot_args)) { + CAM_ERR(CAM_ICP, "invalid boot args size"); + return -EINVAL; + } + + soc_priv = (struct cam_icp_soc_info *)icp_v2_info->soc_info.soc_private; + if (icp_v2_info->soc_info.num_mem_block > ICP_V2_BASE_MAX) { + CAM_ERR(CAM_ICP, "check reg config in DT v 0x%x n %d", + soc_priv->hw_version, icp_v2_info->soc_info.num_mem_block); + return -EINVAL; + } + + rc = prepare_boot(icp_v2_info, args); + if (rc) { + CAM_ERR(CAM_ICP, "prepare boot failed"); + return rc; + } + + + rc = __cam_non_sec_load_fw(icp_v2_info); + if (rc) { + CAM_ERR(CAM_ICP, + "firmware download failed rc=%d", rc); + goto err; + } + + rc = __cam_icp_v2_power_resume(icp_v2_info); + if (rc) { + CAM_ERR(CAM_ICP, "ICP boot failed rc=%d", rc); + goto err; + } + + return 0; + +err: + prepare_shutdown(icp_v2_info); + return rc; +} + +#if IS_REACHABLE(CONFIG_QCOM_MDT_LOADER) +static int __load_firmware(struct platform_device *pdev, + uint32_t fw_pas_id) +{ + const char *fw_name; + const struct firmware *firmware = NULL; + char firmware_name[ICP_FW_NAME_MAX_SIZE] = {0}; + void *vaddr = NULL; + struct device_node *node; + struct resource res; + phys_addr_t res_start; + size_t res_size; + ssize_t fw_size; + int rc; + + if (!pdev) { + CAM_ERR(CAM_ICP, "invalid args"); + return -EINVAL; + } + + rc = of_property_read_string(pdev->dev.of_node, "fw_name", + &fw_name); + if (rc) { + CAM_ERR(CAM_ICP, "FW image name not found"); + 
return rc; + } + + /* Account for ".mdt" size [4 characters] */ + if (strlen(fw_name) >= (ICP_FW_NAME_MAX_SIZE - 4)) { + CAM_ERR(CAM_ICP, "Invalid fw name %s", fw_name); + return -EINVAL; + } + + scnprintf(firmware_name, ARRAY_SIZE(firmware_name), + "%s.mbn", fw_name); + + node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); + if (!node) { + CAM_ERR(CAM_ICP, "firmware memory region not found"); + return -ENODEV; + } + + rc = of_address_to_resource(node, 0, &res); + of_node_put(node); + if (rc) { + CAM_ERR(CAM_ICP, "missing firmware resource address rc=%d", rc); + return rc; + } + + res_start = res.start; + res_size = (size_t)resource_size(&res); + + rc = firmware_request_nowarn(&firmware, firmware_name, &pdev->dev); + if (rc) { + CAM_ERR(CAM_ICP, + "error requesting %s firmware rc=%d", + firmware_name, rc); + return rc; + } + + /* Make sure carveout and binary sizes are compatible */ + fw_size = qcom_mdt_get_size(firmware); + if (fw_size < 0 || res_size < (size_t)fw_size) { + CAM_ERR(CAM_ICP, + "carveout[sz=%zu] not big enough for firmware[sz=%zd]", + res_size, fw_size); + rc = -EINVAL; + goto out; + } + + vaddr = ioremap_wc(res_start, res_size); + if (!vaddr) { + CAM_ERR(CAM_ICP, "unable to map firmware carveout"); + rc = -ENOMEM; + goto out; + } + + rc = qcom_mdt_load(&pdev->dev, firmware, firmware_name, fw_pas_id, + vaddr, res_start, res_size, NULL); + if (rc) { + CAM_ERR(CAM_ICP, "failed to load firmware rc=%d", rc); + goto out; + } + +out: + if (vaddr) + iounmap(vaddr); + + release_firmware(firmware); + return rc; +} +#endif + +static int cam_icp_v2_boot(struct cam_hw_info *icp_v2_info, + struct cam_icp_boot_args *args, uint32_t arg_size) +{ + int rc = 0; + struct cam_icp_v2_core_info *core_info = NULL; + struct cam_icp_soc_info *soc_priv; + + if (!IS_REACHABLE(CONFIG_QCOM_MDT_LOADER)) + return -EOPNOTSUPP; + + if (!icp_v2_info || !args) { + CAM_ERR(CAM_ICP, + "invalid args: icp_v2_info=%pK args=%pK", + icp_v2_info, args); + return -EINVAL; + } + + 
if (arg_size != sizeof(struct cam_icp_boot_args)) { + CAM_ERR(CAM_ICP, "invalid boot args size"); + return -EINVAL; + } + + core_info = (struct cam_icp_v2_core_info *)icp_v2_info->core_info; + soc_priv = (struct cam_icp_soc_info *)icp_v2_info->soc_info.soc_private; + + rc = prepare_boot(icp_v2_info, args); + if (rc) { + CAM_ERR(CAM_ICP, "prepare boot failed"); + return rc; + } + +#if IS_REACHABLE(CONFIG_QCOM_MDT_LOADER) + rc = __load_firmware(icp_v2_info->soc_info.pdev, soc_priv->fw_pas_id); + if (rc) { + CAM_ERR(CAM_ICP, "firmware loading failed rc=%d", rc); + goto err; + } +#endif + + rc = qcom_scm_pas_auth_and_reset(soc_priv->fw_pas_id); + if (rc) { + CAM_ERR(CAM_ICP, "auth and reset failed rc=%d", rc); + goto err; + } + + core_info->use_sec_pil = true; + return 0; +err: + prepare_shutdown(icp_v2_info); + return rc; +} + +static int cam_icp_v2_shutdown(struct cam_hw_info *icp_v2_info, bool *args, + uint32_t arg_size) +{ + int rc = 0; + bool fw_dump = false; + struct cam_icp_v2_core_info *core_info = + (struct cam_icp_v2_core_info *)icp_v2_info->core_info; + struct cam_icp_soc_info *soc_priv = + (struct cam_icp_soc_info *)icp_v2_info->soc_info.soc_private; + + rc = prepare_shutdown(icp_v2_info); + if (rc) { + CAM_ERR(CAM_ICP, "prepare shutdown failed"); + return rc; + } + + if (arg_size != sizeof(bool)) + CAM_ERR(CAM_ICP, "Invalid args size %u", arg_size); + else + fw_dump = *args; + + if (core_info->use_sec_pil) { + rc = qcom_scm_pas_shutdown(soc_priv->fw_pas_id); + if (fw_dump) + cam_icp_v2_fw_coredump(icp_v2_info->soc_info.pdev); + } else { + int32_t sys_base_idx = core_info->reg_base_idx[ICP_V2_SYS_BASE]; + void __iomem *base; + + if (sys_base_idx < 0) { + CAM_ERR(CAM_ICP, "No reg base idx found for ICP_SYS: %d", + sys_base_idx); + return -EINVAL; + } + + base = icp_v2_info->soc_info.reg_map[sys_base_idx].mem_base; + cam_io_w_mb(0x0, base + ICP_V2_SYS_CONTROL); + } + + core_info->use_sec_pil = false; + return rc; +} + +static void __cam_icp_v2_core_reg_dump( 
+ struct cam_hw_info *icp_v2_info, uint32_t dump_type) +{ + int i; + int32_t csr_base_idx; + size_t len = 0; + char log_info[512]; + struct cam_icp_v2_core_info *core_info = icp_v2_info->core_info; + void __iomem *irq_base, *csr_base, *csr_gp_base; + + csr_base_idx = core_info->reg_base_idx[ICP_V2_CSR_BASE]; + csr_base = icp_v2_info->soc_info.reg_map[csr_base_idx].mem_base; + csr_gp_base = csr_base + ICP_V2_GEN_PURPOSE_REG_OFFSET; + irq_base = icp_v2_info->soc_info.reg_map[core_info->irq_regbase_idx].mem_base; + + if (dump_type & CAM_ICP_DUMP_STATUS_REGISTERS) + CAM_INFO(CAM_ICP, "ICP PFault status:0x%x", + cam_io_r_mb(irq_base + core_info->hw_info->pfault_info)); + + if (dump_type & CAM_ICP_DUMP_CSR_REGISTERS) { + for (i = 0; i < ICP_V2_CSR_GP_REG_COUNT;) { + CAM_INFO_BUF(CAM_ICP, log_info, 512, &len, + "GP_%d: 0x%x GP_%d: 0x%x GP_%d: 0x%x GP_%d: 0x%x", + i, cam_io_r_mb(csr_gp_base + (i << 2)), + (i + 1), cam_io_r_mb(csr_gp_base + ((i + 1) << 2)), + (i + 2), cam_io_r_mb(csr_gp_base + ((i + 2) << 2)), + (i + 3), cam_io_r_mb(csr_gp_base + ((i + 3) << 2))); + i += 4; + } + + CAM_INFO(CAM_ICP, "ICP CSR GP registers - %s", log_info); + } +} + +/* API controls collapse/resume of ICP */ +static int cam_icp_v2_core_control(struct cam_hw_info *icp_v2_info, + uint32_t state) +{ + int rc = 0; + struct cam_icp_v2_core_info *core_info = + (struct cam_icp_v2_core_info *)icp_v2_info->core_info; + struct cam_icp_soc_info *soc_priv = + (struct cam_icp_soc_info *)icp_v2_info->soc_info.soc_private; + + if (core_info->use_sec_pil) { + rc = qcom_scm_set_remote_state(state, soc_priv->fw_pas_id); + if (rc) { + CAM_ERR(CAM_ICP, + "remote state set to %s failed rc=%d", + (state == TZ_STATE_RESUME ? 
"resume" : "suspend"), rc); + __cam_icp_v2_core_reg_dump(icp_v2_info, CAM_ICP_DUMP_STATUS_REGISTERS); + } + } else { + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_ICP, "PRESIL-ICP-B2B-HFI-INIT no PC no resume return 0"); + return 0; + } + + if (state == TZ_STATE_RESUME) { + rc = __cam_icp_v2_power_resume(icp_v2_info); + if (rc) + CAM_ERR(CAM_ICP, "ICP resume failed rc=%d", rc); + } else { + rc = __cam_icp_v2_power_collapse(icp_v2_info); + if (rc) + CAM_ERR(CAM_ICP, "ICP collapse failed rc=%d", rc); + } + } + + return rc; +} + +static inline int cam_icp_v2_download_fw(struct cam_hw_info *icp_v2_info, + struct cam_icp_boot_args *args, uint32_t arg_size) +{ + int rc; + + CAM_INFO(CAM_ICP, "Loading Secure PIL : %s", CAM_BOOL_TO_YESNO(args->use_sec_pil)); + + if (args->use_sec_pil) + rc = cam_icp_v2_boot( + icp_v2_info, args, arg_size); + else + rc = cam_icp_v2_non_sec_boot( + icp_v2_info, args, arg_size); + + return rc; +} + +static int __cam_icp_v2_update_clk_rate(struct cam_hw_info *icp_v2_info, + void *args, uint32_t arg_size) +{ + int32_t clk_level = 0, rc; + struct cam_ahb_vote ahb_vote; + struct cam_icp_v2_core_info *core_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + + if (!args) { + CAM_ERR(CAM_ICP, "Invalid args is NULL"); + return -EINVAL; + } + + soc_info = &icp_v2_info->soc_info; + core_info = icp_v2_info->core_info; + if (!core_info || !soc_info) { + CAM_ERR(CAM_ICP, "Invalid args"); + return -EINVAL; + } + + if (arg_size != sizeof(int32_t)) { + CAM_ERR(CAM_ICP, "Invalid icp update clk args"); + return -EINVAL; + } + + clk_level = *((int32_t *)args); + CAM_DBG(CAM_ICP, + "Update ICP clock to level [%d]", clk_level); + rc = cam_icp_soc_update_clk_rate(soc_info, clk_level, core_info->hfi_handle); + if (rc) + CAM_WARN(CAM_ICP, + "Failed to update clk to level: %d rc: %d", + clk_level, rc); + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = clk_level; + rc = cam_cpas_update_ahb_vote( + core_info->cpas_handle, &ahb_vote); + if (rc) + 
CAM_WARN(CAM_ICP, + "Failed to update ahb vote rc: %d", rc); + + return rc; +} + +static int __cam_icp_v2_fw_mini_dump(struct cam_icp_v2_core_info *core_info, + struct cam_icp_hw_dump_args *args) +{ + if (!core_info) { + CAM_ERR(CAM_ICP, "Invalid param %pK", core_info); + return -EINVAL; + } + + return cam_icp_proc_mini_dump(args, core_info->fw_params.fw_kva_addr, + core_info->fw_params.fw_buf_len); +} + +static int cam_icp_v2_send_fw_init(struct cam_icp_v2_core_info *core_info) +{ + int rc; + + if (!core_info) { + CAM_ERR(CAM_ICP, "Core info is NULL"); + return -EINVAL; + } + + rc = hfi_send_system_cmd(core_info->hfi_handle, HFI_CMD_SYS_INIT, 0, 0); + if (rc) { + CAM_ERR(CAM_ICP, "Fail to send sys init command for hfi handle: %d", + core_info->hfi_handle); + return rc; + } + + return 0; +} + +static int cam_icp_v2_pc_prep(struct cam_icp_v2_core_info *core_info) +{ + int rc; + + if (!core_info) { + CAM_ERR(CAM_ICP, "Invalid core info is NULL"); + return -EINVAL; + } + + rc = hfi_send_system_cmd(core_info->hfi_handle, HFI_CMD_SYS_PC_PREP, 0, 0); + if (rc) { + CAM_ERR(CAM_ICP, "Fail to send PC collapse command for hfi handle: %d", + core_info->hfi_handle); + return rc; + } + + return 0; +} + +int cam_icp_v2_set_hfi_handle(struct cam_icp_v2_core_info *core_info, + void *args, uint32_t arg_size) +{ + if (!core_info || !args) { + CAM_ERR(CAM_ICP, "Core info is %s and args is %s", + CAM_IS_NULL_TO_STR(core_info), CAM_IS_NULL_TO_STR(args)); + return -EINVAL; + } + + if (arg_size != sizeof(int)) { + CAM_ERR(CAM_ICP, "Invalid set hfi handle command arg size:%u", + arg_size); + return -EINVAL; + } + + core_info->hfi_handle = *((int *)args); + + return 0; +} + +int cam_icp_v2_process_cmd(void *priv, uint32_t cmd_type, + void *args, uint32_t arg_size) +{ + struct cam_hw_info *icp_v2_info = priv; + int rc = -EINVAL; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "ICP device info cannot be NULL"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ICP_CMD_SET_HFI_HANDLE: + rc 
= cam_icp_v2_set_hfi_handle(icp_v2_info->core_info, args, arg_size); + break; + case CAM_ICP_CMD_PROC_SHUTDOWN: + rc = cam_icp_v2_shutdown(icp_v2_info, args, arg_size); + break; + case CAM_ICP_CMD_PROC_BOOT: + rc = cam_icp_v2_download_fw(icp_v2_info, args, arg_size); + break; + case CAM_ICP_CMD_POWER_COLLAPSE: + rc = cam_icp_v2_core_control(icp_v2_info, TZ_STATE_SUSPEND); + break; + case CAM_ICP_CMD_POWER_RESUME: + rc = cam_icp_v2_core_control(icp_v2_info, TZ_STATE_RESUME); + break; + case CAM_ICP_CMD_VOTE_CPAS: + rc = cam_icp_v2_cpas_vote(icp_v2_info->core_info, args); + break; + case CAM_ICP_CMD_CPAS_START: + rc = __icp_v2_cpas_start(icp_v2_info->core_info, args); + break; + case CAM_ICP_CMD_CPAS_STOP: + rc = cam_icp_v2_cpas_stop(icp_v2_info->core_info); + break; + case CAM_ICP_CMD_UBWC_CFG: + rc = cam_icp_v2_ubwc_configure(&icp_v2_info->soc_info, + icp_v2_info->core_info, args, arg_size); + break; + case CAM_ICP_SEND_INIT: + rc = cam_icp_v2_send_fw_init(icp_v2_info->core_info); + break; + case CAM_ICP_CMD_PC_PREP: + rc = cam_icp_v2_pc_prep(icp_v2_info->core_info); + break; + case CAM_ICP_CMD_CLK_UPDATE: { + rc = __cam_icp_v2_update_clk_rate(icp_v2_info, args, arg_size); + break; + } + case CAM_ICP_CMD_HW_DUMP: + /* Not supported for ICP_V2 */ + rc = 0; + break; + case CAM_ICP_CMD_HW_MINI_DUMP: { + rc = __cam_icp_v2_fw_mini_dump(icp_v2_info->core_info, args); + break; + } + case CAM_ICP_CMD_HW_REG_DUMP: { + uint32_t dump_type; + + if (!args) { + CAM_ERR(CAM_ICP, "Invalid args"); + break; + } + + dump_type = *(uint32_t *) args; + __cam_icp_v2_core_reg_dump(icp_v2_info, dump_type); + rc = 0; + break; + } + case CAM_ICP_CMD_PREP_BOOT: + rc = cam_icp_v2_prepare_boot(icp_v2_info, args, arg_size); + break; + case CAM_ICP_CMD_PREP_SHUTDOWN: + rc = cam_icp_v2_prepare_shutdown(icp_v2_info); + break; + default: + CAM_ERR(CAM_ICP, "invalid command type=%u", cmd_type); + break; + } + + return rc; +} + +irqreturn_t cam_icp_v2_handle_irq(int irq_num, void *data) +{ + struct 
cam_hw_info *icp_v2_info = data; + struct cam_icp_v2_core_info *core_info = NULL; + bool recover = false; + uint32_t status = 0; + int32_t wd0_base_idx; + void __iomem *irq_base; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "invalid ICP device info"); + return IRQ_NONE; + } + + core_info = icp_v2_info->core_info; + irq_base = icp_v2_info->soc_info.reg_map[core_info->irq_regbase_idx].mem_base; + + status = cam_io_r_mb(irq_base + core_info->hw_info->ob_irq_status); + + CAM_DBG(CAM_ICP, "irq icp[%d] received status 0x%x", icp_v2_info->soc_info.index, status); + cam_io_w_mb(status, irq_base + core_info->hw_info->ob_irq_clear); + cam_io_w_mb(ICP_V2_IRQ_CLEAR_CMD, irq_base + core_info->hw_info->ob_irq_cmd); + + if (core_info->is_irq_test) { + CAM_INFO(CAM_ICP, "ICP_V2 IRQ verified (status=0x%x)", status); + core_info->is_irq_test = false; + complete(&icp_v2_info->hw_complete); + return IRQ_HANDLED; + } + + /* WD clear sequence - SW listens only to WD0 */ + if (status & ICP_V2_WDT_BITE_WS0) { + wd0_base_idx = core_info->reg_base_idx[ICP_V2_WD0_BASE]; + + cam_io_w_mb(0x0, + icp_v2_info->soc_info.reg_map[wd0_base_idx].mem_base + + ICP_V2_WD_CTRL); + cam_io_w_mb(0x1, + icp_v2_info->soc_info.reg_map[wd0_base_idx].mem_base + + ICP_V2_WD_INTCLR); + CAM_ERR_RATE_LIMIT(CAM_ICP, "Fatal: Watchdog Bite from ICP"); + recover = true; + } + + spin_lock(&icp_v2_info->hw_lock); + if (core_info->irq_cb.cb) + core_info->irq_cb.cb(core_info->irq_cb.data, + recover); + spin_unlock(&icp_v2_info->hw_lock); + + return IRQ_HANDLED; +} + +void cam_icp_v2_irq_raise(void *priv) +{ + struct cam_hw_info *icp_v2_info = priv; + struct cam_icp_v2_core_info *core_info = NULL; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "Invalid ICP device info"); + return; + } + + core_info = icp_v2_info->core_info; + cam_io_w_mb(ICP_V2_HOST2ICPINT, + icp_v2_info->soc_info.reg_map[core_info->irq_regbase_idx].mem_base + + core_info->hw_info->host2icpint); +} + +void cam_icp_v2_irq_enable(void *priv) +{ + struct cam_hw_info 
*icp_v2_info = priv; + struct cam_icp_v2_core_info *core_info = NULL; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "Invalid ICP device info"); + return; + } + + core_info = icp_v2_info->core_info; + cam_io_w_mb(ICP_V2_WDT_BITE_WS0 | ICP_V2_ICP2HOSTINT, + icp_v2_info->soc_info.reg_map[core_info->irq_regbase_idx].mem_base + + core_info->hw_info->ob_irq_mask); +} + +int cam_icp_v2_test_irq_line(void *priv) +{ + struct cam_hw_info *icp_v2_info = priv; + struct cam_icp_v2_core_info *core_info = NULL; + void __iomem *irq_membase; + unsigned long rem_jiffies; + bool send_freq_info = false; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "invalid ICP device info"); + return -EINVAL; + } + + core_info = icp_v2_info->core_info; + irq_membase = icp_v2_info->soc_info.reg_map[core_info->irq_regbase_idx].mem_base; + + reinit_completion(&icp_v2_info->hw_complete); + core_info->is_irq_test = true; + + cam_icp_v2_hw_init(priv, &send_freq_info, sizeof(send_freq_info)); + + cam_io_w_mb(ICP_V2_WDT_BARK_WS0, irq_membase + core_info->hw_info->ob_irq_mask); + cam_io_w_mb(ICP_V2_WDT_BARK_WS0, irq_membase + core_info->hw_info->ob_irq_set); + cam_io_w_mb(ICP_V2_IRQ_SET_CMD, irq_membase + core_info->hw_info->ob_irq_cmd); + + rem_jiffies = cam_common_wait_for_completion_timeout(&icp_v2_info->hw_complete, + msecs_to_jiffies(ICP_V2_IRQ_TEST_TIMEOUT)); + if (!rem_jiffies) + CAM_ERR(CAM_ICP, "ICP IRQ verification timed out"); + + cam_io_w_mb(0, irq_membase + core_info->hw_info->ob_irq_mask); + cam_icp_v2_hw_deinit(priv, &send_freq_info, sizeof(send_freq_info)); + + core_info->is_irq_test = false; + + return 0; +} + +void __iomem *cam_icp_v2_iface_addr(void *priv) +{ + int32_t csr_base_idx; + struct cam_hw_info *icp_v2_info = priv; + struct cam_icp_v2_core_info *core_info = NULL; + void __iomem *base; + + if (!icp_v2_info) { + CAM_ERR(CAM_ICP, "invalid icp device info"); + return ERR_PTR(-EINVAL); + } + + core_info = icp_v2_info->core_info; + csr_base_idx = core_info->reg_base_idx[ICP_V2_CSR_BASE]; + 
base = icp_v2_info->soc_info.reg_map[csr_base_idx].mem_base; + + return base + ICP_V2_GEN_PURPOSE_REG_OFFSET; +} + +void cam_icp_v2_populate_hfi_ops(const struct hfi_ops **hfi_proc_ops) +{ + if (!hfi_proc_ops) { + CAM_ERR(CAM_ICP, "Invalid hfi ops argument"); + return; + } + + *hfi_proc_ops = &hfi_icp_v2_ops; +} + +static int cam_icp_v2_setup_register_base_indexes( + struct cam_hw_soc_info *soc_info, uint32_t hw_version, + int32_t regbase_index[], int32_t num_reg_map) +{ + int rc; + uint32_t index; + + if (num_reg_map > ICP_V2_BASE_MAX) { + CAM_ERR(CAM_ICP, "Number of reg maps: %d exceeds max: %d", + num_reg_map, ICP_V2_BASE_MAX); + return -EINVAL; + } + + if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) { + CAM_ERR(CAM_ICP, "Invalid number of mem blocks: %d", + soc_info->num_mem_block); + return -EINVAL; + } + + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "icp_csr", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[ICP_V2_CSR_BASE] = index; + } else { + CAM_ERR(CAM_ICP, + "Failed to get index for icp_csr, rc: %d index: %u num_reg_map: %u", + rc, index, num_reg_map); + return -EINVAL; + } + + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "icp_wd0", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[ICP_V2_WD0_BASE] = index; + } else { + CAM_ERR(CAM_ICP, + "Failed to get index for icp_wd0, rc: %d index: %u num_reg_map: %u", + rc, index, num_reg_map); + return -EINVAL; + } + + if (hw_version == CAM_ICP_V2_VERSION) { + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "icp_cirq", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[ICP_V2_CIRQ_BASE] = index; + } else { + CAM_ERR(CAM_ICP, + "Failed to get index for icp_cirq, rc: %d index: %u num_reg_map: %u", + rc, index, num_reg_map); + return -EINVAL; + } + } else { + /* Optional for other versions */ + regbase_index[ICP_V2_CIRQ_BASE] 
= -1; + } + + /* optional - ICP SYS map */ + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "icp_sys", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[ICP_V2_SYS_BASE] = index; + } else { + CAM_DBG(CAM_ICP, + "Failed to get index for icp_sys, rc: %d index: %u num_reg_map: %u", + rc, index, num_reg_map); + regbase_index[ICP_V2_SYS_BASE] = -1; + } + + /* optional - for non secure FW loading */ + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "icp_dom_mask", &index); + if ((rc == 0) && (index < num_reg_map)) { + regbase_index[ICP_V2_DOM_MASK_BASE] = index; + } else { + CAM_DBG(CAM_ICP, + "Failed to get index for icp_dom_mask, rc: %d index: %u num_reg_map: %u", + rc, index, num_reg_map); + regbase_index[ICP_V2_DOM_MASK_BASE] = -1; + } + + return 0; +} + +int cam_icp_v2_core_init( + struct cam_hw_soc_info *soc_info, + struct cam_icp_v2_core_info *core_info) +{ + int rc = 0; + struct cam_icp_soc_info *soc_priv; + + soc_priv = (struct cam_icp_soc_info *)soc_info->soc_private; + + rc = cam_icp_v2_setup_register_base_indexes(soc_info, + soc_priv->hw_version, core_info->reg_base_idx, ICP_V2_BASE_MAX); + if (rc) + return rc; + + /* Associate OB/HOST2ICP IRQ reg base */ + switch (soc_priv->hw_version) { + case CAM_ICP_V2_1_VERSION: + core_info->irq_regbase_idx = + core_info->reg_base_idx[ICP_V2_CSR_BASE]; + break; + case CAM_ICP_V2_VERSION: + core_info->irq_regbase_idx = + core_info->reg_base_idx[ICP_V2_CIRQ_BASE]; + break; + default: + CAM_ERR(CAM_ICP, "Unsupported ICP HW version: %u", + soc_priv->hw_version); + rc = -EINVAL; + } + + core_info->hfi_handle = HFI_HANDLE_INIT_VALUE; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_core.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_core.h new file mode 100644 index 0000000000..d824ee6f80 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_core.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_ICP_V2_CORE_H_ +#define _CAM_ICP_V2_CORE_H_ + +#include "cam_hw_intf.h" +#include "cam_icp_hw_intf.h" +#include "hfi_intf.h" +#include "cam_icp_v2_reg.h" + +/* Domain ID masks */ +#define ICP_V2_DOMAIN_MASK_CFG_0 0x00FF00FF +#define ICP_V2_DOMAIN_MASK_CFG_1 0xFF00FF00 + +enum cam_icp_v2_reg_base { + ICP_V2_CSR_BASE, + ICP_V2_CIRQ_BASE, + ICP_V2_WD0_BASE, + ICP_V2_SYS_BASE, + ICP_V2_DOM_MASK_BASE, + ICP_V2_BASE_MAX, +}; + +struct cam_icp_v2_core_info { + struct cam_icp_irq_cb irq_cb; + struct cam_icp_v2_hw_info *hw_info; + int32_t reg_base_idx[ICP_V2_BASE_MAX]; + uint32_t cpas_handle; + uint32_t power_on_cnt; + int hfi_handle; + enum cam_icp_v2_reg_base irq_regbase_idx; + struct { + const struct firmware *fw_elf; + void *fw; + uint32_t fw_buf; + uintptr_t fw_kva_addr; + uint64_t fw_buf_len; + } fw_params; + bool cpas_start; + bool use_sec_pil; + bool is_irq_test; +}; + +int cam_icp_v2_hw_init(void *priv, void *args, uint32_t arg_size); +int cam_icp_v2_hw_deinit(void *priv, void *args, uint32_t arg_size); +int cam_icp_v2_process_cmd(void *priv, uint32_t cmd_type, + void *args, uint32_t arg_size); +int cam_icp_v2_test_irq_line(void *priv); + +int cam_icp_v2_cpas_register(struct cam_hw_intf *icp_v2_intf); +int cam_icp_v2_cpas_unregister(struct cam_hw_intf *icp_v2_intf); + +irqreturn_t cam_icp_v2_handle_irq(int irq_num, void *data); + +void cam_icp_v2_irq_raise(void *priv); +void cam_icp_v2_irq_enable(void *priv); +void __iomem *cam_icp_v2_iface_addr(void *priv); +void cam_icp_v2_populate_hfi_ops(const struct hfi_ops **hfi_proc_ops); + +int cam_icp_v2_core_init(struct cam_hw_soc_info *soc_info, + struct cam_icp_v2_core_info *core_info); + +#endif 
/* _CAM_ICP_V2_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_dev.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_dev.c new file mode 100644 index 0000000000..1466bfe252 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_dev.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include + +#include "camera_main.h" +#include "cam_debug_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_icp_hw_intf.h" +#include "cam_icp_v2_core.h" +#include "cam_icp_soc_common.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static int max_icp_v2_hw_idx = -1; +static struct cam_icp_hw_intf_data cam_icp_hw_list[CAM_ICP_HW_NUM_MAX]; + +struct cam_icp_v2_hw_info cam_icp_v2_hw_info[] = { + { + .ob_irq_status = 0xC, + .ob_irq_mask = 0x0, + .ob_irq_clear = 0x4, + .ob_irq_set = 0x8, + .ob_irq_cmd = 0x10, + .host2icpint = 0x124, + .pfault_info = 0x128, + }, +}; + +struct cam_icp_v2_hw_info cam_icp_v2_1_hw_info[] = { + { + .ob_irq_status = 0x20C, + .ob_irq_mask = 0x200, + .ob_irq_clear = 0x204, + .ob_irq_set = 0x208, + .ob_irq_cmd = 0x210, + .host2icpint = 0x300, + .pfault_info = 0x400, + }, +}; + +uint32_t cam_icp_v2_get_device_num(void) +{ + return max_icp_v2_hw_idx + 1; +} + +static int cam_icp_v2_soc_info_init(struct cam_hw_soc_info *soc_info, + struct platform_device *pdev) +{ + struct cam_icp_soc_info *icp_soc_info = NULL; + + icp_soc_info = CAM_MEM_ZALLOC(sizeof(*icp_soc_info), GFP_KERNEL); + if (!icp_soc_info) + return -ENOMEM; + + soc_info->pdev = pdev; + soc_info->dev = &pdev->dev; + soc_info->dev_name = pdev->name; + soc_info->soc_private = icp_soc_info; + + icp_soc_info->dev_type = CAM_ICP_HW_ICP_V2; + + return 
0; +} + +static inline void cam_icp_v2_soc_info_deinit(struct cam_hw_soc_info *soc_info) +{ + struct cam_icp_soc_info *icp_soc_info = soc_info->soc_private; + + if (icp_soc_info->pid) { + CAM_MEM_FREE(icp_soc_info->pid); + icp_soc_info->pid = NULL; + } + + CAM_MEM_FREE(soc_info->soc_private); + soc_info->soc_private = NULL; +} + +static int cam_icp_v2_component_bind(struct device *dev, + struct device *mdev, void *data) +{ + int rc = 0, i; + struct cam_hw_intf *icp_v2_intf = NULL; + struct cam_hw_info *icp_v2_info = NULL; + struct cam_icp_v2_core_info *core_info = NULL; + const struct of_device_id *match_dev = NULL; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + struct cam_icp_soc_info *icp_soc_info; + + CAM_GET_TIMESTAMP(ts_start); + match_dev = of_match_device( + pdev->dev.driver->of_match_table, &pdev->dev); + if (!match_dev) { + CAM_DBG(CAM_ICP, "No ICP v2 hardware info"); + return -EINVAL; + } + + icp_v2_intf = CAM_MEM_ZALLOC(sizeof(*icp_v2_intf), GFP_KERNEL); + if (!icp_v2_intf) + return -ENOMEM; + + icp_v2_info = CAM_MEM_ZALLOC(sizeof(*icp_v2_info), GFP_KERNEL); + if (!icp_v2_info) { + rc = -ENOMEM; + goto free_hw_intf; + } + + core_info = CAM_MEM_ZALLOC(sizeof(*core_info), GFP_KERNEL); + if (!core_info) { + rc = -ENOMEM; + goto free_hw_info; + } + + core_info->hw_info = (struct cam_icp_v2_hw_info *)match_dev->data; + icp_v2_info->core_info = core_info; + + rc = cam_icp_v2_soc_info_init(&icp_v2_info->soc_info, pdev); + if (rc) + goto free_core_info; + + mutex_init(&icp_v2_info->hw_mutex); + spin_lock_init(&icp_v2_info->hw_lock); + init_completion(&icp_v2_info->hw_complete); + + rc = cam_icp_soc_resources_init(&icp_v2_info->soc_info, + cam_icp_v2_handle_irq, icp_v2_info); + if (rc) { + CAM_ERR(CAM_ICP, "soc resources init failed rc=%d", rc); + goto free_soc_info; + } + + rc = cam_icp_v2_core_init(&icp_v2_info->soc_info, core_info); + if (rc) + goto free_soc_info; + + icp_v2_intf->hw_priv 
= icp_v2_info; + icp_v2_intf->hw_type = CAM_ICP_HW_ICP_V2; + icp_v2_intf->hw_idx = icp_v2_info->soc_info.index; + icp_v2_intf->hw_ops.init = cam_icp_v2_hw_init; + icp_v2_intf->hw_ops.deinit = cam_icp_v2_hw_deinit; + icp_v2_intf->hw_ops.process_cmd = cam_icp_v2_process_cmd; + icp_v2_intf->hw_ops.test_irq_line = cam_icp_v2_test_irq_line; + + icp_v2_info->soc_info.hw_id = CAM_HW_ID_ICP + icp_v2_info->soc_info.index; + rc = cam_vmvm_populate_hw_instance_info(&icp_v2_info->soc_info, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ICP, " hw instance populate failed: %d", rc); + goto res_deinit; + } + + rc = cam_icp_v2_cpas_register(icp_v2_intf); + if (rc) { + CAM_ERR(CAM_ICP, "cpas registration failed rc=%d", rc); + goto res_deinit; + } + + if ((int)(icp_v2_intf->hw_idx) > max_icp_v2_hw_idx) + max_icp_v2_hw_idx = icp_v2_intf->hw_idx; + + icp_soc_info = icp_v2_info->soc_info.soc_private; + cam_icp_hw_list[icp_v2_intf->hw_idx].pid = + CAM_MEM_ZALLOC_ARRAY(icp_soc_info->num_pid, sizeof(uint32_t), GFP_KERNEL); + if (!cam_icp_hw_list[icp_v2_intf->hw_idx].pid) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for icp hw list"); + rc = -ENOMEM; + goto res_deinit; + } + + cam_icp_hw_list[icp_v2_intf->hw_idx].hw_intf = icp_v2_intf; + cam_icp_hw_list[icp_v2_intf->hw_idx].num_pid = icp_soc_info->num_pid; + for (i = 0; i < icp_soc_info->num_pid; i++) + cam_icp_hw_list[icp_v2_intf->hw_idx].pid[i] = icp_soc_info->pid[i]; + + platform_set_drvdata(pdev, &cam_icp_hw_list[icp_v2_intf->hw_idx]); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return 0; + +res_deinit: + cam_icp_soc_resources_deinit(&icp_v2_info->soc_info); +free_soc_info: + cam_icp_v2_soc_info_deinit(&icp_v2_info->soc_info); +free_core_info: + CAM_MEM_FREE(core_info); +free_hw_info: + CAM_MEM_FREE(icp_v2_info); +free_hw_intf: + CAM_MEM_FREE(icp_v2_intf); + + return rc; +} + +static void cam_icp_v2_component_unbind(struct device *dev, + 
struct device *mdev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct cam_hw_intf *icp_v2_intf = NULL; + struct cam_hw_info *icp_v2_info = NULL; + struct cam_icp_hw_intf_data *icp_intf_data; + + icp_intf_data = platform_get_drvdata(pdev); + if (!icp_intf_data) { + CAM_ERR(CAM_ICP, "Error No data in pdev"); + return; + } + + icp_v2_intf = icp_intf_data->hw_intf; + if (!icp_v2_intf) { + CAM_ERR(CAM_ICP, "Error No ICP dev interface"); + return; + } + + icp_v2_info = icp_v2_intf->hw_priv; + + cam_icp_v2_cpas_unregister(icp_v2_intf); + cam_icp_soc_resources_deinit(&icp_v2_info->soc_info); + if (icp_intf_data->pid) + CAM_MEM_FREE(icp_intf_data->pid); + cam_icp_v2_soc_info_deinit(&icp_v2_info->soc_info); + + max_icp_v2_hw_idx = -1; + + CAM_MEM_FREE(icp_v2_info->core_info); + CAM_MEM_FREE(icp_v2_info); + CAM_MEM_FREE(icp_v2_intf); +} + +static const struct component_ops cam_icp_v2_component_ops = { + .bind = cam_icp_v2_component_bind, + .unbind = cam_icp_v2_component_unbind, +}; + +static const struct of_device_id cam_icp_v2_match[] = { + { + .compatible = "qcom,cam-icp_v2", + .data = &cam_icp_v2_hw_info, + }, + { + .compatible = "qcom,cam-icp_v2_1", + .data = &cam_icp_v2_1_hw_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_icp_v2_match); + +static int cam_icp_v2_driver_probe(struct platform_device *pdev) +{ + int rc; + + rc = component_add(&pdev->dev, &cam_icp_v2_component_ops); + if (rc) + CAM_ERR(CAM_ICP, "cam-icp_v2 component add failed rc=%d", rc); + + return rc; +} + +static int cam_icp_v2_driver_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_icp_v2_component_ops); + + return 0; +} + +struct platform_driver cam_icp_v2_driver = { + .probe = cam_icp_v2_driver_probe, + .remove = cam_icp_v2_driver_remove, + .driver = { + .name = "cam-icp_v2", + .of_match_table = cam_icp_v2_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_icp_v2_init_module(void) +{ + return platform_driver_register(&cam_icp_v2_driver); 
+} + +void cam_icp_v2_exit_module(void) +{ + platform_driver_unregister(&cam_icp_v2_driver); +} + +MODULE_DESCRIPTION("Camera ICP_V2 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_dev.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_dev.h new file mode 100644 index 0000000000..adae7c04de --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_dev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_ICP_V2_DEV_H_ +#define _CAM_ICP_V2_DEV_H_ + +int cam_icp_v2_init_module(void); +void cam_icp_v2_exit_module(void); + +#endif /* _CAM_ICP_V2_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_reg.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_reg.h new file mode 100644 index 0000000000..e1024cc099 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/icp_proc/icp_v2_hw/cam_icp_v2_reg.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_ICP_V2_REG_H_ +#define _CAM_ICP_V2_REG_H_ + +struct cam_icp_v2_hw_info { + uint32_t ob_irq_status; + uint32_t ob_irq_mask; + uint32_t ob_irq_clear; + uint32_t ob_irq_set; + uint32_t ob_irq_cmd; + uint32_t host2icpint; + uint32_t pfault_info; +}; + +/* ICP CSR info */ +#define ICP_V2_GEN_PURPOSE_REG_OFFSET 0x20 +#define ICP_V2_CSR_DBG_STATUS_REG_OFFSET 0xC0 +#define ICP_V2_CSR_DBG_CTRL_REG_OFFSET 0xC4 +#define ICP_V2_CSR_GP_REG_COUNT 0x18 + +/* ICP_SYS - Protected reg space defined in AC policy */ +#define ICP_V2_SYS_RESET 0x0 +#define ICP_V2_SYS_CONTROL 0x4 +#define ICP_V2_SYS_STATUS 0xC +#define ICP_V2_SYS_ACCESS 0x10 + +#define ICP_V2_STANDBYWFI (1 << 7) +#define ICP_V2_EN_CPU (1 << 9) +#define ICP_V2_FUNC_RESET (1 << 4) + +/* ICP WD reg space */ +#define ICP_V2_WD_CTRL 0x8 +#define ICP_V2_WD_INTCLR 0xC + +/* ICP DOM_MASK reg space */ +#define ICP_V2_DOM_0_CFG_OFFSET 0x0 +#define ICP_V2_DOM_1_CFG_OFFSET 0x20 + +/* These bitfields are shared by OB_MASK, OB_CLEAR, OB_STATUS */ +#define ICP_V2_WDT_BITE_WS1 (1 << 6) +#define ICP_V2_WDT_BARK_WS1 (1 << 5) +#define ICP_V2_WDT_BITE_WS0 (1 << 4) +#define ICP_V2_WDT_BARK_WS0 (1 << 3) +#define ICP_V2_ICP2HOSTINT (1 << 2) + +#define ICP_V2_IRQ_CLEAR_CMD (1 << 1) +#define ICP_V2_IRQ_SET_CMD (1 << 0) + +#define ICP_V2_HOST2ICPINT (1 << 0) + +#endif /* _CAM_ICP_V2_REG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h new file mode 100644 index 0000000000..877f9f99eb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_ICP_HW_MGR_INTF_H +#define CAM_ICP_HW_MGR_INTF_H + +#include +#include +#include +#include "cam_cpas_api.h" + + +#define CAM_ICP_SUBDEV_NAME_LEN 32 +#define CAM_ICP_SUBDEV_NAME "cam-icp" + +#define ICP_CLK_TURBO_HZ 600000000 +#define ICP_CLK_SVS_HZ 400000000 + +#define CAM_ICP_BW_BYTES_VOTE 40000000 + +#define CAM_ICP_CTX_MAX 54 + +#define CPAS_IPE1_BIT 0x2000 + +#define CAM_IPE_DEFAULT_AXI_PATH CAM_AXI_PATH_DATA_IPE_WR_VID +#define CAM_IPE_DEFAULT_AXI_TRANSAC CAM_AXI_TRANSACTION_WRITE +#define CAM_BPS_DEFAULT_AXI_PATH CAM_AXI_PATH_DATA_ALL +#define CAM_BPS_DEFAULT_AXI_TRANSAC CAM_AXI_TRANSACTION_WRITE +#define CAM_OFE_DEFAULT_AXI_PATH CAM_AXI_PATH_DATA_OFE_WR_VID +#define CAM_OFE_DEFAULT_AXI_TRANSAC CAM_AXI_TRANSACTION_WRITE +#define CAM_ICP_DEFAULT_AXI_PATH CAM_AXI_PATH_DATA_ALL +#define CAM_ICP_DEFAULT_AXI_TRANSAC CAM_AXI_TRANSACTION_READ + +#define CAM_ICP_DUMP_TAG_MAX_LEN 128 +#define CAM_ICP_DUMP_NUM_WORDS_CLK 2 +#define CAM_ICP_DUMP_NUM_WORDS_MGR 7 +#define CAM_ICP_DUMP_NUM_WORDS_REQ 5 + +enum cam_icp_subdev_id { + CAM_ICP0_SUBDEV, + CAM_ICP1_SUBDEV, + CAM_ICP_SUBDEV_MAX +}; + +enum cam_icp_hw_event_type { + CAM_ICP_EVT_ID_BUF_DONE, + CAM_ICP_EVT_ID_ERROR, + CAM_ICP_EVT_ID_INJECT_EVENT, +}; + +enum cam_icp_hw_error_type { + CAM_ICP_HW_ERROR_NO_MEM, + CAM_ICP_HW_ERROR_SYSTEM_FAILURE, +}; + +typedef void(*cam_icp_mini_dump_cb)(void *priv, + void *args); + +/** + * struct cam_icp_cpas_vote + * @ahb_vote: AHB vote info + * @axi_vote: AXI vote info + * @ahb_vote_valid: Flag for ahb vote data + * @axi_vote_valid: flag for axi vote data + */ +struct cam_icp_cpas_vote { + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote; + uint32_t ahb_vote_valid; + uint32_t axi_vote_valid; +}; + +/** + * struct cam_icp_hw_dump_args + * @cpu_addr: kernel vaddr + * @buf_len: buffer length + * @offset: offset + */ +struct cam_icp_hw_dump_args { + uintptr_t cpu_addr; + size_t buf_len; + size_t offset; +}; + +/** + * struct cam_icp_dump_header + * @tag: 
tag of the packet + * @size: size of data in packet + * @word_size: size of each word in packet + */ +struct cam_icp_dump_header { + uint8_t tag[CAM_ICP_DUMP_TAG_MAX_LEN]; + uint64_t size; + int32_t word_size; +}; + +/** + * struct cam_icp_hw_buf_done_evt_data + * @buf_done_data: buf done payload + * @evt_id: event id (success/cancel/error) + */ +struct cam_icp_hw_buf_done_evt_data { + void *buf_done_data; + uint32_t evt_id; +}; + +/** + * struct cam_icp_hw_error_evt_data + * @req_id: request id + * @err_type: type of fatal error + */ +struct cam_icp_hw_error_evt_data { + uint64_t req_id; + enum cam_icp_hw_error_type err_type; +}; + +int cam_icp_hw_mgr_init(struct device_node *of_node, + uint64_t *hw_mgr_hdl, int *iommu_hdl, + cam_icp_mini_dump_cb mini_dump_cb, int device_idx); + +void cam_icp_hw_mgr_deinit(int device_idx); + +#endif /* CAM_ICP_HW_MGR_INTF_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_core.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_core.c new file mode 100644 index 0000000000..832e311c95 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_core.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "ipe_core.h" +#include "ipe_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_ipe_hw_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "hfi_reg.h" +#include "cam_common_util.h" + +static int cam_ipe_cpas_vote(struct cam_ipe_device_core_info *core_info, + struct cam_icp_cpas_vote *cpas_vote) +{ + int rc = 0; + + if (cpas_vote->ahb_vote_valid) + rc = cam_cpas_update_ahb_vote(core_info->cpas_handle, + &cpas_vote->ahb_vote); + if (cpas_vote->axi_vote_valid) + rc = cam_cpas_update_axi_vote(core_info->cpas_handle, + &cpas_vote->axi_vote); + + if (rc) + CAM_ERR(CAM_PERF, "cpas vote is failed: %d", rc); + + return rc; +} + +int cam_ipe_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *ipe_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ipe_device_core_info *core_info = NULL; + struct cam_icp_cpas_vote cpas_vote; + unsigned long flags; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &ipe_dev->soc_info; + core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info; + + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + spin_lock_irqsave(&ipe_dev->hw_lock, flags); + if (ipe_dev->hw_state == CAM_HW_STATE_POWER_UP) { + core_info->power_on_cnt++; + spin_unlock_irqrestore(&ipe_dev->hw_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ipe_dev->hw_lock, flags); + + rc = cam_vmrm_soc_acquire_resources(CAM_HW_ID_IPE + core_info->ipe_hw_info->hw_idx); + if (rc) { + CAM_ERR(CAM_ICP, "IPE hw id %x acquire ownership failed", + CAM_HW_ID_IPE + core_info->ipe_hw_info->hw_idx); + return rc; + } + + 
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE; + cpas_vote.ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + cpas_vote.axi_vote.num_paths = 1; + cpas_vote.axi_vote.axi_path[0].path_data_type = + CAM_IPE_DEFAULT_AXI_PATH; + cpas_vote.axi_vote.axi_path[0].transac_type = + CAM_IPE_DEFAULT_AXI_TRANSAC; + cpas_vote.axi_vote.axi_path[0].camnoc_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote.ahb_vote, &cpas_vote.axi_vote); + if (rc) { + CAM_ERR(CAM_ICP, "cpas start failed: %d", rc); + goto error; + } + core_info->cpas_start = true; + + rc = cam_ipe_enable_soc_resources(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "soc enable is failed : %d", rc); + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_ICP, "cpas stop is failed"); + else + core_info->cpas_start = false; + } else { + core_info->clk_enable = true; + } + + spin_lock_irqsave(&ipe_dev->hw_lock, flags); + ipe_dev->hw_state = CAM_HW_STATE_POWER_UP; + core_info->power_on_cnt++; + spin_unlock_irqrestore(&ipe_dev->hw_lock, flags); + CAM_DBG(CAM_ICP, "IPE%u powered on (refcnt: %u)", + soc_info->index, core_info->power_on_cnt); + +error: + return rc; +} + +int cam_ipe_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *ipe_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ipe_device_core_info *core_info = NULL; + unsigned long flags; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &ipe_dev->soc_info; + core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + spin_lock_irqsave(&ipe_dev->hw_lock, flags); + if (ipe_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { 
+ spin_unlock_irqrestore(&ipe_dev->hw_lock, flags); + return 0; + } + + core_info->power_on_cnt--; + if (core_info->power_on_cnt) { + spin_unlock_irqrestore(&ipe_dev->hw_lock, flags); + CAM_DBG(CAM_ICP, "IPE%u power on reference still held %u", + soc_info->index, core_info->power_on_cnt); + return 0; + } + spin_unlock_irqrestore(&ipe_dev->hw_lock, flags); + + rc = cam_ipe_disable_soc_resources(soc_info, core_info->clk_enable); + if (rc) + CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc); + core_info->clk_enable = false; + + if (core_info->cpas_start) { + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_ICP, "cpas stop is failed"); + else + core_info->cpas_start = false; + } + + spin_lock_irqsave(&ipe_dev->hw_lock, flags); + ipe_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + spin_unlock_irqrestore(&ipe_dev->hw_lock, flags); + + CAM_DBG(CAM_ICP, "IPE%u powered off (refcnt: %u)", + soc_info->index, core_info->power_on_cnt); + + rc = cam_vmrm_soc_release_resources(CAM_HW_ID_IPE + core_info->ipe_hw_info->hw_idx); + if (rc) { + CAM_ERR(CAM_ICP, "IPE hw id %x release ownership failed", + CAM_HW_ID_IPE + core_info->ipe_hw_info->hw_idx); + return rc; + } + + return rc; +} + +static int cam_ipe_handle_pc(struct cam_hw_info *ipe_dev) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ipe_device_core_info *core_info = NULL; + struct cam_ipe_device_hw_info *hw_info = NULL; + int rc = 0; + int pwr_ctrl; + int pwr_status; + + soc_info = &ipe_dev->soc_info; + core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info; + hw_info = core_info->ipe_hw_info; + + if (!core_info->cpas_start) { + CAM_DBG(CAM_ICP, "CPAS IPE client not started"); + return 0; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + if (!(pwr_ctrl & IPE_COLLAPSE_MASK)) { + rc = cam_cpas_reg_read(core_info->cpas_handle, + 
CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0x1); + + if (pwr_status >> IPE_PWR_ON_MASK) + CAM_WARN(CAM_PERF, "BPS: pwr_status(%x):pwr_ctrl(%x)", + pwr_status, pwr_ctrl); + + } + + rc = cam_ipe_get_gdsc_control(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "failed to get gdsc control rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + CAM_DBG(CAM_PERF, "pwr_ctrl=%x pwr_status=%x", pwr_ctrl, pwr_status); + + return 0; +} + +static int cam_ipe_handle_resume(struct cam_hw_info *ipe_dev) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ipe_device_core_info *core_info = NULL; + struct cam_ipe_device_hw_info *hw_info = NULL; + int pwr_ctrl; + int pwr_status; + int rc = 0; + + soc_info = &ipe_dev->soc_info; + core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info; + hw_info = core_info->ipe_hw_info; + + if (!core_info->cpas_start) { + CAM_DBG(CAM_ICP, "CPAS IPE client not started"); + return 0; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + if (pwr_ctrl & IPE_COLLAPSE_MASK) { + CAM_DBG(CAM_PERF, "IPE pwr_ctrl set(%x)", pwr_ctrl); + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0); + } + + rc = cam_ipe_transfer_gdsc_control(soc_info); + if (rc) { + 
CAM_ERR(CAM_ICP, "failed to transfer gdsc control rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + CAM_DBG(CAM_PERF, "pwr_ctrl=%x pwr_status=%x", pwr_ctrl, pwr_status); + + return rc; +} + +int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *ipe_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ipe_device_core_info *core_info = NULL; + struct cam_ipe_device_hw_info *hw_info = NULL; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid arguments"); + return -EINVAL; + } + + if (cmd_type >= CAM_ICP_DEV_CMD_MAX) { + CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type); + return -EINVAL; + } + + soc_info = &ipe_dev->soc_info; + core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info; + hw_info = core_info->ipe_hw_info; + + switch (cmd_type) { + case CAM_ICP_DEV_CMD_VOTE_CPAS: { + struct cam_icp_cpas_vote *cpas_vote = cmd_args; + + if (!cmd_args) + return -EINVAL; + + cam_ipe_cpas_vote(core_info, cpas_vote); + break; + } + + case CAM_ICP_DEV_CMD_CPAS_START: { + struct cam_icp_cpas_vote *cpas_vote = cmd_args; + + if (!cmd_args) + return -EINVAL; + + if (!core_info->cpas_start) { + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote->ahb_vote, &cpas_vote->axi_vote); + core_info->cpas_start = true; + } + break; + } + + case CAM_ICP_DEV_CMD_CPAS_STOP: + if (core_info->cpas_start) { + cam_cpas_stop(core_info->cpas_handle); + core_info->cpas_start = false; + } + break; + case CAM_ICP_DEV_CMD_POWER_COLLAPSE: + rc = cam_ipe_handle_pc(ipe_dev); + break; + case 
CAM_ICP_DEV_CMD_POWER_RESUME: + rc = cam_ipe_handle_resume(ipe_dev); + break; + case CAM_ICP_DEV_CMD_UPDATE_CLK: { + struct cam_icp_dev_clk_update_cmd *clk_upd_cmd = cmd_args; + struct cam_ahb_vote ahb_vote; + uint32_t clk_rate = clk_upd_cmd->curr_clk_rate; + int32_t clk_level = 0, err = 0; + + CAM_DBG(CAM_PERF, "ipe_src_clk rate = %d", (int)clk_rate); + if (!core_info->clk_enable) { + if (clk_upd_cmd->dev_pc_enable) { + cam_ipe_handle_pc(ipe_dev); + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0x0); + } + rc = cam_ipe_toggle_clk(soc_info, true); + if (rc) + CAM_ERR(CAM_ICP, "Enable failed"); + else + core_info->clk_enable = true; + if (clk_upd_cmd->dev_pc_enable) { + rc = cam_ipe_handle_resume(ipe_dev); + if (rc) + CAM_ERR(CAM_ICP, "bps resume failed"); + } + } + CAM_DBG(CAM_PERF, "clock rate %d", clk_rate); + + rc = cam_ipe_update_clk_rate(soc_info, &clk_rate); + if (rc) + CAM_ERR(CAM_PERF, "Failed to update clk %d", clk_rate); + + err = cam_soc_util_get_clk_level(soc_info, + clk_rate, soc_info->src_clk_idx, + &clk_level); + + if (!err) { + clk_upd_cmd->clk_level = clk_level; + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = clk_level; + + rc = cam_cpas_update_ahb_vote(core_info->cpas_handle, &ahb_vote); + if (rc) { + CAM_ERR(CAM_PERF, "failed at updating ahb vote level rc: %d", + rc); + return rc; + } + + rc = cam_cpas_update_axi_floor_lvl(core_info->cpas_handle, + clk_level); + if (rc) { + CAM_ERR(CAM_PERF, "failed at updating axi vote level clk_level: %d rc: %d", + clk_level, rc); + return rc; + } + } + break; + } + case CAM_ICP_DEV_CMD_DISABLE_CLK: + if (core_info->clk_enable == true) + cam_ipe_toggle_clk(soc_info, false); + core_info->clk_enable = false; + break; + default: + CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type); + rc = -EINVAL; + break; + } + return rc; +} + +irqreturn_t cam_ipe_irq(int irq_num, void *data) +{ + return IRQ_HANDLED; +} diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_core.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_core.h new file mode 100644 index 0000000000..7618e199a2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_core.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_IPE_CORE_H +#define CAM_IPE_CORE_H + +#include +#include +#include +#include + +#define IPE_COLLAPSE_MASK 0x1 +#define IPE_PWR_ON_MASK 0x2 + +struct cam_ipe_device_hw_info { + uint32_t hw_idx; + uint32_t pwr_ctrl; + uint32_t pwr_status; +}; + +struct cam_ipe_device_core_info { + struct cam_ipe_device_hw_info *ipe_hw_info; + uint32_t cpas_handle; + uint32_t power_on_cnt; + bool cpas_start; + bool clk_enable; +}; + +int cam_ipe_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_ipe_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +irqreturn_t cam_ipe_irq(int irq_num, void *data); + +/** + * @brief : API to register IPE hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_ipe_init_module(void); + +/** + * @brief : API to remove IPE Hw from platform framework. + */ +void cam_ipe_exit_module(void); +#endif /* CAM_IPE_CORE_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_dev.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_dev.c new file mode 100644 index 0000000000..1ef988ffa7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_dev.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. 
All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "ipe_core.h" +#include "ipe_soc.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_io_util.h" +#include "cam_icp_hw_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static struct cam_icp_hw_intf_data cam_ipe_hw_list[CAM_IPE_HW_NUM_MAX]; + +static struct cam_ipe_device_hw_info cam_ipe_hw_info[] = { + { + .hw_idx = 0, + .pwr_ctrl = 0x40, + .pwr_status = 0x3C, + }, +}; + +static struct cam_ipe_device_hw_info cam_ipe680_hw_info[] = { + { + .hw_idx = 0, + .pwr_ctrl = 0x40, + .pwr_status = 0x3C, + }, +}; + +int cam_ipe_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_ipe_device_core_info *core_info, + uint32_t hw_idx) +{ + struct cam_cpas_register_params cpas_register_params; + int rc; + + cpas_register_params.dev = &soc_info->pdev->dev; + memcpy(cpas_register_params.identifier, "ipe", sizeof("ipe")); + cpas_register_params.cam_cpas_client_cb = NULL; + cpas_register_params.cell_index = hw_idx; + cpas_register_params.userdata = NULL; + + rc = cam_cpas_register_client(&cpas_register_params); + if (rc < 0) { + CAM_ERR(CAM_ICP, "failed: %d", rc); + return rc; + } + core_info->cpas_handle = cpas_register_params.client_handle; + + return rc; +} + +static int cam_ipe_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *ipe_dev = NULL; + struct cam_hw_intf *ipe_dev_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_ipe_device_core_info *core_info = NULL; + struct cam_ipe_device_hw_info *hw_info = NULL; + int rc = 0, i; + struct cam_cpas_query_cap query; + uint32_t *cam_caps, num_cap_mask; + uint32_t hw_idx; + struct platform_device *pdev = to_platform_device(dev); + struct 
timespec64 ts_start, ts_end; + long microsec = 0; + struct cam_ipe_soc_private *ipe_soc_priv; + + CAM_GET_TIMESTAMP(ts_start); + of_property_read_u32(pdev->dev.of_node, + "cell-index", &hw_idx); + + rc = cam_cpas_get_hw_info(&query.camera_family, + &query.camera_version, &query.cpas_version, + &cam_caps, &num_cap_mask, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ICP, "failed to get hw info rc=%d", rc); + return rc; + } + + if ((!(cam_caps[IPE_CAPS_MASK_IDX] & CPAS_IPE1_BIT)) && (hw_idx)) { + CAM_ERR(CAM_ICP, "IPE1 hw idx = %d\n", hw_idx); + return -EINVAL; + } + + ipe_dev_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!ipe_dev_intf) + return -ENOMEM; + + ipe_dev_intf->hw_idx = hw_idx; + ipe_dev = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!ipe_dev) { + rc = -ENOMEM; + goto free_dev_intf; + } + + ipe_dev->soc_info.pdev = pdev; + ipe_dev->soc_info.dev = &pdev->dev; + ipe_dev->soc_info.dev_name = pdev->name; + ipe_dev_intf->hw_priv = ipe_dev; + ipe_dev_intf->hw_ops.init = cam_ipe_init_hw; + ipe_dev_intf->hw_ops.deinit = cam_ipe_deinit_hw; + ipe_dev_intf->hw_ops.process_cmd = cam_ipe_process_cmd; + ipe_dev_intf->hw_type = CAM_ICP_DEV_IPE; + + CAM_DBG(CAM_ICP, "IPE component bind type %d index %d", + ipe_dev_intf->hw_type, + ipe_dev_intf->hw_idx); + + platform_set_drvdata(pdev, &cam_ipe_hw_list[hw_idx]); + + ipe_dev->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_ipe_device_core_info), + GFP_KERNEL); + if (!ipe_dev->core_info) { + rc = -ENOMEM; + goto free_dev; + } + + core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info; + match_dev = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev); + if (!match_dev) { + CAM_DBG(CAM_ICP, "No ipe hardware info"); + rc = -EINVAL; + goto free_core_info; + } + + hw_info = (struct cam_ipe_device_hw_info *)match_dev->data; + core_info->ipe_hw_info = hw_info; + + rc = cam_ipe_init_soc_resources(&ipe_dev->soc_info, cam_ipe_irq, ipe_dev); + if (rc < 0) { + CAM_ERR(CAM_ICP, "failed to 
init_soc"); + goto free_core_info; + } + + CAM_DBG(CAM_ICP, "cam_ipe_init_soc_resources : %pK", + (void *)&ipe_dev->soc_info); + + ipe_dev->soc_info.hw_id = CAM_HW_ID_IPE + ipe_dev->soc_info.index; + rc = cam_vmvm_populate_hw_instance_info(&ipe_dev->soc_info, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ICP, "hw instance populate failed: %d", rc); + goto free_soc_resources; + } + + rc = cam_ipe_register_cpas(&ipe_dev->soc_info, + core_info, ipe_dev_intf->hw_idx); + if (rc < 0) + goto free_soc_resources; + + ipe_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + ipe_soc_priv = ipe_dev->soc_info.soc_private; + cam_ipe_hw_list[hw_idx].pid = + CAM_MEM_ZALLOC_ARRAY(ipe_soc_priv->num_pid, sizeof(uint32_t), GFP_KERNEL); + if (!cam_ipe_hw_list[hw_idx].pid) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for ipe hw list"); + rc = -ENOMEM; + goto free_soc_resources; + } + + cam_ipe_hw_list[hw_idx].hw_intf = ipe_dev_intf; + cam_ipe_hw_list[hw_idx].num_pid = ipe_soc_priv->num_pid; + for (i = 0; i < ipe_soc_priv->num_pid; i++) + cam_ipe_hw_list[hw_idx].pid[i] = ipe_soc_priv->pid[i]; + + mutex_init(&ipe_dev->hw_mutex); + spin_lock_init(&ipe_dev->hw_lock); + init_completion(&ipe_dev->hw_complete); + CAM_DBG(CAM_ICP, "IPE:%d component bound successfully", + ipe_dev_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +free_soc_resources: + cam_ipe_deinit_soc_resources(&ipe_dev->soc_info); +free_core_info: + CAM_MEM_FREE(ipe_dev->core_info); +free_dev: + CAM_MEM_FREE(ipe_dev); +free_dev_intf: + CAM_MEM_FREE(ipe_dev_intf); + return rc; +} + +static void cam_ipe_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *ipe_dev = NULL; + struct cam_hw_intf *ipe_dev_intf = NULL; + struct cam_ipe_device_core_info *core_info = NULL; + struct platform_device *pdev = to_platform_device(dev); + struct cam_icp_hw_intf_data *ipe_intf_data; + 
+ CAM_DBG(CAM_ICP, "Unbinding component: %s", pdev->name); + ipe_intf_data = platform_get_drvdata(pdev); + if (!ipe_intf_data) { + CAM_ERR(CAM_ICP, "Error No data in pdev"); + return; + } + + ipe_dev_intf = ipe_intf_data->hw_intf; + if (!ipe_dev_intf) { + CAM_ERR(CAM_ICP, "Error No IPE dev interface"); + return; + } + + ipe_dev = ipe_dev_intf->hw_priv; + core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info; + cam_cpas_unregister_client(core_info->cpas_handle); + if (ipe_intf_data->pid) + CAM_MEM_FREE(ipe_intf_data->pid); + cam_ipe_deinit_soc_resources(&ipe_dev->soc_info); + CAM_MEM_FREE(ipe_dev->core_info); + CAM_MEM_FREE(ipe_dev); + CAM_MEM_FREE(ipe_dev_intf); +} + + +const static struct component_ops cam_ipe_component_ops = { + .bind = cam_ipe_component_bind, + .unbind = cam_ipe_component_unbind, +}; + +int cam_ipe_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ICP, "Adding IPE component"); + rc = component_add(&pdev->dev, &cam_ipe_component_ops); + if (rc) + CAM_ERR(CAM_ICP, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_ipe_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_ipe_component_ops); + return 0; +} + +static const struct of_device_id cam_ipe_dt_match[] = { + { + .compatible = "qcom,cam-ipe", + .data = &cam_ipe_hw_info, + }, + { + .compatible = "qcom,cam-ipe680", + .data = &cam_ipe680_hw_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_ipe_dt_match); + +struct platform_driver cam_ipe_driver = { + .probe = cam_ipe_probe, + .remove = cam_ipe_remove, + .driver = { + .name = "cam-ipe", + .owner = THIS_MODULE, + .of_match_table = cam_ipe_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_ipe_init_module(void) +{ + return platform_driver_register(&cam_ipe_driver); +} + +void cam_ipe_exit_module(void) +{ + platform_driver_unregister(&cam_ipe_driver); +} + +MODULE_DESCRIPTION("CAM IPE driver"); +MODULE_LICENSE("GPL v2"); diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_soc.c new file mode 100644 index 0000000000..9b0fa68e69 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_soc.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "ipe_soc.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + + +int cam_ipe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info) +{ + int i; + int rc; + + for (i = 0; i < soc_info->num_rgltr; i++) { + rc = cam_wrapper_regulator_set_mode(soc_info->rgltr[i], + REGULATOR_MODE_FAST, soc_info->rgltr_name[i]); + if (rc) { + CAM_ERR(CAM_ICP, "Regulator set mode %s failed", + soc_info->rgltr_name[i]); + goto rgltr_set_mode_failed; + } + } + return 0; + +rgltr_set_mode_failed: + for (i = i - 1; i >= 0; i--) + if (soc_info->rgltr[i]) + cam_wrapper_regulator_set_mode(soc_info->rgltr[i], + REGULATOR_MODE_NORMAL, soc_info->rgltr_name[i]); + + return rc; +} + +int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info) +{ + int i; + int rc; + + for (i = 0; i < soc_info->num_rgltr; i++) { + rc = cam_wrapper_regulator_set_mode(soc_info->rgltr[i], + REGULATOR_MODE_NORMAL, soc_info->rgltr_name[i]); + if (rc) { + CAM_ERR(CAM_ICP, "Regulator set mode %s failed", + soc_info->rgltr_name[i]); + goto rgltr_set_mode_failed; + } + } + return 0; + +rgltr_set_mode_failed: + for (i = i - 1; i >= 0; i--) + if (soc_info->rgltr[i]) + cam_wrapper_regulator_set_mode(soc_info->rgltr[i], + REGULATOR_MODE_FAST, soc_info->rgltr_name[i]); + + return rc; +} + +static int cam_ipe_get_dt_properties(struct cam_hw_soc_info *soc_info) +{ + int rc = 0, num_pid, i; + struct platform_device *pdev = 
soc_info->pdev; + struct device_node *of_node = pdev->dev.of_node; + struct cam_ipe_soc_private *ipe_soc_private = soc_info->soc_private; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_ICP, "get ipe dt prop is failed"); + goto end; + } + + num_pid = of_property_count_u32_elems(of_node, "cam_hw_pid"); + CAM_DBG(CAM_ICP, "IPE pid count: %d", num_pid); + + if (num_pid <= 0) + goto end; + + ipe_soc_private->pid = CAM_MEM_ZALLOC_ARRAY(num_pid, sizeof(uint32_t), GFP_KERNEL); + if (!ipe_soc_private->pid) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for IPE hw pids"); + rc = -ENOMEM; + goto end; + } + + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(of_node, "cam_hw_pid", i, &ipe_soc_private->pid[i]); + ipe_soc_private->num_pid = num_pid; + +end: + return rc; +} + +static int cam_ipe_request_platform_resource( + struct cam_hw_soc_info *soc_info, + irq_handler_t ipe_irq_handler, void *data) +{ + int rc, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, ipe_irq_handler, &(irq_data[0])); + + return rc; +} + +int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t ipe_irq_handler, void *irq_data) +{ + struct cam_ipe_soc_private *soc_private; + int rc = 0; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_ipe_soc_private), GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_ICP, "Failed at allocating IPE soc_private"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + + rc = cam_ipe_get_dt_properties(soc_info); + if (rc < 0) + return rc; + + rc = cam_ipe_request_platform_resource(soc_info, ipe_irq_handler, + irq_data); + if (rc < 0) + return rc; + + return rc; +} + +void cam_ipe_deinit_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_ipe_soc_private *soc_private; + + soc_private = soc_info->soc_private; + if (soc_private) { + if 
(soc_private->pid) { + CAM_MEM_FREE(soc_private->pid); + soc_private->pid = NULL; + } + + CAM_MEM_FREE(soc_private); + soc_private = NULL; + } + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_WARN(CAM_ICP, "release platform resources fail"); +} + +int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + CAM_SVS_VOTE, false); + if (rc) { + CAM_ERR(CAM_ICP, "enable platform failed"); + return rc; + } + + return rc; +} + +int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info, + bool disable_clk) +{ + int rc = 0; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, disable_clk, + false); + if (rc) + CAM_ERR(CAM_ICP, "enable platform failed"); + + return rc; +} + +int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t *clk_rate) +{ + int32_t src_clk_idx; + + if (!soc_info) + return -EINVAL; + + src_clk_idx = soc_info->src_clk_idx; + + if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) && + (soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) && + (*clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) { + CAM_DBG(CAM_PERF, "clk_rate %d greater than max, reset to %d", + *clk_rate, + soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]); + *clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]; + } + + return cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, *clk_rate, 0); +} + +int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable) +{ + int rc = 0; + + if (clk_enable) + rc = cam_soc_util_clk_enable_default(soc_info, CAM_CLK_SW_CLIENT_IDX, CAM_SVS_VOTE); + else + cam_soc_util_clk_disable_default(soc_info, CAM_CLK_SW_CLIENT_IDX); + + CAM_DBG(CAM_ICP, "%s IPE clock", clk_enable ? 
"Enable" : "Disable"); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_soc.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_soc.h new file mode 100644 index 0000000000..b856433fac --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ipe_hw/ipe_soc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_IPE_SOC_H +#define CAM_IPE_SOC_H + +#include "cam_soc_util.h" + +struct cam_ipe_soc_private { + uint32_t num_pid; + uint32_t *pid; +}; + +int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t ipe_irq_handler, void *irq_data); + +int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info); + +int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info, + bool disable_clk); + +int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info); + +int cam_ipe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info); + +int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t *clk_rate); +int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable); +void cam_ipe_deinit_soc_resources(struct cam_hw_soc_info *soc_info); +#endif /* CAM_IPE_SOC_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_core.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_core.c new file mode 100644 index 0000000000..7e17476566 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_core.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "ofe_core.h" +#include "ofe_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_ofe_hw_intf.h" +#include "cam_icp_hw_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "hfi_reg.h" +#include "cam_common_util.h" + +static int cam_ofe_cpas_vote(struct cam_ofe_device_core_info *core_info, + struct cam_icp_cpas_vote *cpas_vote) +{ + int rc = 0; + + if (cpas_vote->ahb_vote_valid) + rc = cam_cpas_update_ahb_vote(core_info->cpas_handle, + &cpas_vote->ahb_vote); + if (rc) + CAM_ERR(CAM_PERF, "CPAS AHB vote failed rc:%d", rc); + + if (cpas_vote->axi_vote_valid) + rc = cam_cpas_update_axi_vote(core_info->cpas_handle, + &cpas_vote->axi_vote); + if (rc) + CAM_ERR(CAM_PERF, "CPAS AXI vote failed rc:%d", rc); + + return rc; +} + +int cam_ofe_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *ofe_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ofe_device_core_info *core_info = NULL; + struct cam_icp_cpas_vote cpas_vote; + unsigned long flags; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &ofe_dev->soc_info; + core_info = (struct cam_ofe_device_core_info *)ofe_dev->core_info; + + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + spin_lock_irqsave(&ofe_dev->hw_lock, flags); + if (ofe_dev->hw_state == CAM_HW_STATE_POWER_UP) { + core_info->power_on_cnt++; + spin_unlock_irqrestore(&ofe_dev->hw_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ofe_dev->hw_lock, flags); + + rc = cam_vmrm_soc_acquire_resources(CAM_HW_ID_OFE + core_info->ofe_hw_info->hw_idx); + if (rc) { + CAM_ERR(CAM_ICP, "OFE hw id %x acquire 
ownership failed", + CAM_HW_ID_OFE + core_info->ofe_hw_info->hw_idx); + return rc; + } + + cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE; + cpas_vote.ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + cpas_vote.axi_vote.num_paths = 1; + cpas_vote.axi_vote.axi_path[0].path_data_type = + CAM_OFE_DEFAULT_AXI_PATH; + cpas_vote.axi_vote.axi_path[0].transac_type = + CAM_OFE_DEFAULT_AXI_TRANSAC; + cpas_vote.axi_vote.axi_path[0].camnoc_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote.ahb_vote, &cpas_vote.axi_vote); + if (rc) { + CAM_ERR(CAM_ICP, "cpas start failed: %d", rc); + return rc; + } + core_info->cpas_start = true; + + rc = cam_ofe_enable_soc_resources(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "soc enable failed rc:%d", rc); + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_ICP, "cpas stop failed"); + else + core_info->cpas_start = false; + } else { + core_info->clk_enable = true; + } + + spin_lock_irqsave(&ofe_dev->hw_lock, flags); + ofe_dev->hw_state = CAM_HW_STATE_POWER_UP; + core_info->power_on_cnt++; + spin_unlock_irqrestore(&ofe_dev->hw_lock, flags); + + CAM_DBG(CAM_ICP, "OFE%u powered on", soc_info->index); + return rc; +} + +int cam_ofe_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *ofe_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ofe_device_core_info *core_info = NULL; + unsigned long flags; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &ofe_dev->soc_info; + core_info = (struct cam_ofe_device_core_info *)ofe_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + spin_lock_irqsave(&ofe_dev->hw_lock, flags); + if 
(ofe_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + spin_unlock_irqrestore(&ofe_dev->hw_lock, flags); + return 0; + } + + core_info->power_on_cnt--; + if (core_info->power_on_cnt) { + spin_unlock_irqrestore(&ofe_dev->hw_lock, flags); + CAM_DBG(CAM_ICP, "OFE%u power on reference still held %u", + soc_info->index, core_info->power_on_cnt); + return 0; + } + spin_unlock_irqrestore(&ofe_dev->hw_lock, flags); + + rc = cam_ofe_disable_soc_resources(soc_info, core_info->clk_enable); + if (rc) + CAM_ERR(CAM_ICP, "soc disable failed: %d", rc); + else + core_info->clk_enable = false; + + if (core_info->cpas_start) { + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_ICP, "cpas stop failed"); + else + core_info->cpas_start = false; + } + + spin_lock_irqsave(&ofe_dev->hw_lock, flags); + ofe_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + spin_unlock_irqrestore(&ofe_dev->hw_lock, flags); + + CAM_DBG(CAM_ICP, "OFE%u powered off", soc_info->index); + + rc = cam_vmrm_soc_release_resources(CAM_HW_ID_OFE + core_info->ofe_hw_info->hw_idx); + if (rc) { + CAM_ERR(CAM_ICP, "OFE hw id %x release ownership failed", + CAM_HW_ID_OFE + core_info->ofe_hw_info->hw_idx); + return rc; + } + + return rc; +} + +static int cam_ofe_handle_pc(struct cam_hw_info *ofe_dev) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ofe_device_core_info *core_info = NULL; + struct cam_ofe_device_hw_info *hw_info = NULL; + int rc = 0, pwr_ctrl, pwr_status; + + soc_info = &ofe_dev->soc_info; + core_info = (struct cam_ofe_device_core_info *)ofe_dev->core_info; + hw_info = core_info->ofe_hw_info; + + if (!core_info->cpas_start) { + CAM_DBG(CAM_ICP, "CPAS OFE client not started"); + return 0; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + if (!(pwr_ctrl & OFE_COLLAPSE_MASK)) { + rc = cam_cpas_reg_read(core_info->cpas_handle, + 
CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0x1); + + if ((pwr_status >> OFE_PWR_ON_MASK)) + CAM_WARN(CAM_PERF, "OFE: pwr_status(%x):pwr_ctrl(%x)", + pwr_status, pwr_ctrl); + } + + rc = cam_ofe_get_gdsc_control(soc_info); + if (rc) { + CAM_ERR(CAM_ICP, "failed to get gdsc control rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + CAM_DBG(CAM_PERF, "pwr_ctrl=%x pwr_status=%x", pwr_ctrl, pwr_status); + + return 0; +} + +static int cam_ofe_handle_resume(struct cam_hw_info *ofe_dev) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ofe_device_core_info *core_info = NULL; + struct cam_ofe_device_hw_info *hw_info = NULL; + int pwr_ctrl, pwr_status, rc = 0; + + soc_info = &ofe_dev->soc_info; + core_info = (struct cam_ofe_device_core_info *)ofe_dev->core_info; + hw_info = core_info->ofe_hw_info; + + if (!core_info->cpas_start) { + CAM_DBG(CAM_ICP, "CPAS OFE client not started"); + return 0; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + if (pwr_ctrl & OFE_COLLAPSE_MASK) { + CAM_DBG(CAM_PERF, "OFE: pwr_ctrl set(%x)", pwr_ctrl); + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0); + } + + rc = cam_ofe_transfer_gdsc_control(soc_info); + if (rc) { + 
CAM_ERR(CAM_ICP, "failed to transfer gdsc control rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_ctrl, + true, &pwr_ctrl); + if (rc) { + CAM_ERR(CAM_ICP, "power ctrl read failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_reg_read(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, hw_info->pwr_status, + true, &pwr_status); + if (rc) { + CAM_ERR(CAM_ICP, "power status read failed rc=%d", rc); + return rc; + } + + CAM_DBG(CAM_PERF, "pwr_ctrl=%x pwr_status=%x", pwr_ctrl, pwr_status); + + return rc; +} + +int cam_ofe_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *ofe_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ofe_device_core_info *core_info = NULL; + struct cam_ofe_device_hw_info *hw_info = NULL; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_ICP, "Invalid arguments"); + return -EINVAL; + } + + if (cmd_type >= CAM_ICP_DEV_CMD_MAX) { + CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type); + return -EINVAL; + } + + soc_info = &ofe_dev->soc_info; + core_info = (struct cam_ofe_device_core_info *)ofe_dev->core_info; + hw_info = core_info->ofe_hw_info; + + switch (cmd_type) { + case CAM_ICP_DEV_CMD_VOTE_CPAS: { + struct cam_icp_cpas_vote *cpas_vote = cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_ICP, "cmd args NULL"); + return -EINVAL; + } + + cam_ofe_cpas_vote(core_info, cpas_vote); + break; + } + + case CAM_ICP_DEV_CMD_CPAS_START: { + struct cam_icp_cpas_vote *cpas_vote = cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_ICP, "cmd args NULL"); + return -EINVAL; + } + + if (!core_info->cpas_start) { + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote->ahb_vote, + &cpas_vote->axi_vote); + core_info->cpas_start = true; + } + break; + } + + case CAM_ICP_DEV_CMD_CPAS_STOP: + if (core_info->cpas_start) { + cam_cpas_stop(core_info->cpas_handle); + core_info->cpas_start = false; + } + break; + case 
CAM_ICP_DEV_CMD_POWER_COLLAPSE: + rc = cam_ofe_handle_pc(ofe_dev); + break; + case CAM_ICP_DEV_CMD_POWER_RESUME: + rc = cam_ofe_handle_resume(ofe_dev); + break; + case CAM_ICP_DEV_CMD_UPDATE_CLK: { + struct cam_icp_dev_clk_update_cmd *clk_upd_cmd = cmd_args; + struct cam_ahb_vote ahb_vote; + uint32_t clk_rate = clk_upd_cmd->curr_clk_rate; + int32_t clk_level = 0, err = 0; + + CAM_DBG(CAM_PERF, "ofe_src_clk rate = %d", (int)clk_rate); + + if (!core_info->clk_enable) { + if (clk_upd_cmd->dev_pc_enable) { + cam_ofe_handle_pc(ofe_dev); + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REGBASE_CPASTOP, + hw_info->pwr_ctrl, true, 0x0); + } + rc = cam_ofe_toggle_clk(soc_info, true); + if (rc) + CAM_ERR(CAM_ICP, "Enable failed"); + else + core_info->clk_enable = true; + if (clk_upd_cmd->dev_pc_enable) { + rc = cam_ofe_handle_resume(ofe_dev); + if (rc) + CAM_ERR(CAM_ICP, "OFE resume failed"); + } + } + CAM_DBG(CAM_PERF, "clock rate %d", clk_rate); + rc = cam_ofe_update_clk_rate(soc_info, clk_rate); + if (rc) + CAM_ERR(CAM_PERF, "Failed to update clk %d", clk_rate); + + err = cam_soc_util_get_clk_level(soc_info, + clk_rate, soc_info->src_clk_idx, + &clk_level); + + if (!err) { + clk_upd_cmd->clk_level = clk_level; + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = clk_level; + + rc = cam_cpas_update_ahb_vote(core_info->cpas_handle, &ahb_vote); + if (rc) { + CAM_ERR(CAM_PERF, "failed at updating ahb vote level rc: %d", + rc); + return rc; + } + + rc = cam_cpas_update_axi_floor_lvl(core_info->cpas_handle, + clk_level); + if (rc) { + CAM_ERR(CAM_PERF, + "failed at updating axi vote level clk_level rc: %d", + rc); + return rc; + } + + } + break; + } + case CAM_ICP_DEV_CMD_DISABLE_CLK: + if (core_info->clk_enable) + cam_ofe_toggle_clk(soc_info, false); + core_info->clk_enable = false; + break; + default: + CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type); + rc = -EINVAL; + break; + } + return rc; +} + +irqreturn_t cam_ofe_irq(int irq_num, void *data) +{ + return 
IRQ_HANDLED; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_core.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_core.h new file mode 100644 index 0000000000..0ded309c41 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_core.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_OFE_CORE_H +#define CAM_OFE_CORE_H + +#include +#include +#include +#include + +#define OFE_COLLAPSE_MASK 0x1 +#define OFE_PWR_ON_MASK 0x2 + +struct cam_ofe_device_hw_info { + uint32_t hw_idx; + uint32_t pwr_ctrl; + uint32_t pwr_status; +}; + +struct cam_ofe_device_core_info { + struct cam_ofe_device_hw_info *ofe_hw_info; + uint32_t cpas_handle; + uint32_t power_on_cnt; + bool cpas_start; + bool clk_enable; +}; + +int cam_ofe_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_ofe_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_ofe_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); + +irqreturn_t cam_ofe_irq(int irq_num, void *data); + +/** + * @brief : API to register OFE hw to platform framework. + * @return struct platform_device pointer on success, or ERR_PTR() on error. + */ +int cam_ofe_init_module(void); + +/** + * @brief : API to remove OFE Hw from platform framework. 
+ */ +void cam_ofe_exit_module(void); +#endif /* CAM_OFE_CORE_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_dev.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_dev.c new file mode 100644 index 0000000000..0d0807e85d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_dev.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "ofe_core.h" +#include "ofe_soc.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_io_util.h" +#include "cam_icp_hw_intf.h" +#include "cam_icp_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static struct cam_icp_hw_intf_data cam_ofe_hw_list[CAM_OFE_HW_NUM_MAX]; + +static struct cam_ofe_device_hw_info cam_ofe_hw_info = { + .hw_idx = 0x0, + .pwr_ctrl = 0x100, + .pwr_status = 0xFC, +}; + +static bool cam_ofe_cpas_cb(uint32_t client_handle, void *userdata, + struct cam_cpas_irq_data *irq_data) +{ + bool error_handled = false; + + if (!irq_data) + return error_handled; + + switch (irq_data->irq_type) { + case CAM_CAMNOC_IRQ_OFE_WR_UBWC_ENCODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ICP, + "OFE Write UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d", + irq_data->irq_type, + irq_data->u.dec_err.decerr_status.value, + irq_data->u.dec_err.decerr_status.thr_err, + irq_data->u.dec_err.decerr_status.fcl_err, + irq_data->u.dec_err.decerr_status.len_md_err, + irq_data->u.dec_err.decerr_status.format_err); + error_handled = true; + break; + case CAM_CAMNOC_IRQ_OFE_RD_UBWC_DECODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ICP, + "OFE Read UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, 
len_md_err=%d, format_err=%d", + irq_data->irq_type, + irq_data->u.dec_err.decerr_status.value, + irq_data->u.dec_err.decerr_status.thr_err, + irq_data->u.dec_err.decerr_status.fcl_err, + irq_data->u.dec_err.decerr_status.len_md_err, + irq_data->u.dec_err.decerr_status.format_err); + error_handled = true; + break; + default: + break; + } + + return error_handled; +} + +int cam_ofe_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_ofe_device_core_info *core_info, uint32_t hw_idx) +{ + struct cam_cpas_register_params cpas_register_params; + int rc; + + cpas_register_params.dev = &soc_info->pdev->dev; + memcpy(cpas_register_params.identifier, "ofe", sizeof("ofe")); + cpas_register_params.cam_cpas_client_cb = cam_ofe_cpas_cb; + cpas_register_params.cell_index = hw_idx; + cpas_register_params.userdata = NULL; + + rc = cam_cpas_register_client(&cpas_register_params); + if (rc < 0) { + CAM_ERR(CAM_ICP, "failed: %d", rc); + return rc; + } + core_info->cpas_handle = cpas_register_params.client_handle; + + return rc; +} + +static int cam_ofe_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *ofe_dev = NULL; + struct cam_hw_intf *ofe_dev_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_ofe_device_core_info *core_info = NULL; + struct cam_ofe_device_hw_info *hw_info = NULL; + int rc = 0, i; + uint32_t hw_idx; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + struct cam_ofe_soc_private *ofe_soc_priv; + + CAM_GET_TIMESTAMP(ts_start); + ofe_dev_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!ofe_dev_intf) + return -ENOMEM; + + of_property_read_u32(pdev->dev.of_node, + "cell-index", &hw_idx); + ofe_dev_intf->hw_idx = hw_idx; + + ofe_dev = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!ofe_dev) { + rc = -ENOMEM; + goto free_dev_intf; + } + + ofe_dev->soc_info.pdev = pdev; + 
ofe_dev->soc_info.dev = &pdev->dev; + ofe_dev->soc_info.dev_name = pdev->name; + ofe_dev_intf->hw_priv = ofe_dev; + ofe_dev_intf->hw_ops.init = cam_ofe_init_hw; + ofe_dev_intf->hw_ops.deinit = cam_ofe_deinit_hw; + ofe_dev_intf->hw_ops.process_cmd = cam_ofe_process_cmd; + ofe_dev_intf->hw_type = CAM_ICP_DEV_OFE; + platform_set_drvdata(pdev, &cam_ofe_hw_list[hw_idx]); + ofe_dev->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_ofe_device_core_info), GFP_KERNEL); + if (!ofe_dev->core_info) { + rc = -ENOMEM; + goto free_dev; + } + core_info = (struct cam_ofe_device_core_info *)ofe_dev->core_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_ICP, "No ofe hardware info"); + goto free_core_info; + } + + hw_info = (struct cam_ofe_device_hw_info *)match_dev->data; + core_info->ofe_hw_info = hw_info; + + rc = cam_ofe_init_soc_resources(&ofe_dev->soc_info, cam_ofe_irq, ofe_dev); + if (rc) { + CAM_ERR(CAM_ICP, "failed to init_soc"); + goto free_core_info; + } + + CAM_DBG(CAM_ICP, "soc info : %pK", (void *)&ofe_dev->soc_info); + + rc = cam_ofe_register_cpas(&ofe_dev->soc_info, + core_info, ofe_dev_intf->hw_idx); + if (rc) + goto free_soc_resources; + + ofe_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + ofe_soc_priv = ofe_dev->soc_info.soc_private; + cam_ofe_hw_list[hw_idx].pid = + CAM_MEM_ZALLOC_ARRAY(ofe_soc_priv->num_pid, sizeof(uint32_t), GFP_KERNEL); + if (!cam_ofe_hw_list[hw_idx].pid) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for ofe hw list"); + rc = -ENOMEM; + goto free_soc_resources; + } + + cam_ofe_hw_list[hw_idx].hw_intf = ofe_dev_intf; + cam_ofe_hw_list[hw_idx].num_pid = ofe_soc_priv->num_pid; + for (i = 0; i < ofe_soc_priv->num_pid; i++) + cam_ofe_hw_list[hw_idx].pid[i] = ofe_soc_priv->pid[i]; + + mutex_init(&ofe_dev->hw_mutex); + spin_lock_init(&ofe_dev->hw_lock); + init_completion(&ofe_dev->hw_complete); + CAM_DBG(CAM_ICP, "OFE:%d component bound successfully", + ofe_dev_intf->hw_idx); + 
CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +free_soc_resources: + cam_ofe_deinit_soc_resources(&ofe_dev->soc_info); +free_core_info: + CAM_MEM_FREE(ofe_dev->core_info); +free_dev: + CAM_MEM_FREE(ofe_dev); +free_dev_intf: + CAM_MEM_FREE(ofe_dev_intf); + return rc; +} + +static void cam_ofe_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *ofe_dev = NULL; + struct cam_hw_intf *ofe_dev_intf = NULL; + struct cam_ofe_device_core_info *core_info = NULL; + struct platform_device *pdev = to_platform_device(dev); + struct cam_icp_hw_intf_data *ofe_intf_data; + + CAM_DBG(CAM_ICP, "Unbinding component: %s", pdev->name); + ofe_intf_data = platform_get_drvdata(pdev); + if (!ofe_intf_data) { + CAM_ERR(CAM_ICP, "Error No data in pdev"); + return; + } + + ofe_dev_intf = ofe_intf_data->hw_intf; + if (!ofe_dev_intf) { + CAM_ERR(CAM_ICP, "Error No OFE dev interface"); + return; + } + + ofe_dev = ofe_dev_intf->hw_priv; + core_info = (struct cam_ofe_device_core_info *)ofe_dev->core_info; + cam_cpas_unregister_client(core_info->cpas_handle); + if (ofe_intf_data->pid) + CAM_MEM_FREE(ofe_intf_data->pid); + cam_ofe_deinit_soc_resources(&ofe_dev->soc_info); + CAM_MEM_FREE(ofe_dev->core_info); + CAM_MEM_FREE(ofe_dev); + CAM_MEM_FREE(ofe_dev_intf); +} + +static const struct component_ops cam_ofe_component_ops = { + .bind = cam_ofe_component_bind, + .unbind = cam_ofe_component_unbind, +}; + +int cam_ofe_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ICP, "Adding OFE component"); + rc = component_add(&pdev->dev, &cam_ofe_component_ops); + if (rc) + CAM_ERR(CAM_ICP, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_ofe_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_ofe_component_ops); + return 0; +} + +static const struct of_device_id cam_ofe_dt_match[] = { + { 
+ .compatible = "qcom,cam-ofe", + .data = &cam_ofe_hw_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_ofe_dt_match); + +struct platform_driver cam_ofe_driver = { + .probe = cam_ofe_probe, + .remove = cam_ofe_remove, + .driver = { + .name = "cam-ofe", + .owner = THIS_MODULE, + .of_match_table = cam_ofe_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_ofe_init_module(void) +{ + return platform_driver_register(&cam_ofe_driver); +} + +void cam_ofe_exit_module(void) +{ + platform_driver_unregister(&cam_ofe_driver); +} + +MODULE_DESCRIPTION("CAM OFE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_soc.c b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_soc.c new file mode 100644 index 0000000000..fd70f35210 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_soc.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "ofe_soc.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +static int cam_ofe_get_dt_properties(struct cam_hw_soc_info *soc_info) +{ + int rc = 0, num_pid, i; + struct platform_device *pdev = soc_info->pdev; + struct device_node *of_node = pdev->dev.of_node; + struct cam_ofe_soc_private *ofe_soc_private = soc_info->soc_private; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_ICP, "get ofe dt prop is failed"); + goto end; + } + + num_pid = of_property_count_u32_elems(of_node, "cam_hw_pid"); + CAM_DBG(CAM_ICP, "OFE pid count: %d", num_pid); + + if (num_pid <= 0) + goto end; + + ofe_soc_private->pid = CAM_MEM_ZALLOC_ARRAY(num_pid, sizeof(uint32_t), GFP_KERNEL); + if (!ofe_soc_private->pid) { + CAM_ERR(CAM_ICP, "Failed at allocating memory for OFE hw pids"); + rc = -ENOMEM; + goto end; + } + + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(of_node, "cam_hw_pid", i, &ofe_soc_private->pid[i]); + ofe_soc_private->num_pid = num_pid; + +end: + return rc; +} + +static int cam_ofe_request_platform_resource( + struct cam_hw_soc_info *soc_info, + irq_handler_t ofe_irq_handler, void *data) +{ + int rc = 0, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, + ofe_irq_handler, &(irq_data[0])); + + return rc; +} + +int cam_ofe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t ofe_irq_handler, void *irq_data) +{ + struct cam_ofe_soc_private *soc_private; + int rc = 0; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_ofe_soc_private), GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_ICP, "Failed at allocating OFE soc_private"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + + rc = cam_ofe_get_dt_properties(soc_info); + if (rc < 0) + return rc; + + rc = 
cam_ofe_request_platform_resource(soc_info, + ofe_irq_handler, irq_data); + if (rc < 0) + return rc; + + return rc; +} + +void cam_ofe_deinit_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_ofe_soc_private *soc_private; + + soc_private = soc_info->soc_private; + if (soc_private) { + if (soc_private->pid) { + CAM_MEM_FREE(soc_private->pid); + soc_private->pid = NULL; + } + + CAM_MEM_FREE(soc_private); + soc_private = NULL; + } + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_WARN(CAM_ICP, "release platform resources fail"); +} + +int cam_ofe_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, + true, CAM_SVS_VOTE, false); + if (rc) + CAM_ERR(CAM_ICP, "enable platform failed"); + + return rc; +} + +int cam_ofe_disable_soc_resources(struct cam_hw_soc_info *soc_info, + bool disable_clk) +{ + int rc = 0; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, + disable_clk, false); + if (rc) + CAM_ERR(CAM_ICP, "disable platform failed"); + + return rc; +} + +static inline int cam_ofe_set_regulators_mode(struct cam_hw_soc_info *soc_info, uint32_t mode) +{ + int i, rc; + + for (i = 0; i < soc_info->num_rgltr; i++) { + rc = cam_wrapper_regulator_set_mode(soc_info->rgltr[i], mode, + soc_info->rgltr_name[i]); + if (rc) { + CAM_ERR(CAM_ICP, "Regulator set mode %s failed", + soc_info->rgltr_name[i]); + goto rgltr_set_mode_failed; + } + } + return 0; + +rgltr_set_mode_failed: + for (i = i - 1; i >= 0; i--) { + if (soc_info->rgltr[i]) + cam_wrapper_regulator_set_mode(soc_info->rgltr[i], + (mode == REGULATOR_MODE_NORMAL ? 
+ REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL), + soc_info->rgltr_name[i]); + } + + return rc; +} + +int cam_ofe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info) +{ + return cam_ofe_set_regulators_mode(soc_info, REGULATOR_MODE_FAST); +} + +int cam_ofe_get_gdsc_control(struct cam_hw_soc_info *soc_info) +{ + return cam_ofe_set_regulators_mode(soc_info, REGULATOR_MODE_NORMAL); +} + +int cam_ofe_update_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t clk_rate) +{ + int32_t src_clk_idx; + + if (!soc_info) + return -EINVAL; + + src_clk_idx = soc_info->src_clk_idx; + + if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) && + (soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) && + (clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) { + CAM_DBG(CAM_PERF, "clk_rate %d greater than max, reset to %d", + clk_rate, + soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]); + clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]; + } + + return cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, clk_rate, 0); +} + +int cam_ofe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable) +{ + int rc = 0; + + if (clk_enable) + rc = cam_soc_util_clk_enable_default(soc_info, CAM_CLK_SW_CLIENT_IDX, CAM_SVS_VOTE); + else + cam_soc_util_clk_disable_default(soc_info, CAM_CLK_SW_CLIENT_IDX); + + CAM_DBG(CAM_ICP, "%s OFE clock", clk_enable ? "Enable" : "Disable"); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_soc.h b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_soc.h new file mode 100644 index 0000000000..3a6c413838 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_icp/icp_hw/ofe_hw/ofe_soc.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_OFE_SOC_H_ +#define _CAM_OFE_SOC_H_ + +#include "cam_soc_util.h" + +struct cam_ofe_soc_private { + uint32_t num_pid; + uint32_t *pid; +}; + +int cam_ofe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t ofe_irq_handler, void *irq_data); + +int cam_ofe_enable_soc_resources(struct cam_hw_soc_info *soc_info); + +int cam_ofe_disable_soc_resources(struct cam_hw_soc_info *soc_info, + bool disable_clk); + +int cam_ofe_get_gdsc_control(struct cam_hw_soc_info *soc_info); + +int cam_ofe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info); + +int cam_ofe_update_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t clk_rate); + +int cam_ofe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable); + +void cam_ofe_deinit_soc_resources(struct cam_hw_soc_info *soc_info); +#endif /* _CAM_OFE_SOC_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_context.c b/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_context.c new file mode 100644 index 0000000000..cad4b5fcb6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_context.c @@ -0,0 +1,10541 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+ */ + +#include +#include +#include +#include +#include + +#include "cam_mem_mgr.h" +#include "cam_sync_api.h" +#include "cam_req_mgr_dev.h" +#include "cam_trace.h" +#include "cam_debug_util.h" +#include "cam_packet_util.h" +#include "cam_context_utils.h" +#include "cam_cdm_util.h" +#include "cam_isp_context.h" +#include "cam_common_util.h" +#include "cam_req_mgr_debug.h" +#include "cam_cpas_api.h" +#include "cam_ife_hw_mgr.h" +#include "cam_subdev.h" +#include "cam_mem_mgr_api.h" + +static const char isp_dev_name[] = "cam-isp"; + +static struct cam_isp_ctx_debug isp_ctx_debug; + +#define INC_HEAD(head, max_entries, ret) \ + div_u64_rem(atomic64_add_return(1, head),\ + max_entries, (ret)) + +static int cam_isp_context_dump_requests(void *data, + void *pf_args); + +static int cam_isp_context_hw_recovery(void *priv, void *data); +static int cam_isp_context_handle_message(void *context, + uint32_t msg_type, void *data); + +static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd); + +static void __cam_isp_ctx_dump_state_monitor_array( + struct cam_isp_context *ctx_isp); + +static const char *__cam_isp_hw_evt_val_to_type( + uint32_t evt_id); + +static const char *__cam_isp_ctx_substate_val_to_type( + enum cam_isp_ctx_activated_substate type); + +static int __cam_isp_ctx_check_deferred_buf_done( + struct cam_isp_context *ctx_isp, + struct cam_isp_hw_done_event_data *done, + uint32_t bubble_state); + +static int __cam_isp_ctx_print_event_record(struct cam_isp_context *ctx_isp); + +static int __cam_isp_ctx_flush_req(struct cam_context *ctx, + struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req); + +static const char *__cam_isp_evt_val_to_type( + uint32_t evt_id) +{ + switch (evt_id) { + case CAM_ISP_CTX_EVENT_SUBMIT: + return "SUBMIT"; + case CAM_ISP_CTX_EVENT_APPLY: + return "APPLY"; + case CAM_ISP_CTX_EVENT_EPOCH: + return "EPOCH"; + case CAM_ISP_CTX_EVENT_RUP: + return "RUP"; + case 
CAM_ISP_CTX_EVENT_BUFDONE: + return "BUFDONE"; + case CAM_ISP_CTX_EVENT_SHUTTER: + return "SHUTTER_NOTIFICATION"; + default: + return "CAM_ISP_EVENT_INVALID"; + } +} + +static void __cam_isp_ctx_update_event_record( + struct cam_isp_context *ctx_isp, + enum cam_isp_ctx_event event, + struct cam_ctx_request *req, + void *event_data) +{ + int iterator = 0; + ktime_t cur_time; + struct cam_isp_ctx_req *req_isp; + + if (!ctx_isp) { + CAM_ERR(CAM_ISP, "Invalid Args"); + return; + } + switch (event) { + case CAM_ISP_CTX_EVENT_EPOCH: + case CAM_ISP_CTX_EVENT_RUP: + case CAM_ISP_CTX_EVENT_BUFDONE: + case CAM_ISP_CTX_EVENT_SHUTTER: + break; + case CAM_ISP_CTX_EVENT_SUBMIT: + case CAM_ISP_CTX_EVENT_APPLY: + if (!req) { + CAM_ERR(CAM_ISP, "Invalid arg for event %d", event); + return; + } + break; + default: + break; + } + + INC_HEAD(&ctx_isp->dbg_monitors.event_record_head[event], + CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, &iterator); + cur_time = ktime_get(); + if (req) { + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + ctx_isp->dbg_monitors.event_record[event][iterator].req_id = + req->request_id; + req_isp->event_timestamp[event] = cur_time; + } else { + ctx_isp->dbg_monitors.event_record[event][iterator].req_id = 0; + } + ctx_isp->dbg_monitors.event_record[event][iterator].timestamp = cur_time; + + if (event_data == NULL) + return; + /* Update event specific data */ + ctx_isp->dbg_monitors.event_record[event][iterator].event_type = event; + if (event == CAM_ISP_CTX_EVENT_SHUTTER) { + ctx_isp->dbg_monitors.event_record[event][iterator].req_id = + ((struct shutter_event *)event_data)->req_id; + ctx_isp->dbg_monitors.event_record[event][iterator].event.shutter_event.req_id = + ((struct shutter_event *)event_data)->req_id; + ctx_isp->dbg_monitors.event_record[event][iterator].event.shutter_event.status = + ((struct shutter_event *)event_data)->status; + ctx_isp->dbg_monitors.event_record[event][iterator].event.shutter_event.frame_id = + ((struct shutter_event 
*)event_data)->frame_id; + ctx_isp->dbg_monitors.event_record[event][iterator].event.shutter_event.boot_ts = + ((struct shutter_event *)event_data)->boot_ts; + ctx_isp->dbg_monitors.event_record[event][iterator].event.shutter_event.sof_ts = + ((struct shutter_event *)event_data)->sof_ts; + } +} + +static int __cam_isp_ctx_handle_sof_freeze_evt( + struct cam_context *ctx) +{ + int rc = 0; + struct cam_isp_context *ctx_isp; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + + ctx_isp = (struct cam_isp_context *)ctx->ctx_priv; + __cam_isp_ctx_print_event_record(ctx_isp); + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG; + isp_hw_cmd_args.u.sof_irq_enable = 1; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + + ctx_isp->sof_dbg_irq_en = true; + return rc; +} + +static void *cam_isp_ctx_user_dump_events( + void *dump_struct, uint8_t *addr_ptr) +{ + uint64_t *addr; + struct cam_isp_context_event_record *record; + struct timespec64 ts; + + record = (struct cam_isp_context_event_record *)dump_struct; + + addr = (uint64_t *)addr_ptr; + ts = ktime_to_timespec64(record->timestamp); + *addr++ = record->req_id; + *addr++ = ts.tv_sec; + *addr++ = ts.tv_nsec / NSEC_PER_USEC; + + return addr; +} + +static int __cam_isp_ctx_print_event_record(struct cam_isp_context *ctx_isp) +{ + int i, j, rc = 0; + int index; + uint32_t oldest_entry, num_entries; + uint64_t state_head; + struct cam_isp_context_event_record *record; + uint32_t len; + uint8_t buf[CAM_ISP_CONTEXT_DBG_BUF_LEN]; + struct timespec64 ts; + struct cam_context *ctx = ctx_isp->base; + + for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) { + state_head = atomic64_read(&ctx_isp->dbg_monitors.event_record_head[i]); + + if (state_head == -1) { + return 0; + } else if (state_head < 
CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) { + num_entries = state_head + 1; + oldest_entry = 0; + } else { + num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES; + div_u64_rem(state_head + 1, + CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, + &oldest_entry); + } + + index = oldest_entry; + len = 0; + memset(buf, 0, CAM_ISP_CONTEXT_DBG_BUF_LEN); + for (j = 0; j < num_entries; j++) { + record = &ctx_isp->dbg_monitors.event_record[i][index]; + ts = ktime_to_timespec64(record->timestamp); + if (len >= CAM_ISP_CONTEXT_DBG_BUF_LEN) { + CAM_WARN(CAM_ISP, "Overshooting buffer length %llu", len); + break; + } + if (record->event_type != CAM_ISP_CTX_EVENT_SHUTTER) + len += scnprintf(buf + len, CAM_ISP_CONTEXT_DBG_BUF_LEN - len, + "%llu[%lld:%06ld] ", record->req_id, ts.tv_sec, + ts.tv_nsec / NSEC_PER_USEC); + else + /* + * Output format + * req Id[timestamp] status frmId softs bootts + */ + len += scnprintf(buf + len, (CAM_ISP_CONTEXT_DBG_BUF_LEN) - len, + "%llu[%lld:%06ld] [%d %lld %llu %llu] | ", + record->req_id, ts.tv_sec, + ts.tv_nsec / NSEC_PER_USEC, + record->event.shutter_event.status, + record->event.shutter_event.frame_id, + record->event.shutter_event.sof_ts, + record->event.shutter_event.boot_ts); + index = (index + 1) % + CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES; + } + if (len) + CAM_INFO(CAM_ISP, "Ctx:%d %s: %s", + ctx->ctx_id, __cam_isp_evt_val_to_type(i), buf); + } + return rc; +} + +static int __cam_isp_ctx_dump_event_record( + struct cam_isp_context *ctx_isp, + struct cam_common_hw_dump_args *dump_args) +{ + int i, j, rc = 0; + int index; + size_t remain_len; + uint32_t oldest_entry, num_entries; + uint32_t min_len; + uint64_t state_head; + struct cam_isp_context_event_record *record; + + if (!dump_args || !ctx_isp) { + CAM_ERR(CAM_ISP, "Invalid args %pK %pK", + dump_args, ctx_isp); + return -EINVAL; + } + + for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) { + state_head = atomic64_read(&ctx_isp->dbg_monitors.event_record_head[i]); + + if (state_head == -1) + return 0; + else if 
(state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) { + num_entries = state_head + 1; + oldest_entry = 0; + } else { + num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES; + div_u64_rem(state_head + 1, CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, + &oldest_entry); + } + + index = oldest_entry; + + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu", + dump_args->buf_len, dump_args->offset); + return -ENOSPC; + } + + min_len = (sizeof(struct cam_isp_context_dump_header) + + (CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS * sizeof(uint64_t))) * num_entries; + remain_len = dump_args->buf_len - dump_args->offset; + + if (remain_len < min_len) { + CAM_WARN(CAM_ISP, + "Dump buffer exhaust remain %zu min %u", + remain_len, min_len); + return -ENOSPC; + } + for (j = 0; j < num_entries; j++) { + record = &ctx_isp->dbg_monitors.event_record[i][index]; + + rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_events, + record, sizeof(uint64_t), "ISP_EVT_%s:", + __cam_isp_evt_val_to_type(i)); + if (rc) { + CAM_ERR(CAM_ISP, + "CAM_ISP_CONTEXT DUMP_EVENT_RECORD: Dump failed, rc: %d", + rc); + return rc; + } + + index = (index + 1) % CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES; + } + } + return rc; +} + +static void __cam_isp_ctx_req_mini_dump(struct cam_ctx_request *req, + uint8_t *start_addr, uint8_t *end_addr, + unsigned long *bytes_updated) +{ + struct cam_isp_ctx_req_mini_dump *req_md; + struct cam_buf_io_cfg *io_cfg; + struct cam_isp_ctx_req *req_isp; + struct cam_packet *packet = NULL; + unsigned long bytes_required = 0; + + bytes_required = sizeof(*req_md); + *bytes_updated = 0; + if (start_addr + bytes_required > end_addr) + return; + + req_md = (struct cam_isp_ctx_req_mini_dump *)start_addr; + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + req_md->num_acked = req_isp->num_acked; + req_md->num_deferred_acks = req_isp->num_deferred_acks; + req_md->bubble_report = req_isp->bubble_report; + req_md->bubble_detected = 
req_isp->bubble_detected; + req_md->reapply_type = req_isp->reapply_type; + req_md->request_id = req->request_id; + *bytes_updated += bytes_required; + + if (req_isp->num_fence_map_out) { + bytes_required = sizeof(struct cam_hw_fence_map_entry) * + req_isp->num_fence_map_out; + if (start_addr + *bytes_updated + bytes_required > end_addr) + return; + + req_md->map_out = (struct cam_hw_fence_map_entry *) + ((uint8_t *)start_addr + *bytes_updated); + memcpy(req_md->map_out, req_isp->fence_map_out, bytes_required); + req_md->num_fence_map_out = req_isp->num_fence_map_out; + *bytes_updated += bytes_required; + } + + if (req_isp->num_fence_map_in) { + bytes_required = sizeof(struct cam_hw_fence_map_entry) * + req_isp->num_fence_map_in; + if (start_addr + *bytes_updated + bytes_required > end_addr) + return; + + req_md->map_in = (struct cam_hw_fence_map_entry *) + ((uint8_t *)start_addr + *bytes_updated); + memcpy(req_md->map_in, req_isp->fence_map_in, bytes_required); + req_md->num_fence_map_in = req_isp->num_fence_map_in; + *bytes_updated += bytes_required; + } + + packet = req_isp->hw_update_data.packet; + if (packet && packet->num_io_configs) { + bytes_required = packet->num_io_configs * sizeof(struct cam_buf_io_cfg); + if (start_addr + *bytes_updated + bytes_required > end_addr) + return; + + io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload_flex + + packet->io_configs_offset / 4); + req_md->io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)start_addr + *bytes_updated); + memcpy(req_md->io_cfg, io_cfg, bytes_required); + *bytes_updated += bytes_required; + req_md->num_io_cfg = packet->num_io_configs; + } +} + +static int __cam_isp_ctx_minidump_cb(void *priv, void *args) +{ + struct cam_isp_ctx_mini_dump_info *md; + struct cam_isp_context *ctx_isp; + struct cam_context *ctx; + struct cam_ctx_request *req, *req_temp; + struct cam_hw_mini_dump_args *dump_args; + uint8_t *start_addr; + uint8_t *end_addr; + unsigned long total_bytes = 0; + unsigned long 
bytes_updated = 0; + uint32_t i; + + if (!priv || !args) { + CAM_ERR(CAM_ISP, "invalid params"); + return 0; + } + + dump_args = (struct cam_hw_mini_dump_args *)args; + if (dump_args->len < sizeof(*md)) { + CAM_ERR(CAM_ISP, + "In sufficient size received %lu required size: %zu", + dump_args->len, sizeof(*md)); + return 0; + } + + ctx = (struct cam_context *)priv; + ctx_isp = (struct cam_isp_context *)ctx->ctx_priv; + start_addr = (uint8_t *)dump_args->start_addr; + end_addr = start_addr + dump_args->len; + md = (struct cam_isp_ctx_mini_dump_info *)dump_args->start_addr; + + md->sof_timestamp_val = ctx_isp->sof_timestamp_val; + md->boot_timestamp = ctx_isp->boot_timestamp; + md->last_sof_timestamp = ctx_isp->last_sof_timestamp; + md->init_timestamp = ctx_isp->init_timestamp; + md->frame_id = ctx_isp->frame_id; + md->reported_req_id = ctx_isp->reported_req_id; + md->last_applied_req_id = ctx_isp->last_applied_req_id; + md->last_bufdone_err_apply_req_id = + ctx_isp->last_bufdone_err_apply_req_id; + md->frame_id_meta = ctx_isp->frame_id_meta; + md->substate_activated = ctx_isp->substate_activated; + md->ctx_id = ctx->ctx_id; + md->subscribe_event = ctx_isp->subscribe_event; + md->bubble_frame_cnt = ctx_isp->bubble_frame_cnt; + md->isp_device_type = ctx_isp->isp_device_type; + md->active_req_cnt = ctx_isp->active_req_cnt; + md->trigger_id = ctx_isp->trigger_id; + md->rdi_only_context = ctx_isp->rdi_only_context; + md->offline_context = ctx_isp->offline_context; + md->hw_acquired = ctx_isp->hw_acquired; + md->init_received = ctx_isp->init_received; + md->split_acquire = ctx_isp->split_acquire; + md->use_frame_header_ts = ctx_isp->use_frame_header_ts; + md->support_consumed_addr = ctx_isp->support_consumed_addr; + md->use_default_apply = ctx_isp->use_default_apply; + md->apply_in_progress = atomic_read(&ctx_isp->apply_in_progress); + md->process_bubble = atomic_read(&ctx_isp->process_bubble); + md->rxd_epoch = atomic_read(&ctx_isp->rxd_epoch); + + for (i = 0; i < 
CAM_ISP_CTX_EVENT_MAX; i++) { + memcpy(md->event_record[i], ctx_isp->dbg_monitors.event_record[i], + sizeof(struct cam_isp_context_event_record) * + CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES); + } + + total_bytes += sizeof(*md); + if (start_addr + total_bytes >= end_addr) + goto end; + + if (!list_empty(&ctx->active_req_list)) { + md->active_list = (struct cam_isp_ctx_req_mini_dump *) + (start_addr + total_bytes); + list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) { + bytes_updated = 0; + __cam_isp_ctx_req_mini_dump(req, + (uint8_t *)&md->active_list[md->active_cnt++], + end_addr, &bytes_updated); + total_bytes += bytes_updated; + if ((start_addr + total_bytes >= end_addr)) + goto end; + } + } + + if (!list_empty(&ctx->wait_req_list)) { + md->wait_list = (struct cam_isp_ctx_req_mini_dump *) + (start_addr + total_bytes); + list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) { + bytes_updated = 0; + __cam_isp_ctx_req_mini_dump(req, + (uint8_t *)&md->wait_list[md->wait_cnt++], + end_addr, &bytes_updated); + total_bytes += bytes_updated; + if ((start_addr + total_bytes >= end_addr)) + goto end; + } + } + + if (!list_empty(&ctx->pending_req_list)) { + md->pending_list = (struct cam_isp_ctx_req_mini_dump *) + (start_addr + total_bytes); + list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) { + bytes_updated = 0; + __cam_isp_ctx_req_mini_dump(req, + (uint8_t *)&md->pending_list[md->pending_cnt++], + end_addr, &bytes_updated); + total_bytes += bytes_updated; + if ((start_addr + total_bytes >= end_addr)) + goto end; + } + } +end: + dump_args->bytes_written = total_bytes; + return 0; +} + +static void __cam_isp_ctx_update_state_monitor_array( + struct cam_isp_context *ctx_isp, + enum cam_isp_state_change_trigger trigger_type, + uint64_t req_id) +{ + int iterator; + struct cam_context *ctx = ctx_isp->base; + + INC_HEAD(&ctx_isp->dbg_monitors.state_monitor_head, + CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator); + + 
ctx_isp->dbg_monitors.state_monitor[iterator].curr_state = + ctx_isp->substate_activated; + ctx_isp->dbg_monitors.state_monitor[iterator].frame_id = + ctx_isp->frame_id; + ctx_isp->dbg_monitors.state_monitor[iterator].trigger = + trigger_type; + ctx_isp->dbg_monitors.state_monitor[iterator].req_id = + req_id; + + if (trigger_type == CAM_ISP_STATE_CHANGE_TRIGGER_CDM_DONE) + ctx_isp->dbg_monitors.state_monitor[iterator].evt_time_stamp = + ctx->cdm_done_ts; + else + ktime_get_clocktai_ts64( + &ctx_isp->dbg_monitors.state_monitor[iterator].evt_time_stamp); +} + +static int __cam_isp_ctx_update_frame_timing_record( + enum cam_isp_hw_event_type hw_evt, struct cam_isp_context *ctx_isp) +{ + uint32_t index = 0; + + /* Update event index on IFE SOF - primary event */ + if (hw_evt == CAM_ISP_HW_EVENT_SOF) + INC_HEAD(&ctx_isp->dbg_monitors.frame_monitor_head, + CAM_ISP_CTX_MAX_FRAME_RECORDS, &index); + else + div_u64_rem(atomic64_read(&ctx_isp->dbg_monitors.frame_monitor_head), + CAM_ISP_CTX_MAX_FRAME_RECORDS, &index); + + switch (hw_evt) { + case CAM_ISP_HW_EVENT_SOF: + CAM_GET_BOOT_TIMESTAMP(ctx_isp->dbg_monitors.frame_monitor[index].sof_ts); + break; + case CAM_ISP_HW_EVENT_EOF: + CAM_GET_BOOT_TIMESTAMP(ctx_isp->dbg_monitors.frame_monitor[index].eof_ts); + break; + case CAM_ISP_HW_EVENT_EPOCH: + CAM_GET_BOOT_TIMESTAMP(ctx_isp->dbg_monitors.frame_monitor[index].epoch_ts); + break; + case CAM_ISP_HW_SECONDARY_EVENT: + CAM_GET_BOOT_TIMESTAMP(ctx_isp->dbg_monitors.frame_monitor[index].secondary_sof_ts); + break; + default: + break; + } + + return 0; +} + +static void __cam_isp_ctx_dump_frame_timing_record( + struct cam_isp_context *ctx_isp) +{ + int i = 0; + int64_t state_head = 0; + uint32_t index, num_entries, oldest_entry; + + state_head = atomic64_read(&ctx_isp->dbg_monitors.frame_monitor_head); + + if (state_head == -1) + return; + + if (state_head < CAM_ISP_CTX_MAX_FRAME_RECORDS) { + num_entries = state_head + 1; + oldest_entry = 0; + } else { + num_entries = 
CAM_ISP_CTX_MAX_FRAME_RECORDS; + div_u64_rem(state_head + 1, CAM_ISP_CTX_MAX_FRAME_RECORDS, + &oldest_entry); + } + + index = oldest_entry; + for (i = 0; i < num_entries; i++) { + CAM_INFO(CAM_ISP, + "Index: %u SOF_TS: %llu.%llu EPOCH_TS: %llu.%llu EOF_TS: %llu.%llu SEC_SOF: %llu.%llu", + index, + ctx_isp->dbg_monitors.frame_monitor[index].sof_ts.tv_sec, + ctx_isp->dbg_monitors.frame_monitor[index].sof_ts.tv_nsec / NSEC_PER_USEC, + ctx_isp->dbg_monitors.frame_monitor[index].epoch_ts.tv_sec, + ctx_isp->dbg_monitors.frame_monitor[index].epoch_ts.tv_nsec / NSEC_PER_USEC, + ctx_isp->dbg_monitors.frame_monitor[index].eof_ts.tv_sec, + ctx_isp->dbg_monitors.frame_monitor[index].eof_ts.tv_nsec / NSEC_PER_USEC, + ctx_isp->dbg_monitors.frame_monitor[index].secondary_sof_ts.tv_sec, + ctx_isp->dbg_monitors.frame_monitor[index].secondary_sof_ts.tv_nsec / + NSEC_PER_USEC); + + index = (index + 1) % CAM_ISP_CTX_MAX_FRAME_RECORDS; + } +} + +static const char *__cam_isp_ctx_substate_val_to_type( + enum cam_isp_ctx_activated_substate type) +{ + switch (type) { + case CAM_ISP_CTX_ACTIVATED_SOF: + return "SOF"; + case CAM_ISP_CTX_ACTIVATED_APPLIED: + return "APPLIED"; + case CAM_ISP_CTX_ACTIVATED_EPOCH: + return "EPOCH"; + case CAM_ISP_CTX_ACTIVATED_BUBBLE: + return "BUBBLE"; + case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED: + return "BUBBLE_APPLIED"; + case CAM_ISP_CTX_ACTIVATED_HW_ERROR: + return "HW_ERROR"; + case CAM_ISP_CTX_ACTIVATED_HALT: + return "HALT"; + default: + return "INVALID"; + } +} + +static const char *__cam_isp_hw_evt_val_to_type( + uint32_t evt_id) +{ + switch (evt_id) { + case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR: + return "ERROR"; + case CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED: + return "APPLIED"; + case CAM_ISP_STATE_CHANGE_TRIGGER_SOF: + return "SOF"; + case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE: + return "REG_UPDATE"; + case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH: + return "EPOCH"; + case CAM_ISP_STATE_CHANGE_TRIGGER_EOF: + return "EOF"; + case 
CAM_ISP_STATE_CHANGE_TRIGGER_CDM_DONE: + return "CDM_DONE"; + case CAM_ISP_STATE_CHANGE_TRIGGER_DONE: + return "DONE"; + case CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH: + return "FLUSH"; + case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF: + return "SEC_EVT_SOF"; + case CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH: + return "SEC_EVT_EPOCH"; + case CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP: + return "OUT_OF_SYNC_FRAME_DROP"; + default: + return "CAM_ISP_EVENT_INVALID"; + } +} + +static void __cam_isp_ctx_dump_state_monitor_array( + struct cam_isp_context *ctx_isp) +{ + int i = 0; + int64_t state_head = 0; + uint32_t index, num_entries, oldest_entry; + struct tm ts; + + state_head = atomic64_read(&ctx_isp->dbg_monitors.state_monitor_head); + + if (state_head == -1) + return; + + if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) { + num_entries = state_head + 1; + oldest_entry = 0; + } else { + num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; + div_u64_rem(state_head + 1, CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, + &oldest_entry); + } + + CAM_ERR(CAM_ISP, + "Dumping state information for preceding requests"); + + index = oldest_entry; + + for (i = 0; i < num_entries; i++) { + time64_to_tm(ctx_isp->dbg_monitors.state_monitor[index].evt_time_stamp.tv_sec, + 0, &ts); + CAM_ERR(CAM_ISP, + "Idx[%d] time[%d-%d %d:%d:%d.%lld]:Substate[%s] Frame[%lld] Req[%llu] evt[%s] at time[%llu: %09llu]", + index, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec, + ctx_isp->dbg_monitors.state_monitor[index].evt_time_stamp.tv_nsec / 1000000, + __cam_isp_ctx_substate_val_to_type( + ctx_isp->dbg_monitors.state_monitor[index].curr_state), + ctx_isp->dbg_monitors.state_monitor[index].frame_id, + ctx_isp->dbg_monitors.state_monitor[index].req_id, + __cam_isp_hw_evt_val_to_type( + ctx_isp->dbg_monitors.state_monitor[index].trigger), + ctx_isp->dbg_monitors.state_monitor[index].evt_time_stamp.tv_sec, + ctx_isp->dbg_monitors.state_monitor[index].evt_time_stamp.tv_nsec); + + index = (index + 1) 
% CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; + } +} + +static void *cam_isp_ctx_user_dump_state_monitor_array_info( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_isp_context_state_monitor *evt = NULL; + uint64_t *addr; + + evt = (struct cam_isp_context_state_monitor *)dump_struct; + + addr = (uint64_t *)addr_ptr; + + *addr++ = evt->evt_time_stamp.tv_sec; + *addr++ = evt->evt_time_stamp.tv_nsec / NSEC_PER_USEC; + *addr++ = evt->frame_id; + *addr++ = evt->req_id; + return addr; +} + +static int __cam_isp_ctx_user_dump_state_monitor_array( + struct cam_isp_context *ctx_isp, + struct cam_common_hw_dump_args *dump_args) +{ + int i, rc = 0; + int index; + uint32_t oldest_entry; + uint32_t num_entries; + uint64_t state_head = 0; + + if (!dump_args || !ctx_isp) { + CAM_ERR(CAM_ISP, "Invalid args %pK %pK", + dump_args, ctx_isp); + return -EINVAL; + } + + state_head = atomic64_read(&ctx_isp->dbg_monitors.state_monitor_head); + + if (state_head == -1) { + return 0; + } else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) { + num_entries = state_head; + oldest_entry = 0; + } else { + num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; + div_u64_rem(state_head + 1, + CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry); + } + CAM_ERR(CAM_ISP, + "Dumping state information for preceding requests"); + + index = oldest_entry; + for (i = 0; i < num_entries; i++) { + + rc = cam_common_user_dump_helper(dump_args, + cam_isp_ctx_user_dump_state_monitor_array_info, + &ctx_isp->dbg_monitors.state_monitor[index], + sizeof(uint64_t), "ISP_STATE_MONITOR.%s.%s:", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->dbg_monitors.state_monitor[index].curr_state), + __cam_isp_hw_evt_val_to_type( + ctx_isp->dbg_monitors.state_monitor[index].trigger)); + + if (rc) { + CAM_ERR(CAM_ISP, "CAM ISP CONTEXT: Event record dump failed, rc: %d", rc); + return rc; + } + + index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; + + } + return rc; +} + +static int 
cam_isp_context_info_dump(void *context, + enum cam_context_dump_id id) +{ + struct cam_context *ctx = (struct cam_context *)context; + + switch (id) { + case CAM_CTX_DUMP_ACQ_INFO: { + cam_context_dump_hw_acq_info(ctx); + break; + } + default: + CAM_DBG(CAM_ISP, "DUMP id not valid %u, ctx_idx: %u, link: 0x%x", + id, ctx->ctx_id, ctx->link_hdl); + break; + } + + return 0; +} + +static const char *__cam_isp_ctx_crm_trigger_point_to_string( + int trigger_point) +{ + switch (trigger_point) { + case CAM_TRIGGER_POINT_SOF: + return "SOF"; + case CAM_TRIGGER_POINT_EOF: + return "EOF"; + default: + return "Invalid"; + } +} + +static int __cam_isp_ctx_notify_trigger_util( + int trigger_type, struct cam_isp_context *ctx_isp) +{ + int rc = -EINVAL; + struct cam_context *ctx = ctx_isp->base; + struct cam_req_mgr_trigger_notify notify; + + /* Trigger type not supported, return */ + if (!(ctx_isp->subscribe_event & trigger_type)) { + CAM_DBG(CAM_ISP, + "%s trigger point not subscribed for in mask: %u in ctx: %u on link: 0x%x last_bufdone: %lld", + __cam_isp_ctx_crm_trigger_point_to_string(trigger_type), + ctx_isp->subscribe_event, ctx->ctx_id, ctx->link_hdl, + ctx_isp->req_info.last_bufdone_req_id); + return 0; + } + + /* Skip CRM notify when recovery is in progress */ + if (atomic_read(&ctx_isp->internal_recovery_set)) { + CAM_DBG(CAM_ISP, + "Internal recovery in progress skip notifying %s trigger point in ctx: %u on link: 0x%x", + __cam_isp_ctx_crm_trigger_point_to_string(trigger_type), + ctx->ctx_id, ctx->link_hdl); + return 0; + } + + notify.link_hdl = ctx->link_hdl; + notify.dev_hdl = ctx->dev_hdl; + notify.frame_id = ctx_isp->frame_id; + notify.trigger = trigger_type; + notify.req_id = ctx_isp->req_info.last_bufdone_req_id; + notify.sof_timestamp_val = ctx_isp->sof_timestamp_val; + notify.trigger_id = ctx_isp->trigger_id; + notify.boot_timestamp = ctx_isp->boot_timestamp; + + CAM_DBG(CAM_ISP, + "Notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld", + 
__cam_isp_ctx_crm_trigger_point_to_string(trigger_type), + ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl, + ctx_isp->req_info.last_bufdone_req_id); + + rc = ctx->ctx_crm_intf->notify_trigger(¬ify); + if (rc) + CAM_ERR_RATE_LIMIT(CAM_ISP, + "Failed to notify CRM %s on frame: %llu ctx: %u link: 0x%x last_buf_done_req: %lld rc: %d", + __cam_isp_ctx_crm_trigger_point_to_string(trigger_type), + ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl, + ctx_isp->req_info.last_bufdone_req_id, rc); + + return rc; +} + +static int __cam_isp_ctx_notify_v4l2_error_event( + uint32_t error_type, uint32_t error_code, + uint64_t error_request_id, struct cam_context *ctx) +{ + int rc = 0; + struct cam_req_mgr_message req_msg; + + req_msg.session_hdl = ctx->session_hdl; + req_msg.u.err_msg.device_hdl = ctx->dev_hdl; + req_msg.u.err_msg.error_type = error_type; + req_msg.u.err_msg.link_hdl = ctx->link_hdl; + req_msg.u.err_msg.request_id = error_request_id; + req_msg.u.err_msg.resource_size = 0x0; + req_msg.u.err_msg.error_code = error_code; + + CAM_DBG(CAM_ISP, + "v4l2 error event [type: %u code: %u] for req: %llu in ctx: %u on link: 0x%x notified successfully", + error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl); + + rc = cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_ERROR, + V4L_EVENT_CAM_REQ_MGR_EVENT); + if (rc) + CAM_ERR(CAM_ISP, + "Notifying v4l2 error [type: %u code: %u] failed for req id:%llu in ctx %u on link: 0x%x", + error_type, error_code, error_request_id, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_notify_error_util( + uint32_t trigger_type, enum cam_req_mgr_device_error error, + uint64_t req_id, struct cam_isp_context *ctx_isp) +{ + int rc = -EINVAL; + struct cam_context *ctx = ctx_isp->base; + struct cam_req_mgr_error_notify notify; + + notify.link_hdl = ctx->link_hdl; + notify.dev_hdl = ctx->dev_hdl; + notify.req_id = req_id; + notify.error = error; + notify.trigger = trigger_type; + notify.frame_id = 
ctx_isp->frame_id; + notify.sof_timestamp_val = ctx_isp->sof_timestamp_val; + + if ((error == CRM_KMD_ERR_BUBBLE) || (error == CRM_KMD_WARN_INTERNAL_RECOVERY)) + CAM_WARN(CAM_ISP, + "Notify CRM about bubble req: %llu frame: %llu in ctx: %u on link: 0x%x", + req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl); + else + CAM_ERR(CAM_ISP, + "Notify CRM about fatal error: %u req: %llu frame: %llu in ctx: %u on link: 0x%x", + error, req_id, ctx_isp->frame_id, ctx->ctx_id, ctx->link_hdl); + + rc = ctx->ctx_crm_intf->notify_err(¬ify); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to notify error: %u for req: %lu on ctx: %u in link: 0x%x", + error, req_id, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_trigger_reg_dump( + enum cam_hw_mgr_command cmd, + struct cam_context *ctx, + struct cam_isp_prepare_hw_update_data *hw_update_data) +{ + int rc = 0; + struct cam_hw_cmd_args hw_cmd_args; + + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + hw_cmd_args.cmd_type = cmd; + hw_cmd_args.u.hw_update_data = hw_update_data; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "Reg dump on error failed ctx: %u link: 0x%x rc: %d", + ctx->ctx_id, ctx->link_hdl, rc); + goto end; + } + + CAM_DBG(CAM_ISP, + "Reg dump type: %u successful in ctx: %u on link: 0x%x", + cmd, ctx->ctx_id, ctx->link_hdl); + +end: + return rc; +} + +static int __cam_isp_ctx_pause_crm_timer( + struct cam_context *ctx) +{ + int rc = -EINVAL; + struct cam_req_mgr_timer_notify timer; + + if (!ctx || !ctx->ctx_crm_intf) + goto end; + + timer.link_hdl = ctx->link_hdl; + timer.dev_hdl = ctx->dev_hdl; + timer.state = false; + rc = ctx->ctx_crm_intf->notify_timer(&timer); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to pause sof timer in ctx: %u on link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto end; + } + + CAM_DBG(CAM_ISP, "Notify CRM to pause timer for ctx: %u link: 0x%x success", + ctx->ctx_id, ctx->link_hdl); + +end: + return rc; +} + 
+static inline void __cam_isp_ctx_update_sof_ts_util( + struct cam_isp_hw_sof_event_data *sof_event_data, + struct cam_isp_context *ctx_isp) +{ + /* Delayed update, skip if ts is already updated */ + if (ctx_isp->sof_timestamp_val == sof_event_data->timestamp) + return; + + ctx_isp->frame_id++; + ctx_isp->sof_timestamp_val = sof_event_data->timestamp; + ctx_isp->boot_timestamp = sof_event_data->boot_time; +} + +static int cam_isp_ctx_dump_req( + struct cam_isp_ctx_req *req_isp, + uintptr_t cpu_addr, + size_t buf_len, + size_t *offset, + bool dump_to_buff) +{ + int i, rc = 0; + size_t len = 0; + uint32_t *buf_addr; + uint32_t *buf_start, *buf_end; + size_t remain_len = 0; + struct cam_cdm_cmd_buf_dump_info dump_info; + + for (i = 0; i < req_isp->num_cfg; i++) { + rc = cam_packet_util_get_cmd_mem_addr( + req_isp->cfg[i].handle, &buf_addr, &len); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "Failed to get_cmd_mem_addr, rc=%d", + rc); + } else { + if (req_isp->cfg[i].offset >= ((uint32_t)len)) { + CAM_ERR(CAM_ISP, + "Invalid offset exp %u actual %u", + req_isp->cfg[i].offset, (uint32_t)len); + cam_mem_put_cpu_buf(req_isp->cfg[i].handle); + return -EINVAL; + } + remain_len = len - req_isp->cfg[i].offset; + + if (req_isp->cfg[i].len > + ((uint32_t)remain_len)) { + CAM_ERR(CAM_ISP, + "Invalid len exp %u remain_len %u", + req_isp->cfg[i].len, + (uint32_t)remain_len); + cam_mem_put_cpu_buf(req_isp->cfg[i].handle); + return -EINVAL; + } + + buf_start = (uint32_t *)((uint8_t *) buf_addr + + req_isp->cfg[i].offset); + buf_end = (uint32_t *)((uint8_t *) buf_start + + req_isp->cfg[i].len - 1); + + if (dump_to_buff) { + if (!cpu_addr || !offset || !buf_len) { + CAM_ERR(CAM_ISP, "Invalid args"); + cam_mem_put_cpu_buf(req_isp->cfg[i].handle); + break; + } + dump_info.src_start = buf_start; + dump_info.src_end = buf_end; + dump_info.dst_start = cpu_addr; + dump_info.dst_offset = *offset; + dump_info.dst_max_size = buf_len; + rc = cam_cdm_util_dump_cmd_bufs_v2( + &dump_info); + 
*offset = dump_info.dst_offset; + if (rc) { + cam_mem_put_cpu_buf(req_isp->cfg[i].handle); + return rc; + } + } else + cam_cdm_util_dump_cmd_buf(buf_start, buf_end); + cam_mem_put_cpu_buf(req_isp->cfg[i].handle); + } + } + return rc; +} + +static int __cam_isp_ctx_enqueue_request_in_order( + struct cam_context *ctx, struct cam_ctx_request *req, bool lock) +{ + struct cam_ctx_request *req_current; + struct cam_ctx_request *req_prev; + struct list_head temp_list; + struct cam_isp_context *ctx_isp; + + INIT_LIST_HEAD(&temp_list); + if (lock) + spin_lock_bh(&ctx->lock); + if (list_empty(&ctx->pending_req_list)) { + list_add_tail(&req->list, &ctx->pending_req_list); + } else { + list_for_each_entry_safe_reverse( + req_current, req_prev, &ctx->pending_req_list, list) { + if (req->request_id < req_current->request_id) { + list_del_init(&req_current->list); + list_add(&req_current->list, &temp_list); + continue; + } else if (req->request_id == req_current->request_id) { + CAM_WARN(CAM_ISP, + "Received duplicated request %lld, ctx_idx: %u link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + } + break; + } + list_add_tail(&req->list, &ctx->pending_req_list); + + if (!list_empty(&temp_list)) { + list_for_each_entry_safe( + req_current, req_prev, &temp_list, list) { + list_del_init(&req_current->list); + list_add_tail(&req_current->list, + &ctx->pending_req_list); + } + } + } + ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_SUBMIT, req, NULL); + if (lock) + spin_unlock_bh(&ctx->lock); + return 0; +} + +static void __cam_isp_ctx_copy_fcg_ch_ctx( + struct cam_isp_fcg_config_internal *fcg_config_old, + struct cam_isp_fcg_config_internal *fcg_config_new) +{ + struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx_old, *fcg_ch_ctx_new; + int i, j; + + fcg_config_new->num_types = fcg_config_old->num_types; + fcg_config_new->num_predictions = fcg_config_old->num_predictions; + fcg_config_new->num_ch_ctx = 
fcg_config_old->num_ch_ctx; + + fcg_ch_ctx_new = fcg_config_new->ch_ctx_fcg_configs; + fcg_ch_ctx_old = fcg_config_old->ch_ctx_fcg_configs; + + for (i = 0; i < fcg_config_new->num_ch_ctx; i++) { + fcg_ch_ctx_new[i].fcg_ch_ctx_id = + fcg_ch_ctx_old[i].fcg_ch_ctx_id; + fcg_ch_ctx_new[i].fcg_enable_mask = + fcg_ch_ctx_old[i].fcg_enable_mask; + + for (j = 0; j < fcg_config_new->num_predictions; j++) + memcpy(&fcg_ch_ctx_old->predicted_fcg_configs[j], + &fcg_ch_ctx_new->predicted_fcg_configs[j], + sizeof(struct cam_isp_predict_fcg_config_internal)); + } +} + +static inline void __cam_isp_ctx_copy_fcg_params( + struct cam_isp_prepare_hw_update_data *hw_update_data, + struct cam_isp_ctx_req *req_isp_old, + struct cam_isp_ctx_req *req_isp_new) +{ + struct cam_isp_fcg_config_info *fcg_info_new, *fcg_info_old; + + fcg_info_new = &hw_update_data->fcg_info; + fcg_info_old = &req_isp_old->hw_update_data.fcg_info; + fcg_info_old->use_current_cfg = true; + + if (fcg_info_new->ife_fcg_online) { + fcg_info_old->ife_fcg_online = true; + fcg_info_old->ife_fcg_entry_idx = + req_isp_old->num_cfg + + fcg_info_new->ife_fcg_entry_idx; + + __cam_isp_ctx_copy_fcg_ch_ctx(&fcg_info_old->ife_fcg_config, + &fcg_info_new->ife_fcg_config); + } + + if (fcg_info_new->sfe_fcg_online) { + fcg_info_old->sfe_fcg_online = true; + fcg_info_old->sfe_fcg_entry_idx = + req_isp_old->num_cfg + + fcg_info_new->sfe_fcg_entry_idx; + + __cam_isp_ctx_copy_fcg_ch_ctx(&fcg_info_old->sfe_fcg_config, + &fcg_info_new->sfe_fcg_config); + } +} + +static int __cam_isp_ctx_enqueue_init_request( + struct cam_context *ctx, struct cam_ctx_request *req) +{ + int rc = 0; + struct cam_ctx_request *req_old; + struct cam_ctx_request *req_pf; + struct cam_isp_ctx_req *req_isp_old; + struct cam_isp_ctx_req *req_isp_new; + struct cam_isp_prepare_hw_update_data *req_update_old; + struct cam_isp_prepare_hw_update_data *req_update_new; + struct cam_isp_prepare_hw_update_data *hw_update_data; + struct cam_kmd_buf_info *kmd_buff_old = 
NULL; + + spin_lock_bh(&ctx->lock); + if (list_empty(&ctx->pending_req_list)) { + list_add_tail(&req->list, &ctx->pending_req_list); + CAM_DBG(CAM_ISP, "INIT packet added req id: %llu, ctx_idx: %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + goto end; + } + + req_old = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv; + req_isp_new = (struct cam_isp_ctx_req *) req->req_priv; + if (req_isp_old->hw_update_data.packet_opcode_type == + CAM_ISP_PACKET_INIT_DEV) { + if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >= + ctx->max_hw_update_entries) { + CAM_WARN(CAM_ISP, + "Can not merge INIT pkt num_cfgs = %d, ctx_idx: %u, link: 0x%x", + (req_isp_old->num_cfg + + req_isp_new->num_cfg), ctx->ctx_id, ctx->link_hdl); + rc = -ENOMEM; + } + + if (req_isp_old->num_fence_map_out != 0 || + req_isp_old->num_fence_map_in != 0) { + CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EINVAL; + } + + if (!rc) { + /* Copy data to old request for EPCR */ + memcpy(req_isp_old->fence_map_out, + req_isp_new->fence_map_out, + sizeof(req_isp_new->fence_map_out[0])* + req_isp_new->num_fence_map_out); + req_isp_old->num_fence_map_out = + req_isp_new->num_fence_map_out; + + memcpy(req_isp_old->fence_map_in, + req_isp_new->fence_map_in, + sizeof(req_isp_new->fence_map_in[0])* + req_isp_new->num_fence_map_in); + req_isp_old->num_fence_map_in = + req_isp_new->num_fence_map_in; + + /* Copy hw update entries, num_cfg is updated later */ + memcpy(&req_isp_old->cfg[req_isp_old->num_cfg], + req_isp_new->cfg, + sizeof(req_isp_new->cfg[0]) * + req_isp_new->num_cfg); + + CAM_DBG(CAM_ISP, + "Enqueue req id: %llu, to old req: %llu,, ctx_idx: %u, link: 0x%x", + req->request_id, req_old->request_id, ctx->ctx_id, ctx->link_hdl); + + if (req_old->packet) { + + kmd_buff_old = &(req_isp_old->hw_update_data.kmd_cmd_buff_info); + 
cam_mem_put_kref(kmd_buff_old->handle); + cam_common_mem_free(req_old->packet); + req_old->packet = req->packet; + req->packet = NULL; + } + + /* Update packet pointer for pf data req */ + req_pf = (struct cam_ctx_request *)req_old->pf_data.req; + req_pf->packet = req_old->packet; + + if (req_isp_new->hw_update_data.num_reg_dump_buf) { + req_update_new = &req_isp_new->hw_update_data; + req_update_old = &req_isp_old->hw_update_data; + memcpy(&req_update_old->reg_dump_buf_desc, + &req_update_new->reg_dump_buf_desc, + sizeof(struct cam_cmd_buf_desc) * + req_update_new->num_reg_dump_buf); + req_update_old->num_reg_dump_buf = + req_update_new->num_reg_dump_buf; + } + + /* Update HW update params for ePCR */ + hw_update_data = &req_isp_new->hw_update_data; + req_isp_old->hw_update_data.frame_header_res_id = + req_isp_new->hw_update_data.frame_header_res_id; + req_isp_old->hw_update_data.frame_header_cpu_addr = + hw_update_data->frame_header_cpu_addr; + if (req_isp_new->hw_update_data.mup_en) { + req_isp_old->hw_update_data.mup_en = + req_isp_new->hw_update_data.mup_en; + req_isp_old->hw_update_data.mup_val = + req_isp_new->hw_update_data.mup_val; + req_isp_old->hw_update_data.num_exp = + req_isp_new->hw_update_data.num_exp; + } + + /* Copy FCG HW update params */ + __cam_isp_ctx_copy_fcg_params(hw_update_data, + req_isp_old, req_isp_new); + + if (req_isp_new->hw_update_data.bw_clk_config.bw_config_valid) { + memcpy(&req_isp_old->hw_update_data.bw_clk_config.bw_config, + &req_isp_new->hw_update_data.bw_clk_config.bw_config, + sizeof(struct cam_isp_bw_config_internal)); + memcpy(&req_isp_old->hw_update_data.bw_clk_config.bw_config_v2, + &req_isp_new->hw_update_data.bw_clk_config.bw_config_v2, + sizeof(struct cam_isp_bw_config_internal_v2)); + req_isp_old->hw_update_data.bw_clk_config.bw_config_valid = true; + } + + if (req_isp_new->hw_update_data.bw_clk_config.ife_clock_config_valid) { + memcpy(&req_isp_old->hw_update_data.bw_clk_config.ife_clock_config, + 
&req_isp_new->hw_update_data.bw_clk_config.ife_clock_config, + sizeof(struct cam_isp_clock_config_internal)); + req_isp_old->hw_update_data.bw_clk_config.ife_clock_config_valid = + true; + } + + if (req_isp_new->hw_update_data.bw_clk_config.sfe_clock_config_valid) { + memcpy(&req_isp_old->hw_update_data.bw_clk_config.sfe_clock_config, + &req_isp_new->hw_update_data.bw_clk_config.sfe_clock_config, + sizeof(struct cam_isp_clock_config_internal)); + req_isp_old->hw_update_data.bw_clk_config.sfe_clock_config_valid = + true; + } + + memcpy(&req_isp_old->hw_update_data.isp_drv_config, + &req_isp_new->hw_update_data.isp_drv_config, + sizeof(struct cam_isp_drv_config)); + + req_isp_old->num_cfg += req_isp_new->num_cfg; + req_old->request_id = req->request_id; + list_splice_init(&req->buf_tracker, &req_old->buf_tracker); + + list_add_tail(&req->list, &ctx->free_req_list); + } + } else { + CAM_WARN(CAM_ISP, + "Received Update pkt before INIT pkt. req_id= %lld, ctx_idx: %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + rc = -EINVAL; + } +end: + spin_unlock_bh(&ctx->lock); + return rc; +} + +static inline void __cam_isp_ctx_move_req_to_free_list( + struct cam_context *ctx, struct cam_ctx_request *req) +{ + struct cam_isp_ctx_req *req_isp = (struct cam_isp_ctx_req *) req->req_priv; + struct cam_kmd_buf_info *kmd_cmd_buff_info = &(req_isp->hw_update_data.kmd_cmd_buff_info); + + CAM_DBG(CAM_ISP, + "Free req id: %lld, packet: 0x%x, ctx_idx: %u, link: 0x%x", + req->request_id, req->packet, ctx->ctx_id, ctx->link_hdl); + if (req->packet) { + cam_mem_put_kref(kmd_cmd_buff_info->handle); + cam_common_mem_free(req->packet); + req->packet = NULL; + } + + list_add_tail(&req->list, &ctx->free_req_list); +} + +static char *__cam_isp_ife_sfe_resource_handle_id_to_type( + uint32_t resource_handle) +{ + switch (resource_handle) { + /* IFE output ports */ + case CAM_ISP_IFE_OUT_RES_FULL: return "IFE_FULL"; + case CAM_ISP_IFE_OUT_RES_DS4: return "IFE_DS4"; + case 
CAM_ISP_IFE_OUT_RES_DS16: return "IFE_DS16"; + case CAM_ISP_IFE_OUT_RES_RAW_DUMP: return "IFE_RAW_DUMP"; + case CAM_ISP_IFE_OUT_RES_FD: return "IFE_FD"; + case CAM_ISP_IFE_OUT_RES_PDAF: return "IFE_PDAF"; + case CAM_ISP_IFE_OUT_RES_RDI_0: return "IFE_RDI_0"; + case CAM_ISP_IFE_OUT_RES_RDI_1: return "IFE_RDI_1"; + case CAM_ISP_IFE_OUT_RES_RDI_2: return "IFE_RDI_2"; + case CAM_ISP_IFE_OUT_RES_RDI_3: return "IFE_RDI_3"; + case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE: return "IFE_STATS_HDR_BE"; + case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST: return "IFE_STATS_HDR_BHIST"; + case CAM_ISP_IFE_OUT_RES_STATS_TL_BG: return "IFE_STATS_TL_BG"; + case CAM_ISP_IFE_OUT_RES_STATS_BF: return "IFE_STATS_BF"; + case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG: return "IFE_STATS_AWB_BG"; + case CAM_ISP_IFE_OUT_RES_STATS_BHIST: return "IFE_STATS_BHIST"; + case CAM_ISP_IFE_OUT_RES_STATS_RS: return "IFE_STATS_RS"; + case CAM_ISP_IFE_OUT_RES_STATS_CS: return "IFE_STATS_CS"; + case CAM_ISP_IFE_OUT_RES_STATS_IHIST: return "IFE_STATS_IHIST"; + case CAM_ISP_IFE_OUT_RES_FULL_DISP: return "IFE_FULL_DISP"; + case CAM_ISP_IFE_OUT_RES_DS4_DISP: return "IFE_DS4_DISP"; + case CAM_ISP_IFE_OUT_RES_DS16_DISP: return "IFE_DS16_DISP"; + case CAM_ISP_IFE_OUT_RES_2PD: return "IFE_2PD"; + case CAM_ISP_IFE_OUT_RES_LCR: return "IFE_LCR"; + case CAM_ISP_IFE_OUT_RES_AWB_BFW: return "IFE_AWB_BFW"; + case CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD: return "IFE_PREPROCESS_2PD"; + case CAM_ISP_IFE_OUT_RES_STATS_AEC_BE: return "IFE_STATS_AEC_BE"; + case CAM_ISP_IFE_OUT_RES_LTM_STATS: return "IFE_LTM_STATS"; + case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST: return "IFE_STATS_GTM_BHIST"; + case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG: return "IFE_STATS_BG"; + case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW: return "IFE_PREPROCESS_RAW"; + case CAM_ISP_IFE_OUT_RES_SPARSE_PD: return "IFE_SPARSE_PD"; + case CAM_ISP_IFE_OUT_RES_STATS_CAF: return "IFE_STATS_CAF"; + case CAM_ISP_IFE_OUT_RES_STATS_BAYER_RS: return "IFE_STATS_BAYER_RS"; + case 
CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA: return "IFE_PDAF_PARSED_DATA"; + case CAM_ISP_IFE_OUT_RES_STATS_ALSC: return "IFE_STATS_ALSC"; + case CAM_ISP_IFE_OUT_RES_DS2: return "IFE_DS2"; + case CAM_ISP_IFE_OUT_RES_IR: return "IFE_IR"; + case CAM_ISP_IFE_OUT_RES_RDI_4: return "IFE_RDI_4"; + case CAM_ISP_IFE_OUT_RES_STATS_TMC_BHIST: return "IFE_STATS_TMC_BHIST"; + case CAM_ISP_IFE_OUT_RES_STATS_AF_BHIST: return "IFE_STATS_AF_BHIST"; + case CAM_ISP_IFE_OUT_RES_STATS_AEC_BHIST: return "IFE_STATS_AEC_BHIST"; + /* SFE output ports */ + case CAM_ISP_SFE_OUT_RES_RDI_0: return "SFE_RDI_0"; + case CAM_ISP_SFE_OUT_RES_RDI_1: return "SFE_RDI_1"; + case CAM_ISP_SFE_OUT_RES_RDI_2: return "SFE_RDI_2"; + case CAM_ISP_SFE_OUT_RES_RDI_3: return "SFE_RDI_3"; + case CAM_ISP_SFE_OUT_RES_RDI_4: return "SFE_RDI_4"; + case CAM_ISP_SFE_OUT_BE_STATS_0: return "SFE_BE_STATS_0"; + case CAM_ISP_SFE_OUT_BE_STATS_1: return "SFE_BE_STATS_1"; + case CAM_ISP_SFE_OUT_BE_STATS_2: return "SFE_BE_STATS_2"; + case CAM_ISP_SFE_OUT_BHIST_STATS_0: return "SFE_BHIST_STATS_0"; + case CAM_ISP_SFE_OUT_BHIST_STATS_1: return "SFE_BHIST_STATS_1"; + case CAM_ISP_SFE_OUT_BHIST_STATS_2: return "SFE_BHIST_STATS_2"; + case CAM_ISP_SFE_OUT_RES_LCR: return "SFE_LCR"; + case CAM_ISP_SFE_OUT_RES_RAW_DUMP: return "SFE_PROCESSED_RAW"; + case CAM_ISP_SFE_OUT_RES_IR: return "SFE_IR"; + case CAM_ISP_SFE_OUT_BAYER_RS_STATS_0: return "SFE_RS_STATS_0"; + case CAM_ISP_SFE_OUT_BAYER_RS_STATS_1: return "SFE_RS_STATS_1"; + case CAM_ISP_SFE_OUT_BAYER_RS_STATS_2: return "SFE_RS_STATS_2"; + case CAM_ISP_SFE_OUT_HDR_STATS: return "HDR_STATS"; + /* SFE input ports */ + case CAM_ISP_SFE_IN_RD_0: return "SFE_RD_0"; + case CAM_ISP_SFE_IN_RD_1: return "SFE_RD_1"; + case CAM_ISP_SFE_IN_RD_2: return "SFE_RD_2"; + /* Handle invalid type */ + default: return "Invalid_Resource_Type"; + } +} + +static const char *__cam_isp_tfe_resource_handle_id_to_type( + uint32_t resource_handle) +{ + switch (resource_handle) { + /* TFE output ports */ + case 
CAM_ISP_TFE_OUT_RES_FULL: return "TFE_FULL"; + case CAM_ISP_TFE_OUT_RES_RAW_DUMP: return "TFE_RAW_DUMP"; + case CAM_ISP_TFE_OUT_RES_PDAF: return "TFE_PDAF"; + case CAM_ISP_TFE_OUT_RES_RDI_0: return "TFE_RDI_0"; + case CAM_ISP_TFE_OUT_RES_RDI_1: return "TFE_RDI_1"; + case CAM_ISP_TFE_OUT_RES_RDI_2: return "TFE_RDI_2"; + case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE: return "TFE_STATS_HDR_BE"; + case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST: return "TFE_STATS_HDR_BHIST"; + case CAM_ISP_TFE_OUT_RES_STATS_TL_BG: return "TFE_STATS_TL_BG"; + case CAM_ISP_TFE_OUT_RES_STATS_BF: return "TFE_STATS_BF"; + case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG: return "TFE_STATS_AWB_BG"; + case CAM_ISP_TFE_OUT_RES_STATS_RS: return "TFE_STATS_RS"; + case CAM_ISP_TFE_OUT_RES_DS4: return "TFE_DS_4"; + case CAM_ISP_TFE_OUT_RES_DS16: return "TFE_DS_16"; + case CAM_ISP_TFE_OUT_RES_AI: return "TFE_AI"; + /* Handle invalid type */ + default: return "Invalid_Resource_Type"; + } +} + +static const char *__cam_isp_resource_handle_id_to_type( + uint32_t device_type, uint32_t resource_handle) +{ + switch (device_type) { + case CAM_IFE_DEVICE_TYPE: + case CAM_TFE_MC_DEVICE_TYPE: + return __cam_isp_ife_sfe_resource_handle_id_to_type(resource_handle); + case CAM_TFE_DEVICE_TYPE: + return __cam_isp_tfe_resource_handle_id_to_type(resource_handle); + default: + return "INVALID_DEV_TYPE"; + } +} + +static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data) +{ + uint64_t ts = 0; + + if (!evt_data) + return 0; + + switch (evt_id) { + case CAM_ISP_HW_EVENT_ERROR: + ts = ((struct cam_isp_hw_error_event_data *)evt_data)-> + timestamp; + break; + case CAM_ISP_HW_EVENT_SOF: + ts = ((struct cam_isp_hw_sof_event_data *)evt_data)-> + timestamp; + break; + case CAM_ISP_HW_EVENT_REG_UPDATE: + ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)-> + timestamp; + break; + case CAM_ISP_HW_EVENT_EPOCH: + ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)-> + timestamp; + break; + case CAM_ISP_HW_EVENT_EOF: + ts = 
((struct cam_isp_hw_eof_event_data *)evt_data)-> + timestamp; + break; + case CAM_ISP_HW_EVENT_DONE: + case CAM_ISP_HW_SECONDARY_EVENT: + break; + default: + CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id); + } + + return ts; +} + +static int __cam_isp_ctx_get_hw_timestamp(struct cam_context *ctx, uint64_t *prev_ts, + uint64_t *curr_ts, uint64_t *boot_ts) +{ + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + int rc; + + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + hw_cmd_args.u.internal_args = &isp_hw_cmd_args; + + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_SOF_TS; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->ctxt_to_hw_map, &hw_cmd_args); + if (rc) + return rc; + + if (isp_hw_cmd_args.u.sof_ts.prev >= isp_hw_cmd_args.u.sof_ts.curr) { + CAM_ERR(CAM_ISP, "ctx:%u link:0x%x prev timestamp greater than curr timestamp", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + *prev_ts = isp_hw_cmd_args.u.sof_ts.prev; + *curr_ts = isp_hw_cmd_args.u.sof_ts.curr; + *boot_ts = isp_hw_cmd_args.u.sof_ts.boot; + + return 0; +} + +static int __cam_isp_ctx_get_cdm_done_timestamp(struct cam_context *ctx, + uint64_t *last_cdm_done_req) +{ + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + struct tm ts; + int rc; + + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + hw_cmd_args.u.internal_args = &isp_hw_cmd_args; + + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_LAST_CDM_DONE; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->ctxt_to_hw_map, &hw_cmd_args); + if (rc) + return rc; + + *last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done; + ctx->cdm_done_ts = isp_hw_cmd_args.cdm_done_ts; + time64_to_tm(isp_hw_cmd_args.cdm_done_ts.tv_sec, 0, &ts); + CAM_DBG(CAM_ISP, + "last_cdm_done req: %llu ctx: %u link: 0x%x time[%d-%d %d:%d:%d.%lld]", + last_cdm_done_req, ctx->ctx_id, ctx->link_hdl, + ts.tm_mon + 1, ts.tm_mday, 
ts.tm_hour, ts.tm_min, ts.tm_sec, + isp_hw_cmd_args.cdm_done_ts.tv_nsec / 1000000); + + return 0; +} + +static int __cam_isp_ctx_recover_sof_timestamp(struct cam_context *ctx, uint64_t request_id) +{ + struct cam_isp_context *ctx_isp = ctx->ctx_priv; + uint64_t prev_ts, curr_ts, boot_ts; + uint64_t a, b, c; + int rc; + + rc = __cam_isp_ctx_get_hw_timestamp(ctx, &prev_ts, &curr_ts, &boot_ts); + if (rc) { + CAM_ERR(CAM_ISP, "ctx:%u link: 0x%x Failed to get timestamp from HW", + ctx->ctx_id, ctx->link_hdl); + return rc; + } + + /** + * If the last received SOF was for frame A and we have missed the SOF for frame B, + * then we need to find out if the hardware is at frame B or C. + * +-----+-----+-----+ + * | A | B | C | + * +-----+-----+-----+ + */ + a = ctx_isp->sof_timestamp_val; + if (a == prev_ts) { + /* Hardware is at frame B */ + b = curr_ts; + CAM_DBG(CAM_ISP, "ctx:%u link:0x%x recover time(last:0x%llx,curr:0x%llx)req:%llu", + ctx->ctx_id, ctx->link_hdl, a, b, request_id); + } else if (a < prev_ts) { + /* Hardware is at frame C */ + b = prev_ts; + c = curr_ts; + + CAM_DBG(CAM_ISP, + "ctx:%u link:0x%x recover time(last:0x%llx,prev:0x%llx,curr:0x%llx)req:%llu", + ctx->ctx_id, ctx->link_hdl, a, b, c, request_id); + } else { + /* Hardware is at frame A (which we supposedly missed) */ + CAM_ERR_RATE_LIMIT(CAM_ISP, + "ctx:%u link: 0x%x erroneous call to SOF recovery(last:0x%llx, prev:0x%llx, curr:0x%llx) req: %llu", + ctx->ctx_id, ctx->link_hdl, a, prev_ts, curr_ts, request_id); + return 0; + } + + ctx_isp->boot_timestamp = boot_ts + (b - curr_ts); + ctx_isp->sof_timestamp_val = b; + ctx_isp->frame_id++; + return 0; +} + +static void __cam_isp_ctx_send_sof_boot_timestamp( + struct cam_isp_context *ctx_isp, uint64_t request_id, + uint32_t sof_event_status, struct shutter_event *shutter_event) +{ + struct cam_req_mgr_message req_msg; + + req_msg.session_hdl = ctx_isp->base->session_hdl; + req_msg.u.frame_msg.frame_id = ctx_isp->frame_id; + 
req_msg.u.frame_msg.request_id = request_id; + req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp; + req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl; + req_msg.u.frame_msg.sof_status = sof_event_status; + req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta; + + CAM_DBG(CAM_ISP, + "request id:%lld frame number:%lld boot time stamp:0x%llx status:%u", + request_id, ctx_isp->frame_id, + ctx_isp->boot_timestamp, sof_event_status); + shutter_event->frame_id = ctx_isp->frame_id; + shutter_event->req_id = request_id; + shutter_event->boot_ts = ctx_isp->boot_timestamp; + shutter_event->sof_ts = ctx_isp->sof_timestamp_val; + + if (cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS, + V4L_EVENT_CAM_REQ_MGR_EVENT)) + CAM_ERR(CAM_ISP, + "Error in notifying the boot time for req id:%lld", + request_id); + + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_SHUTTER, NULL, shutter_event); +} + +static void __cam_isp_ctx_send_unified_timestamp( + struct cam_isp_context *ctx_isp, uint64_t request_id, + struct shutter_event *shutter_event) +{ + struct cam_req_mgr_message req_msg; + + req_msg.session_hdl = ctx_isp->base->session_hdl; + req_msg.u.frame_msg_v2.frame_id = ctx_isp->frame_id; + req_msg.u.frame_msg_v2.request_id = request_id; + req_msg.u.frame_msg_v2.timestamps[CAM_REQ_SOF_QTIMER_TIMESTAMP] = + (request_id == 0) ? 
0 : ctx_isp->sof_timestamp_val; + req_msg.u.frame_msg_v2.timestamps[CAM_REQ_BOOT_TIMESTAMP] = ctx_isp->boot_timestamp; + req_msg.u.frame_msg_v2.link_hdl = ctx_isp->base->link_hdl; + req_msg.u.frame_msg_v2.frame_id_meta = ctx_isp->frame_id_meta; + + CAM_DBG(CAM_ISP, + "link hdl 0x%x request id:%lld frame number:%lld SOF time stamp:0x%llx ctx %d\ + boot time stamp:0x%llx", ctx_isp->base->link_hdl, request_id, + ctx_isp->frame_id, ctx_isp->sof_timestamp_val,ctx_isp->base->ctx_id, + ctx_isp->boot_timestamp); + shutter_event->frame_id = ctx_isp->frame_id; + shutter_event->req_id = request_id; + shutter_event->boot_ts = ctx_isp->boot_timestamp; + shutter_event->sof_ts = ctx_isp->sof_timestamp_val; + + if (cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS, V4L_EVENT_CAM_REQ_MGR_EVENT)) + CAM_ERR(CAM_ISP, + "Error in notifying the sof and boot time for req id:%lld", + request_id); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_SHUTTER, NULL, shutter_event); +} + +static void __cam_isp_ctx_send_sof_timestamp_frame_header( + struct cam_isp_context *ctx_isp, uint32_t *frame_header_cpu_addr, + uint64_t request_id, uint32_t sof_event_status) +{ + uint32_t *time32 = NULL; + uint64_t timestamp = 0; + struct cam_req_mgr_message req_msg; + + time32 = frame_header_cpu_addr; + timestamp = (uint64_t) time32[1]; + timestamp = timestamp << 24; + timestamp |= (uint64_t)(time32[0] >> 8); + timestamp = mul_u64_u32_div(timestamp, + CAM_IFE_QTIMER_MUL_FACTOR, + CAM_IFE_QTIMER_DIV_FACTOR); + + ctx_isp->sof_timestamp_val = timestamp; + req_msg.session_hdl = ctx_isp->base->session_hdl; + req_msg.u.frame_msg.frame_id = ctx_isp->frame_id; + req_msg.u.frame_msg.request_id = request_id; + req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val; + req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl; + req_msg.u.frame_msg.sof_status = sof_event_status; + + CAM_DBG(CAM_ISP, + "request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u", + 
request_id, ctx_isp->frame_id, + ctx_isp->sof_timestamp_val, sof_event_status); + + if (cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT)) + CAM_ERR(CAM_ISP, + "Error in notifying the sof time for req id:%lld", + request_id); +} + +static void __cam_isp_ctx_send_sof_timestamp( + struct cam_isp_context *ctx_isp, uint64_t request_id, + uint32_t sof_event_status) +{ + struct cam_req_mgr_message req_msg; + struct cam_context *ctx = ctx_isp->base; + struct shutter_event shutter_event = {0}; + + if ((ctx_isp->reported_frame_id == ctx_isp->frame_id) || + (ctx_isp->last_sof_timestamp >= ctx_isp->sof_timestamp_val)) { + if (__cam_isp_ctx_recover_sof_timestamp(ctx_isp->base, request_id)) + CAM_WARN(CAM_ISP, "Missed SOF.No SOF timestamp recovery,ctx:%u,link:0x%x", + ctx->ctx_id, ctx->link_hdl); + } + + if (request_id == 0 && (ctx_isp->reported_frame_id == ctx_isp->frame_id)) { + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Missed SOF Recovery for invalid req, Skip notificaiton to userspace Ctx: %u link: 0x%x frame_id %u", + ctx->ctx_id, ctx->link_hdl, ctx_isp->frame_id); + return; + } + + ctx_isp->reported_frame_id = ctx_isp->frame_id; + shutter_event.status = sof_event_status; + + if (ctx_isp->last_sof_timestamp >= ctx_isp->sof_timestamp_val) { + CAM_ERR(CAM_ISP, + "SOF timestamp recovery fail, current timestamp:0x%llx, last timestamp:0x%llx, ctx:%u, link:0x%x", + ctx_isp->sof_timestamp_val, ctx_isp->last_sof_timestamp, + ctx->ctx_id, ctx->link_hdl); + return; + } + + if ((ctx_isp->v4l2_event_sub_ids & (1 << V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS)) + && !ctx_isp->use_frame_header_ts) { + __cam_isp_ctx_send_unified_timestamp(ctx_isp, request_id, &shutter_event); + return; + } + + if ((ctx_isp->use_frame_header_ts) || (request_id == 0)) + goto end; + + req_msg.session_hdl = ctx_isp->base->session_hdl; + req_msg.u.frame_msg.frame_id = ctx_isp->frame_id; + req_msg.u.frame_msg.request_id = request_id; + req_msg.u.frame_msg.timestamp = 
ctx_isp->sof_timestamp_val; + req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl; + req_msg.u.frame_msg.sof_status = sof_event_status; + req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta; + + CAM_DBG(CAM_ISP, + "request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u ctx_idx: %u, link: 0x%x", + request_id, ctx_isp->frame_id, + ctx_isp->sof_timestamp_val, sof_event_status, ctx->ctx_id, ctx->link_hdl); + + if (cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT)) + CAM_ERR(CAM_ISP, + "Error in notifying the sof time for req id:%lld, ctx_idx: %u, link: 0x%x", + request_id, ctx->ctx_id, ctx->link_hdl); + +end: + __cam_isp_ctx_send_sof_boot_timestamp(ctx_isp, + request_id, sof_event_status, &shutter_event); +} + +static void __cam_isp_ctx_handle_buf_done_fail_log( + struct cam_isp_context *ctx_isp, uint64_t request_id, + struct cam_isp_ctx_req *req_isp) +{ + int i; + struct cam_context *ctx = ctx_isp->base; + const char *handle_type; + + if (req_isp->num_fence_map_out >= ctx_isp->base->max_out_map_entries) { + CAM_ERR(CAM_ISP, "Num Resources exceed mMAX %d >= %d", + req_isp->num_fence_map_out, ctx_isp->base->max_out_map_entries); + return; + } + + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Prev Req[%lld] : num_out=%d, num_acked=%d, bubble : report=%d, detected=%d", + request_id, req_isp->num_fence_map_out, req_isp->num_acked, + req_isp->bubble_report, req_isp->bubble_detected); + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Resource Handles that fail to generate buf_done in prev frame"); + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (req_isp->fence_map_out[i].sync_id != -1) { + handle_type = __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, + req_isp->fence_map_out[i].resource_handle); + + trace_cam_log_event("Buf_done Congestion", + handle_type, request_id, req_isp->fence_map_out[i].sync_id); + + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Resource_Handle: [%s][0x%x] Sync_ID: [0x%x]", + handle_type, + 
req_isp->fence_map_out[i].resource_handle, + req_isp->fence_map_out[i].sync_id); + } + } + + ctx_isp->congestion_cnt++; + + /* Trigger SOF freeze debug dump on 3 or greater instances of congestion */ + if ((ctx_isp->congestion_cnt >= CAM_ISP_CONTEXT_CONGESTION_CNT_MAX) && + (!ctx_isp->sof_dbg_irq_en)) + __cam_isp_ctx_handle_sof_freeze_evt(ctx); +} + +static void __cam_isp_context_reset_internal_recovery_params( + struct cam_isp_context *ctx_isp) +{ + atomic_set(&ctx_isp->internal_recovery_set, 0); + atomic_set(&ctx_isp->process_bubble, 0); + ctx_isp->aeb_error_cnt = 0; + ctx_isp->bubble_frame_cnt = 0; + ctx_isp->congestion_cnt = 0; + ctx_isp->sof_dbg_irq_en = false; +} + +static int __cam_isp_context_try_internal_recovery( + struct cam_isp_context *ctx_isp) +{ + int rc = 0; + struct cam_context *ctx = ctx_isp->base; + struct cam_ctx_request *req; + struct cam_isp_ctx_req *req_isp; + + /* + * Start with wait list, if recovery is stil set + * errored request has not been moved to pending yet. 
+ * Buf done for errored request has not occurred recover + * from here + */ + if (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + + if (req->request_id == ctx_isp->recovery_req_id) { + rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, + CRM_KMD_WARN_INTERNAL_RECOVERY, ctx_isp->recovery_req_id, ctx_isp); + if (rc) { + /* Unable to do bubble recovery reset back to normal */ + CAM_WARN(CAM_ISP, + "Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + __cam_isp_context_reset_internal_recovery_params(ctx_isp); + req_isp->bubble_detected = false; + goto end; + } + + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE; + CAM_INFO(CAM_ISP, + "Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered", + ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl); + goto end; + } + } + + /* + * If not in wait list only other possibility is request is in pending list + * on error detection, bubble detect is set assuming new frame after detection + * comes in, there is an rup it's moved to active list and it finishes with + * it's buf done's + */ + if (!list_empty(&ctx->pending_req_list)) { + req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, list); + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + + if (req->request_id == ctx_isp->recovery_req_id) { + rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, + CRM_KMD_WARN_INTERNAL_RECOVERY, ctx_isp->recovery_req_id, ctx_isp); + if (rc) { + /* Unable to do bubble recovery reset back to normal */ + CAM_WARN(CAM_ISP, + "Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + 
__cam_isp_context_reset_internal_recovery_params(ctx_isp); + req_isp->bubble_detected = false; + goto end; + } + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE; + CAM_INFO(CAM_ISP, + "Internal recovery for req: %llu in ctx: %u on link: 0x%x triggered", + ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl); + goto end; + } + } + + /* If request is not found in either of the lists skip recovery */ + __cam_isp_context_reset_internal_recovery_params(ctx_isp); + +end: + return rc; +} + +static int __cam_isp_ctx_handle_buf_done_for_req_list( + struct cam_isp_context *ctx_isp, + struct cam_ctx_request *req) +{ + int rc = 0, i; + uint64_t buf_done_req_id; + struct cam_isp_ctx_req *req_isp; + struct cam_context *ctx = ctx_isp->base; + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + ctx_isp->active_req_cnt--; + buf_done_req_id = req->request_id; + + if (req_isp->bubble_detected && req_isp->bubble_report) { + req_isp->num_acked = 0; + req_isp->num_deferred_acks = 0; + req_isp->bubble_detected = false; + list_del_init(&req->list); + atomic_set(&ctx_isp->process_bubble, 0); + req_isp->cdm_reset_before_apply = false; + ctx_isp->bubble_frame_cnt = 0; + + if (buf_done_req_id <= ctx->last_flush_req) { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + for (i = 0; i < req_isp->num_fence_map_out; i++) { + rc = cam_sync_signal( + req_isp->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + + if (req_isp->fence_map_out[i].early_sync_id > 0) { + rc = cam_sync_signal( + req_isp->fence_map_out[i].early_sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[i].early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + req_isp->fence_map_out[i].early_sync_id = -1; + } + + } + + __cam_isp_ctx_move_req_to_free_list(ctx, req); + CAM_DBG(CAM_REQ, + "Move active request %lld 
to free list(cnt = %d) [flushed], ctx %u, link: 0x%x", + buf_done_req_id, ctx_isp->active_req_cnt, + ctx->ctx_id, ctx->link_hdl); + ctx_isp->last_bufdone_err_apply_req_id = 0; + } else { + list_add(&req->list, &ctx->pending_req_list); + CAM_DBG(CAM_REQ, + "Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u, link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, + ctx->ctx_id, ctx->link_hdl); + } + } else { + if (!ctx_isp->use_frame_header_ts) { + if (ctx_isp->reported_req_id < buf_done_req_id) { + ctx_isp->reported_req_id = buf_done_req_id; + __cam_isp_ctx_send_sof_timestamp(ctx_isp, + buf_done_req_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val; + } + } + + list_del_init(&req->list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + __cam_isp_ctx_move_req_to_free_list(ctx, req); + + req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE; + req_isp->cdm_reset_before_apply = false; + req_isp->num_acked = 0; + req_isp->num_deferred_acks = 0; + /* + * Only update the process_bubble and bubble_frame_cnt + * when bubble is detected on this req, in case the other + * request is processing bubble. 
+ */ + if (req_isp->bubble_detected) { + atomic_set(&ctx_isp->process_bubble, 0); + ctx_isp->bubble_frame_cnt = 0; + req_isp->bubble_detected = false; + } + + CAM_DBG(CAM_REQ, + "Move active request %lld to free list(cnt = %d) [all fences done], ctx %u link: 0x%x", + buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + ctx_isp->req_info.last_bufdone_req_id = req->request_id; + ctx_isp->last_bufdone_err_apply_req_id = 0; + } + + /* Reset it in case a previous rup affect new frame */ + atomic_set(&ctx_isp->unserved_rup, 0); + + if (atomic_read(&ctx_isp->internal_recovery_set) && !ctx_isp->active_req_cnt) + __cam_isp_context_try_internal_recovery(ctx_isp); + + cam_cpas_notify_event("IFE BufDone", buf_done_req_id); + + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id); + + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_BUFDONE, req, NULL); + return rc; +} + +static int __cam_isp_ctx_handle_buf_done_for_request( + struct cam_isp_context *ctx_isp, + struct cam_ctx_request *req, + struct cam_isp_hw_done_event_data *done, + uint32_t bubble_state, + struct cam_isp_hw_done_event_data *done_next_req) +{ + int rc = 0; + int i, j, k; + bool not_found = false; + struct cam_isp_ctx_req *req_isp; + struct cam_context *ctx = ctx_isp->base; + const char *handle_type; + struct cam_isp_context_comp_record *comp_grp = NULL; + + trace_cam_buf_done("ISP", ctx, req); + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d, ctx %u link: 0x%x", + bubble_state, req_isp->bubble_detected, ctx->ctx_id, ctx->link_hdl); + + done_next_req->resource_handle = 0; + done_next_req->timestamp = done->timestamp; + + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (done->resource_handle == req_isp->fence_map_out[i].resource_handle) + break; + } + + if (done->is_early_done && (i != req_isp->num_fence_map_out) && + 
(req_isp->fence_map_out[i].early_sync_id <= 0)) { + CAM_WARN(CAM_ISP, + "Early done already handled for res:%s Req %lld, ignoring", + __cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type, + done->resource_handle), + req->request_id); + return 0; + } + + if (done->hw_type == CAM_ISP_HW_TYPE_SFE) + comp_grp = &ctx_isp->sfe_bus_comp_grp[done->comp_group_id]; + else + comp_grp = &ctx_isp->vfe_bus_comp_grp[done->comp_group_id]; + + if (!comp_grp) { + CAM_ERR(CAM_ISP, "comp_grp is NULL"); + rc = -EINVAL; + return rc; + } + + if (i == req_isp->num_fence_map_out) { + for (j = 0; j < comp_grp->num_res; j++) { + not_found = false; + if (comp_grp->res_id[j] == done->resource_handle) + continue; + + for (k = 0; k < req_isp->num_fence_map_out; k++) + if (comp_grp->res_id[j] == + req_isp->fence_map_out[k].resource_handle) + break; + + if ((k == req_isp->num_fence_map_out) && (j != comp_grp->num_res - 1)) + continue; + else if (k != req_isp->num_fence_map_out) + break; + else + not_found = true; + } + } + + if (not_found) { + /* + * If not found in current request, it could be + * belonging to next request, this can happen if + * IRQ delay happens. It is only valid when the + * platform doesn't have last consumed address. + * + * In early done case, since the IRQ comes much earlier + * than regular buf done, it is highly likely that previous + * request is still active because of pending buf done + * processing. Put early bud done in the same bucket as IRQ + * delay and check next request. 
+ */ + CAM_WARN(CAM_ISP, + "BUF_DONE for res %s not found in Req %lld ", + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, + done->resource_handle), + req->request_id); + + done_next_req->hw_type = done->hw_type; + done_next_req->resource_handle = done->resource_handle; + done_next_req->comp_group_id = done->comp_group_id; + done_next_req->is_early_done = done->is_early_done; + goto check_deferred; + } + + /* + * Compare hw ctxt id as well if applicable, because comp group record will have + * multiple entries with the same resource id. + */ + for (i = 0; i < comp_grp->num_res; i++) { + /* + * Since early done is specific to out resource, process fence only + * for that resource. + */ + if (done->is_early_done && (done->resource_handle != comp_grp->res_id[i])) + continue; + + for (j = 0; j < req_isp->num_fence_map_out; j++) { + if (((!comp_grp->hw_ctxt_id[i]) && + (comp_grp->res_id[i] == req_isp->fence_map_out[j].resource_handle)) + || (comp_grp->hw_ctxt_id[i] && + (comp_grp->res_id[i] == req_isp->fence_map_out[j].resource_handle) + && + (comp_grp->hw_ctxt_id[i] == req_isp->fence_map_out[j].hw_ctxt_id))) + break; + } + + if (j == req_isp->num_fence_map_out) { + /* + * If not found in current request, it could be + * belonging to an active port with no valid fence + * bound to it, we needn't process it. 
+ */ + CAM_DBG(CAM_ISP, + "BUF_DONE for res %s not active in Req %lld ctx %u link: 0x%x", + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, + comp_grp->res_id[i]), + req->request_id, ctx->ctx_id, ctx->link_hdl); + continue; + } + + if ((!done->is_early_done) && (req_isp->fence_map_out[j].sync_id == -1)) { + handle_type = + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, + req_isp->fence_map_out[j].resource_handle); + + CAM_WARN(CAM_ISP, + "Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s, ctx %u link: 0x%x", + req->request_id, i, j, handle_type, ctx->ctx_id, ctx->link_hdl); + + trace_cam_log_event("Duplicate BufDone", + handle_type, req->request_id, ctx->ctx_id); + continue; + } + + /* Get buf handles from packet and retrieve them from presil framework */ + if (cam_presil_mode_enabled()) { + rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet, + ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to retrieve image buffers req_id:%d ctx_id:%u link: 0x%x bubble detected:%d rc:%d", + req->request_id, ctx->ctx_id, ctx->link_hdl, + req_isp->bubble_detected, rc); + return rc; + } + } + + if (!req_isp->bubble_detected) { + CAM_DBG(CAM_ISP, + "Sync with success: req %lld res 0x%x %s is_early:%s fd 0x%x early_fd %d, ctx %u link: 0x%x", + req->request_id, + req_isp->fence_map_out[j].resource_handle, + __cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type, + req_isp->fence_map_out[j].resource_handle), + CAM_BOOL_TO_YESNO(done->is_early_done), + req_isp->fence_map_out[j].sync_id, + req_isp->fence_map_out[j].early_sync_id, + ctx->ctx_id, ctx->link_hdl); + + if (done->is_early_done) { + rc = cam_sync_signal(req_isp->fence_map_out[j].early_sync_id, + CAM_SYNC_STATE_SIGNALED_SUCCESS, + CAM_SYNC_COMMON_EVENT_SUCCESS); + if (rc) + CAM_DBG(CAM_ISP, + "Early Sync failed with rc = %d, ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + } else { + 
cam_smmu_buffer_tracker_buffer_putref( + req_isp->fence_map_out[j].buffer_tracker); + + rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id, + CAM_SYNC_STATE_SIGNALED_SUCCESS, + CAM_SYNC_COMMON_EVENT_SUCCESS); + if (rc) + CAM_DBG(CAM_ISP, + "Sync failed with rc = %d, ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + } + } else if (!req_isp->bubble_report) { + CAM_DBG(CAM_ISP, + "Sync with failure: req %lld res 0x%x %s is_early:%s fd 0x%x early_fd %d, ctx %u link: 0x%x", + req->request_id, + req_isp->fence_map_out[j].resource_handle, + __cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type, + req_isp->fence_map_out[j].resource_handle), + CAM_BOOL_TO_YESNO(done->is_early_done), + req_isp->fence_map_out[j].sync_id, + req_isp->fence_map_out[j].early_sync_id, + ctx->ctx_id, ctx->link_hdl); + + if (done->is_early_done) { + rc = cam_sync_signal(req_isp->fence_map_out[j].early_sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + if (rc) + CAM_DBG(CAM_ISP, + "Early Sync failed with rc = %d, ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + } else { + cam_smmu_buffer_tracker_buffer_putref( + req_isp->fence_map_out[j].buffer_tracker); + rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + if (rc) + CAM_DBG(CAM_ISP, + "Sync failed with rc = %d, ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + } + } else { + /* + * Ignore the buffer done if bubble detect is on + * Increment the ack number here, and queue the + * request back to pending list whenever all the + * buffers are done. 
+ */ + if (!done->is_early_done) + req_isp->num_acked++; + + CAM_DBG(CAM_ISP, + "buf done with bubble state %d recovery %d for req %lld is_early:%s, ctx %u link: 0x%x", + bubble_state, + req_isp->bubble_report, + req->request_id, + CAM_BOOL_TO_YESNO(done->is_early_done), + ctx->ctx_id, ctx->link_hdl); + continue; + } + + CAM_DBG(CAM_ISP, + "req %lld, is_early:%s reset sync id 0x%x early_fd %d ctx %u link: 0x%x", + req->request_id, CAM_BOOL_TO_YESNO(done->is_early_done), + req_isp->fence_map_out[j].sync_id, req_isp->fence_map_out[j].early_sync_id, + ctx->ctx_id, ctx->link_hdl); + if (!rc) { + if (done->is_early_done) { + req_isp->fence_map_out[j].early_sync_id = -1; + } else { + req_isp->num_acked++; + req_isp->fence_map_out[j].sync_id = -1; + } + } + + if ((ctx_isp->use_frame_header_ts) && + (req_isp->hw_update_data.frame_header_res_id == + req_isp->fence_map_out[j].resource_handle)) + __cam_isp_ctx_send_sof_timestamp_frame_header(ctx_isp, + req_isp->hw_update_data.frame_header_cpu_addr, + req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS); + } + +check_deferred: + if (req_isp->num_acked > req_isp->num_fence_map_out) { + /* Should not happen */ + CAM_ERR(CAM_ISP, + "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u link: 0x%x", + req->request_id, req_isp->num_acked, + req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl); + WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out); + } + + if (req_isp->num_acked != req_isp->num_fence_map_out) + return rc; + + rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req); + return rc; +} + +static int __cam_isp_handle_deferred_buf_done( + struct cam_isp_context *ctx_isp, + struct cam_ctx_request *req, + bool bubble_handling, + uint32_t status, uint32_t event_cause) +{ + int i, j; + int rc = 0; + struct cam_isp_ctx_req *req_isp = + (struct cam_isp_ctx_req *) req->req_priv; + struct cam_context *ctx = ctx_isp->base; + + CAM_DBG(CAM_ISP, + "ctx[%u] link[0x%x] : Req %llu : Handling %d deferred buf_dones num_acked=%d, 
bubble_handling=%d", + ctx->ctx_id, ctx->link_hdl, req->request_id, req_isp->num_deferred_acks, + req_isp->num_acked, bubble_handling); + + for (i = 0; i < req_isp->num_deferred_acks; i++) { + j = req_isp->deferred_fence_map_index[i]; + + CAM_DBG(CAM_ISP, + "ctx[%u] link[0x%x] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x", + ctx->ctx_id, ctx->link_hdl, status, event_cause, + req->request_id, + req_isp->fence_map_out[j].resource_handle, + req_isp->fence_map_out[j].sync_id); + + if (req_isp->fence_map_out[j].sync_id == -1) { + CAM_WARN(CAM_ISP, + "ctx[%u] link[0x%x] : Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x", + ctx->ctx_id, ctx->link_hdl, req->request_id, j, + req_isp->fence_map_out[j].resource_handle); + continue; + } + + if (!bubble_handling) { + CAM_WARN(CAM_ISP, + "Unexpected Buf done for res=0x%x on ctx[%u] link[0x%x] for Req %llu, status=%d, possible bh delays", + req_isp->fence_map_out[j].resource_handle, ctx->ctx_id, + ctx->link_hdl, req->request_id, status); + + rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id, + status, event_cause); + if (rc) { + CAM_ERR(CAM_ISP, + "ctx[%u] link[0x%x] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d", + ctx->ctx_id, ctx->link_hdl, req->request_id, + req_isp->fence_map_out[j].sync_id, + status, rc); + } else { + req_isp->num_acked++; + req_isp->fence_map_out[j].sync_id = -1; + } + + if (req_isp->fence_map_out[j].early_sync_id > 0) { + rc = cam_sync_signal( + req_isp->fence_map_out[j].early_sync_id, + status, + event_cause); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[j].early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + req_isp->fence_map_out[j].early_sync_id = -1; + } + } else { + req_isp->num_acked++; + } + } + + CAM_DBG(CAM_ISP, + "ctx[%u] link[0x%x] : Req %llu : Handled %d deferred buf_dones num_acked=%d, num_fence_map_out=%d", + 
ctx->ctx_id, ctx->link_hdl, req->request_id, req_isp->num_deferred_acks, + req_isp->num_acked, req_isp->num_fence_map_out); + + req_isp->num_deferred_acks = 0; + + return rc; +} + +static int __cam_isp_ctx_handle_deferred_buf_done_in_bubble( + struct cam_isp_context *ctx_isp, + struct cam_ctx_request *req) +{ + int rc = 0; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_ctx_req *req_isp; + + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + + if (req_isp->num_deferred_acks) + rc = __cam_isp_handle_deferred_buf_done(ctx_isp, req, + req_isp->bubble_report, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + + if (req_isp->num_acked > req_isp->num_fence_map_out) { + /* Should not happen */ + CAM_ERR(CAM_ISP, + "WARNING: req_id %lld num_acked %d > map_out %d, ctx %u, link[0x%x]", + req->request_id, req_isp->num_acked, + req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl); + WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out); + } + + if (req_isp->num_acked == req_isp->num_fence_map_out) + rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req); + + return rc; +} + +static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr( + struct cam_isp_context *ctx_isp, + struct cam_ctx_request *req, + struct cam_isp_hw_done_event_data *done, + uint32_t bubble_state, + bool verify_consumed_addr, + bool defer_buf_done) +{ + int rc = 0; + int i, j, k, def_idx; + bool not_found = false; + bool duplicate_defer_buf_done = false; + struct cam_isp_ctx_req *req_isp; + struct cam_context *ctx = ctx_isp->base; + const char *handle_type; + uint32_t cmp_addr = 0; + struct cam_isp_hw_done_event_data unhandled_done = {0}; + struct cam_isp_context_comp_record *comp_grp = NULL; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + + trace_cam_buf_done("ISP", ctx, req); + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d, ctx %u, link[0x%x]", 
+ bubble_state, req_isp->bubble_detected, ctx->ctx_id, ctx->link_hdl); + + unhandled_done.timestamp = done->timestamp; + + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (done->resource_handle == req_isp->fence_map_out[i].resource_handle) { + cmp_addr = cam_smmu_is_expanded_memory() ? CAM_36BIT_INTF_GET_IOVA_BASE( + req_isp->fence_map_out[i].image_buf_addr[0]) : + req_isp->fence_map_out[i].image_buf_addr[0]; + if (!verify_consumed_addr || + (verify_consumed_addr && (done->last_consumed_addr == cmp_addr))) { + break; + } + } + } + + if (done->hw_type == CAM_ISP_HW_TYPE_SFE) + comp_grp = &ctx_isp->sfe_bus_comp_grp[done->comp_group_id]; + else + comp_grp = &ctx_isp->vfe_bus_comp_grp[done->comp_group_id]; + + if (!comp_grp) { + CAM_ERR(CAM_ISP, "comp_grp is NULL"); + rc = -EINVAL; + return rc; + } + + if (i == req_isp->num_fence_map_out) { + not_found = true; + for (j = 0; j < comp_grp->num_res; j++) { + /* If the res is same with original res, we don't need to read again */ + if (comp_grp->res_id[j] == done->resource_handle) + continue; + + /* Check if the res in the requested list */ + for (k = 0; k < req_isp->num_fence_map_out; k++) + if (comp_grp->res_id[j] == + req_isp->fence_map_out[k].resource_handle) + break; + + /* If res_id[j] isn't in requested list, then try next res in the group */ + if (k == req_isp->num_fence_map_out) { + if (j != comp_grp->num_res - 1) + continue; + else + break; + } + + if (!verify_consumed_addr) { + not_found = false; + break; + } + + /* + * Find out the res from the requested list, + * then we can get last consumed address from this port. 
+ */ + done->resource_handle = comp_grp->res_id[j]; + done->last_consumed_addr = 0; + + hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = + CAM_ISP_HW_MGR_GET_LAST_CONSUMED_ADDR; + isp_hw_cmd_args.cmd_data = done; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd( + ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed, ctx %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return rc; + } + + cmp_addr = cam_smmu_is_expanded_memory() ? + CAM_36BIT_INTF_GET_IOVA_BASE( + req_isp->fence_map_out[k].image_buf_addr[0]) : + req_isp->fence_map_out[k].image_buf_addr[0]; + CAM_DBG(CAM_ISP, + "Get res %s comp_grp_rec_idx:%d fence_map_idx:%d last_consumed_addr:0x%x cmp_addr:0x%x", + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, done->resource_handle), j, k, + done->last_consumed_addr, cmp_addr); + if (done->last_consumed_addr == cmp_addr) { + CAM_DBG(CAM_ISP, "Consumed addr compare success for res:%s ", + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, done->resource_handle)); + not_found = false; + break; + } + } + } + + if (not_found) { + /* + * If not found in current request, it could be + * belonging to next request, this can happen if + * IRQ delay happens. It is only valid when the + * platform doesn't have last consumed address. 
+ */ + CAM_WARN(CAM_ISP, + "BUF_DONE for res %s last_consumed_addr:0x%x not found in Req %lld ", + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, done->resource_handle), + done->last_consumed_addr, + req->request_id); + + unhandled_done.hw_type = done->hw_type; + unhandled_done.resource_handle = done->resource_handle; + unhandled_done.comp_group_id = done->comp_group_id; + unhandled_done.last_consumed_addr = done->last_consumed_addr; + goto check_deferred; + } + + /* + * Compare hw ctxt id as well if applicable, because comp group record will have + * multiple entries with the same resource id. + */ + for (i = 0; i < comp_grp->num_res; i++) { + for (j = 0; j < req_isp->num_fence_map_out; j++) { + if (((!comp_grp->hw_ctxt_id[i]) && + (comp_grp->res_id[i] == req_isp->fence_map_out[j].resource_handle)) + || (comp_grp->hw_ctxt_id[i] && + (comp_grp->res_id[i] == req_isp->fence_map_out[j].resource_handle) + && + (comp_grp->hw_ctxt_id[i] == req_isp->fence_map_out[j].hw_ctxt_id))) + break; + } + + if (j == req_isp->num_fence_map_out) { + /* + * If not found in current request, it could be + * belonging to an active port with no valid fence + * bound to it, we needn't process it. 
+ */ + CAM_DBG(CAM_ISP, + "BUF_DONE for res %s not active in Req %lld ctx %u, link[0x%x]", + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, comp_grp->res_id[i]), + req->request_id, ctx->ctx_id, ctx->link_hdl); + continue; + } + + if (req_isp->fence_map_out[j].sync_id == -1) { + handle_type = __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, + req_isp->fence_map_out[j].resource_handle); + + CAM_WARN(CAM_ISP, + "Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s, ctx %u, link[0x%x]", + req->request_id, i, j, handle_type, ctx->ctx_id, ctx->link_hdl); + + trace_cam_log_event("Duplicate BufDone", + handle_type, req->request_id, ctx->ctx_id); + continue; + } + + /* Get buf handles from packet and retrieve them from presil framework */ + if (cam_presil_mode_enabled()) { + rc = cam_presil_retrieve_buffers_from_packet(req_isp->hw_update_data.packet, + ctx->img_iommu_hdl, req_isp->fence_map_out[j].resource_handle); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to retrieve image buffers req_id:%d ctx_id:%u link[0x%x] bubble detected:%d rc:%d", + req->request_id, ctx->ctx_id, ctx->link_hdl, + req_isp->bubble_detected, rc); + return rc; + } + } + + if (defer_buf_done) { + uint32_t deferred_indx = req_isp->num_deferred_acks; + duplicate_defer_buf_done = false; + + for (k = 0; k < req_isp->num_deferred_acks; k++) { + def_idx = req_isp->deferred_fence_map_index[k]; + if (def_idx == j) { + CAM_WARN(CAM_ISP, + "duplicate deferred ack for ctx[%u] link[0x%x] req %lld res 0x%x sync_id 0x%x", + ctx->ctx_id, ctx->link_hdl, + req->request_id, + req_isp->fence_map_out[j].resource_handle, + req_isp->fence_map_out[j].sync_id); + duplicate_defer_buf_done = true; + break; + } + } + + if (duplicate_defer_buf_done) + continue; + + if (req_isp->num_deferred_acks == req_isp->num_fence_map_out) { + CAM_WARN(CAM_ISP, + "WARNING: req_id %lld num_deferred_acks %d > map_out %d, ctx_idx:%u link[0x%x]", + req->request_id, req_isp->num_deferred_acks, + 
req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl); + continue; + } + + /* + * If we are handling this BUF_DONE event for a request + * that is still in wait_list, do not signal now, + * instead mark it as done and handle it later - + * if this request is going into BUBBLE state later + * it will automatically be re-applied. If this is not + * going into BUBBLE, signal fences later. + * Note - we will come here only if the last consumed + * address matches with this ports buffer. + */ + req_isp->deferred_fence_map_index[deferred_indx] = j; + req_isp->num_deferred_acks++; + CAM_DBG(CAM_ISP, + "ctx[%u] link[0x%x]:Deferred buf done for %llu with bubble state %d recovery %d", + ctx->ctx_id, ctx->link_hdl, req->request_id, bubble_state, + req_isp->bubble_report); + CAM_DBG(CAM_ISP, + "ctx[%u] link[0x%x]:Deferred info:num_acks=%d,fence_map_index=%d,resource_handle=0x%x,sync_id=%d", + ctx->ctx_id, ctx->link_hdl, req_isp->num_deferred_acks, j, + req_isp->fence_map_out[j].resource_handle, + req_isp->fence_map_out[j].sync_id); + continue; + } else if (!req_isp->bubble_detected) { + CAM_DBG(CAM_ISP, + "Sync with success: req %lld res 0x%x %s fd 0x%x, ctx:%u link[0x%x]", + req->request_id, + req_isp->fence_map_out[j].resource_handle, + __cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type, + req_isp->fence_map_out[j].resource_handle), + req_isp->fence_map_out[j].sync_id, + ctx->ctx_id, ctx->link_hdl); + + cam_smmu_buffer_tracker_buffer_putref( + req_isp->fence_map_out[j].buffer_tracker); + + rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id, + CAM_SYNC_STATE_SIGNALED_SUCCESS, + CAM_SYNC_COMMON_EVENT_SUCCESS); + if (rc) { + CAM_ERR(CAM_ISP, + "Sync=%u for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[j].sync_id, req->request_id, + rc, ctx->ctx_id, ctx->link_hdl); + } else if (req_isp->num_deferred_acks) { + /* Process deferred buf_done acks */ + __cam_isp_handle_deferred_buf_done(ctx_isp, + req, false, + 
CAM_SYNC_STATE_SIGNALED_SUCCESS, + CAM_SYNC_COMMON_EVENT_SUCCESS); + } + + if (req_isp->fence_map_out[j].early_sync_id > 0) { + CAM_DBG(CAM_ISP, + "Sync with success: req %lld res 0x%x %s early_fd 0x%x, ctx:%u link[0x%x]", + req->request_id, req_isp->fence_map_out[j].resource_handle, + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, + req_isp->fence_map_out[j].resource_handle), + req_isp->fence_map_out[j].early_sync_id, + ctx->ctx_id, ctx->link_hdl); + rc = cam_sync_signal(req_isp->fence_map_out[j].early_sync_id, + CAM_SYNC_STATE_SIGNALED_SUCCESS, + CAM_SYNC_COMMON_EVENT_SUCCESS); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[j].early_sync_id, + req->request_id, rc, ctx->ctx_id, ctx->link_hdl); + } + + req_isp->fence_map_out[j].early_sync_id = -1; + } + + /* Reset fence */ + req_isp->fence_map_out[j].sync_id = -1; + } else if (!req_isp->bubble_report) { + + CAM_DBG(CAM_ISP, + "Sync with failure: req %lld res 0x%x %s fd 0x%x, ctx:%u link[0x%x]", + req->request_id, + req_isp->fence_map_out[j].resource_handle, + __cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type, + req_isp->fence_map_out[j].resource_handle), + req_isp->fence_map_out[j].sync_id, + ctx->ctx_id, ctx->link_hdl); + + cam_smmu_buffer_tracker_buffer_putref( + req_isp->fence_map_out[j].buffer_tracker); + + rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + if (rc) { + CAM_ERR(CAM_ISP, + "Sync:%u for req:%llu failed with rc:%d,ctx:%u,link[0x%x]", + req_isp->fence_map_out[j].sync_id, req->request_id, + rc, ctx->ctx_id, ctx->link_hdl); + } else if (req_isp->num_deferred_acks) { + /* Process deferred buf_done acks */ + __cam_isp_handle_deferred_buf_done(ctx_isp, req, + false, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + } + + if (req_isp->fence_map_out[j].early_sync_id > 0) { + CAM_DBG(CAM_ISP, + "Sync with failure: req 
%lld res 0x%x %s early_fd 0x%x, ctx:%u link[0x%x]", + req->request_id, req_isp->fence_map_out[j].resource_handle, + __cam_isp_resource_handle_id_to_type( + ctx_isp->isp_device_type, + req_isp->fence_map_out[j].resource_handle), + req_isp->fence_map_out[j].early_sync_id, + ctx->ctx_id, ctx->link_hdl); + rc = cam_sync_signal(req_isp->fence_map_out[j].early_sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[j].early_sync_id, + req->request_id, rc, ctx->ctx_id, ctx->link_hdl); + } + + req_isp->fence_map_out[j].early_sync_id = -1; + } + + /* Reset fence */ + req_isp->fence_map_out[j].sync_id = -1; + } else { + /* + * Ignore the buffer done if bubble detect is on + * Increment the ack number here, and queue the + * request back to pending list whenever all the + * buffers are done. + */ + req_isp->num_acked++; + CAM_DBG(CAM_ISP, + "buf done with bubble state %d recovery %d for req %lld, ctx_idx:%u link[0x%x]", + bubble_state, + req_isp->bubble_report, + req->request_id, + ctx->ctx_id, ctx->link_hdl); + + /* Process deferred buf_done acks */ + if (req_isp->num_deferred_acks) + __cam_isp_handle_deferred_buf_done(ctx_isp, req, + true, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + + if (req_isp->num_acked == req_isp->num_fence_map_out) { + rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req); + if (rc) + CAM_ERR(CAM_ISP, + "Error in buf done for req = %llu with rc = %d, ctx_idx:%u link[0x%x]", + req->request_id, rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + + continue; + } + + CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx_idx:%u link[0x%x]", + req->request_id, + req_isp->fence_map_out[j].sync_id, ctx->ctx_id, ctx->link_hdl); + if (!rc) { + req_isp->num_acked++; + } + + if ((ctx_isp->use_frame_header_ts) && + (req_isp->hw_update_data.frame_header_res_id == + 
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

check_deferred:
	/*
	 * Any buf_done that did not match this request's ports is re-tried
	 * against requests still sitting in the wait/pending lists.
	 */
	if ((unhandled_done.resource_handle > 0) && (!defer_buf_done))
		__cam_isp_ctx_check_deferred_buf_done(
			ctx_isp, &unhandled_done, bubble_state);

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx_idx:%u link[0x%x]",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl);
	}

	/* Request completes only once every output fence has been acked */
	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}

/*
 * Legacy buf_done path (no consumed-address verification): ack the oldest
 * active request first; any handles it could not consume are re-tried on
 * the other active request (at most 2 requests are active at a time) to
 * tolerate IRQ scheduling reordering of buf_done events.
 */
static int __cam_isp_ctx_handle_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	/* Collects the handles the first request could not account for */
	struct cam_isp_hw_done_event_data done_next_req = {0};

	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Buf done with no active request, ctx_idx:%u link[0x%x]",
			ctx->ctx_id, ctx->link_hdl);
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	rc = __cam_isp_ctx_handle_buf_done_for_request(ctx_isp, req, done,
		bubble_state, &done_next_req);

	if (done_next_req.resource_handle) {
		struct cam_isp_hw_done_event_data unhandled_res = {0};
		struct cam_ctx_request *next_req = list_last_entry(
			&ctx->active_req_list, struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id) {
			/*
			 * Few resource handles are already signalled in the
			 * current request, lets check if there is another
			 * request waiting for these resources. This can
			 * happen if handling some of next request's buf done
			 * events are happening first before handling current
			 * request's remaining buf dones due to IRQ scheduling.
			 * Lets check only one more request as we will have
			 * maximum of 2 requests in active_list at any time.
			 */

			CAM_WARN(CAM_ISP,
				"Unhandled bufdone resources for req %lld,trying next request %lld,ctx:%u link[0x%x]",
				req->request_id, next_req->request_id, ctx->ctx_id, ctx->link_hdl);

			__cam_isp_ctx_handle_buf_done_for_request(ctx_isp,
				next_req, &done_next_req,
				bubble_state, &unhandled_res);

			if (unhandled_res.resource_handle == 0)
				CAM_INFO(CAM_ISP,
					"BUF Done event handed for next request %lld, ctx_idx:%u link[0x%x]",
					next_req->request_id, ctx->ctx_id, ctx->link_hdl);
			else
				CAM_ERR(CAM_ISP,
					"BUF Done not handled for next request %lld, ctx_idx:%u link[0x%x]",
					next_req->request_id, ctx->ctx_id, ctx->link_hdl);
		} else {
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd, ctx_idx:%u link[0x%x]",
				req->request_id, ctx->ctx_id, ctx->link_hdl);
		}
	}

	return rc;
}

/*
 * Decide whether a buf_done belongs to @req (the next active request) by
 * comparing the event's last-consumed address against the request's output
 * buffer addresses; sets *irq_delay_detected when a match is found, meaning
 * the IRQ for this event arrived late and targets the newer request.
 */
static void __cam_isp_ctx_buf_done_match_req(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	bool *irq_delay_detected)
{
	int rc = 0;
	int i, j, k;
	uint32_t match_count = 0;
	struct cam_isp_ctx_req *req_isp;
	uint32_t cmp_addr = 0;
	struct cam_isp_context_comp_record *comp_grp = NULL;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	struct cam_context *ctx = ctx_isp->base;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	/* Composite-group records are kept per bus type (SFE vs VFE) */
	if (done->hw_type == CAM_ISP_HW_TYPE_SFE)
		comp_grp = &ctx_isp->sfe_bus_comp_grp[done->comp_group_id];
	else
		comp_grp = &ctx_isp->vfe_bus_comp_grp[done->comp_group_id];

	/* First pass: direct match on the reported resource handle */
	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		cmp_addr = cam_smmu_is_expanded_memory() ?
			CAM_36BIT_INTF_GET_IOVA_BASE(
			req_isp->fence_map_out[i].image_buf_addr[0]) :
			req_isp->fence_map_out[i].image_buf_addr[0];
		if ((done->resource_handle ==
			req_isp->fence_map_out[i].resource_handle) &&
			(done->last_consumed_addr == cmp_addr)) {
			match_count++;
			break;
		}
	}

	/*
	 * No direct match: query other ports of the same composite group, since
	 * the whole group signals together and a sibling port may carry the
	 * consumed address that identifies this request.
	 */
	if (i == req_isp->num_fence_map_out) {
		for (j = 0; j < comp_grp->num_res; j++) {
			/* If the res is same with original res, we don't need to read again */
			if (comp_grp->res_id[j] == done->resource_handle)
				continue;

			/* Check if the res in the requested list */
			for (k = 0; k < req_isp->num_fence_map_out; k++)
				if (comp_grp->res_id[j] ==
					req_isp->fence_map_out[k].resource_handle)
					break;

			/* If res_id[j] isn't in requested list, then try next res in the group */
			if (k == req_isp->num_fence_map_out) {
				if (j != comp_grp->num_res - 1)
					continue;
				else {
					CAM_ERR(CAM_ISP, "not in this group and exit.");
					break;
				}
			}

			/*
			 * Find out the res from the requested list,
			 * then we can get last consumed address from this port.
			 */
			done->resource_handle = comp_grp->res_id[j];
			done->last_consumed_addr = 0;

			/* Ask the HW manager to fill in last_consumed_addr for this port */
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CONSUMED_ADDR;
			isp_hw_cmd_args.cmd_data = done;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed, ctx %u, link: 0x%x",
					ctx->ctx_id, ctx->link_hdl);
			}

			cmp_addr = cam_smmu_is_expanded_memory() ?
			CAM_36BIT_INTF_GET_IOVA_BASE(
				req_isp->fence_map_out[k].image_buf_addr[0]) :
				req_isp->fence_map_out[k].image_buf_addr[0];
			CAM_DBG(CAM_ISP, "Get res %s last_consumed_addr:0x%x, cmp_addr:0x%x",
				__cam_isp_resource_handle_id_to_type(
					ctx_isp->isp_device_type, done->resource_handle),
				cmp_addr, done->last_consumed_addr);
			if (done->last_consumed_addr == cmp_addr) {
				CAM_DBG(CAM_ISP, "Consumed addr compare success for res:%s ",
					__cam_isp_resource_handle_id_to_type(
						ctx_isp->isp_device_type, done->resource_handle));
				match_count++;
				break;
			}
		}
	}

	if (match_count > 0)
		*irq_delay_detected = true;
	else
		*irq_delay_detected = false;

	CAM_DBG(CAM_ISP,
		"buf done num handles %d [%s] match count %d for next req:%lld",
		done->resource_handle,
		__cam_isp_resource_handle_id_to_type(
			ctx_isp->isp_device_type, done->resource_handle),
		match_count, req->request_id);
	CAM_DBG(CAM_ISP,
		"irq_delay_detected %d", *irq_delay_detected);
}

/*
 * After new deferred acks were recorded on @deferred_req (a wait/pending-list
 * request), replay them onto the oldest ACTIVE request: signal the matching
 * output fences there, since the delayed buf_done actually belonged to it.
 */
static void __cam_isp_ctx_try_buf_done_process_for_active_request(
	uint32_t deferred_ack_start_idx, struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *deferred_req)
{
	int i, j, deferred_map_idx, rc;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *curr_active_req;
	struct cam_isp_ctx_req *curr_active_isp_req;
	struct cam_isp_ctx_req *deferred_isp_req;

	if (list_empty(&ctx->active_req_list))
		return;

	curr_active_req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);
	curr_active_isp_req = (struct cam_isp_ctx_req *)curr_active_req->req_priv;
	deferred_isp_req = (struct cam_isp_ctx_req *)deferred_req->req_priv;

	/* Check from newly updated deferred acks */
	for (i = deferred_ack_start_idx; i < deferred_isp_req->num_deferred_acks; i++) {
		deferred_map_idx = deferred_isp_req->deferred_fence_map_index[i];

		for (j = 0; j < curr_active_isp_req->num_fence_map_out; j++) {
			/* resource needs to match */
			if
(curr_active_isp_req->fence_map_out[j].resource_handle != + deferred_isp_req->fence_map_out[deferred_map_idx].resource_handle) + continue; + + /* Check if fence is valid */ + if (curr_active_isp_req->fence_map_out[j].sync_id == -1) + break; + + CAM_WARN(CAM_ISP, + "Processing delayed buf done req: %llu bubble_detected: %s res: 0x%x fd: 0x%x early_fd: 0x%x, ctx: %u link: 0x%x [deferred req: %llu last applied: %llu]", + curr_active_req->request_id, + CAM_BOOL_TO_YESNO(curr_active_isp_req->bubble_detected), + curr_active_isp_req->fence_map_out[j].resource_handle, + curr_active_isp_req->fence_map_out[j].sync_id, + curr_active_isp_req->fence_map_out[j].early_sync_id, + ctx->ctx_id, ctx->link_hdl, + deferred_req->request_id, ctx_isp->last_applied_req_id); + + /* Signal only if bubble is not detected for this request */ + if (!curr_active_isp_req->bubble_detected) { + rc = cam_sync_signal(curr_active_isp_req->fence_map_out[j].sync_id, + CAM_SYNC_STATE_SIGNALED_SUCCESS, + CAM_SYNC_COMMON_EVENT_SUCCESS); + if (rc) + CAM_ERR(CAM_ISP, + "Sync: %d for req: %llu failed with rc: %d, ctx: %u link: 0x%x", + curr_active_isp_req->fence_map_out[j].sync_id, + curr_active_req->request_id, rc, + ctx->ctx_id, ctx->link_hdl); + + curr_active_isp_req->fence_map_out[j].sync_id = -1; + + if (curr_active_isp_req->fence_map_out[j].early_sync_id > 0) { + rc = cam_sync_signal( + curr_active_isp_req->fence_map_out[j].early_sync_id, + CAM_SYNC_STATE_SIGNALED_SUCCESS, + CAM_SYNC_COMMON_EVENT_SUCCESS); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + curr_active_isp_req->fence_map_out[j].early_sync_id, + curr_active_req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + curr_active_isp_req->fence_map_out[j].early_sync_id = -1; + } + } + + curr_active_isp_req->num_acked++; + break; + } + } +} + +static int __cam_isp_ctx_check_deferred_buf_done( + struct cam_isp_context *ctx_isp, + struct cam_isp_hw_done_event_data *done, + uint32_t 
	bubble_state)
{
	int rc = 0;
	uint32_t curr_num_deferred = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	bool req_in_pending_wait_list = false;

	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);

		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		curr_num_deferred = req_isp->num_deferred_acks;

		req_in_pending_wait_list = true;
		/* Rate-limit this log to once per applied request */
		if (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id) {
			CAM_DBG(CAM_ISP,
				"Trying to find buf done with req in wait list, req %llu last apply id:%lld last err id:%lld curr_num_deferred: %u, ctx: %u link: 0x%x",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx_isp->last_bufdone_err_apply_req_id, curr_num_deferred,
				ctx->ctx_id, ctx->link_hdl);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}

		/*
		 * Verify consumed address for this request to make sure
		 * we are handling the buf_done for the correct
		 * buffer. Also defer actual buf_done handling, i.e
		 * do not signal the fence as this request may go into
		 * Bubble state eventually.
		 */
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, req, done, bubble_state, true, true);

		/* Check for active req if any deferred is processed */
		if (req_isp->num_deferred_acks > curr_num_deferred)
			__cam_isp_ctx_try_buf_done_process_for_active_request(
				curr_num_deferred, ctx_isp, req);
	} else if (!list_empty(&ctx->pending_req_list)) {
		/*
		 * We saw the case that the hw config is blocked due to
		 * some reason, then we get the reg upd and buf done before
		 * the req is added to wait req list.
		 */
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		curr_num_deferred = req_isp->num_deferred_acks;

		req_in_pending_wait_list = true;
		if (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id) {
			CAM_DBG(CAM_ISP,
				"Trying to find buf done with req in pending list, req %llu last apply id:%lld last err id:%lld curr_num_deferred: %u, ctx: %u link: 0x%x",
				req->request_id, ctx_isp->last_applied_req_id,
				ctx_isp->last_bufdone_err_apply_req_id, curr_num_deferred,
				ctx->ctx_id, ctx->link_hdl);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}

		/*
		 * Verify consumed address for this request to make sure
		 * we are handling the buf_done for the correct
		 * buffer. Also defer actual buf_done handling, i.e
		 * do not signal the fence as this request may go into
		 * Bubble state eventually.
		 */
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, req, done, bubble_state, true, true);

		/* Check for active req if any deferred is processed */
		if (req_isp->num_deferred_acks > curr_num_deferred)
			__cam_isp_ctx_try_buf_done_process_for_active_request(
				curr_num_deferred, ctx_isp, req);
	}

	if (!req_in_pending_wait_list && (ctx_isp->last_applied_req_id !=
		ctx_isp->last_bufdone_err_apply_req_id)) {
		CAM_DBG(CAM_ISP,
			"Bufdone without active request bubble_state=%d last_applied_req_id:%lld,ctx:%u link:0x%x",
			bubble_state, ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl);
		ctx_isp->last_bufdone_err_apply_req_id =
			ctx_isp->last_applied_req_id;
	}

	return rc;
}

/*
 * Consumed-address aware buf_done path: detects late IRQs that actually
 * belong to the newer of the two active requests before acking the older one.
 */
static int __cam_isp_ctx_handle_buf_done_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	bool irq_delay_detected = false;
	struct cam_ctx_request *req;
	struct cam_ctx_request *next_req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	/* No active request: try to defer-match against wait/pending requests */
	if (list_empty(&ctx->active_req_list)) {
		return __cam_isp_ctx_check_deferred_buf_done(
			ctx_isp, done, bubble_state);
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	if (ctx_isp->active_req_cnt > 1) {
		next_req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request, list);
		if (next_req->request_id != req->request_id)
			__cam_isp_ctx_buf_done_match_req(
				ctx_isp, next_req, done, &irq_delay_detected);
		else
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd, ctx: %u link: 0x%x",
				req->request_id, ctx->ctx_id, ctx->link_hdl);
	}

	/*
	 * If irq delay isn't detected, then we need to verify
	 * the consumed address for current req, otherwise, we
	 * can't verify the consumed address.
	 */
	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(ctx_isp, req, done, bubble_state,
		!irq_delay_detected, false);

	/*
	 * Verify the consumed address for next req all the time,
	 * since the reported buf done event may belong to current
	 * req, then we can't signal this event for next req.
	 */
	if (!rc && irq_delay_detected)
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(ctx_isp, next_req, done,
			bubble_state, true, false);

	return rc;
}

/*
 * Top-level buf_done dispatch for the activated state: use the
 * consumed-address path when supported (and the event is not an early
 * done), otherwise fall back to the legacy path.
 */
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;

	if (ctx_isp->support_consumed_addr && (!done->is_early_done))
		rc = __cam_isp_ctx_handle_buf_done_verify_addr(
			ctx_isp, done, bubble_state);
	else
		rc = __cam_isp_ctx_handle_buf_done(
			ctx_isp, done, bubble_state);

	return rc;
}

/*
 * Workqueue callback (offline/vfps mode): pop the head of the pending list
 * and program it to the hardware. Moves the request to the wait list before
 * hw_config and rolls the move back on failure.
 */
static int __cam_isp_ctx_apply_pending_req(
	void *priv, void *data)
{
	int rc = 0;
	int64_t prev_applied_req;
	struct cam_context *ctx = NULL;
	struct cam_isp_context *ctx_isp = priv;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_config_args cfg = {0};

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid ctx_isp:%pK", ctx);
		rc = -EINVAL;
		goto end;
	}
	ctx = ctx_isp->base;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_DBG(CAM_ISP,
			"No pending requests to apply, ctx_idx: %u, link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		rc = -EFAULT;
		goto end;
	}

	/* Throttle: vfps aux contexts allow 1 in flight, offline allows 2 */
	if (ctx_isp->vfps_aux_context) {
		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED)
			goto end;

		if (ctx_isp->active_req_cnt >= 1)
			goto end;
	} else {
		if ((ctx->state != CAM_CTX_ACTIVATED) ||
			(!atomic_read(&ctx_isp->rxd_epoch)) ||
			(ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED))
			goto end;

		if (ctx_isp->active_req_cnt >= 2)
			goto end;
	}


	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d ctx_idx: %u, link: 0x%x",
		req->request_id, ctx_isp->substate_activated, ctx->ctx_id, ctx->link_hdl);
	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	cfg.ctxt_to_hw_map =
	ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;

	if (ctx_isp->vfps_aux_context)
		cfg.init_packet = true;

	/*
	 * Offline mode may receive the SOF and REG_UPD earlier than
	 * CDM processing return back, so we set the substate before
	 * apply setting.
	 */
	spin_lock_bh(&ctx->lock);

	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
	/* Remember previous id so a failed hw_config can be rolled back */
	prev_applied_req = ctx_isp->last_applied_req_id;
	ctx_isp->last_applied_req_id = req->request_id;
	atomic_set(&ctx_isp->apply_in_progress, 1);

	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->wait_req_list);

	spin_unlock_bh(&ctx->lock);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"Can not apply the configuration,ctx: %u,link: 0x%x",
			ctx->ctx_id, ctx->link_hdl);
		/* Roll back substate, applied id and list placement */
		spin_lock_bh(&ctx->lock);

		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		ctx_isp->last_applied_req_id = prev_applied_req;
		atomic_set(&ctx_isp->apply_in_progress, 0);

		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);

		spin_unlock_bh(&ctx->lock);
	} else {
		atomic_set(&ctx_isp->last_applied_default, 0);
		atomic_set(&ctx_isp->apply_in_progress, 0);
		CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld, ctx: %u, link: 0x%x",
			CAM_ISP_CTX_ACTIVATED_APPLIED,
			ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
	}

end:
	return rc;
}

/* Queue __cam_isp_ctx_apply_pending_req on the context's workqueue. */
static int __cam_isp_ctx_schedule_apply_req(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct crm_workq_task *task;

	task = cam_req_mgr_workq_get_task(ctx_isp->workq);
	if (!task) {
		CAM_ERR(CAM_ISP, "No task for worker");
		return -ENOMEM;
	}

	task->process_cb = __cam_isp_ctx_apply_pending_req;
	rc = cam_req_mgr_workq_enqueue_task(task, ctx_isp, CRM_TASK_PRIORITY_0);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to schedule task rc:%d", rc);

	return rc;
}

/*
 * EPOCH handler for offline contexts: record the epoch, report the SOF
 * timestamp for the newest unreported active request and schedule the next
 * pending request to be applied.
 */
static int __cam_isp_ctx_offline_epoch_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req, *req_temp;
	uint64_t request_id = 0;

	atomic_set(&ctx_isp->rxd_epoch, 1);

	CAM_DBG(CAM_ISP, "SOF frame %lld ctx %u link: 0x%x", ctx_isp->frame_id,
		ctx->ctx_id, ctx->link_hdl);

	/*
	 * For offline it is not possible for epoch to be generated without
	 * RUP done. IRQ scheduling delays can possibly cause this.
	 */
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP,
			"Active list empty on ctx:%u link:0x%x - EPOCH serviced before RUP",
			ctx->ctx_id, ctx->link_hdl);
	} else {
		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
			if (req->request_id > ctx_isp->reported_req_id) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				break;
			}
		}
	}

	__cam_isp_ctx_schedule_apply_req(ctx_isp);

	/*
	 * If no valid request, wait for RUP shutter posted after buf done
	 */
	if (request_id)
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
		request_id);

	return 0;
}

/*
 * Reg-update received while in epoch/bubble substate. Expected only for the
 * very first frame (early PCR); otherwise it is flagged as an unserved RUP.
 */
static int __cam_isp_ctx_reg_upd_in_epoch_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;

	if (ctx_isp->frame_id == 1) {
		CAM_DBG(CAM_ISP, "Reg update in Substate[%s] for early PCR",
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->substate_activated));

		if (!list_empty(&ctx->active_req_list)) {
			req = list_first_entry(&ctx->active_req_list, struct cam_ctx_request,
				list);
			if (req->request_id)
				ctx_isp->reported_req_id =
req->request_id; + + __cam_isp_ctx_send_sof_timestamp(ctx_isp, req->request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + } + } else { + if (!atomic_read(&ctx_isp->last_applied_default)) + atomic_set(&ctx_isp->unserved_rup, 1); + CAM_WARN_RATE_LIMIT(CAM_ISP, + "ctx:%u Unexpected regupdate in activated Substate[%s] for frame_id:%lld", + "last_applied_default:%d, unserved_rup:%d", + ctx_isp->base->ctx_id, + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), + ctx_isp->frame_id, + atomic_read(&ctx_isp->last_applied_default), + atomic_read(&ctx_isp->unserved_rup)); + + atomic_set(&ctx_isp->last_applied_default, 0); + + __cam_isp_ctx_send_sof_timestamp(ctx_isp, 0, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + } + return 0; +} + +static int __cam_isp_ctx_reg_upd_in_applied_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + struct cam_ctx_request *req; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_ctx_req *req_isp; + uint64_t request_id = 0; + + if (list_empty(&ctx->wait_req_list)) { + CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto end; + } + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + if (req_isp->num_fence_map_out != 0) { + list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + request_id = req->request_id; + CAM_DBG(CAM_REQ, + "move request %lld to active list(cnt = %d), ctx %u, link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_RUP, req, NULL); + } else { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + /* no io config, so the request is completed. 
*/ + __cam_isp_ctx_move_req_to_free_list(ctx, req); + CAM_DBG(CAM_ISP, + "move active request %lld to free list(cnt = %d), ctx %u, link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + } + + /* + * This function only called directly from applied and bubble applied + * state so change substate here. + */ + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH; + CAM_DBG(CAM_ISP, "next Substate[%s], ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + if (request_id) + ctx_isp->reported_req_id = request_id; + + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, request_id); + +end: + return rc; +} + +static int __cam_isp_ctx_notify_sof_in_activated_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + uint64_t request_id = 0; + struct cam_context *ctx = ctx_isp->base; + struct cam_ctx_request *req; + struct cam_isp_ctx_req *req_isp; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + uint64_t last_cdm_done_req = 0; + struct cam_isp_hw_epoch_event_data *epoch_done_event_data = + (struct cam_isp_hw_epoch_event_data *)evt_data; + + if (!evt_data) { + CAM_ERR(CAM_ISP, "invalid event data"); + return -EINVAL; + } + + ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta; + + if (atomic_read(&ctx_isp->process_bubble)) { + if (list_empty(&ctx->active_req_list)) { + CAM_ERR(CAM_ISP, + "No available active req in bubble, ctx %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + atomic_set(&ctx_isp->process_bubble, 0); + ctx_isp->bubble_frame_cnt = 0; + rc = -EINVAL; + return rc; + } + + if (ctx_isp->last_sof_timestamp == + ctx_isp->sof_timestamp_val) { + CAM_DBG(CAM_ISP, + "Tasklet delay detected! 
Bubble frame check skipped, sof_timestamp: %lld, ctx %u, link: 0x%x", + ctx_isp->sof_timestamp_val, ctx->ctx_id, ctx->link_hdl); + goto notify_only; + } + + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + if (ctx_isp->bubble_frame_cnt >= 1 && + req_isp->bubble_detected) { + hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = + CAM_ISP_HW_MGR_GET_LAST_CDM_DONE; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd( + ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed, ctx %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return rc; + } + + last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done; + CAM_DBG(CAM_ISP, "last_cdm_done req: %d, ctx %u, link: 0x%x", + last_cdm_done_req, ctx->ctx_id, ctx->link_hdl); + + if (last_cdm_done_req >= req->request_id) { + CAM_DBG(CAM_ISP, + "invalid sof event data CDM cb detected for req: %lld, possible buf_done delay, waiting for buf_done, ctx %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + ctx_isp->bubble_frame_cnt = 0; + } else { + CAM_DBG(CAM_ISP, + "CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay, ctx %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + req_isp->num_acked = 0; + req_isp->num_deferred_acks = 0; + ctx_isp->bubble_frame_cnt = 0; + req_isp->bubble_detected = false; + req_isp->cdm_reset_before_apply = true; + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + atomic_set(&ctx_isp->process_bubble, 0); + ctx_isp->active_req_cnt--; + CAM_DBG(CAM_REQ, + "Move active req: %lld to pending list(cnt = %d) [bubble re-apply], ctx %u link: 0x%x", + req->request_id, + ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + } + } else if (req_isp->bubble_detected) { + ctx_isp->bubble_frame_cnt++; + 
CAM_DBG(CAM_ISP, + "Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld, ctx %u link: 0x%x", + req->request_id, + ctx_isp->bubble_frame_cnt, ctx->ctx_id, ctx->link_hdl); + } else { + CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld, ctx %u link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + } + } + +notify_only: + /* + * notify reqmgr with sof signal. Note, due to scheduling delay + * we can run into situation that two active requests has already + * be in the active queue while we try to do the notification. + * In this case, we need to skip the current notification. This + * helps the state machine to catch up the delay. + */ + if (ctx_isp->active_req_cnt <= 2) { + __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp); + + list_for_each_entry(req, &ctx->active_req_list, list) { + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + if ((!req_isp->bubble_detected) && + (req->request_id > ctx_isp->reported_req_id)) { + request_id = req->request_id; + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_EPOCH, req, NULL); + break; + } + } + + if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE) + request_id = 0; + + if (request_id != 0) + ctx_isp->reported_req_id = request_id; + + if (request_id == 0) + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, + request_id); + } + + ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val; + return 0; +} + +static int __cam_isp_ctx_notify_eof_in_activated_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + struct cam_context *ctx = ctx_isp->base; + uint64_t last_cdm_done_req = 0; + + /* update last cdm done timestamp */ + rc = __cam_isp_ctx_get_cdm_done_timestamp(ctx, &last_cdm_done_req); + if (rc) + CAM_ERR(CAM_ISP, "ctx:%u link: 0x%x Failed to get timestamp from HW", + ctx->ctx_id, ctx->link_hdl); + 
__cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_CDM_DONE, last_cdm_done_req); + + /* notify reqmgr with eof signal */ + rc = __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_EOF, ctx_isp); + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_EOF, 0); + + return rc; +} + +static int __cam_isp_ctx_reg_upd_in_hw_error( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; + return 0; +} + +static int __cam_isp_ctx_sof_in_activated_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + struct cam_isp_hw_sof_event_data *sof_event_data = evt_data; + struct cam_ctx_request *req = NULL; + struct cam_context *ctx = ctx_isp->base; + uint64_t request_id = 0; + + ctx_isp->last_sof_jiffies = jiffies; + + /* First check if there is a valid request in active list */ + list_for_each_entry(req, &ctx->active_req_list, list) { + if (req->request_id > ctx_isp->reported_req_id) { + request_id = req->request_id; + break; + } + } + + /* + * If nothing in active list, current request might have not moved + * from wait to active list. 
This could happen if REG_UPDATE to sw + * is coming immediately after SOF + */ + if (request_id == 0) { + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + if (req) + request_id = req->request_id; + } + + if (!evt_data) { + CAM_ERR(CAM_ISP, "in valid sof event data, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp); + + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id); + + CAM_DBG(CAM_ISP, + "frame id:%lld timestamp sof:0x%llx boot:0x%llx, ctx:%u, request:%llu, link:0x%x", + ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx_isp->boot_timestamp, + ctx->ctx_id, request_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + struct cam_ctx_request *req = NULL; + struct cam_isp_ctx_req *req_isp; + struct cam_context *ctx = ctx_isp->base; + + if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) { + CAM_DBG(CAM_ISP, "invalid RUP"); + goto end; + } + + /* + * This is for the first update. The initial setting will + * cause the reg_upd in the first frame. 
+ */ + if (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + if (req_isp->num_fence_map_out == req_isp->num_acked) + __cam_isp_ctx_move_req_to_free_list(ctx, req); + else + CAM_ERR(CAM_ISP, + "receive rup in unexpected state, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + } else { + if (!atomic_read(&ctx_isp->last_applied_default)) + atomic_set(&ctx_isp->unserved_rup, 1); + CAM_WARN(CAM_ISP, + "Received a unserved rup ctx:%u link: 0x%x, last_applied_default:%d, unserved_rup:%d", + ctx->ctx_id, + ctx->link_hdl, + atomic_read(&ctx_isp->last_applied_default), + atomic_read(&ctx_isp->unserved_rup)); + atomic_set(&ctx_isp->last_applied_default, 0); + } + + if (req != NULL) { + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, + req->request_id); + } +end: + return rc; +} + +static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + uint64_t request_id = 0; + uint32_t wait_req_cnt = 0; + uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS; + struct cam_ctx_request *req; + struct cam_isp_ctx_req *req_isp; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_hw_epoch_event_data *epoch_done_event_data = + (struct cam_isp_hw_epoch_event_data *)evt_data; + struct cam_isp_ctx_irq_ops *irq_ops; + + if (!evt_data) { + CAM_ERR(CAM_ISP, "invalid event data"); + return -EINVAL; + } + + if (ctx_isp->bubble_recover_dis && !ctx_isp->sfe_en) { + if (atomic_read(&ctx_isp->unserved_rup)) { + CAM_INFO(CAM_ISP, "Processed a unserved rup, ctx:%u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + __cam_isp_ctx_reg_upd_in_applied_state(ctx_isp, NULL); + irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated]; + if (irq_ops->irq_ops[CAM_ISP_HW_EVENT_EPOCH]) + 
irq_ops->irq_ops[CAM_ISP_HW_EVENT_EPOCH](ctx_isp, evt_data); + else + CAM_DBG(CAM_ISP, + "No handle function for Substate[%s], evt id %d, ctx:%u link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), + CAM_ISP_HW_EVENT_EPOCH, + ctx->ctx_id, ctx->link_hdl); + atomic_set(&ctx_isp->unserved_rup, 0); + } else { + CAM_INFO(CAM_ISP, "Bubble Recovery Disabled"); + __cam_isp_ctx_send_sof_timestamp(ctx_isp, 0, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + } + return 0; + } + + ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta; + if (list_empty(&ctx->wait_req_list)) { + /* + * If no wait req in epoch, this is an error case. + * The recovery is to go back to sof state + */ + CAM_ERR(CAM_ISP, "Ctx:%u link: 0x%x No wait request", ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; + + /* Send SOF event as empty frame*/ + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_EPOCH, NULL, NULL); + goto end; + } + + if (ctx_isp->last_applied_jiffies >= ctx_isp->last_sof_jiffies) { + list_for_each_entry(req, &ctx->wait_req_list, list) { + wait_req_cnt++; + } + + /* + * The previous req is applied after SOF and there is only + * one applied req, we don't need to report bubble for this case. 
+ */ + if (wait_req_cnt == 1) { + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + request_id = req->request_id; + CAM_INFO(CAM_ISP, + "ctx:%d Don't report the bubble for req:%lld", + ctx->ctx_id, request_id); + goto end; + } + } + + /* Update state prior to notifying CRM */ + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE; + + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, + list); + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + req_isp->bubble_detected = true; + req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO; + req_isp->cdm_reset_before_apply = false; + atomic_set(&ctx_isp->process_bubble, 1); + + CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%u link: 0x%x Report Bubble flag %d req id:%lld", + ctx->ctx_id, ctx->link_hdl, req_isp->bubble_report, req->request_id); + + if (req_isp->bubble_report) { + __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE, + req->request_id, ctx_isp); + trace_cam_log_event("Bubble", "Rcvd epoch in applied state", + req->request_id, ctx->ctx_id); + } else { + req_isp->bubble_report = 0; + CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + + if (ctx_isp->active_req_cnt <= 1) + __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp); + } + + /* + * Always move the request to active list. Let buf done + * function handles the rest. + */ + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx %u, link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + + /* + * Handle the deferred buf done after moving + * the bubble req to active req list. + */ + __cam_isp_ctx_handle_deferred_buf_done_in_bubble( + ctx_isp, req); + + /* + * Update the record before req pointer to + * other invalid req. 
+ */ + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_EPOCH, req, NULL); + + /* + * Get the req again from active_req_list in case + * the active req cnt is 2. + */ + list_for_each_entry(req, &ctx->active_req_list, list) { + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + if ((!req_isp->bubble_report) && + (req->request_id > ctx_isp->reported_req_id)) { + request_id = req->request_id; + ctx_isp->reported_req_id = request_id; + CAM_DBG(CAM_ISP, + "ctx %u link: 0x%x reported_req_id update to %lld", + ctx->ctx_id, ctx->link_hdl, ctx_isp->reported_req_id); + break; + } + } + + if ((request_id != 0) && req_isp->bubble_detected) + sof_event_status = CAM_REQ_MGR_SOF_EVENT_ERROR; + + __cam_isp_ctx_send_sof_timestamp(ctx_isp, 0, + sof_event_status); + + cam_req_mgr_debug_delay_detect(); + trace_cam_delay_detect("ISP", + "bubble epoch_in_applied", req->request_id, + ctx->ctx_id, ctx->link_hdl, ctx->session_hdl, + CAM_DEFAULT_VALUE); +end: + if (request_id == 0) { + req = list_last_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id); + } else { + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id); + } + + CAM_DBG(CAM_ISP, "next Substate[%s], ctx_idx: %u link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + return 0; +} + +static int __cam_isp_ctx_buf_done_in_sof(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + struct cam_isp_hw_done_event_data *done = + (struct cam_isp_hw_done_event_data *) evt_data; + + rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0); + return rc; +} + +static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + struct cam_isp_hw_done_event_data *done = + (struct cam_isp_hw_done_event_data *) evt_data; + + rc = 
__cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0); + return rc; +} + + +static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_hw_sof_event_data *sof_event_data = evt_data; + struct cam_ctx_request *req; + + if (!evt_data) { + CAM_ERR(CAM_ISP, "in valid sof event data"); + return -EINVAL; + } + + ctx_isp->last_sof_jiffies = jiffies; + + if (atomic_read(&ctx_isp->apply_in_progress)) + CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp); + + if (list_empty(&ctx->active_req_list)) + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; + else + CAM_DBG(CAM_ISP, "Still need to wait for the buf done, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + req = list_last_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + if (req) + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_SOF, + req->request_id); + + if (ctx_isp->frame_id == 1) + CAM_INFO(CAM_ISP, + "First SOF in EPCR ctx:%u link: 0x%x frame_id:%lld next substate %s", + ctx->ctx_id, ctx->link_hdl, ctx_isp->frame_id, + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated)); + + CAM_DBG(CAM_ISP, "SOF in epoch ctx:%u link: 0x%x frame_id:%lld next substate:%s", + ctx->ctx_id, ctx->link_hdl, ctx_isp->frame_id, + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated)); + + return rc; +} + +static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + struct cam_isp_hw_done_event_data *done = + (struct cam_isp_hw_done_event_data *) evt_data; + + rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0); + return rc; +} + +static int __cam_isp_ctx_buf_done_in_bubble( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + 
struct cam_isp_hw_done_event_data *done = + (struct cam_isp_hw_done_event_data *) evt_data; + + rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1); + return rc; +} + +static int __cam_isp_ctx_epoch_in_bubble_applied( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + uint64_t request_id = 0; + struct cam_ctx_request *req; + struct cam_isp_ctx_req *req_isp; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_hw_epoch_event_data *epoch_done_event_data = + (struct cam_isp_hw_epoch_event_data *)evt_data; + + if (!evt_data) { + CAM_ERR(CAM_ISP, "invalid event data, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta; + + /* + * This means we missed the reg upd ack. So we need to + * transition to BUBBLE state again. + */ + + if (list_empty(&ctx->wait_req_list)) { + /* + * If no pending req in epoch, this is an error case. + * Just go back to the bubble state. + */ + CAM_ERR(CAM_ISP, "ctx:%u link: 0x%x No pending request.", + ctx->ctx_id, ctx->link_hdl); + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_EPOCH, NULL, NULL); + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE; + goto end; + } + + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, + list); + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + req_isp->bubble_detected = true; + CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%u link: 0x%x Report Bubble flag %d req id:%lld", + ctx->ctx_id, ctx->link_hdl, req_isp->bubble_report, req->request_id); + req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO; + req_isp->cdm_reset_before_apply = false; + + if (req_isp->bubble_report) { + __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE, + req->request_id, ctx_isp); + atomic_set(&ctx_isp->process_bubble, 1); + } else { + req_isp->bubble_report = 0; + 
CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + if (ctx_isp->active_req_cnt <= 1) + __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp); + + atomic_set(&ctx_isp->process_bubble, 1); + } + + /* + * Always move the request to active list. Let buf done + * function handles the rest. + */ + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d) ctx %u, link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + + /* + * Handle the deferred buf done after moving + * the bubble req to active req list. + */ + __cam_isp_ctx_handle_deferred_buf_done_in_bubble( + ctx_isp, req); + + if (!req_isp->bubble_detected) { + req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, + list); + req_isp->bubble_detected = true; + req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO; + req_isp->cdm_reset_before_apply = false; + atomic_set(&ctx_isp->process_bubble, 1); + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + } + + if (!req_isp->bubble_report) { + if (req->request_id > ctx_isp->reported_req_id) { + request_id = req->request_id; + ctx_isp->reported_req_id = request_id; + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_ERROR); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_EPOCH, req, NULL); + } else { + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_EPOCH, NULL, NULL); + } + } else { + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_EPOCH, NULL, NULL); + } + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE; + 
CAM_DBG(CAM_ISP, "next Substate[%s], ctx_idx: %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + cam_req_mgr_debug_delay_detect(); + trace_cam_delay_detect("ISP", + "bubble epoch_in_bubble_applied", + req->request_id, ctx->ctx_id, ctx->link_hdl, + ctx->session_hdl, + CAM_DEFAULT_VALUE); +end: + req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request, + list); + if (req) + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id); + return 0; +} + +static int __cam_isp_ctx_buf_done_in_bubble_applied( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + struct cam_isp_hw_done_event_data *done = + (struct cam_isp_hw_done_event_data *) evt_data; + + rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1); + + return rc; +} + +static void __cam_isp_get_notification_evt_params( + uint32_t hw_error, uint32_t *fence_evt_cause, + uint32_t *req_mgr_err_code, uint32_t *recovery_type) +{ + uint32_t err_type, err_code = 0, recovery_type_temp; + + err_type = CAM_SYNC_ISP_EVENT_UNKNOWN; + recovery_type_temp = CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + + if (hw_error & CAM_ISP_HW_ERROR_OVERFLOW) { + err_code |= CAM_REQ_MGR_ISP_ERR_OVERFLOW; + err_type = CAM_SYNC_ISP_EVENT_OVERFLOW; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW) { + err_code |= CAM_REQ_MGR_CSID_FIFO_OVERFLOW_ERROR; + err_type = CAM_SYNC_ISP_EVENT_CSID_OUTPUT_FIFO_OVERFLOW; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW) { + err_code |= CAM_REQ_MGR_CSID_RECOVERY_OVERFLOW_ERROR; + err_type = CAM_SYNC_ISP_EVENT_RECOVERY_OVERFLOW; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_P2I_ERROR) { + err_code |= CAM_REQ_MGR_ISP_ERR_P2I; + err_type = CAM_SYNC_ISP_EVENT_P2I_ERROR; + 
recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_VIOLATION) { + err_code |= CAM_REQ_MGR_ISP_ERR_VIOLATION; + err_type = CAM_SYNC_ISP_EVENT_VIOLATION; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_HWPD_VIOLATION) { + err_code |= CAM_REQ_MGR_ISP_ERR_HWPD_VIOLATION; + err_type = CAM_SYNC_ISP_EVENT_VIOLATION; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_BUSIF_OVERFLOW) { + err_code |= CAM_REQ_MGR_ISP_ERR_BUSIF_OVERFLOW; + err_type = CAM_SYNC_ISP_EVENT_BUSIF_OVERFLOW; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR) { + err_code |= CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING; + err_type = CAM_SYNC_ISP_EVENT_CSID_SENSOR_SWITCH_ERROR; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW) { + err_code |= CAM_REQ_MGR_CSID_LANE_FIFO_OVERFLOW_ERROR; + err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED) { + err_code |= CAM_REQ_MGR_CSID_RX_PKT_HDR_CORRUPTION; + err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA) { + err_code |= CAM_REQ_MGR_CSID_MISSING_PKT_HDR_DATA; + err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME) { + err_code |= CAM_REQ_MGR_CSID_UNBOUNDED_FRAME; + err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_FRAME_SIZE) { + err_code |= CAM_REQ_MGR_CSID_PIXEL_COUNT_MISMATCH; + err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR; + recovery_type_temp |= 
CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_MISSING_EOT) { + err_code |= CAM_REQ_MGR_CSID_MISSING_EOT; + err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED) { + err_code |= CAM_REQ_MGR_CSID_RX_PKT_PAYLOAD_CORRUPTION; + err_type = CAM_SYNC_ISP_EVENT_CSID_RX_ERROR; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_CSID_ILLEGAL_DT_SWITCH) { + err_code |= CAM_REQ_MGR_ISP_ERR_ILLEGAL_DT_SWITCH; + err_type = CAM_SYNC_ISP_EVENT_CSID_SENSOR_SWITCH_ERROR; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + } + if (hw_error & CAM_ISP_HW_ERROR_DRV_VOTEUP_LATE) { + err_code |= CAM_REQ_MGR_ISP_ERR_OVERFLOW; + err_type = CAM_SYNC_ISP_EVENT_OVERFLOW; + recovery_type_temp |= CAM_REQ_MGR_ERROR_TYPE_RECOVERY; + } + + if (recovery_type_temp == (CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY | + CAM_REQ_MGR_ERROR_TYPE_RECOVERY)) + recovery_type_temp = CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY; + + if (!err_code) + err_code = CAM_REQ_MGR_ISP_UNREPORTED_ERROR; + + *req_mgr_err_code = err_code; + *fence_evt_cause = err_type; + *recovery_type = recovery_type_temp; +} + +static bool __cam_isp_ctx_request_can_reapply( + struct cam_isp_ctx_req *req_isp) +{ + int i; + + for (i = 0; i < req_isp->num_fence_map_out; i++) + if (req_isp->fence_map_out[i].sync_id == -1) + return false; + + return true; +} + +static int __cam_isp_ctx_validate_for_req_reapply_util( + struct cam_isp_context *ctx_isp) +{ + int rc = 0; + struct cam_ctx_request *req_temp; + struct cam_ctx_request *req = NULL; + struct cam_isp_ctx_req *req_isp = NULL; + struct cam_context *ctx = ctx_isp->base; + + if (in_task()) + spin_lock_bh(&ctx->lock); + + /* Check for req in active/wait lists */ + if (list_empty(&ctx->active_req_list)) { + CAM_DBG(CAM_ISP, + "Active request list empty for ctx: %u on link: 0x%x", + ctx->ctx_id, 
ctx->link_hdl); + + if (list_empty(&ctx->wait_req_list)) { + CAM_WARN(CAM_ISP, + "No active/wait req for ctx: %u on link: 0x%x start from pending", + ctx->ctx_id, ctx->link_hdl); + rc = 0; + goto end; + } + } + + /* Validate if all fences for active requests are not signaled */ + if (!list_empty(&ctx->active_req_list)) { + list_for_each_entry_safe_reverse(req, req_temp, + &ctx->active_req_list, list) { + /* + * If some fences of the active request are already + * signaled, we should not do recovery for the buffer + * and timestamp consistency. + */ + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + if (!__cam_isp_ctx_request_can_reapply(req_isp)) { + CAM_WARN(CAM_ISP, + "Req: %llu in ctx:%u on link: 0x%x fence has partially signaled, cannot do recovery", + req->request_id, ctx->ctx_id, ctx->link_hdl); + rc = -EINVAL; + goto end; + } + } + } + + /* Move active requests to pending list */ + if (!list_empty(&ctx->active_req_list)) { + list_for_each_entry_safe_reverse(req, req_temp, + &ctx->active_req_list, list) { + list_del_init(&req->list); + __cam_isp_ctx_enqueue_request_in_order(ctx, req, false); + ctx_isp->active_req_cnt--; + CAM_DBG(CAM_ISP, "ctx:%u link:0x%x move active req %llu to pending", + ctx->ctx_id, ctx->link_hdl, req->request_id); + } + } + + /* Move wait requests to pending list */ + if (!list_empty(&ctx->wait_req_list)) { + list_for_each_entry_safe_reverse(req, req_temp, &ctx->wait_req_list, list) { + list_del_init(&req->list); + __cam_isp_ctx_enqueue_request_in_order(ctx, req, false); + CAM_DBG(CAM_ISP, "ctx:%u link:0x%x move wait req %llu to pending", + ctx->ctx_id, ctx->link_hdl, req->request_id); + } + } + +end: + if (in_task()) + spin_unlock_bh(&ctx->lock); + return rc; +} + +static int __cam_isp_ctx_handle_recovery_req_util( + struct cam_isp_context *ctx_isp) +{ + int rc = 0; + struct cam_context *ctx = ctx_isp->base; + struct cam_ctx_request *req_to_reapply = NULL; + + if (list_empty(&ctx->pending_req_list)) { + CAM_WARN(CAM_ISP, + "No 
pending request to recover from on ctx: %u", ctx->ctx_id); + return -EINVAL; + } + + req_to_reapply = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH; + ctx_isp->recovery_req_id = req_to_reapply->request_id; + atomic_set(&ctx_isp->internal_recovery_set, 1); + + CAM_INFO(CAM_ISP, "Notify CRM to reapply req:%llu for ctx:%u link:0x%x", + req_to_reapply->request_id, ctx->ctx_id, ctx->link_hdl); + + rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, + CRM_KMD_WARN_INTERNAL_RECOVERY, req_to_reapply->request_id, + ctx_isp); + if (rc) { + /* Unable to notify CRM to do reapply back to normal */ + CAM_WARN(CAM_ISP, + "ctx:%u link:0x%x unable to notify CRM for req %llu", + ctx->ctx_id, ctx->link_hdl, ctx_isp->recovery_req_id); + ctx_isp->recovery_req_id = 0; + atomic_set(&ctx_isp->internal_recovery_set, 0); + } + + return rc; +} + +static int __cam_isp_ctx_trigger_error_req_reapply( + struct cam_isp_hw_error_event_data *err_event_data, + struct cam_isp_context *ctx_isp) +{ + int rc = 0; + uint32_t err_type = err_event_data->error_type; + struct cam_context *ctx = ctx_isp->base; + + if ((err_type & CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW) && + (isp_ctx_debug.disable_internal_recovery_mask & + CAM_ISP_CTX_DISABLE_RECOVERY_BUS_OVERFLOW)) { + CAM_DBG(CAM_ISP, "Internal recovery for bus overflow is disabled, err_type: 0x%x", + err_type); + err_event_data->try_internal_recovery = false; + return -EINVAL; + } + + if ((err_type & CAM_ISP_RECOVERABLE_CSID_ERRORS) && + (isp_ctx_debug.disable_internal_recovery_mask & + CAM_ISP_CTX_DISABLE_RECOVERY_CSID)) { + CAM_DBG(CAM_ISP, + "Internal recovery for CSID recoverable error is disabled, err_type: 0x%x", + err_type); + err_event_data->try_internal_recovery = false; + return -EINVAL; + } + + /* + * For errors that can be recoverable within kmd, we + * try to do internal hw stop, restart and notify CRM + * to do reapply with the help of bubble control 
flow. + */ + + rc = __cam_isp_ctx_validate_for_req_reapply_util(ctx_isp); + if (rc) + goto end; + + rc = __cam_isp_ctx_handle_recovery_req_util(ctx_isp); + if (rc) + goto end; + + CAM_DBG(CAM_ISP, "Triggered internal recovery for req:%llu ctx:%u on link 0x%x", + ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl); + +end: + return rc; +} + +static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + enum cam_req_mgr_device_error error; + uint32_t i = 0; + bool found = 0; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_to_report = NULL; + struct cam_ctx_request *req_to_dump = NULL; + struct cam_ctx_request *req_temp; + struct cam_isp_ctx_req *req_isp = NULL; + struct cam_isp_ctx_req *req_isp_to_report = NULL; + uint64_t error_request_id; + struct cam_hw_fence_map_entry *fence_map_out = NULL; + uint32_t recovery_type, fence_evt_cause; + uint32_t req_mgr_err_code; + + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_hw_error_event_data *error_event_data = + (struct cam_isp_hw_error_event_data *)evt_data; + + CAM_DBG(CAM_ISP, "Enter HW error_type = %d, ctx:%u on link 0x%x", + error_event_data->error_type, ctx->ctx_id, ctx->link_hdl); + + if (error_event_data->try_internal_recovery) { + rc = __cam_isp_ctx_trigger_error_req_reapply(error_event_data, ctx_isp); + if (!rc) + goto exit; + } + + if (!ctx_isp->offline_context) + __cam_isp_ctx_pause_crm_timer(ctx); + + __cam_isp_ctx_dump_frame_timing_record(ctx_isp); + __cam_isp_ctx_print_event_record(ctx_isp); + __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx, NULL); + + __cam_isp_get_notification_evt_params(error_event_data->error_type, + &fence_evt_cause, &req_mgr_err_code, &recovery_type); + /* + * The error is likely caused by first request on the active list. + * If active list is empty check wait list (maybe error hit as soon + * as RUP and we handle error before RUP. 
+ */ + if (list_empty(&ctx->active_req_list)) { + CAM_DBG(CAM_ISP, + "handling error with no active request, ctx:%u on link 0x%x", + ctx->ctx_id, ctx->link_hdl); + if (list_empty(&ctx->wait_req_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "Error with no active/wait request, ctx:%u on link 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto end; + } else { + req_to_dump = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + } + } else { + req_to_dump = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + } + + req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv; + + if (error_event_data->enable_req_dump) + rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false); + + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id); + + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + if (!req_isp->bubble_report) { + CAM_ERR(CAM_ISP, "signalled error for req %llu, ctx:%u on link 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + for (i = 0; i < req_isp->num_fence_map_out; i++) { + fence_map_out = &req_isp->fence_map_out[i]; + if (req_isp->fence_map_out[i].sync_id != -1) { + CAM_DBG(CAM_ISP, + "req %llu, Sync fd 0x%x ctx %u, link 0x%x", + req->request_id, + req_isp->fence_map_out[i].sync_id, + ctx->ctx_id, ctx->link_hdl); + rc = cam_sync_signal( + fence_map_out->sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + fence_evt_cause); + fence_map_out->sync_id = -1; + } + + if (fence_map_out->early_sync_id > 0) { + rc = cam_sync_signal( + fence_map_out->early_sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + fence_evt_cause); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + fence_map_out->early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + fence_map_out->early_sync_id = -1; + } + } + + 
list_del_init(&req->list); + __cam_isp_ctx_move_req_to_free_list(ctx, req); + ctx_isp->active_req_cnt--; + } else { + found = 1; + break; + } + } + + if (found) + goto move_to_pending; + + list_for_each_entry_safe(req, req_temp, + &ctx->wait_req_list, list) { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + if (!req_isp->bubble_report) { + CAM_ERR(CAM_ISP, "signalled error for req %llu, ctx %u, link 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + for (i = 0; i < req_isp->num_fence_map_out; i++) { + fence_map_out = &req_isp->fence_map_out[i]; + if (req_isp->fence_map_out[i].sync_id != -1) { + CAM_DBG(CAM_ISP, + "req %llu, Sync fd 0x%x ctx %u link 0x%x", + req->request_id, + req_isp->fence_map_out[i].sync_id, + ctx->ctx_id, ctx->link_hdl); + rc = cam_sync_signal( + fence_map_out->sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + fence_evt_cause); + fence_map_out->sync_id = -1; + } + + if (fence_map_out->early_sync_id > 0) { + rc = cam_sync_signal(fence_map_out->early_sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + fence_evt_cause); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + fence_map_out->early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + fence_map_out->early_sync_id = -1; + } + } + + list_del_init(&req->list); + __cam_isp_ctx_move_req_to_free_list(ctx, req); + } else { + found = 1; + break; + } + } + +move_to_pending: + /* + * If bubble recovery is enabled on any request we need to move that + * request and all the subsequent requests to the pending list. + * Note: + * We need to traverse the active list in reverse order and add + * to head of pending list. + * e.g. 
pending current state: 10, 11 | active current state: 8, 9 + * intermittent for loop iteration- pending: 9, 10, 11 | active: 8 + * final state - pending: 8, 9, 10, 11 | active: NULL + */ + if (found) { + list_for_each_entry_safe_reverse(req, req_temp, + &ctx->active_req_list, list) { + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + ctx_isp->active_req_cnt--; + } + list_for_each_entry_safe_reverse(req, req_temp, + &ctx->wait_req_list, list) { + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + } + } + +end: + do { + if (list_empty(&ctx->pending_req_list)) { + error_request_id = ctx_isp->last_applied_req_id; + break; + } + req = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + error_request_id = ctx_isp->last_applied_req_id; + + if (req_isp->bubble_report) { + req_to_report = req; + req_isp_to_report = req_to_report->req_priv; + break; + } + + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (req_isp->fence_map_out[i].sync_id != -1) { + rc = cam_sync_signal( + req_isp->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + fence_evt_cause); + req_isp->fence_map_out[i].sync_id = -1; + } + + if (req_isp->fence_map_out[i].early_sync_id > 0) { + rc = cam_sync_signal(req_isp->fence_map_out[i].early_sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + fence_evt_cause); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[i].early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + req_isp->fence_map_out[i].early_sync_id = -1; + } + } + + list_del_init(&req->list); + __cam_isp_ctx_move_req_to_free_list(ctx, req); + } while (req->request_id < ctx_isp->last_applied_req_id); + + if (ctx_isp->offline_context) + goto exit; + + error = CRM_KMD_ERR_FATAL; + if (req_isp_to_report && 
req_isp_to_report->bubble_report) + if (error_event_data->recovery_enabled) + error = CRM_KMD_ERR_BUBBLE; + + __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, error, + error_request_id, ctx_isp); + + /* + * Need to send error occurred in KMD + * This will help UMD to take necessary action + * and to dump relevant info + */ + if (error == CRM_KMD_ERR_FATAL) + __cam_isp_ctx_notify_v4l2_error_event(recovery_type, + req_mgr_err_code, error_request_id, ctx); + + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR; + CAM_DBG(CAM_ISP, "Handling error done on ctx: %u, link: 0x%x", ctx->ctx_id, ctx->link_hdl); + +exit: + return rc; +} + +static int __cam_isp_ctx_fs2_sof_in_sof_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + struct cam_isp_hw_sof_event_data *sof_event_data = evt_data; + struct cam_ctx_request *req; + struct cam_context *ctx = ctx_isp->base; + uint64_t request_id = 0; + + if (!evt_data) { + CAM_ERR(CAM_ISP, "in valid sof event data, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp); + + CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx: %u, link: 0x%x", + ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id, ctx->link_hdl); + + if (!(list_empty(&ctx->wait_req_list))) + goto end; + + if (ctx_isp->active_req_cnt <= 2) { + __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp); + + list_for_each_entry(req, &ctx->active_req_list, list) { + if (req->request_id > ctx_isp->reported_req_id) { + request_id = req->request_id; + ctx_isp->reported_req_id = request_id; + break; + } + } + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + } + + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id); + +end: + return rc; +} + +static int __cam_isp_ctx_fs2_buf_done(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + struct 
cam_isp_hw_done_event_data *done = + (struct cam_isp_hw_done_event_data *) evt_data; + struct cam_context *ctx = ctx_isp->base; + int prev_active_req_cnt = 0; + int curr_req_id = 0; + struct cam_ctx_request *req; + + prev_active_req_cnt = ctx_isp->active_req_cnt; + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + if (req) + curr_req_id = req->request_id; + + rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0); + + if (prev_active_req_cnt == ctx_isp->active_req_cnt + 1) { + if (list_empty(&ctx->wait_req_list) && + list_empty(&ctx->active_req_list)) { + CAM_DBG(CAM_ISP, "No request, move to SOF, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_activated = + CAM_ISP_CTX_ACTIVATED_SOF; + if (ctx_isp->reported_req_id < curr_req_id) { + ctx_isp->reported_req_id = curr_req_id; + __cam_isp_ctx_send_sof_timestamp(ctx_isp, + curr_req_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + } + } + } + + return rc; +} + +static int __cam_isp_ctx_fs2_buf_done_in_epoch(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + + rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data); + return rc; +} + +static int __cam_isp_ctx_fs2_buf_done_in_applied( + struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + + rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data); + return rc; +} + +static int __cam_isp_ctx_fs2_reg_upd_in_sof(struct cam_isp_context *ctx_isp, + void *evt_data) +{ + int rc = 0; + struct cam_ctx_request *req = NULL; + struct cam_isp_ctx_req *req_isp; + struct cam_context *ctx = ctx_isp->base; + + if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) { + CAM_DBG(CAM_ISP, "invalid RUP"); + goto end; + } + + /* + * This is for the first update. The initial setting will + * cause the reg_upd in the first frame. 
+ */ + if (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + if (req_isp->num_fence_map_out == req_isp->num_acked) + __cam_isp_ctx_move_req_to_free_list(ctx, req); + else + CAM_ERR(CAM_ISP, + "receive rup in unexpected state, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + } + if (req != NULL) { + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, + req->request_id); + } +end: + return rc; +} + +static int __cam_isp_ctx_fs2_reg_upd_in_applied_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + struct cam_ctx_request *req = NULL; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_ctx_req *req_isp; + uint64_t request_id = 0; + + if (list_empty(&ctx->wait_req_list)) { + CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto end; + } + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + if (req_isp->num_fence_map_out != 0) { + list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx:%u,link:0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + } else { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + /* no io config, so the request is completed. */ + __cam_isp_ctx_move_req_to_free_list(ctx, req); + } + + /* + * This function only called directly from applied and bubble applied + * state so change substate here. 
+ */ + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH; + if (req_isp->num_fence_map_out != 1) + goto end; + + if (ctx_isp->active_req_cnt <= 2) { + list_for_each_entry(req, &ctx->active_req_list, list) { + if (req->request_id > ctx_isp->reported_req_id) { + request_id = req->request_id; + ctx_isp->reported_req_id = request_id; + break; + } + } + + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + + __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp); + } + + CAM_DBG(CAM_ISP, "next Substate[%s], ctx_idx: %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated), + ctx->ctx_id, ctx->link_hdl); + +end: + if (req != NULL && !rc) { + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, + req->request_id); + } + return rc; +} + +static void __cam_isp_ctx_notify_aeb_error_for_sec_event( + struct cam_isp_context *ctx_isp) +{ + struct cam_context *ctx = ctx_isp->base; + + if ((++ctx_isp->aeb_error_cnt) <= CAM_ISP_CONTEXT_AEB_ERROR_CNT_MAX) { + CAM_WARN(CAM_ISP, + "AEB slave RDI's current request's SOF seen after next req is applied for ctx: %u on link: 0x%x last_applied_req: %llu err_cnt: %u", + ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id, ctx_isp->aeb_error_cnt); + return; + } + + CAM_ERR(CAM_ISP, + "Fatal - AEB slave RDI's current request's SOF seen after next req is applied, EPOCH height need to be re-configured for ctx: %u on link: 0x%x err_cnt: %u", + ctx->ctx_id, ctx->link_hdl, ctx_isp->aeb_error_cnt); + + /* Pause CRM timer */ + if (!ctx_isp->offline_context) + __cam_isp_ctx_pause_crm_timer(ctx); + + /* Trigger reg dump */ + __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx, NULL); + + /* Notify CRM on fatal error */ + __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_FATAL, + ctx_isp->last_applied_req_id, ctx_isp); + + /* Notify userland on error */ + 
__cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_ERROR_TYPE_RECOVERY, + CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING, ctx_isp->last_applied_req_id, ctx); + + /* Change state to HALT, stop further processing of HW events */ + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT; + + /* Dump AEB debug info */ + __cam_isp_ctx_dump_frame_timing_record(ctx_isp); +} + +static int __cam_isp_ctx_trigger_internal_recovery( + bool sync_frame_drop, struct cam_isp_context *ctx_isp) +{ + int rc = 0; + bool do_recovery = true; + struct cam_context *ctx = ctx_isp->base; + struct cam_ctx_request *req = NULL; + struct cam_isp_ctx_req *req_isp = NULL; + + if (list_empty(&ctx->wait_req_list)) { + /* + * If the wait list is empty, and we encounter a "silent" frame drop + * then the settings applied on the previous frame, did not reflect + * at the next frame boundary, it's expected to latch a frame after. + * No need to recover. If it's an out of sync drop use pending req + */ + if (sync_frame_drop && !list_empty(&ctx->pending_req_list)) + req = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + else + do_recovery = false; + } + + /* If both wait and pending list have no request to recover on */ + if (!do_recovery) { + CAM_WARN(CAM_ISP, + "No request to perform recovery - ctx: %u on link: 0x%x last_applied: %lld last_buf_done: %lld", + ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id, + ctx_isp->req_info.last_bufdone_req_id); + goto end; + } + + if (!req) { + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); + if (req->request_id != ctx_isp->last_applied_req_id) + CAM_WARN(CAM_ISP, + "Top of wait list req: %llu does not match with last applied: %llu in ctx: %u on link: 0x%x", + req->request_id, ctx_isp->last_applied_req_id, + ctx->ctx_id, ctx->link_hdl); + } + + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + /* + * Treat this as bubble, after recovery re-start from appropriate sub-state + * This will block servicing any 
further apply calls from CRM + */ + atomic_set(&ctx_isp->internal_recovery_set, 1); + atomic_set(&ctx_isp->process_bubble, 1); + ctx_isp->recovery_req_id = req->request_id; + + /* Wait for active request's to finish before issuing recovery */ + if (ctx_isp->active_req_cnt) { + req_isp->bubble_detected = true; + CAM_WARN(CAM_ISP, + "Active req cnt: %u wait for all buf dones before kicking in recovery on req: %lld ctx: %u on link: 0x%x", + ctx_isp->active_req_cnt, ctx_isp->recovery_req_id, + ctx->ctx_id, ctx->link_hdl); + } else { + rc = __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, + CRM_KMD_WARN_INTERNAL_RECOVERY, ctx_isp->recovery_req_id, ctx_isp); + if (rc) { + /* Unable to do bubble recovery reset back to normal */ + CAM_WARN(CAM_ISP, + "Unable to perform internal recovery [bubble reporting failed] for req: %llu in ctx: %u on link: 0x%x", + ctx_isp->recovery_req_id, ctx->ctx_id, ctx->link_hdl); + __cam_isp_context_reset_internal_recovery_params(ctx_isp); + goto end; + } + + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE; + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + } + +end: + return rc; +} + +static int __cam_isp_ctx_handle_secondary_events( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + bool recover = false, sync_frame_drop = false; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_hw_secondary_event_data *sec_evt_data = + (struct cam_isp_hw_secondary_event_data *)evt_data; + + /* Current scheme to handle only for custom AEB */ + if (!ctx_isp->aeb_enabled) { + CAM_WARN(CAM_ISP, + "Recovery not supported for non-AEB ctx: %u on link: 0x%x reject sec evt: %u", + ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type); + goto end; + } + + if (atomic_read(&ctx_isp->internal_recovery_set)) { + CAM_WARN(CAM_ISP, + "Internal recovery in progress in ctx: %u on link: 0x%x reject sec evt: %u", + ctx->ctx_id, ctx->link_hdl, sec_evt_data->evt_type); + goto end; + } + + /* + * In case of 
custom AEB ensure first exposure frame has + * not moved forward with its settings without second/third + * expoure frame coming in. Also track for bubble, in case of system + * delays it's possible for the IFE settings to be not written to + * HW on a given frame. If these scenarios occurs flag as error, + * and recover. + */ + switch (sec_evt_data->evt_type) { + case CAM_ISP_HW_SEC_EVENT_SOF: + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF, + ctx_isp->last_applied_req_id); + + __cam_isp_ctx_update_frame_timing_record(CAM_ISP_HW_SECONDARY_EVENT, ctx_isp); + + /* Slave RDI's frame starting post IFE EPOCH - Fatal */ + if ((ctx_isp->substate_activated == + CAM_ISP_CTX_ACTIVATED_APPLIED) || + (ctx_isp->substate_activated == + CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) + __cam_isp_ctx_notify_aeb_error_for_sec_event(ctx_isp); + else + /* Reset error count */ + ctx_isp->aeb_error_cnt = 0; + break; + case CAM_ISP_HW_SEC_EVENT_EPOCH: + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH, + ctx_isp->last_applied_req_id); + ctx_isp->out_of_sync_cnt = 0; + + /* + * Master RDI frame dropped in CSID, due to programming delay no RUP/AUP + * On such occasions use CSID CAMIF EPOCH for bubble detection, flag + * on detection and perform necessary bubble recovery + */ + if ((ctx_isp->substate_activated == + CAM_ISP_CTX_ACTIVATED_APPLIED) || + (ctx_isp->substate_activated == + CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED)) { + recover = true; + CAM_WARN(CAM_ISP, + "Programming delay input frame dropped ctx: %u on link: 0x%x last_applied_req: %llu, kicking in internal recovery....", + ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id); + } + break; + case CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP: + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP, + ctx_isp->last_applied_req_id); + + /* Avoid recovery loop if frame is dropped at stream on */ + if 
(!ctx_isp->frame_id) { + CAM_ERR(CAM_ISP, + "Sensor sync [vc mismatch] frame dropped at stream on ctx: %u link: 0x%x frame_id: %u last_applied_req: %lld", + ctx->ctx_id, ctx->link_hdl, + ctx_isp->frame_id, ctx_isp->last_applied_req_id); + rc = -EPERM; + break; + } + + if (!(ctx_isp->out_of_sync_cnt++) && + (ctx_isp->recovery_req_id == ctx_isp->last_applied_req_id)) { + CAM_WARN(CAM_ISP, + "Sensor sync [vc mismatch] frame dropped ctx: %u on link: 0x%x last_applied_req: %llu last_recovered_req: %llu out_of_sync_cnt: %u, recovery maybe in progress...", + ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id, + ctx_isp->recovery_req_id, ctx_isp->out_of_sync_cnt); + break; + } + + recover = true; + sync_frame_drop = true; + ctx_isp->out_of_sync_cnt = 0; + CAM_WARN(CAM_ISP, + "Sensor sync [vc mismatch] frame dropped ctx: %u on link: 0x%x last_applied_req: %llu last_recovered_req: %llu out_of_sync_cnt: %u, kicking in internal recovery....", + ctx->ctx_id, ctx->link_hdl, ctx_isp->last_applied_req_id, + ctx_isp->recovery_req_id, ctx_isp->out_of_sync_cnt); + break; + default: + break; + } + + if (recover && + !(isp_ctx_debug.disable_internal_recovery_mask & CAM_ISP_CTX_DISABLE_RECOVERY_AEB)) + rc = __cam_isp_ctx_trigger_internal_recovery(sync_frame_drop, ctx_isp); + +end: + return rc; +} + +static struct cam_isp_ctx_irq_ops + cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = { + /* SOF */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_sof, + __cam_isp_ctx_notify_sof_in_activated_state, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_sof, + __cam_isp_ctx_handle_secondary_events, + }, + }, + /* APPLIED */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_applied_state, + __cam_isp_ctx_epoch_in_applied, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_applied, + 
__cam_isp_ctx_handle_secondary_events, + }, + }, + /* EPOCH */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_epoch, + __cam_isp_ctx_reg_upd_in_epoch_bubble_state, + __cam_isp_ctx_notify_sof_in_activated_state, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_epoch, + __cam_isp_ctx_handle_secondary_events, + }, + }, + /* BUBBLE */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_epoch_bubble_state, + __cam_isp_ctx_notify_sof_in_activated_state, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_bubble, + __cam_isp_ctx_handle_secondary_events, + }, + }, + /* Bubble Applied */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_applied_state, + __cam_isp_ctx_epoch_in_bubble_applied, + NULL, + __cam_isp_ctx_buf_done_in_bubble_applied, + __cam_isp_ctx_handle_secondary_events, + }, + }, + /* HW ERROR */ + { + .irq_ops = { + NULL, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_hw_error, + NULL, + NULL, + NULL, + }, + }, + /* HALT */ + { + }, +}; + +static struct cam_isp_ctx_irq_ops + cam_isp_ctx_fs2_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = { + /* SOF */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_fs2_sof_in_sof_state, + __cam_isp_ctx_fs2_reg_upd_in_sof, + __cam_isp_ctx_fs2_sof_in_sof_state, + __cam_isp_ctx_notify_eof_in_activated_state, + NULL, + }, + }, + /* APPLIED */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_fs2_reg_upd_in_applied_state, + __cam_isp_ctx_epoch_in_applied, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_fs2_buf_done_in_applied, + }, + }, + /* EPOCH */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_epoch, + __cam_isp_ctx_reg_upd_in_epoch_bubble_state, + __cam_isp_ctx_notify_sof_in_activated_state, + 
__cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_fs2_buf_done_in_epoch, + }, + }, + /* BUBBLE */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_epoch_bubble_state, + __cam_isp_ctx_notify_sof_in_activated_state, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_bubble, + }, + }, + /* Bubble Applied */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_applied_state, + __cam_isp_ctx_epoch_in_bubble_applied, + NULL, + __cam_isp_ctx_buf_done_in_bubble_applied, + }, + }, + /* HW ERROR */ + { + .irq_ops = { + NULL, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_hw_error, + NULL, + NULL, + NULL, + }, + }, + /* HALT */ + { + }, +}; + +static struct cam_isp_ctx_irq_ops + cam_isp_ctx_offline_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = { + /* SOF */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + NULL, + NULL, + NULL, + NULL, + NULL, + }, + }, + /* APPLIED */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_applied_state, + __cam_isp_ctx_offline_epoch_in_activated_state, + NULL, + __cam_isp_ctx_buf_done_in_applied, + }, + }, + /* EPOCH */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_sof_in_activated_state, + NULL, + __cam_isp_ctx_offline_epoch_in_activated_state, + NULL, + __cam_isp_ctx_buf_done_in_epoch, + }, + }, + /* BUBBLE */ + { + }, + /* Bubble Applied */ + { + }, + /* HW ERROR */ + { + .irq_ops = { + NULL, + __cam_isp_ctx_sof_in_activated_state, + __cam_isp_ctx_reg_upd_in_hw_error, + NULL, + NULL, + NULL, + }, + }, + /* HALT */ + { + }, +}; + +static inline int cam_isp_context_apply_evt_injection(struct cam_context *ctx) +{ + struct cam_isp_context *ctx_isp = ctx->ctx_priv; + struct cam_hw_inject_evt_param *evt_inject_params = &ctx_isp->evt_inject_params; + struct cam_common_evt_inject_data 
inject_evt = {0}; + int rc; + + inject_evt.evt_params = evt_inject_params; + rc = cam_context_apply_evt_injection(ctx, &inject_evt); + if (rc) + CAM_ERR(CAM_ISP, "Fail to apply event injection ctx_id: %u link: 0x%x req_id: %u", + ctx->ctx_id, ctx->link_hdl, evt_inject_params->req_id); + + evt_inject_params->is_valid = false; + + return rc; +} + +static inline void __cam_isp_ctx_update_fcg_prediction_idx( + struct cam_context *ctx, + uint64_t request_id, + struct cam_isp_fcg_prediction_tracker *fcg_tracker, + struct cam_isp_fcg_config_info *fcg_info) +{ + struct cam_isp_context *ctx_isp = ctx->ctx_priv; + struct cam_isp_fcg_caps *fcg_caps = fcg_tracker->fcg_caps; + uint32_t max_fcg_predictions; + + /* Max FCG predictions in SFE/IFE/MC_TFE are the same, use one of them here */ + max_fcg_predictions = fcg_caps->max_ife_fcg_predictions; + + if ((fcg_tracker->sum_skipped == 0) || + (fcg_tracker->sum_skipped > max_fcg_predictions)) { + fcg_info->use_current_cfg = true; + CAM_DBG(CAM_ISP, + "Apply req: %llu, Use current FCG value, frame_id: %llu, ctx_id: %u", + request_id, ctx_isp->frame_id, ctx->ctx_id); + } else { + fcg_info->prediction_idx = fcg_tracker->sum_skipped; + CAM_DBG(CAM_ISP, + "Apply req: %llu, FCG prediction: %u, frame_id: %llu, ctx_id: %u", + request_id, fcg_tracker->sum_skipped, + ctx_isp->frame_id, ctx->ctx_id); + } +} + +static inline void __cam_isp_ctx_print_fcg_tracker( + struct cam_isp_fcg_prediction_tracker *fcg_tracker) +{ + uint32_t skipped_list[CAM_ISP_AFD_PIPELINE_DELAY]; + struct cam_isp_skip_frame_info *skip_info; + int i = 0; + + list_for_each_entry(skip_info, + &fcg_tracker->skipped_list, list) { + skipped_list[i] = skip_info->num_frame_skipped; + i += 1; + } + + CAM_DBG(CAM_ISP, + "FCG tracker num_skipped: %u, sum_skipped: %u, skipped list: [%u, %u, %u]", + fcg_tracker->num_skipped, fcg_tracker->sum_skipped, + skipped_list[0], skipped_list[1], skipped_list[2]); +} + +static int __cam_isp_ctx_apply_req_in_activated_state( + struct 
cam_context *ctx, struct cam_req_mgr_apply_request *apply, + enum cam_isp_ctx_activated_substate next_state) +{ + int rc = 0; + struct cam_ctx_request *req; + struct cam_ctx_request *active_req = NULL; + struct cam_isp_ctx_req *req_isp; + struct cam_isp_ctx_req *active_req_isp; + struct cam_isp_context *ctx_isp = NULL; + struct cam_hw_config_args cfg = {0}; + struct cam_isp_skip_frame_info *skip_info; + struct cam_isp_fcg_prediction_tracker *fcg_tracker; + struct cam_isp_fcg_config_info *fcg_info; + + ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; + + /* Reset mswitch ref cnt */ + atomic_set(&ctx_isp->mswitch_default_apply_delay_ref_cnt, + ctx_isp->mswitch_default_apply_delay_max_cnt); + + if (apply->re_apply) + if (apply->request_id <= ctx_isp->last_applied_req_id) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "ctx_id:%u link: 0x%x Trying to reapply the same request %llu again", + ctx->ctx_id, ctx->link_hdl, + apply->request_id); + return 0; + } + + if (list_empty(&ctx->pending_req_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "ctx_id:%u link: 0x%x No available request for Apply id %lld", + ctx->ctx_id, ctx->link_hdl, + apply->request_id); + rc = -EFAULT; + goto end; + } + + /* + * When the pipeline has issue, the requests can be queued up in the + * pipeline. In this case, we should reject the additional request. + * The maximum number of request allowed to be outstanding is 2. + * + */ + if (atomic_read(&ctx_isp->process_bubble)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "ctx_id:%u link: 0x%x Processing bubble cannot apply Request Id %llu", + ctx->ctx_id, ctx->link_hdl, + apply->request_id); + rc = -EFAULT; + goto end; + } + + /* + * When isp processing internal recovery, the crm may still apply + * req to isp ctx. In this case, we should reject this req apply. 
+ */ + if (atomic_read(&ctx_isp->internal_recovery_set)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "ctx_id:%u link: 0x%x Processing recovery cannot apply Request Id %lld", + ctx->ctx_id, ctx->link_hdl, + apply->request_id); + rc = -EAGAIN; + goto end; + } + + spin_lock_bh(&ctx->lock); + req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, + list); + + if (!list_empty(&ctx->active_req_list)) + active_req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + spin_unlock_bh(&ctx->lock); + + /* + * Check whether the request id is matching the tip, if not, this means + * we are in the middle of the error handling. Need to reject this apply + */ + if (req->request_id != apply->request_id) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "ctx_id:%u link: 0x%x Invalid Request Id asking %llu existing %llu", + ctx->ctx_id, ctx->link_hdl, + apply->request_id, req->request_id); + rc = -EFAULT; + goto end; + } + + CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u, link: 0x%x", + req->request_id, + __cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated), + ctx->ctx_id, ctx->link_hdl); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + if (active_req) + active_req_isp = (struct cam_isp_ctx_req *) active_req->req_priv; + + if (ctx_isp->active_req_cnt >= 2) { + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Reject apply request (id %lld) due to congestion(cnt = %d) ctx %u, link: 0x%x", + req->request_id, + ctx_isp->active_req_cnt, + ctx->ctx_id, ctx->link_hdl); + + if (active_req) + __cam_isp_ctx_handle_buf_done_fail_log(ctx_isp, + active_req->request_id, active_req_isp); + + rc = -EFAULT; + goto end; + } + + + if (active_req) { + __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_PER_REQ, ctx, + &active_req_isp->hw_update_data); + active_req_isp->is_reg_dump_triggered = true; + } + + /* Reset congestion counter */ + ctx_isp->congestion_cnt = 0; + + req_isp->bubble_report = apply->report_if_bubble; + + /* + * Reset all buf done/bubble flags for the 
req being applied + * If internal recovery has led to re-apply of same + * request, clear all stale entities + */ + req_isp->num_acked = 0; + req_isp->num_deferred_acks = 0; + req_isp->cdm_reset_before_apply = false; + req_isp->bubble_detected = false; + + cfg.ctxt_to_hw_map = ctx_isp->hw_ctx; + cfg.request_id = req->request_id; + cfg.hw_update_entries = req_isp->cfg; + cfg.num_hw_update_entries = req_isp->num_cfg; + cfg.priv = &req_isp->hw_update_data; + cfg.init_packet = 0; + cfg.reapply_type = req_isp->reapply_type; + cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply; + + req_isp->hw_update_data.force_disable_drv = apply->frame_duration_changing; + + if ((ctx_isp->evt_inject_params.is_valid) && + (req->request_id == ctx_isp->evt_inject_params.req_id)) { + rc = cam_isp_context_apply_evt_injection(ctx_isp->base); + if (!rc) + goto end; + } + + /* Decide the exact FCG prediction */ + fcg_tracker = &ctx_isp->fcg_tracker; + fcg_info = &req_isp->hw_update_data.fcg_info; + if (!list_empty(&fcg_tracker->skipped_list)) { + __cam_isp_ctx_print_fcg_tracker(fcg_tracker); + skip_info = list_first_entry(&fcg_tracker->skipped_list, + struct cam_isp_skip_frame_info, list); + fcg_tracker->sum_skipped -= skip_info->num_frame_skipped; + if (unlikely((uint32_t)UINT_MAX - fcg_tracker->sum_skipped < + fcg_tracker->num_skipped)) + fcg_tracker->num_skipped = + (uint32_t)UINT_MAX - fcg_tracker->sum_skipped; + fcg_tracker->sum_skipped += fcg_tracker->num_skipped; + skip_info->num_frame_skipped = fcg_tracker->num_skipped; + fcg_tracker->num_skipped = 0; + list_rotate_left(&fcg_tracker->skipped_list); + + __cam_isp_ctx_print_fcg_tracker(fcg_tracker); + __cam_isp_ctx_update_fcg_prediction_idx(ctx, + apply->request_id, fcg_tracker, fcg_info); + } + + atomic_set(&ctx_isp->apply_in_progress, 1); + + rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (!rc) { + spin_lock_bh(&ctx->lock); + atomic_set(&ctx_isp->last_applied_default, 0); + 
ctx_isp->substate_activated = next_state; + ctx_isp->last_applied_req_id = apply->request_id; + ctx_isp->last_applied_jiffies = jiffies; + list_del_init(&req->list); + if (atomic_read(&ctx_isp->internal_recovery_set)) + __cam_isp_ctx_enqueue_request_in_order(ctx, req, false); + else + list_add_tail(&req->list, &ctx->wait_req_list); + CAM_DBG(CAM_ISP, "new Substate[%s], applied req %lld, ctx_idx: %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type(next_state), + ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl); + + if (apply->last_applied_done_timestamp && + ((ctx_isp->boot_timestamp - CAM_CRM_SENSOR_APPLIY_DELAY_THRESHOLD) < + apply->last_applied_done_timestamp)) { + CAM_WARN(CAM_ISP, + "Apply delayed req:%llu, link:0x%x, timestamp sof:0x%llx applied:0x%llx", + apply->request_id, ctx->link_hdl, + ctx_isp->boot_timestamp, apply->last_applied_done_timestamp); + /* Notify userland on request error due to setting mismatched */ + __cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_ERROR_TYPE_REQUEST, + CAM_REQ_MGR_ISP_ERR_SETTING_MISMATCHED, apply->request_id, ctx); + } + spin_unlock_bh(&ctx->lock); + + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED, + req->request_id); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_APPLY, req, NULL); + } else if (rc == -EALREADY) { + spin_lock_bh(&ctx->lock); + req_isp->bubble_detected = true; + req_isp->cdm_reset_before_apply = false; + atomic_set(&ctx_isp->process_bubble, 1); + list_del_init(&req->list); + list_add(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + spin_unlock_bh(&ctx->lock); + CAM_DBG(CAM_REQ, + "move request %lld to active list(cnt = %d), ctx %u, link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + } else { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "ctx_id:%u link: 0x%x,Can not apply (req %lld) the configuration, rc %d", + ctx->ctx_id, ctx->link_hdl, apply->request_id, rc); + } + + 
atomic_set(&ctx_isp->apply_in_progress, 0); +end: + return rc; +} + +static int __cam_isp_ctx_apply_req_in_sof( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + CAM_DBG(CAM_ISP, "current Substate[%s], ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply, + CAM_ISP_CTX_ACTIVATED_APPLIED); + CAM_DBG(CAM_ISP, "new Substate[%s], ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + if (rc) + CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d, ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), rc, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_apply_req_in_epoch( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + CAM_DBG(CAM_ISP, "current Substate[%s], ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply, + CAM_ISP_CTX_ACTIVATED_APPLIED); + CAM_DBG(CAM_ISP, "new Substate[%s], ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + if (rc) + CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d, ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), rc, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_apply_req_in_bubble( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + CAM_DBG(CAM_ISP, "current 
Substate[%s], ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply, + CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED); + CAM_DBG(CAM_ISP, "new Substate[%s], ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + if (rc) + CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d, ctx %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), rc, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static void __cam_isp_ctx_find_mup_for_default_settings( + int64_t req_id, struct cam_context *ctx, + struct cam_ctx_request **switch_req) +{ + struct cam_ctx_request *req, *temp_req; + + if (list_empty(&ctx->pending_req_list)) { + CAM_DBG(CAM_ISP, + "Pending list empty, unable to find mup for req: %lld ctx: %u", + req_id, ctx->ctx_id); + return; + } + + list_for_each_entry_safe( + req, temp_req, &ctx->pending_req_list, list) { + if (req->request_id == req_id) { + struct cam_isp_ctx_req *req_isp = (struct cam_isp_ctx_req *)req->req_priv; + + if (req_isp->hw_update_data.mup_en) { + *switch_req = req; + CAM_DBG(CAM_ISP, + "Found mup for last applied max pd req: %lld in ctx: %u", + req_id, ctx->ctx_id); + } + } + } +} + +static int __cam_isp_ctx_apply_default_req_settings( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + bool skip_rup_aup = false; + struct cam_ctx_request *req = NULL; + struct cam_isp_ctx_req *req_isp = NULL; + struct cam_isp_context *isp_ctx = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + struct cam_hw_config_args cfg = {0}; + + if (isp_ctx->mode_switch_en && isp_ctx->handle_mswitch) { + if ((apply->last_applied_max_pd_req > 0) && + (atomic_dec_and_test(&isp_ctx->mswitch_default_apply_delay_ref_cnt))) { + 
__cam_isp_ctx_find_mup_for_default_settings( + apply->last_applied_max_pd_req, ctx, &req); + } + + if (req) { + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + + CAM_DBG(CAM_ISP, + "Applying IQ for mode switch req: %lld ctx: %u", + req->request_id, ctx->ctx_id); + cfg.ctxt_to_hw_map = isp_ctx->hw_ctx; + cfg.request_id = req->request_id; + cfg.hw_update_entries = req_isp->cfg; + cfg.num_hw_update_entries = req_isp->num_cfg; + cfg.priv = &req_isp->hw_update_data; + cfg.init_packet = 0; + cfg.reapply_type = CAM_CONFIG_REAPPLY_IQ; + cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply; + + rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to apply req: %lld IQ settings in ctx: %u", + req->request_id, ctx->ctx_id); + goto end; + } + skip_rup_aup = true; + + /* IQ applied for this request, on next trigger skip IQ cfg */ + req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO; + } + } + + if (isp_ctx->use_default_apply) { + hw_cmd_args.ctxt_to_hw_map = isp_ctx->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.u.default_cfg_params.last_applied_max_pd_req = + apply->last_applied_max_pd_req; + isp_hw_cmd_args.u.default_cfg_params.force_disable_drv = + apply->frame_duration_changing; + isp_hw_cmd_args.cmd_type = + CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG; + + isp_hw_cmd_args.cmd_data = (void *)&skip_rup_aup; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to apply default settings rc %d ctx %u, link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + else + CAM_DBG(CAM_ISP, "Applied default settings rc %d ctx: %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + } + +end: + return rc; +} + +static void __cam_isp_ctx_handle_reg_dump(struct cam_context *ctx) +{ + struct cam_ctx_request *active_req = NULL; + struct cam_isp_ctx_req *active_req_isp = NULL; + + 
spin_lock_bh(&ctx->lock); + if (!list_empty(&ctx->active_req_list)) + active_req = list_first_entry(&ctx->active_req_list, struct cam_ctx_request, list); + spin_unlock_bh(&ctx->lock); + + if (active_req) { + active_req_isp = (struct cam_isp_ctx_req *) active_req->req_priv; + + CAM_DBG(CAM_ISP, "Handling reg dump for active req %llu, ctx %d", + active_req->request_id, ctx->ctx_id); + if (!active_req_isp->is_reg_dump_triggered) { + __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_PER_REQ, ctx, + &active_req_isp->hw_update_data); + active_req_isp->is_reg_dump_triggered = true; + } + } else { + CAM_DBG(CAM_ISP, "No active req, hence not doing reg_dump for ctx id %d", + ctx->ctx_id); + } +} + +static void *cam_isp_ctx_user_dump_req_list( + void *dump_struct, uint8_t *addr_ptr) +{ + struct list_head *head = NULL; + uint64_t *addr; + struct cam_ctx_request *req, *req_temp; + + head = (struct list_head *)dump_struct; + + addr = (uint64_t *)addr_ptr; + + if (!list_empty(head)) { + list_for_each_entry_safe(req, req_temp, head, list) { + *addr++ = req->request_id; + } + } + + return addr; +} + +static void *cam_isp_ctx_user_dump_active_requests( + void *dump_struct, uint8_t *addr_ptr) +{ + uint64_t *addr; + struct cam_ctx_request *req; + + req = (struct cam_ctx_request *)dump_struct; + + addr = (uint64_t *)addr_ptr; + *addr++ = req->request_id; + return addr; +} + +static int __cam_isp_ctx_dump_req_info( + struct cam_context *ctx, + struct cam_ctx_request *req, + struct cam_common_hw_dump_args *dump_args) +{ + int i, rc = 0; + uint32_t min_len; + size_t remain_len; + struct cam_isp_ctx_req *req_isp; + struct cam_ctx_request *req_temp; + + if (!req || !ctx || !dump_args) { + CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %pK", + req, ctx, dump_args); + return -EINVAL; + } + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_ISP, + "Dump buffer overshoot len %zu offset %zu, ctx_idx: %u, link: 0x%x", + 
dump_args->buf_len, dump_args->offset, ctx->ctx_id, ctx->link_hdl); + return -ENOSPC; + } + + remain_len = dump_args->buf_len - dump_args->offset; + min_len = sizeof(struct cam_isp_context_dump_header) + + (CAM_ISP_CTX_DUMP_REQUEST_NUM_WORDS * + req_isp->num_fence_map_out * + sizeof(uint64_t)); + + if (remain_len < min_len) { + CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u, ctx_idx: %u, link: 0x%x", + remain_len, min_len, ctx->ctx_id, ctx->link_hdl); + return -ENOSPC; + } + + /* Dump pending request list */ + rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list, + &ctx->pending_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_PENDING_REQUESTS:"); + if (rc) { + CAM_ERR(CAM_ISP, + "CAM_ISP_CONTEXT:Pending request dump failed, rc:%d, ctx:%u, link:0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + + /* Dump applied request list */ + rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list, + &ctx->wait_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_APPLIED_REQUESTS:"); + if (rc) { + CAM_ERR(CAM_ISP, + "CAM_ISP_CONTEXT: Applied request dump failed, rc:%d, ctx:%u, link:0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + + /* Dump active request list */ + rc = cam_common_user_dump_helper(dump_args, cam_isp_ctx_user_dump_req_list, + &ctx->active_req_list, sizeof(uint64_t), "ISP_OUT_FENCE_ACTIVE_REQUESTS:"); + if (rc) { + CAM_ERR(CAM_ISP, + "CAM_ISP_CONTEXT: Active request dump failed, rc:%d, ctx:%u, link:0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + + /* Dump active request fences */ + if (!list_empty(&ctx->active_req_list)) { + list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) { + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + for (i = 0; i < req_isp->num_fence_map_out; i++) { + rc = cam_common_user_dump_helper(dump_args, + cam_isp_ctx_user_dump_active_requests, + req, sizeof(uint64_t), + "ISP_OUT_FENCE_REQUEST_ACTIVE.%s.%u.%d:", + 
__cam_isp_ife_sfe_resource_handle_id_to_type( + req_isp->fence_map_out[i].resource_handle), + req_isp->fence_map_out[i].image_buf_addr[0], + req_isp->fence_map_out[i].sync_id); + + if (rc) { + CAM_ERR(CAM_ISP, + "CAM_ISP_CONTEXT DUMP_REQ_INFO: Dump failed, rc: %d, ctx_idx: %u, link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + } + } + } + + return rc; +} + +static void *cam_isp_ctx_user_dump_timer( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_ctx_request *req = NULL; + struct cam_isp_ctx_req *req_isp = NULL; + uint64_t *addr; + ktime_t cur_time; + + req = (struct cam_ctx_request *)dump_struct; + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + cur_time = ktime_get(); + + addr = (uint64_t *)addr_ptr; + + *addr++ = req->request_id; + *addr++ = ktime_to_timespec64( + req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_sec; + *addr++ = ktime_to_timespec64( + req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]).tv_nsec / NSEC_PER_USEC; + *addr++ = ktime_to_timespec64(cur_time).tv_sec; + *addr++ = ktime_to_timespec64(cur_time).tv_nsec / NSEC_PER_USEC; + return addr; +} + +static void *cam_isp_ctx_user_dump_stream_info( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_context *ctx = NULL; + int32_t *addr; + + ctx = (struct cam_context *)dump_struct; + + addr = (int32_t *)addr_ptr; + + *addr++ = ctx->ctx_id; + *addr++ = ctx->dev_hdl; + *addr++ = ctx->link_hdl; + + return addr; +} + +static int __cam_isp_ctx_dump_in_top_state( + struct cam_context *ctx, + struct cam_req_mgr_dump_info *dump_info) +{ + int rc = 0; + bool dump_only_event_record = false; + size_t buf_len; + size_t remain_len; + ktime_t cur_time; + uint32_t min_len; + uint64_t diff; + uintptr_t cpu_addr; + uint8_t req_type; + struct cam_isp_context *ctx_isp; + struct cam_ctx_request *req = NULL; + struct cam_isp_ctx_req *req_isp; + struct cam_ctx_request *req_temp; + struct cam_hw_dump_args ife_dump_args; + struct cam_common_hw_dump_args dump_args; + struct cam_hw_cmd_args 
hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + + rc = cam_mem_get_cpu_buf(dump_info->buf_handle, + &cpu_addr, &buf_len); + if (rc) { + CAM_ERR(CAM_ISP, "Invalid handle %u rc %d, ctx_idx: %u, link: 0x%x", + dump_info->buf_handle, rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + + spin_lock_bh(&ctx->lock); + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + if (req->request_id == dump_info->req_id) { + CAM_INFO(CAM_ISP, "isp dump active list req: %lld, ctx_idx: %u, link: 0x%x", + dump_info->req_id, ctx->ctx_id, ctx->link_hdl); + req_type = 'a'; + goto hw_dump; + } + } + list_for_each_entry_safe(req, req_temp, + &ctx->wait_req_list, list) { + if (req->request_id == dump_info->req_id) { + CAM_INFO(CAM_ISP, "isp dump wait list req: %lld, ctx_idx: %u, link: 0x%x", + dump_info->req_id, ctx->ctx_id, ctx->link_hdl); + req_type = 'w'; + goto hw_dump; + } + } + list_for_each_entry_safe(req, req_temp, + &ctx->pending_req_list, list) { + if (req->request_id == dump_info->req_id) { + CAM_INFO(CAM_ISP, + "isp dump pending list req: %lld, ctx_idx: %u, link: 0x%x", + dump_info->req_id, ctx->ctx_id, ctx->link_hdl); + req_type = 'p'; + goto hw_dump; + } + } + ctx_isp = (struct cam_isp_context *)ctx->ctx_priv; + __cam_isp_ctx_print_event_record(ctx_isp); + spin_unlock_bh(&ctx->lock); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return 0; + +hw_dump: + if (buf_len <= dump_info->offset) { + spin_unlock_bh(&ctx->lock); + CAM_WARN(CAM_ISP, + "Dump buffer overshoot len %zu offset %zu, ctx_idx: %u, link: 0x%x", + buf_len, dump_info->offset, ctx->ctx_id, ctx->link_hdl); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return -ENOSPC; + } + + remain_len = buf_len - dump_info->offset; + min_len = sizeof(struct cam_isp_context_dump_header) + + (CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(uint64_t)); + + if (remain_len < min_len) { + CAM_WARN(CAM_ISP, + "Dump buffer exhaust remain %zu min %u, ctx_idx: %u, link: 0x%x", + remain_len, min_len, ctx->ctx_id, 
ctx->link_hdl); + spin_unlock_bh(&ctx->lock); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return -ENOSPC; + } + + ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + cur_time = ktime_get(); + diff = ktime_us_delta( + req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY], + cur_time); + __cam_isp_ctx_print_event_record(ctx_isp); + if (diff < CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD) { + CAM_INFO(CAM_ISP, "req %lld found no error, ctx_idx: %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + dump_only_event_record = true; + } + + dump_args.req_id = dump_info->req_id; + dump_args.cpu_addr = cpu_addr; + dump_args.buf_len = buf_len; + dump_args.offset = dump_info->offset; + dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + + /* Dump time info */ + rc = cam_common_user_dump_helper(&dump_args, cam_isp_ctx_user_dump_timer, + req, sizeof(uint64_t), "ISP_CTX_DUMP.%c:", req_type); + if (rc) { + CAM_ERR(CAM_ISP, "Time dump fail %lld, rc: %d, ctx_idx: %u, link: 0x%x", + req->request_id, rc, ctx->ctx_id, ctx->link_hdl); + goto end; + } + dump_info->offset = dump_args.offset; + + min_len = sizeof(struct cam_isp_context_dump_header) + + (CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(int32_t)); + remain_len = buf_len - dump_info->offset; + if (remain_len < min_len) { + CAM_WARN(CAM_ISP, + "Dump buffer exhaust remain %zu min %u, ctx_idx: %u, link: 0x%x", + remain_len, min_len, ctx->ctx_id, ctx->link_hdl); + spin_unlock_bh(&ctx->lock); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return -ENOSPC; + } + + /* Dump stream info */ + ctx->ctxt_to_hw_map = ctx_isp->hw_ctx; + if (ctx->hw_mgr_intf->hw_dump) { + /* Dump first part of stream info from isp context */ + rc = cam_common_user_dump_helper(&dump_args, + cam_isp_ctx_user_dump_stream_info, ctx, + sizeof(int32_t), "ISP_STREAM_INFO_FROM_CTX:"); + if (rc) { + CAM_ERR(CAM_ISP, + "ISP CTX stream info dump fail %lld, rc: %d, ctx: %u, link: 0x%x", + req->request_id, rc, ctx->ctx_id, 
ctx->link_hdl); + goto end; + } + + dump_info->offset = dump_args.offset; + remain_len = buf_len - dump_info->offset; + if (remain_len < min_len) { + CAM_WARN(CAM_ISP, + "Dump buffer exhaust remain %zu min %u, ctx_idx: %u, link: 0x%x", + remain_len, min_len, ctx->ctx_id, ctx->link_hdl); + spin_unlock_bh(&ctx->lock); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return -ENOSPC; + } + + /* Dump second part of stream info from ife hw manager */ + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_DUMP_STREAM_INFO; + isp_hw_cmd_args.cmd_data = &dump_args; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, + "IFE HW MGR stream info dump fail %lld, rc: %d, ctx: %u, link: 0x%x", + req->request_id, rc, ctx->ctx_id, ctx->link_hdl); + goto end; + } + + dump_info->offset = dump_args.offset; + } + + /* Dump event record */ + rc = __cam_isp_ctx_dump_event_record(ctx_isp, &dump_args); + if (rc) { + CAM_ERR(CAM_ISP, "Event record dump fail %lld, rc: %d, ctx_idx: %u, link: 0x%x", + req->request_id, rc, ctx->ctx_id, ctx->link_hdl); + goto end; + } + dump_info->offset = dump_args.offset; + if (dump_only_event_record) { + goto end; + } + + /* Dump state monitor array */ + rc = __cam_isp_ctx_user_dump_state_monitor_array(ctx_isp, &dump_args); + if (rc) { + CAM_ERR(CAM_ISP, "Dump event fail %lld, rc: %d, ctx_idx: %u, link: 0x%x", + req->request_id, rc, ctx->ctx_id, ctx->link_hdl); + goto end; + } + + /* Dump request info */ + rc = __cam_isp_ctx_dump_req_info(ctx, req, &dump_args); + if (rc) { + CAM_ERR(CAM_ISP, "Dump Req info fail %lld, rc: %d, ctx_idx: %u, link: 0x%x", + req->request_id, rc, ctx->ctx_id, ctx->link_hdl); + goto end; + } + spin_unlock_bh(&ctx->lock); + + /* Dump CSID, VFE, and SFE info */ + dump_info->offset = dump_args.offset; + if (ctx->hw_mgr_intf->hw_dump) 
{ + ife_dump_args.offset = dump_args.offset; + ife_dump_args.request_id = dump_info->req_id; + ife_dump_args.buf_handle = dump_info->buf_handle; + ife_dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + rc = ctx->hw_mgr_intf->hw_dump( + ctx->hw_mgr_intf->hw_mgr_priv, + &ife_dump_args); + dump_info->offset = ife_dump_args.offset; + } + cam_mem_put_cpu_buf(dump_info->buf_handle); + return rc; +end: + spin_unlock_bh(&ctx->lock); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return rc; +} + +static int __cam_isp_ctx_flush_req_in_flushed_state( + struct cam_context *ctx, + struct cam_req_mgr_flush_request *flush_req) +{ + struct cam_isp_context *ctx_isp; + + if (flush_req->req_id > ctx->last_flush_req) + ctx->last_flush_req = flush_req->req_id; + + + ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; + ctx_isp->standby_en = flush_req->enable_sensor_standby; + + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { + if (ctx->state <= CAM_CTX_READY) { + ctx->state = CAM_CTX_ACQUIRED; + goto end; + } + + spin_lock_bh(&ctx->lock); + ctx->state = CAM_CTX_FLUSHED; + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT; + spin_unlock_bh(&ctx->lock); + + CAM_INFO(CAM_ISP, "Last request id to flush is %lld, ctx_id:%u link: 0x%x", + flush_req->req_id, ctx->ctx_id, ctx->link_hdl); + ctx->last_flush_req = flush_req->req_id; + + __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH, ctx, NULL); + + /* + * Just clear the lists in case there is a flush just after the + * init packet without sending the actual packets. + * Reaching here in flush state means hw reset has been done in the + * last flush call. In such cases, we need to clear the list else + * it may result in stale entried for upcoming configure requests and + * corrupt the packets. 
+ */ + spin_lock_bh(&ctx->lock); + if (!list_empty(&ctx->wait_req_list)) + __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list, + flush_req); + + if (!list_empty(&ctx->active_req_list)) + __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list, + flush_req); + + ctx_isp->active_req_cnt = 0; + + if (!list_empty(&ctx->pending_req_list)) + __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req); + + ctx_isp->init_received = false; + spin_unlock_bh(&ctx->lock); + } + +end: + ctx_isp->bubble_frame_cnt = 0; + ctx_isp->congestion_cnt = 0; + ctx_isp->sof_dbg_irq_en = false; + ctx_isp->num_inits_post_flush = 0; + atomic_set(&ctx_isp->process_bubble, 0); + atomic_set(&ctx_isp->rxd_epoch, 0); + atomic_set(&ctx_isp->internal_recovery_set, 0); + return 0; +} + +static int __cam_isp_ctx_flush_req(struct cam_context *ctx, + struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req) +{ + int i, rc, tmp = 0; + struct cam_ctx_request *req; + struct cam_ctx_request *req_temp; + struct cam_isp_ctx_req *req_isp; + struct list_head flush_list; + struct cam_isp_context *ctx_isp = NULL; + + ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; + + INIT_LIST_HEAD(&flush_list); + if (list_empty(req_list)) { + CAM_DBG(CAM_ISP, "request list is empty, ctx_id:%u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + CAM_INFO(CAM_ISP, + "no request to cancel(lastapplied:%lld cancel:%lld),ctx:%u link:0x%x", + ctx_isp->last_applied_req_id, flush_req->req_id, + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } else + return 0; + } + + CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu, ctx_id:%u link: 0x%x", + flush_req->type, flush_req->req_id, ctx->ctx_id, ctx->link_hdl); + list_for_each_entry_safe(req, req_temp, req_list, list) { + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + if (req->request_id != flush_req->req_id) { + continue; + } else { + list_del_init(&req->list); + list_add_tail(&req->list, 
&flush_list); + __cam_isp_ctx_update_state_monitor_array( + ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, + req->request_id); + break; + } + } + list_del_init(&req->list); + list_add_tail(&req->list, &flush_list); + __cam_isp_ctx_update_state_monitor_array(ctx_isp, + CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, req->request_id); + } + + if (list_empty(&flush_list)) { + /* + * Maybe the req isn't sent to KMD since UMD already skip + * req in CSL layer. + */ + CAM_INFO(CAM_ISP, + "flush list is empty, flush type %d for req %llu, ctx_id:%u link: 0x%x", + flush_req->type, flush_req->req_id, ctx->ctx_id, ctx->link_hdl); + return 0; + } + + list_for_each_entry_safe(req, req_temp, &flush_list, list) { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (req_isp->fence_map_out[i].sync_id != -1) { + CAM_DBG(CAM_ISP, + "Flush req 0x%llx, fence %d, ctx_id:%u link: 0x%x", + req->request_id, + req_isp->fence_map_out[i].sync_id, + ctx->ctx_id, ctx->link_hdl); + rc = cam_sync_signal( + req_isp->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_ISP_EVENT_FLUSH); + if (rc) { + tmp = req_isp->fence_map_out[i].sync_id; + CAM_ERR_RATE_LIMIT(CAM_ISP, + "signal fence %d failed, ctx_id:%u link: 0x%x", + tmp, ctx->ctx_id, ctx->link_hdl); + } + + req_isp->fence_map_out[i].sync_id = -1; + } + + if (req_isp->fence_map_out[i].early_sync_id > 0) { + rc = cam_sync_signal( + req_isp->fence_map_out[i].early_sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_ISP_EVENT_FLUSH); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[i].early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + req_isp->fence_map_out[i].early_sync_id = -1; + } + } + + req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE; + req_isp->cdm_reset_before_apply = false; + list_del_init(&req->list); + 
__cam_isp_ctx_move_req_to_free_list(ctx, req); + } + + return 0; +} + +static inline void __cam_isp_ctx_reset_fcg_tracker( + struct cam_context *ctx) +{ + struct cam_isp_context *ctx_isp; + struct cam_isp_skip_frame_info *skip_info; + + ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; + + /* Reset skipped_list for FCG config */ + ctx_isp->fcg_tracker.sum_skipped = 0; + ctx_isp->fcg_tracker.num_skipped = 0; + list_for_each_entry(skip_info, &ctx_isp->fcg_tracker.skipped_list, list) + skip_info->num_frame_skipped = 0; + CAM_DBG(CAM_ISP, "Reset FCG skip info on ctx %u link: %x", + ctx->ctx_id, ctx->link_hdl); +} + +static int __cam_isp_ctx_flush_req_in_top_state( + struct cam_context *ctx, + struct cam_req_mgr_flush_request *flush_req) +{ + int rc = 0; + struct cam_isp_context *ctx_isp; + struct cam_isp_stop_args stop_isp; + struct cam_hw_stop_args stop_args; + struct cam_hw_reset_args reset_args; + struct cam_req_mgr_timer_notify timer; + + ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; + ctx_isp->standby_en = flush_req->enable_sensor_standby; + + /* Reset skipped_list for FCG config */ + __cam_isp_ctx_reset_fcg_tracker(ctx); + + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { + if (ctx->state <= CAM_CTX_READY) { + ctx->state = CAM_CTX_ACQUIRED; + goto end; + } + + spin_lock_bh(&ctx->lock); + ctx->state = CAM_CTX_FLUSHED; + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT; + spin_unlock_bh(&ctx->lock); + + CAM_INFO(CAM_ISP, "Last request id to flush is %lld, ctx_id:%u link: 0x%x", + flush_req->req_id, ctx->ctx_id, ctx->link_hdl); + ctx->last_flush_req = flush_req->req_id; + + __cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH, ctx, NULL); + + stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY; + stop_isp.stop_only = true; + stop_isp.is_internal_stop = false; + stop_isp.standby_en = ctx_isp->standby_en; + stop_args.args = (void *)&stop_isp; + rc = 
ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv, + &stop_args); + if (rc) + CAM_ERR(CAM_ISP, "Failed to stop HW in Flush rc: %d, ctx_id:%u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + CAM_INFO(CAM_ISP, "Stop HW complete. Reset HW next.Ctx_id:%u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + CAM_DBG(CAM_ISP, "Flush wait and active lists, ctx_id:%u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_timer) { + timer.link_hdl = ctx->link_hdl; + timer.dev_hdl = ctx->dev_hdl; + timer.state = false; + ctx->ctx_crm_intf->notify_timer(&timer); + } + + spin_lock_bh(&ctx->lock); + if (!list_empty(&ctx->wait_req_list)) + __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list, + flush_req); + + if (!list_empty(&ctx->active_req_list)) + __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list, + flush_req); + + ctx_isp->active_req_cnt = 0; + spin_unlock_bh(&ctx->lock); + + reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv, + &reset_args); + if (rc) + CAM_ERR(CAM_ISP, "Failed to reset HW rc: %d, ctx_id:%u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + ctx_isp->init_received = false; + } + + CAM_DBG(CAM_ISP, "Flush pending list, ctx_idx: %u, link: 0x%x", ctx->ctx_id, ctx->link_hdl); + /* + * On occasions when we are doing a flush all, HW would get reset + * shutting down any th/bh in the pipeline. 
If internal recovery + * is triggered prior to flush, by clearing the pending list post + * HW reset will ensure no stale request entities are left behind + */ + spin_lock_bh(&ctx->lock); + __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req); + spin_unlock_bh(&ctx->lock); + +end: + ctx_isp->bubble_frame_cnt = 0; + ctx_isp->congestion_cnt = 0; + ctx_isp->sof_dbg_irq_en = false; + ctx_isp->num_inits_post_flush = 0; + atomic_set(&ctx_isp->process_bubble, 0); + atomic_set(&ctx_isp->rxd_epoch, 0); + atomic_set(&ctx_isp->internal_recovery_set, 0); + return rc; +} + +static int __cam_isp_ctx_flush_req_in_ready( + struct cam_context *ctx, + struct cam_req_mgr_flush_request *flush_req) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "try to flush pending list, ctx_id:%u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + spin_lock_bh(&ctx->lock); + rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req); + + /* if nothing is in pending req list, change state to acquire */ + if (list_empty(&ctx->pending_req_list)) + ctx->state = CAM_CTX_ACQUIRED; + spin_unlock_bh(&ctx->lock); + + trace_cam_context_state("ISP", ctx); + + CAM_DBG(CAM_ISP, "Flush request in ready state. 
next state %d, ctx_id:%u link: 0x%x", + ctx->state, ctx->ctx_id, ctx->link_hdl); + return rc; +} + +static struct cam_ctx_ops + cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = { + /* SOF */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_isp_ctx_apply_req_in_sof, + .notify_frame_skip = + __cam_isp_ctx_apply_default_req_settings, + }, + .irq_ops = NULL, + }, + /* APPLIED */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* EPOCH */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_isp_ctx_apply_req_in_epoch, + .notify_frame_skip = + __cam_isp_ctx_apply_default_req_settings, + }, + .irq_ops = NULL, + }, + /* BUBBLE */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_isp_ctx_apply_req_in_bubble, + .notify_frame_skip = + __cam_isp_ctx_apply_default_req_settings, + }, + .irq_ops = NULL, + }, + /* Bubble Applied */ + { + .ioctl_ops = {}, + .crm_ops = { + .notify_frame_skip = + __cam_isp_ctx_apply_default_req_settings, + }, + .irq_ops = NULL, + }, + /* HW ERROR */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* HALT */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, +}; + +static struct cam_ctx_ops + cam_isp_ctx_fs2_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = { + /* SOF */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_isp_ctx_apply_req_in_sof, + }, + .irq_ops = NULL, + }, + /* APPLIED */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* EPOCH */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_isp_ctx_apply_req_in_epoch, + }, + .irq_ops = NULL, + }, + /* BUBBLE */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_isp_ctx_apply_req_in_bubble, + }, + .irq_ops = NULL, + }, + /* Bubble Applied */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* HW ERROR */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* HALT */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, +}; + 
+static int __cam_isp_ctx_rdi_only_sof_in_top_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + int rc = 0; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_hw_sof_event_data *sof_event_data = evt_data; + uint64_t request_id = 0; + + if (!evt_data) { + CAM_ERR(CAM_ISP, "in valid sof event data, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp); + + CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx_idx: %u, link: 0x%x", + ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id, ctx->link_hdl); + + /* + * notify reqmgr with sof signal. Note, due to scheduling delay + * we can run into situation that two active requests has already + * be in the active queue while we try to do the notification. + * In this case, we need to skip the current notification. This + * helps the state machine to catch up the delay. + */ + if (ctx_isp->active_req_cnt <= 2) { + __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp); + + /* + * It's possible for rup done to be processed before + * SOF, check for first active request shutter here + */ + if (!list_empty(&ctx->active_req_list)) { + struct cam_ctx_request *req = NULL; + + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + if (req->request_id > ctx_isp->reported_req_id) { + request_id = req->request_id; + ctx_isp->reported_req_id = request_id; + } + } + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + } else { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + } + + if (list_empty(&ctx->active_req_list)) + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; + else + CAM_DBG(CAM_ISP, "Still need to wait for the buf done, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + CAM_DBG(CAM_ISP, "next Substate[%s], ctx_idx: %u, link: 0x%x", + 
__cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + return rc; +} + +static int __cam_isp_ctx_rdi_only_sof_in_applied_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + struct cam_isp_hw_sof_event_data *sof_event_data = evt_data; + + if (!evt_data) { + CAM_ERR(CAM_ISP, "in valid sof event data"); + return -EINVAL; + } + + __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp); + + CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx", + ctx_isp->frame_id, ctx_isp->sof_timestamp_val); + + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED; + CAM_DBG(CAM_ISP, "next Substate[%s]", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated)); + + return 0; +} + +static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + struct cam_ctx_request *req; + struct cam_isp_ctx_req *req_isp; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_hw_sof_event_data *sof_event_data = evt_data; + uint64_t request_id = 0; + + if (ctx_isp->bubble_recover_dis && !ctx_isp->sfe_en) { + CAM_INFO(CAM_ISP, "Bubble Recovery Disabled"); + __cam_isp_ctx_send_sof_timestamp(ctx_isp, 0, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + return 0; + } + + /* + * Sof in bubble applied state means, reg update not received. + * before increment frame id and override time stamp value, send + * the previous sof time stamp that got captured in the + * sof in applied state. 
+ */ + CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx_idx: %u, link: 0x%x", + ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id, ctx->link_hdl); + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + + __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp); + + CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx_idx: %u, link: 0x%x", + ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id, ctx->link_hdl); + + if (list_empty(&ctx->wait_req_list)) { + /* + * If no pending req in epoch, this is an error case. + * The recovery is to go back to sof state + */ + CAM_ERR(CAM_ISP, "No wait request, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; + + /* Send SOF event as empty frame*/ + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + goto end; + } + + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, + list); + req_isp = (struct cam_isp_ctx_req *)req->req_priv; + req_isp->bubble_detected = true; + CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%u link: 0x%x Report Bubble flag %d req id:%lld", + ctx->ctx_id, ctx->link_hdl, req_isp->bubble_report, req->request_id); + req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO; + req_isp->cdm_reset_before_apply = false; + + if (req_isp->bubble_report) { + __cam_isp_ctx_notify_error_util(CAM_TRIGGER_POINT_SOF, CRM_KMD_ERR_BUBBLE, + req->request_id, ctx_isp); + atomic_set(&ctx_isp->process_bubble, 1); + } else { + req_isp->bubble_report = 0; + } + + /* + * Always move the request to active list. Let buf done + * function handles the rest. 
+ */ + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d), ctx_idx: %u, link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + + if (!req_isp->bubble_report) { + if (req->request_id > ctx_isp->reported_req_id) { + request_id = req->request_id; + ctx_isp->reported_req_id = request_id; + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_ERROR); + } else + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + } else + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + + /* change the state to bubble, as reg update has not come */ + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE; + CAM_DBG(CAM_ISP, "next Substate[%s], ctx_idx: %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); +end: + return 0; +} + +static int __cam_isp_ctx_rdi_only_sof_in_bubble_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + uint32_t i; + struct cam_ctx_request *req; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_hw_sof_event_data *sof_event_data = evt_data; + struct cam_isp_ctx_req *req_isp; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + uint64_t request_id = 0; + uint64_t last_cdm_done_req = 0; + int rc = 0; + + if (!evt_data) { + CAM_ERR(CAM_ISP, "in valid sof event data, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + __cam_isp_ctx_update_sof_ts_util(sof_event_data, ctx_isp); + CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx_idx: %u, link: 0x%x", + ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id, ctx->link_hdl); + + + if (atomic_read(&ctx_isp->process_bubble)) { + if (list_empty(&ctx->active_req_list)) { + CAM_ERR(CAM_ISP, "No available active req in 
bubble, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + atomic_set(&ctx_isp->process_bubble, 0); + return -EINVAL; + } + + if (ctx_isp->last_sof_timestamp == + ctx_isp->sof_timestamp_val) { + CAM_DBG(CAM_ISP, + "Tasklet delay detected! Bubble frame: %lld check skipped, sof_timestamp: %lld, ctx_id: %u, link: 0x%x", + ctx_isp->frame_id, + ctx_isp->sof_timestamp_val, + ctx->ctx_id, ctx->link_hdl); + goto end; + } + + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + if (req_isp->bubble_detected) { + hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = + CAM_ISP_HW_MGR_GET_LAST_CDM_DONE; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd( + ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed, ctx_id: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return rc; + } + + last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done; + CAM_DBG(CAM_ISP, "last_cdm_done req: %d ctx_id: %u, link: 0x%x", + last_cdm_done_req, ctx->ctx_id, ctx->link_hdl); + + if (last_cdm_done_req >= req->request_id) { + CAM_DBG(CAM_ISP, + "CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done, ctx_id: %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + if (req_isp->num_fence_map_out == + req_isp->num_deferred_acks) { + __cam_isp_handle_deferred_buf_done(ctx_isp, req, + true, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + + __cam_isp_ctx_handle_buf_done_for_req_list( + ctx_isp, req); + } + goto end; + } else { + CAM_WARN(CAM_ISP, + "CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay, ctx_id: %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + req_isp->num_acked = 0; + req_isp->num_deferred_acks = 0; + req_isp->bubble_detected = false; + 
req_isp->cdm_reset_before_apply = true; + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + atomic_set(&ctx_isp->process_bubble, 0); + ctx_isp->active_req_cnt--; + CAM_DBG(CAM_REQ, + "Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u link: 0x%x", + req->request_id, + ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + } + goto end; + } + } + + /* + * Signal all active requests with error and move the all the active + * requests to free list + */ + while (!list_empty(&ctx->active_req_list)) { + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d, ctx %u link: 0x%x", + req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl); + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (req_isp->fence_map_out[i].sync_id != -1) { + cam_sync_signal( + req_isp->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + } + + if (req_isp->fence_map_out[i].early_sync_id > 0) { + rc = cam_sync_signal( + req_isp->fence_map_out[i].early_sync_id, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_ISP_EVENT_BUBBLE); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[i].early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + req_isp->fence_map_out[i].early_sync_id = -1; + } + } + + __cam_isp_ctx_move_req_to_free_list(ctx, req); + ctx_isp->active_req_cnt--; + } + +end: + /* notify reqmgr with sof signal */ + __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp); + + /* + * It is idle frame with out any applied request id, send + * request id as zero + */ + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + + /* + * Can't move the substate to 
SOF if we are processing bubble, + * since the SOF substate can't receive REG_UPD and buf done, + * then the processing of bubble req can't be finished + */ + if (!atomic_read(&ctx_isp->process_bubble)) + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; + + CAM_DBG(CAM_ISP, "next Substate[%s], ctx %u link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val; + return 0; +} + + +static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + struct cam_ctx_request *req = NULL; + struct cam_context *ctx = ctx_isp->base; + + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + + CAM_INFO(CAM_ISP, "Received RUP for Bubble Request, ctx %u link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + + return 0; +} + +static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state( + struct cam_isp_context *ctx_isp, void *evt_data) +{ + struct cam_ctx_request *req = NULL; + struct cam_context *ctx = ctx_isp->base; + struct cam_isp_ctx_req *req_isp; + uint64_t request_id = 0; + + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH; + /* notify reqmgr with sof signal*/ + if (list_empty(&ctx->wait_req_list)) { + CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request, ctx %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto error; + } + + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + request_id = + (req_isp->hw_update_data.packet_opcode_type == + CAM_ISP_PACKET_INIT_DEV) ? 
0 : req->request_id; + + if (req_isp->num_fence_map_out != 0) { + list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + CAM_DBG(CAM_ISP, + "move request %lld to active list(cnt = %d), ctx %u link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + /* if packet has buffers, set correct request id */ + request_id = req->request_id; + } else { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + /* no io config, so the request is completed. */ + __cam_isp_ctx_move_req_to_free_list(ctx, req); + CAM_DBG(CAM_ISP, + "move active req %lld to free list(cnt=%d), ctx %u link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + } + + __cam_isp_ctx_notify_trigger_util(CAM_TRIGGER_POINT_SOF, ctx_isp); + + if (request_id) + ctx_isp->reported_req_id = request_id; + + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + CAM_DBG(CAM_ISP, "next Substate[%s], ctx %u link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_RUP, req, NULL); + return 0; +error: + /* Send SOF event as idle frame*/ + __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id, + CAM_REQ_MGR_SOF_EVENT_SUCCESS); + __cam_isp_ctx_update_event_record(ctx_isp, + CAM_ISP_CTX_EVENT_RUP, NULL, NULL); + + /* + * There is no request in the pending list, move the sub state machine + * to SOF sub state + */ + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; + + return 0; +} + +static struct cam_isp_ctx_irq_ops + cam_isp_ctx_rdi_only_activated_state_machine_irq + [CAM_ISP_CTX_ACTIVATED_MAX] = { + /* SOF */ + { + .irq_ops = { + NULL, + __cam_isp_ctx_rdi_only_sof_in_top_state, + __cam_isp_ctx_reg_upd_in_sof, + NULL, + __cam_isp_ctx_notify_eof_in_activated_state, + NULL, + }, + }, + /* APPLIED */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + 
__cam_isp_ctx_rdi_only_sof_in_applied_state, + __cam_isp_ctx_reg_upd_in_applied_state, + NULL, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_applied, + }, + }, + /* EPOCH */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_rdi_only_sof_in_top_state, + NULL, + NULL, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_epoch, + }, + }, + /* BUBBLE*/ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_rdi_only_sof_in_bubble_state, + __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state, + NULL, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_bubble, + }, + }, + /* BUBBLE APPLIED ie PRE_BUBBLE */ + { + .irq_ops = { + __cam_isp_ctx_handle_error, + __cam_isp_ctx_rdi_only_sof_in_bubble_applied, + __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state, + NULL, + __cam_isp_ctx_notify_eof_in_activated_state, + __cam_isp_ctx_buf_done_in_bubble_applied, + }, + }, + /* HW ERROR */ + { + }, + /* HALT */ + { + }, +}; + +static int __cam_isp_ctx_rdi_only_apply_req_top_state( + struct cam_context *ctx, struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + CAM_DBG(CAM_ISP, "current Substate[%s], ctx_idx: %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply, + CAM_ISP_CTX_ACTIVATED_APPLIED); + CAM_DBG(CAM_ISP, "new Substate[%s], ctx_idx: %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + if (rc) + CAM_ERR_RATE_LIMIT(CAM_ISP, + "ctx_id:%u link: 0x%x Apply failed in Substate[%s], rc %d", + ctx->ctx_id, ctx->link_hdl, + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), rc); + + return rc; +} + +static struct cam_ctx_ops + cam_isp_ctx_rdi_only_activated_state_machine + [CAM_ISP_CTX_ACTIVATED_MAX] 
= { + /* SOF */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state, + }, + .irq_ops = NULL, + }, + /* APPLIED */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* EPOCH */ + { + .ioctl_ops = {}, + .crm_ops = { + .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state, + }, + .irq_ops = NULL, + }, + /* PRE BUBBLE */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* BUBBLE */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* HW ERROR */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* HALT */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, +}; + +static int __cam_isp_ctx_flush_dev_in_top_state(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd) +{ + struct cam_isp_context *ctx_isp = ctx->ctx_priv; + struct cam_req_mgr_flush_request flush_req; + + if (!ctx_isp->offline_context) { + CAM_ERR(CAM_ISP, "flush dev only supported in offline context,ctx: %u, link:0x%x", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + flush_req.type = (cmd->flush_type == CAM_FLUSH_TYPE_ALL) ? 
CAM_REQ_MGR_FLUSH_TYPE_ALL : + CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ; + flush_req.req_id = cmd->req_id; + + CAM_DBG(CAM_ISP, "offline flush (type:%u, req:%lu), ctx_idx: %u, link: 0x%x", + flush_req.type, flush_req.req_id, ctx->ctx_id, ctx->link_hdl); + + switch (ctx->state) { + case CAM_CTX_ACQUIRED: + case CAM_CTX_ACTIVATED: + return __cam_isp_ctx_flush_req_in_top_state(ctx, &flush_req); + case CAM_CTX_READY: + return __cam_isp_ctx_flush_req_in_ready(ctx, &flush_req); + default: + CAM_ERR(CAM_ISP, "flush dev in wrong state: %d, ctx_idx: %u, link: 0x%x", + ctx->state, ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } +} + +static inline void __cam_isp_ctx_free_fcg_config( + struct cam_isp_context *ctx_isp, + struct cam_isp_ctx_req *req_isp) +{ + struct cam_isp_fcg_caps *fcg_caps; + struct cam_isp_fcg_config_info *fcg_info; + struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx_internal; + uint32_t max_fcg_ch_ctx; + int i; + + fcg_caps = ctx_isp->fcg_tracker.fcg_caps; + fcg_info = &req_isp->hw_update_data.fcg_info; + + /* Free IFE/MC_TFE related FCG config */ + max_fcg_ch_ctx = fcg_caps->max_ife_fcg_ch_ctx; + + fcg_ch_ctx_internal = fcg_info->ife_fcg_config.ch_ctx_fcg_configs; + if (fcg_ch_ctx_internal) { + for (i = 0; i < max_fcg_ch_ctx; i++) { + CAM_MEM_FREE(fcg_ch_ctx_internal[i].predicted_fcg_configs); + fcg_ch_ctx_internal[i].predicted_fcg_configs = NULL; + } + + CAM_MEM_FREE(fcg_info->ife_fcg_config.ch_ctx_fcg_configs); + fcg_info->ife_fcg_config.ch_ctx_fcg_configs = NULL; + } + + /* Free SFE related FCG config */ + max_fcg_ch_ctx = fcg_caps->max_sfe_fcg_ch_ctx; + + fcg_ch_ctx_internal = fcg_info->sfe_fcg_config.ch_ctx_fcg_configs; + if (fcg_ch_ctx_internal) { + for (i = 0; i < max_fcg_ch_ctx; i++) { + CAM_MEM_FREE(fcg_ch_ctx_internal[i].predicted_fcg_configs); + fcg_ch_ctx_internal[i].predicted_fcg_configs = NULL; + } + + CAM_MEM_FREE(fcg_info->sfe_fcg_config.ch_ctx_fcg_configs); + fcg_info->sfe_fcg_config.ch_ctx_fcg_configs = NULL; + } +} + +static void 
__cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx) +{ + struct cam_isp_context *ctx_isp; + struct cam_isp_ctx_req *req_isp; + int i; + + if (ctx->out_map_entries) { + for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) { + CAM_MEM_FREE(ctx->out_map_entries[i]); + ctx->out_map_entries[i] = NULL; + } + + CAM_MEM_FREE(ctx->out_map_entries); + ctx->out_map_entries = NULL; + } + + if (ctx->in_map_entries) { + for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) { + CAM_MEM_FREE(ctx->in_map_entries[i]); + ctx->in_map_entries[i] = NULL; + } + + CAM_MEM_FREE(ctx->in_map_entries); + ctx->in_map_entries = NULL; + } + + if (ctx->hw_update_entry) { + for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) { + CAM_MEM_FREE(ctx->hw_update_entry[i]); + ctx->hw_update_entry[i] = NULL; + } + + CAM_MEM_FREE(ctx->hw_update_entry); + ctx->hw_update_entry = NULL; + } + + ctx_isp = (struct cam_isp_context *)ctx->ctx_priv; + if (ctx_isp) + for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) { + CAM_MEM_FREE(ctx_isp->req_isp[i].deferred_fence_map_index); + ctx_isp->req_isp[i].deferred_fence_map_index = NULL; + } + + ctx->max_out_map_entries = 0; + ctx->max_in_map_entries = 0; + ctx->max_hw_update_entries = 0; + + /* Free memory for FCG channel/context */ + + if (ctx_isp && ctx_isp->fcg_tracker.fcg_caps) { + for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) { + req_isp = &ctx_isp->req_isp[i]; + __cam_isp_ctx_free_fcg_config(ctx_isp, req_isp); + } + } +} + +static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx, + void *cmd) +{ + int rc = 0; + struct cam_hw_release_args rel_arg; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_req_mgr_flush_request flush_req; + int i; + + if (ctx_isp->hw_ctx) { + rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx; + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, + &rel_arg); + ctx_isp->hw_ctx = NULL; + } else { + CAM_ERR(CAM_ISP, "No hw resources acquired for ctx[%u], link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + } + + 
ctx->last_flush_req = 0; + ctx_isp->custom_enabled = false; + ctx_isp->use_frame_header_ts = false; + ctx_isp->use_default_apply = false; + ctx_isp->frame_id = 0; + ctx_isp->active_req_cnt = 0; + ctx_isp->reported_req_id = 0; + ctx_isp->reported_frame_id = 0; + ctx_isp->hw_acquired = false; + ctx_isp->init_received = false; + ctx_isp->support_consumed_addr = false; + ctx_isp->aeb_enabled = false; + ctx_isp->sfe_en = false; + ctx_isp->bubble_recover_dis = false; + ctx_isp->req_info.last_bufdone_req_id = 0; + CAM_MEM_FREE(ctx_isp->vfe_bus_comp_grp); + CAM_MEM_FREE(ctx_isp->sfe_bus_comp_grp); + ctx_isp->vfe_bus_comp_grp = NULL; + ctx_isp->sfe_bus_comp_grp = NULL; + + atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1); + atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1); + + for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) + atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1); + /* + * Ideally, we should never have any active request here. + * But we still add some sanity check code here to help the debug + */ + if (!list_empty(&ctx->active_req_list)) + CAM_WARN(CAM_ISP, "Active list is not empty, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + /* Flush all the pending request list */ + flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL; + flush_req.link_hdl = ctx->link_hdl; + flush_req.dev_hdl = ctx->dev_hdl; + flush_req.req_id = 0; + + CAM_DBG(CAM_ISP, "try to flush pending list, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + spin_lock_bh(&ctx->lock); + rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req); + spin_unlock_bh(&ctx->lock); + __cam_isp_ctx_free_mem_hw_entries(ctx); + cam_req_mgr_workq_destroy(&ctx_isp->workq); + ctx->state = CAM_CTX_ACQUIRED; + + trace_cam_context_state("ISP", ctx); + CAM_DBG(CAM_ISP, "Release device success[%u] link: 0x%x next state %d", + ctx->ctx_id, ctx->link_hdl, ctx->state); + return rc; +} + +/* top level state machine */ +static int 
__cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc = 0; + int i; + struct cam_hw_release_args rel_arg; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_req_mgr_flush_request flush_req; + + if (cmd && ctx_isp->hw_ctx) { + CAM_ERR(CAM_ISP, "releasing hw, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + __cam_isp_ctx_release_hw_in_top_state(ctx, NULL); + } + + if (ctx_isp->hw_ctx) { + rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx; + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, + &rel_arg); + ctx_isp->hw_ctx = NULL; + } + + cam_common_release_evt_params(ctx->dev_hdl); + memset(&ctx_isp->evt_inject_params, 0, sizeof(struct cam_hw_inject_evt_param)); + + ctx->session_hdl = -1; + ctx->dev_hdl = -1; + ctx->link_hdl = -1; + ctx->ctx_crm_intf = NULL; + ctx->last_flush_req = 0; + ctx_isp->frame_id = 0; + ctx_isp->active_req_cnt = 0; + ctx_isp->reported_req_id = 0; + ctx_isp->reported_frame_id = 0; + ctx_isp->hw_acquired = false; + ctx_isp->init_received = false; + ctx_isp->offline_context = false; + ctx_isp->vfps_aux_context = false; + ctx_isp->rdi_only_context = false; + ctx_isp->req_info.last_bufdone_req_id = 0; + ctx_isp->v4l2_event_sub_ids = 0; + ctx_isp->resume_hw_in_flushed = false; + + atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1); + atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1); + for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) + atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1); + /* + * Ideally, we should never have any active request here. 
+ * But we still add some sanity check code here to help the debug + */ + if (!list_empty(&ctx->active_req_list)) + CAM_ERR(CAM_ISP, "Active list is not empty, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + /* Flush all the pending request list */ + flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL; + flush_req.link_hdl = ctx->link_hdl; + flush_req.dev_hdl = ctx->dev_hdl; + flush_req.req_id = 0; + + CAM_DBG(CAM_ISP, "try to flush pending list, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + spin_lock_bh(&ctx->lock); + rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req); + spin_unlock_bh(&ctx->lock); + __cam_isp_ctx_free_mem_hw_entries(ctx); + + ctx->state = CAM_CTX_AVAILABLE; + + trace_cam_context_state("ISP", ctx); + CAM_DBG(CAM_ISP, "Release device success[%u] link: 0x%x next state %d", + ctx->ctx_id, ctx->link_hdl, ctx->state); + return rc; +} + +static int __cam_isp_ctx_config_dev_in_top_state( + struct cam_context *ctx, struct cam_config_dev_cmd *cmd) +{ + int rc = 0, i; + struct cam_ctx_request *req = NULL; + struct cam_isp_ctx_req *req_isp; + struct cam_packet *packet = NULL; + size_t remain_len = 0; + struct cam_hw_prepare_update_args cfg = {0}; + struct cam_req_mgr_add_request add_req = {0}; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + uint32_t packet_opcode = 0; + struct cam_isp_ch_ctx_fcg_config_internal *sfe_ch_ctx_fcg, *ife_ch_ctx_fcg; + struct cam_kmd_buf_info *kmd_buff = NULL; + + CAM_DBG(CAM_ISP, "get free request object......ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + /* get free request */ + spin_lock_bh(&ctx->lock); + if (!list_empty(&ctx->free_req_list)) { + req = list_first_entry(&ctx->free_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + } + spin_unlock_bh(&ctx->lock); + + if (!req) { + CAM_ERR(CAM_ISP, "No more request obj free, ctx_idx: %u, 
link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return -ENOMEM; + } + + if (req->packet) { + CAM_ERR(CAM_CTXT, "[%s][%d] Missing free request %llu local packet", + ctx->dev_name, ctx->ctx_id, req->request_id); + rc = -EINVAL; + goto free_req; + } + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + remain_len = cam_context_parse_config_cmd(ctx, cmd, &packet); + if (IS_ERR_OR_NULL(packet)) { + rc = PTR_ERR(packet); + goto free_req; + } + + if (cam_packet_util_validate_packet(packet, remain_len)) { + CAM_ERR(CAM_ISP, "Invalid packet params"); + rc = -EINVAL; + goto free_packet; + } + + /* Query the packet opcode */ + hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_PACKET_OPCODE; + isp_hw_cmd_args.cmd_data = (void *)packet; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto free_packet; + } + + packet_opcode = isp_hw_cmd_args.u.packet_op_code; + if ((packet_opcode == CAM_ISP_PACKET_UPDATE_DEV) + && (packet->header.request_id <= ctx->last_flush_req)) { + CAM_INFO(CAM_ISP, + "request %lld has been flushed, reject packet, ctx_idx: %u, link: 0x%x", + packet->header.request_id, ctx->ctx_id, ctx->link_hdl); + rc = -EBADR; + goto free_packet; + } else if ((packet_opcode == CAM_ISP_PACKET_INIT_DEV) + && (packet->header.request_id <= ctx->last_flush_req) + && ctx->last_flush_req && packet->header.request_id) { + CAM_WARN(CAM_ISP, + "last flushed req is %lld, config dev(init) for req %lld, ctx_idx: %u, link: 0x%x", + ctx->last_flush_req, packet->header.request_id, ctx->ctx_id, ctx->link_hdl); + rc = -EBADR; + goto free_packet; + } + + cfg.packet = packet; + cfg.remain_len = remain_len; + cfg.ctxt_to_hw_map = ctx_isp->hw_ctx; + cfg.max_hw_update_entries = 
ctx->max_hw_update_entries; + cfg.hw_update_entries = req_isp->cfg; + cfg.max_out_map_entries = ctx->max_out_map_entries; + cfg.max_in_map_entries = ctx->max_in_map_entries; + cfg.out_map_entries = req_isp->fence_map_out; + cfg.in_map_entries = req_isp->fence_map_in; + cfg.priv = &req_isp->hw_update_data; + cfg.pf_data = &(req->pf_data); + cfg.num_out_map_entries = 0; + cfg.num_in_map_entries = 0; + cfg.buf_tracker = &req->buf_tracker; + sfe_ch_ctx_fcg = req_isp->hw_update_data.fcg_info.sfe_fcg_config.ch_ctx_fcg_configs; + ife_ch_ctx_fcg = req_isp->hw_update_data.fcg_info.ife_fcg_config.ch_ctx_fcg_configs; + memset(&req_isp->hw_update_data, 0, sizeof(req_isp->hw_update_data)); + + /* Restore FCG related info */ + req_isp->hw_update_data.fcg_info.sfe_fcg_config.ch_ctx_fcg_configs + = sfe_ch_ctx_fcg; + req_isp->hw_update_data.fcg_info.ife_fcg_config.ch_ctx_fcg_configs + = ife_ch_ctx_fcg; + + memset(req_isp->fence_map_out, 0, sizeof(struct cam_hw_fence_map_entry) + * ctx->max_out_map_entries); + + INIT_LIST_HEAD(cfg.buf_tracker); + + rc = ctx->hw_mgr_intf->hw_prepare_update( + ctx->hw_mgr_intf->hw_mgr_priv, &cfg); + if (rc != 0) { + CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto free_req_and_buf_tracker_list; + } + + req_isp->num_cfg = cfg.num_hw_update_entries; + req_isp->num_fence_map_out = cfg.num_out_map_entries; + req_isp->num_fence_map_in = cfg.num_in_map_entries; + req_isp->num_acked = 0; + req_isp->num_deferred_acks = 0; + req_isp->bubble_detected = false; + req_isp->cdm_reset_before_apply = false; + req_isp->hw_update_data.packet = packet; + req_isp->is_reg_dump_triggered = false; + req->pf_data.req = req; + req->packet = packet; + + for (i = 0; i < req_isp->num_fence_map_out; i++) { + rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id); + if (rc) { + CAM_ERR(CAM_ISP, "Can't get ref for fence %d, ctx_idx: %u, link: 0x%x", + req_isp->fence_map_out[i].sync_id, 
ctx->ctx_id, ctx->link_hdl); + goto put_ref; + } + + if (req_isp->fence_map_out[i].early_sync_id > 0) { + rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].early_sync_id); + if (rc) { + CAM_ERR(CAM_ISP, + "Can't get ref for early fence %d, ctx_idx: %u, link: 0x%x", + req_isp->fence_map_out[i].early_sync_id, ctx->ctx_id, + ctx->link_hdl); + goto put_ref; + } + } + } + + CAM_DBG(CAM_ISP, + "packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d, ctx_idx: %u, link: 0x%x", + packet->header.request_id, req_isp->hw_update_data.packet_opcode_type, + req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in, + ctx->ctx_id, ctx->link_hdl); + + req->request_id = packet->header.request_id; + req->status = 1; + + if (req_isp->hw_update_data.packet_opcode_type == + CAM_ISP_PACKET_INIT_DEV) { + if (ctx->state < CAM_CTX_ACTIVATED) { + rc = __cam_isp_ctx_enqueue_init_request(ctx, req); + if (rc) + CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->init_received = true; + + if ((ctx_isp->vfps_aux_context) && (req->request_id > 0)) + ctx_isp->resume_hw_in_flushed = true; + else + ctx_isp->resume_hw_in_flushed = false; + + if (ctx->state == CAM_CTX_FLUSHED) + ctx_isp->num_inits_post_flush++; + } else { + rc = -EINVAL; + CAM_ERR(CAM_ISP, "Received INIT pkt in wrong state:%d, ctx:%u, link:0x%x", + ctx->state, ctx->ctx_id, ctx->link_hdl); + } + } else { + if (ctx->state < CAM_CTX_READY) { + rc = -EINVAL; + CAM_ERR(CAM_ISP, + "Received update req %lld in wrong state:%d, ctx_idx: %u, link: 0x%x", + req->request_id, ctx->state, ctx->ctx_id, ctx->link_hdl); + goto put_ref; + } + + if ((ctx->state == CAM_CTX_FLUSHED) && + (ctx_isp->num_inits_post_flush < + CAM_ISP_CONTEXT_NUM_INIT_REQ_RCVD_POST_FLUSH)) { + CAM_ERR(CAM_ISP, + "Received update req %lld in wrong state:%d, ctx_idx: %u, link: 0x%x, still waiting on INITs to resume num_inits: %u", + req->request_id, ctx->state, ctx->ctx_id, 
ctx->link_hdl, + ctx_isp->num_inits_post_flush); + goto put_ref; + } + + if ((ctx_isp->offline_context) || (ctx_isp->vfps_aux_context)) { + __cam_isp_ctx_enqueue_request_in_order(ctx, req, true); + } else if (ctx->ctx_crm_intf->add_req) { + memset(&add_req, 0, sizeof(add_req)); + add_req.link_hdl = ctx->link_hdl; + add_req.dev_hdl = ctx->dev_hdl; + add_req.req_id = req->request_id; + add_req.trigger_skip = req_isp->hw_update_data.mup_en; + rc = ctx->ctx_crm_intf->add_req(&add_req); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_ISP, + "Add req failed: req id=%llu, it has been flushed on link 0x%x ctx %u", + req->request_id, ctx->link_hdl, ctx->ctx_id); + else + CAM_ERR(CAM_ISP, + "Add req failed: req id=%llu on link 0x%x ctx %u", + req->request_id, ctx->link_hdl, ctx->ctx_id); + } else { + __cam_isp_ctx_enqueue_request_in_order(ctx, req, true); + } + } else { + CAM_ERR(CAM_ISP, "Unable to add request: req id=%llu,ctx: %u,link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + rc = -ENODEV; + } + } + if (rc) + goto put_ref; + + if (isp_ctx_debug.enable_cdm_cmd_buff_dump) + cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false); + + CAM_DBG(CAM_REQ, + "Preprocessing Config req_id %lld successful on ctx %u, link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + + if (ctx_isp->offline_context && atomic_read(&ctx_isp->rxd_epoch)) + __cam_isp_ctx_schedule_apply_req(ctx_isp); + else if (ctx_isp->vfps_aux_context && + (req_isp->hw_update_data.packet_opcode_type != CAM_ISP_PACKET_INIT_DEV)) + __cam_isp_ctx_apply_pending_req(ctx_isp, NULL); + + return rc; + +put_ref: + for (--i; i >= 0; i--) { + if (cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id)) + CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d, ctx_idx: %u, link: 0x%x", + req_isp->fence_map_out[i].sync_id, ctx->ctx_id, ctx->link_hdl); + + if ((req_isp->fence_map_out[i].early_sync_id > 0) && + cam_sync_put_obj_ref(req_isp->fence_map_out[i].early_sync_id)) { + CAM_ERR(CAM_CTXT, + "Failed to put ref of 
early fence %d, ctx_idx: %u, link: 0x%x", + req_isp->fence_map_out[i].early_sync_id, ctx->ctx_id, + ctx->link_hdl); + } + } +free_req_and_buf_tracker_list: + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + kmd_buff = &(req_isp->hw_update_data.kmd_cmd_buff_info); + cam_mem_put_kref(kmd_buff->handle); +free_packet: + cam_common_mem_free(packet); +free_req: + spin_lock_bh(&ctx->lock); + req->packet = NULL; + list_add_tail(&req->list, &ctx->free_req_list); + spin_unlock_bh(&ctx->lock); + + return rc; +} + +static inline int __cam_isp_ctx_allocate_mem_fcg_config( + struct cam_isp_fcg_caps *fcg_caps, + struct cam_isp_ctx_req *req_isp, + bool sfe_en) +{ + struct cam_isp_fcg_config_info *fcg_info; + struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx_internal; + uint32_t max_fcg_ch_ctx, max_fcg_predictions; + int i; + + fcg_info = &req_isp->hw_update_data.fcg_info; + + if (fcg_caps->ife_fcg_supported) { + max_fcg_ch_ctx = fcg_caps->max_ife_fcg_ch_ctx; + max_fcg_predictions = fcg_caps->max_ife_fcg_predictions; + + fcg_info->ife_fcg_config.ch_ctx_fcg_configs = + CAM_MEM_ZALLOC_ARRAY(max_fcg_ch_ctx, + sizeof(struct cam_isp_ch_ctx_fcg_config_internal), + GFP_KERNEL); + if (!fcg_info->ife_fcg_config.ch_ctx_fcg_configs) + return -ENOMEM; + + fcg_ch_ctx_internal = fcg_info->ife_fcg_config.ch_ctx_fcg_configs; + for (i = 0; i < max_fcg_ch_ctx; i++) { + fcg_ch_ctx_internal[i].predicted_fcg_configs = + CAM_MEM_ZALLOC_ARRAY(max_fcg_predictions, + sizeof(struct cam_isp_predict_fcg_config_internal), + GFP_KERNEL); + if (!fcg_ch_ctx_internal[i].predicted_fcg_configs) + return -ENOMEM; + } + } + + if (fcg_caps->sfe_fcg_supported && sfe_en) { + max_fcg_ch_ctx = fcg_caps->max_sfe_fcg_ch_ctx; + max_fcg_predictions = fcg_caps->max_sfe_fcg_predictions; + + fcg_info->sfe_fcg_config.ch_ctx_fcg_configs = + CAM_MEM_ZALLOC_ARRAY(max_fcg_ch_ctx, + sizeof(struct cam_isp_ch_ctx_fcg_config_internal), + GFP_KERNEL); + if (!fcg_info->sfe_fcg_config.ch_ctx_fcg_configs) + return -ENOMEM; + + 
fcg_ch_ctx_internal = fcg_info->sfe_fcg_config.ch_ctx_fcg_configs; + for (i = 0; i < max_fcg_ch_ctx; i++) { + fcg_ch_ctx_internal[i].predicted_fcg_configs = + CAM_MEM_ZALLOC_ARRAY(max_fcg_predictions, + sizeof(struct cam_isp_predict_fcg_config_internal), + GFP_KERNEL); + if (!fcg_ch_ctx_internal[i].predicted_fcg_configs) + return -ENOMEM; + } + } + + return 0; +} + +static int __cam_isp_ctx_allocate_mem_hw_entries( + struct cam_context *ctx, + struct cam_hw_acquire_args *param) +{ + int rc = 0, i; + uint32_t max_res = 0; + uint32_t max_hw_upd_entries = CAM_ISP_CTX_CFG_MAX; + struct cam_ctx_request *req; + struct cam_ctx_request *temp_req; + struct cam_isp_ctx_req *req_isp; + struct cam_isp_context *ctx_isp = ctx->ctx_priv; + struct cam_isp_fcg_caps *fcg_caps; + + if (!param->op_params.param_list[0]) + max_res = CAM_ISP_CTX_RES_MAX; + else { + max_res = param->op_params.param_list[0]; + if (param->op_flags & CAM_IFE_CTX_SFE_EN) { + max_res += param->op_params.param_list[1]; + max_hw_upd_entries = CAM_ISP_SFE_CTX_CFG_MAX; + } + } + + ctx->max_in_map_entries = max_res; + ctx->max_out_map_entries = max_res; + ctx->max_hw_update_entries = max_hw_upd_entries; + + CAM_DBG(CAM_ISP, + "Allocate max_entries: 0x%x max_res: 0x%x is_sfe_en: %d, ctx: %u, link: 0x%x", + max_hw_upd_entries, max_res, (param->op_flags & CAM_IFE_CTX_SFE_EN), + ctx->ctx_id, ctx->link_hdl); + + ctx->hw_update_entry = CAM_MEM_ZALLOC_ARRAY(CAM_ISP_CTX_REQ_MAX, + sizeof(struct cam_hw_update_entry *), + GFP_KERNEL); + + if (!ctx->hw_update_entry) { + CAM_ERR(CAM_CTXT, "%s[%u] no memory, link: 0x%x", + ctx->dev_name, ctx->ctx_id, ctx->link_hdl); + return -ENOMEM; + } + + ctx->in_map_entries = CAM_MEM_ZALLOC_ARRAY(CAM_ISP_CTX_REQ_MAX, + sizeof(struct cam_hw_fence_map_entry *), + GFP_KERNEL); + + if (!ctx->in_map_entries) { + CAM_ERR(CAM_CTXT, "%s[%u] no memory for in_map_entries, link: 0x%x", + ctx->dev_name, ctx->ctx_id, ctx->link_hdl); + rc = -ENOMEM; + goto end; + } + + ctx->out_map_entries = 
CAM_MEM_ZALLOC_ARRAY(CAM_ISP_CTX_REQ_MAX, + sizeof(struct cam_hw_fence_map_entry *), + GFP_KERNEL); + + if (!ctx->out_map_entries) { + CAM_ERR(CAM_CTXT, "%s[%u] no memory for out_map_entries, link: 0x%x", + ctx->dev_name, ctx->ctx_id, ctx->link_hdl); + rc = -ENOMEM; + goto end; + } + + + for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) { + + ctx->hw_update_entry[i] = CAM_MEM_ZALLOC_ARRAY(ctx->max_hw_update_entries, + sizeof(struct cam_hw_update_entry), GFP_KERNEL); + if (!ctx->hw_update_entry[i]) { + CAM_ERR(CAM_CTXT, "%s[%u] no memory for hw_update_entry: %u, link: 0x%x", + ctx->dev_name, ctx->ctx_id, i, ctx->link_hdl); + return -ENOMEM; + } + + ctx->in_map_entries[i] = CAM_MEM_ZALLOC_ARRAY(ctx->max_in_map_entries, + sizeof(struct cam_hw_fence_map_entry), + GFP_KERNEL); + + if (!ctx->in_map_entries[i]) { + CAM_ERR(CAM_CTXT, "%s[%u] no memory for in_map_entries: %u, link: 0x%x", + ctx->dev_name, ctx->ctx_id, i, ctx->link_hdl); + rc = -ENOMEM; + goto end; + } + + ctx->out_map_entries[i] = CAM_MEM_ZALLOC_ARRAY(ctx->max_out_map_entries, + sizeof(struct cam_hw_fence_map_entry), + GFP_KERNEL); + + if (!ctx->out_map_entries[i]) { + CAM_ERR(CAM_CTXT, "%s[%u] no memory for out_map_entries: %u, link: 0x%x", + ctx->dev_name, ctx->ctx_id, i, ctx->link_hdl); + rc = -ENOMEM; + goto end; + } + + ctx_isp->req_isp[i].deferred_fence_map_index = kcalloc(param->total_ports_acq, + sizeof(uint32_t), GFP_KERNEL); + if (!ctx_isp->req_isp[i].deferred_fence_map_index) { + CAM_ERR(CAM_ISP, "%s[%d] no memory for defer fence map idx arr, ports:%u", + ctx->dev_name, ctx->ctx_id, param->total_ports_acq); + rc = -ENOMEM; + goto end; + } + } + + fcg_caps = ctx_isp->fcg_tracker.fcg_caps; + + list_for_each_entry_safe(req, temp_req, + &ctx->free_req_list, list) { + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + req_isp->cfg = ctx->hw_update_entry[req->index]; + req_isp->fence_map_in = ctx->in_map_entries[req->index]; + req_isp->fence_map_out = ctx->out_map_entries[req->index]; + + /* Allocate 
memory for FCG related hw update data */ + + if (!fcg_caps || + (!fcg_caps->ife_fcg_supported && !fcg_caps->sfe_fcg_supported)) + continue; + + rc = __cam_isp_ctx_allocate_mem_fcg_config(fcg_caps, req_isp, + param->op_flags & CAM_IFE_CTX_SFE_EN); + if (rc) { + CAM_ERR(CAM_CTXT, + "No sufficient memory for FCG channel/context in %s, ctx_id: %u, link: 0x%x", + ctx->dev_name, ctx->ctx_id, ctx->link_hdl); + rc = -ENOMEM; + goto end; + } + } + + if (fcg_caps) + CAM_DBG(CAM_ISP, + "Finish allocating FCG related structure, ctx_id: %u, FCG IFE/MC_TFE supported: %d, FCG SFE supported: %d, SFE_EN: %d", + ctx->ctx_id, fcg_caps->ife_fcg_supported, + fcg_caps->sfe_fcg_supported, + (bool)(param->op_flags & CAM_IFE_CTX_SFE_EN)); + + return rc; + +end: + __cam_isp_ctx_free_mem_hw_entries(ctx); + + return rc; +} + +static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *cmd) +{ + int rc = 0; + int i; + struct cam_hw_acquire_args param; + struct cam_isp_resource *isp_res = NULL; + struct cam_create_dev_hdl req_hdl_param; + struct cam_hw_release_args release; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_ISP, "HW interface is not ready, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto end; + } + + CAM_DBG(CAM_ISP, + "session_hdl 0x%x, num_resources %d, hdl type %d, res %lld, ctx_idx: %u, link: 0x%x", + cmd->session_handle, cmd->num_resources, + cmd->handle_type, cmd->resource_hdl, ctx->ctx_id, ctx->link_hdl); + + ctx_isp->v4l2_event_sub_ids = cam_req_mgr_get_id_subscribed(); + + if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) { + ctx_isp->split_acquire = true; + CAM_DBG(CAM_ISP, "Acquire dev handle, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto get_dev_handle; + } + + if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) { + 
CAM_ERR(CAM_ISP, "Too much resources in the acquire, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -ENOMEM; + goto end; + } + + /* for now we only support user pointer */ + if (cmd->handle_type != 1) { + CAM_ERR(CAM_ISP, "Only user pointer is supported, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EINVAL; + goto end; + } + + isp_res = CAM_MEM_ZALLOC( + sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL); + if (!isp_res) { + rc = -ENOMEM; + goto end; + } + + CAM_DBG(CAM_ISP, "start copy %d resources from user, ctx_idx: %u, link: 0x%x", + cmd->num_resources, ctx->ctx_id, ctx->link_hdl); + + if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl), + sizeof(*isp_res)*cmd->num_resources)) { + rc = -EFAULT; + goto free_res; + } + + memset(&param, 0, sizeof(param)); + param.context_data = ctx; + param.event_cb = ctx->irq_cb_intf; + param.sec_pf_evt_cb = cam_context_dump_pf_info; + param.num_acq = cmd->num_resources; + param.acquire_info = (uintptr_t) isp_res; + + rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param); + if (rc) { + CAM_ERR(CAM_ISP, "Ctx[%u] link: 0x%x allocate hw entry fail", + ctx->ctx_id, ctx->link_hdl); + goto free_res; + } + + /* call HW manager to reserve the resource */ + rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv, + &param); + if (rc != 0) { + CAM_ERR(CAM_ISP, "Acquire device failed, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto free_res; + } + + /* Query the context has rdi only resource */ + hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto free_hw; + } + + if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_RDI) { + /* + * 
this context has rdi only resource assign rdi only + * state machine + */ + CAM_DBG(CAM_ISP, "RDI only session Context, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + ctx_isp->substate_machine_irq = + cam_isp_ctx_rdi_only_activated_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_rdi_only_activated_state_machine; + ctx_isp->rdi_only_context = true; + } else if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_FS2) { + CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_fs2_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_fs2_state_machine; + } else if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_OFFLINE) { + CAM_DBG(CAM_ISP, + "offline Session has PIX and RD resources, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_offline_state_machine_irq; + } else { + CAM_DBG(CAM_ISP, + "Session has PIX or PIX and RDI resources, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_activated_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_activated_state_machine; + } + + ctx_isp->hw_ctx = param.ctxt_to_hw_map; + ctx_isp->hw_acquired = true; + ctx_isp->split_acquire = false; + ctx->ctxt_to_hw_map = param.ctxt_to_hw_map; + ctx_isp->bubble_recover_dis = isp_hw_cmd_args.u.ctx_info.bubble_recover_dis; + atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1); + atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1); + for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) + atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1); + + CAM_INFO(CAM_ISP, "Ctx_type: %u, ctx_id: %u, hw_mgr_ctx: %u bubble_recover %d", + isp_hw_cmd_args.u.ctx_info.type, ctx->ctx_id, param.hw_mgr_ctx_id, + isp_hw_cmd_args.u.ctx_info.bubble_recover_dis); + CAM_MEM_FREE(isp_res); + isp_res = NULL; + +get_dev_handle: + + 
req_hdl_param.session_hdl = cmd->session_handle; + /* bridge is not ready for these flags. so false for now */ + req_hdl_param.v4l2_sub_dev_flag = 0; + req_hdl_param.media_entity_flag = 0; + req_hdl_param.ops = ctx->crm_ctx_intf; + req_hdl_param.priv = ctx; + req_hdl_param.dev_id = CAM_ISP; + CAM_DBG(CAM_ISP, "get device handle form bridge, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param); + if (ctx->dev_hdl <= 0) { + rc = -EFAULT; + CAM_ERR(CAM_ISP, "Can not create device handle, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto free_hw; + } + cmd->dev_handle = ctx->dev_hdl; + + /* store session information */ + ctx->session_hdl = cmd->session_handle; + ctx->state = CAM_CTX_ACQUIRED; + + trace_cam_context_state("ISP", ctx); + CAM_INFO(CAM_ISP, + "Acquire success: session_hdl 0x%x num_rsrces %d ctx %u link: 0x%x", + cmd->session_handle, cmd->num_resources, ctx->ctx_id, ctx->link_hdl); + + return rc; + +free_hw: + release.ctxt_to_hw_map = ctx_isp->hw_ctx; + if (ctx_isp->hw_acquired) + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, + &release); + ctx_isp->hw_ctx = NULL; + ctx_isp->hw_acquired = false; +free_res: + CAM_MEM_FREE(isp_res); +end: + return rc; +} + +static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx, + void *args) +{ + int rc = 0; + int i; + struct cam_acquire_hw_cmd_v1 *cmd = + (struct cam_acquire_hw_cmd_v1 *)args; + struct cam_hw_acquire_args param; + struct cam_hw_release_args release; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + struct cam_isp_acquire_hw_info *acquire_hw_info = NULL; + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_ISP, "HW interface is not ready, ctx %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto end; + } + + CAM_DBG(CAM_ISP, + "session_hdl 0x%x, hdl type %d, res %lld ctx %u link: 0x%x", + 
cmd->session_handle, cmd->handle_type, cmd->resource_hdl, + ctx->ctx_id, ctx->link_hdl); + + /* for now we only support user pointer */ + if (cmd->handle_type != 1) { + CAM_ERR(CAM_ISP, "Only user pointer is supported, ctx %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EINVAL; + goto end; + } + + if (cmd->data_size < sizeof(*acquire_hw_info)) { + CAM_ERR(CAM_ISP, "data_size is not a valid value, ctx %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto end; + } + + acquire_hw_info = CAM_MEM_ZALLOC(cmd->data_size, GFP_KERNEL); + if (!acquire_hw_info) { + rc = -ENOMEM; + goto end; + } + + CAM_DBG(CAM_ISP, "start copy resources from user, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl, + cmd->data_size)) { + rc = -EFAULT; + goto free_res; + } + + memset(&param, 0, sizeof(param)); + param.context_data = ctx; + param.event_cb = ctx->irq_cb_intf; + param.sec_pf_evt_cb = cam_context_dump_pf_info; + param.num_acq = CAM_API_COMPAT_CONSTANT; + param.acquire_info_size = cmd->data_size; + param.acquire_info = (uint64_t) acquire_hw_info; + param.mini_dump_cb = __cam_isp_ctx_minidump_cb; + + rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, + &param); + if (rc) { + CAM_ERR(CAM_ISP, "Ctx[%u] link: 0x%x allocate hw entry fail", + ctx->ctx_id, ctx->link_hdl); + goto free_res; + } + + /* call HW manager to reserve the resource */ + rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv, + &param); + if (rc != 0) { + CAM_ERR(CAM_ISP, "Acquire device failed, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto free_res; + } + + ctx_isp->support_consumed_addr = + (param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN); + + /* Query the context has rdi only resource */ + hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = 
ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto free_hw; + } + + if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_RDI) { + /* + * this context has rdi only resource assign rdi only + * state machine + */ + CAM_DBG(CAM_ISP, "RDI only session Context, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + ctx_isp->substate_machine_irq = + cam_isp_ctx_rdi_only_activated_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_rdi_only_activated_state_machine; + ctx_isp->rdi_only_context = true; + } else if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_FS2) { + CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_fs2_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_fs2_state_machine; + } else if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_OFFLINE) { + CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_offline_state_machine_irq; + ctx_isp->substate_machine = NULL; + } else { + CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_activated_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_activated_state_machine; + } + + ctx_isp->hw_ctx = param.ctxt_to_hw_map; + ctx_isp->hw_acquired = true; + ctx->ctxt_to_hw_map = param.ctxt_to_hw_map; + + atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1); + atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1); + + for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) + atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1); + + trace_cam_context_state("ISP", ctx); + CAM_INFO(CAM_ISP, + "Acquire success:session_hdl 0x%xs 
ctx_type %d ctx %u link: 0x%x hw_mgr_ctx: %u", + ctx->session_hdl, isp_hw_cmd_args.u.ctx_info.type, ctx->ctx_id, ctx->link_hdl, + param.hw_mgr_ctx_id); + CAM_MEM_FREE(acquire_hw_info); + return rc; + +free_hw: + release.ctxt_to_hw_map = ctx_isp->hw_ctx; + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release); + ctx_isp->hw_ctx = NULL; + ctx_isp->hw_acquired = false; +free_res: + CAM_MEM_FREE(acquire_hw_info); +end: + return rc; +} + +static void cam_req_mgr_process_workq_apply_req_worker(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static inline void __cam_isp_ctx_convert_hw_id_to_string( + char *ife_hw_name, uint32_t hw_idx) +{ + int num_hw = 0, len = 0; + char tmp_buf[30]; + + if (hw_idx & CAM_ISP_IFE0_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "IFE0 "); + num_hw++; + } + + if (hw_idx & CAM_ISP_IFE1_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "IFE1 "); + num_hw++; + } + + if (hw_idx & CAM_ISP_IFE2_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "IFE2 "); + num_hw++; + } + + if (hw_idx & CAM_ISP_IFE0_LITE_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "IFE0_LITE "); + num_hw++; + } + + if (hw_idx & CAM_ISP_IFE1_LITE_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "IFE1_LITE "); + num_hw++; + } + + if (hw_idx & CAM_ISP_IFE2_LITE_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "IFE2_LITE "); + num_hw++; + } + + if (hw_idx & CAM_ISP_IFE3_LITE_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "IFE3_LITE "); + num_hw++; + } + + if (hw_idx & CAM_ISP_IFE4_LITE_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "IFE4_LITE "); + num_hw++; + } + + if (hw_idx & CAM_ISP_SFE0_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "SFE0 "); + num_hw++; + } + + if (hw_idx & CAM_ISP_SFE1_HW) { + len += snprintf(ife_hw_name + len, 
sizeof(ife_hw_name) - len, + "SFE1 "); + num_hw++; + } + + if (hw_idx & CAM_ISP_SFE2_HW) { + len += snprintf(ife_hw_name + len, sizeof(ife_hw_name) - len, + "SFE2 "); + num_hw++; + } + + if ((num_hw <= 0) || (num_hw > 2)) { + CAM_WARN(CAM_ISP, "Wrong hw id, hw id: 0x%x, num_hw: %d", hw_idx, num_hw); + return; + } + + if (num_hw == 2) { + snprintf(tmp_buf, sizeof(tmp_buf), "Dual: %s", ife_hw_name); + len = snprintf(ife_hw_name, sizeof(ife_hw_name), "%s", tmp_buf); + } + + /* Remove the last space */ + if ((len > 0) && (len < sizeof(ife_hw_name))) + ife_hw_name[len - 1] = '\0'; +} + +static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx, + void *args) +{ + int rc = 0, i, j; + struct cam_acquire_hw_cmd_v2 *cmd = + (struct cam_acquire_hw_cmd_v2 *)args; + struct cam_hw_acquire_args param; + struct cam_hw_release_args release; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + struct cam_isp_acquire_hw_info *acquire_hw_info = NULL; + struct cam_isp_comp_record_query query_cmd; + struct cam_req_mgr_notify_msg msg = {0}; + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_ISP, "HW interface is not ready, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto end; + } + + CAM_DBG(CAM_ISP, + "session_hdl 0x%x, hdl type %d, res %lld, ctx_id %u link: 0x%x", + cmd->session_handle, cmd->handle_type, cmd->resource_hdl, + ctx->ctx_id, ctx->link_hdl); + + /* for now we only support user pointer */ + if (cmd->handle_type != 1) { + CAM_ERR(CAM_ISP, "Only user pointer is supported, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EINVAL; + goto end; + } + + if (cmd->data_size < sizeof(*acquire_hw_info)) { + CAM_ERR(CAM_ISP, "data_size is not a valid value, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto end; + } + + acquire_hw_info = CAM_MEM_ZALLOC(cmd->data_size, GFP_KERNEL); + if (!acquire_hw_info) { + rc = -ENOMEM; + goto 
end; + } + + CAM_DBG(CAM_ISP, "start copy resources from user, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl, + cmd->data_size)) { + rc = -EFAULT; + goto free_res; + } + + memset(&param, 0, sizeof(param)); + param.context_data = ctx; + param.event_cb = ctx->irq_cb_intf; + param.sec_pf_evt_cb = cam_context_dump_pf_info; + param.num_acq = CAM_API_COMPAT_CONSTANT; + param.acquire_info_size = cmd->data_size; + param.acquire_info = (uint64_t) acquire_hw_info; + param.mini_dump_cb = __cam_isp_ctx_minidump_cb; + param.ctx_id = ctx->ctx_id; + + /* call HW manager to reserve the resource */ + rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv, + &param); + if (rc != 0) { + CAM_ERR(CAM_ISP, "Acquire device failed, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto free_res; + } + + /* Deliver FCG caps */ + ctx_isp->fcg_tracker.fcg_caps = (struct cam_isp_fcg_caps *)param.op_params.fcg_caps; + if (!ctx_isp->fcg_tracker.fcg_caps) { + CAM_ERR(CAM_ISP, "Failed in getting FCG caps, ctx_id: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto free_res; + } + + rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param); + if (rc) { + CAM_ERR(CAM_ISP, "Ctx[%u] link: 0x%x allocate hw entry fail", + ctx->ctx_id, ctx->link_hdl); + goto free_hw; + } + + /* + * Set feature flag if applicable + * custom hw is supported only on v2 + */ + ctx_isp->custom_enabled = + (param.op_flags & CAM_IFE_CTX_CUSTOM_EN); + ctx_isp->use_frame_header_ts = + (param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN); + ctx_isp->use_default_apply = + (param.op_flags & CAM_IFE_CTX_APPLY_DEFAULT_CFG); + ctx_isp->support_consumed_addr = + (param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN); + ctx_isp->aeb_enabled = + (param.op_flags & CAM_IFE_CTX_AEB_EN); + ctx_isp->mode_switch_en = + (param.op_flags & CAM_IFE_CTX_DYNAMIC_SWITCH_EN); + ctx_isp->sfe_en = + (param.op_flags & CAM_IFE_CTX_SFE_EN); + + /* Query the context bus 
comp group information */ + ctx_isp->vfe_bus_comp_grp = CAM_MEM_ZALLOC_ARRAY(CAM_IFE_BUS_COMP_NUM_MAX, + sizeof(struct cam_isp_context_comp_record), GFP_KERNEL); + if (!ctx_isp->vfe_bus_comp_grp) { + CAM_ERR(CAM_CTXT, "%s[%d] no memory for vfe_bus_comp_grp", + ctx->dev_name, ctx->ctx_id); + rc = -ENOMEM; + goto end; + } + + if (param.op_flags & CAM_IFE_CTX_SFE_EN) { + ctx_isp->sfe_bus_comp_grp = CAM_MEM_ZALLOC_ARRAY(CAM_SFE_BUS_COMP_NUM_MAX, + sizeof(struct cam_isp_context_comp_record), GFP_KERNEL); + if (!ctx_isp->sfe_bus_comp_grp) { + CAM_ERR(CAM_CTXT, "%s[%d] no memory for sfe_bus_comp_grp", + ctx->dev_name, ctx->ctx_id); + rc = -ENOMEM; + goto end; + } + } + + query_cmd.vfe_bus_comp_grp = ctx_isp->vfe_bus_comp_grp; + if (ctx_isp->sfe_bus_comp_grp) + query_cmd.sfe_bus_comp_grp = ctx_isp->sfe_bus_comp_grp; + hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_BUS_COMP_GROUP; + isp_hw_cmd_args.cmd_data = &query_cmd; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed"); + goto free_hw; + } + + /* Query the context has rdi only resource */ + hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto free_hw; + } + + if (param.valid_acquired_hw) { + for (i = 0; i < CAM_MAX_ACQ_RES; i++) + cmd->hw_info.acquired_hw_id[i] = + param.acquired_hw_id[i]; + + for (i = 0; i < CAM_MAX_ACQ_RES; i++) + for (j = 0; j < CAM_MAX_HW_SPLIT; j++) + cmd->hw_info.acquired_hw_path[i][j] = + 
param.acquired_hw_path[i][j]; + + ctx_isp->hw_idx = param.acquired_hw_id[0]; + } + + /* Update CRM with the hw idx */ + if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_msg) { + msg.link_hdl = ctx->link_hdl; + msg.dev_hdl = ctx->dev_hdl; + msg.msg_type = CAM_REQ_MGR_MSG_UPDATE_DEVICE_INFO; + __cam_isp_ctx_convert_hw_id_to_string(&msg.u.ife_hw_name[0], ctx_isp->hw_idx); + rc = ctx->ctx_crm_intf->notify_msg(&msg); + if (rc) { + CAM_WARN(CAM_ISP, "Failed at updating IFE hw idx to CRM"); + rc = 0; + } + } + + cmd->hw_info.valid_acquired_hw = + param.valid_acquired_hw; + + cmd->hw_info.valid_acquired_hw = param.valid_acquired_hw; + + if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_RDI) { + /* + * this context has rdi only resource assign rdi only + * state machine + */ + CAM_DBG(CAM_ISP, "RDI only session Context, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + ctx_isp->substate_machine_irq = + cam_isp_ctx_rdi_only_activated_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_rdi_only_activated_state_machine; + ctx_isp->rdi_only_context = true; + } else if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_FS2) { + CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_fs2_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_fs2_state_machine; + } else if (isp_hw_cmd_args.u.ctx_info.type == CAM_ISP_CTX_OFFLINE) { + CAM_DBG(CAM_ISP, "Offline Session has PIX and RD resources, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_offline_state_machine_irq; + ctx_isp->substate_machine = NULL; + ctx_isp->offline_context = true; + } else { + CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx_isp->substate_machine_irq = + cam_isp_ctx_activated_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_activated_state_machine; + } 
+ + if (ctx_isp->offline_context || ctx_isp->vfps_aux_context) { + rc = cam_req_mgr_workq_create("ife_apply_req", 20, + &ctx_isp->workq, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_workq_apply_req_worker); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to create workq for IFE rc:%d offline: %s vfps: %s ctx_id %u link: 0x%x", + rc, CAM_BOOL_TO_YESNO(ctx_isp->offline_context), + CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context), + ctx->ctx_id, ctx->link_hdl); + } + + ctx_isp->hw_ctx = param.ctxt_to_hw_map; + ctx_isp->hw_acquired = true; + ctx->ctxt_to_hw_map = param.ctxt_to_hw_map; + ctx->hw_mgr_ctx_id = param.hw_mgr_ctx_id; + ctx_isp->bubble_recover_dis = isp_hw_cmd_args.u.ctx_info.bubble_recover_dis; + + snprintf(ctx->ctx_id_string, sizeof(ctx->ctx_id_string), + "%s_ctx[%d]_hwmgrctx[%d]_hwidx[0x%x]", + ctx->dev_name, + ctx->ctx_id, + ctx->hw_mgr_ctx_id, + ctx_isp->hw_idx); + + trace_cam_context_state("ISP", ctx); + CAM_INFO(CAM_ISP, + "Acquire success: session_hdl 0x%xs ctx_type %d ctx %u link: 0x%x hw_mgr_ctx: %u", + ctx->session_hdl, isp_hw_cmd_args.u.ctx_info.type, ctx->ctx_id, ctx->link_hdl, + param.hw_mgr_ctx_id); + CAM_MEM_FREE(acquire_hw_info); + return rc; + +free_hw: + release.ctxt_to_hw_map = ctx_isp->hw_ctx; + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release); + ctx_isp->hw_ctx = NULL; + ctx_isp->hw_acquired = false; +free_res: + CAM_MEM_FREE(acquire_hw_info); +end: + return rc; +} + +static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx, + void *args) +{ + int rc = -EINVAL; + uint32_t api_version; + + if (!ctx || !args) { + CAM_ERR(CAM_ISP, "Invalid input pointer"); + return rc; + } + + api_version = *((uint32_t *)args); + if (api_version == 1) + rc = __cam_isp_ctx_acquire_hw_v1(ctx, args); + else if (api_version == 2) + rc = __cam_isp_ctx_acquire_hw_v2(ctx, args); + else + CAM_ERR(CAM_ISP, "Unsupported api version %d, ctx_id %u link: 0x%x", + api_version, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int 
__cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + if (!ctx_isp->hw_acquired) { + CAM_ERR(CAM_ISP, "HW is not acquired, reject packet, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd); + + if (!rc && ((ctx->link_hdl >= 0) || ctx_isp->offline_context)) { + ctx->state = CAM_CTX_READY; + trace_cam_context_state("ISP", ctx); + } + + CAM_DBG(CAM_ISP, "next state %d, ctx %u link: 0x%x", + ctx->state, ctx->ctx_id, ctx->link_hdl); + return rc; +} + +static int __cam_isp_ctx_issue_resume_util(struct cam_context *ctx) +{ + int rc; + struct cam_start_stop_dev_cmd start_cmd; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + spin_lock_bh(&ctx->lock); + if (unlikely(ctx->state != CAM_CTX_FLUSHED)) { + spin_unlock_bh(&ctx->lock); + CAM_ERR(CAM_ISP, + "Cannot perform a resume if not in flushed state ctx: %u link: 0x%x state: %u", + ctx->ctx_id, ctx->link_hdl, ctx->state); + return -EINVAL; + } + spin_unlock_bh(&ctx->lock); + + hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d, ctx_id %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + + start_cmd.dev_handle = ctx->dev_hdl; + start_cmd.session_handle = ctx->session_hdl; + rc = __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to re-start HW after flush rc: %d, ctx_id %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + else + 
CAM_INFO(CAM_ISP, + "Received init after flush. Re-start HW complete in ctx:%d, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + CAM_DBG(CAM_ISP, "next state %d sub_state:%d ctx_id %u link: 0x%x", ctx->state, + ctx_isp->substate_activated, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_config_dev_in_flushed(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + if (!ctx_isp->hw_acquired) { + CAM_ERR(CAM_ISP, "HW is not acquired, reject packet, ctx_id %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EINVAL; + goto end; + } + + rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd); + if (rc) + goto end; + + if (!ctx_isp->init_received) { + CAM_WARN(CAM_ISP, + "Received update pckt in flushed state, skip start, ctx %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + goto end; + } + + CAM_DBG(CAM_ISP, "vfps_ctx:%s resume_hw_in_flushed:%d ctx:%u link: 0x%x", + CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context), + ctx_isp->resume_hw_in_flushed, + ctx->ctx_id, ctx->link_hdl); + + if (ctx_isp->vfps_aux_context) { + /* Resume the HW only when we get first valid req */ + if (!ctx_isp->resume_hw_in_flushed) + goto end; + else + ctx_isp->resume_hw_in_flushed = false; + } else if (ctx_isp->standby_en && (ctx_isp->num_inits_post_flush < + CAM_ISP_CONTEXT_NUM_INIT_REQ_RCVD_POST_FLUSH)) { + CAM_DBG(CAM_ISP, + "Received %u number of INIT packets, expecting %u to resume on ctx: %u link: 0x%x", + ctx_isp->num_inits_post_flush, + CAM_ISP_CONTEXT_NUM_INIT_REQ_RCVD_POST_FLUSH, ctx->ctx_id, ctx->link_hdl); + goto end; + } + + if (ctx_isp->standby_en) { + /* Notify for synced resume */ + if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_msg) { + struct cam_req_mgr_notify_msg msg = {0}; + + msg.link_hdl = ctx->link_hdl; + msg.dev_hdl = ctx->dev_hdl; + msg.msg_type = CAM_REQ_MGR_MSG_NOTIFY_FOR_SYNCED_RESUME; + rc = ctx->ctx_crm_intf->notify_msg(&msg); + if (rc) + 
CAM_ERR(CAM_ISP, + "Failed at notifying for synced resume on ctx: %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + return rc; + } + } + + rc = __cam_isp_ctx_issue_resume_util(ctx); + +end: + return rc; +} + +static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *link) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + if (!link) { + CAM_ERR(CAM_ISP, "setup link info is null: %pK ctx: %u link: 0x%x", + link, ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + if (!link->crm_cb) { + CAM_ERR(CAM_ISP, "crm cb is null: %pK ctx: %u, link: 0x%x", + link->crm_cb, ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "Enter.........ctx: %u, link: 0x%x", ctx->ctx_id, ctx->link_hdl); + + ctx->link_hdl = link->link_hdl; + ctx->ctx_crm_intf = link->crm_cb; + ctx_isp->subscribe_event = + CAM_TRIGGER_POINT_SOF | CAM_TRIGGER_POINT_EOF; + ctx_isp->trigger_id = link->trigger_id; + ctx_isp->mswitch_default_apply_delay_max_cnt = 0; + atomic_set(&ctx_isp->mswitch_default_apply_delay_ref_cnt, 0); + + if ((link->mode_switch_max_delay - CAM_MODESWITCH_DELAY_1) > 0) { + ctx_isp->handle_mswitch = true; + ctx_isp->mswitch_default_apply_delay_max_cnt = + link->mode_switch_max_delay - CAM_MODESWITCH_DELAY_1; + CAM_DBG(CAM_ISP, + "Enabled mode switch handling on ctx: %u max delay cnt: %u", + ctx->ctx_id, ctx_isp->mswitch_default_apply_delay_max_cnt); + atomic_set(&ctx_isp->mswitch_default_apply_delay_ref_cnt, + ctx_isp->mswitch_default_apply_delay_max_cnt); + } + + /* change state only if we had the init config */ + if (ctx_isp->init_received) { + ctx->state = CAM_CTX_READY; + trace_cam_context_state("ISP", ctx); + } + + CAM_DBG(CAM_ISP, "next state %d, ctx: %u, link: 0x%x", + ctx->state, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink) +{ + int rc = 
0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + ctx->link_hdl = -1; + ctx->ctx_crm_intf = NULL; + ctx_isp->trigger_id = -1; + ctx_isp->mswitch_default_apply_delay_max_cnt = 0; + atomic_set(&ctx_isp->mswitch_default_apply_delay_ref_cnt, 0); + + return rc; +} + +static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx, + struct cam_req_mgr_device_info *dev_info) +{ + int rc = 0; + struct cam_isp_context *isp_ctx = (struct cam_isp_context *)ctx->ctx_priv; + + dev_info->dev_hdl = ctx->dev_hdl; + strscpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name)); + dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE; + dev_info->p_delay = CAM_PIPELINE_DELAY_1; + dev_info->m_delay = CAM_MODESWITCH_DELAY_1; + dev_info->trigger = CAM_TRIGGER_POINT_SOF; + dev_info->trigger_on = true; + dev_info->resume_sync_on = true; + isp_ctx->standby_en = true; + + return rc; +} + +static inline void __cam_isp_context_reset_ctx_params( + struct cam_isp_context *ctx_isp) +{ + atomic_set(&ctx_isp->process_bubble, 0); + atomic_set(&ctx_isp->rxd_epoch, 0); + atomic_set(&ctx_isp->internal_recovery_set, 0); + ctx_isp->frame_id = 0; + ctx_isp->sof_timestamp_val = 0; + ctx_isp->boot_timestamp = 0; + ctx_isp->active_req_cnt = 0; + ctx_isp->reported_req_id = 0; + ctx_isp->reported_frame_id = 0; + ctx_isp->bubble_frame_cnt = 0; + ctx_isp->congestion_cnt = 0; + ctx_isp->recovery_req_id = 0; + ctx_isp->aeb_error_cnt = 0; + ctx_isp->out_of_sync_cnt = 0; + ctx_isp->sof_dbg_irq_en = false; + ctx_isp->last_sof_jiffies = 0; + ctx_isp->last_applied_jiffies = 0; + ctx_isp->num_inits_post_flush = 0; + ctx_isp->standby_en = false; +} + +static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc = 0; + int i; + struct cam_isp_start_args start_isp; + struct cam_ctx_request *req; + struct cam_isp_ctx_req *req_isp; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + if 
(cmd->session_handle != ctx->session_hdl || + cmd->dev_handle != ctx->dev_hdl) { + rc = -EPERM; + goto end; + } + + if (list_empty(&ctx->pending_req_list)) { + /* should never happen */ + CAM_ERR(CAM_ISP, "Start device with empty configuration, ctx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto end; + } else { + req = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + } + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + if (!ctx_isp->hw_ctx) { + CAM_ERR(CAM_ISP, "Wrong hw context pointer.ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto end; + } + + start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx; + start_isp.hw_config.request_id = req->request_id; + start_isp.hw_config.hw_update_entries = req_isp->cfg; + start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg; + start_isp.hw_config.priv = &req_isp->hw_update_data; + start_isp.hw_config.init_packet = 1; + start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_NONE; + start_isp.hw_config.cdm_reset_before_apply = false; + start_isp.is_internal_start = false; + + ctx_isp->last_applied_req_id = req->request_id; + + if (ctx->state == CAM_CTX_FLUSHED) + start_isp.start_only = true; + else + start_isp.start_only = false; + + __cam_isp_context_reset_ctx_params(ctx_isp); + + ctx_isp->substate_activated = ctx_isp->rdi_only_context ? + CAM_ISP_CTX_ACTIVATED_APPLIED : + (req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH : + CAM_ISP_CTX_ACTIVATED_SOF; + + atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1); + atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1); + + for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) + atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1); + + /* + * In case of CSID TPG we might receive SOF and RUP IRQs + * before hw_mgr_intf->hw_start has returned. So move + * req out of pending list before hw_start and add it + * back to pending list if hw_start fails. 
+ */ + list_del_init(&req->list); + + if (ctx_isp->offline_context && !req_isp->num_fence_map_out) { + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + __cam_isp_ctx_move_req_to_free_list(ctx, req); + atomic_set(&ctx_isp->rxd_epoch, 1); + CAM_DBG(CAM_REQ, + "Move pending req: %lld to free list(cnt: %d) offline ctx %u link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + } else if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) { + list_add_tail(&req->list, &ctx->wait_req_list); + CAM_DBG(CAM_REQ, + "Move pending req: %lld to wait list(cnt: %d) ctx %u link: 0x%x", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl); + } else { + list_add_tail(&req->list, &ctx->active_req_list); + ctx_isp->active_req_cnt++; + CAM_DBG(CAM_REQ, + "Move pending req: %lld to active list(cnt: %d) ctx %u link: 0x%x offline %d", + req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl, + ctx_isp->offline_context); + } + + /* + * Only place to change state before calling the hw due to + * hardware tasklet has higher priority that can cause the + * irq handling comes early + */ + ctx->state = CAM_CTX_ACTIVATED; + trace_cam_context_state("ISP", ctx); + rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, + &start_isp); + if (rc) { + /* HW failure. 
user need to clean up the resource */ + CAM_ERR(CAM_ISP, "Start HW failed, ctx %u link: 0x%x", ctx->ctx_id, ctx->link_hdl); + ctx->state = CAM_CTX_READY; + if ((rc == -ETIMEDOUT) && + (isp_ctx_debug.enable_cdm_cmd_buff_dump)) + rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false); + + trace_cam_context_state("ISP", ctx); + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + goto end; + } + CAM_DBG(CAM_ISP, "start device success ctx %u link: 0x%x", ctx->ctx_id, ctx->link_hdl); + +end: + return rc; +} + +static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink) +{ + int rc = 0; + + ctx->link_hdl = -1; + ctx->ctx_crm_intf = NULL; + ctx->state = CAM_CTX_ACQUIRED; + trace_cam_context_state("ISP", ctx); + + return rc; +} + +static int __cam_isp_ctx_stop_dev_in_activated_unlock( + struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd) +{ + int rc = 0; + uint32_t i; + struct cam_hw_stop_args stop; + struct cam_ctx_request *req; + struct cam_isp_ctx_req *req_isp; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_isp_stop_args stop_isp = {0}; + + /* Mask off all the incoming hardware events */ + spin_lock_bh(&ctx->lock); + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT; + spin_unlock_bh(&ctx->lock); + + /* stop hw first */ + if (ctx_isp->hw_ctx) { + stop.ctxt_to_hw_map = ctx_isp->hw_ctx; + + stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY; + stop_isp.stop_only = false; + stop_isp.is_internal_stop = false; + + stop.args = (void *) &stop_isp; + ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv, + &stop); + } + + CAM_DBG(CAM_ISP, "next Substate[%s], ctx_idx: %u, link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + if (ctx->ctx_crm_intf && + ctx->ctx_crm_intf->notify_stop) { + struct cam_req_mgr_notify_stop notify; + + notify.link_hdl = ctx->link_hdl; + 
CAM_DBG(CAM_ISP, + "Notify CRM about device stop ctx %u link 0x%x", + ctx->ctx_id, ctx->link_hdl); + ctx->ctx_crm_intf->notify_stop(&notify); + } else if (!ctx_isp->offline_context) + CAM_ERR(CAM_ISP, "cb not present, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + while (!list_empty(&ctx->pending_req_list)) { + req = list_first_entry(&ctx->pending_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d ctx:%u, link: 0x%x", + req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl); + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (req_isp->fence_map_out[i].sync_id != -1) { + cam_sync_signal( + req_isp->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_ISP_EVENT_HW_STOP); + } + + if (req_isp->fence_map_out[i].early_sync_id > 0) { + rc = cam_sync_signal( + req_isp->fence_map_out[i].early_sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_ISP_EVENT_HW_STOP); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[i].early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + req_isp->fence_map_out[i].early_sync_id = -1; + } + } + + __cam_isp_ctx_move_req_to_free_list(ctx, req); + } + + while (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + CAM_DBG(CAM_ISP, "signal fence in wait list. 
fence num %d ctx: %u, link: 0x%x", + req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl); + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (req_isp->fence_map_out[i].sync_id != -1) { + cam_sync_signal( + req_isp->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_ISP_EVENT_HW_STOP); + } + + if (req_isp->fence_map_out[i].early_sync_id > 0) { + rc = cam_sync_signal( + req_isp->fence_map_out[i].early_sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_ISP_EVENT_HW_STOP); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[i].early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + req_isp->fence_map_out[i].early_sync_id = -1; + } + + } + + __cam_isp_ctx_move_req_to_free_list(ctx, req); + } + + while (!list_empty(&ctx->active_req_list)) { + req = list_first_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + list_del_init(&req->list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + CAM_DBG(CAM_ISP, "signal fence in active list. 
fence num %d ctx: %u, link: 0x%x", + req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl); + for (i = 0; i < req_isp->num_fence_map_out; i++) { + if (req_isp->fence_map_out[i].sync_id != -1) { + cam_sync_signal( + req_isp->fence_map_out[i].sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_ISP_EVENT_HW_STOP); + } + + if (req_isp->fence_map_out[i].early_sync_id > 0) { + rc = cam_sync_signal( + req_isp->fence_map_out[i].early_sync_id, + CAM_SYNC_STATE_SIGNALED_CANCEL, + CAM_SYNC_ISP_EVENT_HW_STOP); + if (rc) { + CAM_ERR(CAM_ISP, + "Early sync=%d for req=%llu failed with rc=%d ctx:%u link[0x%x]", + req_isp->fence_map_out[i].early_sync_id, + req->request_id, rc, ctx->ctx_id, + ctx->link_hdl); + } + + req_isp->fence_map_out[i].early_sync_id = -1; + } + } + + __cam_isp_ctx_move_req_to_free_list(ctx, req); + } + + ctx_isp->frame_id = 0; + ctx_isp->active_req_cnt = 0; + ctx_isp->reported_req_id = 0; + ctx_isp->reported_frame_id = 0; + ctx_isp->last_applied_req_id = 0; + ctx_isp->req_info.last_bufdone_req_id = 0; + ctx_isp->bubble_frame_cnt = 0; + ctx_isp->congestion_cnt = 0; + ctx_isp->sof_dbg_irq_en = false; + atomic_set(&ctx_isp->process_bubble, 0); + atomic_set(&ctx_isp->internal_recovery_set, 0); + atomic_set(&ctx_isp->rxd_epoch, 0); + atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1); + atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1); + + /* Reset skipped_list for FCG config */ + __cam_isp_ctx_reset_fcg_tracker(ctx); + + for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) + atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1); + + CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u link: 0x%x", + ctx->state, ctx->ctx_id, ctx->link_hdl); + + if (!stop_cmd) { + rc = __cam_isp_ctx_unlink_in_ready(ctx, NULL); + if (rc) + CAM_ERR(CAM_ISP, "Unlink failed rc=%d, ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + } + return rc; +} + +static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx, + struct 
cam_start_stop_dev_cmd *cmd) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *)ctx->ctx_priv; + + __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd); + ctx_isp->init_received = false; + ctx->state = CAM_CTX_ACQUIRED; + trace_cam_context_state("ISP", ctx); + return rc; +} + +static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc = 0; + + rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL); + if (rc) + CAM_ERR(CAM_ISP, "Stop device failed rc=%d, ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd); + if (rc) + CAM_ERR(CAM_ISP, "Release device failed rc=%d ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx, + void *cmd) +{ + int rc = 0; + + rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL); + if (rc) + CAM_ERR(CAM_ISP, "Stop device failed rc=%d, ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd); + if (rc) + CAM_ERR(CAM_ISP, "Release hw failed rc=%d, ctx %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_link_pause(struct cam_context *ctx) +{ + int rc = 0; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + + return rc; +} + +static int __cam_isp_ctx_link_resume(struct cam_context *ctx) +{ + int rc = 0; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + hw_cmd_args.cmd_type = 
CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + + return rc; +} + +static int __cam_isp_ctx_reset_and_recover( + bool skip_resume, struct cam_context *ctx) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *)ctx->ctx_priv; + struct cam_isp_stop_args stop_isp; + struct cam_hw_stop_args stop_args; + struct cam_isp_start_args start_isp; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + struct cam_ctx_request *req; + struct cam_isp_ctx_req *req_isp; + + spin_lock_bh(&ctx->lock); + if (ctx_isp->active_req_cnt) { + spin_unlock_bh(&ctx->lock); + CAM_WARN(CAM_ISP, + "Active list not empty: %u in ctx: %u on link: 0x%x, retry recovery for req: %lld after buf_done", + ctx_isp->active_req_cnt, ctx->ctx_id, + ctx->link_hdl, ctx_isp->recovery_req_id); + goto end; + } + + if (ctx->state != CAM_CTX_ACTIVATED) { + spin_unlock_bh(&ctx->lock); + CAM_ERR(CAM_ISP, + "In wrong state %d, for recovery ctx: %u in link: 0x%x recovery req: %lld", + ctx->state, ctx->ctx_id, + ctx->link_hdl, ctx_isp->recovery_req_id); + rc = -EINVAL; + goto end; + } + + if (list_empty(&ctx->pending_req_list)) { + /* Cannot start with no request */ + spin_unlock_bh(&ctx->lock); + CAM_ERR(CAM_ISP, + "Failed to reset and recover last_applied_req: %llu in ctx: %u on link: 0x%x", + ctx_isp->last_applied_req_id, ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto end; + } + + if (!ctx_isp->hw_ctx) { + spin_unlock_bh(&ctx->lock); + CAM_ERR(CAM_ISP, + "Invalid hw context pointer ctx: %u on link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + goto end; + } + + /* Block all events till HW is resumed */ + ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT; + atomic_set(&ctx_isp->unserved_rup, 0); + + req = list_first_entry(&ctx->pending_req_list, + struct 
cam_ctx_request, list); + spin_unlock_bh(&ctx->lock); + + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + + CAM_INFO(CAM_ISP, + "Trigger Halt, Reset & Resume for req: %llu ctx: %u in state: %d link: 0x%x", + req->request_id, ctx->ctx_id, ctx->state, ctx->link_hdl); + + stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY; + stop_isp.stop_only = true; + stop_isp.is_internal_stop = true; + stop_args.args = (void *)&stop_isp; + rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv, + &stop_args); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to stop HW rc: %d ctx: %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + goto end; + } + CAM_DBG(CAM_ISP, "Stop HW success ctx: %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + /* API provides provision to stream off and not resume as well in case of fatal errors */ + if (skip_resume) { + atomic_set(&ctx_isp->internal_recovery_set, 0); + CAM_INFO(CAM_ISP, + "Halting streaming off IFE/SFE ctx: %u last_applied_req: %lld [recovery_req: %lld] on link: 0x%x", + ctx->ctx_id, ctx_isp->last_applied_req_id, + ctx_isp->recovery_req_id, ctx->link_hdl); + goto end; + } + + hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d ctx: %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + goto end; + } + CAM_DBG(CAM_ISP, "Resume call success ctx: %u on link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx; + start_isp.hw_config.request_id = req->request_id; + start_isp.hw_config.hw_update_entries = req_isp->cfg; + start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg; + start_isp.hw_config.priv = &req_isp->hw_update_data; + 
start_isp.hw_config.init_packet = 1; + start_isp.hw_config.reapply_type = CAM_CONFIG_REAPPLY_IQ; + start_isp.hw_config.cdm_reset_before_apply = false; + start_isp.start_only = true; + start_isp.is_internal_start = true; + + __cam_isp_context_reset_internal_recovery_params(ctx_isp); + + ctx_isp->substate_activated = ctx_isp->rdi_only_context ? + CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF; + + rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, + &start_isp); + if (rc) { + CAM_ERR(CAM_ISP, "Start HW failed, ctx: %u link: 0x%x", ctx->ctx_id, ctx->link_hdl); + ctx->state = CAM_CTX_READY; + goto end; + } + + /* IQ applied for this request, on next trigger skip IQ cfg */ + req_isp->reapply_type = CAM_CONFIG_REAPPLY_IO; + + /* Notify userland that KMD has done internal recovery */ + __cam_isp_ctx_notify_v4l2_error_event(CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY, + 0, req->request_id, ctx); + + CAM_INFO(CAM_ISP, + "Internal Start HW success ctx %u on link: 0x%x for req: %llu MUP: [en: %s val: %u]", + ctx->ctx_id, ctx->link_hdl, req->request_id, + CAM_BOOL_TO_YESNO(req_isp->hw_update_data.mup_en), req_isp->hw_update_data.mup_val); + +end: + return rc; +} + +static bool __cam_isp_ctx_try_internal_recovery_for_bubble( + int64_t error_req_id, struct cam_context *ctx) +{ + int rc; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *)ctx->ctx_priv; + + if (isp_ctx_debug.disable_internal_recovery_mask & + CAM_ISP_CTX_DISABLE_RECOVERY_BUBBLE) + return false; + + /* Perform recovery if bubble recovery is stalled */ + if (!atomic_read(&ctx_isp->process_bubble)) + return false; + + /* Validate if errored request has been applied */ + if (ctx_isp->last_applied_req_id < error_req_id) { + CAM_WARN(CAM_ISP, + "Skip trying for internal recovery last applied: %lld error_req: %lld for ctx: %u on link: 0x%x", + ctx_isp->last_applied_req_id, error_req_id, + ctx->ctx_id, ctx->link_hdl); + return false; + } + + if 
(__cam_isp_ctx_validate_for_req_reapply_util(ctx_isp)) { + CAM_WARN(CAM_ISP, + "Internal recovery not possible for ctx: %u on link: 0x%x req: %lld [last_applied: %lld]", + ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id); + return false; + } + + /* Trigger reset and recover */ + atomic_set(&ctx_isp->internal_recovery_set, 1); + rc = __cam_isp_ctx_reset_and_recover(false, ctx); + if (rc) { + CAM_WARN(CAM_ISP, + "Internal recovery failed in ctx: %u on link: 0x%x req: %lld [last_applied: %lld]", + ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id); + atomic_set(&ctx_isp->internal_recovery_set, 0); + goto error; + } + + CAM_DBG(CAM_ISP, + "Internal recovery done in ctx: %u on link: 0x%x req: %lld [last_applied: %lld]", + ctx->ctx_id, ctx->link_hdl, error_req_id, ctx_isp->last_applied_req_id); + + return true; + +error: + return false; +} + +static int __cam_isp_ctx_process_evt(struct cam_context *ctx, + struct cam_req_mgr_link_evt_data *link_evt_data) +{ + int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args = {0}; + + if ((ctx->state == CAM_CTX_ACQUIRED) && + (link_evt_data->evt_type != CAM_REQ_MGR_LINK_EVT_PAUSE) && + (link_evt_data->evt_type != CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES)) { + CAM_WARN(CAM_ISP, + "Get unexpect evt:%d in acquired state, ctx: %u on link: 0x%x", + link_evt_data->evt_type, ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + switch (link_evt_data->evt_type) { + case CAM_REQ_MGR_LINK_EVT_ERR: + case CAM_REQ_MGR_LINK_EVT_EOF: + /* No handling */ + break; + case CAM_REQ_MGR_LINK_EVT_PAUSE: + rc = __cam_isp_ctx_link_pause(ctx); + break; + case CAM_REQ_MGR_LINK_EVT_RESUME: + rc = __cam_isp_ctx_link_resume(ctx); + break; + case CAM_REQ_MGR_LINK_EVT_RESUME_HW: + rc = __cam_isp_ctx_issue_resume_util(ctx); + break; + case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE: + rc = 
__cam_isp_ctx_trigger_reg_dump(CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx, NULL); + if (rc) + CAM_ERR(CAM_ISP, "Reg dump on sof freeze failed ctx:%d rc:%d", + ctx->ctx_id, rc); + + rc = __cam_isp_ctx_handle_sof_freeze_evt(ctx); + break; + case CAM_REQ_MGR_LINK_EVT_STALLED: { + bool internal_recovery_skipped = false; + + if (ctx->state == CAM_CTX_ACTIVATED) { + if (link_evt_data->try_for_recovery) + internal_recovery_skipped = + __cam_isp_ctx_try_internal_recovery_for_bubble( + link_evt_data->req_id, ctx); + + if (!internal_recovery_skipped) + rc = __cam_isp_ctx_trigger_reg_dump( + CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR, ctx, NULL); + } + link_evt_data->try_for_recovery = internal_recovery_skipped; + } + break; + case CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES: + if (link_evt_data->u.properties_mask & + CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF) + ctx_isp->vfps_aux_context = true; + else + ctx_isp->vfps_aux_context = false; + CAM_DBG(CAM_ISP, "vfps_aux_context:%s on ctx: %u link: 0x%x", + CAM_BOOL_TO_YESNO(ctx_isp->vfps_aux_context), ctx->ctx_id, ctx->link_hdl); + break; + case CAM_REQ_MGR_LINK_EVT_SENSOR_FRAME_INFO: { + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_SET_DRV_INFO; + isp_hw_cmd_args.u.drv_info.req_id = link_evt_data->req_id; + isp_hw_cmd_args.u.drv_info.frame_duration = + link_evt_data->u.frame_info.frame_duration; + isp_hw_cmd_args.u.drv_info.blanking_duration = + link_evt_data->u.frame_info.blanking_duration; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to process drv info on ctx:%u link:0x%x, req_id:%llu rc:%d", + ctx->ctx_id, ctx->link_hdl, link_evt_data->req_id, rc); + } + break; + default: + CAM_WARN(CAM_ISP, + "Unsupported event type: 0x%x on ctx: %u link: 0x%x", + link_evt_data->evt_type, ctx->ctx_id, ctx->link_hdl); + rc = 
-EINVAL; + break; + } + + return rc; +} + +static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx, + struct cam_req_mgr_core_dev_link_setup *unlink) +{ + int rc = 0; + + CAM_WARN(CAM_ISP, + "Received unlink in activated state. It's unexpected, ctx: %u link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL); + if (rc) + CAM_WARN(CAM_ISP, "Stop device failed rc=%d, ctx: %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink); + if (rc) + CAM_ERR(CAM_ISP, "Unlink failed rc=%d, ctx: %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int __cam_isp_ctx_apply_req(struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_ctx_ops *ctx_ops = NULL; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + trace_cam_apply_req("ISP", ctx->ctx_id, apply->request_id, apply->link_hdl); + CAM_DBG(CAM_ISP, "Enter: apply req in Substate[%s] request_id:%lld, ctx: %u link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), apply->request_id, ctx->ctx_id, ctx->link_hdl); + ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated]; + if (ctx_ops->crm_ops.apply_req) { + rc = ctx_ops->crm_ops.apply_req(ctx, apply); + } else { + CAM_WARN_RATE_LIMIT(CAM_ISP, + "No handle function in activated Substate[%s], ctx: %u link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + } + + if (rc) + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Apply failed in active Substate[%s] rc %d, ctx: %u link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), rc, ctx->ctx_id, ctx->link_hdl); + return rc; +} + +static int __cam_isp_ctx_apply_default_settings( + struct cam_context *ctx, + struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_ctx_ops *ctx_ops = NULL; + struct 
cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_isp_fcg_prediction_tracker *fcg_tracker; + + if (!(apply->trigger_point & ctx_isp->subscribe_event)) { + CAM_WARN(CAM_ISP, + "Trigger: %u not subscribed for: %u, ctx: %u link: 0x%x", + apply->trigger_point, ctx_isp->subscribe_event, ctx->ctx_id, + ctx->link_hdl); + return 0; + } + + /* Allow apply default settings for IFE only at SOF */ + if (apply->trigger_point != CAM_TRIGGER_POINT_SOF) + return 0; + + if (atomic_read(&ctx_isp->internal_recovery_set)) + return __cam_isp_ctx_reset_and_recover(false, ctx); + + /* FCG handling */ + fcg_tracker = &ctx_isp->fcg_tracker; + if (ctx_isp->frame_id != 1) + fcg_tracker->num_skipped += 1; + CAM_DBG(CAM_ISP, + "Apply default settings, number of previous continuous skipped frames: %d, ctx_id: %d", + fcg_tracker->num_skipped, ctx->ctx_id); + + /* + * Attempt register dump in case of skip frame + * and when per request reg dump is enabled. + */ + __cam_isp_ctx_handle_reg_dump(ctx); + + /* + * Call notify frame skip for static offline cases or + * mode switch cases where IFE mode switch delay differs + * from other devices on the link + */ + if ((ctx_isp->use_default_apply) || + (ctx_isp->mode_switch_en && ctx_isp->handle_mswitch)) { + CAM_DBG(CAM_ISP, + "Enter: apply req in Substate:%d request _id:%lld ctx:%u on link:0x%x", + ctx_isp->substate_activated, apply->request_id, + ctx->ctx_id, ctx->link_hdl); + + ctx_ops = &ctx_isp->substate_machine[ + ctx_isp->substate_activated]; + if (ctx_ops->crm_ops.notify_frame_skip) { + rc = ctx_ops->crm_ops.notify_frame_skip(ctx, apply); + } else { + CAM_WARN_RATE_LIMIT(CAM_ISP, + "No handle function in activated substate %d, ctx:%u on link:0x%x", + ctx_isp->substate_activated, ctx->ctx_id, ctx->link_hdl); + rc = -EFAULT; + } + + if (rc) + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Apply default failed in active substate %d rc %d ctx: %u link: 0x%x", + ctx_isp->substate_activated, rc, ctx->ctx_id, ctx->link_hdl); + 
atomic_set(&ctx_isp->last_applied_default, 1); + } + + return rc; +} + +void __cam_isp_ctx_notify_cpas(struct cam_context *ctx, uint32_t evt_id) +{ + uint64_t request_id = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *)ctx->ctx_priv; + struct cam_ctx_request *req = NULL; + char ctx_evt_id_string[128]; + + switch (evt_id) { + case CAM_ISP_HW_EVENT_SOF: + if (!list_empty(&ctx->wait_req_list)) { + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); + request_id = req->request_id; + } else + request_id = 0; + if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_EPOCH && + !list_empty(&ctx->active_req_list)) { + req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request, list); + request_id = req->request_id; + CAM_DBG(CAM_ISP, "EPCR notify cpas"); + } + break; + case CAM_ISP_HW_EVENT_EOF: + if (!list_empty(&ctx->active_req_list)) { + req = list_first_entry(&ctx->active_req_list, struct cam_ctx_request, list); + request_id = req->request_id; + } else + request_id = 0; + break; + case CAM_ISP_HW_EVENT_EPOCH: + if (list_empty(&ctx->wait_req_list)) { + if (!list_empty(&ctx->active_req_list)) { + req = list_last_entry(&ctx->active_req_list, + struct cam_ctx_request, list); + request_id = req->request_id; + } else + request_id = 0; + } else { + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); + request_id = req->request_id; + } + break; + default: + return; + } + + snprintf(ctx_evt_id_string, sizeof(ctx_evt_id_string), + "%s_frame[%llu]_%s", + ctx->ctx_id_string, + ctx_isp->frame_id, + cam_isp_hw_evt_type_to_string(evt_id)); + CAM_DBG(CAM_ISP, "Substate[%s] ctx: %s frame: %llu evt: %s req: %llu", + __cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated), + ctx->ctx_id_string, + ctx_isp->frame_id, + cam_isp_hw_evt_type_to_string(evt_id), + request_id); + cam_cpas_notify_event(ctx_evt_id_string, request_id); +} + +static int __cam_isp_ctx_handle_irq_in_activated(void *context, + uint32_t 
evt_id, void *evt_data) +{ + int rc = 0; + struct cam_isp_ctx_irq_ops *irq_ops = NULL; + struct cam_context *ctx = (struct cam_context *)context; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *)ctx->ctx_priv; + + spin_lock(&ctx->lock); + trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id, + __cam_isp_ctx_get_event_ts(evt_id, evt_data)); + + CAM_DBG(CAM_ISP, "Enter: State %d, Substate[%s], evt id %d, ctx:%u link: 0x%x", + ctx->state, __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), evt_id, + ctx->ctx_id, ctx->link_hdl); + irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated]; + if (irq_ops->irq_ops[evt_id]) { + rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data); + } else { + CAM_DBG(CAM_ISP, + "No handle function for Substate[%s], evt id %d, ctx:%u link: 0x%x", + __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), evt_id, + ctx->ctx_id, ctx->link_hdl); + } + + if (evt_id == CAM_ISP_HW_EVENT_ERROR) + __cam_isp_ctx_dump_state_monitor_array(ctx_isp); + + if ((evt_id == CAM_ISP_HW_EVENT_SOF) || + (evt_id == CAM_ISP_HW_EVENT_EOF) || + (evt_id == CAM_ISP_HW_EVENT_EPOCH)) + __cam_isp_ctx_update_frame_timing_record(evt_id, ctx_isp); + + __cam_isp_ctx_notify_cpas(ctx, evt_id); + CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s], ctx: %u link: 0x%x", + ctx->state, __cam_isp_ctx_substate_val_to_type( + ctx_isp->substate_activated), ctx->ctx_id, ctx->link_hdl); + + spin_unlock(&ctx->lock); + return rc; +} + +static int cam_isp_context_validate_event_notify_injection(struct cam_context *ctx, + struct cam_hw_inject_evt_param *evt_params) +{ + int rc = 0; + uint32_t evt_type; + uint64_t req_id; + + req_id = evt_params->req_id; + evt_type = evt_params->u.evt_notify.evt_notify_type; + + switch (evt_type) { + case V4L_EVENT_CAM_REQ_MGR_ERROR: { + struct cam_hw_inject_err_evt_param *err_evt_params = + &evt_params->u.evt_notify.u.err_evt_params; + + switch (err_evt_params->err_type) { + case 
CAM_REQ_MGR_ERROR_TYPE_RECOVERY: + case CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE: + case CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY: + case CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY: + break; + default: + CAM_ERR(CAM_ISP, + "Invalid error type: %u for error event injection err type: %u req id: %llu ctx id: %u link: 0x%x dev hdl: %d", + err_evt_params->err_type, err_evt_params->err_code, + req_id, ctx->ctx_id, ctx->link_hdl, ctx->dev_hdl); + return -EINVAL; + } + + CAM_INFO(CAM_ISP, + "Inject ERR evt: err code: %u err type: %u req id: %llu ctx id: %u link: 0x%x dev hdl: %d", + err_evt_params->err_code, err_evt_params->err_type, + req_id, ctx->ctx_id, ctx->link_hdl, ctx->dev_hdl); + break; + } + case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: { + struct cam_hw_inject_pf_evt_param *pf_evt_params = + &evt_params->u.evt_notify.u.pf_evt_params; + bool non_fatal_en; + + rc = cam_smmu_is_cb_non_fatal_fault_en(ctx->img_iommu_hdl, &non_fatal_en); + if (rc) { + CAM_ERR(CAM_ISP, + "Fail to query whether device's cb has non-fatal enabled rc:%d, ctx id: %u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + + if (!non_fatal_en) { + CAM_ERR(CAM_ISP, + "Fail to inject pagefault event notif. 
Pagefault fatal for ISP,ctx:%u link:0x%x", + ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + CAM_INFO(CAM_ISP, + "Inject PF evt: req_id:%llu ctx:%u link:0x%x dev hdl:%d ctx found:%hhu", + req_id, ctx->ctx_id, ctx->link_hdl, ctx->dev_hdl, + pf_evt_params->ctx_found); + break; + } + default: + CAM_ERR(CAM_ISP, "Event notification type not supported: %u, ctx: %u link: 0x%x", + evt_type, ctx->ctx_id, ctx->link_hdl); + rc = -EINVAL; + } + + return rc; +} + +static int cam_isp_context_inject_evt(void *context, void *evt_args) +{ + struct cam_context *ctx = context; + struct cam_isp_context *ctx_isp = NULL; + struct cam_hw_inject_evt_param *evt_params = evt_args; + int rc = 0; + + if (!ctx || !evt_args) { + CAM_ERR(CAM_ISP, + "Invalid params ctx %s event args %s", + CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_args)); + return -EINVAL; + } + + ctx_isp = ctx->ctx_priv; + if (evt_params->inject_id == CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE) { + rc = cam_isp_context_validate_event_notify_injection(ctx, evt_params); + if (rc) { + CAM_ERR(CAM_ISP, + "Event notif injection failed validation rc:%d, ctx:%u link:0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + return rc; + } + } else { + CAM_ERR(CAM_ISP, "Bufdone err injection %u not supported by ISP,ctx:%u link:0x%x", + evt_params->inject_id, ctx->ctx_id, ctx->link_hdl); + return -EINVAL; + } + + + memcpy(&ctx_isp->evt_inject_params, evt_params, + sizeof(struct cam_hw_inject_evt_param)); + + ctx_isp->evt_inject_params.is_valid = true; + + return rc; +} + +/* top state machine */ +static struct cam_ctx_ops + cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = { + /* Uninit */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Available */ + { + .ioctl_ops = { + .acquire_dev = __cam_isp_ctx_acquire_dev_in_available, + }, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Acquired */ + { + .ioctl_ops = { + .acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired, + .release_dev = __cam_isp_ctx_release_dev_in_top_state, + 
.config_dev = __cam_isp_ctx_config_dev_in_acquired, + .flush_dev = __cam_isp_ctx_flush_dev_in_top_state, + .release_hw = __cam_isp_ctx_release_hw_in_top_state, + }, + .crm_ops = { + .link = __cam_isp_ctx_link_in_acquired, + .unlink = __cam_isp_ctx_unlink_in_acquired, + .get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired, + .process_evt = __cam_isp_ctx_process_evt, + .flush_req = __cam_isp_ctx_flush_req_in_top_state, + .dump_req = __cam_isp_ctx_dump_in_top_state, + }, + .irq_ops = NULL, + .pagefault_ops = cam_isp_context_dump_requests, + .dumpinfo_ops = cam_isp_context_info_dump, + .evt_inject_ops = cam_isp_context_inject_evt, + }, + /* Ready */ + { + .ioctl_ops = { + .start_dev = __cam_isp_ctx_start_dev_in_ready, + .release_dev = __cam_isp_ctx_release_dev_in_top_state, + .config_dev = __cam_isp_ctx_config_dev_in_top_state, + .flush_dev = __cam_isp_ctx_flush_dev_in_top_state, + .release_hw = __cam_isp_ctx_release_hw_in_top_state, + }, + .crm_ops = { + .unlink = __cam_isp_ctx_unlink_in_ready, + .flush_req = __cam_isp_ctx_flush_req_in_ready, + .dump_req = __cam_isp_ctx_dump_in_top_state, + }, + .irq_ops = NULL, + .pagefault_ops = cam_isp_context_dump_requests, + .dumpinfo_ops = cam_isp_context_info_dump, + .evt_inject_ops = cam_isp_context_inject_evt, + }, + /* Flushed */ + { + .ioctl_ops = { + .stop_dev = __cam_isp_ctx_stop_dev_in_activated, + .release_dev = __cam_isp_ctx_release_dev_in_activated, + .config_dev = __cam_isp_ctx_config_dev_in_flushed, + .release_hw = __cam_isp_ctx_release_hw_in_activated, + }, + .crm_ops = { + .unlink = __cam_isp_ctx_unlink_in_ready, + .process_evt = __cam_isp_ctx_process_evt, + .flush_req = __cam_isp_ctx_flush_req_in_flushed_state, + }, + .irq_ops = NULL, + .pagefault_ops = cam_isp_context_dump_requests, + .dumpinfo_ops = cam_isp_context_info_dump, + .evt_inject_ops = cam_isp_context_inject_evt, + .msg_cb_ops = cam_isp_context_handle_message, + }, + /* Activated */ + { + .ioctl_ops = { + .stop_dev = 
__cam_isp_ctx_stop_dev_in_activated, + .release_dev = __cam_isp_ctx_release_dev_in_activated, + .config_dev = __cam_isp_ctx_config_dev_in_top_state, + .flush_dev = __cam_isp_ctx_flush_dev_in_top_state, + .release_hw = __cam_isp_ctx_release_hw_in_activated, + }, + .crm_ops = { + .unlink = __cam_isp_ctx_unlink_in_activated, + .apply_req = __cam_isp_ctx_apply_req, + .notify_frame_skip = + __cam_isp_ctx_apply_default_settings, + .flush_req = __cam_isp_ctx_flush_req_in_top_state, + .process_evt = __cam_isp_ctx_process_evt, + .dump_req = __cam_isp_ctx_dump_in_top_state, + }, + .irq_ops = __cam_isp_ctx_handle_irq_in_activated, + .pagefault_ops = cam_isp_context_dump_requests, + .dumpinfo_ops = cam_isp_context_info_dump, + .recovery_ops = cam_isp_context_hw_recovery, + .evt_inject_ops = cam_isp_context_inject_evt, + .msg_cb_ops = cam_isp_context_handle_message, + }, +}; + +static int cam_isp_context_hw_recovery(void *priv, void *data) +{ + struct cam_context *ctx = priv; + int rc = -EPERM; + + if (ctx->hw_mgr_intf->hw_recovery) + rc = ctx->hw_mgr_intf->hw_recovery(ctx->hw_mgr_intf->hw_mgr_priv, data); + else + CAM_ERR(CAM_ISP, "hw mgr doesn't support recovery, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static void cam_isp_context_find_faulted_context(struct cam_context *ctx, + struct list_head *req_list, struct cam_hw_dump_pf_args *pf_args, bool *found) +{ + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + int rc; + + *found = false; + list_for_each_entry_safe(req, req_temp, req_list, list) { + CAM_INFO(CAM_ISP, "List req_id: %llu ctx id: %u link: 0x%x", + req->request_id, ctx->ctx_id, ctx->link_hdl); + + rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data); + if (rc) + CAM_ERR(CAM_ISP, "Failed to dump pf info, ctx_idx: %u, link: 0x%x", + ctx->ctx_id, ctx->link_hdl); + /* + * Found faulted buffer. 
Even if faulted ctx is found, but + * continue to search for faulted buffer + */ + if (pf_args->pf_context_info.mem_type != CAM_FAULT_BUF_NOT_FOUND) { + *found = true; + break; + } + } +} + +static int cam_isp_context_dump_requests(void *data, void *args) +{ + struct cam_context *ctx = (struct cam_context *)data; + struct cam_isp_context *ctx_isp; + struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args; + int rc = 0; + bool found; + + if (!ctx || !pf_args) { + CAM_ERR(CAM_ISP, "Invalid ctx %pK or pf args %pK", + ctx, pf_args); + return -EINVAL; + } + + ctx_isp = (struct cam_isp_context *)ctx->ctx_priv; + if (!ctx_isp) { + CAM_ERR(CAM_ISP, "Invalid isp ctx"); + return -EINVAL; + } + + if (pf_args->handle_sec_pf) + goto end; + + CAM_INFO(CAM_ISP, + "Iterating over active list for isp ctx %u link: 0x%x state %d", + ctx->ctx_id, ctx->link_hdl, ctx->state); + cam_isp_context_find_faulted_context(ctx, &ctx->active_req_list, + pf_args, &found); + if (found) + goto end; + + CAM_INFO(CAM_ISP, + "Iterating over waiting list of isp ctx %u link: 0x%x state %d", + ctx->ctx_id, ctx->link_hdl, ctx->state); + cam_isp_context_find_faulted_context(ctx, &ctx->wait_req_list, + pf_args, &found); + if (found) + goto end; + + /* + * In certain scenarios we observe both overflow and SMMU pagefault + * for a particular request. If overflow is handled before page fault + * we need to traverse through pending request list because if + * bubble recovery is enabled on any request we move that request + * and all the subsequent requests to the pending list while handling + * overflow error. 
+ */ + CAM_INFO(CAM_ISP, + "Iterating over pending req list of isp ctx %u link: 0x%x state %d", + ctx->ctx_id, ctx->link_hdl, ctx->state); + cam_isp_context_find_faulted_context(ctx, &ctx->pending_req_list, + pf_args, &found); + if (found) + goto end; + +end: + if (pf_args->pf_context_info.resource_type) { + ctx_isp = (struct cam_isp_context *)ctx->ctx_priv; + CAM_INFO(CAM_ISP, + "Page fault on resource:%s (0x%x) ctx id:%u link: 0x%x frame id:%d reported id:%lld applied id:%lld", + __cam_isp_resource_handle_id_to_type(ctx_isp->isp_device_type, + pf_args->pf_context_info.resource_type), + pf_args->pf_context_info.resource_type, + ctx->ctx_id, ctx->link_hdl, ctx_isp->frame_id, + ctx_isp->reported_req_id, ctx_isp->last_applied_req_id); + } + + /* + * Send PF notification to UMD if PF found on current CTX + * or it is forced to send PF notification to UMD even if no + * faulted context found + */ + if (pf_args->pf_context_info.ctx_found || + pf_args->pf_context_info.force_send_pf_evt) + rc = cam_context_send_pf_evt(ctx, pf_args); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to notify PF event to userspace rc: %d, ctx id:%u link: 0x%x", + rc, ctx->ctx_id, ctx->link_hdl); + + return rc; +} + +static int cam_isp_context_handle_message(void *context, + uint32_t msg_type, void *data) +{ + int rc = -EINVAL; + struct cam_hw_cmd_args hw_cmd_args = {0}; + struct cam_isp_hw_cmd_args isp_hw_cmd_args = {0}; + struct cam_context *ctx = (struct cam_context *)context; + + hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + + switch (msg_type) { + case CAM_SUBDEV_MESSAGE_CLOCK_UPDATE: + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_UPDATE_CLOCK; + isp_hw_cmd_args.cmd_data = data; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + + if (rc) + CAM_ERR(CAM_ISP, "Update clock rate failed rc: %d", rc); + break; + default: + CAM_ERR(CAM_ISP, "Invalid message 
type %d", msg_type);
	}
	return rc;
}

/*
 * cam_isp_context_debug_register()
 *
 * Create the "isp_ctx" debugfs subdirectory and its knobs
 * (state-monitor dump, CDM cmd-buffer dump, internal-recovery mask).
 * No-op (returns 0) when debugfs is unavailable. The created dentry is
 * cached in the module-global isp_ctx_debug so registration happens once.
 */
static int cam_isp_context_debug_register(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	if (!cam_debugfs_available())
		return 0;

	rc = cam_debugfs_create_subdir("isp_ctx", &dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_ISP, "DebugFS could not create directory!");
		return rc;
	}

	/* Store parent inode for cleanup in caller */
	isp_ctx_debug.dentry = dbgfileptr;

	debugfs_create_u32("enable_state_monitor_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_state_monitor_dump);
	debugfs_create_u8("enable_cdm_cmd_buffer_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_cdm_cmd_buff_dump);
	debugfs_create_u32("disable_internal_recovery_mask", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.disable_internal_recovery_mask);

	return 0;
}

/*
 * cam_isp_context_init()
 *
 * Initialize an ISP context: zero and populate @ctx, cross-link the
 * per-context request arrays (req_base <-> req_isp), initialize the base
 * cam_context, pre-allocate CAM_ISP_AFD_PIPELINE_DELAY FCG skip-frame
 * nodes, install the top state machine on @ctx_base, reset the debug
 * monitors, and register debugfs on first use.
 *
 * On FCG allocation failure, already-allocated skip-frame nodes are freed
 * (free_mem label). NOTE(review): cam_context_init() is NOT unwound on
 * that path — confirm cam_context_deinit() is handled by the caller.
 *
 * Returns 0 on success; -1 on NULL args (legacy, not an errno), -ENOMEM
 * on allocation failure, or the cam_context_init() error code.
 */
int cam_isp_context_init(struct cam_isp_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id,
	uint32_t isp_device_type,
	int img_iommu_hdl)
{
	int rc = -1;
	int i;
	struct cam_isp_skip_frame_info *skip_info, *temp;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_ISP, "Invalid Context");
		goto err;
	}

	/* ISP context setup */
	memset(ctx, 0, sizeof(*ctx));

	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->custom_enabled = false;
	ctx->use_frame_header_ts = false;
	ctx->use_default_apply = false;
	ctx->active_req_cnt = 0;
	ctx->reported_req_id = 0;
	ctx->bubble_frame_cnt = 0;
	ctx->congestion_cnt = 0;
	ctx->req_info.last_bufdone_req_id = 0;
	ctx->v4l2_event_sub_ids = 0;

	ctx->hw_ctx = NULL;
	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
	ctx->init_timestamp = jiffies_to_msecs(jiffies);
	ctx->isp_device_type = isp_device_type;

	/* Cross-link request wrappers with their ISP-private payloads */
	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv =
			&ctx->req_isp[i];
		ctx->req_isp[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_ISP_CTX_REQ_MAX, img_iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_ISP, "Camera Context Base init failed, ctx_idx: %u, link: 0x%x",
			ctx_base->ctx_id, ctx_base->link_hdl);
		goto err;
	}

	/* FCG related struct setup */
	INIT_LIST_HEAD(&ctx->fcg_tracker.skipped_list);
	for (i = 0; i < CAM_ISP_AFD_PIPELINE_DELAY; i++) {
		skip_info = CAM_MEM_ZALLOC(sizeof(struct cam_isp_skip_frame_info), GFP_KERNEL);
		if (!skip_info) {
			CAM_ERR(CAM_ISP,
				"Failed to allocate memory for FCG struct, ctx_idx: %u, link: %x",
				ctx_base->ctx_id, ctx_base->link_hdl);
			rc = -ENOMEM;
			goto free_mem;
		}

		list_add_tail(&skip_info->list, &ctx->fcg_tracker.skipped_list);
	}

	/* link camera context with isp context */
	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	/* initializing current state for error logging */
	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
		ctx->dbg_monitors.state_monitor[i].curr_state =
			CAM_ISP_CTX_ACTIVATED_MAX;
	}
	/* -1 marks empty ring buffers; first record lands at index 0 */
	atomic64_set(&ctx->dbg_monitors.state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx->dbg_monitors.event_record_head[i], -1);

	atomic64_set(&ctx->dbg_monitors.frame_monitor_head, -1);

	/* Register debugfs only once for all ISP contexts */
	if (!isp_ctx_debug.dentry)
		cam_isp_context_debug_register();

	return rc;

free_mem:
	/* Roll back any FCG skip-frame nodes allocated so far */
	list_for_each_entry_safe(skip_info, temp,
		&ctx->fcg_tracker.skipped_list, list) {
		list_del(&skip_info->list);
		CAM_MEM_FREE(skip_info);
		skip_info = NULL;
	}
err:
	return rc;
}

/*
 * cam_isp_context_deinit()
 *
 * Tear down an ISP context: free the FCG skip-frame list, deinit the base
 * cam_context, warn if the context is not back in the SOF substate, clear
 * the cached debugfs dentry, and zero the structure. Always returns 0.
 */
int cam_isp_context_deinit(struct cam_isp_context *ctx)
{
	struct cam_isp_skip_frame_info *skip_info, *temp;

	list_for_each_entry_safe(skip_info, temp,
		&ctx->fcg_tracker.skipped_list, list) {
		list_del(&skip_info->list);
		CAM_MEM_FREE(skip_info);
		skip_info = NULL;
	}

	if (ctx->base)
		cam_context_deinit(ctx->base);

	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
		CAM_ERR(CAM_ISP, "ISP context Substate[%s] is invalid",
			__cam_isp_ctx_substate_val_to_type(
			ctx->substate_activated));

	isp_ctx_debug.dentry = NULL;
	memset(ctx, 0, sizeof(*ctx));

	return 0;
}
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_context.h b/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_context.h
new file mode 100644
index 0000000000..5ce939f891
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_context.h
@@ -0,0 +1,666 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _CAM_ISP_CONTEXT_H_
#define _CAM_ISP_CONTEXT_H_


/* NOTE(review): angle-bracket header names below were lost in extraction — recover from upstream */
#include
#include
#include
#include

#include "cam_context.h"
#include "cam_isp_hw_mgr_intf.h"
#include "cam_req_mgr_workq.h"

#define CAM_IFE_QTIMER_MUL_FACTOR 10000
#define CAM_IFE_QTIMER_DIV_FACTOR 192

/*
 * Maximum hw resource - This number is based on the maximum
 * output port resource. The current maximum resource number
 * is 24.
+ */ +#define CAM_ISP_CTX_RES_MAX 24 + +/* max requests per ctx for isp */ +#define CAM_ISP_CTX_REQ_MAX 8 + +/* + * Maximum entries in state monitoring array for error logging + */ +#define CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES 84 + +/* + * Threshold response time in us beyond which a request is not expected + * to be with IFE hw + */ +#define CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD 100000 + +/* Number of words for dumping isp context */ +#define CAM_ISP_CTX_DUMP_NUM_WORDS 5 + +/* Number of words for dumping isp context events*/ +#define CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS 3 + +/* Number of words for dumping request info*/ +#define CAM_ISP_CTX_DUMP_REQUEST_NUM_WORDS 2 + +/* Maximum entries in event record */ +#define CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES 8 + +/* Maximum length of tag while dumping */ +#define CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN 128 + +/* AEB error count threshold */ +#define CAM_ISP_CONTEXT_AEB_ERROR_CNT_MAX 6 + +/* Debug Buffer length*/ +#define CAM_ISP_CONTEXT_DBG_BUF_LEN 1000 + +/* AFD pipeline delay for FCG configuration */ +#define CAM_ISP_AFD_PIPELINE_DELAY 3 + +/* Maximum entries in frame record */ +#define CAM_ISP_CTX_MAX_FRAME_RECORDS 5 + +/* Congestion count threshold */ +#define CAM_ISP_CONTEXT_CONGESTION_CNT_MAX 3 + +/* Number of init requests expected post flush to issue resume */ +#define CAM_ISP_CONTEXT_NUM_INIT_REQ_RCVD_POST_FLUSH 2 + +/* forward declaration */ +struct cam_isp_context; + +/* cam isp context irq handling function type */ +typedef int (*cam_isp_hw_event_cb_func)(struct cam_isp_context *ctx_isp, + void *evt_data); + +/** + * enum cam_isp_ctx_activated_substate - sub states for activated + * + */ +enum cam_isp_ctx_activated_substate { + CAM_ISP_CTX_ACTIVATED_SOF, + CAM_ISP_CTX_ACTIVATED_APPLIED, + CAM_ISP_CTX_ACTIVATED_EPOCH, + CAM_ISP_CTX_ACTIVATED_BUBBLE, + CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED, + CAM_ISP_CTX_ACTIVATED_HW_ERROR, + CAM_ISP_CTX_ACTIVATED_HALT, + CAM_ISP_CTX_ACTIVATED_MAX, +}; + +/** + * enum cam_isp_ctx_event_type 
- events for a request + * + */ +enum cam_isp_ctx_event { + CAM_ISP_CTX_EVENT_SUBMIT, + CAM_ISP_CTX_EVENT_APPLY, + CAM_ISP_CTX_EVENT_EPOCH, + CAM_ISP_CTX_EVENT_RUP, + CAM_ISP_CTX_EVENT_BUFDONE, + CAM_ISP_CTX_EVENT_SHUTTER, + CAM_ISP_CTX_EVENT_MAX +}; + +/** + * enum cam_isp_state_change_trigger - Different types of ISP events + * + */ +enum cam_isp_state_change_trigger { + CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, + CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED, + CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, + CAM_ISP_STATE_CHANGE_TRIGGER_SOF, + CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, + CAM_ISP_STATE_CHANGE_TRIGGER_DONE, + CAM_ISP_STATE_CHANGE_TRIGGER_EOF, + CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, + CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_SOF, + CAM_ISP_STATE_CHANGE_TRIGGER_SEC_EVT_EPOCH, + CAM_ISP_STATE_CHANGE_TRIGGER_FRAME_DROP, + CAM_ISP_STATE_CHANGE_TRIGGER_CDM_DONE, + CAM_ISP_STATE_CHANGE_TRIGGER_MAX +}; + +#define CAM_ISP_CTX_DISABLE_RECOVERY_AEB BIT(0) +#define CAM_ISP_CTX_DISABLE_RECOVERY_BUS_OVERFLOW BIT(1) +#define CAM_ISP_CTX_DISABLE_RECOVERY_BUBBLE BIT(2) +#define CAM_ISP_CTX_DISABLE_RECOVERY_CSID BIT(3) + +/** + * struct cam_isp_ctx_debug - Contains debug parameters + * + * @dentry: Debugfs entry + * @enable_state_monitor_dump: Enable isp state monitor dump + * @disable_internal_recovery: Disable internal kernel recovery mask + * @enable_cdm_cmd_buff_dump: Enable CDM Command buffer dump + * + */ +struct cam_isp_ctx_debug { + struct dentry *dentry; + uint32_t enable_state_monitor_dump; + uint32_t disable_internal_recovery_mask; + uint8_t enable_cdm_cmd_buff_dump; +}; + +/** + * struct cam_isp_ctx_irq_ops - Function table for handling IRQ callbacks + * + * @irq_ops: Array of handle function pointers. 
+ * + */ +struct cam_isp_ctx_irq_ops { + cam_isp_hw_event_cb_func irq_ops[CAM_ISP_HW_EVENT_MAX]; +}; + +/** + * struct cam_isp_ctx_req - ISP context request object + * + * @base: Common request object ponter + * @cfg: ISP hardware configuration array + * @num_cfg: Number of ISP hardware configuration entries + * @fence_map_out: Output fence mapping array + * @num_fence_map_out: Number of the output fence map + * @fence_map_in: Input fence mapping array + * @num_fence_map_in: Number of input fence map + * @num_acked: Count to track acked entried for output. + * If count equals the number of fence out, it means + * the request has been completed. + * @num_deferred_acks: Number of buf_dones/acks that are deferred to + * handle or signalled in special scenarios. + * Increment this count instead of num_acked and + * handle the events later where eventually + * increment num_acked. + * @deferred_fence_map_index Saves the indices of fence_map_out for which + * handling of buf_done is deferred. 
+ * @bubble_report: Flag to track if bubble report is active on + * current request + * @hw_update_data: HW update data for this request + * @reapply_type: Determines type of settings to be re-applied + * @event_timestamp: Timestamp for different stage of request + * @cdm_reset_before_apply: For bubble re-apply when buf done not coming set + * to True + * @is_reg_dump_triggered check if reg dump is already triggered,in case of + * skip frame + */ +struct cam_isp_ctx_req { + struct cam_ctx_request *base; + struct cam_hw_update_entry *cfg; + uint32_t num_cfg; + struct cam_hw_fence_map_entry *fence_map_out; + uint32_t num_fence_map_out; + struct cam_hw_fence_map_entry *fence_map_in; + uint32_t num_fence_map_in; + uint32_t num_acked; + uint32_t num_deferred_acks; + uint32_t *deferred_fence_map_index; + int32_t bubble_report; + struct cam_isp_prepare_hw_update_data hw_update_data; + enum cam_hw_config_reapply_type reapply_type; + ktime_t event_timestamp + [CAM_ISP_CTX_EVENT_MAX]; + bool bubble_detected; + bool cdm_reset_before_apply; + bool is_reg_dump_triggered; +}; + +/** + * struct cam_isp_context_state_monitor - ISP context state + * monitoring for + * debug purposes + * + * @curr_state: Current sub state that received req + * @trigger: Event type of incoming req + * @req_id: Request id + * @frame_id: Frame id based on SOFs + * @evt_time_stamp Current time stamp + * + */ +struct cam_isp_context_state_monitor { + enum cam_isp_ctx_activated_substate curr_state; + enum cam_isp_state_change_trigger trigger; + uint64_t req_id; + int64_t frame_id; + struct timespec64 evt_time_stamp; +}; + +/** + * struct cam_isp_context_req_id_info - ISP context request id + * information for bufdone. 
+ * + *@last_bufdone_req_id: Last bufdone request id + * + */ + +struct cam_isp_context_req_id_info { + int64_t last_bufdone_req_id; +}; + +struct shutter_event { + uint64_t frame_id; + uint64_t req_id; + uint32_t status; + ktime_t boot_ts; + ktime_t sof_ts; +}; + +/** + * + * + * struct cam_isp_context_event_record - Information for last 20 Events + * for a request; Submit, Apply, EPOCH, RUP, Buf done. + * + * @req_id: Last applied request id + * @timestamp: Timestamp for the event + * + */ +struct cam_isp_context_event_record { + uint64_t req_id; + ktime_t timestamp; + int event_type; + union event { + struct shutter_event shutter_event; + } event; +}; + +/** + * + * + * struct cam_isp_context_frame_timing_record - Frame timing events + * + * @sof_ts: SOF timestamp + * @eof_ts: EOF ts + * @epoch_ts: EPOCH ts + * @secondary_sof_ts: Secondary SOF ts + * + */ +struct cam_isp_context_frame_timing_record { + struct timespec64 sof_ts; + struct timespec64 eof_ts; + struct timespec64 epoch_ts; + struct timespec64 secondary_sof_ts; +}; + + +/** + * + * + * struct cam_isp_context_debug_monitors - ISP context debug monitors + * + * @state_monitor_head: State machine monitor head + * @state_monitor: State machine monitor info + * @event_record_head: Request Event monitor head + * @event_record: Request event monitor info + * @frame_monitor_head: Frame timing monitor head + * @frame_monitor: Frame timing event monitor + * + */ +struct cam_isp_context_debug_monitors { + /* State machine monitoring */ + atomic64_t state_monitor_head; + struct cam_isp_context_state_monitor state_monitor[ + CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES]; + + /* Req event monitor */ + atomic64_t event_record_head[ + CAM_ISP_CTX_EVENT_MAX]; + struct cam_isp_context_event_record event_record[ + CAM_ISP_CTX_EVENT_MAX][CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES]; + + /* Frame timing monitor */ + atomic64_t frame_monitor_head; + struct cam_isp_context_frame_timing_record frame_monitor[ + 
CAM_ISP_CTX_MAX_FRAME_RECORDS]; +}; + +/** + * struct cam_isp_skip_frame_info - FIFO Queue for number of skipped frames for + * the decision of FCG prediction + * @num_frame_skipped: Keep track of the number of skipped frames in between + * of the normal frames + * @list: List member used to append this node to a linked list + */ +struct cam_isp_skip_frame_info { + uint32_t num_frame_skipped; + struct list_head list; +}; + +/** + * struct cam_isp_fcg_prediction_tracker - Track the number of skipped frames before and + * indicate which FCG prediction should be applied + * + * @num_skipped: Number of skipped frames from previous normally applied frame + * to this normally applied frame + * @sum_skipped: Sum of the number of frames from req generation to req apply + * @skipped_list: Keep track of the number of skipped frames in between from two + * normal frames + */ +struct cam_isp_fcg_prediction_tracker { + struct cam_isp_fcg_caps *fcg_caps; + uint32_t num_skipped; + uint32_t sum_skipped; + struct list_head skipped_list; +}; + +/** + * struct cam_isp_context - ISP context object + * + * @base: Common context object pointer + * @frame_id: Frame id tracking for the isp context + * @frame_id_meta: Frame id read every epoch for the ctx + * meta from the sensor + * @substate_actiavted: Current substate for the activated state. + * @process_bubble: Atomic variable to check if ctx is still + * processing bubble. 
+ * @substate_machine: ISP substate machine for external interface + * @substate_machine_irq: ISP substate machine for irq handling + * @req_base: Common request object storage + * @req_isp: ISP private request object storage + * @hw_ctx: HW object returned by the acquire device command + * @sof_timestamp_val: Captured time stamp value at sof hw event + * @boot_timestamp: Boot time stamp for a given req_id + * @active_req_cnt: Counter for the active request + * @reported_req_id: Last reported request id + * @subscribe_event: The irq event mask that CRM subscribes to, IFE + * will invoke CRM cb at those event. + * @last_applied_req_id: Last applied request id + * @recovery_req_id: Req ID flagged for internal recovery + * @last_sof_timestamp: SOF timestamp of the last frame + * @bubble_frame_cnt: Count of the frame after bubble + * @aeb_error_cnt: Count number of times a specific AEB error scenario is + * enountered + * @out_of_sync_cnt: Out of sync error count for AEB + * @congestion_cnt: Count number of times congestion was encountered + * consecutively + * @state_monitor_head: Write index to the state monitoring array + * @req_info Request id information about last buf done + * @dbg_monitors: Debug monitors for ISP context + * @apply_in_progress Whether request apply is in progress + * @unserved_rup: Indicate there is unserved rup + * @last_applied_default: Indicate last applied setting is default or not + * @init_timestamp: Timestamp at which this context is initialized + * @isp_device_type: ISP device type + * @rxd_epoch: Indicate whether epoch has been received. 
Used to + * decide whether to apply request in offline ctx + * @workq: Worker thread for offline ife + * @trigger_id: ID provided by CRM for each ctx on the link + * @last_bufdone_err_apply_req_id: last bufdone error apply request id + * @v4l2_event_sub_ids contains individual bits representing subscribed v4l2 ids + * @evt_inject_params: event injection parameters + * @last_sof_jiffies: Record the jiffies of last sof + * @last_applied_jiffies: Record the jiffiest of last applied req + * @vfe_bus_comp_grp: Vfe bus comp group record + * @sfe_bus_comp_grp: Sfe bus comp group record + * @mswitch_default_apply_delay_max_cnt: Max mode switch delay among all devices connected + * on the same link as this ISP context + * @mswitch_default_apply_delay_ref_cnt: Ref cnt for this context to decide when to apply + * mode switch settings + * @hw_idx: Hardware ID + * @num_inits_post_flush: Number of INITs received post flush + * @fcg_tracker: FCG prediction tracker containing number of previously skipped + * frames and indicates which prediction should be used + * @rdi_only_context: Get context type information. 
+ * true, if context is rdi only context + * @offline_context: Indicate whether context is for offline IFE + * @vfps_aux_context: Indicate whether context is for VFPS aux link + * @resume_hw_in_flushed: Indicate whether resume hw in flushed state in vfps case + * @hw_acquired: Indicate whether HW resources are acquired + * @init_received: Indicate whether init config packet is received + * @split_acquire: Indicate whether a separate acquire is expected + * @custom_enabled: Custom HW enabled for this ctx + * @use_frame_header_ts: Use frame header for qtimer ts + * @support_consumed_addr: Indicate whether HW has last consumed addr reg + * @sof_dbg_irq_en: Indicates whether ISP context has enabled debug irqs + * @use_default_apply: Use default settings in case of frame skip + * @aeb_enabled: Indicate if stream is for AEB + * @bubble_recover_dis: Bubble recovery disabled + * @handle_mswitch: Indicates if IFE needs to explicitly handle mode switch + * on frame skip callback from request manager. 
+ * This is decided based on the max mode switch delay published + * by other devices on the link as part of link setup + * @mode_switch_en: Indicates if mode switch is enabled + * @sfe_en: Indicates if SFE is being used + * + */ +struct cam_isp_context { + struct cam_context *base; + + uint64_t frame_id; + uint32_t frame_id_meta; + uint32_t substate_activated; + atomic_t process_bubble; + struct cam_ctx_ops *substate_machine; + struct cam_isp_ctx_irq_ops *substate_machine_irq; + + struct cam_ctx_request req_base[CAM_ISP_CTX_REQ_MAX]; + struct cam_isp_ctx_req req_isp[CAM_ISP_CTX_REQ_MAX]; + + void *hw_ctx; + uint64_t sof_timestamp_val; + uint64_t boot_timestamp; + int32_t active_req_cnt; + int64_t reported_req_id; + uint64_t reported_frame_id; + uint32_t subscribe_event; + int64_t last_applied_req_id; + uint64_t recovery_req_id; + uint64_t last_sof_timestamp; + uint32_t bubble_frame_cnt; + uint32_t aeb_error_cnt; + uint32_t out_of_sync_cnt; + uint32_t congestion_cnt; + struct cam_isp_context_req_id_info req_info; + struct cam_isp_context_debug_monitors dbg_monitors; + atomic_t apply_in_progress; + atomic_t unserved_rup; + atomic_t last_applied_default; + atomic_t internal_recovery_set; + unsigned int init_timestamp; + uint32_t isp_device_type; + atomic_t rxd_epoch; + struct cam_req_mgr_core_workq *workq; + int32_t trigger_id; + int64_t last_bufdone_err_apply_req_id; + uint32_t v4l2_event_sub_ids; + struct cam_hw_inject_evt_param evt_inject_params; + uint64_t last_sof_jiffies; + uint64_t last_applied_jiffies; + struct cam_isp_context_comp_record *vfe_bus_comp_grp; + struct cam_isp_context_comp_record *sfe_bus_comp_grp; + int32_t mswitch_default_apply_delay_max_cnt; + atomic_t mswitch_default_apply_delay_ref_cnt; + uint32_t hw_idx; + uint32_t num_inits_post_flush; + struct cam_isp_fcg_prediction_tracker fcg_tracker; + bool rdi_only_context; + bool offline_context; + bool vfps_aux_context; + bool resume_hw_in_flushed; + bool hw_acquired; + bool init_received; + bool 
split_acquire; + bool custom_enabled; + bool use_frame_header_ts; + bool support_consumed_addr; + bool sof_dbg_irq_en; + bool use_default_apply; + bool aeb_enabled; + bool bubble_recover_dis; + bool handle_mswitch; + bool mode_switch_en; + bool sfe_en; + bool standby_en; +}; + +/** + * struct cam_isp_context_dump_header - ISP context dump header + * @tag: Tag name for the header + * @word_size: Size of word + * @size: Size of data + * + */ +struct cam_isp_context_dump_header { + uint8_t tag[CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +/** * struct cam_isp_ctx_req_mini_dump - ISP mini dumprequest + * + * @map_out: Output fence mapping + * @map_in: Input fence mapping + * @io_cfg: IO buffer configuration + * @reapply_type: Determines type of settings to be re-applied + * @request_id: Request ID + * @num_fence_map_out: Number of the output fence map + * @num_fence_map_in: Number of input fence map + * @num_io_cfg: Number of ISP hardware configuration entries + * @num_acked: Count to track acked entried for output. + * @num_deferred_acks: Number of buf_dones/acks that are deferred to + * handle or signalled in special scenarios. + * Increment this count instead of num_acked and + * handle the events later where eventually + * increment num_acked. 
+ * @bubble_report: Flag to track if bubble report is active on + * current request + * @bubble_detected: Flag to track if bubble is detected + * @cdm_reset_before_apply: For bubble re-apply when buf done not coming set + * to True + * + */ +struct cam_isp_ctx_req_mini_dump { + struct cam_hw_fence_map_entry *map_out; + struct cam_hw_fence_map_entry *map_in; + struct cam_buf_io_cfg *io_cfg; + enum cam_hw_config_reapply_type reapply_type; + uint64_t request_id; + uint8_t num_fence_map_in; + uint8_t num_fence_map_out; + uint8_t num_io_cfg; + uint8_t num_acked; + uint8_t num_deferred_acks; + bool bubble_report; + bool bubble_detected; + bool cdm_reset_before_apply; +}; + +/** + * struct cam_isp_ctx_mini_dump_info - Isp context mini dump data + * + * @active_list: Active Req list + * @pending_list: Pending req list + * @wait_list: Wait Req List + * @event_record: Event record + * @sof_timestamp_val: Captured time stamp value at sof hw event + * @boot_timestamp: Boot time stamp for a given req_id + * @last_sof_timestamp: SOF timestamp of the last frame + * @init_timestamp: Timestamp at which this context is initialized + * @frame_id: Frame id read every epoch for the ctx + * @reported_req_id: Last reported request id + * @last_applied_req_id: Last applied request id + * @frame_id_meta: Frame id for meta + * @ctx_id: Context id + * @subscribe_event: The irq event mask that CRM subscribes to, IFE + * will invoke CRM cb at those event. + * @bubble_frame_cnt: Count of the frame after bubble + * @isp_device_type: ISP device type + * @active_req_cnt: Counter for the active request + * @trigger_id: ID provided by CRM for each ctx on the link + * @substate_actiavted: Current substate for the activated state. + * @rxd_epoch: Indicate whether epoch has been received. Used to + * decide whether to apply request in offline ctx + * @process_bubble: Atomic variable to check if ctx is still + * processing bubble. 
+ * @apply_in_progress Whether request apply is in progress + * @rdi_only_context: Get context type information. + * true, if context is rdi only context + * @offline_context: Indicate whether context is for offline IFE + * @hw_acquired: Indicate whether HW resources are acquired + * @init_received: Indicate whether init config packet is received + * meta from the sensor + * @split_acquire: Indicate whether a separate acquire is expected + * @custom_enabled: Custom HW enabled for this ctx + * @use_frame_header_ts: Use frame header for qtimer ts + * @support_consumed_addr: Indicate whether HW has last consumed addr reg + * @use_default_apply: Use default settings in case of frame skip + * + */ +struct cam_isp_ctx_mini_dump_info { + struct cam_isp_ctx_req_mini_dump *active_list; + struct cam_isp_ctx_req_mini_dump *pending_list; + struct cam_isp_ctx_req_mini_dump *wait_list; + struct cam_isp_context_event_record event_record[ + CAM_ISP_CTX_EVENT_MAX][CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES]; + uint64_t sof_timestamp_val; + uint64_t boot_timestamp; + uint64_t last_sof_timestamp; + uint64_t init_timestamp; + int64_t frame_id; + int64_t reported_req_id; + int64_t last_applied_req_id; + int64_t last_bufdone_err_apply_req_id; + uint32_t frame_id_meta; + uint8_t ctx_id; + uint8_t subscribe_event; + uint8_t bubble_frame_cnt; + uint8_t isp_device_type; + uint8_t active_req_cnt; + uint8_t trigger_id; + uint8_t substate_activated; + uint8_t rxd_epoch; + uint8_t process_bubble; + uint8_t active_cnt; + uint8_t pending_cnt; + uint8_t wait_cnt; + bool apply_in_progress; + bool rdi_only_context; + bool offline_context; + bool hw_acquired; + bool init_received; + bool split_acquire; + bool custom_enabled; + bool use_frame_header_ts; + bool support_consumed_addr; + bool use_default_apply; +}; + +/** + * cam_isp_context_init() + * + * @brief: Initialization function for the ISP context + * + * @ctx: ISP context obj to be initialized + * @bridge_ops: Bridge call back funciton + * @hw_intf: 
 *                   ISP hw manager interface
 * @ctx_id:          ID for this context
 * @isp_device_type: Isp device type
 * @img_iommu_hdl:   IOMMU HDL for image buffers
 *
 */
int cam_isp_context_init(struct cam_isp_context *ctx,
    struct cam_context *ctx_base,
    struct cam_req_mgr_kmd_ops *bridge_ops,
    struct cam_hw_mgr_intf *hw_intf,
    uint32_t ctx_id,
    uint32_t isp_device_type,
    int img_iommu_hdl);

/**
 * cam_isp_context_deinit()
 *
 * @brief: Deinitialize function for the ISP context
 *
 * @ctx: ISP context obj to be deinitialized
 *
 */
int cam_isp_context_deinit(struct cam_isp_context *ctx);

#endif /* __CAM_ISP_CONTEXT_H__ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_dev.c b/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_dev.c
new file mode 100644
index 0000000000..3eef6d679f
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_dev.c
@@ -0,0 +1,397 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

/*
 * NOTE(review): the targets of the following #include directives appear to
 * have been stripped (angle-bracket <linux/...>/<media/...> headers lost in
 * extraction) — restore them from the original file before building.
 */
#include
#include
#include
#include
#include
#include
#include

#include
#include "cam_isp_dev.h"
#include "cam_hw_mgr_intf.h"
#include "cam_isp_hw_mgr_intf.h"
#include "cam_node.h"
#include "cam_debug_util.h"
#include "cam_smmu_api.h"
#include "camera_main.h"
#include "cam_common_util.h"
#include "cam_context_utils.h"
#include "cam_vmrm_interface.h"
#include "cam_mem_mgr_api.h"
#include "cam_req_mgr_dev.h"

/* Singleton ISP device-node state shared by every callback in this file */
static struct cam_isp_dev g_isp_dev;

/*
 * cam_isp_dev_evt_inject_cb - route an event-injection request to the ISP
 * context whose device handle matches.
 *
 * @inject_args: opaque pointer, treated as struct cam_common_inject_evt_param.
 *
 * Return: 0 when a matching context is found, -ENODEV otherwise.
 */
static int cam_isp_dev_evt_inject_cb(void *inject_args)
{
    struct cam_common_inject_evt_param *inject_params = inject_args;
    int i;

    /* Linear scan over the context array; stop at the first handle match */
    for (i = 0; i < g_isp_dev.max_context; i++) {
        if (g_isp_dev.ctx[i].dev_hdl == inject_params->dev_hdl) {
            cam_context_add_evt_inject(&g_isp_dev.ctx[i],
                &inject_params->evt_params);
            return 0;
        }
    }

    CAM_ERR(CAM_ISP, "No dev hdl found %d", inject_params->dev_hdl);
    return -ENODEV;
}

/*
 * cam_isp_dev_iommu_fault_handler - SMMU page-fault callback (registered with
 * cam_smmu_set_client_page_fault_handler at component bind). Asks every
 * context on the node to dump page-fault info; if no context owns the fault,
 * the fault is reported to userspace directly.
 */
static void cam_isp_dev_iommu_fault_handler(struct cam_smmu_pf_info *pf_smmu_info)
{
    int i, rc;
    struct cam_node *node = NULL;
    struct cam_hw_dump_pf_args pf_args = {0};

    if (!pf_smmu_info || !pf_smmu_info->token) {
        CAM_ERR(CAM_ISP, "invalid token in page handler cb");
        return;
    }

    node = (struct cam_node *)pf_smmu_info->token;

    pf_args.pf_smmu_info = pf_smmu_info;

    for (i = 0; i < node->ctx_size; i++) {
        cam_context_dump_pf_info(&(node->ctx_list[i]), &pf_args);
        if (pf_args.pf_context_info.ctx_found)
            /* Faulted ctx found */
            break;
    }

    if (i == node->ctx_size) {
        /* Faulted ctx not found.
           Report PF to userspace */
        rc = cam_context_send_pf_evt(NULL, &pf_args);
        if (rc)
            CAM_ERR(CAM_ISP,
                "Failed to notify PF event to userspace rc: %d", rc);
    }
}

/*
 * cam_isp_subdev_handle_message - fan a subdev message out to every context
 * on the node; failures are logged per context but do not stop the loop.
 */
static void cam_isp_subdev_handle_message(
    struct v4l2_subdev *sd,
    enum cam_subdev_message_type_t message_type,
    void *data)
{
    int i, rc = 0;
    struct cam_node *node = v4l2_get_subdevdata(sd);

    CAM_DBG(CAM_ISP, "node name %s", node->name);
    for (i = 0; i < node->ctx_size; i++) {
        rc = cam_context_handle_message(&(node->ctx_list[i]), message_type, data);
        if (rc)
            CAM_ERR(CAM_ISP, "Failed to handle message for %s", node->name);
    }
}

/*
 * cam_isp_vmrm_callback_handler - VM resource-manager message callback.
 * Currently only logs the message type; always returns 0.
 */
static int cam_isp_vmrm_callback_handler(
    void *cb_data, void *msg, uint32_t size)
{
    struct cam_isp_dev *isp_dev;
    struct cam_vmrm_msg *isp_msg = NULL;

    isp_dev = (struct cam_isp_dev *)cb_data;
    isp_msg = (struct cam_vmrm_msg *)msg;

    CAM_WARN(CAM_ISP, "isp dev type %d msg type %d",
        isp_dev->isp_device_type, isp_msg->msg_type);

    return 0;
}

/* Device-tree match table for the ISP platform driver */
static const struct of_device_id cam_isp_dt_match[] = {
    {
        .compatible = "qcom,cam-isp"
    },
    {}
};

/*
 * cam_isp_subdev_open - v4l2 subdev open hook; bumps the open count under
 * isp_mutex while holding the CRM subdev read lock.
 */
static int cam_isp_subdev_open(struct v4l2_subdev *sd,
    struct v4l2_subdev_fh *fh)
{
    cam_req_mgr_rwsem_read_op(CAM_SUBDEV_LOCK);

    mutex_lock(&g_isp_dev.isp_mutex);
    g_isp_dev.open_cnt++;
    mutex_unlock(&g_isp_dev.isp_mutex);

    cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK);

    return 0;
}

/*
 * cam_isp_subdev_close_internal - drop one open reference; shuts the node
 * down when the last reference goes away. Returns -EINVAL when already
 * closed or when the node pointer is missing.
 */
static int cam_isp_subdev_close_internal(struct v4l2_subdev *sd,
    struct v4l2_subdev_fh *fh)
{
    int rc = 0;
    struct cam_node *node = v4l2_get_subdevdata(sd);

    mutex_lock(&g_isp_dev.isp_mutex);
    if (g_isp_dev.open_cnt <= 0) {
        CAM_DBG(CAM_ISP, "ISP subdev is already closed");
        rc = -EINVAL;
        goto end;
    }

    g_isp_dev.open_cnt--;
    if (!node) {
        CAM_ERR(CAM_ISP, "Node ptr is NULL");
        rc = -EINVAL;
        goto end;
    }

    /* Last close triggers a full node shutdown */
    if (g_isp_dev.open_cnt == 0)
        cam_node_shutdown(node);

end:
    mutex_unlock(&g_isp_dev.isp_mutex);
    return rc;
}

/*
 * cam_isp_subdev_close - external close hook; defers to CRM when CRM is
 * active (CRM drives the close sequence in that case).
 */
static int cam_isp_subdev_close(struct
v4l2_subdev *sd,
    struct v4l2_subdev_fh *fh)
{
    bool crm_active = cam_req_mgr_is_open();

    if (crm_active) {
        CAM_DBG(CAM_ISP, "CRM is ACTIVE, close should be from CRM");
        return 0;
    }
    return cam_isp_subdev_close_internal(sd, fh);
}

static const struct v4l2_subdev_internal_ops cam_isp_subdev_internal_ops = {
    .close = cam_isp_subdev_close,
    .open = cam_isp_subdev_open,
};

/*
 * cam_isp_dev_component_bind - component-framework bind callback.
 * Reads the "arch-compat" DT string to pick IFE/TFE device type, probes the
 * subdev, allocates context arrays, brings up the ISP HW manager and
 * initializes every context, then registers node, page-fault handler and
 * VMRM driver node. Bind latency is recorded for diagnostics.
 */
static int cam_isp_dev_component_bind(struct device *dev,
    struct device *master_dev, void *data)
{
    int rc;
    int i;
    struct cam_hw_mgr_intf hw_mgr_intf;
    struct cam_node *node;
    const char *compat_str = NULL;
    struct platform_device *pdev = to_platform_device(dev);
    struct cam_driver_node driver_node;
    int iommu_hdl = -1;
    struct timespec64 ts_start, ts_end;
    long microsec = 0;

    CAM_GET_TIMESTAMP(ts_start);
    rc = of_property_read_string_index(pdev->dev.of_node, "arch-compat", 0,
        (const char **)&compat_str);
    if (rc) {
        CAM_ERR(CAM_ISP, "Error: failed to read arch-compat");
        goto err;
    }

    g_isp_dev.sd.internal_ops = &cam_isp_subdev_internal_ops;
    g_isp_dev.sd.close_seq_prior = CAM_SD_CLOSE_HIGH_PRIORITY;
    /* Initialize the v4l2 subdevice first.
(create cam_node) */ + if (strnstr(compat_str, "ife", strlen(compat_str))) { + rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME, + CAM_IFE_DEVICE_TYPE); + g_isp_dev.isp_device_type = CAM_IFE_DEVICE_TYPE; + g_isp_dev.max_context = CAM_IFE_CTX_MAX; + } else if (strnstr(compat_str, "mc_tfe", strlen(compat_str))) { + rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME, + CAM_TFE_MC_DEVICE_TYPE); + g_isp_dev.isp_device_type = CAM_TFE_MC_DEVICE_TYPE; + g_isp_dev.max_context = CAM_IFE_CTX_MAX; + } else if (strnstr(compat_str, "tfe", strlen(compat_str))) { + rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME, + CAM_TFE_DEVICE_TYPE); + g_isp_dev.sd.msg_cb = cam_isp_subdev_handle_message; + g_isp_dev.isp_device_type = CAM_TFE_DEVICE_TYPE; + g_isp_dev.max_context = CAM_TFE_CTX_MAX; + } else { + CAM_ERR(CAM_ISP, "Invalid ISP hw type %s", compat_str); + rc = -EINVAL; + goto err; + } + + if (rc) { + CAM_ERR(CAM_ISP, "ISP cam_subdev_probe failed!"); + goto err; + } + node = (struct cam_node *) g_isp_dev.sd.token; + + memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf)); + g_isp_dev.ctx = CAM_MEM_ZALLOC_ARRAY(g_isp_dev.max_context, + sizeof(struct cam_context), + GFP_KERNEL); + if (!g_isp_dev.ctx) { + CAM_ERR(CAM_ISP, + "Mem Allocation failed for ISP base context"); + goto unregister; + } + + g_isp_dev.ctx_isp = CAM_MEM_ZALLOC_ARRAY(g_isp_dev.max_context, + sizeof(struct cam_isp_context), + GFP_KERNEL); + if (!g_isp_dev.ctx_isp) { + CAM_ERR(CAM_ISP, + "Mem Allocation failed for Isp private context"); + CAM_MEM_FREE(g_isp_dev.ctx); + g_isp_dev.ctx = NULL; + goto unregister; + } + + rc = cam_isp_hw_mgr_init(compat_str, &hw_mgr_intf, &iommu_hdl, + g_isp_dev.isp_device_type); + if (rc != 0) { + CAM_ERR(CAM_ISP, "Can not initialized ISP HW manager!"); + goto free_mem; + } + + for (i = 0; i < g_isp_dev.max_context; i++) { + rc = cam_isp_context_init(&g_isp_dev.ctx_isp[i], + &g_isp_dev.ctx[i], + &node->crm_node_intf, + &node->hw_mgr_intf, + i, + 
g_isp_dev.isp_device_type, iommu_hdl); + if (rc) { + CAM_ERR(CAM_ISP, "ISP context init failed!"); + goto free_mem; + } + } + + cam_common_register_evt_inject_cb(cam_isp_dev_evt_inject_cb, + CAM_COMMON_EVT_INJECT_HW_ISP); + + rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, + g_isp_dev.max_context, CAM_ISP_DEV_NAME); + + if (rc) { + CAM_ERR(CAM_ISP, "ISP node init failed!"); + goto free_mem; + } + + node->sd_handler = cam_isp_subdev_close_internal; + cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_isp_dev_iommu_fault_handler, node); + + mutex_init(&g_isp_dev.isp_mutex); + + driver_node.driver_id = CAM_DRIVER_ID_ISP; + scnprintf(driver_node.driver_name, + sizeof(driver_node.driver_name), "%s", pdev->name); + driver_node.driver_msg_callback = cam_isp_vmrm_callback_handler; + driver_node.driver_msg_callback_data = &g_isp_dev; + + rc = cam_vmrm_populate_driver_node_info(&driver_node); + if (rc) { + CAM_ERR(CAM_VMRM, " isp driver node populate failed: %d", rc); + goto free_mem; + } + + CAM_DBG(CAM_ISP, "Component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return 0; + +free_mem: + CAM_MEM_FREE(g_isp_dev.ctx); + g_isp_dev.ctx = NULL; + CAM_MEM_FREE(g_isp_dev.ctx_isp); + g_isp_dev.ctx_isp = NULL; + +unregister: + rc = cam_subdev_remove(&g_isp_dev.sd); +err: + return rc; +} + +static void cam_isp_dev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc, i; + const char *compat_str = NULL; + struct platform_device *pdev = to_platform_device(dev); + + rc = of_property_read_string_index(pdev->dev.of_node, "arch-compat", 0, + (const char **)&compat_str); + if (rc) { + CAM_ERR(CAM_ISP, "Failed at reading arch-compat"); + return; + } + + cam_isp_hw_mgr_deinit(compat_str); + /* clean up resources */ + for (i = 0; i < g_isp_dev.max_context; i++) { + rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]); + if (rc) 
            CAM_ERR(CAM_ISP, "ISP context %d deinit failed", i);
    }

    CAM_MEM_FREE(g_isp_dev.ctx);
    g_isp_dev.ctx = NULL;
    CAM_MEM_FREE(g_isp_dev.ctx_isp);
    g_isp_dev.ctx_isp = NULL;

    rc = cam_subdev_remove(&g_isp_dev.sd);
    if (rc)
        CAM_ERR(CAM_ISP, "Unregister failed rc: %d", rc);

    /* Reset all global state so a re-bind starts clean */
    memset(&g_isp_dev, 0, sizeof(g_isp_dev));
}

const static struct component_ops cam_isp_dev_component_ops = {
    .bind = cam_isp_dev_component_bind,
    .unbind = cam_isp_dev_component_unbind,
};

/* Platform remove: just detach from the component framework */
static int cam_isp_dev_remove(struct platform_device *pdev)
{
    component_del(&pdev->dev, &cam_isp_dev_component_ops);
    return 0;
}

/* Platform probe: register as a component; real init happens in bind */
static int cam_isp_dev_probe(struct platform_device *pdev)
{
    int rc = 0;

    CAM_DBG(CAM_ISP, "Adding ISP dev component");
    rc = component_add(&pdev->dev, &cam_isp_dev_component_ops);
    if (rc)
        CAM_ERR(CAM_ISP, "failed to add component rc: %d", rc);

    return rc;
}

struct platform_driver isp_driver = {
    .probe = cam_isp_dev_probe,
    .remove = cam_isp_dev_remove,
    .driver = {
        .name = "cam_isp",
        .owner = THIS_MODULE,
        .of_match_table = cam_isp_dt_match,
        .suppress_bind_attrs = true,
    },
};

int cam_isp_dev_init_module(void)
{
    return platform_driver_register(&isp_driver);
}

void cam_isp_dev_exit_module(void)
{
    platform_driver_unregister(&isp_driver);
}

MODULE_DESCRIPTION("MSM ISP driver");
MODULE_LICENSE("GPL v2");
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_dev.h b/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_dev.h
new file mode 100644
index 0000000000..53d126c316
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/cam_isp_dev.h
@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

#ifndef _CAM_ISP_DEV_H_
#define _CAM_ISP_DEV_H_

#include "cam_subdev.h"
#include "cam_hw_mgr_intf.h"
#include "cam_context.h"
#include "cam_isp_context.h"

/**
 * struct cam_isp_dev - Camera ISP V4l2 device node
 *
 * @sd:              Common camera subdevice node
 * @ctx:             Isp base context storage
 * @ctx_isp:         Isp private context storage
 * @isp_mutex:       ISP dev mutex (guards open_cnt and shutdown)
 * @open_cnt:        Open device count
 * @isp_device_type: ISP device type
 * @max_context:     maximum contexts for TFE is 4 and for IFE is 8
 */
struct cam_isp_dev {
    struct cam_subdev sd;
    struct cam_context *ctx;
    struct cam_isp_context *ctx_isp;
    struct mutex isp_mutex;
    int32_t open_cnt;
    uint32_t isp_device_type;
    int32_t max_context;
};

/**
 * @brief : API to register ISP Dev to platform framework.
 * @return 0 on success, or a negative error code on failure
 *         (return of platform_driver_register).
 */
int cam_isp_dev_init_module(void);

/**
 * @brief : API to remove ISP Dev from platform framework.
 */
void cam_isp_dev_exit_module(void);
#endif /* _CAM_ISP_DEV_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
new file mode 100644
index 0000000000..12d4099775
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -0,0 +1,19305 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

/*
 * NOTE(review): the targets of the following #include directives were
 * stripped (angle-bracket headers lost in extraction) — restore them from
 * the original file.
 */
#include
#include
#include
#include

#include

#include "cam_compat.h"
#include "cam_smmu_api.h"
#include "cam_req_mgr_workq.h"
#include "cam_isp_hw_mgr_intf.h"
#include "cam_isp_hw.h"
#include "cam_ife_csid_hw_intf.h"
#include "cam_vfe_hw_intf.h"
#include "cam_sfe_hw_intf.h"
#include "cam_isp_packet_parser.h"
#include "cam_ife_hw_mgr.h"
#include "cam_cdm_intf_api.h"
#include "cam_cdm_util.h"
#include "cam_packet_util.h"
#include "cam_debug_util.h"
#include "cam_mem_mgr.h"
#include "cam_mem_mgr_api.h"
#include "cam_common_util.h"
#include "cam_presil_hw_access.h"

#define CAM_IFE_SAFE_DISABLE 0
#define CAM_IFE_SAFE_ENABLE 1
#define SMMU_SE_IFE 0

#define CAM_FRAME_HEADER_BUFFER_SIZE 64
#define CAM_FRAME_HEADER_ADDR_ALIGNMENT 256

#define CAM_ISP_PACKET_META_MAX \
    (CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)

#define CAM_ISP_GENERIC_BLOB_TYPE_MAX \
    (CAM_ISP_GENERIC_BLOB_TYPE_CSID_QCFA_CONFIG + 1)

#define MAX_INTERNAL_RECOVERY_ATTEMPTS 1

#define MAX_PARAMS_FOR_IRQ_INJECT 5
#define IRQ_INJECT_DISPLAY_BUF_LEN 4096

/* Parser callback invoked for each token of an IRQ-inject debugfs command */
typedef int (*cam_isp_irq_inject_cmd_parse_handler)(
    struct cam_isp_irq_inject_param *irq_inject_param,
    uint32_t param_index, char *token, bool *is_query);

/* Help text shown when the isp_irq_inject debugfs node is read */
#define IRQ_INJECT_USAGE_STRING \
    "######################################################\n" \
    "Usage:\n" \
    "$INJECT_NODE : /sys/kernel/debug/camera/ife/isp_irq_inject\n\n" \
    " - cat $INJECT_NODE\n" \
    " print Usage, injected params and current active HW info.\n" \
    " Also we need to cat the node to get output info after echo params to node.\n\n"\
    " - echo ?:?:?:? > $INJECT_NODE\n" \
    " print query info, entering '?'
 to any param besides req_id to query.\n\n" \
    " - echo hw_type:hw_idx:res_id:irq_mask:req_id > $INJECT_NODE\n" \
    " hw_type : Hw to inject IRQ\n" \
    " hw_idx : Index of the selected hw\n" \
    " reg_unit : Register to set irq\n" \
    " irq_mask : IRQ to be triggered\n" \
    " req_id : Req to trigger the IRQ, entering 'now' to this param will trigger " \
    "irq immediately\n\n" \
    "Up to 10 sets of inject params are supported.\n" \
    "######################################################\n"

/*
 * Map from generic blob type (index) to the HW command issued for it.
 * NOTE(review): entry 5 is CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG — a blob-type
 * constant inside a table of CAM_ISP_HW_CMD_* values; confirm this is
 * intentional against the original table.
 */
static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
    CAM_ISP_HW_CMD_GET_HFR_UPDATE,
    CAM_ISP_HW_CMD_CLOCK_UPDATE,
    CAM_ISP_HW_CMD_BW_UPDATE,
    CAM_ISP_HW_CMD_UBWC_UPDATE,
    CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE,
    CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG,
    CAM_ISP_HW_CMD_UBWC_UPDATE_V2,
    CAM_ISP_HW_CMD_CORE_CONFIG,
    CAM_ISP_HW_CMD_WM_CONFIG_UPDATE,
    CAM_ISP_HW_CMD_BW_UPDATE_V2,
    CAM_ISP_HW_CMD_BLANKING_UPDATE,
    CAM_ISP_HW_CMD_WM_CONFIG_UPDATE_V2,
};

/* Singleton IFE HW manager state and global device-availability counters */
static struct cam_ife_hw_mgr g_ife_hw_mgr;
static uint32_t g_num_ife_available, g_num_ife_lite_available, g_num_sfe_available;
static uint32_t g_num_ife_functional, g_num_ife_lite_functional, g_num_sfe_functional;
static uint32_t max_ife_out_res, max_sfe_out_res;

/* Scratch buffer backing the IRQ-inject debugfs output */
static char *irq_inject_display_buf;


/* Forward declarations */
static int cam_ife_mgr_util_process_csid_path_res(
    uint32_t path_id, enum cam_ife_pix_path_res_id *path_res_id);

static int cam_ife_mgr_find_core_idx(int split_id, struct cam_ife_hw_mgr_ctx *ctx,
    enum cam_isp_hw_type hw_type, uint32_t *core_idx);

static int cam_isp_blob_ife_clock_update(
    struct cam_isp_clock_config *clock_config,
    struct cam_ife_hw_mgr_ctx *ctx);

static int cam_isp_blob_sfe_clock_update(
    struct cam_isp_clock_config *clock_config,
    struct cam_ife_hw_mgr_ctx *ctx);


static int cam_ife_hw_mgr_event_handler(
    void *priv,
    uint32_t evt_id,
    void *evt_info);

static int cam_ife_mgr_prog_default_settings(int64_t last_applied_max_pd_req,
    bool force_disable_drv,
    bool is_streamon, struct cam_ife_hw_mgr_ctx *ctx);

static int cam_ife_mgr_cmd_get_sof_timestamp(struct cam_ife_hw_mgr_ctx *ife_ctx,
    uint64_t *time_stamp, uint64_t *boot_time_stamp, uint64_t *prev_time_stamp,
    struct timespec64 *boot_ts, bool get_curr_timestamp);

static int cam_convert_rdi_out_res_id_to_src(int res_id);

/*
 * cam_ife_mgr_get_src_hw_ctxt_from_csid_path - map a CSID pixel path id to
 * its multi-context index; returns -1 for any non-pixel path.
 */
static int cam_ife_mgr_get_src_hw_ctxt_from_csid_path(uint32_t path_id)
{
    switch (path_id) {
    case CAM_ISP_PXL_PATH:
        return CAM_ISP_MULTI_CTXT_0;
    case CAM_ISP_PXL1_PATH:
        return CAM_ISP_MULTI_CTXT_1;
    case CAM_ISP_PXL2_PATH:
        return CAM_ISP_MULTI_CTXT_2;
    default:
        return -1;
    }
}

/*
 * cam_ife_hw_mgr_get_hw_intf - resolve the hw interface for a base entry.
 * Note the asymmetry: for CSID the device entry itself is returned, for
 * SFE/VFE the entry's ->hw_intf member is returned; NULL for unknown types.
 */
static void *cam_ife_hw_mgr_get_hw_intf(
    struct cam_isp_ctx_base_info *base)
{
    if (base->hw_type == CAM_ISP_HW_TYPE_CSID)
        return g_ife_hw_mgr.csid_devices[base->idx];
    else if (base->hw_type == CAM_ISP_HW_TYPE_SFE)
        return g_ife_hw_mgr.sfe_devices[base->idx]->hw_intf;
    else if (base->hw_type == CAM_ISP_HW_TYPE_VFE)
        return g_ife_hw_mgr.ife_devices[base->idx]->hw_intf;
    else
        return NULL;
}

/*
 * cam_ife_mgr_update_core_info_to_cpas - for every CSID resource split,
 * look up the CSID and SFE core indices and push the pairing to CPAS.
 * Returns the first non-zero rc encountered.
 */
static int cam_ife_mgr_update_core_info_to_cpas(struct cam_ife_hw_mgr_ctx *ctx,
    bool set_port)
{
    struct cam_isp_hw_mgr_res *hw_mgr_res;
    int i, sfe_core_idx, csid_core_idx, rc = 0;

    list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
        for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
            if (!hw_mgr_res->hw_res[i])
                continue;

            rc = cam_ife_mgr_find_core_idx(i, ctx, CAM_ISP_HW_TYPE_CSID,
                &csid_core_idx);
            if (rc)
                return rc;

            rc = cam_ife_mgr_find_core_idx(i, ctx, CAM_ISP_HW_TYPE_SFE, &sfe_core_idx);
            if (rc)
                return rc;

            rc = cam_cpas_csid_input_core_info_update(csid_core_idx, sfe_core_idx,
                set_port);
            if (rc) {
                CAM_ERR(CAM_PERF,
                    "Failed to update core info to cpas rc:%d,ctx:%u",
                    rc, ctx->ctx_index);
                return rc;
            }
        }
    }

    return rc;
}

/*
 * cam_ife_mgr_read_perf_cnts - issue READ_RST_PERF_CNTRS to each VFE/SFE
 * base of the context; read failures are only logged at debug level.
 * NOTE(review): the local hw_type below is assigned but never read — dead
 * local, candidate for removal.
 */
static void cam_ife_mgr_read_perf_cnts(
    struct cam_ife_hw_mgr_ctx *ctx)
{
    int i, rc;
    uint32_t hw_type = CAM_ISP_HW_TYPE_VFE, hw_idx, dummy;
    struct
 cam_ife_hw_mgr *hw_mgr = ctx->hw_mgr;

    if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE)
        hw_type = CAM_ISP_HW_TYPE_SFE;

    for (i = 0; i < ctx->num_base; i++) {
        hw_idx = ctx->base[i].idx;

        switch (ctx->base[i].hw_type) {
        case CAM_ISP_HW_TYPE_VFE:
            if (hw_mgr->ife_devices[hw_idx]) {
                rc = hw_mgr->ife_devices[hw_idx]->hw_intf->hw_ops.process_cmd(
                    hw_mgr->ife_devices[hw_idx]->hw_intf->hw_priv,
                    CAM_ISP_HW_CMD_READ_RST_PERF_CNTRS,
                    &dummy, sizeof(dummy));
                if (rc)
                    CAM_DBG(CAM_ISP,
                        "Failed to read IFE_%u perf counters g rc: %d, ctx_idx: %u",
                        hw_idx, rc, ctx->ctx_index);
            }
            break;
        case CAM_ISP_HW_TYPE_SFE:
            if (hw_mgr->sfe_devices[hw_idx]) {
                rc = hw_mgr->sfe_devices[hw_idx]->hw_intf->hw_ops.process_cmd(
                    hw_mgr->sfe_devices[hw_idx]->hw_intf->hw_priv,
                    CAM_ISP_HW_CMD_READ_RST_PERF_CNTRS,
                    &dummy, sizeof(dummy));
                if (rc)
                    CAM_DBG(CAM_ISP,
                        "Failed to read SFE_%u perf counters rc: %d, ctx_idx: %u",
                        hw_idx, rc, ctx->ctx_index);
            }
            break;
        default:
            break;
        }
    }
}

/*
 * cam_isp_blob_path_exp_order_update - validate a per-path exposure-order
 * update blob and, when a "last exposure" RDI path is present, push the
 * exposure info to every CSID base via CAM_ISP_HW_CMD_EXP_INFO_UPDATE.
 * Only CSID hw_type entries are supported; anything else is -ENODEV.
 */
static int cam_isp_blob_path_exp_order_update(struct cam_ife_hw_mgr_ctx *ctx,
    uint64_t request_id, struct cam_isp_prepare_hw_update_data *prepare_hw_data)
{
    struct cam_ife_hw_mgr *ife_hw_mgr;
    struct cam_hw_intf *hw_intf = NULL;
    struct cam_isp_path_exp_order_update_internal *exp_order_update;
    bool csid_update_valid = false;
    struct cam_ife_csid_exp_info_update_args exp_info_update_args = {0};
    int i, rc = 0;

    ife_hw_mgr = ctx->hw_mgr;
    exp_order_update = &prepare_hw_data->isp_exp_order_update;

    CAM_DBG(CAM_ISP,
        "Exp order update blob opcode:%u req_id:%llu ctx_idx:%u num_process_exp:%u num_sensor_out_exp:%u num_path_exp_info:%u",
        prepare_hw_data->packet_opcode_type, request_id,
        ctx->ctx_index, exp_order_update->num_process_exp,
        exp_order_update->num_sensor_out_exp, exp_order_update->num_path_exp_info);


    for (i = 0; i < exp_order_update->num_path_exp_info; i++) {
        /* hw_type must fall strictly inside (CAM_ISP_HW_BASE, CAM_ISP_HW_MAX) */
        if ((exp_order_update->exp_info[i].hw_type <= CAM_ISP_HW_BASE) ||
            (exp_order_update->exp_info[i].hw_type >= CAM_ISP_HW_MAX)) {
            CAM_ERR(CAM_ISP,
                "Invalid hw_type:%u at idx:%d for exp order update req_id:%llu ctx_idx:%u",
                exp_order_update->exp_info[i].hw_type, i, request_id,
                ctx->ctx_index);
            return -EINVAL;
        }

        CAM_DBG(CAM_ISP,
            "Per path exp order update idx:%d hw_type:%u res_id:0x%x hw_ctxt_id:0x%x exp_order:0x%x",
            i, exp_order_update->exp_info[i].hw_type,
            exp_order_update->exp_info[i].res_id,
            exp_order_update->exp_info[i].hw_ctxt_id,
            exp_order_update->exp_info[i].exp_order);

        if (exp_order_update->exp_info[i].hw_type == CAM_ISP_HW_CSID) {
            enum cam_ife_pix_path_res_id csid_res_id = CAM_IFE_PIX_PATH_RES_MAX;

            /* Only the first LAST-exposure RDI0..RDI4 entry is latched */
            if ((!csid_update_valid) &&
                (exp_order_update->exp_info[i].exp_order ==
                CAM_ISP_EXP_ORDER_LAST) &&
                (exp_order_update->exp_info[i].res_id >= CAM_ISP_RDI0_PATH) &&
                (exp_order_update->exp_info[i].res_id <= CAM_ISP_RDI4_PATH)) {
                rc = cam_ife_mgr_util_process_csid_path_res(
                    exp_order_update->exp_info[i].res_id,
                    &csid_res_id);
                if (rc) {
                    CAM_ERR(CAM_ISP,
                        "Invalid csid res_id:%u at idx:%d for exp order update req_id:%llu ctx_idx:%u",
                        exp_order_update->exp_info[i].res_id, i, request_id,
                        ctx->ctx_index);
                    return -EINVAL;
                }

                exp_info_update_args.last_exp_res_id = csid_res_id;
                exp_info_update_args.last_exp_valid = true;
                csid_update_valid = true;
            }
        } else {
            CAM_ERR(CAM_ISP,
                "hw_type:%u at idx:%d not supported for exp order update req_id:%llu ctx_idx:%u",
                exp_order_update->exp_info[i].hw_type, i, request_id,
                ctx->ctx_index);
            return -ENODEV;
        }
    }

    if (csid_update_valid) {
        exp_info_update_args.num_process_exp = exp_order_update->num_process_exp;
        exp_info_update_args.num_sensor_out_exp = exp_order_update->num_sensor_out_exp;
        for (i = 0; i < ctx->num_base; i++) {
            if (ctx->base[i].hw_type != CAM_ISP_HW_TYPE_CSID)
                continue;

            hw_intf = ife_hw_mgr->csid_devices[ctx->base[i].idx];
            if (hw_intf && hw_intf->hw_ops.process_cmd) {
                rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
                    CAM_ISP_HW_CMD_EXP_INFO_UPDATE, &exp_info_update_args,
                    sizeof(struct cam_ife_csid_exp_info_update_args));
                if (rc) {
                    CAM_ERR(CAM_PERF,
                        "Exp info update failed req_id:%d i:%d hw_idx=%d rc:%d ctx_idx: %u",
                        request_id, i, ctx->base[i].idx, rc,
                        ctx->ctx_index);
                    break;
                }
            }
        }
    }

    return rc;
}

/*
 * cam_isp_mgr_drv_config - decide whether DDR DRV should be enabled for this
 * request and program the CSIDs via CAM_ISP_HW_CMD_DRV_CONFIG. Decision is
 * driven by per-request drv info when available, otherwise by the blob
 * config carried in prepare_hw_data. No-op when DRV is unsupported or
 * disabled via debugfs.
 */
static int cam_isp_mgr_drv_config(struct cam_ife_hw_mgr_ctx *ctx,
    uint64_t request_id, bool force_disable_drv,
    struct cam_isp_prepare_hw_update_data *prepare_hw_data)
{
    bool drv_en = false, update_drv = false;
    bool is_blob_config_valid = false;
    int i, rc = 0, index;
    uint32_t timeout_val = 0, path_idle_en = 0;
    uint64_t min_frame_duration, min_blanking_duration;
    struct cam_ife_hw_mgr *ife_hw_mgr;
    struct cam_hw_intf *hw_intf;
    struct cam_isp_drv_config *drv_config;
    struct cam_ife_csid_drv_config_args drv_config_args = {0};
    struct cam_isp_hw_per_req_info *prev_req_info = NULL;
    struct cam_isp_hw_per_req_info *per_req_info = NULL;
    struct cam_isp_hw_per_req_info *next_req_info = NULL;
    struct cam_isp_hw_drv_info *prev_drv_info = NULL;
    struct cam_isp_hw_drv_info *drv_info = NULL;
    struct cam_isp_hw_drv_info *next_drv_info = NULL;

    ife_hw_mgr = ctx->hw_mgr;

    if (!g_ife_hw_mgr.cam_ddr_drv_support || g_ife_hw_mgr.debug_cfg.disable_isp_drv) {
        CAM_DBG(CAM_ISP, "Skipping DRV config. drv_support: %d, drv_disable %d",
            g_ife_hw_mgr.cam_ddr_drv_support,
            g_ife_hw_mgr.debug_cfg.disable_isp_drv);
        return rc;
    }

    /*
     * The main logic in this function is checking if per request drv info is valid,
     * if it is valid then we do the decision logic based on per request drv info.
     * Otherwise, configure the drv if the blob config is valid.
     */

    /*
     * 1 Collect data and fill per request drv info structure
     * 1.1 Already check if blob config data is valid or not.
     */
    if (!prepare_hw_data)
        is_blob_config_valid = false;
    else
        is_blob_config_valid = prepare_hw_data->drv_config_valid;

    /* Find the ring slot already recorded for this request, if any */
    for (index = 0; index < MAX_DRV_REQUEST_DEPTH; index++)
        if (ctx->per_req_info[index].drv_info.req_id == request_id)
            break;

    if (index == MAX_DRV_REQUEST_DEPTH) {
        if (is_blob_config_valid &&
            prepare_hw_data->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) {
            /*
             * Save an initial record for early PCR request, then below threshold
             * can be saved to the first slot.
             */
            index = ctx->wr_per_req_index++;
            ctx->per_req_info[index].drv_info.req_id = request_id;
            ctx->per_req_info[index].drv_info.blanking_duration = 0;
        } else {
            /* Unknown request and not init: force DRV off */
            update_drv = true;
            drv_en = false;
            goto set_drv;
        }
    }

    per_req_info = &ctx->per_req_info[index];
    next_req_info = &ctx->per_req_info[(index + 1) % MAX_DRV_REQUEST_DEPTH];

    drv_info = &per_req_info->drv_info;
    next_drv_info = &next_req_info->drv_info;

    drv_info->is_blob_config_valid = is_blob_config_valid;
    if (is_blob_config_valid) {
        per_req_info->mup_en = prepare_hw_data->mup_en;
    }

    if (!is_blob_config_valid && (request_id != drv_info->req_id)) {
        CAM_DBG(CAM_ISP, "No valid DRV info, req_id:%llu, ctx:%d",
            request_id, ctx->ctx_index);
        return 0;
    }

    /* 1.2 If blob config is valid, fill the path idle info and drv blanking
     * threshold of per request from blob config; otherwise, it means
     * the path idle info is not changed from isp userland driver side,
     * so get path idle info of current request from drv info of previous
     * request.
     * The goal in this step is getting all the needed drv info for per
     * request drv info structure.
     */
    if (is_blob_config_valid) {
        drv_config = &prepare_hw_data->isp_drv_config;
        drv_info->path_idle_en = drv_config->path_idle_en;
        if (drv_config->num_valid_params)
            if (drv_config->valid_param_mask & CAM_IFE_DRV_BLANKING_THRESHOLD)
                drv_info->drv_blanking_threshold =
                    drv_config->params[ffs(CAM_IFE_DRV_BLANKING_THRESHOLD) - 1] *
                    CAM_COMMON_NS_PER_MS;
    } else {
        /* Get path idle en from previous drv info if current req has no drv blob data */
        if (request_id) {
            index = (index + MAX_DRV_REQUEST_DEPTH - 1) % MAX_DRV_REQUEST_DEPTH;
            prev_req_info = &ctx->per_req_info[index];
            prev_drv_info = &prev_req_info->drv_info;
            if (prev_drv_info->req_id < request_id) {
                drv_info->path_idle_en = prev_drv_info->path_idle_en;
                drv_info->drv_blanking_threshold =
                    prev_drv_info->drv_blanking_threshold;
            } else {
                CAM_DBG(CAM_ISP, "no valid drv info, req:%llu, ctx:%d",
                    request_id, ctx->ctx_index);
                return 0;
            }
        }
    }

    /* Verbose diagnostics gated by the debug_drv knob */
    if (debug_drv) {
        if (is_blob_config_valid)
            CAM_INFO(CAM_PERF,
                "DRV config blob valid:%s opcode:%u req_id:%llu disable_drv_override:%s ctx_idx:%u drv_en:%s path_idle_en:0x%x timeout_val:%u, ctx:%d",
                CAM_BOOL_TO_YESNO(prepare_hw_data->drv_config_valid),
                prepare_hw_data->packet_opcode_type, request_id,
                CAM_BOOL_TO_YESNO(g_ife_hw_mgr.debug_cfg.disable_isp_drv),
                ctx->ctx_index, CAM_BOOL_TO_YESNO(drv_config->drv_en),
                drv_config->path_idle_en, drv_config->timeout_val,
                ctx->ctx_index);

        CAM_INFO(CAM_PERF,
            "DRV per frame info: req:%llu is_valid:%s frame duration:%llu ns, vertical blanking duration:%llu ns, ctx:%d",
            request_id, CAM_BOOL_TO_YESNO(drv_info->req_id == request_id),
            drv_info->frame_duration, drv_info->blanking_duration,
            ctx->ctx_index);
    }

    if (is_blob_config_valid)
        CAM_DBG(CAM_PERF,
            "DRV config blob valid:%s opcode:%u req_id:%llu disable_drv_override:%s ctx_idx:%u drv_en:%u path_idle_en:0x%x timeout_val:%u, ctx:%d",
            CAM_BOOL_TO_YESNO(prepare_hw_data->drv_config_valid),
            prepare_hw_data->packet_opcode_type, request_id,
            CAM_BOOL_TO_YESNO(g_ife_hw_mgr.debug_cfg.disable_isp_drv),
            ctx->ctx_index, drv_config->drv_en, drv_config->path_idle_en,
            drv_config->timeout_val, ctx->ctx_index);

    CAM_DBG(CAM_PERF,
        "DRV per frame info: req:%llu is_valid:%s frame duration:%llu ns, vertical blanking duration:%llu ns, ctx:%d",
        request_id, CAM_BOOL_TO_YESNO(drv_info->req_id == request_id),
        drv_info->frame_duration, drv_info->blanking_duration, ctx->ctx_index);

    if (is_blob_config_valid &&
        (prepare_hw_data->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV)) {
        drv_config_args.is_init_config = true;
        ctx->is_init_drv_cfg_received = true;
    }

    /* DRV programming is held off until the INIT config has arrived */
    if (!ctx->is_init_drv_cfg_received) {
        CAM_DBG(CAM_PERF, "Init DRV cfg hasn't received, ctx:%d",
            ctx->ctx_index);

        if (debug_drv)
            CAM_INFO(CAM_PERF, "Init DRV cfg hasn't received, ctx:%d",
                ctx->ctx_index);

        return 0;
    }

    /*
     * 2 DRV decision logic and get final drv parameters
     */
    if ((drv_info->req_id != request_id) || (drv_config_args.is_init_config)) {
        /*
         * 2.1 If no valid per request drv info of current request, then just update drv
         * when there is a valid drv blob data.
         */
        if (is_blob_config_valid) {
            drv_en = drv_config->drv_en;
            path_idle_en = drv_config->path_idle_en;
            timeout_val = drv_config->timeout_val;
            update_drv = true;
        }
    } else if (request_id != 0) {
        /*
         * 2.2 If the per request drv info is valid, then we update the drv based on the
         * per request drv info.
         */
        min_frame_duration = drv_info->frame_duration;
        min_blanking_duration = drv_info->blanking_duration;

        if (next_drv_info->req_id > request_id) {
            /*
             * Since sensor is going to apply the setting of next req and also need
             * to consider the isp applying delay issue, so get min duration between
             * the frame duration of current req and next req.
             */
            if (min_frame_duration > next_drv_info->frame_duration)
                min_frame_duration = next_drv_info->frame_duration;

            if (min_blanking_duration > next_drv_info->blanking_duration)
                min_blanking_duration = next_drv_info->blanking_duration;
        } else {
            /* Disable DRV if no valid next drv info */
            min_frame_duration = 0;
            min_blanking_duration = 0;
        }

        path_idle_en = drv_info->path_idle_en;

        /* Enable DRV only when blanking exceeds the threshold and there is
         * at least 1 ms of frame time to convert into a timeout budget.
         */
        if ((min_blanking_duration > drv_info->drv_blanking_threshold) &&
            (min_frame_duration > CAM_COMMON_NS_PER_MS)) {
            drv_en = true;
            timeout_val = (GC_FREQUENCY_IN_KHZ *
                (min_frame_duration - CAM_COMMON_NS_PER_MS)) / CAM_COMMON_NS_PER_MS;
        }

        update_drv = true;

        if (debug_drv)
            CAM_INFO(CAM_ISP,
                "[min:curr:next] frame duration:[%llu:%llu:%llu] blanking:[%llu:%llu:%llu] drv_en:%d timeout:0x%x, ctx:%d",
                min_frame_duration, drv_info->frame_duration,
                next_drv_info->frame_duration, min_blanking_duration,
                drv_info->blanking_duration, next_drv_info->blanking_duration,
                drv_en, timeout_val, ctx->ctx_index);

        CAM_DBG(CAM_ISP,
            "[min:curr:next] frame duration:[%llu:%llu:%llu] blanking:[%llu:%llu:%llu] drv_en:%d timeout:0x%x, ctx:%d",
            min_frame_duration, drv_info->frame_duration,
            next_drv_info->frame_duration, min_blanking_duration,
            drv_info->blanking_duration, next_drv_info->blanking_duration,
            drv_en, timeout_val, ctx->ctx_index);
    }

    drv_info->update_drv = update_drv;
    drv_info->drv_en = drv_en;
    drv_info->timeout_val = timeout_val;

    if (force_disable_drv) {
        update_drv = true;
        drv_en = false;
        CAM_DBG(CAM_ISP, "req_id:%llu force to disable drv, ctx:%d",
            request_id, ctx->ctx_index);
    }

    if (!update_drv) {
        CAM_DBG(CAM_ISP, "req_id:%llu no need to update drv, ctx:%d",
            request_id, ctx->ctx_index);
        return 0;
    }

set_drv:
    /* sanity check: if no valid idle path configuration, then disable drv */
    if (!path_idle_en)
        drv_en = false;

    if (drv_en)
        ctx->drv_path_idle_en = path_idle_en;

    /* 3.
Pass the final drv parameters to CSID */ + drv_config_args.drv_en = drv_en; + drv_config_args.path_idle_en = path_idle_en; + drv_config_args.timeout_val = timeout_val; + + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].hw_type != CAM_ISP_HW_TYPE_CSID) + continue; + + hw_intf = ife_hw_mgr->csid_devices[ctx->base[i].idx]; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_DRV_CONFIG, &drv_config_args, + sizeof(struct cam_ife_csid_drv_config_args)); + if (rc) { + CAM_ERR(CAM_PERF, + "DRV config failed req_id:%d i:%d hw_idx=%d rc:%d ctx_idx: %u", + request_id, i, ctx->base[i].idx, rc, ctx->ctx_index); + break; + } + } + } + + return rc; +} + +static bool cam_isp_is_ctx_primary_rdi(struct cam_ife_hw_mgr_ctx *ctx) +{ + /* check for RDI only and RDI PD context*/ + if (ctx->flags.is_rdi_only_context || ctx->flags.rdi_pd_context) + return true; + + return false; +} + +static enum cam_ife_pix_path_res_id + cam_ife_hw_mgr_get_ife_csid_rdi_res_type( + uint32_t out_port_type); + +static int cam_ife_mgr_finish_clk_bw_update( + struct cam_ife_hw_mgr_ctx *ctx, + uint64_t request_id, bool skip_clk_data_rst) +{ + int i, rc = 0; + struct cam_isp_apply_clk_bw_args clk_bw_args; + bool cesta_idx_updated[CAM_CESTA_MAX_CLIENTS] = {0}; + + clk_bw_args.request_id = request_id; + clk_bw_args.skip_clk_data_rst = skip_clk_data_rst; + clk_bw_args.is_drv_config_en = (bool) (ctx->drv_path_idle_en & BIT(0)); + + for (i = 0; i < ctx->num_base; i++) { + clk_bw_args.hw_intf = NULL; + clk_bw_args.clock_updated = false; + CAM_DBG(CAM_PERF, + "Clock/BW Update for ctx:%u req:%d i:%d num_vfe_out:%d num_sfe_out:%d in_rd:%d", + ctx->ctx_index, request_id, i, ctx->num_acq_vfe_out, ctx->num_acq_sfe_out, + !list_empty(&ctx->res_list_ife_in_rd)); + if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) && + (ctx->num_acq_vfe_out || (!list_empty(&ctx->res_list_ife_in_rd)))) + clk_bw_args.hw_intf = 
g_ife_hw_mgr.ife_devices[ctx->base[i].idx]->hw_intf; + else if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) && + (ctx->num_acq_sfe_out || (!list_empty(&ctx->res_list_ife_in_rd)))) + clk_bw_args.hw_intf = g_ife_hw_mgr.sfe_devices[ctx->base[i].idx]->hw_intf; + else + continue; + + CAM_DBG(CAM_PERF, + "Apply Clock/BW for ctx:%u req:%d i:%d hw_idx=%d hw_type:%d inline:%s num_vfe_out:%d num_sfe_out:%d in_rd:%d", + ctx->ctx_index, request_id, i, clk_bw_args.hw_intf->hw_idx, + clk_bw_args.hw_intf->hw_type, + CAM_BOOL_TO_YESNO(clk_bw_args.is_drv_config_en), + ctx->num_acq_vfe_out, ctx->num_acq_sfe_out, + !list_empty(&ctx->res_list_ife_in_rd)); + rc = clk_bw_args.hw_intf->hw_ops.process_cmd(clk_bw_args.hw_intf->hw_priv, + CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE, &clk_bw_args, + sizeof(struct cam_isp_apply_clk_bw_args)); + if (rc) { + CAM_ERR(CAM_PERF, + "Finish Clock/BW Update failed ctx:%u req:%d i:%d hw_id=%d hw_type:%d rc:%d", + ctx->ctx_index, request_id, i, ctx->base[i].idx, + ctx->base[i].hw_type, rc); + break; + } + + CAM_DBG(CAM_ISP, "clock_updated=%d, hw_idx=%d", + clk_bw_args.clock_updated, clk_bw_args.hw_intf->hw_idx); + + if ((clk_bw_args.clock_updated) && + (clk_bw_args.hw_intf->hw_idx < CAM_CESTA_MAX_CLIENTS)) + cesta_idx_updated[clk_bw_args.hw_intf->hw_idx] = true; + } + + if (g_ife_hw_mgr.cam_clk_drv_support) { + CAM_DBG(CAM_ISP, "Channel switch for [0]=%s, [1]=%s, [2]=%s", + CAM_BOOL_TO_YESNO(cesta_idx_updated[0]), + CAM_BOOL_TO_YESNO(cesta_idx_updated[1]), + CAM_BOOL_TO_YESNO(cesta_idx_updated[2])); + + for (i = 0; i < CAM_CESTA_MAX_CLIENTS; i++) { + if (!cesta_idx_updated[i]) + continue; + + rc = cam_soc_util_cesta_channel_switch(i, "ife_hw_mgr_update"); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Failed to apply power states for cesta client:%d rc:%d", + i, rc); + return rc; + } + } + } + + CAM_DBG(CAM_ISP, "Clk, BW update done for Req=%lld, skip_clk_data_rst=%d", + request_id, skip_clk_data_rst); + + return rc; +} + +static inline int 
__cam_ife_mgr_get_hw_soc_info( + struct list_head *res_list, + enum cam_isp_hw_split_id split_id, + enum cam_isp_hw_type hw_type, + struct cam_hw_soc_info **soc_info_ptr) +{ + int rc = -EINVAL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_hw_intf *hw_intf = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_mgr_res *hw_mgr_res_temp; + + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + res_list, list) { + if (!hw_mgr_res->hw_res[split_id]) + continue; + + hw_intf = hw_mgr_res->hw_res[split_id]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA, &soc_info, + sizeof(void *)); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in %d regspace data query res_id: %u split idx: %d rc : %d", + hw_type, hw_mgr_res->res_id, split_id, rc); + return rc; + } + + *soc_info_ptr = soc_info; + CAM_DBG(CAM_ISP, + "Obtained soc info for split %d for hw_type %d", + split_id, hw_type); + break; + } + } + + return rc; +} + +static int cam_ife_mgr_regspace_data_cb(uint32_t reg_base_type, + void *hw_mgr_ctx, struct cam_hw_soc_info **soc_info_ptr, + uint32_t *reg_base_idx) +{ + int rc = -EINVAL; + struct cam_ife_hw_mgr_ctx *ctx = + (struct cam_ife_hw_mgr_ctx *) hw_mgr_ctx; + + *soc_info_ptr = NULL; + switch (reg_base_type) { + case CAM_REG_DUMP_BASE_TYPE_CAMNOC: + case CAM_REG_DUMP_BASE_TYPE_ISP_LEFT: + rc = __cam_ife_mgr_get_hw_soc_info( + &ctx->res_list_ife_src, + CAM_ISP_HW_SPLIT_LEFT, CAM_ISP_HW_TYPE_VFE, + soc_info_ptr); + if (rc) + return rc; + + if (reg_base_type == CAM_REG_DUMP_BASE_TYPE_ISP_LEFT) + *reg_base_idx = 0; + else + *reg_base_idx = 1; + + break; + case CAM_REG_DUMP_BASE_TYPE_ISP_RIGHT: + rc = __cam_ife_mgr_get_hw_soc_info( + &ctx->res_list_ife_src, + CAM_ISP_HW_SPLIT_RIGHT, CAM_ISP_HW_TYPE_VFE, + soc_info_ptr); + if (rc) + return rc; + + *reg_base_idx = 0; + break; + case CAM_REG_DUMP_BASE_TYPE_CSID_WRAPPER: + case CAM_REG_DUMP_BASE_TYPE_CSID_LEFT: + 
rc = __cam_ife_mgr_get_hw_soc_info( + &ctx->res_list_ife_csid, + CAM_ISP_HW_SPLIT_LEFT, CAM_ISP_HW_TYPE_CSID, + soc_info_ptr); + if (rc) + return rc; + + if (reg_base_type == CAM_REG_DUMP_BASE_TYPE_CSID_LEFT) + *reg_base_idx = 0; + else + *reg_base_idx = 1; + + break; + case CAM_REG_DUMP_BASE_TYPE_CSID_RIGHT: + rc = __cam_ife_mgr_get_hw_soc_info( + &ctx->res_list_ife_csid, + CAM_ISP_HW_SPLIT_RIGHT, CAM_ISP_HW_TYPE_CSID, + soc_info_ptr); + if (rc) + return rc; + + *reg_base_idx = 0; + break; + case CAM_REG_DUMP_BASE_TYPE_SFE_LEFT: + rc = __cam_ife_mgr_get_hw_soc_info( + &ctx->res_list_sfe_src, + CAM_ISP_HW_SPLIT_LEFT, CAM_ISP_HW_TYPE_SFE, + soc_info_ptr); + if (rc) + return rc; + + *reg_base_idx = 0; + break; + case CAM_REG_DUMP_BASE_TYPE_SFE_RIGHT: + rc = __cam_ife_mgr_get_hw_soc_info( + &ctx->res_list_sfe_src, + CAM_ISP_HW_SPLIT_RIGHT, CAM_ISP_HW_TYPE_SFE, + soc_info_ptr); + if (rc) + return rc; + + *reg_base_idx = 0; + break; + default: + CAM_ERR(CAM_ISP, + "Unrecognized reg base type: %u, ctx_idx: %u", + reg_base_type, ctx->ctx_index); + return rc; + } + + return rc; +} + +static int cam_ife_mgr_handle_reg_dump(struct cam_ife_hw_mgr_ctx *ctx, + struct cam_cmd_buf_desc *reg_dump_buf_desc, uint32_t num_reg_dump_buf, + uint32_t meta_type, + void *soc_dump_args, + bool user_triggered_dump) +{ + int rc = 0, i; + struct cam_hw_soc_skip_dump_args skip_dump_args; + uintptr_t cpu_addr = 0; + size_t buf_size = 0; + uint8_t total_ctx_acquired = 0; + + CAM_DBG(CAM_ISP, "Reg dump req_type: %u ctx_idx: %u req:%llu", + meta_type, ctx->ctx_index, ctx->applied_req_id); + + if (!ctx->flags.init_done) { + CAM_WARN(CAM_ISP, "regdump can't possible as HW not initialized, ctx_idx: %u", + ctx->ctx_index); + return 0; + } + + if (cam_presil_mode_enabled()) { + if (g_ife_hw_mgr.debug_cfg.enable_presil_reg_dump) { + CAM_WARN(CAM_ISP, "regdump enabled for presil mode, ctx_idx: %u", + ctx->ctx_index); + } else { + CAM_ERR(CAM_ISP, "regdump disabled by default for presil mode, ctx: %u", + 
ctx->ctx_index); + return 0; + } + } + + if (!num_reg_dump_buf || !reg_dump_buf_desc) { + CAM_DBG(CAM_ISP, + "Invalid args for reg dump req_id: [%llu] ctx idx: [%u] meta_type: [%u] num_reg_dump_buf: [%u] reg_dump_buf_desc: [%pK]", + ctx->applied_req_id, ctx->ctx_index, meta_type, + num_reg_dump_buf, reg_dump_buf_desc); + return rc; + } + + if (!atomic_read(&ctx->cdm_done)) + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Reg dump values might be from more than one request, ctx_idx: %u", + ctx->ctx_index); + + for (i = 0; i < CAM_ISP_MULTI_CTXT_MAX; i++) { + if (ctx->acq_hw_ctxt_src_dst_map[i]) + total_ctx_acquired++; + } + + if (g_ife_hw_mgr.isp_caps.skip_regdump_data.skip_regdump && + (total_ctx_acquired != 1)) { + skip_dump_args.skip_regdump = + g_ife_hw_mgr.isp_caps.skip_regdump_data.skip_regdump; + skip_dump_args.start_offset = + g_ife_hw_mgr.isp_caps.skip_regdump_data.skip_regdump_start_offset; + skip_dump_args.stop_offset = + g_ife_hw_mgr.isp_caps.skip_regdump_data.skip_regdump_stop_offset; + } + + for (i = 0; i < num_reg_dump_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(®_dump_buf_desc[i]); + if (rc) + return rc; + + CAM_DBG(CAM_ISP, "Reg dump cmd meta data: %u req_type: %u ctx_idx: %u req:%llu", + reg_dump_buf_desc[i].meta_data, meta_type, ctx->ctx_index, + ctx->applied_req_id); + if (reg_dump_buf_desc[i].meta_data == meta_type) { + if (in_serving_softirq()) { + cpu_addr = ctx->reg_dump_cmd_buf_addr_len[i].cpu_addr; + buf_size = ctx->reg_dump_cmd_buf_addr_len[i].buf_size; + } else { + rc = cam_mem_get_cpu_buf(reg_dump_buf_desc[i].mem_handle, + &cpu_addr, &buf_size); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in Get cpu addr, rc=%d, mem_handle =%d", + rc, reg_dump_buf_desc[i].mem_handle); + return rc; + } + } + if (!cpu_addr || (buf_size == 0)) { + CAM_ERR(CAM_ISP, "Invalid cpu_addr=%pK mem_handle=%d", + (void *)cpu_addr, reg_dump_buf_desc[i].mem_handle); + if (!in_serving_softirq()) + cam_mem_put_cpu_buf(reg_dump_buf_desc[i].mem_handle); + } + rc = 
cam_soc_util_reg_dump_to_cmd_buf(ctx, + ®_dump_buf_desc[i], + ctx->applied_req_id, + cam_ife_mgr_regspace_data_cb, + soc_dump_args, + &skip_dump_args, + user_triggered_dump, cpu_addr, buf_size); + if (rc) { + CAM_ERR(CAM_ISP, + "Reg dump failed at idx: %d, rc: %d req_id: %llu meta type: %u ctx_idx: %u", + i, rc, ctx->applied_req_id, meta_type, ctx->ctx_index); + if (!in_serving_softirq()) + cam_mem_put_cpu_buf(reg_dump_buf_desc[i].mem_handle); + return rc; + } + if (!in_serving_softirq()) + cam_mem_put_cpu_buf(reg_dump_buf_desc[i].mem_handle); + } + } + return rc; +} + +static inline void cam_ife_mgr_update_hw_entries_util( + enum cam_isp_cdm_bl_type cdm_bl_type, + uint32_t total_used_bytes, + struct cam_kmd_buf_info *kmd_buf_info, + struct cam_hw_prepare_update_args *prepare, + bool precheck_combine) +{ + bool combine; + uint32_t num_ent; + struct cam_hw_update_entry *prev_hw_update_entry; + + /* + * Combine with prev entry only when a new entry was created + * by previus handler and this entry has the same bl type + * as the previous entry, if not, a new entry will be generated + * later + */ + combine = precheck_combine; + if (combine) { + num_ent = prepare->num_hw_update_entries; + prev_hw_update_entry = &(prepare->hw_update_entries[num_ent - 1]); + if (prev_hw_update_entry->flags != cdm_bl_type) + combine = false; + } + cam_isp_update_hw_entry(cdm_bl_type, prepare, + kmd_buf_info, total_used_bytes, combine); +} + +static inline int cam_ife_mgr_allocate_cdm_cmd( + bool is_sfe_en, + struct cam_cdm_bl_request **cdm_cmd) +{ + int rc = 0; + uint32_t cfg_max = CAM_ISP_CTX_CFG_MAX; + + if (is_sfe_en) + cfg_max = CAM_ISP_SFE_CTX_CFG_MAX; + + *cdm_cmd = CAM_MEM_ZALLOC(((sizeof(struct cam_cdm_bl_request)) + + ((cfg_max - 1) * + sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL); + + if (!(*cdm_cmd)) { + CAM_ERR(CAM_ISP, "Failed to allocate cdm bl memory"); + rc = -ENOMEM; + } + + return rc; +} + +static inline void cam_ife_mgr_free_cdm_cmd( + struct cam_cdm_bl_request 
**cdm_cmd) +{ + CAM_MEM_FREE(*cdm_cmd); + *cdm_cmd = NULL; +} + +static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args) +{ + int rc = 0; + int i; + struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_query_cap_cmd *query = hw_caps_args; + struct cam_isp_query_cap_cmd query_isp; + struct cam_isp_dev_cap_info *ife_full_hw_info = NULL; + struct cam_isp_dev_cap_info *ife_lite_hw_info = NULL; + struct cam_isp_dev_cap_info *csid_full_hw_info = NULL; + struct cam_isp_dev_cap_info *csid_lite_hw_info = NULL; + struct cam_ife_csid_hw_caps *ife_csid_caps = {0}; + + CAM_DBG(CAM_ISP, "enter"); + + if (sizeof(struct cam_isp_query_cap_cmd) != query->size) { + CAM_ERR(CAM_ISP, + "Input query cap size:%u does not match expected query cap size: %u", + query->size, sizeof(struct cam_isp_query_cap_cmd)); + return -EFAULT; + } + + if (copy_from_user(&query_isp, + u64_to_user_ptr(query->caps_handle), + sizeof(struct cam_isp_query_cap_cmd))) { + rc = -EFAULT; + return rc; + } + + query_isp.device_iommu.non_secure = hw_mgr->mgr_common.img_iommu_hdl; + query_isp.device_iommu.secure = hw_mgr->mgr_common.img_iommu_hdl_secure; + query_isp.cdm_iommu.non_secure = hw_mgr->mgr_common.cmd_iommu_hdl; + query_isp.cdm_iommu.secure = hw_mgr->mgr_common.cmd_iommu_hdl_secure; + query_isp.num_dev = 0; + + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (!hw_mgr->ife_devices[i]) + continue; + + if (hw_mgr->ife_dev_caps[i].is_lite) { + if (ife_lite_hw_info == NULL) { + ife_lite_hw_info = + &query_isp.dev_caps[query_isp.num_dev]; + query_isp.num_dev++; + + ife_lite_hw_info->hw_type = CAM_ISP_HW_IFE_LITE; + ife_lite_hw_info->hw_version.major = + hw_mgr->ife_dev_caps[i].major; + ife_lite_hw_info->hw_version.minor = + hw_mgr->ife_dev_caps[i].minor; + ife_lite_hw_info->hw_version.incr = + hw_mgr->ife_dev_caps[i].incr; + ife_lite_hw_info->hw_version.reserved = 0; + ife_lite_hw_info->num_hw = 0; + } + + ife_lite_hw_info->num_hw++; + + } else { + if (ife_full_hw_info == NULL) { + 
ife_full_hw_info = + &query_isp.dev_caps[query_isp.num_dev]; + query_isp.num_dev++; + + ife_full_hw_info->hw_type = (hw_mgr->isp_device_type + == CAM_TFE_MC_DEVICE_TYPE) ? + CAM_ISP_HW_MC_TFE : CAM_ISP_HW_IFE; + ife_full_hw_info->hw_version.major = + hw_mgr->ife_dev_caps[i].major; + ife_full_hw_info->hw_version.minor = + hw_mgr->ife_dev_caps[i].minor; + ife_full_hw_info->hw_version.incr = + hw_mgr->ife_dev_caps[i].incr; + ife_full_hw_info->hw_version.reserved = 0; + ife_full_hw_info->num_hw = 0; + } + + ife_full_hw_info->num_hw++; + } + } + + for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) { + if (!hw_mgr->csid_devices[i]) + continue; + + ife_csid_caps = (struct cam_ife_csid_hw_caps *) + &hw_mgr->csid_hw_caps[i]; + + if (ife_csid_caps->is_lite) { + if (csid_lite_hw_info == NULL) { + csid_lite_hw_info = + &query_isp.dev_caps[query_isp.num_dev]; + query_isp.num_dev++; + + csid_lite_hw_info->hw_type = + CAM_ISP_HW_CSID_LITE; + csid_lite_hw_info->hw_version.major = + ife_csid_caps->major_version; + csid_lite_hw_info->hw_version.minor = + ife_csid_caps->minor_version; + csid_lite_hw_info->hw_version.incr = + ife_csid_caps->version_incr; + csid_lite_hw_info->hw_version.reserved = 0; + csid_lite_hw_info->num_hw = 0; + } + + csid_lite_hw_info->num_hw++; + + } else { + if (csid_full_hw_info == NULL) { + csid_full_hw_info = + &query_isp.dev_caps[query_isp.num_dev]; + query_isp.num_dev++; + + csid_full_hw_info->hw_type = CAM_ISP_HW_CSID; + csid_full_hw_info->hw_version.major = + ife_csid_caps->major_version; + csid_full_hw_info->hw_version.minor = + ife_csid_caps->minor_version; + csid_full_hw_info->hw_version.incr = + ife_csid_caps->version_incr; + csid_full_hw_info->hw_version.reserved = 0; + csid_full_hw_info->num_hw = 0; + } + + csid_full_hw_info->num_hw++; + } + } + + if (copy_to_user(u64_to_user_ptr(query->caps_handle), + &query_isp, sizeof(struct cam_isp_query_cap_cmd))) + rc = -EFAULT; + + CAM_DBG(CAM_ISP, "exit rc :%d", rc); + + return rc; +} + +static inline int 
cam_ife_hw_mgr_is_sfe_rdi_for_fetch( + uint32_t res_id) +{ + int rc = 0; + + switch (res_id) { + case CAM_ISP_SFE_OUT_RES_RDI_0: + case CAM_ISP_SFE_OUT_RES_RDI_1: + case CAM_ISP_SFE_OUT_RES_RDI_2: + rc = 1; + break; + default: + break; + } + + return rc; +} + +static inline int cam_ife_hw_mgr_is_shdr_fs_rdi_res( + uint32_t res_id, bool is_sfe_shdr, bool is_sfe_fs) +{ + return (cam_ife_hw_mgr_is_sfe_rdi_for_fetch(res_id) && + (is_sfe_shdr || is_sfe_fs)); +} + +static int cam_ife_hw_mgr_is_sfe_rdi_res(uint32_t res_id) +{ + int rc = 0; + + switch (res_id) { + case CAM_ISP_SFE_OUT_RES_RDI_0: + case CAM_ISP_SFE_OUT_RES_RDI_1: + case CAM_ISP_SFE_OUT_RES_RDI_2: + case CAM_ISP_SFE_OUT_RES_RDI_3: + case CAM_ISP_SFE_OUT_RES_RDI_4: + rc = 1; + break; + default: + break; + } + + return rc; +} + +static int cam_ife_hw_mgr_is_rdi_res(uint32_t res_id) +{ + int rc = 0; + + switch (res_id) { + case CAM_ISP_IFE_OUT_RES_RDI_0: + case CAM_ISP_IFE_OUT_RES_RDI_1: + case CAM_ISP_IFE_OUT_RES_RDI_2: + case CAM_ISP_IFE_OUT_RES_RDI_3: + case CAM_ISP_IFE_OUT_RES_RDI_4: + rc = 1; + break; + default: + break; + } + + return rc; +} + +static inline bool cam_ife_hw_mgr_is_ife_out_port(uint32_t res_id) +{ + bool is_ife_out = false; + + if ((res_id >= CAM_ISP_IFE_OUT_RES_BASE) && + (res_id < (CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res))) + is_ife_out = true; + + return is_ife_out; +} + +static inline bool cam_ife_hw_mgr_is_sfe_out_port(uint32_t res_id) +{ + bool is_sfe_out = false; + + if ((res_id >= CAM_ISP_SFE_OUT_RES_BASE) && + (res_id < (CAM_ISP_SFE_OUT_RES_BASE + + max_sfe_out_res))) + is_sfe_out = true; + + return is_sfe_out; +} + +void cam_ife_hw_mgr_drv_info_dump( + struct cam_isp_hw_event_info *evt, + void *ctx) +{ + struct cam_ife_hw_mgr_ctx *hw_mgr_ctx = ctx; + struct cam_isp_hw_per_req_info *per_req_info = NULL; + struct timespec64 cur_time_stamp; + int i, index; + struct tm ts; + + ktime_get_clocktai_ts64(&cur_time_stamp); + time64_to_tm(cur_time_stamp.tv_sec, 0, &ts); + + 
CAM_INFO(CAM_ISP, + "time[%d-%d %d:%d:%d.%lld]: hw_mgr_ctx->applied_req_id = %lld wr_index:%d", + ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, + ts.tm_sec, cur_time_stamp.tv_nsec / 1000000, + hw_mgr_ctx->applied_req_id, hw_mgr_ctx->wr_per_req_index); + + for (i = 0; i < MAX_DRV_REQUEST_DEPTH; i++) { + /* Dump info from oldest to latest */ + index = (i + hw_mgr_ctx->wr_per_req_index + 1) % MAX_DRV_REQUEST_DEPTH; + per_req_info = &hw_mgr_ctx->per_req_info[index]; + CAM_INFO(CAM_ISP, + "Index[%d] Apply IFE Req[%llu] frame_dur[%llu] blanking_dur[%llu] drv_timeout[%u] drv_en[%d], path_idle[0x%x], update_drv[%d], blob_valid[%d] mup_en[%d]", + i, per_req_info->drv_info.req_id, + per_req_info->drv_info.frame_duration, + per_req_info->drv_info.blanking_duration, + per_req_info->drv_info.timeout_val, + per_req_info->drv_info.drv_en, + per_req_info->drv_info.path_idle_en, + per_req_info->drv_info.update_drv, + per_req_info->drv_info.is_blob_config_valid, + per_req_info->mup_en); + } +} + +static int cam_ife_hw_mgr_check_and_notify_overflow( + struct cam_isp_hw_event_info *evt, + void *ctx, + bool *is_bus_overflow) +{ + int i; + int res_id; + int ife_res_id = -1; + int sfe_res_id = -1; + struct cam_hw_intf *hw_if = NULL; + struct cam_ife_hw_mgr_ctx *hw_mgr_ctx = ctx; + struct cam_isp_hw_overflow_info overflow_info; + + switch(evt->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_IPP_1: + case CAM_IFE_PIX_PATH_RES_IPP_2: + ife_res_id = CAM_ISP_HW_VFE_IN_CAMIF; + sfe_res_id = CAM_ISP_HW_SFE_IN_PIX; + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + ife_res_id = CAM_ISP_HW_VFE_IN_RDI0; + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI0; + break; + case CAM_IFE_PIX_PATH_RES_RDI_1: + ife_res_id = CAM_ISP_HW_VFE_IN_RDI1; + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI1; + break; + case CAM_IFE_PIX_PATH_RES_RDI_2: + ife_res_id = CAM_ISP_HW_VFE_IN_RDI2; + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI2; + break; + case CAM_IFE_PIX_PATH_RES_RDI_3: + ife_res_id = CAM_ISP_HW_VFE_IN_RDI3; + sfe_res_id 
= CAM_ISP_HW_SFE_IN_RDI3; + break; + case CAM_IFE_PIX_PATH_RES_RDI_4: + ife_res_id = CAM_ISP_HW_VFE_IN_RDI4; + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI4; + break; + default: + break; + } + + for (i = 0; i < hw_mgr_ctx->num_base; i++) { + + if (hw_mgr_ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) { + if (hw_mgr_ctx->base[i].idx != evt->hw_idx) + continue; + + hw_if = g_ife_hw_mgr.ife_devices[evt->hw_idx]->hw_intf; + res_id = ife_res_id; + } else if (hw_mgr_ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) { + if (hw_mgr_ctx->base[i].idx != evt->in_core_idx) + continue; + + hw_if = g_ife_hw_mgr.sfe_devices[evt->in_core_idx]->hw_intf; + res_id = sfe_res_id; + } else { + continue; + } + + if (!hw_if) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "hw_intf is null, ctx_idx: %u", + hw_mgr_ctx->ctx_index); + return -EINVAL; + } + + if (hw_if->hw_ops.process_cmd) { + overflow_info.res_id = res_id; + hw_if->hw_ops.process_cmd(hw_if->hw_priv, + CAM_ISP_HW_NOTIFY_OVERFLOW, + &overflow_info, + sizeof(struct cam_isp_hw_overflow_info)); + + CAM_DBG(CAM_ISP, + "check and notify hw idx %d type %d bus overflow happened %d ctx_idx: %u", + hw_mgr_ctx->base[i].idx, + hw_mgr_ctx->base[i].hw_type, + overflow_info.is_bus_overflow, hw_mgr_ctx->ctx_index); + + if (overflow_info.is_bus_overflow) + *is_bus_overflow = true; + } + } + + return 0; +} + +static enum cam_ife_pix_path_res_id + cam_ife_hw_mgr_get_csid_rdi_type_for_offline( + uint32_t rd_res_type) +{ + enum cam_ife_pix_path_res_id path_id; + + /* Allow only RD0 for offline */ + switch (rd_res_type) { + case CAM_ISP_SFE_IN_RD_0: + path_id = CAM_IFE_PIX_PATH_RES_RDI_0; + break; + default: + path_id = CAM_IFE_PIX_PATH_RES_MAX; + CAM_ERR(CAM_ISP, + "maximum rdi output type exceeded 0x%x", + rd_res_type); + break; + } + + CAM_DBG(CAM_ISP, "out_port: %x path_id: %d", + rd_res_type, path_id); + + return path_id; +} + +static bool cam_ife_hw_mgr_is_sfe_rd_res( + uint32_t sfe_in_path_type) +{ + switch (sfe_in_path_type) { + case CAM_ISP_SFE_IN_RD_0: + case 
CAM_ISP_SFE_IN_RD_1: + case CAM_ISP_SFE_IN_RD_2: + return true; + default: + return false; + } +} + +static int cam_ife_hw_mgr_reset_csid( + struct cam_ife_hw_mgr_ctx *ctx, + int reset_type) +{ + int i; + int rc = 0; + struct cam_hw_intf *hw_intf; + struct cam_csid_reset_cfg_args reset_args; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_ife_hw_mgr *hw_mgr; + bool hw_idx_map[CAM_IFE_CSID_HW_NUM_MAX] = {0}; + + hw_mgr = ctx->hw_mgr; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + + if ((hw_mgr->csid_global_reset_en) && + (hw_idx_map[hw_intf->hw_idx])) + continue; + + reset_args.reset_type = reset_type; + reset_args.node_res = hw_mgr_res->hw_res[i]; + rc = hw_intf->hw_ops.reset(hw_intf->hw_priv, + &reset_args, sizeof(reset_args)); + if (rc) + goto err; + hw_idx_map[hw_intf->hw_idx] = true; + } + } + + return rc; +err: + CAM_ERR(CAM_ISP, "RESET HW res failed: (ctx_idx: %u type:%d, id:%d)", + ctx->ctx_index, hw_mgr_res->res_type, hw_mgr_res->res_id); + return rc; +} + +static int cam_ife_hw_mgr_init_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res) +{ + int i; + int rc = -1; + struct cam_hw_intf *hw_intf; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + CAM_DBG(CAM_ISP, "enabled vfe hardware %u", + hw_intf->hw_idx); + if (hw_intf->hw_ops.init) { + rc = hw_intf->hw_ops.init(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + if (rc) + goto err; + } + } + + return 0; +err: + CAM_ERR(CAM_ISP, "INIT HW res failed: (type:%d, id:%d)", + isp_hw_res->res_type, isp_hw_res->res_id); + return rc; +} + +static int cam_ife_mgr_csid_start_hw( + struct cam_ife_hw_mgr_ctx *ctx, + uint32_t primary_rdi_csid_res, + bool is_internal_start, + bool start_only) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + 
struct cam_isp_resource_node *isp_res; + struct cam_isp_resource_node *res[CAM_IFE_PIX_PATH_RES_MAX - 1]; + struct cam_isp_resource_node *primary_rdi_res = NULL; + struct cam_csid_hw_start_args start_args; + struct cam_hw_intf *hw_intf; + uint32_t cnt; + int j; + bool ipp_available = false, is_secure; + + for (j = ctx->num_base - 1 ; j >= 0; j--) { + cnt = 0; + is_secure = false; + + if (ctx->base[j].hw_type != CAM_ISP_HW_TYPE_CSID) + continue; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + isp_res = hw_mgr_res->hw_res[ctx->base[j].split_id]; + + if (!isp_res || ctx->base[j].idx != isp_res->hw_intf->hw_idx) + continue; + + if ((primary_rdi_csid_res == hw_mgr_res->res_id) || + (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE && + isp_res->res_id == CAM_IFE_PIX_PATH_RES_RDI_0)) + primary_rdi_res = isp_res; + + if (hw_mgr_res->res_id == CAM_IFE_PIX_PATH_RES_IPP) + ipp_available = true; + + if (hw_mgr_res->is_secure) + is_secure = true; + + CAM_DBG(CAM_ISP, + "csid[%u] ctx_idx: %u res:%s res_id %d cnt %u, is_secure: %s", + isp_res->hw_intf->hw_idx, ctx->ctx_index, + isp_res->res_name, isp_res->res_id, cnt, + CAM_BOOL_TO_YESNO(is_secure)); + res[cnt] = isp_res; + cnt++; + } + + if (!ipp_available && primary_rdi_res) + primary_rdi_res->is_rdi_primary_res = true; + + if (cnt) { + hw_intf = res[0]->hw_intf; + start_args.num_res = cnt; + start_args.node_res = res; + + if (ctx->cdm_hw_idx < 0) { + CAM_ERR(CAM_ISP, + "CSID[%u], physical CDM hw_idx is invalid: %d on ctx: %u", + hw_intf->hw_idx, ctx->cdm_hw_idx, ctx->ctx_index); + return -EINVAL; + } + + start_args.cdm_hw_idx = ctx->cdm_hw_idx; + start_args.is_secure = is_secure; + start_args.is_internal_start = is_internal_start; + start_args.start_only = start_only; + start_args.is_drv_config_en = (bool) ctx->drv_path_idle_en; + hw_intf->hw_ops.start(hw_intf->hw_priv, &start_args, + sizeof(start_args)); + } + } + + return 0; +} + +static int cam_ife_hw_mgr_start_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res, + 
struct cam_ife_hw_mgr_ctx *ctx) +{ + int i; + int rc = -1; + struct cam_hw_intf *hw_intf; + + /* Start slave (which is right split) first */ + for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.start) { + rc = hw_intf->hw_ops.start(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + if (rc) { + CAM_ERR(CAM_ISP, + "Can not start HW:%d resources, ctx_idx: %u", + hw_intf->hw_idx, ctx->ctx_index); + goto err; + } + } else { + CAM_ERR(CAM_ISP, "function null, ctx_idx: %u", ctx->ctx_index); + goto err; + } + } + + return 0; +err: + CAM_ERR(CAM_ISP, "Start hw res failed (ctx_idx: %u type:%d, id:%d)", + ctx->ctx_index, isp_hw_res->res_type, isp_hw_res->res_id); + return rc; +} + +static void cam_ife_hw_mgr_stop_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res) +{ + int i; + struct cam_hw_intf *hw_intf; + uint32_t dummy_args; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + + if (isp_hw_res->hw_res[i]->res_state != + CAM_ISP_RESOURCE_STATE_STREAMING) + continue; + + if (hw_intf->hw_ops.stop) { + hw_intf->hw_ops.stop(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + } + else + CAM_ERR(CAM_ISP, "stop null"); + if (hw_intf->hw_ops.process_cmd && + isp_hw_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) { + hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ, + &dummy_args, sizeof(dummy_args)); + } + isp_hw_res->hw_res[i]->is_rdi_primary_res = false; + } +} + +static void cam_ife_hw_mgr_deinit_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res) +{ + int i; + struct cam_hw_intf *hw_intf; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.deinit) + hw_intf->hw_ops.deinit(hw_intf->hw_priv, + 
isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + } +} + +static void cam_ife_hw_mgr_deinit_hw( + struct cam_ife_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_ife_hw_mgr *hw_mgr; + int i = 0, j, rc; + + if (!ctx->flags.init_done) { + CAM_WARN(CAM_ISP, "ctx is not in init state, ctx_idx: %u", ctx->ctx_index); + return; + } + + hw_mgr = ctx->hw_mgr; + + if (hw_mgr->csid_global_reset_en) + cam_ife_hw_mgr_reset_csid(ctx, CAM_IFE_CSID_RESET_GLOBAL); + + /* Deinit IFE CSID */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CSID\n ctx_idx: %u", __func__, + ctx->ctx_index); + cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res); + } + + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + /* Deint SFE RSRC */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_sfe_src, list) { + CAM_DBG(CAM_ISP, "Going to DeInit SFE SRC %u, ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res); + } + + /* Deinit SFE OUT */ + for (i = 0; i < ctx->num_acq_sfe_out; i++) + cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_sfe_out[i]); + } + + /* Deint BUS RD */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) { + CAM_DBG(CAM_ISP, "Going to DeInit BUS RD %u, ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res); + } + + /* Deint IFE MUX(SRC) */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + CAM_DBG(CAM_ISP, "Going to DeInit IFE SRC %u, ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res); + } + + /* Deinit IFE OUT */ + for (i = 0; i < ctx->num_acq_vfe_out; i++) + cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]); + + /* + * Do not reset the curr_idx, it can be only be changed only it new SCID is reserved + * for a particular exposure. 
Check if any cache needs to be de-activated + */ + for (i = CAM_LLCC_SMALL_1; i <= CAM_LLCC_LARGE_4; i++) { + if (ctx->flags.sys_cache_usage[i]) { + if (cam_cpas_is_notif_staling_supported() && + hw_mgr->sys_cache_info[i].llcc_staling_support) { + rc = cam_cpas_notif_increment_staling_counter(i); + if (rc) + CAM_ERR(CAM_ISP, + "llcc cache notif increment staling failed %d", i); + } + cam_cpas_deactivate_llcc(i); + } + ctx->flags.sys_cache_usage[i] = false; + } + + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) + for (j = 0; j < CAM_ISP_EXPOSURE_MAX; j++) + hw_mgr->sfe_cache_info[ctx->base[i].idx].activated[j] = false; + } + + ctx->flags.init_done = false; +} + +static int cam_ife_hw_mgr_init_hw( + struct cam_ife_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + int rc = 0, i; + + /* INIT IFE SRC */ + CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%u", + ctx->ctx_index); + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d), ctx_idx:%u", + hw_mgr_res->res_id, ctx->ctx_index); + goto deinit; + } + } + + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + /* INIT SFE RSRC */ + CAM_DBG(CAM_ISP, "INIT SFE Resource in ctx id:%d", + ctx->ctx_index); + list_for_each_entry(hw_mgr_res, &ctx->res_list_sfe_src, list) { + rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_ISP, + "Can not INIT SFE SRC res (%d), ctx_idx:%u", + hw_mgr_res->res_id, ctx->ctx_index); + goto deinit; + } + } + + CAM_DBG(CAM_ISP, "INIT SFE OUT RESOURCES in ctx id:%u", + ctx->ctx_index); + for (i = 0; i < ctx->num_acq_sfe_out; i++) { + rc = cam_ife_hw_mgr_init_hw_res( + &ctx->res_list_sfe_out[i]); + if (rc) { + CAM_ERR(CAM_ISP, "Can not INIT SFE OUT (%d), ctx_idx:%u", + ctx->res_list_sfe_out[i].res_id, ctx->ctx_index); + goto deinit; + } + } + } + + CAM_DBG(CAM_ISP, "INIT IFE csid ... 
in ctx id:%u", + ctx->ctx_index); + + /* INIT IFE BUS RD */ + CAM_DBG(CAM_ISP, "INIT IFE BUS RD in ctx id:%u", + ctx->ctx_index); + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) { + rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_ISP, "Can not IFE BUS RD (%d), ctx_idx:%u", + hw_mgr_res->res_id, ctx->ctx_index); + return rc; + } + } + + /* INIT IFE OUT */ + CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%u", + ctx->ctx_index); + + for (i = 0; i < ctx->num_acq_vfe_out; i++) { + rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]); + if (rc) { + CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d), ctx_idx:%u", + ctx->res_list_ife_out[i].res_id, ctx->ctx_index); + goto deinit; + } + } + + /* INIT IFE csid */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_ISP, "Can not INIT IFE CSID(id :%d), ctx_idx:%u", + hw_mgr_res->res_id, ctx->ctx_index); + goto deinit; + } + } + + /* Check if any cache needs to be activated */ + for (i = CAM_LLCC_SMALL_1; i <= CAM_LLCC_LARGE_4; i++) { + if (ctx->flags.sys_cache_usage[i]) { + rc = cam_cpas_activate_llcc(i); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to activate cache: %d, ctx_idx:%u", i, ctx->ctx_index); + goto deinit; + } + } + } + + return rc; +deinit: + ctx->flags.init_done = true; + cam_ife_hw_mgr_deinit_hw(ctx); + return rc; +} + +static int cam_ife_hw_mgr_put_res( + struct list_head *src_list, + struct cam_isp_hw_mgr_res **res) +{ + int rc = 0; + struct cam_isp_hw_mgr_res *res_ptr = NULL; + + res_ptr = *res; + if (res_ptr) + list_add_tail(&res_ptr->list, src_list); + + return rc; +} + +static int cam_ife_hw_mgr_get_res( + struct list_head *src_list, + struct cam_isp_hw_mgr_res **res) +{ + int rc = 0; + struct cam_isp_hw_mgr_res *res_ptr = NULL; + + if (!list_empty(src_list)) { + res_ptr = list_first_entry(src_list, + struct cam_isp_hw_mgr_res, list); + list_del_init(&res_ptr->list); + } else { + 
CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx"); + rc = -1; + } + *res = res_ptr; + + return rc; +} + +static int cam_ife_hw_mgr_free_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res, bool del_list) +{ + int rc = 0; + int i; + struct cam_hw_intf *hw_intf; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.release) { + rc = hw_intf->hw_ops.release(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + if (rc) + CAM_ERR(CAM_ISP, + "Release HW:%d Res: %s resource id %d failed", + hw_intf->hw_idx, + isp_hw_res->hw_res[i]->res_name, + isp_hw_res->res_id); + isp_hw_res->hw_res[i] = NULL; + } else + CAM_ERR(CAM_ISP, "Release null"); + } + /* caller should make sure the resource is in a list */ + if (del_list) { + list_del_init(&isp_hw_res->list); + memset(isp_hw_res, 0, sizeof(*isp_hw_res)); + INIT_LIST_HEAD(&isp_hw_res->list); + } else { + memset(isp_hw_res, 0, sizeof(*isp_hw_res)); + } + + return 0; +} + +static const char *cam_ife_hw_mgr_get_res_state( + uint32_t res_state) +{ + switch (res_state) { + case CAM_ISP_RESOURCE_STATE_UNAVAILABLE: + return "UNAVAILABLE"; + case CAM_ISP_RESOURCE_STATE_AVAILABLE: + return "AVAILABLE"; + case CAM_ISP_RESOURCE_STATE_RESERVED: + return "RESERVED"; + case CAM_ISP_RESOURCE_STATE_INIT_HW: + return "HW INIT DONE"; + case CAM_ISP_RESOURCE_STATE_STREAMING: + return "STREAMING"; + default: + return "INVALID STATE"; + } +} + +static inline bool cam_ife_hw_mgr_check_path_port_compat( + uint32_t in_type, uint32_t out_type) +{ + int i; + const struct cam_isp_hw_path_port_map *map = &g_ife_hw_mgr.path_port_map; + + for (i = 0; i < map->num_entries; i++) { + if (map->entry[i][1] == out_type) + return (map->entry[i][0] == in_type); + } + + return (in_type == CAM_ISP_HW_VFE_IN_CAMIF); +} + +static void cam_ife_hw_mgr_dump_acquire_resources( + struct cam_ife_hw_mgr_ctx *hwr_mgr_ctx) +{ + struct 
cam_isp_hw_mgr_res *hw_mgr_res = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res_temp = NULL; + struct cam_isp_resource_node *hw_res = NULL; + uint64_t ms, hrs, min, sec; + int i = 0, j = 0; + + CAM_CONVERT_TIMESTAMP_FORMAT(hwr_mgr_ctx->ts, hrs, min, sec, ms); + + CAM_INFO(CAM_ISP, + "**** %llu:%llu:%llu.%llu ctx_idx: %u rdi_only: %s is_dual: %s acquired ****", + hrs, min, sec, ms, + hwr_mgr_ctx->ctx_index, + (hwr_mgr_ctx->flags.is_rdi_only_context ? "true" : "false"), + (hwr_mgr_ctx->flags.is_dual ? "true" : "false")); + + /* Iterate over CSID resources */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &hwr_mgr_ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + if (hw_res && hw_res->hw_intf) + CAM_INFO(CAM_ISP, + "CSID split_id:%d ctx_idx:%u hw_idx:%u res:%s type:%d res_id:%d state:%s", + i, hwr_mgr_ctx->ctx_index, + hw_res->hw_intf->hw_idx, + hw_res->res_name, + hw_res->res_type, + hw_res->res_id, + cam_ife_hw_mgr_get_res_state + (hw_res->res_state)); + } + } + + /* Iterate over IFE IN resources */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &hwr_mgr_ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + if (hw_res && hw_res->hw_intf) + CAM_INFO(CAM_ISP, + "IFE src split_id:%d ctx_idx:%u hw_idx:%u res:%s type:%d res_id:%d state:%s", + i, hwr_mgr_ctx->ctx_index, + hw_res->hw_intf->hw_idx, + hw_res->res_name, + hw_res->res_type, + hw_res->res_id, + cam_ife_hw_mgr_get_res_state + (hw_res->res_state)); + } + } + + /* Iterate over SFE IN resources */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &hwr_mgr_ctx->res_list_sfe_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + if (hw_res && hw_res->hw_intf) + CAM_INFO(CAM_ISP, + "SFE src split_id:%d ctx_idx:%u hw_idx:%u res:%s type:%d res_id:%d state:%s", + i, hwr_mgr_ctx->ctx_index, + hw_res->hw_intf->hw_idx, + 
hw_res->res_name, + hw_res->res_type, + hw_res->res_id, + cam_ife_hw_mgr_get_res_state + (hw_res->res_state)); + } + } + + /* Iterate over IFE/SFE RD resources */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &hwr_mgr_ctx->res_list_ife_in_rd, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + if (hw_res && hw_res->hw_intf) + CAM_INFO(CAM_ISP, + "src_rd split_id:%d ctx_idx:%u hw_idx:%u res:%s type:%d res_id:%d state:%s", + i, hwr_mgr_ctx->ctx_index, + hw_res->hw_intf->hw_idx, + hw_res->res_name, + hw_res->res_type, + hw_res->res_id, + cam_ife_hw_mgr_get_res_state + (hw_res->res_state)); + } + } + + /* Iterate over IFE OUT resources */ + for (i = 0; i < hwr_mgr_ctx->num_acq_vfe_out; i++) { + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + hw_mgr_res = &hwr_mgr_ctx->res_list_ife_out[i]; + hw_res = hw_mgr_res->hw_res[j]; + if (hw_res && hw_res->hw_intf) + CAM_INFO(CAM_ISP, + "IFE out split_id:%d ctx_idx:%u hw_idx:%u res:%s type:%d res_id:%d state:%s", + j, hwr_mgr_ctx->ctx_index, + hw_res->hw_intf->hw_idx, + hw_res->res_name, + hw_res->res_type, + hw_res->res_id, + cam_ife_hw_mgr_get_res_state + (hw_res->res_state)); + } + } + + /* Iterate over SFE OUT resources */ + for (i = 0; i < hwr_mgr_ctx->num_acq_sfe_out; i++) { + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + hw_mgr_res = &hwr_mgr_ctx->res_list_sfe_out[i]; + hw_res = hw_mgr_res->hw_res[j]; + if (hw_res && hw_res->hw_intf) + CAM_INFO(CAM_ISP, + "SFE out split_id:%d ctx_idx:%u hw_idx:%u res:%s type:%d res_id:%d state:%s", + j, hwr_mgr_ctx->ctx_index, + hw_res->hw_intf->hw_idx, + hw_res->res_name, + hw_res->res_type, + hw_res->res_id, + cam_ife_hw_mgr_get_res_state + (hw_res->res_state)); + } + } +} + +static void cam_ife_hw_mgr_dump_acq_rsrc_for_all_ctx(void) +{ + struct cam_ife_hw_mgr_ctx *ctx; + struct cam_ife_hw_mgr_ctx *ctx_temp; + + mutex_lock(&g_ife_hw_mgr.ctx_mutex); + if (list_empty(&g_ife_hw_mgr.used_ctx_list)) { + CAM_INFO(CAM_ISP, "Currently no ctx in 
use"); + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); + return; + } + + list_for_each_entry_safe(ctx, ctx_temp, + &g_ife_hw_mgr.used_ctx_list, list) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "ctx id:%u is_dual:%d num_base:%d rdi only:%d", + ctx->ctx_index, ctx->flags.is_dual, + ctx->num_base, ctx->flags.is_rdi_only_context); + + cam_ife_hw_mgr_dump_acquire_resources(ctx); + } + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); +} + +static void cam_ife_hw_mgr_print_acquire_info( + struct cam_ife_hw_mgr_ctx *hw_mgr_ctx, uint32_t num_pix_port, + uint32_t num_pd_port, uint32_t num_rdi_port, int acquire_failed) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res = NULL; + struct cam_isp_resource_node *hw_res = NULL; + char log_info[128]; + int hw_idx[CAM_ISP_HW_SPLIT_MAX] = {-1, -1}; + int sfe_hw_idx[CAM_ISP_HW_SPLIT_MAX] = {-1, -1}; + int i, len = 0; + uint64_t ms, sec, min, hrs; + + if (!list_empty(&hw_mgr_ctx->res_list_ife_src)) { + hw_mgr_res = list_first_entry(&hw_mgr_ctx->res_list_ife_src, + struct cam_isp_hw_mgr_res, list); + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + if (hw_res && hw_res->hw_intf) + hw_idx[i] = hw_res->hw_intf->hw_idx; + } + } + + if (!list_empty(&hw_mgr_ctx->res_list_sfe_src)) { + hw_mgr_res = list_first_entry(&hw_mgr_ctx->res_list_sfe_src, + struct cam_isp_hw_mgr_res, list); + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + if (hw_res && hw_res->hw_intf) + sfe_hw_idx[i] = hw_res->hw_intf->hw_idx; + } + } + + if (acquire_failed) + goto fail; + + /* 128 char in log_info */ + if (hw_mgr_ctx->flags.is_dual) { + len += scnprintf(log_info + len, (128 - len), "Dual IFE[%d: %d]", + hw_idx[CAM_ISP_HW_SPLIT_LEFT], + hw_idx[CAM_ISP_HW_SPLIT_RIGHT]); + if (hw_mgr_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) + len += scnprintf(log_info + len, (128 - len), " SFE[%d: %d]", + sfe_hw_idx[CAM_ISP_HW_SPLIT_LEFT], + sfe_hw_idx[CAM_ISP_HW_SPLIT_RIGHT]); + } else { + len += scnprintf(log_info + len, (128 - len), "Single 
IFE[%d]", + hw_idx[CAM_ISP_HW_SPLIT_LEFT]); + if (hw_mgr_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) + len += scnprintf(log_info + len, (128 - len), " SFE[%d]", + sfe_hw_idx[CAM_ISP_HW_SPLIT_LEFT]); + } + + if (hw_mgr_ctx->flags.is_sfe_shdr) + len += scnprintf(log_info + len, (128 - len), " sHDR: Y"); + + if (hw_mgr_ctx->flags.is_sfe_fs) + len += scnprintf(log_info + len, (128 - len), " SFE_FS: Y"); + + if (hw_mgr_ctx->flags.dsp_enabled) + len += scnprintf(log_info + len, (128 - len), " DSP: Y"); + + if (hw_mgr_ctx->flags.is_offline) + len += scnprintf(log_info + len, (128 - len), " OFFLINE: Y"); + + if (hw_mgr_ctx->is_hw_ctx_acq && + (!hw_mgr_ctx->hw_mgr->csid_hw_caps[hw_idx[CAM_ISP_HW_SPLIT_LEFT]].is_lite)) { + len += scnprintf(log_info + len, (128 - len), " HW_CTXT [SRC:DST_MASK]"); + + for (i = 0; i < CAM_ISP_MULTI_CTXT_MAX; i++) { + if (!hw_mgr_ctx->acq_hw_ctxt_src_dst_map[i]) + continue; + + len += scnprintf(log_info + len, (128 - len), " [%d:0x%x]", + i, hw_mgr_ctx->acq_hw_ctxt_src_dst_map[i]); + } + } + + CAM_GET_TIMESTAMP(hw_mgr_ctx->ts); + CAM_CONVERT_TIMESTAMP_FORMAT(hw_mgr_ctx->ts, hrs, min, sec, ms); + + CAM_INFO(CAM_ISP, + "%llu:%llu:%llu.%llu Acquired %s with [%u pix] [%u pd] [%u rdi] ports for ctx:%u", + hrs, min, sec, ms, + log_info, + num_pix_port, num_pd_port, num_rdi_port, + hw_mgr_ctx->ctx_index); + + return; + +fail: + CAM_ERR(CAM_ISP, + "Failed to acquire %s-IFE/SFE with [%u pix] [%u pd] [%u rdi] ports for ctx_idx:%u", + (hw_mgr_ctx->flags.is_dual) ? 
"dual" : "single", + num_pix_port, num_pd_port, num_rdi_port, hw_mgr_ctx->ctx_index); + CAM_INFO(CAM_ISP, "Previously acquired IFEs[%d %d] SFEs[%d %d], ctx_idx: %u", + hw_idx[CAM_ISP_HW_SPLIT_LEFT], hw_idx[CAM_ISP_HW_SPLIT_RIGHT], + sfe_hw_idx[CAM_ISP_HW_SPLIT_LEFT], sfe_hw_idx[CAM_ISP_HW_SPLIT_RIGHT], + hw_mgr_ctx->ctx_index); + + if (hw_mgr_ctx->is_hw_ctx_acq) + CAM_INFO(CAM_ISP, "HW_CTXT [SRC:DST_MASK] [%d:0x%x] [%d:0x%x] [%d:0x%x]", + CAM_ISP_MULTI_CTXT_0, + hw_mgr_ctx->acq_hw_ctxt_src_dst_map[CAM_ISP_MULTI_CTXT_0], + CAM_ISP_MULTI_CTXT_1, + hw_mgr_ctx->acq_hw_ctxt_src_dst_map[CAM_ISP_MULTI_CTXT_1], + CAM_ISP_MULTI_CTXT_2, + hw_mgr_ctx->acq_hw_ctxt_src_dst_map[CAM_ISP_MULTI_CTXT_2]); + + cam_ife_hw_mgr_dump_acq_rsrc_for_all_ctx(); +} + +static int cam_ife_mgr_csid_change_halt_mode(struct cam_ife_hw_mgr_ctx *ctx, + enum cam_ife_csid_halt_mode halt_mode) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *isp_res; + struct cam_ife_csid_hw_halt_args halt; + struct cam_hw_intf *hw_intf; + uint32_t i; + int rc = 0; + + if (!ctx->flags.is_dual) + return 0; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (i == CAM_ISP_HW_SPLIT_LEFT) + continue; + + if (!hw_mgr_res->hw_res[i] || + (hw_mgr_res->hw_res[i]->res_state != + CAM_ISP_RESOURCE_STATE_STREAMING)) + continue; + + isp_res = hw_mgr_res->hw_res[i]; + + if ((isp_res->res_type == CAM_ISP_RESOURCE_PIX_PATH) && + (isp_res->res_id == CAM_IFE_PIX_PATH_RES_IPP)) { + hw_intf = isp_res->hw_intf; + halt.node_res = isp_res; + halt.halt_mode = halt_mode; + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CSID_CHANGE_HALT_MODE, + &halt, + sizeof( + struct cam_ife_csid_hw_halt_args)); + if (rc) + CAM_ERR(CAM_ISP, "Halt update failed, ctx_idx: %u", + ctx->ctx_index); + break; + } + } + } + + return rc; +} + +static int cam_ife_mgr_csid_stop_hw( + struct cam_ife_hw_mgr_ctx *ctx, struct list_head *stop_list, + 
uint32_t base_idx, uint32_t stop_cmd, bool standby_en) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *isp_res; + struct cam_isp_resource_node *stop_res[CAM_IFE_PIX_PATH_RES_MAX - 1]; + struct cam_csid_hw_stop_args stop; + struct cam_hw_intf *hw_intf; + uint32_t i, cnt; + + cnt = 0; + list_for_each_entry(hw_mgr_res, stop_list, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i] || + (hw_mgr_res->hw_res[i]->res_state != + CAM_ISP_RESOURCE_STATE_STREAMING)) + continue; + + isp_res = hw_mgr_res->hw_res[i]; + if (isp_res->hw_intf->hw_idx != base_idx) + continue; + CAM_DBG(CAM_ISP, "base_idx %d ctx_idx: %u res:%s res_id %d cnt %u", + base_idx, ctx->ctx_index, isp_res->res_name, + isp_res->res_id, cnt); + stop_res[cnt] = isp_res; + cnt++; + } + } + + if (cnt) { + hw_intf = stop_res[0]->hw_intf; + stop.num_res = cnt; + stop.node_res = stop_res; + stop.stop_cmd = stop_cmd; + stop.standby_en = standby_en; + hw_intf->hw_ops.stop(hw_intf->hw_priv, &stop, sizeof(stop)); + for (i = 0; i < cnt; i++) + stop_res[i]->is_rdi_primary_res = false; + } + + return 0; +} + +static int cam_ife_hw_mgr_release_hw_for_ctx( + struct cam_ife_hw_mgr_ctx *ife_ctx) +{ + uint32_t i; + uint32_t num_out = 0; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_mgr_res *hw_mgr_res_temp; + + /* ife leaf resource */ + num_out = ife_ctx->num_acq_vfe_out; + for (i = 0; i < num_out; i++) { + cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_out[i], + false); + ife_ctx->num_acq_vfe_out--; + } + + /* fetch rd resource */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &ife_ctx->res_list_ife_in_rd, list) { + cam_ife_hw_mgr_free_hw_res(hw_mgr_res, true); + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res); + } + + /* ife source resource */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &ife_ctx->res_list_ife_src, list) { + cam_ife_hw_mgr_free_hw_res(hw_mgr_res, true); + 
cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res); + } + + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + /* sfe leaf resource */ + num_out = ife_ctx->num_acq_sfe_out; + for (i = 0; i < num_out; i++) { + cam_ife_hw_mgr_free_hw_res( + &ife_ctx->res_list_sfe_out[i], false); + ife_ctx->num_acq_sfe_out--; + } + + /* sfe source resource */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &ife_ctx->res_list_sfe_src, list) { + cam_ife_hw_mgr_free_hw_res(hw_mgr_res, true); + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, + &hw_mgr_res); + } + } + + /* ife csid resource */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &ife_ctx->res_list_ife_csid, list) { + cam_ife_hw_mgr_free_hw_res(hw_mgr_res, true); + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res); + } + + /* ife root node */ + if (ife_ctx->res_list_ife_in.res_type != CAM_ISP_RESOURCE_UNINT) + cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_in, true); + + /* clean up the callback function */ + ife_ctx->common.cb_priv = NULL; + ife_ctx->common.event_cb = NULL; + + /* Reset all stream flags */ + memset(&ife_ctx->flags, 0, sizeof(struct cam_ife_hw_mgr_ctx_flags)); + + CAM_DBG(CAM_ISP, "release context completed ctx id:%u", + ife_ctx->ctx_index); + + return 0; +} + + +static int cam_ife_hw_mgr_put_ctx( + struct list_head *src_list, + struct cam_ife_hw_mgr_ctx **ife_ctx) +{ + int rc = 0; + struct cam_ife_hw_mgr_ctx *ctx_ptr = NULL; + + mutex_lock(&g_ife_hw_mgr.ctx_mutex); + ctx_ptr = *ife_ctx; + if (ctx_ptr) + list_add_tail(&ctx_ptr->list, src_list); + *ife_ctx = NULL; + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); + return rc; +} + +static int cam_ife_hw_mgr_get_ctx( + struct list_head *src_list, + struct cam_ife_hw_mgr_ctx **ife_ctx) +{ + int rc = 0; + struct cam_ife_hw_mgr_ctx *ctx_ptr = NULL; + + mutex_lock(&g_ife_hw_mgr.ctx_mutex); + if (!list_empty(src_list)) { + ctx_ptr = list_first_entry(src_list, + struct cam_ife_hw_mgr_ctx, list); + list_del_init(&ctx_ptr->list); + } 
else { + CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx"); + rc = -1; + } + *ife_ctx = ctx_ptr; + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); + + return rc; +} + +static void cam_ife_mgr_add_base_info( + struct cam_ife_hw_mgr_ctx *ctx, + enum cam_isp_hw_split_id split_id, + uint32_t base_idx, + enum cam_isp_hw_type hw_type) +{ + uint32_t i; + + if (!ctx->num_base) { + ctx->base[0].split_id = split_id; + ctx->base[0].idx = base_idx; + ctx->base[0].hw_type = hw_type; + ctx->num_base++; + CAM_DBG(CAM_ISP, + "Add split id = %d ctx_idx: %u for base idx = %d num_base=%d hw_type=%d", + split_id, ctx->ctx_index, base_idx, ctx->num_base, hw_type); + } else { + /*Check if base index already exists in the list */ + for (i = 0; i < ctx->num_base; i++) { + if ((ctx->base[i].idx == base_idx) && + (ctx->base[i].hw_type == hw_type)) { + if (split_id != CAM_ISP_HW_SPLIT_MAX && + ctx->base[i].split_id == + CAM_ISP_HW_SPLIT_MAX) + ctx->base[i].split_id = split_id; + + break; + } + } + + if (i == ctx->num_base) { + ctx->base[ctx->num_base].split_id = split_id; + ctx->base[ctx->num_base].idx = base_idx; + ctx->base[ctx->num_base].hw_type = hw_type; + ctx->num_base++; + CAM_DBG(CAM_ISP, + "Add split_id=%d ctx_idx: %u for base idx=%d num_base=%d hw_type=%d", + split_id, ctx->ctx_index, base_idx, ctx->num_base, hw_type); + } + } +} + +/* Update base info for IFE & SFE HWs */ +static int cam_ife_mgr_process_base_info( + struct cam_ife_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *res = NULL; + uint32_t i; + bool hw_idx_map[CAM_IFE_CSID_HW_NUM_MAX] = {0}; + + if (list_empty(&ctx->res_list_ife_src) && + list_empty(&ctx->res_list_sfe_src)) { + CAM_ERR(CAM_ISP, "Mux List empty"); + return -ENODEV; + } + + /* IFE mux in resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + 
+ res = hw_mgr_res->hw_res[i]; + cam_ife_mgr_add_base_info(ctx, i, + res->hw_intf->hw_idx, + CAM_ISP_HW_TYPE_VFE); + CAM_DBG(CAM_ISP, "add IFE base info for hw %d ctx_idx: %u", + res->hw_intf->hw_idx, ctx->ctx_index); + } + } + + /*CSID resources */ + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + res = hw_mgr_res->hw_res[i]; + + if (hw_idx_map[res->hw_intf->hw_idx]) + continue; + + cam_ife_mgr_add_base_info(ctx, i, + res->hw_intf->hw_idx, + CAM_ISP_HW_TYPE_CSID); + hw_idx_map[res->hw_intf->hw_idx] = true; + CAM_DBG(CAM_ISP, "add CSID base info for hw %d, ctx_idx: %u", + res->hw_intf->hw_idx, ctx->ctx_index); + } + } + + /* SFE in resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_sfe_src, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + res = hw_mgr_res->hw_res[i]; + cam_ife_mgr_add_base_info(ctx, i, + res->hw_intf->hw_idx, + CAM_ISP_HW_TYPE_SFE); + CAM_DBG(CAM_ISP, "add SFE base info for hw %d, ctx_idx: %u", + res->hw_intf->hw_idx, ctx->ctx_index); + } + } + + CAM_DBG(CAM_ISP, "ctx base num = %d, ctx_idx: %u", ctx->num_base, ctx->ctx_index); + + return 0; +} + +static int cam_ife_mgr_share_sof_qtimer_addr(struct cam_ife_hw_mgr_ctx *ctx) +{ + /** + * The objective is to obtain the qtimer timestamp from + * the CSID path that drives the state machine's interrupts + * to ensure the events are aligned in time. IPP is selected + * for pixel pipelines; for SFE fetch use cases, + * RDI0 is specified; for RDI-only/ RDI-PD streams, + * any active RDI starting from RDI0 is picked. If none of + * the above criteria are met, the first CSID path acquired + * is used to fetch the timestamp. 
+ */ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_mgr_res *csid_res; + struct cam_isp_hw_mgr_res *ife_src_res; + struct cam_hw_intf *hw_intf; + struct cam_ife_csid_ts_reg_addr sof_ts_addr; + uint32_t primary_rdi_csid_res; + uint32_t primary_rdi_out_res; + int rc, i; + bool res_rdi_context_set = false, is_found = false; + + primary_rdi_csid_res = CAM_IFE_PIX_PATH_RES_MAX; + primary_rdi_out_res = g_ife_hw_mgr.isp_caps.max_vfe_out_res_type; + + if (cam_isp_is_ctx_primary_rdi(ctx)) { + for (i = 0; i < ctx->num_acq_vfe_out && !res_rdi_context_set; i++) { + hw_mgr_res = &ctx->res_list_ife_out[i]; + switch (hw_mgr_res->res_id) { + case CAM_ISP_IFE_OUT_RES_RDI_0: + case CAM_ISP_IFE_OUT_RES_RDI_1: + case CAM_ISP_IFE_OUT_RES_RDI_2: + case CAM_ISP_IFE_OUT_RES_RDI_3: + hw_mgr_res->hw_res[0]->is_rdi_primary_res = true; + res_rdi_context_set = true; + primary_rdi_out_res = hw_mgr_res->res_id; + break; + default: + break; + } + } + if (res_rdi_context_set) + primary_rdi_csid_res = cam_ife_hw_mgr_get_ife_csid_rdi_res_type( + primary_rdi_out_res); + } + + list_for_each_entry(csid_res, &ctx->res_list_ife_csid, list) { + if (csid_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + if (csid_res->res_id == CAM_IFE_PIX_PATH_RES_IPP || + csid_res->res_id == primary_rdi_csid_res || + (ctx->flags.is_fe_enabled && + csid_res->res_id == CAM_IFE_PIX_PATH_RES_RDI_0)) { + is_found = true; + break; + } + } + + if (!is_found) + csid_res = list_first_entry(&ctx->res_list_ife_csid, + struct cam_isp_hw_mgr_res, list); + + ife_src_res = list_first_entry(&ctx->res_list_ife_src, + struct cam_isp_hw_mgr_res, list); + + /* Left resource is always the master */ + hw_intf = csid_res->hw_res[0]->hw_intf; + sof_ts_addr.res_id = csid_res->res_id; + sof_ts_addr.get_addr = true; + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_GET_SET_PRIM_SOF_TS_ADDR, + &sof_ts_addr, sizeof(struct cam_ife_csid_ts_reg_addr)); + + if (rc) { + CAM_ERR(CAM_ISP, "Get CSID[%u] SOF ts addr 
failed", + hw_intf->hw_idx); + return rc; + } + + sof_ts_addr.get_addr = false; + hw_intf = ife_src_res->hw_res[0]->hw_intf; + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_GET_SET_PRIM_SOF_TS_ADDR, + &sof_ts_addr, sizeof(struct cam_ife_csid_ts_reg_addr)); + if (rc) { + CAM_ERR(CAM_ISP, + "Share SOF ts addr with IFE[%u] res id %u failed", + hw_intf->hw_idx, ife_src_res->res_id); + return rc; + } + + return 0; +} + +static int cam_ife_hw_mgr_acquire_res_ife_out_rdi( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_hw_mgr_res *ife_src_res, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + struct cam_vfe_acquire_args vfe_acquire; + struct cam_isp_out_port_generic_info *out_port = NULL; + struct cam_isp_hw_mgr_res *ife_out_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_context_comp_record *comp_grp = NULL; + uint32_t index; + uint32_t i, vfe_in_res_id; + uint32_t res_id = max_ife_out_res; + + /* take left resource */ + vfe_in_res_id = ife_src_res->hw_res[0]->res_id; + + vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT; + vfe_acquire.tasklet = ife_ctx->common.tasklet_info; + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + + if (cam_convert_rdi_out_res_id_to_src(out_port->res_type) != vfe_in_res_id) + continue; + + res_id = out_port->res_type & 0xFF; + CAM_DBG(CAM_ISP, "i = %d, ctx: %d out_res_id = %d, out_port: %d", + i, ife_ctx->ctx_index, res_id, out_port->res_type); + + vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops; + vfe_acquire.priv = ife_ctx; + vfe_acquire.vfe_out.out_port_info = out_port; + vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT; + vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index; + vfe_acquire.vfe_out.is_dual = 0; + vfe_acquire.vfe_out.disable_ubwc_comp = + g_ife_hw_mgr.debug_cfg.disable_ubwc_comp; + vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + vfe_acquire.buf_done_controller = ife_ctx->buf_done_controller; + hw_intf = 
ife_src_res->hw_res[0]->hw_intf; + vfe_acquire.vfe_out.use_wm_pack = ife_src_res->use_wm_pack; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &vfe_acquire, + sizeof(struct cam_vfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x, ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + goto err; + } + + index = vfe_acquire.vfe_out.comp_grp_id; + comp_grp = &ife_ctx->vfe_bus_comp_grp[index]; + comp_grp->res_id[comp_grp->num_res] = out_port->res_type; + comp_grp->num_res++; + break; + } + + CAM_DBG(CAM_ISP, "ctx: %d out_res_id = %d, in_res_id: %d", ife_ctx->ctx_index, res_id, + vfe_in_res_id); + + if (i == in_port->num_out_res || res_id >= max_ife_out_res) { + CAM_ERR(CAM_ISP, + "Cannot acquire out resource, i=%d, num_out_res=%d, ctx_idx: %u res_id: %d", + i, in_port->num_out_res, ife_ctx->ctx_index, res_id); + goto err; + } + + ife_ctx->vfe_out_map[res_id] = ife_ctx->num_acq_vfe_out; + ife_out_res = &ife_ctx->res_list_ife_out[ife_ctx->num_acq_vfe_out]; + ife_out_res->hw_res[0] = vfe_acquire.vfe_out.rsrc_node; + ife_out_res->is_dual_isp = 0; + ife_out_res->use_wm_pack = ife_src_res->use_wm_pack; + ife_out_res->res_id = out_port->res_type; + ife_out_res->res_type = CAM_ISP_RESOURCE_VFE_OUT; + ife_src_res->num_children++; + ife_ctx->num_acq_vfe_out++; + + return 0; +err: + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_ife_out_pixel( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_hw_mgr_res *ife_src_res, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -1; + uint32_t i, j, k; + struct cam_vfe_acquire_args vfe_acquire; + struct cam_isp_out_port_generic_info *out_port; + struct cam_isp_hw_mgr_res *ife_out_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_context_comp_record *comp_grp = NULL; + bool is_ife_out_in_list; + + for (i = 0; i < in_port->num_out_res; i++) { + is_ife_out_in_list = false; + out_port = &in_port->data[i]; + /* Skip output ports for SFE */ + if 
(!cam_ife_hw_mgr_is_ife_out_port(out_port->res_type)) + continue; + + if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type)) + continue; + + if (!cam_ife_hw_mgr_check_path_port_compat(ife_src_res->res_id, + out_port->res_type)) + continue; + + CAM_DBG(CAM_ISP, "res_type 0x%x, ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + + k = out_port->res_type & 0xFF; + + /* + * For Multi-context cases, acquire for different hw contexts of the same port + * might come as separate input data structure, which implies that this function + * might be called more than once for the same resource (with different + * hw context id). We find the existing resource instead of adding duplicate + * entries of resource for each hw context. + */ + if ((in_port->major_ver == 3) && (ife_src_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)) { + vfe_acquire.vfe_out.use_hw_ctxt = true; + if (ife_ctx->vfe_out_map[k] != 0xff) { + ife_out_res = &ife_ctx->res_list_ife_out[ife_ctx->vfe_out_map[k]]; + if ((ife_out_res->res_id == out_port->res_type) && + ife_out_res->hw_ctxt_id_mask) { + is_ife_out_in_list = true; + } + } + } + + if (!is_ife_out_in_list) { + ife_ctx->vfe_out_map[k] = ife_ctx->num_acq_vfe_out; + ife_out_res = &ife_ctx->res_list_ife_out[ife_ctx->num_acq_vfe_out]; + } + + ife_out_res->is_dual_isp = in_port->usage_type; + vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT; + vfe_acquire.tasklet = ife_ctx->common.tasklet_info; + vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops; + vfe_acquire.priv = ife_ctx; + vfe_acquire.vfe_out.out_port_info = out_port; + vfe_acquire.vfe_out.is_dual = ife_src_res->is_dual_isp; + vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index; + vfe_acquire.vfe_out.disable_ubwc_comp = + g_ife_hw_mgr.debug_cfg.disable_ubwc_comp; + vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + vfe_acquire.buf_done_controller = ife_ctx->buf_done_controller; + vfe_acquire.mc_comp_buf_done_controller = ife_ctx->mc_comp_buf_done_controller; + + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + 
if (!ife_src_res->hw_res[j]) + continue; + + hw_intf = ife_src_res->hw_res[j]->hw_intf; + + if (j == CAM_ISP_HW_SPLIT_LEFT) { + vfe_acquire.vfe_out.split_id = + CAM_ISP_HW_SPLIT_LEFT; + if (ife_src_res->is_dual_isp) { + /*TBD */ + vfe_acquire.vfe_out.is_master = 1; + vfe_acquire.vfe_out.dual_slave_core = + ife_ctx->right_hw_idx; + } else { + vfe_acquire.vfe_out.is_master = 0; + vfe_acquire.vfe_out.dual_slave_core = + 0; + } + } else { + vfe_acquire.vfe_out.split_id = + CAM_ISP_HW_SPLIT_RIGHT; + vfe_acquire.vfe_out.is_master = 0; + vfe_acquire.vfe_out.dual_slave_core = + ife_ctx->left_hw_idx; + } + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &vfe_acquire, + sizeof(struct cam_vfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "Can not acquire out resource 0x%x, ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + goto err; + } + + ife_out_res->hw_res[j] = vfe_acquire.vfe_out.rsrc_node; + + /* + * Here, isp mgr resource is unique for an output resource in case of + * multi-context cases, but comp group record can have multiple entries + * with different context IDs. 
+ */ + if (j == CAM_ISP_HW_SPLIT_LEFT) { + if (!is_ife_out_in_list) + ife_out_res->comp_grp_id = vfe_acquire.vfe_out.comp_grp_id; + + comp_grp = &ife_ctx->vfe_bus_comp_grp[ife_out_res->comp_grp_id]; + comp_grp->res_id[comp_grp->num_res] = + ife_out_res->hw_res[j]->res_id; + + if ((in_port->major_ver == 3) && vfe_acquire.vfe_out.use_hw_ctxt) + comp_grp->hw_ctxt_id[comp_grp->num_res] = + vfe_acquire.vfe_out.out_port_info->hw_context_id; + else + comp_grp->hw_ctxt_id[comp_grp->num_res] = 0x0; + + comp_grp->num_res++; + } + + CAM_DBG(CAM_ISP, "resource type:0x%x res id:0x%x comp grp id:%d ctx:%u", + ife_out_res->hw_res[j]->res_type, + ife_out_res->hw_res[j]->res_id, + ife_out_res->comp_grp_id, ife_ctx->ctx_index); + } + + ife_out_res->res_type = CAM_ISP_RESOURCE_VFE_OUT; + ife_out_res->res_id = out_port->res_type; + ife_out_res->hw_ctxt_id_mask |= vfe_acquire.vfe_out.out_port_info->hw_context_id; + if (!is_ife_out_in_list) { + ife_src_res->num_children++; + ife_ctx->num_acq_vfe_out++; + } + } + + return 0; +err: + /* release resource at the entry function */ + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_sfe_out_rdi( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_hw_mgr_res *sfe_src_res, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + uint32_t i, sfe_out_res_id, sfe_in_res_id; + struct cam_sfe_acquire_args sfe_acquire; + struct cam_isp_out_port_generic_info *out_port = NULL; + struct cam_isp_hw_mgr_res *sfe_out_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_context_comp_record *comp_grp = NULL; + uint32_t index; + + /* take left resource */ + sfe_in_res_id = sfe_src_res->hw_res[0]->res_id; + + switch (sfe_in_res_id) { + case CAM_ISP_HW_SFE_IN_RDI0: + sfe_out_res_id = CAM_ISP_SFE_OUT_RES_RDI_0; + break; + case CAM_ISP_HW_SFE_IN_RDI1: + sfe_out_res_id = CAM_ISP_SFE_OUT_RES_RDI_1; + break; + case CAM_ISP_HW_SFE_IN_RDI2: + sfe_out_res_id = CAM_ISP_SFE_OUT_RES_RDI_2; + break; + case CAM_ISP_HW_SFE_IN_RDI3: + sfe_out_res_id = 
CAM_ISP_SFE_OUT_RES_RDI_3; + break; + case CAM_ISP_HW_SFE_IN_RDI4: + sfe_out_res_id = CAM_ISP_SFE_OUT_RES_RDI_4; + break; + default: + CAM_ERR(CAM_ISP, + "invalid SFE RDI resource type 0x%x, ctx_idx: %u", + sfe_in_res_id, ife_ctx->ctx_index); + goto err; + } + + CAM_DBG(CAM_ISP, + "sfe_in_res_id: 0x%x sfe_out_res_id: 0x%x ctx_idx: %u", + sfe_in_res_id, sfe_out_res_id, ife_ctx->ctx_index); + + sfe_acquire.rsrc_type = CAM_ISP_RESOURCE_SFE_OUT; + sfe_acquire.tasklet = ife_ctx->common.tasklet_info; + ife_ctx->sfe_out_map[sfe_out_res_id & 0xFF] = ife_ctx->num_acq_sfe_out; + sfe_out_res = &ife_ctx->res_list_sfe_out[ife_ctx->num_acq_sfe_out]; + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + CAM_DBG(CAM_ISP, + "i: %d sfe_out_res_id: 0x%x out_port: 0x%x ctx_idx: %u", + i, sfe_out_res_id, out_port->res_type, ife_ctx->ctx_index); + + if (sfe_out_res_id != out_port->res_type) + continue; + + sfe_acquire.sfe_out.cdm_ops = ife_ctx->cdm_ops; + sfe_acquire.priv = ife_ctx; + sfe_acquire.sfe_out.out_port_info = out_port; + sfe_acquire.sfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT; + sfe_acquire.sfe_out.unique_id = ife_ctx->ctx_index; + sfe_acquire.sfe_out.is_dual = 0; + sfe_acquire.buf_done_controller = ife_ctx->buf_done_controller; + sfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + sfe_acquire.sfe_out.use_wm_pack = sfe_src_res->use_wm_pack; + hw_intf = sfe_src_res->hw_res[0]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &sfe_acquire, + sizeof(struct cam_sfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "Can not acquire out resource: 0x%x ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + goto err; + } + + index = sfe_acquire.sfe_out.comp_grp_id; + comp_grp = &ife_ctx->sfe_bus_comp_grp[index]; + comp_grp->res_id[comp_grp->num_res] = sfe_out_res_id; + comp_grp->num_res++; + break; + } + + if (i == in_port->num_out_res) { + CAM_ERR(CAM_ISP, + "Cannot acquire out resource i: %d, num_out_res: %u ctx_idx: %u", + i, 
in_port->num_out_res, ife_ctx->ctx_index); + goto err; + } + + sfe_out_res->hw_res[0] = sfe_acquire.sfe_out.rsrc_node; + sfe_out_res->is_dual_isp = 0; + sfe_out_res->use_wm_pack = sfe_src_res->use_wm_pack; + sfe_out_res->res_id = sfe_out_res_id; + sfe_out_res->res_type = CAM_ISP_RESOURCE_SFE_OUT; + sfe_src_res->num_children++; + ife_ctx->num_acq_sfe_out++; + return 0; + +err: + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_sfe_out_pix( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_hw_mgr_res *sfe_src_res, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -1, k; + uint32_t i, j; + struct cam_sfe_acquire_args sfe_acquire; + struct cam_isp_out_port_generic_info *out_port; + struct cam_isp_hw_mgr_res *sfe_out_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_context_comp_record *comp_grp = NULL; + uint32_t index; + + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + + /* Skip IFE ports */ + if (!cam_ife_hw_mgr_is_sfe_out_port(out_port->res_type)) + continue; + + if (cam_ife_hw_mgr_is_sfe_rdi_res(out_port->res_type)) + continue; + + k = out_port->res_type & 0xFF; + CAM_DBG(CAM_ISP, "ctx_idx: %u res_type: 0x%x", + ife_ctx->ctx_index, out_port->res_type); + + ife_ctx->sfe_out_map[k] = ife_ctx->num_acq_sfe_out; + sfe_out_res = &ife_ctx->res_list_sfe_out[ife_ctx->num_acq_sfe_out]; + sfe_out_res->is_dual_isp = in_port->usage_type; + sfe_acquire.rsrc_type = CAM_ISP_RESOURCE_SFE_OUT; + sfe_acquire.tasklet = ife_ctx->common.tasklet_info; + sfe_acquire.sfe_out.cdm_ops = ife_ctx->cdm_ops; + sfe_acquire.priv = ife_ctx; + sfe_acquire.sfe_out.out_port_info = out_port; + sfe_acquire.sfe_out.is_dual = sfe_src_res->is_dual_isp; + sfe_acquire.sfe_out.unique_id = ife_ctx->ctx_index; + sfe_acquire.buf_done_controller = ife_ctx->buf_done_controller; + sfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + if (!sfe_src_res->hw_res[j]) + continue; + + hw_intf = 
sfe_src_res->hw_res[j]->hw_intf; + + if (j == CAM_ISP_HW_SPLIT_LEFT) { + sfe_acquire.sfe_out.split_id = + CAM_ISP_HW_SPLIT_LEFT; + if (sfe_src_res->is_dual_isp) + sfe_acquire.sfe_out.is_master = 1; + else + sfe_acquire.sfe_out.is_master = 0; + } else { + sfe_acquire.sfe_out.split_id = + CAM_ISP_HW_SPLIT_RIGHT; + sfe_acquire.sfe_out.is_master = 0; + } + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &sfe_acquire, + sizeof(struct cam_sfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "Can not acquire out resource 0x%x ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + goto err; + } + + sfe_out_res->hw_res[j] = + sfe_acquire.sfe_out.rsrc_node; + if (j == CAM_ISP_HW_SPLIT_LEFT) { + index = sfe_acquire.sfe_out.comp_grp_id; + comp_grp = &ife_ctx->sfe_bus_comp_grp[index]; + comp_grp->res_id[comp_grp->num_res] = + sfe_out_res->hw_res[j]->res_id; + comp_grp->num_res++; + } + + CAM_DBG(CAM_ISP, + "ctx:%u res_type:0x%x res: %s res id:0x%x comp grp id:%d", + ife_ctx->ctx_index, sfe_out_res->hw_res[j]->res_type, + sfe_out_res->hw_res[j]->res_name, + sfe_out_res->hw_res[j]->res_id, + sfe_acquire.sfe_out.comp_grp_id); + } + sfe_out_res->res_type = CAM_ISP_RESOURCE_SFE_OUT; + sfe_out_res->res_id = out_port->res_type; + sfe_src_res->num_children++; + ife_ctx->num_acq_sfe_out++; + } + + return 0; +err: + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_sfe_out( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + struct cam_isp_hw_mgr_res *sfe_res_iterator; + + if (list_empty(&ife_ctx->res_list_sfe_src)) { + CAM_WARN(CAM_ISP, "SFE src list empty"); + return 0; + } + + list_for_each_entry(sfe_res_iterator, + &ife_ctx->res_list_sfe_src, list) { + if (sfe_res_iterator->num_children) + continue; + + switch (sfe_res_iterator->res_id) { + case CAM_ISP_HW_SFE_IN_PIX: + rc = cam_ife_hw_mgr_acquire_res_sfe_out_pix(ife_ctx, + sfe_res_iterator, in_port); + break; + case CAM_ISP_HW_SFE_IN_RDI0: + case 
CAM_ISP_HW_SFE_IN_RDI1: + case CAM_ISP_HW_SFE_IN_RDI2: + /* for FE use-cases acquire both RDI and PIX ports */ + rc = cam_ife_hw_mgr_acquire_res_sfe_out_rdi(ife_ctx, + sfe_res_iterator, in_port); + if (rc) + goto err; + + if (ife_ctx->flags.is_fe_enabled) + rc = cam_ife_hw_mgr_acquire_res_sfe_out_pix(ife_ctx, + sfe_res_iterator, in_port); + break; + case CAM_ISP_HW_SFE_IN_RDI3: + case CAM_ISP_HW_SFE_IN_RDI4: + rc = cam_ife_hw_mgr_acquire_res_sfe_out_rdi(ife_ctx, + sfe_res_iterator, in_port); + break; + default: + CAM_ERR(CAM_ISP, "Unknown SFE IN resource: %d, ctx_idx: %u", + sfe_res_iterator->res_id, ife_ctx->ctx_index); + rc = -EINVAL; + break; + } + if (rc) + goto err; + } + + return 0; +err: + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_ife_out( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + struct cam_isp_hw_mgr_res *ife_src_res; + + if (list_empty(&ife_ctx->res_list_ife_src)) { + CAM_WARN(CAM_ISP, "IFE src list empty, ctx_idx: %u", ife_ctx->ctx_index); + return 0; + } + + list_for_each_entry(ife_src_res, &ife_ctx->res_list_ife_src, list) { + if (!((in_port->major_ver == 3) && ife_src_res->hw_ctxt_id_mask) && + ife_src_res->num_children) + continue; + + switch (ife_src_res->res_id) { + case CAM_ISP_HW_VFE_IN_CAMIF: + case CAM_ISP_HW_VFE_IN_PDLIB: + case CAM_ISP_HW_VFE_IN_RD: + case CAM_ISP_HW_VFE_IN_LCR: + rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx, + ife_src_res, in_port); + break; + case CAM_ISP_HW_VFE_IN_RDI0: + case CAM_ISP_HW_VFE_IN_RDI1: + case CAM_ISP_HW_VFE_IN_RDI2: + case CAM_ISP_HW_VFE_IN_RDI3: + case CAM_ISP_HW_VFE_IN_RDI4: + rc = cam_ife_hw_mgr_acquire_res_ife_out_rdi(ife_ctx, + ife_src_res, in_port); + break; + default: + CAM_ERR(CAM_ISP, "Unknown IFE SRC resource: %d, ctx_idx: %u", + ife_src_res->res_id, ife_ctx->ctx_index); + break; + } + if (rc) + goto err; + } + + return 0; +err: + /* release resource on entry function */ + return rc; +} + +static inline void 
cam_ife_mgr_count_functional_sfe(void) +{ + int i; + + g_num_sfe_functional = 0; + + for (i = 0; i < CAM_SFE_HW_NUM_MAX; i++) { + if (g_ife_hw_mgr.sfe_devices[i]) + g_num_sfe_functional++; + } + + CAM_DBG(CAM_ISP, "counted %u functional SFEs", g_num_sfe_functional); +} + +static inline void cam_ife_mgr_count_functional_ife(void) +{ + int i; + + g_num_ife_functional = 0; + g_num_ife_lite_functional = 0; + + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (g_ife_hw_mgr.ife_devices[i]) { + if (g_ife_hw_mgr.ife_dev_caps[i].is_lite) + g_num_ife_lite_functional++; + else + g_num_ife_functional++; + } + } + CAM_DBG(CAM_ISP, "counted functional %d IFE and %d IFE lite", g_num_ife_functional, + g_num_ife_lite_functional); +} + +static int cam_convert_hw_idx_to_sfe_hw_num(int hw_idx) +{ + if (hw_idx < g_num_sfe_available) { + switch (hw_idx) { + case 0: return CAM_ISP_SFE0_HW; + case 1: return CAM_ISP_SFE1_HW; + case 2: return CAM_ISP_SFE2_HW; + } + } else { + CAM_ERR(CAM_ISP, "SFE hw idx %d out-of-bounds max available %u", + hw_idx, g_num_sfe_available); + } + + return 0; +} + +static int cam_convert_hw_idx_to_ife_hw_num(int hw_idx) +{ + if (hw_idx < g_num_ife_available) { + switch (hw_idx) { + case 0: return CAM_ISP_IFE0_HW; + case 1: return CAM_ISP_IFE1_HW; + case 2: return CAM_ISP_IFE2_HW; + } + } else if (hw_idx < g_num_ife_available + g_num_ife_lite_available) { + switch (hw_idx - g_num_ife_available) { + case 0: return CAM_ISP_IFE0_LITE_HW; + case 1: return CAM_ISP_IFE1_LITE_HW; + case 2: return CAM_ISP_IFE2_LITE_HW; + case 3: return CAM_ISP_IFE3_LITE_HW; + case 4: return CAM_ISP_IFE4_LITE_HW; + } + } else { + CAM_ERR(CAM_ISP, "hw idx %d out-of-bounds", hw_idx); + } + return 0; +} + +static int cam_convert_rdi_out_res_id_to_src(int res_id) +{ + if (res_id == CAM_ISP_IFE_OUT_RES_RDI_0) + return CAM_ISP_HW_VFE_IN_RDI0; + else if (res_id == CAM_ISP_IFE_OUT_RES_RDI_1) + return CAM_ISP_HW_VFE_IN_RDI1; + else if (res_id == CAM_ISP_IFE_OUT_RES_RDI_2) + return 
CAM_ISP_HW_VFE_IN_RDI2; + else if (res_id == CAM_ISP_IFE_OUT_RES_RDI_3) + return CAM_ISP_HW_VFE_IN_RDI3; + else if (res_id == CAM_ISP_IFE_OUT_RES_RDI_4) + return CAM_ISP_HW_VFE_IN_RDI4; + return CAM_ISP_HW_VFE_IN_MAX; +} + +static int cam_convert_csid_rdi_res_to_ife_src(int res_id) +{ + enum cam_isp_hw_vfe_in_mux src_id; + + switch (res_id) { + case CAM_IFE_PIX_PATH_RES_RDI_0: + src_id = CAM_ISP_HW_VFE_IN_RDI0; + break; + case CAM_IFE_PIX_PATH_RES_RDI_1: + src_id = CAM_ISP_HW_VFE_IN_RDI1; + break; + case CAM_IFE_PIX_PATH_RES_RDI_2: + src_id = CAM_ISP_HW_VFE_IN_RDI2; + break; + case CAM_IFE_PIX_PATH_RES_RDI_3: + src_id = CAM_ISP_HW_VFE_IN_RDI3; + break; + case CAM_IFE_PIX_PATH_RES_RDI_4: + src_id = CAM_ISP_HW_VFE_IN_RDI4; + break; + default: + src_id = CAM_ISP_HW_VFE_IN_MAX; + break; + } + + return src_id; +} + +static int cam_convert_sfe_res_to_path(int res_id) +{ + switch (res_id) { + case CAM_ISP_HW_SFE_IN_PIX: + return CAM_ISP_PXL_PATH; + case CAM_ISP_HW_SFE_IN_RDI0: + return CAM_ISP_RDI0_PATH; + case CAM_ISP_HW_SFE_IN_RDI1: + return CAM_ISP_RDI1_PATH; + case CAM_ISP_HW_SFE_IN_RDI2: + return CAM_ISP_RDI2_PATH; + case CAM_ISP_HW_SFE_IN_RDI3: + return CAM_ISP_RDI3_PATH; + case CAM_ISP_HW_SFE_IN_RDI4: + return CAM_ISP_RDI4_PATH; + default: + CAM_ERR(CAM_ISP, "SFE res path invalid res_id: 0x%x", + res_id); + } + + return 0; +} + +static int cam_convert_res_id_to_hw_path(int res_id, int csid_res_id) +{ + if (res_id == CAM_ISP_HW_VFE_IN_LCR) { + return CAM_ISP_LCR_PATH; + } else if (res_id == CAM_ISP_HW_VFE_IN_PDLIB) { + return CAM_ISP_PPP_PATH; + } else if (res_id == CAM_ISP_HW_VFE_IN_CAMIF) { + if (csid_res_id == CAM_IFE_PIX_PATH_RES_IPP_1) + return CAM_ISP_PXL1_PATH; + else if (csid_res_id == CAM_IFE_PIX_PATH_RES_IPP_2) + return CAM_ISP_PXL2_PATH; + else + return CAM_ISP_PXL_PATH; + } else if (res_id == CAM_ISP_HW_VFE_IN_RDI0) { + return CAM_ISP_RDI0_PATH; + } else if (res_id == CAM_ISP_HW_VFE_IN_RDI1) { + return CAM_ISP_RDI1_PATH; + } else if (res_id == 
CAM_ISP_HW_VFE_IN_RDI2) { + return CAM_ISP_RDI2_PATH; + } else if (res_id == CAM_ISP_HW_VFE_IN_RDI3) { + return CAM_ISP_RDI3_PATH; + } else if (res_id == CAM_ISP_HW_VFE_IN_RDI4) { + return CAM_ISP_RDI4_PATH; + } + + return 0; +} + +static int cam_ife_hw_mgr_acquire_sfe_hw( + bool is_right_hw, + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_sfe_acquire_args *sfe_acquire) +{ + int rc = -EINVAL; + uint32_t hw_idx; + struct cam_hw_intf *hw_intf = NULL; + struct cam_ife_hw_mgr *ife_hw_mgr = ife_ctx->hw_mgr; + + if (is_right_hw) + hw_idx = ife_ctx->right_hw_idx; + else + hw_idx = ife_ctx->left_hw_idx; + + if ((hw_idx >= CAM_SFE_HW_NUM_MAX) || !ife_hw_mgr->sfe_devices[hw_idx]) { + CAM_ERR(CAM_ISP, "the hw index:%d is wrong"); + return -EINVAL; + } + + hw_intf = ife_hw_mgr->sfe_devices[hw_idx]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + sfe_acquire, + sizeof(struct cam_sfe_acquire_args)); + if (rc) { + CAM_DBG(CAM_ISP, + "Can not acquire SFE HW: %d for res: %d ctx_idx: %u", + hw_idx, sfe_acquire->sfe_in.res_id, ife_ctx->ctx_index); + } + + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_sfe_src_util( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + struct cam_isp_hw_mgr_res *csid_res, + struct cam_sfe_acquire_args *sfe_acquire, + uint32_t *acquired_hw_id, + uint32_t *acquired_hw_path) +{ + int rc = -1, i; + struct cam_isp_hw_mgr_res *sfe_src_res; + + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, + &sfe_src_res); + if (rc) { + CAM_ERR(CAM_ISP, "Ctx : %u failed to get res", ife_ctx->ctx_index); + return rc; + } + + sfe_acquire->rsrc_type = CAM_ISP_RESOURCE_SFE_IN; + sfe_acquire->tasklet = ife_ctx->common.tasklet_info; + sfe_acquire->sfe_in.cdm_ops = ife_ctx->cdm_ops; + sfe_acquire->sfe_in.in_port = in_port; + sfe_acquire->sfe_in.is_offline = ife_ctx->flags.is_offline; + sfe_acquire->priv = ife_ctx; + sfe_acquire->event_cb = cam_ife_hw_mgr_event_handler; + sfe_acquire->sfe_in.is_dual = 
csid_res->is_dual_isp; + + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_sfe_src, + &sfe_src_res); + + sfe_src_res->res_type = sfe_acquire->rsrc_type; + sfe_src_res->res_id = sfe_acquire->sfe_in.res_id; + sfe_src_res->is_dual_isp = csid_res->is_dual_isp; + sfe_src_res->use_wm_pack = csid_res->use_wm_pack; + + for (i = sfe_src_res->is_dual_isp; i >= 0; i--) { + rc = cam_ife_hw_mgr_acquire_sfe_hw(i, + ife_ctx, sfe_acquire); + + if (rc || !sfe_acquire->sfe_in.rsrc_node) { + CAM_ERR(CAM_ISP, + "Failed to acquire split_id: %d SFE for res_type: %u id: %u ctx_idx: %u", + i, sfe_src_res->res_type, sfe_src_res->res_id, ife_ctx->ctx_index); + goto err; + } + + sfe_src_res->hw_res[i] = + sfe_acquire->sfe_in.rsrc_node; + + *acquired_hw_id |= + cam_convert_hw_idx_to_sfe_hw_num( + sfe_src_res->hw_res[i]->hw_intf->hw_idx); + acquired_hw_path[i] |= cam_convert_sfe_res_to_path( + sfe_src_res->hw_res[i]->res_id); + + CAM_DBG(CAM_ISP, + "acquire success %s SFE: %u res_name: %s res_type: %u res_id: %u ctx_idx: %u", + ((i == CAM_ISP_HW_SPLIT_LEFT) ? 
"LEFT" : "RIGHT"), + sfe_src_res->hw_res[i]->hw_intf->hw_idx, + sfe_src_res->hw_res[i]->res_name, + sfe_src_res->hw_res[i]->res_type, + sfe_src_res->hw_res[i]->res_id, ife_ctx->ctx_index); + } + csid_res->num_children++; +err: + return rc; +} + +static bool cam_ife_hw_mgr_is_csid_rdi_sfe_rdi_out( + struct cam_isp_in_port_generic_info *in_port, + uint32_t res_id) +{ + struct cam_isp_out_port_generic_info *out_port; + uint32_t i = 0; + + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + if (res_id == cam_ife_hw_mgr_get_ife_csid_rdi_res_type( + out_port->res_type)) + return true; + } + return false; +} + +static int cam_ife_hw_mgr_acquire_res_sfe_src( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t *acquired_hw_id, uint32_t *acquired_hw_path) +{ + int rc = -1; + struct cam_sfe_acquire_args sfe_acquire; + struct cam_isp_hw_mgr_res *csid_res; + long sfe_required_res = 0; + uint32_t sfe_acquired_res = 0; + uint32_t sfe_res_id = 0; + struct cam_ife_hw_mgr *hw_mgr; + struct cam_isp_hw_mgr_res *csid_res_map[CAM_ISP_HW_SFE_IN_MAX]; + + hw_mgr = ife_ctx->hw_mgr; + list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) { + if (csid_res->num_children) + continue; + + if (csid_res->res_id == CAM_IFE_PIX_PATH_RES_PPP) + continue; + + switch (csid_res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + sfe_required_res |= BIT(CAM_ISP_HW_SFE_IN_PIX); + sfe_res_id = CAM_ISP_HW_SFE_IN_PIX; + csid_res_map[sfe_res_id] = csid_res; + continue; + case CAM_IFE_PIX_PATH_RES_RDI_0: + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI0; + break; + case CAM_IFE_PIX_PATH_RES_RDI_1: + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI1; + break; + case CAM_IFE_PIX_PATH_RES_RDI_2: + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI2; + break; + case CAM_IFE_PIX_PATH_RES_RDI_3: + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI3; + break; + case CAM_IFE_PIX_PATH_RES_RDI_4: + sfe_res_id = CAM_ISP_HW_SFE_IN_RDI4; + break; + } + + /* + * We acquire RDI only if RDI out is 
available, this will take + * care of cases where CSID RDI goes to SFE PIX + */ + if (cam_ife_hw_mgr_is_csid_rdi_sfe_rdi_out(in_port, csid_res->res_id)) { + sfe_required_res |= BIT(sfe_res_id); + csid_res_map[sfe_res_id] = csid_res; + } + + /* + * We need PIX resource without CSID IPP for following cases: + * 1. No read count + * 2. Dynamic switch from SHDR-->HDR and HDR-->SHDR is possible + */ + if ((!(sfe_required_res & BIT(CAM_ISP_HW_SFE_IN_PIX))) && + (!in_port->ife_rd_count || in_port->dynamic_hdr_switch_en) && + (BIT(csid_res->res_id) == hw_mgr->csid_hw_caps[0].sfe_ipp_input_rdi_res)) { + sfe_required_res |= BIT(CAM_ISP_HW_SFE_IN_PIX); + csid_res_map[CAM_ISP_HW_SFE_IN_PIX] = csid_res; + } + } + + CAM_DBG(CAM_ISP, "ctx: %u Required SFE in resources: 0x%x", ife_ctx->ctx_index, + sfe_required_res); + + while (sfe_required_res) { + sfe_res_id = ffs(sfe_required_res) - 1; + clear_bit(sfe_res_id, &sfe_required_res); + + sfe_acquire.sfe_in.res_id = sfe_res_id; + rc = cam_ife_hw_mgr_acquire_res_sfe_src_util(ife_ctx, in_port, + csid_res_map[sfe_res_id], &sfe_acquire, + acquired_hw_id, acquired_hw_path); + if (rc) + goto err; + sfe_acquired_res |= sfe_res_id; + } + + CAM_DBG(CAM_ISP, "ctx: %u Acquired SFE in resources: 0x%x", ife_ctx->ctx_index, + sfe_acquired_res); + return 0; + +err: + CAM_ERR(CAM_ISP, "Acquire SFE failed ctx: %u acquired_res 0x%x sfe_res %u ctx_idx: %u", + ife_ctx->ctx_index, sfe_acquired_res, sfe_res_id, ife_ctx->ctx_index); + + return rc; +} + +static bool cam_ife_mgr_check_can_use_lite( + struct cam_csid_hw_reserve_resource_args *csid_acquire, + struct cam_ife_hw_mgr_ctx *ife_ctx) +{ + bool can_use_lite = false; + + if (ife_ctx->flags.is_rdi_only_context || + csid_acquire->in_port->can_use_lite) { + can_use_lite = true; + goto end; + } + + switch (csid_acquire->res_id) { + + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + can_use_lite = true; + break; + 
default: + can_use_lite = false; + goto end; + } + + if (ife_ctx->flags.is_fe_enabled || ife_ctx->flags.dsp_enabled) + can_use_lite = false; + + CAM_DBG(CAM_ISP, + "in_port lite hint %d, rdi_only: %d can_use_lite: %d res_id: %u ctx_idx: %u", + csid_acquire->in_port->can_use_lite, + ife_ctx->flags.is_rdi_only_context, + can_use_lite, csid_acquire->res_id, ife_ctx->ctx_index); + +end: + return can_use_lite; +} + +static int cam_ife_hw_mgr_acquire_res_ife_bus_rd( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port) +{ + int i, rc = -EINVAL; + struct cam_vfe_acquire_args vfe_acquire; + struct cam_isp_hw_mgr_res *ife_bus_rd_res; + struct cam_hw_intf *hw_intf; + struct cam_ife_hw_mgr *ife_hw_mgr; + + ife_hw_mgr = ife_ctx->hw_mgr; + + if (!ife_hw_mgr->ife_devices[ife_ctx->left_hw_idx]) { + CAM_ERR(CAM_ISP, "ife_devices[%d] is NULL", ife_ctx->left_hw_idx); + return -ENODEV; + } + + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &ife_bus_rd_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource, ctx_idx: %u", ife_ctx->ctx_index); + return -ENODEV; + } + + vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_BUS_RD; + vfe_acquire.tasklet = ife_ctx->common.tasklet_info; + vfe_acquire.priv = ife_ctx; + vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + + vfe_acquire.vfe_bus_rd.cdm_ops = ife_ctx->cdm_ops; + vfe_acquire.vfe_bus_rd.is_dual = (uint32_t)ife_ctx->flags.is_dual; + vfe_acquire.vfe_bus_rd.is_offline = ife_ctx->flags.is_offline; + vfe_acquire.vfe_bus_rd.res_id = CAM_ISP_HW_VFE_IN_RD; + vfe_acquire.vfe_bus_rd.unpacker_fmt = in_port->fe_unpacker_fmt; + + if (ife_ctx->left_hw_idx >= CAM_IFE_HW_NUM_MAX) { + if (in_port->ife_rd_count) { + for (i = CAM_IFE_HW_NUM_MAX - 1; i >= 0; i--) { + if (!ife_hw_mgr->ife_devices[i]) + continue; + + hw_intf = ife_hw_mgr->ife_devices[i]->hw_intf; + if (ife_hw_mgr->ife_dev_caps[hw_intf->hw_idx].is_lite) + continue; + + vfe_acquire.vfe_bus_rd.rsrc_node = NULL; + rc = 
hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &vfe_acquire, sizeof(struct cam_vfe_acquire_args)); + if (!rc && vfe_acquire.vfe_bus_rd.rsrc_node) + goto acquire_successful; + else + CAM_ERR(CAM_ISP, + "IFE[%d] acquire failed (rc=%d), ctx_idx: %u", + i, rc, ife_ctx->ctx_index); + } + CAM_ERR(CAM_ISP, "Can't find free IFE for ctx_idx: %u", + ife_ctx->ctx_index); + goto put_res; + } else { + rc = -ENODEV; + CAM_ERR(CAM_ISP, + "The acquired hw idx %d is invalid and it isn't FE usecase", + ife_ctx->left_hw_idx); + goto put_res; + } + } + + if (!ife_hw_mgr->ife_devices[ife_ctx->left_hw_idx]) { + CAM_ERR(CAM_ISP, "IFE device %d is NULL.", + ife_ctx->left_hw_idx); + goto put_res; + } + + hw_intf = ife_hw_mgr->ife_devices[ife_ctx->left_hw_idx]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &vfe_acquire, sizeof(struct cam_vfe_acquire_args)); + + if (rc || !vfe_acquire.vfe_bus_rd.rsrc_node) { + rc = -ENODEV; + CAM_ERR(CAM_ISP, "Failed to acquire VFE:%d BUS RD for LEFT, ctx_idx: %u", + ife_ctx->left_hw_idx, ife_ctx->ctx_index); + goto put_res; + } + +acquire_successful: + ife_bus_rd_res->hw_res[CAM_ISP_HW_SPLIT_LEFT] = + vfe_acquire.vfe_bus_rd.rsrc_node; + + CAM_DBG(CAM_ISP, "Acquired VFE:%d BUS RD for LEFT, ctx_idx: %u", + ife_ctx->left_hw_idx, ife_ctx->ctx_index); + + ife_ctx->left_hw_idx = hw_intf->hw_idx; + ife_bus_rd_res->res_type = vfe_acquire.rsrc_type; + ife_bus_rd_res->res_id = vfe_acquire.vfe_in.res_id; + ife_bus_rd_res->is_dual_isp = (uint32_t)ife_ctx->flags.is_dual; + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_in_rd, &ife_bus_rd_res); + + if (ife_ctx->flags.is_dual) { + if (!ife_hw_mgr->ife_devices[ife_ctx->right_hw_idx]) { + CAM_ERR(CAM_ISP, "ife_devices[%d] is NULL", ife_ctx->right_hw_idx); + goto put_res; + } + + hw_intf = ife_hw_mgr->ife_devices[ife_ctx->right_hw_idx]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &vfe_acquire, + sizeof(struct cam_vfe_acquire_args)); + + if (!rc && vfe_acquire.vfe_bus_rd.rsrc_node) { + 
ife_bus_rd_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT] = + vfe_acquire.vfe_bus_rd.rsrc_node; + ife_ctx->right_hw_idx = hw_intf->hw_idx; + + CAM_DBG(CAM_ISP, + "Acquired VFE:%d BUS RD for RIGHT, ctx: %u", + ife_ctx->right_hw_idx, ife_ctx->ctx_index); + } + } + + return 0; + +put_res: + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &ife_bus_rd_res); + return rc; +} + +static int cam_ife_hw_mgr_acquire_sfe_bus_rd( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -1, i; + struct cam_sfe_acquire_args sfe_acquire; + struct cam_ife_hw_mgr *ife_hw_mgr; + struct cam_hw_intf *hw_intf = NULL; + struct cam_isp_hw_mgr_res *sfe_rd_res; + + ife_hw_mgr = ife_ctx->hw_mgr; + + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &sfe_rd_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource, ctx_idx: %u", ife_ctx->ctx_index); + return rc; + } + + if (!cam_ife_hw_mgr_is_sfe_rd_res(in_port->sfe_in_path_type)) { + CAM_ERR(CAM_ISP, "Invalid sfe rd type: 0x%x ctx_idx: %u", + in_port->sfe_in_path_type, ife_ctx->ctx_index); + rc = -EINVAL; + goto put_res; + } + + if (in_port->usage_type) + CAM_WARN(CAM_ISP, "DUAL mode not supported for BUS RD [RDIs], ctx_idx: %u", + ife_ctx->ctx_index); + + sfe_acquire.rsrc_type = CAM_ISP_RESOURCE_SFE_RD; + sfe_acquire.tasklet = ife_ctx->common.tasklet_info; + sfe_acquire.priv = ife_ctx; + sfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + sfe_acquire.sfe_rd.cdm_ops = ife_ctx->cdm_ops; + sfe_acquire.sfe_rd.is_offline = ife_ctx->flags.is_offline; + sfe_acquire.sfe_rd.unpacker_fmt = in_port->fe_unpacker_fmt; + sfe_acquire.sfe_rd.res_id = in_port->sfe_in_path_type; + sfe_acquire.sfe_rd.secure_mode = in_port->secure_mode; + + if (ife_ctx->left_hw_idx >= CAM_SFE_HW_NUM_MAX) { + if (in_port->ife_rd_count) { + for (i = 0; i < CAM_SFE_HW_NUM_MAX; i++) { + if (!ife_hw_mgr->sfe_devices[i]) + continue; + + hw_intf = ife_hw_mgr->sfe_devices[i]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + 
&sfe_acquire, sizeof(struct cam_sfe_acquire_args)); + if (rc) + continue; + else + break; + } + } else { + rc = -ENODEV; + CAM_ERR(CAM_ISP, + "The acquired hw idx %d is invalid and it isn't FE usecase", + ife_ctx->left_hw_idx); + goto put_res; + } + } else { + if (!ife_hw_mgr->sfe_devices[ife_ctx->left_hw_idx]) { + rc = -ENODEV; + CAM_ERR(CAM_ISP, "No valid sfe devices for idx:%d", + ife_ctx->left_hw_idx); + goto put_res; + } + + hw_intf = ife_hw_mgr->sfe_devices[ife_ctx->left_hw_idx]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &sfe_acquire, sizeof(struct cam_sfe_acquire_args)); + } + + if (!sfe_acquire.sfe_rd.rsrc_node || rc || !hw_intf) { + rc = -ENODEV; + CAM_ERR(CAM_ISP, + "Failed to acquire SFE RD: 0x%x, ctx_idx: %u", + in_port->sfe_in_path_type, ife_ctx->ctx_index); + goto put_res; + } + + sfe_rd_res->hw_res[CAM_ISP_HW_SPLIT_LEFT] = sfe_acquire.sfe_rd.rsrc_node; + ife_ctx->left_hw_idx = hw_intf->hw_idx; + + CAM_DBG(CAM_ISP, + "SFE RD left [%u] acquired success for path: %u is_dual: %d res: %s res_id: 0x%x ctx_idx: %u", + sfe_rd_res->hw_res[0]->hw_intf->hw_idx, in_port->sfe_in_path_type, + in_port->usage_type, sfe_rd_res->hw_res[0]->res_name, + sfe_rd_res->hw_res[0]->res_id, ife_ctx->ctx_index); + + sfe_rd_res->res_id = in_port->sfe_in_path_type; + sfe_rd_res->res_type = sfe_acquire.rsrc_type; + sfe_rd_res->is_dual_isp = in_port->usage_type; + sfe_rd_res->is_secure = in_port->secure_mode; + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_in_rd, &sfe_rd_res); + + return 0; +put_res: + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &sfe_rd_res); + return rc; +} + +static int cam_ife_hw_mgr_acquire_ife_src_for_sfe( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + bool acquire_lcr, uint32_t *acquired_hw_id, + uint32_t *acquired_hw_path) +{ + int rc = -1; + struct cam_vfe_acquire_args vfe_acquire; + struct cam_isp_hw_mgr_res *ife_src_res; + struct cam_hw_intf *hw_intf = NULL; + struct cam_ife_hw_mgr 
*ife_hw_mgr; + + if ((ife_ctx->left_hw_idx >= CAM_SFE_HW_NUM_MAX) || + (ife_ctx->flags.is_dual && (ife_ctx->right_hw_idx >= CAM_SFE_HW_NUM_MAX))) { + CAM_ERR(CAM_ISP, "the hw index:[%d - %d] is wrong", + ife_ctx->left_hw_idx, ife_ctx->right_hw_idx); + goto end; + } + + ife_hw_mgr = ife_ctx->hw_mgr; + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, + &ife_src_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource, ctx_idx: %u", + ife_ctx->ctx_index); + goto end; + } + + vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_IN; + vfe_acquire.tasklet = ife_ctx->common.tasklet_info; + vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops; + vfe_acquire.vfe_in.in_port = in_port; + vfe_acquire.vfe_in.is_fe_enabled = ife_ctx->flags.is_fe_enabled; + vfe_acquire.vfe_in.is_offline = ife_ctx->flags.is_offline; + vfe_acquire.priv = ife_ctx; + vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + vfe_acquire.vfe_in.handle_camif_irq = true; + if (ife_hw_mgr->csid_camif_irq_support && ife_ctx->ctx_type != + CAM_IFE_CTX_TYPE_SFE) + vfe_acquire.vfe_in.handle_camif_irq = false; + + if (!acquire_lcr) + vfe_acquire.vfe_in.res_id = + CAM_ISP_HW_VFE_IN_CAMIF; + else + vfe_acquire.vfe_in.res_id = + CAM_ISP_HW_VFE_IN_LCR; + + if (ife_ctx->flags.is_dual) { + vfe_acquire.vfe_in.sync_mode = + CAM_ISP_HW_SYNC_MASTER; + vfe_acquire.vfe_in.dual_hw_idx = + ife_ctx->right_hw_idx; + } else + vfe_acquire.vfe_in.sync_mode = + CAM_ISP_HW_SYNC_NONE; + + vfe_acquire.vfe_in.is_dual = + (uint32_t)ife_ctx->flags.is_dual; + + ife_src_res->res_type = vfe_acquire.rsrc_type; + ife_src_res->res_id = vfe_acquire.vfe_in.res_id; + ife_src_res->is_dual_isp = (uint32_t)ife_ctx->flags.is_dual; + + hw_intf = ife_hw_mgr->ife_devices[ife_ctx->left_hw_idx]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &vfe_acquire, + sizeof(struct cam_vfe_acquire_args)); + + if (rc || !vfe_acquire.vfe_in.rsrc_node) { + CAM_ERR(CAM_ISP, "Unable to acquire LEFT IFE res: %d ctx_idx: %u", + vfe_acquire.vfe_in.res_id, 
ife_ctx->ctx_index); + rc = -EINVAL; + goto put_res; + } + + ife_src_res->hw_res[CAM_ISP_HW_SPLIT_LEFT] = vfe_acquire.vfe_in.rsrc_node; + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src, + &ife_src_res); + + *acquired_hw_id |= cam_convert_hw_idx_to_ife_hw_num(hw_intf->hw_idx); + acquired_hw_path[0] |= cam_convert_res_id_to_hw_path( + ife_src_res->hw_res[CAM_ISP_HW_SPLIT_LEFT]->res_id, -1); + + CAM_DBG(CAM_ISP, + "acquire success LEFT IFE: %d res type: 0x%x res: %s res id: 0x%x ctx_idx: %u", + hw_intf->hw_idx, + ife_src_res->hw_res[0]->res_type, + ife_src_res->hw_res[0]->res_name, + ife_src_res->hw_res[0]->res_id, ife_ctx->ctx_index); + + if (ife_ctx->flags.is_dual) { + vfe_acquire.vfe_in.rsrc_node = NULL; + vfe_acquire.vfe_in.sync_mode = + CAM_ISP_HW_SYNC_SLAVE; + vfe_acquire.vfe_in.dual_hw_idx = + ife_ctx->left_hw_idx; + + hw_intf = ife_hw_mgr->ife_devices[ife_ctx->right_hw_idx]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &vfe_acquire, + sizeof(struct cam_vfe_acquire_args)); + + if (rc || !vfe_acquire.vfe_in.rsrc_node) { + CAM_ERR(CAM_ISP, "Unable to acquire right IFE res: %u, ctx_idx: %u", + vfe_acquire.vfe_in.res_id, ife_ctx->ctx_index); + rc = -EINVAL; + goto put_res; + } + + ife_src_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT] = vfe_acquire.vfe_in.rsrc_node; + *acquired_hw_id |= + cam_convert_hw_idx_to_ife_hw_num(hw_intf->hw_idx); + acquired_hw_path[1] |= cam_convert_res_id_to_hw_path( + ife_src_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT]->res_id, -1); + CAM_DBG(CAM_ISP, + "acquire success RIGHT IFE: %u res type: 0x%x res: %s res id: 0x%x ctx_idx: %u", + hw_intf->hw_idx, + ife_src_res->hw_res[1]->res_type, + ife_src_res->hw_res[1]->res_name, + ife_src_res->hw_res[1]->res_id, ife_ctx->ctx_index); + } + + return 0; + +put_res: + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &ife_src_res); +end: + /* release resource at the entry function */ + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_ife_src( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct 
cam_isp_in_port_generic_info *in_port, + bool acquire_lcr, bool acquire_ppp, + uint32_t *acquired_hw_id, + uint32_t *acquired_hw_path) +{ + int rc = -1; + int i; + struct cam_isp_hw_mgr_res *csid_res; + struct cam_isp_hw_mgr_res *ife_src_res; + struct cam_vfe_acquire_args vfe_acquire; + struct cam_hw_intf *hw_intf; + struct cam_ife_hw_mgr *ife_hw_mgr; + bool is_ife_src_res_in_list; + + ife_hw_mgr = ife_ctx->hw_mgr; + + list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) { + is_ife_src_res_in_list = false; + if (csid_res->num_children && !acquire_lcr) + continue; + + if (acquire_lcr && csid_res->res_id != CAM_IFE_PIX_PATH_RES_IPP) + continue; + + if (csid_res->res_id == CAM_IFE_PIX_PATH_RES_PPP && !acquire_ppp) + continue; + + /* + * For Multi-context cases, acquire for different hw contexts of the same src + * resource might come as separate input data structure, which implies that this + * function might be called more than once for the same resource (with different + * hw context id). We find the existing resource instead of adding duplicate + * entries of resource for each hw context. 
+ */ + if ((in_port->major_ver == 3) && ((csid_res->res_id == CAM_IFE_PIX_PATH_RES_IPP) || + (csid_res->res_id == CAM_IFE_PIX_PATH_RES_IPP_1) || + (csid_res->res_id == CAM_IFE_PIX_PATH_RES_IPP_2))) { + list_for_each_entry(ife_src_res, &ife_ctx->res_list_ife_src, list) { + if (ife_src_res->hw_res[CAM_ISP_HW_SPLIT_LEFT] && + ife_src_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) { + is_ife_src_res_in_list = true; + break; + } + } + } + + if (!is_ife_src_res_in_list) { + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &ife_src_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src, &ife_src_res); + } + + vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_IN; + vfe_acquire.tasklet = ife_ctx->common.tasklet_info; + vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops; + vfe_acquire.vfe_in.in_port = in_port; + vfe_acquire.vfe_in.is_fe_enabled = ife_ctx->flags.is_fe_enabled; + vfe_acquire.vfe_in.is_offline = ife_ctx->flags.is_offline; + vfe_acquire.priv = ife_ctx; + vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + vfe_acquire.vfe_in.handle_camif_irq = true; + vfe_acquire.vfe_in.hw_ctxt_mask = 0; + + if (ife_hw_mgr->csid_camif_irq_support && ife_ctx->ctx_type != + CAM_IFE_CTX_TYPE_SFE) + vfe_acquire.vfe_in.handle_camif_irq = false; + + switch (csid_res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_IPP_1: + case CAM_IFE_PIX_PATH_RES_IPP_2: + if (!acquire_lcr) { + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_CAMIF; + vfe_acquire.vfe_in.hw_ctxt_mask = in_port->ipp_dst_hw_ctxt_mask; + } else { + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_LCR; + } + + if (csid_res->is_dual_isp) + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_MASTER; + else + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + + vfe_acquire.vfe_in.is_dual = csid_res->is_dual_isp; + + break; + case CAM_IFE_PIX_PATH_RES_PPP: + vfe_acquire.vfe_in.res_id = 
CAM_ISP_HW_VFE_IN_PDLIB; + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI0; + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + break; + case CAM_IFE_PIX_PATH_RES_RDI_1: + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI1; + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + break; + case CAM_IFE_PIX_PATH_RES_RDI_2: + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI2; + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + break; + case CAM_IFE_PIX_PATH_RES_RDI_3: + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI3; + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + break; + case CAM_IFE_PIX_PATH_RES_RDI_4: + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI4; + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + break; + default: + CAM_ERR(CAM_ISP, "Wrong IFE CSID Path Resource ID : %d, ctx_idx: %u", + csid_res->res_id, ife_ctx->ctx_index); + goto err; + } + + ife_src_res->res_type = vfe_acquire.rsrc_type; + ife_src_res->res_id = vfe_acquire.vfe_in.res_id; + ife_src_res->is_dual_isp = csid_res->is_dual_isp; + ife_src_res->use_wm_pack = csid_res->use_wm_pack; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!csid_res->hw_res[i]) + continue; + + hw_intf = ife_hw_mgr->ife_devices[ + csid_res->hw_res[i]->hw_intf->hw_idx]->hw_intf; + + if (i == CAM_ISP_HW_SPLIT_LEFT && + ife_src_res->is_dual_isp) { + vfe_acquire.vfe_in.dual_hw_idx = + ife_ctx->right_hw_idx; + } + + /* fill in more acquire information as needed */ + /* slave Camif resource, */ + if (i == CAM_ISP_HW_SPLIT_RIGHT && ife_src_res->is_dual_isp) { + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_SLAVE; + vfe_acquire.vfe_in.dual_hw_idx = ife_ctx->left_hw_idx; + } + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &vfe_acquire, + sizeof(struct cam_vfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "Can not acquire IFE HW res %d, ctx_idx: %u", + csid_res->res_id, 
ife_ctx->ctx_index); + goto err; + } + + ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node; + ife_src_res->hw_ctxt_id_mask |= vfe_acquire.vfe_in.hw_ctxt_mask; + + *acquired_hw_id |= cam_convert_hw_idx_to_ife_hw_num(hw_intf->hw_idx); + + if (i >= CAM_MAX_HW_SPLIT) { + CAM_ERR(CAM_ISP, "HW split is invalid: %d, ctx_idx: %u", + i, ife_ctx->ctx_index); + return -EINVAL; + } + + acquired_hw_path[i] |= cam_convert_res_id_to_hw_path( + ife_src_res->hw_res[i]->res_id, csid_res->res_id); + + CAM_DBG(CAM_ISP, + "acquire success IFE:%d ctx_idx: %u res type :0x%x res: %s res id:0x%x", + hw_intf->hw_idx, ife_ctx->ctx_index, + ife_src_res->hw_res[i]->res_type, + ife_src_res->hw_res[i]->res_name, + ife_src_res->hw_res[i]->res_id); + + } + csid_res->num_children++; + } + + return 0; +err: + /* release resource at the entry function */ + return rc; +} + +static int cam_ife_hw_mgr_acquire_csid_hw( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_csid_hw_reserve_resource_args *csid_acquire, + struct cam_isp_in_port_generic_info *in_port) +{ + int i; + int rc = -EINVAL; + struct cam_hw_intf *hw_intf; + struct cam_ife_hw_mgr *ife_hw_mgr; + bool is_start_lower_idx = false; + struct cam_isp_hw_mgr_res *csid_res_iterator; + struct cam_isp_out_port_generic_info *out_port = NULL; + struct cam_ife_csid_hw_caps *csid_caps = NULL; + bool can_use_lite = false; + int busy_count = 0, compat_count = 0; + bool is_secure_mode = false; + + if (!ife_ctx || !csid_acquire) { + CAM_ERR(CAM_ISP, + "Invalid args ife hw ctx %pK csid_acquire %pK", + ife_ctx, csid_acquire); + return -EINVAL; + } + + ife_hw_mgr = ife_ctx->hw_mgr; + + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) + is_start_lower_idx = true; + + /* Update the secure mode of CSID based on all the out ports */ + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &(in_port->data[i]); + if (out_port->secure_mode) { + is_secure_mode = true; + break; + } + } + + ife_ctx->flags.is_dual = (bool)in_port->usage_type; + + if 
(ife_ctx->ctx_type != CAM_IFE_CTX_TYPE_SFE) + can_use_lite = cam_ife_mgr_check_can_use_lite( + csid_acquire, ife_ctx); + + if (ife_hw_mgr->csid_camif_irq_support && ife_ctx->ctx_type != CAM_IFE_CTX_TYPE_SFE) + csid_acquire->handle_camif_irq = true; + + /* Try acquiring CSID from previously acquired HW */ + list_for_each_entry(csid_res_iterator, &ife_ctx->res_list_ife_csid, + list) { + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!csid_res_iterator->hw_res[i]) + continue; + if (in_port->num_out_res && + ((csid_res_iterator->is_secure == 1 && + is_secure_mode == false) || + (csid_res_iterator->is_secure == 0 && + is_secure_mode == true))) + continue; + if (!in_port->num_out_res && + csid_res_iterator->is_secure == 1) + continue; + + hw_intf = csid_res_iterator->hw_res[i]->hw_intf; + csid_caps = + &ife_hw_mgr->csid_hw_caps[hw_intf->hw_idx]; + + if (csid_caps->is_lite && !can_use_lite) { + CAM_DBG(CAM_ISP, "CSID[%u] cannot use lite, ctx_idx: %u", + hw_intf->hw_idx, ife_ctx->ctx_index); + continue; + } + + if (csid_caps->is_ife_sfe_mapped && + (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) && + !ife_hw_mgr->sfe_devices[hw_intf->hw_idx]) { + CAM_DBG(CAM_ISP, "No sfe_device with idx: %d, ctx_idx: %u", + hw_intf->hw_idx, ife_ctx->ctx_index); + continue; + } + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + csid_acquire, sizeof(*csid_acquire)); + if (rc == -EBUSY) { + CAM_DBG(CAM_ISP, + "Resource not available from hw_idx %d, ctx_idx: %u", + hw_intf->hw_idx, ife_ctx->ctx_index); + continue; + } else if (rc) { + CAM_ERR(CAM_ISP, + "Failed to acquire from existing hw idx: %u ctx_idx: %u rc: %d", + hw_intf->hw_idx, ife_ctx->ctx_index, rc); + return rc; + } + + CAM_DBG(CAM_ISP, + "acquired from old csid(%s)=%d successfully, is_secure: %s, ctx_idx: %u", + (i == 0) ? "left" : "right", + hw_intf->hw_idx, + CAM_BOOL_TO_YESNO(csid_res_iterator->is_secure), + ife_ctx->ctx_index); + goto acquire_successful; + } + } + + for (i = (is_start_lower_idx) ? 
0 : (CAM_IFE_CSID_HW_NUM_MAX - 1); + (is_start_lower_idx) ? (i < CAM_IFE_CSID_HW_NUM_MAX) : (i >= 0); + (is_start_lower_idx) ? i++ : i--) { + if ((i < 0) || (i >= CAM_IFE_CSID_HW_NUM_MAX)) { + CAM_ERR(CAM_ISP, "Invalid index for csid device i: %d", i); + return -EINVAL; + } + + if (!ife_hw_mgr->csid_devices[i]) + continue; + + if (ife_hw_mgr->debug_cfg.force_acq_csid < CAM_IFE_CSID_HW_NUM_MAX && + (i != ife_hw_mgr->debug_cfg.force_acq_csid)) + continue; + + hw_intf = ife_hw_mgr->csid_devices[i]; + + if (ife_hw_mgr->csid_hw_caps[hw_intf->hw_idx].is_lite && + !can_use_lite) { + CAM_DBG(CAM_ISP, "CSID[%u] cannot use lite, ctx_idx: %u", + hw_intf->hw_idx, ife_ctx->ctx_index); + continue; + } + + compat_count++; + + if (ife_hw_mgr->csid_hw_caps[hw_intf->hw_idx].is_ife_sfe_mapped && + (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) && + !ife_hw_mgr->sfe_devices[hw_intf->hw_idx]) { + CAM_DBG(CAM_ISP, "No sfe_device with idx: %d, ctx_idx: %u", + hw_intf->hw_idx, ife_ctx->ctx_index); + continue; + } + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, csid_acquire, + sizeof(struct cam_csid_hw_reserve_resource_args)); + if (!rc) + return rc; + + if (rc == -EBUSY) { + busy_count++; + } else { + CAM_ERR(CAM_ISP, "CSID[%d] acquire failed (rc=%d), ctx_idx: %u", + i, rc, ife_ctx->ctx_index); + return rc; + } + } + + if (compat_count == busy_count) + CAM_ERR(CAM_ISP, "all compatible CSIDs are busy, ctx_idx: %u", ife_ctx->ctx_index); + +acquire_successful: + return rc; +} + +static bool cam_ife_hw_mgr_is_need_csid_ipp( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port) +{ + struct cam_ife_hw_mgr *hw_mgr; + bool need = true; + + hw_mgr = ife_ctx->hw_mgr; + + if (!(in_port->ipp_count || in_port->lcr_count)) + need = false; + else if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE && + ((hw_mgr->csid_hw_caps[0].sfe_ipp_input_rdi_res && !in_port->usage_type) || + in_port->ife_rd_count)) + need = false; + + CAM_DBG(CAM_ISP, "Need CSID PIX %u, Ctx_type: %u, 
ctx_idx: %u", + need, ife_ctx->ctx_type, ife_ctx->ctx_index); + return need; +} + +static int cam_ife_mgr_util_process_csid_path_res( + uint32_t path_id, enum cam_ife_pix_path_res_id *path_res_id) +{ + switch (path_id) { + case CAM_ISP_PXL_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_IPP; + break; + case CAM_ISP_PXL1_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_IPP_1; + break; + case CAM_ISP_PXL2_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_IPP_2; + break; + case CAM_ISP_RDI0_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_RDI_0; + break; + case CAM_ISP_RDI1_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_RDI_1; + break; + case CAM_ISP_RDI2_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_RDI_2; + break; + case CAM_ISP_RDI3_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_RDI_3; + break; + case CAM_ISP_RDI4_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_RDI_4; + break; + case CAM_ISP_PPP_PATH: + *path_res_id = CAM_IFE_PIX_PATH_RES_PPP; + break; + default: + CAM_ERR(CAM_ISP, "Invalid csid path ID: 0x%x", path_id); + return -EINVAL; + } + + return 0; +} + +static int cam_ife_hw_mgr_acquire_res_ife_csid_pxl( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + bool is_ipp, + bool crop_enable) +{ + int rc = 0, i; + struct cam_isp_out_port_generic_info *out_port = NULL; + struct cam_isp_hw_mgr_res *csid_res; + struct cam_hw_intf *hw_intf; + struct cam_csid_hw_reserve_resource_args csid_acquire = {0}; + enum cam_ife_pix_path_res_id path_res_id; + struct cam_ife_csid_dual_sync_args dual_sync_args = {0}; + + if (is_ipp) { + if (in_port->major_ver == 3) { + rc = cam_ife_mgr_util_process_csid_path_res(in_port->path_id, &path_res_id); + if (rc) { + CAM_ERR(CAM_ISP, "Error in processing csid path resource rc:%d", + rc); + goto end; + } + } else { + path_res_id = CAM_IFE_PIX_PATH_RES_IPP; + } + } else { + path_res_id = CAM_IFE_PIX_PATH_RES_PPP; + } + + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr 
resource, ctx_idx: %u", ife_ctx->ctx_index); + goto end; + } + + csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH; + + csid_res->res_id = path_res_id; + ife_ctx->flags.is_dual = (bool)in_port->usage_type; + + if (in_port->usage_type && is_ipp) + csid_res->is_dual_isp = 1; + else { + csid_res->is_dual_isp = 0; + csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE; + } + + /* Update the secure mode of CSID based on all the out ports */ + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &(in_port->data[i]); + if (out_port->secure_mode) { + csid_res->is_secure = out_port->secure_mode; + break; + } + } + + CAM_DBG(CAM_ISP, "CSID Acquire: Enter, ctx_idx: %u", ife_ctx->ctx_index); + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res); + + /* for dual ife, acquire the right ife first */ + for (i = csid_res->is_dual_isp; i >= 0 ; i--) { + CAM_DBG(CAM_ISP, "ctx_idx: %u i %d is_dual %d", + ife_ctx->ctx_index, i, csid_res->is_dual_isp); + + csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_acquire.res_id = path_res_id; + csid_acquire.in_port = in_port; + csid_acquire.out_port = in_port->data; + csid_acquire.node_res = NULL; + csid_acquire.event_cb = cam_ife_hw_mgr_event_handler; + csid_acquire.cb_priv = ife_ctx; + csid_acquire.crop_enable = crop_enable; + csid_acquire.drop_enable = false; + + if (csid_res->is_dual_isp) + csid_acquire.sync_mode = i == CAM_ISP_HW_SPLIT_LEFT ? + CAM_ISP_HW_SYNC_MASTER : CAM_ISP_HW_SYNC_SLAVE; + + csid_acquire.tasklet = ife_ctx->common.tasklet_info; + csid_acquire.cdm_ops = ife_ctx->cdm_ops; + + rc = cam_ife_hw_mgr_acquire_csid_hw(ife_ctx, + &csid_acquire, + in_port); + + if (rc) { + CAM_ERR(CAM_ISP, + "Cannot acquire ife csid pxl path rsrc %s, ctx_idx: %u", + (is_ipp) ? 
"IPP" : "PPP", ife_ctx->ctx_index); + goto end; + } + + csid_res->hw_res[i] = csid_acquire.node_res; + hw_intf = csid_res->hw_res[i]->hw_intf; + + if (i == CAM_ISP_HW_SPLIT_LEFT) { + ife_ctx->left_hw_idx = hw_intf->hw_idx; + ife_ctx->buf_done_controller = + csid_acquire.buf_done_controller; + ife_ctx->mc_comp_buf_done_controller = + csid_acquire.mc_comp_buf_done_controller; + } else { + ife_ctx->right_hw_idx = hw_intf->hw_idx; + } + + ife_ctx->flags.need_csid_top_cfg = csid_acquire.need_top_cfg; + ife_ctx->flags.dynamic_drv_supported = csid_acquire.dynamic_drv_supported; + + CAM_DBG(CAM_ISP, + "acquired csid(%s)=%d ctx_idx: %u pxl path rsrc %s successfully, is_secure: %s", + (i == 0) ? "left" : "right", hw_intf->hw_idx, ife_ctx->ctx_index, + (is_ipp) ? "IPP" : "PPP", + CAM_BOOL_TO_YESNO(csid_res->is_secure)); + } + + if (!is_ipp) + goto end; + + if (csid_res->is_dual_isp && ife_ctx->flags.need_csid_top_cfg) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + + if (!csid_res->hw_res[i]) + continue; + + hw_intf = csid_res->hw_res[i]->hw_intf; + + if (i == CAM_ISP_HW_SPLIT_LEFT) { + dual_sync_args.sync_mode = + CAM_ISP_HW_SYNC_MASTER; + dual_sync_args.dual_core_id = + ife_ctx->right_hw_idx; + + } else if (i == CAM_ISP_HW_SPLIT_RIGHT) { + dual_sync_args.sync_mode = + CAM_ISP_HW_SYNC_SLAVE; + dual_sync_args.dual_core_id = + ife_ctx->left_hw_idx; + } + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_IFE_CSID_SET_DUAL_SYNC_CONFIG, + &dual_sync_args, + sizeof( + struct cam_ife_csid_dual_sync_args)); + } + } + +end: + return rc; +} + +static enum cam_ife_pix_path_res_id + cam_ife_hw_mgr_get_ife_csid_rdi_res_type( + uint32_t out_port_type) +{ + enum cam_ife_pix_path_res_id path_id; + + switch (out_port_type) { + case CAM_ISP_IFE_OUT_RES_RDI_0: + case CAM_ISP_SFE_OUT_RES_RDI_0: + path_id = CAM_IFE_PIX_PATH_RES_RDI_0; + break; + case CAM_ISP_IFE_OUT_RES_RDI_1: + case CAM_ISP_SFE_OUT_RES_RDI_1: + path_id = CAM_IFE_PIX_PATH_RES_RDI_1; + break; + case 
CAM_ISP_IFE_OUT_RES_RDI_2: + case CAM_ISP_SFE_OUT_RES_RDI_2: + path_id = CAM_IFE_PIX_PATH_RES_RDI_2; + break; + case CAM_ISP_IFE_OUT_RES_RDI_3: + case CAM_ISP_SFE_OUT_RES_RDI_3: + path_id = CAM_IFE_PIX_PATH_RES_RDI_3; + break; + case CAM_ISP_SFE_OUT_RES_RDI_4: + case CAM_ISP_IFE_OUT_RES_RDI_4: + path_id = CAM_IFE_PIX_PATH_RES_RDI_4; + break; + default: + path_id = CAM_IFE_PIX_PATH_RES_MAX; + CAM_DBG(CAM_ISP, "maximum rdi output type exceeded"); + break; + } + + CAM_DBG(CAM_ISP, "out_port: 0x%x path_id: 0x%x", + out_port_type, path_id); + + return path_id; +} + +static int cam_ife_hw_mgr_acquire_csid_rdi_util( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t path_res_id, + struct cam_isp_out_port_generic_info *out_port) +{ + struct cam_isp_hw_mgr_res *csid_res; + struct cam_csid_hw_reserve_resource_args csid_acquire; + int rc = 0; + + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, + &csid_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource, ctx_idx: %u", ife_ctx->ctx_index); + goto end; + } + + memset(&csid_acquire, 0, sizeof(csid_acquire)); + csid_acquire.res_id = path_res_id; + csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_acquire.in_port = in_port; + csid_acquire.out_port = out_port; + csid_acquire.node_res = NULL; + csid_acquire.event_cb = cam_ife_hw_mgr_event_handler; + csid_acquire.tasklet = ife_ctx->common.tasklet_info; + csid_acquire.cb_priv = ife_ctx; + csid_acquire.cdm_ops = ife_ctx->cdm_ops; + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) + csid_acquire.sfe_en = true; + + if (out_port) { + if (cam_ife_hw_mgr_is_shdr_fs_rdi_res(out_port->res_type, + ife_ctx->flags.is_sfe_shdr, ife_ctx->flags.is_sfe_fs)) { + CAM_DBG(CAM_ISP, "setting inline shdr mode for res: 0x%x, ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + csid_acquire.sfe_inline_shdr = true; + /* + * Merged output will only be from the first n RDIs + * starting from RDI0. 
Any other RDI[1:2] resource + * if only being dumped will be considered as a + * no merge resource + */ + if (ife_ctx->flags.is_aeb_mode) { + if ((out_port->res_type - CAM_ISP_SFE_OUT_RES_RDI_0) >= + ife_ctx->scratch_buf_info.num_fetches) { + csid_acquire.sec_evt_config.en_secondary_evt = true; + csid_acquire.sec_evt_config.evt_type = CAM_IFE_CSID_EVT_SOF; + CAM_DBG(CAM_ISP, + "Secondary SOF evt enabled for path: 0x%x, ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + } + + /* Enable EPOCH/SYNC frame drop for error monitoring on master */ + if (out_port->res_type == CAM_ISP_SFE_OUT_RES_RDI_0) { + csid_acquire.sec_evt_config.en_secondary_evt = true; + csid_acquire.sec_evt_config.evt_type = + CAM_IFE_CSID_EVT_EPOCH | + CAM_IFE_CSID_EVT_SENSOR_SYNC_FRAME_DROP; + CAM_DBG(CAM_ISP, + "Secondary EPOCH & frame drop evt enabled for path: 0x%x, ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + } + } + } + csid_res->is_secure = out_port->secure_mode; + } + + if (in_port->usage_type) + csid_acquire.sync_mode = CAM_ISP_HW_SYNC_MASTER; + else + csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE; + + /* + * Enable RDI pixel drop by default. CSID will enable only for + * ver 480 HW to allow userspace to control pixel drop pattern. 
+ */ + csid_acquire.drop_enable = true; + csid_acquire.crop_enable = true; + rc = cam_ife_hw_mgr_acquire_csid_hw(ife_ctx, + &csid_acquire, in_port); + + if (rc) { + CAM_ERR(CAM_ISP, + "CSID Path reserve failed rc=%d res_id=%d ctx_idx: %u", + rc, path_res_id, ife_ctx->ctx_index); + goto put_res; + } + + if (csid_acquire.node_res == NULL) { + CAM_ERR(CAM_ISP, "Acquire CSID RDI rsrc failed, ctx_idx: %u", ife_ctx->ctx_index); + + goto put_res; + } + + CAM_DBG(CAM_ISP, + "acquired csid[%u] rdi path rsrc %u successfully, is_secure: %s, ctx_idx: %u", + csid_acquire.node_res->hw_intf->hw_idx, + csid_acquire.node_res->res_id, + CAM_BOOL_TO_YESNO(csid_res->is_secure), ife_ctx->ctx_index); + + ife_ctx->flags.need_csid_top_cfg = csid_acquire.need_top_cfg; + ife_ctx->flags.dynamic_drv_supported = csid_acquire.dynamic_drv_supported; + csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_res->res_id = csid_acquire.res_id; + csid_res->is_dual_isp = 0; + csid_res->hw_res[0] = csid_acquire.node_res; + csid_res->hw_res[1] = NULL; + csid_res->use_wm_pack = csid_acquire.use_wm_pack; + + if (ife_ctx->left_hw_idx == CAM_IFE_CSID_HW_NUM_MAX) + ife_ctx->left_hw_idx = csid_res->hw_res[0]->hw_intf->hw_idx; + + if (!ife_ctx->buf_done_controller && csid_acquire.buf_done_controller) + ife_ctx->buf_done_controller = csid_acquire.buf_done_controller; + + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res); + + return 0; +put_res: + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res); +end: + return rc; + +} + +static int cam_ife_hw_mgr_get_csid_rdi_for_sfe_ipp_input( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t *acquired_rdi_res) +{ + struct cam_ife_hw_mgr *hw_mgr; + uint32_t res_id = CAM_IFE_PIX_PATH_RES_MAX; + int rc = 0; + + hw_mgr = ife_ctx->hw_mgr; + + if (hw_mgr->csid_hw_caps[0].sfe_ipp_input_rdi_res && !in_port->usage_type) + res_id = ffs(hw_mgr->csid_hw_caps[0].sfe_ipp_input_rdi_res) - 1; + + if ((res_id != 
CAM_IFE_PIX_PATH_RES_MAX) && (!(BIT(res_id) & (*acquired_rdi_res)))) { + rc = cam_ife_hw_mgr_acquire_csid_rdi_util(ife_ctx, + in_port, res_id, NULL); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire RDI Ctx: %u rdi:%d rc %d", + ife_ctx->ctx_index, res_id, rc); + goto end; + } + *acquired_rdi_res |= BIT(res_id); + } + + CAM_DBG(CAM_ISP, "Ctx: %u rdi_res:%d ctx_type %d rc %d", ife_ctx->ctx_index, + res_id, ife_ctx->ctx_type, rc); +end: + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t *acquired_rdi_res) +{ + int rc = 0; + int i; + struct cam_isp_out_port_generic_info *out_port = NULL; + enum cam_ife_pix_path_res_id res_id = CAM_IFE_PIX_PATH_RES_MAX; + + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + res_id = cam_ife_hw_mgr_get_ife_csid_rdi_res_type( + out_port->res_type); + if (res_id == CAM_IFE_PIX_PATH_RES_MAX) + continue; + rc = cam_ife_hw_mgr_acquire_csid_rdi_util(ife_ctx, + in_port, res_id, out_port); + if (rc) { + CAM_ERR(CAM_ISP, "Ctx: %u Res %d acquire failed rc %d", + ife_ctx->ctx_index, res_id, rc); + break; + } + *acquired_rdi_res |= BIT(res_id); + } + + CAM_DBG(CAM_ISP, "Ctx: %u rdi: %d", ife_ctx->ctx_index, res_id); + return rc; +} + +static int cam_ife_hw_mgr_acquire_res_root( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -1; + + if (ife_ctx->res_list_ife_in.res_type == CAM_ISP_RESOURCE_UNINT) { + /* first acquire */ + ife_ctx->res_list_ife_in.res_type = CAM_ISP_RESOURCE_SRC; + ife_ctx->res_list_ife_in.res_id = in_port->res_type; + ife_ctx->res_list_ife_in.is_dual_isp = in_port->usage_type; + } else if ((ife_ctx->res_list_ife_in.res_id != + in_port->res_type) && (!ife_ctx->flags.is_fe_enabled)) { + CAM_ERR(CAM_ISP, "No Free resource for this context, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } else { + /* else do nothing */ + } + return 0; +err: + /* 
release resource in entry function */ + return rc; +} + +static int cam_ife_mgr_check_and_update_fe_v0( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_acquire_hw_info *acquire_hw_info, + uint32_t acquire_info_size) +{ + int i; + struct cam_isp_in_port_info *in_port = NULL; + uint32_t in_port_length = 0; + uint32_t total_in_port_length = 0; + + if (acquire_hw_info->input_info_offset >= + acquire_hw_info->input_info_size) { + CAM_ERR(CAM_ISP, + "Invalid size offset 0x%x is greater then size 0x%x, ctx_idx: %u", + acquire_hw_info->input_info_offset, + acquire_hw_info->input_info_size, ife_ctx->ctx_index); + return -EINVAL; + } + + in_port = (struct cam_isp_in_port_info *) + ((uint8_t *)&acquire_hw_info->data + + acquire_hw_info->input_info_offset); + for (i = 0; i < acquire_hw_info->num_inputs; i++) { + + if (((uint8_t *)in_port + + sizeof(struct cam_isp_in_port_info)) > + ((uint8_t *)acquire_hw_info + + acquire_info_size)) { + CAM_ERR(CAM_ISP, "Invalid size, ctx_idx: %u", ife_ctx->ctx_index); + return -EINVAL; + } + + if ((in_port->num_out_res > max_ife_out_res) || + (in_port->num_out_res <= 0)) { + CAM_ERR(CAM_ISP, "Invalid num output res %u ctx_idx: %u", + in_port->num_out_res, ife_ctx->ctx_index); + return -EINVAL; + } + + in_port_length = sizeof(struct cam_isp_in_port_info) + + (in_port->num_out_res - 1) * + sizeof(struct cam_isp_out_port_info); + total_in_port_length += in_port_length; + + if (total_in_port_length > acquire_hw_info->input_info_size) { + CAM_ERR(CAM_ISP, "buffer size is not enough, ctx_idx: %u", + ife_ctx->ctx_index); + return -EINVAL; + } + CAM_DBG(CAM_ISP, "in_port%d res_type %d ctx_idx: %u", i, + in_port->res_type, ife_ctx->ctx_index); + if (in_port->res_type == CAM_ISP_IFE_IN_RES_RD) { + ife_ctx->flags.is_fe_enabled = true; + break; + } + + in_port = (struct cam_isp_in_port_info *)((uint8_t *)in_port + + in_port_length); + } + CAM_DBG(CAM_ISP, "is_fe_enabled %d, ctx_idx: %u", + ife_ctx->flags.is_fe_enabled, ife_ctx->ctx_index); + + 
return 0; +} + +static bool cam_ife_mgr_check_for_sfe_rd( + uint32_t sfe_in_path_type) +{ + if (((sfe_in_path_type & 0xFFFF) == CAM_ISP_SFE_IN_RD_0) || + ((sfe_in_path_type & 0xFFFF) == CAM_ISP_SFE_IN_RD_1) || + ((sfe_in_path_type & 0xFFFF) == CAM_ISP_SFE_IN_RD_2)) + return true; + else + return false; +} + +static int cam_ife_mgr_check_and_update_fe_v2( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_acquire_hw_info *acquire_hw_info, + uint32_t acquire_info_size) +{ + int i; + bool is_sfe_rd = false, fetch_cfg = false; + struct cam_isp_in_port_info_v2 *in_port = NULL; + uint32_t in_port_length = 0; + uint32_t total_in_port_length = 0; + + if (acquire_hw_info->input_info_offset >= + acquire_hw_info->input_info_size) { + CAM_ERR(CAM_ISP, + "Invalid size offset 0x%x is greater then size 0x%x ctx_idx: %u", + acquire_hw_info->input_info_offset, + acquire_hw_info->input_info_size, ife_ctx->ctx_index); + return -EINVAL; + } + + in_port = (struct cam_isp_in_port_info_v2 *) + ((uint8_t *)&acquire_hw_info->data + + acquire_hw_info->input_info_offset); + for (i = 0; i < acquire_hw_info->num_inputs; i++) { + + if (((uint8_t *)in_port + + sizeof(struct cam_isp_in_port_info_v2)) > + ((uint8_t *)acquire_hw_info + + acquire_info_size)) { + CAM_ERR(CAM_ISP, "Invalid size, ctx_idx: %u", ife_ctx->ctx_index); + return -EINVAL; + } + + if ((in_port->num_out_res > (max_ife_out_res + + max_sfe_out_res)) || + (in_port->num_out_res <= 0)) { + CAM_ERR(CAM_ISP, "Invalid num output res %u, ctx_idx: %u", + in_port->num_out_res, ife_ctx->ctx_index); + return -EINVAL; + } + + in_port_length = sizeof(struct cam_isp_in_port_info_v2) + + (in_port->num_out_res - 1) * + sizeof(struct cam_isp_out_port_info_v2); + total_in_port_length += in_port_length; + + if (total_in_port_length > acquire_hw_info->input_info_size) { + CAM_ERR(CAM_ISP, "buffer size is not enough, ctx_idx: %u", + ife_ctx->ctx_index); + return -EINVAL; + } + CAM_DBG(CAM_ISP, "in_port%d res_type 0x%x ctx_idx: %u", i, + 
in_port->res_type, ife_ctx->ctx_index); + is_sfe_rd = cam_ife_mgr_check_for_sfe_rd(in_port->sfe_in_path_type); + if (is_sfe_rd) + ife_ctx->scratch_buf_info.num_fetches++; + + if ((!fetch_cfg) && ((in_port->res_type == CAM_ISP_IFE_IN_RES_RD) || + (is_sfe_rd))) { + ife_ctx->flags.is_fe_enabled = true; + + /* Check for offline */ + if (in_port->offline_mode) + ife_ctx->flags.is_offline = true; + + /* Check for inline fetch modes */ + if ((is_sfe_rd) && (!ife_ctx->flags.is_offline)) { + /* Check for SFE FS mode - SFE PP bypass */ + if (in_port->feature_flag & CAM_ISP_SFE_FS_MODE_EN) + ife_ctx->flags.is_sfe_fs = true; + else + ife_ctx->flags.is_sfe_shdr = true; + } + + /* If once configured skip these checks thereafter */ + fetch_cfg = true; + } + + in_port = (struct cam_isp_in_port_info_v2 *) + ((uint8_t *)in_port + in_port_length); + } + CAM_DBG(CAM_ISP, + "is_fe_enabled %d is_offline %d sfe_fs %d sfe_shdr: %d num_sfe_fetches: %u ctx_idx: %u", + ife_ctx->flags.is_fe_enabled, + ife_ctx->flags.is_offline, + ife_ctx->flags.is_sfe_fs, + ife_ctx->flags.is_sfe_shdr, + ife_ctx->scratch_buf_info.num_fetches, ife_ctx->ctx_index); + + return 0; +} + +static int cam_ife_mgr_check_and_update_fe( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_acquire_hw_info *acquire_hw_info, + uint32_t acquire_info_size) +{ + uint32_t major_ver = 0, minor_ver = 0; + + if (acquire_hw_info == NULL || ife_ctx == NULL) + return -EINVAL; + + major_ver = (acquire_hw_info->common_info_version >> 12) & 0xF; + minor_ver = (acquire_hw_info->common_info_version) & 0xFFF; + ife_ctx->major_version = major_ver; + + switch (major_ver) { + case 1: + return cam_ife_mgr_check_and_update_fe_v0( + ife_ctx, acquire_hw_info, acquire_info_size); + case 2: + return cam_ife_mgr_check_and_update_fe_v2( + ife_ctx, acquire_hw_info, acquire_info_size); + case 3: + CAM_DBG(CAM_ISP, "FE updates not applicable"); + break; + default: + CAM_ERR(CAM_ISP, "Invalid ver of user common info: ctx_idx %u minor %u major %u", + 
ife_ctx->ctx_index, minor_ver, major_ver); + return -EINVAL; + } + + return 0; +} + +static int cam_ife_hw_mgr_convert_out_port_to_csid_path(uint32_t port_id) +{ + uint32_t path_id = CAM_IFE_PIX_PATH_RES_MAX; + + if (port_id >= (CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res)) + goto end; + + path_id = cam_ife_hw_mgr_get_ife_csid_rdi_res_type(port_id); + if (path_id >= CAM_IFE_PIX_PATH_RES_RDI_0 && path_id <= CAM_IFE_PIX_PATH_RES_RDI_4) + return path_id; + else if (cam_ife_hw_mgr_check_path_port_compat(CAM_ISP_HW_VFE_IN_PDLIB, + port_id)) + path_id = CAM_IFE_PIX_PATH_RES_PPP; + else + path_id = CAM_IFE_PIX_PATH_RES_IPP; + +end: + return path_id; +} + +static int cam_ife_hw_mgr_preprocess_port( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t *max_height) +{ + uint32_t i; + struct cam_isp_out_port_generic_info *out_port; + + if (in_port->res_type == CAM_ISP_IFE_IN_RES_RD || + in_port->sfe_in_path_type == CAM_ISP_SFE_IN_RD_0 || + in_port->sfe_in_path_type == CAM_ISP_SFE_IN_RD_1 || + in_port->sfe_in_path_type == CAM_ISP_SFE_IN_RD_2) + in_port->ife_rd_count++; + + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type)) { + in_port->rdi_count++; + in_port->lite_path_count++; + if (out_port->height >= *max_height) { + ife_ctx->pri_rdi_out_res = out_port->res_type; + *max_height = out_port->height; + } + } + else if (cam_ife_hw_mgr_is_sfe_rdi_res(out_port->res_type)) + in_port->rdi_count++; + else if (cam_ife_hw_mgr_check_path_port_compat(CAM_ISP_HW_VFE_IN_PDLIB, + out_port->res_type)) + in_port->ppp_count++; + else if (cam_ife_hw_mgr_check_path_port_compat(CAM_ISP_HW_VFE_IN_LCR, + out_port->res_type)) + in_port->lcr_count++; + else { + CAM_DBG(CAM_ISP, "out_res_type 0x%x, ife_ctx_idx: %u", + out_port->res_type, ife_ctx->ctx_index); + if ((in_port->major_ver == 3) && (in_port->path_id & + (CAM_ISP_PXL_PATH | CAM_ISP_PXL1_PATH | CAM_ISP_PXL2_PATH))) { + 
CAM_DBG(CAM_ISP, + "preprocess csid path resource: 0x%x, ipp_dst_hw_ctxt_mask: 0x%x, outport ctxt_id: %d", + in_port->path_id, in_port->ipp_dst_hw_ctxt_mask, + out_port->hw_context_id); + in_port->ipp_dst_hw_ctxt_mask |= out_port->hw_context_id; + } + + in_port->ipp_count++; + + if (in_port->can_use_lite) { + switch(out_port->res_type) { + case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW: + case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG: + in_port->lite_path_count++; + break; + default: + CAM_WARN(CAM_ISP, + "Output port 0x%x cannot use lite, ctx: %u", + out_port->res_type, ife_ctx->ctx_index); + } + } + } + + if ((out_port->res_type >= CAM_ISP_SFE_OUT_RES_BASE) && + (out_port->res_type < (CAM_ISP_SFE_OUT_RES_BASE + max_sfe_out_res))) + in_port->sfe_port_count++; + } + + CAM_DBG(CAM_ISP, "ife_ctx_idx: %u rdi: %d ipp: %d ppp: %d ife_rd: %d lcr: %d", + ife_ctx->ctx_index, in_port->rdi_count, in_port->ipp_count, + in_port->ppp_count, in_port->ife_rd_count, + in_port->lcr_count, + in_port->sfe_port_count); + + return 0; +} + +static int cam_ife_hw_mgr_acquire_offline_res_ife_camif( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + bool acquire_lcr, + uint32_t *acquired_hw_id, + uint32_t *acquired_hw_path) +{ + int rc = -1; + int i; + struct cam_vfe_acquire_args vfe_acquire; + struct cam_hw_intf *hw_intf = NULL; + struct cam_isp_hw_mgr_res *ife_src_res; + struct cam_isp_hw_mgr_res *isp_bus_rd_res; + struct cam_ife_hw_mgr *ife_hw_mgr; + + ife_hw_mgr = ife_ctx->hw_mgr; + + isp_bus_rd_res = list_first_entry(&ife_ctx->res_list_ife_in_rd, + struct cam_isp_hw_mgr_res, list); + if (!isp_bus_rd_res) { + CAM_ERR(CAM_ISP, "BUS RD resource has not been acquired, ctx_idx: %u", + ife_ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &ife_src_res); + if (rc) { + CAM_ERR(CAM_ISP, "No free resource, ctx_idx: %u", ife_ctx->ctx_index); + goto end; + } + + vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_IN; + 
vfe_acquire.tasklet = ife_ctx->common.tasklet_info; + vfe_acquire.priv = ife_ctx; + vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + + vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops; + vfe_acquire.vfe_in.in_port = in_port; + vfe_acquire.vfe_in.is_fe_enabled = ife_ctx->flags.is_fe_enabled; + vfe_acquire.vfe_in.is_offline = ife_ctx->flags.is_offline; + vfe_acquire.vfe_in.handle_camif_irq = true; + if (ife_hw_mgr->csid_camif_irq_support && ife_ctx->ctx_type != + CAM_IFE_CTX_TYPE_SFE) + vfe_acquire.vfe_in.handle_camif_irq = false; + + if (!acquire_lcr) + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_CAMIF; + else + vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_LCR; + + if (ife_ctx->flags.is_dual) + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_MASTER; + else + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (!ife_hw_mgr->ife_devices[i]) + continue; + + hw_intf = ife_hw_mgr->ife_devices[i]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &vfe_acquire, + sizeof(struct cam_vfe_acquire_args)); + + if (rc) + continue; + else + break; + } + + if (i == CAM_IFE_HW_NUM_MAX || rc || + !vfe_acquire.vfe_in.rsrc_node) { + CAM_ERR(CAM_ISP, "Failed to acquire IFE LEFT rc: %d, ctx_idx: %u", + rc, ife_ctx->ctx_index); + goto put_res; + } + + ife_src_res->hw_res[0] = vfe_acquire.vfe_in.rsrc_node; + *acquired_hw_id |= cam_convert_hw_idx_to_ife_hw_num( + hw_intf->hw_idx); + acquired_hw_path[i] |= cam_convert_res_id_to_hw_path( + ife_src_res->hw_res[0]->res_id, -1); + + CAM_DBG(CAM_ISP, "Acquired VFE:%d CAMIF for LEFT, ctx_idx: %u", + ife_src_res->hw_res[0]->hw_intf->hw_idx, ife_ctx->ctx_index); + + ife_src_res->res_type = vfe_acquire.rsrc_type; + ife_src_res->res_id = vfe_acquire.vfe_in.res_id; + ife_src_res->is_dual_isp = (uint32_t)ife_ctx->flags.is_dual; + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src, &ife_src_res); + + if (ife_ctx->flags.is_dual) { + vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_SLAVE; + 
vfe_acquire.vfe_in.rsrc_node = NULL; + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (!ife_hw_mgr->ife_devices[i]) + continue; + + if (i == ife_src_res->hw_res[0]->hw_intf->hw_idx) + continue; + + hw_intf = ife_hw_mgr->ife_devices[i]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &vfe_acquire, + sizeof(struct cam_vfe_acquire_args)); + + if (rc) + continue; + else + break; + } + + if (rc || !vfe_acquire.vfe_in.rsrc_node) { + CAM_ERR(CAM_ISP, "Failed to acquire IFE RIGHT rc: %d, ctx_idx: %u", + rc, ife_ctx->ctx_index); + goto end; + } + + ife_src_res->hw_res[1] = vfe_acquire.vfe_in.rsrc_node; + + *acquired_hw_id |= cam_convert_hw_idx_to_ife_hw_num( + hw_intf->hw_idx); + + acquired_hw_path[i] |= cam_convert_res_id_to_hw_path( + ife_src_res->hw_res[1]->res_id, -1); + + CAM_DBG(CAM_ISP, "Acquired VFE:%d CAMIF for RIGHT, ctx_idx: %u", + ife_src_res->hw_res[1]->hw_intf->hw_idx, ife_ctx->ctx_index); + } + + return rc; + +put_res: + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &ife_src_res); + +end: + return rc; +} + +static int cam_ife_hw_mgr_acquire_offline_res_sfe( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -1; + int i = CAM_ISP_HW_SPLIT_LEFT; + struct cam_sfe_acquire_args sfe_acquire; + struct cam_isp_hw_mgr_res *sfe_src_res; + struct cam_isp_hw_mgr_res *sfe_bus_rd_res; + struct cam_hw_intf *hw_intf; + struct cam_ife_hw_mgr *ife_hw_mgr; + + ife_hw_mgr = ife_ctx->hw_mgr; + + sfe_bus_rd_res = list_first_entry(&ife_ctx->res_list_ife_in_rd, + struct cam_isp_hw_mgr_res, list); + + if (!sfe_bus_rd_res) { + CAM_ERR(CAM_ISP, "BUS RD resource has not been acquired, ctx_idx: %u", + ife_ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &sfe_src_res); + if (rc) { + CAM_ERR(CAM_ISP, "No free resource, ctx_idx: %u", ife_ctx->ctx_index); + goto end; + } + + sfe_acquire.rsrc_type = CAM_ISP_RESOURCE_SFE_IN; + sfe_acquire.tasklet = 
ife_ctx->common.tasklet_info; + sfe_acquire.priv = ife_ctx; + sfe_acquire.event_cb = cam_ife_hw_mgr_event_handler; + sfe_acquire.sfe_in.cdm_ops = ife_ctx->cdm_ops; + sfe_acquire.sfe_in.in_port = in_port; + sfe_acquire.sfe_in.is_offline = ife_ctx->flags.is_offline; + sfe_acquire.sfe_in.res_id = CAM_ISP_HW_SFE_IN_PIX; + + hw_intf = ife_hw_mgr->sfe_devices[ + sfe_bus_rd_res->hw_res[i]->hw_intf->hw_idx]->hw_intf; + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &sfe_acquire, + sizeof(struct cam_sfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to acquire SFE PIX for offline, ctx_idx: %u", ife_ctx->ctx_index); + goto put_res; + } + + sfe_src_res->hw_res[i] = sfe_acquire.sfe_in.rsrc_node; + CAM_DBG(CAM_ISP, "Acquired SFE: %u PIX LEFT for offline, ctx_idx: %u", + sfe_src_res->hw_res[i]->hw_intf->hw_idx, ife_ctx->ctx_index); + + sfe_src_res->res_type = sfe_acquire.rsrc_type; + sfe_src_res->res_id = sfe_acquire.sfe_in.res_id; + sfe_src_res->is_dual_isp = in_port->usage_type; + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_sfe_src, &sfe_src_res); + + if (ife_ctx->flags.is_dual) { + CAM_WARN(CAM_ISP, + "DUAL not supported for offline use-case, ctx_idx: %u", ife_ctx->ctx_index); + + sfe_acquire.sfe_in.rsrc_node = NULL; + hw_intf = ife_hw_mgr->sfe_devices[ + sfe_bus_rd_res->hw_res[++i]->hw_intf->hw_idx]->hw_intf; + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &sfe_acquire, + sizeof(struct cam_sfe_acquire_args)); + + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to acquire SFE PIX for RIGHT, ctx_idx: %u", + ife_ctx->ctx_index); + goto end; + } + + sfe_src_res->hw_res[i] = sfe_acquire.sfe_in.rsrc_node; + CAM_DBG(CAM_ISP, "Acquired SFE:%d PIX RIGHT for offline, ctx_idx: %u", + sfe_src_res->hw_res[i]->hw_intf->hw_idx, ife_ctx->ctx_index); + } + + sfe_bus_rd_res->num_children++; + + return rc; + +put_res: + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &sfe_src_res); +end: + return rc; +} + +/* Acquire CSID for offline SFE */ +static int 
cam_ife_hw_mgr_acquire_offline_res_csid( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port) +{ + int rc = -1; + uint32_t path_res_id = 0; + struct cam_csid_hw_reserve_resource_args csid_acquire; + struct cam_isp_hw_mgr_res *sfe_bus_rd_res; + struct cam_isp_hw_mgr_res *csid_res; + + sfe_bus_rd_res = list_first_entry(&ife_ctx->res_list_ife_in_rd, + struct cam_isp_hw_mgr_res, list); + + if (!sfe_bus_rd_res) { + CAM_ERR(CAM_ISP, "BUS RD resource has not been acquired, ctx_idx: %u", + ife_ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + path_res_id = cam_ife_hw_mgr_get_csid_rdi_type_for_offline( + sfe_bus_rd_res->res_id); + + if (path_res_id == CAM_IFE_PIX_PATH_RES_MAX) + return -EINVAL; + + rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, + &csid_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource, ctx_idx: %u", ife_ctx->ctx_index); + goto end; + } + + memset(&csid_acquire, 0, sizeof(csid_acquire)); + csid_acquire.res_id = path_res_id; + csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_acquire.in_port = in_port; + csid_acquire.out_port = in_port->data; + csid_acquire.node_res = NULL; + csid_acquire.event_cb = cam_ife_hw_mgr_event_handler; + csid_acquire.tasklet = ife_ctx->common.tasklet_info; + csid_acquire.cb_priv = ife_ctx; + csid_acquire.cdm_ops = ife_ctx->cdm_ops; + csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE; + csid_acquire.is_offline = true; + + rc = cam_ife_hw_mgr_acquire_csid_hw(ife_ctx, + &csid_acquire, in_port); + + if (rc || (csid_acquire.node_res == NULL)) { + CAM_ERR(CAM_ISP, + "CSID Path reserve failed rc=%d res_id=%d ctx_idx: %u", + rc, path_res_id, ife_ctx->ctx_index); + goto put_res; + } + + csid_res->hw_res[CAM_ISP_HW_SPLIT_LEFT] = csid_acquire.node_res; + + if (!ife_ctx->buf_done_controller && csid_acquire.buf_done_controller) + ife_ctx->buf_done_controller = csid_acquire.buf_done_controller; + + ife_ctx->flags.need_csid_top_cfg = csid_acquire.need_top_cfg; + 
ife_ctx->flags.dynamic_drv_supported = csid_acquire.dynamic_drv_supported; + csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_res->res_id = csid_acquire.res_id; + csid_res->is_dual_isp = 0; + csid_res->hw_res[0] = csid_acquire.node_res; + csid_res->hw_res[1] = NULL; + ife_ctx->left_hw_idx = csid_acquire.node_res->hw_intf->hw_idx; + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res); + + return 0; + +put_res: + cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res); + +end: + return rc; +} + +/* Acquire CSID & SFE for offline */ +static int cam_ife_mgr_acquire_hw_sfe_offline( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t *acquired_hw_id, + uint32_t *acquired_hw_path) +{ + int rc; + + rc = cam_ife_hw_mgr_acquire_sfe_bus_rd( + ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire SFE BUS RD resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + + rc = cam_ife_hw_mgr_acquire_offline_res_csid(ife_ctx, + in_port); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire IFE CSID RDI0 resource Failed, ctx_idx: %u", ife_ctx->ctx_index); + goto err; + } + + rc = cam_ife_hw_mgr_acquire_offline_res_sfe( + ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire SFE PIX SRC resource Failed, ctx_idx: %u", ife_ctx->ctx_index); + goto err; + } + + if (in_port->sfe_ife_enable) { + if (in_port->ipp_count) { + rc = cam_ife_hw_mgr_acquire_offline_res_ife_camif( + ife_ctx, in_port, false, + acquired_hw_id, acquired_hw_path); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire IFE IPP SRC resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + if (in_port->lcr_count) { + rc = cam_ife_hw_mgr_acquire_offline_res_ife_camif( + ife_ctx, in_port, true, + acquired_hw_id, acquired_hw_path); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire IFE LCR SRC resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + rc = cam_ife_hw_mgr_acquire_res_ife_out( + ife_ctx, in_port); + if (rc) { + 
CAM_ERR(CAM_ISP, + "Acquire IFE OUT resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + rc = cam_ife_hw_mgr_acquire_res_sfe_out(ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire SFE OUT resource Failed, ctx_idx: %u", ife_ctx->ctx_index); + goto err; + } + + return 0; +err: + return rc; +} + +/* Acquire HWs for IFE fetch engine archs */ +static int cam_ife_mgr_acquire_hw_ife_offline( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t *acquired_hw_id, + uint32_t *acquired_hw_path) +{ + int rc = -1; + + rc = cam_ife_hw_mgr_acquire_res_ife_bus_rd(ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire IFE BUS RD resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + + if (in_port->ipp_count) + rc = cam_ife_hw_mgr_acquire_offline_res_ife_camif(ife_ctx, + in_port, false, acquired_hw_id, acquired_hw_path); + + if (rc) { + CAM_ERR(CAM_ISP, "Acquire IFE IPP SRC resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + + if (in_port->lcr_count) + rc = cam_ife_hw_mgr_acquire_offline_res_ife_camif(ife_ctx, + in_port, true, acquired_hw_id, acquired_hw_path); + + if (rc) { + CAM_ERR(CAM_ISP, "Acquire IFE LCR SRC resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + + rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + + return 0; + +err: + return rc; +} + +static int cam_ife_mgr_acquire_hw_for_offline_ctx( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t *acquired_hw_id, + uint32_t *acquired_hw_path) +{ + int rc = -EINVAL; + + ife_ctx->flags.is_dual = (bool)in_port->usage_type; + if ((!in_port->ipp_count && !in_port->lcr_count) || + !in_port->ife_rd_count) { + CAM_ERR(CAM_ISP, + "Invalid %d BUS RD %d PIX %d LCR ports for FE ctx: %u", + in_port->ife_rd_count, 
in_port->ipp_count, in_port->lcr_count, + ife_ctx->ctx_index); + return -EINVAL; + } + + if (in_port->rdi_count || in_port->ppp_count) { + CAM_ERR(CAM_ISP, + "%d RDI %d PPP ports invalid for FE ctx_idx: %u", + in_port->rdi_count, in_port->ppp_count, ife_ctx->ctx_index); + return -EINVAL; + } + + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) + rc = cam_ife_mgr_acquire_hw_sfe_offline( + ife_ctx, in_port, acquired_hw_id, acquired_hw_path); + else + rc = cam_ife_mgr_acquire_hw_ife_offline( + ife_ctx, in_port, acquired_hw_id, acquired_hw_path); + + return rc; +} + + +static int cam_ife_mgr_acquire_hw_for_ctx( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_in_port_generic_info *in_port, + uint32_t *acquired_hw_id, + uint32_t *acquired_hw_path, + uint32_t *acquired_rdi_res) +{ + int rc = -1; + bool crop_enable = true; + + ife_ctx->flags.dsp_enabled = (bool)in_port->dsp_mode; + ife_ctx->flags.is_dual = (bool)in_port->usage_type; + + /* Update aeb mode for the given in_port once */ + if ((in_port->aeb_mode) && (!ife_ctx->flags.is_aeb_mode)) + ife_ctx->flags.is_aeb_mode = true; + + /* get root node resource */ + rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, "Can not acquire root resource, ctx_idx: %u", ife_ctx->ctx_index); + goto err; + } + + if (!in_port->ipp_count && !in_port->rdi_count && + !in_port->ppp_count && !in_port->lcr_count) { + + CAM_ERR(CAM_ISP, + "No PIX or RDI or PPP or LCR resource, ctx_idx: %u", ife_ctx->ctx_index); + return -EINVAL; + } + + /* get ife csid IPP resource */ + if (cam_ife_hw_mgr_is_need_csid_ipp(ife_ctx, in_port)) { + rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx, + in_port, true, crop_enable); + + if (rc) { + CAM_ERR(CAM_ISP, "Acquire IFE CSID IPP/LCR resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + if (in_port->ppp_count) { + /* get ife csid PPP resource */ + + /* If both IPP and PPP paths are requested with the same vc dt + * it is implied that the sensor 
is a type 3 PD sensor. Crop + * must be enabled for this sensor on PPP path as well. + */ + if (!in_port->ipp_count) + crop_enable = false; + + rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx, + in_port, false, crop_enable); + + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire IFE CSID PPP resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + if (in_port->rdi_count) { + /* get ife csid RDI resource */ + rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port, + acquired_rdi_res); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire IFE CSID RDI resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + CAM_DBG(CAM_ISP, "Acquired CSID resource, is_dual:%s left_hw_idx:%d right_hw_idx:%d", + CAM_BOOL_TO_YESNO(ife_ctx->flags.is_dual), + ife_ctx->left_hw_idx, ife_ctx->right_hw_idx); + + if (in_port->ife_rd_count) { + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) + rc = cam_ife_hw_mgr_acquire_sfe_bus_rd( + ife_ctx, in_port); + else + rc = cam_ife_hw_mgr_acquire_res_ife_bus_rd( + ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire %s BUS RD resource Failed, ctx_idx: %u", + (ife_ctx->ctx_type == + CAM_IFE_CTX_TYPE_SFE) ? 
"SFE" : "IFE", ife_ctx->ctx_index); + goto err; + } + } + + /* try acquire RDI for SFE cases without RDI out ports, + * this is specifically for targets having RDI as input + * to SFE IPP + */ + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + rc = cam_ife_hw_mgr_get_csid_rdi_for_sfe_ipp_input(ife_ctx, in_port, + acquired_rdi_res); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire RDI for SFE IPP failed Ctx: %u rc %d", + ife_ctx->ctx_index, rc); + goto err; + } + } + + /* acquire SFE input resource */ + if ((ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) && + (in_port->ipp_count || in_port->rdi_count)) { + rc = cam_ife_hw_mgr_acquire_res_sfe_src(ife_ctx, + in_port, acquired_hw_id, acquired_hw_path); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire SFE SRC resource failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + /* get ife IPP src resource */ + if (in_port->ipp_count) { + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + if (in_port->sfe_ife_enable) + rc = cam_ife_hw_mgr_acquire_ife_src_for_sfe( + ife_ctx, in_port, false, + acquired_hw_id, acquired_hw_path); + } else { + rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, + in_port, false, false, + acquired_hw_id, acquired_hw_path); + } + + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire IFE IPP SRC resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + /* get ife RDI src resource for non SFE streams */ + if (in_port->rdi_count) { + if (ife_ctx->ctx_type != CAM_IFE_CTX_TYPE_SFE) { + rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, + in_port, false, false, + acquired_hw_id, acquired_hw_path); + + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire IFE RDI SRC resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + } + + if (in_port->lcr_count) { + rc = cam_ife_hw_mgr_acquire_res_ife_src( + ife_ctx, in_port, true, false, + acquired_hw_id, acquired_hw_path); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire IFE LCR SRC resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + /* PPP path 
is from CSID->IFE bypassing SFE */ + if (in_port->ppp_count) { + rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port, + false, true, acquired_hw_id, acquired_hw_path); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire IFE PPP SRC resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + rc = cam_ife_hw_mgr_acquire_res_sfe_out(ife_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire SFE OUT resource Failed, ctx_idx: %u", + ife_ctx->ctx_index); + goto err; + } + } + + if (in_port->dynamic_sensor_switch_en) + ife_ctx->ctx_config |= CAM_IFE_CTX_CFG_DYNAMIC_SWITCH_ON; + + return 0; +err: + /* release resource at the acquire entry funciton */ + return rc; +} + +void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata, + enum cam_cdm_cb_status status, void *cookie) +{ + struct cam_isp_prepare_hw_update_data *hw_update_data = NULL; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + uint64_t req_id; + + if (!userdata) { + CAM_ERR(CAM_ISP, "Invalid args"); + return; + } + + ctx = (struct cam_ife_hw_mgr_ctx *)userdata; + hw_update_data = ctx->cdm_userdata.hw_update_data; + + if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) { + req_id = *(uint64_t *)cookie; + complete_all(&ctx->config_done_complete); + atomic_set(&ctx->cdm_done, 1); + ctx->last_cdm_done_req = req_id; + CAM_DBG(CAM_ISP, + "CDM hdl=0x%x, udata=%pK, status=%d, cookie=%u ctx_index=%u cdm_req=%llu", + handle, userdata, status, req_id, ctx->ctx_index, + ctx->cdm_userdata.request_id); + } else if (status == CAM_CDM_CB_STATUS_PAGEFAULT) { + if (ctx->common.sec_pf_evt_cb) + ctx->common.sec_pf_evt_cb(ctx->common.cb_priv, cookie); + } else { + CAM_WARN(CAM_ISP, + "Called by CDM hdl=0x%x, udata=%pK, status=%d, cdm_req=%llu ctx_idx: %u", + handle, userdata, status, 
ctx->cdm_userdata.request_id, ctx->ctx_index); + } + ktime_get_clocktai_ts64(&ctx->cdm_done_ts); +} + +static int cam_ife_mgr_acquire_get_unified_structure_v0( + struct cam_isp_acquire_hw_info *acquire_hw_info, + uint32_t offset, uint32_t *input_size, + struct cam_isp_in_port_generic_info *in_port) +{ + struct cam_isp_in_port_info *in = NULL; + uint32_t in_port_length = 0; + int32_t rc = 0, i; + + in = (struct cam_isp_in_port_info *) + ((uint8_t *)&acquire_hw_info->data + + acquire_hw_info->input_info_offset + *input_size); + + in_port_length = sizeof(struct cam_isp_in_port_info) + + (in->num_out_res - 1) * + sizeof(struct cam_isp_out_port_info); + + *input_size += in_port_length; + + if (!in_port || ((*input_size) > acquire_hw_info->input_info_size)) { + CAM_ERR(CAM_ISP, "Input is not proper"); + rc = -EINVAL; + goto err; + } + + in_port->major_ver = + (acquire_hw_info->input_info_version >> 16) & 0xFFFF; + in_port->minor_ver = + acquire_hw_info->input_info_version & 0xFFFF; + in_port->res_type = in->res_type; + in_port->lane_type = in->lane_type; + in_port->lane_num = in->lane_num; + in_port->lane_cfg = in->lane_cfg; + in_port->vc[0] = in->vc; + in_port->dt[0] = in->dt; + in_port->num_valid_vc_dt = 1; + in_port->format[0] = in->format; + in_port->test_pattern = in->test_pattern; + in_port->usage_type = in->usage_type; + in_port->left_start = in->left_start; + in_port->left_stop = in->left_stop; + in_port->left_width = in->left_width; + in_port->right_start = in->right_start; + in_port->right_stop = in->right_stop; + in_port->right_width = in->right_width; + in_port->line_start = in->line_start; + in_port->line_stop = in->line_stop; + in_port->height = in->height; + in_port->pixel_clk = in->pixel_clk; + in_port->batch_size = in->batch_size; + in_port->dsp_mode = in->dsp_mode; + in_port->hbi_cnt = in->hbi_cnt; + in_port->cust_node = 0; + in_port->horizontal_bin = 0; + in_port->qcfa_bin = 0; + in_port->num_out_res = in->num_out_res; + + in_port->data = 
CAM_MEM_ZALLOC_ARRAY(in->num_out_res,
		sizeof(struct cam_isp_out_port_generic_info),
		GFP_KERNEL);
	if (in_port->data == NULL) {
		rc = -ENOMEM;
		goto err;
	}

	/* Copy every v1 out-port descriptor into the generic representation */
	for (i = 0; i < in->num_out_res; i++) {
		in_port->data[i].res_type = in->data_flex[i].res_type;
		in_port->data[i].format = in->data_flex[i].format;
		in_port->data[i].width = in->data_flex[i].width;
		in_port->data[i].height = in->data_flex[i].height;
		in_port->data[i].comp_grp_id = in->data_flex[i].comp_grp_id;
		in_port->data[i].split_point = in->data_flex[i].split_point;
		in_port->data[i].secure_mode = in->data_flex[i].secure_mode;
		in_port->data[i].reserved = in->data_flex[i].reserved;
	}

	return 0;
err:
	return rc;
}

/*
 * cam_ife_mgr_hw_check_in_res_type()
 *
 * Check whether @res_type is one of the BUS RD (fetch-engine) input
 * resource types accepted by this hw-mgr.
 *
 * Return: 0 if the resource type is a supported RD input, -EINVAL otherwise.
 */
static inline int cam_ife_mgr_hw_check_in_res_type(
	u32 res_type)
{
	switch (res_type) {
	case CAM_ISP_IFE_IN_RES_RD:
		return 0;
	case CAM_ISP_SFE_IN_RD_0:
		return 0;
	case CAM_ISP_SFE_IN_RD_1:
		return 0;
	case CAM_ISP_SFE_IN_RD_2:
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Decode the v3 acquire feature mask (in->feature_mask) into the
 * discrete flag fields of the generic in_port descriptor. Each field
 * receives the raw masked bit value.
 */
static inline void cam_ife_mgr_acquire_get_feature_flag_params_v3(
	struct cam_isp_in_port_info_v3 *in,
	struct cam_isp_in_port_generic_info *in_port)
{
	in_port->dynamic_sensor_switch_en = in->feature_mask & CAM_ISP_DYNAMIC_SENOR_SWITCH_EN;
	in_port->can_use_lite = in->feature_mask & CAM_ISP_CAN_USE_LITE_MODE;
	in_port->aeb_mode = in->feature_mask & CAM_ISP_AEB_MODE_EN;
	in_port->dynamic_hdr_switch_en = in->feature_mask & CAM_ISP_HDR_MODE_DYNAMIC_SWITCH_EN;
}

/*
 * v2 counterpart of the decoder above: unpack in->feature_flag into the
 * generic in_port descriptor. Unlike v3, the v2 flag word also carries
 * the fetch-security (secure mode) and SFE binned-epoch config bits.
 */
static inline void cam_ife_mgr_acquire_get_feature_flag_params(
	struct cam_isp_in_port_info_v2 *in,
	struct cam_isp_in_port_generic_info *in_port)
{
	in_port->secure_mode = in->feature_flag & CAM_ISP_PARAM_FETCH_SECURITY_MODE;
	in_port->dynamic_sensor_switch_en = in->feature_flag & CAM_ISP_DYNAMIC_SENOR_SWITCH_EN;
	in_port->can_use_lite = in->feature_flag & CAM_ISP_CAN_USE_LITE_MODE;
	in_port->sfe_binned_epoch_cfg = in->feature_flag & CAM_ISP_SFE_BINNED_EPOCH_CFG_ENABLE;
	in_port->epd_supported =
in->feature_flag & CAM_ISP_EPD_SUPPORT; + in_port->aeb_mode = in->feature_flag & CAM_ISP_AEB_MODE_EN; + in_port->dynamic_hdr_switch_en = in->feature_flag & CAM_ISP_HDR_MODE_DYNAMIC_SWITCH_EN; +} + +static int cam_ife_mgr_acquire_get_unified_structure_v3( + struct cam_isp_acquire_hw_info *acquire_hw_info, + uint32_t offset, uint32_t *input_size, + struct cam_isp_in_port_generic_info *in_port) +{ + struct cam_isp_in_port_info_v3 *in = NULL; + uint32_t in_port_length = 0; + int32_t rc = 0, i; + size_t len = 0; + uint8_t log_buf[200]; + + in = (struct cam_isp_in_port_info_v3 *) + ((uint8_t *)&acquire_hw_info->data + + acquire_hw_info->input_info_offset + *input_size); + + in_port_length = sizeof(struct cam_isp_in_port_info_v3) + + (in->num_out_res - 1) * + sizeof(struct cam_isp_out_port_info_v3); + + *input_size += in_port_length; + + if (!in_port || ((*input_size) > acquire_hw_info->input_info_size)) { + CAM_ERR(CAM_ISP, "Input is not proper"); + rc = -EINVAL; + goto err; + } + + in_port->major_ver = (acquire_hw_info->input_info_version >> 8) & 0xFFFF; + in_port->minor_ver = acquire_hw_info->input_info_version & 0xFFFF; + in_port->res_type = in->phy_info.res_type; + in_port->lane_type = in->phy_info.lane_type; + in_port->lane_num = in->phy_info.lane_num; + in_port->lane_cfg = in->phy_info.lane_cfg; + in_port->num_valid_vc_dt = in->csid_info.num_valid_vc_dt; + in_port->epd_supported = in->csid_info.param_mask & CAM_IFE_CSID_EPD_MODE_EN; + + if (in_port->num_valid_vc_dt == 0 || in_port->num_valid_vc_dt >= CAM_ISP_VC_DT_CFG || + in_port->num_valid_vc_dt > g_ife_hw_mgr.isp_caps.max_dt_supported) { + CAM_ERR(CAM_ISP, + "Invalid i/p arg invalid vc-dt: %d, arr size %u, max supported by HW: %u", + in_port->num_valid_vc_dt, CAM_ISP_VC_DT_CFG, + g_ife_hw_mgr.isp_caps.max_dt_supported); + rc = -EINVAL; + goto err; + } + + for (i = 0; i < in_port->num_valid_vc_dt; i++) { + if ((i >= CAM_IFE_CSID_MAX_VALID_VC_NUM) && + (in->csid_info.vc[i] != CAM_ISP_INVALID_VC_VALUE)) + rc = 
-EINVAL; + + in_port->vc[i] = in->csid_info.vc[i]; + in_port->dt[i] = in->csid_info.dt[i]; + + CAM_INFO_BUF(CAM_ISP, log_buf, 200, &len, "VC%d: 0x%x, DT%d: 0x%x ", + i, in_port->vc[i], i, in_port->dt[i]); + } + + if (rc) { + CAM_ERR(CAM_ISP, "Invalid VC/DT args, printing given %d args: %s", + in_port->num_valid_vc_dt, log_buf); + goto err; + } + + for (i = 0; i < in_port->num_valid_vc_dt; i++) { + in_port->format[i] = (in->csid_info.format >> (i * CAM_IFE_DECODE_FORMAT_SHIFT_VAL)) + & CAM_IFE_DECODE_FORMAT_MASK; + } + + in_port->path_id = in->csid_info.path_id; + in_port->left_width = in->csid_info.width; + in_port->height = in->csid_info.height; + in_port->num_out_res = in->num_out_res; + + cam_ife_mgr_acquire_get_feature_flag_params_v3(in, in_port); + + in_port->data = CAM_MEM_ZALLOC_ARRAY(in->num_out_res, + sizeof(struct cam_isp_out_port_generic_info), + GFP_KERNEL); + if (in_port->data == NULL) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < in_port->num_out_res; i++) { + if ((in->data_flex[i].context_id < CAM_ISP_MULTI_CTXT0_MASK) || + (in->data_flex[i].context_id > CAM_ISP_MULTI_CTXT2_MASK)) { + CAM_ERR(CAM_ISP, "Invalid hw context id: 0x%x for acquire i: %d", + in->data_flex[i].context_id, i); + rc = -EINVAL; + goto err; + } + + in_port->data[i].res_type = in->data_flex[i].res_type; + in_port->data[i].format = in->data_flex[i].format; + in_port->data[i].width = in->data_flex[i].width; + in_port->data[i].height = in->data_flex[i].height; + in_port->data[i].comp_grp_id = in->data_flex[i].comp_grp_id; + in_port->data[i].split_point = in->data_flex[i].split_point; + in_port->data[i].secure_mode = in->data_flex[i].secure_mode; + in_port->data[i].wm_mode = in->data_flex[i].wm_mode; + in_port->data[i].hw_context_id = in->data_flex[i].context_id; + in_port->data[i].rcs_en = in->data_flex[i].param_mask & CAM_IFE_WM_RCS_EN; + in_port->data[i].use_wm_pack = in->data_flex[i].param_mask & CAM_IFE_USE_WM_PACK; + } + + return 0; + +err: + return rc; +} + +static int 
cam_ife_mgr_acquire_get_unified_structure_v2( + struct cam_isp_acquire_hw_info *acquire_hw_info, + uint32_t offset, uint32_t *input_size, + struct cam_isp_in_port_generic_info *in_port) +{ + struct cam_isp_in_port_info_v2 *in = NULL; + uint32_t in_port_length = 0; + int32_t rc = 0, i; + size_t len = 0; + uint8_t log_buf[200]; + + in = (struct cam_isp_in_port_info_v2 *) + ((uint8_t *)&acquire_hw_info->data + + acquire_hw_info->input_info_offset + *input_size); + + in_port_length = sizeof(struct cam_isp_in_port_info_v2) + + (in->num_out_res - 1) * + sizeof(struct cam_isp_out_port_info_v2); + + *input_size += in_port_length; + + if (!in_port || ((*input_size) > acquire_hw_info->input_info_size)) { + CAM_ERR(CAM_ISP, "Input is not proper"); + rc = -EINVAL; + goto err; + } + + in_port->major_ver = + (acquire_hw_info->input_info_version >> 16) & 0xFFFF; + in_port->minor_ver = + acquire_hw_info->input_info_version & 0xFFFF; + in_port->res_type = in->res_type; + in_port->lane_type = in->lane_type; + in_port->lane_num = in->lane_num; + in_port->lane_cfg = in->lane_cfg; + in_port->num_valid_vc_dt = in->num_valid_vc_dt; + + if (in_port->num_valid_vc_dt == 0 || in_port->num_valid_vc_dt >= CAM_ISP_VC_DT_CFG || + in_port->num_valid_vc_dt > g_ife_hw_mgr.isp_caps.max_dt_supported) { + CAM_ERR(CAM_ISP, + "Invalid i/p arg invalid vc-dt: %d, arr size %u, max supported by HW: %u", + in_port->num_valid_vc_dt, CAM_ISP_VC_DT_CFG, + g_ife_hw_mgr.isp_caps.max_dt_supported); + rc = -EINVAL; + goto err; + } + + for (i = 0; i < in_port->num_valid_vc_dt; i++) { + if ((i >= CAM_IFE_CSID_MAX_VALID_VC_NUM) && (in->vc[i] != CAM_ISP_INVALID_VC_VALUE)) + rc = -EINVAL; + + in_port->vc[i] = in->vc[i]; + in_port->dt[i] = in->dt[i]; + + CAM_INFO_BUF(CAM_ISP, log_buf, 200, &len, "VC%d: 0x%x, DT%d: 0x%x ", + i, in_port->vc[i], i, in_port->dt[i]); + } + + if (rc) { + CAM_ERR(CAM_ISP, "Invalid VC/DT args, printing given %d args: %s", + in_port->num_valid_vc_dt, log_buf); + goto err; + } + + for (i = 0; i < 
in_port->num_valid_vc_dt; i++) { + in_port->format[i] = (in->format >> (i * CAM_IFE_DECODE_FORMAT_SHIFT_VAL)) & + CAM_IFE_DECODE_FORMAT_MASK; + } + + in_port->test_pattern = in->test_pattern; + in_port->usage_type = in->usage_type; + in_port->left_start = in->left_start; + in_port->left_stop = in->left_stop; + in_port->left_width = in->left_width; + in_port->right_start = in->right_start; + in_port->right_stop = in->right_stop; + in_port->right_width = in->right_width; + in_port->line_start = in->line_start; + in_port->line_stop = in->line_stop; + in_port->height = in->height; + in_port->pixel_clk = in->pixel_clk; + in_port->batch_size = in->batch_size; + in_port->dsp_mode = in->dsp_mode; + in_port->hbi_cnt = in->hbi_cnt; + in_port->cust_node = in->cust_node; + in_port->horizontal_bin = (in->bidirectional_bin & 0xFFFF); + in_port->vertical_bin = (in->bidirectional_bin >> 16); + in_port->qcfa_bin = in->qcfa_bin; + in_port->num_out_res = in->num_out_res; + in_port->sfe_in_path_type = (in->sfe_in_path_type & 0xFFFF); + in_port->sfe_ife_enable = in->sfe_in_path_type >> 16; + /* + * Different formats are not supported for fetch engine use-cases + * Use vc0 format [LSB 8 bits], if the input formats are different for each VC + * fail the acquire + */ + in_port->fe_unpacker_fmt = in->format & CAM_IFE_DECODE_FORMAT_MASK; + + cam_ife_mgr_acquire_get_feature_flag_params(in, in_port); + + in_port->data = CAM_MEM_ZALLOC_ARRAY(in->num_out_res, + sizeof(struct cam_isp_out_port_generic_info), + GFP_KERNEL); + if (in_port->data == NULL) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < in_port->num_out_res; i++) { + in_port->data[i].res_type = in->data_flex[i].res_type; + in_port->data[i].format = in->data_flex[i].format; + in_port->data[i].width = in->data_flex[i].width; + in_port->data[i].height = in->data_flex[i].height; + in_port->data[i].comp_grp_id = in->data_flex[i].comp_grp_id; + in_port->data[i].split_point = in->data_flex[i].split_point; + in_port->data[i].secure_mode 
= in->data_flex[i].secure_mode;
	}

	return 0;

err:
	return rc;
}

/*
 * cam_ife_mgr_acquire_get_unified_structure()
 *
 * Parse one user-supplied in-port blob from @acquire_hw_info into the
 * kernel-internal generic @in_port form, dispatching on the major
 * version encoded in common_info_version (bits [15:12] = major,
 * bits [11:0] = minor).
 *
 * @offset:     in-port index (callers pass the input loop index); it is
 *              forwarded to the version-specific parser, which currently
 *              does not use it.
 * @input_size: running byte offset into the input blob, advanced by the
 *              version-specific parser as each in-port is consumed.
 *
 * Return: 0 on success, negative errno on parse failure or on an
 * unknown major version.
 */
static int cam_ife_mgr_acquire_get_unified_structure(
	struct cam_isp_acquire_hw_info *acquire_hw_info,
	uint32_t offset, uint32_t *input_size,
	struct cam_isp_in_port_generic_info *in_port)
{
	uint32_t major_ver = 0, minor_ver = 0;

	if (acquire_hw_info == NULL || input_size == NULL)
		return -EINVAL;

	major_ver = (acquire_hw_info->common_info_version >> 12) & 0xF;
	minor_ver = (acquire_hw_info->common_info_version) & 0xFFF;

	/* NOTE: major version 1 maps to the legacy "v0" parser */
	switch (major_ver) {
	case 1:
		return cam_ife_mgr_acquire_get_unified_structure_v0(
			acquire_hw_info, offset, input_size, in_port);
	case 2:
		return cam_ife_mgr_acquire_get_unified_structure_v2(
			acquire_hw_info, offset, input_size, in_port);
	case 3:
		return cam_ife_mgr_acquire_get_unified_structure_v3(
			acquire_hw_info, offset, input_size, in_port);
	default:
		CAM_ERR(CAM_ISP, "Invalid ver of i/p port info from user. minor %u, major %u",
			minor_ver, major_ver);
		return -EINVAL;
	}

	/* Unreachable: every switch arm above returns */
	return 0;
}

/*
 * Reset the stream-on scratch-buffer bookkeeping for both the IFE and
 * SFE scratch configs. Caller must guarantee both config pointers are
 * non-NULL; no NULL check is performed here.
 */
static inline void cam_ife_mgr_reset_streamon_scratch_cfg(
	struct cam_ife_hw_mgr_ctx *ctx)
{
	ctx->scratch_buf_info.ife_scratch_config->skip_scratch_cfg_streamon = false;
	ctx->scratch_buf_info.sfe_scratch_config->skip_scratch_cfg_streamon = false;
	ctx->scratch_buf_info.ife_scratch_config->streamon_buf_mask = 0;
	ctx->scratch_buf_info.sfe_scratch_config->streamon_buf_mask = 0;
}

/*
 * Record the source->destination hw-context mapping for a multi-context
 * (v3) acquire. Returns silently when the CSID path does not map to a
 * source hw context (helper returns -1).
 */
static void cam_ife_mgr_populate_hw_ctxt_map(struct cam_ife_hw_mgr_ctx *ife_ctx,
	struct cam_isp_in_port_generic_info *in_port)
{
	int src_hw_ctxt_id;

	src_hw_ctxt_id = cam_ife_mgr_get_src_hw_ctxt_from_csid_path(in_port->path_id);
	if (src_hw_ctxt_id == -1)
		return;

	ife_ctx->acq_hw_ctxt_src_dst_map[src_hw_ctxt_id] = in_port->ipp_dst_hw_ctxt_mask;
}

/* entry function: acquire_hw */
static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
{
	struct cam_ife_hw_mgr *ife_hw_mgr = hw_mgr_priv;
	struct cam_hw_acquire_args
*acquire_args = acquire_hw_args; + int rc, i, j; + struct cam_ife_hw_mgr_ctx *ife_ctx; + struct cam_isp_in_port_generic_info *in_port = NULL; + struct cam_cdm_acquire_data cdm_acquire; + uint32_t total_pix_port = 0; + uint32_t total_rdi_port = 0; + uint32_t total_pd_port = 0; + uint32_t total_lite_port = 0; + uint32_t total_ports = 0; + uint32_t total_sfe_ports = 0; + struct cam_isp_acquire_hw_info *acquire_hw_info; + uint32_t input_size = 0; + uint32_t acquired_rdi_res = 0; + uint32_t input_format_checker = 0; + uint32_t max_height = 0; + + CAM_DBG(CAM_ISP, "Enter..."); + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error"); + return -EINVAL; + } + + /* get the ife ctx */ + rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx); + if (rc || !ife_ctx) { + CAM_ERR(CAM_ISP, "Get ife hw context failed"); + goto err; + } + + cam_cpas_get_cpas_hw_version(&ife_ctx->hw_version); + ife_ctx->ctx_config = 0; + ife_ctx->cdm_handle = 0; + ife_ctx->ctx_type = CAM_IFE_CTX_TYPE_NONE; + ife_ctx->num_acq_vfe_out = 0; + ife_ctx->num_acq_sfe_out = 0; + ife_ctx->common.cb_priv = acquire_args->context_data; + ife_ctx->common.mini_dump_cb = acquire_args->mini_dump_cb; + ife_ctx->flags.internal_cdm = false; + ife_ctx->left_hw_idx = CAM_IFE_CSID_HW_NUM_MAX; + ife_ctx->right_hw_idx = CAM_IFE_CSID_HW_NUM_MAX; + ife_ctx->buf_done_controller = NULL; + ife_ctx->mc_comp_buf_done_controller = NULL; + ife_ctx->common.event_cb = acquire_args->event_cb; + ife_ctx->hw_mgr = ife_hw_mgr; + ife_ctx->cdm_ops = cam_cdm_publish_ops(); + ife_ctx->common.sec_pf_evt_cb = acquire_args->sec_pf_evt_cb; + ife_ctx->try_recovery_cnt = 0; + ife_ctx->recovery_req_id = 0; + ife_ctx->drv_path_idle_en = 0; + ife_ctx->res_list_ife_out = NULL; + ife_ctx->res_list_sfe_out = NULL; + ife_ctx->pri_rdi_out_res = g_ife_hw_mgr.isp_caps.max_vfe_out_res_type; + ife_ctx->ctx_index = acquire_args->ctx_id; + ife_ctx->scratch_buf_info.ife_scratch_config = NULL; + 
ife_ctx->is_init_drv_cfg_received = false; + ife_ctx->flags.skip_reg_dump_buf_put = false; + ife_ctx->wr_per_req_index = 0; + memset(ife_ctx->per_req_info, 0, sizeof(ife_ctx->per_req_info)); + + acquire_hw_info = (struct cam_isp_acquire_hw_info *) acquire_args->acquire_info; + + rc = cam_ife_mgr_check_and_update_fe(ife_ctx, acquire_hw_info, + acquire_args->acquire_info_size); + if (rc) { + CAM_ERR(CAM_ISP, "buffer size is not enough, ctx_idx: %u", ife_ctx->ctx_index); + goto free_ctx; + } + + in_port = CAM_MEM_ZALLOC_ARRAY(acquire_hw_info->num_inputs, + sizeof(struct cam_isp_in_port_generic_info), + GFP_KERNEL); + + if (!in_port) { + CAM_ERR(CAM_ISP, "No memory available, ctx_idx: %u", ife_ctx->ctx_index); + rc = -ENOMEM; + goto free_ctx; + } + + ife_ctx->vfe_bus_comp_grp = CAM_MEM_ZALLOC_ARRAY(CAM_IFE_BUS_COMP_NUM_MAX, + sizeof(struct cam_isp_context_comp_record), GFP_KERNEL); + if (!ife_ctx->vfe_bus_comp_grp) { + CAM_ERR(CAM_CTXT, "No memory for vfe_bus_comp_grp"); + rc = -ENOMEM; + goto free_mem; + } + + ife_ctx->sfe_bus_comp_grp = CAM_MEM_ZALLOC_ARRAY(CAM_SFE_BUS_COMP_NUM_MAX, + sizeof(struct cam_isp_context_comp_record), GFP_KERNEL); + if (!ife_ctx->sfe_bus_comp_grp) { + CAM_ERR(CAM_CTXT, "No memory for sfe_bus_comp_grp"); + rc = -ENOMEM; + goto free_mem; + } + + /* Update in_port structure */ + for (i = 0; i < acquire_hw_info->num_inputs; i++) { + rc = cam_ife_mgr_acquire_get_unified_structure(acquire_hw_info, + i, &input_size, &in_port[i]); + + if (rc < 0) { + CAM_ERR(CAM_ISP, "Failed in parsing: %d, ctx_idx: %u", + rc, ife_ctx->ctx_index); + goto free_mem; + } + + /* Check for formats in multi vc-dt FE use-cases */ + if (ife_ctx->flags.is_fe_enabled) { + input_format_checker = in_port[i].format[0]; + for (j = 1; j < in_port[i].num_valid_vc_dt; j++) { + if (in_port[i].format[j] != input_format_checker) { + CAM_ERR(CAM_ISP, + "Different input formats for FE use-cases not supported - formats vc0: %u vc%d: %u ctx_idx: %u", + input_format_checker, j, 
in_port[i].format[j], + ife_ctx->ctx_index); + rc = -EINVAL; + goto free_mem; + } + } + } + + if (in_port[i].usage_type && in_port[i].secure_mode) { + CAM_ERR(CAM_ISP, + "Dual IFE mode is not supported in secure camera usecases"); + rc = -EINVAL; + goto free_mem; + } + + cam_ife_hw_mgr_preprocess_port(ife_ctx, &in_port[i], &max_height); + total_pix_port += in_port[i].ipp_count + + in_port[i].ife_rd_count + + in_port[i].lcr_count; + total_rdi_port += in_port[i].rdi_count; + total_pd_port += in_port[i].ppp_count; + total_lite_port += in_port[i].lite_path_count; + total_sfe_ports += in_port[i].sfe_port_count; + + if ((in_port[i].major_ver == 3) && in_port[i].ipp_count) + ife_ctx->is_hw_ctx_acq = true; + } + + total_ports = total_pix_port + total_rdi_port + total_pd_port; + acquire_args->total_ports_acq = total_ports; + ife_ctx->res_list_ife_out = CAM_MEM_ZALLOC_ARRAY(total_ports, + sizeof(struct cam_isp_hw_mgr_res), GFP_KERNEL); + memset(ife_ctx->vfe_out_map, 0xff, sizeof(uint8_t) * max_ife_out_res); + + if (!ife_ctx->res_list_ife_out) { + rc = -ENOMEM; + CAM_ERR(CAM_ISP, "Alloc failed for ife out map"); + goto free_mem; + } + /* Check whether context has only RDI resource */ + if (!total_pix_port && !total_pd_port) { + ife_ctx->flags.is_rdi_only_context = true; + CAM_DBG(CAM_ISP, "RDI only context, ctx_idx: %u", ife_ctx->ctx_index); + } + + /* Check whether context has only RDI and PD resource */ + if (!total_pix_port && + (total_pd_port && total_rdi_port)) { + ife_ctx->flags.rdi_pd_context = true; + CAM_DBG(CAM_ISP, "RDI and PD context with [%u pd] [%u rdi], ctx_idx: %u", + total_pd_port, total_rdi_port, ife_ctx->ctx_index); + } + + /* Check if all output ports are of lite */ + if (total_lite_port == total_pix_port + total_rdi_port) + ife_ctx->flags.is_lite_context = true; + + if (total_sfe_ports) { + ife_ctx->res_list_sfe_out = CAM_MEM_ZALLOC_ARRAY(total_sfe_ports, + sizeof(struct cam_isp_hw_mgr_res), GFP_KERNEL); + memset(ife_ctx->sfe_out_map, 0xff, 
sizeof(uint8_t) * max_sfe_out_res); + if (!ife_ctx->res_list_sfe_out) { + rc = -ENOMEM; + CAM_ERR(CAM_ISP, "Alloc failed for sfe out map"); + goto free_mem; + } + } + + /* acquire HW resources */ + for (i = 0; i < acquire_hw_info->num_inputs; i++) { + CAM_DBG(CAM_ISP, "ctx_idx: %u in_res_type %x", + ife_ctx->ctx_index, in_port[i].res_type); + if (!ife_ctx->ctx_type) { + if (in_port[i].cust_node) { + ife_ctx->ctx_type = CAM_IFE_CTX_TYPE_CUSTOM; + /* These can be obtained from uapi */ + ife_ctx->ctx_config |= + CAM_IFE_CTX_CFG_FRAME_HEADER_TS; + ife_ctx->ctx_config |= + CAM_IFE_CTX_CFG_SW_SYNC_ON; + } else if (in_port[i].sfe_in_path_type) { + ife_ctx->ctx_type = CAM_IFE_CTX_TYPE_SFE; + } + } + + CAM_DBG(CAM_ISP, + "in_res_type: 0x%x ctx_idx: %u sfe_in_path_type: 0x%x sfe_ife_enable: 0x%x", + in_port[i].res_type, ife_ctx->ctx_index, in_port[i].sfe_in_path_type, + in_port[i].sfe_ife_enable); + + /* For multi-context acquire cases*/ + if ((in_port[i].major_ver == 3) && in_port[i].ipp_count && + in_port[i].ipp_dst_hw_ctxt_mask) + cam_ife_mgr_populate_hw_ctxt_map(ife_ctx, &in_port[i]); + + if (ife_ctx->flags.is_offline) + rc = cam_ife_mgr_acquire_hw_for_offline_ctx( + ife_ctx, &in_port[i], + &acquire_args->acquired_hw_id[i], + acquire_args->acquired_hw_path[i]); + else + rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, + &in_port[i], + &acquire_args->acquired_hw_id[i], + acquire_args->acquired_hw_path[i], + &acquired_rdi_res); + + if (rc) { + cam_ife_hw_mgr_print_acquire_info(ife_ctx, + (in_port[i].ipp_count + + in_port[i].ife_rd_count + + in_port[i].lcr_count), + in_port[i].ppp_count, + in_port[i].rdi_count, rc); + goto free_res; + } + + CAM_MEM_FREE(in_port[i].data); + in_port[i].data = NULL; + } + + CAM_MEM_FREE(in_port); + in_port = NULL; + + /* Process base info */ + rc = cam_ife_mgr_process_base_info(ife_ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Process base info failed, ctx_idx: %u", ife_ctx->ctx_index); + goto free_res; + } + + if (ife_ctx->ctx_type == 
CAM_IFE_CTX_TYPE_SFE) { + rc = cam_ife_mgr_update_core_info_to_cpas(ife_ctx, true); + if (rc) + goto free_res; + } + + if (!ife_hw_mgr->csid_camif_irq_support || (ife_hw_mgr->csid_camif_irq_support && + ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE)) { + rc = cam_ife_mgr_share_sof_qtimer_addr(ife_ctx); + if (rc) + goto free_res; + } + + rc = cam_ife_mgr_allocate_cdm_cmd( + (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE ? true : false), + &ife_ctx->cdm_cmd); + if (rc) + goto free_res; + + if (ife_ctx->flags.is_dual) + memcpy(cdm_acquire.identifier, "dualife", sizeof("dualife")); + else + memcpy(cdm_acquire.identifier, "ife", sizeof("ife")); + + if (ife_ctx->flags.is_dual) + cdm_acquire.cell_index = ife_ctx->left_hw_idx; + else + cdm_acquire.cell_index = ife_ctx->base[0].idx; + cdm_acquire.handle = 0; + cdm_acquire.userdata = ife_ctx; + cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX; + for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (ife_hw_mgr->cdm_reg_map[i]) + cdm_acquire.base_array[j++] = + ife_hw_mgr->cdm_reg_map[i]; + } + cdm_acquire.base_array_cnt = j; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; + cdm_acquire.id = CAM_CDM_VIRTUAL; + cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback; + rc = cam_cdm_acquire(&cdm_acquire); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW, ctx_idx: %u", ife_ctx->ctx_index); + goto free_cdm_cmd; + } + + CAM_DBG(CAM_ISP, + "Successfully acquired ctx_idx: %u CDM Id: %d, CDM HW hdl=%x, is_dual=%d", + ife_ctx->ctx_index, cdm_acquire.id, cdm_acquire.handle, ife_ctx->flags.is_dual); + ife_ctx->cdm_handle = cdm_acquire.handle; + ife_ctx->cdm_id = cdm_acquire.id; + ife_ctx->cdm_hw_idx = cdm_acquire.hw_idx; + if (cdm_acquire.id == CAM_CDM_IFE) + ife_ctx->flags.internal_cdm = true; + atomic_set(&ife_ctx->cdm_done, 1); + ife_ctx->last_cdm_done_req = 0; + + if (g_ife_hw_mgr.isp_caps.support_consumed_addr) + acquire_args->op_flags |= + CAM_IFE_CTX_CONSUME_ADDR_EN; + + if ((ife_ctx->flags.is_sfe_shdr) || + 
(ife_ctx->flags.is_sfe_fs) || + (ife_ctx->flags.dynamic_drv_supported)) { + acquire_args->op_flags |= + CAM_IFE_CTX_APPLY_DEFAULT_CFG; + ife_ctx->scratch_buf_info.sfe_scratch_config = + CAM_MEM_ZALLOC(sizeof(struct cam_sfe_scratch_buf_cfg), GFP_KERNEL); + if (!ife_ctx->scratch_buf_info.sfe_scratch_config) { + CAM_ERR(CAM_ISP, "Failed to allocate SFE scratch config, ctx_idx: %u", + ife_ctx->ctx_index); + rc = -ENOMEM; + goto free_cdm_cmd; + } + + ife_ctx->scratch_buf_info.ife_scratch_config = + CAM_MEM_ZALLOC(sizeof(struct cam_ife_scratch_buf_cfg), GFP_KERNEL); + if (!ife_ctx->scratch_buf_info.ife_scratch_config) { + CAM_ERR(CAM_ISP, "Failed to allocate IFE scratch config, ctx_idx: %u", + ife_ctx->ctx_index); + rc = -ENOMEM; + CAM_MEM_FREE(ife_ctx->scratch_buf_info.sfe_scratch_config); + goto free_cdm_cmd; + } + + /* Set scratch by default at stream on */ + cam_ife_mgr_reset_streamon_scratch_cfg(ife_ctx); + } + + acquire_args->ctxt_to_hw_map = ife_ctx; + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_CUSTOM) + acquire_args->op_flags |= CAM_IFE_CTX_CUSTOM_EN; + + if (ife_ctx->ctx_config & CAM_IFE_CTX_CFG_FRAME_HEADER_TS) + acquire_args->op_flags |= + CAM_IFE_CTX_FRAME_HEADER_EN; + + if (ife_ctx->ctx_config & CAM_IFE_CTX_CFG_DYNAMIC_SWITCH_ON) + acquire_args->op_flags |= + CAM_IFE_CTX_DYNAMIC_SWITCH_EN; + + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) + acquire_args->op_flags |= + CAM_IFE_CTX_SFE_EN; + + if (ife_ctx->flags.is_aeb_mode) + acquire_args->op_flags |= CAM_IFE_CTX_AEB_EN; + + ife_ctx->flags.ctx_in_use = true; + ife_ctx->num_reg_dump_buf = 0; + + acquire_args->valid_acquired_hw = + acquire_hw_info->num_inputs; + acquire_args->op_params.num_valid_params = 2; + acquire_args->op_params.param_list[0] = max_ife_out_res; + acquire_args->op_params.param_list[1] = max_sfe_out_res; + acquire_args->hw_mgr_ctx_id = ife_ctx->ctx_index; + acquire_args->op_params.fcg_caps = + &g_ife_hw_mgr.isp_caps.fcg_caps; + + cam_ife_hw_mgr_print_acquire_info(ife_ctx, total_pix_port, + 
total_pd_port, total_rdi_port, rc); + + cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx); + + return 0; + +free_cdm_cmd: + cam_ife_mgr_free_cdm_cmd(&ife_ctx->cdm_cmd); +free_res: + cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx); +free_mem: + if (in_port) { + for (i = 0; i < acquire_hw_info->num_inputs; i++) { + CAM_MEM_FREE(in_port[i].data); + in_port[i].data = NULL; + } + + CAM_MEM_FREE(in_port); + in_port = NULL; + } + + CAM_MEM_FREE(ife_ctx->vfe_bus_comp_grp); + CAM_MEM_FREE(ife_ctx->sfe_bus_comp_grp); + ife_ctx->vfe_bus_comp_grp = NULL; + ife_ctx->sfe_bus_comp_grp = NULL; + + CAM_MEM_FREE(ife_ctx->res_list_sfe_out); + ife_ctx->res_list_sfe_out = NULL; + CAM_MEM_FREE(ife_ctx->res_list_ife_out); + ife_ctx->res_list_ife_out = NULL; +free_ctx: + cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx); +err: + CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + +void cam_ife_mgr_acquire_get_unified_dev_str(struct cam_isp_in_port_info *in, + struct cam_isp_in_port_generic_info *gen_port_info) +{ + int i; + + gen_port_info->res_type = in->res_type; + gen_port_info->lane_type = in->lane_type; + gen_port_info->lane_num = in->lane_num; + gen_port_info->lane_cfg = in->lane_cfg; + gen_port_info->vc[0] = in->vc; + gen_port_info->dt[0] = in->dt; + gen_port_info->num_valid_vc_dt = 1; + gen_port_info->format[0] = in->format; + gen_port_info->test_pattern = in->test_pattern; + gen_port_info->usage_type = in->usage_type; + gen_port_info->left_start = in->left_start; + gen_port_info->left_stop = in->left_stop; + gen_port_info->left_width = in->left_width; + gen_port_info->right_start = in->right_start; + gen_port_info->right_stop = in->right_stop; + gen_port_info->right_width = in->right_width; + gen_port_info->line_start = in->line_start; + gen_port_info->line_stop = in->line_stop; + gen_port_info->height = in->height; + gen_port_info->pixel_clk = in->pixel_clk; + gen_port_info->batch_size = in->batch_size; + gen_port_info->dsp_mode = in->dsp_mode; + 
gen_port_info->hbi_cnt = in->hbi_cnt; + gen_port_info->fe_unpacker_fmt = in->format; + gen_port_info->cust_node = 0; + gen_port_info->num_out_res = in->num_out_res; + + for (i = 0; i < in->num_out_res; i++) { + gen_port_info->data[i].res_type = in->data_flex[i].res_type; + gen_port_info->data[i].format = in->data_flex[i].format; + gen_port_info->data[i].width = in->data_flex[i].width; + gen_port_info->data[i].height = in->data_flex[i].height; + gen_port_info->data[i].comp_grp_id = in->data_flex[i].comp_grp_id; + gen_port_info->data[i].split_point = in->data_flex[i].split_point; + gen_port_info->data[i].secure_mode = in->data_flex[i].secure_mode; + } +} + +/* entry function: acquire_hw */ +static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args) +{ + struct cam_ife_hw_mgr *ife_hw_mgr = hw_mgr_priv; + struct cam_hw_acquire_args *acquire_args = acquire_hw_args; + int rc = -1; + int i, j; + struct cam_ife_hw_mgr_ctx *ife_ctx; + struct cam_isp_in_port_info *in_port = NULL; + struct cam_isp_resource *isp_resource = NULL; + struct cam_cdm_acquire_data cdm_acquire; + struct cam_isp_in_port_generic_info *gen_port_info = NULL; + uint32_t total_pd_port = 0; + uint32_t total_pix_port = 0; + uint32_t total_rdi_port = 0; + uint32_t in_port_length = 0; + uint32_t acquired_rdi_res = 0; + uint32_t max_height = 0; + + CAM_DBG(CAM_ISP, "Enter..."); + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_ISP, "Nothing to acquire. 
Seems like error"); + return -EINVAL; + } + + /* get the ife ctx */ + rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx); + if (rc || !ife_ctx) { + CAM_ERR(CAM_ISP, "Get ife hw context failed"); + goto err; + } + + ife_ctx->cdm_handle = 0; + ife_ctx->common.cb_priv = acquire_args->context_data; + ife_ctx->common.event_cb = acquire_args->event_cb; + + ife_ctx->hw_mgr = ife_hw_mgr; + ife_ctx->cdm_ops = cam_cdm_publish_ops(); + + isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info; + + gen_port_info = CAM_MEM_ZALLOC_ARRAY(acquire_args->num_acq, + sizeof(struct cam_isp_in_port_generic_info), + GFP_KERNEL); + + if (!gen_port_info) { + CAM_ERR(CAM_ISP, "No memory available, ctx_idx: %u", ife_ctx->ctx_index); + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < acquire_args->num_acq; i++) { + if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT) + continue; + + CAM_DBG(CAM_ISP, "ctx_idx: %u acquire no = %d total = %d", ife_ctx->ctx_index, i, + acquire_args->num_acq); + CAM_DBG(CAM_ISP, + "ctx_idx: %u start copy from user handle %lld with len = %d", + ife_ctx->ctx_index, isp_resource[i].res_hdl, + isp_resource[i].length); + + in_port_length = sizeof(struct cam_isp_in_port_info); + + if (in_port_length > isp_resource[i].length) { + CAM_ERR(CAM_ISP, "buffer size is not enough, ctx_idx: %u", + ife_ctx->ctx_index); + rc = -EINVAL; + goto free_res; + } + + in_port = memdup_user( + u64_to_user_ptr(isp_resource[i].res_hdl), + isp_resource[i].length); + if (!IS_ERR(in_port)) { + if (in_port->num_out_res > max_ife_out_res) { + CAM_ERR(CAM_ISP, "too many output res %d, ctx_idx: %u", + in_port->num_out_res, ife_ctx->ctx_index); + rc = -EINVAL; + CAM_MEM_FREE(in_port); + goto free_res; + } + + in_port_length = sizeof(struct cam_isp_in_port_info) + + (in_port->num_out_res - 1) * + sizeof(struct cam_isp_out_port_info); + if (in_port_length > isp_resource[i].length) { + CAM_ERR(CAM_ISP, "buffer size is not enough, ctx_idx: %u", + ife_ctx->ctx_index); + rc 
= -EINVAL; + CAM_MEM_FREE(in_port); + goto free_res; + } + + gen_port_info[i].data = CAM_MEM_ZALLOC_ARRAY( + in_port->num_out_res, + sizeof(struct cam_isp_out_port_generic_info), + GFP_KERNEL); + if (gen_port_info[i].data == NULL) { + rc = -ENOMEM; + goto free_res; + } + + cam_ife_mgr_acquire_get_unified_dev_str(in_port, + &gen_port_info[i]); + cam_ife_hw_mgr_preprocess_port(ife_ctx, + &gen_port_info[i], &max_height); + + total_pix_port += gen_port_info[i].ipp_count + + gen_port_info[i].ife_rd_count + + gen_port_info[i].lcr_count; + total_rdi_port += gen_port_info[i].rdi_count; + total_pd_port += gen_port_info[i].ppp_count; + + CAM_MEM_FREE(in_port); + } else { + CAM_ERR(CAM_ISP, + "Copy from user failed with in_port = %pK, ctx_idx: %u", + in_port, ife_ctx->ctx_index); + rc = -EFAULT; + goto free_mem; + } + } + + /* Check whether context has only RDI resource */ + if (!total_pix_port || !total_pd_port) { + ife_ctx->flags.is_rdi_only_context = true; + CAM_DBG(CAM_ISP, "RDI only context, ctx_idx: %u", ife_ctx->ctx_index); + } + + /* Check whether context has only RDI and PD resource */ + if (!total_pix_port && + (total_pd_port && total_rdi_port)) { + ife_ctx->flags.rdi_pd_context = true; + CAM_DBG(CAM_ISP, "RDI and PD context with [%u pd] [%u rdi] ctx_idx: %u", + total_pd_port, total_rdi_port, ife_ctx->ctx_index); + } + + /* acquire HW resources */ + for (i = 0; i < acquire_args->num_acq; i++) { + if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT) + continue; + + rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, + &gen_port_info[i], + &acquire_args->acquired_hw_id[i], + acquire_args->acquired_hw_path[i], + &acquired_rdi_res); + + if (rc) { + cam_ife_hw_mgr_print_acquire_info(ife_ctx, + total_pix_port, total_pd_port, + total_rdi_port, rc); + goto free_res; + } + + CAM_MEM_FREE(gen_port_info[i].data); + gen_port_info[i].data = NULL; + } + + CAM_MEM_FREE(gen_port_info); + gen_port_info = NULL; + + /* Process base info */ + rc = cam_ife_mgr_process_base_info(ife_ctx); + 
if (rc) { + CAM_ERR(CAM_ISP, "Process base info failed, ctx_idx: %u", ife_ctx->ctx_index); + goto free_res; + } + + if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + rc = cam_ife_mgr_update_core_info_to_cpas(ife_ctx, true); + if (rc) + goto free_res; + } + + rc = cam_ife_mgr_allocate_cdm_cmd(false, + &ife_ctx->cdm_cmd); + if (rc) + goto free_res; + + cam_cpas_get_cpas_hw_version(&ife_ctx->hw_version); + ife_ctx->flags.internal_cdm = false; + + if (ife_ctx->flags.is_dual) + memcpy(cdm_acquire.identifier, "dualife", sizeof("dualife")); + else + memcpy(cdm_acquire.identifier, "ife", sizeof("ife")); + cdm_acquire.cell_index = ife_ctx->base[0].idx; + cdm_acquire.handle = 0; + cdm_acquire.userdata = ife_ctx; + cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX; + for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (ife_hw_mgr->cdm_reg_map[i]) + cdm_acquire.base_array[j++] = + ife_hw_mgr->cdm_reg_map[i]; + } + cdm_acquire.base_array_cnt = j; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; + cdm_acquire.id = CAM_CDM_VIRTUAL; + cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback; + rc = cam_cdm_acquire(&cdm_acquire); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW, ctx_idx: %u", ife_ctx->ctx_index); + goto free_cdm_cmd; + } + + CAM_DBG(CAM_ISP, "Successfully acquired CDM ID:%d, CDM HW hdl=%x ctx_idx: %u", + cdm_acquire.id, cdm_acquire.handle, ife_ctx->ctx_index); + + if (cdm_acquire.id == CAM_CDM_IFE) + ife_ctx->flags.internal_cdm = true; + ife_ctx->cdm_handle = cdm_acquire.handle; + ife_ctx->cdm_id = cdm_acquire.id; + atomic_set(&ife_ctx->cdm_done, 1); + ife_ctx->last_cdm_done_req = 0; + + acquire_args->ctxt_to_hw_map = ife_ctx; + ife_ctx->flags.ctx_in_use = true; + ife_ctx->num_reg_dump_buf = 0; + + cam_ife_hw_mgr_print_acquire_info(ife_ctx, total_pix_port, + total_pd_port, total_rdi_port, rc); + + cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx); + + return 0; + +free_cdm_cmd: + cam_ife_mgr_free_cdm_cmd(&ife_ctx->cdm_cmd); +free_res: + 
cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx); + cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx); +free_mem: + if (gen_port_info) { + for (i = 0; i < acquire_args->num_acq; i++) { + CAM_MEM_FREE(gen_port_info[i].data); + gen_port_info[i].data = NULL; + } + CAM_MEM_FREE(gen_port_info); + gen_port_info = NULL; + } +err: + CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + +/* entry function: acquire_hw */ +static int cam_ife_mgr_acquire(void *hw_mgr_priv, + void *acquire_hw_args) +{ + struct cam_hw_acquire_args *acquire_args = acquire_hw_args; + int rc = -1; + + CAM_DBG(CAM_ISP, "Enter..."); + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error"); + return -EINVAL; + } + + if (acquire_args->num_acq == CAM_API_COMPAT_CONSTANT) + rc = cam_ife_mgr_acquire_hw(hw_mgr_priv, acquire_hw_args); + else + rc = cam_ife_mgr_acquire_dev(hw_mgr_priv, acquire_hw_args); + + CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + +static const char *cam_isp_util_usage_data_to_string( + uint32_t usage_data) +{ + switch (usage_data) { + case CAM_ISP_USAGE_LEFT_PX: + return "LEFT_PX"; + case CAM_ISP_USAGE_RIGHT_PX: + return "RIGHT_PX"; + case CAM_ISP_USAGE_RDI: + return "RDI"; + case CAM_ISP_USAGE_SFE_LEFT: + return "SFE_LEFT_PX"; + case CAM_ISP_USAGE_SFE_RIGHT: + return "SFE_RIGHT_PX"; + case CAM_ISP_USAGE_SFE_RDI: + return "SFE_RDI"; + default: + return "USAGE_INVALID"; + } +} + +static void cam_ife_mgr_print_blob_info(struct cam_ife_hw_mgr_ctx *ctx, uint64_t request_id, + struct cam_isp_prepare_hw_update_data *hw_update_data) +{ + int i; + struct cam_isp_bw_config_internal_v2 *bw_config = + (struct cam_isp_bw_config_internal_v2 *) + &hw_update_data->bw_clk_config.bw_config_v2; + struct cam_isp_clock_config *ife_clock_config = + (struct cam_isp_clock_config *) &hw_update_data->bw_clk_config.ife_clock_config; + struct cam_isp_clock_config *sfe_clock_config = + (struct cam_isp_clock_config *) 
&hw_update_data->bw_clk_config.sfe_clock_config; + + CAM_INFO(CAM_ISP, "ctx: %u req_id:%llu config_valid[BW VFE_CLK SFE_CLK]:[%d %d %d]", + ctx->ctx_index, request_id, hw_update_data->bw_clk_config.bw_config_valid, + hw_update_data->bw_clk_config.ife_clock_config_valid, + hw_update_data->bw_clk_config.sfe_clock_config_valid); + + if (!hw_update_data->bw_clk_config.bw_config_valid) + goto ife_clk; + + for (i = 0; i < bw_config->num_paths; i++) { + CAM_INFO(CAM_PERF, + "ctx_idx: %u ISP_BLOB usage_type=%u [%s] [%s] [%s] [%llu] [%llu] [%llu]", + ctx->ctx_index, bw_config->usage_type, + cam_isp_util_usage_data_to_string( + bw_config->axi_path[i].usage_data), + cam_cpas_axi_util_path_type_to_string( + bw_config->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + bw_config->axi_path[i].transac_type), + bw_config->axi_path[i].camnoc_bw, + bw_config->axi_path[i].mnoc_ab_bw, + bw_config->axi_path[i].mnoc_ib_bw); + } + +ife_clk: + if (!hw_update_data->bw_clk_config.ife_clock_config_valid) + goto sfe_clk; + + CAM_INFO(CAM_PERF, "IFE ctx_idx: %u clk update usage=%u left_clk= %lu right_clk=%lu", + ctx->ctx_index, ife_clock_config->usage_type, ife_clock_config->left_pix_hz, + ife_clock_config->right_pix_hz); + +sfe_clk: + if (!hw_update_data->bw_clk_config.sfe_clock_config_valid) + goto end; + + CAM_INFO(CAM_PERF, "SFE ctx_idx: %u clk update usage: %u left_clk: %lu right_clk: %lu", + ctx->ctx_index, sfe_clock_config->usage_type, sfe_clock_config->left_pix_hz, + sfe_clock_config->right_pix_hz); + +end: + return; +} + +static int cam_isp_classify_vote_info( + struct cam_isp_hw_mgr_res *hw_mgr_res, + struct cam_isp_bw_config_internal_v2 *bw_config, + struct cam_axi_vote *isp_vote, + uint32_t hw_type, + uint32_t split_idx, + bool *nrdi_l_bw_updated, + bool *nrdi_r_bw_updated, + bool is_sfe_shdr) +{ + int rc = 0, i, j = 0; + + if (hw_type == CAM_ISP_HW_TYPE_VFE) { + if ((hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) + || (hw_mgr_res->res_id == 
CAM_ISP_HW_VFE_IN_RD) || + (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB) || + (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_LCR)) { + if (split_idx == CAM_ISP_HW_SPLIT_LEFT) { + if (*nrdi_l_bw_updated) + return rc; + + for (i = 0; i < bw_config->num_paths; i++) { + if (bw_config->axi_path[i].usage_data == + CAM_ISP_USAGE_LEFT_PX) { + memcpy(&isp_vote->axi_path[j], + &bw_config->axi_path[i], + sizeof(struct + cam_cpas_axi_per_path_bw_vote)); + j++; + } + } + isp_vote->num_paths = j; + + *nrdi_l_bw_updated = true; + } else { + if (*nrdi_r_bw_updated) + return rc; + + for (i = 0; i < bw_config->num_paths; i++) { + if (bw_config->axi_path[i].usage_data == + CAM_ISP_USAGE_RIGHT_PX) { + memcpy(&isp_vote->axi_path[j], + &bw_config->axi_path[i], + sizeof(struct + cam_cpas_axi_per_path_bw_vote)); + j++; + } + } + isp_vote->num_paths = j; + + *nrdi_r_bw_updated = true; + } + } else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) + && (hw_mgr_res->res_id <= + CAM_ISP_HW_VFE_IN_RDI4)) { + for (i = 0; i < bw_config->num_paths; i++) { + if ((bw_config->axi_path[i].usage_data == + CAM_ISP_USAGE_RDI) && + ((bw_config->axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_IFE_RDI0) == + (hw_mgr_res->res_id - + CAM_ISP_HW_VFE_IN_RDI0))) { + memcpy(&isp_vote->axi_path[j], + &bw_config->axi_path[i], + sizeof(struct + cam_cpas_axi_per_path_bw_vote)); + j++; + } + } + isp_vote->num_paths = j; + + } else { + if (hw_mgr_res->hw_res[split_idx]) { + CAM_ERR(CAM_ISP, "Invalid res_id %u, split_idx: %u", + hw_mgr_res->res_id, split_idx); + rc = -EINVAL; + return rc; + } + } + } else { + if (is_sfe_shdr || + (hw_mgr_res->res_id == CAM_ISP_HW_SFE_IN_PIX)) { + if ((split_idx == CAM_ISP_HW_SPLIT_LEFT) && + (!(*nrdi_l_bw_updated))) { + for (i = 0; i < bw_config->num_paths; i++) { + if (bw_config->axi_path[i].usage_data == + CAM_ISP_USAGE_SFE_LEFT) { + memcpy(&isp_vote->axi_path[j], + &bw_config->axi_path[i], + sizeof(struct + cam_cpas_axi_per_path_bw_vote)); + j++; + } + } + isp_vote->num_paths = j; + + 
*nrdi_l_bw_updated = true; + } else if (!(*nrdi_r_bw_updated)) { + for (i = 0; i < bw_config->num_paths; i++) { + if (bw_config->axi_path[i].usage_data == + CAM_ISP_USAGE_SFE_RIGHT) { + memcpy(&isp_vote->axi_path[j], + &bw_config->axi_path[i], + sizeof(struct + cam_cpas_axi_per_path_bw_vote)); + j++; + } + } + isp_vote->num_paths = j; + + *nrdi_r_bw_updated = true; + } + } + + if ((hw_mgr_res->res_id >= CAM_ISP_HW_SFE_IN_RDI0) + && (hw_mgr_res->res_id <= + CAM_ISP_HW_SFE_IN_RDI4)) { + for (i = 0; i < bw_config->num_paths; i++) { + if ((bw_config->axi_path[i].usage_data == + CAM_ISP_USAGE_SFE_RDI) && + ((bw_config->axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_SFE_RDI0) == + (hw_mgr_res->res_id - + CAM_ISP_HW_SFE_IN_RDI0))) { + memcpy(&isp_vote->axi_path[j], + &bw_config->axi_path[i], + sizeof(struct + cam_cpas_axi_per_path_bw_vote)); + j++; + } + } + isp_vote->num_paths = j; + } + } + + for (i = 0; i < isp_vote->num_paths; i++) { + CAM_DBG(CAM_PERF, + "CLASSIFY_VOTE [%s] [%s] [%s] [%s] [%llu] [%llu] [%llu]", + cam_isp_util_usage_data_to_string( + isp_vote->axi_path[i].usage_data), + cam_cpas_axi_util_path_type_to_string( + isp_vote->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + isp_vote->axi_path[i].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + isp_vote->axi_path[i].vote_level), + isp_vote->axi_path[i].camnoc_bw, + isp_vote->axi_path[i].mnoc_ab_bw, + isp_vote->axi_path[i].mnoc_ib_bw); + } + + return rc; +} + +static int cam_isp_blob_bw_update_v2( + struct cam_isp_bw_config_internal_v2 *bw_config, + struct cam_ife_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_vfe_bw_update_args_v2 bw_upd_args; + struct cam_sfe_bw_update_args sfe_bw_update_args; + int rc = -EINVAL; + uint32_t i, split_idx = INT_MIN; + bool nrdi_l_bw_updated = false; + bool nrdi_r_bw_updated = false; + bool is_sfe_shdr = false; + + for (i = 0; i < bw_config->num_paths; i++) { + CAM_DBG(CAM_PERF, + 
"ctx_idx: %u ISP_BLOB usage_type=%u [%s] [%s] [%s] [%s] [%llu] [%llu] [%llu]", + ctx->ctx_index, bw_config->usage_type, + cam_isp_util_usage_data_to_string( + bw_config->axi_path[i].usage_data), + cam_cpas_axi_util_path_type_to_string( + bw_config->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + bw_config->axi_path[i].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + bw_config->axi_path[i].vote_level), + bw_config->axi_path[i].camnoc_bw, + bw_config->axi_path[i].mnoc_ab_bw, + bw_config->axi_path[i].mnoc_ib_bw); + } + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (split_idx = 0; split_idx < CAM_ISP_HW_SPLIT_MAX; + split_idx++) { + if (!hw_mgr_res->hw_res[split_idx]) + continue; + + memset(&bw_upd_args.isp_vote, 0, + sizeof(struct cam_axi_vote)); + rc = cam_isp_classify_vote_info(hw_mgr_res, bw_config, + &bw_upd_args.isp_vote, CAM_ISP_HW_TYPE_VFE, + split_idx, &nrdi_l_bw_updated, &nrdi_r_bw_updated, + false); + if (rc) + return rc; + + if (!bw_upd_args.isp_vote.num_paths) + continue; + + hw_intf = hw_mgr_res->hw_res[split_idx]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + bw_upd_args.node_res = + hw_mgr_res->hw_res[split_idx]; + + /* + * Update BW values to top, actual apply to hw will happen when + * CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE is called + */ + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_BW_UPDATE_V2, + &bw_upd_args, + sizeof( + struct cam_vfe_bw_update_args_v2)); + if (rc) + CAM_ERR(CAM_PERF, + "BW Update failed rc: %d, ctx_idx: %u", + rc, ctx->ctx_index); + } else { + CAM_WARN(CAM_ISP, "NULL hw_intf!, ctx_idx: %u", ctx->ctx_index); + } + } + } + + nrdi_l_bw_updated = false; + nrdi_r_bw_updated = false; + if ((ctx->flags.is_sfe_fs) || (ctx->flags.is_sfe_shdr)) + is_sfe_shdr = true; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_sfe_src, list) { + for (split_idx = 0; split_idx < CAM_ISP_HW_SPLIT_MAX; + split_idx++) { + if (!hw_mgr_res->hw_res[split_idx]) 
+ continue; + + memset(&sfe_bw_update_args.sfe_vote, 0, + sizeof(struct cam_axi_vote)); + rc = cam_isp_classify_vote_info(hw_mgr_res, bw_config, + &sfe_bw_update_args.sfe_vote, CAM_ISP_HW_TYPE_SFE, + split_idx, &nrdi_l_bw_updated, &nrdi_r_bw_updated, + is_sfe_shdr); + if (rc) + return rc; + + if (!sfe_bw_update_args.sfe_vote.num_paths) + continue; + + hw_intf = hw_mgr_res->hw_res[split_idx]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + sfe_bw_update_args.node_res = + hw_mgr_res->hw_res[split_idx]; + + /* + * Update BW values to top, actual apply to hw will happen when + * CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE is called + */ + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_BW_UPDATE_V2, + &sfe_bw_update_args, + sizeof( + struct cam_sfe_bw_update_args)); + if (rc) + CAM_ERR(CAM_PERF, + "BW Update failed rc: %d, ctx_idx: %u", + rc, ctx->ctx_index); + } else { + CAM_WARN(CAM_ISP, "NULL hw_intf!, ctx_idx: %u", ctx->ctx_index); + } + } + } + + return rc; +} + +static int cam_isp_blob_bw_update( + struct cam_isp_bw_config *bw_config, + struct cam_ife_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_vfe_bw_update_args bw_upd_args; + uint64_t cam_bw_bps = 0; + uint64_t ext_bw_bps = 0; + int rc = -EINVAL; + uint32_t i; + bool camif_l_bw_updated = false; + bool camif_r_bw_updated = false; + + CAM_DBG(CAM_PERF, + "ctx_idx: %u ISP_BLOB usage=%u left cam_bw_bps=%llu ext_bw_bps=%llu, right cam_bw_bps=%llu ext_bw_bps=%llu", + ctx->ctx_index, bw_config->usage_type, + bw_config->left_pix_vote.cam_bw_bps, + bw_config->left_pix_vote.ext_bw_bps, + bw_config->right_pix_vote.cam_bw_bps, + bw_config->right_pix_vote.ext_bw_bps); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + if ((hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) || + (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_RD) || + 
(hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB) + || (hw_mgr_res->res_id == + CAM_ISP_HW_VFE_IN_LCR)) + if (i == CAM_ISP_HW_SPLIT_LEFT) { + if (camif_l_bw_updated) + continue; + + cam_bw_bps = + bw_config->left_pix_vote.cam_bw_bps; + ext_bw_bps = + bw_config->left_pix_vote.ext_bw_bps; + + camif_l_bw_updated = true; + } else { + if (camif_r_bw_updated) + continue; + + cam_bw_bps = + bw_config->right_pix_vote.cam_bw_bps; + ext_bw_bps = + bw_config->right_pix_vote.ext_bw_bps; + + camif_r_bw_updated = true; + } + else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) + && (hw_mgr_res->res_id <= + CAM_ISP_HW_VFE_IN_RDI4)) { + uint32_t idx = hw_mgr_res->res_id - + CAM_ISP_HW_VFE_IN_RDI0; + if (idx >= bw_config->num_rdi) + continue; + + cam_bw_bps = + bw_config->rdi_vote_flex[idx].cam_bw_bps; + ext_bw_bps = + bw_config->rdi_vote_flex[idx].ext_bw_bps; + } else { + if (hw_mgr_res->hw_res[i]) { + CAM_ERR(CAM_ISP, "Invalid ctx_idx: %u res_id %u", + ctx->ctx_index, hw_mgr_res->res_id); + rc = -EINVAL; + return rc; + } + } + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + bw_upd_args.node_res = + hw_mgr_res->hw_res[i]; + bw_upd_args.camnoc_bw_bytes = cam_bw_bps; + bw_upd_args.external_bw_bytes = ext_bw_bps; + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_BW_UPDATE, + &bw_upd_args, + sizeof(struct cam_vfe_bw_update_args)); + if (rc) + CAM_ERR(CAM_PERF, "BW Update failed, ctx_idx: %u", + ctx->ctx_index); + } else + CAM_WARN(CAM_ISP, "NULL hw_intf!ctx_idx: %u", ctx->ctx_index); + } + } + + return rc; +} + +static void cam_ife_mgr_send_frame_event(uint64_t request_id, uint32_t ctx_index) +{ + if (cam_presil_mode_enabled()) { + CAM_DBG(CAM_PRESIL, "PRESIL FRAME req_id=%llu ctx_index %u", + request_id, ctx_index); + cam_presil_send_event(CAM_PRESIL_EVENT_IFE_FRAME_RUN, request_id); + } +} + +static void cam_isp_irq_inject_clear_params( + struct cam_isp_irq_inject_param *param) +{ + param->hw_type = -1; + 
param->hw_idx = -1; + param->reg_unit = -1; + param->irq_mask = -1; + param->req_id = 0; + param->is_valid = false; + memset(param->line_buf, '\0', LINE_BUFFER_LEN); +} + +static int cam_ife_hw_mgr_sfe_irq_inject_or_dump_desc( + struct cam_ife_hw_mgr *hw_mgr, + struct cam_isp_irq_inject_param *params, + bool dump_irq_desc) +{ + int i, rc = 0; + char *line_buf = NULL; + struct cam_hw_intf *hw_intf = NULL; + + line_buf = CAM_MEM_ZALLOC(sizeof(char) * LINE_BUFFER_LEN, GFP_KERNEL); + if (!line_buf) + return -ENOMEM; + + for (i = 0; i < CAM_SFE_HW_NUM_MAX; i++) { + if ((!hw_mgr->sfe_devices[i]) || + (hw_mgr->sfe_devices[i]->hw_intf->hw_idx != params->hw_idx)) + continue; + + hw_intf = hw_mgr->sfe_devices[i]->hw_intf; + + if (dump_irq_desc) { + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION, params, + sizeof(struct cam_isp_irq_inject_param)); + goto clear_param; + } + + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_IRQ_INJECTION, params, + sizeof(struct cam_isp_irq_inject_param)); + if (rc) + scnprintf(line_buf, LINE_BUFFER_LEN, + "Injecting IRQ %x failed for SFE at req: %lld\n", + params->irq_mask, params->req_id); + else + scnprintf(line_buf, LINE_BUFFER_LEN, + "IRQ %#x injected for SFE at req: %lld\n", + params->irq_mask, params->req_id); + break; + } + +clear_param: + if (irq_inject_display_buf) { + strlcat(irq_inject_display_buf, params->line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + strlcat(irq_inject_display_buf, line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + } + + /* Clear the param injected */ + cam_isp_irq_inject_clear_params(params); + CAM_MEM_FREE(line_buf); + return rc; +} + +static int cam_ife_hw_mgr_vfe_irq_inject_or_dump_desc( + struct cam_ife_hw_mgr *hw_mgr, + struct cam_isp_irq_inject_param *params, + bool dump_irq_desc) +{ + int i, rc = 0; + char *line_buf = NULL; + struct cam_hw_intf *hw_intf = NULL; + + line_buf = CAM_MEM_ZALLOC(sizeof(char) * LINE_BUFFER_LEN, GFP_KERNEL); + if (!line_buf) + return 
-ENOMEM; + + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if ((!hw_mgr->ife_devices[i]) || + (hw_mgr->ife_devices[i]->hw_intf->hw_idx != params->hw_idx)) + continue; + + hw_intf = hw_mgr->ife_devices[i]->hw_intf; + + if (dump_irq_desc) { + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION, params, + sizeof(struct cam_isp_irq_inject_param)); + goto clear_param; + } + + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_IRQ_INJECTION, params, + sizeof(struct cam_isp_irq_inject_param)); + if (rc) + scnprintf(line_buf, LINE_BUFFER_LEN, + "Injecting IRQ %x failed for IFE at req: %lld\n", + params->irq_mask, params->req_id); + else + scnprintf(line_buf, LINE_BUFFER_LEN, + "IRQ %#x injected for IFE at req: %lld\n", + params->irq_mask, params->req_id); + break; + } + +clear_param: + if (irq_inject_display_buf) { + strlcat(irq_inject_display_buf, params->line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + strlcat(irq_inject_display_buf, line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + } + + /* Clear the param injected */ + cam_isp_irq_inject_clear_params(params); + CAM_MEM_FREE(line_buf); + return rc; +} + +static int cam_ife_hw_mgr_csid_irq_inject_or_dump_desc( + struct cam_ife_hw_mgr *hw_mgr, + struct cam_isp_irq_inject_param *params, + bool dump_irq_desc) +{ + int i, rc = 0; + char *line_buf = NULL; + struct cam_hw_intf *hw_intf = NULL; + + line_buf = CAM_MEM_ZALLOC(sizeof(char) * LINE_BUFFER_LEN, GFP_KERNEL); + if (!line_buf) + return -ENOMEM; + + for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) { + if ((!hw_mgr->csid_devices[i]) || + (hw_mgr->csid_devices[i]->hw_idx != params->hw_idx)) + continue; + + hw_intf = hw_mgr->csid_devices[i]; + + if (dump_irq_desc) { + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION, params, + sizeof(struct cam_isp_irq_inject_param)); + goto clear_param; + } + + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_IRQ_INJECTION, params, + sizeof(struct 
cam_isp_irq_inject_param)); + if (rc) + scnprintf(line_buf, LINE_BUFFER_LEN, + "Injecting IRQ %x failed for CSID at req: %lld\n", + params->irq_mask, params->req_id); + else + scnprintf(line_buf, LINE_BUFFER_LEN, + "IRQ %#x injected for CSID at req: %lld\n", + params->irq_mask, params->req_id); + break; + } + +clear_param: + if (irq_inject_display_buf) { + strlcat(irq_inject_display_buf, params->line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + strlcat(irq_inject_display_buf, line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + } + + /* Clear the param injected */ + cam_isp_irq_inject_clear_params(params); + CAM_MEM_FREE(line_buf); + return rc; +} + +static int cam_ife_hw_mgr_irq_injection(struct cam_ife_hw_mgr *hw_mgr, + uint64_t request_id) +{ + int i, rc = 0; + + for (i = 0; i < MAX_INJECT_SET; i++) { + if ((!hw_mgr->irq_inject_param[i].is_valid) || + ((hw_mgr->irq_inject_param[i].req_id != request_id) && + (hw_mgr->irq_inject_param[i].req_id != 0xFFFFFFFF))) + continue; + + switch (hw_mgr->irq_inject_param[i].hw_type) { + case CAM_ISP_HW_TYPE_CSID: + rc = cam_ife_hw_mgr_csid_irq_inject_or_dump_desc( + hw_mgr, &hw_mgr->irq_inject_param[i], false); + break; + case CAM_ISP_HW_TYPE_VFE: + rc = cam_ife_hw_mgr_vfe_irq_inject_or_dump_desc( + hw_mgr, &hw_mgr->irq_inject_param[i], false); + break; + case CAM_ISP_HW_TYPE_SFE: + rc = cam_ife_hw_mgr_sfe_irq_inject_or_dump_desc( + hw_mgr, &hw_mgr->irq_inject_param[i], false); + break; + default: + if (irq_inject_display_buf) + strlcat(irq_inject_display_buf, "No matched HW_TYPE\n", + IRQ_INJECT_DISPLAY_BUF_LEN); + rc = -EINVAL; + return rc; + } + } + + return rc; +} + +static int cam_isp_blob_fcg_update( + struct cam_isp_fcg_config_internal *fcg_config_internal, + uint32_t entry_idx, + uint32_t prediction_idx, + struct list_head *res_list_isp_src, + struct cam_hw_config_args *cfg) +{ + struct cam_isp_resource_node *res; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_fcg_cmd fcg_cmd; + struct cam_hw_update_entry *hw_entry; + 
uint32_t i; + int rc = -EINVAL; + + list_for_each_entry(hw_mgr_res, res_list_isp_src, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + if (entry_idx >= cfg->num_hw_update_entries) { + CAM_ERR(CAM_ISP, + "Entry index %d exceed number of hw update entries %u, request id %llu", + entry_idx, cfg->num_hw_update_entries, cfg->request_id); + return -EINVAL; + } + + hw_entry = &cfg->hw_update_entries[entry_idx]; + res = hw_mgr_res->hw_res[i]; + + fcg_cmd.res = res; + fcg_cmd.cmd_type = CAM_ISP_HW_CMD_FCG_CONFIG; + fcg_cmd.get_size_flag = false; + fcg_cmd.u.fcg_update.cmd_size = hw_entry->len; + fcg_cmd.u.fcg_update.cmd_buf_addr = hw_entry->addr; + fcg_cmd.u.fcg_update.data = (void *)fcg_config_internal; + + /* This prediction sent from userspace is insufficient */ + if (prediction_idx > fcg_config_internal->num_predictions) + prediction_idx = fcg_config_internal->num_predictions; + fcg_cmd.u.fcg_update.prediction_idx = prediction_idx; + + CAM_DBG(CAM_ISP, + "Replace FCG config with predicted ones, prediction idx: %d, request id: %llu", + prediction_idx, cfg->request_id); + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_FCG_CONFIG, + &fcg_cmd, (uint32_t)sizeof(struct cam_isp_hw_fcg_cmd)); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in writing FCG values to the hw update entry, rc: %d, request id: %llu", + rc, cfg->request_id); + return rc; + } + return 0; + } + } + + CAM_DBG(CAM_ISP, + "No matching ISP resources when filling FCG hw update entry, request id: %llu", + cfg->request_id); + return rc; +} + +static inline int cam_ife_mgr_apply_fcg_update( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_isp_prepare_hw_update_data *hw_update_data, + struct cam_hw_config_args *cfg) +{ + int rc = 0; + struct cam_isp_fcg_config_internal *fcg_configs; + + if (hw_update_data->fcg_info.ife_fcg_online && + 
!hw_update_data->fcg_info.use_current_cfg) { + CAM_DBG(CAM_ISP, "Start writing IFE/MC_TFE FCG configs to kmd buffer on ctx: %d", + ctx->ctx_index); + + fcg_configs = &hw_update_data->fcg_info.ife_fcg_config; + rc = cam_isp_blob_fcg_update(fcg_configs, + hw_update_data->fcg_info.ife_fcg_entry_idx, + hw_update_data->fcg_info.prediction_idx, + &ctx->res_list_ife_src, cfg); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in applying IFE/MC_TFE FCG configurations, ctx_idx: %u", + ctx->ctx_index); + return rc; + } + } + + if (hw_update_data->fcg_info.sfe_fcg_online && + !hw_update_data->fcg_info.use_current_cfg) { + CAM_DBG(CAM_ISP, "Start writing SFE FCG configs to kmd buffer on ctx: %d", + ctx->ctx_index); + + fcg_configs = &hw_update_data->fcg_info.sfe_fcg_config; + rc = cam_isp_blob_fcg_update(fcg_configs, + hw_update_data->fcg_info.sfe_fcg_entry_idx, + hw_update_data->fcg_info.prediction_idx, + &ctx->res_list_sfe_src, cfg); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in applying SFE FCG configurations, ctx_idx: %u", + ctx->ctx_index); + return rc; + } + } + + return rc; +} + +/* entry function: config_hw */ +static int cam_ife_mgr_config_hw( + void *hw_mgr_priv, + void *config_hw_args) +{ + int rc, i, skip = 0; + struct cam_hw_config_args *cfg; + struct cam_hw_update_entry *cmd; + struct cam_cdm_bl_request *cdm_cmd; + struct cam_ife_hw_mgr_ctx *ctx; + struct cam_isp_prepare_hw_update_data *hw_update_data; + struct cam_ife_hw_mgr *ife_hw_mgr; + unsigned long rem_jiffies = 0; + bool is_cdm_hung = false; + size_t len = 0; + uint32_t *buf_addr = NULL, *buf_start = NULL, *buf_end = NULL; + uint32_t cmd_type = 0; + bool wait_for_cdm = false; + + if (!hw_mgr_priv || !config_hw_args) { + CAM_ERR(CAM_ISP, + "Invalid arguments, hw_mgr_priv=%pK, config_hw_args=%pK", + hw_mgr_priv, config_hw_args); + return -EINVAL; + } + ife_hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv; + + cfg = config_hw_args; + ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map; + if (!ctx) { + CAM_ERR(CAM_ISP, 
"Invalid context is used"); + return -EINVAL; + } + + if (!ctx->flags.ctx_in_use || !ctx->cdm_cmd) { + CAM_ERR(CAM_ISP, + "Invalid context parameters : ctx_index %u, ctx_in_use=%d, cdm_cmd=%pK", + ctx->ctx_index, ctx->flags.ctx_in_use, ctx->cdm_cmd); + return -EPERM; + } + + if (atomic_read(&ctx->overflow_pending)) { + CAM_DBG(CAM_ISP, + "Ctx[%pK][%u] Overflow pending, cannot apply req %llu", + ctx, ctx->ctx_index, cfg->request_id); + return -EPERM; + } + + /* + * Assuming overflow recovery happens on req N, and we may + * haven't got all the result for req N while apply N + 1, + * so we reset try_recovery_cnt while apply N + 2. + */ + if (ctx->try_recovery_cnt && + (cfg->request_id > (ctx->recovery_req_id + 1))) { + ctx->try_recovery_cnt = 0; + ctx->recovery_req_id = 0; + CAM_DBG(CAM_ISP, + "Ctx[%pK][%u] Reset overflow recovery count for req %llu", + ctx, ctx->ctx_index, cfg->request_id); + } + + rc = cam_ife_hw_mgr_irq_injection(ife_hw_mgr, cfg->request_id); + if (rc) + CAM_ERR(CAM_ISP, "Failed to inject IRQ at req %llu", + cfg->request_id); + + hw_update_data = (struct cam_isp_prepare_hw_update_data *) cfg->priv; + hw_update_data->isp_mgr_ctx = ctx; + ctx->cdm_userdata.request_id = cfg->request_id; + ctx->cdm_userdata.hw_update_data = hw_update_data; + + CAM_DBG(CAM_ISP, "Ctx[%pK][%u] : Applying Req %lld, init_packet=%d", + ctx, ctx->ctx_index, cfg->request_id, cfg->init_packet); + + if (cfg->reapply_type && cfg->cdm_reset_before_apply) { + if (ctx->last_cdm_done_req < cfg->request_id) { + is_cdm_hung = !cam_cdm_detect_hang_error(ctx->cdm_handle); + CAM_ERR_RATE_LIMIT(CAM_ISP, + "ctx_idx: %u CDM callback not received for req: %lld, last_cdm_done_req: %lld, is_cdm_hung: %d", + ctx->ctx_index, cfg->request_id, ctx->last_cdm_done_req, + is_cdm_hung); + + if (!is_cdm_hung) + cam_cdm_dump_debug_registers(ctx->cdm_handle); + + rc = cam_cdm_reset_hw(ctx->cdm_handle); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM reset unsuccessful for req: %lld. 
ctx: %u, rc: %d", + cfg->request_id, ctx->ctx_index, rc); + ctx->last_cdm_done_req = 0; + return rc; + } + } else { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM callback received, should wait for buf done for req: %lld, ctx_idx: %u", + cfg->request_id, ctx->ctx_index); + return -EALREADY; + } + ctx->last_cdm_done_req = 0; + } + + if (hw_update_data->exp_order_update_valid) { + rc = cam_isp_blob_path_exp_order_update(ctx, cfg->request_id, hw_update_data); + if (rc) { + CAM_ERR(CAM_ISP, "Exp order update failed for req: %llu rc:%d ctx_idx=%u", + cfg->request_id, rc, ctx->ctx_index); + } + } + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_ISP, "Presil Mode - Skipping CLK BW Update"); + goto skip_bw_clk_update; + } + + CAM_DBG(CAM_PERF, + "ctx_idx=%u, bw_config_version=%d config_valid[BW VFE_CLK SFE_CLK]:[%d %d %d]", + ctx->ctx_index, ctx->bw_config_version, + hw_update_data->bw_clk_config.bw_config_valid, + hw_update_data->bw_clk_config.ife_clock_config_valid, + hw_update_data->bw_clk_config.sfe_clock_config_valid); + + if ((ctx->flags.dynamic_drv_supported) || + (hw_update_data->drv_config_valid)) { + rc = cam_isp_mgr_drv_config(ctx, cfg->request_id, + hw_update_data->force_disable_drv, hw_update_data); + if (rc) { + CAM_ERR(CAM_ISP, "DRV config failed for req: %llu rc:%d ctx_idx=%u", + cfg->request_id, + rc, ctx->ctx_index); + } + } + + /* + * Update clock and bw values to top layer, the actual application of these + * votes to hw will happen for all relevant hw indices at once, in a separate + * finish update call + */ + if (hw_update_data->bw_clk_config.ife_clock_config_valid) { + rc = cam_isp_blob_ife_clock_update((struct cam_isp_clock_config *) + &hw_update_data->bw_clk_config.ife_clock_config, ctx); + if (rc) { + CAM_ERR(CAM_PERF, "Clock Update Failed, rc=%d, ctx_idx=%u", + rc, ctx->ctx_index); + return rc; + } + } + + if (hw_update_data->bw_clk_config.sfe_clock_config_valid) { + rc = cam_isp_blob_sfe_clock_update((struct cam_isp_clock_config *) + 
&hw_update_data->bw_clk_config.sfe_clock_config, ctx); + if (rc) { + CAM_ERR(CAM_PERF, "Clock Update Failed, rc=%d, ctx_idx=%u", + rc, ctx->ctx_index); + return rc; + } + } + + if (hw_update_data->bw_clk_config.bw_config_valid) { + if (ctx->bw_config_version == CAM_ISP_BW_CONFIG_V1) { + rc = cam_isp_blob_bw_update( + (struct cam_isp_bw_config *) + &hw_update_data->bw_clk_config.bw_config, ctx); + if (rc) { + CAM_ERR(CAM_PERF, "Bandwidth Update Failed rc: %d, ctx_idx=%u", + rc, ctx->ctx_index); + return rc; + } + } else if ((ctx->bw_config_version == CAM_ISP_BW_CONFIG_V2) || + (ctx->bw_config_version == CAM_ISP_BW_CONFIG_V3)) { + rc = cam_isp_blob_bw_update_v2(&hw_update_data->bw_clk_config.bw_config_v2, + ctx); + if (rc) { + CAM_ERR(CAM_PERF, "Bandwidth Update Failed rc: %d, ctx_idx=%u", + rc, ctx->ctx_index); + return rc; + } + } else { + CAM_ERR(CAM_PERF, "Invalid bw config version: %d, ctx_idx=%u", + ctx->bw_config_version, ctx->ctx_index); + } + } + + /* Apply the updated values in top layer to the HW*/ + rc = cam_ife_mgr_finish_clk_bw_update(ctx, cfg->request_id, false); + if (rc) { + CAM_ERR(CAM_ISP, "Failed in finishing clk/bw update rc: %d, ctx_idx=%u", + rc, ctx->ctx_index); + cam_ife_mgr_print_blob_info(ctx, cfg->request_id, hw_update_data); + return rc; + } + +skip_bw_clk_update: + rc = cam_ife_mgr_apply_fcg_update(ctx, hw_update_data, cfg); + if (rc) { + CAM_ERR(CAM_ISP, "Failed in updating FCG values %u", ctx->ctx_index); + return rc; + } + + CAM_DBG(CAM_ISP, + "Enter ctx id:%u num_hw_upd_entries %d request id: %llu", + ctx->ctx_index, cfg->num_hw_update_entries, cfg->request_id); + + if (cam_presil_mode_enabled() || cfg->init_packet || hw_update_data->mup_en || + g_ife_hw_mgr.debug_cfg.per_req_wait_cdm || + (ctx->ctx_config & CAM_IFE_CTX_CFG_SW_SYNC_ON)) + wait_for_cdm = true; + + if (cfg->num_hw_update_entries > 0) { + cdm_cmd = ctx->cdm_cmd; + cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE; + cdm_cmd->userdata = ctx; + cdm_cmd->cookie = 
cfg->request_id; + cdm_cmd->gen_irq_arb = false; + cdm_cmd->genirq_buff = &hw_update_data->kmd_cmd_buff_info; + + if (wait_for_cdm) + cdm_cmd->flag = true; + else + cdm_cmd->flag = false; + + for (i = 0 ; i < cfg->num_hw_update_entries; i++) { + cmd = (cfg->hw_update_entries + i); + + if ((cfg->reapply_type == CAM_CONFIG_REAPPLY_IO) && + (cmd->flags == CAM_ISP_IQ_BL)) { + skip++; + continue; + } + + if ((cfg->reapply_type == CAM_CONFIG_REAPPLY_IQ) && + (cmd->flags == CAM_ISP_IOCFG_BL)) { + skip++; + continue; + } + + if (cmd->flags == CAM_ISP_UNUSED_BL || + cmd->flags >= CAM_ISP_BL_MAX) + CAM_ERR(CAM_ISP, "Unexpected BL type %d, ctx_idx=%u", + cmd->flags, ctx->ctx_index); + + if (hw_update_data->fcg_info.ife_fcg_online && + (hw_update_data->fcg_info.ife_fcg_entry_idx == i)) { + CAM_DBG(CAM_ISP, + "IFE/MC_TFE FCG hw entry is detected, num_ent: %d, ctx_idx: %u, request id: %llu, use current cfg: %d", + i, ctx->ctx_index, cfg->request_id, + hw_update_data->fcg_info.use_current_cfg); + if (hw_update_data->fcg_info.use_current_cfg) { + skip++; + continue; + } + } + + if (hw_update_data->fcg_info.sfe_fcg_online && + (hw_update_data->fcg_info.sfe_fcg_entry_idx == i)) { + CAM_DBG(CAM_ISP, + "SFE FCG hw entry is detected, num_ent: %d, ctx_idx: %u, request id: %llu, use current cfg: %d", + i, ctx->ctx_index, cfg->request_id, + hw_update_data->fcg_info.use_current_cfg); + if (hw_update_data->fcg_info.use_current_cfg) { + skip++; + continue; + } + } + + if (cmd->single_apply_only) { + skip++; + continue; + } + + if (cmd->flags == CAM_ISP_DEBUG_ENTRY) + cmd->single_apply_only = true; + + cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle = cmd->handle; + cdm_cmd->cmd_flex[i - skip].offset = cmd->offset; + cdm_cmd->cmd_flex[i - skip].len = cmd->len; + cdm_cmd->cmd_flex[i - skip].arbitrate = false; + + if (g_ife_hw_mgr.debug_cfg.enable_cdm_cmd_check) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "Enter cdm cmd_buf validation"); + rc = cam_packet_util_get_cmd_mem_addr( + cdm_cmd->cmd_flex[i - 
skip].bl_addr.mem_handle, + &buf_addr, &len); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to get buf_addr and len for mem_handle: %d ctx id: %u request id: %llu", + cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle, + ctx->ctx_index, cfg->request_id); + continue; + } + + if (((size_t)cdm_cmd->cmd_flex[i - skip].offset >= len) || + ((size_t)cdm_cmd->cmd_flex[i - skip].len) > + (len - (size_t)cdm_cmd->cmd_flex[i - skip].offset)) { + CAM_ERR(CAM_UTIL, "invalid mem len:%zu cmd_inplen:%u off:%u", + len, cdm_cmd->cmd_flex[i - skip].len, + cdm_cmd->cmd_flex[i - skip].offset); + cam_mem_put_cpu_buf( + cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle); + return -EINVAL; + } + + buf_start = (uint32_t *)((uint8_t *) buf_addr + + cdm_cmd->cmd_flex[i - skip].offset); + buf_end = (uint32_t *)((uint8_t *) buf_start + + cdm_cmd->cmd_flex[i - skip].len - 1); + cmd_type = ((uint32_t)(*buf_start) >> CAM_CDM_COMMAND_OFFSET); + if ((i == 0) && (cmd_type != CAM_CDM_CMD_CHANGE_BASE)) { + CAM_ERR(CAM_ISP, + "first cmd in cmd_buf is not change_base, cmd_type: %u ctx id: %u request id: %llu", + cmd_type, ctx->ctx_index, cfg->request_id); + cam_cdm_util_dump_cmd_buf(buf_start, buf_end); + cam_mem_put_cpu_buf( + cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle); + return -EINVAL; + } + + if (cam_cdm_util_validate_cmd_buf(buf_start, buf_end)) { + CAM_ERR(CAM_ISP, + "found invalid cmd in cmd_buf, ctx id: %u request id: %llu", + ctx->ctx_index, cfg->request_id); + cam_cdm_util_dump_cmd_buf(buf_start, buf_end); + cam_mem_put_cpu_buf( + cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle); + return -EINVAL; + } + cam_mem_put_cpu_buf(cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle); + } + } + cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries - skip; + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_ISP, + "Sending relevant buffers for request:%llu to presil, ctx_idx=%u", + cfg->request_id, ctx->ctx_index); + rc = cam_presil_send_buffers_from_packet(hw_update_data->packet, + 
g_ife_hw_mgr.mgr_common.img_iommu_hdl, + g_ife_hw_mgr.mgr_common.cmd_iommu_hdl); + if (rc) { + CAM_ERR(CAM_ISP, + "Error sending buffers for request:%llu to presil, ctx=%u", + cfg->request_id, ctx->ctx_index); + return rc; + } + } + + reinit_completion(&ctx->config_done_complete); + ctx->applied_req_id = cfg->request_id; + + CAM_DBG(CAM_ISP, "Submit to CDM, ctx_idx=%u", ctx->ctx_index); + + if (wait_for_cdm) + atomic_set(&ctx->cdm_done, 0); + + rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to apply the configs for req %llu, rc %d ctx_idx=%u", + cfg->request_id, rc, ctx->ctx_index); + return rc; + } + + if (wait_for_cdm) { + rem_jiffies = cam_common_wait_for_completion_timeout( + &ctx->config_done_complete, + msecs_to_jiffies(60)); + if (rem_jiffies == 0) { + CAM_ERR(CAM_ISP, + "config done completion timeout for req_id=%llu ctx_index %u", + cfg->request_id, ctx->ctx_index); + rc = cam_cdm_detect_hang_error(ctx->cdm_handle); + if (rc < 0) { + cam_cdm_dump_debug_registers( + ctx->cdm_handle); + rc = -ETIMEDOUT; + } else { + CAM_DBG(CAM_ISP, + "Wq delayed but IRQ CDM done, ctx_index %u", + ctx->ctx_index); + } + } else { + CAM_DBG(CAM_ISP, + "config done Success for req_id=%llu ctx_index %u", + cfg->request_id, ctx->ctx_index); + /* Update last applied MUP */ + if (hw_update_data->mup_en) { + ctx->current_mup = hw_update_data->mup_val; + ctx->curr_num_exp = hw_update_data->num_exp; + } + hw_update_data->mup_en = false; + + /* Try for INIT packet reg dump by default - no debugfs set */ + if (cfg->init_packet && !g_ife_hw_mgr.debug_cfg.per_req_reg_dump) + cam_ife_mgr_handle_reg_dump(ctx, + hw_update_data->reg_dump_buf_desc, + hw_update_data->num_reg_dump_buf, + CAM_ISP_PACKET_META_REG_DUMP_PER_REQUEST, + NULL, false); + } + } + + cam_ife_mgr_send_frame_event(cfg->request_id, ctx->ctx_index); + } else { + CAM_ERR(CAM_ISP, "No commands to config, ctx_index %u", ctx->ctx_index); + } + + CAM_DBG(CAM_ISP, "Exit: Config Done: 
%llu, ctx_index %u", cfg->request_id, ctx->ctx_index); + return rc; +} + +static int cam_ife_mgr_stop_hw_in_overflow(void *stop_hw_args) +{ + int rc = 0; + struct cam_hw_stop_args *stop_args = stop_hw_args; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_ife_hw_mgr_ctx *ctx; + uint32_t i, master_base_idx = 0; + + if (!stop_hw_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map; + if (!ctx || !ctx->flags.ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + CAM_DBG(CAM_ISP, "Enter...ctx id:%u", + ctx->ctx_index); + + if (!ctx->num_base) { + CAM_ERR(CAM_ISP, "Number of bases are zero, ctx_index %u", ctx->ctx_index); + return -EINVAL; + } + + /* get master base index first */ + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) { + master_base_idx = ctx->base[i].idx; + break; + } + } + + if (i == ctx->num_base) + master_base_idx = ctx->base[0].idx; + + /* stop the master CSID path first */ + cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid, + master_base_idx, CAM_CSID_HALT_IMMEDIATELY, false); + + /* Stop rest of the CSID paths */ + for (i = 0; i < ctx->num_base; i++) { + if (i == master_base_idx) + continue; + + cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid, + ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY, false); + } + + /* IFE mux in resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + cam_ife_hw_mgr_stop_hw_res(hw_mgr_res); + } + + /* IFE bus rd resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) { + cam_ife_hw_mgr_stop_hw_res(hw_mgr_res); + } + + /* IFE out resources */ + for (i = 0; i < ctx->num_acq_vfe_out; i++) + cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]); + + + /* Stop tasklet for context */ + cam_tasklet_stop(ctx->common.tasklet_info); + CAM_DBG(CAM_ISP, "Exit...ctx id:%u rc :%d", + ctx->ctx_index, rc); + + return rc; +} + 
+static int cam_ife_mgr_bw_control(struct cam_ife_hw_mgr_ctx *ctx, + enum cam_isp_bw_control_action action) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_bw_control_args bw_ctrl_args; + int rc = -EINVAL; + uint32_t i; + + CAM_DBG(CAM_ISP, "Enter...ctx id:%u", ctx->ctx_index); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + bw_ctrl_args.node_res = + hw_mgr_res->hw_res[i]; + bw_ctrl_args.action = action; + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_BW_CONTROL, + &bw_ctrl_args, + sizeof(struct cam_isp_bw_control_args)); + if (rc) + CAM_ERR(CAM_ISP, "BW Update failed, ctx id:%u", + ctx->ctx_index); + } else + CAM_WARN(CAM_ISP, "NULL hw_intf!, ctx id:%u", ctx->ctx_index); + } + } + + list_for_each_entry(hw_mgr_res, &ctx->res_list_sfe_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + bw_ctrl_args.node_res = + hw_mgr_res->hw_res[i]; + bw_ctrl_args.action = action; + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_BW_CONTROL, + &bw_ctrl_args, + sizeof(struct cam_isp_bw_control_args)); + if (rc) + CAM_ERR(CAM_ISP, "BW Update failed, ctx id:%u", + ctx->ctx_index); + } else + CAM_WARN(CAM_ISP, "NULL hw_intf! 
ctx id:%u", ctx->ctx_index); + } + } + + return rc; +} + +static int cam_ife_mgr_pause_hw(struct cam_ife_hw_mgr_ctx *ctx) +{ + return cam_ife_mgr_bw_control(ctx, CAM_ISP_BW_CONTROL_EXCLUDE); +} + +/* entry function: stop_hw */ +static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args) +{ + int rc = 0; + struct cam_hw_stop_args *stop_args = stop_hw_args; + struct cam_isp_stop_args *stop_isp; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_ife_hw_mgr_ctx *ctx; + enum cam_ife_csid_halt_cmd csid_halt_type; + uint32_t i, master_base_idx = 0; + unsigned long rem_jiffies = 0; + + if (!hw_mgr_priv || !stop_hw_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map; + if (!ctx || !ctx->flags.ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + if (!ctx->num_base) { + CAM_ERR(CAM_ISP, "number of bases are zero, ctx id:%u", ctx->ctx_index); + return -EINVAL; + } + + /* Cancel all scheduled recoveries without affecting future recoveries */ + atomic_inc(&ctx->recovery_id); + + CAM_DBG(CAM_ISP, " Enter...ctx id:%u", ctx->ctx_index); + stop_isp = (struct cam_isp_stop_args *)stop_args->args; + + /* stream off or flush case reset the drv info */ + if (!stop_isp->is_internal_stop) { + ctx->wr_per_req_index = 0; + memset(ctx->per_req_info, 0, sizeof(ctx->per_req_info)); + } + + /* Set the csid halt command */ + if ((stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY) || + ctx->flags.dsp_enabled) + csid_halt_type = CAM_CSID_HALT_AT_FRAME_BOUNDARY; + else + csid_halt_type = CAM_CSID_HALT_IMMEDIATELY; + + /* Note:stop resource will remove the irq mask from the hardware */ + + CAM_DBG(CAM_ISP, "Halting CSIDs, ctx id:%u", ctx->ctx_index); + + /* get master base index first */ + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) { + master_base_idx = ctx->base[i].idx; + break; + } + } + + rc = 
cam_cdm_reset_hw(ctx->cdm_handle); + if (rc) { + CAM_WARN(CAM_ISP, "CDM: %u reset failed rc: %d in ctx: %u", + ctx->cdm_id, rc, ctx->ctx_index); + rc = 0; + } + + /* + * If Context does not have PIX resources and has only RDI resource + * then take the first base index. + */ + if (i == ctx->num_base) + master_base_idx = ctx->base[0].idx; + + /*Change slave mode*/ + if (csid_halt_type == CAM_CSID_HALT_IMMEDIATELY) + cam_ife_mgr_csid_change_halt_mode(ctx, + CAM_CSID_HALT_MODE_INTERNAL); + + + CAM_DBG(CAM_ISP, "Stopping master CSID idx %d, ctx id:%u", master_base_idx, ctx->ctx_index); + + + /* Stop the master CSID path first */ + cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid, + master_base_idx, csid_halt_type, stop_isp->standby_en); + + /* stop rest of the CSID paths */ + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].idx == master_base_idx) + continue; + CAM_DBG(CAM_ISP, "Stopping CSID idx %d i %d master %d ctx id:%u", + ctx->base[i].idx, i, master_base_idx, ctx->ctx_index); + + cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid, + ctx->base[i].idx, csid_halt_type, stop_isp->standby_en); + } + + /* Ensure HW layer does not reset any clk data since it's + * internal stream off/resume + */ + if (stop_isp->is_internal_stop) + cam_ife_mgr_finish_clk_bw_update(ctx, 0, true); + else + ctx->is_init_drv_cfg_received = false; + + /* check to avoid iterating loop */ + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + CAM_DBG(CAM_ISP, "Going to stop SFE Out, ctx id:%u", ctx->ctx_index); + + /* SFE out resources */ + for (i = 0; i < ctx->num_acq_sfe_out; i++) + cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_sfe_out[i]); + + CAM_DBG(CAM_ISP, "Going to stop SFE SRC resources, ctx id:%u", ctx->ctx_index); + + /* SFE in resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_sfe_src, list) + cam_ife_hw_mgr_stop_hw_res(hw_mgr_res); + } + + CAM_DBG(CAM_ISP, "Going to stop IFE out resources, ctx id:%u", ctx->ctx_index); + + /* IFE out resources */ + for (i = 0; i < 
ctx->num_acq_vfe_out; i++) + cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]); + + CAM_DBG(CAM_ISP, "Going to stop IFE Mux, ctx id:%u", ctx->ctx_index); + + /* IFE mux in resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + cam_ife_hw_mgr_stop_hw_res(hw_mgr_res); + } + + /* bus rd resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) { + cam_ife_hw_mgr_stop_hw_res(hw_mgr_res); + } + + cam_tasklet_stop(ctx->common.tasklet_info); + + /* reset scratch buffer/mup expect INIT again for UMD triggered stop/flush */ + if (!stop_isp->is_internal_stop) { + ctx->current_mup = 0; + if (ctx->scratch_buf_info.sfe_scratch_config) + memset(ctx->scratch_buf_info.sfe_scratch_config, 0, + sizeof(struct cam_sfe_scratch_buf_cfg)); + + if (ctx->scratch_buf_info.ife_scratch_config) + memset(ctx->scratch_buf_info.ife_scratch_config, 0, + sizeof(struct cam_ife_scratch_buf_cfg)); + } + + cam_ife_mgr_pause_hw(ctx); + + if (!atomic_read(&ctx->cdm_done)) { + rem_jiffies = cam_common_wait_for_completion_timeout( + &ctx->config_done_complete, + msecs_to_jiffies(10)); + + if (rem_jiffies == 0) + CAM_WARN(CAM_ISP, + "config done completion timeout for last applied req_id=%llu ctx_index %u", + ctx->applied_req_id, ctx->ctx_index); + + } + + if (stop_isp->stop_only) + goto end; + + if (cam_cdm_stream_off(ctx->cdm_handle)) + CAM_ERR(CAM_ISP, "CDM stream off failed %d, ctx_id: %u", + ctx->cdm_handle, ctx->ctx_index); + + + cam_ife_hw_mgr_deinit_hw(ctx); + CAM_DBG(CAM_ISP, + "Stop success for ctx id:%u rc :%d", ctx->ctx_index, rc); + + mutex_lock(&g_ife_hw_mgr.ctx_mutex); + if (!atomic_dec_return(&g_ife_hw_mgr.active_ctx_cnt)) { + rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE); + if (rc) { + CAM_ERR(CAM_ISP, + "SAFE SCM call failed:Check TZ/HYP dependency, ctx_id: %u", + ctx->ctx_index); + rc = 0; + } + } + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); + +end: + if (!stop_isp->is_internal_stop && !ctx->flags.skip_reg_dump_buf_put) { + for (i = 
0; i < ctx->num_reg_dump_buf; i++) + cam_mem_put_cpu_buf(ctx->reg_dump_buf_desc[i].mem_handle); + ctx->num_reg_dump_buf = 0; + } + ctx->flags.skip_reg_dump_buf_put = false; + ctx->flags.dump_on_error = false; + ctx->flags.dump_on_flush = false; + return rc; +} + +static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr, + uint32_t hw_idx) +{ + uint32_t i = 0; + struct cam_hw_intf *vfe_hw_intf; + uint32_t vfe_reset_type; + + if (!hw_mgr) { + CAM_DBG(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + /* Reset VFE HW*/ + vfe_reset_type = CAM_VFE_HW_RESET_HW; + + for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) { + if (!hw_mgr->ife_devices[i]) + continue; + + if (hw_idx != hw_mgr->ife_devices[i]->hw_intf->hw_idx) + continue; + CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx); + vfe_hw_intf = hw_mgr->ife_devices[i]->hw_intf; + vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv, + &vfe_reset_type, sizeof(vfe_reset_type)); + break; + } + + CAM_DBG(CAM_ISP, "Exit Successfully"); + return 0; +} + +static int cam_ife_mgr_unmask_bus_wr_irq(struct cam_ife_hw_mgr *hw_mgr, + uint32_t hw_idx) +{ + uint32_t i = 0, dummy_args = 0; + struct cam_hw_intf *vfe_hw_intf; + + if (!hw_mgr) { + CAM_DBG(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) { + if (!hw_mgr->ife_devices[i]) + continue; + + if (hw_idx != hw_mgr->ife_devices[i]->hw_intf->hw_idx) + continue; + + CAM_DBG(CAM_ISP, "Unmask VFE:%d BUS_WR IRQ", hw_idx); + + vfe_hw_intf = hw_mgr->ife_devices[i]->hw_intf; + + vfe_hw_intf->hw_ops.process_cmd(vfe_hw_intf->hw_priv, + CAM_ISP_HW_CMD_UNMASK_BUS_WR_IRQ, + &dummy_args, + sizeof(dummy_args)); + + break; + } + + return 0; +} + +static int cam_ife_mgr_restart_hw(void *start_hw_args) +{ + int rc = -1; + struct cam_hw_start_args *start_args = start_hw_args; + struct cam_ife_hw_mgr_ctx *ctx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + uint32_t i; + + if (!start_hw_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + 
ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map; + if (!ctx || !ctx->flags.ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%u", ctx->ctx_index); + + cam_tasklet_start(ctx->common.tasklet_info); + + /* start the IFE out devices */ + for (i = 0; i < ctx->num_acq_vfe_out; i++) { + rc = cam_ife_hw_mgr_start_hw_res( + &ctx->res_list_ife_out[i], ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d), ctx_idx: %u", + i, ctx->ctx_index); + goto err; + } + } + + CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%u", ctx->ctx_index); + + /* Start IFE BUS RD device */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) { + rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start IFE BUS RD (%d), ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + goto err; + } + } + + /* Start the IFE mux in devices */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d), ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + goto err; + } + } + + CAM_DBG(CAM_ISP, "START CSID HW ... 
in ctx id:%u", ctx->ctx_index); + /* Start the IFE CSID HW devices */ + rc = cam_ife_mgr_csid_start_hw(ctx, CAM_IFE_PIX_PATH_RES_MAX, false, false); + if (rc) { + CAM_ERR(CAM_ISP, "Error in starting CSID HW in ctx id:%u", ctx->ctx_index); + goto err; + } + + /* Start IFE root node: do nothing */ + CAM_DBG(CAM_ISP, "Exit...(success), ctx_idx: %u", ctx->ctx_index); + return 0; + +err: + cam_ife_mgr_stop_hw_in_overflow(start_hw_args); + CAM_DBG(CAM_ISP, "Exit...(rc=%d), ctx_idx: %u", rc, ctx->ctx_index); + return rc; +} + +static int cam_ife_mgr_find_core_idx( + int split_id, + struct cam_ife_hw_mgr_ctx *ctx, + enum cam_isp_hw_type hw_type, + uint32_t *core_idx) +{ + int i; + + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].hw_type != hw_type) + continue; + + if (ctx->base[i].split_id == split_id) { + CAM_DBG(CAM_ISP, "Found core: %u for split_id: %d hw_type: %d ctx_idx: %u", + ctx->base[i].idx, split_id, hw_type, ctx->ctx_index); + *core_idx = ctx->base[i].idx; + goto end; + } + } + + CAM_ERR(CAM_ISP, "Failed to find core idx for hw_type: %d split_id %d ctx_idx: %u", + hw_type, split_id, ctx->ctx_index); + return -EINVAL; + +end: + return 0; +} + +static void cam_ife_hw_mgr_set_hw_debug_config( + struct cam_ife_hw_mgr_ctx *ctx) +{ + int i, rc; + uint32_t hw_idx = 0; + struct cam_ife_hw_mgr *hw_mgr = ctx->hw_mgr; + struct cam_ife_csid_debug_cfg_args csid_debug_args; + struct cam_sfe_debug_cfg_params sfe_debug_args = {0}; + struct cam_vfe_generic_debug_config vfe_debug_args = {0}; + + /* Set CSID debug args */ + csid_debug_args.csid_debug = hw_mgr->debug_cfg.csid_debug; + csid_debug_args.csid_rx_capture_debug = hw_mgr->debug_cfg.rx_capture_debug; + csid_debug_args.rx_capture_debug_set = hw_mgr->debug_cfg.rx_capture_debug_set; + csid_debug_args.csid_testbus_debug = hw_mgr->debug_cfg.csid_test_bus; + csid_debug_args.set_domain_id_enabled = hw_mgr->debug_cfg.enable_csid_set_domain_id; + csid_debug_args.domain_id_value = hw_mgr->debug_cfg.csid_domain_id_value; + 
csid_debug_args.enable_cdr_sweep_debug = hw_mgr->debug_cfg.enable_cdr_sweep_debug; + + /* Set SFE debug args */ + sfe_debug_args.cache_config = false; + sfe_debug_args.u.dbg_cfg.sfe_debug_cfg = hw_mgr->debug_cfg.sfe_debug; + sfe_debug_args.u.dbg_cfg.sfe_sensor_sel = hw_mgr->debug_cfg.sfe_sensor_diag_cfg; + sfe_debug_args.u.dbg_cfg.num_counters = hw_mgr->isp_caps.num_sfe_perf_counters; + for (i = 0; i < hw_mgr->isp_caps.num_sfe_perf_counters; i++) { + sfe_debug_args.u.dbg_cfg.sfe_perf_counter_val[i] = + hw_mgr->debug_cfg.sfe_perf_counter_val[i]; + } + + for (i = 0; i < hw_mgr->isp_caps.num_sfe_bus_wr_perf_counters; i++) { + sfe_debug_args.u.dbg_cfg.sfe_bus_wr_perf_counter_val[i] = + hw_mgr->debug_cfg.sfe_bus_wr_perf_counter_val[i]; + } + + /* Set IFE bus debug args */ + vfe_debug_args.disable_ife_mmu_prefetch = hw_mgr->debug_cfg.disable_ife_mmu_prefetch; + vfe_debug_args.enable_ife_frame_irqs = hw_mgr->debug_cfg.enable_ife_frame_irqs; + vfe_debug_args.num_counters = hw_mgr->isp_caps.num_ife_perf_counters; + vfe_debug_args.diag_config = hw_mgr->debug_cfg.camif_debug; + for (i = 0; i < hw_mgr->isp_caps.num_ife_perf_counters; i++) + vfe_debug_args.vfe_perf_counter_val[i] = + hw_mgr->debug_cfg.ife_perf_counter_val[i]; + + for (i = 0; i < hw_mgr->isp_caps.num_ife_bus_wr_perf_counters; i++) { + vfe_debug_args.vfe_bus_wr_perf_counter_val[i] = + hw_mgr->debug_cfg.ife_bus_wr_perf_counter_val[i]; + } + + /* Iterate over HW acquired for this stream and update debug config */ + for (i = 0; i < ctx->num_base; i++) { + hw_idx = ctx->base[i].idx; + + switch (ctx->base[i].hw_type) { + case CAM_ISP_HW_TYPE_VFE: + if (hw_mgr->ife_devices[hw_idx]) { + rc = hw_mgr->ife_devices[hw_idx]->hw_intf->hw_ops.process_cmd( + hw_mgr->ife_devices[hw_idx]->hw_intf->hw_priv, + CAM_ISP_HW_CMD_IFE_DEBUG_CFG, + &vfe_debug_args, sizeof(vfe_debug_args)); + if (rc) + CAM_DBG(CAM_ISP, + "Failed to set IFE_%u bus wr debug cfg rc: %d, ctx_idx: %u", + hw_idx, rc, ctx->ctx_index); + } + break; + case 
CAM_ISP_HW_TYPE_SFE: + if (hw_mgr->sfe_devices[hw_idx]) { + rc = hw_mgr->sfe_devices[hw_idx]->hw_intf->hw_ops.process_cmd( + hw_mgr->sfe_devices[hw_idx]->hw_intf->hw_priv, + CAM_ISP_HW_CMD_SET_SFE_DEBUG_CFG, + &sfe_debug_args, sizeof(sfe_debug_args)); + if (rc) + CAM_DBG(CAM_ISP, + "Failed to set SFE_%u debug cfg rc: %d, ctx_idx: %u", + hw_idx, rc, ctx->ctx_index); + } + break; + case CAM_ISP_HW_TYPE_CSID: + if (hw_mgr->csid_devices[hw_idx]) { + rc = hw_mgr->csid_devices[hw_idx]->hw_ops.process_cmd( + hw_mgr->csid_devices[hw_idx]->hw_priv, + CAM_IFE_CSID_SET_CSID_DEBUG, + &csid_debug_args, sizeof(csid_debug_args)); + if (rc) + CAM_DBG(CAM_ISP, + "Failed to set CSID_%u debug cfg rc: %d, ctx_idx: %u", + hw_idx, rc, ctx->ctx_index); + } + break; + default: + break; + } + } +} + +static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args) +{ + int rc = -1; + struct cam_isp_start_args *start_isp = start_hw_args; + struct cam_hw_stop_args stop_args; + struct cam_isp_stop_args stop_isp; + struct cam_ife_hw_mgr_ctx *ctx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *rsrc_node = NULL; + uint32_t i; + uint32_t camif_debug; + uint32_t primary_rdi_src_res; + uint32_t primary_rdi_csid_res; + struct cam_ife_csid_top_config_args csid_top_args = {0}; + struct cam_hw_intf *hw_intf; + + primary_rdi_src_res = CAM_ISP_HW_VFE_IN_MAX; + primary_rdi_csid_res = CAM_IFE_PIX_PATH_RES_MAX; + + if (!hw_mgr_priv || !start_isp) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + ctx = (struct cam_ife_hw_mgr_ctx *) + start_isp->hw_config.ctxt_to_hw_map; + if (!ctx || !ctx->flags.ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + if ((!ctx->flags.init_done) && start_isp->start_only) { + CAM_ERR(CAM_ISP, "Invalid args init_done %d start_only %d, ctx_idx: %u", + ctx->flags.init_done, start_isp->start_only, ctx->ctx_index); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "Enter... 
ctx id:%u", + ctx->ctx_index); + + rc = cam_cpas_query_drv_enable(&g_ife_hw_mgr.cam_ddr_drv_support, + &g_ife_hw_mgr.cam_clk_drv_support); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to query DRV enable rc: %d", rc); + return -EINVAL; + } + + /* update Bandwidth should be done at the hw layer */ + + cam_tasklet_start(ctx->common.tasklet_info); + + if (ctx->flags.init_done && start_isp->start_only) { + /* Unmask BUS_WR bit in VFE top */ + for (i = 0; i < ctx->num_base; i++) { + rc = cam_ife_mgr_unmask_bus_wr_irq(hw_mgr_priv, + ctx->base[i].idx); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to unmask VFE:%d BUS_WR IRQ rc:%d ctx_idx: %u", + ctx->base[i].idx, rc, ctx->ctx_index); + } + goto start_only; + } + + /* Update debug config for acquired HW */ + cam_ife_hw_mgr_set_hw_debug_config(ctx); + + if (ctx->flags.need_csid_top_cfg) { + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, + list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + /* Updated based on sfe context */ + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + csid_top_args.input_core_type = + CAM_IFE_CSID_INPUT_CORE_SFE_IFE; + rc = cam_ife_mgr_find_core_idx(i, ctx, CAM_ISP_HW_TYPE_SFE, + &csid_top_args.core_idx); + if (rc) + goto tasklet_stop; + } else { + csid_top_args.input_core_type = + CAM_IFE_CSID_INPUT_CORE_IFE; + } + + if (ctx->flags.is_offline) + csid_top_args.is_sfe_offline = true; + + if (ctx->flags.is_sfe_fs) + csid_top_args.is_sfe_fs = true; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_IFE_CSID_TOP_CONFIG, + &csid_top_args, + sizeof(csid_top_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "CSID: %u top config cmd failed, rc:%d", + hw_intf->hw_idx, rc); + goto tasklet_stop; + } + CAM_DBG(CAM_ISP, + "CSID: %u split_id: %d core_idx: %u core_type: %u is_sfe_offline: %d ctx_idx: %u", + hw_intf->hw_idx, i, csid_top_args.core_idx, + csid_top_args.input_core_type, + csid_top_args.is_sfe_offline, 
ctx->ctx_index); + } + } + } + + camif_debug = g_ife_hw_mgr.debug_cfg.camif_debug; + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + rsrc_node = hw_mgr_res->hw_res[i]; + if (rsrc_node->process_cmd && (rsrc_node->res_id == + CAM_ISP_HW_VFE_IN_CAMIF)) { + rc = hw_mgr_res->hw_res[i]->process_cmd( + hw_mgr_res->hw_res[i], + CAM_ISP_HW_CMD_SET_CAMIF_DEBUG, + &camif_debug, + sizeof(camif_debug)); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE process cmd failed for rsrc_id:%d, rc:%d", + rsrc_node->res_id, rc); + } + } + } + } + + rc = cam_ife_hw_mgr_init_hw(ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Init failed, ctx_idx: %u", ctx->ctx_index); + goto tasklet_stop; + } + + ctx->flags.init_done = true; + + mutex_lock(&g_ife_hw_mgr.ctx_mutex); + if (!atomic_fetch_inc(&g_ife_hw_mgr.active_ctx_cnt)) { + rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_ENABLE); + if (rc) { + CAM_ERR(CAM_ISP, + "SAFE SCM call failed:Check TZ/HYP dependency, ctx_idx: %u", + ctx->ctx_index); + rc = -EFAULT; + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); + goto deinit_hw; + } + } + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); + + rc = cam_cdm_stream_on(ctx->cdm_handle); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start cdm (%d), ctx_idx: %u", + ctx->cdm_handle, ctx->ctx_index); + goto safe_disable; + } + +start_only: + + atomic_set(&ctx->overflow_pending, 0); + + /* Apply initial configuration */ + CAM_DBG(CAM_ISP, "Config HW, ctx_idx: %u", ctx->ctx_index); + rc = cam_ife_mgr_config_hw(hw_mgr_priv, &start_isp->hw_config); + if (rc) { + CAM_ERR(CAM_ISP, + "Config HW failed, start_only=%d, rc=%d ctx_idx: %u", + start_isp->start_only, rc, ctx->ctx_index); + goto cdm_streamoff; + } + + CAM_DBG(CAM_ISP, "START IFE OUT ... 
in ctx id:%u", + ctx->ctx_index); + /* start the IFE out devices */ + for (i = 0; i < ctx->num_acq_vfe_out; i++) { + rc = cam_ife_hw_mgr_start_hw_res( + &ctx->res_list_ife_out[i], ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d), ctx_idx: %u", + i, ctx->ctx_index); + goto err; + } + } + + if (cam_isp_is_ctx_primary_rdi(ctx) && (ctx->pri_rdi_out_res < + g_ife_hw_mgr.isp_caps.max_vfe_out_res_type)) { + hw_mgr_res = + &ctx->res_list_ife_out[ctx->vfe_out_map[ctx->pri_rdi_out_res & 0xff]]; + hw_mgr_res->hw_res[0]->is_rdi_primary_res = true; + primary_rdi_src_res = + cam_convert_rdi_out_res_id_to_src(ctx->pri_rdi_out_res); + primary_rdi_csid_res = + cam_ife_hw_mgr_get_ife_csid_rdi_res_type(ctx->pri_rdi_out_res); + } + + CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%u", + ctx->ctx_index); + /* Start the IFE mux in devices */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + if (primary_rdi_src_res == hw_mgr_res->res_id) { + hw_mgr_res->hw_res[0]->is_rdi_primary_res = + cam_isp_is_ctx_primary_rdi(ctx); + } + + rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start IFE Mux (%d), ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + goto err; + } + } + + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + CAM_DBG(CAM_ISP, "START SFE OUT ... in ctx id:%u", + ctx->ctx_index); + for (i = 0; i < ctx->num_acq_sfe_out; i++) { + hw_mgr_res = &ctx->res_list_sfe_out[i]; + rc = cam_ife_hw_mgr_start_hw_res( + hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start SFE OUT (%d), ctx_idx: %u", + i, ctx->ctx_index); + goto err; + } + } + + CAM_DBG(CAM_ISP, "START SFE SRC RSRC ... in ctx id:%u", + ctx->ctx_index); + list_for_each_entry(hw_mgr_res, &ctx->res_list_sfe_src, list) { + rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start SFE SRC (%d), ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + goto err; + } + } + } + + CAM_DBG(CAM_ISP, "START BUS RD ... 
in ctx id:%u", + ctx->ctx_index); + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) { + rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start BUS RD (%d), ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + goto err; + } + } + + if ((ctx->flags.is_sfe_fs) || (ctx->flags.is_sfe_shdr)) { + rc = cam_ife_mgr_prog_default_settings(0, false, true, ctx); + if (rc) + goto err; + + /* Reset streamon related scratch buffer config */ + cam_ife_mgr_reset_streamon_scratch_cfg(ctx); + } + + CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%u", + ctx->ctx_index); + /* Start the IFE CSID HW devices */ + rc = cam_ife_mgr_csid_start_hw(ctx, primary_rdi_csid_res, + start_isp->is_internal_start, start_isp->start_only); + if (rc) + goto err; + + /* Start IFE root node: do nothing */ + CAM_DBG(CAM_ISP, "Start success for ctx id:%u", ctx->ctx_index); + + return 0; + +err: + stop_isp.stop_only = false; + stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY; + stop_args.ctxt_to_hw_map = start_isp->hw_config.ctxt_to_hw_map; + stop_args.args = (void *)(&stop_isp); + + cam_ife_mgr_stop_hw(hw_mgr_priv, &stop_args); + CAM_DBG(CAM_ISP, "Exit...(rc=%d), ctx_idx: %u", rc, ctx->ctx_index); + return rc; + +cdm_streamoff: + cam_cdm_stream_off(ctx->cdm_handle); +safe_disable: + cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE); + +deinit_hw: + cam_ife_hw_mgr_deinit_hw(ctx); + +tasklet_stop: + cam_tasklet_stop(ctx->common.tasklet_info); + + return rc; +} + +static int cam_ife_mgr_read(void *hw_mgr_priv, void *read_args) +{ + return -EPERM; +} + +static int cam_ife_mgr_write(void *hw_mgr_priv, void *write_args) +{ + return -EPERM; +} + +static int cam_ife_mgr_reset(void *hw_mgr_priv, void *hw_reset_args) +{ + struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_reset_args *reset_args = hw_reset_args; + struct cam_ife_hw_mgr_ctx *ctx; + int rc = 0, i = 0; + + if (!hw_mgr_priv || !hw_reset_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + 
return -EINVAL; + } + + ctx = (struct cam_ife_hw_mgr_ctx *)reset_args->ctxt_to_hw_map; + if (!ctx || !ctx->flags.ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + if (hw_mgr->csid_global_reset_en) { + CAM_DBG(CAM_ISP, "Path reset not supported, ctx_idx: %u", ctx->ctx_index); + return 0; + } + + CAM_DBG(CAM_ISP, "Reset CSID and VFE, ctx_idx: %u", ctx->ctx_index); + + rc = cam_ife_hw_mgr_reset_csid(ctx, CAM_IFE_CSID_RESET_PATH); + + if (rc) { + CAM_ERR(CAM_ISP, "Failed to reset CSID:%d rc: %d ctx_idx: %u", + rc, ctx->ctx_index); + goto end; + } + + for (i = 0; i < ctx->num_base; i++) { + rc = cam_ife_mgr_reset_vfe_hw(hw_mgr, ctx->base[i].idx); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to reset VFE:%d rc: %d ctx_idx: %u", + ctx->base[i].idx, rc, ctx->ctx_index); + goto end; + } + } + +end: + return rc; +} + +static int cam_ife_mgr_release_hw(void *hw_mgr_priv, + void *release_hw_args) +{ + int rc = 0; + struct cam_hw_release_args *release_args = release_hw_args; + struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_ife_hw_mgr_ctx *ctx; + uint32_t i; + uint64_t ms, sec, min, hrs; + + if (!hw_mgr_priv || !release_hw_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map; + if (!ctx || !ctx->flags.ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + CAM_DBG(CAM_ISP, "Enter...ctx id:%u", + ctx->ctx_index); + + if (ctx->flags.init_done) + cam_ife_hw_mgr_deinit_hw(ctx); + + /* we should called the stop hw before this already */ + cam_ife_hw_mgr_release_hw_for_ctx(ctx); + + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + rc = cam_ife_mgr_update_core_info_to_cpas(ctx, false); + if (rc) + CAM_ERR(CAM_ISP, "Failed to update core info to cpas rc:%d, ctx_idx: %u", + rc, ctx->ctx_index); + } + + /* reset base info */ + ctx->num_base = 0; + memset(ctx->base, 0, sizeof(ctx->base)); + + /* release cdm handle */ + 
cam_cdm_release(ctx->cdm_handle); + + /* clean context */ + list_del_init(&ctx->list); + ctx->cdm_handle = 0; + ctx->cdm_hw_idx = -1; + ctx->cdm_ops = NULL; + ctx->ctx_config = 0; + ctx->num_reg_dump_buf = 0; + ctx->last_cdm_done_req = 0; + ctx->left_hw_idx = CAM_IFE_CSID_HW_NUM_MAX; + ctx->right_hw_idx = CAM_IFE_CSID_HW_NUM_MAX; + ctx->scratch_buf_info.num_fetches = 0; + ctx->num_acq_vfe_out = 0; + ctx->num_acq_sfe_out = 0; + CAM_MEM_FREE(ctx->res_list_ife_out); + ctx->res_list_ife_out = NULL; + ctx->pri_rdi_out_res = g_ife_hw_mgr.isp_caps.max_vfe_out_res_type; + memset(ctx->vfe_out_map, 0xff, sizeof(uint8_t) * max_ife_out_res); + + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + CAM_MEM_FREE(ctx->res_list_sfe_out); + ctx->res_list_sfe_out = NULL; + memset(ctx->sfe_out_map, 0xff, sizeof(uint8_t) * max_sfe_out_res); + } + + ctx->ctx_type = CAM_IFE_CTX_TYPE_NONE; + ctx->buf_done_controller = NULL; + ctx->mc_comp_buf_done_controller = NULL; + CAM_MEM_FREE(ctx->scratch_buf_info.sfe_scratch_config); + CAM_MEM_FREE(ctx->scratch_buf_info.ife_scratch_config); + ctx->scratch_buf_info.sfe_scratch_config = NULL; + ctx->scratch_buf_info.ife_scratch_config = NULL; + ctx->try_recovery_cnt = 0; + ctx->recovery_req_id = 0; + ctx->drv_path_idle_en = 0; + CAM_MEM_FREE(ctx->vfe_bus_comp_grp); + CAM_MEM_FREE(ctx->sfe_bus_comp_grp); + ctx->vfe_bus_comp_grp = NULL; + ctx->sfe_bus_comp_grp = NULL; + + atomic_set(&ctx->overflow_pending, 0); + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + ctx->sof_cnt[i] = 0; + ctx->eof_cnt[i] = 0; + ctx->epoch_cnt[i] = 0; + } + + cam_ife_mgr_free_cdm_cmd(&ctx->cdm_cmd); + + CAM_GET_TIMESTAMP(ctx->ts); + CAM_CONVERT_TIMESTAMP_FORMAT(ctx->ts, hrs, min, sec, ms); + + CAM_INFO(CAM_ISP, "%llu:%llu:%llu.%llu Release HW success ctx id: %u", + hrs, min, sec, ms, + ctx->ctx_index); + ctx->ctx_index = CAM_IFE_CTX_MAX; + + memset(&ctx->ts, 0, sizeof(struct timespec64)); + cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx); + return rc; +} + +static int 
cam_isp_blob_fe_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_fe_config *fe_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + int rc = -EINVAL; + uint32_t i; + struct cam_vfe_fe_update_args fe_upd_args; + + ctx = prepare->ctxt_to_hw_map; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + fe_upd_args.node_res = + hw_mgr_res->hw_res[i]; + + memcpy(&fe_upd_args.fe_config, fe_config, + sizeof(struct cam_fe_config)); + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD, + &fe_upd_args, + sizeof( + struct cam_fe_config)); + if (rc) + CAM_ERR(CAM_ISP, "fs Update failed, ctx_idx: %u", + ctx->ctx_index); + } else + CAM_WARN(CAM_ISP, "NULL hw_intf! ctx_idx: %u", ctx->ctx_index); + } + } + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + if (hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_RD) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + fe_upd_args.node_res = + hw_mgr_res->hw_res[i]; + + memcpy(&fe_upd_args.fe_config, fe_config, + sizeof(struct cam_fe_config)); + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_FE_UPDATE_IN_RD, + &fe_upd_args, + sizeof( + struct cam_vfe_fe_update_args)); + if (rc) + CAM_ERR(CAM_ISP, "fe Update failed, ctx_idx: %u", + ctx->ctx_index); + } else + CAM_WARN(CAM_ISP, "NULL hw_intf! 
ctx_idx: %u", ctx->ctx_index); + } + } + return rc; +} + +static int cam_isp_blob_ubwc_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_ubwc_config *ubwc_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ubwc_plane_cfg_v1 *ubwc_plane_cfg; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf = NULL; + uint32_t res_id_out, i; + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + uint32_t *cmd_buf_addr; + uint32_t bytes_used = 0; + int rc = 0; + + ctx = prepare->ctxt_to_hw_map; + if (!ctx) { + CAM_ERR(CAM_ISP, "Invalid ctx"); + rc = -EINVAL; + goto end; + } + + if ((prepare->num_hw_update_entries + 1) >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d max:%d ctx_idx: %u", + prepare->num_hw_update_entries, + prepare->max_hw_update_entries, ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + switch (ubwc_config->api_version) { + case CAM_UBWC_CFG_VERSION_1: + CAM_DBG(CAM_ISP, "ctx_idx: %u num_ports= %d", + ctx->ctx_index, ubwc_config->num_ports); + + kmd_buf_info = blob_info->kmd_buf_info; + for (i = 0; i < ubwc_config->num_ports; i++) { + ubwc_plane_cfg = &ubwc_config->ubwc_plane_cfg_array_flex[i][0]; + res_id_out = ubwc_plane_cfg->port_type & 0xFF; + + CAM_DBG(CAM_ISP, "UBWC config idx %d, port_type=%d ctx_idx: %u", i, + ubwc_plane_cfg->port_type, ctx->ctx_index); + + if (res_id_out >= max_ife_out_res) { + CAM_ERR(CAM_ISP, "Invalid port type:%x, ctx_idx: %u", + ubwc_plane_cfg->port_type, ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "no free kmd memory for base=%d bytes_used=%u buf_size=%u ctx_idx: %u", + blob_info->base_info->idx, bytes_used, + 
kmd_buf_info->size, ctx->ctx_index); + rc = -ENOMEM; + goto end; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4 + + total_used_bytes/4; + + if (ctx->vfe_out_map[res_id_out] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + rc = -EINVAL; + goto end; + } + + hw_mgr_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id_out]]; + if (!hw_mgr_res) { + CAM_ERR(CAM_ISP, "Invalid hw_mgr_res, ctx_idx: %u", ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + rc = -EINVAL; + goto end; + } + if (!hw_mgr_res->hw_res[blob_info->base_info->split_id]) + continue; + + rc = cam_isp_add_cmd_buf_update( + hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + blob_type_hw_cmd_map[blob_type], + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)ubwc_plane_cfg, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed cmd_update, base_idx=%d, bytes_used=%u, res_id_out=0x%X ctx_idx: %u", + blob_info->base_info->idx, + bytes_used, + ubwc_plane_cfg->port_type, ctx->ctx_index); + goto end; + } + + total_used_bytes += bytes_used; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util(CAM_ISP_IQ_BL, + total_used_bytes, kmd_buf_info, + prepare, blob_info->entry_added); + blob_info->entry_added = true; + } + break; + default: + CAM_ERR(CAM_ISP, "Invalid UBWC API Version %d ctx_idx: %u", + ubwc_config->api_version, ctx->ctx_index); + rc = -EINVAL; + break; + } +end: + return rc; +} + +static int cam_isp_get_generic_ubwc_data_v2( + struct cam_ubwc_plane_cfg_v2 *ubwc_cfg, + uint32_t version, + struct cam_vfe_generic_ubwc_config *generic_ubwc_cfg) +{ + int i = 0; + + generic_ubwc_cfg->api_version = version; + for (i = 0; i < CAM_PACKET_MAX_PLANES - 1; i++) { 
+ generic_ubwc_cfg->ubwc_plane_cfg[i].port_type = + ubwc_cfg[i].port_type; + generic_ubwc_cfg->ubwc_plane_cfg[i].meta_stride = + ubwc_cfg[i].meta_stride; + generic_ubwc_cfg->ubwc_plane_cfg[i].meta_size = + ubwc_cfg[i].meta_size; + generic_ubwc_cfg->ubwc_plane_cfg[i].meta_offset = + ubwc_cfg[i].meta_offset; + generic_ubwc_cfg->ubwc_plane_cfg[i].packer_config = + ubwc_cfg[i].packer_config; + generic_ubwc_cfg->ubwc_plane_cfg[i].mode_config_0 = + ubwc_cfg[i].mode_config_0; + generic_ubwc_cfg->ubwc_plane_cfg[i].mode_config_1 = + ubwc_cfg[i].mode_config_1; + generic_ubwc_cfg->ubwc_plane_cfg[i].tile_config = + ubwc_cfg[i].tile_config; + generic_ubwc_cfg->ubwc_plane_cfg[i].h_init = + ubwc_cfg[i].h_init; + generic_ubwc_cfg->ubwc_plane_cfg[i].v_init = + ubwc_cfg[i].v_init; + generic_ubwc_cfg->ubwc_plane_cfg[i].static_ctrl = + ubwc_cfg[i].static_ctrl; + generic_ubwc_cfg->ubwc_plane_cfg[i].ctrl_2 = + ubwc_cfg[i].ctrl_2; + generic_ubwc_cfg->ubwc_plane_cfg[i].stats_ctrl_2 = + ubwc_cfg[i].stats_ctrl_2; + generic_ubwc_cfg->ubwc_plane_cfg[i].lossy_threshold_0 = + ubwc_cfg[i].lossy_threshold_0; + generic_ubwc_cfg->ubwc_plane_cfg[i].lossy_threshold_1 = + ubwc_cfg[i].lossy_threshold_1; + generic_ubwc_cfg->ubwc_plane_cfg[i].lossy_var_offset = + ubwc_cfg[i].lossy_var_offset; + generic_ubwc_cfg->ubwc_plane_cfg[i].bandwidth_limit = + ubwc_cfg[i].bandwidth_limit; + generic_ubwc_cfg->ubwc_plane_cfg[i].hw_ctx_id_mask = 0; + } + + return 0; +} + +static int cam_isp_blob_ubwc_update_v2( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_ubwc_config_v2 *ubwc_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ubwc_plane_cfg_v2 *ubwc_plane_cfg; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf = NULL; + uint32_t res_id_out, i; + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + uint32_t *cmd_buf_addr; + uint32_t bytes_used = 0; + 
int rc = 0; + struct cam_vfe_generic_ubwc_config generic_ubwc_cfg; + + ctx = prepare->ctxt_to_hw_map; + if (!ctx) { + CAM_ERR(CAM_ISP, "Invalid ctx"); + rc = -EINVAL; + goto end; + } + + if (prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d max:%d, ctx_idx: %u", + prepare->num_hw_update_entries, + prepare->max_hw_update_entries, ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_ISP, "ctx_idx: %u num_ports= %d", ctx->ctx_index, ubwc_config->num_ports); + + kmd_buf_info = blob_info->kmd_buf_info; + for (i = 0; i < ubwc_config->num_ports; i++) { + ubwc_plane_cfg = &ubwc_config->ubwc_plane_cfg_array_flex[i][0]; + res_id_out = ubwc_plane_cfg->port_type & 0xFF; + + CAM_DBG(CAM_ISP, "UBWC config idx %d, port_type=%d ctx_idx: %u", i, + ubwc_plane_cfg->port_type, ctx->ctx_index); + + if (res_id_out >= max_ife_out_res) { + CAM_ERR(CAM_ISP, "Invalid port type:0x%x ctx_idx: %u", + ubwc_plane_cfg->port_type, ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + if (ctx->vfe_out_map[res_id_out] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + rc = -EINVAL; + goto end; + } + + hw_mgr_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id_out]]; + if (!hw_mgr_res) { + CAM_ERR(CAM_ISP, "Invalid hw_mgr_res"); + rc = -EINVAL; + goto end; + } + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + rc = -EINVAL; + goto end; + } + + if (!hw_mgr_res->hw_res[blob_info->base_info->split_id]) + continue; + + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "no free kmd memory for base=%d bytes_used=%u buf_size=%u ctx_idx: %u", + 
blob_info->base_info->idx, bytes_used, + kmd_buf_info->size, ctx->ctx_index); + rc = -ENOMEM; + goto end; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4 + + total_used_bytes/4; + + (void) cam_isp_get_generic_ubwc_data_v2(ubwc_plane_cfg, + ubwc_config->api_version, &generic_ubwc_cfg); + + rc = cam_isp_add_cmd_buf_update( + hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + blob_type_hw_cmd_map[blob_type], + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)&generic_ubwc_cfg, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed cmd_update, base_idx=%d, bytes_used=%u, res_id_out=0x%X, ctx_idx: %u", + blob_info->base_info->idx, + bytes_used, + ubwc_plane_cfg->port_type, ctx->ctx_index); + goto end; + } + + total_used_bytes += bytes_used; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util( + CAM_ISP_IQ_BL, total_used_bytes, + kmd_buf_info, prepare, blob_info->entry_added); + blob_info->entry_added = true; + } + +end: + return rc; +} + +static int cam_isp_get_generic_ubwc_data_v3( + struct cam_ubwc_plane_cfg_v3 *ubwc_cfg, + uint32_t version, + struct cam_vfe_generic_ubwc_config *generic_ubwc_cfg) +{ + int i = 0; + + generic_ubwc_cfg->api_version = version; + for (i = 0; i < CAM_PACKET_MAX_PLANES - 1; i++) { + generic_ubwc_cfg->ubwc_plane_cfg[i].port_type = + ubwc_cfg[i].port_type; + generic_ubwc_cfg->ubwc_plane_cfg[i].meta_stride = + ubwc_cfg[i].meta_stride; + generic_ubwc_cfg->ubwc_plane_cfg[i].meta_size = + ubwc_cfg[i].meta_size; + generic_ubwc_cfg->ubwc_plane_cfg[i].meta_offset = + ubwc_cfg[i].meta_offset; + generic_ubwc_cfg->ubwc_plane_cfg[i].packer_config = + ubwc_cfg[i].packer_config; + generic_ubwc_cfg->ubwc_plane_cfg[i].mode_config_0 = + ubwc_cfg[i].mode_config_0; + generic_ubwc_cfg->ubwc_plane_cfg[i].mode_config_1 = + ubwc_cfg[i].mode_config_1; + generic_ubwc_cfg->ubwc_plane_cfg[i].tile_config = + ubwc_cfg[i].tile_config; + generic_ubwc_cfg->ubwc_plane_cfg[i].h_init = + 
ubwc_cfg[i].h_init; + generic_ubwc_cfg->ubwc_plane_cfg[i].v_init = + ubwc_cfg[i].v_init; + generic_ubwc_cfg->ubwc_plane_cfg[i].static_ctrl = + ubwc_cfg[i].static_ctrl; + generic_ubwc_cfg->ubwc_plane_cfg[i].ctrl_2 = + ubwc_cfg[i].ctrl_2; + generic_ubwc_cfg->ubwc_plane_cfg[i].stats_ctrl_2 = + ubwc_cfg[i].stats_ctrl_2; + generic_ubwc_cfg->ubwc_plane_cfg[i].lossy_threshold_0 = + ubwc_cfg[i].lossy_threshold_0; + generic_ubwc_cfg->ubwc_plane_cfg[i].lossy_threshold_1 = + ubwc_cfg[i].lossy_threshold_1; + generic_ubwc_cfg->ubwc_plane_cfg[i].lossy_var_offset = + ubwc_cfg[i].lossy_var_offset; + generic_ubwc_cfg->ubwc_plane_cfg[i].bandwidth_limit = + ubwc_cfg[i].bandwidth_limit; + generic_ubwc_cfg->ubwc_plane_cfg[i].hw_ctx_id_mask = + ubwc_cfg[i].hw_ctx_id_mask; + } + + return 0; +} + +static int cam_isp_blob_ubwc_update_v3( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_ubwc_config_v3 *ubwc_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ubwc_plane_cfg_v3 *ubwc_plane_cfg; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf = NULL; + uint32_t res_id_out, i; + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + uint32_t *cmd_buf_addr; + uint32_t bytes_used = 0; + int rc = 0; + struct cam_vfe_generic_ubwc_config generic_ubwc_cfg; + uint32_t valid_hw_ctxt_mask = (CAM_ISP_MULTI_CTXT0_MASK | CAM_ISP_MULTI_CTXT1_MASK | + CAM_ISP_MULTI_CTXT2_MASK); + + ctx = prepare->ctxt_to_hw_map; + if (!ctx) { + CAM_ERR(CAM_ISP, "Invalid ctx"); + rc = -EINVAL; + goto end; + } + + if (prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d max:%d, ctx_idx: %u", + prepare->num_hw_update_entries, + prepare->max_hw_update_entries, ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_ISP, "ctx_idx: %u num_ports= %d", ctx->ctx_index, ubwc_config->num_ports); 
+ + kmd_buf_info = blob_info->kmd_buf_info; + for (i = 0; i < ubwc_config->num_ports; i++) { + ubwc_plane_cfg = &ubwc_config->ubwc_plane_cfg_array_flex[i][0]; + res_id_out = ubwc_plane_cfg->port_type & 0xFF; + + CAM_DBG(CAM_ISP, "UBWC config idx %d, port_type=%d ctx_idx: %u", i, + ubwc_plane_cfg->port_type, ctx->ctx_index); + + if (res_id_out >= max_ife_out_res) { + CAM_ERR(CAM_ISP, "Invalid port type:0x%x ctx_idx: %u", + ubwc_plane_cfg->port_type, ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + if (ubwc_plane_cfg->hw_ctx_id_mask & (~valid_hw_ctxt_mask)) { + CAM_ERR(CAM_ISP, "Invalid hw ctxt port:0x%x ctx_idx: %u hw_ctxt_mask: 0x%x", + ubwc_plane_cfg->port_type, ctx->ctx_index, + ubwc_plane_cfg->hw_ctx_id_mask); + rc = -EINVAL; + goto end; + } + + if (ctx->vfe_out_map[res_id_out] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + rc = -EINVAL; + goto end; + } + + hw_mgr_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id_out]]; + if (!hw_mgr_res) { + CAM_ERR(CAM_ISP, "Invalid hw_mgr_res"); + rc = -EINVAL; + goto end; + } + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + return rc; + } + + if (!hw_mgr_res->hw_res[blob_info->base_info->split_id]) + goto end; + + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "no free kmd memory for base=%d bytes_used=%u buf_size=%u ctx_idx: %u", + blob_info->base_info->idx, bytes_used, + kmd_buf_info->size, ctx->ctx_index); + rc = -ENOMEM; + goto end; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4 + + total_used_bytes/4; + + (void) cam_isp_get_generic_ubwc_data_v3(ubwc_plane_cfg, + ubwc_config->api_version, 
&generic_ubwc_cfg); + + rc = cam_isp_add_cmd_buf_update( + hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + CAM_ISP_HW_CMD_UBWC_UPDATE_V3, + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)&generic_ubwc_cfg, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed cmd_update, base_idx=%d, bytes_used=%u, res_id_out=0x%X, ctx_idx: %u", + blob_info->base_info->idx, + bytes_used, + ubwc_plane_cfg->port_type, ctx->ctx_index); + goto end; + } + + total_used_bytes += bytes_used; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util( + CAM_ISP_IQ_BL, total_used_bytes, + kmd_buf_info, prepare, blob_info->entry_added); + blob_info->entry_added = true; + } + +end: + return rc; +} + +static int cam_isp_scratch_buf_update_util( + struct cam_isp_sfe_scratch_buf_info *buffer_info, + struct cam_ife_sfe_scratch_buf_info *port_info) +{ + int rc = 0; + int mmu_hdl; + size_t size; + dma_addr_t io_addr; + bool is_buf_secure; + + is_buf_secure = cam_mem_is_secure_buf(buffer_info->mem_handle); + if (is_buf_secure) { + port_info->is_secure = true; + mmu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure; + } else { + port_info->is_secure = false; + mmu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl; + } + + rc = cam_mem_get_io_buf(buffer_info->mem_handle, + mmu_hdl, &io_addr, &size, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ISP, + "no scratch buf addr for res: 0x%x", + buffer_info->resource_type); + rc = -ENOMEM; + return rc; + } + + if (buffer_info->offset >= size) { + CAM_ERR(CAM_ISP, + "Invalid scratch buffer offset:%u size:%u mmu_hdl:%u hdl:%d res_type:0x%x", + buffer_info->offset, size, mmu_hdl, buffer_info->mem_handle, + buffer_info->resource_type); + rc = -EINVAL; + return rc; + } + + port_info->res_id = buffer_info->resource_type; + port_info->io_addr = io_addr + buffer_info->offset; + port_info->width = buffer_info->width; + port_info->height = buffer_info->height; + port_info->stride = buffer_info->stride; + port_info->slice_height 
= buffer_info->slice_height; + port_info->offset = 0; + port_info->config_done = true; + + CAM_DBG(CAM_ISP, + "res_id: 0x%x w: %d h: %d s: %d sh: %d addr: 0x%x", + port_info->res_id, port_info->width, + port_info->height, port_info->stride, + port_info->slice_height, port_info->io_addr); + + return rc; +} + +static int cam_isp_blob_ife_scratch_buf_update( + struct cam_isp_sfe_init_scratch_buf_config *scratch_config, + struct cam_hw_prepare_update_args *prepare) +{ + int rc = 0, i; + uint32_t res_id_out; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_sfe_scratch_buf_info *buffer_info; + struct cam_ife_sfe_scratch_buf_info *port_info; + struct cam_isp_hw_mgr_res *ife_out_res; + struct cam_ife_scratch_buf_cfg *ife_scratch_config; + + ctx = prepare->ctxt_to_hw_map; + ife_scratch_config = ctx->scratch_buf_info.ife_scratch_config; + + for (i = 0; i < scratch_config->num_ports; i++) { + buffer_info = &scratch_config->port_scratch_cfg_flex[i]; + if (!cam_ife_hw_mgr_is_ife_out_port(buffer_info->resource_type)) + continue; + + res_id_out = buffer_info->resource_type & 0xFF; + + if (res_id_out >= max_ife_out_res) { + CAM_ERR(CAM_ISP, "Invalid port type:%x ctx_idx: %u", + buffer_info->resource_type, ctx->ctx_index); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_ISP, "scratch config idx: %d res: 0x%x ctx_idx: %u", + i, buffer_info->resource_type, ctx->ctx_index); + + if (ctx->vfe_out_map[res_id_out] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + rc = -EINVAL; + goto end; + } + + ife_out_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id_out]]; + if (!ife_out_res->hw_res[0]) { + CAM_ERR(CAM_ISP, + "IFE rsrc_type: 0x%x not acquired, failing scratch config, ctx_idx: %u", + buffer_info->resource_type, ctx->ctx_index); + return -EINVAL; + } + + if (ife_scratch_config->num_config >= CAM_IFE_SCRATCH_NUM_MAX) { + CAM_ERR(CAM_ISP, + "Incoming num of scratch buffers: %u exceeds max: %u, ctx_idx: %u", + ife_scratch_config->num_config, 
CAM_IFE_SCRATCH_NUM_MAX, + ctx->ctx_index); + return -EINVAL; + } + + port_info = &ife_scratch_config->buf_info[ife_scratch_config->num_config++]; + rc = cam_isp_scratch_buf_update_util(buffer_info, port_info); + if (rc) + goto end; + } + +end: + return rc; +} + +static int cam_isp_blob_sfe_scratch_buf_update( + struct cam_isp_sfe_init_scratch_buf_config *scratch_config, + struct cam_hw_prepare_update_args *prepare) +{ + int rc = 0, i; + uint32_t res_id_out; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_sfe_scratch_buf_info *buffer_info; + struct cam_ife_sfe_scratch_buf_info *port_info; + struct cam_isp_hw_mgr_res *sfe_out_res; + + ctx = prepare->ctxt_to_hw_map; + + for (i = 0; i < scratch_config->num_ports; i++) { + buffer_info = &scratch_config->port_scratch_cfg_flex[i]; + if (!cam_ife_hw_mgr_is_sfe_out_port(buffer_info->resource_type)) + continue; + + res_id_out = buffer_info->resource_type & 0xFF; + + CAM_DBG(CAM_ISP, "scratch config idx: %d res: 0x%x, ctx_idx: %u", + i, buffer_info->resource_type, ctx->ctx_index); + + if (res_id_out >= CAM_SFE_FE_RDI_NUM_MAX) { + CAM_ERR(CAM_ISP, "invalid out res type: 0x%x, ctx_idx: %u", + buffer_info->resource_type, ctx->ctx_index); + return -EINVAL; + } + + if (ctx->sfe_out_map[res_id_out] == 0xFF) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + sfe_out_res = &ctx->res_list_sfe_out[ctx->sfe_out_map[res_id_out]]; + if (!sfe_out_res->hw_res[0]) { + CAM_ERR(CAM_ISP, + "SFE rsrc_type: 0x%x not acquired, failing scratch config, ctx_idx: %u", + buffer_info->resource_type, ctx->ctx_index); + return -EINVAL; + } + + port_info = &ctx->scratch_buf_info.sfe_scratch_config->buf_info[res_id_out]; + rc = cam_isp_scratch_buf_update_util(buffer_info, port_info); + if (rc) + goto end; + + ctx->scratch_buf_info.sfe_scratch_config->num_config++; + } + + if (ctx->scratch_buf_info.sfe_scratch_config->num_config != + ctx->scratch_buf_info.num_fetches) { + CAM_ERR(CAM_ISP, + "Mismatch in 
number of scratch buffers provided: %u expected: %u ctx_idx: %u", + ctx->scratch_buf_info.sfe_scratch_config->num_config, + ctx->scratch_buf_info.num_fetches, ctx->ctx_index); + rc = -EINVAL; + } + +end: + return rc; +} + +static inline int __cam_isp_sfe_send_cache_config( + int32_t cmd_type, + struct cam_isp_sfe_bus_sys_cache_config *wm_rm_cache_cfg) +{ + int rc = 0; + struct cam_isp_resource_node *hw_res = wm_rm_cache_cfg->res; + + rc = hw_res->hw_intf->hw_ops.process_cmd( + hw_res->hw_intf->hw_priv, + cmd_type, wm_rm_cache_cfg, + sizeof(struct cam_isp_sfe_bus_sys_cache_config)); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in sending cache config for: %u", + hw_res->res_id); + } + + return rc; +} + +static uint32_t cam_ife_hw_mgr_get_sfe_sys_cache_id(uint32_t exp_type, + struct cam_ife_hw_mgr_ctx *ctx, uint32_t hw_idx) +{ + uint32_t scid_idx = CAM_CPAS_SHDR_SYS_CACHE_SHDR_MAX_ID(CAM_LLCC_LARGE_4); + unsigned long supported_sc_idx; + struct cam_ife_hw_mgr *hw_mgr; + bool use_large; + int rc; + + hw_mgr = ctx->hw_mgr; + supported_sc_idx = hw_mgr->sfe_cache_info[hw_idx].supported_scid_idx; + if (!supported_sc_idx) { + CAM_DBG(CAM_ISP, "Unsupported SCID for SFE %u exp_type %u ctx_idx: %u", + hw_idx, exp_type, ctx->ctx_index); + goto end; + } + + if (exp_type >= CAM_ISP_EXPOSURE_MAX) { + CAM_INFO(CAM_ISP, "Invalid Exposure Type for SFE %u exp_type %u ctx_idx: %u", + hw_idx, exp_type, ctx->ctx_index); + goto end; + } + + scid_idx = ffs(supported_sc_idx) - 1; + clear_bit(scid_idx, &supported_sc_idx); + use_large = (exp_type != CAM_ISP_LAST_EXPOSURE) ? true : false; + + if (use_large) { + while (supported_sc_idx) { + scid_idx = ffs(supported_sc_idx) - 1; + clear_bit(scid_idx, &supported_sc_idx); + if ((scid_idx < CAM_LLCC_LARGE_1) || + (scid_idx >= (CAM_CPAS_SHDR_SYS_CACHE_SHDR_MAX_ID( + CAM_LLCC_LARGE_4)))) + continue; + + /* + * In case of scenarios like, 1exp-->2exp-->1exp, we want to + * retaing the earlier allocated SCID. 
Below check takes care + * if the SCID is already activated, we do not need to reallocate + */ + if (hw_mgr->sfe_cache_info[hw_idx].activated[exp_type]) + break; + + /* + * Curr_idx should not be reset in its lifetime. We need to preserve + * it for next session/hw-open/hw-close cases to toggle with next + * available SCID. + */ + if (hw_mgr->sfe_cache_info[hw_idx].curr_idx[exp_type] != scid_idx) + break; + } + } + + if (use_large && (scid_idx < CAM_LLCC_LARGE_1)) + scid_idx = CAM_CPAS_SHDR_SYS_CACHE_SHDR_MAX_ID(CAM_LLCC_LARGE_4); + + if (scid_idx >= (CAM_CPAS_SHDR_SYS_CACHE_SHDR_MAX_ID(CAM_LLCC_LARGE_4))) { + CAM_DBG(CAM_ISP, "Cannot find scid for SFE %u exp_type %u ctx_idx: %u", + hw_idx, exp_type, ctx->ctx_index); + } else { + hw_mgr->sfe_cache_info[hw_idx].curr_idx[exp_type] = scid_idx; + + if (!hw_mgr->sfe_cache_info[hw_idx].activated[exp_type] && + ctx->flags.init_done) + cam_cpas_activate_llcc(scid_idx); + + hw_mgr->sfe_cache_info[hw_idx].activated[exp_type] = true; + if (cam_cpas_is_notif_staling_supported() + && hw_mgr->sys_cache_info[scid_idx].llcc_staling_support) { + rc = cam_cpas_notif_increment_staling_counter(scid_idx); + if (rc) + CAM_ERR(CAM_ISP, + "llcc cache notif increment staling failed %d", scid_idx); + } + + CAM_DBG(CAM_ISP, "SFE %u Exp type %u SCID index %d use_large %d ctx_idx: %u", + hw_idx, exp_type, scid_idx, use_large, ctx->ctx_index); + } + +end: + return scid_idx; +} + +static int cam_isp_blob_sfe_exp_order_update( + uint32_t base_idx, + struct cam_isp_sfe_exp_config *exp_config, + struct cam_hw_prepare_update_args *prepare) +{ + int rc = 0, i, j; + bool send_config; + uint32_t exp_order_max = 0; + uint32_t res_id_out, res_id_in, sc_idx, exp_type; + struct cam_ife_hw_mgr_ctx *ctx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_mgr_res *tmp; + struct cam_isp_sfe_wm_exp_order_config *order_cfg; + struct cam_ife_hw_mgr *hw_mgr; + struct cam_isp_sfe_bus_sys_cache_config wm_rm_cache_cfg; + + ctx = prepare->ctxt_to_hw_map; + 
hw_mgr = ctx->hw_mgr; + memset(ctx->flags.sys_cache_usage, false, sizeof(ctx->flags.sys_cache_usage)); + + if (!hw_mgr->num_caches_found) { + CAM_DBG(CAM_ISP, "No caches found during probe, ctx_idx: %u", ctx->ctx_index); + return 0; + } + + if (!exp_config->num_ports) { + CAM_ERR(CAM_ISP, "Invalid number of ports: %d ctx_idx: %u", + exp_config->num_ports, ctx->ctx_index); + return -EINVAL; + } + + /* + * The last resource in the array will be considered as + * last exposure + */ + exp_order_max = exp_config->num_ports - 1; + for (i = 0; i < exp_config->num_ports; i++) { + order_cfg = &exp_config->wm_config_flex[i]; + + rc = cam_ife_hw_mgr_is_sfe_rdi_for_fetch( + order_cfg->res_type); + if (!rc) { + CAM_ERR(CAM_ISP, + "Not a SFE fetch RDI: 0x%x ctx_idx: %u", + order_cfg->res_type, ctx->ctx_index); + return -EINVAL; + } + + if ((order_cfg->res_type - CAM_ISP_SFE_OUT_RES_RDI_0) >= + ctx->scratch_buf_info.num_fetches) { + CAM_DBG(CAM_ISP, + "Skip cache config for resource: 0x%x, active fetches: %u [exp_order: %d %d] in %u ctx", + order_cfg->res_type, ctx->scratch_buf_info.num_fetches, + i, exp_order_max, ctx->ctx_index); + continue; + } + + /* Add more params if needed */ + wm_rm_cache_cfg.wr_cfg_done = false; + wm_rm_cache_cfg.rd_cfg_done = false; + wm_rm_cache_cfg.scid = 0; + wm_rm_cache_cfg.use_cache = false; + send_config = false; + exp_type = CAM_ISP_EXPOSURE_MAX; + + if (i == exp_order_max) + exp_type = CAM_ISP_LAST_EXPOSURE; + else if (i == exp_order_max - 1) + exp_type = CAM_ISP_LAST_1_EXPOSURE; + else if (i == exp_order_max - 2) + exp_type = CAM_ISP_LAST_2_EXPOSURE; + + sc_idx = cam_ife_hw_mgr_get_sfe_sys_cache_id(exp_type, + ctx, base_idx); + + if (sc_idx < (CAM_CPAS_SHDR_SYS_CACHE_SHDR_MAX_ID(CAM_LLCC_LARGE_4))) + wm_rm_cache_cfg.use_cache = true; + + if (wm_rm_cache_cfg.use_cache) { + wm_rm_cache_cfg.scid = hw_mgr->sys_cache_info[sc_idx].scid; + if (wm_rm_cache_cfg.scid <= 0) + goto end; + ctx->flags.sys_cache_usage[sc_idx] = true; + } + + /* Configure 
cache config for WM */ + res_id_out = order_cfg->res_type & 0xFF; + if (res_id_out >= max_sfe_out_res) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "res_id_out: %d exceeds max size: %d ctx_idx: %u", + res_id_out, max_sfe_out_res, ctx->ctx_index); + return -EINVAL; + } + + if (ctx->sfe_out_map[res_id_out] == 0xFF) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + hw_mgr_res = &ctx->res_list_sfe_out[ctx->sfe_out_map[res_id_out]]; + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + if (!hw_mgr_res->hw_res[j]) + continue; + + if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx) + continue; + + wm_rm_cache_cfg.res = hw_mgr_res->hw_res[j]; + rc = __cam_isp_sfe_send_cache_config( + CAM_ISP_HW_SFE_SYS_CACHE_WM_CONFIG, + &wm_rm_cache_cfg); + send_config = true; + break; + } + + if (rc || !send_config) { + CAM_ERR(CAM_ISP, + "Failed to send cache config for WR res: 0x%x base_idx: %u send_config: %d rc: %d ctx_idx: %u", + order_cfg->res_type, base_idx, send_config, rc, ctx->ctx_index); + return -EINVAL; + } + + send_config = false; + /* RDI WMs have been validated find corresponding RM */ + if (order_cfg->res_type == CAM_ISP_SFE_OUT_RES_RDI_0) + res_id_in = CAM_ISP_SFE_IN_RD_0; + else if (order_cfg->res_type == CAM_ISP_SFE_OUT_RES_RDI_1) + res_id_in = CAM_ISP_SFE_IN_RD_1; + else + res_id_in = CAM_ISP_SFE_IN_RD_2; + + /* Configure cache config for RM */ + list_for_each_entry_safe(hw_mgr_res, tmp, &ctx->res_list_ife_in_rd, list) { + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + if (!hw_mgr_res->hw_res[j]) + continue; + + if (hw_mgr_res->hw_res[j]->res_id != res_id_in) + continue; + + if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx) + continue; + + wm_rm_cache_cfg.res = hw_mgr_res->hw_res[j]; + rc = __cam_isp_sfe_send_cache_config( + CAM_ISP_HW_SFE_SYS_CACHE_RM_CONFIG, + &wm_rm_cache_cfg); + send_config = true; + break; + } + } + + if (rc || !send_config) { + CAM_ERR(CAM_ISP, + "Failed to send cache config for RD res: 0x%x base_idx: %u 
send_config: %d rc: %d ctx_idx: %u", + res_id_in, base_idx, send_config, rc, ctx->ctx_index); + return -EINVAL; + } + + if (!wm_rm_cache_cfg.rd_cfg_done && !wm_rm_cache_cfg.wr_cfg_done) { + wm_rm_cache_cfg.use_cache = false; + if (sc_idx < (CAM_CPAS_SHDR_SYS_CACHE_SHDR_MAX_ID(CAM_LLCC_LARGE_4))) + ctx->flags.sys_cache_usage[sc_idx] = false; + } + + CAM_DBG(CAM_ISP, + "cache %s on exp order: %u [max: %u] for out: 0x%x ctx_idx: %u", + (wm_rm_cache_cfg.use_cache ? "enabled" : "not enabled"), + i, exp_order_max, order_cfg->res_type, ctx->ctx_index); + } + + return rc; + +end: + return 0; +} + +static int cam_isp_blob_sfe_update_fetch_core_cfg( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_hw_prepare_update_args *prepare) +{ + int rc; + uint32_t used_bytes = 0, total_used_bytes = 0; + uint32_t remain_size, res_id; + uint32_t *cpu_addr = NULL; + bool enable = true; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_hw_intf *hw_intf = NULL; + + ctx = prepare->ctxt_to_hw_map; + if (prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d, ctx_idx: %u", + prepare->num_hw_update_entries, ctx->ctx_index); + return -EINVAL; + } + + kmd_buf_info = blob_info->kmd_buf_info; + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) { + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "No free kmd memory for base idx: %d, ctx_idx: %u", + blob_info->base_info->idx, ctx->ctx_index); + rc = -ENOMEM; + return rc; + } + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + 
blob_info->base_info->hw_type); + return -EINVAL; + } + + if (!hw_mgr_res->hw_res[blob_info->base_info->split_id]) + continue; + + res_id = hw_mgr_res->res_id; + + /* check for active fetches */ + if ((ctx->ctx_config & + CAM_IFE_CTX_CFG_DYNAMIC_SWITCH_ON) && + ((res_id - CAM_ISP_SFE_IN_RD_0) >= + ctx->scratch_buf_info.sfe_scratch_config->updated_num_exp)) + enable = false; + else + enable = true; + + cpu_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes / 4 + + total_used_bytes / 4; + + CAM_DBG(CAM_ISP, + "SFE:%u RM: %u res_id: 0x%x enable: %u num_exp: %u ctx_idx: %u", + blob_info->base_info->idx, + (res_id - CAM_ISP_SFE_IN_RD_0), res_id, enable, + ctx->scratch_buf_info.sfe_scratch_config->updated_num_exp, ctx->ctx_index); + + rc = cam_isp_add_cmd_buf_update( + hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + CAM_ISP_HW_CMD_RM_ENABLE_DISABLE, + (void *)cpu_addr, remain_size, + (void *)&enable, &used_bytes); + + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed to dynamically %s SFE: %u RM: %u bytes_used: %u rc: %d ctx_idx: %u", + (enable ? 
"enable" : "disable"), + blob_info->base_info->idx, res_id, + used_bytes, rc, ctx->ctx_index); + return rc; + } + + total_used_bytes += used_bytes; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util( + CAM_ISP_IQ_BL, total_used_bytes, + kmd_buf_info, prepare, blob_info->entry_added); + blob_info->entry_added = true; + } + + return 0; +} + +static int cam_isp_blob_hfr_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_resource_hfr_config *hfr_config, + struct cam_hw_prepare_update_args *prepare, + uint32_t out_max, + enum cam_isp_hw_type hw_type) +{ + struct cam_isp_port_hfr_config *port_hfr_config; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf = NULL; + uint32_t res_id_out, i; + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + uint32_t *cmd_buf_addr; + uint32_t bytes_used = 0; + int rc = 0; + + ctx = prepare->ctxt_to_hw_map; + CAM_DBG(CAM_ISP, "ctx_idx: %u num_ports= %d", + ctx->ctx_index, hfr_config->num_ports); + + /* Max one hw entries required for hfr config update */ + if (prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d ctx_idx: %u", + prepare->num_hw_update_entries, + prepare->max_hw_update_entries, ctx->ctx_index); + return -EINVAL; + } + + kmd_buf_info = blob_info->kmd_buf_info; + for (i = 0; i < hfr_config->num_ports; i++) { + port_hfr_config = &hfr_config->port_hfr_config_flex[i]; + res_id_out = port_hfr_config->resource_type & 0xFF; + + CAM_DBG(CAM_ISP, "type %d hfr config idx %d, type=%d ctx_idx: %u", + hw_type, i, res_id_out, ctx->ctx_index); + + if (res_id_out >= out_max) { + CAM_ERR(CAM_ISP, "invalid out restype:%x, ctx_idx: %u", + port_hfr_config->resource_type, ctx->ctx_index); + return -EINVAL; + } + + if (hw_type == CAM_ISP_HW_TYPE_SFE) { + if (ctx->sfe_out_map[res_id_out] == 0xFF) { + 
CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + hw_mgr_res = &ctx->res_list_sfe_out[ctx->sfe_out_map[res_id_out]]; + } else { + if (ctx->vfe_out_map[res_id_out] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + hw_mgr_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id_out]]; + } + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + rc = -EINVAL; + return rc; + } + + if (!hw_mgr_res->hw_res[blob_info->base_info->split_id]) + continue; + + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "no free kmd memory for base %d, ctx_idx: %u", + blob_info->base_info->idx, ctx->ctx_index); + rc = -ENOMEM; + return rc; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4 + + total_used_bytes/4; + + rc = cam_isp_add_cmd_buf_update( + hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + CAM_ISP_HW_CMD_GET_HFR_UPDATE, + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)port_hfr_config, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed cmd_update, base_idx=%d, rc=%d, res_id_out=0x%X hw_type=%d, ctx_idx: %u", + blob_info->base_info->idx, bytes_used, + port_hfr_config->resource_type, hw_type, ctx->ctx_index); + return rc; + } + + total_used_bytes += bytes_used; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util( + CAM_ISP_IQ_BL, total_used_bytes, + kmd_buf_info, prepare, blob_info->entry_added); + blob_info->entry_added = true; + } + + return rc; +} + +static int cam_isp_blob_csid_discard_init_frame_update( + struct cam_isp_generic_blob_info *blob_info, + struct 
cam_isp_discard_initial_frames *discard_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_hw_intf *hw_intf; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *res; + struct cam_ife_csid_discard_init_frame_args discard_args; + int rc = -EINVAL, i; + + ctx = prepare->ctxt_to_hw_map; + discard_args.num_frames = discard_config->num_frames; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + res = hw_mgr_res->hw_res[i]; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + if (hw_intf->hw_idx != blob_info->base_info->idx) + continue; + + discard_args.res = res; + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CSID_DISCARD_INIT_FRAMES, + &discard_args, + sizeof(struct cam_ife_csid_discard_init_frame_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to update discard frame cfg for res: %s on CSID[%u] ctx:%u", + res->res_name, blob_info->base_info->idx, + ctx->ctx_index); + break; + } + } + } + } + + return rc; +} + + +static int cam_isp_blob_csid_dynamic_switch_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_mode_switch_info *mup_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_hw_intf *hw_intf; + struct cam_ife_csid_mode_switch_update_args csid_mup_upd_args = {0}; + struct cam_ife_hw_mgr *ife_hw_mgr; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + int i, rc = -EINVAL; + + ctx = prepare->ctxt_to_hw_map; + ife_hw_mgr = ctx->hw_mgr; + + CAM_INFO(CAM_ISP, + "csid mup value=%u, ctx_idx: %u req id %lld", mup_config->mup, + ctx->ctx_index, prepare->packet->header.request_id); + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + prepare_hw_data->mup_en = true; + 
prepare_hw_data->mup_val = mup_config->mup; + + /* Check for continuous out of sync */ + if (ife_hw_mgr->debug_cfg.csid_out_of_sync_simul >> 1) + prepare_hw_data->mup_val = ~mup_config->mup & 1; + + /* + * Send MUP to CSID for INIT packets only to be used at stream on and after. + * For update packets with MUP, append the config to the cdm packet + */ + if (prepare_hw_data->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) { + csid_mup_upd_args.mup_args.mup_val = mup_config->mup; + csid_mup_upd_args.mup_args.use_mup = true; + } + + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].hw_type != CAM_ISP_HW_TYPE_CSID) + continue; + + if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_LEFT) + continue; + + /* For sHDR dynamic switch update num starting exposures to CSID for INIT */ + if ((prepare_hw_data->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) && + (ctx->flags.is_sfe_shdr)) { + csid_mup_upd_args.exp_update_args.reset_discard_cfg = true; + csid_mup_upd_args.exp_update_args.num_exposures = + mup_config->num_expoures; + } + + hw_intf = ife_hw_mgr->csid_devices[ctx->base[i].idx]; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CSID_DYNAMIC_SWITCH_UPDATE, + &csid_mup_upd_args, + sizeof(struct cam_ife_csid_mode_switch_update_args)); + if (rc) + CAM_ERR(CAM_ISP, "Dynamic switch update failed, ctx_idx: %u", + ctx->ctx_index); + } + } + + return rc; +} + +static int cam_isp_blob_csid_clock_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_csid_clock_config *clock_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_ife_csid_clock_update_args csid_clock_upd_args; + uint64_t clk_rate = 0; + int rc = -EINVAL; + uint32_t i; + + ctx = prepare->ctxt_to_hw_map; + + CAM_DBG(CAM_ISP, + "csid clk=%llu, ctx_idx: %u", 
clock_config->csid_clock, ctx->ctx_index); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + clk_rate = clock_config->csid_clock; + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + csid_clock_upd_args.clk_rate = clk_rate; + CAM_DBG(CAM_ISP, "i= %d clk=%llu\n", + i, csid_clock_upd_args.clk_rate); + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + blob_type_hw_cmd_map[blob_type], + &csid_clock_upd_args, + sizeof( + struct cam_ife_csid_clock_update_args)); + if (rc) + CAM_ERR(CAM_ISP, "Clock Update failed, ctx_idx: %u", + ctx->ctx_index); + } else + CAM_ERR(CAM_ISP, "NULL hw_intf! ctx_idx: %u", ctx->ctx_index); + } + } + + return rc; +} + +static int cam_isp_blob_csid_qcfa_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_csid_qcfa_config *qcfa_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_ife_csid_qcfa_update_args csid_qcfa_upd_args; + int rc = -EINVAL; + uint32_t i; + + ctx = prepare->ctxt_to_hw_map; + + CAM_DBG(CAM_ISP, + "csid binning=%d, ctx_idx: %u", qcfa_config->csid_binning, ctx->ctx_index); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + + if (!hw_mgr_res->hw_res[i] || + hw_mgr_res->res_id != CAM_IFE_PIX_PATH_RES_IPP) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + csid_qcfa_upd_args.qcfa_binning = + qcfa_config->csid_binning; + csid_qcfa_upd_args.res = + hw_mgr_res->hw_res[i]; + + CAM_DBG(CAM_ISP, "i= %d QCFA binning=%d\n ctx_idx: %u", + i, csid_qcfa_upd_args.qcfa_binning, ctx->ctx_index); + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CSID_QCFA_SUPPORTED, + 
&csid_qcfa_upd_args, + sizeof( + struct cam_ife_csid_qcfa_update_args)); + if (rc) + CAM_ERR(CAM_ISP, "QCFA Update failed, ctx_idx: %u", + ctx->ctx_index); + } else + CAM_ERR(CAM_ISP, "NULL hw_intf! ctx_idx: %u", ctx->ctx_index); + } + } + + return rc; +} + +static int cam_isp_blob_core_cfg_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_core_config *core_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + int rc = 0, i; + struct cam_vfe_core_config_args vfe_core_config; + + ctx = prepare->ctxt_to_hw_map; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + if ((hw_mgr_res->res_id == + CAM_ISP_HW_VFE_IN_CAMIF) || + (hw_mgr_res->res_id == + CAM_ISP_HW_VFE_IN_PDLIB)) { + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + vfe_core_config.node_res = + hw_mgr_res->hw_res[i]; + + memcpy(&vfe_core_config.core_config, + core_config, + sizeof( + struct cam_isp_core_config)); + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CORE_CONFIG, + &vfe_core_config, + sizeof( + struct cam_vfe_core_config_args) + ); + if (rc) + CAM_ERR(CAM_ISP, + "Core cfg parse fail, ctx_idx: %u", + ctx->ctx_index); + } else { + CAM_WARN(CAM_ISP, "NULL hw_intf! 
ctx_idx: %u", + ctx->ctx_index); + } + } + } + } + + return rc; +} + +static int cam_isp_blob_sfe_core_cfg_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_sfe_core_config *core_config, + struct cam_hw_prepare_update_args *prepare) +{ + int rc = -EINVAL, i, idx; + struct cam_sfe_core_config_args sfe_core_config; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_hw_intf *hw_intf; + struct cam_ife_hw_mgr *ife_hw_mgr; + + ctx = prepare->ctxt_to_hw_map; + ife_hw_mgr = ctx->hw_mgr; + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].hw_type != CAM_ISP_HW_TYPE_SFE) + continue; + + idx = ctx->base[i].idx; + if (idx >= CAM_SFE_HW_NUM_MAX || + !ife_hw_mgr->sfe_devices[idx]) + continue; + + hw_intf = ife_hw_mgr->sfe_devices[idx]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + memcpy(&sfe_core_config.core_config, + core_config, + sizeof(struct cam_isp_sfe_core_config)); + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CORE_CONFIG, + &sfe_core_config, + sizeof(struct cam_sfe_core_config_args)); + if (rc) + CAM_ERR(CAM_ISP, + "SFE core cfg parse fail, ctx_idx: %u", + ctx->ctx_index); + } else { + CAM_WARN(CAM_ISP, "NULL hw_intf! 
ctx_idx: %u", ctx->ctx_index); + } + } + + return rc; +} + +static int cam_isp_blob_ife_clock_update( + struct cam_isp_clock_config *clock_config, + struct cam_ife_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_vfe_clock_update_args clock_upd_args; + uint64_t clk_rate = 0; + int rc = -EINVAL; + uint32_t i, j; + bool camif_l_clk_updated = false; + bool camif_r_clk_updated = false; + + CAM_DBG(CAM_PERF, + "IFE clk update usage=%u left_clk= %lu right_clk=%lu ctx_idx: %u", + clock_config->usage_type, clock_config->left_pix_hz, + clock_config->right_pix_hz, ctx->ctx_index); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + clk_rate = 0; + if (!hw_mgr_res->hw_res[i]) + continue; + + if ((hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) || + (hw_mgr_res->res_id == + CAM_ISP_HW_VFE_IN_PDLIB)) { + if (i == CAM_ISP_HW_SPLIT_LEFT) { + if (camif_l_clk_updated) + continue; + + clk_rate = + clock_config->left_pix_hz; + + camif_l_clk_updated = true; + } else { + if (camif_r_clk_updated) + continue; + + clk_rate = + clock_config->right_pix_hz; + + camif_r_clk_updated = true; + } + } else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RD) && + (hw_mgr_res->res_id <= CAM_ISP_HW_VFE_IN_RDI4)) { + for (j = 0; j < clock_config->num_rdi; j++) + clk_rate = max(clock_config->rdi_hz_flex[j], clk_rate); + } else { + if (hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_LCR + && hw_mgr_res->hw_res[i]) { + CAM_ERR(CAM_ISP, "Invalid res_id %u, ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + rc = -EINVAL; + return rc; + } + } + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + clock_upd_args.node_res = hw_mgr_res->hw_res[i]; + CAM_DBG(CAM_PERF, + "Update Clock value res_id=%u i= %d clk=%llu ctx_idx: %u", + hw_mgr_res->res_id, i, clk_rate, ctx->ctx_index); + + clock_upd_args.clk_rate = clk_rate; + + /* + * Update clock values to top, 
actual apply to hw will happen when + * CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE is called + */ + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CLOCK_UPDATE, + &clock_upd_args, + sizeof( + struct cam_vfe_clock_update_args)); + if (rc) { + CAM_ERR(CAM_PERF, + "IFE:%d Clock Update failed clk_rate:%llu rc:%d ctx_idx: %u", + hw_intf->hw_idx, clk_rate, rc, ctx->ctx_index); + goto end; + } + } else + CAM_WARN(CAM_ISP, "NULL hw_intf! ctx_idx: %u", ctx->ctx_index); + } + } + +end: + return rc; +} + + +static int cam_isp_blob_sfe_clock_update( + struct cam_isp_clock_config *clock_config, + struct cam_ife_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_sfe_clock_update_args clock_upd_args; + uint64_t clk_rate = 0; + int rc = -EINVAL; + uint32_t i, j; + bool l_clk_updated = false; + bool r_clk_updated = false; + + + CAM_DBG(CAM_PERF, + "SFE clk update usage: %u left_clk: %lu right_clk: %lu ctx_idx: %u", + clock_config->usage_type, clock_config->left_pix_hz, clock_config->right_pix_hz, + ctx->ctx_index); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_sfe_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + clk_rate = 0; + if (!hw_mgr_res->hw_res[i]) + continue; + + if (hw_mgr_res->res_id == CAM_ISP_HW_SFE_IN_PIX) { + if (i == CAM_ISP_HW_SPLIT_LEFT) { + if (l_clk_updated) + continue; + + clk_rate = + clock_config->left_pix_hz; + l_clk_updated = true; + } else { + if (r_clk_updated) + continue; + + clk_rate = + clock_config->right_pix_hz; + r_clk_updated = true; + } + } else { + for (j = 0; j < clock_config->num_rdi; j++) + clk_rate = max(clock_config->rdi_hz_flex[j], + clk_rate); + } + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + clock_upd_args.node_res = + hw_mgr_res->hw_res[i]; + CAM_DBG(CAM_PERF, + "SFE res_id: %u i: %d clk: %llu ctx_idx: %u", + hw_mgr_res->res_id, i, clk_rate, ctx->ctx_index); + + clock_upd_args.clk_rate = clk_rate; + /* 
+ * Update clock values to top, actual apply to hw will happen when + * CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE is called + */ + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CLOCK_UPDATE, + &clock_upd_args, + sizeof( + struct cam_sfe_clock_update_args)); + if (rc) + CAM_ERR(CAM_PERF, + "SFE clock update failed, ctx_idx: %u", + ctx->ctx_index); + } else + CAM_WARN(CAM_ISP, "NULL hw_intf! ctx_idx: %u", ctx->ctx_index); + } + } + + return rc; +} + +static int cam_isp_blob_sfe_rd_update( + uint32_t blob_type, + uint32_t kmd_buf_remain_size, + uint32_t *cmd_buf_addr, + uint32_t *total_used_bytes, + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_vfe_wm_config *wm_config) +{ + int rc; + uint32_t bytes_used = 0; + bool found = false; + struct cam_isp_hw_mgr_res *sfe_rd_res; + struct cam_hw_intf *hw_intf = NULL; + + list_for_each_entry(sfe_rd_res, &ctx->res_list_ife_in_rd, + list) { + if (sfe_rd_res->res_id == wm_config->port_type) { + found = true; + break; + } + } + + if (!found) { + CAM_ERR(CAM_ISP, + "Failed to find SFE rd resource: %u, check if rsrc is acquired, ctx_idx: %u", + wm_config->port_type, ctx->ctx_index); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "SFE RM config for port: 0x%x, ctx_idx: %u", + wm_config->port_type, ctx->ctx_index); + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + return -EINVAL; + } + + if (!sfe_rd_res->hw_res[blob_info->base_info->split_id]) + return 0; + + rc = cam_isp_add_cmd_buf_update( + sfe_rd_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD, + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)wm_config, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed to update SFE RM config out_type:0x%X base_idx:%d 
bytes_used:%u rc:%d ctx_idx: %u", + wm_config->port_type, blob_info->base_info->idx, + bytes_used, rc, ctx->ctx_index); + return rc; + } + + *total_used_bytes += bytes_used; + return rc; +} + +static int cam_ife_hw_mgr_update_scratch_offset( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_isp_vfe_wm_config *wm_config) +{ + uint32_t res_id; + struct cam_ife_sfe_scratch_buf_info *port_info; + + if ((wm_config->port_type - CAM_ISP_SFE_OUT_RES_RDI_0) >= + ctx->scratch_buf_info.num_fetches) + return 0; + + res_id = wm_config->port_type & 0xFF; + + if (res_id >= CAM_SFE_FE_RDI_NUM_MAX) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "res_id: %d exceeds max size: %d ctx_idx: %u", + res_id, CAM_SFE_FE_RDI_NUM_MAX, ctx->ctx_index); + return -EINVAL; + } + + if (!ctx->scratch_buf_info.sfe_scratch_config->buf_info[res_id].config_done) { + CAM_ERR(CAM_ISP, + "Scratch buffer not configured on ctx: %u for res: %u ctx_idx: %u", + ctx->ctx_index, res_id, ctx->ctx_index); + return -EINVAL; + } + + port_info = &ctx->scratch_buf_info.sfe_scratch_config->buf_info[res_id]; + port_info->offset = wm_config->offset; + + CAM_DBG(CAM_ISP, "Scratch addr: 0x%x offset: %u updated for: 0x%x ctx_idx: %u", + port_info->io_addr, port_info->offset, wm_config->port_type, ctx->ctx_index); + + return 0; +} + +static int cam_isp_blob_vfe_out_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_vfe_out_config *vfe_out_config, + struct cam_hw_prepare_update_args *prepare, + uint32_t size_isp_out, + enum cam_isp_hw_type hw_type) +{ + struct cam_isp_vfe_wm_config *wm_config; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *isp_out_res; + struct cam_hw_intf *hw_intf = NULL; + bool is_sfe_rd = false; + uint32_t res_id_out, i; + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + uint32_t *cmd_buf_addr; + uint32_t bytes_used = 0; + int rc = 0; + + ctx = prepare->ctxt_to_hw_map; + + if 
(prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d, ctx_idx: %u", + prepare->num_hw_update_entries, ctx->ctx_index); + return -EINVAL; + } + + kmd_buf_info = blob_info->kmd_buf_info; + for (i = 0; i < vfe_out_config->num_ports; i++) { + wm_config = &vfe_out_config->wm_config_flex[i]; + if ((hw_type == CAM_ISP_HW_TYPE_VFE) && + (!cam_ife_hw_mgr_is_ife_out_port(wm_config->port_type))) + continue; + + if (hw_type == CAM_ISP_HW_TYPE_SFE) { + is_sfe_rd = cam_ife_hw_mgr_is_sfe_rd_res(wm_config->port_type); + if ((!cam_ife_hw_mgr_is_sfe_out_port(wm_config->port_type)) && + (!is_sfe_rd)) + continue; + } + + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "No free kmd memory for base idx: %d, ctx_idx: %u", + blob_info->base_info->idx, ctx->ctx_index); + rc = -ENOMEM; + return rc; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + + (kmd_buf_info->used_bytes / 4) + + (total_used_bytes / 4); + + if (is_sfe_rd) { + rc = cam_isp_blob_sfe_rd_update(blob_type, + kmd_buf_remain_size, cmd_buf_addr, + &total_used_bytes, ctx, blob_info, wm_config); + if (rc) + return rc; + + is_sfe_rd = false; + continue; + } + + res_id_out = wm_config->port_type & 0xFF; + + CAM_DBG(CAM_ISP, "%s out config idx: %d port: 0x%x, ctx_idx: %u", + (hw_type == CAM_ISP_HW_TYPE_SFE ? 
"SFE" : "VFE"), + i, wm_config->port_type, ctx->ctx_index); + + if (res_id_out >= size_isp_out) { + CAM_ERR(CAM_ISP, "Invalid out port:0x%x, ctx_idx: %u", + wm_config->port_type, ctx->ctx_index); + return -EINVAL; + } + + if (hw_type == CAM_ISP_HW_TYPE_SFE) { + /* Update offset for scratch to compare for buf done */ + if ((ctx->flags.is_sfe_shdr) && + (cam_ife_hw_mgr_is_sfe_rdi_for_fetch(wm_config->port_type))) { + rc = cam_ife_hw_mgr_update_scratch_offset(ctx, wm_config); + if (rc) + return rc; + } + + if (ctx->sfe_out_map[res_id_out] == 0xFF) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + isp_out_res = &ctx->res_list_sfe_out[ctx->sfe_out_map[res_id_out]]; + } else { + if (ctx->vfe_out_map[res_id_out] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + isp_out_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id_out]]; + } + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + rc = -EINVAL; + return rc; + } + + if (!isp_out_res->hw_res[blob_info->base_info->split_id]) + continue; + + rc = cam_isp_add_cmd_buf_update( + isp_out_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + CAM_ISP_HW_CMD_WM_CONFIG_UPDATE, + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)wm_config, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed to update %s Out out_type:0x%X base_idx:%d bytes_used:%u rc:%d ctx_idx: %u", + ((hw_type == CAM_ISP_HW_TYPE_SFE) ? 
+ "SFE" : "VFE"), + wm_config->port_type, blob_info->base_info->idx, + bytes_used, rc, ctx->ctx_index); + return rc; + } + + total_used_bytes += bytes_used; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util( + CAM_ISP_IQ_BL, total_used_bytes, + kmd_buf_info, prepare, blob_info->entry_added); + blob_info->entry_added = true; + } + return rc; +} + + +static int cam_isp_blob_vfe_out_update_v2( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_vfe_out_config_v2 *vfe_out_config, + struct cam_hw_prepare_update_args *prepare, + uint32_t size_isp_out, + enum cam_isp_hw_type hw_type) +{ + struct cam_isp_vfe_wm_config_v2 *wm_config; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *isp_out_res; + struct cam_hw_intf *hw_intf = NULL; + bool is_sfe_rd = false; + uint32_t res_id_out, i; + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + uint32_t *cmd_buf_addr; + uint32_t bytes_used = 0; + int rc = 0; + + ctx = prepare->ctxt_to_hw_map; + + if (prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d, ctx_idx: %u", + prepare->num_hw_update_entries, ctx->ctx_index); + return -EINVAL; + } + + kmd_buf_info = blob_info->kmd_buf_info; + for (i = 0; i < vfe_out_config->num_ports; i++) { + wm_config = &vfe_out_config->wm_config_flex[i]; + if ((hw_type == CAM_ISP_HW_TYPE_VFE) && + (!cam_ife_hw_mgr_is_ife_out_port(wm_config->port_type))) + continue; + + if (hw_type == CAM_ISP_HW_TYPE_SFE) { + is_sfe_rd = cam_ife_hw_mgr_is_sfe_rd_res(wm_config->port_type); + if ((!cam_ife_hw_mgr_is_sfe_out_port(wm_config->port_type)) && + (!is_sfe_rd)) + continue; + } + + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "No free kmd memory for base idx: %d, 
ctx_idx: %u", + blob_info->base_info->idx, ctx->ctx_index); + rc = -ENOMEM; + return rc; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + + (kmd_buf_info->used_bytes / 4) + + (total_used_bytes / 4); + + + res_id_out = wm_config->port_type & 0xFF; + + CAM_DBG(CAM_ISP, "%s out config idx: %d port: 0x%x, ctx_idx: %u", + (hw_type == CAM_ISP_HW_TYPE_SFE ? "SFE" : "VFE"), + i, wm_config->port_type, ctx->ctx_index); + + if (res_id_out >= size_isp_out) { + CAM_ERR(CAM_ISP, "Invalid out port:0x%x, ctx_idx: %u", + wm_config->port_type, ctx->ctx_index); + return -EINVAL; + } + + if (ctx->vfe_out_map[res_id_out] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + isp_out_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id_out]]; + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + return rc; + } + + if (!isp_out_res->hw_res[blob_info->base_info->split_id]) + continue; + + rc = cam_isp_add_cmd_buf_update( + isp_out_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + CAM_ISP_HW_CMD_WM_CONFIG_UPDATE_V2, + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)wm_config, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed to update %s Out out_type:0x%X base_idx:%d bytes_used:%u rc:%d ctx_idx: %u", + ((hw_type == CAM_ISP_HW_TYPE_SFE) ? 
+ "SFE" : "VFE"), + wm_config->port_type, blob_info->base_info->idx, + bytes_used, rc, ctx->ctx_index); + return rc; + } + + total_used_bytes += bytes_used; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util( + CAM_ISP_UNUSED_BL, total_used_bytes, kmd_buf_info, prepare, + blob_info->entry_added); + blob_info->entry_added = true; + } + + return rc; +} + +static int cam_isp_blob_sensor_blanking_config( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_sensor_blanking_config *sensor_blanking_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_blanking_config blanking_config; + int rc = 0, i; + + ctx = prepare->ctxt_to_hw_map; + if (list_empty(&ctx->res_list_ife_src)) { + CAM_ERR(CAM_ISP, "Mux List empty, ctx_idx: %u", ctx->ctx_index); + return -ENODEV; + } + + list_for_each_entry(hw_mgr_res, + &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + blanking_config.node_res = hw_mgr_res->hw_res[i]; + blanking_config.vbi = sensor_blanking_config->vbi; + blanking_config.hbi = sensor_blanking_config->hbi; + + if (hw_intf && hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_BLANKING_UPDATE, + &blanking_config, + sizeof( + struct cam_isp_blanking_config)); + if (rc) + CAM_ERR(CAM_ISP, + "blanking update failed, ctx_idx: %u", ctx->ctx_index); + } + } + } + + return rc; +} + +static int cam_isp_blob_bw_limit_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_out_rsrc_bw_limiter_config *bw_limit_cfg, + struct cam_hw_prepare_update_args *prepare, + enum cam_isp_hw_type hw_type) +{ + struct cam_isp_wm_bw_limiter_config *wm_bw_limit_cfg; + struct cam_kmd_buf_info *kmd_buf_info; + struct 
cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *isp_out_res; + struct cam_hw_intf *hw_intf = NULL; + uint32_t res_id_out, i; + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + uint32_t *cmd_buf_addr; + uint32_t bytes_used = 0; + int rc = 0; + + ctx = prepare->ctxt_to_hw_map; + + if ((prepare->num_hw_update_entries + 1) >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries: %d max: %d ctx_idx: %u", + prepare->num_hw_update_entries, + prepare->max_hw_update_entries, ctx->ctx_index); + return -EINVAL; + } + + kmd_buf_info = blob_info->kmd_buf_info; + for (i = 0; i < bw_limit_cfg->num_ports; i++) { + wm_bw_limit_cfg = &bw_limit_cfg->bw_limiter_config_flex[i]; + res_id_out = wm_bw_limit_cfg->res_type & 0xFF; + + if ((hw_type == CAM_ISP_HW_TYPE_SFE) && + !((wm_bw_limit_cfg->res_type >= + CAM_ISP_SFE_OUT_RES_BASE) && + (wm_bw_limit_cfg->res_type < + (CAM_ISP_SFE_OUT_RES_BASE + max_sfe_out_res)))) + continue; + + if ((hw_type == CAM_ISP_HW_TYPE_VFE) && + !((wm_bw_limit_cfg->res_type >= + CAM_ISP_IFE_OUT_RES_BASE) && + (wm_bw_limit_cfg->res_type < + (CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res)))) + continue; + + CAM_DBG(CAM_ISP, + "%s BW limit config idx: %d port: 0x%x enable: %d [0x%x:0x%x] ctx: %u", + (hw_type == CAM_ISP_HW_TYPE_SFE ? 
"SFE" : "VFE"), + i, wm_bw_limit_cfg->res_type, + wm_bw_limit_cfg->enable_limiter, + wm_bw_limit_cfg->counter_limit[0], + wm_bw_limit_cfg->counter_limit[1], ctx->ctx_index); + + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "No free kmd memory for base idx: %d, ctx_idx: %u", + blob_info->base_info->idx, ctx->ctx_index); + rc = -ENOMEM; + return rc; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + + (kmd_buf_info->used_bytes / 4) + + (total_used_bytes / 4); + + if (hw_type == CAM_ISP_HW_TYPE_SFE) { + if (ctx->sfe_out_map[res_id_out] == 0xFF) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + isp_out_res = &ctx->res_list_sfe_out[ctx->sfe_out_map[res_id_out]]; + } else { + if (ctx->vfe_out_map[res_id_out] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id_out); + return -EINVAL; + } + + isp_out_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id_out]]; + } + + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + rc = -EINVAL; + return rc; + } + + if (!isp_out_res->hw_res[blob_info->base_info->split_id]) + continue; + + rc = cam_isp_add_cmd_buf_update( + isp_out_res->hw_res[blob_info->base_info->split_id], hw_intf, + blob_type, + CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG, + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)wm_bw_limit_cfg, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed to update %s BW limiter config for res:0x%x enable:%d [0x%x:0x%x] base_idx:%d bytes_used:%u rc:%d ctx_idx: %u", + ((hw_type == CAM_ISP_HW_TYPE_SFE) ? 
+ "SFE" : "VFE"), + wm_bw_limit_cfg->res_type, + wm_bw_limit_cfg->enable_limiter, + wm_bw_limit_cfg->counter_limit[0], + wm_bw_limit_cfg->counter_limit[1], + blob_info->base_info->idx, bytes_used, rc, ctx->ctx_index); + return rc; + } + + total_used_bytes += bytes_used; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util( + CAM_ISP_IQ_BL, total_used_bytes, + kmd_buf_info, prepare, blob_info->entry_added); + blob_info->entry_added = true; + } + + return rc; +} + +static int cam_isp_hw_mgr_add_cmd_buf_util( + struct cam_isp_hw_mgr_res *hw_mgr_res, + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_generic_blob_info *blob_info, + void *data, + uint32_t hw_cmd_type, + uint32_t blob_type) +{ + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_hw_intf *hw_intf = NULL; + uint32_t *cmd_buf_addr; + int rc = 0; + + kmd_buf_info = blob_info->kmd_buf_info; + if (kmd_buf_info->used_bytes < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - kmd_buf_info->used_bytes; + } else { + CAM_ERR(CAM_ISP, "No free kmd memory for base idx: %d used_bytes %u buf_size %u", + blob_info->base_info->idx, kmd_buf_info->used_bytes, kmd_buf_info->size); + return -ENOMEM; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + (kmd_buf_info->used_bytes / 4); + hw_intf = cam_ife_hw_mgr_get_hw_intf(blob_info->base_info); + + if (!hw_intf || blob_info->base_info->split_id >= CAM_ISP_HW_SPLIT_MAX) { + CAM_ERR(CAM_ISP, + "Invalid base %u type %u", blob_info->base_info->idx, + blob_info->base_info->hw_type); + rc = -EINVAL; + return rc; + } + + if (!hw_mgr_res->hw_res[blob_info->base_info->split_id]) + return 0; + + rc = cam_isp_add_cmd_buf_update( + hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf, blob_type, + hw_cmd_type, (void *)cmd_buf_addr, + kmd_buf_remain_size, data, &total_used_bytes); + if (rc) { + CAM_ERR(CAM_ISP, "Add cmd buffer failed idx: %d", + blob_info->base_info->idx); + 
return -EINVAL; + } + + if (total_used_bytes) { + cam_ife_mgr_update_hw_entries_util( + CAM_ISP_IQ_BL, total_used_bytes, + kmd_buf_info, prepare, blob_info->entry_added); + blob_info->entry_added = true; + } + return rc; +} + +static int cam_isp_update_ife_pdaf_cfg( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_lcr_rdi_cfg_args *isp_lcr_cfg, + uint32_t blob_type) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + uint32_t i; + uint32_t ife_res_id; + enum cam_ife_pix_path_res_id csid_path_id; + struct cam_isp_resource_node *res; + int rc = -EINVAL; + + /* + * For SFE cases, ife_res_id will contain corresponding input resource for vfe, + * since input config is done in vfe. + */ + csid_path_id = cam_ife_hw_mgr_get_ife_csid_rdi_res_type(isp_lcr_cfg->rdi_lcr_cfg->res_id); + if (csid_path_id == CAM_IFE_PIX_PATH_RES_MAX) { + CAM_ERR(CAM_ISP, "Invalid res_id %u, ctx_idx: %u", + isp_lcr_cfg->rdi_lcr_cfg->res_id, ctx->ctx_index); + return -EINVAL; + } + + ife_res_id = cam_convert_csid_rdi_res_to_ife_src(csid_path_id); + if (ife_res_id == CAM_ISP_HW_VFE_IN_MAX) { + CAM_ERR(CAM_ISP, "Invalid res_id %u, ctx_idx: %u", + isp_lcr_cfg->rdi_lcr_cfg->res_id, ctx->ctx_index); + return -EINVAL; + } + + isp_lcr_cfg->ife_src_res_id = ife_res_id; + CAM_DBG(CAM_ISP, "Ctx %d res: %u lcr %u id %u ctx_type %u", ctx->ctx_index, ife_res_id, + isp_lcr_cfg->rdi_lcr_cfg->res_id, blob_info->base_info->idx, ctx->ctx_type); + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + res = hw_mgr_res->hw_res[i]; + /* + * for SFE cases, only CAMIF resource is + * acquired. We need any res to go to vfe drivers + * to update the buffer. 
For non-sfe case, we match + * with the incoming res_id + */ + if ((ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE && + res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) || + res->res_id == ife_res_id) { + + rc = cam_isp_hw_mgr_add_cmd_buf_util(hw_mgr_res, prepare, + blob_info, (void *)isp_lcr_cfg, + CAM_ISP_HW_CMD_RDI_LCR_CFG, blob_type); + if (rc) + CAM_ERR(CAM_ISP, + "Ctx %u res: %u lcr %u id %u ctx_type %u rc %u", + ctx->ctx_index, ife_res_id, + isp_lcr_cfg->rdi_lcr_cfg->res_id, + blob_info->base_info->idx, ctx->ctx_type, rc); + goto end; + } + } + } +end: + return rc; +} + +static int cam_isp_config_rdi_lcr_csid_init_params( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_lcr_rdi_config *rdi_lcr_cfg, + uint32_t blob_type) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *res; + int rc = -EINVAL; + uint32_t csid_res_id = 0; + uint32_t acquired_res_id_mask = 0; + + csid_res_id = cam_ife_hw_mgr_get_ife_csid_rdi_res_type( + rdi_lcr_cfg->res_id); + CAM_DBG(CAM_ISP, + "Ctx: %u csid_res_id: %u rdi_lcr: %u sfe_shdr %u ctx_ctype %u", ctx->ctx_index, + csid_res_id, rdi_lcr_cfg->res_id, ctx->flags.is_sfe_shdr, ctx->ctx_type); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + if (!hw_mgr_res->hw_res[0]) + continue; + + if (hw_mgr_res->res_id < CAM_IFE_PIX_PATH_RES_RDI_0 || + hw_mgr_res->res_id > CAM_IFE_PIX_PATH_RES_RDI_2) + continue; + + if (!ctx->flags.is_sfe_shdr && hw_mgr_res->res_id != csid_res_id) + continue; + + res = hw_mgr_res->hw_res[0]; + rc = res->hw_intf->hw_ops.process_cmd(res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_RDI_LCR_CFG, res, sizeof(*res)); + acquired_res_id_mask |= BIT(res->res_id); + if (rc) { + CAM_ERR(CAM_ISP, + "Ctx: %u csid_res_id: %u rdi_lcr: %u sfe_shdr %u ctx_ctype %u", + ctx->ctx_index, csid_res_id, rdi_lcr_cfg->res_id, + ctx->flags.is_sfe_shdr, 
ctx->ctx_type); + break; + } + } + + if (!(acquired_res_id_mask & BIT(csid_res_id))) { + CAM_ERR(CAM_ISP, + "Ctx: %u Unacquired csid_res_id: %u rdi_lcr: %u sfe_shdr %u ctx_ctype %u", + ctx->ctx_index, csid_res_id, rdi_lcr_cfg->res_id, + ctx->flags.is_sfe_shdr, ctx->ctx_type); + rc = -EINVAL; + } + return rc; +} + +static int cam_isp_blob_ife_rdi_lcr_config( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_lcr_rdi_config *rdi_lcr_cfg, + uint32_t blob_type) +{ + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + struct cam_isp_lcr_rdi_cfg_args isp_cfg_args = {0}; + int rc = -EINVAL; + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)prepare->priv; + CAM_DBG(CAM_ISP, + "Blob opcode %u res %u ctx_type %u shdr %u rdi_lcr %u ctx_idx: %u", + prepare_hw_data->packet_opcode_type, rdi_lcr_cfg->res_id, ctx->ctx_type, + ctx->flags.is_sfe_shdr, ctx->flags.rdi_lcr_en, ctx->ctx_index); + + if (prepare_hw_data->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) { + rc = cam_isp_config_rdi_lcr_csid_init_params(ctx, + prepare, blob_info, rdi_lcr_cfg, blob_type); + if (rc) { + CAM_ERR(CAM_ISP, + "CSID param failed Ctx: %d rdi_lcr: %u ctx_type: %u ctx_idx: %u", + ctx->ctx_index, rdi_lcr_cfg->res_id, ctx->ctx_type, ctx->ctx_index); + return rc; + } + + isp_cfg_args.is_init = true; + ctx->flags.rdi_lcr_en = true; + } else if (!ctx->flags.rdi_lcr_en || !ctx->flags.is_sfe_shdr) { + /* + * we don't expect blob for non-shdr cases other than Init Packet, + * as the RDI input would remain same for the session. 
+ */ + CAM_ERR(CAM_ISP, + "Unexpected Blob opcode %u res %u ctx_type %u shdr %u rdi_lcr %u ctx_idx: %u", + prepare_hw_data->packet_opcode_type, rdi_lcr_cfg->res_id, ctx->ctx_type, + ctx->flags.is_sfe_shdr, ctx->flags.rdi_lcr_en, ctx->ctx_index); + return rc; + } + + isp_cfg_args.rdi_lcr_cfg = rdi_lcr_cfg; + rc = cam_isp_update_ife_pdaf_cfg(ctx, prepare, blob_info, + &isp_cfg_args, blob_type); + if (rc) { + CAM_ERR(CAM_ISP, + "IFE param failed %u res %u ctx_type %u shdr %u rdi_lcr %u ctx_idx: %u", + prepare_hw_data->packet_opcode_type, rdi_lcr_cfg->res_id, ctx->ctx_type, + ctx->flags.is_sfe_shdr, ctx->flags.rdi_lcr_en, ctx->ctx_index); + return rc; + } + + return rc; +} + +static inline int cam_isp_validate_bw_limiter_blob( + uint32_t blob_size, + struct cam_isp_out_rsrc_bw_limiter_config *bw_limit_config) +{ + if ((bw_limit_config->num_ports > (max_ife_out_res + + max_sfe_out_res)) || + (bw_limit_config->num_ports == 0)) { + CAM_ERR(CAM_ISP, + "Invalid num_ports:%u in bw limit config", + bw_limit_config->num_ports); + return -EINVAL; + } + + /* Check for integer overflow */ + if (bw_limit_config->num_ports != 1) { + if (sizeof(struct cam_isp_wm_bw_limiter_config) > ((UINT_MAX - + sizeof(struct cam_isp_out_rsrc_bw_limiter_config)) / + (bw_limit_config->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in bw limit config num_ports:%u size per port:%lu", + bw_limit_config->num_ports, + sizeof(struct cam_isp_wm_bw_limiter_config)); + return -EINVAL; + } + } + + if (blob_size < (sizeof(struct cam_isp_out_rsrc_bw_limiter_config) + + (bw_limit_config->num_ports - 1) * + sizeof(struct cam_isp_wm_bw_limiter_config))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu", + blob_size, sizeof(struct cam_isp_out_rsrc_bw_limiter_config) + + (bw_limit_config->num_ports - 1) * + sizeof(struct cam_isp_wm_bw_limiter_config)); + return -EINVAL; + } + + return 0; +} + +static int cam_isp_blob_csid_init_config_update( + struct cam_hw_prepare_update_args *prepare, + struct 
cam_isp_init_config *init_config) +{ + int i, rc = -EINVAL; + struct cam_hw_intf *hw_intf; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_init_config_update init_cfg_update; + + ctx = prepare->ctxt_to_hw_map; + + /* Assign init config */ + init_cfg_update.init_config = init_config; + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + init_cfg_update.node_res = + hw_mgr_res->hw_res[i]; + CAM_DBG(CAM_ISP, "Init config update for res_id: %u, ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE, + &init_cfg_update, + sizeof( + struct cam_isp_hw_init_config_update)); + if (rc) + CAM_ERR(CAM_ISP, + "Init cfg update failed rc: %d, ctx_idx: %u", + rc, ctx->ctx_index); + } + } + } + + return rc; +} +static int cam_isp_blob_ife_init_config_update( + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_init_config *init_config) +{ + int i, rc = -EINVAL; + struct cam_hw_intf *hw_intf; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_init_config_update init_cfg_update; + + ctx = prepare->ctxt_to_hw_map; + + /* Assign init config */ + init_cfg_update.init_config = init_config; + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + if (hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + init_cfg_update.node_res = + hw_mgr_res->hw_res[i]; + CAM_DBG(CAM_ISP, "Init config update for res_id: %u ctx_idx: %u", + hw_mgr_res->res_id, ctx->ctx_index); + + rc = 
hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE, + &init_cfg_update, + sizeof( + struct cam_isp_hw_init_config_update)); + if (rc) + CAM_ERR(CAM_ISP, "Init cfg update failed rc: %d ctx: %u", + rc, ctx->ctx_index); + } + } + } + + return rc; +} + +static int cam_isp_validate_scratch_buffer_blob( + uint32_t blob_size, + struct cam_ife_hw_mgr_ctx *ife_mgr_ctx, + struct cam_isp_sfe_init_scratch_buf_config *scratch_config) +{ + if (!(ife_mgr_ctx->flags.is_sfe_fs || + ife_mgr_ctx->flags.is_sfe_shdr)) { + CAM_ERR(CAM_ISP, + "Not SFE sHDR/FS context: %u scratch buf blob not supported, ctx_idx: %u", + ife_mgr_ctx->ctx_index, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + if (blob_size < + sizeof(struct cam_isp_sfe_init_scratch_buf_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + if ((scratch_config->num_ports > + (CAM_SFE_FE_RDI_NUM_MAX + CAM_IFE_SCRATCH_NUM_MAX)) || + (scratch_config->num_ports == 0)) { + CAM_ERR(CAM_ISP, + "Invalid num_ports %u in scratch buf config, ctx_idx: %u", + scratch_config->num_ports, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (scratch_config->num_ports != 1) { + if (sizeof(struct cam_isp_sfe_scratch_buf_info) > + ((UINT_MAX - + sizeof(struct cam_isp_sfe_init_scratch_buf_config)) / + (scratch_config->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in scratch config num_ports: %u size per port: %lu ctx: %u", + scratch_config->num_ports, + sizeof(struct cam_isp_sfe_scratch_buf_info), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < + (sizeof(struct cam_isp_sfe_init_scratch_buf_config) + + ((int32_t)scratch_config->num_ports - 1) * + sizeof(struct cam_isp_sfe_scratch_buf_info))) { + CAM_ERR(CAM_ISP, "Invalid blob size: %u expected: %lu ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_sfe_init_scratch_buf_config) + + (scratch_config->num_ports - 1) 
* sizeof(struct cam_isp_sfe_scratch_buf_info), ife_mgr_ctx->ctx_index);
		return -EINVAL;
	}

	return 0;
}

/*
 * cam_isp_copy_fcg_config()
 *
 * @brief: Deep-copies a userspace-provided generic FCG configuration
 *         blob into the driver-internal FCG representation.
 *
 * @fcg_args_internal: Destination internal FCG config to fill in
 * @fcg_args:          Source FCG config parsed from the packet blob
 *                     (assumed already range-checked by
 *                     cam_isp_validate_fcg_configs — TODO confirm all
 *                     callers do so)
 *
 * Copies num_ch_ctx/num_predictions, then per channel/context copies
 * the per-prediction PHASE and/or STATS R/G/B indices whenever the
 * corresponding bit is set in fcg_enable_mask. num_types accumulates,
 * across ALL channels, how many PHASE/STATS blocks were enabled.
 */
static void cam_isp_copy_fcg_config(
	struct cam_isp_fcg_config_internal *fcg_args_internal,
	struct cam_isp_generic_fcg_config *fcg_args)
{
	struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx_internal;
	struct cam_isp_ch_ctx_fcg_config *fcg_ch_ctx;
	struct cam_isp_predict_fcg_config_internal *fcg_predict_internal;
	struct cam_isp_predict_fcg_config *fcg_predict;
	uint32_t fcg_ch_ctx_size, num_types = 0;
	int i, j;

	/* Copy generic FCG config */
	fcg_args_internal->num_ch_ctx = fcg_args->num_ch_ctx;
	fcg_args_internal->num_predictions = fcg_args->num_predictions;
	/*
	 * Each variable-length channel/context entry embeds
	 * num_predictions predict configs in its trailing flex array
	 * (the struct already accounts for one), so the effective
	 * element stride must be computed by hand.
	 */
	fcg_ch_ctx_size = sizeof(struct cam_isp_ch_ctx_fcg_config) +
		(fcg_args->num_predictions - 1) *
		sizeof(struct cam_isp_predict_fcg_config);

	/* Copy channel/context FCG config */
	for (i = 0; i < fcg_args->num_ch_ctx; i++) {
		fcg_ch_ctx_internal = &fcg_args_internal->ch_ctx_fcg_configs[i];
		/*
		 * Manual byte-offset indexing with the computed stride;
		 * plain array indexing would use the wrong element size.
		 * (void * arithmetic is a GCC extension used by the kernel.)
		 */
		fcg_ch_ctx = (struct cam_isp_ch_ctx_fcg_config *)
			((void *)(fcg_args->ch_ctx_fcg_configs_flex) +
			i * fcg_ch_ctx_size);

		fcg_ch_ctx_internal->fcg_ch_ctx_id =
			fcg_ch_ctx->fcg_ch_ctx_id;
		fcg_ch_ctx_internal->fcg_enable_mask =
			fcg_ch_ctx->fcg_enable_mask;
		if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_PHASE) {
			for (j = 0; j < fcg_args->num_predictions; j++) {
				fcg_predict_internal =
					&fcg_ch_ctx_internal->predicted_fcg_configs[j];
				fcg_predict = &fcg_ch_ctx->predicted_fcg_configs_flex[j];

				/* Copy 3 PHASE related values for R/G/B channel */
				fcg_predict_internal->phase_index_b =
					fcg_predict->phase_index_b;
				fcg_predict_internal->phase_index_r =
					fcg_predict->phase_index_r;
				fcg_predict_internal->phase_index_g =
					fcg_predict->phase_index_g;
				CAM_DBG(CAM_ISP,
					"Copy FCG PHASE config on ch 0x%x, prediction idx %d, phase_index_g: %u, phase_index_r: %u, phase_index_b: %u",
					fcg_ch_ctx_internal->fcg_ch_ctx_id, j,
					fcg_predict_internal->phase_index_g,
					fcg_predict_internal->phase_index_r,
					fcg_predict_internal->phase_index_b);
			}
			num_types += 1;
		}
		if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_STATS) {
			for (j = 0; j < fcg_args->num_predictions; j++) {
				fcg_predict_internal =
					&fcg_ch_ctx_internal->predicted_fcg_configs[j];
				fcg_predict = &fcg_ch_ctx->predicted_fcg_configs_flex[j];

				/* Copy 3 STATS related values for R/G/B channel */
				fcg_predict_internal->stats_index_b =
					fcg_predict->stats_index_b;
				fcg_predict_internal->stats_index_r =
					fcg_predict->stats_index_r;
				fcg_predict_internal->stats_index_g =
					fcg_predict->stats_index_g;
				CAM_DBG(CAM_ISP,
					"Copy FCG STATS config on ch 0x%x, prediction idx %d, stats_index_g: %u, stats_index_r: %u, stats_index_b: %u",
					fcg_ch_ctx_internal->fcg_ch_ctx_id, j,
					fcg_predict_internal->stats_index_g,
					fcg_predict_internal->stats_index_r,
					fcg_predict_internal->stats_index_b);
			}
			num_types += 1;
		}
	}
	/* Total PHASE/STATS blocks enabled across every channel/context */
	fcg_args_internal->num_types = num_types;
	CAM_DBG(CAM_ISP,
		"Inspect on copied FCG config, num_types: %u, num_ch_ctx: %u, num_predictions: %u",
		num_types, fcg_args_internal->num_ch_ctx,
		fcg_args_internal->num_predictions);
}

/*
 * cam_isp_blob_fcg_config_prepare()
 *
 * @brief: Validates the size of an FCG config blob and stashes a copy
 *         of it in the request's prepare data (fcg_info) for the given
 *         hardware type.
 *
 * @fcg_config_args: FCG config blob from userspace
 * @prepare:         Per-request prepare/update bookkeeping; priv holds
 *                   the cam_isp_prepare_hw_update_data being filled
 * @hw_type:         CAM_ISP_HW_TYPE_SFE or CAM_ISP_HW_TYPE_VFE; any
 *                   other value is rejected
 *
 * @return: 0 on success, -EINVAL on duplicate blob, size mismatch,
 *          or unsupported hw_type.
 */
static int cam_isp_blob_fcg_config_prepare(
	struct cam_isp_generic_fcg_config *fcg_config_args,
	struct cam_hw_prepare_update_args *prepare,
	enum cam_isp_hw_type hw_type)
{
	struct cam_ife_hw_mgr_ctx *ctx = NULL;
	struct cam_isp_prepare_hw_update_data *prepare_hw_data;
	struct cam_isp_fcg_config_info *fcg_info;
	uint32_t fcg_size;
	uint64_t request_id;

	ctx = prepare->ctxt_to_hw_map;
	request_id = prepare->packet->header.request_id;
	prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) prepare->priv;
	fcg_info = &(prepare_hw_data->fcg_info);

	/* Only one FCG blob per hw type is allowed per request */
	if ((hw_type == CAM_ISP_HW_TYPE_SFE) &&
		fcg_info->sfe_fcg_online) {
		CAM_ERR(CAM_ISP,
			"SFE FCG config is sent more than once, ctx_id: %u, request_id: %llu",
			ctx->ctx_index, request_id);
		return -EINVAL;
	}

	if ((hw_type == CAM_ISP_HW_TYPE_VFE) &&
		fcg_info->ife_fcg_online) {
		CAM_ERR(CAM_ISP,
			"IFE/MC_TFE FCG config is sent more than once, ctx_id: %u, request_id: %llu",
			ctx->ctx_index, request_id);
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP,
		"Start storing FCG config in req_isp on ctx_idx: %u, hw_type: %d, request_id: %llu",
		ctx->ctx_index, hw_type, request_id);

	/*
	 * Recompute the exact expected blob size from the counts:
	 * header + (num_ch_ctx - 1) extra channel entries, each channel
	 * additionally carrying (num_predictions - 1) extra predict
	 * entries in its flex array. Must match the size userspace
	 * claimed, else the blob is malformed.
	 */
	fcg_size = (uint32_t)sizeof(struct cam_isp_generic_fcg_config);
	fcg_size += (fcg_config_args->num_ch_ctx - 1) *
		sizeof(struct cam_isp_ch_ctx_fcg_config);
	fcg_size += fcg_config_args->num_ch_ctx *
		(fcg_config_args->num_predictions - 1) *
		sizeof(struct cam_isp_predict_fcg_config);

	if (fcg_size != fcg_config_args->size) {
		CAM_ERR(CAM_ISP,
			"Mismatched size between userspace provides and real comsumption %u - %u, ctx_idx: %u, request_id: %llu",
			fcg_config_args->size, fcg_size,
			ctx->ctx_index, request_id);
		return -EINVAL;
	}

	switch (hw_type) {
	case CAM_ISP_HW_TYPE_SFE:
		fcg_info->sfe_fcg_online = true;
		cam_isp_copy_fcg_config(&fcg_info->sfe_fcg_config,
			fcg_config_args);
		break;
	case CAM_ISP_HW_TYPE_VFE:
		/* IFE and MC_TFE share the VFE hw type/path here */
		fcg_info->ife_fcg_online = true;
		cam_isp_copy_fcg_config(&fcg_info->ife_fcg_config,
			fcg_config_args);
		break;
	default:
		CAM_ERR(CAM_ISP,
			"Failed in parsing FCG configuration for hw_type: %u, ctx_idx: %u, request_id: %llu",
			hw_type, ctx->ctx_index, request_id);
		return -EINVAL;
	}

	return 0;
}

/*
 * cam_isp_validate_fcg_configs()
 *
 * @brief: Range-checks the channel/context and prediction counts of an
 *         FCG config blob against the hardware capability limits.
 *
 * @fcg_config_args:     FCG config blob from userspace
 * @max_fcg_ch_ctx:      Max supported channels/contexts (from caps)
 * @max_fcg_predictions: Max supported predictions (from caps)
 * @ife_mgr_ctx:         HW mgr context, used only for log decoration
 *
 * @return: 0 if both counts are in [1, max], -EINVAL otherwise.
 *          Zero counts are rejected, so later "count - 1" size math
 *          cannot underflow.
 */
static int cam_isp_validate_fcg_configs(
	struct cam_isp_generic_fcg_config *fcg_config_args,
	uint32_t max_fcg_ch_ctx,
	uint32_t max_fcg_predictions,
	struct cam_ife_hw_mgr_ctx *ife_mgr_ctx)
{
	if ((fcg_config_args->num_ch_ctx > max_fcg_ch_ctx) ||
		(fcg_config_args->num_ch_ctx == 0)) {
		CAM_ERR(CAM_ISP, "Invalid num of channels/contexts %u in FCG config, ctx_idx: %u",
			fcg_config_args->num_ch_ctx, ife_mgr_ctx->ctx_index);
		return -EINVAL;
	}

	if ((fcg_config_args->num_predictions > max_fcg_predictions) ||
		(fcg_config_args->num_predictions == 0)) {
		CAM_ERR(CAM_ISP, "Invalid num of predictions %u in FCG config, ctx_idx: %u",
			fcg_config_args->num_predictions, ife_mgr_ctx->ctx_index);
		return -EINVAL;
	}

	return 0;
}

/*
 * Generic blob handler: validates the raw blob args, then dispatches on
 * blob_type to the per-blob parse/update helpers (continues below).
 */
static int cam_isp_packet_generic_blob_handler(void *user_data,
	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
{
	int rc = 0, i;
	struct cam_isp_generic_blob_info *blob_info = user_data;
	struct cam_ife_hw_mgr_ctx *ife_mgr_ctx = NULL;
	struct cam_hw_prepare_update_args *prepare = NULL;

	if (!blob_data || (blob_size == 0) || !blob_info) {
		CAM_ERR(CAM_ISP, "Invalid args data %pK size %d info %pK",
			blob_data, blob_size, blob_info);
		return -EINVAL;
	}

	prepare = blob_info->prepare;
	if (!prepare || !prepare->ctxt_to_hw_map) {
		CAM_ERR(CAM_ISP, "Failed. prepare is NULL, blob_type %d",
			blob_type);
		return -EINVAL;
	}

	ife_mgr_ctx = prepare->ctxt_to_hw_map;
	CAM_DBG(CAM_ISP, "Context[%pK][%u] blob_type=%d, blob_size=%d",
		ife_mgr_ctx, ife_mgr_ctx->ctx_index, blob_type, blob_size);

	switch (blob_type) {
	case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: {
		struct cam_isp_resource_hfr_config *hfr_config;

		if (blob_size < sizeof(struct cam_isp_resource_hfr_config)) {
			CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u",
				blob_size, ife_mgr_ctx->ctx_index);
			return -EINVAL;
		}

		hfr_config = (struct cam_isp_resource_hfr_config *)blob_data;

		if (hfr_config->num_ports > g_ife_hw_mgr.isp_caps.max_vfe_out_res_type ||
			hfr_config->num_ports == 0) {
			CAM_ERR(CAM_ISP, "Invalid num_ports %u in HFR config, ctx_idx: %u",
				hfr_config->num_ports, ife_mgr_ctx->ctx_index);
			return -EINVAL;
		}

		/* Check for integer overflow */
		if (hfr_config->num_ports != 1) {
			if (sizeof(struct cam_isp_port_hfr_config) >
				((UINT_MAX -
				sizeof(struct cam_isp_resource_hfr_config)) /
				(hfr_config->num_ports - 1))) {
				CAM_ERR(CAM_ISP,
					"Max size exceeded in hfr config num_ports:%u size per port:%lu ctx_idx: %u",
					hfr_config->num_ports,
					sizeof(struct
cam_isp_port_hfr_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < (sizeof(struct cam_isp_resource_hfr_config) + + (hfr_config->num_ports - 1) * + sizeof(struct cam_isp_port_hfr_config))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_resource_hfr_config) + + (hfr_config->num_ports - 1) * + sizeof(struct cam_isp_port_hfr_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_hfr_update(blob_type, blob_info, + hfr_config, prepare, max_ife_out_res, + CAM_ISP_HW_TYPE_VFE); + if (rc) + CAM_ERR(CAM_ISP, "HFR Update Failed, ctx_idx: %u", ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: { + size_t clock_config_size = 0; + struct cam_isp_clock_config *clock_config; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + + if (blob_size < sizeof(struct cam_isp_clock_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + clock_config = (struct cam_isp_clock_config *)blob_data; + + if ((clock_config->num_rdi > CAM_IFE_RDI_NUM_MAX) || + (clock_config->num_rdi == 0)) { + CAM_ERR(CAM_ISP, "Invalid num_rdi %u in clock config, ctx_idx: %u", + clock_config->num_rdi, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (clock_config->num_rdi > 1) { + if (sizeof(uint64_t) > ((UINT_MAX - + sizeof(struct cam_isp_clock_config)) / + (clock_config->num_rdi - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in clock config num_rdi:%u size per port:%lu ctx_idx: %u", + clock_config->num_rdi, + sizeof(uint64_t), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if ((clock_config->num_rdi != 0) && (blob_size < + (sizeof(struct cam_isp_clock_config) + + sizeof(uint64_t) * (clock_config->num_rdi - 1)))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(uint32_t) * 2 + sizeof(uint64_t) * 
+ (clock_config->num_rdi + 2), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + clock_config_size = sizeof(struct cam_isp_clock_config) + + ((clock_config->num_rdi - 1) * + sizeof(clock_config->rdi_hz)); + memcpy(&prepare_hw_data->bw_clk_config.ife_clock_config, clock_config, + clock_config_size); + prepare_hw_data->bw_clk_config.ife_clock_config_valid = true; + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: { + struct cam_isp_bw_config *bw_config; + struct cam_isp_bw_config *bw_config_u; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + size_t bw_config_size; + + CAM_WARN_RATE_LIMIT_CUSTOM(CAM_PERF, 300, 1, + "Deprecated Blob TYPE_BW_CONFIG"); + if (blob_size < sizeof(struct cam_isp_bw_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + bw_config_u = (struct cam_isp_bw_config *)blob_data; + + if (bw_config_u->num_rdi > CAM_IFE_RDI_NUM_MAX || !bw_config_u->num_rdi) { + CAM_ERR(CAM_ISP, "Invalid num_rdi %u in bw config, ctx_idx: %u", + bw_config_u->num_rdi, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + bw_config_size = sizeof(struct cam_isp_bw_config) + ((bw_config_u->num_rdi-1)* + sizeof(struct cam_isp_bw_vote)); + + rc = cam_common_mem_kdup((void **)&bw_config, bw_config_u, bw_config_size); + if (rc) { + CAM_ERR(CAM_ISP, "Alloc and copy request bw_config failed"); + return rc; + } + if (bw_config_u->num_rdi != bw_config->num_rdi) { + CAM_ERR(CAM_ISP, "num_rdi changed,userspace:%d, kernel:%d", bw_config_u->num_rdi, + bw_config->num_rdi); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + /* Check for integer overflow */ + if (bw_config->num_rdi > 1) { + if (sizeof(struct cam_isp_bw_vote) > ((UINT_MAX - + sizeof(struct cam_isp_bw_config)) / + (bw_config->num_rdi - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in bw config num_rdi:%u size per port:%lu ctx_idx: %u", + 
bw_config->num_rdi, + sizeof(struct cam_isp_bw_vote), ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + } + + if ((bw_config->num_rdi != 0) && (blob_size < + (sizeof(struct cam_isp_bw_config) + + (bw_config->num_rdi - 1) * + sizeof(struct cam_isp_bw_vote)))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, sizeof(struct cam_isp_bw_config) + + (bw_config->num_rdi - 1) * + sizeof(struct cam_isp_bw_vote), ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + if (!prepare || !prepare->priv || + (bw_config->usage_type >= CAM_ISP_HW_USAGE_TYPE_MAX)) { + CAM_ERR(CAM_ISP, "Invalid inputs usage type %d, ctx_idx: %u", + bw_config->usage_type, ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + + memcpy(&prepare_hw_data->bw_clk_config.bw_config, bw_config, + sizeof(prepare_hw_data->bw_clk_config.bw_config)); + ife_mgr_ctx->bw_config_version = CAM_ISP_BW_CONFIG_V1; + prepare_hw_data->bw_clk_config.bw_config_valid = true; + cam_common_mem_free(bw_config); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V2: { + struct cam_isp_bw_config_v2 *bw_config; + struct cam_isp_bw_config_v2 *bw_config_u; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + struct cam_cpas_axi_per_path_bw_vote *path_vote; + size_t bw_config_size; + + if (blob_size < sizeof(struct cam_isp_bw_config_v2)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u ctx_idx: %u", blob_size, + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + bw_config_u = (struct cam_isp_bw_config_v2 *)blob_data; + + if (bw_config_u->num_paths > CAM_ISP_MAX_PER_PATH_VOTES || + !bw_config_u->num_paths) { + CAM_ERR(CAM_ISP, "Invalid num paths %d ctx_idx: %u", + bw_config_u->num_paths, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + bw_config_size = sizeof(struct cam_isp_bw_config_v2) + ((bw_config_u->num_paths-1)* + 
sizeof(struct cam_axi_per_path_bw_vote)); + + rc = cam_common_mem_kdup((void **)&bw_config, bw_config_u, bw_config_size); + if (rc) { + CAM_ERR(CAM_ISP, "Alloc and copy request bw_config failed"); + return rc; + } + + if (bw_config_u->num_paths != bw_config->num_paths) { + CAM_ERR(CAM_ISP, "num_paths changed,userspace:%d, kernel:%d", bw_config_u->num_paths, + bw_config->num_paths); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + /* Check for integer overflow */ + if (bw_config->num_paths > 1) { + if (sizeof(struct cam_axi_per_path_bw_vote) > + ((UINT_MAX - + sizeof(struct cam_isp_bw_config_v2)) / + (bw_config->num_paths - 1))) { + CAM_ERR(CAM_ISP, + "Size exceeds limit paths:%u size per path:%lu ctx_idx: %u", + bw_config->num_paths - 1, + sizeof( + struct cam_axi_per_path_bw_vote), ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + } + + if ((bw_config->num_paths != 0) && (blob_size < + (sizeof(struct cam_isp_bw_config_v2) + + (bw_config->num_paths - 1) * + sizeof(struct cam_axi_per_path_bw_vote)))) { + CAM_ERR(CAM_ISP, + "Invalid blob size: %u, num_paths: %u, bw_config size: %lu, per_path_vote size: %lu, ctx_idx: %u", + blob_size, bw_config->num_paths, + sizeof(struct cam_isp_bw_config_v2), + sizeof(struct cam_axi_per_path_bw_vote), ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + if (!prepare || !prepare->priv || + (bw_config->usage_type >= CAM_ISP_HW_USAGE_TYPE_MAX)) { + CAM_ERR(CAM_ISP, "Invalid inputs usage type %d ctx_idx: %u", + bw_config->usage_type, ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) prepare->priv; + + prepare_hw_data->bw_clk_config.bw_config_v2.usage_type = bw_config->usage_type; + prepare_hw_data->bw_clk_config.bw_config_v2.num_paths = bw_config->num_paths; + + for (i = 0; i < bw_config->num_paths; i++) { + path_vote = 
&prepare_hw_data->bw_clk_config.bw_config_v2.axi_path[i]; + path_vote->usage_data = bw_config->axi_path_flex[i].usage_data; + path_vote->transac_type = bw_config->axi_path_flex[i].transac_type; + path_vote->path_data_type = bw_config->axi_path_flex[i].path_data_type; + path_vote->vote_level = 0; + path_vote->camnoc_bw = bw_config->axi_path_flex[i].camnoc_bw; + path_vote->mnoc_ab_bw = bw_config->axi_path_flex[i].mnoc_ab_bw; + path_vote->mnoc_ib_bw = bw_config->axi_path_flex[i].mnoc_ib_bw; + } + + ife_mgr_ctx->bw_config_version = CAM_ISP_BW_CONFIG_V2; + prepare_hw_data->bw_clk_config.bw_config_valid = true; + cam_common_mem_free(bw_config); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V3: { + struct cam_isp_bw_config_v3 *bw_config; + struct cam_isp_bw_config_v3 *bw_config_u; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + struct cam_cpas_axi_per_path_bw_vote *path_vote; + size_t bw_config_size; + + if (blob_size < sizeof(struct cam_isp_bw_config_v3)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u ctx_idx: %u", blob_size, + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + bw_config_u = (struct cam_isp_bw_config_v3 *)blob_data; + + if (bw_config_u->num_paths > CAM_ISP_MAX_PER_PATH_VOTES || + !bw_config_u->num_paths) { + CAM_ERR(CAM_ISP, "Invalid num paths %d, ctx_idx: %u", + bw_config_u->num_paths, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + bw_config_size = sizeof(struct cam_isp_bw_config_v3) + ((bw_config_u->num_paths-1) * + sizeof(struct cam_axi_per_path_bw_vote_v2)); + + rc = cam_common_mem_kdup((void **)&bw_config, bw_config_u, bw_config_size); + if (rc) { + CAM_ERR(CAM_ISP, "Alloc and copy request bw_config failed"); + return rc; + } + + if (bw_config_u->num_paths != bw_config->num_paths) { + CAM_ERR(CAM_ISP, "num_paths changed,userspace:%d, kernel:%d", bw_config_u->num_paths, + bw_config->num_paths); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + /* Check for integer overflow */ + if (bw_config->num_paths > 1) { + if 
(sizeof(struct cam_axi_per_path_bw_vote_v2) > ((UINT_MAX - + sizeof(struct cam_isp_bw_config_v3)) / + (bw_config->num_paths - 1))) { + CAM_ERR(CAM_ISP, + "Size exceeds limit paths:%u size per path:%lu ctx_idx: %u", + bw_config->num_paths - 1, + sizeof(struct cam_axi_per_path_bw_vote_v2), + ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + } + + if ((bw_config->num_paths != 0) && (blob_size < + (sizeof(struct cam_isp_bw_config_v3) + + (bw_config->num_paths - 1) * + sizeof(struct cam_axi_per_path_bw_vote_v2)))) { + CAM_ERR(CAM_ISP, + "Invalid blob size: %u, num_paths: %u, bw_config size: %lu, per_path_vote size: %lu ctx_idx: %u", + blob_size, bw_config->num_paths, + sizeof(struct cam_isp_bw_config_v3), + sizeof(struct cam_axi_per_path_bw_vote_v2), ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + if (!prepare || !prepare->priv || + (bw_config->usage_type >= CAM_ISP_HW_USAGE_TYPE_MAX)) { + CAM_ERR(CAM_ISP, "Invalid inputs usage type %d, ctx_idx: %u", + bw_config->usage_type, ife_mgr_ctx->ctx_index); + cam_common_mem_free(bw_config); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) prepare->priv; + + prepare_hw_data->bw_clk_config.bw_config_v2.usage_type = bw_config->usage_type; + prepare_hw_data->bw_clk_config.bw_config_v2.num_paths = bw_config->num_paths; + + for (i = 0; i < bw_config->num_paths; i++) { + path_vote = &prepare_hw_data->bw_clk_config.bw_config_v2.axi_path[i]; + path_vote->usage_data = bw_config->axi_path_flex[i].usage_data; + path_vote->transac_type = bw_config->axi_path_flex[i].transac_type; + path_vote->path_data_type = bw_config->axi_path_flex[i].path_data_type; + path_vote->vote_level = bw_config->axi_path_flex[i].vote_level; + path_vote->camnoc_bw = bw_config->axi_path_flex[i].camnoc_bw; + path_vote->mnoc_ab_bw = bw_config->axi_path_flex[i].mnoc_ab_bw; + path_vote->mnoc_ib_bw = bw_config->axi_path_flex[i].mnoc_ib_bw; + } + + 
ife_mgr_ctx->bw_config_version = CAM_ISP_BW_CONFIG_V3; + prepare_hw_data->bw_clk_config.bw_config_valid = true; + cam_common_mem_free(bw_config); + } + break; + + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG: { + struct cam_ubwc_config *ubwc_config; + + if (blob_size < sizeof(struct cam_ubwc_config)) { + CAM_ERR(CAM_ISP, "Invalid blob_size %u ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + ubwc_config = (struct cam_ubwc_config *)blob_data; + + if (ubwc_config->num_ports > CAM_VFE_MAX_UBWC_PORTS || + ubwc_config->num_ports == 0) { + CAM_ERR(CAM_ISP, "Invalid num_ports %u in ubwc config, ctx_idx: %u", + ubwc_config->num_ports, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (ubwc_config->num_ports != 1) { + if (sizeof(struct cam_ubwc_plane_cfg_v1) > + ((UINT_MAX - sizeof(struct cam_ubwc_config)) / + ((ubwc_config->num_ports - 1) * 2))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in ubwc config num_ports:%u size per port:%lu ctx_idx: %u", + ubwc_config->num_ports, + sizeof(struct cam_ubwc_plane_cfg_v1) * + 2, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < (sizeof(struct cam_ubwc_config) + + (ubwc_config->num_ports - 1) * + sizeof(struct cam_ubwc_plane_cfg_v1) * 2)) { + CAM_ERR(CAM_ISP, "Invalid blob_size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(struct cam_ubwc_config) + + (ubwc_config->num_ports - 1) * + sizeof(struct cam_ubwc_plane_cfg_v1) * 2, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_ubwc_update(blob_type, blob_info, + ubwc_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "UBWC Update Failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2: { + struct cam_ubwc_config_v2 *ubwc_config; + + if (blob_size < sizeof(struct cam_ubwc_config_v2)) { + CAM_ERR(CAM_ISP, "Invalid blob_size %u, ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + ubwc_config = 
(struct cam_ubwc_config_v2 *)blob_data; + + if (ubwc_config->num_ports > CAM_VFE_MAX_UBWC_PORTS || + ubwc_config->num_ports == 0) { + CAM_ERR(CAM_ISP, "Invalid num_ports %u in ubwc config, ctx_idx: %u", + ubwc_config->num_ports, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (ubwc_config->num_ports != 1) { + if (sizeof(struct cam_ubwc_plane_cfg_v2) > + ((UINT_MAX - sizeof(struct cam_ubwc_config_v2)) + / ((ubwc_config->num_ports - 1) * 2))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in ubwc config num_ports:%u size per port:%lu ctx_idx: %u", + ubwc_config->num_ports, + sizeof(struct cam_ubwc_plane_cfg_v2) * + 2, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < (sizeof(struct cam_ubwc_config_v2) + + (ubwc_config->num_ports - 1) * + sizeof(struct cam_ubwc_plane_cfg_v2) * 2)) { + CAM_ERR(CAM_ISP, "Invalid blob_size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(struct cam_ubwc_config_v2) + + (ubwc_config->num_ports - 1) * + sizeof(struct cam_ubwc_plane_cfg_v2) * 2, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_ubwc_update_v2(blob_type, blob_info, + ubwc_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "UBWC Update Failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V3: { + struct cam_ubwc_config_v3 *ubwc_config; + + if (blob_size < sizeof(struct cam_ubwc_config_v3)) { + CAM_ERR(CAM_ISP, "Invalid blob_size %u, ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + ubwc_config = (struct cam_ubwc_config_v3 *)blob_data; + + if (ubwc_config->num_ports > CAM_VFE_MAX_UBWC_PORTS || + ubwc_config->num_ports == 0) { + CAM_ERR(CAM_ISP, "Invalid num_ports %u in ubwc config, ctx_idx: %u", + ubwc_config->num_ports, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (ubwc_config->num_ports != 1) { + if (sizeof(struct cam_ubwc_plane_cfg_v3) > + ((UINT_MAX - sizeof(struct 
cam_ubwc_config_v3)) + / ((ubwc_config->num_ports - 1) * 2))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in ubwc config num_ports:%u size per port:%lu ctx_idx: %u", + ubwc_config->num_ports, + sizeof(struct cam_ubwc_plane_cfg_v3) * + 2, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < (sizeof(struct cam_ubwc_config_v3) + + (ubwc_config->num_ports - 1) * + sizeof(struct cam_ubwc_plane_cfg_v3) * 2)) { + CAM_ERR(CAM_ISP, "Invalid blob_size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(struct cam_ubwc_config_v3) + + (ubwc_config->num_ports - 1) * + sizeof(struct cam_ubwc_plane_cfg_v3) * 2, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_ubwc_update_v3(blob_type, blob_info, + ubwc_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "UBWC Update Failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: { + struct cam_isp_csid_clock_config *clock_config; + + if (blob_size < sizeof(struct cam_isp_csid_clock_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_csid_clock_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + clock_config = (struct cam_isp_csid_clock_config *)blob_data; + + rc = cam_isp_blob_csid_clock_update(blob_type, blob_info, + clock_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "Clock Update Failed, ctx_idx: %u", + ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_CSID_QCFA_CONFIG: { + struct cam_isp_csid_qcfa_config *qcfa_config; + + if (blob_size < sizeof(struct cam_isp_csid_qcfa_config)) { + CAM_ERR(CAM_ISP, + "Invalid qcfa blob size %u expected %u, ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_csid_qcfa_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + qcfa_config = (struct cam_isp_csid_qcfa_config *)blob_data; + + rc = cam_isp_blob_csid_qcfa_update(blob_type, blob_info, + qcfa_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "QCFA Update 
Failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG: { + struct cam_fe_config *fe_config; + + if (blob_size < sizeof(struct cam_fe_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu, ctx_idx: %u", + blob_size, sizeof(struct cam_fe_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + fe_config = (struct cam_fe_config *)blob_data; + + rc = cam_isp_blob_fe_update(blob_type, blob_info, + fe_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "FS Update Failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG: { + struct cam_isp_core_config *core_config; + + if (blob_size < sizeof(struct cam_isp_core_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, sizeof(struct cam_isp_core_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + core_config = (struct cam_isp_core_config *)blob_data; + + rc = cam_isp_blob_core_cfg_update(blob_type, blob_info, + core_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "Core cfg update fail: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_VFE_OUT_CONFIG: { + struct cam_isp_vfe_out_config *vfe_out_config; + + if (blob_size < sizeof(struct cam_isp_vfe_out_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_vfe_out_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + vfe_out_config = (struct cam_isp_vfe_out_config *)blob_data; + + if (vfe_out_config->num_ports > max_ife_out_res || + vfe_out_config->num_ports == 0) { + CAM_ERR(CAM_ISP, + "Invalid num_ports:%u in vfe out config, ctx_idx: %u", + vfe_out_config->num_ports, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (vfe_out_config->num_ports != 1) { + if (sizeof(struct cam_isp_vfe_wm_config) > ((UINT_MAX - + sizeof(struct cam_isp_vfe_out_config)) / + 
(vfe_out_config->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in vfe out config num_ports:%u size per port:%lu, ctx_idx: %u", + vfe_out_config->num_ports, + sizeof(struct cam_isp_vfe_wm_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < (sizeof(struct cam_isp_vfe_out_config) + + (vfe_out_config->num_ports - 1) * + sizeof(struct cam_isp_vfe_wm_config))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, sizeof(struct cam_isp_vfe_out_config) + + (vfe_out_config->num_ports - 1) * + sizeof(struct cam_isp_vfe_wm_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_vfe_out_update(blob_type, blob_info, + vfe_out_config, prepare, max_ife_out_res, + CAM_ISP_HW_TYPE_VFE); + if (rc) + CAM_ERR(CAM_ISP, "VFE out update failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_VFE_OUT_CONFIG_V2: { + struct cam_isp_vfe_out_config_v2 *vfe_out_config_v2; + + if (blob_size < sizeof(struct cam_isp_vfe_out_config_v2)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_vfe_out_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + vfe_out_config_v2 = (struct cam_isp_vfe_out_config_v2 *)blob_data; + + if (vfe_out_config_v2->num_ports > max_ife_out_res || + vfe_out_config_v2->num_ports == 0) { + CAM_ERR(CAM_ISP, + "Invalid num_ports:%u in vfe out config, ctx_idx: %u", + vfe_out_config_v2->num_ports, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (vfe_out_config_v2->num_ports != 1) { + if (sizeof(struct cam_isp_vfe_wm_config_v2) > ((UINT_MAX - + sizeof(struct cam_isp_vfe_out_config_v2)) / + (vfe_out_config_v2->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in vfe out config num_ports:%u size per port:%lu, ctx_idx: %u", + vfe_out_config_v2->num_ports, + sizeof(struct cam_isp_vfe_wm_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + 
} + + if (blob_size < (sizeof(struct cam_isp_vfe_out_config_v2) + + (vfe_out_config_v2->num_ports - 1) * + sizeof(struct cam_isp_vfe_wm_config_v2))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, sizeof(struct cam_isp_vfe_out_config_v2) + + (vfe_out_config_v2->num_ports - 1) * + sizeof(struct cam_isp_vfe_wm_config_v2), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_vfe_out_update_v2(blob_type, blob_info, + vfe_out_config_v2, prepare, max_ife_out_res, + CAM_ISP_HW_TYPE_VFE); + if (rc) + CAM_ERR(CAM_ISP, "VFE out update failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + + case CAM_ISP_GENERIC_BLOB_TYPE_SENSOR_BLANKING_CONFIG: { + struct cam_isp_sensor_blanking_config *sensor_blanking_config; + + if (blob_size < sizeof(struct cam_isp_sensor_blanking_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %zu expected %zu ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_sensor_blanking_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + sensor_blanking_config = + (struct cam_isp_sensor_blanking_config *)blob_data; + + rc = cam_isp_blob_sensor_blanking_config(blob_type, blob_info, + sensor_blanking_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, + "Epoch Configuration Update Failed rc:%d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_DISCARD_INITIAL_FRAMES: { + struct cam_isp_discard_initial_frames *discard_config; + + if (blob_size < sizeof(struct cam_isp_discard_initial_frames)) { + CAM_ERR(CAM_ISP, + "Invalid discard frames blob size %u expected %u ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_discard_initial_frames), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + discard_config = (struct cam_isp_discard_initial_frames *)blob_data; + + rc = cam_isp_blob_csid_discard_init_frame_update( + blob_info, discard_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "Discard initial frames update failed rc: %d ctx: %u", + rc, 
ife_mgr_ctx->ctx_index); + + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG: { + struct cam_isp_sfe_init_scratch_buf_config *scratch_config; + + scratch_config = + (struct cam_isp_sfe_init_scratch_buf_config *)blob_data; + + rc = cam_isp_validate_scratch_buffer_blob(blob_size, + ife_mgr_ctx, scratch_config); + if (rc) + return rc; + + rc = cam_isp_blob_ife_scratch_buf_update( + scratch_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "IFE scratch buffer update failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_IFE_FCG_CFG: { + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + struct cam_isp_generic_fcg_config *fcg_config_args; + struct cam_isp_fcg_caps *fcg_caps = + &g_ife_hw_mgr.isp_caps.fcg_caps; + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) prepare->priv; + if (prepare_hw_data->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) { + CAM_WARN(CAM_ISP, "Detected IFE FCG blob in INIT packet, ignore it"); + return 0; + } + + if (!fcg_caps->ife_fcg_supported) { + CAM_ERR(CAM_ISP, + "FCG is not supported by IFE/MC_TFE hardware, ctx_idx: %u", + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + if (blob_size < + sizeof(struct cam_isp_generic_fcg_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, fcg config size: %u, ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_generic_fcg_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + fcg_config_args = + (struct cam_isp_generic_fcg_config *)blob_data; + + rc = cam_isp_validate_fcg_configs(fcg_config_args, + fcg_caps->max_ife_fcg_ch_ctx, + fcg_caps->max_ife_fcg_predictions, + ife_mgr_ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Failed in validating FCG configs, ctx_idx: %u", + ife_mgr_ctx->ctx_index); + return rc; + } + + rc = cam_isp_blob_fcg_config_prepare(fcg_config_args, + prepare, CAM_ISP_HW_TYPE_VFE); + if (rc) + CAM_ERR(CAM_ISP, + "FCG configuration preparation failed, rc: %d, ctx_idx: %d", + rc, ife_mgr_ctx->ctx_index); + } 
+ break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CLOCK_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CORE_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_OUT_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_HFR_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FCG_CFG: + case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG: + break; + case CAM_ISP_GENERIC_BLOB_TYPE_IRQ_COMP_CFG: + { + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + + prepare_hw_data->irq_comp_cfg_valid = true; + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_DYNAMIC_MODE_SWITCH: { + struct cam_isp_mode_switch_info *mup_config; + + if (blob_size < sizeof(struct cam_isp_mode_switch_info)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_mode_switch_info), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + mup_config = (struct cam_isp_mode_switch_info *)blob_data; + + rc = cam_isp_blob_csid_dynamic_switch_update( + blob_type, blob_info, mup_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "MUP Update Failed, ctx_idx: %u", ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_BW_LIMITER_CFG: { + struct cam_isp_out_rsrc_bw_limiter_config *bw_limit_config; + + if (blob_size < + sizeof(struct cam_isp_out_rsrc_bw_limiter_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_out_rsrc_bw_limiter_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + bw_limit_config = + (struct cam_isp_out_rsrc_bw_limiter_config *)blob_data; + + rc = cam_isp_validate_bw_limiter_blob(blob_size, + bw_limit_config); + if (rc) + return rc; + + rc = cam_isp_blob_bw_limit_update(blob_type, blob_info, + bw_limit_config, prepare, CAM_ISP_HW_TYPE_VFE); + if (rc) + CAM_ERR(CAM_ISP, + "BW limit update failed for IFE rc: %d, ctx_idx: %u", + rc, 
ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_INIT_CONFIG: { + struct cam_isp_init_config *init_config; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + + if (prepare_hw_data->packet_opcode_type != + CAM_ISP_PACKET_INIT_DEV) { + CAM_ERR(CAM_ISP, + "Init config blob not supported for packet type: %u req: %llu ctx_idx: %u", + prepare_hw_data->packet_opcode_type, + prepare->packet->header.request_id, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + if (blob_size < sizeof(struct cam_isp_init_config)) { + CAM_ERR(CAM_ISP, + "Invalid init config blob size %u expected %u, ctx_idx: %u", + blob_size, sizeof(struct cam_isp_init_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + init_config = (struct cam_isp_init_config *)blob_data; + + rc = cam_isp_blob_csid_init_config_update(prepare, init_config); + if (rc) { + CAM_ERR(CAM_ISP, + "CSID Init config failed for req: %llu rc: %d ctx_idx: %u", + prepare->packet->header.request_id, rc, ife_mgr_ctx->ctx_index); + break; + } + + rc = cam_isp_blob_ife_init_config_update(prepare, init_config); + if (rc) + CAM_ERR(CAM_ISP, + "IFE Init config failed for req: %llu rc: %d ctx_idx: %u", + prepare->packet->header.request_id, rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_RDI_LCR_CONFIG: { + struct cam_isp_lcr_rdi_config *lcr_rdi_config; + + if (blob_size < sizeof(struct cam_isp_lcr_rdi_config)) { + CAM_ERR(CAM_ISP, "Invalid lcr blob size %u expected %u ctx_idx: %u", + blob_size, sizeof(struct cam_isp_lcr_rdi_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + lcr_rdi_config = (struct cam_isp_lcr_rdi_config *)blob_data; + rc = cam_isp_blob_ife_rdi_lcr_config(ife_mgr_ctx, prepare, + blob_info, lcr_rdi_config, blob_type); + if (rc) + CAM_ERR(CAM_ISP, + "RDI LCR config failed for res %u, ctx_idx: %u", + lcr_rdi_config->res_id, ife_mgr_ctx->ctx_index); + + } + break; + case 
CAM_ISP_GENERIC_BLOB_TYPE_DRV_CONFIG: { + struct cam_isp_drv_config *drv_config; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + + if (blob_size < sizeof(struct cam_isp_drv_config)) { + CAM_ERR(CAM_ISP, "Invalid DRV blob size %u expected %u ctx_idx: %u", + blob_size, sizeof(struct cam_isp_drv_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + drv_config = (struct cam_isp_drv_config *)blob_data; + memcpy(&prepare_hw_data->isp_drv_config, drv_config, + sizeof(prepare_hw_data->isp_drv_config)); + + CAM_DBG(CAM_ISP, + "DRV config blob en:%d timeout_val:%u path_idle_en: 0x%x ctx: %u", + drv_config->drv_en, drv_config->timeout_val, drv_config->path_idle_en, + ife_mgr_ctx->ctx_index); + prepare_hw_data->drv_config_valid = true; + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_EXP_ORDER_UPDATE: { + struct cam_isp_path_exp_order_update *exp_order_update; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + + if (blob_size < sizeof(struct cam_isp_path_exp_order_update)) { + CAM_ERR(CAM_ISP, + "Invalid exp order update blob size %u expected %u ctx_idx: %u", + blob_size, sizeof(struct cam_isp_path_exp_order_update), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + exp_order_update = (struct cam_isp_path_exp_order_update *)blob_data; + + if ((!exp_order_update->num_path_exp_info) || + (exp_order_update->num_path_exp_info > CAM_ISP_MAX_PER_PATH_EXP_INFO) || + (!exp_order_update->num_sensor_out_exp)) { + CAM_ERR(CAM_ISP, + "Invalid num_path_info:%u sensor_out_exp:%u in exp order update, ctx_idx: %u", + exp_order_update->num_path_exp_info, + exp_order_update->num_sensor_out_exp, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (sizeof(struct cam_isp_per_path_exp_info) > ((UINT_MAX - + sizeof(struct cam_isp_path_exp_order_update)) / + (exp_order_update->num_path_exp_info))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in exp order update 
num_path_info:%u size per path:%lu, ctx_idx: %u", + exp_order_update->num_path_exp_info, + sizeof(struct cam_isp_per_path_exp_info), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + if (blob_size < (sizeof(struct cam_isp_path_exp_order_update) + + (exp_order_update->num_path_exp_info * + sizeof(struct cam_isp_per_path_exp_info)))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, sizeof(struct cam_isp_path_exp_order_update) + + (exp_order_update->num_path_exp_info * + sizeof(struct cam_isp_vfe_wm_config_v2)), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) prepare->priv; + memcpy(&prepare_hw_data->isp_exp_order_update, exp_order_update, + (sizeof(struct cam_isp_path_exp_order_update) + + (exp_order_update->num_path_exp_info * + sizeof(struct cam_isp_per_path_exp_info)))); + + CAM_DBG(CAM_ISP, + "Exp order update num_process_exp:%u num_sensor_out_exp:%u num_path_exp_info:%u ctx:%u", + exp_order_update->num_process_exp, exp_order_update->num_sensor_out_exp, + exp_order_update->num_path_exp_info, + ife_mgr_ctx->ctx_index); + prepare_hw_data->exp_order_update_valid = true; + } + break; + default: + CAM_WARN(CAM_ISP, "Invalid blob type %d, ctx_idx: %u", + blob_type, ife_mgr_ctx->ctx_index); + break; + } + + return rc; +} + +static int cam_ife_mgr_util_insert_frame_header( + struct cam_kmd_buf_info *kmd_buf, + struct cam_isp_prepare_hw_update_data *prepare_hw_data, + struct list_head *buf_tracker) +{ + int mmu_hdl = -1, rc = 0; + dma_addr_t iova_addr; + uint32_t frame_header_iova, padded_bytes = 0; + size_t len; + struct cam_ife_hw_mgr *hw_mgr = &g_ife_hw_mgr; + struct cam_smmu_buffer_tracker *buf_track_entry; + + mmu_hdl = cam_mem_is_secure_buf( + kmd_buf->handle) ? 
+ hw_mgr->mgr_common.img_iommu_hdl_secure : + hw_mgr->mgr_common.img_iommu_hdl; + + rc = cam_mem_get_io_buf(kmd_buf->handle, mmu_hdl, + &iova_addr, &len, NULL, buf_tracker); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to get io addr for handle = %d for mmu_hdl = %u", + kmd_buf->handle, mmu_hdl); + return rc; + } + + if (kmd_buf->offset >= len) { + CAM_ERR(CAM_ISP, "Invalid kmd buffer offset %u", kmd_buf->offset); + if (buf_tracker) { + buf_track_entry = list_first_entry_or_null(buf_tracker, + struct cam_smmu_buffer_tracker, list); + cam_smmu_buffer_tracker_buffer_putref(buf_track_entry); + } + + return -EINVAL; + } + + /* CDM buffer is within 32-bit address space */ + frame_header_iova = (uint32_t)iova_addr; + frame_header_iova += kmd_buf->offset; + + /* frame header address needs to be 256 byte aligned */ + if (frame_header_iova % CAM_FRAME_HEADER_ADDR_ALIGNMENT) { + padded_bytes = (uint32_t)(CAM_FRAME_HEADER_ADDR_ALIGNMENT - + (frame_header_iova % CAM_FRAME_HEADER_ADDR_ALIGNMENT)); + frame_header_iova += padded_bytes; + } + + prepare_hw_data->frame_header_iova = frame_header_iova; + + /* update the padding if any for the cpu addr as well */ + prepare_hw_data->frame_header_cpu_addr = kmd_buf->cpu_addr + + (padded_bytes / 4); + + CAM_DBG(CAM_ISP, + "Frame Header iova_addr: %pK cpu_addr: %pK padded_bytes: %llu", + prepare_hw_data->frame_header_iova, + prepare_hw_data->frame_header_cpu_addr, + padded_bytes); + + /* Reserve memory for frame header */ + kmd_buf->used_bytes += (padded_bytes + CAM_FRAME_HEADER_BUFFER_SIZE); + kmd_buf->offset += kmd_buf->used_bytes; + + return rc; +} + +static int cam_isp_blob_csid_irq_comp_cfg( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_irq_comp_cfg *comp_cfg, + uint32_t blob_type) + +{ + int rc = 0; + struct cam_isp_hw_mgr_res *hw_mgr_res; + uint32_t valid_hw_ctxt_mask = (CAM_ISP_MULTI_CTXT0_MASK | CAM_ISP_MULTI_CTXT1_MASK | + 
CAM_ISP_MULTI_CTXT2_MASK); + + if ((comp_cfg->ipp_src_ctxt_mask & (~valid_hw_ctxt_mask)) || + (comp_cfg->ipp_dst_comp_mask & (~valid_hw_ctxt_mask))) { + CAM_ERR(CAM_ISP, "Invalid hw ctx masks Ctx:%d IPP SRC mask 0x%x IPP DST mask 0x%x", + ctx->ctx_index, comp_cfg->ipp_src_ctxt_mask, comp_cfg->ipp_dst_comp_mask); + return -EINVAL; + } + + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + if (!hw_mgr_res->hw_res[0]) + continue; + break; + } + + if (!hw_mgr_res) { + CAM_ERR(CAM_ISP, "Ctx:%d invalid res", ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_hw_mgr_add_cmd_buf_util(hw_mgr_res, prepare, + blob_info, (void *)comp_cfg, + CAM_ISP_HW_CMD_IRQ_COMP_CFG, blob_type); + + CAM_DBG(CAM_ISP, "Ctx:%d IPP SRC mask 0x%x IPP DST mask 0x%x", ctx->ctx_index, + comp_cfg->ipp_src_ctxt_mask, comp_cfg->ipp_dst_comp_mask); + + return rc; +} + +static int cam_csid_packet_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + int rc = 0; + struct cam_isp_generic_blob_info *blob_info = user_data; + struct cam_ife_hw_mgr_ctx *ife_mgr_ctx = NULL; + struct cam_hw_prepare_update_args *prepare = NULL; + + if (!blob_data || (blob_size == 0) || !blob_info) { + CAM_ERR(CAM_ISP, "Invalid args data %pK size %d info %pK", + blob_data, blob_size, blob_info); + return -EINVAL; + } + + prepare = blob_info->prepare; + if (!prepare || !prepare->ctxt_to_hw_map) { + CAM_ERR(CAM_ISP, "Failed. 
prepare is NULL, blob_type %d", + blob_type); + return -EINVAL; + } + + ife_mgr_ctx = prepare->ctxt_to_hw_map; + CAM_DBG(CAM_ISP, "Context[%pK][%u] blob_type=%d, blob_size=%d", + ife_mgr_ctx, ife_mgr_ctx->ctx_index, blob_type, blob_size); + + switch (blob_type) { + case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V2: + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V3: + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2: + case CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_CSID_QCFA_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_VFE_OUT_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SENSOR_BLANKING_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_DISCARD_INITIAL_FRAMES: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CLOCK_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CORE_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_OUT_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_HFR_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG: + case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_DYNAMIC_MODE_SWITCH: + case CAM_ISP_GENERIC_BLOB_TYPE_BW_LIMITER_CFG: + case CAM_ISP_GENERIC_BLOB_TYPE_INIT_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_RDI_LCR_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_DRV_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FCG_CFG: + case CAM_ISP_GENERIC_BLOB_TYPE_IFE_FCG_CFG: + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V3: + case CAM_ISP_GENERIC_BLOB_TYPE_VFE_OUT_CONFIG_V2: + case CAM_ISP_GENERIC_BLOB_TYPE_EXP_ORDER_UPDATE: + break; + case CAM_ISP_GENERIC_BLOB_TYPE_IRQ_COMP_CFG: { + struct cam_isp_irq_comp_cfg *irq_comp_cfg; + + if (blob_size < sizeof(struct cam_isp_irq_comp_cfg)) { + 
CAM_ERR(CAM_ISP, + "Invalid IPP IRQ comp cfg blob size, %u, expected %u", + blob_size, sizeof(struct cam_isp_irq_comp_cfg)); + return -EINVAL; + } + + irq_comp_cfg = (struct cam_isp_irq_comp_cfg *)blob_data; + rc = cam_isp_blob_csid_irq_comp_cfg(ife_mgr_ctx, prepare, + blob_info, irq_comp_cfg, blob_type); + + CAM_DBG(CAM_ISP, + "IRQ comp cfg blob, ipp_src_ctxt_mask: 0x%x, ipp_dest_ctxt_mask: 0x%x", + irq_comp_cfg->ipp_src_ctxt_mask, irq_comp_cfg->ipp_dst_comp_mask); + } + break; + default: + CAM_WARN(CAM_ISP, "Invalid blob type %d, ctx_idx: %u", + blob_type, ife_mgr_ctx->ctx_index); + break; + } + + return rc; +} + +static int cam_sfe_packet_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + int rc = 0; + struct cam_isp_generic_blob_info *blob_info = user_data; + struct cam_ife_hw_mgr_ctx *ife_mgr_ctx = NULL; + struct cam_hw_prepare_update_args *prepare = NULL; + + if (!blob_data || (blob_size == 0) || !blob_info) { + CAM_ERR(CAM_ISP, "Invalid args data %pK size %d info %pK", + blob_data, blob_size, blob_info); + return -EINVAL; + } + + prepare = blob_info->prepare; + if (!prepare || !prepare->ctxt_to_hw_map) { + CAM_ERR(CAM_ISP, "Failed. 
prepare is NULL, blob_type %d", + blob_type); + return -EINVAL; + } + + ife_mgr_ctx = prepare->ctxt_to_hw_map; + CAM_DBG(CAM_ISP, "Context[%pK][%u] blob_type: %d, blob_size: %d", + ife_mgr_ctx, ife_mgr_ctx->ctx_index, blob_type, blob_size); + + switch (blob_type) { + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CLOCK_CONFIG: { + size_t clock_config_size = 0; + struct cam_isp_clock_config *clock_config; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + + if (blob_size < sizeof(struct cam_isp_clock_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + clock_config = (struct cam_isp_clock_config *)blob_data; + + if ((clock_config->num_rdi > CAM_SFE_RDI_NUM_MAX) || + (clock_config->num_rdi == 0)) { + CAM_ERR(CAM_ISP, "Invalid num_rdi %u in clock config, ctx_idx: %u", + clock_config->num_rdi, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (clock_config->num_rdi > 1) { + if (sizeof(uint64_t) > ((UINT_MAX - + sizeof(struct cam_isp_clock_config)) / + (clock_config->num_rdi - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in clock config num_rdi:%u size per port:%lu ctx_idx: %u", + clock_config->num_rdi, + sizeof(uint64_t), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if ((clock_config->num_rdi != 0) && (blob_size < + (sizeof(struct cam_isp_clock_config) + + sizeof(uint64_t) * (clock_config->num_rdi - 1)))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(uint32_t) * 2 + sizeof(uint64_t) * + (clock_config->num_rdi + 2), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + clock_config_size = sizeof(struct cam_isp_clock_config) + + ((clock_config->num_rdi - 1) * + sizeof(clock_config->rdi_hz)); + memcpy(&prepare_hw_data->bw_clk_config.sfe_clock_config, clock_config, + clock_config_size); + 
prepare_hw_data->bw_clk_config.sfe_clock_config_valid = true; + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_OUT_CONFIG: { + struct cam_isp_vfe_out_config *vfe_out_config; + + if (blob_size < sizeof(struct cam_isp_vfe_out_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_vfe_out_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + vfe_out_config = (struct cam_isp_vfe_out_config *)blob_data; + + + if (vfe_out_config->num_ports > max_sfe_out_res || + vfe_out_config->num_ports == 0) { + CAM_ERR(CAM_ISP, + "Invalid num_ports:%u in sfe out config, ctx_idx: %u", + vfe_out_config->num_ports, + max_sfe_out_res, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (vfe_out_config->num_ports != 1) { + if (sizeof(struct cam_isp_vfe_wm_config) > ((UINT_MAX - + sizeof(struct cam_isp_vfe_out_config)) / + (vfe_out_config->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in sfe out config num_ports:%u size per port:%lu ctx_idx: %u", + vfe_out_config->num_ports, + sizeof(struct cam_isp_vfe_wm_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < (sizeof(struct cam_isp_vfe_out_config) + + (vfe_out_config->num_ports - 1) * + sizeof(struct cam_isp_vfe_wm_config))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, sizeof(struct cam_isp_vfe_out_config) + + (vfe_out_config->num_ports - 1) * + sizeof(struct cam_isp_vfe_wm_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_vfe_out_update(blob_type, blob_info, + vfe_out_config, prepare, max_sfe_out_res, + CAM_ISP_HW_TYPE_SFE); + if (rc) + CAM_ERR(CAM_ISP, "SFE out update failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_HFR_CONFIG: { + struct cam_isp_resource_hfr_config *hfr_config; + + if (blob_size < sizeof(struct cam_isp_resource_hfr_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, 
ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + hfr_config = (struct cam_isp_resource_hfr_config *)blob_data; + + if (hfr_config->num_ports > max_sfe_out_res + || hfr_config->num_ports == 0) { + CAM_ERR(CAM_ISP, "Invalid num_ports %u in HFR config, ctx_idx: %u", + hfr_config->num_ports, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (hfr_config->num_ports != 1) { + if (sizeof(struct cam_isp_port_hfr_config) > + ((UINT_MAX - + sizeof(struct cam_isp_resource_hfr_config)) / + (hfr_config->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in hfr config num_ports:%u size per port:%lu ctx_idx: %u", + hfr_config->num_ports, + sizeof(struct cam_isp_port_hfr_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < (sizeof(struct cam_isp_resource_hfr_config) + + (hfr_config->num_ports - 1) * + sizeof(struct cam_isp_port_hfr_config))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu, ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_resource_hfr_config) + + (hfr_config->num_ports - 1) * + sizeof(struct cam_isp_port_hfr_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_hfr_update(blob_type, blob_info, + hfr_config, prepare, max_sfe_out_res, + CAM_ISP_HW_TYPE_SFE); + if (rc) + CAM_ERR(CAM_ISP, "HFR Update Failed, ctx_idx: %u", ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CORE_CONFIG: { + struct cam_isp_sfe_core_config *core_cfg; + + if (blob_size < sizeof(struct cam_isp_sfe_core_config)) { + CAM_ERR(CAM_ISP, + "Invalid blob size: %u expected: %u ctx_idx: %u", blob_size, + sizeof(struct cam_isp_sfe_core_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + core_cfg = (struct cam_isp_sfe_core_config *)blob_data; + rc = cam_isp_blob_sfe_core_cfg_update(blob_type, blob_info, + core_cfg, prepare); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG: { + struct 
cam_isp_sfe_init_scratch_buf_config *scratch_config; + + scratch_config = + (struct cam_isp_sfe_init_scratch_buf_config *)blob_data; + + rc = cam_isp_validate_scratch_buffer_blob(blob_size, + ife_mgr_ctx, scratch_config); + if (rc) + return rc; + + rc = cam_isp_blob_sfe_scratch_buf_update( + scratch_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "SFE scratch buffer update failed rc: %d ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG: { + struct cam_fe_config *fe_config; + + if (blob_size < sizeof(struct cam_fe_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, sizeof(struct cam_fe_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + fe_config = (struct cam_fe_config *)blob_data; + + rc = cam_isp_blob_fe_update(blob_type, blob_info, + fe_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "FS Update Failed rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_DYNAMIC_MODE_SWITCH: { + struct cam_isp_mode_switch_info *mup_config; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + + if (blob_size < sizeof(struct cam_isp_mode_switch_info)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_mode_switch_info), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + mup_config = (struct cam_isp_mode_switch_info *)blob_data; + if (ife_mgr_ctx->flags.is_sfe_shdr) { + ife_mgr_ctx->scratch_buf_info.sfe_scratch_config->updated_num_exp = + mup_config->num_expoures; + prepare_hw_data->num_exp = mup_config->num_expoures; + + rc = cam_isp_blob_sfe_update_fetch_core_cfg( + blob_type, blob_info, prepare); + if (rc) + CAM_ERR(CAM_ISP, + "SFE dynamic enable/disable for fetch failed, ctx_idx: %u", + ife_mgr_ctx->ctx_index); + } + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG: { + struct 
cam_isp_sfe_exp_config *exp_config; + + if (!ife_mgr_ctx->flags.is_sfe_shdr) { + CAM_ERR(CAM_ISP, + "Blob %u supported only for sHDR streams, ctx_idx: %u", + blob_type, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + if (blob_size < + sizeof(struct cam_isp_sfe_exp_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, ctx_idx: %u", + blob_size, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + exp_config = + (struct cam_isp_sfe_exp_config *)blob_data; + + if ((exp_config->num_ports > CAM_SFE_FE_RDI_NUM_MAX) || + (exp_config->num_ports == 0)) { + CAM_ERR(CAM_ISP, + "Invalid num_ports %u in exp order config, ctx_idx: %u", + exp_config->num_ports, ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + /* Check for integer overflow */ + if (exp_config->num_ports != 1) { + if (sizeof(struct cam_isp_sfe_wm_exp_order_config) > + ((UINT_MAX - + sizeof( + struct cam_isp_sfe_exp_config)) / + (exp_config->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in exp order config num_ports: %u size per port: %lu ctx_idx: %u", + exp_config->num_ports, + sizeof( + struct cam_isp_sfe_wm_exp_order_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + } + + if (blob_size < + (sizeof(struct cam_isp_sfe_exp_config) + + (exp_config->num_ports - 1) * + sizeof(struct cam_isp_sfe_wm_exp_order_config))) { + CAM_ERR(CAM_ISP, "Invalid blob size: %u expected: %lu ctx_idx: %u", + blob_size, + sizeof( + struct cam_isp_sfe_exp_config) + + (exp_config->num_ports - 1) * + sizeof( + struct cam_isp_sfe_wm_exp_order_config), ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + rc = cam_isp_blob_sfe_exp_order_update( + blob_info->base_info->idx, exp_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "SFE exp order update failed, ctx_idx: %u", + ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_BW_LIMITER_CFG: { + struct cam_isp_out_rsrc_bw_limiter_config *bw_limit_config; + + if (blob_size < + sizeof(struct cam_isp_out_rsrc_bw_limiter_config)) { + CAM_ERR(CAM_ISP, "Invalid blob 
size %u ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_out_rsrc_bw_limiter_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + bw_limit_config = + (struct cam_isp_out_rsrc_bw_limiter_config *)blob_data; + + rc = cam_isp_validate_bw_limiter_blob(blob_size, + bw_limit_config); + if (rc) + return rc; + + rc = cam_isp_blob_bw_limit_update(blob_type, blob_info, + bw_limit_config, prepare, CAM_ISP_HW_TYPE_SFE); + if (rc) + CAM_ERR(CAM_ISP, + "BW limit update failed for SFE rc: %d, ctx_idx: %u", + rc, ife_mgr_ctx->ctx_index); + } + break; + case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FCG_CFG: { + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + struct cam_isp_generic_fcg_config *fcg_config_args; + struct cam_isp_fcg_caps *fcg_caps = + &g_ife_hw_mgr.isp_caps.fcg_caps; + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) prepare->priv; + if (prepare_hw_data->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) { + CAM_WARN(CAM_ISP, "Detected SFE FCG blob in INIT packet, ignore it"); + return 0; + } + + if (!fcg_caps->sfe_fcg_supported) { + CAM_ERR(CAM_ISP, "FCG is not supported by SFE hardware, ctx_idx: %u", + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + if (blob_size < + sizeof(struct cam_isp_generic_fcg_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u, fcg config size: %u, ctx_idx: %u", + blob_size, + sizeof(struct cam_isp_generic_fcg_config), + ife_mgr_ctx->ctx_index); + return -EINVAL; + } + + fcg_config_args = + (struct cam_isp_generic_fcg_config *)blob_data; + + rc = cam_isp_validate_fcg_configs(fcg_config_args, + fcg_caps->max_sfe_fcg_ch_ctx, + fcg_caps->max_sfe_fcg_predictions, + ife_mgr_ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Failed in validating FCG configs, ctx_idx: %u", + ife_mgr_ctx->ctx_index); + return rc; + } + + rc = cam_isp_blob_fcg_config_prepare(fcg_config_args, + prepare, CAM_ISP_HW_TYPE_SFE); + if (rc) + CAM_ERR(CAM_ISP, + "FCG configuration preparation failed, rc: %d, ctx_idx: %d", + rc, ife_mgr_ctx->ctx_index); + } + break; 
+ case CAM_ISP_GENERIC_BLOB_TYPE_IFE_FCG_CFG: + case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2: + case CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_VFE_OUT_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V2: + case CAM_ISP_GENERIC_BLOB_TYPE_CSID_QCFA_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_SENSOR_BLANKING_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_DISCARD_INITIAL_FRAMES: + case CAM_ISP_GENERIC_BLOB_TYPE_INIT_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_RDI_LCR_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V3: + case CAM_ISP_GENERIC_BLOB_TYPE_DRV_CONFIG: + case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V3: + case CAM_ISP_GENERIC_BLOB_TYPE_VFE_OUT_CONFIG_V2: + case CAM_ISP_GENERIC_BLOB_TYPE_EXP_ORDER_UPDATE: + break; + default: + CAM_WARN(CAM_ISP, "Invalid blob type: %u, ctx_idx: %u", + blob_type, ife_mgr_ctx->ctx_index); + break; + } + + return rc; +} + +static inline bool cam_ife_mgr_validate_for_io_buffers( + uint32_t port_id, uint32_t scratch_cfg_mask) +{ + if (BIT(port_id) & scratch_cfg_mask) + return true; + + return false; +} + +static inline bool cam_isp_sfe_validate_for_scratch_buf_config( + uint32_t res_idx, struct cam_ife_hw_mgr_ctx *ctx, + bool default_settings) +{ + uint32_t curr_num_exp; + + /* check for num exposures for static mode but using RDI1-2 without RD1-2 */ + if (res_idx >= ctx->scratch_buf_info.num_fetches) + return true; + + /* check for num exposures for shdr dynamic mode */ + if ((ctx->flags.is_sfe_shdr) && + (ctx->ctx_config & CAM_IFE_CTX_CFG_DYNAMIC_SWITCH_ON)) { + if (default_settings) + curr_num_exp = ctx->curr_num_exp; + else + curr_num_exp = 
ctx->scratch_buf_info.sfe_scratch_config->updated_num_exp; + + if (res_idx >= curr_num_exp) + return true; + } + + return false; +} + +static int cam_isp_sfe_send_scratch_buf_upd( + uint32_t remaining_size, + enum cam_isp_hw_cmd_type cmd_type, + struct cam_isp_resource_node *hw_res, + struct cam_ife_sfe_scratch_buf_info *buf_info, + uint32_t *cpu_addr, + uint32_t *used_bytes) +{ + int rc, i; + struct cam_isp_hw_get_cmd_update update_buf; + struct cam_isp_hw_get_wm_update wm_update; + dma_addr_t io_addr[CAM_PACKET_MAX_PLANES]; + + update_buf.res = hw_res; + update_buf.cmd_type = cmd_type; + update_buf.cmd.cmd_buf_addr = cpu_addr; + update_buf.use_scratch_cfg = true; + + wm_update.num_buf = 1; + /* + * Update same scratch buffer for different planes, + * when used for IFE clients, same scratch buffer + * is configured to both per plane clients + */ + for (i = 0; i < CAM_PACKET_MAX_PLANES; i++) + io_addr[i] = buf_info->io_addr; + + wm_update.image_buf = io_addr; + wm_update.width = buf_info->width; + wm_update.height = buf_info->height; + wm_update.stride = buf_info->stride; + wm_update.slice_height = buf_info->slice_height; + wm_update.io_cfg = NULL; + + update_buf.wm_update = &wm_update; + update_buf.cmd.size = remaining_size; + + rc = hw_res->hw_intf->hw_ops.process_cmd( + hw_res->hw_intf->hw_priv, + cmd_type, &update_buf, + sizeof(struct cam_isp_hw_get_cmd_update)); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to send cmd: %u res: %u rc: %d", + cmd_type, hw_res->res_id, rc); + return rc; + } + + CAM_DBG(CAM_ISP, "Scratch buf configured for res: 0x%x", + hw_res->res_id); + + /* Update used bytes if update is via CDM */ + if ((cmd_type == CAM_ISP_HW_CMD_GET_BUF_UPDATE) || + (cmd_type == CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM)) + *used_bytes = update_buf.cmd.used_bytes; + + return rc; +} + +static int cam_isp_sfe_add_scratch_buffer_cfg( + uint32_t base_idx, + uint32_t sfe_rdi_cfg_mask, + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + 
struct cam_isp_hw_mgr_res            *res_list_isp_out,
+	struct list_head                     *res_list_in_rd,
+	struct cam_ife_hw_mgr_ctx            *ctx)
+{
+	int i, j, res_id, rc = 0;
+	uint32_t used_bytes = 0, remain_size = 0;
+	uint32_t io_cfg_used_bytes;
+	uint32_t *cpu_addr = NULL;
+	struct cam_ife_sfe_scratch_buf_info *buf_info;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+
+	/* Reserve room for the IOCFG hw-update entry added at the end */
+	if (prepare->num_hw_update_entries + 1 >=
+		prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d, ctx_idx: %u",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries, ctx->ctx_index);
+		return -EINVAL;
+	}
+
+	io_cfg_used_bytes = 0;
+	CAM_DBG(CAM_ISP, "ctx_idx: %u num_ports: %u",
+		ctx->ctx_index, ctx->scratch_buf_info.sfe_scratch_config->num_config);
+
+	/* Update RDI WMs */
+	for (i = 0; i < CAM_SFE_FE_RDI_NUM_MAX; i++) {
+		/* 0xff in the out map marks an unmapped/unused port */
+		if (ctx->sfe_out_map[i] == 0xff)
+			continue;
+
+		hw_mgr_res = &res_list_isp_out[ctx->sfe_out_map[i]];
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			/* Compute free KMD buffer space before each update */
+			if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
+				kmd_buf_info->size) {
+				remain_size = kmd_buf_info->size -
+					(kmd_buf_info->used_bytes +
+					io_cfg_used_bytes);
+			} else {
+				CAM_ERR(CAM_ISP,
+					"no free kmd memory for base %d, ctx_idx: %u",
+					base_idx, ctx->ctx_index);
+				rc = -ENOMEM;
+				return rc;
+			}
+
+			res_id = hw_mgr_res->hw_res[j]->res_id;
+
+			/* Skip ports beyond active fetches/exposures */
+			if (cam_isp_sfe_validate_for_scratch_buf_config(
+				(res_id - CAM_ISP_SFE_OUT_RES_RDI_0), ctx, false))
+				continue;
+
+			/* check if buffer provided for this RDI is from userspace */
+			if (cam_ife_mgr_validate_for_io_buffers(
+				(res_id - CAM_ISP_SFE_OUT_RES_RDI_0), sfe_rdi_cfg_mask))
+				continue;
+
+			/* cpu_addr is in uint32_t units, hence the /4 on byte counts */
+			cpu_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes / 4 + io_cfg_used_bytes / 4;
+			buf_info = &ctx->scratch_buf_info.sfe_scratch_config->buf_info[
+				res_id - CAM_ISP_SFE_OUT_RES_RDI_0];
+
+			/* Check if scratch available for this resource */
+			if (!buf_info->config_done) {
+				CAM_ERR(CAM_ISP,
+					"No scratch buffer config found for res: %u on ctx: %u",
+					res_id, ctx->ctx_index);
+				return -EFAULT;
+			}
+
+			CAM_DBG(CAM_ISP, "WM res_id: 0x%x idx: %u io_addr: %pK, ctx_idx: %u",
+				hw_mgr_res->hw_res[j]->res_id,
+				(res_id - CAM_ISP_SFE_OUT_RES_RDI_0),
+				buf_info->io_addr, ctx->ctx_index);
+
+			rc = cam_isp_sfe_send_scratch_buf_upd(
+				remain_size,
+				CAM_ISP_HW_CMD_GET_BUF_UPDATE,
+				hw_mgr_res->hw_res[j], buf_info,
+				cpu_addr, &used_bytes);
+			if (rc)
+				return rc;
+
+			io_cfg_used_bytes += used_bytes;
+		}
+	}
+
+	/* Update RDI RMs (read masters) with the same scratch buffers */
+	list_for_each_entry(hw_mgr_res, res_list_in_rd, list) {
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
+				kmd_buf_info->size) {
+				remain_size = kmd_buf_info->size -
+					(kmd_buf_info->used_bytes +
+					io_cfg_used_bytes);
+			} else {
+				CAM_ERR(CAM_ISP,
+					"no free kmd memory for base %d, ctx_idx: %u",
+					base_idx, ctx->ctx_index);
+				rc = -ENOMEM;
+				return rc;
+			}
+
+			res_id = hw_mgr_res->hw_res[j]->res_id;
+
+			if (cam_isp_sfe_validate_for_scratch_buf_config(
+				(res_id - CAM_ISP_SFE_IN_RD_0), ctx, false))
+				continue;
+
+			/* check if buffer provided for this RM is from userspace */
+			if (cam_ife_mgr_validate_for_io_buffers(
+				(res_id - CAM_ISP_SFE_IN_RD_0), sfe_rdi_cfg_mask))
+				continue;
+
+			cpu_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes / 4 +
+				io_cfg_used_bytes / 4;
+			buf_info = &ctx->scratch_buf_info.sfe_scratch_config->buf_info[
+				res_id - CAM_ISP_SFE_IN_RD_0];
+
+			CAM_DBG(CAM_ISP, "RM res_id: 0x%x idx: %u io_addr: %pK, ctx_idx: %u",
+				hw_mgr_res->hw_res[j]->res_id,
+				(res_id - CAM_ISP_SFE_IN_RD_0),
+				buf_info->io_addr, ctx->ctx_index);
+
+			rc = cam_isp_sfe_send_scratch_buf_upd(remain_size,
+				CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM,
+				hw_mgr_res->hw_res[j], buf_info,
+				cpu_addr, &used_bytes);
+			if (rc)
+				return rc;
+
+			io_cfg_used_bytes += used_bytes;
+		}
+	}
+
+	/* Publish all generated CDM commands as a single IOCFG BL entry */
+	if (io_cfg_used_bytes)
+		cam_ife_mgr_update_hw_entries_util(
+			CAM_ISP_IOCFG_BL, io_cfg_used_bytes,
+			kmd_buf_info, prepare, false);
+
+	return rc;
+}
+
+/*
+ * cam_isp_ife_add_scratch_buffer_cfg() - IFE counterpart of the SFE scratch
+ * configuration: for every configured IFE scratch port whose bit is clear in
+ * @scratch_cfg_mask (no userspace buffer), generate a WM buffer update into
+ * the KMD buffer and publish it as an IOCFG BL entry.
+ */
+static int cam_isp_ife_add_scratch_buffer_cfg(
+	uint32_t                              base_idx,
+	uint32_t                              scratch_cfg_mask,
+	struct cam_hw_prepare_update_args    *prepare,
+	struct cam_kmd_buf_info              *kmd_buf_info,
+	struct cam_ife_hw_mgr_ctx            *ctx)
+{
+	int i, j, res_id, rc = 0;
+	uint32_t used_bytes = 0, remain_size = 0, io_cfg_used_bytes;
+	uint32_t *cpu_addr = NULL;
+	struct cam_ife_sfe_scratch_buf_info *buf_info;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+
+	if (prepare->num_hw_update_entries + 1 >=
+		prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d, ctx_idx: %u",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries, ctx->ctx_index);
+		return -EINVAL;
+	}
+
+	io_cfg_used_bytes = 0;
+
+	/* Update scratch buffer for IFE WMs */
+	for (i = 0; i < ctx->scratch_buf_info.ife_scratch_config->num_config; i++) {
+		/*
+		 * Configure scratch only if the bit mask is not set for the given port,
+		 * this is determined after parsing all the IO config buffers
+		 */
+		if (cam_ife_mgr_validate_for_io_buffers(i, scratch_cfg_mask))
+			continue;
+
+		res_id = ctx->scratch_buf_info.ife_scratch_config->buf_info[i].res_id & 0xFF;
+
+		if (ctx->vfe_out_map[res_id] == 0xff) {
+			CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id);
+			return -EINVAL;
+		}
+
+		hw_mgr_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id]];
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
+				kmd_buf_info->size) {
+				remain_size = kmd_buf_info->size -
+					(kmd_buf_info->used_bytes +
+					io_cfg_used_bytes);
+			} else {
+				CAM_ERR(CAM_ISP,
+					"no free kmd memory for base %u, ctx_idx: %u",
+					base_idx, ctx->ctx_index);
+				rc = -ENOMEM;
+				return
rc;
+			}
+
+			/* cpu_addr is in uint32_t units, hence the /4 on byte counts */
+			cpu_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes / 4 + io_cfg_used_bytes / 4;
+			buf_info = &ctx->scratch_buf_info.ife_scratch_config->buf_info[i];
+			CAM_DBG(CAM_ISP, "WM res_id: 0x%x io_addr: %pK, ctx_idx: %u",
+				hw_mgr_res->hw_res[j]->res_id, buf_info->io_addr, ctx->ctx_index);
+
+			rc = cam_isp_sfe_send_scratch_buf_upd(
+				remain_size,
+				CAM_ISP_HW_CMD_GET_BUF_UPDATE,
+				hw_mgr_res->hw_res[j], buf_info,
+				cpu_addr, &used_bytes);
+			if (rc)
+				return rc;
+
+			io_cfg_used_bytes += used_bytes;
+		}
+	}
+
+	/* Publish all generated CDM commands as a single IOCFG BL entry */
+	if (io_cfg_used_bytes)
+		cam_ife_mgr_update_hw_entries_util(
+			CAM_ISP_IOCFG_BL, io_cfg_used_bytes,
+			kmd_buf_info, prepare, false);
+
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_csid_add_reg_update() - queue CSID reg-update (RUP) commands.
+ * First pass gathers every active CSID resource into per-HW-index rup_args
+ * (skipping the right split when the HW only honours the master RUP), then a
+ * second pass emits a change-base plus a reg-update command per CSID HW.
+ * MUP (mode update) settings are propagated from the prepare data.
+ */
+static int cam_ife_mgr_csid_add_reg_update(struct cam_ife_hw_mgr_ctx *ctx,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_kmd_buf_info *kmd_buf)
+{
+	int i;
+	int rc = 0;
+	uint32_t hw_idx;
+	struct cam_ife_hw_mgr *hw_mgr;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+	struct cam_ife_csid_hw_caps *csid_caps;
+	struct cam_isp_resource_node *res;
+	struct cam_isp_prepare_hw_update_data *prepare_hw_data = NULL;
+	struct cam_isp_change_base_args change_base_info = {0};
+	struct cam_isp_csid_reg_update_args
+		rup_args[CAM_IFE_CSID_HW_NUM_MAX] = {0};
+
+	prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
+		prepare->priv;
+
+	hw_mgr = ctx->hw_mgr;
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+
+		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			res = hw_mgr_res->hw_res[i];
+			hw_idx = res->hw_intf->hw_idx;
+			csid_caps = &hw_mgr->csid_hw_caps[hw_idx];
+
+			/* Slave split takes its RUP from the master on such HW */
+			if (i == CAM_ISP_HW_SPLIT_RIGHT &&
+				csid_caps->only_master_rup)
+				continue;
+
+			rup_args[hw_idx].res[rup_args[hw_idx].num_res] = res;
+			rup_args[hw_idx].num_res++;
+			rup_args[hw_idx].mup_en = prepare_hw_data->mup_en;
+			rup_args[hw_idx].mup_val = prepare_hw_data->mup_val;
+
+			CAM_DBG(CAM_ISP,
+				"Reg update queued for res %d hw_id %d, ctx_idx: %u",
+				res->res_id, res->hw_intf->hw_idx, ctx->ctx_index);
+		}
+	}
+
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (!rup_args[i].num_res)
+			continue;
+
+		change_base_info.base_idx = i;
+		change_base_info.cdm_id = ctx->cdm_id;
+		rc = cam_isp_add_change_base(prepare,
+			&ctx->res_list_ife_csid,
+			&change_base_info, kmd_buf);
+
+		CAM_DBG(CAM_ISP, "Ctx:%u Change base added for num_res %d",
+			ctx->ctx_index, rup_args[i].num_res);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Change base Failed Ctx:%u hw_idx=%d, rc=%d",
+				ctx->ctx_index, i, rc);
+			break;
+		}
+
+		/* Debug knob: simulate a single out-of-sync config via toggled MUP */
+		if (hw_mgr->debug_cfg.csid_out_of_sync_simul ==
+			CAM_IFE_CTX_TRIGGER_SINGLE_OUT_OF_SYNC_CFG)
+			rup_args[i].add_toggled_mup_entry = true;
+
+		rc = cam_isp_add_csid_reg_update(prepare, kmd_buf,
+			&rup_args[i], true);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Ctx:%u Reg Update failed idx:%u",
+				ctx->ctx_index, i);
+			break;
+		}
+
+		CAM_DBG(CAM_ISP, "Ctx:%u Reg update added id:%d num_res %d",
+			ctx->ctx_index, i, rup_args[i].num_res);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_isp_add_reg_update() - legacy RUP path: per context base, add
+ * a change-base (unless the CDM is internal) followed by a reg-update command
+ * against the IFE source resource list.
+ */
+static int cam_ife_mgr_isp_add_reg_update(struct cam_ife_hw_mgr_ctx *ctx,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_kmd_buf_info *kmd_buf)
+{
+	int i;
+	int rc = 0;
+	struct cam_isp_change_base_args change_base_info = {0};
+
+	for (i = 0; i < ctx->num_base; i++) {
+
+		change_base_info.base_idx = ctx->base[i].idx;
+		change_base_info.cdm_id = ctx->cdm_id;
+
+		/* Add change base */
+		if (!ctx->flags.internal_cdm) {
+			rc = cam_isp_add_change_base(prepare,
+				&ctx->res_list_ife_src,
+				&change_base_info, kmd_buf);
+
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Add Change base cmd Failed i=%d, idx=%d, rc=%d ctx_idx: %u",
+					i, ctx->base[i].idx, rc, ctx->ctx_index);
+				break;
+			}
+
+			CAM_DBG(CAM_ISP,
+				"Add Change base cmd i=%d, idx=%d, rc=%d ctx_idx: %u",
+				i, ctx->base[i].idx, rc, ctx->ctx_index);
+		}
+
+		rc = cam_isp_add_reg_update(prepare,
+			&ctx->res_list_ife_src,
+			ctx->base[i].idx, kmd_buf,
+			!ctx->flags.internal_cdm);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Add Reg Update cmd Failed i=%d, idx=%d, rc=%d ctx_idx: %u",
+				i, ctx->base[i].idx, rc, ctx->ctx_index);
+			break;
+		}
+
+		CAM_DBG(CAM_ISP,
+			"Add Reg Update cmd i=%d, idx=%d, rc=%d ctx_idx: %u",
+			i, ctx->base[i].idx, rc, ctx->ctx_index);
+	}
+	return rc;
+}
+
+/*
+ * cam_ife_hw_mgr_add_csid_go_cmd() - offline IFE: locate the first (left)
+ * active CSID resource and add a change-base plus the CSID offline "go"
+ * command. A right-split resource is rejected — offline does not support it.
+ * Returns -EINVAL if no resource is found.
+ */
+static int cam_ife_hw_mgr_add_csid_go_cmd(
+	struct cam_ife_hw_mgr_ctx         *ctx,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_kmd_buf_info           *kmd_buf_info)
+{
+	int rc = -EINVAL, i;
+	bool is_found = false;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+	struct cam_isp_change_base_args change_base_info = {0};
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			if (i == CAM_ISP_HW_SPLIT_RIGHT) {
+				CAM_ERR(CAM_ISP,
+					"Offline with right rsrc [%u] not supported ctx: %u",
+					hw_mgr_res->hw_res[i]->res_id, ctx->ctx_index);
+				return -EINVAL;
+			}
+
+			is_found = true;
+			goto add_cmds;
+		}
+	}
+
+add_cmds:
+
+	/* hw_mgr_res/i are only valid here when is_found is set */
+	if (is_found) {
+		change_base_info.base_idx = hw_mgr_res->hw_res[i]->hw_intf->hw_idx;
+		change_base_info.cdm_id = ctx->cdm_id;
+
+		rc = cam_isp_add_change_base(prepare,
+			&ctx->res_list_ife_csid,
+			&change_base_info, kmd_buf_info);
+		if (rc)
+			return rc;
+
+		rc = cam_isp_add_csid_offline_cmd(prepare,
+			hw_mgr_res->hw_res[i], kmd_buf_info, true);
+		if (rc)
+			return rc;
+
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ife_hw_mgr_add_vfe_go_cmd() - offline IFE: same pattern as the CSID
+ * variant, but searches the bus-read (in_rd) list and adds the VFE "go"
+ * command against the IFE source list.
+ */
+static int cam_ife_hw_mgr_add_vfe_go_cmd(
+	struct cam_ife_hw_mgr_ctx         *ctx,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_kmd_buf_info           *kmd_buf_info)
+{
+	int rc = -EINVAL, i;
+	bool is_found = false;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+	struct cam_isp_change_base_args change_base_info = {0};
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_in_rd, list) {
+		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if
(!hw_mgr_res->hw_res[i])
+				continue;
+
+			if (i == CAM_ISP_HW_SPLIT_RIGHT) {
+				CAM_ERR(CAM_ISP,
+					"Offline with right rsrc [%u] not supported ctx: %u",
+					hw_mgr_res->hw_res[i]->res_id, ctx->ctx_index);
+				return -EINVAL;
+			}
+
+			is_found = true;
+			goto add_cmds;
+		}
+	}
+
+add_cmds:
+
+	/* hw_mgr_res/i are only valid here when is_found is set */
+	if (is_found) {
+		change_base_info.base_idx = hw_mgr_res->hw_res[i]->hw_intf->hw_idx;
+		change_base_info.cdm_id = ctx->cdm_id;
+
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
+			&change_base_info, kmd_buf_info);
+		if (rc)
+			return rc;
+
+		rc = cam_isp_add_go_cmd(prepare, hw_mgr_res->hw_res[i], kmd_buf_info, true);
+		if (rc)
+			return rc;
+
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ife_hw_mgr_add_fcg_update() - reserve a dummy KMD-buffer hw-update
+ * entry for FCG (Flicker Compensation Gain) config, recording its index in
+ * @fcg_entry_idx so the real FCG commands can be patched in later.
+ * Queries the first active src resource for the required KMD size; clears
+ * @fcg_online (and returns 0) when the HW does not support FCG.
+ */
+static int cam_ife_hw_mgr_add_fcg_update(
+	struct cam_hw_prepare_update_args    *prepare,
+	struct cam_kmd_buf_info              *kmd_buf_info,
+	struct cam_isp_fcg_config_internal   *fcg_args_internal,
+	bool                                 *fcg_online,
+	uint32_t                             *fcg_entry_idx,
+	struct list_head                     *res_list_isp_src)
+{
+	uint32_t fcg_kmd_size, num_ent, i;
+	struct cam_isp_hw_mgr_res *hw_mgr_res;
+	struct cam_isp_resource_node *res;
+	struct cam_isp_hw_fcg_cmd fcg_cmd = {0};
+	int rc;
+
+	/* Only the first active src entry is queried (note the break below) */
+	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
+		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			res = hw_mgr_res->hw_res[i];
+
+			fcg_cmd.res = res;
+			fcg_cmd.cmd_type = CAM_ISP_HW_CMD_FCG_CONFIG;
+			fcg_cmd.get_size_flag = true;
+			fcg_cmd.u.fcg_get_size.num_types = fcg_args_internal->num_types;
+			fcg_cmd.u.fcg_get_size.num_ctxs = fcg_args_internal->num_ch_ctx;
+			fcg_cmd.u.fcg_get_size.kmd_size = 0;
+
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_FCG_CONFIG, &fcg_cmd,
+				(uint32_t)sizeof(struct cam_isp_hw_fcg_cmd));
+			if (rc || (fcg_cmd.u.fcg_get_size.kmd_size == 0)) {
+				CAM_ERR(CAM_ISP,
+					"Failed in retrieving KMD buf size requirement, rc: %d",
+					rc);
+				return rc;
+			}
+		}
+		break;
+	}
+
+	if (!fcg_cmd.u.fcg_get_size.fcg_supported) {
+		*fcg_online = false;
+		CAM_WARN(CAM_ISP, "FCG is sent from userspace but not supported by the hardware");
+		return 0;
+	}
+
+	/* kmd_size is reported in dwords; convert to bytes */
+	fcg_kmd_size = fcg_cmd.u.fcg_get_size.kmd_size * sizeof(uint32_t);
+	CAM_DBG(CAM_ISP, "KMD buf usage for FCG config is %u", fcg_kmd_size);
+
+	num_ent = prepare->num_hw_update_entries;
+	if (num_ent + 1 >= prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: %u, %u",
+			num_ent, prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	if (fcg_kmd_size + kmd_buf_info->used_bytes >
+		kmd_buf_info->size) {
+		CAM_ERR(CAM_ISP, "Insufficient space in kmd buffer, used_bytes: %u, buf size: %u",
+			kmd_buf_info->used_bytes, kmd_buf_info->size);
+		return -ENOMEM;
+	}
+
+	*fcg_entry_idx = num_ent;
+	cam_ife_mgr_update_hw_entries_util(CAM_ISP_IQ_BL, fcg_kmd_size,
+		kmd_buf_info, prepare, false);
+
+	CAM_DBG(CAM_ISP, "FCG dummy entry, num_ent: %u, entry_size: %u",
+		num_ent, fcg_kmd_size);
+
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_update_cmd_buffer() - per-base command-buffer population:
+ * pick the src resource list for the base's HW type (SFE/VFE/CSID), add a
+ * change-base when an external CDM is used, then add the generic command
+ * buffers (and the FCG dummy entry if userspace supplied FCG config).
+ */
+static int cam_ife_hw_mgr_update_cmd_buffer(
+	struct cam_ife_hw_mgr_ctx               *ctx,
+	struct cam_hw_prepare_update_args       *prepare,
+	struct cam_kmd_buf_info                 *kmd_buf,
+	struct cam_isp_cmd_buf_count            *cmd_buf_count,
+	uint32_t                                 base_idx)
+{
+	struct list_head                     *res_list = NULL;
+	struct cam_isp_change_base_args       change_base_info = {0};
+	int                                   rc = 0;
+	struct cam_isp_prepare_hw_update_data *prepare_hw_data;
+	struct cam_isp_fcg_config_info       *fcg_info;
+
+	prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)prepare->priv;
+
+	if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_SFE) {
+		res_list = &ctx->res_list_sfe_src;
+	} else if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_VFE) {
+		res_list = &ctx->res_list_ife_src;
+	} else if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_CSID) {
+		/* Nothing to do for CSID without cmd bufs or IRQ comp config */
+		if (!cmd_buf_count->csid_cnt && !prepare_hw_data->irq_comp_cfg_valid)
+			return rc;
+		res_list = &ctx->res_list_ife_csid;
+	} else {
+		CAM_ERR(CAM_ISP,
+			"Invalid hw_type=%d, ctx_idx: %u",
+			ctx->base[base_idx].hw_type, ctx->ctx_index);
+		return -EINVAL;
+	}
+
+	if (!ctx->flags.internal_cdm) {
+		change_base_info.base_idx = ctx->base[base_idx].idx;
+		change_base_info.cdm_id = ctx->cdm_id;
+		rc = cam_isp_add_change_base(prepare,
+			res_list,
+			&change_base_info, kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed change base, i=%d, split_id=%d, hw_type=%d ctx_idx: %u",
+				base_idx, ctx->base[base_idx].split_id,
+				ctx->base[base_idx].hw_type, ctx->ctx_index);
+			return rc;
+		}
+
+		CAM_DBG(CAM_ISP,
+			"changing the base hw_type: %u core_id: %u CDM ID: %d ctx_idx: %u",
+			ctx->base[base_idx].hw_type, ctx->base[base_idx].idx,
+			ctx->cdm_id, ctx->ctx_index);
+	}
+
+	CAM_DBG(CAM_ISP,
+		"Add cmdbuf, i=%d, split_id=%d, hw_type=%d ctx_idx: %u",
+		base_idx, ctx->base[base_idx].split_id,
+		ctx->base[base_idx].hw_type, ctx->ctx_index);
+
+	fcg_info = &(prepare_hw_data->fcg_info);
+
+	if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_SFE) {
+		rc = cam_sfe_add_command_buffers(
+			prepare, kmd_buf, &ctx->base[base_idx],
+			cam_sfe_packet_generic_blob_handler,
+			ctx->res_list_sfe_out,
+			ctx->sfe_out_map,
+			CAM_ISP_SFE_OUT_RES_BASE,
+			(CAM_ISP_SFE_OUT_RES_BASE +
+			max_sfe_out_res));
+		if (rc)
+			goto add_cmd_err;
+
+		/* No need to handle FCG entry if no valid fcg config from userspace */
+		if (!fcg_info->sfe_fcg_online)
+			goto end;
+
+		rc = cam_ife_hw_mgr_add_fcg_update(
+			prepare, kmd_buf,
+			&fcg_info->sfe_fcg_config,
+			&fcg_info->sfe_fcg_online,
+			&fcg_info->sfe_fcg_entry_idx,
+			res_list);
+		if (rc)
+			goto add_cmd_err;
+	} else if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_VFE) {
+		rc = cam_isp_add_command_buffers(
+			prepare, kmd_buf, &ctx->base[base_idx],
+			cam_isp_packet_generic_blob_handler,
+			ctx->res_list_ife_out,
+			ctx->vfe_out_map,
+			CAM_ISP_IFE_OUT_RES_BASE,
+			(CAM_ISP_IFE_OUT_RES_BASE +
+			max_ife_out_res));
+		if (rc)
+			goto add_cmd_err;
+
+		/* No need to handle FCG entry if no valid fcg config from userspace */
+		if (!fcg_info->ife_fcg_online)
+			goto end;
+
+		rc
= cam_ife_hw_mgr_add_fcg_update(
+			prepare, kmd_buf,
+			&fcg_info->ife_fcg_config,
+			&fcg_info->ife_fcg_online,
+			&fcg_info->ife_fcg_entry_idx,
+			res_list);
+		if (rc)
+			goto add_cmd_err;
+	} else if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_CSID) {
+		rc = cam_isp_add_csid_command_buffers(prepare,
+			kmd_buf, cam_csid_packet_generic_blob_handler,
+			&ctx->base[base_idx]);
+		if (rc)
+			goto add_cmd_err;
+	}
+
+	return rc;
+
+add_cmd_err:
+	CAM_ERR(CAM_ISP,
+		"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d hw_type=%d ctx_idx: %u",
+		base_idx, ctx->base[base_idx].split_id, rc,
+		ctx->base[base_idx].hw_type, ctx->ctx_index);
+end:
+	return rc;
+}
+
+/*
+ * cam_ife_hw_mgr_check_if_scratch_is_needed() - mark whether IO configs must
+ * be validated against scratch usage. Only relevant for SFE fastshutter (FS)
+ * or sHDR contexts; records the SFE fetch count and any configured IFE
+ * scratch ports into @check_for_scratch.
+ */
+static void cam_ife_hw_mgr_check_if_scratch_is_needed(
+	struct cam_ife_hw_mgr_ctx               *ctx,
+	struct cam_isp_check_io_cfg_for_scratch *check_for_scratch)
+{
+	/* Validate for scratch buffer use-cases sHDR/FS */
+	if (!((ctx->flags.is_sfe_fs) || (ctx->flags.is_sfe_shdr)))
+		return;
+
+	/* For SFE use number of fetches = number of scratch buffers needed */
+	check_for_scratch->sfe_scratch_res_info.num_active_fe_rdis =
+		ctx->scratch_buf_info.num_fetches;
+	check_for_scratch->validate_for_sfe = true;
+
+	/* Check if IFE has any scratch buffer */
+	if (ctx->scratch_buf_info.ife_scratch_config->num_config) {
+		int i;
+
+		check_for_scratch->validate_for_ife = true;
+		for (i = 0; i < ctx->scratch_buf_info.ife_scratch_config->num_config; i++) {
+			check_for_scratch->ife_scratch_res_info.ife_scratch_resources[i] =
+				ctx->scratch_buf_info.ife_scratch_config->buf_info[i].res_id;
+			check_for_scratch->ife_scratch_res_info.num_ports++;
+		}
+	}
+}
+
+/*
+ * cam_ife_hw_mgr_sfe_scratch_buf_update() - decide SFE scratch handling for
+ * this request. If not all fetch ports got userspace buffers: UPDATE packets
+ * get scratch CDM config added now; INIT packets just record the mask so
+ * scratch is applied at stream-on. If all ports are covered, INIT packets
+ * mark stream-on scratch config as skippable (ePCR case).
+ */
+static int cam_ife_hw_mgr_sfe_scratch_buf_update(
+	int32_t                                  opcode_type,
+	uint32_t                                 base_idx,
+	struct cam_kmd_buf_info                 *kmd_buf,
+	struct cam_hw_prepare_update_args       *prepare,
+	struct cam_ife_hw_mgr_ctx               *ctx,
+	struct cam_isp_sfe_scratch_buf_res_info *sfe_res_info)
+{
+	int rc = 0;
+
+	/* If there are no output provided buffers */
+	if ((sfe_res_info->sfe_rdi_cfg_mask) !=
+		((1 << ctx->scratch_buf_info.num_fetches) - 1)) {
+		/* For update packets add scratch buffer */
+		if (opcode_type == CAM_ISP_PACKET_UPDATE_DEV) {
+			CAM_DBG(CAM_ISP,
+				"Adding SFE scratch buffer cfg_mask expected: 0x%x actual: 0x%x ctx_idx: %u",
+				((1 << ctx->scratch_buf_info.num_fetches) - 1),
+				sfe_res_info->sfe_rdi_cfg_mask, ctx->ctx_index);
+			rc = cam_isp_sfe_add_scratch_buffer_cfg(
+				ctx->base[base_idx].idx, sfe_res_info->sfe_rdi_cfg_mask,
+				prepare, kmd_buf, ctx->res_list_sfe_out,
+				&ctx->res_list_ife_in_rd, ctx);
+			if (rc)
+				goto end;
+		} else if (opcode_type == CAM_ISP_PACKET_INIT_DEV) {
+			/* For INIT packets update mask which is applied at streamon */
+			ctx->scratch_buf_info.sfe_scratch_config->streamon_buf_mask =
+				sfe_res_info->sfe_rdi_cfg_mask;
+		}
+	} else {
+		/* If buffers are provided for ePCR skip scratch buffer at stream on */
+		if (opcode_type == CAM_ISP_PACKET_INIT_DEV)
+			ctx->scratch_buf_info.sfe_scratch_config->skip_scratch_cfg_streamon = true;
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ife_hw_mgr_ife_scratch_buf_update() - IFE counterpart of the SFE
+ * scratch decision above, keyed on the IFE scratch port mask/count.
+ */
+static int cam_ife_hw_mgr_ife_scratch_buf_update(
+	int32_t                                  opcode_type,
+	uint32_t                                 base_idx,
+	struct cam_kmd_buf_info                 *kmd_buf,
+	struct cam_hw_prepare_update_args       *prepare,
+	struct cam_ife_hw_mgr_ctx               *ctx,
+	struct cam_isp_ife_scratch_buf_res_info *ife_res_info)
+{
+	int rc = 0;
+
+	if ((ife_res_info->ife_scratch_cfg_mask) !=
+		((1 << ife_res_info->num_ports) - 1)) {
+		if (opcode_type == CAM_ISP_PACKET_UPDATE_DEV) {
+			CAM_DBG(CAM_ISP,
+				"Adding IFE scratch buffer cfg_mask expected: 0x%x actual: 0x%x ctx_idx: %u",
+				((1 << ife_res_info->num_ports) - 1),
+				ife_res_info->ife_scratch_cfg_mask, ctx->ctx_index);
+			rc = cam_isp_ife_add_scratch_buffer_cfg(
+				ctx->base[base_idx].idx,
+				ife_res_info->ife_scratch_cfg_mask, prepare,
+				kmd_buf, ctx);
+			if (rc)
+				goto end;
+		} else if (opcode_type == CAM_ISP_PACKET_INIT_DEV) {
+			ctx->scratch_buf_info.ife_scratch_config->streamon_buf_mask =
+				ife_res_info->ife_scratch_cfg_mask;
+		}
+	} else {
+		if (opcode_type == CAM_ISP_PACKET_INIT_DEV)
+			ctx->scratch_buf_info.ife_scratch_config->skip_scratch_cfg_streamon = true;
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_check_for_per_request_reg_dump() - copy the first per-request
+ * reg-dump descriptor from the packet into prepare_hw_data so it can be
+ * dumped when this request is applied. Silently returns on an invalid count.
+ */
+static void cam_ife_mgr_check_for_per_request_reg_dump(
+	struct cam_hw_prepare_update_args        *prepare,
+	struct cam_isp_prepare_hw_update_data    *prepare_hw_data)
+{
+	int i;
+
+	if ((!prepare->num_reg_dump_buf) || (prepare->num_reg_dump_buf >
+		CAM_REG_DUMP_MAX_BUF_ENTRIES)) {
+		CAM_DBG(CAM_ISP, "Invalid num of reg dump desc: %u for req: %llu",
+			prepare->num_reg_dump_buf, prepare->packet->header.request_id);
+		return;
+	}
+
+	for (i = 0; i < prepare->num_reg_dump_buf; i++) {
+		if (prepare->reg_dump_buf_desc[i].meta_data !=
+			CAM_ISP_PACKET_META_REG_DUMP_PER_REQUEST)
+			continue;
+
+		memcpy(&prepare_hw_data->reg_dump_buf_desc[
+			prepare_hw_data->num_reg_dump_buf],
+			&prepare->reg_dump_buf_desc[i],
+			sizeof(struct cam_cmd_buf_desc));
+		prepare_hw_data->num_reg_dump_buf++;
+		CAM_DBG(CAM_ISP, "Updated per request reg dump for req: %llu",
+			prepare->packet->header.request_id);
+		return;
+	}
+}
+
+/*
+ * cam_ife_mgr_prepare_hw_update() - hw-mgr "prepare" entry point: validate
+ * the packet, patch it, populate command buffers, IO buffers and scratch
+ * config per base, then add reg-update/go commands. Continues below.
+ */
+static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc = 0;
+	struct cam_hw_prepare_update_args *prepare =
+		(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
+
+	struct cam_ife_hw_mgr_ctx               *ctx;
+	struct cam_ife_hw_mgr                   *hw_mgr;
+	uint32_t                                 i;
+	bool                                     fill_ife_fence = true;
+	bool                                     fill_sfe_fence = true;
+	bool                                     frame_header_enable = false;
+	struct cam_isp_prepare_hw_update_data   *prepare_hw_data;
+	struct cam_isp_frame_header_info         frame_header_info;
+	struct list_head                        *res_list_ife_rd_tmp = NULL;
+	struct cam_isp_cmd_buf_count             cmd_buf_count = {0};
+	struct cam_isp_check_io_cfg_for_scratch  check_for_scratch = {0};
+	struct cam_isp_io_buf_info               io_buf_info = {0};
+
+	if (!hw_mgr_priv || !prepare_hw_update_args) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)prepare->priv;
+
+	ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
+	hw_mgr =
(struct cam_ife_hw_mgr *)hw_mgr_priv;
+
+	CAM_DBG(CAM_REQ, "ctx[%pK][%u] Enter for req_id %lld",
+		ctx, ctx->ctx_index, prepare->packet->header.request_id);
+
+	rc = cam_packet_util_validate_packet(prepare->packet, prepare->remain_len);
+	if (rc)
+		return rc;
+
+	/* Pre parse the packet*/
+	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &prepare_hw_data->kmd_cmd_buff_info);
+	if (rc)
+		return rc;
+
+	/* Frame-header timestamp support: carve a header out of the KMD buf */
+	if (ctx->ctx_config & CAM_IFE_CTX_CFG_FRAME_HEADER_TS) {
+		rc = cam_ife_mgr_util_insert_frame_header(&prepare_hw_data->kmd_cmd_buff_info,
+			prepare_hw_data, prepare->buf_tracker);
+		if (rc)
+			return rc;
+
+		frame_header_enable = true;
+		prepare_hw_data->frame_header_res_id = 0x0;
+	}
+
+	/* Patch with the image IOMMU handle for internal CDM, cmd handle otherwise */
+	if (ctx->flags.internal_cdm)
+		rc = cam_packet_util_process_patches(prepare->packet,
+			prepare->buf_tracker, hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure, true);
+	else
+		rc = cam_packet_util_process_patches(prepare->packet,
+			prepare->buf_tracker, hw_mgr->mgr_common.cmd_iommu_hdl,
+			hw_mgr->mgr_common.cmd_iommu_hdl_secure, true);
+
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Patch ISP packet failed. ctx_idx: %u", ctx->ctx_index);
+		return rc;
+	}
+
+	prepare->num_hw_update_entries = 0;
+	prepare->num_in_map_entries = 0;
+	prepare->num_out_map_entries = 0;
+	prepare->num_reg_dump_buf = 0;
+
+	/* Assign IFE RD for non SFE targets */
+	if (ctx->ctx_type != CAM_IFE_CTX_TYPE_SFE)
+		res_list_ife_rd_tmp = &ctx->res_list_ife_in_rd;
+
+	rc = cam_isp_get_cmd_buf_count(prepare, &cmd_buf_count);
+
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Invalid cmd buffers, ctx_idx: %u", ctx->ctx_index);
+		return rc;
+	}
+
+	/* UMD op_code is offset by one from KMD; +1 & 0xF extracts the type */
+	if (((prepare->packet->header.op_code + 1) & 0xF) ==
+		CAM_ISP_PACKET_INIT_DEV)
+		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_INIT_DEV;
+	else
+		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_UPDATE_DEV;
+
+	cam_ife_hw_mgr_check_if_scratch_is_needed(ctx, &check_for_scratch);
+
+	for (i = 0; i < ctx->num_base; i++) {
+
+		memset(&frame_header_info, 0,
+			sizeof(struct cam_isp_frame_header_info));
+		if (frame_header_enable) {
+			frame_header_info.frame_header_enable = true;
+			frame_header_info.frame_header_iova_addr =
+				prepare_hw_data->frame_header_iova;
+		}
+
+		rc = cam_ife_hw_mgr_update_cmd_buffer(ctx, prepare,
+			&prepare_hw_data->kmd_cmd_buff_info, &cmd_buf_count, i);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Add cmd buffer failed base_idx: %d hw_type %d ctx_idx: %u",
+				i, ctx->base[i].hw_type, ctx->ctx_index);
+			goto end;
+		}
+
+		/* get IO buffers */
+		io_buf_info.frame_hdr = &frame_header_info;
+		io_buf_info.scratch_check_cfg = &check_for_scratch;
+		io_buf_info.prepare = prepare;
+		io_buf_info.kmd_buf_info = &prepare_hw_data->kmd_cmd_buff_info;
+		io_buf_info.iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl;
+		io_buf_info.sec_iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl_secure;
+		io_buf_info.base = &ctx->base[i];
+		io_buf_info.hw_intf = cam_ife_hw_mgr_get_hw_intf(&ctx->base[i]);
+		io_buf_info.major_version = ctx->major_version;
+
+		if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) {
+			io_buf_info.fill_fence = fill_ife_fence;
+			io_buf_info.out_base = CAM_ISP_IFE_OUT_RES_BASE;
+			io_buf_info.out_max = (CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res);
+			io_buf_info.res_list_isp_out = ctx->res_list_ife_out;
+			io_buf_info.out_map = ctx->vfe_out_map;
+			io_buf_info.res_list_in_rd = res_list_ife_rd_tmp;
+			rc = cam_isp_add_io_buffers(&io_buf_info);
+		} else if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) {
+			io_buf_info.fill_fence = fill_sfe_fence;
+			io_buf_info.out_base = CAM_ISP_SFE_OUT_RES_BASE;
+			io_buf_info.out_max = (CAM_ISP_SFE_OUT_RES_BASE + max_sfe_out_res);
+			io_buf_info.res_list_in_rd = &ctx->res_list_ife_in_rd;
+			io_buf_info.res_list_isp_out = ctx->res_list_sfe_out;
+			io_buf_info.out_map = ctx->sfe_out_map;
+			rc = cam_isp_add_io_buffers(&io_buf_info);
+		}
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in io buffers, i=%d, rc=%d hw_type=%s ctx_idx: %u",
+				i, rc,
+				(ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE ? "SFE" : "IFE"),
+				ctx->ctx_index);
+			goto end;
+		}
+
+		/*
+		 * Add scratch buffer if there no output buffer for SFE/IFE clients
+		 * only for UPDATE packets. For INIT we could have ePCR enabled
+		 * based on that decide to configure scratch via AHB at
+		 * stream on or not. It's possible that in ePCR one HW could
+		 * have buffers and the other might not. Handle different
+		 * combinations for different HWs
+		 */
+		if ((check_for_scratch.validate_for_sfe) &&
+			(ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) && (fill_sfe_fence)) {
+			struct cam_isp_sfe_scratch_buf_res_info *sfe_res_info =
+				&check_for_scratch.sfe_scratch_res_info;
+
+			rc = cam_ife_hw_mgr_sfe_scratch_buf_update(
+				prepare_hw_data->packet_opcode_type,
+				i, &prepare_hw_data->kmd_cmd_buff_info, prepare, ctx, sfe_res_info);
+			if (rc)
+				goto end;
+		}
+
+		if ((check_for_scratch.validate_for_ife) &&
+			(ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) && (fill_ife_fence)) {
+			struct cam_isp_ife_scratch_buf_res_info *ife_res_info =
+				&check_for_scratch.ife_scratch_res_info;
+
+			rc = cam_ife_hw_mgr_ife_scratch_buf_update(
+				prepare_hw_data->packet_opcode_type,
+				i, &prepare_hw_data->kmd_cmd_buff_info, prepare, ctx, ife_res_info);
+			if (rc)
+				goto end;
+		}
+
+		/* fence map table entries need to fill only once in the loop */
+		if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) && fill_sfe_fence)
+			fill_sfe_fence = false;
+		else if ((ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) && fill_ife_fence)
+			fill_ife_fence = false;
+
+		if (frame_header_info.frame_header_res_id &&
+			frame_header_enable) {
+			frame_header_enable = false;
+			prepare_hw_data->frame_header_res_id =
+				frame_header_info.frame_header_res_id;
+
+			CAM_DBG(CAM_ISP,
+				"Frame header enabled for res_id 0x%x cpu_addr %pK ctx_idx: %u",
+				prepare_hw_data->frame_header_res_id,
+				prepare_hw_data->frame_header_cpu_addr, ctx->ctx_index);
+		}
+	}
+
+	/* Check if frame header was enabled for any WM */
+	if ((ctx->ctx_config & CAM_IFE_CTX_CFG_FRAME_HEADER_TS) &&
+		(prepare->num_out_map_entries) &&
+		(!prepare_hw_data->frame_header_res_id)) {
+		CAM_ERR(CAM_ISP, "Failed to configure frame header, ctx_idx: %u", ctx->ctx_index);
+		goto end;
+	}
+
+	/*
+	 * reg update will be done later for the initial configure.
+	 * need to plus one to the op_code and only take the lower
+	 * bits to get the type of operation since UMD definition
+	 * of op_code has some difference from KMD.
+	 */
+	if (prepare_hw_data->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) {
+		if ((!prepare->num_reg_dump_buf) || (prepare->num_reg_dump_buf >
+			CAM_REG_DUMP_MAX_BUF_ENTRIES))
+			goto end;
+
+		/* Copy reg dump buf desc for error/flush cases */
+		if (!ctx->num_reg_dump_buf) {
+			ctx->num_reg_dump_buf = prepare->num_reg_dump_buf;
+
+			memcpy(ctx->reg_dump_buf_desc,
+				prepare->reg_dump_buf_desc,
+				sizeof(struct cam_cmd_buf_desc) *
+				prepare->num_reg_dump_buf);
+			/*
+			 * save the address for error/flush cases to avoid
+			 * invoking mutex(cpu get/put buf) in tasklet/atomic context.
+			 */
+			for (i = 0; i < ctx->num_reg_dump_buf; i++) {
+				rc = cam_mem_get_cpu_buf(ctx->reg_dump_buf_desc[i].mem_handle,
+					&(ctx->reg_dump_cmd_buf_addr_len[i].cpu_addr),
+					&(ctx->reg_dump_cmd_buf_addr_len[i].buf_size));
+
+				if (rc) {
+					CAM_ERR(CAM_ISP,
+						"Failed in get_cpu_addr i=%d rc=%d, cpu_addr=%pK",
+						i, rc,
+						(void *)ctx->reg_dump_cmd_buf_addr_len[i].cpu_addr);
+					/* Unwind previously acquired cpu bufs on failure */
+					for (--i; i >= 0; i--)
+						cam_mem_put_cpu_buf(
+							ctx->reg_dump_buf_desc[i].mem_handle);
+					ctx->flags.skip_reg_dump_buf_put = true;
+					return rc;
+				}
+			}
+		}
+
+		/* Per req reg dump */
+		cam_ife_mgr_check_for_per_request_reg_dump(prepare, prepare_hw_data);
+		goto end;
+	} else {
+		cam_ife_mgr_check_for_per_request_reg_dump(prepare, prepare_hw_data);
+	}
+
+	/* add reg update commands */
+	if (hw_mgr->csid_aup_rup_en)
+		rc = cam_ife_mgr_csid_add_reg_update(ctx,
+			prepare, &prepare_hw_data->kmd_cmd_buff_info);
+
+	else
+		rc = cam_ife_mgr_isp_add_reg_update(ctx,
+			prepare, &prepare_hw_data->kmd_cmd_buff_info);
+
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Add RUP fail csid_aup_rup_en %d, ctx_idx: %u",
+			hw_mgr->csid_aup_rup_en, ctx->ctx_index);
+		goto end;
+	}
+
+	/* add go_cmd for offline context */
+	if (prepare->num_out_map_entries &&
+		prepare->num_in_map_entries &&
+		ctx->flags.is_offline) {
+		if (ctx->ctx_type != CAM_IFE_CTX_TYPE_SFE)
+			rc = cam_ife_hw_mgr_add_vfe_go_cmd(ctx, prepare,
+				&prepare_hw_data->kmd_cmd_buff_info);
+		else
+			rc = cam_ife_hw_mgr_add_csid_go_cmd(ctx, prepare,
+				&prepare_hw_data->kmd_cmd_buff_info);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+				"Add %s GO_CMD failed in ctx: %u rc: %d",
+				(ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE ?
+				"CSID" : "IFE RD"), ctx->ctx_index, rc);
+	}
+
+	/* Must leave room for the Gen IRQ command appended at apply time */
+	if (prepare_hw_data->kmd_cmd_buff_info.size <=
+		prepare_hw_data->kmd_cmd_buff_info.used_bytes) {
+		CAM_ERR(CAM_ISP, "No Sufficient memory for the Gen IRQ command, ctx_idx: %u",
+			ctx->ctx_index);
+		rc = -ENOMEM;
+	}
+
+end:
+	return rc;
+}
+
+/* cam_ife_mgr_resume_hw() - re-include this context in BW voting to resume */
+static int cam_ife_mgr_resume_hw(struct cam_ife_hw_mgr_ctx *ctx)
+{
+	return cam_ife_mgr_bw_control(ctx, CAM_ISP_BW_CONTROL_INCLUDE);
+}
+
+/*
+ * cam_ife_mgr_sof_irq_debug() - enable/disable SOF IRQ debug on every CSID
+ * in the context, then on CAMIF/RDI0 IFE src resources. Errors are OR-ed
+ * into rc and logged at debug level; the walk continues regardless.
+ */
+static int cam_ife_mgr_sof_irq_debug(
+	struct cam_ife_hw_mgr_ctx *ctx,
+	uint32_t sof_irq_enable)
+{
+	int rc = 0;
+	uint32_t i = 0, hw_idx;
+	struct cam_isp_hw_mgr_res     *hw_mgr_res = NULL;
+	struct cam_isp_resource_node  *rsrc_node = NULL;
+	struct cam_ife_hw_mgr *hw_mgr = ctx->hw_mgr;
+
+	/* Per CSID enablement will enable for all paths */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].hw_type != CAM_ISP_HW_TYPE_CSID)
+			continue;
+
+		hw_idx = ctx->base[i].idx;
+		if (hw_mgr->csid_devices[hw_idx]) {
+			rc |= hw_mgr->csid_devices[hw_idx]->hw_ops.process_cmd(
+				hw_mgr->csid_devices[hw_idx]->hw_priv,
+				CAM_IFE_CSID_SOF_IRQ_DEBUG,
+				&sof_irq_enable, sizeof(sof_irq_enable));
+			if (rc)
+				CAM_DBG(CAM_ISP,
+					"Failed to set CSID_%u sof irq debug cfg rc: %d",
+					hw_idx, rc);
+		}
+	}
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			rsrc_node = hw_mgr_res->hw_res[i];
+			if ((rsrc_node->res_id == CAM_ISP_HW_VFE_IN_CAMIF) ||
+				(rsrc_node->res_id == CAM_ISP_HW_VFE_IN_RDI0)) {
+				/* Prefer the per-resource hook; fall back to the hw_intf op */
+				if (rsrc_node->process_cmd) {
+					rc |= rsrc_node->process_cmd(
+						hw_mgr_res->hw_res[i],
CAM_ISP_HW_CMD_SOF_IRQ_DEBUG, + &sof_irq_enable, + sizeof(sof_irq_enable)); + } else if (rsrc_node->hw_intf->hw_ops.process_cmd) { + struct cam_vfe_enable_sof_irq_args sof_irq_args; + + sof_irq_args.res = rsrc_node; + sof_irq_args.enable_sof_irq_debug = true; + rc |= rsrc_node->hw_intf->hw_ops.process_cmd( + rsrc_node->hw_intf->hw_priv, + CAM_ISP_HW_CMD_SOF_IRQ_DEBUG, + &sof_irq_args, sizeof(sof_irq_args)); + } + if (rc) + CAM_DBG(CAM_ISP, + "Failed to set VFE:%u sof irq debug, rc: %d", + rsrc_node->hw_intf->hw_idx, rc); + } + } + } + + return rc; +} + +static inline void cam_ife_hw_mgr_stop_bus_rd_for_res(struct cam_ife_hw_mgr_ctx *ctx, + uint32_t res_id) +{ + struct cam_isp_hw_mgr_res *isp_hw_res; + + list_for_each_entry(isp_hw_res, &ctx->res_list_ife_in_rd, list) { + if (isp_hw_res->res_id == res_id) { + CAM_ERR(CAM_ISP, "Stopping IFE/SFE bus rd res id 0x%x, ctx_idx: %u", + res_id, ctx->ctx_index); + cam_ife_hw_mgr_stop_hw_res(isp_hw_res); + break; + } + } +} + +static void cam_ife_hw_mgr_stop_pf_hw_res(struct cam_ife_hw_mgr_ctx *ctx, + uint32_t res_id, uint32_t hw_type) +{ + struct cam_isp_hw_mgr_res *isp_hw_res; + + if (hw_type == CAM_ISP_HW_TYPE_VFE) { + if (cam_ife_hw_mgr_is_ife_out_port(res_id)) { + if (ctx->vfe_out_map[res_id & 0xFF] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id & 0xFF); + return; + } + + isp_hw_res = &ctx->res_list_ife_out[ctx->vfe_out_map[res_id & 0xFF]]; + CAM_ERR(CAM_ISP, "Stopping IFE out resource res id 0x%x ctx_idx: %u", + res_id, ctx->ctx_index); + cam_ife_hw_mgr_stop_hw_res(isp_hw_res); + } else + cam_ife_hw_mgr_stop_bus_rd_for_res(ctx, res_id); + } else if (hw_type == CAM_ISP_HW_TYPE_SFE) { + if (cam_ife_hw_mgr_is_sfe_out_port(res_id)) { + if (ctx->sfe_out_map[res_id & 0xFF] == 0xFF) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id & 0xFF); + return; + } + + isp_hw_res = &ctx->res_list_sfe_out[ctx->sfe_out_map[res_id & 0xFF]]; + CAM_ERR(CAM_ISP, "Stopping SFE out resource res id 0x%x ctx_idx: 
%u", + res_id, ctx->ctx_index); + cam_ife_hw_mgr_stop_hw_res(isp_hw_res); + } else + cam_ife_hw_mgr_stop_bus_rd_for_res(ctx, res_id); + } +} + +static int cam_ife_hw_mgr_dump_bus_info( + uint32_t res_id, + struct cam_isp_hw_intf_data *hw_intf_data) +{ + struct cam_isp_hw_event_info event_info; + + event_info.res_id = res_id; + + return hw_intf_data->hw_intf->hw_ops.process_cmd( + hw_intf_data->hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_BUS_INFO, + &event_info, sizeof(struct cam_isp_hw_event_info)); +} + +static void cam_ife_mgr_pf_dump(struct cam_ife_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + int i, rc = 0; + + /* dump the registers */ + rc = cam_ife_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc, + ctx->num_reg_dump_buf, + CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR, NULL, false); + if (rc) + CAM_ERR(CAM_ISP, + "Reg dump on pf failed req id: %llu rc: %d ctx_idx: %u", + ctx->applied_req_id, rc, ctx->ctx_index); + + + /* dump the acquire data */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_IFE_CSID_LOG_ACQUIRE_DATA, + hw_mgr_res->hw_res[i], + sizeof(void *)); + if (rc) + CAM_ERR(CAM_ISP, + "csid acquire data dump failed, ctx_idx: %u", + ctx->ctx_index); + } else + CAM_ERR(CAM_ISP, "NULL hw_intf! 
ctx_idx: %u", ctx->ctx_index); + } + } +} + +static void cam_ife_mgr_pf_dump_mid_info( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_hw_cmd_args *hw_cmd_args, + struct cam_isp_hw_intf_data *hw_intf_data) +{ + struct cam_packet *packet; + struct cam_isp_hw_get_cmd_update cmd_update; + struct cam_isp_hw_get_res_for_mid get_res; + int rc = 0; + struct cam_ctx_request *req_pf; + + req_pf = (struct cam_ctx_request *) + hw_cmd_args->u.pf_cmd_args->pf_req_info->req; + packet = (struct cam_packet *)req_pf->packet; + get_res.mid = hw_cmd_args->u.pf_cmd_args->pf_args->pf_smmu_info->mid; + get_res.pid = hw_cmd_args->u.pf_cmd_args->pf_args->pf_smmu_info->pid; + cmd_update.cmd_type = CAM_ISP_HW_CMD_GET_RES_FOR_MID; + cmd_update.data = (void *) &get_res; + + /* get resource id for given mid */ + rc = hw_intf_data->hw_intf->hw_ops.process_cmd(hw_intf_data->hw_intf->hw_priv, + cmd_update.cmd_type, &cmd_update, sizeof(struct cam_isp_hw_get_cmd_update)); + + if (rc) { + CAM_ERR(CAM_ISP, + "getting mid port resource id failed ctx id:%u req id:%lld", + ctx->ctx_index, packet->header.request_id); + return; + } + + hw_cmd_args->u.pf_cmd_args->pf_args->pf_context_info.resource_type = get_res.out_res_id; + ctx->flags.pf_mid_found = true; + ctx->pf_info.mid = get_res.mid; + ctx->pf_info.out_port_id = get_res.out_res_id; + CAM_ERR(CAM_ISP, + "Page fault on resource id:(0x%x) ctx id:%u req id:%lld", + get_res.out_res_id, ctx->ctx_index, packet->header.request_id); +} + +static void cam_ife_mgr_dump_pf_data( + struct cam_ife_hw_mgr *hw_mgr, + struct cam_hw_cmd_args *hw_cmd_args) +{ + struct cam_ife_hw_mgr_ctx *ctx; + struct cam_packet *packet; + struct cam_isp_hw_intf_data *hw_intf_data; + struct cam_hw_dump_pf_args *pf_args; + bool *ctx_found; + int i, j; + struct cam_ctx_request *req_pf; + + ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map; + req_pf = (struct cam_ctx_request *) + hw_cmd_args->u.pf_cmd_args->pf_req_info->req; + packet = (struct cam_packet *)req_pf->packet; + 
pf_args = hw_cmd_args->u.pf_cmd_args->pf_args; + ctx_found = &(pf_args->pf_context_info.ctx_found); + + if ((*ctx_found) && (ctx->flags.pf_mid_found)) + goto outportlog; + + /* Determine if the current context is the faulted context based on pid */ + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE) + hw_intf_data = g_ife_hw_mgr.ife_devices[ctx->base[i].idx]; + else if (ctx->base[i].hw_type == CAM_ISP_HW_TYPE_SFE) + hw_intf_data = g_ife_hw_mgr.sfe_devices[ctx->base[i].idx]; + else + continue; + + /* + * Few old targets do not have support for PID, for such targets, + * we need to print mid for all the bases, SFE enabled targets + * are expected to have PID support. + */ + if (!g_ife_hw_mgr.hw_pid_support) { + if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) + cam_ife_mgr_pf_dump_mid_info(ctx, hw_cmd_args, hw_intf_data); + continue; + } + + for (j = 0; j < hw_intf_data->num_hw_pid; j++) { + if (hw_intf_data->hw_pid[j] == pf_args->pf_smmu_info->pid) { + *ctx_found = true; + CAM_ERR(CAM_ISP, "PF found for %s%d pid: %u ctx_idx: %u", + ctx->base[i].hw_type == CAM_ISP_HW_TYPE_VFE ? 
"VFE" : "SFE", + ctx->base[i].idx, pf_args->pf_smmu_info->pid, + ctx->ctx_index); + cam_ife_mgr_pf_dump_mid_info(ctx, hw_cmd_args, hw_intf_data); + + /* If MID found - stop hw res and dump client info */ + if (ctx->flags.pf_mid_found) { + cam_ife_hw_mgr_stop_pf_hw_res(ctx, ctx->pf_info.out_port_id, + ctx->base[i].hw_type); + cam_ife_hw_mgr_dump_bus_info(ctx->pf_info.out_port_id, + hw_intf_data); + } + break; + } + } + + if (*ctx_found) + break; + } + + if (g_ife_hw_mgr.hw_pid_support && (i == ctx->num_base || !*ctx_found)) + CAM_INFO(CAM_ISP, + "This context does not cause pf:pid:%d ctx_id:%u", + pf_args->pf_smmu_info->pid, ctx->ctx_index); + + cam_ife_mgr_pf_dump(ctx); + +outportlog: + cam_packet_util_dump_io_bufs(packet, hw_mgr->mgr_common.img_iommu_hdl, + hw_mgr->mgr_common.img_iommu_hdl_secure, pf_args, true); +} + +int cam_isp_config_csid_rup_aup( + struct cam_ife_hw_mgr_ctx *ctx) +{ + int rc = 0, i, j, hw_idx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct list_head *res_list; + struct cam_isp_resource_node *res; + struct cam_isp_csid_reg_update_args + rup_args[CAM_IFE_CSID_HW_NUM_MAX] = {0}; + + res_list = &ctx->res_list_ife_csid; + for (j = 0; j < ctx->num_base; j++) { + if (ctx->base[j].hw_type != CAM_ISP_HW_TYPE_CSID) + continue; + + list_for_each_entry(hw_mgr_res, res_list, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + res = hw_mgr_res->hw_res[i]; + if (res->hw_intf->hw_idx != ctx->base[j].idx) + continue; + + hw_idx = res->hw_intf->hw_idx; + rup_args[hw_idx].res[rup_args[hw_idx].num_res] = res; + rup_args[hw_idx].num_res++; + + CAM_DBG(CAM_ISP, + "Reg update for res %d hw_id %d cdm_idx %d ctx_idx: %u", + res->res_id, res->hw_intf->hw_idx, ctx->base[j].idx, + ctx->ctx_index); + } + } + } + + for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) { + if (!rup_args[i].num_res) + continue; + + rup_args[i].cmd.cmd_buf_addr = NULL; + 
rup_args[i].cmd.size = 0; + rup_args[i].reg_write = true; + rup_args[i].last_applied_mup = ctx->current_mup; + res = rup_args[i].res[0]; + + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_GET_REG_UPDATE, &rup_args[i], + sizeof(struct cam_isp_csid_reg_update_args)); + if (rc) + return rc; + + CAM_DBG(CAM_ISP, + "Reg update for CSID: %u mup: %u ctx_idx: %u", + res->hw_intf->hw_idx, ctx->current_mup, ctx->ctx_index); + } + + return rc; +} + +static int cam_ife_mgr_configure_scratch_for_ife( + bool is_streamon, + struct cam_ife_hw_mgr_ctx *ctx) +{ + int i, j, rc = 0; + uint32_t res_id; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_ife_sfe_scratch_buf_info *port_info; + struct cam_ife_scratch_buf_cfg *ife_buf_info; + struct cam_isp_hw_mgr_res *res_list_ife_out = NULL; + + ife_buf_info = ctx->scratch_buf_info.ife_scratch_config; + res_list_ife_out = ctx->res_list_ife_out; + + if (ctx->scratch_buf_info.ife_scratch_config->skip_scratch_cfg_streamon) + goto end; + + for (i = 0; i < ife_buf_info->num_config; i++) { + res_id = ife_buf_info->buf_info[i].res_id & 0xFF; + port_info = &ife_buf_info->buf_info[i]; + + if (ctx->vfe_out_map[res_id] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", res_id); + return -EINVAL; + } + + hw_mgr_res = &res_list_ife_out[ctx->vfe_out_map[res_id]]; + + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + /* j = 1 is not valid for this use-case */ + if (!hw_mgr_res->hw_res[j]) + continue; + + if ((is_streamon) && + cam_ife_mgr_validate_for_io_buffers( + i, ctx->scratch_buf_info.ife_scratch_config->streamon_buf_mask)) + continue; + + CAM_DBG(CAM_ISP, + "Configure scratch for IFE res: 0x%x io_addr %pK ctx_idx: %u", + ife_buf_info->buf_info[i].res_id, port_info->io_addr, + ctx->ctx_index); + + rc = cam_isp_sfe_send_scratch_buf_upd(0x0, + CAM_ISP_HW_CMD_BUF_UPDATE, + hw_mgr_res->hw_res[j], port_info, + NULL, NULL); + if (rc) + goto end; + } + } + +end: + return rc; +} + +static int 
cam_ife_mgr_configure_scratch_for_sfe( + bool is_streamon, struct cam_ife_hw_mgr_ctx *ctx) +{ + int i, j, res_id, rc = 0; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_ife_sfe_scratch_buf_info *buf_info; + struct cam_sfe_scratch_buf_cfg *sfe_scratch_config; + struct list_head *res_list_in_rd = NULL; + struct cam_isp_hw_mgr_res *res_list_sfe_out = NULL; + + res_list_in_rd = &ctx->res_list_ife_in_rd; + res_list_sfe_out = ctx->res_list_sfe_out; + sfe_scratch_config = ctx->scratch_buf_info.sfe_scratch_config; + + if (sfe_scratch_config->skip_scratch_cfg_streamon) + goto end; + + for (i = 0; i < CAM_SFE_FE_RDI_NUM_MAX; i++) { + if (ctx->sfe_out_map[i] == 0xff) + continue; + + hw_mgr_res = &res_list_sfe_out[ctx->sfe_out_map[i]]; + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + /* j = 1 is not valid for this use-case */ + if (!hw_mgr_res->hw_res[j]) + continue; + + res_id = hw_mgr_res->hw_res[j]->res_id; + + if (cam_isp_sfe_validate_for_scratch_buf_config( + (res_id - CAM_ISP_SFE_OUT_RES_RDI_0), ctx, true)) + continue; + + if ((is_streamon) && + cam_ife_mgr_validate_for_io_buffers( + (res_id - CAM_ISP_SFE_OUT_RES_RDI_0), + sfe_scratch_config->streamon_buf_mask)) + continue; + + buf_info = &sfe_scratch_config->buf_info[ + res_id - CAM_ISP_SFE_OUT_RES_RDI_0]; + + /* Check if scratch available for this resource */ + if (!buf_info->config_done) { + CAM_ERR(CAM_ISP, + "No scratch buffer config found for res: %u on ctx: %u", + res_id, ctx->ctx_index); + rc = -EFAULT; + goto end; + } + + CAM_DBG(CAM_ISP, + "RDI%d res_id 0x%x idx %u io_addr %pK ctx_idx: %u", + i, + hw_mgr_res->hw_res[j]->res_id, + (res_id - CAM_ISP_SFE_OUT_RES_RDI_0), + buf_info->io_addr, ctx->ctx_index); + + rc = cam_isp_sfe_send_scratch_buf_upd(0x0, + CAM_ISP_HW_CMD_BUF_UPDATE, + hw_mgr_res->hw_res[j], buf_info, + NULL, NULL); + if (rc) + goto end; + } + } + + list_for_each_entry(hw_mgr_res, res_list_in_rd, list) { + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + if (!hw_mgr_res->hw_res[j]) + continue; 
+ + res_id = hw_mgr_res->hw_res[j]->res_id; + + if (cam_isp_sfe_validate_for_scratch_buf_config( + (res_id - CAM_ISP_SFE_IN_RD_0), ctx, true)) + continue; + + if ((is_streamon) && + cam_ife_mgr_validate_for_io_buffers( + (res_id - CAM_ISP_SFE_IN_RD_0), + sfe_scratch_config->streamon_buf_mask)) + continue; + + buf_info = &ctx->scratch_buf_info.sfe_scratch_config->buf_info + [res_id - CAM_ISP_SFE_IN_RD_0]; + CAM_DBG(CAM_ISP, + "RD res_id 0x%x idx %u io_addr %pK ctx_idx: %u", + hw_mgr_res->hw_res[j]->res_id, + (res_id - CAM_ISP_SFE_IN_RD_0), + buf_info->io_addr, ctx->ctx_index); + rc = cam_isp_sfe_send_scratch_buf_upd(0x0, + CAM_ISP_HW_CMD_BUF_UPDATE_RM, + hw_mgr_res->hw_res[j], buf_info, + NULL, NULL); + if (rc) + goto end; + } + } + +end: + return rc; +} + +/* + * Scratch buffer is for sHDR/FS usescases involing SFE RDI0-2 + * There is no possibility of dual in this case, hence + * using the scratch buffer provided during INIT corresponding + * to each left RDIs + */ +static int cam_ife_mgr_prog_default_settings(int64_t last_applied_max_pd_req, + bool force_disable_drv, bool is_streamon, struct cam_ife_hw_mgr_ctx *ctx) +{ + int rc = 0; + + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) { + /* Check for SFE scratch buffers */ + rc = cam_ife_mgr_configure_scratch_for_sfe(is_streamon, ctx); + if (rc) + goto end; + } + + /* Check for IFE scratch buffers */ + if (ctx->scratch_buf_info.ife_scratch_config && + ctx->scratch_buf_info.ife_scratch_config->num_config) { + rc = cam_ife_mgr_configure_scratch_for_ife(is_streamon, ctx); + if (rc) + goto end; + } + + /* Program rup & aup only at run time */ + if (!is_streamon) { + /* + * Sometimes, the sensor applied settings but isp doesn't have + * valid packet, then ife calls apply_default interface, in the + * meanwhile, we also need to update the drv config based on + * previous sensor applied info. 
+ */ + if ((ctx->flags.dynamic_drv_supported) && + (last_applied_max_pd_req >= 0)) + cam_isp_mgr_drv_config(ctx, last_applied_max_pd_req, force_disable_drv, NULL); + + rc = cam_isp_config_csid_rup_aup(ctx); + if (rc) + CAM_ERR(CAM_ISP, + "RUP/AUP update failed for scratch buffers in ctx: %u", + ctx->ctx_index); + } + +end: + return rc; +} + +static int cam_hw_mgr_reset_out_of_sync_cnt( + struct cam_ife_hw_mgr_ctx *ife_ctx) +{ + int rc = -EINVAL; + uint32_t i; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_csid_reset_out_of_sync_count_args args; + + list_for_each_entry(hw_mgr_res, + &ife_ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + args.node_res = + hw_mgr_res->hw_res[i]; + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_IFE_CSID_RESET_OUT_OF_SYNC_CNT, + &args, + sizeof(args)); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to reset out of sync cnt"); + } + } + } + + return rc; +} + +static int cam_ife_mgr_cmd_get_last_consumed_addr( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_isp_hw_done_event_data *done) +{ + int i, rc = -EINVAL; + struct cam_isp_resource_node *res; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct list_head *res_list_isp_src; + + if (done->hw_type == CAM_ISP_HW_TYPE_VFE) + res_list_isp_src = &ife_ctx->res_list_ife_src; + else if (done->hw_type == CAM_ISP_HW_TYPE_SFE) + res_list_isp_src = &ife_ctx->res_list_sfe_src; + else { + CAM_ERR(CAM_ISP, "invalid hw_type:%d", done->hw_type); + return rc; + } + + list_for_each_entry(hw_mgr_res, res_list_isp_src, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + res = hw_mgr_res->hw_res[i]; + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + 
CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR, + done, sizeof(struct cam_isp_hw_done_event_data)); + + return rc; + } + } + + return rc; +} + +static void *cam_ife_mgr_user_dump_stream_info( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_ife_hw_mgr_ctx *hw_mgr_ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res = NULL; + struct cam_isp_resource_node *hw_res = NULL; + int32_t *addr; + int i; + int hw_idx[CAM_ISP_HW_SPLIT_MAX] = { -1, -1 }; + int sfe_hw_idx[CAM_ISP_HW_SPLIT_MAX] = { -1, -1 }; + + hw_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)dump_struct; + + if (!list_empty(&hw_mgr_ctx->res_list_ife_src)) { + hw_mgr_res = list_first_entry(&hw_mgr_ctx->res_list_ife_src, + struct cam_isp_hw_mgr_res, list); + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + if (hw_res && hw_res->hw_intf) + hw_idx[i] = hw_res->hw_intf->hw_idx; + } + } + + if (!list_empty(&hw_mgr_ctx->res_list_sfe_src)) { + hw_mgr_res = list_first_entry(&hw_mgr_ctx->res_list_sfe_src, + struct cam_isp_hw_mgr_res, list); + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + if (hw_res && hw_res->hw_intf) + sfe_hw_idx[i] = hw_res->hw_intf->hw_idx; + } + } + + addr = (int32_t *)addr_ptr; + + *addr++ = hw_mgr_ctx->flags.is_dual; + *addr++ = hw_mgr_ctx->ctx_type; + + *addr++ = hw_idx[CAM_ISP_HW_SPLIT_LEFT]; + *addr++ = hw_idx[CAM_ISP_HW_SPLIT_RIGHT]; + *addr++ = sfe_hw_idx[CAM_ISP_HW_SPLIT_LEFT]; + *addr++ = sfe_hw_idx[CAM_ISP_HW_SPLIT_RIGHT]; + + *addr++ = hw_mgr_ctx->flags.is_sfe_shdr; + *addr++ = hw_mgr_ctx->flags.is_sfe_fs; + *addr++ = hw_mgr_ctx->flags.dsp_enabled; + *addr++ = hw_mgr_ctx->flags.is_offline; + + return addr; +} + +static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args) +{ + int rc = 0; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_cmd_args *isp_hw_cmd_args = NULL; + struct cam_packet *packet; + unsigned long 
rem_jiffies = 0; + struct cam_isp_comp_record_query *query_cmd; + struct cam_isp_prepare_hw_update_data *hw_update_data; + struct cam_isp_hw_per_req_info *per_req_info = NULL; + struct cam_isp_hw_drv_info *drv_info = NULL; + + if (!hw_mgr_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map; + if (!ctx || !ctx->flags.ctx_in_use) { + CAM_ERR(CAM_ISP, "Fatal: Invalid context is used"); + return -EPERM; + } + + switch (hw_cmd_args->cmd_type) { + case CAM_HW_MGR_CMD_INTERNAL: + if (!hw_cmd_args->u.internal_args) { + CAM_ERR(CAM_ISP, "Invalid cmd arguments, ctx_idx: %u", ctx->ctx_index); + return -EINVAL; + } + + isp_hw_cmd_args = (struct cam_isp_hw_cmd_args *) + hw_cmd_args->u.internal_args; + + switch (isp_hw_cmd_args->cmd_type) { + case CAM_ISP_HW_MGR_CMD_PAUSE_HW: + cam_ife_mgr_pause_hw(ctx); + break; + case CAM_ISP_HW_MGR_CMD_RESUME_HW: + cam_ife_mgr_resume_hw(ctx); + break; + case CAM_ISP_HW_MGR_CMD_SOF_DEBUG: + cam_ife_mgr_sof_irq_debug(ctx, + isp_hw_cmd_args->u.sof_irq_enable); + break; + case CAM_ISP_HW_MGR_CMD_CTX_TYPE: + if (ctx->flags.is_fe_enabled && ctx->flags.is_offline) + isp_hw_cmd_args->u.ctx_info.type = CAM_ISP_CTX_OFFLINE; + else if (ctx->flags.is_fe_enabled && !ctx->flags.is_offline && + ctx->ctx_type != CAM_IFE_CTX_TYPE_SFE) + isp_hw_cmd_args->u.ctx_info.type = CAM_ISP_CTX_FS2; + else + isp_hw_cmd_args->u.ctx_info.type = CAM_ISP_CTX_PIX; + + if (hw_mgr->csid_aup_rup_en) + isp_hw_cmd_args->u.ctx_info.bubble_recover_dis = 1; + else + isp_hw_cmd_args->u.ctx_info.bubble_recover_dis = 0; + break; + case CAM_ISP_HW_MGR_GET_PACKET_OPCODE: + packet = (struct cam_packet *) + isp_hw_cmd_args->cmd_data; + if (((packet->header.op_code + 1) & 0xF) == //why? 
+ CAM_ISP_PACKET_INIT_DEV) + isp_hw_cmd_args->u.packet_op_code = + CAM_ISP_PACKET_INIT_DEV; + else + isp_hw_cmd_args->u.packet_op_code = + CAM_ISP_PACKET_UPDATE_DEV; + break; + case CAM_ISP_HW_MGR_GET_LAST_CDM_DONE: + isp_hw_cmd_args->cdm_done_ts = + ctx->cdm_done_ts; + isp_hw_cmd_args->u.last_cdm_done = + ctx->last_cdm_done_req; + break; + case CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG: { + bool skip_rup_aup = false; + + if (!isp_hw_cmd_args->cmd_data) { + CAM_ERR(CAM_ISP, "Invalid cmd data"); + rc = -EINVAL; + break; + } + + skip_rup_aup = *((bool *)isp_hw_cmd_args->cmd_data); + rc = cam_ife_mgr_prog_default_settings( + isp_hw_cmd_args->u.default_cfg_params.last_applied_max_pd_req, + isp_hw_cmd_args->u.default_cfg_params.force_disable_drv, + skip_rup_aup, ctx); + } + break; + case CAM_ISP_HW_MGR_GET_SOF_TS: + rc = cam_ife_mgr_cmd_get_sof_timestamp(ctx, + &isp_hw_cmd_args->u.sof_ts.curr, + &isp_hw_cmd_args->u.sof_ts.boot, + &isp_hw_cmd_args->u.sof_ts.prev, NULL, true); + break; + case CAM_ISP_HW_MGR_DUMP_STREAM_INFO: + rc = cam_common_user_dump_helper( + (void *)(isp_hw_cmd_args->cmd_data), + cam_ife_mgr_user_dump_stream_info, ctx, + sizeof(int32_t), "ISP_STREAM_INFO_FROM_IFE_HW_MGR:"); + break; + case CAM_ISP_HW_MGR_GET_BUS_COMP_GROUP: + query_cmd = (struct cam_isp_comp_record_query *) + isp_hw_cmd_args->cmd_data; + memcpy(query_cmd->vfe_bus_comp_grp, ctx->vfe_bus_comp_grp, + sizeof(struct cam_isp_context_comp_record) * + CAM_IFE_BUS_COMP_NUM_MAX); + if (ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE) + memcpy(query_cmd->sfe_bus_comp_grp, ctx->sfe_bus_comp_grp, + sizeof(struct cam_isp_context_comp_record) * + CAM_SFE_BUS_COMP_NUM_MAX); + break; + case CAM_ISP_HW_MGR_GET_LAST_CONSUMED_ADDR: + rc = cam_ife_mgr_cmd_get_last_consumed_addr(ctx, + (struct cam_isp_hw_done_event_data *)(isp_hw_cmd_args->cmd_data)); + break; + case CAM_ISP_HW_MGR_SET_DRV_INFO: + ctx->wr_per_req_index = (++ctx->wr_per_req_index) % MAX_DRV_REQUEST_DEPTH; + per_req_info = 
&ctx->per_req_info[ctx->wr_per_req_index]; + drv_info = &per_req_info->drv_info; + memset(drv_info, 0, sizeof(struct cam_isp_hw_drv_info)); + drv_info->req_id = isp_hw_cmd_args->u.drv_info.req_id; + drv_info->frame_duration = isp_hw_cmd_args->u.drv_info.frame_duration; + drv_info->blanking_duration = isp_hw_cmd_args->u.drv_info.blanking_duration; + CAM_DBG(CAM_PERF, "Save req:%llu per request info to index:%d, ctx_idx: %u", + drv_info->req_id, ctx->wr_per_req_index, ctx->ctx_index); + break; + default: + CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x, ctx_idx: %u", + hw_cmd_args->cmd_type, ctx->ctx_index); + rc = -EINVAL; + break; + } + break; + case CAM_HW_MGR_CMD_DUMP_PF_INFO: + cam_ife_mgr_dump_pf_data(hw_mgr, hw_cmd_args); + + break; + case CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH: + if (ctx->flags.dump_on_flush) + return 0; + + ctx->flags.dump_on_flush = true; + if (!atomic_read(&ctx->cdm_done)) { + rem_jiffies = cam_common_wait_for_completion_timeout( + &ctx->config_done_complete, msecs_to_jiffies(30)); + if (rem_jiffies == 0) + CAM_ERR(CAM_ISP, + "config done completion timeout, Reg dump will be unreliable rc=%d ctx_index %u", + rc, ctx->ctx_index); + } + + rc = cam_ife_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc, + ctx->num_reg_dump_buf, + CAM_ISP_PACKET_META_REG_DUMP_ON_FLUSH, NULL, false); + if (rc) { + CAM_ERR(CAM_ISP, + "Reg dump on flush failed req id: %llu rc: %d ctx_idx: %u", + ctx->applied_req_id, rc, ctx->ctx_index); + return rc; + } + break; + case CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR: + if (ctx->flags.dump_on_error) + return 0; + + ctx->flags.dump_on_error = true; + rc = cam_ife_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc, + ctx->num_reg_dump_buf, + CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR, NULL, false); + if (rc) { + CAM_ERR(CAM_ISP, + "Reg dump on error failed req id: %llu rc: %d ctx_idx: %u", + ctx->applied_req_id, rc, ctx->ctx_index); + return rc; + } + break; + case CAM_HW_MGR_CMD_REG_DUMP_PER_REQ: + hw_update_data = hw_cmd_args->u.hw_update_data; + 
if (g_ife_hw_mgr.debug_cfg.per_req_reg_dump && hw_update_data) { + rc = cam_ife_mgr_handle_reg_dump(ctx, + hw_update_data->reg_dump_buf_desc, + hw_update_data->num_reg_dump_buf, + CAM_ISP_PACKET_META_REG_DUMP_PER_REQUEST, + NULL, false); + if (rc) { + CAM_ERR(CAM_ISP, + "Reg dump for req#%llu rc: %d ctx_idx: %u", + ctx->applied_req_id, rc, ctx->ctx_index); + return rc; + } + } + break; + case CAM_HW_MGR_CMD_DUMP_ACQ_INFO: + cam_ife_hw_mgr_dump_acquire_resources(ctx); + break; + default: + CAM_ERR(CAM_ISP, "Invalid cmd, ctx_idx: %u", ctx->ctx_index); + } + + return rc; +} + +static int cam_ife_mgr_user_dump_hw( + struct cam_ife_hw_mgr_ctx *ife_ctx, + struct cam_hw_dump_args *dump_args) +{ + int rc = 0; + + if (!ife_ctx || !dump_args) { + CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK", + ife_ctx, dump_args); + rc = -EINVAL; + goto end; + } + + rc = cam_ife_mgr_handle_reg_dump(ife_ctx, + ife_ctx->reg_dump_buf_desc, + ife_ctx->num_reg_dump_buf, + CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR, + NULL, + false); + if (rc) { + CAM_ERR(CAM_ISP, + "Dump failed req: %lld handle %u offset %u ctx_idx: %u", + dump_args->request_id, + dump_args->buf_handle, + dump_args->offset, ife_ctx->ctx_index); + goto end; + } + +end: + return rc; +} + +static int cam_ife_mgr_dump(void *hw_mgr_priv, void *args) +{ + struct cam_isp_hw_dump_args isp_hw_dump_args; + struct cam_hw_dump_args *dump_args = (struct cam_hw_dump_args *)args; + struct cam_hw_intf *hw_intf; + struct cam_ife_hw_mgr_ctx *ife_ctx = (struct cam_ife_hw_mgr_ctx *) + dump_args->ctxt_to_hw_map; + int i; + int rc = 0; + uint32_t hw_idx = 0; + + if (!ife_ctx) { + CAM_ERR(CAM_ISP, "ISP CTX null"); + return -EINVAL; + } else if (!ife_ctx->num_base) { + CAM_ERR(CAM_ISP, "ISP CTX num_base null, ctx_idx: %u", ife_ctx->ctx_index); + return -EINVAL; + } + + /* for some targets, information about the IFE registers to be dumped + * is already submitted with the hw manager. 
In this case, we + * can dump just the related registers and skip going to core files. + */ + if (!ife_ctx->flags.dump_on_error) { + cam_ife_mgr_user_dump_hw(ife_ctx, dump_args); + ife_ctx->flags.dump_on_error = true; + } + + rc = cam_mem_get_cpu_buf(dump_args->buf_handle, + &isp_hw_dump_args.cpu_addr, + &isp_hw_dump_args.buf_len); + if (rc) { + CAM_ERR(CAM_ISP, "Invalid handle %u rc %d ctx_idx: %u", + dump_args->buf_handle, rc, ife_ctx->ctx_index); + return -EINVAL; + } + + isp_hw_dump_args.offset = dump_args->offset; + isp_hw_dump_args.req_id = dump_args->request_id; + + if (isp_hw_dump_args.buf_len <= isp_hw_dump_args.offset) { + CAM_ERR(CAM_ISP, + "Dump offset overshoot offset %zu buf_len %zu ctx_idx: %u", + isp_hw_dump_args.offset, isp_hw_dump_args.buf_len, ife_ctx->ctx_index); + rc = -EINVAL; + goto put_cpu_buf; + } + + for (i = 0; i < ife_ctx->num_base; i++) { + hw_idx = ife_ctx->base[i].idx; + + switch (ife_ctx->base[i].hw_type) { + case CAM_ISP_HW_TYPE_CSID: + hw_intf = ife_ctx->hw_mgr->csid_devices[hw_idx]; + if (!hw_intf) { + CAM_ERR(CAM_ISP, "hw_intf null, returning rc...ctx_idx: %u", + ife_ctx->ctx_index); + rc = -EINVAL; + goto put_cpu_buf; + } + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_USER_DUMP, &isp_hw_dump_args, + sizeof(struct cam_isp_hw_dump_args)); + if (rc) { + rc = -EINVAL; + goto put_cpu_buf; + } + break; + case CAM_ISP_HW_TYPE_VFE: + hw_intf = ife_ctx->hw_mgr->ife_devices[hw_idx]->hw_intf; + if (!hw_intf || !hw_intf->hw_priv) { + CAM_ERR(CAM_ISP, "hw_intf null, returning rc...ctx_idx: %u", + ife_ctx->ctx_index); + rc = -EINVAL; + goto put_cpu_buf; + } + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_USER_DUMP, &isp_hw_dump_args, + sizeof(struct cam_isp_hw_dump_args)); + if (rc) { + rc = -EINVAL; + goto put_cpu_buf; + } + break; + case CAM_ISP_HW_TYPE_SFE: + hw_intf = ife_ctx->hw_mgr->sfe_devices[hw_idx]->hw_intf; + if (!hw_intf || !hw_intf->hw_priv) { + CAM_ERR(CAM_ISP, "hw_intf null, returning 
rc...ctx_idx: %u", + ife_ctx->ctx_index); + rc = -EINVAL; + goto put_cpu_buf; + } + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_USER_DUMP, &isp_hw_dump_args, + sizeof(struct cam_isp_hw_dump_args)); + if (rc) { + rc = -EINVAL; + goto put_cpu_buf; + } + break; + default: + break; + } + + } + + dump_args->offset = isp_hw_dump_args.offset; + +put_cpu_buf: + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; +} + +static inline void cam_ife_hw_mgr_get_offline_sof_timestamp( + uint64_t *timestamp, + uint64_t *boot_time) +{ + struct timespec64 ts; + + ktime_get_boottime_ts64(&ts); + *timestamp = (uint64_t)((ts.tv_sec * 1000000000) + ts.tv_nsec); + *boot_time = *timestamp; +} + +static int cam_ife_mgr_cmd_get_sof_timestamp( + struct cam_ife_hw_mgr_ctx *ife_ctx, + uint64_t *time_stamp, + uint64_t *boot_time_stamp, + uint64_t *prev_time_stamp, + struct timespec64 *raw_boot_ts, + bool get_curr_timestamp) +{ + int rc = -EINVAL; + uint32_t i; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_csid_get_time_stamp_args csid_get_time; + + hw_mgr_res = list_first_entry(&ife_ctx->res_list_ife_csid, + struct cam_isp_hw_mgr_res, list); + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + /* + * Get the SOF time stamp from left resource only. 
+ * Left resource is master for dual vfe case and + * Rdi only context case left resource only hold + * the RDI resource + */ + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + /* + * Single VFE case, Get the time stamp from + * available one csid hw in the context + * Dual VFE case, get the time stamp from + * master(left) would be sufficient + */ + + csid_get_time.node_res = + hw_mgr_res->hw_res[i]; + csid_get_time.get_prev_timestamp = (prev_time_stamp != NULL); + csid_get_time.get_curr_timestamp = get_curr_timestamp; + csid_get_time.time_stamp_val = *time_stamp; + csid_get_time.raw_boot_time = raw_boot_ts; + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_IFE_CSID_CMD_GET_TIME_STAMP, + &csid_get_time, + sizeof( + struct cam_csid_get_time_stamp_args)); + if (!rc && (i == CAM_ISP_HW_SPLIT_LEFT)) { + *time_stamp = + csid_get_time.time_stamp_val; + *boot_time_stamp = + csid_get_time.boot_timestamp; + if (prev_time_stamp) + *prev_time_stamp = + csid_get_time.prev_time_stamp_val; + } + } + } + + if (rc) + CAM_ERR_RATE_LIMIT(CAM_ISP, "Getting sof time stamp failed, ctx_idx: %u", + ife_ctx->ctx_index); + + return rc; +} + +static int cam_ife_mgr_recover_hw(void *priv, void *data) +{ + int32_t rc = 0; + struct cam_ife_hw_event_recovery_data *recovery_data = data; + struct cam_hw_start_args start_args; + struct cam_hw_stop_args stop_args; + struct cam_ife_hw_mgr *ife_hw_mgr = priv; + uint32_t i = 0; + bool cancel = false; + uint32_t error_type = recovery_data->error_type; + struct cam_ife_hw_mgr_ctx *ctx = NULL; + + for (i = 0; i < recovery_data->no_of_context; i++) { + ctx = recovery_data->affected_ctx[i]; + if (recovery_data->id[i] != atomic_read(&ctx->recovery_id)) { + CAM_INFO(CAM_ISP, "recovery for ctx:%u error-type:%d cancelled", + ctx->ctx_index, error_type); + cancel = true; + } + } + if (cancel) + goto end; + + /* Here recovery is performed */ + CAM_DBG(CAM_ISP, "ErrorType = %d", error_type); + + switch (error_type) { 
+ case CAM_ISP_HW_ERROR_OVERFLOW: + case CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW: + case CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED: + case CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA: + case CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR: + case CAM_ISP_HW_ERROR_CSID_FATAL: + case CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME: + case CAM_ISP_HW_ERROR_CSID_MISSING_EOT: + case CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED: + case CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW: + case CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW: + case CAM_ISP_HW_ERROR_CSID_FRAME_SIZE: + case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW: + case CAM_ISP_HW_ERROR_VIOLATION: + if (!recovery_data->affected_ctx[0]) { + CAM_ERR(CAM_ISP, + "No context is affected but recovery called"); + kfree(recovery_data); + return 0; + } + /* stop resources here */ + CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d", + recovery_data->no_of_context); + for (i = 0; i < recovery_data->no_of_context; i++) { + stop_args.ctxt_to_hw_map = + recovery_data->affected_ctx[i]; + rc = cam_ife_mgr_stop_hw_in_overflow(&stop_args); + if (rc) { + CAM_ERR(CAM_ISP, "CTX stop failed(%d) ctx_idx: %u", + rc, ctx->ctx_index); + return rc; + } + } + + if (!g_ife_hw_mgr.debug_cfg.enable_recovery) + break; + + CAM_DBG(CAM_ISP, "RESET: CSID PATH"); + for (i = 0; i < recovery_data->no_of_context; i++) { + ctx = recovery_data->affected_ctx[i]; + rc = cam_ife_hw_mgr_reset_csid(ctx, + CAM_IFE_CSID_RESET_PATH); + + if (rc) { + CAM_ERR(CAM_ISP, "Failed RESET, ctx_idx: %u", ctx->ctx_index); + return rc; + } + } + + CAM_DBG(CAM_ISP, "RESET: Calling VFE reset"); + + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (recovery_data->affected_core[i]) + cam_ife_mgr_reset_vfe_hw(ife_hw_mgr, i); + } + + CAM_DBG(CAM_ISP, "START: Number of affected context: %d", + recovery_data->no_of_context); + + for (i = 0; i < recovery_data->no_of_context; i++) { + ctx = recovery_data->affected_ctx[i]; + start_args.ctxt_to_hw_map = ctx; + + rc = cam_ife_mgr_restart_hw(&start_args); + if (rc) { + 
CAM_ERR(CAM_ISP, "CTX start failed(%d) ctx_idx: %u", + rc, ctx->ctx_index); + return rc; + } + CAM_DBG(CAM_ISP, "Started resources rc (%d) ctx_idx: %u", + rc, ctx->ctx_index); + } + + atomic_set(&ctx->overflow_pending, 0); + CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc); + + break; + + case CAM_ISP_HW_ERROR_P2I_ERROR: + break; + + default: + CAM_ERR(CAM_ISP, "Invalid Error"); + } + CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", error_type); + +end: + kfree(recovery_data); + return rc; +} + +static void cam_ife_hw_mgr_trigger_crop_reg_dump( + struct cam_hw_intf *hw_intf, + struct cam_isp_hw_event_info *event_info) +{ + int rc = 0, path_id = 0; + + path_id = cam_ife_hw_mgr_convert_out_port_to_csid_path( + event_info->res_id); + + if (hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CSID_DUMP_CROP_REG, + &path_id, + sizeof(path_id)); + if (rc) + CAM_ERR(CAM_ISP, "CSID:%d Reg Dump failed for path=%u", + event_info->hw_idx, path_id); + } +} + +static int cam_ife_hw_mgr_do_error_recovery( + struct cam_ife_hw_event_recovery_data *ife_mgr_recovery_data) +{ + int32_t rc, i; + struct crm_workq_task *task = NULL; + struct cam_ife_hw_event_recovery_data *recovery_data = NULL; + struct cam_ife_hw_mgr_ctx *ctx; + + recovery_data = kmemdup(ife_mgr_recovery_data, + sizeof(struct cam_ife_hw_event_recovery_data), GFP_ATOMIC); + if (!recovery_data) + return -ENOMEM; + + CAM_DBG(CAM_ISP, "Enter: error_type (%d)", recovery_data->error_type); + + task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq); + if (!task) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No empty task frame"); + kfree(recovery_data); + return -ENOMEM; + } + + task->process_cb = &cam_context_handle_hw_recovery; + task->payload = recovery_data; + for (i = 0; i < recovery_data->no_of_context; i++) { + ctx = recovery_data->affected_ctx[i]; + recovery_data->id[i] = atomic_inc_return(&ctx->recovery_id); + } + + rc = cam_req_mgr_workq_enqueue_task(task, + 
recovery_data->affected_ctx[0]->common.cb_priv, + CRM_TASK_PRIORITY_0); + return rc; +} + +/* + * This function checks if any of the valid entry in affected_core[] + * is associated with this context. if YES + * a. It fills the other cores associated with this context.in + * affected_core[] + * b. Return true + */ +static bool cam_ife_hw_mgr_is_ctx_affected( + struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx, + uint32_t *affected_core, + uint32_t size) +{ + + bool rc = false; + uint32_t i = 0, j = 0; + uint32_t max_idx = ife_hwr_mgr_ctx->num_base; + uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0}; + + CAM_DBG(CAM_ISP, "Enter:max_idx = %d, ctx_idx: %u", max_idx, ife_hwr_mgr_ctx->ctx_index); + + if ((max_idx >= CAM_IFE_HW_NUM_MAX) || (size > CAM_IFE_HW_NUM_MAX)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "invalid parameter = %d, ctx_idx: %u", + max_idx, ife_hwr_mgr_ctx->ctx_index); + return rc; + } + + for (i = 0; i < max_idx; i++) { + if (affected_core[ife_hwr_mgr_ctx->base[i].idx]) + rc = true; + else { + ctx_affected_core_idx[j] = ife_hwr_mgr_ctx->base[i].idx; + j = j + 1; + } + } + + if (rc) { + while (j) { + if (affected_core[ctx_affected_core_idx[j-1]] != 1) + affected_core[ctx_affected_core_idx[j-1]] = 1; + j = j - 1; + } + } + CAM_DBG(CAM_ISP, "Exit, ctx_idx: %u", ife_hwr_mgr_ctx->ctx_index); + return rc; +} + +/* + * For any dual VFE context, if non-affected VFE is also serving + * another context, then that context should also be notified with fatal error + * So Loop through each context and - + * a. match core_idx + * b. 
Notify CTX with fatal error + */ +static int cam_ife_hw_mgr_find_affected_ctx( + struct cam_isp_hw_error_event_data *error_event_data, + uint32_t curr_core_idx, + struct cam_ife_hw_event_recovery_data *recovery_data, + bool force_recover) +{ + uint32_t affected_core[CAM_IFE_HW_NUM_MAX] = {0}; + struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = NULL; + cam_hw_event_cb_func notify_err_cb; + struct cam_ife_hw_mgr *ife_hwr_mgr = NULL; + uint32_t i = 0; + + if (!recovery_data) { + CAM_ERR(CAM_ISP, "recovery_data parameter is NULL"); + return -EINVAL; + } + + recovery_data->no_of_context = 0; + affected_core[curr_core_idx] = 1; + ife_hwr_mgr = &g_ife_hw_mgr; + + list_for_each_entry(ife_hwr_mgr_ctx, + &ife_hwr_mgr->used_ctx_list, list) { + /* + * Check if current core_idx matches the HW associated + * with this context + */ + if (!cam_ife_hw_mgr_is_ctx_affected(ife_hwr_mgr_ctx, + affected_core, CAM_IFE_HW_NUM_MAX)) + continue; + + if (!force_recover && atomic_read(&ife_hwr_mgr_ctx->overflow_pending)) { + CAM_INFO(CAM_ISP, "CTX:%u already error reported", + ife_hwr_mgr_ctx->ctx_index); + continue; + } + + atomic_set(&ife_hwr_mgr_ctx->overflow_pending, 1); + notify_err_cb = ife_hwr_mgr_ctx->common.event_cb; + + /* Add affected_context in list of recovery data */ + CAM_DBG(CAM_ISP, "Add affected ctx %u to list", + ife_hwr_mgr_ctx->ctx_index); + if (recovery_data->no_of_context < CAM_IFE_CTX_MAX) + recovery_data->affected_ctx[ + recovery_data->no_of_context++] = + ife_hwr_mgr_ctx; + + /* + * In the call back function corresponding ISP context + * will update CRM about fatal Error + */ + if (notify_err_cb) + notify_err_cb(ife_hwr_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_ERROR, + (void *)error_event_data); + else { + CAM_WARN(CAM_ISP, "Error call back is not set, ctx_idx: %u", + ife_hwr_mgr_ctx->ctx_index); + goto end; + } + } + + /* fill the affected_core in recovery data */ + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + recovery_data->affected_core[i] = affected_core[i]; + 
CAM_DBG(CAM_ISP, "Vfe core %d is affected (%d)", + i, recovery_data->affected_core[i]); + } +end: + return 0; +} + +static int cam_ife_hw_mgr_handle_csid_secondary_err_evts( + uint32_t err_type, + struct cam_isp_hw_event_info *event_info, + struct cam_ife_hw_mgr_ctx *ctx) +{ + int rc = -EINVAL; + struct cam_isp_hw_secondary_event_data sec_evt_data = {0}; + cam_hw_event_cb_func ife_hw_irq_cb = ctx->common.event_cb; + + /* + * Support frame drop as secondary event + */ + if (err_type & CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP) { + sec_evt_data.evt_type = CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP; + + CAM_DBG(CAM_ISP, + "Received CSID[%u] sensor sync frame drop res: %d as secondary evt on ctx: %u", + event_info->hw_idx, event_info->res_id, ctx->ctx_index); + + rc = ife_hw_irq_cb(ctx->common.cb_priv, + CAM_ISP_HW_SECONDARY_EVENT, (void *)&sec_evt_data); + } + + return rc; +} + +static int cam_ife_hw_mgr_handle_csid_error( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_isp_hw_event_info *event_info) +{ + int rc = -EINVAL; + bool recoverable = true; + uint32_t err_type; + struct cam_isp_hw_error_event_info *err_evt_info; + struct cam_isp_hw_error_event_data error_event_data = {0}; + struct cam_ife_hw_event_recovery_data recovery_data = {0}; + bool is_bus_overflow = false, force_recover = false; + + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, + "No additional error event data failed to process for CSID[%u] ctx: %u", + event_info->hw_idx, ctx->ctx_index); + return -EINVAL; + } + + err_evt_info = (struct cam_isp_hw_error_event_info *)event_info->event_data; + err_type = err_evt_info->err_type; + CAM_DBG(CAM_ISP, "Entry CSID[%u] error %d ctx_idx: %u", + event_info->hw_idx, err_type, ctx->ctx_index); + + spin_lock(&g_ife_hw_mgr.ctx_lock); + + /* Secondary event handling */ + if (event_info->is_secondary_evt) { + rc = cam_ife_hw_mgr_handle_csid_secondary_err_evts(err_type, event_info, ctx); + if (rc) + CAM_ERR(CAM_ISP, + "Failed to handle CSID[%u] sec event for res: %d 
err: 0x%x on ctx: %u", + event_info->hw_idx, event_info->res_id, err_type, ctx->ctx_index); + spin_unlock(&g_ife_hw_mgr.ctx_lock); + return rc; + } + + /* Default error types */ + recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW; + error_event_data.error_type = CAM_ISP_HW_ERROR_CSID_FATAL; + error_event_data.error_type |= err_type; + + /* Notify IFE/SFE devices, determine bus overflow */ + if (err_type & (CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW | + CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW | + CAM_ISP_HW_ERROR_CSID_FRAME_SIZE | + CAM_ISP_HW_ERROR_CSID_CAMIF_FRAME_DROP| + CAM_ISP_HW_ERROR_DRV_VOTEUP_LATE)) + cam_ife_hw_mgr_check_and_notify_overflow(event_info, + ctx, &is_bus_overflow); + + if (err_type & CAM_ISP_HW_ERROR_DRV_VOTEUP_LATE) { + cam_ife_hw_mgr_drv_info_dump(event_info, + ctx); + } + + if (err_type & CAM_ISP_NON_RECOVERABLE_CSID_ERRORS) { + recovery_data.error_type = err_type; + recoverable = false; + } + + if (recoverable && (is_bus_overflow || + (err_type & CAM_ISP_RECOVERABLE_CSID_ERRORS))) { + if (ctx->try_recovery_cnt < MAX_INTERNAL_RECOVERY_ATTEMPTS) { + error_event_data.try_internal_recovery = true; + + if (!atomic_read(&ctx->overflow_pending)) + ctx->try_recovery_cnt++; + + if (!ctx->recovery_req_id) + ctx->recovery_req_id = ctx->applied_req_id; + } + + CAM_DBG(CAM_ISP, + "CSID[%u] error: %u current_recovery_cnt: %u recovery_req: %llu on ctx: %u", + event_info->hw_idx, err_type, ctx->try_recovery_cnt, + ctx->recovery_req_id, ctx->ctx_index); + + recovery_data.error_type = err_type; + } + + /* For out of sync continue to try recovery */ + if ((error_event_data.try_internal_recovery) && (atomic_read(&ctx->overflow_pending))) + force_recover = err_type & CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR; + + rc = cam_ife_hw_mgr_find_affected_ctx(&error_event_data, + event_info->hw_idx, &recovery_data, force_recover); + if (rc || !recovery_data.no_of_context) + goto end; + + if (!error_event_data.try_internal_recovery) + 
cam_ife_hw_mgr_do_error_recovery(&recovery_data); + + CAM_DBG(CAM_ISP, "Exit CSID[%u] error %d ctx_idx: %u", + event_info->hw_idx, err_type, ctx->ctx_index); + +end: + spin_unlock(&g_ife_hw_mgr.ctx_lock); + return 0; +} + +static int cam_ife_hw_mgr_handle_csid_rup( + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx, + struct cam_isp_hw_event_info *event_info) +{ + cam_hw_event_cb_func ife_hwr_irq_rup_cb; + struct cam_isp_hw_reg_update_event_data rup_event_data; + + ife_hwr_irq_rup_cb = ife_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + case CAM_IFE_PIX_PATH_RES_PPP: + if (atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + break; + ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data); + CAM_DBG(CAM_ISP, "RUP done for CSID:%d source %d ctx_idx: %u", event_info->hw_idx, + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d, ctx_idx: %u", + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + break; + } + + return 0; +} + +static int cam_ife_hw_mgr_handle_csid_eof( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_isp_hw_event_info *event_info) +{ + cam_hw_event_cb_func ife_hwr_irq_rup_cb; + struct cam_isp_hw_eof_event_data eof_done_event_data; + + ife_hwr_irq_rup_cb = ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + case CAM_IFE_PIX_PATH_RES_PPP: + if (atomic_read(&ctx->overflow_pending)) + break; + + ife_hwr_irq_rup_cb(ctx->common.cb_priv, + CAM_ISP_HW_EVENT_EOF, &eof_done_event_data); + CAM_DBG(CAM_ISP, + "Received CSID[%u] CAMIF EOF 
res: %d ctx_idx: %u", event_info->hw_idx, + event_info->res_id, ctx->ctx_index); + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d ctx_idx: %u", + event_info->res_id, ctx->ctx_index); + break; + } + + return 0; +} +static int cam_ife_hw_mgr_handle_csid_camif_sof( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_isp_hw_event_info *event_info) +{ + int rc = 0; + cam_hw_event_cb_func ife_hw_irq_sof_cb = ctx->common.event_cb; + struct cam_isp_hw_sof_event_data sof_done_event_data = {0}; + struct timespec64 ts; + struct cam_isp_sof_ts_data *sof_and_boot_time; + + if (event_info->is_secondary_evt) { + struct cam_isp_hw_secondary_event_data sec_evt_data; + + CAM_DBG(CAM_ISP, + "Received CSID[%u] CAMIF SOF res: %d as secondary evt, ctx_idx: %u", + event_info->hw_idx, event_info->res_id, ctx->ctx_index); + + sec_evt_data.evt_type = CAM_ISP_HW_SEC_EVENT_SOF; + rc = ife_hw_irq_sof_cb(ctx->common.cb_priv, + CAM_ISP_HW_SECONDARY_EVENT, (void *)&sec_evt_data); + goto end; + } + + switch (event_info->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + case CAM_IFE_PIX_PATH_RES_PPP: + if (atomic_read(&ctx->overflow_pending)) + break; + if (ctx->ctx_config & + CAM_IFE_CTX_CFG_FRAME_HEADER_TS) { + sof_done_event_data.timestamp = 0x0; + ktime_get_boottime_ts64(&ts); + sof_done_event_data.boot_time = + (uint64_t)((ts.tv_sec * 1000000000) + + ts.tv_nsec); + CAM_DBG(CAM_ISP, "boot_time 0x%llx, ctx_idx: %u", + sof_done_event_data.boot_time, ctx->ctx_index); + } else { + if (ctx->flags.is_offline) + cam_ife_hw_mgr_get_offline_sof_timestamp( + &sof_done_event_data.timestamp, + &sof_done_event_data.boot_time); + else { + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, "SOF timestamp data is null: %s", + CAM_IS_NULL_TO_STR(event_info->event_data)); + break; + } + sof_and_boot_time = + (struct 
cam_isp_sof_ts_data *)event_info->event_data; + sof_done_event_data.timestamp = + sof_and_boot_time->sof_ts; + cam_ife_mgr_cmd_get_sof_timestamp( + ctx, &sof_done_event_data.timestamp, + &sof_done_event_data.boot_time, NULL, + &sof_and_boot_time->boot_time, false); + } + } + + cam_hw_mgr_reset_out_of_sync_cnt(ctx); + + ife_hw_irq_sof_cb(ctx->common.cb_priv, + CAM_ISP_HW_EVENT_SOF, (void *)&sof_done_event_data); + + CAM_DBG(CAM_ISP, + "Received CSID[%u] CAMIF SOF res: %d, ctx_idx: %u", event_info->hw_idx, + event_info->res_id, ctx->ctx_index); + + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d, ctx_idx: %u", + event_info->res_id, ctx->ctx_index); + break; + } +end: + return rc; +} + +static int cam_ife_hw_mgr_handle_csid_camif_epoch( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_isp_hw_event_info *event_info) +{ + int rc = 0; + cam_hw_event_cb_func ife_hw_irq_epoch_cb = ctx->common.event_cb; + struct cam_isp_hw_epoch_event_data epoch_done_event_data = {0}; + + if (event_info->is_secondary_evt) { + struct cam_isp_hw_secondary_event_data sec_evt_data; + + CAM_DBG(CAM_ISP, + "Received CSID[%u] CAMIF EPOCH res: %d as secondary evt, ctx_idx: %u", + event_info->hw_idx, event_info->res_id, ctx->ctx_index); + + sec_evt_data.evt_type = CAM_ISP_HW_SEC_EVENT_EPOCH; + rc = ife_hw_irq_epoch_cb(ctx->common.cb_priv, + CAM_ISP_HW_SECONDARY_EVENT, (void *)&sec_evt_data); + goto end; + } + + switch (event_info->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + case CAM_IFE_PIX_PATH_RES_PPP: + if (atomic_read(&ctx->overflow_pending)) + break; + + epoch_done_event_data.frame_id_meta = event_info->reg_val; + ife_hw_irq_epoch_cb(ctx->common.cb_priv, + CAM_ISP_HW_EVENT_EPOCH, (void *)&epoch_done_event_data); + + CAM_DBG(CAM_ISP, + "Received CSID[%u] CAMIF Epoch res: %d, ctx_idx: %u", 
event_info->hw_idx, + event_info->res_id, ctx->ctx_index); + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d ctx_idx: %u", + event_info->res_id, ctx->ctx_index); + break; + } +end: + return rc; +} + +static int cam_ife_hw_mgr_handle_sfe_hw_dump_info( + void *ctx, + void *evt_info) +{ + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx = + (struct cam_ife_hw_mgr_ctx *)ctx; + struct cam_isp_hw_event_info *event_info = + (struct cam_isp_hw_event_info *)evt_info; + struct cam_isp_hw_mgr_res *hw_mgr_res = NULL; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_hw_intf *hw_intf; + uint32_t i, out_port; + int rc = 0; + + /* SFE in rd resources */ + list_for_each_entry(hw_mgr_res, + &ife_hw_mgr_ctx->res_list_ife_in_rd, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + rsrc_node = hw_mgr_res->hw_res[i]; + if ((event_info->res_type == CAM_ISP_RESOURCE_SFE_RD) && + (rsrc_node->res_type == CAM_ISP_RESOURCE_SFE_RD) && + (event_info->res_id == rsrc_node->res_id)) { + hw_intf = rsrc_node->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_BUS_INFO, (void *)event_info, + sizeof(struct cam_isp_hw_event_info)); + } + } + } + + /* SFE out resources */ + if (event_info->res_type == CAM_ISP_RESOURCE_SFE_OUT) { + out_port = event_info->res_id & 0xFF; + hw_mgr_res = + &ife_hw_mgr_ctx->res_list_sfe_out[ife_hw_mgr_ctx->sfe_out_map[out_port]]; + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_BUS_INFO, (void *)event_info, + sizeof(struct cam_isp_hw_event_info)); + } + } + } + return rc; +} + +static int cam_ife_hw_mgr_handle_hw_dump_info( + void *ctx, + void *evt_info) +{ + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx = + (struct cam_ife_hw_mgr_ctx 
*)ctx; + struct cam_isp_hw_event_info *event_info = + (struct cam_isp_hw_event_info *)evt_info; + struct cam_isp_hw_mgr_res *hw_mgr_res = NULL; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_hw_intf *hw_intf; + uint32_t i, out_port; + uint64_t dummy_args; + int rc = 0; + + list_for_each_entry(hw_mgr_res, + &ife_hw_mgr_ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + rsrc_node = hw_mgr_res->hw_res[i]; + if (rsrc_node->res_id == + CAM_ISP_HW_VFE_IN_CAMIF) { + hw_intf = rsrc_node->hw_intf; + if (hw_intf && + hw_intf->hw_ops.process_cmd) + rc = + hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CAMIF_DATA, + rsrc_node, + sizeof( + struct + cam_isp_resource_node)); + } + } + } + + list_for_each_entry(hw_mgr_res, + &ife_hw_mgr_ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CSID_CLOCK_DUMP, + &dummy_args, + sizeof(uint64_t)); + if (rc) + CAM_ERR(CAM_ISP, + "CSID Clock Dump failed, ctx_idx: %u", + ife_hw_mgr_ctx->ctx_index); + } + } + } + + if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT) { + out_port = event_info->res_id & 0xFF; + if (ife_hw_mgr_ctx->vfe_out_map[out_port] == 0xff) { + CAM_ERR(CAM_ISP, "Invalid index:%d for out_map", out_port); + return -EINVAL; + } + + hw_mgr_res = + &ife_hw_mgr_ctx->res_list_ife_out[ife_hw_mgr_ctx->vfe_out_map[out_port]]; + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_BUS_INFO, + (void *)event_info, + sizeof(struct cam_isp_hw_event_info)); + } + } + } + + return rc; +} + +static int 
cam_ife_hw_mgr_handle_sfe_hw_err( + struct cam_ife_hw_mgr_ctx *ctx, + struct cam_isp_hw_event_info *event_info) +{ + struct cam_isp_hw_error_event_info *err_evt_info; + struct cam_isp_hw_error_event_data error_event_data = {0}; + struct cam_ife_hw_event_recovery_data recovery_data = {0}; + + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, + "No additional error event data failed to process for SFE[%u] ctx: %u", + event_info->hw_idx, ctx->ctx_index); + return -EINVAL; + } + + err_evt_info = (struct cam_isp_hw_error_event_info *)event_info->event_data; + + CAM_DBG(CAM_ISP, "SFE[%u] error [%u] on res_type %u ctx: %u", + event_info->hw_idx, err_evt_info->err_type, + event_info->res_type, ctx->ctx_index); + + spin_lock(&g_ife_hw_mgr.ctx_lock); + + cam_ife_hw_mgr_handle_sfe_hw_dump_info(ctx, event_info); + + /* Only report error to userspace */ + if (err_evt_info->err_type & CAM_SFE_IRQ_STATUS_VIOLATION) { + error_event_data.error_type = CAM_ISP_HW_ERROR_VIOLATION; + CAM_DBG(CAM_ISP, "Notify context for SFE error, ctx_idx: %u", ctx->ctx_index); + cam_ife_hw_mgr_find_affected_ctx(&error_event_data, + event_info->hw_idx, &recovery_data, false); + } + spin_unlock(&g_ife_hw_mgr.ctx_lock); + + return 0; +} + +static int cam_ife_hw_mgr_handle_hw_err( + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx, + struct cam_isp_hw_event_info *event_info) +{ + uint32_t core_idx, err_type; + struct cam_isp_hw_error_event_info *err_evt_info; + struct cam_isp_hw_error_event_data error_event_data = {0}; + struct cam_ife_hw_event_recovery_data recovery_data = {0}; + struct cam_hw_intf *hw_intf; + int rc = -EINVAL; + + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, + "No additional error event data failed to process for IFE[%u] ctx: %u", + event_info->hw_idx, ife_hw_mgr_ctx->ctx_index); + return -EINVAL; + } + + err_evt_info = (struct cam_isp_hw_error_event_info *)event_info->event_data; + err_type = err_evt_info->err_type; + + spin_lock(&g_ife_hw_mgr.ctx_lock); + if (event_info->res_type == 
CAM_ISP_RESOURCE_VFE_OUT) { + hw_intf = g_ife_hw_mgr.csid_devices[event_info->hw_idx]; + cam_ife_hw_mgr_trigger_crop_reg_dump(hw_intf, event_info); + } + + if (event_info->res_type == + CAM_ISP_RESOURCE_VFE_IN && + !ife_hw_mgr_ctx->flags.is_rdi_only_context && + event_info->res_id != + CAM_ISP_HW_VFE_IN_CAMIF) + cam_ife_hw_mgr_handle_hw_dump_info(ife_hw_mgr_ctx, event_info); + + if (err_type == CAM_VFE_IRQ_STATUS_VIOLATION) { + error_event_data.error_type = CAM_ISP_HW_ERROR_VIOLATION; + + if (err_evt_info->err_mask & CAM_VFE_IRQ_ERR_MASK_HWPD_VIOLATION) + error_event_data.error_type |= CAM_ISP_HW_ERROR_HWPD_VIOLATION; + } else if (event_info->res_type == CAM_ISP_RESOURCE_VFE_IN) + error_event_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW; + else if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT) + error_event_data.error_type = CAM_ISP_HW_ERROR_BUSIF_OVERFLOW; + + core_idx = event_info->hw_idx; + + if (g_ife_hw_mgr.debug_cfg.enable_recovery) + error_event_data.recovery_enabled = true; + + if (g_ife_hw_mgr.debug_cfg.enable_req_dump) + error_event_data.enable_req_dump = true; + + rc = cam_ife_hw_mgr_find_affected_ctx(&error_event_data, + core_idx, &recovery_data, false); + + if (rc || !recovery_data.no_of_context) + goto end; + + if (err_type == CAM_VFE_IRQ_STATUS_VIOLATION) + recovery_data.error_type = CAM_ISP_HW_ERROR_VIOLATION; + else + recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW; + + cam_ife_hw_mgr_do_error_recovery(&recovery_data); +end: + spin_unlock(&g_ife_hw_mgr.ctx_lock); + return rc; +} + +static int cam_ife_hw_mgr_handle_hw_rup( + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx, + struct cam_isp_hw_event_info *event_info) +{ + cam_hw_event_cb_func ife_hwr_irq_rup_cb; + struct cam_isp_hw_reg_update_event_data rup_event_data; + + ife_hwr_irq_rup_cb = ife_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_ISP_HW_VFE_IN_CAMIF: + if ((ife_hw_mgr_ctx->flags.is_dual) && + (event_info->hw_idx != + ife_hw_mgr_ctx->left_hw_idx)) + break; + 
+ if (atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + break; + ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&rup_event_data); + break; + + case CAM_ISP_HW_VFE_IN_RDI0: + case CAM_ISP_HW_VFE_IN_RDI1: + case CAM_ISP_HW_VFE_IN_RDI2: + case CAM_ISP_HW_VFE_IN_RDI3: + case CAM_ISP_HW_VFE_IN_RDI4: + if (!cam_isp_is_ctx_primary_rdi(ife_hw_mgr_ctx)) + break; + if (atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + break; + ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&rup_event_data); + break; + + case CAM_ISP_HW_VFE_IN_PDLIB: + case CAM_ISP_HW_VFE_IN_LCR: + case CAM_ISP_HW_VFE_IN_RD: + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d, ctx_idx: %u", + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + break; + } + + CAM_DBG(CAM_ISP, "RUP done for VFE:%d source %d, ctx_idx: %u", event_info->hw_idx, + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + + return 0; +} + +static int cam_ife_hw_mgr_handle_hw_epoch( + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx, + struct cam_isp_hw_event_info *event_info) +{ + cam_hw_event_cb_func ife_hw_irq_epoch_cb; + struct cam_isp_hw_epoch_event_data epoch_done_event_data; + + ife_hw_irq_epoch_cb = ife_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_ISP_HW_VFE_IN_CAMIF: + if (atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + break; + + epoch_done_event_data.frame_id_meta = event_info->reg_val; + ife_hw_irq_epoch_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_EPOCH, (void *)&epoch_done_event_data); + + break; + + case CAM_ISP_HW_VFE_IN_RDI0: + case CAM_ISP_HW_VFE_IN_RDI1: + case CAM_ISP_HW_VFE_IN_RDI2: + case CAM_ISP_HW_VFE_IN_RDI3: + case CAM_ISP_HW_VFE_IN_RDI4: + case CAM_ISP_HW_VFE_IN_PDLIB: + case CAM_ISP_HW_VFE_IN_LCR: + break; + + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d, ctx_idx: %u", + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + break; + } + + CAM_DBG(CAM_ISP, "Epoch for 
VFE:%d source %d ctx_idx: %u", event_info->hw_idx, + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + + return 0; +} + +static int cam_ife_hw_mgr_handle_hw_sof( + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx, + struct cam_isp_hw_event_info *event_info) +{ + cam_hw_event_cb_func ife_hw_irq_sof_cb; + struct cam_isp_hw_sof_event_data sof_done_event_data; + struct cam_isp_sof_ts_data *sof_and_boot_time; + struct timespec64 ts; + + memset(&sof_done_event_data, 0, sizeof(sof_done_event_data)); + + ife_hw_irq_sof_cb = ife_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_ISP_HW_VFE_IN_CAMIF: + case CAM_ISP_HW_VFE_IN_RD: + /* if frame header is enabled reset qtimer ts */ + if (ife_hw_mgr_ctx->ctx_config & + CAM_IFE_CTX_CFG_FRAME_HEADER_TS) { + sof_done_event_data.timestamp = 0x0; + ktime_get_boottime_ts64(&ts); + sof_done_event_data.boot_time = + (uint64_t)((ts.tv_sec * 1000000000) + + ts.tv_nsec); + CAM_DBG(CAM_ISP, "boot_time 0x%llx, ctx_idx: %u", + sof_done_event_data.boot_time, ife_hw_mgr_ctx->ctx_index); + } else { + if (ife_hw_mgr_ctx->flags.is_offline) + cam_ife_hw_mgr_get_offline_sof_timestamp( + &sof_done_event_data.timestamp, + &sof_done_event_data.boot_time); + else { + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, "SOF timestamp data is null: %s", + CAM_IS_NULL_TO_STR(event_info->event_data)); + break; + } + sof_and_boot_time = + (struct cam_isp_sof_ts_data *)event_info->event_data; + sof_done_event_data.timestamp = sof_and_boot_time->sof_ts; + cam_ife_mgr_cmd_get_sof_timestamp( + ife_hw_mgr_ctx, &sof_done_event_data.timestamp, + &sof_done_event_data.boot_time, NULL, + &sof_and_boot_time->boot_time, false); + } + } + + cam_hw_mgr_reset_out_of_sync_cnt(ife_hw_mgr_ctx); + + if (atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + break; + + ife_hw_irq_sof_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_SOF, (void *)&sof_done_event_data); + + break; + + case CAM_ISP_HW_VFE_IN_RDI0: + case CAM_ISP_HW_VFE_IN_RDI1: + case 
CAM_ISP_HW_VFE_IN_RDI2: + case CAM_ISP_HW_VFE_IN_RDI3: + case CAM_ISP_HW_VFE_IN_RDI4: + if (!cam_isp_is_ctx_primary_rdi(ife_hw_mgr_ctx)) + break; + + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, "SOF timestamp data is null: %s", + CAM_IS_NULL_TO_STR(event_info->event_data)); + break; + } + sof_and_boot_time = + (struct cam_isp_sof_ts_data *)event_info->event_data; + sof_done_event_data.timestamp = sof_and_boot_time->sof_ts; + cam_ife_mgr_cmd_get_sof_timestamp( + ife_hw_mgr_ctx, &sof_done_event_data.timestamp, + &sof_done_event_data.boot_time, NULL, + &sof_and_boot_time->boot_time, false); + + cam_hw_mgr_reset_out_of_sync_cnt(ife_hw_mgr_ctx); + + if (atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + break; + + ife_hw_irq_sof_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_SOF, (void *)&sof_done_event_data); + break; + + case CAM_ISP_HW_VFE_IN_PDLIB: + case CAM_ISP_HW_VFE_IN_LCR: + break; + + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d, ctx_idx: %u", + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + break; + } + + CAM_DBG(CAM_ISP, "SOF for VFE:%d source %d ctx_idx: %u", event_info->hw_idx, + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + + return 0; +} + +static int cam_ife_hw_mgr_handle_hw_eof( + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx, + struct cam_isp_hw_event_info *event_info) +{ + cam_hw_event_cb_func ife_hw_irq_eof_cb; + struct cam_isp_hw_eof_event_data eof_done_event_data; + + ife_hw_irq_eof_cb = ife_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_ISP_HW_VFE_IN_CAMIF: + if (atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + break; + + ife_hw_irq_eof_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_EOF, (void *)&eof_done_event_data); + + break; + + case CAM_ISP_HW_VFE_IN_RDI0: + case CAM_ISP_HW_VFE_IN_RDI1: + case CAM_ISP_HW_VFE_IN_RDI2: + case CAM_ISP_HW_VFE_IN_RDI3: + case CAM_ISP_HW_VFE_IN_RDI4: + if (!ife_hw_mgr_ctx->flags.is_rdi_only_context) + break; + if 
(atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + break; + + ife_hw_irq_eof_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_EOF, (void *)&eof_done_event_data); + break; + + case CAM_ISP_HW_VFE_IN_PDLIB: + case CAM_ISP_HW_VFE_IN_LCR: + break; + + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d, ctx_idx: %u", + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + break; + } + + CAM_DBG(CAM_ISP, "EOF for VFE:%d source %d ctx_idx: %u", event_info->hw_idx, + event_info->res_id, ife_hw_mgr_ctx->ctx_index); + + return 0; +} + +static bool cam_ife_hw_mgr_last_consumed_addr_check( + uint32_t last_consumed_addr, struct cam_ife_sfe_scratch_buf_info *buf_info) +{ + dma_addr_t final_addr; + uint32_t cmp_addr = 0; + + final_addr = buf_info->io_addr + buf_info->offset; + cmp_addr = cam_smmu_is_expanded_memory() ? + CAM_36BIT_INTF_GET_IOVA_BASE(final_addr) : final_addr; + if (cmp_addr == last_consumed_addr) + return true; + + return false; +} + +static int cam_ife_hw_mgr_check_ife_scratch_buf_done( + struct cam_ife_scratch_buf_cfg *scratch_cfg, + uint32_t res_id, uint32_t last_consumed_addr) +{ + int rc = 0, i; + struct cam_ife_sfe_scratch_buf_info *buf_info; + + for (i = 0; i < scratch_cfg->num_config; i++) { + if (scratch_cfg->buf_info[i].res_id == res_id) { + buf_info = &scratch_cfg->buf_info[i]; + + if (cam_ife_hw_mgr_last_consumed_addr_check(last_consumed_addr, buf_info)) { + CAM_DBG(CAM_ISP, + "IFE res:0x%x buf done for scratch - skip ctx notify", + buf_info->res_id); + rc = -EAGAIN; + } + } + } + + return rc; +} + +static int cam_ife_hw_mgr_check_sfe_scratch_buf_done( + struct cam_sfe_scratch_buf_cfg *scratch_cfg, + uint32_t res_id, uint32_t last_consumed_addr) +{ + int rc = 0; + struct cam_ife_sfe_scratch_buf_info *buf_info; + + switch (res_id) { + case CAM_ISP_SFE_OUT_RES_RDI_0: + case CAM_ISP_SFE_OUT_RES_RDI_1: + case CAM_ISP_SFE_OUT_RES_RDI_2: + buf_info = &scratch_cfg->buf_info[res_id - CAM_ISP_SFE_OUT_RES_RDI_0]; + if (!buf_info->config_done) + return 
0; + + if (cam_ife_hw_mgr_last_consumed_addr_check(last_consumed_addr, buf_info)) { + CAM_DBG(CAM_ISP, + "SFE RDI: 0x%x buf done for scratch - skip ctx notify", + buf_info->res_id); + rc = -EAGAIN; + } + break; + default: + break; + } + + return rc; +} + +static int cam_ife_hw_mgr_check_for_scratch_buf_done( + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx, + enum cam_isp_hw_type hw_type, + uint32_t res_id, uint32_t last_consumed_addr) +{ + int rc = 0; + + switch (hw_type) { + case CAM_ISP_HW_TYPE_VFE: + if (ife_hw_mgr_ctx->scratch_buf_info.ife_scratch_config->num_config) + rc = cam_ife_hw_mgr_check_ife_scratch_buf_done( + ife_hw_mgr_ctx->scratch_buf_info.ife_scratch_config, + res_id, last_consumed_addr); + break; + case CAM_ISP_HW_TYPE_SFE: + rc = cam_ife_hw_mgr_check_sfe_scratch_buf_done( + ife_hw_mgr_ctx->scratch_buf_info.sfe_scratch_config, + res_id, last_consumed_addr); + break; + default: + break; + } + + return rc; +} + +static int cam_ife_hw_mgr_handle_hw_buf_done( + struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx, + struct cam_isp_hw_event_info *event_info) +{ + cam_hw_event_cb_func ife_hwr_irq_wm_done_cb; + struct cam_isp_hw_done_event_data buf_done_event_data = {0}; + struct cam_isp_hw_bufdone_event_info *bufdone_evt_info = NULL; + int32_t rc = 0; + struct cam_ife_hw_mgr_debug *debug_cfg = &g_ife_hw_mgr.debug_cfg; + + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, + "No additional buf done data failed to process for HW: %u, ctx_idx: %u", + event_info->hw_type, ife_hw_mgr_ctx->ctx_index); + return -EINVAL; + } + + ife_hwr_irq_wm_done_cb = ife_hw_mgr_ctx->common.event_cb; + bufdone_evt_info = (struct cam_isp_hw_bufdone_event_info *)event_info->event_data; + buf_done_event_data.resource_handle = 0; + + CAM_DBG(CAM_ISP, + "Buf done for %s: %d res_id: 0x%x last consumed addr: 0x%x ctx: %u is_hw_ctxt_comp: %s is_early_done: %s", + ((event_info->hw_type == CAM_ISP_HW_TYPE_SFE) ? 
"SFE" : "IFE"), + event_info->hw_idx, event_info->res_id, + bufdone_evt_info->last_consumed_addr, ife_hw_mgr_ctx->ctx_index, + CAM_BOOL_TO_YESNO(bufdone_evt_info->is_hw_ctxt_comp), + CAM_BOOL_TO_YESNO(bufdone_evt_info->is_early_done)); + + if (unlikely(debug_cfg->perf_cnt_res_id == event_info->res_id)) + cam_ife_mgr_read_perf_cnts(ife_hw_mgr_ctx); + + /* Check scratch for sHDR/FS use-cases */ + if (ife_hw_mgr_ctx->flags.is_sfe_fs || ife_hw_mgr_ctx->flags.is_sfe_shdr) { + rc = cam_ife_hw_mgr_check_for_scratch_buf_done(ife_hw_mgr_ctx, + event_info->hw_type, event_info->res_id, + bufdone_evt_info->last_consumed_addr); + if (rc) + return 0; + } + + buf_done_event_data.hw_type = event_info->hw_type; + buf_done_event_data.resource_handle = event_info->res_id; + buf_done_event_data.last_consumed_addr = bufdone_evt_info->last_consumed_addr; + buf_done_event_data.comp_group_id = bufdone_evt_info->comp_grp_id; + buf_done_event_data.is_early_done = bufdone_evt_info->is_early_done; + + if (atomic_read(&ife_hw_mgr_ctx->overflow_pending)) + return 0; + + if (buf_done_event_data.resource_handle > 0 && ife_hwr_irq_wm_done_cb) { + CAM_DBG(CAM_ISP, + "Notify ISP context for %u handles in ctx: %u", + buf_done_event_data.resource_handle, ife_hw_mgr_ctx->ctx_index); + ife_hwr_irq_wm_done_cb(ife_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_DONE, (void *)&buf_done_event_data); + } + + return 0; +} + +static int cam_ife_hw_mgr_handle_ife_event( + struct cam_ife_hw_mgr_ctx *ctx, + uint32_t evt_id, + struct cam_isp_hw_event_info *event_info) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "Handle IFE[%u] %s event in ctx: %u", + event_info->hw_idx, + cam_isp_hw_evt_type_to_string(evt_id), + ctx->ctx_index); + + switch (evt_id) { + case CAM_ISP_HW_EVENT_SOF: + rc = cam_ife_hw_mgr_handle_hw_sof(ctx, event_info); + break; + + case CAM_ISP_HW_EVENT_REG_UPDATE: + rc = cam_ife_hw_mgr_handle_hw_rup(ctx, event_info); + break; + + case CAM_ISP_HW_EVENT_EPOCH: + rc = cam_ife_hw_mgr_handle_hw_epoch(ctx, 
event_info); + break; + + case CAM_ISP_HW_EVENT_EOF: + rc = cam_ife_hw_mgr_handle_hw_eof(ctx, event_info); + break; + + case CAM_ISP_HW_EVENT_DONE: + rc = cam_ife_hw_mgr_handle_hw_buf_done(ctx, event_info); + break; + + case CAM_ISP_HW_EVENT_ERROR: + rc = cam_ife_hw_mgr_handle_hw_err(ctx, event_info); + break; + + default: + CAM_ERR(CAM_ISP, "Event: %u not handled for IFE, ctx_idx: %u", + evt_id, ctx->ctx_index); + rc = -EINVAL; + break; + } + + return rc; +} + +static int cam_ife_hw_mgr_handle_csid_event( + struct cam_ife_hw_mgr_ctx *ctx, + uint32_t evt_id, + struct cam_isp_hw_event_info *event_info) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "Handle CSID[%u] %s event in ctx: %u", + event_info->hw_idx, + cam_isp_hw_evt_type_to_string(evt_id), + ctx->ctx_index); + + switch (evt_id) { + case CAM_ISP_HW_EVENT_REG_UPDATE: + rc = cam_ife_hw_mgr_handle_csid_rup(ctx, event_info); + break; + + case CAM_ISP_HW_EVENT_ERROR: + rc = cam_ife_hw_mgr_handle_csid_error(ctx, event_info); + break; + + case CAM_ISP_HW_EVENT_SOF: + rc = cam_ife_hw_mgr_handle_csid_camif_sof(ctx, event_info); + break; + + case CAM_ISP_HW_EVENT_EPOCH: + rc = cam_ife_hw_mgr_handle_csid_camif_epoch(ctx, event_info); + break; + case CAM_ISP_HW_EVENT_EOF: + rc = cam_ife_hw_mgr_handle_csid_eof(ctx, event_info); + break; + default: + CAM_ERR(CAM_ISP, "Event: %u not handled for CSID, ctx_idx: %u", + evt_id, ctx->ctx_index); + rc = -EINVAL; + break; + } + + return rc; +} + +static int cam_ife_hw_mgr_handle_sfe_event( + struct cam_ife_hw_mgr_ctx *ctx, + uint32_t evt_id, + struct cam_isp_hw_event_info *event_info) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "Handle SFE[%u] %s event in ctx: %u", + event_info->hw_idx, + cam_isp_hw_evt_type_to_string(evt_id), + ctx->ctx_index); + + switch (evt_id) { + case CAM_ISP_HW_EVENT_ERROR: + rc = cam_ife_hw_mgr_handle_sfe_hw_err(ctx, event_info); + break; + + case CAM_ISP_HW_EVENT_DONE: + rc = cam_ife_hw_mgr_handle_hw_buf_done(ctx, event_info); + break; + + default: + CAM_WARN(CAM_ISP, 
"Event: %u not handled for SFE, ctx_idx: %u", + evt_id, ctx->ctx_index); + rc = -EINVAL; + break; + } + + return rc; +} + +static int cam_ife_hw_mgr_event_handler( + void *priv, + uint32_t evt_id, + void *evt_info) +{ + int rc = -EINVAL; + struct cam_ife_hw_mgr_ctx *ctx; + struct cam_isp_hw_event_info *event_info; + + if (!evt_info || !priv) { + CAM_ERR(CAM_ISP, + "Invalid data evt_info: %pK priv: %pK", + evt_info, priv); + return rc; + } + + ctx = (struct cam_ife_hw_mgr_ctx *)priv; + event_info = (struct cam_isp_hw_event_info *)evt_info; + + switch (event_info->hw_type) { + case CAM_ISP_HW_TYPE_CSID: + rc = cam_ife_hw_mgr_handle_csid_event(ctx, evt_id, event_info); + break; + + case CAM_ISP_HW_TYPE_SFE: + rc = cam_ife_hw_mgr_handle_sfe_event(ctx, evt_id, event_info); + break; + + case CAM_ISP_HW_TYPE_VFE: + rc = cam_ife_hw_mgr_handle_ife_event(ctx, evt_id, event_info); + break; + + default: + break; + } + + if (rc) + CAM_ERR(CAM_ISP, "Failed to handle %s [%u] event from hw %u in ctx %u rc %d", + cam_isp_hw_evt_type_to_string(evt_id), + evt_id, event_info->hw_type, ctx->ctx_index, rc); + + return rc; +} + +static int cam_ife_hw_mgr_sort_dev_with_caps( + struct cam_ife_hw_mgr *ife_hw_mgr) +{ + int i; + + /* get caps for csid devices */ + for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) { + if (!ife_hw_mgr->csid_devices[i]) + continue; + + if (!ife_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps) + continue; + + ife_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps( + ife_hw_mgr->csid_devices[i]->hw_priv, + &ife_hw_mgr->csid_hw_caps[i], + sizeof(ife_hw_mgr->csid_hw_caps[i])); + + ife_hw_mgr->csid_global_reset_en = + ife_hw_mgr->csid_hw_caps[i].global_reset_en; + ife_hw_mgr->csid_aup_rup_en = + ife_hw_mgr->csid_hw_caps[i].aup_rup_en; + ife_hw_mgr->csid_camif_irq_support = + ife_hw_mgr->csid_hw_caps[i].camif_irq_support; + } + + /* get caps for ife devices */ + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (!ife_hw_mgr->ife_devices[i]) + continue; + if 
(ife_hw_mgr->ife_devices[i]->hw_intf->hw_ops.get_hw_caps) { + ife_hw_mgr->ife_devices[i]->hw_intf->hw_ops.get_hw_caps( + ife_hw_mgr->ife_devices[i]->hw_intf->hw_priv, + &ife_hw_mgr->ife_dev_caps[i], + sizeof(ife_hw_mgr->ife_dev_caps[i])); + } + } + + return 0; +} + +static int cam_ife_set_sfe_cache_debug(void *data, u64 val) +{ + int i, rc = -EINVAL; + uint32_t hw_idx = 0; + struct cam_sfe_debug_cfg_params debug_cfg; + struct cam_hw_intf *hw_intf = NULL; + + debug_cfg.cache_config = true; + + /* BITS [0:3] is for hw_idx */ + hw_idx = val & 0xF; + for (i = 0; i < CAM_SFE_HW_NUM_MAX; i++) { + if ((g_ife_hw_mgr.sfe_devices[i]) && (i == hw_idx)) { + hw_intf = g_ife_hw_mgr.sfe_devices[i]->hw_intf; + + debug_cfg.u.cache_cfg.sfe_cache_dbg = (val >> 4); + g_ife_hw_mgr.debug_cfg.sfe_cache_debug[i] = + debug_cfg.u.cache_cfg.sfe_cache_dbg; + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_SET_SFE_DEBUG_CFG, + &debug_cfg, + sizeof(struct cam_sfe_debug_cfg_params)); + } + } + + CAM_DBG(CAM_ISP, "Set SFE cache debug value: 0x%llx", val); + return rc; +} + +static int cam_ife_get_sfe_cache_debug(void *data, u64 *val) +{ + *val = g_ife_hw_mgr.debug_cfg.sfe_cache_debug[CAM_SFE_CORE_1]; + *val = *val << 32; + *val |= g_ife_hw_mgr.debug_cfg.sfe_cache_debug[CAM_SFE_CORE_0]; + CAM_DBG(CAM_ISP, "Get SFE cace debug value: 0x%llx", *val); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_ife_sfe_cache_debug, + cam_ife_get_sfe_cache_debug, + cam_ife_set_sfe_cache_debug, "%16llu"); + +static int cam_ife_set_csid_debug(void *data, u64 val) +{ + g_ife_hw_mgr.debug_cfg.csid_debug = val; + CAM_INFO(CAM_ISP, "Set CSID Debug value :%lld", val); + return 0; +} + +static int cam_ife_get_csid_debug(void *data, u64 *val) +{ + *val = g_ife_hw_mgr.debug_cfg.csid_debug; + CAM_INFO(CAM_ISP, "Get CSID Debug value :%lld", + g_ife_hw_mgr.debug_cfg.csid_debug); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(cam_ife_csid_debug, + cam_ife_get_csid_debug, + cam_ife_set_csid_debug, "%16llu"); + 
+static int cam_ife_set_camif_debug(void *data, u64 val) +{ + g_ife_hw_mgr.debug_cfg.camif_debug = val; + CAM_DBG(CAM_ISP, + "Set camif enable_diag_sensor_status value :%lld", val); + return 0; +} + +static int cam_ife_get_camif_debug(void *data, u64 *val) +{ + *val = g_ife_hw_mgr.debug_cfg.camif_debug; + CAM_DBG(CAM_ISP, + "Set camif enable_diag_sensor_status value :%lld", + g_ife_hw_mgr.debug_cfg.csid_debug); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(cam_ife_camif_debug, + cam_ife_get_camif_debug, + cam_ife_set_camif_debug, "%16llu"); + +static int cam_ife_set_sfe_debug(void *data, u64 val) +{ + g_ife_hw_mgr.debug_cfg.sfe_debug = (uint32_t)val; + CAM_DBG(CAM_ISP, "Set SFE Debug value :%u", + g_ife_hw_mgr.debug_cfg.sfe_debug); + return 0; +} + +static int cam_ife_get_sfe_debug(void *data, u64 *val) +{ + *val = (uint64_t)g_ife_hw_mgr.debug_cfg.sfe_debug; + CAM_DBG(CAM_ISP, "Get SFE Debug value :%u", + g_ife_hw_mgr.debug_cfg.sfe_debug); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_ife_sfe_debug, + cam_ife_get_sfe_debug, + cam_ife_set_sfe_debug, "%16llu"); + +static int cam_ife_set_sfe_sensor_diag_debug(void *data, u64 val) +{ + g_ife_hw_mgr.debug_cfg.sfe_sensor_diag_cfg = (uint32_t)val; + CAM_DBG(CAM_ISP, "Set SFE Sensor diag value :%u", + g_ife_hw_mgr.debug_cfg.sfe_sensor_diag_cfg); + return 0; +} + +static int cam_ife_get_sfe_sensor_diag_debug(void *data, u64 *val) +{ + *val = (uint64_t)g_ife_hw_mgr.debug_cfg.sfe_sensor_diag_cfg; + CAM_DBG(CAM_ISP, "Get SFE Sensor diag value :%u", + g_ife_hw_mgr.debug_cfg.sfe_sensor_diag_cfg); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_ife_sfe_sensor_diag_debug, + cam_ife_get_sfe_sensor_diag_debug, + cam_ife_set_sfe_sensor_diag_debug, "%16llu"); + +static int cam_ife_set_csid_rx_pkt_capture_debug(void *data, u64 val) +{ + if (val >= 0xFFFFF) { + g_ife_hw_mgr.debug_cfg.rx_capture_debug_set = false; + g_ife_hw_mgr.debug_cfg.rx_capture_debug = 0; + } else { + g_ife_hw_mgr.debug_cfg.rx_capture_debug_set = true; + 
g_ife_hw_mgr.debug_cfg.rx_capture_debug = val; + } + + CAM_DBG(CAM_ISP, "Set CSID RX capture Debug value :%lld", val); + return 0; +} + +static int cam_ife_get_csid_rx_pkt_capture_debug(void *data, u64 *val) +{ + *val = g_ife_hw_mgr.debug_cfg.rx_capture_debug; + CAM_DBG(CAM_ISP, "Get CSID RX capture Debug value :%lld", + g_ife_hw_mgr.debug_cfg.rx_capture_debug); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_ife_csid_rx_capture_debug, + cam_ife_get_csid_rx_pkt_capture_debug, + cam_ife_set_csid_rx_pkt_capture_debug, "%16llu"); + +static int cam_ife_set_force_acq_csid(void *data, u64 val) +{ + if (val > CAM_IFE_CSID_HW_NUM_MAX) { + CAM_WARN(CAM_ISP, "Invalid force_acq_csid value :%lld", val); + g_ife_hw_mgr.debug_cfg.force_acq_csid = CAM_IFE_CSID_HW_NUM_MAX; + return 0; + } + + g_ife_hw_mgr.debug_cfg.force_acq_csid = (uint32_t)val; + CAM_INFO(CAM_ISP, "Set force_acq_csid value :%lld", val); + return 0; +} + +static int cam_ife_get_force_acq_csid(void *data, u64 *val) +{ + *val = (uint64_t)g_ife_hw_mgr.debug_cfg.force_acq_csid; + CAM_INFO(CAM_ISP, + "Set camif force_acq_csid value :%lld", + g_ife_hw_mgr.debug_cfg.force_acq_csid); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(cam_ife_force_acq_csid, + cam_ife_get_force_acq_csid, + cam_ife_set_force_acq_csid, "%16llu"); + +#ifdef CONFIG_CAM_TEST_IRQ_LINE +static int __cam_ife_mgr_test_irq_line(struct cam_hw_intf *hw_intf, int *n_intf, int *n_success, + const char *hw_name, int idx) +{ + int rc = -EINVAL; + + if (!hw_intf) { + CAM_ERR(CAM_ISP, "%s:%d hw-intf is null", hw_name, idx); + return -EINVAL; + } + + (*n_intf)++; + + if (hw_intf->hw_ops.test_irq_line) + rc = hw_intf->hw_ops.test_irq_line(hw_intf->hw_priv); + + if (!rc) { + (*n_success)++; + CAM_INFO(CAM_ISP, "%s:%u IRQ line verified successfully", hw_name, hw_intf->hw_idx); + } else { + CAM_ERR(CAM_ISP, "%s:%u failed to verify IRQ line", hw_name, hw_intf->hw_idx); + } + + return rc; +} + +static int cam_ife_mgr_test_irq_lines(struct cam_ife_hw_mgr *hw_mgr) +{ + 
int i, rc, n_intf = 0, n_success = 0; + + for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) { + if (hw_mgr->csid_devices[i]) { + rc = __cam_ife_mgr_test_irq_line(hw_mgr->csid_devices[i], &n_intf, + &n_success, "CSID", i); + } + } + + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (hw_mgr->ife_devices[i] && hw_mgr->ife_devices[i]->hw_intf) { + rc = __cam_ife_mgr_test_irq_line(hw_mgr->ife_devices[i]->hw_intf, &n_intf, + &n_success, "IFE", i); + } + } + + for (i = 0; i < CAM_SFE_HW_NUM_MAX; i++) { + if (hw_mgr->sfe_devices[i] && hw_mgr->sfe_devices[i]->hw_intf) { + rc = __cam_ife_mgr_test_irq_line(hw_mgr->sfe_devices[i]->hw_intf, &n_intf, + &n_success, "SFE", i); + } + } + + if (n_intf) { + if (n_intf == n_success) + CAM_INFO(CAM_ISP, "verified IRQ lines for all %d hw-intf", n_intf); + else + CAM_ERR(CAM_ISP, "verified %d/%d IRQ lines", n_success, n_intf); + } else { + CAM_ERR(CAM_ISP, "no valid hw-intf to test IRQ lines"); + } + + return 0; +} +#else +static int cam_ife_mgr_test_irq_lines(struct cam_ife_hw_mgr *hw_mgr) +{ + return 0; +} +#endif + +#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE)) +static int cam_ife_mgr_test_irq_lines_at_probe(struct cam_ife_hw_mgr *hw_mgr) +{ + return cam_ife_mgr_test_irq_lines(hw_mgr); +} +#else +static int cam_ife_mgr_test_irq_lines_at_probe(struct cam_ife_hw_mgr *hw_mgr) +{ + return 0; +} +#endif + +static int cam_isp_set_test_irq_line(void *data, u64 val) +{ + cam_ife_mgr_test_irq_lines(&g_ife_hw_mgr); + return 0; +} + +static int cam_isp_get_test_irq_line(void *data, u64 *val) +{ + *val = 0; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_isp_test_irq_line, cam_isp_get_test_irq_line, + cam_isp_set_test_irq_line, "%16llu"); + +static ssize_t cam_ife_hw_mgr_perfcnt_bus_wr_write( + struct file *file, const char __user *ubuf, + size_t size, loff_t *loff_t) +{ + char *delimiter1, *delimiter2, *delimiter3; + char input_buf[64] = {'\0'}; + uint32_t counter_idx = 0, counter_val = 0, res_id = 0; + struct 
cam_ife_hw_mgr_debug *debug_cfg = &g_ife_hw_mgr.debug_cfg; + + if (size >= 64) + return -EINVAL; + + if (copy_from_user(input_buf, ubuf, size)) + return -EFAULT; + + input_buf[size] = '\0'; + + if ((!g_ife_hw_mgr.isp_caps.num_ife_bus_wr_perf_counters) && + (!g_ife_hw_mgr.isp_caps.num_sfe_bus_wr_perf_counters)) + return -EBADF; + + delimiter1 = strnchr(input_buf, size, '_'); + if (!delimiter1) + goto end; + + delimiter2 = strnchr(delimiter1 + 1, size, '_'); + if (!delimiter2) + goto end; + + delimiter3 = strnchr(delimiter2 + 1, size, '_'); + if (!delimiter3) + goto end; + + /* separate the strings */ + *delimiter1 = '\0'; + *delimiter2 = '\0'; + *delimiter3 = '\0'; + + /* Find the counter index after the first delimiter */ + if (kstrtou32(delimiter1 + 1, 0, &counter_idx)) + goto end; + + /* Find the counter value after the second delimiter */ + if (kstrtou32(delimiter2 + 1, 0, &counter_val)) + goto end; + + if (kstrtou32(delimiter3 + 1, 0, &res_id)) + goto end; + + /* Check for supported HWs */ + if (strcmp(input_buf, "ife") == 0) { + /* check if counter is available for given target */ + if ((counter_idx) && (counter_idx <= + g_ife_hw_mgr.isp_caps.num_ife_bus_wr_perf_counters)) { + debug_cfg->ife_bus_wr_perf_counter_val[counter_idx - 1] = + counter_val; + debug_cfg->perf_cnt_res_id = res_id; + } else + goto end; + } else if (strcmp(input_buf, "sfe") == 0) { + if ((counter_idx) && (counter_idx <= + g_ife_hw_mgr.isp_caps.num_sfe_bus_wr_perf_counters)) { + debug_cfg->sfe_bus_wr_perf_counter_val[counter_idx - 1] = + counter_val; + debug_cfg->perf_cnt_res_id = res_id; + } else + goto end; + } else { + goto end; + } + + return size; + +end: + CAM_INFO(CAM_ISP, + "Failed to set perf counter debug setting - invalid input format [input: %s counter: %u counter_val: %u]", + input_buf, counter_idx, counter_val); + return -EINVAL; +} + +static ssize_t cam_ife_hw_mgr_perfcnt_bus_wr_read( + struct file *file, char __user *ubuf, + size_t size, loff_t *loff_t) +{ + char 
display_string[256]; + int len = 0; + + len += scnprintf(display_string + len, (256 - len), + "\n***** ISP BUS WRITE PERF COUNTERS *****\n\n"); + + if ((!g_ife_hw_mgr.isp_caps.num_ife_bus_wr_perf_counters) && + (!g_ife_hw_mgr.isp_caps.num_sfe_bus_wr_perf_counters)) { + len += scnprintf(display_string + len, (256 - len), "NOT SUPPORTED\n\n"); + } else { + len += scnprintf(display_string + len, (256 - len), + "Available BUS write counters IFE: %u SFE: %u\n\n", + g_ife_hw_mgr.isp_caps.num_ife_bus_wr_perf_counters, + g_ife_hw_mgr.isp_caps.num_sfe_bus_wr_perf_counters); + len += scnprintf(display_string + len, (256 - len), + "To choose counter write to same file - \"__\"\nEx. \"ife_1_6619140\"\n\n"); + } + + scnprintf(display_string + len, (256 - len), + "*****************************\n"); + + return simple_read_from_buffer(ubuf, size, loff_t, display_string, + strlen(display_string)); +} + +static ssize_t cam_ife_hw_mgr_perfcnt_write( + struct file *file, const char __user *ubuf, + size_t size, loff_t *loff_t) +{ + char *delimiter1, *delimiter2; + char input_buf[16] = {'\0'}; + uint32_t counter_idx = 0, counter_val = 0; + struct cam_ife_hw_mgr_debug *debug_cfg = &g_ife_hw_mgr.debug_cfg; + + if (size >= 16) + return -EINVAL; + + if (copy_from_user(input_buf, ubuf, size)) + return -EFAULT; + + input_buf[size] = '\0'; + + if ((!g_ife_hw_mgr.isp_caps.num_ife_perf_counters) && + (!g_ife_hw_mgr.isp_caps.num_sfe_perf_counters)) + return -EBADF; + + delimiter1 = strnchr(input_buf, size, '_'); + if (!delimiter1) + goto end; + + delimiter2 = strnchr(delimiter1 + 1, size, '_'); + if (!delimiter2) + goto end; + + /* separate the strings */ + *delimiter1 = '\0'; + *delimiter2 = '\0'; + + /* Find the counter index after the first delimiter */ + if (kstrtou32(delimiter1 + 1, 0, &counter_idx)) + goto end; + + /* Find the counter value after the second delimiter */ + if (kstrtou32(delimiter2 + 1, 0, &counter_val)) + goto end; + + /* Check for supported HWs */ + if (strcmp(input_buf, 
"ife") == 0) { + /* check if counter is available for given target */ + if ((counter_idx) && (counter_idx <= g_ife_hw_mgr.isp_caps.num_ife_perf_counters)) + debug_cfg->ife_perf_counter_val[counter_idx - 1] = + counter_val; + else + goto end; + } else if (strcmp(input_buf, "sfe") == 0) { + if ((counter_idx) && (counter_idx <= g_ife_hw_mgr.isp_caps.num_sfe_perf_counters)) + debug_cfg->sfe_perf_counter_val[counter_idx - 1] = + counter_val; + else + goto end; + } else { + goto end; + } + + return size; + +end: + CAM_INFO(CAM_ISP, + "Failed to set perf counter debug setting - invalid input format [input: %s counter: %u counter_val: %u]", + input_buf, counter_idx, counter_val); + return -EINVAL; +} + +static ssize_t cam_ife_hw_mgr_perfcnt_read( + struct file *file, char __user *ubuf, + size_t size, loff_t *loff_t) +{ + char display_string[256]; + int len = 0; + + len += scnprintf(display_string + len, (256 - len), + "\n***** ISP PERF COUNTERS *****\n\n"); + + if ((!g_ife_hw_mgr.isp_caps.num_ife_perf_counters) && + (!g_ife_hw_mgr.isp_caps.num_sfe_perf_counters)) { + len += scnprintf(display_string + len, (256 - len), "NOT SUPPORTED\n\n"); + } else { + len += scnprintf(display_string + len, (256 - len), + "Available counters IFE: %u SFE: %u\n\n", + g_ife_hw_mgr.isp_caps.num_ife_perf_counters, + g_ife_hw_mgr.isp_caps.num_sfe_perf_counters); + len += scnprintf(display_string + len, (256 - len), + "To choose counter write to same file - \"__\"\nEx. 
\"ife_1_6619140\"\n\n"); + } + + scnprintf(display_string + len, (256 - len), + "*****************************\n"); + + return simple_read_from_buffer(ubuf, size, loff_t, display_string, + strlen(display_string)); +} + +static const struct file_operations cam_ife_hw_mgr_perfcnter_debug = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_ife_hw_mgr_perfcnt_read, + .write = cam_ife_hw_mgr_perfcnt_write, +}; + +static const struct file_operations cam_ife_hw_mgr_bus_wr_perfcnter_debug = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_ife_hw_mgr_perfcnt_bus_wr_read, + .write = cam_ife_hw_mgr_perfcnt_bus_wr_write, +}; + +static int cam_ife_set_csid_testbus_debug(void *data, u64 val) +{ + g_ife_hw_mgr.debug_cfg.csid_test_bus = val; + CAM_DBG(CAM_ISP, "Set CSID test bus value :%lld", val); + return 0; +} + +static int cam_ife_get_csid_testbus_debug(void *data, u64 *val) +{ + *val = g_ife_hw_mgr.debug_cfg.csid_test_bus; + CAM_DBG(CAM_ISP, "Get CSID test bus value :%u", + g_ife_hw_mgr.debug_cfg.csid_test_bus); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_ife_csid_testbus_debug, + cam_ife_get_csid_testbus_debug, + cam_ife_set_csid_testbus_debug, "%16llu"); + +static int cam_ife_hw_mgr_dump_irq_desc(struct cam_ife_hw_mgr *hw_mgr, + struct cam_isp_irq_inject_param *param) +{ + int rc = 0; + + switch (param->hw_type) { + case CAM_ISP_HW_TYPE_CSID: + rc = cam_ife_hw_mgr_csid_irq_inject_or_dump_desc( + hw_mgr, param, true); + break; + case CAM_ISP_HW_TYPE_VFE: + rc = cam_ife_hw_mgr_vfe_irq_inject_or_dump_desc( + hw_mgr, param, true); + break; + case CAM_ISP_HW_TYPE_SFE: + rc = cam_ife_hw_mgr_sfe_irq_inject_or_dump_desc( + hw_mgr, param, true); + break; + default: + if (irq_inject_display_buf) + strlcat(irq_inject_display_buf, + "No matched HW_TYPE\n", IRQ_INJECT_DISPLAY_BUF_LEN); + rc = -EINVAL; + return rc; + } + + return rc; +} + +static void cam_ife_hw_mgr_dump_active_hw(char *buffer, int *offset) +{ + uint32_t i; + struct cam_ife_hw_mgr_ctx 
*ctx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_mgr_res *hw_mgr_res_temp; + struct cam_ife_hw_mgr_ctx *ctx_temp; + + mutex_lock(&g_ife_hw_mgr.ctx_mutex); + if (list_empty(&g_ife_hw_mgr.used_ctx_list)) { + *offset += scnprintf(buffer + *offset, LINE_BUFFER_LEN - *offset, + "Currently no ctx in use\n"); + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); + return; + } + + list_for_each_entry_safe(ctx, ctx_temp, + &g_ife_hw_mgr.used_ctx_list, list) { + + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &ctx->res_list_ife_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + *offset += scnprintf(buffer + *offset, + LINE_BUFFER_LEN - *offset, + "hw_type:CSID hw_idx:%d ctx id:%u res: %s\n", + hw_mgr_res->hw_res[i]->hw_intf->hw_idx, ctx->ctx_index, + hw_mgr_res->hw_res[i]->res_name); + } + } + + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + *offset += scnprintf(buffer + *offset, + LINE_BUFFER_LEN - *offset, + "hw_type:IFE hw_idx:%d ctx id:%u res: %s\n", + hw_mgr_res->hw_res[i]->hw_intf->hw_idx, ctx->ctx_index, + hw_mgr_res->hw_res[i]->res_name); + } + } + + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &ctx->res_list_sfe_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + *offset += scnprintf(buffer + *offset, + LINE_BUFFER_LEN - *offset, + "hw_type:SFE hw_idx:%d ctx id:%u res: %s\n", + hw_mgr_res->hw_res[i]->hw_intf->hw_idx, ctx->ctx_index, + hw_mgr_res->hw_res[i]->res_name); + } + } + } + mutex_unlock(&g_ife_hw_mgr.ctx_mutex); +} + +static inline char *__cam_isp_irq_inject_reg_unit_to_name(int32_t reg_unit) +{ + switch (reg_unit) { + case CAM_ISP_CSID_TOP_REG: + return "CAM_ISP_CSID_TOP_REG"; + case CAM_ISP_CSID_RX_REG: + return "CAM_ISP_CSID_RX_REG"; + case CAM_ISP_CSID_PATH_IPP_REG: + return 
"CAM_ISP_CSID_PATH_IPP_REG"; + case CAM_ISP_CSID_PATH_PPP_REG: + return "CAM_ISP_CSID_PATH_PPP_REG"; + case CAM_ISP_CSID_PATH_RDI0_REG: + return "CAM_ISP_CSID_PATH_RDI0_REG"; + case CAM_ISP_CSID_PATH_RDI1_REG: + return "CAM_ISP_CSID_PATH_RDI1_REG"; + case CAM_ISP_CSID_PATH_RDI2_REG: + return "CAM_ISP_CSID_PATH_RDI2_REG"; + case CAM_ISP_CSID_PATH_RDI3_REG: + return "CAM_ISP_CSID_PATH_RDI3_REG"; + case CAM_ISP_CSID_PATH_RDI4_REG: + return "CAM_ISP_CSID_PATH_RDI4_REG"; + case CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG: + return "CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG"; + case CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_1_REG: + return "CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_1_REG"; + case CAM_ISP_SFE_0_BUS_RD_INPUT_IF_IRQ_SET_REG: + return "CAM_ISP_SFE_0_BUS_RD_INPUT_IF_IRQ_SET_REG"; + case CAM_ISP_SFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG: + return "CAM_ISP_SFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG"; + default: + return "Invalid reg_unit"; + } +} + +static inline char *__cam_isp_irq_inject_hw_type_to_name(int32_t hw_type) +{ + switch (hw_type) { + case CAM_ISP_HW_TYPE_CSID: + return "CSID"; + case CAM_ISP_HW_TYPE_VFE: + return "VFE"; + case CAM_ISP_HW_TYPE_SFE: + return "SFE"; + default: + return "Invalid hw_type"; + } +} + +static inline int cam_isp_irq_inject_get_hw_type( + int32_t *hw_type, char *token) +{ + if (strcmp(token, "CSID") == 0) + *hw_type = CAM_ISP_HW_TYPE_CSID; + else if (strcmp(token, "VFE") == 0) + *hw_type = CAM_ISP_HW_TYPE_VFE; + else if (strcmp(token, "SFE") == 0) + *hw_type = CAM_ISP_HW_TYPE_SFE; + else + return -EINVAL; + + return 0; +} + +static int cam_isp_irq_inject_parse_common_params( + struct cam_isp_irq_inject_param *irq_inject_param, + uint32_t param_index, char *token, bool *is_query) +{ + int i, rc = 0, offset = 0; + char *line_buf = NULL; + + line_buf = CAM_MEM_ZALLOC(sizeof(char) * LINE_BUFFER_LEN, GFP_KERNEL); + if (!line_buf) + return -ENOMEM; + + switch (param_index) { + case HW_TYPE: + if (strnstr(token, "?", 1)) { + *is_query = true; + 
offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Interruptable HW : CSID | IFE | SFE\n"); + break; + } + rc = cam_isp_irq_inject_get_hw_type(&irq_inject_param->hw_type, token); + if (rc) { + irq_inject_param->hw_type = -1; + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Invalid camera hardware [ %s ]\n", token); + } + break; + case HW_IDX: + if (strnstr(token, "?", 1)) { + *is_query = true; + if (irq_inject_param->hw_type == -1) { + offset += scnprintf(line_buf + offset, + LINE_BUFFER_LEN - offset, + "HW_IDX : Enter hw_type first\n"); + break; + } + switch (irq_inject_param->hw_type) { + case CAM_ISP_HW_TYPE_CSID: + for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) { + if (!g_ife_hw_mgr.csid_devices[i]) + break; + } + offset += scnprintf(line_buf + offset, + LINE_BUFFER_LEN - offset, + "Max index of CSID : %d\n", i - 1); + break; + case CAM_ISP_HW_TYPE_VFE: + for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (!g_ife_hw_mgr.ife_devices[i]) + break; + } + offset += scnprintf(line_buf + offset, + LINE_BUFFER_LEN - offset, + "Max index of VFE : %d\n", i - 1); + break; + case CAM_ISP_HW_TYPE_SFE: + for (i = 0; i < CAM_SFE_HW_NUM_MAX; i++) { + if (!g_ife_hw_mgr.sfe_devices[i]) + break; + } + offset += scnprintf(line_buf + offset, + LINE_BUFFER_LEN - offset, + "Max index of SFE : %d\n", i - 1); + break; + default: + break; + } + } else if (kstrtou32(token, 0, &irq_inject_param->hw_idx)) { + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Invalid hw index %s\n", token); + rc = -EINVAL; + } + break; + case REG_UNIT: + if (strnstr(token, "?", 1)) { + *is_query = true; + if (irq_inject_param->hw_type == -1) { + offset += scnprintf(line_buf + offset, + LINE_BUFFER_LEN - offset, + "REG_UNIT : Enter hw_type first\n"); + break; + } + + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Printing available res for hw_type: %s\n", + __cam_isp_irq_inject_hw_type_to_name( + irq_inject_param->hw_type)); + for (i = 
0; i < CAM_ISP_REG_UNIT_MAX; i++) { + if ((irq_inject_param->hw_type == CAM_ISP_HW_TYPE_CSID) && + i > CAM_ISP_CSID_PATH_RDI4_REG) + continue; + else if ((irq_inject_param->hw_type == CAM_ISP_HW_TYPE_VFE) && + ((i < CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG) || + (i > CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_1_REG))) + continue; + else if ((irq_inject_param->hw_type == CAM_ISP_HW_TYPE_SFE) && + ((i < CAM_ISP_SFE_0_BUS_RD_INPUT_IF_IRQ_SET_REG) || + (i > CAM_ISP_SFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG))) + continue; + + offset += scnprintf(line_buf + offset, + LINE_BUFFER_LEN - offset, "%d : %s\n", i, + __cam_isp_irq_inject_reg_unit_to_name(i)); + } + + } else if (kstrtou32(token, 0, &irq_inject_param->reg_unit)) { + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Invalid register %s\n", token); + rc = -EINVAL; + } + break; + case IRQ_MASK: + if (strnstr(token, "?", 1)) { + *is_query = true; + if ((irq_inject_param->hw_type == -1) || + (irq_inject_param->reg_unit == -1)) { + offset += scnprintf(line_buf + offset, + LINE_BUFFER_LEN - offset, + "IRQ_MASK : Enter hw_type and reg_unit first\n"); + break; + } + if (cam_ife_hw_mgr_dump_irq_desc(&g_ife_hw_mgr, + irq_inject_param)) { + offset += scnprintf(line_buf + offset, + LINE_BUFFER_LEN - offset, + "Dump irq description failed\n"); + rc = -EINVAL; + } + } else if (kstrtou32(token, 0, &irq_inject_param->irq_mask)) { + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Invalid irq mask %s\n", token); + rc = -EINVAL; + } + break; + case INJECT_REQ: + if (strnstr(token, "now", 3)) { + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Trigger IRQ now\n"); + irq_inject_param->req_id = 0xFFFFFFFF; + } else if (kstrtou64(token, 0, &irq_inject_param->req_id)) { + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Invalid request id %s\n", token); + rc = -EINVAL; + } + break; + default: + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Invalid 
extra parameter: %s\n", token); + rc = -EINVAL; + } + + if ((offset <= LINE_BUFFER_LEN) && irq_inject_display_buf) + strlcat(irq_inject_display_buf, line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + + CAM_MEM_FREE(line_buf); + return rc; +} + +static int cam_isp_irq_inject_command_parser( + struct cam_isp_irq_inject_param *irq_inject_param, + char **msg, uint32_t max_params, + cam_isp_irq_inject_cmd_parse_handler cmd_parse_cb, + bool *is_query) +{ + char *token = NULL; + char *line_buf = NULL; + int rc, param_index = 0, offset = 0; + + line_buf = CAM_MEM_ZALLOC(sizeof(char) * LINE_BUFFER_LEN, GFP_KERNEL); + if (!line_buf) + return -ENOMEM; + + token = strsep(msg, ":"); + while (token != NULL) { + rc = cmd_parse_cb(irq_inject_param, param_index, token, is_query); + if (rc) { + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Parsed Command failed rc: %d\n", rc); + goto end; + } + + param_index++; + if (param_index == max_params) + break; + token = strsep(msg, ":"); + } + + if ((param_index < max_params) && !(*is_query)) { + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Insufficient parameters passed for total parameters: %u\n", + param_index); + rc = -EINVAL; + goto end; + } + + if ((offset <= LINE_BUFFER_LEN) && irq_inject_display_buf) + strlcat(irq_inject_display_buf, line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + + rc = param_index; +end: + CAM_MEM_FREE(line_buf); + return rc; +} + +static ssize_t cam_isp_irq_injection_read(struct file *file, + char __user *ubuf, + size_t size, loff_t *ppos) +{ + int i, offset = 0; + int count = 0; + uint32_t hw_type = 0; + char *line_buf = NULL; + + if (!irq_inject_display_buf) + return -EINVAL; + + line_buf = CAM_MEM_ZALLOC(sizeof(char) * LINE_BUFFER_LEN, GFP_KERNEL); + if (!line_buf) + return -ENOMEM; + + if (!(*ppos) && strlen(irq_inject_display_buf)) + goto end; + else if ((*ppos) && (strlen(irq_inject_display_buf) == 0)) { + CAM_MEM_FREE(line_buf); + return 0; + } + + 
strlcat(irq_inject_display_buf, IRQ_INJECT_USAGE_STRING, IRQ_INJECT_DISPLAY_BUF_LEN); + + for (i = 0; i < MAX_INJECT_SET; i++) { + if (!g_ife_hw_mgr.irq_inject_param[i].is_valid) + continue; + + hw_type = g_ife_hw_mgr.irq_inject_param[i].hw_type; + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "injected param[%d] : hw_type:%s hw_idx:%d reg_unit:%d irq_mask:%#x req_id:%lld\n", + i, __cam_isp_irq_inject_hw_type_to_name(hw_type), + g_ife_hw_mgr.irq_inject_param[i].hw_idx, + g_ife_hw_mgr.irq_inject_param[i].reg_unit, + g_ife_hw_mgr.irq_inject_param[i].irq_mask, + g_ife_hw_mgr.irq_inject_param[i].req_id); + } + + cam_ife_hw_mgr_dump_active_hw(line_buf, &offset); + + strlcat(irq_inject_display_buf, line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + +end: + if (clear_user(ubuf, size)) { + CAM_MEM_FREE(line_buf); + return -EIO; + } + count = simple_read_from_buffer(ubuf, size, ppos, irq_inject_display_buf, + strlen(irq_inject_display_buf)); + + memset(irq_inject_display_buf, '\0', IRQ_INJECT_DISPLAY_BUF_LEN); + CAM_MEM_FREE(line_buf); + return count; +} + +static ssize_t cam_isp_irq_injection_write(struct file *file, + const char __user *ubuf, size_t size, loff_t *ppos) +{ + bool is_query = false; + int i, rc = 0; + int offset = 0; + uint32_t hw_type = 0; + char *msg = NULL; + char *line_buf = NULL; + char input_buf[LINE_BUFFER_LEN] = {'\0'}; + + line_buf = CAM_MEM_ZALLOC(sizeof(char) * LINE_BUFFER_LEN, GFP_KERNEL); + if (!line_buf) + return -ENOMEM; + + if (irq_inject_display_buf) + memset(irq_inject_display_buf, '\0', IRQ_INJECT_DISPLAY_BUF_LEN); + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) { + rc = -EFAULT; + goto end; + } + + msg = input_buf; + + for (i = 0; i < MAX_INJECT_SET; i++) { + if (g_ife_hw_mgr.irq_inject_param[i].is_valid) + continue; + + rc = cam_isp_irq_inject_command_parser( + &g_ife_hw_mgr.irq_inject_param[i], &msg, + MAX_PARAMS_FOR_IRQ_INJECT, + cam_isp_irq_inject_parse_common_params, &is_query); + if ((rc != 
MAX_PARAMS_FOR_IRQ_INJECT) || is_query) { + cam_isp_irq_inject_clear_params(&g_ife_hw_mgr.irq_inject_param[i]); + if (!is_query) + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Parsed Command failed, param_index = %d\n", rc); + } else { + g_ife_hw_mgr.irq_inject_param[i].is_valid = true; + hw_type = g_ife_hw_mgr.irq_inject_param[i].hw_type; + offset += scnprintf(line_buf + offset, LINE_BUFFER_LEN - offset, + "Setting param[%d] : hw_type:%s hw_idx:%d reg_unit:%d irq_mask:%#x req_id:%lld\n", + i, __cam_isp_irq_inject_hw_type_to_name(hw_type), + g_ife_hw_mgr.irq_inject_param[i].hw_idx, + g_ife_hw_mgr.irq_inject_param[i].reg_unit, + g_ife_hw_mgr.irq_inject_param[i].irq_mask, + g_ife_hw_mgr.irq_inject_param[i].req_id); + } + break; + } + + if ((offset <= LINE_BUFFER_LEN) && irq_inject_display_buf) + strlcat(irq_inject_display_buf, line_buf, IRQ_INJECT_DISPLAY_BUF_LEN); + + rc = size; +end: + CAM_MEM_FREE(line_buf); + return rc; +} + +static const struct file_operations cam_isp_irq_injection = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_isp_irq_injection_read, + .write = cam_isp_irq_injection_write, +}; + +static int cam_ife_hw_mgr_debug_register(void) +{ + int rc = 0; + struct dentry *dbgfileptr = NULL; + + irq_inject_display_buf = NULL; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("ife", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_ISP,"DebugFS could not create directory!"); + rc = -ENOENT; + goto end; + } + + irq_inject_display_buf = + CAM_MEM_ZALLOC(sizeof(char) * IRQ_INJECT_DISPLAY_BUF_LEN, GFP_KERNEL); + + /* Store parent inode for cleanup in caller */ + g_ife_hw_mgr.debug_cfg.dentry = dbgfileptr; + + debugfs_create_file("ife_csid_debug", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_csid_debug); + debugfs_create_file("ife_csid_rx_capture_debug", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_csid_rx_capture_debug); + debugfs_create_u32("enable_recovery", 0644, 
g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.enable_recovery); + debugfs_create_bool("enable_req_dump", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.enable_req_dump); + debugfs_create_u32("enable_csid_recovery", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.enable_csid_recovery); + debugfs_create_file("ife_camif_debug", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_camif_debug); + debugfs_create_bool("per_req_reg_dump", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.per_req_reg_dump); + debugfs_create_bool("disable_ubwc_comp", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.disable_ubwc_comp); + debugfs_create_file("sfe_debug", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_sfe_debug); + debugfs_create_file("sfe_sensor_diag_sel", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_sfe_sensor_diag_debug); + debugfs_create_bool("disable_ife_mmu_prefetch", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.disable_ife_mmu_prefetch); + debugfs_create_bool("enable_ife_frame_irqs", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.enable_ife_frame_irqs); + debugfs_create_file("sfe_cache_debug", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_sfe_cache_debug); + debugfs_create_file("test_irq_line", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_isp_test_irq_line); + debugfs_create_file("isp_top_perf_counters", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_hw_mgr_perfcnter_debug); + debugfs_create_file("isp_bus_wr_perf_counters", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_hw_mgr_bus_wr_perfcnter_debug); + debugfs_create_file("ife_csid_testbus", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_ife_csid_testbus_debug); + debugfs_create_bool("disable_isp_drv", 0644, g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.disable_isp_drv); + debugfs_create_bool("enable_presil_reg_dump", 0644, + 
g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.enable_presil_reg_dump); + debugfs_create_file("isp_irq_inject", 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, &cam_isp_irq_injection); + debugfs_create_bool("enable_cdm_cmd_check", 0644, g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.enable_cdm_cmd_check); + debugfs_create_bool("enable_csid_set_domain_id", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.enable_csid_set_domain_id); + debugfs_create_u32("csid_domain_id_value", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.csid_domain_id_value); + debugfs_create_u32("csid_out_of_sync_simul", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.csid_out_of_sync_simul); + debugfs_create_bool("per_req_wait_cdm", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.per_req_wait_cdm); + debugfs_create_bool("enable_cdr_sweep_debug", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.enable_cdr_sweep_debug); + debugfs_create_u32("force_acq_csid", 0644, + g_ife_hw_mgr.debug_cfg.dentry, + &g_ife_hw_mgr.debug_cfg.force_acq_csid); + +end: + g_ife_hw_mgr.debug_cfg.enable_csid_recovery = 1; + return rc; +} + +static void cam_req_mgr_process_workq_cam_ife_worker(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static unsigned long cam_ife_hw_mgr_mini_dump_cb(void *dst, unsigned long len, + void *priv_data) +{ + struct cam_ife_hw_mini_dump_data *mgr_md; + struct cam_ife_hw_mini_dump_ctx *ctx_md; + struct cam_ife_hw_mgr_ctx *ctx_temp; + struct cam_ife_hw_mgr_ctx *ctx; + uint32_t j; + uint32_t hw_idx = 0; + struct cam_hw_intf *hw_intf = NULL; + struct cam_ife_hw_mgr *hw_mgr; + struct cam_hw_mini_dump_args hw_dump_args; + unsigned long remain_len = len; + unsigned long dumped_len = 0; + uint32_t i = 0; + int rc = 0; + + if (len < sizeof(*mgr_md)) { + CAM_ERR(CAM_ISP, "Insufficent received length: %u", + len); + return 0; + } + + mgr_md = (struct cam_ife_hw_mini_dump_data *)dst; + 
mgr_md->num_ctx = 0; + hw_mgr = &g_ife_hw_mgr; + dumped_len += sizeof(*mgr_md); + remain_len -= dumped_len; + + list_for_each_entry_safe(ctx, ctx_temp, + &hw_mgr->used_ctx_list, list) { + + if (remain_len < sizeof(*ctx_md)) { + CAM_ERR(CAM_ISP, + "Insufficent received length: %u, dumped_len %u", + len, dumped_len); + goto end; + } + + ctx_md = (struct cam_ife_hw_mini_dump_ctx *) + ((uint8_t *)dst + dumped_len); + mgr_md->ctx[i] = ctx_md; + ctx_md->ctx_index = ctx->ctx_index; + ctx_md->left_hw_idx = ctx->left_hw_idx; + ctx_md->right_hw_idx = ctx->right_hw_idx; + ctx_md->cdm_handle = ctx->cdm_handle; + ctx_md->num_base = ctx->num_base; + ctx_md->cdm_id = ctx->cdm_id; + ctx_md->last_cdm_done_req = ctx->last_cdm_done_req; + ctx_md->applied_req_id = ctx->applied_req_id; + ctx_md->ctx_type = ctx->ctx_type; + ctx_md->overflow_pending = + atomic_read(&ctx->overflow_pending); + ctx_md->cdm_done = atomic_read(&ctx->cdm_done); + memcpy(&ctx_md->pf_info, &ctx->pf_info, + sizeof(struct cam_ife_hw_mgr_ctx_pf_info)); + memcpy(&ctx_md->flags, &ctx->flags, + sizeof(struct cam_ife_hw_mgr_ctx_flags)); + + dumped_len += sizeof(*ctx_md); + + for (j = 0; j < ctx->num_base; j++) { + memcpy(&ctx_md->base[j], &ctx->base[j], + sizeof(struct cam_isp_ctx_base_info)); + hw_idx = ctx->base[j].idx; + if (ctx->base[j].hw_type == CAM_ISP_HW_TYPE_CSID) { + hw_intf = hw_mgr->csid_devices[hw_idx]; + ctx_md->csid_md[hw_idx] = (void *)((uint8_t *)dst + dumped_len); + memset(&hw_dump_args, 0, sizeof(hw_dump_args)); + hw_dump_args.start_addr = ctx_md->csid_md[hw_idx]; + hw_dump_args.len = remain_len; + hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CSID_MINI_DUMP, &hw_dump_args, + sizeof(hw_dump_args)); + if (hw_dump_args.bytes_written == 0) + goto end; + dumped_len += hw_dump_args.bytes_written; + remain_len = len - dumped_len; + } else if (ctx->base[j].hw_type == + CAM_ISP_HW_TYPE_VFE) { + hw_intf = hw_mgr->ife_devices[hw_idx]->hw_intf; + ctx_md->vfe_md[hw_idx] = (void *)((uint8_t *)dst + 
dumped_len); + memset(&hw_dump_args, 0, sizeof(hw_dump_args)); + hw_dump_args.start_addr = ctx_md->vfe_md[hw_idx]; + hw_dump_args.len = remain_len; + hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_IFE_BUS_MINI_DUMP, &hw_dump_args, + sizeof(hw_dump_args)); + if (hw_dump_args.bytes_written == 0) + goto end; + dumped_len += hw_dump_args.bytes_written; + remain_len = len - dumped_len; + } else if (ctx->base[j].hw_type == + CAM_ISP_HW_TYPE_SFE) { + hw_intf = hw_mgr->sfe_devices[hw_idx]->hw_intf; + ctx_md->sfe_md[hw_idx] = (void *)((uint8_t *)dst + dumped_len); + memset(&hw_dump_args, 0, sizeof(hw_dump_args)); + hw_dump_args.start_addr = ctx_md->sfe_md[hw_idx]; + hw_dump_args.len = remain_len; + hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_SFE_BUS_MINI_DUMP, &hw_dump_args, + sizeof(hw_dump_args)); + if (hw_dump_args.bytes_written == 0) + goto end; + dumped_len += hw_dump_args.bytes_written; + remain_len = len - dumped_len; + } + } + + if (ctx->common.mini_dump_cb) { + hw_dump_args.start_addr = (void *)((uint8_t *)dst + dumped_len); + hw_dump_args.len = remain_len; + hw_dump_args.bytes_written = 0; + rc = ctx->common.mini_dump_cb(ctx->common.cb_priv, &hw_dump_args); + if (rc || (hw_dump_args.bytes_written + dumped_len > len)) + goto end; + + ctx_md->ctx_priv = hw_dump_args.start_addr; + dumped_len += hw_dump_args.bytes_written; + remain_len = len - dumped_len; + } + + i++; + } +end: + mgr_md->num_ctx = i; + return dumped_len; +} + +static void cam_ife_hw_mgr_attach_sfe_sys_cache_id( + bool shared, uint32_t type, uint32_t *hw_id, uint32_t num_sfe) +{ + int i; + + if (shared) { + for (i = 0; i < num_sfe; i++) { + g_ife_hw_mgr.sfe_cache_info[i].supported_scid_idx |= BIT(type); + CAM_DBG(CAM_ISP, "SFE[%u] shared scid %d type %u", i, + g_ife_hw_mgr.sys_cache_info[type].scid, type); + } + } else { + g_ife_hw_mgr.sfe_cache_info[*hw_id].supported_scid_idx |= BIT(type); + CAM_DBG(CAM_ISP, "SFE[%u] non-shared cache_type %d, type %u", *hw_id, + 
g_ife_hw_mgr.sys_cache_info[type].scid, type); + *hw_id = (*hw_id + 1) % num_sfe; + } +} + +static int cam_ife_mgr_populate_sys_cache_id(void) +{ + int i, scid, j; + uint32_t hw_id = 0; + uint32_t num_small_scid = 0; + uint32_t num_large_scid = 0; + uint32_t num_sfe = 0; + bool shared; + int rc = 0; + + /* Populate sys cache info */ + g_ife_hw_mgr.num_caches_found = 0; + for (i = 0; i < CAM_SFE_HW_NUM_MAX && g_ife_hw_mgr.sfe_devices[i]; i++) { + g_ife_hw_mgr.sfe_cache_info[i].supported_scid_idx = 0; + num_sfe++; + for (j = 0; j < CAM_ISP_EXPOSURE_MAX; j++) { + g_ife_hw_mgr.sfe_cache_info[i].activated[j] = false; + g_ife_hw_mgr.sfe_cache_info[i].curr_idx[j] = -1; + } + } + + if (!num_sfe) + return rc; + + for (i = CAM_LLCC_SMALL_1; i <= CAM_LLCC_LARGE_4; i++) { + scid = cam_cpas_get_scid(i); + g_ife_hw_mgr.sys_cache_info[i].scid = scid; + g_ife_hw_mgr.sys_cache_info[i].type = i; + if (scid < 0) + continue; + + switch (i) { + case CAM_LLCC_SMALL_1: + case CAM_LLCC_SMALL_2: + num_small_scid++; + break; + case CAM_LLCC_LARGE_1: + case CAM_LLCC_LARGE_2: + case CAM_LLCC_LARGE_3: + case CAM_LLCC_LARGE_4: + num_large_scid++; + break; + default: + break; + } + g_ife_hw_mgr.num_caches_found++; + } + + /* Distribute the smaller cache-ids */ + shared = (bool)(num_small_scid % num_sfe); + for (i = 0; i < num_small_scid; i++) { + if (g_ife_hw_mgr.sys_cache_info[i].scid < 0) + continue; + cam_ife_hw_mgr_attach_sfe_sys_cache_id(shared, + g_ife_hw_mgr.sys_cache_info[i].type, &hw_id, num_sfe); + } + + /* Distribute the large cache-ids */ + shared = (bool)(num_large_scid % num_sfe); + hw_id = 0; + for (i = CAM_LLCC_LARGE_1; i <= CAM_LLCC_LARGE_4; i++) { + if (g_ife_hw_mgr.sys_cache_info[i].scid < 0) + continue; + cam_ife_hw_mgr_attach_sfe_sys_cache_id(shared, + g_ife_hw_mgr.sys_cache_info[i].type, &hw_id, num_sfe); + g_ife_hw_mgr.sys_cache_info[i].llcc_staling_support = false; + rc = cam_cpas_configure_staling_llcc(i, + CAM_LLCC_STALING_MODE_NOTIFY, + CAM_LLCC_NOTIFY_STALING_EVICT, + 
1); + if ((num_large_scid == 1) && (num_large_scid < num_sfe) && + (rc == -EOPNOTSUPP)) { + CAM_ERR(CAM_ISP, + "Fatal error llcc staling feature is not supported cache: %d", i); + rc = -EFAULT; + } else if (!rc && num_large_scid > 1) { + CAM_ERR(CAM_ISP, + "Fatal error llcc staling feature is supported more large cache %d", i); + rc = -EFAULT; + } else if (rc == -EOPNOTSUPP) { + CAM_ERR(CAM_ISP, + "llcc staling feature is not supported cache: %d", i); + } else if (rc) { + CAM_ERR(CAM_ISP, + "llcc staling feature enabling failing cache: %d", i); + } else { + CAM_INFO(CAM_ISP, + "llcc staling feature supported: %d rc = %d", i, rc); + g_ife_hw_mgr.sys_cache_info[i].llcc_staling_support = true; + } + } + + CAM_DBG(CAM_ISP, "Num SCIDs Small:%u Large: %u", num_small_scid, num_large_scid); + for (i = 0; i < num_sfe; i++) + CAM_DBG(CAM_ISP, "SFE[%u] available SCIDs 0x%x", i, + g_ife_hw_mgr.sfe_cache_info[i].supported_scid_idx); + return rc; + +} + +int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl, + uint32_t isp_device_type) +{ + int rc = -EFAULT; + int i, j; + struct cam_iommu_handle cdm_handles; + struct cam_ife_hw_mgr_ctx *ctx_pool; + struct cam_isp_hw_cap isp_cap = {0}; + struct cam_isp_hw_path_port_map path_port_map; + + memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr)); + memset(&path_port_map, 0, sizeof(path_port_map)); + + for (i = 0; i < MAX_INJECT_SET; i++) + cam_isp_irq_inject_clear_params(&g_ife_hw_mgr.irq_inject_param[i]); + + mutex_init(&g_ife_hw_mgr.ctx_mutex); + spin_lock_init(&g_ife_hw_mgr.ctx_lock); + + if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) { + CAM_ERR(CAM_ISP, "CSID num is different then IFE num"); + return -EINVAL; + } + + /* fill ife hw intf information */ + for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + rc = cam_vfe_hw_init(&g_ife_hw_mgr.ife_devices[i], i); + if (!rc) { + struct cam_hw_intf *ife_device = + g_ife_hw_mgr.ife_devices[i]->hw_intf; + struct cam_hw_info *vfe_hw = + (struct cam_hw_info *) + 
ife_device->hw_priv; + struct cam_hw_soc_info *soc_info = &vfe_hw->soc_info; + + if (j == 0) { + ife_device->hw_ops.process_cmd( + vfe_hw, + CAM_ISP_HW_CMD_QUERY_CAP, + &isp_cap, + sizeof(struct cam_isp_hw_cap)); + CAM_DBG(CAM_ISP, + "max VFE out resources: 0x%x num perf counters: 0x%x; IFE/MC_TFE: max FCG channels/contexts: %u, max FCG predictions: %u, FCG supported: %u", + isp_cap.max_out_res_type, isp_cap.num_perf_counters, + isp_cap.max_fcg_ch_ctx, isp_cap.max_fcg_predictions, + isp_cap.fcg_supported); + + ife_device->hw_ops.process_cmd( + vfe_hw, + CAM_ISP_HW_CMD_GET_PATH_PORT_MAP, + &path_port_map, + sizeof(struct cam_isp_hw_path_port_map)); + CAM_DBG(CAM_ISP, "received %d path-port mappings", + path_port_map.num_entries); + } + + j++; + g_ife_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0]; + CAM_DBG(CAM_ISP, + "reg_map: mem base = %pK cam_base = 0x%llx", + (void __iomem *)soc_info->reg_map[0].mem_base, + (uint64_t) soc_info->reg_map[0].mem_cam_base); + + if (g_ife_hw_mgr.ife_devices[i]->num_hw_pid) + g_ife_hw_mgr.hw_pid_support = true; + + } else { + g_ife_hw_mgr.cdm_reg_map[i] = NULL; + } + } + if (j == 0) { + CAM_ERR(CAM_ISP, "no valid IFE HW"); + return -EINVAL; + } + + g_ife_hw_mgr.isp_caps.support_consumed_addr = + isp_cap.support_consumed_addr; + g_ife_hw_mgr.isp_caps.max_vfe_out_res_type = + isp_cap.max_out_res_type; + g_ife_hw_mgr.isp_caps.num_ife_perf_counters = + isp_cap.num_perf_counters; + g_ife_hw_mgr.isp_caps.fcg_caps.max_ife_fcg_ch_ctx = + isp_cap.max_fcg_ch_ctx; + g_ife_hw_mgr.isp_caps.fcg_caps.max_ife_fcg_predictions = + isp_cap.max_fcg_predictions; + g_ife_hw_mgr.isp_caps.fcg_caps.ife_fcg_supported = + isp_cap.fcg_supported; + g_ife_hw_mgr.isp_caps.num_ife_bus_wr_perf_counters = + isp_cap.num_wr_perf_counters; + max_ife_out_res = + g_ife_hw_mgr.isp_caps.max_vfe_out_res_type & 0xFF; + g_ife_hw_mgr.isp_caps.skip_regdump_data.skip_regdump = + isp_cap.skip_regdump_data.skip_regdump; + 
g_ife_hw_mgr.isp_caps.skip_regdump_data.skip_regdump_start_offset = + isp_cap.skip_regdump_data.skip_regdump_start_offset; + g_ife_hw_mgr.isp_caps.skip_regdump_data.skip_regdump_stop_offset = + isp_cap.skip_regdump_data.skip_regdump_stop_offset; + memset(&isp_cap, 0x0, sizeof(struct cam_isp_hw_cap)); + + for (i = 0; i < path_port_map.num_entries; i++) { + g_ife_hw_mgr.path_port_map.entry[i][0] = path_port_map.entry[i][0]; + g_ife_hw_mgr.path_port_map.entry[i][1] = path_port_map.entry[i][1]; + } + g_ife_hw_mgr.path_port_map.num_entries = path_port_map.num_entries; + + g_ife_hw_mgr.isp_device_type = isp_device_type; + + /* fill csid hw intf information */ + for (i = 0, j = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) { + rc = cam_ife_csid_hw_init(&g_ife_hw_mgr.csid_devices[i], i); + if (!rc) { + if (j == 0) { + struct cam_hw_intf *csid_device = g_ife_hw_mgr.csid_devices[i]; + struct cam_hw_info *csid_hw = + (struct cam_hw_info *) csid_device->hw_priv; + rc = csid_device->hw_ops.process_cmd( + csid_hw, + CAM_ISP_HW_CMD_QUERY_CAP, + &isp_cap, + sizeof(struct cam_isp_hw_cap)); + if (!rc) { + CAM_DBG(CAM_ISP, + "Max DT supported: %u", isp_cap.max_dt_supported); + g_ife_hw_mgr.isp_caps.max_dt_supported = + isp_cap.max_dt_supported; + } else { + CAM_ERR(CAM_ISP, "Invalid num of DT supported: %u", + isp_cap.max_dt_supported); + return -EINVAL; + } + } + j++; + } + } + if (!j) { + CAM_ERR(CAM_ISP, "no valid IFE CSID HW"); + return -EINVAL; + } + + memset(&isp_cap, 0x0, sizeof(struct cam_isp_hw_cap)); + + /* fill sfe hw intf info */ + for (i = 0, j = 0; i < CAM_SFE_HW_NUM_MAX; i++) { + rc = cam_sfe_hw_init(&g_ife_hw_mgr.sfe_devices[i], i); + if (!rc) { + if (j == 0) { + struct cam_hw_intf *sfe_device = + g_ife_hw_mgr.sfe_devices[i]->hw_intf; + struct cam_hw_info *sfe_hw = + (struct cam_hw_info *) + sfe_device->hw_priv; + + rc = sfe_device->hw_ops.process_cmd( + sfe_hw, + CAM_ISP_HW_CMD_QUERY_CAP, + &isp_cap, + sizeof(struct cam_isp_hw_cap)); + CAM_DBG(CAM_ISP, + "max SFE out 
resources: 0x%x num_perf_counters: 0x%x; SFE: max FCG channels: %u, max FCG predictions: %u, FCG supported: %d", + isp_cap.max_out_res_type, isp_cap.num_perf_counters, + isp_cap.max_fcg_ch_ctx, isp_cap.max_fcg_predictions, + isp_cap.fcg_supported); + if (!rc) { + g_ife_hw_mgr.isp_caps.max_sfe_out_res_type = + isp_cap.max_out_res_type; + g_ife_hw_mgr.isp_caps.num_sfe_perf_counters = + isp_cap.num_perf_counters; + g_ife_hw_mgr.isp_caps.fcg_caps.max_sfe_fcg_ch_ctx = + isp_cap.max_fcg_ch_ctx; + g_ife_hw_mgr.isp_caps.fcg_caps.max_sfe_fcg_predictions = + isp_cap.max_fcg_predictions; + g_ife_hw_mgr.isp_caps.fcg_caps.sfe_fcg_supported = + isp_cap.fcg_supported; + g_ife_hw_mgr.isp_caps.num_sfe_bus_wr_perf_counters = + isp_cap.num_wr_perf_counters; + max_sfe_out_res = + g_ife_hw_mgr.isp_caps.max_sfe_out_res_type & 0xFF; + } + + if (g_ife_hw_mgr.sfe_devices[i]->num_hw_pid) + g_ife_hw_mgr.hw_pid_support = true; + } + j++; + } + } + if (!j) + CAM_ERR(CAM_ISP, "no valid SFE HW devices"); + + cam_ife_hw_mgr_sort_dev_with_caps(&g_ife_hw_mgr); + + /* setup ife context list */ + INIT_LIST_HEAD(&g_ife_hw_mgr.free_ctx_list); + INIT_LIST_HEAD(&g_ife_hw_mgr.used_ctx_list); + + /* + * for now, we only support one iommu handle. later + * we will need to setup more iommu handle for other + * use cases. 
+ * Also, we have to release them once we have the + * deinit support + */ + rc = cam_smmu_get_handle("ife", + &g_ife_hw_mgr.mgr_common.img_iommu_hdl); + + if (rc && rc != -EALREADY) { + CAM_ERR(CAM_ISP, "Can not get iommu handle"); + return -EINVAL; + } + + if (cam_smmu_get_handle("cam-secure", + &g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure)) { + CAM_ERR(CAM_ISP, "Failed to get secure iommu handle"); + goto secure_fail; + } + + CAM_DBG(CAM_ISP, "iommu_handles: non-secure[0x%x], secure[0x%x]", + g_ife_hw_mgr.mgr_common.img_iommu_hdl, + g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure); + + if (!cam_cdm_get_iommu_handle("ife3", &cdm_handles)) { + CAM_DBG(CAM_ISP, + "Successfully acquired CDM iommu handles 0x%x, 0x%x", + cdm_handles.non_secure, cdm_handles.secure); + g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure; + g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = + cdm_handles.secure; + } else { + CAM_ERR(CAM_ISP, "Failed to acquire CDM iommu handle"); + g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = -1; + g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1; + } + + atomic_set(&g_ife_hw_mgr.active_ctx_cnt, 0); + for (i = 0; i < CAM_IFE_CTX_MAX; i++) { + INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].list); + + INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_in.list); + INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_csid); + INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_src); + INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_sfe_src); + INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_in_rd); + ctx_pool = &g_ife_hw_mgr.ctx_pool[i]; + + ctx_pool->vfe_out_map = CAM_MEM_ZALLOC((max_ife_out_res * + sizeof(uint8_t)), GFP_KERNEL); + if (!ctx_pool->vfe_out_map) { + rc = -ENOMEM; + CAM_ERR(CAM_ISP, "Alloc failed for ife out res list, ctx_idx: %u", + ctx_pool->ctx_index); + goto end; + } + + if (max_sfe_out_res) { + ctx_pool->sfe_out_map = CAM_MEM_ZALLOC((max_sfe_out_res * + sizeof(uint8_t)), GFP_KERNEL); + if (!ctx_pool->sfe_out_map) { + rc = -ENOMEM; 
+ CAM_ERR(CAM_ISP, "Alloc failed for SFE out res list, ctx_idx: %u", + ctx_pool->ctx_index); + goto end; + } + + memset(ctx_pool->sfe_out_map, 0xff, max_sfe_out_res * sizeof(uint8_t)); + } + + /* init context pool */ + INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].free_res_list); + for (j = 0; j < CAM_IFE_HW_RES_POOL_MAX; j++) { + INIT_LIST_HEAD( + &g_ife_hw_mgr.ctx_pool[i].res_pool[j].list); + list_add_tail( + &g_ife_hw_mgr.ctx_pool[i].res_pool[j].list, + &g_ife_hw_mgr.ctx_pool[i].free_res_list); + } + + /* ctx index will be overridden during acquire and reset during release */ + g_ife_hw_mgr.ctx_pool[i].ctx_index = CAM_IFE_CTX_MAX; + g_ife_hw_mgr.ctx_pool[i].hw_mgr = &g_ife_hw_mgr; + + cam_tasklet_init(&g_ife_hw_mgr.mgr_common.tasklet_pool[i], + &g_ife_hw_mgr.ctx_pool[i], i); + g_ife_hw_mgr.ctx_pool[i].common.tasklet_info = + g_ife_hw_mgr.mgr_common.tasklet_pool[i]; + + init_completion(&g_ife_hw_mgr.ctx_pool[i].config_done_complete); + list_add_tail(&g_ife_hw_mgr.ctx_pool[i].list, + &g_ife_hw_mgr.free_ctx_list); + } + + /* Create Worker for ife_hw_mgr with 10 tasks */ + rc = cam_req_mgr_workq_create("cam_ife_worker", 10, + &g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ, 0, + cam_req_mgr_process_workq_cam_ife_worker); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Unable to create worker, ctx_idx: %u", ctx_pool->ctx_index); + goto end; + } + + /* Populate sys cache info */ + rc = cam_ife_mgr_populate_sys_cache_id(); + if (rc == -EFAULT) { + CAM_ERR(CAM_ISP, "LLCC stall notif enable fault"); + goto end; + } + + /* fill return structure */ + hw_mgr_intf->hw_mgr_priv = &g_ife_hw_mgr; + hw_mgr_intf->hw_get_caps = cam_ife_mgr_get_hw_caps; + hw_mgr_intf->hw_acquire = cam_ife_mgr_acquire; + hw_mgr_intf->hw_start = cam_ife_mgr_start_hw; + hw_mgr_intf->hw_stop = cam_ife_mgr_stop_hw; + hw_mgr_intf->hw_read = cam_ife_mgr_read; + hw_mgr_intf->hw_write = cam_ife_mgr_write; + hw_mgr_intf->hw_release = cam_ife_mgr_release_hw; + hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update; + 
hw_mgr_intf->hw_config = cam_ife_mgr_config_hw; + hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd; + hw_mgr_intf->hw_reset = cam_ife_mgr_reset; + hw_mgr_intf->hw_dump = cam_ife_mgr_dump; + hw_mgr_intf->hw_recovery = cam_ife_mgr_recover_hw; + + if (iommu_hdl) + *iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl; + + g_ife_hw_mgr.debug_cfg.force_acq_csid = CAM_IFE_CSID_HW_NUM_MAX; + + cam_ife_hw_mgr_debug_register(); + cam_ife_mgr_count_functional_ife(); + cam_ife_mgr_count_functional_sfe(); + + cam_vfe_get_num_ifes(&g_num_ife_available); + rc = cam_cpas_prepare_subpart_info(CAM_IFE_HW_IDX, g_num_ife_available, + g_num_ife_functional); + if (rc) + CAM_ERR(CAM_ISP, "Failed to populate num_ifes, rc: %d", rc); + + cam_vfe_get_num_ife_lites(&g_num_ife_lite_available); + rc = cam_cpas_prepare_subpart_info(CAM_IFE_LITE_HW_IDX, g_num_ife_lite_available, + g_num_ife_lite_functional); + if (rc) + CAM_ERR(CAM_ISP, "Failed to populate num_ife_lites, rc: %d", rc); + + cam_sfe_get_num_hws(&g_num_sfe_available); + rc = cam_cpas_prepare_subpart_info(CAM_SFE_HW_IDX, g_num_sfe_available, + g_num_sfe_functional); + if (rc) + CAM_ERR(CAM_ISP, "Failed to populate num_sfes, rc: %d", rc); + + cam_common_register_mini_dump_cb(cam_ife_hw_mgr_mini_dump_cb, + "CAM_ISP", NULL); + cam_ife_mgr_test_irq_lines_at_probe(&g_ife_hw_mgr); + + /* Allocate memory for perf counters */ + if (g_ife_hw_mgr.isp_caps.num_ife_perf_counters) { + g_ife_hw_mgr.debug_cfg.ife_perf_counter_val = CAM_MEM_ZALLOC_ARRAY( + g_ife_hw_mgr.isp_caps.num_ife_perf_counters, + sizeof(uint32_t), GFP_KERNEL); + } + + if (g_ife_hw_mgr.isp_caps.num_sfe_perf_counters) { + g_ife_hw_mgr.debug_cfg.sfe_perf_counter_val = CAM_MEM_ZALLOC_ARRAY( + g_ife_hw_mgr.isp_caps.num_sfe_perf_counters, + sizeof(uint32_t), GFP_KERNEL); + } + + /* Allocate memory for perf counters */ + if (g_ife_hw_mgr.isp_caps.num_ife_bus_wr_perf_counters) { + g_ife_hw_mgr.debug_cfg.ife_bus_wr_perf_counter_val = kcalloc( + g_ife_hw_mgr.isp_caps.num_ife_bus_wr_perf_counters, + 
sizeof(uint32_t), GFP_KERNEL); + } + + if (g_ife_hw_mgr.isp_caps.num_sfe_bus_wr_perf_counters) { + g_ife_hw_mgr.debug_cfg.sfe_bus_wr_perf_counter_val = kcalloc( + g_ife_hw_mgr.isp_caps.num_sfe_bus_wr_perf_counters, + sizeof(uint32_t), GFP_KERNEL); + } + + CAM_DBG(CAM_ISP, "Exit"); + + return 0; +end: + if (rc) { + for (i = 0; i < CAM_IFE_CTX_MAX; i++) { + cam_tasklet_deinit( + &g_ife_hw_mgr.mgr_common.tasklet_pool[i]); + g_ife_hw_mgr.ctx_pool[i].cdm_cmd = NULL; + CAM_MEM_FREE(g_ife_hw_mgr.ctx_pool[i].vfe_out_map); + CAM_MEM_FREE(g_ife_hw_mgr.ctx_pool[i].sfe_out_map); + g_ife_hw_mgr.ctx_pool[i].vfe_out_map = NULL; + g_ife_hw_mgr.ctx_pool[i].sfe_out_map = NULL; + g_ife_hw_mgr.ctx_pool[i].common.tasklet_info = NULL; + } + } + cam_smmu_destroy_handle( + g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure); + g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1; +secure_fail: + cam_smmu_destroy_handle(g_ife_hw_mgr.mgr_common.img_iommu_hdl); + g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1; + return rc; +} + +void cam_ife_hw_mgr_deinit(void) +{ + int i = 0; + + cam_req_mgr_workq_destroy(&g_ife_hw_mgr.workq); + g_ife_hw_mgr.debug_cfg.dentry = NULL; + CAM_MEM_FREE(g_ife_hw_mgr.debug_cfg.sfe_perf_counter_val); + CAM_MEM_FREE(g_ife_hw_mgr.debug_cfg.ife_perf_counter_val); + CAM_MEM_FREE(g_ife_hw_mgr.debug_cfg.sfe_perf_counter_val); + CAM_MEM_FREE(g_ife_hw_mgr.debug_cfg.ife_perf_counter_val); + CAM_MEM_FREE(g_ife_hw_mgr.debug_cfg.sfe_bus_wr_perf_counter_val); + CAM_MEM_FREE(g_ife_hw_mgr.debug_cfg.ife_bus_wr_perf_counter_val); + + for (i = 0; i < CAM_IFE_CTX_MAX; i++) { + cam_tasklet_deinit( + &g_ife_hw_mgr.mgr_common.tasklet_pool[i]); + g_ife_hw_mgr.ctx_pool[i].cdm_cmd = NULL; + CAM_MEM_FREE(g_ife_hw_mgr.ctx_pool[i].vfe_out_map); + CAM_MEM_FREE(g_ife_hw_mgr.ctx_pool[i].sfe_out_map); + g_ife_hw_mgr.ctx_pool[i].vfe_out_map = NULL; + g_ife_hw_mgr.ctx_pool[i].sfe_out_map = NULL; + g_ife_hw_mgr.ctx_pool[i].common.tasklet_info = NULL; + } + + cam_smmu_destroy_handle( + 
g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure); + g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1; + + cam_smmu_destroy_handle(g_ife_hw_mgr.mgr_common.img_iommu_hdl); + g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1; + g_ife_hw_mgr.num_caches_found = 0; + + if (irq_inject_display_buf) + CAM_MEM_FREE(irq_inject_display_buf); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h new file mode 100644 index 0000000000..d47d49781c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h @@ -0,0 +1,747 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#ifndef _CAM_IFE_HW_MGR_H_ +#define _CAM_IFE_HW_MGR_H_ + +#include +#include +#include "cam_isp_hw_mgr.h" +#include "cam_vfe_hw_intf.h" +#include "cam_ife_csid_hw_intf.h" +#include "cam_tasklet_util.h" +#include "cam_cdm_intf_api.h" +#include "cam_cpas_api.h" + +/* + * enum cam_ife_ctx_master_type - HW master type + * CAM_IFE_CTX_TYPE_NONE: IFE ctx/stream directly connected to CSID + * CAM_IFE_CTX_TYPE_CUSTOM: IFE ctx/stream connected to custom HW + * CAM_IFE_CTX_TYPE_SFE: IFE ctx/stream connected to SFE + */ +enum cam_ife_ctx_master_type { + CAM_IFE_CTX_TYPE_NONE, + CAM_IFE_CTX_TYPE_CUSTOM, + CAM_IFE_CTX_TYPE_SFE, + CAM_IFE_CTX_TYPE_MAX, +}; + +/* IFE resource constants */ +#define CAM_IFE_HW_IN_RES_MAX (CAM_ISP_IFE_IN_RES_MAX & 0xFF) +#define CAM_IFE_HW_RES_POOL_MAX 64 + +/* IFE_HW_MGR ctx config */ +#define CAM_IFE_CTX_CFG_FRAME_HEADER_TS BIT(0) +#define CAM_IFE_CTX_CFG_SW_SYNC_ON BIT(1) +#define CAM_IFE_CTX_CFG_DYNAMIC_SWITCH_ON BIT(2) + +/* Maximum set for irq injection*/ +#define MAX_INJECT_SET 10 + +/* IFE CSID non recovery errors */ +#define CAM_ISP_NON_RECOVERABLE_CSID_ERRORS \ + (CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW 
| \ + CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED | \ + CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA | \ + CAM_ISP_HW_ERROR_CSID_FATAL | \ + CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME | \ + CAM_ISP_HW_ERROR_CSID_MISSING_EOT | \ + CAM_ISP_HW_ERROR_CSID_ILLEGAL_DT_SWITCH) + +/* IFE CSID recovery errors */ +#define CAM_ISP_RECOVERABLE_CSID_ERRORS \ + (CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR | \ + CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP | \ + CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED) + +/* Maximum DRV request depth */ +#define MAX_DRV_REQUEST_DEPTH 8 + +/* Global Counter has frequency 19.2 Mhz */ +#define GC_FREQUENCY_IN_KHZ 19200 + +/* Trigger single out of sync debugfs */ +#define CAM_IFE_CTX_TRIGGER_SINGLE_OUT_OF_SYNC_CFG 1 + +/** + * struct cam_ife_hw_mgr_debug - contain the debug information + * + * @dentry: Debugfs entry + * @csid_debug: csid debug information + * @rx_capture_debug: rx capture debug info + * @enable_recovery: enable recovery + * @camif_debug: camif debug info + * @enable_csid_recovery: enable csid recovery + * @sfe_debug: sfe debug config + * @sfe_sensor_diag_cfg: sfe sensor diag config + * @csid_test_bus: csid test bus config + * @sfe_cache_debug: sfe cache debug info + * @perf_cnt_res_id: res_id for WM on which perf cnt is enabled + * @ife_perf_counter_val: ife perf counter values + * @sfe_perf_counter_val: sfe perf counter values + * @csid_domain_id_value: Value of domain id to set on CSID + * @csid_out_of_sync_simul: Controls out of sync simulation + * @force_acq_csid: Acquire specific csid + * @enable_req_dump: Enable request dump on HW errors + * @per_req_reg_dump: Enable per request reg dump + * @disable_ubwc_comp: Disable UBWC compression + * @disable_ife_mmu_prefetch: Disable MMU prefetch for IFE bus WR + * @enable_ife_frame_irqs: Enable Frame timing IRQs for IFE/MCTFE + * @rx_capture_debug_set: If rx capture debug is set by user + * @disable_isp_drv: Disable ISP DRV config + * @enable_presil_reg_dump: Enable per req regdump in presil + * 
@enable_cdm_cmd_check: Enable invalid command check in cmd_buf + * @enable_csid_set_domain_id: Enable CSID force set per path domain id + * @per_req_wait_cdm: Enable per req wait cdm + * @enable_cdr_sweep_debug: Enable sending some CSID reg values as part + * of CSIPHY CDR tuning + */ +struct cam_ife_hw_mgr_debug { + struct dentry *dentry; + uint64_t csid_debug; + uint32_t rx_capture_debug; + uint32_t enable_recovery; + uint64_t camif_debug; + uint32_t enable_csid_recovery; + uint32_t sfe_debug; + uint32_t sfe_sensor_diag_cfg; + uint32_t csid_test_bus; + uint32_t sfe_cache_debug[CAM_SFE_HW_NUM_MAX]; + uint32_t perf_cnt_res_id; + uint32_t *ife_perf_counter_val; + uint32_t *sfe_perf_counter_val; + uint32_t csid_domain_id_value; + uint32_t csid_out_of_sync_simul; + uint32_t *ife_bus_wr_perf_counter_val; + uint32_t *sfe_bus_wr_perf_counter_val; + uint32_t force_acq_csid; + bool enable_req_dump; + bool per_req_reg_dump; + bool disable_ubwc_comp; + bool disable_ife_mmu_prefetch; + bool enable_ife_frame_irqs; + bool rx_capture_debug_set; + bool disable_isp_drv; + bool enable_presil_reg_dump; + bool enable_cdm_cmd_check; + bool enable_csid_set_domain_id; + bool per_req_wait_cdm; + bool enable_cdr_sweep_debug; + bool enable_sfe_wr_perf_cntr; +}; + +/** + * struct cam_ife_hw_mgr_ctx_pf_info - pf buf info + * + * @out_port_id: Out port id + * @mid: MID value + */ +struct cam_ife_hw_mgr_ctx_pf_info { + uint32_t out_port_id; + uint32_t mid; +}; + +/** + * struct cam_ife_sfe_scratch_buf_info - Scratch buf info + * + * @width: Width in pixels + * @height: Height in pixels + * @stride: Stride in pixels + * @slice_height: Height in lines + * @io_addr: Buffer address + * @res_id: Resource type + * @offset: Buffer offset + * @config_done: To indicate if RDIx received scratch cfg + * @is_secure: secure scratch buffer + */ +struct cam_ife_sfe_scratch_buf_info { + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t slice_height; + dma_addr_t io_addr; + uint32_t res_id; + 
uint32_t offset;
+	bool config_done;
+	bool is_secure;
+};
+
+/**
+ * struct cam_sfe_scratch_buf_cfg - Scratch buf info
+ *
+ * @num_config: Total Number of scratch buffers provided
+ * @streamon_buf_mask: Mask to indicate which ports have received buffer
+ *                     from userspace, apply scratch for other ports
+ * @updated_num_exp: Current num of exposures
+ * @buf_info: Info on each of the buffers
+ * @skip_scratch_cfg_streamon: Determine if scratch cfg needs to be programmed at stream on
+ *
+ */
+struct cam_sfe_scratch_buf_cfg {
+	uint32_t num_config;
+	uint32_t streamon_buf_mask;
+	uint32_t updated_num_exp;
+	struct cam_ife_sfe_scratch_buf_info buf_info[
+		CAM_SFE_FE_RDI_NUM_MAX];
+	bool skip_scratch_cfg_streamon;
+};
+
+/**
+ * struct cam_ife_scratch_buf_cfg - Scratch buf info
+ *
+ * @num_config: Total Number of scratch buffers provided
+ * @streamon_buf_mask: Mask to indicate which ports have received buffer
+ *                     from userspace, apply scratch for other ports
+ * @buf_info: Info on each of the buffers
+ * @skip_scratch_cfg_streamon: Determine if scratch cfg needs to be programmed at stream on
+ *
+ */
+struct cam_ife_scratch_buf_cfg {
+	uint32_t num_config;
+	uint32_t streamon_buf_mask;
+	struct cam_ife_sfe_scratch_buf_info buf_info[
+		CAM_IFE_SCRATCH_NUM_MAX];
+	bool skip_scratch_cfg_streamon;
+};
+
+
+/**
+ * struct cam_ife_hw_mgr_ctx_scratch_buf_info - Scratch buffer info
+ *
+ * @num_fetches: Indicate number of SFE fetches for this stream
+ * @sfe_scratch_config: Scratch buffer config if any for SFE ports
+ * @ife_scratch_config: Scratch buffer config if any for IFE ports
+ */
+struct cam_ife_hw_mgr_ctx_scratch_buf_info {
+	uint32_t num_fetches;
+	struct cam_sfe_scratch_buf_cfg *sfe_scratch_config;
+	struct cam_ife_scratch_buf_cfg *ife_scratch_config;
+};
+
+/**
+ * struct cam_ife_hw_mgr_ctx_flags - IFE HW mgr ctx flags
+ *
+ * @ctx_in_use: Flag to tell whether context is active
+ * @init_done: Indicate whether init hw is done
+ * @is_fe_enabled: Indicate
whether fetch engine/read path is enabled
+ * @is_dual: Indicate whether context is in dual VFE mode
+ * @is_offline: Indicate whether context is for offline IFE
+ * @dsp_enabled: Indicate whether dsp is enabled in this context
+ * @internal_cdm: Indicate whether context uses internal CDM
+ * @pf_mid_found: In page fault, mid found for this ctx.
+ * @need_csid_top_cfg: Flag to indicate if CSID top cfg is needed.
+ * @is_rdi_only_context: Flag to specify the context has only rdi resource
+ * @is_lite_context: Flag to specify the context only uses lite
+ *                   resources
+ * @is_sfe_shdr: Indicate if stream is for SFE sHDR
+ * @is_sfe_fs: Indicate if stream is for inline SFE FS
+ * @dump_on_flush: Set if reg dump triggered on flush
+ * @dump_on_error: Set if reg dump triggered on error
+ * @is_aeb_mode: Set if custom AEB stream
+ * @rdi_lcr_en: To indicate if RDI LCR is enabled
+ * @sys_cache_usage: Per context sys cache usage
+ *                   The corresponding index will be set
+ *                   for the cache type
+ * @rdi_pd_context: Flag to specify the context has
+ *                  only rdi and PD resource without PIX port.
+ * @dynamic_drv_supported: Indicate if the dynamic drv is supported
+ * @skip_reg_dump_buf_put: Set if put_cpu_buf for reg dump buf is already called
+ *
+ */
+struct cam_ife_hw_mgr_ctx_flags {
+	bool ctx_in_use;
+	bool init_done;
+	bool is_fe_enabled;
+	bool is_dual;
+	bool is_offline;
+	bool dsp_enabled;
+	bool internal_cdm;
+	bool pf_mid_found;
+	bool need_csid_top_cfg;
+	bool is_rdi_only_context;
+	bool is_lite_context;
+	bool is_sfe_shdr;
+	bool is_sfe_fs;
+	bool dump_on_flush;
+	bool dump_on_error;
+	bool is_aeb_mode;
+	bool rdi_lcr_en;
+	bool sys_cache_usage[CAM_LLCC_LARGE_4 + 1];
+	bool rdi_pd_context;
+	bool dynamic_drv_supported;
+	bool skip_reg_dump_buf_put;
+};
+
+/**
+ * struct cam_ife_cdm_user_data - IFE HW user data with CDM
+ *
+ * @hw_update_data: Prepare hw update data
+ * @request_id: Request id
+ */
+struct cam_ife_cdm_user_data {
+	struct cam_isp_prepare_hw_update_data *hw_update_data;
+	uint64_t request_id;
+};
+
+/**
+ * struct cam_isp_context_comp_record:
+ *
+ * @brief: Structure record the res id reserved on a comp group
+ *
+ * @num_res: Number of valid resource IDs in this record
+ * @res_id: Resource IDs to report buf dones
+ * @hw_ctxt_id: Hw ctxt id corresponding to the res_id entry
+ *
+ */
+struct cam_isp_context_comp_record {
+	uint32_t num_res;
+	uint32_t res_id[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	uint32_t hw_ctxt_id[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+};
+
+/**
+ * struct cam_isp_comp_record_query:
+ *
+ * @brief: Structure record the bus comp group pointer information
+ *
+ * @vfe_bus_comp_grp: Vfe bus comp group pointer
+ * @sfe_bus_comp_grp: Sfe bus comp group pointer
+ *
+ */
+struct cam_isp_comp_record_query {
+	struct cam_isp_context_comp_record *vfe_bus_comp_grp;
+	struct cam_isp_context_comp_record *sfe_bus_comp_grp;
+};
+
+/**
+ * struct cam_cmd_buf_desc_addr_len
+ *
+ * brief: structure to store cpu addr and size of
+ *        reg dump descriptors
+ * @cpu_addr: cpu addr of buffer
+ * @buf_size: size of the buffer
+ */
+
+struct
cam_cmd_buf_desc_addr_len { + uintptr_t cpu_addr; + size_t buf_size; +}; + +/** + * struct cam_ife_hw_mgr_ctx - IFE HW manager Context object + * + * @list: used by the ctx list. + * @common: common acquired context data + * @ctx_index: acquired context id. + * @left_hw_idx: hw index for master core [left] + * @right_hw_idx: hw index for slave core [right] + * @hw_mgr: IFE hw mgr which owns this context + * @res_list_csid: CSID resource list + * @res_list_ife_src: IFE input resource list + * @res_list_sfe_src SFE input resource list + * @res_list_ife_in_rd IFE/SFE input resource list for read path + * @res_list_ife_out: IFE output resoruces array + * @res_list_sfe_out: SFE output resources array + * @vfe_out_map: Map for VFE out ports + * @sfe_out_map: Map for SFE out ports + * @num_acq_vfe_out: Number of acquired VFE out resources + * @num_acq_sfe_out: Number of acquired SFE out resources + * @free_res_list: Free resources list for the branch node + * @res_pool: memory storage for the free resource list + * @irq_status0_mask: irq_status0_mask for the context + * @irq_status1_mask: irq_status1_mask for the context + * @base device base index array contain the all IFE HW + * instance associated with this context. 
+ * @num_base number of valid base data in the base array + * @cdm_handle cdm hw acquire handle + * @cdm_hw_idx: Physical CDM in use + * @cdm_ops cdm util operation pointer for building + * cdm commands + * @cdm_cmd cdm base and length request pointer + * @cdm_id cdm id of the acquired cdm + * @sof_cnt sof count value per core, used for dual VFE + * @epoch_cnt epoch count value per core, used for dual VFE + * @eof_cnt eof count value per core, used for dual VFE + * @overflow_pending flat to specify the overflow is pending for the + * context + * @cdm_done flag to indicate cdm has finished writing shadow + * registers + * @last_cdm_done_req: Last cdm done request + * @config_done_complete indicator for configuration complete + * @reg_dump_buf_desc: cmd buffer descriptors for reg dump + * @num_reg_dump_buf: Count of descriptors in reg_dump_buf_desc + * @applied_req_id: Last request id to be applied + * @ctx_type Type of IFE ctx [CUSTOM/SFE etc.] + * @ctx_config ife ctx config [bit field] + * @ts captured timestamp when the ctx is acquired + * @hw_enabled Array to indicate active HW + * @buf_done_controller Buf done controller. 
+ * @mc_comp_buf_done_controller: Buf done controller for hw context composite buf dones + * @scratch_buf_info Scratch buf [SFE/IFE] info pertaining to this stream + * @flags Flags pertainting to this ctx + * @bw_config_version BW Config version + * @recovery_id: Unique ID of the current valid scheduled recovery + * @current_mup: Current MUP val, scratch will then apply the same as previously + * applied request + * @curr_num_exp: Current num of exposures + * @try_recovery_cnt: Retry count for overflow recovery + * @recovery_req_id: The request id on which overflow recovery happens + * @drv_path_idle_en: Path idle enable value for DRV + * @major_version: Major version for acquire + * @vfe_bus_comp_grp: VFE composite group placeholder + * @sfe_bus_comp_grp: SFE composite group placeholder + * @cdm_done_ts: CDM callback done timestamp + * @is_hw_ctx_acq: If acquire for ife ctx is having hw ctx acquired + * @acq_hw_ctxt_src_dst_map: Src to dst hw ctxt map for acquired pixel paths + * @pri_rdi_out_res: Primary RDI res for RDI only cases + * @drv_info: Array to include the per request drv info + * @is_init_drv_cfg_received: Indicate if init drv config has received + */ +struct cam_ife_hw_mgr_ctx { + struct list_head list; + struct cam_isp_hw_mgr_ctx common; + uint32_t ctx_index; + uint32_t left_hw_idx; + uint32_t right_hw_idx; + struct cam_ife_hw_mgr *hw_mgr; + struct cam_isp_hw_mgr_res res_list_ife_in; + struct list_head res_list_ife_csid; + struct list_head res_list_ife_src; + struct list_head res_list_sfe_src; + struct list_head res_list_ife_in_rd; + struct cam_isp_hw_mgr_res *res_list_ife_out; + struct cam_isp_hw_mgr_res *res_list_sfe_out; + struct list_head free_res_list; + struct cam_isp_hw_mgr_res res_pool[CAM_IFE_HW_RES_POOL_MAX]; + uint8_t *vfe_out_map; + uint8_t *sfe_out_map; + uint32_t num_acq_vfe_out; + uint32_t num_acq_sfe_out; + + uint32_t irq_status0_mask[CAM_IFE_HW_NUM_MAX]; + uint32_t irq_status1_mask[CAM_IFE_HW_NUM_MAX]; + struct cam_isp_ctx_base_info 
base[CAM_IFE_HW_NUM_MAX + + CAM_SFE_HW_NUM_MAX]; + uint32_t num_base; + uint32_t cdm_handle; + int32_t cdm_hw_idx; + struct cam_cdm_utils_ops *cdm_ops; + struct cam_cdm_bl_request *cdm_cmd; + enum cam_cdm_id cdm_id; + uint32_t sof_cnt[CAM_IFE_HW_NUM_MAX]; + uint32_t epoch_cnt[CAM_IFE_HW_NUM_MAX]; + uint32_t eof_cnt[CAM_IFE_HW_NUM_MAX]; + atomic_t overflow_pending; + atomic_t cdm_done; + uint64_t last_cdm_done_req; + struct completion config_done_complete; + uint32_t hw_version; + struct cam_cmd_buf_desc reg_dump_buf_desc[ + CAM_REG_DUMP_MAX_BUF_ENTRIES]; + uint32_t num_reg_dump_buf; + struct cam_cmd_buf_desc_addr_len reg_dump_cmd_buf_addr_len[ + CAM_REG_DUMP_MAX_BUF_ENTRIES]; + uint64_t applied_req_id; + enum cam_ife_ctx_master_type ctx_type; + uint32_t ctx_config; + struct timespec64 ts; + void *buf_done_controller; + void *mc_comp_buf_done_controller; + struct cam_ife_hw_mgr_ctx_scratch_buf_info scratch_buf_info; + struct cam_ife_hw_mgr_ctx_flags flags; + struct cam_ife_hw_mgr_ctx_pf_info pf_info; + struct cam_ife_cdm_user_data cdm_userdata; + uint32_t bw_config_version; + atomic_t recovery_id; + uint32_t current_mup; + uint32_t curr_num_exp; + uint32_t try_recovery_cnt; + uint64_t recovery_req_id; + uint32_t drv_path_idle_en; + uint32_t major_version; + struct cam_isp_context_comp_record *vfe_bus_comp_grp; + struct cam_isp_context_comp_record *sfe_bus_comp_grp; + struct timespec64 cdm_done_ts; + bool is_hw_ctx_acq; + uint32_t acq_hw_ctxt_src_dst_map[CAM_ISP_MULTI_CTXT_MAX]; + uint32_t pri_rdi_out_res; + struct cam_isp_hw_per_req_info per_req_info[MAX_DRV_REQUEST_DEPTH]; + uint8_t wr_per_req_index; + bool is_init_drv_cfg_received; +}; + +/** + * struct cam_isp_fcg_caps - IFE/SFE/MC_TFE FCG capabilities + * + * @max_ife_fcg_ch_ctx: Maximum number of IFE FCG channels/TFE FCG contexts + * @max_sfe_fcg_ch_ctx: Maximum number of SFE FCG channels + * @max_ife_fcg_predictions: Maximum number of IFE FCG predictions + * @max_sfe_fcg_predictions: Maximum number of SFE FCG 
predictions + * @ife_fcg_supported: Indicate whether FCG is supported by IFE hw + * @sfe_fcg_supported: Indicate whether FCG is supported by SFE hw + */ +struct cam_isp_fcg_caps { + uint32_t max_ife_fcg_ch_ctx; + uint32_t max_sfe_fcg_ch_ctx; + uint32_t max_ife_fcg_predictions; + uint32_t max_sfe_fcg_predictions; + bool ife_fcg_supported; + bool sfe_fcg_supported; +}; + +/** + * struct cam_isp_ife_sfe_hw_caps - IFE/SFE hw capabilities + * + * @max_vfe_out_res_type : max ife out res type value from hw + * @max_sfe_out_res_type : max sfe out res type value from hw + * @num_ife_perf_counters : max ife perf counters supported + * @num_sfe_perf_counters : max sfe perf counters supported + * @max_dt_supported : max DT CSID can decode + * @support_consumed_addr : indicate whether hw supports last consumed address + */ +struct cam_isp_ife_sfe_hw_caps { + struct cam_isp_fcg_caps fcg_caps; + uint32_t max_vfe_out_res_type; + uint32_t max_sfe_out_res_type; + uint32_t num_ife_perf_counters; + uint32_t num_sfe_perf_counters; + uint32_t num_ife_bus_wr_perf_counters; + uint32_t num_sfe_bus_wr_perf_counters; + uint32_t max_dt_supported; + bool support_consumed_addr; + struct cam_isp_hw_regiter_dump_data skip_regdump_data; +}; + +/* + * struct cam_isp_sys_cache_info: + * + * @Brief: ISP Bus sys cache info. Placeholder for all cache ids and their + * types + * + * @type: Cache type + * @scid: Cache slice ID + * @llcc_staling_support to check llcc sys cache stalling mode supported or not + */ +struct cam_isp_sys_cache_info { + uint32_t type; + int32_t scid; + bool llcc_staling_support; +}; + +/* + * struct cam_isp_sfe_cache_info: + * + * @Brief: SFE cache info. Placeholder for: + * 1. Supported cache IDs which are populated during + * probe based on large and small. + * 2. keeps track for the current cache id used for a + * particular exposure type + * 3. keeps track of acvitated exposures. 
+ * Based on this data, we can toggle the SCIDs for a particular + * hw whenever there is a hw halt. Also we don't change + * the SCID in case of dynamic exposure switches. + * + * @supported_scid_idx: Bit mask for IDs supported for each exposure type + * @curr_idx: Index of Cache ID in use for each exposure + * @activated: Maintains if the cache is activated for a particular exposure + */ +struct cam_isp_sfe_cache_info { + int supported_scid_idx; + int curr_idx[CAM_ISP_EXPOSURE_MAX]; + bool activated[CAM_ISP_EXPOSURE_MAX]; +}; + +/* + * struct cam_isp_irq_inject_irq_desc: Structure to hold IRQ description + * + * @bitmask : Bitmask of the IRQ + * @desc : String to describe the IRQ bit + */ +struct cam_isp_irq_inject_irq_desc { + uint32_t bitmask; + char *desc; +}; + +/* + * enum cam_isp_irq_inject_common_param_pos - Irq injection param + * + * HW_TYPE : hw to inject IRQ + * HW_IDX : index of the selected hw + * RES_ID : register to set irq + * IRQ_MASK : IRQ to be triggered + * INJECT_REQ : req to trigger the IRQ + * INJECT_PARAM_MAX: max allowed num of injected param + */ +enum cam_isp_irq_inject_common_param_pos { + HW_TYPE, + HW_IDX, + REG_UNIT, + IRQ_MASK, + INJECT_REQ, + INJECT_PARAM_MAX +}; + +/** + * struct cam_ife_hw_mgr - IFE HW Manager + * + * @mgr_common: common data for all HW managers + * @csid_devices; csid device instances array. This will be filled by + * HW manager during the initialization. + * @ife_devices: IFE device instances array. 
This will be filled by + * HW layer during initialization + * @sfe_devices: SFE device instance array + * @ctx_mutex: mutex for the hw context pool + * @free_ctx_list: free hw context list + * @used_ctx_list: used hw context list + * @ctx_pool: context storage + * @csid_hw_caps csid hw capability stored per core + * @ife_dev_caps ife device capability per core + * @work q work queue for IFE hw manager + * @debug_cfg debug configuration + * @ctx_lock context lock + * @hw_pid_support hw pid support for this target + * @csid_aup_rup_en Reg update at CSID side + * @csid_global_reset_en CSID global reset enable + * @csid_camif_irq_support CSID camif IRQ support + * @cam_ddr_drv_support DDR DRV support + * @cam_clk_drv_support CLK DRV support + * @isp_caps Capability of underlying SFE/IFE HW + * @path_port_map Mapping of outport to IFE mux + * @num_caches_found Number of caches supported + * @sys_cache_info Sys cache info + * @sfe_cache_info SFE Cache Info + * @isp_device_type: If device supports single-context(ife) or multi- + * context(mc_tfe) + * @irq_inject_param Param for isp irq injection + */ +struct cam_ife_hw_mgr { + struct cam_isp_hw_mgr mgr_common; + struct cam_hw_intf *csid_devices[CAM_IFE_CSID_HW_NUM_MAX]; + struct cam_isp_hw_intf_data *ife_devices[CAM_IFE_HW_NUM_MAX]; + struct cam_isp_hw_intf_data *sfe_devices[CAM_SFE_HW_NUM_MAX]; + struct cam_soc_reg_map *cdm_reg_map[CAM_IFE_HW_NUM_MAX]; + + struct mutex ctx_mutex; + atomic_t active_ctx_cnt; + struct list_head free_ctx_list; + struct list_head used_ctx_list; + struct cam_ife_hw_mgr_ctx ctx_pool[CAM_IFE_CTX_MAX]; + + struct cam_ife_csid_hw_caps csid_hw_caps[ + CAM_IFE_CSID_HW_NUM_MAX]; + struct cam_vfe_hw_get_hw_cap ife_dev_caps[CAM_IFE_HW_NUM_MAX]; + struct cam_req_mgr_core_workq *workq; + struct cam_ife_hw_mgr_debug debug_cfg; + spinlock_t ctx_lock; + struct cam_isp_ife_sfe_hw_caps isp_caps; + struct cam_isp_hw_path_port_map path_port_map; + + uint32_t num_caches_found; + struct cam_isp_sys_cache_info 
sys_cache_info[CAM_LLCC_LARGE_4 + 1]; + struct cam_isp_sfe_cache_info sfe_cache_info[CAM_SFE_HW_NUM_MAX]; + uint32_t isp_device_type; + + struct cam_isp_irq_inject_param irq_inject_param[MAX_INJECT_SET]; + bool hw_pid_support; + bool csid_aup_rup_en; + bool csid_global_reset_en; + bool csid_camif_irq_support; + bool cam_ddr_drv_support; + bool cam_clk_drv_support; +}; + +/** + * struct cam_ife_hw_event_recovery_data - Payload for the recovery procedure + * + * @error_type: Error type that causes the recovery + * @affected_core: Array of the hardware cores that are affected + * @affected_ctx: Array of the hardware contexts that are affected + * @no_of_context: Actual number of the affected context + * @id: Unique ID associated with this recovery data (per HW context) + * + */ +struct cam_ife_hw_event_recovery_data { + uint32_t error_type; + uint32_t affected_core[CAM_ISP_HW_NUM_MAX]; + struct cam_ife_hw_mgr_ctx *affected_ctx[CAM_IFE_CTX_MAX]; + uint32_t no_of_context; + uint32_t id[CAM_IFE_CTX_MAX]; +}; + +/** + * struct cam_ife_hw_mini_dump_ctx - Mini dump data + * + * @base: device base index array contain the all IFE HW + * @pf_info: Page Fault Info + * @csid_md: CSID mini dump data + * @vfe_md: VFE mini dump data + * @sfe_md: SFE mini dump data + * @flags: Flags pertainting to this ctx + * @ctx_priv: Array of the hardware contexts that are affected + * @last_cdm_done_req: Last cdm done request + * @applied_req_id: Last request id to be applied + * @cdm_handle: cdm hw acquire handle + * @ctx_index: acquired context id. + * @left_hw_idx: hw index for master core [left] + * @right_hw_idx: hw index for slave core [right] + * @num_base: number of valid base data in the base array + * @cdm_id: cdm id of the acquired cdm + * @ctx_type: Type of IFE ctx [CUSTOM/SFE etc.] 
+ * @overflow_pending: flag to specify the overflow is pending for the
+ *                    context
+ * @cdm_done: flag to indicate cdm has finished writing shadow
+ *            registers
+ */
+struct cam_ife_hw_mini_dump_ctx {
+	struct cam_isp_ctx_base_info base[CAM_IFE_HW_NUM_MAX +
+		CAM_SFE_HW_NUM_MAX];
+	struct cam_ife_hw_mgr_ctx_pf_info pf_info;
+	void *csid_md[CAM_IFE_HW_NUM_MAX];
+	void *vfe_md[CAM_IFE_HW_NUM_MAX];
+	void *sfe_md[CAM_SFE_HW_NUM_MAX];
+	struct cam_ife_hw_mgr_ctx_flags flags;
+	void *ctx_priv;
+	uint64_t last_cdm_done_req;
+	uint64_t applied_req_id;
+	uint32_t cdm_handle;
+	uint8_t ctx_index;
+	uint8_t left_hw_idx;
+	uint8_t right_hw_idx;
+	uint8_t num_base;
+	enum cam_cdm_id cdm_id;
+	enum cam_ife_ctx_master_type ctx_type;
+	bool overflow_pending;
+	bool cdm_done;
+};
+
+/**
+ * struct cam_ife_hw_mini_dump_data - Mini dump data
+ *
+ * @num_ctx: Number of context dumped
+ * @ctx: Array of context
+ *
+ */
+struct cam_ife_hw_mini_dump_data {
+	uint32_t num_ctx;
+	struct cam_ife_hw_mini_dump_ctx *ctx[CAM_IFE_CTX_MAX];
+};
+
+/**
+ * cam_ife_hw_mgr_init()
+ *
+ * @brief: Initialize the IFE hardware manager. This is the
+ *         entry function for the IFE HW manager.
+ *
+ * @hw_mgr_intf: IFE hardware manager object returned
+ * @iommu_hdl: Iommu handle to be returned
+ * @isp_device_type: If device supports single-context(ife) or multi-
+ *                   context(mc_tfe)
+ *
+ */
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl,
+	uint32_t isp_device_type);
+void cam_ife_hw_mgr_deinit(void);
+
+#endif /* _CAM_IFE_HW_MGR_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
new file mode 100644
index 0000000000..2fada4c84f
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_isp_hw_mgr_intf.h" +#include "cam_ife_hw_mgr.h" +#include "cam_debug_util.h" +#include "cam_tfe_hw_mgr.h" + + +int cam_isp_hw_mgr_init(const char *device_name_str, + struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl, uint32_t isp_device_type) +{ + int rc = 0; + + if (strnstr(device_name_str, "ife", strlen(device_name_str)) || + strnstr(device_name_str, "mc_tfe", strlen(device_name_str))) + rc = cam_ife_hw_mgr_init(hw_mgr, iommu_hdl, isp_device_type); + else if (strnstr(device_name_str, "tfe", strlen(device_name_str))) + rc = cam_tfe_hw_mgr_init(hw_mgr, iommu_hdl); + else { + CAM_ERR(CAM_ISP, "Invalid ISP hw type :%s", device_name_str); + rc = -EINVAL; + } + + return rc; +} + +void cam_isp_hw_mgr_deinit(const char *device_name_str) +{ + if (strnstr(device_name_str, "ife", strlen(device_name_str))) + cam_ife_hw_mgr_deinit(); + else if (strnstr(device_name_str, "tfe", strlen(device_name_str))) + cam_tfe_hw_mgr_deinit(); + else + CAM_ERR(CAM_ISP, "Invalid ISP hw type :%s", device_name_str); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h new file mode 100644 index 0000000000..7f6f5d0314 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_ISP_HW_MGR_H_ +#define _CAM_ISP_HW_MGR_H_ + +#include +#include "cam_isp_hw_mgr_intf.h" +#include "cam_tasklet_util.h" +#include "cam_isp_hw.h" + +#define CAM_ISP_HW_NUM_MAX 8 + +/** + * struct cam_isp_hw_mgr_ctx - common acquired context for managers + * + * @takslet_info: assciated tasklet + * @event_cb: call back interface to ISP context. 
Set during
+ *            acquire device
+ * @cb_priv: first argument for the call back function
+ *           set during acquire device
+ * @mini_dump_cb: Callback for mini dump
+ * @sec_pf_evt_cb: Callback interface to ISP context for CDM page fault
+ *                 set during device acquire
+ *
+ */
+struct cam_isp_hw_mgr_ctx {
+	void *tasklet_info;
+	cam_hw_event_cb_func event_cb;
+	void *cb_priv;
+	cam_ctx_mini_dump_cb_func mini_dump_cb;
+	cam_hw_pagefault_cb_func sec_pf_evt_cb;
+};
+
+/**
+ * struct cam_isp_hw_mgr - ISP HW Manager common object
+ *
+ * @tasklet_pool: Tasklet pool
+ * @img_iommu_hdl: iommu memory handle for regular image buffer
+ * @img_iommu_hdl_secure: iommu memory handle for secure image buffer
+ * @cmd_iommu_hdl: iommu memory handle for regular command buffer
+ * @cmd_iommu_hdl_secure: iommu memory handle for secure command buffer
+ * @scratch_buf_range: scratch buffer range (not for IFE)
+ * @scratch_buf_addr: scratch buffer address (not for IFE)
+ *
+ */
+struct cam_isp_hw_mgr {
+	void *tasklet_pool[CAM_CTX_MAX];
+	int img_iommu_hdl;
+	int img_iommu_hdl_secure;
+	int cmd_iommu_hdl;
+	int cmd_iommu_hdl_secure;
+	uint32_t scratch_buf_range;
+	dma_addr_t scratch_buf_addr;
+};
+
+/**
+ * struct cam_isp_hw_mgr_res - HW resources for the ISP hw manager
+ *
+ * @list: used by the resource list
+ * @res_type: ISP manager resource type
+ * @res_id: resource id based on the resource type for root or
+ *          leaf resource, it matches the KMD interface port id.
+ *          For branch resource, it is defined by the ISP HW
+ *          layer
+ * @is_dual_isp: is dual isp hw resource
+ * @hw_res: hw layer resource array. For single ISP, only one ISP
+ *          hw resource will be acquired. For dual ISP, two hw
+ *          resources from different ISP HW device will be
+ *          acquired
+ * @is_secure: informs whether the resource is in secure mode or not
+ * @num_children: number of the child resource node.
+ * @use_wm_pack: Flag to indicate if WM is to be used for packing
+ * @hw_ctxt_id_mask: HW context ID mask corresponding to this resource
+ * @comp_grp_id: Composite group ID corresponding to this resource
+ *
+ */
+struct cam_isp_hw_mgr_res {
+	struct list_head list;
+	enum cam_isp_resource_type res_type;
+	uint32_t res_id;
+	uint32_t is_dual_isp;
+	struct cam_isp_resource_node *hw_res[CAM_ISP_HW_SPLIT_MAX];
+	uint32_t is_secure;
+	uint32_t num_children;
+	bool use_wm_pack;
+	uint32_t hw_ctxt_id_mask;
+	uint32_t comp_grp_id;
+};
+
+
+/**
+ * struct cam_isp_ctx_base_info - Base hardware information for the context
+ *
+ * @idx: Base resource index
+ * @split_id: Split info for the base resource
+ * @hw_type: HW type [IFE/SFE/..] for the base resource
+ *
+ */
+struct cam_isp_ctx_base_info {
+	uint32_t idx;
+	enum cam_isp_hw_split_id split_id;
+	enum cam_isp_hw_type hw_type;
+};
+#endif /* _CAM_ISP_HW_MGR_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c
new file mode 100644
index 0000000000..8ee5bf1c12
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c
@@ -0,0 +1,6566 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#include +#include +#include +#include +#include + +#include "cam_smmu_api.h" +#include "cam_req_mgr_workq.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_isp_hw.h" +#include "cam_tfe_csid_hw_intf.h" +#include "cam_tfe_hw_intf.h" +#include "cam_isp_packet_parser.h" +#include "cam_tfe_hw_mgr.h" +#include "cam_cdm_intf_api.h" +#include "cam_cdm_util.h" +#include "cam_packet_util.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_mem_mgr_api.h" +#include "cam_common_util.h" +#include "cam_compat.h" +#include "cam_req_mgr_debug.h" +#include "cam_trace.h" +#include "cam_compat.h" + +#define CAM_TFE_HW_CONFIG_TIMEOUT 60 +#define CAM_TFE_HW_CONFIG_WAIT_MAX_TRY 3 + +#define TZ_SVC_SMMU_PROGRAM 0x15 +#define TZ_SAFE_SYSCALL_ID 0x3 +#define CAM_TFE_SAFE_DISABLE 0 +#define CAM_TFE_SAFE_ENABLE 1 +#define SMMU_SE_TFE 0 + + +static struct cam_tfe_hw_mgr g_tfe_hw_mgr; + +static int cam_tfe_hw_mgr_event_handler( + void *priv, + uint32_t evt_id, + void *evt_info); + +static void *cam_tfe_hw_mgr_get_hw_intf( + struct cam_isp_ctx_base_info *base, + struct cam_tfe_hw_mgr_ctx *ctx) +{ + struct cam_tfe_hw_mgr *hw_mgr; + + hw_mgr = ctx->hw_mgr; + + return hw_mgr->tfe_devices[base->idx]->hw_intf; +} + +static int cam_tfe_mgr_cmd_get_sof_timestamp(struct cam_tfe_hw_mgr_ctx *tfe_ctx, + uint64_t *time_stamp, uint64_t *boot_time_stamp, uint64_t *prev_time_stamp); + +static int cam_tfe_mgr_regspace_data_cb(uint32_t reg_base_type, + void *hw_mgr_ctx, struct cam_hw_soc_info **soc_info_ptr, + uint32_t *reg_base_idx) +{ + int rc = 0; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_isp_resource_node *res; + struct cam_tfe_hw_mgr_ctx *ctx = + (struct cam_tfe_hw_mgr_ctx *) hw_mgr_ctx; + + *soc_info_ptr = NULL; + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + if ((hw_mgr_res->res_id != CAM_ISP_HW_TFE_IN_CAMIF) && + !ctx->is_rdi_only_context) + continue; + + switch (reg_base_type) { + case 
CAM_REG_DUMP_BASE_TYPE_CAMNOC: + case CAM_REG_DUMP_BASE_TYPE_ISP_LEFT: + if (!hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_LEFT]) + continue; + + res = hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_LEFT]; + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA, + &soc_info, sizeof(void *)); + + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in regspace data query split idx: %d rc : %d", + CAM_ISP_HW_SPLIT_LEFT, rc); + return rc; + } + + if (reg_base_type == CAM_REG_DUMP_BASE_TYPE_ISP_LEFT) + *reg_base_idx = 0; + else + *reg_base_idx = 1; + + *soc_info_ptr = soc_info; + break; + case CAM_REG_DUMP_BASE_TYPE_ISP_RIGHT: + if (!hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT]) + continue; + + + res = hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT]; + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA, + &soc_info, sizeof(void *)); + + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in regspace data query split idx: %d rc : %d", + CAM_ISP_HW_SPLIT_RIGHT, rc); + return rc; + } + + *reg_base_idx = 0; + *soc_info_ptr = soc_info; + break; + default: + CAM_ERR(CAM_ISP, + "Unrecognized reg base type: %d", + reg_base_type); + return -EINVAL; + } + } + + return rc; +} + +static int cam_tfe_mgr_handle_reg_dump(struct cam_tfe_hw_mgr_ctx *ctx, + struct cam_cmd_buf_desc *reg_dump_buf_desc, uint32_t num_reg_dump_buf, + uint32_t meta_type, + void *soc_dump_args, + bool user_triggered_dump) +{ + int rc = -EINVAL, i; + uintptr_t cpu_addr = 0; + size_t buf_size = 0; + + if (!num_reg_dump_buf || !reg_dump_buf_desc) { + CAM_DBG(CAM_ISP, + "Invalid args for reg dump req_id: [%llu] ctx idx: [%u] meta_type: [%u] num_reg_dump_buf: [%u] reg_dump_buf_desc: [%pK]", + ctx->applied_req_id, ctx->ctx_index, meta_type, + num_reg_dump_buf, reg_dump_buf_desc); + return rc; + } + + if (!atomic_read(&ctx->cdm_done)) + CAM_WARN_RATE_LIMIT(CAM_ISP, + "Reg dump values might be from more than one request"); + + for (i = 0; i < num_reg_dump_buf; i++) { + rc = 
cam_packet_util_validate_cmd_desc(®_dump_buf_desc[i]); + if (rc) + return rc; + + CAM_DBG(CAM_ISP, "Reg dump cmd meta data: %u req_type: %u", + reg_dump_buf_desc[i].meta_data, meta_type); + + if (reg_dump_buf_desc[i].meta_data == meta_type) { + if (in_serving_softirq()) { + cpu_addr = ctx->reg_dump_cmd_buf_addr_len[i].cpu_addr; + buf_size = ctx->reg_dump_cmd_buf_addr_len[i].buf_size; + } else { + rc = cam_mem_get_cpu_buf(reg_dump_buf_desc[i].mem_handle, + &cpu_addr, &buf_size); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in Get cpu addr, rc=%d, mem_handle =%d", + rc, reg_dump_buf_desc[i].mem_handle); + return rc; + } + } + if (!cpu_addr || (buf_size == 0)) { + CAM_ERR(CAM_ISP, "Invalid cpu_addr=%pK mem_handle=%d", + (void *)cpu_addr, reg_dump_buf_desc[i].mem_handle); + if (!in_serving_softirq()) + cam_mem_put_cpu_buf(reg_dump_buf_desc[i].mem_handle); + return rc; + } + rc = cam_soc_util_reg_dump_to_cmd_buf(ctx, + ®_dump_buf_desc[i], + ctx->applied_req_id, + cam_tfe_mgr_regspace_data_cb, + soc_dump_args, + user_triggered_dump, cpu_addr, buf_size); + if (rc) { + CAM_ERR(CAM_ISP, + "Reg dump failed at idx: %d, rc: %d req_id: %llu meta type: %u", + i, rc, ctx->applied_req_id, meta_type); + if (!in_serving_softirq()) + cam_mem_put_cpu_buf(reg_dump_buf_desc[i].mem_handle); + return rc; + } + if (!in_serving_softirq()) + cam_mem_put_cpu_buf(reg_dump_buf_desc[i].mem_handle); + } + } + return rc; +} + +static int cam_tfe_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args) +{ + int rc = 0; + int i; + uint32_t num_dev = 0; + struct cam_tfe_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_query_cap_cmd *query = hw_caps_args; + struct cam_isp_tfe_query_cap_cmd query_isp; + + CAM_DBG(CAM_ISP, "enter"); + + if (sizeof(struct cam_isp_tfe_query_cap_cmd) != query->size) { + CAM_ERR(CAM_ISP, + "Input query cap size:%u does not match expected query cap size: %u", + query->size, sizeof(struct cam_isp_tfe_query_cap_cmd)); + return -EFAULT; + } + + if (copy_from_user(&query_isp, + 
u64_to_user_ptr(query->caps_handle), + sizeof(struct cam_isp_tfe_query_cap_cmd))) { + rc = -EFAULT; + return rc; + } + + query_isp.device_iommu.non_secure = hw_mgr->mgr_common.img_iommu_hdl; + query_isp.device_iommu.secure = hw_mgr->mgr_common.img_iommu_hdl_secure; + query_isp.cdm_iommu.non_secure = hw_mgr->mgr_common.cmd_iommu_hdl; + query_isp.cdm_iommu.secure = hw_mgr->mgr_common.cmd_iommu_hdl_secure; + + for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) { + if (!hw_mgr->csid_devices[i]) + continue; + + query_isp.dev_caps[i].hw_type = CAM_ISP_TFE_HW_TFE; + query_isp.dev_caps[i].hw_version.major = 5; + query_isp.dev_caps[i].hw_version.minor = 3; + query_isp.dev_caps[i].hw_version.incr = 0; + + /* + * device number is based on number of full tfe + * if pix is not supported, set reserve to 1 + */ + if (hw_mgr->tfe_csid_dev_caps[i].num_pix) { + query_isp.dev_caps[i].hw_version.reserved = 0; + num_dev++; + } else + query_isp.dev_caps[i].hw_version.reserved = 1; + } + + query_isp.num_dev = num_dev; + + if (copy_to_user(u64_to_user_ptr(query->caps_handle), + &query_isp, sizeof(struct cam_isp_tfe_query_cap_cmd))) + rc = -EFAULT; + + CAM_DBG(CAM_ISP, "exit rc :%d", rc); + + return rc; +} + +static int cam_tfe_hw_mgr_is_rdi_res(uint32_t res_id) +{ + int rc = 0; + + switch (res_id) { + case CAM_ISP_TFE_OUT_RES_RDI_0: + case CAM_ISP_TFE_OUT_RES_RDI_1: + case CAM_ISP_TFE_OUT_RES_RDI_2: + rc = 1; + break; + default: + break; + } + + return rc; +} + +static int cam_tfe_hw_mgr_convert_rdi_out_res_id_to_in_res(int res_id) +{ + if (res_id == CAM_ISP_TFE_OUT_RES_RDI_0) + return CAM_ISP_HW_TFE_IN_RDI0; + else if (res_id == CAM_ISP_TFE_OUT_RES_RDI_1) + return CAM_ISP_HW_TFE_IN_RDI1; + else if (res_id == CAM_ISP_TFE_OUT_RES_RDI_2) + return CAM_ISP_HW_TFE_IN_RDI1; + + return CAM_ISP_HW_TFE_IN_MAX; +} + +static int cam_tfe_hw_mgr_reset_csid_res( + struct cam_isp_hw_mgr_res *isp_hw_res) +{ + int i; + int rc = 0; + struct cam_hw_intf *hw_intf; + struct cam_tfe_csid_reset_cfg_args 
csid_reset_args; + + csid_reset_args.reset_type = CAM_TFE_CSID_RESET_PATH; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + csid_reset_args.node_res = isp_hw_res->hw_res[i]; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + CAM_DBG(CAM_ISP, "Resetting csid hardware %d", + hw_intf->hw_idx); + if (hw_intf->hw_ops.reset) { + rc = hw_intf->hw_ops.reset(hw_intf->hw_priv, + &csid_reset_args, + sizeof(struct cam_tfe_csid_reset_cfg_args)); + if (rc <= 0) + goto err; + } + } + + return 0; +err: + CAM_ERR(CAM_ISP, "RESET HW res failed: (type:%d, id:%d)", + isp_hw_res->res_type, isp_hw_res->res_id); + return rc; +} + +static int cam_tfe_hw_mgr_init_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res) +{ + int i; + int rc = -EINVAL; + struct cam_hw_intf *hw_intf; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + CAM_DBG(CAM_ISP, "hw type %d hw index:%d", + hw_intf->hw_type, hw_intf->hw_idx); + if (hw_intf->hw_ops.init) { + rc = hw_intf->hw_ops.init(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + if (rc) + goto err; + } + } + + return 0; +err: + CAM_ERR(CAM_ISP, "INIT HW res failed: (type:%d, id:%d)", + isp_hw_res->res_type, isp_hw_res->res_id); + return rc; +} + +static int cam_tfe_hw_mgr_start_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res, + struct cam_tfe_hw_mgr_ctx *ctx) +{ + int i; + int rc = -EINVAL; + struct cam_hw_intf *hw_intf; + + /* Start slave (which is right split) first */ + for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.start) { + rc = hw_intf->hw_ops.start(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start HW resources"); + goto err; + } + CAM_DBG(CAM_ISP, "Start hw type:%d HW idx %d Res %d", + hw_intf->hw_type, + 
hw_intf->hw_idx, + isp_hw_res->hw_res[i]->res_id); + } else { + CAM_ERR(CAM_ISP, "function null"); + goto err; + } + } + + return 0; +err: + CAM_ERR(CAM_ISP, "Start hw res failed (type:%d, id:%d)", + isp_hw_res->res_type, isp_hw_res->res_id); + return rc; +} + +static void cam_tfe_hw_mgr_stop_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res) +{ + int i; + struct cam_hw_intf *hw_intf; + uint32_t dummy_args; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + + if (isp_hw_res->hw_res[i]->res_state != + CAM_ISP_RESOURCE_STATE_STREAMING) + continue; + + if (hw_intf->hw_ops.stop) + hw_intf->hw_ops.stop(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + else + CAM_ERR(CAM_ISP, "stop null"); + if (hw_intf->hw_ops.process_cmd && + isp_hw_res->res_type == CAM_ISP_RESOURCE_TFE_OUT) { + hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ, + &dummy_args, sizeof(dummy_args)); + } + isp_hw_res->hw_res[i]->is_rdi_primary_res = false; + } +} + +static void cam_tfe_hw_mgr_deinit_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res) +{ + int i; + struct cam_hw_intf *hw_intf; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.deinit) + hw_intf->hw_ops.deinit(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + } +} + +static void cam_tfe_hw_mgr_deinit_hw( + struct cam_tfe_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + + if (!ctx->init_done) { + CAM_WARN(CAM_ISP, "ctx is not in init state"); + return; + } + + /* Deinit TFE CSID hw */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) { + CAM_DBG(CAM_ISP, "Going to DeInit TFE CSID"); + cam_tfe_hw_mgr_deinit_hw_res(hw_mgr_res); + } + + /* Deint TFE HW */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + 
cam_tfe_hw_mgr_deinit_hw_res(hw_mgr_res); + } + + ctx->init_done = false; +} + +static int cam_tfe_hw_mgr_init_hw( + struct cam_tfe_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + int rc = 0; + + CAM_DBG(CAM_ISP, "INIT TFE csid ... in ctx id:%d", + ctx->ctx_index); + /* INIT TFE csid */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) { + rc = cam_tfe_hw_mgr_init_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_ISP, "Can not INIT TFE CSID(id :%d)", + hw_mgr_res->res_id); + goto deinit; + } + } + + /* INIT TFE IN */ + CAM_DBG(CAM_ISP, "INIT TFE in resource ctx id:%d", + ctx->ctx_index); + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + rc = cam_tfe_hw_mgr_init_hw_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_ISP, "Can not INIT TFE SRC (%d)", + hw_mgr_res->res_id); + goto deinit; + } + } + + return rc; +deinit: + ctx->init_done = true; + cam_tfe_hw_mgr_deinit_hw(ctx); + return rc; +} + +static int cam_tfe_hw_mgr_put_res( + struct list_head *src_list, + struct cam_isp_hw_mgr_res **res) +{ + struct cam_isp_hw_mgr_res *res_ptr = NULL; + + res_ptr = *res; + if (res_ptr) + list_add_tail(&res_ptr->list, src_list); + + return 0; +} + +static int cam_tfe_hw_mgr_get_res( + struct list_head *src_list, + struct cam_isp_hw_mgr_res **res) +{ + int rc = 0; + struct cam_isp_hw_mgr_res *res_ptr = NULL; + + if (!list_empty(src_list)) { + res_ptr = list_first_entry(src_list, + struct cam_isp_hw_mgr_res, list); + list_del_init(&res_ptr->list); + } else { + CAM_ERR(CAM_ISP, "No more free tfe hw mgr ctx"); + rc = -EINVAL; + } + *res = res_ptr; + + return rc; +} + +static int cam_tfe_hw_mgr_free_hw_res( + struct cam_isp_hw_mgr_res *isp_hw_res) +{ + int rc = 0; + int i; + struct cam_hw_intf *hw_intf; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!isp_hw_res->hw_res[i]) + continue; + hw_intf = isp_hw_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.release) { + rc = hw_intf->hw_ops.release(hw_intf->hw_priv, + isp_hw_res->hw_res[i], + 
sizeof(struct cam_isp_resource_node)); + if (rc) + CAM_ERR(CAM_ISP, + "Release hw resource id %d failed", + isp_hw_res->res_id); + isp_hw_res->hw_res[i] = NULL; + } else + CAM_ERR(CAM_ISP, "Release null"); + } + /* caller should make sure the resource is in a list */ + list_del_init(&isp_hw_res->list); + memset(isp_hw_res, 0, sizeof(*isp_hw_res)); + INIT_LIST_HEAD(&isp_hw_res->list); + + return 0; +} + +static int cam_tfe_mgr_csid_change_halt_mode(struct cam_tfe_hw_mgr_ctx *ctx, + enum cam_tfe_csid_halt_mode halt_mode) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *isp_res; + struct cam_tfe_csid_hw_halt_args halt; + struct cam_hw_intf *hw_intf; + uint32_t i; + int rc = 0; + + if (!ctx->is_dual) + return 0; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (i == CAM_ISP_HW_SPLIT_LEFT) + continue; + + if (!hw_mgr_res->hw_res[i] || + (hw_mgr_res->hw_res[i]->res_state != + CAM_ISP_RESOURCE_STATE_STREAMING)) + continue; + + isp_res = hw_mgr_res->hw_res[i]; + + if ((isp_res->res_type == CAM_ISP_RESOURCE_PIX_PATH) && + (isp_res->res_id == + CAM_TFE_CSID_PATH_RES_IPP)) { + hw_intf = isp_res->hw_intf; + halt.node_res = isp_res; + halt.halt_mode = halt_mode; + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CSID_CHANGE_HALT_MODE, + &halt, + sizeof( + struct cam_tfe_csid_hw_halt_args)); + if (rc) + CAM_ERR(CAM_ISP, "Halt update failed"); + break; + } + } + } + + return rc; +} + +static int cam_tfe_mgr_csid_stop_hw( + struct cam_tfe_hw_mgr_ctx *ctx, struct list_head *stop_list, + uint32_t base_idx, uint32_t stop_cmd) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *isp_res; + struct cam_isp_resource_node *stop_res[CAM_TFE_CSID_PATH_RES_MAX]; + struct cam_tfe_csid_hw_stop_args stop; + struct cam_hw_intf *hw_intf; + uint32_t i, cnt; + + cnt = 0; + list_for_each_entry(hw_mgr_res, stop_list, list) { + for (i = 0; i < 
CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i] || + (hw_mgr_res->hw_res[i]->res_state != + CAM_ISP_RESOURCE_STATE_STREAMING)) + continue; + + isp_res = hw_mgr_res->hw_res[i]; + if (isp_res->hw_intf->hw_idx != base_idx) + continue; + CAM_DBG(CAM_ISP, "base_idx %d res_id %d cnt %u", + base_idx, isp_res->res_id, cnt); + stop_res[cnt] = isp_res; + cnt++; + } + } + + if (cnt) { + hw_intf = stop_res[0]->hw_intf; + stop.num_res = cnt; + stop.node_res = stop_res; + stop.stop_cmd = stop_cmd; + hw_intf->hw_ops.stop(hw_intf->hw_priv, &stop, sizeof(stop)); + + for (i = 0; i < cnt; i++) + stop_res[i]->is_rdi_primary_res = false; + } + + return 0; +} + +static int cam_tfe_hw_mgr_release_hw_for_ctx( + struct cam_tfe_hw_mgr_ctx *tfe_ctx) +{ + uint32_t i; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_mgr_res *hw_mgr_res_temp; + + /* tfe out resource */ + for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++) + cam_tfe_hw_mgr_free_hw_res(&tfe_ctx->res_list_tfe_out[i]); + + /* tfe in resource */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &tfe_ctx->res_list_tfe_in, list) { + cam_tfe_hw_mgr_free_hw_res(hw_mgr_res); + cam_tfe_hw_mgr_put_res(&tfe_ctx->free_res_list, &hw_mgr_res); + } + + /* tfe csid resource */ + list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp, + &tfe_ctx->res_list_tfe_csid, list) { + cam_tfe_hw_mgr_free_hw_res(hw_mgr_res); + cam_tfe_hw_mgr_put_res(&tfe_ctx->free_res_list, &hw_mgr_res); + } + + /* clean up the callback function */ + tfe_ctx->common.cb_priv = NULL; + tfe_ctx->common.event_cb = NULL; + + CAM_DBG(CAM_ISP, "release context completed ctx id:%d", + tfe_ctx->ctx_index); + + return 0; +} + + +static int cam_tfe_hw_mgr_put_ctx( + struct list_head *src_list, + struct cam_tfe_hw_mgr_ctx **tfe_ctx) +{ + struct cam_tfe_hw_mgr_ctx *ctx_ptr = NULL; + + mutex_lock(&g_tfe_hw_mgr.ctx_mutex); + ctx_ptr = *tfe_ctx; + if (ctx_ptr) + list_add_tail(&ctx_ptr->list, src_list); + *tfe_ctx = NULL; + mutex_unlock(&g_tfe_hw_mgr.ctx_mutex); + 
return 0; +} + +static int cam_tfe_hw_mgr_get_ctx( + struct list_head *src_list, + struct cam_tfe_hw_mgr_ctx **tfe_ctx) +{ + int rc = 0; + struct cam_tfe_hw_mgr_ctx *ctx_ptr = NULL; + + mutex_lock(&g_tfe_hw_mgr.ctx_mutex); + if (!list_empty(src_list)) { + ctx_ptr = list_first_entry(src_list, + struct cam_tfe_hw_mgr_ctx, list); + list_del_init(&ctx_ptr->list); + } else { + CAM_ERR(CAM_ISP, "No more free tfe hw mgr ctx"); + rc = -EINVAL; + } + *tfe_ctx = ctx_ptr; + mutex_unlock(&g_tfe_hw_mgr.ctx_mutex); + + return rc; +} + +static void cam_tfe_hw_mgr_dump_all_ctx(void) +{ + uint32_t i; + struct cam_tfe_hw_mgr_ctx *ctx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + + mutex_lock(&g_tfe_hw_mgr.ctx_mutex); + list_for_each_entry(ctx, &g_tfe_hw_mgr.used_ctx_list, list) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "ctx id:%d is_dual:%d num_base:%d rdi only:%d", + ctx->ctx_index, ctx->is_dual, + ctx->num_base, ctx->is_rdi_only_context); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, + list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + CAM_INFO_RATE_LIMIT(CAM_ISP, + "csid:%d res_type:%d res_id:%d res_state:%d", + hw_mgr_res->hw_res[i]->hw_intf->hw_idx, + hw_mgr_res->hw_res[i]->res_type, + hw_mgr_res->hw_res[i]->res_id, + hw_mgr_res->hw_res[i]->res_state); + } + } + + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, + list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + CAM_INFO_RATE_LIMIT(CAM_ISP, + "TFE IN:%d res_type:%d res_id:%d res_state:%d", + hw_mgr_res->hw_res[i]->hw_intf->hw_idx, + hw_mgr_res->hw_res[i]->res_type, + hw_mgr_res->hw_res[i]->res_id, + hw_mgr_res->hw_res[i]->res_state); + } + } + } + mutex_unlock(&g_tfe_hw_mgr.ctx_mutex); + +} + +static void cam_tfe_mgr_add_base_info( + struct cam_tfe_hw_mgr_ctx *ctx, + enum cam_isp_hw_split_id split_id, + uint32_t base_idx) +{ + uint32_t i; + + if (!ctx->num_base) { + ctx->base[0].split_id = split_id; + 
ctx->base[0].idx = base_idx; + ctx->num_base++; + CAM_DBG(CAM_ISP, + "Add split id = %d for base idx = %d num_base=%d", + split_id, base_idx, ctx->num_base); + } else { + /*Check if base index already exists in the list */ + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].idx == base_idx) { + if (split_id != CAM_ISP_HW_SPLIT_MAX && + ctx->base[i].split_id == + CAM_ISP_HW_SPLIT_MAX) + ctx->base[i].split_id = split_id; + + break; + } + } + + if (i == ctx->num_base) { + ctx->base[ctx->num_base].split_id = split_id; + ctx->base[ctx->num_base].idx = base_idx; + ctx->num_base++; + CAM_DBG(CAM_ISP, + "Add split_id=%d for base idx=%d num_base=%d", + split_id, base_idx, ctx->num_base); + } + } +} + +static int cam_tfe_mgr_process_base_info( + struct cam_tfe_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *res = NULL; + uint32_t i; + + if (list_empty(&ctx->res_list_tfe_in)) { + CAM_ERR(CAM_ISP, "tfe in list empty"); + return -ENODEV; + } + + /* TFE in resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + res = hw_mgr_res->hw_res[i]; + cam_tfe_mgr_add_base_info(ctx, i, + res->hw_intf->hw_idx); + CAM_DBG(CAM_ISP, "add base info for hw %d", + res->hw_intf->hw_idx); + } + } + CAM_DBG(CAM_ISP, "ctx base num = %d", ctx->num_base); + + return 0; +} + +static int cam_tfe_hw_mgr_acquire_res_tfe_out_rdi( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_isp_hw_mgr_res *tfe_in_res, + struct cam_isp_tfe_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + struct cam_tfe_acquire_args tfe_acquire; + struct cam_isp_tfe_out_port_generic_info *out_port = NULL; + struct cam_isp_hw_mgr_res *tfe_out_res; + struct cam_hw_intf *hw_intf; + struct cam_tfe_hw_comp_record *comp_grp = NULL; + uint32_t i, tfe_out_res_id, tfe_in_res_id, index; + + /* take left 
resource */ + tfe_in_res_id = tfe_in_res->hw_res[0]->res_id; + + switch (tfe_in_res_id) { + case CAM_ISP_HW_TFE_IN_RDI0: + tfe_out_res_id = CAM_ISP_TFE_OUT_RES_RDI_0; + break; + case CAM_ISP_HW_TFE_IN_RDI1: + tfe_out_res_id = CAM_ISP_TFE_OUT_RES_RDI_1; + break; + case CAM_ISP_HW_TFE_IN_RDI2: + tfe_out_res_id = CAM_ISP_TFE_OUT_RES_RDI_2; + break; + default: + CAM_ERR(CAM_ISP, "invalid resource type"); + goto err; + } + CAM_DBG(CAM_ISP, "tfe_in_res_id = %d, tfe_out_red_id = %d", + tfe_in_res_id, tfe_out_res_id); + + tfe_acquire.rsrc_type = CAM_ISP_RESOURCE_TFE_OUT; + tfe_acquire.tasklet = tfe_ctx->common.tasklet_info; + + tfe_out_res = &tfe_ctx->res_list_tfe_out[tfe_out_res_id & 0xFF]; + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + + CAM_DBG(CAM_ISP, "i = %d, tfe_out_res_id = %d, out_port: %d", + i, tfe_out_res_id, out_port->res_id); + + if (tfe_out_res_id != out_port->res_id) + continue; + + tfe_acquire.tfe_out.cdm_ops = tfe_ctx->cdm_ops; + tfe_acquire.priv = tfe_ctx; + tfe_acquire.tfe_out.out_port_info = out_port; + tfe_acquire.tfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT; + tfe_acquire.tfe_out.unique_id = tfe_ctx->ctx_index; + tfe_acquire.tfe_out.is_dual = 0; + tfe_acquire.event_cb = cam_tfe_hw_mgr_event_handler; + hw_intf = tfe_in_res->hw_res[0]->hw_intf; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &tfe_acquire, + sizeof(struct cam_tfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x", + out_port->res_id); + goto err; + } + + index = tfe_acquire.tfe_out.comp_grp_id; + comp_grp = &tfe_ctx->tfe_bus_comp_grp[index]; + comp_grp->res_id[comp_grp->num_res] = tfe_out_res_id; + comp_grp->num_res++; + break; + } + + if (i == in_port->num_out_res) { + CAM_ERR(CAM_ISP, + "Cannot acquire out resource, i=%d, num_out_res=%d", + i, in_port->num_out_res); + goto err; + } + + tfe_out_res->hw_res[0] = tfe_acquire.tfe_out.rsrc_node; + tfe_out_res->is_dual_isp = 0; + tfe_out_res->res_id = tfe_out_res_id; + 
tfe_out_res->res_type = CAM_ISP_RESOURCE_TFE_OUT; + tfe_in_res->num_children++; + + return 0; +err: + return rc; +} + +static int cam_tfe_hw_mgr_acquire_res_tfe_out_pixel( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_isp_hw_mgr_res *tfe_in_res, + struct cam_isp_tfe_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + uint32_t i, j, k; + struct cam_tfe_acquire_args tfe_acquire; + struct cam_isp_tfe_out_port_generic_info *out_port; + struct cam_isp_hw_mgr_res *tfe_out_res; + struct cam_hw_intf *hw_intf; + struct cam_tfe_hw_comp_record *comp_grp = NULL; + uint32_t index; + + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + k = out_port->res_id & 0xFF; + if (k >= CAM_TFE_HW_OUT_RES_MAX) { + CAM_ERR(CAM_ISP, "invalid output resource type 0x%x", + out_port->res_id); + continue; + } + + if (cam_tfe_hw_mgr_is_rdi_res(out_port->res_id)) + continue; + + CAM_DBG(CAM_ISP, "res_type 0x%x", out_port->res_id); + + tfe_out_res = &tfe_ctx->res_list_tfe_out[k]; + tfe_out_res->is_dual_isp = in_port->usage_type; + + tfe_acquire.rsrc_type = CAM_ISP_RESOURCE_TFE_OUT; + tfe_acquire.tasklet = tfe_ctx->common.tasklet_info; + tfe_acquire.tfe_out.cdm_ops = tfe_ctx->cdm_ops; + tfe_acquire.priv = tfe_ctx; + tfe_acquire.tfe_out.out_port_info = out_port; + tfe_acquire.tfe_out.is_dual = tfe_in_res->is_dual_isp; + tfe_acquire.tfe_out.unique_id = tfe_ctx->ctx_index; + tfe_acquire.event_cb = cam_tfe_hw_mgr_event_handler; + + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + if (!tfe_in_res->hw_res[j]) + continue; + + hw_intf = tfe_in_res->hw_res[j]->hw_intf; + + if (j == CAM_ISP_HW_SPLIT_LEFT) { + tfe_acquire.tfe_out.split_id = + CAM_ISP_HW_SPLIT_LEFT; + if (tfe_in_res->is_dual_isp) + tfe_acquire.tfe_out.is_master = 1; + else + tfe_acquire.tfe_out.is_master = 0; + } else { + tfe_acquire.tfe_out.split_id = + CAM_ISP_HW_SPLIT_RIGHT; + tfe_acquire.tfe_out.is_master = 0; + } + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &tfe_acquire, + sizeof(struct 
cam_tfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "Can not acquire out resource 0x%x", + out_port->res_id); + goto err; + } + + tfe_out_res->hw_res[j] = + tfe_acquire.tfe_out.rsrc_node; + + index = tfe_acquire.tfe_out.comp_grp_id; + comp_grp = &tfe_ctx->tfe_bus_comp_grp[index]; + comp_grp->res_id[comp_grp->num_res] = + tfe_out_res->hw_res[j]->res_id; + comp_grp->num_res++; + + CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x comp grp id:%d", + tfe_out_res->hw_res[j]->res_type, + tfe_out_res->hw_res[j]->res_id, + tfe_acquire.tfe_out.comp_grp_id); + + } + tfe_out_res->res_type = CAM_ISP_RESOURCE_TFE_OUT; + tfe_out_res->res_id = out_port->res_id; + tfe_in_res->num_children++; + } + + return 0; +err: + /* release resource at the entry function */ + return rc; +} + +static int cam_tfe_hw_mgr_acquire_res_tfe_out( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_isp_tfe_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + struct cam_isp_hw_mgr_res *tfe_in_res; + + list_for_each_entry(tfe_in_res, &tfe_ctx->res_list_tfe_in, list) { + if (tfe_in_res->num_children) + continue; + + switch (tfe_in_res->res_id) { + case CAM_ISP_HW_TFE_IN_CAMIF: + rc = cam_tfe_hw_mgr_acquire_res_tfe_out_pixel(tfe_ctx, + tfe_in_res, in_port); + break; + case CAM_ISP_HW_TFE_IN_RDI0: + case CAM_ISP_HW_TFE_IN_RDI1: + case CAM_ISP_HW_TFE_IN_RDI2: + rc = cam_tfe_hw_mgr_acquire_res_tfe_out_rdi(tfe_ctx, + tfe_in_res, in_port); + break; + default: + CAM_ERR(CAM_ISP, "Unknown TFE SRC resource: %d", + tfe_in_res->res_id); + break; + } + if (rc) + goto err; + } + + return 0; +err: + /* release resource on entry function */ + return rc; +} + +static int cam_tfe_hw_mgr_acquire_res_tfe_in( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_isp_tfe_in_port_generic_info *in_port, + uint32_t *pdaf_enable) +{ + int rc = -EINVAL; + int i; + struct cam_isp_hw_mgr_res *csid_res; + struct cam_isp_hw_mgr_res *tfe_src_res; + struct cam_tfe_acquire_args tfe_acquire; + struct cam_hw_intf *hw_intf; + struct 
cam_tfe_hw_mgr *tfe_hw_mgr; + + tfe_hw_mgr = tfe_ctx->hw_mgr; + + list_for_each_entry(csid_res, &tfe_ctx->res_list_tfe_csid, list) { + if (csid_res->num_children) + continue; + + rc = cam_tfe_hw_mgr_get_res(&tfe_ctx->free_res_list, + &tfe_src_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource"); + goto err; + } + cam_tfe_hw_mgr_put_res(&tfe_ctx->res_list_tfe_in, + &tfe_src_res); + tfe_src_res->hw_res[0] = NULL; + tfe_src_res->hw_res[1] = NULL; + + tfe_acquire.rsrc_type = CAM_ISP_RESOURCE_TFE_IN; + tfe_acquire.tasklet = tfe_ctx->common.tasklet_info; + tfe_acquire.tfe_in.cdm_ops = tfe_ctx->cdm_ops; + tfe_acquire.tfe_in.in_port = in_port; + tfe_acquire.tfe_in.camif_pd_enable = *pdaf_enable; + tfe_acquire.priv = tfe_ctx; + tfe_acquire.event_cb = cam_tfe_hw_mgr_event_handler; + + switch (csid_res->res_id) { + case CAM_TFE_CSID_PATH_RES_IPP: + tfe_acquire.tfe_in.res_id = + CAM_ISP_HW_TFE_IN_CAMIF; + + if (csid_res->is_dual_isp) + tfe_acquire.tfe_in.sync_mode = + CAM_ISP_HW_SYNC_MASTER; + else + tfe_acquire.tfe_in.sync_mode = + CAM_ISP_HW_SYNC_NONE; + + break; + case CAM_TFE_CSID_PATH_RES_RDI_0: + tfe_acquire.tfe_in.res_id = CAM_ISP_HW_TFE_IN_RDI0; + tfe_acquire.tfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + break; + case CAM_TFE_CSID_PATH_RES_RDI_1: + tfe_acquire.tfe_in.res_id = CAM_ISP_HW_TFE_IN_RDI1; + tfe_acquire.tfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + break; + case CAM_TFE_CSID_PATH_RES_RDI_2: + tfe_acquire.tfe_in.res_id = CAM_ISP_HW_TFE_IN_RDI2; + tfe_acquire.tfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE; + break; + default: + CAM_ERR(CAM_ISP, "Wrong TFE CSID Resource Node"); + goto err; + } + tfe_src_res->res_type = tfe_acquire.rsrc_type; + tfe_src_res->res_id = tfe_acquire.tfe_in.res_id; + tfe_src_res->is_dual_isp = csid_res->is_dual_isp; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!csid_res->hw_res[i]) + continue; + + hw_intf = tfe_hw_mgr->tfe_devices[ + csid_res->hw_res[i]->hw_intf->hw_idx]->hw_intf; + + /* fill in more acquire information as 
needed */ + /* slave Camif resource, */ + if (i == CAM_ISP_HW_SPLIT_RIGHT && + tfe_src_res->is_dual_isp) { + tfe_acquire.tfe_in.sync_mode = + CAM_ISP_HW_SYNC_SLAVE; + tfe_acquire.tfe_in.dual_tfe_sync_sel_idx = + csid_res->hw_res[0]->hw_intf->hw_idx; + } else if (i == CAM_ISP_HW_SPLIT_LEFT && + tfe_src_res->is_dual_isp) + tfe_acquire.tfe_in.dual_tfe_sync_sel_idx = + csid_res->hw_res[1]->hw_intf->hw_idx; + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &tfe_acquire, + sizeof(struct cam_tfe_acquire_args)); + if (rc) { + CAM_ERR(CAM_ISP, + "Can not acquire TFE HW res %d", + csid_res->res_id); + goto err; + } + tfe_src_res->hw_res[i] = tfe_acquire.tfe_in.rsrc_node; + CAM_DBG(CAM_ISP, + "acquire success TFE:%d res type :0x%x res id:0x%x", + hw_intf->hw_idx, + tfe_src_res->hw_res[i]->res_type, + tfe_src_res->hw_res[i]->res_id); + + } + csid_res->num_children++; + } + + return 0; +err: + /* release resource at the entry function */ + return rc; +} + +static int cam_tfe_hw_mgr_acquire_res_tfe_csid_pxl( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_isp_tfe_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + int i, j; + uint32_t acquired_cnt = 0; + struct cam_tfe_hw_mgr *tfe_hw_mgr; + struct cam_isp_hw_mgr_res *csid_res; + struct cam_hw_intf *hw_intf; + struct cam_tfe_csid_hw_reserve_resource_args csid_acquire; + enum cam_tfe_csid_path_res_id path_res_id; + struct cam_isp_hw_mgr_res *csid_res_temp, *csid_res_iterator; + struct cam_isp_tfe_out_port_generic_info *out_port = NULL; + + tfe_hw_mgr = tfe_ctx->hw_mgr; + /* get csid resource */ + path_res_id = CAM_TFE_CSID_PATH_RES_IPP; + + rc = cam_tfe_hw_mgr_get_res(&tfe_ctx->free_res_list, &csid_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource"); + goto end; + } + + csid_res_temp = csid_res; + + csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_acquire.res_id = path_res_id; + csid_acquire.in_port = in_port; + csid_acquire.out_port = in_port->data; + csid_acquire.node_res = NULL; + 
csid_acquire.event_cb_prv = tfe_ctx; + csid_acquire.event_cb = cam_tfe_hw_mgr_event_handler; + if (in_port->num_out_res) + out_port = &(in_port->data[0]); + + if (in_port->usage_type) + csid_acquire.sync_mode = CAM_ISP_HW_SYNC_MASTER; + else + csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE; + + /* Try acquiring CSID resource from previously acquired HW */ + list_for_each_entry(csid_res_iterator, &tfe_ctx->res_list_tfe_csid, + list) { + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!csid_res_iterator->hw_res[i]) + continue; + + if (csid_res_iterator->is_secure == 1 || + (csid_res_iterator->is_secure == 0 && + in_port->num_out_res && + out_port->secure_mode == 1)) + continue; + + hw_intf = csid_res_iterator->hw_res[i]->hw_intf; + csid_acquire.master_idx = hw_intf->hw_idx; + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &csid_acquire, sizeof(csid_acquire)); + if (rc) { + CAM_DBG(CAM_ISP, + "No tfe csid resource from hw %d", + hw_intf->hw_idx); + continue; + } + + csid_res_temp->hw_res[acquired_cnt++] = + csid_acquire.node_res; + + CAM_DBG(CAM_ISP, + "acquired from old csid(%s)=%d CSID rsrc successfully", + (i == 0) ? "left" : "right", + hw_intf->hw_idx); + + if (in_port->usage_type && acquired_cnt == 1 && + path_res_id == CAM_TFE_CSID_PATH_RES_IPP) + /* + * Continue to acquire Right for IPP. + * Dual TFE for RDI is not currently + * supported. 
+ */ + continue; + + if (acquired_cnt) + /* + * If successfully acquired CSID from + * previously acquired HW, skip the next + * part + */ + goto acquire_successful; + } + } + + /* + * If successfully acquired CSID from + * previously acquired HW, skip the next + * part + */ + if (acquired_cnt) + goto acquire_successful; + + /* Acquire Left if not already acquired */ + if (in_port->usage_type) { + for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) { + if (!tfe_hw_mgr->csid_devices[i]) + continue; + + hw_intf = tfe_hw_mgr->csid_devices[i]; + csid_acquire.master_idx = hw_intf->hw_idx; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &csid_acquire, sizeof(csid_acquire)); + if (rc) + continue; + else { + csid_res_temp->hw_res[acquired_cnt++] = + csid_acquire.node_res; + break; + } + } + + if (i == CAM_TFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) { + CAM_ERR(CAM_ISP, + "Can not acquire left tfe csid path resource %d", + path_res_id); + goto put_res; + } + } else { + for (i = (CAM_TFE_CSID_HW_NUM_MAX - 1); i >= 0; i--) { + if (!tfe_hw_mgr->csid_devices[i]) + continue; + + hw_intf = tfe_hw_mgr->csid_devices[i]; + csid_acquire.master_idx = hw_intf->hw_idx; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &csid_acquire, sizeof(csid_acquire)); + if (rc) + continue; + else { + csid_res_temp->hw_res[acquired_cnt++] = + csid_acquire.node_res; + break; + } + } + + if (i == -1 || !csid_acquire.node_res) { + CAM_ERR(CAM_ISP, + "Can not acquire tfe csid path resource %d", + path_res_id); + goto put_res; + } + } +acquire_successful: + CAM_DBG(CAM_ISP, "CSID path left acquired success. 
is_dual %d", + in_port->usage_type); + + csid_res_temp->res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_res_temp->res_id = path_res_id; + + if (in_port->usage_type) { + csid_res_temp->is_dual_isp = 1; + tfe_ctx->is_dual = true; + tfe_ctx->master_hw_idx = + csid_res_temp->hw_res[0]->hw_intf->hw_idx; + } else + csid_res_temp->is_dual_isp = 0; + + if (in_port->num_out_res) + csid_res_temp->is_secure = out_port->secure_mode; + + cam_tfe_hw_mgr_put_res(&tfe_ctx->res_list_tfe_csid, &csid_res); + + /* + * Acquire Right if not already acquired. + * Dual TFE for RDI is not currently supported. + */ + if (in_port->usage_type && (path_res_id == CAM_TFE_CSID_PATH_RES_IPP) + && (acquired_cnt == 1)) { + memset(&csid_acquire, 0, sizeof(csid_acquire)); + csid_acquire.node_res = NULL; + csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_acquire.res_id = path_res_id; + csid_acquire.in_port = in_port; + csid_acquire.master_idx = + csid_res_temp->hw_res[0]->hw_intf->hw_idx; + csid_acquire.sync_mode = CAM_ISP_HW_SYNC_SLAVE; + csid_acquire.node_res = NULL; + csid_acquire.out_port = in_port->data; + csid_acquire.event_cb_prv = tfe_ctx; + csid_acquire.event_cb = cam_tfe_hw_mgr_event_handler; + + for (j = 0; j < CAM_TFE_CSID_HW_NUM_MAX; j++) { + if (!tfe_hw_mgr->csid_devices[j]) + continue; + + if (j == csid_res_temp->hw_res[0]->hw_intf->hw_idx) + continue; + + hw_intf = tfe_hw_mgr->csid_devices[j]; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &csid_acquire, sizeof(csid_acquire)); + if (rc) + continue; + else + break; + } + + if (j == CAM_TFE_CSID_HW_NUM_MAX) { + CAM_ERR(CAM_ISP, + "Can not acquire tfe csid pixel resource"); + goto end; + } + csid_res_temp->hw_res[1] = csid_acquire.node_res; + tfe_ctx->slave_hw_idx = + csid_res_temp->hw_res[1]->hw_intf->hw_idx; + CAM_DBG(CAM_ISP, "CSID right acquired success is_dual %d", + in_port->usage_type); + } + + return 0; +put_res: + cam_tfe_hw_mgr_put_res(&tfe_ctx->free_res_list, &csid_res); +end: + return rc; +} + +static enum 
cam_tfe_csid_path_res_id + cam_tfe_hw_mgr_get_tfe_csid_rdi_res_type( + uint32_t out_port_type) +{ + enum cam_tfe_csid_path_res_id path_id; + + CAM_DBG(CAM_ISP, "out_port_type %x", out_port_type); + switch (out_port_type) { + case CAM_ISP_TFE_OUT_RES_RDI_0: + path_id = CAM_TFE_CSID_PATH_RES_RDI_0; + break; + case CAM_ISP_TFE_OUT_RES_RDI_1: + path_id = CAM_TFE_CSID_PATH_RES_RDI_1; + break; + case CAM_ISP_TFE_OUT_RES_RDI_2: + path_id = CAM_TFE_CSID_PATH_RES_RDI_2; + break; + default: + path_id = CAM_TFE_CSID_PATH_RES_MAX; + CAM_DBG(CAM_ISP, "maximum rdi type exceeded out_port_type:%d ", + out_port_type); + break; + } + + CAM_DBG(CAM_ISP, "out_port %x path_id %d", out_port_type, path_id); + + return path_id; +} + +static int cam_tfe_hw_mgr_acquire_res_tfe_csid_rdi( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_isp_tfe_in_port_generic_info *in_port) +{ + int rc = -EINVAL; + int i, j; + + struct cam_tfe_hw_mgr *tfe_hw_mgr; + struct cam_isp_hw_mgr_res *csid_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_tfe_out_port_generic_info *out_port; + struct cam_tfe_csid_hw_reserve_resource_args csid_acquire; + struct cam_isp_hw_mgr_res *csid_res_iterator; + enum cam_tfe_csid_path_res_id path_res_id; + + tfe_hw_mgr = tfe_ctx->hw_mgr; + + for (j = 0; j < in_port->num_out_res; j++) { + out_port = &in_port->data[j]; + path_res_id = cam_tfe_hw_mgr_get_tfe_csid_rdi_res_type( + out_port->res_id); + + if (path_res_id == CAM_TFE_CSID_PATH_RES_MAX) + continue; + + rc = cam_tfe_hw_mgr_get_res(&tfe_ctx->free_res_list, &csid_res); + if (rc) { + CAM_ERR(CAM_ISP, "No more free hw mgr resource"); + goto end; + } + + memset(&csid_acquire, 0, sizeof(csid_acquire)); + csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_acquire.res_id = path_res_id; + csid_acquire.in_port = in_port; + csid_acquire.out_port = out_port; + csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE; + csid_acquire.node_res = NULL; + csid_acquire.event_cb = cam_tfe_hw_mgr_event_handler; + csid_acquire.event_cb_prv = 
tfe_ctx; + + /* Try acquiring CSID resource from previously acquired HW */ + list_for_each_entry(csid_res_iterator, + &tfe_ctx->res_list_tfe_csid, list) { + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!csid_res_iterator->hw_res[i]) + continue; + + if (csid_res_iterator->is_secure == 1 || + (csid_res_iterator->is_secure == 0 && + in_port->num_out_res && + out_port->secure_mode == 1)) + continue; + + hw_intf = csid_res_iterator->hw_res[i]->hw_intf; + + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &csid_acquire, sizeof(csid_acquire)); + if (rc) { + CAM_DBG(CAM_ISP, + "No tfe csid resource from hw %d", + hw_intf->hw_idx); + continue; + } + + if (csid_acquire.node_res == NULL) { + CAM_ERR(CAM_ISP, + "Acquire RDI:%d rsrc failed", + path_res_id); + goto put_res; + } + + csid_res->hw_res[0] = csid_acquire.node_res; + + CAM_DBG(CAM_ISP, + "acquired from old csid(%s)=%d CSID rsrc successfully", + (i == 0) ? "left" : "right", + hw_intf->hw_idx); + /* + * If successfully acquired CSID from + * previously acquired HW, skip the next + * part + */ + goto acquire_successful; + } + } + + /* Acquire if not already acquired */ + if (tfe_ctx->is_dual) { + for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) { + if (!tfe_hw_mgr->csid_devices[i]) + continue; + + hw_intf = tfe_hw_mgr->csid_devices[i]; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &csid_acquire, sizeof(csid_acquire)); + if (rc) + continue; + else { + csid_res->hw_res[0] = + csid_acquire.node_res; + break; + } + } + + if (i == CAM_TFE_CSID_HW_NUM_MAX || + !csid_acquire.node_res) { + CAM_ERR(CAM_ISP, + "Can not acquire tfe csid rdi path%d", + path_res_id); + + rc = -EINVAL; + goto put_res; + } + } else { + for (i = CAM_TFE_CSID_HW_NUM_MAX - 1; i >= 0; i--) { + if (!tfe_hw_mgr->csid_devices[i]) + continue; + + hw_intf = tfe_hw_mgr->csid_devices[i]; + rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, + &csid_acquire, sizeof(csid_acquire)); + if (rc) + continue; + else { + csid_res->hw_res[0] = + csid_acquire.node_res; 
+ break; + } + } + + if (i == -1 || !csid_acquire.node_res) { + CAM_ERR(CAM_ISP, + "Can not acquire tfe csid rdi path %d", + path_res_id); + + rc = -EINVAL; + goto put_res; + } + } + +acquire_successful: + CAM_DBG(CAM_ISP, "CSID path :%d acquired success", path_res_id); + csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH; + csid_res->res_id = path_res_id; + csid_res->hw_res[1] = NULL; + csid_res->is_dual_isp = 0; + + if (in_port->num_out_res) + csid_res->is_secure = out_port->secure_mode; + + cam_tfe_hw_mgr_put_res(&tfe_ctx->res_list_tfe_csid, &csid_res); + } + + return 0; +put_res: + cam_tfe_hw_mgr_put_res(&tfe_ctx->free_res_list, &csid_res); +end: + return rc; +} + +static int cam_tfe_hw_mgr_preprocess_port( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_isp_tfe_in_port_generic_info *in_port, + int *ipp_count, + int *rdi_count, + int *pdaf_enable) +{ + int ipp_num = 0; + int rdi_num = 0; + bool rdi2_enable = false; + uint32_t i; + struct cam_isp_tfe_out_port_generic_info *out_port; + struct cam_tfe_hw_mgr *tfe_hw_mgr; + struct cam_hw_intf *tfe_device; + bool pdaf_rdi2_mux_en = false; + + tfe_hw_mgr = tfe_ctx->hw_mgr; + tfe_device = tfe_hw_mgr->tfe_devices[0]->hw_intf; + + tfe_device->hw_ops.process_cmd(tfe_device->hw_priv, + CAM_ISP_HW_CMD_IS_PDAF_RDI2_MUX_EN, + &pdaf_rdi2_mux_en, + sizeof(pdaf_rdi2_mux_en)); + + for (i = 0; i < in_port->num_out_res; i++) { + out_port = &in_port->data[i]; + CAM_DBG(CAM_ISP, "out_res id %d", out_port->res_id); + + if (cam_tfe_hw_mgr_is_rdi_res(out_port->res_id)) { + rdi_num++; + if (out_port->res_id == CAM_ISP_TFE_OUT_RES_RDI_2) + rdi2_enable = true; + } else { + ipp_num++; + if (out_port->res_id == CAM_ISP_TFE_OUT_RES_PDAF) + *pdaf_enable = 1; + } + } + + if (pdaf_rdi2_mux_en && *pdaf_enable && rdi2_enable) { + CAM_ERR(CAM_ISP, + "invalid outports both RDI2 and PDAF enabled"); + return -EINVAL; + } + + *ipp_count = ipp_num; + *rdi_count = rdi_num; + + CAM_DBG(CAM_ISP, "rdi: %d ipp: %d pdaf:%d", rdi_num, ipp_num, + *pdaf_enable); 
+ + return 0; +} + +static int cam_tfe_mgr_acquire_hw_for_ctx( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_isp_tfe_in_port_generic_info *in_port, + uint32_t *num_pix_port, uint32_t *num_rdi_port, + uint32_t *pdaf_enable) +{ + int rc = -EINVAL; + int is_dual_isp = 0; + int ipp_count = 0; + int rdi_count = 0; + + is_dual_isp = in_port->usage_type; + + cam_tfe_hw_mgr_preprocess_port(tfe_ctx, in_port, &ipp_count, + &rdi_count, pdaf_enable); + + if (!ipp_count && !rdi_count) { + CAM_ERR(CAM_ISP, + "No PIX or RDI"); + return -EINVAL; + } + + if (ipp_count) { + /* get tfe csid IPP resource */ + rc = cam_tfe_hw_mgr_acquire_res_tfe_csid_pxl(tfe_ctx, + in_port); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire TFE CSID IPP resource Failed dual:%d", + in_port->usage_type); + goto err; + } + } + + if (rdi_count) { + /* get tfe csid rdi resource */ + rc = cam_tfe_hw_mgr_acquire_res_tfe_csid_rdi(tfe_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire TFE CSID RDI resource Failed dual:%d", + in_port->usage_type); + goto err; + } + } + + rc = cam_tfe_hw_mgr_acquire_res_tfe_in(tfe_ctx, in_port, pdaf_enable); + if (rc) { + CAM_ERR(CAM_ISP, + "Acquire TFE IN resource Failed dual:%d", in_port->usage_type); + goto err; + } + + CAM_DBG(CAM_ISP, "Acquiring TFE OUT resource..."); + rc = cam_tfe_hw_mgr_acquire_res_tfe_out(tfe_ctx, in_port); + if (rc) { + CAM_ERR(CAM_ISP, "Acquire TFE OUT resource Failed dual:%d", + in_port->usage_type); + goto err; + } + + *num_pix_port += ipp_count; + *num_rdi_port += rdi_count; + + return 0; +err: + /* release resource at the acquire entry funciton */ + return rc; +} + +void cam_tfe_cam_cdm_callback(uint32_t handle, void *userdata, + enum cam_cdm_cb_status status, void *cookie) +{ + struct cam_isp_prepare_hw_update_data *hw_update_data = NULL; + struct cam_tfe_hw_mgr_ctx *ctx = NULL; + uint32_t *buf_start, *buf_end; + int i, rc = 0; + size_t len = 0; + uint32_t *buf_addr; + + if (!userdata) { + CAM_ERR(CAM_ISP, "Invalid args"); + return; + } + + ctx 
= (struct cam_tfe_hw_mgr_ctx *)userdata;
+	hw_update_data = ctx->cdm_userdata.hw_update_data;
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		complete_all(&ctx->config_done_complete);
+		atomic_set(&ctx->cdm_done, 1);
+		ctx->last_cdm_done_req = *(uint64_t *)cookie;
+		if (g_tfe_hw_mgr.debug_cfg.per_req_reg_dump) {
+			if (ctx->cdm_userdata.request_id == *(uint64_t *)cookie) {
+				cam_tfe_mgr_handle_reg_dump(ctx,
+					hw_update_data->reg_dump_buf_desc,
+					hw_update_data->num_reg_dump_buf,
+					CAM_ISP_TFE_PACKET_META_REG_DUMP_PER_REQUEST,
+					NULL, false);
+			} else {
+				CAM_INFO(CAM_ISP, "CDM delay, Skip dump req: %llu, cdm_req: %llu",
+					*(uint64_t *)cookie, ctx->cdm_userdata.request_id);
+			}
+		}
+		CAM_DBG(CAM_ISP,
+			"CDM hdl=%x, udata=%pK, status=%d, cookie=%llu ctx_index=%d cdm_req=%llu",
+			handle, userdata, status, cookie, ctx->ctx_index,
+			ctx->cdm_userdata.request_id);
+	} else if (status == CAM_CDM_CB_STATUS_PAGEFAULT ||
+		status == CAM_CDM_CB_STATUS_INVALID_BL_CMD ||
+		status == CAM_CDM_CB_STATUS_HW_ERROR) {
+		CAM_INFO(CAM_ISP,
+			"req_id =%d ctx_id =%d Bl_cmd_count =%d status=%d",
+			ctx->applied_req_id, ctx->ctx_index,
+			ctx->last_submit_bl_cmd.bl_count, status);
+
+		for (i = 0; i < ctx->last_submit_bl_cmd.bl_count; i++) {
+			CAM_INFO(CAM_ISP,
+				"BL(%d) hdl=0x%x addr=0x%x len=%d input_len =%d offset=0x%x type=%d",
+				i, ctx->last_submit_bl_cmd.cmd[i].mem_handle,
+				ctx->last_submit_bl_cmd.cmd[i].hw_addr,
+				ctx->last_submit_bl_cmd.cmd[i].len,
+				ctx->last_submit_bl_cmd.cmd[i].input_len,
+				ctx->last_submit_bl_cmd.cmd[i].offset,
+				ctx->last_submit_bl_cmd.cmd[i].type);
+
+			rc = cam_packet_util_get_cmd_mem_addr(
+				ctx->last_submit_bl_cmd.cmd[i].mem_handle,
+				&buf_addr, &len);
+			if (rc) {
+				CAM_ERR_RATE_LIMIT(CAM_ISP, "Failed to get mem_hdl:0x%x, rc=%d",
+					ctx->last_submit_bl_cmd.cmd[i].mem_handle, rc);
+				/* Fix: this callback is void; returning rc was a
+				 * constraint violation. Abort the dump loop.
+				 */
+				return;
+			}
+			if (((size_t)ctx->last_submit_bl_cmd.cmd[i].offset >= len) ||
+				((size_t)ctx->last_submit_bl_cmd.cmd[i].input_len) >
+				(len - (size_t)ctx->last_submit_bl_cmd.cmd[i].offset)) {
+				/* Fix: len is size_t, use %zu (was %u) */
+				CAM_ERR(CAM_UTIL, "invalid mem len:%zu cmd_inplen:%u off:%u",
+					len, ctx->last_submit_bl_cmd.cmd[i].input_len,
+					ctx->last_submit_bl_cmd.cmd[i].offset);
+				cam_mem_put_cpu_buf(ctx->last_submit_bl_cmd.cmd[i].mem_handle);
+				/* Fix: void function; was "return -EINVAL;" */
+				return;
+			}
+
+			buf_start = (uint32_t *)((uint8_t *) buf_addr +
+				ctx->last_submit_bl_cmd.cmd[i].offset);
+			buf_end = (uint32_t *)((uint8_t *) buf_start +
+				ctx->last_submit_bl_cmd.cmd[i].input_len - 1);
+
+			cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
+			cam_mem_put_cpu_buf(ctx->last_submit_bl_cmd.cmd[i].mem_handle);
+		}
+		if (ctx->packet != NULL)
+			cam_packet_util_dump_patch_info(ctx->packet,
+				g_tfe_hw_mgr.mgr_common.img_iommu_hdl,
+				g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure,
+				NULL);
+
+	} else {
+		CAM_WARN(CAM_ISP,
+			"CDM hdl=%x, udata=%pK, status=%d, cookie=%llu cdm_req=%llu",
+			handle, userdata, status, cookie, ctx->cdm_userdata.request_id);
+	}
+}
+
+static int cam_tfe_mgr_acquire_get_unified_structure_v1(
+	struct cam_isp_tfe_acquire_hw_info *acquire_hw_info,
+	uint32_t offset, uint32_t *input_size,
+	struct cam_isp_tfe_in_port_generic_info *in_port)
+{
+	struct cam_isp_tfe_in_port_info *in = NULL;
+	uint32_t in_port_length = 0;
+	int32_t rc = 0, i;
+
+	in = (struct cam_isp_tfe_in_port_info *)
+		((uint8_t *)&acquire_hw_info->data +
+		acquire_hw_info->input_info_offset + *input_size);
+
+	in_port_length = sizeof(struct cam_isp_tfe_in_port_info) +
+		(in->num_out_res - 1) *
+		sizeof(struct cam_isp_tfe_out_port_info);
+
+	*input_size += in_port_length;
+
+	if (!in_port || ((*input_size) > acquire_hw_info->input_info_size)) {
+		CAM_ERR(CAM_ISP, "Input is not proper");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	in_port->major_ver =
+		(acquire_hw_info->input_info_version >> 16) & 0xFFFF;
+	in_port->minor_ver =
+		acquire_hw_info->input_info_version & 0xFFFF;
+	in_port->res_id = in->res_id;
+	in_port->lane_type = in->lane_type;
+	in_port->lane_num = in->lane_num;
+	in_port->lane_cfg = 
in->lane_cfg; + in_port->vc[0] = in->vc; + in_port->dt[0] = in->dt; + in_port->num_valid_vc_dt = 1; + in_port->format = in->format; + in_port->pix_pattern = in->pix_pattern; + in_port->usage_type = in->usage_type; + in_port->left_start = in->left_start; + in_port->left_end = in->left_end; + in_port->left_width = in->left_width; + in_port->right_start = in->right_start; + in_port->right_end = in->right_end; + in_port->right_width = in->right_width; + in_port->line_start = in->line_start; + in_port->line_end = in->line_end; + in_port->height = in->height; + in_port->batch_size = in->batch_size; + in_port->dsp_mode = in->dsp_mode; + in_port->sensor_width = in->sensor_width; + in_port->sensor_height = in->sensor_height; + in_port->sensor_hbi = in->sensor_hbi; + in_port->sensor_vbi = in->sensor_vbi; + in_port->sensor_fps = in->sensor_fps; + in_port->init_frame_drop = in->init_frame_drop; + in_port->num_out_res = in->num_out_res; + + in_port->data = CAM_MEM_ZALLOC_ARRAY(in->num_out_res, + sizeof(struct cam_isp_tfe_out_port_generic_info), + GFP_KERNEL); + + if (in_port->data == NULL) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < in->num_out_res; i++) { + in_port->data[i].res_id = in->data_flex[i].res_id; + in_port->data[i].format = in->data_flex[i].format; + in_port->data[i].width = in->data_flex[i].width; + in_port->data[i].height = in->data_flex[i].height; + in_port->data[i].stride = in->data_flex[i].stride; + in_port->data[i].comp_grp_id = in->data_flex[i].comp_grp_id; + in_port->data[i].secure_mode = in->data_flex[i].secure_mode; + in_port->data[i].wm_mode = in->data_flex[i].wm_mode; + in_port->data[i].reserved = in->data_flex[i].reserved; + } + + return 0; +err: + return rc; +} + +static int cam_tfe_mgr_acquire_get_unified_structure_v2( + struct cam_isp_tfe_acquire_hw_info *acquire_hw_info, + uint32_t offset, uint32_t *input_size, + struct cam_isp_tfe_in_port_generic_info *in_port) +{ + struct cam_isp_tfe_in_port_info_v2 *in = NULL; + uint32_t in_port_length 
= 0; + int32_t rc = 0, i; + + in = (struct cam_isp_tfe_in_port_info_v2 *) + ((uint8_t *)&acquire_hw_info->data + + acquire_hw_info->input_info_offset + *input_size); + + in_port_length = sizeof(struct cam_isp_tfe_in_port_info_v2) + + (in->num_out_res - 1) * + sizeof(struct cam_isp_tfe_out_port_info); + + *input_size += in_port_length; + + if (!in_port || ((*input_size) > acquire_hw_info->input_info_size)) { + CAM_ERR(CAM_ISP, "Input is not proper"); + rc = -EINVAL; + goto err; + } + + in_port->major_ver = + (acquire_hw_info->input_info_version >> 16) & 0xFFFF; + in_port->minor_ver = + acquire_hw_info->input_info_version & 0xFFFF; + in_port->res_id = in->res_id; + in_port->lane_type = in->lane_type; + in_port->lane_num = in->lane_num; + in_port->lane_cfg = in->lane_cfg; + in_port->num_valid_vc_dt = in->num_valid_vc_dt; + + if (in_port->num_valid_vc_dt == 0 || + in_port->num_valid_vc_dt >= CAM_ISP_TFE_VC_DT_CFG) { + CAM_ERR(CAM_ISP, "Invalid i/p arg invalid vc-dt: %d", + in->num_valid_vc_dt); + rc = -EINVAL; + goto err; + } + + for (i = 0; i < in_port->num_valid_vc_dt; i++) { + in_port->vc[i] = in->vc[i]; + in_port->dt[i] = in->dt[i]; + } + + in_port->format = in->format; + in_port->pix_pattern = in->pix_pattern; + in_port->usage_type = in->usage_type; + in_port->left_start = in->left_start; + in_port->left_end = in->left_end; + in_port->left_width = in->left_width; + in_port->right_start = in->right_start; + in_port->right_end = in->right_end; + in_port->right_width = in->right_width; + in_port->line_start = in->line_start; + in_port->line_end = in->line_end; + in_port->height = in->height; + in_port->batch_size = in->batch_size; + in_port->dsp_mode = in->dsp_mode; + in_port->sensor_width = in->sensor_width; + in_port->sensor_height = in->sensor_height; + in_port->sensor_hbi = in->sensor_hbi; + in_port->sensor_vbi = in->sensor_vbi; + in_port->sensor_fps = in->sensor_fps; + in_port->init_frame_drop = in->init_frame_drop; + in_port->bayer_bin = in->feature_flag & + 
CAM_ISP_TFE_FLAG_BAYER_BIN; + in_port->qcfa_bin = in->feature_flag & + CAM_ISP_TFE_FLAG_QCFA_BIN; + + if (in_port->bayer_bin && in_port->qcfa_bin) { + CAM_ERR(CAM_ISP, + "Bayer and QCFA binning not supported together!"); + rc = -EINVAL; + goto err; + } + + in_port->core_cfg = in->core_cfg; + in_port->num_out_res = in->num_out_res; + + in_port->data = CAM_MEM_ZALLOC_ARRAY(in->num_out_res, + sizeof(struct cam_isp_tfe_out_port_generic_info), + GFP_KERNEL); + + if (in_port->data == NULL) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < in->num_out_res; i++) { + in_port->data[i].res_id = in->data_flex[i].res_id; + in_port->data[i].format = in->data_flex[i].format; + in_port->data[i].width = in->data_flex[i].width; + in_port->data[i].height = in->data_flex[i].height; + in_port->data[i].stride = in->data_flex[i].stride; + in_port->data[i].comp_grp_id = in->data_flex[i].comp_grp_id; + in_port->data[i].secure_mode = in->data_flex[i].secure_mode; + in_port->data[i].wm_mode = in->data_flex[i].wm_mode; + in_port->data[i].reserved = in->data_flex[i].reserved; + } + + return 0; +err: + return rc; +} +static int cam_tfe_mgr_acquire_get_unified_structure( + struct cam_isp_tfe_acquire_hw_info *acquire_hw_info, + uint32_t offset, uint32_t *input_size, + struct cam_isp_tfe_in_port_generic_info *in_port) +{ + uint32_t major_ver = 0, minor_ver = 0; + + if (acquire_hw_info == NULL || input_size == NULL) + return -EINVAL; + + major_ver = (acquire_hw_info->common_info_version >> 12) & 0xF; + minor_ver = (acquire_hw_info->common_info_version) & 0xFFF; + + switch (major_ver) { + case 1: + return cam_tfe_mgr_acquire_get_unified_structure_v1( + acquire_hw_info, offset, input_size, in_port); + case 2: + return cam_tfe_mgr_acquire_get_unified_structure_v2( + acquire_hw_info, offset, input_size, in_port); + default: + CAM_ERR(CAM_ISP, "Invalid ver of i/p port info from user"); + return -EINVAL; + } + return 0; +} + +int cam_tfe_hw_mgr_csiphy_clk_sync( + struct cam_tfe_hw_mgr_ctx *ctx, void 
*cmd_args) +{ + int rc = -EINVAL; + unsigned long phy_clock_rate = 0; + unsigned long csid_clk_rate = 0, tfe_clk_rate = 0, temp_clk_rate = 0; + struct cam_hw_intf *hw_intf; + int i; + + if (!ctx || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + phy_clock_rate = (*((unsigned long *)cmd_args)); + for (i = 0; i < ctx->num_base; i++) { + if (!ctx->hw_mgr->tfe_csid_dev_caps[ctx->base[i].idx].sync_clk) + continue; + + hw_intf = g_tfe_hw_mgr.csid_devices[ctx->base[i].idx]; + + temp_clk_rate = phy_clock_rate; + rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_DYNAMIC_CLOCK_UPDATE, + &temp_clk_rate, sizeof(unsigned long)); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to set CSID Clock rate"); + return rc; + } + csid_clk_rate = temp_clk_rate; + + hw_intf = g_tfe_hw_mgr.tfe_devices[hw_intf->hw_idx]->hw_intf; + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_DYNAMIC_CLOCK_UPDATE, + &temp_clk_rate, sizeof(unsigned long)); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to set TFE Clock rate"); + return rc; + } + tfe_clk_rate = temp_clk_rate; + } + + CAM_DBG(CAM_ISP, "Clock rates: phy:%llu csid:%llu tfe:%llu", + phy_clock_rate, csid_clk_rate, tfe_clk_rate); + + if ((phy_clock_rate > csid_clk_rate) || (csid_clk_rate > tfe_clk_rate) || + (phy_clock_rate > tfe_clk_rate)) { + CAM_ERR(CAM_ISP, + "Invalid clock rates, phy:%llu csid:%llu tfe:%llu", + phy_clock_rate, csid_clk_rate, tfe_clk_rate); + + return -EINVAL; + } + + return 0; +} + +/* entry function: acquire_hw */ +static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args) +{ + struct cam_tfe_hw_mgr *tfe_hw_mgr = hw_mgr_priv; + struct cam_hw_acquire_args *acquire_args = acquire_hw_args; + int rc = -EINVAL; + int i, j; + struct cam_tfe_hw_mgr_ctx *tfe_ctx; + struct cam_isp_tfe_in_port_generic_info *in_port = NULL; + struct cam_cdm_acquire_data cdm_acquire; + uint32_t num_pix_port_per_in = 0; + uint32_t num_rdi_port_per_in = 0; + uint32_t pdaf_enable = 0; 
+ uint32_t total_pix_port = 0; + uint32_t total_rdi_port = 0; + struct cam_isp_tfe_acquire_hw_info *acquire_hw_info = NULL; + uint32_t input_size = 0; + + CAM_DBG(CAM_ISP, "Enter..."); + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error"); + return -EINVAL; + } + + /* get the tfe ctx */ + rc = cam_tfe_hw_mgr_get_ctx(&tfe_hw_mgr->free_ctx_list, &tfe_ctx); + if (rc || !tfe_ctx) { + CAM_ERR(CAM_ISP, "Get tfe hw context failed"); + goto err; + } + + tfe_ctx->common.cb_priv = acquire_args->context_data; + tfe_ctx->common.event_cb = acquire_args->event_cb; + + tfe_ctx->hw_mgr = tfe_hw_mgr; + + memcpy(cdm_acquire.identifier, "tfe", sizeof("tfe")); + cdm_acquire.cell_index = 0; + cdm_acquire.handle = 0; + cdm_acquire.userdata = tfe_ctx; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; + cdm_acquire.base_array_cnt = CAM_TFE_HW_NUM_MAX; + for (i = 0, j = 0; i < CAM_TFE_HW_NUM_MAX; i++) { + if (tfe_hw_mgr->cdm_reg_map[i]) + cdm_acquire.base_array[j++] = + tfe_hw_mgr->cdm_reg_map[i]; + } + cdm_acquire.base_array_cnt = j; + + cdm_acquire.id = CAM_CDM_VIRTUAL; + cdm_acquire.cam_cdm_callback = cam_tfe_cam_cdm_callback; + rc = cam_cdm_acquire(&cdm_acquire); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW"); + goto free_ctx; + } + + CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x", + cdm_acquire.handle); + tfe_ctx->cdm_handle = cdm_acquire.handle; + tfe_ctx->cdm_ops = cdm_acquire.ops; + atomic_set(&tfe_ctx->cdm_done, 1); + tfe_ctx->last_cdm_done_req = 0; + tfe_ctx->skip_reg_dump_buf_put = false; + + acquire_hw_info = (struct cam_isp_tfe_acquire_hw_info *) + acquire_args->acquire_info; + in_port = CAM_MEM_ZALLOC_ARRAY(acquire_hw_info->num_inputs, + sizeof(struct cam_isp_tfe_in_port_generic_info), + GFP_KERNEL); + + if (!in_port) { + CAM_ERR(CAM_ISP, "No memory available"); + rc = -ENOMEM; + goto free_cdm; + } + + tfe_ctx->tfe_bus_comp_grp = CAM_MEM_ZALLOC_ARRAY(CAM_TFE_BUS_COMP_NUM_MAX, + sizeof(struct 
cam_tfe_hw_comp_record), GFP_KERNEL);
+
+	if (!tfe_ctx->tfe_bus_comp_grp) {
+		CAM_ERR(CAM_CTXT, "No memory for tfe_bus_comp_grp");
+		rc = -ENOMEM;
+		/* Fix: CDM and in_port are already acquired/allocated here.
+		 * "goto free_ctx" leaked the CDM handle; free_cdm releases it
+		 * and falls through to free_ctx (which also frees in_port).
+		 */
+		goto free_cdm;
+	}
+	/* Update in_port structure */
+	for (i = 0; i < acquire_hw_info->num_inputs; i++) {
+		rc = cam_tfe_mgr_acquire_get_unified_structure(acquire_hw_info,
+			i, &input_size, &in_port[i]);
+
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Failed in parsing: %d", rc);
+			goto free_cdm;
+		}
+	}
+
+	/* Check any inport has dual tfe usage */
+	tfe_ctx->is_dual = false;
+	for (i = 0; i < acquire_hw_info->num_inputs; i++)
+		if (in_port[i].usage_type)
+			tfe_ctx->is_dual = true;
+
+	/* acquire HW resources */
+	for (i = 0; i < acquire_hw_info->num_inputs; i++) {
+
+		if (in_port[i].num_out_res > CAM_TFE_HW_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "too many output res %d",
+				in_port[i].num_out_res);
+			rc = -EINVAL;
+			goto free_cdm;
+		}
+
+		CAM_DBG(CAM_ISP, "in_res_id %x", in_port[i].res_id);
+		rc = cam_tfe_mgr_acquire_hw_for_ctx(tfe_ctx, &in_port[i],
+			&num_pix_port_per_in, &num_rdi_port_per_in,
+			&pdaf_enable);
+		total_pix_port += num_pix_port_per_in;
+		total_rdi_port += num_rdi_port_per_in;
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "can not acquire resource");
+			goto free_res;
+		}
+	}
+
+	acquire_args->total_ports_acq = total_pix_port + total_rdi_port;
+	/* Check whether context has only RDI resource */
+	if (!total_pix_port) {
+		tfe_ctx->is_rdi_only_context = 1;
+		CAM_DBG(CAM_ISP, "RDI only context");
+	} else
+		tfe_ctx->is_rdi_only_context = 0;
+
+	/* Process base info */
+	rc = cam_tfe_mgr_process_base_info(tfe_ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Process base info failed");
+		goto free_res;
+	}
+
+	acquire_args->ctxt_to_hw_map = tfe_ctx;
+	tfe_ctx->ctx_in_use = 1;
+	tfe_ctx->num_reg_dump_buf = 0;
+
+	if (in_port) {
+		for (i = 0; i < acquire_hw_info->num_inputs; i++) {
+			CAM_MEM_FREE(in_port[i].data);
+			in_port[i].data = NULL;
+		}
+
+		CAM_MEM_FREE(in_port);
+		in_port = NULL;
+	}
+
+	if (g_tfe_hw_mgr.support_consumed_addr)
+
acquire_args->op_flags |= + CAM_IFE_CTX_CONSUME_ADDR_EN; + + cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->used_ctx_list, &tfe_ctx); + + CAM_DBG(CAM_ISP, "Exit...(success)"); + + return 0; +free_res: + cam_tfe_hw_mgr_release_hw_for_ctx(tfe_ctx); + tfe_ctx->ctx_in_use = 0; + tfe_ctx->is_rdi_only_context = 0; + tfe_ctx->cdm_handle = 0; + tfe_ctx->cdm_ops = NULL; + tfe_ctx->init_done = false; + tfe_ctx->is_dual = false; +free_cdm: + cam_cdm_release(tfe_ctx->cdm_handle); +free_ctx: + cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->free_ctx_list, &tfe_ctx); + if (in_port) { + for (i = 0; i < acquire_hw_info->num_inputs; i++) { + CAM_MEM_FREE(in_port[i].data); + in_port[i].data = NULL; + } + + CAM_MEM_FREE(in_port); + in_port = NULL; + } + CAM_MEM_FREE(tfe_ctx->tfe_bus_comp_grp); + tfe_ctx->tfe_bus_comp_grp = NULL; +err: + /* Dump all the current acquired HW */ + cam_tfe_hw_mgr_dump_all_ctx(); + + CAM_ERR_RATE_LIMIT(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + +int cam_tfe_mgr_acquire_get_unified_dev_str( + struct cam_isp_tfe_in_port_info *in, + struct cam_isp_tfe_in_port_generic_info *in_port) +{ + int i, rc = 0; + + in_port->res_id = in->res_id; + in_port->lane_type = in->lane_type; + in_port->lane_num = in->lane_num; + in_port->lane_cfg = in->lane_cfg; + in_port->vc[0] = in->vc; + in_port->dt[0] = in->dt; + in_port->num_valid_vc_dt = 1; + in_port->format = in->format; + in_port->pix_pattern = in->pix_pattern; + in_port->usage_type = in->usage_type; + in_port->left_start = in->left_start; + in_port->left_end = in->left_end; + in_port->left_width = in->left_width; + in_port->right_start = in->right_start; + in_port->right_end = in->right_end; + in_port->right_width = in->right_width; + in_port->line_start = in->line_start; + in_port->line_end = in->line_end; + in_port->height = in->height; + in_port->batch_size = in->batch_size; + in_port->dsp_mode = in->dsp_mode; + in_port->sensor_width = in->sensor_width; + in_port->sensor_height = in->sensor_height; + in_port->sensor_hbi = 
in->sensor_hbi; + in_port->sensor_vbi = in->sensor_vbi; + in_port->sensor_fps = in->sensor_fps; + in_port->init_frame_drop = in->init_frame_drop; + in_port->num_out_res = in->num_out_res; + + in_port->data = CAM_MEM_ZALLOC_ARRAY(in->num_out_res, + sizeof(struct cam_isp_tfe_out_port_generic_info), + GFP_KERNEL); + + if (in_port->data == NULL) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < in->num_out_res; i++) { + in_port->data[i].res_id = in->data_flex[i].res_id; + in_port->data[i].format = in->data_flex[i].format; + in_port->data[i].width = in->data_flex[i].width; + in_port->data[i].height = in->data_flex[i].height; + in_port->data[i].stride = in->data_flex[i].stride; + in_port->data[i].comp_grp_id = in->data_flex[i].comp_grp_id; + in_port->data[i].secure_mode = in->data_flex[i].secure_mode; + in_port->data[i].wm_mode = in->data_flex[i].wm_mode; + in_port->data[i].reserved = in->data_flex[i].reserved; + } + + return 0; +err: + return rc; +} + +/* entry function: acquire_hw */ +static int cam_tfe_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args) +{ + struct cam_tfe_hw_mgr *tfe_hw_mgr = hw_mgr_priv; + struct cam_hw_acquire_args *acquire_args = acquire_hw_args; + int rc = -EINVAL; + int i, j; + struct cam_tfe_hw_mgr_ctx *tfe_ctx; + struct cam_isp_tfe_in_port_info *in_port = NULL; + struct cam_isp_tfe_in_port_generic_info *gen_in_port = NULL; + struct cam_isp_resource *isp_resource = NULL; + struct cam_cdm_acquire_data cdm_acquire; + uint32_t num_pix_port_per_in = 0; + uint32_t num_rdi_port_per_in = 0; + uint32_t pdad_enable = 0; + uint32_t total_pix_port = 0; + uint32_t total_rdi_port = 0; + uint32_t in_port_length = 0; + + CAM_DBG(CAM_ISP, "Enter..."); + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_ISP, "Nothing to acquire. 
Seems like error"); + return -EINVAL; + } + + /* get the tfe ctx */ + rc = cam_tfe_hw_mgr_get_ctx(&tfe_hw_mgr->free_ctx_list, &tfe_ctx); + if (rc || !tfe_ctx) { + CAM_ERR(CAM_ISP, "Get tfe hw context failed"); + goto err; + } + + tfe_ctx->common.cb_priv = acquire_args->context_data; + tfe_ctx->common.event_cb = acquire_args->event_cb; + + tfe_ctx->hw_mgr = tfe_hw_mgr; + + memcpy(cdm_acquire.identifier, "tfe", sizeof("tfe")); + cdm_acquire.cell_index = 0; + cdm_acquire.handle = 0; + cdm_acquire.userdata = tfe_ctx; + cdm_acquire.base_array_cnt = CAM_TFE_HW_NUM_MAX; + for (i = 0, j = 0; i < CAM_TFE_HW_NUM_MAX; i++) { + if (tfe_hw_mgr->cdm_reg_map[i]) + cdm_acquire.base_array[j++] = + tfe_hw_mgr->cdm_reg_map[i]; + } + cdm_acquire.base_array_cnt = j; + + + cdm_acquire.id = CAM_CDM_VIRTUAL; + cdm_acquire.cam_cdm_callback = cam_tfe_cam_cdm_callback; + rc = cam_cdm_acquire(&cdm_acquire); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW"); + goto free_ctx; + } + + CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x", + cdm_acquire.handle); + tfe_ctx->cdm_handle = cdm_acquire.handle; + tfe_ctx->cdm_ops = cdm_acquire.ops; + atomic_set(&tfe_ctx->cdm_done, 1); + tfe_ctx->last_cdm_done_req = 0; + + isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info; + + gen_in_port = CAM_MEM_ZALLOC_ARRAY(acquire_args->num_acq, + sizeof(struct cam_isp_tfe_in_port_generic_info), + GFP_KERNEL); + + if (!gen_in_port) { + CAM_ERR(CAM_ISP, "No memory available"); + rc = -ENOMEM; + goto free_cdm; + } + + /* acquire HW resources */ + for (i = 0; i < acquire_args->num_acq; i++) { + if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT) + continue; + + CAM_DBG(CAM_ISP, "acquire no = %d total = %d", i, + acquire_args->num_acq); + CAM_DBG(CAM_ISP, + "start copy from user handle %lld with len = %d", + isp_resource[i].res_hdl, + isp_resource[i].length); + + in_port_length = sizeof(struct cam_isp_tfe_in_port_info); + + if (in_port_length > isp_resource[i].length) { + 
CAM_ERR(CAM_ISP, "buffer size is not enough");
+			rc = -EINVAL;
+			goto free_res;
+		}
+
+		in_port = memdup_user(
+			u64_to_user_ptr(isp_resource[i].res_hdl),
+			isp_resource[i].length);
+		if (!IS_ERR(in_port)) {
+			if (in_port->num_out_res > CAM_TFE_HW_OUT_RES_MAX) {
+				CAM_ERR(CAM_ISP, "too many output res %d",
+					in_port->num_out_res);
+				rc = -EINVAL;
+				CAM_MEM_FREE(in_port);
+				goto free_res;
+			}
+
+			in_port_length =
+				sizeof(struct cam_isp_tfe_in_port_info) +
+				(in_port->num_out_res - 1) *
+				sizeof(struct cam_isp_tfe_out_port_info);
+			if (in_port_length > isp_resource[i].length) {
+				CAM_ERR(CAM_ISP, "buffer size is not enough");
+				rc = -EINVAL;
+				CAM_MEM_FREE(in_port);
+				goto free_res;
+			}
+
+			rc = cam_tfe_mgr_acquire_get_unified_dev_str(in_port,
+				&gen_in_port[i]);
+
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Cannot get in_port structure, rc: %d",
+					rc);
+				/* Fix: in_port from memdup_user was leaked on
+				 * this error path (the two paths above free it).
+				 */
+				CAM_MEM_FREE(in_port);
+				goto free_res;
+			}
+
+			rc = cam_tfe_mgr_acquire_hw_for_ctx(tfe_ctx,
+				&gen_in_port[i],
+				&num_pix_port_per_in, &num_rdi_port_per_in,
+				&pdad_enable);
+			total_pix_port += num_pix_port_per_in;
+			total_rdi_port += num_rdi_port_per_in;
+
+			CAM_MEM_FREE(in_port);
+			in_port = NULL;
+			if (rc) {
+				CAM_ERR(CAM_ISP, "can not acquire resource");
+				goto free_res;
+			}
+		} else {
+			CAM_ERR(CAM_ISP,
+				"Copy from user failed with in_port = %pK",
+				in_port);
+			rc = -EFAULT;
+			goto free_res;
+		}
+	}
+
+	/* Check whether context has only RDI resource */
+	if (!total_pix_port) {
+		tfe_ctx->is_rdi_only_context = 1;
+		CAM_DBG(CAM_ISP, "RDI only context");
+	} else
+		tfe_ctx->is_rdi_only_context = 0;
+
+	/* Process base info */
+	rc = cam_tfe_mgr_process_base_info(tfe_ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Process base info failed");
+		goto free_res;
+	}
+
+	if (gen_in_port) {
+		for (i = 0; i < acquire_args->num_acq; i++) {
+			CAM_MEM_FREE(gen_in_port[i].data);
+			gen_in_port[i].data = NULL;
+		}
+		CAM_MEM_FREE(gen_in_port);
+		gen_in_port = NULL;
+	}
+
+	acquire_args->ctxt_to_hw_map = tfe_ctx;
+	tfe_ctx->ctx_in_use = 1;
+
+
cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->used_ctx_list, &tfe_ctx); + + CAM_DBG(CAM_ISP, "Exit...(success)"); + + return 0; +free_res: + cam_tfe_hw_mgr_release_hw_for_ctx(tfe_ctx); +free_cdm: + cam_cdm_release(tfe_ctx->cdm_handle); + if (gen_in_port) { + for (i = 0; i < acquire_args->num_acq; i++) + CAM_MEM_FREE(gen_in_port[i].data); + CAM_MEM_FREE(gen_in_port); + } +free_ctx: + cam_tfe_hw_mgr_put_ctx(&tfe_hw_mgr->free_ctx_list, &tfe_ctx); +err: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + +/* entry function: acquire_hw */ +static int cam_tfe_mgr_acquire(void *hw_mgr_priv, + void *acquire_hw_args) +{ + struct cam_hw_acquire_args *acquire_args = acquire_hw_args; + int rc = -EINVAL; + + CAM_DBG(CAM_ISP, "Enter..."); + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error"); + return -EINVAL; + } + + if (acquire_args->num_acq == CAM_API_COMPAT_CONSTANT) + rc = cam_tfe_mgr_acquire_hw(hw_mgr_priv, acquire_hw_args); + else + rc = cam_tfe_mgr_acquire_dev(hw_mgr_priv, acquire_hw_args); + + CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + +static const char *cam_tfe_util_usage_data_to_string( + uint32_t usage_data) +{ + switch (usage_data) { + case CAM_ISP_TFE_USAGE_LEFT_PX: + return "LEFT_PX"; + case CAM_ISP_TFE_USAGE_RIGHT_PX: + return "RIGHT_PX"; + case CAM_ISP_TFE_USAGE_RDI: + return "RDI"; + default: + return "USAGE_INVALID"; + } +} + +static int cam_tfe_classify_vote_info( + struct cam_isp_hw_mgr_res *hw_mgr_res, + struct cam_isp_bw_config_internal_v2 *bw_config, + struct cam_axi_vote *isp_vote, + uint32_t split_idx, + bool *camif_l_bw_updated, + bool *camif_r_bw_updated) +{ + int rc = 0, i, j = 0; + + if (hw_mgr_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) { + if (split_idx == CAM_ISP_HW_SPLIT_LEFT) { + if (*camif_l_bw_updated) + return rc; + + for (i = 0; i < bw_config->num_paths; i++) { + if (bw_config->axi_path[i].usage_data == + CAM_ISP_TFE_USAGE_LEFT_PX) { + 
memcpy(&isp_vote->axi_path[j],
                    &bw_config->axi_path[i],
                    sizeof(struct
                    cam_cpas_axi_per_path_bw_vote));
                j++;
            }
        }
        isp_vote->num_paths = j;

        *camif_l_bw_updated = true;
    } else {
        /* Right pixel path: copy only RIGHT_PX usage votes, once per config */
        if (*camif_r_bw_updated)
            return rc;

        for (i = 0; i < bw_config->num_paths; i++) {
            if (bw_config->axi_path[i].usage_data ==
                CAM_ISP_TFE_USAGE_RIGHT_PX) {
                memcpy(&isp_vote->axi_path[j],
                    &bw_config->axi_path[i],
                    sizeof(struct
                    cam_cpas_axi_per_path_bw_vote));
                j++;
            }
        }
        isp_vote->num_paths = j;

        *camif_r_bw_updated = true;
    }
} else if ((hw_mgr_res->res_id >= CAM_ISP_HW_TFE_IN_RDI0)
    && (hw_mgr_res->res_id <=
    CAM_ISP_HW_TFE_IN_RDI2)) {
    /*
     * RDI path: pick the RDI usage vote whose path_data_type offset from
     * IFE_RDI0 matches this resource's offset from TFE_IN_RDI0.
     */
    for (i = 0; i < bw_config->num_paths; i++) {
        if ((bw_config->axi_path[i].usage_data ==
            CAM_ISP_TFE_USAGE_RDI) &&
            ((bw_config->axi_path[i].path_data_type -
            CAM_AXI_PATH_DATA_IFE_RDI0) ==
            (hw_mgr_res->res_id -
            CAM_ISP_HW_TFE_IN_RDI0))) {
            memcpy(&isp_vote->axi_path[j],
                &bw_config->axi_path[i],
                sizeof(struct
                cam_cpas_axi_per_path_bw_vote));
            j++;
        }
    }
    isp_vote->num_paths = j;

} else {
    /* Unknown res_id: only an error when the split actually has a resource */
    if (hw_mgr_res->hw_res[split_idx]) {
        CAM_ERR(CAM_ISP, "Invalid res_id %u, split_idx: %u",
            hw_mgr_res->res_id, split_idx);
        rc = -EINVAL;
        return rc;
    }
}

/* Trace the classified votes for this resource */
for (i = 0; i < isp_vote->num_paths; i++) {
    CAM_DBG(CAM_PERF,
        "CLASSIFY_VOTE [%s] [%s] [%s] [%llu] [%llu] [%llu]",
        cam_tfe_util_usage_data_to_string(
        isp_vote->axi_path[i].usage_data),
        cam_cpas_axi_util_path_type_to_string(
        isp_vote->axi_path[i].path_data_type),
        cam_cpas_axi_util_trans_type_to_string(
        isp_vote->axi_path[i].transac_type),
        isp_vote->axi_path[i].camnoc_bw,
        isp_vote->axi_path[i].mnoc_ab_bw,
        isp_vote->axi_path[i].mnoc_ib_bw);
}

return rc;
}

/*
 * cam_isp_tfe_blob_bw_update - apply a v2 bandwidth-config blob to every
 * TFE input resource of the context.
 *
 * For each in-use hw resource/split, the relevant subset of @bw_config's
 * AXI path votes is classified into a cam_axi_vote (see
 * cam_tfe_classify_vote_info) and sent to the hw layer via the
 * CAM_ISP_HW_CMD_BW_UPDATE_V2 process_cmd.
 *
 * Returns 0 on success or a negative errno from classification/process_cmd.
 */
static int cam_isp_tfe_blob_bw_update(
    struct cam_isp_bw_config_internal_v2 *bw_config,
    struct cam_tfe_hw_mgr_ctx            *ctx)
{
    struct cam_isp_hw_mgr_res      *hw_mgr_res;
    struct cam_hw_intf             *hw_intf;
    struct cam_tfe_bw_update_args  *bw_upd_args = NULL;
    int                             rc = -EINVAL;
    uint32_t                        i, split_idx;
    bool                            camif_l_bw_updated = false;
    bool                            camif_r_bw_updated = false;

    /* Debug-trace every incoming vote before classification */
    for (i = 0; i < bw_config->num_paths; i++) {
        CAM_DBG(CAM_PERF,
            "ISP_BLOB usage_type=%u [%s] [%s] [%s] [%llu] [%llu] [%llu]",
            bw_config->usage_type,
            cam_tfe_util_usage_data_to_string(
            bw_config->axi_path[i].usage_data),
            cam_cpas_axi_util_path_type_to_string(
            bw_config->axi_path[i].path_data_type),
            cam_cpas_axi_util_trans_type_to_string(
            bw_config->axi_path[i].transac_type),
            bw_config->axi_path[i].camnoc_bw,
            bw_config->axi_path[i].mnoc_ab_bw,
            bw_config->axi_path[i].mnoc_ib_bw);
    }

    /* Heap allocation: cam_tfe_bw_update_args is too large for the stack */
    bw_upd_args = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_bw_update_args),
        GFP_KERNEL);
    if (!bw_upd_args) {
        CAM_ERR(CAM_ISP, "Out of memory");
        return -ENOMEM;
    }
    list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
        for (split_idx = 0; split_idx < CAM_ISP_HW_SPLIT_MAX;
            split_idx++) {
            if (!hw_mgr_res->hw_res[split_idx])
                continue;

            /* Re-classify from scratch for every resource/split */
            memset(&bw_upd_args->isp_vote, 0,
                sizeof(struct cam_axi_vote));
            rc = cam_tfe_classify_vote_info(hw_mgr_res, bw_config,
                &bw_upd_args->isp_vote, split_idx,
                &camif_l_bw_updated, &camif_r_bw_updated);
            if (rc)
                goto end;

            /* Nothing applies to this resource; skip the hw call */
            if (!bw_upd_args->isp_vote.num_paths)
                continue;

            hw_intf = hw_mgr_res->hw_res[split_idx]->hw_intf;
            if (hw_intf && hw_intf->hw_ops.process_cmd) {
                bw_upd_args->node_res =
                    hw_mgr_res->hw_res[split_idx];

                rc = hw_intf->hw_ops.process_cmd(
                    hw_intf->hw_priv,
                    CAM_ISP_HW_CMD_BW_UPDATE_V2,
                    bw_upd_args,
                    sizeof(
                    struct cam_tfe_bw_update_args));
                if (rc)
                    CAM_ERR(CAM_ISP,
                        "BW Update failed rc: %d", rc);
            } else {
                CAM_WARN(CAM_ISP, "NULL hw_intf!");
            }
        }
    }

end:
    CAM_MEM_ZFREE((void *)bw_upd_args, sizeof(struct cam_tfe_bw_update_args));
    bw_upd_args = NULL;
    return rc;
}

/* entry function: config_hw — submit prepared hw update entries to the CDM */
static int cam_tfe_mgr_config_hw(void *hw_mgr_priv,
    void *config_hw_args)
{
    int rc = -EINVAL, i, skip = 0;
    struct cam_hw_config_args *cfg;
+ struct cam_hw_update_entry *cmd; + struct cam_cdm_bl_request *cdm_cmd; + struct cam_tfe_hw_mgr_ctx *ctx; + struct cam_isp_prepare_hw_update_data *hw_update_data; + bool is_cdm_hung = false; + size_t len = 0; + uint32_t *buf_addr = NULL, *buf_start = NULL, *buf_end = NULL; + uint32_t cmd_type = 0; + unsigned long rem_jiffies = 0; + + if (!hw_mgr_priv || !config_hw_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + cfg = config_hw_args; + ctx = (struct cam_tfe_hw_mgr_ctx *)cfg->ctxt_to_hw_map; + if (!ctx) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + if (!ctx->ctx_in_use || !ctx->cdm_cmd) { + CAM_ERR(CAM_ISP, "Invalid context parameters"); + return -EPERM; + } + if (atomic_read(&ctx->overflow_pending)) + return -EINVAL; + + hw_update_data = (struct cam_isp_prepare_hw_update_data *) cfg->priv; + hw_update_data->isp_mgr_ctx = ctx; + ctx->cdm_userdata.request_id = cfg->request_id; + ctx->cdm_userdata.hw_update_data = hw_update_data; + + if (cfg->reapply_type && cfg->cdm_reset_before_apply) { + if (ctx->last_cdm_done_req < cfg->request_id) { + is_cdm_hung = !cam_cdm_detect_hang_error(ctx->cdm_handle); + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM callback not received for req: %lld, last_cdm_done_req: %lld, is_cdm_hung: %d", + cfg->request_id, ctx->last_cdm_done_req, + is_cdm_hung); + + if (!is_cdm_hung) + cam_cdm_dump_debug_registers(ctx->cdm_handle); + + rc = cam_cdm_reset_hw(ctx->cdm_handle); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM reset unsuccessful for req: %lld, ctx: %d, rc: %d", + cfg->request_id, ctx->ctx_index, rc); + ctx->last_cdm_done_req = 0; + return rc; + } + } else { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CDM callback received, should wait for buf done for req: %lld", + cfg->request_id); + return -EALREADY; + } + ctx->last_cdm_done_req = 0; + } + + if (hw_update_data->bw_clk_config.bw_config_valid) { + CAM_DBG(CAM_ISP, "idx=%d, bw_config_version=%d", ctx->ctx_index, + ctx->bw_config_version); + if 
(ctx->bw_config_version == + CAM_ISP_BW_CONFIG_V2) { + rc = cam_isp_tfe_blob_bw_update( + &hw_update_data->bw_clk_config.bw_config_v2, ctx); + if (rc) + CAM_ERR(CAM_ISP, + "Bandwidth Update Failed rc: %d", rc); + } else { + CAM_ERR(CAM_ISP, + "Invalid bw config version: %d", + ctx->bw_config_version); + } + } + + CAM_DBG(CAM_ISP, + "Enter ctx id:%d num_hw_upd_entries %d request id: %llu", + ctx->ctx_index, cfg->num_hw_update_entries, cfg->request_id); + + if (cfg->num_hw_update_entries <= 0) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "Enter ctx id:%d no valid hw entries:%d request id: %llu", + ctx->ctx_index, cfg->num_hw_update_entries, + cfg->request_id); + goto end; + } + + cdm_cmd = ctx->cdm_cmd; + cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries; + cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE; + cdm_cmd->flag = true; + cdm_cmd->userdata = ctx; + cdm_cmd->cookie = cfg->request_id; + cdm_cmd->gen_irq_arb = false; + cdm_cmd->genirq_buff = &hw_update_data->kmd_cmd_buff_info; + + for (i = 0; i < cfg->num_hw_update_entries; i++) { + cmd = (cfg->hw_update_entries + i); + if ((cfg->reapply_type == CAM_CONFIG_REAPPLY_IO) && (cmd->flags == CAM_ISP_IQ_BL)) { + skip++; + continue; + } + + if (cmd->flags == CAM_ISP_UNUSED_BL || cmd->flags >= CAM_ISP_BL_MAX) + CAM_ERR(CAM_ISP, "Unexpected BL type %d", cmd->flags); + + cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle = cmd->handle; + cdm_cmd->cmd_flex[i - skip].offset = cmd->offset; + cdm_cmd->cmd_flex[i - skip].len = cmd->len; + cdm_cmd->cmd_flex[i - skip].arbitrate = false; + + if (g_tfe_hw_mgr.debug_cfg.enable_cdm_cmd_check) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "Enter cdm cmd_buf validation"); + rc = cam_packet_util_get_cmd_mem_addr( + cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle, &buf_addr, &len); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to get buf_addr and len for mem_handle: %d ctx id: %u request id: %llu", + cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle, + ctx->ctx_index, cfg->request_id); + continue; + } + + if 
(((size_t)cdm_cmd->cmd_flex[i - skip].offset >= len) || + ((len - (size_t)cdm_cmd->cmd_flex[i - skip].offset) < + (size_t)cdm_cmd->cmd_flex[i - skip].len)) { + CAM_ERR(CAM_UTIL, "invalid mem len:%u cmd_inplen:%u off:%u", + len, cdm_cmd->cmd_flex[i - skip].len, + cdm_cmd->cmd_flex[i - skip].offset); + cam_mem_put_cpu_buf(cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle); + return -EINVAL; + } + + buf_start = (uint32_t *)((uint8_t *) buf_addr + + cdm_cmd->cmd_flex[i - skip].offset); + buf_end = (uint32_t *)((uint8_t *) buf_start + + cdm_cmd->cmd_flex[i - skip].len - 1); + cmd_type = ((uint32_t)(*buf_start) >> CAM_CDM_COMMAND_OFFSET); + if ((i == 0) && (cmd_type != CAM_CDM_CMD_CHANGE_BASE)) { + CAM_ERR(CAM_ISP, + "first cmd in cmd_buf is not change_base, cmd_type: %u ctx id: %u request id: %llu", + cmd_type, ctx->ctx_index, cfg->request_id); + cam_cdm_util_dump_cmd_buf(buf_start, buf_end); + cam_mem_put_cpu_buf(cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle); + return -EINVAL; + } + + if (cam_cdm_util_validate_cmd_buf(buf_start, buf_end)) { + CAM_ERR(CAM_ISP, + "found invalid cmd in cmd_buf, ctx id: %u request id: %llu", + ctx->ctx_index, cfg->request_id); + cam_cdm_util_dump_cmd_buf(buf_start, buf_end); + cam_mem_put_cpu_buf(cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle); + return -EINVAL; + } + cam_mem_put_cpu_buf(cdm_cmd->cmd_flex[i - skip].bl_addr.mem_handle); + } + } + + cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries - skip; + reinit_completion(&ctx->config_done_complete); + ctx->applied_req_id = cfg->request_id; + CAM_DBG(CAM_ISP, "Submit to CDM"); + + atomic_set(&ctx->cdm_done, 0); + + rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed to apply the configs for req %llu, rc %d", + cfg->request_id, rc); + return rc; + } + + ctx->packet = (struct cam_packet *)hw_update_data->packet; + ctx->last_submit_bl_cmd.bl_count = cdm_cmd->cmd_arrary_count; + + for (i = 0; i < cdm_cmd->cmd_arrary_count; i++) { + if 
(cdm_cmd->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) { + ctx->last_submit_bl_cmd.cmd[i].mem_handle = + cdm_cmd->cmd_flex[i].bl_addr.mem_handle; + + rc = cam_mem_get_io_buf( + cdm_cmd->cmd_flex[i].bl_addr.mem_handle, + g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl, + &ctx->last_submit_bl_cmd.cmd[i].hw_addr, + &ctx->last_submit_bl_cmd.cmd[i].len, NULL, NULL); + } else if (cdm_cmd->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) { + if (!cdm_cmd->cmd_flex[i].bl_addr.hw_iova) { + CAM_ERR(CAM_CDM, "Submitted Hw bl hw_iova is invalid %d:%d", + i, cdm_cmd->cmd_arrary_count); + rc = -EINVAL; + break; + } + rc = 0; + ctx->last_submit_bl_cmd.cmd[i].hw_addr = + (uint64_t)cdm_cmd->cmd_flex[i].bl_addr.hw_iova; + ctx->last_submit_bl_cmd.cmd[i].len = + cdm_cmd->cmd_flex[i].len + cdm_cmd->cmd_flex[i].offset; + ctx->last_submit_bl_cmd.cmd[i].mem_handle = 0; + } else + CAM_INFO(CAM_ISP, "submitted invalid bl cmd addr type :%d for Bl(%d)", + cdm_cmd->type, i); + + ctx->last_submit_bl_cmd.cmd[i].offset = cdm_cmd->cmd_flex[i].offset; + ctx->last_submit_bl_cmd.cmd[i].type = cdm_cmd->type; + ctx->last_submit_bl_cmd.cmd[i].input_len = cdm_cmd->cmd_flex[i].len; + } + + if (!cfg->init_packet) + goto end; + + for (i = 0; i < CAM_TFE_HW_CONFIG_WAIT_MAX_TRY; i++) { + rem_jiffies = cam_common_wait_for_completion_timeout( + &ctx->config_done_complete, + msecs_to_jiffies( + CAM_TFE_HW_CONFIG_TIMEOUT)); + if (rem_jiffies <= 0) { + rc = cam_cdm_detect_hang_error(ctx->cdm_handle); + if (rc == 0) { + CAM_ERR(CAM_ISP, + "CDM workqueue delay detected, wait for some more time req_id=%llu rc=%d ctx_index %d", + cfg->request_id, rc, + ctx->ctx_index); + cam_req_mgr_debug_delay_detect(); + trace_cam_delay_detect("CDM", + "CDM workqueue delay detected", + cfg->request_id, ctx->ctx_index, + CAM_DEFAULT_VALUE, + CAM_DEFAULT_VALUE, rc); + continue; + } else { + CAM_ERR(CAM_ISP, + "cfg_done completn timeout cdm_hang=%d req=%llu ctx_idx=%d", + cfg->request_id, rc, + ctx->ctx_index); + cam_req_mgr_debug_delay_detect(); + 
trace_cam_delay_detect("ISP", + "config done completion timeout", + cfg->request_id, ctx->ctx_index, + CAM_DEFAULT_VALUE, CAM_DEFAULT_VALUE, + rc); + + rc = -ETIMEDOUT; + break; + } + } else { + rc = 0; + CAM_DBG(CAM_ISP, + "config done Success for req_id=%llu ctx_index %d", + cfg->request_id, ctx->ctx_index); + break; + } + } + + if ((i == CAM_TFE_HW_CONFIG_WAIT_MAX_TRY) && (rc == 0)) + CAM_DBG(CAM_ISP, "Wq delayed but IRQ CDM done"); +end: + CAM_DBG(CAM_ISP, "Exit: Config Done: %llu", cfg->request_id); + + return rc; +} + +static int cam_tfe_mgr_stop_hw_in_overflow(void *stop_hw_args) +{ + int rc = 0; + struct cam_hw_stop_args *stop_args = stop_hw_args; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_tfe_hw_mgr_ctx *ctx; + uint32_t i, master_base_idx = 0; + + if (!stop_hw_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + ctx = (struct cam_tfe_hw_mgr_ctx *)stop_args->ctxt_to_hw_map; + if (!ctx || !ctx->ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + CAM_DBG(CAM_ISP, "Enter...ctx id:%d", + ctx->ctx_index); + + if (!ctx->num_base) { + CAM_ERR(CAM_ISP, "Number of bases are zero"); + return -EINVAL; + } + + /* get master base index first */ + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) { + master_base_idx = ctx->base[i].idx; + break; + } + } + + if (i == ctx->num_base) + master_base_idx = ctx->base[0].idx; + + + /* stop the master CSID path first */ + cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid, + master_base_idx, CAM_TFE_CSID_HALT_IMMEDIATELY); + + /* Stop rest of the CSID paths */ + for (i = 0; i < ctx->num_base; i++) { + if (i == master_base_idx) + continue; + + cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid, + ctx->base[i].idx, CAM_TFE_CSID_HALT_IMMEDIATELY); + } + + /* TFE in resources */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + cam_tfe_hw_mgr_stop_hw_res(hw_mgr_res); + } + + /* TFE out resources */ + for (i 
= 0; i < CAM_TFE_HW_OUT_RES_MAX; i++)
        cam_tfe_hw_mgr_stop_hw_res(&ctx->res_list_tfe_out[i]);

    /* Stop tasklet for context */
    cam_tasklet_stop(ctx->common.tasklet_info);
    CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
        ctx->ctx_index, rc);

    return rc;
}

/*
 * cam_tfe_mgr_bw_control - issue a bandwidth control action
 * (CAM_ISP_HW_CMD_BW_CONTROL) to every in-use TFE input resource.
 * Returns the rc of the last process_cmd issued (-EINVAL if none ran).
 */
static int cam_tfe_mgr_bw_control(struct cam_tfe_hw_mgr_ctx *ctx,
    enum cam_tfe_bw_control_action action)
{
    struct cam_isp_hw_mgr_res        *hw_mgr_res;
    struct cam_hw_intf               *hw_intf;
    struct cam_tfe_bw_control_args    bw_ctrl_args;
    int                               rc = -EINVAL;
    uint32_t                          i;

    CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);

    list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
        for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
            if (!hw_mgr_res->hw_res[i])
                continue;

            hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
            if (hw_intf && hw_intf->hw_ops.process_cmd) {
                bw_ctrl_args.node_res =
                    hw_mgr_res->hw_res[i];
                bw_ctrl_args.action = action;

                rc = hw_intf->hw_ops.process_cmd(
                    hw_intf->hw_priv,
                    CAM_ISP_HW_CMD_BW_CONTROL,
                    &bw_ctrl_args,
                    sizeof(struct cam_tfe_bw_control_args));
                if (rc)
                    CAM_ERR(CAM_ISP, "BW Update failed");
            } else
                CAM_WARN(CAM_ISP, "NULL hw_intf!");
        }
    }

    return rc;
}

/* Exclude this context's bandwidth votes while it is paused */
static int cam_tfe_mgr_pause_hw(struct cam_tfe_hw_mgr_ctx *ctx)
{
    return cam_tfe_mgr_bw_control(ctx, CAM_TFE_BW_CONTROL_EXCLUDE);
}

/*
 * entry function: stop_hw — halt CSIDs (master first, immediately or at
 * frame boundary per stop_isp->hw_stop_cmd), stop TFE out/in resources,
 * stop the tasklet and pause bandwidth votes. Continues in the following
 * section with CDM stream-off and deinit unless stop_isp->stop_only.
 */
static int cam_tfe_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
{
    int                               rc = 0;
    struct cam_hw_stop_args          *stop_args = stop_hw_args;
    struct cam_isp_stop_args         *stop_isp;
    struct cam_isp_hw_mgr_res        *hw_mgr_res;
    struct cam_tfe_hw_mgr_ctx        *ctx;
    enum cam_tfe_csid_halt_cmd        csid_halt_type;
    uint32_t                          i, master_base_idx = 0;

    if (!hw_mgr_priv || !stop_hw_args) {
        CAM_ERR(CAM_ISP, "Invalid arguments");
        return -EINVAL;
    }

    ctx = (struct cam_tfe_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
    if (!ctx || !ctx->ctx_in_use) {
        CAM_ERR(CAM_ISP, "Invalid context is used");
        return -EPERM;
    }

    CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
    stop_isp = (struct cam_isp_stop_args *)stop_args->args;

    /* Set the csid halt command */
    if (stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY)
        csid_halt_type = CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY;
    else
        csid_halt_type = CAM_TFE_CSID_HALT_IMMEDIATELY;

    /* Note:stop resource will remove the irq mask from the hardware */

    if (!ctx->num_base) {
        CAM_ERR(CAM_ISP, "number of bases are zero");
        return -EINVAL;
    }

    CAM_DBG(CAM_ISP, "Halting CSIDs");

    /* get master base index first */
    for (i = 0; i < ctx->num_base; i++) {
        if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
            master_base_idx = ctx->base[i].idx;
            break;
        }
    }

    /*
     * If Context does not have PIX resources and has only RDI resource
     * then take the first base index.
     */
    if (i == ctx->num_base)
        master_base_idx = ctx->base[0].idx;

    /*Change slave mode*/
    if (csid_halt_type == CAM_TFE_CSID_HALT_IMMEDIATELY)
        cam_tfe_mgr_csid_change_halt_mode(ctx,
            CAM_TFE_CSID_HALT_MODE_INTERNAL);

    CAM_DBG(CAM_ISP, "Stopping master CSID idx %d", master_base_idx);

    /* Stop the master CSID path first */
    cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid,
        master_base_idx, csid_halt_type);

    /* stop rest of the CSID paths */
    for (i = 0; i < ctx->num_base; i++) {
        if (ctx->base[i].idx == master_base_idx)
            continue;
        CAM_DBG(CAM_ISP, "Stopping CSID idx %d i %d master %d",
            ctx->base[i].idx, i, master_base_idx);

        cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid,
            ctx->base[i].idx, csid_halt_type);
    }

    CAM_DBG(CAM_ISP, "Going to stop TFE Out");

    /* TFE out resources */
    for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++)
        cam_tfe_hw_mgr_stop_hw_res(&ctx->res_list_tfe_out[i]);

    CAM_DBG(CAM_ISP, "Going to stop TFE IN");

    /* TFE in resources */
    list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
        cam_tfe_hw_mgr_stop_hw_res(hw_mgr_res);
    }

    cam_tasklet_stop(ctx->common.tasklet_info);
cam_tfe_mgr_pause_hw(ctx); + + cam_common_wait_for_completion_timeout(&ctx->config_done_complete, + msecs_to_jiffies(5)); + + if (stop_isp->stop_only) + goto end; + + if (cam_cdm_stream_off(ctx->cdm_handle)) + CAM_ERR(CAM_ISP, "CDM stream off failed %d", ctx->cdm_handle); + + cam_tfe_hw_mgr_deinit_hw(ctx); + + CAM_DBG(CAM_ISP, + "Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc); + + mutex_lock(&g_tfe_hw_mgr.ctx_mutex); + atomic_dec_return(&g_tfe_hw_mgr.active_ctx_cnt); + mutex_unlock(&g_tfe_hw_mgr.ctx_mutex); + + for (i = 0; i < ctx->last_submit_bl_cmd.bl_count; i++) { + ctx->last_submit_bl_cmd.cmd[i].mem_handle = 0; + ctx->last_submit_bl_cmd.cmd[i].hw_addr = 0; + ctx->last_submit_bl_cmd.cmd[i].len = 0; + ctx->last_submit_bl_cmd.cmd[i].offset = 0; + ctx->last_submit_bl_cmd.cmd[i].type = 0; + ctx->last_submit_bl_cmd.cmd[i].input_len = 0; + } + ctx->last_submit_bl_cmd.bl_count = 0; + ctx->packet = NULL; +end: + if (!stop_isp.is_internal_stop && !ctx->skip_reg_dump_buf_put) { + for (i = 0; i < ctx->num_reg_dump_buf; i++) + cam_mem_put_cpu_buf(ctx->reg_dump_buf_desc[i].mem_handle); + ctx->num_reg_dump_buf = 0; + } + ctx->skip_reg_dump_buf_put = false; + return rc; +} + +static int cam_tfe_mgr_reset_tfe_hw(struct cam_tfe_hw_mgr *hw_mgr, + uint32_t hw_idx) +{ + uint32_t i = 0; + struct cam_hw_intf *tfe_hw_intf; + uint32_t tfe_reset_type; + + if (!hw_mgr) { + CAM_DBG(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + /* Reset TFE HW*/ + tfe_reset_type = CAM_TFE_HW_RESET_HW; + + for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) { + if (!hw_mgr->tfe_devices[i]) + continue; + + if (hw_idx != hw_mgr->tfe_devices[i]->hw_intf->hw_idx) + continue; + CAM_DBG(CAM_ISP, "TFE (id = %d) reset", hw_idx); + tfe_hw_intf = hw_mgr->tfe_devices[i]->hw_intf; + tfe_hw_intf->hw_ops.reset(tfe_hw_intf->hw_priv, + &tfe_reset_type, sizeof(tfe_reset_type)); + break; + } + + CAM_DBG(CAM_ISP, "Exit Successfully"); + return 0; +} + +static int cam_tfe_mgr_restart_hw(void *start_hw_args) +{ + int rc 
= -EINVAL; + struct cam_hw_start_args *start_args = start_hw_args; + struct cam_tfe_hw_mgr_ctx *ctx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + uint32_t i; + + if (!start_hw_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + ctx = (struct cam_tfe_hw_mgr_ctx *)start_args->ctxt_to_hw_map; + if (!ctx || !ctx->ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + CAM_DBG(CAM_ISP, "START TFE OUT ... in ctx id:%d", ctx->ctx_index); + + cam_tasklet_start(ctx->common.tasklet_info); + + /* start the TFE out devices */ + for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++) { + rc = cam_tfe_hw_mgr_start_hw_res( + &ctx->res_list_tfe_out[i], ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start TFE OUT (%d)", i); + goto err; + } + } + + CAM_DBG(CAM_ISP, "START TFE SRC ... in ctx id:%d", ctx->ctx_index); + + /* Start the TFE in devices */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start TFE IN (%d)", + hw_mgr_res->res_id); + goto err; + } + } + + CAM_DBG(CAM_ISP, "START CSID HW ... 
in ctx id:%d", ctx->ctx_index); + /* Start the TFE CSID HW devices */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) { + rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start TFE CSID (%d)", + hw_mgr_res->res_id); + goto err; + } + } + + CAM_DBG(CAM_ISP, "Exit...(success)"); + return 0; + +err: + cam_tfe_mgr_stop_hw_in_overflow(start_hw_args); + CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + +static int cam_tfe_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args) +{ + int rc = -EINVAL; + struct cam_isp_start_args *start_isp = start_hw_args; + struct cam_hw_stop_args stop_args; + struct cam_isp_stop_args stop_isp; + struct cam_tfe_hw_mgr_ctx *ctx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + uint32_t i; + bool res_rdi_context_set = false; + uint32_t primary_rdi_in_res; + uint32_t primary_rdi_out_res; + bool hw_id[CAM_TFE_HW_NUM_MAX] = {0}; + + primary_rdi_in_res = CAM_ISP_HW_TFE_IN_MAX; + primary_rdi_out_res = CAM_ISP_TFE_OUT_RES_MAX; + + if (!hw_mgr_priv || !start_isp) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + ctx = (struct cam_tfe_hw_mgr_ctx *) + start_isp->hw_config.ctxt_to_hw_map; + if (!ctx || !ctx->ctx_in_use) { + CAM_ERR(CAM_ISP, "Invalid context is used"); + return -EPERM; + } + + if ((!ctx->init_done) && start_isp->start_only) { + CAM_ERR(CAM_ISP, "Invalid args init_done %d start_only %d", + ctx->init_done, start_isp->start_only); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "Enter... 
ctx id:%d", + ctx->ctx_index); + + /* update Bandwidth should be done at the hw layer */ + + cam_tasklet_start(ctx->common.tasklet_info); + + if (ctx->init_done && start_isp->start_only) + goto start_only; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_id[hw_intf->hw_idx]) + continue; + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_TFE_CSID_SET_CSID_DEBUG, + &g_tfe_hw_mgr.debug_cfg.csid_debug, + sizeof(g_tfe_hw_mgr.debug_cfg.csid_debug)); + hw_id[hw_intf->hw_idx] = true; + } + } + + memset(&hw_id[0], 0, sizeof(hw_id)); + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_id[hw_intf->hw_idx]) + continue; + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_SET_CAMIF_DEBUG, + &g_tfe_hw_mgr.debug_cfg.camif_debug, + sizeof(g_tfe_hw_mgr.debug_cfg.camif_debug)); + hw_id[hw_intf->hw_idx] = true; + } + } + + rc = cam_tfe_hw_mgr_init_hw(ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Init failed"); + goto tasklet_stop; + } + + ctx->init_done = true; + + mutex_lock(&g_tfe_hw_mgr.ctx_mutex); + atomic_fetch_inc(&g_tfe_hw_mgr.active_ctx_cnt); + mutex_unlock(&g_tfe_hw_mgr.ctx_mutex); + + CAM_DBG(CAM_ISP, "start cdm interface"); + rc = cam_cdm_stream_on(ctx->cdm_handle); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start cdm (%d)", + ctx->cdm_handle); + goto deinit_hw; + } + +start_only: + /* Apply initial configuration */ + CAM_DBG(CAM_ISP, "Config HW"); + rc = cam_tfe_mgr_config_hw(hw_mgr_priv, &start_isp->hw_config); + if (rc) { + CAM_ERR(CAM_ISP, "Config HW failed"); + goto cdm_streamoff; + } + + CAM_DBG(CAM_ISP, "START TFE OUT ... 
in ctx id:%d", + ctx->ctx_index); + /* start the TFE out devices */ + for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++) { + hw_mgr_res = &ctx->res_list_tfe_out[i]; + switch (hw_mgr_res->res_id) { + case CAM_ISP_TFE_OUT_RES_RDI_0: + case CAM_ISP_TFE_OUT_RES_RDI_1: + case CAM_ISP_TFE_OUT_RES_RDI_2: + if (!res_rdi_context_set && ctx->is_rdi_only_context) { + hw_mgr_res->hw_res[0]->is_rdi_primary_res = + ctx->is_rdi_only_context; + res_rdi_context_set = true; + primary_rdi_out_res = hw_mgr_res->res_id; + } + } + + rc = cam_tfe_hw_mgr_start_hw_res( + &ctx->res_list_tfe_out[i], ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start TFE OUT (%d)", + i); + goto err; + } + } + + if (primary_rdi_out_res < CAM_ISP_TFE_OUT_RES_MAX) + primary_rdi_in_res = + cam_tfe_hw_mgr_convert_rdi_out_res_id_to_in_res( + primary_rdi_out_res); + + CAM_DBG(CAM_ISP, "START TFE IN ... in ctx id:%d", + ctx->ctx_index); + /* Start the TFE in resources devices */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + /* + * if rdi only context has two rdi resources then only one irq + * subscription should be sufficient + */ + if (primary_rdi_in_res == hw_mgr_res->res_id) + hw_mgr_res->hw_res[0]->is_rdi_primary_res = + ctx->is_rdi_only_context; + + rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start TFE in resource (%d)", + hw_mgr_res->res_id); + goto err; + } + } + + CAM_DBG(CAM_ISP, "START CSID HW ... 
in ctx id:%d", + ctx->ctx_index); + /* Start the TFE CSID HW devices */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) { + rc = cam_tfe_hw_mgr_start_hw_res(hw_mgr_res, ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Can not start TFE CSID (%d)", + hw_mgr_res->res_id); + goto err; + } + } + + return 0; + +err: + stop_isp.stop_only = false; + stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY; + stop_args.ctxt_to_hw_map = start_isp->hw_config.ctxt_to_hw_map; + stop_args.args = (void *)(&stop_isp); + + cam_tfe_mgr_stop_hw(hw_mgr_priv, &stop_args); + CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; + +cdm_streamoff: + cam_cdm_stream_off(ctx->cdm_handle); + +deinit_hw: + cam_tfe_hw_mgr_deinit_hw(ctx); + +tasklet_stop: + cam_tasklet_stop(ctx->common.tasklet_info); + + return rc; +} + +static int cam_tfe_mgr_read(void *hw_mgr_priv, void *read_args) +{ + return -EPERM; +} + +static int cam_tfe_mgr_write(void *hw_mgr_priv, void *write_args) +{ + return -EPERM; +} + +static int cam_tfe_mgr_user_dump_hw( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + struct cam_hw_dump_args *dump_args) +{ + int rc = 0; + struct cam_hw_soc_dump_args soc_dump_args; + + if (!tfe_ctx || !dump_args) { + CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK", + tfe_ctx, dump_args); + return -EINVAL; + } + soc_dump_args.buf_handle = dump_args->buf_handle; + soc_dump_args.request_id = dump_args->request_id; + soc_dump_args.offset = dump_args->offset; + + rc = cam_tfe_mgr_handle_reg_dump(tfe_ctx, + tfe_ctx->reg_dump_buf_desc, + tfe_ctx->num_reg_dump_buf, + CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR, + &soc_dump_args, + true); + if (rc) { + CAM_DBG(CAM_ISP, + "Dump failed req: %lld handle %u offset %u rc %d", + dump_args->request_id, + dump_args->buf_handle, + dump_args->offset, + rc); + return rc; + } + dump_args->offset = soc_dump_args.offset; + return rc; +} + +static int cam_tfe_mgr_dump(void *hw_mgr_priv, void *args) +{ + + struct cam_isp_hw_dump_args isp_hw_dump_args; + struct cam_hw_dump_args 
*dump_args = (struct cam_hw_dump_args *)args;
    struct cam_isp_hw_mgr_res        *hw_mgr_res;
    struct cam_hw_intf               *hw_intf;
    struct cam_tfe_hw_mgr_ctx *tfe_ctx = (struct cam_tfe_hw_mgr_ctx *)
        dump_args->ctxt_to_hw_map;
    int i;
    int rc = 0;

    /* for some targets, information about the TFE registers to be dumped
     * is already submitted with the hw manager. In this case, we
     * can dump just the related registers and skip going to core files.
     * If dump to this buffer falis due to any reason, fallback to dump
     * to the LDAR buffer
     */
    isp_hw_dump_args.is_dump_all = true;
    if (tfe_ctx->num_reg_dump_buf) {
        rc = cam_tfe_mgr_user_dump_hw(tfe_ctx, dump_args);
        if (!rc)
            isp_hw_dump_args.is_dump_all = false;
    }

    /* Map the user dump buffer for CPU access for the hw-level dumps */
    rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
        &isp_hw_dump_args.cpu_addr,
        &isp_hw_dump_args.buf_len);

    if (rc) {
        CAM_ERR(CAM_ISP, "Invalid handle %u rc %d",
            dump_args->buf_handle, rc);
        return rc;
    }

    isp_hw_dump_args.offset = dump_args->offset;
    isp_hw_dump_args.req_id = dump_args->request_id;

    /* Dump CSID resources: RDI paths only for RDI-only contexts, plus IPP */
    list_for_each_entry(hw_mgr_res, &tfe_ctx->res_list_tfe_csid, list) {
        for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
            if (!hw_mgr_res->hw_res[i])
                continue;
            hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
            switch (hw_mgr_res->hw_res[i]->res_id) {
            case CAM_TFE_CSID_PATH_RES_RDI_0:
            case CAM_TFE_CSID_PATH_RES_RDI_1:
            case CAM_TFE_CSID_PATH_RES_RDI_2:
                if (tfe_ctx->is_rdi_only_context &&
                    hw_intf->hw_ops.process_cmd) {
                    rc = hw_intf->hw_ops.process_cmd(
                        hw_intf->hw_priv,
                        CAM_ISP_HW_CMD_DUMP_HW,
                        &isp_hw_dump_args,
                        sizeof(struct
                            cam_isp_hw_dump_args));
                }
                break;

            case CAM_TFE_CSID_PATH_RES_IPP:
                if (hw_intf->hw_ops.process_cmd) {
                    rc = hw_intf->hw_ops.process_cmd(
                        hw_intf->hw_priv,
                        CAM_ISP_HW_CMD_DUMP_HW,
                        &isp_hw_dump_args,
                        sizeof(struct
                            cam_isp_hw_dump_args));
                }
                break;
            default:
                CAM_DBG(CAM_ISP, "not a valid res %d",
                    hw_mgr_res->res_id);
                break;
            }
        }
    }

    /* Dump TFE input resources with the same RDI/CAMIF selection policy */
    list_for_each_entry(hw_mgr_res, &tfe_ctx->res_list_tfe_in, list) {
        for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
            if (!hw_mgr_res->hw_res[i])
                continue;
            hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
            switch (hw_mgr_res->hw_res[i]->res_id) {
            case CAM_ISP_HW_TFE_IN_RDI0:
            case CAM_ISP_HW_TFE_IN_RDI1:
            case CAM_ISP_HW_TFE_IN_RDI2:
                if (tfe_ctx->is_rdi_only_context &&
                    hw_intf->hw_ops.process_cmd) {
                    rc = hw_intf->hw_ops.process_cmd(
                        hw_intf->hw_priv,
                        CAM_ISP_HW_CMD_DUMP_HW,
                        &isp_hw_dump_args,
                        sizeof(struct
                            cam_isp_hw_dump_args));
                }
                break;

            case CAM_ISP_HW_TFE_IN_CAMIF:
                if (hw_intf->hw_ops.process_cmd) {
                    rc = hw_intf->hw_ops.process_cmd(
                        hw_intf->hw_priv,
                        CAM_ISP_HW_CMD_DUMP_HW,
                        &isp_hw_dump_args,
                        sizeof(struct
                            cam_isp_hw_dump_args));
                }
                break;
            default:
                CAM_DBG(CAM_ISP, "not a valid res %d",
                    hw_mgr_res->res_id);
                break;
            }
        }
    }
    dump_args->offset = isp_hw_dump_args.offset;
    CAM_DBG(CAM_ISP, "offset %u", dump_args->offset);
    cam_mem_put_cpu_buf(dump_args->buf_handle);
    return rc;
}

/*
 * cam_tfe_mgr_reset - reset every CSID resource of the context, then every
 * TFE hw it is mapped onto, and clear the overflow_pending flag.
 */
static int cam_tfe_mgr_reset(void *hw_mgr_priv, void *hw_reset_args)
{
    struct cam_tfe_hw_mgr            *hw_mgr = hw_mgr_priv;
    struct cam_hw_reset_args         *reset_args = hw_reset_args;
    struct cam_tfe_hw_mgr_ctx        *ctx;
    struct cam_isp_hw_mgr_res        *hw_mgr_res;
    int                               rc = 0, i = 0;

    if (!hw_mgr_priv || !hw_reset_args) {
        CAM_ERR(CAM_ISP, "Invalid arguments");
        return -EINVAL;
    }

    ctx = (struct cam_tfe_hw_mgr_ctx *)reset_args->ctxt_to_hw_map;
    if (!ctx || !ctx->ctx_in_use) {
        CAM_ERR(CAM_ISP, "Invalid context is used");
        return -EPERM;
    }

    CAM_DBG(CAM_ISP, "Reset CSID and TFE");
    list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
        rc = cam_tfe_hw_mgr_reset_csid_res(hw_mgr_res);
        if (rc) {
            CAM_ERR(CAM_ISP, "Failed to reset CSID:%d rc: %d",
                hw_mgr_res->res_id, rc);
            goto end;
        }
    }

    for (i = 0; i < ctx->num_base; i++) {
        rc = cam_tfe_mgr_reset_tfe_hw(hw_mgr, ctx->base[i].idx);
        if (rc) {
            CAM_ERR(CAM_ISP, "Failed to reset TFE:%d rc: 
%d",
                ctx->base[i].idx, rc);
            goto end;
        }
    }

    atomic_set(&ctx->overflow_pending, 0);
end:
    return rc;
}

/*
 * cam_tfe_mgr_release_hw - release everything the context acquired:
 * deinit hw if needed, release hw resources and the CDM handle, then
 * scrub the context and return it to the free list.
 */
static int cam_tfe_mgr_release_hw(void *hw_mgr_priv,
    void *release_hw_args)
{
    int                               rc = 0;
    struct cam_hw_release_args       *release_args = release_hw_args;
    struct cam_tfe_hw_mgr            *hw_mgr = hw_mgr_priv;
    struct cam_tfe_hw_mgr_ctx        *ctx;
    uint32_t                          i;

    if (!hw_mgr_priv || !release_hw_args) {
        CAM_ERR(CAM_ISP, "Invalid arguments");
        return -EINVAL;
    }

    ctx = (struct cam_tfe_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
    if (!ctx || !ctx->ctx_in_use) {
        CAM_ERR(CAM_ISP, "Invalid context is used");
        return -EPERM;
    }

    CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
        ctx->ctx_index);

    if (ctx->init_done)
        cam_tfe_hw_mgr_deinit_hw(ctx);

    /* we should called the stop hw before this already */
    cam_tfe_hw_mgr_release_hw_for_ctx(ctx);

    /* reset base info */
    ctx->num_base = 0;
    memset(ctx->base, 0, sizeof(ctx->base));

    /* release cdm handle */
    cam_cdm_release(ctx->cdm_handle);

    /* clean context */
    list_del_init(&ctx->list);
    ctx->ctx_in_use = 0;
    ctx->is_rdi_only_context = 0;
    ctx->cdm_handle = 0;
    ctx->cdm_ops = NULL;
    ctx->init_done = false;
    ctx->is_dual = false;
    ctx->last_cdm_done_req = 0;
    CAM_MEM_FREE(ctx->tfe_bus_comp_grp);
    ctx->tfe_bus_comp_grp = NULL;
    atomic_set(&ctx->overflow_pending, 0);

    /* Clear the last submitted BL command record */
    for (i = 0; i < ctx->last_submit_bl_cmd.bl_count; i++) {
        ctx->last_submit_bl_cmd.cmd[i].mem_handle = 0;
        ctx->last_submit_bl_cmd.cmd[i].hw_addr = 0;
        ctx->last_submit_bl_cmd.cmd[i].len = 0;
        ctx->last_submit_bl_cmd.cmd[i].offset = 0;
        ctx->last_submit_bl_cmd.cmd[i].type = 0;
        ctx->last_submit_bl_cmd.cmd[i].input_len = 0;
    }
    ctx->last_submit_bl_cmd.bl_count = 0;
    ctx->packet = NULL;

    CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
        ctx->ctx_index);
    cam_tfe_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
    return rc;
}

/*
 * cam_isp_tfe_blob_hfr_update - translate an HFR config blob into hw
 * update commands written into the KMD buffer, one per configured port,
 * and append a single hw update entry covering all of them.
 */
static int cam_isp_tfe_blob_hfr_update(
    uint32_t                               blob_type,
    struct cam_isp_generic_blob_info      *blob_info,
    struct cam_isp_tfe_resource_hfr_config *hfr_config,
    struct cam_hw_prepare_update_args     *prepare)
{
    struct cam_isp_tfe_port_hfr_config    *port_hfr_config;
    struct cam_kmd_buf_info               *kmd_buf_info;
    struct cam_tfe_hw_mgr_ctx             *ctx = NULL;
    struct cam_isp_hw_mgr_res             *hw_mgr_res;
    struct cam_hw_intf                    *hw_intf;
    uint32_t                               res_id_out, i;
    uint32_t                               total_used_bytes = 0;
    uint32_t                               kmd_buf_remain_size;
    uint32_t                              *cmd_buf_addr;
    uint32_t                               bytes_used = 0;
    int                                    num_ent, rc = 0;

    ctx = prepare->ctxt_to_hw_map;
    CAM_DBG(CAM_ISP, "num_ports= %d", hfr_config->num_ports);

    /* Max one hw entries required for hfr config update */
    if (prepare->num_hw_update_entries + 1 >=
        prepare->max_hw_update_entries) {
        CAM_ERR(CAM_ISP, "Insufficient  HW entries :%d %d",
            prepare->num_hw_update_entries,
            prepare->max_hw_update_entries);
        return -EINVAL;
    }

    kmd_buf_info = blob_info->kmd_buf_info;
    for (i = 0; i < hfr_config->num_ports; i++) {
        port_hfr_config = &hfr_config->port_hfr_config_flex[i];
        /* Low byte of resource_type indexes the out resource table */
        res_id_out = port_hfr_config->resource_type & 0xFF;

        CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i,
            res_id_out);

        if (res_id_out >= CAM_TFE_HW_OUT_RES_MAX) {
            CAM_ERR(CAM_ISP, "invalid out restype:%x",
                port_hfr_config->resource_type);
            return -EINVAL;
        }

        if ((kmd_buf_info->used_bytes +
            total_used_bytes) < kmd_buf_info->size) {
            kmd_buf_remain_size = kmd_buf_info->size -
                (kmd_buf_info->used_bytes +
                total_used_bytes);
        } else {
            CAM_ERR(CAM_ISP,
                "no free kmd memory for base %d",
                blob_info->base_info->idx);
            rc = -ENOMEM;
            return rc;
        }

        /* cpu_addr is uint32_t*, so byte offsets are divided by 4 */
        cmd_buf_addr = kmd_buf_info->cpu_addr +
            kmd_buf_info->used_bytes/4 +
            total_used_bytes/4;
        hw_mgr_res = &ctx->res_list_tfe_out[res_id_out];

        if (!hw_mgr_res->hw_res[blob_info->base_info->split_id])
            return 0;

        hw_intf = cam_tfe_hw_mgr_get_hw_intf(blob_info->base_info, ctx);
        rc = cam_isp_add_cmd_buf_update(
            hw_mgr_res->hw_res[blob_info->base_info->split_id], hw_intf,
            blob_type, CAM_ISP_HW_CMD_GET_HFR_UPDATE,
            (void *)cmd_buf_addr,
            kmd_buf_remain_size,
            (void *)port_hfr_config,
            &bytes_used);
        if (rc < 0) {
            CAM_ERR(CAM_ISP,
                "Failed cmd_update, base_idx=%d, rc=%d",
                blob_info->base_info->idx, bytes_used);
            return rc;
        }

        total_used_bytes += bytes_used;
    }

    if (total_used_bytes) {
        /* Update the HW entries */
        num_ent = prepare->num_hw_update_entries;
        prepare->hw_update_entries[num_ent].handle =
            kmd_buf_info->handle;
        prepare->hw_update_entries[num_ent].len = total_used_bytes;
        prepare->hw_update_entries[num_ent].offset =
            kmd_buf_info->offset;
        num_ent++;
        kmd_buf_info->used_bytes += total_used_bytes;
        kmd_buf_info->offset += total_used_bytes;
        prepare->num_hw_update_entries = num_ent;
    }

    return rc;
}

/*
 * cam_isp_tfe_blob_csid_clock_update - push the single CSID clock rate from
 * the blob to every in-use CSID resource of the context.
 */
static int cam_isp_tfe_blob_csid_clock_update(
    uint32_t                               blob_type,
    struct cam_isp_generic_blob_info      *blob_info,
    struct cam_isp_tfe_csid_clock_config  *clock_config,
    struct cam_hw_prepare_update_args     *prepare)
{
    struct cam_tfe_hw_mgr_ctx             *ctx = NULL;
    struct cam_isp_hw_mgr_res             *hw_mgr_res;
    struct cam_hw_intf                    *hw_intf;
    struct cam_tfe_csid_clock_update_args  csid_clock_upd_args;
    uint64_t                               clk_rate = 0;
    int                                    rc = -EINVAL;
    uint32_t                               i;

    ctx = prepare->ctxt_to_hw_map;

    CAM_DBG(CAM_ISP, "csid clk=%llu", clock_config->csid_clock);

    list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
        for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
            clk_rate = 0;
            if (!hw_mgr_res->hw_res[i])
                continue;
            clk_rate = clock_config->csid_clock;
            hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
            if (hw_intf && hw_intf->hw_ops.process_cmd) {
                csid_clock_upd_args.clk_rate = clk_rate;
                CAM_DBG(CAM_ISP, "i= %d csid clk=%llu",
                    i, csid_clock_upd_args.clk_rate);

                rc = hw_intf->hw_ops.process_cmd(
                    hw_intf->hw_priv,
                    CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE,
                    &csid_clock_upd_args,
                    sizeof(
                    struct cam_tfe_csid_clock_update_args));
                if (rc)
                    CAM_ERR(CAM_ISP, "Clock Update failed");
            } else
CAM_ERR(CAM_ISP, "NULL hw_intf!"); + } + } + + return rc; +} + +static int cam_isp_tfe_blob_clock_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_tfe_clock_config *clock_config, + struct cam_hw_prepare_update_args *prepare) +{ + struct cam_tfe_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_tfe_clock_update_args clock_upd_args; + uint64_t clk_rate = 0; + int rc = -EINVAL; + uint32_t i; + uint32_t j; + bool camif_l_clk_updated = false; + bool camif_r_clk_updated = false; + + ctx = prepare->ctxt_to_hw_map; + + CAM_DBG(CAM_PERF, + "usage=%u left_clk= %lu right_clk=%lu", + clock_config->usage_type, + clock_config->left_pix_hz, + clock_config->right_pix_hz); + + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + clk_rate = 0; + if (!hw_mgr_res->hw_res[i]) + continue; + + if (hw_mgr_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) { + if (i == CAM_ISP_HW_SPLIT_LEFT) { + if (camif_l_clk_updated) + continue; + + clk_rate = + clock_config->left_pix_hz; + + camif_l_clk_updated = true; + } else { + if (camif_r_clk_updated) + continue; + + clk_rate = + clock_config->right_pix_hz; + + camif_r_clk_updated = true; + } + } else if ((hw_mgr_res->res_id >= + CAM_ISP_HW_TFE_IN_RDI0) && (hw_mgr_res->res_id + <= CAM_ISP_HW_TFE_IN_RDI2)) { + for (j = 0; j < clock_config->num_rdi; j++) + clk_rate = max(clock_config->rdi_hz_flex[j], + clk_rate); + } else { + CAM_ERR(CAM_ISP, "Invalid res_id %u", + hw_mgr_res->res_id); + rc = -EINVAL; + return rc; + } + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + clock_upd_args.node_res = + hw_mgr_res->hw_res[i]; + CAM_DBG(CAM_ISP, + "res_id=%u i= %d clk=%llu", + hw_mgr_res->res_id, i, clk_rate); + + clock_upd_args.clk_rate = clk_rate; + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CLOCK_UPDATE, + &clock_upd_args, + sizeof( 
+ struct cam_tfe_clock_update_args)); + if (rc) + CAM_ERR(CAM_ISP, "Clock Update failed"); + } else + CAM_WARN(CAM_ISP, "NULL hw_intf!"); + } + } + + return rc; +} + +static int cam_isp_tfe_blob_bw_limit_update( + uint32_t blob_type, + struct cam_isp_generic_blob_info *blob_info, + struct cam_isp_tfe_out_rsrc_bw_limiter_config *bw_limit_cfg, + struct cam_hw_prepare_update_args *prepare, + enum cam_isp_hw_type hw_type) +{ + struct cam_isp_tfe_wm_bw_limiter_config *wm_bw_limit_cfg; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_tfe_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf = NULL; + uint32_t res_id_out, i; + uint32_t total_used_bytes = 0; + uint32_t kmd_buf_remain_size; + uint32_t *cmd_buf_addr; + uint32_t bytes_used = 0; + int num_ent, rc = 0; + + ctx = prepare->ctxt_to_hw_map; + + if ((prepare->num_hw_update_entries + 1) >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries: %d max: %d", + prepare->num_hw_update_entries, + prepare->max_hw_update_entries); + return -EINVAL; + } + + kmd_buf_info = blob_info->kmd_buf_info; + for (i = 0; i < bw_limit_cfg->num_ports; i++) { + wm_bw_limit_cfg = &bw_limit_cfg->bw_limiter_config_flex[i]; + res_id_out = wm_bw_limit_cfg->res_type & 0xFF; + + CAM_DBG(CAM_ISP, "%s BW limit config idx: %d port: 0x%x enable: %d [0x%x:0x%x]", + "TFE", i, wm_bw_limit_cfg->res_type, wm_bw_limit_cfg->enable_limiter, + wm_bw_limit_cfg->counter_limit[0], wm_bw_limit_cfg->counter_limit[1]); + + if ((kmd_buf_info->used_bytes + + total_used_bytes) < kmd_buf_info->size) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + total_used_bytes); + } else { + CAM_ERR(CAM_ISP, + "No free kmd memory for base idx: %d", + blob_info->base_info->idx); + rc = -ENOMEM; + return rc; + } + + cmd_buf_addr = kmd_buf_info->cpu_addr + + (kmd_buf_info->used_bytes / 4) + + (total_used_bytes / 4); + + hw_mgr_res = &ctx->res_list_tfe_out[res_id_out]; + + hw_intf = 
cam_tfe_hw_mgr_get_hw_intf(blob_info->base_info, ctx); + rc = cam_isp_add_cmd_buf_update( + hw_mgr_res->hw_res[blob_info->base_info->split_id], + hw_intf, + blob_type, + CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG, + (void *)cmd_buf_addr, + kmd_buf_remain_size, + (void *)wm_bw_limit_cfg, + &bytes_used); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Failed to update %s BW limiter config for res:0x%x enable:%d [0x%x:0x%x] base_idx:%d bytes_used:%u rc:%d", + "VFE", wm_bw_limit_cfg->res_type, wm_bw_limit_cfg->enable_limiter, + wm_bw_limit_cfg->counter_limit[0], + wm_bw_limit_cfg->counter_limit[1], + blob_info->base_info->idx, bytes_used, rc); + return rc; + } + + total_used_bytes += bytes_used; + } + + if (total_used_bytes) { + /* Update the HW entries */ + num_ent = prepare->num_hw_update_entries; + prepare->hw_update_entries[num_ent].handle = + kmd_buf_info->handle; + prepare->hw_update_entries[num_ent].len = total_used_bytes; + prepare->hw_update_entries[num_ent].offset = + kmd_buf_info->offset; + num_ent++; + kmd_buf_info->used_bytes += total_used_bytes; + kmd_buf_info->offset += total_used_bytes; + prepare->num_hw_update_entries = num_ent; + } + return rc; +} + +static inline int cam_isp_tfe_validate_bw_limiter_blob( + uint32_t blob_size, + struct cam_isp_tfe_out_rsrc_bw_limiter_config *bw_limit_config) +{ + /* Check for blob version */ + if (bw_limit_config->version != CAM_TFE_BW_LIMITER_CONFIG_V1) { + CAM_ERR(CAM_ISP, "Invalid Blob config version:%d", bw_limit_config->version); + return -EINVAL; + } + /* Check for number of out ports*/ + if (bw_limit_config->num_ports > CAM_TFE_HW_OUT_RES_MAX) { + CAM_ERR(CAM_ISP, "Invalid num_ports:%u", bw_limit_config->num_ports); + return -EINVAL; + } + /* Check for integer overflow */ + if (bw_limit_config->num_ports != 1) { + if (sizeof(struct cam_isp_tfe_wm_bw_limiter_config) > ((UINT_MAX - + sizeof(struct cam_isp_tfe_out_rsrc_bw_limiter_config)) / + (bw_limit_config->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in bw limit 
config num_ports:%u size per port:%lu", + bw_limit_config->num_ports, + sizeof(struct cam_isp_tfe_wm_bw_limiter_config)); + return -EINVAL; + } + } + if (blob_size < (sizeof(struct cam_isp_tfe_out_rsrc_bw_limiter_config) + + (bw_limit_config->num_ports - 1) * + sizeof(struct cam_isp_tfe_wm_bw_limiter_config))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu", + blob_size, sizeof(struct cam_isp_tfe_out_rsrc_bw_limiter_config) + + (bw_limit_config->num_ports - 1) * + sizeof(struct cam_isp_tfe_wm_bw_limiter_config)); + return -EINVAL; + } + return 0; +} + +static int cam_isp_tfe_packet_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + int rc = 0, i; + struct cam_isp_generic_blob_info *blob_info = user_data; + struct cam_hw_prepare_update_args *prepare = NULL; + struct cam_tfe_hw_mgr_ctx *tfe_mgr_ctx = NULL; + + if (!blob_data || (blob_size == 0) || !blob_info) { + CAM_ERR(CAM_ISP, "Invalid args data %pK size %d info %pK", + blob_data, blob_size, blob_info); + return -EINVAL; + } + + prepare = blob_info->prepare; + if (!prepare) { + CAM_ERR(CAM_ISP, "Failed. 
prepare is NULL, blob_type %d", + blob_type); + return -EINVAL; + } + + tfe_mgr_ctx = prepare->ctxt_to_hw_map; + CAM_DBG(CAM_ISP, "BLOB Type: %d", blob_type); + switch (blob_type) { + case CAM_ISP_TFE_GENERIC_BLOB_TYPE_HFR_CONFIG: { + struct cam_isp_tfe_resource_hfr_config *hfr_config = + (struct cam_isp_tfe_resource_hfr_config *)blob_data; + + if (blob_size < + sizeof(struct cam_isp_tfe_resource_hfr_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size); + return -EINVAL; + } + + if (hfr_config->num_ports > CAM_ISP_TFE_OUT_RES_MAX) { + CAM_ERR(CAM_ISP, "Invalid num_ports %u in hfr config", + hfr_config->num_ports); + return -EINVAL; + } + + /* Check for integer overflow */ + if (hfr_config->num_ports > 1) { + if (sizeof(struct cam_isp_tfe_resource_hfr_config) > + ((UINT_MAX - + sizeof(struct cam_isp_tfe_resource_hfr_config)) + / (hfr_config->num_ports - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in hfr config num_ports:%u size per port:%lu", + hfr_config->num_ports, + sizeof(struct + cam_isp_tfe_resource_hfr_config)); + return -EINVAL; + } + } + + if ((hfr_config->num_ports != 0) && (blob_size < + (sizeof(struct cam_isp_tfe_resource_hfr_config) + + (hfr_config->num_ports - 1) * + sizeof(struct cam_isp_tfe_port_hfr_config)))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu", + blob_size, + sizeof(struct cam_isp_tfe_resource_hfr_config) + + (hfr_config->num_ports - 1) * + sizeof(struct cam_isp_tfe_resource_hfr_config)); + return -EINVAL; + } + + rc = cam_isp_tfe_blob_hfr_update(blob_type, blob_info, + hfr_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "HFR Update Failed"); + } + break; + case CAM_ISP_TFE_GENERIC_BLOB_TYPE_CLOCK_CONFIG: { + struct cam_isp_tfe_clock_config *clock_config = + (struct cam_isp_tfe_clock_config *)blob_data; + + if (blob_size < sizeof(struct cam_isp_tfe_clock_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size); + return -EINVAL; + } + + if (clock_config->num_rdi > CAM_TFE_RDI_NUM_MAX) { + CAM_ERR(CAM_ISP, 
"Invalid num_rdi %u in clock config", + clock_config->num_rdi); + return -EINVAL; + } + /* Check integer overflow */ + if (clock_config->num_rdi > 1) { + if (sizeof(uint64_t) > ((UINT_MAX- + sizeof(struct cam_isp_tfe_clock_config))/ + (clock_config->num_rdi - 1))) { + CAM_ERR(CAM_ISP, + "Max size exceeded in clock config num_rdi:%u size per port:%lu", + clock_config->num_rdi, + sizeof(uint64_t)); + return -EINVAL; + } + } + + if ((clock_config->num_rdi != 0) && (blob_size < + (sizeof(struct cam_isp_tfe_clock_config) + + sizeof(uint64_t) * (clock_config->num_rdi - 1)))) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu", + blob_size, + sizeof(uint32_t) * 2 + sizeof(uint64_t) * + (clock_config->num_rdi + 2)); + return -EINVAL; + } + + rc = cam_isp_tfe_blob_clock_update(blob_type, blob_info, + clock_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "Clock Update Failed"); + } + break; + case CAM_ISP_TFE_GENERIC_BLOB_TYPE_BW_CONFIG_V2: { + struct cam_isp_tfe_bw_config_v2 *bw_config = + (struct cam_isp_tfe_bw_config_v2 *)blob_data; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + struct cam_cpas_axi_per_path_bw_vote *path_vote; + + if (blob_size < sizeof(struct cam_isp_tfe_bw_config_v2)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size); + return -EINVAL; + } + + if ((bw_config->num_paths > CAM_ISP_MAX_PER_PATH_VOTES) || + !bw_config->num_paths) { + CAM_ERR(CAM_ISP, "Invalid num paths %d", + bw_config->num_paths); + return -EINVAL; + } + + /* Check for integer overflow */ + if (bw_config->num_paths > 1) { + if (sizeof(struct cam_axi_per_path_bw_vote) > + ((UINT_MAX - + sizeof(struct cam_isp_tfe_bw_config_v2)) / + (bw_config->num_paths - 1))) { + CAM_ERR(CAM_ISP, + "Size exceeds limit paths:%u size per path:%lu", + bw_config->num_paths - 1, + sizeof( + struct cam_axi_per_path_bw_vote)); + return -EINVAL; + } + } + + if ((bw_config->num_paths != 0) && (blob_size < + (sizeof(struct cam_isp_tfe_bw_config_v2) + + ((bw_config->num_paths - 1) * + 
sizeof(struct cam_axi_per_path_bw_vote))))) { + CAM_ERR(CAM_ISP, + "Invalid blob size: %u, num_paths: %u, bw_config size: %lu, per_path_vote size: %lu", + blob_size, bw_config->num_paths, + sizeof(struct cam_isp_tfe_bw_config_v2), + sizeof(struct cam_axi_per_path_bw_vote)); + return -EINVAL; + } + + if (!prepare || !prepare->priv || + (bw_config->usage_type >= CAM_TFE_HW_NUM_MAX)) { + CAM_ERR(CAM_ISP, "Invalid inputs"); + return -EINVAL; + } + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) prepare->priv; + memset(&prepare_hw_data->bw_clk_config.bw_config_v2, + 0, sizeof(prepare_hw_data->bw_clk_config.bw_config_v2)); + prepare_hw_data->bw_clk_config.bw_config_v2.usage_type = bw_config->usage_type; + prepare_hw_data->bw_clk_config.bw_config_v2.num_paths = bw_config->num_paths; + + for (i = 0; i < bw_config->num_paths; i++) { + path_vote = &prepare_hw_data->bw_clk_config.bw_config_v2.axi_path[i]; + path_vote->usage_data = bw_config->axi_path_flex[i].usage_data; + path_vote->transac_type = bw_config->axi_path_flex[i].transac_type; + path_vote->path_data_type = bw_config->axi_path_flex[i].path_data_type; + path_vote->vote_level = 0; + path_vote->camnoc_bw = bw_config->axi_path_flex[i].camnoc_bw; + path_vote->mnoc_ab_bw = bw_config->axi_path_flex[i].mnoc_ab_bw; + path_vote->mnoc_ib_bw = bw_config->axi_path_flex[i].mnoc_ib_bw; + } + + tfe_mgr_ctx->bw_config_version = CAM_ISP_BW_CONFIG_V2; + prepare_hw_data->bw_clk_config.bw_config_valid = true; + } + break; + case CAM_ISP_TFE_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: { + struct cam_isp_tfe_csid_clock_config *clock_config = + (struct cam_isp_tfe_csid_clock_config *)blob_data; + + if (blob_size < sizeof(struct cam_isp_tfe_csid_clock_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u", + blob_size, + sizeof(struct cam_isp_tfe_csid_clock_config)); + return -EINVAL; + } + rc = cam_isp_tfe_blob_csid_clock_update(blob_type, blob_info, + clock_config, prepare); + if (rc) + CAM_ERR(CAM_ISP, "Clock Update 
Failed"); + } + break; + case CAM_ISP_TFE_GENERIC_BLOB_TYPE_BW_LIMITER_CFG: { + struct cam_isp_tfe_out_rsrc_bw_limiter_config *bw_limit_config; + + if (blob_size < + sizeof(struct cam_isp_tfe_out_rsrc_bw_limiter_config)) { + CAM_ERR(CAM_ISP, "Invalid blob size %u expected %lu", + blob_size, + sizeof(struct cam_isp_tfe_out_rsrc_bw_limiter_config)); + return -EINVAL; + } + + bw_limit_config = (struct cam_isp_tfe_out_rsrc_bw_limiter_config *)blob_data; + rc = cam_isp_tfe_validate_bw_limiter_blob(blob_size, bw_limit_config); + if (rc) + return rc; + + rc = cam_isp_tfe_blob_bw_limit_update(blob_type, blob_info, + bw_limit_config, prepare, CAM_ISP_HW_TYPE_TFE); + if (rc) + CAM_ERR(CAM_ISP, "BW limit update failed for TFE rc: %d", rc); + } + break; + default: + CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type); + break; + } + + return rc; +} + +static int cam_tfe_update_dual_config( + struct cam_cmd_buf_desc *cmd_desc, + uint32_t split_id, + uint32_t base_idx, + struct cam_isp_hw_mgr_res *res_list_isp_out, + uint32_t size_isp_out) +{ + int rc = -EINVAL; + struct cam_isp_tfe_dual_config *dual_config; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *res; + struct cam_tfe_dual_update_args dual_isp_update_args; + uint32_t outport_id; + size_t len = 0, remain_len = 0; + uint32_t *cpu_addr; + uint32_t *cpu_addr_local = NULL; + uint32_t i, j, stp_index; + size_t packet_size = 0; + + CAM_DBG(CAM_ISP, "cmd des size %d, length: %d", + cmd_desc->size, cmd_desc->length); + + rc = cam_packet_util_get_cmd_mem_addr( + cmd_desc->mem_handle, &cpu_addr, &len); + if (rc) { + CAM_DBG(CAM_ISP, "unable to get cmd mem addr handle:0x%x", + cmd_desc->mem_handle); + return rc; + } + + if ((len < sizeof(struct cam_isp_tfe_dual_config)) || + (cmd_desc->offset >= + (len - sizeof(struct cam_isp_tfe_dual_config)))) { + CAM_ERR(CAM_ISP, "not enough buffer provided"); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return -EINVAL; + } + + remain_len = len - cmd_desc->offset; + 
cpu_addr += (cmd_desc->offset / 4); + packet_size = cmd_desc->length; + + if (packet_size <= remain_len) { + rc = cam_common_mem_kdup((void **)&cpu_addr_local, + cpu_addr, packet_size); + if (rc) { + CAM_ERR(CAM_ISP, "Alloc and copy cmd desc fail"); + goto put_ref; + } + } else { + CAM_ERR(CAM_ISP, "Invalid packet header size %u", + packet_size); + rc = -EINVAL; + goto put_ref; + } + + dual_config = (struct cam_isp_tfe_dual_config *)cpu_addr_local; + + if ((dual_config->num_ports * + sizeof(struct cam_isp_tfe_dual_stripe_config)) > + (remain_len - + offsetof(struct cam_isp_tfe_dual_config, stripes))) { + CAM_ERR(CAM_ISP, "not enough buffer for all the dual configs"); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_ISP, "num_ports:%d", dual_config->num_ports); + if (dual_config->num_ports >= size_isp_out) { + CAM_ERR(CAM_UTIL, + "inval num ports %d max num tfe ports:%d", + dual_config->num_ports, size_isp_out); + rc = -EINVAL; + goto end; + } + + for (i = 0; i < dual_config->num_ports; i++) { + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + stp_index = (i * CAM_PACKET_MAX_PLANES) + + (j * (CAM_PACKET_MAX_PLANES * + dual_config->num_ports)); + + if (!dual_config->stripes_flex[stp_index].port_id) + continue; + + outport_id = dual_config->stripes_flex[stp_index].port_id; + if (outport_id >= size_isp_out) { + CAM_ERR(CAM_UTIL, + "inval outport id:%d i:%d j:%d num ports:%d ", + outport_id, i, j, + dual_config->num_ports); + rc = -EINVAL; + goto end; + } + + hw_mgr_res = &res_list_isp_out[outport_id]; + if (!hw_mgr_res->hw_res[j]) + continue; + + if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx) + continue; + + res = hw_mgr_res->hw_res[j]; + + if (res->res_id < CAM_ISP_TFE_OUT_RES_BASE || + res->res_id >= CAM_ISP_TFE_OUT_RES_MAX) { + CAM_DBG(CAM_ISP, "res id :%d", res->res_id); + continue; + } + + dual_isp_update_args.split_id = j; + dual_isp_update_args.res = res; + dual_isp_update_args.stripe_config = + 
&dual_config->stripes_flex[stp_index]; + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_STRIPE_UPDATE, + &dual_isp_update_args, + sizeof(struct cam_tfe_dual_update_args)); + if (rc) + goto end; + } + } +end: + cam_common_mem_free(cpu_addr_local); +put_ref: + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; +} + +int cam_tfe_add_command_buffers( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + struct cam_isp_ctx_base_info *base_info, + cam_packet_generic_blob_handler blob_handler_cb, + struct cam_isp_hw_mgr_res *res_list_isp_out, + uint32_t size_isp_out) +{ + int rc = 0; + uint32_t cmd_meta_data, num_ent, i; + uint32_t base_idx; + enum cam_isp_hw_split_id split_id; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_hw_update_entry *hw_entry = NULL; + + split_id = base_info->split_id; + base_idx = base_info->idx; + hw_entry = prepare->hw_update_entries; + /* + * set the cmd_desc to point the first command descriptor in the + * packet + */ + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint8_t *)&prepare->packet->payload_flex + + prepare->packet->cmd_buf_offset); + + CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d", + split_id, prepare->packet->num_cmd_buf); + + for (i = 0; i < prepare->packet->num_cmd_buf; i++) { + num_ent = prepare->num_hw_update_entries; + if (!cmd_desc[i].length) + continue; + + /* One hw entry space required for left or right or common */ + if (num_ent + 1 >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d", + num_ent, prepare->max_hw_update_entries); + return -EINVAL; + } + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + cmd_meta_data = cmd_desc[i].meta_data; + + CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d", + cmd_meta_data, split_id); + + switch (cmd_meta_data) { + case CAM_ISP_TFE_PACKET_META_BASE: + case CAM_ISP_TFE_PACKET_META_LEFT: + if (split_id == 
CAM_ISP_HW_SPLIT_LEFT) { + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = + cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + CAM_DBG(CAM_ISP, + "Meta_Left num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + + num_ent++; + } + break; + case CAM_ISP_TFE_PACKET_META_RIGHT: + if (split_id == CAM_ISP_HW_SPLIT_RIGHT) { + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = + cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + CAM_DBG(CAM_ISP, + "Meta_Right num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + + num_ent++; + } + break; + case CAM_ISP_TFE_PACKET_META_COMMON: + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = + cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + CAM_DBG(CAM_ISP, + "Meta_Common num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + if (cmd_meta_data == CAM_ISP_PACKET_META_DMI_COMMON) + hw_entry[num_ent].flags = 0x1; + + num_ent++; + break; + case CAM_ISP_TFE_PACKET_META_DUAL_CONFIG: + + rc = cam_tfe_update_dual_config(&cmd_desc[i], split_id, + base_idx, res_list_isp_out, size_isp_out); + + if (rc) + return rc; + break; + case CAM_ISP_TFE_PACKET_META_GENERIC_BLOB_COMMON: { + struct cam_isp_generic_blob_info blob_info; + + prepare->num_hw_update_entries = num_ent; + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing 
blobs %d", rc); + return rc; + } + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_FLUSH: + case CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR: + case CAM_ISP_TFE_PACKET_META_REG_DUMP_PER_REQUEST: + if (split_id == CAM_ISP_HW_SPLIT_LEFT) { + if (prepare->num_reg_dump_buf >= + CAM_REG_DUMP_MAX_BUF_ENTRIES) { + CAM_ERR(CAM_ISP, + "Descriptor count out of bounds: %d", + prepare->num_reg_dump_buf); + return -EINVAL; + } + prepare->reg_dump_buf_desc[ + prepare->num_reg_dump_buf] = + cmd_desc[i]; + prepare->num_reg_dump_buf++; + CAM_DBG(CAM_ISP, + "Added command buffer: %d desc_count: %d", + cmd_desc[i].meta_data, + prepare->num_reg_dump_buf); + } + break; + default: + CAM_ERR(CAM_ISP, "invalid cdm command meta data %d", + cmd_meta_data); + return -EINVAL; + } + prepare->num_hw_update_entries = num_ent; + } + + return rc; +} + +static int cam_tfe_mgr_prepare_hw_update(void *hw_mgr_priv, + void *prepare_hw_update_args) +{ + int rc = 0; + struct cam_hw_prepare_update_args *prepare = + (struct cam_hw_prepare_update_args *) prepare_hw_update_args; + struct cam_tfe_hw_mgr_ctx *ctx; + struct cam_tfe_hw_mgr *hw_mgr; + uint32_t i; + bool fill_fence = true; + struct cam_isp_prepare_hw_update_data *prepare_hw_data; + struct cam_isp_frame_header_info frame_header_info; + struct cam_isp_change_base_args change_base_info = {0}; + struct cam_isp_check_io_cfg_for_scratch check_for_scratch = {0}; + struct cam_isp_io_buf_info io_buf_info = {0}; + + if (!hw_mgr_priv || !prepare_hw_update_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + CAM_DBG(CAM_REQ, "Enter for req_id %lld", + prepare->packet->header.request_id); + + prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) + prepare->priv; + + ctx = (struct cam_tfe_hw_mgr_ctx *) prepare->ctxt_to_hw_map; + hw_mgr = (struct cam_tfe_hw_mgr *)hw_mgr_priv; + + rc = cam_packet_util_validate_packet(prepare->packet, + 
prepare->remain_len); + if (rc) + return rc; + + /* Pre parse the packet*/ + rc = cam_packet_util_get_kmd_buffer(prepare->packet, &prepare_hw_data->kmd_cmd_buff_info); + if (rc) + return rc; + + rc = cam_packet_util_process_patches(prepare->packet, + prepare->buf_tracker, hw_mgr->mgr_common.cmd_iommu_hdl, + hw_mgr->mgr_common.cmd_iommu_hdl_secure, false); + if (rc) { + CAM_ERR(CAM_ISP, "Patch ISP packet failed."); + return rc; + } + + prepare->num_hw_update_entries = 0; + prepare->num_in_map_entries = 0; + prepare->num_out_map_entries = 0; + prepare->num_reg_dump_buf = 0; + + for (i = 0; i < ctx->num_base; i++) { + CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i); + + CAM_DBG(CAM_ISP, + "change base i=%d, idx=%d", + i, ctx->base[i].idx); + + change_base_info.base_idx = ctx->base[i].idx; + change_base_info.cdm_id = CAM_CDM_MAX; + + /* Add change base */ + rc = cam_isp_add_change_base(prepare, &ctx->res_list_tfe_in, + &change_base_info, &prepare_hw_data->kmd_cmd_buff_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in change base i=%d, idx=%d, rc=%d", + i, ctx->base[i].idx, rc); + goto end; + } + + + /* get command buffers */ + if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) { + rc = cam_tfe_add_command_buffers(prepare, + &prepare_hw_data->kmd_cmd_buff_info, + &ctx->base[i], + cam_isp_tfe_packet_generic_blob_handler, + ctx->res_list_tfe_out, CAM_TFE_HW_OUT_RES_MAX); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in add cmdbuf, i=%d, split_id=%d, rc=%d", + i, ctx->base[i].split_id, rc); + goto end; + } + } + + memset(&frame_header_info, 0, + sizeof(struct cam_isp_frame_header_info)); + frame_header_info.frame_header_enable = false; + + /* get IO buffers */ + io_buf_info.frame_hdr = &frame_header_info; + io_buf_info.scratch_check_cfg = &check_for_scratch; + io_buf_info.prepare = prepare; + io_buf_info.kmd_buf_info = &prepare_hw_data->kmd_cmd_buff_info; + io_buf_info.res_list_in_rd = NULL; + io_buf_info.iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl; + 
io_buf_info.sec_iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl_secure; + io_buf_info.base = &ctx->base[i]; + io_buf_info.fill_fence = fill_fence; + io_buf_info.out_base = CAM_ISP_TFE_OUT_RES_BASE; + io_buf_info.out_max = CAM_TFE_HW_OUT_RES_MAX; + io_buf_info.res_list_isp_out = ctx->res_list_tfe_out; + rc = cam_isp_add_io_buffers(&io_buf_info); + + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in io buffers, i=%d, rc=%d", + i, rc); + goto end; + } + + /* fence map table entries need to fill only once in the loop */ + if (fill_fence) + fill_fence = false; + } + + CAM_DBG(CAM_ISP, + "num_reg_dump_buf=%d ope code:%d", + prepare->num_reg_dump_buf, prepare->packet->header.op_code); + + /* reg update will be done later for the initial configure */ + if (((prepare->packet->header.op_code) & 0xF) == + CAM_ISP_PACKET_INIT_DEV) { + prepare_hw_data->packet_opcode_type = + CAM_ISP_TFE_PACKET_INIT_DEV; + + if ((!prepare->num_reg_dump_buf) || (prepare->num_reg_dump_buf > + CAM_REG_DUMP_MAX_BUF_ENTRIES)) + goto end; + + if (!ctx->num_reg_dump_buf) { + ctx->num_reg_dump_buf = + prepare->num_reg_dump_buf; + memcpy(ctx->reg_dump_buf_desc, + prepare->reg_dump_buf_desc, + sizeof(struct cam_cmd_buf_desc) * + prepare->num_reg_dump_buf); + /* + * save the address for error/flush cases to avoid + * invoking mutex(cpu get/put buf) in tasklet/atomic context. 
+ */ + for (i = 0; i < ctx->num_reg_dump_buf; i++) { + rc = cam_mem_get_cpu_buf(ctx->reg_dump_buf_desc[i].mem_handle, + &(ctx->reg_dump_cmd_buf_addr_len[i].cpu_addr), + &(ctx->reg_dump_cmd_buf_addr_len[i].buf_size)); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in Get cpu addr, rc=%d,i=%d cpu_addr=%pK", + rc, i, + (void *)ctx->reg_dump_cmd_buf_addr_len[i].cpu_addr); + for (--i; i >= 0; i--) + cam_mem_put_cpu_buf( + ctx->reg_dump_buf_desc[i].mem_handle); + ctx->skip_reg_dump_buf_put = true; + return rc; + } + } + } else { + prepare_hw_data->num_reg_dump_buf = + prepare->num_reg_dump_buf; + memcpy(prepare_hw_data->reg_dump_buf_desc, + prepare->reg_dump_buf_desc, + sizeof(struct cam_cmd_buf_desc) * + prepare_hw_data->num_reg_dump_buf); + } + + goto end; + } else { + prepare_hw_data->packet_opcode_type = CAM_ISP_TFE_PACKET_CONFIG_DEV; + prepare_hw_data->num_reg_dump_buf = prepare->num_reg_dump_buf; + if ((prepare_hw_data->num_reg_dump_buf) && + (prepare_hw_data->num_reg_dump_buf < CAM_REG_DUMP_MAX_BUF_ENTRIES)) { + memcpy(prepare_hw_data->reg_dump_buf_desc, + prepare->reg_dump_buf_desc, + sizeof(struct cam_cmd_buf_desc) * + prepare_hw_data->num_reg_dump_buf); + } + } + + /* add reg update commands */ + for (i = 0; i < ctx->num_base; i++) { + change_base_info.base_idx = ctx->base[i].idx; + change_base_info.cdm_id = CAM_CDM_MAX; + /* Add change base */ + rc = cam_isp_add_change_base(prepare, &ctx->res_list_tfe_in, + &change_base_info, &prepare_hw_data->kmd_cmd_buff_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in change base adding reg_update cmd i=%d, idx=%d, rc=%d", + i, ctx->base[i].idx, rc); + goto end; + } + + /*Add reg update */ + rc = cam_isp_add_reg_update(prepare, &ctx->res_list_tfe_in, + ctx->base[i].idx, &prepare_hw_data->kmd_cmd_buff_info, false); + if (rc) { + CAM_ERR(CAM_ISP, + "Add Reg_update cmd Failed i=%d, idx=%d, rc=%d", + i, ctx->base[i].idx, rc); + goto end; + } + } + + if (prepare_hw_data->kmd_cmd_buff_info.size <= + 
prepare_hw_data->kmd_cmd_buff_info.used_bytes) { + CAM_ERR(CAM_ISP, "No Sufficient memory for the Gen IRQ command"); + rc = -ENOMEM; + } + +end: + return rc; +} + +static int cam_tfe_mgr_resume_hw(struct cam_tfe_hw_mgr_ctx *ctx) +{ + return cam_tfe_mgr_bw_control(ctx, CAM_TFE_BW_CONTROL_INCLUDE); +} + +static int cam_tfe_mgr_cmd_get_last_consumed_addr( + struct cam_tfe_hw_mgr_ctx *ctx, + struct cam_isp_hw_done_event_data *done) +{ + int i, rc = -EINVAL; + uint32_t res_id_out; + struct cam_isp_resource_node *res; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct list_head *res_list_isp_src; + + res_id_out = done->resource_handle & 0xFF; + + if (res_id_out >= CAM_TFE_HW_OUT_RES_MAX) { + CAM_ERR(CAM_ISP, "Invalid out resource id :%x", + res_id); + return; + } + + hw_mgr_res = + &ctx->res_list_tfe_out[res_id_out]; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + res = hw_mgr_res->hw_res[i]; + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR, + done, sizeof(struct cam_isp_hw_done_event_data)); + + return rc; + } + + return rc; +} + +static int cam_tfe_mgr_sof_irq_debug( + struct cam_tfe_hw_mgr_ctx *ctx, + uint32_t sof_irq_enable) +{ + int rc = 0; + uint32_t i = 0; + struct cam_isp_hw_mgr_res *hw_mgr_res = NULL; + struct cam_hw_intf *hw_intf = NULL; + struct cam_isp_resource_node *rsrc_node = NULL; + + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + rc |= hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_TFE_CSID_SOF_IRQ_DEBUG, + &sof_irq_enable, + sizeof(sof_irq_enable)); + } + } + } + + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + rsrc_node = hw_mgr_res->hw_res[i]; + 
if (rsrc_node->process_cmd && (rsrc_node->res_id == + CAM_ISP_HW_TFE_IN_CAMIF)) { + rc |= hw_mgr_res->hw_res[i]->process_cmd( + hw_mgr_res->hw_res[i], + CAM_ISP_HW_CMD_SOF_IRQ_DEBUG, + &sof_irq_enable, + sizeof(sof_irq_enable)); + } + } + } + + return rc; +} + +static void cam_tfe_mgr_pf_dump(uint32_t res_id, + struct cam_tfe_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_hw_get_cmd_update cmd_update; + uint32_t res_id_out; + int i, rc = 0; + + /* dump the registers */ + rc = cam_tfe_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc, + ctx->num_reg_dump_buf, + CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR, NULL, false); + if (rc) { + CAM_ERR(CAM_ISP, + "Reg dump on pf failed req id: %llu rc: %d", + ctx->applied_req_id, rc); + } + + /* dump the acquire data */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf && hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_TFE_CSID_LOG_ACQUIRE_DATA, + hw_mgr_res->hw_res[i], + sizeof(void *)); + if (rc) + CAM_ERR(CAM_ISP, + "acquire dump data failed"); + } else + CAM_ERR(CAM_ISP, "NULL hw_intf!"); + } + } + + res_id_out = res_id & 0xFF; + + if (res_id_out >= CAM_TFE_HW_OUT_RES_MAX) { + CAM_ERR(CAM_ISP, "Invalid out resource id :%x", + res_id); + return; + } + + hw_mgr_res = + &ctx->res_list_tfe_out[res_id_out]; + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + cmd_update.cmd_type = CAM_ISP_HW_CMD_DUMP_BUS_INFO; + cmd_update.res = hw_mgr_res->hw_res[i]; + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_BUS_INFO, + (void *)&cmd_update, + sizeof(struct cam_isp_hw_get_cmd_update)); + } + } +} + +static void 
cam_tfe_mgr_dump_pf_data( + struct cam_tfe_hw_mgr *hw_mgr, + struct cam_hw_cmd_args *hw_cmd_args) +{ + struct cam_tfe_hw_mgr_ctx *ctx; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_hw_get_cmd_update cmd_update; + struct cam_isp_hw_get_res_for_mid get_res; + struct cam_packet *packet = NULL; + struct cam_hw_cmd_pf_args *pf_cmd_args; + uint32_t *resource_type; + uint32_t hw_id; + bool *ctx_found, hw_id_found = false; + int i, j, rc = 0; + struct cam_ctx_request *req_pf; + + ctx = (struct cam_tfe_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map; + + pf_cmd_args = hw_cmd_args->u.pf_cmd_args; + req_pf = (struct cam_ctx_request *) + pf_cmd_args->pf_req_info->req; + packet = (struct cam_packet *)req_pf->packet; + ctx_found = &pf_cmd_args->pf_args->pf_context_info.ctx_found; + resource_type = &pf_cmd_args->pf_args->pf_context_info.resource_type; + + if ((*ctx_found) && (*resource_type)) + goto outportlog; + + for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) { + if (!g_tfe_hw_mgr.tfe_devices[i]) + continue; + + for (j = 0; j < g_tfe_hw_mgr.tfe_devices[i]->num_hw_pid; j++) { + if (g_tfe_hw_mgr.tfe_devices[i]->hw_pid[j] == + pf_cmd_args->pf_args->pf_smmu_info->pid) { + hw_id_found = true; + hw_id = i; + break; + } + } + if (hw_id_found) + break; + } + + if (i == CAM_TFE_HW_NUM_MAX) { + CAM_INFO(CAM_ISP, + "PID:%d is not matching with any TFE HW PIDs ctx id:%d", + pf_cmd_args->pf_args->pf_smmu_info->pid, ctx->ctx_index); + cam_packet_util_put_packet_addr(pf_cmd_args->pf_req_info->packet_handle); + return; + } + + for (i = 0; i < ctx->num_base; i++) { + if (ctx->base[i].idx == hw_id) { + *ctx_found = true; + break; + } + } + + if (!(*ctx_found)) { + CAM_INFO(CAM_ISP, + "This context does not cause pf:pid:%d hw id:%d ctx_id:%d", + pf_cmd_args->pf_args->pf_smmu_info->pid, hw_id, ctx->ctx_index); + cam_packet_util_put_packet_addr(pf_cmd_args->pf_req_info->packet_handle); + return; + } + + for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++) { + hw_mgr_res = &ctx->res_list_tfe_out[i]; + if 
(!hw_mgr_res->hw_res[0]) + continue; + break; + } + + if (i >= CAM_TFE_HW_OUT_RES_MAX) { + CAM_ERR(CAM_ISP, + "NO valid outport resources ctx id:%d req id:%lld", + ctx->ctx_index, packet->header.request_id); + cam_packet_util_put_packet_addr(pf_cmd_args->pf_req_info->packet_handle); + return; + } + + get_res.mid = pf_cmd_args->pf_args->pf_smmu_info->mid; + cmd_update.res = hw_mgr_res->hw_res[0]; + cmd_update.cmd_type = CAM_ISP_HW_CMD_GET_RES_FOR_MID; + cmd_update.data = (void *) &get_res; + + /* get resource id for given mid */ + rc = hw_mgr_res->hw_res[0]->hw_intf->hw_ops.process_cmd( + hw_mgr_res->hw_res[0]->hw_intf->hw_priv, + cmd_update.cmd_type, &cmd_update, + sizeof(struct cam_isp_hw_get_cmd_update)); + if (rc) { + CAM_ERR(CAM_ISP, + "getting mid port resource id failed ctx id:%d req id:%lld", + ctx->ctx_index, packet->header.request_id); + cam_packet_util_put_packet_addr(pf_cmd_args->pf_req_info->packet_handle); + return; + } + CAM_ERR(CAM_ISP, + "Page fault on resource id:0x%x ctx id:%d req id:%lld", + get_res.out_res_id, ctx->ctx_index, packet->header.request_id); + *resource_type = get_res.out_res_id; + + cam_tfe_mgr_pf_dump(get_res.out_res_id, ctx); + +outportlog: + cam_packet_util_dump_io_bufs(packet, hw_mgr->mgr_common.img_iommu_hdl, + hw_mgr->mgr_common.img_iommu_hdl_secure, pf_cmd_args->pf_args, true); + cam_packet_util_put_packet_addr(pf_cmd_args->pf_req_info->packet_handle); + +} + +static void *cam_tfe_mgr_user_dump_stream_info( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_tfe_hw_mgr_ctx *ctx = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res = NULL; + struct cam_isp_resource_node *hw_res = NULL; + int32_t *addr; + int i; + int hw_idx[CAM_ISP_HW_SPLIT_MAX] = { -1, -1 }; + + ctx = (struct cam_tfe_hw_mgr_ctx *)dump_struct; + + if (!list_empty(&ctx->res_list_tfe_in)) { + hw_mgr_res = list_first_entry(&ctx->res_list_tfe_in, + struct cam_isp_hw_mgr_res, list); + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + hw_res = hw_mgr_res->hw_res[i]; + 
if (hw_res && hw_res->hw_intf) + hw_idx[i] = hw_res->hw_intf->hw_idx; + } + } + + addr = (int32_t *)addr_ptr; + + *addr++ = ctx->is_dual; + *addr++ = hw_idx[CAM_ISP_HW_SPLIT_LEFT]; + *addr++ = hw_idx[CAM_ISP_HW_SPLIT_RIGHT]; + + return addr; +} + +static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args) +{ + int rc = 0; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_tfe_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_tfe_hw_mgr_ctx *ctx = (struct cam_tfe_hw_mgr_ctx *) + hw_cmd_args->ctxt_to_hw_map; + struct cam_isp_hw_cmd_args *isp_hw_cmd_args = NULL; + struct cam_packet *packet; + struct cam_tfe_comp_record_query *query_cmd; + + if (!hw_mgr_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + if (!ctx || !ctx->ctx_in_use) { + CAM_ERR(CAM_ISP, "Fatal: Invalid context is used"); + return -EPERM; + } + + switch (hw_cmd_args->cmd_type) { + case CAM_HW_MGR_CMD_INTERNAL: + if (!hw_cmd_args->u.internal_args) { + CAM_ERR(CAM_ISP, "Invalid cmd arguments"); + return -EINVAL; + } + + isp_hw_cmd_args = (struct cam_isp_hw_cmd_args *) + hw_cmd_args->u.internal_args; + + switch (isp_hw_cmd_args->cmd_type) { + case CAM_ISP_HW_MGR_CMD_PAUSE_HW: + cam_tfe_mgr_pause_hw(ctx); + break; + case CAM_ISP_HW_MGR_CMD_RESUME_HW: + cam_tfe_mgr_resume_hw(ctx); + break; + case CAM_ISP_HW_MGR_CMD_SOF_DEBUG: + cam_tfe_mgr_sof_irq_debug(ctx, + isp_hw_cmd_args->u.sof_irq_enable); + break; + case CAM_ISP_HW_MGR_CMD_CTX_TYPE: + if (ctx->is_rdi_only_context) + isp_hw_cmd_args->u.ctx_type = CAM_ISP_CTX_RDI; + else + isp_hw_cmd_args->u.ctx_type = CAM_ISP_CTX_PIX; + break; + case CAM_ISP_HW_MGR_GET_PACKET_OPCODE: + packet = (struct cam_packet *) + isp_hw_cmd_args->cmd_data; + if ((packet->header.op_code & 0xF) == + CAM_ISP_TFE_PACKET_INIT_DEV) + isp_hw_cmd_args->u.packet_op_code = + CAM_ISP_TFE_PACKET_INIT_DEV; + else + isp_hw_cmd_args->u.packet_op_code = + CAM_ISP_TFE_PACKET_CONFIG_DEV; + break; + case CAM_ISP_HW_MGR_GET_LAST_CDM_DONE: + 
isp_hw_cmd_args->u.last_cdm_done = + ctx->last_cdm_done_req; + break; + case CAM_ISP_HW_MGR_GET_BUS_COMP_GROUP: + query_cmd = (struct cam_tfe_comp_record_query *) + isp_hw_cmd_args->cmd_data; + memcpy(query_cmd->tfe_bus_comp_grp, ctx->tfe_bus_comp_grp, + sizeof(struct cam_tfe_hw_comp_record) * + CAM_TFE_BUS_COMP_NUM_MAX); + break; + case CAM_ISP_HW_MGR_GET_SOF_TS: + rc = cam_tfe_mgr_cmd_get_sof_timestamp(ctx, + &isp_hw_cmd_args->u.sof_ts.curr, + &isp_hw_cmd_args->u.sof_ts.boot, + &isp_hw_cmd_args->u.sof_ts.prev); + break; + case CAM_ISP_HW_MGR_CMD_UPDATE_CLOCK: + rc = cam_tfe_hw_mgr_csiphy_clk_sync(ctx, isp_hw_cmd_args->cmd_data); + break; + case CAM_ISP_HW_MGR_DUMP_STREAM_INFO: + rc = cam_common_user_dump_helper( + (void *)(isp_hw_cmd_args->cmd_data), + cam_tfe_mgr_user_dump_stream_info, ctx, + sizeof(int32_t), "ISP_STREAM_INFO_FROM_TFE_HW_MGR:"); + break; + case CAM_ISP_HW_MGR_GET_LAST_CONSUMED_ADDR: + rc = cam_tfe_mgr_cmd_get_last_consumed_addr(ctx, + (struct cam_isp_hw_done_event_data *)(isp_hw_cmd_args->cmd_data)); + break; + case CAM_ISP_HW_MGR_SET_DRV_INFO: + break; + default: + CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x, ISP HW mgr cmd:0x%x", + hw_cmd_args->cmd_type, isp_hw_cmd_args->cmd_type); + rc = -EINVAL; + break; + } + break; + case CAM_HW_MGR_CMD_DUMP_PF_INFO: + cam_tfe_mgr_dump_pf_data(hw_mgr, hw_cmd_args); + break; + case CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH: + if (ctx->last_dump_flush_req_id == ctx->applied_req_id) + return 0; + + ctx->last_dump_flush_req_id = ctx->applied_req_id; + + rc = cam_tfe_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc, + ctx->num_reg_dump_buf, + CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_FLUSH, + NULL, false); + if (rc) { + CAM_ERR(CAM_ISP, + "Reg dump on flush failed req id: %llu num_reg_dump:0x%x rc: %d", + ctx->applied_req_id, ctx->num_reg_dump_buf, rc); + return rc; + } + + break; + case CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR: + if (ctx->last_dump_err_req_id == ctx->applied_req_id) + return 0; + + ctx->last_dump_err_req_id = 
ctx->applied_req_id; + rc = cam_tfe_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc, + ctx->num_reg_dump_buf, + CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR, + NULL, false); + if (rc) { + CAM_ERR(CAM_ISP, + "Reg dump on error failed req id:%llu num_reg_dump:0x%x rc: %d", + ctx->applied_req_id, ctx->num_reg_dump_buf, rc); + return rc; + } + break; + default: + CAM_ERR(CAM_ISP, "Invalid cmd"); + } + + return rc; +} + +static int cam_tfe_mgr_cmd_get_sof_timestamp( + struct cam_tfe_hw_mgr_ctx *tfe_ctx, + uint64_t *time_stamp, + uint64_t *boot_time_stamp, + uint64_t *prev_time_stamp) +{ + int rc = -EINVAL; + uint32_t i; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_tfe_csid_get_time_stamp_args csid_get_time; + + hw_mgr_res = list_first_entry(&tfe_ctx->res_list_tfe_csid, + struct cam_isp_hw_mgr_res, list); + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + /* + * Get the SOF time stamp from left resource only. + * Left resource is master for dual tfe case and + * Rdi only context case left resource only hold + * the RDI resource + */ + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + /* + * Single TFE case, Get the time stamp from + * available one csid hw in the context + * Dual TFE case, get the time stamp from + * master(left) would be sufficient + */ + + csid_get_time.node_res = + hw_mgr_res->hw_res[i]; + csid_get_time.get_prev_timestamp = (prev_time_stamp != NULL); + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_TFE_CSID_CMD_GET_TIME_STAMP, + &csid_get_time, + sizeof(struct + cam_tfe_csid_get_time_stamp_args)); + if (!rc && (i == CAM_ISP_HW_SPLIT_LEFT)) { + *time_stamp = + csid_get_time.time_stamp_val; + *boot_time_stamp = + csid_get_time.boot_timestamp; + if (prev_time_stamp) + *prev_time_stamp = + csid_get_time.prev_time_stamp_val; + break; + } + } + } + + if (rc) + CAM_ERR_RATE_LIMIT(CAM_ISP, "Getting sof time stamp failed"); + + return 
rc; +} + +static void cam_tfe_mgr_ctx_reg_dump(struct cam_tfe_hw_mgr_ctx *ctx) +{ + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_hw_intf *hw_intf; + struct cam_isp_hw_get_cmd_update cmd_update; + int i = 0; + + + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, + list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + switch (hw_mgr_res->hw_res[i]->res_id) { + case CAM_ISP_HW_TFE_IN_CAMIF: + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + cmd_update.res = hw_mgr_res->hw_res[i]; + cmd_update.cmd_type = + CAM_ISP_HW_CMD_GET_REG_DUMP; + hw_intf->hw_ops.process_cmd(hw_intf->hw_priv, + CAM_ISP_HW_CMD_GET_REG_DUMP, + &cmd_update, sizeof(cmd_update)); + break; + default: + break; + } + } + } + + /* Dump the TFE CSID registers */ + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, + list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_TFE_CSID_CMD_GET_REG_DUMP, + hw_mgr_res->hw_res[i], + sizeof(struct cam_isp_resource_node)); + } + } + } +} + +static int cam_tfe_mgr_process_recovery_cb(void *priv, void *data) +{ + int32_t rc = 0; + struct cam_tfe_hw_event_recovery_data *recovery_data = data; + struct cam_hw_start_args start_args; + struct cam_hw_stop_args stop_args; + struct cam_tfe_hw_mgr *tfe_hw_mgr = priv; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_tfe_hw_mgr_ctx *tfe_hw_mgr_ctx; + uint32_t i = 0; + + uint32_t error_type = recovery_data->error_type; + struct cam_tfe_hw_mgr_ctx *ctx = NULL; + + /* Here recovery is performed */ + CAM_DBG(CAM_ISP, "ErrorType = %d", error_type); + + switch (error_type) { + case CAM_ISP_HW_ERROR_OVERFLOW: + case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW: + if (!recovery_data->affected_ctx[0]) { + CAM_ERR(CAM_ISP, + "No context is 
affected but recovery called"); + kfree(recovery_data); + return 0; + } + + /* stop resources here */ + CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d", + recovery_data->no_of_context); + for (i = 0; i < recovery_data->no_of_context; i++) { + stop_args.ctxt_to_hw_map = + recovery_data->affected_ctx[i]; + tfe_hw_mgr_ctx = recovery_data->affected_ctx[i]; + + if (g_tfe_hw_mgr.debug_cfg.enable_reg_dump) + cam_tfe_mgr_ctx_reg_dump(tfe_hw_mgr_ctx); + + if (g_tfe_hw_mgr.debug_cfg.enable_recovery) { + rc = cam_tfe_mgr_stop_hw_in_overflow( + &stop_args); + if (rc) { + CAM_ERR(CAM_ISP, + "CTX stop failed(%d)", rc); + return rc; + } + } + } + + if (!g_tfe_hw_mgr.debug_cfg.enable_recovery) { + CAM_INFO(CAM_ISP, "reg dumping is done "); + return 0; + } + + CAM_DBG(CAM_ISP, "RESET: CSID PATH"); + for (i = 0; i < recovery_data->no_of_context; i++) { + ctx = recovery_data->affected_ctx[i]; + list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, + list) { + rc = cam_tfe_hw_mgr_reset_csid_res(hw_mgr_res); + if (rc) { + CAM_ERR(CAM_ISP, "Failed RESET (%d)", + hw_mgr_res->res_id); + return rc; + } + } + } + + CAM_DBG(CAM_ISP, "RESET: Calling TFE reset"); + + for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) { + if (recovery_data->affected_core[i]) + cam_tfe_mgr_reset_tfe_hw(tfe_hw_mgr, i); + } + + CAM_DBG(CAM_ISP, "START: Number of affected context: %d", + recovery_data->no_of_context); + + for (i = 0; i < recovery_data->no_of_context; i++) { + ctx = recovery_data->affected_ctx[i]; + start_args.ctxt_to_hw_map = ctx; + + atomic_set(&ctx->overflow_pending, 0); + + rc = cam_tfe_mgr_restart_hw(&start_args); + if (rc) { + CAM_ERR(CAM_ISP, "CTX start failed(%d)", rc); + return rc; + } + CAM_DBG(CAM_ISP, "Started resources rc (%d)", rc); + } + CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc); + + break; + + case CAM_ISP_HW_ERROR_P2I_ERROR: + break; + + case CAM_ISP_HW_ERROR_VIOLATION: + break; + + default: + CAM_ERR(CAM_ISP, "Invalid Error"); + } + CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", 
error_type); + + kfree(recovery_data); + return rc; +} + +static int cam_tfe_hw_mgr_do_error_recovery( + struct cam_tfe_hw_event_recovery_data *tfe_mgr_recovery_data) +{ + int32_t rc = 0; + struct crm_workq_task *task = NULL; + struct cam_tfe_hw_event_recovery_data *recovery_data = NULL; + + recovery_data = kmemdup(tfe_mgr_recovery_data, + sizeof(struct cam_tfe_hw_event_recovery_data), GFP_ATOMIC); + + if (!recovery_data) + return -ENOMEM; + + CAM_DBG(CAM_ISP, "Enter: error_type (%d)", recovery_data->error_type); + + task = cam_req_mgr_workq_get_task(g_tfe_hw_mgr.workq); + if (!task) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No empty task frame"); + kfree(recovery_data); + return -ENOMEM; + } + + task->process_cb = &cam_tfe_mgr_process_recovery_cb; + task->payload = recovery_data; + rc = cam_req_mgr_workq_enqueue_task(task, + recovery_data->affected_ctx[0]->hw_mgr, + CRM_TASK_PRIORITY_0); + + return rc; +} + +/* + * This function checks if any of the valid entry in affected_core[] + * is associated with this context. if YES + * a. It fills the other cores associated with this context.in + * affected_core[] + * b. 
Return true + */ +static bool cam_tfe_hw_mgr_is_ctx_affected( + struct cam_tfe_hw_mgr_ctx *tfe_hwr_mgr_ctx, + uint32_t *affected_core, + uint32_t size) +{ + + bool rc = false; + uint32_t i = 0, j = 0; + uint32_t max_idx = tfe_hwr_mgr_ctx->num_base; + uint32_t ctx_affected_core_idx[CAM_TFE_HW_NUM_MAX] = {0}; + + CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx); + + if ((max_idx >= CAM_TFE_HW_NUM_MAX) || (size > CAM_TFE_HW_NUM_MAX)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "invalid parameter = %d", max_idx); + return rc; + } + + for (i = 0; i < max_idx; i++) { + if (affected_core[tfe_hwr_mgr_ctx->base[i].idx]) + rc = true; + else { + ctx_affected_core_idx[j] = tfe_hwr_mgr_ctx->base[i].idx; + j = j + 1; + } + } + + if (rc) { + while (j) { + if (affected_core[ctx_affected_core_idx[j-1]] != 1) + affected_core[ctx_affected_core_idx[j-1]] = 1; + j = j - 1; + } + } + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +/* + * For any dual TFE context, if non-affected TFE is also serving + * another context, then that context should also be notified with fatal error + * So Loop through each context and - + * a. match core_idx + * b. 
Notify CTX with fatal error + */ +static int cam_tfe_hw_mgr_find_affected_ctx( + struct cam_isp_hw_error_event_data *error_event_data, + uint32_t curr_core_idx, + struct cam_tfe_hw_event_recovery_data *recovery_data) +{ + uint32_t affected_core[CAM_TFE_HW_NUM_MAX] = {0}; + struct cam_tfe_hw_mgr_ctx *tfe_hwr_mgr_ctx = NULL; + cam_hw_event_cb_func notify_err_cb; + struct cam_tfe_hw_mgr *tfe_hwr_mgr = NULL; + uint32_t i = 0; + + if (!recovery_data) { + CAM_ERR(CAM_ISP, "recovery_data parameter is NULL"); + return -EINVAL; + } + + recovery_data->no_of_context = 0; + affected_core[curr_core_idx] = 1; + tfe_hwr_mgr = &g_tfe_hw_mgr; + + list_for_each_entry(tfe_hwr_mgr_ctx, + &tfe_hwr_mgr->used_ctx_list, list) { + /* + * Check if current core_idx matches the HW associated + * with this context + */ + if (!cam_tfe_hw_mgr_is_ctx_affected(tfe_hwr_mgr_ctx, + affected_core, CAM_TFE_HW_NUM_MAX)) + continue; + + if (atomic_read(&tfe_hwr_mgr_ctx->overflow_pending)) { + CAM_INFO(CAM_ISP, "CTX:%d already error reported", + tfe_hwr_mgr_ctx->ctx_index); + continue; + } + + atomic_set(&tfe_hwr_mgr_ctx->overflow_pending, 1); + notify_err_cb = tfe_hwr_mgr_ctx->common.event_cb; + + /* Add affected_context in list of recovery data */ + CAM_DBG(CAM_ISP, "Add affected ctx %d to list", + tfe_hwr_mgr_ctx->ctx_index); + if (recovery_data->no_of_context < CAM_TFE_CTX_MAX) + recovery_data->affected_ctx[ + recovery_data->no_of_context++] = + tfe_hwr_mgr_ctx; + + /* + * In the call back function corresponding ISP context + * will update CRM about fatal Error + */ + if (notify_err_cb) { + notify_err_cb(tfe_hwr_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_ERROR, + (void *)error_event_data); + } else { + CAM_WARN(CAM_ISP, "Error call back is not set"); + goto end; + } + } + + /* fill the affected_core in recovery data */ + for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) { + recovery_data->affected_core[i] = affected_core[i]; + CAM_DBG(CAM_ISP, "tfe core %d is affected (%d)", + i, 
recovery_data->affected_core[i]); + } +end: + return 0; +} + +static int cam_tfe_hw_mgr_handle_hw_dump_info( + void *ctx, + void *evt_info) +{ + struct cam_tfe_hw_mgr_ctx *tfe_hw_mgr_ctx = + (struct cam_tfe_hw_mgr_ctx *)ctx; + struct cam_isp_hw_event_info *event_info = + (struct cam_isp_hw_event_info *)evt_info; + struct cam_isp_hw_mgr_res *hw_mgr_res = NULL; + struct cam_hw_intf *hw_intf; + uint32_t i = 0, out_port_id = 0; + uint64_t dummy_args = 0; + int rc = 0; + + for (i = 0; i < tfe_hw_mgr_ctx->num_base; i++) { + hw_intf = g_tfe_hw_mgr.csid_devices[tfe_hw_mgr_ctx->base[i].idx]; + if (hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_CSID_CLOCK_DUMP, + &dummy_args, + sizeof(uint64_t)); + if (rc) + CAM_ERR(CAM_ISP, + "CSID Clock Dump failed"); + } + } + + if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT) { + out_port_id = event_info->res_id & 0xFF; + if (out_port_id >= CAM_TFE_HW_OUT_RES_MAX) { + CAM_ERR(CAM_ISP, + "Resource out of range"); + goto end; + } + hw_mgr_res = + &tfe_hw_mgr_ctx->res_list_tfe_out[out_port_id]; + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + hw_intf = hw_mgr_res->hw_res[i]->hw_intf; + if (hw_intf->hw_ops.process_cmd) { + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_DUMP_BUS_INFO, + (void *)event_info, + sizeof(struct cam_isp_hw_event_info)); + } + } + } +end: + return rc; +} + +static int cam_tfe_hw_mgr_handle_csid_event( + uint32_t err_type, + struct cam_isp_hw_event_info *event_info) +{ + struct cam_isp_hw_error_event_data error_event_data = {0}; + struct cam_tfe_hw_event_recovery_data recovery_data = {0}; + + /* this can be extended based on the types of error + * received from CSID + */ + switch (err_type) { + case CAM_ISP_HW_ERROR_CSID_FATAL: { + + if (!g_tfe_hw_mgr.debug_cfg.enable_csid_recovery) + break; + + error_event_data.error_type = err_type; + cam_tfe_hw_mgr_find_affected_ctx(&error_event_data, + 
event_info->hw_idx, + &recovery_data); + break; + } + default: + break; + } + return 0; +} + +static int cam_tfe_hw_mgr_handle_hw_err( + void *ctx, + void *evt_info) +{ + struct cam_isp_hw_error_event_info *err_evt_info; + struct cam_tfe_hw_mgr_ctx *tfe_hw_mgr_ctx; + struct cam_isp_hw_event_info *event_info = evt_info; + struct cam_isp_hw_error_event_data error_event_data = {0}; + struct cam_tfe_hw_event_recovery_data recovery_data = {0}; + int rc = -EINVAL; + uint32_t core_idx; + + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, "No error event data failed to process"); + return rc; + } + + err_evt_info = (struct cam_isp_hw_error_event_info *)event_info->event_data; + if (err_evt_info->err_type == CAM_TFE_IRQ_STATUS_VIOLATION) + error_event_data.error_type = CAM_ISP_HW_ERROR_VIOLATION; + else if (event_info->res_type == CAM_ISP_RESOURCE_TFE_IN || + event_info->res_type == CAM_ISP_RESOURCE_PIX_PATH) + error_event_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW; + else if (event_info->res_type == CAM_ISP_RESOURCE_TFE_OUT) + error_event_data.error_type = CAM_ISP_HW_ERROR_BUSIF_OVERFLOW; + + spin_lock(&g_tfe_hw_mgr.ctx_lock); + if (err_evt_info->err_type == CAM_ISP_HW_ERROR_CSID_FATAL) { + rc = cam_tfe_hw_mgr_handle_csid_event(err_evt_info->err_type, event_info); + spin_unlock(&g_tfe_hw_mgr.ctx_lock); + return rc; + } + + if (ctx) { + tfe_hw_mgr_ctx = + (struct cam_tfe_hw_mgr_ctx *)ctx; + if ((event_info->res_type == CAM_ISP_RESOURCE_TFE_IN) + && (!tfe_hw_mgr_ctx->is_rdi_only_context) + && (event_info->res_id != CAM_ISP_HW_TFE_IN_CAMIF)) + cam_tfe_hw_mgr_handle_hw_dump_info(tfe_hw_mgr_ctx, event_info); + } + + core_idx = event_info->hw_idx; + + if (g_tfe_hw_mgr.debug_cfg.enable_recovery) + error_event_data.recovery_enabled = true; + else + error_event_data.recovery_enabled = false; + + rc = cam_tfe_hw_mgr_find_affected_ctx(&error_event_data, + core_idx, &recovery_data); + if (rc || !(recovery_data.no_of_context)) + goto end; + + if (event_info->res_type == 
CAM_ISP_RESOURCE_TFE_OUT) { + spin_unlock(&g_tfe_hw_mgr.ctx_lock); + return rc; + } + + if (g_tfe_hw_mgr.debug_cfg.enable_recovery) { + /* Trigger for recovery */ + if (err_evt_info->err_type == CAM_TFE_IRQ_STATUS_VIOLATION) + recovery_data.error_type = CAM_ISP_HW_ERROR_VIOLATION; + else + recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW; + cam_tfe_hw_mgr_do_error_recovery(&recovery_data); + } else { + CAM_DBG(CAM_ISP, "recovery is not enabled"); + rc = 0; + } +end: + spin_unlock(&g_tfe_hw_mgr.ctx_lock); + return rc; +} + +static int cam_tfe_hw_mgr_handle_hw_rup( + void *ctx, + void *evt_info) +{ + struct cam_isp_hw_event_info *event_info = evt_info; + struct cam_tfe_hw_mgr_ctx *tfe_hw_mgr_ctx = ctx; + cam_hw_event_cb_func tfe_hwr_irq_rup_cb; + struct cam_isp_hw_reg_update_event_data rup_event_data; + + tfe_hwr_irq_rup_cb = tfe_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_ISP_HW_TFE_IN_CAMIF: + if (tfe_hw_mgr_ctx->is_dual) + if (event_info->hw_idx != tfe_hw_mgr_ctx->master_hw_idx) + break; + + if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending)) + break; + + tfe_hwr_irq_rup_cb(tfe_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data); + break; + + case CAM_ISP_HW_TFE_IN_RDI0: + case CAM_ISP_HW_TFE_IN_RDI1: + case CAM_ISP_HW_TFE_IN_RDI2: + if (!tfe_hw_mgr_ctx->is_rdi_only_context) + break; + if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending)) + break; + tfe_hwr_irq_rup_cb(tfe_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&rup_event_data); + break; + + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d", + event_info->res_id); + break; + } + + CAM_DBG(CAM_ISP, "RUP done for TFE source %d", + event_info->res_id); + + return 0; +} + +static int cam_tfe_hw_mgr_handle_hw_epoch( + void *ctx, + void *evt_info) +{ + struct cam_isp_hw_event_info *event_info = evt_info; + struct cam_tfe_hw_mgr_ctx *tfe_hw_mgr_ctx = ctx; + cam_hw_event_cb_func tfe_hw_irq_epoch_cb; + struct 
cam_isp_hw_epoch_event_data epoch_done_event_data; + + tfe_hw_irq_epoch_cb = tfe_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_ISP_HW_TFE_IN_CAMIF: + if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending)) + break; + tfe_hw_irq_epoch_cb(tfe_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_EPOCH, + (void *)&epoch_done_event_data); + break; + + case CAM_ISP_HW_TFE_IN_RDI0: + case CAM_ISP_HW_TFE_IN_RDI1: + case CAM_ISP_HW_TFE_IN_RDI2: + break; + + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d", + event_info->res_id); + break; + } + + CAM_DBG(CAM_ISP, "Epoch for TFE source %d", event_info->res_id); + + return 0; +} + +static int cam_tfe_hw_mgr_handle_hw_sof( + void *ctx, + void *evt_info) +{ + struct cam_isp_hw_event_info *event_info = evt_info; + struct cam_tfe_hw_mgr_ctx *tfe_hw_mgr_ctx = ctx; + cam_hw_event_cb_func tfe_hw_irq_sof_cb; + struct cam_isp_hw_sof_event_data sof_done_event_data; + + tfe_hw_irq_sof_cb = tfe_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_ISP_HW_TFE_IN_CAMIF: + cam_tfe_mgr_cmd_get_sof_timestamp(tfe_hw_mgr_ctx, + &sof_done_event_data.timestamp, + &sof_done_event_data.boot_time, NULL); + + if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending)) + break; + + tfe_hw_irq_sof_cb(tfe_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_SOF, (void *)&sof_done_event_data); + + break; + + case CAM_ISP_HW_TFE_IN_RDI0: + case CAM_ISP_HW_TFE_IN_RDI1: + case CAM_ISP_HW_TFE_IN_RDI2: + if (!tfe_hw_mgr_ctx->is_rdi_only_context) + break; + cam_tfe_mgr_cmd_get_sof_timestamp(tfe_hw_mgr_ctx, + &sof_done_event_data.timestamp, + &sof_done_event_data.boot_time, NULL); + if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending)) + break; + tfe_hw_irq_sof_cb(tfe_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_SOF, (void *)&sof_done_event_data); + break; + + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d", + event_info->res_id); + break; + } + + CAM_DBG(CAM_ISP, "SOF for TFE source %d", event_info->res_id); + + 
return 0; +} + +static int cam_tfe_hw_mgr_handle_hw_eof( + void *ctx, + void *evt_info) +{ + struct cam_isp_hw_event_info *event_info = evt_info; + struct cam_tfe_hw_mgr_ctx *tfe_hw_mgr_ctx = ctx; + cam_hw_event_cb_func tfe_hw_irq_eof_cb; + struct cam_isp_hw_eof_event_data eof_done_event_data; + + tfe_hw_irq_eof_cb = tfe_hw_mgr_ctx->common.event_cb; + + switch (event_info->res_id) { + case CAM_ISP_HW_TFE_IN_CAMIF: + if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending)) + break; + tfe_hw_irq_eof_cb(tfe_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_EOF, (void *)&eof_done_event_data); + + break; + + case CAM_ISP_HW_TFE_IN_RDI0: + case CAM_ISP_HW_TFE_IN_RDI1: + case CAM_ISP_HW_TFE_IN_RDI2: + if (!tfe_hw_mgr_ctx->is_rdi_only_context) + break; + if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending)) + break; + tfe_hw_irq_eof_cb(tfe_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_EOF, (void *)&eof_done_event_data); + break; + + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d", + event_info->res_id); + break; + } + + CAM_DBG(CAM_ISP, "EOF for out_res->res_id: 0x%x", + event_info->res_id); + + return 0; +} + +static int cam_tfe_hw_mgr_handle_hw_buf_done( + void *ctx, + void *evt_info) +{ + cam_hw_event_cb_func tfe_hwr_irq_wm_done_cb; + struct cam_tfe_hw_mgr_ctx *tfe_hw_mgr_ctx = ctx; + struct cam_isp_hw_done_event_data buf_done_event_data = {0}; + struct cam_isp_hw_event_info *event_info = evt_info; + struct cam_isp_hw_bufdone_event_info *bufdone_evt_info = NULL; + + + if (atomic_read(&tfe_hw_mgr_ctx->overflow_pending)) + return 0; + + event_info = (struct cam_isp_hw_event_info *)evt_info; + if (!event_info->event_data) { + CAM_ERR(CAM_ISP, + "No additional buf done data failed to process for HW: %u", + event_info->hw_type); + return -EINVAL; + } + + tfe_hwr_irq_wm_done_cb = tfe_hw_mgr_ctx->common.event_cb; + + bufdone_evt_info = (struct cam_isp_hw_bufdone_event_info *) + event_info->event_data; + buf_done_event_data.resource_handle = 0; + + CAM_DBG(CAM_ISP, + "Buf 
done for TFE: %d res_id: 0x%x last consumed addr: 0x%x ctx: %u", + event_info->hw_idx, event_info->res_id, + bufdone_evt_info->last_consumed_addr, tfe_hw_mgr_ctx->ctx_index); + + buf_done_event_data.hw_type = event_info->hw_type; + buf_done_event_data.resource_handle = event_info->res_id; + buf_done_event_data.last_consumed_addr = bufdone_evt_info->last_consumed_addr; + buf_done_event_data.comp_group_id = bufdone_evt_info->comp_grp_id; + + if (buf_done_event_data.resource_handle > 0 && tfe_hwr_irq_wm_done_cb) { + CAM_DBG(CAM_ISP, "Buf done for out_res->res_id: 0x%x in ctx: %u", + event_info->res_id, tfe_hw_mgr_ctx->ctx_index); + tfe_hwr_irq_wm_done_cb(tfe_hw_mgr_ctx->common.cb_priv, + CAM_ISP_HW_EVENT_DONE, (void *)&buf_done_event_data); + } + + return 0; +} + +static int cam_tfe_hw_mgr_event_handler( + void *priv, + uint32_t evt_id, + void *evt_info) +{ + int rc = 0; + + if (!evt_info) + return -EINVAL; + + if (!priv) + if (evt_id != CAM_ISP_HW_EVENT_ERROR) + return -EINVAL; + + CAM_DBG(CAM_ISP, "Event ID 0x%x", evt_id); + + switch (evt_id) { + case CAM_ISP_HW_EVENT_SOF: + rc = cam_tfe_hw_mgr_handle_hw_sof(priv, evt_info); + break; + + case CAM_ISP_HW_EVENT_REG_UPDATE: + rc = cam_tfe_hw_mgr_handle_hw_rup(priv, evt_info); + break; + + case CAM_ISP_HW_EVENT_EPOCH: + rc = cam_tfe_hw_mgr_handle_hw_epoch(priv, evt_info); + break; + + case CAM_ISP_HW_EVENT_EOF: + rc = cam_tfe_hw_mgr_handle_hw_eof(priv, evt_info); + break; + + case CAM_ISP_HW_EVENT_DONE: + rc = cam_tfe_hw_mgr_handle_hw_buf_done(priv, evt_info); + break; + + case CAM_ISP_HW_EVENT_ERROR: + rc = cam_tfe_hw_mgr_handle_hw_err(priv, evt_info); + break; + + default: + CAM_ERR(CAM_ISP, "Invalid event ID %d", evt_id); + break; + } + + return rc; +} + +static int cam_tfe_hw_mgr_sort_dev_with_caps( + struct cam_tfe_hw_mgr *tfe_hw_mgr) +{ + int i; + + /* get caps for csid devices */ + for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) { + if (!tfe_hw_mgr->csid_devices[i]) + continue; + if 
(tfe_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps) { + tfe_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps( + tfe_hw_mgr->csid_devices[i]->hw_priv, + &tfe_hw_mgr->tfe_csid_dev_caps[i], + sizeof(tfe_hw_mgr->tfe_csid_dev_caps[i])); + } + } + + /* get caps for tfe devices */ + for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) { + if (!tfe_hw_mgr->tfe_devices[i]) + continue; + + if (tfe_hw_mgr->tfe_devices[i]->hw_intf->hw_ops.get_hw_caps) { + tfe_hw_mgr->tfe_devices[i]->hw_intf->hw_ops.get_hw_caps( + tfe_hw_mgr->tfe_devices[i]->hw_intf->hw_priv, + &tfe_hw_mgr->tfe_dev_caps[i], + sizeof(tfe_hw_mgr->tfe_dev_caps[i])); + } + } + + return 0; +} + +static int cam_tfe_set_csid_debug(void *data, u64 val) +{ + g_tfe_hw_mgr.debug_cfg.csid_debug = val; + CAM_DBG(CAM_ISP, "Set CSID Debug value :%lld", val); + return 0; +} + +static int cam_tfe_get_csid_debug(void *data, u64 *val) +{ + *val = g_tfe_hw_mgr.debug_cfg.csid_debug; + CAM_DBG(CAM_ISP, "Get CSID Debug value :%lld", + g_tfe_hw_mgr.debug_cfg.csid_debug); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_tfe_csid_debug, + cam_tfe_get_csid_debug, + cam_tfe_set_csid_debug, "%16llu"); + +static int cam_tfe_set_camif_debug(void *data, u64 val) +{ + g_tfe_hw_mgr.debug_cfg.camif_debug = val; + CAM_DBG(CAM_ISP, + "Set camif enable_diag_sensor_status value :%lld", val); + return 0; +} + +static int cam_tfe_get_camif_debug(void *data, u64 *val) +{ + *val = g_tfe_hw_mgr.debug_cfg.camif_debug; + CAM_DBG(CAM_ISP, + "Set camif enable_diag_sensor_status value :%lld", + g_tfe_hw_mgr.debug_cfg.csid_debug); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_tfe_camif_debug, + cam_tfe_get_camif_debug, + cam_tfe_set_camif_debug, "%16llu"); + +static int cam_tfe_hw_mgr_debug_register(void) +{ + int rc = 0; + struct dentry *dbgfileptr = NULL; + + if (!cam_debugfs_available()) + goto end; + + rc = cam_debugfs_create_subdir("tfe", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_ISP,"DebugFS could not create directory!"); + rc = -ENOENT; + goto end; + } + /* Store parent 
inode for cleanup in caller */ + g_tfe_hw_mgr.debug_cfg.dentry = dbgfileptr; + + dbgfileptr = debugfs_create_file("tfe_csid_debug", 0644, + g_tfe_hw_mgr.debug_cfg.dentry, NULL, &cam_tfe_csid_debug); + debugfs_create_u32("enable_recovery", 0644, + g_tfe_hw_mgr.debug_cfg.dentry, + &g_tfe_hw_mgr.debug_cfg.enable_recovery); + debugfs_create_u32("enable_reg_dump", 0644, + g_tfe_hw_mgr.debug_cfg.dentry, + &g_tfe_hw_mgr.debug_cfg.enable_reg_dump); + debugfs_create_u32("enable_csid_recovery", 0644, + g_tfe_hw_mgr.debug_cfg.dentry, + &g_tfe_hw_mgr.debug_cfg.enable_csid_recovery); + dbgfileptr = debugfs_create_file("tfe_camif_debug", 0644, + g_tfe_hw_mgr.debug_cfg.dentry, NULL, &cam_tfe_camif_debug); + debugfs_create_u32("per_req_reg_dump", 0644, + g_tfe_hw_mgr.debug_cfg.dentry, + &g_tfe_hw_mgr.debug_cfg.per_req_reg_dump); + debugfs_create_bool("enable_cdm_cmd_check", 0644, g_tfe_hw_mgr.debug_cfg.dentry, + &g_tfe_hw_mgr.debug_cfg.enable_cdm_cmd_check); + if (IS_ERR(dbgfileptr)) { + if (PTR_ERR(dbgfileptr) == -ENODEV) + CAM_WARN(CAM_ISP, "DebugFS not enabled in kernel!"); + else + rc = PTR_ERR(dbgfileptr); + } +end: + g_tfe_hw_mgr.debug_cfg.enable_recovery = 0; + return rc; +} + +static void cam_req_mgr_process_tfe_worker(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl) +{ + int rc = -EFAULT; + int i, j; + struct cam_iommu_handle cdm_handles; + struct cam_tfe_hw_mgr_ctx *ctx_pool; + struct cam_isp_hw_mgr_res *res_list_tfe_out; + bool support_consumed_addr = false; + + CAM_DBG(CAM_ISP, "Enter"); + + memset(&g_tfe_hw_mgr, 0, sizeof(g_tfe_hw_mgr)); + + mutex_init(&g_tfe_hw_mgr.ctx_mutex); + spin_lock_init(&g_tfe_hw_mgr.ctx_lock); + + if (CAM_TFE_HW_NUM_MAX != CAM_TFE_CSID_HW_NUM_MAX) { + CAM_ERR(CAM_ISP, "CSID num is different then TFE num"); + return -EINVAL; + } + + /* fill tfe hw intf information */ + for (i = 0, j = 0; i < CAM_TFE_HW_NUM_MAX; i++) { + rc = 
cam_tfe_hw_init(&g_tfe_hw_mgr.tfe_devices[i], i); + if (!rc) { + struct cam_hw_intf *tfe_device = + g_tfe_hw_mgr.tfe_devices[i]->hw_intf; + struct cam_hw_info *tfe_hw = (struct cam_hw_info *) + g_tfe_hw_mgr.tfe_devices[i]->hw_intf->hw_priv; + struct cam_hw_soc_info *soc_info = &tfe_hw->soc_info; + + if (j == 0) + tfe_device->hw_ops.process_cmd( + tfe_hw, + CAM_ISP_HW_CMD_IS_CONSUMED_ADDR_SUPPORT, + &support_consumed_addr, + sizeof(support_consumed_addr)); + + j++; + + g_tfe_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0]; + CAM_DBG(CAM_ISP, + "reg_map: mem base = %pK cam_base = 0x%llx", + (void __iomem *)soc_info->reg_map[0].mem_base, + (uint64_t) soc_info->reg_map[0].mem_cam_base); + } else { + g_tfe_hw_mgr.cdm_reg_map[i] = NULL; + } + } + if (j == 0) { + CAM_ERR(CAM_ISP, "no valid TFE HW"); + return -EINVAL; + } + + g_tfe_hw_mgr.support_consumed_addr = support_consumed_addr; + /* fill csid hw intf information */ + for (i = 0, j = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) { + rc = cam_tfe_csid_hw_init(&g_tfe_hw_mgr.csid_devices[i], i); + if (!rc) + j++; + } + if (!j) { + CAM_ERR(CAM_ISP, "no valid TFE CSID HW"); + return -EINVAL; + } + + cam_tfe_hw_mgr_sort_dev_with_caps(&g_tfe_hw_mgr); + + /* setup tfe context list */ + INIT_LIST_HEAD(&g_tfe_hw_mgr.free_ctx_list); + INIT_LIST_HEAD(&g_tfe_hw_mgr.used_ctx_list); + + /* + * for now, we only support one iommu handle. later + * we will need to setup more iommu handle for other + * use cases. 
+ * Also, we have to release them once we have the + * deinit support + */ + if (cam_smmu_get_handle("tfe", + &g_tfe_hw_mgr.mgr_common.img_iommu_hdl)) { + CAM_ERR(CAM_ISP, "Can not get iommu handle"); + return -EINVAL; + } + + if (cam_smmu_get_handle("cam-secure", + &g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure)) { + CAM_ERR(CAM_ISP, "Failed to get secure iommu handle"); + goto secure_fail; + } + + CAM_DBG(CAM_ISP, "iommu_handles: non-secure[0x%x], secure[0x%x]", + g_tfe_hw_mgr.mgr_common.img_iommu_hdl, + g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure); + + if (!cam_cdm_get_iommu_handle("tfe0", &cdm_handles)) { + CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles"); + g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure; + g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl_secure = + cdm_handles.secure; + } else { + CAM_DBG(CAM_ISP, "Failed to acquire the CDM iommu handles"); + g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl = -1; + g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1; + } + + atomic_set(&g_tfe_hw_mgr.active_ctx_cnt, 0); + for (i = 0; i < CAM_TFE_CTX_MAX; i++) { + memset(&g_tfe_hw_mgr.ctx_pool[i], 0, + sizeof(g_tfe_hw_mgr.ctx_pool[i])); + INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].list); + INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].res_list_tfe_csid); + INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].res_list_tfe_in); + ctx_pool = &g_tfe_hw_mgr.ctx_pool[i]; + for (j = 0; j < CAM_TFE_HW_OUT_RES_MAX; j++) { + res_list_tfe_out = &ctx_pool->res_list_tfe_out[j]; + INIT_LIST_HEAD(&res_list_tfe_out->list); + } + + /* init context pool */ + INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].free_res_list); + for (j = 0; j < CAM_TFE_HW_RES_POOL_MAX; j++) { + INIT_LIST_HEAD( + &g_tfe_hw_mgr.ctx_pool[i].res_pool[j].list); + list_add_tail( + &g_tfe_hw_mgr.ctx_pool[i].res_pool[j].list, + &g_tfe_hw_mgr.ctx_pool[i].free_res_list); + } + + g_tfe_hw_mgr.ctx_pool[i].cdm_cmd = + CAM_MEM_ZALLOC(((sizeof(struct cam_cdm_bl_request)) + + ((CAM_ISP_CTX_CFG_MAX - 1) * + sizeof(struct 
cam_cdm_bl_cmd))), GFP_KERNEL); + if (!g_tfe_hw_mgr.ctx_pool[i].cdm_cmd) { + rc = -ENOMEM; + CAM_ERR(CAM_ISP, "Allocation Failed for cdm command"); + goto end; + } + + g_tfe_hw_mgr.ctx_pool[i].ctx_index = i; + g_tfe_hw_mgr.ctx_pool[i].hw_mgr = &g_tfe_hw_mgr; + + cam_tasklet_init(&g_tfe_hw_mgr.mgr_common.tasklet_pool[i], + &g_tfe_hw_mgr.ctx_pool[i], i); + g_tfe_hw_mgr.ctx_pool[i].common.tasklet_info = + g_tfe_hw_mgr.mgr_common.tasklet_pool[i]; + + + init_completion(&g_tfe_hw_mgr.ctx_pool[i].config_done_complete); + list_add_tail(&g_tfe_hw_mgr.ctx_pool[i].list, + &g_tfe_hw_mgr.free_ctx_list); + } + + /* Create Worker for tfe_hw_mgr with 10 tasks */ + rc = cam_req_mgr_workq_create("cam_tfe_worker", 10, + &g_tfe_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ, 0, + cam_req_mgr_process_tfe_worker); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Unable to create worker"); + goto end; + } + + /* fill return structure */ + hw_mgr_intf->hw_mgr_priv = &g_tfe_hw_mgr; + hw_mgr_intf->hw_get_caps = cam_tfe_mgr_get_hw_caps; + hw_mgr_intf->hw_acquire = cam_tfe_mgr_acquire; + hw_mgr_intf->hw_start = cam_tfe_mgr_start_hw; + hw_mgr_intf->hw_stop = cam_tfe_mgr_stop_hw; + hw_mgr_intf->hw_read = cam_tfe_mgr_read; + hw_mgr_intf->hw_write = cam_tfe_mgr_write; + hw_mgr_intf->hw_release = cam_tfe_mgr_release_hw; + hw_mgr_intf->hw_prepare_update = cam_tfe_mgr_prepare_hw_update; + hw_mgr_intf->hw_config = cam_tfe_mgr_config_hw; + hw_mgr_intf->hw_cmd = cam_tfe_mgr_cmd; + hw_mgr_intf->hw_reset = cam_tfe_mgr_reset; + hw_mgr_intf->hw_dump = cam_tfe_mgr_dump; + + if (iommu_hdl) + *iommu_hdl = g_tfe_hw_mgr.mgr_common.img_iommu_hdl; + + cam_tfe_hw_mgr_debug_register(); + CAM_DBG(CAM_ISP, "Exit"); + + return 0; +end: + if (rc) { + for (i = 0; i < CAM_TFE_CTX_MAX; i++) { + cam_tasklet_deinit( + &g_tfe_hw_mgr.mgr_common.tasklet_pool[i]); + CAM_MEM_FREE(g_tfe_hw_mgr.ctx_pool[i].cdm_cmd); + g_tfe_hw_mgr.ctx_pool[i].cdm_cmd = NULL; + g_tfe_hw_mgr.ctx_pool[i].common.tasklet_info = NULL; + } + } + cam_smmu_destroy_handle( + 
g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure); + g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure = -1; +secure_fail: + cam_smmu_destroy_handle(g_tfe_hw_mgr.mgr_common.img_iommu_hdl); + g_tfe_hw_mgr.mgr_common.img_iommu_hdl = -1; + return rc; +} + +void cam_tfe_hw_mgr_deinit(void) +{ + int i = 0; + + cam_req_mgr_workq_destroy(&g_tfe_hw_mgr.workq); + g_tfe_hw_mgr.debug_cfg.dentry = NULL; + + for (i = 0; i < CAM_TFE_CTX_MAX; i++) { + cam_tasklet_deinit( + &g_tfe_hw_mgr.mgr_common.tasklet_pool[i]); + CAM_MEM_FREE(g_tfe_hw_mgr.ctx_pool[i].cdm_cmd); + g_tfe_hw_mgr.ctx_pool[i].cdm_cmd = NULL; + g_tfe_hw_mgr.ctx_pool[i].common.tasklet_info = NULL; + } + + cam_smmu_destroy_handle( + g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure); + g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure = -1; + + cam_smmu_destroy_handle(g_tfe_hw_mgr.mgr_common.img_iommu_hdl); + g_tfe_hw_mgr.mgr_common.img_iommu_hdl = -1; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h new file mode 100644 index 0000000000..5393352f5a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_TFE_HW_MGR_H_ +#define _CAM_TFE_HW_MGR_H_ + +#include +#include +#include "cam_isp_hw_mgr.h" +#include "cam_tfe_hw_intf.h" +#include "cam_tfe_csid_hw_intf.h" +#include "cam_tasklet_util.h" +#include "cam_cdm_intf_api.h" + + + +/* TFE resource constants */ +#define CAM_TFE_HW_IN_RES_MAX (CAM_ISP_TFE_IN_RES_MAX & 0xFF) +#define CAM_TFE_HW_OUT_RES_MAX (CAM_ISP_TFE_OUT_RES_MAX & 0xFF) +#define CAM_TFE_HW_RES_POOL_MAX 64 + +/** + * struct cam_tfe_hw_mgr_debug - contain the debug information + * + * @dentry: Debugfs entry + * @csid_debug: csid debug information + * @enable_recovery: enable recovery + * @enable_csid_recovery: enable csid recovery + * @camif_debug: enable sensor diagnosis status + * @enable_reg_dump: enable reg dump on error; + * @per_req_reg_dump: Enable per request reg dump + * @enable_cdm_cmd_check: Enable invalid command check in cmd_buf + */ +struct cam_tfe_hw_mgr_debug { + struct dentry *dentry; + uint64_t csid_debug; + uint32_t enable_recovery; + uint32_t enable_csid_recovery; + uint32_t camif_debug; + uint32_t enable_reg_dump; + uint32_t per_req_reg_dump; + bool enable_cdm_cmd_check; +}; + +/** + * struct cam_tfe_hw_comp_record + * + * @brief: Structure record the res id reserved on a comp group + * + * @num_res: Number of valid resource IDs in this record + * @res_id: Resource IDs to report buf dones + * + */ +struct cam_tfe_hw_comp_record { + uint32_t num_res; + uint32_t res_id[CAM_NUM_OUT_PER_COMP_IRQ_MAX]; +}; + +/** + * struct cam_tfe_comp_record_query + * + * @brief: Structure record the bus comp group pointer information + * @tfe_bus_comp_grp: Tfe bus comp group pointer + * @reserved: used to make parent compatible with struct cam_isp_comp_record_query + * + */ +struct cam_tfe_comp_record_query { + struct cam_tfe_hw_comp_record *tfe_bus_comp_grp; + void *reserved; +}; + +/** + * struct cam_tfe_cdm_user_data - TFE HW user data with CDM + * + * @prepare: hw_update_data + * @request_id: Request id + */ +struct 
cam_tfe_cdm_user_data { + struct cam_isp_prepare_hw_update_data *hw_update_data; + uint64_t request_id; +}; + +/** + * struct cam_tfe_cmd_buf_desc_addr_len + * + * brief: structure to store cpu addr and size of + * reg dump descriptors + * @cpu_addr: cpu addr of buffer + * @size: size of the buffer + */ +struct cam_tfe_cmd_buf_desc_addr_len { + uintptr_t cpu_addr; + size_t buf_size; +}; + + +/** + * struct cam_tfe_hw_mgr_ctx - TFE HW manager Context object + * + * @list: used by the ctx list. + * @common: common acquired context data + * @ctx_index: acquired context id. + * @hw_mgr: tfe hw mgr which owns this context + * @ctx_in_use: flag to tell whether context is active + * @res_list_csid: csid resource list + * @res_list_tfe_in: tfe input resource list + * @res_list_tfe_out: tfe output resoruces array + * @free_res_list: free resources list for the branch node + * @res_pool: memory storage for the free resource list + * @base device base index array contain the all TFE HW + * instance associated with this context. 
+ * @num_base number of valid base data in the base array + * @cdm_handle cdm hw acquire handle + * @cdm_ops cdm util operation pointer for building + * cdm commands + * @cdm_cmd cdm base and length request pointer + * @last_submit_bl_cmd last submiited CDM BL command data + * @config_done_complete indicator for configuration complete + * @overflow_pending flat to specify the overflow is pending for the + * context + * @cdm_done flag to indicate cdm has finished writing shadow + * registers + * @last_cdm_done_req: Last CDM done request + * @is_rdi_only_context flag to specify the context has only rdi resource + * @reg_dump_buf_desc: cmd buffer descriptors for reg dump + * @num_reg_dump_buf: count of descriptors in reg_dump_buf_desc + * @applied_req_id: last request id to be applied + * @last_dump_flush_req_id last req id for which reg dump on flush was called + * @last_dump_err_req_id last req id for which reg dump on error was called + * @init_done indicate whether init hw is done + * @is_dual indicate whether context is in dual TFE mode + * @master_hw_idx master hardware index in dual tfe case + * @slave_hw_idx slave hardware index in dual tfe case + * @dual_tfe_irq_mismatch_cnt irq mismatch count value per core, used for + * dual TFE + * @packet CSL packet from user mode driver + * @bw_config_version BW Config version + * @tfe_bus_comp_grp pointer to tfe comp group info + * @cdm_userdata CDM user data + * @skip_reg_dump_buf_put Set if put_cpu_buf for reg dump buf is already called + */ +struct cam_tfe_hw_mgr_ctx { + struct list_head list; + struct cam_isp_hw_mgr_ctx common; + + uint32_t ctx_index; + struct cam_tfe_hw_mgr *hw_mgr; + uint32_t ctx_in_use; + + struct list_head res_list_tfe_csid; + struct list_head res_list_tfe_in; + struct cam_isp_hw_mgr_res + res_list_tfe_out[CAM_TFE_HW_OUT_RES_MAX]; + + struct list_head free_res_list; + struct cam_isp_hw_mgr_res res_pool[CAM_TFE_HW_RES_POOL_MAX]; + + struct cam_isp_ctx_base_info base[CAM_TFE_HW_NUM_MAX]; + 
uint32_t num_base; + uint32_t cdm_handle; + struct cam_cdm_utils_ops *cdm_ops; + struct cam_cdm_bl_request *cdm_cmd; + struct cam_cdm_bl_info last_submit_bl_cmd; + struct completion config_done_complete; + + atomic_t overflow_pending; + atomic_t cdm_done; + uint64_t last_cdm_done_req; + uint32_t is_rdi_only_context; + struct cam_cmd_buf_desc reg_dump_buf_desc[ + CAM_REG_DUMP_MAX_BUF_ENTRIES]; + uint32_t num_reg_dump_buf; + struct cam_tfe_cmd_buf_desc_addr_len + reg_dump_cmd_buf_addr_len[CAM_REG_DUMP_MAX_BUF_ENTRIES]; + uint64_t applied_req_id; + uint64_t last_dump_flush_req_id; + uint64_t last_dump_err_req_id; + bool init_done; + bool is_dual; + uint32_t master_hw_idx; + uint32_t slave_hw_idx; + uint32_t dual_tfe_irq_mismatch_cnt; + struct cam_packet *packet; + uint32_t bw_config_version; + struct cam_tfe_hw_comp_record *tfe_bus_comp_grp; + struct cam_tfe_cdm_user_data cdm_userdata; + bool skip_reg_dump_buf_put; +}; + +/** + * struct cam_tfe_hw_mgr - TFE HW Manager + * + * @mgr_common: common data for all HW managers + * @csid_devices: csid device instances array. This will be filled by + * HW manager during the initialization. + * @tfe_devices: TFE device instances array. 
This will be filled by + * HW layer during initialization + * @cdm_reg_map commands for register dump + * @ctx_mutex: mutex for the hw context pool + * @active_ctx_cnt active context count number + * @free_ctx_list: free hw context list + * @used_ctx_list: used hw context list + * @ctx_pool: context storage + * @tfe_csid_dev_caps csid device capability stored per core + * @tfe_dev_caps tfe device capability per core + * @work q work queue for TFE hw manager + * @debug_cfg debug configuration + * @support_consumed_addr indicate whether hw supports last consumed address + * @ctx_lock Spinlock for HW manager + */ +struct cam_tfe_hw_mgr { + struct cam_isp_hw_mgr mgr_common; + struct cam_hw_intf *csid_devices[CAM_TFE_CSID_HW_NUM_MAX]; + struct cam_isp_hw_intf_data *tfe_devices[CAM_TFE_HW_NUM_MAX]; + struct cam_soc_reg_map *cdm_reg_map[CAM_TFE_HW_NUM_MAX]; + struct mutex ctx_mutex; + atomic_t active_ctx_cnt; + struct list_head free_ctx_list; + struct list_head used_ctx_list; + struct cam_tfe_hw_mgr_ctx ctx_pool[CAM_TFE_CTX_MAX]; + + struct cam_tfe_csid_hw_caps tfe_csid_dev_caps[ + CAM_TFE_CSID_HW_NUM_MAX]; + struct cam_tfe_hw_get_hw_cap tfe_dev_caps[CAM_TFE_HW_NUM_MAX]; + struct cam_req_mgr_core_workq *workq; + struct cam_tfe_hw_mgr_debug debug_cfg; + bool support_consumed_addr; + spinlock_t ctx_lock; +}; + +/** + * struct cam_tfe_hw_event_recovery_data - Payload for the recovery procedure + * + * @error_type: Error type that causes the recovery + * @affected_core: Array of the hardware cores that are affected + * @affected_ctx: Array of the hardware contexts that are affected + * @no_of_context: Actual number of the affected context + * + */ +struct cam_tfe_hw_event_recovery_data { + uint32_t error_type; + uint32_t affected_core[CAM_TFE_HW_NUM_MAX]; + struct cam_tfe_hw_mgr_ctx *affected_ctx[CAM_TFE_CTX_MAX]; + uint32_t no_of_context; +}; + +/** + * cam_tfe_hw_mgr_init() + * + * @brief: Initialize the TFE hardware manger. 
This is the + * etnry functinon for the TFE HW manager. + * + * @hw_mgr_intf: TFE hardware manager object returned + * @iommu_hdl: Iommu handle to be returned + * + */ +#ifdef CONFIG_SPECTRA_TFE +int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl); +void cam_tfe_hw_mgr_deinit(void); +#else +static inline int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, + int *iommu_hdl) +{ + return -EINVAL; +} + +static inline void cam_tfe_hw_mgr_deinit(void) +{ +} +#endif + +#endif /* _CAM_TFE_HW_MGR_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c new file mode 100644 index 0000000000..9d568d34c9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c @@ -0,0 +1,2029 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include "cam_mem_mgr.h" +#include "cam_isp_hw.h" +#include "cam_vfe_hw_intf.h" +#include "cam_isp_packet_parser.h" +#include "cam_debug_util.h" +#include "cam_isp_hw_mgr_intf.h" + +static void cam_isp_add_update_entry( + enum cam_isp_cdm_bl_type cdm_bl_type, + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + uint32_t update_size) +{ + /* Validation is done from the caller */ + uint32_t num_ent; + struct cam_hw_update_entry *curr_update_entry; + + num_ent = prepare->num_hw_update_entries; + curr_update_entry = &(prepare->hw_update_entries[num_ent]); + curr_update_entry->handle = kmd_buf_info->handle; + curr_update_entry->len = update_size; + curr_update_entry->offset = kmd_buf_info->offset; + curr_update_entry->flags = cdm_bl_type; + curr_update_entry->addr = (uintptr_t) (kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4); + + num_ent++; + kmd_buf_info->used_bytes += update_size; + kmd_buf_info->offset += update_size; + prepare->num_hw_update_entries = num_ent; + + CAM_DBG(CAM_ISP, + "Add new entry: num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent - 1, curr_update_entry->handle, + curr_update_entry->len, curr_update_entry->offset); +} + +static void cam_isp_combine_update_entry( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + uint32_t update_size) +{ + /* Validation is done from the caller */ + uint32_t num_ent; + struct cam_hw_update_entry *prev_update_entry; + + num_ent = prepare->num_hw_update_entries; + prev_update_entry = &(prepare->hw_update_entries[num_ent - 1]); + prev_update_entry->len += update_size; + + CAM_DBG(CAM_ISP, + "Combined with prev entry: num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent - 1, prev_update_entry->handle, + prev_update_entry->len, prev_update_entry->offset); + + kmd_buf_info->used_bytes += update_size; + kmd_buf_info->offset += update_size; +} + +void cam_isp_update_hw_entry( + enum cam_isp_cdm_bl_type 
cdm_bl_type, + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + uint32_t update_size, + bool combine) +{ + if (combine) + cam_isp_combine_update_entry(prepare, + kmd_buf_info, update_size); + else + cam_isp_add_update_entry(cdm_bl_type, prepare, + kmd_buf_info, update_size); +} + +int cam_isp_add_change_base( + struct cam_hw_prepare_update_args *prepare, + struct list_head *res_list_isp_src, + struct cam_isp_change_base_args *change_base_args, + struct cam_kmd_buf_info *kmd_buf_info) +{ + int rc = -EINVAL; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *res; + struct cam_isp_hw_get_cmd_update get_base; + uint32_t num_ent, i; + + num_ent = prepare->num_hw_update_entries; + + /* Max one hw entries required for each base */ + if (num_ent + 1 >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d", + num_ent, prepare->max_hw_update_entries); + return -EINVAL; + } + + list_for_each_entry(hw_mgr_res, res_list_isp_src, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + res = hw_mgr_res->hw_res[i]; + if (res->hw_intf->hw_idx != change_base_args->base_idx) + continue; + + get_base.res = res; + get_base.cdm_id = change_base_args->cdm_id; + get_base.cmd_type = CAM_ISP_HW_CMD_GET_CHANGE_BASE; + get_base.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4; + get_base.cmd.size = kmd_buf_info->size - + kmd_buf_info->used_bytes; + + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_GET_CHANGE_BASE, &get_base, + sizeof(struct cam_isp_hw_get_cmd_update)); + if (rc) + return rc; + + cam_isp_add_update_entry(CAM_ISP_COMMON_CFG_BL, prepare, + kmd_buf_info, get_base.cmd.used_bytes); + + /* return success */ + return 0; + } + } + + return rc; +} + +static int cam_isp_update_dual_config( + struct cam_cmd_buf_desc *cmd_desc, + 
uint32_t split_id, + uint32_t base_idx, + struct cam_isp_hw_mgr_res *res_list_isp_out, + uint8_t *out_map, + uint32_t out_base, + uint32_t out_max) +{ + int rc = -EINVAL; + struct cam_isp_dual_config *dual_config; + struct cam_isp_hw_mgr_res *hw_mgr_res; + struct cam_isp_resource_node *res; + struct cam_isp_hw_dual_isp_update_args dual_isp_update_args; + uint32_t port_id = 0; + uint32_t ports_plane_idx; + size_t len = 0, remain_len = 0; + uint32_t *cpu_addr; + uint32_t *cpu_addr_local = NULL; + uint32_t i, j; + size_t packet_size = 0; + + CAM_DBG(CAM_ISP, "cmd des size %d, length: %d", + cmd_desc->size, cmd_desc->length); + + rc = cam_packet_util_get_cmd_mem_addr( + cmd_desc->mem_handle, &cpu_addr, &len); + if (rc) + return rc; + + if ((len < sizeof(struct cam_isp_dual_config)) || + (cmd_desc->offset >= + (len - sizeof(struct cam_isp_dual_config)))) { + CAM_ERR(CAM_ISP, "not enough buffer provided"); + rc = -EINVAL; + goto put_ref; + } + + remain_len = len - cmd_desc->offset; + cpu_addr += (cmd_desc->offset / 4); + + packet_size = cmd_desc->length; + if (packet_size <= remain_len) { + rc = cam_common_mem_kdup((void **)&cpu_addr_local, + cpu_addr, packet_size); + if (rc) { + CAM_ERR(CAM_ISP, "Alloc and copy cmd desc fail"); + goto put_ref; + } + } else { + CAM_ERR(CAM_ISP, "Invalid packet header size %u", + packet_size); + rc = -EINVAL; + goto put_ref; + } + + dual_config = (struct cam_isp_dual_config *)cpu_addr_local; + if ((dual_config->num_ports * + sizeof(struct cam_isp_dual_stripe_config)) > + (remain_len - offsetof(struct cam_isp_dual_config, stripes))) { + CAM_ERR(CAM_ISP, "not enough buffer for all the dual configs"); + rc = -EINVAL; + goto end; + } + + for (i = 0; i < dual_config->num_ports; i++) { + + if (i >= (out_max & 0xFF)) { + CAM_ERR(CAM_ISP, + "failed update for i:%d > size_isp_out:%d", + i, (out_max & 0xFF)); + rc = -EINVAL; + goto end; + } + + for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) { + ports_plane_idx = (j * (dual_config->num_ports * + 
CAM_PACKET_MAX_PLANES)) + + (i * CAM_PACKET_MAX_PLANES); + + if (dual_config->stripes_flex[ports_plane_idx].port_id == 0) + continue; + port_id = dual_config->stripes_flex[ports_plane_idx].port_id; + hw_mgr_res = &res_list_isp_out[out_map[port_id & 0xFF]]; + if (!hw_mgr_res) { + CAM_ERR(CAM_ISP, + "Invalid isp out resource i %d num_out_res %d", + i, dual_config->num_ports); + rc = -EINVAL; + goto end; + } + + if (!hw_mgr_res->hw_res[j]) + continue; + + if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx) + continue; + + res = hw_mgr_res->hw_res[j]; + + if (res->res_id < out_base || + res->res_id >= out_max) + continue; + + dual_isp_update_args.split_id = j; + dual_isp_update_args.res = res; + dual_isp_update_args.dual_cfg = dual_config; + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_STRIPE_UPDATE, + &dual_isp_update_args, + sizeof(struct cam_isp_hw_dual_isp_update_args)); + if (rc) + goto end; + } + } + +end: + cam_common_mem_free(cpu_addr_local); +put_ref: + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; +} + +int cam_isp_add_cmd_buf_update( + struct cam_isp_resource_node *res, + struct cam_hw_intf *hw_intf, + uint32_t cmd_type, + uint32_t hw_cmd_type, + uint32_t *cmd_buf_addr, + uint32_t kmd_buf_remain_size, + void *cmd_update_data, + uint32_t *bytes_used) +{ + int rc = 0; + struct cam_isp_hw_get_cmd_update cmd_update; + uint32_t total_used_bytes = 0; + + cmd_update.cmd_type = hw_cmd_type; + cmd_update.cmd.cmd_buf_addr = cmd_buf_addr; + cmd_update.cmd.size = kmd_buf_remain_size; + cmd_update.cmd.used_bytes = 0; + cmd_update.data = cmd_update_data; + CAM_DBG(CAM_ISP, "cmd_type %u cmd buffer 0x%pK, size %d", + cmd_update.cmd_type, + cmd_update.cmd.cmd_buf_addr, + cmd_update.cmd.size); + + cmd_update.res = res; + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + cmd_update.cmd_type, &cmd_update, + sizeof(struct cam_isp_hw_get_cmd_update)); + + if (rc) { + CAM_ERR(CAM_ISP, "get buf cmd error:%d cmd %d", 
hw_intf->hw_idx, + cmd_update.cmd_type); + rc = -ENOMEM; + return rc; + } + + total_used_bytes += cmd_update.cmd.used_bytes; + *bytes_used = total_used_bytes; + CAM_DBG(CAM_ISP, "total_used_bytes %u", total_used_bytes); + return rc; +} + +int cam_isp_add_command_buffers( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + struct cam_isp_ctx_base_info *base_info, + cam_packet_generic_blob_handler blob_handler_cb, + struct cam_isp_hw_mgr_res *res_list_isp_out, + uint8_t *out_map, + uint32_t out_base, + uint32_t out_max) +{ + int rc = 0; + uint32_t cmd_meta_data, num_ent, i; + uint32_t base_idx; + enum cam_isp_hw_split_id split_id; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_hw_update_entry *hw_entry = NULL; + + split_id = base_info->split_id; + base_idx = base_info->idx; + hw_entry = prepare->hw_update_entries; + + /* + * set the cmd_desc to point the first command descriptor in the + * packet + */ + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint8_t *)&prepare->packet->payload_flex + + prepare->packet->cmd_buf_offset); + + CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d", + split_id, prepare->packet->num_cmd_buf); + + for (i = 0; i < prepare->packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + num_ent = prepare->num_hw_update_entries; + if (!cmd_desc[i].length) + continue; + + /* One hw entry space required for left or right or common */ + if (num_ent + 1 >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d", + num_ent, prepare->max_hw_update_entries); + return -EINVAL; + } + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + cmd_meta_data = cmd_desc[i].meta_data; + + CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d", + cmd_meta_data, split_id); + + switch (cmd_meta_data) { + case CAM_ISP_PACKET_META_BASE: + case CAM_ISP_PACKET_META_LEFT: + case CAM_ISP_PACKET_META_DMI_LEFT: + if 
(split_id == CAM_ISP_HW_SPLIT_LEFT) { + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Left num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + } + break; + case CAM_ISP_PACKET_META_RIGHT: + case CAM_ISP_PACKET_META_DMI_RIGHT: + if (split_id == CAM_ISP_HW_SPLIT_RIGHT) { + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Right num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + } + break; + case CAM_ISP_PACKET_META_COMMON: + case CAM_ISP_PACKET_META_DMI_COMMON: + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Common num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + break; + case CAM_ISP_PACKET_META_DUAL_CONFIG: + rc = cam_isp_update_dual_config(&cmd_desc[i], + split_id, base_idx, res_list_isp_out, + out_map, out_base, out_max); + if (rc) + return rc; + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT: + if (split_id == CAM_ISP_HW_SPLIT_LEFT) { + struct cam_isp_generic_blob_info blob_info; + + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + blob_info.entry_added = false; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in 
processing blobs %d", + rc); + return rc; + } + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT: + if (split_id == CAM_ISP_HW_SPLIT_RIGHT) { + struct cam_isp_generic_blob_info blob_info; + + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + blob_info.entry_added = false; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing blobs %d", + rc); + return rc; + } + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON: { + struct cam_isp_generic_blob_info blob_info; + + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + blob_info.entry_added = false; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing blobs %d", rc); + return rc; + } + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_REG_DUMP_ON_FLUSH: + case CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR: + case CAM_ISP_PACKET_META_REG_DUMP_PER_REQUEST: + if (split_id == CAM_ISP_HW_SPLIT_LEFT) { + if (prepare->num_reg_dump_buf >= + CAM_REG_DUMP_MAX_BUF_ENTRIES) { + CAM_ERR(CAM_ISP, + "Descriptor count out of bounds: %d", + prepare->num_reg_dump_buf); + return -EINVAL; + } + + prepare->reg_dump_buf_desc[ + prepare->num_reg_dump_buf] = + cmd_desc[i]; + prepare->num_reg_dump_buf++; + CAM_DBG(CAM_ISP, + "Added command buffer: %d desc_count: %d", + cmd_desc[i].meta_data, + prepare->num_reg_dump_buf); + } + break; + case CAM_ISP_SFE_PACKET_META_LEFT: + case CAM_ISP_SFE_PACKET_META_RIGHT: + case CAM_ISP_SFE_PACKET_META_COMMON: + case CAM_ISP_SFE_PACKET_META_DUAL_CONFIG: + break; + case CAM_ISP_PACKET_META_CSID_LEFT: + case CAM_ISP_PACKET_META_CSID_RIGHT: + case 
CAM_ISP_PACKET_META_CSID_COMMON: + break; + default: + CAM_ERR(CAM_ISP, "invalid cdm command meta data %d", + cmd_meta_data); + return -EINVAL; + } + prepare->num_hw_update_entries = num_ent; + } + + return rc; +} + +int cam_sfe_add_command_buffers( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + struct cam_isp_ctx_base_info *base_info, + cam_packet_generic_blob_handler blob_handler_cb, + struct cam_isp_hw_mgr_res *res_list_sfe_out, + uint8_t *out_map, + uint32_t out_base, + uint32_t out_max) +{ + int rc = 0; + uint32_t cmd_meta_data, num_ent, i; + enum cam_isp_hw_split_id split_id; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_hw_update_entry *hw_entry = NULL; + + split_id = base_info->split_id; + hw_entry = prepare->hw_update_entries; + + /* + * set the cmd_desc to point the first command descriptor in the + * packet + */ + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint8_t *)&prepare->packet->payload_flex + + prepare->packet->cmd_buf_offset); + + CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d", + split_id, prepare->packet->num_cmd_buf); + + for (i = 0; i < prepare->packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + num_ent = prepare->num_hw_update_entries; + if (!cmd_desc[i].length) + continue; + + /* One hw entry space required for left or right or common */ + if (num_ent + 1 >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d", + num_ent, prepare->max_hw_update_entries); + return -EINVAL; + } + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + cmd_meta_data = cmd_desc[i].meta_data; + + CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d", + cmd_meta_data, split_id); + + switch (cmd_meta_data) { + case CAM_ISP_SFE_PACKET_META_LEFT: + if (split_id == CAM_ISP_HW_SPLIT_LEFT) { + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + 
hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Left num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + } + break; + case CAM_ISP_SFE_PACKET_META_RIGHT: + if (split_id == CAM_ISP_HW_SPLIT_RIGHT) { + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Right num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + } + break; + case CAM_ISP_SFE_PACKET_META_COMMON: + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Common num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + break; + case CAM_ISP_SFE_PACKET_META_DUAL_CONFIG: + rc = cam_isp_update_dual_config(&cmd_desc[i], + split_id, base_info->idx, + res_list_sfe_out, out_map, out_base, out_max); + if (rc) + return rc; + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT: + if (split_id == CAM_ISP_HW_SPLIT_LEFT) { + struct cam_isp_generic_blob_info blob_info; + + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + blob_info.entry_added = false; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing blobs %d", + rc); + return rc; + } + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT: + if (split_id == CAM_ISP_HW_SPLIT_RIGHT) { + 
struct cam_isp_generic_blob_info blob_info; + + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + blob_info.entry_added = false; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing blobs %d", + rc); + return rc; + } + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON: { + struct cam_isp_generic_blob_info blob_info; + + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + blob_info.entry_added = false; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing blobs %d", rc); + return rc; + } + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_BASE: + case CAM_ISP_PACKET_META_LEFT: + case CAM_ISP_PACKET_META_RIGHT: + case CAM_ISP_PACKET_META_COMMON: + case CAM_ISP_PACKET_META_DMI_LEFT: + case CAM_ISP_PACKET_META_DMI_RIGHT: + case CAM_ISP_PACKET_META_DMI_COMMON: + case CAM_ISP_PACKET_META_CLOCK: + case CAM_ISP_PACKET_META_CSID: + case CAM_ISP_PACKET_META_DUAL_CONFIG: + case CAM_ISP_PACKET_META_REG_DUMP_PER_REQUEST: + case CAM_ISP_PACKET_META_REG_DUMP_ON_FLUSH: + case CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR: + case CAM_ISP_PACKET_META_CSID_LEFT: + case CAM_ISP_PACKET_META_CSID_RIGHT: + case CAM_ISP_PACKET_META_CSID_COMMON: + break; + default: + CAM_ERR(CAM_ISP, "invalid cdm command meta data %d", + cmd_meta_data); + return -EINVAL; + } + prepare->num_hw_update_entries = num_ent; + } + + return rc; +} + +static void cam_isp_validate_for_sfe_scratch( + struct cam_isp_sfe_scratch_buf_res_info *sfe_res_info, + uint32_t res_type, uint32_t out_base) +{ + uint32_t res_id_out = res_type & 0xFF; + + if ((res_id_out) < ((out_base & 0xFF) + + sfe_res_info->num_active_fe_rdis)) { + 
CAM_DBG(CAM_ISP, + "Buffer found for SFE port: 0x%x - skip scratch buffer", + res_type); + sfe_res_info->sfe_rdi_cfg_mask |= (1 << res_id_out); + } +} + +static void cam_isp_validate_for_ife_scratch( + struct cam_isp_ife_scratch_buf_res_info *ife_res_info, + uint32_t res_type) +{ + int i; + + for (i = 0; i < ife_res_info->num_ports; i++) { + if (res_type == ife_res_info->ife_scratch_resources[i]) { + CAM_DBG(CAM_ISP, + "Buffer found for IFE port: 0x%x - skip scratch buffer", + res_type); + ife_res_info->ife_scratch_cfg_mask |= (1 << i); + } + } +} + +static int cam_isp_io_buf_get_entries_util( + struct cam_isp_io_buf_info *buf_info, + struct cam_buf_io_cfg *io_cfg, + struct cam_isp_hw_mgr_res **hw_mgr_res) +{ + uint32_t res_id; + uint32_t num_entries; + struct cam_hw_fence_map_entry *map_entries = NULL; + struct cam_isp_hw_mgr_res *hw_mgr_res_temp; + bool found = false; + + CAM_DBG(CAM_REQ, + "req_id %llu resource_type:%d fence:%d direction %d format %d", + buf_info->prepare->packet->header.request_id, + io_cfg->resource_type, io_cfg->fence, + io_cfg->direction, io_cfg->format); + + if (io_cfg->direction == CAM_BUF_OUTPUT) { + res_id = io_cfg->resource_type & 0xFF; + + if (io_cfg->resource_type < buf_info->out_base || + io_cfg->resource_type >= buf_info->out_max) + return -ENOMSG; + + if ((buf_info->base->hw_type == CAM_ISP_HW_TYPE_SFE) && + (buf_info->scratch_check_cfg->validate_for_sfe)) { + cam_isp_validate_for_sfe_scratch( + &buf_info->scratch_check_cfg->sfe_scratch_res_info, + io_cfg->resource_type, buf_info->out_base); + } else if ((buf_info->base->hw_type == CAM_ISP_HW_TYPE_VFE) && + (buf_info->scratch_check_cfg->validate_for_ife)) { + cam_isp_validate_for_ife_scratch( + &buf_info->scratch_check_cfg->ife_scratch_res_info, + io_cfg->resource_type); + } + + *hw_mgr_res = &buf_info->res_list_isp_out[buf_info->out_map[res_id]]; + if ((*hw_mgr_res)->res_type == CAM_ISP_RESOURCE_UNINT) { + CAM_ERR(CAM_ISP, "io res id:%d not valid", + io_cfg->resource_type); + 
return -EINVAL; + } + } else if (io_cfg->direction == CAM_BUF_INPUT) { + found = false; + res_id = io_cfg->resource_type; + if (!buf_info->res_list_in_rd) { + CAM_DBG(CAM_ISP, + "No BUS Read supported hw_type %d io_cfg %d req:%d type:%d fence:%d", + buf_info->base->hw_type, + buf_info->prepare->packet->num_io_configs, + buf_info->prepare->packet->header.request_id, + io_cfg->resource_type, io_cfg->fence); + return -ENOMSG; + } + if (buf_info->base->hw_type != CAM_ISP_HW_TYPE_SFE) + res_id = CAM_ISP_HW_VFE_IN_RD; + + list_for_each_entry_safe((*hw_mgr_res), hw_mgr_res_temp, + buf_info->res_list_in_rd, list) { + if ((*hw_mgr_res)->res_id == res_id) { + found = true; + break; + } + } + + if (!found) { + CAM_ERR(CAM_ISP, "No Read resource"); + return -EINVAL; + } + + } else { + CAM_ERR(CAM_ISP, "Invalid io config direction :%d", + io_cfg->direction); + return -EINVAL; + } + + if (buf_info->fill_fence) { + if (io_cfg->direction == CAM_BUF_OUTPUT && (buf_info->prepare->num_out_map_entries < + buf_info->prepare->max_out_map_entries)) { + + num_entries = buf_info->prepare->num_out_map_entries; + map_entries = &buf_info->prepare->out_map_entries[num_entries]; + buf_info->prepare->num_out_map_entries++; + } else if (io_cfg->direction == CAM_BUF_INPUT && + (buf_info->prepare->num_in_map_entries < + buf_info->prepare->max_in_map_entries)) { + num_entries = buf_info->prepare->num_in_map_entries; + map_entries = &buf_info->prepare->in_map_entries[num_entries]; + buf_info->prepare->num_in_map_entries++; + + } else { + CAM_ERR(CAM_ISP, "dir: %d max_ln:%d max_out: %u in: %u out %u", + io_cfg->direction, + buf_info->prepare->max_in_map_entries, + buf_info->prepare->max_out_map_entries, + buf_info->prepare->num_in_map_entries, + buf_info->prepare->num_out_map_entries); + return -EINVAL; + } + + map_entries->resource_handle = io_cfg->resource_type; + map_entries->sync_id = io_cfg->fence; + map_entries->early_sync_id = io_cfg->early_fence; + if (buf_info->major_version == 3) + 
map_entries->hw_ctxt_id = io_cfg->flag; + else + map_entries->hw_ctxt_id = 0x0; + } + + return 0; +} + +static int cam_isp_add_io_buffers_util( + struct cam_isp_io_buf_info *buf_info, + struct cam_buf_io_cfg *io_cfg, + struct cam_isp_resource_node *res) +{ + uint32_t secure_mode_cmd; + uint32_t bus_update_cmd; + int rc = 0; + dma_addr_t io_addr[CAM_PACKET_MAX_PLANES]; + struct cam_isp_hw_get_cmd_update update_buf; + struct cam_isp_hw_get_wm_update bus_port_update; + struct cam_hw_fence_map_entry *out_map_entry = NULL; + struct cam_smmu_buffer_tracker *old_head_entry, *new_head_entry; + uint32_t kmd_buf_remain_size; + uint32_t plane_id, num_entries; + dma_addr_t *image_buf_addr; + uint32_t *image_buf_offset; + size_t size; + int mmu_hdl; + bool is_buf_secure; + struct cam_isp_hw_get_cmd_update secure_mode; + uint32_t mode = 0; + + if (io_cfg->direction == CAM_BUF_OUTPUT) { + secure_mode_cmd = CAM_ISP_HW_CMD_GET_WM_SECURE_MODE; + bus_update_cmd = CAM_ISP_HW_CMD_GET_BUF_UPDATE; + } else if (io_cfg->direction == CAM_BUF_INPUT) { + secure_mode_cmd = CAM_ISP_HW_CMD_GET_RM_SECURE_MODE; + bus_update_cmd = CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM; + + } else { + CAM_ERR(CAM_ISP, "Invalid dir %d", io_cfg->direction); + return -EINVAL; + } + + if (!res) { + CAM_ERR(CAM_ISP, "invalid res, io res id:%d split_id :%d", + io_cfg->resource_type, buf_info->base->split_id); + return -EINVAL; + } + + if (res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Inactive res ID: 0x%x state: %d", res->res_id, res->res_state); + return -EINVAL; + } + + if (res->res_id != io_cfg->resource_type) { + CAM_ERR(CAM_ISP, "err res id:%d io res id:%d", + res->res_id, io_cfg->resource_type); + return -EINVAL; + } + + old_head_entry = list_first_entry_or_null(buf_info->prepare->buf_tracker, + struct cam_smmu_buffer_tracker, list); + + secure_mode.cmd_type = secure_mode_cmd; + secure_mode.res = res; + secure_mode.data = (void *)&mode; + if (!res->hw_intf) { + CAM_ERR(CAM_ISP, "Invalid hw intf 
for res: 0x%x", res->res_id); + return -EINVAL; + } + + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, secure_mode_cmd, + &secure_mode, sizeof(struct cam_isp_hw_get_cmd_update)); + if (rc) { + CAM_ERR(CAM_ISP, "Get secure mode failed cmd_type %d res_id %d", + secure_mode_cmd, res->res_id); + return -EINVAL; + } + + memset(io_addr, 0, sizeof(io_addr)); + for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES; plane_id++) { + if (!io_cfg->mem_handle[plane_id]) + break; + + is_buf_secure = cam_mem_is_secure_buf(io_cfg->mem_handle[plane_id]); + + if ((mode == CAM_SECURE_MODE_SECURE) && + is_buf_secure) { + mmu_hdl = buf_info->sec_iommu_hdl; + } else if ((mode == CAM_SECURE_MODE_NON_SECURE) && + !is_buf_secure) { + mmu_hdl = buf_info->iommu_hdl; + } else { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "Invalid hdl: port mode[%u], buf mode[%u]", + mode, is_buf_secure); + return -EINVAL; + } + + rc = cam_mem_get_io_buf(io_cfg->mem_handle[plane_id], + mmu_hdl, &io_addr[plane_id], &size, NULL, + (!plane_id && (io_cfg->direction == CAM_BUF_OUTPUT)) ? 
+ buf_info->prepare->buf_tracker : NULL); + if (rc) { + CAM_ERR(CAM_ISP, "no io addr for plane%d", plane_id); + rc = -ENOMEM; + return rc; + } + + /* need to update with offset */ + io_addr[plane_id] += io_cfg->offsets[plane_id]; + CAM_DBG(CAM_ISP, "get io_addr for plane %d: 0x%llx, mem_hdl=0x%x", + plane_id, io_addr[plane_id], io_cfg->mem_handle[plane_id]); + + CAM_DBG(CAM_ISP, "mmu_hdl=0x%x, size=%d, end=0x%x", + mmu_hdl, (int)size, io_addr[plane_id]+size); + } + + if (!plane_id) { + CAM_ERR(CAM_ISP, "No valid planes for res%d", res->res_id); + rc = -ENOMEM; + return rc; + } + + if ((buf_info->kmd_buf_info->used_bytes) < + buf_info->kmd_buf_info->size) { + kmd_buf_remain_size = buf_info->kmd_buf_info->size - + (buf_info->kmd_buf_info->used_bytes); + } else { + CAM_ERR(CAM_ISP, "no free kmd memory for base %d", buf_info->base->idx); + rc = -ENOMEM; + return rc; + } + update_buf.res = res; + update_buf.cmd_type = bus_update_cmd; + update_buf.cmd.cmd_buf_addr = buf_info->kmd_buf_info->cpu_addr + + buf_info->kmd_buf_info->used_bytes/4; + bus_port_update.image_buf = io_addr; + bus_port_update.num_buf = plane_id; + bus_port_update.io_cfg = io_cfg; + bus_port_update.frame_header = 0; + bus_port_update.fh_enabled = false; + + for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES; plane_id++) + bus_port_update.image_buf_offset[plane_id] = 0; + + if ((buf_info->frame_hdr->frame_header_enable) && + !(buf_info->frame_hdr->frame_header_res_id)) { + bus_port_update.frame_header = + buf_info->frame_hdr->frame_header_iova_addr; + bus_port_update.local_id = + buf_info->prepare->packet->header.request_id; + } + + update_buf.cmd.size = kmd_buf_remain_size; + if (io_cfg->direction == CAM_BUF_OUTPUT) + update_buf.wm_update = &bus_port_update; + else if (io_cfg->direction == CAM_BUF_INPUT) + update_buf.rm_update = &bus_port_update; + + update_buf.use_scratch_cfg = false; + + CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d", + update_buf.cmd.cmd_buf_addr, update_buf.cmd.size); + rc = 
res->hw_intf->hw_ops.process_cmd(
		res->hw_intf->hw_priv,
		bus_update_cmd, &update_buf,
		sizeof(struct cam_isp_hw_get_cmd_update));

	if (rc) {
		/* NOTE(review): this prints res_id via %d although the text says
		 * "error" — presumably rc was intended; confirm before changing.
		 */
		CAM_ERR(CAM_ISP, "get buf cmd error:%d",
			res->res_id);
		rc = -ENOMEM;
		return rc;
	}

	/* Remember which output port carries the frame header once the
	 * bus driver reports it enabled for this resource.
	 */
	if (bus_port_update.fh_enabled) {
		buf_info->frame_hdr->frame_header_res_id = res->res_id;
		CAM_DBG(CAM_ISP,
			"Frame header enabled for res: 0x%x iova: %pK",
			buf_info->frame_hdr->frame_header_res_id,
			bus_port_update.frame_header);
	}

	/* Account for the command words the bus driver just wrote */
	buf_info->kmd_buf_info->used_bytes += update_buf.cmd.used_bytes;
	buf_info->kmd_buf_info->offset += update_buf.cmd.used_bytes;

	if (io_cfg->direction == CAM_BUF_OUTPUT) {
		if (buf_info->fill_fence) {
			/* Fill per-plane addresses into the out-map entry that
			 * cam_isp_io_buf_get_entries_util() appended for this
			 * io_cfg (hence num_out_map_entries - 1).
			 */
			num_entries = buf_info->prepare->num_out_map_entries - 1;
			out_map_entry =
				&buf_info->prepare->out_map_entries[num_entries];
			/* NOTE(review): address-of an array element — this NULL
			 * check can never fire; kept as-is.
			 */
			if (!out_map_entry) {
				CAM_ERR(CAM_ISP, "out_map_entry is NULL");
				rc = -EINVAL;
				return rc;
			}

			image_buf_addr = out_map_entry->image_buf_addr;
			image_buf_offset = bus_port_update.image_buf_offset;
			/* Only the left split records the final plane addresses */
			if (buf_info->base->split_id == CAM_ISP_HW_SPLIT_LEFT) {
				for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
					plane_id++)
					image_buf_addr[plane_id] = io_addr[plane_id] +
						image_buf_offset[plane_id];
			}

			/* If cam_mem_get_io_buf() pushed a new tracker entry for
			 * plane 0, attach it to the out-map entry for ref tracking.
			 */
			new_head_entry =
				list_first_entry_or_null(buf_info->prepare->buf_tracker,
					struct cam_smmu_buffer_tracker, list);
			if (new_head_entry && old_head_entry != new_head_entry) {
				out_map_entry->buffer_tracker = new_head_entry;
				CAM_DBG(CAM_ISP,
					"[SMMU_BT] Tracking io_buf, buf_handle: 0x%x, fd: 0x%x, res_id: %d",
					io_cfg->mem_handle[0],
					out_map_entry->buffer_tracker->ion_fd, res->res_id);
			}
		}
	}

	return rc;
}

/**
 * cam_isp_add_io_buffers_mc()
 *
 * @brief: Program IO buffers for one multi-context (MC) hw context.
 *         First writes a CAM_ISP_HW_CMD_MC_CTXT_SEL command selecting
 *         @ctxt_id into the KMD buffer, then walks the @num_ports io
 *         configs stashed for that context in @mc_io_cfg and adds the
 *         buffer updates for each.
 *
 * @mc_io_cfg: Array of io-config pointers (stored as u64), indexed as
 *             (out_max * ctxt_id) + port
 * @io_info:   IO buffer info for the current base
 * @num_ports: Number of io configs queued for this context
 * @ctxt_id:   MC hw context index
 *
 * @return: 0 on success, negative errno on failure
 */
static int cam_isp_add_io_buffers_mc(
	uint64_t *mc_io_cfg,
	struct cam_isp_io_buf_info *io_info,
	uint8_t num_ports,
	uint32_t ctxt_id)
{
	uint32_t bytes_used;
	uint32_t kmd_buf_remain_size;
	uint32_t *cmd_buf_addr;
	struct cam_isp_hw_mgr_res *hw_mgr_res = NULL;
	struct cam_isp_resource_node *res = NULL;
	uint8_t max_out = 0;
	int rc = 0;
	int i;

	/* Context-select command is written directly into the KMD buffer */
	if (io_info->kmd_buf_info->used_bytes < io_info->kmd_buf_info->size) {
		kmd_buf_remain_size = io_info->kmd_buf_info->size -
			io_info->kmd_buf_info->used_bytes;
	} else {
		CAM_ERR(CAM_ISP,
			"no free kmd memory for base=%d bytes_used=%u buf_size=%u",
			io_info->base->idx, io_info->kmd_buf_info->used_bytes,
			io_info->kmd_buf_info->size);
		rc = -ENOMEM;
		return rc;
	}

	/* cpu_addr is a u32 pointer, hence used_bytes/4 */
	cmd_buf_addr = io_info->kmd_buf_info->cpu_addr +
		io_info->kmd_buf_info->used_bytes/4;
	rc = cam_isp_add_cmd_buf_update(
		NULL, io_info->hw_intf,
		CAM_ISP_HW_CMD_MC_CTXT_SEL,
		CAM_ISP_HW_CMD_MC_CTXT_SEL,
		(void *)cmd_buf_addr,
		kmd_buf_remain_size,
		(void *)(&ctxt_id),
		&bytes_used);

	if (rc) {
		CAM_ERR(CAM_ISP, "Adding MC context[%u] failed for base[%d]",
			ctxt_id, io_info->base->idx);
		return rc;
	}

	io_info->kmd_buf_info->used_bytes += bytes_used;
	io_info->kmd_buf_info->offset += bytes_used;

	max_out = io_info->out_max;
	for (i = 0; i < num_ports; i++) {
		/* NOTE(review): rc from cam_isp_io_buf_get_entries_util() is not
		 * checked here; a -ENOMSG "skip" result (handled explicitly in
		 * cam_isp_add_io_buffers()) would fall through to the
		 * hw_mgr_res/res checks below — confirm this is intended.
		 */
		rc = cam_isp_io_buf_get_entries_util(io_info,
			(struct cam_buf_io_cfg *)mc_io_cfg[(max_out * ctxt_id) + i], &hw_mgr_res);

		if (!hw_mgr_res) {
			CAM_ERR(CAM_ISP, "hw_mgr res is NULL");
			return -EINVAL;
		}

		res = hw_mgr_res->hw_res[io_info->base->split_id];

		if (!res)
			continue;

		rc = cam_isp_add_io_buffers_util(io_info,
			(struct cam_buf_io_cfg *)mc_io_cfg[(max_out * ctxt_id) + i], res);

		if (rc) {
			CAM_ERR(CAM_ISP, "ctxt[%d] io_cfg[%d] add buf failed rc %d",
				ctxt_id, i, rc);
			return rc;
		}
	}

	return rc;
}

/**
 * cam_isp_add_io_buffers()
 *
 * @brief: Walk every io config in the prepare packet and program the
 *         corresponding IO buffers. For packet major version 3 the io
 *         configs are first grouped per MC hw context (via io_cfg.flag)
 *         and then programmed context by context through
 *         cam_isp_add_io_buffers_mc(); otherwise each config is handled
 *         inline. Finally collapses the bytes written into a single
 *         CAM_ISP_IOCFG_BL hw update entry.
 *
 * @io_info: IO buffer info for the current base
 *
 * @return: 0 on success, negative errno on failure
 */
int cam_isp_add_io_buffers(struct cam_isp_io_buf_info *io_info)
{
	int rc = 0;
	struct cam_buf_io_cfg *io_cfg = NULL;
	struct cam_isp_hw_mgr_res *hw_mgr_res = NULL;
	uint32_t i;
	uint32_t curr_used_bytes = 0;
	uint32_t bytes_updated = 0;
	struct cam_isp_resource_node *res = NULL;
	int ctxt_id = 0;
	uint8_t num_ports[CAM_ISP_MULTI_CTXT_MAX] = {0};
	uint8_t max_out_res = 0;
	uint64_t *mc_cfg = NULL;
	uint32_t
major_version = 0;

	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
		&io_info->prepare->packet->payload_flex +
		io_info->prepare->packet->io_configs_offset);
	/* Snapshot to compute the total bytes this call writes */
	curr_used_bytes = io_info->kmd_buf_info->used_bytes;

	/* Max one hw entries required for each base */
	if (io_info->prepare->num_hw_update_entries + 1 >=
		io_info->prepare->max_hw_update_entries) {
		CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
			io_info->prepare->num_hw_update_entries,
			io_info->prepare->max_hw_update_entries);
		return -EINVAL;
	}

	max_out_res = io_info->out_max & 0xFF;
	major_version = io_info->major_version;

	/* v3 packets carry MC context info: stage io configs per context */
	if (major_version == 3) {
		mc_cfg = vzalloc(sizeof(uint64_t) * CAM_ISP_MULTI_CTXT_MAX * (max_out_res));
		if (!mc_cfg) {
			CAM_ERR(CAM_ISP, "Memory allocation failed for MC cases");
			return -ENOMEM;
		}
	}

	for (i = 0; i < io_info->prepare->packet->num_io_configs; i++) {

		if (major_version == 3) {
			if ((io_cfg[i].flag < CAM_ISP_MULTI_CTXT0_MASK) ||
				(io_cfg[i].flag > CAM_ISP_MULTI_CTXT2_MASK)) {
				CAM_ERR(CAM_ISP, "Invalid hw context id: 0x%x for io cfg: %d",
					io_cfg[i].flag, i);
				rc = -EINVAL;
				goto err;
			}

			/* flag is a one-hot context mask; ffs() maps it to an index */
			ctxt_id = ffs(io_cfg[i].flag) - 1;
			if (ctxt_id < 0) {
				CAM_ERR(CAM_ISP,
					"Invalid ctxt_id %d req_id %llu resource_type:%d",
					ctxt_id, io_info->prepare->packet->header.request_id,
					io_cfg[i].resource_type);
				rc = -EINVAL;
				goto err;
			}

			/* Stash the io_cfg pointer for later per-context processing */
			mc_cfg[(max_out_res * ctxt_id) + num_ports[ctxt_id]] = (uint64_t)&io_cfg[i];
			num_ports[ctxt_id]++;
		} else {
			/* Non-MC path: mc_cfg is NULL here, so the direct returns
			 * below cannot leak it.
			 */
			rc = cam_isp_io_buf_get_entries_util(io_info, &io_cfg[i], &hw_mgr_res);
			if (rc == -ENOMSG) {
				/* -ENOMSG means "not applicable to this base" — skip */
				rc = 0;
				continue;
			} else if (rc) {
				CAM_ERR(CAM_ISP, "io_cfg[%d] failed rc %d", i, rc);
				return rc;
			}

			if (!hw_mgr_res) {
				CAM_ERR(CAM_ISP, "hw_mgr res is NULL");
				return -EINVAL;
			}

			res = hw_mgr_res->hw_res[io_info->base->split_id];
			if (!res)
				continue;

			rc = cam_isp_add_io_buffers_util(io_info, &io_cfg[i], res);
			if (rc) {
				CAM_ERR(CAM_ISP, "io_cfg[%d] add buf failed rc %d", i, rc);
				return rc;
			}
		}
	}

	/* MC path: now program each context that queued at least one port */
	if (major_version == 3) {
		for (i = 0; i < CAM_ISP_MULTI_CTXT_MAX; i++) {
			if (!num_ports[i])
				continue;

			rc = cam_isp_add_io_buffers_mc(mc_cfg, io_info, num_ports[i], i);
			if (rc) {
				CAM_ERR(CAM_ISP, "MC context[%u] failed for base[%d]",
					i, io_info->base->idx);
				goto err;
			}
		}

		vfree(mc_cfg);
	}

	bytes_updated = io_info->kmd_buf_info->used_bytes - curr_used_bytes;
	CAM_DBG(CAM_ISP, "io_cfg_used_bytes %d, fill_fence %d",
		bytes_updated, io_info->fill_fence);

	if (bytes_updated) {
		/**
		 * Offset and used_bytes are already updated in the previous
		 * add_io_buffers_util function and now points to next empty
		 * block in kmd buffer, reset it here to align with generic
		 * update hw entry function
		 */
		io_info->kmd_buf_info->offset -= bytes_updated;
		io_info->kmd_buf_info->used_bytes -= bytes_updated;
		cam_isp_update_hw_entry(CAM_ISP_IOCFG_BL, io_info->prepare,
			io_info->kmd_buf_info, bytes_updated, false);
	}

	return rc;
err:
	/* vfree(NULL) is a no-op, safe for the non-MC path */
	vfree(mc_cfg);
	return rc;
}

/**
 * cam_isp_add_reg_update()
 *
 * @brief: For every source resource on @res_list_isp_src whose hw index
 *         matches @base_idx, ask the hw driver to write its reg-update
 *         command into the KMD buffer, then fold the accumulated bytes
 *         into one CAM_ISP_COMMON_CFG_BL hw update entry.
 *
 * @prepare:          Prepare packet update args
 * @res_list_isp_src: List of ISP source resources
 * @base_idx:         HW index of the base being prepared
 * @kmd_buf_info:     KMD buffer to write commands into
 * @combine:          Passed through to cam_isp_update_hw_entry()
 *
 * @return: 0 if at least one reg update was added, negative errno otherwise
 */
int cam_isp_add_reg_update(
	struct cam_hw_prepare_update_args *prepare,
	struct list_head *res_list_isp_src,
	uint32_t base_idx,
	struct cam_kmd_buf_info *kmd_buf_info,
	bool combine)
{
	int rc = -EINVAL;
	struct cam_isp_resource_node *res;
	struct cam_isp_hw_mgr_res *hw_mgr_res;
	struct cam_isp_hw_get_cmd_update get_regup;
	uint32_t kmd_buf_remain_size, i, reg_update_size;

	/* Max one hw entries required for each base */
	if (prepare->num_hw_update_entries + 1 >=
		prepare->max_hw_update_entries) {
		CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
			prepare->num_hw_update_entries,
			prepare->max_hw_update_entries);
		return -EINVAL;
	}

	reg_update_size = 0;
	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
			continue;

		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			res =
hw_mgr_res->hw_res[i];
			if (res->hw_intf->hw_idx != base_idx)
				continue;

			/* Room check accounts for bytes already queued this call */
			if (kmd_buf_info->size > (kmd_buf_info->used_bytes +
				reg_update_size)) {
				kmd_buf_remain_size = kmd_buf_info->size -
					(kmd_buf_info->used_bytes +
					reg_update_size);
			} else {
				CAM_ERR(CAM_ISP, "no free mem %d %d %d",
					base_idx, kmd_buf_info->size,
					kmd_buf_info->used_bytes +
					reg_update_size);
				rc = -EINVAL;
				return rc;
			}

			/* cpu_addr is u32*, hence the /4 byte-to-word scaling */
			get_regup.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
				kmd_buf_info->used_bytes/4 +
				reg_update_size/4;
			get_regup.cmd.size = kmd_buf_remain_size;
			get_regup.cmd_type = CAM_ISP_HW_CMD_GET_REG_UPDATE;
			get_regup.res = res;

			rc = res->hw_intf->hw_ops.process_cmd(
				res->hw_intf->hw_priv,
				CAM_ISP_HW_CMD_GET_REG_UPDATE, &get_regup,
				sizeof(struct cam_isp_hw_get_cmd_update));
			if (rc)
				return rc;

			CAM_DBG(CAM_ISP,
				"Reg update added for res %d hw_id %d cdm_idx %d",
				res->res_id, res->hw_intf->hw_idx, base_idx);
			reg_update_size += get_regup.cmd.used_bytes;
		}
	}

	if (reg_update_size) {
		cam_isp_update_hw_entry(CAM_ISP_COMMON_CFG_BL, prepare,
			kmd_buf_info, reg_update_size, combine);
		/* rc starts at -EINVAL; success only if something was added */
		rc = 0;
	}

	return rc;
}

/**
 * cam_isp_add_go_cmd()
 *
 * @brief: Write the FE (fetch engine) trigger/GO command for read
 *         resource @res into the KMD buffer and add it as a
 *         CAM_ISP_COMMON_CFG_BL hw update entry.
 *
 * @prepare:      Prepare packet update args
 * @res:          Bus-read resource to trigger
 * @kmd_buf_info: KMD buffer to write the command into
 * @combine:      Passed through to cam_isp_update_hw_entry()
 *
 * @return: 0 on success, negative errno on failure
 */
int cam_isp_add_go_cmd(
	struct cam_hw_prepare_update_args *prepare,
	struct cam_isp_resource_node *res,
	struct cam_kmd_buf_info *kmd_buf_info,
	bool combine)
{
	int rc;
	struct cam_isp_hw_get_cmd_update get_regup;
	uint32_t kmd_buf_remain_size, reg_update_size;

	/* Max one hw entries required for each base */
	if (prepare->num_hw_update_entries + 1 >=
		prepare->max_hw_update_entries) {
		CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
			prepare->num_hw_update_entries,
			prepare->max_hw_update_entries);
		return -EINVAL;
	}

	reg_update_size = 0;
	if (kmd_buf_info->size > (kmd_buf_info->used_bytes +
		reg_update_size)) {
		kmd_buf_remain_size = kmd_buf_info->size -
			(kmd_buf_info->used_bytes +
			reg_update_size);
	} else {
		CAM_ERR(CAM_ISP, "no free mem %d %d",
			kmd_buf_info->size,
			kmd_buf_info->used_bytes +
			reg_update_size);
		return -EINVAL;
	}

	get_regup.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
		kmd_buf_info->used_bytes/4 +
		reg_update_size/4;
	get_regup.cmd.size = kmd_buf_remain_size;
	get_regup.cmd_type = CAM_ISP_HW_CMD_FE_TRIGGER_CMD;
	get_regup.res = res;

	/* Note: dispatched via res->process_cmd, not hw_intf->hw_ops */
	rc = res->process_cmd(res->res_priv,
		CAM_ISP_HW_CMD_FE_TRIGGER_CMD, &get_regup,
		sizeof(struct cam_isp_hw_get_cmd_update));
	if (rc)
		return rc;

	CAM_DBG(CAM_ISP, "GO_CMD added for RD res %d hw_id %d",
		res->res_type, res->hw_intf->hw_idx);
	reg_update_size += get_regup.cmd.used_bytes;

	/* Update HW entries */
	if (reg_update_size)
		cam_isp_update_hw_entry(CAM_ISP_COMMON_CFG_BL,
			prepare, kmd_buf_info, reg_update_size,
			combine);
	return 0;
}

/**
 * cam_isp_add_comp_wait()
 *
 * @brief: For a dual-VFE (left+right) source resource, write the
 *         composite-wait command via the left split's hw interface and
 *         append it as a CAM_ISP_IOCFG_BL hw update entry.
 *
 * @prepare:          Prepare packet update args
 * @res_list_isp_src: List of ISP source resources; the first entry with
 *                    both splits populated is used
 * @base_idx:         HW index (used in error logging only)
 * @kmd_buf_info:     KMD buffer to write the command into
 *
 * @return: 0 on success, negative errno on failure
 */
int cam_isp_add_comp_wait(
	struct cam_hw_prepare_update_args *prepare,
	struct list_head *res_list_isp_src,
	uint32_t base_idx,
	struct cam_kmd_buf_info *kmd_buf_info)
{
	int rc = -EINVAL;
	struct cam_isp_hw_mgr_res *hw_mgr_res;
	struct cam_isp_hw_get_cmd_update add_wait;
	struct cam_hw_intf *hw_intf;
	bool hw_res_valid = false;
	uint32_t kmd_buf_remain_size, num_ent, add_wait_size;

	/* Max one hw entries required for each base */
	if (prepare->num_hw_update_entries >= prepare->max_hw_update_entries) {
		CAM_ERR(CAM_ISP, "Insufficient HW entries");
		return -EINVAL;
	}

	add_wait_size = 0;
	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
			continue;
		if (hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_LEFT] &&
			hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT]) {
			hw_res_valid = true;
			break;
		}
	}

	/* NOTE(review): after list_for_each_entry() exhausts the list the
	 * iterator is non-NULL (it aliases the list head), so hw_res_valid
	 * is the effective guard here; the !hw_mgr_res test never fires.
	 */
	if (!hw_mgr_res || !hw_res_valid) {
		CAM_ERR(CAM_ISP, "No src with multi_vfe config");
		return -EINVAL;
	}

	hw_intf = hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_LEFT]->hw_intf;

	if (kmd_buf_info->size > (kmd_buf_info->used_bytes +
		add_wait_size)) {
		kmd_buf_remain_size = kmd_buf_info->size -
			(kmd_buf_info->used_bytes +
required for each base */ + if (prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries"); + return -EINVAL; + } + + add_trigger_size = 0; + + list_for_each_entry(hw_mgr_res, res_list_isp_src, list) { + if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT) + continue; + if (hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_LEFT] && + hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT]) { + hw_res_valid = true; + break; + } + } + + if (!hw_mgr_res || !hw_res_valid) { + CAM_ERR(CAM_ISP, "No src with multi_vfe config"); + return -EINVAL; + } + + hw_intf = hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT]->hw_intf; + + if (kmd_buf_info->size > (kmd_buf_info->used_bytes + + add_trigger_size)) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + add_trigger_size); + } else { + CAM_ERR(CAM_ISP, "no free mem %d %d %d", + base_idx, kmd_buf_info->size, + kmd_buf_info->used_bytes + + add_trigger_size); + rc = -EINVAL; + return rc; + } + + add_trigger.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4 + + add_trigger_size/4; + add_trigger.cmd.size = kmd_buf_remain_size; + add_trigger.cmd_type = CAM_ISP_HW_CMD_ADD_WAIT_TRIGGER; + add_trigger.res = hw_mgr_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT]; + + rc = hw_intf->hw_ops.process_cmd( + hw_intf->hw_priv, + CAM_ISP_HW_CMD_ADD_WAIT_TRIGGER, + &add_trigger, + sizeof(struct cam_isp_hw_get_cmd_update)); + + if (rc) { + CAM_ERR(CAM_ISP, + "wait_trigger_event addition failed for dual vfe"); + return rc; + } + + add_trigger_size += add_trigger.cmd.used_bytes; + + if (add_trigger_size) { + /* Update the HW entries */ + num_ent = prepare->num_hw_update_entries; + prepare->hw_update_entries[num_ent].handle = + kmd_buf_info->handle; + prepare->hw_update_entries[num_ent].len = add_trigger_size; + prepare->hw_update_entries[num_ent].offset = + kmd_buf_info->offset; + prepare->hw_update_entries[num_ent].flags = CAM_ISP_IOCFG_BL; + CAM_DBG(CAM_ISP, + "num_ent=%d handle=0x%x, 
len=%u, offset=%u", + num_ent, + prepare->hw_update_entries[num_ent].handle, + prepare->hw_update_entries[num_ent].len, + prepare->hw_update_entries[num_ent].offset); + num_ent++; + + kmd_buf_info->used_bytes += add_trigger_size; + kmd_buf_info->offset += add_trigger_size; + prepare->num_hw_update_entries = num_ent; + /* add wait trigger is success return status 0 */ + rc = 0; + } + + return rc; +} + +int cam_isp_add_csid_command_buffers( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + cam_packet_generic_blob_handler blob_handler_cb, + struct cam_isp_ctx_base_info *base_info) +{ + int rc = 0; + uint32_t cmd_meta_data, num_ent, i; + enum cam_isp_hw_split_id split_id; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_hw_update_entry *hw_entry = NULL; + + split_id = base_info->split_id; + hw_entry = prepare->hw_update_entries; + + /* + * set the cmd_desc to point the first command descriptor in the + * packet + */ + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint8_t *)&prepare->packet->payload_flex + + prepare->packet->cmd_buf_offset); + + CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d", + split_id, prepare->packet->num_cmd_buf); + + for (i = 0; i < prepare->packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + num_ent = prepare->num_hw_update_entries; + if (!cmd_desc[i].length) + continue; + + /* One hw entry space required for left or right or common */ + if (num_ent + 1 >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d", + num_ent, prepare->max_hw_update_entries); + return -EINVAL; + } + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + cmd_meta_data = cmd_desc[i].meta_data; + + CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d", + cmd_meta_data, split_id); + + switch (cmd_meta_data) { + case CAM_ISP_PACKET_META_BASE: + case CAM_ISP_PACKET_META_LEFT: + case 
CAM_ISP_PACKET_META_DMI_LEFT: + case CAM_ISP_PACKET_META_RIGHT: + case CAM_ISP_PACKET_META_DMI_RIGHT: + case CAM_ISP_PACKET_META_COMMON: + case CAM_ISP_PACKET_META_DMI_COMMON: + case CAM_ISP_PACKET_META_DUAL_CONFIG: + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT: + if (split_id == CAM_ISP_HW_SPLIT_LEFT) { + struct cam_isp_generic_blob_info blob_info; + + prepare->num_hw_update_entries = num_ent; + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing blobs %d", + rc); + return rc; + } + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT: + if (split_id == CAM_ISP_HW_SPLIT_RIGHT) { + struct cam_isp_generic_blob_info blob_info; + + prepare->num_hw_update_entries = num_ent; + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing blobs %d", + rc); + return rc; + } + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON: { + struct cam_isp_generic_blob_info blob_info; + + prepare->num_hw_update_entries = num_ent; + blob_info.prepare = prepare; + blob_info.base_info = base_info; + blob_info.kmd_buf_info = kmd_buf_info; + + rc = cam_packet_util_process_generic_cmd_buffer( + &cmd_desc[i], + blob_handler_cb, + &blob_info); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in processing blobs %d", rc); + return rc; + } + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent = prepare->num_hw_update_entries; + } + break; + case CAM_ISP_PACKET_META_REG_DUMP_ON_FLUSH: + case 
CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR: + case CAM_ISP_PACKET_META_REG_DUMP_PER_REQUEST: + case CAM_ISP_SFE_PACKET_META_LEFT: + case CAM_ISP_SFE_PACKET_META_RIGHT: + case CAM_ISP_SFE_PACKET_META_COMMON: + case CAM_ISP_SFE_PACKET_META_DUAL_CONFIG: + break; + case CAM_ISP_PACKET_META_CSID_LEFT: + if (split_id == CAM_ISP_HW_SPLIT_LEFT) { + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Left num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + } + break; + case CAM_ISP_PACKET_META_CSID_RIGHT: + if (split_id == CAM_ISP_HW_SPLIT_RIGHT) { + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Right num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + } + break; + case CAM_ISP_PACKET_META_CSID_COMMON: + hw_entry[num_ent].len = cmd_desc[i].length; + hw_entry[num_ent].handle = cmd_desc[i].mem_handle; + hw_entry[num_ent].offset = cmd_desc[i].offset; + CAM_DBG(CAM_ISP, + "Meta_Common num_ent=%d handle=0x%x, len=%u, offset=%u", + num_ent, + hw_entry[num_ent].handle, + hw_entry[num_ent].len, + hw_entry[num_ent].offset); + hw_entry[num_ent].flags = CAM_ISP_IQ_BL; + num_ent++; + break; + default: + CAM_ERR(CAM_ISP, "invalid cdm command meta data %d", + cmd_meta_data); + return -EINVAL; + } + prepare->num_hw_update_entries = num_ent; + } + + return rc; +} + +int cam_isp_add_csid_reg_update( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + void *args, + bool combine) +{ + int rc; + bool add_toggled_entry; + struct 
cam_isp_resource_node *res; + uint32_t kmd_buf_remain_size, reg_update_size = 0; + struct cam_isp_csid_reg_update_args *rup_args = NULL; + + if (prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d", + prepare->num_hw_update_entries, + prepare->max_hw_update_entries); + return -EINVAL; + } + + rup_args = (struct cam_isp_csid_reg_update_args *)args; + add_toggled_entry = rup_args->add_toggled_mup_entry; + if (!rup_args->num_res) { + CAM_ERR(CAM_ISP, "No Res for Reg Update"); + return -EINVAL; + } + + if (kmd_buf_info->size <=(kmd_buf_info->used_bytes + + reg_update_size)) { + CAM_ERR(CAM_ISP, "no free mem %u %u %u", + kmd_buf_info->size, + kmd_buf_info->used_bytes + + reg_update_size); + return -EINVAL; + } + + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + reg_update_size); + rup_args->cmd.used_bytes = 0; + rup_args->cmd.cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4 + + reg_update_size/4; + rup_args->cmd.size = kmd_buf_remain_size; + rup_args->reg_write = false; + rup_args->add_toggled_mup_entry = false; + res = rup_args->res[0]; + + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_GET_REG_UPDATE, rup_args, + sizeof(struct cam_isp_csid_reg_update_args)); + if (rc) + return rc; + + CAM_DBG(CAM_ISP, + "Reg update added for res %d hw_id %d", + res->res_id, res->hw_intf->hw_idx); + reg_update_size += rup_args->cmd.used_bytes; + + /* Update hw entries */ + if (reg_update_size) + cam_isp_update_hw_entry(CAM_ISP_COMMON_CFG_BL, + prepare, kmd_buf_info, reg_update_size, + combine); + + if (add_toggled_entry) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + reg_update_size); + if (kmd_buf_info->size <= (kmd_buf_info->used_bytes + + reg_update_size)) { + CAM_WARN(CAM_ISP, "no free mem %u %u %u", + kmd_buf_info->size, kmd_buf_info->used_bytes + + reg_update_size); + return 0; + } + 
rup_args->cmd.used_bytes = 0; + rup_args->cmd.cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes/4 + reg_update_size/4; + rup_args->cmd.size = kmd_buf_remain_size; + rup_args->reg_write = false; + rup_args->add_toggled_mup_entry = true; + res = rup_args->res[0]; + reg_update_size = 0; + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_ISP_HW_CMD_GET_REG_UPDATE, rup_args, + sizeof(struct cam_isp_csid_reg_update_args)); + if (!rc) { + reg_update_size += rup_args->cmd.used_bytes; + /* Update hw entries */ + if (reg_update_size) + cam_isp_update_hw_entry(CAM_ISP_DEBUG_ENTRY, + prepare, kmd_buf_info, reg_update_size, false); + } + } + + return 0; +} + +int cam_isp_add_csid_offline_cmd( + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_resource_node *res, + struct cam_kmd_buf_info *kmd_buf_info, + bool combine) +{ + int rc; + uint32_t kmd_buf_remain_size, go_cmd_size; + struct cam_ife_csid_offline_cmd_update_args go_args; + + if (prepare->num_hw_update_entries + 1 >= + prepare->max_hw_update_entries) { + CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d", + prepare->num_hw_update_entries, + prepare->max_hw_update_entries); + return -EINVAL; + } + + go_cmd_size = 0; + if (kmd_buf_info->size > (kmd_buf_info->used_bytes + + go_cmd_size)) { + kmd_buf_remain_size = kmd_buf_info->size - + (kmd_buf_info->used_bytes + + go_cmd_size); + } else { + CAM_ERR(CAM_ISP, "no free mem %d %d", + kmd_buf_info->size, + kmd_buf_info->used_bytes + + go_cmd_size); + return -EINVAL; + } + + go_args.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes / 4 + + go_cmd_size / 4; + go_args.cmd.size = kmd_buf_remain_size; + go_args.res = res; + + rc = res->hw_intf->hw_ops.process_cmd( + res->hw_intf->hw_priv, + CAM_IFE_CSID_PROGRAM_OFFLINE_CMD, &go_args, + sizeof(go_args)); + if (rc) + return rc; + + CAM_DBG(CAM_ISP, + "offline cmd update added for CSID: %u res: %d", + res->hw_intf->hw_idx, res->res_id); + go_cmd_size += 
go_args.cmd.used_bytes; + + /* Update HW entries */ + if (go_cmd_size) + cam_isp_update_hw_entry(CAM_ISP_COMMON_CFG_BL, + prepare, kmd_buf_info, go_cmd_size, + combine); + return 0; +} + +int cam_isp_get_cmd_buf_count( + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_cmd_buf_count *cmd_buf_count) +{ + struct cam_cmd_buf_desc *cmd_desc = NULL; + uint32_t cmd_meta_data = 0; + int i; + int rc = 0; + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint8_t *)&prepare->packet->payload_flex + + prepare->packet->cmd_buf_offset); + + for (i = 0; i < prepare->packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (!cmd_desc[i].length) + continue; + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + + if (rc) + return rc; + + cmd_meta_data = cmd_desc[i].meta_data; + + switch (cmd_meta_data) { + + case CAM_ISP_PACKET_META_BASE: + case CAM_ISP_PACKET_META_LEFT: + case CAM_ISP_PACKET_META_DMI_LEFT: + case CAM_ISP_PACKET_META_RIGHT: + case CAM_ISP_PACKET_META_DMI_RIGHT: + case CAM_ISP_PACKET_META_COMMON: + case CAM_ISP_PACKET_META_DMI_COMMON: + cmd_buf_count->isp_cnt++; + break; + case CAM_ISP_PACKET_META_CSID_LEFT: + case CAM_ISP_PACKET_META_CSID_RIGHT: + case CAM_ISP_PACKET_META_CSID_COMMON: + cmd_buf_count->csid_cnt++; + break; + case CAM_ISP_SFE_PACKET_META_LEFT: + case CAM_ISP_SFE_PACKET_META_RIGHT: + case CAM_ISP_SFE_PACKET_META_COMMON: + cmd_buf_count->sfe_cnt++; + break; + default: + break; + } + } + + CAM_DBG(CAM_ISP, "Number of cmd buffers: isp:%u csid:%u sfe:%u", + cmd_buf_count->isp_cnt, cmd_buf_count->csid_cnt, + cmd_buf_count->sfe_cnt); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c new file mode 100644 index 0000000000..326c6e5a4b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c @@ 
-0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "cam_tasklet_util.h" +#include "cam_irq_controller.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +/* Threshold for scheduling delay in ms */ +#define CAM_TASKLET_SCHED_TIME_THRESHOLD 5 + +/* Threshold for execution delay in ms */ +#define CAM_TASKLET_EXE_TIME_THRESHOLD 10 + +#define CAM_TASKLETQ_SIZE 256 + +static void cam_tasklet_action(unsigned long data); + +/** + * struct cam_tasklet_queue_cmd: + * @Brief: Structure associated with each slot in the + * tasklet queue + * + * @list: list_head member for each entry in queue + * @payload: Payload structure for the event. This will be + * passed to the handler function + * @handler_priv: Private data passed at event subscribe + * @bottom_half_handler: Function pointer for event handler in bottom + * half context + * @tasklet_enqueue_ts: enqueue time of tasklet + * + */ +struct cam_tasklet_queue_cmd { + struct list_head list; + void *payload; + void *handler_priv; + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler; + ktime_t tasklet_enqueue_ts; +}; + +/** + * struct cam_tasklet_info: + * @Brief: Tasklet private structure + * + * @list: list_head member for each tasklet + * @index: Instance id for the tasklet + * @tasklet_lock: Spin lock + * @tasklet_active: Atomic variable to control tasklet state + * @tasklet: Tasklet structure used to schedule bottom half + * @free_cmd_list: List of free tasklet queue cmd for use + * @used_cmd_list: List of used tasklet queue cmd + * @cmd_queue: Array of tasklet cmd for storage + * @ctx_priv: Private data passed to the handling function + * + */ +struct cam_tasklet_info { + struct list_head list; + uint32_t index; + spinlock_t tasklet_lock; + atomic_t 
tasklet_active; + struct tasklet_struct tasklet; + + struct list_head free_cmd_list; + struct list_head used_cmd_list; + struct cam_tasklet_queue_cmd cmd_queue[CAM_TASKLETQ_SIZE]; + + void *ctx_priv; +}; + +struct cam_irq_bh_api tasklet_bh_api = { + .bottom_half_enqueue_func = cam_tasklet_enqueue_cmd, + .get_bh_payload_func = cam_tasklet_get_cmd, + .put_bh_payload_func = cam_tasklet_put_cmd, +}; + +int cam_tasklet_get_cmd( + void *bottom_half, + void **bh_cmd) +{ + int rc = 0; + unsigned long flags; + struct cam_tasklet_info *tasklet = bottom_half; + struct cam_tasklet_queue_cmd *tasklet_cmd = NULL; + + *bh_cmd = NULL; + + if (tasklet == NULL) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "tasklet is NULL"); + return -EINVAL; + } + + if (!atomic_read(&tasklet->tasklet_active)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet idx:%d is not active", + tasklet->index); + rc = -EPIPE; + return rc; + } + + spin_lock_irqsave(&tasklet->tasklet_lock, flags); + if (list_empty(&tasklet->free_cmd_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No more free tasklet cmd idx:%d", + tasklet->index); + rc = -ENODEV; + goto spin_unlock; + } else { + tasklet_cmd = list_first_entry(&tasklet->free_cmd_list, + struct cam_tasklet_queue_cmd, list); + list_del_init(&(tasklet_cmd)->list); + *bh_cmd = tasklet_cmd; + } + +spin_unlock: + spin_unlock_irqrestore(&tasklet->tasklet_lock, flags); + return rc; +} + +void cam_tasklet_put_cmd( + void *bottom_half, + void **bh_cmd) +{ + unsigned long flags; + struct cam_tasklet_info *tasklet = bottom_half; + struct cam_tasklet_queue_cmd *tasklet_cmd = *bh_cmd; + + if (tasklet == NULL) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "tasklet is NULL"); + return; + } + + if (tasklet_cmd == NULL) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid tasklet_cmd"); + return; + } + + spin_lock_irqsave(&tasklet->tasklet_lock, flags); + list_del_init(&tasklet_cmd->list); + list_add_tail(&tasklet_cmd->list, &tasklet->free_cmd_list); + *bh_cmd = NULL; + spin_unlock_irqrestore(&tasklet->tasklet_lock, flags); +} + +/** 
+ * cam_tasklet_dequeue_cmd() + * + * @brief: Initialize the tasklet info structure + * + * @hw_mgr_ctx: Private Ctx data that will be passed to the handler + * function + * @idx: Index of tasklet used as identity + * @tasklet_action: Tasklet callback function that will be called + * when tasklet runs on CPU + * + * @return: 0: Success + * Negative: Failure + */ +static int cam_tasklet_dequeue_cmd( + struct cam_tasklet_info *tasklet, + struct cam_tasklet_queue_cmd **tasklet_cmd) +{ + int rc = 0; + unsigned long flags; + + *tasklet_cmd = NULL; + + CAM_DBG(CAM_ISP, "Dequeue before lock tasklet idx:%d", tasklet->index); + spin_lock_irqsave(&tasklet->tasklet_lock, flags); + if (list_empty(&tasklet->used_cmd_list)) { + CAM_DBG(CAM_ISP, "End of list reached. Exit"); + rc = -ENODEV; + goto spin_unlock; + } else { + *tasklet_cmd = list_first_entry(&tasklet->used_cmd_list, + struct cam_tasklet_queue_cmd, list); + list_del_init(&(*tasklet_cmd)->list); + CAM_DBG(CAM_ISP, "Dequeue Successful"); + } + +spin_unlock: + spin_unlock_irqrestore(&tasklet->tasklet_lock, flags); + return rc; +} + +void cam_tasklet_enqueue_cmd( + void *bottom_half, + void *bh_cmd, + void *handler_priv, + void *evt_payload_priv, + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler) +{ + unsigned long flags; + struct cam_tasklet_queue_cmd *tasklet_cmd = bh_cmd; + struct cam_tasklet_info *tasklet = bottom_half; + + if (!bottom_half) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "NULL bottom half"); + return; + } + + if (!bh_cmd) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "NULL bh cmd"); + return; + } + + if (!atomic_read(&tasklet->tasklet_active)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet is not active idx:%d", + tasklet->index); + return; + } + + CAM_DBG(CAM_ISP, "Enqueue tasklet cmd idx:%d", tasklet->index); + if (!cam_presil_mode_enabled()) { + tasklet_cmd->bottom_half_handler = bottom_half_handler; + tasklet_cmd->payload = evt_payload_priv; + tasklet_cmd->handler_priv = handler_priv; + tasklet_cmd->tasklet_enqueue_ts = 
ktime_get(); + spin_lock_irqsave(&tasklet->tasklet_lock, flags); + list_add_tail(&tasklet_cmd->list, + &tasklet->used_cmd_list); + spin_unlock_irqrestore(&tasklet->tasklet_lock, flags); + tasklet_hi_schedule(&tasklet->tasklet); + } else { + cam_presil_enqueue_presil_irq_tasklet(bottom_half_handler, + handler_priv, + evt_payload_priv); + } +} + +int cam_tasklet_init( + void **tasklet_info, + void *hw_mgr_ctx, + uint32_t idx) +{ + int i; + struct cam_tasklet_info *tasklet = NULL; + + tasklet = CAM_MEM_ZALLOC(sizeof(struct cam_tasklet_info), GFP_KERNEL); + if (!tasklet) { + CAM_DBG(CAM_ISP, + "Error! Unable to allocate memory for tasklet"); + *tasklet_info = NULL; + return -ENOMEM; + } + + tasklet->ctx_priv = hw_mgr_ctx; + tasklet->index = idx; + spin_lock_init(&tasklet->tasklet_lock); + memset(tasklet->cmd_queue, 0, sizeof(tasklet->cmd_queue)); + INIT_LIST_HEAD(&tasklet->free_cmd_list); + INIT_LIST_HEAD(&tasklet->used_cmd_list); + for (i = 0; i < CAM_TASKLETQ_SIZE; i++) { + INIT_LIST_HEAD(&tasklet->cmd_queue[i].list); + list_add_tail(&tasklet->cmd_queue[i].list, + &tasklet->free_cmd_list); + } + tasklet_init(&tasklet->tasklet, cam_tasklet_action, + (unsigned long)tasklet); + tasklet_disable(&tasklet->tasklet); + + *tasklet_info = tasklet; + + return 0; +} + +void cam_tasklet_deinit(void **tasklet_info) +{ + struct cam_tasklet_info *tasklet = *tasklet_info; + + if (atomic_read(&tasklet->tasklet_active)) { + atomic_set(&tasklet->tasklet_active, 0); + tasklet_kill(&tasklet->tasklet); + tasklet_disable(&tasklet->tasklet); + } + CAM_MEM_FREE(tasklet); + *tasklet_info = NULL; +} + +static inline void cam_tasklet_flush(struct cam_tasklet_info *tasklet_info) +{ + cam_tasklet_action((unsigned long) tasklet_info); +} + +int cam_tasklet_start(void *tasklet_info) +{ + struct cam_tasklet_info *tasklet = tasklet_info; + int i = 0; + + if (atomic_read(&tasklet->tasklet_active)) { + CAM_ERR(CAM_ISP, "Tasklet already active idx:%d", + tasklet->index); + return -EBUSY; + } + + /* 
clean up the command queue first */ + for (i = 0; i < CAM_TASKLETQ_SIZE; i++) { + list_del_init(&tasklet->cmd_queue[i].list); + list_add_tail(&tasklet->cmd_queue[i].list, + &tasklet->free_cmd_list); + } + + atomic_set(&tasklet->tasklet_active, 1); + + tasklet_enable(&tasklet->tasklet); + + return 0; +} + +void cam_tasklet_stop(void *tasklet_info) +{ + struct cam_tasklet_info *tasklet = tasklet_info; + + if (!atomic_read(&tasklet->tasklet_active)) + return; + + atomic_set(&tasklet->tasklet_active, 0); + tasklet_kill(&tasklet->tasklet); + tasklet_disable(&tasklet->tasklet); + cam_tasklet_flush(tasklet); +} + +/* + * cam_tasklet_action() + * + * @brief: Process function that will be called when tasklet runs + * on CPU + * + * @data: Tasklet Info structure that is passed in tasklet_init + * + * @return: Void + */ +static void cam_tasklet_action(unsigned long data) +{ + struct cam_tasklet_info *tasklet_info = NULL; + struct cam_tasklet_queue_cmd *tasklet_cmd = NULL; + ktime_t tasklet_exec_start_time; + void *cb = NULL; + tasklet_info = (struct cam_tasklet_info *)data; + + while (!cam_tasklet_dequeue_cmd(tasklet_info, &tasklet_cmd)) { + cb = (void *)tasklet_cmd->bottom_half_handler; + cam_common_util_thread_switch_delay_detect( + "ISP Tasklet", "schedule", cb, + tasklet_cmd->tasklet_enqueue_ts, + CAM_TASKLET_SCHED_TIME_THRESHOLD); + tasklet_exec_start_time = ktime_get(); + + tasklet_cmd->bottom_half_handler(tasklet_cmd->handler_priv, + tasklet_cmd->payload); + + cam_common_util_thread_switch_delay_detect( + "ISP Tasklet", "execution", cb, + tasklet_exec_start_time, + CAM_TASKLET_SCHED_TIME_THRESHOLD); + cam_tasklet_put_cmd(tasklet_info, (void **)(&tasklet_cmd)); + } +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h new file mode 100644 index 0000000000..3da26221c7 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h @@ -0,0 +1,457 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_ISP_HW_PARSER_H_ +#define _CAM_ISP_HW_PARSER_H_ + +#include +#include +#include "cam_isp_hw_mgr_intf.h" +#include "cam_isp_hw_mgr.h" +#include "cam_hw_intf.h" +#include "cam_packet_util.h" +#include "cam_cdm_intf_api.h" + +/* enum cam_isp_cdm_bl_type - isp cdm packet type */ +enum cam_isp_cdm_bl_type { + CAM_ISP_UNUSED_BL, + CAM_ISP_COMMON_CFG_BL, + CAM_ISP_IQ_BL, + CAM_ISP_IOCFG_BL, + CAM_ISP_DEBUG_ENTRY, + CAM_ISP_BL_MAX, +}; + +/* + * struct cam_isp_generic_blob_info + * + * @prepare: Payload for prepare command + * @base_info: Base hardware information for the context + * @kmd_buf_info: Kmd buffer to store the custom cmd data + * @entry_added: Indicate whether the entry is previously + * added when handling the blob before + */ +struct cam_isp_generic_blob_info { + struct cam_hw_prepare_update_args *prepare; + struct cam_isp_ctx_base_info *base_info; + struct cam_kmd_buf_info *kmd_buf_info; + bool entry_added; +}; + +/* + * struct cam_isp_frame_header_info + * + * @frame_header_enable: Enable frame header + * @frame_header_iova_addr: frame header iova + * @frame_header_res_id: res id for which frame header is enabled + */ +struct cam_isp_frame_header_info { + bool frame_header_enable; + uint64_t frame_header_iova_addr; + uint32_t frame_header_res_id; +}; + +/* + * struct cam_isp_sfe_scratch_buf_res_info + * + * @num_active_fe_rdis : To indicate active RMs/RDIs + * @sfe_rdi_cfg_mask : Output mask to mark if the given RDI res has been + * provided with IO cfg buffer + */ +struct cam_isp_sfe_scratch_buf_res_info { + uint32_t num_active_fe_rdis; + uint32_t sfe_rdi_cfg_mask; +}; + +/* + * struct cam_isp_ife_scratch_buf_res_info + 
* + * @num_ports : Number of ports for which scratch buffer is provided + * @ife_scratch_resources : IFE resources that have been provided a scratch buffer + * @ife_scratch_cfg_mask : Output mask to mark if the given client has been + * provided with IO cfg buffer + */ +struct cam_isp_ife_scratch_buf_res_info { + uint32_t num_ports; + uint32_t ife_scratch_resources[CAM_IFE_SCRATCH_NUM_MAX]; + uint32_t ife_scratch_cfg_mask; +}; + +/* + * struct cam_isp_check_io_cfg_for_scratch + * + * @sfe_scratch_res_info : SFE scratch buffer validation info + * @ife_scratch_res_info : IFE scratch buffer validation info + * @validate_for_sfe : Validate for SFE clients, check if scratch is needed + * @validate_for_ife : Validate for IFE clients, check if scratch is needed + */ +struct cam_isp_check_io_cfg_for_scratch { + struct cam_isp_sfe_scratch_buf_res_info sfe_scratch_res_info; + struct cam_isp_ife_scratch_buf_res_info ife_scratch_res_info; + bool validate_for_sfe; + bool validate_for_ife; +}; + +/* + * struct cam_isp_change_base_args + * + * @cdm_id: CDM id + * @base_idx: Base index + */ +struct cam_isp_change_base_args { + enum cam_cdm_id cdm_id; + uint32_t base_idx; +}; + +/* + * struct cam_isp_cmd_buf_count + * + * @csid_cnt: CSID cmd buffer cnt + * @vfe_cnt: ISP cmd buffer cnt + * @sfe_cnt: SFE cmd buffer cnt + */ +struct cam_isp_cmd_buf_count { + uint32_t csid_cnt; + uint32_t isp_cnt; + uint32_t sfe_cnt; +}; + +/* + * struct cam_isp_io_buf_info + * + * @frame_hdr: Frame header related params + * @scratch_check_cfg: Validate info for IFE/SFE scratch buffers + * @prepare: Contain the packet and HW update variables + * @kmd_buf_info: Kmd buffer to store the change base command + * @res_list_isp_out: IFE/SFE out resource list + * @res_list_ife_in_rd: IFE/SFE in rd resource list + * @base: Base info for IFE/SFE + * @out_map: Outport map + * @hw_intf: HW intf + * @iommu_hdl: Iommu handle to get the IO buf from memory manager + * @sec_iommu_hdl: Secure iommu handle to get the IO 
buf from + * memory manager + * @out_base: Base value of ISP resource (IFE/SFE) + * @out_max: Max of supported ISP resources(IFE/SFE) + * @major_version: Major version + * @fill_fence: If true, Fence map table will be filled + * @return: 0 for success + * -EINVAL for Fail + */ + +struct cam_isp_io_buf_info { + struct cam_isp_frame_header_info *frame_hdr; + struct cam_isp_check_io_cfg_for_scratch *scratch_check_cfg; + struct cam_hw_prepare_update_args *prepare; + struct cam_kmd_buf_info *kmd_buf_info; + struct cam_isp_hw_mgr_res *res_list_isp_out; + struct list_head *res_list_in_rd; + struct cam_isp_ctx_base_info *base; + uint8_t *out_map; + struct cam_hw_intf *hw_intf; + int iommu_hdl; + int sec_iommu_hdl; + uint32_t out_base; + uint32_t out_max; + uint32_t major_version; + bool fill_fence; +}; + +/* + * cam_isp_add_change_base() + * + * @brief Add change base in the hw entries list + * processe the isp source list get the change base from + * ISP HW instance + * + * @prepare: Contain the packet and HW update variables + * @res_list_isp_src: Resource list for IFE/VFE source + * @change_base_args: Arguments for Change base function + * @kmd_buf_info: Kmd buffer to store the change base command + * @return: 0 for success + * -EINVAL for Fail + */ +int cam_isp_add_change_base( + struct cam_hw_prepare_update_args *prepare, + struct list_head *res_list_isp_src, + struct cam_isp_change_base_args *change_base_args, + struct cam_kmd_buf_info *kmd_buf_info); + +/* + * cam_isp_add_cmd_buf_update() + * + * @brief Add command buffer in the HW entries list for given + * Blob Data. 
+ * + * @res: ISP HW resource to get the update from + * @cmd_type: Cmd type to get update for + * @hw_cmd_type: HW Cmd type corresponding to cmd_type + * @base_idx: Base hardware index + * @cmd_buf_addr: Cpu buf addr of kmd scratch buffer + * @kmd_buf_remain_size: Remaining size left for cmd buffer update + * @cmd_update_data: Data needed by HW to process the cmd and provide + * cmd buffer + * @bytes_used: Address of the field to be populated with + * total bytes used as output to caller + * + * @return: Negative for Failure + * otherwise returns bytes used + */ +int cam_isp_add_cmd_buf_update( + struct cam_isp_resource_node *res, + struct cam_hw_intf *hw_intf, + uint32_t cmd_type, + uint32_t hw_cmd_type, + uint32_t *cmd_buf_addr, + uint32_t kmd_buf_remain_size, + void *cmd_update_data, + uint32_t *bytes_used); + +/* + * cam_sfe_add_command_buffers() + * + * @brief Add command buffer in the HW entries list for given + * left or right SFE instance. + * + * @prepare: Contain the packet and HW update variables + * @kmd_buf_info: KMD buffer to store the custom cmd data + * @base_info: base hardware information + * @blob_handler_cb: Call_back_function for Meta handling + * @res_list_isp_out: SFE out resource list + * @out_map: SFE /VFE out port map + * @out_base: Base value of ISP resource (SFE) + * @out_max: Max of supported ISP resources(SFE) + * + * @return: 0 for success + * Negative for Failure + */ +int cam_sfe_add_command_buffers( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + struct cam_isp_ctx_base_info *base_info, + cam_packet_generic_blob_handler blob_handler_cb, + struct cam_isp_hw_mgr_res *res_list_sfe_out, + uint8_t *out_map, + uint32_t out_base, + uint32_t out_max); + +/* + * cam_isp_add_command_buffers() + * + * @brief Add command buffer in the HW entries list for given + * left or right VFE/IFE instance. 
+ * + * @prepare: Contain the packet and HW update variables + * @kmd_buf_info: KMD buffer to store the custom cmd data + * @base_info: base hardware information + * @blob_handler_cb: Call_back_function for Meta handling + * @res_list_isp_out: IFE /VFE out resource list + * @out_map: SFE /VFE out port map + * @out_base: Base value of ISP resource (IFE) + * @out_max: Max of supported ISP resources(IFE) + * + * @return: 0 for success + * Negative for Failure + */ +int cam_isp_add_command_buffers( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + struct cam_isp_ctx_base_info *base_info, + cam_packet_generic_blob_handler blob_handler_cb, + struct cam_isp_hw_mgr_res *res_list_isp_out, + uint8_t *out_map, + uint32_t out_base, + uint32_t out_max); + +/* + * cam_isp_add_io_buffers() + * + * @brief Add io buffer configurations in the HW entries list + * processe the io configurations based on the base + * index and update the HW entries list + * + * @io_info: Io buffer information + * @return: 0 for success + * -EINVAL for Fail + */ +int cam_isp_add_io_buffers(struct cam_isp_io_buf_info *io_info); + +/* + * cam_isp_add_reg_update() + * + * @brief Add reg update in the hw entries list + * processe the isp source list get the reg update from + * ISP HW instance + * + * @prepare: Contain the packet and HW update variables + * @res_list_isp_src: Resource list for IFE/VFE source + * @base_idx: Base or dev index of the IFE/VFE HW instance + * @kmd_buf_info: Kmd buffer to store the change base command + * @combine: Indicate whether combine with prev update entry + * @return: 0 for success + * -EINVAL for Fail + */ +int cam_isp_add_reg_update( + struct cam_hw_prepare_update_args *prepare, + struct list_head *res_list_isp_src, + uint32_t base_idx, + struct cam_kmd_buf_info *kmd_buf_info, + bool combine); + +/* + * cam_isp_add_comp_wait() + * + * @brief Add reg update in the hw entries list + * processe the isp source list get the reg update 
from
+ * ISP HW instance
+ *
+ * @prepare: Contain the packet and HW update variables
+ * @res_list_isp_src: Resource list for IFE/VFE source
+ * @base_idx: Base or dev index of the IFE/VFE HW instance
+ * @kmd_buf_info: Kmd buffer to store the change base command
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_isp_add_comp_wait(
+ struct cam_hw_prepare_update_args *prepare,
+ struct list_head *res_list_isp_src,
+ uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info);
+
+/*
+ * cam_isp_add_wait_trigger()
+ *
+ * @brief Add wait trigger in the hw entries list
+ * processes the isp source list to get the wait trigger from
+ * ISP HW instance
+ *
+ * @prepare: Contain the packet and HW update variables
+ * @res_list_isp_src: Resource list for IFE/VFE source
+ * @base_idx: Base or dev index of the IFE/VFE HW instance
+ * @kmd_buf_info: Kmd buffer to store the change base command
+ * @trigger_cdm_en: Used to reset and set trigger_cdm_events register
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_isp_add_wait_trigger(
+ struct cam_hw_prepare_update_args *prepare,
+ struct list_head *res_list_isp_src,
+ uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ bool trigger_cdm_en);
+
+
+/*
+ * cam_isp_add_go_cmd()
+ *
+ * @brief Add go_cmd in the hw entries list for each rd source
+ *
+ * @prepare: Contain the packet and HW update variables
+ * @res: go_cmd added for this resource
+ * @kmd_buf_info: Kmd buffer to store the change base command
+ * @combine: Indicate whether combine with prev update entry
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_isp_add_go_cmd(
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_isp_resource_node *res,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ bool combine);
+
+/* cam_isp_add_csid_reg_update()
+ *
+ * @brief Add csid reg update in the hw entries list
+ * processes the isp source list to get the reg update from
+ * ISP HW instance
+ *
+ * @prepare: Contain the packet and HW
update variables
+ * @kmd_buf_info: Kmd buffer to store the change base command
+ * @args: Reg Update args
+ * @combine: Indicate whether combine with prev update entry
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_isp_add_csid_reg_update(
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ void *args,
+ bool combine);
+
+
+/* cam_isp_add_csid_offline_cmd()
+ *
+ * @brief Add csid go cmd for offline mode
+ *
+ * @prepare: Contain the packet and HW update variables
+ * @res: go_cmd added for this resource
+ * @kmd_buf_info: Kmd buffer to store the change base command
+ * @combine: Indicate whether combine with prev update entry
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_isp_add_csid_offline_cmd(
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_isp_resource_node *res,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ bool combine);
+
+/*
+ * cam_isp_add_csid_command_buffers()
+ *
+ * @brief Add command buffer in the HW entries list for given
+ * left or right CSID instance.
+ * + * @prepare: Contain the packet and HW update variables + * @kmd_buf_info: KMD buffer to store the custom cmd data + * @blob_handler_cb: Blob handler callback + * @base_info: base hardware information + * + * @return: 0 for success + * Negative for Failure + */ +int cam_isp_add_csid_command_buffers( + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + cam_packet_generic_blob_handler blob_handler_cb, + struct cam_isp_ctx_base_info *base_info); + +/* + * cam_isp_get_cmd_buf_count() + * + * @brief Counts the number of command buffers + * + * @prepare: Contain the packet and HW update variables + * @cmd_buf_count: Cmd buffer count container + * + * @return: 0 for success + * Negative for Failure + */ +int cam_isp_get_cmd_buf_count( + struct cam_hw_prepare_update_args *prepare, + struct cam_isp_cmd_buf_count *cmd_buf_count); + +/* + * cam_isp_update_hw_entry() + * + * @brief Add a new update hw entry or combine with + * prev update hw entry + * + * @cdm_bl_type: CDM BL type for the current updated entry + * @prepare: Contain the packet and HW update variables + * @kmd_buf_info: Kmd buffer to store register value pair changes + * @update_size: Update size for cmd data in kmd buffer + * @combine: Indicate whether combine with prev update entry + */ +void cam_isp_update_hw_entry( + enum cam_isp_cdm_bl_type cdm_bl_type, + struct cam_hw_prepare_update_args *prepare, + struct cam_kmd_buf_info *kmd_buf_info, + uint32_t update_size, + bool combine); +#endif /*_CAM_ISP_HW_PARSER_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h new file mode 100644 index 0000000000..22beb49ef6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The 
Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_TASKLET_UTIL_H_ +#define _CAM_TASKLET_UTIL_H_ + +#include "cam_irq_controller.h" + +/* + * cam_tasklet_init() + * + * @brief: Initialize the tasklet info structure + * + * @tasklet: Tasklet to initialize + * @hw_mgr_ctx: Private Ctx data that will be passed to the handler + * function + * @idx: Index of tasklet used as identity + * + * @return: 0: Success + * Negative: Failure + */ +int cam_tasklet_init( + void **tasklet, + void *hw_mgr_ctx, + uint32_t idx); + +/* + * cam_tasklet_deinit() + * + * @brief: Deinitialize the tasklet info structure + * + * @tasklet: Tasklet to deinitialize + * + * @return: Void + */ +void cam_tasklet_deinit(void **tasklet); + +/* + * cam_tasklet_start() + * + * @brief: Enable the tasklet to be scheduled and run. + * Caller should make sure this function is called + * before trying to enqueue. + * + * @tasklet: Tasklet to start + * + * @return: 0: Success + * Negative: Failure + */ +int cam_tasklet_start(void *tasklet); + +/* + * cam_tasklet_stop() + * + * @brief: Disable the tasklet so it can no longer be scheduled. + * Need to enable again to run. 
+ * + * @tasklet: Tasklet to stop + * + * @return: Void + */ +void cam_tasklet_stop(void *tasklet); + +/* + * cam_tasklet_enqueue_cmd() + * + * @brief: Enqueue the tasklet_cmd to used list + * + * @bottom_half: Tasklet info to enqueue onto + * @bh_cmd: Tasklet cmd used to enqueue task + * @handler_priv: Private Handler data that will be passed to the + * handler function + * @evt_payload_priv: Event payload that will be passed to the handler + * function + * @bottom_half_handler: Callback function that will be called by tasklet + * for handling event + * + * @return: Void + */ +void cam_tasklet_enqueue_cmd( + void *bottom_half, + void *bh_cmd, + void *handler_priv, + void *evt_payload_priv, + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler); + +/** + * cam_tasklet_get_cmd() + * + * @brief: Get free cmd from tasklet + * + * @bottom_half: Tasklet Info structure to get cmd from + * @bh_cmd: Return tasklet_cmd pointer if successful + * + * @return: 0: Success + * Negative: Failure + */ +int cam_tasklet_get_cmd(void *bottom_half, void **bh_cmd); + +/** + * cam_tasklet_put_cmd() + * + * @brief: Put back cmd to free list + * + * @bottom_half: Tasklet Info structure to put cmd into + * @bh_cmd: tasklet_cmd pointer that needs to be put back + * + * @return: Void + */ +void cam_tasklet_put_cmd(void *bottom_half, void **bh_cmd); + +extern struct cam_irq_bh_api tasklet_bh_api; + +#endif /* _CAM_TASKLET_UTIL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c new file mode 100644 index 0000000000..a59233c33b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c @@ -0,0 +1,1314 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_irq_controller.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +#define CAM_IRQ_LINE_TEST_TIMEOUT_MS 1000 +#define CAM_IRQ_MAX_DEPENDENTS 13 +#define CAM_IRQ_CTRL_NAME_LEN 16 + +/** + * struct cam_irq_evt_handler: + * @Brief: Event handler information + * + * @priority: Priority level of this event + * @evt_bit_mask_arr: evt_bit_mask that has the bits set for IRQs to + * subscribe for + * @handler_priv: Private data that will be passed to the Top/Bottom + * Half handler function + * @top_half_handler: Top half Handler callback function + * @bottom_half_handler: Bottom half Handler callback function + * @bottom_half: Pointer to bottom_half implementation on which to + * enqueue the event for further handling + * @bottom_half_enqueue_func: + * Function used to enqueue the bottom_half event + * @list_node: list_head struct used for overall handler List + * @th_list_node: list_head struct used for top half handler List + * @index: Unique id of the event + * @group: Group to which the event belongs + */ +struct cam_irq_evt_handler { + enum cam_irq_priority_level priority; + uint32_t *evt_bit_mask_arr; + void *handler_priv; + CAM_IRQ_HANDLER_TOP_HALF top_half_handler; + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler; + void *bottom_half; + struct cam_irq_bh_api irq_bh_api; + struct list_head list_node; + struct list_head th_list_node; + int index; + int group; +}; + +/** + * struct cam_irq_register_obj: + * @Brief: Structure containing information related to + * a particular register Set + * + * @index: Index of set in Array + * @mask_reg_offset: Offset of IRQ MASK register + * @clear_reg_offset: Offset of IRQ CLEAR register + * @status_reg_offset: Offset of IRQ STATUS register + * @set_reg_offset: Offset of IRQ SET register + * @test_set_val: Value to 
write to IRQ SET register to trigger IRQ + * @test_sub_val: Value to write to IRQ MASK register to receive test IRQ + * @force_rd_mask: Mask value for bits to be read in hw errata cases + * @top_half_enable_mask: Array of enabled bit_mask sorted by priority + * @aggr_mask: Aggregate mask to keep track of the overall mask + * after subscribe/unsubscribe calls + * @dependent_read_mask: Mask to check if any dependent controllers' read needs to triggered + * from independent controller + * @dirty_clear: Flag to identify if clear register contains non-zero value + */ +struct cam_irq_register_obj { + uint32_t index; + uint32_t mask_reg_offset; + uint32_t clear_reg_offset; + uint32_t status_reg_offset; + uint32_t set_reg_offset; + uint32_t test_set_val; + uint32_t test_sub_val; + uint32_t force_rd_mask; + uint32_t top_half_enable_mask[CAM_IRQ_PRIORITY_MAX]; + uint32_t aggr_mask; + uint32_t dependent_read_mask[CAM_IRQ_MAX_DEPENDENTS]; + bool dirty_clear; +}; + +/** + * struct cam_irq_controller: + * + * @brief: IRQ Controller structure. + * + * @name: Name of IRQ Controller block + * @mem_base: Mapped base address of register space to which + * register offsets are added to access registers + * @num_registers: Number of sets(mask/clear/status) of IRQ registers + * @irq_register_arr: Array of Register object associated with this + * Controller + * @irq_status_arr: Array of IRQ Status values + * @global_irq_cmd_offset: Offset of Global IRQ Clear register. 
This register + * contains the BIT that needs to be set for the CLEAR + * to take effect + * @global_clear_bitmask: Bitmask needed to be used in Global IRQ command register + * for Clear IRQ cmd to take effect + * @global_set_bitmask: Bitmask needed to be used in Global IRQ command register + * for Set IRQ cmd to take effect + * @clear_all_bitmask: Bitmask that specifies which bits should be written to clear register + * when it is to be cleared forcefully + * @dependent_bitmap: Bitmap to keep track of all the dependent controllers + * @parent_bitmap_idx: Index of this controller in parent controller's bitmap + * @evt_handler_list_head: List of all event handlers + * @th_list_head: List of handlers sorted by priority + * @hdl_idx: Unique identity of handler assigned on Subscribe. + * Used to Unsubscribe. + * @th_payload: Payload structure to be passed to top half handler + * @dependent_controller: Array of controllers that depend on this controller + * @lock: Lock to be used by controller, Use mutex lock in presil mode, + * and spinlock in regular case + * @is_dependent: Flag to indicate is this controller is dependent on another controller + * @delayed_global_clear: Flag to indicate if this controller issues global clear after dependent + * controllers are handled + */ +struct cam_irq_controller { + char name[CAM_IRQ_CTRL_NAME_LEN]; + void __iomem *mem_base; + uint32_t num_registers; + struct cam_irq_register_obj *irq_register_arr; + uint32_t *irq_status_arr; + uint32_t global_irq_cmd_offset; + uint32_t global_clear_bitmask; + uint32_t global_set_bitmask; + uint32_t clear_all_bitmask; + uint32_t dependent_bitmap; + int parent_bitmap_idx; + struct list_head evt_handler_list_head; + struct list_head th_list_head[CAM_IRQ_PRIORITY_MAX]; + uint32_t hdl_idx; + struct cam_irq_th_payload th_payload; + struct cam_irq_controller *dependent_controller[CAM_IRQ_MAX_DEPENDENTS]; + +#ifdef CONFIG_CAM_PRESIL + struct mutex lock; +#else + spinlock_t lock; +#endif + + bool 
is_dependent; + bool delayed_global_clear; + +}; + +#ifdef CONFIG_CAM_PRESIL +static inline void cam_irq_controller_lock_init(struct cam_irq_controller *controller) +{ + mutex_init(&controller->lock); +} + +static inline unsigned long cam_irq_controller_lock_irqsave( + struct cam_irq_controller *controller) +{ + mutex_lock(&controller->lock); + + return 0; +} + +static inline void cam_irq_controller_unlock_irqrestore( + struct cam_irq_controller *controller, unsigned long flags) +{ + mutex_unlock(&controller->lock); +} + +static inline void cam_irq_controller_lock(struct cam_irq_controller *controller) +{ + mutex_lock(&controller->lock); +} + +static inline void cam_irq_controller_unlock(struct cam_irq_controller *controller) +{ + mutex_unlock(&controller->lock); +} +#else +static inline void cam_irq_controller_lock_init(struct cam_irq_controller *controller) +{ + spin_lock_init(&controller->lock); +} + +static inline unsigned long cam_irq_controller_lock_irqsave( + struct cam_irq_controller *controller) +{ + unsigned long flags = 0; + + if (!in_irq()) + spin_lock_irqsave(&controller->lock, flags); + + return flags; +} + +static inline void cam_irq_controller_unlock_irqrestore( + struct cam_irq_controller *controller, unsigned long flags) +{ + if (!in_irq()) + spin_unlock_irqrestore(&controller->lock, flags); +} + +static inline void cam_irq_controller_lock(struct cam_irq_controller *controller) +{ + spin_lock(&controller->lock); +} + +static inline void cam_irq_controller_unlock(struct cam_irq_controller *controller) +{ + spin_unlock(&controller->lock); +} +#endif + +int cam_irq_controller_unregister_dependent(void *primary_controller, void *secondary_controller) +{ + struct cam_irq_controller *ctrl_primary, *ctrl_secondary; + int i, dep_idx; + + if (!primary_controller || !secondary_controller) { + CAM_ERR(CAM_IRQ_CTRL, "invalid args: %pK, %pK", primary_controller, + secondary_controller); + return -EINVAL; + } + + ctrl_primary = primary_controller; + 
ctrl_secondary = secondary_controller; + dep_idx = ctrl_secondary->parent_bitmap_idx; + + if ((dep_idx == -1) || (dep_idx == CAM_IRQ_MAX_DEPENDENTS)) { + CAM_ERR(CAM_IRQ_CTRL, "could not find %s as a dependent of %s)", + ctrl_secondary->name, ctrl_primary->name); + return -EINVAL; + } + + ctrl_primary->dependent_controller[dep_idx] = NULL; + for (i = 0; i < ctrl_primary->num_registers; i++) + ctrl_primary->irq_register_arr[i].dependent_read_mask[dep_idx] = 0; + + ctrl_secondary->is_dependent = false; + ctrl_secondary->parent_bitmap_idx = -1; + + ctrl_primary->dependent_bitmap &= (~BIT(dep_idx)); + + if (!ctrl_primary->dependent_bitmap) + ctrl_primary->delayed_global_clear = false; + + CAM_DBG(CAM_IRQ_CTRL, "successfully unregistered %s as dependent of %s", + ctrl_secondary->name, ctrl_primary->name); + + return 0; +} + +int cam_irq_controller_register_dependent(void *primary_controller, void *secondary_controller, + uint32_t *mask) +{ + struct cam_irq_controller *ctrl_primary, *ctrl_secondary; + int i, dep_idx; + unsigned long dependent_bitmap; + + if (!primary_controller || !secondary_controller) { + CAM_ERR(CAM_IRQ_CTRL, "invalid args: %pK, %pK", primary_controller, + secondary_controller); + return -EINVAL; + } + + ctrl_primary = primary_controller; + ctrl_secondary = secondary_controller; + dependent_bitmap = ctrl_primary->dependent_bitmap; + + if (ctrl_secondary->parent_bitmap_idx != -1) { + CAM_ERR(CAM_IRQ_CTRL, + "Duplicate dependent register for pri_ctrl:%s sec_ctrl:%s parent_bitmap_idx:%d", + ctrl_primary->name, ctrl_secondary->name, + ctrl_secondary->parent_bitmap_idx); + return -EPERM; + } + + dep_idx = find_first_zero_bit(&(dependent_bitmap), CAM_IRQ_MAX_DEPENDENTS); + if (dep_idx == CAM_IRQ_MAX_DEPENDENTS) { + CAM_ERR(CAM_IRQ_CTRL, "reached maximum dependents (%s - %s)", + ctrl_primary->name, ctrl_secondary->name); + return -ENOMEM; + } + + ctrl_secondary->parent_bitmap_idx = dep_idx; + ctrl_primary->dependent_controller[dep_idx] = 
secondary_controller; + for (i = 0; i < ctrl_primary->num_registers; i++) + ctrl_primary->irq_register_arr[i].dependent_read_mask[dep_idx] = mask[i]; + + ctrl_secondary->is_dependent = true; + ctrl_primary->dependent_bitmap |= BIT(dep_idx); + + /** + * NOTE: For dependent controllers that should not issue global clear command, + * set their global_irq_cmd_offset to 0 + */ + if (!ctrl_secondary->global_irq_cmd_offset) + ctrl_primary->delayed_global_clear = true; + + CAM_DBG(CAM_IRQ_CTRL, "successfully registered %s as dependent of %s", ctrl_secondary->name, + ctrl_primary->name); + return 0; +} + +static inline void cam_irq_controller_clear_irq( + struct cam_irq_controller *controller, + struct cam_irq_evt_handler *evt_handler) +{ + struct cam_irq_register_obj *irq_register; + int i; + + /* Don't clear in IRQ context since global clear will be issued after + * top half processing + */ + if (in_irq()) + return; + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + cam_io_w_mb(evt_handler->evt_bit_mask_arr[i], + controller->mem_base + + irq_register->clear_reg_offset); + } + + if (controller->global_irq_cmd_offset) + cam_io_w_mb(controller->global_clear_bitmask, + controller->mem_base + + controller->global_irq_cmd_offset); +} + +int cam_irq_controller_deinit(void **irq_controller) +{ + struct cam_irq_controller *controller = *irq_controller; + struct cam_irq_evt_handler *evt_handler = NULL; + + if (!controller) { + CAM_ERR(CAM_IRQ_CTRL, "Null Pointer"); + return -EINVAL; + } + + if (controller->dependent_bitmap) { + CAM_ERR(CAM_IRQ_CTRL, + "Unbalanced dependent unregister for controller: %s dep_bitmap:0x%x", + controller->name, controller->dependent_bitmap); + return -EINVAL; + } + + while (!list_empty(&controller->evt_handler_list_head)) { + evt_handler = list_first_entry( + &controller->evt_handler_list_head, + struct cam_irq_evt_handler, list_node); + list_del_init(&evt_handler->list_node); + 
CAM_MEM_FREE(evt_handler->evt_bit_mask_arr); + CAM_MEM_FREE(evt_handler); + } + + CAM_MEM_FREE(controller->th_payload.evt_status_arr); + CAM_MEM_FREE(controller->irq_status_arr); + CAM_MEM_FREE(controller->irq_register_arr); + CAM_MEM_FREE(controller); + *irq_controller = NULL; + return 0; +} + +int cam_irq_controller_init(const char *name, + void __iomem *mem_base, + struct cam_irq_controller_reg_info *register_info, + void **irq_controller) +{ + struct cam_irq_controller *controller = NULL; + int i, rc = 0; + + *irq_controller = NULL; + + if (!register_info->num_registers || !register_info->irq_reg_set || + !name || !mem_base) { + CAM_ERR(CAM_IRQ_CTRL, "Invalid parameters"); + rc = -EINVAL; + return rc; + } + + controller = CAM_MEM_ZALLOC(sizeof(struct cam_irq_controller), GFP_KERNEL); + if (!controller) { + CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ Controller"); + return -ENOMEM; + } + + controller->irq_register_arr = CAM_MEM_ZALLOC(register_info->num_registers * + sizeof(struct cam_irq_register_obj), GFP_KERNEL); + if (!controller->irq_register_arr) { + CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ register Arr"); + rc = -ENOMEM; + goto reg_alloc_error; + } + + controller->irq_status_arr = CAM_MEM_ZALLOC(register_info->num_registers * + sizeof(uint32_t), GFP_KERNEL); + if (!controller->irq_status_arr) { + CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ status Arr"); + rc = -ENOMEM; + goto status_alloc_error; + } + + controller->th_payload.evt_status_arr = + CAM_MEM_ZALLOC(register_info->num_registers * sizeof(uint32_t), + GFP_KERNEL); + if (!controller->th_payload.evt_status_arr) { + CAM_DBG(CAM_IRQ_CTRL, + "Failed to allocate BH payload bit mask Arr"); + rc = -ENOMEM; + goto evt_mask_alloc_error; + } + + strscpy(controller->name, name, CAM_IRQ_CTRL_NAME_LEN); + + CAM_DBG(CAM_IRQ_CTRL, "num_registers: %d", register_info->num_registers); + for (i = 0; i < register_info->num_registers; i++) { + controller->irq_register_arr[i].index = i; + 
controller->irq_register_arr[i].mask_reg_offset = + register_info->irq_reg_set[i].mask_reg_offset; + controller->irq_register_arr[i].clear_reg_offset = + register_info->irq_reg_set[i].clear_reg_offset; + controller->irq_register_arr[i].status_reg_offset = + register_info->irq_reg_set[i].status_reg_offset; + controller->irq_register_arr[i].set_reg_offset = + register_info->irq_reg_set[i].set_reg_offset; + controller->irq_register_arr[i].test_set_val = + register_info->irq_reg_set[i].test_set_val; + controller->irq_register_arr[i].test_sub_val = + register_info->irq_reg_set[i].test_sub_val; + controller->irq_register_arr[i].force_rd_mask = + register_info->irq_reg_set[i].force_rd_mask; + controller->irq_register_arr[i].dirty_clear = true; + CAM_DBG(CAM_IRQ_CTRL, "i %d mask_reg_offset: 0x%x", i, + controller->irq_register_arr[i].mask_reg_offset); + CAM_DBG(CAM_IRQ_CTRL, "i %d clear_reg_offset: 0x%x", i, + controller->irq_register_arr[i].clear_reg_offset); + CAM_DBG(CAM_IRQ_CTRL, "i %d status_reg_offset: 0x%x", i, + controller->irq_register_arr[i].status_reg_offset); + CAM_DBG(CAM_IRQ_CTRL, "i %d set_reg_offset: 0x%x", i, + controller->irq_register_arr[i].set_reg_offset); + } + controller->num_registers = register_info->num_registers; + controller->global_clear_bitmask = register_info->global_clear_bitmask; + controller->global_irq_cmd_offset = register_info->global_irq_cmd_offset; + controller->global_set_bitmask = register_info->global_set_bitmask; + controller->clear_all_bitmask = register_info->clear_all_bitmask; + controller->mem_base = mem_base; + controller->is_dependent = false; + controller->parent_bitmap_idx = -1; + + CAM_DBG(CAM_IRQ_CTRL, "global_clear_bitmask: 0x%x", + controller->global_clear_bitmask); + CAM_DBG(CAM_IRQ_CTRL, "global_irq_cmd_offset: 0x%x", + controller->global_irq_cmd_offset); + CAM_DBG(CAM_IRQ_CTRL, "mem_base: %pK", + (void __iomem *)controller->mem_base); + + INIT_LIST_HEAD(&controller->evt_handler_list_head); + for (i = 0; i < 
CAM_IRQ_PRIORITY_MAX; i++) + INIT_LIST_HEAD(&controller->th_list_head[i]); + + cam_irq_controller_lock_init(controller); + + controller->hdl_idx = 1; + *irq_controller = controller; + + return rc; + +evt_mask_alloc_error: + CAM_MEM_FREE(controller->irq_status_arr); +status_alloc_error: + CAM_MEM_FREE(controller->irq_register_arr); +reg_alloc_error: + CAM_MEM_FREE(controller); + + return rc; +} + +static inline void __cam_irq_controller_disable_irq( + struct cam_irq_controller *controller, + struct cam_irq_evt_handler *evt_handler) +{ + struct cam_irq_register_obj *irq_register; + uint32_t *update_mask; + int i, priority = 0; + + update_mask = evt_handler->evt_bit_mask_arr; + priority = evt_handler->priority; + + if (unlikely(priority >= CAM_IRQ_PRIORITY_MAX)) + return; + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + irq_register->top_half_enable_mask[priority] &= ~update_mask[i]; + irq_register->aggr_mask &= ~update_mask[i]; + cam_io_w_mb(irq_register->aggr_mask, controller->mem_base + + irq_register->mask_reg_offset); + } +} + +static inline void __cam_irq_controller_disable_irq_evt( + struct cam_irq_controller *controller, + struct cam_irq_evt_handler *evt_handler) +{ + struct cam_irq_register_obj *irq_register; + uint32_t *update_mask; + int i, priority = 0; + + update_mask = evt_handler->evt_bit_mask_arr; + priority = evt_handler->priority; + + if (unlikely(priority >= CAM_IRQ_PRIORITY_MAX)) + return; + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + irq_register->top_half_enable_mask[priority] &= ~update_mask[i]; + irq_register->aggr_mask &= ~update_mask[i]; + } +} + +static inline void __cam_irq_controller_enable_irq( + struct cam_irq_controller *controller, + struct cam_irq_evt_handler *evt_handler) +{ + struct cam_irq_register_obj *irq_register; + uint32_t *update_mask; + int i, priority = 0; + + update_mask = evt_handler->evt_bit_mask_arr; + 
priority = evt_handler->priority; + + if (unlikely(priority >= CAM_IRQ_PRIORITY_MAX)) + return; + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + irq_register->top_half_enable_mask[priority] |= update_mask[i]; + irq_register->aggr_mask |= update_mask[i]; + irq_register->dirty_clear = true; + cam_io_w_mb(irq_register->aggr_mask, controller->mem_base + + irq_register->mask_reg_offset); + } +} + +int cam_irq_controller_subscribe_irq(void *irq_controller, + enum cam_irq_priority_level priority, + uint32_t *evt_bit_mask_arr, + void *handler_priv, + CAM_IRQ_HANDLER_TOP_HALF top_half_handler, + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler, + void *bottom_half, + struct cam_irq_bh_api *irq_bh_api, + enum cam_irq_event_group evt_grp) +{ + struct cam_irq_controller *controller = irq_controller; + struct cam_irq_evt_handler *evt_handler = NULL; + int i; + int rc = 0; + unsigned long flags = 0; + + if (!controller || !handler_priv || !evt_bit_mask_arr) { + CAM_ERR(CAM_IRQ_CTRL, + "Inval params: ctlr=%pK hdl_priv=%pK bit_mask_arr=%pK", + controller, handler_priv, evt_bit_mask_arr); + return -EINVAL; + } + + if (!top_half_handler) { + CAM_ERR(CAM_IRQ_CTRL, "Missing top half handler"); + return -EINVAL; + } + + if (bottom_half_handler && + (!bottom_half || !irq_bh_api)) { + CAM_ERR(CAM_IRQ_CTRL, + "Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK", + bottom_half_handler, + bottom_half, + irq_bh_api); + return -EINVAL; + } + + if (irq_bh_api && + (!irq_bh_api->bottom_half_enqueue_func || + !irq_bh_api->get_bh_payload_func || + !irq_bh_api->put_bh_payload_func)) { + CAM_ERR(CAM_IRQ_CTRL, + "Invalid: enqueue_func=%pK get_bh=%pK put_bh=%pK", + irq_bh_api->bottom_half_enqueue_func, + irq_bh_api->get_bh_payload_func, + irq_bh_api->put_bh_payload_func); + return -EINVAL; + } + + if (priority >= CAM_IRQ_PRIORITY_MAX) { + CAM_ERR(CAM_IRQ_CTRL, "Invalid priority=%u, max=%u", priority, + CAM_IRQ_PRIORITY_MAX); + return -EINVAL; + 
} + + evt_handler = CAM_MEM_ZALLOC(sizeof(struct cam_irq_evt_handler), GFP_KERNEL); + if (!evt_handler) { + CAM_DBG(CAM_IRQ_CTRL, "Error allocating hlist_node"); + return -ENOMEM; + } + + evt_handler->evt_bit_mask_arr = CAM_MEM_ZALLOC(sizeof(uint32_t) * + controller->num_registers, GFP_KERNEL); + if (!evt_handler->evt_bit_mask_arr) { + CAM_DBG(CAM_IRQ_CTRL, "Error allocating hlist_node"); + rc = -ENOMEM; + goto free_evt_handler; + } + + INIT_LIST_HEAD(&evt_handler->list_node); + INIT_LIST_HEAD(&evt_handler->th_list_node); + + for (i = 0; i < controller->num_registers; i++) + evt_handler->evt_bit_mask_arr[i] = evt_bit_mask_arr[i]; + + evt_handler->priority = priority; + evt_handler->handler_priv = handler_priv; + evt_handler->top_half_handler = top_half_handler; + evt_handler->bottom_half_handler = bottom_half_handler; + evt_handler->bottom_half = bottom_half; + evt_handler->index = controller->hdl_idx++; + evt_handler->group = evt_grp; + + if (irq_bh_api) + evt_handler->irq_bh_api = *irq_bh_api; + + /* Avoid rollover to negative values */ + if (controller->hdl_idx > 0x3FFFFFFF) + controller->hdl_idx = 1; + + flags = cam_irq_controller_lock_irqsave(controller); + + __cam_irq_controller_enable_irq(controller, evt_handler); + + list_add_tail(&evt_handler->list_node, + &controller->evt_handler_list_head); + list_add_tail(&evt_handler->th_list_node, + &controller->th_list_head[priority]); + + cam_irq_controller_unlock_irqrestore(controller, flags); + + return evt_handler->index; + +free_evt_handler: + CAM_MEM_FREE(evt_handler); + evt_handler = NULL; + + return rc; +} + +static inline int cam_irq_controller_find_event_handle(struct cam_irq_controller *controller, + uint32_t handle, struct cam_irq_evt_handler **found_evt_handler) +{ + struct cam_irq_evt_handler *evt_handler, *evt_handler_temp; + int rc = -EINVAL; + + list_for_each_entry_safe(evt_handler, evt_handler_temp, + &controller->evt_handler_list_head, list_node) { + if (evt_handler->index == handle) { + rc = 0; + 
*found_evt_handler = evt_handler; + break; + } + } + + return rc; +} + +int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle) +{ + struct cam_irq_controller *controller = irq_controller; + struct cam_irq_evt_handler *evt_handler = NULL; + unsigned long flags = 0; + int rc = 0; + + if (!controller) + return rc; + + flags = cam_irq_controller_lock_irqsave(controller); + + rc = cam_irq_controller_find_event_handle(controller, handle, + &evt_handler); + if (rc) + goto end; + + CAM_DBG(CAM_IRQ_CTRL, "enable event %d", handle); + __cam_irq_controller_enable_irq(controller, evt_handler); + +end: + cam_irq_controller_unlock_irqrestore(controller, flags); + + return rc; +} + +int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle) +{ + struct cam_irq_controller *controller = irq_controller; + struct cam_irq_evt_handler *evt_handler = NULL; + unsigned long flags = 0; + int rc = 0; + + if (!controller) + return rc; + + flags = cam_irq_controller_lock_irqsave(controller); + + rc = cam_irq_controller_find_event_handle(controller, handle, + &evt_handler); + if (rc) + goto end; + + CAM_DBG(CAM_IRQ_CTRL, "disable event %d", handle); + __cam_irq_controller_disable_irq(controller, evt_handler); + cam_irq_controller_clear_irq(controller, evt_handler); + +end: + cam_irq_controller_unlock_irqrestore(controller, flags); + + return rc; +} + +int cam_irq_controller_unsubscribe_irq(void *irq_controller, + uint32_t handle) +{ + struct cam_irq_controller *controller = irq_controller; + struct cam_irq_evt_handler *evt_handler = NULL; + unsigned long flags = 0; + int rc = 0; + + flags = cam_irq_controller_lock_irqsave(controller); + + + rc = cam_irq_controller_find_event_handle(controller, handle, + &evt_handler); + if (rc) + goto end; + + list_del_init(&evt_handler->list_node); + list_del_init(&evt_handler->th_list_node); + + __cam_irq_controller_disable_irq(controller, evt_handler); + cam_irq_controller_clear_irq(controller, evt_handler); + + 
CAM_MEM_FREE(evt_handler->evt_bit_mask_arr); + CAM_MEM_FREE(evt_handler); + +end: + cam_irq_controller_unlock_irqrestore(controller, flags); + + return rc; +} + +int cam_irq_controller_unsubscribe_irq_evt(void *irq_controller, + uint32_t handle) +{ + struct cam_irq_controller *controller = irq_controller; + struct cam_irq_evt_handler *evt_handler = NULL; + unsigned long flags = 0; + int rc = 0; + + flags = cam_irq_controller_lock_irqsave(controller); + + + rc = cam_irq_controller_find_event_handle(controller, handle, + &evt_handler); + if (rc) + goto end; + + list_del_init(&evt_handler->list_node); + list_del_init(&evt_handler->th_list_node); + + __cam_irq_controller_disable_irq_evt(controller, evt_handler); + cam_irq_controller_clear_irq(controller, evt_handler); + + CAM_MEM_FREE(evt_handler->evt_bit_mask_arr); + CAM_MEM_FREE(evt_handler); + +end: + cam_irq_controller_unlock_irqrestore(controller, flags); + + return rc; +} + +/** + * cam_irq_controller_match_bit_mask() + * + * @Brief: This function checks if any of the enabled IRQ bits + * for a certain handler is Set in the Status values + * of the controller. 
+ * + * @controller: IRQ Controller structure + * @evt_handler: Event handler structure + * + * @Return: True: If any interested IRQ Bit is Set + * False: Otherwise + */ +static bool cam_irq_controller_match_bit_mask( + struct cam_irq_controller *controller, + struct cam_irq_evt_handler *evt_handler, + int evt_grp) +{ + int i; + + if (evt_handler->group != evt_grp) + return false; + + for (i = 0; i < controller->num_registers; i++) { + if (evt_handler->evt_bit_mask_arr[i] & + (controller->irq_status_arr[i] | + controller->irq_register_arr[i].force_rd_mask)) + return true; + } + + return false; +} + +static void __cam_irq_controller_th_processing( + struct cam_irq_controller *controller, + struct list_head *th_list_head, + int evt_grp) +{ + struct cam_irq_evt_handler *evt_handler = NULL; + struct cam_irq_evt_handler *evt_handler_tmp = NULL; + struct cam_irq_th_payload *th_payload = &controller->th_payload; + bool is_irq_match; + int rc = -EINVAL; + int i; + void *bh_cmd = NULL; + struct cam_irq_bh_api *irq_bh_api = NULL; + + CAM_DBG(CAM_IRQ_CTRL, "Enter"); + + if (list_empty(th_list_head)) + return; + + list_for_each_entry_safe(evt_handler, evt_handler_tmp, th_list_head, th_list_node) { + is_irq_match = cam_irq_controller_match_bit_mask(controller, evt_handler, evt_grp); + + if (!is_irq_match) + continue; + + CAM_DBG(CAM_IRQ_CTRL, "match found"); + + cam_irq_th_payload_init(th_payload); + th_payload->handler_priv = evt_handler->handler_priv; + th_payload->num_registers = controller->num_registers; + for (i = 0; i < controller->num_registers; i++) { + th_payload->evt_status_arr[i] = + controller->irq_status_arr[i] & + evt_handler->evt_bit_mask_arr[i]; + } + + irq_bh_api = &evt_handler->irq_bh_api; + bh_cmd = NULL; + + if (evt_handler->bottom_half_handler) { + rc = irq_bh_api->get_bh_payload_func( + evt_handler->bottom_half, &bh_cmd); + if (rc || !bh_cmd) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "No payload, IRQ handling frozen for %s", + controller->name); + continue; + } + 
} + + /* + * irq_status_arr[0] is dummy argument passed. the entire + * status array is passed in th_payload. + */ + if (evt_handler->top_half_handler) + rc = evt_handler->top_half_handler( + controller->irq_status_arr[0], + (void *)th_payload); + + if (rc && bh_cmd) { + irq_bh_api->put_bh_payload_func( + evt_handler->bottom_half, &bh_cmd); + continue; + } + + if (evt_handler->bottom_half_handler) { + CAM_DBG(CAM_IRQ_CTRL, "Enqueuing bottom half for %s", + controller->name); + irq_bh_api->bottom_half_enqueue_func( + evt_handler->bottom_half, + bh_cmd, + evt_handler->handler_priv, + th_payload->evt_payload_priv, + evt_handler->bottom_half_handler); + } + } + + CAM_DBG(CAM_IRQ_CTRL, "Exit"); +} + +void cam_irq_controller_disable_all(void *priv) +{ + struct cam_irq_controller *controller = priv; + struct cam_irq_register_obj *irq_register; + + uint32_t i = 0; + + if (!controller) + return; + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + memset(irq_register->top_half_enable_mask, 0, + sizeof(irq_register->top_half_enable_mask)); + irq_register->aggr_mask = 0; + cam_io_w_mb(0x0, controller->mem_base + irq_register->mask_reg_offset); + cam_io_w_mb(controller->clear_all_bitmask, controller->mem_base + + irq_register->clear_reg_offset); + } + + if (controller->global_irq_cmd_offset && !controller->delayed_global_clear) { + cam_io_w_mb(controller->global_clear_bitmask, + controller->mem_base + controller->global_irq_cmd_offset); + CAM_DBG(CAM_IRQ_CTRL, "Global Clear done from %s", + controller->name); + } +} + +static void __cam_irq_controller_read_registers(struct cam_irq_controller *controller) +{ + struct cam_irq_register_obj *irq_register; + int i; + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + + /* Skip register read if we are not going to process it */ + if (!irq_register->aggr_mask) { + controller->irq_status_arr[i] = 0; + continue; + } + + 
controller->irq_status_arr[i] = cam_io_r(controller->mem_base + + irq_register->status_reg_offset); + + CAM_DBG(CAM_IRQ_CTRL, "(%s) Read irq status%d (0x%x) = 0x%x", controller->name, i, + controller->irq_register_arr[i].status_reg_offset, + controller->irq_status_arr[i]); + + /** + * If this status register has not caused the interrupt to be + * triggered, we can skip writing to the clear register as long + * as no previous writes have been made to it that will cause + * bits of interest to be cleared (i.e. clear register is + * dirty). + * + * The dirty clear flag will never cause false negatives (i.e. + * valid writes to be missed) since hardware cannot set any bits + * in the clear register to 1 and will only clear the entire + * register upon resetting the hardware. + */ + if (!(controller->irq_status_arr[i] & irq_register->aggr_mask)) { + if (!irq_register->dirty_clear) + continue; + else + irq_register->dirty_clear = false; + } else { + irq_register->dirty_clear = true; + } + + CAM_DBG(CAM_IRQ_CTRL, "(%s) Write irq clear%d (0x%x) = 0x%x (dirty=%s)", + controller->name, i, controller->irq_register_arr[i].clear_reg_offset, + controller->irq_status_arr[i], + CAM_BOOL_TO_YESNO(irq_register->dirty_clear)); + + cam_io_w(controller->irq_status_arr[i], + controller->mem_base + irq_register->clear_reg_offset); + } + + if (controller->global_irq_cmd_offset && !controller->delayed_global_clear) { + cam_io_w_mb(controller->global_clear_bitmask, + controller->mem_base + controller->global_irq_cmd_offset); + CAM_DBG(CAM_IRQ_CTRL, "Global Clear done from %s", controller->name); + } +} + +static void __cam_irq_controller_sanitize_clear_registers(struct cam_irq_controller *controller) +{ + struct cam_irq_register_obj *irq_register; + int i; + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + if (!irq_register->dirty_clear) + continue; + + irq_register->dirty_clear = false; + + CAM_DBG(CAM_IRQ_CTRL, "(%s) Write irq 
clear%d (0x%x) = 0x%x (dirty=%s)", + controller->name, i, controller->irq_register_arr[i].clear_reg_offset, + 0x0, CAM_BOOL_TO_YESNO(irq_register->dirty_clear)); + + cam_io_w(0x0, controller->mem_base + irq_register->clear_reg_offset); + } +} + +static void cam_irq_controller_get_need_reg_read( + struct cam_irq_controller *controller, + bool *need_reg_read) +{ + struct cam_irq_register_obj *irq_register; + int i, j; + const unsigned long dependent_bitmap = controller->dependent_bitmap; + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + for_each_set_bit(j, &dependent_bitmap, CAM_IRQ_MAX_DEPENDENTS) { + if (irq_register->dependent_read_mask[j] & + (controller->irq_status_arr[i] | irq_register->force_rd_mask)) + need_reg_read[j] = true; + + CAM_DBG(CAM_IRQ_CTRL, + "(%s) reg:%d dep:%d need_reg_read = %d force_rd_mask: 0x%x", + controller->name, i, j, need_reg_read[j], + irq_register->force_rd_mask); + } + } +} + +static void cam_irq_controller_dep_reg_read( + struct cam_irq_controller *controller, + bool *need_reg_read) +{ + struct cam_irq_controller *dep_controller; + int j; + const unsigned long dependent_bitmap = controller->dependent_bitmap; + bool need_dep_reg_read[CAM_IRQ_MAX_DEPENDENTS] = {false}; + + for_each_set_bit(j, &dependent_bitmap, CAM_IRQ_MAX_DEPENDENTS) { + dep_controller = controller->dependent_controller[j]; + if (!dep_controller) { + CAM_ERR(CAM_IRQ_CTRL, "%s[%d] is undefined", controller->name, j); + continue; + } + + if (need_reg_read[j]) { + CAM_DBG(CAM_IRQ_CTRL, "Reading dependent registers for %s", + dep_controller->name); + __cam_irq_controller_read_registers(dep_controller); + } else { + CAM_DBG(CAM_IRQ_CTRL, "Sanitize registers for %s", + dep_controller->name); + __cam_irq_controller_sanitize_clear_registers(dep_controller); + } + + if (dep_controller->dependent_bitmap) { + cam_irq_controller_lock(dep_controller); + cam_irq_controller_get_need_reg_read(dep_controller, 
need_dep_reg_read); + cam_irq_controller_dep_reg_read(dep_controller, need_dep_reg_read); + cam_irq_controller_unlock(dep_controller); + } + } + +} + +static void cam_irq_controller_read_registers(struct cam_irq_controller *controller) +{ + bool need_reg_read[CAM_IRQ_MAX_DEPENDENTS] = {false}; + + __cam_irq_controller_read_registers(controller); + + if (controller->dependent_bitmap) { + cam_irq_controller_get_need_reg_read(controller, need_reg_read); + cam_irq_controller_dep_reg_read(controller, need_reg_read); + } + + if (controller->global_irq_cmd_offset && controller->delayed_global_clear) { + cam_io_w_mb(controller->global_clear_bitmask, + controller->mem_base + controller->global_irq_cmd_offset); + CAM_DBG(CAM_IRQ_CTRL, "Delayed Global Clear done from %s", + controller->name); + } +} + +static void cam_irq_controller_process_th(struct cam_irq_controller *controller, int evt_grp) +{ + struct cam_irq_register_obj *irq_register; + bool need_th_processing[CAM_IRQ_PRIORITY_MAX] = {false}; + int i, j; + + + for (i = 0; i < controller->num_registers; i++) { + irq_register = &controller->irq_register_arr[i]; + for (j = 0; j < CAM_IRQ_PRIORITY_MAX; j++) { + if (irq_register->top_half_enable_mask[j] & + (controller->irq_status_arr[i] | irq_register->force_rd_mask)) + need_th_processing[j] = true; + + CAM_DBG(CAM_IRQ_CTRL, + "reg:%d priority:%d need_th_processing = %d force_rd_mask: 0x%x", + i, j, need_th_processing[j], irq_register->force_rd_mask); + } + } + + for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++) { + if (need_th_processing[i]) { + CAM_DBG(CAM_IRQ_CTRL, "(%s) Invoke TH processing priority:%d", + controller->name, i); + __cam_irq_controller_th_processing(controller, &controller->th_list_head[i], + evt_grp); + } + } +} + +irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv, int evt_grp) +{ + struct cam_irq_controller *controller = priv; + + if (unlikely(!controller)) + return IRQ_NONE; + + CAM_DBG(CAM_IRQ_CTRL, + "Locking: %s IRQ Controller: [%pK], lock 
handle: %pK", + controller->name, controller, &controller->lock); + cam_irq_controller_lock(controller); + + if (!controller->is_dependent) + cam_irq_controller_read_registers(controller); + + cam_irq_controller_process_th(controller, evt_grp); + + cam_irq_controller_unlock(controller); + CAM_DBG(CAM_IRQ_CTRL, + "Unlocked: %s IRQ Controller: %pK, lock handle: %pK", + controller->name, controller, &controller->lock); + + return IRQ_HANDLED; +} + +int cam_irq_controller_update_irq(void *irq_controller, uint32_t handle, + bool enable, uint32_t *irq_mask) +{ + struct cam_irq_controller *controller = irq_controller; + struct cam_irq_evt_handler *evt_handler = NULL; + unsigned long flags = 0; + unsigned int i; + int rc = 0; + + if (!controller) + return rc; + + flags = cam_irq_controller_lock_irqsave(controller); + + rc = cam_irq_controller_find_event_handle(controller, handle, + &evt_handler); + if (rc) + goto end; + + __cam_irq_controller_disable_irq(controller, evt_handler); + for (i = 0; i < controller->num_registers; i++) { + if (enable) { + evt_handler->evt_bit_mask_arr[i] |= irq_mask[i]; + } else { + evt_handler->evt_bit_mask_arr[i] &= ~irq_mask[i]; + } + } + __cam_irq_controller_enable_irq(controller, evt_handler); + cam_irq_controller_clear_irq(controller, evt_handler); + +end: + cam_irq_controller_unlock_irqrestore(controller, flags); + + return rc; +} + +#ifdef CONFIG_CAM_TEST_IRQ_LINE + +struct cam_irq_line_test_priv { + char msg[256]; + struct completion complete; +}; + +static int cam_irq_controller_test_irq_line_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_irq_line_test_priv *test_priv; + + if (!th_payload || !th_payload->handler_priv) { + CAM_ERR(CAM_IRQ_CTRL, "invalid payload"); + return -EINVAL; + } + + test_priv = th_payload->handler_priv; + complete(&test_priv->complete); + CAM_INFO(CAM_IRQ_CTRL, "%s IRQ line verified", test_priv->msg); + + return 0; +} + + +int cam_irq_controller_test_irq_line(void 
*irq_controller, const char *fmt, ...) +{ + struct cam_irq_controller *controller = irq_controller; + struct cam_irq_register_obj *irq_reg; + struct cam_irq_line_test_priv *test_priv; + uint32_t *mask = NULL; + va_list args; + bool can_test = false; + int rc = 0, i, handle; + + if (!irq_controller) { + CAM_ERR(CAM_IRQ_CTRL, "invalid argument"); + return -EINVAL; + } + + for (i = 0; i < controller->num_registers; i++) { + irq_reg = &controller->irq_register_arr[i]; + if (irq_reg->set_reg_offset != 0 && irq_reg->test_set_val != 0 && + irq_reg->test_sub_val) + can_test = true; + CAM_DBG(CAM_IRQ_CTRL, "[%d] offset:0x%x val:0x%x can-test:%s", + i, irq_reg->set_reg_offset, irq_reg->test_set_val, + CAM_BOOL_TO_YESNO(can_test)); + } + + if (controller->global_irq_cmd_offset == 0 || controller->global_set_bitmask == 0) + can_test = false; + + CAM_DBG(CAM_IRQ_CTRL, "global offset:0x%x mask:0x%x", + controller->global_irq_cmd_offset, + controller->global_set_bitmask); + + if (!can_test) { + CAM_ERR(CAM_IRQ_CTRL, "%s not configured properly for testing", + controller->name); + return -EINVAL; + } + + mask = CAM_MEM_ZALLOC_ARRAY(controller->num_registers, sizeof(uint32_t), GFP_KERNEL); + if (!mask) { + CAM_ERR(CAM_IRQ_CTRL, "%s: cannot allocate mask array of length %d", + controller->name, controller->num_registers); + return -ENOMEM; + } + + for (i = 0; i < controller->num_registers; i++) + mask[i] = controller->irq_register_arr[i].test_sub_val; + + test_priv = CAM_MEM_ZALLOC(sizeof(struct cam_irq_line_test_priv), GFP_KERNEL); + if (!test_priv) { + CAM_ERR(CAM_IRQ_CTRL, "%s: cannot allocate test-priv", controller->name); + rc = -ENOMEM; + goto free_mem; + } + + va_start(args, fmt); + vscnprintf(test_priv->msg, 256, fmt, args); + va_end(args); + + init_completion(&test_priv->complete); + + handle = cam_irq_controller_subscribe_irq(controller, CAM_IRQ_PRIORITY_0, + mask, test_priv, cam_irq_controller_test_irq_line_top_half, + NULL, NULL, NULL, CAM_IRQ_EVT_GROUP_0); + if (handle 
< 0) { + CAM_ERR(CAM_IRQ_CTRL, "%s: failed to subscribe to test irq line", + controller->name); + rc = -EINVAL; + goto free_mem; + } + + for (i = 0; i < controller->num_registers; i++) { + irq_reg = &controller->irq_register_arr[i]; + if (irq_reg->test_set_val && irq_reg->set_reg_offset) { + cam_io_w_mb(irq_reg->test_set_val, + controller->mem_base + irq_reg->set_reg_offset); + CAM_DBG(CAM_IRQ_CTRL, "%s[%d] offset:0x%08x val:0x%08x", controller->name, + i, irq_reg->set_reg_offset, irq_reg->test_set_val); + } + } + + cam_io_w_mb(controller->global_set_bitmask, + controller->mem_base + controller->global_irq_cmd_offset); + CAM_DBG(CAM_IRQ_CTRL, "%s[SET-CMD] addr:0x%08x value:0x%08x", controller->name, + controller->mem_base + controller->global_irq_cmd_offset, + controller->global_set_bitmask); + + if (!cam_common_wait_for_completion_timeout(&test_priv->complete, + msecs_to_jiffies(CAM_IRQ_LINE_TEST_TIMEOUT_MS))) { + CAM_ERR(CAM_IRQ_CTRL, "%s: IRQ line verification timed out", controller->name); + rc = -ETIMEDOUT; + goto unsub_exit; + } + + CAM_DBG(CAM_IRQ_CTRL, "%s: verified IRQ line successfully", controller->name); + +unsub_exit: + cam_irq_controller_unsubscribe_irq(controller, handle); +free_mem: + CAM_MEM_FREE(mask); + CAM_MEM_FREE(test_priv); + return rc; +} + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h new file mode 100644 index 0000000000..0467d99dbd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h @@ -0,0 +1,385 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IRQ_CONTROLLER_H_ +#define _CAM_IRQ_CONTROLLER_H_ + +#include <linux/interrupt.h> + +#define CAM_IRQ_BITS_PER_REGISTER 32 + +/* + * enum cam_irq_priority_level: + * @Brief: Priority levels for IRQ events. + * Priority_0 events will be serviced before + * Priority_1 if these bits are set in the same + * Status Read. And so on up to Priority_4. + * + * Default Priority is Priority_4. + */ +enum cam_irq_priority_level { + CAM_IRQ_PRIORITY_0, + CAM_IRQ_PRIORITY_1, + CAM_IRQ_PRIORITY_2, + CAM_IRQ_PRIORITY_3, + CAM_IRQ_PRIORITY_4, + CAM_IRQ_PRIORITY_MAX, +}; + +/** + * enum cam_irq_event_group: + * @brief: Event groups to filter out events while handling. Use group 0 as default. + */ +enum cam_irq_event_group { + CAM_IRQ_EVT_GROUP_0, + CAM_IRQ_EVT_GROUP_1, + CAM_IRQ_EVT_GROUP_2, +}; + +/* + * struct cam_irq_register_set: + * @Brief: Structure containing offsets of IRQ related + * registers belonging to a Set + * + * @mask_reg_offset: Offset of IRQ MASK register + * @clear_reg_offset: Offset of IRQ CLEAR register + * @status_reg_offset: Offset of IRQ STATUS register + * @set_reg_offset: Offset of IRQ SET register + * @test_set_val: Value to write to IRQ SET register to trigger IRQ + * @test_sub_val: Value to write to IRQ MASK register to receive test IRQ + * @force_rd_mask: Mask value for bits to be read in hw errata cases + */ +struct cam_irq_register_set { + uint32_t mask_reg_offset; + uint32_t clear_reg_offset; + uint32_t status_reg_offset; + uint32_t set_reg_offset; + uint32_t test_set_val; + uint32_t test_sub_val; + uint32_t force_rd_mask; +}; + +/* + * struct cam_irq_controller_reg_info: + * @Brief: Structure describing the IRQ registers + * + * @num_registers: Number of sets(mask/clear/status) of IRQ registers + * @irq_reg_set: Array of Register Set Offsets. + * Length of array = num_registers + * @global_irq_cmd_offset: Offset of Global IRQ Clear register. 
This register + * contains the BIT that needs to be set for the CLEAR + * to take effect + * @global_clear_bitmask: Bitmask needed to be used in Global Clear register + * for Clear IRQ cmd to take effect + * @global_set_bitmask: Bitmask needed to be used in Global IRQ command register + * for Set IRQ cmd to take effect + * @clear_all_bitmask: Bitmask that specifies which bits should be written + * to clear register when it is to be cleared forcefully + */ +struct cam_irq_controller_reg_info { + uint32_t num_registers; + struct cam_irq_register_set *irq_reg_set; + uint32_t global_irq_cmd_offset; + uint32_t global_clear_bitmask; + uint32_t global_set_bitmask; + uint32_t clear_all_bitmask; +}; + +/* + * struct cam_irq_th_payload: + * @Brief: Event payload structure. This structure will be + * passed to the Top Half handler functions. + * + * @handler_priv: Private Data of handling object set when + * subscribing to IRQ event. + * @num_registers: Length of evt_bit_mask Array below + * @evt_status_arr: Array of Status bitmask read from registers. + * Length of array = num_registers + * @evt_payload_priv: Private payload pointer which can be set by Top + * Half handler for use in Bottom Half. + * @is_comp_irq: Indicates if the interrupt is originating from a + * composite IRQ bit. 
+ */ +struct cam_irq_th_payload { + void *handler_priv; + uint32_t num_registers; + uint32_t *evt_status_arr; + void *evt_payload_priv; + bool is_comp_irq; +}; + +/* + * cam_irq_th_payload_init() + * + * @brief: Initialize the top half payload structure + * + * @th_payload: Top Half payload structure to Initialize + * + * @return: Void + */ +static inline void cam_irq_th_payload_init( + struct cam_irq_th_payload *th_payload) { + th_payload->handler_priv = NULL; + th_payload->evt_payload_priv = NULL; + th_payload->is_comp_irq = false; +} + +typedef int (*CAM_IRQ_HANDLER_TOP_HALF)(uint32_t evt_id, + struct cam_irq_th_payload *th_payload); + +typedef int (*CAM_IRQ_HANDLER_BOTTOM_HALF)(void *handler_priv, + void *evt_payload_priv); + +typedef void (*CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC)(void *bottom_half, + void *bh_cmd, void *handler_priv, void *evt_payload_priv, + CAM_IRQ_HANDLER_BOTTOM_HALF); + +typedef int (*CAM_IRQ_GET_TASKLET_PAYLOAD_FUNC)(void *bottom_half, + void **bh_cmd); + +typedef void (*CAM_IRQ_PUT_TASKLET_PAYLOAD_FUNC)(void *bottom_half, + void **bh_cmd); + +struct cam_irq_bh_api { + CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC bottom_half_enqueue_func; + CAM_IRQ_GET_TASKLET_PAYLOAD_FUNC get_bh_payload_func; + CAM_IRQ_PUT_TASKLET_PAYLOAD_FUNC put_bh_payload_func; +}; + +/* + * cam_irq_controller_init() + * + * @brief: Create and Initialize IRQ Controller. 
+ * + * @name: Name of IRQ Controller block + * @mem_base: Mapped base address of register space to which + * register offsets are added to access registers + * @register_info: Register Info structure associated with this Controller + * @irq_controller: Pointer to IRQ Controller that will be filled if + * initialization is successful + * + * @return: 0: Success + * Negative: Failure + */ +int cam_irq_controller_init(const char *name, + void __iomem *mem_base, + struct cam_irq_controller_reg_info *register_info, + void **irq_controller); + +/* + * cam_irq_controller_subscribe_irq() + * + * @brief: Subscribe to certain IRQ events. + * + * @irq_controller: Pointer to IRQ Controller that controls this event IRQ + * @priority: Priority level of these events used if multiple events + * are SET in the Status Register + * @evt_bit_mask_arr: evt_bit_mask that has the bits set for IRQs to + * subscribe for + * @handler_priv: Private data that will be passed to the Top/Bottom Half + * handler function + * @top_half_handler: Top half Handler callback function + * @bottom_half_handler: Bottom half Handler callback function + * @bottom_half: Pointer to bottom_half implementation on which to + * enqueue the event for further handling + * @bottom_half_enqueue_func: + * Function used to enqueue the bottom_half event + * @evt_grp: Event group to which this event must belong to (use group 0 as default) + * + * @return: Positive: Success. Value represents handle which is + * to be used to unsubscribe + * Negative: Failure + */ +int cam_irq_controller_subscribe_irq(void *irq_controller, + enum cam_irq_priority_level priority, + uint32_t *evt_bit_mask_arr, + void *handler_priv, + CAM_IRQ_HANDLER_TOP_HALF top_half_handler, + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler, + void *bottom_half, + struct cam_irq_bh_api *irq_bh_api, + enum cam_irq_event_group evt_grp); + +/* + * cam_irq_controller_unsubscribe_irq() + * + * @brief: Unsubscribe to IRQ events previously subscribed to. 
+ * Masks out the corresponding events from source + * + * @irq_controller: Pointer to IRQ Controller that controls this event IRQ + * @handle: Handle returned on successful subscribe used to + * identify the handler object + * + * @return: 0: Success + * Negative: Failure + */ +int cam_irq_controller_unsubscribe_irq(void *irq_controller, + uint32_t handle); + +/* + * cam_irq_controller_unsubscribe_irq_evt() + * + * @brief: Unsubscribe to IRQ event payloads previously subscribed to + * + * @irq_controller: Pointer to IRQ Controller that controls this event IRQ + * @handle: Handle returned on successful subscribe used to + * identify the handler object + * + * @return: 0: Success + * Negative: Failure + */ +int cam_irq_controller_unsubscribe_irq_evt(void *irq_controller, + uint32_t handle); + +/* + * cam_irq_controller_deinit() + * + * @brief: Deinitialize IRQ Controller. + * + * @irq_controller: Pointer to IRQ Controller that needs to be + * deinitialized + * + * @return: 0: Success + * Negative: Failure + */ +int cam_irq_controller_deinit(void **irq_controller); + +/* + * cam_irq_controller_handle_irq() + * + * @brief: Function that should be registered with the IRQ line. + * This is the first function to be called when the IRQ + * is fired. It will read the Status register and Clear + * the IRQ bits. It will then call the top_half handlers + * and enqueue the result to bottom_half. + * + * @irq_num: Number of IRQ line that was set that led to this + * function being called + * @priv: Private data registered with request_irq is passed back + * here. This private data should be the irq_controller + * structure. + * + * @return: IRQ_HANDLED/IRQ_NONE + */ +irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv, int evt_grp); + +/* + * cam_irq_controller_disable_irq() + * + * @brief: Disable the interrupts on given controller. 
+ * Unsubscribe will disable the IRQ by default, so this is + * only needed if between subscribe/unsubscribe there is + * need to disable IRQ again + * + * @irq_controller: Pointer to IRQ Controller that controls the registered + * events to it. + * @handle: Handle returned on successful subscribe, used to + * identify the handler object + * + * @return: 0: events found and disabled + * Negative: events not registered on this controller + */ +int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle); + +/* + * cam_irq_controller_enable_irq() + * + * @brief: Enable the interrupts on given controller. + * Subscribe will enable the IRQ by default, so this is + * only needed if between subscribe/unsubscribe there is + * need to enable IRQ again + * + * @irq_controller: Pointer to IRQ Controller that controls the registered + * events to it. + * @handle: Handle returned on successful subscribe, used to + * identify the handler object + * + * @return: 0: events found and enabled + * Negative: events not registered on this controller + */ +int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle); + +/* + * cam_irq_controller_disable_all() + * + * @brief: This function clears and masks all the irq bits + * + * @priv: Private data registered with request_irq is passed back + * here. This private data should be the irq_controller + * structure. + */ +void cam_irq_controller_disable_all(void *priv); + +/* + * cam_irq_controller_update_irq() + * + * @brief: Enable/Disable the interrupts on given controller. + * Dynamically any irq can be disabled or enabled. + * + * @irq_controller: Pointer to IRQ Controller that controls the registered + * events to it. + * @handle: Handle returned on successful subscribe, used to + * identify the handler object + * + * @enable: Flag to indicate if disable or enable the irq. + * + * @irq_mask: IRQ mask to be enabled or disabled. 
+ * + * @return: 0: events found and enabled + * Negative: events not registered on this controller + */ +int cam_irq_controller_update_irq(void *irq_controller, uint32_t handle, + bool enable, uint32_t *irq_mask); + +/** + * cam_irq_controller_register_dependent + * @brief: Register one IRQ controller as dependent for another IRQ controller + * + * @primary_controller: Controller whose top half will invoke handle_irq function of secondary + * controller + * @secondary_controller: Controller whose handle_irq function that will be invoked from primary + * controller + * + * @return: 0: successfully registered + * Negative: failed to register as dependent + */ +int cam_irq_controller_register_dependent(void *primary_controller, void *secondary_controller, + uint32_t *mask); + +/** + * cam_irq_controller_unregister_dependent + * @brief: Unregister previously registered dependent IRQ controller + * + * @primary_controller: Controller whose top half will invoke handle_irq function of secondary + * controller + * @secondary_controller: Controller whose handle_irq function that will be invoked from primary + * controller + * + * @return: 0: successfully unregistered + * Negative: failed to unregister dependent + */ +int cam_irq_controller_unregister_dependent(void *primary_controller, void *secondary_controller); + +/** + * cam_irq_controller_test_irq_line() + * @brief: Test IRQ line corresponding to IRQ controller + * + * @irq_controller: IRQ controller under test + * @fmt: Format of the debug message to be printed + * @args: Arguments for the format specifiers corresponding to @fmt + * + * @return: 0: success, negative: failure + */ +#ifdef CONFIG_CAM_TEST_IRQ_LINE +int cam_irq_controller_test_irq_line(void *irq_controller, const char *fmt, ...); +#else +static inline int cam_irq_controller_test_irq_line(void *irq_controller, const char *fmt, ...) 
+{ + return -EPERM; +} +#endif + +#endif /* _CAM_IRQ_CONTROLLER_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h new file mode 100644 index 0000000000..9e9397bb2b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h @@ -0,0 +1,654 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_ISP_HW_MGR_INTF_H_ +#define _CAM_ISP_HW_MGR_INTF_H_ + +#include +#include +#include +#include +#include "cam_hw_mgr_intf.h" +#include "cam_packet_util.h" +#include "cam_cpas_api.h" + +/* MAX IFE instance */ +#define CAM_IFE_HW_NUM_MAX 8 +#define CAM_SFE_HW_NUM_MAX 3 +#define CAM_IFE_RDI_NUM_MAX 4 +#define CAM_SFE_RDI_NUM_MAX 5 +#define CAM_SFE_FE_RDI_NUM_MAX 3 +#define CAM_ISP_BW_CONFIG_V1 1 +#define CAM_ISP_BW_CONFIG_V2 2 +#define CAM_ISP_BW_CONFIG_V3 3 +#define CAM_TFE_HW_NUM_MAX 3 +#define CAM_TFE_RDI_NUM_MAX 3 +#define CAM_IFE_SCRATCH_NUM_MAX 2 +#define CAM_IFE_BUS_COMP_NUM_MAX 18 +#define CAM_SFE_BUS_COMP_NUM_MAX 12 +#define CAM_TFE_BW_LIMITER_CONFIG_V1 1 +#define CAM_TFE_BUS_COMP_NUM_MAX 18 + +/* maximum context numbers for TFE */ +#define CAM_TFE_CTX_MAX 4 + +/* maximum context numbers for IFE */ +#define CAM_IFE_CTX_MAX 8 + +/* Appliacble vote paths for dual ife, based on no. 
of UAPI definitions */ +#define CAM_ISP_MAX_PER_PATH_VOTES 40 + +/* Applicable number of per path exp info including csid and downstream hw */ +#define CAM_ISP_MAX_PER_PATH_EXP_INFO 40 + +/* Output params for acquire from hw_mgr to ctx */ +#define CAM_IFE_CTX_CUSTOM_EN BIT(0) +#define CAM_IFE_CTX_FRAME_HEADER_EN BIT(1) +#define CAM_IFE_CTX_CONSUME_ADDR_EN BIT(2) +#define CAM_IFE_CTX_APPLY_DEFAULT_CFG BIT(3) +#define CAM_IFE_CTX_SFE_EN BIT(4) +#define CAM_IFE_CTX_AEB_EN BIT(5) +#define CAM_IFE_CTX_DYNAMIC_SWITCH_EN BIT(6) + +/* + * Maximum configuration entry size - This is based on the + * worst case DUAL IFE use case plus some margin. + */ +#define CAM_ISP_CTX_CFG_MAX 30 + +/* + * Maximum configuration entry size including SFE & CSID - This is based on the + * worst case DUAL IFE/SFE use case plus some margin. + */ +#define CAM_ISP_SFE_CTX_CFG_MAX 40 + +/* Maximum number of channels/contexts for FCG modules */ +#define CAM_ISP_MAX_FCG_CH_CTXS 3 +#define CAM_ISP_IFE_MAX_FCG_CH_CTXS 3 +#define CAM_ISP_SFE_MAX_FCG_CHANNELS 2 + +/** + * enum cam_isp_hw_event_type - Collection of the ISP hardware events + */ +enum cam_isp_hw_event_type { + CAM_ISP_HW_EVENT_ERROR, + CAM_ISP_HW_EVENT_SOF, + CAM_ISP_HW_EVENT_REG_UPDATE, + CAM_ISP_HW_EVENT_EPOCH, + CAM_ISP_HW_EVENT_EOF, + CAM_ISP_HW_EVENT_DONE, + CAM_ISP_HW_SECONDARY_EVENT, + CAM_ISP_HW_EVENT_MAX +}; + +/** + * cam_isp_hw_evt_type_to_string() - convert cam_isp_hw_event_type to string for printing logs + */ +static inline const char *cam_isp_hw_evt_type_to_string( + enum cam_isp_hw_event_type evt_type) +{ + switch (evt_type) { + case CAM_ISP_HW_EVENT_ERROR: + return "ERROR"; + case CAM_ISP_HW_EVENT_SOF: + return "SOF"; + case CAM_ISP_HW_EVENT_REG_UPDATE: + return "REG_UPDATE"; + case CAM_ISP_HW_EVENT_EPOCH: + return "EPOCH"; + case CAM_ISP_HW_EVENT_EOF: + return "EOF"; + case CAM_ISP_HW_EVENT_DONE: + return "BUF_DONE"; + default: + return "INVALID_EVT"; + } +} + +/** + * enum cam_isp_hw_secondary_event_type - Collection of 
the ISP hardware secondary events + */ +enum cam_isp_hw_secondary_event_type { + CAM_ISP_HW_SEC_EVENT_SOF, + CAM_ISP_HW_SEC_EVENT_EPOCH, + CAM_ISP_HW_SEC_EVENT_OUT_OF_SYNC_FRAME_DROP, + CAM_ISP_HW_SEC_EVENT_EVENT_MAX, +}; + +/** + * enum cam_isp_hw_err_type - Collection of the ISP error types for + * ISP hardware event CAM_ISP_HW_EVENT_ERROR + */ +enum cam_isp_hw_err_type { + CAM_ISP_HW_ERROR_NONE = 0x00000001, + CAM_ISP_HW_ERROR_OVERFLOW = 0x00000002, + CAM_ISP_HW_ERROR_P2I_ERROR = 0x00000004, + CAM_ISP_HW_ERROR_VIOLATION = 0x00000008, + CAM_ISP_HW_ERROR_BUSIF_OVERFLOW = 0x00000010, + CAM_ISP_HW_ERROR_CSID_FATAL = 0x00000020, + CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW = 0x00000040, + CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW = 0x00000080, + CAM_ISP_HW_ERROR_CSID_FRAME_SIZE = 0x00000100, + CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW = 0x00000200, + CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED = 0x00000400, + CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA = 0x00000800, + CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR = 0x00001000, + CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME = 0x00002000, + CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP = 0x00004000, + CAM_ISP_HW_ERROR_CSID_MISSING_EOT = 0x00008000, + CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED = 0x00010000, + CAM_ISP_HW_ERROR_CSID_CAMIF_FRAME_DROP = 0x00020000, + CAM_ISP_HW_ERROR_HWPD_VIOLATION = 0x00040000, + CAM_ISP_HW_ERROR_CSID_MISSING_SOT = 0x00080000, + CAM_ISP_HW_ERROR_CSID_ILLEGAL_DT_SWITCH = 0x00100000, + CAM_ISP_HW_ERROR_DRV_VOTEUP_LATE = 0x00200000 +}; + +/** + * enum cam_isp_hw_stop_cmd - Specify the stop command type + */ +enum cam_isp_hw_stop_cmd { + CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY, + CAM_ISP_HW_STOP_IMMEDIATELY, + CAM_ISP_HW_STOP_MAX, +}; + +/** + * struct cam_isp_stop_args - hardware stop arguments + * + * @hw_stop_cmd: Hardware stop command type information + * @is_internal_stop: Stop triggered internally for reset & recovery + * @stop_only: Send stop only to hw drivers. No Deinit to be + * done. 
+ * @standby_en: Sensor standby is enabled + * + */ +struct cam_isp_stop_args { + enum cam_isp_hw_stop_cmd hw_stop_cmd; + bool is_internal_stop; + bool stop_only; + bool standby_en; +}; + +/** + * struct cam_isp_clock_config_internal - Clock configuration + * + * @usage_type: Usage type (Single/Dual) + * @num_rdi: Number of RDI votes + * @left_pix_hz: Pixel Clock for Left ISP + * @right_pix_hz: Pixel Clock for Right ISP, valid only if Dual + * @rdi_hz: RDI Clock. ISP clock will be max of RDI and + * PIX clocks. For a particular context which ISP + * HW the RDI is allocated to is not known to UMD. + * Hence pass the clock and let KMD decide. + */ +struct cam_isp_clock_config_internal { + uint64_t usage_type; + uint64_t num_rdi; + uint64_t left_pix_hz; + uint64_t right_pix_hz; + uint64_t rdi_hz[CAM_IFE_RDI_NUM_MAX]; +}; + +/** + * struct cam_isp_bw_config_internal_v2 - Bandwidth configuration + * + * @usage_type: ife hw index + * @num_paths: Number of data paths + * @axi_path: per path vote info + */ +struct cam_isp_bw_config_internal_v2 { + uint32_t usage_type; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_ISP_MAX_PER_PATH_VOTES]; +}; + +/** + * struct cam_isp_bw_config_internal - Internal Bandwidth configuration + * + * @usage_type: Usage type (Single/Dual) + * @num_rdi: Number of RDI votes + * @left_pix_vote: Bandwidth vote for left ISP + * @right_pix_vote: Bandwidth vote for right ISP + * @rdi_vote: RDI bandwidth requirements + */ +struct cam_isp_bw_config_internal { + uint32_t usage_type; + uint32_t num_rdi; + struct cam_isp_bw_vote left_pix_vote; + struct cam_isp_bw_vote right_pix_vote; + struct cam_isp_bw_vote rdi_vote[CAM_IFE_RDI_NUM_MAX]; +}; + +/** + * struct cam_isp_bw_clk_config_info - Bw/Clk config info + * + * @bw_config : BW vote info for current request V1 + * @bw_config_v2: BW vote info for current request V2 + * @bw_config_valid: Flag indicating if BW vote is valid for current request + * @ife_clock_config: Clock config 
information for ife + * @ife_clock_config_valid: Flag indicating whether clock config is valid for + * current request for ife + * @sfe_clock_config: Clock config information for sfe + * @sfe_clock_config_valid: Flag indicating whether clock config is valid for + * current request for sfe + */ +struct cam_isp_bw_clk_config_info { + struct cam_isp_bw_config_internal bw_config; + struct cam_isp_bw_config_internal_v2 bw_config_v2; + bool bw_config_valid; + struct cam_isp_clock_config_internal ife_clock_config; + bool ife_clock_config_valid; + struct cam_isp_clock_config_internal sfe_clock_config; + bool sfe_clock_config_valid; + +}; + +/** + * struct cam_isp_predict_fcg_config_internal - Internal FCG config in a single prediction + * + * @phase_index_g: Starting index of LUT for G channel in phase + * @phase_index_r: Starting index of LUT for R channel in phase + * @phase_index_b: Starting index of LUT for B channel in phase + * @stats_index_g: Starting index of LUT for G channel in stats + * @stats_index_r: Starting index of LUT for R channel in stats + * @stats_index_b: Starting index of LUT for B channel in stats + */ +struct cam_isp_predict_fcg_config_internal { + uint32_t phase_index_g; + uint32_t phase_index_r; + uint32_t phase_index_b; + uint32_t stats_index_g; + uint32_t stats_index_r; + uint32_t stats_index_b; +}; + +/** + * struct cam_isp_ch_ctx_fcg_config_internal - Internal FCG config in a single channel or context + * + * @fcg_ch_ctx_id: Index of the channel in SFE/IFE or context in TFE + * to be configured that FCG blocks reside on. + * For example, if one wants to config FCG block + * for IFE in ctx 0, this value will be CAM_ISP_FCG_MASK_CH0 + * @fcg_enable_mask: Indicate which module will be enabled for + * FCG. 
For example, if one wants to config + * SFE FCG STATS module, CAM_ISP_FCG_ENABLE_STATS + * will be set in mask + * @predicted_fcg_configs: FCG config for each prediction of the channel + * in serial order + */ +struct cam_isp_ch_ctx_fcg_config_internal { + uint32_t fcg_ch_ctx_id; + uint32_t fcg_enable_mask; + struct cam_isp_predict_fcg_config_internal *predicted_fcg_configs; +}; + +/** + * struct cam_isp_fcg_config_internal - Internal FCG config for a frame + * + * @num_ch_ctx: Number of channels for FCG config for SFE/IFE or + * number of contexts for FCG config for TFE + * @num_predictions: Number of predictions for each channel + * @num_types: Number of types(STATS/PHASE) for FCG config + * @ch_ctx_fcg_configs: FCG config for each channel or context + */ +struct cam_isp_fcg_config_internal { + uint32_t num_ch_ctx; + uint32_t num_predictions; + uint32_t num_types; + struct cam_isp_ch_ctx_fcg_config_internal *ch_ctx_fcg_configs; +}; + +/** + * struct cam_isp_fcg_config_info - Track FCG config for further usage in config stage + * + * @prediction_idx: Indicate which exact prediction to be used, decided + * during trying to apply the request + * @sfe_fcg_entry_idx: Index for SFE FCG config in hw update entries + * @sfe_fcg_config: Internal storage of SFE FCG configurations + * @ife_fcg_entry_idx: Index for IFE/MC_TFE FCG config in hw update entries + * @ife_fcg_config: Internal storage of IFE/MC_TFE FCG configurations + * @use_current_cfg: Indicate whether use current configuration or replace + * the value with FCG predicted ones. 
+ * @sfe_fcg_online: Indicate whether SFE FCG handling is online or not + * @ife_fcg_online: Indicate whether IFE/MC_TFE FCG handling is online or not + */ +struct cam_isp_fcg_config_info { + uint32_t prediction_idx; + uint32_t sfe_fcg_entry_idx; + struct cam_isp_fcg_config_internal sfe_fcg_config; + uint32_t ife_fcg_entry_idx; + struct cam_isp_fcg_config_internal ife_fcg_config; + bool use_current_cfg; + bool sfe_fcg_online; + bool ife_fcg_online; +}; + +/** + * struct cam_isp_path_exp_order_update_internal - ISP exp order update internal struct + * + * This config will contain number of processed and sensor out exposures + * applicable until updated. Also, this config contains an array + * of path to exposure order info map. + * + * @version: Version info + * @num_process_exp: Number of processed exposures + * @num_sensor_out_exp: Number of sensor output exposures + * @num_path_exp_info: Number of per path exp info + * @num_valid_params: Number of valid params being used + * @valid_params_mask: Indicate the exact params being used + * @params: Params for future change + * @exp_info: Exposure info for each path + */ +struct cam_isp_path_exp_order_update_internal { + __u32 version; + __u32 num_process_exp; + __u32 num_sensor_out_exp; + __u32 num_path_exp_info; + __u32 num_valid_params; + __u32 valid_params_mask; + __u32 params[4]; + struct cam_isp_per_path_exp_info exp_info[CAM_ISP_MAX_PER_PATH_EXP_INFO]; +}; + +/** + * struct cam_isp_prepare_hw_update_data - hw prepare data + * + * @isp_mgr_ctx: ISP HW manager Context for current request + * @packet_opcode_type: Packet header opcode in the packet header + * this opcode defines, packet is init packet or + * update packet + * @frame_header_cpu_addr: Frame header cpu addr + * @frame_header_iova: Frame header iova + * @frame_header_res_id: Out port res_id corresponding to frame header + * @bw_clk_config: BW and clock config info + * @isp_drv_config: DRV config info + * @drv_config_valid: Flag indicating if DRV config is valid 
for current request + * @isp_irq_comp_cfg: IRQ comp configuration for MC-based TFEs + * @irq_comp_cfg_valid: Flag indicating if IRQ comp cfg is valid for current request + * @isp_exp_order_update: Exposure order update for ISP paths + * @exp_order_update_valid: Flag indicating if exposure order update is valid for current request + * @reg_dump_buf_desc: cmd buffer descriptors for reg dump + * @num_reg_dump_buf: Count of descriptors in reg_dump_buf_desc + * @packet: CSL packet from user mode driver + * @mup_val: MUP value if configured + * @num_exp: Num of exposures + * @mup_en: Flag if dynamic sensor switch is enabled + * @force_disable_drv: Force to disable drv + * @fcg_info: Track FCG config for further usage in config stage + * + */ +struct cam_isp_prepare_hw_update_data { + void *isp_mgr_ctx; + uint32_t packet_opcode_type; + uint32_t *frame_header_cpu_addr; + uint64_t frame_header_iova; + uint32_t frame_header_res_id; + struct cam_isp_bw_clk_config_info bw_clk_config; + struct cam_isp_drv_config isp_drv_config; + bool drv_config_valid; + struct cam_isp_irq_comp_cfg isp_irq_comp_cfg; + bool irq_comp_cfg_valid; + struct cam_isp_path_exp_order_update_internal isp_exp_order_update; + bool exp_order_update_valid; + struct cam_cmd_buf_desc reg_dump_buf_desc[ + CAM_REG_DUMP_MAX_BUF_ENTRIES]; + uint32_t num_reg_dump_buf; + struct cam_packet *packet; + struct cam_kmd_buf_info kmd_cmd_buff_info; + uint32_t mup_val; + uint32_t num_exp; + bool mup_en; + bool force_disable_drv; + struct cam_isp_fcg_config_info fcg_info; +}; + + +/** + * struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF + * + * @timestamp : Time stamp for the sof event + * @boot_time : Boot time stamp for the sof event + * + */ +struct cam_isp_hw_sof_event_data { + uint64_t timestamp; + uint64_t boot_time; +}; + +/** + * struct cam_isp_hw_reg_update_event_data - Event payload for + * CAM_HW_EVENT_REG_UPDATE + * + * @timestamp: Time stamp for the reg update event + * + */ +struct 
cam_isp_hw_reg_update_event_data { + uint64_t timestamp; +}; + +/** + * struct cam_isp_hw_epoch_event_data - Event payload for CAM_HW_EVENT_EPOCH + * + * @timestamp: Time stamp for the epoch event + * @frame_id_meta: Frame id value corresponding to this frame + */ +struct cam_isp_hw_epoch_event_data { + uint64_t timestamp; + uint32_t frame_id_meta; +}; + +/** + * struct cam_isp_hw_done_event_data - Event payload for CAM_HW_EVENT_DONE + * + * @hw_type: Hw type sending the event + * @resource_handle: Resource handle + * @comp_group_id: Bus comp group id + * @last_consumed_addr: Last consumed addr + * @timestamp: Timestamp for the buf done event + * @is_early_done: Indicates if its an early done event + * + */ +struct cam_isp_hw_done_event_data { + uint32_t hw_type; + uint32_t resource_handle; + uint32_t comp_group_id; + uint32_t last_consumed_addr; + uint64_t timestamp; + bool is_early_done; +}; + +/** + * struct cam_isp_hw_eof_event_data - Event payload for CAM_HW_EVENT_EOF + * + * @timestamp: Timestamp for the eof event + * + */ +struct cam_isp_hw_eof_event_data { + uint64_t timestamp; +}; + +/** + * struct cam_isp_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR + * + * @error_type: HW error type for the error event + * @timestamp: Timestamp for the error event + * @recovery_enabled: Identifies if the context needs to recover & reapply + * this request + * @enable_req_dump: Enable request dump on HW errors + * @try_internal_recovery: Enable internal recovery on HW errors + */ +struct cam_isp_hw_error_event_data { + uint32_t error_type; + uint64_t timestamp; + bool recovery_enabled; + bool enable_req_dump; + bool try_internal_recovery; +}; + +/** + * struct cam_isp_hw_secondary_event_data - Event payload for secondary events + * + * @evt_type : Event notified as secondary + * + */ +struct cam_isp_hw_secondary_event_data { + enum cam_isp_hw_secondary_event_type evt_type; +}; + +/* enum cam_isp_hw_mgr_command - Hardware manager command type */ +enum 
cam_isp_hw_mgr_command { + CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT, + CAM_ISP_HW_MGR_CMD_PAUSE_HW, + CAM_ISP_HW_MGR_CMD_RESUME_HW, + CAM_ISP_HW_MGR_CMD_SOF_DEBUG, + CAM_ISP_HW_MGR_CMD_CTX_TYPE, + CAM_ISP_HW_MGR_GET_PACKET_OPCODE, + CAM_ISP_HW_MGR_GET_LAST_CDM_DONE, + CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG, + CAM_ISP_HW_MGR_GET_SOF_TS, + CAM_ISP_HW_MGR_DUMP_STREAM_INFO, + CAM_ISP_HW_MGR_GET_BUS_COMP_GROUP, + CAM_ISP_HW_MGR_CMD_UPDATE_CLOCK, + CAM_ISP_HW_MGR_GET_LAST_CONSUMED_ADDR, + CAM_ISP_HW_MGR_SET_DRV_INFO, + CAM_ISP_HW_MGR_CMD_MAX, +}; + +enum cam_isp_ctx_type { + CAM_ISP_CTX_FS2 = 1, + CAM_ISP_CTX_RDI, + CAM_ISP_CTX_PIX, + CAM_ISP_CTX_OFFLINE, + CAM_ISP_CTX_MAX, +}; + +/** + * struct cam_isp_hw_drv_info - DRV info + * + * @req_id: Request id + * @path_idle_en: Mask for paths to be considered for consolidated IDLE signal. + * When paths matching the mask go idle, BW is voted down. + * @frame_duration: Frame duration for a request + * @blanking_duration: Vertical blanking duration for a request, and it is representing + * the blanking durations before the frame for this request. 
+ * @drv_blanking_threshold: DRV blanking threshold + * @timeout_val: DRV timeout value + * @update_drv: This to tell DRV needs to be updated or not + * @drv_en: DRV is enabled or not + * @is_blob_config_valid: DRV blob is valid or not + * + */ +struct cam_isp_hw_drv_info { + uint64_t req_id; + uint32_t path_idle_en; + uint64_t frame_duration; + uint64_t blanking_duration; + uint64_t drv_blanking_threshold; + uint32_t timeout_val; + bool update_drv; + bool drv_en; + bool is_blob_config_valid; +}; + +/** + * struct cam_isp_hw_per_req_info - per request info + * + * @drv_info: DRV config related information + * @mup_en: is mup enabled or not + * + */ +struct cam_isp_hw_per_req_info { + struct cam_isp_hw_drv_info drv_info; + bool mup_en; +}; + + +/** + * struct cam_isp_hw_cmd_args - Payload for hw manager command + * + * @cmd_type: HW command type + * @cmd_data: Command data + * @sof_irq_enable: To debug if SOF irq is enabled + * @packet_op_code: Packet opcode + * @last_cdm_done: Last cdm done request + * @ctx_info: Gives info about context(RDI, PIX, bubble recovery) + * @sof_ts: SOF timestamps (current, boot and previous) + * @default_cfg_params: The params for default config + * @drv_info: DRV info for corresponding req + * @cdm_done_ts: CDM callback done timestamp + */ +struct cam_isp_hw_cmd_args { + uint32_t cmd_type; + void *cmd_data; + union { + uint32_t sof_irq_enable; + uint32_t packet_op_code; + uint64_t last_cdm_done; + struct { + uint64_t type; + bool bubble_recover_dis; + } ctx_info; + struct { + uint64_t curr; + uint64_t prev; + uint64_t boot; + } sof_ts; + struct { + int64_t last_applied_max_pd_req; + bool force_disable_drv; + } default_cfg_params; + struct cam_isp_hw_drv_info drv_info; + } u; + struct timespec64 cdm_done_ts; +}; + +/** + * struct cam_isp_start_args - isp hardware start arguments + * + * @hw_config: Hardware configuration commands. 
+ * @is_internal_start: Start triggered internally for reset & recovery + * @start_only: Send start only to hw drivers. No init to + * be done. + * + */ +struct cam_isp_start_args { + struct cam_hw_config_args hw_config; + bool is_internal_start; + bool start_only; +}; + +/** + * struct cam_isp_lcr_rdi_cfg_args - ISP LCR RDI config arguments + * + * @rdi_lcr_cfg: RDI LCR cfg received from User space. + * @ife_src_res_id: IFE SRC res id to be used in sfe context + * @is_init: Flag to indicate if init packet. + * + */ +struct cam_isp_lcr_rdi_cfg_args { + struct cam_isp_lcr_rdi_config *rdi_lcr_cfg; + uint32_t ife_src_res_id; + bool is_init; +}; + +/** + * cam_isp_hw_mgr_init() + * + * @brief: Initialization function for the ISP hardware manager + * + * @device_name_str: Device name string + * @hw_mgr: Input/output structure for the ISP hardware manager + * initialization + * @iommu_hdl: Iommu handle to be returned + * @isp_device_type: ISP device type + */ +int cam_isp_hw_mgr_init(const char *device_name_str, + struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl, uint32_t isp_device_type); + +void cam_isp_hw_mgr_deinit(const char *device_name_str); + +#endif /* __CAM_ISP_HW_MGR_INTF_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid1080.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid1080.h new file mode 100644 index 0000000000..19495c5356 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid1080.h @@ -0,0 +1,2093 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_1080_H_ +#define _CAM_IFE_CSID_1080_H_ + +#include +#include "cam_ife_csid_dev.h" +#include "camera_main.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" + +#define CAM_CSID_VERSION_V1080 0x100080000 + +static const struct cam_ife_csid_irq_desc cam_ife_csid_1080_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "DPHY_PH_ECC_SEC", + }, + { + .bitmask = BIT(9), + .desc = "SENSOR_MODE_ID_CHANGE", + }, + {0}, + {0}, + { + .bitmask = BIT(12), + .desc = + "DL0_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 0 - Check phy/sensor config", + }, + { + .bitmask = BIT(13), + .desc = + "DL1_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 1 - Check phy/sensor config", + }, + { + .bitmask = BIT(14), + .desc = + "DL2_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 2 - Check phy/sensor config", + }, + { + .bitmask = BIT(15), + .desc = + "DL3_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 3 - Check phy/sensor config", + }, + { + .bitmask = BIT(16), + .desc = + "DL0_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 0 - Check phy/sensor config", + }, + { + .bitmask = BIT(17), + .desc = + "DL1_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 1 - Check phy/sensor config", + }, + { + .bitmask = BIT(18), + .desc = + "DL2_SOT_LOST, Sensor: Timing 
signals are missed received in the cphy packet on lane 2 - Check phy/sensor config", + }, + { + .bitmask = BIT(19), + .desc = + "DL3_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 3 - Check phy/sensor config", + }, + { + .bitmask = BIT(20), + .desc = + "DL0_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 0 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(21), + .desc = + "DL1_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 1 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(22), + .desc = + "DL2_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 2 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(23), + .desc = + "DL3_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 3 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(24), + .desc = + "CPHY_PH_CRC, Sensor: All CPHY packet headers received are corrupted - Check phy/sensor config", + }, + { + .bitmask = BIT(25), + .desc = + "PAYLOAD_CRC, Sensor: The calculated CRC of a long packet does not match the transmitted (expected) CRC, possible corruption - Check phy/sensor config", + }, + { + .bitmask = BIT(26), + .desc = + "DPHY_PH_ECC_DED, Sensor: A short or long packet is corrupted and cannot be recovered - Check phy/sensor config", + }, + { + .bitmask = BIT(27), + .desc = + "MMAPPED_VC_DT, SW: A long packet has a VC_DT combination that is configured for more than one IPP or RDIs", + }, + { + .bitmask = BIT(28), + .desc = + "UNMAPPED_VC_DT, Sensor: A long packet has a VC_DT combination that is not configured for IPP or RDIs", + }, + { + .bitmask = BIT(29), + .desc = + "STREAM_UNDERFLOW, Sensor: Long packet payload size is less than payload header size, resulting a 
corrupted frame - Perform PHY Tuning/Check sensor limitations", + }, + {0}, + { + .bitmask = BIT(31), + .desc = "CSI2_RX_IRQ_STATUS_2", + }, + }, + { + { + .bitmask = BIT(0), + .desc = + "LONG_PKT, Debug: The header of the first long pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(1), + .desc = + "SHORT_PKT, Debug: The header of the first short pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(2), + .desc = + "CPHY_PKT_HDR, Debug: The header of the first cphy pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(3), + .desc = "Illegal programming for next frame ID config", + }, + }, +}; + +static const uint32_t cam_ife_csid_1080_num_rx_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_1080_rx_irq_desc[0]), + ARRAY_SIZE(cam_ife_csid_1080_rx_irq_desc[1]), +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_1080_path_irq_desc[] = { + { + .bitmask = BIT(0), + .err_type = CAM_ISP_HW_ERROR_CSID_FATAL, + .irq_name = "ILLEGAL_PROGRAMMING", + .desc = "SW: Illegal programming sequence", + .debug = "Check the following possiblities:", + .err_handler = cam_ife_csid_ver2_print_illegal_programming_irq_status, + }, + {0}, + { + .bitmask = BIT(2), + .irq_name = "INFO_DATA_FIFO_FULL", + }, + { + .bitmask = BIT(3), + .irq_name = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .irq_name = "CAMIF_SOF", + }, + {0}, + {0}, + { + .bitmask = BIT(7), + .err_type = CAM_ISP_HW_ERROR_CSID_ILLEGAL_DT_SWITCH, + .irq_name = "ILLEGAL_DT_SWITCH", + .desc = "SW: Change in DT without MUP", + .debug = "Check sensor configuration", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + {0}, + { + .bitmask = BIT(9), + .irq_name = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .irq_name = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .irq_name = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .irq_name = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = 
CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .irq_name = "ERROR_PIX_COUNT", + .desc = "SW: Mismatch in expected versus received number of pixels per line", + .debug = "Check SW config/sensor stream", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .irq_name = "ERROR_LINE_COUNT", + .desc = "SW: Mismatch in expected versus received number of lines", + .debug = "Check SW config/sensor stream", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .irq_name = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(16), + .irq_name = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(17), + .irq_name = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .err_type = CAM_ISP_HW_ERROR_CSID_CAMIF_FRAME_DROP, + .irq_name = "CAMIF_FRAME_DROP", + .desc = + "Sensor: The pre CAMIF frame drop would drop a frame in case the new frame starts prior to the end of the previous frame", + .debug = "Slower downstream processing or faster frame generation from sensor", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .irq_name = "OVERFLOW_RECOVERY", + .desc = "Detected by the overflow recovery block", + .debug = "Backpressure downstream", + }, + { + .bitmask = BIT(20), + .irq_name = "ERROR_REC_CCIF_VIOLATION", + .desc = "Output CCIF has a violation with respect to frame timing", + }, + { + .bitmask = BIT(21), + .irq_name = "CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .irq_name = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .irq_name = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .irq_name = "ILLEGAL_BATCH_ID", + .desc = "SW: Decoded frame ID does not match with any of the programmed batch IDs", + .debug = "Check frame ID and all available batch IDs", + }, + { + .bitmask = BIT(25), + .irq_name = "BATCH_END_MISSING_VIOLATION", + .desc = "SW: Input number of frames is not a multiple of the batch size", + .debug = "Check the configured 
pattern/period for batching", + }, + { + .bitmask = BIT(26), + .irq_name = "UNBOUNDED_FRAME", + .desc = "Sensor: Frame end or frame start is missing", + .debug = "Check the settle count in sensor driver XML", + }, + { + .bitmask = BIT(27), + .irq_name = "ERROR_SER_INVALID_CTXT", + .desc = "Indicates invalid context on CCIF", + + }, + { + .bitmask = BIT(28), + .irq_name = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .desc = + "Sensor/SW: The programmed MUP is out of sync with the VC of the incoming frame", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .irq_name = "CCIF_VIOLATION", + .desc = + "The output CCIF from the serializer has a violation with respect to frame timing", + }, +}; + +static const struct cam_ife_csid_top_irq_desc cam_ife_csid_1080_top_irq_desc[][32] = { + { + { + .bitmask = BIT(1), + .err_type = CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR, + .err_name = "FATAL_SENSOR_SWITCHING_IRQ", + .desc = + "Sensor/SW: Minimum VBI period between dynamically switching between two sensor modes was either violated or the downstream pipe was not active when the switch was made", + }, + }, + { + { + .bitmask = BIT(2), + .err_name = "ERROR_NO_VOTE_DN", + .desc = + "DRV: vote_down is never generated for the same frame and resource is never relinquished", + .debug = "Check vote up generated time", + }, + { + .bitmask = BIT(3), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .err_name = "ERROR_VOTE_UP_LATE", + .desc = "DRV: vote_up is generated after SOF", + .debug = "Check the vote up timer value", + .err_handler = cam_ife_csid_hw_ver2_drv_err_handler, + }, + { + .bitmask = BIT(4), + .err_type = CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW, + .err_name = "ERROR_RDI_LINE_BUFFER_CONFLICT", + .desc = + "System/SW: Multiple RDIs configured to access the same shared line buffer, more of a SW issue that led to this programming", + .err_handler = cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler, + }, + { + .bitmask = BIT(5), + 
.err_name = "ERROR_SENSOR_HBI", + .desc = "Sensor: Sensor HBI is less than expected HBI", + .debug = "Check sensor configuration", + }, + }, +}; + +static const uint32_t cam_ife_csid_1080_num_top_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_1080_top_irq_desc[0]), + ARRAY_SIZE(cam_ife_csid_1080_top_irq_desc[1]), +}; + +static const char *cam_ife_csid_1080_debug_vec_desc[][32] = { + { + "ERROR_UNBOUNDED_FRAME_RDI1", + "ERROR_SER_INVALID_CTXT_RDI1", + "ERROR_SER_CCIF_VIOLATION_RDI1", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI1", + "ERROR_REC_FRAME_DROP_RDI1", + "ERROR_REC_OVERFLOW_RDI0", + "ERROR_CAMIF_CCIF_VIOLATION_RDI0", + "ERROR_ILLEGAL_BATCH_ID_RDI0", + "ERROR_UNBOUNDED_FRAME_RDI0", + "ERROR_SER_INVALID_CTXT_RDI0", + "ERROR_SER_CCIF_VIOLATION_RDI0", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI0", + "ERROR_REC_FRAME_DROP_RDI0", + "ERROR_REC_OVERFLOW_PPP", + "ERROR_CAMIF_CCIF_VIOLATION_PPP", + "ERROR_ILLEGAL_BATCH_ID_PPP", + "ERROR_UNBOUNDED_FRAME_PPP", + "ERROR_SER_INVALID_CTXT_PPP", + "ERROR_SER_CCIF_VIOLATION_PPP", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_PPP", + "ERROR_REC_FRAME_DROP_PPP", + "ERROR_DL0_FIFO_OVERFLOW", + "ERROR_DL1_FIFO_OVERFLOW", + "ERROR_DL2_FIFO_OVERFLOW", + "ERROR_DL3_FIFO_OVERFLOW", + "ERROR_CPHY_PH_CRC", + "ERROR_DPHY_PH_ECC_DED", + "ERROR_STREAM_UNDERFLOW", + "ERROR_NO_VOTE_DN", + "ERROR_VOTE_UP_LATE", + "ERROR_RDI_LINE_BUFFER_CONFLICT", + "ERROR_SENSOR_HBI" + }, + { + "ERROR_UNBOUNDED_FRAME_IPP0", + "ERROR_SER_INVALID_CTXT_IPP0", + "ERROR_SER_CCIF_VIOLATION_IPP0", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_IPP0", + "ERROR_REC_FRAME_DROP_IPP0", + "ERROR_REC_OVERFLOW_RDI4", + "ERROR_CAMIF_CCIF_VIOLATION_RDI4", + "ERROR_ILLEGAL_BATCH_ID_RDI4", + "ERROR_UNBOUNDED_FRAME_RDI4", + "ERROR_SER_INVALID_CTXT_RDI4", + "ERROR_SER_CCIF_VIOLATION_RDI4", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI4", + "ERROR_REC_FRAME_DROP_RDI4", + "ERROR_REC_OVERFLOW_RDI3", + "ERROR_CAMIF_CCIF_VIOLATION_RDI3", + "ERROR_ILLEGAL_BATCH_ID_RDI3", + 
"ERROR_UNBOUNDED_FRAME_RDI3", + "ERROR_SER_INVALID_CTXT_RDI3", + "ERROR_SER_CCIF_VIOLATION_RDI3", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI3", + "ERROR_REC_FRAME_DROP_RDI3", + "ERROR_REC_OVERFLOW_RDI2", + "ERROR_CAMIF_CCIF_VIOLATION_RDI2", + "ERROR_ILLEGAL_BATCH_ID_RDI2", + "ERROR_UNBOUNDED_FRAME_RDI2", + "ERROR_SER_INVALID_CTXT_RDI2", + "ERROR_SER_CCIF_VIOLATION_RDI2", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI2", + "ERROR_REC_FRAME_DROP_RDI2", + "ERROR_REC_OVERFLOW_RDI1", + "ERROR_CAMIF_CCIF_VIOLATION_RDI1", + "ERROR_ILLEGAL_BATCH_ID_RDI1" + }, + { + "ERROR_REC_OVERFLOW_IPP0", + "ERROR_CAMIF_CCIF_VIOLATION_IPP0", + "ERROR_ILLEGAL_BATCH_ID_IPP0", + "ERROR_REC_OVERFLOW_IPP2", + "ERROR_CAMIF_CCIF_VIOLATION_IPP2", + "ERROR_ILLEGAL_BATCH_ID_IPP2", + "ERROR_UNBOUNDED_FRAME_IPP2", + "ERROR_SER_INVALID_CTXT_IPP2", + "ERROR_SER_CCIF_VIOLATION_IPP2", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_IPP2", + "ERROR_REC_FRAME_DROP_IPP2", + "ERROR_REC_OVERFLOW_IPP1", + "ERROR_CAMIF_CCIF_VIOLATION_IPP1", + "ERROR_ILLEGAL_BATCH_ID_IPP1", + "ERROR_UNBOUNDED_FRAME_IPP1", + "ERROR_SER_INVALID_CTXT_IPP1", + "ERROR_SER_CCIF_VIOLATION_IPP1", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_IPP1", + "ERROR_REC_FRAME_DROP_IPP1" + } +}; + +static struct cam_irq_register_set cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_MAX] = { + /* Top_1 */ + { + .mask_reg_offset = 0x00000184, + .clear_reg_offset = 0x00000188, + .status_reg_offset = 0x00000180, + .set_reg_offset = 0x0000018C, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + .force_rd_mask = BIT(31), /* force read due to hw errata */ + }, + /* RX_1 */ + { + .mask_reg_offset = 0x000001B4, + .clear_reg_offset = 0x000001B8, + .status_reg_offset = 0x000001B0, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x00000228, + .clear_reg_offset = 0x0000022C, + .status_reg_offset = 0x00000224, + }, + /* RDI1 */ + { + .mask_reg_offset = 0x00000238, + .clear_reg_offset = 0x0000023C, + .status_reg_offset = 0x00000234, + }, + /* RDI2 */ + 
{ + .mask_reg_offset = 0x00000248, + .clear_reg_offset = 0x0000024C, + .status_reg_offset = 0x00000244, + }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000258, + .clear_reg_offset = 0x0000025C, + .status_reg_offset = 0x00000254, + }, + /* RDI4 */ + { + .mask_reg_offset = 0x00000268, + .clear_reg_offset = 0x0000026C, + .status_reg_offset = 0x00000264, + }, + /* IPP_0 */ + { + .mask_reg_offset = 0x000001E8, + .clear_reg_offset = 0x000001EC, + .status_reg_offset = 0x000001E4, + }, + /* PPP */ + { + .mask_reg_offset = 0x000001D4, + .clear_reg_offset = 0x000001D8, + .status_reg_offset = 0x000001D0, + }, + /* UDI_0 */ + {0}, + /* UDI_1 */ + {0}, + /* UDI_2 */ + {0}, + + /* Top_2 */ + { + .mask_reg_offset = 0x00000194, + .clear_reg_offset = 0x00000198, + .status_reg_offset = 0x00000190, + .set_reg_offset = 0x0000019C, + }, + /* RX_2 */ + { + .mask_reg_offset = 0x000001C4, + .clear_reg_offset = 0x000001C8, + .status_reg_offset = 0x000001C0, + }, + /* IPP_1 */ + { + .mask_reg_offset = 0x000001F8, + .clear_reg_offset = 0x000001FC, + .status_reg_offset = 0x000001F4, + }, + /* IPP_2 */ + { + .mask_reg_offset = 0x00000208, + .clear_reg_offset = 0x0000020C, + .status_reg_offset = 0x00000204, + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_1080_top_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000110, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_1080_rx_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, 
+ .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_MAX] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_4], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_PPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + /* UDI_0 */ + {0}, + /* UDI_1 */ + {0}, + /* UDI_2 */ + {0}, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP_1], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + +}; + +static struct cam_irq_register_set cam_ife_csid_1080_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x000001A4, + .clear_reg_offset = 
0x000001A8, + .status_reg_offset = 0x000001A0, + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_1080_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_1080_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_ipp_reg_info = { + .irq_status_addr = 0x01E4, + .irq_mask_addr = 0x01E8, + .irq_clear_addr = 0x01EC, + .irq_set_addr = 0x01F0, + .cfg0_addr = 0x0C80, + .ctrl_addr = 0x0C88, + .debug_clr_cmd_addr = 0x0C8C, + .multi_vcdt_cfg0_addr = 0x0C90, + .multi_vcdt_cfg1_addr = 0x0C84, + .cfg1_addr = 0, + .sparse_pd_extractor_cfg_addr = 0, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .bin_pd_detect_cfg0_addr = 0, + .bin_pd_detect_cfg1_addr = 0, + .bin_pd_detect_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x0C94, + .epoch_irq_cfg_addr = 0, + .epoch0_subsample_ptrn_addr = 0, + .epoch1_subsample_ptrn_addr = 0, + .debug_rup_aup_status = 0x0C98, + .debug_camif_1_addr = 0x0C9C, + .debug_camif_0_addr = 0x0CA0, + .frm_drop_pattern_addr = 0x0CB8, + .frm_drop_period_addr = 0x0CBC, + .irq_subsample_pattern_addr = 0x0CC0, + .irq_subsample_period_addr = 0x0CC4, + .hcrop_addr = 0, + .vcrop_addr = 0, + .pix_drop_pattern_addr = 0, + .pix_drop_period_addr = 0, + .line_drop_pattern_addr = 0, + .line_drop_period_addr = 0, + .debug_halt_status_addr = 0x0CA4, + .debug_misr_val0_addr = 0x0CA8, + .debug_misr_val1_addr = 0x0CAC, + .debug_misr_val2_addr = 0x0CB0, + .debug_misr_val3_addr = 0x0CB4, + .format_measure_cfg0_addr = 0x0CC8, + .format_measure_cfg1_addr = 0x0CCC, + .format_measure_cfg2_addr = 0x0CD0, + .format_measure0_addr = 0x0CD4, + .format_measure1_addr = 0x0CD8, + .format_measure2_addr = 0x0CDC, + .timestamp_curr0_sof_addr = 0x0CE0, + .timestamp_curr1_sof_addr = 0x0CE4, + .timestamp_perv0_sof_addr = 0x0CE8, + .timestamp_perv1_sof_addr = 0x0CEC, + .timestamp_curr0_eof_addr = 0x0CF0, + 
.timestamp_curr1_eof_addr = 0x0CF4, + .timestamp_perv0_eof_addr = 0x0CF8, + .timestamp_perv1_eof_addr = 0x0CFC, + .lut_bank_cfg_addr = 0, + .batch_id_cfg0_addr = 0x0D00, + .batch_id_cfg1_addr = 0x0D04, + .batch_period_cfg_addr = 0x0D08, + .batch_stream_id_cfg_addr = 0x0D0C, + .epoch0_cfg_batch_id0_addr = 0x0D10, + .epoch1_cfg_batch_id0_addr = 0x0D14, + .epoch0_cfg_batch_id1_addr = 0x0D18, + .epoch1_cfg_batch_id1_addr = 0x0D1C, + .epoch0_cfg_batch_id2_addr = 0x0D20, + .epoch1_cfg_batch_id2_addr = 0x0D24, + .epoch0_cfg_batch_id3_addr = 0x0D28, + .epoch1_cfg_batch_id3_addr = 0x0D2C, + .epoch0_cfg_batch_id4_addr = 0x0D30, + .epoch1_cfg_batch_id4_addr = 0x0D34, + .epoch0_cfg_batch_id5_addr = 0x0D38, + .epoch1_cfg_batch_id5_addr = 0x0D3C, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x0D40, + .path_frame_id = 0x0D44, + + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS | + CAM_IFE_CSID_CAP_MULTI_CTXT, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .sof_retiming_dis_shift = 5, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 0, + .early_eof_en_shift_val = 16, + .bin_h_en_shift_val = 20, + .bin_v_en_shift_val = 21, + .bin_en_shift_val = 18, + .bin_qcfa_en_shift_val = 19, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 0, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .bin_pd_en_shift_val = 0, + .bin_pd_blk_w_shift_val = 8, + .bin_pd_blk_h_shift_val = 24, + .bin_pd_detect_x_offset_shift_val = 0, + .bin_pd_detect_x_end_shift_val = 16, + .bin_pd_detect_y_offset_shift_val = 0, + .bin_pd_detect_y_end_shift_val = 16, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val 
= 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x1, + .aup_mask = 0x1, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x100,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .disable_sof_retime_default = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_ipp_1_reg_info = { + .irq_status_addr = 0x01F4, + .irq_mask_addr = 0x01F8, + .irq_clear_addr = 0x01FC, + .irq_set_addr = 0x0200, + .cfg0_addr = 0x1080, + .ctrl_addr = 0x1088, + .debug_clr_cmd_addr = 0x108C, + .multi_vcdt_cfg0_addr = 0x1090, + .multi_vcdt_cfg1_addr = 0x1084, + .cfg1_addr = 0, + .sparse_pd_extractor_cfg_addr = 0, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .bin_pd_detect_cfg0_addr = 0, + .bin_pd_detect_cfg1_addr = 0, + .bin_pd_detect_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x1094, + .epoch_irq_cfg_addr = 0, + .epoch0_subsample_ptrn_addr = 0, + .epoch1_subsample_ptrn_addr = 0, + .debug_rup_aup_status = 0x1098, + .debug_camif_1_addr = 0x109C, + .debug_camif_0_addr = 0x10A0, + .frm_drop_pattern_addr = 0x10B8, + .frm_drop_period_addr = 0x10BC, + .irq_subsample_pattern_addr = 0x10C0, + .irq_subsample_period_addr = 0x10C4, + .hcrop_addr = 0, + .vcrop_addr = 0, + .pix_drop_pattern_addr = 0, + .pix_drop_period_addr = 0, + .line_drop_pattern_addr = 0, + .line_drop_period_addr = 0, + .debug_halt_status_addr = 0x10A4, + .debug_misr_val0_addr = 0x10A8, + .debug_misr_val1_addr = 0x10AC, + .debug_misr_val2_addr = 0x10B0, + .debug_misr_val3_addr = 0x10B4, + .format_measure_cfg0_addr = 0x10C8, + .format_measure_cfg1_addr = 0x10CC, + .format_measure_cfg2_addr = 0x10D0, + .format_measure0_addr = 0x10D4, + .format_measure1_addr = 0x10D8, + .format_measure2_addr = 0x10DC, + .timestamp_curr0_sof_addr = 0x10E0, + .timestamp_curr1_sof_addr = 0x10E4, + 
.timestamp_perv0_sof_addr = 0x10E8, + .timestamp_perv1_sof_addr = 0x10EC, + .timestamp_curr0_eof_addr = 0x10F0, + .timestamp_curr1_eof_addr = 0x10F4, + .timestamp_perv0_eof_addr = 0x10F8, + .timestamp_perv1_eof_addr = 0x10FC, + .lut_bank_cfg_addr = 0, + .batch_id_cfg0_addr = 0x1100, + .batch_id_cfg1_addr = 0x1104, + .batch_period_cfg_addr = 0x1108, + .batch_stream_id_cfg_addr = 0x110C, + .epoch0_cfg_batch_id0_addr = 0x1110, + .epoch1_cfg_batch_id0_addr = 0x1114, + .epoch0_cfg_batch_id1_addr = 0x1118, + .epoch1_cfg_batch_id1_addr = 0x111C, + .epoch0_cfg_batch_id2_addr = 0x1120, + .epoch1_cfg_batch_id2_addr = 0x1124, + .epoch0_cfg_batch_id3_addr = 0x1128, + .epoch1_cfg_batch_id3_addr = 0x112C, + .epoch0_cfg_batch_id4_addr = 0x1130, + .epoch1_cfg_batch_id4_addr = 0x1134, + .epoch0_cfg_batch_id5_addr = 0x1138, + .epoch1_cfg_batch_id5_addr = 0x113C, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x1140, + .path_frame_id = 0x1144, + + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS | + CAM_IFE_CSID_CAP_MULTI_CTXT, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .sof_retiming_dis_shift = 5, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .timestamp_en_shift_val = 6, + .start_master_sel_shift_val = 4, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x2, + .aup_mask = 0x2, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x200,}, + .disable_sof_retime_default = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_ipp_2_reg_info = { + .irq_status_addr = 0x0204, + .irq_mask_addr = 0x0208, + .irq_clear_addr = 0x020C, + .irq_set_addr = 0x0210, + .cfg0_addr = 0x1480, + .ctrl_addr = 0x1488, + 
.debug_clr_cmd_addr = 0x148C, + .multi_vcdt_cfg0_addr = 0x1490, + .multi_vcdt_cfg1_addr = 0x1484, + .cfg1_addr = 0, + .sparse_pd_extractor_cfg_addr = 0, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .bin_pd_detect_cfg0_addr = 0, + .bin_pd_detect_cfg1_addr = 0, + .bin_pd_detect_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x1494, + .epoch_irq_cfg_addr = 0, + .epoch0_subsample_ptrn_addr = 0, + .epoch1_subsample_ptrn_addr = 0, + .debug_rup_aup_status = 0x1498, + .debug_camif_1_addr = 0x149C, + .debug_camif_0_addr = 0x14A0, + .frm_drop_pattern_addr = 0x14B8, + .frm_drop_period_addr = 0x14BC, + .irq_subsample_pattern_addr = 0x14C0, + .irq_subsample_period_addr = 0x14C4, + .hcrop_addr = 0, + .vcrop_addr = 0, + .pix_drop_pattern_addr = 0, + .pix_drop_period_addr = 0, + .line_drop_pattern_addr = 0, + .line_drop_period_addr = 0, + .debug_halt_status_addr = 0x14A4, + .debug_misr_val0_addr = 0x14A8, + .debug_misr_val1_addr = 0x14AC, + .debug_misr_val2_addr = 0x14B0, + .debug_misr_val3_addr = 0x14B4, + .format_measure_cfg0_addr = 0x14C8, + .format_measure_cfg1_addr = 0x14CC, + .format_measure_cfg2_addr = 0x14D0, + .format_measure0_addr = 0x14D4, + .format_measure1_addr = 0x14D8, + .format_measure2_addr = 0x14DC, + .timestamp_curr0_sof_addr = 0x14E0, + .timestamp_curr1_sof_addr = 0x14E4, + .timestamp_perv0_sof_addr = 0x14E8, + .timestamp_perv1_sof_addr = 0x14EC, + .timestamp_curr0_eof_addr = 0x14F0, + .timestamp_curr1_eof_addr = 0x14F4, + .timestamp_perv0_eof_addr = 0x14F8, + .timestamp_perv1_eof_addr = 0x14FC, + .lut_bank_cfg_addr = 0, + .batch_id_cfg0_addr = 0x1500, + .batch_id_cfg1_addr = 0x1504, + .batch_period_cfg_addr = 0x1508, + .batch_stream_id_cfg_addr = 0x150C, + .epoch0_cfg_batch_id0_addr = 0x1510, + .epoch1_cfg_batch_id0_addr = 0x1514, + .epoch0_cfg_batch_id1_addr = 0x1518, + .epoch1_cfg_batch_id1_addr = 0x151C, + .epoch0_cfg_batch_id2_addr = 0x1520, + .epoch1_cfg_batch_id2_addr = 0x1524, + .epoch0_cfg_batch_id3_addr = 
0x1528, + .epoch1_cfg_batch_id3_addr = 0x152C, + .epoch0_cfg_batch_id4_addr = 0x1530, + .epoch1_cfg_batch_id4_addr = 0x1534, + .epoch0_cfg_batch_id5_addr = 0x1538, + .epoch1_cfg_batch_id5_addr = 0x153C, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x1540, + .path_frame_id = 0x1544, + + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS | + CAM_IFE_CSID_CAP_MULTI_CTXT, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .sof_retiming_dis_shift = 5, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .timestamp_en_shift_val = 6, + .start_master_sel_shift_val = 4, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x4, + .aup_mask = 0x4, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x400,}, + .disable_sof_retime_default = true, +}; + + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_ppp_reg_info = { + .irq_status_addr = 0x01D0, + .irq_mask_addr = 0x01D4, + .irq_clear_addr = 0x01D8, + .irq_set_addr = 0x01DC, + .cfg0_addr = 0x3080, + .ctrl_addr = 0x3088, + .debug_clr_cmd_addr = 0x308C, + .multi_vcdt_cfg0_addr = 0x3090, + .multi_vcdt_cfg1_addr = 0x3084, + .cfg1_addr = 0x5080, + .bin_cfg0_addr = 0x3164, + .pix_store_cfg0_addr = 0x5084, + .sparse_pd_extractor_cfg_addr = 0x5088, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .bin_pd_detect_cfg0_addr = 0, + .bin_pd_detect_cfg1_addr = 0, + .bin_pd_detect_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x3094, + .epoch_irq_cfg_addr = 0x3154, + .epoch0_subsample_ptrn_addr = 0x3158, + .epoch1_subsample_ptrn_addr = 0x315C, + .debug_rup_aup_status = 0x3098, + .debug_camif_1_addr = 0x309C, + .debug_camif_0_addr = 0x30A0, + 
.frm_drop_pattern_addr = 0x30B8, + .frm_drop_period_addr = 0x30BC, + .irq_subsample_pattern_addr = 0x30C0, + .irq_subsample_period_addr = 0x30C4, + .hcrop_addr = 0x508C, + .vcrop_addr = 0x5090, + .pix_drop_pattern_addr = 0x5094, + .pix_drop_period_addr = 0x5098, + .line_drop_pattern_addr = 0x509C, + .line_drop_period_addr = 0x50A0, + .debug_halt_status_addr = 0x30A4, + .debug_misr_val0_addr = 0x30A8, + .debug_misr_val1_addr = 0x30AC, + .debug_misr_val2_addr = 0x30B0, + .debug_misr_val3_addr = 0x30B4, + .format_measure_cfg0_addr = 0x30C8, + .format_measure_cfg1_addr = 0x30CC, + .format_measure_cfg2_addr = 0x30D0, + .format_measure0_addr = 0x30D4, + .format_measure1_addr = 0x30D8, + .format_measure2_addr = 0x30DC, + .timestamp_curr0_sof_addr = 0x30E0, + .timestamp_curr1_sof_addr = 0x30E4, + .timestamp_perv0_sof_addr = 0x30E8, + .timestamp_perv1_sof_addr = 0x30EC, + .timestamp_curr0_eof_addr = 0x30F0, + .timestamp_curr1_eof_addr = 0x30F4, + .timestamp_perv0_eof_addr = 0x30F8, + .timestamp_perv1_eof_addr = 0x30FC, + .lut_bank_cfg_addr = 0x50A4, + .batch_id_cfg0_addr = 0x3100, + .batch_id_cfg1_addr = 0x3104, + .batch_period_cfg_addr = 0x3108, + .batch_stream_id_cfg_addr = 0x310C, + .epoch0_cfg_batch_id0_addr = 0x3110, + .epoch1_cfg_batch_id0_addr = 0x3114, + .epoch0_cfg_batch_id1_addr = 0x3118, + .epoch1_cfg_batch_id1_addr = 0x311C, + .epoch0_cfg_batch_id2_addr = 0x3120, + .epoch1_cfg_batch_id2_addr = 0x3124, + .epoch0_cfg_batch_id3_addr = 0x3128, + .epoch1_cfg_batch_id3_addr = 0x312C, + .epoch0_cfg_batch_id4_addr = 0x3130, + .epoch1_cfg_batch_id4_addr = 0x3134, + .epoch0_cfg_batch_id5_addr = 0x3138, + .epoch1_cfg_batch_id5_addr = 0x313C, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x3140, + .path_frame_id = 0x3144, + .debug_sim_monitor = 0x3168, + + /* configurations */ + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .resume_frame_boundary = 1, + .start_mode_shift = 2, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + 
.start_mode_slave = 0x3, + .start_master_sel_val = 3, + .start_master_sel_shift = 4, + .binning_supported = 0x1, + .bin_h_en_shift_val = 18, + .bin_en_shift_val = 18, + .early_eof_en_shift_val = 16, + .pix_store_en_shift_val = 0, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 0, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 1, + .start_master_sel_shift_val = 4, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .rup_mask = 0x10000, + .aup_mask = 0x10000, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x10,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .sof_retiming_dis_shift = 5, + .disable_sof_retime_default = true, + .use_master_slave_default = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_rdi_0_reg_info = { + .irq_status_addr = 0x0224, + .irq_mask_addr = 0x0228, + .irq_clear_addr = 0x022C, + .irq_set_addr = 0x0230, + .cfg0_addr = 0x5480, + .ctrl_addr = 0x5488, + .debug_clr_cmd_addr = 0x548C, + .multi_vcdt_cfg0_addr = 0x5490, + .multi_vcdt_cfg1_addr = 0x5484, + .cfg1_addr = 0x5494, + .pix_store_cfg0_addr = 0x5498, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .debug_byte_cntr_ping_addr = 0x54A8, + .debug_byte_cntr_pong_addr = 0x54AC, + .camif_frame_cfg_addr = 0x54B0, + .epoch_irq_cfg_addr = 0x54B4, + .epoch0_subsample_ptrn_addr = 0x54B8, + .epoch1_subsample_ptrn_addr = 0x54BC, + .debug_rup_aup_status = 0x54C0, + .debug_camif_1_addr = 0x54C4, + .debug_camif_0_addr = 0x54C8, + .frm_drop_pattern_addr = 0x54CC, + .frm_drop_period_addr = 0x54D0, + .irq_subsample_pattern_addr = 0x54D4, + .irq_subsample_period_addr = 0x54D8, + .hcrop_addr = 0x54DC, + .vcrop_addr = 0x54E0, + .pix_drop_pattern_addr = 0x54E4, + .pix_drop_period_addr = 0x54E8, + 
.line_drop_pattern_addr = 0x54EC, + .line_drop_period_addr = 0x54F0, + .debug_halt_status_addr = 0x54F8, + .debug_misr_val0_addr = 0x54FC, + .debug_misr_val1_addr = 0x5500, + .debug_misr_val2_addr = 0x5504, + .debug_misr_val3_addr = 0x5508, + .format_measure_cfg0_addr = 0x550C, + .format_measure_cfg1_addr = 0x5510, + .format_measure_cfg2_addr = 0x5514, + .format_measure0_addr = 0x5518, + .format_measure1_addr = 0x551C, + .format_measure2_addr = 0x5520, + .timestamp_curr0_sof_addr = 0x5524, + .timestamp_curr1_sof_addr = 0x5528, + .timestamp_perv0_sof_addr = 0x552C, + .timestamp_perv1_sof_addr = 0x5530, + .timestamp_curr0_eof_addr = 0x5534, + .timestamp_curr1_eof_addr = 0x5538, + .timestamp_perv0_eof_addr = 0x553C, + .timestamp_perv1_eof_addr = 0x5540, + .batch_id_cfg0_addr = 0x5544, + .batch_id_cfg1_addr = 0x5548, + .batch_period_cfg_addr = 0x554C, + .batch_stream_id_cfg_addr = 0x5550, + .epoch0_cfg_batch_id0_addr = 0x5554, + .epoch1_cfg_batch_id0_addr = 0x5558, + .epoch0_cfg_batch_id1_addr = 0x555C, + .epoch1_cfg_batch_id1_addr = 0x5560, + .epoch0_cfg_batch_id2_addr = 0x5564, + .epoch1_cfg_batch_id2_addr = 0x5568, + .epoch0_cfg_batch_id3_addr = 0x556C, + .epoch1_cfg_batch_id3_addr = 0x5570, + .epoch0_cfg_batch_id4_addr = 0x5574, + .epoch1_cfg_batch_id4_addr = 0x5578, + .epoch0_cfg_batch_id5_addr = 0x557C, + .epoch1_cfg_batch_id5_addr = 0x5580, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x5584, + .path_frame_id = 0x5588, + .debug_sim_monitor = 0x558C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + 
.crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x100, + .aup_mask = 0x100, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x10000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, + .default_out_format = CAM_FORMAT_PLAIN16_16, + .disable_sof_retime_default = true, + .use_master_slave_default = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_rdi_1_reg_info = { + .irq_status_addr = 0x0234, + .irq_mask_addr = 0x0238, + .irq_clear_addr = 0x023C, + .irq_set_addr = 0x0240, + .cfg0_addr = 0x5680, + .ctrl_addr = 0x5688, + .debug_clr_cmd_addr = 0x568C, + .multi_vcdt_cfg0_addr = 0x5690, + .multi_vcdt_cfg1_addr = 0x5684, + .cfg1_addr = 0x5694, + .pix_store_cfg0_addr = 0x5698, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .debug_byte_cntr_ping_addr = 0x56A8, + .debug_byte_cntr_pong_addr = 0x56AC, + .camif_frame_cfg_addr = 0x56B0, + .epoch_irq_cfg_addr = 0x56B4, + .epoch0_subsample_ptrn_addr = 0x56B8, + .epoch1_subsample_ptrn_addr = 0x56BC, + .debug_rup_aup_status = 0x56C0, + .debug_camif_1_addr = 0x56C4, + .debug_camif_0_addr = 0x56C8, + .frm_drop_pattern_addr = 0x56CC, + .frm_drop_period_addr = 0x56D0, + .irq_subsample_pattern_addr = 0x56D4, + .irq_subsample_period_addr = 0x56D8, + .hcrop_addr = 0x56DC, + .vcrop_addr = 0x56E0, + .pix_drop_pattern_addr = 0x56E4, + .pix_drop_period_addr = 0x56E8, + .line_drop_pattern_addr = 0x56EC, + .line_drop_period_addr = 0x56F0, 
+ .debug_halt_status_addr = 0x56F8, + .debug_misr_val0_addr = 0x56FC, + .debug_misr_val1_addr = 0x5700, + .debug_misr_val2_addr = 0x5704, + .debug_misr_val3_addr = 0x5708, + .format_measure_cfg0_addr = 0x570C, + .format_measure_cfg1_addr = 0x5710, + .format_measure_cfg2_addr = 0x5714, + .format_measure0_addr = 0x5718, + .format_measure1_addr = 0x571C, + .format_measure2_addr = 0x5720, + .timestamp_curr0_sof_addr = 0x5724, + .timestamp_curr1_sof_addr = 0x5728, + .timestamp_perv0_sof_addr = 0x572C, + .timestamp_perv1_sof_addr = 0x5730, + .timestamp_curr0_eof_addr = 0x5734, + .timestamp_curr1_eof_addr = 0x5738, + .timestamp_perv0_eof_addr = 0x573C, + .timestamp_perv1_eof_addr = 0x5740, + .batch_id_cfg0_addr = 0x5744, + .batch_id_cfg1_addr = 0x5748, + .batch_period_cfg_addr = 0x574C, + .batch_stream_id_cfg_addr = 0x5750, + .epoch0_cfg_batch_id0_addr = 0x5754, + .epoch1_cfg_batch_id0_addr = 0x5758, + .epoch0_cfg_batch_id1_addr = 0x575C, + .epoch1_cfg_batch_id1_addr = 0x5760, + .epoch0_cfg_batch_id2_addr = 0x5764, + .epoch1_cfg_batch_id2_addr = 0x5768, + .epoch0_cfg_batch_id3_addr = 0x576C, + .epoch1_cfg_batch_id3_addr = 0x5770, + .epoch0_cfg_batch_id4_addr = 0x5774, + .epoch1_cfg_batch_id4_addr = 0x5778, + .epoch0_cfg_batch_id5_addr = 0x577C, + .epoch1_cfg_batch_id5_addr = 0x5780, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x5784, + .path_frame_id = 0x5788, + .debug_sim_monitor = 0x578C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + 
.drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x200, + .aup_mask = 0x200, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x20000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_rdi_2_reg_info = { + .irq_status_addr = 0x0244, + .irq_mask_addr = 0x0248, + .irq_clear_addr = 0x024C, + .irq_set_addr = 0x0250, + .cfg0_addr = 0x5880, + .ctrl_addr = 0x5888, + .debug_clr_cmd_addr = 0x588C, + .multi_vcdt_cfg0_addr = 0x5890, + .multi_vcdt_cfg1_addr = 0x5884, + .cfg1_addr = 0x5894, + .pix_store_cfg0_addr = 0x5898, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .debug_byte_cntr_ping_addr = 0x58A8, + .debug_byte_cntr_pong_addr = 0x58AC, + .camif_frame_cfg_addr = 0x58B0, + .epoch_irq_cfg_addr = 0x58B4, + .epoch0_subsample_ptrn_addr = 0x58B8, + .epoch1_subsample_ptrn_addr = 0x58BC, + .debug_rup_aup_status = 0x58C0, + .debug_camif_1_addr = 0x58C4, + .debug_camif_0_addr = 0x58C8, + .frm_drop_pattern_addr = 0x58CC, + .frm_drop_period_addr = 0x58D0, + .irq_subsample_pattern_addr = 0x58D4, + .irq_subsample_period_addr = 0x58D8, + .hcrop_addr = 0x58DC, + .vcrop_addr = 0x58E0, + .pix_drop_pattern_addr = 0x58E4, + .pix_drop_period_addr = 0x58E8, + .line_drop_pattern_addr = 0x58EC, + .line_drop_period_addr = 0x58F0, + .debug_halt_status_addr = 0x58F8, + .debug_misr_val0_addr = 0x58FC, + .debug_misr_val1_addr = 0x5900, + .debug_misr_val2_addr = 0x5904, + .debug_misr_val3_addr = 0x5908, + 
.format_measure_cfg0_addr = 0x590C, + .format_measure_cfg1_addr = 0x5910, + .format_measure_cfg2_addr = 0x5914, + .format_measure0_addr = 0x5918, + .format_measure1_addr = 0x591C, + .format_measure2_addr = 0x5920, + .timestamp_curr0_sof_addr = 0x5924, + .timestamp_curr1_sof_addr = 0x5928, + .timestamp_perv0_sof_addr = 0x592C, + .timestamp_perv1_sof_addr = 0x5930, + .timestamp_curr0_eof_addr = 0x5934, + .timestamp_curr1_eof_addr = 0x5938, + .timestamp_perv0_eof_addr = 0x593C, + .timestamp_perv1_eof_addr = 0x5940, + .batch_id_cfg0_addr = 0x5944, + .batch_id_cfg1_addr = 0x5948, + .batch_period_cfg_addr = 0x594C, + .batch_stream_id_cfg_addr = 0x5950, + .epoch0_cfg_batch_id0_addr = 0x5954, + .epoch1_cfg_batch_id0_addr = 0x5958, + .epoch0_cfg_batch_id1_addr = 0x595C, + .epoch1_cfg_batch_id1_addr = 0x5960, + .epoch0_cfg_batch_id2_addr = 0x5964, + .epoch1_cfg_batch_id2_addr = 0x5968, + .epoch0_cfg_batch_id3_addr = 0x596C, + .epoch1_cfg_batch_id3_addr = 0x5970, + .epoch0_cfg_batch_id4_addr = 0x5974, + .epoch1_cfg_batch_id4_addr = 0x5978, + .epoch0_cfg_batch_id5_addr = 0x597C, + .epoch1_cfg_batch_id5_addr = 0x5980, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x5984, + .path_frame_id = 0x5988, + .debug_sim_monitor = 0x598C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + 
.offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x400, + .aup_mask = 0x400, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x40000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, + +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_rdi_3_reg_info = { + .irq_status_addr = 0x0254, + .irq_mask_addr = 0x0258, + .irq_clear_addr = 0x025C, + .irq_set_addr = 0x0260, + .cfg0_addr = 0x5A80, + .ctrl_addr = 0x5A88, + .debug_clr_cmd_addr = 0x5A8C, + .multi_vcdt_cfg0_addr = 0x5A90, + .multi_vcdt_cfg1_addr = 0x5A84, + .cfg1_addr = 0x5A94, + .debug_byte_cntr_ping_addr = 0x5AA8, + .debug_byte_cntr_pong_addr = 0x5AAC, + .camif_frame_cfg_addr = 0x5AB0, + .epoch_irq_cfg_addr = 0x5AB4, + .epoch0_subsample_ptrn_addr = 0x5AB8, + .epoch1_subsample_ptrn_addr = 0x5ABC, + .debug_rup_aup_status = 0x5AC0, + .debug_camif_1_addr = 0x5AC4, + .debug_camif_0_addr = 0x5AC8, + .frm_drop_pattern_addr = 0x5ACC, + .frm_drop_period_addr = 0x5AD0, + .irq_subsample_pattern_addr = 0x5AD4, + .irq_subsample_period_addr = 0x5AD8, + .hcrop_addr = 0x5ADC, + .vcrop_addr = 0x5AE0, + .pix_drop_pattern_addr = 0x5AE4, + .pix_drop_period_addr = 0x5AE8, + .line_drop_pattern_addr = 0x5AEC, + .line_drop_period_addr = 0x5AF0, + .debug_halt_status_addr = 0x5AF8, + .debug_misr_val0_addr = 0x5AFC, + .debug_misr_val1_addr = 0x5B00, + .debug_misr_val2_addr = 0x5B04, + .debug_misr_val3_addr = 0x5B08, + .format_measure_cfg0_addr = 0x5B0C, + .format_measure_cfg1_addr = 0x5B10, + .format_measure_cfg2_addr = 0x5B14, + .format_measure0_addr = 0x5B18, + .format_measure1_addr = 0x5B1C, + .format_measure2_addr = 0x5B20, + .timestamp_curr0_sof_addr = 0x5B24, + .timestamp_curr1_sof_addr = 0x5B28, + .timestamp_perv0_sof_addr = 
0x5B2C, + .timestamp_perv1_sof_addr = 0x5B30, + .timestamp_curr0_eof_addr = 0x5B34, + .timestamp_curr1_eof_addr = 0x5B38, + .timestamp_perv0_eof_addr = 0x5B3C, + .timestamp_perv1_eof_addr = 0x5B40, + .batch_id_cfg0_addr = 0x5B44, + .batch_id_cfg1_addr = 0x5B48, + .batch_period_cfg_addr = 0x5B4C, + .batch_stream_id_cfg_addr = 0x5B50, + .epoch0_cfg_batch_id0_addr = 0x5B54, + .epoch1_cfg_batch_id0_addr = 0x5B58, + .epoch0_cfg_batch_id1_addr = 0x5B5C, + .epoch1_cfg_batch_id1_addr = 0x5B60, + .epoch0_cfg_batch_id2_addr = 0x5B64, + .epoch1_cfg_batch_id2_addr = 0x5B68, + .epoch0_cfg_batch_id3_addr = 0x5B6C, + .epoch1_cfg_batch_id3_addr = 0x5B70, + .epoch0_cfg_batch_id4_addr = 0x5B74, + .epoch1_cfg_batch_id4_addr = 0x5B78, + .epoch0_cfg_batch_id5_addr = 0x5B7C, + .epoch1_cfg_batch_id5_addr = 0x5B80, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x5B84, + .path_frame_id = 0x5B88, + .debug_sim_monitor = 0x5B8C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x800, + .aup_mask = 0x800, + 
.rup_aup_set_mask = 0x1, + .top_irq_mask = {0x80000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_1080_rdi_4_reg_info = { + .irq_status_addr = 0x0264, + .irq_mask_addr = 0x0268, + .irq_clear_addr = 0x026C, + .irq_set_addr = 0x0270, + .cfg0_addr = 0x5C80, + .ctrl_addr = 0x5C88, + .debug_clr_cmd_addr = 0x5C8C, + .multi_vcdt_cfg0_addr = 0x5C90, + .multi_vcdt_cfg1_addr = 0x5C84, + .cfg1_addr = 0x5C94, + .debug_byte_cntr_ping_addr = 0x5CA8, + .debug_byte_cntr_pong_addr = 0x5CAC, + .camif_frame_cfg_addr = 0x5CB0, + .epoch_irq_cfg_addr = 0x5CB4, + .epoch0_subsample_ptrn_addr = 0x5CB8, + .epoch1_subsample_ptrn_addr = 0x5CBC, + .debug_rup_aup_status = 0x5CC0, + .debug_camif_1_addr = 0x5CC4, + .debug_camif_0_addr = 0x5CC8, + .frm_drop_pattern_addr = 0x5CCC, + .frm_drop_period_addr = 0x5CD0, + .irq_subsample_pattern_addr = 0x5CD4, + .irq_subsample_period_addr = 0x5CD8, + .hcrop_addr = 0x5CDC, + .vcrop_addr = 0x5CE0, + .pix_drop_pattern_addr = 0x5CE4, + .pix_drop_period_addr = 0x5CE8, + .line_drop_pattern_addr = 0x5CEC, + .line_drop_period_addr = 0x5CF0, + .debug_halt_status_addr = 0x5CF8, + .debug_misr_val0_addr = 0x5CFC, + .debug_misr_val1_addr = 0x5D00, + .debug_misr_val2_addr = 0x5D04, + .debug_misr_val3_addr = 0x5D08, + .format_measure_cfg0_addr = 0x5D0C, + .format_measure_cfg1_addr = 0x5D10, + .format_measure_cfg2_addr = 0x5D14, + .format_measure0_addr = 0x5D18, + .format_measure1_addr = 0x5D1C, + .format_measure2_addr = 0x5D20, + .timestamp_curr0_sof_addr = 0x5D24, + .timestamp_curr1_sof_addr = 0x5D28, + .timestamp_perv0_sof_addr = 0x5D2C, + .timestamp_perv1_sof_addr = 0x5D30, + .timestamp_curr0_eof_addr = 0x5D34, + .timestamp_curr1_eof_addr = 0x5D38, + .timestamp_perv0_eof_addr = 0x5D3C, + .timestamp_perv1_eof_addr = 0x5D40, + .batch_id_cfg0_addr = 0x5D44, + .batch_id_cfg1_addr = 0x5D48, + .batch_period_cfg_addr = 0x5D4C, + 
.batch_stream_id_cfg_addr = 0x5D50, + .epoch0_cfg_batch_id0_addr = 0x5D54, + .epoch1_cfg_batch_id0_addr = 0x5D58, + .epoch0_cfg_batch_id1_addr = 0x5D5C, + .epoch1_cfg_batch_id1_addr = 0x5D60, + .epoch0_cfg_batch_id2_addr = 0x5D64, + .epoch1_cfg_batch_id2_addr = 0x5D68, + .epoch0_cfg_batch_id3_addr = 0x5D6C, + .epoch1_cfg_batch_id3_addr = 0x5D70, + .epoch0_cfg_batch_id4_addr = 0x5D74, + .epoch1_cfg_batch_id4_addr = 0x5D78, + .epoch0_cfg_batch_id5_addr = 0x5D7c, + .epoch1_cfg_batch_id5_addr = 0x5D80, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x5D84, + .path_frame_id = 0x5D88, + .debug_sim_monitor = 0x5D8C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x1000, + .aup_mask = 0x1000, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x100000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_1080_rx_debug_mask = { + + .evt_bitmap = { + 
BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_RX2_IRQ) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_LOST), + + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED), + }, + + + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 8, + .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 2, + 
.bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 24, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 20, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 21, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 22, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 25, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 26, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 27, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 28, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 29, + .bit_pos[CAM_IFE_CSID_RX_RX2_IRQ] = 31, + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_LOST] = 12, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_LOST] = 13, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_LOST] = 14, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_LOST] = 15, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_LOST] = 16, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_LOST] = 17, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_LOST] = 18, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_LOST] = 19, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_1080_top_debug_mask = { + + .evt_bitmap = { + 0ULL, + + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP) | + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN) | + BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN), + }, + + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP] = 0, + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN] = 1, + .bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN] = 2, +}; + + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + cam_ife_csid_1080_csi2_reg_info = { + .irq_status_addr = {0x01B0, 0x01C0}, + .irq_mask_addr = {0x01B4, 0x01C4}, + .irq_clear_addr = {0x01B8, 0x01C8}, + .irq_set_addr = {0x01BC, 0x01CC}, + /*CSI2 rx control */ + .cfg0_addr = 0x0880, + .cfg1_addr = 0x0884, + .capture_ctrl_addr = 0x0888, + .rst_strobes_addr = 0x088C, + .cap_unmap_long_pkt_hdr_0_addr = 0x0890, + .cap_unmap_long_pkt_hdr_1_addr = 0x0894, + .captured_short_pkt_0_addr = 0x0898, + .captured_short_pkt_1_addr = 0x089C, + .captured_long_pkt_0_addr = 0x08A0, + .captured_long_pkt_1_addr = 0x08A4, + .captured_long_pkt_ftr_addr = 0x08A8, + 
.captured_cphy_pkt_hdr_addr = 0x08AC, + .lane0_misr_addr = 0x08B0, + .lane1_misr_addr = 0x08B4, + .lane2_misr_addr = 0x08B8, + .lane3_misr_addr = 0x08BC, + .total_pkts_rcvd_addr = 0x08C0, + .stats_ecc_addr = 0x08C4, + .total_crc_err_addr = 0x08C8, + .de_scramble_type3_cfg0_addr = 0x08CC, + .de_scramble_type3_cfg1_addr = 0x08D0, + .de_scramble_type2_cfg0_addr = 0x08D4, + .de_scramble_type2_cfg1_addr = 0x08D8, + .de_scramble_type1_cfg0_addr = 0x08DC, + .de_scramble_type1_cfg1_addr = 0x08E0, + .de_scramble_type0_cfg0_addr = 0x08E4, + .de_scramble_type0_cfg1_addr = 0x08E8, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff0000, + .expected_crc_mask = 0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + .phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, + .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .rup_aup_latch_shift = 13, + .rup_aup_latch_supported = true, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .fatal_err_mask = {0x25fff000, 0x0}, + .part_fatal_err_mask = {0x02000000, 0x0}, + .non_fatal_err_mask = {0x08000000, 0x0}, + .top_irq_mask = {0x4, 0x0}, + .rx_rx2_irq_mask = 0x80000000, +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_1080_cmn_reg_info = { + .hw_version_addr = 
0x0000, + .cfg0_addr = 0x0100, + .global_cmd_addr = 0x0104, + .reset_cfg_addr = 0x0108, + .reset_cmd_addr = 0x010C, + .irq_cmd_addr = 0x0110, + .rup_cmd_addr = 0x0114, + .aup_cmd_addr = 0x0118, + .rup_aup_cmd_addr = 0x011C, + .offline_cmd_addr = 0x0120, + .shdr_master_slave_cfg_addr = 0x0124, + .multi_sensor_mode_addr = 0x0128, + .top_irq_status_addr = {0x0180, 0x0190}, + .top_irq_mask_addr = {0x0184, 0x0194}, + .top_irq_clear_addr = {0x0188, 0x0198}, + .top_irq_set_addr = {0x018C, 0x019C}, + .buf_done_irq_status_addr = 0x01A0, + .buf_done_irq_mask_addr = 0x01A4, + .buf_done_irq_clear_addr = 0x01A8, + .buf_done_irq_set_addr = 0x01AC, + .test_bus_ctrl = 0x07DC, + .test_bus_debug = 0x07F8, + .drv_cfg0_addr = 0x0800, + .drv_cfg1_addr = 0x0804, + .drv_cfg2_addr = 0x0808, + .debug_drv_0_addr = 0x080C, + .debug_drv_1_addr = 0x0810, + .debug_sensor_hbi_irq_vcdt_addr = 0x29C, + .debug_violation_addr = 0x7E0, + .debug_cfg_addr = 0x7E4, + .debug_err_vec_irq = {0x2D4, 0x2D8, 0x2DC}, + .debug_err_vec_cfg = 0x2D0, + .debug_err_vec_ts_lb = 0x2E0, + .debug_err_vec_ts_mb = 0x2E4, + .rx_mode_id_cfg1_addr = 0x0870, + + /*configurations */ + .major_version = 10, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 5, + .num_pix = 3, + .num_ppp = 1, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .vc_shift_val = 22, + .vc_mask = 0x1F, + .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + .decode_format2_shift_val = 0, + .decode_format3_shift_val = 4, + .decode_format1_supported = true, + .decode_format_mask = 0xF, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_dt2_shift_val = 8, + .multi_vcdt_dt3_shift_val = 16, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .mup_shift_val = 4, + .shdr_slave_ppp_shift = 16, + .shdr_slave_rdi2_shift = 18, 
+ .shdr_slave_rdi1_shift = 17, + .shdr_master_rdi0_shift = 4, + .shdr_master_slave_en_shift = 0, + .drv_en_shift = 0, + .drv_rup_en_shift = 0, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .num_dt_supported = 4, + .ts_comb_vcdt_en = true, + .direct_cid_config = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .top_err_irq_mask = {0x00000002, 0x18}, + .rst_loc_path_only_val = 0x0, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .top_reset_irq_mask = {0x1,}, + .rst_location_shift_val = 4, + .rst_mode_shift_val = 0, + .epoch_factor = 50, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .format_measure_max_hbi_shift = 16, + .format_measure_min_hbi_mask = 0xFFF, + .top_buf_done_irq_mask = 0x8, + .decode_format_payload_only = 0xF, + .timestamp_enabled_in_cfg0 = true, + .camif_irq_support = true, + .capabilities = CAM_IFE_CSID_CAP_SPLIT_RUP_AUP | + CAM_IFE_CSID_CAP_SKIP_PATH_CFG1 | + CAM_IFE_CSID_CAP_SKIP_EPOCH_CFG | + CAM_IFE_CSID_CAP_DEBUG_ERR_VEC | + CAM_IFE_CSID_CAP_TOP_MASK_ALL_IRQS, + .top_top2_irq_mask = 0x80000000, + .drv_rup_en_val_map = { + 2, /*RDI0 */ + 3, /*RDI1 */ + 4, /*RDI2 */ + 5, /*RDI3 */ + 6, /*RDI4 */ + 0, /*IPP */ + 1, /*PPP */ + 0, /*UDI0 */ + 0, /*UDI1 */ + 0, /*UDI2 */ + }, + 
.drv_path_idle_en_val_map = { + BIT(4), /*CAM_ISP_PXL_PATH */ + BIT(5), /*CAM_ISP_PPP_PATH */ + 0, /* LCR not applicable */ + BIT(6), /*CAM_ISP_RDI0_PATH */ + BIT(7), /*CAM_ISP_RDI1_PATH */ + BIT(8), /*CAM_ISP_RDI2_PATH */ + BIT(9), /*CAM_ISP_RDI3_PATH */ + BIT(10), /*CAM_ISP_RDI4_PATH */ + }, + .path_domain_id_cfg0 = 0x0, + .path_domain_id_cfg1 = 0x4, + .path_domain_id_cfg2 = 0x8, + .phy_sel_base_idx = 1, +}; + +struct cam_ife_csid_ver2_mc_reg_info + cam_ife_csid_1080_ipp_mc_reg_info = { + .irq_comp_cfg0_addr = 0x0294, + .ipp_src_ctxt_mask_shift = 4, + .ipp_dst_ctxt_mask_shift = 0, + .comp_rup_mask = 0x4000000, + .comp_epoch0_mask = 0x8000000, + .comp_eof_mask = 0x20000000, + .comp_sof_mask = 0x40000000, + .comp_subgrp0_mask = 0x1000000, + .comp_subgrp2_mask = 0x2000000, +}; + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_1080_reg_info = { + .top_irq_reg_info = cam_ife_csid_1080_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_1080_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_4], + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + NULL, + NULL, + NULL, + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP_1], + &cam_ife_csid_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP_2], + }, + .buf_done_irq_reg_info = &cam_ife_csid_1080_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_1080_cmn_reg_info, + .csi2_reg = &cam_ife_csid_1080_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_1080_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP_1] = &cam_ife_csid_1080_ipp_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP_2] = 
&cam_ife_csid_1080_ipp_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = &cam_ife_csid_1080_ppp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_1080_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_1080_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_1080_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_1080_rdi_3_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_4] = &cam_ife_csid_1080_rdi_4_reg_info, + .ipp_mc_reg = &cam_ife_csid_1080_ipp_mc_reg_info, + .need_top_cfg = 0x0, + .top_irq_desc = &cam_ife_csid_1080_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_1080_rx_irq_desc, + .debug_vec_desc = &cam_ife_csid_1080_debug_vec_desc, + .path_irq_desc = cam_ife_csid_1080_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_1080_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_1080_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_1080_path_irq_desc), + .top_debug_mask = &cam_ife_csid_1080_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_1080_rx_debug_mask, + .num_top_regs = 2, + .num_rx_regs = 2, +}; +#endif /*_CAM_IFE_CSID_1080_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h new file mode 100644 index 0000000000..3336e19426 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h @@ -0,0 +1,409 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ +#ifndef _CAM_IFE_CSID_170_H_ +#define _CAM_IFE_CSID_170_H_ + +#include +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" +#include "cam_ife_csid_dev.h" +#include "camera_main.h" + +#define CAM_CSID_VERSION_V170 0x10070000 + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_170_ipp_reg_info = { + .irq_status_addr = 0x30, + .irq_mask_addr = 0x34, + .irq_clear_addr = 0x38, + .irq_set_addr = 0x3c, + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .ctrl_addr = 0x208, + .frm_drop_pattern_addr = 0x20c, + .frm_drop_period_addr = 0x210, + .irq_subsample_pattern_addr = 0x214, + .irq_subsample_period_addr = 0x218, + .hcrop_addr = 0x21c, + .vcrop_addr = 0x220, + .pix_drop_pattern_addr = 0x224, + .pix_drop_period_addr = 0x228, + .line_drop_pattern_addr = 0x22c, + .line_drop_period_addr = 0x230, + .rst_strobes_addr = 0x240, + .status_addr = 0x254, + .misr_val_addr = 0x258, + .format_measure_cfg0_addr = 0x270, + .format_measure_cfg1_addr = 0x274, + .format_measure0_addr = 0x278, + .format_measure1_addr = 0x27c, + .format_measure2_addr = 0x280, + .timestamp_curr0_sof_addr = 0x290, + .timestamp_curr1_sof_addr = 0x294, + .timestamp_prev0_sof_addr = 0x298, + .timestamp_prev1_sof_addr = 0x29c, + .timestamp_curr0_eof_addr = 0x2a0, + .timestamp_curr1_eof_addr = 0x2a4, + .timestamp_prev0_eof_addr = 0x2a8, + .timestamp_prev1_eof_addr = 0x2ac, + /* configurations */ + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .pix_store_en_shift_val = 7, + .halt_master_sel_master_val = 0, + .halt_master_sel_shift = 4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_master = 2, + .halt_mode_slave = 3, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .early_eof_en_shift_val = 29, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 0, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + 
cam_ife_csid_170_rdi_0_reg_info = { + .irq_status_addr = 0x40, + .irq_mask_addr = 0x44, + .irq_clear_addr = 0x48, + .irq_set_addr = 0x4c, + .cfg0_addr = 0x300, + .cfg1_addr = 0x304, + .ctrl_addr = 0x308, + .frm_drop_pattern_addr = 0x30c, + .frm_drop_period_addr = 0x310, + .irq_subsample_pattern_addr = 0x314, + .irq_subsample_period_addr = 0x318, + .hcrop_addr = 0x31c, + .vcrop_addr = 0x320, + .pix_drop_pattern_addr = 0x324, + .pix_drop_period_addr = 0x328, + .line_drop_pattern_addr = 0x32c, + .line_drop_period_addr = 0x330, + .rst_strobes_addr = 0x340, + .status_addr = 0x350, + .misr_val0_addr = 0x354, + .misr_val1_addr = 0x358, + .misr_val2_addr = 0x35c, + .misr_val3_addr = 0x360, + .format_measure_cfg0_addr = 0x370, + .format_measure_cfg1_addr = 0x374, + .format_measure0_addr = 0x378, + .format_measure1_addr = 0x37c, + .format_measure2_addr = 0x380, + .timestamp_curr0_sof_addr = 0x390, + .timestamp_curr1_sof_addr = 0x394, + .timestamp_prev0_sof_addr = 0x398, + .timestamp_prev1_sof_addr = 0x39c, + .timestamp_curr0_eof_addr = 0x3a0, + .timestamp_curr1_eof_addr = 0x3a4, + .timestamp_prev0_eof_addr = 0x3a8, + .timestamp_prev1_eof_addr = 0x3ac, + .byte_cntr_ping_addr = 0x3e0, + .byte_cntr_pong_addr = 0x3e4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .plain_fmt_shift_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .timestamp_en_shift_val = 1, + .format_measure_en_shift_val = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_170_rdi_1_reg_info = { + .irq_status_addr = 0x50, + .irq_mask_addr = 0x54, + .irq_clear_addr = 0x58, + .irq_set_addr = 0x5c, + .cfg0_addr = 0x400, + .cfg1_addr = 0x404, + .ctrl_addr = 0x408, + .frm_drop_pattern_addr = 0x40c, + .frm_drop_period_addr = 0x410, + .irq_subsample_pattern_addr = 0x414, + 
.irq_subsample_period_addr = 0x418, + .hcrop_addr = 0x41c, + .vcrop_addr = 0x420, + .pix_drop_pattern_addr = 0x424, + .pix_drop_period_addr = 0x428, + .line_drop_pattern_addr = 0x42c, + .line_drop_period_addr = 0x430, + .rst_strobes_addr = 0x440, + .status_addr = 0x450, + .misr_val0_addr = 0x454, + .misr_val1_addr = 0x458, + .misr_val2_addr = 0x45c, + .misr_val3_addr = 0x460, + .format_measure_cfg0_addr = 0x470, + .format_measure_cfg1_addr = 0x474, + .format_measure0_addr = 0x478, + .format_measure1_addr = 0x47c, + .format_measure2_addr = 0x480, + .timestamp_curr0_sof_addr = 0x490, + .timestamp_curr1_sof_addr = 0x494, + .timestamp_prev0_sof_addr = 0x498, + .timestamp_prev1_sof_addr = 0x49c, + .timestamp_curr0_eof_addr = 0x4a0, + .timestamp_curr1_eof_addr = 0x4a4, + .timestamp_prev0_eof_addr = 0x4a8, + .timestamp_prev1_eof_addr = 0x4ac, + .byte_cntr_ping_addr = 0x4e0, + .byte_cntr_pong_addr = 0x4e4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .plain_fmt_shift_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .timestamp_en_shift_val = 1, + .format_measure_en_shift_val = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_170_rdi_2_reg_info = { + .irq_status_addr = 0x60, + .irq_mask_addr = 0x64, + .irq_clear_addr = 0x68, + .irq_set_addr = 0x6c, + .cfg0_addr = 0x500, + .cfg1_addr = 0x504, + .ctrl_addr = 0x508, + .frm_drop_pattern_addr = 0x50c, + .frm_drop_period_addr = 0x510, + .irq_subsample_pattern_addr = 0x514, + .irq_subsample_period_addr = 0x518, + .hcrop_addr = 0x51c, + .vcrop_addr = 0x520, + .pix_drop_pattern_addr = 0x524, + .pix_drop_period_addr = 0x528, + .line_drop_pattern_addr = 0x52c, + .line_drop_period_addr = 0x530, + .yuv_chroma_conversion_addr = 0x534, + .rst_strobes_addr = 0x540, + .status_addr = 0x550, + .misr_val0_addr = 
0x554, + .misr_val1_addr = 0x558, + .misr_val2_addr = 0x55c, + .misr_val3_addr = 0x560, + .format_measure_cfg0_addr = 0x570, + .format_measure_cfg1_addr = 0x574, + .format_measure0_addr = 0x578, + .format_measure1_addr = 0x57c, + .format_measure2_addr = 0x580, + .timestamp_curr0_sof_addr = 0x590, + .timestamp_curr1_sof_addr = 0x594, + .timestamp_prev0_sof_addr = 0x598, + .timestamp_prev1_sof_addr = 0x59c, + .timestamp_curr0_eof_addr = 0x5a0, + .timestamp_curr1_eof_addr = 0x5a4, + .timestamp_prev0_eof_addr = 0x5a8, + .timestamp_prev1_eof_addr = 0x5ac, + .byte_cntr_ping_addr = 0x5e0, + .byte_cntr_pong_addr = 0x5e4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .plain_fmt_shift_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .timestamp_en_shift_val = 1, + .format_measure_en_shift_val = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_csi2_rx_reg_info + cam_ife_csid_170_csi2_reg_info = { + .irq_status_addr = 0x20, + .irq_mask_addr = 0x24, + .irq_clear_addr = 0x28, + .irq_set_addr = 0x2c, + /*CSI2 rx control */ + .cfg0_addr = 0x100, + .cfg1_addr = 0x104, + .capture_ctrl_addr = 0x108, + .rst_strobes_addr = 0x110, + .de_scramble_cfg0_addr = 0x114, + .de_scramble_cfg1_addr = 0x118, + .cap_unmap_long_pkt_hdr_0_addr = 0x120, + .cap_unmap_long_pkt_hdr_1_addr = 0x124, + .captured_short_pkt_0_addr = 0x128, + .captured_short_pkt_1_addr = 0x12c, + .captured_long_pkt_0_addr = 0x130, + .captured_long_pkt_1_addr = 0x134, + .captured_long_pkt_ftr_addr = 0x138, + .captured_cphy_pkt_hdr_addr = 0x13c, + .lane0_misr_addr = 0x150, + .lane1_misr_addr = 0x154, + .lane2_misr_addr = 0x158, + .lane3_misr_addr = 0x15c, + .total_pkts_rcvd_addr = 0x160, + .stats_ecc_addr = 0x164, + .total_crc_err_addr = 0x168, + + .rst_srb_all = 0x3FFF, + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + 
.misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0x3, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 0xffff, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .fatal_err_mask = 0x78000, + .part_fatal_err_mask = 0x1801800, + .non_fatal_err_mask = 0x380000, +}; + +static struct cam_ife_csid_ver1_tpg_reg_info + cam_ife_csid_170_tpg_reg_info = { + /*CSID TPG control */ + .ctrl_addr = 0x600, + .vc_cfg0_addr = 0x604, + .vc_cfg1_addr = 0x608, + .lfsr_seed_addr = 0x60c, + .dt_n_cfg_0_addr = 0x610, + .dt_n_cfg_1_addr = 0x614, + .dt_n_cfg_2_addr = 0x618, + .color_bars_cfg_addr = 0x640, + .color_box_cfg_addr = 0x644, + .common_gen_cfg_addr = 0x648, + .cgen_n_cfg_addr = 0x650, + .cgen_n_x0_addr = 0x654, + .cgen_n_x1_addr = 0x658, + .cgen_n_x2_addr = 0x65c, + .cgen_n_xy_addr = 0x660, + .cgen_n_y1_addr = 0x664, + .cgen_n_y2_addr = 0x668, + + /* configurations */ + .dtn_cfg_offset = 0xc, + .cgen_cfg_offset = 0x20, + .cpas_ife_reg_offset = 0x28, + .hbi = 0x740, + .vbi = 0x3FF, + .lfsr_seed = 0x12345678, + .ctrl_cfg = 0x408007, + .line_interleave_mode = 0x1, + .color_bar = 1, + .num_frames = 0, + .num_active_dt = 0, + .payload_mode = 0x8, + .num_active_lanes_mask = 0x30, + .fmt_shift = 16, + .num_frame_shift = 16, + .width_shift = 16, + .vbi_shift = 12, + .line_interleave_shift = 10, + .num_active_dt_shift = 8, + .color_bar_shift = 5, + .height_shift = 0, + .hbi_shift = 0, +}; + +static struct cam_ife_csid_ver1_common_reg_info + cam_ife_csid_170_cmn_reg_info = { + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .ctrl_addr = 0x8, + .reset_addr 
= 0xc, + .rst_strobes_addr = 0x10, + + .test_bus_ctrl_addr = 0x14, + .top_irq_status_addr = 0x70, + .top_irq_mask_addr = 0x74, + .top_irq_clear_addr = 0x78, + .top_irq_set_addr = 0x7c, + .irq_cmd_addr = 0x80, + + /*configurations */ + .major_version = 1, + .minor_version = 7, + .version_incr = 0, + .num_rdis = 3, + .num_pix = 1, + .num_ppp = 0, + .rst_sw_reg_stb = 1, + .rst_hw_reg_stb = 0x1e, + .rst_sw_hw_reg_stb = 0x1f, + .path_rst_stb_all = 0x7f, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .fmt_shift_val = 12, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0x0, + .measure_en_hbi_vbi_cnt_mask = 0xC, + .timestamp_strobe_val = 0x2, + .timestamp_stb_sel_shift_val = 0, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, +}; + +static struct cam_ife_csid_ver1_reg_info cam_ife_csid_170_reg_info = { + .cmn_reg = &cam_ife_csid_170_cmn_reg_info, + .csi2_reg = &cam_ife_csid_170_csi2_reg_info, + .ipp_reg = &cam_ife_csid_170_ipp_reg_info, + .ppp_reg = NULL, + .rdi_reg = { + &cam_ife_csid_170_rdi_0_reg_info, + &cam_ife_csid_170_rdi_1_reg_info, + &cam_ife_csid_170_rdi_2_reg_info, + NULL, + }, + .tpg_reg = &cam_ife_csid_170_tpg_reg_info, +}; +#endif /*_CAM_IFE_CSID_170_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170_200.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170_200.h new file mode 100644 index 0000000000..56c415fa32 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170_200.h @@ -0,0 +1,489 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_170_200_H_ +#define _CAM_IFE_CSID_170_200_H_ + +#include +#include "camera_main.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" + +#define CAM_CSID_VERSION_V170 0x10070000 + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_170_200_ipp_reg_info = { + .irq_status_addr = 0x30, + .irq_mask_addr = 0x34, + .irq_clear_addr = 0x38, + .irq_set_addr = 0x3c, + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .ctrl_addr = 0x208, + .frm_drop_pattern_addr = 0x20c, + .frm_drop_period_addr = 0x210, + .irq_subsample_pattern_addr = 0x214, + .irq_subsample_period_addr = 0x218, + .hcrop_addr = 0x21c, + .vcrop_addr = 0x220, + .pix_drop_pattern_addr = 0x224, + .pix_drop_period_addr = 0x228, + .line_drop_pattern_addr = 0x22c, + .line_drop_period_addr = 0x230, + .rst_strobes_addr = 0x240, + .status_addr = 0x254, + .misr_val_addr = 0x258, + .format_measure_cfg0_addr = 0x270, + .format_measure_cfg1_addr = 0x274, + .format_measure0_addr = 0x278, + .format_measure1_addr = 0x27c, + .format_measure2_addr = 0x280, + .timestamp_curr0_sof_addr = 0x290, + .timestamp_curr1_sof_addr = 0x294, + .timestamp_prev0_sof_addr = 0x298, + .timestamp_prev1_sof_addr = 0x29c, + .timestamp_curr0_eof_addr = 0x2a0, + .timestamp_curr1_eof_addr = 0x2a4, + .timestamp_prev0_eof_addr = 0x2a8, + .timestamp_prev1_eof_addr = 0x2ac, + /* configurations */ + .pix_store_en_shift_val = 7, + .early_eof_en_shift_val = 29, + .bin_qcfa_en_shift_val = 30, + .bin_h_en_shift_val = 2, + .bin_en_shift_val = 2, + .binning_supported = 0x3, + .halt_master_sel_master_val = 0, + .halt_master_sel_shift = 4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_master = 2, + .halt_mode_slave = 3, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + 
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.drop_v_en_shift_val = 4,
+	.drop_h_en_shift_val = 3,
+	.timestamp_en_shift_val = 2,
+	.format_measure_en_shift_val = 0,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* PPP path: register offsets (0x7xx range) and cfg bit positions */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_170_200_ppp_reg_info = {
+	.irq_status_addr = 0xa0,
+	.irq_mask_addr = 0xa4,
+	.irq_clear_addr = 0xa8,
+	.irq_set_addr = 0xac,
+	.cfg0_addr = 0x700,
+	.cfg1_addr = 0x704,
+	.ctrl_addr = 0x708,
+	.frm_drop_pattern_addr = 0x70c,
+	.frm_drop_period_addr = 0x710,
+	.irq_subsample_pattern_addr = 0x714,
+	.irq_subsample_period_addr = 0x718,
+	.hcrop_addr = 0x71c,
+	.vcrop_addr = 0x720,
+	.pix_drop_pattern_addr = 0x724,
+	.pix_drop_period_addr = 0x728,
+	.line_drop_pattern_addr = 0x72c,
+	.line_drop_period_addr = 0x730,
+	.rst_strobes_addr = 0x740,
+	.status_addr = 0x754,
+	.misr_val_addr = 0x758,
+	.format_measure_cfg0_addr = 0x770,
+	.format_measure_cfg1_addr = 0x774,
+	.format_measure0_addr = 0x778,
+	.format_measure1_addr = 0x77c,
+	.format_measure2_addr = 0x780,
+	.timestamp_curr0_sof_addr = 0x790,
+	.timestamp_curr1_sof_addr = 0x794,
+	.timestamp_prev0_sof_addr = 0x798,
+	.timestamp_prev1_sof_addr = 0x79c,
+	.timestamp_curr0_eof_addr = 0x7a0,
+	.timestamp_curr1_eof_addr = 0x7a4,
+	.timestamp_prev0_eof_addr = 0x7a8,
+	.timestamp_prev1_eof_addr = 0x7ac,
+	/* configurations */
+	.halt_master_sel_master_val = 3,
+	.halt_master_sel_shift = 4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_master = 2,
+	.halt_mode_slave = 3,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.pix_store_en_shift_val = 7,
+	.early_eof_en_shift_val = 29,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.drop_v_en_shift_val = 4,
+	.drop_h_en_shift_val = 3,
+	.timestamp_en_shift_val = 2,
+	.format_measure_en_shift_val = 0,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* RDI0 path: register offsets (0x3xx range); supports MIPI packing and byte counters */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_170_200_rdi_0_reg_info = {
+	.irq_status_addr = 0x40,
+	.irq_mask_addr = 0x44,
+	.irq_clear_addr = 0x48,
+	.irq_set_addr = 0x4c,
+	.cfg0_addr = 0x300,
+	.cfg1_addr = 0x304,
+	.ctrl_addr = 0x308,
+	.frm_drop_pattern_addr = 0x30c,
+	.frm_drop_period_addr = 0x310,
+	.irq_subsample_pattern_addr = 0x314,
+	.irq_subsample_period_addr = 0x318,
+	.hcrop_addr = 0x31c,
+	.vcrop_addr = 0x320,
+	.pix_drop_pattern_addr = 0x324,
+	.pix_drop_period_addr = 0x328,
+	.line_drop_pattern_addr = 0x32c,
+	.line_drop_period_addr = 0x330,
+	.rst_strobes_addr = 0x340,
+	.status_addr = 0x350,
+	.misr_val0_addr = 0x354,
+	.misr_val1_addr = 0x358,
+	.misr_val2_addr = 0x35c,
+	.misr_val3_addr = 0x360,
+	.format_measure_cfg0_addr = 0x370,
+	.format_measure_cfg1_addr = 0x374,
+	.format_measure0_addr = 0x378,
+	.format_measure1_addr = 0x37c,
+	.format_measure2_addr = 0x380,
+	.timestamp_curr0_sof_addr = 0x390,
+	.timestamp_curr1_sof_addr = 0x394,
+	.timestamp_prev0_sof_addr = 0x398,
+	.timestamp_prev1_sof_addr = 0x39c,
+	.timestamp_curr0_eof_addr = 0x3a0,
+	.timestamp_curr1_eof_addr = 0x3a4,
+	.timestamp_prev0_eof_addr = 0x3a8,
+	.timestamp_prev1_eof_addr = 0x3ac,
+	.byte_cntr_ping_addr = 0x3e0,
+	.byte_cntr_pong_addr = 0x3e4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.ccif_violation_en = 1,
+	.plain_fmt_shift_val = 10,
+	.mipi_pack_supported = 1,
+	.packing_fmt_shift_val = 30,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 1,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* RDI1 path: same layout as RDI0, shifted to the 0x4xx register range */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_170_200_rdi_1_reg_info = {
+	.irq_status_addr = 0x50,
+	.irq_mask_addr = 0x54,
+	.irq_clear_addr = 0x58,
+	.irq_set_addr = 0x5c,
+	.cfg0_addr = 0x400,
+	.cfg1_addr = 0x404,
+	.ctrl_addr = 0x408,
+	.frm_drop_pattern_addr = 0x40c,
+	.frm_drop_period_addr = 0x410,
+	.irq_subsample_pattern_addr = 0x414,
+	.irq_subsample_period_addr = 0x418,
+	.hcrop_addr = 0x41c,
+	.vcrop_addr = 0x420,
+	.pix_drop_pattern_addr = 0x424,
+	.pix_drop_period_addr = 0x428,
+	.line_drop_pattern_addr = 0x42c,
+	.line_drop_period_addr = 0x430,
+	.rst_strobes_addr = 0x440,
+	.status_addr = 0x450,
+	.misr_val0_addr = 0x454,
+	.misr_val1_addr = 0x458,
+	.misr_val2_addr = 0x45c,
+	.misr_val3_addr = 0x460,
+	.format_measure_cfg0_addr = 0x470,
+	.format_measure_cfg1_addr = 0x474,
+	.format_measure0_addr = 0x478,
+	.format_measure1_addr = 0x47c,
+	.format_measure2_addr = 0x480,
+	.timestamp_curr0_sof_addr = 0x490,
+	.timestamp_curr1_sof_addr = 0x494,
+	.timestamp_prev0_sof_addr = 0x498,
+	.timestamp_prev1_sof_addr = 0x49c,
+	.timestamp_curr0_eof_addr = 0x4a0,
+	.timestamp_curr1_eof_addr = 0x4a4,
+	.timestamp_prev0_eof_addr = 0x4a8,
+	.timestamp_prev1_eof_addr = 0x4ac,
+	.byte_cntr_ping_addr = 0x4e0,
+	.byte_cntr_pong_addr = 0x4e4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.ccif_violation_en = 1,
+	.plain_fmt_shift_val = 10,
+	.packing_fmt_shift_val = 30,
+	.mipi_pack_supported = 1,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 1,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* RDI2 path: 0x5xx range; additionally exposes a YUV chroma conversion register */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_170_200_rdi_2_reg_info = {
+	.irq_status_addr = 0x60,
+	.irq_mask_addr = 0x64,
+	.irq_clear_addr = 0x68,
+	.irq_set_addr = 0x6c,
+	.cfg0_addr = 0x500,
+	.cfg1_addr = 0x504,
+	.ctrl_addr = 0x508,
+	.frm_drop_pattern_addr = 0x50c,
+	.frm_drop_period_addr = 0x510,
+	.irq_subsample_pattern_addr = 0x514,
+	.irq_subsample_period_addr = 0x518,
+	.hcrop_addr = 0x51c,
+	.vcrop_addr = 0x520,
+	.pix_drop_pattern_addr = 0x524,
+	.pix_drop_period_addr = 0x528,
+	.line_drop_pattern_addr = 0x52c,
+	.line_drop_period_addr = 0x530,
+	.yuv_chroma_conversion_addr = 0x534,
+	.rst_strobes_addr = 0x540,
+	.status_addr = 0x550,
+	.misr_val0_addr = 0x554,
+	.misr_val1_addr = 0x558,
+	.misr_val2_addr = 0x55c,
+	.misr_val3_addr = 0x560,
+	.format_measure_cfg0_addr = 0x570,
+	.format_measure_cfg1_addr = 0x574,
+	.format_measure0_addr = 0x578,
+	.format_measure1_addr = 0x57c,
+	.format_measure2_addr = 0x580,
+	.timestamp_curr0_sof_addr = 0x590,
+	.timestamp_curr1_sof_addr = 0x594,
+	.timestamp_prev0_sof_addr = 0x598,
+	.timestamp_prev1_sof_addr = 0x59c,
+	.timestamp_curr0_eof_addr = 0x5a0,
+	.timestamp_curr1_eof_addr = 0x5a4,
+	.timestamp_prev0_eof_addr = 0x5a8,
+	.timestamp_prev1_eof_addr = 0x5ac,
+	.byte_cntr_ping_addr = 0x5e0,
+	.byte_cntr_pong_addr = 0x5e4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.ccif_violation_en = 1,
+	.plain_fmt_shift_val = 10,
+	.packing_fmt_shift_val = 30,
+	.mipi_pack_supported = 1,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 1,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* CSI2 RX: receiver register offsets, capture-control shifts and error IRQ masks */
+static struct cam_ife_csid_csi2_rx_reg_info
+	cam_ife_csid_170_200_csi2_reg_info = {
+	.irq_status_addr = 0x20,
+	.irq_mask_addr = 0x24,
+	.irq_clear_addr = 0x28,
+	.irq_set_addr = 0x2c,
+	/*CSI2 rx control */
+	.cfg0_addr = 0x100,
+	.cfg1_addr = 0x104,
+	.capture_ctrl_addr = 0x108,
+	.rst_strobes_addr = 0x110,
+	.cap_unmap_long_pkt_hdr_0_addr = 0x120,
+	.cap_unmap_long_pkt_hdr_1_addr = 0x124,
+	.captured_short_pkt_0_addr = 0x128,
+	.captured_short_pkt_1_addr = 0x12c,
+	.captured_long_pkt_0_addr = 0x130,
+	.captured_long_pkt_1_addr = 0x134,
+	.captured_long_pkt_ftr_addr = 0x138,
+	.captured_cphy_pkt_hdr_addr = 0x13c,
+	.lane0_misr_addr = 0x150,
+	.lane1_misr_addr = 0x154,
+	.lane2_misr_addr = 0x158,
+	.lane3_misr_addr = 0x15c,
+	.total_pkts_rcvd_addr = 0x160,
+	.stats_ecc_addr = 0x164,
+	.total_crc_err_addr = 0x168,
+	.de_scramble_type3_cfg0_addr = 0x170,
+	.de_scramble_type3_cfg1_addr = 0x174,
+	.de_scramble_type2_cfg0_addr = 0x178,
+	.de_scramble_type2_cfg1_addr = 0x17c,
+	.de_scramble_type1_cfg0_addr = 0x180,
+	.de_scramble_type1_cfg1_addr = 0x184,
+	.de_scramble_type0_cfg0_addr = 0x188,
+	.de_scramble_type0_cfg1_addr = 0x18c,
+
+	.rst_srb_all = 0x3FFF,
+	.rst_done_shift_val = 27,
+	.irq_mask_all = 0xFFFFFFF,
+	.misr_enable_shift_val = 6,
+	.vc_mode_shift_val = 2,
+	.capture_long_pkt_en_shift = 0,
+	.capture_short_pkt_en_shift = 1,
+	.capture_cphy_pkt_en_shift = 2,
+	.capture_long_pkt_dt_shift = 4,
+	.capture_long_pkt_vc_shift = 10,
+	.capture_short_pkt_vc_shift = 15,
+	.capture_cphy_pkt_dt_shift = 20,
+	.capture_cphy_pkt_vc_shift = 26,
+	.phy_num_mask = 0x7,
+	.vc_mask = 0x7C00000,
+	.dt_mask = 0x3f0000,
+	.wc_mask = 0xffff,
+	.calc_crc_mask = 0xffff,
+	.expected_crc_mask = 0xffff,
+	.ecc_correction_shift_en = 0,
+	.lane_num_shift = 0,
+	.lane_cfg_shift = 4,
+	.phy_type_shift = 24,
+	.phy_num_shift = 20,
+	.fatal_err_mask = 0x78000,
+	.part_fatal_err_mask = 0x1801800,
+	.non_fatal_err_mask = 0x380000,
+};
+
+/* TPG: test pattern generator register offsets and default pattern settings */
+static struct cam_ife_csid_ver1_tpg_reg_info
+	cam_ife_csid_170_200_tpg_reg_info = {
+	/*CSID TPG control */
+	.ctrl_addr = 0x600,
+	.vc_cfg0_addr = 0x604,
+	.vc_cfg1_addr = 0x608,
+	.lfsr_seed_addr = 0x60c,
+	.dt_n_cfg_0_addr = 0x610,
+	.dt_n_cfg_1_addr = 0x614,
+	.dt_n_cfg_2_addr = 0x618,
+	.color_bars_cfg_addr = 0x640,
+	.color_box_cfg_addr = 0x644,
+	.common_gen_cfg_addr = 0x648,
+	.cgen_n_cfg_addr = 0x650,
+	.cgen_n_x0_addr = 0x654,
+	.cgen_n_x1_addr = 0x658,
+	.cgen_n_x2_addr = 0x65c,
+	.cgen_n_xy_addr = 0x660,
+	.cgen_n_y1_addr = 0x664,
+	.cgen_n_y2_addr = 0x668,
+
+	/* configurations */
+	.dtn_cfg_offset = 0xc,
+	.cgen_cfg_offset = 0x20,
+	.cpas_ife_reg_offset = 0x28,
+	.hbi = 0x740,
+	.vbi = 0x3FF,
+	.lfsr_seed = 0x12345678,
+	.ctrl_cfg = 0x408007,
+	.color_bar = 1,
+	.line_interleave_mode = 0x1,
+	.num_frames = 0,
+	.num_active_lanes_mask = 0x30,
+	.num_active_dt = 0,
+	.payload_mode = 0x8,
+	.fmt_shift = 16,
+	.num_frame_shift = 16,
+	.width_shift = 16,
+	.vbi_shift = 12,
+	.line_interleave_shift = 10,
+	.num_active_dt_shift = 8,
+	.color_bar_shift = 5,
+	.height_shift = 0,
+	.hbi_shift = 0,
+};
+
+/* Common/top block: HW version 1.7.0; 1 IPP, 1 PPP and 3 RDI paths */
+static struct cam_ife_csid_ver1_common_reg_info
+	cam_ife_csid_170_200_cmn_reg_info = {
+	.hw_version_addr = 0x0,
+	.cfg0_addr = 0x4,
+	.ctrl_addr = 0x8,
+	.reset_addr = 0xc,
+	.rst_strobes_addr = 0x10,
+	.test_bus_ctrl_addr = 0x14,
+	.top_irq_status_addr = 0x70,
+	.top_irq_mask_addr = 0x74,
+	.top_irq_clear_addr = 0x78,
+	.top_irq_set_addr = 0x7c,
+	.irq_cmd_addr = 0x80,
+
+	/*configurations */
+	.major_version = 1,
+	.minor_version = 7,
+	.version_incr = 0,
+	.num_rdis = 3,
+	.num_pix = 1,
+	.num_ppp = 1,
+	.rst_sw_reg_stb = 1,
+	.rst_hw_reg_stb = 0x1e,
+	.rst_sw_hw_reg_stb = 0x1f,
+	.path_rst_stb_all = 0x7f,
+	.rst_done_shift_val = 1,
+	.path_en_shift_val = 31,
+	.dt_id_shift_val = 27,
+	.vc_shift_val = 22,
+	.dt_shift_val = 16,
+	.fmt_shift_val = 12,
+	.crop_shift_val = 16,
+	.crop_pix_start_mask = 0x3fff,
+	.crop_pix_end_mask = 0xffff,
+	.crop_line_start_mask = 0x3fff,
+	.crop_line_end_mask = 0xffff,
+	.decode_format_shift_val = 12,
+	.ipp_irq_mask_all = 0xFFFF,
+	.rdi_irq_mask_all = 0xFFFF,
+	.ppp_irq_mask_all = 0xFFFF,
+	.measure_en_hbi_vbi_cnt_mask = 0xC,
+	.timestamp_strobe_val = 0x2,
+	.timestamp_stb_sel_shift_val = 0,
+	.format_measure_height_mask_val = 0xFFFF,
+	.format_measure_height_shift_val = 0x10,
+	.format_measure_width_mask_val = 0xFFFF,
+	.format_measure_width_shift_val = 0x0,
+};
+
+/* Aggregate table wired into the ver1 CSID driver for this HW revision */
+static struct cam_ife_csid_ver1_reg_info cam_ife_csid_170_200_reg_info = {
+	.cmn_reg = &cam_ife_csid_170_200_cmn_reg_info,
+	.csi2_reg = &cam_ife_csid_170_200_csi2_reg_info,
+	.ipp_reg = &cam_ife_csid_170_200_ipp_reg_info,
+	.ppp_reg = &cam_ife_csid_170_200_ppp_reg_info,
+	.rdi_reg = {
+		&cam_ife_csid_170_200_rdi_0_reg_info,
+		&cam_ife_csid_170_200_rdi_1_reg_info,
+		&cam_ife_csid_170_200_rdi_2_reg_info,
+		NULL,
+	},
+	.tpg_reg = &cam_ife_csid_170_200_tpg_reg_info,
+};
+#endif /*_CAM_IFE_CSID_170_200_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h
new file mode 100644
index 0000000000..2dec2b716b
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h
@@ -0,0 +1,472 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_175_H_
+#define _CAM_IFE_CSID_175_H_
+
+/* NOTE(review): the next #include lost its header name in this patch text
+ * (angle-bracket content appears stripped) — restore before compiling.
+ */
+#include
+#include "cam_ife_csid_common.h"
+#include "cam_ife_csid_hw_ver1.h"
+#include "cam_ife_csid_dev.h"
+#include "camera_main.h"
+
+#define CAM_CSID_VERSION_V175 0x10070050
+/* IPP path: register offsets (0x2xx range) and cfg bit positions */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_ipp_reg_info = {
+	.irq_status_addr = 0x30,
+	.irq_mask_addr = 0x34,
+	.irq_clear_addr = 0x38,
+	.irq_set_addr = 0x3c,
+	.cfg0_addr = 0x200,
+	.cfg1_addr = 0x204,
+	.ctrl_addr = 0x208,
+	.frm_drop_pattern_addr = 0x20c,
+	.frm_drop_period_addr = 0x210,
+	.irq_subsample_pattern_addr = 0x214,
+	.irq_subsample_period_addr = 0x218,
+	.hcrop_addr = 0x21c,
+	.vcrop_addr = 0x220,
+	.pix_drop_pattern_addr = 0x224,
+	.pix_drop_period_addr = 0x228,
+	.line_drop_pattern_addr = 0x22c,
+	.line_drop_period_addr = 0x230,
+	.rst_strobes_addr = 0x240,
+	.status_addr = 0x254,
+	.misr_val_addr = 0x258,
+	.format_measure_cfg0_addr = 0x270,
+	.format_measure_cfg1_addr = 0x274,
+	.format_measure0_addr = 0x278,
+	.format_measure1_addr = 0x27c,
+	.format_measure2_addr = 0x280,
+	.timestamp_curr0_sof_addr = 0x290,
+	.timestamp_curr1_sof_addr = 0x294,
+	.timestamp_prev0_sof_addr = 0x298,
+	.timestamp_prev1_sof_addr = 0x29c,
+	.timestamp_curr0_eof_addr = 0x2a0,
+	.timestamp_curr1_eof_addr = 0x2a4,
+	.timestamp_prev0_eof_addr = 0x2a8,
+	.timestamp_prev1_eof_addr = 0x2ac,
+	/* configurations */
+	.halt_master_sel_master_val = 0,
+	.halt_master_sel_shift = 4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_master = 2,
+	.halt_mode_slave = 3,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.pix_store_en_shift_val = 7,
+	.early_eof_en_shift_val = 29,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.drop_v_en_shift_val = 4,
+	.drop_h_en_shift_val = 3,
+	.timestamp_en_shift_val = 1,
+	.format_measure_en_shift_val = 0,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* PPP path: register offsets (0x7xx range) and cfg bit positions */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_ppp_reg_info = {
+	.irq_status_addr = 0xa0,
+	.irq_mask_addr = 0xa4,
+	.irq_clear_addr = 0xa8,
+	.irq_set_addr = 0xac,
+	.cfg0_addr = 0x700,
+	.cfg1_addr = 0x704,
+	.ctrl_addr = 0x708,
+	.frm_drop_pattern_addr = 0x70c,
+	.frm_drop_period_addr = 0x710,
+	.irq_subsample_pattern_addr = 0x714,
+	.irq_subsample_period_addr = 0x718,
+	.hcrop_addr = 0x71c,
+	.vcrop_addr = 0x720,
+	.pix_drop_pattern_addr = 0x724,
+	.pix_drop_period_addr = 0x728,
+	.line_drop_pattern_addr = 0x72c,
+	.line_drop_period_addr = 0x730,
+	.rst_strobes_addr = 0x740,
+	.status_addr = 0x754,
+	.misr_val_addr = 0x758,
+	.format_measure_cfg0_addr = 0x770,
+	.format_measure_cfg1_addr = 0x774,
+	.format_measure0_addr = 0x778,
+	.format_measure1_addr = 0x77c,
+	.format_measure2_addr = 0x780,
+	.timestamp_curr0_sof_addr = 0x790,
+	.timestamp_curr1_sof_addr = 0x794,
+	.timestamp_prev0_sof_addr = 0x798,
+	.timestamp_prev1_sof_addr = 0x79c,
+	.timestamp_curr0_eof_addr = 0x7a0,
+	.timestamp_curr1_eof_addr = 0x7a4,
+	.timestamp_prev0_eof_addr = 0x7a8,
+	.timestamp_prev1_eof_addr = 0x7ac,
+	/* configurations */
+	.halt_master_sel_master_val = 3,
+	.halt_master_sel_shift = 4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_master = 2,
+	.halt_mode_slave = 3,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.drop_v_en_shift_val = 4,
+	.drop_h_en_shift_val = 3,
+	.pix_store_en_shift_val = 7,
+	.early_eof_en_shift_val = 29,
+	.timestamp_en_shift_val = 1,
+	.format_measure_en_shift_val = 0,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+
+/* RDI0 path: register offsets (0x3xx range) */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_rdi_0_reg_info = {
+	.irq_status_addr = 0x40,
+	.irq_mask_addr = 0x44,
+	.irq_clear_addr = 0x48,
+	.irq_set_addr = 0x4c,
+	.cfg0_addr = 0x300,
+	.cfg1_addr = 0x304,
+	.ctrl_addr = 0x308,
+	.frm_drop_pattern_addr = 0x30c,
+	.frm_drop_period_addr = 0x310,
+	.irq_subsample_pattern_addr = 0x314,
+	.irq_subsample_period_addr = 0x318,
+	.hcrop_addr = 0x31c,
+	.vcrop_addr = 0x320,
+	.pix_drop_pattern_addr = 0x324,
+	.pix_drop_period_addr = 0x328,
+	.line_drop_pattern_addr = 0x32c,
+	.line_drop_period_addr = 0x330,
+	.rst_strobes_addr = 0x340,
+	.status_addr = 0x350,
+	.misr_val0_addr = 0x354,
+	.misr_val1_addr = 0x358,
+	.misr_val2_addr = 0x35c,
+	.misr_val3_addr = 0x360,
+	.format_measure_cfg0_addr = 0x370,
+	.format_measure_cfg1_addr = 0x374,
+	.format_measure0_addr = 0x378,
+	.format_measure1_addr = 0x37c,
+	.format_measure2_addr = 0x380,
+	.timestamp_curr0_sof_addr = 0x390,
+	.timestamp_curr1_sof_addr = 0x394,
+	.timestamp_prev0_sof_addr = 0x398,
+	.timestamp_prev1_sof_addr = 0x39c,
+	.timestamp_curr0_eof_addr = 0x3a0,
+	.timestamp_curr1_eof_addr = 0x3a4,
+	.timestamp_prev0_eof_addr = 0x3a8,
+	.timestamp_prev1_eof_addr = 0x3ac,
+	.byte_cntr_ping_addr = 0x3e0,
+	.byte_cntr_pong_addr = 0x3e4,
+	.plain_fmt_shift_val = 10,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.drop_v_en_shift_val = 4,
+	.drop_h_en_shift_val = 3,
+	.timestamp_en_shift_val = 2,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* RDI1 path: register offsets (0x4xx range) */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_rdi_1_reg_info = {
+	.irq_status_addr = 0x50,
+	.irq_mask_addr = 0x54,
+	.irq_clear_addr = 0x58,
+	.irq_set_addr = 0x5c,
+	.cfg0_addr = 0x400,
+	.cfg1_addr = 0x404,
+	.ctrl_addr = 0x408,
+	.frm_drop_pattern_addr = 0x40c,
+	.frm_drop_period_addr = 0x410,
+	.irq_subsample_pattern_addr = 0x414,
+	.irq_subsample_period_addr = 0x418,
+	.hcrop_addr = 0x41c,
+	.vcrop_addr = 0x420,
+	.pix_drop_pattern_addr = 0x424,
+	.pix_drop_period_addr = 0x428,
+	.line_drop_pattern_addr = 0x42c,
+	.line_drop_period_addr = 0x430,
+	.rst_strobes_addr = 0x440,
+	.status_addr = 0x450,
+	.misr_val0_addr = 0x454,
+	.misr_val1_addr = 0x458,
+	.misr_val2_addr = 0x45c,
+	.misr_val3_addr = 0x460,
+	.format_measure_cfg0_addr = 0x470,
+	.format_measure_cfg1_addr = 0x474,
+	.format_measure0_addr = 0x478,
+	.format_measure1_addr = 0x47c,
+	.format_measure2_addr = 0x480,
+	.timestamp_curr0_sof_addr = 0x490,
+	.timestamp_curr1_sof_addr = 0x494,
+	.timestamp_prev0_sof_addr = 0x498,
+	.timestamp_prev1_sof_addr = 0x49c,
+	.timestamp_curr0_eof_addr = 0x4a0,
+	.timestamp_curr1_eof_addr = 0x4a4,
+	.timestamp_prev0_eof_addr = 0x4a8,
+	.timestamp_prev1_eof_addr = 0x4ac,
+	.byte_cntr_ping_addr = 0x4e0,
+	.byte_cntr_pong_addr = 0x4e4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.plain_fmt_shift_val = 10,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 2,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* RDI2 path: 0x5xx range; additionally exposes a YUV chroma conversion register */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_rdi_2_reg_info = {
+	.irq_status_addr = 0x60,
+	.irq_mask_addr = 0x64,
+	.irq_clear_addr = 0x68,
+	.irq_set_addr = 0x6c,
+	.cfg0_addr = 0x500,
+	.cfg1_addr = 0x504,
+	.ctrl_addr = 0x508,
+	.frm_drop_pattern_addr = 0x50c,
+	.frm_drop_period_addr = 0x510,
+	.irq_subsample_pattern_addr = 0x514,
+	.irq_subsample_period_addr = 0x518,
+	.hcrop_addr = 0x51c,
+	.vcrop_addr = 0x520,
+	.pix_drop_pattern_addr = 0x524,
+	.pix_drop_period_addr = 0x528,
+	.line_drop_pattern_addr = 0x52c,
+	.line_drop_period_addr = 0x530,
+	.yuv_chroma_conversion_addr = 0x534,
+	.rst_strobes_addr = 0x540,
+	.status_addr = 0x550,
+	.misr_val0_addr = 0x554,
+	.misr_val1_addr = 0x558,
+	.misr_val2_addr = 0x55c,
+	.misr_val3_addr = 0x560,
+	.format_measure_cfg0_addr = 0x570,
+	.format_measure_cfg1_addr = 0x574,
+	.format_measure0_addr = 0x578,
+	.format_measure1_addr = 0x57c,
+	.format_measure2_addr = 0x580,
+	.timestamp_curr0_sof_addr = 0x590,
+	.timestamp_curr1_sof_addr = 0x594,
+	.timestamp_prev0_sof_addr = 0x598,
+	.timestamp_prev1_sof_addr = 0x59c,
+	.timestamp_curr0_eof_addr = 0x5a0,
+	.timestamp_curr1_eof_addr = 0x5a4,
+	.timestamp_prev0_eof_addr = 0x5a8,
+	.timestamp_prev1_eof_addr = 0x5ac,
+	.byte_cntr_ping_addr = 0x5e0,
+	.byte_cntr_pong_addr = 0x5e4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.plain_fmt_shift_val = 10,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 2,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* CSI2 RX: receiver register offsets; v175 uses single de-scramble cfg pair */
+static struct cam_ife_csid_csi2_rx_reg_info
+	cam_ife_csid_175_csi2_reg_info = {
+	.irq_status_addr = 0x20,
+	.irq_mask_addr = 0x24,
+	.irq_clear_addr = 0x28,
+	.irq_set_addr = 0x2c,
+	/*CSI2 rx control */
+	.cfg0_addr = 0x100,
+	.cfg1_addr = 0x104,
+	.capture_ctrl_addr = 0x108,
+	.rst_strobes_addr = 0x110,
+	.de_scramble_cfg0_addr = 0x114,
+	.de_scramble_cfg1_addr = 0x118,
+	.cap_unmap_long_pkt_hdr_0_addr = 0x120,
+	.cap_unmap_long_pkt_hdr_1_addr = 0x124,
+	.captured_short_pkt_0_addr = 0x128,
+	.captured_short_pkt_1_addr = 0x12c,
+	.captured_long_pkt_0_addr = 0x130,
+	.captured_long_pkt_1_addr = 0x134,
+	.captured_long_pkt_ftr_addr = 0x138,
+	.captured_cphy_pkt_hdr_addr = 0x13c,
+	.lane0_misr_addr = 0x150,
+	.lane1_misr_addr = 0x154,
+	.lane2_misr_addr = 0x158,
+	.lane3_misr_addr = 0x15c,
+	.total_pkts_rcvd_addr = 0x160,
+	.stats_ecc_addr = 0x164,
+	.total_crc_err_addr = 0x168,
+
+	.rst_srb_all = 0x3FFF,
+	.rst_done_shift_val = 27,
+	.irq_mask_all = 0xFFFFFFF,
+	.misr_enable_shift_val = 6,
+	.vc_mode_shift_val = 2,
+	.capture_long_pkt_en_shift = 0,
+	.capture_short_pkt_en_shift = 1,
+	.capture_cphy_pkt_en_shift = 2,
+	.capture_long_pkt_dt_shift = 4,
+	.capture_long_pkt_vc_shift = 10,
+	.capture_short_pkt_vc_shift = 15,
+	.capture_cphy_pkt_dt_shift = 20,
+	.capture_cphy_pkt_vc_shift = 26,
+	.phy_num_mask = 0x3,
+	.vc_mask = 0x7C00000,
+	.dt_mask = 0x3f0000,
+	.wc_mask = 0xffff,
+	.calc_crc_mask = 0xffff,
+	.expected_crc_mask = 0xffff,
+	.ecc_correction_shift_en = 0,
+	.lane_num_shift = 0,
+	.lane_cfg_shift = 4,
+	.phy_type_shift = 24,
+	.phy_num_shift = 20,
+	.fatal_err_mask = 0x78000,
+	.part_fatal_err_mask = 0x1801800,
+	.non_fatal_err_mask = 0x380000,
+};
+
+/* TPG: test pattern generator register offsets and default pattern settings */
+static struct cam_ife_csid_ver1_tpg_reg_info
+	cam_ife_csid_175_tpg_reg_info = {
+	/*CSID TPG control */
+	.ctrl_addr = 0x600,
+	.vc_cfg0_addr = 0x604,
+	.vc_cfg1_addr = 0x608,
+	.lfsr_seed_addr = 0x60c,
+	.dt_n_cfg_0_addr = 0x610,
+	.dt_n_cfg_1_addr = 0x614,
+	.dt_n_cfg_2_addr = 0x618,
+	.color_bars_cfg_addr = 0x640,
+	.color_box_cfg_addr = 0x644,
+	.common_gen_cfg_addr = 0x648,
+	.cgen_n_cfg_addr = 0x650,
+	.cgen_n_x0_addr = 0x654,
+	.cgen_n_x1_addr = 0x658,
+	.cgen_n_x2_addr = 0x65c,
+	.cgen_n_xy_addr = 0x660,
+	.cgen_n_y1_addr = 0x664,
+	.cgen_n_y2_addr = 0x668,
+
+	/* configurations */
+	.dtn_cfg_offset = 0xc,
+	.cgen_cfg_offset = 0x20,
+	.cpas_ife_reg_offset = 0x28,
+	.hbi = 0x740,
+	.vbi = 0x3FF,
+	.lfsr_seed = 0x12345678,
+	.ctrl_cfg = 0x408007,
+	.color_bar = 1,
+	.num_frames = 0,
+	.line_interleave_mode = 0x1,
+	.payload_mode = 0x8,
+	.num_active_lanes_mask = 0x30,
+	.num_active_dt = 0,
+	.fmt_shift = 16,
+	.num_frame_shift = 16,
+	.width_shift = 16,
+	.vbi_shift = 12,
+	.line_interleave_shift = 10,
+	.num_active_dt_shift = 8,
+	.color_bar_shift = 5,
+	.height_shift = 0,
+	.hbi_shift = 0,
+};
+
+/* Common/top block: HW version 1.7.0; narrower path IRQ masks (0x7FFF) than 170_200 */
+static struct cam_ife_csid_ver1_common_reg_info
+	cam_ife_csid_175_cmn_reg_info = {
+	.hw_version_addr = 0x0,
+	.cfg0_addr = 0x4,
+	.ctrl_addr = 0x8,
+	.reset_addr = 0xc,
+	.rst_strobes_addr = 0x10,
+	.test_bus_ctrl_addr = 0x14,
+	.top_irq_status_addr = 0x70,
+	.top_irq_mask_addr = 0x74,
+	.top_irq_clear_addr = 0x78,
+	.top_irq_set_addr = 0x7c,
+	.irq_cmd_addr = 0x80,
+
+	/*configurations */
+	.major_version = 1,
+	.minor_version = 7,
+	.version_incr = 0,
+	.num_rdis = 3,
+	.num_pix = 1,
+	.num_ppp = 1,
+	.rst_sw_reg_stb = 1,
+	.rst_hw_reg_stb = 0x1e,
+	.rst_sw_hw_reg_stb = 0x1f,
+	.path_rst_stb_all = 0x7f,
+	.rst_done_shift_val = 1,
+	.path_en_shift_val = 31,
+	.dt_id_shift_val = 27,
+	.vc_shift_val = 22,
+	.dt_shift_val = 16,
+	.fmt_shift_val = 12,
+	.crop_shift_val = 16,
+	.decode_format_shift_val = 12,
+	.crop_pix_start_mask = 0x3fff,
+	.crop_pix_end_mask = 0xffff,
+	.crop_line_start_mask = 0x3fff,
+	.crop_line_end_mask = 0xffff,
+	.ipp_irq_mask_all = 0x7FFF,
+	.rdi_irq_mask_all = 0x7FFF,
+	.ppp_irq_mask_all = 0xFFFF,
+	.measure_en_hbi_vbi_cnt_mask = 0xC,
+	.timestamp_stb_sel_shift_val = 0,
+	.timestamp_strobe_val = 0x2,
+	.format_measure_height_mask_val = 0xFFFF,
+	.format_measure_height_shift_val = 0x10,
+	.format_measure_width_mask_val = 0xFFFF,
+	.format_measure_width_shift_val = 0x0,
+};
+
+/* Aggregate table wired into the ver1 CSID driver for this HW revision */
+static struct cam_ife_csid_ver1_reg_info cam_ife_csid_175_reg_info = {
+	.cmn_reg = &cam_ife_csid_175_cmn_reg_info,
+	.csi2_reg = &cam_ife_csid_175_csi2_reg_info,
+	.ipp_reg = &cam_ife_csid_175_ipp_reg_info,
+	.ppp_reg = &cam_ife_csid_175_ppp_reg_info,
+	.rdi_reg = {
+		&cam_ife_csid_175_rdi_0_reg_info,
+		&cam_ife_csid_175_rdi_1_reg_info,
+		&cam_ife_csid_175_rdi_2_reg_info,
+		NULL,
+	},
+	.tpg_reg = &cam_ife_csid_175_tpg_reg_info,
+};
+#endif /*_CAM_IFE_CSID_175_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175_200.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175_200.h
new file mode 100644
index 0000000000..8329eb3a4a
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175_200.h
@@ -0,0 +1,490 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_175_200_H_
+#define _CAM_IFE_CSID_175_200_H_
+
+/* NOTE(review): the next #include lost its header name in this patch text
+ * (angle-bracket content appears stripped) — restore before compiling.
+ */
+#include
+#include "camera_main.h"
+#include "cam_ife_csid_dev.h"
+#include "cam_ife_csid_common.h"
+#include "cam_ife_csid_hw_ver1.h"
+
+#define CAM_CSID_DRV_NAME                    "csid"
+/* Same value as CAM_CSID_VERSION_V175 in cam_ife_csid175.h */
+#define CAM_CSID_VERSION_V175                 0x10070050
+
+/* IPP path: register offsets (0x2xx range); this revision supports binning/QCFA */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_200_ipp_reg_info = {
+	.irq_status_addr = 0x30,
+	.irq_mask_addr = 0x34,
+	.irq_clear_addr = 0x38,
+	.irq_set_addr = 0x3c,
+	.cfg0_addr = 0x200,
+	.cfg1_addr = 0x204,
+	.ctrl_addr = 0x208,
+	.frm_drop_pattern_addr = 0x20c,
+	.frm_drop_period_addr = 0x210,
+	.irq_subsample_pattern_addr = 0x214,
+	.irq_subsample_period_addr = 0x218,
+	.hcrop_addr = 0x21c,
+	.vcrop_addr = 0x220,
+	.pix_drop_pattern_addr = 0x224,
+	.pix_drop_period_addr = 0x228,
+	.line_drop_pattern_addr = 0x22c,
+	.line_drop_period_addr = 0x230,
+	.rst_strobes_addr = 0x240,
+	.status_addr = 0x254,
+	.misr_val_addr = 0x258,
+	.format_measure_cfg0_addr = 0x270,
+	.format_measure_cfg1_addr = 0x274,
+	.format_measure0_addr = 0x278,
+	.format_measure1_addr = 0x27c,
+	.format_measure2_addr = 0x280,
+	.timestamp_curr0_sof_addr = 0x290,
+	.timestamp_curr1_sof_addr = 0x294,
+	.timestamp_prev0_sof_addr = 0x298,
+	.timestamp_prev1_sof_addr = 0x29c,
+	.timestamp_curr0_eof_addr = 0x2a0,
+	.timestamp_curr1_eof_addr = 0x2a4,
+	.timestamp_prev0_eof_addr = 0x2a8,
+	.timestamp_prev1_eof_addr = 0x2ac,
+	/* configurations */
+	.halt_master_sel_master_val = 0,
+	.halt_master_sel_shift = 4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_master = 2,
+	.halt_mode_slave = 3,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.pix_store_en_shift_val = 7,
+	.early_eof_en_shift_val = 29,
+	.bin_qcfa_en_shift_val = 30,
+	.bin_h_en_shift_val = 2,
+	.bin_en_shift_val = 2,
+	.binning_supported = 0x3,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.drop_v_en_shift_val = 4,
+	.drop_h_en_shift_val = 3,
+	.timestamp_en_shift_val = 1,
+	.format_measure_en_shift_val = 0,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* PPP path: register offsets (0x7xx range) and cfg bit positions */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_200_ppp_reg_info = {
+	.irq_status_addr = 0xa0,
+	.irq_mask_addr = 0xa4,
+	.irq_clear_addr = 0xa8,
+	.irq_set_addr = 0xac,
+	.cfg0_addr = 0x700,
+	.cfg1_addr = 0x704,
+	.ctrl_addr = 0x708,
+	.frm_drop_pattern_addr = 0x70c,
+	.frm_drop_period_addr = 0x710,
+	.irq_subsample_pattern_addr = 0x714,
+	.irq_subsample_period_addr = 0x718,
+	.hcrop_addr = 0x71c,
+	.vcrop_addr = 0x720,
+	.pix_drop_pattern_addr = 0x724,
+	.pix_drop_period_addr = 0x728,
+	.line_drop_pattern_addr = 0x72c,
+	.line_drop_period_addr = 0x730,
+	.rst_strobes_addr = 0x740,
+	.status_addr = 0x754,
+	.misr_val_addr = 0x758,
+	.format_measure_cfg0_addr = 0x770,
+	.format_measure_cfg1_addr = 0x774,
+	.format_measure0_addr = 0x778,
+	.format_measure1_addr = 0x77c,
+	.format_measure2_addr = 0x780,
+	.timestamp_curr0_sof_addr = 0x790,
+	.timestamp_curr1_sof_addr = 0x794,
+	.timestamp_prev0_sof_addr = 0x798,
+	.timestamp_prev1_sof_addr = 0x79c,
+	.timestamp_curr0_eof_addr = 0x7a0,
+	.timestamp_curr1_eof_addr = 0x7a4,
+	.timestamp_prev0_eof_addr = 0x7a8,
+	.timestamp_prev1_eof_addr = 0x7ac,
+	/* configurations */
+	.halt_master_sel_master_val = 3,
+	.halt_master_sel_shift = 4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_master = 2,
+	.halt_mode_slave = 3,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.pix_store_en_shift_val = 7,
+	.early_eof_en_shift_val = 29,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.drop_v_en_shift_val = 4,
+	.drop_h_en_shift_val = 3,
+	.timestamp_en_shift_val = 1,
+	.format_measure_en_shift_val = 0,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+
+/* RDI0 path: register offsets (0x3xx range); supports MIPI packing and byte counters */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_200_rdi_0_reg_info = {
+	.irq_status_addr = 0x40,
+	.irq_mask_addr = 0x44,
+	.irq_clear_addr = 0x48,
+	.irq_set_addr = 0x4c,
+	.cfg0_addr = 0x300,
+	.cfg1_addr = 0x304,
+	.ctrl_addr = 0x308,
+	.frm_drop_pattern_addr = 0x30c,
+	.frm_drop_period_addr = 0x310,
+	.irq_subsample_pattern_addr = 0x314,
+	.irq_subsample_period_addr = 0x318,
+	.hcrop_addr = 0x31c,
+	.vcrop_addr = 0x320,
+	.pix_drop_pattern_addr = 0x324,
+	.pix_drop_period_addr = 0x328,
+	.line_drop_pattern_addr = 0x32c,
+	.line_drop_period_addr = 0x330,
+	.rst_strobes_addr = 0x340,
+	.status_addr = 0x350,
+	.misr_val0_addr = 0x354,
+	.misr_val1_addr = 0x358,
+	.misr_val2_addr = 0x35c,
+	.misr_val3_addr = 0x360,
+	.format_measure_cfg0_addr = 0x370,
+	.format_measure_cfg1_addr = 0x374,
+	.format_measure0_addr = 0x378,
+	.format_measure1_addr = 0x37c,
+	.format_measure2_addr = 0x380,
+	.timestamp_curr0_sof_addr = 0x390,
+	.timestamp_curr1_sof_addr = 0x394,
+	.timestamp_prev0_sof_addr = 0x398,
+	.timestamp_prev1_sof_addr = 0x39c,
+	.timestamp_curr0_eof_addr = 0x3a0,
+	.timestamp_curr1_eof_addr = 0x3a4,
+	.timestamp_prev0_eof_addr = 0x3a8,
+	.timestamp_prev1_eof_addr = 0x3ac,
+	.byte_cntr_ping_addr = 0x3e0,
+	.byte_cntr_pong_addr = 0x3e4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.ccif_violation_en = 1,
+	.plain_fmt_shift_val = 10,
+	.packing_fmt_shift_val = 30,
+	.mipi_pack_supported = 1,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 2,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* RDI1 path: same layout as RDI0, shifted to the 0x4xx register range */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_200_rdi_1_reg_info = {
+	.irq_status_addr = 0x50,
+	.irq_mask_addr = 0x54,
+	.irq_clear_addr = 0x58,
+	.irq_set_addr = 0x5c,
+	.cfg0_addr = 0x400,
+	.cfg1_addr = 0x404,
+	.ctrl_addr = 0x408,
+	.frm_drop_pattern_addr = 0x40c,
+	.frm_drop_period_addr = 0x410,
+	.irq_subsample_pattern_addr = 0x414,
+	.irq_subsample_period_addr = 0x418,
+	.hcrop_addr = 0x41c,
+	.vcrop_addr = 0x420,
+	.pix_drop_pattern_addr = 0x424,
+	.pix_drop_period_addr = 0x428,
+	.line_drop_pattern_addr = 0x42c,
+	.line_drop_period_addr = 0x430,
+	.rst_strobes_addr = 0x440,
+	.status_addr = 0x450,
+	.misr_val0_addr = 0x454,
+	.misr_val1_addr = 0x458,
+	.misr_val2_addr = 0x45c,
+	.misr_val3_addr = 0x460,
+	.format_measure_cfg0_addr = 0x470,
+	.format_measure_cfg1_addr = 0x474,
+	.format_measure0_addr = 0x478,
+	.format_measure1_addr = 0x47c,
+	.format_measure2_addr = 0x480,
+	.timestamp_curr0_sof_addr = 0x490,
+	.timestamp_curr1_sof_addr = 0x494,
+	.timestamp_prev0_sof_addr = 0x498,
+	.timestamp_prev1_sof_addr = 0x49c,
+	.timestamp_curr0_eof_addr = 0x4a0,
+	.timestamp_curr1_eof_addr = 0x4a4,
+	.timestamp_prev0_eof_addr = 0x4a8,
+	.timestamp_prev1_eof_addr = 0x4ac,
+	.byte_cntr_ping_addr = 0x4e0,
+	.byte_cntr_pong_addr = 0x4e4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.ccif_violation_en = 1,
+	.plain_fmt_shift_val = 10,
+	.packing_fmt_shift_val = 30,
+	.mipi_pack_supported = 1,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 2,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* RDI2 path: 0x5xx range; additionally exposes a YUV chroma conversion register */
+static struct cam_ife_csid_ver1_path_reg_info
+	cam_ife_csid_175_200_rdi_2_reg_info = {
+	.irq_status_addr = 0x60,
+	.irq_mask_addr = 0x64,
+	.irq_clear_addr = 0x68,
+	.irq_set_addr = 0x6c,
+	.cfg0_addr = 0x500,
+	.cfg1_addr = 0x504,
+	.ctrl_addr = 0x508,
+	.frm_drop_pattern_addr = 0x50c,
+	.frm_drop_period_addr = 0x510,
+	.irq_subsample_pattern_addr = 0x514,
+	.irq_subsample_period_addr = 0x518,
+	.hcrop_addr = 0x51c,
+	.vcrop_addr = 0x520,
+	.pix_drop_pattern_addr = 0x524,
+	.pix_drop_period_addr = 0x528,
+	.line_drop_pattern_addr = 0x52c,
+	.line_drop_period_addr = 0x530,
+	.yuv_chroma_conversion_addr = 0x534,
+	.rst_strobes_addr = 0x540,
+	.status_addr = 0x550,
+	.misr_val0_addr = 0x554,
+	.misr_val1_addr = 0x558,
+	.misr_val2_addr = 0x55c,
+	.misr_val3_addr = 0x560,
+	.format_measure_cfg0_addr = 0x570,
+	.format_measure_cfg1_addr = 0x574,
+	.format_measure0_addr = 0x578,
+	.format_measure1_addr = 0x57c,
+	.format_measure2_addr = 0x580,
+	.timestamp_curr0_sof_addr = 0x590,
+	.timestamp_curr1_sof_addr = 0x594,
+	.timestamp_prev0_sof_addr = 0x598,
+	.timestamp_prev1_sof_addr = 0x59c,
+	.timestamp_curr0_eof_addr = 0x5a0,
+	.timestamp_curr1_eof_addr = 0x5a4,
+	.timestamp_prev0_eof_addr = 0x5a8,
+	.timestamp_prev1_eof_addr = 0x5ac,
+	.byte_cntr_ping_addr = 0x5e0,
+	.byte_cntr_pong_addr = 0x5e4,
+	.halt_mode_internal = 0,
+	.halt_mode_global = 1,
+	.halt_mode_shift = 2,
+	.halt_frame_boundary = 0,
+	.resume_frame_boundary = 1,
+	.halt_immediate = 2,
+	.halt_cmd_shift = 0,
+	.ccif_violation_en = 1,
+	.plain_fmt_shift_val = 10,
+	.mipi_pack_supported = 1,
+	.packing_fmt_shift_val = 30,
+	.crop_v_en_shift_val = 6,
+	.crop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 2,
+	.format_measure_en_shift_val = 1,
+	.fatal_err_mask = 0x4,
+	.non_fatal_err_mask = 0xe000,
+};
+
+/* CSI2 RX: receiver register offsets, capture-control shifts and error IRQ masks */
+static struct cam_ife_csid_csi2_rx_reg_info
+	cam_ife_csid_175_200_csi2_reg_info = {
+	.irq_status_addr = 0x20,
+	.irq_mask_addr = 0x24,
+	.irq_clear_addr = 0x28,
+	.irq_set_addr = 0x2c,
+	/*CSI2 rx control */
+	.cfg0_addr = 0x100,
+	.cfg1_addr = 0x104,
+	.capture_ctrl_addr = 0x108,
+	.rst_strobes_addr = 0x110,
+	.cap_unmap_long_pkt_hdr_0_addr = 0x120,
+	.cap_unmap_long_pkt_hdr_1_addr = 0x124,
+	.captured_short_pkt_0_addr = 0x128,
+	.captured_short_pkt_1_addr = 0x12c,
+	.captured_long_pkt_0_addr = 0x130,
+	.captured_long_pkt_1_addr = 0x134,
+	.captured_long_pkt_ftr_addr = 0x138,
+	.captured_cphy_pkt_hdr_addr = 0x13c,
+	.lane0_misr_addr = 0x150,
+	.lane1_misr_addr = 0x154,
+	.lane2_misr_addr = 0x158,
+	.lane3_misr_addr = 0x15c,
+	.total_pkts_rcvd_addr = 0x160,
+	.stats_ecc_addr = 0x164,
+	.total_crc_err_addr = 0x168,
+	.de_scramble_type3_cfg0_addr = 0x170,
+	.de_scramble_type3_cfg1_addr = 0x174,
+	.de_scramble_type2_cfg0_addr = 0x178,
+	.de_scramble_type2_cfg1_addr = 0x17c,
+	.de_scramble_type1_cfg0_addr = 0x180,
+	.de_scramble_type1_cfg1_addr = 0x184,
+	.de_scramble_type0_cfg0_addr = 0x188,
+	.de_scramble_type0_cfg1_addr = 0x18c,
+
+	.rst_srb_all = 0x3FFF,
+	.rst_done_shift_val = 27,
+	.irq_mask_all = 0xFFFFFFF,
+	.misr_enable_shift_val = 6,
+	.vc_mode_shift_val = 2,
+	.capture_long_pkt_en_shift = 0,
+	.capture_short_pkt_en_shift = 1,
+	.capture_cphy_pkt_en_shift = 2,
+	.capture_long_pkt_dt_shift = 4,
+	.capture_long_pkt_vc_shift = 10,
+	.capture_short_pkt_vc_shift = 15,
+	.capture_cphy_pkt_dt_shift = 20,
+	.capture_cphy_pkt_vc_shift = 26,
+	.phy_num_mask = 0x7,
+	.vc_mask = 0x7C00000,
+	.dt_mask = 0x3f0000,
+	.wc_mask = 0xffff,
+	.calc_crc_mask = 0xffff,
+	.expected_crc_mask = 0xffff,
+	.ecc_correction_shift_en = 0,
+	.lane_num_shift = 0,
+	.lane_cfg_shift = 4,
+	.phy_type_shift = 24,
+	.phy_num_shift = 20,
+	.fatal_err_mask = 0x78000,
+	.part_fatal_err_mask = 0x1801800,
+	.non_fatal_err_mask = 0x380000,
+};
+
+/* TPG: test pattern generator register offsets and default pattern settings */
+static struct cam_ife_csid_ver1_tpg_reg_info
+	cam_ife_csid_175_200_tpg_reg_info = {
+	/*CSID TPG control */
+	.ctrl_addr = 0x600,
+	.vc_cfg0_addr = 0x604,
+	.vc_cfg1_addr = 0x608,
+	.lfsr_seed_addr = 0x60c,
+	.dt_n_cfg_0_addr = 0x610,
+	.dt_n_cfg_1_addr = 0x614,
+	.dt_n_cfg_2_addr = 0x618,
+	.color_bars_cfg_addr = 0x640,
+	.color_box_cfg_addr = 0x644,
+	.common_gen_cfg_addr = 0x648,
+	.cgen_n_cfg_addr = 0x650,
+	.cgen_n_x0_addr = 0x654,
+	.cgen_n_x1_addr = 0x658,
+	.cgen_n_x2_addr = 0x65c,
+	.cgen_n_xy_addr = 0x660,
+	.cgen_n_y1_addr = 0x664,
+	.cgen_n_y2_addr = 0x668,
+	/* configurations */
+	.dtn_cfg_offset = 0xc,
+	.cgen_cfg_offset = 0x20,
+	.cpas_ife_reg_offset = 0x28,
+	.hbi = 0x740,
+	.vbi = 0x3FF,
+	.ctrl_cfg = 0x408007,
+	.lfsr_seed = 0x12345678,
+	.color_bar = 1,
+	.num_frames = 0,
+	.line_interleave_mode = 0x1,
+	.payload_mode = 0x8,
+	.num_active_lanes_mask = 0x30,
+	.num_active_dt = 0,
+	.fmt_shift = 16,
+	.num_frame_shift = 16,
+	.width_shift = 16,
+	.vbi_shift = 12,
+	.line_interleave_shift = 10,
+	.num_active_dt_shift = 8,
+	.color_bar_shift = 5,
+	.height_shift = 0,
+	.hbi_shift = 0,
+};
+
+/* Common/top block: HW version 1.7.5 (version_incr = 5).
+ * NOTE(review): struct initializer is truncated at the end of this patch chunk;
+ * remaining fields follow in the original file.
+ */
+static struct cam_ife_csid_ver1_common_reg_info
+	cam_ife_csid_175_200_cmn_reg_info = {
+	.hw_version_addr = 0x0,
+	.cfg0_addr = 0x4,
+	.ctrl_addr = 0x8,
+	.reset_addr = 0xc,
+	.rst_strobes_addr = 0x10,
+	.test_bus_ctrl_addr = 0x14,
+	.top_irq_status_addr = 0x70,
+	.top_irq_mask_addr = 0x74,
+	.top_irq_clear_addr = 0x78,
+	.top_irq_set_addr = 0x7c,
+	.irq_cmd_addr = 0x80,
+
+	/*configurations */
+	.major_version = 1,
+	.minor_version = 7,
+	.version_incr = 5,
+	.num_rdis = 3,
+	.num_pix = 1,
+	.num_ppp = 1,
+	.rst_sw_reg_stb = 1,
+	.rst_hw_reg_stb = 0x1e,
+	.rst_sw_hw_reg_stb = 0x1f,
+	.path_rst_stb_all = 0x7f,
+	.rst_done_shift_val = 1,
+	.path_en_shift_val = 31,
+	.dt_id_shift_val = 27,
+	.vc_shift_val = 22,
+	.dt_shift_val = 16,
+	.fmt_shift_val = 12,
+	.crop_shift_val = 16,
+	.decode_format_shift_val = 12,
+	.crop_pix_start_mask = 0x3fff,
+	.crop_pix_end_mask = 0xffff,
.crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .ipp_irq_mask_all = 0xFFFF, + .rdi_irq_mask_all = 0xFFFF, + .ppp_irq_mask_all = 0xFFFF, + .measure_en_hbi_vbi_cnt_mask = 0xC, + .timestamp_strobe_val = 0x2, + .timestamp_stb_sel_shift_val = 0, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, +}; + +static struct cam_ife_csid_ver1_reg_info cam_ife_csid_175_200_reg_info = { + .cmn_reg = &cam_ife_csid_175_200_cmn_reg_info, + .csi2_reg = &cam_ife_csid_175_200_csi2_reg_info, + .ipp_reg = &cam_ife_csid_175_200_ipp_reg_info, + .ppp_reg = &cam_ife_csid_175_200_ppp_reg_info, + .rdi_reg = { + &cam_ife_csid_175_200_rdi_0_reg_info, + &cam_ife_csid_175_200_rdi_1_reg_info, + &cam_ife_csid_175_200_rdi_2_reg_info, + NULL, + }, + .tpg_reg = &cam_ife_csid_175_200_tpg_reg_info, +}; +#endif /*_CAM_IFE_CSID_175_200_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid480.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid480.h new file mode 100644 index 0000000000..fed1a303a9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid480.h @@ -0,0 +1,535 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_480_H_ +#define _CAM_IFE_CSID_480_H_ + +#include +#include "camera_main.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" + +#define CAM_CSID_VERSION_V480 0x40080000 + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_480_ipp_reg_info = { + .irq_status_addr = 0x30, + .irq_mask_addr = 0x34, + .irq_clear_addr = 0x38, + .irq_set_addr = 0x3c, + + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .ctrl_addr = 0x208, + .frm_drop_pattern_addr = 0x20c, + .frm_drop_period_addr = 0x210, + .irq_subsample_pattern_addr = 0x214, + .irq_subsample_period_addr = 0x218, + .hcrop_addr = 0x21c, + .vcrop_addr = 0x220, + .pix_drop_pattern_addr = 0x224, + .pix_drop_period_addr = 0x228, + .line_drop_pattern_addr = 0x22c, + .line_drop_period_addr = 0x230, + .rst_strobes_addr = 0x240, + .status_addr = 0x254, + .misr_val_addr = 0x258, + .format_measure_cfg0_addr = 0x270, + .format_measure_cfg1_addr = 0x274, + .format_measure0_addr = 0x278, + .format_measure1_addr = 0x27c, + .format_measure2_addr = 0x280, + .timestamp_curr0_sof_addr = 0x290, + .timestamp_curr1_sof_addr = 0x294, + .timestamp_prev0_sof_addr = 0x298, + .timestamp_prev1_sof_addr = 0x29c, + .timestamp_curr0_eof_addr = 0x2a0, + .timestamp_curr1_eof_addr = 0x2a4, + .timestamp_prev0_eof_addr = 0x2a8, + .timestamp_prev1_eof_addr = 0x2ac, + .err_recovery_cfg0_addr = 0x2d0, + .err_recovery_cfg1_addr = 0x2d4, + .err_recovery_cfg2_addr = 0x2d8, + .multi_vcdt_cfg0_addr = 0x2dc, + /* configurations */ + .pix_store_en_shift_val = 7, + .early_eof_en_shift_val = 29, + .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + //.hblank_cfg_shift_val = 4, + .halt_master_sel_master_val = 0, + .halt_master_sel_shift = 4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_master = 2, + .halt_mode_slave = 3, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + 
.drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .bin_h_en_shift_val = 2, + .bin_en_shift_val = 2, + .bin_qcfa_en_shift_val = 30, + .binning_supported = 0x3, + .overflow_ctrl_mode_val = 0x8, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .timestamp_en_shift_val = 1, + .format_measure_en_shift_val = 0, + .fatal_err_mask = 0x26004, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_480_ppp_reg_info = { + .irq_status_addr = 0xa0, + .irq_mask_addr = 0xa4, + .irq_clear_addr = 0xa8, + .irq_set_addr = 0xac, + + .cfg0_addr = 0x700, + .cfg1_addr = 0x704, + .ctrl_addr = 0x708, + .frm_drop_pattern_addr = 0x70c, + .frm_drop_period_addr = 0x710, + .irq_subsample_pattern_addr = 0x714, + .irq_subsample_period_addr = 0x718, + .hcrop_addr = 0x71c, + .vcrop_addr = 0x720, + .pix_drop_pattern_addr = 0x724, + .pix_drop_period_addr = 0x728, + .line_drop_pattern_addr = 0x72c, + .line_drop_period_addr = 0x730, + .rst_strobes_addr = 0x740, + .status_addr = 0x754, + .misr_val_addr = 0x758, + .format_measure_cfg0_addr = 0x770, + .format_measure_cfg1_addr = 0x774, + .format_measure0_addr = 0x778, + .format_measure1_addr = 0x77c, + .format_measure2_addr = 0x780, + .timestamp_curr0_sof_addr = 0x790, + .timestamp_curr1_sof_addr = 0x794, + .timestamp_prev0_sof_addr = 0x798, + .timestamp_prev1_sof_addr = 0x79c, + .timestamp_curr0_eof_addr = 0x7a0, + .timestamp_curr1_eof_addr = 0x7a4, + .timestamp_prev0_eof_addr = 0x7a8, + .timestamp_prev1_eof_addr = 0x7ac, + .err_recovery_cfg0_addr = 0x7d0, + .err_recovery_cfg1_addr = 0x7d4, + .err_recovery_cfg2_addr = 0x7d8, + .multi_vcdt_cfg0_addr = 0x7dc, + /* configurations */ + .halt_master_sel_master_val = 3, + .halt_master_sel_shift = 4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_master = 2, + .halt_mode_slave = 3, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .crop_v_en_shift_val = 6, + 
.crop_h_en_shift_val = 5, + .drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .pix_store_en_shift_val = 7, + .early_eof_en_shift_val = 29, + .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .timestamp_en_shift_val = 1, + .format_measure_en_shift_val = 0, + .fatal_err_mask = 0x26004, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_480_rdi_0_reg_info = { + .irq_status_addr = 0x40, + .irq_mask_addr = 0x44, + .irq_clear_addr = 0x48, + .irq_set_addr = 0x4c, + .cfg0_addr = 0x300, + .cfg1_addr = 0x304, + .ctrl_addr = 0x308, + .frm_drop_pattern_addr = 0x30c, + .frm_drop_period_addr = 0x310, + .irq_subsample_pattern_addr = 0x314, + .irq_subsample_period_addr = 0x318, + .hcrop_addr = 0x31c, + .vcrop_addr = 0x320, + .pix_drop_pattern_addr = 0x324, + .pix_drop_period_addr = 0x328, + .line_drop_pattern_addr = 0x32c, + .line_drop_period_addr = 0x330, + .rst_strobes_addr = 0x340, + .status_addr = 0x350, + .misr_val0_addr = 0x354, + .misr_val1_addr = 0x358, + .misr_val2_addr = 0x35c, + .misr_val3_addr = 0x360, + .format_measure_cfg0_addr = 0x370, + .format_measure_cfg1_addr = 0x374, + .format_measure0_addr = 0x378, + .format_measure1_addr = 0x37c, + .format_measure2_addr = 0x380, + .timestamp_curr0_sof_addr = 0x390, + .timestamp_curr1_sof_addr = 0x394, + .timestamp_prev0_sof_addr = 0x398, + .timestamp_prev1_sof_addr = 0x39c, + .timestamp_curr0_eof_addr = 0x3a0, + .timestamp_curr1_eof_addr = 0x3a4, + .timestamp_prev0_eof_addr = 0x3a8, + .timestamp_prev1_eof_addr = 0x3ac, + .err_recovery_cfg0_addr = 0x3b0, + .err_recovery_cfg1_addr = 0x3b4, + .err_recovery_cfg2_addr = 0x3b8, + .multi_vcdt_cfg0_addr = 0x3bc, + .byte_cntr_ping_addr = 0x3e0, + .byte_cntr_pong_addr = 0x3e4, + /* configurations */ + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .crop_v_en_shift_val 
= 6, + .crop_h_en_shift_val = 5, + .drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .plain_fmt_shift_val = 10, + .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .packing_fmt_shift_val = 30, + .mipi_pack_supported = 1, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, + .fatal_err_mask = 0x26004, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_480_rdi_1_reg_info = { + .irq_status_addr = 0x50, + .irq_mask_addr = 0x54, + .irq_clear_addr = 0x58, + .irq_set_addr = 0x5c, + .cfg0_addr = 0x400, + .cfg1_addr = 0x404, + .ctrl_addr = 0x408, + .frm_drop_pattern_addr = 0x40c, + .frm_drop_period_addr = 0x410, + .irq_subsample_pattern_addr = 0x414, + .irq_subsample_period_addr = 0x418, + .hcrop_addr = 0x41c, + .vcrop_addr = 0x420, + .pix_drop_pattern_addr = 0x424, + .pix_drop_period_addr = 0x428, + .line_drop_pattern_addr = 0x42c, + .line_drop_period_addr = 0x430, + .rst_strobes_addr = 0x440, + .status_addr = 0x450, + .misr_val0_addr = 0x454, + .misr_val1_addr = 0x458, + .misr_val2_addr = 0x45c, + .misr_val3_addr = 0x460, + .format_measure_cfg0_addr = 0x470, + .format_measure_cfg1_addr = 0x474, + .format_measure0_addr = 0x478, + .format_measure1_addr = 0x47c, + .format_measure2_addr = 0x480, + .timestamp_curr0_sof_addr = 0x490, + .timestamp_curr1_sof_addr = 0x494, + .timestamp_prev0_sof_addr = 0x498, + .timestamp_prev1_sof_addr = 0x49c, + .timestamp_curr0_eof_addr = 0x4a0, + .timestamp_curr1_eof_addr = 0x4a4, + .timestamp_prev0_eof_addr = 0x4a8, + .timestamp_prev1_eof_addr = 0x4ac, + .err_recovery_cfg0_addr = 0x4b0, + .err_recovery_cfg1_addr = 0x4b4, + .err_recovery_cfg2_addr = 0x4b8, + .multi_vcdt_cfg0_addr = 0x4bc, + .byte_cntr_ping_addr = 0x4e0, + .byte_cntr_pong_addr = 0x4e4, + /* configurations */ + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + 
.halt_cmd_shift = 0, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .plain_fmt_shift_val = 10, + .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .packing_fmt_shift_val = 30, + .mipi_pack_supported = 1, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, + .fatal_err_mask = 0x26004, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_480_rdi_2_reg_info = { + .irq_status_addr = 0x60, + .irq_mask_addr = 0x64, + .irq_clear_addr = 0x68, + .irq_set_addr = 0x6c, + .cfg0_addr = 0x500, + .cfg1_addr = 0x504, + .ctrl_addr = 0x508, + .frm_drop_pattern_addr = 0x50c, + .frm_drop_period_addr = 0x510, + .irq_subsample_pattern_addr = 0x514, + .irq_subsample_period_addr = 0x518, + .hcrop_addr = 0x51c, + .vcrop_addr = 0x520, + .pix_drop_pattern_addr = 0x524, + .pix_drop_period_addr = 0x528, + .line_drop_pattern_addr = 0x52c, + .line_drop_period_addr = 0x530, + .yuv_chroma_conversion_addr = 0x534, + .rst_strobes_addr = 0x540, + .status_addr = 0x550, + .misr_val0_addr = 0x554, + .misr_val1_addr = 0x558, + .misr_val2_addr = 0x55c, + .misr_val3_addr = 0x560, + .format_measure_cfg0_addr = 0x570, + .format_measure_cfg1_addr = 0x574, + .format_measure0_addr = 0x578, + .format_measure1_addr = 0x57c, + .format_measure2_addr = 0x580, + .timestamp_curr0_sof_addr = 0x590, + .timestamp_curr1_sof_addr = 0x594, + .timestamp_prev0_sof_addr = 0x598, + .timestamp_prev1_sof_addr = 0x59c, + .timestamp_curr0_eof_addr = 0x5a0, + .timestamp_curr1_eof_addr = 0x5a4, + .timestamp_prev0_eof_addr = 0x5a8, + .timestamp_prev1_eof_addr = 0x5ac, + .err_recovery_cfg0_addr = 0x5b0, + .err_recovery_cfg1_addr = 0x5b4, + .err_recovery_cfg2_addr = 0x5b8, + .multi_vcdt_cfg0_addr = 0x5bc, + .byte_cntr_ping_addr = 0x5e0, + .byte_cntr_pong_addr = 0x5e4, + /* configurations */ + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + 
.halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .plain_fmt_shift_val = 10, + .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .packing_fmt_shift_val = 30, + .mipi_pack_supported = 1, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, + .fatal_err_mask = 0x26004, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_csi2_rx_reg_info + cam_ife_csid_480_csi2_reg_info = { + .irq_status_addr = 0x20, + .irq_mask_addr = 0x24, + .irq_clear_addr = 0x28, + .irq_set_addr = 0x2c, + + /*CSI2 rx control */ + .cfg0_addr = 0x100, + .cfg1_addr = 0x104, + .capture_ctrl_addr = 0x108, + .rst_strobes_addr = 0x110, + .de_scramble_cfg0_addr = 0x114, + .de_scramble_cfg1_addr = 0x118, + .cap_unmap_long_pkt_hdr_0_addr = 0x120, + .cap_unmap_long_pkt_hdr_1_addr = 0x124, + .captured_short_pkt_0_addr = 0x128, + .captured_short_pkt_1_addr = 0x12c, + .captured_long_pkt_0_addr = 0x130, + .captured_long_pkt_1_addr = 0x134, + .captured_long_pkt_ftr_addr = 0x138, + .captured_cphy_pkt_hdr_addr = 0x13c, + .lane0_misr_addr = 0x150, + .lane1_misr_addr = 0x154, + .lane2_misr_addr = 0x158, + .lane3_misr_addr = 0x15c, + .total_pkts_rcvd_addr = 0x160, + .stats_ecc_addr = 0x164, + .total_crc_err_addr = 0x168, + + .rst_srb_all = 0x3FFF, + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0x7, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 0xffff, + 
.ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .fatal_err_mask = 0x59FA800, + .part_fatal_err_mask = 0x0001000, + .non_fatal_err_mask = 0x0200000, +}; + +static struct cam_ife_csid_ver1_tpg_reg_info + cam_ife_csid_480_tpg_reg_info = { + /*CSID TPG control */ + .ctrl_addr = 0x600, + .vc_cfg0_addr = 0x604, + .vc_cfg1_addr = 0x608, + .lfsr_seed_addr = 0x60c, + .dt_n_cfg_0_addr = 0x610, + .dt_n_cfg_1_addr = 0x614, + .dt_n_cfg_2_addr = 0x618, + .color_bars_cfg_addr = 0x640, + .color_box_cfg_addr = 0x644, + .common_gen_cfg_addr = 0x648, + .cgen_n_cfg_addr = 0x650, + .cgen_n_x0_addr = 0x654, + .cgen_n_x1_addr = 0x658, + .cgen_n_x2_addr = 0x65c, + .cgen_n_xy_addr = 0x660, + .cgen_n_y1_addr = 0x664, + .cgen_n_y2_addr = 0x668, + + /* configurations */ + .dtn_cfg_offset = 0xc, + .cgen_cfg_offset = 0x20, + .cpas_ife_reg_offset = 0x28, + .hbi = 0x740, + .vbi = 0x3FF, + .ctrl_cfg = 0x408007, + .lfsr_seed = 0x12345678, + .color_bar = 1, + .num_frames = 0, + .line_interleave_mode = 0x1, + .payload_mode = 0x8, + .num_active_lanes_mask = 0x30, + .num_active_dt = 0, + .fmt_shift = 16, + .num_frame_shift = 16, + .width_shift = 16, + .vbi_shift = 12, + .line_interleave_shift = 10, + .num_active_dt_shift = 8, + .color_bar_shift = 5, + .height_shift = 0, + .hbi_shift = 0, +}; + +static struct cam_ife_csid_ver1_common_reg_info + cam_ife_csid_480_cmn_reg_info = { + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .ctrl_addr = 0x8, + .reset_addr = 0xc, + .rst_strobes_addr = 0x10, + + .test_bus_ctrl_addr = 0x14, + .top_irq_status_addr = 0x70, + .top_irq_mask_addr = 0x74, + .top_irq_clear_addr = 0x78, + .top_irq_set_addr = 0x7c, + .irq_cmd_addr = 0x80, + + /*configurations */ + .major_version = 1, + .minor_version = 7, + .version_incr = 0, + .num_rdis = 3, + .num_pix = 1, + .num_ppp = 1, + .drop_supported = 1, + .early_eof_supported = 1, + .rst_sw_reg_stb = 1, + .rst_hw_reg_stb = 0x1e, + .rst_sw_hw_reg_stb = 0x1f, + 
.path_rst_stb_all = 0x7f, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .fmt_shift_val = 12, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .timestamp_strobe_val = 0x2, + .timestamp_stb_sel_shift_val = 0, + .multi_vcdt_en_shift_val = 0, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .measure_en_hbi_vbi_cnt_mask = 0xC, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, +}; + +static struct cam_ife_csid_ver1_reg_info cam_ife_csid_480_reg_info = { + .cmn_reg = &cam_ife_csid_480_cmn_reg_info, + .csi2_reg = &cam_ife_csid_480_csi2_reg_info, + .ipp_reg = &cam_ife_csid_480_ipp_reg_info, + .ppp_reg = &cam_ife_csid_480_ppp_reg_info, + .rdi_reg = { + &cam_ife_csid_480_rdi_0_reg_info, + &cam_ife_csid_480_rdi_1_reg_info, + &cam_ife_csid_480_rdi_2_reg_info, + NULL, + }, + .tpg_reg = &cam_ife_csid_480_tpg_reg_info, + .csid_cust_node_map = {0x2, 0x4}, +}; +#endif /*_CAM_IFE_CSID_480_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid570.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid570.h new file mode 100644 index 0000000000..7798dff5f2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid570.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_570_H_ +#define _CAM_IFE_CSID_570_H_ + +#include +#include "camera_main.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" + +/* Settings for 570 CSID are leveraged from 480 */ +static struct cam_ife_csid_ver1_reg_info cam_ife_csid_570_reg_info = { + .cmn_reg = &cam_ife_csid_480_cmn_reg_info, + .csi2_reg = &cam_ife_csid_480_csi2_reg_info, + .ipp_reg = &cam_ife_csid_480_ipp_reg_info, + .ppp_reg = &cam_ife_csid_480_ppp_reg_info, + .rdi_reg = { + &cam_ife_csid_480_rdi_0_reg_info, + &cam_ife_csid_480_rdi_1_reg_info, + &cam_ife_csid_480_rdi_2_reg_info, + NULL, + }, + .tpg_reg = &cam_ife_csid_480_tpg_reg_info, + .width_fuse_max_val = 3, + .fused_max_width = {5612, 6048, 7308, UINT_MAX}, +}; +#endif /*_CAM_IFE_CSID_570_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid580.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid580.h new file mode 100644 index 0000000000..fc515b4258 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid580.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_580_H_ +#define _CAM_IFE_CSID_580_H_ + +#include +#include "camera_main.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" + +/* Settings for 580 CSID are leveraged from 480 */ +static struct cam_ife_csid_ver1_reg_info cam_ife_csid_580_reg_info = { + .cmn_reg = &cam_ife_csid_480_cmn_reg_info, + .csi2_reg = &cam_ife_csid_480_csi2_reg_info, + .ipp_reg = &cam_ife_csid_480_ipp_reg_info, + .ppp_reg = &cam_ife_csid_480_ppp_reg_info, + .rdi_reg = { + &cam_ife_csid_480_rdi_0_reg_info, + &cam_ife_csid_480_rdi_1_reg_info, + &cam_ife_csid_480_rdi_2_reg_info, + NULL, + }, + .tpg_reg = &cam_ife_csid_480_tpg_reg_info, + .csid_cust_node_map = {0x2, 0x4}, + .width_fuse_max_val = 3, + .fused_max_width = {5612, 6048, 7308, UINT_MAX}, +}; +#endif /*_CAM_IFE_CSID_580_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid680.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid680.h new file mode 100644 index 0000000000..c22666cc18 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid680.h @@ -0,0 +1,1419 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_680_H_ +#define _CAM_IFE_CSID_680_H_ + +#include +#include "cam_ife_csid_dev.h" +#include "camera_main.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" + +#define CAM_CSID_VERSION_V680 0x60080000 + +static uint32_t cam_ife_csid_680_num_top_irq_desc[] = {0,}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_680_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "LONG_PKT", + }, + { + .bitmask = BIT(9), + .desc = "SHORT_PKT", + }, + { + .bitmask = BIT(10), + .desc = "CPHY_PKT_HDR", + }, + { + .bitmask = BIT(11), + .desc = "ERROR_CPHY_EOT_RECEPTION", + }, + { + .bitmask = BIT(12), + .desc = "ERROR_CPHY_SOT_RECEPTION", + }, + { + .bitmask = BIT(13), + .desc = "ERROR_CPHY_PH_CRC", + }, + { + .bitmask = BIT(14), + .desc = "WARNING_ECC", + }, + { + .bitmask = BIT(15), + .desc = "ERROR_LANE0_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(16), + .desc = "ERROR_LANE1_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(17), + .desc = "ERROR_LANE2_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(18), + .desc = "ERROR_LANE3_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(19), + .desc = "ERROR_CRC", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_ECC", + }, + { + .bitmask = BIT(21), + .desc = "ERROR_MMAPPED_VC_DT", + }, + { + .bitmask = BIT(22), + .desc = "ERROR_UNMAPPED_VC_DT", + }, + { + .bitmask = BIT(23), + .desc = "ERROR_STREAM_UNDERFLOW", + }, + { + .bitmask = BIT(24), + .desc = "ERROR_UNBOUNDED_FRAME", + }, + }, +}; + +static const uint32_t 
cam_ife_csid_680_num_rx_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_680_rx_irq_desc[0]), +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_680_path_irq_desc[] = { + { + .bitmask = BIT(0), + .desc = "", + }, + { + .bitmask = BIT(1), + .desc = "", + }, + { + .bitmask = BIT(2), + .desc = "ERROR_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(3), + .desc = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .desc = "CAMIF_SOF", + }, + { + .bitmask = BIT(5), + .desc = "FRAME_DROP_EOF", + }, + { + .bitmask = BIT(6), + .desc = "FRAME_DROP_EOL", + }, + { + .bitmask = BIT(7), + .desc = "FRAME_DROP_SOL", + }, + { + .bitmask = BIT(8), + .desc = "FRAME_DROP_SOF", + }, + { + .bitmask = BIT(9), + .desc = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .desc = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .desc = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .desc = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_PIX_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_LINE_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .desc = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(16), + .desc = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(17), + .desc = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .desc = "FRAME_DROP", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .desc = "OVERFLOW_RECOVERY: Back pressure/output fifo ovrfl", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_REC_CCIF_VIOLATION From Camif", + }, + { + .bitmask = BIT(21), + .desc = "CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .desc = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .desc = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .desc = "ILLEGAL_BATCH_ID", + }, + { + .bitmask = BIT(25), + .desc = "BATCH_END_MISSING_VIOLATION", + }, + { + 
.bitmask = BIT(26), + .desc = "HEIGHT_VIOLATION", + }, + { + .bitmask = BIT(27), + .desc = "WIDTH_VIOLATION", + }, + { + .bitmask = BIT(28), + .desc = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .desc = "CCIF_VIOLATION: Bad frame timings", + }, +}; + +static struct cam_irq_register_set cam_ife_csid_680_irq_reg_set[9] = { + /* Top */ + { + .mask_reg_offset = 0x00000080, + .clear_reg_offset = 0x00000084, + .status_reg_offset = 0x0000007C, + .set_reg_offset = 0x00000088, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + /* RX */ + { + .mask_reg_offset = 0x000000A0, + .clear_reg_offset = 0x000000A4, + .status_reg_offset = 0x0000009C, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x000000F0, + .clear_reg_offset = 0x000000F4, + .status_reg_offset = 0x000000EC, + }, + /* RDI1 */ + { + .mask_reg_offset = 0x00000100, + .clear_reg_offset = 0x00000104, + .status_reg_offset = 0x000000FC, + }, + /* RDI2 */ + { + .mask_reg_offset = 0x00000110, + .clear_reg_offset = 0x00000114, + .status_reg_offset = 0x0000010C, + }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000120, + .clear_reg_offset = 0x00000124, + .status_reg_offset = 0x0000011C, + }, + /* RDI4 */ + { + .mask_reg_offset = 0x00000130, + .clear_reg_offset = 0x00000134, + .status_reg_offset = 0x0000012C, + }, + /* IPP */ + { + .mask_reg_offset = 0x000000B0, + .clear_reg_offset = 0x000000B4, + .status_reg_offset = 0x000000AC, + }, + /* PPP */ + { + .mask_reg_offset = 0x000000D0, + .clear_reg_offset = 0x000000D4, + .status_reg_offset = 0x000000CC, + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_680_top_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000014, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, + }, +}; + +static struct cam_irq_controller_reg_info 
cam_ife_csid_680_rx_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_680_path_irq_reg_info[7] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_4], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_PPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, +}; + +static struct cam_irq_register_set cam_ife_csid_680_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000090, + .clear_reg_offset = 0x00000094, + .status_reg_offset = 0x0000008C, + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_680_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_680_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_680_ipp_reg_info = { + 
.irq_status_addr = 0xAC, + .irq_mask_addr = 0xB0, + .irq_clear_addr = 0xB4, + .irq_set_addr = 0xB8, + .cfg0_addr = 0x300, + .ctrl_addr = 0x304, + .debug_clr_cmd_addr = 0x308, + .multi_vcdt_cfg0_addr = 0x30c, + .cfg1_addr = 0x310, + .err_recovery_cfg0_addr = 0x318, + .err_recovery_cfg1_addr = 0x31C, + .err_recovery_cfg2_addr = 0x320, + .bin_pd_detect_cfg0_addr = 0x324, + .bin_pd_detect_cfg1_addr = 0x328, + .bin_pd_detect_cfg2_addr = 0x32C, + .camif_frame_cfg_addr = 0x330, + .epoch_irq_cfg_addr = 0x334, + .epoch0_subsample_ptrn_addr = 0x338, + .epoch1_subsample_ptrn_addr = 0x33C, + .debug_camif_1_addr = 0x340, + .debug_camif_0_addr = 0x344, + .debug_halt_status_addr = 0x348, + .debug_misr_val0_addr = 0x34C, + .debug_misr_val1_addr = 0x350, + .debug_misr_val2_addr = 0x354, + .debug_misr_val3_addr = 0x358, + .hcrop_addr = 0x35c, + .vcrop_addr = 0x360, + .pix_drop_pattern_addr = 0x364, + .pix_drop_period_addr = 0x368, + .line_drop_pattern_addr = 0x36C, + .line_drop_period_addr = 0x370, + .frm_drop_pattern_addr = 0x374, + .frm_drop_period_addr = 0x378, + .irq_subsample_pattern_addr = 0x37C, + .irq_subsample_period_addr = 0x380, + .format_measure_cfg0_addr = 0x384, + .format_measure_cfg1_addr = 0x388, + .format_measure0_addr = 0x38C, + .format_measure1_addr = 0x390, + .format_measure2_addr = 0x394, + .timestamp_curr0_sof_addr = 0x398, + .timestamp_curr1_sof_addr = 0x39C, + .timestamp_perv0_sof_addr = 0x3A0, + .timestamp_perv1_sof_addr = 0x3A4, + .timestamp_curr0_eof_addr = 0x3A8, + .timestamp_curr1_eof_addr = 0x3AC, + .timestamp_perv0_eof_addr = 0x3B0, + .timestamp_perv1_eof_addr = 0x3B4, + .lut_bank_cfg_addr = 0x3B8, + .batch_id_cfg0_addr = 0x3BC, + .batch_id_cfg1_addr = 0x3C0, + .batch_period_cfg_addr = 0x3C4, + .batch_stream_id_cfg_addr = 0x3C8, + .epoch0_cfg_batch_id0_addr = 0x3CC, + .epoch1_cfg_batch_id0_addr = 0x3D0, + .epoch0_cfg_batch_id1_addr = 0x3D4, + .epoch1_cfg_batch_id1_addr = 0x3D8, + .epoch0_cfg_batch_id2_addr = 0x3DC, + .epoch1_cfg_batch_id2_addr = 0x3E0, 
+ .epoch0_cfg_batch_id3_addr = 0x3E4, + .epoch1_cfg_batch_id3_addr = 0x3E8, + .epoch0_cfg_batch_id4_addr = 0x3EC, + .epoch1_cfg_batch_id4_addr = 0x3F0, + .epoch0_cfg_batch_id5_addr = 0x3F4, + .epoch1_cfg_batch_id5_addr = 0x3F8, + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 14, + .early_eof_en_shift_val = 16, + .bin_h_en_shift_val = 20, + .bin_v_en_shift_val = 21, + .bin_en_shift_val = 18, + .bin_qcfa_en_shift_val = 19, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 9, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .bin_pd_en_shift_val = 0, + .bin_pd_blk_w_shift_val = 8, + .bin_pd_blk_h_shift_val = 24, + .bin_pd_detect_x_offset_shift_val = 0, + .bin_pd_detect_x_end_shift_val = 16, + .bin_pd_detect_y_offset_shift_val = 0, + .bin_pd_detect_y_end_shift_val = 16, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x186004, + .non_fatal_err_mask = 0x10000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x10001, + .top_irq_mask = {0x10,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_680_ppp_reg_info = { + .irq_status_addr = 0xCC, + .irq_mask_addr = 0xD0, + .irq_clear_addr = 0xD4, + .irq_set_addr = 0xD8, + .cfg0_addr = 0xB00, + .ctrl_addr = 0xB04, + .debug_clr_cmd_addr = 0xB08, + .multi_vcdt_cfg0_addr = 0xB0c, + .cfg1_addr = 0xB10, + .sparse_pd_extractor_cfg_addr = 0xB14, + .err_recovery_cfg0_addr = 0xB18, + 
.err_recovery_cfg1_addr = 0xB1C, + .err_recovery_cfg2_addr = 0xB20, + .camif_frame_cfg_addr = 0xB30, + .epoch_irq_cfg_addr = 0xB34, + .epoch0_subsample_ptrn_addr = 0xB38, + .epoch1_subsample_ptrn_addr = 0xB3C, + .debug_camif_1_addr = 0xB40, + .debug_camif_0_addr = 0xB44, + .debug_halt_status_addr = 0xB48, + .debug_misr_val0_addr = 0xB4C, + .debug_misr_val1_addr = 0xB50, + .debug_misr_val2_addr = 0xB54, + .debug_misr_val3_addr = 0xB58, + .hcrop_addr = 0xB5c, + .vcrop_addr = 0xB60, + .pix_drop_pattern_addr = 0xB64, + .pix_drop_period_addr = 0xB68, + .line_drop_pattern_addr = 0xB6C, + .line_drop_period_addr = 0xB70, + .frm_drop_pattern_addr = 0xB74, + .frm_drop_period_addr = 0xB78, + .irq_subsample_pattern_addr = 0xB7C, + .irq_subsample_period_addr = 0xB80, + .format_measure_cfg0_addr = 0xB84, + .format_measure_cfg1_addr = 0xB88, + .format_measure0_addr = 0xB8C, + .format_measure1_addr = 0xB90, + .format_measure2_addr = 0xB94, + .timestamp_curr0_sof_addr = 0xB98, + .timestamp_curr1_sof_addr = 0xB9C, + .timestamp_perv0_sof_addr = 0xBA0, + .timestamp_perv1_sof_addr = 0xBA4, + .timestamp_curr0_eof_addr = 0xBA8, + .timestamp_curr1_eof_addr = 0xBAC, + .timestamp_perv0_eof_addr = 0xBB0, + .timestamp_perv1_eof_addr = 0xBB4, + .lut_bank_cfg_addr = 0xBB8, + .batch_id_cfg0_addr = 0xBBC, + .batch_id_cfg1_addr = 0xBC0, + .batch_period_cfg_addr = 0xBC4, + .batch_stream_id_cfg_addr = 0xBC8, + .epoch0_cfg_batch_id0_addr = 0xBCC, + .epoch1_cfg_batch_id0_addr = 0xBD0, + .epoch0_cfg_batch_id1_addr = 0xBD4, + .epoch1_cfg_batch_id1_addr = 0xBD8, + .epoch0_cfg_batch_id2_addr = 0xBDC, + .epoch1_cfg_batch_id2_addr = 0xBE0, + .epoch0_cfg_batch_id3_addr = 0xBE4, + .epoch1_cfg_batch_id3_addr = 0xBE8, + .epoch0_cfg_batch_id4_addr = 0xBEC, + .epoch1_cfg_batch_id4_addr = 0xBF0, + .epoch0_cfg_batch_id5_addr = 0xBF4, + .epoch1_cfg_batch_id5_addr = 0xBF8, + /* configurations */ + .resume_frame_boundary = 1, + .start_mode_shift = 2, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + 
.start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_master_sel_val = 3, + .start_master_sel_shift = 4, + .binning_supported = 0x1, + .bin_h_en_shift_val = 18, + .bin_en_shift_val = 18, + .early_eof_en_shift_val = 16, + .pix_store_en_shift_val = 14, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 9, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x186004, + .non_fatal_err_mask = 0x10000000, + .rup_aup_mask = 0x40004, + .top_irq_mask = {0x40,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .crop_drop_enable = false, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_680_rdi_0_reg_info = { + .irq_status_addr = 0xEC, + .irq_mask_addr = 0xF0, + .irq_clear_addr = 0xF4, + .irq_set_addr = 0xF8, + .cfg0_addr = 0x500, + .ctrl_addr = 0x504, + .debug_clr_cmd_addr = 0x508, + .multi_vcdt_cfg0_addr = 0x50c, + .cfg1_addr = 0x510, + .err_recovery_cfg0_addr = 0x514, + .err_recovery_cfg1_addr = 0x518, + .err_recovery_cfg2_addr = 0x51C, + .debug_byte_cntr_ping_addr = 0x520, + .debug_byte_cntr_pong_addr = 0x524, + .camif_frame_cfg_addr = 0x528, + .epoch_irq_cfg_addr = 0x52C, + .epoch0_subsample_ptrn_addr = 0x530, + .epoch1_subsample_ptrn_addr = 0x534, + .debug_camif_1_addr = 0x538, + .debug_camif_0_addr = 0x53C, + .frm_drop_pattern_addr = 0x540, + .frm_drop_period_addr = 0x540, + .irq_subsample_pattern_addr = 0x548, + .irq_subsample_period_addr = 0x54C, + .hcrop_addr = 0x550, + .vcrop_addr = 0x554, + .pix_drop_pattern_addr = 0x558, + .pix_drop_period_addr = 0x55C, + .line_drop_pattern_addr = 0x560, + .line_drop_period_addr = 0x564, + .debug_halt_status_addr = 0x568, + .debug_misr_val0_addr = 0x570, + .debug_misr_val1_addr = 0x574, + .debug_misr_val2_addr = 0x578, + 
.debug_misr_val3_addr = 0x57C, + .format_measure_cfg0_addr = 0x580, + .format_measure_cfg1_addr = 0x584, + .format_measure0_addr = 0x588, + .format_measure1_addr = 0x58C, + .format_measure2_addr = 0x590, + .timestamp_curr0_sof_addr = 0x594, + .timestamp_curr1_sof_addr = 0x598, + .timestamp_perv0_sof_addr = 0x59C, + .timestamp_perv1_sof_addr = 0x5A0, + .timestamp_curr0_eof_addr = 0x5A4, + .timestamp_curr1_eof_addr = 0x5A8, + .timestamp_perv0_eof_addr = 0x5AC, + .timestamp_perv1_eof_addr = 0x5B0, + .batch_id_cfg0_addr = 0x5B4, + .batch_id_cfg1_addr = 0x5B8, + .batch_period_cfg_addr = 0x5BC, + .batch_stream_id_cfg_addr = 0x5C0, + .epoch0_cfg_batch_id0_addr = 0x5C4, + .epoch1_cfg_batch_id0_addr = 0x5C8, + .epoch0_cfg_batch_id1_addr = 0x5CC, + .epoch1_cfg_batch_id1_addr = 0x5D0, + .epoch0_cfg_batch_id2_addr = 0x5D4, + .epoch1_cfg_batch_id2_addr = 0x5D8, + .epoch0_cfg_batch_id3_addr = 0x5DC, + .epoch1_cfg_batch_id3_addr = 0x5E0, + .epoch0_cfg_batch_id4_addr = 0x5E4, + .epoch1_cfg_batch_id4_addr = 0x5E8, + .epoch0_cfg_batch_id5_addr = 0x5EC, + .epoch1_cfg_batch_id5_addr = 0x5F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 4, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x186004, + .non_fatal_err_mask = 0x10000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x100010, + .top_irq_mask = {0x100,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct 
cam_ife_csid_ver2_path_reg_info + cam_ife_csid_680_rdi_1_reg_info = { + .irq_status_addr = 0xFC, + .irq_mask_addr = 0x100, + .irq_clear_addr = 0x104, + .irq_set_addr = 0x108, + .cfg0_addr = 0x600, + .ctrl_addr = 0x604, + .debug_clr_cmd_addr = 0x608, + .multi_vcdt_cfg0_addr = 0x60c, + .cfg1_addr = 0x610, + .err_recovery_cfg0_addr = 0x614, + .err_recovery_cfg1_addr = 0x618, + .err_recovery_cfg2_addr = 0x61C, + .debug_byte_cntr_ping_addr = 0x620, + .debug_byte_cntr_pong_addr = 0x624, + .camif_frame_cfg_addr = 0x628, + .epoch_irq_cfg_addr = 0x62C, + .epoch0_subsample_ptrn_addr = 0x630, + .epoch1_subsample_ptrn_addr = 0x634, + .debug_camif_1_addr = 0x638, + .debug_camif_0_addr = 0x63C, + .frm_drop_pattern_addr = 0x640, + .frm_drop_period_addr = 0x644, + .irq_subsample_pattern_addr = 0x648, + .irq_subsample_period_addr = 0x64C, + .hcrop_addr = 0x650, + .vcrop_addr = 0x654, + .pix_drop_pattern_addr = 0x658, + .pix_drop_period_addr = 0x65C, + .line_drop_pattern_addr = 0x660, + .line_drop_period_addr = 0x664, + .debug_halt_status_addr = 0x66C, + .debug_misr_val0_addr = 0x670, + .debug_misr_val1_addr = 0x674, + .debug_misr_val2_addr = 0x678, + .debug_misr_val3_addr = 0x67C, + .format_measure_cfg0_addr = 0x680, + .format_measure_cfg1_addr = 0x684, + .format_measure0_addr = 0x688, + .format_measure1_addr = 0x68C, + .format_measure2_addr = 0x690, + .timestamp_curr0_sof_addr = 0x694, + .timestamp_curr1_sof_addr = 0x698, + .timestamp_perv0_sof_addr = 0x69C, + .timestamp_perv1_sof_addr = 0x6A0, + .timestamp_curr0_eof_addr = 0x6A4, + .timestamp_curr1_eof_addr = 0x6A8, + .timestamp_perv0_eof_addr = 0x6AC, + .timestamp_perv1_eof_addr = 0x6B0, + .batch_id_cfg0_addr = 0x6B4, + .batch_id_cfg1_addr = 0x6B8, + .batch_period_cfg_addr = 0x6BC, + .batch_stream_id_cfg_addr = 0x6C0, + .epoch0_cfg_batch_id0_addr = 0x6C4, + .epoch1_cfg_batch_id0_addr = 0x6C8, + .epoch0_cfg_batch_id1_addr = 0x6CC, + .epoch1_cfg_batch_id1_addr = 0x6D0, + .epoch0_cfg_batch_id2_addr = 0x6D4, + 
.epoch1_cfg_batch_id2_addr = 0x6D8, + .epoch0_cfg_batch_id3_addr = 0x6DC, + .epoch1_cfg_batch_id3_addr = 0x6E0, + .epoch0_cfg_batch_id4_addr = 0x6E4, + .epoch1_cfg_batch_id4_addr = 0x6E8, + .epoch0_cfg_batch_id5_addr = 0x6EC, + .epoch1_cfg_batch_id5_addr = 0x6F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .offline_mode_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 4, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x186004, + .non_fatal_err_mask = 0x10000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x200020, + .top_irq_mask = {0x200,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_680_rdi_2_reg_info = { + .irq_status_addr = 0x10C, + .irq_mask_addr = 0x110, + .irq_clear_addr = 0x114, + .irq_set_addr = 0x118, + .cfg0_addr = 0x700, + .ctrl_addr = 0x704, + .debug_clr_cmd_addr = 0x708, + .multi_vcdt_cfg0_addr = 0x70c, + .cfg1_addr = 0x710, + .err_recovery_cfg0_addr = 0x714, + .err_recovery_cfg1_addr = 0x718, + .err_recovery_cfg2_addr = 0x71C, + .debug_byte_cntr_ping_addr = 0x720, + .debug_byte_cntr_pong_addr = 0x724, + .camif_frame_cfg_addr = 0x728, + .epoch_irq_cfg_addr = 0x72C, + .epoch0_subsample_ptrn_addr = 0x730, + .epoch1_subsample_ptrn_addr = 0x734, + .debug_camif_1_addr = 0x738, + .debug_camif_0_addr = 0x73C, + .frm_drop_pattern_addr = 0x740, + .frm_drop_period_addr = 0x744, + .irq_subsample_pattern_addr = 0x748, + .irq_subsample_period_addr = 
0x74C, + .hcrop_addr = 0x750, + .vcrop_addr = 0x754, + .pix_drop_pattern_addr = 0x758, + .pix_drop_period_addr = 0x75C, + .line_drop_pattern_addr = 0x760, + .line_drop_period_addr = 0x764, + .debug_halt_status_addr = 0x76C, + .debug_misr_val0_addr = 0x770, + .debug_misr_val1_addr = 0x774, + .debug_misr_val2_addr = 0x778, + .debug_misr_val3_addr = 0x77C, + .format_measure_cfg0_addr = 0x780, + .format_measure_cfg1_addr = 0x784, + .format_measure0_addr = 0x788, + .format_measure1_addr = 0x78C, + .format_measure2_addr = 0x790, + .timestamp_curr0_sof_addr = 0x794, + .timestamp_curr1_sof_addr = 0x798, + .timestamp_perv0_sof_addr = 0x79C, + .timestamp_perv1_sof_addr = 0x7A0, + .timestamp_curr0_eof_addr = 0x7A4, + .timestamp_curr1_eof_addr = 0x7A8, + .timestamp_perv0_eof_addr = 0x7AC, + .timestamp_perv1_eof_addr = 0x7B0, + .batch_id_cfg0_addr = 0x7B4, + .batch_id_cfg1_addr = 0x7B8, + .batch_period_cfg_addr = 0x7BC, + .batch_stream_id_cfg_addr = 0x7C0, + .epoch0_cfg_batch_id0_addr = 0x7C4, + .epoch1_cfg_batch_id0_addr = 0x7C8, + .epoch0_cfg_batch_id1_addr = 0x7CC, + .epoch1_cfg_batch_id1_addr = 0x7D0, + .epoch0_cfg_batch_id2_addr = 0x7D4, + .epoch1_cfg_batch_id2_addr = 0x7D8, + .epoch0_cfg_batch_id3_addr = 0x7DC, + .epoch1_cfg_batch_id3_addr = 0x7E0, + .epoch0_cfg_batch_id4_addr = 0x7E4, + .epoch1_cfg_batch_id4_addr = 0x7E8, + .epoch0_cfg_batch_id5_addr = 0x7EC, + .epoch1_cfg_batch_id5_addr = 0x7F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .offline_mode_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 4, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + 
.fatal_err_mask = 0x186004, + .non_fatal_err_mask = 0x10000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x400040, + .top_irq_mask = {0x400,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_680_rdi_3_reg_info = { + .irq_status_addr = 0x11C, + .irq_mask_addr = 0x120, + .irq_clear_addr = 0x124, + .irq_set_addr = 0x128, + .cfg0_addr = 0x800, + .ctrl_addr = 0x804, + .debug_clr_cmd_addr = 0x808, + .multi_vcdt_cfg0_addr = 0x80c, + .cfg1_addr = 0x810, + .err_recovery_cfg0_addr = 0x814, + .err_recovery_cfg1_addr = 0x818, + .err_recovery_cfg2_addr = 0x81C, + .debug_byte_cntr_ping_addr = 0x820, + .debug_byte_cntr_pong_addr = 0x824, + .camif_frame_cfg_addr = 0x828, + .epoch_irq_cfg_addr = 0x82C, + .epoch0_subsample_ptrn_addr = 0x830, + .epoch1_subsample_ptrn_addr = 0x834, + .debug_camif_1_addr = 0x838, + .debug_camif_0_addr = 0x83C, + .frm_drop_pattern_addr = 0x840, + .frm_drop_period_addr = 0x840, + .irq_subsample_pattern_addr = 0x848, + .irq_subsample_period_addr = 0x84C, + .hcrop_addr = 0x850, + .vcrop_addr = 0x854, + .pix_drop_pattern_addr = 0x858, + .pix_drop_period_addr = 0x85C, + .line_drop_pattern_addr = 0x860, + .line_drop_period_addr = 0x864, + .debug_halt_status_addr = 0x868, + .debug_misr_val0_addr = 0x870, + .debug_misr_val1_addr = 0x874, + .debug_misr_val2_addr = 0x878, + .debug_misr_val3_addr = 0x87C, + .format_measure_cfg0_addr = 0x880, + .format_measure_cfg1_addr = 0x884, + .format_measure0_addr = 0x888, + .format_measure1_addr = 0x88C, + .format_measure2_addr = 0x890, + .timestamp_curr0_sof_addr = 0x894, + .timestamp_curr1_sof_addr = 0x898, + .timestamp_perv0_sof_addr = 0x89C, + .timestamp_perv1_sof_addr = 0x8A0, + .timestamp_curr0_eof_addr = 0x8A4, + .timestamp_curr1_eof_addr = 0x8A8, + .timestamp_perv0_eof_addr = 0x8AC, + .timestamp_perv1_eof_addr = 0x8B0, + 
.batch_id_cfg0_addr = 0x8B4, + .batch_id_cfg1_addr = 0x8B8, + .batch_period_cfg_addr = 0x8BC, + .batch_stream_id_cfg_addr = 0x8C0, + .epoch0_cfg_batch_id0_addr = 0x8C4, + .epoch1_cfg_batch_id0_addr = 0x8C8, + .epoch0_cfg_batch_id1_addr = 0x8CC, + .epoch1_cfg_batch_id1_addr = 0x8D0, + .epoch0_cfg_batch_id2_addr = 0x8D4, + .epoch1_cfg_batch_id2_addr = 0x8D8, + .epoch0_cfg_batch_id3_addr = 0x8DC, + .epoch1_cfg_batch_id3_addr = 0x8E0, + .epoch0_cfg_batch_id4_addr = 0x8E4, + .epoch1_cfg_batch_id4_addr = 0x8E8, + .epoch0_cfg_batch_id5_addr = 0x8EC, + .epoch1_cfg_batch_id5_addr = 0x8F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 4, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x186004, + .non_fatal_err_mask = 0x10000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x800080, + .top_irq_mask = {0x800,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_680_rdi_4_reg_info = { + .irq_status_addr = 0x12C, + .irq_mask_addr = 0x130, + .irq_clear_addr = 0x134, + .irq_set_addr = 0x138, + .cfg0_addr = 0x900, + .ctrl_addr = 0x904, + .debug_clr_cmd_addr = 0x908, + .multi_vcdt_cfg0_addr = 0x90c, + .cfg1_addr = 0x910, + .err_recovery_cfg0_addr = 0x914, + .err_recovery_cfg1_addr = 0x918, + .err_recovery_cfg2_addr = 0x91C, + .debug_byte_cntr_ping_addr = 0x920, + .debug_byte_cntr_pong_addr = 0x924, + 
.camif_frame_cfg_addr = 0x928, + .epoch_irq_cfg_addr = 0x92C, + .epoch0_subsample_ptrn_addr = 0x930, + .epoch1_subsample_ptrn_addr = 0x934, + .debug_camif_1_addr = 0x938, + .debug_camif_0_addr = 0x93C, + .frm_drop_pattern_addr = 0x940, + .frm_drop_period_addr = 0x940, + .irq_subsample_pattern_addr = 0x948, + .irq_subsample_period_addr = 0x94C, + .hcrop_addr = 0x950, + .vcrop_addr = 0x954, + .pix_drop_pattern_addr = 0x958, + .pix_drop_period_addr = 0x95C, + .line_drop_pattern_addr = 0x960, + .line_drop_period_addr = 0x964, + .debug_halt_status_addr = 0x968, + .debug_misr_val0_addr = 0x970, + .debug_misr_val1_addr = 0x974, + .debug_misr_val2_addr = 0x978, + .debug_misr_val3_addr = 0x97C, + .format_measure_cfg0_addr = 0x980, + .format_measure_cfg1_addr = 0x984, + .format_measure0_addr = 0x988, + .format_measure1_addr = 0x98C, + .format_measure2_addr = 0x990, + .timestamp_curr0_sof_addr = 0x994, + .timestamp_curr1_sof_addr = 0x998, + .timestamp_perv0_sof_addr = 0x99C, + .timestamp_perv1_sof_addr = 0x9A0, + .timestamp_curr0_eof_addr = 0x9A4, + .timestamp_curr1_eof_addr = 0x9A8, + .timestamp_perv0_eof_addr = 0x9AC, + .timestamp_perv1_eof_addr = 0x9B0, + .batch_id_cfg0_addr = 0x9B4, + .batch_id_cfg1_addr = 0x9B8, + .batch_period_cfg_addr = 0x9BC, + .batch_stream_id_cfg_addr = 0x9C0, + .epoch0_cfg_batch_id0_addr = 0x9C4, + .epoch1_cfg_batch_id0_addr = 0x9C8, + .epoch0_cfg_batch_id1_addr = 0x9CC, + .epoch1_cfg_batch_id1_addr = 0x9D0, + .epoch0_cfg_batch_id2_addr = 0x9D4, + .epoch1_cfg_batch_id2_addr = 0x9D8, + .epoch0_cfg_batch_id3_addr = 0x9DC, + .epoch1_cfg_batch_id3_addr = 0x9E0, + .epoch0_cfg_batch_id4_addr = 0x9E4, + .epoch1_cfg_batch_id4_addr = 0x9E8, + .epoch0_cfg_batch_id5_addr = 0x9EC, + .epoch1_cfg_batch_id5_addr = 0x9F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .early_eof_en_shift_val = 14, + 
.plain_fmt_shift_val = 12, + .plain_alignment_shift_val = 11, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .timestamp_en_shift_val = 4, + .format_measure_en_shift_val = 3, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x186004, + .non_fatal_err_mask = 0x10000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x1000100, + .top_irq_mask = {0x1000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + cam_ife_csid_680_csi2_reg_info = { + .irq_status_addr = {0x9C,}, + .irq_mask_addr = {0xA0,}, + .irq_clear_addr = {0xA4,}, + .irq_set_addr = {0xA8,}, + /*CSI2 rx control */ + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .capture_ctrl_addr = 0x208, + .rst_strobes_addr = 0x20C, + .cap_unmap_long_pkt_hdr_0_addr = 0x210, + .cap_unmap_long_pkt_hdr_1_addr = 0x214, + .captured_short_pkt_0_addr = 0x218, + .captured_short_pkt_1_addr = 0x21c, + .captured_long_pkt_0_addr = 0x220, + .captured_long_pkt_1_addr = 0x224, + .captured_long_pkt_ftr_addr = 0x228, + .captured_cphy_pkt_hdr_addr = 0x22c, + .lane0_misr_addr = 0x230, + .lane1_misr_addr = 0x234, + .lane2_misr_addr = 0x238, + .lane3_misr_addr = 0x23c, + .total_pkts_rcvd_addr = 0x240, + .stats_ecc_addr = 0x244, + .total_crc_err_addr = 0x248, + .de_scramble_type3_cfg0_addr = 0x24C, + .de_scramble_type3_cfg1_addr = 0x250, + .de_scramble_type2_cfg0_addr = 0x254, + .de_scramble_type2_cfg1_addr = 0x258, + .de_scramble_type1_cfg0_addr = 0x25C, + .de_scramble_type1_cfg1_addr = 0x260, + .de_scramble_type0_cfg0_addr = 0x264, + .de_scramble_type0_cfg1_addr = 0x268, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 
1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + .phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, + .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .fatal_err_mask = {0x19FA800,}, + .part_fatal_err_mask = {0x0001000,}, + .non_fatal_err_mask = {0x0200000,}, + .top_irq_mask = {0x4,}, +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_680_cmn_reg_info = { + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .global_cmd_addr = 0x8, + .reset_cfg_addr = 0xc, + .reset_cmd_addr = 0x10, + .irq_cmd_addr = 0x14, + .rup_aup_cmd_addr = 0x18, + .offline_cmd_addr = 0x1C, + .shdr_master_slave_cfg_addr = 0x20, + .top_irq_status_addr = {0x7C,}, + .top_irq_mask_addr = {0x80,}, + .top_irq_clear_addr = {0x84,}, + .top_irq_set_addr = {0x88,}, + .buf_done_irq_status_addr = 0x8C, + .buf_done_irq_mask_addr = 0x90, + .buf_done_irq_clear_addr = 0x94, + .buf_done_irq_set_addr = 0x98, + .test_bus_ctrl = 0x1E8, + .test_bus_debug = 0x1EC, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 5, + .num_pix = 1, + .num_ppp = 1, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + 
.decode_format1_supported = false, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 0, + .vfr_en_shift_val = 0, + .mup_shift_val = 28, + .shdr_slave_rdi2_shift = 22, + .shdr_slave_rdi1_shift = 21, + .shdr_master_rdi0_shift = 5, + .shdr_master_slave_en_shift = 0, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0x1c, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .top_err_irq_mask = { + 0x0, + }, + .rst_loc_path_only_val = 0x0, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .rst_location_shift_val = 4, + .rst_mode_shift_val = 0, + .epoch_factor = 50, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .top_reset_irq_mask = { + 0x1, + }, + .top_buf_done_irq_mask = 0x2000, + .phy_sel_base_idx = 1, + .camif_irq_support = true, +}; + +static struct cam_ife_csid_ver2_top_reg_info + cam_ife_csid_680_top_reg_info = { + .io_path_cfg0_addr = { + 0x0, + 0x4, + 0x8, + }, + .dual_csid_cfg0_addr = { + 0xC, + 0x10, + 0x14, + }, + .input_core_type_shift_val = 0, + .sfe_offline_en_shift_val = 12, + .out_ife_en_shift_val = 8, + .dual_sync_sel_shift_val = 8, 
+ .dual_en_shift_val = 0, + .master_slave_sel_shift_val = 1, + .master_sel_val = 0, + .slave_sel_val = 1, + .io_path_cfg_rst_val = 1, + .dual_cfg_rst_val = 0, + .sfe_pipeline_bypassed = true, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_680_rx_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_UNBOUNDED_FRAME), + }, + + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, + .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 8, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 9, + 
.bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 10, + .bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION] = 11, + .bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION] = 12, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 13, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 14, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 15, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 16, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 17, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 18, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 19, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 20, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 21, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 22, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_UNBOUNDED_FRAME] = 24, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_680_top_debug_mask = { + .evt_bitmap = { 0ULL,}, +}; + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_680_reg_info = { + .top_irq_reg_info = cam_ife_csid_680_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_680_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_4], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_680_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_680_cmn_reg_info, + .csi2_reg = &cam_ife_csid_680_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_680_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = &cam_ife_csid_680_ppp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_680_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = 
&cam_ife_csid_680_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_680_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_680_rdi_3_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_4] = &cam_ife_csid_680_rdi_4_reg_info, + .top_reg = &cam_ife_csid_680_top_reg_info, + .input_core_sel = { + { + 0x0, + 0x1, + 0x2, + 0x3, + 0x8, + -1, + -1, + }, + { + 0x0, + 0x1, + 0x2, + 0x3, + -1, + -1, + -1, + }, + { + 0x0, + 0x1, + 0x2, + 0x3, + -1, + 0x9, + -1, + }, + }, + .need_top_cfg = 0x1, + .csid_cust_node_map = {0x1, 0x0, 0x2}, + .rx_irq_desc = &cam_ife_csid_680_rx_irq_desc, + .path_irq_desc = cam_ife_csid_680_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_680_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_680_num_rx_irq_desc, + .top_debug_mask = &cam_ife_csid_680_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_680_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, +}; +#endif /*_CAM_IFE_CSID_680_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid680_110.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid680_110.h new file mode 100644 index 0000000000..6b57a7a006 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid680_110.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_680_110_H_ +#define _CAM_IFE_CSID_680_110_H_ + +#include +#include "cam_ife_csid_dev.h" +#include "camera_main.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_680_110_reg_info = { + .top_irq_reg_info = cam_ife_csid_680_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_680_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_4], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + &cam_ife_csid_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_680_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_680_cmn_reg_info, + .csi2_reg = &cam_ife_csid_680_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_680_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = &cam_ife_csid_680_ppp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_680_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_680_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_680_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_680_rdi_3_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_4] = &cam_ife_csid_680_rdi_4_reg_info, + .top_reg = &cam_ife_csid_680_top_reg_info, + .input_core_sel = { + { + 0x0, + 0x1, + 0x2, + 0x3, + 0x8, + -1, + -1, + }, + { + 0x0, + 0x1, + 0x2, + 0x3, + -1, + -1, + -1, + }, + { + 0x0, + 0x1, + 0x2, + 0x3, + -1, + 0x9, + -1, + }, + }, + .need_top_cfg = 0x1, + .csid_cust_node_map = {0x1, 0x0, 0x2}, + .rx_irq_desc = &cam_ife_csid_680_rx_irq_desc, + .path_irq_desc = 
cam_ife_csid_680_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_680_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_680_num_rx_irq_desc, + .top_debug_mask = &cam_ife_csid_680_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_680_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, +}; +#endif /*_CAM_IFE_CSID_680_110_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid780.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid780.h new file mode 100644 index 0000000000..71e03c4ad4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid780.h @@ -0,0 +1,1545 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_780_H_ +#define _CAM_IFE_CSID_780_H_ + +#include +#include "cam_ife_csid_dev.h" +#include "camera_main.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" + +#define CAM_CSID_VERSION_V780 0x70080000 + +static const struct cam_ife_csid_irq_desc cam_ife_csid_780_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "LONG_PKT", + }, + { + .bitmask = BIT(9), + .desc = "SHORT_PKT", + }, + { + .bitmask = BIT(10), + .desc = "CPHY_PKT_HDR", + }, + { + .bitmask = BIT(11), + .desc = "ERROR_CPHY_EOT_RECEPTION", + }, + { + .bitmask = BIT(12), + .desc 
= "ERROR_CPHY_SOT_RECEPTION", + }, + { + .bitmask = BIT(13), + .desc = "ERROR_CPHY_PH_CRC", + }, + { + .bitmask = BIT(14), + .desc = "WARNING_ECC", + }, + { + .bitmask = BIT(15), + .desc = "ERROR_LANE0_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(16), + .desc = "ERROR_LANE1_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(17), + .desc = "ERROR_LANE2_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(18), + .desc = "ERROR_LANE3_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(19), + .desc = "ERROR_CRC", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_ECC", + }, + { + .bitmask = BIT(21), + .desc = "ERROR_MMAPPED_VC_DT", + }, + { + .bitmask = BIT(22), + .desc = "ERROR_UNMAPPED_VC_DT", + }, + { + .bitmask = BIT(23), + .desc = "ERROR_STREAM_UNDERFLOW", + }, + { + .bitmask = BIT(24), + .desc = "ERROR_UNBOUNDED_FRAME", + }, + }, +}; + +static const uint32_t cam_ife_csid_780_num_rx_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_780_rx_irq_desc[0]), +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_780_path_irq_desc[] = { + { + .bitmask = BIT(0), + .err_type = CAM_ISP_HW_ERROR_CSID_FATAL, + .desc = "ILLEGAL_PROGRAMMING", + .err_handler = cam_ife_csid_ver2_print_illegal_programming_irq_status, + }, + { + .bitmask = BIT(1), + .desc = "EROOR_MSG_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(2), + .desc = "ERROR_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(3), + .desc = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .desc = "CAMIF_SOF", + }, + { + .bitmask = BIT(5), + .desc = "FRAME_DROP_EOF", + }, + { + .bitmask = BIT(6), + .desc = "FRAME_DROP_EOL", + }, + { + .bitmask = BIT(7), + .desc = "FRAME_DROP_SOL", + }, + { + .bitmask = BIT(8), + .desc = "FRAME_DROP_SOF", + }, + { + .bitmask = BIT(9), + .desc = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .desc = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .desc = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .desc = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_PIX_COUNT", + 
.err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_LINE_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .desc = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(16), + .desc = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(17), + .desc = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .desc = "FRAME_DROP", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .desc = "OVERFLOW_RECOVERY: Back pressure/output fifo ovrfl", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_REC_CCIF_VIOLATION From Camif", + }, + { + .bitmask = BIT(21), + .desc = "CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .desc = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .desc = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .desc = "ILLEGAL_BATCH_ID", + }, + { + .bitmask = BIT(25), + .desc = "BATCH_END_MISSING_VIOLATION", + }, + { + .bitmask = BIT(26), + .desc = "HEIGHT_VIOLATION", + }, + { + .bitmask = BIT(27), + .desc = "WIDTH_VIOLATION", + }, + { + .bitmask = BIT(28), + .desc = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .desc = "CCIF_VIOLATION: Bad frame timings", + }, +}; + +static const struct cam_ife_csid_top_irq_desc cam_ife_csid_780_top_irq_desc[][32] = { + { + { + .bitmask = BIT(1), + .err_type = CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR, + .err_name = "FATAL_SENSOR_SWITCHING_IRQ", + .desc = "Fatal Error during dynamically switching between 2 sensors", + }, + { + .bitmask = BIT(18), + .err_name = "ERROR_NO_VOTE_DN", + .desc = "vote_up is asserted before IDLE is encountered in a frame", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .err_name = "ERROR_VOTE_UP_LATE", + .desc = "vote_up is asserted at the same time as an SOF", + .err_handler = cam_ife_csid_hw_ver2_drv_err_handler, + }, 
+ { + .bitmask = BIT(20), + .err_type = CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW, + .err_name = "ERROR_RDI_LINE_BUFFER_CONFLICT", + .desc = "Two or more RDIs programmed to access the shared line buffer", + .err_handler = cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler, + }, + }, +}; + +static const uint32_t cam_ife_csid_780_num_top_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_780_top_irq_desc[0]), +}; + +static struct cam_irq_register_set cam_ife_csid_780_irq_reg_set[9] = { + /* Top */ + { + .mask_reg_offset = 0x00000080, + .clear_reg_offset = 0x00000084, + .status_reg_offset = 0x0000007C, + .set_reg_offset = 0x00000088, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + /* RX */ + { + .mask_reg_offset = 0x000000A0, + .clear_reg_offset = 0x000000A4, + .status_reg_offset = 0x0000009C, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x000000F0, + .clear_reg_offset = 0x000000F4, + .status_reg_offset = 0x000000EC, + }, + /* RDI1 */ + { + .mask_reg_offset = 0x00000100, + .clear_reg_offset = 0x00000104, + .status_reg_offset = 0x000000FC, + }, + /* RDI2 */ + { + .mask_reg_offset = 0x00000110, + .clear_reg_offset = 0x00000114, + .status_reg_offset = 0x0000010C, + }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000120, + .clear_reg_offset = 0x00000124, + .status_reg_offset = 0x0000011C, + }, + /* RDI4 */ + { + .mask_reg_offset = 0x00000130, + .clear_reg_offset = 0x00000134, + .status_reg_offset = 0x0000012C, + }, + /* IPP */ + { + .mask_reg_offset = 0x000000B0, + .clear_reg_offset = 0x000000B4, + .status_reg_offset = 0x000000AC, + }, + /* PPP */ + { + .mask_reg_offset = 0x000000D0, + .clear_reg_offset = 0x000000D4, + .status_reg_offset = 0x000000CC, + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_780_top_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000014, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + 
.clear_all_bitmask = 0xFFFFFFFF, + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_780_rx_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_780_path_irq_reg_info[7] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_4], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_PPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, +}; + +static struct cam_irq_register_set cam_ife_csid_780_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000090, + .clear_reg_offset = 0x00000094, + .status_reg_offset = 0x0000008C, + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_780_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_780_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + 
+static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_780_ipp_reg_info = { + .irq_status_addr = 0xAC, + .irq_mask_addr = 0xB0, + .irq_clear_addr = 0xB4, + .irq_set_addr = 0xB8, + .cfg0_addr = 0x300, + .ctrl_addr = 0x304, + .debug_clr_cmd_addr = 0x308, + .multi_vcdt_cfg0_addr = 0x30C, + .cfg1_addr = 0x310, + .err_recovery_cfg0_addr = 0x318, + .err_recovery_cfg1_addr = 0x31C, + .err_recovery_cfg2_addr = 0x320, + .bin_pd_detect_cfg0_addr = 0x324, + .bin_pd_detect_cfg1_addr = 0x328, + .bin_pd_detect_cfg2_addr = 0x32C, + .camif_frame_cfg_addr = 0x330, + .epoch_irq_cfg_addr = 0x334, + .epoch0_subsample_ptrn_addr = 0x338, + .epoch1_subsample_ptrn_addr = 0x33C, + .debug_camif_1_addr = 0x340, + .debug_camif_0_addr = 0x344, + .debug_halt_status_addr = 0x348, + .debug_misr_val0_addr = 0x34C, + .debug_misr_val1_addr = 0x350, + .debug_misr_val2_addr = 0x354, + .debug_misr_val3_addr = 0x358, + .hcrop_addr = 0x35c, + .vcrop_addr = 0x360, + .pix_drop_pattern_addr = 0x364, + .pix_drop_period_addr = 0x368, + .line_drop_pattern_addr = 0x36C, + .line_drop_period_addr = 0x370, + .frm_drop_pattern_addr = 0x374, + .frm_drop_period_addr = 0x378, + .irq_subsample_pattern_addr = 0x37C, + .irq_subsample_period_addr = 0x380, + .format_measure_cfg0_addr = 0x384, + .format_measure_cfg1_addr = 0x388, + .format_measure0_addr = 0x38C, + .format_measure1_addr = 0x390, + .format_measure2_addr = 0x394, + .timestamp_curr0_sof_addr = 0x398, + .timestamp_curr1_sof_addr = 0x39C, + .timestamp_perv0_sof_addr = 0x3A0, + .timestamp_perv1_sof_addr = 0x3A4, + .timestamp_curr0_eof_addr = 0x3A8, + .timestamp_curr1_eof_addr = 0x3AC, + .timestamp_perv0_eof_addr = 0x3B0, + .timestamp_perv1_eof_addr = 0x3B4, + .lut_bank_cfg_addr = 0x3B8, + .batch_id_cfg0_addr = 0x3BC, + .batch_id_cfg1_addr = 0x3C0, + .batch_period_cfg_addr = 0x3C4, + .batch_stream_id_cfg_addr = 0x3C8, + .epoch0_cfg_batch_id0_addr = 0x3CC, + .epoch1_cfg_batch_id0_addr = 0x3D0, + .epoch0_cfg_batch_id1_addr = 0x3D4, + .epoch1_cfg_batch_id1_addr 
= 0x3D8, + .epoch0_cfg_batch_id2_addr = 0x3DC, + .epoch1_cfg_batch_id2_addr = 0x3E0, + .epoch0_cfg_batch_id3_addr = 0x3E4, + .epoch1_cfg_batch_id3_addr = 0x3E8, + .epoch0_cfg_batch_id4_addr = 0x3EC, + .epoch1_cfg_batch_id4_addr = 0x3F0, + .epoch0_cfg_batch_id5_addr = 0x3F4, + .epoch1_cfg_batch_id5_addr = 0x3F8, + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .sof_retiming_dis_shift = 5, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 14, + .early_eof_en_shift_val = 16, + .bin_h_en_shift_val = 20, + .bin_v_en_shift_val = 21, + .bin_en_shift_val = 18, + .bin_qcfa_en_shift_val = 19, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .bin_pd_en_shift_val = 0, + .bin_pd_blk_w_shift_val = 8, + .bin_pd_blk_h_shift_val = 24, + .bin_pd_detect_x_offset_shift_val = 0, + .bin_pd_detect_x_end_shift_val = 16, + .bin_pd_detect_y_offset_shift_val = 0, + .bin_pd_detect_y_end_shift_val = 16, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000006, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x10001, + .top_irq_mask = {0x10,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_780_ppp_reg_info = { + .irq_status_addr = 0xCC, + .irq_mask_addr = 0xD0, + .irq_clear_addr = 0xD4, + .irq_set_addr = 0xD8, + .cfg0_addr = 0xB00, + .ctrl_addr = 
0xB04, + .debug_clr_cmd_addr = 0xB08, + .multi_vcdt_cfg0_addr = 0xB0C, + .cfg1_addr = 0xB10, + .sparse_pd_extractor_cfg_addr = 0xB14, + .err_recovery_cfg0_addr = 0xB18, + .err_recovery_cfg1_addr = 0xB1C, + .err_recovery_cfg2_addr = 0xB20, + .camif_frame_cfg_addr = 0xB30, + .epoch_irq_cfg_addr = 0xB34, + .epoch0_subsample_ptrn_addr = 0xB38, + .epoch1_subsample_ptrn_addr = 0xB3C, + .debug_camif_1_addr = 0xB40, + .debug_camif_0_addr = 0xB44, + .debug_halt_status_addr = 0xB48, + .debug_misr_val0_addr = 0xB4C, + .debug_misr_val1_addr = 0xB50, + .debug_misr_val2_addr = 0xB54, + .debug_misr_val3_addr = 0xB58, + .hcrop_addr = 0xB5c, + .vcrop_addr = 0xB60, + .pix_drop_pattern_addr = 0xB64, + .pix_drop_period_addr = 0xB68, + .line_drop_pattern_addr = 0xB6C, + .line_drop_period_addr = 0xB70, + .frm_drop_pattern_addr = 0xB74, + .frm_drop_period_addr = 0xB78, + .irq_subsample_pattern_addr = 0xB7C, + .irq_subsample_period_addr = 0xB80, + .format_measure_cfg0_addr = 0xB84, + .format_measure_cfg1_addr = 0xB88, + .format_measure0_addr = 0xB8C, + .format_measure1_addr = 0xB90, + .format_measure2_addr = 0xB94, + .timestamp_curr0_sof_addr = 0xB98, + .timestamp_curr1_sof_addr = 0xB9C, + .timestamp_perv0_sof_addr = 0xBA0, + .timestamp_perv1_sof_addr = 0xBA4, + .timestamp_curr0_eof_addr = 0xBA8, + .timestamp_curr1_eof_addr = 0xBAC, + .timestamp_perv0_eof_addr = 0xBB0, + .timestamp_perv1_eof_addr = 0xBB4, + .lut_bank_cfg_addr = 0xBB8, + .batch_id_cfg0_addr = 0xBBC, + .batch_id_cfg1_addr = 0xBC0, + .batch_period_cfg_addr = 0xBC4, + .batch_stream_id_cfg_addr = 0xBC8, + .epoch0_cfg_batch_id0_addr = 0xBCC, + .epoch1_cfg_batch_id0_addr = 0xBD0, + .epoch0_cfg_batch_id1_addr = 0xBD4, + .epoch1_cfg_batch_id1_addr = 0xBD8, + .epoch0_cfg_batch_id2_addr = 0xBDC, + .epoch1_cfg_batch_id2_addr = 0xBE0, + .epoch0_cfg_batch_id3_addr = 0xBE4, + .epoch1_cfg_batch_id3_addr = 0xBE8, + .epoch0_cfg_batch_id4_addr = 0xBEC, + .epoch1_cfg_batch_id4_addr = 0xBF0, + .epoch0_cfg_batch_id5_addr = 0xBF4, + 
.epoch1_cfg_batch_id5_addr = 0xBF8, + /* configurations */ + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .resume_frame_boundary = 1, + .start_mode_shift = 2, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_master_sel_val = 3, + .start_master_sel_shift = 4, + .binning_supported = 0x1, + .bin_h_en_shift_val = 18, + .bin_en_shift_val = 18, + .early_eof_en_shift_val = 16, + .pix_store_en_shift_val = 14, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000006, + .rup_aup_mask = 0x40004, + .top_irq_mask = {0x40,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .sof_retiming_dis_shift = 5, + .crop_drop_enable = false, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_780_rdi_0_reg_info = { + .irq_status_addr = 0xEC, + .irq_mask_addr = 0xF0, + .irq_clear_addr = 0xF4, + .irq_set_addr = 0xF8, + .cfg0_addr = 0x500, + .ctrl_addr = 0x504, + .debug_clr_cmd_addr = 0x508, + .multi_vcdt_cfg0_addr = 0x50C, + .cfg1_addr = 0x510, + .err_recovery_cfg0_addr = 0x514, + .err_recovery_cfg1_addr = 0x518, + .err_recovery_cfg2_addr = 0x51C, + .debug_byte_cntr_ping_addr = 0x520, + .debug_byte_cntr_pong_addr = 0x524, + .camif_frame_cfg_addr = 0x528, + .epoch_irq_cfg_addr = 0x52C, + .epoch0_subsample_ptrn_addr = 0x530, + .epoch1_subsample_ptrn_addr = 0x534, + .debug_camif_1_addr = 0x538, + .debug_camif_0_addr = 0x53C, + .frm_drop_pattern_addr = 0x540, + .frm_drop_period_addr = 0x544, + .irq_subsample_pattern_addr = 0x548, + .irq_subsample_period_addr = 0x54C, + .hcrop_addr = 0x550, + .vcrop_addr = 0x554, + .pix_drop_pattern_addr = 
0x558, + .pix_drop_period_addr = 0x55C, + .line_drop_pattern_addr = 0x560, + .line_drop_period_addr = 0x564, + .debug_halt_status_addr = 0x568, + .debug_misr_val0_addr = 0x570, + .debug_misr_val1_addr = 0x574, + .debug_misr_val2_addr = 0x578, + .debug_misr_val3_addr = 0x57C, + .format_measure_cfg0_addr = 0x580, + .format_measure_cfg1_addr = 0x584, + .format_measure0_addr = 0x588, + .format_measure1_addr = 0x58C, + .format_measure2_addr = 0x590, + .timestamp_curr0_sof_addr = 0x594, + .timestamp_curr1_sof_addr = 0x598, + .timestamp_perv0_sof_addr = 0x59C, + .timestamp_perv1_sof_addr = 0x5A0, + .timestamp_curr0_eof_addr = 0x5A4, + .timestamp_curr1_eof_addr = 0x5A8, + .timestamp_perv0_eof_addr = 0x5AC, + .timestamp_perv1_eof_addr = 0x5B0, + .batch_id_cfg0_addr = 0x5B4, + .batch_id_cfg1_addr = 0x5B8, + .batch_period_cfg_addr = 0x5BC, + .batch_stream_id_cfg_addr = 0x5C0, + .epoch0_cfg_batch_id0_addr = 0x5C4, + .epoch1_cfg_batch_id0_addr = 0x5C8, + .epoch0_cfg_batch_id1_addr = 0x5CC, + .epoch1_cfg_batch_id1_addr = 0x5D0, + .epoch0_cfg_batch_id2_addr = 0x5D4, + .epoch1_cfg_batch_id2_addr = 0x5D8, + .epoch0_cfg_batch_id3_addr = 0x5DC, + .epoch1_cfg_batch_id3_addr = 0x5E0, + .epoch0_cfg_batch_id4_addr = 0x5E4, + .epoch1_cfg_batch_id4_addr = 0x5E8, + .epoch0_cfg_batch_id5_addr = 0x5EC, + .epoch1_cfg_batch_id5_addr = 0x5F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + 
.format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000004, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x100010, + .top_irq_mask = {0x100,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 10, + .sof_retiming_dis_shift = 5, + .default_out_format = CAM_FORMAT_PLAIN16_16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_780_rdi_1_reg_info = { + .irq_status_addr = 0xFC, + .irq_mask_addr = 0x100, + .irq_clear_addr = 0x104, + .irq_set_addr = 0x108, + .cfg0_addr = 0x600, + .ctrl_addr = 0x604, + .debug_clr_cmd_addr = 0x608, + .multi_vcdt_cfg0_addr = 0x60C, + .cfg1_addr = 0x610, + .err_recovery_cfg0_addr = 0x614, + .err_recovery_cfg1_addr = 0x618, + .err_recovery_cfg2_addr = 0x61C, + .debug_byte_cntr_ping_addr = 0x620, + .debug_byte_cntr_pong_addr = 0x624, + .camif_frame_cfg_addr = 0x628, + .epoch_irq_cfg_addr = 0x62C, + .epoch0_subsample_ptrn_addr = 0x630, + .epoch1_subsample_ptrn_addr = 0x634, + .debug_camif_1_addr = 0x638, + .debug_camif_0_addr = 0x63C, + .frm_drop_pattern_addr = 0x640, + .frm_drop_period_addr = 0x644, + .irq_subsample_pattern_addr = 0x648, + .irq_subsample_period_addr = 0x64C, + .hcrop_addr = 0x650, + .vcrop_addr = 0x654, + .pix_drop_pattern_addr = 0x658, + .pix_drop_period_addr = 0x65C, + .line_drop_pattern_addr = 0x660, + .line_drop_period_addr = 0x664, + .debug_halt_status_addr = 0x66C, + .debug_misr_val0_addr = 0x670, + .debug_misr_val1_addr = 0x674, + .debug_misr_val2_addr = 0x678, + .debug_misr_val3_addr = 0x67C, + .format_measure_cfg0_addr = 0x680, + .format_measure_cfg1_addr = 0x684, + .format_measure0_addr = 0x688, + .format_measure1_addr = 0x68C, + .format_measure2_addr = 0x690, + .timestamp_curr0_sof_addr = 0x694, 
+ .timestamp_curr1_sof_addr = 0x698, + .timestamp_perv0_sof_addr = 0x69C, + .timestamp_perv1_sof_addr = 0x6A0, + .timestamp_curr0_eof_addr = 0x6A4, + .timestamp_curr1_eof_addr = 0x6A8, + .timestamp_perv0_eof_addr = 0x6AC, + .timestamp_perv1_eof_addr = 0x6B0, + .batch_id_cfg0_addr = 0x6B4, + .batch_id_cfg1_addr = 0x6B8, + .batch_period_cfg_addr = 0x6BC, + .batch_stream_id_cfg_addr = 0x6C0, + .epoch0_cfg_batch_id0_addr = 0x6C4, + .epoch1_cfg_batch_id0_addr = 0x6C8, + .epoch0_cfg_batch_id1_addr = 0x6CC, + .epoch1_cfg_batch_id1_addr = 0x6D0, + .epoch0_cfg_batch_id2_addr = 0x6D4, + .epoch1_cfg_batch_id2_addr = 0x6D8, + .epoch0_cfg_batch_id3_addr = 0x6DC, + .epoch1_cfg_batch_id3_addr = 0x6E0, + .epoch0_cfg_batch_id4_addr = 0x6E4, + .epoch1_cfg_batch_id4_addr = 0x6E8, + .epoch0_cfg_batch_id5_addr = 0x6EC, + .epoch1_cfg_batch_id5_addr = 0x6F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .offline_mode_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000004, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x200020, + .top_irq_mask = {0x200,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + 
.pix_store_en_shift_val = 10, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_780_rdi_2_reg_info = { + .irq_status_addr = 0x10C, + .irq_mask_addr = 0x110, + .irq_clear_addr = 0x114, + .irq_set_addr = 0x118, + .cfg0_addr = 0x700, + .ctrl_addr = 0x704, + .debug_clr_cmd_addr = 0x708, + .multi_vcdt_cfg0_addr = 0x70C, + .cfg1_addr = 0x710, + .err_recovery_cfg0_addr = 0x714, + .err_recovery_cfg1_addr = 0x718, + .err_recovery_cfg2_addr = 0x71C, + .debug_byte_cntr_ping_addr = 0x720, + .debug_byte_cntr_pong_addr = 0x724, + .camif_frame_cfg_addr = 0x728, + .epoch_irq_cfg_addr = 0x72C, + .epoch0_subsample_ptrn_addr = 0x730, + .epoch1_subsample_ptrn_addr = 0x734, + .debug_camif_1_addr = 0x738, + .debug_camif_0_addr = 0x73C, + .frm_drop_pattern_addr = 0x740, + .frm_drop_period_addr = 0x744, + .irq_subsample_pattern_addr = 0x748, + .irq_subsample_period_addr = 0x74C, + .hcrop_addr = 0x750, + .vcrop_addr = 0x754, + .pix_drop_pattern_addr = 0x758, + .pix_drop_period_addr = 0x75C, + .line_drop_pattern_addr = 0x760, + .line_drop_period_addr = 0x764, + .debug_halt_status_addr = 0x76C, + .debug_misr_val0_addr = 0x770, + .debug_misr_val1_addr = 0x774, + .debug_misr_val2_addr = 0x778, + .debug_misr_val3_addr = 0x77C, + .format_measure_cfg0_addr = 0x780, + .format_measure_cfg1_addr = 0x784, + .format_measure0_addr = 0x788, + .format_measure1_addr = 0x78C, + .format_measure2_addr = 0x790, + .timestamp_curr0_sof_addr = 0x794, + .timestamp_curr1_sof_addr = 0x798, + .timestamp_perv0_sof_addr = 0x79C, + .timestamp_perv1_sof_addr = 0x7A0, + .timestamp_curr0_eof_addr = 0x7A4, + .timestamp_curr1_eof_addr = 0x7A8, + .timestamp_perv0_eof_addr = 0x7AC, + .timestamp_perv1_eof_addr = 0x7B0, + .batch_id_cfg0_addr = 0x7B4, + .batch_id_cfg1_addr = 0x7B8, + .batch_period_cfg_addr = 0x7BC, + .batch_stream_id_cfg_addr = 0x7C0, + .epoch0_cfg_batch_id0_addr = 0x7C4, + .epoch1_cfg_batch_id0_addr = 0x7C8, + .epoch0_cfg_batch_id1_addr = 0x7CC, + 
.epoch1_cfg_batch_id1_addr = 0x7D0, + .epoch0_cfg_batch_id2_addr = 0x7D4, + .epoch1_cfg_batch_id2_addr = 0x7D8, + .epoch0_cfg_batch_id3_addr = 0x7DC, + .epoch1_cfg_batch_id3_addr = 0x7E0, + .epoch0_cfg_batch_id4_addr = 0x7E4, + .epoch1_cfg_batch_id4_addr = 0x7E8, + .epoch0_cfg_batch_id5_addr = 0x7EC, + .epoch1_cfg_batch_id5_addr = 0x7F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .offline_mode_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000004, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x400040, + .top_irq_mask = {0x400,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 10, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_780_rdi_3_reg_info = { + .irq_status_addr = 0x11C, + .irq_mask_addr = 0x120, + .irq_clear_addr = 0x124, + .irq_set_addr = 0x128, + .cfg0_addr = 0x800, + .ctrl_addr = 0x804, + .debug_clr_cmd_addr = 0x808, + .multi_vcdt_cfg0_addr = 0x80C, + .cfg1_addr = 0x810, + .err_recovery_cfg0_addr = 0x814, + .err_recovery_cfg1_addr = 0x818, + .err_recovery_cfg2_addr = 0x81C, + .debug_byte_cntr_ping_addr = 
0x820,
+	.debug_byte_cntr_pong_addr = 0x824,
+	.camif_frame_cfg_addr = 0x828,
+	.epoch_irq_cfg_addr = 0x82C,
+	.epoch0_subsample_ptrn_addr = 0x830,
+	.epoch1_subsample_ptrn_addr = 0x834,
+	.debug_camif_1_addr = 0x838,
+	.debug_camif_0_addr = 0x83C,
+	.frm_drop_pattern_addr = 0x840,
+	/* fixed: period reg was a copy-paste duplicate of the pattern reg (0x840);
+	 * all sibling RDIs lay these out as 0xX40/0xX44 (RDI0 0x540/0x544,
+	 * RDI1 0x640/0x644, RDI2 0x740/0x744, RDI4 0x940/0x944).
+	 */
+	.frm_drop_period_addr = 0x844,
+	.irq_subsample_pattern_addr = 0x848,
+	.irq_subsample_period_addr = 0x84C,
+	.hcrop_addr = 0x850,
+	.vcrop_addr = 0x854,
+	.pix_drop_pattern_addr = 0x858,
+	.pix_drop_period_addr = 0x85C,
+	.line_drop_pattern_addr = 0x860,
+	.line_drop_period_addr = 0x864,
+	.debug_halt_status_addr = 0x868,
+	.debug_misr_val0_addr = 0x870,
+	.debug_misr_val1_addr = 0x874,
+	.debug_misr_val2_addr = 0x878,
+	.debug_misr_val3_addr = 0x87C,
+	.format_measure_cfg0_addr = 0x880,
+	.format_measure_cfg1_addr = 0x884,
+	.format_measure0_addr = 0x888,
+	.format_measure1_addr = 0x88C,
+	.format_measure2_addr = 0x890,
+	.timestamp_curr0_sof_addr = 0x894,
+	.timestamp_curr1_sof_addr = 0x898,
+	.timestamp_perv0_sof_addr = 0x89C,
+	.timestamp_perv1_sof_addr = 0x8A0,
+	.timestamp_curr0_eof_addr = 0x8A4,
+	.timestamp_curr1_eof_addr = 0x8A8,
+	.timestamp_perv0_eof_addr = 0x8AC,
+	.timestamp_perv1_eof_addr = 0x8B0,
+	.batch_id_cfg0_addr = 0x8B4,
+	.batch_id_cfg1_addr = 0x8B8,
+	.batch_period_cfg_addr = 0x8BC,
+	.batch_stream_id_cfg_addr = 0x8C0,
+	.epoch0_cfg_batch_id0_addr = 0x8C4,
+	.epoch1_cfg_batch_id0_addr = 0x8C8,
+	.epoch0_cfg_batch_id1_addr = 0x8CC,
+	.epoch1_cfg_batch_id1_addr = 0x8D0,
+	.epoch0_cfg_batch_id2_addr = 0x8D4,
+	.epoch1_cfg_batch_id2_addr = 0x8D8,
+	.epoch0_cfg_batch_id3_addr = 0x8DC,
+	.epoch1_cfg_batch_id3_addr = 0x8E0,
+	.epoch0_cfg_batch_id4_addr = 0x8E4,
+	.epoch1_cfg_batch_id4_addr = 0x8E8,
+	.epoch0_cfg_batch_id5_addr = 0x8EC,
+	.epoch1_cfg_batch_id5_addr = 0x8F0,
+	/* configurations */
+	.capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS,
+	.resume_frame_boundary = 1,
+	.start_mode_internal = 0x0,
+	.start_mode_global = 0x1,
+	.start_mode_shift = 2,
+
.overflow_ctrl_en = 1,
+	/* NOTE(review): removed duplicate ".capabilities = 0" initializer here;
+	 * in C the last designated initializer for a member wins (C11 6.7.9),
+	 * so it silently zeroed the CAM_IFE_CSID_CAP_SOF_RETIME_DIS capability
+	 * set earlier in this struct (which .sof_retiming_dis_shift relies on).
+	 */
+	.overflow_ctrl_mode_val = 0x8,
+	.offline_mode_supported = 1,
+	.mipi_pack_supported = 1,
+	.packing_fmt_shift_val = 15,
+	.plain_alignment_shift_val = 11,
+	.plain_fmt_shift_val = 12,
+	.crop_v_en_shift_val = 8,
+	.crop_h_en_shift_val = 7,
+	.drop_v_en_shift_val = 6,
+	.drop_h_en_shift_val = 5,
+	.early_eof_en_shift_val = 14,
+	.format_measure_en_shift_val = 3,
+	.timestamp_en_shift_val = 6,
+	.debug_byte_cntr_rst_shift_val = 2,
+	.offline_mode_en_shift_val = 2,
+	.ccif_violation_en = 1,
+	.fatal_err_mask = 0x20186001,
+	.non_fatal_err_mask = 0x12000004,
+	.sof_irq_mask = 0x10,
+	.rup_irq_mask = 0x800000,
+	.epoch0_irq_mask = 0x200000,
+	.epoch1_irq_mask = 0x400000,
+	.eof_irq_mask = 0x8,
+	.rup_aup_mask = 0x800080,
+	.top_irq_mask = {0x800,},
+	.epoch0_shift_val = 16,
+	.epoch1_shift_val = 0,
+	.sof_retiming_dis_shift = 5,
+};
+
+static struct cam_ife_csid_ver2_path_reg_info
+	cam_ife_csid_780_rdi_4_reg_info = {
+	.irq_status_addr = 0x12C,
+	.irq_mask_addr = 0x130,
+	.irq_clear_addr = 0x134,
+	.irq_set_addr = 0x138,
+	.cfg0_addr = 0x900,
+	.ctrl_addr = 0x904,
+	.debug_clr_cmd_addr = 0x908,
+	.multi_vcdt_cfg0_addr = 0x90C,
+	.cfg1_addr = 0x910,
+	.err_recovery_cfg0_addr = 0x914,
+	.err_recovery_cfg1_addr = 0x918,
+	.err_recovery_cfg2_addr = 0x91C,
+	.debug_byte_cntr_ping_addr = 0x920,
+	.debug_byte_cntr_pong_addr = 0x924,
+	.camif_frame_cfg_addr = 0x928,
+	.epoch_irq_cfg_addr = 0x92C,
+	.epoch0_subsample_ptrn_addr = 0x930,
+	.epoch1_subsample_ptrn_addr = 0x934,
+	.debug_camif_1_addr = 0x938,
+	.debug_camif_0_addr = 0x93C,
+	.frm_drop_pattern_addr = 0x940,
+	.frm_drop_period_addr = 0x944,
+	.irq_subsample_pattern_addr = 0x948,
+	.irq_subsample_period_addr = 0x94C,
+	.hcrop_addr = 0x950,
+	.vcrop_addr = 0x954,
+	.pix_drop_pattern_addr = 0x958,
+	.pix_drop_period_addr = 0x95C,
+	.line_drop_pattern_addr = 0x960,
+	.line_drop_period_addr = 0x964,
+	.debug_halt_status_addr = 0x968,
+	.debug_misr_val0_addr = 0x970,
+
.debug_misr_val1_addr = 0x974,
+	.debug_misr_val2_addr = 0x978,
+	.debug_misr_val3_addr = 0x97C,
+	.format_measure_cfg0_addr = 0x980,
+	.format_measure_cfg1_addr = 0x984,
+	.format_measure0_addr = 0x988,
+	.format_measure1_addr = 0x98C,
+	.format_measure2_addr = 0x990,
+	.timestamp_curr0_sof_addr = 0x994,
+	.timestamp_curr1_sof_addr = 0x998,
+	.timestamp_perv0_sof_addr = 0x99C,
+	.timestamp_perv1_sof_addr = 0x9A0,
+	.timestamp_curr0_eof_addr = 0x9A4,
+	.timestamp_curr1_eof_addr = 0x9A8,
+	.timestamp_perv0_eof_addr = 0x9AC,
+	.timestamp_perv1_eof_addr = 0x9B0,
+	.batch_id_cfg0_addr = 0x9B4,
+	.batch_id_cfg1_addr = 0x9B8,
+	.batch_period_cfg_addr = 0x9BC,
+	.batch_stream_id_cfg_addr = 0x9C0,
+	.epoch0_cfg_batch_id0_addr = 0x9C4,
+	.epoch1_cfg_batch_id0_addr = 0x9C8,
+	.epoch0_cfg_batch_id1_addr = 0x9CC,
+	.epoch1_cfg_batch_id1_addr = 0x9D0,
+	.epoch0_cfg_batch_id2_addr = 0x9D4,
+	.epoch1_cfg_batch_id2_addr = 0x9D8,
+	.epoch0_cfg_batch_id3_addr = 0x9DC,
+	.epoch1_cfg_batch_id3_addr = 0x9E0,
+	.epoch0_cfg_batch_id4_addr = 0x9E4,
+	.epoch1_cfg_batch_id4_addr = 0x9E8,
+	.epoch0_cfg_batch_id5_addr = 0x9EC,
+	.epoch1_cfg_batch_id5_addr = 0x9F0,
+	/* configurations */
+	.capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS,
+	.resume_frame_boundary = 1,
+	.start_mode_internal = 0x0,
+	.start_mode_global = 0x1,
+	.start_mode_shift = 2,
+	.overflow_ctrl_en = 1,
+	/* NOTE(review): removed duplicate ".capabilities = 0" initializer here;
+	 * in C the last designated initializer for a member wins (C11 6.7.9),
+	 * so it silently zeroed the CAM_IFE_CSID_CAP_SOF_RETIME_DIS capability
+	 * set earlier in this struct (which .sof_retiming_dis_shift relies on).
+	 */
+	.overflow_ctrl_mode_val = 0x8,
+	.offline_mode_supported = 1,
+	.mipi_pack_supported = 1,
+	.packing_fmt_shift_val = 15,
+	.early_eof_en_shift_val = 14,
+	.plain_fmt_shift_val = 12,
+	.plain_alignment_shift_val = 11,
+	.crop_v_en_shift_val = 8,
+	.crop_h_en_shift_val = 7,
+	.drop_v_en_shift_val = 6,
+	.drop_h_en_shift_val = 5,
+	.timestamp_en_shift_val = 6,
+	.format_measure_en_shift_val = 3,
+	.debug_byte_cntr_rst_shift_val = 2,
+	.offline_mode_en_shift_val = 2,
+	.ccif_violation_en = 1,
+	.fatal_err_mask = 0x20186001,
+	.non_fatal_err_mask = 0x12000004,
+	.sof_irq_mask = 0x10,
+	.rup_irq_mask =
0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x1000100, + .top_irq_mask = {0x1000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + cam_ife_csid_780_csi2_reg_info = { + .irq_status_addr = {0x9C,}, + .irq_mask_addr = {0xA0,}, + .irq_clear_addr = {0xA4,}, + .irq_set_addr = {0xA8,}, + /*CSI2 rx control */ + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .capture_ctrl_addr = 0x208, + .rst_strobes_addr = 0x20C, + .cap_unmap_long_pkt_hdr_0_addr = 0x210, + .cap_unmap_long_pkt_hdr_1_addr = 0x214, + .captured_short_pkt_0_addr = 0x218, + .captured_short_pkt_1_addr = 0x21c, + .captured_long_pkt_0_addr = 0x220, + .captured_long_pkt_1_addr = 0x224, + .captured_long_pkt_ftr_addr = 0x228, + .captured_cphy_pkt_hdr_addr = 0x22c, + .lane0_misr_addr = 0x230, + .lane1_misr_addr = 0x234, + .lane2_misr_addr = 0x238, + .lane3_misr_addr = 0x23c, + .total_pkts_rcvd_addr = 0x240, + .stats_ecc_addr = 0x244, + .total_crc_err_addr = 0x248, + .de_scramble_type3_cfg0_addr = 0x24C, + .de_scramble_type3_cfg1_addr = 0x250, + .de_scramble_type2_cfg0_addr = 0x254, + .de_scramble_type2_cfg1_addr = 0x258, + .de_scramble_type1_cfg0_addr = 0x25C, + .de_scramble_type1_cfg1_addr = 0x260, + .de_scramble_type0_cfg0_addr = 0x264, + .de_scramble_type0_cfg1_addr = 0x268, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 
0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + .phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, + .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .rup_aup_latch_shift = 11, + .rup_aup_latch_supported = true, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .fatal_err_mask = {0x197A800,}, + .part_fatal_err_mask = {0x0081000,}, + .non_fatal_err_mask = {0x0200000,}, + .top_irq_mask = {0x4,}, +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_780_cmn_reg_info = { + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .global_cmd_addr = 0x8, + .reset_cfg_addr = 0xc, + .reset_cmd_addr = 0x10, + .irq_cmd_addr = 0x14, + .rup_aup_cmd_addr = 0x18, + .offline_cmd_addr = 0x1C, + .shdr_master_slave_cfg_addr = 0x20, + .top_irq_status_addr = {0x7C,}, + .top_irq_mask_addr = {0x80,}, + .top_irq_clear_addr = {0x84,}, + .top_irq_set_addr = {0x88,}, + .buf_done_irq_status_addr = 0x8C, + .buf_done_irq_mask_addr = 0x90, + .buf_done_irq_clear_addr = 0x94, + .buf_done_irq_set_addr = 0x98, + .test_bus_ctrl = 0x1E8, + .test_bus_debug = 0x1EC, + .drv_cfg0_addr = 0x13c, + .drv_cfg1_addr = 0x140, + .drv_cfg2_addr = 0x144, + .debug_drv_0_addr = 0x148, + .debug_drv_1_addr = 0x14C, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 5, + .num_pix = 1, + .num_ppp = 1, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .vc_mask = 0x1F, + .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + .decode_format1_supported = true, + .decode_format_mask = 0xF, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + 
.multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .mup_shift_val = 28, + .shdr_slave_ppp_shift = 20, + .shdr_slave_rdi2_shift = 22, + .shdr_slave_rdi1_shift = 21, + .shdr_master_rdi0_shift = 5, + .shdr_master_slave_en_shift = 0, + .drv_en_shift = 0, + .drv_rup_en_shift = 0, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .top_err_irq_mask = {0x180002,}, + .rst_loc_path_only_val = 0x0, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .top_reset_irq_mask = {0x1,}, + .rst_location_shift_val = 4, + .rst_mode_shift_val = 0, + .epoch_factor = 50, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .top_buf_done_irq_mask = 0x2000, + .decode_format_payload_only = 0xF, + .phy_sel_base_idx = 1, + .timestamp_enabled_in_cfg0 = true, + .camif_irq_support = true, + .sfe_ipp_input_rdi_res = BIT(CAM_IFE_PIX_PATH_RES_RDI_0), + .drv_rup_en_val_map = { + 2, //RDI0 + 3, //RDI1 + 4, //RDI2 + 5, //RDI3 + 6, //RDI4 + 0, //IPP + 1, //PPP + 0, //UDI0 + 0, //UDI1 + 0, //UDI2 + }, + .drv_path_idle_en_val_map = { + BIT(4), //CAM_ISP_PXL_PATH + BIT(5), 
//CAM_ISP_PPP_PATH + 0, // LCR not applicable + BIT(6), //CAM_ISP_RDI0_PATH + BIT(7), //CAM_ISP_RDI1_PATH + BIT(8), //CAM_ISP_RDI2_PATH + BIT(9), //CAM_ISP_RDI3_PATH + BIT(10), //CAM_ISP_RDI4_PATH + }, +}; + +static struct cam_ife_csid_ver2_top_reg_info + cam_ife_csid_780_top_reg_info = { + .io_path_cfg0_addr = { + 0x0, + 0x4, + 0x8, + }, + .dual_csid_cfg0_addr = { + 0xC, + 0x10, + 0x14, + }, + .input_core_type_shift_val = 0, + .sfe_offline_en_shift_val = 12, + .out_ife_en_shift_val = 8, + .dual_sync_sel_shift_val = 8, + .dual_en_shift_val = 0, + .master_slave_sel_shift_val = 1, + .rdi_lcr_shift_val = 16, + .master_sel_val = 0, + .slave_sel_val = 1, + .io_path_cfg_rst_val = 1, + .dual_cfg_rst_val = 0, + .sfe_pipeline_bypassed = true, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_780_rx_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_UNBOUNDED_FRAME), + 
}, + + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, + .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 8, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 9, + .bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 10, + .bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION] = 11, + .bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION] = 12, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 13, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 14, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 15, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 16, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 17, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 18, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 19, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 20, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 21, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 22, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_UNBOUNDED_FRAME] = 24, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_780_top_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP) | + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN) | + BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN), + }, + + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP] = 16, + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN] = 17, + .bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN] = 18, +}; + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_780_reg_info = { + .top_irq_reg_info = cam_ife_csid_780_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_780_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + 
&cam_ife_csid_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + &cam_ife_csid_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_4], + &cam_ife_csid_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + &cam_ife_csid_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_780_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_780_cmn_reg_info, + .csi2_reg = &cam_ife_csid_780_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_780_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = &cam_ife_csid_780_ppp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_780_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_780_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_780_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_780_rdi_3_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_4] = &cam_ife_csid_780_rdi_4_reg_info, + .top_reg = &cam_ife_csid_780_top_reg_info, + .input_core_sel = { + { + 0x0, + 0x1, + 0x2, + 0x3, + 0x8, + -1, + -1, + }, + { + 0x0, + 0x1, + 0x2, + 0x3, + -1, + -1, + -1, + }, + { + 0x0, + 0x1, + 0x2, + 0x3, + -1, + 0x9, + -1, + }, + }, + .need_top_cfg = 0x1, + .csid_cust_node_map = {0x1, 0x0, 0x2}, + .rx_irq_desc = &cam_ife_csid_780_rx_irq_desc, + .path_irq_desc = cam_ife_csid_780_path_irq_desc, + .top_irq_desc = &cam_ife_csid_780_top_irq_desc, + .num_top_err_irqs = cam_ife_csid_780_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_780_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_780_path_irq_desc), + .top_debug_mask = &cam_ife_csid_780_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_780_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, +}; +#endif /*_CAM_IFE_CSID_780_H_ */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid880.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid880.h new file mode 100644 index 0000000000..574bbfbb7a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid880.h @@ -0,0 +1,1586 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_880_H_ +#define _CAM_IFE_CSID_880_H_ + +#include +#include "cam_ife_csid_dev.h" +#include "camera_main.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" + +#define CAM_CSID_VERSION_V880 0x80080000 + +/* Offsets might not match due to csid secure regs at beginning of reg space */ + +static const struct cam_ife_csid_irq_desc cam_ife_csid_880_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "LONG_PKT", + }, + { + .bitmask = BIT(9), + .desc = "SHORT_PKT", + }, + { + .bitmask = BIT(10), + .desc = "CPHY_PKT_HDR", + }, + { + .bitmask = BIT(11), + .desc = "ERROR_CPHY_EOT_RECEPTION", + }, + { + .bitmask = BIT(12), + .desc = "ERROR_CPHY_SOT_RECEPTION", + }, + { + .bitmask = BIT(13), + .desc = "ERROR_CPHY_PH_CRC", + }, + { + .bitmask = BIT(14), + .desc = "WARNING_ECC", + }, + { + .bitmask = BIT(15), + .desc = "ERROR_LANE0_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(16), + .desc = 
"ERROR_LANE1_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(17), + .desc = "ERROR_LANE2_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(18), + .desc = "ERROR_LANE3_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(19), + .desc = "ERROR_CRC", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_ECC", + }, + { + .bitmask = BIT(21), + .desc = "ERROR_MMAPPED_VC_DT", + }, + { + .bitmask = BIT(22), + .desc = "ERROR_UNMAPPED_VC_DT", + }, + { + .bitmask = BIT(23), + .desc = "ERROR_STREAM_UNDERFLOW", + }, + { + .bitmask = BIT(24), + .desc = "ERROR_UNBOUNDED_FRAME", + }, + { + .bitmask = BIT(25), + .desc = "ERROR_ILLEGAL_PROGRAMMING_IRQ", + }, + { + .bitmask = BIT(26), + .desc = "INFO_SENSOR_MODE_ID_CHANGE_IRQ", + }, + }, +}; + +static const uint32_t cam_ife_csid_880_num_rx_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_880_rx_irq_desc[0]), +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_880_path_irq_desc[] = { + { + .bitmask = BIT(0), + .err_type = CAM_ISP_HW_ERROR_CSID_FATAL, + .desc = "ILLEGAL_PROGRAMMING", + .err_handler = cam_ife_csid_ver2_print_illegal_programming_irq_status, + }, + {0}, + { + .bitmask = BIT(2), + .desc = "INFO_DATA_FIFO_FULL", + }, + { + .bitmask = BIT(3), + .desc = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .desc = "CAMIF_SOF", + }, + { + .bitmask = BIT(5), + .desc = "FRAME_DROP_EOF", + }, + { + .bitmask = BIT(6), + .desc = "FRAME_DROP_EOL", + }, + { + .bitmask = BIT(7), + .desc = "FRAME_DROP_SOL", + }, + { + .bitmask = BIT(8), + .desc = "FRAME_DROP_SOF", + }, + { + .bitmask = BIT(9), + .desc = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .desc = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .desc = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .desc = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_PIX_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_LINE_COUNT", + 
.err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .desc = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(16), + .desc = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(17), + .desc = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .desc = "FRAME_DROP", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .desc = "OVERFLOW_RECOVERY: Back pressure/output fifo ovrfl", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_REC_CCIF_VIOLATION From Camif", + }, + { + .bitmask = BIT(21), + .desc = "CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .desc = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .desc = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .desc = "ILLEGAL_BATCH_ID", + }, + { + .bitmask = BIT(25), + .desc = "BATCH_END_MISSING_VIOLATION", + }, + { + .bitmask = BIT(26), + .desc = "HEIGHT_VIOLATION", + }, + { + .bitmask = BIT(27), + .desc = "WIDTH_VIOLATION", + }, + { + .bitmask = BIT(28), + .desc = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .desc = "CCIF_VIOLATION: Bad frame timings", + }, +}; + +static const struct cam_ife_csid_top_irq_desc cam_ife_csid_880_top_irq_desc[][32] = { + { + { + .bitmask = BIT(1), + .err_type = CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR, + .err_name = "FATAL_SENSOR_SWITCHING_IRQ", + .desc = "Fatal Error during dynamically switching between 2 sensors", + }, + { + .bitmask = BIT(18), + .err_name = "ERROR_NO_VOTE_DN", + .desc = "vote_up is asserted before IDLE is encountered in a frame", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .err_name = "ERROR_VOTE_UP_LATE", + .desc = "vote_up is asserted at the same time as an SOF", + .err_handler = cam_ife_csid_hw_ver2_drv_err_handler, + }, + { + .bitmask = BIT(20), + .err_type = CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW, + .err_name = "ERROR_RDI_LINE_BUFFER_CONFLICT", + .desc = "Two or more RDIs programmed 
to access the shared line buffer", + .err_handler = cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler, + }, + }, +}; + +static const uint32_t cam_ife_csid_880_num_top_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_880_top_irq_desc[0]), +}; + +static struct cam_irq_register_set cam_ife_csid_880_irq_reg_set[9] = { + /* Top */ + { + .mask_reg_offset = 0x00000080, + .clear_reg_offset = 0x00000084, + .status_reg_offset = 0x0000007C, + .set_reg_offset = 0x00000088, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + /* RX */ + { + .mask_reg_offset = 0x000000A0, + .clear_reg_offset = 0x000000A4, + .status_reg_offset = 0x0000009C, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x000000F0, + .clear_reg_offset = 0x000000F4, + .status_reg_offset = 0x000000EC, + }, + /* RDI1 */ + { + .mask_reg_offset = 0x00000100, + .clear_reg_offset = 0x00000104, + .status_reg_offset = 0x000000FC, + }, + /* RDI2 */ + { + .mask_reg_offset = 0x00000110, + .clear_reg_offset = 0x00000114, + .status_reg_offset = 0x0000010C, + }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000120, + .clear_reg_offset = 0x00000124, + .status_reg_offset = 0x0000011C, + }, + /* RDI4 */ + { + .mask_reg_offset = 0x00000130, + .clear_reg_offset = 0x00000134, + .status_reg_offset = 0x0000012C, + }, + /* IPP */ + { + .mask_reg_offset = 0x000000B0, + .clear_reg_offset = 0x000000B4, + .status_reg_offset = 0x000000AC, + }, + /* PPP */ + { + .mask_reg_offset = 0x000000D0, + .clear_reg_offset = 0x000000D4, + .status_reg_offset = 0x000000CC, + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_880_top_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000014, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_880_rx_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = 
&cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_880_path_irq_reg_info[7] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_4], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_PPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, +}; + +static struct cam_irq_register_set cam_ife_csid_880_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000090, + .clear_reg_offset = 0x00000094, + .status_reg_offset = 0x0000008C, + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_880_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_880_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_880_ipp_reg_info = { + .irq_status_addr = 0x00AC, + .irq_mask_addr = 0x00B0, + .irq_clear_addr = 0x00B4, + 
.irq_set_addr = 0x00B8, + .cfg0_addr = 0x0300, + .ctrl_addr = 0x0304, + .debug_clr_cmd_addr = 0x0308, + .multi_vcdt_cfg0_addr = 0x030C, + .cfg1_addr = 0x0310, + .err_recovery_cfg0_addr = 0x0318, + .err_recovery_cfg1_addr = 0x031C, + .err_recovery_cfg2_addr = 0x0320, + .bin_pd_detect_cfg0_addr = 0x0324, + .bin_pd_detect_cfg1_addr = 0x0328, + .bin_pd_detect_cfg2_addr = 0x032C, + .camif_frame_cfg_addr = 0x0330, + .epoch_irq_cfg_addr = 0x0334, + .epoch0_subsample_ptrn_addr = 0x0338, + .epoch1_subsample_ptrn_addr = 0x033C, + .debug_camif_1_addr = 0x0340, + .debug_camif_0_addr = 0x0344, + .debug_halt_status_addr = 0x0348, + .debug_misr_val0_addr = 0x034C, + .debug_misr_val1_addr = 0x0350, + .debug_misr_val2_addr = 0x0354, + .debug_misr_val3_addr = 0x0358, + .hcrop_addr = 0x035c, + .vcrop_addr = 0x0360, + .pix_drop_pattern_addr = 0x0364, + .pix_drop_period_addr = 0x0368, + .line_drop_pattern_addr = 0x036C, + .line_drop_period_addr = 0x0370, + .frm_drop_pattern_addr = 0x0374, + .frm_drop_period_addr = 0x0378, + .irq_subsample_pattern_addr = 0x037C, + .irq_subsample_period_addr = 0x0380, + .format_measure_cfg0_addr = 0x0384, + .format_measure_cfg1_addr = 0x0388, + .format_measure0_addr = 0x038C, + .format_measure1_addr = 0x0390, + .format_measure2_addr = 0x0394, + .timestamp_curr0_sof_addr = 0x0398, + .timestamp_curr1_sof_addr = 0x039C, + .timestamp_perv0_sof_addr = 0x03A0, + .timestamp_perv1_sof_addr = 0x03A4, + .timestamp_curr0_eof_addr = 0x03A8, + .timestamp_curr1_eof_addr = 0x03AC, + .timestamp_perv0_eof_addr = 0x03B0, + .timestamp_perv1_eof_addr = 0x03B4, + .lut_bank_cfg_addr = 0x03B8, + .batch_id_cfg0_addr = 0x03BC, + .batch_id_cfg1_addr = 0x03C0, + .batch_period_cfg_addr = 0x03C4, + .batch_stream_id_cfg_addr = 0x03C8, + .epoch0_cfg_batch_id0_addr = 0x03CC, + .epoch1_cfg_batch_id0_addr = 0x03D0, + .epoch0_cfg_batch_id1_addr = 0x03D4, + .epoch1_cfg_batch_id1_addr = 0x03D8, + .epoch0_cfg_batch_id2_addr = 0x03DC, + .epoch1_cfg_batch_id2_addr = 0x03E0, + 
.epoch0_cfg_batch_id3_addr = 0x03E4, + .epoch1_cfg_batch_id3_addr = 0x03E8, + .epoch0_cfg_batch_id4_addr = 0x03EC, + .epoch1_cfg_batch_id4_addr = 0x03F0, + .epoch0_cfg_batch_id5_addr = 0x03F4, + .epoch1_cfg_batch_id5_addr = 0x03F8, + .path_batch_status = 0x03FC, + .path_frame_id = 0x0400, + + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .sof_retiming_dis_shift = 5, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 14, + .early_eof_en_shift_val = 16, + .bin_h_en_shift_val = 20, + .bin_v_en_shift_val = 21, + .bin_en_shift_val = 18, + .bin_qcfa_en_shift_val = 19, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .bin_pd_en_shift_val = 0, + .bin_pd_blk_w_shift_val = 8, + .bin_pd_blk_h_shift_val = 24, + .bin_pd_detect_x_offset_shift_val = 0, + .bin_pd_detect_x_end_shift_val = 16, + .bin_pd_detect_y_offset_shift_val = 0, + .bin_pd_detect_y_end_shift_val = 16, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x10001, + .top_irq_mask = {0x10,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_880_ppp_reg_info = { + .irq_status_addr = 0x00CC, + .irq_mask_addr = 0x00D0, + .irq_clear_addr = 0x00D4, + .irq_set_addr = 0x00D8, + .cfg0_addr = 0x0B00, + .ctrl_addr = 0x0B04, + 
.debug_clr_cmd_addr = 0x0B08, + .multi_vcdt_cfg0_addr = 0x0B0C, + .cfg1_addr = 0x0B10, + .sparse_pd_extractor_cfg_addr = 0x0B14, + .err_recovery_cfg0_addr = 0x0B18, + .err_recovery_cfg1_addr = 0x0B1C, + .err_recovery_cfg2_addr = 0x0B20, + .camif_frame_cfg_addr = 0x0B30, + .epoch_irq_cfg_addr = 0x0B34, + .epoch0_subsample_ptrn_addr = 0x0B38, + .epoch1_subsample_ptrn_addr = 0x0B3C, + .debug_camif_1_addr = 0x0B40, + .debug_camif_0_addr = 0x0B44, + .debug_halt_status_addr = 0x0B48, + .debug_misr_val0_addr = 0x0B4C, + .debug_misr_val1_addr = 0x0B50, + .debug_misr_val2_addr = 0x0B54, + .debug_misr_val3_addr = 0x0B58, + .hcrop_addr = 0x0B5c, + .vcrop_addr = 0x0B60, + .pix_drop_pattern_addr = 0x0B64, + .pix_drop_period_addr = 0x0B68, + .line_drop_pattern_addr = 0x0B6C, + .line_drop_period_addr = 0x0B70, + .frm_drop_pattern_addr = 0x0B74, + .frm_drop_period_addr = 0x0B78, + .irq_subsample_pattern_addr = 0x0B7C, + .irq_subsample_period_addr = 0x0B80, + .format_measure_cfg0_addr = 0x0B84, + .format_measure_cfg1_addr = 0x0B88, + .format_measure0_addr = 0x0B8C, + .format_measure1_addr = 0x0B90, + .format_measure2_addr = 0x0B94, + .timestamp_curr0_sof_addr = 0x0B98, + .timestamp_curr1_sof_addr = 0x0B9C, + .timestamp_perv0_sof_addr = 0x0BA0, + .timestamp_perv1_sof_addr = 0x0BA4, + .timestamp_curr0_eof_addr = 0x0BA8, + .timestamp_curr1_eof_addr = 0x0BAC, + .timestamp_perv0_eof_addr = 0x0BB0, + .timestamp_perv1_eof_addr = 0x0BB4, + .lut_bank_cfg_addr = 0x0BB8, + .batch_id_cfg0_addr = 0x0BBC, + .batch_id_cfg1_addr = 0x0BC0, + .batch_period_cfg_addr = 0x0BC4, + .batch_stream_id_cfg_addr = 0x0BC8, + .epoch0_cfg_batch_id0_addr = 0x0BCC, + .epoch1_cfg_batch_id0_addr = 0x0BD0, + .epoch0_cfg_batch_id1_addr = 0x0BD4, + .epoch1_cfg_batch_id1_addr = 0x0BD8, + .epoch0_cfg_batch_id2_addr = 0x0BDC, + .epoch1_cfg_batch_id2_addr = 0x0BE0, + .epoch0_cfg_batch_id3_addr = 0x0BE4, + .epoch1_cfg_batch_id3_addr = 0x0BE8, + .epoch0_cfg_batch_id4_addr = 0x0BEC, + .epoch1_cfg_batch_id4_addr = 0x0BF0, + 
.epoch0_cfg_batch_id5_addr = 0x0BF4, + .epoch1_cfg_batch_id5_addr = 0x0BF8, + .path_batch_status = 0x0BFC, + .path_frame_id = 0x0C00, + + /* configurations */ + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .resume_frame_boundary = 1, + .start_mode_shift = 2, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_master_sel_val = 3, + .start_master_sel_shift = 4, + .binning_supported = 0x1, + .bin_h_en_shift_val = 18, + .bin_en_shift_val = 18, + .early_eof_en_shift_val = 16, + .pix_store_en_shift_val = 14, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .rup_aup_mask = 0x40004, + .top_irq_mask = {0x40,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .sof_retiming_dis_shift = 5, + .crop_drop_enable = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_880_rdi_0_reg_info = { + .irq_status_addr = 0x00EC, + .irq_mask_addr = 0x00F0, + .irq_clear_addr = 0x00F4, + .irq_set_addr = 0x00F8, + .cfg0_addr = 0x0500, + .ctrl_addr = 0x0504, + .debug_clr_cmd_addr = 0x0508, + .multi_vcdt_cfg0_addr = 0x050C, + .cfg1_addr = 0x0510, + .err_recovery_cfg0_addr = 0x0514, + .err_recovery_cfg1_addr = 0x0518, + .err_recovery_cfg2_addr = 0x051C, + .debug_byte_cntr_ping_addr = 0x0520, + .debug_byte_cntr_pong_addr = 0x0524, + .camif_frame_cfg_addr = 0x0528, + .epoch_irq_cfg_addr = 0x052C, + .epoch0_subsample_ptrn_addr = 0x0530, + .epoch1_subsample_ptrn_addr = 0x0534, + .debug_camif_1_addr = 0x0538, + .debug_camif_0_addr = 0x053C, + .frm_drop_pattern_addr = 0x0540, + .frm_drop_period_addr = 0x0544, + 
.irq_subsample_pattern_addr = 0x0548, + .irq_subsample_period_addr = 0x054C, + .hcrop_addr = 0x0550, + .vcrop_addr = 0x0554, + .pix_drop_pattern_addr = 0x0558, + .pix_drop_period_addr = 0x055C, + .line_drop_pattern_addr = 0x0560, + .line_drop_period_addr = 0x0564, + .debug_halt_status_addr = 0x0568, + .debug_misr_val0_addr = 0x0570, + .debug_misr_val1_addr = 0x0574, + .debug_misr_val2_addr = 0x0578, + .debug_misr_val3_addr = 0x057C, + .format_measure_cfg0_addr = 0x0580, + .format_measure_cfg1_addr = 0x0584, + .format_measure0_addr = 0x0588, + .format_measure1_addr = 0x058C, + .format_measure2_addr = 0x0590, + .timestamp_curr0_sof_addr = 0x0594, + .timestamp_curr1_sof_addr = 0x0598, + .timestamp_perv0_sof_addr = 0x059C, + .timestamp_perv1_sof_addr = 0x05A0, + .timestamp_curr0_eof_addr = 0x05A4, + .timestamp_curr1_eof_addr = 0x05A8, + .timestamp_perv0_eof_addr = 0x05AC, + .timestamp_perv1_eof_addr = 0x05B0, + .batch_id_cfg0_addr = 0x05B4, + .batch_id_cfg1_addr = 0x05B8, + .batch_period_cfg_addr = 0x05BC, + .batch_stream_id_cfg_addr = 0x05C0, + .epoch0_cfg_batch_id0_addr = 0x05C4, + .epoch1_cfg_batch_id0_addr = 0x05C8, + .epoch0_cfg_batch_id1_addr = 0x05CC, + .epoch1_cfg_batch_id1_addr = 0x05D0, + .epoch0_cfg_batch_id2_addr = 0x05D4, + .epoch1_cfg_batch_id2_addr = 0x05D8, + .epoch0_cfg_batch_id3_addr = 0x05DC, + .epoch1_cfg_batch_id3_addr = 0x05E0, + .epoch0_cfg_batch_id4_addr = 0x05E4, + .epoch1_cfg_batch_id4_addr = 0x05E8, + .epoch0_cfg_batch_id5_addr = 0x05EC, + .epoch1_cfg_batch_id5_addr = 0x05F0, + .path_batch_status = 0x05F4, + .path_frame_id = 0x05F8, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + 
.packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x100010, + .top_irq_mask = {0x100,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 10, + .sof_retiming_dis_shift = 5, + .default_out_format = CAM_FORMAT_PLAIN16_16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_880_rdi_1_reg_info = { + .irq_status_addr = 0x00FC, + .irq_mask_addr = 0x0100, + .irq_clear_addr = 0x0104, + .irq_set_addr = 0x0108, + .cfg0_addr = 0x0600, + .ctrl_addr = 0x0604, + .debug_clr_cmd_addr = 0x0608, + .multi_vcdt_cfg0_addr = 0x060C, + .cfg1_addr = 0x0610, + .err_recovery_cfg0_addr = 0x0614, + .err_recovery_cfg1_addr = 0x0618, + .err_recovery_cfg2_addr = 0x061C, + .debug_byte_cntr_ping_addr = 0x0620, + .debug_byte_cntr_pong_addr = 0x0624, + .camif_frame_cfg_addr = 0x0628, + .epoch_irq_cfg_addr = 0x062C, + .epoch0_subsample_ptrn_addr = 0x0630, + .epoch1_subsample_ptrn_addr = 0x0634, + .debug_camif_1_addr = 0x0638, + .debug_camif_0_addr = 0x063C, + .frm_drop_pattern_addr = 0x0640, + .frm_drop_period_addr = 0x0644, + .irq_subsample_pattern_addr = 0x0648, + .irq_subsample_period_addr = 0x064C, + .hcrop_addr = 0x0650, + .vcrop_addr = 0x0654, + .pix_drop_pattern_addr = 0x0658, + .pix_drop_period_addr = 0x065C, + .line_drop_pattern_addr = 0x0660, + .line_drop_period_addr = 0x0664, + .debug_halt_status_addr = 0x066C, + .debug_misr_val0_addr = 0x0670, + .debug_misr_val1_addr = 0x0674, + 
.debug_misr_val2_addr = 0x0678, + .debug_misr_val3_addr = 0x067C, + .format_measure_cfg0_addr = 0x0680, + .format_measure_cfg1_addr = 0x0684, + .format_measure0_addr = 0x0688, + .format_measure1_addr = 0x068C, + .format_measure2_addr = 0x0690, + .timestamp_curr0_sof_addr = 0x0694, + .timestamp_curr1_sof_addr = 0x0698, + .timestamp_perv0_sof_addr = 0x069C, + .timestamp_perv1_sof_addr = 0x06A0, + .timestamp_curr0_eof_addr = 0x06A4, + .timestamp_curr1_eof_addr = 0x06A8, + .timestamp_perv0_eof_addr = 0x06AC, + .timestamp_perv1_eof_addr = 0x06B0, + .batch_id_cfg0_addr = 0x06B4, + .batch_id_cfg1_addr = 0x06B8, + .batch_period_cfg_addr = 0x06BC, + .batch_stream_id_cfg_addr = 0x06C0, + .epoch0_cfg_batch_id0_addr = 0x06C4, + .epoch1_cfg_batch_id0_addr = 0x06C8, + .epoch0_cfg_batch_id1_addr = 0x06CC, + .epoch1_cfg_batch_id1_addr = 0x06D0, + .epoch0_cfg_batch_id2_addr = 0x06D4, + .epoch1_cfg_batch_id2_addr = 0x06D8, + .epoch0_cfg_batch_id3_addr = 0x06DC, + .epoch1_cfg_batch_id3_addr = 0x06E0, + .epoch0_cfg_batch_id4_addr = 0x06E4, + .epoch1_cfg_batch_id4_addr = 0x06E8, + .epoch0_cfg_batch_id5_addr = 0x06EC, + .epoch1_cfg_batch_id5_addr = 0x06F0, + .path_batch_status = 0x06F4, + .path_frame_id = 0x06F8, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .offline_mode_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val 
= 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x200020, + .top_irq_mask = {0x200,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 10, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_880_rdi_2_reg_info = { + .irq_status_addr = 0x010C, + .irq_mask_addr = 0x0110, + .irq_clear_addr = 0x0114, + .irq_set_addr = 0x0118, + .cfg0_addr = 0x0700, + .ctrl_addr = 0x0704, + .debug_clr_cmd_addr = 0x0708, + .multi_vcdt_cfg0_addr = 0x070C, + .cfg1_addr = 0x0710, + .err_recovery_cfg0_addr = 0x0714, + .err_recovery_cfg1_addr = 0x0718, + .err_recovery_cfg2_addr = 0x071C, + .debug_byte_cntr_ping_addr = 0x0720, + .debug_byte_cntr_pong_addr = 0x0724, + .camif_frame_cfg_addr = 0x0728, + .epoch_irq_cfg_addr = 0x072C, + .epoch0_subsample_ptrn_addr = 0x0730, + .epoch1_subsample_ptrn_addr = 0x0734, + .debug_camif_1_addr = 0x0738, + .debug_camif_0_addr = 0x073C, + .frm_drop_pattern_addr = 0x0740, + .frm_drop_period_addr = 0x0744, + .irq_subsample_pattern_addr = 0x0748, + .irq_subsample_period_addr = 0x074C, + .hcrop_addr = 0x0750, + .vcrop_addr = 0x0754, + .pix_drop_pattern_addr = 0x0758, + .pix_drop_period_addr = 0x075C, + .line_drop_pattern_addr = 0x0760, + .line_drop_period_addr = 0x0764, + .debug_halt_status_addr = 0x076C, + .debug_misr_val0_addr = 0x0770, + .debug_misr_val1_addr = 0x0774, + .debug_misr_val2_addr = 0x0778, + .debug_misr_val3_addr = 0x077C, + .format_measure_cfg0_addr = 0x0780, + .format_measure_cfg1_addr = 0x0784, + .format_measure0_addr = 0x0788, + .format_measure1_addr = 0x078C, + .format_measure2_addr = 0x0790, + .timestamp_curr0_sof_addr = 0x0794, + .timestamp_curr1_sof_addr = 0x0798, + .timestamp_perv0_sof_addr = 0x079C, + .timestamp_perv1_sof_addr = 0x07A0, + 
.timestamp_curr0_eof_addr = 0x07A4, + .timestamp_curr1_eof_addr = 0x07A8, + .timestamp_perv0_eof_addr = 0x07AC, + .timestamp_perv1_eof_addr = 0x07B0, + .batch_id_cfg0_addr = 0x07B4, + .batch_id_cfg1_addr = 0x07B8, + .batch_period_cfg_addr = 0x07BC, + .batch_stream_id_cfg_addr = 0x07C0, + .epoch0_cfg_batch_id0_addr = 0x07C4, + .epoch1_cfg_batch_id0_addr = 0x07C8, + .epoch0_cfg_batch_id1_addr = 0x07CC, + .epoch1_cfg_batch_id1_addr = 0x07D0, + .epoch0_cfg_batch_id2_addr = 0x07D4, + .epoch1_cfg_batch_id2_addr = 0x07D8, + .epoch0_cfg_batch_id3_addr = 0x07DC, + .epoch1_cfg_batch_id3_addr = 0x07E0, + .epoch0_cfg_batch_id4_addr = 0x07E4, + .epoch1_cfg_batch_id4_addr = 0x07E8, + .epoch0_cfg_batch_id5_addr = 0x07EC, + .epoch1_cfg_batch_id5_addr = 0x07F0, + .path_batch_status = 0x07F4, + .path_frame_id = 0x07F8, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .offline_mode_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x400040, + .top_irq_mask = {0x400,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 10, + 
.sof_retiming_dis_shift = 5,
+};
+
+/*
+ * RDI_3 path register map (base 0x0800). Fixes vs. original:
+ *  - frm_drop_period_addr was 0x0840, aliasing frm_drop_pattern_addr;
+ *    all sibling RDI paths place the period register at pattern + 0x4.
+ *  - removed a duplicate ".capabilities = 0" initializer that silently
+ *    overrode CAM_IFE_CSID_CAP_SOF_RETIME_DIS (later designated
+ *    initializers win in C), which contradicted sof_retiming_dis_shift.
+ */
+static struct cam_ife_csid_ver2_path_reg_info
+	cam_ife_csid_880_rdi_3_reg_info = {
+	.irq_status_addr = 0x011C,
+	.irq_mask_addr = 0x0120,
+	.irq_clear_addr = 0x0124,
+	.irq_set_addr = 0x0128,
+	.cfg0_addr = 0x0800,
+	.ctrl_addr = 0x0804,
+	.debug_clr_cmd_addr = 0x0808,
+	.multi_vcdt_cfg0_addr = 0x080C,
+	.cfg1_addr = 0x0810,
+	.err_recovery_cfg0_addr = 0x0814,
+	.err_recovery_cfg1_addr = 0x0818,
+	.err_recovery_cfg2_addr = 0x081C,
+	.debug_byte_cntr_ping_addr = 0x0820,
+	.debug_byte_cntr_pong_addr = 0x0824,
+	.camif_frame_cfg_addr = 0x0828,
+	.epoch_irq_cfg_addr = 0x082C,
+	.epoch0_subsample_ptrn_addr = 0x0830,
+	.epoch1_subsample_ptrn_addr = 0x0834,
+	.debug_camif_1_addr = 0x0838,
+	.debug_camif_0_addr = 0x083C,
+	.frm_drop_pattern_addr = 0x0840,
+	/* period register follows its pattern register at +0x4 (was 0x0840) */
+	.frm_drop_period_addr = 0x0844,
+	.irq_subsample_pattern_addr = 0x0848,
+	.irq_subsample_period_addr = 0x084C,
+	.hcrop_addr = 0x0850,
+	.vcrop_addr = 0x0854,
+	.pix_drop_pattern_addr = 0x0858,
+	.pix_drop_period_addr = 0x085C,
+	.line_drop_pattern_addr = 0x0860,
+	.line_drop_period_addr = 0x0864,
+	.debug_halt_status_addr = 0x0868,
+	.debug_misr_val0_addr = 0x0870,
+	.debug_misr_val1_addr = 0x0874,
+	.debug_misr_val2_addr = 0x0878,
+	.debug_misr_val3_addr = 0x087C,
+	.format_measure_cfg0_addr = 0x0880,
+	.format_measure_cfg1_addr = 0x0884,
+	.format_measure0_addr = 0x0888,
+	.format_measure1_addr = 0x088C,
+	.format_measure2_addr = 0x0890,
+	.timestamp_curr0_sof_addr = 0x0894,
+	.timestamp_curr1_sof_addr = 0x0898,
+	.timestamp_perv0_sof_addr = 0x089C,
+	.timestamp_perv1_sof_addr = 0x08A0,
+	.timestamp_curr0_eof_addr = 0x08A4,
+	.timestamp_curr1_eof_addr = 0x08A8,
+	.timestamp_perv0_eof_addr = 0x08AC,
+	.timestamp_perv1_eof_addr = 0x08B0,
+	.batch_id_cfg0_addr = 0x08B4,
+	.batch_id_cfg1_addr = 0x08B8,
+	.batch_period_cfg_addr = 0x08BC,
+	.batch_stream_id_cfg_addr = 0x08C0,
+	.epoch0_cfg_batch_id0_addr = 0x08C4,
+	.epoch1_cfg_batch_id0_addr = 0x08C8,
+	.epoch0_cfg_batch_id1_addr = 0x08CC,
+	.epoch1_cfg_batch_id1_addr = 0x08D0,
+	.epoch0_cfg_batch_id2_addr = 0x08D4,
+	.epoch1_cfg_batch_id2_addr = 0x08D8,
+	.epoch0_cfg_batch_id3_addr = 0x08DC,
+	.epoch1_cfg_batch_id3_addr = 0x08E0,
+	.epoch0_cfg_batch_id4_addr = 0x08E4,
+	.epoch1_cfg_batch_id4_addr = 0x08E8,
+	.epoch0_cfg_batch_id5_addr = 0x08EC,
+	.epoch1_cfg_batch_id5_addr = 0x08F0,
+	.path_batch_status = 0x08F4,
+	.path_frame_id = 0x08F8,
+
+	/* configurations */
+	.capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS,
+	.resume_frame_boundary = 1,
+	.start_mode_internal = 0x0,
+	.start_mode_global = 0x1,
+	.start_mode_shift = 2,
+	.overflow_ctrl_en = 1,
+	.overflow_ctrl_mode_val = 0x8,
+	.offline_mode_supported = 1,
+	.mipi_pack_supported = 1,
+	.packing_fmt_shift_val = 15,
+	.plain_alignment_shift_val = 11,
+	.plain_fmt_shift_val = 12,
+	.crop_v_en_shift_val = 8,
+	.crop_h_en_shift_val = 7,
+	.drop_v_en_shift_val = 6,
+	.drop_h_en_shift_val = 5,
+	.early_eof_en_shift_val = 14,
+	.format_measure_en_shift_val = 3,
+	.timestamp_en_shift_val = 6,
+	.debug_byte_cntr_rst_shift_val = 2,
+	.offline_mode_en_shift_val = 2,
+	.ccif_violation_en = 1,
+	.fatal_err_mask = 0x20186001,
+	.non_fatal_err_mask = 0x12000000,
+	.sof_irq_mask = 0x10,
+	.rup_irq_mask = 0x800000,
+	.epoch0_irq_mask = 0x200000,
+	.epoch1_irq_mask = 0x400000,
+	.eof_irq_mask = 0x8,
+	.rup_aup_mask = 0x800080,
+	.top_irq_mask = {0x800,},
+	.epoch0_shift_val = 16,
+	.epoch1_shift_val = 0,
+	.sof_retiming_dis_shift = 5,
+};
+
+static struct cam_ife_csid_ver2_path_reg_info
+	cam_ife_csid_880_rdi_4_reg_info = {
+	.irq_status_addr = 0x012C,
+	.irq_mask_addr = 0x0130,
+	.irq_clear_addr = 0x0134,
+	.irq_set_addr = 0x0138,
+	.cfg0_addr = 0x0900,
+	.ctrl_addr = 0x0904,
+	.debug_clr_cmd_addr = 0x0908,
+	.multi_vcdt_cfg0_addr = 0x090C,
+	.cfg1_addr = 0x0910,
+	.err_recovery_cfg0_addr = 0x0914,
+	.err_recovery_cfg1_addr = 0x0918,
+	.err_recovery_cfg2_addr = 0x091C,
+	.debug_byte_cntr_ping_addr = 0x0920,
+	
.debug_byte_cntr_pong_addr = 0x0924, + .camif_frame_cfg_addr = 0x0928, + .epoch_irq_cfg_addr = 0x092C, + .epoch0_subsample_ptrn_addr = 0x0930, + .epoch1_subsample_ptrn_addr = 0x0934, + .debug_camif_1_addr = 0x0938, + .debug_camif_0_addr = 0x093C, + .frm_drop_pattern_addr = 0x0940, + .frm_drop_period_addr = 0x0944, + .irq_subsample_pattern_addr = 0x0948, + .irq_subsample_period_addr = 0x094C, + .hcrop_addr = 0x0950, + .vcrop_addr = 0x0954, + .pix_drop_pattern_addr = 0x0958, + .pix_drop_period_addr = 0x095C, + .line_drop_pattern_addr = 0x0960, + .line_drop_period_addr = 0x0964, + .debug_halt_status_addr = 0x0968, + .debug_misr_val0_addr = 0x0970, + .debug_misr_val1_addr = 0x0974, + .debug_misr_val2_addr = 0x0978, + .debug_misr_val3_addr = 0x097C, + .format_measure_cfg0_addr = 0x0980, + .format_measure_cfg1_addr = 0x0984, + .format_measure0_addr = 0x0988, + .format_measure1_addr = 0x098C, + .format_measure2_addr = 0x0990, + .timestamp_curr0_sof_addr = 0x0994, + .timestamp_curr1_sof_addr = 0x0998, + .timestamp_perv0_sof_addr = 0x099C, + .timestamp_perv1_sof_addr = 0x09A0, + .timestamp_curr0_eof_addr = 0x09A4, + .timestamp_curr1_eof_addr = 0x09A8, + .timestamp_perv0_eof_addr = 0x09AC, + .timestamp_perv1_eof_addr = 0x09B0, + .batch_id_cfg0_addr = 0x09B4, + .batch_id_cfg1_addr = 0x09B8, + .batch_period_cfg_addr = 0x09BC, + .batch_stream_id_cfg_addr = 0x09C0, + .epoch0_cfg_batch_id0_addr = 0x09C4, + .epoch1_cfg_batch_id0_addr = 0x09C8, + .epoch0_cfg_batch_id1_addr = 0x09CC, + .epoch1_cfg_batch_id1_addr = 0x09D0, + .epoch0_cfg_batch_id2_addr = 0x09D4, + .epoch1_cfg_batch_id2_addr = 0x09D8, + .epoch0_cfg_batch_id3_addr = 0x09DC, + .epoch1_cfg_batch_id3_addr = 0x09E0, + .epoch0_cfg_batch_id4_addr = 0x09E4, + .epoch1_cfg_batch_id4_addr = 0x09E8, + .epoch0_cfg_batch_id5_addr = 0x09EC, + .epoch1_cfg_batch_id5_addr = 0x09F0, + .path_batch_status = 0x09F4, + .path_frame_id = 0x09F8, + + /* configurations */ + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + 
.resume_frame_boundary = 1, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_en = 1, + .capabilities = 0, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .early_eof_en_shift_val = 14, + .plain_fmt_shift_val = 12, + .plain_alignment_shift_val = 11, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .timestamp_en_shift_val = 6, + .format_measure_en_shift_val = 3, + .debug_byte_cntr_rst_shift_val = 2, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x1000100, + .top_irq_mask = {0x1000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_880_rx_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + 
BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_UNBOUNDED_FRAME), + }, + + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, + .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 8, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 9, + .bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 10, + .bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION] = 11, + .bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION] = 12, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 13, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 14, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 15, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 16, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 17, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 18, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 19, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 20, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 21, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 22, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_UNBOUNDED_FRAME] = 24, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_880_top_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP) | + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN) | + BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN), + }, + + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP] = 16, + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN] = 17, + .bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN] = 18, +}; + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + 
cam_ife_csid_880_csi2_reg_info = { + .irq_status_addr = {0x009C,}, + .irq_mask_addr = {0x00A0,}, + .irq_clear_addr = {0x00A4,}, + .irq_set_addr = {0x00A8,}, + /*CSI2 rx control */ + .cfg0_addr = 0x0200, + .cfg1_addr = 0x0204, + .capture_ctrl_addr = 0x0208, + .rst_strobes_addr = 0x020C, + .cap_unmap_long_pkt_hdr_0_addr = 0x0210, + .cap_unmap_long_pkt_hdr_1_addr = 0x0214, + .captured_short_pkt_0_addr = 0x0218, + .captured_short_pkt_1_addr = 0x021c, + .captured_long_pkt_0_addr = 0x0220, + .captured_long_pkt_1_addr = 0x0224, + .captured_long_pkt_ftr_addr = 0x0228, + .captured_cphy_pkt_hdr_addr = 0x022c, + .lane0_misr_addr = 0x0230, + .lane1_misr_addr = 0x0234, + .lane2_misr_addr = 0x0238, + .lane3_misr_addr = 0x023c, + .total_pkts_rcvd_addr = 0x0240, + .stats_ecc_addr = 0x0244, + .total_crc_err_addr = 0x0248, + .de_scramble_type3_cfg0_addr = 0x024C, + .de_scramble_type3_cfg1_addr = 0x0250, + .de_scramble_type2_cfg0_addr = 0x0254, + .de_scramble_type2_cfg1_addr = 0x0258, + .de_scramble_type1_cfg0_addr = 0x025C, + .de_scramble_type1_cfg1_addr = 0x0260, + .de_scramble_type0_cfg0_addr = 0x0264, + .de_scramble_type0_cfg1_addr = 0x0268, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + .phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, 
+ .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .rup_aup_latch_shift = 13, + .rup_aup_latch_supported = true, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .fatal_err_mask = {0x197A800,}, + .part_fatal_err_mask = {0x0081000,}, + .non_fatal_err_mask = {0x0200000,}, + .top_irq_mask = {0x4,}, +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_880_cmn_reg_info = { + .hw_version_addr = 0x0000, + .cfg0_addr = 0x0004, + .global_cmd_addr = 0x0008, + .reset_cfg_addr = 0x000c, + .reset_cmd_addr = 0x0010, + .irq_cmd_addr = 0x0014, + .rup_aup_cmd_addr = 0x0018, + .offline_cmd_addr = 0x001C, + .shdr_master_slave_cfg_addr = 0x0020, + .top_irq_status_addr = {0x007C,}, + .top_irq_mask_addr = {0x0080,}, + .top_irq_clear_addr = {0x0084,}, + .top_irq_set_addr = {0x0088,}, + .buf_done_irq_status_addr = 0x008C, + .buf_done_irq_mask_addr = 0x0090, + .buf_done_irq_clear_addr = 0x0094, + .buf_done_irq_set_addr = 0x0098, + .test_bus_ctrl = 0x01E8, + .test_bus_debug = 0x01EC, + .drv_cfg0_addr = 0x13c, + .drv_cfg1_addr = 0x140, + .drv_cfg2_addr = 0x144, + .debug_drv_0_addr = 0x148, + .debug_drv_1_addr = 0x14C, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 5, + .num_pix = 1, + .num_ppp = 1, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .vc_mask = 0x1F, + .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + .decode_format1_supported = true, + .decode_format_mask = 0xF, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .mup_shift_val = 28, + .shdr_slave_ppp_shift = 20, + .shdr_slave_rdi2_shift 
= 22, + .shdr_slave_rdi1_shift = 21, + .shdr_master_rdi0_shift = 5, + .shdr_master_slave_en_shift = 0, + .drv_en_shift = 0, + .drv_rup_en_shift = 0, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .format_measure_max_hbi_shift = 16, + .format_measure_min_hbi_mask = 0xFFF, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .sync_reset_ctrl_testbus_val = 0x1000C101, + .top_err_irq_mask = { + 0x180002, + }, + .rst_loc_path_only_val = 0x0, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .top_reset_irq_mask = { + 0x1, + }, + .rst_location_shift_val = 4, + .rst_mode_shift_val = 0, + .epoch_factor = 50, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .top_buf_done_irq_mask = 0x2000, + .phy_sel_base_idx = 1, + .decode_format_payload_only = 0xF, + .timestamp_enabled_in_cfg0 = true, + .sfe_ipp_input_rdi_res = BIT(CAM_IFE_PIX_PATH_RES_RDI_0), + .camif_irq_support = true, + .drv_rup_en_val_map = { + 2, //RDI0 + 3, //RDI1 + 4, //RDI2 + 5, //RDI3 + 6, //RDI4 + 0, //IPP + 1, //PPP + 0, //UDI0 + 0, //UDI1 + 0, //UDI2 + }, + .drv_path_idle_en_val_map = { + BIT(4), //CAM_ISP_PXL_PATH + BIT(5), //CAM_ISP_PPP_PATH + 0, // LCR not applicable + BIT(6), //CAM_ISP_RDI0_PATH + BIT(7), //CAM_ISP_RDI1_PATH + BIT(8), 
//CAM_ISP_RDI2_PATH + BIT(9), //CAM_ISP_RDI3_PATH + BIT(10), //CAM_ISP_RDI4_PATH + }, + .path_domain_id_cfg0 = 0x0, + .path_domain_id_cfg1 = 0x4, +}; + +static struct cam_ife_csid_ver2_top_reg_info + cam_ife_csid_880_top_reg_info = { + .io_path_cfg0_addr = { + 0x0, + 0x4, + 0x8, + }, + .dual_csid_cfg0_addr = { + 0xC, + 0x10, + 0x14, + }, + .input_core_type_shift_val = 0, + .sfe_offline_en_shift_val = 12, + .out_ife_en_shift_val = 8, + .dual_sync_sel_shift_val = 8, + .dual_en_shift_val = 0, + .master_slave_sel_shift_val = 1, + .rdi_lcr_shift_val = 16, + .master_sel_val = 0, + .slave_sel_val = 1, + .io_path_cfg_rst_val = 1, + .dual_cfg_rst_val = 0, + .sfe_pipeline_bypassed = false, +}; + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_880_reg_info = { + .top_irq_reg_info = cam_ife_csid_880_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_880_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + &cam_ife_csid_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_4], + &cam_ife_csid_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + &cam_ife_csid_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_880_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_880_cmn_reg_info, + .csi2_reg = &cam_ife_csid_880_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_880_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = &cam_ife_csid_880_ppp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_880_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_880_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_880_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_880_rdi_3_reg_info, + 
.path_reg[CAM_IFE_PIX_PATH_RES_RDI_4] = &cam_ife_csid_880_rdi_4_reg_info, + .top_reg = &cam_ife_csid_880_top_reg_info, + .input_core_sel = { + { + 0x0, + 0x1, + 0x2, + 0x3, + 0x4, + 0x8, + -1, + -1, + }, + { + 0x0, + 0x1, + 0x2, + 0x3, + 0x4, + -1, + -1, + -1, + }, + { + 0x0, + 0x1, + 0x2, + 0x3, + 0x4, + -1, + 0x9, + -1, + }, + }, + .need_top_cfg = 0x1, + .csid_cust_node_map = {0x1, 0x0, 0x2}, + .top_irq_desc = &cam_ife_csid_880_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_880_rx_irq_desc, + .path_irq_desc = cam_ife_csid_880_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_880_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_880_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_880_path_irq_desc), + .top_debug_mask = &cam_ife_csid_880_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_880_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, + .is_ife_sfe_mapped = true, +}; +#endif /*_CAM_IFE_CSID_880_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid970.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid970.h new file mode 100644 index 0000000000..e3ad2abfd1 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid970.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_970_H_ +#define _CAM_IFE_CSID_970_H_ + +#include +#include "cam_ife_csid_hw_ver2.h" +#include "cam_ife_csid980.h" + +#define CAM_CSID_VERSION_V970 0x90070000 + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_970_cmn_reg_info = { + .hw_version_addr = 0x0000, + .cfg0_addr = 0x0004, + .global_cmd_addr = 0x0008, + .reset_cfg_addr = 0x000C, + .reset_cmd_addr = 0x0010, + .irq_cmd_addr = 0x0014, + .rup_cmd_addr = 0x0018, + .aup_cmd_addr = 0x001C, + .rup_aup_cmd_addr = 0x0020, + .offline_cmd_addr = 0x0024, + .shdr_master_slave_cfg_addr = 0x0028, + .multi_sensor_mode_addr = 0x002C, + .top_irq_status_addr = {0x0084, 0x0094}, + .top_irq_mask_addr = {0x0088, 0x0098}, + .top_irq_clear_addr = {0x008C, 0x009C}, + .top_irq_set_addr = {0x0090, 0x00A0}, + .buf_done_irq_status_addr = 0x00A4, + .buf_done_irq_mask_addr = 0x00A8, + .buf_done_irq_clear_addr = 0x00AC, + .buf_done_irq_set_addr = 0x00B0, + .test_bus_ctrl = 0x03F4, + .test_bus_debug = 0x03F8, + .drv_cfg0_addr = 0x0164, + .drv_cfg1_addr = 0x0168, + .drv_cfg2_addr = 0x016C, + .debug_drv_0_addr = 0x0170, + .debug_drv_1_addr = 0x0174, + .debug_sensor_hbi_irq_vcdt_addr = 0x0180, + .debug_violation_addr = 0x03D4, + .debug_cfg_addr = 0x03E0, + .rx_mode_id_cfg1_addr = 0x0470, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 4, + .num_pix = 2, + .num_ppp = 1, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .vc_mask = 0x1F, + .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + .decode_format1_supported = true, + .decode_format_mask = 0xF, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .mup_shift_val = 4, + 
.shdr_slave_ppp_shift = 16, + .shdr_slave_rdi2_shift = 18, + .shdr_slave_rdi1_shift = 17, + .shdr_master_rdi0_shift = 4, + .shdr_master_slave_en_shift = 0, + .drv_en_shift = 0, + .drv_rup_en_shift = 0, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .top_err_irq_mask = {0x00000002, 0x18}, + .rst_loc_path_only_val = 0x0, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .top_reset_irq_mask = {0x1,}, + .rst_location_shift_val = 4, + .rst_mode_shift_val = 0, + .epoch_factor = 50, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .format_measure_max_hbi_shift = 16, + .format_measure_min_hbi_mask = 0xFFF, + .top_buf_done_irq_mask = 0x8, + .decode_format_payload_only = 0xF, + .timestamp_enabled_in_cfg0 = true, + .camif_irq_support = true, + .capabilities = CAM_IFE_CSID_CAP_SPLIT_RUP_AUP | + CAM_IFE_CSID_CAP_SKIP_PATH_CFG1 | + CAM_IFE_CSID_CAP_SKIP_EPOCH_CFG| + CAM_IFE_CSID_CAP_MULTI_CTXT, + .top_top2_irq_mask = 0x80000000, + .path_domain_id_cfg0 = 0x0, + .path_domain_id_cfg1 = 0x4, + .path_domain_id_cfg2 = 0x8, + .phy_sel_base_idx = 1, +}; + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_970_reg_info = { + .top_irq_reg_info = 
cam_ife_csid_980_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_980_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + NULL, + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + NULL, + NULL, + NULL, + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP_1], + NULL, + }, + .buf_done_irq_reg_info = &cam_ife_csid_980_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_970_cmn_reg_info, + .csi2_reg = &cam_ife_csid_980_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_980_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP_1] = &cam_ife_csid_980_ipp_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP_2] = NULL, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = &cam_ife_csid_980_ppp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_980_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_980_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_980_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_980_rdi_3_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_4] = NULL, + .ipp_mc_reg = &cam_ife_csid_980_ipp_mc_reg_info, + .need_top_cfg = 0x0, + .dynamic_drv_supported = false, + .top_irq_desc = &cam_ife_csid_980_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_980_rx_irq_desc, + .path_irq_desc = cam_ife_csid_980_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_980_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_980_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_980_path_irq_desc), + .top_debug_mask = &cam_ife_csid_980_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_980_rx_debug_mask, + .num_top_regs = 2, + .num_rx_regs = 2, +}; +#endif 
/*_CAM_IFE_CSID_970_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid975.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid975.h new file mode 100644 index 0000000000..ed3e77c9c7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid975.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_975_H_ +#define _CAM_IFE_CSID_975_H_ + +#include +#include "cam_ife_csid_dev.h" +#include "camera_main.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_ife_csid980.h" + +#define CAM_CSID_VERSION_V975 0x90070050 + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_975_reg_info = { + .top_irq_reg_info = cam_ife_csid_980_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_980_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_4], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + NULL, + NULL, + NULL, + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP_1], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP_2], + }, + .buf_done_irq_reg_info = &cam_ife_csid_980_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_980_cmn_reg_info, + .csi2_reg = &cam_ife_csid_980_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_980_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP_1] = 
&cam_ife_csid_980_ipp_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP_2] = &cam_ife_csid_980_ipp_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = &cam_ife_csid_980_ppp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_980_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_980_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_980_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_980_rdi_3_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_4] = &cam_ife_csid_980_rdi_4_reg_info, + .ipp_mc_reg = &cam_ife_csid_980_ipp_mc_reg_info, + .need_top_cfg = 0x0, + .dynamic_drv_supported = true, + .top_irq_desc = &cam_ife_csid_980_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_980_rx_irq_desc, + .path_irq_desc = cam_ife_csid_980_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_980_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_980_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_980_path_irq_desc), + .top_debug_mask = &cam_ife_csid_980_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_980_rx_debug_mask, + .num_top_regs = 2, + .num_rx_regs = 2, +}; +#endif /*_CAM_IFE_CSID_975_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid980.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid980.h new file mode 100644 index 0000000000..bd11cbe908 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid980.h @@ -0,0 +1,1979 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_980_H_ +#define _CAM_IFE_CSID_980_H_ + +#include +#include "cam_ife_csid_dev.h" +#include "camera_main.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" + +#define CAM_CSID_VERSION_V980 0x90080000 + +static const struct cam_ife_csid_irq_desc cam_ife_csid_980_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "DPHY_PH_ECC_SEC", + }, + { + .bitmask = BIT(9), + .desc = "SENSOR_MODE_ID_CHANGE", + }, + {0}, + {0}, + { + .bitmask = BIT(12), + .desc = + "DL0_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 0 - Check phy/sensor config", + }, + { + .bitmask = BIT(13), + .desc = + "DL1_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 1 - Check phy/sensor config", + }, + { + .bitmask = BIT(14), + .desc = + "DL2_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 2 - Check phy/sensor config", + }, + { + .bitmask = BIT(15), + .desc = + "DL3_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 3 - Check phy/sensor config", + }, + { + .bitmask = BIT(16), + .desc = + "DL0_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 0 - Check phy/sensor config", + }, + { + .bitmask = BIT(17), + .desc = + "DL1_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 1 - Check phy/sensor config", + }, + { + .bitmask = BIT(18), + .desc = + "DL2_SOT_LOST, Sensor: Timing 
signals are missed received in the cphy packet on lane 2 - Check phy/sensor config", + }, + { + .bitmask = BIT(19), + .desc = + "DL3_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 3 - Check phy/sensor config", + }, + { + .bitmask = BIT(20), + .desc = + "DL0_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 0 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(21), + .desc = + "DL1_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 1 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(22), + .desc = + "DL2_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 2 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(23), + .desc = + "DL3_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 3 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(24), + .desc = + "CPHY_PH_CRC, Sensor: All CPHY packet headers received are corrupted - Check phy/sensor config", + }, + { + .bitmask = BIT(25), + .desc = + "PAYLOAD_CRC, Sensor: The calculated CRC of a long packet does not match the transmitted (expected) CRC, possible corruption - Check phy/sensor config", + }, + { + .bitmask = BIT(26), + .desc = + "DPHY_PH_ECC_DED, Sensor: A short or long packet is corrupted and cannot be recovered - Check phy/sensor config", + }, + { + .bitmask = BIT(27), + .desc = + "MMAPPED_VC_DT, SW: A long packet has a VC_DT combination that is configured for more than one IPP or RDIs", + }, + { + .bitmask = BIT(28), + .desc = + "UNMAPPED_VC_DT, Sensor: A long packet has a VC_DT combination that is not configured for IPP or RDIs", + }, + { + .bitmask = BIT(29), + .desc = + "STREAM_UNDERFLOW, Sensor: Long packet payload size is less than payload header size, resulting a 
corrupted frame - Perform PHY Tuning/Check sensor limitations", + }, + {0}, + { + .bitmask = BIT(31), + .desc = "CSI2_RX_IRQ_STATUS_2", + }, + }, + { + { + .bitmask = BIT(0), + .desc = + "LONG_PKT, Debug: The header of the first long pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(1), + .desc = + "SHORT_PKT, Debug: The header of the first short pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(2), + .desc = + "CPHY_PKT_HDR, Debug: The header of the first cphy pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(3), + .desc = "Illegal programming for next frame ID config", + }, + }, +}; + +static const uint32_t cam_ife_csid_980_num_rx_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_980_rx_irq_desc[0]), + ARRAY_SIZE(cam_ife_csid_980_rx_irq_desc[1]), +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_980_path_irq_desc[] = { + { + .bitmask = BIT(0), + .err_type = CAM_ISP_HW_ERROR_CSID_FATAL, + .irq_name = "ILLEGAL_PROGRAMMING", + .desc = "SW: Illegal programming sequence", + .debug = "Check the following possiblities:", + .err_handler = cam_ife_csid_ver2_print_illegal_programming_irq_status, + }, + {0}, + { + .bitmask = BIT(2), + .irq_name = "INFO_DATA_FIFO_FULL", + }, + { + .bitmask = BIT(3), + .irq_name = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .irq_name = "CAMIF_SOF", + }, + {0}, + {0}, + {0}, + {0}, + { + .bitmask = BIT(9), + .irq_name = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .irq_name = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .irq_name = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .irq_name = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .irq_name = "ERROR_PIX_COUNT", + .desc = "SW: Mismatch in expected versus received number of pixels per line", + .debug = "Check SW config/sensor stream", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + 
.err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .irq_name = "ERROR_LINE_COUNT", + .desc = "SW: Mismatch in expected versus received number of lines", + .debug = "Check SW config/sensor stream", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .irq_name = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(16), + .irq_name = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(17), + .irq_name = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .err_type = CAM_ISP_HW_ERROR_CSID_CAMIF_FRAME_DROP, + .irq_name = "CAMIF_FRAME_DROP", + .desc = + "Sensor: The pre CAMIF frame drop would drop a frame in case the new frame starts prior to the end of the previous frame", + .debug = "Slower downstream processing or faster frame generation from sensor", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .irq_name = "OVERFLOW_RECOVERY", + .desc = "Detected by the overflow recovery block", + .debug = "Backpressure downstream", + }, + { + .bitmask = BIT(20), + .irq_name = "ERROR_REC_CCIF_VIOLATION", + .desc = "Output CCIF has a violation with respect to frame timing", + }, + { + .bitmask = BIT(21), + .irq_name = "CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .irq_name = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .irq_name = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .irq_name = "ILLEGAL_BATCH_ID", + .desc = "SW: Decoded frame ID does not match with any of the programmed batch IDs", + .debug = "Check frame ID and all available batch IDs", + }, + { + .bitmask = BIT(25), + .irq_name = "BATCH_END_MISSING_VIOLATION", + .desc = "SW: Input number of frames is not a multiple of the batch size", + .debug = "Check the configured pattern/period for batching", + }, + { + .bitmask = BIT(26), + .err_type = CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME, + .irq_name = "UNBOUNDED_FRAME", + .desc = "Sensor: Frame end or frame start is missing", + .debug = "Check the settle count in sensor driver XML", + }, + {0}, + { + .bitmask = 
BIT(28), + .irq_name = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .desc = + "Sensor/SW: The programmed MUP is out of sync with the VC of the incoming frame", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .irq_name = "CCIF_VIOLATION", + .desc = + "The output CCIF from the serializer has a violation with respect to frame timing", + }, +}; + +static const struct cam_ife_csid_top_irq_desc cam_ife_csid_980_top_irq_desc[][32] = { + { + { + .bitmask = BIT(1), + .err_type = CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR, + .err_name = "FATAL_SENSOR_SWITCHING_IRQ", + .desc = + "Sensor/SW: Minimum VBI period between dynamically switching between two sensor modes was either violated or the downstream pipe was not active when the switch was made", + }, + }, + { + { + .bitmask = BIT(2), + .err_name = "ERROR_NO_VOTE_DN", + .desc = + "DRV: vote_down is never generated for the same frame and resource is never relinquished", + .debug = "Check vote up generated time", + }, + { + .bitmask = BIT(3), + .err_type = CAM_ISP_HW_ERROR_DRV_VOTEUP_LATE, + .err_name = "ERROR_VOTE_UP_LATE", + .desc = "DRV: vote_up is generated after SOF", + .debug = "Check the vote up timer value", + .err_handler = cam_ife_csid_hw_ver2_drv_err_handler, + }, + { + .bitmask = BIT(4), + .err_type = CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW, + .err_name = "ERROR_RDI_LINE_BUFFER_CONFLICT", + .desc = + "System/SW: Multiple RDIs configured to access the same shared line buffer, more of a SW issue that led to this programming", + .err_handler = cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler, + }, + { + .bitmask = BIT(5), + .err_name = "ERROR_SENSOR_HBI", + .desc = "Sensor: Sensor HBI is less than expected HBI", + .debug = "Check sensor configuration", + }, + }, +}; + +static const uint32_t cam_ife_csid_980_num_top_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_980_top_irq_desc[0]), + ARRAY_SIZE(cam_ife_csid_980_top_irq_desc[1]), +}; + +static struct cam_irq_register_set 
cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_MAX] = { + /* Top_1 */ + { + .mask_reg_offset = 0x00000088, + .clear_reg_offset = 0x0000008C, + .status_reg_offset = 0x00000084, + .set_reg_offset = 0x00000090, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + .force_rd_mask = BIT(31), /* force read due to hw errata */ + }, + /* RX_1 */ + { + .mask_reg_offset = 0x000000B8, + .clear_reg_offset = 0x000000BC, + .status_reg_offset = 0x000000B4, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x00000118, + .clear_reg_offset = 0x0000011C, + .status_reg_offset = 0x00000114, + }, + /* RDI1 */ + { + .mask_reg_offset = 0x00000128, + .clear_reg_offset = 0x0000012C, + .status_reg_offset = 0x00000124, + }, + /* RDI2 */ + { + .mask_reg_offset = 0x00000138, + .clear_reg_offset = 0x0000013C, + .status_reg_offset = 0x00000134, + }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000148, + .clear_reg_offset = 0x0000014C, + .status_reg_offset = 0x00000144, + }, + /* RDI4 */ + { + .mask_reg_offset = 0x00000158, + .clear_reg_offset = 0x0000015C, + .status_reg_offset = 0x00000154, + }, + /* IPP_0 */ + { + .mask_reg_offset = 0x000000D8, + .clear_reg_offset = 0x000000DC, + .status_reg_offset = 0x000000D4, + }, + /* PPP */ + { + .mask_reg_offset = 0x00000108, + .clear_reg_offset = 0x0000010C, + .status_reg_offset = 0x00000104, + }, + /* UDI_0 */ + {0}, + /* UDI_1 */ + {0}, + /* UDI_2 */ + {0}, + + /* Top_2 */ + { + .mask_reg_offset = 0x00000098, + .clear_reg_offset = 0x0000009C, + .status_reg_offset = 0x00000094, + .set_reg_offset = 0x00000090, + }, + /* RX_2 */ + { + .mask_reg_offset = 0x000000C8, + .clear_reg_offset = 0x000000CC, + .status_reg_offset = 0x000000C4, + }, + /* IPP_1 */ + { + .mask_reg_offset = 0x000000E8, + .clear_reg_offset = 0x000000EC, + .status_reg_offset = 0x000000E4, + }, + /* IPP_2 */ + { + .mask_reg_offset = 0x000000F8, + .clear_reg_offset = 0x000000FC, + .status_reg_offset = 0x000000F4, + }, +}; + +static struct cam_irq_controller_reg_info 
cam_ife_csid_980_top_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000014, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_980_rx_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_MAX] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_4], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, /* intentionally set 
to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_PPP], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + /* UDI_0 */ + {0}, + /* UDI_1 */ + {0}, + /* UDI_2 */ + {0}, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP_1], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_980_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + +}; + +static struct cam_irq_register_set cam_ife_csid_980_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x000000A8, + .clear_reg_offset = 0x000000AC, + .status_reg_offset = 0x000000A4, + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_980_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_980_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_ipp_reg_info = { + .irq_status_addr = 0x00D4, + .irq_mask_addr = 0x00D8, + .irq_clear_addr = 0x00DC, + .irq_set_addr = 0x00E0, + .cfg0_addr = 0x0600, + .ctrl_addr = 0x0604, + .debug_clr_cmd_addr = 0x0608, + .multi_vcdt_cfg0_addr = 0x060C, + .cfg1_addr = 0, + .sparse_pd_extractor_cfg_addr = 0, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .bin_pd_detect_cfg0_addr = 0, + .bin_pd_detect_cfg1_addr = 0, + .bin_pd_detect_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x0638, + .epoch_irq_cfg_addr = 0, + .epoch0_subsample_ptrn_addr = 0, + .epoch1_subsample_ptrn_addr = 0, + .debug_rup_aup_status = 0x0648, + .debug_camif_1_addr = 0x064C, + .debug_camif_0_addr = 0x0650, + .frm_drop_pattern_addr = 0x0680, + .frm_drop_period_addr = 0x0684, + .irq_subsample_pattern_addr = 0x0688, + .irq_subsample_period_addr = 0x068C, + .hcrop_addr = 0, + .vcrop_addr = 0, + 
.pix_drop_pattern_addr = 0, + .pix_drop_period_addr = 0, + .line_drop_pattern_addr = 0, + .line_drop_period_addr = 0, + .debug_halt_status_addr = 0x0654, + .debug_misr_val0_addr = 0x0658, + .debug_misr_val1_addr = 0x065C, + .debug_misr_val2_addr = 0x0660, + .debug_misr_val3_addr = 0x0664, + .format_measure_cfg0_addr = 0x0690, + .format_measure_cfg1_addr = 0x0694, + .format_measure_cfg2_addr = 0x0698, + .format_measure0_addr = 0x069C, + .format_measure1_addr = 0x06A0, + .format_measure2_addr = 0x06A4, + .timestamp_curr0_sof_addr = 0x06A8, + .timestamp_curr1_sof_addr = 0x06AC, + .timestamp_perv0_sof_addr = 0x06B0, + .timestamp_perv1_sof_addr = 0x06B4, + .timestamp_curr0_eof_addr = 0x06B8, + .timestamp_curr1_eof_addr = 0x06BC, + .timestamp_perv0_eof_addr = 0x06C0, + .timestamp_perv1_eof_addr = 0x06C4, + .lut_bank_cfg_addr = 0, + .batch_id_cfg0_addr = 0x06CC, + .batch_id_cfg1_addr = 0x06D0, + .batch_period_cfg_addr = 0x06D4, + .batch_stream_id_cfg_addr = 0x06D8, + .epoch0_cfg_batch_id0_addr = 0x06DC, + .epoch1_cfg_batch_id0_addr = 0x06E0, + .epoch0_cfg_batch_id1_addr = 0x06E4, + .epoch1_cfg_batch_id1_addr = 0x06E8, + .epoch0_cfg_batch_id2_addr = 0x06EC, + .epoch1_cfg_batch_id2_addr = 0x06F0, + .epoch0_cfg_batch_id3_addr = 0x06F4, + .epoch1_cfg_batch_id3_addr = 0x06F8, + .epoch0_cfg_batch_id4_addr = 0x06FC, + .epoch1_cfg_batch_id4_addr = 0x0700, + .epoch0_cfg_batch_id5_addr = 0x0704, + .epoch1_cfg_batch_id5_addr = 0x0708, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x070C, + .path_frame_id = 0x0710, + .cfg2_addr = 0x0714, + + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS | + CAM_IFE_CSID_CAP_MULTI_CTXT, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .sof_retiming_dis_shift = 5, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, 
+ .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 0, + .early_eof_en_shift_val = 16, + .bin_h_en_shift_val = 20, + .bin_v_en_shift_val = 21, + .bin_en_shift_val = 18, + .bin_qcfa_en_shift_val = 19, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 0, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .bin_pd_en_shift_val = 0, + .bin_pd_blk_w_shift_val = 8, + .bin_pd_blk_h_shift_val = 24, + .bin_pd_detect_x_offset_shift_val = 0, + .bin_pd_detect_x_end_shift_val = 16, + .bin_pd_detect_y_offset_shift_val = 0, + .bin_pd_detect_y_end_shift_val = 16, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x1, + .aup_mask = 0x1, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x100,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .disable_sof_retime_default = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_ipp_1_reg_info = { + .irq_status_addr = 0x00E4, + .irq_mask_addr = 0x00E8, + .irq_clear_addr = 0x00EC, + .irq_set_addr = 0x00F0, + .cfg0_addr = 0x0800, + .ctrl_addr = 0x0804, + .debug_clr_cmd_addr = 0x0808, + .multi_vcdt_cfg0_addr = 0x080C, + .cfg1_addr = 0, + .sparse_pd_extractor_cfg_addr = 0, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .bin_pd_detect_cfg0_addr = 0, + .bin_pd_detect_cfg1_addr = 0, + .bin_pd_detect_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x0838, + .epoch_irq_cfg_addr = 0, + .epoch0_subsample_ptrn_addr = 0, + .epoch1_subsample_ptrn_addr = 0, + .debug_rup_aup_status = 0x0848, + .debug_camif_1_addr = 0x084C, + .debug_camif_0_addr = 0x0850, + .frm_drop_pattern_addr = 0x0880, + .frm_drop_period_addr = 0x0884, + .irq_subsample_pattern_addr = 
0x0888, + .irq_subsample_period_addr = 0x088C, + .hcrop_addr = 0, + .vcrop_addr = 0, + .pix_drop_pattern_addr = 0, + .pix_drop_period_addr = 0, + .line_drop_pattern_addr = 0, + .line_drop_period_addr = 0, + .debug_halt_status_addr = 0x0854, + .debug_misr_val0_addr = 0x0858, + .debug_misr_val1_addr = 0x085C, + .debug_misr_val2_addr = 0x0860, + .debug_misr_val3_addr = 0x0864, + .format_measure_cfg0_addr = 0x0890, + .format_measure_cfg1_addr = 0x0894, + .format_measure_cfg2_addr = 0x0898, + .format_measure0_addr = 0x089C, + .format_measure1_addr = 0x08A0, + .format_measure2_addr = 0x08A4, + .timestamp_curr0_sof_addr = 0x08A8, + .timestamp_curr1_sof_addr = 0x08AC, + .timestamp_perv0_sof_addr = 0x08B0, + .timestamp_perv1_sof_addr = 0x08B4, + .timestamp_curr0_eof_addr = 0x08B8, + .timestamp_curr1_eof_addr = 0x08BC, + .timestamp_perv0_eof_addr = 0x08C0, + .timestamp_perv1_eof_addr = 0x08C4, + .lut_bank_cfg_addr = 0, + .batch_id_cfg0_addr = 0x08CC, + .batch_id_cfg1_addr = 0x08D0, + .batch_period_cfg_addr = 0x08D4, + .batch_stream_id_cfg_addr = 0x08D8, + .epoch0_cfg_batch_id0_addr = 0x08DC, + .epoch1_cfg_batch_id0_addr = 0x08E0, + .epoch0_cfg_batch_id1_addr = 0x08E4, + .epoch1_cfg_batch_id1_addr = 0x08E8, + .epoch0_cfg_batch_id2_addr = 0x08EC, + .epoch1_cfg_batch_id2_addr = 0x08F0, + .epoch0_cfg_batch_id3_addr = 0x08F4, + .epoch1_cfg_batch_id3_addr = 0x08F8, + .epoch0_cfg_batch_id4_addr = 0x08FC, + .epoch1_cfg_batch_id4_addr = 0x0900, + .epoch0_cfg_batch_id5_addr = 0x0904, + .epoch1_cfg_batch_id5_addr = 0x0908, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x090C, + .path_frame_id = 0x0910, + .cfg2_addr = 0x0914, + + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS | + CAM_IFE_CSID_CAP_MULTI_CTXT, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .sof_retiming_dis_shift = 5, + .start_mode_shift = 2, + .start_master_sel_val = 0, + 
.start_master_sel_shift = 4, + .timestamp_en_shift_val = 6, + .start_master_sel_shift_val = 4, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x2, + .aup_mask = 0x2, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x200,}, + .disable_sof_retime_default = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_ipp_2_reg_info = { + .irq_status_addr = 0x00F4, + .irq_mask_addr = 0x00F8, + .irq_clear_addr = 0x00FC, + .irq_set_addr = 0x0100, + .cfg0_addr = 0x0A00, + .ctrl_addr = 0x0A04, + .debug_clr_cmd_addr = 0x0A08, + .multi_vcdt_cfg0_addr = 0x0A0C, + .cfg1_addr = 0, + .sparse_pd_extractor_cfg_addr = 0, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .bin_pd_detect_cfg0_addr = 0, + .bin_pd_detect_cfg1_addr = 0, + .bin_pd_detect_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x0A38, + .epoch_irq_cfg_addr = 0, + .epoch0_subsample_ptrn_addr = 0, + .epoch1_subsample_ptrn_addr = 0, + .debug_rup_aup_status = 0x0A48, + .debug_camif_1_addr = 0x0A4C, + .debug_camif_0_addr = 0x0A50, + .frm_drop_pattern_addr = 0x0A80, + .frm_drop_period_addr = 0x0A84, + .irq_subsample_pattern_addr = 0x0A88, + .irq_subsample_period_addr = 0x0A8C, + .hcrop_addr = 0, + .vcrop_addr = 0, + .pix_drop_pattern_addr = 0, + .pix_drop_period_addr = 0, + .line_drop_pattern_addr = 0, + .line_drop_period_addr = 0, + .debug_halt_status_addr = 0x0A54, + .debug_misr_val0_addr = 0x0A58, + .debug_misr_val1_addr = 0x0A5C, + .debug_misr_val2_addr = 0x0A60, + .debug_misr_val3_addr = 0x0A64, + .format_measure_cfg0_addr = 0x0A90, + .format_measure_cfg1_addr = 0x0A94, + .format_measure_cfg2_addr = 0x0A98, + .format_measure0_addr = 0x0A9C, + .format_measure1_addr = 0x0AA0, + .format_measure2_addr = 0x0AA4, + .timestamp_curr0_sof_addr = 0x0AA8, + .timestamp_curr1_sof_addr = 0x0AAC, + 
.timestamp_perv0_sof_addr = 0x0AB0, + .timestamp_perv1_sof_addr = 0x0AB4, + .timestamp_curr0_eof_addr = 0x0AB8, + .timestamp_curr1_eof_addr = 0x0ABC, + .timestamp_perv0_eof_addr = 0x0AC0, + .timestamp_perv1_eof_addr = 0x0AC4, + .lut_bank_cfg_addr = 0, + .batch_id_cfg0_addr = 0x0ACC, + .batch_id_cfg1_addr = 0x0AD0, + .batch_period_cfg_addr = 0x0AD4, + .batch_stream_id_cfg_addr = 0x0AD8, + .epoch0_cfg_batch_id0_addr = 0x0ADC, + .epoch1_cfg_batch_id0_addr = 0x0AE0, + .epoch0_cfg_batch_id1_addr = 0x0AE4, + .epoch1_cfg_batch_id1_addr = 0x0AE8, + .epoch0_cfg_batch_id2_addr = 0x0AEC, + .epoch1_cfg_batch_id2_addr = 0x0AF0, + .epoch0_cfg_batch_id3_addr = 0x0AF4, + .epoch1_cfg_batch_id3_addr = 0x0AF8, + .epoch0_cfg_batch_id4_addr = 0x0AFC, + .epoch1_cfg_batch_id4_addr = 0x0B00, + .epoch0_cfg_batch_id5_addr = 0x0B04, + .epoch1_cfg_batch_id5_addr = 0x0B08, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x0B0C, + .path_frame_id = 0x0B10, + .cfg2_addr = 0x0B14, + + /* configurations */ + .resume_frame_boundary = 1, + .binning_supported = 0x7, + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS | + CAM_IFE_CSID_CAP_MULTI_CTXT, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .sof_retiming_dis_shift = 5, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .timestamp_en_shift_val = 6, + .start_master_sel_shift_val = 4, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x4, + .aup_mask = 0x4, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x400,}, + .disable_sof_retime_default = true, +}; + + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_ppp_reg_info = { + .irq_status_addr = 0x0104, + .irq_mask_addr = 0x0108, + .irq_clear_addr = 0x010C, + .irq_set_addr = 0x0110, + .cfg0_addr = 0x0C00, + .ctrl_addr = 
0x0C04, + .debug_clr_cmd_addr = 0x0C08, + .multi_vcdt_cfg0_addr = 0x0C0C, + .cfg1_addr = 0x0C10, + .bin_cfg0_addr = 0x0C14, + .pix_store_cfg0_addr = 0x0C18, + .sparse_pd_extractor_cfg_addr = 0x0C1C, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .bin_pd_detect_cfg0_addr = 0, + .bin_pd_detect_cfg1_addr = 0, + .bin_pd_detect_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x0C38, + .epoch_irq_cfg_addr = 0x0C3C, + .epoch0_subsample_ptrn_addr = 0x0C40, + .epoch1_subsample_ptrn_addr = 0x0C44, + .debug_rup_aup_status = 0x0C48, + .debug_camif_1_addr = 0x0C4C, + .debug_camif_0_addr = 0x0C50, + .frm_drop_pattern_addr = 0x0C80, + .frm_drop_period_addr = 0x0C84, + .irq_subsample_pattern_addr = 0x0C88, + .irq_subsample_period_addr = 0x0C8C, + .hcrop_addr = 0x0C68, + .vcrop_addr = 0x0C6C, + .pix_drop_pattern_addr = 0x0C70, + .pix_drop_period_addr = 0x0C74, + .line_drop_pattern_addr = 0x0C78, + .line_drop_period_addr = 0x0C7C, + .debug_halt_status_addr = 0x0C54, + .debug_misr_val0_addr = 0x0C58, + .debug_misr_val1_addr = 0x0C5C, + .debug_misr_val2_addr = 0x0C60, + .debug_misr_val3_addr = 0x0C64, + .format_measure_cfg0_addr = 0x0C90, + .format_measure_cfg1_addr = 0x0C94, + .format_measure_cfg2_addr = 0x0C98, + .format_measure0_addr = 0x0C9C, + .format_measure1_addr = 0x0CA0, + .format_measure2_addr = 0x0CA4, + .timestamp_curr0_sof_addr = 0x0CA8, + .timestamp_curr1_sof_addr = 0x0CAC, + .timestamp_perv0_sof_addr = 0x0CB0, + .timestamp_perv1_sof_addr = 0x0CB4, + .timestamp_curr0_eof_addr = 0x0CB8, + .timestamp_curr1_eof_addr = 0x0CBC, + .timestamp_perv0_eof_addr = 0x0CC0, + .timestamp_perv1_eof_addr = 0x0CC4, + .lut_bank_cfg_addr = 0x0CC8, + .batch_id_cfg0_addr = 0x0CCC, + .batch_id_cfg1_addr = 0x0CD0, + .batch_period_cfg_addr = 0x0CD4, + .batch_stream_id_cfg_addr = 0x0CD8, + .epoch0_cfg_batch_id0_addr = 0x0CDC, + .epoch1_cfg_batch_id0_addr = 0x0CE0, + .epoch0_cfg_batch_id1_addr = 0x0CE4, + .epoch1_cfg_batch_id1_addr = 0x0CE8, + 
.epoch0_cfg_batch_id2_addr = 0x0CEC, + .epoch1_cfg_batch_id2_addr = 0x0CF0, + .epoch0_cfg_batch_id3_addr = 0x0CF4, + .epoch1_cfg_batch_id3_addr = 0x0CF8, + .epoch0_cfg_batch_id4_addr = 0x0CFC, + .epoch1_cfg_batch_id4_addr = 0x0D00, + .epoch0_cfg_batch_id5_addr = 0x0D04, + .epoch1_cfg_batch_id5_addr = 0x0D08, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x0D0C, + .path_frame_id = 0x0D10, + .debug_sim_monitor = 0x0D14, + + /* configurations */ + .capabilities = CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .resume_frame_boundary = 1, + .start_mode_shift = 2, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_master_sel_val = 3, + .start_master_sel_shift = 4, + .binning_supported = 0x1, + .bin_h_en_shift_val = 18, + .bin_en_shift_val = 18, + .early_eof_en_shift_val = 16, + .pix_store_en_shift_val = 0, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 0, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 1, + .start_master_sel_shift_val = 4, + .lut_bank_0_sel_val = 0, + .lut_bank_1_sel_val = 1, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .rup_mask = 0x10000, + .aup_mask = 0x10000, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x10,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .sof_retiming_dis_shift = 5, + .disable_sof_retime_default = true, + .use_master_slave_default = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_rdi_0_reg_info = { + .irq_status_addr = 0x0114, + .irq_mask_addr = 0x0118, + .irq_clear_addr = 0x011C, + .irq_set_addr = 0x0120, + .cfg0_addr = 0x0E00, + .ctrl_addr = 0x0E04, + .debug_clr_cmd_addr = 0x0E08, + .multi_vcdt_cfg0_addr = 0x0E0C, + .cfg1_addr = 0x0E10, + .pix_store_cfg0_addr = 0x0E14, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + 
.err_recovery_cfg2_addr = 0, + .debug_byte_cntr_ping_addr = 0x0E24, + .debug_byte_cntr_pong_addr = 0x0E28, + .camif_frame_cfg_addr = 0x0E2C, + .epoch_irq_cfg_addr = 0x0E30, + .epoch0_subsample_ptrn_addr = 0x0E34, + .epoch1_subsample_ptrn_addr = 0x0E38, + .debug_rup_aup_status = 0x0E3C, + .debug_camif_1_addr = 0x0E40, + .debug_camif_0_addr = 0x0E44, + .frm_drop_pattern_addr = 0x0E48, + .frm_drop_period_addr = 0x0E4C, + .irq_subsample_pattern_addr = 0x0E50, + .irq_subsample_period_addr = 0x0E54, + .hcrop_addr = 0x0E58, + .vcrop_addr = 0x0E5C, + .pix_drop_pattern_addr = 0x0E60, + .pix_drop_period_addr = 0x0E64, + .line_drop_pattern_addr = 0x0E68, + .line_drop_period_addr = 0x0E6C, + .debug_halt_status_addr = 0x0E74, + .debug_misr_val0_addr = 0x0E78, + .debug_misr_val1_addr = 0x0E7C, + .debug_misr_val2_addr = 0x0E80, + .debug_misr_val3_addr = 0x0E84, + .format_measure_cfg0_addr = 0x0E88, + .format_measure_cfg1_addr = 0x0E8C, + .format_measure_cfg2_addr = 0x0E90, + .format_measure0_addr = 0x0E94, + .format_measure1_addr = 0x0E98, + .format_measure2_addr = 0x0E9C, + .timestamp_curr0_sof_addr = 0x0EA0, + .timestamp_curr1_sof_addr = 0x0EA4, + .timestamp_perv0_sof_addr = 0x0EA8, + .timestamp_perv1_sof_addr = 0x0EAC, + .timestamp_curr0_eof_addr = 0x0EB0, + .timestamp_curr1_eof_addr = 0x0EB4, + .timestamp_perv0_eof_addr = 0x0EB8, + .timestamp_perv1_eof_addr = 0x0EBC, + .batch_id_cfg0_addr = 0x0EC0, + .batch_id_cfg1_addr = 0x0EC4, + .batch_period_cfg_addr = 0x0EC8, + .batch_stream_id_cfg_addr = 0x0ECC, + .epoch0_cfg_batch_id0_addr = 0x0ED0, + .epoch1_cfg_batch_id0_addr = 0x0ED4, + .epoch0_cfg_batch_id1_addr = 0x0ED8, + .epoch1_cfg_batch_id1_addr = 0x0EDC, + .epoch0_cfg_batch_id2_addr = 0x0EE0, + .epoch1_cfg_batch_id2_addr = 0x0EE4, + .epoch0_cfg_batch_id3_addr = 0x0EE8, + .epoch1_cfg_batch_id3_addr = 0x0EEC, + .epoch0_cfg_batch_id4_addr = 0x0EF0, + .epoch1_cfg_batch_id4_addr = 0x0EF4, + .epoch0_cfg_batch_id5_addr = 0x0EF8, + .epoch1_cfg_batch_id5_addr = 0x0EFC, + 
.secure_mask_cfg0 = 0, + .path_batch_status = 0x0F00, + .path_frame_id = 0x0F04, + .cfg2_addr = 0x0F08, + .debug_sim_monitor = 0x0F0C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x100, + .aup_mask = 0x100, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x10000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, + .default_out_format = CAM_FORMAT_PLAIN16_16, + .disable_sof_retime_default = true, + .use_master_slave_default = true, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_rdi_1_reg_info = { + .irq_status_addr = 0x0124, + .irq_mask_addr = 0x0128, + .irq_clear_addr = 0x012C, + .irq_set_addr = 0x0130, + .cfg0_addr = 0x1000, + .ctrl_addr = 0x1004, + .debug_clr_cmd_addr = 0x1008, + .multi_vcdt_cfg0_addr = 0x100C, + .cfg1_addr = 0x1010, + .pix_store_cfg0_addr = 0x1014, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .debug_byte_cntr_ping_addr = 0x1024, + 
.debug_byte_cntr_pong_addr = 0x1028, + .camif_frame_cfg_addr = 0x102C, + .epoch_irq_cfg_addr = 0x1030, + .epoch0_subsample_ptrn_addr = 0x1034, + .epoch1_subsample_ptrn_addr = 0x1038, + .debug_rup_aup_status = 0x103C, + .debug_camif_1_addr = 0x1040, + .debug_camif_0_addr = 0x1044, + .frm_drop_pattern_addr = 0x1048, + .frm_drop_period_addr = 0x104C, + .irq_subsample_pattern_addr = 0x1050, + .irq_subsample_period_addr = 0x1054, + .hcrop_addr = 0x1058, + .vcrop_addr = 0x105C, + .pix_drop_pattern_addr = 0x1060, + .pix_drop_period_addr = 0x1064, + .line_drop_pattern_addr = 0x1068, + .line_drop_period_addr = 0x106C, + .debug_halt_status_addr = 0x1074, + .debug_misr_val0_addr = 0x1078, + .debug_misr_val1_addr = 0x107C, + .debug_misr_val2_addr = 0x1080, + .debug_misr_val3_addr = 0x1084, + .format_measure_cfg0_addr = 0x1088, + .format_measure_cfg1_addr = 0x108C, + .format_measure_cfg2_addr = 0x1090, + .format_measure0_addr = 0x1094, + .format_measure1_addr = 0x1098, + .format_measure2_addr = 0x109C, + .timestamp_curr0_sof_addr = 0x10A0, + .timestamp_curr1_sof_addr = 0x10A4, + .timestamp_perv0_sof_addr = 0x10A8, + .timestamp_perv1_sof_addr = 0x10AC, + .timestamp_curr0_eof_addr = 0x10B0, + .timestamp_curr1_eof_addr = 0x10B4, + .timestamp_perv0_eof_addr = 0x10B8, + .timestamp_perv1_eof_addr = 0x10BC, + .batch_id_cfg0_addr = 0x10C0, + .batch_id_cfg1_addr = 0x10C4, + .batch_period_cfg_addr = 0x10C8, + .batch_stream_id_cfg_addr = 0x10CC, + .epoch0_cfg_batch_id0_addr = 0x10D0, + .epoch1_cfg_batch_id0_addr = 0x10D4, + .epoch0_cfg_batch_id1_addr = 0x10D8, + .epoch1_cfg_batch_id1_addr = 0x10DC, + .epoch0_cfg_batch_id2_addr = 0x10E0, + .epoch1_cfg_batch_id2_addr = 0x10E4, + .epoch0_cfg_batch_id3_addr = 0x10E8, + .epoch1_cfg_batch_id3_addr = 0x10EC, + .epoch0_cfg_batch_id4_addr = 0x10F0, + .epoch1_cfg_batch_id4_addr = 0x10F4, + .epoch0_cfg_batch_id5_addr = 0x10F8, + .epoch1_cfg_batch_id5_addr = 0x10FC, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x1100, + .path_frame_id = 0x1104, + 
.cfg2_addr = 0x1108, + .debug_sim_monitor = 0x110C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x200, + .aup_mask = 0x200, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x20000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_rdi_2_reg_info = { + .irq_status_addr = 0x0134, + .irq_mask_addr = 0x0138, + .irq_clear_addr = 0x013C, + .irq_set_addr = 0x0140, + .cfg0_addr = 0x1200, + .ctrl_addr = 0x1204, + .debug_clr_cmd_addr = 0x1208, + .multi_vcdt_cfg0_addr = 0x120C, + .cfg1_addr = 0x1210, + .pix_store_cfg0_addr = 0x1214, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .debug_byte_cntr_ping_addr = 0x1224, + .debug_byte_cntr_pong_addr = 0x1228, + .camif_frame_cfg_addr = 0x122C, + .epoch_irq_cfg_addr = 0x1230, + .epoch0_subsample_ptrn_addr = 0x1234, + .epoch1_subsample_ptrn_addr = 0x1238, + .debug_rup_aup_status 
= 0x123C, + .debug_camif_1_addr = 0x1240, + .debug_camif_0_addr = 0x1244, + .frm_drop_pattern_addr = 0x1248, + .frm_drop_period_addr = 0x124C, + .irq_subsample_pattern_addr = 0x1250, + .irq_subsample_period_addr = 0x1254, + .hcrop_addr = 0x1258, + .vcrop_addr = 0x125C, + .pix_drop_pattern_addr = 0x1260, + .pix_drop_period_addr = 0x1264, + .line_drop_pattern_addr = 0x1268, + .line_drop_period_addr = 0x126C, + .debug_halt_status_addr = 0x1274, + .debug_misr_val0_addr = 0x1278, + .debug_misr_val1_addr = 0x127C, + .debug_misr_val2_addr = 0x1280, + .debug_misr_val3_addr = 0x1284, + .format_measure_cfg0_addr = 0x1288, + .format_measure_cfg1_addr = 0x128C, + .format_measure_cfg2_addr = 0x1290, + .format_measure0_addr = 0x1294, + .format_measure1_addr = 0x1298, + .format_measure2_addr = 0x129C, + .timestamp_curr0_sof_addr = 0x12A0, + .timestamp_curr1_sof_addr = 0x12A4, + .timestamp_perv0_sof_addr = 0x12A8, + .timestamp_perv1_sof_addr = 0x12AC, + .timestamp_curr0_eof_addr = 0x12B0, + .timestamp_curr1_eof_addr = 0x12B4, + .timestamp_perv0_eof_addr = 0x12B8, + .timestamp_perv1_eof_addr = 0x12BC, + .batch_id_cfg0_addr = 0x12C0, + .batch_id_cfg1_addr = 0x12C4, + .batch_period_cfg_addr = 0x12C8, + .batch_stream_id_cfg_addr = 0x12CC, + .epoch0_cfg_batch_id0_addr = 0x12D0, + .epoch1_cfg_batch_id0_addr = 0x12D4, + .epoch0_cfg_batch_id1_addr = 0x12D8, + .epoch1_cfg_batch_id1_addr = 0x12DC, + .epoch0_cfg_batch_id2_addr = 0x12E0, + .epoch1_cfg_batch_id2_addr = 0x12E4, + .epoch0_cfg_batch_id3_addr = 0x12E8, + .epoch1_cfg_batch_id3_addr = 0x12EC, + .epoch0_cfg_batch_id4_addr = 0x12F0, + .epoch1_cfg_batch_id4_addr = 0x12F4, + .epoch0_cfg_batch_id5_addr = 0x12F8, + .epoch1_cfg_batch_id5_addr = 0x12FC, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x1300, + .path_frame_id = 0x1304, + .cfg2_addr = 0x1308, + .debug_sim_monitor = 0x130C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + 
CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x400, + .aup_mask = 0x400, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x40000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, + +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_rdi_3_reg_info = { + .irq_status_addr = 0x0144, + .irq_mask_addr = 0x0148, + .irq_clear_addr = 0x014C, + .irq_set_addr = 0x0150, + .cfg0_addr = 0x1400, + .ctrl_addr = 0x1404, + .debug_clr_cmd_addr = 0x1408, + .multi_vcdt_cfg0_addr = 0x140C, + .cfg1_addr = 0x1410, + .debug_byte_cntr_ping_addr = 0x1424, + .debug_byte_cntr_pong_addr = 0x1428, + .camif_frame_cfg_addr = 0x142C, + .epoch_irq_cfg_addr = 0x1430, + .epoch0_subsample_ptrn_addr = 0x1434, + .epoch1_subsample_ptrn_addr = 0x1438, + .debug_rup_aup_status = 0x143C, + .debug_camif_1_addr = 0x1440, + .debug_camif_0_addr = 0x1444, + .frm_drop_pattern_addr = 0x1448, + .frm_drop_period_addr = 0x144C, + .irq_subsample_pattern_addr = 0x1450, + .irq_subsample_period_addr = 0x1454, + .hcrop_addr = 0x1458, + .vcrop_addr = 0x145C, + .pix_drop_pattern_addr = 0x1460, 
+ .pix_drop_period_addr = 0x1464, + .line_drop_pattern_addr = 0x1468, + .line_drop_period_addr = 0x146C, + .debug_halt_status_addr = 0x1474, + .debug_misr_val0_addr = 0x1478, + .debug_misr_val1_addr = 0x147C, + .debug_misr_val2_addr = 0x1480, + .debug_misr_val3_addr = 0x1484, + .format_measure_cfg0_addr = 0x1488, + .format_measure_cfg1_addr = 0x148C, + .format_measure_cfg2_addr = 0x1490, + .format_measure0_addr = 0x1494, + .format_measure1_addr = 0x1498, + .format_measure2_addr = 0x149C, + .timestamp_curr0_sof_addr = 0x14A0, + .timestamp_curr1_sof_addr = 0x14A4, + .timestamp_perv0_sof_addr = 0x14A8, + .timestamp_perv1_sof_addr = 0x14AC, + .timestamp_curr0_eof_addr = 0x14B0, + .timestamp_curr1_eof_addr = 0x14B4, + .timestamp_perv0_eof_addr = 0x14B8, + .timestamp_perv1_eof_addr = 0x14BC, + .batch_id_cfg0_addr = 0x14C0, + .batch_id_cfg1_addr = 0x14C4, + .batch_period_cfg_addr = 0x14C8, + .batch_stream_id_cfg_addr = 0x14CC, + .epoch0_cfg_batch_id0_addr = 0x14D0, + .epoch1_cfg_batch_id0_addr = 0x14D4, + .epoch0_cfg_batch_id1_addr = 0x14D8, + .epoch1_cfg_batch_id1_addr = 0x14DC, + .epoch0_cfg_batch_id2_addr = 0x14E0, + .epoch1_cfg_batch_id2_addr = 0x14E4, + .epoch0_cfg_batch_id3_addr = 0x14E8, + .epoch1_cfg_batch_id3_addr = 0x14EC, + .epoch0_cfg_batch_id4_addr = 0x14F0, + .epoch1_cfg_batch_id4_addr = 0x14F4, + .epoch0_cfg_batch_id5_addr = 0x14F8, + .epoch1_cfg_batch_id5_addr = 0x14FC, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x1500, + .path_frame_id = 0x1504, + .cfg2_addr = 0x1508, + .debug_sim_monitor = 0x150C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + 
.plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + .offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x800, + .aup_mask = 0x800, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x80000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_980_rdi_4_reg_info = { + .irq_status_addr = 0x0154, + .irq_mask_addr = 0x0158, + .irq_clear_addr = 0x015C, + .irq_set_addr = 0x0160, + .cfg0_addr = 0x1600, + .ctrl_addr = 0x1604, + .debug_clr_cmd_addr = 0x1608, + .multi_vcdt_cfg0_addr = 0x160C, + .cfg1_addr = 0x1610, + .debug_byte_cntr_ping_addr = 0x1624, + .debug_byte_cntr_pong_addr = 0x1628, + .camif_frame_cfg_addr = 0x162C, + .epoch_irq_cfg_addr = 0x1630, + .epoch0_subsample_ptrn_addr = 0x1634, + .epoch1_subsample_ptrn_addr = 0x1638, + .debug_rup_aup_status = 0x163C, + .debug_camif_1_addr = 0x1640, + .debug_camif_0_addr = 0x1644, + .frm_drop_pattern_addr = 0x1648, + .frm_drop_period_addr = 0x164C, + .irq_subsample_pattern_addr = 0x1650, + .irq_subsample_period_addr = 0x1654, + .hcrop_addr = 0x1658, + .vcrop_addr = 0x165C, + .pix_drop_pattern_addr = 0x1660, + .pix_drop_period_addr = 0x1664, + .line_drop_pattern_addr = 0x1668, + .line_drop_period_addr = 0x166C, + .debug_halt_status_addr = 0x1674, + .debug_misr_val0_addr = 0x1678, + .debug_misr_val1_addr = 0x167C, + .debug_misr_val2_addr = 0x1680, + .debug_misr_val3_addr = 0x1684, + .format_measure_cfg0_addr = 0x1688, + 
.format_measure_cfg1_addr = 0x168C, + .format_measure_cfg2_addr = 0x1690, + .format_measure0_addr = 0x1694, + .format_measure1_addr = 0x1698, + .format_measure2_addr = 0x169C, + .timestamp_curr0_sof_addr = 0x16A0, + .timestamp_curr1_sof_addr = 0x16A4, + .timestamp_perv0_sof_addr = 0x16A8, + .timestamp_perv1_sof_addr = 0x16AC, + .timestamp_curr0_eof_addr = 0x16B0, + .timestamp_curr1_eof_addr = 0x16B4, + .timestamp_perv0_eof_addr = 0x16B8, + .timestamp_perv1_eof_addr = 0x16BC, + .batch_id_cfg0_addr = 0x16C0, + .batch_id_cfg1_addr = 0x16C4, + .batch_period_cfg_addr = 0x16C8, + .batch_stream_id_cfg_addr = 0x16CC, + .epoch0_cfg_batch_id0_addr = 0x16D0, + .epoch1_cfg_batch_id0_addr = 0x16D4, + .epoch0_cfg_batch_id1_addr = 0x16D8, + .epoch1_cfg_batch_id1_addr = 0x16DC, + .epoch0_cfg_batch_id2_addr = 0x16E0, + .epoch1_cfg_batch_id2_addr = 0x16E4, + .epoch0_cfg_batch_id3_addr = 0x16E8, + .epoch1_cfg_batch_id3_addr = 0x16EC, + .epoch0_cfg_batch_id4_addr = 0x16F0, + .epoch1_cfg_batch_id4_addr = 0x16F4, + .epoch0_cfg_batch_id5_addr = 0x16F8, + .epoch1_cfg_batch_id5_addr = 0x16FC, + .secure_mask_cfg0 = 0, + .path_batch_status = 0x1700, + .path_frame_id = 0x1704, + .cfg2_addr = 0x1708, + .debug_sim_monitor = 0x170C, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 0, + .capabilities = CAM_IFE_CSID_CAP_INPUT_LCR | + CAM_IFE_CSID_CAP_RDI_UNPACK_MSB | + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI | + CAM_IFE_CSID_CAP_SOF_RETIME_DIS, + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_shift = 2, + .overflow_ctrl_mode_val = 0x8, + .offline_mode_supported = 1, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 4, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 7, + 
.offline_mode_en_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x241c6001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x1000, + .aup_mask = 0x1000, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x100000,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, + .pix_store_en_shift_val = 0, + .sof_retiming_dis_shift = 5, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_980_rx_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_RX2_IRQ) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_LOST) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_LOST), + + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED), + }, + + + 
.bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 8, + .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 24, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 20, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 21, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 22, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 25, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 26, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 27, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 28, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 29, + .bit_pos[CAM_IFE_CSID_RX_RX2_IRQ] = 31, + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_LOST] = 12, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_LOST] = 13, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_LOST] = 14, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_LOST] = 15, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_LOST] = 16, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_LOST] = 17, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_LOST] = 18, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_LOST] = 19, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_980_top_debug_mask = { + + .evt_bitmap = { + 0ULL, + + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP) | + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN) | + BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN), + }, + + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP] = 0, + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN] = 1, + .bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN] = 2, +}; + + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + 
cam_ife_csid_980_csi2_reg_info = { + .irq_status_addr = {0x00B4, 0x00C4}, + .irq_mask_addr = {0x00B8, 0x00C8}, + .irq_clear_addr = {0x00BC, 0x00CC}, + .irq_set_addr = {0x00C0, 0x00D0}, + /*CSI2 rx control */ + .cfg0_addr = 0x0400, + .cfg1_addr = 0x0404, + .capture_ctrl_addr = 0x0408, + .rst_strobes_addr = 0x040C, + .cap_unmap_long_pkt_hdr_0_addr = 0x0410, + .cap_unmap_long_pkt_hdr_1_addr = 0x0414, + .captured_short_pkt_0_addr = 0x0418, + .captured_short_pkt_1_addr = 0x041c, + .captured_long_pkt_0_addr = 0x0420, + .captured_long_pkt_1_addr = 0x0424, + .captured_long_pkt_ftr_addr = 0x0428, + .captured_cphy_pkt_hdr_addr = 0x042c, + .lane0_misr_addr = 0x0430, + .lane1_misr_addr = 0x0434, + .lane2_misr_addr = 0x0438, + .lane3_misr_addr = 0x043c, + .total_pkts_rcvd_addr = 0x0440, + .stats_ecc_addr = 0x0444, + .total_crc_err_addr = 0x0448, + .de_scramble_type3_cfg0_addr = 0x044C, + .de_scramble_type3_cfg1_addr = 0x0450, + .de_scramble_type2_cfg0_addr = 0x0454, + .de_scramble_type2_cfg1_addr = 0x0458, + .de_scramble_type1_cfg0_addr = 0x045C, + .de_scramble_type1_cfg1_addr = 0x0460, + .de_scramble_type0_cfg0_addr = 0x0464, + .de_scramble_type0_cfg1_addr = 0x0468, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff0000, + .expected_crc_mask = 0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + 
.phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, + .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .rup_aup_latch_shift = 13, + .rup_aup_latch_supported = true, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .fatal_err_mask = {0x25fff000, 0x0}, + .part_fatal_err_mask = {0x02000000, 0x0}, + .non_fatal_err_mask = {0x08000000, 0x0}, + .top_irq_mask = {0x4, 0x0}, + .rx_rx2_irq_mask = 0x80000000, +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_980_cmn_reg_info = { + .hw_version_addr = 0x0000, + .cfg0_addr = 0x0004, + .global_cmd_addr = 0x0008, + .reset_cfg_addr = 0x000C, + .reset_cmd_addr = 0x0010, + .irq_cmd_addr = 0x0014, + .rup_cmd_addr = 0x0018, + .aup_cmd_addr = 0x001C, + .rup_aup_cmd_addr = 0x0020, + .offline_cmd_addr = 0x0024, + .shdr_master_slave_cfg_addr = 0x0028, + .multi_sensor_mode_addr = 0x002C, + .top_irq_status_addr = {0x0084, 0x0094}, + .top_irq_mask_addr = {0x0088, 0x0098}, + .top_irq_clear_addr = {0x008C, 0x009C}, + .top_irq_set_addr = {0x0090, 0x00A0}, + .buf_done_irq_status_addr = 0x00A4, + .buf_done_irq_mask_addr = 0x00A8, + .buf_done_irq_clear_addr = 0x00AC, + .buf_done_irq_set_addr = 0x00B0, + .test_bus_ctrl = 0x03F4, + .test_bus_debug = 0x03F8, + .drv_cfg0_addr = 0x0164, + .drv_cfg1_addr = 0x0168, + .drv_cfg2_addr = 0x016C, + .debug_drv_0_addr = 0x0170, + .debug_drv_1_addr = 0x0174, + .debug_sensor_hbi_irq_vcdt_addr = 0x0180, + .debug_violation_addr = 0x03D4, + .debug_cfg_addr = 0x03E0, + .rx_mode_id_cfg1_addr = 0x0470, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 5, + .num_pix = 3, + .num_ppp = 1, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .vc_mask = 0x1F, + .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + 
.decode_format1_supported = true, + .decode_format_mask = 0xF, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .mup_shift_val = 4, + .shdr_slave_ppp_shift = 16, + .shdr_slave_rdi2_shift = 18, + .shdr_slave_rdi1_shift = 17, + .shdr_master_rdi0_shift = 4, + .shdr_master_slave_en_shift = 0, + .drv_en_shift = 0, + .drv_rup_en_shift = 0, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .top_err_irq_mask = {0x00000002, 0x18}, + .rst_loc_path_only_val = 0x0, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .top_reset_irq_mask = {0x1,}, + .rst_location_shift_val = 4, + .rst_mode_shift_val = 0, + .epoch_factor = 50, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .format_measure_max_hbi_shift = 16, + .format_measure_min_hbi_mask = 0xFFF, + .top_buf_done_irq_mask = 0x8, + .decode_format_payload_only = 0xF, + .timestamp_enabled_in_cfg0 = true, + .camif_irq_support = true, + .capabilities = CAM_IFE_CSID_CAP_SPLIT_RUP_AUP | + CAM_IFE_CSID_CAP_SKIP_PATH_CFG1 | + 
CAM_IFE_CSID_CAP_SKIP_EPOCH_CFG| + CAM_IFE_CSID_CAP_MULTI_CTXT, + .top_top2_irq_mask = 0x80000000, + .drv_rup_en_val_map = { + 2, /*RDI0 */ + 3, /*RDI1 */ + 4, /*RDI2 */ + 5, /*RDI3 */ + 6, /*RDI4 */ + 0, /*IPP */ + 1, /*PPP */ + 0, /*UDI0 */ + 0, /*UDI1 */ + 0, /*UDI2 */ + }, + .drv_path_idle_en_val_map = { + BIT(4), /*CAM_ISP_PXL_PATH */ + BIT(5), /*CAM_ISP_PPP_PATH */ + 0, /* LCR not applicable */ + BIT(6), /*CAM_ISP_RDI0_PATH */ + BIT(7), /*CAM_ISP_RDI1_PATH */ + BIT(8), /*CAM_ISP_RDI2_PATH */ + BIT(9), /*CAM_ISP_RDI3_PATH */ + BIT(10), /*CAM_ISP_RDI4_PATH */ + }, + .path_domain_id_cfg0 = 0x0, + .path_domain_id_cfg1 = 0x4, + .path_domain_id_cfg2 = 0x8, + .phy_sel_base_idx = 1, +}; + +struct cam_ife_csid_ver2_mc_reg_info + cam_ife_csid_980_ipp_mc_reg_info = { + .irq_comp_cfg0_addr = 0x0178, + .ipp_src_ctxt_mask_shift = 4, + .ipp_dst_ctxt_mask_shift = 0, + .comp_rup_mask = 0x4000000, + .comp_epoch0_mask = 0x8000000, + .comp_eof_mask = 0x20000000, + .comp_sof_mask = 0x40000000, + .comp_subgrp0_mask = 0x1000000, + .comp_subgrp2_mask = 0x2000000, +}; + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_980_reg_info = { + .top_irq_reg_info = cam_ife_csid_980_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_980_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_4], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + NULL, + NULL, + NULL, + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP_1], + &cam_ife_csid_980_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP_2], + }, + .buf_done_irq_reg_info = &cam_ife_csid_980_buf_done_irq_reg_info, + .cmn_reg = 
&cam_ife_csid_980_cmn_reg_info, + .csi2_reg = &cam_ife_csid_980_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_980_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP_1] = &cam_ife_csid_980_ipp_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP_2] = &cam_ife_csid_980_ipp_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = &cam_ife_csid_980_ppp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_980_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_980_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_980_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_980_rdi_3_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_4] = &cam_ife_csid_980_rdi_4_reg_info, + .ipp_mc_reg = &cam_ife_csid_980_ipp_mc_reg_info, + .need_top_cfg = 0x0, + .dynamic_drv_supported = true, + .top_irq_desc = &cam_ife_csid_980_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_980_rx_irq_desc, + .path_irq_desc = cam_ife_csid_980_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_980_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_980_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_980_path_irq_desc), + .top_debug_mask = &cam_ife_csid_980_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_980_rx_debug_mask, + .num_top_regs = 2, + .num_rx_regs = 2, +}; +#endif /*_CAM_IFE_CSID_980_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_common.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_common.c new file mode 100644 index 0000000000..971da175b1 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_common.c @@ -0,0 +1,691 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include + +#include +#include + +#include + +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_hw.h" +#include "cam_cdm_util.h" +#include "cam_ife_csid_hw_intf.h" +#include "cam_ife_csid_soc.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_cdm_intf_api.h" + +/* factor to conver qtime to boottime */ +int64_t qtime_to_boottime; + +const uint8_t *cam_ife_csid_irq_reg_tag[CAM_IFE_CSID_IRQ_REG_MAX + 1] = { + "TOP", + "RX", + "RDI0", + "RDI1", + "RDI2", + "RDI3", + "RDI4", + "IPP", + "PPP", + "UDI0", + "UDI1", + "UDI2", + "TOP2", + "RX2", + "IPP1", + "IPP2", + "MAX", +}; + +static int cam_ife_csid_get_cid(struct cam_ife_csid_cid_data *cid_data, + struct cam_csid_hw_reserve_resource_args *reserve) +{ + uint32_t i; + + if (cid_data->cid_cnt == 0) { + + for (i = 0; i < reserve->in_port->num_valid_vc_dt; i++) { + cid_data->vc_dt[i].vc = reserve->in_port->vc[i]; + cid_data->vc_dt[i].dt = reserve->in_port->dt[i]; + cid_data->vc_dt[i].valid = true; + } + + cid_data->num_vc_dt = reserve->in_port->num_valid_vc_dt; + return 0; + } + + for (i = 0; i < reserve->in_port->num_valid_vc_dt; i++) { + + if (cid_data->vc_dt[i].vc == reserve->in_port->vc[i] && + cid_data->vc_dt[i].dt == reserve->in_port->dt[i]) + return 0; + } + + return -EINVAL; +} + +int cam_ife_csid_is_pix_res_format_supported( + uint32_t in_format) +{ + int rc = -EINVAL; + + switch (in_format) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_DPCM_10_6_10: + case CAM_FORMAT_DPCM_10_8_10: + case CAM_FORMAT_DPCM_12_6_12: + case CAM_FORMAT_DPCM_12_8_12: + case CAM_FORMAT_DPCM_14_8_14: + case CAM_FORMAT_DPCM_14_10_14: + case CAM_FORMAT_DPCM_12_10_12: + case CAM_FORMAT_YUV422: + 
case CAM_FORMAT_YUV422_10: + rc = 0; + break; + default: + break; + } + return rc; +} + +static int cam_ife_csid_validate_rdi_format(uint32_t in_format, + uint32_t out_format) +{ + int rc = 0; + + switch (in_format) { + case CAM_FORMAT_MIPI_RAW_6: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_PLAIN8: + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_8: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_PLAIN128: + case CAM_FORMAT_PLAIN8: + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_PLAIN128: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_16: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_PLAIN16_16: + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_20: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_PLAIN32_20: + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_YUV422: + switch (out_format) { + case CAM_FORMAT_YUV422: + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_YUV422_10: + switch (out_format) { + case CAM_FORMAT_YUV422_10: + break; + default: + rc = -EINVAL; + break; + } + break; + default: + rc = -EINVAL; + break; + } + + if (rc) + CAM_ERR(CAM_ISP, "Unsupported format pair in %d out %d", + in_format, out_format); + return rc; +} + +int cam_ife_csid_get_format_rdi( + uint32_t in_format, uint32_t out_format, + struct cam_ife_csid_path_format *path_format, bool mipi_pack_supported, + bool mipi_unpacked) +{ + int rc = 0; 
+ + rc = cam_ife_csid_validate_rdi_format(in_format, out_format); + if (rc) + goto err; + + memset(path_format, 0, sizeof(*path_format)); + /* if no packing supported and input is same as output dump the raw payload */ + if (!mipi_pack_supported && (in_format == out_format)) { + path_format->decode_fmt = 0xf; + goto end; + } + + /* Configure the incoming stream format types */ + switch (in_format) { + case CAM_FORMAT_MIPI_RAW_6: + path_format->decode_fmt = 0x0; + path_format->bits_per_pxl = 6; + break; + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_YUV422: + path_format->decode_fmt = 0x1; + path_format->bits_per_pxl = 8; + break; + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_YUV422_10: + path_format->decode_fmt = 0x2; + path_format->bits_per_pxl = 10; + break; + case CAM_FORMAT_MIPI_RAW_12: + path_format->decode_fmt = 0x3; + path_format->bits_per_pxl = 12; + break; + case CAM_FORMAT_MIPI_RAW_14: + path_format->decode_fmt = 0x4; + path_format->bits_per_pxl = 14; + break; + case CAM_FORMAT_MIPI_RAW_16: + path_format->decode_fmt = 0x5; + path_format->bits_per_pxl = 16; + break; + case CAM_FORMAT_MIPI_RAW_20: + path_format->decode_fmt = 0x6; + path_format->bits_per_pxl = 20; + break; + default: + rc = -EINVAL; + goto err; + } + + /* Configure the out stream format types */ + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_YUV422: + case CAM_FORMAT_PLAIN128: + if (mipi_unpacked) + path_format->plain_fmt = 0x0; + else + path_format->packing_fmt = 0x1; + break; + case CAM_FORMAT_PLAIN8: + path_format->plain_fmt = 0x0; + break; + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_YUV422_10: + if (mipi_unpacked) + path_format->plain_fmt = 0x1; + else + path_format->packing_fmt = 0x1; + break; + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case 
CAM_FORMAT_PLAIN16_16: + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_MIPI_RAW_20: + if (mipi_unpacked) + path_format->plain_fmt = 0x2; + else + path_format->packing_fmt = 0x1; + break; + case CAM_FORMAT_PLAIN32_20: + path_format->plain_fmt = 0x2; + break; + default: + rc = -EINVAL; + goto err; + } + +end: + CAM_DBG(CAM_ISP, + "in %u out %u plain_fmt %u packing %u decode %u bpp %u unpack %u pack supported %u", + in_format, out_format, path_format->plain_fmt, path_format->packing_fmt, + path_format->decode_fmt, path_format->bits_per_pxl, mipi_unpacked, + mipi_pack_supported); + return rc; + +err: + CAM_ERR(CAM_ISP, "Unsupported format pair in %d out %d", + in_format, out_format); + return rc; +} + +int cam_ife_csid_get_format_ipp_ppp( + uint32_t in_format, + struct cam_ife_csid_path_format *path_format) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "input format:%d", + in_format); + + switch (in_format) { + case CAM_FORMAT_MIPI_RAW_6: + path_format->decode_fmt = 0; + path_format->plain_fmt = 0; + break; + case CAM_FORMAT_MIPI_RAW_8: + path_format->decode_fmt = 0x1; + path_format->plain_fmt = 0; + break; + case CAM_FORMAT_MIPI_RAW_10: + path_format->decode_fmt = 0x2; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_MIPI_RAW_12: + path_format->decode_fmt = 0x3; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_MIPI_RAW_14: + path_format->decode_fmt = 0x4; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_MIPI_RAW_16: + path_format->decode_fmt = 0x5; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_MIPI_RAW_20: + path_format->decode_fmt = 0x6; + path_format->plain_fmt = 0x2; + break; + case CAM_FORMAT_DPCM_10_6_10: + path_format->decode_fmt = 0x7; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_DPCM_10_8_10: + path_format->decode_fmt = 0x8; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_DPCM_12_6_12: + path_format->decode_fmt = 0x9; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_DPCM_12_8_12: 
+ path_format->decode_fmt = 0xA; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_DPCM_14_8_14: + path_format->decode_fmt = 0xB; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_DPCM_14_10_14: + path_format->decode_fmt = 0xC; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_DPCM_12_10_12: + path_format->decode_fmt = 0xD; + path_format->plain_fmt = 0x1; + break; + case CAM_FORMAT_YUV422: + path_format->decode_fmt = 0x1; + path_format->plain_fmt = 0x0; + break; + case CAM_FORMAT_YUV422_10: + path_format->decode_fmt = 0x2; + path_format->plain_fmt = 0x0; + break; + default: + CAM_ERR(CAM_ISP, "Unsupported format %d", + in_format); + rc = -EINVAL; + } + + CAM_DBG(CAM_ISP, "decode_fmt:%d plain_fmt:%d", + path_format->decode_fmt, + path_format->plain_fmt); + + return rc; +} + +int cam_ife_csid_hw_probe_init(struct cam_hw_intf *hw_intf, + struct cam_ife_csid_core_info *core_info, bool is_custom) +{ + + int rc = -EINVAL; + + if (core_info->sw_version == CAM_IFE_CSID_VER_1_0) { + rc = cam_ife_csid_hw_ver1_init(hw_intf, + core_info, is_custom); + } else if (core_info->sw_version == CAM_IFE_CSID_VER_2_0) { + rc = cam_ife_csid_hw_ver2_init(hw_intf, + core_info, is_custom); + } + + return rc; +} + +int cam_ife_csid_hw_deinit(struct cam_hw_intf *hw_intf, + struct cam_ife_csid_core_info *core_info) +{ + int rc = -EINVAL; + + if (core_info->sw_version == CAM_IFE_CSID_VER_1_0) + rc = cam_ife_csid_hw_ver1_deinit(hw_intf->hw_priv); + else if (core_info->sw_version == CAM_IFE_CSID_VER_2_0) + rc = cam_ife_csid_hw_ver2_deinit( + hw_intf->hw_priv); + + return rc; +} + +int cam_ife_csid_is_vc_full_width(struct cam_ife_csid_cid_data *cid_data) +{ + int i, j; + int rc = 0; + struct cam_ife_csid_cid_data *p_cid; + + for (i = 0; i < CAM_IFE_CSID_CID_MAX; i++) { + p_cid = &cid_data[i]; + + if (!p_cid->cid_cnt) + continue; + + if (p_cid->num_vc_dt > CAM_IFE_CSID_MULTI_VC_DT_GRP_MAX) { + CAM_ERR(CAM_ISP, "Invalid num_vc_dt:%d cid: %d", + p_cid->num_vc_dt, i); + rc = 
-EINVAL; + goto end; + } + + for (j = 0; j < p_cid->num_vc_dt; j++) { + if (p_cid->vc_dt[j].valid && + p_cid->vc_dt[j].vc > 3) { + rc = 1; + goto end; + } + } + } + +end: + return rc; +} + +int cam_ife_csid_cid_reserve(struct cam_ife_csid_cid_data *cid_data, + uint32_t *cid_value, + uint32_t hw_idx, + struct cam_csid_hw_reserve_resource_args *reserve) +{ + int i, j, rc = 0; + + for (i = 0; i < CAM_IFE_CSID_CID_MAX; i++) { + rc = cam_ife_csid_get_cid(&cid_data[i], reserve); + if (!rc) + break; + } + + if (i == CAM_IFE_CSID_CID_MAX) { + for (j = 0; j < reserve->in_port->num_valid_vc_dt; j++) + CAM_ERR(CAM_ISP, + "CSID[%d] reserve fail vc[%d] dt[%d]", + hw_idx, reserve->in_port->vc[j], + reserve->in_port->dt[j]); + + return -EINVAL; + } + + cid_data[i].cid_cnt++; + *cid_value = i; + + return 0; +} + +int cam_ife_csid_cid_release( + struct cam_ife_csid_cid_data *cid_data, + uint32_t hw_idx, + uint32_t cid) +{ + int i; + + if (!cid_data->cid_cnt) { + CAM_WARN(CAM_ISP, "CSID[%d] unbalanced cid:%d release", + hw_idx, cid); + return 0; + } + + cid_data->cid_cnt--; + + if (cid_data->cid_cnt == 0) { + + for (i = 0; i < cid_data->num_vc_dt; i++) + cid_data->vc_dt[i].valid = false; + + cid_data->num_vc_dt = 0; + } + + return 0; +} + +int cam_ife_csid_check_in_port_args( + struct cam_csid_hw_reserve_resource_args *reserve, + uint32_t hw_idx) +{ + + if (reserve->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) { + + CAM_ERR(CAM_ISP, "CSID:%d Invalid phy sel %d", + hw_idx, reserve->in_port->res_type); + return -EINVAL; + } + + if (reserve->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX && + reserve->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) { + + CAM_ERR(CAM_ISP, "CSID:%d Invalid lane type %d", + hw_idx, reserve->in_port->lane_type); + return -EINVAL; + } + + if ((reserve->in_port->lane_type == CAM_ISP_LANE_TYPE_DPHY && + reserve->in_port->lane_num > 4) && + reserve->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) { + + CAM_ERR(CAM_ISP, "CSID:%d Invalid lane num %d", + hw_idx, 
reserve->in_port->lane_num); + return -EINVAL; + } + + if ((reserve->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY && + reserve->in_port->lane_num > 3) && + reserve->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) { + + CAM_ERR(CAM_ISP, " CSID:%d Invalid lane type %d & num %d", + hw_idx, + reserve->in_port->lane_type, + reserve->in_port->lane_num); + return -EINVAL; + } + + if ((reserve->res_id == CAM_IFE_PIX_PATH_RES_IPP || + reserve->res_id == CAM_IFE_PIX_PATH_RES_PPP) && + (cam_ife_csid_is_pix_res_format_supported( + reserve->in_port->format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0]))) { + CAM_ERR(CAM_ISP, "CSID %d, res_id %d, unsupported format %d", + hw_idx, + reserve->res_id, + reserve->in_port->format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0]); + return -EINVAL; + } + + return 0; +} + +int cam_ife_csid_convert_res_to_irq_reg(uint32_t res_id) +{ + switch (res_id) { + + case CAM_IFE_PIX_PATH_RES_RDI_0: + return CAM_IFE_CSID_IRQ_REG_RDI_0; + case CAM_IFE_PIX_PATH_RES_RDI_1: + return CAM_IFE_CSID_IRQ_REG_RDI_1; + case CAM_IFE_PIX_PATH_RES_RDI_2: + return CAM_IFE_CSID_IRQ_REG_RDI_2; + case CAM_IFE_PIX_PATH_RES_RDI_3: + return CAM_IFE_CSID_IRQ_REG_RDI_3; + case CAM_IFE_PIX_PATH_RES_RDI_4: + return CAM_IFE_CSID_IRQ_REG_RDI_4; + case CAM_IFE_PIX_PATH_RES_IPP: + return CAM_IFE_CSID_IRQ_REG_IPP; + case CAM_IFE_PIX_PATH_RES_PPP: + return CAM_IFE_CSID_IRQ_REG_PPP; + case CAM_IFE_PIX_PATH_RES_UDI_0: + return CAM_IFE_CSID_IRQ_REG_UDI_0; + case CAM_IFE_PIX_PATH_RES_UDI_1: + return CAM_IFE_CSID_IRQ_REG_UDI_1; + case CAM_IFE_PIX_PATH_RES_UDI_2: + return CAM_IFE_CSID_IRQ_REG_UDI_2; + case CAM_IFE_PIX_PATH_RES_IPP_1: + return CAM_IFE_CSID_IRQ_REG_IPP_1; + case CAM_IFE_PIX_PATH_RES_IPP_2: + return CAM_IFE_CSID_IRQ_REG_IPP_2; + default: + return CAM_IFE_CSID_IRQ_REG_MAX; + } +} + +const char *cam_ife_csid_reset_type_to_string(enum cam_ife_csid_reset_type reset_type) +{ + switch (reset_type) { + case CAM_IFE_CSID_RESET_GLOBAL: return "global"; + case CAM_IFE_CSID_RESET_PATH: return "path"; + case 
CAM_IFE_CSID_RESET_GLOBAL_HW_ONLY: return "global_hw"; + case CAM_IFE_CSID_RESET_GLOBAL_IRQ_CNTRL: return "global_irq_rst"; + default: return "invalid"; + } +} + +int cam_ife_csid_get_base(struct cam_hw_soc_info *soc_info, + uint32_t base_id, void *cmd_args, size_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + size_t size = 0; + uint32_t mem_base = 0; + struct cam_csid_soc_private *soc_private; + + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Error, Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res || !soc_info) { + CAM_ERR(CAM_ISP, "Error, Invalid args"); + return -EINVAL; + } + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "soc_private is null"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_changebase(); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info, base_id); + if (mem_base == -1) { + CAM_ERR(CAM_ISP, "failed to get mem_base, index: %d num_reg_map: %u", + base_id, soc_info->num_reg_map); + return -EINVAL; + } + + if (cdm_args->cdm_id == CAM_CDM_RT) { + if (!soc_private->rt_wrapper_base) { + CAM_ERR(CAM_ISP, "rt_wrapper_base_addr is null"); + return -EINVAL; + } + + mem_base -= soc_private->rt_wrapper_base; + } + + CAM_DBG(CAM_ISP, "core %d mem_base 0x%x, cdm_id:%u", + soc_info->index, mem_base, cdm_args->cdm_id); + + cdm_util_ops->cdm_write_changebase(cdm_args->cmd.cmd_buf_addr, mem_base); + cdm_args->cmd.used_bytes = (size * 4); + + return 0; +} +const uint8_t 
**cam_ife_csid_get_irq_reg_tag_ptr(void) +{ + return cam_ife_csid_irq_reg_tag; +} + + diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_common.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_common.h new file mode 100644 index 0000000000..1ca7001a49 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_common.h @@ -0,0 +1,550 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_COMMON_H_ +#define _CAM_IFE_CSID_COMMON_H_ + +#include "cam_hw.h" +#include "cam_ife_csid_hw_intf.h" +#include "cam_ife_csid_soc.h" + +#define CAM_IFE_CSID_VER_1_0 0x100 +#define CAM_IFE_CSID_VER_2_0 0x200 +#define CAM_IFE_CSID_MAX_ERR_COUNT 100 + +/* + * CRC error threshold is set to be 1% of frame width and + * this macro is used as divisor in calculation + */ +#define CAM_IFE_CSID_MAX_CRC_ERR_DIVISOR 100 + +#define CAM_IFE_CSID_HW_CAP_IPP 0x1 +#define CAM_IFE_CSID_HW_CAP_RDI 0x2 +#define CAM_IFE_CSID_HW_CAP_PPP 0x4 +#define CAM_IFE_CSID_HW_CAP_TOP 0x8 + +#define CAM_IFE_CSID_TPG_ENCODE_RAW8 0x1 +#define CAM_IFE_CSID_TPG_ENCODE_RAW10 0x2 +#define CAM_IFE_CSID_TPG_ENCODE_RAW12 0x3 +#define CAM_IFE_CSID_TPG_ENCODE_RAW14 0x4 +#define CAM_IFE_CSID_TPG_ENCODE_RAW16 0x5 + +#define CAM_IFE_CSID_TPG_TEST_PATTERN_YUV 0x4 + +#define CAM_IFE_CSID_HW_IDX_0 0x1 +#define CAM_IFE_CSID_HW_IDX_1 0x2 +#define CAM_IFE_CSID_HW_IDX_2 0x4 + +#define CAM_IFE_CSID_LOG_BUF_LEN 1024 + +/** + * CSID debug err vector related fields + */ +#define CAM_IFE_CSID_DEBUG_VEC_FIFO_SIZE 4 +#define CAM_IFE_CSID_DEBUG_TIMESTAMP_IRQ_SEL_SHIFT 1 +#define CAM_IFE_CSID_DEBUG_VEC_ERR_REGS 3 + +#define CAM_IFE_CSID_CAP_INPUT_LCR BIT(0) +#define CAM_IFE_CSID_CAP_RDI_UNPACK_MSB BIT(1) +#define 
CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI BIT(2) +#define CAM_IFE_CSID_CAP_SOF_RETIME_DIS BIT(3) +#define CAM_IFE_CSID_CAP_SPLIT_RUP_AUP BIT(4) +#define CAM_IFE_CSID_CAP_SKIP_PATH_CFG1 BIT(5) +#define CAM_IFE_CSID_CAP_SKIP_EPOCH_CFG BIT(6) +#define CAM_IFE_CSID_CAP_MULTI_CTXT BIT(7) +#define CAM_IFE_CSID_CAP_DEBUG_ERR_VEC BIT(8) +#define CAM_IFE_CSID_CAP_TOP_MASK_ALL_IRQS BIT(9) + +/* + * CSID RX debug vc-dt capture + */ +#define CAM_IFE_CSID_DEBUGFS_RST_STROBE_MASK 0xF +#define CAM_IFE_CSID_DEBUGFS_VC_DT_MASK 0xFF +#define CAM_IFE_CSID_DEBUGFS_VC_SHIFT_MASK 0x4 +#define CAM_IFE_CSID_DEBUGFS_DT_SHIFT_MASK 0xC + +/* + * Debug values enable the corresponding interrupts and debug logs provide + * necessary information + */ +#define CAM_IFE_CSID_DEBUG_ENABLE_SOF_IRQ BIT(0) +#define CAM_IFE_CSID_DEBUG_ENABLE_EOF_IRQ BIT(1) +#define CAM_IFE_CSID_DEBUG_ENABLE_SOT_IRQ BIT(2) +#define CAM_IFE_CSID_DEBUG_ENABLE_EOT_IRQ BIT(3) +#define CAM_IFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE BIT(4) +#define CAM_IFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE BIT(5) +#define CAM_IFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE BIT(6) +#define CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO BIT(7) +#define CAM_IFE_CSID_DEBUG_DISABLE_EARLY_EOF BIT(8) +#define CAM_IFE_DEBUG_ENABLE_UNMAPPED_VC_DT_IRQ BIT(9) +#define CAM_IFE_CSID_DEBUG_ENABLE_VOTE_UP_IRQ BIT(10) +#define CAM_IFE_CSID_DEBUG_ENABLE_VOTE_DN_IRQ BIT(11) +#define CAM_IFE_CSID_DEBUG_ENABLE_ERR_NO_VOTE_DN_IRQ BIT(12) + +/* Binning supported masks. Binning support changes for specific paths + * and also for targets. With the mask, we handle the supported features + * in reg files and handle in code accordingly. 
+ */ + +#define CAM_IFE_CSID_BIN_HORIZONTAL BIT(0) +#define CAM_IFE_CSID_BIN_QCFA BIT(1) +#define CAM_IFE_CSID_BIN_VERTICAL BIT(2) + +#define CAM_IFE_CSID_WIDTH_FUSE_VAL_MAX 4 + +#define CAM_IFE_CSID_RUP_AUP_SET_VAL BIT(0) + +/* factor to conver qtime to boottime */ +extern int64_t qtime_to_boottime; + +/* enum for multiple mem base in some of the targets */ +enum cam_ife_csid_mem_base_id { + CAM_IFE_CSID_CLC_MEM_BASE_ID, + CAM_IFE_CSID_TOP_MEM_BASE_ID, + CAM_IFE_CSID_SEC_MEM_BASE_ID, + CAM_IFE_CSID_MAX_MEM_BASE_ID, +}; + +/* enum cam_ife_csid_path_multi_vc_dt_grp: for multi vc dt suppot */ +enum cam_ife_csid_path_multi_vc_dt_grp { + CAM_IFE_CSID_MULTI_VC_DT_GRP_0, + CAM_IFE_CSID_MULTI_VC_DT_GRP_1, + CAM_IFE_CSID_MULTI_VC_DT_GRP_2, + CAM_IFE_CSID_MULTI_VC_DT_GRP_3, + CAM_IFE_CSID_MULTI_VC_DT_GRP_MAX, +}; + +/** + * enum cam_ife_csid_irq_reg - Specify the csid irq reg + */ +enum cam_ife_csid_irq_reg { + CAM_IFE_CSID_IRQ_REG_TOP, + CAM_IFE_CSID_IRQ_REG_RX, + CAM_IFE_CSID_IRQ_REG_RDI_0, + CAM_IFE_CSID_IRQ_REG_RDI_1, + CAM_IFE_CSID_IRQ_REG_RDI_2, + CAM_IFE_CSID_IRQ_REG_RDI_3, + CAM_IFE_CSID_IRQ_REG_RDI_4, + CAM_IFE_CSID_IRQ_REG_IPP, + CAM_IFE_CSID_IRQ_REG_PPP, + CAM_IFE_CSID_IRQ_REG_UDI_0, + CAM_IFE_CSID_IRQ_REG_UDI_1, + CAM_IFE_CSID_IRQ_REG_UDI_2, + CAM_IFE_CSID_IRQ_REG_TOP_2, + CAM_IFE_CSID_IRQ_REG_RX_2, + CAM_IFE_CSID_IRQ_REG_IPP_1, + CAM_IFE_CSID_IRQ_REG_IPP_2, + CAM_IFE_CSID_IRQ_REG_MAX, +}; + +/** + * enum cam_ife_csid_hw_top_events - Specify the top irq events + */ +enum cam_ife_csid_hw_top_events { + CAM_IFE_CSID_TOP_INFO_VOTE_UP, + CAM_IFE_CSID_TOP_INFO_VOTE_DN, + CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN, + CAM_IFE_CSID_TOP_ERR_VOTE_UP_LATE, + CAM_IFE_CSID_TOP_ERR_RDI_LINE_BUFFER_CONFLICT, + CAM_IFE_CSID_TOP_ERR_SENSOR_HBI, + CAM_IFE_CSID_TOP_REG_IRQ_EVENTS_MAX, +}; + +/** + * enum cam_ife_csid_hw_rx_events - Specify the rx irq events + */ +enum cam_ife_csid_hw_rx_events { + CAM_IFE_CSID_RX_DL0_EOT_CAPTURED, + CAM_IFE_CSID_RX_DL1_EOT_CAPTURED, + 
CAM_IFE_CSID_RX_DL2_EOT_CAPTURED, + CAM_IFE_CSID_RX_DL3_EOT_CAPTURED, + CAM_IFE_CSID_RX_DL0_SOT_CAPTURED, + CAM_IFE_CSID_RX_DL1_SOT_CAPTURED, + CAM_IFE_CSID_RX_DL2_SOT_CAPTURED, + CAM_IFE_CSID_RX_DL3_SOT_CAPTURED, + CAM_IFE_CSID_RX_LONG_PKT_CAPTURED, + CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED, + CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED, + CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION, + CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION, + CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC, + CAM_IFE_CSID_RX_WARNING_ECC, + CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW, + CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW, + CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW, + CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW, + CAM_IFE_CSID_RX_ERROR_CRC, + CAM_IFE_CSID_RX_ERROR_ECC, + CAM_IFE_CSID_RX_MMAPPED_VC_DT, + CAM_IFE_CSID_RX_UNMAPPED_VC_DT, + CAM_IFE_CSID_RX_STREAM_UNDERFLOW, + CAM_IFE_CSID_RX_UNBOUNDED_FRAME, + CAM_IFE_CSID_RX_ERROR_ILLEGAL_PROGRAMMING, + CAM_IFE_CSID_RX_INFO_SENSOR_MODE_ID_CHANGE, + CAM_IFE_CSID_RX_RX2_IRQ, + CAM_IFE_CSID_RX_DL0_EOT_LOST, + CAM_IFE_CSID_RX_DL1_EOT_LOST, + CAM_IFE_CSID_RX_DL2_EOT_LOST, + CAM_IFE_CSID_RX_DL3_EOT_LOST, + CAM_IFE_CSID_RX_DL0_SOT_LOST, + CAM_IFE_CSID_RX_DL1_SOT_LOST, + CAM_IFE_CSID_RX_DL2_SOT_LOST, + CAM_IFE_CSID_RX_DL3_SOT_LOST, + CAM_IFE_CSID_RX_REG_IRQ_EVENTS_MAX, +}; + +struct cam_ife_csid_top_debug_mask { + uint64_t evt_bitmap[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint8_t bit_pos[CAM_IFE_CSID_TOP_REG_IRQ_EVENTS_MAX]; +}; + +struct cam_ife_csid_rx_debug_mask { + uint64_t evt_bitmap[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint8_t bit_pos[CAM_IFE_CSID_RX_REG_IRQ_EVENTS_MAX]; +}; + +/* + * struct cam_ife_csid_irq_desc: Structure to hold IRQ description + * + * @bitmask : Bitmask of the IRQ + * @err_type : Error type for ISP hardware event + * @irq_name : IRQ name + * @desc : String to describe the IRQ bit + * @debug : Debug guidance for the error + * @err_handler: Error handler which gets invoked if error IRQ bit set + */ +struct cam_ife_csid_irq_desc { + uint32_t bitmask; + uint32_t err_type; + char *irq_name; 
+ char *desc; + char *debug; + void (*err_handler)(void *csid_hw, void *res); +}; + +/* + * struct cam_ife_csid_top_irq_desc: Structure to hold IRQ bitmask and description + * + * @bitmask : Bitmask of the IRQ + * @err_type : Error type for ISP hardware event + * @err_name : IRQ name + * @desc : String to describe about the IRQ + * @debug : Debug guidance for the error + * @err_handler: Error handler which gets invoked if error IRQ bit set + */ +struct cam_ife_csid_top_irq_desc { + uint32_t bitmask; + uint32_t err_type; + char *err_name; + char *desc; + char *debug; + void (*err_handler)(void *csid_hw); +}; + +/* + * struct cam_ife_csid_vc_dt: Structure to hold vc dt combination + * + * @vc: Virtual channel number + * @dt: Data type of incoming data + * @valid: flag to indicate if combimation is valid + */ + +struct cam_ife_csid_vc_dt { + uint32_t vc; + uint32_t dt; + bool valid; +}; + +/* + * struct cam_ife_csid_path_format: Structure format info + * + * @decode_fmt: Decode format + * @packing_fmt: Packing format + * @plain_fmt: Plain format + * @bits_per_pixel: Bits per pixel + */ +struct cam_ife_csid_path_format { + uint32_t decode_fmt; + uint32_t packing_fmt; + uint32_t plain_fmt; + uint32_t bits_per_pxl; +}; + +/* + * struct cam_ife_csid_csi2_rx_reg_info: Structure to hold Rx reg offset + * holds register address offsets + * shift values + * masks + */ +struct cam_ife_csid_csi2_rx_reg_info { + uint32_t irq_status_addr; + uint32_t irq_mask_addr; + uint32_t irq_clear_addr; + uint32_t irq_set_addr; + uint32_t cfg0_addr; + uint32_t cfg1_addr; + uint32_t capture_ctrl_addr; + uint32_t rst_strobes_addr; + uint32_t de_scramble_cfg0_addr; + uint32_t de_scramble_cfg1_addr; + uint32_t cap_unmap_long_pkt_hdr_0_addr; + uint32_t cap_unmap_long_pkt_hdr_1_addr; + uint32_t captured_short_pkt_0_addr; + uint32_t captured_short_pkt_1_addr; + uint32_t captured_long_pkt_0_addr; + uint32_t captured_long_pkt_1_addr; + uint32_t captured_long_pkt_ftr_addr; + uint32_t 
captured_cphy_pkt_hdr_addr; + uint32_t lane0_misr_addr; + uint32_t lane1_misr_addr; + uint32_t lane2_misr_addr; + uint32_t lane3_misr_addr; + uint32_t total_pkts_rcvd_addr; + uint32_t stats_ecc_addr; + uint32_t total_crc_err_addr; + uint32_t de_scramble_type3_cfg0_addr; + uint32_t de_scramble_type3_cfg1_addr; + uint32_t de_scramble_type2_cfg0_addr; + uint32_t de_scramble_type2_cfg1_addr; + uint32_t de_scramble_type1_cfg0_addr; + uint32_t de_scramble_type1_cfg1_addr; + uint32_t de_scramble_type0_cfg0_addr; + uint32_t de_scramble_type0_cfg1_addr; + + /*configurations */ + uint32_t rst_srb_all; + uint32_t rst_done_shift_val; + uint32_t irq_mask_all; + uint32_t misr_enable_shift_val; + uint32_t vc_mode_shift_val; + uint32_t capture_long_pkt_en_shift; + uint32_t capture_short_pkt_en_shift; + uint32_t capture_cphy_pkt_en_shift; + uint32_t capture_long_pkt_dt_shift; + uint32_t capture_long_pkt_vc_shift; + uint32_t capture_short_pkt_vc_shift; + uint32_t capture_cphy_pkt_dt_shift; + uint32_t capture_cphy_pkt_vc_shift; + uint32_t ecc_correction_shift_en; + uint32_t phy_bist_shift_en; + uint32_t epd_mode_shift_en; + uint32_t eotp_shift_en; + uint32_t dyn_sensor_switch_shift_en; + uint32_t rup_aup_latch_shift; + bool rup_aup_latch_supported; + uint32_t phy_num_mask; + uint32_t vc_mask; + uint32_t wc_mask; + uint32_t dt_mask; + uint32_t vc_shift; + uint32_t dt_shift; + uint32_t wc_shift; + uint32_t calc_crc_mask; + uint32_t expected_crc_mask; + uint32_t calc_crc_shift; + uint32_t lane_num_shift; + uint32_t lane_cfg_shift; + uint32_t phy_type_shift; + uint32_t phy_num_shift; + uint32_t tpg_mux_en_shift; + uint32_t tpg_num_sel_shift; + uint32_t long_pkt_strobe_rst_shift; + uint32_t short_pkt_strobe_rst_shift; + uint32_t cphy_pkt_strobe_rst_shift; + uint32_t unmapped_pkt_strobe_rst_shift; + uint32_t fatal_err_mask; + uint32_t part_fatal_err_mask; + uint32_t non_fatal_err_mask; + uint32_t debug_irq_mask; + uint32_t top_irq_mask; +}; + +/* + * struct cam_ife_csid_timestamp: place 
holder for csid core info + * + * @prev_boot_timestamp: Previous frame boot timestamp + * @prev_sof_timestamp: Previous frame SOF timetamp + */ +struct cam_ife_csid_timestamp { + uint64_t prev_boot_ts; + uint64_t prev_sof_ts; +}; + +/* + * struct cam_ife_csid_core_info: place holder for csid core info + * + * @csid_reg: Pointer to csid reg info + * @sw_version: sw version based on targets + */ +struct cam_ife_csid_core_info { + void *csid_reg; + uint32_t sw_version; +}; + +/* + * struct cam_ife_csid_hw_counters: place holder for csid counters + * + * @csi2_reserve_cnt: Reserve count for csi2 + * @irq_debug_cnt: irq debug counter + * @error_irq_count: error irq counter + * @crc_error_irq_count: crc error irq counter + */ +struct cam_ife_csid_hw_counters { + uint32_t csi2_reserve_cnt; + uint32_t irq_debug_cnt; + uint32_t error_irq_count; + uint32_t crc_error_irq_count; +}; + +/* + * struct cam_ife_csid_debug_info: place holder for csid debug + * + * @debug_val: Debug val for enabled features + * @rx_capture_vc: rx packet vc capture + * @rx_capture_dt: rx packet dt capture + * @rst_capture_strobes: rx packet capture rst strobes + * @top_mask: Debug mask for top irq + * @rx_mask: Debug mask for rx irq + * @path_mask: Debug mask for path irq + * @test_bus_val: CSID test bus value + * @rx_capture_debug_set: rx pkt capture debug set + * @test_bus_enabled: test bus enabled + */ +struct cam_ife_csid_debug_info { + uint32_t debug_val; + uint32_t rx_capture_vc; + uint32_t rx_capture_dt; + uint32_t rst_capture_strobes; + uint32_t top_mask; + uint32_t rx_mask; + uint32_t path_mask; + uint32_t test_bus_val; + bool rx_capture_debug_set; + bool test_bus_enabled; +}; + +/* + * struct cam_ife_csid_hw_flags: place holder for flags + * + * @device_enabled: flag to indicate if device enabled + * @binning_enabled: flag to indicate if binning enabled + * @sof_irq_triggered: flag to indicate if sof irq triggered + * @fatal_err_detected: flag to indicate if fatal err detected + * 
@rx_enabled: flag to indicate if rx is enabled + * @tpg_configured: flag to indicate if internal_tpg is configured + * @reset_awaited: flag to indicate if reset is awaited + * @offline_mode: flag to indicate if csid in offline mode + * @rdi_lcr_en: flag to indicate if RDI to lcr is enabled + * @sfe_en: flag to indicate if SFE is enabled + * @pf_err_detected: flag to indicate if camnoc has encountered + * error - page fault + * @domain_id_security Flag to determine if target has domain-id based security + * @last_exp_valid: Flag to indicate if last exp info is valid for epoch callback + */ +struct cam_ife_csid_hw_flags { + bool device_enabled; + bool binning_enabled; + bool sof_irq_triggered; + bool process_reset; + bool fatal_err_detected; + bool rx_enabled; + bool tpg_enabled; + bool tpg_configured; + bool reset_awaited; + bool offline_mode; + bool rdi_lcr_en; + bool sfe_en; + bool pf_err_detected; + bool domain_id_security; + bool last_exp_valid; +}; + +/* + * struct am_ife_csid_cid_data: place holder for cid data + * + * @vc_dt: vc_dt structure + * @cid_cnt: count of cid acquired + * @num_vc_dt: num of vc dt combinaton for this cid in multi vcdt case + */ +struct cam_ife_csid_cid_data { + struct cam_ife_csid_vc_dt vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_MAX]; + uint32_t cid_cnt; + uint32_t num_vc_dt; +}; + +/* + * struct cam_ife_csid_rx_cfg: place holder for rx cfg + * + * @phy_sel: Selected phy + * @lane_type: type of lane selected + * @lane_num: number of lanes + * @lane_cfg: lane configuration + * @tpg_mux_sel: TPG mux sel + * @tpg_num_sel: TPG num sel + * @mup: Mode Update bit. 
0 for odd vc, 1 for even VC + * @epd_supported: Flag to check if epd supported + * @irq_handle: IRQ Handle + * @err_irq_handle: Error IRQ Handle + * @dynamic_sensor_switch_en: Flag if dynamic sensor switch is enabled + */ +struct cam_ife_csid_rx_cfg { + uint32_t phy_sel; + uint32_t lane_type; + uint32_t lane_num; + uint32_t lane_cfg; + uint32_t tpg_mux_sel; + uint32_t tpg_num_sel; + uint32_t mup; + uint32_t epd_supported; + uint32_t top_irq_handle; + uint32_t irq_handle; + uint32_t err_irq_handle; + bool dynamic_sensor_switch_en; +}; + +int cam_ife_csid_is_pix_res_format_supported( + uint32_t in_format); + +int cam_ife_csid_get_format_rdi( + uint32_t in_format, uint32_t out_format, + struct cam_ife_csid_path_format *path_format, bool mipi_pack_supported, + bool mipi_unpacked); + +int cam_ife_csid_get_format_ipp_ppp( + uint32_t in_format, + struct cam_ife_csid_path_format *path_format); + +int cam_ife_csid_hw_probe_init(struct cam_hw_intf *hw_intf, + struct cam_ife_csid_core_info *core_info, bool is_custom); + +int cam_ife_csid_hw_deinit(struct cam_hw_intf *hw_intf, + struct cam_ife_csid_core_info *core_info); + +int cam_ife_csid_cid_reserve(struct cam_ife_csid_cid_data *cid_data, + uint32_t *cid_value, + uint32_t hw_idx, + struct cam_csid_hw_reserve_resource_args *reserve); + +int cam_ife_csid_cid_release( + struct cam_ife_csid_cid_data *cid_data, + uint32_t hw_idx, + uint32_t cid); + +int cam_ife_csid_check_in_port_args( + struct cam_csid_hw_reserve_resource_args *reserve, + uint32_t hw_idx); + +int cam_ife_csid_is_vc_full_width(struct cam_ife_csid_cid_data *cid_data); + +int cam_ife_csid_convert_res_to_irq_reg(uint32_t res_id); + +int cam_ife_csid_get_base(struct cam_hw_soc_info *soc_info, + uint32_t base_id, void *cmd_args, size_t arg_size); + +const char *cam_ife_csid_reset_type_to_string(enum cam_ife_csid_reset_type reset_type); + +const uint8_t **cam_ife_csid_get_irq_reg_tag_ptr(void); +#endif /*_CAM_IFE_CSID_COMMON_H_ */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c new file mode 100644 index 0000000000..51b1f103a4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_hw_intf.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_cpas_api.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" +#include +#include "cam_req_mgr_dev.h" + +static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_NUM_MAX] = { + 0, 0, 0, 0}; + +static int cam_ife_csid_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_intf *hw_intf; + struct cam_hw_info *hw_info; + const struct of_device_id *match_dev = NULL; + struct cam_ife_csid_core_info *csid_core_info = NULL; + uint32_t csid_dev_idx; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + CAM_DBG(CAM_ISP, "Binding IFE CSID component"); + + /* get ife csid hw index */ + rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &csid_dev_idx); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to read cell-index of IFE CSID HW, rc: %d", rc); + goto err; + } + + if (!cam_cpas_is_feature_supported(CAM_CPAS_ISP_FUSE, BIT(csid_dev_idx), NULL) || + !cam_cpas_is_feature_supported(CAM_CPAS_ISP_LITE_FUSE, + BIT(csid_dev_idx), NULL)) { + CAM_DBG(CAM_ISP, "CSID[%d] not supported based on fuse", csid_dev_idx); + goto err; + } + + hw_intf = 
CAM_MEM_ZALLOC(sizeof(*hw_intf), GFP_KERNEL); + if (!hw_intf) { + rc = -ENOMEM; + goto err; + } + + hw_info = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!hw_info) { + rc = -ENOMEM; + goto free_hw_intf; + } + + /* get ife csid hw information */ + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_ISP, "No matching table for the IFE CSID HW!"); + rc = -EINVAL; + goto free_hw_info; + } + + hw_intf->hw_idx = csid_dev_idx; + hw_intf->hw_type = CAM_ISP_HW_TYPE_IFE_CSID; + hw_intf->hw_priv = hw_info; + + hw_info->soc_info.pdev = pdev; + hw_info->soc_info.dev = &pdev->dev; + hw_info->soc_info.dev_name = pdev->name; + hw_info->soc_info.index = csid_dev_idx; + + csid_core_info = (struct cam_ife_csid_core_info *)match_dev->data; + + /* call the driver init and fill csid_hw_info->core_info */ + rc = cam_ife_csid_hw_probe_init(hw_intf, csid_core_info, false); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%d] probe init failed", + csid_dev_idx); + goto free_hw_info; + } + + platform_set_drvdata(pdev, hw_intf); + + hw_info->soc_info.hw_id = CAM_HW_ID_CSID0 + hw_info->soc_info.index; + rc = cam_vmvm_populate_hw_instance_info(&hw_info->soc_info, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ISP, " hw instance populate failed: %d", rc); + goto free_hw_info; + } + + CAM_DBG(CAM_ISP, "CSID:%d component bound successfully", + hw_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + if (hw_intf->hw_idx < CAM_IFE_CSID_HW_NUM_MAX) + cam_ife_csid_hw_list[hw_intf->hw_idx] = hw_intf; + else + goto free_hw_info; + + return 0; + +free_hw_info: + CAM_MEM_FREE(hw_info); +free_hw_intf: + CAM_MEM_FREE(hw_intf); +err: + return rc; +} + +static void cam_ife_csid_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_intf *hw_intf; + struct cam_hw_info *hw_info; + struct 
cam_ife_csid_core_info *core_info = NULL; + struct platform_device *pdev = to_platform_device(dev); + const struct of_device_id *match_dev = NULL; + + hw_intf = (struct cam_hw_intf *)platform_get_drvdata(pdev); + + if (!hw_intf) { + CAM_ERR(CAM_ISP, "Error No data in hw_intf"); + return; + } + + hw_info = hw_intf->hw_priv; + + CAM_DBG(CAM_ISP, "CSID:%d component unbind", + hw_intf->hw_idx); + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + + if (!match_dev) { + CAM_ERR(CAM_ISP, "No matching table for the IFE CSID HW!"); + goto free_mem; + } + + core_info = (struct cam_ife_csid_core_info *)match_dev->data; + + cam_ife_csid_hw_deinit(hw_intf, core_info); + +free_mem: + /*release the csid device memory */ + CAM_MEM_FREE(hw_info); + CAM_MEM_FREE(hw_intf); +} + +const static struct component_ops cam_ife_csid_component_ops = { + .bind = cam_ife_csid_component_bind, + .unbind = cam_ife_csid_component_unbind, +}; + +int cam_ife_csid_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "Adding IFE CSID component"); + rc = component_add(&pdev->dev, &cam_ife_csid_component_ops); + if (rc) + CAM_ERR(CAM_ISP, "failed to add component rc: %d", rc); + + return rc; +} + +int cam_ife_csid_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_ife_csid_component_ops); + return 0; +} + +int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw, + uint32_t hw_idx) +{ + int rc = 0; + + if (hw_idx >= CAM_IFE_CSID_HW_NUM_MAX) { + *ife_csid_hw = NULL; + return -EINVAL; + } + + if (cam_ife_csid_hw_list[hw_idx]) { + *ife_csid_hw = cam_ife_csid_hw_list[hw_idx]; + } else { + *ife_csid_hw = NULL; + rc = -1; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h new file mode 100644 index 0000000000..e6cf2173fb --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_DEV_H_ +#define _CAM_IFE_CSID_DEV_H_ + +#include "cam_isp_hw.h" + +irqreturn_t cam_ife_csid_irq(int irq_num, void *data); + +int cam_ife_csid_probe(struct platform_device *pdev); +int cam_ife_csid_remove(struct platform_device *pdev); + +/** + * @brief : API to register CSID hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_ife_csid_init_module(void); + +/** + * @brief : API to register CSID Lite hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_ife_csid_lite_init_module(void); + +/** + * @brief : API to remove CSID Hw from platform framework. + */ +void cam_ife_csid_exit_module(void); + +/** + * @brief : API to remove CSID Lite Hw from platform framework. + */ +void cam_ife_csid_lite_exit_module(void); + +#endif /*_CAM_IFE_CSID_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver1.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver1.c new file mode 100644 index 0000000000..6b29594476 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver1.c @@ -0,0 +1,4860 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include + +#include +#include +#include + +#include + +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" +#include "cam_isp_hw.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_tasklet_util.h" +#include "cam_common_util.h" +#include "cam_subdev.h" +#include "cam_mem_mgr_api.h" + +#define IFE_CSID_TIMEOUT 1000 + +/* Timeout values in usec */ +#define CAM_IFE_CSID_TIMEOUT_SLEEP_US 1000 +#define CAM_IFE_CSID_TIMEOUT_ALL_US 100000 + +#define CAM_IFE_CSID_RESET_TIMEOUT_MS 100 + +/* + * Constant Factors needed to change QTimer ticks to nanoseconds + * QTimer Freq = 19.2 MHz + * Time(us) = ticks/19.2 + * Time(ns) = ticks/19.2 * 1000 + */ +#define CAM_IFE_CSID_QTIMER_MUL_FACTOR 10000 +#define CAM_IFE_CSID_QTIMER_DIV_FACTOR 192 + +/* Max number of sof irq's triggered in case of SOF freeze */ +#define CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX 12 + +#define CAM_IFE_CSID_VER1_STATUS_MAX_NUM 32 + +static const struct cam_ife_csid_irq_desc ver1_rx_irq_desc[] = { + { + .desc = "PHY_DL0_EOT_CAPTURED", + }, + { + .desc = "PHY_DL1_EOT_CAPTURED", + }, + { + .desc = "PHY_DL2_EOT_CAPTURED", + }, + { + .desc = "PHY_DL3_EOT_CAPTURED", + }, + { + .desc = "PHY_DL0_SOT_CAPTURED", + }, + { + .desc = "PHY_DL1_SOT_CAPTURED", + }, + { + .desc = "PHY_DL2_SOT_CAPTURED", + }, + { + .desc = "PHY_DL3_SOT_CAPTURED", + }, + { + .desc = "LONG_PKT", + }, + { + .desc = "SHORT_PKT", + }, + { + .desc = "CPHY_PKT_HDR", + }, + { + .desc = "CPHY_EOT_RECEPTION: No EOT on lane/s", + }, + { + .desc = "CPHY_SOT_RECEPTION: Less SOTs on lane/s", + }, + { + .desc = "CSID:%d CPHY_PH_CRC CPHY: Pkt Hdr CRC mismatch", + }, + { + .desc = "WARNING_ECC", + }, + { + .desc = "ERROR_LANE0_FIFO_OVERFLOW", + }, + { + .desc = "ERROR_LANE1_FIFO_OVERFLOW", + }, + { + .desc = "ERROR_LANE2_FIFO_OVERFLOW", + }, + { + .desc = "ERROR_LANE3_FIFO_OVERFLOW", + }, + { + .desc = "ERROR_CRC", + }, + { + 
.desc = "ERROR_ECC", + }, + { + .desc = "ERROR_MMAPPED_VC_DT", + }, + { + .desc = "ERROR_UNMAPPED_VC_DT", + }, + { + .desc = "ERROR_STREAM_UNDERFLOW", + }, + { + .desc = "ERROR_UNBOUNDED_FRAME", + }, + { + .desc = "RST_DONE", + }, +}; + +static const struct cam_ife_csid_irq_desc ver1_path_irq_desc[] = { + { + .desc = "", + }, + { + .desc = "Reset_Done", + }, + { + .desc = "PATH_ERROR_O/P_FIFO_OVERFLOW: Slow IFE read", + }, + { + .desc = "SUBSAMPLED_EOF", + }, + { + .desc = "SUBSAMPLED_SOF", + }, + { + .desc = "FRAME_DROP_EOF", + }, + { + .desc = "FRAME_DROP_EOL", + }, + { + .desc = "FRAME_DROP_SOL", + }, + { + .desc = "FRAME_DROP_SOF", + }, + { + .desc = "INPUT_EOF", + }, + { + .desc = "INPUT_EOL", + }, + { + .desc = "INPUT_SOL", + }, + { + .desc = "INPUT_SOF", + }, + { + .desc = "ERROR_PIX_COUNT", + }, + { + .desc = "ERROR_LINE_COUNT", + }, + { + .desc = "PATH_ERROR_CCIF_VIOLATION: Bad frame timings", + }, + { + .desc = "FRAME_DROP", + }, + { + .desc = + "PATH_OVERFLOW_RECOVERY: Back pressure/output fifo ovrfl", + }, + { + .desc = "ERROR_REC_CCIF_VIOLATION", + }, + { + .desc = "VCDT_GRP0_SEL", + }, + { + .desc = "VCDT_GRP1_SEL", + }, + { + .desc = "VCDT_GRP_CHANGE", + }, +}; + +static int cam_ife_csid_ver1_set_debug( + struct cam_ife_csid_ver1_hw *csid_hw, + uint32_t debug_val) +{ + int bit_pos = 0; + uint32_t val; + + memset(&csid_hw->debug_info, 0, + sizeof(struct cam_ife_csid_debug_info)); + csid_hw->debug_info.debug_val = debug_val; + + while (debug_val) { + + if (!(debug_val & 0x1)) { + debug_val >>= 1; + bit_pos++; + continue; + } + + val = BIT(bit_pos); + + switch (val) { + case CAM_IFE_CSID_DEBUG_ENABLE_SOF_IRQ: + csid_hw->debug_info.path_mask |= + IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + break; + case CAM_IFE_CSID_DEBUG_ENABLE_EOF_IRQ: + csid_hw->debug_info.path_mask |= + IFE_CSID_VER1_PATH_INFO_INPUT_EOF; + break; + case CAM_IFE_CSID_DEBUG_ENABLE_SOT_IRQ: + csid_hw->debug_info.rx_mask |= + IFE_CSID_VER1_RX_DL0_SOT_CAPTURED | + 
IFE_CSID_VER1_RX_DL1_SOT_CAPTURED | + IFE_CSID_VER1_RX_DL2_SOT_CAPTURED; + break; + case CAM_IFE_CSID_DEBUG_ENABLE_EOT_IRQ: + csid_hw->debug_info.rx_mask |= + IFE_CSID_VER1_RX_DL0_EOT_CAPTURED | + IFE_CSID_VER1_RX_DL1_EOT_CAPTURED | + IFE_CSID_VER1_RX_DL2_EOT_CAPTURED; + break; + case CAM_IFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE: + csid_hw->debug_info.rx_mask |= + IFE_CSID_VER1_RX_SHORT_PKT_CAPTURED; + break; + case CAM_IFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE: + csid_hw->debug_info.rx_mask |= + IFE_CSID_VER1_RX_LONG_PKT_CAPTURED; + break; + case CAM_IFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE: + csid_hw->debug_info.rx_mask |= + IFE_CSID_VER1_RX_CPHY_PKT_HDR_CAPTURED; + break; + case CAM_IFE_DEBUG_ENABLE_UNMAPPED_VC_DT_IRQ: + csid_hw->debug_info.rx_mask |= + IFE_CSID_VER1_RX_UNMAPPED_VC_DT; + break; + default: + break; + } + + debug_val >>= 1; + bit_pos++; + } + + return 0; +} + +int cam_ife_csid_ver1_get_hw_caps(void *hw_priv, + void *get_hw_cap_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_ife_csid_hw_caps *hw_caps; + struct cam_ife_csid_ver1_hw *csid_hw; + struct cam_hw_info *hw_info; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_csid_soc_private *soc_private = NULL; + + if (!hw_priv || !get_hw_cap_args) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + soc_private = (struct cam_csid_soc_private *) + csid_hw->hw_info->soc_info.soc_private; + + hw_caps->num_rdis = csid_reg->cmn_reg->num_rdis; + hw_caps->num_pix = csid_reg->cmn_reg->num_pix; + hw_caps->num_ppp = csid_reg->cmn_reg->num_ppp; + hw_caps->major_version = csid_reg->cmn_reg->major_version; + hw_caps->minor_version = csid_reg->cmn_reg->minor_version; + hw_caps->version_incr = csid_reg->cmn_reg->version_incr; + 
hw_caps->global_reset_en = csid_reg->cmn_reg->global_reset; + hw_caps->aup_rup_en = csid_reg->cmn_reg->aup_rup_supported; + hw_caps->is_lite = soc_private->is_ife_csid_lite; + + CAM_DBG(CAM_ISP, + "CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d", + csid_hw->hw_intf->hw_idx, hw_caps->num_rdis, + hw_caps->num_pix, hw_caps->major_version, + hw_caps->minor_version, hw_caps->version_incr); + + return rc; +} + +static int cam_ife_csid_ver1_prepare_reset( + struct cam_ife_csid_ver1_hw *csid_hw) +{ + struct cam_hw_soc_info *soc_info; + struct cam_ife_csid_ver1_reg_info *csid_reg; + uint32_t i; + unsigned long flags; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "CSID:%d Csid reset", + csid_hw->hw_intf->hw_idx); + + spin_lock_irqsave(&csid_hw->hw_info->hw_lock, flags); + + /* Mask all interrupts */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->irq_mask_addr); + + if (csid_reg->cmn_reg->num_pix) + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->irq_mask_addr); + + if (csid_reg->cmn_reg->num_ppp) + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->ppp_reg->irq_mask_addr); + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->irq_mask_addr); + + /* clear all interrupts */ + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_clear_addr); + + cam_io_w_mb(csid_reg->csi2_reg->irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->irq_clear_addr); + + if (csid_reg->cmn_reg->num_pix) + cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->irq_clear_addr); + + if 
(csid_reg->cmn_reg->num_ppp) + cam_io_w_mb(csid_reg->cmn_reg->ppp_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->ppp_reg->irq_clear_addr); + + for (i = 0 ; i < csid_reg->cmn_reg->num_rdis; i++) + cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->irq_clear_addr); + + for (i = 0 ; i < csid_reg->cmn_reg->num_udis; i++) + cam_io_w_mb(csid_reg->cmn_reg->udi_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->udi_reg[i]->irq_clear_addr); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + + spin_unlock_irqrestore(&csid_hw->hw_info->hw_lock, flags); + + cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->cfg1_addr); + + /* enable the IPP and RDI format measure */ + if (csid_reg->cmn_reg->num_pix) + cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->cfg0_addr); + + if (csid_reg->cmn_reg->num_ppp) + cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base + + csid_reg->ppp_reg->cfg0_addr); + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) + cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->cfg0_addr); + return 0; +} + +static int cam_ife_csid_ver1_hw_reset( + struct cam_ife_csid_ver1_hw *csid_hw) +{ + int rc = 0; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + uint32_t val = 0; + unsigned long flags, rem_jiffies = 0; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + reinit_completion( + &csid_hw->irq_complete[CAM_IFE_CSID_IRQ_REG_TOP]); + + spin_lock_irqsave(&csid_hw->hw_info->hw_lock, flags); + + csid_hw->flags.process_reset = true; + + /* clear the top interrupt first */ + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_clear_addr); + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + + /* enable top reset complete IRQ */ 
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_mask_addr); + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + + /* perform the top CSID registers reset */ + val = csid_reg->cmn_reg->rst_hw_reg_stb; + + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->rst_strobes_addr); + + spin_unlock_irqrestore(&csid_hw->hw_info->hw_lock, flags); + CAM_DBG(CAM_ISP, "CSID reset start"); + + rem_jiffies = cam_common_wait_for_completion_timeout( + &csid_hw->irq_complete[CAM_IFE_CSID_IRQ_REG_TOP], + msecs_to_jiffies(CAM_IFE_CSID_RESET_TIMEOUT_MS)); + + if (rem_jiffies == 0) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_status_addr); + if (val & 0x1) { + /* clear top reset IRQ */ + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_clear_addr); + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + CAM_DBG(CAM_ISP, "CSID:%d HW reset completed %d", + csid_hw->hw_intf->hw_idx, + rem_jiffies); + goto end; + } + CAM_ERR(CAM_ISP, "CSID:%d hw csid_reset fail rc = %d", + rem_jiffies); + rc = -ETIMEDOUT; + } else { + CAM_DBG(CAM_ISP, "CSID:%d hw reset completed %d", + csid_hw->hw_intf->hw_idx, + rem_jiffies); + } +end: + csid_hw->flags.process_reset = false; + return rc; +} + +static int cam_ife_csid_ver1_sw_reset( + struct cam_ife_csid_ver1_hw *csid_hw) +{ + int rc = 0; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + uint32_t val = 0; + unsigned long flags, rem_jiffies = 0; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + reinit_completion( + &csid_hw->irq_complete[CAM_IFE_CSID_IRQ_REG_TOP]); + + spin_lock_irqsave(&csid_hw->hw_info->hw_lock, flags); + + csid_hw->flags.process_reset = true; + + /* clear the top interrupt first */ + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + 
csid_reg->cmn_reg->top_irq_clear_addr); + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + + /* perform the top CSID registers reset */ + val = csid_reg->cmn_reg->rst_sw_reg_stb; + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->rst_strobes_addr); + + /* + * for SW reset, we enable the IRQ after since the mask + * register has been reset + */ + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_mask_addr); + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + + spin_unlock_irqrestore(&csid_hw->hw_info->hw_lock, flags); + CAM_DBG(CAM_ISP, "CSID[%d] top reset start", + csid_hw->hw_intf->hw_idx); + + rem_jiffies = cam_common_wait_for_completion_timeout( + &csid_hw->irq_complete[CAM_IFE_CSID_IRQ_REG_TOP], + msecs_to_jiffies(CAM_IFE_CSID_RESET_TIMEOUT_MS)); + + if (rem_jiffies == 0) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_status_addr); + if (val & 0x1) { + /* clear top reset IRQ */ + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_clear_addr); + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + CAM_DBG(CAM_ISP, "CSID:%d sw reset completed %d", + csid_hw->hw_intf->hw_idx, + rem_jiffies); + goto end; + } + CAM_ERR(CAM_ISP, "CSID:%d sw csid_reset fail rc = %d", + csid_hw->hw_intf->hw_idx, + rem_jiffies); + rc = -ETIMEDOUT; + } else { + CAM_DBG(CAM_ISP, "CSID:%d sw reset completed %d", + csid_hw->hw_intf->hw_idx, + rem_jiffies); + } +end: + csid_hw->flags.process_reset = false; + return rc; +} + +static int cam_ife_csid_ver1_global_reset( + struct cam_ife_csid_ver1_hw *csid_hw) +{ + int rc = 0; + + rc = cam_ife_csid_ver1_prepare_reset(csid_hw); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%d] prepare reset failed"); + goto end; + } + + rc = cam_ife_csid_ver1_hw_reset(csid_hw); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%d] hw reset failed"); + goto end; + } + + 
rc = cam_ife_csid_ver1_sw_reset(csid_hw); + + if (rc) + CAM_ERR(CAM_ISP, "CSID[%d] sw reset failed"); +end: + return rc; +} + +static int cam_ife_csid_path_reset( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_csid_reset_cfg_args *reset) +{ + const struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_isp_resource_node *res; + struct cam_hw_soc_info *soc_info; + unsigned long rem_jiffies; + uint32_t val; + int rc = 0; + int irq_reg = 0; + int id = 0; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + res = reset->node_res; + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid hw state :%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "CSID:%d reset Resource[id:%d name:%s]", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_name); + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + path_reg = csid_reg->ipp_reg; + break; + case CAM_IFE_PIX_PATH_RES_PPP: + path_reg = csid_reg->ppp_reg; + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + path_reg = csid_reg->rdi_reg[res->res_id]; + break; + case CAM_IFE_PIX_PATH_RES_UDI_0: + case CAM_IFE_PIX_PATH_RES_UDI_1: + case CAM_IFE_PIX_PATH_RES_UDI_2: + id = res->res_id - CAM_IFE_PIX_PATH_RES_UDI_0; + path_reg = csid_reg->udi_reg[id]; + break; + default: + break; + } + + if (!path_reg) { + CAM_ERR(CAM_ISP, "Invalid res %d", res->res_id); + return -EINVAL; + } + + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + path_reg->irq_mask_addr); + val |= 1 << csid_reg->cmn_reg->rst_done_shift_val; + irq_reg = 
cam_ife_csid_convert_res_to_irq_reg(res->res_id); + reinit_completion(&csid_hw->irq_complete[irq_reg]); + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + path_reg->irq_mask_addr); + cam_io_w_mb(csid_reg->cmn_reg->path_rst_stb_all, + soc_info->reg_map[0].mem_base + path_reg->rst_strobes_addr); + + rem_jiffies = cam_common_wait_for_completion_timeout( + &csid_hw->irq_complete[irq_reg], + msecs_to_jiffies(IFE_CSID_TIMEOUT)); + + CAM_DBG(CAM_ISP, "CSID:%d resource[id:%d name:%s] reset done", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_name); + + if (!rem_jiffies) { + rc = -ETIMEDOUT; + CAM_ERR(CAM_ISP, "CSID:%d Res id %d fail rc = %d", + csid_hw->hw_intf->hw_idx, + res->res_id, rc); + } + + return rc; +} + +int cam_ife_csid_ver1_reset(void *hw_priv, + void *reset_args, uint32_t arg_size) +{ + struct cam_hw_info *hw_info; + struct cam_ife_csid_ver1_hw *csid_hw; + struct cam_csid_reset_cfg_args *reset; + int rc = 0; + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + reset = (struct cam_csid_reset_cfg_args *)reset_args; + + mutex_lock(&csid_hw->hw_info->hw_mutex); + + switch (reset->reset_type) { + case CAM_IFE_CSID_RESET_GLOBAL: + rc = cam_ife_csid_ver1_global_reset(csid_hw); + break; + case CAM_IFE_CSID_RESET_PATH: + rc = cam_ife_csid_path_reset(csid_hw, reset); + break; + default: + CAM_ERR(CAM_ISP, "CSID:Invalid reset type :%d", + reset->reset_type); + rc = -EINVAL; + break; + } + + CAM_DBG(CAM_ISP, "CSID[%d] reset type :%d", + csid_hw->hw_intf->hw_idx, + reset->reset_type); + + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; +} + +static int cam_ife_csid_ver1_deinit_rdi_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + const struct cam_ife_csid_ver1_path_reg_info *path_reg; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_ife_csid_core_info *core_info; + struct cam_hw_soc_info *soc_info; + void __iomem *mem_base; + int rc = 0; + 
uint32_t val; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW || + res->res_id > CAM_IFE_PIX_PATH_RES_RDI_4) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + core_info = csid_hw->core_info; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *)core_info->csid_reg; + + path_reg = csid_reg->rdi_reg[res->res_id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + mem_base = soc_info->reg_map[0].mem_base; + + if (path_reg->overflow_ctrl_en) + cam_io_w_mb(0, mem_base + + path_reg->err_recovery_cfg0_addr); + + val = cam_io_r_mb(mem_base + path_reg->cfg0_addr); + + if (val & path_reg->format_measure_en_shift_val) { + val &= ~path_reg->format_measure_en_shift_val; + cam_io_w_mb(val, mem_base + + path_reg->cfg0_addr); + + /* Disable the HBI/VBI counter */ + val = cam_io_r_mb(mem_base + + path_reg->format_measure_cfg0_addr); + val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, mem_base + + path_reg->format_measure_cfg0_addr); + } + + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + return rc; +} + +static int cam_ife_csid_ver1_deinit_udi_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + const struct cam_ife_csid_ver1_path_reg_info *path_reg; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_ife_csid_core_info *core_info; + struct cam_hw_soc_info *soc_info; + void __iomem *mem_base; + uint32_t val, id; + int rc = 0; + + if ((res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) || + (res->res_id < CAM_IFE_PIX_PATH_RES_UDI_0 || + res->res_id > CAM_IFE_PIX_PATH_RES_UDI_2)) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + res->res_type, 
res->res_id, res->res_state); + return -EINVAL; + } + + core_info = csid_hw->core_info; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *)core_info->csid_reg; + + id = res->res_id - CAM_IFE_PIX_PATH_RES_UDI_0; + path_reg = csid_reg->udi_reg[id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + mem_base = soc_info->reg_map[0].mem_base; + + if (path_reg->overflow_ctrl_en) + cam_io_w_mb(0, mem_base + + path_reg->err_recovery_cfg0_addr); + + val = cam_io_r_mb(mem_base + path_reg->cfg0_addr); + + if (val & BIT(path_reg->format_measure_en_shift_val)) { + val &= ~BIT(path_reg->format_measure_en_shift_val); + cam_io_w_mb(val, mem_base + + path_reg->cfg0_addr); + + /* Disable the HBI/VBI counter */ + val = cam_io_r_mb(mem_base + + path_reg->format_measure_cfg0_addr); + val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, mem_base + + path_reg->format_measure_cfg0_addr); + } + + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + return rc; +} +static int cam_ife_csid_ver1_deinit_pxl_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + void __iomem *mem_base; + uint32_t val; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + const struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + struct cam_ife_csid_core_info *core_info; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + core_info = csid_hw->core_info; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *)core_info->csid_reg; + + if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) + path_reg = csid_reg->ipp_reg; + else if (res->res_id 
== CAM_IFE_PIX_PATH_RES_PPP) + path_reg = csid_reg->ppp_reg; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d PIX:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + mem_base = soc_info->reg_map[0].mem_base; + + if (path_reg->overflow_ctrl_en) + cam_io_w_mb(0, mem_base + + path_reg->err_recovery_cfg0_addr); + + val = cam_io_r_mb(mem_base + path_reg->cfg0_addr); + + if (val & path_reg->format_measure_en_shift_val) { + val &= ~path_reg->format_measure_en_shift_val; + cam_io_w_mb(val, mem_base + + path_reg->cfg0_addr); + + /* Disable the HBI/VBI counter */ + val = cam_io_r_mb(mem_base + + path_reg->format_measure_cfg0_addr); + val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, mem_base + + path_reg->format_measure_cfg0_addr); + } + + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + return rc; +} + +static int cam_ife_csid_ver1_stop_pxl_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res, + enum cam_ife_csid_halt_cmd stop_cmd) +{ + int rc = 0; + uint32_t val; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + const struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + struct cam_ife_csid_ver1_path_cfg *path_cfg; + uint32_t halt_cmd = 0; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) + path_reg = csid_reg->ipp_reg; + else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) + path_reg = 
csid_reg->ppp_reg; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d PIX:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv; + + if (stop_cmd == CAM_CSID_HALT_IMMEDIATELY) { + halt_cmd = path_reg->halt_immediate; + } else if (stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY) { + halt_cmd = path_reg->halt_frame_boundary; + } else { + CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d", + csid_hw->hw_intf->hw_idx, stop_cmd); + return -EINVAL; + } + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + path_reg->irq_mask_addr); + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + path_reg->ctrl_addr); + + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER || + path_cfg->sync_mode == CAM_ISP_HW_SYNC_NONE) { + /* configure Halt for master */ + val &= ~0x3; + val |= halt_cmd; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + path_reg->ctrl_addr); + } + + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_SLAVE && + stop_cmd == CAM_CSID_HALT_IMMEDIATELY) { + /* configure Halt for slave */ + val &= ~0xF; + val |= halt_cmd; + val |= (path_reg->halt_mode_master << + path_reg->halt_mode_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + path_reg->ctrl_addr); + } + + return rc; +} + +static int cam_ife_csid_ver1_stop_rdi_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res, + enum cam_ife_csid_halt_cmd stop_cmd) +{ + const struct cam_ife_csid_ver1_path_reg_info *path_reg; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_ife_csid_core_info *core_info; + struct cam_hw_soc_info *soc_info; + void __iomem *mem_base; + uint32_t val; + int rc = 0; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + if (res->res_id >= 
CAM_IFE_CSID_RDI_MAX) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + core_info = csid_hw->core_info; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *)core_info->csid_reg; + path_reg = csid_reg->rdi_reg[res->res_id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d PIX:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + mem_base = soc_info->reg_map[0].mem_base; + + if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY && + stop_cmd != CAM_CSID_HALT_IMMEDIATELY) { + CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d", + csid_hw->hw_intf->hw_idx, stop_cmd); + return -EINVAL; + } + + cam_io_w_mb(0, mem_base + path_reg->irq_mask_addr); + + val = cam_io_r_mb(mem_base + path_reg->ctrl_addr); + val &= ~0x3; + val |= stop_cmd; + cam_io_w_mb(val, mem_base + path_reg->ctrl_addr); + + return rc; +} + +static int cam_ife_csid_ver1_stop_udi_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res, + enum cam_ife_csid_halt_cmd stop_cmd) +{ + const struct cam_ife_csid_ver1_path_reg_info *path_reg; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_ife_csid_core_info *core_info; + struct cam_hw_soc_info *soc_info; + void __iomem *mem_base; + uint32_t val, id; + int rc = 0; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + if (res->res_id < CAM_IFE_PIX_PATH_RES_UDI_0 || + res->res_id > CAM_IFE_PIX_PATH_RES_UDI_2) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + id = res->res_id - CAM_IFE_PIX_PATH_RES_UDI_0; + core_info = csid_hw->core_info; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct 
cam_ife_csid_ver1_reg_info *)core_info->csid_reg; + path_reg = csid_reg->udi_reg[id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + mem_base = soc_info->reg_map[0].mem_base; + + if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY && + stop_cmd != CAM_CSID_HALT_IMMEDIATELY) { + CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d", + csid_hw->hw_intf->hw_idx, stop_cmd); + return -EINVAL; + } + + cam_io_w_mb(0, mem_base + path_reg->irq_mask_addr); + + val = cam_io_r_mb(mem_base + path_reg->ctrl_addr); + val &= ~0x3; + val |= stop_cmd; + cam_io_w_mb(val, mem_base + path_reg->ctrl_addr); + + return rc; +} + +static int cam_ife_csid_hw_ver1_path_cfg( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_ife_csid_ver1_path_cfg *path_cfg, + struct cam_csid_hw_reserve_resource_args *reserve, + uint32_t cid) +{ + + path_cfg->cid = cid; + path_cfg->in_format = reserve->in_port->format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0]; + path_cfg->out_format = reserve->out_port->format; + path_cfg->sync_mode = reserve->sync_mode; + path_cfg->height = reserve->in_port->height; + path_cfg->start_line = reserve->in_port->line_start; + path_cfg->end_line = reserve->in_port->line_stop; + path_cfg->crop_enable = reserve->crop_enable; + path_cfg->drop_enable = reserve->drop_enable; + path_cfg->horizontal_bin = reserve->in_port->horizontal_bin; + path_cfg->qcfa_bin = reserve->in_port->qcfa_bin; + path_cfg->num_bytes_out = reserve->in_port->num_bytes_out; + + if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) { + path_cfg->start_pixel = reserve->in_port->left_start; + path_cfg->end_pixel = reserve->in_port->left_stop; + path_cfg->width = reserve->in_port->left_width; + + if (reserve->res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 && + reserve->res_id <= (CAM_IFE_PIX_PATH_RES_RDI_0 + + CAM_IFE_CSID_RDI_MAX - 1)) { + path_cfg->end_pixel = reserve->in_port->right_stop; + path_cfg->width = path_cfg->end_pixel - + 
path_cfg->start_pixel + 1; + } + + CAM_DBG(CAM_ISP, + "CSID:%d res:%d master:startpixel 0x%x endpixel:0x%x", + csid_hw->hw_intf->hw_idx, reserve->res_id, + path_cfg->start_pixel, path_cfg->end_pixel); + CAM_DBG(CAM_ISP, + "CSID:%d res:%d master:line start:0x%x line end:0x%x", + csid_hw->hw_intf->hw_idx, reserve->res_id, + path_cfg->start_line, path_cfg->end_line); + } else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) { + path_cfg->start_pixel = reserve->in_port->right_start; + path_cfg->end_pixel = reserve->in_port->right_stop; + path_cfg->width = reserve->in_port->right_width; + CAM_DBG(CAM_ISP, + "CSID:%d res:%d slave:start:0x%x end:0x%x width 0x%x", + csid_hw->hw_intf->hw_idx, reserve->res_id, + path_cfg->start_pixel, path_cfg->end_pixel, + path_cfg->width); + CAM_DBG(CAM_ISP, + "CSID:%d res:%d slave:line start:0x%x line end:0x%x", + csid_hw->hw_intf->hw_idx, reserve->res_id, + path_cfg->start_line, path_cfg->end_line); + } else { + path_cfg->width = reserve->in_port->left_width; + path_cfg->start_pixel = reserve->in_port->left_start; + path_cfg->end_pixel = reserve->in_port->left_stop; + CAM_DBG(CAM_ISP, + "CSID:%d res:%d left width %d start: %d stop:%d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + reserve->in_port->left_width, + reserve->in_port->left_start, + reserve->in_port->left_stop); + } + return 0; +} + +static int cam_ife_csid_ver1_rx_capture_config( + struct cam_ife_csid_ver1_hw *csid_hw) +{ + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_ife_csid_rx_cfg *rx_cfg; + uint32_t vc, dt, i; + uint32_t val = 0; + + for (i = 0; i < CAM_IFE_CSID_CID_MAX; i++) + if (csid_hw->cid_data[i].cid_cnt) + break; + + if (i == CAM_IFE_CSID_CID_MAX) { + CAM_WARN(CAM_ISP, "CSID[%d] no valid cid", + csid_hw->hw_intf->hw_idx); + return 0; + } + rx_cfg = &csid_hw->rx_cfg; + + vc = csid_hw->cid_data[i].vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc; + dt = csid_hw->cid_data[i].vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].dt; + + 
csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) + val = ((1 << csid_reg->csi2_reg->capture_short_pkt_en_shift) | + (vc << csid_reg->csi2_reg->capture_short_pkt_vc_shift)); + + /* CAM_IFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE */ + val |= ((1 << csid_reg->csi2_reg->capture_long_pkt_en_shift) | + (dt << csid_reg->csi2_reg->capture_long_pkt_dt_shift) | + (vc << csid_reg->csi2_reg->capture_long_pkt_vc_shift)); + + /* CAM_IFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE*/ + if (rx_cfg->lane_type == CAM_ISP_LANE_TYPE_CPHY) { + val |= ((1 << csid_reg->csi2_reg->capture_cphy_pkt_en_shift) | + (dt << csid_reg->csi2_reg->capture_cphy_pkt_dt_shift) | + (vc << csid_reg->csi2_reg->capture_cphy_pkt_vc_shift)); + } + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->capture_ctrl_addr); + return 0; +} + +static int cam_ife_csid_ver1_tpg_config( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_csid_hw_reserve_resource_args *reserve) +{ + + if (csid_hw->flags.tpg_configured) + return 0; + + switch (reserve->in_port->test_pattern) { + case CAM_ISP_PATTERN_BAYER_RGRGRG: + case CAM_ISP_PATTERN_BAYER_GRGRGR: + case CAM_ISP_PATTERN_BAYER_BGBGBG: + case CAM_ISP_PATTERN_BAYER_GBGBGB: + csid_hw->tpg_cfg.test_pattern = reserve->in_port->test_pattern; + break; + case CAM_ISP_PATTERN_YUV_YCBYCR: + case CAM_ISP_PATTERN_YUV_YCRYCB: + case CAM_ISP_PATTERN_YUV_CBYCRY: + case CAM_ISP_PATTERN_YUV_CRYCBY: + csid_hw->tpg_cfg.test_pattern = + CAM_IFE_CSID_TPG_TEST_PATTERN_YUV; + break; + default: + CAM_ERR(CAM_ISP, "CSID[%d] invalid test_pattern %d", + csid_hw->hw_intf->hw_idx, + reserve->in_port->test_pattern); + return -EINVAL; + } + + switch (reserve->in_port->format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0]) { + case CAM_FORMAT_MIPI_RAW_8: + csid_hw->tpg_cfg.encode_format = CAM_IFE_CSID_TPG_ENCODE_RAW8; + break; + case 
CAM_FORMAT_MIPI_RAW_10: + csid_hw->tpg_cfg.encode_format = CAM_IFE_CSID_TPG_ENCODE_RAW10; + break; + case CAM_FORMAT_MIPI_RAW_12: + csid_hw->tpg_cfg.encode_format = CAM_IFE_CSID_TPG_ENCODE_RAW12; + break; + case CAM_FORMAT_MIPI_RAW_14: + csid_hw->tpg_cfg.encode_format = CAM_IFE_CSID_TPG_ENCODE_RAW14; + break; + case CAM_FORMAT_MIPI_RAW_16: + csid_hw->tpg_cfg.encode_format = CAM_IFE_CSID_TPG_ENCODE_RAW16; + break; + case CAM_FORMAT_YUV422: + csid_hw->tpg_cfg.encode_format = CAM_IFE_CSID_TPG_ENCODE_RAW8; + break; + default: + CAM_ERR(CAM_ISP, "CSID[%d] invalid input format %d", + csid_hw->hw_intf->hw_idx, + reserve->in_port->format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0]); + return -EINVAL; + } + + if (reserve->in_port->usage_type) + csid_hw->tpg_cfg.width = reserve->in_port->right_stop + 1; + else + csid_hw->tpg_cfg.width = reserve->in_port->left_width; + csid_hw->tpg_cfg.height = reserve->in_port->height; + csid_hw->flags.tpg_configured = true; + csid_hw->tpg_cfg.vc = reserve->in_port->vc[0]; + csid_hw->tpg_cfg.dt = reserve->in_port->dt[0]; + return 0; +} + +static int cam_ife_csid_ver1_tpg_start( + struct cam_ife_csid_ver1_hw *csid_hw) +{ + int rc = 0; + uint32_t val = 0; + uint32_t i; + uint32_t base; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_reg_info *csid_reg = NULL; + + if (csid_hw->flags.tpg_enabled) + return 0; + + /*Enable the TPG */ + CAM_DBG(CAM_ISP, "CSID:%d start CSID TPG", + csid_hw->hw_intf->hw_idx); + + soc_info = &csid_hw->hw_info->soc_info; + + CAM_DBG(CAM_ISP, "================ TPG ============"); + base = 0x600; + + for (i = 0; i < 16; i++) { + val = cam_io_r_mb( + soc_info->reg_map[0].mem_base + + base + i * 4); + CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", + (base + i*4), val); + } + + CAM_DBG(CAM_ISP, "================ IPP ============="); + base = 0x200; + for (i = 0; i < 10; i++) { + val = cam_io_r_mb( + soc_info->reg_map[0].mem_base + + base + i * 4); + CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", + (base + i*4), val); + } + + CAM_DBG(CAM_ISP, "================ RX 
============="); + base = 0x100; + for (i = 0; i < 5; i++) { + val = cam_io_r_mb( + soc_info->reg_map[0].mem_base + + base + i * 4); + CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", + (base + i*4), val); + } + + /* Enable the IFE force clock on for dual isp case */ + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (csid_hw->tpg_cfg.usage_type) { + rc = cam_ife_csid_enable_ife_force_clock_on(soc_info, + csid_reg->tpg_reg->cpas_ife_reg_offset); + if (rc) + return rc; + } + + CAM_DBG(CAM_ISP, "============ TPG control ============"); + val = csid_reg->tpg_reg->ctrl_cfg | + ((csid_hw->rx_cfg.lane_num - 1) & + csid_reg->tpg_reg->num_active_lanes_mask); + + csid_hw->flags.tpg_enabled = true; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->tpg_reg->ctrl_addr); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600); + CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", 0x600, val); + + return 0; +} + +static int cam_ife_csid_ver1_tpg_stop(struct cam_ife_csid_ver1_hw *csid_hw) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_reg_info *csid_reg = NULL; + + if (!csid_hw->flags.tpg_enabled) + return 0; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + /* disable the TPG */ + CAM_DBG(CAM_ISP, "CSID:%d stop CSID TPG", + csid_hw->hw_intf->hw_idx); + + /* Disable the IFE force clock on for dual isp case */ + if (csid_hw->tpg_cfg.usage_type) { + rc = cam_ife_csid_disable_ife_force_clock_on(soc_info, + csid_reg->tpg_reg->cpas_ife_reg_offset); + CAM_DBG(CAM_ISP, "Dual isp case: Disable IFE force clk. 
rc %d", rc); + } + + /*stop the TPG */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->tpg_reg->ctrl_addr); + csid_hw->flags.tpg_enabled = false; + csid_hw->flags.tpg_configured = false; + + return 0; +} + +static int cam_ife_csid_ver1_init_tpg_hw( + struct cam_ife_csid_ver1_hw *csid_hw) +{ + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_ife_csid_ver1_tpg_reg_info *tpg_reg; + struct cam_hw_soc_info *soc_info; + uint32_t val = 0; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + tpg_reg = csid_reg->tpg_reg; + soc_info = &csid_hw->hw_info->soc_info; + + CAM_DBG(CAM_ISP, "CSID:%d TPG config", + csid_hw->hw_intf->hw_idx); + + /* configure one DT, infinite frames */ + val = (tpg_reg->num_frames << tpg_reg->num_frame_shift) | + csid_hw->tpg_cfg.vc; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->vc_cfg0_addr); + + /* vertical blanking count = 0x3FF, horzontal blanking count = 0x740*/ + val = (tpg_reg->vbi << tpg_reg->vbi_shift) | + (tpg_reg->hbi << tpg_reg->hbi_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->vc_cfg1_addr); + + cam_io_w_mb(tpg_reg->lfsr_seed, soc_info->reg_map[0].mem_base + + tpg_reg->lfsr_seed_addr); + + val = csid_hw->tpg_cfg.width << tpg_reg->width_shift | + csid_hw->tpg_cfg.height; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->dt_n_cfg_0_addr); + + cam_io_w_mb(csid_hw->tpg_cfg.dt, soc_info->reg_map[0].mem_base + + csid_reg->tpg_reg->dt_n_cfg_1_addr); + + /* + * in_format is the same as the input resource format. + * it is one larger than the register spec format. 
+ */ + val = ((csid_hw->tpg_cfg.encode_format) << tpg_reg->fmt_shift) | + tpg_reg->payload_mode; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->tpg_reg->dt_n_cfg_2_addr); + + /* static frame with split color bar */ + val = tpg_reg->color_bar << tpg_reg->color_bar_shift; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->color_bars_cfg_addr); + /* config pix pattern */ + cam_io_w_mb(csid_hw->tpg_cfg.test_pattern, + soc_info->reg_map[0].mem_base + + tpg_reg->common_gen_cfg_addr); + + return 0; +} + +static int cam_ife_csid_hw_ver1_rx_cfg( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_csid_hw_reserve_resource_args *reserve) +{ + + if (csid_hw->counters.csi2_reserve_cnt) { + csid_hw->counters.csi2_reserve_cnt++; + CAM_DBG(CAM_ISP, "CSID %u Rx already reserved cnt %u", + csid_hw->hw_intf->hw_idx, + csid_hw->counters.csi2_reserve_cnt); + return 0; + } + + csid_hw->rx_cfg.lane_cfg = + reserve->in_port->lane_cfg; + csid_hw->rx_cfg.lane_type = + reserve->in_port->lane_type; + csid_hw->rx_cfg.lane_num = + reserve->in_port->lane_num; + csid_hw->res_type = reserve->in_port->res_type; + csid_hw->rx_cfg.epd_supported = + reserve->in_port->epd_supported; + + switch (reserve->in_port->res_type) { + case CAM_ISP_IFE_IN_RES_TPG: + csid_hw->rx_cfg.phy_sel = 0; + break; + case CAM_ISP_IFE_IN_RES_CPHY_TPG_0: + csid_hw->rx_cfg.phy_sel = 0; + break; + case CAM_ISP_IFE_IN_RES_CPHY_TPG_1: + csid_hw->rx_cfg.phy_sel = 1; + break; + case CAM_ISP_IFE_IN_RES_CPHY_TPG_2: + csid_hw->rx_cfg.phy_sel = 2; + break; + default: + csid_hw->rx_cfg.phy_sel = + (reserve->in_port->res_type & 0xFF) - 1; + break; + } + + csid_hw->counters.csi2_reserve_cnt++; + CAM_DBG(CAM_ISP, + "CSID:%u Lane cfg:0x%x type:%u num:%u res:0x%x, res_cnt %u", + csid_hw->hw_intf->hw_idx, + reserve->in_port->lane_cfg, reserve->in_port->lane_type, + reserve->in_port->lane_num, reserve->in_port->res_type, + csid_hw->counters.csi2_reserve_cnt); + + return 0; + +} + +int cam_ife_csid_hw_ver1_hw_cfg( + 
struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_ife_csid_ver1_path_cfg *path_cfg, + struct cam_csid_hw_reserve_resource_args *reserve, + uint32_t cid) +{ + int rc = 0; + + cam_ife_csid_hw_ver1_rx_cfg(csid_hw, reserve); + cam_ife_csid_hw_ver1_path_cfg(csid_hw, path_cfg, + reserve, cid); + + if (reserve->res_type == CAM_ISP_IFE_IN_RES_TPG) { + rc = cam_ife_csid_ver1_tpg_config(csid_hw, reserve); + + if (rc) + CAM_ERR(CAM_ISP, + "CSID[%d] Res_id %d tpg config fail", + csid_hw->hw_intf->hw_idx, reserve->res_id); + } + + return rc; +} + +static int cam_ife_csid_check_cust_node( + struct cam_csid_hw_reserve_resource_args *reserve, + struct cam_ife_csid_ver1_hw *csid_hw) +{ + struct cam_ife_csid_ver1_reg_info *csid_reg; + + if (!reserve->in_port->cust_node) + return 0; + + if (reserve->in_port->usage_type == CAM_ISP_RES_USAGE_DUAL) { + CAM_ERR(CAM_ISP, + "Dual IFE is not supported for cust_node %u", + reserve->in_port->cust_node); + return -EINVAL; + } + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (!(csid_reg->csid_cust_node_map[csid_hw->hw_intf->hw_idx] & + BIT(reserve->in_port->cust_node))) { + CAM_ERR(CAM_ISP, + "Invalid CSID:%u cust_node %u", + csid_hw->hw_intf->hw_idx, + reserve->in_port->cust_node); + return -EINVAL; + } + + return 0; +} + +static bool cam_ife_csid_ver1_is_width_valid_by_fuse( + struct cam_ife_csid_ver1_hw *csid_hw, + uint32_t width) +{ + struct cam_ife_csid_ver1_reg_info *csid_reg; + uint32_t fuse_val = UINT_MAX; + + cam_cpas_is_feature_supported(CAM_CPAS_MP_LIMIT_FUSE, + CAM_CPAS_HW_IDX_ANY, &fuse_val); + + if (fuse_val == UINT_MAX) { + CAM_DBG(CAM_ISP, "CSID[%u] Fuse not present", + csid_hw->hw_intf->hw_idx); + return true; + } + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (fuse_val > csid_reg->width_fuse_max_val) { + CAM_ERR(CAM_ISP, "Invalid fuse value %u", fuse_val); + return false; + } + + if (width > csid_reg->fused_max_width[fuse_val]) { + 
CAM_ERR(CAM_ISP, + "CSID[%u] Resolution not supported required_width: %d max_supported_width: %d", + csid_hw->hw_intf->hw_idx, + width, csid_reg->fused_max_width[fuse_val]); + return false; + } + + return true; +} + +static bool cam_ife_csid_ver1_is_width_valid_by_dt( + struct cam_ife_csid_ver1_hw *csid_hw, + uint32_t width) +{ + struct cam_csid_soc_private *soc_private = NULL; + + soc_private = (struct cam_csid_soc_private *) + csid_hw->hw_info->soc_info.soc_private; + + if (!soc_private->max_width_enabled) + return true; + + if (width > soc_private->max_width) { + CAM_ERR(CAM_ISP, + "CSID[%u] Resolution not supported required_width: %d max_supported_width: %d", + csid_hw->hw_intf->hw_idx, + width, soc_private->max_width); + return false; + } + + return true; +} + +bool cam_ife_csid_ver1_is_width_valid( + struct cam_csid_hw_reserve_resource_args *reserve, + struct cam_ife_csid_ver1_hw *csid_hw) +{ + uint32_t width = 0; + + if (reserve->res_id != CAM_IFE_PIX_PATH_RES_IPP) + return true; + + if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER || + reserve->sync_mode == CAM_ISP_HW_SYNC_NONE) + width = reserve->in_port->left_stop - + reserve->in_port->left_start + 1; + else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + width = reserve->in_port->right_stop - + reserve->in_port->right_start + 1; + + if (!cam_ife_csid_ver1_is_width_valid_by_fuse(csid_hw, width)) { + CAM_ERR(CAM_ISP, "CSID[%u] width limited by fuse", + csid_hw->hw_intf->hw_idx); + return false; + } + + if (!cam_ife_csid_ver1_is_width_valid_by_dt(csid_hw, width)) { + CAM_ERR(CAM_ISP, "CSID[%u] width limited by dt", + csid_hw->hw_intf->hw_idx); + return false; + } + + return true; +} + +static int cam_ife_csid_ver1_in_port_validate( + struct cam_csid_hw_reserve_resource_args *reserve, + struct cam_ife_csid_ver1_hw *csid_hw) +{ + int rc = 0; + + /* check in port args */ + rc = cam_ife_csid_check_in_port_args(reserve, + csid_hw->hw_intf->hw_idx); + if (rc) + goto err; + + if 
(!cam_ife_csid_ver1_is_width_valid(reserve, csid_hw)) + { rc = -EINVAL; goto err; } + + if (reserve->in_port->cust_node) { + rc = cam_ife_csid_check_cust_node(reserve, csid_hw); + + if (rc) { + CAM_ERR(CAM_ISP, "Custom node config error"); + goto err; + } + } + + if (csid_hw->counters.csi2_reserve_cnt) { + + if (csid_hw->res_type != reserve->in_port->res_type) { + CAM_ERR(CAM_ISP, + "CSID[%d ]Invalid res[%x] in_res_type[%x]", + csid_hw->hw_intf->hw_idx, + csid_hw->res_type, + reserve->in_port->res_type); + rc = -EINVAL; + goto err; + } + + if (csid_hw->rx_cfg.lane_cfg != + reserve->in_port->lane_cfg || + csid_hw->rx_cfg.lane_type != + reserve->in_port->lane_type || + csid_hw->rx_cfg.lane_num != + reserve->in_port->lane_num) { + CAM_ERR(CAM_ISP, + "[%d] lane: num[%d %d] type[%d %d] cfg[%d %d]", + csid_hw->hw_intf->hw_idx, + csid_hw->rx_cfg.lane_num, + reserve->in_port->lane_num, + csid_hw->rx_cfg.lane_type, + reserve->in_port->lane_type, + csid_hw->rx_cfg.lane_cfg, + reserve->in_port->lane_cfg); + rc = -EINVAL; + goto err; + } + } + + return rc; +err: + CAM_ERR(CAM_ISP, "Invalid args csid[%d] rc %d", + csid_hw->hw_intf->hw_idx, rc); + return rc; +} + + +int cam_ife_csid_ver1_reserve(void *hw_priv, + void *reserve_args, uint32_t arg_size) +{ + + struct cam_ife_csid_ver1_hw *csid_hw; + struct cam_hw_info *hw_info; + struct cam_isp_resource_node *res = NULL; + struct cam_csid_hw_reserve_resource_args *reserve; + struct cam_ife_csid_ver1_path_cfg *path_cfg; + uint32_t cid; + int rc = 0; + + reserve = (struct cam_csid_hw_reserve_resource_args *)reserve_args; + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + + if (reserve->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID %d invalid Res_id %d", + csid_hw->hw_intf->hw_idx, reserve->res_id); + return -EINVAL; + } + res = &csid_hw->path_res[reserve->res_id]; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_DBG(CAM_ISP, "CSID %d Res_id %d state %d", + 
csid_hw->hw_intf->hw_idx, reserve->res_id, + res->res_state); + return -EINVAL; + } + + rc = cam_ife_csid_ver1_in_port_validate(reserve, csid_hw); + + CAM_DBG(CAM_ISP, "CSID[%d] res_id %d", + csid_hw->hw_intf->hw_idx, reserve->res_id); + + if (rc) { + CAM_DBG(CAM_ISP, "CSID %d Res_id %d port validation failed", + csid_hw->hw_intf->hw_idx, reserve->res_id); + return rc; + } + + path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv; + + if (!path_cfg) { + CAM_ERR(CAM_ISP, + "CSID %d Unallocated Res_id %d state %d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + res->res_state); + return -EINVAL; + } + + rc = cam_ife_csid_cid_reserve(csid_hw->cid_data, &cid, + csid_hw->hw_intf->hw_idx, reserve); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID %d Res_id %d state %d invalid cid %d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + res->res_state, cid); + return rc; + } + + rc = cam_ife_csid_hw_ver1_hw_cfg(csid_hw, path_cfg, + reserve, cid); + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + reserve->node_res = res; + csid_hw->event_cb = reserve->event_cb; + csid_hw->token = reserve->cb_priv; + + CAM_DBG(CAM_ISP, "CSID %d Resource[id:%d name:%s] state %d cid %d rc %d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + res->res_name, res->res_state, cid, rc); + + return 0; +} + +int cam_ife_csid_ver1_release(void *hw_priv, + void *release_args, uint32_t arg_size) +{ + struct cam_ife_csid_ver1_hw *csid_hw; + struct cam_hw_info *hw_info; + struct cam_isp_resource_node *res = NULL; + struct cam_ife_csid_ver1_path_cfg *path_cfg; + int rc = 0; + + if (!hw_priv || !release_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + res = (struct cam_isp_resource_node *)release_args; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d", + 
csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + mutex_lock(&csid_hw->hw_info->hw_mutex); + + if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH && + res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + rc = -EINVAL; + goto end; + } + + if ((res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) || + (res->res_state >= CAM_ISP_RESOURCE_STATE_STREAMING)) { + CAM_WARN(CAM_ISP, + "CSID:%d res type:%d Res %d in state %d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, + res->res_state); + goto end; + } + + CAM_DBG(CAM_ISP, "CSID:%d res type :%d Resource id:%d", + csid_hw->hw_intf->hw_idx, res->res_type, res->res_id); + + path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv; + + if (path_cfg->cid >= CAM_IFE_CSID_CID_MAX) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid cid:%d", + csid_hw->hw_intf->hw_idx, path_cfg->cid); + rc = -EINVAL; + goto end; + } + + cam_ife_csid_cid_release(&csid_hw->cid_data[path_cfg->cid], + csid_hw->hw_intf->hw_idx, + path_cfg->cid); + memset(path_cfg, 0, sizeof(*path_cfg)); + + if (csid_hw->counters.csi2_reserve_cnt) + csid_hw->counters.csi2_reserve_cnt--; + if (!csid_hw->counters.csi2_reserve_cnt) { + memset(&csid_hw->rx_cfg, 0, + sizeof(struct cam_ife_csid_rx_cfg)); + memset(&csid_hw->debug_info, 0, + sizeof(struct cam_ife_csid_debug_info)); + csid_hw->event_cb = NULL; + csid_hw->token = NULL; + } + + CAM_DBG(CAM_ISP, + "CSID:%d res type :%d Resource[id:%d name:%s] csi2_reserve_cnt:%d", + csid_hw->hw_intf->hw_idx, res->res_type, res->res_id, + res->res_name, csid_hw->counters.csi2_reserve_cnt); + + res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; +end: + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; +} + +static int cam_ife_csid_ver1_start_rdi_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + struct cam_ife_csid_ver1_reg_info 
*csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_path_reg_info *path_reg; + struct cam_ife_csid_core_info *core_info; + void __iomem *mem_base; + uint32_t val; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW || + res->res_id > CAM_IFE_PIX_PATH_RES_RDI_4) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + core_info = csid_hw->core_info; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *)core_info->csid_reg; + + path_reg = csid_reg->rdi_reg[res->res_id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + mem_base = soc_info->reg_map[0].mem_base; + /* Resume at frame boundary */ + cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY, + mem_base + path_reg->ctrl_addr); + + CAM_DBG(CAM_ISP, "CSID:%d start: %s", + csid_hw->hw_intf->hw_idx, res->res_name); + + val = path_reg->fatal_err_mask | path_reg->non_fatal_err_mask | + csid_hw->debug_info.path_mask | + IFE_CSID_VER1_PATH_INFO_RST_DONE; + cam_io_w_mb(val, mem_base + path_reg->irq_mask_addr); + + res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static int cam_ife_csid_ver1_start_udi_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_path_reg_info *path_reg; + struct cam_ife_csid_core_info *core_info; + void __iomem *mem_base; + uint32_t val, id; + + if ((res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) || + (res->res_id < CAM_IFE_PIX_PATH_RES_UDI_0 || + res->res_id > CAM_IFE_PIX_PATH_RES_UDI_2)) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + 
res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + core_info = csid_hw->core_info; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *)core_info->csid_reg; + + id = res->res_id - CAM_IFE_PIX_PATH_RES_UDI_0; + + path_reg = csid_reg->udi_reg[id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d UDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + mem_base = soc_info->reg_map[0].mem_base; + /* Resume at frame boundary */ + cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY, + mem_base + path_reg->ctrl_addr); + + CAM_DBG(CAM_ISP, "CSID:%d Start:%s", + csid_hw->hw_intf->hw_idx, res->res_name); + + val = path_reg->fatal_err_mask | path_reg->non_fatal_err_mask | + csid_hw->debug_info.path_mask | + IFE_CSID_VER1_PATH_INFO_RST_DONE; + cam_io_w_mb(val, mem_base + path_reg->irq_mask_addr); + + res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static int cam_ife_csid_ver1_start_pix_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + + int rc = 0; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + uint32_t val = 0; + struct cam_ife_csid_ver1_path_cfg *path_cfg; + struct cam_ife_csid_ver1_path_cfg *ipp_path_cfg; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) { + CAM_ERR(CAM_ISP, + "CSID:%d %s path res type:%d res_id:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, + res->res_name, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) + path_reg = csid_reg->ipp_reg; + else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) + path_reg = csid_reg->ppp_reg; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d PIX:%d is not supported on 
HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv; + + if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) { + + path_reg = csid_reg->ipp_reg; + val = path_reg->halt_master_sel_master_val << + path_reg->halt_master_sel_shift; + + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER) { + /* Set start mode as master */ + val |= path_reg->halt_mode_master << + path_reg->halt_mode_shift; + } else if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_SLAVE) { + /* Set start mode as slave */ + val |= path_reg->halt_mode_slave << + path_reg->halt_mode_shift; + } else { + /* Default is internal halt mode */ + val = 0; + } + } else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) { + + path_reg = csid_reg->ppp_reg; + + /* for dual case + * set ppp as slave + * if current csid is set as master set + * start_master_sel_val as 3 + */ + + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_NONE) { + val = 0; + } else { + val = path_reg->halt_mode_slave << + path_reg->halt_mode_shift; + /* Set halt mode as master */ + + ipp_path_cfg = (struct cam_ife_csid_ver1_path_cfg *) + csid_hw->path_res + [CAM_IFE_PIX_PATH_RES_IPP].res_priv; + + if (ipp_path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER) + val |= path_reg->halt_master_sel_master_val << + path_reg->halt_master_sel_shift; + } + } + + /* + * Resume at frame boundary if Master or No Sync. + * Slave will get resume command from Master. 
+ */ + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER || + path_cfg->sync_mode == CAM_ISP_HW_SYNC_NONE) + val |= path_reg->resume_frame_boundary; + + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + path_reg->ctrl_addr); + + CAM_DBG(CAM_ISP, "CSID:%d Resource[id:%d name:%s]ctrl val: 0x%x", + csid_hw->hw_intf->hw_idx, + res->res_id, res->res_name, val); + + val = path_reg->fatal_err_mask | path_reg->non_fatal_err_mask | + csid_hw->debug_info.path_mask | + IFE_CSID_VER1_PATH_INFO_RST_DONE; + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + path_reg->irq_mask_addr); + + res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static int cam_ife_csid_ver1_enable_csi2(struct cam_ife_csid_ver1_hw *csid_hw) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + const struct cam_ife_csid_csi2_rx_reg_info *csi2_reg; + uint32_t val = 0; + struct cam_ife_csid_rx_cfg *rx_cfg; + int vc_full_width; + + if (csid_hw->flags.rx_enabled) + return 0; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + soc_info = &csid_hw->hw_info->soc_info; + rx_cfg = &csid_hw->rx_cfg; + + /*Configure Rx cfg0 */ + + val = (rx_cfg->lane_cfg << csi2_reg->lane_cfg_shift) | + ((rx_cfg->lane_num - 1) << csi2_reg->lane_num_shift) | + (rx_cfg->lane_type << csi2_reg->phy_type_shift) | + (rx_cfg->phy_sel << csi2_reg->phy_num_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csi2_reg->cfg0_addr); + + /*Configure Rx cfg1*/ + val = 1 << csi2_reg->misr_enable_shift_val; + val |= 1 << csi2_reg->ecc_correction_shift_en; + + vc_full_width = cam_ife_csid_is_vc_full_width(csid_hw->cid_data); + + if (vc_full_width == 1) { + val |= 1 << csi2_reg->vc_mode_shift_val; + } else if (vc_full_width < 0) { + CAM_ERR(CAM_ISP, "Error VC DT"); + return -EINVAL; + } + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csi2_reg->cfg1_addr); + rc = 
cam_ife_csid_ver1_hw_reset(csid_hw); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%d] hw reset fail", + csid_hw->hw_intf->hw_idx); + return rc; + } + + csid_hw->flags.rx_enabled = true; + + if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) + cam_ife_csid_ver1_init_tpg_hw(csid_hw); + + /* Enable the interrupt based on csid debug info set + * Fatal error mask + * Partly fatal error mask + * Rx reset done irq + */ + val = csi2_reg->fatal_err_mask | csi2_reg->part_fatal_err_mask | + csid_hw->debug_info.rx_mask | IFE_CSID_VER1_RX_RST_DONE; + + /*EPD supported sensors do not send EOT, error will be generated + * if this irq is enabled + */ + if (csid_hw->rx_cfg.epd_supported) + val &= ~IFE_CSID_VER1_RX_CPHY_EOT_RECEPTION; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csi2_reg->irq_mask_addr); + cam_ife_csid_ver1_rx_capture_config(csid_hw); + + return rc; +} + +static int cam_ife_csid_ver1_init_config_rdi_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + const struct cam_ife_csid_ver1_common_reg_info *cmn_reg = NULL; + uint32_t val; + struct cam_ife_csid_ver1_path_cfg *path_cfg; + struct cam_ife_csid_cid_data *cid_data; + void __iomem *mem_base; + struct cam_ife_csid_path_format path_format = {0}; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (!csid_reg->rdi_reg[res->res_id]) { + CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + cmn_reg = csid_reg->cmn_reg; + path_reg = csid_reg->rdi_reg[res->res_id]; + path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv; + cid_data = &csid_hw->cid_data[path_cfg->cid]; + mem_base = soc_info->reg_map[0].mem_base; + rc = 
cam_ife_csid_get_format_rdi(path_cfg->in_format, + path_cfg->out_format, &path_format, path_reg->mipi_pack_supported, false); + if (rc) + return rc; + + /*Configure cfg0: + * Timestamp enable + * VC + * DT + * DT_ID cobination + * Decode Format + * Crop/Drop parameters + * Plain format + * Packing format + */ + val = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc << + cmn_reg->vc_shift_val) | + (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].dt << + cmn_reg->dt_shift_val) | + (path_cfg->cid << cmn_reg->dt_id_shift_val) | + (path_format.decode_fmt << + cmn_reg->decode_format_shift_val); + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) + val |= 1 << path_reg->format_measure_en_shift_val; + + val |= (path_cfg->crop_enable << path_reg->crop_h_en_shift_val) | + (path_cfg->crop_enable << + path_reg->crop_v_en_shift_val); + + if (cmn_reg->drop_supported) + val |= (path_cfg->drop_enable << + path_reg->drop_v_en_shift_val) | + (path_cfg->drop_enable << + path_reg->drop_h_en_shift_val); + + if (path_reg->mipi_pack_supported) + val |= path_format.packing_fmt << + path_reg->packing_fmt_shift_val; + + val |= path_format.plain_fmt << path_reg->plain_fmt_shift_val; + val |= 1 << path_reg->timestamp_en_shift_val; + + cam_io_w_mb(val, mem_base + path_reg->cfg0_addr); + + /*Configure Multi VC DT combo */ + if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) { + val = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].vc << + cmn_reg->multi_vcdt_vc1_shift_val) | + (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].dt << + cmn_reg->multi_vcdt_dt1_shift_val) | + (1 << cmn_reg->multi_vcdt_en_shift_val); + + cam_io_w_mb(val, mem_base + path_reg->multi_vcdt_cfg0_addr); + } + + val = 0; + /*configure cfg1 addr + * Timestamp strobe selection + */ + val |= cmn_reg->timestamp_strobe_val << + cmn_reg->timestamp_stb_sel_shift_val; + + cam_io_w_mb(val, mem_base + path_reg->cfg1_addr); + + if (path_cfg->crop_enable) { + val = (((path_cfg->end_pixel & 
cmn_reg->crop_pix_start_mask) << + cmn_reg->crop_shift_val) | + (path_cfg->start_pixel & cmn_reg->crop_pix_end_mask)); + cam_io_w_mb(val, mem_base + path_reg->hcrop_addr); + CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + val = (((path_cfg->end_line & cmn_reg->crop_line_start_mask) << + csid_reg->cmn_reg->crop_shift_val) | + (path_cfg->start_line & cmn_reg->crop_line_end_mask)); + cam_io_w_mb(val, mem_base + path_reg->vcrop_addr); + CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x", + csid_hw->hw_intf->hw_idx, val); + } + + /* set frame drop pattern to 0 and period to 1 */ + cam_io_w_mb(1, mem_base + path_reg->frm_drop_period_addr); + cam_io_w_mb(0, mem_base + path_reg->frm_drop_pattern_addr); + /* set irq sub sample pattern to 0 and period to 1 */ + cam_io_w_mb(1, mem_base + path_reg->irq_subsample_period_addr); + cam_io_w_mb(0, mem_base + path_reg->irq_subsample_pattern_addr); + + /*TODO Need to check for any hw errata like 480 and 580*/ + /* set pxl drop pattern to 0 and period to 1 */ + cam_io_w_mb(0, mem_base + path_reg->pix_drop_pattern_addr); + cam_io_w_mb(1, mem_base + path_reg->pix_drop_period_addr); + + /* set line drop pattern to 0 and period to 1 */ + cam_io_w_mb(0, mem_base + path_reg->line_drop_pattern_addr); + cam_io_w_mb(1, mem_base + path_reg->line_drop_period_addr); + + if (path_reg->overflow_ctrl_en) { + val = path_reg->overflow_ctrl_en | + path_reg->overflow_ctrl_mode_val; + cam_io_w_mb(val, mem_base + path_reg->err_recovery_cfg0_addr); + } + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) { + val = cam_io_r_mb(mem_base + + path_reg->format_measure_cfg0_addr); + val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, mem_base + + path_reg->format_measure_cfg0_addr); + } + + /* Enable the RDI path */ + val = cam_io_r_mb(mem_base + path_reg->cfg0_addr); + val |= (1 << cmn_reg->path_en_shift_val); + cam_io_w_mb(val, mem_base + 
path_reg->cfg0_addr); + CAM_DBG(CAM_ISP, "%s cfg0 0x%x", res->res_name, val); + res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW; + + return rc; +} + +static int cam_ife_csid_ver1_init_config_udi_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + const struct cam_ife_csid_ver1_common_reg_info *cmn_reg = NULL; + uint32_t val; + struct cam_ife_csid_ver1_path_cfg *path_cfg; + struct cam_ife_csid_cid_data *cid_data; + void __iomem *mem_base; + struct cam_ife_csid_path_format path_format = {0}; + uint32_t id; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (res->res_id < CAM_IFE_PIX_PATH_RES_UDI_0 || + res->res_id > CAM_IFE_PIX_PATH_RES_UDI_2) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + id = res->res_id - CAM_IFE_PIX_PATH_RES_UDI_0; + if (!csid_reg->udi_reg[id]) { + CAM_ERR(CAM_ISP, "CSID:%d UDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + cmn_reg = csid_reg->cmn_reg; + path_reg = csid_reg->udi_reg[id]; + path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv; + cid_data = &csid_hw->cid_data[path_cfg->cid]; + mem_base = soc_info->reg_map[0].mem_base; + rc = cam_ife_csid_get_format_rdi(path_cfg->in_format, + path_cfg->out_format, &path_format, path_reg->mipi_pack_supported, false); + if (rc) + return rc; + + /*Configure cfg0: + * Timestamp enable + * VC + * DT + * DT_ID cobination + * Decode Format + * Crop/Drop parameters + * Plain format + * Packing format + */ + val = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc << + cmn_reg->vc_shift_val) | + (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].dt << + cmn_reg->dt_shift_val) | + (path_cfg->cid << 
cmn_reg->dt_id_shift_val) | + (path_format.decode_fmt << cmn_reg->decode_format_shift_val); + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) + val |= 1 << path_reg->format_measure_en_shift_val; + + val |= (path_cfg->crop_enable << path_reg->crop_h_en_shift_val) | + (path_cfg->crop_enable << + path_reg->crop_v_en_shift_val); + + if (cmn_reg->drop_supported) + val |= (path_cfg->drop_enable << + path_reg->drop_v_en_shift_val) | + (path_cfg->drop_enable << + path_reg->drop_h_en_shift_val); + + if (path_reg->mipi_pack_supported) + val |= path_format.packing_fmt << + path_reg->packing_fmt_shift_val; + + val |= path_format.plain_fmt << path_reg->plain_fmt_shift_val; + + cam_io_w_mb(val, mem_base + path_reg->cfg0_addr); + + val = 0; + /*configure cfg1 addr + * Timestamp strobe selection + */ + val |= (1 << path_reg->timestamp_en_shift_val) | + (cmn_reg->timestamp_strobe_val << + cmn_reg->timestamp_stb_sel_shift_val); + + cam_io_w_mb(val, mem_base + path_reg->cfg1_addr); + + if (path_cfg->crop_enable) { + val = (((path_cfg->end_pixel & cmn_reg->crop_pix_start_mask) << + cmn_reg->crop_shift_val) | + (path_cfg->start_pixel & cmn_reg->crop_pix_end_mask)); + cam_io_w_mb(val, mem_base + path_reg->hcrop_addr); + CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + val = (((path_cfg->end_line & cmn_reg->crop_line_start_mask) << + csid_reg->cmn_reg->crop_shift_val) | + (path_cfg->start_line & cmn_reg->crop_line_end_mask)); + cam_io_w_mb(val, mem_base + path_reg->vcrop_addr); + CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x", + csid_hw->hw_intf->hw_idx, val); + } + + /* set frame drop pattern to 0 and period to 1 */ + cam_io_w_mb(1, mem_base + path_reg->frm_drop_period_addr); + cam_io_w_mb(0, mem_base + path_reg->frm_drop_pattern_addr); + /* set irq sub sample pattern to 0 and period to 1 */ + cam_io_w_mb(1, mem_base + path_reg->irq_subsample_period_addr); + cam_io_w_mb(0, mem_base + 
path_reg->irq_subsample_pattern_addr); + + /*TODO Need to check for any hw errata like 480 and 580*/ + /* set pxl drop pattern to 0 and period to 1 */ + cam_io_w_mb(0, mem_base + path_reg->pix_drop_pattern_addr); + cam_io_w_mb(1, mem_base + path_reg->pix_drop_period_addr); + + /* set line drop pattern to 0 and period to 1 */ + cam_io_w_mb(0, mem_base + path_reg->line_drop_pattern_addr); + cam_io_w_mb(1, mem_base + path_reg->line_drop_period_addr); + + /* Enable the UDI path */ + val = cam_io_r_mb(mem_base + path_reg->cfg0_addr); + val |= (1 << cmn_reg->path_en_shift_val); + cam_io_w_mb(val, mem_base + path_reg->cfg0_addr); + CAM_DBG(CAM_ISP, "%s cfg0 0x%x", res->res_name, val); + + if (path_reg->overflow_ctrl_en) { + val = path_reg->overflow_ctrl_en | + path_reg->overflow_ctrl_mode_val; + cam_io_w_mb(val, mem_base + path_reg->err_recovery_cfg0_addr); + } + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) { + val = cam_io_r_mb(mem_base + + path_reg->format_measure_cfg0_addr); + val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, mem_base + + path_reg->format_measure_cfg0_addr); + } + + res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW; + return rc; +} + +static int cam_ife_csid_ver1_init_config_pxl_path( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + const struct cam_ife_csid_ver1_common_reg_info *cmn_reg = NULL; + uint32_t val = 0; + struct cam_ife_csid_ver1_path_cfg *path_cfg; + struct cam_ife_csid_cid_data *cid_data; + void __iomem *mem_base; + struct cam_ife_csid_path_format path_format = {0}; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) + path_reg = csid_reg->ipp_reg; + else if 
(res->res_id == CAM_IFE_PIX_PATH_RES_PPP) + path_reg = csid_reg->ppp_reg; + + if (!path_reg) + return -EINVAL; + + cmn_reg = csid_reg->cmn_reg; + + path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv; + cid_data = &csid_hw->cid_data[path_cfg->cid]; + mem_base = soc_info->reg_map[0].mem_base; + + rc = cam_ife_csid_get_format_ipp_ppp(path_cfg->in_format, + &path_format); + + /*Configure: + * VC + * DT + * DT_ID cobination + * Decode Format + * Early eof + * timestamp enable + * crop/drop enable + * Binning + */ + val = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc << + cmn_reg->vc_shift_val) | + (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].dt << + cmn_reg->dt_shift_val) | + (path_cfg->cid << cmn_reg->dt_id_shift_val) | + (path_format.decode_fmt << + cmn_reg->decode_format_shift_val); + + /*enable early eof based on crop enable */ + if (!(csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_DISABLE_EARLY_EOF) && + cmn_reg->early_eof_supported && + path_cfg->crop_enable) + val |= (1 << path_reg->early_eof_en_shift_val); + + if (cmn_reg->drop_supported) + val |= (path_cfg->drop_enable << + path_reg->drop_v_en_shift_val) | + (path_cfg->drop_enable << + path_reg->drop_h_en_shift_val); + + val |= (1 << path_reg->pix_store_en_shift_val) | + (1 << path_reg->timestamp_en_shift_val); + + val |= (path_cfg->crop_enable << path_reg->crop_h_en_shift_val) | + (path_cfg->crop_enable << + path_reg->crop_v_en_shift_val); + + if ((path_reg->binning_supported & CAM_IFE_CSID_BIN_HORIZONTAL) && + path_cfg->horizontal_bin) + val |= 1 << path_reg->bin_h_en_shift_val; + + if ((path_reg->binning_supported & CAM_IFE_CSID_BIN_VERTICAL) && + path_cfg->vertical_bin) + val |= 1 << path_reg->bin_v_en_shift_val; + + if ((path_reg->binning_supported & CAM_IFE_CSID_BIN_QCFA) && + path_cfg->qcfa_bin) + val |= 1 << path_reg->bin_qcfa_en_shift_val; + + if ((path_cfg->qcfa_bin || path_cfg->vertical_bin || + path_cfg->horizontal_bin) && path_reg->binning_supported) + val |= 1 << 
path_reg->bin_en_shift_val; + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) + val |= 1 << path_reg->format_measure_en_shift_val; + + CAM_DBG(CAM_ISP, "CSID[%u] cfg0_addr val %x", + csid_hw->hw_intf->hw_idx, val); + + cam_io_w_mb(val, mem_base + path_reg->cfg0_addr); + + /*Configure Multi VC DT combo */ + if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) { + val = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].vc << + cmn_reg->multi_vcdt_vc1_shift_val) | + (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].dt << + cmn_reg->multi_vcdt_dt1_shift_val) | + (1 << cmn_reg->multi_vcdt_en_shift_val); + cam_io_w_mb(val, mem_base + path_reg->multi_vcdt_cfg0_addr); + } + + /*configure cfg1 addr + * timestamp strobe selection + */ + + val = cmn_reg->timestamp_strobe_val << + cmn_reg->timestamp_stb_sel_shift_val; + + cam_io_w_mb(val, mem_base + path_reg->cfg1_addr); + + if (path_cfg->crop_enable) { + val = (((path_cfg->end_pixel & cmn_reg->crop_pix_start_mask) << + cmn_reg->crop_shift_val) | + (path_cfg->start_pixel & cmn_reg->crop_pix_end_mask)); + cam_io_w_mb(val, mem_base + path_reg->hcrop_addr); + CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + val = (((path_cfg->end_line & cmn_reg->crop_line_start_mask) << + csid_reg->cmn_reg->crop_shift_val) | + (path_cfg->start_line & cmn_reg->crop_line_end_mask)); + cam_io_w_mb(val, mem_base + path_reg->vcrop_addr); + CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x", + csid_hw->hw_intf->hw_idx, val); + } + + /* set frame drop pattern to 0 and period to 1 */ + cam_io_w_mb(1, mem_base + path_reg->frm_drop_period_addr); + cam_io_w_mb(0, mem_base + path_reg->frm_drop_pattern_addr); + /* set irq sub sample pattern to 0 and period to 1 */ + cam_io_w_mb(1, mem_base + path_reg->irq_subsample_period_addr); + cam_io_w_mb(0, mem_base + path_reg->irq_subsample_pattern_addr); + /* set pxl drop pattern to 0 and period to 1 */ + cam_io_w_mb(0, 
mem_base + path_reg->pix_drop_pattern_addr); + cam_io_w_mb(1, mem_base + path_reg->pix_drop_period_addr); + /* set line drop pattern to 0 and period to 1 */ + cam_io_w_mb(0, mem_base + path_reg->line_drop_pattern_addr); + cam_io_w_mb(1, mem_base + path_reg->line_drop_period_addr); + + if (path_reg->overflow_ctrl_en) { + val = path_reg->overflow_ctrl_en | + path_reg->overflow_ctrl_mode_val; + cam_io_w_mb(val, mem_base + path_reg->err_recovery_cfg0_addr); + } + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) { + val = cam_io_r_mb(mem_base + + path_reg->format_measure_cfg0_addr); + val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, + mem_base + path_reg->format_measure_cfg0_addr); + } + + /* Enable the Pxl path */ + val = cam_io_r_mb(mem_base + path_reg->cfg0_addr); + val |= (1 << cmn_reg->path_en_shift_val); + cam_io_w_mb(val, mem_base + path_reg->cfg0_addr); + CAM_DBG(CAM_ISP, "%s cfg0 0x%x", res->res_name, val); + + res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW; + + return rc; +} + +static int cam_ife_csid_ver1_enable_hw(struct cam_ife_csid_ver1_hw *csid_hw) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + uint32_t clk_lvl, i, val; + unsigned long flags; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + mutex_lock(&csid_hw->hw_info->hw_mutex); + + /* overflow check before increment */ + if (csid_hw->hw_info->open_count == UINT_MAX) { + CAM_ERR(CAM_ISP, "CSID:%d Open count reached max", + csid_hw->hw_intf->hw_idx); + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return -EINVAL; + } + + /* Increment ref Count */ + csid_hw->hw_info->open_count++; + + if (csid_hw->hw_info->open_count > 1) { + CAM_DBG(CAM_ISP, "CSID hw has already been enabled"); + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; + } + mutex_unlock(&csid_hw->hw_info->hw_mutex); + + rc = 
cam_soc_util_get_clk_level(soc_info, csid_hw->clk_rate, + soc_info->src_clk_idx, &clk_lvl); + + CAM_DBG(CAM_ISP, + "CSID[%d] clk lvl %u received clk_rate %u applied clk_rate %lu rc %d", + csid_hw->hw_intf->hw_idx, clk_lvl, csid_hw->clk_rate, + soc_info->applied_src_clk_rates.sw_client, rc); + + rc = cam_ife_csid_enable_soc_resources(soc_info, clk_lvl); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed", + csid_hw->hw_intf->hw_idx); + goto err; + } + csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP; + + rc = cam_ife_csid_ver1_global_reset(csid_hw); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%d] global reset failed"); + goto disable_soc; + } + + + /* Clear IRQs */ + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_clear_addr); + + cam_io_w_mb(csid_reg->csi2_reg->irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->irq_clear_addr); + + if (csid_reg->cmn_reg->num_pix) + cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->irq_clear_addr); + + if (csid_reg->cmn_reg->num_ppp) + cam_io_w_mb(csid_reg->cmn_reg->ppp_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->irq_clear_addr); + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) + cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->irq_clear_addr); + + for (i = 0; i < csid_reg->cmn_reg->num_udis; i++) + cam_io_w_mb(csid_reg->cmn_reg->udi_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->udi_reg[i]->irq_clear_addr); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + + /* Read hw version */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->hw_version_addr); + + CAM_DBG(CAM_ISP, "CSID:%d Enabled CSID HW version: 0x%x", + csid_hw->hw_intf->hw_idx, val); + memset(&csid_hw->timestamp, 0, sizeof(struct cam_ife_csid_timestamp)); + spin_lock_irqsave(&csid_hw->lock_state, flags); + 
csid_hw->flags.fatal_err_detected = false; + csid_hw->flags.device_enabled = true; + spin_unlock_irqrestore(&csid_hw->lock_state, flags); + + return rc; + +disable_soc: + cam_ife_csid_disable_soc_resources(soc_info); + csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN; +err: + mutex_lock(&csid_hw->hw_info->hw_mutex); + csid_hw->hw_info->open_count--; + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; +} + +int cam_ife_csid_ver1_init_hw(void *hw_priv, + void *init_args, uint32_t arg_size) +{ + struct cam_ife_csid_ver1_hw *csid_hw = NULL; + struct cam_isp_resource_node *res; + struct cam_hw_info *hw_info; + int rc = 0; + + if (!hw_priv || !init_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + + rc = cam_ife_csid_ver1_enable_hw(csid_hw); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%d Enable hw fail", + csid_hw->hw_intf->hw_idx); + return rc; + } + + mutex_lock(&csid_hw->hw_info->hw_mutex); + res = (struct cam_isp_resource_node *)init_args; + if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH && + res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res tpe:%d res id%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + rc = -EINVAL; + goto end; + } + + if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) && + (res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) { + CAM_ERR(CAM_ISP, + "CSID:%d res type:%d res_id:%dInvalid state %d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_state); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d", + csid_hw->hw_intf->hw_idx, res->res_type, res->res_id); + res = (struct cam_isp_resource_node *)init_args; + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_PPP: + rc = cam_ife_csid_ver1_init_config_pxl_path( + csid_hw, 
res);
		break;
	case CAM_IFE_PIX_PATH_RES_RDI_0:
	case CAM_IFE_PIX_PATH_RES_RDI_1:
	case CAM_IFE_PIX_PATH_RES_RDI_2:
	case CAM_IFE_PIX_PATH_RES_RDI_3:
	case CAM_IFE_PIX_PATH_RES_RDI_4:
		rc = cam_ife_csid_ver1_init_config_rdi_path(
			csid_hw, res);
		break;
	case CAM_IFE_PIX_PATH_RES_UDI_0:
	case CAM_IFE_PIX_PATH_RES_UDI_1:
	case CAM_IFE_PIX_PATH_RES_UDI_2:
		rc = cam_ife_csid_ver1_init_config_udi_path(
			csid_hw, res);
		break;
	default:
		CAM_ERR(CAM_ISP, "CSID:%d Invalid Res id %d",
			csid_hw->hw_intf->hw_idx, res->res_id);
		rc = -EINVAL;
		break;
	}

	if (rc < 0) {
		CAM_ERR(CAM_ISP, "CSID:%d res_id:%d path init configuration failed with rc: %d",
			csid_hw->hw_intf->hw_idx, res->res_id, rc);
		goto end;
	}

	/* Reset after path config so the programmed path comes up clean */
	rc = cam_ife_csid_ver1_hw_reset(csid_hw);

	if (rc < 0)
		CAM_ERR(CAM_ISP, "CSID:%d Failed in HW reset",
			csid_hw->hw_intf->hw_idx);

	mutex_unlock(&csid_hw->hw_info->hw_mutex);

	/*
	 * NOTE(review): a HW-reset failure above is logged but NOT
	 * propagated -- callers always see 0 on this path. Confirm this
	 * best-effort behaviour is intentional.
	 */
	return 0;
end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}

/*
 * cam_ife_csid_ver1_disable_csi2() - quiesce the CSI2 receiver.
 *
 * Masks all Rx interrupts and zeroes the Rx CFG0/CFG1 registers, then
 * clears flags.rx_enabled. Idempotent: returns 0 immediately if the
 * receiver is already disabled. Always returns 0.
 */
static int cam_ife_csid_ver1_disable_csi2(
	struct cam_ife_csid_ver1_hw *csid_hw)
{
	const struct cam_ife_csid_ver1_reg_info *csid_reg;
	struct cam_hw_soc_info *soc_info;

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = (struct cam_ife_csid_ver1_reg_info *)
			csid_hw->core_info->csid_reg;

	if (!csid_hw->flags.rx_enabled) {
		CAM_DBG(CAM_ISP, "CSID:%d Rx already disabled",
			csid_hw->hw_intf->hw_idx);
		return 0;
	}

	/* Disable the CSI2 rx interrupts */
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->irq_mask_addr);

	/* Reset the Rx CFG registers */
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->cfg0_addr);
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->cfg1_addr);
	csid_hw->flags.rx_enabled = false;

	CAM_DBG(CAM_ISP, "CSID:%d Disable csi2 rx",
		csid_hw->hw_intf->hw_idx);

	return 0;
}

static int cam_ife_csid_ver1_disable_hw(
	struct cam_ife_csid_ver1_hw *csid_hw)
{
	const
struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + int rc = 0; + unsigned long flags; + + mutex_lock(&csid_hw->hw_info->hw_mutex); + /* Check for refcount */ + if (!csid_hw->hw_info->open_count) { + CAM_WARN(CAM_ISP, "Unbalanced disable_hw"); + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; + } + + /* Decrement ref Count */ + csid_hw->hw_info->open_count--; + + if (csid_hw->hw_info->open_count) { + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; + } + mutex_unlock(&csid_hw->hw_info->hw_mutex); + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + cam_ife_csid_ver1_disable_csi2(csid_hw); + cam_ife_csid_ver1_global_reset(csid_hw); + /* Disable the top IRQ interrupt */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_mask_addr); + + rc = cam_ife_csid_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed", + csid_hw->hw_intf->hw_idx); + + spin_lock_irqsave(&csid_hw->lock_state, flags); + csid_hw->flags.device_enabled = false; + spin_unlock_irqrestore(&csid_hw->lock_state, flags); + csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN; + csid_hw->counters.error_irq_count = 0; + + return rc; +} + +int cam_ife_csid_ver1_deinit_hw(void *hw_priv, + void *deinit_args, uint32_t arg_size) +{ + struct cam_ife_csid_ver1_hw *csid_hw = NULL; + struct cam_isp_resource_node *res; + struct cam_hw_info *hw_info; + int rc = 0; + + if (!hw_priv || !deinit_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "CSID:Invalid arguments"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + res = (struct cam_isp_resource_node *)deinit_args; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d", + csid_hw->hw_intf->hw_idx, + res->res_type); + 
return -EINVAL; + } + + if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state", + csid_hw->hw_intf->hw_idx, + res->res_id); + return -EINVAL; + } + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_PPP: + rc = cam_ife_csid_ver1_deinit_pxl_path(csid_hw, res); + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + rc = cam_ife_csid_ver1_deinit_rdi_path(csid_hw, res); + break; + case CAM_IFE_PIX_PATH_RES_UDI_0: + case CAM_IFE_PIX_PATH_RES_UDI_1: + case CAM_IFE_PIX_PATH_RES_UDI_2: + rc = cam_ife_csid_ver1_deinit_udi_path(csid_hw, res); + break; + default: + CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d", + csid_hw->hw_intf->hw_idx, res->res_type); + break; + } + + /* Disable CSID HW */ + cam_ife_csid_ver1_disable_hw(csid_hw); + CAM_DBG(CAM_ISP, "De-Init CSID %d Path: %d", + csid_hw->hw_intf->hw_idx, res->res_id); + + return rc; +} + +int cam_ife_csid_ver1_start(void *hw_priv, void *args, + uint32_t arg_size) +{ + struct cam_ife_csid_ver1_hw *csid_hw = NULL; + struct cam_isp_resource_node *res; + struct cam_hw_info *hw_info; + struct cam_csid_hw_start_args *start_args; + int rc = 0; + uint32_t i = 0; + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + start_args = (struct cam_csid_hw_start_args *)args; + + csid_hw->flags.sof_irq_triggered = false; + csid_hw->counters.irq_debug_cnt = 0; + + CAM_DBG(CAM_ISP, "CSID:%d num_res %u", + csid_hw->hw_intf->hw_idx, start_args->num_res); + + cam_ife_csid_ver1_enable_csi2(csid_hw); + + if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) + cam_ife_csid_ver1_tpg_start(csid_hw); + + for (i = 0; i < start_args->num_res; i++) { + + res = start_args->node_res[i]; + if (!res || res->res_type != CAM_ISP_RESOURCE_PIX_PATH) { + CAM_ERR(CAM_ISP, "CSID:%d: res: %p, 
res type: %d", + csid_hw->hw_intf->hw_idx, res, (res) ? res->res_type : -1); + rc = -EINVAL; + goto end; + } + CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res: %s", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_name); + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_PPP: + rc = cam_ife_csid_ver1_start_pix_path(csid_hw, res); + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + rc = cam_ife_csid_ver1_start_rdi_path(csid_hw, res); + break; + case CAM_IFE_PIX_PATH_RES_UDI_0: + case CAM_IFE_PIX_PATH_RES_UDI_1: + case CAM_IFE_PIX_PATH_RES_UDI_2: + rc = cam_ife_csid_ver1_start_udi_path(csid_hw, res); + break; + default: + CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d", + csid_hw->hw_intf->hw_idx, res->res_type); + rc = -EINVAL; + break; + } + } + + if (rc && res) + CAM_ERR(CAM_ISP, "CSID:%d start fail res type:%d res id:%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + if (!rc) + csid_hw->flags.reset_awaited = false; +end: + return rc; +} + +static int cam_ife_csid_poll_stop_status( + struct cam_ife_csid_ver1_hw *csid_hw, + uint32_t res_mask) +{ + int rc = 0, id; + uint32_t status_addr = 0, val = 0, res_id = 0; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + for (; res_id < CAM_IFE_PIX_PATH_RES_MAX; res_id++, res_mask >>= 1) { + if ((res_mask & 0x1) == 0) + continue; + val = 0; + + if (res_id == CAM_IFE_PIX_PATH_RES_IPP) { + status_addr = + csid_reg->ipp_reg->status_addr; + } else if (res_id == CAM_IFE_PIX_PATH_RES_PPP) { + status_addr = + csid_reg->ppp_reg->status_addr; + } else if (res_id == CAM_IFE_PIX_PATH_RES_RDI_0 || + res_id == CAM_IFE_PIX_PATH_RES_RDI_1 || + res_id == CAM_IFE_PIX_PATH_RES_RDI_2 || + res_id 
== CAM_IFE_PIX_PATH_RES_RDI_3 || + res_id == CAM_IFE_PIX_PATH_RES_RDI_4) { + status_addr = + csid_reg->rdi_reg[res_id]->status_addr; + } else if (res_id == CAM_IFE_PIX_PATH_RES_UDI_0 || + res_id == CAM_IFE_PIX_PATH_RES_UDI_1 || + res_id == CAM_IFE_PIX_PATH_RES_UDI_2) { + id = res_id - CAM_IFE_PIX_PATH_RES_UDI_0; + status_addr = + csid_reg->udi_reg[id]->status_addr; + } else { + CAM_ERR(CAM_ISP, "Invalid res_id: %u", res_id); + rc = -EINVAL; + break; + } + + CAM_DBG(CAM_ISP, "start polling CSID:%d res_id:%d", + csid_hw->hw_intf->hw_idx, res_id); + + rc = cam_common_read_poll_timeout( + soc_info->reg_map[0].mem_base + + status_addr, + CAM_IFE_CSID_TIMEOUT_SLEEP_US, + CAM_IFE_CSID_TIMEOUT_ALL_US, + 0x1, 0x1, &val); + + if (rc < 0) { + CAM_ERR(CAM_ISP, "CSID:%d res:%d halt failed rc %d", + csid_hw->hw_intf->hw_idx, res_id, rc); + rc = -ETIMEDOUT; + break; + } + + CAM_DBG(CAM_ISP, "End polling CSID:%d res_id:%d", + csid_hw->hw_intf->hw_idx, res_id); + } + + return rc; +} + +static int cam_ife_csid_change_pxl_halt_mode( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_ife_csid_hw_halt_args *csid_halt) +{ + uint32_t val = 0; + const struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_isp_resource_node *res; + + res = csid_halt->node_res; + + csid_reg = csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_id != CAM_IFE_PIX_PATH_RES_IPP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res id %d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, "CSID:%d Res:%d in invalid state:%d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_state); + return -EINVAL; + } + + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->irq_mask_addr); + + /* configure Halt for slave */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->ctrl_addr); + val &= ~0xC; + val |= 
(csid_halt->halt_mode << 2); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->ctrl_addr); + CAM_DBG(CAM_ISP, "CSID:%d IPP path Res halt mode:%d configured:%x", + csid_hw->hw_intf->hw_idx, csid_halt->halt_mode, val); + + return 0; +} + +int cam_ife_csid_halt(struct cam_ife_csid_ver1_hw *csid_hw, + void *halt_args) +{ + struct cam_isp_resource_node *res; + struct cam_ife_csid_hw_halt_args *csid_halt; + int rc = 0; + + if (!csid_hw || !halt_args) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + csid_halt = (struct cam_ife_csid_hw_halt_args *)halt_args; + + /* Change the halt mode */ + res = csid_halt->node_res; + CAM_DBG(CAM_ISP, "CSID:%d res_type %d res_id %d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id); + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res type %d", + csid_hw->hw_intf->hw_idx, + res->res_type); + return -EINVAL; + } + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + rc = cam_ife_csid_change_pxl_halt_mode(csid_hw, csid_halt); + break; + default: + CAM_DBG(CAM_ISP, "CSID:%d res_id %d", + csid_hw->hw_intf->hw_idx, + res->res_id); + break; + } + + return rc; +} + +int cam_ife_csid_ver1_stop(void *hw_priv, + void *stop_args, uint32_t arg_size) +{ + struct cam_ife_csid_ver1_hw *csid_hw = NULL; + struct cam_isp_resource_node *res; + struct cam_hw_info *hw_info; + int rc = 0; + uint32_t i; + struct cam_csid_hw_stop_args *csid_stop; + uint32_t res_mask = 0; + + if (!hw_priv || !stop_args || + (arg_size != sizeof(struct cam_csid_hw_stop_args))) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + csid_stop = (struct cam_csid_hw_stop_args *) stop_args; + + if (!csid_stop->num_res) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + CAM_DBG(CAM_ISP, "CSID:%d num_res %d", + csid_hw->hw_intf->hw_idx, + 
csid_stop->num_res); + cam_ife_csid_ver1_tpg_stop(csid_hw); + /* Stop the resource first */ + for (i = 0; i < csid_stop->num_res; i++) { + + res = csid_stop->node_res[i]; + res_mask |= (1 << res->res_id); + CAM_DBG(CAM_ISP, "CSID:%d res_type %d Resource[id:%d name:%s]", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_name); + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_PPP: + rc = cam_ife_csid_ver1_stop_pxl_path(csid_hw, + res, csid_stop->stop_cmd); + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + rc = cam_ife_csid_ver1_stop_rdi_path(csid_hw, + res, csid_stop->stop_cmd); + break; + case CAM_IFE_PIX_PATH_RES_UDI_0: + case CAM_IFE_PIX_PATH_RES_UDI_1: + case CAM_IFE_PIX_PATH_RES_UDI_2: + rc = cam_ife_csid_ver1_stop_udi_path(csid_hw, + res, csid_stop->stop_cmd); + break; + default: + CAM_ERR(CAM_ISP, "Invalid res_id: %u", + res->res_id); + break; + } + } + + if (res_mask) + rc = cam_ife_csid_poll_stop_status(csid_hw, res_mask); + + for (i = 0; i < csid_stop->num_res; i++) { + res = csid_stop->node_res[i]; + res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW; + } + + csid_hw->counters.error_irq_count = 0; + + return rc; +} + +int cam_ife_csid_ver1_read(void *hw_priv, + void *read_args, uint32_t arg_size) +{ + CAM_ERR(CAM_ISP, "CSID: un supported"); + + return -EINVAL; +} + +int cam_ife_csid_ver1_write(void *hw_priv, + void *write_args, uint32_t arg_size) +{ + CAM_ERR(CAM_ISP, "CSID: un supported"); + return -EINVAL; +} + +static int cam_ife_csid_ver1_sof_irq_debug( + struct cam_ife_csid_ver1_hw *csid_hw, + void *cmd_args) +{ + int i = 0; + uint32_t val = 0; + bool sof_irq_enable = false; + struct cam_hw_soc_info *soc_info; + struct cam_ife_csid_ver1_reg_info *csid_reg; + uint32_t data_idx; + + if (*((uint32_t *)cmd_args) == 1) + sof_irq_enable = true; + + if 
(csid_hw->hw_info->hw_state == + CAM_HW_STATE_POWER_DOWN) { + CAM_WARN(CAM_ISP, + "CSID powered down unable to %s sof irq", + (sof_irq_enable) ? "enable" : "disable"); + return 0; + } + + data_idx = csid_hw->rx_cfg.phy_sel; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + for (i = 0; i < csid_reg->cmn_reg->num_pix; i++) { + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->irq_mask_addr); + + if (sof_irq_enable) + val |= IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + else + val &= ~IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->irq_mask_addr); + } + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->irq_mask_addr); + + if (sof_irq_enable) + val |= IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + else + val &= ~IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->irq_mask_addr); + } + + for (i = 0; i < csid_reg->cmn_reg->num_udis; i++) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->udi_reg[i]->irq_mask_addr); + + if (sof_irq_enable) + val |= IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + else + val &= ~IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->udi_reg[i]->irq_mask_addr); + } + + if (sof_irq_enable) { + csid_hw->debug_info.path_mask |= + IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + csid_hw->debug_info.debug_val |= + CAM_IFE_CSID_DEBUG_ENABLE_SOF_IRQ; + csid_hw->flags.sof_irq_triggered = true; + } else { + csid_hw->debug_info.path_mask &= + ~IFE_CSID_VER1_PATH_INFO_INPUT_SOF; + csid_hw->debug_info.debug_val &= + ~CAM_IFE_CSID_DEBUG_ENABLE_SOF_IRQ; + csid_hw->flags.sof_irq_triggered = false; + } + + CAM_INFO(CAM_ISP, "SOF freeze: CSID SOF irq %s", + (sof_irq_enable) ? 
"enabled" : "disabled"); + + CAM_INFO(CAM_ISP, "Notify CSIPHY: %d", + csid_hw->rx_cfg.phy_sel); + + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_REG_DUMP, (void *)&data_idx); + + return 0; +} + + +static int cam_ife_csid_ver1_print_hbi_vbi( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + uint32_t hbi, vbi; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH || + res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID[%u] Invalid res_type:%d res [id: %d name: %s]", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id, res->res_name); + return -EINVAL; + } + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid dev state :%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + path_reg = csid_reg->ipp_reg; + break; + case CAM_IFE_PIX_PATH_RES_PPP: + path_reg = csid_reg->ppp_reg; + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + path_reg = csid_reg->rdi_reg[res->res_id]; + break; + default: + CAM_ERR(CAM_ISP, "CSID[%u] invalid res %d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID[%u] invalid res: %d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base + + path_reg->format_measure1_addr); + vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base + + path_reg->format_measure2_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID[%u] res[id: %d name: %s] 
hbi 0x%x vbi 0x%x", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_name, + hbi, vbi); + + return 0; +} + +static int cam_ife_csid_ver1_get_time_stamp( + struct cam_ife_csid_ver1_hw *csid_hw, void *cmd_args) +{ + struct cam_isp_resource_node *res = NULL; + uint64_t time_lo, time_hi; + struct cam_hw_soc_info *soc_info; + struct cam_csid_get_time_stamp_args *timestamp_args; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct timespec64 ts; + uint32_t curr_0_sof_addr, curr_1_sof_addr; + + timestamp_args = (struct cam_csid_get_time_stamp_args *)cmd_args; + res = timestamp_args->node_res; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH || + res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + curr_0_sof_addr = csid_reg->ipp_reg->timestamp_curr0_sof_addr; + curr_1_sof_addr = csid_reg->ipp_reg->timestamp_curr1_sof_addr; + break; + case CAM_IFE_PIX_PATH_RES_PPP: + curr_0_sof_addr = csid_reg->ppp_reg->timestamp_curr0_sof_addr; + curr_1_sof_addr = csid_reg->ppp_reg->timestamp_curr1_sof_addr; + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + curr_0_sof_addr = + csid_reg->rdi_reg + [res->res_id]->timestamp_curr0_sof_addr; + curr_1_sof_addr = + csid_reg->rdi_reg + [res->res_id]->timestamp_curr1_sof_addr; + break; + default: + CAM_ERR(CAM_ISP, "CSID:%d invalid res %d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + 
	/* SOF qtimer timestamp is 64-bit, split across two 32-bit registers. */
	time_hi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		curr_1_sof_addr);
	time_lo = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		curr_0_sof_addr);
	timestamp_args->time_stamp_val = (time_hi << 32) | time_lo;

	/* Convert qtimer ticks to nanoseconds. */
	timestamp_args->time_stamp_val = mul_u64_u32_div(
		timestamp_args->time_stamp_val,
		CAM_IFE_CSID_QTIMER_MUL_FACTOR,
		CAM_IFE_CSID_QTIMER_DIV_FACTOR);

	/*
	 * qtime_to_boottime caches the qtimer->boottime offset; it is
	 * computed only once, on the first SOF after it is (re)set to 0.
	 */
	if (qtime_to_boottime == 0) {
		ktime_get_boottime_ts64(&ts);
		qtime_to_boottime =
			(uint64_t)((ts.tv_sec * 1000000000) +
			ts.tv_nsec) - (int64_t)timestamp_args->time_stamp_val;
	}

	timestamp_args->boot_timestamp = timestamp_args->time_stamp_val +
		qtime_to_boottime;
	CAM_DBG(CAM_ISP, "timestamp:%lld",
		timestamp_args->boot_timestamp);
	/* Remember the last reported SOF timestamps for later queries/debug. */
	csid_hw->timestamp.prev_sof_ts = timestamp_args->time_stamp_val;
	csid_hw->timestamp.prev_boot_ts = timestamp_args->boot_timestamp;

	return 0;
}

/*
 * Cache the clock rate requested via CSID_CLOCK_UPDATE.
 * Only stores the value; the rate is applied elsewhere.
 */
static int cam_ife_csid_ver1_set_csid_clock(
	struct cam_ife_csid_ver1_hw *csid_hw,
	void *cmd_args)
{
	struct cam_ife_csid_clock_update_args *clk_update = NULL;

	if (!csid_hw)
		return -EINVAL;

	clk_update =
		(struct cam_ife_csid_clock_update_args *)cmd_args;

	csid_hw->clk_rate = clk_update->clk_rate;

	return 0;
}

/*
 * Store the QCFA binning setting on the path config of the resource
 * carried in cmd_args (a cam_ife_csid_qcfa_update_args).
 */
int cam_ife_csid_ver1_set_csid_qcfa(
	struct cam_ife_csid_ver1_hw *csid_hw,
	void *cmd_args)
{
	struct cam_ife_csid_ver1_path_cfg *path_cfg = NULL;
	struct cam_ife_csid_qcfa_update_args *qcfa_update = NULL;
	struct cam_isp_resource_node *res = NULL;

	if (!csid_hw || !cmd_args) {
		CAM_ERR(CAM_ISP, "Invalid param %pK %pK",
			csid_hw, cmd_args);
		return -EINVAL;
	}

	qcfa_update =
		(struct cam_ife_csid_qcfa_update_args *)cmd_args;
	res = qcfa_update->res;

	if (!res) {
		CAM_ERR(CAM_ISP, "CSID[%u] NULL res",
			csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv;

	if (!path_cfg) {
		CAM_ERR(CAM_ISP, "CSID[%u] Invalid res_id %u",
			csid_hw->hw_intf->hw_idx, res->res_id);
		return -EINVAL;
	}

+ path_cfg->qcfa_bin = qcfa_update->qcfa_binning; + + CAM_DBG(CAM_ISP, "CSID %u QCFA binning %d", + csid_hw->hw_intf->hw_idx, + path_cfg->qcfa_bin); + + return 0; +} + +static int cam_ife_csid_ver1_dump_hw( + struct cam_ife_csid_ver1_hw *csid_hw, void *cmd_args) +{ + int i; + uint8_t *dst; + uint32_t *addr, *start; + uint32_t min_len; + uint32_t num_reg; + size_t remain_len; + struct cam_isp_hw_dump_header *hdr; + struct cam_isp_hw_dump_args *dump_args = + (struct cam_isp_hw_dump_args *)cmd_args; + struct cam_hw_soc_info *soc_info; + + if (!dump_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + if (!dump_args->cpu_addr || !dump_args->buf_len) { + CAM_ERR(CAM_ISP, + "Invalid params %pK %zu", + (void *)dump_args->cpu_addr, + dump_args->buf_len); + return -EINVAL; + } + soc_info = &csid_hw->hw_info->soc_info; + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_ISP, + "Dump offset overshoot offset %zu buf_len %zu", + dump_args->offset, dump_args->buf_len); + return -ENOSPC; + } + min_len = soc_info->reg_map[0].size + + sizeof(struct cam_isp_hw_dump_header) + + sizeof(uint32_t); + remain_len = dump_args->buf_len - dump_args->offset; + if (remain_len < min_len) { + CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu, min %u", + remain_len, min_len); + return -ENOSPC; + } + dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_isp_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "CSID_REG:"); + addr = (uint32_t *)(dst + sizeof(struct cam_isp_hw_dump_header)); + + start = addr; + num_reg = soc_info->reg_map[0].size/4; + hdr->word_size = sizeof(uint32_t); + *addr = soc_info->index; + addr++; + for (i = 0; i < num_reg; i++) { + addr[0] = soc_info->mem_block[0]->start + (i*4); + addr[1] = cam_io_r(soc_info->reg_map[0].mem_base + + (i*4)); + addr += 2; + } + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_isp_hw_dump_header); + CAM_DBG(CAM_ISP, 
"offset %zu", dump_args->offset); + return 0; +} + +static int cam_ife_csid_log_acquire_data( + struct cam_ife_csid_ver1_hw *csid_hw, void *cmd_args) +{ + struct cam_isp_resource_node *res = NULL; + struct cam_hw_soc_info *soc_info; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_ife_csid_ver1_path_cfg *path_cfg = NULL; + struct cam_ife_csid_ver1_path_reg_info *rdi_reg = NULL; + uint32_t byte_cnt_ping, byte_cnt_pong; + + res = (struct cam_isp_resource_node *)cmd_args; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, + "CSID:%d invalid res id:%d res type: %d state:%d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_type, + res->res_state); + return -EINVAL; + } + + path_cfg = (struct cam_ife_csid_ver1_path_cfg *)res->res_priv; + /* Dump all the acquire data for this hardware */ + CAM_INFO(CAM_ISP, + "CSID:%d res id:%d type:%d state:%d in f:%d out f:%d st pix:%d end pix:%d st line:%d end line:%d h bin:%d qcfa bin:%d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_type, + res->res_type, path_cfg->in_format, path_cfg->out_format, + path_cfg->start_pixel, path_cfg->end_pixel, + path_cfg->start_line, path_cfg->end_line, + path_cfg->horizontal_bin, path_cfg->qcfa_bin); + + if (res->res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 && + res->res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) { + rdi_reg = csid_reg->rdi_reg[res->res_id]; + /* read total number of bytes transmitted through RDI */ + byte_cnt_ping = cam_io_r_mb(soc_info->reg_map[0].mem_base + + rdi_reg->byte_cntr_ping_addr); + byte_cnt_pong = cam_io_r_mb(soc_info->reg_map[0].mem_base + + rdi_reg->byte_cntr_pong_addr); + CAM_INFO(CAM_ISP, + "CSID:%d res id:%d byte cnt val ping:%d pong:%d", + csid_hw->hw_intf->hw_idx, res->res_id, + byte_cnt_ping, byte_cnt_pong); + } + + return 0; +} + +static int cam_ife_csid_ver1_dump_csid_clock( + struct cam_ife_csid_ver1_hw 
*csid_hw, void *cmd_args) +{ + if (!csid_hw) + return -EINVAL; + + CAM_INFO(CAM_ISP, "CSID:%d clock rate %llu", + csid_hw->hw_intf->hw_idx, + csid_hw->clk_rate); + + return 0; +} + +static int cam_ife_csid_ver1_process_cmd(void *hw_priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_ife_csid_ver1_hw *csid_hw; + struct cam_hw_info *hw_info; + struct cam_isp_resource_node *res = NULL; + + if (!hw_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "CSID: Invalid arguments"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_info->core_info; + + switch (cmd_type) { + case CAM_IFE_CSID_CMD_GET_TIME_STAMP: + rc = cam_ife_csid_ver1_get_time_stamp(csid_hw, cmd_args); + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) { + res = ((struct cam_csid_get_time_stamp_args *) + cmd_args)->node_res; + cam_ife_csid_ver1_print_hbi_vbi(csid_hw, res); + } + + break; + case CAM_IFE_CSID_SET_CSID_DEBUG: + rc = cam_ife_csid_ver1_set_debug(csid_hw, + *((uint32_t *)cmd_args)); + break; + case CAM_IFE_CSID_SOF_IRQ_DEBUG: + rc = cam_ife_csid_ver1_sof_irq_debug(csid_hw, cmd_args); + break; + case CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE: + rc = cam_ife_csid_ver1_set_csid_clock(csid_hw, cmd_args); + break; + case CAM_ISP_HW_CMD_CSID_QCFA_SUPPORTED: + rc = cam_ife_csid_ver1_set_csid_qcfa(csid_hw, + cmd_args); + break; + case CAM_ISP_HW_CMD_DUMP_HW: + rc = cam_ife_csid_ver1_dump_hw(csid_hw, cmd_args); + break; + case CAM_ISP_HW_CMD_CSID_CLOCK_DUMP: + rc = cam_ife_csid_ver1_dump_csid_clock(csid_hw, cmd_args); + break; + case CAM_IFE_CSID_TOP_CONFIG: + break; + case CAM_IFE_CSID_SET_DUAL_SYNC_CONFIG: + break; + case CAM_IFE_CSID_LOG_ACQUIRE_DATA: + cam_ife_csid_log_acquire_data(csid_hw, cmd_args); + break; + case CAM_ISP_HW_CMD_CSID_CHANGE_HALT_MODE: + rc = cam_ife_csid_halt(csid_hw, cmd_args); + break; + case CAM_ISP_HW_CMD_CSID_DISCARD_INIT_FRAMES: + /* Not supported for V1 */ + rc = 
0; + break; + case CAM_ISP_HW_CMD_DRV_CONFIG: + /* Not supported for V1 */ + rc = 0; + break; + case CAM_IFE_CSID_RESET_OUT_OF_SYNC_CNT: + /* Not supported for V1 */ + rc = 0; + break; + case CAM_ISP_HW_CMD_CSID_DUMP_CROP_REG: + /* Not supported in V1*/ + break; + default: + CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d", + csid_hw->hw_intf->hw_idx, cmd_type); + rc = -EINVAL; + break; + } + return rc; + +} + +static int cam_ife_csid_ver1_handle_rx_debug_event( + struct cam_ife_csid_ver1_hw *csid_hw, + uint32_t bit_pos) +{ + struct cam_hw_soc_info *soc_info; + struct cam_ife_csid_ver1_reg_info *csid_reg; + const struct cam_ife_csid_csi2_rx_reg_info *csi2_reg; + uint32_t mask, val; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + soc_info = &csid_hw->hw_info->soc_info; + mask = BIT(bit_pos); + + switch (mask) { + case IFE_CSID_VER1_RX_LONG_PKT_CAPTURED: + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_0_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "Csid :%d Long pkt VC: %d DT: %d WC: %d", + csid_hw->hw_intf->hw_idx, + (val & csi2_reg->vc_mask) >> 22, + (val & csi2_reg->dt_mask) >> 16, + val & csi2_reg->wc_mask); + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_1_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "Csid :%d Long pkt ECC: %d", + csid_hw->hw_intf->hw_idx, val); + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_ftr_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "Csid :%d Long pkt cal CRC: %d expected CRC: %d", + csid_hw->hw_intf->hw_idx, + val & csi2_reg->calc_crc_mask, + val & csi2_reg->expected_crc_mask); + break; + + case IFE_CSID_VER1_RX_SHORT_PKT_CAPTURED: + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_short_pkt_0_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "Csid :%d Short pkt VC: %d DT: %d LC: %d", + csid_hw->hw_intf->hw_idx, + (val & csi2_reg->vc_mask) >> 22, + (val & 
csi2_reg->dt_mask) >> 16, + val & csi2_reg->wc_mask); + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_short_pkt_1_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "Csid :%d Short pkt ECC: %d", + csid_hw->hw_intf->hw_idx, val); + break; + case IFE_CSID_VER1_RX_CPHY_PKT_HDR_CAPTURED: + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_cphy_pkt_hdr_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "Csid :%d CPHY pkt VC: %d DT: %d LC: %d", + csid_hw->hw_intf->hw_idx, + (val & csi2_reg->vc_mask) >> 22, + (val & csi2_reg->dt_mask) >> 16, + val & csi2_reg->wc_mask); + break; + case IFE_CSID_VER1_RX_UNMAPPED_VC_DT: + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->cap_unmap_long_pkt_hdr_0_addr); + + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CSID:%d UNMAPPED_VC_DT: VC:%d DT:%d WC:%d not mapped to any csid paths", + csid_hw->hw_intf->hw_idx, (val >> 22), + ((val >> 16) & 0x3F), (val & 0xFFFF)); + + csid_hw->counters.error_irq_count++; + + CAM_DBG(CAM_ISP, "CSID[%u] Recoverable Error Count:%u", + csid_hw->hw_intf->hw_idx, + csid_hw->counters.error_irq_count); + break; + default: + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID[%d] RX_IRQ: %s", + csid_hw->hw_intf->hw_idx, + ver1_rx_irq_desc[bit_pos].desc); + break; + } + + return 0; +} + +static int cam_ife_csid_ver1_handle_event_err( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_ife_csid_ver1_evt_payload *evt_payload, + uint32_t err_type) +{ + struct cam_isp_hw_error_event_info err_evt_info; + struct cam_isp_hw_event_info event_info = {0}; + int rc = 0; + + event_info.hw_idx = evt_payload->hw_idx; + err_evt_info.err_type = err_type; + event_info.hw_type = CAM_ISP_HW_TYPE_CSID; + event_info.event_data = (void *)&err_evt_info; + + CAM_DBG(CAM_ISP, "CSID[%d] Error type %d", + csid_hw->hw_intf->hw_idx, err_type); + + rc = csid_hw->event_cb(csid_hw->token, + CAM_ISP_HW_EVENT_ERROR, (void *)&event_info); + + return rc; +} + +static int cam_ife_csid_ver1_put_evt_payload( + struct cam_ife_csid_ver1_hw 
*csid_hw, + struct cam_ife_csid_ver1_evt_payload **evt_payload, + struct list_head *payload_list) +{ + unsigned long flags; + + if (*evt_payload == NULL) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid payload core %d", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_ife_csid_ver1_evt_payload); + spin_lock_irqsave(&csid_hw->lock_state, flags); + list_add_tail(&(*evt_payload)->list, payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&csid_hw->lock_state, flags); + + return 0; +} + +static int cam_ife_csid_ver1_get_evt_payload( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_ife_csid_ver1_evt_payload **evt_payload, + struct list_head *payload_list) +{ + + spin_lock(&csid_hw->lock_state); + + if (list_empty(payload_list)) { + *evt_payload = NULL; + spin_unlock(&csid_hw->lock_state); + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload core %d", + csid_hw->hw_intf->hw_idx); + return -ENOMEM; + } + + *evt_payload = list_first_entry(payload_list, + struct cam_ife_csid_ver1_evt_payload, list); + list_del_init(&(*evt_payload)->list); + spin_unlock(&csid_hw->lock_state); + + return 0; +} + +static int cam_ife_csid_ver1_rx_bottom_half_handler( + struct cam_ife_csid_ver1_hw *csid_hw, + struct cam_ife_csid_ver1_evt_payload *evt_payload) +{ + const struct cam_ife_csid_csi2_rx_reg_info *csi2_reg; + struct cam_ife_csid_ver1_reg_info *csid_reg; + uint8_t *log_buf = NULL; + uint32_t irq_status; + uint32_t val; + uint32_t event_type = 0; + size_t len = 0; + struct cam_hw_soc_info *soc_info; + uint32_t data_idx; + + if (!csid_hw || !evt_payload) { + CAM_ERR(CAM_ISP, + "Invalid Param handler_priv %pK evt_payload_priv %pK", + csid_hw, evt_payload); + return -EINVAL; + } + + data_idx = csid_hw->rx_cfg.phy_sel; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + + irq_status = 
evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RX] + & csi2_reg->fatal_err_mask; + log_buf = csid_hw->log_buf; + memset(log_buf, 0, sizeof(csid_hw->log_buf)); + + CAM_INFO(CAM_ISP, "IRQ_Status: 0x%x", + evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RX]); + + if (irq_status) { + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, "Fatal Errors:\n"); + + if (irq_status & IFE_CSID_VER1_RX_LANE0_FIFO_OVERFLOW) { + event_type |= CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "RX_ERROR_LANE0_FIFO_OVERFLOW: Skew/Less Data on lanes/ Slow csid clock:%luHz\n", + soc_info->applied_src_clk_rates.sw_client); + } + + if (irq_status & IFE_CSID_VER1_RX_LANE1_FIFO_OVERFLOW) { + event_type |= CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "RX_ERROR_LANE1_FIFO_OVERFLOW: Skew/Less Data on lanes/ Slow csid clock:%luHz\n", + soc_info->applied_src_clk_rates.sw_client); + } + + if (irq_status & IFE_CSID_VER1_RX_LANE2_FIFO_OVERFLOW) { + event_type |= CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "RX_ERROR_LANE2_FIFO_OVERFLOW: Skew/Less Data on lanes/ Slow csid clock:%luHz\n", + soc_info->applied_src_clk_rates.sw_client); + } + + if (irq_status & IFE_CSID_VER1_RX_LANE3_FIFO_OVERFLOW) { + event_type |= CAM_ISP_HW_ERROR_CSID_LANE_FIFO_OVERFLOW; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "RX_ERROR_LANE3_FIFO_OVERFLOW: Skew/Less Data on lanes/ Slow csid clock:%luHz\n", + soc_info->applied_src_clk_rates.sw_client); + } + + if (irq_status & IFE_CSID_VER1_RX_TG_FIFO_OVERFLOW) { + event_type |= CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "RX_ERROR_TPG_FIFO_OVERFLOW: Backpressure from IFE\n"); + } + + if (irq_status & IFE_CSID_VER1_RX_CPHY_PH_CRC) { + event_type |= CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED; + 
CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CPHY_PH_CRC: Pkt Hdr CRC mismatch\n"); + } + + if (irq_status & IFE_CSID_VER1_RX_STREAM_UNDERFLOW) { + event_type |= CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA; + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_0_addr); + + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "ERROR_STREAM_UNDERFLOW: Fewer bytes rcvd than WC:%d in pkt hdr\n", + val & 0xFFFF); + } + + if (irq_status & IFE_CSID_VER1_RX_ERROR_ECC) { + event_type |= CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "DPHY_ERROR_ECC: Pkt hdr errors unrecoverable\n"); + } + + if (irq_status & IFE_CSID_VER1_RX_UNBOUNDED_FRAME) { + event_type |= CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "UNBOUNDED_FRAME: Frame started with EOF or No EOF\n"); + } + + if (irq_status & IFE_CSID_VER1_RX_CPHY_EOT_RECEPTION) { + event_type |= CAM_ISP_HW_ERROR_CSID_MISSING_EOT; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CPHY_EOT_RECEPTION: No EOT on lane/s, is_EPD: %d, PHY_Type: %s(%u)\n", + csid_hw->rx_cfg.epd_supported, + (csid_hw->rx_cfg.lane_type) ? 
"cphy" : "dphy", + csid_hw->rx_cfg.lane_type); + } + + if (irq_status & IFE_CSID_VER1_RX_ERROR_CRC) { + event_type |= CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CPHY_ERROR_CRC: Long pkt payload CRC mismatch\n"); + } + } + + irq_status = evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RX] & + csi2_reg->part_fatal_err_mask; + + if (irq_status) { + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Recoverable-errors:\n"); + + if (irq_status & IFE_CSID_VER1_RX_CPHY_SOT_RECEPTION) { + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CPHY_SOT_RECEPTION: Less SOTs on lane/s\n"); + } + } + + irq_status = evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RX] & + csi2_reg->non_fatal_err_mask; + + if (irq_status) { + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Non-fatal errors:\n"); + if (irq_status & IFE_CSID_VER1_RX_MMAPPED_VC_DT) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_0_addr); + + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "MMAPPED_VC_DT: VC:%d DT:%d mapped to more than 1 csid paths\n", + (val >> 22), ((val >> 16) & 0x3F)); + } + } + + if (len) + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID[%u] %s", + csid_hw->hw_intf->hw_idx, log_buf); + + if ((!csid_hw->flags.reset_awaited) && csid_hw->flags.fatal_err_detected) { + if (!event_type) + event_type |= CAM_ISP_HW_ERROR_CSID_FATAL; + + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_REG_DUMP, + (void *)&data_idx); + + cam_ife_csid_ver1_handle_event_err(csid_hw, evt_payload, event_type); + csid_hw->flags.reset_awaited = true; + } + + return IRQ_HANDLED; +} + +static int cam_ife_csid_ver1_path_bottom_half_handler( + const struct cam_ife_csid_ver1_path_reg_info *path_reg, + struct cam_ife_csid_ver1_evt_payload *evt_payload, + struct cam_ife_csid_ver1_hw *csid_hw, + uint32_t index) +{ + const uint8_t **irq_reg_tag; + uint8_t *log_buf = NULL; + 
uint32_t bit_pos = 0; + uint32_t irq_status; + size_t len = 0; + + if (!csid_hw || !evt_payload) { + CAM_ERR(CAM_ISP, + "Invalid Param csid_hw %pK evt_payload %pK", + csid_hw, evt_payload); + return 0; + } + + irq_status = evt_payload->irq_status[index] & path_reg->fatal_err_mask; + bit_pos = 0; + log_buf = csid_hw->log_buf; + memset(log_buf, 0, sizeof(csid_hw->log_buf)); + irq_reg_tag = cam_ife_csid_get_irq_reg_tag_ptr(); + + while (irq_status) { + if ((irq_status & 0x1)) + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "%s\n", ver1_path_irq_desc[bit_pos]); + bit_pos++; + irq_status >>= 1; + } + + if (len) + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CSID[%u] %s: %s", + csid_hw->hw_intf->hw_idx, + irq_reg_tag[index], + log_buf); + + if (evt_payload->irq_status[index] & + IFE_CSID_VER1_PATH_ERROR_FIFO_OVERFLOW) + cam_ife_csid_ver1_handle_event_err(csid_hw, + evt_payload, CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW); + + return 0; +} + +static int cam_ife_csid_ver1_bottom_half_handler( + void *handler_priv, + void *evt_payload_priv) +{ + const struct cam_ife_csid_ver1_path_reg_info *path_reg; + struct cam_ife_csid_ver1_evt_payload *evt_payload; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_ife_csid_ver1_hw *csid_hw; + int i; + int id = 0; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, + "Invalid Param handler_priv %pK evt_payload_priv %pK", + handler_priv, evt_payload_priv); + return -EINVAL; + } + + csid_hw = (struct cam_ife_csid_ver1_hw *)handler_priv; + evt_payload = (struct cam_ife_csid_ver1_evt_payload *)evt_payload_priv; + + if (evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RX]) + cam_ife_csid_ver1_rx_bottom_half_handler( + csid_hw, evt_payload); + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + for (i = CAM_IFE_CSID_IRQ_REG_RDI_0; i < CAM_IFE_CSID_IRQ_REG_MAX; + i++) { + + if (!evt_payload->irq_status[i]) + continue; + + switch (i) { + case CAM_IFE_CSID_IRQ_REG_IPP: + path_reg = 
csid_reg->ipp_reg; + break; + case CAM_IFE_CSID_IRQ_REG_PPP: + path_reg = csid_reg->ppp_reg; + break; + case CAM_IFE_CSID_IRQ_REG_RDI_0: + case CAM_IFE_CSID_IRQ_REG_RDI_1: + case CAM_IFE_CSID_IRQ_REG_RDI_2: + case CAM_IFE_CSID_IRQ_REG_RDI_3: + case CAM_IFE_CSID_IRQ_REG_RDI_4: + id = i - CAM_IFE_CSID_IRQ_REG_RDI_0; + path_reg = csid_reg->rdi_reg[id]; + break; + case CAM_IFE_CSID_IRQ_REG_UDI_0: + case CAM_IFE_CSID_IRQ_REG_UDI_1: + case CAM_IFE_CSID_IRQ_REG_UDI_2: + id = i - CAM_IFE_CSID_IRQ_REG_UDI_0; + path_reg = csid_reg->udi_reg[id]; + break; + default: + path_reg = NULL; + break; + } + + if (!path_reg) + continue; + + cam_ife_csid_ver1_path_bottom_half_handler( + path_reg, evt_payload, csid_hw, i); + } + + cam_ife_csid_ver1_put_evt_payload(csid_hw, &evt_payload, + &csid_hw->free_payload_list); + + return IRQ_HANDLED; +} + +static int cam_ife_csid_ver1_rx_top_half( + uint32_t *irq_status, + struct cam_ife_csid_ver1_hw *csid_hw, + uint32_t *need_bh_sched) +{ + const struct cam_ife_csid_csi2_rx_reg_info *csi2_reg; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + uint32_t status = 0; + uint32_t debug_bits; + uint32_t bit_pos = 0; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + soc_info = &csid_hw->hw_info->soc_info; + + status = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->irq_status_addr); + + if (csid_hw->rx_cfg.epd_supported) + status = status & (~IFE_CSID_VER1_RX_CPHY_EOT_RECEPTION); + + irq_status[CAM_IFE_CSID_IRQ_REG_RX] = status; + + if (!status) + return IRQ_HANDLED; + + cam_io_w_mb(status, + soc_info->reg_map[0].mem_base + + csi2_reg->irq_clear_addr); + + if (csid_hw->flags.process_reset) + return 0; + + if (status & BIT(csi2_reg->rst_done_shift_val)) { + CAM_DBG(CAM_ISP, "CSID[%d] rx_reset done", + csid_hw->hw_intf->hw_idx); + complete(&csid_hw->irq_complete + [CAM_IFE_CSID_IRQ_REG_RX]); + return IRQ_HANDLED; + } + + if 
(csid_hw->flags.fatal_err_detected) { + CAM_DBG(CAM_ISP, "CSID[%u] already handling fatal error", + csid_hw->hw_intf->hw_idx); + return 0; + } + + if (status & csi2_reg->fatal_err_mask) { + csid_hw->flags.fatal_err_detected = true; + cam_ife_csid_ver1_disable_csi2(csid_hw); + } + + if (status & csi2_reg->part_fatal_err_mask) { + if (status & IFE_CSID_VER1_RX_CPHY_SOT_RECEPTION) + csid_hw->counters.error_irq_count++; + + CAM_DBG(CAM_ISP, "CSID[%u] Recoverable Error Count:%u", + csid_hw->hw_intf->hw_idx, + csid_hw->counters.error_irq_count); + + if (csid_hw->counters.error_irq_count > CAM_IFE_CSID_MAX_ERR_COUNT) { + csid_hw->flags.fatal_err_detected = true; + cam_ife_csid_ver1_disable_csi2(csid_hw); + } + } + + debug_bits = status & csid_hw->debug_info.rx_mask; + + while (debug_bits) { + + if (debug_bits & 0x1) + cam_ife_csid_ver1_handle_rx_debug_event(csid_hw, + bit_pos); + bit_pos++; + debug_bits >>= 1; + } + + *need_bh_sched = status & (csi2_reg->fatal_err_mask | + csi2_reg->part_fatal_err_mask | + csi2_reg->non_fatal_err_mask); + + return IRQ_HANDLED; +} + +static int cam_ife_csid_ver1_path_top_half( + uint32_t *irq_status, + struct cam_ife_csid_ver1_hw *csid_hw, + uint32_t *need_bh, + uint32_t index) +{ + struct cam_ife_csid_ver1_path_reg_info *path_reg = NULL; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const uint8_t **irq_reg_tag; + uint32_t status = 0; + uint32_t debug_bits; + uint32_t bit_pos = 0; + int id = 0; + uint32_t sof_irq_debug_en = 0; + uint32_t val , val1; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + + switch (index) { + case CAM_IFE_CSID_IRQ_REG_IPP: + path_reg = csid_reg->ipp_reg; + break; + case CAM_IFE_CSID_IRQ_REG_PPP: + path_reg = csid_reg->ppp_reg; + break; + case CAM_IFE_CSID_IRQ_REG_RDI_0: + case CAM_IFE_CSID_IRQ_REG_RDI_1: + case CAM_IFE_CSID_IRQ_REG_RDI_2: + case CAM_IFE_CSID_IRQ_REG_RDI_3: + case CAM_IFE_CSID_IRQ_REG_RDI_4: + id = index - 
CAM_IFE_CSID_IRQ_REG_RDI_0; + path_reg = csid_reg->rdi_reg[id]; + break; + case CAM_IFE_CSID_IRQ_REG_UDI_0: + case CAM_IFE_CSID_IRQ_REG_UDI_1: + case CAM_IFE_CSID_IRQ_REG_UDI_2: + id = index - CAM_IFE_CSID_IRQ_REG_UDI_0; + path_reg = csid_reg->udi_reg[id]; + break; + default: + break; + } + + if (!path_reg) + return 0; + + soc_info = &csid_hw->hw_info->soc_info; + status = cam_io_r_mb(soc_info->reg_map[0].mem_base + + path_reg->irq_status_addr); + cam_io_w_mb(status, + soc_info->reg_map[0].mem_base + + path_reg->irq_clear_addr); + + irq_status[index] = status; + + if (csid_hw->flags.process_reset) + return 0; + + if (status & BIT(csid_reg->cmn_reg->rst_done_shift_val)) { + CAM_DBG(CAM_ISP, "irq_reg:%d reset done", index); + complete(&csid_hw->irq_complete[index]); + return 0; + } + + if (csid_hw->flags.sof_irq_triggered && + (status & IFE_CSID_VER1_PATH_INFO_INPUT_SOF)) { + csid_hw->counters.irq_debug_cnt++; + } + + if ((status & IFE_CSID_VER1_PATH_ERROR_PIX_COUNT ) || + (status & IFE_CSID_VER1_PATH_ERROR_LINE_COUNT)) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + path_reg->format_measure0_addr); + val1 = cam_io_r_mb(soc_info->reg_map[0].mem_base + + path_reg->format_measure_cfg1_addr); + CAM_ERR(CAM_ISP, + "Expected:: h: 0x%x w: 0x%x actual:: h: 0x%x w: 0x%x [format_measure0: 0x%x]", + ((val1 >> + csid_reg->cmn_reg->format_measure_height_shift_val) & + csid_reg->cmn_reg->format_measure_height_mask_val), + val1 & + csid_reg->cmn_reg->format_measure_width_mask_val, + ((val >> + csid_reg->cmn_reg->format_measure_height_shift_val) & + csid_reg->cmn_reg->format_measure_height_mask_val), + val & + csid_reg->cmn_reg->format_measure_width_mask_val, + val); + } + + if (status & path_reg->fatal_err_mask) + cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY, + soc_info->reg_map[0].mem_base + + path_reg->ctrl_addr); + + debug_bits = status & csid_hw->debug_info.path_mask; + irq_reg_tag = cam_ife_csid_get_irq_reg_tag_ptr(); + + while (debug_bits) { + if ((debug_bits & 0x1)) + 
CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID[%d] IRQ %s %s ", + csid_hw->hw_intf->hw_idx, + irq_reg_tag[index], + ver1_path_irq_desc[bit_pos]); + + bit_pos++; + debug_bits >>= 1; + } + + *need_bh = status & (path_reg->fatal_err_mask | + path_reg->non_fatal_err_mask); + + if (csid_hw->counters.irq_debug_cnt >= CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX) { + cam_ife_csid_ver1_sof_irq_debug(csid_hw, &sof_irq_debug_en); + csid_hw->counters.irq_debug_cnt = 0; + } + + return IRQ_HANDLED; +} + +static irqreturn_t cam_ife_csid_irq(int irq_num, void *data) +{ + struct cam_ife_csid_ver1_evt_payload *evt_payload = NULL; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_ife_csid_ver1_hw *csid_hw; + struct cam_hw_soc_info *soc_info; + void *bh_cmd = NULL; + unsigned long flags; + uint32_t status[CAM_IFE_CSID_IRQ_REG_MAX]; + uint32_t need_rx_bh = 0; + uint32_t need_path_bh = 0; + uint32_t need_bh_sched = 0; + int i; + int rc = 0; + + csid_hw = (struct cam_ife_csid_ver1_hw *)data; + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + memset(status, 0, sizeof(status)); + + spin_lock_irqsave(&csid_hw->hw_info->hw_lock, flags); + + status[CAM_IFE_CSID_IRQ_REG_TOP] = + cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_status_addr); + cam_io_w_mb(status[CAM_IFE_CSID_IRQ_REG_TOP], + soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_clear_addr); + + for (i = CAM_IFE_CSID_IRQ_REG_RX; i < CAM_IFE_CSID_IRQ_REG_MAX; i++) { + + switch (i) { + case CAM_IFE_CSID_IRQ_REG_RX: + cam_ife_csid_ver1_rx_top_half( + status, csid_hw, &need_rx_bh); + break; + case CAM_IFE_CSID_IRQ_REG_IPP: + case CAM_IFE_CSID_IRQ_REG_PPP: + case CAM_IFE_CSID_IRQ_REG_RDI_0: + case CAM_IFE_CSID_IRQ_REG_RDI_1: + case CAM_IFE_CSID_IRQ_REG_RDI_2: + case CAM_IFE_CSID_IRQ_REG_RDI_3: + case CAM_IFE_CSID_IRQ_REG_RDI_4: + case CAM_IFE_CSID_IRQ_REG_UDI_0: + case CAM_IFE_CSID_IRQ_REG_UDI_1: + case CAM_IFE_CSID_IRQ_REG_UDI_2: + 
cam_ife_csid_ver1_path_top_half( + status, csid_hw, &need_path_bh, i); + break; + default: + break; + } + need_bh_sched |= (need_rx_bh | need_path_bh); + } + + if (status[CAM_IFE_CSID_IRQ_REG_TOP] & IFE_CSID_VER1_TOP_IRQ_DONE) { + csid_hw->flags.process_reset = false; + goto handle_top_reset; + } + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + spin_unlock_irqrestore( + &csid_hw->hw_info->hw_lock, flags); + + if (!need_bh_sched) + return IRQ_HANDLED; + + rc = cam_ife_csid_ver1_get_evt_payload(csid_hw, &evt_payload, + &csid_hw->free_payload_list); + + if (!evt_payload) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID[%u], no free tasklet rc %d", + csid_hw->hw_intf->hw_idx, rc); + return IRQ_HANDLED; + } + + + rc = tasklet_bh_api.get_bh_payload_func(csid_hw->tasklet, &bh_cmd); + + if (rc || !bh_cmd) { + cam_ife_csid_ver1_put_evt_payload(csid_hw, &evt_payload, + &csid_hw->free_payload_list); + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CSID[%d] Can not get cmd for tasklet, status %x", + csid_hw->hw_intf->hw_idx, + status[CAM_IFE_CSID_IRQ_REG_TOP]); + return IRQ_HANDLED; + } + + evt_payload->priv = csid_hw->token; + evt_payload->hw_idx = csid_hw->hw_intf->hw_idx; + + for (i = 0; i < CAM_IFE_CSID_IRQ_REG_MAX; i++) + evt_payload->irq_status[i] = status[i]; + + tasklet_bh_api.bottom_half_enqueue_func(csid_hw->tasklet, + bh_cmd, + csid_hw, + evt_payload, + cam_ife_csid_ver1_bottom_half_handler); + + return IRQ_HANDLED; + +handle_top_reset: + CAM_DBG(CAM_ISP, "csid top reset complete"); + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->irq_cmd_addr); + spin_unlock_irqrestore( + &csid_hw->hw_info->hw_lock, flags); + complete(&csid_hw->irq_complete[CAM_IFE_CSID_IRQ_REG_TOP]); + return IRQ_HANDLED; +} + +static void cam_ife_csid_ver1_free_res(struct cam_ife_csid_ver1_hw *ife_csid_hw) +{ + struct cam_isp_resource_node *res; + uint32_t num_paths; + int i; + struct cam_ife_csid_ver1_reg_info *csid_reg; + + csid_reg = (struct 
cam_ife_csid_ver1_reg_info *) + ife_csid_hw->core_info->csid_reg; + num_paths = csid_reg->cmn_reg->num_udis; + + for (i = 0; i < num_paths; i++) { + res = &ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_UDI_0 + i]; + CAM_MEM_FREE(res->res_priv); + res->res_priv = NULL; + } + + num_paths = csid_reg->cmn_reg->num_rdis; + + for (i = 0; i < num_paths; i++) { + res = &ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_RDI_0 + i]; + CAM_MEM_FREE(res->res_priv); + res->res_priv = NULL; + } + + CAM_MEM_FREE(ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP].res_priv); + ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP].res_priv = NULL; + CAM_MEM_FREE(ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP].res_priv); + ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP].res_priv = NULL; +} + +static int cam_ife_ver1_hw_alloc_res( + struct cam_isp_resource_node *res, + uint32_t res_type, + struct cam_hw_intf *hw_intf, + uint32_t res_id) + +{ + struct cam_ife_csid_ver1_path_cfg *path_cfg = NULL; + + path_cfg = CAM_MEM_ZALLOC(sizeof(*path_cfg), GFP_KERNEL); + + if (!path_cfg) + return -ENOMEM; + + res->res_id = res_id; + res->res_type = res_type; + res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + res->hw_intf = hw_intf; + res->res_priv = path_cfg; + + return 0; +} + +static int cam_ife_csid_ver1_hw_init_path_res( + struct cam_ife_csid_ver1_hw *ife_csid_hw) +{ + int rc = 0; + int i; + struct cam_ife_csid_ver1_reg_info *csid_reg; + struct cam_isp_resource_node *res; + + csid_reg = (struct cam_ife_csid_ver1_reg_info *) + ife_csid_hw->core_info->csid_reg; + + /* Initialize the IPP resources */ + if (csid_reg->cmn_reg->num_pix) { + res = &ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP]; + rc = cam_ife_ver1_hw_alloc_res( + res, + CAM_ISP_RESOURCE_PIX_PATH, + ife_csid_hw->hw_intf, + CAM_IFE_PIX_PATH_RES_IPP); + if (rc) { + CAM_ERR(CAM_ISP, "CSID: %d IPP res init fail", + ife_csid_hw->hw_intf->hw_idx); + goto free_res; + } + scnprintf(res->res_name, CAM_ISP_RES_NAME_LEN, "IPP"); + } + + /* Initialize PPP 
resource */ + if (csid_reg->cmn_reg->num_ppp) { + res = &ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP]; + rc = cam_ife_ver1_hw_alloc_res( + res, + CAM_ISP_RESOURCE_PIX_PATH, + ife_csid_hw->hw_intf, + CAM_IFE_PIX_PATH_RES_PPP); + if (rc) { + CAM_ERR(CAM_ISP, "CSID: %d PPP res init fail", + ife_csid_hw->hw_intf->hw_idx); + goto free_res; + } + scnprintf(res->res_name, + CAM_ISP_RES_NAME_LEN, "PPP"); + } + + /* Initialize the RDI resource */ + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + /* res type is from RDI 0 to RDI3 */ + res = &ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_RDI_0 + i]; + rc = cam_ife_ver1_hw_alloc_res( + res, + CAM_ISP_RESOURCE_PIX_PATH, + ife_csid_hw->hw_intf, + CAM_IFE_PIX_PATH_RES_RDI_0 + i); + if (rc) { + CAM_ERR(CAM_ISP, "CSID: %d RDI[%d] res init fail", + ife_csid_hw->hw_intf->hw_idx, i); + goto free_res; + } + scnprintf(res->res_name, CAM_ISP_RES_NAME_LEN, "RDI_%d", i); + } + + /* Initialize the UDI resource */ + for (i = 0; i < csid_reg->cmn_reg->num_udis; i++) { + /* res type is from UDI0 to UDI3 */ + res = &ife_csid_hw->path_res[CAM_IFE_PIX_PATH_RES_UDI_0 + i]; + rc = cam_ife_ver1_hw_alloc_res( + res, + CAM_ISP_RESOURCE_PIX_PATH, + ife_csid_hw->hw_intf, + CAM_IFE_PIX_PATH_RES_UDI_0 + i); + if (rc) { + CAM_ERR(CAM_ISP, "CSID: %d UDI[%d] res init fail", + ife_csid_hw->hw_intf->hw_idx, i); + goto free_res; + } + scnprintf(res->res_name, CAM_ISP_RES_NAME_LEN, "UDI_%d", i); + } + + return rc; + +free_res: + cam_ife_csid_ver1_free_res(ife_csid_hw); + return rc; +} + +int cam_ife_csid_hw_ver1_init(struct cam_hw_intf *hw_intf, + struct cam_ife_csid_core_info *csid_core_info, + bool is_custom) +{ + int rc = -EINVAL; + uint32_t i; + struct cam_hw_info *hw_info; + struct cam_ife_csid_ver1_hw *ife_csid_hw = NULL; + + if (!hw_intf || !csid_core_info) { + CAM_ERR(CAM_ISP, "Invalid parameters intf: %pK hw_info: %pK", + hw_intf, csid_core_info); + return rc; + } + + hw_info = (struct cam_hw_info *)hw_intf->hw_priv; + + ife_csid_hw = 
CAM_MEM_ZALLOC(sizeof(struct cam_ife_csid_ver1_hw), GFP_KERNEL); + + if (!ife_csid_hw) { + CAM_ERR(CAM_ISP, "Csid core %d hw allocation fails", + hw_intf->hw_idx); + return -ENOMEM; + } + + ife_csid_hw->hw_intf = hw_intf; + ife_csid_hw->hw_info = hw_info; + ife_csid_hw->core_info = csid_core_info; + hw_info->core_info = ife_csid_hw; + + CAM_DBG(CAM_ISP, "type %d index %d", + hw_intf->hw_type, + hw_intf->hw_idx); + + ife_csid_hw->flags.device_enabled = false; + ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&ife_csid_hw->hw_info->hw_mutex); + spin_lock_init(&ife_csid_hw->hw_info->hw_lock); + spin_lock_init(&ife_csid_hw->lock_state); + init_completion(&ife_csid_hw->hw_info->hw_complete); + + for (i = 0; i < CAM_IFE_CSID_IRQ_REG_MAX; i++) + init_completion(&ife_csid_hw->irq_complete[i]); + + rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info, + cam_ife_csid_irq, NULL, ife_csid_hw, is_custom); + if (rc < 0) { + CAM_ERR(CAM_ISP, "CSID:%d Failed to init_soc", + hw_intf->hw_idx); + return rc; + } + + if (cam_cpas_is_feature_supported(CAM_CPAS_QCFA_BINNING_ENABLE, + CAM_CPAS_HW_IDX_ANY, NULL)) + ife_csid_hw->flags.binning_enabled = true; + + ife_csid_hw->hw_intf->hw_ops.get_hw_caps = + cam_ife_csid_ver1_get_hw_caps; + ife_csid_hw->hw_intf->hw_ops.init = cam_ife_csid_ver1_init_hw; + ife_csid_hw->hw_intf->hw_ops.deinit = cam_ife_csid_ver1_deinit_hw; + ife_csid_hw->hw_intf->hw_ops.reset = cam_ife_csid_ver1_reset; + ife_csid_hw->hw_intf->hw_ops.reserve = cam_ife_csid_ver1_reserve; + ife_csid_hw->hw_intf->hw_ops.release = cam_ife_csid_ver1_release; + ife_csid_hw->hw_intf->hw_ops.start = cam_ife_csid_ver1_start; + ife_csid_hw->hw_intf->hw_ops.stop = cam_ife_csid_ver1_stop; + ife_csid_hw->hw_intf->hw_ops.read = cam_ife_csid_ver1_read; + ife_csid_hw->hw_intf->hw_ops.write = cam_ife_csid_ver1_write; + ife_csid_hw->hw_intf->hw_ops.process_cmd = + cam_ife_csid_ver1_process_cmd; + + rc = cam_ife_csid_ver1_hw_init_path_res(ife_csid_hw); + if 
(rc) { + CAM_ERR(CAM_ISP, "CSID[%d] Probe Init failed", + hw_intf->hw_idx); + return rc; + } + + rc = cam_tasklet_init(&ife_csid_hw->tasklet, ife_csid_hw, + hw_intf->hw_idx); + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%d] init tasklet failed", + hw_intf->hw_idx); + goto err; + } + + INIT_LIST_HEAD(&ife_csid_hw->free_payload_list); + for (i = 0; i < CAM_IFE_CSID_VER1_EVT_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&ife_csid_hw->evt_payload[i].list); + list_add_tail(&ife_csid_hw->evt_payload[i].list, + &ife_csid_hw->free_payload_list); + } + + ife_csid_hw->debug_info.debug_val = 0; + ife_csid_hw->counters.error_irq_count = 0; + + return 0; +err: + cam_ife_csid_ver1_free_res(ife_csid_hw); + return rc; +} +EXPORT_SYMBOL(cam_ife_csid_hw_ver1_init); + +int cam_ife_csid_hw_ver1_deinit(struct cam_hw_info *hw_priv) +{ + struct cam_ife_csid_ver1_hw *csid_hw; + int rc = -EINVAL; + + + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid param"); + return rc; + } + + csid_hw = (struct cam_ife_csid_ver1_hw *)hw_priv->core_info; + + /* release the privdate data memory from resources */ + cam_ife_csid_ver1_free_res(csid_hw); + + cam_ife_csid_deinit_soc_resources(&csid_hw->hw_info->soc_info); + + return 0; +} +EXPORT_SYMBOL(cam_ife_csid_hw_ver1_deinit); diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver1.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver1.h new file mode 100644 index 0000000000..1595c591a2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver1.h @@ -0,0 +1,458 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_HW_VER1_H_ +#define _CAM_IFE_CSID_HW_VER1_H_ + +#define IFE_CSID_VER1_RX_DL0_EOT_CAPTURED BIT(0) +#define IFE_CSID_VER1_RX_DL1_EOT_CAPTURED BIT(1) +#define IFE_CSID_VER1_RX_DL2_EOT_CAPTURED BIT(2) +#define IFE_CSID_VER1_RX_DL3_EOT_CAPTURED BIT(3) +#define IFE_CSID_VER1_RX_DL0_SOT_CAPTURED BIT(4) +#define IFE_CSID_VER1_RX_DL1_SOT_CAPTURED BIT(5) +#define IFE_CSID_VER1_RX_DL2_SOT_CAPTURED BIT(6) +#define IFE_CSID_VER1_RX_DL3_SOT_CAPTURED BIT(7) +#define IFE_CSID_VER1_RX_LONG_PKT_CAPTURED BIT(8) +#define IFE_CSID_VER1_RX_SHORT_PKT_CAPTURED BIT(9) +#define IFE_CSID_VER1_RX_CPHY_PKT_HDR_CAPTURED BIT(10) +#define IFE_CSID_VER1_RX_CPHY_EOT_RECEPTION BIT(11) +#define IFE_CSID_VER1_RX_CPHY_SOT_RECEPTION BIT(12) +#define IFE_CSID_VER1_RX_CPHY_PH_CRC BIT(13) +#define IFE_CSID_VER1_RX_WARNING_ECC BIT(14) +#define IFE_CSID_VER1_RX_LANE0_FIFO_OVERFLOW BIT(15) +#define IFE_CSID_VER1_RX_LANE1_FIFO_OVERFLOW BIT(16) +#define IFE_CSID_VER1_RX_LANE2_FIFO_OVERFLOW BIT(17) +#define IFE_CSID_VER1_RX_LANE3_FIFO_OVERFLOW BIT(18) +#define IFE_CSID_VER1_RX_ERROR_CRC BIT(19) +#define IFE_CSID_VER1_RX_ERROR_ECC BIT(20) +#define IFE_CSID_VER1_RX_MMAPPED_VC_DT BIT(21) +#define IFE_CSID_VER1_RX_UNMAPPED_VC_DT BIT(22) +#define IFE_CSID_VER1_RX_STREAM_UNDERFLOW BIT(23) +#define IFE_CSID_VER1_RX_UNBOUNDED_FRAME BIT(24) +#define IFE_CSID_VER1_RX_TG_DONE BIT(25) +#define IFE_CSID_VER1_RX_TG_FIFO_OVERFLOW BIT(26) +#define IFE_CSID_VER1_RX_RST_DONE BIT(27) + +#define IFE_CSID_VER1_TOP_IRQ_DONE BIT(0) +#define IFE_CSID_VER1_PATH_INFO_RST_DONE BIT(1) +#define IFE_CSID_VER1_PATH_ERROR_FIFO_OVERFLOW BIT(2) +#define IFE_CSID_VER1_PATH_INFO_SUBSAMPLED_EOF BIT(3) +#define IFE_CSID_VER1_PATH_INFO_SUBSAMPLED_SOF BIT(4) +#define IFE_CSID_VER1_PATH_INFO_FRAME_DROP_EOF BIT(5) +#define IFE_CSID_VER1_PATH_INFO_FRAME_DROP_EOL BIT(6) +#define IFE_CSID_VER1_PATH_INFO_FRAME_DROP_SOL BIT(7) +#define IFE_CSID_VER1_PATH_INFO_FRAME_DROP_SOF BIT(8) +#define IFE_CSID_VER1_PATH_INFO_INPUT_EOF 
BIT(9) +#define IFE_CSID_VER1_PATH_INFO_INPUT_EOL BIT(10) +#define IFE_CSID_VER1_PATH_INFO_INPUT_SOL BIT(11) +#define IFE_CSID_VER1_PATH_INFO_INPUT_SOF BIT(12) +#define IFE_CSID_VER1_PATH_ERROR_PIX_COUNT BIT(13) +#define IFE_CSID_VER1_PATH_ERROR_LINE_COUNT BIT(14) +#define IFE_CSID_VER1_PATH_ERROR_CCIF_VIOLATION BIT(15) +#define IFE_CSID_VER1_PATH_OVERFLOW_RECOVERY BIT(17) + +#define CAM_IFE_CSID_VER1_EVT_PAYLOAD_MAX 256 + +/* + * struct cam_ife_csid_ver1_common_reg_info: Structure to hold Common info + * holds register address offsets + * shift values + * masks + */ +struct cam_ife_csid_ver1_common_reg_info { + /* MIPI CSID registers */ + uint32_t hw_version_addr; + uint32_t cfg0_addr; + uint32_t ctrl_addr; + uint32_t reset_addr; + uint32_t rst_strobes_addr; + uint32_t test_bus_ctrl_addr; + uint32_t top_irq_status_addr; + uint32_t top_irq_mask_addr; + uint32_t top_irq_clear_addr; + uint32_t top_irq_set_addr; + uint32_t irq_cmd_addr; + + /*Shift Bit Configurations*/ + uint32_t rst_done_shift_val; + uint32_t timestamp_stb_sel_shift_val; + uint32_t decode_format_shift_val; + uint32_t path_en_shift_val; + uint32_t dt_id_shift_val; + uint32_t vc_shift_val; + uint32_t dt_shift_val; + uint32_t fmt_shift_val; + uint32_t num_bytes_out_shift_val; + uint32_t crop_shift_val; + uint32_t debug_frm_drop_rst_shift_val; + uint32_t debug_timestamp_rst_shift_val; + uint32_t debug_format_measure_rst_shift_val; + uint32_t debug_misr_rst_shift_val; + uint32_t num_padding_pixels_shift_val; + uint32_t num_padding_rows_shift_val; + uint32_t fmt_measure_num_lines_shift_val; + uint32_t num_vbi_lines_shift_val; + uint32_t num_hbi_cycles_shift_val; + uint32_t multi_vcdt_vc1_shift_val; + uint32_t multi_vcdt_dt1_shift_val; + uint32_t multi_vcdt_ts_combo_en_shift_val; + uint32_t multi_vcdt_en_shift_val; + + /* config Values */ + uint32_t major_version; + uint32_t minor_version; + uint32_t version_incr; + uint32_t num_udis; + uint32_t num_rdis; + uint32_t num_pix; + uint32_t num_ppp; + uint32_t 
rst_sw_reg_stb; + uint32_t rst_hw_reg_stb; + uint32_t rst_sw_hw_reg_stb; + uint32_t path_rst_stb_all; + uint32_t drop_supported; + uint32_t multi_vcdt_supported; + uint32_t timestamp_strobe_val; + uint32_t early_eof_supported; + uint32_t global_reset; + uint32_t aup_rup_supported; + + /* Masks */ + uint32_t ipp_irq_mask_all; + uint32_t rdi_irq_mask_all; + uint32_t ppp_irq_mask_all; + uint32_t udi_irq_mask_all; + uint32_t measure_en_hbi_vbi_cnt_mask; + uint32_t measure_pixel_line_en_mask; + uint32_t fmt_measure_num_line_mask; + uint32_t fmt_measure_num_pxl_mask; + uint32_t hblank_max_mask; + uint32_t hblank_min_mask; + uint32_t crop_pix_start_mask; + uint32_t crop_pix_end_mask; + uint32_t crop_line_start_mask; + uint32_t crop_line_end_mask; + uint32_t format_measure_height_mask_val; + uint32_t format_measure_height_shift_val; + uint32_t format_measure_width_mask_val; + uint32_t format_measure_width_shift_val; +}; + +/* + * struct cam_ife_csid_ver1_path_reg_info: Structure to hold PXL reg info + * holds register address offsets + * shift values + * masks + */ +struct cam_ife_csid_ver1_path_reg_info { + /* Pxl path register offsets*/ + uint32_t irq_status_addr; + uint32_t irq_mask_addr; + uint32_t irq_clear_addr; + uint32_t irq_set_addr; + + uint32_t cfg0_addr; + uint32_t cfg1_addr; + uint32_t ctrl_addr; + uint32_t frm_drop_pattern_addr; + uint32_t frm_drop_period_addr; + uint32_t irq_subsample_pattern_addr; + uint32_t irq_subsample_period_addr; + uint32_t hcrop_addr; + uint32_t vcrop_addr; + uint32_t pix_drop_pattern_addr; + uint32_t pix_drop_period_addr; + uint32_t line_drop_pattern_addr; + uint32_t line_drop_period_addr; + uint32_t rst_strobes_addr; + uint32_t status_addr; + uint32_t misr_val_addr; + uint32_t misr_val0_addr; + uint32_t misr_val1_addr; + uint32_t misr_val2_addr; + uint32_t misr_val3_addr; + uint32_t format_measure_cfg0_addr; + uint32_t format_measure_cfg1_addr; + uint32_t format_measure0_addr; + uint32_t format_measure1_addr; + uint32_t 
format_measure2_addr; + uint32_t yuv_chroma_conversion_addr; + uint32_t timestamp_curr0_sof_addr; + uint32_t timestamp_curr1_sof_addr; + uint32_t timestamp_prev0_sof_addr; + uint32_t timestamp_prev1_sof_addr; + uint32_t timestamp_curr0_eof_addr; + uint32_t timestamp_curr1_eof_addr; + uint32_t timestamp_prev0_eof_addr; + uint32_t timestamp_prev1_eof_addr; + uint32_t err_recovery_cfg0_addr; + uint32_t err_recovery_cfg1_addr; + uint32_t err_recovery_cfg2_addr; + uint32_t multi_vcdt_cfg0_addr; + uint32_t byte_cntr_ping_addr; + uint32_t byte_cntr_pong_addr; + /* shift bit configuration */ + uint32_t timestamp_en_shift_val; + uint32_t crop_v_en_shift_val; + uint32_t crop_h_en_shift_val; + uint32_t drop_v_en_shift_val; + uint32_t drop_h_en_shift_val; + uint32_t bin_h_en_shift_val; + uint32_t bin_v_en_shift_val; + uint32_t bin_en_shift_val; + uint32_t bin_qcfa_en_shift_val; + uint32_t halt_mode_master; + uint32_t halt_mode_internal; + uint32_t halt_mode_global; + uint32_t halt_mode_slave; + uint32_t halt_mode_shift; + uint32_t halt_master_sel_master_val; + uint32_t halt_master_sel_shift; + uint32_t halt_frame_boundary; + uint32_t resume_frame_boundary; + uint32_t halt_immediate; + uint32_t halt_cmd_shift; + uint32_t mipi_pack_supported; + uint32_t packing_fmt_shift_val; + uint32_t format_measure_en_shift_val; + uint32_t plain_fmt_shift_val; + uint32_t packing_format; + uint32_t pix_store_en_shift_val; + uint32_t early_eof_en_shift_val; + /* config Values */ + uint32_t ccif_violation_en; + uint32_t binning_supported; + uint32_t overflow_ctrl_mode_val; + uint32_t overflow_ctrl_en; + uint32_t fatal_err_mask; + uint32_t non_fatal_err_mask; +}; + +/* + * struct struct cam_ife_csid_ver1_tpg_reg_info: Structure to hold TPG reg info + * holds register address offsets + * shift values + * masks + */ +struct cam_ife_csid_ver1_tpg_reg_info { + uint32_t ctrl_addr; + uint32_t vc_cfg0_addr; + uint32_t vc_cfg1_addr; + uint32_t lfsr_seed_addr; + uint32_t dt_n_cfg_0_addr; + uint32_t 
dt_n_cfg_1_addr; + uint32_t dt_n_cfg_2_addr; + uint32_t color_bars_cfg_addr; + uint32_t color_box_cfg_addr; + uint32_t common_gen_cfg_addr; + uint32_t cgen_n_cfg_addr; + uint32_t cgen_n_x0_addr; + uint32_t cgen_n_x1_addr; + uint32_t cgen_n_x2_addr; + uint32_t cgen_n_xy_addr; + uint32_t cgen_n_y1_addr; + uint32_t cgen_n_y2_addr; + + /*configurations */ + uint32_t dtn_cfg_offset; + uint32_t cgen_cfg_offset; + uint32_t cpas_ife_reg_offset; + uint32_t hbi; + uint32_t vbi; + uint32_t lfsr_seed; + uint32_t width_shift; + uint32_t height_shift; + uint32_t fmt_shift; + uint32_t color_bar; + uint32_t line_interleave_mode; + uint32_t payload_mode; + uint32_t num_frames; + uint32_t num_active_dt; + uint32_t ctrl_cfg; + uint32_t phy_sel; + uint32_t num_frame_shift; + uint32_t line_interleave_shift; + uint32_t num_active_dt_shift; + uint32_t vbi_shift; + uint32_t hbi_shift; + uint32_t color_bar_shift; + uint32_t num_active_lanes_mask; +}; + +/* + * struct cam_ife_csid_ver1_reg_info: Structure for Reg info + * + * @csi2_reg: csi2 reg info + * @ipp_reg: IPP reg + * @ppp_reg: PPP reg + * @rdi_reg: RDI reg + * @udi_reg: UDI reg + * @start_pixel: start pixel for horizontal crop + * @tpg_reg: TPG reg + * @cmn_reg: Common reg info + * @csid_cust_node_map: Customer node map + * @fused_max_width: Max width based on fuse registers + * @width_fuse_max_val: Max value of width fuse + * @used_hw_capabilities Hw capabilities + */ +struct cam_ife_csid_ver1_reg_info { + struct cam_ife_csid_csi2_rx_reg_info *csi2_reg; + struct cam_ife_csid_ver1_path_reg_info *ipp_reg; + struct cam_ife_csid_ver1_path_reg_info *ppp_reg; + struct cam_ife_csid_ver1_path_reg_info *rdi_reg + [CAM_IFE_CSID_RDI_MAX]; + struct cam_ife_csid_ver1_path_reg_info *udi_reg + [CAM_IFE_CSID_UDI_MAX]; + struct cam_ife_csid_ver1_tpg_reg_info *tpg_reg; + struct cam_ife_csid_ver1_common_reg_info *cmn_reg; + const uint32_t csid_cust_node_map[ + CAM_IFE_CSID_HW_NUM_MAX]; + const uint32_t fused_max_width[ + 
CAM_IFE_CSID_WIDTH_FUSE_VAL_MAX]; + const uint32_t width_fuse_max_val; +}; + +/* + * struct cam_ife_csid_ver1_path_cfg: place holder for path parameters + * + * @cid: cid value for path + * @in_format: input format + * @out_format: output format + * @start_pixel: start pixel for horizontal crop + * @end_pixel: end pixel for horizontal crop + * @start_line: start line for vertical crop + * @end_line: end line for vertical crop + * @width: width of incoming data + * @height: height of incoming data + * @master_idx: master idx + * @horizontal_bin: horizontal binning enable/disable on path + * @vertical_bin: vertical binning enable/disable on path + * @qcfa_bin : qcfa binning enable/disable on path + * @hor_ver_bin : horizontal vertical binning enable/disable on path + * @num_bytes_out: Number of bytes out + * @sync_mode : Sync mode--> master/slave/none + * @crop_enable: flag to indicate crop enable + * @drop_enable: flag to indicate drop enable + * + */ +struct cam_ife_csid_ver1_path_cfg { + uint32_t cid; + uint32_t in_format; + uint32_t out_format; + uint32_t start_pixel; + uint32_t end_pixel; + uint32_t width; + uint32_t start_line; + uint32_t end_line; + uint32_t height; + uint32_t master_idx; + uint32_t horizontal_bin; + uint32_t vertical_bin; + uint32_t qcfa_bin; + uint32_t hor_ver_bin; + uint32_t num_bytes_out; + enum cam_isp_hw_sync_mode sync_mode; + bool crop_enable; + bool drop_enable; +}; + +/** + * struct cam_csid_evt_payload- payload for csid hw event + * @list : list head + * @irq_status : IRQ Status register + * @priv : Private data of payload + * @evt_type : Event type from CSID + * @hw_idx : Hw index + */ +struct cam_ife_csid_ver1_evt_payload { + struct list_head list; + uint32_t irq_status[CAM_IFE_CSID_IRQ_REG_MAX]; + void *priv; + uint32_t evt_type; + uint32_t hw_idx; +}; + +/** + * struct cam_ife_csid_tpg_cfg- csid tpg configuration data + * @width: width + * @height: height + * @test_pattern : pattern + * @encode_format: decode format + * 
@usage_type: whether dual isp is required + * @vc: virtual channel + * @dt: data type + * + */ +struct cam_ife_csid_ver1_tpg_cfg { + uint32_t width; + uint32_t height; + uint32_t test_pattern; + uint32_t encode_format; + uint32_t usage_type; + uint32_t vc; + uint32_t dt; +}; + +/* + * struct cam_ife_csid_ver1_hw: place holder for csid hw + * + * @hw_intf: hw intf + * @hw_info: hw info + * @core_info: csid core info + * @tasklet: tasklet to handle csid errors + * @token: private data to be sent with callback + * @counters: counters used in csid hw + * @path_res: array of path resources + * @cid_data: cid data + * @log_buf: Log Buffer to dump info + * @rx_cfg: rx configuration + * @flags: flags + * @irq_complete: complete variable for reset irq + * @debug_info: Debug info to capture debug info + * @timestamp: Timestamp to maintain evt timestamps + * @free_payload_list: List of free payload list + * @evt_payload: Event payload + * @clk_rate: clk rate for csid hw + * @res_type: cur res type for active hw + * @lock_state : spin lock + * + */ +struct cam_ife_csid_ver1_hw { + struct cam_hw_intf *hw_intf; + struct cam_hw_info *hw_info; + struct cam_ife_csid_core_info *core_info; + void *tasklet; + void *token; + struct cam_ife_csid_hw_counters counters; + struct cam_ife_csid_ver1_tpg_cfg tpg_cfg; + struct cam_isp_resource_node path_res + [CAM_IFE_PIX_PATH_RES_MAX]; + struct list_head free_payload_list; + struct cam_ife_csid_ver1_evt_payload evt_payload + [CAM_IFE_CSID_VER1_EVT_PAYLOAD_MAX]; + struct completion irq_complete + [CAM_IFE_CSID_IRQ_REG_MAX]; + struct cam_ife_csid_cid_data cid_data + [CAM_IFE_CSID_CID_MAX]; + uint8_t log_buf + [CAM_IFE_CSID_LOG_BUF_LEN]; + struct cam_ife_csid_rx_cfg rx_cfg; + struct cam_ife_csid_hw_flags flags; + struct cam_ife_csid_debug_info debug_info; + struct cam_ife_csid_timestamp timestamp; + uint64_t clk_rate; + uint32_t res_type; + spinlock_t lock_state; + cam_hw_mgr_event_cb_func event_cb; +}; + +int cam_ife_csid_hw_ver1_init(struct 
cam_hw_intf *csid_hw_intf, + struct cam_ife_csid_core_info *csid_core_info, + bool is_custom); + +int cam_ife_csid_hw_ver1_deinit(struct cam_hw_info *hw_priv); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.c new file mode 100644 index 0000000000..2da8e88e60 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.c @@ -0,0 +1,9211 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include + +#include +#include +#include + +#include + +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_isp_hw.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_cdm_util.h" +#include "cam_common_util.h" +#include "cam_subdev.h" +#include "cam_compat.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" + +/* CSIPHY TPG VC/DT values */ +#define CAM_IFE_CPHY_TPG_VC_VAL 0x0 +#define CAM_IFE_CPHY_TPG_DT_VAL 0x2B + +/* Timeout values in usec */ +#define CAM_IFE_CSID_TIMEOUT_SLEEP_US 1000 +#define CAM_IFE_CSID_TIMEOUT_ALL_US 100000 + +#define CAM_IFE_CSID_RESET_TIMEOUT_MS 100 + +/* + * Constant Factors needed to change QTimer ticks to nanoseconds + * QTimer Freq = 19.2 MHz + * Time(us) = ticks/19.2 + * Time(ns) = ticks/19.2 * 1000 + */ +#define CAM_IFE_CSID_QTIMER_MUL_FACTOR 10000 +#define CAM_IFE_CSID_QTIMER_DIV_FACTOR 192 + +/* Max number of sof irq's triggered in case of SOF freeze */ +#define CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX 12 + +/* Max CSI Rx irq error count threshold value 
*/ +#define CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT 100 + +/* Max sensor switch out of sync threshold */ +#define CAM_IFE_CSID_MAX_OUT_OF_SYNC_ERR_COUNT 6 + +#define CAM_CSID_IRQ_CTRL_NAME_LEN 20 + +/* Trustedvm domain id 0x08 for all CSID path */ +#define CAM_IFE_CSID_PATH_DOMAIN_ID 0x08080808 + +char *cam_ife_csid_ver2_top_reg_name[] = { + "top", + "top2" +}; + +char *cam_ife_csid_ver2_rx_reg_name[] = { + "rx", + "rx2" +}; + +static void cam_ife_csid_ver2_print_debug_reg_status( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res); + +static int cam_ife_csid_ver2_print_hbi_vbi( + struct cam_ife_csid_ver2_hw *csid_hw); + +static void cam_ife_csid_ver2_maskout_all_irqs( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_csid_hw_stop_args *csid_stop); + +static void cam_ife_csid_ver2_reset_csid_params(struct cam_ife_csid_ver2_hw *csid_hw) +{ + memset(&csid_hw->rx_cfg, 0, sizeof(struct cam_ife_csid_ver2_rx_cfg)); + memset(&csid_hw->top_cfg, 0, sizeof(struct cam_ife_csid_ver2_top_cfg)); + memset(&csid_hw->debug_info, 0, sizeof(struct cam_ife_csid_ver2_debug_info)); + memset(&csid_hw->counters, 0, sizeof(struct cam_ife_csid_hw_counters)); + csid_hw->flags.pf_err_detected = false; + csid_hw->standby_asserted = false; + csid_hw->token = NULL; +} + +static bool cam_ife_csid_ver2_cpas_cb( + uint32_t handle, void *user_data, struct cam_cpas_irq_data *irq_data) +{ + bool handled = false; + struct cam_ife_csid_ver2_hw *csid_hw = (struct cam_ife_csid_ver2_hw *)user_data; + + if (!csid_hw || !irq_data) + return false; + + switch (irq_data->irq_type) { + case CAM_CAMNOC_IRQ_SLAVE_ERROR: + if (irq_data->u.slave_err.errlog0_low.err_code == CAM_CAMNOC_ADDRESS_DECODE_ERROR) { + csid_hw->flags.pf_err_detected = true; + CAM_DBG(CAM_ISP, "CPAS address decode error rxved for CSID[%u]", + csid_hw->hw_intf->hw_idx); + } + handled = true; + break; + default: + break; + } + + return handled; +} + +static void cam_ife_csid_ver2_decode_format_validate_on_err( + struct 
cam_ife_csid_ver2_hw *csid_hw, uint32_t *vc, + uint32_t *dt_arr, uint32_t *decode_fmt_arr, int num_dt) +{ + int i, j; + bool payload_fmt = false, vc_match; + uint32_t decode_fmt; + const struct cam_ife_csid_ver2_reg_info *csid_reg = + (struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg; + + decode_fmt = csid_reg->cmn_reg->decode_format_payload_only; + + for (i = 0; i < num_dt; i++) + payload_fmt |= (decode_fmt == decode_fmt_arr[i]); + + for (i = 0; payload_fmt && (i < num_dt); i++) { + if (decode_fmt_arr[i] != decode_fmt) { + CAM_ERR(CAM_ISP, "CSID:%u decode_fmt_%d mismatch: 0x%x", + csid_hw->hw_intf->hw_idx, i, decode_fmt_arr[i]); + } + } + + vc_match = (vc[0] == vc[1]); + + for (i = 0; vc_match && (i < num_dt); i++) { + for (j = 0; j < i; j++) { + if ((dt_arr[i] == dt_arr[j]) && (decode_fmt_arr[i] != decode_fmt_arr[j])) { + CAM_ERR(CAM_ISP, "CSID:%u Wrong multi VC-DT configuration", + csid_hw->hw_intf->hw_idx); + + CAM_ERR(CAM_ISP, + "CSID:%u fmt %d fmt1 %d vc %d vc1 %d dt%d %d dt%d %d", + csid_hw->hw_intf->hw_idx, + decode_fmt_arr[i], decode_fmt_arr[j], + vc[0], vc[1], i, dt_arr[i], + j, dt_arr[j]); + } + } + } +} + +static int cam_ife_csid_ver2_decode_format_validate( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0, i, j; + const struct cam_ife_csid_ver2_reg_info *csid_reg = + (struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg; + struct cam_ife_csid_ver2_path_cfg *path_cfg = + (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + struct cam_ife_csid_cid_data *cid_data = &csid_hw->cid_data[path_cfg->cid]; + bool payload_fmt = false; + + /* Validation is only required for multi vc dt use case */ + if (!cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) + return rc; + + /** + * If any of the decode formats are for payload, HW requires all other + * decode formats to be for payload as well, loop over twice to check + */ + for (i = 0; i < CAM_ISP_VC_DT_CFG; i++) { + if 
(!cid_data->vc_dt[i].valid) + break; + if (path_cfg->path_format[i].decode_fmt == + csid_reg->cmn_reg->decode_format_payload_only) + payload_fmt = true; + } + + for (i = 0; payload_fmt && (i < CAM_ISP_VC_DT_CFG); i++) { + if (path_cfg->path_format[i].decode_fmt != + csid_reg->cmn_reg->decode_format_payload_only) { + CAM_ERR(CAM_ISP, "CSID:%u payload decode_fmt_%d mismatch: 0x%x", + csid_hw->hw_intf->hw_idx, i, path_cfg->path_format[i].decode_fmt); + rc = -EINVAL; + goto err; + } + } + + for (i = 0; i < CAM_ISP_VC_DT_CFG; i++) { + if (!cid_data->vc_dt[i].valid) + break; + for (j = 0; j < i; j++) { + if ((cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].vc == + cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc) && + (cid_data->vc_dt[i].dt == cid_data->vc_dt[j].dt)) { + if (path_cfg->path_format[i].decode_fmt != + path_cfg->path_format[j].decode_fmt) { + CAM_ERR(CAM_ISP, + "CSID:%u Wrong multi VC-DT configuration", + csid_hw->hw_intf->hw_idx); + + CAM_ERR(CAM_ISP, + "CSID:%u fmt %d fmt1 %d vc %d vc1 %d dt%d %d dt%d %d", + csid_hw->hw_intf->hw_idx, + path_cfg->path_format[i].decode_fmt, + path_cfg->path_format[j].decode_fmt, + cid_data->vc_dt[0].vc, + cid_data->vc_dt[1].vc, + i, cid_data->vc_dt[i].dt, + j, cid_data->vc_dt[j].dt); + rc = -EINVAL; + goto err; + } + } + } + } + + return rc; +err: + CAM_ERR(CAM_ISP, "Invalid decode fmt cfg CSID[%u] res [id %d name %s] rc %d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_name, rc); + return rc; +} + +static bool cam_ife_csid_ver2_disable_sof_retime( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + struct cam_ife_csid_ver2_reg_info *csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + struct cam_ife_csid_ver2_path_cfg *path_cfg = (struct cam_ife_csid_ver2_path_cfg *) + res->res_priv; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = csid_reg->path_reg[res->res_id]; + + if (!(path_reg->capabilities & CAM_IFE_CSID_CAP_SOF_RETIME_DIS)) + return false; 
+ + if (path_reg->disable_sof_retime_default) + return true; + + if (path_cfg->sfe_shdr || path_cfg->lcr_en) + return true; + + if (csid_hw->flags.rdi_lcr_en && res->res_id == CAM_IFE_PIX_PATH_RES_PPP) + return true; + + return false; +} + +static uint64_t __cam_ife_csid_ver2_get_time_stamp(void __iomem *mem_base, + uint32_t timestamp0_addr, uint32_t timestamp1_addr, + bool ts_comb_vcdt_en, uint32_t ts_comb_vcdt_mask) +{ + uint64_t timestamp_val, time_hi, time_lo, mask; + + time_hi = cam_io_r_mb(mem_base + timestamp1_addr); + time_lo = cam_io_r_mb(mem_base + timestamp0_addr); + mask = (uint64_t)ts_comb_vcdt_mask; + if (ts_comb_vcdt_en) + time_lo &= ~mask; + + timestamp_val = (time_hi << 32) | time_lo; + + return mul_u64_u32_div(timestamp_val, + CAM_IFE_CSID_QTIMER_MUL_FACTOR, + CAM_IFE_CSID_QTIMER_DIV_FACTOR); +} + +static void cam_ife_csid_ver2_print_camif_timestamps( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + struct cam_ife_csid_ver2_path_cfg *path_cfg; + struct cam_isp_resource_node *res; + bool found = false; + int i; + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + struct cam_ife_csid_ver2_reg_info *csid_reg = + (struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg; + + for (i = CAM_IFE_PIX_PATH_RES_RDI_0; i < CAM_IFE_PIX_PATH_RES_MAX; i++) { + res = &csid_hw->path_res[i]; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + if (!path_cfg || !path_cfg->irq_handle) + continue; + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + if (path_cfg->handle_camif_irq && + res->is_rdi_primary_res) + found = true; + break; + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_IPP_1: + case CAM_IFE_PIX_PATH_RES_IPP_2: + if (path_cfg->handle_camif_irq) + found = true; + break; + default: + break; + } + if (found) + 
break; + } + + + if (found && path_cfg) { + path_reg = csid_reg->path_reg[res->res_id]; + + CAM_INFO(CAM_ISP, + "CSID[%u] %s SOF[%lld:%lld] EPOCH[%lld:%lld] EOF[%lld:%lld]", + csid_hw->hw_intf->hw_idx, res->res_name, + path_cfg->sof_ts.tv_sec, path_cfg->sof_ts.tv_nsec, + path_cfg->epoch_ts.tv_sec, path_cfg->epoch_ts.tv_nsec, + path_cfg->eof_ts.tv_sec, path_cfg->eof_ts.tv_nsec); + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d no path registers for res :%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return; + } + + CAM_INFO(CAM_ISP, + "qtimer current [SOF: %lld EOF: %lld] prev [SOF: %lld EOF: %lld]", + __cam_ife_csid_ver2_get_time_stamp( + soc_info->reg_map[0].mem_base, + path_reg->timestamp_curr0_sof_addr, + path_reg->timestamp_curr1_sof_addr, + path_cfg->ts_comb_vcdt_en, + csid_reg->cmn_reg->ts_comb_vcdt_mask), + __cam_ife_csid_ver2_get_time_stamp( + soc_info->reg_map[0].mem_base, + path_reg->timestamp_curr0_eof_addr, + path_reg->timestamp_curr1_eof_addr, + path_cfg->ts_comb_vcdt_en, + csid_reg->cmn_reg->ts_comb_vcdt_mask), + __cam_ife_csid_ver2_get_time_stamp( + soc_info->reg_map[0].mem_base, + path_reg->timestamp_perv0_sof_addr, + path_reg->timestamp_perv1_sof_addr, + path_cfg->ts_comb_vcdt_en, + csid_reg->cmn_reg->ts_comb_vcdt_mask), + __cam_ife_csid_ver2_get_time_stamp( + soc_info->reg_map[0].mem_base, + path_reg->timestamp_perv0_eof_addr, + path_reg->timestamp_perv1_eof_addr, + path_cfg->ts_comb_vcdt_en, + csid_reg->cmn_reg->ts_comb_vcdt_mask)); + } +} + +static void cam_ife_csid_ver2_update_event_ts( + struct timespec64 *dst_ts, struct timespec64 *src_ts) +{ + dst_ts->tv_sec = src_ts->tv_sec; + dst_ts->tv_nsec = src_ts->tv_nsec; +} + +static void cam_ife_csid_ver2_send_cdr_sweep_csi2_rx_vals( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_reg_info *csid_reg, + uint32_t csi2_rx_val) +{ + struct cam_subdev_msg_cdr_sweep_info csid_cdr_sweep_info; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + struct cam_hw_soc_info 
*soc_info; + void __iomem *base; + + if (unlikely(!csid_hw || !csid_reg)) + return; + + if (!csid_hw->debug_info.cdr_sweep_debug_enabled) + return; + + csi2_reg = csid_reg->csi2_reg; + soc_info = &csid_hw->hw_info->soc_info; + base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + csid_cdr_sweep_info.phy_idx = (csid_hw->rx_cfg.phy_sel - 1); + csid_cdr_sweep_info.csi2_rx_total_crc_err = cam_io_r_mb(base + + csi2_reg->total_crc_err_addr); + csid_cdr_sweep_info.csi2_rx_total_pkts_rcvd = cam_io_r_mb(base + + csi2_reg->total_pkts_rcvd_addr); + csid_cdr_sweep_info.csi2_rx_status = csi2_rx_val ? csi2_rx_val : + cam_io_r_mb(base + csi2_reg->irq_status_addr[ + CAM_IFE_CSID_RX_IRQ_STATUS_REG0]); + csid_cdr_sweep_info.lane_cfg = csid_hw->rx_cfg.lane_cfg; + csid_cdr_sweep_info.epd_enabled = csid_hw->rx_cfg.epd_supported; + csid_cdr_sweep_info.csi2_err_seen = (csi2_rx_val != 0); + + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, CAM_SUBDEV_MESSAGE_CDR_SWEEP, + (void *)&csid_cdr_sweep_info); +} + +static int cam_ife_csid_ver2_set_debug( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_debug_cfg_args *debug_args) +{ + int bit_pos = 0; + uint32_t val, debug_val; + int i = 0; + const uint8_t *dbg_bit_pos = NULL; + const uint64_t *evt_bitmap = NULL; + + struct cam_ife_csid_ver2_reg_info *csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + memset(&csid_hw->debug_info, 0, + sizeof(struct cam_ife_csid_ver2_debug_info)); + csid_hw->debug_info.debug_val = debug_args->csid_debug; + csid_hw->debug_info.test_bus_val = debug_args->csid_testbus_debug; + csid_hw->debug_info.set_domain_id_enabled = debug_args->set_domain_id_enabled; + csid_hw->debug_info.domain_id_value = debug_args->domain_id_value; + csid_hw->debug_info.cdr_sweep_debug_enabled = debug_args->enable_cdr_sweep_debug; + + evt_bitmap = csid_reg->rx_debug_mask->evt_bitmap; + dbg_bit_pos = csid_reg->rx_debug_mask->bit_pos; + + /* + * RX capture debug + * [0:3] = rst 
strobes + * [4:11] = vc for capture + * [12:19] = dt for capture + */ + csid_hw->debug_info.rst_capture_strobes = (debug_args->csid_rx_capture_debug & + CAM_IFE_CSID_DEBUGFS_RST_STROBE_MASK); + csid_hw->debug_info.rx_capture_vc = ((debug_args->csid_rx_capture_debug >> + CAM_IFE_CSID_DEBUGFS_VC_SHIFT_MASK) & CAM_IFE_CSID_DEBUGFS_VC_DT_MASK); + csid_hw->debug_info.rx_capture_dt = ((debug_args->csid_rx_capture_debug >> + CAM_IFE_CSID_DEBUGFS_DT_SHIFT_MASK) & CAM_IFE_CSID_DEBUGFS_VC_DT_MASK); + csid_hw->debug_info.rx_capture_debug_set = debug_args->rx_capture_debug_set; + + debug_val = csid_hw->debug_info.debug_val; + while (debug_val) { + + if (!(debug_val & 0x1)) { + debug_val >>= 1; + bit_pos++; + continue; + } + + val = BIT(bit_pos); + + switch (val) { + case CAM_IFE_CSID_DEBUG_ENABLE_SOF_IRQ: + csid_hw->debug_info.path_mask |= + IFE_CSID_VER2_PATH_INFO_INPUT_SOF; + break; + case CAM_IFE_CSID_DEBUG_ENABLE_EOF_IRQ: + csid_hw->debug_info.path_mask |= + IFE_CSID_VER2_PATH_INFO_INPUT_EOF; + break; + case CAM_IFE_CSID_DEBUG_ENABLE_SOT_IRQ: + for (i = 0; i < csid_reg->num_rx_regs; i++) { + if (evt_bitmap[i] & BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED)) { + csid_hw->debug_info.rx_mask[i] |= + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED]) | + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED]) | + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED]) | + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED]); + } + } + break; + case CAM_IFE_CSID_DEBUG_ENABLE_EOT_IRQ: + for (i = 0; i < csid_reg->num_rx_regs; i++) { + if (evt_bitmap[i] & BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED)) { + csid_hw->debug_info.rx_mask[i] |= + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED]) | + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED]) | + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED]) | + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED]); + } + } + break; + case CAM_IFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE: + for (i = 0; i < csid_reg->num_rx_regs; i++) { + if (evt_bitmap[i] & 
BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED)) { + csid_hw->debug_info.rx_mask[i] |= + BIT(dbg_bit_pos + [CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED]); + } + } + break; + case CAM_IFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE: + for (i = 0; i < csid_reg->num_rx_regs; i++) { + if (evt_bitmap[i] & BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED)) { + csid_hw->debug_info.rx_mask[i] |= + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED]); + } + } + break; + case CAM_IFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE: + for (i = 0; i < csid_reg->num_rx_regs; i++) { + if (evt_bitmap[i] & + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED)) { + csid_hw->debug_info.rx_mask[i] |= + BIT(dbg_bit_pos + [CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED]); + } + } + break; + case CAM_IFE_DEBUG_ENABLE_UNMAPPED_VC_DT_IRQ: + for (i = 0; i < csid_reg->num_rx_regs; i++) { + if (evt_bitmap[i] & BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT)) { + csid_hw->debug_info.rx_mask[i] |= + BIT(dbg_bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT]); + } + } + break; + case CAM_IFE_CSID_DEBUG_ENABLE_VOTE_UP_IRQ: + evt_bitmap = csid_reg->top_debug_mask->evt_bitmap; + dbg_bit_pos = csid_reg->top_debug_mask->bit_pos; + + for (i = 0; i < csid_reg->num_top_regs; i++) { + if (evt_bitmap[i] & BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP)) { + csid_hw->debug_info.top_mask[i] |= + BIT(dbg_bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP]); + } + } + break; + case CAM_IFE_CSID_DEBUG_ENABLE_VOTE_DN_IRQ: + evt_bitmap = csid_reg->top_debug_mask->evt_bitmap; + dbg_bit_pos = csid_reg->top_debug_mask->bit_pos; + + for (i = 0; i < csid_reg->num_top_regs; i++) { + if (evt_bitmap[i] & BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN)) { + csid_hw->debug_info.top_mask[i] |= + BIT(dbg_bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN]); + } + } + break; + case CAM_IFE_CSID_DEBUG_ENABLE_ERR_NO_VOTE_DN_IRQ: + evt_bitmap = csid_reg->top_debug_mask->evt_bitmap; + dbg_bit_pos = csid_reg->top_debug_mask->bit_pos; + + for (i = 0; i < csid_reg->num_top_regs; i++) { + if (evt_bitmap[i] & BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN)) 
{ + csid_hw->debug_info.top_mask[i] |= + BIT(dbg_bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN]); + } + } + break; + default: + break; + } + + debug_val >>= 1; + bit_pos++; + } + + return 0; +} + +static int cam_ife_csid_ver2_sof_irq_debug( + struct cam_ife_csid_ver2_hw *csid_hw, + void *cmd_args) +{ + int i = 0; + int data_idx = 0; + bool sof_irq_enable = false; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + struct cam_isp_resource_node *res; + uint32_t irq_mask = 0; + + if (*((uint32_t *)cmd_args) == 1) + sof_irq_enable = true; + + if (csid_hw->hw_info->hw_state == + CAM_HW_STATE_POWER_DOWN) { + CAM_WARN(CAM_ISP, + "CSID:%u powered down unable to %s sof irq", + csid_hw->hw_intf->hw_idx, + (sof_irq_enable) ? "enable" : "disable"); + return 0; + } + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + for (i = CAM_IFE_PIX_PATH_RES_RDI_0; i < CAM_IFE_PIX_PATH_RES_MAX; + i++) { + + res = &csid_hw->path_res[i]; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + + if (!path_cfg || !path_cfg->irq_handle) + continue; + + /* Assuming controller has only 1 register set */ + irq_mask = IFE_CSID_VER2_PATH_INFO_INPUT_SOF; + cam_irq_controller_update_irq( + csid_hw->path_irq_controller[res->res_id], + path_cfg->irq_handle, + sof_irq_enable, &irq_mask); + } + + if (sof_irq_enable) { + csid_hw->debug_info.path_mask |= + IFE_CSID_VER2_PATH_INFO_INPUT_SOF; + csid_hw->flags.sof_irq_triggered = true; + } else { + csid_hw->debug_info.path_mask &= + ~IFE_CSID_VER2_PATH_INFO_INPUT_SOF; + csid_hw->flags.sof_irq_triggered = false; + } + + CAM_INFO(CAM_ISP, "SOF freeze: CSID:%u SOF irq %s, Notify CSIPHY: %d", + csid_hw->hw_intf->hw_idx, + (sof_irq_enable) ? 
"enabled" : "disabled", csid_hw->rx_cfg.phy_sel - 1); + + data_idx = (int)(csid_hw->rx_cfg.phy_sel - csid_reg->cmn_reg->phy_sel_base_idx); + if (sof_irq_enable) { + if (data_idx < 0) + CAM_WARN(CAM_ISP, "CSID:%u Can't notify csiphy, incorrect phy selected=%d", + csid_hw->hw_intf->hw_idx, data_idx); + else { + CAM_INFO(CAM_ISP, "CSID:%u Notify CSIPHY: %d", csid_hw->hw_intf->hw_idx, + data_idx); + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_REG_DUMP, (void *)&data_idx); + } + } + + return 0; +} + +static int cam_ife_csid_ver2_get_evt_payload( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_evt_payload **evt_payload, + struct list_head *payload_list, + spinlock_t *lock) +{ + + spin_lock(lock); + + if (list_empty(payload_list)) { + *evt_payload = NULL; + spin_unlock(lock); + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload core %d", + csid_hw->hw_intf->hw_idx); + return -ENOMEM; + } + + *evt_payload = list_first_entry(payload_list, + struct cam_ife_csid_ver2_evt_payload, list); + list_del_init(&(*evt_payload)->list); + spin_unlock(lock); + + return 0; +} + +static int cam_ife_csid_ver2_put_evt_payload( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_evt_payload **evt_payload, + struct list_head *payload_list, + spinlock_t *lock) +{ + unsigned long flags; + + if (*evt_payload == NULL) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid payload core %d", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_ife_csid_ver2_evt_payload); + spin_lock_irqsave(lock, flags); + list_add_tail(&(*evt_payload)->list, payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(lock, flags); + + return 0; +} + +static int cam_ife_csid_ver2_top_info_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int rc = 0; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_ife_csid_ver2_evt_payload *evt_payload; + + csid_hw = 
th_payload->handler_priv; + + rc = cam_ife_csid_ver2_get_evt_payload(csid_hw, &evt_payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + if (rc) { + CAM_WARN_RATE_LIMIT(CAM_ISP, "CSID:%u get payload failed, TOP info status: 0x%x", + csid_hw->hw_intf->hw_idx, + th_payload->evt_status_arr[0]); + return rc; + } + + CAM_DBG(CAM_ISP, "CSID:%u TOP info status: 0x%x", csid_hw->hw_intf->hw_idx, + th_payload->evt_status_arr[0]); + + evt_payload->irq_reg_val = th_payload->evt_status_arr[0]; + ktime_get_boottime_ts64(&evt_payload->timestamp); + th_payload->evt_payload_priv = evt_payload; + + return 0; +} + +static int cam_ife_csid_ver2_top2_info_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + return cam_ife_csid_ver2_top_info_irq_top_half(evt_id, th_payload); +} + + +static int cam_ife_csid_ver2_top_err_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int rc = 0; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_ife_csid_ver2_evt_payload *evt_payload; + + csid_hw = th_payload->handler_priv; + + rc = cam_ife_csid_ver2_get_evt_payload(csid_hw, &evt_payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + + CAM_DBG(CAM_ISP, "CSID:%u TOP status: 0x%x", csid_hw->hw_intf->hw_idx, + th_payload->evt_status_arr[0]); + + if (rc) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%u TOP status: 0x%x", + csid_hw->hw_intf->hw_idx, th_payload->evt_status_arr[0]); + return rc; + } + + evt_payload->irq_reg_val = th_payload->evt_status_arr[0]; + ktime_get_boottime_ts64(&evt_payload->timestamp); + th_payload->evt_payload_priv = evt_payload; + + return 0; +} + +static int cam_ife_csid_ver2_handle_buf_done_irq( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + int rc = 0; + + csid_hw = th_payload->handler_priv; + + CAM_DBG(CAM_ISP, "CSID:%u Enter", csid_hw->hw_intf->hw_idx); + rc = cam_irq_controller_handle_irq(evt_id, + 
csid_hw->buf_done_irq_controller, CAM_IRQ_EVT_GROUP_0); + CAM_DBG(CAM_ISP, "CSID:%u Exit (rc=%d)", csid_hw->hw_intf->hw_idx, rc); + + return (rc == IRQ_HANDLED) ? 0 : -EINVAL; +} + +static int cam_ife_csid_ver2_handle_top2_irq( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_ife_csid_ver2_hw *csid_hw = th_payload->handler_priv; + int rc = 0; + + rc = cam_irq_controller_handle_irq(evt_id, + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1], + CAM_IRQ_EVT_GROUP_0); + CAM_DBG(CAM_ISP, "CSID:%d Exit (rc=%d)", csid_hw->hw_intf->hw_idx, rc); + + return (rc == IRQ_HANDLED) ? 0 : -EINVAL; +} + +static int cam_ife_csid_ver2_handle_rx_irq( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + int rc = 0; + + csid_hw = th_payload->handler_priv; + + rc = cam_irq_controller_handle_irq(evt_id, + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + CAM_IRQ_EVT_GROUP_0); + CAM_DBG(CAM_ISP, "CSID:%d Exit (rc=%d)", csid_hw->hw_intf->hw_idx, rc); + + return (rc == IRQ_HANDLED) ? 0 : -EINVAL; +} + +static int cam_ife_csid_ver2_handle_rx2_irq( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + int rc = 0; + + csid_hw = th_payload->handler_priv; + + rc = cam_irq_controller_handle_irq(evt_id, + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1], + CAM_IRQ_EVT_GROUP_0); + CAM_DBG(CAM_ISP, "CSID:%d Exit (rc=%d)", csid_hw->hw_intf->hw_idx, rc); + + return (rc == IRQ_HANDLED) ? 
0 : -EINVAL; +} + +static int cam_ife_csid_ver2_handle_path_irq( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_isp_resource_node *res = th_payload->handler_priv; + struct cam_ife_csid_ver2_hw *csid_hw = cam_isp_res_core_info(res); + int rc = 0; + + CAM_DBG(CAM_ISP, "CSID:%u %s Enter", csid_hw->hw_intf->hw_idx, res->res_name); + rc = cam_irq_controller_handle_irq(evt_id, + csid_hw->path_irq_controller[res->res_id], CAM_IRQ_EVT_GROUP_0); + CAM_DBG(CAM_ISP, "CSID:%u %s Exit (rc=%d)", csid_hw->hw_intf->hw_idx, res->res_name, rc); + + return (rc == IRQ_HANDLED) ? 0 : -EINVAL; +} + +static int cam_ife_csid_ver2_path_err_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + struct cam_hw_info *hw_info; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_ife_csid_ver2_evt_payload *evt_payload; + struct cam_isp_resource_node *res; + const uint8_t **irq_reg_tag; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + + res = th_payload->handler_priv; + + if (!res) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned"); + return -ENODEV; + } + + hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + + rc = cam_ife_csid_ver2_get_evt_payload(csid_hw, &evt_payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + irq_reg_tag = cam_ife_csid_get_irq_reg_tag_ptr(); + + CAM_DBG(CAM_ISP, "CSID:%u %s status: 0x%x", + csid_hw->hw_intf->hw_idx, + irq_reg_tag[path_cfg->irq_reg_idx], + th_payload->evt_status_arr[0]); + + if (rc) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%u %s status: 0x%x", + csid_hw->hw_intf->hw_idx, + irq_reg_tag[path_cfg->irq_reg_idx], + th_payload->evt_status_arr[0]); + return rc; + } + + evt_payload->irq_reg_val = th_payload->evt_status_arr[0]; + + ktime_get_boottime_ts64(&path_cfg->error_ts); + th_payload->evt_payload_priv = evt_payload; + + return 0; +} + 
/*
 * Top half for regular path IRQs (SOF/EPOCH/EOF etc.): capture the status
 * word into an event payload and, when the event includes a SOF that this
 * path services (plain SOF mask, or the multi-context comp SOF mask on
 * MC-capable paths), latch the current SOF qtimer register pair into
 * sof_ts_reg_val for the bottom half.
 */
static int cam_ife_csid_ver2_path_top_half(
	uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	int32_t rc;
	struct cam_hw_info *hw_info;
	struct cam_ife_csid_ver2_hw *csid_hw = NULL;
	struct cam_ife_csid_ver2_evt_payload *evt_payload;
	struct cam_isp_resource_node *res;
	const uint8_t **irq_reg_tag;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_ife_csid_ver2_reg_info *csid_reg = NULL;
	struct cam_hw_soc_info *soc_info;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg;

	res = th_payload->handler_priv;

	if (!res) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned");
		return -ENODEV;
	}

	hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv;
	csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info;
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;
	path_reg = csid_reg->path_reg[res->res_id];
	soc_info = &csid_hw->hw_info->soc_info;

	rc = cam_ife_csid_ver2_get_evt_payload(csid_hw, &evt_payload,
		&csid_hw->path_free_payload_list,
		&csid_hw->path_payload_lock);
	irq_reg_tag = cam_ife_csid_get_irq_reg_tag_ptr();

	CAM_DBG(CAM_ISP, "CSID:%u %s status: 0x%x",
		csid_hw->hw_intf->hw_idx,
		(th_payload->is_comp_irq ? "COMP_IRQ" :
			((char *) irq_reg_tag[path_cfg->irq_reg_idx])),
		th_payload->evt_status_arr[0]);

	if (rc) {
		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%u %s status: 0x%x",
			csid_hw->hw_intf->hw_idx,
			irq_reg_tag[path_cfg->irq_reg_idx],
			th_payload->evt_status_arr[0]);
		return rc;
	}

	evt_payload->irq_reg_val = th_payload->evt_status_arr[0];

	/* Latch 64-bit SOF qtimer (hi then lo) for camif-handling paths */
	if (((evt_payload->irq_reg_val & path_reg->sof_irq_mask) ||
		((path_reg->capabilities & CAM_IFE_CSID_CAP_MULTI_CTXT) &&
		(evt_payload->irq_reg_val & csid_reg->ipp_mc_reg->comp_sof_mask)))
		&& path_cfg->handle_camif_irq) {
		evt_payload->sof_ts_reg_val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			path_reg->timestamp_curr1_sof_addr);
		evt_payload->sof_ts_reg_val = (evt_payload->sof_ts_reg_val << 32) |
			cam_io_r_mb(soc_info->reg_map[0].mem_base +
				path_reg->timestamp_curr0_sof_addr);
	}

	ktime_get_boottime_ts64(&evt_payload->timestamp);
	th_payload->evt_payload_priv = evt_payload;

	return 0;
}

/*
 * Multi-context top half: marks the payload as a comp IRQ, delegates to the
 * regular path top half, then tags the resulting payload as multi-context.
 */
static int cam_ife_csid_ver2_mc_top_half(
	uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	int32_t rc;
	struct cam_ife_csid_ver2_evt_payload *evt_payload;
	struct cam_isp_resource_node *res;

	res = th_payload->handler_priv;

	if (!res) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned");
		return -ENODEV;
	}

	th_payload->is_comp_irq = true;
	rc = cam_ife_csid_ver2_path_top_half(evt_id, th_payload);

	if (rc) {
		CAM_ERR(CAM_ISP, "Multi context top half fail");
		return rc;
	}

	evt_payload = (struct cam_ife_csid_ver2_evt_payload *)th_payload->evt_payload_priv;
	evt_payload->is_mc = true;

	return 0;
}

/*
 * Clear all initial-frame-discard state for a path: reset counters/flags,
 * drop this path's ref on the hw-wide discard count, and unsubscribe the
 * explicit input-SOF IRQ if one was registered for the discard logic.
 */
static inline void cam_ife_csid_ver2_reset_discard_frame_cfg(
	struct cam_isp_resource_node *res,
	struct cam_ife_csid_ver2_hw *csid_hw,
	struct cam_ife_csid_ver2_path_cfg *path_cfg)
{
	int rc;

	/* Reset discard config params */
	path_cfg->discard_init_frames = false;
	path_cfg->skip_discard_frame_cfg = false;
	path_cfg->num_frames_discard = 0;
	path_cfg->sof_cnt = 0;

	/* Decrement discard frame ref cnt for this path */
	atomic_dec(&csid_hw->discard_frame_per_path);

	/* If input SOF irq is enabled explicitly - unsubscribe in th*/
	if (path_cfg->discard_irq_handle > 0) {
		rc = cam_irq_controller_unsubscribe_irq(
			csid_hw->path_irq_controller[res->res_id],
			path_cfg->discard_irq_handle);
		if (rc)
			CAM_WARN(CAM_ISP,
				"CSID[%u] Failed to unsubscribe input SOF for res: %s",
				csid_hw->hw_intf->hw_idx, res->res_name);

		path_cfg->discard_irq_handle = 0;
	}

	CAM_DBG(CAM_ISP, "CSID[%u] Reset discard frame config for res: %s discard_ref_cnt: %u",
		csid_hw->hw_intf->hw_idx, res->res_name,
		atomic_read(&csid_hw->discard_frame_per_path));
}

/*
 * Top half for discard-SOF IRQs: log only, no payload — the SOF counting is
 * done entirely in the exclusive bottom half.
 */
static int cam_ife_csid_ver2_discard_sof_top_half(
	uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	struct cam_hw_info *hw_info;
	struct cam_ife_csid_ver2_hw *csid_hw = NULL;
	struct cam_isp_resource_node *res;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;

	res = th_payload->handler_priv;

	if (!res) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned");
		return -ENODEV;
	}

	hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv;
	csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info;
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;

	/*
	 * NOTE(review): this log indexes evt_status_arr by
	 * path_cfg->irq_reg_idx while every other path handler uses
	 * evt_status_arr[0] — confirm irq_reg_idx is always a valid index
	 * into the status array here.
	 */
	CAM_DBG(CAM_ISP, "CSID: %u status: 0x%x for res: %s",
		csid_hw->hw_intf->hw_idx,
		th_payload->evt_status_arr[path_cfg->irq_reg_idx],
		res->res_name);

	/* No need of payload since it's an exclusive th & bh */
	th_payload->evt_payload_priv = NULL;

	return 0;
}

/*
 * Bottom half counting discarded SOFs on a PIX (IPP/PPP) path. Once the
 * requested number of frames has been skipped, re-issue the resume-at-frame-
 * boundary start command (master / standalone paths only — a slave path is
 * resumed by its master) and tear down the discard config.
 * All register work is done under lock_state with a power-state check.
 */
static int cam_ife_csid_ver2_discard_sof_pix_bottom_half(
	void *handler_priv,
	void *evt_payload_priv)
{
	struct cam_hw_info *hw_info;
	struct cam_ife_csid_ver2_hw *csid_hw = NULL;
	struct cam_isp_resource_node *res;
	struct cam_ife_csid_ver2_reg_info *csid_reg = NULL;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_hw_soc_info *soc_info;
	void __iomem *base;
	uint32_t val;

	if (!handler_priv) {
		CAM_ERR(CAM_ISP, "Invalid handler_priv");
		return -EINVAL;
	}

	res = handler_priv;
	hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv;
	csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
	soc_info = &csid_hw->hw_info->soc_info;
	base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;

	path_reg = csid_reg->path_reg[res->res_id];

	/* Count SOFs */
	path_cfg->sof_cnt++;

	CAM_DBG(CAM_ISP, "CSID[%u] Discard frame on %s path, num SOFs: %u",
		csid_hw->hw_intf->hw_idx, res->res_name, path_cfg->sof_cnt);

	spin_lock(&csid_hw->lock_state);
	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_ERR(CAM_ISP, "CSID[%u] powered down state",
			csid_hw->hw_intf->hw_idx);
		goto end;
	}

	/* Check with requested number of frames to be dropped */
	if (path_cfg->sof_cnt == path_cfg->num_frames_discard) {
		if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
			path_cfg->sync_mode == CAM_ISP_HW_SYNC_NONE) {
			val = cam_io_r_mb(base + path_reg->ctrl_addr);
			val |= path_reg->resume_frame_boundary;
			cam_io_w_mb(val, base + path_reg->ctrl_addr);
			CAM_DBG(CAM_ISP,
				"CSID[%u] start cmd programmed for %s sof_cnt %u",
				csid_hw->hw_intf->hw_idx,
				res->res_name,
				path_cfg->sof_cnt);
		}
		cam_ife_csid_ver2_reset_discard_frame_cfg(res, csid_hw, path_cfg);
	}
end:
	spin_unlock(&csid_hw->lock_state);
	return 0;
}

/*
 * Bottom half counting discarded SOFs on an RDI path. Same flow as the PIX
 * variant but the resume command is issued unconditionally (RDI paths have
 * no master/slave sync gating here).
 */
static int cam_ife_csid_ver2_discard_sof_rdi_bottom_half(
	void *handler_priv,
	void *evt_payload_priv)
{
	struct cam_hw_info *hw_info;
	struct cam_ife_csid_ver2_hw *csid_hw = NULL;
	struct cam_isp_resource_node *res;
	struct cam_ife_csid_ver2_reg_info *csid_reg = NULL;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_hw_soc_info *soc_info;
	void __iomem *base;
	uint32_t val;

	if (!handler_priv) {
		CAM_ERR(CAM_ISP, "Invalid handler_priv");
		return -EINVAL;
	}

	res = handler_priv;
	hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv;
	csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
	soc_info = &csid_hw->hw_info->soc_info;
	base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;
	path_reg = csid_reg->path_reg[res->res_id];

	/* Count SOFs */
	path_cfg->sof_cnt++;
	CAM_DBG(CAM_ISP, "CSID[%u] Discard frame on %s path, num SOFs: %u",
		csid_hw->hw_intf->hw_idx, res->res_name, path_cfg->sof_cnt);

	spin_lock(&csid_hw->lock_state);
	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_ERR(CAM_ISP, "CSID[%u] powered down state",
			csid_hw->hw_intf->hw_idx);
		goto end;
	}

	/* Check with requested number of frames to be dropped */
	if (path_cfg->sof_cnt == path_cfg->num_frames_discard) {
		val = cam_io_r_mb(base + path_reg->ctrl_addr);
		val |= path_reg->resume_frame_boundary;
		cam_io_w_mb(val, base + path_reg->ctrl_addr);
		CAM_DBG(CAM_ISP,
			"CSID[%u] start cmd programmed for %s sof_cnt %u",
			csid_hw->hw_intf->hw_idx,
			res->res_name, path_cfg->sof_cnt);

		cam_ife_csid_ver2_reset_discard_frame_cfg(res, csid_hw, path_cfg);
	}
end:
	spin_unlock(&csid_hw->lock_state);
	return 0;
}

/*
 * Fatal-error path: mask off every CSI2 RX related IRQ (top-level rx handle,
 * rx2 handle, per-register error and info handles) without unsubscribing, so
 * the IRQ storm stops while handles remain valid for later cleanup.
 * Always returns 0.
 */
static int cam_ife_csid_ver2_stop_csi2_in_err(
	struct cam_ife_csid_ver2_hw *csid_hw)
{
	struct cam_ife_csid_ver2_reg_info *csid_reg = NULL;
	int rx_idx;

	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;

	CAM_DBG(CAM_ISP, "CSID:%u Stop csi2 rx",
		csid_hw->hw_intf->hw_idx);

	if (csid_hw->rx_cfg.top_irq_handle)
		cam_irq_controller_disable_irq(
			csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0],
			csid_hw->rx_cfg.top_irq_handle);

	/* rx2 handle was subscribed on the REG0 controller (as a dependent) */
	if (csid_reg->num_rx_regs > 1 && csid_hw->rx_cfg.rx2_irq_handle)
		cam_irq_controller_disable_irq(
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0],
			csid_hw->rx_cfg.rx2_irq_handle);

	for (rx_idx = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; rx_idx < csid_reg->num_rx_regs; rx_idx++) {
		if (!csid_hw->rx_cfg.err_irq_handle[rx_idx])
			continue;

		cam_irq_controller_disable_irq(
			csid_hw->rx_irq_controller[rx_idx],
			csid_hw->rx_cfg.err_irq_handle[rx_idx]);
	}

	for (rx_idx = 0; rx_idx < csid_reg->num_rx_regs; rx_idx++) {
		if (!csid_hw->rx_cfg.irq_handle[rx_idx])
			continue;

		cam_irq_controller_disable_irq(
			csid_hw->rx_irq_controller[rx_idx],
			csid_hw->rx_cfg.irq_handle[rx_idx]);

	}

	return 0;
}

/*
 * Fully unsubscribe all RX IRQ handles (error + info per register, rx2, and
 * the rx handle in the top controller) and unregister the rx2→rx and rx→top
 * dependent-controller links. Handles are zeroed as they are released.
 */
static inline void cam_ife_csid_ver2_maskout_rx_irqs(
	struct cam_ife_csid_ver2_hw *csid_hw)
{
	int rc, rx_idx;
	const struct cam_ife_csid_ver2_reg_info *csid_reg;
	struct cam_hw_soc_info *soc_info;

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;

	for (rx_idx = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; rx_idx < csid_reg->num_rx_regs; rx_idx++) {
		if (!csid_hw->rx_cfg.err_irq_handle[rx_idx])
			continue;

		rc = cam_irq_controller_unsubscribe_irq(csid_hw->rx_irq_controller[rx_idx],
			csid_hw->rx_cfg.err_irq_handle[rx_idx]);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unsubscribe rx err[%d] irq for CSID:%u rc:%d",
				rx_idx, csid_hw->hw_intf->hw_idx, rc);
		csid_hw->rx_cfg.err_irq_handle[rx_idx] = 0;
	}

	for (rx_idx = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; rx_idx < csid_reg->num_rx_regs; rx_idx++) {
		if (!(csid_hw->rx_cfg.irq_handle[rx_idx]))
			continue;

		rc = cam_irq_controller_unsubscribe_irq(
			csid_hw->rx_irq_controller[rx_idx],
			csid_hw->rx_cfg.irq_handle[rx_idx]);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unsubscribe rx info[%d] irq for CSID:%u rc:%d",
				rx_idx, csid_hw->hw_intf->hw_idx, rc);
		csid_hw->rx_cfg.irq_handle[rx_idx] = 0;
	}

	if (csid_reg->num_rx_regs > 1 &&
		csid_hw->rx_cfg.rx2_irq_handle) {
		rc = cam_irq_controller_unsubscribe_irq(
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0],
			csid_hw->rx_cfg.rx2_irq_handle);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unsubscribe rx2 irq evt in rx for CSID:%u rc:%d",
				csid_hw->hw_intf->hw_idx, rc);

		csid_hw->rx_cfg.rx2_irq_handle = 0;
		rc = cam_irq_controller_unregister_dependent(
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0],
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1]);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unregister rx2 dependent with rx CSID:%u rc: %d",
				csid_hw->hw_intf->hw_idx, rc);

	}

	if (csid_hw->rx_cfg.top_irq_handle) {
		rc = cam_irq_controller_unsubscribe_irq(
			csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0],
			csid_hw->rx_cfg.top_irq_handle);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unsubscribe rx irq in top for CSID:%u rc:%d",
				csid_hw->hw_intf->hw_idx, rc);

		csid_hw->rx_cfg.top_irq_handle = 0;
		rc = cam_irq_controller_unregister_dependent(
			csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0],
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0]);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unregister rx dependent with top CSID:%u rc: %d",
				csid_hw->hw_intf->hw_idx, rc);
	}
}

/*
 * Same teardown as cam_ife_csid_ver2_maskout_rx_irqs but via
 * unsubscribe_irq_evt (event-only unsubscribe, leaves HW mask handling to
 * the controller). Dependent-controller unregister results are not checked
 * in this variant.
 */
static inline void cam_ife_csid_ver2_disable_rx_evts(
	struct cam_ife_csid_ver2_hw *csid_hw)
{
	int rc, rx_idx;
	const struct cam_ife_csid_ver2_reg_info *csid_reg;
	struct cam_hw_soc_info *soc_info;

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;

	for (rx_idx = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; rx_idx < csid_reg->num_rx_regs; rx_idx++) {
		if (!csid_hw->rx_cfg.err_irq_handle[rx_idx])
			continue;

		rc = cam_irq_controller_unsubscribe_irq_evt(csid_hw->rx_irq_controller[rx_idx],
			csid_hw->rx_cfg.err_irq_handle[rx_idx]);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unsubscribe rx err[%d] irq evt for CSID:%u rc:%d",
				rx_idx, csid_hw->hw_intf->hw_idx, rc);

		csid_hw->rx_cfg.err_irq_handle[rx_idx] = 0;

	}

	for (rx_idx = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; rx_idx < csid_reg->num_rx_regs; rx_idx++) {
		if (!csid_hw->rx_cfg.irq_handle[rx_idx])
			continue;

		rc = cam_irq_controller_unsubscribe_irq_evt(csid_hw->rx_irq_controller[rx_idx],
			csid_hw->rx_cfg.irq_handle[rx_idx]);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unsubscribe rx err[%d] irq evt for CSID:%u rc:%d",
				rx_idx, csid_hw->hw_intf->hw_idx, rc);

		csid_hw->rx_cfg.irq_handle[rx_idx] = 0;
	}

	if (csid_reg->num_rx_regs > 1 && csid_hw->rx_cfg.rx2_irq_handle) {
		rc = cam_irq_controller_unsubscribe_irq_evt(
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0],
			csid_hw->rx_cfg.rx2_irq_handle);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unsubscribe rx2 irq evt in rx for CSID:%u rc:%d",
				csid_hw->hw_intf->hw_idx, rc);

		csid_hw->rx_cfg.rx2_irq_handle = 0;
		cam_irq_controller_unregister_dependent(
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0],
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1]);
	}

	if (csid_hw->rx_cfg.top_irq_handle) {
		rc = cam_irq_controller_unsubscribe_irq_evt(
			csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0],
			csid_hw->rx_cfg.top_irq_handle);
		if (rc)
			CAM_WARN(CAM_ISP,
				"Failed to unsubscribe rx irq evt in top for CSID:%u rc:%d",
				csid_hw->hw_intf->hw_idx, rc);

		csid_hw->rx_cfg.top_irq_handle = 0;
		cam_irq_controller_unregister_dependent(
			csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0],
			csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0]);
	}
}


/*
 * Disable the CSI2 RX block: either hard-unsubscribe all RX IRQs
 * (maskout_irqs = true) or only detach the event callbacks. Idempotent —
 * returns 0 immediately if RX is already disabled.
 */
static int cam_ife_csid_ver2_disable_csi2(
	bool maskout_irqs,
	struct cam_ife_csid_ver2_hw *csid_hw)
{
	CAM_DBG(CAM_ISP, "CSID:%u Disable csi2 rx",
		csid_hw->hw_intf->hw_idx);

	if (!csid_hw->flags.rx_enabled) {
		CAM_DBG(CAM_ISP, "CSID:%u Rx already disabled",
			csid_hw->hw_intf->hw_idx);
		return 0;
	}

	if (maskout_irqs)
		cam_ife_csid_ver2_maskout_rx_irqs(csid_hw);
	else
		cam_ife_csid_ver2_disable_rx_evts(csid_hw);

	csid_hw->flags.rx_enabled = false;

	return 0;
}

/*
 * Top half for TOP2 error IRQs: capture status + boottime into a payload.
 * Returns the get-payload error (event dropped, status logged) on failure.
 */
static int cam_ife_csid_ver2_top2_err_top_half(
	uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	int32_t rc = 0;
	struct cam_ife_csid_ver2_hw *csid_hw = NULL;
	struct cam_ife_csid_ver2_evt_payload *evt_payload;

	csid_hw = th_payload->handler_priv;
	if (!csid_hw) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned");
		return -ENODEV;
	}

	rc = cam_ife_csid_ver2_get_evt_payload(csid_hw, &evt_payload,
		&csid_hw->path_free_payload_list, &csid_hw->path_payload_lock);
	if (rc) {
		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d TOP2 status: 0x%X",
			csid_hw->hw_intf->hw_idx,
			th_payload->evt_status_arr[0]);
	} else {
		evt_payload->irq_reg_val = th_payload->evt_status_arr[0];
		ktime_get_boottime_ts64(&evt_payload->timestamp);
		th_payload->evt_payload_priv = evt_payload;
	}
	return rc;
}

/*
 * Classify an RX error status word in top-half context.
 *
 * Returns -ENODEV when the event should be ignored (frames still being
 * discarded, a fatal error is already being handled, or an EPD sensor
 * reported only the expected ERROR_EOT). Otherwise returns 0 with *status
 * set to the (possibly EOT-masked) status word; fatal errors — immediate or
 * via exceeded recoverable/CRC error thresholds — latch fatal_err_detected
 * and mask all RX IRQs via cam_ife_csid_ver2_stop_csi2_in_err.
 */
static inline int cam_ife_csid_ver2_rx_err_process_top_half(
	struct cam_ife_csid_ver2_hw *csid_hw,
	uint32_t *evt_status_arr,
	enum cam_ife_csid_rx_irq_regs rx_idx,
	uint32_t *status)
{
	int32_t rc = 0;
	struct cam_ife_csid_ver2_reg_info *csid_reg;
	const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg;
	int discard_frame_count;
	const uint64_t *evt_bitmap = NULL;
	const uint8_t *bit_pos = NULL;

	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;
	csi2_reg = csid_reg->csi2_reg;

	evt_bitmap = csid_reg->rx_debug_mask->evt_bitmap;
	bit_pos = csid_reg->rx_debug_mask->bit_pos;

	discard_frame_count = atomic_read(&csid_hw->discard_frame_per_path);
	if (discard_frame_count) {
		CAM_DBG(CAM_ISP, "ignoring rx error (error:0x%x, remaining frames:%d)",
			evt_status_arr[0], discard_frame_count);
		return -ENODEV;
	}

	if (csid_hw->flags.fatal_err_detected) {
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"CSID[%u] already handling fatal error",
			csid_hw->hw_intf->hw_idx);
		return -ENODEV;
	}

	*status = evt_status_arr[0];

	if (csid_hw->rx_cfg.epd_supported) {
		if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION)) &&
			(*status & BIT(bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION]))) {
			CAM_DBG(CAM_ISP, "CSID[%u] Rcvd Only ERROR_EOT for EPD sensor",
				csid_hw->hw_intf->hw_idx);
			return -ENODEV;
		}

		/*
		 * NOTE(review): the EOT bit is cleared here even when
		 * evt_bitmap[rx_idx] does not advertise EOT for this register
		 * set — confirm bit_pos[EOT] is benign in that case.
		 */
		*status &= (~BIT(bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION]));
	}

	if (*status & csi2_reg->fatal_err_mask[rx_idx]) {
		csid_hw->flags.fatal_err_detected = true;
		cam_ife_csid_ver2_stop_csi2_in_err(csid_hw);
		return rc;
	}

	if (*status & csi2_reg->part_fatal_err_mask[rx_idx]) {
		if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION)) &&
			(*status & BIT(bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION]))) {
			csid_hw->counters.error_irq_count++;
			CAM_DBG(CAM_ISP, "CSID[%u] Recoverable Error Count:%u",
				csid_hw->hw_intf->hw_idx,
				csid_hw->counters.error_irq_count);
		}

		if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC)) &&
			(*status & BIT(bit_pos[CAM_IFE_CSID_RX_ERROR_CRC]))) {
			csid_hw->counters.crc_error_irq_count++;
			if (csid_hw->counters.crc_error_irq_count > csid_hw->crc_error_threshold)
				CAM_DBG(CAM_ISP,
					"CSID[%u] Recoverable CRC Error Count: %u, CRC Error threshold: %u",
					csid_hw->hw_intf->hw_idx,
					csid_hw->counters.crc_error_irq_count,
					csid_hw->crc_error_threshold);
		}

		if ((csid_hw->counters.error_irq_count > CAM_IFE_CSID_MAX_ERR_COUNT) ||
			(csid_hw->counters.crc_error_irq_count > csid_hw->crc_error_threshold)) {
			csid_hw->flags.fatal_err_detected = true;
			cam_ife_csid_ver2_stop_csi2_in_err(csid_hw);
		}
	}

	return rc;
}

static int cam_ife_csid_ver2_rx_err_top_half(
	uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	int32_t rc = 0;
	uint32_t status = 0;
	struct cam_ife_csid_ver2_hw *csid_hw = NULL;
	struct cam_ife_csid_ver2_evt_payload *evt_payload;

csid_hw = th_payload->handler_priv; + if (!csid_hw) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned"); + return -ENODEV; + } + rc = cam_ife_csid_ver2_rx_err_process_top_half(csid_hw, th_payload->evt_status_arr, + CAM_IFE_CSID_RX_IRQ_STATUS_REG0, &status); + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%d rx_idx:%d", csid_hw->hw_intf->hw_idx, + CAM_IFE_CSID_RX_IRQ_STATUS_REG0); + return rc; + } + + rc = cam_ife_csid_ver2_get_evt_payload(csid_hw, &evt_payload, + &csid_hw->rx_free_payload_list, + &csid_hw->rx_payload_lock); + if (rc) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%u status: 0x%X", + csid_hw->hw_intf->hw_idx, + th_payload->evt_status_arr[0]); + } else { + evt_payload->irq_reg_val = status; + ktime_get_boottime_ts64(&evt_payload->timestamp); + th_payload->evt_payload_priv = evt_payload; + } + return rc; +} + +static int cam_ife_csid_ver2_rx2_err_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc = 0; + uint32_t status = 0; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_ife_csid_ver2_evt_payload *evt_payload; + + csid_hw = th_payload->handler_priv; + if (!csid_hw) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned"); + return -ENODEV; + } + + rc = cam_ife_csid_ver2_rx_err_process_top_half(csid_hw, th_payload->evt_status_arr, + CAM_IFE_CSID_RX2_IRQ_STATUS_REG1, &status); + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%d rx_idx:%d", csid_hw->hw_intf->hw_idx, + CAM_IFE_CSID_RX2_IRQ_STATUS_REG1); + return rc; + } + + rc = cam_ife_csid_ver2_get_evt_payload(csid_hw, &evt_payload, + &csid_hw->rx_free_payload_list, + &csid_hw->rx_payload_lock); + if (rc) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d status: 0x%X", + csid_hw->hw_intf->hw_idx, + th_payload->evt_status_arr[0]); + } else { + evt_payload->irq_reg_val = status; + ktime_get_boottime_ts64(&evt_payload->timestamp); + th_payload->evt_payload_priv = evt_payload; + } + return rc; +} + +static const char *__cam_ife_csid_ver2_parse_short_pkt_type(uint32_t data_type) +{ + switch 
(data_type) { + case 0: + return "Frame Start Code"; + case 1: + return "Frame End Code"; + case 2: + return "Line Start Code"; + case 3: + return "Line End Code"; + case 4: + case 5: + case 6: + case 7: + return "Reserved"; + default: + return "Invalid"; + } +} + +static int cam_ife_csid_ver2_handle_rx_debug_event( + struct cam_ife_csid_ver2_hw *csid_hw, + enum cam_ife_csid_rx_irq_regs rx_idx, + uint32_t bit_pos, + uint32_t *rst_strobe_val) +{ + struct cam_hw_soc_info *soc_info; + struct cam_ife_csid_ver2_reg_info *csid_reg; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + uint32_t mask, val, val2; + const uint8_t *dbg_bit_pos = NULL; + const uint64_t *evt_bitmap = NULL; + struct timespec64 ts; + + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + soc_info = &csid_hw->hw_info->soc_info; + mask = BIT(bit_pos); + dbg_bit_pos = csid_reg->rx_debug_mask->bit_pos; + evt_bitmap = csid_reg->rx_debug_mask->evt_bitmap; + + ktime_get_boottime_ts64(&ts); + + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED)) + && (mask == BIT(dbg_bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED]))) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_0_addr); + val2 = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_1_addr); + CAM_INFO(CAM_ISP, "CSID[%u] LONG_PKT_CAPTURED occurred at [%llu: %09llu]", + csid_hw->hw_intf->hw_idx, ts.tv_sec, ts.tv_nsec); + CAM_INFO(CAM_ISP, + "The header of the first long pkt matching the configured vc-dt has been captured"); + CAM_INFO(CAM_ISP, + "Virtual Channel: %u, Data Type: %u, Word Count: %u, Error Correction Code: %u", + ((val & csi2_reg->vc_mask) >> csi2_reg->vc_shift), + ((val & csi2_reg->dt_mask) >> csi2_reg->dt_shift), + ((val & csi2_reg->wc_mask) >> csi2_reg->wc_shift), val2); + + /* Update reset long pkt strobe */ + *rst_strobe_val |= (1 << csi2_reg->long_pkt_strobe_rst_shift); + } else if 
((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED)) && + (mask == BIT(dbg_bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED]))) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_short_pkt_0_addr); + val2 = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_short_pkt_1_addr); + CAM_INFO(CAM_ISP, "CSID[%u] SHORT_PKT_CAPTURED occurred at [%llu: %09llu]", + csid_hw->hw_intf->hw_idx, ts.tv_sec, ts.tv_nsec); + CAM_INFO(CAM_ISP, + "The header of the first short pkt matching the configured vc-dt has been captured"); + CAM_INFO(CAM_ISP, + "Virtual Channel: %u, Data Type: %s, Frame Line Count: %u, Error Correction Code: %u", + ((val & csi2_reg->vc_mask) >> csi2_reg->vc_shift), + __cam_ife_csid_ver2_parse_short_pkt_type( + (val & csi2_reg->dt_mask) >> csi2_reg->dt_shift), + ((val & csi2_reg->wc_mask) >> csi2_reg->wc_shift), val2); + + /* Update reset short pkt strobe */ + *rst_strobe_val |= (1 << csi2_reg->short_pkt_strobe_rst_shift); + } else if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED)) && + (mask == BIT(dbg_bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED]))) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_cphy_pkt_hdr_addr); + CAM_INFO(CAM_ISP, "CSID[%u] CPHY_PKT_HDR_CAPTURED occurred at [%llu: %09llu]", + csid_hw->hw_intf->hw_idx, ts.tv_sec, ts.tv_nsec); + CAM_INFO(CAM_ISP, + "The header of the first cphy pkt matching the configured vc-dt has been captured"); + CAM_INFO(CAM_ISP, + "Virtual Channel: %u, Data Type: %u, Word Count: %u", + ((val & csi2_reg->vc_mask) >> csi2_reg->vc_shift), + ((val & csi2_reg->dt_mask) >> csi2_reg->dt_shift), + ((val & csi2_reg->wc_mask) >> csi2_reg->wc_shift)); + + /* Update reset phy pkt strobe */ + *rst_strobe_val |= (1 << csi2_reg->cphy_pkt_strobe_rst_shift); + } else if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT)) && + (mask == BIT(dbg_bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT]))) { + val = 
cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->cap_unmap_long_pkt_hdr_0_addr); + + CAM_ERR(CAM_ISP, + "CSID[%u] UNMAPPED_VC_DT occurred at [%llu: %09llu]", + csid_hw->hw_intf->hw_idx, ts.tv_sec, ts.tv_nsec); + CAM_ERR(CAM_ISP, + "Sensor: A long packet has a VC_DT combination that is not configured for IPP or RDIs"); + CAM_ERR(CAM_ISP, + "Virtual Channel: %u, Data Type: %u, Word Count: %u", + ((val & csi2_reg->vc_mask) >> csi2_reg->vc_shift), + ((val & csi2_reg->dt_mask) >> csi2_reg->dt_shift), + ((val & csi2_reg->wc_mask) >> csi2_reg->wc_shift)); + CAM_ERR(CAM_ISP, + "SW Programming: Populate or Disable the VC DT in sensor driver XML"); + + csid_hw->counters.error_irq_count++; + + CAM_DBG(CAM_ISP, "CSID[%u] Recoverable Error Count:%u", + csid_hw->hw_intf->hw_idx, + csid_hw->counters.error_irq_count); + + /* Update reset unmapped long pkt strobe */ + *rst_strobe_val |= (1 << csi2_reg->unmapped_pkt_strobe_rst_shift); + } else { + CAM_DBG(CAM_ISP, + "CSID[%u] RX_IRQ: %s", + csid_hw->hw_intf->hw_idx, + (*csid_reg->rx_irq_desc)[rx_idx][bit_pos].desc); + } + + return 0; +} + +static int cam_ife_csid_ver2_rx2_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t irq_status; + uint32_t rst_strobe_val = 0; + uint32_t bit_pos = 0, bit_set = 0; + + csid_hw = th_payload->handler_priv; + + if (!csid_hw) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned"); + return -ENODEV; + } + + irq_status = th_payload->evt_status_arr[0]; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + + while (irq_status) { + bit_set = irq_status & 1; + if ((bit_set) && (BIT(bit_pos) & + csid_hw->debug_info.rx_mask[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1])) + cam_ife_csid_ver2_handle_rx_debug_event(csid_hw, + CAM_IFE_CSID_RX2_IRQ_STATUS_REG1, bit_pos, 
&rst_strobe_val); + bit_pos++; + irq_status >>= 1; + } + + /* Reset strobes for next set of pkts */ + if (rst_strobe_val && csid_hw->debug_info.rst_capture_strobes) { + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + + cam_io_w_mb(rst_strobe_val, soc_info->reg_map[0].mem_base + + csi2_reg->rst_strobes_addr); + } + return 0; +} + +static int cam_ife_csid_ver2_rx_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t irq_status; + uint32_t rst_strobe_val = 0; + uint32_t bit_pos = 0, bit_set = 0; + + csid_hw = th_payload->handler_priv; + + if (!csid_hw) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No private returned"); + return -ENODEV; + } + + irq_status = th_payload->evt_status_arr[0]; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + + while (irq_status) { + bit_set = irq_status & 1; + if ((bit_set) && (BIT(bit_pos) & + csid_hw->debug_info.rx_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG0])) + cam_ife_csid_ver2_handle_rx_debug_event(csid_hw, + CAM_IFE_CSID_RX_IRQ_STATUS_REG0, bit_pos, &rst_strobe_val); + bit_pos++; + irq_status >>= 1; + } + + /* Reset strobes for next set of pkts */ + if (rst_strobe_val && csid_hw->debug_info.rst_capture_strobes) { + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + + cam_io_w_mb(rst_strobe_val, soc_info->reg_map[0].mem_base + + csi2_reg->rst_strobes_addr); + } + return 0; +} + +static inline uint32_t cam_ife_csid_ver2_input_core_to_hw_idx(int core_sel) +{ + switch (core_sel) { + case CAM_IFE_CSID_INPUT_CORE_SEL_SFE_0: return 0; + case CAM_IFE_CSID_INPUT_CORE_SEL_SFE_1: return 1; + case CAM_IFE_CSID_INPUT_CORE_SEL_SFE_2: return 2; + /** + * For all invalid cases, return a very large value + * that can never be a valid hw idx. 
+ */ + default: return 0xFFFF; + } +} + +static void cam_ife_csid_ver2_read_debug_err_vectors( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + int i, j, k; + uint64_t timestamp; + uint32_t temp, debug_vec_error_reg[CAM_IFE_CSID_DEBUG_VEC_ERR_REGS] = {0}; + uint8_t log_buf[CAM_IFE_CSID_LOG_BUF_LEN] = {0}; + size_t len = 0; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (!(csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_DEBUG_ERR_VEC)) + return; + + for (i = 0; i < CAM_IFE_CSID_DEBUG_VEC_FIFO_SIZE; i++) { + cam_io_w_mb((i << CAM_IFE_CSID_DEBUG_TIMESTAMP_IRQ_SEL_SHIFT), + soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->debug_err_vec_cfg); + + timestamp = __cam_ife_csid_ver2_get_time_stamp( + soc_info->reg_map[0].mem_base, + csid_reg->cmn_reg->debug_err_vec_ts_lb, + csid_reg->cmn_reg->debug_err_vec_ts_mb, + false, 0); + + if (!timestamp) { + CAM_DBG(CAM_ISP, "Debug IRQ vectors already read, skip"); + return; + } + + for (j = 0; j < CAM_IFE_CSID_DEBUG_VEC_ERR_REGS; j++) { + if (csid_reg->cmn_reg->debug_err_vec_irq[j] == 0) + break; + + temp = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->debug_err_vec_irq[j]); + temp ^= debug_vec_error_reg[j]; + debug_vec_error_reg[j] |= temp; + k = 0; + + while (temp) { + if (temp & 0x1) { + CAM_INFO_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, + &len, "%s ", (*csid_reg->debug_vec_desc)[j][k]); + } + temp >>= 1; + k++; + } + } + CAM_INFO(CAM_ISP, "Error(s) that occurred in time order %d at timestamp %lld: %s", + i, timestamp, log_buf); + memset(log_buf, 0x0, sizeof(uint8_t) * CAM_IFE_CSID_LOG_BUF_LEN); + } + + cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->debug_err_vec_cfg); +} + +static int cam_ife_csid_ver2_handle_event_err( + struct cam_ife_csid_ver2_hw *csid_hw, + uint32_t irq_status, + uint32_t err_type, + bool 
is_secondary, + struct cam_isp_resource_node *res) +{ + struct cam_isp_hw_error_event_info err_evt_info; + struct cam_isp_hw_event_info evt = {0}; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + + if (!csid_hw->event_cb) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID[%u] event cb not registered", + csid_hw->hw_intf->hw_idx); + return 0; + } + + /* + * If PF is encountered skip notifying error to ctx, PF + * handler will do the necessary notifications + */ + if (csid_hw->flags.pf_err_detected) + return 0; + + evt.hw_idx = csid_hw->hw_intf->hw_idx; + evt.reg_val = irq_status; + evt.hw_type = CAM_ISP_HW_TYPE_CSID; + evt.is_secondary_evt = is_secondary; + err_evt_info.err_type = err_type; + evt.event_data = (void *)&err_evt_info; + + if (!is_secondary) { + if (res) { + cam_ife_csid_ver2_print_debug_reg_status(csid_hw, res); + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + evt.res_id = res->res_id; + CAM_ERR(CAM_ISP, + "csid[%u] Res:%s Err 0x%x status 0x%x time_stamp: %lld:%09lld", + csid_hw->hw_intf->hw_idx, res->res_name, err_type, + irq_status, path_cfg->error_ts.tv_sec, + path_cfg->error_ts.tv_nsec); + } else { + CAM_ERR(CAM_ISP, + "csid[%u] Rx Err: 0x%x status 0x%x", + csid_hw->hw_intf->hw_idx, err_type, irq_status); + } + } + + evt.in_core_idx = + cam_ife_csid_ver2_input_core_to_hw_idx(csid_hw->top_cfg.input_core_type); + + cam_ife_csid_ver2_print_hbi_vbi(csid_hw); + + cam_ife_csid_ver2_print_camif_timestamps(csid_hw); + + cam_ife_csid_ver2_read_debug_err_vectors(csid_hw); + + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_ERROR, (void *)&evt); + + return 0; +} + +static int cam_ife_csid_ver2_rx_err_process_bottom_half( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_evt_payload *payload, + enum cam_ife_csid_rx_irq_regs rx_idx) +{ + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg = NULL; + struct cam_ife_csid_ver2_reg_info *csid_reg = NULL; + uint32_t irq_status = 0; + struct cam_hw_soc_info *soc_info = NULL; + uint8_t 
*log_buf = NULL; + uint32_t rx_irq_status = 0; + int data_idx = 0; + uint32_t total_crc; + uint32_t long_pkt_ftr_val; + uint32_t event_type = 0; + size_t len = 0, i; + uint32_t val = 0; + const uint64_t *evt_bitmap = NULL; + const uint8_t *bit_pos = NULL; + uint32_t irq_reg_val = payload->irq_reg_val; + + soc_info = &csid_hw->hw_info->soc_info; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + csi2_reg = csid_reg->csi2_reg; + + evt_bitmap = csid_reg->rx_debug_mask->evt_bitmap; + + bit_pos = csid_reg->rx_debug_mask->bit_pos; + + irq_status = irq_reg_val & csi2_reg->fatal_err_mask[rx_idx]; + + log_buf = csid_hw->log_buf; + log_buf[0] = '\0'; + + if (!csid_hw->flags.device_enabled) { + CAM_DBG(CAM_ISP, "CSID[%u] bottom-half after stop [0x%x]", + csid_hw->hw_intf->hw_idx, irq_status); + return 0; + } + + spin_lock(&csid_hw->lock_state); + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID[%u] powered down state", + csid_hw->hw_intf->hw_idx); + goto unlock; + } + + if (irq_status) { + bool lane_overflow = false; + bool lane_sot_lost = false; + bool lane_eot_lost = false; + char tmp_buf[10]; + int tmp_len = 0; + + for (i = 0; i < 4; i++) { + /* NOTE: Hardware specific bits */ + if ((evt_bitmap[rx_idx] & + (BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW)<timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "System: Data has been lost when transferring from PHY to CSID"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: Check PHY clock, CSID clock and possible skew among the data lanes"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "DPHY: Enable skew calibration for datarate > 1.5Gbps/lane"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CPHY: Perform CDR tuning"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Current CSID clock rate: %lluHz", 
csid_hw->clk_rate); + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + tmp_len = 0; + len = 0; + for (i = 0; i < 4; i++) { + /* NOTE: Hardware specific bits */ + if ((evt_bitmap[rx_idx] & + (BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_LOST)<timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Sensor: Timing signals are missed received in the CPHY packet"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: Check PHY/sensor config"); + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + tmp_len = 0; + len = 0; + for (i = 0; i < 4; i++) { + /* NOTE: Hardware specific bits */ + if ((evt_bitmap[rx_idx] & + (BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_LOST)<timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Sensor: Timing signals are missed received in the CPHY packet"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: Check PHY/sensor config"); + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + len = 0; + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC)) && + (irq_status & BIT(bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC]))) { + event_type |= CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CPHY_PACKET_HEADER_CRC occurred at [%llu: %09llu]", + payload->timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Sensor: All CPHY packet headers received are corrupted"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: Check sensor data rate in output pixel clock in XML for proper PHY configuration"); + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + len = 0; + if ((evt_bitmap[rx_idx] & 
BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW)) && + (irq_status & BIT(bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW]))) { + event_type |= CAM_ISP_HW_ERROR_CSID_MISSING_PKT_HDR_DATA; + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_0_addr); + + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "STREAM_UNDERFLOW occurred at [%llu: %09llu]", + payload->timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Sensor: Long packet payload size is less than payload header size, resulting a corrupted frame"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: Perform PHY Tuning/Check sensor limitations"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Word Count: %u", val & 0xFFFF); + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + len = 0; + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC)) && (irq_status & + BIT(bit_pos[CAM_IFE_CSID_RX_ERROR_ECC]))) { + event_type |= CAM_ISP_HW_ERROR_CSID_PKT_HDR_CORRUPTED; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "DPHY_PACKET_HEADER_ECC_DETECTED occurred on [%llu: %09llu]", + payload->timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Sensor: A short or long packet is corrupted and cannot be recovered"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: Check sensor datarate in output pixel clock in XML for proper PHY configuration"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Error Correction Code: 0x%x", + cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_1_addr)); + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + len = 0; + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_UNBOUNDED_FRAME)) && + (irq_status & 
BIT(bit_pos[CAM_IFE_CSID_RX_UNBOUNDED_FRAME]))) { + event_type |= CAM_ISP_HW_ERROR_CSID_UNBOUNDED_FRAME; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "UNBOUNDED_FRAME: Frame started with EOF or No EOF"); + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + len = 0; + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION)) && + (irq_status & (BIT(bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION])))) { + event_type |= CAM_ISP_HW_ERROR_CSID_MISSING_EOT; + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CPHY_EOT_RECEPTION: No EOT on lane/s, is_EPD: %c, PHY_Type: %s(%u)", + (csid_hw->rx_cfg.epd_supported & CAM_ISP_EPD_SUPPORT) ? 'Y' : 'N', + (csid_hw->rx_cfg.lane_type) ? "cphy" : "dphy", + csid_hw->rx_cfg.lane_type); + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + /** + * Starting from v880, CRC errors wil be treated as part fatal errors and + * only be flagged after happening several times. Leave this for backward + * compatibility. 
+ */ + len = 0; + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC)) && (irq_status & + BIT(bit_pos[CAM_IFE_CSID_RX_ERROR_CRC]))) { + event_type |= CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED; + long_pkt_ftr_val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_ftr_addr); + total_crc = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->total_crc_err_addr); + + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "LONG_PACKET_PAYLOAD_CRC occurred at [%llu: %09llu]", + payload->timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Sensor: The calculated CRC of a long packet does not match the transmitted (expected) CRC, possible corruption"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: First frame CRC: Check sensor data rate / settle count in XML for proper PHY configuration; Streaming state: Check sensor constraints for exposure control else perform PHY CDR-EQ Tuning"); + + if (csid_hw->rx_cfg.lane_type == CAM_ISP_LANE_TYPE_CPHY) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_cphy_pkt_hdr_addr); + + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Total CRC Errors: %u, Virtual Channel: %u, Data Type: %u, Word Count: %u", + total_crc, val >> 22, (val >> 16) & 0x3F, val & 0xFFFF); + } else { + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Total CRC Errors: %u", total_crc); + } + CAM_ERR(CAM_ISP, "CSID[%u] Fatal Errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + rx_irq_status |= irq_status; + csid_hw->flags.fatal_err_detected = true; + + cam_ife_csid_ver2_send_cdr_sweep_csi2_rx_vals(csid_hw, csid_reg, irq_reg_val); + } + + irq_status = irq_reg_val & + csi2_reg->part_fatal_err_mask[rx_idx]; + + if (irq_status) { + len = 0; + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION)) && + (irq_status & 
BIT(bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION]))) { + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CPHY_SOT_RECEPTION: Less SOTs on lane/s"); + CAM_ERR(CAM_ISP, "CSID[%u] Partly fatal errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + len = 0; + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC)) && + (irq_status & BIT(bit_pos[CAM_IFE_CSID_RX_ERROR_CRC]))) { + event_type |= CAM_ISP_HW_ERROR_CSID_PKT_PAYLOAD_CORRUPTED; + + /* Only print the CRC error logs when reaching the threshold */ + if (csid_hw->counters.crc_error_irq_count > csid_hw->crc_error_threshold) { + long_pkt_ftr_val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_ftr_addr); + total_crc = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->total_crc_err_addr); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "LONG_PACKET_PAYLOAD_CRC occurred at [%llu: %09llu]", + payload->timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Sensor: The calculated CRC of a long packet does not match the transmitted (expected) CRC, possible corruption"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: First frame CRC: Check sensor data rate / settle count in XML for proper PHY configuration; Streaming state: Check sensor constraints for exposure control else perform PHY CDR-EQ Tuning"); + + if (csid_hw->rx_cfg.lane_type == CAM_ISP_LANE_TYPE_CPHY) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_cphy_pkt_hdr_addr); + + CAM_ERR_BUF(CAM_ISP, log_buf, + CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Total CRC Errors: %u, Virtual Channel: %u, Data Type: %u, Word Count: %u", + total_crc, val >> 22, (val >> 16) & 0x3F, + val & 0xFFFF); + } else { + CAM_ERR_BUF(CAM_ISP, log_buf, + CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Total CRC Errors: %u", total_crc); + } + + CAM_ERR(CAM_ISP, "CSID[%u] Partly fatal errors: %s", + csid_hw->hw_intf->hw_idx, 
log_buf); + } + } + + rx_irq_status |= irq_status; + + cam_ife_csid_ver2_send_cdr_sweep_csi2_rx_vals(csid_hw, csid_reg, irq_reg_val); + } + + irq_status = irq_reg_val & + csi2_reg->non_fatal_err_mask[rx_idx]; + + if (irq_status) { + len = 0; + if ((evt_bitmap[rx_idx] & BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT)) && (irq_status & + BIT(bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT]))) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->captured_long_pkt_0_addr); + + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "MMAPPED_VC_DT occurred at [%llu: %09llu]", + payload->timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "SW: A long packet has a VC_DT combination that is configured for more than one IPP or RDIs"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Virtual Channel: %u, Data Type: %u", + (val >> 22), ((val >> 16) & 0x3F)); + } + + CAM_ERR(CAM_ISP, "CSID[%u] Non-fatal-errors: %s", + csid_hw->hw_intf->hw_idx, log_buf); + } + + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID[%u] Rx Status 0x%x", + csid_hw->hw_intf->hw_idx, + irq_reg_val); + + data_idx = (int)(csid_hw->rx_cfg.phy_sel - csid_reg->cmn_reg->phy_sel_base_idx); + if ((!csid_hw->flags.reset_awaited) && csid_hw->flags.fatal_err_detected) { + if (!event_type) + event_type |= CAM_ISP_HW_ERROR_CSID_FATAL; + + if (data_idx < 0) + CAM_WARN(CAM_ISP, "Can't notify csiphy, incorrect phy selected=%d", + data_idx); + else + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_APPLY_CSIPHY_AUX, (void *)&data_idx); + + cam_ife_csid_ver2_handle_event_err(csid_hw, + rx_irq_status, event_type, false, NULL); + csid_hw->flags.reset_awaited = true; + } +unlock: + spin_unlock(&csid_hw->lock_state); + + return 0; +} + +static int cam_ife_csid_ver2_rx_err_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + struct cam_ife_csid_ver2_evt_payload *payload; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + int rc 
= 0; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + payload = evt_payload_priv; + csid_hw = handler_priv; + + rc = cam_ife_csid_ver2_rx_err_process_bottom_half(csid_hw, + payload, CAM_IFE_CSID_RX_IRQ_STATUS_REG0); + + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->rx_free_payload_list, + &csid_hw->rx_payload_lock); + + return 0; +} + +static int cam_ife_csid_ver2_rx2_err_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + struct cam_ife_csid_ver2_evt_payload *payload; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + int rc = 0; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + payload = evt_payload_priv; + csid_hw = handler_priv; + + rc = cam_ife_csid_ver2_rx_err_process_bottom_half(csid_hw, + payload, CAM_IFE_CSID_RX2_IRQ_STATUS_REG1); + + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, &csid_hw->rx_free_payload_list, + &csid_hw->rx_payload_lock); + + return rc; +} + +void cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler( + void *csid) +{ + struct cam_ife_csid_ver2_hw *csid_hw = csid; + struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg; + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + void __iomem *base = + soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + uint32_t i = 0, rdi_cfg = 0; + uint8_t *log_buf = csid_hw->log_buf; + size_t len = 0; + + memset(log_buf, 0x0, sizeof(uint8_t) * CAM_IFE_CSID_LOG_BUF_LEN); + + for (i = CAM_IFE_PIX_PATH_RES_RDI_0; i < CAM_IFE_PIX_PATH_RES_RDI_4; + i++) { + path_reg = csid_reg->path_reg[i - CAM_IFE_PIX_PATH_RES_RDI_0]; + + if (!(path_reg->capabilities & + CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI)) + continue; + + rdi_cfg = cam_io_r_mb(base + path_reg->cfg1_addr); + + if (rdi_cfg & path_reg->pix_store_en_shift_val) + CAM_ERR_BUF(CAM_ISP, log_buf, 
CAM_IFE_CSID_LOG_BUF_LEN, &len, + "LINE BUFFER ENABLED for RDI%d", (i - CAM_IFE_PIX_PATH_RES_RDI_0)); + } + + if (len) + CAM_ERR(CAM_ISP, "CSID[%u] %s", csid_hw->hw_intf->hw_idx, log_buf); + +} + +void cam_ife_csid_hw_ver2_drv_err_handler(void *csid) +{ + struct cam_ife_csid_ver2_hw *csid_hw = csid; + struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg; + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + void __iomem *base = + soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + uint32_t cfg0_val = 0, cfg1_val = 0, cfg2_val = 0, debug_drv_0_val = 0, debug_drv_1_val = 0; + + cfg0_val = cam_io_r_mb(base + csid_reg->cmn_reg->drv_cfg0_addr); + cfg1_val = cam_io_r_mb(base + csid_reg->cmn_reg->drv_cfg1_addr); + cfg2_val = cam_io_r_mb(base + csid_reg->cmn_reg->drv_cfg2_addr); + debug_drv_0_val = cam_io_r_mb(base + csid_reg->cmn_reg->debug_drv_0_addr); + debug_drv_1_val = cam_io_r_mb(base + csid_reg->cmn_reg->debug_drv_1_addr); + + CAM_INFO(CAM_ISP, + "CSID[%u] DRV cfg0:0x%x cfg1:0x%x cfg2:0x%x qtimer_val [start:end] [0x%x : 0x%x]", + csid_hw->hw_intf->hw_idx, cfg0_val, cfg1_val, cfg2_val, debug_drv_0_val, + debug_drv_1_val); +} + +void cam_ife_csid_hw_ver2_mup_mismatch_handler( + void *csid, void *resource) +{ + int i; + uint32_t idx = 0; + struct timespec64 current_ts; + struct cam_ife_csid_ver2_hw *csid_hw = csid; + struct cam_isp_resource_node *res = resource; + struct cam_ife_csid_ver2_path_cfg *path_cfg = + (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + struct cam_ife_csid_cid_data *cid_data = &csid_hw->cid_data[path_cfg->cid]; + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + struct cam_ife_csid_ver2_reg_info *csid_reg = NULL; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL; + + CAM_INFO(CAM_ISP, "CSID[%u] Last MUP value 0x%x programmed for res [id: %d name: %s]", + csid_hw->hw_intf->hw_idx, csid_hw->rx_cfg.mup, res->res_id, res->res_name); + + csid_reg = (struct 
cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + path_reg = csid_reg->path_reg[res->res_id]; + + if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) { + for (i = 0; i < CAM_IFE_CSID_MULTI_VC_DT_GRP_MAX; i++) { + if (!cid_data->vc_dt[i].valid) + break; + CAM_INFO(CAM_ISP, "CSID[%u] Virtual channel %d: %u, DT 0x%x", + csid_hw->hw_intf->hw_idx, i, cid_data->vc_dt[i].vc, + cid_data->vc_dt[i].dt); + } + + if (path_cfg->ts_comb_vcdt_en) { + ktime_get_boottime_ts64(&current_ts); + + idx = cam_io_r_mb(soc_info->reg_map[0].mem_base + + path_reg->timestamp_curr0_sof_addr) & + csid_reg->cmn_reg->ts_comb_vcdt_mask; + + if (idx < CAM_IFE_CSID_MULTI_VC_DT_GRP_MAX) + CAM_DBG(CAM_ISP, + "CSID[%d] Received frame on VC: %d [id: %d name: %s] timestamp: [%llu:%09llu]", + csid_hw->hw_intf->hw_idx, idx, + res->res_id, res->res_name, current_ts.tv_sec, + current_ts.tv_nsec); + else + CAM_ERR(CAM_ISP, + "CSID[%d] Get invalid virtual channel index: %d on [id: %d name: %s] timestamp: [%llu:%09llu]", + csid_hw->hw_intf->hw_idx, idx, res->res_id, + res->res_name, current_ts.tv_sec, current_ts.tv_nsec); + } + } else { + CAM_ERR(CAM_ISP, "CSID[%u] Multi-VCDT is not enabled, virtual channel 0: %d", + csid_hw->hw_intf->hw_idx, + cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc); + } +} + +void cam_ife_csid_ver2_print_illegal_programming_irq_status( + void *csid, void *resource) +{ + struct cam_ife_csid_ver2_hw *csid_hw = csid; + struct cam_isp_resource_node *res = resource; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + struct cam_ife_csid_cid_data *cid_data; + struct cam_hw_soc_info *soc_info; + void __iomem *base; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + uint32_t vcdt_cfg0 = 0, vcdt_cfg1 = 0, cfg0 = 0, cfg1 = 0; + uint32_t decode_fmt[CAM_IFE_CSID_MULTI_VC_DT_GRP_MAX], + vc[CAM_IFE_CSID_MAX_VALID_VC_NUM], dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_MAX]; + int i; + size_t len = 0; + uint8_t log_buf[100] = {0}; + + csid_reg 
= csid_hw->core_info->csid_reg; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + if (!path_cfg || (path_cfg->cid >= CAM_IFE_CSID_CID_MAX)) { + CAM_ERR(CAM_ISP, "CSID:%u Invalid params: path_cfg: %pK, num_cids: %d", + csid_hw->hw_intf->hw_idx, path_cfg, (path_cfg ? (path_cfg->cid) : -1)); + return; + } + + cid_data = &csid_hw->cid_data[path_cfg->cid]; + soc_info = &csid_hw->hw_info->soc_info; + base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + path_reg = csid_reg->path_reg[res->res_id]; + + cfg0 = cam_io_r_mb(base + path_reg->cfg0_addr); + cfg1 = cam_io_r_mb(base + path_reg->cfg1_addr); + vcdt_cfg0 = cam_io_r_mb(base + path_reg->multi_vcdt_cfg0_addr); + + CAM_INFO(CAM_ISP, "cfg0 = %x cfg1 = %x vcdt_cfg0 = %x vcrop %x early eof %d", + cfg0, cfg1, vcdt_cfg0, cam_io_r_mb(base + path_reg->vcrop_addr), + (cfg1 & BIT(path_reg->early_eof_en_shift_val))); + + + if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) { + decode_fmt[0] = ((cfg0 >> + csid_reg->cmn_reg->decode_format_shift_val) & + csid_reg->cmn_reg->decode_format_mask); + decode_fmt[1] = ((vcdt_cfg0 >> + csid_reg->cmn_reg->decode_format1_shift_val) & + csid_reg->cmn_reg->decode_format_mask); + vc[0] = ((cfg0 >> csid_reg->cmn_reg->vc_shift_val) & + csid_reg->cmn_reg->vc_mask); + dt[0] = ((cfg0 >> csid_reg->cmn_reg->dt_shift_val) & + csid_reg->cmn_reg->dt_mask); + vc[1] = ((vcdt_cfg0 >> csid_reg->cmn_reg->multi_vcdt_vc1_shift_val) & + csid_reg->cmn_reg->vc_mask); + dt[1] = ((vcdt_cfg0 >> csid_reg->cmn_reg->multi_vcdt_dt1_shift_val) & + csid_reg->cmn_reg->dt_mask); + + if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_2].valid) { + vcdt_cfg1 = cam_io_r_mb(base + path_reg->multi_vcdt_cfg1_addr); + dt[2] = ((vcdt_cfg1 >> csid_reg->cmn_reg->multi_vcdt_dt2_shift_val) & + csid_reg->cmn_reg->dt_mask); + dt[3] = ((vcdt_cfg1 >> csid_reg->cmn_reg->multi_vcdt_dt3_shift_val) & + csid_reg->cmn_reg->dt_mask); + decode_fmt[2] = + ((vcdt_cfg1 >> csid_reg->cmn_reg->decode_format2_shift_val) & 
+ csid_reg->cmn_reg->decode_format_mask); + decode_fmt[3] = + ((vcdt_cfg1 >> csid_reg->cmn_reg->decode_format3_shift_val) & + csid_reg->cmn_reg->decode_format_mask); + } + + for (i = 0; i < CAM_ISP_VC_DT_CFG; i++) { + if (!cid_data->vc_dt[i].valid) + break; + if (i < CAM_IFE_CSID_MAX_VALID_VC_NUM) + CAM_INFO_BUF(CAM_ISP, log_buf, 100, &len, "VC%d: 0x%x ", i, vc[i]); + + CAM_INFO_BUF(CAM_ISP, log_buf, 100, &len, "DT%d: 0x%x, decode_fmt: 0x%x", + i, dt[i], decode_fmt[i]); + } + + CAM_INFO(CAM_ISP, "%s", log_buf); + + cam_ife_csid_ver2_decode_format_validate_on_err(csid_hw, vc, dt, + decode_fmt, cid_data->num_vc_dt); + } + + if (!(csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_DISABLE_EARLY_EOF) && + csid_reg->cmn_reg->early_eof_supported) { + if ((cfg1 & BIT(path_reg->early_eof_en_shift_val)) && + !(cfg1 & BIT(path_reg->crop_v_en_shift_val))) { + CAM_ERR(CAM_ISP, + "CSID:%u Early EOF %d enabled without VCROP %d", + csid_hw->hw_intf->hw_idx, + cfg1 & path_reg->early_eof_en_shift_val, + cfg1 & path_reg->crop_v_en_shift_val); + + } + } + + CAM_INFO(CAM_ISP, "CSID:%u Illegal Programming for res [id: %d name: %s]", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_name); +} + +static void cam_ife_csid_ver2_print_debug_reg_status( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + const struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + void __iomem *mem_base; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL; + uint32_t val0, val1, val2, val3 = 0; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + path_reg = csid_reg->path_reg[res->res_id]; + + val0 = cam_io_r_mb(mem_base + + path_reg->debug_camif_0_addr); + val1 = cam_io_r_mb(mem_base + + path_reg->debug_camif_1_addr); + val2 = cam_io_r_mb(mem_base + + path_reg->debug_halt_status_addr); + 
+ /* Read test bus if enabled */ + if (csid_hw->debug_info.test_bus_enabled) + val3 = cam_io_r_mb(mem_base + + csid_reg->cmn_reg->test_bus_debug); + + CAM_INFO(CAM_ISP, + "CSID:%u debug_camif_0: 0x%x debug_camif_1: 0x%x halt_status: 0x%x test_bus: %s test_bus_val: 0x%x for res: %s ", + csid_hw->hw_intf->hw_idx, val0, val1, val2, + CAM_BOOL_TO_YESNO(csid_hw->debug_info.test_bus_enabled), + val3, res->res_name); +} + +static int cam_ife_csid_ver2_parse_path_irq_status( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res, + uint32_t index, + uint32_t err_mask, + uint32_t irq_status, + struct cam_ife_csid_ver2_evt_payload *evt_payload) +{ + const uint8_t **irq_reg_tag; + const struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t bit_pos = 0; + uint32_t status, err_type = 0; + uint32_t sof_irq_debug_en = 0; + size_t len; + uint8_t *log_buf = NULL; + struct cam_ife_csid_ver2_path_cfg *path_cfg = res->res_priv; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + log_buf = csid_hw->log_buf; + + irq_reg_tag = cam_ife_csid_get_irq_reg_tag_ptr(); + + status = irq_status & err_mask; + while (status) { + if (bit_pos >= csid_reg->num_path_err_irqs) { + CAM_ERR(CAM_ISP, "Invalid bit position: %u for path reg", bit_pos); + break; + } + + len = 0; + if ((csid_reg->path_irq_desc[bit_pos].bitmask != 0) && (status & 0x1)) { + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "CSID[%u] %s occurred at [%llu: %09llu]", + csid_hw->hw_intf->hw_idx, csid_reg->path_irq_desc[bit_pos].irq_name, + path_cfg->error_ts.tv_sec, path_cfg->error_ts.tv_nsec); + if (csid_reg->path_irq_desc[bit_pos].desc) + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "%s", csid_reg->path_irq_desc[bit_pos].desc); + if (csid_reg->path_irq_desc[bit_pos].debug) + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, &len, + "Debug: %s", csid_reg->path_irq_desc[bit_pos].debug); + + /* List possibilities for illegal programming 
*/ + if (csid_reg->path_irq_desc[bit_pos].err_type) { + if (csid_reg->path_irq_desc[bit_pos].err_type == + CAM_ISP_HW_ERROR_CSID_FATAL) { + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, + &len, "1. MUP programmed without RUP"); + CAM_ERR_BUF(CAM_ISP, log_buf, CAM_IFE_CSID_LOG_BUF_LEN, + &len, "2. Early EOF enabled with vCrop disabled"); + CAM_ERR_BUF(CAM_ISP, log_buf, + CAM_IFE_CSID_LOG_BUF_LEN, &len, + "3. Mismatch in the decode formats configured for multi vc-dt"); + } + + err_type |= csid_reg->path_irq_desc[bit_pos].err_type; + } + + if (csid_reg->path_irq_desc[bit_pos].err_handler) + csid_reg->path_irq_desc[bit_pos].err_handler(csid_hw, res); + } + + if (len) + CAM_ERR(CAM_ISP, "CSID[%u] %s status: 0x%x Errors:%s", + csid_hw->hw_intf->hw_idx, irq_reg_tag[index], + irq_status, log_buf); + + bit_pos++; + status >>= 1; + } + + status = irq_status & csid_hw->debug_info.path_mask; + bit_pos = 0; + while (status) { + if (bit_pos >= csid_reg->num_path_err_irqs) { + CAM_ERR(CAM_ISP, "Invalid bit position: %u for path reg", bit_pos); + break; + } + + if ((csid_reg->path_irq_desc[bit_pos].bitmask != 0) && (status & 0x1)) + CAM_INFO(CAM_ISP, "CSID[%u] IRQ %s %s timestamp: [%lld:%09lld]", + csid_hw->hw_intf->hw_idx, irq_reg_tag[index], + csid_reg->path_irq_desc[bit_pos].irq_name, + evt_payload->timestamp.tv_sec, + evt_payload->timestamp.tv_nsec); + + bit_pos++; + status >>= 1; + } + + if (csid_hw->flags.sof_irq_triggered) { + if ((irq_status & IFE_CSID_VER2_PATH_INFO_INPUT_SOF)) + csid_hw->counters.irq_debug_cnt++; + + if (csid_hw->counters.irq_debug_cnt >= + CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX) { + cam_ife_csid_ver2_sof_irq_debug(csid_hw, + &sof_irq_debug_en); + csid_hw->counters.irq_debug_cnt = 0; + } + } + + return err_type; +} + +static void cam_ife_csid_ver2_top_err_process_bottom_half( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_evt_payload *payload, + enum cam_ife_csid_top_irq_regs top_idx) +{ + struct cam_ife_csid_ver2_reg_info 
*csid_reg; + uint32_t irq_status; + uint32_t event_type = 0; + uint32_t i = 0; + uint32_t irq_reg_val = payload->irq_reg_val; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg; + + irq_status = irq_reg_val & csid_reg->cmn_reg->top_err_irq_mask[top_idx]; + + if (!irq_status) { + CAM_ERR(CAM_ISP, "CSID:%u Unexpected Scenario top_idx:%u", + csid_hw->hw_intf->hw_idx, top_idx); + return; + } + + for (i = 0; i < csid_reg->num_top_err_irqs[top_idx]; i++) { + if (!csid_reg->top_irq_desc) { + CAM_ERR(CAM_ISP, "CSID:%u Unexpected Scenario top irq descriptor empty", + csid_hw->hw_intf->hw_idx); + break; + } + + if ((*csid_reg->top_irq_desc)[top_idx][i].bitmask & irq_status) { + CAM_ERR(CAM_ISP, "CSID[%u] %s occurred at [%llu: %09llu]", + csid_hw->hw_intf->hw_idx, + (*csid_reg->top_irq_desc)[top_idx][i].err_name, + payload->timestamp.tv_sec, payload->timestamp.tv_nsec); + CAM_ERR(CAM_ISP, "%s", (*csid_reg->top_irq_desc)[top_idx][i].desc); + if ((*csid_reg->top_irq_desc)[top_idx][i].debug) + CAM_ERR(CAM_ISP, "Debug: %s", + (*csid_reg->top_irq_desc)[top_idx][i].debug); + + if ((*csid_reg->top_irq_desc)[top_idx][i].err_handler) + (*csid_reg->top_irq_desc)[top_idx][i].err_handler(csid_hw); + + event_type |= (*csid_reg->top_irq_desc)[top_idx][i].err_type; + } + } + + if (event_type) + cam_ife_csid_ver2_handle_event_err(csid_hw, + irq_status, event_type, false, NULL); + +} + +static int cam_ife_csid_ver2_top2_err_irq_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + struct cam_ife_csid_ver2_evt_payload *payload; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_ife_csid_ver2_reg_info *csid_reg = NULL; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + payload = evt_payload_priv; + csid_hw = handler_priv; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg; + + if (!csid_reg->num_top_err_irqs) { + CAM_WARN_RATE_LIMIT_CUSTOM(CAM_ISP, 100, 1, 
+ "CSID:%u Unexpected Scenario no top error irqs", csid_hw->hw_intf->hw_idx); + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, &csid_hw->path_payload_lock); + return 0; + } + + cam_ife_csid_ver2_top_err_process_bottom_half(csid_hw, payload, + CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1); + + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + + return 0; +} + +static int cam_ife_csid_ver2_process_top_info( + struct cam_ife_csid_ver2_hw *csid_hw, + uint32_t irq_reg_val, + enum cam_ife_csid_top_irq_regs top_idx, + struct timespec64 timestamp) +{ + struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t irq_status; + const uint64_t *evt_bitmap; + const uint8_t *bit_pos; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg; + evt_bitmap = csid_reg->top_debug_mask->evt_bitmap; + bit_pos = csid_reg->top_debug_mask->bit_pos; + + irq_status = irq_reg_val & csid_hw->debug_info.top_mask[top_idx]; + + if (!irq_status) { + CAM_ERR(CAM_ISP, "CSID:%u Unexpected Scenario top_idx:%u", + csid_hw->hw_intf->hw_idx, top_idx); + return 0; + } + + if ((evt_bitmap[top_idx] & BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP)) && + (irq_status & BIT(bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP]))) { + cam_cpas_log_votes(true); + CAM_INFO(CAM_ISP, "CSID:%d INFO_VOTE_UP timestamp:(%lld.%09lld)", + csid_hw->hw_intf->hw_idx, timestamp.tv_sec, + timestamp.tv_nsec); + } + + if ((evt_bitmap[top_idx] & BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN)) && + (irq_status & BIT(bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN]))) { + cam_cpas_log_votes(true); + CAM_INFO(CAM_ISP, "CSID:%d INFO_VOTE_DN timestamp:(%lld.%09lld)", + csid_hw->hw_intf->hw_idx, timestamp.tv_sec, + timestamp.tv_nsec); + } + + if ((evt_bitmap[top_idx] & BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN)) && + (irq_status & BIT(bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN]))) { + CAM_INFO(CAM_ISP, "CSID:%d ERR_NO_VOTE_DN timestamp:(%lld.%09lld)", + 
csid_hw->hw_intf->hw_idx, timestamp.tv_sec, + timestamp.tv_nsec); + + cam_ife_csid_hw_ver2_drv_err_handler(csid_hw); + } + return 0; +} + +static int cam_ife_csid_ver2_top2_info_irq_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + struct cam_ife_csid_ver2_evt_payload *payload; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + payload = evt_payload_priv; + csid_hw = handler_priv; + + cam_ife_csid_ver2_process_top_info(csid_hw, payload->irq_reg_val, + CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1, payload->timestamp); + + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + + return 0; +} + +static int cam_ife_csid_ver2_top_info_irq_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + struct cam_ife_csid_ver2_evt_payload *payload; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + payload = evt_payload_priv; + csid_hw = handler_priv; + + cam_ife_csid_ver2_process_top_info(csid_hw, payload->irq_reg_val, + CAM_IFE_CSID_TOP_IRQ_STATUS_REG0, payload->timestamp); + + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + + return 0; +} + +static int cam_ife_csid_ver2_top_err_irq_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + struct cam_ife_csid_ver2_evt_payload *payload; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_ife_csid_ver2_reg_info *csid_reg = NULL; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + payload = evt_payload_priv; + csid_hw = handler_priv; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + if (!csid_reg->num_top_err_irqs) { + CAM_WARN_RATE_LIMIT_CUSTOM(CAM_ISP, 
100, 1, + "CSID:%u Unexpected Scenario no top error irqs", csid_hw->hw_intf->hw_idx); + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, &csid_hw->path_payload_lock); + return 0; + } + + cam_ife_csid_ver2_top_err_process_bottom_half(csid_hw, payload, + CAM_IFE_CSID_TOP_IRQ_STATUS_REG0); + + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + + return 0; +} + +void cam_ife_csid_ver2_print_format_measure_info( + void *csid, void *resource) +{ + struct cam_ife_csid_ver2_hw *csid_hw = csid; + struct cam_isp_resource_node *res = resource; + struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = + csid_reg->path_reg[res->res_id]; + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + void __iomem *base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + uint32_t expected_frame = 0, actual_frame = 0, format_measure_cfg0 = 0; + int data_idx = 0; + + actual_frame = cam_io_r_mb(base + path_reg->format_measure0_addr); + expected_frame = cam_io_r_mb(base + path_reg->format_measure_cfg1_addr); + format_measure_cfg0 = cam_io_r_mb(base + path_reg->format_measure_cfg0_addr); + + CAM_INFO(CAM_ISP, "CSID[%u] res [id: %d name: %s] format_measure_cfg0: 0x%x", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_name, format_measure_cfg0); + CAM_ERR(CAM_ISP, "CSID[%u] Frame Size Error Expected[h: %u w: %u] Actual[h: %u w: %u]", + csid_hw->hw_intf->hw_idx, ((expected_frame >> + csid_reg->cmn_reg->format_measure_height_shift_val) & + csid_reg->cmn_reg->format_measure_height_mask_val), + expected_frame & + csid_reg->cmn_reg->format_measure_width_mask_val, + ((actual_frame >> + csid_reg->cmn_reg->format_measure_height_shift_val) & + csid_reg->cmn_reg->format_measure_height_mask_val), + actual_frame & + csid_reg->cmn_reg->format_measure_width_mask_val); + + 
cam_ife_csid_ver2_print_hbi_vbi(csid_hw); + + /* AUX settings update to phy for pix and line count errors */ + data_idx = (int)(csid_hw->rx_cfg.phy_sel - csid_reg->cmn_reg->phy_sel_base_idx); + if (data_idx < 0) + CAM_WARN(CAM_ISP, "Can't notify csiphy, incorrect phy selected=%d", + data_idx); + else + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_APPLY_CSIPHY_AUX, (void *)&data_idx); + +} + +static int cam_ife_csid_ver2_ipp_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + struct cam_ife_csid_ver2_evt_payload *payload; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_isp_resource_node *res; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_isp_hw_event_info evt_info; + struct cam_hw_info *hw_info; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + struct cam_isp_sof_ts_data sof_and_boot_time; + struct cam_csid_hw_stop_args csid_stop; + const uint8_t **irq_reg_tag; + uint32_t irq_status_ipp; + uint32_t err_mask; + uint32_t err_type = 0; + uint32_t sof_irq_mask = 0; + uint32_t eof_irq_mask = 0; + uint32_t epoch0_irq_mask = 0; + uint32_t rup_irq_mask = 0; + int rc = 0; + bool out_of_sync_fatal = false; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params. evt_payload_priv: %s, handler_priv: %s", + CAM_IS_NULL_TO_STR(evt_payload_priv), + CAM_IS_NULL_TO_STR(handler_priv)); + return -EINVAL; + } + + payload = evt_payload_priv; + res = handler_priv; + hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + csid_reg = csid_hw->core_info->csid_reg; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + irq_reg_tag = cam_ife_csid_get_irq_reg_tag_ptr(); + + if (!path_cfg || (path_cfg->irq_reg_idx >= CAM_IFE_CSID_IRQ_REG_MAX)) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid params: path_cfg: %pK, irq_reg_idx: %d", + csid_hw->hw_intf->hw_idx, path_cfg, + (path_cfg ? 
(path_cfg->irq_reg_idx) : -1)); + rc = -EINVAL; + goto end; + } + + irq_status_ipp = payload->irq_reg_val; + sof_and_boot_time.boot_time = payload->timestamp; + sof_and_boot_time.sof_ts = payload->sof_ts_reg_val; + + CAM_DBG(CAM_ISP, "CSID[%u] %s status:0x%x", csid_hw->hw_intf->hw_idx, + (payload->is_mc ? "COMP_IRQ" : ((char *)irq_reg_tag[path_cfg->irq_reg_idx])), + irq_status_ipp); + + if (!csid_hw->flags.device_enabled) { + CAM_DBG(CAM_ISP, "CSID[%u] bottom-half after stop [0x%x]", + csid_hw->hw_intf->hw_idx, irq_status_ipp); + goto end; + } + + evt_info.hw_type = CAM_ISP_HW_TYPE_CSID; + evt_info.hw_idx = csid_hw->hw_intf->hw_idx; + evt_info.res_type = CAM_ISP_RESOURCE_PIX_PATH; + evt_info.reg_val = irq_status_ipp; + evt_info.event_data = &sof_and_boot_time; + + if (!csid_hw->event_cb) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID[%u] event cb not registered", + csid_hw->hw_intf->hw_idx); + goto end; + } + + path_reg = csid_reg->path_reg[res->res_id]; + + if (payload->is_mc) { + sof_irq_mask = csid_reg->ipp_mc_reg->comp_sof_mask; + eof_irq_mask = csid_reg->ipp_mc_reg->comp_eof_mask; + rup_irq_mask = csid_reg->ipp_mc_reg->comp_rup_mask; + epoch0_irq_mask = csid_reg->ipp_mc_reg->comp_epoch0_mask; + evt_info.res_id = CAM_IFE_PIX_PATH_RES_IPP; + } else { + sof_irq_mask = path_reg->sof_irq_mask; + eof_irq_mask = path_reg->eof_irq_mask; + rup_irq_mask = path_reg->rup_irq_mask; + epoch0_irq_mask = path_reg->epoch0_irq_mask; + evt_info.res_id = res->res_id; + } + + if (irq_status_ipp & eof_irq_mask) { + cam_ife_csid_ver2_update_event_ts(&path_cfg->eof_ts, &payload->timestamp); + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_EOF, (void *)&evt_info); + } + + if (irq_status_ipp & sof_irq_mask) { + cam_ife_csid_ver2_update_event_ts(&path_cfg->sof_ts, &payload->timestamp); + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); + } + + if (irq_status_ipp & rup_irq_mask) + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&evt_info); + + 
if (irq_status_ipp & epoch0_irq_mask) { + if ((!csid_hw->flags.last_exp_valid) || + (csid_hw->flags.last_exp_valid && path_cfg->allow_epoch_cb)) { + cam_ife_csid_ver2_update_event_ts(&path_cfg->epoch_ts, &payload->timestamp); + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_EPOCH, + (void *)&evt_info); + } + } + + if (payload->is_mc) { + payload->is_mc = false; + goto end; + } + + if (irq_status_ipp & IFE_CSID_VER2_PATH_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP) { + atomic_inc(&path_cfg->switch_out_of_sync_cnt); + /* If threshold is seen, notify error */ + if (atomic_read(&path_cfg->switch_out_of_sync_cnt) >= + CAM_IFE_CSID_MAX_OUT_OF_SYNC_ERR_COUNT) + out_of_sync_fatal = true; + } + + err_mask = path_reg->fatal_err_mask | path_reg->non_fatal_err_mask; + spin_lock(&csid_hw->lock_state); + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID[%u] powered down state", + csid_hw->hw_intf->hw_idx); + goto unlock; + } + + err_type = cam_ife_csid_ver2_parse_path_irq_status( + csid_hw, res, path_cfg->irq_reg_idx, + err_mask, irq_status_ipp, payload); + + if (err_type || out_of_sync_fatal) { + if (out_of_sync_fatal) + err_type = CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP; + + /* Mask off all the irqs for Fatal errors */ + if (irq_status_ipp & path_reg->fatal_err_mask) { + csid_stop.num_res = 1; + csid_stop.node_res = &res; + cam_ife_csid_ver2_maskout_all_irqs(csid_hw, &csid_stop); + } + cam_ife_csid_ver2_handle_event_err(csid_hw, + irq_status_ipp, err_type, false, res); + } + +unlock: + spin_unlock(&csid_hw->lock_state); +end: + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + + return rc; +} + +static int cam_ife_csid_ver2_ppp_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL; + struct cam_ife_csid_ver2_evt_payload *payload; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + struct 
cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_isp_resource_node *res; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_hw_info *hw_info; + struct cam_csid_hw_stop_args csid_stop; + uint32_t irq_status_ppp; + uint32_t err_mask; + uint32_t err_type = 0; + int rc = 0; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params. evt_payload_priv: %s, handler_priv: %s", + CAM_IS_NULL_TO_STR(evt_payload_priv), + CAM_IS_NULL_TO_STR(handler_priv)); + return -EINVAL; + } + + payload = evt_payload_priv; + res = handler_priv; + hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + + if (!path_cfg || (path_cfg->irq_reg_idx >= CAM_IFE_CSID_IRQ_REG_MAX)) { + CAM_ERR(CAM_ISP, "CSID:%u Invalid params: path_cfg: %pK, irq_reg_idx: %d", + csid_hw->hw_intf->hw_idx, path_cfg, + (path_cfg ? (path_cfg->irq_reg_idx) : -1)); + rc = -EINVAL; + goto end; + } + + irq_status_ppp = payload->irq_reg_val; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + path_reg = csid_reg->path_reg[res->res_id]; + err_mask = path_reg->fatal_err_mask | path_reg->non_fatal_err_mask; + + CAM_DBG(CAM_ISP, "CSID[%u] PPP status:0x%x", csid_hw->hw_intf->hw_idx, + irq_status_ppp); + + if (!csid_hw->flags.device_enabled) { + CAM_DBG(CAM_ISP, "CSID[%u] bottom-half after stop [0x%x]", + csid_hw->hw_intf->hw_idx, irq_status_ppp); + goto end; + } + + spin_lock(&csid_hw->lock_state); + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID[%u] powered down state", + csid_hw->hw_intf->hw_idx); + goto unlock; + } + + err_type = cam_ife_csid_ver2_parse_path_irq_status( + csid_hw, res, CAM_IFE_CSID_IRQ_REG_PPP, + err_mask, irq_status_ppp, payload); + + if (err_type) { + /* Mask off all the irqs for Fatal errors */ + if (irq_status_ppp & path_reg->fatal_err_mask) { + csid_stop.num_res = 1; + 
csid_stop.node_res = &res; + cam_ife_csid_ver2_maskout_all_irqs(csid_hw, &csid_stop); + } + + cam_ife_csid_ver2_handle_event_err(csid_hw, + irq_status_ppp, + err_type, + false, + res); + } +unlock: + spin_unlock(&csid_hw->lock_state); +end: + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, + &csid_hw->path_payload_lock); + + return rc; +} + +static int cam_ife_csid_ver2_rdi_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + struct cam_ife_csid_ver2_evt_payload *payload; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + struct cam_isp_resource_node *res; + const struct cam_ife_csid_ver2_path_reg_info *rdi_reg; + struct cam_csid_hw_stop_args csid_stop; + struct cam_hw_info *hw_info; + uint32_t irq_status_rdi; + uint32_t err_mask; + uint32_t err_type = 0; + struct cam_isp_hw_event_info evt_info; + struct cam_isp_sof_ts_data sof_and_boot_time; + int rc = 0; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params. evt_payload_priv: %s, handler_priv: %s", + CAM_IS_NULL_TO_STR(evt_payload_priv), + CAM_IS_NULL_TO_STR(handler_priv)); + return -EINVAL; + } + + payload = evt_payload_priv; + res = handler_priv; + hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + + if (!path_cfg || (path_cfg->irq_reg_idx >= CAM_IFE_CSID_IRQ_REG_MAX)) { + CAM_ERR(CAM_ISP, "CSID:%u Invalid params: path_cfg: %pK, irq_reg_idx: %d", + csid_hw->hw_intf->hw_idx, path_cfg, + (path_cfg ? 
(path_cfg->irq_reg_idx) : -1)); + rc = -EINVAL; + goto end; + } + + sof_and_boot_time.boot_time = payload->timestamp; + sof_and_boot_time.sof_ts = payload->sof_ts_reg_val; + + evt_info.hw_idx = csid_hw->hw_intf->hw_idx; + evt_info.res_type = CAM_ISP_RESOURCE_PIX_PATH; + evt_info.event_data = &sof_and_boot_time; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + irq_status_rdi = payload->irq_reg_val; + rdi_reg = csid_reg->path_reg[res->res_id]; + + if (!rdi_reg) + goto end; + + if (!csid_hw->flags.device_enabled) { + CAM_DBG(CAM_ISP, "CSID:%u bottom-half after stop [0x%x]", + csid_hw->hw_intf->hw_idx, irq_status_rdi); + goto end; + } + + CAM_DBG(CAM_ISP, "CSID[%u] RDI:%d status:0x%x", + csid_hw->hw_intf->hw_idx, + res->res_id, irq_status_rdi); + err_mask = rdi_reg->non_fatal_err_mask | + rdi_reg->fatal_err_mask; + + spin_lock(&csid_hw->lock_state); + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID[%u] powered down state", + csid_hw->hw_intf->hw_idx); + spin_unlock(&csid_hw->lock_state); + goto end; + } + + err_type = cam_ife_csid_ver2_parse_path_irq_status(csid_hw, res, + path_cfg->irq_reg_idx, + err_mask, irq_status_rdi, payload); + + spin_unlock(&csid_hw->lock_state); + if (err_type) { + /* Mask off all the irqs for Fatal errors */ + if (irq_status_rdi & rdi_reg->fatal_err_mask) { + csid_stop.num_res = 1; + csid_stop.node_res = &res; + cam_ife_csid_ver2_maskout_all_irqs(csid_hw, &csid_stop); + } + + cam_ife_csid_ver2_handle_event_err(csid_hw, + irq_status_rdi, err_type, false, res); + goto end; + } + + if (!csid_hw->event_cb) { + CAM_DBG(CAM_ISP, "CSID[%u] no cb registered", + csid_hw->hw_intf->hw_idx); + goto end; + } + + evt_info.res_id = res->res_id; + evt_info.reg_val = irq_status_rdi; + evt_info.hw_type = CAM_ISP_HW_TYPE_CSID; + + if (irq_status_rdi & IFE_CSID_VER2_PATH_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP) { + bool is_secondary = true; + bool do_notify = false; + + /* Only notify if 
secondary event is subscribed for */ + if ((path_cfg->sec_evt_config.en_secondary_evt) && + (path_cfg->sec_evt_config.evt_type & + CAM_IFE_CSID_EVT_SENSOR_SYNC_FRAME_DROP)) + do_notify = true; + + /* Validate error threshold for primary RDI (master) */ + if (res->is_rdi_primary_res) { + atomic_inc(&path_cfg->switch_out_of_sync_cnt); + if (atomic_read(&path_cfg->switch_out_of_sync_cnt) >= + CAM_IFE_CSID_MAX_OUT_OF_SYNC_ERR_COUNT) { + do_notify = true; + is_secondary = false; + } + } + + if (do_notify) + cam_ife_csid_ver2_handle_event_err(csid_hw, irq_status_rdi, + CAM_ISP_HW_ERROR_CSID_SENSOR_FRAME_DROP, is_secondary, res); + } + + if (irq_status_rdi & rdi_reg->eof_irq_mask) { + cam_ife_csid_ver2_update_event_ts(&path_cfg->eof_ts, &payload->timestamp); + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_EOF, (void *)&evt_info); + } + + if ((irq_status_rdi & rdi_reg->sof_irq_mask)) { + if (path_cfg->sec_evt_config.en_secondary_evt && + (path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_SOF)) { + evt_info.is_secondary_evt = true; + } + cam_ife_csid_ver2_update_event_ts(&path_cfg->sof_ts, &payload->timestamp); + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); + } + + if (irq_status_rdi & rdi_reg->rup_irq_mask) + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&evt_info); + + if ((irq_status_rdi & rdi_reg->epoch0_irq_mask)) { + if (path_cfg->sec_evt_config.en_secondary_evt && + (path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_EPOCH)) { + evt_info.is_secondary_evt = true; + } + + if ((!csid_hw->flags.last_exp_valid) || + (csid_hw->flags.last_exp_valid && path_cfg->allow_epoch_cb)) { + cam_ife_csid_ver2_update_event_ts(&path_cfg->epoch_ts, &payload->timestamp); + csid_hw->event_cb(csid_hw->token, CAM_ISP_HW_EVENT_EPOCH, + (void *)&evt_info); + } + } +end: + cam_ife_csid_ver2_put_evt_payload(csid_hw, &payload, + &csid_hw->path_free_payload_list, &csid_hw->path_payload_lock); + + return rc; +} + +int 
cam_ife_csid_ver2_get_hw_caps(void *hw_priv, + void *get_hw_cap_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_ife_csid_hw_caps *hw_caps; + struct cam_ife_csid_ver2_hw *csid_hw; + struct cam_hw_info *hw_info; + struct cam_csid_soc_private *soc_private = NULL; + struct cam_ife_csid_ver2_reg_info *csid_reg; + + if (!hw_priv || !get_hw_cap_args) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + soc_private = (struct cam_csid_soc_private *) + csid_hw->hw_info->soc_info.soc_private; + + hw_caps->num_rdis = csid_reg->cmn_reg->num_rdis; + hw_caps->num_pix = csid_reg->cmn_reg->num_pix; + hw_caps->num_ppp = csid_reg->cmn_reg->num_ppp; + hw_caps->major_version = csid_reg->cmn_reg->major_version; + hw_caps->minor_version = csid_reg->cmn_reg->minor_version; + hw_caps->version_incr = csid_reg->cmn_reg->version_incr; + hw_caps->global_reset_en = csid_reg->cmn_reg->global_reset; + hw_caps->aup_rup_en = csid_reg->cmn_reg->aup_rup_supported; + hw_caps->only_master_rup = csid_reg->cmn_reg->only_master_rup; + hw_caps->is_lite = soc_private->is_ife_csid_lite; + hw_caps->sfe_ipp_input_rdi_res = csid_reg->cmn_reg->sfe_ipp_input_rdi_res; + hw_caps->camif_irq_support = csid_reg->cmn_reg->camif_irq_support; + hw_caps->is_ife_sfe_mapped = csid_reg->is_ife_sfe_mapped; + + CAM_DBG(CAM_ISP, + "CSID:%u num-rdis:%d, num-pix:%d, major:%d minor:%d ver:%d", + csid_hw->hw_intf->hw_idx, hw_caps->num_rdis, + hw_caps->num_pix, hw_caps->major_version, + hw_caps->minor_version, hw_caps->version_incr); + + return rc; +} + +static void cam_ife_csid_ver2_dump_imp_regs( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + void __iomem *mem_base; + int top_irq_val[4] = {0}; + int reset_cfg = 0; + int dual_csid_cfg = 0; + uint32_t clk_lvl; + 
uint32_t hw_idx; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + hw_idx = csid_hw->hw_intf->hw_idx; + + /* Dumping CSID top irq registers */ + top_irq_val[0] = cam_io_r_mb(mem_base + + csid_reg->cmn_reg->top_irq_status_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]); + top_irq_val[1] = cam_io_r_mb(mem_base + + csid_reg->cmn_reg->top_irq_mask_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]); + top_irq_val[2] = cam_io_r_mb(mem_base + + csid_reg->cmn_reg->top_irq_clear_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]); + top_irq_val[3] = cam_io_r_mb(mem_base + + csid_reg->cmn_reg->top_irq_set_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]); + CAM_INFO(CAM_ISP, + "CSID[%d] csid top status 0x%x, mask 0x%x, clr 0x%x, set 0x%x", + hw_idx, top_irq_val[0], top_irq_val[1], + top_irq_val[2], top_irq_val[3]); + + /* Dumping CSID reset cfg and dual csid cfg */ + reset_cfg = cam_io_r_mb(mem_base + csid_reg->cmn_reg->reset_cfg_addr); + + if (csid_reg->top_reg) + dual_csid_cfg = cam_io_r_mb( + soc_info->reg_map[CAM_IFE_CSID_TOP_MEM_BASE_ID].mem_base + + csid_reg->top_reg->dual_csid_cfg0_addr[hw_idx]); + + CAM_INFO(CAM_ISP, "CSID[%d] csid reset_cfg 0x%x, dual_csid_cfg 0x%x, is_dual_en %d", + hw_idx, reset_cfg, dual_csid_cfg, csid_hw->top_cfg.dual_en); + + /* Dumping CSID Clock */ + cam_soc_util_get_clk_level(soc_info, csid_hw->clk_rate, + soc_info->src_clk_idx, &clk_lvl); + + CAM_INFO(CAM_ISP, + "CSID[%d] clk lvl %u received clk_rate %u applied clk_rate sw_client:%lu hw_client:[%lu %lu]", + hw_idx, clk_lvl, csid_hw->clk_rate, + soc_info->applied_src_clk_rates.sw_client, + soc_info->applied_src_clk_rates.hw_client[hw_idx].high, + soc_info->applied_src_clk_rates.hw_client[hw_idx].low); +} + +static int cam_ife_csid_ver2_wait_for_reset( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + 
unsigned long rem_jiffies = 0; + int rc = 0; + + rem_jiffies = cam_common_wait_for_completion_timeout( + &csid_hw->hw_info->hw_complete, + msecs_to_jiffies(CAM_IFE_CSID_RESET_TIMEOUT_MS)); + + if (rem_jiffies == 0) { + rc = -ETIMEDOUT; + if (csid_hw->debug_info.test_bus_enabled) { + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + struct cam_ife_csid_ver2_reg_info *csid_reg = + (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg; + + CAM_ERR(CAM_ISP, + "CSID[%u], sync-mode[%d] test_bus: 0x%x reset timed out", + csid_hw->hw_intf->hw_idx, csid_hw->sync_mode, + cam_io_r_mb( + soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base + + csid_reg->cmn_reg->test_bus_debug)); + } else + CAM_ERR(CAM_ISP, "CSID[%u], sync-mode[%d] reset timed out", + csid_hw->hw_intf->hw_idx, csid_hw->sync_mode); + + cam_ife_csid_ver2_dump_imp_regs(csid_hw); + } else + CAM_DBG(CAM_ISP, + "CSID[%u], sync-mode[%d] reset success", + csid_hw->hw_intf->hw_idx, + csid_hw->sync_mode); + + return rc; +} + +static int cam_ife_csid_ver2_reset_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_ife_csid_ver2_hw *csid_hw; + + csid_hw = th_payload->handler_priv; + + CAM_DBG(CAM_ISP, "CSID[%u] TOP_IRQ_STATUS_0 = 0x%x", csid_hw->hw_intf->hw_idx, + th_payload->evt_status_arr[0]); + + complete(&csid_hw->hw_info->hw_complete); + + return 0; +} + +static int cam_ife_csid_ver2_internal_reset( + struct cam_ife_csid_ver2_hw *csid_hw, + uint32_t rst_cmd, uint32_t rst_location, uint32_t rst_mode) +{ + uint32_t val = 0; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + void __iomem *mem_base; + int rc = 0; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + + soc_info = &csid_hw->hw_info->soc_info; + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + if 
(csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID[%u] powered down state", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + /* Input cut off prior to SW reset */ + if (rst_cmd == CAM_IFE_CSID_RESET_CMD_SW_RST) { + cam_io_w_mb(0x0, mem_base + csi2_reg->cfg0_addr); + cam_io_w_mb(0x0, mem_base + csi2_reg->cfg1_addr); + } + + if (csid_hw->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + goto wait_only; + + reinit_completion(&csid_hw->hw_info->hw_complete); + + /* Program the reset location */ + if (rst_location == CAM_IFE_CSID_RESET_LOC_PATH_ONLY) + val |= (csid_reg->cmn_reg->rst_loc_path_only_val << + csid_reg->cmn_reg->rst_location_shift_val); + else if (rst_location == CAM_IFE_CSID_RESET_LOC_COMPLETE) + val |= (csid_reg->cmn_reg->rst_loc_complete_csid_val << + csid_reg->cmn_reg->rst_location_shift_val); + + /*Program the mode */ + if (rst_mode == CAM_CSID_HALT_AT_FRAME_BOUNDARY) + val |= (csid_reg->cmn_reg->rst_mode_frame_boundary_val << + csid_reg->cmn_reg->rst_mode_shift_val); + else if (rst_mode == CAM_CSID_HALT_IMMEDIATELY) + val |= (csid_reg->cmn_reg->rst_mode_immediate_val << + csid_reg->cmn_reg->rst_mode_shift_val); + + cam_io_w_mb(val, mem_base + csid_reg->cmn_reg->reset_cfg_addr); + CAM_DBG(CAM_ISP, "CSID[%u] reset_cfg: 0x%x", + csid_hw->hw_intf->hw_idx, val); + val = 0; + + /*Program the cmd */ + if (rst_cmd == CAM_IFE_CSID_RESET_CMD_IRQ_CTRL) + val = csid_reg->cmn_reg->rst_cmd_irq_ctrl_only_val; + else if (rst_cmd == CAM_IFE_CSID_RESET_CMD_HW_RST) + val = csid_reg->cmn_reg->rst_cmd_hw_reset_complete_val; + else if (rst_cmd == CAM_IFE_CSID_RESET_CMD_SW_RST) + val = csid_reg->cmn_reg->rst_cmd_sw_reset_complete_val; + + cam_io_w_mb(val, mem_base + csid_reg->cmn_reg->reset_cmd_addr); + CAM_DBG(CAM_ISP, "CSID[%u] reset_cmd: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + if (rst_cmd == CAM_IFE_CSID_RESET_CMD_IRQ_CTRL) + return 0; + +wait_only: + + rc = cam_ife_csid_ver2_wait_for_reset(csid_hw); + + if (rc) + CAM_ERR(CAM_ISP, + "CSID[%u] 
Reset failed mode %d cmd %d loc %d", + csid_hw->hw_intf->hw_idx, + rst_mode, rst_cmd, rst_location); + reinit_completion(&csid_hw->hw_info->hw_complete); + return rc; +} + +int cam_ife_csid_ver2_reset(void *hw_priv, + void *reset_args, uint32_t arg_size) +{ + struct cam_hw_info *hw_info; + struct cam_ife_csid_ver2_hw *csid_hw; + struct cam_csid_reset_cfg_args *reset; + int rc = 0; + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + reset = (struct cam_csid_reset_cfg_args *)reset_args; + + mutex_lock(&csid_hw->hw_info->hw_mutex); + switch (reset->reset_type) { + case CAM_IFE_CSID_RESET_GLOBAL: + rc = cam_ife_csid_ver2_internal_reset(csid_hw, + CAM_IFE_CSID_RESET_CMD_SW_RST, + CAM_IFE_CSID_RESET_LOC_COMPLETE, + CAM_CSID_HALT_IMMEDIATELY); + break; + + case CAM_IFE_CSID_RESET_PATH: + rc = cam_ife_csid_ver2_internal_reset(csid_hw, + CAM_IFE_CSID_RESET_CMD_HW_RST, + CAM_IFE_CSID_RESET_LOC_PATH_ONLY, + CAM_CSID_HALT_IMMEDIATELY); + break; + + case CAM_IFE_CSID_RESET_GLOBAL_HW_ONLY: + rc = cam_ife_csid_ver2_internal_reset(csid_hw, + CAM_IFE_CSID_RESET_CMD_HW_RST, + CAM_IFE_CSID_RESET_LOC_COMPLETE, + CAM_CSID_HALT_IMMEDIATELY); + break; + + case CAM_IFE_CSID_RESET_GLOBAL_IRQ_CNTRL: + rc = cam_ife_csid_ver2_internal_reset(csid_hw, + CAM_IFE_CSID_RESET_CMD_IRQ_CTRL, + CAM_IFE_CSID_RESET_LOC_COMPLETE, + CAM_CSID_HALT_IMMEDIATELY); + break; + + default: + rc = -EINVAL; + break; + } + + if (rc) + CAM_ERR(CAM_ISP, "CSID[%u] reset type: %s failed", + csid_hw->hw_intf->hw_idx, + cam_ife_csid_reset_type_to_string(reset->reset_type)); + else + CAM_DBG(CAM_ISP, "CSID[%u] reset type: %s", + csid_hw->hw_intf->hw_idx, + cam_ife_csid_reset_type_to_string(reset->reset_type)); + + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; +} + +static inline void cam_ife_csid_ver2_maskout_path_irqs( + int32_t res_id, + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_path_cfg *path_cfg) +{ + int rc; + + if 
(path_cfg->err_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + csid_hw->path_irq_controller[res_id], + path_cfg->err_irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unsubscribe path err irq CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + + path_cfg->err_irq_handle = 0; + } + + if (path_cfg->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + csid_hw->path_irq_controller[res_id], + path_cfg->irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unsubscribe path info irq CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + + path_cfg->irq_handle = 0; + } + + if (path_cfg->discard_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + csid_hw->path_irq_controller[res_id], + path_cfg->discard_irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unsubscribe path discard irq CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + + path_cfg->discard_irq_handle = 0; + } + + if (path_cfg->top_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + path_cfg->top_irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unsubscribe path irq in top CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + + path_cfg->top_irq_handle = 0; + + rc = cam_irq_controller_unregister_dependent( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->path_irq_controller[res_id]); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unregister path dependent with top CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + } +} + +static inline void cam_ife_csid_ver2_disable_path_irqs_evts( + int32_t res_id, + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_path_cfg *path_cfg) +{ + int rc; + + if (path_cfg->err_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq_evt( + csid_hw->path_irq_controller[res_id], + path_cfg->err_irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unsubscribe path err irq evt CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + + 
path_cfg->err_irq_handle = 0; + } + + if (path_cfg->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq_evt( + csid_hw->path_irq_controller[res_id], + path_cfg->irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unsubscribe path info irq evt CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + + path_cfg->irq_handle = 0; + } + + if (path_cfg->discard_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq_evt( + csid_hw->path_irq_controller[res_id], + path_cfg->discard_irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unsubscribe path discard irq evt CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + + path_cfg->discard_irq_handle = 0; + } + + if (path_cfg->top_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq_evt( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + path_cfg->top_irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unsubscribe path irq in top evt CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + + path_cfg->top_irq_handle = 0; + + rc = cam_irq_controller_unregister_dependent( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->path_irq_controller[res_id]); + if (rc) + CAM_WARN(CAM_ISP, + "Failed to unregister path dependent with top CSID:%u rc: %d", + csid_hw->hw_intf->hw_idx, rc); + } +} + +static int cam_ife_csid_ver2_disable_path( + bool maskout_irqs, + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + struct cam_ife_csid_ver2_path_cfg *path_cfg; + int rc = 0; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, + "CSID:%u path res type:%d res_id:%d Invalid state:%d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%u Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + + if (maskout_irqs) + 
cam_ife_csid_ver2_maskout_path_irqs(res->res_id, + csid_hw, path_cfg); + else + cam_ife_csid_ver2_disable_path_irqs_evts(res->res_id, + csid_hw, path_cfg); + + /* Reset frame drop fields at stream off */ + path_cfg->discard_init_frames = false; + path_cfg->skip_discard_frame_cfg = false; + path_cfg->num_frames_discard = 0; + path_cfg->sof_cnt = 0; + path_cfg->allow_epoch_cb = false; + atomic_set(&path_cfg->switch_out_of_sync_cnt, 0); + return rc; +} + +static bool cam_ife_csid_hw_ver2_need_unpack_mipi( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_csid_hw_reserve_resource_args *reserve, + const struct cam_ife_csid_ver2_path_reg_info *path_reg, + uint32_t format) +{ + bool need_unpack = false; + + switch(format) { + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + /* + * CAM_FORMAT_PLAIN16_16 : can be removed? double check why default_out_format has it. + * default_out_format is used in xCFA usecases without real RDI0 out buffer. + * We still need to set need_unpack here so that we unpack incoming data (say MIPI10) + * into MSB. 
If default_out_format can be set to 16_10/16_12/16_14 - then we can remove + */ + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + need_unpack = (bool)(path_reg->capabilities & CAM_IFE_CSID_CAP_RDI_UNPACK_MSB); + break; + default: + need_unpack = false; + break; + } + + CAM_DBG(CAM_ISP, "CSID[%u], RDI_%u format %u need_unpack %u sfe_shdr %u", + csid_hw->hw_intf->hw_idx, reserve->res_id, format, need_unpack, + reserve->sfe_inline_shdr); + + return need_unpack; +} + +static int cam_ife_csid_hw_ver2_prepare_config_path_data( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_path_cfg *path_cfg, + struct cam_csid_hw_reserve_resource_args *reserve, + uint32_t cid) +{ + int rc, i; + const struct cam_ife_csid_ver2_reg_info *csid_reg = + (struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg; + struct cam_ife_csid_cid_data *cid_data = &csid_hw->cid_data[cid]; + struct cam_isp_resource_node *res = &csid_hw->path_res[reserve->res_id]; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + + for(i = 0; i < reserve->in_port->num_valid_vc_dt; i++) + path_cfg->in_format[i] = reserve->in_port->format[i]; + + path_cfg->cid = cid; + path_cfg->sync_mode = reserve->sync_mode; + path_cfg->height = reserve->in_port->height; + path_cfg->start_line = reserve->in_port->line_start; + path_cfg->end_line = reserve->in_port->line_stop; + path_cfg->crop_enable = reserve->crop_enable; + path_cfg->drop_enable = reserve->drop_enable; + path_cfg->horizontal_bin = reserve->in_port->horizontal_bin; + path_cfg->vertical_bin = reserve->in_port->vertical_bin; + path_cfg->qcfa_bin = reserve->in_port->qcfa_bin; + path_cfg->num_bytes_out = reserve->in_port->num_bytes_out; + path_cfg->sec_evt_config.en_secondary_evt = reserve->sec_evt_config.en_secondary_evt; + path_cfg->sec_evt_config.evt_type = reserve->sec_evt_config.evt_type; + path_reg = csid_reg->path_reg[res->res_id]; + + if (reserve->out_port) + 
path_cfg->out_format = reserve->out_port->format; + else + path_cfg->out_format = path_reg->default_out_format; + + if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) { + path_cfg->start_pixel = reserve->in_port->left_start; + path_cfg->end_pixel = reserve->in_port->left_stop; + path_cfg->width = reserve->in_port->left_width; + + if (reserve->res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 && + reserve->res_id <= (CAM_IFE_PIX_PATH_RES_RDI_0 + + CAM_IFE_CSID_RDI_MAX - 1)) { + path_cfg->end_pixel = reserve->in_port->right_stop; + path_cfg->width = path_cfg->end_pixel - + path_cfg->start_pixel + 1; + } + CAM_DBG(CAM_ISP, + "CSID:%u res:%d master:startpixel 0x%x endpixel:0x%x", + csid_hw->hw_intf->hw_idx, reserve->res_id, + path_cfg->start_pixel, path_cfg->end_pixel); + CAM_DBG(CAM_ISP, + "CSID:%u res:%d master:line start:0x%x line end:0x%x", + csid_hw->hw_intf->hw_idx, reserve->res_id, + path_cfg->start_line, path_cfg->end_line); + } else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) { + path_cfg->start_pixel = reserve->in_port->right_start; + path_cfg->end_pixel = reserve->in_port->right_stop; + path_cfg->width = reserve->in_port->right_width; + CAM_DBG(CAM_ISP, + "CSID:%u res:%d slave:start:0x%x end:0x%x width 0x%x", + csid_hw->hw_intf->hw_idx, reserve->res_id, + path_cfg->start_pixel, path_cfg->end_pixel, + path_cfg->width); + CAM_DBG(CAM_ISP, + "CSID:%u res:%d slave:line start:0x%x line end:0x%x", + csid_hw->hw_intf->hw_idx, reserve->res_id, + path_cfg->start_line, path_cfg->end_line); + } else { + path_cfg->width = reserve->in_port->left_width; + path_cfg->start_pixel = reserve->in_port->left_start; + path_cfg->end_pixel = reserve->in_port->left_stop; + CAM_DBG(CAM_ISP, + "CSID:%u res:%d left width %d start: %d stop:%d start line :%d end line %d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + reserve->in_port->left_width, + reserve->in_port->left_start, + reserve->in_port->left_stop, + path_cfg->start_line, + path_cfg->end_line); + } + + switch (reserve->res_id) { + case 
CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + /* + * Skip this if we are not configuring the unpacking at stream on. + * Also, we need to make sure wm parameters are not updated in this case + * for wm packing. + */ + if (!(csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SKIP_PATH_CFG1)) { + path_cfg->csid_out_unpack_msb = + cam_ife_csid_hw_ver2_need_unpack_mipi(csid_hw, reserve, path_reg, + path_cfg->out_format); + + /* + * if csid gives unpacked msb out, packing needs to be done at + * WM side if needed, based on the format the decision is + * taken at WM side + */ + reserve->use_wm_pack = path_cfg->csid_out_unpack_msb; + } + + for (i = 0; i < CAM_ISP_VC_DT_CFG; i++) { + if (cid_data->vc_dt[i].valid) { + rc = cam_ife_csid_get_format_rdi( + path_cfg->in_format[i], path_cfg->out_format, + &path_cfg->path_format[i], path_reg->mipi_pack_supported, + path_cfg->csid_out_unpack_msb); + if (rc) { + CAM_ERR(CAM_ISP, + "Invalid format cfg at idx: %d for res_id: 0x%x, in_fmt: 0x%x, out_fmt: 0x%x, mipi_pack_supported: %s, csid_unpack_msb: %s", + i, res->res_id, path_cfg->in_format[i], + path_cfg->out_format, + CAM_BOOL_TO_YESNO(path_reg->mipi_pack_supported), + CAM_BOOL_TO_YESNO(path_cfg->csid_out_unpack_msb)); + goto end; + } + } + } + break; + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_IPP_1: + case CAM_IFE_PIX_PATH_RES_IPP_2: + case CAM_IFE_PIX_PATH_RES_PPP: + + for (i = 0; i < CAM_ISP_VC_DT_CFG; i++) { + if (cid_data->vc_dt[i].valid) { + rc = cam_ife_csid_get_format_ipp_ppp( + path_cfg->in_format[i], + &path_cfg->path_format[i]); + if (rc) { + CAM_ERR(CAM_ISP, + "Invalid format cfg at idx: %d for res_id: 0x%x, in_fmt: 0x%x", + i, res->res_id, path_cfg->in_format[i]); + goto end; + } + } + } + + break; + default: + rc = -EINVAL; + CAM_ERR(CAM_ISP, "CSID[%u] Invalid Res id %u", + csid_hw->hw_intf->hw_idx, reserve->res_id); + goto 
end; + } + + rc = cam_ife_csid_ver2_decode_format_validate(csid_hw, res); + if (rc) + goto end; + +end: + return rc; +} + +static int cam_ife_csid_hw_ver2_prepare_config_rx( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_csid_hw_reserve_resource_args *reserve) +{ + + /*Before calling this function we already validated the + * sancitity of in port args. If this function is called + * from somewhere else as well, please make sure to validate the + * in_port args before coming here. + */ + if (csid_hw->counters.csi2_reserve_cnt) { + CAM_DBG(CAM_ISP, "CSID %d Rx already reserved cnt %d", + csid_hw->hw_intf->hw_idx, + csid_hw->counters.csi2_reserve_cnt); + csid_hw->counters.csi2_reserve_cnt++; + return 0; + } + + csid_hw->rx_cfg.lane_cfg = + reserve->in_port->lane_cfg; + csid_hw->rx_cfg.lane_type = + reserve->in_port->lane_type; + csid_hw->rx_cfg.lane_num = + reserve->in_port->lane_num; + csid_hw->res_type = reserve->in_port->res_type; + csid_hw->rx_cfg.dynamic_sensor_switch_en = + reserve->in_port->dynamic_sensor_switch_en; + if (reserve->in_port->epd_supported) + csid_hw->rx_cfg.epd_supported = 1; + + switch (reserve->in_port->res_type) { + case CAM_ISP_IFE_IN_RES_TPG: + csid_hw->rx_cfg.phy_sel = 0; + csid_hw->rx_cfg.tpg_mux_sel = 0; + break; + case CAM_ISP_IFE_IN_RES_CPHY_TPG_0: + csid_hw->rx_cfg.tpg_mux_sel = 1; + csid_hw->rx_cfg.tpg_num_sel = 1; + break; + case CAM_ISP_IFE_IN_RES_CPHY_TPG_1: + csid_hw->rx_cfg.tpg_mux_sel = 1; + csid_hw->rx_cfg.tpg_num_sel = 2; + break; + case CAM_ISP_IFE_IN_RES_CPHY_TPG_2: + csid_hw->rx_cfg.tpg_mux_sel = 1; + csid_hw->rx_cfg.tpg_num_sel = 3; + break; + default: + csid_hw->rx_cfg.tpg_mux_sel = 0; + csid_hw->rx_cfg.phy_sel = + (reserve->in_port->res_type & 0xFF); + break; + } + + csid_hw->counters.csi2_reserve_cnt++; + CAM_DBG(CAM_ISP, + "CSID:%u Rx lane param: cfg:%u type:%u num:%u res:%u rx reserve count: %u", + csid_hw->hw_intf->hw_idx, + reserve->in_port->lane_cfg, reserve->in_port->lane_type, + reserve->in_port->lane_num, 
reserve->in_port->res_type, + csid_hw->counters.csi2_reserve_cnt); + + return 0; + +} + +static int cam_ife_csid_ver2_prepare_camif_config( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_csid_hw_reserve_resource_args *reserve, + struct cam_ife_csid_ver2_path_cfg *path_cfg) +{ + struct cam_ife_csid_ver2_reg_info *csid_reg; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + switch (reserve->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_IPP_1: + case CAM_IFE_PIX_PATH_RES_IPP_2: + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + path_cfg->epoch_cfg = (path_cfg->end_line - path_cfg->start_line) * + csid_reg->cmn_reg->epoch_factor / 100; + + if (path_cfg->epoch_cfg > path_cfg->end_line) + path_cfg->epoch_cfg = path_cfg->end_line; + + if (path_cfg->horizontal_bin || path_cfg->qcfa_bin) + path_cfg->epoch_cfg >>= 1; + + CAM_DBG(CAM_ISP, "CSID[%u] res_id: %u epoch factor: 0x%x factor %u", + csid_hw->hw_intf->hw_idx, reserve->res_id, path_cfg->epoch_cfg, + csid_reg->cmn_reg->epoch_factor); + break; + default: + CAM_DBG(CAM_ISP, "CSID[%u] No CAMIF epoch update for res: %u", + csid_hw->hw_intf->hw_idx, reserve->res_id); + break; + } + + return 0; +} + +int cam_ife_csid_hw_ver2_prepare_hw_cfg( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_ife_csid_ver2_path_cfg *path_cfg, + struct cam_csid_hw_reserve_resource_args *reserve, + uint32_t cid) +{ + int rc = 0; + + rc = cam_ife_csid_hw_ver2_prepare_config_rx(csid_hw, reserve); + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%u] rx config failed", + csid_hw->hw_intf->hw_idx); + return rc; + } + + rc = cam_ife_csid_hw_ver2_prepare_config_path_data(csid_hw, path_cfg, + reserve, cid); + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%u] path data config failed", + csid_hw->hw_intf->hw_idx); + goto end; + } + + rc = cam_ife_csid_ver2_prepare_camif_config(csid_hw, 
reserve, path_cfg); + if (rc) + CAM_ERR(CAM_ISP, "CSID[%u] camif config failed", + csid_hw->hw_intf->hw_idx); + +end: + return rc; +} + +static int cam_ife_csid_ver2_in_port_validate( + struct cam_csid_hw_reserve_resource_args *reserve, + struct cam_ife_csid_ver2_hw *csid_hw) +{ + int rc = 0; + + /* check in port args for RT streams*/ + if (!reserve->is_offline) { + rc = cam_ife_csid_check_in_port_args(reserve, + csid_hw->hw_intf->hw_idx); + if (rc) + goto err; + } + + if (csid_hw->counters.csi2_reserve_cnt) { + + if (csid_hw->token != reserve->cb_priv) { + CAM_ERR(CAM_ISP, + "CSID[%u] different Context for res %d rx reserve count: %u", + csid_hw->hw_intf->hw_idx, + reserve->res_id, csid_hw->counters.csi2_reserve_cnt); + rc = -EINVAL; + goto err; + } + + if (csid_hw->res_type != reserve->in_port->res_type) { + CAM_ERR(CAM_ISP, + "CSID[%u] Invalid res[%d] in_res_type[%d]", + csid_hw->hw_intf->hw_idx, + csid_hw->res_type, + reserve->in_port->res_type); + rc = -EINVAL; + goto err; + } + + if (csid_hw->rx_cfg.lane_cfg != + reserve->in_port->lane_cfg || + csid_hw->rx_cfg.lane_type != + reserve->in_port->lane_type || + csid_hw->rx_cfg.lane_num != + reserve->in_port->lane_num) { + CAM_ERR(CAM_ISP, + "CSID[%u] lane: num[%d %d] type[%d %d] cfg[%d %d]", + csid_hw->hw_intf->hw_idx, + csid_hw->rx_cfg.lane_num, + reserve->in_port->lane_num, + csid_hw->rx_cfg.lane_type, + reserve->in_port->lane_type, + csid_hw->rx_cfg.lane_cfg, + reserve->in_port->lane_cfg); + rc = -EINVAL; + goto err; + } + } + + return rc; +err: + CAM_ERR(CAM_ISP, "Invalid args CSID[%u] rc %d", + csid_hw->hw_intf->hw_idx, rc); + return rc; +} + +int cam_ife_csid_ver2_reserve(void *hw_priv, + void *reserve_args, uint32_t arg_size) +{ + + struct cam_ife_csid_ver2_hw *csid_hw; + struct cam_hw_info *hw_info; + struct cam_isp_resource_node *res = NULL; + struct cam_csid_hw_reserve_resource_args *reserve; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + const struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t 
cid; + int rc = 0; + + reserve = (struct cam_csid_hw_reserve_resource_args *)reserve_args; + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + res = &csid_hw->path_res[reserve->res_id]; + if (res->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + /** + * intentionally set as DBG log to since this log gets printed when hw manager + * checks if resource is available + */ + CAM_DBG(CAM_ISP, "CSID %d Res_id %d state %d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + res->res_state); + return -EBUSY; + } + + rc = cam_ife_csid_ver2_in_port_validate(reserve, csid_hw); + if (rc) { + CAM_ERR(CAM_ISP, "CSID %d Res_id %d port validation failed", + csid_hw->hw_intf->hw_idx, reserve->res_id); + return rc; + } + + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + if (!path_cfg) { + CAM_ERR(CAM_ISP, + "CSID %d Unallocated Res_id %d state %d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + res->res_state); + return -EINVAL; + } + + rc = cam_ife_csid_cid_reserve(csid_hw->cid_data, &cid, + csid_hw->hw_intf->hw_idx, reserve); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID %d Res_id %d state %d invalid cid %d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + res->res_state, cid); + return rc; + } + + /* Skip rx and csid cfg for offline */ + if (!reserve->is_offline) { + rc = cam_ife_csid_hw_ver2_prepare_hw_cfg(csid_hw, path_cfg, + reserve, cid); + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%u] res %d hw_cfg fail", + csid_hw->hw_intf->hw_idx, reserve->res_id); + goto release; + } + } + + /* Acquire ownership */ + rc = cam_vmrm_soc_acquire_resources(csid_hw->hw_info->soc_info.hw_id); + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%u] acquire ownership failed", + csid_hw->hw_intf->hw_idx); + goto release; + } + + reserve->node_res = res; + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + csid_hw->event_cb = reserve->event_cb; + csid_hw->tasklet = 
reserve->tasklet;
	csid_hw->token = reserve->cb_priv;
	reserve->buf_done_controller = csid_hw->buf_done_irq_controller;
	reserve->mc_comp_buf_done_controller =
		csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0];
	res->cdm_ops = reserve->cdm_ops;
	csid_hw->flags.sfe_en = reserve->sfe_en;
	path_cfg->sfe_shdr = reserve->sfe_inline_shdr;
	path_cfg->handle_camif_irq = reserve->handle_camif_irq;
	path_cfg->is_aeb_en = reserve->in_port->aeb_mode;
	csid_hw->flags.offline_mode = reserve->is_offline;
	reserve->need_top_cfg = csid_reg->need_top_cfg;
	reserve->dynamic_drv_supported = csid_reg->dynamic_drv_supported;

	CAM_DBG(CAM_ISP, "CSID[%u] Resource[id: %d name:%s] state %d cid %d",
		csid_hw->hw_intf->hw_idx, reserve->res_id, res->res_name,
		res->res_state, cid);

	return rc;

release:
	/* Failure path: undo the CID reservation and restore default CSID params */
	cam_ife_csid_cid_release(&csid_hw->cid_data[cid], csid_hw->hw_intf->hw_idx, path_cfg->cid);
	cam_ife_csid_ver2_reset_csid_params(csid_hw);
	return rc;
}

/*
 * cam_ife_csid_ver2_release() - release a reserved CSID PIX path resource
 *
 * @hw_priv:      opaque cam_hw_info pointer for this CSID instance
 * @release_args: cam_isp_resource_node of the path being released
 * @arg_size:     must equal sizeof(struct cam_isp_resource_node)
 *
 * Validates the resource, drops its CID reference, clears its path config,
 * decrements the RX reserve count (resetting CSID params when it hits zero)
 * and marks the resource AVAILABLE again. Returns 0 or a negative errno.
 */
int cam_ife_csid_ver2_release(void *hw_priv,
	void *release_args, uint32_t arg_size)
{
	struct cam_ife_csid_ver2_hw *csid_hw;
	struct cam_hw_info *hw_info;
	struct cam_isp_resource_node *res = NULL;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	int rc = 0;

	if (!hw_priv || !release_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		CAM_ERR(CAM_ISP, "CSID: Invalid args");
		return -EINVAL;
	}

	hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info;
	res = (struct cam_isp_resource_node *)release_args;

	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
		CAM_ERR(CAM_ISP, "CSID:%u Invalid res type:%d res id%d",
			csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	mutex_lock(&csid_hw->hw_info->hw_mutex);

	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
		CAM_ERR(CAM_ISP, "CSID:%u Invalid res type:%d res id%d",
			csid_hw->hw_intf->hw_idx,
			res->res_type,
			res->res_id);
		rc = -EINVAL;
		goto end;
	}

	/* Only RESERVED/INIT_HW states are releasable; warn and bail otherwise */
	if ((res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) ||
		(res->res_state >= CAM_ISP_RESOURCE_STATE_STREAMING)) {
		CAM_WARN(CAM_ISP,
			"CSID:%u res type:%d Res %d in state %d",
			csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id,
			res->res_state);
		goto end;
	}

	CAM_DBG(CAM_ISP, "CSID:%u res type :%d Resource [id:%d name:%s] rx reserve count: %u",
		csid_hw->hw_intf->hw_idx, res->res_type,
		res->res_id, res->res_name, csid_hw->counters.csi2_reserve_cnt);

	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;

	if (path_cfg->cid >= CAM_IFE_CSID_CID_MAX) {
		CAM_ERR(CAM_ISP, "CSID:%d Invalid cid:%d",
			csid_hw->hw_intf->hw_idx, path_cfg->cid);
		rc = -EINVAL;
		goto end;
	}

	cam_ife_csid_cid_release(&csid_hw->cid_data[path_cfg->cid],
		csid_hw->hw_intf->hw_idx,
		path_cfg->cid);

	/* Wipe the per-path config; it is re-populated on the next reserve */
	memset(path_cfg, 0, sizeof(*path_cfg));

	csid_hw->sync_mode = CAM_ISP_HW_SYNC_NONE;

	if (csid_hw->counters.csi2_reserve_cnt)
		csid_hw->counters.csi2_reserve_cnt--;

	/* Last path released: reset CSID-wide params */
	if (!csid_hw->counters.csi2_reserve_cnt)
		cam_ife_csid_ver2_reset_csid_params(csid_hw);

	res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

	/* Release VM resource manager ownership; failure is logged, not fatal */
	rc = cam_vmrm_soc_release_resources(csid_hw->hw_info->soc_info.hw_id);
	if (rc) {
		CAM_ERR(CAM_ISP, "CSID[%u] vmrm soc release resources failed",
			csid_hw->hw_intf->hw_idx);
	}

end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}

/*
 * cam_ife_csid_hw_ver2_use_master_slave_cfg() - decide whether the given path
 * needs the SHDR master/slave register configuration.
 *
 * True for RDI0-3 under AEB or SFE inline-SHDR, for RDI0/PPP when RDI-LCR is
 * enabled, and for RDI0 when the HW defaults to master/slave and PPP is in
 * use. Purely a predicate; no register writes here.
 */
static bool cam_ife_csid_hw_ver2_use_master_slave_cfg(
	struct cam_ife_csid_ver2_hw *csid_hw,
	uint32_t res_id)
{
	struct cam_ife_csid_ver2_reg_info *csid_reg;
	bool ret = false;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_isp_resource_node *res = NULL;
	struct cam_isp_resource_node *ppp_res = NULL;

	csid_reg = (struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg;
	path_reg = csid_reg->path_reg[res_id];
	res = &csid_hw->path_res[res_id];
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;

	switch (res_id) {
	case CAM_IFE_PIX_PATH_RES_RDI_0:
		if (path_cfg->is_aeb_en || path_cfg->sfe_shdr || csid_hw->flags.rdi_lcr_en)
			ret = true;

		/* HW default master/slave: RDI0 follows PPP's reservation state */
		if (path_reg->use_master_slave_default) {
			ppp_res = &csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP];
			ret = ppp_res->res_state > CAM_ISP_RESOURCE_STATE_AVAILABLE ? true : false;
		}

		break;
	case CAM_IFE_PIX_PATH_RES_RDI_1:
	case CAM_IFE_PIX_PATH_RES_RDI_2:
	case CAM_IFE_PIX_PATH_RES_RDI_3:
		if (path_cfg->is_aeb_en || path_cfg->sfe_shdr)
			ret = true;
		break;
	case CAM_IFE_PIX_PATH_RES_PPP:
		if (csid_hw->flags.rdi_lcr_en || path_reg->use_master_slave_default)
			ret = true;
		break;
	default:
		break;
	}

	CAM_DBG(CAM_ISP, "CSID[%u] %s use master slave cfg %s", csid_hw->hw_intf->hw_idx,
		res->res_name, CAM_BOOL_TO_YESNO(ret));
	return ret;
}

/*
 * cam_ife_csid_ver2_res_master_slave_cfg() - program the SHDR master/slave
 * cfg register for one path (read-modify-write), setting RDI0 as master and
 * RDI1/RDI2/PPP as slaves, plus the global enable bit.
 */
static void cam_ife_csid_ver2_res_master_slave_cfg(struct cam_ife_csid_ver2_hw *csid_hw,
	uint32_t res_id)
{
	struct cam_ife_csid_ver2_reg_info *csid_reg;
	uint32_t val;
	void __iomem *mem_base;
	struct cam_hw_soc_info *soc_info;
	struct cam_isp_resource_node *res = &csid_hw->path_res[res_id];
	struct cam_ife_csid_ver2_path_cfg *path_cfg =
		(struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
	struct cam_ife_csid_cid_data *cid_data = &csid_hw->cid_data[path_cfg->cid];

	if (!cam_ife_csid_hw_ver2_use_master_slave_cfg(csid_hw, res_id))
		return;

	csid_reg = (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;
	val = cam_io_r_mb(mem_base + csid_reg->cmn_reg->shdr_master_slave_cfg_addr);

	switch (res_id) {
	case CAM_IFE_PIX_PATH_RES_RDI_0:
		val |= BIT(csid_reg->cmn_reg->shdr_master_rdi0_shift);
		/* Also mirror the master VC into RX_MODE_ID_CFG1 where supported */
		if (cid_data->vc_dt[0].vc && csid_reg->cmn_reg->rx_mode_id_cfg1_addr) {
			cam_io_w_mb(cid_data->vc_dt[0].vc, mem_base +
				csid_reg->cmn_reg->rx_mode_id_cfg1_addr);
			CAM_DBG(CAM_ISP, "CSID %d res:%s CSI2_RX_MODE_ID_CFG1:0x%x",
				csid_hw->hw_intf->hw_idx,
				res->res_name, cid_data->vc_dt[0].vc);
		}
		break;
	case CAM_IFE_PIX_PATH_RES_RDI_1:
		val |= BIT(csid_reg->cmn_reg->shdr_slave_rdi1_shift);
		break;
	case CAM_IFE_PIX_PATH_RES_RDI_2:
		val |= BIT(csid_reg->cmn_reg->shdr_slave_rdi2_shift);
		break;
	case CAM_IFE_PIX_PATH_RES_PPP:
		val |= BIT(csid_reg->cmn_reg->shdr_slave_ppp_shift);
		break;
	default:
		break;
	}

	val |= BIT(csid_reg->cmn_reg->shdr_master_slave_en_shift);

	cam_io_w_mb(val, mem_base + csid_reg->cmn_reg->shdr_master_slave_cfg_addr);

	CAM_DBG(CAM_ISP, "CSID %d res:%s master slave cfg 0x%x", csid_hw->hw_intf->hw_idx,
		res->res_name, val);
}

/*
 * cam_ife_csid_ver2_init_config_rdi_path() - program cfg0/cfg1 and related
 * registers for an RDI path.
 *
 * In offline mode only the client-enable + offline bits are written and the
 * function returns early. Otherwise cfg0 (VC/DT/decode format/VFR/frame-id/
 * timestamp), the multi-VC/DT combo registers, cfg1 (crop/drop/pack/plain
 * format, unless the HW programs cfg1 via the CDM buffer), path enable,
 * overflow recovery, master/slave cfg and optional HBI/VBI measurement are
 * programmed in that order. Returns 0 or -EINVAL for unsupported RDI.
 */
static int cam_ife_csid_ver2_init_config_rdi_path(
	struct cam_ife_csid_ver2_hw *csid_hw,
	struct cam_isp_resource_node *res)
{
	int rc = 0;
	const struct cam_ife_csid_ver2_reg_info *csid_reg;
	struct cam_hw_soc_info *soc_info;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL;
	const struct cam_ife_csid_ver2_common_reg_info *cmn_reg = NULL;
	uint32_t val, cfg0 = 0, cfg1 = 0;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_ife_csid_cid_data *cid_data;
	void __iomem *mem_base;

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;

	if (!csid_reg->path_reg[res->res_id]) {
		CAM_ERR(CAM_ISP, "CSID:%u RDI:%d is not supported on HW",
			csid_hw->hw_intf->hw_idx, res->res_id);
		return -EINVAL;
	}

	cmn_reg = csid_reg->cmn_reg;
	path_reg = csid_reg->path_reg[res->res_id];
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
	cid_data = &csid_hw->cid_data[path_cfg->cid];
	mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;

	/* Enable client & cfg offline mode */
	if (csid_hw->flags.offline_mode) {
		val = (1 <<
			path_reg->offline_mode_en_shift_val);
		val |= (1 << cmn_reg->path_en_shift_val);
		cam_io_w_mb(val, mem_base + path_reg->cfg0_addr);
		CAM_DBG(CAM_ISP, "CSID:%u enabling res:%s cfg0: 0x%x for offline",
			csid_hw->hw_intf->hw_idx, res->res_name, val);
		return 0;
	}

	/*Configure cfg0:
	 * VC
	 * DT
	 * Timestamp enable and strobe selection for v780
	 * DT_ID cobination
	 * Decode Format
	 * Frame_id_dec_en
	 * VFR en
	 * offline mode
	 */
	cfg0 = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc <<
			cmn_reg->vc_shift_val) |
		(cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].dt <<
			cmn_reg->dt_shift_val) |
		(path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].decode_fmt <<
			cmn_reg->decode_format_shift_val);

	/* DT_ID carries the CID only when the HW lacks direct CID config */
	if (!csid_reg->cmn_reg->direct_cid_config)
		cfg0 |= (path_cfg->cid << cmn_reg->dt_id_shift_val);

	if (csid_reg->cmn_reg->vfr_supported)
		cfg0 |= path_cfg->vfr_en << cmn_reg->vfr_en_shift_val;

	if (csid_reg->cmn_reg->frame_id_dec_supported)
		cfg0 |= path_cfg->frame_id_dec_en <<
			cmn_reg->frame_id_decode_en_shift_val;

	if (cmn_reg->timestamp_enabled_in_cfg0)
		cfg0 |= (1 << path_reg->timestamp_en_shift_val) |
			(cmn_reg->timestamp_strobe_val <<
				cmn_reg->timestamp_stb_sel_shift_val);

	if (cam_ife_csid_ver2_disable_sof_retime(csid_hw, res))
		cfg0 |= 1 << path_reg->sof_retiming_dis_shift;

	cam_io_w_mb(cfg0, mem_base + path_reg->cfg0_addr);
	CAM_DBG(CAM_ISP, "CSID[%u] res:%s cfg0: 0x%x",
		csid_hw->hw_intf->hw_idx, res->res_name, cfg0);

	path_cfg->ts_comb_vcdt_en = false;

	/*Configure Multi VC DT combo */
	if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) {
		val = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].vc <<
				cmn_reg->multi_vcdt_vc1_shift_val) |
			(cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].dt <<
				cmn_reg->multi_vcdt_dt1_shift_val) |
			(1 << cmn_reg->multi_vcdt_en_shift_val);

		if (cmn_reg->ts_comb_vcdt_en) {
			val |= (1 << cmn_reg->multi_vcdt_ts_combo_en_shift_val);
			path_cfg->ts_comb_vcdt_en = true;
		}

		if (cmn_reg->decode_format1_supported)
			val |= (path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].decode_fmt <<
				cmn_reg->decode_format1_shift_val);

		cam_io_w_mb(val, mem_base + path_reg->multi_vcdt_cfg0_addr);
		CAM_DBG(CAM_ISP, "CSID:%u res:%s multi_vcdt_cfg0: 0x%x",
			csid_hw->hw_intf->hw_idx, res->res_name, val);
	}

	/* Groups 2/3 share multi_vcdt_cfg1; group 3 is only valid with group 2 */
	if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_2].valid) {
		val = ((cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_2].dt <<
			cmn_reg->multi_vcdt_dt2_shift_val)) |
			((path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_2].decode_fmt <<
			cmn_reg->decode_format2_shift_val));

		if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_3].valid) {
			val |= ((cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_3].dt <<
				cmn_reg->multi_vcdt_dt3_shift_val)) |
				((path_cfg->path_format[
					CAM_IFE_CSID_MULTI_VC_DT_GRP_3].decode_fmt <<
				cmn_reg->decode_format3_shift_val));
		}

		cam_io_w_mb(val, mem_base + path_reg->multi_vcdt_cfg1_addr);

		CAM_DBG(CAM_ISP, "CSID:%u res:%s multi_vcdt_cfg1: 0x%x",
			csid_hw->hw_intf->hw_idx, res->res_name, val);
	}

	/* For some targets, cfg1 register is programmed as part of cdm buffer, so skip here */
	if (!(csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SKIP_PATH_CFG1)) {

		/*configure cfg1 addr
		 * Crop/Drop parameters
		 * Timestamp enable and strobe selection
		 * Plain format
		 * Packing format
		 */
		cfg1 = (path_cfg->crop_enable << path_reg->crop_h_en_shift_val) |
			(path_cfg->crop_enable <<
				path_reg->crop_v_en_shift_val);

		if (cmn_reg->drop_supported)
			cfg1 |= (path_cfg->drop_enable <<
					path_reg->drop_v_en_shift_val) |
				(path_cfg->drop_enable <<
					path_reg->drop_h_en_shift_val);

		if (path_reg->mipi_pack_supported)
			cfg1 |= path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].packing_fmt <<
				path_reg->packing_fmt_shift_val;

		cfg1 |= (path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].plain_fmt <<
				path_reg->plain_fmt_shift_val);

		/* Keep the data in MSB, IFE/SFE pipeline, BUS expects data in MSB */
		if (path_cfg->csid_out_unpack_msb &&
			path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].plain_fmt)
			cfg1 |= (1 << path_reg->plain_alignment_shift_val);

		if (csid_hw->debug_info.debug_val &
			CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO)
			cfg1 |= 1 << path_reg->format_measure_en_shift_val;

		if (!cmn_reg->timestamp_enabled_in_cfg0)
			cfg1 |= (1 << path_reg->timestamp_en_shift_val) |
				(cmn_reg->timestamp_strobe_val <<
					cmn_reg->timestamp_stb_sel_shift_val);

		/* We use line smoothting only on RDI_0 in all usecases */
		if ((path_reg->capabilities &
			CAM_IFE_CSID_CAP_LINE_SMOOTHING_IN_RDI) &&
			(res->res_id == CAM_IFE_PIX_PATH_RES_RDI_0))
			cfg1 |= 1 << path_reg->pix_store_en_shift_val;

		cam_io_w_mb(cfg1, mem_base + path_reg->cfg1_addr);
		CAM_DBG(CAM_ISP, "CSID:%u res:%s cfg1: 0x%x",
			csid_hw->hw_intf->hw_idx, res->res_name, cfg1);
	}

	/* Enable the RDI path */
	cfg0 |= (1 << cmn_reg->path_en_shift_val);
	cam_io_w_mb(cfg0, mem_base + path_reg->cfg0_addr);
	CAM_DBG(CAM_ISP, "CSID[%u] enabling path: %s cfg0: 0x%x",
		csid_hw->hw_intf->hw_idx, res->res_name, cfg0);

	if (path_reg->overflow_ctrl_en) {
		val = path_reg->overflow_ctrl_en |
			path_reg->overflow_ctrl_mode_val;
		cam_io_w_mb(val, mem_base +
			path_reg->err_recovery_cfg0_addr);
	}

	cam_ife_csid_ver2_res_master_slave_cfg(csid_hw, res->res_id);

	if (csid_hw->debug_info.debug_val &
		CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
		val = cam_io_r_mb(mem_base +
			path_reg->format_measure_cfg0_addr);
		val |= cmn_reg->measure_en_hbi_vbi_cnt_mask;
		cam_io_w_mb(val, mem_base +
			path_reg->format_measure_cfg0_addr);
	}

	return rc;
}

/*
 * cam_ife_csid_ver2_init_config_pxl_path() - program cfg0/cfg1 and related
 * registers for a pixel (IPP/PPP) path; see body for the programming order.
 */
static int cam_ife_csid_ver2_init_config_pxl_path(
	struct cam_ife_csid_ver2_hw *csid_hw,
	struct cam_isp_resource_node *res)
{
	int rc = 0;
	const struct cam_ife_csid_ver2_reg_info *csid_reg;
	struct cam_hw_soc_info *soc_info;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL;
	const struct
		cam_ife_csid_ver2_common_reg_info *cmn_reg = NULL;
	uint32_t val = 0, cfg0 = 0, cfg1 = 0;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_ife_csid_cid_data *cid_data;
	void __iomem *mem_base;

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;
	path_reg = csid_reg->path_reg[res->res_id];

	if (!path_reg) {
		CAM_ERR(CAM_ISP,
			"CSID:%u path res reg not found, type:%d res_id:%d res state %d",
			csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id, res->res_state);
		return -EINVAL;
	}

	cmn_reg = csid_reg->cmn_reg;
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
	cid_data = &csid_hw->cid_data[path_cfg->cid];
	mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;

	/*Configure:
	 * VC
	 * DT
	 * Timestamp enable and strobe selection
	 * DT_ID cobination
	 * Decode Format
	 * Frame_id_dec_en
	 * VFR en
	 */
	cfg0 |= (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc <<
			cmn_reg->vc_shift_val) |
		(cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].dt <<
			cmn_reg->dt_shift_val) |
		(path_cfg->cid << cmn_reg->dt_id_shift_val) |
		(path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].decode_fmt <<
			cmn_reg->decode_format_shift_val);

	if (csid_reg->cmn_reg->vfr_supported)
		cfg0 |= path_cfg->vfr_en << cmn_reg->vfr_en_shift_val;

	if (csid_reg->cmn_reg->frame_id_dec_supported)
		cfg0 |= path_cfg->frame_id_dec_en <<
			cmn_reg->frame_id_decode_en_shift_val;

	if (cmn_reg->timestamp_enabled_in_cfg0)
		cfg0 |= (1 << path_reg->timestamp_en_shift_val) |
			(cmn_reg->timestamp_strobe_val <<
				cmn_reg->timestamp_stb_sel_shift_val);

	if (cam_ife_csid_ver2_disable_sof_retime(csid_hw, res))
		cfg0 |= 1 << path_reg->sof_retiming_dis_shift;

	cam_io_w_mb(cfg0, mem_base + path_reg->cfg0_addr);
	CAM_DBG(CAM_ISP, "CSID[%u] res:%s cfg0: 0x%x",
		csid_hw->hw_intf->hw_idx, res->res_name, cfg0);

	path_cfg->ts_comb_vcdt_en = false;

	/*Configure Multi VC DT combo */
	if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].valid) {
		val = (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].vc <<
				cmn_reg->multi_vcdt_vc1_shift_val) |
			(cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].dt <<
				cmn_reg->multi_vcdt_dt1_shift_val) |
			(1 << cmn_reg->multi_vcdt_en_shift_val);

		if (cmn_reg->ts_comb_vcdt_en) {
			val |= (1 << cmn_reg->multi_vcdt_ts_combo_en_shift_val);
			path_cfg->ts_comb_vcdt_en = true;
		}

		if (cmn_reg->decode_format1_supported)
			val |= (path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_1].decode_fmt <<
				cmn_reg->decode_format1_shift_val);

		cam_io_w_mb(val, mem_base + path_reg->multi_vcdt_cfg0_addr);
		CAM_DBG(CAM_ISP, "CSID:%u res:%s multi_vcdt_cfg0: 0x%x",
			csid_hw->hw_intf->hw_idx, res->res_name, val);
	}

	/* Groups 2/3 share multi_vcdt_cfg1; group 3 is only valid with group 2 */
	if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_2].valid) {
		val = ((cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_2].dt <<
			cmn_reg->multi_vcdt_dt2_shift_val)) |
			((path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_2].decode_fmt <<
			cmn_reg->decode_format2_shift_val));

		if (cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_3].valid) {
			val |= ((cid_data->vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_3].dt <<
				cmn_reg->multi_vcdt_dt3_shift_val)) |
				((path_cfg->path_format[
					CAM_IFE_CSID_MULTI_VC_DT_GRP_3].decode_fmt <<
				cmn_reg->decode_format3_shift_val));
		}

		cam_io_w_mb(val, mem_base + path_reg->multi_vcdt_cfg1_addr);

		CAM_DBG(CAM_ISP, "CSID:%u res:%s multi_vcdt_cfg1: 0x%x",
			csid_hw->hw_intf->hw_idx, res->res_name, val);
	}

	/* For some targets, cfg1 register is programmed as part of cdm buffer, so skip here */
	if (!(csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SKIP_PATH_CFG1)) {

		/*configure cfg1 addr
		 * Binning
		 * Crop/Drop parameters
		 * Early Eof
		 * Timestamp enable and strobe selection
		 * Pix store enable
		 */

		if (csid_hw->flags.binning_enabled) {

			if (path_reg->binning_supported & CAM_IFE_CSID_BIN_HORIZONTAL)
				cfg1 |= path_cfg->horizontal_bin <<
					path_reg->bin_h_en_shift_val;

			if (path_reg->binning_supported & CAM_IFE_CSID_BIN_VERTICAL)
				cfg1 |= path_cfg->vertical_bin <<
					path_reg->bin_v_en_shift_val;

			if (path_reg->binning_supported & CAM_IFE_CSID_BIN_QCFA)
				cfg1 |= path_cfg->qcfa_bin <<
					path_reg->bin_qcfa_en_shift_val;

			if (path_cfg->qcfa_bin || path_cfg->vertical_bin ||
				path_cfg->horizontal_bin)
				cfg1 |= 1 << path_reg->bin_en_shift_val;
		}

		cfg1 |= (path_cfg->crop_enable << path_reg->crop_h_en_shift_val) |
			(path_cfg->crop_enable <<
				path_reg->crop_v_en_shift_val);

		if (cmn_reg->drop_supported)
			cfg1 |= (path_cfg->drop_enable <<
					path_reg->drop_v_en_shift_val) |
				(path_cfg->drop_enable <<
					path_reg->drop_h_en_shift_val);

		cfg1 |= 1 << path_reg->pix_store_en_shift_val;

		/*enable early eof based on crop enable */
		if (!(csid_hw->debug_info.debug_val &
			CAM_IFE_CSID_DEBUG_DISABLE_EARLY_EOF) &&
			cmn_reg->early_eof_supported && path_cfg->crop_enable &&
			!(csid_hw->flags.rdi_lcr_en && res->res_id == CAM_IFE_PIX_PATH_RES_PPP))
			cfg1 |= (1 << path_reg->early_eof_en_shift_val);

		if (csid_hw->debug_info.debug_val &
			CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO)
			cfg1 |= 1 << path_reg->format_measure_en_shift_val;

		if (!cmn_reg->timestamp_enabled_in_cfg0)
			cfg1 |= (1 << path_reg->timestamp_en_shift_val) |
				(cmn_reg->timestamp_strobe_val <<
					cmn_reg->timestamp_stb_sel_shift_val);

		cam_io_w_mb(cfg1, mem_base + path_reg->cfg1_addr);
		CAM_DBG(CAM_ISP, "CSID[%u] res:%s cfg1: 0x%x",
			csid_hw->hw_intf->hw_idx, res->res_name, cfg1);
	}

	/* Enable the Pxl path */
	cfg0 |= (1 << cmn_reg->path_en_shift_val);
	cam_io_w_mb(cfg0, mem_base + path_reg->cfg0_addr);
	CAM_DBG(CAM_ISP, "CSID[%u] enabling path: %s cfg0: 0x%x",
		csid_hw->hw_intf->hw_idx, res->res_name, cfg0);

	if (path_reg->overflow_ctrl_en) {
		val = path_reg->overflow_ctrl_en |
			path_reg->overflow_ctrl_mode_val;
		cam_io_w_mb(val, mem_base + path_reg->err_recovery_cfg0_addr);
	}

	if (csid_hw->debug_info.debug_val &
		CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
		val = cam_io_r_mb(mem_base +
			path_reg->format_measure_cfg0_addr);
		val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
		cam_io_w_mb(val,
			mem_base + path_reg->format_measure_cfg0_addr);
	}

	cam_ife_csid_ver2_res_master_slave_cfg(csid_hw, res->res_id);

	/* NOTE(review): pxl path marks INIT_HW here; the RDI variant leaves the
	 * state to its caller — confirm this asymmetry is intentional.
	 */
	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
	return rc;
}

/*
 * cam_ife_csid_ver2_get_path_bh() - map a path res_id to its tasklet
 * bottom-half handler (RDI/IPP/PPP); NULL for unknown ids.
 */
static inline __attribute_const__ CAM_IRQ_HANDLER_BOTTOM_HALF
cam_ife_csid_ver2_get_path_bh(int res_id)
{
	switch (res_id) {
	case CAM_IFE_PIX_PATH_RES_RDI_0:
	case CAM_IFE_PIX_PATH_RES_RDI_1:
	case CAM_IFE_PIX_PATH_RES_RDI_2:
	case CAM_IFE_PIX_PATH_RES_RDI_3:
	case CAM_IFE_PIX_PATH_RES_RDI_4:
		return cam_ife_csid_ver2_rdi_bottom_half;
	case CAM_IFE_PIX_PATH_RES_IPP:
	case CAM_IFE_PIX_PATH_RES_IPP_1:
	case CAM_IFE_PIX_PATH_RES_IPP_2:
		return cam_ife_csid_ver2_ipp_bottom_half;
	case CAM_IFE_PIX_PATH_RES_PPP:
		return cam_ife_csid_ver2_ppp_bottom_half;
	default:
		return NULL;
	}
}

/*
 * cam_ife_csid_ver2_subscribe_sof_for_discard() - subscribe the input-SOF
 * irq on a path so initial frames can be discarded; stores the handle in
 * path_cfg->discard_irq_handle. Returns 0 or -EINVAL on subscribe failure.
 */
static inline int cam_ife_csid_ver2_subscribe_sof_for_discard(
	struct cam_ife_csid_ver2_path_cfg *path_cfg,
	struct cam_ife_csid_ver2_hw *csid_hw,
	struct cam_isp_resource_node *res,
	CAM_IRQ_HANDLER_TOP_HALF top_half_handler,
	CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler)
{
	int rc = 0;
	uint32_t irq_mask = 0;

	irq_mask = IFE_CSID_VER2_PATH_INFO_INPUT_SOF;
	path_cfg->discard_irq_handle = cam_irq_controller_subscribe_irq(
		csid_hw->path_irq_controller[res->res_id],
		CAM_IRQ_PRIORITY_0,
		&irq_mask,
		res,
		top_half_handler,
		bottom_half_handler,
		csid_hw->tasklet,
		&tasklet_bh_api,
		CAM_IRQ_EVT_GROUP_0);

	if (path_cfg->discard_irq_handle < 1) {
		CAM_ERR(CAM_ISP,
			"CSID[%u] Subscribing input SOF failed for discarding %d",
			csid_hw->hw_intf->hw_idx, res->res_id);
		rc = -EINVAL;
	}

	CAM_DBG(CAM_ISP,
		"CSID[%u] Subscribing input SOF for discard done res: %s rc: %d",
		csid_hw->hw_intf->hw_idx, res->res_name, rc);
	return
rc; +} + +static int cam_ife_csid_ver2_mc_irq_subscribe(struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res, uint32_t top_index) +{ + uint32_t tmp_mask; + struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg; + + if (csid_hw->top_mc_irq_handle >= 1) + return 0; + + /* For multi context cases, we only need composite epoch and eof */ + tmp_mask = csid_reg->ipp_mc_reg->comp_eof_mask | csid_reg->ipp_mc_reg->comp_epoch0_mask; + + csid_hw->top_mc_irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller[top_index], + CAM_IRQ_PRIORITY_0, + &tmp_mask, + res, + cam_ife_csid_ver2_mc_top_half, + cam_ife_csid_ver2_get_path_bh(res->res_id), + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->top_mc_irq_handle < 1) { + CAM_ERR(CAM_ISP, "csid[%d] MC subscribe top irq fail %s", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + return 0; + +} + +static int cam_ife_csid_ver2_path_irq_subscribe( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res, + uint32_t dbg_frm_irq_mask, uint32_t err_irq_mask) +{ + uint32_t num_register = 0; + uint32_t *top_irq_mask = NULL; + struct cam_ife_csid_ver2_path_cfg *path_cfg = res->res_priv; + struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg; + int i, rc; + int top_index = -1; + + for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0; i < csid_reg->num_top_regs; i++) { + if (csid_reg->path_reg[res->res_id]->top_irq_mask[i]) { + top_index = i; + break; + } + } + + if (top_index < 0 || top_index >= CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX) { + CAM_ERR(CAM_ISP, "csid[%d] %s Invalid top index %s index %d", + csid_hw->hw_intf->hw_idx, res->res_name, top_index); + return -EINVAL; + } + + num_register = csid_reg->top_irq_reg_info[top_index].num_registers; + top_irq_mask = vmalloc(sizeof(uint32_t) * num_register); + if (!top_irq_mask) { + CAM_ERR(CAM_ISP, "csid top_irq_mask allocation failed"); + return -ENOMEM; + } + + 
top_irq_mask[0] = csid_reg->path_reg[res->res_id]->top_irq_mask[top_index]; + + if ((res->res_id == CAM_IFE_PIX_PATH_RES_IPP) && + csid_reg->path_reg[res->res_id]->capabilities & + CAM_IFE_CSID_CAP_MULTI_CTXT) { + rc = cam_ife_csid_ver2_mc_irq_subscribe(csid_hw, res, top_index); + if (rc || csid_hw->top_mc_irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%u] Failed to subscribe MC top irq", + csid_hw->hw_intf->hw_idx); + goto end; + } + } + + path_cfg->top_irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller[top_index], + CAM_IRQ_PRIORITY_0, + top_irq_mask, + res, + cam_ife_csid_ver2_handle_path_irq, + NULL, NULL, NULL, CAM_IRQ_EVT_GROUP_0); + + if (path_cfg->top_irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%u] subscribe top irq fail %s", + csid_hw->hw_intf->hw_idx, res->res_name); + rc = -EINVAL; + goto unsub_mc; + } + + rc = cam_irq_controller_register_dependent(csid_hw->top_irq_controller[top_index], + csid_hw->path_irq_controller[res->res_id], top_irq_mask); + + if (rc) + goto unsub_top; + + path_cfg->irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->path_irq_controller[res->res_id], + CAM_IRQ_PRIORITY_1, + &dbg_frm_irq_mask, + res, + cam_ife_csid_ver2_path_top_half, + cam_ife_csid_ver2_get_path_bh(res->res_id), + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (path_cfg->irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%u] subscribe path irq fail %s", + csid_hw->hw_intf->hw_idx, res->res_name); + rc = -EINVAL; + goto unreg_path; + } + + path_cfg->err_irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->path_irq_controller[res->res_id], + CAM_IRQ_PRIORITY_0, + &err_irq_mask, + res, + cam_ife_csid_ver2_path_err_top_half, + cam_ife_csid_ver2_get_path_bh(res->res_id), + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (path_cfg->err_irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%u] subscribe error irq fail %s", + csid_hw->hw_intf->hw_idx, res->res_name); + rc = -EINVAL; + goto unsub_path; + } + 
vfree(top_irq_mask); + return 0; + +unsub_path: + cam_irq_controller_unsubscribe_irq(csid_hw->path_irq_controller[res->res_id], + path_cfg->irq_handle); + path_cfg->irq_handle = 0; +unreg_path: + cam_irq_controller_unregister_dependent(csid_hw->top_irq_controller[top_index], + csid_hw->path_irq_controller[res->res_id]); +unsub_top: + cam_irq_controller_unsubscribe_irq(csid_hw->top_irq_controller[top_index], + path_cfg->top_irq_handle); + path_cfg->top_irq_handle = 0; +unsub_mc: + cam_irq_controller_unsubscribe_irq(csid_hw->top_irq_controller[top_index], + csid_hw->top_mc_irq_handle); + csid_hw->top_mc_irq_handle = 0; +end: + vfree(top_irq_mask); + return rc; +} + +static void cam_ife_csid_ver2_path_rup_aup( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res, + struct cam_ife_csid_ver2_rup_aup_mask *rup_aup_mask) +{ + struct cam_ife_csid_ver2_reg_info *csid_reg; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + path_reg = csid_reg->path_reg[res->res_id]; + + if (csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SPLIT_RUP_AUP) { + rup_aup_mask->rup_mask |= path_reg->rup_mask; + rup_aup_mask->aup_mask |= path_reg->aup_mask; + } else { + rup_aup_mask->rup_mask |= path_reg->rup_aup_mask; + } + +} + +static int cam_ife_csid_ver2_program_rdi_path( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res, + struct cam_ife_csid_ver2_rup_aup_mask *rup_aup_mask) +{ + int rc = 0; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + void __iomem *mem_base; + uint32_t dbg_frm_irq_mask = 0; + uint32_t err_irq_mask = 0; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + + rc = cam_ife_csid_ver2_init_config_rdi_path( + csid_hw, res); + + if (rc) { + CAM_ERR(CAM_ISP, + "CSID:%u %s path res type:%d res_id:%d %d", + csid_hw->hw_intf->hw_idx, + res->res_type, 
res->res_id, res->res_state); + return rc; + } + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + path_reg = csid_reg->path_reg[res->res_id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%u RDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + + /* Set crc error threshold for primary rdi path */ + if (res->is_rdi_primary_res) + csid_hw->crc_error_threshold = path_cfg->height / CAM_IFE_CSID_MAX_CRC_ERR_DIVISOR; + + if (!csid_hw->flags.offline_mode && !(csid_reg->cmn_reg->capabilities & + CAM_IFE_CSID_CAP_SKIP_EPOCH_CFG)) { + CAM_DBG(CAM_ISP, "CSID:%u Rdi res: %d", + csid_hw->hw_intf->hw_idx, res->res_id); + + /*Program the camif part */ + cam_io_w_mb(path_cfg->epoch_cfg << path_reg->epoch0_shift_val, + mem_base + path_reg->epoch_irq_cfg_addr); + } + + dbg_frm_irq_mask = csid_hw->debug_info.path_mask; + + if (res->is_rdi_primary_res) { + dbg_frm_irq_mask |= path_reg->rup_irq_mask; + if (path_cfg->handle_camif_irq) + dbg_frm_irq_mask |= path_reg->sof_irq_mask | path_reg->eof_irq_mask | + path_reg->epoch0_irq_mask; + } + + /* Enable secondary events dictated by HW mgr for RDI paths */ + if (path_cfg->sec_evt_config.en_secondary_evt) { + if (path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_SOF) + dbg_frm_irq_mask |= path_reg->sof_irq_mask; + + if (path_cfg->sec_evt_config.evt_type & CAM_IFE_CSID_EVT_EPOCH) + dbg_frm_irq_mask |= path_reg->epoch0_irq_mask; + + CAM_DBG(CAM_ISP, + "CSID:%u Enable camif: %d evt irq for res: %s", + csid_hw->hw_intf->hw_idx, path_cfg->sec_evt_config.evt_type, res->res_name); + } + + if ((csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_MULTI_CTXT) && + path_cfg->is_aeb_en && (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_0)) + dbg_frm_irq_mask |= path_reg->epoch0_irq_mask; + + 
res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + path_cfg->irq_reg_idx = cam_ife_csid_convert_res_to_irq_reg(res->res_id); + if (path_cfg->discard_init_frames) { + rc = cam_ife_csid_ver2_subscribe_sof_for_discard( + path_cfg, csid_hw, res, + cam_ife_csid_ver2_discard_sof_top_half, + cam_ife_csid_ver2_discard_sof_rdi_bottom_half); + if (rc) + return rc; + } + + err_irq_mask = path_reg->fatal_err_mask | path_reg->non_fatal_err_mask; + rc = cam_ife_csid_ver2_path_irq_subscribe(csid_hw, res, dbg_frm_irq_mask, err_irq_mask); + if (rc) + return rc; + cam_ife_csid_ver2_path_rup_aup(csid_hw, res, rup_aup_mask); + return rc; +} + + +static int cam_ife_csid_ver2_program_ipp_path( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res, + struct cam_ife_csid_ver2_rup_aup_mask *rup_aup_mask) +{ + int rc = 0; + const struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL; + uint32_t start_mode_val = 0, err_irq_mask = 0, dbg_frm_irq_mask = 0; + void __iomem *mem_base; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + + rc = cam_ife_csid_ver2_init_config_pxl_path( + csid_hw, res); + if (rc) { + CAM_ERR(CAM_ISP, + "CSID:%u %s path res type:%d res_id:%d %d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_state); + return rc; + } + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + path_reg = csid_reg->path_reg[res->res_id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%u IPP is not supported on HW", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + + /* Set crc error threshold for ipp path */ + if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) + csid_hw->crc_error_threshold = path_cfg->height / CAM_IFE_CSID_MAX_CRC_ERR_DIVISOR; + + if 
(!(csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SKIP_EPOCH_CFG)) { + cam_io_w_mb(path_cfg->epoch_cfg << path_reg->epoch0_shift_val, + mem_base + path_reg->epoch_irq_cfg_addr); + + CAM_DBG(CAM_ISP, "CSID[%u] epoch_cfg 0x%x", + csid_hw->hw_intf->hw_idx, path_cfg->epoch_cfg); + } + + path_cfg->irq_reg_idx = cam_ife_csid_convert_res_to_irq_reg(res->res_id); + + dbg_frm_irq_mask = csid_hw->debug_info.path_mask; + + if (!(csid_reg->path_reg[res->res_id]->capabilities & CAM_IFE_CSID_CAP_MULTI_CTXT) && + (path_cfg->sync_mode == CAM_ISP_HW_SYNC_NONE || + path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER)) { + dbg_frm_irq_mask |= path_reg->rup_irq_mask; + if (path_cfg->handle_camif_irq) + dbg_frm_irq_mask |= (path_reg->sof_irq_mask | path_reg->epoch0_irq_mask | + path_reg->eof_irq_mask); + } + + /* For multi context cases, listen to leading exposure sof and rup done */ + if ((res->res_id == CAM_IFE_PIX_PATH_RES_IPP) && + (csid_reg->path_reg[res->res_id]->capabilities & CAM_IFE_CSID_CAP_MULTI_CTXT)) { + dbg_frm_irq_mask |= path_reg->rup_irq_mask; + if (path_cfg->handle_camif_irq) + dbg_frm_irq_mask |= path_reg->sof_irq_mask; + } + + err_irq_mask = path_reg->fatal_err_mask | path_reg->non_fatal_err_mask; + if (path_cfg->discard_init_frames) { + rc = cam_ife_csid_ver2_subscribe_sof_for_discard( + path_cfg, csid_hw, res, + cam_ife_csid_ver2_discard_sof_top_half, + cam_ife_csid_ver2_discard_sof_pix_bottom_half); + if (rc) + return rc; + } + + rc = cam_ife_csid_ver2_path_irq_subscribe(csid_hw, res, dbg_frm_irq_mask, err_irq_mask); + if (rc) + return rc; + + start_mode_val = path_reg->start_master_sel_val << path_reg->start_master_sel_shift; + + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER) { + /* Set start mode as master */ + start_mode_val |= path_reg->start_mode_master << path_reg->start_mode_shift; + } else if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_SLAVE) { + /* Set start mode as slave */ + start_mode_val |= path_reg->start_mode_slave << path_reg->start_mode_shift; + } 
else { + /* Default is internal halt mode */ + start_mode_val = 0; + } + + cam_io_w_mb(start_mode_val, mem_base + path_reg->ctrl_addr); + + CAM_DBG(CAM_ISP, "CSID:%u Pix res: %d ctrl val: 0x%x", csid_hw->hw_intf->hw_idx, + res->res_id, start_mode_val); + + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER || + path_cfg->sync_mode == CAM_ISP_HW_SYNC_NONE) + cam_ife_csid_ver2_path_rup_aup(csid_hw, res, rup_aup_mask); + + res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + return rc; +} + +static int cam_ife_csid_ver2_enable_path( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + const struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL; + uint32_t val = 0; + uint32_t ctrl_addr = 0; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + void __iomem *mem_base; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + soc_info = &csid_hw->hw_info->soc_info; + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + if (path_cfg->discard_init_frames) { + CAM_DBG(CAM_ISP, "CSID[%u] skip start cmd for res: %s", + csid_hw->hw_intf->hw_idx, res->res_id); + goto end; + } + + path_reg = csid_reg->path_reg[res->res_id]; + val = path_reg->resume_frame_boundary; + ctrl_addr = path_reg->ctrl_addr; + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_IPP_1: + case CAM_IFE_PIX_PATH_RES_IPP_2: + case CAM_IFE_PIX_PATH_RES_PPP: + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + return 0; + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + if (csid_hw->flags.offline_mode) + return 0; + break; + default: + return -EINVAL; + } + + /* For single CSID use-cases with lcr configure global resume 
*/ + if (csid_hw->flags.rdi_lcr_en && !csid_hw->top_cfg.dual_en) + val |= path_reg->start_mode_global << path_reg->start_mode_shift; + else + /* For dual preserve the master-slave config for IPP/PPP */ + val |= cam_io_r_mb(mem_base + ctrl_addr); + + cam_io_w_mb(val, mem_base + ctrl_addr); + + CAM_DBG(CAM_ISP, "CSID[%u] start cmd: 0x%x programmed for res: %s", + csid_hw->hw_intf->hw_idx, val, res->res_name); +end: + /* Change state even if we don't configure start cmd */ + res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return 0; +} + +static int cam_ife_csid_ver2_program_ppp_path( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_isp_resource_node *res, + struct cam_ife_csid_ver2_rup_aup_mask *rup_aup_mask) +{ + int rc = 0; + const struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL; + uint32_t val = 0; + struct cam_ife_csid_ver2_path_cfg *path_cfg; + uint32_t irq_mask = 0; + void __iomem *mem_base; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + path_reg = csid_reg->path_reg[res->res_id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%u PPP is not supported on HW", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + + if (csid_hw->flags.rdi_lcr_en && path_reg->crop_drop_enable) { + path_cfg->drop_enable = true; + path_cfg->crop_enable = true; + } + + rc = cam_ife_csid_ver2_init_config_pxl_path( + csid_hw, res); + + if (rc) { + CAM_ERR(CAM_ISP, + "CSID:%u %s path res type:%d res_id:%d %d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_state); + return rc; + } + + path_cfg->irq_reg_idx = cam_ife_csid_convert_res_to_irq_reg(res->res_id); + + /* for dual case + * set ppp as slave + * if current csid is set as master set + * 
start_master_sel_val as 3 + */ + + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_NONE) { + val = 0; + } else { + val = path_reg->start_mode_slave << + path_reg->start_mode_shift; + /* Set halt mode as internal master */ + if (csid_hw->sync_mode == CAM_ISP_HW_SYNC_MASTER) + val |= path_reg->start_master_sel_val << + path_reg->start_master_sel_shift; + } + + cam_io_w_mb(val, mem_base + path_reg->ctrl_addr); + + CAM_DBG(CAM_ISP, "CSID:%u Pix res: %d ctrl val: 0x%x", + csid_hw->hw_intf->hw_idx, res->res_id, val); + + if (path_cfg->sync_mode == CAM_ISP_HW_SYNC_MASTER || + path_cfg->sync_mode == CAM_ISP_HW_SYNC_NONE) + cam_ife_csid_ver2_path_rup_aup(csid_hw, res, rup_aup_mask); + + val = csid_hw->debug_info.path_mask; + + if (path_cfg->discard_init_frames) { + rc = cam_ife_csid_ver2_subscribe_sof_for_discard( + path_cfg, csid_hw, res, + cam_ife_csid_ver2_discard_sof_top_half, + cam_ife_csid_ver2_discard_sof_pix_bottom_half); + if (rc) + return rc; + } + + irq_mask = path_reg->fatal_err_mask | path_reg->non_fatal_err_mask; + rc = cam_ife_csid_ver2_path_irq_subscribe(csid_hw, res, val, irq_mask); + if (rc) + return rc; + + return rc; +} + +static int cam_ife_csid_ver2_rx_capture_config( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + const struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_ife_csid_ver2_rx_cfg *rx_cfg; + uint32_t vc, dt, i; + uint32_t val = 0; + + for (i = 0; i < CAM_IFE_CSID_CID_MAX; i++) + if (csid_hw->cid_data[i].cid_cnt) + break; + + if (i == CAM_IFE_CSID_CID_MAX) { + CAM_WARN(CAM_ISP, "CSID[%u] no valid cid", + csid_hw->hw_intf->hw_idx); + return 0; + } + + rx_cfg = &csid_hw->rx_cfg; + if (csid_hw->debug_info.rx_capture_debug_set) { + vc = csid_hw->debug_info.rx_capture_vc; + dt = csid_hw->debug_info.rx_capture_dt; + } else { + vc = csid_hw->cid_data[i].vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].vc; + dt = csid_hw->cid_data[i].vc_dt[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].dt; + } + + csid_reg = (struct 
cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (csid_hw->debug_info.debug_val & + CAM_IFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) + val = ((1 << csid_reg->csi2_reg->capture_short_pkt_en_shift) | + (vc << csid_reg->csi2_reg->capture_short_pkt_vc_shift)); + + /* CAM_IFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE */ + val |= ((1 << csid_reg->csi2_reg->capture_long_pkt_en_shift) | + (dt << csid_reg->csi2_reg->capture_long_pkt_dt_shift) | + (vc << csid_reg->csi2_reg->capture_long_pkt_vc_shift)); + + /* CAM_IFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE */ + if (rx_cfg->lane_type == CAM_ISP_LANE_TYPE_CPHY) { + val |= ((1 << csid_reg->csi2_reg->capture_cphy_pkt_en_shift) | + (dt << csid_reg->csi2_reg->capture_cphy_pkt_dt_shift) | + (vc << csid_reg->csi2_reg->capture_cphy_pkt_vc_shift)); + } + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + csid_reg->csi2_reg->capture_ctrl_addr); + + CAM_DBG(CAM_ISP, "CSID[%u] rx capture_ctrl: 0x%x", csid_hw->hw_intf->hw_idx, val); + + return 0; +} + +static int cam_ife_csid_ver2_top2_irq_subscribe(struct cam_ife_csid_ver2_hw *csid_hw, + uint32_t irq_mask, uint32_t err_irq_mask) +{ + struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg; + uint32_t top_top2_irq_mask = irq_mask; + uint32_t top_info_irq_mask = 0, top_err_irq_mask = 0; + int rc; + + csid_hw->top_top2_irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + CAM_IRQ_PRIORITY_0, + &top_top2_irq_mask, + csid_hw, + cam_ife_csid_ver2_handle_top2_irq, + NULL, NULL, NULL, CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->top_top2_irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%d] Top2 Subscribe Top Irq fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + return rc; + } + + rc = cam_irq_controller_register_dependent( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1], + &top_top2_irq_mask); + + if 
(rc) + goto unsub_top; + + top_err_irq_mask = csid_reg->cmn_reg->top_err_irq_mask[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]; + csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1] = + cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller + [CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1], + CAM_IRQ_PRIORITY_0, + &top_err_irq_mask, + csid_hw, + cam_ife_csid_ver2_top2_err_top_half, + cam_ife_csid_ver2_top2_err_irq_bottom_half, + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1] < 1) { + CAM_ERR(CAM_ISP, "CSID[%d] top2 err irq register fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto unreg_top2; + } + + if (csid_hw->debug_info.top_mask[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]) { + top_info_irq_mask = csid_hw->debug_info.top_mask[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]; + csid_hw->top_info_irq_handle[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1] + = cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller + [CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1], + CAM_IRQ_PRIORITY_1, + &top_info_irq_mask, + csid_hw, + cam_ife_csid_ver2_top2_info_irq_top_half, + cam_ife_csid_ver2_top2_info_irq_bottom_half, + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->top_info_irq_handle[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1] < 1) { + CAM_ERR(CAM_ISP, "CSID[%d] Subscribe Top2 Info Irq fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto unsub_top2_err; + } + } + + return 0; +unsub_top2_err: + if (csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]) + cam_irq_controller_unsubscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1], + csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]); + csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1] = 0; +unreg_top2: + cam_irq_controller_unregister_dependent( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]); +unsub_top: + 
cam_irq_controller_unsubscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->top_top2_irq_handle); + csid_hw->top_top2_irq_handle = 0; + return rc; +} + +static int cam_ife_csid_ver2_csi2_rx2_irq_subscribe(struct cam_ife_csid_ver2_hw *csid_hw) +{ + struct cam_ife_csid_ver2_reg_info *csid_reg = + (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg; + uint32_t irq_mask, val = 0; + uint32_t rx_rx2_irq_mask = + csid_reg->csi2_reg->rx_rx2_irq_mask; + int rc = 0; + + csid_hw->rx_cfg.rx2_irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->rx_irq_controller + [CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + CAM_IRQ_PRIORITY_0, + &rx_rx2_irq_mask, + csid_hw, + cam_ife_csid_ver2_handle_rx2_irq, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->rx_cfg.rx2_irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%d] RX2 err irq register fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + return rc; + } + + rc = cam_irq_controller_register_dependent( + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1], + &rx_rx2_irq_mask); + + if (rc) + goto unsub_rx2; + + irq_mask = csid_hw->debug_info.rx_mask[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1]; + if (irq_mask) { + csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1] = + cam_irq_controller_subscribe_irq( + csid_hw->rx_irq_controller + [CAM_IFE_CSID_RX2_IRQ_STATUS_REG1], + CAM_IRQ_PRIORITY_4, + &irq_mask, + csid_hw, + cam_ife_csid_ver2_rx2_top_half, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1] < 1) { + CAM_ERR(CAM_ISP, "CSID[%d] RX debug irq register fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto unreg_rx2; + } + } + + val = csid_reg->csi2_reg->fatal_err_mask[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1] | + csid_reg->csi2_reg->part_fatal_err_mask[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1] | + 
csid_reg->csi2_reg->non_fatal_err_mask[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1]; + + csid_hw->rx_cfg.err_irq_handle[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1] = + cam_irq_controller_subscribe_irq( + csid_hw->rx_irq_controller + [CAM_IFE_CSID_RX2_IRQ_STATUS_REG1], + CAM_IRQ_PRIORITY_0, + &val, + csid_hw, + cam_ife_csid_ver2_rx2_err_top_half, + cam_ife_csid_ver2_rx2_err_bottom_half, + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->rx_cfg.err_irq_handle[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1] < 1) { + CAM_ERR(CAM_ISP, "CSID[%d] RX2 err irq register fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto unsub_rx2_info; + } + return 0; + +unsub_rx2_info: + if (csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1]) + cam_irq_controller_unsubscribe_irq( + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1], + csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1]); + csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1] = 0; +unreg_rx2: + cam_irq_controller_unregister_dependent( + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1]); +unsub_rx2: + cam_irq_controller_unsubscribe_irq( + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + csid_hw->rx_cfg.rx2_irq_handle); + csid_hw->rx_cfg.rx2_irq_handle = 0; + return rc; + +}; + +static int cam_ife_csid_ver2_csi2_irq_subscribe(struct cam_ife_csid_ver2_hw *csid_hw, + uint32_t irq_mask, uint32_t err_irq_mask) +{ + struct cam_ife_csid_ver2_reg_info *csid_reg = csid_hw->core_info->csid_reg; + uint32_t num_register = 0; + uint32_t *top_irq_mask = NULL; + int top_index = -1; + int i, rc; + + for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0; i < csid_reg->num_top_regs; i++) { + if (csid_reg->csi2_reg->top_irq_mask[i]) { + top_index = i; + break; + } + } + + if (top_index < 0) { + CAM_ERR(CAM_ISP, "CSID[%d] RX Subscribe Top Irq fail due to invalid mask", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + return rc; + } + 
+ num_register = csid_reg->top_irq_reg_info[top_index].num_registers; + top_irq_mask = vmalloc(sizeof(uint32_t) * num_register); + if (!top_irq_mask) { + CAM_ERR(CAM_ISP, "csid top_irq_mask allocation failed"); + return -ENOMEM; + } + + top_irq_mask[0] = csid_reg->csi2_reg->top_irq_mask[top_index]; + + csid_hw->rx_cfg.top_irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller[top_index], + CAM_IRQ_PRIORITY_0, + top_irq_mask, + csid_hw, + cam_ife_csid_ver2_handle_rx_irq, + NULL, NULL, NULL, CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->rx_cfg.top_irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%u] RX Subscribe Top Irq fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto err; + } + + rc = cam_irq_controller_register_dependent(csid_hw->top_irq_controller[top_index], + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + top_irq_mask); + + if (rc) + goto unsub_top; + + if (irq_mask) { + csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG0] = + cam_irq_controller_subscribe_irq( + csid_hw->rx_irq_controller + [CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + CAM_IRQ_PRIORITY_4, + &irq_mask, + csid_hw, + cam_ife_csid_ver2_rx_top_half, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG0] < 1) { + CAM_ERR(CAM_ISP, "CSID[%d] RX debug irq register fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto unreg_rx; + } + } + + csid_hw->rx_cfg.err_irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG0] = + cam_irq_controller_subscribe_irq( + csid_hw->rx_irq_controller + [CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + CAM_IRQ_PRIORITY_0, + &err_irq_mask, + csid_hw, + cam_ife_csid_ver2_rx_err_top_half, + cam_ife_csid_ver2_rx_err_bottom_half, + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->rx_cfg.err_irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG0] < 1) { + CAM_ERR(CAM_ISP, "CSID[%d] RX err irq register fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto unsub_rx; + } + + if 
(csid_reg->num_rx_regs > 1) { + rc = cam_ife_csid_ver2_csi2_rx2_irq_subscribe(csid_hw); + if (rc < 0) + goto unsub_rx; + } + vfree(top_irq_mask); + return 0; + +unsub_rx: + if (csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG0]) + cam_irq_controller_unsubscribe_irq( + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0], + csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG0]); + csid_hw->rx_cfg.irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG0] = 0; +unreg_rx: + cam_irq_controller_unregister_dependent(csid_hw->top_irq_controller[top_index], + csid_hw->rx_irq_controller[CAM_IFE_CSID_RX_IRQ_STATUS_REG0]); +unsub_top: + cam_irq_controller_unsubscribe_irq(csid_hw->top_irq_controller[top_index], + csid_hw->rx_cfg.top_irq_handle); + csid_hw->rx_cfg.top_irq_handle = 0; +err: + vfree(top_irq_mask); + return rc; +} + +static int cam_ife_csid_ver2_enable_csi2(struct cam_ife_csid_ver2_hw *csid_hw) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_reg_info *csid_reg; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + uint32_t val = 0; + void __iomem *mem_base; + struct cam_ife_csid_ver2_rx_cfg *rx_cfg; + int vc_full_width; + + if (csid_hw->flags.rx_enabled) + return 0; + + if (csid_hw->flags.offline_mode) + return 0; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + soc_info = &csid_hw->hw_info->soc_info; + rx_cfg = &csid_hw->rx_cfg; + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + /*Configure Rx cfg0 */ + val |= ((rx_cfg->lane_cfg << csi2_reg->lane_cfg_shift) | + ((rx_cfg->lane_num - 1) << csi2_reg->lane_num_shift) | + (rx_cfg->lane_type << csi2_reg->phy_type_shift)); + + if (rx_cfg->tpg_mux_sel) { + val |= ((rx_cfg->tpg_num_sel << csi2_reg->tpg_num_sel_shift) | + (rx_cfg->tpg_mux_sel << csi2_reg->tpg_mux_en_shift)); + } else { + val |= rx_cfg->phy_sel << csi2_reg->phy_num_shift; + } + + cam_io_w_mb(val, mem_base + 
csi2_reg->cfg0_addr); + + CAM_DBG(CAM_ISP, "CSID[%u] rx_cfg0: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + /*Configure Rx cfg1*/ + val = 1 << csi2_reg->misr_enable_shift_val; + val |= 1 << csi2_reg->ecc_correction_shift_en; + val |= (rx_cfg->epd_supported << csi2_reg->epd_mode_shift_en); + if (rx_cfg->dynamic_sensor_switch_en) { + val |= 1 << csi2_reg->dyn_sensor_switch_shift_en; + /* Disable rup_aup latch feature */ + if (csi2_reg->rup_aup_latch_supported) + val |= 1 << csi2_reg->rup_aup_latch_shift; + } + + vc_full_width = cam_ife_csid_is_vc_full_width(csid_hw->cid_data); + + if (vc_full_width == 1) { + val |= 1 << csi2_reg->vc_mode_shift_val; + } else if (vc_full_width < 0) { + CAM_ERR(CAM_ISP, "CSID[%u] Error VC DT", csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + cam_io_w_mb(val, mem_base + csi2_reg->cfg1_addr); + CAM_DBG(CAM_ISP, "CSID[%u] rx_cfg1: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + val = csi2_reg->fatal_err_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG0] | + csi2_reg->part_fatal_err_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG0] | + csi2_reg->non_fatal_err_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG0]; + + if (csid_hw->rx_cfg.epd_supported && + (csid_hw->rx_cfg.lane_type == CAM_ISP_LANE_TYPE_DPHY)) + val &= ~BIT(csid_reg->rx_debug_mask->bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION]); + + rc = cam_ife_csid_ver2_csi2_irq_subscribe(csid_hw, + csid_hw->debug_info.rx_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG0], val); + if (rc) + return rc; + + csid_hw->flags.rx_enabled = true; + + cam_ife_csid_ver2_rx_capture_config(csid_hw); + + return rc; +} + +static int cam_ife_csid_ver2_program_top( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + const struct cam_ife_csid_ver2_top_reg_info *top_reg; + struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t val; + struct cam_hw_soc_info *soc_info; + int input_core_sel; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + if (!csid_reg->need_top_cfg) { + CAM_DBG(CAM_ISP, "CSID %d top not supported", + 
csid_hw->hw_intf->hw_idx); + return 0; + } + + top_reg = csid_reg->top_reg; + soc_info = &csid_hw->hw_info->soc_info; + + /* Porgram top parameters */ + input_core_sel = csid_reg->input_core_sel[csid_hw->hw_intf->hw_idx] + [csid_hw->top_cfg.input_core_type]; + + CAM_DBG(CAM_ISP, "CSID[%u] input_core_sel %d", + csid_hw->hw_intf->hw_idx, input_core_sel); + + if (input_core_sel == -1) { + CAM_ERR(CAM_ISP, "CSID[%u] invalid top input_core_type %u", + csid_hw->hw_intf->hw_idx, + csid_hw->top_cfg.input_core_type); + return -EINVAL; + } + + val = (uint32_t)input_core_sel << top_reg->input_core_type_shift_val; + + if ((csid_hw->top_cfg.offline_sfe_en) || + (top_reg->sfe_pipeline_bypassed && csid_hw->top_cfg.sfe_fs)) + val |= BIT(top_reg->sfe_offline_en_shift_val); + + val |= csid_hw->top_cfg.out_ife_en << + top_reg->out_ife_en_shift_val; + + val |= csid_hw->top_cfg.rdi_lcr; + + cam_io_w_mb(val, + soc_info->reg_map[CAM_IFE_CSID_TOP_MEM_BASE_ID].mem_base + + top_reg->io_path_cfg0_addr[csid_hw->hw_intf->hw_idx]); + + /*Program dual csid regs */ + + if (csid_hw->sync_mode == CAM_ISP_HW_SYNC_NONE) + return 0; + + val = csid_hw->top_cfg.dual_sync_core_sel << + top_reg->dual_sync_sel_shift_val; + val |= csid_hw->top_cfg.dual_en << + top_reg->dual_en_shift_val; + val |= csid_hw->top_cfg.master_slave_sel << + top_reg->master_slave_sel_shift_val; + + cam_io_w_mb(val, + soc_info->reg_map[CAM_IFE_CSID_TOP_MEM_BASE_ID].mem_base + + top_reg->dual_csid_cfg0_addr[csid_hw->hw_intf->hw_idx]); + + return 0; +} + +static int cam_ife_csid_ver2_set_cesta_clk_rate(struct cam_ife_csid_ver2_hw *csid_hw, + bool force_channel_switch, const char *identifier) +{ + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + bool channel_switch = force_channel_switch; + uint32_t lowest_clk_lvl = soc_info->lowest_clk_level; + int rc = 0; + + CAM_DBG(CAM_ISP, "CSID:%d clk_rate=%llu, channel_switch=%d, identifier=%s", + csid_hw->hw_intf->hw_idx, csid_hw->clk_rate, channel_switch, identifier); + + if 
(csid_hw->clk_rate) { + rc = cam_soc_util_set_src_clk_rate(soc_info, csid_hw->hw_intf->hw_idx, + csid_hw->clk_rate, + soc_info->clk_rate[lowest_clk_lvl][soc_info->src_clk_idx]); + if (rc) { + CAM_ERR(CAM_ISP, + "Failed in setting cesta clk rates[high low]:[%ld %ld] client_idx:%d rc:%d", + csid_hw->clk_rate, + soc_info->clk_rate[lowest_clk_lvl][soc_info->src_clk_idx], + csid_hw->hw_intf->hw_idx, rc); + return rc; + } + channel_switch = true; + } + + if (channel_switch) { + rc = cam_soc_util_cesta_channel_switch(csid_hw->hw_intf->hw_idx, identifier); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Failed to apply power states for cesta client:%d rc:%d", + csid_hw->hw_intf->hw_idx, rc); + return rc; + } + } + + return rc; +} + +static int cam_ife_csid_ver2_enable_core(struct cam_ife_csid_ver2_hw *csid_hw) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t clk_lvl; + struct cam_csid_soc_private *soc_private; + uint32_t irq_mask = 0; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + soc_private = (struct cam_csid_soc_private *)soc_info->soc_private; + + /* overflow check before increment */ + if (csid_hw->hw_info->open_count == UINT_MAX) { + CAM_ERR(CAM_ISP, "CSID:%u Open count reached max", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + /* Increment ref Count */ + csid_hw->hw_info->open_count++; + + if (csid_hw->hw_info->open_count > 1) { + CAM_DBG(CAM_ISP, "CSID[%u] hw has already been enabled", + csid_hw->hw_intf->hw_idx); + return rc; + } + + rc = cam_soc_util_get_clk_level(soc_info, csid_hw->clk_rate, + soc_info->src_clk_idx, &clk_lvl); + if (rc) { + CAM_ERR(CAM_ISP, + "CSID[%u] get clk level fail rate %u", + csid_hw->clk_rate); + } + + if (soc_private->is_ife_csid_lite) + CAM_DBG(CAM_ISP, + "CSID[%u] clk lvl %u received clk_rate %u applied clk_rate :%lu", + csid_hw->hw_intf->hw_idx, clk_lvl, csid_hw->clk_rate, + 
soc_info->applied_src_clk_rates.sw_client); + else + CAM_DBG(CAM_ISP, + "CSID[%d] clk lvl %u received clk_rate %u applied clk_rate sw_client:%lu hw_client:[%lu %lu]", + csid_hw->hw_intf->hw_idx, clk_lvl, csid_hw->clk_rate, + soc_info->applied_src_clk_rates.sw_client, + soc_info->applied_src_clk_rates.hw_client[csid_hw->hw_intf->hw_idx].high, + soc_info->applied_src_clk_rates.hw_client[csid_hw->hw_intf->hw_idx].low); + + rc = cam_ife_csid_enable_soc_resources(soc_info, clk_lvl); + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%d] Enable soc failed", csid_hw->hw_intf->hw_idx); + goto err; + } + + CAM_DBG(CAM_ISP, "csid %d is_clk_drv_en %d is_drv_config_en %d", + csid_hw->hw_intf->hw_idx, soc_info->is_clk_drv_en, csid_hw->is_drv_config_en); + + /* + * cam_ife_csid_enable_soc_resources sets clk_lvl for high, low values, if the usecase + * enables drv config, i.e enabling csid DRV events, overwrite low value with LowSVS to get + * power benefit. + */ + if (soc_info->is_clk_drv_en && csid_hw->is_drv_config_en) { + rc = cam_ife_csid_ver2_set_cesta_clk_rate(csid_hw, false, "csid_init"); + if (rc) { + CAM_ERR(CAM_ISP, "Failed in setting cesta clk rates, client_idx:%d rc:%d", + csid_hw->hw_intf->hw_idx, rc); + return rc; + } + } + + irq_mask = csid_reg->cmn_reg->top_reset_irq_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]; + + csid_hw->reset_irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + CAM_IRQ_PRIORITY_0, + &irq_mask, + csid_hw, + cam_ife_csid_ver2_reset_irq_top_half, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->reset_irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%u] reset irq subscribe fail", + csid_hw->hw_intf->hw_idx); + goto disable_res; + } + + reinit_completion(&csid_hw->hw_info->hw_complete); + cam_ife_csid_ver2_program_top(csid_hw); + csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP; + + return rc; + +disable_res: + cam_ife_csid_disable_soc_resources(soc_info); + +err: + CAM_ERR(CAM_ISP, "CSID[%u] 
init hw fail rc %d", + csid_hw->hw_intf->hw_idx, rc); + csid_hw->hw_info->open_count--; + return rc; +} + +static int cam_ife_csid_ver2_enable_hw( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_reg_info *csid_reg = NULL; + uint32_t val; + int i, rc; + void __iomem *mem_base; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL; + uint32_t num_register = 0; + uint32_t top_err_irq_mask = 0; + uint32_t *buf_done_irq_mask = NULL; + uint32_t top_info_irq_mask = 0; + uint32_t value = 0; + uint32_t index = 0; + + if (csid_hw->flags.device_enabled) { + CAM_DBG(CAM_ISP, "CSID[%u] hw has already been enabled", + csid_hw->hw_intf->hw_idx); + return 0; + } + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + /* Clear IRQs */ + for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0; i < csid_reg->num_top_regs; i++) + cam_io_w_mb(1, (mem_base + csid_reg->cmn_reg->top_irq_clear_addr[i])); + + for (i = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; i < csid_reg->num_rx_regs; i++) + cam_io_w_mb(csid_reg->csi2_reg->irq_mask_all, + (mem_base + csid_reg->csi2_reg->irq_clear_addr[i])); + + for (i = 0; i < csid_reg->cmn_reg->num_pix; i++) { + switch (i) { + case 0: + path_reg = csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_IPP]; + break; + case 1: + path_reg = csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_IPP_1]; + break; + case 2: + path_reg = csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_IPP_2]; + break; + default: + path_reg = NULL; + break; + } + + if (!path_reg) { + CAM_ERR(CAM_ISP, "Unsupported num_pix:%d", i); + return -EINVAL; + } + cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all, + (mem_base + path_reg->irq_clear_addr)); + } + + if (csid_reg->num_rx_regs > 1) { + cam_io_w_mb(1, mem_base + + csid_reg->cmn_reg->top_irq_clear_addr[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]); + 
cam_io_w_mb(csid_reg->csi2_reg->irq_mask_all, mem_base + + csid_reg->csi2_reg->irq_clear_addr[CAM_IFE_CSID_RX2_IRQ_STATUS_REG1]); + } + + path_reg = csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_PPP]; + if (csid_reg->cmn_reg->num_ppp) + cam_io_w_mb(csid_reg->cmn_reg->ppp_irq_mask_all, + (mem_base + path_reg->irq_clear_addr)); + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + path_reg = csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_RDI_0 + i]; + cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all, + (mem_base + path_reg->irq_clear_addr)); + } + + cam_io_w_mb(1, (mem_base + csid_reg->cmn_reg->irq_cmd_addr)); + + /* Read hw version */ + val = cam_io_r_mb(mem_base + csid_reg->cmn_reg->hw_version_addr); + + /* + * Set CSID path domain id enable, used for debug purpose only, + * register access is restricted in normal builds, disable by default, + * and we can setup domain id value through debugfs, such as we want to + * set CSID path as TVM non-secure(0x08), we can echo value 0x08080808. + */ + CAM_DBG(CAM_ISP, "Set CSID:%u force set enabled: %d domain id value: 0x%x", + csid_hw->hw_intf->hw_idx, + csid_hw->debug_info.set_domain_id_enabled, + csid_hw->debug_info.domain_id_value); + + if (csid_hw->debug_info.set_domain_id_enabled) { + value = CAM_IFE_CSID_PATH_DOMAIN_ID; + + if (csid_hw->debug_info.domain_id_value) + value = csid_hw->debug_info.domain_id_value; + + rc = cam_common_util_get_string_index(soc_info->mem_block_name, + soc_info->num_mem_block, "csid_sec", &index); + + if ((rc == 0) && (index < CAM_IFE_CSID_MAX_MEM_BASE_ID)) { + CAM_DBG(CAM_ISP, "regbase found for CSID SEC, rc: %d, index: %d", + rc, index); + + cam_io_w_mb(value, + soc_info->reg_map[index].mem_base + + csid_reg->cmn_reg->path_domain_id_cfg0); + cam_io_w_mb(value, + soc_info->reg_map[index].mem_base + + csid_reg->cmn_reg->path_domain_id_cfg1); + + /* Some target may have 3 such registers, check and set cfg2 */ + if (csid_reg->cmn_reg->path_domain_id_cfg2) + cam_io_w_mb(value, + 
soc_info->reg_map[index].mem_base + + csid_reg->cmn_reg->path_domain_id_cfg2); + + CAM_DBG(CAM_ISP, "Set CSID:%u domain id value: 0x%x", + csid_hw->hw_intf->hw_idx, value); + } else { + CAM_WARN(CAM_ISP, "regbase not found for CSID SEC, rc: %d, index: %d", + rc, index); + } + } + + num_register = csid_reg->top_irq_reg_info[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0].num_registers; + buf_done_irq_mask = vmalloc(sizeof(uint32_t) * num_register); + if (!buf_done_irq_mask) { + CAM_ERR(CAM_ISP, "csid buf_done_irq_mask allocation failed"); + return -ENOMEM; + } + buf_done_irq_mask[0] = csid_reg->cmn_reg->top_buf_done_irq_mask; + + if (csid_reg->ipp_mc_reg) + buf_done_irq_mask[0] |= csid_reg->ipp_mc_reg->comp_subgrp0_mask | + csid_reg->ipp_mc_reg->comp_subgrp2_mask; + + csid_hw->buf_done_irq_handle = cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + CAM_IRQ_PRIORITY_4, + buf_done_irq_mask, + csid_hw, + cam_ife_csid_ver2_handle_buf_done_irq, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + if (csid_hw->buf_done_irq_handle < 1) { + CAM_ERR(CAM_ISP, "CSID[%u] buf done irq subscribe fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto free_buf_done_mask; + } + + if (csid_reg->num_top_regs > 1) + cam_ife_csid_ver2_top2_irq_subscribe(csid_hw, csid_reg->cmn_reg->top_top2_irq_mask, + csid_reg->cmn_reg->top_err_irq_mask[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]); + + top_err_irq_mask = csid_reg->cmn_reg->top_err_irq_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]; + csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0] = + cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + CAM_IRQ_PRIORITY_0, + &top_err_irq_mask, + csid_hw, + cam_ife_csid_ver2_top_err_irq_top_half, + cam_ife_csid_ver2_top_err_irq_bottom_half, + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0] < 1) { + CAM_ERR(CAM_ISP, "csid[%u] top error irq 
subscribe fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto unsubscribe_buf_done; + } + + rc = cam_irq_controller_register_dependent(csid_hw->top_irq_controller + [CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->buf_done_irq_controller, buf_done_irq_mask); + + if (rc) { + cam_irq_controller_unsubscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->buf_done_irq_handle); + rc = -EINVAL; + goto unsubscribe_top_err; + } + + if (csid_hw->debug_info.top_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]) { + top_info_irq_mask = csid_hw->debug_info.top_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]; + csid_hw->top_info_irq_handle[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0] + = cam_irq_controller_subscribe_irq( + csid_hw->top_irq_controller + [CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + CAM_IRQ_PRIORITY_1, + &top_info_irq_mask, + csid_hw, + cam_ife_csid_ver2_top_info_irq_top_half, + cam_ife_csid_ver2_top_info_irq_bottom_half, + csid_hw->tasklet, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (csid_hw->top_info_irq_handle[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0] < 1) { + CAM_ERR(CAM_ISP, "CSID[%u] Subscribe Top Info Irq fail", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto unsubscribe_top_err; + } + } + + csid_hw->flags.device_enabled = true; + csid_hw->flags.fatal_err_detected = false; + CAM_DBG(CAM_ISP, "CSID:%u CSID HW version: 0x%x", + csid_hw->hw_intf->hw_idx, val); + vfree(buf_done_irq_mask); + return 0; + + +unsubscribe_top_err: + cam_irq_controller_unsubscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]); + csid_hw->top_err_irq_handle[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0] = 0; + +unsubscribe_buf_done: + cam_irq_controller_unsubscribe_irq( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->buf_done_irq_handle); + csid_hw->buf_done_irq_handle = 0; +free_buf_done_mask: + vfree(buf_done_irq_mask); + return rc; +} + +int cam_ife_csid_ver2_init_hw(void 
*hw_priv, + void *init_args, uint32_t arg_size) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_hw_info *hw_info; + int rc = 0; + struct cam_isp_resource_node *res; + + if (!hw_priv || !init_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + + mutex_lock(&csid_hw->hw_info->hw_mutex); + + rc = cam_ife_csid_ver2_enable_core(csid_hw); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID[%u] init hw fail", + csid_hw->hw_intf->hw_idx); + goto end; + } + res = (struct cam_isp_resource_node *)init_args; + + res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW; + + CAM_DBG(CAM_ISP, "CSID[%u] init hw", + csid_hw->hw_intf->hw_idx); +end: + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; +} + +static int cam_ife_csid_ver2_disable_core( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + const struct cam_ife_csid_ver2_reg_info *csid_reg; + const struct cam_ife_csid_ver2_top_reg_info *top_reg = NULL; + struct cam_hw_soc_info *soc_info; + struct cam_csid_soc_private *soc_private; + int rc = 0; + + /* Check for refcount */ + if (!csid_hw->hw_info->open_count) { + CAM_WARN(CAM_ISP, "CSID[%u] Unbalanced disable_hw", csid_hw->hw_intf->hw_idx); + return rc; + } + + /* Decrement ref Count */ + csid_hw->hw_info->open_count--; + + if (csid_hw->hw_info->open_count) + return rc; + + soc_info = &csid_hw->hw_info->soc_info; + soc_private = (struct cam_csid_soc_private *) + soc_info->soc_private; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + top_reg = csid_reg->top_reg; + cam_ife_csid_ver2_disable_csi2(true, csid_hw); + + /* Disable the top IRQ interrupt */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->top_irq_mask_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]); + + if (csid_hw->reset_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + 
csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->reset_irq_handle); + if (rc) + CAM_WARN(CAM_ISP, + "CSID:%d Failed to unsubscribe reset irq", + csid_hw->hw_intf->hw_idx); + csid_hw->reset_irq_handle = 0; + } + + /* + * Wrapper config can be reset only by ares from + * camera subsystem power up or CSR ares bit + * in CSID clk branch both of which are not + * gauranteed at the end of a given CSID stream. + * Explicitly resetting the config for full CSIDs + * via AHB to avoid conflict on consecutive sessions + */ + if ((top_reg) && (!soc_private->is_ife_csid_lite)) { + cam_io_w_mb(top_reg->io_path_cfg_rst_val, + soc_info->reg_map[CAM_IFE_CSID_TOP_MEM_BASE_ID].mem_base + + top_reg->io_path_cfg0_addr[csid_hw->hw_intf->hw_idx]); + + cam_io_w_mb(top_reg->dual_cfg_rst_val, + soc_info->reg_map[CAM_IFE_CSID_TOP_MEM_BASE_ID].mem_base + + top_reg->dual_csid_cfg0_addr[csid_hw->hw_intf->hw_idx]); + } + + spin_lock_bh(&csid_hw->lock_state); + csid_hw->flags.device_enabled = false; + csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN; + csid_hw->flags.rdi_lcr_en = false; + spin_unlock_bh(&csid_hw->lock_state); + rc = cam_ife_csid_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_ISP, "CSID:%u Disable CSID SOC failed", + csid_hw->hw_intf->hw_idx); + + csid_hw->counters.error_irq_count = 0; + csid_hw->counters.crc_error_irq_count = 0; + return rc; +} + +int cam_ife_csid_ver2_deinit_hw(void *hw_priv, + void *deinit_args, uint32_t arg_size) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_isp_resource_node *res; + struct cam_hw_info *hw_info; + int rc = 0; + + if (!hw_priv || !deinit_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "CSID:Invalid arguments"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + res = (struct cam_isp_resource_node *)deinit_args; + + if (csid_hw->hw_info->hw_state == CAM_HW_STATE_POWER_DOWN) { + 
CAM_WARN(CAM_ISP, "CSID:%u already powered down", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) { + CAM_ERR(CAM_ISP, "CSID:%u Invalid Res type %d", + csid_hw->hw_intf->hw_idx, + res->res_type); + return -EINVAL; + } + + if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, "CSID:%u Res:%d already in De-init state", + csid_hw->hw_intf->hw_idx, + res->res_id); + return -EINVAL; + } + + mutex_lock(&csid_hw->hw_info->hw_mutex); + + if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW) + goto disable_hw; + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_IPP_1: + case CAM_IFE_PIX_PATH_RES_IPP_2: + case CAM_IFE_PIX_PATH_RES_PPP: + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + rc = cam_ife_csid_ver2_disable_path(true, csid_hw, res); + break; + default: + CAM_ERR(CAM_ISP, "CSID:%u Invalid res type%d", + csid_hw->hw_intf->hw_idx, res->res_type); + break; + } + +disable_hw: + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + cam_ife_csid_ver2_disable_core(csid_hw); + mutex_unlock(&csid_hw->hw_info->hw_mutex); + CAM_DBG(CAM_ISP, "De-Init CSID %d Path: %d", + csid_hw->hw_intf->hw_idx, res->res_id); + + return rc; +} + +static void cam_ife_csid_ver2_testbus_config( + struct cam_ife_csid_ver2_hw *csid_hw, uint32_t val) +{ + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + struct cam_ife_csid_ver2_reg_info *csid_reg = + (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg; + + cam_io_w_mb(val, soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base + + csid_reg->cmn_reg->test_bus_ctrl); + CAM_DBG(CAM_ISP, "CSID [%u] test_bus_ctrl: 0x%x", csid_hw->hw_intf->hw_idx, val); +} + +static void cam_ife_csid_ver2_send_secure_info( + struct cam_csid_hw_start_args *start_args, + struct cam_ife_csid_ver2_hw *csid_hw) 
+{ + struct cam_ife_csid_secure_info secure_info; + struct cam_ife_csid_ver2_reg_info *csid_reg; + int phy_sel = 0; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + secure_info.lane_cfg = csid_hw->rx_cfg.lane_cfg; + secure_info.cdm_hw_idx_mask = BIT(start_args->cdm_hw_idx); + secure_info.vc_mask = 0; + secure_info.csid_hw_idx_mask = BIT(csid_hw->hw_intf->hw_idx); + + CAM_DBG(CAM_ISP, + "PHY secure info for CSID[%u], lane_cfg: 0x%x, ife: 0x%x, cdm: 0x%x, vc_mask: 0x%llx", + csid_hw->hw_intf->hw_idx, + secure_info.lane_cfg, secure_info.csid_hw_idx_mask, + secure_info.cdm_hw_idx_mask, secure_info.vc_mask); + + /* Issue one call to PHY for dual IFE cases */ + phy_sel = (int)(csid_hw->rx_cfg.phy_sel - csid_reg->cmn_reg->phy_sel_base_idx); + if (phy_sel < 0) { + CAM_WARN(CAM_ISP, "Can't notify csiphy, incorrect phy selected=%d", + phy_sel); + } else { + secure_info.phy_sel = (uint32_t)phy_sel; + CAM_DBG(CAM_ISP, "Notify CSIPHY: %d", phy_sel); + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_DOMAIN_ID_SECURE_PARAMS, (void *)&secure_info); + } + +} + +int cam_ife_csid_ver2_start(void *hw_priv, void *args, + uint32_t arg_size) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_isp_resource_node *res; + struct cam_csid_hw_start_args *start_args; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_hw_info *hw_info; + struct cam_ife_csid_ver2_rup_aup_mask rup_aup_mask = {0}; + int rc = 0, i; + bool delay_rdi0_enable = false; + struct cam_subdev_msg_phy_conn_csid_info conn_csid_info; + struct cam_subdev_msg_phy_drv_info drv_info; + struct cam_subdev_msg_phy_halt_resume_info halt_resume_info = {0}; + bool cesta_enabled = false; + void __iomem *csid_clc_membase = NULL; + + if (!hw_priv || !args) { + CAM_ERR(CAM_ISP, "CSID Invalid params"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw 
*)hw_info->core_info; + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + start_args = (struct cam_csid_hw_start_args *)args; + csid_clc_membase = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + if (csid_hw->hw_info->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_WARN(CAM_ISP, "CSID:%u already powered down", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + + mutex_lock(&csid_hw->hw_info->hw_mutex); + csid_hw->flags.sof_irq_triggered = false; + csid_hw->counters.irq_debug_cnt = 0; + csid_hw->is_drv_config_en = start_args->is_drv_config_en; + + CAM_DBG(CAM_ISP, + "csid %d is_drv_config_en %d start_only %d is_internal_start %d, clk_rate=%lld", + csid_hw->hw_intf->hw_idx, csid_hw->is_drv_config_en, + start_args->start_only, start_args->is_internal_start, csid_hw->clk_rate); + + if (start_args->start_only) { + rc = cam_cpas_csid_process_resume(csid_hw->hw_intf->hw_idx); + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%u Failed to process resume rc: %d", + csid_hw->hw_intf->hw_idx, rc); + goto end; + } + } + + if (soc_info->is_clk_drv_en && csid_hw->is_drv_config_en) { + /* + * This will do 2 things : + * 1. At the time of enable_core, we do not have drv info yet populated into + * csid layer, so we set high, low here will overwrite previously set high, low + * 2. 
We trigger a channel switch so that we go to high clocks for all including + * csiphy before calling CSIPHY RESUME, so that we remove sw client vote from + * csiphy device + */ + rc = cam_ife_csid_ver2_set_cesta_clk_rate(csid_hw, true, "csid_start"); + if (rc) { + CAM_ERR(CAM_ISP, "Failed in setting cesta clk rates, client_idx:%d rc:%d", + csid_hw->hw_intf->hw_idx, rc); + goto end; + } + halt_resume_info.do_drv_ops = true; + } + + if (halt_resume_info.do_drv_ops) { + if (csid_hw->sync_mode != CAM_ISP_HW_SYNC_SLAVE) { + halt_resume_info.phy_idx = (csid_hw->rx_cfg.phy_sel - 1); + halt_resume_info.lane_cfg = csid_hw->rx_cfg.lane_cfg; + halt_resume_info.csid_state = CAM_SUBDEV_PHY_CSID_RESUME; + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_NOTIFY_HALT_RESUME, + (void *)&halt_resume_info); + } + } + + rc = cam_ife_csid_ver2_enable_hw(csid_hw); + + for (i = 0; i < start_args->num_res; i++) { + + res = start_args->node_res[i]; + CAM_DBG(CAM_ISP, "CSID:%u res_type :%d res[id:%d name:%s]", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id, res->res_name); + + if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_ERR(CAM_ISP, "CSID:%u Invalid res tpe:%d res id:%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + rc = -EINVAL; + goto end; + } + + switch (res->res_id) { + case CAM_IFE_PIX_PATH_RES_IPP: + case CAM_IFE_PIX_PATH_RES_IPP_1: + case CAM_IFE_PIX_PATH_RES_IPP_2: + rc = cam_ife_csid_ver2_program_ipp_path(csid_hw, res, &rup_aup_mask); + if (rc) + goto end; + + break; + case CAM_IFE_PIX_PATH_RES_PPP: + rc = cam_ife_csid_ver2_program_ppp_path(csid_hw, res, &rup_aup_mask); + if (rc) + goto end; + + break; + case CAM_IFE_PIX_PATH_RES_RDI_0: + case CAM_IFE_PIX_PATH_RES_RDI_1: + case CAM_IFE_PIX_PATH_RES_RDI_2: + case CAM_IFE_PIX_PATH_RES_RDI_3: + case CAM_IFE_PIX_PATH_RES_RDI_4: + rc = cam_ife_csid_ver2_program_rdi_path(csid_hw, res, &rup_aup_mask); + if (rc) + goto end; + + break; + default: + CAM_ERR(CAM_ISP, "CSID:%u Invalid 
res type %d", + csid_hw->hw_intf->hw_idx, res->res_type); + break; + } + } + + /* + * For targets with domain-id support, hand over relevant parameters + * to phy driver + */ + if ((csid_hw->sync_mode != CAM_ISP_HW_SYNC_SLAVE) && + start_args->is_secure && + csid_hw->flags.domain_id_security) + cam_ife_csid_ver2_send_secure_info(start_args, csid_hw); + + /* + * Configure RUP/AUP/MUP @ streamon for all enabled paths + * For internal recovery - skip this, CDM packet corresponding + * to the request being recovered will apply the appropriate RUP/AUP/MUP + */ + if (csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SPLIT_RUP_AUP) { + rup_aup_mask.rup_aup_set_mask |= + (csid_hw->rx_cfg.mup << csid_reg->cmn_reg->mup_shift_val) | + BIT(csid_reg->cmn_reg->rup_aup_set_shift_val); + } else { + rup_aup_mask.rup_mask |= + (csid_hw->rx_cfg.mup << csid_reg->cmn_reg->mup_shift_val); + } + + if (!start_args->is_internal_start) { + if (csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SPLIT_RUP_AUP) { + cam_io_w_mb(rup_aup_mask.rup_mask, + csid_clc_membase + csid_reg->cmn_reg->rup_cmd_addr); + cam_io_w_mb(rup_aup_mask.aup_mask, + csid_clc_membase + csid_reg->cmn_reg->aup_cmd_addr); + cam_io_w_mb(rup_aup_mask.rup_aup_set_mask, + csid_clc_membase + csid_reg->cmn_reg->rup_aup_cmd_addr); + } else { + cam_io_w_mb(rup_aup_mask.rup_mask, + csid_clc_membase + csid_reg->cmn_reg->rup_aup_cmd_addr); + + } + } + + CAM_DBG(CAM_ISP, "CSID:%u RUP:0x%x AUP: 0x%x MUP:0x%x at start updated: %s", + csid_hw->hw_intf->hw_idx, rup_aup_mask.rup_mask, rup_aup_mask.aup_mask, + rup_aup_mask.rup_aup_set_mask, CAM_BOOL_TO_YESNO(!start_args->is_internal_start)); + + + cam_ife_csid_ver2_enable_csi2(csid_hw); + + if (csid_hw->sync_mode != CAM_ISP_HW_SYNC_SLAVE) { + struct cam_csid_soc_private *soc_private = + (struct cam_csid_soc_private *)soc_info->soc_private; + + conn_csid_info.phy_idx = (csid_hw->rx_cfg.phy_sel - 1); + conn_csid_info.lane_cfg = csid_hw->rx_cfg.lane_cfg; + conn_csid_info.core_idx = 
csid_hw->hw_intf->hw_idx; + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, CAM_SUBDEV_MESSAGE_CONN_CSID_INFO, + (void *)&conn_csid_info); + + if (!soc_private->is_ife_csid_lite) + cam_cpas_query_drv_enable(NULL, &cesta_enabled); + + if (cesta_enabled) { + /* for ifelites, soc_info->is_clk_drv_en is false */ + drv_info.use_hw_client_voting = soc_info->is_clk_drv_en; + drv_info.is_drv_config_en = csid_hw->is_drv_config_en; + } else { + drv_info.use_hw_client_voting = false; + drv_info.is_drv_config_en = false; + } + + drv_info.phy_idx = (csid_hw->rx_cfg.phy_sel - 1); + drv_info.lane_cfg = csid_hw->rx_cfg.lane_cfg; + + /* + * send this even when cesta is not enabled - to support debugfs to switch between + * cesta and non-cesta between runs. CSIPHY might have stale information if we + * do not send this info everytime + */ + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_DRV_INFO, (void *)&drv_info); + } + + for (i = 0; i < start_args->num_res; i++) { + res = start_args->node_res[i]; + + if ((csid_hw->flags.rdi_lcr_en) && + (res->res_id == CAM_IFE_PIX_PATH_RES_RDI_0)) { + delay_rdi0_enable = true; + continue; + } + + cam_ife_csid_ver2_enable_path(csid_hw, res); + CAM_DBG(CAM_ISP, + "CSID[%u] res_type :%d res_id:%d enabled", + csid_hw->hw_intf->hw_idx, res->res_type, res->res_id); + } + + if (delay_rdi0_enable) { + res = &csid_hw->path_res[CAM_IFE_PIX_PATH_RES_RDI_0]; + cam_ife_csid_ver2_enable_path(csid_hw, res); + CAM_DBG(CAM_ISP, "CSID[%u] Enabling RDI0 after all paths for LCR-PD cases", + csid_hw->hw_intf->hw_idx); + } + + if (csid_hw->debug_info.test_bus_val) { + cam_ife_csid_ver2_testbus_config(csid_hw, csid_hw->debug_info.test_bus_val); + csid_hw->debug_info.test_bus_enabled = true; + } else { + cam_ife_csid_ver2_testbus_config(csid_hw, + csid_reg->cmn_reg->sync_reset_ctrl_testbus_val); + csid_hw->debug_info.test_bus_enabled = true; + } + + /* Check for global resume */ + if (csid_hw->flags.rdi_lcr_en && !csid_hw->top_cfg.dual_en) { + 
cam_io_w_mb(1, + soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base + + csid_reg->cmn_reg->global_cmd_addr); + CAM_DBG(CAM_ISP, "CSID[%u] global start set", + csid_hw->hw_intf->hw_idx); + } + + csid_hw->flags.reset_awaited = false; + +end: + mutex_unlock(&csid_hw->hw_info->hw_mutex); + + /* Successful starts, release PHY from reset if standby was asserted */ + if (!rc && start_args->start_only && csid_hw->standby_asserted) { + if (csid_hw->sync_mode != CAM_ISP_HW_SYNC_SLAVE) { + halt_resume_info.reset_resume_phy = true; + halt_resume_info.do_drv_ops = false; + halt_resume_info.phy_idx = (csid_hw->rx_cfg.phy_sel - 1); + halt_resume_info.lane_cfg = csid_hw->rx_cfg.lane_cfg; + halt_resume_info.csid_state = CAM_SUBDEV_PHY_CSID_RESUME; + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_NOTIFY_HALT_RESUME, + (void *)&halt_resume_info); + csid_hw->standby_asserted = false; + } + } + + return rc; +} + +static void cam_ife_csid_ver2_maskout_all_irqs( + struct cam_ife_csid_ver2_hw *csid_hw, + struct cam_csid_hw_stop_args *csid_stop) +{ + int i; + struct cam_hw_soc_info *soc_info; + const struct cam_ife_csid_ver2_reg_info *csid_reg; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL; + void __iomem *mem_base; + struct cam_isp_resource_node *res; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + soc_info = &csid_hw->hw_info->soc_info; + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + if (csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_TOP_MASK_ALL_IRQS) + goto disable_top_irq_status_reg0; + + /* Disable rx */ + if (!csid_hw->flags.offline_mode) + for (i = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; i < csid_reg->num_rx_regs; i++) + cam_io_w_mb(0x0, (mem_base + csi2_reg->irq_mask_addr[i])); + + for (i = 0; i < csid_stop->num_res; i++) { + res = csid_stop->node_res[i]; + path_reg = 
csid_reg->path_reg[res->res_id]; + + /* Disable path */ + cam_io_w_mb(0x0, + mem_base + path_reg->irq_mask_addr); + } + + /* Disable buf done */ + cam_io_w_mb(0x0, mem_base + + csid_reg->cmn_reg->buf_done_irq_mask_addr); + + if (csid_reg->num_top_regs > 1) { + cam_io_w_mb(0x0, (mem_base + + csid_reg->cmn_reg->top_irq_mask_addr[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1])); + } + +disable_top_irq_status_reg0: + /* Disable top except rst_done */ + cam_io_w_mb(csid_reg->cmn_reg->top_reset_irq_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + mem_base + csid_reg->cmn_reg->top_irq_mask_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0]); +} + +int cam_ife_csid_ver2_stop(void *hw_priv, + void *stop_args, uint32_t arg_size) +{ + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_isp_resource_node *res; + struct cam_hw_info *hw_info; + int rc = 0, i; + struct cam_csid_hw_stop_args *csid_stop; + struct cam_csid_reset_cfg_args reset = {0}; + struct cam_subdev_msg_phy_halt_resume_info halt_resume_info = {0}; + struct cam_ife_csid_ver2_reg_info *csid_reg; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + void __iomem *mem_base; + struct cam_hw_soc_info *soc_info; + + if (!hw_priv || !stop_args || + (arg_size != sizeof(struct cam_csid_hw_stop_args))) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + csid_reg = (struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg; + csi2_reg = csid_reg->csi2_reg; + soc_info = &csid_hw->hw_info->soc_info; + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + if (csid_hw->hw_info->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_WARN(CAM_ISP, "CSID:%u already powered down", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + csid_stop = (struct cam_csid_hw_stop_args *) stop_args; + + if (!csid_stop->num_res) { + CAM_ERR(CAM_ISP, "CSID:%u Invalid args", csid_hw->hw_intf->hw_idx); + return -EINVAL; + } 
+ + CAM_DBG(CAM_ISP, "CSID:%u num_res %d", csid_hw->hw_intf->hw_idx, + csid_stop->num_res); + + csid_hw->flags.device_enabled = false; + csid_hw->standby_asserted = (csid_stop->standby_en && + (csid_hw->sync_mode != CAM_ISP_HW_SYNC_SLAVE)); + + /* Mask out all irqs from HW */ + mutex_lock(&csid_hw->hw_info->hw_mutex); + cam_ife_csid_ver2_maskout_all_irqs(csid_hw, csid_stop); + mutex_unlock(&csid_hw->hw_info->hw_mutex); + + cam_ife_csid_ver2_send_cdr_sweep_csi2_rx_vals(csid_hw, csid_reg, 0); + + halt_resume_info.do_drv_ops = (csid_hw->hw_info->soc_info.is_clk_drv_en && + csid_hw->is_drv_config_en && (csid_hw->sync_mode != CAM_ISP_HW_SYNC_SLAVE)); + + if (likely(halt_resume_info.do_drv_ops)) { + halt_resume_info.phy_idx = (csid_hw->rx_cfg.phy_sel - 1); + halt_resume_info.lane_cfg = csid_hw->rx_cfg.lane_cfg; + halt_resume_info.csid_state = CAM_SUBDEV_PHY_CSID_HALT; + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_NOTIFY_HALT_RESUME, + (void *)&halt_resume_info); + } + + /* Issue a halt & reset to ensure there is no HW activity post the halt block */ + reset.reset_type = CAM_IFE_CSID_RESET_PATH; + rc = cam_ife_csid_ver2_reset(hw_priv, &reset, + sizeof(struct cam_csid_reset_cfg_args)); + + /* Ensure halt is successful prior to clearing phy select */ + if (csid_hw->standby_asserted && !rc) { + /* Reset phy select */ + cam_io_w_mb(0, mem_base + csi2_reg->cfg0_addr); + + /* Issue a global HW reset to clean decoder state */ + reset.reset_type = CAM_IFE_CSID_RESET_GLOBAL_HW_ONLY; + cam_ife_csid_ver2_reset(hw_priv, &reset, + sizeof(struct cam_csid_reset_cfg_args)); + + reset.reset_type = CAM_IFE_CSID_RESET_GLOBAL_IRQ_CNTRL; + cam_ife_csid_ver2_reset(hw_priv, &reset, + sizeof(struct cam_csid_reset_cfg_args)); + + CAM_DBG(CAM_ISP, + "CSID:%u global HW and irq reset issued at stop for standby", + csid_hw->hw_intf->hw_idx); + } + + halt_resume_info.reset_resume_phy = csid_hw->standby_asserted; + halt_resume_info.do_drv_ops = false; + if 
(likely(halt_resume_info.reset_resume_phy)) { + halt_resume_info.phy_idx = (csid_hw->rx_cfg.phy_sel - 1); + halt_resume_info.lane_cfg = csid_hw->rx_cfg.lane_cfg; + halt_resume_info.csid_state = CAM_SUBDEV_PHY_CSID_HALT; + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_NOTIFY_HALT_RESUME, + (void *)&halt_resume_info); + } + + atomic_set(&csid_hw->discard_frame_per_path, 0); + mutex_lock(&csid_hw->hw_info->hw_mutex); + for (i = 0; i < csid_stop->num_res; i++) { + res = csid_stop->node_res[i]; + rc = cam_ife_csid_ver2_disable_path(false, csid_hw, res); + res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW; + CAM_DBG(CAM_ISP, "CSID:%u res_type %d Resource[id:%d name:%s]", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, + res->res_name); + } + if (csid_hw->buf_done_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq_evt( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->buf_done_irq_handle); + csid_hw->buf_done_irq_handle = 0; + + cam_irq_controller_unregister_dependent( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->buf_done_irq_controller); + } + + + for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0; i < csid_reg->num_top_regs; i++) { + if (!csid_hw->top_err_irq_handle[i]) + continue; + rc = cam_irq_controller_unsubscribe_irq_evt(csid_hw->top_irq_controller[i], + csid_hw->top_err_irq_handle[i]); + csid_hw->top_err_irq_handle[i] = 0; + } + + for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0; i < csid_reg->num_top_regs; i++) { + if (!csid_hw->debug_info.top_mask[i]) + continue; + rc = cam_irq_controller_unsubscribe_irq_evt(csid_hw->top_irq_controller[i], + csid_hw->top_info_irq_handle[i]); + csid_hw->top_info_irq_handle[i] = 0; + } + + + if (csid_hw->top_mc_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq_evt( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->top_mc_irq_handle); + csid_hw->top_mc_irq_handle = 0; + } + + if (csid_reg->num_top_regs > 1 && 
csid_hw->top_top2_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq_evt( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->top_top2_irq_handle); + csid_hw->top_top2_irq_handle = 0; + + cam_irq_controller_unregister_dependent( + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0], + csid_hw->top_irq_controller[CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1]); + } + + cam_ife_csid_ver2_disable_csi2(false, csid_hw); + if (csid_hw->debug_info.test_bus_enabled) + cam_ife_csid_ver2_testbus_config(csid_hw, 0x0); + + csid_hw->debug_info.test_bus_enabled = false; + csid_hw->flags.pf_err_detected = false; + csid_hw->flags.rdi_lcr_en = false; + csid_hw->flags.last_exp_valid = false; + mutex_unlock(&csid_hw->hw_info->hw_mutex); + + return rc; +} + +int cam_ife_csid_ver2_read(void *hw_priv, + void *read_args, uint32_t arg_size) +{ + CAM_ERR(CAM_ISP, "CSID: un supported"); + + return -EINVAL; +} + +int cam_ife_csid_ver2_write(void *hw_priv, + void *write_args, uint32_t arg_size) +{ + CAM_ERR(CAM_ISP, "CSID: un supported"); + return -EINVAL; +} + +static int cam_ife_csid_ver2_top_cfg( + struct cam_ife_csid_ver2_hw *csid_hw, void *cmd_args) +{ + struct cam_ife_csid_top_config_args *top_args; + struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t hw_idx; + int rc = 0; + + if ((!csid_hw) || (!cmd_args)) + return -EINVAL; + + top_args = (struct cam_ife_csid_top_config_args *)cmd_args; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + hw_idx = csid_hw->hw_intf->hw_idx; + csid_hw->top_cfg.out_ife_en = true; + + /* config out_core parameter*/ + + switch (top_args->input_core_type) { + case CAM_IFE_CSID_INPUT_CORE_NONE: + csid_hw->top_cfg.input_core_type = + CAM_IFE_CSID_INPUT_CORE_SEL_NONE; + csid_hw->top_cfg.out_ife_en = false; + break; + + case CAM_IFE_CSID_INPUT_CORE_IFE: + csid_hw->top_cfg.input_core_type = + CAM_IFE_CSID_INPUT_CORE_SEL_INTERNAL; + break; + + case CAM_IFE_CSID_INPUT_CORE_SFE: + 
csid_hw->top_cfg.out_ife_en = false; + fallthrough; + case CAM_IFE_CSID_INPUT_CORE_SFE_IFE: + + if (top_args->core_idx == 0) { + csid_hw->top_cfg.input_core_type = + CAM_IFE_CSID_INPUT_CORE_SEL_SFE_0; + } else if (top_args->core_idx == 1) { + csid_hw->top_cfg.input_core_type = + CAM_IFE_CSID_INPUT_CORE_SEL_SFE_1; + } else if (top_args->core_idx == 2) { + csid_hw->top_cfg.input_core_type = + CAM_IFE_CSID_INPUT_CORE_SEL_SFE_2; + } else { + rc = -EINVAL; + CAM_ERR(CAM_ISP, + "CSID: %u Invalid SFE node %d", + hw_idx, top_args->core_idx); + } + + break; + + case CAM_IFE_CSID_INPUT_CORE_CUST_IFE: + + if (!(csid_reg->csid_cust_node_map[hw_idx] & + BIT(top_args->core_idx))) { + CAM_ERR(CAM_ISP, + "CSID: %u not supported for cust node %d", + hw_idx, top_args->core_idx); + rc = -EINVAL; + break; + } + + if (top_args->core_idx == 0) { + csid_hw->top_cfg.input_core_type = + CAM_IFE_CSID_INPUT_CORE_SEL_CUST_NODE_0; + } else if (top_args->core_idx == 1) { + csid_hw->top_cfg.input_core_type = + CAM_IFE_CSID_INPUT_CORE_SEL_CUST_NODE_1; + } else { + rc = -EINVAL; + CAM_ERR(CAM_ISP, + "CSID: %u Invalid Cust node %d", + hw_idx, top_args->core_idx); + } + break; + default: + break; + } + + csid_hw->top_cfg.offline_sfe_en = top_args->is_sfe_offline; + csid_hw->top_cfg.sfe_fs = top_args->is_sfe_fs; + CAM_DBG(CAM_ISP, + "CSID[%u] input_core_type:%d ife_out:%d sfe_offline:%d sfe_fs:%d", + hw_idx, csid_hw->top_cfg.input_core_type, + csid_hw->top_cfg.out_ife_en, + csid_hw->top_cfg.offline_sfe_en, + csid_hw->top_cfg.sfe_fs); + CAM_DBG(CAM_ISP, + "CSID[%u] Top config received: input_core_type%d core_idx:%d", + hw_idx, top_args->input_core_type, top_args->core_idx); + + /*config dual sync params */ + if (csid_hw->sync_mode == CAM_ISP_HW_SYNC_NONE) + return rc; + else if (csid_hw->sync_mode == CAM_ISP_HW_SYNC_MASTER) + csid_hw->top_cfg.master_slave_sel = + csid_reg->top_reg->master_sel_val; + else + csid_hw->top_cfg.master_slave_sel = + csid_reg->top_reg->slave_sel_val; + + 
csid_hw->top_cfg.dual_en = true; + csid_hw->top_cfg.dual_sync_core_sel = csid_hw->dual_core_idx + 1; + CAM_DBG(CAM_ISP, + "CSID[%u] Top dual sync config core_sel: %d sync_mode: %d", + hw_idx, csid_hw->sync_mode, + csid_hw->top_cfg.dual_sync_core_sel); + + return rc; +} + +static int cam_ife_csid_ver2_get_mc_reg_val_pair( + struct cam_ife_csid_ver2_hw *csid_hw, + uint32_t *reg_val_pair, + struct cam_isp_csid_reg_update_args *rup_args) +{ + struct cam_ife_csid_ver2_reg_info *csid_reg; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + struct cam_ife_csid_ver2_rup_aup_mask rup_aup_mask = {0}; + int rc = 0; + uint32_t i = 0; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + for (i = 0; i < rup_args->num_res; i++) { + path_reg = csid_reg->path_reg[rup_args->res[i]->res_id]; + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid Path Resource [id %d name %s]", + csid_hw->hw_intf->hw_idx, + rup_args->res[i]->res_id, + rup_args->res[i]->res_name); + return -EINVAL; + } + rup_aup_mask.rup_mask |= path_reg->rup_mask; + rup_aup_mask.aup_mask |= path_reg->aup_mask; + rup_aup_mask.rup_aup_set_mask |= path_reg->rup_aup_set_mask; + } + + reg_val_pair[0] = csid_reg->cmn_reg->rup_cmd_addr; + reg_val_pair[1] = rup_aup_mask.rup_mask; + reg_val_pair[2] = csid_reg->cmn_reg->aup_cmd_addr; + reg_val_pair[3] = rup_aup_mask.aup_mask; + reg_val_pair[4] = csid_reg->cmn_reg->rup_aup_cmd_addr; + reg_val_pair[5] = rup_aup_mask.rup_aup_set_mask; + + /** + * If not an actual request, configure last applied MUP + */ + if (rup_args->reg_write) + reg_val_pair[5] |= (rup_args->last_applied_mup << + csid_reg->cmn_reg->mup_shift_val); + else { + if (unlikely(rup_args->add_toggled_mup_entry)) + reg_val_pair[5] |= ((~csid_hw->rx_cfg.mup & 0x1) << + csid_reg->cmn_reg->mup_shift_val); + else + reg_val_pair[5] |= (csid_hw->rx_cfg.mup << + csid_reg->cmn_reg->mup_shift_val); + } + + CAM_DBG(CAM_ISP, + "CSID[%d] configure rup: 0x%x, offset: 0x%x, aup: 0x%x, 
offset: 0x%x toggle MUP: %s", + csid_hw->hw_intf->hw_idx, reg_val_pair[1], reg_val_pair[0], + reg_val_pair[3], reg_val_pair[2], + CAM_BOOL_TO_YESNO(rup_args->add_toggled_mup_entry)); + CAM_DBG(CAM_ISP, "CSID[%d] rup_aup_set reg: 0x%x, offset: 0x%x via %s", + csid_hw->hw_intf->hw_idx,reg_val_pair[5], reg_val_pair[4], + (rup_args->reg_write ? "AHB" : "CDM")); + return rc; +} + +static int cam_ife_csid_ver2_get_sc_reg_val_pair( + struct cam_ife_csid_ver2_hw *csid_hw, + uint32_t *reg_val_pair, + struct cam_isp_csid_reg_update_args *rup_args) +{ + uint32_t i = 0; + struct cam_ife_csid_ver2_reg_info *csid_reg; + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + int rc = 0; + struct cam_ife_csid_ver2_rup_aup_mask rup_aup_mask = {0}; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + for (i = 0; i < rup_args->num_res; i++) { + path_reg = csid_reg->path_reg[rup_args->res[i]->res_id]; + if (!path_reg) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid Path Resource [id %d name %s]", + csid_hw->hw_intf->hw_idx, + rup_args->res[i]->res_id, + rup_args->res[i]->res_name); + + return -EINVAL; + } + rup_aup_mask.rup_mask |= path_reg->rup_aup_mask; + } + + reg_val_pair[0] = csid_reg->cmn_reg->rup_aup_cmd_addr; + reg_val_pair[1] = rup_aup_mask.rup_mask; + + /** + * If not an actual request, configure last applied MUP + */ + if (rup_args->reg_write) + reg_val_pair[1] |= (rup_args->last_applied_mup << + csid_reg->cmn_reg->mup_shift_val); + else { + if (unlikely(rup_args->add_toggled_mup_entry)) + reg_val_pair[1] |= ((~csid_hw->rx_cfg.mup & 0x1) << + csid_reg->cmn_reg->mup_shift_val); + else + reg_val_pair[1] |= (csid_hw->rx_cfg.mup << + csid_reg->cmn_reg->mup_shift_val); + } + + CAM_DBG(CAM_ISP, "CSID[%d] configure rup_aup_mup: 0x%x offset: 0x%x via %s", + csid_hw->hw_intf->hw_idx, reg_val_pair[1], reg_val_pair[0], + (rup_args->reg_write ? 
"AHB" : "CDM")); + return rc; +} + +static int cam_ife_csid_ver2_reg_update( + struct cam_ife_csid_ver2_hw *csid_hw, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_csid_reg_update_args *rup_args = cmd_args; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_hw_soc_info *soc_info = &csid_hw->hw_info->soc_info; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t num_reg_val_pairs = 0; + uint32_t reg_val_pair[6] = {0}; + int rc = 0; + uint32_t size, i = 0; + void __iomem *csid_clc_membase = soc_info->reg_map + [CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + if (arg_size != sizeof(struct cam_isp_csid_reg_update_args)) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid arg size: %d expected:%ld", + csid_hw->hw_intf->hw_idx, arg_size, + sizeof(struct cam_isp_csid_reg_update_args)); + return -EINVAL; + } + + if (!rup_args) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid args", csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + if (!rup_args->num_res || + rup_args->num_res > CAM_IFE_PIX_PATH_RES_MAX) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid num_res %u", + csid_hw->hw_intf->hw_idx, rup_args->num_res); + return -EINVAL; + } + + cdm_util_ops = (struct cam_cdm_utils_ops *)rup_args->res[0]->cdm_ops; + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid CDM ops", csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + num_reg_val_pairs = (csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SPLIT_RUP_AUP) ? 
+ 3 : 1; + + size = cdm_util_ops->cdm_required_size_reg_random(num_reg_val_pairs); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((!rup_args->reg_write) && ((size * 4) > rup_args->cmd.size)) { + CAM_ERR(CAM_ISP, "CSID[%u] buf size:%d is not sufficient, expected: %d", + csid_hw->hw_intf->hw_idx, rup_args->cmd.size, (size*4)); + return -EINVAL; + } + + + if (rup_args->mup_en) { + csid_hw->rx_cfg.mup = rup_args->mup_val; + CAM_DBG(CAM_ISP, "CSID[%u] MUP %u", + csid_hw->hw_intf->hw_idx, csid_hw->rx_cfg.mup); + } + + if (csid_reg->cmn_reg->capabilities & CAM_IFE_CSID_CAP_SPLIT_RUP_AUP) + cam_ife_csid_ver2_get_mc_reg_val_pair(csid_hw, reg_val_pair, rup_args); + else + cam_ife_csid_ver2_get_sc_reg_val_pair(csid_hw, reg_val_pair, rup_args); + + if (rup_args->reg_write) { + for (i = 0; i < (2 * num_reg_val_pairs); i = i + 2) + cam_io_w_mb(reg_val_pair[i + 1], csid_clc_membase + reg_val_pair[i]); + } else { + cdm_util_ops->cdm_write_regrandom(rup_args->cmd.cmd_buf_addr, + num_reg_val_pairs, reg_val_pair); + rup_args->cmd.used_bytes = size * 4; + } + return rc; +} + +static int cam_ife_csid_ver2_program_offline_go_cmd( + struct cam_ife_csid_ver2_hw *csid_hw, + void *cmd_args, uint32_t arg_size) +{ + struct cam_ife_csid_offline_cmd_update_args *go_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops; + struct cam_ife_csid_ver2_reg_info *csid_reg; + uint32_t size; + uint32_t reg_val_pair[2]; + + if (!go_args) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid args", csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + if (arg_size != + sizeof(struct cam_ife_csid_offline_cmd_update_args)) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid arg size: %d expected:%ld", + csid_hw->hw_intf->hw_idx, arg_size, + sizeof(struct cam_ife_csid_offline_cmd_update_args)); + return -EINVAL; + } + + cdm_util_ops = (struct cam_cdm_utils_ops *)go_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "CSID[%u] Invalid CDM ops", csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + 
size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > go_args->cmd.size) { + CAM_ERR(CAM_ISP, "CSID[%u] buf size:%d is not sufficient, expected: %d", + csid_hw->hw_intf->hw_idx, go_args->cmd.size, (size*4)); + return -EINVAL; + } + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + reg_val_pair[0] = csid_reg->cmn_reg->offline_cmd_addr; + reg_val_pair[1] = 0x1; + + CAM_DBG(CAM_ISP, "CSID:%u offline_cmd 0x%x offset 0x%X", + csid_hw->hw_intf->hw_idx, + reg_val_pair[1], reg_val_pair[0]); + + cdm_util_ops->cdm_write_regrandom(go_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + + go_args->cmd.used_bytes = size * 4; + + return 0; +} + +static int cam_ife_csid_ver2_get_time_stamp( + struct cam_ife_csid_ver2_hw *csid_hw, void *cmd_args) +{ + const struct cam_ife_csid_ver2_path_reg_info *path_reg; + struct cam_isp_resource_node *res = NULL; + struct cam_ife_csid_ver2_path_cfg *path_cfg = NULL; + struct cam_hw_soc_info *soc_info; + struct cam_csid_get_time_stamp_args *timestamp_args; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct timespec64 ts; + + timestamp_args = (struct cam_csid_get_time_stamp_args *)cmd_args; + res = timestamp_args->node_res; + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH || + res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%u Invalid res_type:%d res id%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%u Invalid dev state :%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + path_reg = csid_reg->path_reg[res->res_id]; + + if (!path_reg) { + CAM_ERR(CAM_ISP, 
			"CSID:%u Invalid res :%d",
			csid_hw->hw_intf->hw_idx, res->res_id);
		return -EINVAL;
	}

	/* Previous-SOF timestamp register pair, only when requested */
	if (timestamp_args->get_prev_timestamp) {
		timestamp_args->prev_time_stamp_val = __cam_ife_csid_ver2_get_time_stamp(
			soc_info->reg_map[0].mem_base,
			path_reg->timestamp_perv0_sof_addr,
			path_reg->timestamp_perv1_sof_addr,
			path_cfg->ts_comb_vcdt_en,
			csid_reg->cmn_reg->ts_comb_vcdt_mask);
	}

	if (timestamp_args->get_curr_timestamp) {
		timestamp_args->time_stamp_val = __cam_ife_csid_ver2_get_time_stamp(
			soc_info->reg_map[0].mem_base,
			path_reg->timestamp_curr0_sof_addr,
			path_reg->timestamp_curr1_sof_addr,
			path_cfg->ts_comb_vcdt_en,
			csid_reg->cmn_reg->ts_comb_vcdt_mask);
	} else {
		/*
		 * Caller supplied a raw value in time_stamp_val: strip the
		 * combined VC/DT bits (when enabled) and convert QTimer
		 * ticks to nanoseconds.
		 * NOTE(review): the mask/convert is applied only on this
		 * path, implying __cam_ife_csid_ver2_get_time_stamp()
		 * returns an already-converted value - confirm against its
		 * definition (outside this view).
		 */
		if (path_cfg->ts_comb_vcdt_en)
			timestamp_args->time_stamp_val &=
				~(uint64_t)csid_reg->cmn_reg->ts_comb_vcdt_mask;
		timestamp_args->time_stamp_val = mul_u64_u32_div(timestamp_args->time_stamp_val,
			CAM_IFE_CSID_QTIMER_MUL_FACTOR,
			CAM_IFE_CSID_QTIMER_DIV_FACTOR);
	}

	/* Compute the QTimer-to-boottime offset once and cache it */
	if (qtime_to_boottime == 0) {
		if (timestamp_args->raw_boot_time)
			ts = *timestamp_args->raw_boot_time;
		else
			ktime_get_boottime_ts64(&ts);
		qtime_to_boottime =
			(uint64_t)((ts.tv_sec * 1000000000) +
			ts.tv_nsec) - (int64_t)timestamp_args->time_stamp_val;
	}

	timestamp_args->boot_timestamp = timestamp_args->time_stamp_val +
		qtime_to_boottime;
	CAM_DBG(CAM_ISP, "CSID:%u Resource[id:%d name:%s timestamp:%lld]",
		csid_hw->hw_intf->hw_idx, res->res_id, res->res_name,
		timestamp_args->boot_timestamp);
	/* Cache last reported timestamps for later debug/delta reporting */
	csid_hw->timestamp.prev_sof_ts = timestamp_args->time_stamp_val;
	csid_hw->timestamp.prev_boot_ts = timestamp_args->boot_timestamp;

	return 0;
}

/*
 * cam_ife_csid_ver2_print_hbi_vbi() - log measured HBI/VBI counts for every
 * streaming path (RDI0 onward) of this CSID.
 *
 * Returns 0 on success, -EINVAL when powered down or a path has no regs.
 */
static int cam_ife_csid_ver2_print_hbi_vbi(
	struct cam_ife_csid_ver2_hw *csid_hw)
{
	int i;
	struct cam_isp_resource_node *res;
	struct cam_hw_soc_info *soc_info;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg = NULL;
	struct cam_ife_csid_ver2_reg_info *csid_reg;
	const struct cam_ife_csid_ver2_common_reg_info *cmn_reg =
		NULL;
	uint32_t hbi, vbi;

	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;
	cmn_reg = csid_reg->cmn_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_ERR(CAM_ISP, "CSID[%u] Invalid dev state :%d",
			csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	for (i = CAM_IFE_PIX_PATH_RES_RDI_0; i < CAM_IFE_PIX_PATH_RES_MAX; i++) {
		res = &csid_hw->path_res[i];
		/* Only streaming paths have meaningful format-measure data */
		if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING)
			continue;

		path_reg = csid_reg->path_reg[res->res_id];
		if (!path_reg) {
			CAM_ERR(CAM_ISP, "CSID:%u invalid res %d",
				csid_hw->hw_intf->hw_idx, res->res_id);
			return -EINVAL;
		}
		/* format_measure1 packs min/max HBI; format_measure2 holds VBI */
		hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			path_reg->format_measure1_addr);
		vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			path_reg->format_measure2_addr);
		CAM_INFO(CAM_ISP,
			"CSID[%u] Resource[id:%d, name:%s, min_hbi: %d max_hbi: %d cycles, vbi: %d cycles]",
			csid_hw->hw_intf->hw_idx, res->res_id, res->res_name,
			(hbi & cmn_reg->format_measure_min_hbi_mask),
			(hbi >> cmn_reg->format_measure_max_hbi_shift), vbi);
	}

	return 0;
}

/*
 * cam_ife_csid_ver2_set_dynamic_switch_config() - apply a sensor mode-switch
 * update: new MUP value and, optionally, a reset of the initial-frame-discard
 * configuration based on the number of starting exposures.
 *
 * @cmd_args: struct cam_ife_csid_mode_switch_update_args
 *
 * Returns 0 on success, -EINVAL on a NULL hw pointer.
 */
static int cam_ife_csid_ver2_set_dynamic_switch_config(
	struct cam_ife_csid_ver2_hw *csid_hw,
	void *cmd_args)
{
	struct cam_ife_csid_mode_switch_update_args *switch_update = NULL;

	if (!csid_hw)
		return -EINVAL;

	switch_update =
		(struct cam_ife_csid_mode_switch_update_args *)cmd_args;

	if (switch_update->mup_args.use_mup) {
		csid_hw->rx_cfg.mup = switch_update->mup_args.mup_val;
		CAM_DBG(CAM_ISP, "CSID[%u] MUP %u",
			csid_hw->hw_intf->hw_idx, csid_hw->rx_cfg.mup);
	}

	/* Handle number of frames to initially drop based on num starting exposures */
	if (switch_update->exp_update_args.reset_discard_cfg) {
		struct cam_ife_csid_discard_frame_cfg_update *exp_update_args;
		struct cam_ife_csid_ver2_path_cfg *path_cfg;
		struct cam_isp_resource_node *res;
		int i;

		exp_update_args = &switch_update->exp_update_args;
		/* Walk RDI0..RDI2: RDIs beyond the exposure count need no discard */
		for (i = CAM_IFE_PIX_PATH_RES_RDI_0; i <= CAM_IFE_PIX_PATH_RES_RDI_2; i++) {
			res = &csid_hw->path_res[i];
			path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;

			/* Skip if path_cfg is NULL */
			if (!path_cfg)
				continue;

			/* Only acquired (reserved/init) paths are reconfigured here */
			if ((res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
				(res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW)) {
				if ((i - CAM_IFE_PIX_PATH_RES_RDI_0) >=
					exp_update_args->num_exposures) {
					path_cfg->skip_discard_frame_cfg = true;
					if (path_cfg->discard_init_frames) {
						path_cfg->discard_init_frames = false;
						path_cfg->num_frames_discard = 0;
						/* Drop the per-path discard refcount */
						atomic_dec(&csid_hw->discard_frame_per_path);
						CAM_DBG(CAM_ISP,
							"CSID[%u] Reset discard config for %s",
							csid_hw->hw_intf->hw_idx, res->res_name);
					}
				}
			}
		}
	}

	return 0;
}

/*
 * cam_ife_csid_ver2_set_csid_clock() - record the requested CSID clock rate.
 * Only caches the value; applying it happens elsewhere.
 */
static int cam_ife_csid_ver2_set_csid_clock(
	struct cam_ife_csid_ver2_hw *csid_hw,
	void *cmd_args)
{
	struct cam_ife_csid_clock_update_args *clk_update = NULL;

	if (!csid_hw)
		return -EINVAL;

	clk_update =
		(struct cam_ife_csid_clock_update_args *)cmd_args;

	csid_hw->clk_rate = clk_update->clk_rate;

	CAM_DBG(CAM_ISP, "CSID:%d, clk_rate=%lld", csid_hw->hw_intf->hw_idx, csid_hw->clk_rate);

	return 0;
}

/*
 * cam_ife_csid_ver2_mini_dump() - serialize CSID state (clock, hw state,
 * per-path cfg, rx cfg, flags, CID data) into a mini-dump buffer.
 *
 * @cmd_args: struct cam_hw_mini_dump_args with destination buffer and length
 *
 * Writes bytes_written = 0 and returns 0 when the buffer is too small.
 */
static int cam_ife_csid_ver2_mini_dump(
	struct cam_ife_csid_ver2_hw *csid_hw,
	void *cmd_args)
{
	struct cam_ife_csid_ver2_mini_dump_data *md;
	uint32_t i = 0;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_ife_csid_ver2_res_mini_dump *md_res;
	struct cam_isp_resource_node *res;
	struct cam_hw_mini_dump_args *md_args;

	md_args = (struct cam_hw_mini_dump_args *)cmd_args;
	if (md_args->len < sizeof(*md)) {
		md_args->bytes_written = 0;
		return 0;
	}

	md = (struct cam_ife_csid_ver2_mini_dump_data *)
		((uint8_t *)md_args->start_addr);
	md->clk_rate = csid_hw->clk_rate;
	md->hw_state = csid_hw->hw_info->hw_state;

	for (i = 0; i <
CAM_IFE_PIX_PATH_RES_MAX; i++) { + res = &csid_hw->path_res[i]; + path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv; + if (!path_cfg) + continue; + + md_res = &md->res[i]; + md_res->res_id = res->res_id; + scnprintf(md_res->res_name, CAM_ISP_RES_NAME_LEN, res->res_name); + memcpy(&md_res->path_cfg, path_cfg, sizeof(*path_cfg)); + } + + memcpy(&md->rx_cfg, &csid_hw->rx_cfg, sizeof(struct cam_ife_csid_ver2_rx_cfg)); + memcpy(&md->flags, &csid_hw->flags, sizeof(struct cam_ife_csid_hw_flags)); + memcpy(md->cid_data, csid_hw->cid_data, + sizeof(struct cam_ife_csid_cid_data) * CAM_IFE_CSID_CID_MAX); + md_args->bytes_written = sizeof(*md); + + return 0; +} + +static void *cam_ife_csid_ver2_user_dump_info( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_isp_resource_node *res = NULL; + struct cam_hw_info *hw_info = NULL; + struct cam_ife_csid_ver2_hw *csid_hw = NULL; + struct cam_ife_csid_ver2_reg_info *csid_reg = NULL; + struct cam_hw_soc_info *soc_info = NULL; + uint32_t *addr; + uint32_t frame = 0; + uint32_t val0 = 0, val1 = 0, val2 = 0; + + res = (struct cam_isp_resource_node *)dump_struct; + hw_info = (struct cam_hw_info *)res->hw_intf->hw_priv; + csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info; + csid_reg = csid_hw->core_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + frame = cam_io_r_mb(soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base + + csid_reg->path_reg[res->res_id]->format_measure0_addr); + + val0 = cam_io_r_mb(soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base + + csid_reg->path_reg[res->res_id]->debug_camif_0_addr); + val1 = cam_io_r_mb(soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base + + csid_reg->path_reg[res->res_id]->debug_camif_1_addr); + val2 = cam_io_r_mb(soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base + + csid_reg->path_reg[res->res_id]->debug_halt_status_addr); + + addr = (uint32_t *)addr_ptr; + + *addr++ = ((frame >> csid_reg->cmn_reg->format_measure_height_shift_val) & + 
		csid_reg->cmn_reg->format_measure_height_mask_val);
	*addr++ = frame & csid_reg->cmn_reg->format_measure_width_mask_val;
	*addr++ = val0;
	*addr++ = val1;
	*addr++ = val2;

	return addr;
}

/*
 * cam_ife_csid_ver2_user_dump() - dump clock rate and per-path debug info
 * for all active paths via the common user-dump helper.
 *
 * @cmd_args: struct cam_isp_hw_dump_args
 *
 * Returns 0 on success, -EINVAL on bad args or powered-down hw, or the
 * helper's error code.
 */
static int cam_ife_csid_ver2_user_dump(
	struct cam_ife_csid_ver2_hw *csid_hw,
	void *cmd_args)
{
	uint32_t i = 0;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_isp_resource_node *res;
	struct cam_isp_hw_dump_args *dump_args;
	int rc = 0;

	if (!csid_hw || !cmd_args) {
		CAM_ERR(CAM_ISP, "Invalid bus private data");
		return -EINVAL;
	} else if (csid_hw->hw_info->hw_state == CAM_HW_STATE_POWER_DOWN) {
		CAM_WARN(CAM_ISP,
			"CSID:%u powered down",
			csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	dump_args = (struct cam_isp_hw_dump_args *)cmd_args;

	rc = cam_common_user_dump_helper(dump_args, cam_common_user_dump_clock,
		csid_hw->hw_info, sizeof(uint64_t), "CLK_RATE_PRINT:");

	if (rc) {
		CAM_ERR(CAM_ISP, "CSID:%u VER2: Clock dump failed, rc: %d",
			csid_hw->hw_intf->hw_idx, rc);
		return rc;
	}

	/* Loop through CSID items */
	for (i = 0; i < CAM_IFE_PIX_PATH_RES_MAX; i++) {
		res = &csid_hw->path_res[i];

		/* Unreserved paths carry no useful state - skip them */
		if (res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
			CAM_DBG(CAM_ISP,
				"CSID:%u VER2: path inactive res ID: %d, continuing",
				csid_hw->hw_intf->hw_idx, res->res_id);
			continue;
		}

		path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
		if (!path_cfg)
			continue;

		rc = cam_common_user_dump_helper(dump_args, cam_ife_csid_ver2_user_dump_info,
			res, sizeof(uint32_t), "CSID2_PATH.%s:", res->res_name);

		if (rc) {
			CAM_ERR(CAM_ISP, "CSID:%u VER2: Info dump failed, rc:%d",
				csid_hw->hw_intf->hw_idx, rc);
			return rc;
		}

	}
	return 0;
}

/*
 * cam_ife_csid_ver2_dual_sync_cfg() - store dual-IFE sync mode and the
 * partner core index for this CSID.
 *
 * @cmd_args: struct cam_ife_csid_dual_sync_args
 */
static int cam_ife_csid_ver2_dual_sync_cfg(
	struct cam_ife_csid_ver2_hw *csid_hw,
	void *cmd_args)
{
	struct cam_ife_csid_dual_sync_args *dual_sync_args;

	if (!csid_hw || !cmd_args) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %pK",
			csid_hw, cmd_args);
		return -EINVAL;
	}

	dual_sync_args = (struct cam_ife_csid_dual_sync_args *)cmd_args;
	csid_hw->sync_mode = dual_sync_args->sync_mode;
	csid_hw->dual_core_idx = dual_sync_args->dual_core_id;

	CAM_DBG(CAM_ISP, "CSID[%u] sync_mode %d dual_core_idx: %d",
		csid_hw->hw_intf->hw_idx, csid_hw->sync_mode,
		csid_hw->dual_core_idx);

	return 0;
}

/*
 * cam_ife_csid_ver2_dump_crop_reg() - log crop/config registers and software
 * dimensions for one streaming path, under lock_state so the hw cannot be
 * powered down mid-read.
 *
 * @hw_info: generic hw wrapper for this CSID
 * @path_id: pointer to the path index (must be < CAM_IFE_PIX_PATH_RES_MAX)
 */
static int cam_ife_csid_ver2_dump_crop_reg(
	struct cam_hw_info *hw_info, uint32_t *path_id)
{
	struct cam_ife_csid_ver2_hw *csid_hw;
	const struct cam_ife_csid_ver2_reg_info *csid_reg;
	struct cam_hw_soc_info *soc_info;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg;
	struct cam_isp_resource_node *path_res;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	uint32_t hcrop = 0, vcrop = 0, cfg0 = 0, cfg1 = 0;
	int rc = 0;
	void __iomem *mem_base;

	csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info;

	spin_lock(&csid_hw->lock_state);
	if ((csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) ||
		(*path_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
		CAM_ERR(CAM_ISP, "CSID[%u] Invalid dev state :%d or path_id :%u",
			csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state,
			*path_id);
		spin_unlock(&csid_hw->lock_state);
		return -EINVAL;
	}

	csid_reg = (struct cam_ife_csid_ver2_reg_info *)csid_hw->core_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;

	CAM_INFO(CAM_ISP, "CSID[%u] Clock Name=%s Clock Rate=%u",
		csid_hw->hw_intf->hw_idx, soc_info->clk_name[soc_info->src_clk_idx],
		cam_soc_util_get_applied_src_clk(soc_info, true));

	path_res = &csid_hw->path_res[*path_id];
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)path_res->res_priv;
	/* Registers are only meaningful while the path is streaming */
	if (path_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING && path_cfg) {
		path_reg = csid_reg->path_reg[*path_id];
		hcrop = cam_io_r_mb(mem_base + path_reg->hcrop_addr);
		vcrop = cam_io_r_mb(mem_base + path_reg->vcrop_addr);
		cfg0 = cam_io_r_mb(mem_base +
			path_reg->cfg0_addr);
		cfg1 = cam_io_r_mb(mem_base + path_reg->cfg1_addr);
		CAM_INFO(CAM_ISP, "CSID[%u] %s HCROP=0x%x VCROP=0x%x, CFG0=0x%x CFG1=0x%x",
			csid_hw->hw_intf->hw_idx, path_res->res_name, hcrop, vcrop, cfg0, cfg1);
		CAM_INFO(CAM_ISP, "CSID[%u] %s width=%d height=%d start line:%d end line:%d",
			csid_hw->hw_intf->hw_idx, path_res->res_name, path_cfg->width,
			path_cfg->height, path_cfg->start_line, path_cfg->end_line);
	}

	spin_unlock(&csid_hw->lock_state);
	return rc;
}

/*
 * cam_ife_csid_ver2_set_discard_frame_cfg() - arm initial-frame discard on a
 * path: the first num_frames SOFs after stream-on will be dropped.
 *
 * @cmd_args: struct cam_ife_csid_discard_init_frame_args
 *
 * Returns 0 (also when no discard is requested), -EINVAL on bad input.
 */
static int cam_ife_csid_ver2_set_discard_frame_cfg(
	struct cam_ife_csid_ver2_hw *csid_hw,
	void *cmd_args)
{
	struct cam_isp_resource_node *res;
	struct cam_ife_csid_ver2_path_cfg *path_cfg;
	struct cam_ife_csid_discard_init_frame_args *discard_config = NULL;

	if (!csid_hw)
		return -EINVAL;

	discard_config =
		(struct cam_ife_csid_discard_init_frame_args *)cmd_args;

	/* 0xffffffff is the sentinel for an uninitialized/bogus count */
	if (discard_config->num_frames == 0xffffffff) {
		CAM_ERR(CAM_ISP, "CSID[%u] Invalid number of frames: 0x%x",
			csid_hw->hw_intf->hw_idx, discard_config->num_frames);
		return -EINVAL;
	}

	if (!discard_config->num_frames) {
		CAM_DBG(CAM_ISP, "CSID[%u] No discard requested", csid_hw->hw_intf->hw_idx);
		return 0;
	}

	res = discard_config->res;
	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CAM_ERR(CAM_ISP, "CSID[%u] Invalid res_type: %d res id: %d",
			csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	/* Handle first stream on and consecutive streamons post flush */
	if ((res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
		(res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW)) {
		/* Skip if already set or need to skip based on stream on exposures */
		path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
		if (path_cfg->skip_discard_frame_cfg || path_cfg->discard_init_frames)
			goto end;

		path_cfg->discard_init_frames = true;
		path_cfg->sof_cnt = 0;
		path_cfg->num_frames_discard =
			discard_config->num_frames;
		/* Track how many paths still have pending discards */
		atomic_inc(&csid_hw->discard_frame_per_path);
		CAM_DBG(CAM_ISP,
			"CSID[%u] discard num of frames: %u for path: %s discard_ref_cnt: %u",
			csid_hw->hw_intf->hw_idx, discard_config->num_frames, res->res_name,
			atomic_read(&csid_hw->discard_frame_per_path));
	}

end:
	return 0;
}

/*
 * cam_ife_csid_ver2_rdi_lcr_cfg() - validate and enable LCR (PDAF) routing
 * for an RDI path; also updates top_cfg when SFE is in the pipeline.
 *
 * @cmd_args: the struct cam_isp_resource_node of the RDI path
 *
 * Returns 0 on success, -EINVAL if the path lacks LCR capability, is the
 * wrong RDI for non-shdr use, or outputs packed data.
 */
static int cam_ife_csid_ver2_rdi_lcr_cfg(
	struct cam_ife_csid_ver2_hw *csid_hw, void *cmd_args)
{
	const struct cam_ife_csid_ver2_path_reg_info *path_reg;
	struct cam_ife_csid_ver2_reg_info *csid_reg;
	struct cam_ife_csid_ver2_path_cfg *path_cfg = NULL;
	struct cam_isp_resource_node *res = cmd_args;

	if (!csid_hw || !cmd_args) {
		CAM_ERR(CAM_ISP, "Invalid params");
		return -EINVAL;
	}

	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;

	path_reg = csid_reg->path_reg[res->res_id];
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
	if (!path_cfg || !path_reg || !path_reg->capabilities ||
		!(path_reg->capabilities & CAM_IFE_CSID_CAP_INPUT_LCR)) {
		CAM_ERR(CAM_ISP, "CSID[%u] Invalid res %s",
			csid_hw->hw_intf->hw_idx, res->res_name);
		return -EINVAL;
	}

	/* Without SFE sHDR, only RDI_0 may feed LCR */
	if (!path_cfg->sfe_shdr && (res->res_id != CAM_IFE_PIX_PATH_RES_RDI_0)) {
		CAM_ERR(CAM_ISP, "CSID[%u] Invalid res: %s, capabilities 0x%x sfe_shdr: %u",
			csid_hw->hw_intf->hw_idx, res->res_name,
			path_reg->capabilities, path_cfg->sfe_shdr);
		return -EINVAL;
	}

	/*
	 * LCR should not be on for a resource if CSID is giving packed data
	 * this case would come for formats which are not supported.
	 */
	if (path_cfg->path_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0].packing_fmt) {
		CAM_ERR(CAM_ISP, "CSID[%u] [%s] LCR not supported in_format %d out_format %d",
			csid_hw->hw_intf->hw_idx, res->res_name,
			path_cfg->in_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0],
			path_cfg->out_format);
		return -EINVAL;
	}

	/* if CSID unpacked data is not in MSB, we lose few bits going into PDAF, warn for now */
	if (!path_cfg->csid_out_unpack_msb)
		CAM_WARN(CAM_ISP,
			"CSID[%u] [%s] Input data to LCR is in LSB, in_format %d out_format %d",
			csid_hw->hw_intf->hw_idx, res->res_name,
			path_cfg->in_format[CAM_IFE_CSID_MULTI_VC_DT_GRP_0],
			path_cfg->out_format);

	/* With SFE enabled, route this RDI to LCR via the top config register */
	if (csid_hw->flags.sfe_en)
		csid_hw->top_cfg.rdi_lcr |= BIT(res->res_id) <<
			csid_reg->top_reg->rdi_lcr_shift_val;

	csid_hw->flags.rdi_lcr_en = true;
	path_cfg->lcr_en = true;

	CAM_DBG(CAM_ISP, "CSID[%u] %s top_cfg %u",
		csid_hw->hw_intf->hw_idx, res->res_name, csid_hw->top_cfg.rdi_lcr);

	return 0;
}

/*
 * cam_ife_csid_init_config_update() - recompute the epoch line for a path
 * from the requested epoch factor (percentage of frame height).
 *
 * @cmd_args: struct cam_isp_hw_init_config_update
 * @arg_size: must equal sizeof(struct cam_isp_hw_init_config_update)
 *
 * No-op for resources that do not handle camif IRQs.
 */
static int cam_ife_csid_init_config_update(
	void *cmd_args, uint32_t arg_size)
{
	struct cam_isp_hw_init_config_update *init_cfg = cmd_args;
	struct cam_isp_resource_node *res = init_cfg->node_res;
	struct cam_ife_csid_ver2_path_cfg *path_cfg = NULL;

	if (arg_size != sizeof(struct cam_isp_hw_init_config_update)) {
		CAM_ERR(CAM_ISP, "Invalid args size expected: %zu actual: %zu",
			sizeof(struct cam_isp_hw_init_config_update),
			arg_size);
		return -EINVAL;
	}

	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
	/* Skip epoch update if resource does not handle camif IRQs */
	if (!path_cfg->handle_camif_irq)
		goto end;

	/* epoch_factor is a percentage of the active frame height */
	path_cfg->epoch_cfg = (path_cfg->end_line - path_cfg->start_line) *
		init_cfg->init_config->epoch_cfg.epoch_factor / 100;

	if (path_cfg->epoch_cfg > path_cfg->end_line)
		path_cfg->epoch_cfg = path_cfg->end_line;

	/* Binning halves the effective line count */
	if (path_cfg->horizontal_bin || path_cfg->qcfa_bin)
		path_cfg->epoch_cfg >>= 1;

	CAM_DBG(CAM_ISP,
		"Init Update for res_name: %s epoch_factor: %x",
		res->res_name, path_cfg->epoch_cfg);
end:
	return 0;
}

/*
 * cam_ife_csid_ver2_dump_irq_desc() - print the injectable IRQ bits (mask,
 * name, description) for the requested CSID register unit into the
 * caller's line buffer.
 *
 * @args: struct cam_isp_irq_inject_param with reg_unit and line_buf
 *
 * Returns 0 on success, -EINVAL on a NULL arg or unknown reg unit.
 */
static int cam_ife_csid_ver2_dump_irq_desc(
	struct cam_ife_csid_ver2_hw *csid_hw, void *args)
{
	int i, j, offset = 0;
	struct cam_isp_irq_inject_param *inject_params = NULL;
	const struct cam_ife_csid_ver2_reg_info *csid_reg = NULL;

	if (!args) {
		CAM_ERR(CAM_ISP, "Invalid params");
		return -EINVAL;
	}

	inject_params = (struct cam_isp_irq_inject_param *)args;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;

	offset += scnprintf(inject_params->line_buf + offset,
		LINE_BUFFER_LEN - offset,
		"Printing executable IRQ for hw_type: CSID reg_unit: %d\n",
		inject_params->reg_unit);

	switch (inject_params->reg_unit) {
	case CAM_ISP_CSID_TOP_REG:
		for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0;
			i < CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX; i++) {
			offset += scnprintf(inject_params->line_buf + offset,
				LINE_BUFFER_LEN - offset,
				"TOP_IRQ_STATUS_REG[%d]:\n", i);

			/* A zero bitmask terminates the descriptor table */
			for (j = 0; j < csid_reg->num_top_err_irqs[i]; j++) {
				if (!(*csid_reg->top_irq_desc)[i][j].bitmask)
					break;

				offset += scnprintf(inject_params->line_buf + offset,
					LINE_BUFFER_LEN - offset, "%#12x : %s - %s\n",
					(*csid_reg->top_irq_desc)[i][j].bitmask,
					(*csid_reg->top_irq_desc)[i][j].err_name,
					(*csid_reg->top_irq_desc)[i][j].desc);
			}
		}
		break;
	case CAM_ISP_CSID_RX_REG:
		for (i = CAM_IFE_CSID_RX_IRQ_STATUS_REG0;
			i < CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX; i++) {
			offset += scnprintf(inject_params->line_buf + offset,
				LINE_BUFFER_LEN - offset,
				"RX_IRQ_STATUS_REG[%d]:\n", i);

			for (j = 0; j < csid_reg->num_rx_err_irqs[i]; j++) {
				if (!(*csid_reg->rx_irq_desc)[i][j].bitmask)
					break;

				offset += scnprintf(inject_params->line_buf + offset,
					LINE_BUFFER_LEN - offset, "%#12x : %s\n",
					(*csid_reg->rx_irq_desc)[i][j].bitmask,
					(*csid_reg->rx_irq_desc)[i][j].desc);
			}
		}
		break;
	case CAM_ISP_CSID_PATH_IPP_REG:
	case CAM_ISP_CSID_PATH_PPP_REG:
	case
	     CAM_ISP_CSID_PATH_RDI0_REG:
	case CAM_ISP_CSID_PATH_RDI1_REG:
	case CAM_ISP_CSID_PATH_RDI2_REG:
	case CAM_ISP_CSID_PATH_RDI3_REG:
	case CAM_ISP_CSID_PATH_RDI4_REG:
		/* All paths share one descriptor table */
		for (i = 0; i < csid_reg->num_path_err_irqs; i++)
			offset += scnprintf(inject_params->line_buf + offset,
				LINE_BUFFER_LEN - offset, "%#12x : %s\n",
				csid_reg->path_irq_desc[i].bitmask,
				csid_reg->path_irq_desc[i].desc);
		break;
	default:
		scnprintf(inject_params->line_buf + offset,
			LINE_BUFFER_LEN - offset,
			"No matched reg unit for injection\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * cam_ife_csid_ver2_irq_inject() - fire a fake IRQ by writing irq_mask into
 * the matching IRQ "set" register, then triggering the IRQ cmd register.
 *
 * @args: struct cam_isp_irq_inject_param with reg_unit and irq_mask
 *
 * Returns 0 on success, -EINVAL for an unknown reg unit.
 */
static int cam_ife_csid_ver2_irq_inject(
	struct cam_ife_csid_ver2_hw *csid_hw, void *args)
{
	int i, j;
	uint32_t irq_set_addr = 0;
	struct cam_hw_soc_info *soc_info;
	struct cam_isp_irq_inject_param *inject_params = NULL;
	const struct cam_ife_csid_ver2_reg_info *csid_reg = NULL;
	void __iomem *mem_base;

	if (!args) {
		CAM_ERR(CAM_ISP, "Invalid params");
		return -EINVAL;
	}

	inject_params = (struct cam_isp_irq_inject_param *)args;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;

	switch (inject_params->reg_unit) {
	case CAM_ISP_CSID_TOP_REG: {
		/* Find which status register bank owns the requested bit */
		for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0;
			i < CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX; i++) {
			for (j = 0; j < csid_reg->num_top_err_irqs[i]; j++) {
				if ((*csid_reg->top_irq_desc)[i][j].bitmask ==
					inject_params->irq_mask) {
					irq_set_addr = csid_reg->cmn_reg->top_irq_set_addr[i];
					break;
				}
			}
		}
		break;
	}
	case CAM_ISP_CSID_RX_REG: {
		for (i = CAM_IFE_CSID_RX_IRQ_STATUS_REG0;
			i < CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX; i++) {
			for (j = 0; j < csid_reg->num_rx_err_irqs[i]; j++) {
				if ((*csid_reg->rx_irq_desc)[i][j].bitmask ==
					inject_params->irq_mask) {
					irq_set_addr = csid_reg->csi2_reg->irq_set_addr[i];
					break;
				}
			}
		}
		break;
	}
	case CAM_ISP_CSID_PATH_IPP_REG: {
		irq_set_addr =
			csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_IPP]->irq_set_addr;
		break;
	}
	case CAM_ISP_CSID_PATH_PPP_REG: {
		irq_set_addr =
			csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_PPP]->irq_set_addr;
		break;
	}
	case CAM_ISP_CSID_PATH_RDI0_REG: {
		irq_set_addr =
			csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_RDI_0]->irq_set_addr;
		break;
	}
	case CAM_ISP_CSID_PATH_RDI1_REG: {
		irq_set_addr =
			csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_RDI_1]->irq_set_addr;
		break;
	}
	case CAM_ISP_CSID_PATH_RDI2_REG: {
		irq_set_addr =
			csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_RDI_2]->irq_set_addr;
		break;
	}
	case CAM_ISP_CSID_PATH_RDI3_REG: {
		irq_set_addr =
			csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_RDI_3]->irq_set_addr;
		break;
	}
	case CAM_ISP_CSID_PATH_RDI4_REG: {
		irq_set_addr =
			csid_reg->path_reg[CAM_IFE_PIX_PATH_RES_RDI_4]->irq_set_addr;
		break;
	}
	default:
		CAM_INFO(CAM_ISP, "No matched reg unit for injection");
		return -EINVAL;
	}

	/* Write the mask into the set register, then latch via the cmd reg.
	 * NOTE(review): 0x10 is presumably the "set" opcode of irq_cmd -
	 * confirm against the register header.
	 */
	cam_io_w_mb(inject_params->irq_mask, mem_base + irq_set_addr);
	cam_io_w_mb(0x10, mem_base + csid_reg->cmn_reg->irq_cmd_addr);
	CAM_INFO(CAM_ISP, "Injected : irq_mask %#x set_reg_offset %#x",
		inject_params->irq_mask, irq_set_addr);

	return 0;
}

/*
 * cam_ife_csid_ver2_reset_out_of_sync_cnt() - clear the mode-switch
 * out-of-sync counter of one path.
 *
 * @args: struct cam_csid_reset_out_of_sync_count_args
 */
static int cam_ife_csid_ver2_reset_out_of_sync_cnt(
	struct cam_ife_csid_ver2_hw *csid_hw, void *args)
{
	struct cam_ife_csid_ver2_path_cfg *path_cfg = NULL;
	struct cam_isp_resource_node *res = NULL;

	res = ((struct cam_csid_reset_out_of_sync_count_args *)args)->node_res;
	path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;

	if (!path_cfg) {
		CAM_ERR(CAM_ISP, "Invalid res %s", res->res_name);
		return -EINVAL;
	}

	atomic_set(&path_cfg->switch_out_of_sync_cnt, 0);

	CAM_DBG(CAM_ISP,
		"Reset out of sync cnt for res:%s",
		res->res_name);

	return 0;
}

/*
 * cam_ife_csid_ver2_drv_config() - program the DRV (dynamic rate voting)
 * cfg registers: enable bit, per-path idle enables, RUP source (init only)
 * and the timeout value.
 *
 * @cmd_args: struct cam_ife_csid_drv_config_args
 *
 * Returns 0 on success, -EINVAL on bad args or when a non-init update
 * arrives before the one-time init configuration.
 */
static int cam_ife_csid_ver2_drv_config(
	struct cam_ife_csid_ver2_hw *csid_hw, void *cmd_args)
{
	struct cam_hw_soc_info *soc_info;
	struct cam_ife_csid_ver2_reg_info *csid_reg;
	struct cam_ife_csid_drv_config_args *drv_config;
	uint32_t cfg0_val = 0, cfg1_val = 0;
	void __iomem *mem_base;
	int i;

	if (!csid_hw || !cmd_args) {
		CAM_ERR(CAM_ISP, "Invalid params");
		return -EINVAL;
	}

	drv_config = (struct cam_ife_csid_drv_config_args *) cmd_args;
	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = (struct cam_ife_csid_ver2_reg_info *) csid_hw->core_info->csid_reg;
	mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;

	cfg0_val = drv_config->drv_en << csid_reg->cmn_reg->drv_en_shift;

	/* Configure DRV RUP EN for init request, one time */
	if (drv_config->is_init_config) {
		/* Pick the RUP source in priority order: IPP, RDI_0, then PPP */
		if (csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP].res_state >=
			CAM_ISP_RESOURCE_STATE_RESERVED) {
			cfg1_val = csid_reg->cmn_reg->drv_rup_en_val_map[CAM_IFE_PIX_PATH_RES_IPP];
		} else if (csid_hw->path_res[CAM_IFE_PIX_PATH_RES_RDI_0].res_state >=
			CAM_ISP_RESOURCE_STATE_RESERVED) {
			cfg1_val =
				csid_reg->cmn_reg->drv_rup_en_val_map[CAM_IFE_PIX_PATH_RES_RDI_0];
		} else if (csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP].res_state >=
			CAM_ISP_RESOURCE_STATE_RESERVED) {
			cfg1_val = csid_reg->cmn_reg->drv_rup_en_val_map[CAM_IFE_PIX_PATH_RES_PPP];
		} else {
			CAM_ERR(CAM_ISP, "CSID:%u Failed to configure rup_en for drv",
				csid_hw->hw_intf->hw_idx);
			return -EINVAL;
		}

		cam_io_w_mb(cfg1_val, mem_base + csid_reg->cmn_reg->drv_cfg1_addr);
		csid_hw->drv_init_done = true;
	}

	/* Non-init updates are only legal after the init config ran */
	if (!csid_hw->drv_init_done) {
		CAM_ERR(CAM_ISP, "CSID:%u Failed to update drv config, init config not done",
			csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	/* Translate the caller's path bitmap into per-path idle-enable values */
	for (i = 0; i < CAM_ISP_MAX_PATHS; i++)
		cfg0_val |= ((drv_config->path_idle_en & BIT(i)) ?
			csid_reg->cmn_reg->drv_path_idle_en_val_map[i] : 0);

	cam_io_w_mb(cfg0_val, mem_base + csid_reg->cmn_reg->drv_cfg0_addr);

	/* cfg2 holds the DRV timeout */
	cam_io_w_mb(drv_config->timeout_val, mem_base + csid_reg->cmn_reg->drv_cfg2_addr);

	if (debug_drv)
		CAM_INFO(CAM_ISP,
			"CSID[%u] sfe_en:%s DRV config init_req:%s cfg0_val:0x%x cfg1_val:0x%x cfg2_val:0x%x",
			csid_hw->hw_intf->hw_idx, CAM_BOOL_TO_YESNO(csid_hw->flags.sfe_en),
			CAM_BOOL_TO_YESNO(drv_config->is_init_config), cfg0_val, cfg1_val,
			drv_config->timeout_val);

	CAM_DBG(CAM_ISP,
		"CSID[%u] sfe_en:%s DRV config init_req:%s cfg0_val:0x%x cfg1_val:0x%x cfg2_val:0x%x",
		csid_hw->hw_intf->hw_idx, CAM_BOOL_TO_YESNO(csid_hw->flags.sfe_en),
		CAM_BOOL_TO_YESNO(drv_config->is_init_config), cfg0_val, cfg1_val,
		drv_config->timeout_val);

	return 0;
}

/*
 * cam_ife_csid_ver2_irq_comp_cfg() - build a CDM write that programs the
 * IPP multi-context IRQ composite config (source/destination context masks).
 * No-op (returns 0) on hardware without multi-context support.
 *
 * @cmd_args: struct cam_isp_hw_get_cmd_update carrying the comp cfg and the
 *            destination command buffer
 * @arg_size: must equal sizeof(struct cam_isp_hw_get_cmd_update)
 */
static int cam_ife_csid_ver2_irq_comp_cfg(
	struct cam_ife_csid_ver2_hw *csid_hw, void *cmd_args, uint32_t arg_size)
{
	uint32_t comp_cfg_mask = 0;
	size_t size = 0;
	struct cam_isp_irq_comp_cfg *comp_cfg;
	const struct cam_ife_csid_ver2_mc_reg_info *reg_info = NULL;
	struct cam_cdm_utils_ops *cdm_util_ops;
	struct cam_isp_hw_get_cmd_update *cdm_args = NULL;
	uint32_t reg_val_pair[2];

	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
		CAM_ERR(CAM_ISP, "CSID %u Invalid args size expected: %zu actual: %zu",
			csid_hw->hw_intf->hw_idx, sizeof(struct cam_isp_hw_get_cmd_update),
			arg_size);
		return -EINVAL;
	}

	cdm_args = (struct cam_isp_hw_get_cmd_update *)cmd_args;
	if (!cdm_args) {
		CAM_ERR(CAM_ISP, "CSID:%u Error, Invalid args", csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	comp_cfg = (struct cam_isp_irq_comp_cfg *)cdm_args->data;

	reg_info = ((struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg)->ipp_mc_reg;
	if (!reg_info) {
		CAM_DBG(CAM_ISP, "No multi-context support in CSID[%u]", csid_hw->hw_intf->hw_idx);
		return 0;
	}

	comp_cfg_mask = (comp_cfg->ipp_src_ctxt_mask << reg_info->ipp_src_ctxt_mask_shift) |
		(comp_cfg->ipp_dst_comp_mask << reg_info->ipp_dst_ctxt_mask_shift);

	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
	size = cdm_util_ops->cdm_required_size_reg_random(1);

	/* CDM size is in dwords; caller buffer size is in bytes */
	if ((size * 4) > cdm_args->cmd.size) {
		CAM_ERR(CAM_ISP, "CSID:%u buf size:%d is not sufficient, expected: %d",
			csid_hw->hw_intf->hw_idx, cdm_args->cmd.size, (size*4));
		return -EINVAL;
	}

	reg_val_pair[0] = reg_info->irq_comp_cfg0_addr;
	reg_val_pair[1] = comp_cfg_mask;
	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
		1, reg_val_pair);
	cdm_args->cmd.used_bytes = size * 4;

	return 0;
}

/*
 * cam_ife_csid_ver2_get_primary_sof_timer_reg_addr() - return the mapped
 * addresses of the current-SOF timestamp register pair for a path.
 *
 * @sof_addr: in: res_id of the path; out: curr0/curr1 timestamp addresses
 */
static int cam_ife_csid_ver2_get_primary_sof_timer_reg_addr(
	struct cam_ife_csid_ver2_hw *csid_hw,
	struct cam_ife_csid_ts_reg_addr *sof_addr)
{
	struct cam_hw_soc_info *soc_info;
	struct cam_isp_resource_node *csid_res;
	struct cam_ife_csid_ver2_reg_info *csid_reg;
	const struct cam_ife_csid_ver2_path_reg_info *path_reg;

	if (!csid_hw || sof_addr->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CAM_ERR(CAM_ISP, "Invalid params, csid_hw is null: %s, res_id: %u",
			CAM_IS_NULL_TO_STR(csid_hw), sof_addr->res_id);
		return -EINVAL;
	}

	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;
	csid_res = &csid_hw->path_res[sof_addr->res_id];
	path_reg = csid_reg->path_reg[sof_addr->res_id];
	soc_info = &csid_hw->hw_info->soc_info;

	sof_addr->curr0_ts_addr = soc_info->reg_map[0].mem_base +
		path_reg->timestamp_curr0_sof_addr;
	sof_addr->curr1_ts_addr = soc_info->reg_map[0].mem_base +
		path_reg->timestamp_curr1_sof_addr;

	return 0;
}

/*
 * cam_ife_csid_ver2_get_num_dt_supported() - number of DTs a path can carry:
 * 1 without multi-VCDT support, else the per-target count (or the default).
 */
static int cam_ife_csid_ver2_get_num_dt_supported(
	struct cam_ife_csid_ver2_hw *csid_hw)
{
	struct cam_ife_csid_ver2_reg_info *csid_reg;
	int num_dt;

	if (unlikely(!csid_hw))
		return 0;

	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
		csid_hw->core_info->csid_reg;

	if (!csid_reg->cmn_reg->multi_vcdt_supported)
		return 1;

	num_dt =
		csid_reg->cmn_reg->num_dt_supported ?
		csid_reg->cmn_reg->num_dt_supported : CAM_IFE_CSID_DEFAULT_NUM_DT;

	return num_dt;
}

/*
 * cam_ife_csid_ver2_path_exp_info_update() - mark which path may raise the
 * epoch callback when multiple sensor exposures are streamed.
 *
 * When the sensor outputs more exposures than are processed, the last
 * exposure's RDI owns the callback; otherwise IPP does.
 */
static int cam_ife_csid_ver2_path_exp_info_update(
	struct cam_ife_csid_ver2_hw *csid_hw,
	struct cam_ife_csid_exp_info_update_args *exp_info)
{
	struct cam_ife_csid_ver2_path_cfg *path_cfg = NULL;

	if (!csid_hw || !exp_info) {
		CAM_ERR(CAM_ISP, "Invalid params, csid_hw is null: %s, exp_info is null: %s",
			CAM_IS_NULL_TO_STR(csid_hw), CAM_IS_NULL_TO_STR(exp_info));
		return -EINVAL;
	}

	if ((exp_info->num_sensor_out_exp > 1) &&
		(exp_info->num_sensor_out_exp > exp_info->num_process_exp))
		path_cfg = (struct cam_ife_csid_ver2_path_cfg *)
			csid_hw->path_res[exp_info->last_exp_res_id].res_priv;
	else
		path_cfg = (struct cam_ife_csid_ver2_path_cfg *)
			csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP].res_priv;

	if (!path_cfg) {
		CAM_ERR(CAM_ISP, "NULL path_cfg for exp info update");
		return -EINVAL;
	}

	csid_hw->flags.last_exp_valid = exp_info->last_exp_valid;
	path_cfg->allow_epoch_cb = true;

	return 0;
}

/*
 * cam_ife_csid_ver2_process_cmd() - hw_intf process_cmd entry point;
 * dispatches the CSID command codes to their handlers.
 *
 * @hw_priv:  struct cam_hw_info of this CSID
 * @cmd_type: CAM_IFE_CSID_* / CAM_ISP_HW_CMD_* opcode
 * @cmd_args: opcode-specific payload
 * @arg_size: payload size, validated by handlers that need it
 */
static int cam_ife_csid_ver2_process_cmd(void *hw_priv,
	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_ife_csid_ver2_hw *csid_hw;
	struct cam_hw_info *hw_info;

	if (!hw_priv || !cmd_args) {
		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
		return -EINVAL;
	}

	hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_ife_csid_ver2_hw *)hw_info->core_info;

	switch (cmd_type) {
	case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
		rc = cam_ife_csid_ver2_get_time_stamp(csid_hw, cmd_args);

		if (csid_hw->debug_info.debug_val &
			CAM_IFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO)
			cam_ife_csid_ver2_print_hbi_vbi(csid_hw);

		/* Reset CRC error count during SOF */
		csid_hw->counters.crc_error_irq_count = 0;
		break;
	case CAM_IFE_CSID_SET_CSID_DEBUG:
		rc = cam_ife_csid_ver2_set_debug(csid_hw,
			(struct cam_ife_csid_debug_cfg_args *)cmd_args);
		break;
	case
	     CAM_IFE_CSID_SOF_IRQ_DEBUG:
		rc = cam_ife_csid_ver2_sof_irq_debug(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE:
		rc = cam_ife_csid_ver2_set_csid_clock(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_DUMP_HW:
		break;
	case CAM_IFE_CSID_TOP_CONFIG:
		rc = cam_ife_csid_ver2_top_cfg(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
		rc = cam_ife_csid_get_base(&hw_info->soc_info,
			CAM_IFE_CSID_CLC_MEM_BASE_ID,
			cmd_args, arg_size);
		break;
	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
		rc = cam_ife_csid_ver2_reg_update(csid_hw,
			cmd_args, arg_size);
		break;
	case CAM_IFE_CSID_SET_DUAL_SYNC_CONFIG:
		rc = cam_ife_csid_ver2_dual_sync_cfg(csid_hw,
			cmd_args);
		break;
	case CAM_ISP_HW_CSID_MINI_DUMP:
		rc = cam_ife_csid_ver2_mini_dump(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_USER_DUMP:
		rc = cam_ife_csid_ver2_user_dump(csid_hw, cmd_args);
		break;
	case CAM_IFE_CSID_PROGRAM_OFFLINE_CMD:
		rc = cam_ife_csid_ver2_program_offline_go_cmd(
			csid_hw, cmd_args, arg_size);
		break;
	case CAM_ISP_HW_CMD_CSID_DYNAMIC_SWITCH_UPDATE:
		rc = cam_ife_csid_ver2_set_dynamic_switch_config(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_CSID_CHANGE_HALT_MODE:
		break;
	case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA: {
		struct cam_hw_soc_info *soc_info;

		soc_info = &csid_hw->hw_info->soc_info;
		*((struct cam_hw_soc_info **)cmd_args) = soc_info;
		break;
	}
	case CAM_ISP_HW_CMD_CSID_DISCARD_INIT_FRAMES:
		rc = cam_ife_csid_ver2_set_discard_frame_cfg(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_RDI_LCR_CFG:
		rc = cam_ife_csid_ver2_rdi_lcr_cfg(csid_hw, cmd_args);
		break;
	case CAM_IFE_CSID_LOG_ACQUIRE_DATA:
		/* Not supported on ver2 */
		rc = 0;
		break;
	case CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE:
		rc = cam_ife_csid_init_config_update(cmd_args, arg_size);
		break;
	case CAM_ISP_HW_CMD_DRV_CONFIG:
		rc = cam_ife_csid_ver2_drv_config(csid_hw, cmd_args);
		break;
	case CAM_IFE_CSID_RESET_OUT_OF_SYNC_CNT:
		rc =
cam_ife_csid_ver2_reset_out_of_sync_cnt(csid_hw, cmd_args);
+		break;
+	case CAM_ISP_HW_CMD_CSID_DUMP_CROP_REG:
+		rc = cam_ife_csid_ver2_dump_crop_reg(hw_info, (uint32_t *)cmd_args);
+		break;
+	case CAM_ISP_HW_CMD_IRQ_COMP_CFG:
+		rc = cam_ife_csid_ver2_irq_comp_cfg(csid_hw, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_IRQ_INJECTION:
+		rc = cam_ife_csid_ver2_irq_inject(csid_hw, cmd_args);
+		break;
+	case CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION:
+		rc = cam_ife_csid_ver2_dump_irq_desc(csid_hw, cmd_args);
+		break;
+	case CAM_ISP_HW_CMD_GET_SET_PRIM_SOF_TS_ADDR: {
+		struct cam_ife_csid_ts_reg_addr *sof_addr_args =
+			(struct cam_ife_csid_ts_reg_addr *)cmd_args;
+
+		/* Only the "get" direction is supported for the SOF ts address */
+		if (!sof_addr_args->get_addr) {
+			CAM_ERR(CAM_ISP, "CSID does not support set of primary SOF ts addr");
+			rc = -EINVAL;
+		} else
+			rc = cam_ife_csid_ver2_get_primary_sof_timer_reg_addr(csid_hw,
+				sof_addr_args);
+	}
+	break;
+	case CAM_ISP_HW_CMD_QUERY_CAP: {
+		struct cam_isp_hw_cap *csid_cap = (struct cam_isp_hw_cap *) cmd_args;
+
+		csid_cap->max_dt_supported = cam_ife_csid_ver2_get_num_dt_supported(csid_hw);
+
+		/* A zero DT count is treated as a failed capability query */
+		if (!csid_cap->max_dt_supported)
+			rc = -EINVAL;
+	}
+	break;
+	case CAM_ISP_HW_CMD_EXP_INFO_UPDATE:
+		rc = cam_ife_csid_ver2_path_exp_info_update(csid_hw, cmd_args);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%u unsupported cmd:%d",
+			csid_hw->hw_intf->hw_idx, cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+
+}
+
+/* Top-half ISR: hand the line off to the top IRQ controller (status reg 0) */
+static irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
+{
+	struct cam_ife_csid_ver2_hw *csid_hw = data;
+
+	if (!csid_hw)
+		return IRQ_NONE;
+
+	return cam_irq_controller_handle_irq(irq_num,
+		csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0],
+		CAM_IRQ_EVT_GROUP_0);
+}
+
+/* Free the per-path private config memory allocated at probe time */
+static void cam_ife_csid_ver2_free_res(struct cam_ife_csid_ver2_hw *csid_hw)
+{
+
+	struct cam_isp_resource_node *res;
+	uint32_t num_paths;
+	int i;
+	struct cam_ife_csid_ver2_reg_info *csid_reg;
+
+	csid_reg = (struct cam_ife_csid_ver2_reg_info *)
+		csid_hw->core_info->csid_reg;
+	num_paths
= csid_reg->cmn_reg->num_udis; + + for (i = 0; i < num_paths; i++) { + res = &csid_hw->path_res[CAM_IFE_PIX_PATH_RES_UDI_0 + i]; + CAM_MEM_FREE(res->res_priv); + res->res_priv = NULL; + } + + num_paths = csid_reg->cmn_reg->num_rdis; + + for (i = 0; i < num_paths; i++) { + res = &csid_hw->path_res[CAM_IFE_PIX_PATH_RES_RDI_0 + i]; + CAM_MEM_FREE(res->res_priv); + res->res_priv = NULL; + } + + CAM_MEM_FREE(csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP].res_priv); + csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP].res_priv = NULL; + CAM_MEM_FREE(csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP_1].res_priv); + csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP_1].res_priv = NULL; + CAM_MEM_FREE(csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP_2].res_priv); + csid_hw->path_res[CAM_IFE_PIX_PATH_RES_IPP_2].res_priv = NULL; + CAM_MEM_FREE(csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP].res_priv); + csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP].res_priv = NULL; +} + +static int cam_ife_ver2_hw_alloc_res( + struct cam_isp_resource_node *res, + uint32_t res_type, + struct cam_hw_intf *hw_intf, + uint32_t res_id) + +{ + struct cam_ife_csid_ver2_path_cfg *path_cfg = NULL; + + path_cfg = CAM_MEM_ZALLOC(sizeof(*path_cfg), GFP_KERNEL); + + if (!path_cfg) + return -ENOMEM; + + res->res_id = res_id; + res->res_type = res_type; + res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + res->hw_intf = hw_intf; + res->res_priv = path_cfg; + + return 0; +} + +static int cam_ife_csid_ver2_hw_init_path_res( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + int rc = 0; + int i; + struct cam_ife_csid_ver2_reg_info *csid_reg; + struct cam_isp_resource_node *res; + int32_t res_id = -1; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + /* Initialize the IPP resources */ + for (i = 0; i < csid_reg->cmn_reg->num_pix; i++) { + switch (i) { + case 0: + res_id = CAM_IFE_PIX_PATH_RES_IPP; + break; + case 1: + res_id = CAM_IFE_PIX_PATH_RES_IPP_1; + break; + case 2: + res_id = CAM_IFE_PIX_PATH_RES_IPP_2; + 
break; + default: + res_id = -1; + break; + } + + if (res_id == -1) { + CAM_ERR(CAM_ISP, "CSID: %d Invalid res_id idx:%d", + csid_hw->hw_intf->hw_idx, i); + goto free_res; + } + + res = &csid_hw->path_res[res_id]; + rc = cam_ife_ver2_hw_alloc_res(res, CAM_ISP_RESOURCE_PIX_PATH, + csid_hw->hw_intf, res_id); + if (rc) { + CAM_ERR(CAM_ISP, "CSID: %d IPP_%d res init fail", + csid_hw->hw_intf->hw_idx, i); + goto free_res; + } + + scnprintf(csid_hw->path_res[res_id].res_name, CAM_ISP_RES_NAME_LEN, "IPP_%d", i); + } + + /* Initialize PPP resource */ + if (csid_reg->cmn_reg->num_ppp) { + res = &csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP]; + rc = cam_ife_ver2_hw_alloc_res( + res, + CAM_ISP_RESOURCE_PIX_PATH, + csid_hw->hw_intf, + CAM_IFE_PIX_PATH_RES_PPP); + if (rc) { + CAM_ERR(CAM_ISP, "CSID: %u PPP res init fail", + csid_hw->hw_intf->hw_idx); + goto free_res; + } + scnprintf(csid_hw->path_res[CAM_IFE_PIX_PATH_RES_PPP].res_name, + CAM_ISP_RES_NAME_LEN, "PPP"); + } + + /* Initialize the RDI resource */ + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + /* res type is from RDI 0 to RDI3 */ + res = &csid_hw->path_res[CAM_IFE_PIX_PATH_RES_RDI_0 + i]; + rc = cam_ife_ver2_hw_alloc_res( + res, + CAM_ISP_RESOURCE_PIX_PATH, + csid_hw->hw_intf, + CAM_IFE_PIX_PATH_RES_RDI_0 + i); + if (rc) { + CAM_ERR(CAM_ISP, "CSID: %u RDI[%d] res init fail", + csid_hw->hw_intf->hw_idx, i); + goto free_res; + } + scnprintf(res->res_name, CAM_ISP_RES_NAME_LEN, "RDI_%d", i); + } + + /* Initialize the UDI resource */ + for (i = 0; i < csid_reg->cmn_reg->num_udis; i++) { + /* res type is from UDI0 to UDI3 */ + res = &csid_hw->path_res[CAM_IFE_PIX_PATH_RES_UDI_0 + i]; + rc = cam_ife_ver2_hw_alloc_res( + res, + CAM_ISP_RESOURCE_PIX_PATH, + csid_hw->hw_intf, + CAM_IFE_PIX_PATH_RES_UDI_0 + i); + if (rc) { + CAM_ERR(CAM_ISP, "CSID: %u UDI[%d] res init fail", + csid_hw->hw_intf->hw_idx, i); + goto free_res; + } + scnprintf(res->res_name, CAM_ISP_RES_NAME_LEN, "UDI_%d", i); + } + + return rc; + +free_res: 
+ cam_ife_csid_ver2_free_res(csid_hw); + return rc; +} + +static void cam_ife_csid_hw_deinit_irq( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + unsigned long flags; + int i; + struct cam_ife_csid_ver2_reg_info *csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0; i < csid_reg->num_top_regs; i++) + cam_irq_controller_deinit(&csid_hw->top_irq_controller[i]); + + for (i = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; i < csid_reg->num_rx_regs; i++) + cam_irq_controller_deinit(&csid_hw->rx_irq_controller[i]); + + cam_irq_controller_deinit(&csid_hw->buf_done_irq_controller); + + for (i = 0; i < CAM_IFE_PIX_PATH_RES_MAX; i++) { + if (csid_hw->path_irq_controller[i]) + cam_irq_controller_deinit(&csid_hw->path_irq_controller[i]); + } + + spin_lock_irqsave(&csid_hw->path_payload_lock, flags); + INIT_LIST_HEAD(&csid_hw->path_free_payload_list); + for (i = 0; i < CAM_IFE_CSID_VER2_PAYLOAD_MAX; i++) + INIT_LIST_HEAD(&csid_hw->path_evt_payload[i].list); + spin_unlock_irqrestore(&csid_hw->path_payload_lock, flags); + + spin_lock_irqsave(&csid_hw->rx_payload_lock, flags); + INIT_LIST_HEAD(&csid_hw->rx_free_payload_list); + for (i = 0; i < CAM_IFE_CSID_VER2_PAYLOAD_MAX; i++) + INIT_LIST_HEAD(&csid_hw->rx_evt_payload[i].list); + spin_unlock_irqrestore(&csid_hw->rx_payload_lock, flags); +} + +static int cam_ife_csid_hw_init_irq( + struct cam_ife_csid_ver2_hw *csid_hw) +{ + int rc = 0; + int i; + int ipp_res_id; + struct cam_hw_soc_info *soc_info; + void __iomem *mem_base; + struct cam_ife_csid_ver2_reg_info *csid_reg; + char name[CAM_CSID_IRQ_CTRL_NAME_LEN]; + + csid_reg = (struct cam_ife_csid_ver2_reg_info *) + csid_hw->core_info->csid_reg; + + soc_info = &csid_hw->hw_info->soc_info; + mem_base = soc_info->reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base; + + for (i = CAM_IFE_CSID_TOP_IRQ_STATUS_REG0; i < csid_reg->num_top_regs; i++) { + snprintf(name, CAM_CSID_IRQ_CTRL_NAME_LEN, "csid%d_%s", csid_hw->hw_intf->hw_idx, + 
cam_ife_csid_ver2_top_reg_name[i]); + rc = cam_irq_controller_init(name, mem_base, &csid_reg->top_irq_reg_info[i], + &csid_hw->top_irq_controller[i]); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%u Failed to init CSID %s irq controller rc = %d", + csid_hw->hw_intf->hw_idx, cam_ife_csid_ver2_top_reg_name[i], rc); + goto deinit_controller; + } + } + + for (i = CAM_IFE_CSID_RX_IRQ_STATUS_REG0; i < csid_reg->num_rx_regs; i++) { + snprintf(name, CAM_CSID_IRQ_CTRL_NAME_LEN, "csid%d_%s", csid_hw->hw_intf->hw_idx, + cam_ife_csid_ver2_rx_reg_name[i]); + rc = cam_irq_controller_init(name, mem_base, &csid_reg->rx_irq_reg_info[i], + &csid_hw->rx_irq_controller[i]); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%u Failed to init CSID rx irq controller rc = %d", + csid_hw->hw_intf->hw_idx, rc); + goto deinit_controller; + } + } + + snprintf(name, CAM_CSID_IRQ_CTRL_NAME_LEN, "csid%d_buf_done", csid_hw->hw_intf->hw_idx); + rc = cam_irq_controller_init(name, mem_base, csid_reg->buf_done_irq_reg_info, + &csid_hw->buf_done_irq_controller); + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%u Failed to init CSID buf_done irq controller rc = %d", + csid_hw->hw_intf->hw_idx, rc); + goto deinit_controller; + } + + for (i = 0; i < csid_reg->cmn_reg->num_pix; i++) { + switch (i) { + case 0: + ipp_res_id = CAM_IFE_PIX_PATH_RES_IPP; + break; + case 1: + ipp_res_id = CAM_IFE_PIX_PATH_RES_IPP_1; + break; + case 2: + ipp_res_id = CAM_IFE_PIX_PATH_RES_IPP_2; + break; + default: + ipp_res_id = -1; + break; + } + + if (ipp_res_id == -1) { + CAM_ERR(CAM_ISP, "CSID: %d Invalid res_id idx:%d", + csid_hw->hw_intf->hw_idx, i); + goto deinit_controller; + + } + + snprintf(name, CAM_CSID_IRQ_CTRL_NAME_LEN, "csid%d_ipp%d", + csid_hw->hw_intf->hw_idx, i); + rc = cam_irq_controller_init(name, mem_base, + csid_reg->path_irq_reg_info[ipp_res_id], + &csid_hw->path_irq_controller[ipp_res_id]); + + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%u Failed to init CSID ipp%d irq controller rc = %d", + csid_hw->hw_intf->hw_idx, i, rc); + goto 
deinit_controller; + } + } + + if (csid_reg->cmn_reg->num_ppp) { + snprintf(name, CAM_CSID_IRQ_CTRL_NAME_LEN, "csid%d_ppp", csid_hw->hw_intf->hw_idx); + rc = cam_irq_controller_init(name, mem_base, + csid_reg->path_irq_reg_info[CAM_IFE_PIX_PATH_RES_PPP], + &csid_hw->path_irq_controller[CAM_IFE_PIX_PATH_RES_PPP]); + + if (rc) { + CAM_ERR(CAM_ISP, + "CSID:%u Failed to init CSID ppp irq controller rc = %d", + csid_hw->hw_intf->hw_idx, rc); + goto deinit_controller; + } + } + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + snprintf(name, CAM_CSID_IRQ_CTRL_NAME_LEN, "csid%d_rdi%d", + csid_hw->hw_intf->hw_idx, i); + rc = cam_irq_controller_init(name, mem_base, + csid_reg->path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0 + i], + &csid_hw->path_irq_controller[CAM_IFE_PIX_PATH_RES_RDI_0 + i]); + if (rc) { + CAM_ERR(CAM_ISP, + "CSID:%u Failed to init CSID rdi%d irq controller rc = %d", + csid_hw->hw_intf->hw_idx, i, rc); + goto deinit_controller; + } + } + + spin_lock_init(&csid_hw->path_payload_lock); + INIT_LIST_HEAD(&csid_hw->path_free_payload_list); + for (i = 0; i < CAM_IFE_CSID_VER2_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&csid_hw->path_evt_payload[i].list); + list_add_tail(&csid_hw->path_evt_payload[i].list, + &csid_hw->path_free_payload_list); + } + spin_lock_init(&csid_hw->rx_payload_lock); + INIT_LIST_HEAD(&csid_hw->rx_free_payload_list); + for (i = 0; i < CAM_IFE_CSID_VER2_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&csid_hw->rx_evt_payload[i].list); + list_add_tail(&csid_hw->rx_evt_payload[i].list, + &csid_hw->rx_free_payload_list); + } + + return 0; + +deinit_controller: + cam_ife_csid_hw_deinit_irq(csid_hw); + return rc; +} + +int cam_ife_csid_ver2_irq_line_test(void *hw_priv) +{ + struct cam_ife_csid_ver2_hw *csid_hw; + struct cam_hw_soc_info *soc_info; + int rc = 0; + void __iomem *mem_base; + struct cam_ife_csid_ver2_reg_info *csid_reg; + + if (!hw_priv) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + csid_hw = ((struct cam_hw_info 
*)hw_priv)->core_info;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	mem_base = csid_hw->hw_info->soc_info.reg_map[CAM_IFE_CSID_CLC_MEM_BASE_ID].mem_base;
+	csid_reg = csid_hw->core_info->csid_reg;
+	rc = cam_ife_csid_enable_soc_resources(soc_info, soc_info->lowest_clk_level);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CSID[%u] Enable soc failed", csid_hw->hw_intf->hw_idx);
+		return rc;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID[%u] hw-version:0x%x",
+		csid_hw->hw_intf->hw_idx,
+		cam_io_r_mb(mem_base + csid_reg->cmn_reg->hw_version_addr));
+
+	rc = cam_irq_controller_test_irq_line(
+		csid_hw->top_irq_controller[CAM_IFE_CSID_TOP_IRQ_STATUS_REG0],
+		"CSID:%d", csid_hw->hw_intf->hw_idx);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CSID[%u] IRQ line test failed", csid_hw->hw_intf->hw_idx);
+
+	cam_ife_csid_disable_soc_resources(soc_info);
+	return rc;
+}
+
+/*
+ * cam_ife_csid_hw_ver2_init()
+ *
+ * Probe-time init of one CSID core: allocates the hw context, inits soc
+ * resources, wires the hw_ops vtable, and sets up IRQ controllers and
+ * path resources. On any failure, all resources acquired so far are
+ * released in reverse order (previously csid_hw leaked and
+ * hw_info->core_info was left dangling on the error paths).
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+int cam_ife_csid_hw_ver2_init(struct cam_hw_intf *hw_intf,
+	struct cam_ife_csid_core_info *core_info,
+	bool is_custom)
+{
+	int rc = -EINVAL;
+	struct cam_hw_info *hw_info;
+	struct cam_ife_csid_ver2_hw *csid_hw = NULL;
+
+	if (!hw_intf || !core_info) {
+		CAM_ERR(CAM_ISP, "Invalid parameters intf: %pK hw_info: %pK",
+			hw_intf, core_info);
+		return rc;
+	}
+
+	hw_info = (struct cam_hw_info *)hw_intf->hw_priv;
+
+	csid_hw = CAM_MEM_ZALLOC(sizeof(struct cam_ife_csid_ver2_hw), GFP_KERNEL);
+
+	if (!csid_hw) {
+		CAM_ERR(CAM_ISP, "Csid core %d hw allocation fails",
+			hw_intf->hw_idx);
+		return -ENOMEM;
+	}
+
+	hw_info->core_info = csid_hw;
+	csid_hw->hw_intf = hw_intf;
+	csid_hw->hw_info = hw_info;
+	csid_hw->core_info = core_info;
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		hw_intf->hw_type,
+		hw_intf->hw_idx);
+
+	csid_hw->flags.device_enabled = false;
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&csid_hw->hw_info->hw_mutex);
+	spin_lock_init(&csid_hw->hw_info->hw_lock);
+	spin_lock_init(&csid_hw->lock_state);
+	init_completion(&csid_hw->hw_info->hw_complete);
+	atomic_set(&csid_hw->discard_frame_per_path, 0);
+
+	rc = cam_ife_csid_init_soc_resources(&csid_hw->hw_info->soc_info,
+		cam_ife_csid_irq, cam_ife_csid_ver2_cpas_cb, csid_hw, is_custom);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "CSID:%u Failed to init_soc",
+			hw_intf->hw_idx);
+		goto free_csid_hw;
+	}
+
+	if (cam_cpas_is_feature_supported(CAM_CPAS_QCFA_BINNING_ENABLE,
+		CAM_CPAS_HW_IDX_ANY, NULL))
+		csid_hw->flags.binning_enabled = true;
+
+	if (cam_cpas_query_domain_id_security_support())
+		csid_hw->flags.domain_id_security = true;
+
+	csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_ver2_get_hw_caps;
+	csid_hw->hw_intf->hw_ops.init = cam_ife_csid_ver2_init_hw;
+	csid_hw->hw_intf->hw_ops.deinit = cam_ife_csid_ver2_deinit_hw;
+	csid_hw->hw_intf->hw_ops.reset = cam_ife_csid_ver2_reset;
+	csid_hw->hw_intf->hw_ops.reserve = cam_ife_csid_ver2_reserve;
+	csid_hw->hw_intf->hw_ops.release = cam_ife_csid_ver2_release;
+	csid_hw->hw_intf->hw_ops.start = cam_ife_csid_ver2_start;
+	csid_hw->hw_intf->hw_ops.stop = cam_ife_csid_ver2_stop;
+	csid_hw->hw_intf->hw_ops.read = cam_ife_csid_ver2_read;
+	csid_hw->hw_intf->hw_ops.write = cam_ife_csid_ver2_write;
+	csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_ver2_process_cmd;
+	csid_hw->hw_intf->hw_ops.test_irq_line = cam_ife_csid_ver2_irq_line_test;
+
+	rc = cam_ife_csid_hw_init_irq(csid_hw);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to init CSID irq");
+		goto deinit_soc;
+	}
+
+	rc = cam_ife_csid_ver2_hw_init_path_res(csid_hw);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CSID[%u] Probe Init failed",
+			hw_intf->hw_idx);
+		goto deinit_irq;
+	}
+	csid_hw->debug_info.debug_val = 0;
+	csid_hw->counters.error_irq_count = 0;
+	csid_hw->debug_info.set_domain_id_enabled = false;
+	csid_hw->debug_info.domain_id_value = 0;
+
+	return 0;
+
+/* Unwind in reverse acquisition order so nothing leaks on probe failure */
+deinit_irq:
+	cam_ife_csid_hw_deinit_irq(csid_hw);
+deinit_soc:
+	cam_ife_csid_deinit_soc_resources(&csid_hw->hw_info->soc_info);
+free_csid_hw:
+	hw_info->core_info = NULL;
+	CAM_MEM_FREE(csid_hw);
+	return rc;
+}
+EXPORT_SYMBOL(cam_ife_csid_hw_ver2_init);
+
+/*
+ * cam_ife_csid_hw_ver2_deinit()
+ *
+ * Tear down one CSID core: IRQ controllers, per-path private memory,
+ * and soc resources. hw_priv is checked before dereferencing core_info
+ * (previously a NULL hw_priv crashed before the validity check).
+ */
+int cam_ife_csid_hw_ver2_deinit(struct cam_hw_info *hw_priv)
+{
+	struct cam_ife_csid_ver2_hw *csid_hw;
+	int rc = -EINVAL;
+
+	csid_hw = hw_priv ?
+		(struct cam_ife_csid_ver2_hw *)hw_priv->core_info : NULL;
+
+	if (!csid_hw) {
+		CAM_ERR(CAM_ISP, "Invalid param");
+		
return rc; + } + + cam_ife_csid_hw_deinit_irq(csid_hw); + + /* release the privdate data memory from resources */ + cam_ife_csid_ver2_free_res(csid_hw); + + cam_ife_csid_deinit_soc_resources(&csid_hw->hw_info->soc_info); + + return 0; +} +EXPORT_SYMBOL(cam_ife_csid_hw_ver2_deinit); diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.h new file mode 100644 index 0000000000..47ef00f970 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.h @@ -0,0 +1,903 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_HW_VER2_H_ +#define _CAM_IFE_CSID_HW_VER2_H_ + +#include "cam_hw.h" +#include "cam_ife_csid_hw_intf.h" +#include "cam_ife_csid_soc.h" +#include "cam_ife_csid_common.h" + +#define CAM_IFE_CSID_VER2_PAYLOAD_MAX 256 + +#define IFE_CSID_VER2_PATH_ERROR_ILLEGAL_PROGRAM BIT(0) +#define IFE_CSID_VER2_PATH_ERROR_FIFO_OVERFLOW BIT(2) +#define IFE_CSID_VER2_PATH_CAMIF_EOF BIT(3) +#define IFE_CSID_VER2_PATH_CAMIF_SOF BIT(4) +#define IFE_CSID_VER2_PATH_INFO_FRAME_DROP_EOF BIT(5) +#define IFE_CSID_VER2_PATH_INFO_FRAME_DROP_EOL BIT(6) +#define IFE_CSID_VER2_PATH_INFO_FRAME_DROP_SOL BIT(7) +#define IFE_CSID_VER2_PATH_INFO_FRAME_DROP_SOF BIT(8) +#define IFE_CSID_VER2_PATH_INFO_INPUT_EOF BIT(9) +#define IFE_CSID_VER2_PATH_INFO_INPUT_EOL BIT(10) +#define IFE_CSID_VER2_PATH_INFO_INPUT_SOL BIT(11) +#define IFE_CSID_VER2_PATH_INFO_INPUT_SOF BIT(12) +#define IFE_CSID_VER2_PATH_ERROR_PIX_COUNT BIT(13) +#define IFE_CSID_VER2_PATH_ERROR_LINE_COUNT BIT(14) +#define IFE_CSID_VER2_PATH_VCDT_GRP0_SEL BIT(15) +#define IFE_CSID_VER2_PATH_VCDT_GRP1_SEL BIT(16) +#define 
IFE_CSID_VER2_PATH_VCDT_GRP_CHANGE BIT(17) +#define IFE_CSID_VER2_PATH_FRAME_DROP BIT(18) +#define IFE_CSID_VER2_PATH_RECOVERY_OVERFLOW BIT(19) +#define IFE_CSID_VER2_PATH_ERROR_REC_CCIF_VIOLATION BIT(20) +#define IFE_CSID_VER2_PATH_CAMIF_EPOCH0 BIT(21) +#define IFE_CSID_VER2_PATH_CAMIF_EPOCH1 BIT(22) +#define IFE_CSID_VER2_PATH_RUP_DONE BIT(23) +#define IFE_CSID_VER2_PATH_ILLEGAL_BATCH_ID BIT(24) +#define IFE_CSID_VER2_PATH_BATCH_END_MISSING_VIOLATION BIT(25) +#define IFE_CSID_VER2_PATH_HEIGHT_VIOLATION BIT(26) +#define IFE_CSID_VER2_PATH_WIDTH_VIOLATION BIT(27) +#define IFE_CSID_VER2_PATH_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP BIT(28) +#define IFE_CSID_VER2_PATH_CCIF_VIOLATION BIT(29) + +/*Each Bit represents the index of cust node hw*/ +#define IFE_CSID_VER2_CUST_NODE_IDX_0 0x1 +#define IFE_CSID_VER2_CUST_NODE_IDX_1 0x2 +#define IFE_CSID_VER2_CUST_NODE_IDX_2 0x4 + +enum cam_ife_csid_ver2_input_core_sel { + CAM_IFE_CSID_INPUT_CORE_SEL_NONE, + CAM_IFE_CSID_INPUT_CORE_SEL_INTERNAL, + CAM_IFE_CSID_INPUT_CORE_SEL_SFE_0, + CAM_IFE_CSID_INPUT_CORE_SEL_SFE_1, + CAM_IFE_CSID_INPUT_CORE_SEL_SFE_2, + CAM_IFE_CSID_INPUT_CORE_SEL_CUST_NODE_0, + CAM_IFE_CSID_INPUT_CORE_SEL_CUST_NODE_1, + CAM_IFE_CSID_INPUT_CORE_SEL_CUST_NODE_2, + CAM_IFE_CSID_INPUT_CORE_SEL_MAX, +}; + +enum cam_ife_csid_ver2_csid_reset_loc { + CAM_IFE_CSID_RESET_LOC_PATH_ONLY, + CAM_IFE_CSID_RESET_LOC_COMPLETE, + CAM_IFE_CSID_RESET_LOC_MAX, +}; + +enum cam_ife_csid_ver2_csid_reset_cmd { + CAM_IFE_CSID_RESET_CMD_IRQ_CTRL, + CAM_IFE_CSID_RESET_CMD_SW_RST, + CAM_IFE_CSID_RESET_CMD_HW_RST, + CAM_IFE_CSID_RESET_CMD_HW_MAX, +}; + +struct cam_ife_csid_ver2_debug_info { + uint32_t debug_val; + uint32_t rx_capture_vc; + uint32_t rx_capture_dt; + uint32_t rst_capture_strobes; + uint32_t top_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t rx_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t path_mask; + uint32_t test_bus_val; + uint32_t domain_id_value; + bool rx_capture_debug_set; + bool test_bus_enabled; + bool 
set_domain_id_enabled; + bool cdr_sweep_debug_enabled; +}; + +struct cam_ife_csid_ver2_top_cfg { + uint32_t input_core_type; + uint32_t dual_sync_core_sel; + uint32_t master_slave_sel; + uint32_t rdi_lcr; + bool dual_en; + bool offline_sfe_en; + bool out_ife_en; + bool sfe_fs; +}; + +struct cam_ife_csid_ver2_evt_payload { + struct list_head list; + uint32_t irq_reg_val; + struct timespec64 timestamp; + uint64_t sof_ts_reg_val; + bool is_mc; +}; + +/* + * struct cam_ife_csid_ver2_camif_data: place holder for camif parameters + * + * @epoch0_cfg: Epoch 0 configuration value + * @epoch1_cfg: Epoch 1 configuration value + */ +struct cam_ife_csid_ver2_camif_data { + uint32_t epoch0; + uint32_t epoch1; +}; + +struct cam_ife_csid_ver2_rx_cfg { + uint32_t phy_sel; + uint32_t lane_type; + uint32_t lane_num; + uint32_t lane_cfg; + uint32_t tpg_mux_sel; + uint32_t tpg_num_sel; + uint32_t mup; + uint32_t epd_supported; + uint32_t top_irq_handle; + uint32_t rx2_irq_handle; + uint32_t irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t err_irq_handle[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + bool dynamic_sensor_switch_en; +}; + +struct cam_ife_csid_ver2_csi2_rx_reg_info { + uint32_t irq_status_addr[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t irq_mask_addr[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t irq_clear_addr[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t irq_set_addr[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t cfg0_addr; + uint32_t cfg1_addr; + uint32_t capture_ctrl_addr; + uint32_t rst_strobes_addr; + uint32_t de_scramble_cfg0_addr; + uint32_t de_scramble_cfg1_addr; + uint32_t cap_unmap_long_pkt_hdr_0_addr; + uint32_t cap_unmap_long_pkt_hdr_1_addr; + uint32_t captured_short_pkt_0_addr; + uint32_t captured_short_pkt_1_addr; + uint32_t captured_long_pkt_0_addr; + uint32_t captured_long_pkt_1_addr; + uint32_t captured_long_pkt_ftr_addr; + uint32_t captured_cphy_pkt_hdr_addr; + uint32_t lane0_misr_addr; + uint32_t lane1_misr_addr; + uint32_t lane2_misr_addr; + 
uint32_t lane3_misr_addr; + uint32_t total_pkts_rcvd_addr; + uint32_t stats_ecc_addr; + uint32_t total_crc_err_addr; + uint32_t de_scramble_type3_cfg0_addr; + uint32_t de_scramble_type3_cfg1_addr; + uint32_t de_scramble_type2_cfg0_addr; + uint32_t de_scramble_type2_cfg1_addr; + uint32_t de_scramble_type1_cfg0_addr; + uint32_t de_scramble_type1_cfg1_addr; + uint32_t de_scramble_type0_cfg0_addr; + uint32_t de_scramble_type0_cfg1_addr; + + /*configurations */ + uint32_t rst_srb_all; + uint32_t rst_done_shift_val; + uint32_t irq_mask_all; + uint32_t misr_enable_shift_val; + uint32_t vc_mode_shift_val; + uint32_t capture_long_pkt_en_shift; + uint32_t capture_short_pkt_en_shift; + uint32_t capture_cphy_pkt_en_shift; + uint32_t capture_long_pkt_dt_shift; + uint32_t capture_long_pkt_vc_shift; + uint32_t capture_short_pkt_vc_shift; + uint32_t capture_cphy_pkt_dt_shift; + uint32_t capture_cphy_pkt_vc_shift; + uint32_t ecc_correction_shift_en; + uint32_t phy_bist_shift_en; + uint32_t epd_mode_shift_en; + uint32_t eotp_shift_en; + uint32_t dyn_sensor_switch_shift_en; + uint32_t rup_aup_latch_shift; + bool rup_aup_latch_supported; + uint32_t phy_num_mask; + uint32_t vc_mask; + uint32_t wc_mask; + uint32_t dt_mask; + uint32_t vc_shift; + uint32_t dt_shift; + uint32_t wc_shift; + uint32_t calc_crc_mask; + uint32_t expected_crc_mask; + uint32_t calc_crc_shift; + uint32_t lane_num_shift; + uint32_t lane_cfg_shift; + uint32_t phy_type_shift; + uint32_t phy_num_shift; + uint32_t tpg_mux_en_shift; + uint32_t tpg_num_sel_shift; + uint32_t long_pkt_strobe_rst_shift; + uint32_t short_pkt_strobe_rst_shift; + uint32_t cphy_pkt_strobe_rst_shift; + uint32_t unmapped_pkt_strobe_rst_shift; + uint32_t fatal_err_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t part_fatal_err_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t non_fatal_err_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t debug_irq_mask[CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + uint32_t 
top_irq_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t rx_rx2_irq_mask; +}; + +/* + *struct cam_ife_csid_ver2_rup_aup_mask: place holder for rup/aup mask parameter + * + * @rup_mask: This stores value of rup mask when rup and aup are split, + * else it store value of unified rup aup mask. + * @aup_mask: This stores value of aup mask if rup and aup is split, else + * this is to be ignored. + * @rup_aup_set_mask: This is used to store rup_aup_sync register mask when rup + * and aup are split,else this value is to be ignored. + */ +struct cam_ife_csid_ver2_rup_aup_mask { + uint32_t rup_mask; + uint32_t aup_mask; + uint32_t rup_aup_set_mask; +}; + +/* + * struct cam_ife_csid_ver2_path_cfg: place holder for path parameters + * + * @error_ts: Error timestamp + * @sof_ts: SOF timestamp + * @epoch_ts: Epoch timestamp + * @eof_ts: EOF timestamp + * @cid: cid value for path + * @path_format: Array of Path format which contains format + * info i.e Decode format, Packing format etc + * @sec_evt_config: Secondary event config from HW mgr for a given path + * @in_format: Array of input format which contains format type + * @out_format: output format + * @start_pixel: start pixel for horizontal crop + * @end_pixel: end pixel for horizontal crop + * @start_line: start line for vertical crop + * @end_line: end line for vertical crop + * @width: width of incoming data + * @height: height of incoming data + * @master_idx: master idx + * @horizontal_bin: horizontal binning enable/disable on path + * @vertical_bin: vertical binning enable/disable on path + * @qcfa_bin : qcfa binning enable/disable on path + * @hor_ver_bin : horizontal vertical binning enable/disable on path + * @num_bytes_out: Number of bytes out + * @irq_handle: IRQ handle + * @err_irq_handle: Error IRQ handle + * @discard_irq_handle: IRQ handle for SOF when discarding initial frames + * @irq_reg_idx: IRQ Reg index + * @sof_cnt: SOF counter + * @num_frames_discard: number of frames to discard + * @epoch_cfg: 
Epoch configured value + * @switch_out_of_sync_cnt: Sensor out of sync error cnt + * @sync_mode : Sync mode--> master/slave/none + * @vfr_en : flag to indicate if variable frame rate is enabled + * @frame_id_dec_en: flag to indicate if frame id decoding is enabled + * @crop_enable: flag to indicate crop enable + * @drop_enable: flag to indicate drop enable + * @discard_init_frames: discard initial frames + * @skip_discard_frame_cfg: Skip handling discard config, the blob order between + * discard config and dynamic switch update cannot be gauranteed + * If we know the number of paths to avoid configuring discard + * for before processing discard config we can skip it for + * the corresponding paths + * @sfe_shdr: flag to indicate if sfe is inline shdr + * @lcr_en: Flag to indicate if path is part can be input to LCR + * @ts_comb_vcdt_en: Indicates if Timestamp combined vcdt flag is enabled + * @is_aeb_en: Flag to indicate if aeb mode is enabled + * @allow_epoch_cb: Flag to indicate if epoch callback is allowed for last exposure + * + */ +struct cam_ife_csid_ver2_path_cfg { + struct timespec64 error_ts; + struct timespec64 sof_ts; + struct timespec64 epoch_ts; + struct timespec64 eof_ts; + struct cam_ife_csid_path_format path_format[CAM_ISP_VC_DT_CFG]; + struct cam_csid_secondary_evt_config sec_evt_config; + uint32_t cid; + uint32_t in_format[CAM_ISP_VC_DT_CFG]; + uint32_t out_format; + uint32_t start_pixel; + uint32_t end_pixel; + uint32_t width; + uint32_t start_line; + uint32_t end_line; + uint32_t height; + uint32_t master_idx; + uint64_t clk_rate; + uint32_t horizontal_bin; + uint32_t vertical_bin; + uint32_t qcfa_bin; + uint32_t hor_ver_bin; + uint32_t num_bytes_out; + uint32_t top_irq_handle; + uint32_t irq_handle; + uint32_t err_irq_handle; + uint32_t discard_irq_handle; + uint32_t irq_reg_idx; + uint32_t sof_cnt; + uint32_t num_frames_discard; + uint32_t epoch_cfg; + atomic_t switch_out_of_sync_cnt; + enum cam_isp_hw_sync_mode sync_mode; + bool vfr_en; + 
bool frame_id_dec_en; + bool crop_enable; + bool drop_enable; + bool discard_init_frames; + bool skip_discard_frame_cfg; + bool sfe_shdr; + bool lcr_en; + bool csid_out_unpack_msb; + bool handle_camif_irq; + bool ts_comb_vcdt_en; + bool is_aeb_en; + bool allow_epoch_cb; +}; + +struct cam_ife_csid_ver2_top_reg_info { + uint32_t io_path_cfg0_addr[CAM_IFE_CSID_HW_NUM_MAX]; + uint32_t dual_csid_cfg0_addr[CAM_IFE_CSID_HW_NUM_MAX]; + uint32_t input_core_type_shift_val; + uint32_t sfe_offline_en_shift_val; + uint32_t out_ife_en_shift_val; + uint32_t dual_sync_sel_shift_val; + uint32_t dual_en_shift_val; + uint32_t master_slave_sel_shift_val; + uint32_t rdi_lcr_shift_val; + uint32_t master_sel_val; + uint32_t slave_sel_val; + uint32_t io_path_cfg_rst_val; + uint32_t dual_cfg_rst_val; + bool sfe_pipeline_bypassed; +}; + +struct cam_ife_csid_ver2_mc_ctxt_reg_info { + uint32_t epoch_irq_cfg_addr; +}; + +struct cam_ife_csid_ver2_mc_bcast_reg_info { + uint32_t pix_store_cfg_addr; +}; + +struct cam_ife_csid_ver2_mc_reg_info { + uint32_t irq_comp_cfg0_addr; + uint32_t ipp_src_ctxt_mask_shift; + uint32_t ipp_dst_ctxt_mask_shift; + uint32_t comp_sof_mask; + uint32_t comp_eof_mask; + uint32_t comp_epoch0_mask; + uint32_t comp_rup_mask; + uint32_t comp_subgrp0_mask; + uint32_t comp_subgrp2_mask; +}; + +struct cam_ife_csid_ver2_path_reg_info { + uint32_t irq_status_addr; + uint32_t irq_mask_addr; + uint32_t irq_clear_addr; + uint32_t irq_set_addr; + uint32_t cfg0_addr; + uint32_t ctrl_addr; + uint32_t debug_clr_cmd_addr; + uint32_t multi_vcdt_cfg0_addr; + uint32_t multi_vcdt_cfg1_addr; + uint32_t cfg1_addr; + uint32_t bin_cfg0_addr; + uint32_t pix_store_cfg0_addr; + uint32_t sparse_pd_extractor_cfg_addr; + uint32_t err_recovery_cfg0_addr; + uint32_t err_recovery_cfg1_addr; + uint32_t err_recovery_cfg2_addr; + uint32_t bin_pd_detect_cfg0_addr; + uint32_t bin_pd_detect_cfg1_addr; + uint32_t bin_pd_detect_cfg2_addr; + uint32_t debug_byte_cntr_ping_addr; + uint32_t 
debug_byte_cntr_pong_addr; + uint32_t camif_frame_cfg_addr; + uint32_t epoch_irq_cfg_addr; + uint32_t epoch0_subsample_ptrn_addr; + uint32_t epoch1_subsample_ptrn_addr; + uint32_t debug_rup_aup_status; + uint32_t debug_camif_1_addr; + uint32_t debug_camif_0_addr; + uint32_t frm_drop_pattern_addr; + uint32_t frm_drop_period_addr; + uint32_t irq_subsample_pattern_addr; + uint32_t irq_subsample_period_addr; + uint32_t hcrop_addr; + uint32_t vcrop_addr; + uint32_t pix_drop_pattern_addr; + uint32_t pix_drop_period_addr; + uint32_t line_drop_pattern_addr; + uint32_t line_drop_period_addr; + uint32_t debug_halt_status_addr; + uint32_t debug_misr_val0_addr; + uint32_t debug_misr_val1_addr; + uint32_t debug_misr_val2_addr; + uint32_t debug_misr_val3_addr; + uint32_t format_measure_cfg0_addr; + uint32_t format_measure_cfg1_addr; + uint32_t format_measure_cfg2_addr; + uint32_t format_measure0_addr; + uint32_t format_measure1_addr; + uint32_t format_measure2_addr; + uint32_t timestamp_curr0_sof_addr; + uint32_t timestamp_curr1_sof_addr; + uint32_t timestamp_perv0_sof_addr; + uint32_t timestamp_perv1_sof_addr; + uint32_t timestamp_curr0_eof_addr; + uint32_t timestamp_curr1_eof_addr; + uint32_t timestamp_perv0_eof_addr; + uint32_t timestamp_perv1_eof_addr; + uint32_t lut_bank_cfg_addr; + uint32_t batch_id_cfg0_addr; + uint32_t batch_id_cfg1_addr; + uint32_t batch_period_cfg_addr; + uint32_t batch_stream_id_cfg_addr; + uint32_t epoch0_cfg_batch_id0_addr; + uint32_t epoch1_cfg_batch_id0_addr; + uint32_t epoch0_cfg_batch_id1_addr; + uint32_t epoch1_cfg_batch_id1_addr; + uint32_t epoch0_cfg_batch_id2_addr; + uint32_t epoch1_cfg_batch_id2_addr; + uint32_t epoch0_cfg_batch_id3_addr; + uint32_t epoch1_cfg_batch_id3_addr; + uint32_t epoch0_cfg_batch_id4_addr; + uint32_t epoch1_cfg_batch_id4_addr; + uint32_t epoch0_cfg_batch_id5_addr; + uint32_t epoch1_cfg_batch_id5_addr; + uint32_t secure_mask_cfg0; + uint32_t path_batch_status; + uint32_t path_frame_id; + uint32_t cfg2_addr; + uint32_t 
debug_sim_monitor; + + /*Shift Bit Configurations*/ + uint32_t start_mode_master; + uint32_t start_mode_internal; + uint32_t start_mode_global; + uint32_t start_mode_slave; + uint32_t start_mode_shift; + uint32_t start_master_sel_val; + uint32_t start_master_sel_shift; + uint32_t resume_frame_boundary; + uint32_t offline_mode_supported; + uint32_t mipi_pack_supported; + uint32_t packing_fmt_shift_val; + uint32_t plain_fmt_shift_val; + uint32_t plain_alignment_shift_val; + uint32_t crop_v_en_shift_val; + uint32_t crop_h_en_shift_val; + uint32_t drop_v_en_shift_val; + uint32_t drop_h_en_shift_val; + uint32_t pix_store_en_shift_val; + uint32_t early_eof_en_shift_val; + uint32_t bin_h_en_shift_val; + uint32_t bin_v_en_shift_val; + uint32_t bin_en_shift_val; + uint32_t bin_qcfa_en_shift_val; + uint32_t format_measure_en_shift_val; + uint32_t timestamp_en_shift_val; + uint32_t min_hbi_shift_val; + uint32_t start_master_sel_shift_val; + uint32_t bin_pd_en_shift_val; + uint32_t bin_pd_blk_w_shift_val; + uint32_t bin_pd_blk_h_shift_val; + uint32_t bin_pd_detect_x_offset_shift_val; + uint32_t bin_pd_detect_x_end_shift_val; + uint32_t bin_pd_detect_y_offset_shift_val; + uint32_t bin_pd_detect_y_end_shift_val; + uint32_t byte_cntr_en_shift_val; + uint32_t offline_mode_en_shift_val; + uint32_t debug_byte_cntr_rst_shift_val; + uint32_t stripe_loc_shift_val; + uint32_t pix_pattern_shift_val; + uint32_t ccif_violation_en; + uint32_t overflow_ctrl_mode_val; + uint32_t overflow_ctrl_en; + uint32_t lut_bank_0_sel_val; + uint32_t lut_bank_1_sel_val; + uint32_t binning_supported; + uint32_t fatal_err_mask; + uint32_t non_fatal_err_mask; + uint32_t pix_pattern_shift; + uint32_t rup_irq_mask; + uint32_t sof_irq_mask; + uint32_t eof_irq_mask; + uint32_t epoch0_irq_mask; + uint32_t epoch1_irq_mask; + uint32_t rup_aup_mask; + uint32_t rup_mask; + uint32_t aup_mask; + uint32_t rup_aup_set_mask; + uint32_t top_irq_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t epoch0_cfg_val; + 
uint32_t epoch1_cfg_val; + uint32_t epoch0_shift_val; + uint32_t epoch1_shift_val; + uint32_t sof_retiming_dis_shift; + uint32_t capabilities; + uint32_t default_out_format; + bool crop_drop_enable; + bool disable_sof_retime_default; + bool use_master_slave_default; +}; + +struct cam_ife_csid_ver2_common_reg_info { + uint32_t hw_version_addr; + uint32_t cfg0_addr; + uint32_t cfg1_addr; + uint32_t global_cmd_addr; + uint32_t reset_cfg_addr; + uint32_t reset_cmd_addr; + uint32_t rup_aup_cmd_addr; + uint32_t rup_cmd_addr; + uint32_t aup_cmd_addr; + uint32_t offline_cmd_addr; + uint32_t shdr_master_slave_cfg_addr; + uint32_t top_irq_status_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t top_irq_mask_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t top_irq_clear_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t top_irq_set_addr[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t multi_sensor_mode_addr; + uint32_t irq_cmd_addr; + uint32_t buf_done_irq_status_addr; + uint32_t buf_done_irq_mask_addr; + uint32_t buf_done_irq_clear_addr; + uint32_t buf_done_irq_set_addr; + uint32_t test_bus_ctrl; + uint32_t test_bus_debug; + uint32_t drv_cfg0_addr; + uint32_t drv_cfg1_addr; + uint32_t drv_cfg2_addr; + uint32_t debug_drv_0_addr; + uint32_t debug_drv_1_addr; + uint32_t debug_sensor_hbi_irq_vcdt_addr; + uint32_t debug_violation_addr; + uint32_t debug_cfg_addr; + uint32_t debug_err_vec_irq[CAM_IFE_CSID_DEBUG_VEC_ERR_REGS]; + uint32_t debug_err_vec_cfg; + uint32_t debug_err_vec_ts_lb; + uint32_t debug_err_vec_ts_mb; + uint32_t rx_mode_id_cfg1_addr; + + /*Shift Bit Configurations*/ + uint32_t rst_done_shift_val; + uint32_t rst_location_shift_val; + uint32_t rst_mode_shift_val; + uint32_t timestamp_stb_sel_shift_val; + uint32_t frame_id_decode_en_shift_val; + uint32_t vfr_en_shift_val; + uint32_t decode_format_shift_val; + uint32_t decode_format1_shift_val; + uint32_t decode_format2_shift_val; + uint32_t decode_format3_shift_val; + bool decode_format1_supported; + 
uint32_t decode_format_mask; + uint32_t start_mode_shift_val; + uint32_t start_cmd_shift_val; + uint32_t path_en_shift_val; + uint32_t dt_id_shift_val; + uint32_t vc_shift_val; + uint32_t vc_mask; + uint32_t dt_shift_val; + uint32_t dt_mask; + uint32_t crop_shift_val; + uint32_t debug_frm_drop_rst_shift_val; + uint32_t debug_timestamp_rst_shift_val; + uint32_t debug_format_measure_rst_shift_val; + uint32_t debug_misr_rst_shift_val; + uint32_t num_padding_pixels_shift_val; + uint32_t num_padding_rows_shift_val; + uint32_t num_vbi_lines_shift_val; + uint32_t num_hbi_cycles_shift_val; + uint32_t epoch0_line_shift_val; + uint32_t epoch1_line_shift_val; + uint32_t camif_width_shift_val; + uint32_t camif_height_shift_val; + uint32_t batch_id0_shift_val; + uint32_t batch_id1_shift_val; + uint32_t batch_id2_shift_val; + uint32_t batch_id3_shift_val; + uint32_t batch_id4_shift_val; + uint32_t batch_id5_shift_val; + uint32_t batch_id0_period_shift_val; + uint32_t batch_id1_period_shift_val; + uint32_t batch_id2_period_shift_val; + uint32_t batch_id3_period_shift_val; + uint32_t batch_id4_period_shift_val; + uint32_t batch_id5_period_shift_val; + uint32_t stream_id_len_shift_val; + uint32_t stream_id_x_offset_shift_val; + uint32_t stream_id_y_offset_shift_val; + uint32_t multi_vcdt_vc1_shift_val; + uint32_t multi_vcdt_dt1_shift_val; + uint32_t multi_vcdt_dt2_shift_val; + uint32_t multi_vcdt_dt3_shift_val; + uint32_t multi_vcdt_ts_combo_en_shift_val; + uint32_t multi_vcdt_en_shift_val; + uint32_t mup_shift_val; + uint32_t rup_aup_set_shift_val; + uint32_t shdr_slave_ppp_shift; + uint32_t shdr_slave_rdi2_shift; + uint32_t shdr_slave_rdi1_shift; + uint32_t shdr_master_rdi0_shift; + uint32_t shdr_master_slave_en_shift; + uint32_t drv_en_shift; + uint32_t drv_rup_en_shift; + /* config Values */ + uint32_t major_version; + uint32_t minor_version; + uint32_t version_incr; + uint32_t num_udis; + uint32_t num_rdis; + uint32_t num_pix; + uint32_t num_ppp; + uint32_t 
rst_loc_path_only_val; + uint32_t rst_loc_complete_csid_val; + uint32_t rst_mode_frame_boundary_val; + uint32_t rst_mode_immediate_val; + uint32_t rst_cmd_irq_ctrl_only_val; + uint32_t rst_cmd_sw_reset_complete_val; + uint32_t rst_cmd_hw_reset_complete_val; + uint32_t vfr_supported; + uint32_t frame_id_dec_supported; + uint32_t drop_supported; + uint32_t multi_vcdt_supported; + uint32_t timestamp_strobe_val; + uint32_t overflow_ctrl_mode_val; + uint32_t overflow_ctrl_en; + uint32_t early_eof_supported; + uint32_t global_reset; + uint32_t aup_rup_supported; + uint32_t only_master_rup; + uint32_t sfe_ipp_input_rdi_res; + uint32_t phy_sel_base_idx; + uint32_t num_dt_supported; + bool timestamp_enabled_in_cfg0; + bool camif_irq_support; + bool ts_comb_vcdt_en; + bool direct_cid_config; + uint32_t drv_rup_en_val_map[CAM_IFE_PIX_PATH_RES_MAX]; + uint32_t drv_path_idle_en_val_map[CAM_ISP_MAX_PATHS]; + uint32_t path_domain_id_cfg0; + uint32_t path_domain_id_cfg1; + uint32_t path_domain_id_cfg2; + + /* Masks */ + uint32_t ts_comb_vcdt_mask; + uint32_t pxl_cnt_mask; + uint32_t line_cnt_mask; + uint32_t hblank_max_mask; + uint32_t hblank_min_mask; + uint32_t epoch0_line_mask; + uint32_t epoch1_line_mask; + uint32_t camif_width_mask; + uint32_t camif_height_mask; + uint32_t crop_pix_start_mask; + uint32_t crop_pix_end_mask; + uint32_t crop_line_start_mask; + uint32_t crop_line_end_mask; + uint32_t format_measure_height_mask_val; + uint32_t format_measure_height_shift_val; + uint32_t format_measure_width_mask_val; + uint32_t format_measure_width_shift_val; + uint32_t format_measure_max_hbi_shift; + uint32_t format_measure_min_hbi_mask; + uint32_t measure_en_hbi_vbi_cnt_mask; + uint32_t measure_pixel_line_en_mask; + uint32_t ipp_irq_mask_all; + uint32_t rdi_irq_mask_all; + uint32_t ppp_irq_mask_all; + uint32_t udi_irq_mask_all; + uint32_t top_err_irq_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t top_reset_irq_mask[CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + uint32_t 
top_buf_done_irq_mask; + uint32_t top_top2_irq_mask; + uint32_t epoch_factor; + uint32_t decode_format_payload_only; + uint32_t capabilities; + uint32_t sync_reset_ctrl_testbus_val; +}; + +/** + * struct cam_ife_csid_secure_info: Contains all relevant info to be + * programmed for targets supporting + * this feature + * @phy_sel: Intermediate value for this mask. CSID passes + * phy_sel.This variable's position at the top is to + * be left unchanged, to have it be used correctly + * in the cam_subdev_notify_message callback for + * csiphy + * @lane_cfg: This value is similar to lane_assign in the PHY + * driver, and is used to identify the particular + * PHY instance with which this IFE session is + * connected to. + * @vc_mask: Virtual channel masks (Unused for mobile usecase) + * @csid_hw_idx_mask: Bit position denoting CSID(s) in use for secure + * session + * @cdm_hw_idx_mask: Bit position denoting CDM in use for secure + * session + */ +struct cam_ife_csid_secure_info { + uint32_t phy_sel; + uint32_t lane_cfg; + uint64_t vc_mask; + uint32_t csid_hw_idx_mask; + uint32_t cdm_hw_idx_mask; +}; + +struct cam_ife_csid_ver2_reg_info { + struct cam_irq_controller_reg_info *top_irq_reg_info; + struct cam_irq_controller_reg_info *rx_irq_reg_info; + struct cam_irq_controller_reg_info *buf_done_irq_reg_info; + struct cam_irq_controller_reg_info *path_irq_reg_info[ + CAM_IFE_PIX_PATH_RES_MAX]; + const struct cam_ife_csid_ver2_common_reg_info *cmn_reg; + const struct cam_ife_csid_ver2_csi2_rx_reg_info *csi2_reg; + const struct cam_ife_csid_ver2_path_reg_info *path_reg[ + CAM_IFE_PIX_PATH_RES_MAX]; + const struct cam_ife_csid_ver2_top_reg_info *top_reg; + const struct cam_ife_csid_ver2_mc_reg_info *ipp_mc_reg; + const uint32_t need_top_cfg; + const uint32_t csid_cust_node_map[ + CAM_IFE_CSID_HW_NUM_MAX]; + const int input_core_sel[ + CAM_IFE_CSID_HW_NUM_MAX][CAM_IFE_CSID_INPUT_CORE_SEL_MAX]; + const struct cam_ife_csid_top_irq_desc (*top_irq_desc)[][32]; + const struct 
cam_ife_csid_irq_desc (*rx_irq_desc)[][32]; + const char* (*debug_vec_desc)[][32]; + const struct cam_ife_csid_irq_desc *path_irq_desc; + const uint32_t *num_top_err_irqs; + const uint32_t *num_rx_err_irqs; + const uint32_t num_path_err_irqs; + const struct cam_ife_csid_top_debug_mask *top_debug_mask; + const struct cam_ife_csid_rx_debug_mask *rx_debug_mask; + const uint32_t num_top_regs; + const uint32_t num_rx_regs; + bool is_ife_sfe_mapped; + bool dynamic_drv_supported; +}; + +/* + * struct cam_ife_csid_ver2_hw: place holder for csid hw + * + * @path_res: array of path resources + * @cid_data: cid data + * @rx_cfg: rx configuration + * @flags: flags + * @irq_complete: complete variable for reset irq + * @debug_info: Debug info to capture debug info + * @timestamp: Timestamp info + * @timestamp: Timestamp info + * @rx_evt_payload Payload for rx events + * @path_evt_payload Payload for path events + * @rx_free_payload_list: Free Payload list for rx events + * @free_payload_list: Free Payload list for rx events + * @lock_state : spin lock + * @payload_lock: spin lock for path payload + * @rx_payload_lock: spin lock for rx payload + * @csid_irq_controller: common csid irq controller + * @buf_done_irq_controller: buf done irq controller + * @hw_info: hw info + * @core_info: csid core info + * @token: Context private of ife hw manager + * @event_cb: Event cb to ife hw manager + * @counters: counters used in csid hw + * @log_buf: Log Buffer to dump info + * @clk_rate: clk rate for csid hw + * @res_type: cur res type for active hw + * @dual_core_idx: core idx in case of dual csid + * @tasklet: Tasklet for irq events + * @reset_irq_handle: Reset irq handle + * @buf_done_irq_handle: Buf done irq handle + * @top_err_irq_handle: Top Err IRQ handle + * @top_info_irq_handle: Top Info IRQ handle + * @top_mc_irq_handle: Top multi context irq handle + * @sync_mode: Master/Slave modes + * @mup: MUP for incoming VC of next frame + * @discard_frame_per_path: Count of paths dropping 
initial frames + * @drv_init_done: Indicates if drv init config is done + * @is_drv_config_en: If drv config is enabled + * @standby_asserted: Standby was asserted at stream off + * @crc_error_threshold: CRC error threshold to be treated as fatal error + * + */ +struct cam_ife_csid_ver2_hw { + struct cam_isp_resource_node path_res + [CAM_IFE_PIX_PATH_RES_MAX]; + struct cam_ife_csid_cid_data cid_data[CAM_IFE_CSID_CID_MAX]; + struct cam_ife_csid_ver2_top_cfg top_cfg; + struct cam_ife_csid_ver2_rx_cfg rx_cfg; + struct cam_ife_csid_hw_counters counters; + struct cam_ife_csid_hw_flags flags; + struct cam_ife_csid_ver2_debug_info debug_info; + struct cam_ife_csid_timestamp timestamp; + struct cam_ife_csid_ver2_evt_payload rx_evt_payload[ + CAM_IFE_CSID_VER2_PAYLOAD_MAX]; + struct cam_ife_csid_ver2_evt_payload path_evt_payload[ + CAM_IFE_CSID_VER2_PAYLOAD_MAX]; + struct list_head rx_free_payload_list; + struct list_head path_free_payload_list; + spinlock_t lock_state; + spinlock_t path_payload_lock; + spinlock_t rx_payload_lock; + void *top_irq_controller + [CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + void *rx_irq_controller + [CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX]; + void *path_irq_controller[CAM_IFE_PIX_PATH_RES_MAX]; + void *buf_done_irq_controller; + struct cam_hw_intf *hw_intf; + struct cam_hw_info *hw_info; + struct cam_ife_csid_core_info *core_info; + void *token; + cam_hw_mgr_event_cb_func event_cb; + uint8_t log_buf + [CAM_IFE_CSID_LOG_BUF_LEN]; + uint64_t clk_rate; + uint32_t res_type; + uint32_t dual_core_idx; + void *tasklet; + int reset_irq_handle; + int buf_done_irq_handle; + int top_err_irq_handle + [CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + int top_info_irq_handle + [CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX]; + int top_mc_irq_handle; + int top_top2_irq_handle; + enum cam_isp_hw_sync_mode sync_mode; + uint32_t mup; + atomic_t discard_frame_per_path; + bool drv_init_done; + bool is_drv_config_en; + bool standby_asserted; + uint32_t crc_error_threshold; +}; + +/* + * 
struct cam_ife_csid_ver2_res_mini_dump: CSID res mini dump placeholder + * @res_id: Res id + * @res_name: Res name + * @path_cfg: path configuration + */ +struct cam_ife_csid_ver2_res_mini_dump { + uint32_t res_id; + uint8_t res_name[CAM_ISP_RES_NAME_LEN]; + struct cam_ife_csid_ver2_path_cfg path_cfg; +}; + +/* + * struct cam_ife_csid_ver2_mini_dump_data: CSID mini dump placeholder + * + * @res: Mini dump data for res + * @flags: Flags + * @rx_cfg: Rx configuration + * @cid_data: CID data + * @clk_rate: Clock Rate + * @hw_state: hw state + */ +struct cam_ife_csid_ver2_mini_dump_data { + struct cam_ife_csid_ver2_res_mini_dump res[CAM_IFE_PIX_PATH_RES_MAX]; + struct cam_ife_csid_hw_flags flags; + struct cam_ife_csid_ver2_rx_cfg rx_cfg; + struct cam_ife_csid_cid_data cid_data[CAM_IFE_CSID_CID_MAX]; + uint64_t clk_rate; + uint8_t hw_state; +}; + +int cam_ife_csid_hw_ver2_init(struct cam_hw_intf *csid_hw_intf, + struct cam_ife_csid_core_info *csid_core_info, + bool is_custom); + +int cam_ife_csid_hw_ver2_deinit(struct cam_hw_info *hw_priv); +void cam_ife_csid_ver2_print_illegal_programming_irq_status(void *csid_hw, void *res); +void cam_ife_csid_ver2_print_format_measure_info(void *csid_hw, void *res); +void cam_ife_csid_hw_ver2_mup_mismatch_handler(void *csid_hw, void *res); +void cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler(void *csid_hw); +void cam_ife_csid_hw_ver2_drv_err_handler(void *csid); + + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite1080.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite1080.h new file mode 100644 index 0000000000..e59b827155 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite1080.h @@ -0,0 +1,1347 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_LITE_1080_H_ +#define _CAM_IFE_CSID_LITE_1080_H_ + +static const struct cam_ife_csid_irq_desc cam_ife_csid_lite_1080_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "DPHY_PH_ECC_SEC", + }, + { + .bitmask = BIT(9), + .desc = "SENSOR_MODE_ID_CHANGE", + }, + {0}, + {0}, + { + .bitmask = BIT(12), + .desc = + "DL0_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 0 - Check phy/sensor config", + }, + { + .bitmask = BIT(13), + .desc = + "DL1_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 1 - Check phy/sensor config", + }, + { + .bitmask = BIT(14), + .desc = + "DL2_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 2 - Check phy/sensor config", + }, + { + .bitmask = BIT(15), + .desc = + "DL3_EOT_LOST, Sensor: Issue is with the timing signals received in the cphy packet on lane 3 - Check phy/sensor config", + }, + { + .bitmask = BIT(16), + .desc = + "DL0_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 0 - Check phy/sensor config", + }, + { + .bitmask = BIT(17), + .desc = + "DL1_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 1 - Check phy/sensor config", + }, + { + .bitmask = BIT(18), + .desc = + "DL2_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 2 - Check phy/sensor config", + }, + { + .bitmask = BIT(19), + .desc = + "DL3_SOT_LOST, Sensor: Timing signals are missed received in the cphy packet on lane 3 - Check phy/sensor 
config", + }, + { + .bitmask = BIT(20), + .desc = + "DL0_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 0 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(21), + .desc = + "DL1_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 1 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(22), + .desc = + "DL2_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 2 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(23), + .desc = + "DL3_FIFO_OVERFLOW, System: Data has been lost when transferring from PHY to CSID on Lane 3 - Check PHY clock, CSID clock and possible skew among the data lanes", + }, + { + .bitmask = BIT(24), + .desc = + "CPHY_PH_CRC, Sensor: All CPHY packet headers received are corrupted - Check phy/sensor config", + }, + { + .bitmask = BIT(25), + .desc = + "PAYLOAD_CRC, Sensor: The calculated CRC of a long packet does not match the transmitted (expected) CRC, possible corruption - Check phy/sensor config", + }, + { + .bitmask = BIT(26), + .desc = + "DPHY_PH_ECC_DED, Sensor: A short or long packet is corrupted and cannot be recovered - Check phy/sensor config", + }, + { + .bitmask = BIT(27), + .desc = + "MMAPPED_VC_DT, SW: A long packet has a VC_DT combination that is configured for more than one IPP or RDIs", + }, + { + .bitmask = BIT(28), + .desc = + "UNMAPPED_VC_DT, Sensor: A long packet has a VC_DT combination that is not configured for IPP or RDIs", + }, + { + .bitmask = BIT(29), + .desc = + "STREAM_UNDERFLOW, Sensor: Long packet payload size is less than payload header size, resulting a corrupted frame - Perform PHY Tuning/Check sensor limitations", + }, + {0}, + { + .bitmask = BIT(31), + .desc = "CSI2_RX_IRQ_STATUS_2", + }, + }, + { + { + .bitmask = BIT(0), + .desc = + "LONG_PKT, Debug: The header of the first long 
pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(1), + .desc = + "SHORT_PKT, Debug: The header of the first short pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(2), + .desc = + "CPHY_PKT_HDR, Debug: The header of the first cphy pkt matching the configured vc-dt has been captured", + }, + { + .bitmask = BIT(3), + .desc = "Illegal programming for next frame ID config", + }, + }, +}; + +static const uint32_t cam_ife_csid_lite_1080_num_rx_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_lite_1080_rx_irq_desc[0]), + ARRAY_SIZE(cam_ife_csid_lite_1080_rx_irq_desc[1]), +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_lite_1080_path_irq_desc[] = { + { + .bitmask = BIT(0), + .err_type = CAM_ISP_HW_ERROR_CSID_FATAL, + .irq_name = "ILLEGAL_PROGRAMMING", + .desc = "SW: Illegal programming sequence", + .debug = "Check the following possiblities:", + .err_handler = cam_ife_csid_ver2_print_illegal_programming_irq_status, + }, + {0}, + { + .bitmask = BIT(2), + .irq_name = "INFO_DATA_FIFO_FULL", + }, + { + .bitmask = BIT(3), + .irq_name = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .irq_name = "CAMIF_SOF", + }, + {0}, + {0}, + { + .bitmask = BIT(7), + .err_type = CAM_ISP_HW_ERROR_CSID_ILLEGAL_DT_SWITCH, + .irq_name = "ILLEGAL_DT_SWITCH", + .desc = "SW: Change in DT without MUP", + .debug = "Check sensor configuration", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + {0}, + { + .bitmask = BIT(9), + .irq_name = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .irq_name = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .irq_name = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .irq_name = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .irq_name = "ERROR_PIX_COUNT", + .desc = "SW: Mismatch in expected versus received number of pixels per line", + .debug = "Check SW config/sensor stream", + .err_handler = 
cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .irq_name = "ERROR_LINE_COUNT", + .desc = "SW: Mismatch in expected versus received number of lines", + .debug = "Check SW config/sensor stream", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .irq_name = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(16), + .irq_name = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(17), + .irq_name = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .err_type = CAM_ISP_HW_ERROR_CSID_CAMIF_FRAME_DROP, + .irq_name = "CAMIF_FRAME_DROP", + .desc = + "Sensor: The pre CAMIF frame drop would drop a frame in case the new frame starts prior to the end of the previous frame", + .debug = "Slower downstream processing or faster frame generation from sensor", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .irq_name = "OVERFLOW_RECOVERY", + .desc = "Detected by the overflow recovery block", + .debug = "Backpressure downstream", + }, + { + .bitmask = BIT(20), + .irq_name = "ERROR_REC_CCIF_VIOLATION", + .desc = "Output CCIF has a violation with respect to frame timing", + }, + { + .bitmask = BIT(21), + .irq_name = "CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .irq_name = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .irq_name = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .irq_name = "ILLEGAL_BATCH_ID", + .desc = "SW: Decoded frame ID does not match with any of the programmed batch IDs", + .debug = "Check frame ID and all available batch IDs", + }, + { + .bitmask = BIT(25), + .irq_name = "BATCH_END_MISSING_VIOLATION", + .desc = "SW: Input number of frames is not a multiple of the batch size", + .debug = "Check the configured pattern/period for batching", + }, + { + .bitmask = BIT(26), + .irq_name = "UNBOUNDED_FRAME", + .desc = "Sensor: Frame end or frame start is missing", + .debug = "Check the settle count in sensor driver XML", + }, + { + 
.bitmask = BIT(27), + .irq_name = "ERROR_SER_INVALID_CTXT", + .desc = "Indicates invalid context on CCIF", + + }, + { + .bitmask = BIT(28), + .irq_name = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .desc = + "Sensor/SW: The programmed MUP is out of sync with the VC of the incoming frame", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .irq_name = "CCIF_VIOLATION", + .desc = + "The output CCIF from the serializer has a violation with respect to frame timing", + }, +}; + +static const struct cam_ife_csid_top_irq_desc cam_ife_csid_lite_1080_top_irq_desc[][32] = { + { + { + .bitmask = BIT(1), + .err_type = CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR, + .err_name = "FATAL_SENSOR_SWITCHING_IRQ", + .desc = + "Sensor/SW: Minimum VBI period between dynamically switching between two sensor modes was either violated or the downstream pipe was not active when the switch was made", + }, + }, + { + { + .bitmask = BIT(5), + .err_name = "ERROR_SENSOR_HBI", + .desc = "Sensor: Sensor HBI is less than expected HBI", + .debug = "Check sensor configuration", + }, + }, +}; + +static const uint32_t cam_ife_csid_lite_1080_num_top_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_lite_1080_top_irq_desc[0]), + ARRAY_SIZE(cam_ife_csid_lite_1080_top_irq_desc[1]), +}; + +static const char *cam_ife_csid_lite_1080_debug_vec_desc[][32] = { + { + "ERROR_UNBOUNDED_FRAME_RDI1", + "ERROR_SER_INVALID_CTXT_RDI1", + "ERROR_SER_CCIF_VIOLATION_RDI1", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI1", + "ERROR_REC_FRAME_DROP_RDI1", + "ERROR_REC_OVERFLOW_RDI0", + "ERROR_CAMIF_CCIF_VIOLATION_RDI0", + "ERROR_ILLEGAL_BATCH_ID_RDI0", + "ERROR_UNBOUNDED_FRAME_RDI0", + "ERROR_SER_INVALID_CTXT_RDI0", + "ERROR_SER_CCIF_VIOLATION_RDI0", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI0", + "ERROR_REC_FRAME_DROP_RDI0", + "", + "", + "", + "", + "", + "", + "", + "", + "ERROR_DL0_FIFO_OVERFLOW", + "ERROR_DL1_FIFO_OVERFLOW", + "ERROR_DL2_FIFO_OVERFLOW", + "ERROR_DL3_FIFO_OVERFLOW", + 
"ERROR_CPHY_PH_CRC", + "ERROR_DPHY_PH_ECC_DED", + "ERROR_STREAM_UNDERFLOW", + "ERROR_NO_VOTE_DN", + "ERROR_VOTE_UP_LATE", + "ERROR_RDI_LINE_BUFFER_CONFLICT", + "ERROR_SENSOR_HBI" + }, + { + "ERROR_UNBOUNDED_FRAME_IPP0", + "ERROR_SER_INVALID_CTXT_IPP0", + "ERROR_SER_CCIF_VIOLATION_IPP0", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_IPP0", + "ERROR_REC_FRAME_DROP_IPP0", + "", + "", + "", + "", + "", + "", + "", + "", + "ERROR_REC_OVERFLOW_RDI3", + "ERROR_CAMIF_CCIF_VIOLATION_RDI3", + "ERROR_ILLEGAL_BATCH_ID_RDI3", + "ERROR_UNBOUNDED_FRAME_RDI3", + "ERROR_SER_INVALID_CTXT_RDI3", + "ERROR_SER_CCIF_VIOLATION_RDI3", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI3", + "ERROR_REC_FRAME_DROP_RDI3", + "ERROR_REC_OVERFLOW_RDI2", + "ERROR_CAMIF_CCIF_VIOLATION_RDI2", + "ERROR_ILLEGAL_BATCH_ID_RDI2", + "ERROR_UNBOUNDED_FRAME_RDI2", + "ERROR_SER_INVALID_CTXT_RDI2", + "ERROR_SER_CCIF_VIOLATION_RDI2", + "FATAL_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP_RDI2", + "ERROR_REC_FRAME_DROP_RDI2", + "ERROR_REC_OVERFLOW_RDI1", + "ERROR_CAMIF_CCIF_VIOLATION_RDI1", + "ERROR_ILLEGAL_BATCH_ID_RDI1" + }, + { + "ERROR_REC_OVERFLOW_IPP0", + "ERROR_CAMIF_CCIF_VIOLATION_IPP0", + "ERROR_ILLEGAL_BATCH_ID_IPP0", + } +}; + +static struct cam_irq_register_set cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_MAX] = { + /* Top */ + { + .mask_reg_offset = 0x00000184, + .clear_reg_offset = 0x00000188, + .status_reg_offset = 0x00000180, + .set_reg_offset = 0x0000018C, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + /* RX */ + { + .mask_reg_offset = 0x000001B4, + .clear_reg_offset = 0x000001B8, + .status_reg_offset = 0x000001B0, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x00000228, + .clear_reg_offset = 0x0000022C, + .status_reg_offset = 0x00000224, + }, + /* RDI1 */ + { + .mask_reg_offset = 0x00000238, + .clear_reg_offset = 0x0000023C, + .status_reg_offset = 0x00000234, + }, + /* RDI2 */ + { + .mask_reg_offset = 0x00000248, + .clear_reg_offset = 0x0000024C, + .status_reg_offset = 0x00000244, 
+ }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000258, + .clear_reg_offset = 0x0000025C, + .status_reg_offset = 0x00000254, + }, + {}, /* no RDI4 */ + /* IPP */ + { + .mask_reg_offset = 0x000001E8, + .clear_reg_offset = 0x000001EC, + .status_reg_offset = 0x000001E4, + }, + {0}, /* no PPP */ + /* UDI_0 */ + {0}, + /* UDI_1 */ + {0}, + /* UDI_2 */ + {0}, + /* Top_2 */ + { + .mask_reg_offset = 0x00000194, + .clear_reg_offset = 0x00000198, + .status_reg_offset = 0x00000190, + .set_reg_offset = 0x0000019C, + }, + /* RX_2 */ + { + .mask_reg_offset = 0x000001C4, + .clear_reg_offset = 0x000001C8, + .status_reg_offset = 0x000001C0, + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_1080_top_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000110, + .global_set_bitmask = 0x00000010, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + }, + +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_1080_rx_irq_reg_info[] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], /* RX */ + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX_2], + .global_irq_cmd_offset = 0, /* intentionally set to zero */ + + }, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_1080_path_irq_reg_info[6] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + 
.irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, + }, + {}, /* no RDI4 */ + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_1080_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, + }, +}; + +static struct cam_irq_register_set cam_ife_csid_lite_1080_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x000001A4, + .clear_reg_offset = 0x000001A8, + .status_reg_offset = 0x000001A0, + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_lite_1080_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_lite_1080_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_lite_1080_cmn_reg_info = { + .hw_version_addr = 0x0000, + .cfg0_addr = 0x0100, + .global_cmd_addr = 0x0104, + .reset_cfg_addr = 0x0108, + .reset_cmd_addr = 0x010C, + .irq_cmd_addr = 0x0110, + .rup_cmd_addr = 0x0114, + .aup_cmd_addr = 0x0118, + .rup_aup_cmd_addr = 0x011C, + .top_irq_status_addr = {0x0180, 0x0190}, + .top_irq_mask_addr = {0x0184, 0x0194}, + .top_irq_clear_addr = {0x0188, 0x0198}, + .top_irq_set_addr = {0x018C, 0x019C}, + .buf_done_irq_status_addr = 0x01A0, + .buf_done_irq_mask_addr = 0x01A4, + .buf_done_irq_clear_addr = 0x01A8, + .buf_done_irq_set_addr = 0x01AC, + .debug_sensor_hbi_irq_vcdt_addr = 0x29C, + .debug_violation_addr = 0x7E0, + .debug_cfg_addr = 0x7E4, + .debug_err_vec_irq = {0x2D4, 0x2D8, 0x2DC}, + .debug_err_vec_cfg = 0x2D0, + .debug_err_vec_ts_lb = 0x2E0, + .debug_err_vec_ts_mb = 0x2E4, + + /*configurations */ + .major_version = 10, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 4, + .num_pix = 1, + .num_ppp = 0, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .vc_shift_val = 22, + .vc_mask = 0x1F, 
+ .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + .decode_format2_shift_val = 0, + .decode_format3_shift_val = 4, + .decode_format_mask = 0xF, + .decode_format1_supported = true, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_dt2_shift_val = 8, + .multi_vcdt_dt3_shift_val = 16, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .mup_shift_val = 28, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .num_dt_supported = 4, + .ts_comb_vcdt_en = true, + .direct_cid_config = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .top_err_irq_mask = {0x00000002, 0x20}, + .rst_loc_path_only_val = 0x0, + .rst_location_shift_val = 4, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .top_reset_irq_mask = {0x1,}, + .top_buf_done_irq_mask = 0x2000, + .decode_format_payload_only = 0xF, + .phy_sel_base_idx = 1, + .timestamp_enabled_in_cfg0 = true, + .camif_irq_support = true, + .capabilities = CAM_IFE_CSID_CAP_SKIP_PATH_CFG1 | + CAM_IFE_CSID_CAP_SPLIT_RUP_AUP | + 
CAM_IFE_CSID_CAP_SKIP_EPOCH_CFG | + CAM_IFE_CSID_CAP_DEBUG_ERR_VEC, + + .top_top2_irq_mask = 0x80000000, +}; + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + cam_ife_csid_lite_1080_csi2_reg_info = { + .irq_status_addr = {0x01B0, 0x01C0}, + .irq_mask_addr = {0x01B4, 0x01C4}, + .irq_clear_addr = {0x01B8, 0x01C8}, + .irq_set_addr = {0x01BC, 0x01CC}, + /*CSI2 rx control */ + .cfg0_addr = 0x0880, + .cfg1_addr = 0x0884, + .capture_ctrl_addr = 0x0888, + .rst_strobes_addr = 0x088C, + .cap_unmap_long_pkt_hdr_0_addr = 0x0890, + .cap_unmap_long_pkt_hdr_1_addr = 0x0894, + .captured_short_pkt_0_addr = 0x0898, + .captured_short_pkt_1_addr = 0x089C, + .captured_long_pkt_0_addr = 0x08A0, + .captured_long_pkt_1_addr = 0x08A4, + .captured_long_pkt_ftr_addr = 0x08A8, + .captured_cphy_pkt_hdr_addr = 0x08AC, + .lane0_misr_addr = 0x08B0, + .lane1_misr_addr = 0x08B4, + .lane2_misr_addr = 0x08B8, + .lane3_misr_addr = 0x08BC, + .total_pkts_rcvd_addr = 0x08C0, + .stats_ecc_addr = 0x08C4, + .total_crc_err_addr = 0x08C8, + .de_scramble_type3_cfg0_addr = 0x08CC, + .de_scramble_type3_cfg1_addr = 0x08D0, + .de_scramble_type2_cfg0_addr = 0x08D4, + .de_scramble_type2_cfg1_addr = 0x08D8, + .de_scramble_type1_cfg0_addr = 0x08DC, + .de_scramble_type1_cfg1_addr = 0x08E0, + .de_scramble_type0_cfg0_addr = 0x08E4, + .de_scramble_type0_cfg1_addr = 0x08E8, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, 
+ .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + .phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, + .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .rup_aup_latch_shift = 13, + .rup_aup_latch_supported = true, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .fatal_err_mask = {0x25fff000, 0x0}, + .part_fatal_err_mask = {0x02000000, 0x0}, + .non_fatal_err_mask = {0x08000000, 0x0}, + .top_irq_mask = {0x4, 0x0}, + .rx_rx2_irq_mask = 0x80000000, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_1080_ipp_reg_info = { + .irq_status_addr = 0x01E4, + .irq_mask_addr = 0x01E8, + .irq_clear_addr = 0x01EC, + .irq_set_addr = 0x01F0, + .cfg0_addr = 0x0C80, + .ctrl_addr = 0x0C88, + .debug_clr_cmd_addr = 0x0C8C, + .multi_vcdt_cfg0_addr = 0x0C90, + .multi_vcdt_cfg1_addr = 0x0C84, + .cfg1_addr = 0, + .err_recovery_cfg0_addr = 0, + .err_recovery_cfg1_addr = 0, + .err_recovery_cfg2_addr = 0, + .camif_frame_cfg_addr = 0x0C94, + .epoch_irq_cfg_addr = 0, + .epoch0_subsample_ptrn_addr = 0, + .epoch1_subsample_ptrn_addr = 0, + .debug_rup_aup_status = 0x0C98, + .debug_camif_1_addr = 0x0C9C, + .debug_camif_0_addr = 0x0CA0, + .debug_halt_status_addr = 0x0CA4, + .debug_misr_val0_addr = 0x0CA8, + .debug_misr_val1_addr = 0x0CAC, + .debug_misr_val2_addr = 0x0CB0, + .debug_misr_val3_addr = 0x0CB4, + .hcrop_addr = 0, + .vcrop_addr = 0, + .pix_drop_pattern_addr = 0, + .pix_drop_period_addr = 0, + .line_drop_pattern_addr = 0, + .line_drop_period_addr = 0, + .frm_drop_pattern_addr = 0x0CB8, + .frm_drop_period_addr = 0x0CBC, + .irq_subsample_pattern_addr = 0x0CC0, + .irq_subsample_period_addr = 0x0CC4, + .format_measure_cfg0_addr = 0x0CC8, + .format_measure_cfg1_addr = 0x0CCC, + .format_measure_cfg2_addr = 0x0CD0, + .format_measure0_addr = 0x0CD4, + .format_measure1_addr = 
0x0CD8, + .format_measure2_addr = 0x0CDC, + .timestamp_curr0_sof_addr = 0x0CE0, + .timestamp_curr1_sof_addr = 0x0CE4, + .timestamp_perv0_sof_addr = 0x0CE8, + .timestamp_perv1_sof_addr = 0x0CEC, + .timestamp_curr0_eof_addr = 0x0CF0, + .timestamp_curr1_eof_addr = 0x0CF4, + .timestamp_perv0_eof_addr = 0x0CF8, + .timestamp_perv1_eof_addr = 0x0CFC, + .batch_period_cfg_addr = 0x0D00, + .batch_stream_id_cfg_addr = 0x0D04, + .epoch0_cfg_batch_id0_addr = 0x0D10, + .epoch1_cfg_batch_id0_addr = 0x0D14, + .epoch0_cfg_batch_id1_addr = 0x0D18, + .epoch1_cfg_batch_id1_addr = 0x0D1C, + .epoch0_cfg_batch_id2_addr = 0x0D20, + .epoch1_cfg_batch_id2_addr = 0x0D24, + .epoch0_cfg_batch_id3_addr = 0x0D28, + .epoch1_cfg_batch_id3_addr = 0x0D2C, + .epoch0_cfg_batch_id4_addr = 0x0D30, + .epoch1_cfg_batch_id4_addr = 0x0D34, + .epoch0_cfg_batch_id5_addr = 0x0D38, + .epoch1_cfg_batch_id5_addr = 0x0D3C, + + /* configurations */ + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .resume_frame_boundary = 1, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 14, + .early_eof_en_shift_val = 16, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x1, + .aup_mask = 0x1, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x100,}, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_1080_rdi_0_reg_info = { + .irq_status_addr = 0x0224, + .irq_mask_addr = 0x0228, + .irq_clear_addr = 0x022C, + 
.irq_set_addr = 0x0230, + .cfg0_addr = 0x3080, + .ctrl_addr = 0x3088, + .debug_clr_cmd_addr = 0x308C, + .multi_vcdt_cfg0_addr = 0x3090, + .multi_vcdt_cfg1_addr = 0x3084, + .cfg1_addr = 0x3094, + .debug_byte_cntr_ping_addr = 0x30A8, + .debug_byte_cntr_pong_addr = 0x30AC, + .camif_frame_cfg_addr = 0x30B0, + .epoch_irq_cfg_addr = 0x30B4, + .epoch0_subsample_ptrn_addr = 0x30B8, + .epoch1_subsample_ptrn_addr = 0x30BC, + .debug_rup_aup_status = 0x30C0, + .debug_camif_1_addr = 0x30C4, + .debug_camif_0_addr = 0x30C8, + .frm_drop_pattern_addr = 0x30CC, + .frm_drop_period_addr = 0x30D0, + .irq_subsample_pattern_addr = 0x30D4, + .irq_subsample_period_addr = 0x30D8, + .hcrop_addr = 0x30DC, + .vcrop_addr = 0x30E0, + .pix_drop_pattern_addr = 0x30E4, + .pix_drop_period_addr = 0x30E8, + .line_drop_pattern_addr = 0x30EC, + .line_drop_period_addr = 0x30F0, + .debug_halt_status_addr = 0x30F8, + .debug_misr_val0_addr = 0x30FC, + .debug_misr_val1_addr = 0x3100, + .debug_misr_val2_addr = 0x3104, + .debug_misr_val3_addr = 0x3108, + .format_measure_cfg0_addr = 0x310C, + .format_measure_cfg1_addr = 0x3110, + .format_measure_cfg2_addr = 0x3114, + .format_measure0_addr = 0x3118, + .format_measure1_addr = 0x311C, + .format_measure2_addr = 0x3120, + .timestamp_curr0_sof_addr = 0x3124, + .timestamp_curr1_sof_addr = 0x3128, + .timestamp_perv0_sof_addr = 0x312C, + .timestamp_perv1_sof_addr = 0x3130, + .timestamp_curr0_eof_addr = 0x3134, + .timestamp_curr1_eof_addr = 0x3138, + .timestamp_perv0_eof_addr = 0x313C, + .timestamp_perv1_eof_addr = 0x3140, + .batch_period_cfg_addr = 0, + .batch_stream_id_cfg_addr = 0x3150, + .epoch0_cfg_batch_id0_addr = 0x3154, + .epoch1_cfg_batch_id0_addr = 0x3158, + .epoch0_cfg_batch_id1_addr = 0x315C, + .epoch1_cfg_batch_id1_addr = 0x3160, + .epoch0_cfg_batch_id2_addr = 0x3164, + .epoch1_cfg_batch_id2_addr = 0x3168, + .epoch0_cfg_batch_id3_addr = 0x316C, + .epoch1_cfg_batch_id3_addr = 0x3170, + .epoch0_cfg_batch_id4_addr = 0x3174, + .epoch1_cfg_batch_id4_addr = 
0x3178, + .epoch0_cfg_batch_id5_addr = 0x317C, + .epoch1_cfg_batch_id5_addr = 0x3180, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x100, + .aup_mask = 0x100, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x10000,}, + .rup_aup_mask = 0x100010, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_1080_rdi_1_reg_info = { + .irq_status_addr = 0x0234, + .irq_mask_addr = 0x0238, + .irq_clear_addr = 0x023C, + .irq_set_addr = 0x0240, + .cfg0_addr = 0x3280, + .ctrl_addr = 0x3288, + .debug_clr_cmd_addr = 0x328C, + .multi_vcdt_cfg0_addr = 0x3290, + .multi_vcdt_cfg1_addr = 0x3284, + .cfg1_addr = 0x3294, + .debug_byte_cntr_ping_addr = 0x32A8, + .debug_byte_cntr_pong_addr = 0x32AC, + .camif_frame_cfg_addr = 0x32B0, + .epoch_irq_cfg_addr = 0x32B4, + .epoch0_subsample_ptrn_addr = 0x32B8, + .epoch1_subsample_ptrn_addr = 0x32BC, + .debug_rup_aup_status = 0x32C0, + .debug_camif_1_addr = 0x32C4, + .debug_camif_0_addr = 0x32C8, + .frm_drop_pattern_addr = 0x32CC, + .frm_drop_period_addr = 0x32D0, + .irq_subsample_pattern_addr = 0x32D4, + .irq_subsample_period_addr = 0x32D8, + .hcrop_addr = 0x32DC, + .vcrop_addr = 0x32E0, + .pix_drop_pattern_addr = 0x32E4, + .pix_drop_period_addr = 0x32E8, + .line_drop_pattern_addr = 0x32EC, + .line_drop_period_addr = 0x32F0, + 
.debug_halt_status_addr = 0x32F8, + .debug_misr_val0_addr = 0x32FC, + .debug_misr_val1_addr = 0x3300, + .debug_misr_val2_addr = 0x3304, + .debug_misr_val3_addr = 0x3308, + .format_measure_cfg0_addr = 0x330C, + .format_measure_cfg1_addr = 0x3310, + .format_measure_cfg2_addr = 0x3314, + .format_measure0_addr = 0x3318, + .format_measure1_addr = 0x331C, + .format_measure2_addr = 0x3320, + .timestamp_curr0_sof_addr = 0x3324, + .timestamp_curr1_sof_addr = 0x3328, + .timestamp_perv0_sof_addr = 0x332C, + .timestamp_perv1_sof_addr = 0x3330, + .timestamp_curr0_eof_addr = 0x3334, + .timestamp_curr1_eof_addr = 0x3338, + .timestamp_perv0_eof_addr = 0x333C, + .timestamp_perv1_eof_addr = 0x3340, + .batch_period_cfg_addr = 0, + .batch_stream_id_cfg_addr = 0x3350, + .epoch0_cfg_batch_id0_addr = 0x3354, + .epoch1_cfg_batch_id0_addr = 0x3358, + .epoch0_cfg_batch_id1_addr = 0x335C, + .epoch1_cfg_batch_id1_addr = 0x3360, + .epoch0_cfg_batch_id2_addr = 0x3364, + .epoch1_cfg_batch_id2_addr = 0x3368, + .epoch0_cfg_batch_id3_addr = 0x336C, + .epoch1_cfg_batch_id3_addr = 0x3370, + .epoch0_cfg_batch_id4_addr = 0x3374, + .epoch1_cfg_batch_id4_addr = 0x3378, + .epoch0_cfg_batch_id5_addr = 0x337C, + .epoch1_cfg_batch_id5_addr = 0x3380, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x200, + .aup_mask = 0x200, + 
.rup_aup_set_mask = 0x1, + /* RDI1 bit in the CSID top IRQ status register (RDI0=0x10000, RDI2=0x40000, RDI3=0x80000); a duplicate designated initializer {0x200,} followed this one — in C the last initializer wins, so it silently clobbered the correct value; removed. */ + .top_irq_mask = {0x20000,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_1080_rdi_2_reg_info = { + .irq_status_addr = 0x0244, + .irq_mask_addr = 0x0248, + .irq_clear_addr = 0x024C, + .irq_set_addr = 0x0250, + .cfg0_addr = 0x3480, + .ctrl_addr = 0x3488, + .debug_clr_cmd_addr = 0x348C, + .multi_vcdt_cfg0_addr = 0x3490, + .multi_vcdt_cfg1_addr = 0x3484, + .cfg1_addr = 0x3494, + .debug_byte_cntr_ping_addr = 0x34A8, + .debug_byte_cntr_pong_addr = 0x34AC, + .camif_frame_cfg_addr = 0x34B0, + .epoch_irq_cfg_addr = 0x34B4, + .epoch0_subsample_ptrn_addr = 0x34B8, + .epoch1_subsample_ptrn_addr = 0x34BC, + .debug_rup_aup_status = 0x34C0, + .debug_camif_1_addr = 0x34C4, + .debug_camif_0_addr = 0x34C8, + .frm_drop_pattern_addr = 0x34CC, + .frm_drop_period_addr = 0x34D0, + .irq_subsample_pattern_addr = 0x34D4, + .irq_subsample_period_addr = 0x34D8, + .hcrop_addr = 0x34DC, + .vcrop_addr = 0x34E0, + .pix_drop_pattern_addr = 0x34E4, + .pix_drop_period_addr = 0x34E8, + .line_drop_pattern_addr = 0x34EC, + .line_drop_period_addr = 0x34F0, + .debug_halt_status_addr = 0x34F8, + .debug_misr_val0_addr = 0x34FC, + .debug_misr_val1_addr = 0x3500, + .debug_misr_val2_addr = 0x3504, + .debug_misr_val3_addr = 0x3508, + .format_measure_cfg0_addr = 0x350C, + .format_measure_cfg1_addr = 0x3510, + .format_measure_cfg2_addr = 0x3514, + .format_measure0_addr = 0x3518, + .format_measure1_addr = 0x351C, + .format_measure2_addr = 0x3520, + .timestamp_curr0_sof_addr = 0x3524, + .timestamp_curr1_sof_addr = 0x3528, + .timestamp_perv0_sof_addr = 0x352C, + .timestamp_perv1_sof_addr = 0x3530, + .timestamp_curr0_eof_addr = 0x3534, + .timestamp_curr1_eof_addr = 0x3538, + .timestamp_perv0_eof_addr = 0x353C, + .timestamp_perv1_eof_addr = 0x3540, + .batch_period_cfg_addr = 0, + .batch_stream_id_cfg_addr = 0x3550, + .epoch0_cfg_batch_id0_addr = 0x3554, + .epoch1_cfg_batch_id0_addr = 0x3558, + 
.epoch0_cfg_batch_id1_addr = 0x355C, + .epoch1_cfg_batch_id1_addr = 0x3560, + .epoch0_cfg_batch_id2_addr = 0x3564, + .epoch1_cfg_batch_id2_addr = 0x3568, + .epoch0_cfg_batch_id3_addr = 0x356C, + .epoch1_cfg_batch_id3_addr = 0x3570, + .epoch0_cfg_batch_id4_addr = 0x3574, + .epoch1_cfg_batch_id4_addr = 0x3578, + .epoch0_cfg_batch_id5_addr = 0x357C, + .epoch1_cfg_batch_id5_addr = 0x3580, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x400, + .aup_mask = 0x400, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x40000,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_1080_rdi_3_reg_info = { + .irq_status_addr = 0x0254, + .irq_mask_addr = 0x0258, + .irq_clear_addr = 0x025C, + .irq_set_addr = 0x0260, + .cfg0_addr = 0x3680, + .ctrl_addr = 0x3688, + .debug_clr_cmd_addr = 0x368C, + .multi_vcdt_cfg0_addr = 0x3690, + .multi_vcdt_cfg1_addr = 0x3684, + .cfg1_addr = 0x3694, + .debug_byte_cntr_ping_addr = 0x36A8, + .debug_byte_cntr_pong_addr = 0x36AC, + .camif_frame_cfg_addr = 0x36B0, + .epoch_irq_cfg_addr = 0x36B4, + .epoch0_subsample_ptrn_addr = 0x36B8, + .epoch1_subsample_ptrn_addr = 0x36BC, + .debug_rup_aup_status = 0x36C0, + .debug_camif_1_addr = 0x36C4, + .debug_camif_0_addr = 0x36C8, + .frm_drop_pattern_addr = 0x36CC, + .frm_drop_period_addr = 0x36D0, + 
.irq_subsample_pattern_addr = 0x36D4, + .irq_subsample_period_addr = 0x36D8, + .hcrop_addr = 0x36DC, + .vcrop_addr = 0x36E0, + .pix_drop_pattern_addr = 0x36E4, + .pix_drop_period_addr = 0x36E8, + .line_drop_pattern_addr = 0x36EC, + .line_drop_period_addr = 0x36F0, + .debug_halt_status_addr = 0x36F8, + .debug_misr_val0_addr = 0x36FC, + .debug_misr_val1_addr = 0x3700, + .debug_misr_val2_addr = 0x3704, + .debug_misr_val3_addr = 0x3708, + .format_measure_cfg0_addr = 0x370C, + .format_measure_cfg1_addr = 0x3710, + .format_measure_cfg2_addr = 0x3714, + .format_measure0_addr = 0x3718, + .format_measure1_addr = 0x371C, + .format_measure2_addr = 0x3720, + .timestamp_curr0_sof_addr = 0x3724, + .timestamp_curr1_sof_addr = 0x3728, + .timestamp_perv0_sof_addr = 0x372C, + .timestamp_perv1_sof_addr = 0x3730, + .timestamp_curr0_eof_addr = 0x3734, + .timestamp_curr1_eof_addr = 0x3738, + .timestamp_perv0_eof_addr = 0x373C, + .timestamp_perv1_eof_addr = 0x3740, + .batch_period_cfg_addr = 0, + .batch_stream_id_cfg_addr = 0x3750, + .epoch0_cfg_batch_id0_addr = 0x3754, + .epoch1_cfg_batch_id0_addr = 0x3758, + .epoch0_cfg_batch_id1_addr = 0x375C, + .epoch1_cfg_batch_id1_addr = 0x3760, + .epoch0_cfg_batch_id2_addr = 0x3764, + .epoch1_cfg_batch_id2_addr = 0x3768, + .epoch0_cfg_batch_id3_addr = 0x376C, + .epoch1_cfg_batch_id3_addr = 0x3770, + .epoch0_cfg_batch_id4_addr = 0x3774, + .epoch1_cfg_batch_id4_addr = 0x3778, + .epoch0_cfg_batch_id5_addr = 0x377C, + .epoch1_cfg_batch_id5_addr = 0x3780, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en 
= 1, + .fatal_err_mask = 0x2c1c6081, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_mask = 0x800, + .aup_mask = 0x800, + .rup_aup_set_mask = 0x1, + .top_irq_mask = {0x80000,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_lite_1080_rx_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_UNBOUNDED_FRAME), + }, + + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, 
+ .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 8, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 9, + .bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 10, + .bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION] = 11, + .bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION] = 12, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 13, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 14, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 15, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 16, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 17, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 18, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 19, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 20, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 21, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 22, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_UNBOUNDED_FRAME] = 24, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_lite_1080_top_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP) | + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN) | + BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN), + }, + + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP] = 16, + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN] = 17, + .bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN] = 18, +}; + + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_lite_1080_reg_info = { + .top_irq_reg_info = cam_ife_csid_lite_1080_top_irq_reg_info, + .rx_irq_reg_info = cam_ife_csid_lite_1080_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_lite_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_lite_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_lite_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_lite_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + NULL, + &cam_ife_csid_lite_1080_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_lite_1080_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_lite_1080_cmn_reg_info, + 
.csi2_reg = &cam_ife_csid_lite_1080_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_lite_1080_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = NULL, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_lite_1080_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_lite_1080_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_lite_1080_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_lite_1080_rdi_3_reg_info, + .need_top_cfg = 0, + .top_irq_desc = &cam_ife_csid_lite_1080_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_lite_1080_rx_irq_desc, + .debug_vec_desc = &cam_ife_csid_lite_1080_debug_vec_desc, + .path_irq_desc = cam_ife_csid_lite_1080_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_lite_1080_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_lite_1080_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_lite_1080_path_irq_desc), + .top_debug_mask = &cam_ife_csid_lite_1080_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_lite_1080_rx_debug_mask, + .num_top_regs = 2, + .num_rx_regs = 2, +}; + +#endif /* _CAM_IFE_CSID_LITE_1080_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h new file mode 100644 index 0000000000..672e927786 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h @@ -0,0 +1,402 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_LITE17X_H_ +#define _CAM_IFE_CSID_LITE17X_H_ + +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" + +#define CAM_CSID_LITE_DRV_NAME "csid_lite" + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_lite_17x_rdi_0_reg_info = { + + .irq_status_addr = 0x30, + .irq_mask_addr = 0x34, + .irq_clear_addr = 0x38, + .irq_set_addr = 0x3c, + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .ctrl_addr = 0x208, + .frm_drop_pattern_addr = 0x20c, + .frm_drop_period_addr = 0x210, + .irq_subsample_pattern_addr = 0x214, + .irq_subsample_period_addr = 0x218, + .hcrop_addr = 0x21c, + .vcrop_addr = 0x220, + .pix_drop_pattern_addr = 0x224, + .pix_drop_period_addr = 0x228, + .line_drop_pattern_addr = 0x22c, + .line_drop_period_addr = 0x230, + .rst_strobes_addr = 0x240, + .status_addr = 0x250, + .misr_val0_addr = 0x254, + .misr_val1_addr = 0x258, + .misr_val2_addr = 0x25c, + .misr_val3_addr = 0x260, + .format_measure_cfg0_addr = 0x270, + .format_measure_cfg1_addr = 0x274, + .format_measure0_addr = 0x278, + .format_measure1_addr = 0x27c, + .format_measure2_addr = 0x280, + .timestamp_curr0_sof_addr = 0x290, + .timestamp_curr1_sof_addr = 0x294, + .timestamp_prev0_sof_addr = 0x298, + .timestamp_prev1_sof_addr = 0x29c, + .timestamp_curr0_eof_addr = 0x2a0, + .timestamp_curr1_eof_addr = 0x2a4, + .timestamp_prev0_eof_addr = 0x2a8, + .timestamp_prev1_eof_addr = 0x2ac, + .byte_cntr_ping_addr = 0x2e0, + .byte_cntr_pong_addr = 0x2e4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .packing_fmt_shift_val = 30, + .plain_fmt_shift_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0xe000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + 
cam_ife_csid_lite_17x_rdi_1_reg_info = { + + .irq_status_addr = 0x40, + .irq_mask_addr = 0x44, + .irq_clear_addr = 0x48, + .irq_set_addr = 0x4c, + .cfg0_addr = 0x300, + .cfg1_addr = 0x304, + .ctrl_addr = 0x308, + .frm_drop_pattern_addr = 0x30c, + .frm_drop_period_addr = 0x310, + .irq_subsample_pattern_addr = 0x314, + .irq_subsample_period_addr = 0x318, + .hcrop_addr = 0x31c, + .vcrop_addr = 0x320, + .pix_drop_pattern_addr = 0x324, + .pix_drop_period_addr = 0x328, + .line_drop_pattern_addr = 0x32c, + .line_drop_period_addr = 0x330, + .rst_strobes_addr = 0x340, + .status_addr = 0x350, + .misr_val0_addr = 0x354, + .misr_val1_addr = 0x358, + .misr_val2_addr = 0x35c, + .misr_val3_addr = 0x360, + .format_measure_cfg0_addr = 0x370, + .format_measure_cfg1_addr = 0x374, + .format_measure0_addr = 0x378, + .format_measure1_addr = 0x37c, + .format_measure2_addr = 0x380, + .timestamp_curr0_sof_addr = 0x390, + .timestamp_curr1_sof_addr = 0x394, + .timestamp_prev0_sof_addr = 0x398, + .timestamp_prev1_sof_addr = 0x39c, + .timestamp_curr0_eof_addr = 0x3a0, + .timestamp_curr1_eof_addr = 0x3a4, + .timestamp_prev0_eof_addr = 0x3a8, + .timestamp_prev1_eof_addr = 0x3ac, + .byte_cntr_ping_addr = 0x3e0, + .byte_cntr_pong_addr = 0x3e4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .plain_fmt_shift_val = 10, + .packing_fmt_shift_val = 30, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_lite_17x_rdi_2_reg_info = { + + .irq_status_addr = 0x50, + .irq_mask_addr = 0x54, + .irq_clear_addr = 0x58, + .irq_set_addr = 0x5c, + .cfg0_addr = 0x400, + .cfg1_addr = 0x404, + .ctrl_addr = 0x408, + .frm_drop_pattern_addr = 0x40c, + .frm_drop_period_addr = 0x410, + .irq_subsample_pattern_addr = 0x414, + .irq_subsample_period_addr = 
0x418, + .hcrop_addr = 0x41c, + .vcrop_addr = 0x420, + .pix_drop_pattern_addr = 0x424, + .pix_drop_period_addr = 0x428, + .line_drop_pattern_addr = 0x42c, + .line_drop_period_addr = 0x430, + .yuv_chroma_conversion_addr = 0x434, + .rst_strobes_addr = 0x440, + .status_addr = 0x450, + .misr_val0_addr = 0x454, + .misr_val1_addr = 0x458, + .misr_val2_addr = 0x45c, + .misr_val3_addr = 0x460, + .format_measure_cfg0_addr = 0x470, + .format_measure_cfg1_addr = 0x474, + .format_measure0_addr = 0x478, + .format_measure1_addr = 0x47c, + .format_measure2_addr = 0x480, + .timestamp_curr0_sof_addr = 0x490, + .timestamp_curr1_sof_addr = 0x494, + .timestamp_prev0_sof_addr = 0x498, + .timestamp_prev1_sof_addr = 0x49c, + .timestamp_curr0_eof_addr = 0x4a0, + .timestamp_curr1_eof_addr = 0x4a4, + .timestamp_prev0_eof_addr = 0x4a8, + .timestamp_prev1_eof_addr = 0x4ac, + .byte_cntr_ping_addr = 0x4e0, + .byte_cntr_pong_addr = 0x4e4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .plain_fmt_shift_val = 10, + .packing_fmt_shift_val = 30, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_lite_17x_rdi_3_reg_info = { + + .irq_status_addr = 0x60, + .irq_mask_addr = 0x64, + .irq_clear_addr = 0x68, + .irq_set_addr = 0x6c, + .cfg0_addr = 0x500, + .cfg1_addr = 0x504, + .ctrl_addr = 0x508, + .frm_drop_pattern_addr = 0x50c, + .frm_drop_period_addr = 0x510, + .irq_subsample_pattern_addr = 0x514, + .irq_subsample_period_addr = 0x518, + .hcrop_addr = 0x51c, + .vcrop_addr = 0x520, + .pix_drop_pattern_addr = 0x524, + .pix_drop_period_addr = 0x528, + .line_drop_pattern_addr = 0x52c, + .line_drop_period_addr = 0x530, + .yuv_chroma_conversion_addr = 0x534, + .rst_strobes_addr = 0x540, + .status_addr = 0x550, + .misr_val0_addr = 0x554, + 
.misr_val1_addr = 0x558, + .misr_val2_addr = 0x55c, + .misr_val3_addr = 0x560, + .format_measure_cfg0_addr = 0x570, + .format_measure_cfg1_addr = 0x574, + .format_measure0_addr = 0x578, + .format_measure1_addr = 0x57c, + .format_measure2_addr = 0x580, + .timestamp_curr0_sof_addr = 0x590, + .timestamp_curr1_sof_addr = 0x594, + .timestamp_prev0_sof_addr = 0x598, + .timestamp_prev1_sof_addr = 0x59c, + .timestamp_curr0_eof_addr = 0x5a0, + .timestamp_curr1_eof_addr = 0x5a4, + .timestamp_prev0_eof_addr = 0x5a8, + .timestamp_prev1_eof_addr = 0x5ac, + .byte_cntr_ping_addr = 0x5e0, + .byte_cntr_pong_addr = 0x5e4, + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .plain_fmt_shift_val = 10, + .packing_fmt_shift_val = 30, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, +}; + +static struct cam_ife_csid_csi2_rx_reg_info + cam_ife_csid_lite_17x_csi2_reg_info = { + + .irq_status_addr = 0x20, + .irq_mask_addr = 0x24, + .irq_clear_addr = 0x28, + .irq_set_addr = 0x2c, + + /*CSI2 rx control */ + .cfg0_addr = 0x100, + .cfg1_addr = 0x104, + .capture_ctrl_addr = 0x108, + .rst_strobes_addr = 0x110, + .de_scramble_cfg0_addr = 0x114, + .de_scramble_cfg1_addr = 0x118, + .cap_unmap_long_pkt_hdr_0_addr = 0x120, + .cap_unmap_long_pkt_hdr_1_addr = 0x124, + .captured_short_pkt_0_addr = 0x128, + .captured_short_pkt_1_addr = 0x12c, + .captured_long_pkt_0_addr = 0x130, + .captured_long_pkt_1_addr = 0x134, + .captured_long_pkt_ftr_addr = 0x138, + .captured_cphy_pkt_hdr_addr = 0x13c, + .lane0_misr_addr = 0x150, + .lane1_misr_addr = 0x154, + .lane2_misr_addr = 0x158, + .lane3_misr_addr = 0x15c, + .total_pkts_rcvd_addr = 0x160, + .stats_ecc_addr = 0x164, + .total_crc_err_addr = 0x168, + + .rst_srb_all = 0x3FFF, + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + 
.vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0x3, + .fatal_err_mask = 0x78000, + .part_fatal_err_mask = 0x1801800, + .non_fatal_err_mask = 0x380000, +}; + + +static struct cam_ife_csid_ver1_tpg_reg_info + cam_ife_csid_lite_17x_tpg_reg_info = { + /*CSID TPG control */ + .ctrl_addr = 0x600, + .vc_cfg0_addr = 0x604, + .vc_cfg1_addr = 0x608, + .lfsr_seed_addr = 0x60c, + .dt_n_cfg_0_addr = 0x610, + .dt_n_cfg_1_addr = 0x614, + .dt_n_cfg_2_addr = 0x618, + .color_bars_cfg_addr = 0x640, + .color_box_cfg_addr = 0x644, + .common_gen_cfg_addr = 0x648, + .cgen_n_cfg_addr = 0x650, + .cgen_n_x0_addr = 0x654, + .cgen_n_x1_addr = 0x658, + .cgen_n_x2_addr = 0x65c, + .cgen_n_xy_addr = 0x660, + .cgen_n_y1_addr = 0x664, + .cgen_n_y2_addr = 0x668, + + /* configurations */ + .dtn_cfg_offset = 0xc, + .cgen_cfg_offset = 0x20, + .cpas_ife_reg_offset = 0x28, + .hbi = 0x740, + .vbi = 0x3FF, + .ctrl_cfg = 0x408007, + .lfsr_seed = 0x12345678, + .color_bar = 1, + .num_frames = 0, + .line_interleave_mode = 0x1, + .payload_mode = 0x8, + .num_active_lanes_mask = 0x30, + .num_active_dt = 0, + .fmt_shift = 16, + .num_frame_shift = 16, + .width_shift = 16, + .vbi_shift = 12, + .line_interleave_shift = 10, + .num_active_dt_shift = 8, + .color_bar_shift = 5, + .height_shift = 0, + .hbi_shift = 0, +}; + + +static struct cam_ife_csid_ver1_common_reg_info + cam_csid_lite_17x_cmn_reg_info = { + + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .ctrl_addr = 0x8, + .reset_addr = 0xc, + .rst_strobes_addr = 0x10, + + .test_bus_ctrl_addr = 0x14, + .top_irq_status_addr = 0x70, + .top_irq_mask_addr = 0x74, + .top_irq_clear_addr = 0x78, + .top_irq_set_addr = 0x7c, + .irq_cmd_addr = 0x80, + + /*configurations */ + .major_version = 1, + 
.minor_version = 7, + .version_incr = 0, + .num_rdis = 4, + .num_pix = 0, + .timestamp_strobe_val = 0x2, + .timestamp_stb_sel_shift_val = 0, + .rst_sw_reg_stb = 1, + .rst_hw_reg_stb = 0x1e, + .rst_sw_hw_reg_stb = 0x1f, + .path_rst_stb_all = 0x7f, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .fmt_shift_val = 12, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, +}; + +static struct cam_ife_csid_ver1_reg_info cam_ife_csid_lite_17x_reg_info = { + .cmn_reg = &cam_csid_lite_17x_cmn_reg_info, + .csi2_reg = &cam_ife_csid_lite_17x_csi2_reg_info, + .ipp_reg = NULL, + .rdi_reg = { + &cam_ife_csid_lite_17x_rdi_0_reg_info, + &cam_ife_csid_lite_17x_rdi_1_reg_info, + &cam_ife_csid_lite_17x_rdi_2_reg_info, + &cam_ife_csid_lite_17x_rdi_3_reg_info, + }, + .tpg_reg = &cam_ife_csid_lite_17x_tpg_reg_info, +}; +#endif /*_CAM_IFE_CSID_LITE17X_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite480.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite480.h new file mode 100644 index 0000000000..138d400387 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite480.h @@ -0,0 +1,442 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_LITE_480_H_ +#define _CAM_IFE_CSID_LITE_480_H_ + +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_hw_ver1.h" + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_lite_480_rdi_0_reg_info = { + .irq_status_addr = 0x30, + .irq_mask_addr = 0x34, + .irq_clear_addr = 0x38, + .irq_set_addr = 0x3c, + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .ctrl_addr = 0x208, + .frm_drop_pattern_addr = 0x20c, + .frm_drop_period_addr = 0x210, + .irq_subsample_pattern_addr = 0x214, + .irq_subsample_period_addr = 0x218, + .hcrop_addr = 0x21c, + .vcrop_addr = 0x220, + .pix_drop_pattern_addr = 0x224, + .pix_drop_period_addr = 0x228, + .line_drop_pattern_addr = 0x22c, + .line_drop_period_addr = 0x230, + .rst_strobes_addr = 0x240, + .status_addr = 0x250, + .misr_val0_addr = 0x254, + .misr_val1_addr = 0x258, + .misr_val2_addr = 0x25c, + .misr_val3_addr = 0x260, + .format_measure_cfg0_addr = 0x270, + .format_measure_cfg1_addr = 0x274, + .format_measure0_addr = 0x278, + .format_measure1_addr = 0x27c, + .format_measure2_addr = 0x280, + .timestamp_curr0_sof_addr = 0x290, + .timestamp_curr1_sof_addr = 0x294, + .timestamp_prev0_sof_addr = 0x298, + .timestamp_prev1_sof_addr = 0x29c, + .timestamp_curr0_eof_addr = 0x2a0, + .timestamp_curr1_eof_addr = 0x2a4, + .timestamp_prev0_eof_addr = 0x2a8, + .timestamp_prev1_eof_addr = 0x2ac, + .err_recovery_cfg0_addr = 0x2b0, + .err_recovery_cfg1_addr = 0x2b4, + .err_recovery_cfg2_addr = 0x2b8, + .multi_vcdt_cfg0_addr = 0x2bc, + .byte_cntr_ping_addr = 0x2e0, + .byte_cntr_pong_addr = 0x2e4, + /* configurations */ + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .plain_fmt_shift_val = 10, + .packing_fmt_shift_val = 30, + .mipi_pack_supported = 1, 
+ .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, + .overflow_ctrl_mode_val = 0x8, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x2e000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_lite_480_rdi_1_reg_info = { + .irq_status_addr = 0x40, + .irq_mask_addr = 0x44, + .irq_clear_addr = 0x48, + .irq_set_addr = 0x4c, + .cfg0_addr = 0x300, + .cfg1_addr = 0x304, + .ctrl_addr = 0x308, + .frm_drop_pattern_addr = 0x30c, + .frm_drop_period_addr = 0x310, + .irq_subsample_pattern_addr = 0x314, + .irq_subsample_period_addr = 0x318, + .hcrop_addr = 0x31c, + .vcrop_addr = 0x320, + .pix_drop_pattern_addr = 0x324, + .pix_drop_period_addr = 0x328, + .line_drop_pattern_addr = 0x32c, + .line_drop_period_addr = 0x330, + .rst_strobes_addr = 0x340, + .status_addr = 0x350, + .misr_val0_addr = 0x354, + .misr_val1_addr = 0x358, + .misr_val2_addr = 0x35c, + .misr_val3_addr = 0x360, + .format_measure_cfg0_addr = 0x370, + .format_measure_cfg1_addr = 0x374, + .format_measure0_addr = 0x378, + .format_measure1_addr = 0x37c, + .format_measure2_addr = 0x380, + .timestamp_curr0_sof_addr = 0x390, + .timestamp_curr1_sof_addr = 0x394, + .timestamp_prev0_sof_addr = 0x398, + .timestamp_prev1_sof_addr = 0x39c, + .timestamp_curr0_eof_addr = 0x3a0, + .timestamp_curr1_eof_addr = 0x3a4, + .timestamp_prev0_eof_addr = 0x3a8, + .timestamp_prev1_eof_addr = 0x3ac, + .err_recovery_cfg0_addr = 0x3b0, + .err_recovery_cfg1_addr = 0x3b4, + .err_recovery_cfg2_addr = 0x3b8, + .multi_vcdt_cfg0_addr = 0x3bc, + .byte_cntr_ping_addr = 0x3e0, + .byte_cntr_pong_addr = 0x3e4, + /* configurations */ + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .packing_fmt_shift_val = 30, + .plain_fmt_shift_val 
= 10, + .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, + .overflow_ctrl_mode_val = 0x8, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x2e000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_lite_480_rdi_2_reg_info = { + .irq_status_addr = 0x50, + .irq_mask_addr = 0x54, + .irq_clear_addr = 0x58, + .irq_set_addr = 0x5c, + .cfg0_addr = 0x400, + .cfg1_addr = 0x404, + .ctrl_addr = 0x408, + .frm_drop_pattern_addr = 0x40c, + .frm_drop_period_addr = 0x410, + .irq_subsample_pattern_addr = 0x414, + .irq_subsample_period_addr = 0x418, + .hcrop_addr = 0x41c, + .vcrop_addr = 0x420, + .pix_drop_pattern_addr = 0x424, + .pix_drop_period_addr = 0x428, + .line_drop_pattern_addr = 0x42c, + .line_drop_period_addr = 0x430, + .rst_strobes_addr = 0x440, + .status_addr = 0x450, + .misr_val0_addr = 0x454, + .misr_val1_addr = 0x458, + .misr_val2_addr = 0x45c, + .misr_val3_addr = 0x460, + .format_measure_cfg0_addr = 0x470, + .format_measure_cfg1_addr = 0x474, + .format_measure0_addr = 0x478, + .format_measure1_addr = 0x47c, + .format_measure2_addr = 0x480, + .timestamp_curr0_sof_addr = 0x490, + .timestamp_curr1_sof_addr = 0x494, + .timestamp_prev0_sof_addr = 0x498, + .timestamp_prev1_sof_addr = 0x49c, + .timestamp_curr0_eof_addr = 0x4a0, + .timestamp_curr1_eof_addr = 0x4a4, + .timestamp_prev0_eof_addr = 0x4a8, + .timestamp_prev1_eof_addr = 0x4ac, + .err_recovery_cfg0_addr = 0x4b0, + .err_recovery_cfg1_addr = 0x4b4, + .err_recovery_cfg2_addr = 0x4b8, + .multi_vcdt_cfg0_addr = 0x4bc, + .byte_cntr_ping_addr = 0x4e0, + .byte_cntr_pong_addr = 0x4e4, + /* configurations */ + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .plain_fmt_shift_val = 10, + 
.packing_fmt_shift_val = 30, + .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x2e000, +}; + +static struct cam_ife_csid_ver1_path_reg_info + cam_ife_csid_lite_480_rdi_3_reg_info = { + .irq_status_addr = 0x60, + .irq_mask_addr = 0x64, + .irq_clear_addr = 0x68, + .irq_set_addr = 0x6c, + .cfg0_addr = 0x500, + .cfg1_addr = 0x504, + .ctrl_addr = 0x508, + .frm_drop_pattern_addr = 0x50c, + .frm_drop_period_addr = 0x510, + .irq_subsample_pattern_addr = 0x514, + .irq_subsample_period_addr = 0x518, + .hcrop_addr = 0x51c, + .vcrop_addr = 0x520, + .pix_drop_pattern_addr = 0x524, + .pix_drop_period_addr = 0x528, + .line_drop_pattern_addr = 0x52c, + .line_drop_period_addr = 0x530, + .rst_strobes_addr = 0x540, + .status_addr = 0x550, + .misr_val0_addr = 0x554, + .misr_val1_addr = 0x558, + .misr_val2_addr = 0x55c, + .misr_val3_addr = 0x560, + .format_measure_cfg0_addr = 0x570, + .format_measure_cfg1_addr = 0x574, + .format_measure0_addr = 0x578, + .format_measure1_addr = 0x57c, + .format_measure2_addr = 0x580, + .timestamp_curr0_sof_addr = 0x590, + .timestamp_curr1_sof_addr = 0x594, + .timestamp_prev0_sof_addr = 0x598, + .timestamp_prev1_sof_addr = 0x59c, + .timestamp_curr0_eof_addr = 0x5a0, + .timestamp_curr1_eof_addr = 0x5a4, + .timestamp_prev0_eof_addr = 0x5a8, + .timestamp_prev1_eof_addr = 0x5ac, + .err_recovery_cfg0_addr = 0x5b0, + .err_recovery_cfg1_addr = 0x5b4, + .err_recovery_cfg2_addr = 0x5b8, + .multi_vcdt_cfg0_addr = 0x5bc, + .byte_cntr_ping_addr = 0x5e0, + .byte_cntr_pong_addr = 0x5e4, + /* configurations */ + .halt_mode_internal = 0, + .halt_mode_global = 1, + .halt_mode_shift = 2, + .halt_frame_boundary = 0, + .resume_frame_boundary = 1, + .halt_immediate = 2, + .halt_cmd_shift = 0, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .drop_v_en_shift_val = 4, + .drop_h_en_shift_val = 3, + .plain_fmt_shift_val = 
10, + .packing_fmt_shift_val = 30, + .ccif_violation_en = 1, + .overflow_ctrl_en = 1, + .timestamp_en_shift_val = 2, + .format_measure_en_shift_val = 1, + .overflow_ctrl_mode_val = 0x8, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x2e000, +}; + +static struct cam_ife_csid_csi2_rx_reg_info + cam_ife_csid_lite_480_csi2_reg_info = { + .irq_status_addr = 0x20, + .irq_mask_addr = 0x24, + .irq_clear_addr = 0x28, + .irq_set_addr = 0x2c, + + /*CSI2 rx control */ + .cfg0_addr = 0x100, + .cfg1_addr = 0x104, + .capture_ctrl_addr = 0x108, + .rst_strobes_addr = 0x110, + .de_scramble_cfg0_addr = 0x114, + .de_scramble_cfg1_addr = 0x118, + .cap_unmap_long_pkt_hdr_0_addr = 0x120, + .cap_unmap_long_pkt_hdr_1_addr = 0x124, + .captured_short_pkt_0_addr = 0x128, + .captured_short_pkt_1_addr = 0x12c, + .captured_long_pkt_0_addr = 0x130, + .captured_long_pkt_1_addr = 0x134, + .captured_long_pkt_ftr_addr = 0x138, + .captured_cphy_pkt_hdr_addr = 0x13c, + .lane0_misr_addr = 0x150, + .lane1_misr_addr = 0x154, + .lane2_misr_addr = 0x158, + .lane3_misr_addr = 0x15c, + .total_pkts_rcvd_addr = 0x160, + .stats_ecc_addr = 0x164, + .total_crc_err_addr = 0x168, + + .rst_srb_all = 0x3FFF, + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0x7, + .fatal_err_mask = 0x59FA800, + .part_fatal_err_mask = 0x0001000, + .non_fatal_err_mask = 0x0200000, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, +}; + +static struct cam_ife_csid_ver1_tpg_reg_info + cam_ife_csid_lite_480_tpg_reg_info = { + /*CSID TPG control */ + .ctrl_addr = 0x600, + .vc_cfg0_addr = 0x604, + .vc_cfg1_addr = 0x608, + .lfsr_seed_addr = 
0x60c, + .dt_n_cfg_0_addr = 0x610, + .dt_n_cfg_1_addr = 0x614, + .dt_n_cfg_2_addr = 0x618, + .color_bars_cfg_addr = 0x640, + .color_box_cfg_addr = 0x644, + .common_gen_cfg_addr = 0x648, + .cgen_n_cfg_addr = 0x650, + .cgen_n_x0_addr = 0x654, + .cgen_n_x1_addr = 0x658, + .cgen_n_x2_addr = 0x65c, + .cgen_n_xy_addr = 0x660, + .cgen_n_y1_addr = 0x664, + .cgen_n_y2_addr = 0x668, + + /* configurations */ + .dtn_cfg_offset = 0xc, + .cgen_cfg_offset = 0x20, + .cpas_ife_reg_offset = 0x28, + .hbi = 0x740, + .vbi = 0x3FF, + .lfsr_seed = 0x12345678, + .ctrl_cfg = 0x408007, + .color_bar = 1, + .num_frames = 0, + .line_interleave_mode = 0x1, + .payload_mode = 0x8, + .num_active_lanes_mask = 0x30, + .num_active_dt = 0, + .fmt_shift = 16, + .num_frame_shift = 16, + .width_shift = 16, + .vbi_shift = 12, + .line_interleave_shift = 10, + .num_active_dt_shift = 8, + .color_bar_shift = 5, + .height_shift = 0, + .hbi_shift = 0, +}; + +static struct cam_ife_csid_ver1_common_reg_info + cam_ife_csid_lite_480_cmn_reg_info = { + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .ctrl_addr = 0x8, + .reset_addr = 0xc, + .rst_strobes_addr = 0x10, + .test_bus_ctrl_addr = 0x14, + .top_irq_status_addr = 0x70, + .top_irq_mask_addr = 0x74, + .top_irq_clear_addr = 0x78, + .top_irq_set_addr = 0x7c, + .irq_cmd_addr = 0x80, + /*configurations */ + .major_version = 4, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 4, + .num_pix = 0, + .num_ppp = 0, + .rst_sw_reg_stb = 1, + .rst_hw_reg_stb = 0x1e, + .rst_sw_hw_reg_stb = 0x1f, + .path_rst_stb_all = 0x7f, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .fmt_shift_val = 12, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .ppp_irq_mask_all = 0xFFFF, + .measure_en_hbi_vbi_cnt_mask = 
0xC, + .timestamp_strobe_val = 0x2, + .timestamp_stb_sel_shift_val = 0, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, +}; + +static struct cam_ife_csid_ver1_reg_info cam_ife_csid_lite_480_reg_info = { + .cmn_reg = &cam_ife_csid_lite_480_cmn_reg_info, + .csi2_reg = &cam_ife_csid_lite_480_csi2_reg_info, + .ipp_reg = NULL, + .ppp_reg = NULL, + .rdi_reg = { + &cam_ife_csid_lite_480_rdi_0_reg_info, + &cam_ife_csid_lite_480_rdi_1_reg_info, + &cam_ife_csid_lite_480_rdi_2_reg_info, + &cam_ife_csid_lite_480_rdi_3_reg_info, + }, + .tpg_reg = &cam_ife_csid_lite_480_tpg_reg_info, +}; +#endif /*_CAM_IFE_CSID_LITE_480_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite680.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite680.h new file mode 100644 index 0000000000..f61244029b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite680.h @@ -0,0 +1,1083 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_LITE_680_H_ +#define _CAM_IFE_CSID_LITE_680_H_ + +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" + +static uint32_t cam_ife_csid_lite_680_num_top_irq_desc[] = {0,}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_lite_680_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "LONG_PKT", + }, + { + .bitmask = BIT(9), + .desc = "SHORT_PKT", + }, + { + .bitmask = BIT(10), + .desc = "CPHY_PKT_HDR", + }, + { + .bitmask = BIT(11), + .desc = "ERROR_CPHY_EOT_RECEPTION", + }, + { + .bitmask = BIT(12), + .desc = "ERROR_CPHY_SOT_RECEPTION", + }, + { + .bitmask = BIT(13), + .desc = "ERROR_CPHY_PH_CRC", + }, + { + .bitmask = BIT(14), + .desc = "WARNING_ECC", + }, + { + .bitmask = BIT(15), + .desc = "ERROR_LANE0_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(16), + .desc = "ERROR_LANE1_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(17), + .desc = "ERROR_LANE2_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(18), + .desc = "ERROR_LANE3_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(19), + .desc = "ERROR_CRC", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_ECC", + }, + { + .bitmask = BIT(21), + .desc = "ERROR_MMAPPED_VC_DT", + }, + { + .bitmask = BIT(22), + .desc = "ERROR_UNMAPPED_VC_DT", + }, + { + .bitmask = BIT(23), + .desc = "ERROR_STREAM_UNDERFLOW", + }, + { + .bitmask = BIT(24), + .desc = "ERROR_UNBOUNDED_FRAME", + }, + }, + {} +}; + +static const uint32_t cam_ife_csid_lite_680_num_rx_irq_desc[] = { + 
ARRAY_SIZE(cam_ife_csid_lite_680_rx_irq_desc[0]), +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_lite_680_path_irq_desc[] = { + { + .bitmask = BIT(0), + .desc = "", + }, + { + .bitmask = BIT(1), + .desc = "", + }, + { + .bitmask = BIT(2), + .desc = "ERROR_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(3), + .desc = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .desc = "CAMIF_SOF", + }, + { + .bitmask = BIT(5), + .desc = "FRAME_DROP_EOF", + }, + { + .bitmask = BIT(6), + .desc = "FRAME_DROP_EOL", + }, + { + .bitmask = BIT(7), + .desc = "FRAME_DROP_SOL", + }, + { + .bitmask = BIT(8), + .desc = "FRAME_DROP_SOF", + }, + { + .bitmask = BIT(9), + .desc = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .desc = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .desc = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .desc = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_PIX_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_LINE_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .desc = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(16), + .desc = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(17), + .desc = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .desc = "FRAME_DROP", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .desc = "OVERFLOW_RECOVERY: Back pressure/output fifo ovrfl", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_REC_CCIF_VIOLATION From Camif", + }, + { + .bitmask = BIT(21), + .desc = "CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .desc = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .desc = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .desc = "ILLEGAL_BATCH_ID", + }, + { + .bitmask = BIT(25), + .desc = "BATCH_END_MISSING_VIOLATION", + }, + { + .bitmask = BIT(26), + .desc = 
"HEIGHT_VIOLATION", + }, + { + .bitmask = BIT(27), + .desc = "WIDTH_VIOLATION", + }, + { + .bitmask = BIT(28), + .desc = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .desc = "CCIF_VIOLATION: Bad frame timings", + }, +}; +static struct cam_irq_register_set cam_ife_csid_lite_680_irq_reg_set[9] = { + /* Top */ + { + .mask_reg_offset = 0x00000080, + .clear_reg_offset = 0x00000084, + .status_reg_offset = 0x0000007C, + .set_reg_offset = 0x00000088, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + /* RX */ + { + .mask_reg_offset = 0x000000A0, + .clear_reg_offset = 0x000000A4, + .status_reg_offset = 0x0000009C, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x000000F0, + .clear_reg_offset = 0x000000F4, + .status_reg_offset = 0x000000EC, + }, + /* RDI1 */ + { + .mask_reg_offset = 0x00000100, + .clear_reg_offset = 0x00000104, + .status_reg_offset = 0x000000FC, + }, + /* RDI2 */ + { + .mask_reg_offset = 0x00000110, + .clear_reg_offset = 0x00000114, + .status_reg_offset = 0x0000010C, + }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000120, + .clear_reg_offset = 0x00000124, + .status_reg_offset = 0x0000011C, + }, + {}, /* no RDI4 */ + /* IPP */ + { + .mask_reg_offset = 0x000000B0, + .clear_reg_offset = 0x000000B4, + .status_reg_offset = 0x000000AC, + }, + {}, /* no PPP */ +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_680_top_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000014, + .global_set_bitmask = 0x00000010, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_680_rx_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], /* RX */ + .global_irq_cmd_offset = 0, +}; + +static struct cam_irq_controller_reg_info 
cam_ife_csid_lite_680_path_irq_reg_info[6] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, + }, + {}, /* no RDI4 */ + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_680_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, + }, +}; + +static struct cam_irq_register_set cam_ife_csid_lite_680_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000090, + .clear_reg_offset = 0x00000094, + .status_reg_offset = 0x0000008C, + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_lite_680_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_lite_680_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_lite_680_cmn_reg_info = { + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .global_cmd_addr = 0x8, + .reset_cfg_addr = 0xc, + .reset_cmd_addr = 0x10, + .irq_cmd_addr = 0x14, + .rup_aup_cmd_addr = 0x18, + .offline_cmd_addr = 0x1C, + .shdr_master_slave_cfg_addr = 0x20, + .top_irq_status_addr = {0x7C,}, + .top_irq_mask_addr = {0x80,}, + .top_irq_clear_addr = {0x84,}, + .top_irq_set_addr = {0x88,}, + .buf_done_irq_status_addr = 0x8C, + .buf_done_irq_mask_addr = 0x90, + .buf_done_irq_clear_addr = 0x94, + .buf_done_irq_set_addr = 0x98, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 4, + .num_pix = 1, + .num_ppp = 0, + .rst_done_shift_val = 1, + 
.path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_supported = false, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 0, + .vfr_en_shift_val = 0, + .mup_shift_val = 28, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .rst_loc_path_only_val = 0x0, + .rst_location_shift_val = 4, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .top_reset_irq_mask = {0x1,}, + .top_buf_done_irq_mask = 0x2000, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .phy_sel_base_idx = 1, + .camif_irq_support = true, + .epoch_factor = 50, +}; + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + cam_ife_csid_lite_680_csi2_reg_info = { + .irq_status_addr = {0x9C,}, + .irq_mask_addr = {0xA0,}, + .irq_clear_addr = {0xA4,}, + .irq_set_addr = {0xA8,}, + /*CSI2 rx control */ + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .capture_ctrl_addr = 0x208, + .rst_strobes_addr = 0x20C, + .cap_unmap_long_pkt_hdr_0_addr = 0x210, + .cap_unmap_long_pkt_hdr_1_addr = 0x214, + .captured_short_pkt_0_addr = 0x218, + .captured_short_pkt_1_addr = 0x21c, + .captured_long_pkt_0_addr = 0x220, + .captured_long_pkt_1_addr = 
0x224, + .captured_long_pkt_ftr_addr = 0x228, + .captured_cphy_pkt_hdr_addr = 0x22c, + .lane0_misr_addr = 0x230, + .lane1_misr_addr = 0x234, + .lane2_misr_addr = 0x238, + .lane3_misr_addr = 0x23c, + .total_pkts_rcvd_addr = 0x240, + .stats_ecc_addr = 0x244, + .total_crc_err_addr = 0x248, + .de_scramble_type3_cfg0_addr = 0x24C, + .de_scramble_type3_cfg1_addr = 0x250, + .de_scramble_type2_cfg0_addr = 0x254, + .de_scramble_type2_cfg1_addr = 0x258, + .de_scramble_type1_cfg0_addr = 0x25C, + .de_scramble_type1_cfg1_addr = 0x260, + .de_scramble_type0_cfg0_addr = 0x264, + .de_scramble_type0_cfg1_addr = 0x268, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + .phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, + .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .top_irq_mask = {0x4,}, + .fatal_err_mask = {0x19FA800,}, + .part_fatal_err_mask = {0x0001000,}, + .non_fatal_err_mask = {0x0200000,}, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_680_ipp_reg_info = { + .irq_status_addr = 0xAC, + .irq_mask_addr = 0xB0, + .irq_clear_addr = 0xB4, + .irq_set_addr = 0xB8, + 
.cfg0_addr = 0x300, + .ctrl_addr = 0x304, + .debug_clr_cmd_addr = 0x308, + .multi_vcdt_cfg0_addr = 0x30c, + .cfg1_addr = 0x310, + .err_recovery_cfg0_addr = 0x318, + .err_recovery_cfg1_addr = 0x31C, + .err_recovery_cfg2_addr = 0x320, + .camif_frame_cfg_addr = 0x330, + .epoch_irq_cfg_addr = 0x334, + .epoch0_subsample_ptrn_addr = 0x338, + .epoch1_subsample_ptrn_addr = 0x33C, + .debug_camif_1_addr = 0x340, + .debug_camif_0_addr = 0x344, + .debug_halt_status_addr = 0x348, + .debug_misr_val0_addr = 0x34C, + .debug_misr_val1_addr = 0x350, + .debug_misr_val2_addr = 0x354, + .debug_misr_val3_addr = 0x358, + .hcrop_addr = 0x35c, + .vcrop_addr = 0x360, + .pix_drop_pattern_addr = 0x364, + .pix_drop_period_addr = 0x368, + .line_drop_pattern_addr = 0x36C, + .line_drop_period_addr = 0x370, + .frm_drop_pattern_addr = 0x374, + .frm_drop_period_addr = 0x378, + .irq_subsample_pattern_addr = 0x37C, + .irq_subsample_period_addr = 0x380, + .format_measure_cfg0_addr = 0x384, + .format_measure_cfg1_addr = 0x388, + .format_measure0_addr = 0x38C, + .format_measure1_addr = 0x390, + .format_measure2_addr = 0x394, + .timestamp_curr0_sof_addr = 0x398, + .timestamp_curr1_sof_addr = 0x39C, + .timestamp_perv0_sof_addr = 0x3A0, + .timestamp_perv1_sof_addr = 0x3A4, + .timestamp_curr0_eof_addr = 0x3A8, + .timestamp_curr1_eof_addr = 0x3AC, + .timestamp_perv0_eof_addr = 0x3B0, + .timestamp_perv1_eof_addr = 0x3B4, + .batch_period_cfg_addr = 0x3C4, + .batch_stream_id_cfg_addr = 0x3C8, + .epoch0_cfg_batch_id0_addr = 0x3CC, + .epoch1_cfg_batch_id0_addr = 0x3D0, + .epoch0_cfg_batch_id1_addr = 0x3D4, + .epoch1_cfg_batch_id1_addr = 0x3D8, + .epoch0_cfg_batch_id2_addr = 0x3DC, + .epoch1_cfg_batch_id2_addr = 0x3E0, + .epoch0_cfg_batch_id3_addr = 0x3E4, + .epoch1_cfg_batch_id3_addr = 0x3E8, + .epoch0_cfg_batch_id4_addr = 0x3EC, + .epoch1_cfg_batch_id4_addr = 0x3F0, + .epoch0_cfg_batch_id5_addr = 0x3F4, + .epoch1_cfg_batch_id5_addr = 0x3F8, + + /* configurations */ + .start_mode_internal = 0x0, + 
.start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .resume_frame_boundary = 1, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 14, + .early_eof_en_shift_val = 16, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 9, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x10080000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x10001, + .top_irq_mask = {0x10,}, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_680_rdi_0_reg_info = { + .irq_status_addr = 0xEC, + .irq_mask_addr = 0xF0, + .irq_clear_addr = 0xF4, + .irq_set_addr = 0xF8, + .cfg0_addr = 0x500, + .ctrl_addr = 0x504, + .debug_clr_cmd_addr = 0x508, + .multi_vcdt_cfg0_addr = 0x50c, + .cfg1_addr = 0x510, + .err_recovery_cfg0_addr = 0x514, + .err_recovery_cfg1_addr = 0x518, + .err_recovery_cfg2_addr = 0x51C, + .debug_byte_cntr_ping_addr = 0x520, + .debug_byte_cntr_pong_addr = 0x524, + .camif_frame_cfg_addr = 0x528, + .epoch_irq_cfg_addr = 0x52C, + .epoch0_subsample_ptrn_addr = 0x530, + .epoch1_subsample_ptrn_addr = 0x534, + .debug_camif_1_addr = 0x538, + .debug_camif_0_addr = 0x53C, + .frm_drop_pattern_addr = 0x540, + .frm_drop_period_addr = 0x540, + .irq_subsample_pattern_addr = 0x548, + .irq_subsample_period_addr = 0x54C, + .hcrop_addr = 0x550, + .vcrop_addr = 0x554, + .pix_drop_pattern_addr = 0x558, + .pix_drop_period_addr = 0x55C, + .line_drop_pattern_addr = 0x560, + .line_drop_period_addr = 0x564, + .debug_halt_status_addr = 0x568, + .debug_misr_val0_addr = 0x570, + .debug_misr_val1_addr = 0x574, + .debug_misr_val2_addr = 0x578, + 
.debug_misr_val3_addr = 0x57C, + .format_measure_cfg0_addr = 0x580, + .format_measure_cfg1_addr = 0x584, + .format_measure0_addr = 0x588, + .format_measure1_addr = 0x58C, + .format_measure2_addr = 0x590, + .timestamp_curr0_sof_addr = 0x594, + .timestamp_curr1_sof_addr = 0x598, + .timestamp_perv0_sof_addr = 0x59C, + .timestamp_perv1_sof_addr = 0x5A0, + .timestamp_curr0_eof_addr = 0x5A4, + .timestamp_curr1_eof_addr = 0x5A8, + .timestamp_perv0_eof_addr = 0x5AC, + .timestamp_perv1_eof_addr = 0x5B0, + .batch_period_cfg_addr = 0x5BC, + .batch_stream_id_cfg_addr = 0x5C0, + .epoch0_cfg_batch_id0_addr = 0x5C4, + .epoch1_cfg_batch_id0_addr = 0x5C8, + .epoch0_cfg_batch_id1_addr = 0x5CC, + .epoch1_cfg_batch_id1_addr = 0x5D0, + .epoch0_cfg_batch_id2_addr = 0x5D4, + .epoch1_cfg_batch_id2_addr = 0x5D8, + .epoch0_cfg_batch_id3_addr = 0x5DC, + .epoch1_cfg_batch_id3_addr = 0x5E0, + .epoch0_cfg_batch_id4_addr = 0x5E4, + .epoch1_cfg_batch_id4_addr = 0x5E8, + .epoch0_cfg_batch_id5_addr = 0x5EC, + .epoch1_cfg_batch_id5_addr = 0x5F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 4, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x10080000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x100010, + .top_irq_mask = {0x100,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_680_rdi_1_reg_info = { + .irq_status_addr = 0xFC, + .irq_mask_addr = 0x100, + 
.irq_clear_addr = 0x104, + .irq_set_addr = 0x108, + .cfg0_addr = 0x600, + .ctrl_addr = 0x604, + .debug_clr_cmd_addr = 0x608, + .multi_vcdt_cfg0_addr = 0x60c, + .cfg1_addr = 0x610, + .err_recovery_cfg0_addr = 0x614, + .err_recovery_cfg1_addr = 0x618, + .err_recovery_cfg2_addr = 0x61C, + .debug_byte_cntr_ping_addr = 0x620, + .debug_byte_cntr_pong_addr = 0x624, + .camif_frame_cfg_addr = 0x628, + .epoch_irq_cfg_addr = 0x62C, + .epoch0_subsample_ptrn_addr = 0x630, + .epoch1_subsample_ptrn_addr = 0x634, + .debug_camif_1_addr = 0x638, + .debug_camif_0_addr = 0x63C, + .frm_drop_pattern_addr = 0x640, + .frm_drop_period_addr = 0x644, + .irq_subsample_pattern_addr = 0x648, + .irq_subsample_period_addr = 0x64C, + .hcrop_addr = 0x650, + .vcrop_addr = 0x654, + .pix_drop_pattern_addr = 0x658, + .pix_drop_period_addr = 0x65C, + .line_drop_pattern_addr = 0x660, + .line_drop_period_addr = 0x664, + .debug_halt_status_addr = 0x66C, + .debug_misr_val0_addr = 0x670, + .debug_misr_val1_addr = 0x674, + .debug_misr_val2_addr = 0x678, + .debug_misr_val3_addr = 0x67C, + .format_measure_cfg0_addr = 0x680, + .format_measure_cfg1_addr = 0x684, + .format_measure0_addr = 0x688, + .format_measure1_addr = 0x68C, + .format_measure2_addr = 0x690, + .timestamp_curr0_sof_addr = 0x694, + .timestamp_curr1_sof_addr = 0x698, + .timestamp_perv0_sof_addr = 0x69C, + .timestamp_perv1_sof_addr = 0x6A0, + .timestamp_curr0_eof_addr = 0x6A4, + .timestamp_curr1_eof_addr = 0x6A8, + .timestamp_perv0_eof_addr = 0x6AC, + .timestamp_perv1_eof_addr = 0x6B0, + .batch_period_cfg_addr = 0x6BC, + .batch_stream_id_cfg_addr = 0x6C0, + .epoch0_cfg_batch_id0_addr = 0x6C4, + .epoch1_cfg_batch_id0_addr = 0x6C8, + .epoch0_cfg_batch_id1_addr = 0x6CC, + .epoch1_cfg_batch_id1_addr = 0x6D0, + .epoch0_cfg_batch_id2_addr = 0x6D4, + .epoch1_cfg_batch_id2_addr = 0x6D8, + .epoch0_cfg_batch_id3_addr = 0x6DC, + .epoch1_cfg_batch_id3_addr = 0x6E0, + .epoch0_cfg_batch_id4_addr = 0x6E4, + .epoch1_cfg_batch_id4_addr = 0x6E8, + 
.epoch0_cfg_batch_id5_addr = 0x6EC, + .epoch1_cfg_batch_id5_addr = 0x6F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 4, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x10080000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x200020, + .top_irq_mask = {0x200,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_680_rdi_2_reg_info = { + .irq_status_addr = 0x10C, + .irq_mask_addr = 0x110, + .irq_clear_addr = 0x114, + .irq_set_addr = 0x118, + .cfg0_addr = 0x700, + .ctrl_addr = 0x704, + .debug_clr_cmd_addr = 0x708, + .multi_vcdt_cfg0_addr = 0x70c, + .cfg1_addr = 0x710, + .err_recovery_cfg0_addr = 0x714, + .err_recovery_cfg1_addr = 0x718, + .err_recovery_cfg2_addr = 0x71C, + .debug_byte_cntr_ping_addr = 0x720, + .debug_byte_cntr_pong_addr = 0x724, + .camif_frame_cfg_addr = 0x728, + .epoch_irq_cfg_addr = 0x72C, + .epoch0_subsample_ptrn_addr = 0x730, + .epoch1_subsample_ptrn_addr = 0x734, + .debug_camif_1_addr = 0x738, + .debug_camif_0_addr = 0x73C, + .frm_drop_pattern_addr = 0x740, + .frm_drop_period_addr = 0x744, + .irq_subsample_pattern_addr = 0x748, + .irq_subsample_period_addr = 0x74C, + .hcrop_addr = 0x750, + .vcrop_addr = 0x754, + .pix_drop_pattern_addr = 0x758, + .pix_drop_period_addr = 0x75C, + .line_drop_pattern_addr = 0x760, + .line_drop_period_addr = 0x764, + .debug_halt_status_addr = 0x76C, + .debug_misr_val0_addr = 0x770, 
+ .debug_misr_val1_addr = 0x774, + .debug_misr_val2_addr = 0x778, + .debug_misr_val3_addr = 0x77C, + .format_measure_cfg0_addr = 0x780, + .format_measure_cfg1_addr = 0x784, + .format_measure0_addr = 0x788, + .format_measure1_addr = 0x78C, + .format_measure2_addr = 0x790, + .timestamp_curr0_sof_addr = 0x794, + .timestamp_curr1_sof_addr = 0x798, + .timestamp_perv0_sof_addr = 0x79C, + .timestamp_perv1_sof_addr = 0x7A0, + .timestamp_curr0_eof_addr = 0x7A4, + .timestamp_curr1_eof_addr = 0x7A8, + .timestamp_perv0_eof_addr = 0x7AC, + .timestamp_perv1_eof_addr = 0x7B0, + .batch_period_cfg_addr = 0x7BC, + .batch_stream_id_cfg_addr = 0x7C0, + .epoch0_cfg_batch_id0_addr = 0x7C4, + .epoch1_cfg_batch_id0_addr = 0x7C8, + .epoch0_cfg_batch_id1_addr = 0x7CC, + .epoch1_cfg_batch_id1_addr = 0x7D0, + .epoch0_cfg_batch_id2_addr = 0x7D4, + .epoch1_cfg_batch_id2_addr = 0x7D8, + .epoch0_cfg_batch_id3_addr = 0x7DC, + .epoch1_cfg_batch_id3_addr = 0x7E0, + .epoch0_cfg_batch_id4_addr = 0x7E4, + .epoch1_cfg_batch_id4_addr = 0x7E8, + .epoch0_cfg_batch_id5_addr = 0x7EC, + .epoch1_cfg_batch_id5_addr = 0x7F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 4, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x10080000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x400040, + .top_irq_mask = {0x400,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_ver2_path_reg_info + 
cam_ife_csid_lite_680_rdi_3_reg_info = { + .irq_status_addr = 0x11C, + .irq_mask_addr = 0x120, + .irq_clear_addr = 0x124, + .irq_set_addr = 0x128, + .cfg0_addr = 0x800, + .ctrl_addr = 0x804, + .debug_clr_cmd_addr = 0x808, + .multi_vcdt_cfg0_addr = 0x80c, + .cfg1_addr = 0x810, + .err_recovery_cfg0_addr = 0x814, + .err_recovery_cfg1_addr = 0x818, + .err_recovery_cfg2_addr = 0x81C, + .debug_byte_cntr_ping_addr = 0x820, + .debug_byte_cntr_pong_addr = 0x824, + .camif_frame_cfg_addr = 0x828, + .epoch_irq_cfg_addr = 0x82C, + .epoch0_subsample_ptrn_addr = 0x830, + .epoch1_subsample_ptrn_addr = 0x834, + .debug_camif_1_addr = 0x838, + .debug_camif_0_addr = 0x83C, + .frm_drop_pattern_addr = 0x840, + .frm_drop_period_addr = 0x840, + .irq_subsample_pattern_addr = 0x848, + .irq_subsample_period_addr = 0x84C, + .hcrop_addr = 0x850, + .vcrop_addr = 0x854, + .pix_drop_pattern_addr = 0x858, + .pix_drop_period_addr = 0x85C, + .line_drop_pattern_addr = 0x860, + .line_drop_period_addr = 0x864, + .debug_halt_status_addr = 0x868, + .debug_misr_val0_addr = 0x870, + .debug_misr_val1_addr = 0x874, + .debug_misr_val2_addr = 0x878, + .debug_misr_val3_addr = 0x87C, + .format_measure_cfg0_addr = 0x880, + .format_measure_cfg1_addr = 0x884, + .format_measure0_addr = 0x888, + .format_measure1_addr = 0x88C, + .format_measure2_addr = 0x890, + .timestamp_curr0_sof_addr = 0x894, + .timestamp_curr1_sof_addr = 0x898, + .timestamp_perv0_sof_addr = 0x89C, + .timestamp_perv1_sof_addr = 0x8A0, + .timestamp_curr0_eof_addr = 0x8A4, + .timestamp_curr1_eof_addr = 0x8A8, + .timestamp_perv0_eof_addr = 0x8AC, + .timestamp_perv1_eof_addr = 0x8B0, + .batch_period_cfg_addr = 0x8BC, + .batch_stream_id_cfg_addr = 0x8C0, + .epoch0_cfg_batch_id0_addr = 0x8C4, + .epoch1_cfg_batch_id0_addr = 0x8C8, + .epoch0_cfg_batch_id1_addr = 0x8CC, + .epoch1_cfg_batch_id1_addr = 0x8D0, + .epoch0_cfg_batch_id2_addr = 0x8D4, + .epoch1_cfg_batch_id2_addr = 0x8D8, + .epoch0_cfg_batch_id3_addr = 0x8DC, + .epoch1_cfg_batch_id3_addr = 0x8E0, 
+ .epoch0_cfg_batch_id4_addr = 0x8E4, + .epoch1_cfg_batch_id4_addr = 0x8E8, + .epoch0_cfg_batch_id5_addr = 0x8EC, + .epoch1_cfg_batch_id5_addr = 0x8F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 4, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x4, + .non_fatal_err_mask = 0x10080000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x800080, + .top_irq_mask = {0x800,}, + .epoch0_shift_val = 16, + .epoch1_shift_val = 0, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_lite_680_rx_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + 
BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_UNBOUNDED_FRAME), + }, + + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, + .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 8, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 9, + .bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 10, + .bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION] = 11, + .bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION] = 12, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 13, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 14, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 15, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 16, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 17, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 18, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 19, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 20, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 21, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 22, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_UNBOUNDED_FRAME] = 24, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_lite_680_top_debug_mask = { + + .evt_bitmap = {0ULL,}, +}; + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_lite_680_reg_info = { + .top_irq_reg_info = &cam_ife_csid_lite_680_top_irq_reg_info, + .rx_irq_reg_info = &cam_ife_csid_lite_680_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_lite_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + 
&cam_ife_csid_lite_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_lite_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_lite_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + NULL, + &cam_ife_csid_lite_680_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_lite_680_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_lite_680_cmn_reg_info, + .csi2_reg = &cam_ife_csid_lite_680_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_lite_680_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = NULL, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_lite_680_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_lite_680_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_lite_680_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_lite_680_rdi_3_reg_info, + .need_top_cfg = 0, + .rx_irq_desc = &cam_ife_csid_lite_680_rx_irq_desc, + .path_irq_desc = cam_ife_csid_lite_680_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_lite_680_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_lite_680_num_rx_irq_desc, + .rx_debug_mask = &cam_ife_csid_lite_680_rx_debug_mask, + .top_debug_mask = &cam_ife_csid_lite_680_top_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, +}; +#endif /* _CAM_IFE_CSID_LITE_680_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite780.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite780.h new file mode 100644 index 0000000000..5ea3236cca --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite780.h @@ -0,0 +1,1140 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_LITE_780_H_ +#define _CAM_IFE_CSID_LITE_780_H_ + +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" + +static const struct cam_ife_csid_irq_desc cam_ife_csid_lite_780_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "LONG_PKT", + }, + { + .bitmask = BIT(9), + .desc = "SHORT_PKT", + }, + { + .bitmask = BIT(10), + .desc = "CPHY_PKT_HDR", + }, + { + .bitmask = BIT(11), + .desc = "ERROR_CPHY_EOT_RECEPTION", + }, + { + .bitmask = BIT(12), + .desc = "ERROR_CPHY_SOT_RECEPTION", + }, + { + .bitmask = BIT(13), + .desc = "ERROR_CPHY_PH_CRC", + }, + { + .bitmask = BIT(14), + .desc = "WARNING_ECC", + }, + { + .bitmask = BIT(15), + .desc = "ERROR_LANE0_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(16), + .desc = "ERROR_LANE1_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(17), + .desc = "ERROR_LANE2_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(18), + .desc = "ERROR_LANE3_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(19), + .desc = "ERROR_CRC", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_ECC", + }, + { + .bitmask = BIT(21), + .desc = "ERROR_MMAPPED_VC_DT", + }, + { + .bitmask = BIT(22), + .desc = "ERROR_UNMAPPED_VC_DT", + }, + { + .bitmask = BIT(23), + .desc = "ERROR_STREAM_UNDERFLOW", + }, + { + .bitmask = BIT(24), + .desc = "ERROR_UNBOUNDED_FRAME", + }, + }, +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_lite_780_path_irq_desc[] = { + { + .bitmask = BIT(0), + .err_type = CAM_ISP_HW_ERROR_CSID_FATAL, + .desc = 
"ILLEGAL_PROGRAMMING", + .err_handler = cam_ife_csid_ver2_print_illegal_programming_irq_status, + }, + { + .bitmask = BIT(1), + .desc = "EROOR_MSG_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(2), + .desc = "ERROR_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(3), + .desc = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .desc = "CAMIF_SOF", + }, + { + .bitmask = BIT(5), + .desc = "FRAME_DROP_EOF", + }, + { + .bitmask = BIT(6), + .desc = "FRAME_DROP_EOL", + }, + { + .bitmask = BIT(7), + .desc = "FRAME_DROP_SOL", + }, + { + .bitmask = BIT(8), + .desc = "FRAME_DROP_SOF", + }, + { + .bitmask = BIT(9), + .desc = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .desc = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .desc = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .desc = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_PIX_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .desc = "ERROR_LINE_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .desc = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(16), + .desc = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(17), + .desc = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .desc = "FRAME_DROP", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .desc = "OVERFLOW_RECOVERY: Back pressure/output fifo ovrfl", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_REC_CCIF_VIOLATION From Camif", + }, + { + .bitmask = BIT(21), + .desc = "CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .desc = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .desc = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .desc = "ILLEGAL_BATCH_ID", + }, + { + .bitmask = BIT(25), + .desc = "BATCH_END_MISSING_VIOLATION", + }, + { + .bitmask = BIT(26), + .desc = "HEIGHT_VIOLATION", + }, + { + .bitmask = BIT(27), + .desc = 
"WIDTH_VIOLATION", + }, + { + .bitmask = BIT(28), + .desc = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .desc = "CCIF_VIOLATION: Bad frame timings", + }, +}; + +static const struct cam_ife_csid_top_irq_desc cam_ife_csid_lite_780_top_irq_desc[][32] = { + { + { + .bitmask = BIT(1), + .err_type = CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR, + .err_name = "FATAL_SENSOR_SWITCHING_IRQ", + .desc = "Fatal Error duirng dynamically switching between 2 sensors", + }, + { + .bitmask = BIT(18), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .err_name = "ERROR_NO_VOTE_DN", + .desc = "vote_up is asserted before IDLE is encountered in a frame", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .err_name = "ERROR_VOTE_UP_LATE", + .desc = "vote_up is asserted at the same time as an SOF", + }, + { + .bitmask = BIT(20), + .err_type = CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW, + .err_name = "ERROR_RDI_LINE_BUFFER_CONFLICT", + .desc = "Two or more RDIs programmed to access the shared line buffer", + .err_handler = cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler, + }, + }, +}; + +static const uint32_t cam_ife_csid_lite_780_num_top_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_lite_780_top_irq_desc[0]), +}; +static const uint32_t cam_ife_csid_lite_780_num_rx_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_lite_780_rx_irq_desc[0]), +}; + +static struct cam_irq_register_set cam_ife_csid_lite_780_irq_reg_set[9] = { + /* Top */ + { + .mask_reg_offset = 0x00000080, + .clear_reg_offset = 0x00000084, + .status_reg_offset = 0x0000007C, + .set_reg_offset = 0x00000088, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + /* RX */ + { + .mask_reg_offset = 0x000000A0, + .clear_reg_offset = 0x000000A4, + .status_reg_offset = 0x0000009C, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x000000F0, + .clear_reg_offset = 0x000000F4, + .status_reg_offset = 0x000000EC, + }, + /* RDI1 */ + 
{ + .mask_reg_offset = 0x00000100, + .clear_reg_offset = 0x00000104, + .status_reg_offset = 0x000000FC, + }, + /* RDI2 */ + { + .mask_reg_offset = 0x00000110, + .clear_reg_offset = 0x00000114, + .status_reg_offset = 0x0000010C, + }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000120, + .clear_reg_offset = 0x00000124, + .status_reg_offset = 0x0000011C, + }, + {}, /* no RDI4 */ + /* IPP */ + { + .mask_reg_offset = 0x000000B0, + .clear_reg_offset = 0x000000B4, + .status_reg_offset = 0x000000AC, + }, + {}, /* no PPP */ +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_780_top_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000014, + .global_set_bitmask = 0x00000010, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_780_rx_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], /* RX */ + .global_irq_cmd_offset = 0, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_780_path_irq_reg_info[6] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, + }, + {}, /* no RDI4 */ + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_780_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, + }, +}; + + +static struct cam_irq_register_set 
cam_ife_csid_lite_780_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000090, + .clear_reg_offset = 0x00000094, + .status_reg_offset = 0x0000008C, + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_lite_780_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_lite_780_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_lite_780_cmn_reg_info = { + .hw_version_addr = 0x0, + .cfg0_addr = 0x4, + .global_cmd_addr = 0x8, + .reset_cfg_addr = 0xc, + .reset_cmd_addr = 0x10, + .irq_cmd_addr = 0x14, + .rup_aup_cmd_addr = 0x18, + .offline_cmd_addr = 0x1C, + .shdr_master_slave_cfg_addr = 0x20, + .top_irq_status_addr = {0x7C,}, + .top_irq_mask_addr = {0x80,}, + .top_irq_clear_addr = {0x84,}, + .top_irq_set_addr = {0x88,}, + .buf_done_irq_status_addr = 0x8C, + .buf_done_irq_mask_addr = 0x90, + .buf_done_irq_clear_addr = 0x94, + .buf_done_irq_set_addr = 0x98, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 4, + .num_pix = 1, + .num_ppp = 0, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .vc_mask = 0x1F, + .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + .decode_format_mask = 0xF, + .decode_format1_supported = true, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .early_eof_supported = 1, + .vfr_supported = 1, + .mup_shift_val = 28, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + 
.crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .rst_loc_path_only_val = 0x0, + .rst_location_shift_val = 4, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .top_reset_irq_mask = { + 0x1, + }, + .top_buf_done_irq_mask = 0x2000, + .decode_format_payload_only = 0xF, + .phy_sel_base_idx = 1, + .timestamp_enabled_in_cfg0 = true, + .camif_irq_support = true, + .epoch_factor = 50, +}; + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + cam_ife_csid_lite_780_csi2_reg_info = { + .irq_status_addr = {0x9C,}, + .irq_mask_addr = {0xA0,}, + .irq_clear_addr = {0xA4,}, + .irq_set_addr = {0xA8,}, + /*CSI2 rx control */ + .cfg0_addr = 0x200, + .cfg1_addr = 0x204, + .capture_ctrl_addr = 0x208, + .rst_strobes_addr = 0x20C, + .cap_unmap_long_pkt_hdr_0_addr = 0x210, + .cap_unmap_long_pkt_hdr_1_addr = 0x214, + .captured_short_pkt_0_addr = 0x218, + .captured_short_pkt_1_addr = 0x21c, + .captured_long_pkt_0_addr = 0x220, + .captured_long_pkt_1_addr = 0x224, + .captured_long_pkt_ftr_addr = 0x228, + .captured_cphy_pkt_hdr_addr = 0x22c, + .lane0_misr_addr = 0x230, + .lane1_misr_addr = 0x234, + .lane2_misr_addr = 0x238, + .lane3_misr_addr = 0x23c, + .total_pkts_rcvd_addr = 0x240, + .stats_ecc_addr = 0x244, + .total_crc_err_addr = 0x248, + .de_scramble_type3_cfg0_addr = 0x24C, + .de_scramble_type3_cfg1_addr = 0x250, + .de_scramble_type2_cfg0_addr = 0x254, + .de_scramble_type2_cfg1_addr = 0x258, + 
.de_scramble_type1_cfg0_addr = 0x25C, + .de_scramble_type1_cfg1_addr = 0x260, + .de_scramble_type0_cfg0_addr = 0x264, + .de_scramble_type0_cfg1_addr = 0x268, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + .phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, + .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .rup_aup_latch_shift = 11, + .rup_aup_latch_supported = true, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .top_irq_mask = {0x4,}, + .fatal_err_mask = {0x19FA800,}, + .part_fatal_err_mask = {0x0001000,}, + .non_fatal_err_mask = {0x0200000,}, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_780_ipp_reg_info = { + .irq_status_addr = 0xAC, + .irq_mask_addr = 0xB0, + .irq_clear_addr = 0xB4, + .irq_set_addr = 0xB8, + .cfg0_addr = 0x300, + .ctrl_addr = 0x304, + .debug_clr_cmd_addr = 0x308, + .multi_vcdt_cfg0_addr = 0x30c, + .cfg1_addr = 0x310, + .err_recovery_cfg0_addr = 0x318, + .err_recovery_cfg1_addr = 0x31C, + .err_recovery_cfg2_addr = 0x320, + .camif_frame_cfg_addr = 0x330, + .epoch_irq_cfg_addr = 0x334, + .epoch0_subsample_ptrn_addr = 0x338, + .epoch1_subsample_ptrn_addr = 0x33C, + 
.debug_camif_1_addr = 0x340, + .debug_camif_0_addr = 0x344, + .debug_halt_status_addr = 0x348, + .debug_misr_val0_addr = 0x34C, + .debug_misr_val1_addr = 0x350, + .debug_misr_val2_addr = 0x354, + .debug_misr_val3_addr = 0x358, + .hcrop_addr = 0x35c, + .vcrop_addr = 0x360, + .pix_drop_pattern_addr = 0x364, + .pix_drop_period_addr = 0x368, + .line_drop_pattern_addr = 0x36C, + .line_drop_period_addr = 0x370, + .frm_drop_pattern_addr = 0x374, + .frm_drop_period_addr = 0x378, + .irq_subsample_pattern_addr = 0x37C, + .irq_subsample_period_addr = 0x380, + .format_measure_cfg0_addr = 0x384, + .format_measure_cfg1_addr = 0x388, + .format_measure0_addr = 0x38C, + .format_measure1_addr = 0x390, + .format_measure2_addr = 0x394, + .timestamp_curr0_sof_addr = 0x398, + .timestamp_curr1_sof_addr = 0x39C, + .timestamp_perv0_sof_addr = 0x3A0, + .timestamp_perv1_sof_addr = 0x3A4, + .timestamp_curr0_eof_addr = 0x3A8, + .timestamp_curr1_eof_addr = 0x3AC, + .timestamp_perv0_eof_addr = 0x3B0, + .timestamp_perv1_eof_addr = 0x3B4, + .batch_period_cfg_addr = 0x3C4, + .batch_stream_id_cfg_addr = 0x3C8, + .epoch0_cfg_batch_id0_addr = 0x3CC, + .epoch1_cfg_batch_id0_addr = 0x3D0, + .epoch0_cfg_batch_id1_addr = 0x3D4, + .epoch1_cfg_batch_id1_addr = 0x3D8, + .epoch0_cfg_batch_id2_addr = 0x3DC, + .epoch1_cfg_batch_id2_addr = 0x3E0, + .epoch0_cfg_batch_id3_addr = 0x3E4, + .epoch1_cfg_batch_id3_addr = 0x3E8, + .epoch0_cfg_batch_id4_addr = 0x3EC, + .epoch1_cfg_batch_id4_addr = 0x3F0, + .epoch0_cfg_batch_id5_addr = 0x3F4, + .epoch1_cfg_batch_id5_addr = 0x3F8, + + /* configurations */ + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .resume_frame_boundary = 1, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 14, + .early_eof_en_shift_val = 16, + 
.format_measure_en_shift_val = 8,
+ .timestamp_en_shift_val = 6,
+ .overflow_ctrl_en = 1,
+ .overflow_ctrl_mode_val = 0x8,
+ .min_hbi_shift_val = 4,
+ .start_master_sel_shift_val = 4,
+ .fatal_err_mask = 0x20186001,
+ .non_fatal_err_mask = 0x12000006,
+ .sof_irq_mask = 0x10,
+ .rup_irq_mask = 0x800000,
+ .epoch0_irq_mask = 0x200000,
+ .epoch1_irq_mask = 0x400000,
+ .eof_irq_mask = 0x8,
+ .rup_aup_mask = 0x10001,
+ .top_irq_mask = {0x10,},
+};
+
+static struct cam_ife_csid_ver2_path_reg_info
+ cam_ife_csid_lite_780_rdi_0_reg_info = {
+ .irq_status_addr = 0xEC,
+ .irq_mask_addr = 0xF0,
+ .irq_clear_addr = 0xF4,
+ .irq_set_addr = 0xF8,
+ .cfg0_addr = 0x500,
+ .ctrl_addr = 0x504,
+ .debug_clr_cmd_addr = 0x508,
+ .multi_vcdt_cfg0_addr = 0x50c,
+ .cfg1_addr = 0x510,
+ .err_recovery_cfg0_addr = 0x514,
+ .err_recovery_cfg1_addr = 0x518,
+ .err_recovery_cfg2_addr = 0x51C,
+ .debug_byte_cntr_ping_addr = 0x520,
+ .debug_byte_cntr_pong_addr = 0x524,
+ .camif_frame_cfg_addr = 0x528,
+ .epoch_irq_cfg_addr = 0x52C,
+ .epoch0_subsample_ptrn_addr = 0x530,
+ .epoch1_subsample_ptrn_addr = 0x534,
+ .debug_camif_1_addr = 0x538,
+ .debug_camif_0_addr = 0x53C,
+ .frm_drop_pattern_addr = 0x540,
+ .frm_drop_period_addr = 0x544, /* fixed: was 0x540, duplicating frm_drop_pattern_addr; rdi_1/rdi_2 use pattern+0x4 (0x644/0x744) */
+ .irq_subsample_pattern_addr = 0x548,
+ .irq_subsample_period_addr = 0x54C,
+ .hcrop_addr = 0x550,
+ .vcrop_addr = 0x554,
+ .pix_drop_pattern_addr = 0x558,
+ .pix_drop_period_addr = 0x55C,
+ .line_drop_pattern_addr = 0x560,
+ .line_drop_period_addr = 0x564,
+ .debug_halt_status_addr = 0x568,
+ .debug_misr_val0_addr = 0x570,
+ .debug_misr_val1_addr = 0x574,
+ .debug_misr_val2_addr = 0x578,
+ .debug_misr_val3_addr = 0x57C,
+ .format_measure_cfg0_addr = 0x580,
+ .format_measure_cfg1_addr = 0x584,
+ .format_measure0_addr = 0x588,
+ .format_measure1_addr = 0x58C,
+ .format_measure2_addr = 0x590,
+ .timestamp_curr0_sof_addr = 0x594,
+ .timestamp_curr1_sof_addr = 0x598,
+ .timestamp_perv0_sof_addr = 0x59C,
+ .timestamp_perv1_sof_addr = 0x5A0,
+ .timestamp_curr0_eof_addr = 
0x5A4, + .timestamp_curr1_eof_addr = 0x5A8, + .timestamp_perv0_eof_addr = 0x5AC, + .timestamp_perv1_eof_addr = 0x5B0, + .batch_period_cfg_addr = 0x5BC, + .batch_stream_id_cfg_addr = 0x5C0, + .epoch0_cfg_batch_id0_addr = 0x5C4, + .epoch1_cfg_batch_id0_addr = 0x5C8, + .epoch0_cfg_batch_id1_addr = 0x5CC, + .epoch1_cfg_batch_id1_addr = 0x5D0, + .epoch0_cfg_batch_id2_addr = 0x5D4, + .epoch1_cfg_batch_id2_addr = 0x5D8, + .epoch0_cfg_batch_id3_addr = 0x5DC, + .epoch1_cfg_batch_id3_addr = 0x5E0, + .epoch0_cfg_batch_id4_addr = 0x5E4, + .epoch1_cfg_batch_id4_addr = 0x5E8, + .epoch0_cfg_batch_id5_addr = 0x5EC, + .epoch1_cfg_batch_id5_addr = 0x5F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .sof_retiming_dis_shift = 5, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000004, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x100010, + .top_irq_mask = {0x100,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_780_rdi_1_reg_info = { + .irq_status_addr = 0xFC, + .irq_mask_addr = 0x100, + .irq_clear_addr = 0x104, + .irq_set_addr = 0x108, + .cfg0_addr = 0x600, + .ctrl_addr = 0x604, + .debug_clr_cmd_addr = 0x608, + .multi_vcdt_cfg0_addr = 0x60c, + .cfg1_addr = 0x610, + .err_recovery_cfg0_addr = 0x614, + .err_recovery_cfg1_addr = 0x618, + .err_recovery_cfg2_addr = 0x61C, + .debug_byte_cntr_ping_addr = 0x620, + .debug_byte_cntr_pong_addr = 0x624, + 
.camif_frame_cfg_addr = 0x628, + .epoch_irq_cfg_addr = 0x62C, + .epoch0_subsample_ptrn_addr = 0x630, + .epoch1_subsample_ptrn_addr = 0x634, + .debug_camif_1_addr = 0x638, + .debug_camif_0_addr = 0x63C, + .frm_drop_pattern_addr = 0x640, + .frm_drop_period_addr = 0x644, + .irq_subsample_pattern_addr = 0x648, + .irq_subsample_period_addr = 0x64C, + .hcrop_addr = 0x650, + .vcrop_addr = 0x654, + .pix_drop_pattern_addr = 0x658, + .pix_drop_period_addr = 0x65C, + .line_drop_pattern_addr = 0x660, + .line_drop_period_addr = 0x664, + .debug_halt_status_addr = 0x66C, + .debug_misr_val0_addr = 0x670, + .debug_misr_val1_addr = 0x674, + .debug_misr_val2_addr = 0x678, + .debug_misr_val3_addr = 0x67C, + .format_measure_cfg0_addr = 0x680, + .format_measure_cfg1_addr = 0x684, + .format_measure0_addr = 0x688, + .format_measure1_addr = 0x68C, + .format_measure2_addr = 0x690, + .timestamp_curr0_sof_addr = 0x694, + .timestamp_curr1_sof_addr = 0x698, + .timestamp_perv0_sof_addr = 0x69C, + .timestamp_perv1_sof_addr = 0x6A0, + .timestamp_curr0_eof_addr = 0x6A4, + .timestamp_curr1_eof_addr = 0x6A8, + .timestamp_perv0_eof_addr = 0x6AC, + .timestamp_perv1_eof_addr = 0x6B0, + .batch_period_cfg_addr = 0x6BC, + .batch_stream_id_cfg_addr = 0x6C0, + .epoch0_cfg_batch_id0_addr = 0x6C4, + .epoch1_cfg_batch_id0_addr = 0x6C8, + .epoch0_cfg_batch_id1_addr = 0x6CC, + .epoch1_cfg_batch_id1_addr = 0x6D0, + .epoch0_cfg_batch_id2_addr = 0x6D4, + .epoch1_cfg_batch_id2_addr = 0x6D8, + .epoch0_cfg_batch_id3_addr = 0x6DC, + .epoch1_cfg_batch_id3_addr = 0x6E0, + .epoch0_cfg_batch_id4_addr = 0x6E4, + .epoch1_cfg_batch_id4_addr = 0x6E8, + .epoch0_cfg_batch_id5_addr = 0x6EC, + .epoch1_cfg_batch_id5_addr = 0x6F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + 
.drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000004, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x200020, + .top_irq_mask = {0x200,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_780_rdi_2_reg_info = { + .irq_status_addr = 0x10C, + .irq_mask_addr = 0x110, + .irq_clear_addr = 0x114, + .irq_set_addr = 0x118, + .cfg0_addr = 0x700, + .ctrl_addr = 0x704, + .debug_clr_cmd_addr = 0x708, + .multi_vcdt_cfg0_addr = 0x70c, + .cfg1_addr = 0x710, + .err_recovery_cfg0_addr = 0x714, + .err_recovery_cfg1_addr = 0x718, + .err_recovery_cfg2_addr = 0x71C, + .debug_byte_cntr_ping_addr = 0x720, + .debug_byte_cntr_pong_addr = 0x724, + .camif_frame_cfg_addr = 0x728, + .epoch_irq_cfg_addr = 0x72C, + .epoch0_subsample_ptrn_addr = 0x730, + .epoch1_subsample_ptrn_addr = 0x734, + .debug_camif_1_addr = 0x738, + .debug_camif_0_addr = 0x73C, + .frm_drop_pattern_addr = 0x740, + .frm_drop_period_addr = 0x744, + .irq_subsample_pattern_addr = 0x748, + .irq_subsample_period_addr = 0x74C, + .hcrop_addr = 0x750, + .vcrop_addr = 0x754, + .pix_drop_pattern_addr = 0x758, + .pix_drop_period_addr = 0x75C, + .line_drop_pattern_addr = 0x760, + .line_drop_period_addr = 0x764, + .debug_halt_status_addr = 0x76C, + .debug_misr_val0_addr = 0x770, + .debug_misr_val1_addr = 0x774, + .debug_misr_val2_addr = 0x778, + .debug_misr_val3_addr = 0x77C, + .format_measure_cfg0_addr = 0x780, + .format_measure_cfg1_addr = 0x784, + .format_measure0_addr = 0x788, + .format_measure1_addr = 0x78C, + .format_measure2_addr = 0x790, + .timestamp_curr0_sof_addr = 0x794, + .timestamp_curr1_sof_addr = 0x798, + .timestamp_perv0_sof_addr = 0x79C, + 
.timestamp_perv1_sof_addr = 0x7A0, + .timestamp_curr0_eof_addr = 0x7A4, + .timestamp_curr1_eof_addr = 0x7A8, + .timestamp_perv0_eof_addr = 0x7AC, + .timestamp_perv1_eof_addr = 0x7B0, + .batch_period_cfg_addr = 0x7BC, + .batch_stream_id_cfg_addr = 0x7C0, + .epoch0_cfg_batch_id0_addr = 0x7C4, + .epoch1_cfg_batch_id0_addr = 0x7C8, + .epoch0_cfg_batch_id1_addr = 0x7CC, + .epoch1_cfg_batch_id1_addr = 0x7D0, + .epoch0_cfg_batch_id2_addr = 0x7D4, + .epoch1_cfg_batch_id2_addr = 0x7D8, + .epoch0_cfg_batch_id3_addr = 0x7DC, + .epoch1_cfg_batch_id3_addr = 0x7E0, + .epoch0_cfg_batch_id4_addr = 0x7E4, + .epoch1_cfg_batch_id4_addr = 0x7E8, + .epoch0_cfg_batch_id5_addr = 0x7EC, + .epoch1_cfg_batch_id5_addr = 0x7F0, + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000004, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x400040, + .top_irq_mask = {0x400,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_780_rdi_3_reg_info = { + .irq_status_addr = 0x11C, + .irq_mask_addr = 0x120, + .irq_clear_addr = 0x124, + .irq_set_addr = 0x128, + .cfg0_addr = 0x800, + .ctrl_addr = 0x804, + .debug_clr_cmd_addr = 0x808, + .multi_vcdt_cfg0_addr = 0x80c, + .cfg1_addr = 0x810, + .err_recovery_cfg0_addr = 0x814, + .err_recovery_cfg1_addr = 0x818, + .err_recovery_cfg2_addr = 0x81C, + .debug_byte_cntr_ping_addr = 0x820, + 
.debug_byte_cntr_pong_addr = 0x824,
+ .camif_frame_cfg_addr = 0x828,
+ .epoch_irq_cfg_addr = 0x82C,
+ .epoch0_subsample_ptrn_addr = 0x830,
+ .epoch1_subsample_ptrn_addr = 0x834,
+ .debug_camif_1_addr = 0x838,
+ .debug_camif_0_addr = 0x83C,
+ .frm_drop_pattern_addr = 0x840,
+ .frm_drop_period_addr = 0x844, /* fixed: was 0x840, duplicating frm_drop_pattern_addr; rdi_1/rdi_2 use pattern+0x4 (0x644/0x744) */
+ .irq_subsample_pattern_addr = 0x848,
+ .irq_subsample_period_addr = 0x84C,
+ .hcrop_addr = 0x850,
+ .vcrop_addr = 0x854,
+ .pix_drop_pattern_addr = 0x858,
+ .pix_drop_period_addr = 0x85C,
+ .line_drop_pattern_addr = 0x860,
+ .line_drop_period_addr = 0x864,
+ .debug_halt_status_addr = 0x868, /* NOTE(review): rdi_1/rdi_2 use 0x66C/0x76C here while rdi_0/rdi_3 use 0x?68 — confirm against HW spec */
+ .debug_misr_val0_addr = 0x870,
+ .debug_misr_val1_addr = 0x874,
+ .debug_misr_val2_addr = 0x878,
+ .debug_misr_val3_addr = 0x87C,
+ .format_measure_cfg0_addr = 0x880,
+ .format_measure_cfg1_addr = 0x884,
+ .format_measure0_addr = 0x888,
+ .format_measure1_addr = 0x88C,
+ .format_measure2_addr = 0x890,
+ .timestamp_curr0_sof_addr = 0x894,
+ .timestamp_curr1_sof_addr = 0x898,
+ .timestamp_perv0_sof_addr = 0x89C,
+ .timestamp_perv1_sof_addr = 0x8A0,
+ .timestamp_curr0_eof_addr = 0x8A4,
+ .timestamp_curr1_eof_addr = 0x8A8,
+ .timestamp_perv0_eof_addr = 0x8AC,
+ .timestamp_perv1_eof_addr = 0x8B0,
+ .batch_period_cfg_addr = 0x8BC,
+ .batch_stream_id_cfg_addr = 0x8C0,
+ .epoch0_cfg_batch_id0_addr = 0x8C4,
+ .epoch1_cfg_batch_id0_addr = 0x8C8,
+ .epoch0_cfg_batch_id1_addr = 0x8CC,
+ .epoch1_cfg_batch_id1_addr = 0x8D0,
+ .epoch0_cfg_batch_id2_addr = 0x8D4,
+ .epoch1_cfg_batch_id2_addr = 0x8D8,
+ .epoch0_cfg_batch_id3_addr = 0x8DC,
+ .epoch1_cfg_batch_id3_addr = 0x8E0,
+ .epoch0_cfg_batch_id4_addr = 0x8E4,
+ .epoch1_cfg_batch_id4_addr = 0x8E8,
+ .epoch0_cfg_batch_id5_addr = 0x8EC,
+ .epoch1_cfg_batch_id5_addr = 0x8F0,
+ /* configurations */
+ .resume_frame_boundary = 1,
+ .overflow_ctrl_en = 1,
+ .overflow_ctrl_mode_val = 0x8,
+ .mipi_pack_supported = 1,
+ .packing_fmt_shift_val = 15,
+ .plain_alignment_shift_val = 11,
+ .plain_fmt_shift_val = 12,
+ .crop_v_en_shift_val = 8,
+ 
.crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000004, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x800080, + .top_irq_mask = {0x800,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_lite_780_rx_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_UNBOUNDED_FRAME), + }, + + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + 
.bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, + .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 8, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 9, + .bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 10, + .bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION] = 11, + .bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION] = 12, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 13, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 14, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 15, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 16, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 17, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 18, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 19, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 20, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 21, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 22, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_UNBOUNDED_FRAME] = 24, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_lite_780_top_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP) | + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN) | + BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN), + }, + + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP] = 16, + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN] = 17, + .bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN] = 18, +}; + + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_lite_780_reg_info = { + .top_irq_reg_info = &cam_ife_csid_lite_780_top_irq_reg_info, + .rx_irq_reg_info = &cam_ife_csid_lite_780_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_lite_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_lite_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_lite_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_lite_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + NULL, + 
&cam_ife_csid_lite_780_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_lite_780_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_lite_780_cmn_reg_info, + .csi2_reg = &cam_ife_csid_lite_780_csi2_reg_info, + .buf_done_irq_reg_info = &cam_ife_csid_lite_780_buf_done_irq_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_lite_780_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = NULL, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_lite_780_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_lite_780_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_lite_780_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_lite_780_rdi_3_reg_info, + .need_top_cfg = 0, + .top_irq_desc = &cam_ife_csid_lite_780_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_lite_780_rx_irq_desc, + .path_irq_desc = cam_ife_csid_lite_780_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_lite_780_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_lite_780_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_lite_780_path_irq_desc), + .top_debug_mask = &cam_ife_csid_lite_780_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_lite_780_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, +}; +#endif /* _CAM_IFE_CSID_LITE_780_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite880.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite880.h new file mode 100644 index 0000000000..ef28cc1d51 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite880.h @@ -0,0 +1,1150 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_LITE_880_H_ +#define _CAM_IFE_CSID_LITE_880_H_ + +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_irq_controller.h" +#include "cam_isp_hw_mgr_intf.h" + +/* Offsets might not match due to csid secure regs at beginning of reg space */ + +static const struct cam_ife_csid_irq_desc cam_ife_csid_lite_880_rx_irq_desc[][32] = { + { + { + .bitmask = BIT(0), + .desc = "DL0_EOT", + }, + { + .bitmask = BIT(1), + .desc = "DL1_EOT", + }, + { + .bitmask = BIT(2), + .desc = "DL2_EOT", + }, + { + .bitmask = BIT(3), + .desc = "DL3_EOT", + }, + { + .bitmask = BIT(4), + .desc = "DL0_SOT", + }, + { + .bitmask = BIT(5), + .desc = "DL1_SOT", + }, + { + .bitmask = BIT(6), + .desc = "DL2_SOT", + }, + { + .bitmask = BIT(7), + .desc = "DL3_SOT", + }, + { + .bitmask = BIT(8), + .desc = "LONG_PKT", + }, + { + .bitmask = BIT(9), + .desc = "SHORT_PKT", + }, + { + .bitmask = BIT(10), + .desc = "CPHY_PKT_HDR", + }, + { + .bitmask = BIT(11), + .desc = "ERROR_CPHY_EOT_RECEPTION", + }, + { + .bitmask = BIT(12), + .desc = "ERROR_CPHY_SOT_RECEPTION", + }, + { + .bitmask = BIT(13), + .desc = "ERROR_CPHY_PH_CRC", + }, + { + .bitmask = BIT(14), + .desc = "WARNING_ECC", + }, + { + .bitmask = BIT(15), + .desc = "ERROR_LANE0_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(16), + .desc = "ERROR_LANE1_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(17), + .desc = "ERROR_LANE2_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(18), + .desc = "ERROR_LANE3_FIFO_OVERFLOW", + }, + { + .bitmask = BIT(19), + .desc = "ERROR_CRC", + }, + { + .bitmask = BIT(20), + .desc = "ERROR_ECC", + }, + { + .bitmask = BIT(21), + .desc = "ERROR_MMAPPED_VC_DT", + }, + { + .bitmask = BIT(22), + .desc = "ERROR_UNMAPPED_VC_DT", + }, + { + .bitmask = BIT(23), + .desc = "ERROR_STREAM_UNDERFLOW", + }, + { + .bitmask = BIT(24), + .desc = "ERROR_UNBOUNDED_FRAME", + }, + { + .bitmask = BIT(25), + .desc = "ERROR_ILLEGAL_PROGRAMMING_IRQ", + }, + { + .bitmask = BIT(26), + 
.desc = "INFO_SENSOR_MODE_ID_CHANGE_IRQ", + }, + }, +}; + +static const struct cam_ife_csid_irq_desc cam_ife_csid_lite_880_path_irq_desc[] = { + { + .bitmask = BIT(0), + .err_type = CAM_ISP_HW_ERROR_CSID_FATAL, + .irq_name = "ILLEGAL_PROGRAMMING", + .desc = "SW: Illegal programming sequence", + .debug = "Check the following possiblities:", + .err_handler = cam_ife_csid_ver2_print_illegal_programming_irq_status, + }, + {0}, + { + .bitmask = BIT(2), + .irq_name = "INFO_DATA_FIFO_FULL", + }, + { + .bitmask = BIT(3), + .irq_name = "CAMIF_EOF", + }, + { + .bitmask = BIT(4), + .irq_name = "CAMIF_SOF", + }, + { + .bitmask = BIT(5), + .irq_name = "FRAME_DROP_EOF", + }, + { + .bitmask = BIT(6), + .irq_name = "FRAME_DROP_EOL", + }, + { + .bitmask = BIT(7), + .irq_name = "FRAME_DROP_SOL", + }, + { + .bitmask = BIT(8), + .irq_name = "FRAME_DROP_SOF", + }, + { + .bitmask = BIT(9), + .irq_name = "INFO_INPUT_EOF", + }, + { + .bitmask = BIT(10), + .irq_name = "INFO_INPUT_EOL", + }, + { + .bitmask = BIT(11), + .irq_name = "INFO_INPUT_SOL", + }, + { + .bitmask = BIT(12), + .irq_name = "INFO_INPUT_SOF", + }, + { + .bitmask = BIT(13), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .irq_name = "ERROR_PIX_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(14), + .err_type = CAM_ISP_HW_ERROR_CSID_FRAME_SIZE, + .irq_name = "ERROR_LINE_COUNT", + .err_handler = cam_ife_csid_ver2_print_format_measure_info, + }, + { + .bitmask = BIT(15), + .irq_name = "VCDT_GRP0_SEL", + }, + { + .bitmask = BIT(16), + .irq_name = "VCDT_GRP1_SEL", + }, + { + .bitmask = BIT(17), + .irq_name = "VCDT_GRP_CHANGE", + }, + { + .bitmask = BIT(18), + .irq_name = "FRAME_DROP", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .irq_name = "OVERFLOW_RECOVERY", + .debug = "Back pressure/output fifo ovrfl", + }, + { + .bitmask = BIT(20), + .irq_name = "ERROR_REC_CCIF_VIOLATION From Camif", + }, + { + .bitmask = BIT(21), + .irq_name = 
"CAMIF_EPOCH0", + }, + { + .bitmask = BIT(22), + .irq_name = "CAMIF_EPOCH1", + }, + { + .bitmask = BIT(23), + .irq_name = "RUP_DONE", + }, + { + .bitmask = BIT(24), + .irq_name = "ILLEGAL_BATCH_ID", + }, + { + .bitmask = BIT(25), + .irq_name = "BATCH_END_MISSING_VIOLATION", + }, + { + .bitmask = BIT(26), + .irq_name = "HEIGHT_VIOLATION", + }, + { + .bitmask = BIT(27), + .irq_name = "WIDTH_VIOLATION", + }, + { + .bitmask = BIT(28), + .irq_name = "SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP", + .err_handler = cam_ife_csid_hw_ver2_mup_mismatch_handler, + }, + { + .bitmask = BIT(29), + .irq_name = "CCIF_VIOLATION: Bad frame timings", + }, +}; + +static const struct cam_ife_csid_top_irq_desc cam_ife_csid_lite_880_top_irq_desc[][32] = { + { + { + .bitmask = BIT(1), + .err_type = CAM_ISP_HW_ERROR_CSID_SENSOR_SWITCH_ERROR, + .err_name = "FATAL_SENSOR_SWITCHING_IRQ", + .desc = "Fatal Error duirng dynamically switching between 2 sensors", + }, + { + .bitmask = BIT(18), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .err_name = "ERROR_NO_VOTE_DN", + .desc = "vote_up is asserted before IDLE is encountered in a frame", + }, + { + .bitmask = BIT(19), + .err_type = CAM_ISP_HW_ERROR_RECOVERY_OVERFLOW, + .err_name = "ERROR_VOTE_UP_LATE", + .desc = "vote_up is asserted at the same time as an SOF", + }, + { + .bitmask = BIT(20), + .err_type = CAM_ISP_HW_ERROR_CSID_OUTPUT_FIFO_OVERFLOW, + .err_name = "ERROR_RDI_LINE_BUFFER_CONFLICT", + .desc = "Two or more RDIs programmed to access the shared line buffer", + .err_handler = cam_ife_csid_hw_ver2_rdi_line_buffer_conflict_handler, + }, + }, +}; + +static const uint32_t cam_ife_csid_lite_880_num_top_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_lite_880_top_irq_desc[0]), +}; +static const uint32_t cam_ife_csid_lite_880_num_rx_irq_desc[] = { + ARRAY_SIZE(cam_ife_csid_lite_880_rx_irq_desc[0]), +}; + +static struct cam_irq_register_set cam_ife_csid_lite_880_irq_reg_set[9] = { + /* Top */ + { + .mask_reg_offset = 0x00000080, + .clear_reg_offset = 
0x00000084, + .status_reg_offset = 0x0000007C, + .set_reg_offset = 0x00000088, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + /* RX */ + { + .mask_reg_offset = 0x000000A0, + .clear_reg_offset = 0x000000A4, + .status_reg_offset = 0x0000009C, + }, + /* RDI0 */ + { + .mask_reg_offset = 0x000000F0, + .clear_reg_offset = 0x000000F4, + .status_reg_offset = 0x000000EC, + }, + /* RDI1 */ + { + .mask_reg_offset = 0x00000100, + .clear_reg_offset = 0x00000104, + .status_reg_offset = 0x000000FC, + }, + /* RDI2 */ + { + .mask_reg_offset = 0x00000110, + .clear_reg_offset = 0x00000114, + .status_reg_offset = 0x0000010C, + }, + /* RDI3 */ + { + .mask_reg_offset = 0x00000120, + .clear_reg_offset = 0x00000124, + .status_reg_offset = 0x0000011C, + }, + {}, /* no RDI4 */ + /* IPP */ + { + .mask_reg_offset = 0x000000B0, + .clear_reg_offset = 0x000000B4, + .status_reg_offset = 0x000000AC, + }, + {}, /* no PPP */ +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_880_top_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_TOP], + .global_irq_cmd_offset = 0x00000014, + .global_set_bitmask = 0x00000010, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_880_rx_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RX], /* RX */ + .global_irq_cmd_offset = 0, +}; + +static struct cam_irq_controller_reg_info cam_ife_csid_lite_880_path_irq_reg_info[6] = { + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_0], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_1], + .global_irq_cmd_offset = 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_2], + .global_irq_cmd_offset 
= 0, + }, + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_RDI_3], + .global_irq_cmd_offset = 0, + }, + {}, /* no RDI4 */ + { + .num_registers = 1, + .irq_reg_set = &cam_ife_csid_lite_880_irq_reg_set[CAM_IFE_CSID_IRQ_REG_IPP], + .global_irq_cmd_offset = 0, + }, +}; + +static struct cam_irq_register_set cam_ife_csid_lite_880_buf_done_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000090, + .clear_reg_offset = 0x00000094, + .status_reg_offset = 0x0000008C, + }, +}; + +static struct cam_irq_controller_reg_info + cam_ife_csid_lite_880_buf_done_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = cam_ife_csid_lite_880_buf_done_irq_reg_set, + .global_irq_cmd_offset = 0, /* intentionally set to zero */ +}; + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_lite_880_cmn_reg_info = { + .hw_version_addr = 0x0000, + .cfg0_addr = 0x0004, + .global_cmd_addr = 0x0008, + .reset_cfg_addr = 0x000c, + .reset_cmd_addr = 0x0010, + .irq_cmd_addr = 0x0014, + .rup_aup_cmd_addr = 0x0018, + .offline_cmd_addr = 0x001C, + .shdr_master_slave_cfg_addr = 0x0020, + .top_irq_status_addr = {0x007C,}, + .top_irq_mask_addr = {0x0080,}, + .top_irq_clear_addr = {0x0084,}, + .top_irq_set_addr = {0x0088,}, + .buf_done_irq_status_addr = 0x008C, + .buf_done_irq_mask_addr = 0x0090, + .buf_done_irq_clear_addr = 0x0094, + .buf_done_irq_set_addr = 0x0098, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 4, + .num_pix = 1, + .num_ppp = 0, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .vc_mask = 0x1F, + .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + .decode_format_mask = 0xF, + .decode_format1_supported = true, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 
13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .mup_shift_val = 28, + .early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .rst_loc_path_only_val = 0x0, + .rst_location_shift_val = 4, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .top_reset_irq_mask = {0x1,}, + .top_buf_done_irq_mask = 0x2000, + .decode_format_payload_only = 0xF, + .phy_sel_base_idx = 1, + .timestamp_enabled_in_cfg0 = true, + .camif_irq_support = true, + .epoch_factor = 50, +}; + +static struct cam_ife_csid_ver2_csi2_rx_reg_info + cam_ife_csid_lite_880_csi2_reg_info = { + .irq_status_addr = {0x009C,}, + .irq_mask_addr = {0x00A0,}, + .irq_clear_addr = {0x00A4,}, + .irq_set_addr = {0x00A8,}, + /*CSI2 rx control */ + .cfg0_addr = 0x0200, + .cfg1_addr = 0x0204, + .capture_ctrl_addr = 0x0208, + .rst_strobes_addr = 0x020C, + .cap_unmap_long_pkt_hdr_0_addr = 0x0210, + .cap_unmap_long_pkt_hdr_1_addr = 0x0214, + .captured_short_pkt_0_addr = 0x0218, + .captured_short_pkt_1_addr = 0x021c, + .captured_long_pkt_0_addr = 0x0220, + .captured_long_pkt_1_addr = 0x0224, + .captured_long_pkt_ftr_addr = 0x0228, + 
.captured_cphy_pkt_hdr_addr = 0x022c, + .lane0_misr_addr = 0x0230, + .lane1_misr_addr = 0x0234, + .lane2_misr_addr = 0x0238, + .lane3_misr_addr = 0x023c, + .total_pkts_rcvd_addr = 0x0240, + .stats_ecc_addr = 0x0244, + .total_crc_err_addr = 0x0248, + .de_scramble_type3_cfg0_addr = 0x024C, + .de_scramble_type3_cfg1_addr = 0x0250, + .de_scramble_type2_cfg0_addr = 0x0254, + .de_scramble_type2_cfg1_addr = 0x0258, + .de_scramble_type1_cfg0_addr = 0x025C, + .de_scramble_type1_cfg1_addr = 0x0260, + .de_scramble_type0_cfg0_addr = 0x0264, + .de_scramble_type0_cfg1_addr = 0x0268, + + .rst_done_shift_val = 27, + .irq_mask_all = 0xFFFFFFF, + .misr_enable_shift_val = 6, + .vc_mode_shift_val = 2, + .capture_long_pkt_en_shift = 0, + .capture_short_pkt_en_shift = 1, + .capture_cphy_pkt_en_shift = 2, + .capture_long_pkt_dt_shift = 4, + .capture_long_pkt_vc_shift = 10, + .capture_short_pkt_vc_shift = 15, + .capture_cphy_pkt_dt_shift = 20, + .capture_cphy_pkt_vc_shift = 26, + .phy_num_mask = 0xf, + .vc_mask = 0x7C00000, + .dt_mask = 0x3f0000, + .wc_mask = 0xffff, + .vc_shift = 0x16, + .dt_shift = 0x10, + .wc_shift = 0, + .calc_crc_mask = 0xffff, + .expected_crc_mask = 0xffff, + .calc_crc_shift = 0x10, + .ecc_correction_shift_en = 0, + .lane_num_shift = 0, + .lane_cfg_shift = 4, + .phy_type_shift = 24, + .phy_num_shift = 20, + .tpg_mux_en_shift = 27, + .tpg_num_sel_shift = 28, + .phy_bist_shift_en = 7, + .epd_mode_shift_en = 8, + .eotp_shift_en = 9, + .dyn_sensor_switch_shift_en = 10, + .rup_aup_latch_shift = 13, + .rup_aup_latch_supported = true, + .long_pkt_strobe_rst_shift = 0, + .short_pkt_strobe_rst_shift = 1, + .cphy_pkt_strobe_rst_shift = 2, + .unmapped_pkt_strobe_rst_shift = 3, + .top_irq_mask = {0x4,}, + .fatal_err_mask = {0x19FA800,}, + .part_fatal_err_mask = {0x0001000,}, + .non_fatal_err_mask = {0x0200000,}, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_880_ipp_reg_info = { + .irq_status_addr = 0x00AC, + .irq_mask_addr = 0x00B0, + .irq_clear_addr 
= 0x00B4, + .irq_set_addr = 0x00B8, + .cfg0_addr = 0x0300, + .ctrl_addr = 0x0304, + .debug_clr_cmd_addr = 0x0308, + .multi_vcdt_cfg0_addr = 0x030c, + .cfg1_addr = 0x0310, + .err_recovery_cfg0_addr = 0x0318, + .err_recovery_cfg1_addr = 0x031C, + .err_recovery_cfg2_addr = 0x0320, + .camif_frame_cfg_addr = 0x0330, + .epoch_irq_cfg_addr = 0x0334, + .epoch0_subsample_ptrn_addr = 0x0338, + .epoch1_subsample_ptrn_addr = 0x033C, + .debug_camif_1_addr = 0x0340, + .debug_camif_0_addr = 0x0344, + .debug_halt_status_addr = 0x0348, + .debug_misr_val0_addr = 0x034C, + .debug_misr_val1_addr = 0x0350, + .debug_misr_val2_addr = 0x0354, + .debug_misr_val3_addr = 0x0358, + .hcrop_addr = 0x035c, + .vcrop_addr = 0x0360, + .pix_drop_pattern_addr = 0x0364, + .pix_drop_period_addr = 0x0368, + .line_drop_pattern_addr = 0x036C, + .line_drop_period_addr = 0x0370, + .frm_drop_pattern_addr = 0x0374, + .frm_drop_period_addr = 0x0378, + .irq_subsample_pattern_addr = 0x037C, + .irq_subsample_period_addr = 0x0380, + .format_measure_cfg0_addr = 0x0384, + .format_measure_cfg1_addr = 0x0388, + .format_measure0_addr = 0x038C, + .format_measure1_addr = 0x0390, + .format_measure2_addr = 0x0394, + .timestamp_curr0_sof_addr = 0x0398, + .timestamp_curr1_sof_addr = 0x039C, + .timestamp_perv0_sof_addr = 0x03A0, + .timestamp_perv1_sof_addr = 0x03A4, + .timestamp_curr0_eof_addr = 0x03A8, + .timestamp_curr1_eof_addr = 0x03AC, + .timestamp_perv0_eof_addr = 0x03B0, + .timestamp_perv1_eof_addr = 0x03B4, + .batch_period_cfg_addr = 0x03C4, + .batch_stream_id_cfg_addr = 0x03C8, + .epoch0_cfg_batch_id0_addr = 0x03CC, + .epoch1_cfg_batch_id0_addr = 0x03D0, + .epoch0_cfg_batch_id1_addr = 0x03D4, + .epoch1_cfg_batch_id1_addr = 0x03D8, + .epoch0_cfg_batch_id2_addr = 0x03DC, + .epoch1_cfg_batch_id2_addr = 0x03E0, + .epoch0_cfg_batch_id3_addr = 0x03E4, + .epoch1_cfg_batch_id3_addr = 0x03E8, + .epoch0_cfg_batch_id4_addr = 0x03EC, + .epoch1_cfg_batch_id4_addr = 0x03F0, + .epoch0_cfg_batch_id5_addr = 0x03F4, + 
.epoch1_cfg_batch_id5_addr = 0x03F8, + + /* configurations */ + .start_mode_internal = 0x0, + .start_mode_global = 0x1, + .start_mode_master = 0x2, + .start_mode_slave = 0x3, + .start_mode_shift = 2, + .start_master_sel_val = 0, + .start_master_sel_shift = 4, + .resume_frame_boundary = 1, + .crop_v_en_shift_val = 13, + .crop_h_en_shift_val = 12, + .drop_v_en_shift_val = 11, + .drop_h_en_shift_val = 10, + .pix_store_en_shift_val = 14, + .early_eof_en_shift_val = 16, + .format_measure_en_shift_val = 8, + .timestamp_en_shift_val = 6, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .min_hbi_shift_val = 4, + .start_master_sel_shift_val = 4, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x10001, + .top_irq_mask = {0x10,}, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_880_rdi_0_reg_info = { + .irq_status_addr = 0x00EC, + .irq_mask_addr = 0x00F0, + .irq_clear_addr = 0x00F4, + .irq_set_addr = 0x00F8, + .cfg0_addr = 0x0500, + .ctrl_addr = 0x0504, + .debug_clr_cmd_addr = 0x0508, + .multi_vcdt_cfg0_addr = 0x050c, + .cfg1_addr = 0x0510, + .err_recovery_cfg0_addr = 0x0514, + .err_recovery_cfg1_addr = 0x0518, + .err_recovery_cfg2_addr = 0x051C, + .debug_byte_cntr_ping_addr = 0x0520, + .debug_byte_cntr_pong_addr = 0x0524, + .camif_frame_cfg_addr = 0x0528, + .epoch_irq_cfg_addr = 0x052C, + .epoch0_subsample_ptrn_addr = 0x0530, + .epoch1_subsample_ptrn_addr = 0x0534, + .debug_camif_1_addr = 0x0538, + .debug_camif_0_addr = 0x053C, + .frm_drop_pattern_addr = 0x0540, + .frm_drop_period_addr = 0x0544, + .irq_subsample_pattern_addr = 0x0548, + .irq_subsample_period_addr = 0x054C, + .hcrop_addr = 0x0550, + .vcrop_addr = 0x0554, + .pix_drop_pattern_addr = 0x0558, + .pix_drop_period_addr = 0x055C, + .line_drop_pattern_addr = 0x0560, + .line_drop_period_addr = 0x0564, + 
.debug_halt_status_addr = 0x0568, + .debug_misr_val0_addr = 0x0570, + .debug_misr_val1_addr = 0x0574, + .debug_misr_val2_addr = 0x0578, + .debug_misr_val3_addr = 0x057C, + .format_measure_cfg0_addr = 0x0580, + .format_measure_cfg1_addr = 0x0584, + .format_measure0_addr = 0x0588, + .format_measure1_addr = 0x058C, + .format_measure2_addr = 0x0590, + .timestamp_curr0_sof_addr = 0x0594, + .timestamp_curr1_sof_addr = 0x0598, + .timestamp_perv0_sof_addr = 0x059C, + .timestamp_perv1_sof_addr = 0x05A0, + .timestamp_curr0_eof_addr = 0x05A4, + .timestamp_curr1_eof_addr = 0x05A8, + .timestamp_perv0_eof_addr = 0x05AC, + .timestamp_perv1_eof_addr = 0x05B0, + .batch_period_cfg_addr = 0x05BC, + .batch_stream_id_cfg_addr = 0x05C0, + .epoch0_cfg_batch_id0_addr = 0x05C4, + .epoch1_cfg_batch_id0_addr = 0x05C8, + .epoch0_cfg_batch_id1_addr = 0x05CC, + .epoch1_cfg_batch_id1_addr = 0x05D0, + .epoch0_cfg_batch_id2_addr = 0x05D4, + .epoch1_cfg_batch_id2_addr = 0x05D8, + .epoch0_cfg_batch_id3_addr = 0x05DC, + .epoch1_cfg_batch_id3_addr = 0x05E0, + .epoch0_cfg_batch_id4_addr = 0x05E4, + .epoch1_cfg_batch_id4_addr = 0x05E8, + .epoch0_cfg_batch_id5_addr = 0x05EC, + .epoch1_cfg_batch_id5_addr = 0x05F0, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x100010, + .top_irq_mask = {0x100,}, + .epoch0_shift_val = 16, +}; + 
+static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_880_rdi_1_reg_info = { + .irq_status_addr = 0x00FC, + .irq_mask_addr = 0x0100, + .irq_clear_addr = 0x0104, + .irq_set_addr = 0x0108, + .cfg0_addr = 0x0600, + .ctrl_addr = 0x0604, + .debug_clr_cmd_addr = 0x0608, + .multi_vcdt_cfg0_addr = 0x060c, + .cfg1_addr = 0x0610, + .err_recovery_cfg0_addr = 0x0614, + .err_recovery_cfg1_addr = 0x0618, + .err_recovery_cfg2_addr = 0x061C, + .debug_byte_cntr_ping_addr = 0x0620, + .debug_byte_cntr_pong_addr = 0x0624, + .camif_frame_cfg_addr = 0x0628, + .epoch_irq_cfg_addr = 0x062C, + .epoch0_subsample_ptrn_addr = 0x0630, + .epoch1_subsample_ptrn_addr = 0x0634, + .debug_camif_1_addr = 0x0638, + .debug_camif_0_addr = 0x063C, + .frm_drop_pattern_addr = 0x0640, + .frm_drop_period_addr = 0x0644, + .irq_subsample_pattern_addr = 0x0648, + .irq_subsample_period_addr = 0x064C, + .hcrop_addr = 0x0650, + .vcrop_addr = 0x0654, + .pix_drop_pattern_addr = 0x0658, + .pix_drop_period_addr = 0x065C, + .line_drop_pattern_addr = 0x0660, + .line_drop_period_addr = 0x0664, + .debug_halt_status_addr = 0x066C, + .debug_misr_val0_addr = 0x0670, + .debug_misr_val1_addr = 0x0674, + .debug_misr_val2_addr = 0x0678, + .debug_misr_val3_addr = 0x067C, + .format_measure_cfg0_addr = 0x0680, + .format_measure_cfg1_addr = 0x0684, + .format_measure0_addr = 0x0688, + .format_measure1_addr = 0x068C, + .format_measure2_addr = 0x0690, + .timestamp_curr0_sof_addr = 0x0694, + .timestamp_curr1_sof_addr = 0x0698, + .timestamp_perv0_sof_addr = 0x069C, + .timestamp_perv1_sof_addr = 0x06A0, + .timestamp_curr0_eof_addr = 0x06A4, + .timestamp_curr1_eof_addr = 0x06A8, + .timestamp_perv0_eof_addr = 0x06AC, + .timestamp_perv1_eof_addr = 0x06B0, + .batch_period_cfg_addr = 0x06BC, + .batch_stream_id_cfg_addr = 0x06C0, + .epoch0_cfg_batch_id0_addr = 0x06C4, + .epoch1_cfg_batch_id0_addr = 0x06C8, + .epoch0_cfg_batch_id1_addr = 0x06CC, + .epoch1_cfg_batch_id1_addr = 0x06D0, + .epoch0_cfg_batch_id2_addr = 0x06D4, + 
.epoch1_cfg_batch_id2_addr = 0x06D8, + .epoch0_cfg_batch_id3_addr = 0x06DC, + .epoch1_cfg_batch_id3_addr = 0x06E0, + .epoch0_cfg_batch_id4_addr = 0x06E4, + .epoch1_cfg_batch_id4_addr = 0x06E8, + .epoch0_cfg_batch_id5_addr = 0x06EC, + .epoch1_cfg_batch_id5_addr = 0x06F0, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x200020, + .top_irq_mask = {0x200,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_880_rdi_2_reg_info = { + .irq_status_addr = 0x010C, + .irq_mask_addr = 0x0110, + .irq_clear_addr = 0x0114, + .irq_set_addr = 0x0118, + .cfg0_addr = 0x0700, + .ctrl_addr = 0x0704, + .debug_clr_cmd_addr = 0x0708, + .multi_vcdt_cfg0_addr = 0x070c, + .cfg1_addr = 0x0710, + .err_recovery_cfg0_addr = 0x0714, + .err_recovery_cfg1_addr = 0x0718, + .err_recovery_cfg2_addr = 0x071C, + .debug_byte_cntr_ping_addr = 0x0720, + .debug_byte_cntr_pong_addr = 0x0724, + .camif_frame_cfg_addr = 0x0728, + .epoch_irq_cfg_addr = 0x072C, + .epoch0_subsample_ptrn_addr = 0x0730, + .epoch1_subsample_ptrn_addr = 0x0734, + .debug_camif_1_addr = 0x0738, + .debug_camif_0_addr = 0x073C, + .frm_drop_pattern_addr = 0x0740, + .frm_drop_period_addr = 0x0744, + .irq_subsample_pattern_addr = 0x0748, + .irq_subsample_period_addr = 0x074C, + .hcrop_addr = 0x0750, + .vcrop_addr = 
0x0754, + .pix_drop_pattern_addr = 0x0758, + .pix_drop_period_addr = 0x075C, + .line_drop_pattern_addr = 0x0760, + .line_drop_period_addr = 0x0764, + .debug_halt_status_addr = 0x076C, + .debug_misr_val0_addr = 0x0770, + .debug_misr_val1_addr = 0x0774, + .debug_misr_val2_addr = 0x0778, + .debug_misr_val3_addr = 0x077C, + .format_measure_cfg0_addr = 0x0780, + .format_measure_cfg1_addr = 0x0784, + .format_measure0_addr = 0x0788, + .format_measure1_addr = 0x078C, + .format_measure2_addr = 0x0790, + .timestamp_curr0_sof_addr = 0x0794, + .timestamp_curr1_sof_addr = 0x0798, + .timestamp_perv0_sof_addr = 0x079C, + .timestamp_perv1_sof_addr = 0x07A0, + .timestamp_curr0_eof_addr = 0x07A4, + .timestamp_curr1_eof_addr = 0x07A8, + .timestamp_perv0_eof_addr = 0x07AC, + .timestamp_perv1_eof_addr = 0x07B0, + .batch_period_cfg_addr = 0x07BC, + .batch_stream_id_cfg_addr = 0x07C0, + .epoch0_cfg_batch_id0_addr = 0x07C4, + .epoch1_cfg_batch_id0_addr = 0x07C8, + .epoch0_cfg_batch_id1_addr = 0x07CC, + .epoch1_cfg_batch_id1_addr = 0x07D0, + .epoch0_cfg_batch_id2_addr = 0x07D4, + .epoch1_cfg_batch_id2_addr = 0x07D8, + .epoch0_cfg_batch_id3_addr = 0x07DC, + .epoch1_cfg_batch_id3_addr = 0x07E0, + .epoch0_cfg_batch_id4_addr = 0x07E4, + .epoch1_cfg_batch_id4_addr = 0x07E8, + .epoch0_cfg_batch_id5_addr = 0x07EC, + .epoch1_cfg_batch_id5_addr = 0x07F0, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 
0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x400040, + .top_irq_mask = {0x400,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_ver2_path_reg_info + cam_ife_csid_lite_880_rdi_3_reg_info = { + .irq_status_addr = 0x011C, + .irq_mask_addr = 0x0120, + .irq_clear_addr = 0x0124, + .irq_set_addr = 0x0128, + .cfg0_addr = 0x0800, + .ctrl_addr = 0x0804, + .debug_clr_cmd_addr = 0x0808, + .multi_vcdt_cfg0_addr = 0x080c, + .cfg1_addr = 0x0810, + .err_recovery_cfg0_addr = 0x0814, + .err_recovery_cfg1_addr = 0x0818, + .err_recovery_cfg2_addr = 0x081C, + .debug_byte_cntr_ping_addr = 0x0820, + .debug_byte_cntr_pong_addr = 0x0824, + .camif_frame_cfg_addr = 0x0828, + .epoch_irq_cfg_addr = 0x082C, + .epoch0_subsample_ptrn_addr = 0x0830, + .epoch1_subsample_ptrn_addr = 0x0834, + .debug_camif_1_addr = 0x0838, + .debug_camif_0_addr = 0x083C, + .frm_drop_pattern_addr = 0x0840, + .frm_drop_period_addr = 0x0844, + .irq_subsample_pattern_addr = 0x0848, + .irq_subsample_period_addr = 0x084C, + .hcrop_addr = 0x0850, + .vcrop_addr = 0x0854, + .pix_drop_pattern_addr = 0x0858, + .pix_drop_period_addr = 0x085C, + .line_drop_pattern_addr = 0x0860, + .line_drop_period_addr = 0x0864, + .debug_halt_status_addr = 0x0868, + .debug_misr_val0_addr = 0x0870, + .debug_misr_val1_addr = 0x0874, + .debug_misr_val2_addr = 0x0878, + .debug_misr_val3_addr = 0x087C, + .format_measure_cfg0_addr = 0x0880, + .format_measure_cfg1_addr = 0x0884, + .format_measure0_addr = 0x0888, + .format_measure1_addr = 0x088C, + .format_measure2_addr = 0x0890, + .timestamp_curr0_sof_addr = 0x0894, + .timestamp_curr1_sof_addr = 0x0898, + .timestamp_perv0_sof_addr = 0x089C, + .timestamp_perv1_sof_addr = 0x08A0, + .timestamp_curr0_eof_addr = 0x08A4, + .timestamp_curr1_eof_addr = 0x08A8, + .timestamp_perv0_eof_addr = 0x08AC, + .timestamp_perv1_eof_addr = 0x08B0, + .batch_period_cfg_addr = 0x08BC, + .batch_stream_id_cfg_addr = 0x08C0, + .epoch0_cfg_batch_id0_addr = 0x08C4, + 
.epoch1_cfg_batch_id0_addr = 0x08C8, + .epoch0_cfg_batch_id1_addr = 0x08CC, + .epoch1_cfg_batch_id1_addr = 0x08D0, + .epoch0_cfg_batch_id2_addr = 0x08D4, + .epoch1_cfg_batch_id2_addr = 0x08D8, + .epoch0_cfg_batch_id3_addr = 0x08DC, + .epoch1_cfg_batch_id3_addr = 0x08E0, + .epoch0_cfg_batch_id4_addr = 0x08E4, + .epoch1_cfg_batch_id4_addr = 0x08E8, + .epoch0_cfg_batch_id5_addr = 0x08EC, + .epoch1_cfg_batch_id5_addr = 0x08F0, + + /* configurations */ + .resume_frame_boundary = 1, + .overflow_ctrl_en = 1, + .overflow_ctrl_mode_val = 0x8, + .mipi_pack_supported = 1, + .packing_fmt_shift_val = 15, + .plain_alignment_shift_val = 11, + .plain_fmt_shift_val = 12, + .crop_v_en_shift_val = 8, + .crop_h_en_shift_val = 7, + .drop_v_en_shift_val = 6, + .drop_h_en_shift_val = 5, + .early_eof_en_shift_val = 14, + .format_measure_en_shift_val = 3, + .timestamp_en_shift_val = 6, + .debug_byte_cntr_rst_shift_val = 2, + .ccif_violation_en = 1, + .fatal_err_mask = 0x20186001, + .non_fatal_err_mask = 0x12000000, + .sof_irq_mask = 0x10, + .rup_irq_mask = 0x800000, + .epoch0_irq_mask = 0x200000, + .epoch1_irq_mask = 0x400000, + .eof_irq_mask = 0x8, + .rup_aup_mask = 0x800080, + .top_irq_mask = {0x800,}, + .epoch0_shift_val = 16, +}; + +static struct cam_ife_csid_rx_debug_mask cam_ife_csid_lite_880_rx_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_RX_DL0_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_EOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL0_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL1_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL2_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_DL3_SOT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_LONG_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC) | + 
BIT_ULL(CAM_IFE_CSID_RX_WARNING_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_CRC) | + BIT_ULL(CAM_IFE_CSID_RX_ERROR_ECC) | + BIT_ULL(CAM_IFE_CSID_RX_MMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_UNMAPPED_VC_DT) | + BIT_ULL(CAM_IFE_CSID_RX_STREAM_UNDERFLOW) | + BIT_ULL(CAM_IFE_CSID_RX_UNBOUNDED_FRAME), + }, + + .bit_pos[CAM_IFE_CSID_RX_DL0_EOT_CAPTURED] = 0, + .bit_pos[CAM_IFE_CSID_RX_DL1_EOT_CAPTURED] = 1, + .bit_pos[CAM_IFE_CSID_RX_DL2_EOT_CAPTURED] = 2, + .bit_pos[CAM_IFE_CSID_RX_DL3_EOT_CAPTURED] = 3, + .bit_pos[CAM_IFE_CSID_RX_DL0_SOT_CAPTURED] = 4, + .bit_pos[CAM_IFE_CSID_RX_DL1_SOT_CAPTURED] = 5, + .bit_pos[CAM_IFE_CSID_RX_DL2_SOT_CAPTURED] = 6, + .bit_pos[CAM_IFE_CSID_RX_DL3_SOT_CAPTURED] = 7, + .bit_pos[CAM_IFE_CSID_RX_LONG_PKT_CAPTURED] = 8, + .bit_pos[CAM_IFE_CSID_RX_SHORT_PKT_CAPTURED] = 9, + .bit_pos[CAM_IFE_CSID_RX_CPHY_PKT_HDR_CAPTURED] = 10, + .bit_pos[CAM_IFE_CSID_RX_CPHY_EOT_RECEPTION] = 11, + .bit_pos[CAM_IFE_CSID_RX_CPHY_SOT_RECEPTION] = 12, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CPHY_PH_CRC] = 13, + .bit_pos[CAM_IFE_CSID_RX_WARNING_ECC] = 14, + .bit_pos[CAM_IFE_CSID_RX_LANE0_FIFO_OVERFLOW] = 15, + .bit_pos[CAM_IFE_CSID_RX_LANE1_FIFO_OVERFLOW] = 16, + .bit_pos[CAM_IFE_CSID_RX_LANE2_FIFO_OVERFLOW] = 17, + .bit_pos[CAM_IFE_CSID_RX_LANE3_FIFO_OVERFLOW] = 18, + .bit_pos[CAM_IFE_CSID_RX_ERROR_CRC] = 19, + .bit_pos[CAM_IFE_CSID_RX_ERROR_ECC] = 20, + .bit_pos[CAM_IFE_CSID_RX_MMAPPED_VC_DT] = 21, + .bit_pos[CAM_IFE_CSID_RX_UNMAPPED_VC_DT] = 22, + .bit_pos[CAM_IFE_CSID_RX_STREAM_UNDERFLOW] = 23, + .bit_pos[CAM_IFE_CSID_RX_UNBOUNDED_FRAME] = 24, +}; + +static struct cam_ife_csid_top_debug_mask cam_ife_csid_lite_880_top_debug_mask = { + + .evt_bitmap = { + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_UP) | + BIT_ULL(CAM_IFE_CSID_TOP_INFO_VOTE_DN) | + 
BIT_ULL(CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN), + }, + + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_UP] = 16, + .bit_pos[CAM_IFE_CSID_TOP_INFO_VOTE_DN] = 17, + .bit_pos[CAM_IFE_CSID_TOP_ERR_NO_VOTE_DN] = 18, +}; + + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_lite_880_reg_info = { + .top_irq_reg_info = &cam_ife_csid_lite_880_top_irq_reg_info, + .rx_irq_reg_info = &cam_ife_csid_lite_880_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + NULL, + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_lite_880_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_lite_880_cmn_reg_info, + .csi2_reg = &cam_ife_csid_lite_880_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_lite_880_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = NULL, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_lite_880_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_lite_880_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_lite_880_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_lite_880_rdi_3_reg_info, + .need_top_cfg = 0, + .top_irq_desc = &cam_ife_csid_lite_880_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_lite_880_rx_irq_desc, + .path_irq_desc = cam_ife_csid_lite_880_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_lite_880_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_lite_880_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_lite_880_path_irq_desc), + .top_debug_mask = &cam_ife_csid_lite_880_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_lite_880_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, + .is_ife_sfe_mapped = true, +}; +#endif 
/* _CAM_IFE_CSID_LITE_880_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite970.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite970.h new file mode 100644 index 0000000000..5b40d6491f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite970.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_LITE_970_H_ +#define _CAM_IFE_CSID_LITE_970_H_ +​+#include "cam_ife_csid_lite880.h" +#include "cam_ife_csid_lite980.h" + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_lite_970_reg_info = { + .top_irq_reg_info = &cam_ife_csid_lite_880_top_irq_reg_info, + .rx_irq_reg_info = &cam_ife_csid_lite_880_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + NULL, + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_lite_880_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_lite_980_cmn_reg_info, + .csi2_reg = &cam_ife_csid_lite_880_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_lite_880_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = NULL, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_lite_880_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_lite_880_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_lite_880_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_lite_880_rdi_3_reg_info, + .need_top_cfg = 0, + .dynamic_drv_supported = false, + .top_irq_desc 
= &cam_ife_csid_lite_880_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_lite_880_rx_irq_desc, + .path_irq_desc = cam_ife_csid_lite_880_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_lite_880_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_lite_880_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_lite_880_path_irq_desc), + .top_debug_mask = &cam_ife_csid_lite_880_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_lite_880_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, +}; + +#endif /* _CAM_IFE_CSID_LITE_970_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite975.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite975.h new file mode 100644 index 0000000000..73ca8ab721 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite975.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_IFE_CSID_LITE_975_H_ +#define _CAM_IFE_CSID_LITE_975_H_ + +#include "cam_ife_csid_lite880.h" +#include "cam_ife_csid_lite980.h" + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_lite_975_reg_info = { + .top_irq_reg_info = &cam_ife_csid_lite_880_top_irq_reg_info, + .rx_irq_reg_info = &cam_ife_csid_lite_880_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + NULL, + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_lite_880_buf_done_irq_reg_info, + .cmn_reg = &cam_ife_csid_lite_980_cmn_reg_info, + .csi2_reg = &cam_ife_csid_lite_880_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_lite_880_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = NULL, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_lite_880_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_lite_880_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_lite_880_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_lite_880_rdi_3_reg_info, + .need_top_cfg = 0, + .dynamic_drv_supported = true, + .top_irq_desc = &cam_ife_csid_lite_880_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_lite_880_rx_irq_desc, + .path_irq_desc = cam_ife_csid_lite_880_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_lite_880_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_lite_880_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_lite_880_path_irq_desc), + .top_debug_mask = &cam_ife_csid_lite_880_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_lite_880_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, +}; + +#endif /* _CAM_IFE_CSID_LITE_975_H_ */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite980.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite980.h new file mode 100644 index 0000000000..2401387439 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite980.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_LITE_980_H_ +#define _CAM_IFE_CSID_LITE_980_H_ + +#include "cam_ife_csid_lite880.h" + +static struct cam_ife_csid_ver2_common_reg_info + cam_ife_csid_lite_980_cmn_reg_info = { + .hw_version_addr = 0x0000, + .cfg0_addr = 0x0004, + .global_cmd_addr = 0x0008, + .reset_cfg_addr = 0x000c, + .reset_cmd_addr = 0x0010, + .irq_cmd_addr = 0x0014, + .rup_aup_cmd_addr = 0x0018, + .offline_cmd_addr = 0x001C, + .shdr_master_slave_cfg_addr = 0x0020, + .top_irq_status_addr = {0x007C,}, + .top_irq_mask_addr = {0x0080,}, + .top_irq_clear_addr = {0x0084,}, + .top_irq_set_addr = {0x0088,}, + .buf_done_irq_status_addr = 0x008C, + .buf_done_irq_mask_addr = 0x0090, + .buf_done_irq_clear_addr = 0x0094, + .buf_done_irq_set_addr = 0x0098, + + /*configurations */ + .major_version = 6, + .minor_version = 8, + .version_incr = 0, + .num_rdis = 4, + .num_pix = 1, + .num_ppp = 0, + .rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .vc_mask = 0x1F, + .dt_shift_val = 16, + .dt_mask = 0x3F, + .crop_shift_val = 16, + .decode_format_shift_val = 12, + .decode_format1_shift_val = 16, + .decode_format_mask = 0xF, + .decode_format1_supported = true, + .frame_id_decode_en_shift_val = 1, + .multi_vcdt_vc1_shift_val = 2, + .multi_vcdt_dt1_shift_val = 7, + .multi_vcdt_ts_combo_en_shift_val = 13, + .multi_vcdt_en_shift_val = 0, + .timestamp_stb_sel_shift_val = 8, + .vfr_en_shift_val = 0, + .mup_shift_val = 28, + 
.early_eof_supported = 1, + .vfr_supported = 1, + .multi_vcdt_supported = 1, + .ts_comb_vcdt_en = true, + .ts_comb_vcdt_mask = 3, + .frame_id_dec_supported = 1, + .measure_en_hbi_vbi_cnt_mask = 0xc, + .measure_pixel_line_en_mask = 0x3, + .crop_pix_start_mask = 0x3fff, + .crop_pix_end_mask = 0xffff, + .crop_line_start_mask = 0x3fff, + .crop_line_end_mask = 0xffff, + .drop_supported = 1, + .ipp_irq_mask_all = 0x7FFF, + .rdi_irq_mask_all = 0x7FFF, + .rst_loc_path_only_val = 0x0, + .rst_location_shift_val = 4, + .rst_loc_complete_csid_val = 0x1, + .rst_mode_frame_boundary_val = 0x0, + .rst_mode_immediate_val = 0x1, + .rst_cmd_hw_reset_complete_val = 0x1, + .rst_cmd_sw_reset_complete_val = 0x2, + .rst_cmd_irq_ctrl_only_val = 0x4, + .timestamp_strobe_val = 0x2, + .global_reset = 1, + .aup_rup_supported = 1, + .only_master_rup = 1, + .format_measure_height_mask_val = 0xFFFF, + .format_measure_height_shift_val = 0x10, + .format_measure_width_mask_val = 0xFFFF, + .format_measure_width_shift_val = 0x0, + .top_reset_irq_mask = {0x1,}, + .top_buf_done_irq_mask = 0x2000, + .decode_format_payload_only = 0xF, + .phy_sel_base_idx = 1, + .timestamp_enabled_in_cfg0 = true, + .camif_irq_support = true, + .capabilities = CAM_IFE_CSID_CAP_SKIP_PATH_CFG1 | + CAM_IFE_CSID_CAP_SKIP_EPOCH_CFG, +}; + +static struct cam_ife_csid_ver2_reg_info cam_ife_csid_lite_980_reg_info = { + .top_irq_reg_info = &cam_ife_csid_lite_880_top_irq_reg_info, + .rx_irq_reg_info = &cam_ife_csid_lite_880_rx_irq_reg_info, + .path_irq_reg_info = { + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_0], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_1], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_2], + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_RDI_3], + NULL, + &cam_ife_csid_lite_880_path_irq_reg_info[CAM_IFE_PIX_PATH_RES_IPP], + }, + .buf_done_irq_reg_info = &cam_ife_csid_lite_880_buf_done_irq_reg_info, + .cmn_reg = 
&cam_ife_csid_lite_980_cmn_reg_info, + .csi2_reg = &cam_ife_csid_lite_880_csi2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_IPP] = &cam_ife_csid_lite_880_ipp_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_PPP] = NULL, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_0] = &cam_ife_csid_lite_880_rdi_0_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_1] = &cam_ife_csid_lite_880_rdi_1_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_2] = &cam_ife_csid_lite_880_rdi_2_reg_info, + .path_reg[CAM_IFE_PIX_PATH_RES_RDI_3] = &cam_ife_csid_lite_880_rdi_3_reg_info, + .need_top_cfg = 0, + .dynamic_drv_supported = true, + .top_irq_desc = &cam_ife_csid_lite_880_top_irq_desc, + .rx_irq_desc = &cam_ife_csid_lite_880_rx_irq_desc, + .path_irq_desc = cam_ife_csid_lite_880_path_irq_desc, + .num_top_err_irqs = cam_ife_csid_lite_880_num_top_irq_desc, + .num_rx_err_irqs = cam_ife_csid_lite_880_num_rx_irq_desc, + .num_path_err_irqs = ARRAY_SIZE(cam_ife_csid_lite_880_path_irq_desc), + .top_debug_mask = &cam_ife_csid_lite_880_top_debug_mask, + .rx_debug_mask = &cam_ife_csid_lite_880_rx_debug_mask, + .num_top_regs = 1, + .num_rx_regs = 1, +}; + +#endif /* _CAM_IFE_CSID_LITE_980_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite_mod.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite_mod.c new file mode 100644 index 0000000000..21bc954cbb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite_mod.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/module.h> +#include "camera_main.h" +#include "cam_ife_csid_dev.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" +#include "cam_ife_csid_lite17x.h" +#include "cam_ife_csid_lite480.h" +#include "cam_ife_csid_lite680.h" +#include "cam_ife_csid_lite780.h" +#include "cam_ife_csid_lite880.h" +#include "cam_ife_csid_lite980.h" +#include "cam_ife_csid_lite975.h" +#include "cam_ife_csid_lite970.h" +#include "cam_ife_csid_lite1080.h" + +#define CAM_CSID_LITE_DRV_NAME "csid_lite" + +static struct cam_ife_csid_core_info cam_ife_csid_lite_17x_hw_info = { + .csid_reg = &cam_ife_csid_lite_17x_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid_lite_480_hw_info = { + .csid_reg = &cam_ife_csid_lite_480_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid_lite_680_hw_info = { + .csid_reg = &cam_ife_csid_lite_680_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid_lite_780_hw_info = { + .csid_reg = &cam_ife_csid_lite_780_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid_lite_880_hw_info = { + .csid_reg = &cam_ife_csid_lite_880_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid_lite_970_hw_info = { + .csid_reg = &cam_ife_csid_lite_970_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid_lite_975_hw_info = { + .csid_reg = &cam_ife_csid_lite_975_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid_lite_980_hw_info = { + .csid_reg = &cam_ife_csid_lite_980_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid_lite_1080_hw_info = { + .csid_reg = &cam_ife_csid_lite_1080_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static const struct 
of_device_id cam_ife_csid_lite_dt_match[] = { + { + .compatible = "qcom,csid-lite170", + .data = &cam_ife_csid_lite_17x_hw_info, + }, + { + .compatible = "qcom,csid-lite175", + .data = &cam_ife_csid_lite_17x_hw_info, + }, + { + .compatible = "qcom,csid-lite165", + .data = &cam_ife_csid_lite_17x_hw_info, + }, + { + .compatible = "qcom,csid-lite480", + .data = &cam_ife_csid_lite_480_hw_info, + }, + { + .compatible = "qcom,csid-lite570", + .data = &cam_ife_csid_lite_480_hw_info, + }, + { + .compatible = "qcom,csid-lite580", + .data = &cam_ife_csid_lite_480_hw_info, + }, + { + .compatible = "qcom,csid-lite680", + .data = &cam_ife_csid_lite_680_hw_info, + }, + { + .compatible = "qcom,csid-lite680_110", + .data = &cam_ife_csid_lite_680_hw_info, + }, + { + .compatible = "qcom,csid-lite780", + .data = &cam_ife_csid_lite_780_hw_info, + }, + { + .compatible = "qcom,csid-lite880", + .data = &cam_ife_csid_lite_880_hw_info, + }, + { + .compatible = "qcom,csid-lite980", + .data = &cam_ife_csid_lite_980_hw_info, + }, + { + .compatible = "qcom,csid-lite970", + .data = &cam_ife_csid_lite_970_hw_info, + }, + { + .compatible = "qcom,csid-lite975", + .data = &cam_ife_csid_lite_975_hw_info, + }, + { + .compatible = "qcom,csid-lite1080", + .data = &cam_ife_csid_lite_1080_hw_info, + }, + {} +}; + +MODULE_DEVICE_TABLE(of, cam_ife_csid_lite_dt_match); + +struct platform_driver cam_ife_csid_lite_driver = { + .probe = cam_ife_csid_probe, + .remove = cam_ife_csid_remove, + .driver = { + .name = CAM_CSID_LITE_DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = cam_ife_csid_lite_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_ife_csid_lite_init_module(void) +{ + return platform_driver_register(&cam_ife_csid_lite_driver); +} + +void cam_ife_csid_lite_exit_module(void) +{ + platform_driver_unregister(&cam_ife_csid_lite_driver); +} + +MODULE_DESCRIPTION("CAM IFE_CSID_LITE driver"); +MODULE_LICENSE("GPL v2"); diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_mod.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_mod.c new file mode 100644 index 0000000000..489c418a1e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_mod.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_ife_csid_dev.h" +#include "camera_main.h" +#include "cam_ife_csid_common.h" +#include "cam_ife_csid_hw_ver1.h" +#include "cam_ife_csid_hw_ver2.h" +#include "cam_ife_csid170.h" +#include "cam_ife_csid170_200.h" +#include "cam_ife_csid175.h" +#include "cam_ife_csid175_200.h" +#include "cam_ife_csid480.h" +#include "cam_ife_csid570.h" +#include "cam_ife_csid580.h" +#include "cam_ife_csid680.h" +#include "cam_ife_csid680_110.h" +#include "cam_ife_csid780.h" +#include "cam_ife_csid880.h" +#include "cam_ife_csid980.h" +#include "cam_ife_csid975.h" +#include "cam_ife_csid970.h" +#include "cam_ife_csid1080.h" + +#define CAM_CSID_DRV_NAME "csid" + +static struct cam_ife_csid_core_info cam_ife_csid170_hw_info = { + .csid_reg = &cam_ife_csid_170_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid170_200_hw_info = { + .csid_reg = &cam_ife_csid_170_200_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid175_hw_info = { + .csid_reg = &cam_ife_csid_175_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid175_200_hw_info = { + .csid_reg = &cam_ife_csid_175_200_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid165_204_hw_info = { + .csid_reg = &cam_ife_csid_175_200_reg_info, + 
.sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid480_hw_info = { + .csid_reg = &cam_ife_csid_480_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid570_hw_info = { + .csid_reg = &cam_ife_csid_570_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid580_hw_info = { + .csid_reg = &cam_ife_csid_580_reg_info, + .sw_version = CAM_IFE_CSID_VER_1_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid680_hw_info = { + .csid_reg = &cam_ife_csid_680_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid680_110_hw_info = { + .csid_reg = &cam_ife_csid_680_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid780_hw_info = { + .csid_reg = &cam_ife_csid_780_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid880_hw_info = { + .csid_reg = &cam_ife_csid_880_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid980_hw_info = { + .csid_reg = &cam_ife_csid_980_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid970_hw_info = { + .csid_reg = &cam_ife_csid_970_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid975_hw_info = { + .csid_reg = &cam_ife_csid_975_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static struct cam_ife_csid_core_info cam_ife_csid1080_hw_info = { + .csid_reg = &cam_ife_csid_1080_reg_info, + .sw_version = CAM_IFE_CSID_VER_2_0, +}; + +static const struct of_device_id cam_ife_csid_dt_match[] = { + + { + .compatible = "qcom,csid170", + .data = &cam_ife_csid170_hw_info, + }, + { + .compatible = "qcom,csid170_200", + .data = &cam_ife_csid170_200_hw_info, + }, + { + .compatible = "qcom,csid175", + .data = 
&cam_ife_csid175_hw_info, + }, + { + .compatible = "qcom,csid175_200", + .data = &cam_ife_csid175_200_hw_info, + }, + { + .compatible = "qcom,csid165_204", + .data = &cam_ife_csid165_204_hw_info, + }, + { + .compatible = "qcom,csid480", + .data = &cam_ife_csid480_hw_info, + }, + { + .compatible = "qcom,csid570", + .data = &cam_ife_csid570_hw_info, + }, + { + .compatible = "qcom,csid580", + .data = &cam_ife_csid580_hw_info, + }, + { + .compatible = "qcom,csid680", + .data = &cam_ife_csid680_hw_info, + }, + { + .compatible = "qcom,csid680_110", + .data = &cam_ife_csid680_110_hw_info, + }, + { + .compatible = "qcom,csid780", + .data = &cam_ife_csid780_hw_info, + }, + { + .compatible = "qcom,csid880", + .data = &cam_ife_csid880_hw_info, + }, + { + .compatible = "qcom,csid980", + .data = &cam_ife_csid980_hw_info, + }, + { + .compatible = "qcom,csid970", + .data = &cam_ife_csid970_hw_info, + }, + { + .compatible = "qcom,csid975", + .data = &cam_ife_csid975_hw_info, + }, + { + .compatible = "qcom,csid1080", + .data = &cam_ife_csid1080_hw_info, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, cam_ife_csid_dt_match); + +struct platform_driver cam_ife_csid_driver = { + .probe = cam_ife_csid_probe, + .remove = cam_ife_csid_remove, + .driver = { + .name = CAM_CSID_DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = cam_ife_csid_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_ife_csid_init_module(void) +{ + return platform_driver_register(&cam_ife_csid_driver); +} + +void cam_ife_csid_exit_module(void) +{ + platform_driver_unregister(&cam_ife_csid_driver); +} + +MODULE_DESCRIPTION("CAM IFE_CSID driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c new file mode 100644 index 0000000000..33d5755ede --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include +#include "cam_ife_csid_soc.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info) +{ + struct device_node *of_node = NULL; + struct cam_csid_soc_private *soc_private = NULL; + int rc = 0; + + of_node = soc_info->pdev->dev.of_node; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) + return rc; + + soc_private = (struct cam_csid_soc_private *)soc_info->soc_private; + + rc = of_property_read_u32(of_node, "max-width", + &soc_private->max_width); + if (rc) { + CAM_DBG(CAM_ISP, "No max-width declared"); + soc_private->max_width_enabled = false; + } else { + soc_private->max_width_enabled = true; + } + + soc_private->is_ife_csid_lite = false; + if (strnstr(soc_info->compatible, "lite", + strlen(soc_info->compatible)) != NULL) { + soc_private->is_ife_csid_lite = true; + } + + rc = of_property_read_u32(of_node, "rt-wrapper-base", &soc_private->rt_wrapper_base); + if (rc) { + soc_private->rt_wrapper_base = 0; + CAM_DBG(CAM_ISP, "rc: %d Error reading rt_wrapper_base for core_idx: %u", + rc, soc_info->index); + rc = 0; + } + + return rc; +} + +static int cam_ife_csid_request_platform_resource( + struct cam_hw_soc_info *soc_info, + irq_handler_t csid_irq_handler, + void *data) +{ + int rc = 0, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, csid_irq_handler, &(irq_data[0])); + if (rc) + return rc; + + return rc; +} + +int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t csid_irq_handler, 
cam_cpas_client_cb_func cpas_cb, + void *data, bool is_custom) +{ + int rc = 0; + struct cam_cpas_register_params cpas_register_param; + struct cam_csid_soc_private *soc_private; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_csid_soc_private), GFP_KERNEL); + if (!soc_private) + return -ENOMEM; + + soc_info->soc_private = soc_private; + + rc = cam_ife_csid_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Failed in get_dt_properties, rc=%d", rc); + goto free_soc_private; + } + + /* Need to see if we want post process the clock list */ + + if (!soc_private->is_ife_csid_lite) { + rc = cam_cpas_query_drv_enable(NULL, &soc_info->is_clk_drv_en); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to query DRV enable rc:%d", rc); + goto free_soc_private; + } + } + + rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler, + data); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Error Request platform resources failed rc=%d", rc); + goto free_soc_private; + } + + if (is_custom) + strscpy(cpas_register_param.identifier, "csid-custom", + CAM_HW_IDENTIFIER_LENGTH); + else + strscpy(cpas_register_param.identifier, "csid", + CAM_HW_IDENTIFIER_LENGTH); + + cpas_register_param.cell_index = soc_info->index; + cpas_register_param.dev = soc_info->dev; + cpas_register_param.cam_cpas_client_cb = cpas_cb; + cpas_register_param.userdata = data; + rc = cam_cpas_register_client(&cpas_register_param); + if (rc) { + CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc); + goto release_soc; + } else { + soc_private->cpas_handle = cpas_register_param.client_handle; + } + + return rc; + +release_soc: + cam_soc_util_release_platform_resource(soc_info); +free_soc_private: + CAM_MEM_FREE(soc_private); + + return rc; +} + +int cam_ife_csid_deinit_soc_resources( + struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_csid_soc_private *soc_private; + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error soc_private NULL"); + return -ENODEV; + } 
+ + rc = cam_cpas_unregister_client(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc); + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_WARN(CAM_ISP, + "soc release platform resource fail rc: %d", rc); + + return rc; +} + +int cam_ife_csid_enable_soc_resources( + struct cam_hw_soc_info *soc_info, enum cam_vote_level clk_level) +{ + int rc = 0; + struct cam_csid_soc_private *soc_private; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + + soc_private = soc_info->soc_private; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 1; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE; + + axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW; + + CAM_DBG(CAM_ISP, "csid camnoc_bw:%lld mnoc_ab_bw:%lld mnoc_ib_bw:%lld ", + axi_vote.axi_path[0].camnoc_bw, + axi_vote.axi_path[0].mnoc_ab_bw, + axi_vote.axi_path[0].mnoc_ib_bw); + + rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_ISP, "Error CPAS start failed"); + rc = -EFAULT; + goto end; + } + + if (!soc_private->is_ife_csid_lite) { + /* query this everytime to support debugfs to disable clk drv */ + rc = cam_cpas_query_drv_enable(NULL, &soc_info->is_clk_drv_en); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to query DRV enable rc:%d", rc); + goto stop_cpas; + } + } + + rc = cam_soc_util_enable_platform_resource(soc_info, + (soc_info->is_clk_drv_en ? 
soc_info->index : CAM_CLK_SW_CLIENT_IDX), true, + clk_level, true); + if (rc) { + CAM_ERR(CAM_ISP, "enable platform failed rc %d", rc); + goto stop_cpas; + } + + return rc; + +stop_cpas: + cam_cpas_stop(soc_private->cpas_handle); +end: + return rc; +} + +int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_csid_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + return -EINVAL; + } + soc_private = soc_info->soc_private; + + rc = cam_soc_util_disable_platform_resource(soc_info, + ((soc_info->is_clk_drv_en && (!soc_private->is_ife_csid_lite)) ? + soc_info->index : CAM_CLK_SW_CLIENT_IDX), true, true); + if (rc) + CAM_ERR(CAM_ISP, "Disable platform failed"); + + rc = cam_cpas_stop(soc_private->cpas_handle); + if (rc) { + CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc); + return rc; + } + + return rc; +} + +int cam_ife_csid_enable_ife_force_clock_on(struct cam_hw_soc_info *soc_info, + uint32_t cpas_ife_base_offset) +{ + int rc = 0; + struct cam_csid_soc_private *soc_private; + uint32_t cpass_ife_force_clk_offset; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + return -EINVAL; + } + + soc_private = soc_info->soc_private; + cpass_ife_force_clk_offset = + cpas_ife_base_offset + (0x4 * soc_info->index); + rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REGBASE_CPASTOP, + cpass_ife_force_clk_offset, 1, 1); + + if (rc) + CAM_ERR(CAM_ISP, "CPASS set IFE:%d Force clock On failed", + soc_info->index); + else + CAM_DBG(CAM_ISP, "CPASS set IFE:%d Force clock On", + soc_info->index); + + return rc; +} + +int cam_ife_csid_disable_ife_force_clock_on(struct cam_hw_soc_info *soc_info, + uint32_t cpas_ife_base_offset) +{ + int rc = 0; + struct cam_csid_soc_private *soc_private; + uint32_t cpass_ife_force_clk_offset; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + return -EINVAL; + } + + soc_private = soc_info->soc_private; + 
cpass_ife_force_clk_offset = + cpas_ife_base_offset + (0x4 * soc_info->index); + rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REGBASE_CPASTOP, + cpass_ife_force_clk_offset, 1, 0); + + if (rc) + CAM_ERR(CAM_ISP, "CPASS set IFE:%d Force clock Off failed", + soc_info->index); + else + CAM_DBG(CAM_ISP, "CPASS set IFE:%d Force clock off", + soc_info->index); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h new file mode 100644 index 0000000000..6e274d5646 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_IFE_CSID_SOC_H_ +#define _CAM_IFE_CSID_SOC_H_ + +#include "cam_isp_hw.h" +#include "cam_cpas_api.h" + + +/* + * struct cam_csid_soc_private: + * + * @Brief: Private SOC data specific to CSID HW Driver + * + * @cpas_handle: Handle returned on registering with CPAS driver. + * This handle is used for all further interface + * with CPAS. 
+ * @rt_wrapper_base: Base address of the RT-Wrapper if the hw is in rt-wrapper + * @max_width: Maximum allowed width + * @is_ife_csid_lite: Flag to indicate whether this is a full csid or a lite csid + * @max_width_enabled: Flag to enable max width restriction + */ +struct cam_csid_soc_private { + uint32_t cpas_handle; + uint32_t rt_wrapper_base; + uint32_t max_width; + bool is_ife_csid_lite; + bool max_width_enabled; +}; + +/** + * struct csid_device_soc_info - CSID SOC info object + * + * @csi_vdd_voltage: csi vdd voltage value + * + */ +struct csid_device_soc_info { + int csi_vdd_voltage; +}; + +/** + * cam_ife_csid_init_soc_resources() + * + * @brief: csid initialization function for the soc info + * + * @soc_info: soc info structure pointer + * @csid_irq_handler: irq handler function to be registered + * @cpas_cb: handler for cpas cb + * @data: data for the callback functions + * @is_custom: for custom csid hw + * + */ +int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t csid_irq_handler, cam_cpas_client_cb_func cpas_cb, + void *data, bool is_custom); + + +/** + * cam_ife_csid_deinit_soc_resources() + * + * @brief: csid de-initialization function for the soc info + * + * @soc_info: soc info structure pointer + * + */ +int cam_ife_csid_deinit_soc_resources(struct cam_hw_soc_info *soc_info); + +/** + * cam_ife_csid_enable_soc_resources() + * + * @brief: csid soc resource enable function + * + * @soc_info: soc info structure pointer + * @clk_lvl: vote level to start with + * + */ +int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info, + uint32_t clk_lvl); + +/** + * cam_ife_csid_disable_soc_resources() + * + * @brief: csid soc resource disable function + * + * @soc_info: soc info structure pointer + * + */ +int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info); + +/** + * cam_ife_csid_enable_ife_force_clock() + * + * @brief: if csid testgen used for dual isp case, before + * starting csid test gen, 
enable ife force clock on + * through cpas + * + * @soc_info: soc info structure pointer + * @cpas_ife_base_offset: cpas ife force clock base reg offset value + * + */ +int cam_ife_csid_enable_ife_force_clock_on(struct cam_hw_soc_info *soc_info, + uint32_t cpas_ife_base_offset); + +/** + * cam_ife_csid_disable_ife_force_clock_on() + * + * @brief: disable the IFE force clock on after dual ISP + * CSID test gen stop + * + * @soc_info: soc info structure pointer + * @cpas_ife_base_offset: cpas ife force clock base reg offset value + * + */ +int cam_ife_csid_disable_ife_force_clock_on(struct cam_hw_soc_info *soc_info, + uint32_t cpas_ife_base_offset); + +/** + * cam_ife_csid_get_vote_level() + * + * @brief: get the vote level from clock rate + * + * @soc_info: soc info structure pointer + * @clock_rate clock rate + * + */ +uint32_t cam_ife_csid_get_vote_level(struct cam_hw_soc_info *soc_info, + uint64_t clock_rate); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h new file mode 100644 index 0000000000..7ae662f5da --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h @@ -0,0 +1,624 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CSID_HW_INTF_H_ +#define _CAM_CSID_HW_INTF_H_ + +#include "cam_isp_hw.h" +#include "cam_hw_intf.h" + +/* MAX IFE CSID instance */ +#define CAM_IFE_CSID_HW_NUM_MAX 8 +#define CAM_IFE_CSID_UDI_MAX 3 +#define RT_BASE_IDX 2 +#define CAM_ISP_MAX_PATHS 8 + +/* CSID multi-vcdt config related field */ +#define CAM_IFE_CSID_MAX_VALID_VC_NUM 2 +#define CAM_IFE_CSID_DEFAULT_NUM_DT 2 + +/** + * enum cam_ife_csid_hw_irq_regs - Specify the top irq reg + */ +enum cam_ife_csid_hw_irq_regs { + CAM_IFE_CSID_IRQ_TOP_REG_STATUS0, + CAM_IFE_CSID_IRQ_REGISTERS_MAX, +}; + +/** + * enum cam_ife_csid_top_irq_regs - Specify the top irq reg + */ +enum cam_ife_csid_top_irq_regs { + CAM_IFE_CSID_TOP_IRQ_STATUS_REG0, + CAM_IFE_CSID_TOP2_IRQ_STATUS_REG1, + CAM_IFE_CSID_TOP_IRQ_STATUS_REG_MAX, +}; + +/** + * enum cam_ife_csid_rx_irq_regs - Specify the rx irq reg + */ +enum cam_ife_csid_rx_irq_regs { + CAM_IFE_CSID_RX_IRQ_STATUS_REG0, + CAM_IFE_CSID_RX2_IRQ_STATUS_REG1, + CAM_IFE_CSID_RX_IRQ_STATUS_REG_MAX, +}; + +/** + * enum cam_ife_csid_input_core_type - Specify the csid input core + */ +enum cam_ife_csid_input_core_type { + CAM_IFE_CSID_INPUT_CORE_NONE, + CAM_IFE_CSID_INPUT_CORE_IFE, + CAM_IFE_CSID_INPUT_CORE_SFE_IFE, + CAM_IFE_CSID_INPUT_CORE_SFE, + CAM_IFE_CSID_INPUT_CORE_CUST_IFE, +}; + +/** + * enum cam_ife_pix_path_res_id - Specify the csid path + */ +enum cam_ife_pix_path_res_id { + CAM_IFE_PIX_PATH_RES_RDI_0, + CAM_IFE_PIX_PATH_RES_RDI_1, + CAM_IFE_PIX_PATH_RES_RDI_2, + CAM_IFE_PIX_PATH_RES_RDI_3, + CAM_IFE_PIX_PATH_RES_RDI_4, + CAM_IFE_PIX_PATH_RES_IPP, + CAM_IFE_PIX_PATH_RES_PPP, + CAM_IFE_PIX_PATH_RES_UDI_0, + CAM_IFE_PIX_PATH_RES_UDI_1, + CAM_IFE_PIX_PATH_RES_UDI_2, + CAM_IFE_PIX_PATH_RES_IPP_1, + CAM_IFE_PIX_PATH_RES_IPP_2, + CAM_IFE_PIX_PATH_RES_MAX, +}; + +/** + * enum cam_ife_cid_res_id - Specify the csid cid + */ +enum cam_ife_cid_res_id { + CAM_IFE_CSID_CID_0, + CAM_IFE_CSID_CID_1, + CAM_IFE_CSID_CID_2, + CAM_IFE_CSID_CID_3, + CAM_IFE_CSID_CID_MAX, +}; + 
+/** + * enum cam_ife_csid_secondary_evt_type - Specify the event type + */ +enum cam_ife_csid_secondary_evt_type { + CAM_IFE_CSID_EVT_SOF = 1, + CAM_IFE_CSID_EVT_EPOCH, + CAM_IFE_CSID_EVT_EOF, + CAM_IFE_CSID_EVT_SENSOR_SYNC_FRAME_DROP, + CAM_IFE_CSID_EVT_MAX, +}; + +/** + * struct cam_ife_csid_hw_caps- get the CSID hw capability + * @num_rdis: number of rdis supported by CSID HW device + * @num_pix: number of pxl paths supported by CSID HW device + * @num_ppp: number of ppp paths supported by CSID HW device + * @major_version : major version + * @minor_version: minor version + * @version_incr: version increment + * @sfe_ipp_input_rdi_res: RDI Res as an input to SFE + * @is_lite: is the ife_csid lite + * @global_reset_en: flag to indicate if global reset is enabled + * @aup_rup_en: flag to indicate if AUP RUP is on csid side + * @only_master_rup: flag to indicate if only master RUP + * @camif_irq_support: flag to indicate if CSID supports CAMIF irq + * @is_ife_sfe_mapped: flag to indicate if IFE & SFE are one-one mapped + */ +struct cam_ife_csid_hw_caps { + uint32_t num_rdis; + uint32_t num_pix; + uint32_t num_ppp; + uint32_t major_version; + uint32_t minor_version; + uint32_t version_incr; + uint32_t sfe_ipp_input_rdi_res; + bool is_lite; + bool global_reset_en; + bool aup_rup_en; + bool only_master_rup; + bool camif_irq_support; + bool is_ife_sfe_mapped; +}; + +struct cam_isp_out_port_generic_info { + uint32_t res_type; + uint32_t format; + uint32_t width; + uint32_t height; + uint32_t comp_grp_id; + uint32_t split_point; + uint32_t secure_mode; + uint32_t reserved; + uint32_t wm_mode; + uint32_t hw_context_id; + bool rcs_en; + bool use_wm_pack; +}; + +struct cam_isp_in_port_generic_info { + uint32_t major_ver; + uint32_t minor_ver; + uint32_t res_type; + uint32_t lane_type; + uint32_t lane_num; + uint32_t lane_cfg; + uint32_t vc[CAM_ISP_VC_DT_CFG]; + uint32_t dt[CAM_ISP_VC_DT_CFG]; + uint32_t num_valid_vc_dt; + uint32_t format[CAM_ISP_VC_DT_CFG]; + uint32_t 
test_pattern; + uint32_t usage_type; + uint32_t left_start; + uint32_t left_stop; + uint32_t left_width; + uint32_t right_start; + uint32_t right_stop; + uint32_t right_width; + uint32_t line_start; + uint32_t line_stop; + uint32_t height; + uint32_t pixel_clk; + uint32_t batch_size; + uint32_t dsp_mode; + uint32_t hbi_cnt; + uint32_t fe_unpacker_fmt; + uint32_t cust_node; + uint32_t num_out_res; + uint32_t horizontal_bin; + uint32_t vertical_bin; + uint32_t qcfa_bin; + uint32_t num_bytes_out; + uint32_t ipp_count; + uint32_t ppp_count; + uint32_t rdi_count; + uint32_t udi_count; + uint32_t lcr_count; + uint32_t ife_rd_count; + uint32_t lite_path_count; + uint32_t sfe_port_count; + uint32_t sfe_in_path_type; + uint32_t sfe_ife_enable; + uint32_t epoch_factor; + uint32_t path_id; + uint32_t ipp_dst_hw_ctxt_mask; + bool secure_mode; + bool dynamic_sensor_switch_en; + bool can_use_lite; + bool sfe_binned_epoch_cfg; + bool epd_supported; + bool aeb_mode; + bool dynamic_hdr_switch_en; + struct cam_isp_out_port_generic_info *data; +}; + +/** + * struct cam_csid_secondary_evt_config - secondary event enablement + * @evt_type: Type of secondary event enabled [SOF/EPOCH/EOF...] + * @en_secondary_evt: Enable secondary event + * + */ +struct cam_csid_secondary_evt_config { + enum cam_ife_csid_secondary_evt_type evt_type; + bool en_secondary_evt; +}; + +/** + * struct cam_csid_hw_reserve_resource- hw reserve + * @res_type : Reource type CID or PATH + * if type is CID, then res_id is not required, + * if type is path then res id need to be filled + * @res_id : Resource id to be reserved + * @in_port : Input port resource info + * @out_port: Output port resource info, used for RDI path only + * @sync_mode: Sync mode + * Sync mode could be master, slave or none + * @master_idx: Master device index to be configured in the + * slave path + * for master path, this value is not required. 
+ * only slave need to configure the master index value + * @dual_core_id: In case of dual csid, core id of another hw + * reserve + * @node_res : Reserved resource structure pointer + * @sec_evt_config: Config to enable secondary events for the given resource + * depending on the use-case + * @crop_enable : Flag to indicate CSID crop enable + * @drop_enable : Flag to indicate CSID drop enable + * @sfe_inline_shdr: Flag to indicate if sfe is inline shdr + * @is_offline : Flag to indicate offline + * @need_top_cfg: Flag to indicate if top cfg is needed + * @tasklet: Tasklet to schedule bottom halves + * @buf_done_controller: IRQ controller for buf done for version 680 hw + * @cdm_ops: CDM Ops + * @event_cb: Callback function to hw mgr in case of hw events + * @phy_sel: Phy selection number if tpg is enabled from userspace + * @cb_priv: Private pointer to return to callback + * @sfe_en: Flag to indicate if SFE is enabled + * @use_wm_pack: [OUT]Flag to indicate if WM packing is to be used for packing + * @handle_camif_irq: Flag to indicate if CSID IRQ is enabled + * @dynamic_drv_supported: Flag to indicate if dynamic drv is supported + * + */ +struct cam_csid_hw_reserve_resource_args { + enum cam_isp_resource_type res_type; + uint32_t res_id; + struct cam_isp_in_port_generic_info *in_port; + struct cam_isp_out_port_generic_info *out_port; + enum cam_isp_hw_sync_mode sync_mode; + uint32_t master_idx; + uint32_t dual_core_id; + struct cam_isp_resource_node *node_res; + struct cam_csid_secondary_evt_config sec_evt_config; + bool crop_enable; + bool drop_enable; + bool sfe_inline_shdr; + bool is_offline; + bool need_top_cfg; + void *tasklet; + void *buf_done_controller; + void *mc_comp_buf_done_controller; + void *cdm_ops; + cam_hw_mgr_event_cb_func event_cb; + uint32_t phy_sel; + void *cb_priv; + bool sfe_en; + bool use_wm_pack; + bool handle_camif_irq; + bool dynamic_drv_supported; +}; + +/** + * enum cam_ife_csid_halt_cmd - Specify the halt command type + */ +enum 
cam_ife_csid_halt_cmd { + CAM_CSID_HALT_AT_FRAME_BOUNDARY, + CAM_CSID_RESUME_AT_FRAME_BOUNDARY, + CAM_CSID_HALT_IMMEDIATELY, + CAM_CSID_HALT_MAX, +}; + +/** + * enum cam_ife_csid_halt_mode - Specify the halt command type + */ +enum cam_ife_csid_halt_mode { + CAM_CSID_HALT_MODE_INTERNAL, + CAM_CSID_HALT_MODE_GLOBAL, + CAM_CSID_HALT_MODE_MASTER, + CAM_CSID_HALT_MODE_SLAVE, + CAM_CSID_HALT_MODE_MAX, +}; + +/** + * struct cam_ife_csid_hw_halt_args + * @halt_mode : Applicable only for PATH resources + * 0 Internal : The CSID responds to the HALT_CMD + * 1 Global : The CSID responds to the GLOBAL_HALT_CMD + * 2 Master : The CSID responds to the HALT_CMD + * 3 Slave : The CSID responds to the external halt command + * and not the HALT_CMD register + * @node_res : reource pointer array( ie cid or CSID) + * + */ +struct cam_ife_csid_hw_halt_args { + enum cam_ife_csid_halt_mode halt_mode; + struct cam_isp_resource_node *node_res; +}; + +/** + * struct cam_csid_hw_stop- stop all resources + * @stop_cmd : Applicable only for PATH resources + * if stop command set to Halt immediately,driver will stop + * path immediately, manager need to reset the path after HI + * if stop command set to halt at frame boundary, driver will set + * halt at frame boundary and wait for frame boundary + * @node_res : reource pointer array( ie cid or CSID) + * @num_res : number of resources to be stopped + * @standby_en: Sensor Standby is enabled + * + */ +struct cam_csid_hw_stop_args { + enum cam_ife_csid_halt_cmd stop_cmd; + struct cam_isp_resource_node **node_res; + uint32_t num_res; + bool standby_en; +}; + +/** + * struct cam_csid_hw_start_args - Relevant info to pass from ife_hw_mgr layer + * to start various resource nodes. 
+ * + * @node_res: Resource pointer array (cid or CSID) + * @num_res: Number of resources in node_res + * @cdm_hw_idx: Physical CDM in use together with these resources + * @is_secure: If these resources are run in secure session + * @is_internal_start: Start triggered internally for reset & recovery + * @start_only: start only, no init required + * @is_drv_config_en: If drv config is enabled + * + */ +struct cam_csid_hw_start_args { + struct cam_isp_resource_node **node_res; + uint32_t num_res; + uint32_t cdm_hw_idx; + bool is_secure; + bool is_internal_start; + bool start_only; + bool is_drv_config_en; +}; + + +/** + * enum cam_ife_csid_reset_type - Specify the reset type + */ +enum cam_ife_csid_reset_type { + CAM_IFE_CSID_RESET_GLOBAL, + CAM_IFE_CSID_RESET_PATH, + CAM_IFE_CSID_RESET_GLOBAL_HW_ONLY, + CAM_IFE_CSID_RESET_GLOBAL_IRQ_CNTRL, + CAM_IFE_CSID_RESET_MAX, +}; + +/** + * struct cam_ife_csid_reset_cfg- csid reset configuration + * @ reset_type : Global reset or path reset + * @res_node : resource need to be reset + * + */ +struct cam_csid_reset_cfg_args { + enum cam_ife_csid_reset_type reset_type; + struct cam_isp_resource_node *node_res; +}; + +/** + * struct cam_csid_reset_out_of_sync_count_args + * @res_node : resource need to be reset + * + */ +struct cam_csid_reset_out_of_sync_count_args { + struct cam_isp_resource_node *node_res; +}; + +/** + * struct cam_csid_get_time_stamp_args- time stamp capture arguments + * @node_res : resource to get the time stamp + * @raw_boot_time : Pointer to raw boot ts captured from top-half, if available + * @time_stamp_val : captured time stamp + * @boot_timestamp : boot time stamp + * @prev_time_stamp_val : previous captured time stamp + * @get_prev_timestamp : flag to fetch previous captured time stamp from hardware + * @get_curr_timestamp : flag to skip CSID timestamp reg read if already read from top-half + */ +struct cam_csid_get_time_stamp_args { + struct cam_isp_resource_node *node_res; + struct timespec64 
*raw_boot_time; + uint64_t time_stamp_val; + uint64_t boot_timestamp; + uint64_t prev_time_stamp_val; + bool get_prev_timestamp; + bool get_curr_timestamp; +}; + +/** + * cam_ife_csid_hw_init() + * + * @brief: Initialize function for the CSID hardware + * + * @ife_csid_hw: CSID hardware instance returned + * @hw_idx: CSID hardware instance id + */ +int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw, + uint32_t hw_idx); + +/* + * struct cam_ife_csid_clock_update_args: + * + * @clk_rate: Clock rate requested + */ +struct cam_ife_csid_clock_update_args { + uint64_t clk_rate; +}; + +/* + * struct cam_ife_csid_qcfa_update_args: + * + * @res: Res node pointer + * @qcfa_binning: QCFA binning supported + */ +struct cam_ife_csid_qcfa_update_args { + struct cam_isp_resource_node *res; + uint32_t qcfa_binning; +}; + +/* + * struct cam_ife_sensor_dim_update_args: + * + * @res: Resource for which data is updated + * @sensor_data: expected path configuration + */ +struct cam_ife_sensor_dimension_update_args { + struct cam_isp_resource_node *res; + struct cam_isp_sensor_dimension sensor_data; +}; + +/* struct cam_ife_csid_top_config_args: + * + * @input_core_type: Input core type for CSID + * @core_idx: Core idx for out core + * @is_sfe_offline: flag to indicate if sfe is offline + * @is_sfe_fs: flag to indicate if use-case is Inline Bayer Fast Shutter (BRF) + */ +struct cam_ife_csid_top_config_args { + uint32_t input_core_type; + uint32_t core_idx; + bool is_sfe_offline; + bool is_sfe_fs; +}; + +/* + * struct cam_ife_csid_dual_sync_args: + * + * @sync_mode: Sync mode for dual csid master/slave + * @dual_core_id: Core idx for another core in case of dual isp + * + */ +struct cam_ife_csid_dual_sync_args { + enum cam_isp_hw_sync_mode sync_mode; + uint32_t dual_core_id; +}; + +/* + * struct cam_isp_csid_reg_update_args: + * + * @cmd: cmd buf update args + * @node_res: Node res pointer + * @num_res: Num of resources + * @last_applied_mup: last applied MUP + * @mup_val: MUP 
value if configured + * @mup_en: Flag if dynamic sensor switch is enabled + * @reg_write: if set use AHB to config rup/aup + * @add_toggled_mup_entry: Add toggled mup entry to simulate out of sync + */ +struct cam_isp_csid_reg_update_args { + struct cam_isp_hw_cmd_buf_update cmd; + struct cam_isp_resource_node *res[CAM_IFE_PIX_PATH_RES_MAX]; + uint32_t num_res; + uint32_t last_applied_mup; + uint32_t mup_val; + uint32_t mup_en; + bool reg_write; + bool add_toggled_mup_entry; +}; + +/* + * struct cam_ife_csid_offline_cmd_update_args: + * + * @cmd: cmd buf update args + * @node_res: Node res pointer for offline RDI + */ +struct cam_ife_csid_offline_cmd_update_args { + struct cam_isp_hw_cmd_buf_update cmd; + struct cam_isp_resource_node *res; +}; + +/* + * struct cam_ife_csid_discard_frame_cfg_update: + * + * @reset_discard_cfg: Set if discard config needs to be reset + * @num_exposures: Number of current exposures for sHDR + */ +struct cam_ife_csid_discard_frame_cfg_update { + uint32_t num_exposures; + bool reset_discard_cfg; +}; + +/* + * struct cam_ife_csid_ts_reg_addr: + * + * @curr0_ts_addr: Reg addr of curr0_sof, input if set, + * output otherwise + * @curr1_ts_addr: Reg addr of curr1_sof, input if set, + * output otherwise + * @res_id (input): CSID path id, for get op + * @get: If call is to get addr + */ +struct cam_ife_csid_ts_reg_addr { + void __iomem *curr0_ts_addr; + void __iomem *curr1_ts_addr; + uint32_t res_id; + bool get_addr; +}; + +/* + * struct cam_ife_csid_mup_update_args: + * + * @mup_val: MUP for odd or even vc + * @use_mup: To indicate if CSID needs to consume this MUP + */ +struct cam_ife_csid_mup_update_args { + uint32_t mup_val; + bool use_mup; +}; + +/* + * struct cam_ife_csid_mode_switch_update_args: + * + * @mup_args: MUP related arguments + * @exp_update_args: Exposure update arguments + */ +struct cam_ife_csid_mode_switch_update_args { + struct cam_ife_csid_mup_update_args mup_args; + struct cam_ife_csid_discard_frame_cfg_update 
exp_update_args; +}; + +/* + * struct cam_ife_csid_discard_init_frame_args: + * + * @num_frames: Num frames to discard + * @res: Node res for this path + */ +struct cam_ife_csid_discard_init_frame_args { + uint32_t num_frames; + struct cam_isp_resource_node *res; +}; + +/* + * struct cam_ife_csid_debug_cfg_args: + * + * @csid_debug: CSID debug val + * @csid_rx_capture_debug: CSID rx capture debug val + * @csid_testbus_debug: CSID test bus val + * @domain_id_value: Value of domain id + * @rx_capture_debug_set: CSID rx capture debug set; + * @set_domain_id_enabled: Set domain id enabled + * @enable_cdr_sweep_debug: If CDR sweep for CSIPHY is enabled + */ +struct cam_ife_csid_debug_cfg_args { + uint64_t csid_debug; + uint32_t csid_rx_capture_debug; + uint32_t csid_testbus_debug; + uint32_t domain_id_value; + bool rx_capture_debug_set; + bool set_domain_id_enabled; + bool enable_cdr_sweep_debug; +}; + +/* + * struct cam_ife_csid_drv_config_args: + * + * @is_init_config: Indicator for init config + * @drv_en: Indicator for camera DRV enable + * @timeout_val: Timeout value from SOF to trigger up vote, given in number of GC cycles + * @path_idle_en: Mask for paths to be considered for consolidated IDLE + */ +struct cam_ife_csid_drv_config_args { + bool is_init_config; + bool drv_en; + uint32_t timeout_val; + uint32_t path_idle_en; +}; + +/* + * struct cam_ife_csid_exp_info_update_args: + * + * @num_process_exp: Number of processed exposures + * @num_out_exp: Number of sensor output exposures + * @last_exp_valid: Indicates if last exposure info is valid + * @last_exp_res_id: Resource id for last exposure + */ +struct cam_ife_csid_exp_info_update_args { + uint32_t num_process_exp; + uint32_t num_sensor_out_exp; + bool last_exp_valid; + uint32_t last_exp_res_id; +}; + +#endif /* _CAM_CSID_HW_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h new file mode 100644 index 0000000000..81b7bd123b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h @@ -0,0 +1,768 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_ISP_HW_H_ +#define _CAM_ISP_HW_H_ + +#include +#include +#include "cam_hw.h" +#include "cam_soc_util.h" +#include "cam_irq_controller.h" +#include "cam_hw_intf.h" +#include "cam_cdm_intf_api.h" +#include "cam_hw_mgr_intf.h" + +/* Maximum length of tag while dumping */ +#define CAM_ISP_HW_DUMP_TAG_MAX_LEN 128 +/* Max isp hw pid values number */ +#define CAM_ISP_HW_MAX_PID_VAL 4 +/* Maximum number of output ports that map to an architecture specific input path */ +#define CAM_ISP_HW_PATH_PORT_MAP_MAX 3 + +/* + * MAX len of ISP Resource Name + */ +#define CAM_ISP_RES_NAME_LEN 16 + +/* + * MAX len of line_buffer + */ +#define LINE_BUFFER_LEN 1500 + +/* Access core_info of isp resource node */ +#define cam_isp_res_core_info(res) (((struct cam_hw_info *)res->hw_intf->hw_priv)->core_info) + +/* Add reg/val pair to buffer array and update the index */ +#define CAM_ISP_ADD_REG_VAL_PAIR(buf_array, length, index, offset, val) \ + do { \ + if (unlikely(index >= (length - 1))) { \ + CAM_ERR(CAM_ISP, \ + "Exceed buf size %u when adding reg/val at index %u and %u", \ + length, index, index+1); \ + } else { \ + buf_array[(index)++] = offset; \ + buf_array[(index)++] = val; \ + } \ + } while (0) + +enum cam_isp_exposure_type { + CAM_ISP_LAST_EXPOSURE, + CAM_ISP_LAST_1_EXPOSURE, + CAM_ISP_LAST_2_EXPOSURE, + CAM_ISP_EXPOSURE_MAX +}; + +enum cam_isp_bw_control_action { + CAM_ISP_BW_CONTROL_EXCLUDE = 0, + CAM_ISP_BW_CONTROL_INCLUDE = 1 +}; + +enum cam_isp_multi_ctxt_idx { + CAM_ISP_MULTI_CTXT_0, + 
CAM_ISP_MULTI_CTXT_1, + CAM_ISP_MULTI_CTXT_2, + CAM_ISP_MULTI_CTXT_MAX +}; + +/* + * struct cam_isp_bw_control_args: + * + * @node_res: Resource node info + * @action: Bandwidth control action + */ +struct cam_isp_bw_control_args { + struct cam_isp_resource_node *node_res; + enum cam_isp_bw_control_action action; +}; + +/* + * struct cam_isp_apply_clk_bw_args: + * + * @hw_intf: Isp hw intf pointer + * @request_id: Request Id + * @skip_clk_data_rst: Skip resetting any clk info + * @is_drv_config_en: Enable clk DRV while setting this clk rate + * @clock_updated: If clock is updated for any of the cores + */ +struct cam_isp_apply_clk_bw_args { + struct cam_hw_intf *hw_intf; + uint64_t request_id; + bool skip_clk_data_rst; + bool is_drv_config_en; + bool clock_updated; +}; + +/* + * struct cam_isp_sof_ts_data + * + * @boot_time Raw monotonic boot time + * @sof_ts: SOF reigster timestamp + */ +struct cam_isp_sof_ts_data { + struct timespec64 boot_time; + uint64_t sof_ts; +}; + +/* + * struct cam_isp_timestamp: + * + * @mono_time: Monotonic boot time + * @vt_time: AV Timer time + * @ticks: Qtimer ticks + * @sof_ts: SOF reigster timestamp + */ +struct cam_isp_timestamp { + struct timespec64 mono_time; + struct timespec64 vt_time; + uint64_t ticks; + uint64_t sof_ts; +}; + +/* + * cam_isp_hw_get_timestamp() + * + * @Brief: Get timestamp values + * + * @time_stamp: Structure that holds different time values + * + * @Return: Void + */ +void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp); + +enum cam_isp_hw_type { + CAM_ISP_HW_TYPE_CSID, + CAM_ISP_HW_TYPE_ISPIF, + CAM_ISP_HW_TYPE_VFE, + CAM_ISP_HW_TYPE_IFE_CSID, + CAM_ISP_HW_TYPE_TFE, + CAM_ISP_HW_TYPE_TFE_CSID, + CAM_ISP_HW_TYPE_TPG, + CAM_ISP_HW_TYPE_SFE, + CAM_ISP_HW_TYPE_MAX, +}; + +enum cam_isp_hw_split_id { + CAM_ISP_HW_SPLIT_LEFT = 0, + CAM_ISP_HW_SPLIT_RIGHT, + CAM_ISP_HW_SPLIT_MAX, +}; + +enum cam_isp_hw_usage_type { + CAM_ISP_HW_USAGE_TYPE_SINGLE = 0, + CAM_ISP_HW_USAGE_TYPE_DUAL, + 
CAM_ISP_HW_USAGE_TYPE_MAX, +}; + +enum cam_isp_hw_sync_mode { + CAM_ISP_HW_SYNC_NONE, + CAM_ISP_HW_SYNC_MASTER, + CAM_ISP_HW_SYNC_SLAVE, + CAM_ISP_HW_SYNC_MAX, +}; + +enum cam_isp_resource_state { + CAM_ISP_RESOURCE_STATE_UNAVAILABLE = 0, + CAM_ISP_RESOURCE_STATE_AVAILABLE = 1, + CAM_ISP_RESOURCE_STATE_RESERVED = 2, + CAM_ISP_RESOURCE_STATE_INIT_HW = 3, + CAM_ISP_RESOURCE_STATE_STREAMING = 4, +}; + +enum cam_isp_resource_type { + CAM_ISP_RESOURCE_UNINT, + CAM_ISP_RESOURCE_SRC, + CAM_ISP_RESOURCE_CID, + CAM_ISP_RESOURCE_PIX_PATH, + CAM_ISP_RESOURCE_VFE_IN, + CAM_ISP_RESOURCE_VFE_BUS_RD, + CAM_ISP_RESOURCE_VFE_OUT, + CAM_ISP_RESOURCE_TPG, + CAM_ISP_RESOURCE_TFE_IN, + CAM_ISP_RESOURCE_TFE_OUT, + CAM_ISP_RESOURCE_SFE_IN, + CAM_ISP_RESOURCE_SFE_RD, + CAM_ISP_RESOURCE_SFE_OUT, + CAM_ISP_RESOURCE_MAX, +}; + +enum cam_isp_hw_cmd_type { + CAM_ISP_HW_CMD_GET_CHANGE_BASE, + CAM_ISP_HW_CMD_GET_BUF_UPDATE, + CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM, + CAM_ISP_HW_CMD_GET_REG_UPDATE, + CAM_ISP_HW_CMD_GET_HFR_UPDATE, + CAM_ISP_HW_CMD_GET_HFR_UPDATE_RM, + CAM_ISP_HW_CMD_GET_WM_SECURE_MODE, + CAM_ISP_HW_CMD_GET_RM_SECURE_MODE, + CAM_ISP_HW_CMD_STRIPE_UPDATE, + CAM_ISP_HW_CMD_CLOCK_UPDATE, + CAM_ISP_HW_CMD_BW_UPDATE, + CAM_ISP_HW_CMD_BW_UPDATE_V2, + CAM_ISP_HW_CMD_BW_CONTROL, + CAM_ISP_HW_CMD_FCG_CONFIG, + CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ, + CAM_ISP_HW_CMD_GET_REG_DUMP, + CAM_ISP_HW_CMD_UBWC_UPDATE, + CAM_ISP_HW_CMD_DUMP_BUS_INFO, + CAM_ISP_HW_CMD_SOF_IRQ_DEBUG, + CAM_ISP_HW_CMD_SET_CAMIF_DEBUG, + CAM_ISP_HW_CMD_CAMIF_DATA, + CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE, + CAM_ISP_HW_CMD_FE_UPDATE_IN_RD, + CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD, + CAM_ISP_HW_CMD_UBWC_UPDATE_V2, + CAM_ISP_HW_CMD_CORE_CONFIG, + CAM_ISP_HW_CMD_WM_CONFIG_UPDATE, + CAM_ISP_HW_CMD_CSID_QCFA_SUPPORTED, + CAM_ISP_HW_CMD_ADD_WAIT, + CAM_ISP_HW_CMD_ADD_WAIT_TRIGGER, + CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA, + CAM_ISP_HW_CMD_TPG_PHY_CLOCK_UPDATE, + CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP, + CAM_ISP_HW_CMD_DUMP_HW, + 
CAM_ISP_HW_CMD_FE_TRIGGER_CMD, + CAM_ISP_HW_CMD_UNMASK_BUS_WR_IRQ, + CAM_ISP_HW_CMD_IS_CONSUMED_ADDR_SUPPORT, + CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR, + CAM_ISP_HW_CMD_GET_RES_FOR_MID, + CAM_ISP_HW_CMD_BLANKING_UPDATE, + CAM_ISP_HW_CMD_CSID_CLOCK_DUMP, + CAM_ISP_HW_CMD_TPG_CORE_CFG_CMD, + CAM_ISP_HW_CMD_CSID_CHANGE_HALT_MODE, + CAM_ISP_HW_CMD_SET_SFE_DEBUG_CFG, + CAM_ISP_HW_CMD_QUERY_CAP, + CAM_IFE_CSID_CMD_GET_TIME_STAMP, + CAM_IFE_CSID_SET_CSID_DEBUG, + CAM_IFE_CSID_SOF_IRQ_DEBUG, + CAM_IFE_CSID_LOG_ACQUIRE_DATA, + CAM_IFE_CSID_TOP_CONFIG, + CAM_IFE_CSID_PROGRAM_OFFLINE_CMD, + CAM_IFE_CSID_SET_DUAL_SYNC_CONFIG, + CAM_IFE_CSID_RESET_OUT_OF_SYNC_CNT, + CAM_ISP_HW_CMD_CSID_DYNAMIC_SWITCH_UPDATE, + CAM_ISP_HW_CMD_CSID_DISCARD_INIT_FRAMES, + CAM_ISP_HW_CMD_BUF_UPDATE, + CAM_ISP_HW_CMD_BUF_UPDATE_RM, + CAM_ISP_HW_NOTIFY_OVERFLOW, + CAM_ISP_HW_CMD_IS_PDAF_RDI2_MUX_EN, + CAM_ISP_HW_CMD_GET_PATH_PORT_MAP, + CAM_ISP_HW_CMD_IFE_DEBUG_CFG, + CAM_ISP_HW_SFE_SYS_CACHE_WM_CONFIG, + CAM_ISP_HW_SFE_SYS_CACHE_RM_CONFIG, + CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG, + CAM_ISP_HW_CMD_RM_ENABLE_DISABLE, + CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE, + CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE, + CAM_ISP_HW_CSID_MINI_DUMP, + CAM_ISP_HW_IFE_BUS_MINI_DUMP, + CAM_ISP_HW_SFE_BUS_MINI_DUMP, + CAM_ISP_HW_USER_DUMP, + CAM_ISP_HW_CMD_RDI_LCR_CFG, + CAM_ISP_HW_CMD_DRV_CONFIG, + CAM_ISP_HW_CMD_CSID_DUMP_CROP_REG, + CAM_ISP_HW_CMD_MC_CTXT_SEL, + CAM_ISP_HW_CMD_IRQ_COMP_CFG, + CAM_ISP_HW_CMD_IRQ_INJECTION, + CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION, + CAM_ISP_HW_CMD_GET_SET_PRIM_SOF_TS_ADDR, + CAM_ISP_HW_CMD_UBWC_UPDATE_V3, + CAM_ISP_HW_CMD_WM_CONFIG_UPDATE_V2, + CAM_ISP_HW_CMD_DYNAMIC_CLOCK_UPDATE, + CAM_ISP_HW_CMD_EXP_INFO_UPDATE, + CAM_ISP_HW_CMD_READ_RST_PERF_CNTRS, + CAM_ISP_HW_CMD_MAX, +}; + +/* + * struct cam_isp_resource_node: + * + * @Brief: Structure representing HW resource object + * + * @res_type: Resource Type + * @res_id: Unique resource ID within res_type objects + * for a particular HW + * @res_state: State of 
the resource + * @hw_intf: HW Interface of HW to which this resource + * belongs + * @res_priv: Private data of the resource + * @list: list_head node for this resource + * @cdm_ops: CDM operation functions + * @tasklet_info: Tasklet structure that will be used to + * schedule IRQ events related to this resource + * @irq_handle: handle returned on subscribing for IRQ event + * @init: function pointer to init the HW resource + * @deinit: function pointer to deinit the HW resource + * @start: function pointer to start the HW resource + * @stop: function pointer to stop the HW resource + * @process_cmd: function pointer for processing commands + * specific to the resource + * @top_half_handler: Top Half handler function + * @bottom_half_handler: Bottom Half handler function + * @res_name: Name of resource + * @is_rdi_primary_res: Indicates whether RDI is primiary resource or not. + * Based on this, We need to enable interrupts on RDI path only. + */ +struct cam_isp_resource_node { + enum cam_isp_resource_type res_type; + uint32_t res_id; + enum cam_isp_resource_state res_state; + struct cam_hw_intf *hw_intf; + void *res_priv; + struct list_head list; + void *cdm_ops; + void *tasklet_info; + int irq_handle; + + int (*init)(struct cam_isp_resource_node *rsrc_node, + void *init_args, uint32_t arg_size); + int (*deinit)(struct cam_isp_resource_node *rsrc_node, + void *deinit_args, uint32_t arg_size); + int (*start)(struct cam_isp_resource_node *rsrc_node); + int (*stop)(struct cam_isp_resource_node *rsrc_node); + int (*process_cmd)(struct cam_isp_resource_node *rsrc_node, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size); + CAM_IRQ_HANDLER_TOP_HALF top_half_handler; + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler; + uint8_t res_name[CAM_ISP_RES_NAME_LEN]; + bool is_rdi_primary_res; +}; + +/* + * struct cam_isp_blanking_config: + * + * @Brief: Structure to pass blanking details + * @hbi: HBI Value + * @vbi: VBI Value + * node_res: Pointer to Resource Node object + 
*/ +struct cam_isp_blanking_config { + uint32_t hbi; + uint32_t vbi; + struct cam_isp_resource_node *node_res; +}; + +/** + * struct cam_isp_hw_error_event_info: + * + * @brief: Structure to pass error event details to hw mgr + * + * @err_type: Type of error being reported + * @err_mask: Exact error of the err_type + * + */ +struct cam_isp_hw_error_event_info { + uint32_t err_type; + uint32_t err_mask; +}; + +/** + * struct cam_isp_hw_bufdone_event_info: + * + * @brief: Structure to pass bufdone event details to hw mgr + * + * @res_id: Resource IDs to report buf dones + * @comp_grp_id: Bus comp group id + * @last_consumed_addr: Last consumed addr for resource ID at that index + * @is_hw_ctxt_comp: Indicates if the buf done event is hw context composite + * @is_early_done: Indicates if its an early done event + * + */ +struct cam_isp_hw_bufdone_event_info { + uint32_t res_id; + uint32_t comp_grp_id; + uint32_t last_consumed_addr; + bool is_hw_ctxt_comp; + bool is_early_done; +}; + +/* + * struct cam_isp_hw_event_info: + * + * @Brief: Structure to pass event details to hw mgr + * + * @res_type: Type of IFE resource + * @is_secondary_evt: Indicates if event was requested by hw mgr + * @res_id: Unique resource ID + * @hw_idx: IFE hw index + * @reg_val: Any critical register value captured during irq handling + * @hw_type: Hw Type sending the event + * @in_core_idx: Input core type if CSID error evt + * @event_data: Any additional data specific to this event + * + */ +struct cam_isp_hw_event_info { + enum cam_isp_resource_type res_type; + bool is_secondary_evt; + uint32_t res_id; + uint32_t hw_idx; + uint32_t reg_val; + uint32_t hw_type; + uint32_t in_core_idx; + void *event_data; +}; + +/* + * struct cam_isp_hw_cmd_buf_update: + * + * @Brief: Contain the new created command buffer information + * + * @cmd_buf_addr: Command buffer to store the change base command + * @size: Size of the buffer in bytes + * @used_bytes: Consumed bytes in the command buffer + * + */ 
+struct cam_isp_hw_cmd_buf_update { + uint32_t *cmd_buf_addr; + uint32_t size; + uint32_t used_bytes; +}; + +/* + * struct cam_isp_hw_get_wm_update: + * + * @Brief: Get cmd buffer for WM updates. + * + * @ image_buf: image buffer address array + * @ image_buf_offset: image buffer address offset array + * @ num_buf: Number of buffers in the image_buf array + * @ frame_header: frame header iova + * @ fh_enabled: flag to indicate if this WM enables frame header + * @ local_id: local id for the wm + * @ width: width of scratch buffer + * @ height: height of scratch buffer + * @ stride: stride of scratch buffer + * @ slice_height: slice height of scratch buffer + * @ io_cfg: IO buffer config information sent from UMD + * + */ +struct cam_isp_hw_get_wm_update { + dma_addr_t *image_buf; + uint32_t image_buf_offset[CAM_PACKET_MAX_PLANES]; + uint32_t num_buf; + uint64_t frame_header; + bool fh_enabled; + uint32_t local_id; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t slice_height; + struct cam_buf_io_cfg *io_cfg; +}; + +/* + * struct cam_isp_hw_get_res_for_mid: + * + * @Brief: Get the out resource id for given mid + * + * @mid: Mid number of hw outport numb + * @pid: Pid number associated with mid + * @out_res_id: Out resource id + * + */ +struct cam_isp_hw_get_res_for_mid { + uint32_t mid; + uint32_t pid; + uint32_t out_res_id; +}; + +/** + * struct cam_isp_hw_fcg_get_size: + * + * @Brief: Get the size of KMD buf FCG config needs + * + * @num_types: Num of types(STATS/PHASE) for each FCG config + * @num_ctxs: Num of contexts for each FCG config in MC_TFE + * @kmd_size: Size of KMD buffer that will be used for FCG + * @fcg_supported: Indicate whether FCG is supported by the hardware + */ +struct cam_isp_hw_fcg_get_size { + uint32_t num_types; + uint32_t num_ctxs; + uint32_t kmd_size; + bool fcg_supported; +}; + +/** + * struct cam_isp_hw_fcg_update: + * + * @Brief: Get FCG update and pass to lower level processing + * + * @cmd_addr: Command buffer 
address that FCG configs are written into + * @cmd_size: Size of the command + * @prediction_idx: Indicate exact FCG predictions to be used + * @data: Exact FCG configs + */ +struct cam_isp_hw_fcg_update { + uintptr_t cmd_buf_addr; + uint32_t cmd_size; + uint32_t prediction_idx; + void *data; +}; + +/** + * struct cam_isp_hw_fcg_cmd + * + * @Brief: Union struct for fcg related cmd + * + * @res: Resource node + * @cmd_type: Command type + * @get_size_flag: Indicate to get kmd size for FCG or apply FCG update + * True - Get the size of KMD buffer to carry reg/val pairs + * False - Apply FCG update and pass it to SFE/IFE/MC_TFE + */ +struct cam_isp_hw_fcg_cmd { + struct cam_isp_resource_node *res; + enum cam_isp_hw_cmd_type cmd_type; + bool get_size_flag; + union { + struct cam_isp_hw_fcg_update fcg_update; + struct cam_isp_hw_fcg_get_size fcg_get_size; + } u; +}; + +/* + * struct cam_isp_hw_get_cmd_update: + * + * @Brief: Get cmd buffer update for different CMD types + * + * @res: Resource node + * @cmd_type: Command type for which to get update + * @cdm_id : CDM id + * @cmd: Command buffer information + * @res: Resource node + * @cmd_type: Command type for which to get update + * @cmd: Command buffer information + * @use_scratch_cfg: To indicate if it's scratch buffer config + * @trigger_cdm_en: Flag to indicate if cdm is trigger + * + */ +struct cam_isp_hw_get_cmd_update { + struct cam_isp_resource_node *res; + enum cam_isp_hw_cmd_type cmd_type; + enum cam_cdm_id cdm_id; + struct cam_isp_hw_cmd_buf_update cmd; + bool use_scratch_cfg; + union { + void *data; + struct cam_isp_hw_get_wm_update *wm_update; + struct cam_isp_hw_get_wm_update *rm_update; + }; + bool trigger_cdm_en; +}; + +/* + * struct cam_isp_hw_dual_isp_update_args: + * + * @Brief: update the dual isp striping configuration. 
+ * + * @ split_id: spilt id to inform left or rifht + * @ res: resource node + * @ dual_cfg: dual isp configuration + * + */ +struct cam_isp_hw_dual_isp_update_args { + enum cam_isp_hw_split_id split_id; + struct cam_isp_resource_node *res; + struct cam_isp_dual_config *dual_cfg; +}; + +/* + * struct cam_isp_hw_dump_args: + * + * @Brief: isp hw dump args + * + * @ req_id: request id + * @ cpu_addr: cpu address + * @ buf_len: buf len + * @ offset: offset of buffer + * @ ctxt_to_hw_map: ctx to hw map + * @ is_dump_all: flag to indicate if all information or just bw/clk rate + */ +struct cam_isp_hw_dump_args { + uint64_t req_id; + uintptr_t cpu_addr; + size_t buf_len; + size_t offset; + void *ctxt_to_hw_map; + bool is_dump_all; +}; + +/** + * struct cam_isp_hw_dump_header - ISP context dump header + * + * @Brief: isp hw dump header + * + * @tag: Tag name for the header + * @word_size: Size of word + * @size: Size of data + * + */ +struct cam_isp_hw_dump_header { + uint8_t tag[CAM_ISP_HW_DUMP_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +/** + * struct cam_isp_hw_intf_data - ISP hw intf data + * + * @Brief: isp hw intf pointer and pid list data + * + * @isp_hw_intf: Isp hw intf pointer + * @num_hw_pid: Number of pids for this hw + * @isp_hw_pid: Isp hw pid values + * + */ +struct cam_isp_hw_intf_data { + struct cam_hw_intf *hw_intf; + uint32_t num_hw_pid; + uint32_t hw_pid[CAM_ISP_HW_MAX_PID_VAL]; +}; + + +/** + * struct cam_isp_hw_regiter_dump_data - ISP skip reg dump data + * + * @Brief: ISP skip reg dump data + * + * @skip_regdump: Regdump skip required for this target or not + * @skip_regdump_start_offset: Start register address which needs to skip register dump + * @skip_regdump_stop_offset: Register address which needs to stop skiping register dump + * + */ + +struct cam_isp_hw_regiter_dump_data { + bool skip_regdump; + uint32_t skip_regdump_start_offset; + uint32_t skip_regdump_stop_offset; +}; + +/** + * struct cam_isp_hw_bus_cap: + * + * @Brief: 
ISP hw capabilities + * + * @max_out_res_type: Maximum value of out resource type supported by hw + * @num_perf_counters: Number of perf counters supported + * @max_fcg_ch_ctx: Maximum number of channels/contexts in FCG config provided by hw header + * @max_fcg_predictions: Maximum number of predictions in FCG config provided by hw header + * @max_dt_supported: Maximum number of DTs CSID can decode + * @fcg_supported: Indicate whether FCG config is supported by the hw + * @num_wr_perf_counters: Number of perf counters for write + * @support_consumed_addr: Indicate whether HW has last consumed addr reg + * + */ +struct cam_isp_hw_cap { + uint32_t max_out_res_type; + uint32_t num_perf_counters; + uint32_t num_wr_perf_counters; + uint32_t max_fcg_ch_ctx; + uint32_t max_fcg_predictions; + uint32_t max_dt_supported; + bool fcg_supported; + bool support_consumed_addr; + struct cam_isp_hw_regiter_dump_data skip_regdump_data; +}; + +/** + * struct cam_isp_hw_path_port_map: + * + * @Brief: ISP hw bus capabilities + * + * @num_entries: Number of entries + * @entry: Each row is an entry with the following structure: + * col #1: VFE IN path type + * col #2: ISP OUT resource type + */ +struct cam_isp_hw_path_port_map { + uint32_t num_entries; + uint32_t entry[CAM_ISP_HW_PATH_PORT_MAP_MAX][2]; +}; + + +/** + * struct cam_isp_hw_init_config_update: + * + * @Brief: Init config params for CSID/SFE/IFE resources + * + * @node_res: HW Resource + * @init_config: Init config params from userspace + */ +struct cam_isp_hw_init_config_update { + struct cam_isp_resource_node *node_res; + struct cam_isp_init_config *init_config; +}; + +/* + * struct cam_isp_hw_overflow_info: + * + * @Brief: ISP hw bus overflow info + * + * @res_id: Resource type + * @is_bus_overflow: Indicate whether bus overflow happened + */ +struct cam_isp_hw_overflow_info { + int res_id; + bool is_bus_overflow; +}; + +enum cam_isp_irq_inject_reg_unit_type { + CAM_ISP_CSID_TOP_REG, + CAM_ISP_CSID_RX_REG, + 
CAM_ISP_CSID_PATH_IPP_REG, + CAM_ISP_CSID_PATH_PPP_REG, + CAM_ISP_CSID_PATH_RDI0_REG, + CAM_ISP_CSID_PATH_RDI1_REG, + CAM_ISP_CSID_PATH_RDI2_REG, + CAM_ISP_CSID_PATH_RDI3_REG, + CAM_ISP_CSID_PATH_RDI4_REG, + CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG, + CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_1_REG, + CAM_ISP_SFE_0_BUS_RD_INPUT_IF_IRQ_SET_REG, + CAM_ISP_SFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG, + CAM_ISP_REG_UNIT_MAX +}; + +/* + * struct cam_isp_irq_inject_param: + * + * @Brief: Params for isp irq injection + * + * @hw_type : Hw to inject IRQ + * @hw_idx : Index of the selected hw + * @reg_unit : Register to set irq + * @irq_mask : IRQ to be triggered + * @req_id : Req to trigger the IRQ + * @is_valid : Flag to indicate current set of params is valid or not + * @line_buf : Buffer to temporarily keep log + */ +struct cam_isp_irq_inject_param { + int32_t hw_type; + int32_t hw_idx; + int32_t reg_unit; + int32_t irq_mask; + uint64_t req_id; + bool is_valid; + char line_buf[LINE_BUFFER_LEN]; +}; + +/* + * struct cam_isp_params_injection: + * + * @Brief: args for irq injection or irq desc dump + * + * @param : Params for isp irq injection + * @vfe_hw_info : Vfe hw info + * @sfe_hw_info : Sfe hw info + */ +struct cam_isp_params_injection { + struct cam_isp_irq_inject_param *param; + void *vfe_hw_info; + void *sfe_hw_info; +}; + +#endif /* _CAM_ISP_HW_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_sfe_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_sfe_hw_intf.h new file mode 100644 index 0000000000..fc6485b62e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_sfe_hw_intf.h @@ -0,0 +1,381 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SFE_HW_INTF_H_ +#define _CAM_SFE_HW_INTF_H_ + +#include "cam_isp_hw.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_cpas_api.h" + +#define SFE_CORE_BASE_IDX 0 +#define CAM_SFE_PERF_CNT_MAX 8 + +enum cam_sfe_core_id { + CAM_SFE_CORE_0, + CAM_SFE_CORE_1, + CAM_SFE_CORE_MAX, +}; + +enum cam_isp_hw_sfe_in { + CAM_ISP_HW_SFE_IN_PIX, + CAM_ISP_HW_SFE_IN_RDI0, + CAM_ISP_HW_SFE_IN_RDI1, + CAM_ISP_HW_SFE_IN_RDI2, + CAM_ISP_HW_SFE_IN_RDI3, + CAM_ISP_HW_SFE_IN_RDI4, + CAM_ISP_HW_SFE_IN_MAX, +}; + +enum cam_sfe_hw_irq_status { + CAM_SFE_IRQ_STATUS_SUCCESS, + CAM_SFE_IRQ_STATUS_ERR, + CAM_SFE_IRQ_STATUS_OVERFLOW, + CAM_SFE_IRQ_STATUS_VIOLATION, + CAM_SFE_IRQ_STATUS_MAX, +}; + +enum cam_sfe_hw_irq_regs { + CAM_SFE_IRQ_TOP_REG_STATUS0, + CAM_SFE_IRQ_REGISTERS_MAX, +}; + +enum cam_sfe_bus_wr_irq_regs { + CAM_SFE_IRQ_BUS_WR_REG_STATUS0, + CAM_SFE_BUS_WR_IRQ_REGISTERS_MAX, +}; + +enum cam_sfe_bus_rd_irq_regs { + CAM_SFE_IRQ_BUS_RD_REG_STATUS0, + CAM_SFE_BUS_RD_IRQ_REGISTERS_MAX, +}; + +/* + * struct cam_sfe_generic_debug_config: + * + * @sfe_debug_cfg : SFE debug cfg value + * @sfe_sensor_sel : SFE sensor sel for diag data + * @num_counters : Number of perf counters configured + * @sfe_perf_counter_val: SFE perf counter values + */ +struct cam_sfe_generic_debug_config { + uint32_t sfe_debug_cfg; + uint32_t sfe_sensor_sel; + uint32_t num_counters; + uint32_t sfe_perf_counter_val[CAM_SFE_PERF_CNT_MAX]; + uint32_t sfe_bus_wr_perf_counter_val[CAM_SFE_PERF_CNT_MAX]; +}; + +/* + * struct cam_sfe_sys_cache_debug_config: + * + * @sfe_cache_dbg: SFE cache debug cfg + */ + +struct cam_sfe_sys_cache_debug_config { + uint32_t sfe_cache_dbg; +}; + + +/* + * struct cam_sfe_debug_cfg_params: + * + * @cache_config: If the config is for cache + */ +struct cam_sfe_debug_cfg_params { + union { + struct cam_sfe_generic_debug_config dbg_cfg; + struct cam_sfe_sys_cache_debug_config cache_cfg; + } u; + bool cache_config; +}; + +/* + * struct cam_sfe_bw_update_args: + * + * 
@node_res: Resource to get the BW + * @sfe_vote: Vote info according to usage data (left/right/rdi) + */ +struct cam_sfe_bw_update_args { + struct cam_isp_resource_node *node_res; + struct cam_axi_vote sfe_vote; +}; + +/* + * struct cam_sfe_fe_update_args: + * + * @node_res: Resource to get fetch configuration + * @fe_config: fetch engine configuration + * + */ +struct cam_sfe_fe_update_args { + struct cam_isp_resource_node *node_res; + struct cam_fe_config fe_config; +}; + +/* + * struct cam_sfe_clock_update_args: + * + * @node_res: ISP Resource + * @clk_rate: Clock rate requested + * @vote_level: DRV vote level corresponding to requested rate + */ +struct cam_sfe_clock_update_args { + struct cam_isp_resource_node *node_res; + uint64_t clk_rate; + uint32_t vote_level; +}; + +/* + * struct cam_sfe_core_config_args: + * + * @node_res: ISP Resource + * @core_config: Core config for SFE + */ +struct cam_sfe_core_config_args { + struct cam_isp_resource_node *node_res; + struct cam_isp_sfe_core_config core_config; +}; + +/** + * struct cam_isp_sfe_bus_sys_cache_config: + * + * @Brief: Based on exp order rxved from userland + * configure sys cache for SFE WMs & RMs + * + * @wr_cfg_done: Output param to indicate if SFE + * bus WR accepted these settings + * @rd_cfg_done: Output param to indicate if SFE + * bus RD accepted these settings + * @res: SFE WM/RM Resource node + * @use_cache: If set cache configured + * @type: Dictates which slice ID to be used + * + */ +struct cam_isp_sfe_bus_sys_cache_config { + bool wr_cfg_done; + bool rd_cfg_done; + struct cam_isp_resource_node *res; + bool use_cache; + int scid; +}; + +/* + * struct cam_sfe_top_irq_evt_payload: + * + * @Brief: This structure is used to save payload for IRQ + * related to SFE top resource + * + * @list: list_head node for the payload + * @core_index: Index of SFE HW that generated this IRQ event + * @evt_id: IRQ event + * @irq_reg_val: IRQ and Error register values, read when IRQ was + * handled + * 
@violation_status ccif violation status + * @error_type: Identify different errors + * @ts: Timestamp + */ +struct cam_sfe_top_irq_evt_payload { + struct list_head list; + uint32_t core_index; + uint32_t evt_id; + uint32_t irq_reg_val[CAM_SFE_IRQ_REGISTERS_MAX]; + uint32_t violation_status; + uint32_t error_type; + struct cam_isp_timestamp ts; +}; + +/* + * struct cam_sfe_bus_wr_irq_evt_payload: + * + * @Brief: This structure is used to save payload for IRQ + * BUS related to SFE resources + * + * @list: list_head node for the payload + * @core_index: Index of SFE HW that generated this IRQ event + * @irq_reg_val Bus irq register status + * @ccif_violation_status ccif violation status + * @overflow_status bus overflow status + * @image_size_vio_sts image size violations status + * @error_type: Identify different errors + * @evt_id: IRQ event + * @ts: Timestamp + * @last_consumed_addr: Last consumed addr for resource + */ +struct cam_sfe_bus_wr_irq_evt_payload { + struct list_head list; + uint32_t core_index; + uint32_t irq_reg_val[CAM_SFE_BUS_WR_IRQ_REGISTERS_MAX]; + uint32_t ccif_violation_status; + uint32_t overflow_status; + uint32_t image_size_violation_status; + uint32_t error_type; + uint32_t evt_id; + struct cam_isp_timestamp ts; + uint32_t last_consumed_addr; +}; + +/* + * struct cam_sfe_bus_rd_irq_evt_payload: + * + * @Brief: This structure is used to save payload for IRQ + * BUS related to SFE resources + * + * @list: list_head node for the payload + * @core_index: Index of SFE HW that generated this IRQ event + * @irq_reg_val Bus irq register status + * @constraint_violation constraint violation + * @ccif_violation: CCIF violation + * @error_type: Identify different errors + * @evt_id: IRQ event + * @ts: Timestamp + */ +struct cam_sfe_bus_rd_irq_evt_payload { + struct list_head list; + uint32_t core_index; + uint32_t irq_reg_val[ + CAM_SFE_BUS_RD_IRQ_REGISTERS_MAX]; + uint32_t constraint_violation; + uint32_t ccif_violation; + uint32_t error_type; + 
uint32_t evt_id; + struct cam_isp_timestamp ts; +}; + +/* + * struct cam_sfe_hw_get_hw_cap: + * + * @reserved_1: reserved + * @reserved_2: reserved + * @reserved_3: reserved + * @reserved_4: reserved + */ +struct cam_sfe_hw_get_hw_cap { + uint32_t reserved_1; + uint32_t reserved_2; + uint32_t reserved_3; + uint32_t reserved_4; +}; + +/* + * struct cam_sfe_hw_vfe_bus_rd_acquire_args: + * + * @rsrc_node: Pointer to Resource Node object, filled if acquire + * is successful + * @res_id: Unique Identity of port to associate with this + * resource. + * @cdm_ops: CDM operations + * @unpacket_fmt: Unpacker format for read engine + * @is_offline: Flag to indicate offline usecase + * @secure_mode: If fetch is from secure/non-secure buffer + */ +struct cam_sfe_hw_sfe_bus_rd_acquire_args { + struct cam_isp_resource_node *rsrc_node; + uint32_t res_id; + struct cam_cdm_utils_ops *cdm_ops; + uint32_t unpacker_fmt; + bool is_offline; + bool secure_mode; +}; + +/* + * struct cam_sfe_hw_sfe_bus_in_acquire_args: + * + * @rsrc_node: Pointer to Resource Node object, filled if acquire + * is successful + * @res_id: Unique Identity of port to associate with this + * resource. + * @cdm_ops: CDM operations + * @is_dual: Dual mode usecase + * @in_port: in port info + * @is_offline: Flag to indicate Offline IFE + */ +struct cam_sfe_hw_sfe_in_acquire_args { + struct cam_isp_resource_node *rsrc_node; + uint32_t res_id; + struct cam_cdm_utils_ops *cdm_ops; + uint32_t is_dual; + struct cam_isp_in_port_generic_info *in_port; + bool is_offline; +}; + +/* + * struct cam_sfe_hw_sfe_out_acquire_args: + * + * @rsrc_node: Pointer to Resource Node object, filled if acquire + * is successful + * @out_port_info: Output Port details to acquire + * @unique_id: Unique Identity of Context to associate with this + * resource. Used for composite grouping of multiple + * resources in the same context + * @is_dual: Dual SFE or not + * @split_id: In case of Dual SFE, this is Left or Right. 
+ * @is_master: In case of Dual SFE, this is Master or Slave. + * @cdm_ops: CDM operations + * @use_wm_pack: Flag to indicalte packing at WM side + * @comp_grp_id: SFE bus comp group id + */ +struct cam_sfe_hw_sfe_out_acquire_args { + struct cam_isp_resource_node *rsrc_node; + struct cam_isp_out_port_generic_info *out_port_info; + uint32_t unique_id; + uint32_t is_dual; + enum cam_isp_hw_split_id split_id; + uint32_t is_master; + struct cam_cdm_utils_ops *cdm_ops; + bool use_wm_pack; + uint32_t comp_grp_id; +}; + +/* + * struct cam_sfe_acquire_args: + * + * @rsrc_type: Type of Resource (OUT/IN) to acquire + * @tasklet: Tasklet to associate with this resource. This is + * used to schedule bottom of IRQ events associated + * with this resource. + * @priv: Context data + * @event_cb: Callback function to hw mgr in case of hw events + * @buf_done_controller: Buf done controller + * @sfe_out: Acquire args for SFE_OUT + * @sfe_bus_rd Acquire args for SFE_BUS_READ + * @sfe_in: Acquire args for SFE_IN + */ +struct cam_sfe_acquire_args { + enum cam_isp_resource_type rsrc_type; + void *tasklet; + void *priv; + cam_hw_mgr_event_cb_func event_cb; + void *buf_done_controller; + union { + struct cam_sfe_hw_sfe_out_acquire_args sfe_out; + struct cam_sfe_hw_sfe_in_acquire_args sfe_in; + struct cam_sfe_hw_sfe_bus_rd_acquire_args sfe_rd; + }; +}; + +/* + * cam_sfe_get_num_hws() + * + * @brief : Populates number of SFEs. + * + * @num_sfes : Fills number of SFEs in the address passed. 
+ */ +void cam_sfe_get_num_hws(uint32_t *num_sfes); + +/* + * cam_sfe_hw_init() + * + * @Brief: Initialize SFE HW device + * + * @sfe_hw: sfe_hw interface to fill in and return on + * successful initialization + * @hw_idx: Index of SFE HW + */ +int cam_sfe_hw_init(struct cam_isp_hw_intf_data **sfe_hw, uint32_t hw_idx); + +#endif /* _CAM_SFE_HW_INTF_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_csid_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_csid_hw_intf.h new file mode 100644 index 0000000000..2b5f2ce354 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_csid_hw_intf.h @@ -0,0 +1,267 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_TFE_CSID_HW_INTF_H_ +#define _CAM_TFE_CSID_HW_INTF_H_ + +#include "cam_isp_hw.h" +#include "cam_hw_intf.h" +#include "cam_tfe.h" + +/* MAX TFE CSID instance */ +#define CAM_TFE_CSID_HW_NUM_MAX 3 +#define CAM_TFE_CSID_RDI_MAX 3 + +/** + * enum cam_tfe_pix_path_res_id - Specify the csid patch + */ +enum cam_tfe_csid_path_res_id { + CAM_TFE_CSID_PATH_RES_RDI_0, + CAM_TFE_CSID_PATH_RES_RDI_1, + CAM_TFE_CSID_PATH_RES_RDI_2, + CAM_TFE_CSID_PATH_RES_IPP, + CAM_TFE_CSID_PATH_RES_MAX, +}; + +/** + * enum cam_tfe_csid_irq_reg + */ +enum cam_tfe_csid_irq_reg { + TFE_CSID_IRQ_REG_RDI0, + TFE_CSID_IRQ_REG_RDI1, + TFE_CSID_IRQ_REG_RDI2, + TFE_CSID_IRQ_REG_TOP, + TFE_CSID_IRQ_REG_RX, + TFE_CSID_IRQ_REG_IPP, + TFE_CSID_IRQ_REG_MAX, +}; + +struct cam_isp_tfe_out_port_generic_info { + uint32_t res_id; + uint32_t format; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t comp_grp_id; + uint32_t secure_mode; + uint32_t wm_mode; + uint32_t reserved; +}; + +struct cam_isp_tfe_in_port_generic_info { + uint32_t major_ver; + 
uint32_t minor_ver; + uint32_t res_id; + uint32_t lane_type; + uint32_t lane_num; + uint32_t lane_cfg; + uint32_t vc[CAM_ISP_TFE_VC_DT_CFG]; + uint32_t dt[CAM_ISP_TFE_VC_DT_CFG]; + uint32_t num_valid_vc_dt; + uint32_t format; + uint32_t pix_pattern; + uint32_t usage_type; + uint32_t left_start; + uint32_t left_end; + uint32_t left_width; + uint32_t right_start; + uint32_t right_end; + uint32_t right_width; + uint32_t line_start; + uint32_t line_end; + uint32_t height; + uint32_t batch_size; + uint32_t dsp_mode; + uint32_t sensor_width; + uint32_t sensor_height; + uint32_t sensor_hbi; + uint32_t sensor_vbi; + uint32_t sensor_fps; + uint32_t init_frame_drop; + uint32_t num_out_res; + uint32_t bayer_bin; + uint32_t qcfa_bin; + uint32_t core_cfg; + uint32_t num_bytes_out; + uint32_t ipp_count; + uint32_t rdi_count; + uint32_t secure_mode; + struct cam_isp_tfe_out_port_generic_info *data; +}; + +/** + * struct cam_tfe_csid_hw_caps- get the CSID hw capability + * @num_rdis: number of rdis supported by CSID HW device + * @num_pix: number of pxl paths supported by CSID HW device + * @major_version : major version + * @minor_version: minor version + * @version_incr: version increment + * @sync_clk: sync clocks such that freq(TFE)>freq(CSID)>freq(CSIPHY) + * + */ +struct cam_tfe_csid_hw_caps { + uint32_t num_rdis; + uint32_t num_pix; + uint32_t major_version; + uint32_t minor_version; + uint32_t version_incr; + bool sync_clk; +}; + +/** + * struct cam_tfe_csid_hw_reserve_resource_args- hw reserve + * @res_type : Reource type ie PATH + * @res_id : Resource id to be reserved + * @in_port : Input port resource info + * @out_port: Output port resource info, used for RDI path only + * @sync_mode: Sync mode + * Sync mode could be master, slave or none + * @master_idx: Master device index to be configured in the slave path + * for master path, this value is not required. 
+ * only slave need to configure the master index value + * @phy_sel: Phy selection number if tpg is enabled from userspace + * @event_cb_prv: Context data + * @event_cb: Callback function to hw mgr in case of hw events + * @node_res : Reserved resource structure pointer + * + */ +struct cam_tfe_csid_hw_reserve_resource_args { + enum cam_isp_resource_type res_type; + uint32_t res_id; + struct cam_isp_tfe_in_port_generic_info *in_port; + struct cam_isp_tfe_out_port_generic_info *out_port; + enum cam_isp_hw_sync_mode sync_mode; + uint32_t master_idx; + uint32_t phy_sel; + void *event_cb_prv; + cam_hw_mgr_event_cb_func event_cb; + struct cam_isp_resource_node *node_res; +}; + +/** + * enum cam_tfe_csid_halt_cmd - Specify the halt command type + */ +enum cam_tfe_csid_halt_cmd { + CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY, + CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY, + CAM_TFE_CSID_HALT_IMMEDIATELY, + CAM_TFE_CSID_HALT_MAX, +}; + +/** + * enum cam_tfe_csid_halt_mode - Specify the halt command type + */ +enum cam_tfe_csid_halt_mode { + CAM_TFE_CSID_HALT_MODE_INTERNAL, + CAM_TFE_CSID_HALT_MODE_GLOBAL, + CAM_TFE_CSID_HALT_MODE_MASTER, + CAM_TFE_CSID_HALT_MODE_SLAVE, + CAM_TFE_CSID_HALT_MODE_MAX, +}; + +/** + * struct cam_tfe_csid_hw_halt_args + * @halt_mode : Applicable only for PATH resources + * 0 Internal : The CSID responds to the HALT_CMD + * 1 Global : The CSID responds to the GLOBAL_HALT_CMD + * 2 Master : The CSID responds to the HALT_CMD + * 3 Slave : The CSID responds to the external halt command + * and not the HALT_CMD register + * @node_res : reource pointer array( ie cid or CSID) + * + */ +struct cam_tfe_csid_hw_halt_args { + enum cam_tfe_csid_halt_mode halt_mode; + struct cam_isp_resource_node *node_res; +}; + +/** + * struct cam_csid_hw_stop- stop all resources + * @stop_cmd : Applicable only for PATH resources + * if stop command set to Halt immediately,driver will stop + * path immediately, manager need to reset the path after HI + * if stop command set to halt at 
frame boundary, driver will set + * halt at frame boundary and wait for frame boundary + * @num_res : Number of resources to be stopped + * @node_res : Reource pointer array( ie cid or CSID) + * + */ +struct cam_tfe_csid_hw_stop_args { + enum cam_tfe_csid_halt_cmd stop_cmd; + uint32_t num_res; + struct cam_isp_resource_node **node_res; +}; + +/** + * enum cam_tfe_csid_reset_type - Specify the reset type + */ +enum cam_tfe_csid_reset_type { + CAM_TFE_CSID_RESET_GLOBAL, + CAM_TFE_CSID_RESET_PATH, + CAM_TFE_CSID_RESET_MAX, +}; + +/** + * struct cam_tfe_csid_reset_cfg- Csid reset configuration + * @ reset_type : Global reset or path reset + * @res_node : Resource need to be reset + * + */ +struct cam_tfe_csid_reset_cfg_args { + enum cam_tfe_csid_reset_type reset_type; + struct cam_isp_resource_node *node_res; +}; + +/** + * struct cam_csid_get_time_stamp_args- time stamp capture arguments + * @res_node : Resource to get the time stamp + * @time_stamp_val : Captured time stamp + * @boot_timestamp : Boot time stamp + * @prev_time_stamp_val : previous captured time stamp + * @get_prev_timestamp : flag to fetch previous captured time stamp from hardware + */ +struct cam_tfe_csid_get_time_stamp_args { + struct cam_isp_resource_node *node_res; + uint64_t time_stamp_val; + uint64_t boot_timestamp; + uint64_t prev_time_stamp_val; + bool get_prev_timestamp; +}; + +/** + * enum cam_tfe_csid_cmd_type - Specify the csid command + */ +enum cam_tfe_csid_cmd_type { + CAM_TFE_CSID_CMD_GET_TIME_STAMP, + CAM_TFE_CSID_SET_CSID_DEBUG, + CAM_TFE_CSID_SOF_IRQ_DEBUG, + CAM_TFE_CSID_CMD_GET_REG_DUMP, + CAM_TFE_CSID_LOG_ACQUIRE_DATA, + CAM_TFE_CSID_CMD_MAX, +}; + +/** + * cam_tfe_csid_hw_init() + * + * @brief: Initialize function for the CSID hardware + * + * @tfe_csid_hw: CSID hardware instance returned + * @hw_idex: CSID hardware instance id + */ +int cam_tfe_csid_hw_init(struct cam_hw_intf **tfe_csid_hw, + uint32_t hw_idx); + +/* + * struct cam_tfe_csid_clock_update_args: + * + * @clk_rate: 
Clock rate requested + */ +struct cam_tfe_csid_clock_update_args { + uint64_t clk_rate; +}; + + +#endif /* _CAM_TFE_CSID_HW_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_hw_intf.h new file mode 100644 index 0000000000..b8fd2c1afd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_tfe_hw_intf.h @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_TFE_HW_INTF_H_ +#define _CAM_TFE_HW_INTF_H_ + +#include "cam_isp_hw.h" +#include "cam_cpas_api.h" + +#define CAM_TFE_HW_NUM_MAX 3 +#define TFE_CORE_BASE_IDX 0 + + +enum cam_isp_hw_tfe_in { + CAM_ISP_HW_TFE_IN_CAMIF = 0, + CAM_ISP_HW_TFE_IN_RDI0 = 1, + CAM_ISP_HW_TFE_IN_RDI1 = 2, + CAM_ISP_HW_TFE_IN_RDI2 = 3, + CAM_ISP_HW_TFE_IN_MAX, +}; + +enum cam_isp_hw_tfe_core { + CAM_ISP_HW_TFE_CORE_0, + CAM_ISP_HW_TFE_CORE_1, + CAM_ISP_HW_TFE_CORE_2, + CAM_ISP_HW_TFE_CORE_MAX, +}; + +enum cam_tfe_hw_irq_status { + CAM_TFE_IRQ_STATUS_SUCCESS, + CAM_TFE_IRQ_STATUS_ERR, + CAM_TFE_IRQ_STATUS_OVERFLOW, + CAM_TFE_IRQ_STATUS_P2I_ERROR, + CAM_TFE_IRQ_STATUS_VIOLATION, + CAM_TFE_IRQ_STATUS_MAX, +}; + +enum cam_tfe_hw_irq_regs { + CAM_TFE_IRQ_CAMIF_REG_STATUS0 = 0, + CAM_TFE_IRQ_CAMIF_REG_STATUS1 = 1, + CAM_TFE_IRQ_CAMIF_REG_STATUS2 = 2, + CAM_TFE_IRQ_REGISTERS_MAX, +}; + +enum cam_tfe_bus_irq_regs { + CAM_TFE_IRQ_BUS_REG_STATUS0 = 0, + CAM_TFE_IRQ_BUS_REG_STATUS1 = 1, + CAM_TFE_BUS_IRQ_REGISTERS_MAX, +}; + +enum cam_tfe_reset_type { + CAM_TFE_HW_RESET_HW_AND_REG, + CAM_TFE_HW_RESET_HW, + CAM_TFE_HW_RESET_MAX, +}; + +enum cam_tfe_bw_control_action { + CAM_TFE_BW_CONTROL_EXCLUDE = 0, + CAM_TFE_BW_CONTROL_INCLUDE = 1 +}; + +/* + * struct cam_tfe_hw_get_hw_cap: + * + * 
@max_width: Max width supported by HW + * @max_height: Max height supported by HW + * @max_pixel_num: Max Pixel channels available + * @max_rdi_num: Max Raw channels available + */ +struct cam_tfe_hw_get_hw_cap { + uint32_t max_width; + uint32_t max_height; + uint32_t max_pixel_num; + uint32_t max_rdi_num; +}; + +/* + * struct cam_tfe_hw_tfe_out_acquire_args: + * + * @rsrc_node: Pointer to Resource Node object, filled if acquire + * is successful + * @out_port_info: Output Port details to acquire + * @unique_id: Unique Identity of Context to associate with this + * resource. Used for composite grouping of multiple + * resources in the same context + * @is_dual: Dual TFE or not + * @split_id: In case of Dual TFE, this is Left or Right. + * (Default is Left if Single TFE) + * @is_master: In case of Dual TFE, this is Master or Slave. + * (Default is Master in case of Single TFE) + * @cdm_ops: CDM operations + * @ctx: Context data + * @comp_grp_id: TFE bus comp group id + */ +struct cam_tfe_hw_tfe_out_acquire_args { + struct cam_isp_resource_node *rsrc_node; + struct cam_isp_tfe_out_port_generic_info *out_port_info; + uint32_t unique_id; + uint32_t is_dual; + enum cam_isp_hw_split_id split_id; + uint32_t is_master; + struct cam_cdm_utils_ops *cdm_ops; + void *ctx; + uint32_t comp_grp_id; +}; + +/* + * struct cam_tfe_hw_tfe_in_acquire_args: + * + * @rsrc_node: Pointer to Resource Node object, filled if acquire + * is successful + * @res_id: Resource ID of resource to acquire if specific, + * else CAM_ISP_HW_TFE_IN_MAX + * @cdm_ops: CDM operations + * @sync_mode: In case of Dual TFE, this is Master or Slave. 
+ * (Default is Master in case of Single TFE) + * @in_port: Input port details to acquire + * @camif_pd_enable Camif pd enable or disable + * @dual_tfe_sync_sel_idx Dual tfe master hardware index + */ +struct cam_tfe_hw_tfe_in_acquire_args { + struct cam_isp_resource_node *rsrc_node; + struct cam_isp_tfe_in_port_generic_info *in_port; + uint32_t res_id; + void *cdm_ops; + enum cam_isp_hw_sync_mode sync_mode; + bool camif_pd_enable; + uint32_t dual_tfe_sync_sel_idx; +}; + +/* + * struct cam_tfe_acquire_args: + * + * @rsrc_type: Type of Resource (OUT/IN) to acquire + * @tasklet: Tasklet to associate with this resource. This is + * used to schedule bottom of IRQ events associated + * with this resource. + * @priv: Context data + * @event_cb: Callback function to hw mgr in case of hw events + * @tfe_out: Acquire args for TFE_OUT + * @tfe_in: Acquire args for TFE_IN + */ +struct cam_tfe_acquire_args { + enum cam_isp_resource_type rsrc_type; + void *tasklet; + void *priv; + cam_hw_mgr_event_cb_func event_cb; + union { + struct cam_tfe_hw_tfe_out_acquire_args tfe_out; + struct cam_tfe_hw_tfe_in_acquire_args tfe_in; + }; +}; + +/* + * struct cam_tfe_clock_update_args: + * + * @node_res: Resource to get the time stamp + * @clk_rate: Clock rate requested + */ +struct cam_tfe_clock_update_args { + struct cam_isp_resource_node *node_res; + uint64_t clk_rate; +}; + +/* + * struct cam_tfe_bw_update_args: + * + * @node_res: Resource to get the BW + * @isp_vote: Vote info according to usage data (left/right/rdi) + */ +struct cam_tfe_bw_update_args { + struct cam_isp_resource_node *node_res; + struct cam_axi_vote isp_vote; +}; + +/* + * struct cam_tfe_dual_update_args: + * + * @Brief: update the dual isp striping configuration. 
* @split_id: split id to inform left or right
and return on + * successful initialization + * @hw_idx: Index of TFE HW + */ +int cam_tfe_hw_init(struct cam_isp_hw_intf_data **tfe_hw, uint32_t hw_idx); + +#endif /* _CAM_TFE_HW_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h new file mode 100644 index 0000000000..aa0af31e79 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h @@ -0,0 +1,480 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_VFE_HW_INTF_H_ +#define _CAM_VFE_HW_INTF_H_ + +#include "cam_isp_hw.h" +#include "cam_ife_csid_hw_intf.h" +#include "cam_cpas_api.h" + +#define CAM_VFE_HW_NUM_MAX 8 + +#define VFE_CORE_BASE_IDX 0 +#define RT_BASE_IDX 2 +/* + * VBIF and BUS do not exist on same HW. + * Hence both can be 1 below. 
+ */ +#define VFE_VBIF_BASE_IDX 1 +#define VFE_BUS_BASE_IDX 1 + +#define CAM_VFE_MAX_UBWC_PORTS 12 + +#define CAM_VFE_PERF_CNT_MAX 8 + +/* Common capabilities for VFE */ +#define CAM_VFE_COMMON_CAP_SKIP_CORE_CFG BIT(0) +#define CAM_VFE_COMMON_CAP_CORE_MUX_CFG BIT(1) +#define CAM_VFE_COMMON_CAP_DEBUG_ERR_VEC BIT(2) + +enum cam_isp_hw_vfe_in_mux { + CAM_ISP_HW_VFE_IN_CAMIF = 0, + CAM_ISP_HW_VFE_IN_TESTGEN = 1, + CAM_ISP_HW_VFE_IN_RD = 2, + CAM_ISP_HW_VFE_IN_RDI0 = 3, + CAM_ISP_HW_VFE_IN_RDI1 = 4, + CAM_ISP_HW_VFE_IN_RDI2 = 5, + CAM_ISP_HW_VFE_IN_RDI3 = 6, + CAM_ISP_HW_VFE_IN_RDI4 = 7, + CAM_ISP_HW_VFE_IN_PDLIB = 8, + CAM_ISP_HW_VFE_IN_LCR = 9, + CAM_ISP_HW_VFE_IN_MAX, +}; + +enum cam_isp_hw_vfe_core { + CAM_ISP_HW_VFE_CORE_0, + CAM_ISP_HW_VFE_CORE_1, + CAM_ISP_HW_VFE_CORE_2, + CAM_ISP_HW_VFE_CORE_3, + CAM_ISP_HW_VFE_CORE_MAX, +}; + +enum cam_vfe_hw_irq_status { + CAM_VFE_IRQ_STATUS_ERR_COMP = -3, + CAM_VFE_IRQ_STATUS_COMP_OWRT = -2, + CAM_VFE_IRQ_STATUS_ERR = -1, + CAM_VFE_IRQ_STATUS_SUCCESS = 0, + CAM_VFE_IRQ_STATUS_OVERFLOW = 1, + CAM_VFE_IRQ_STATUS_P2I_ERROR = 2, + CAM_VFE_IRQ_STATUS_VIOLATION = 3, + CAM_VFE_IRQ_STATUS_MAX, +}; + +enum cam_vfe_irq_err_mask { + CAM_VFE_IRQ_ERR_MASK_HWPD_VIOLATION = 0x00000001, +}; + +enum cam_vfe_hw_irq_regs { + CAM_IFE_IRQ_CAMIF_REG_STATUS0 = 0, + CAM_IFE_IRQ_CAMIF_REG_STATUS1 = 1, + CAM_IFE_IRQ_CAMIF_REG_STATUS2 = 2, + CAM_IFE_IRQ_VIOLATION_STATUS = 3, + CAM_IFE_IRQ_BUS_OVERFLOW_STATUS = 4, + CAM_IFE_IRQ_REGISTERS_MAX, +}; + +enum cam_vfe_bus_irq_regs { + CAM_IFE_IRQ_BUS_REG_STATUS0 = 0, + CAM_IFE_IRQ_BUS_REG_STATUS1 = 1, + CAM_IFE_IRQ_BUS_REG_STATUS2 = 2, + CAM_IFE_IRQ_BUS_REG_COMP_ERR = 3, + CAM_IFE_IRQ_BUS_REG_COMP_OWRT = 4, + CAM_IFE_IRQ_BUS_DUAL_COMP_ERR = 5, + CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT = 6, + CAM_IFE_BUS_IRQ_REGISTERS_MAX, +}; + +enum cam_vfe_bus_ver3_irq_regs { + CAM_IFE_IRQ_BUS_VER3_REG_STATUS0 = 0, + CAM_IFE_IRQ_BUS_VER3_REG_STATUS1 = 1, + CAM_IFE_IRQ_BUS_VER3_REG_MAX, +}; + +enum cam_vfe_reset_type { + 
CAM_VFE_HW_RESET_HW_AND_REG, + CAM_VFE_HW_RESET_HW, + CAM_VFE_HW_RESET_MAX, +}; + +/* + * struct cam_vfe_hw_get_hw_cap: + * + * @Brief: Argument type for fetching the hw information for Query caps + * @major: Major revision number + * @minor: Minor revision number + * @incr: Increment revision number + * @is_lite: Flag to indicate Whether a full vfe or a Lite vfe + */ +struct cam_vfe_hw_get_hw_cap { + uint32_t major; + uint32_t minor; + uint32_t incr; + bool is_lite; +}; + +/* + * struct cam_vfe_hw_vfe_bus_rd_acquire_args: + * + * @rsrc_node: Pointer to Resource Node object, filled if acquire + * is successful + * @res_id: Unique Identity of port to associate with this + * resource. + * @is_dual: Flag to indicate dual VFE usecase + * @cdm_ops: CDM operations + * @unpacket_fmt: Unpacker format for read engine + * @is_offline: Flag to indicate offline usecase + */ +struct cam_vfe_hw_vfe_bus_rd_acquire_args { + struct cam_isp_resource_node *rsrc_node; + uint32_t res_id; + uint32_t is_dual; + struct cam_cdm_utils_ops *cdm_ops; + uint32_t unpacker_fmt; + bool is_offline; +}; + +/* + * struct cam_vfe_hw_vfe_out_acquire_args: + * + * @rsrc_node: Pointer to Resource Node object, filled if acquire + * is successful + * @out_port_info: Output Port details to acquire + * @unique_id: Unique Identity of Context to associate with this + * resource. Used for composite grouping of multiple + * resources in the same context + * @is_dual: Dual VFE or not + * @split_id: In case of Dual VFE, this is Left or Right. + * (Default is Left if Single VFE) + * @is_master: In case of Dual VFE, this is Master or Slave. 
+ * (Default is Master in case of Single VFE) + * @dual_slave_core: If Master and Slave exists, HW Index of Slave + * @cdm_ops: CDM operations + * @disable_ubwc_comp: Disable UBWC compression + * @use_wm_pack: Use WM Packing + * @comp_grp_id: VFE bus comp group id + * @use_hw_ctxt: Use HW context id info + * + */ +struct cam_vfe_hw_vfe_out_acquire_args { + struct cam_isp_resource_node *rsrc_node; + struct cam_isp_out_port_generic_info *out_port_info; + uint32_t unique_id; + uint32_t is_dual; + enum cam_isp_hw_split_id split_id; + uint32_t is_master; + uint32_t dual_slave_core; + struct cam_cdm_utils_ops *cdm_ops; + bool disable_ubwc_comp; + bool use_wm_pack; + uint32_t comp_grp_id; + bool use_hw_ctxt; +}; + +/* + * struct cam_vfe_hw_vfe_in_acquire_args: + * + * @rsrc_node: Pointer to Resource Node object, filled if acquire + * is successful + * @res_id: Resource ID of resource to acquire if specific, + * else CAM_ISP_HW_VFE_IN_MAX + * @dual_hw_idx: Slave core for this master core if dual vfe case + * @is_dual: flag to indicate if dual vfe case + * @hw_ctxt_mask: Mask to indicate destination hw contexts acquired corresponding to + * a particular CSID IPP path + * @cdm_ops: CDM operations + * @sync_mode: In case of Dual VFE, this is Master or Slave. 
+ * (Default is Master in case of Single VFE) + * @in_port: Input port details to acquire + * @is_fe_enabled: Flag to indicate if FE is enabled + * @is_offline: Flag to indicate Offline IFE + * @handle_camif_irq: Flag to handle the cmaif irq in VFE + */ +struct cam_vfe_hw_vfe_in_acquire_args { + struct cam_isp_resource_node *rsrc_node; + uint32_t res_id; + uint32_t dual_hw_idx; + uint32_t is_dual; + uint32_t hw_ctxt_mask; + void *cdm_ops; + enum cam_isp_hw_sync_mode sync_mode; + struct cam_isp_in_port_generic_info *in_port; + bool is_fe_enabled; + bool is_offline; + bool handle_camif_irq; +}; + +/* + * struct cam_vfe_acquire_args: + * + * @rsrc_type: Type of Resource (OUT/IN) to acquire + * @tasklet: Tasklet to associate with this resource. This is + * used to schedule bottom of IRQ events associated + * with this resource. + * @priv: Context data + * @event_cb: Callback function to hw mgr in case of hw events + * @buf_done_controller: Buf done controller for isp + * @mc_comp_buf_done_controller: Hw context composite buf done controller for isp + * @vfe_out: Acquire args for VFE_OUT + * @vfe_bus_rd Acquire args for VFE_BUS_READ + * @vfe_in: Acquire args for VFE_IN + */ +struct cam_vfe_acquire_args { + enum cam_isp_resource_type rsrc_type; + void *tasklet; + void *priv; + cam_hw_mgr_event_cb_func event_cb; + void *buf_done_controller; + void *mc_comp_buf_done_controller; + union { + struct cam_vfe_hw_vfe_out_acquire_args vfe_out; + struct cam_vfe_hw_vfe_bus_rd_acquire_args vfe_bus_rd; + struct cam_vfe_hw_vfe_in_acquire_args vfe_in; + }; +}; + +/* + * struct cam_vfe_clock_update_args: + * + * @node_res: Resource to get the time stamp + * @clk_rate: Clock rate requested + * @vote_level: DRV vote level corresponding to requested rate + */ +struct cam_vfe_clock_update_args { + struct cam_isp_resource_node *node_res; + uint64_t clk_rate; + uint32_t vote_level; +}; + +/* + * struct cam_vfe_core_config_args: + * + * @node_res: Resource to get the time stamp + * 
@core_config: Core config for IFE + */ +struct cam_vfe_core_config_args { + struct cam_isp_resource_node *node_res; + struct cam_isp_core_config core_config; +}; + +/* + * struct cam_vfe_bw_update_args_v2: + * + * @node_res: Resource to get the BW + * @isp_vote: Vote info according to usage data (left/right/rdi) + */ +struct cam_vfe_bw_update_args_v2 { + struct cam_isp_resource_node *node_res; + struct cam_axi_vote isp_vote; +}; + +/* + * struct cam_vfe_bw_update_args: + * + * @node_res: Resource to get the BW + * @camnoc_bw_bytes: Bandwidth vote request for CAMNOC + * @external_bw_bytes: Bandwidth vote request from CAMNOC + * out to the rest of the path-to-DDR + */ +struct cam_vfe_bw_update_args { + struct cam_isp_resource_node *node_res; + uint64_t camnoc_bw_bytes; + uint64_t external_bw_bytes; +}; + +/* + * struct cam_vfe_fe_update_args: + * + * @node_res: Resource to get fetch configuration + * @fe_config: fetch engine configuration + * + */ +struct cam_vfe_fe_update_args { + struct cam_isp_resource_node *node_res; + struct cam_fe_config fe_config; +}; + +/* + * struct cam_vfe_top_irq_evt_payload: + * + * @Brief: This structure is used to save payload for IRQ + * related to VFE_TOP resources + * + * @list: list_head node for the payload + * @irq_reg_val: IRQ and Error register values, read when IRQ was + * handled + * @reg_val: Value of any critical register that needs to be + * read at during irq handling + * + * @ts: Timestamp + */ +struct cam_vfe_top_irq_evt_payload { + struct list_head list; + uint32_t irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX]; + uint32_t reg_val; + struct cam_isp_timestamp ts; +}; + +/* + * struct cam_vfe_bus_irq_evt_payload: + * + * @Brief: This structure is used to save payload for IRQ + * related to VFE_BUS resources + * + * @list: list_head node for the payload + * @core_index: Index of VFE HW that generated this IRQ event + * @debug_status_0: Value of debug status_0 register at time of IRQ + * @evt_id: IRQ event + * @irq_reg_val: IRQ and 
Error register values, read when IRQ was + * handled + * @error_type: Identify different errors + * @ts: Timestamp + * @last_consumed_addr: Last consumed addr for resource + * @is_hw_ctxt_comp_done: Indicates if the buf done is hw context composited + * @is_early_done: Indicates if the irq is for an early done + */ +struct cam_vfe_bus_irq_evt_payload { + struct list_head list; + uint32_t core_index; + uint32_t debug_status_0; + uint32_t ccif_violation_status; + uint32_t overflow_status; + uint32_t image_size_violation_status; + uint32_t evt_id; + uint32_t irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX]; + struct cam_isp_timestamp ts; + uint32_t last_consumed_addr; + bool is_hw_ctxt_comp_done; + bool is_early_done; +}; + +/** + * struct cam_ubwc_generic_plane_config - UBWC Plane configuration info + * + * @port_type: Port Type + * @meta_stride: UBWC metadata stride + * @meta_size: UBWC metadata plane size + * @meta_offset: UBWC metadata offset + * @packer_config: UBWC packer config + * @mode_config_0/1: UBWC mode config registers 0 and 1 + * @static_ctrl: UBWC static ctrl + * @ctrl_2: UBWC ctrl 2 + * @tile_config: UBWC tile config + * @h_init: UBWC horizontal initial coordinate in pixels + * @v_init: UBWC vertical initial coordinate in lines + * @stats_ctrl_2: UBWC stats control + * @lossy_threshold_0: UBWC lossy threshold 0 + * @lossy_threshold_1: UBWC lossy threshold 1 + * @lossy_var_offset: UBWC offset variance threshold + * @bandwidth_limit: UBWC bandwidth limit + * @hw_ctx_id_mask: hw context id mask in case of multi context + */ +struct cam_vfe_generic_ubwc_plane_config { + uint32_t port_type; + uint32_t meta_stride; + uint32_t meta_size; + uint32_t meta_offset; + uint32_t packer_config; + uint32_t mode_config_0; + uint32_t mode_config_1; + uint32_t tile_config; + uint32_t h_init; + uint32_t v_init; + uint32_t static_ctrl; + uint32_t ctrl_2; + uint32_t stats_ctrl_2; + uint32_t lossy_threshold_0; + uint32_t lossy_threshold_1; + uint32_t lossy_var_offset; + uint32_t bandwidth_limit; + 
uint32_t hw_ctx_id_mask; +}; + +/** + * struct cam_ubwc_generic_config - UBWC Configuration Payload + * + * @api_version: UBWC config api version + * @ubwc_plane_config: Array of UBWC configurations per plane + */ +struct cam_vfe_generic_ubwc_config { + uint32_t api_version; + struct cam_vfe_generic_ubwc_plane_config + ubwc_plane_cfg[CAM_PACKET_MAX_PLANES - 1]; +}; + + +/* + * struct cam_vfe_generic_debug_config: + * + * @diag_config : VFE diag cfg register configuration + * @num_counters : Number of perf counters configured + * @vfe_perf_counter_val : VFE perf counter values + * @vfe_bus_wr_perf_counter_val : VFE wr perf counter values + * @disable_ife_mmu_prefetch : Disable IFE mmu prefetch + * @enable_ife_frame_irqs : Enable IFE frame timing IRQs + */ +struct cam_vfe_generic_debug_config { + uint64_t diag_config; + uint32_t num_counters; + uint32_t vfe_perf_counter_val[CAM_VFE_PERF_CNT_MAX]; + uint32_t vfe_bus_wr_perf_counter_val[CAM_VFE_PERF_CNT_MAX]; + bool disable_ife_mmu_prefetch; + bool enable_ife_frame_irqs; +}; + +/* + * struct cam_vfe_enable_sof_irq_args: + * + * @enable_sof_irq_debug: Enable IFE/TFE SOF IRQ debug + * @res : Resource node + */ +struct cam_vfe_enable_sof_irq_args { + struct cam_isp_resource_node *res; + bool enable_sof_irq_debug; +}; + +/* + * cam_vfe_get_num_ifes() + * + * @brief: Gets number of IFEs + * + * @num_ifes: Fills number of IFES in the address passed + */ +void cam_vfe_get_num_ifes(uint32_t *num_ifes); + +/* + * cam_vfe_get_num_ife_lites() + * + * @brief: Gets number of IFE-LITEs + * + * @num_ifes: Fills number of IFE-LITES in the address passed + */ +void cam_vfe_get_num_ife_lites(uint32_t *num_ife_lites); + +/* + * cam_vfe_hw_init() + * + * @Brief: Initialize VFE HW device + * + * @vfe_hw: vfe_hw interface to fill in and return on + * successful initialization + * @hw_idx: Index of VFE HW + */ +int cam_vfe_hw_init(struct cam_isp_hw_intf_data **vfe_hw, + uint32_t hw_idx); + +/* + * cam_vfe_put_evt_payload() + * + * @Brief: 
Put the evt payload back to free list + * + * @core_info: VFE HW core_info + * @evt_payload: Event payload data + */ +int cam_vfe_put_evt_payload(void *core_info, + struct cam_vfe_top_irq_evt_payload **evt_payload); + +#endif /* _CAM_VFE_HW_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi100.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi100.c new file mode 100644 index 0000000000..4d7fcb5372 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi100.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include "cam_csid_ppi_core.h" +#include "cam_csid_ppi100.h" +#include "cam_csid_ppi_dev.h" + +#define CAM_PPI_DRV_NAME "ppi_100" +#define CAM_PPI_VERSION_V100 0x10000000 + +static struct cam_csid_ppi_hw_info cam_csid_ppi100_hw_info = { + .ppi_reg = &cam_csid_ppi_100_reg_offset, +}; + +static const struct of_device_id cam_csid_ppi100_dt_match[] = { + { + .compatible = "qcom,ppi100", + .data = &cam_csid_ppi100_hw_info, + }, + {} +}; + +MODULE_DEVICE_TABLE(of, cam_csid_ppi100_dt_match); + +struct platform_driver cam_csid_ppi100_driver = { + .probe = cam_csid_ppi_probe, + .remove = cam_csid_ppi_remove, + .driver = { + .name = CAM_PPI_DRV_NAME, + .of_match_table = cam_csid_ppi100_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_csid_ppi100_init_module(void) +{ + return platform_driver_register(&cam_csid_ppi100_driver); +} + +void cam_csid_ppi100_exit_module(void) +{ + platform_driver_unregister(&cam_csid_ppi100_driver); +} + +MODULE_DESCRIPTION("CAM CSID_PPI100 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi100.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi100.h new file mode 100644 index 
0000000000..eacf87f4e9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi100.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CSID_PPI_100_H_ +#define _CAM_CSID_PPI_100_H_ + +#include "cam_csid_ppi_core.h" + +static struct cam_csid_ppi_reg_offset cam_csid_ppi_100_reg_offset = { + .ppi_hw_version_addr = 0, + .ppi_module_cfg_addr = 0x60, + .ppi_irq_status_addr = 0x68, + .ppi_irq_mask_addr = 0x6c, + .ppi_irq_set_addr = 0x70, + .ppi_irq_clear_addr = 0x74, + .ppi_irq_cmd_addr = 0x78, + .ppi_rst_cmd_addr = 0x7c, + .ppi_test_bus_ctrl_addr = 0x1f4, + .ppi_debug_addr = 0x1f8, + .ppi_spare_addr = 0x1fc, +}; + +/** + * @brief : API to register PPI Dev to platform framework. + * @return 0 on success, or a negative error code on failure. + */ +int cam_csid_ppi100_init_module(void); + +/** + * @brief : API to remove PPI Dev from platform framework. + */ +void cam_csid_ppi100_exit_module(void); +#endif /*_CAM_CSID_PPI_100_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_core.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_core.c new file mode 100644 index 0000000000..01dbb70253 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_core.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include + +#include "cam_csid_ppi_core.h" +#include "cam_csid_ppi_dev.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_io_util.h" +#include "cam_common_util.h" + +static int cam_csid_ppi_reset(struct cam_csid_ppi_hw *ppi_hw) +{ + struct cam_hw_soc_info *soc_info; + const struct cam_csid_ppi_reg_offset *ppi_reg; + int rc = 0; + uint32_t status; + + soc_info = &ppi_hw->hw_info->soc_info; + ppi_reg = ppi_hw->ppi_info->ppi_reg; + + CAM_DBG(CAM_ISP, "PPI:%d reset", ppi_hw->hw_intf->hw_idx); + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_mask_addr); + cam_io_w_mb(PPI_RST_CONTROL, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_set_addr); + cam_io_w_mb(PPI_RST_CONTROL, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_rst_cmd_addr); + cam_io_w_mb(PPI_IRQ_CMD_SET, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_cmd_addr); + + rc = cam_common_read_poll_timeout(soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_status_addr, + 1000, 500000, 0x1, 0x1, &status); + + CAM_DBG(CAM_ISP, "PPI:%d reset status %d", ppi_hw->hw_intf->hw_idx, + status); + if (rc < 0) { + CAM_ERR(CAM_ISP, "PPI:%d ppi_reset fail rc = %d status = %d", + ppi_hw->hw_intf->hw_idx, rc, status); + return rc; + } + cam_io_w_mb(PPI_RST_CONTROL, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_clear_addr); + cam_io_w_mb(PPI_IRQ_CMD_CLEAR, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_cmd_addr); + + return 0; +} + +static int cam_csid_ppi_enable_hw(struct cam_csid_ppi_hw *ppi_hw) +{ + int rc = 0; + int32_t i; + uint64_t val; + const struct cam_csid_ppi_reg_offset *ppi_reg; + struct cam_hw_soc_info *soc_info; + uint32_t err_irq_mask; + + ppi_reg = ppi_hw->ppi_info->ppi_reg; + soc_info = &ppi_hw->hw_info->soc_info; + + CAM_DBG(CAM_ISP, "PPI:%d init PPI HW", ppi_hw->hw_intf->hw_idx); + + ppi_hw->hw_info->open_count++; + if (ppi_hw->hw_info->open_count > 1) { + CAM_DBG(CAM_ISP, "PPI:%d dual vfe already enabled", + 
ppi_hw->hw_intf->hw_idx); + return 0; + } + + for (i = 0; i < soc_info->num_clk; i++) { + rc = cam_soc_util_clk_enable(soc_info, CAM_CLK_SW_CLIENT_IDX, false, i, -1); + if (rc) + goto clk_disable; + } + + rc = cam_csid_ppi_reset(ppi_hw); + if (rc) + goto clk_disable; + + err_irq_mask = PPI_IRQ_FIFO0_OVERFLOW | PPI_IRQ_FIFO1_OVERFLOW | + PPI_IRQ_FIFO2_OVERFLOW; + cam_io_w_mb(err_irq_mask, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_mask_addr); + rc = cam_soc_util_irq_enable(soc_info); + if (rc) + goto clk_disable; + + cam_io_w_mb(PPI_RST_CONTROL, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_clear_addr); + cam_io_w_mb(PPI_IRQ_CMD_CLEAR, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_cmd_addr); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + ppi_reg->ppi_hw_version_addr); + CAM_DBG(CAM_ISP, "PPI:%d PPI HW version: 0x%x", + ppi_hw->hw_intf->hw_idx, val); + ppi_hw->device_enabled = 1; + + return 0; +clk_disable: + for (--i; i >= 0; i--) + cam_soc_util_clk_disable(soc_info, CAM_CLK_SW_CLIENT_IDX, false, i); + ppi_hw->hw_info->open_count--; + return rc; +} + +static int cam_csid_ppi_disable_hw(struct cam_csid_ppi_hw *ppi_hw) +{ + int rc = 0; + int i; + struct cam_hw_soc_info *soc_info; + const struct cam_csid_ppi_reg_offset *ppi_reg; + uint64_t ppi_cfg_val = 0; + + CAM_DBG(CAM_ISP, "PPI:%d De-init PPI HW", + ppi_hw->hw_intf->hw_idx); + + if (!ppi_hw->hw_info->open_count) { + CAM_WARN(CAM_ISP, "ppi[%d] unbalanced disable hw", + ppi_hw->hw_intf->hw_idx); + return -EINVAL; + } + ppi_hw->hw_info->open_count--; + + if (ppi_hw->hw_info->open_count) + return rc; + + soc_info = &ppi_hw->hw_info->soc_info; + ppi_reg = ppi_hw->ppi_info->ppi_reg; + + CAM_DBG(CAM_ISP, "Calling PPI Reset"); + cam_csid_ppi_reset(ppi_hw); + CAM_DBG(CAM_ISP, "PPI Reset Done"); + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_mask_addr); + cam_soc_util_irq_disable(soc_info); + + for (i = 0; i < CAM_CSID_PPI_LANES_MAX; i++) + ppi_cfg_val &= 
~PPI_CFG_CPHY_DLX_EN(i); + + cam_io_w_mb(ppi_cfg_val, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_module_cfg_addr); + + ppi_hw->device_enabled = 0; + + for (i = 0; i < soc_info->num_clk; i++) + cam_soc_util_clk_disable(soc_info, CAM_CLK_SW_CLIENT_IDX, false, i); + + return rc; +} + +static int cam_csid_ppi_init_hw(void *hw_priv, void *init_args, + uint32_t arg_size) +{ + int i, rc = 0; + uint32_t num_lanes; + uint32_t cphy; + uint32_t ppi_cfg_val = 0; + struct cam_csid_ppi_hw *ppi_hw; + struct cam_hw_info *ppi_hw_info; + const struct cam_csid_ppi_reg_offset *ppi_reg; + struct cam_hw_soc_info *soc_info; + struct cam_csid_ppi_cfg ppi_cfg; + + if (!hw_priv || !init_args || + (arg_size != sizeof(struct cam_csid_ppi_cfg))) { + CAM_ERR(CAM_ISP, "PPI: Invalid args"); + rc = -EINVAL; + goto end; + } + + ppi_hw_info = (struct cam_hw_info *)hw_priv; + ppi_hw = (struct cam_csid_ppi_hw *)ppi_hw_info->core_info; + ppi_reg = ppi_hw->ppi_info->ppi_reg; + ppi_cfg = *((struct cam_csid_ppi_cfg *)init_args); + + rc = cam_csid_ppi_enable_hw(ppi_hw); + if (rc) + goto end; + + num_lanes = ppi_cfg.lane_num; + cphy = ppi_cfg.lane_type; + CAM_DBG(CAM_ISP, "lane_cfg 0x%x | num_lanes 0x%x | lane_type 0x%x", + ppi_cfg.lane_cfg, num_lanes, cphy); + + if (cphy) { + ppi_cfg_val |= PPI_CFG_CPHY_DLX_SEL(0); + ppi_cfg_val |= PPI_CFG_CPHY_DLX_SEL(1); + } else { + ppi_cfg_val = 0; + } + + for (i = 0; i < CAM_CSID_PPI_LANES_MAX; i++) + ppi_cfg_val |= PPI_CFG_CPHY_DLX_EN(i); + + CAM_DBG(CAM_ISP, "ppi_cfg_val 0x%x", ppi_cfg_val); + soc_info = &ppi_hw->hw_info->soc_info; + cam_io_w_mb(ppi_cfg_val, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_module_cfg_addr); + + CAM_DBG(CAM_ISP, "ppi cfg 0x%x", + cam_io_r_mb(soc_info->reg_map[0].mem_base + + ppi_reg->ppi_module_cfg_addr)); +end: + return rc; +} + +static int cam_csid_ppi_deinit_hw(void *hw_priv, void *deinit_args, + uint32_t arg_size) +{ + int rc = 0; + struct cam_csid_ppi_hw *ppi_hw; + struct cam_hw_info *ppi_hw_info; + + CAM_DBG(CAM_ISP, 
"Enter"); + + if (!hw_priv) { + CAM_ERR(CAM_ISP, "PPI:Invalid arguments"); + rc = -EINVAL; + goto end; + } + + ppi_hw_info = (struct cam_hw_info *)hw_priv; + ppi_hw = (struct cam_csid_ppi_hw *)ppi_hw_info->core_info; + + CAM_DBG(CAM_ISP, "Disabling PPI Hw"); + rc = cam_csid_ppi_disable_hw(ppi_hw); + if (rc < 0) + CAM_DBG(CAM_ISP, "Exit with %d", rc); +end: + return rc; +} + +int cam_csid_ppi_hw_probe_init(struct cam_hw_intf *ppi_hw_intf, + uint32_t ppi_idx) +{ + int rc = -EINVAL; + struct cam_hw_info *ppi_hw_info; + struct cam_csid_ppi_hw *csid_ppi_hw = NULL; + + if (ppi_idx >= CAM_CSID_PPI_HW_MAX) { + CAM_ERR(CAM_ISP, "Invalid ppi index:%d", ppi_idx); + goto err; + } + + ppi_hw_info = (struct cam_hw_info *) ppi_hw_intf->hw_priv; + csid_ppi_hw = (struct cam_csid_ppi_hw *) ppi_hw_info->core_info; + + csid_ppi_hw->hw_intf = ppi_hw_intf; + csid_ppi_hw->hw_info = ppi_hw_info; + + CAM_DBG(CAM_ISP, "type %d index %d", + csid_ppi_hw->hw_intf->hw_type, ppi_idx); + + rc = cam_csid_ppi_init_soc_resources(&csid_ppi_hw->hw_info->soc_info, + cam_csid_ppi_irq, csid_ppi_hw); + if (rc < 0) { + CAM_ERR(CAM_ISP, "PPI:%d Failed to init_soc", ppi_idx); + goto err; + } + + csid_ppi_hw->hw_intf->hw_ops.init = cam_csid_ppi_init_hw; + csid_ppi_hw->hw_intf->hw_ops.deinit = cam_csid_ppi_deinit_hw; + return 0; +err: + return rc; +} + +int cam_csid_ppi_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t ppi_irq_handler, void *data) +{ + int rc = 0, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_ISP, "PPI: Failed to get dt properties"); + goto end; + } + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, ppi_irq_handler, &(irq_data[0])); + if (rc) { + CAM_ERR(CAM_ISP, + "PPI: Error Request platform resources failed rc=%d", + rc); + goto err; + } +end: + return rc; +err: + 
cam_soc_util_release_platform_resource(soc_info); + return rc; +} + +irqreturn_t cam_csid_ppi_irq(int irq_num, void *data) +{ + uint32_t irq_status = 0; + uint32_t i, ppi_cfg_val = 0; + bool fatal_err_detected = false; + + struct cam_csid_ppi_hw *ppi_hw; + struct cam_hw_soc_info *soc_info; + const struct cam_csid_ppi_reg_offset *ppi_reg; + + if (!data) { + CAM_ERR(CAM_ISP, "PPI: Invalid arguments"); + return IRQ_HANDLED; + } + + ppi_hw = (struct cam_csid_ppi_hw *)data; + ppi_reg = ppi_hw->ppi_info->ppi_reg; + soc_info = &ppi_hw->hw_info->soc_info; + + if (ppi_hw->device_enabled != 1) + goto ret; + + irq_status = cam_io_r_mb(soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_status_addr); + + cam_io_w_mb(irq_status, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_clear_addr); + + cam_io_w_mb(PPI_IRQ_CMD_CLEAR, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_irq_cmd_addr); + + CAM_DBG(CAM_ISP, "PPI %d irq status 0x%x", ppi_hw->hw_intf->hw_idx, + irq_status); + + if (irq_status & PPI_IRQ_RST_DONE) { + CAM_DBG(CAM_ISP, "PPI Reset Done"); + goto ret; + } + if ((irq_status & PPI_IRQ_FIFO0_OVERFLOW) || + (irq_status & PPI_IRQ_FIFO1_OVERFLOW) || + (irq_status & PPI_IRQ_FIFO2_OVERFLOW)) { + fatal_err_detected = true; + goto handle_fatal_error; + } + +handle_fatal_error: + if (fatal_err_detected) { + CAM_ERR(CAM_ISP, "PPI: %d irq_status:0x%x", + ppi_hw->hw_intf->hw_idx, irq_status); + for (i = 0; i < CAM_CSID_PPI_LANES_MAX; i++) + ppi_cfg_val &= ~PPI_CFG_CPHY_DLX_EN(i); + + cam_io_w_mb(ppi_cfg_val, soc_info->reg_map[0].mem_base + + ppi_reg->ppi_module_cfg_addr); + } +ret: + CAM_DBG(CAM_ISP, "IRQ Handling exit"); + return IRQ_HANDLED; +} + +int cam_csid_ppi_hw_deinit(struct cam_csid_ppi_hw *csid_ppi_hw) +{ + if (!csid_ppi_hw) { + CAM_ERR(CAM_ISP, "Invalid param"); + return -EINVAL; + } + return cam_soc_util_release_platform_resource( + &csid_ppi_hw->hw_info->soc_info); +} + diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_core.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_core.h new file mode 100644 index 0000000000..c39fa0c837 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_core.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CSID_PPI_HW_H_ +#define _CAM_CSID_PPI_HW_H_ + +#include "cam_hw.h" +#include "cam_hw_intf.h" + +#define CAM_CSID_PPI_HW_MAX 6 +#define CAM_CSID_PPI_LANES_MAX 3 + +#define PPI_IRQ_RST_DONE BIT(0) +#define PPI_IRQ_FIFO0_OVERFLOW BIT(1) +#define PPI_IRQ_FIFO1_OVERFLOW BIT(2) +#define PPI_IRQ_FIFO2_OVERFLOW BIT(3) + +#define PPI_IRQ_CMD_SET BIT(1) + +#define PPI_IRQ_CMD_CLEAR BIT(0) + +#define PPI_RST_CONTROL BIT(0) +/* + * Select the PHY (CPHY set '1' or DPHY set '0') + */ +#define PPI_CFG_CPHY_DLX_SEL(X) BIT(X) + +#define PPI_CFG_CPHY_DLX_EN(X) BIT(4+X) + +struct cam_csid_ppi_reg_offset { + uint32_t ppi_hw_version_addr; + uint32_t ppi_module_cfg_addr; + + uint32_t ppi_irq_status_addr; + uint32_t ppi_irq_mask_addr; + uint32_t ppi_irq_set_addr; + uint32_t ppi_irq_clear_addr; + uint32_t ppi_irq_cmd_addr; + uint32_t ppi_rst_cmd_addr; + uint32_t ppi_test_bus_ctrl_addr; + uint32_t ppi_debug_addr; + uint32_t ppi_spare_addr; +}; + +/** + * struct cam_csid_ppi_hw_info- ppi HW info + * + * @ppi_reg: ppi register offsets + * + */ +struct cam_csid_ppi_hw_info { + const struct cam_csid_ppi_reg_offset *ppi_reg; +}; + +/** + * struct cam_csid_ppi_hw- ppi hw device resources data + * + * @hw_intf: contain the ppi hw interface information + * @hw_info: ppi hw device information + * @ppi_info: ppi hw specific information + * @device_enabled Device enabled will set once ppi powered on and + * initial configuration are done. 
+ * + */ +struct cam_csid_ppi_hw { + struct cam_hw_intf *hw_intf; + struct cam_hw_info *hw_info; + struct cam_csid_ppi_hw_info *ppi_info; + uint32_t device_enabled; +}; + +/** + * struct cam_csid_ppi_cfg - ppi lane configuration data + * @lane_type: lane type: c-phy or d-phy + * @lane_num : active lane number + * @lane_cfg: lane configurations: 4 bits per lane + * + */ +struct cam_csid_ppi_cfg { + uint32_t lane_type; + uint32_t lane_num; + uint32_t lane_cfg; +}; + +int cam_csid_ppi_hw_probe_init(struct cam_hw_intf *ppi_hw_intf, + uint32_t ppi_idx); +int cam_csid_ppi_hw_deinit(struct cam_csid_ppi_hw *csid_ppi_hw); +int cam_csid_ppi_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t ppi_irq_handler, void *irq_data); +int cam_csid_ppi_deinit_soc_resources(struct cam_hw_soc_info *soc_info); +int cam_csid_ppi_hw_init(struct cam_hw_intf **csid_ppi_hw, + uint32_t hw_idx); +#endif /* _CAM_CSID_PPI_HW_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_dev.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_dev.c new file mode 100644 index 0000000000..7dba4790b9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_dev.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "cam_isp_hw.h" +#include "cam_hw_intf.h" +#include "cam_csid_ppi_core.h" +#include "cam_csid_ppi_dev.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static struct cam_hw_intf *cam_csid_ppi_hw_list[CAM_CSID_PPI_HW_MAX] = { + NULL, NULL, NULL, NULL}; + +static int cam_ppi_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_intf *ppi_hw_intf; + struct cam_hw_info *ppi_hw_info; + struct cam_csid_ppi_hw *ppi_dev = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_csid_ppi_hw_info *ppi_hw_data = NULL; + uint32_t ppi_dev_idx; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + CAM_DBG(CAM_ISP, "PPI probe called"); + + ppi_hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!ppi_hw_intf) { + rc = -ENOMEM; + goto err; + } + + ppi_hw_info = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!ppi_hw_info) { + rc = -ENOMEM; + goto free_hw_intf; + } + + ppi_dev = CAM_MEM_ZALLOC(sizeof(struct cam_csid_ppi_hw), GFP_KERNEL); + if (!ppi_dev) { + rc = -ENOMEM; + goto free_hw_info; + } + + of_property_read_u32(pdev->dev.of_node, "cell-index", &ppi_dev_idx); + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_ISP, "No matching table for the CSID PPI HW!"); + rc = -EINVAL; + goto free_dev; + } + + ppi_hw_intf->hw_idx = ppi_dev_idx; + ppi_hw_intf->hw_priv = ppi_hw_info; + + if (ppi_hw_intf->hw_idx < CAM_CSID_PPI_HW_MAX) + cam_csid_ppi_hw_list[ppi_hw_intf->hw_idx] = ppi_hw_intf; + else { + rc = -EINVAL; + goto free_dev; + } + + ppi_hw_info->core_info = ppi_dev; + ppi_hw_info->soc_info.pdev = pdev; + ppi_hw_info->soc_info.dev = &pdev->dev; + ppi_hw_info->soc_info.dev_name = pdev->name; + ppi_hw_info->soc_info.index = 
ppi_dev_idx; + + ppi_hw_data = (struct cam_csid_ppi_hw_info *)match_dev->data; + ppi_dev->ppi_info = ppi_hw_data; + + rc = cam_csid_ppi_hw_probe_init(ppi_hw_intf, ppi_dev_idx); + if (rc) { + CAM_ERR(CAM_ISP, "PPI: Probe init failed!"); + goto free_dev; + } + + platform_set_drvdata(pdev, ppi_dev); + CAM_DBG(CAM_ISP, "PPI:%d probe successful", + ppi_hw_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return 0; +free_dev: + CAM_MEM_FREE(ppi_dev); +free_hw_info: + CAM_MEM_FREE(ppi_hw_info); +free_hw_intf: + CAM_MEM_FREE(ppi_hw_intf); +err: + return rc; +} + +static void cam_ppi_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_csid_ppi_hw *ppi_dev = NULL; + struct cam_hw_intf *ppi_hw_intf; + struct cam_hw_info *ppi_hw_info; + struct platform_device *pdev = to_platform_device(dev); + + ppi_dev = (struct cam_csid_ppi_hw *)platform_get_drvdata(pdev); + + if (!ppi_dev) { + CAM_ERR(CAM_ISP, "Error No data in ppi_dev"); + return; + } + + ppi_hw_intf = ppi_dev->hw_intf; + ppi_hw_info = ppi_dev->hw_info; + + CAM_DBG(CAM_ISP, "PPI:%d remove", ppi_dev->hw_intf->hw_idx); + + cam_csid_ppi_hw_deinit(ppi_dev); + + CAM_MEM_FREE(ppi_dev); + CAM_MEM_FREE(ppi_hw_info); + CAM_MEM_FREE(ppi_hw_intf); +} + +int cam_csid_ppi_hw_init(struct cam_hw_intf **csid_ppi_hw, + uint32_t hw_idx) +{ + int rc = 0; + + if (cam_csid_ppi_hw_list[hw_idx]) { + *csid_ppi_hw = cam_csid_ppi_hw_list[hw_idx]; + } else { + *csid_ppi_hw = NULL; + rc = -1; + } + + return rc; +} +EXPORT_SYMBOL(cam_csid_ppi_hw_init); + +const static struct component_ops cam_ppi_component_ops = { + .bind = cam_ppi_component_bind, + .unbind = cam_ppi_component_unbind, +}; + +int cam_csid_ppi_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "Adding PPI component"); + rc = component_add(&pdev->dev, &cam_ppi_component_ops); + if (rc) + CAM_ERR(CAM_ISP, "failed to 
add component rc: %d", rc); + + return rc; +} + +int cam_csid_ppi_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_ppi_component_ops); + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_dev.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_dev.h new file mode 100644 index 0000000000..d8995f811b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/ppi_hw/cam_csid_ppi_dev.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CSID_PPI_DEV_H_ +#define _CAM_CSID_PPI_DEV_H_ + +#include "cam_isp_hw.h" + +irqreturn_t cam_csid_ppi_irq(int irq_num, void *data); +int cam_csid_ppi_probe(struct platform_device *pdev); +int cam_csid_ppi_remove(struct platform_device *pdev); + +#endif /*_CAM_CSID_PPI_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe680.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe680.h new file mode 100644 index 0000000000..6d33836331 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe680.h @@ -0,0 +1,1345 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SFE680_H_ +#define _CAM_SFE680_H_ +#include "cam_sfe_core.h" +#include "cam_sfe_bus.h" +#include "cam_sfe_bus_rd.h" +#include "cam_sfe_bus_wr.h" + + +static struct cam_sfe_top_module_desc sfe_680_mod_desc[] = { + { + .id = 0, + .desc = "CRC_ZSL", + }, + { + .id = 1, + .desc = "COMP", + }, + { + .id = 2, + .desc = "CRC_PREV", + }, + { + .id = 3, + .desc = "HDRC", + }, + { + .id = 4, + .desc = "DECOMP", + }, + { + .id = 5, + .desc = "BPC_PDPC", + }, + { + .id = 6, + .desc = "BHIST_CH0", + }, + { + .id = 7, + .desc = "BG_CH0", + }, + { + .id = 8, + .desc = "LSC_CH0", + }, + { + .id = 9, + .desc = "CRC_CH0", + }, + { + .id = 10, + .desc = "CCIF_2x2_2x1", + }, + { + .id = 11, + .desc = "GAIN_CH0", + }, + { + .id = 12, + .desc = "BHIST_CH1", + }, + { + .id = 13, + .desc = "BG_CH1", + }, + { + .id = 14, + .desc = "LSC_CH1", + }, + { + .id = 15, + .desc = "CRC_CH1", + }, + { + .id = 16, + .desc = "GAIN_CH1", + }, + { + .id = 17, + .desc = "BHIST_CH2", + }, + { + .id = 18, + .desc = "BG_CH2", + }, + { + .id = 19, + .desc = "LSC_CH2", + }, + { + .id = 20, + .desc = "CRC_CH2", + }, + { + .id = 21, + .desc = "GAIN_CH2", + }, + { + .id = 22, + .desc = "LCR", + }, + { + .id = 23, + .desc = "QCFA_DEMUX", + }, +}; + +static struct cam_sfe_wr_client_desc sfe_680_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "REMOSAIC", + }, + { + .wm_id = 1, + .desc = "LCR", + }, + { + .wm_id = 2, + .desc = "STATS_BE0", + }, + { + .wm_id = 3, + .desc = "STATS_BHIST0", + }, + { + .wm_id = 4, + .desc = "STATS_BE1", + }, + { + .wm_id = 5, + .desc = "STATS_BHIST1", + }, + { + .wm_id = 6, + .desc = "STATS_BE2", + }, + { + .wm_id = 7, + .desc = "STATS_BHIST2", + }, + { + .wm_id = 8, + .desc = "RDI_0", + }, + { + .wm_id = 9, + .desc = "RDI_1", + }, + { + .wm_id = 10, + .desc = "RDI_2", + }, + { + .wm_id = 11, + .desc = "RDI_3", + }, + { + .wm_id = 12, + .desc = "RDI_4", + }, +}; + +static struct cam_sfe_mode sfe_680_mode[] = { + { + .value = 0x1, + .desc = "FS Mode", + }, + { + 
.value = 0x3, + .desc = "Offline Mode", + }, + { + .value = 0x4, + .desc = "sHDR mode", + }, +}; + +static struct cam_sfe_top_err_irq_desc sfe_680_top_irq_err_desc[] = { + { + .bitmask = BIT(14), + .err_name = "PP_VIOLATION", + .desc = "CCIF protocol violation within any of the modules in pixel pipeline", + }, + { + .bitmask = BIT(15), + .err_name = "DIAG_VIOLATION", + .desc = "HBI is less than the minimum required HBI", + }, + { + .bitmask = BIT(16), + .err_name = "LINE_SMOOTH_VIOLATION", + .desc = "Line Smoothner IRQ fire", + }, +}; + +static struct cam_sfe_top_debug_info sfe680_clc_dbg_module_info[CAM_SFE_TOP_DBG_REG_MAX][8] = { + SFE_DBG_INFO_ARRAY_4bit( + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + SFE_DBG_INFO_ARRAY_4bit( + "zsl_throttle", + "crc_zsl", + "comp_zsl", + "crc_prev", + "hdrc_ch2", + "hdrc_ch1", + "hdrc_ch0", + "stats_bhist_ch0" + ), + SFE_DBG_INFO_ARRAY_4bit( + "stats_bg_ch0", + "lsc_ch0", + "crc_ch0", + "ccif_2x2_to_2x1", + "decomp", + "msb_align_ch0", + "bpc_pdpc", + "ch0_gain" + ), + SFE_DBG_INFO_ARRAY_4bit( + "bhist_ch1", + "stats_bg_ch1", + "lsc_ch1", + "crc_ch1", + "msb_align_ch1", + "ch1_gain", + "bhist_ch2", + "stats_bg_ch2" + ), + SFE_DBG_INFO_ARRAY_4bit( + "lsc_ch2", + "crc_ch2", + "msb_align_ch2", + "ch2_gain", + "lcr_throttle", + "lcr", + "demux_fetch2", + "demux_fetch1" + ), + SFE_DBG_INFO_ARRAY_4bit( + "demux_fetch0", + "csid_ccif", + "RDI4", + "RDI3", + "RDI2", + "RDI1", + "RDI0", + "bhist2_bus_wr" + ), + SFE_DBG_INFO_ARRAY_4bit( + "bg2_bus_wr", + "bhist1_bus_wr", + "bg1_bus_wr", + "bhist0_bus_wr", + "bg0_bus_wr", + "lcr_bus_wr", + "zsl_bus_wr", + "sfe_op_throttle" + ), + SFE_DBG_INFO_ARRAY_4bit( + "line_smooth", + "pp", + "bus_conv_ch2", + "bus_conv_ch1", + "bus_conv_ch0", + "fe_ch2", + "fe_ch1", + "fe_ch0" + ), + SFE_DBG_INFO_ARRAY_4bit( + "rdi4", + "rdi3", + "rdi2", + "rdi1", + "rdi0", + 
"pixel", + "reserved", + "reserved" + ), +}; + +static struct cam_sfe_top_common_reg_offset sfe680_top_commong_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .stats_feature = 0x00000008, + .core_cgc_ctrl = 0x00000010, + .ahb_clk_ovd = 0x00000014, + .core_cfg = 0x00000018, + .ipp_violation_status = 0x00000030, + .diag_config = 0x00000034, + .diag_sensor_status_0 = 0x00000038, + .diag_sensor_status_1 = 0x0000003C, + .diag_sensor_frame_cnt_status0 = 0x00000040, + .diag_sensor_frame_cnt_status1 = 0x00000044, + .stats_ch2_throttle_cfg = 0x000000B0, + .stats_ch1_throttle_cfg = 0x000000B4, + .stats_ch0_throttle_cfg = 0x000000B8, + .lcr_throttle_cfg = 0x000000BC, + .hdr_throttle_cfg = 0x000000C0, + .sfe_op_throttle_cfg = 0x000000C4, + .bus_overflow_status = 0x00000868, + .num_perf_counters = 2, + .perf_count_reg = { + { + .perf_count_cfg = 0x00000080, + .perf_pix_count = 0x00000084, + .perf_line_count = 0x00000088, + .perf_stall_count = 0x0000008C, + .perf_always_count = 0x00000090, + .perf_count_status = 0x00000094, + }, + { + .perf_count_cfg = 0x00000098, + .perf_pix_count = 0x0000009C, + .perf_line_count = 0x000000A0, + .perf_stall_count = 0x000000A4, + .perf_always_count = 0x000000A8, + .perf_count_status = 0x000000AC, + }, + }, + .top_debug_cfg = 0x0000007C, + .lcr_supported = true, + .ir_supported = false, + .qcfa_only = true, + .num_sfe_mode = ARRAY_SIZE(sfe_680_mode), + .sfe_mode = sfe_680_mode, + .ipp_violation_mask = 0x4000, + .num_debug_registers = 12, + .top_cc_test_bus_supported = false, + .top_debug = { + 0x0000004C, + 0x00000050, + 0x00000054, + 0x00000058, + 0x0000005C, + 0x00000060, + 0x00000064, + 0x00000068, + 0x0000006C, + 0x00000070, + 0x00000074, + 0x00000078, + }, +}; + +static struct cam_sfe_modules_common_reg_offset sfe680_modules_common_reg = { + .demux_module_cfg = 0x00003060, + .demux_xcfa_cfg = 0x00003064, + .demux_hdr_cfg = 0x00003074, + .demux_lcr_sel = 0x00003078, + .hdrc_remo_mod_cfg = 0x00005860, + 
.hdrc_remo_xcfa_bin_cfg = 0x00005A78, + .xcfa_hdrc_remo_out_mux_cfg = 0x00005A74, +}; + +static struct cam_sfe_top_common_reg_data sfe_680_top_common_reg_data = { + .error_irq_mask = 0x1C000, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + .sensor_sel_shift = 0x1, +}; + +static struct cam_sfe_path_common_reg_data sfe_680_pix_reg_data = { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .subscribe_irq_mask = 0xC, +}; + +static struct cam_sfe_path_common_reg_data sfe_680_rdi0_reg_data = { + .sof_irq_mask = 0x10, + .eof_irq_mask = 0x20, + .subscribe_irq_mask = 0x30, +}; + +static struct cam_sfe_path_common_reg_data sfe_680_rdi1_reg_data = { + .sof_irq_mask = 0x40, + .eof_irq_mask = 0x80, + .subscribe_irq_mask = 0xC0, +}; + +static struct cam_sfe_path_common_reg_data sfe_680_rdi2_reg_data = { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .subscribe_irq_mask = 0x300, +}; + +static struct cam_sfe_path_common_reg_data sfe_680_rdi3_reg_data = { + .sof_irq_mask = 0x400, + .eof_irq_mask = 0x800, + .subscribe_irq_mask = 0xC00, +}; + +static struct cam_sfe_path_common_reg_data sfe_680_rdi4_reg_data = { + .sof_irq_mask = 0x1000, + .eof_irq_mask = 0x2000, + .subscribe_irq_mask = 0x3000, +}; + +static struct cam_sfe_top_hw_info sfe680_top_hw_info = { + .common_reg = &sfe680_top_commong_reg, + .modules_hw_info = &sfe680_modules_common_reg, + .common_reg_data = &sfe_680_top_common_reg_data, + .ipp_module_desc = sfe_680_mod_desc, + .wr_client_desc = sfe_680_wr_client_desc, + .pix_reg_data = &sfe_680_pix_reg_data, + .rdi_reg_data[0] = &sfe_680_rdi0_reg_data, + .rdi_reg_data[1] = &sfe_680_rdi1_reg_data, + .rdi_reg_data[2] = &sfe_680_rdi2_reg_data, + .rdi_reg_data[3] = &sfe_680_rdi3_reg_data, + .rdi_reg_data[4] = &sfe_680_rdi4_reg_data, + .num_inputs = 6, + .input_type = { + CAM_SFE_PIX_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + }, + .num_top_errors = 
ARRAY_SIZE(sfe_680_top_irq_err_desc), + .top_err_desc = sfe_680_top_irq_err_desc, + .num_clc_module = 9, + .clc_dbg_mod_info = &sfe680_clc_dbg_module_info, +}; + +static struct cam_irq_register_set sfe680_bus_rd_irq_reg[1] = { + { + .mask_reg_offset = 0x00000404, + .clear_reg_offset = 0x00000408, + .status_reg_offset = 0x00000410, + .set_reg_offset = 0x00000418, + }, +}; + +static struct cam_sfe_bus_rd_constraint_error_desc + sfe680_bus_rd_cons_error_desc[CAM_SFE_BUS_RD_CONS_ERR_MAX] = { + { + .bitmask = BIT(0), + .error_desc = "Image Addr Unalign Latch", + }, + { + .bitmask = BIT(1), + .error_desc = "Ubwc Addr Unalign Latch", + }, + { + .bitmask = BIT(2), + .error_desc = "Stride Unalign Latch", + }, + { + .bitmask = BIT(3), + .error_desc = "Y Unit Unalign Latch", + }, + { + .bitmask = BIT(4), + .error_desc = "X Unit Unalign Latch", + }, + { + .bitmask = BIT(5), + .error_desc = "Image width Unalign Latch", + }, + { + .bitmask = BIT(6), + .error_desc = "Image height Unalign Latch", + }, + { + .bitmask = BIT(7), + .error_desc = "Meta Stride Unalign Latch", + }, +}; + +static struct cam_sfe_bus_rd_constraint_error_info sfe680_bus_rd_constraint_error_info = { + .constraint_error_list = sfe680_bus_rd_cons_error_desc, + .cons_chk_en_val = 0xC, + .num_cons_err = 8, +}; + +static struct cam_sfe_bus_rd_hw_info sfe680_bus_rd_hw_info = { + .common_reg = { + .hw_version = 0x00000400, + .misr_reset = 0x0000041C, + .pwr_iso_cfg = 0x00000424, + .input_if_cmd = 0x00000414, + .test_bus_ctrl = 0x0000042C, + .security_cfg = 0x00000420, + .cons_violation_status = 0x00000434, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe680_bus_rd_irq_reg, + .global_irq_cmd_offset = 0x0000040C, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, + }, + .num_client = 3, + .bus_client_reg = { + /* BUS Client 0 */ + { + .cfg = 0x00000450, + .image_addr = 0x00000458, + .buf_width = 0x0000045C, + .buf_height = 0x00000460, + .stride = 0x00000464, + .unpacker_cfg = 
0x00000468, + .latency_buf_allocation = 0x0000047C, + .system_cache_cfg = 0x0000049C, + .debug_status_cfg = 0x00000490, + .debug_status_0 = 0x00000494, + .debug_status_1 = 0x00000498, + .name = "Fetch0", + }, + /* BUS Client 1 */ + { + .cfg = 0x000004F0, + .image_addr = 0x000004F8, + .buf_width = 0x000004FC, + .buf_height = 0x00000500, + .stride = 0x00000504, + .unpacker_cfg = 0x00000508, + .latency_buf_allocation = 0x0000051C, + .system_cache_cfg = 0x0000053C, + .debug_status_cfg = 0x00000530, + .debug_status_0 = 0x00000534, + .debug_status_1 = 0x00000538, + .name = "Fetch1", + }, + /* BUS Client 2 */ + { + .cfg = 0x00000590, + .image_addr = 0x00000598, + .buf_width = 0x0000059C, + .buf_height = 0x000005A0, + .stride = 0x000005A4, + .unpacker_cfg = 0x000005A8, + .latency_buf_allocation = 0x000005BC, + .system_cache_cfg = 0x000005DC, + .debug_status_cfg = 0x000005D0, + .debug_status_0 = 0x000005D4, + .debug_status_1 = 0x000005D8, + .name = "Fetch2", + }, + }, + .num_bus_rd_resc = 3, + .sfe_bus_rd_info = { + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI0, + .mid[0] = 0, + .max_width = -1, + .max_height = -1, + }, + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI1, + .mid[0] = 1, + .max_width = -1, + .max_height = -1, + }, + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI2, + .mid[0] = 2, + .max_width = -1, + .max_height = -1, + }, + }, + .top_irq_shift = 0x1, + /* + * Refer to CAMNOC HPG for the updated value for a given target + * 48 OTs, 2 SFEs each with 3 RDs, 48 / 6 = 8 + * We can allocate 256 * 8 = 2048 bytes. 
256 bytes being + * the minimum + */ + .latency_buf_allocation = 2048, + .sys_cache_default_val = 0x20, + .irq_err_mask = 0x1, + .fs_sync_shift = 0x5, + .constraint_error_info = &sfe680_bus_rd_constraint_error_info, +}; + +static struct cam_sfe_bus_wr_constraint_error_desc + sfe680_bus_wr_cons_error_desc[CAM_SFE_BUS_CONS_ERR_MAX] = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input not supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input not supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input not supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input not supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format not supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 format not supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 32 BPP format not supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 64 BPP format not supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack MIPI 20 format not supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 14 format not supported" + }, + { + .bitmask = BIT(10), + .error_description = "Pack MIPI 12 format not supported" + }, + { + .bitmask = BIT(11), + .error_description = "Pack MIPI 10 format not supported" + }, + { + .bitmask = BIT(12), + .error_description = "Pack 128 BPP format not supported" + }, + { + .bitmask = BIT(13), + .error_description = "UBWC NV12 format not supported" + }, + { + .bitmask = BIT(14), + .error_description = "UBWC NV12 4R format not supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC TP10 format not supported" + }, + { + .bitmask = BIT(16), + .error_description = "Frame based Mode not supported" + }, + { + .bitmask = BIT(17), + .error_description = "Index based Mode not supported" + }, + { + .bitmask = BIT(18), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = 
BIT(19), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(20), + .error_description = "FIFO frmheader addr unalign" + }, + { + .bitmask = BIT(21), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(22), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(23), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Stride unalign" + }, + { + .bitmask = BIT(25), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Image Width unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Image Height unalign" + }, + { + .bitmask = BIT(28), + .error_description = "Meta Stride unalign" + }, +}; + +static struct cam_sfe_bus_wr_constraint_error_info sfe680_bus_wr_constraint_error_info = { + .constraint_error_list = sfe680_bus_wr_cons_error_desc, + .num_cons_err = 29, + .img_addr_unalign_shift = 21, + .img_width_unalign_shift = 26, +}; + +static struct cam_irq_register_set sfe680_bus_wr_irq_reg[1] = { + { + .mask_reg_offset = 0x00000818, + .clear_reg_offset = 0x00000820, + .status_reg_offset = 0x00000828, + .set_reg_offset = 0x00000850, + }, +}; + +static struct cam_sfe_bus_wr_hw_info sfe680_bus_wr_hw_info = { + .common_reg = { + .hw_version = 0x00000800, + .cgc_ovd = 0x00000808, + .if_frameheader_cfg = { + 0x00000834, + 0x00000838, + 0x0000083C, + 0x00000840, + 0x00000844, + 0x00000848, + }, + .pwr_iso_cfg = 0x0000085C, + .overflow_status_clear = 0x00000860, + .ccif_violation_status = 0x00000864, + .overflow_status = 0x00000868, + .image_size_violation_status = 0x00000870, + .debug_status_top_cfg = 0x000008D4, + .debug_status_top = 0x000008D8, + .test_bus_ctrl = 0x000008DC, + .top_irq_mask_0 = 0x00000020, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe680_bus_wr_irq_reg, + .global_irq_cmd_offset = 0x00000830, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, 
+ }, + .num_client = 13, + .bus_client_reg = { + /* BUS Client 0 REMOSAIC */ + { + .cfg = 0x00000A00, + .image_addr = 0x00000A04, + .frame_incr = 0x00000A08, + .image_cfg_0 = 0x00000A0C, + .image_cfg_1 = 0x00000A10, + .image_cfg_2 = 0x00000A14, + .packer_cfg = 0x00000A18, + .frame_header_addr = 0x00000A20, + .frame_header_incr = 0x00000A24, + .frame_header_cfg = 0x00000A28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000A30, + .irq_subsample_pattern = 0x00000A34, + .framedrop_period = 0x00000A38, + .framedrop_pattern = 0x00000A3C, + .system_cache_cfg = 0x00000A68, + .addr_status_0 = 0x00000A70, + .addr_status_1 = 0x00000A74, + .addr_status_2 = 0x00000A78, + .addr_status_3 = 0x00000A7C, + .debug_status_cfg = 0x00000A80, + .debug_status_0 = 0x00000A84, + .debug_status_1 = 0x00000A88, + .mmu_prefetch_cfg = 0x00000A60, + .mmu_prefetch_max_offset = 0x00000A64, + .bw_limiter_addr = 0x00000A1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_0, + }, + /* BUS Client 1 LCR */ + { + .cfg = 0x00000B00, + .image_addr = 0x00000B04, + .frame_incr = 0x00000B08, + .image_cfg_0 = 0x00000B0C, + .image_cfg_1 = 0x00000B10, + .image_cfg_2 = 0x00000B14, + .packer_cfg = 0x00000B18, + .frame_header_addr = 0x00000B20, + .frame_header_incr = 0x00000B24, + .frame_header_cfg = 0x00000B28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000B30, + .irq_subsample_pattern = 0x00000B34, + .framedrop_period = 0x00000B38, + .framedrop_pattern = 0x00000B3C, + .system_cache_cfg = 0x00000B68, + .addr_status_0 = 0x00000B70, + .addr_status_1 = 0x00000B74, + .addr_status_2 = 0x00000B78, + .addr_status_3 = 0x00000B7C, + .debug_status_cfg = 0x00000B80, + .debug_status_0 = 0x00000B84, + .debug_status_1 = 0x00000B88, + .mmu_prefetch_cfg = 0x00000B60, + .mmu_prefetch_max_offset = 0x00000B64, + .bw_limiter_addr = 0x00000B1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_1, + }, + /* BUS Client 2 STATS_BE_0 */ + { + .cfg = 0x00000C00, + .image_addr = 0x00000C04, + .frame_incr = 0x00000C08, + .image_cfg_0 = 
0x00000C0C, + .image_cfg_1 = 0x00000C10, + .image_cfg_2 = 0x00000C14, + .packer_cfg = 0x00000C18, + .frame_header_addr = 0x00000C20, + .frame_header_incr = 0x00000C24, + .frame_header_cfg = 0x00000C28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000C30, + .irq_subsample_pattern = 0x00000C34, + .framedrop_period = 0x00000C38, + .framedrop_pattern = 0x00000C3C, + .system_cache_cfg = 0x00000C68, + .addr_status_0 = 0x00000C70, + .addr_status_1 = 0x00000C74, + .addr_status_2 = 0x00000C78, + .addr_status_3 = 0x00000C7C, + .debug_status_cfg = 0x00000C80, + .debug_status_0 = 0x00000C84, + .debug_status_1 = 0x00000C88, + .mmu_prefetch_cfg = 0x00000C60, + .mmu_prefetch_max_offset = 0x00000C64, + .bw_limiter_addr = 0x00000C1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_2, + }, + /* BUS Client 3 STATS_BHIST_0 */ + { + .cfg = 0x00000D00, + .image_addr = 0x00000D04, + .frame_incr = 0x00000D08, + .image_cfg_0 = 0x00000D0C, + .image_cfg_1 = 0x00000D10, + .image_cfg_2 = 0x00000D14, + .packer_cfg = 0x00000D18, + .frame_header_addr = 0x00000D20, + .frame_header_incr = 0x00000D24, + .frame_header_cfg = 0x00000D28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000D30, + .irq_subsample_pattern = 0x00000D34, + .framedrop_period = 0x00000D38, + .framedrop_pattern = 0x00000D3C, + .system_cache_cfg = 0x00000D68, + .addr_status_0 = 0x00000D70, + .addr_status_1 = 0x00000D74, + .addr_status_2 = 0x00000D78, + .addr_status_3 = 0x00000D7C, + .debug_status_cfg = 0x00000D80, + .debug_status_0 = 0x00000D84, + .debug_status_1 = 0x00000D88, + .mmu_prefetch_cfg = 0x00000D60, + .mmu_prefetch_max_offset = 0x00000D64, + .bw_limiter_addr = 0x00000D1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_2, + }, + /* BUS Client 4 STATS_BE_1 */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + 
.frame_header_cfg = 0x00000E28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00000E38, + .framedrop_pattern = 0x00000E3C, + .system_cache_cfg = 0x00000E68, + .addr_status_0 = 0x00000E70, + .addr_status_1 = 0x00000E74, + .addr_status_2 = 0x00000E78, + .addr_status_3 = 0x00000E7C, + .debug_status_cfg = 0x00000E80, + .debug_status_0 = 0x00000E84, + .debug_status_1 = 0x00000E88, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 0x00000E64, + .bw_limiter_addr = 0x00000E1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_3, + }, + /* BUS Client 5 STATS_BHIST_1 */ + { + .cfg = 0x00000F00, + .image_addr = 0x00000F04, + .frame_incr = 0x00000F08, + .image_cfg_0 = 0x00000F0C, + .image_cfg_1 = 0x00000F10, + .image_cfg_2 = 0x00000F14, + .packer_cfg = 0x00000F18, + .frame_header_addr = 0x00000F20, + .frame_header_incr = 0x00000F24, + .frame_header_cfg = 0x00000F28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000F30, + .irq_subsample_pattern = 0x00000F34, + .framedrop_period = 0x00000F38, + .framedrop_pattern = 0x00000F3C, + .system_cache_cfg = 0x00000F68, + .addr_status_0 = 0x00000F70, + .addr_status_1 = 0x00000F74, + .addr_status_2 = 0x00000F78, + .addr_status_3 = 0x00000F7C, + .debug_status_cfg = 0x00000F80, + .debug_status_0 = 0x00000F84, + .debug_status_1 = 0x00000F88, + .mmu_prefetch_cfg = 0x00000F60, + .mmu_prefetch_max_offset = 0x00000F64, + .bw_limiter_addr = 0x00000F1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_3, + }, + /* BUS Client 6 STATS_BE_2 */ + { + .cfg = 0x00001000, + .image_addr = 0x00001004, + .frame_incr = 0x00001008, + .image_cfg_0 = 0x0000100C, + .image_cfg_1 = 0x00001010, + .image_cfg_2 = 0x00001014, + .packer_cfg = 0x00001018, + .frame_header_addr = 0x00001020, + .frame_header_incr = 0x00001024, + .frame_header_cfg = 0x00001028, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001030, + .irq_subsample_pattern = 0x00001034, + .framedrop_period = 0x00001038, + 
.framedrop_pattern = 0x0000103C, + .system_cache_cfg = 0x00001068, + .addr_status_0 = 0x00001070, + .addr_status_1 = 0x00001074, + .addr_status_2 = 0x00001078, + .addr_status_3 = 0x0000107C, + .debug_status_cfg = 0x00001080, + .debug_status_0 = 0x00001084, + .debug_status_1 = 0x00001088, + .mmu_prefetch_cfg = 0x00001060, + .mmu_prefetch_max_offset = 0x00001064, + .bw_limiter_addr = 0x0000101C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_4, + }, + /* BUS Client 7 STATS_BHIST_2 */ + { + .cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + .image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + .packer_cfg = 0x00001118, + .frame_header_addr = 0x00001120, + .frame_header_incr = 0x00001124, + .frame_header_cfg = 0x00001128, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .framedrop_period = 0x00001138, + .framedrop_pattern = 0x0000113C, + .system_cache_cfg = 0x00001168, + .addr_status_0 = 0x00001170, + .addr_status_1 = 0x00001174, + .addr_status_2 = 0x00001178, + .addr_status_3 = 0x0000117C, + .debug_status_cfg = 0x00001180, + .debug_status_0 = 0x00001184, + .debug_status_1 = 0x00001188, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .bw_limiter_addr = 0x0000111C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_4, + }, + /* BUS Client 8 RDI0 */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .line_done_cfg = 0x0000122C, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .framedrop_period = 0x00001238, + .framedrop_pattern = 0x0000123C, + .system_cache_cfg = 0x00001268, + .addr_status_0 = 0x00001270, + .addr_status_1 = 0x00001274, + .addr_status_2 = 0x00001278, + 
.addr_status_3 = 0x0000127C, + .debug_status_cfg = 0x00001280, + .debug_status_0 = 0x00001284, + .debug_status_1 = 0x00001288, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .bw_limiter_addr = 0x0000121C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_5, + }, + /* BUS Client 9 RDI1 */ + { + .cfg = 0x00001300, + .image_addr = 0x00001304, + .frame_incr = 0x00001308, + .image_cfg_0 = 0x0000130C, + .image_cfg_1 = 0x00001310, + .image_cfg_2 = 0x00001314, + .packer_cfg = 0x00001318, + .frame_header_addr = 0x00001320, + .frame_header_incr = 0x00001324, + .frame_header_cfg = 0x00001328, + .line_done_cfg = 0x0000132C, + .irq_subsample_period = 0x00001330, + .irq_subsample_pattern = 0x00001334, + .framedrop_period = 0x00001338, + .framedrop_pattern = 0x0000133C, + .system_cache_cfg = 0x00001368, + .addr_status_0 = 0x00001370, + .addr_status_1 = 0x00001374, + .addr_status_2 = 0x00001378, + .addr_status_3 = 0x0000137C, + .debug_status_cfg = 0x00001380, + .debug_status_0 = 0x00001384, + .debug_status_1 = 0x00001388, + .mmu_prefetch_cfg = 0x00001360, + .mmu_prefetch_max_offset = 0x00001364, + .bw_limiter_addr = 0x0000131C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_6, + }, + /* BUS Client 10 RDI2 */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .frame_header_addr = 0x00001420, + .frame_header_incr = 0x00001424, + .frame_header_cfg = 0x00001428, + .line_done_cfg = 0x0000142C, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .system_cache_cfg = 0x00001468, + .addr_status_0 = 0x00001470, + .addr_status_1 = 0x00001474, + .addr_status_2 = 0x00001478, + .addr_status_3 = 0x0000147C, + .debug_status_cfg = 0x00001480, + .debug_status_0 = 0x00001484, + .debug_status_1 = 0x00001488, + .mmu_prefetch_cfg = 0x00001460, + 
.mmu_prefetch_max_offset = 0x00001464, + .bw_limiter_addr = 0x0000141C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_7, + }, + /* BUS Client 11 RDI3 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + .frame_header_incr = 0x00001524, + .frame_header_cfg = 0x00001528, + .line_done_cfg = 0x0000152C, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .system_cache_cfg = 0x00001568, + .addr_status_0 = 0x00001570, + .addr_status_1 = 0x00001574, + .addr_status_2 = 0x00001578, + .addr_status_3 = 0x0000157C, + .debug_status_cfg = 0x00001580, + .debug_status_0 = 0x00001584, + .debug_status_1 = 0x00001588, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_8, + }, + /* BUS Client 12 RDI4 */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .line_done_cfg = 0x0000162C, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .system_cache_cfg = 0x00001668, + .addr_status_0 = 0x00001670, + .addr_status_1 = 0x00001674, + .addr_status_2 = 0x00001678, + .addr_status_3 = 0x0000167C, + .debug_status_cfg = 0x00001680, + .debug_status_0 = 0x00001684, + .debug_status_1 = 0x00001688, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_9, + }, + }, + .num_out = 13, + .sfe_out_hw_info = { + 
{ + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_1, + .mid[0] = 25, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 8, + .en_line_done = 1, + .name = "RDI_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_2, + .mid[0] = 26, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 9, + .en_line_done = 1, + .name = "RDI_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_3, + .mid[0] = 27, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 10, + .en_line_done = 1, + .name = "RDI_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_4, + .mid[0] = 28, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 11, + .name = "RDI_3", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI4, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_4, + .mid[0] = 29, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 12, + .name = "RDI_4", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RAW_DUMP, + .max_width = 9312, + .max_height = 6992, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 16, + .mid[1] = 17, + .num_mid = 2, + .num_wm = 1, + .wm_idx = 0, + .name = "REMOSIAC", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_LCR, + .max_width = 9312, + .max_height = 2048, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 18, + .num_wm = 1, + .wm_idx = 1, + .name = "LCR", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_0, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 19, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 2, + .name = "STATS_BE_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_0, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 20, + .num_mid = 1, + 
.num_wm = 1, + .wm_idx = 3, + .name = "STATS_BHIST_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_1, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 21, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 4, + .name = "STATS_BE_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_1, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 22, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 5, + .name = "STATS_BHIST_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_2, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 23, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 6, + .name = "STATS_BE_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_2, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 24, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 7, + .name = "STATS_BHIST_2", + }, + }, + .constraint_error_info = &sfe680_bus_wr_constraint_error_info, + .comp_done_mask = { + BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23), + BIT(24), BIT(25), BIT(26), + }, + .num_comp_grp = 10, + .line_done_cfg = 0x11, + .top_irq_shift = 0x0, + .max_out_res = CAM_ISP_SFE_OUT_RES_BASE + 13, + .pack_align_shift = 0x5, + .max_bw_counter_limit = 0xFF, + .sys_cache_default_val = 0x20, + .irq_err_mask = 0xD0000000, +}; + +static struct cam_irq_register_set sfe680_top_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000020, + .clear_reg_offset = 0x00000024, + .status_reg_offset = 0x00000028, + .set_reg_offset = 0x0000002C, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, +}; + +static struct cam_irq_controller_reg_info sfe680_top_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe680_top_irq_reg_set, + .global_irq_cmd_offset = 0x0000001C, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +struct cam_sfe_hw_info 
cam_sfe680_hw_info = { + .irq_reg_info = &sfe680_top_irq_reg_info, + + .bus_wr_version = CAM_SFE_BUS_WR_VER_1_0, + .bus_wr_hw_info = &sfe680_bus_wr_hw_info, + + .bus_rd_version = CAM_SFE_BUS_RD_VER_1_0, + .bus_rd_hw_info = &sfe680_bus_rd_hw_info, + + .top_version = CAM_SFE_TOP_VER_1_0, + .top_hw_info = &sfe680_top_hw_info, +}; + +#endif /* _CAM_SFE680_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe780.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe780.h new file mode 100644 index 0000000000..1a8a9e8d85 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe780.h @@ -0,0 +1,1769 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_SFE780_H_ +#define _CAM_SFE780_H_ +#include "cam_sfe_core.h" +#include "cam_sfe_bus.h" +#include "cam_sfe_bus_rd.h" +#include "cam_sfe_bus_wr.h" + +static struct cam_sfe_top_module_desc sfe_780_mod_desc[] = { + { + .id = 0, + .desc = "CRC_IRC", + }, + { + .id = 1, + .desc = "CRC_ZSL", + }, + { + .id = 2, + .desc = "COMP", + }, + { + .id = 3, + .desc = "LINE_SMOOTH", + }, + { + .id = 4, + .desc = "CRC_PREV", + }, + { + .id = 5, + .desc = "HDRC", + }, + { + .id = 6, + .desc = "DECOMP", + }, + { + .id = 7, + .desc = "BPC_PDPC", + }, + { + .id = 8, + .desc = "PDPC_BPC_ID_CH0", + }, + { + .id = 9, + .desc = "RS_CH0", + }, + { + .id = 10, + .desc = "BHIST_CH0", + }, + { + .id = 11, + .desc = "BG_CH0", + }, + { + .id = 12, + .desc = "LSC_CH0", + }, + { + .id = 13, + .desc = "CRC_CH0", + }, + { + .id = 14, + .desc = "CCIF_2x2_2x1", + }, + { + .id = 15, + .desc = "GAIN_CH0", + }, + { + .id = 16, + .desc = "PDPC_BPC_ID_CH1", + }, + { + .id = 17, + .desc = "RS_CH1", + }, + { + .id = 18, + .desc = "BHIST_CH1", + }, + { + .id = 19, + .desc = "BG_CH1", + 
}, + { + .id = 20, + .desc = "LSC_CH1", + }, + { + .id = 21, + .desc = "CRC_CH1", + }, + { + .id = 22, + .desc = "GAIN_CH1", + }, + { + .id = 23, + .desc = "PDPC_BPC_ID_CH2", + }, + { + .id = 24, + .desc = "RS_CH2", + }, + { + .id = 25, + .desc = "BHIST_CH2", + }, + { + .id = 26, + .desc = "BG_CH2", + }, + { + .id = 27, + .desc = "LSC_CH2", + }, + { + .id = 28, + .desc = "CRC_CH2", + }, + { + .id = 29, + .desc = "GAIN_CH2", + }, + { + .id = 30, + .desc = "XCFA_DEMUX", + }, +}; + +static struct cam_sfe_wr_client_desc sfe_780_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "REMOSAIC", + }, + { + .wm_id = 1, + .desc = "IR_OUT", + }, + { + .wm_id = 2, + .desc = "STATS_BE0", + }, + { + .wm_id = 3, + .desc = "STATS_BHIST0", + }, + { + .wm_id = 4, + .desc = "STATS_BE1", + }, + { .wm_id = 5, + .desc = "STATS_BHIST1", + }, + { + .wm_id = 6, + .desc = "STATS_BE2", + }, + { + .wm_id = 7, + .desc = "STATS_BHIST2", + }, + { + .wm_id = 8, + .desc = "STATS_RS0", + }, + { + .wm_id = 9, + .desc = "STATS_RS1", + }, + { + .wm_id = 10, + .desc = "STATS_RS2", + }, + { + .wm_id = 11, + .desc = "RDI_0", + }, + { + .wm_id = 12, + .desc = "RDI_1", + }, + { + .wm_id = 13, + .desc = "RDI_2", + }, + { + .wm_id = 14, + .desc = "RDI_3", + }, + { + .wm_id = 15, + .desc = "RDI_4", + }, +}; + +static struct cam_sfe_top_cc_testbus_info + sfe780_testbus1_info[] = { + { + .mask = BIT(0), + .shift = 0, + .clc_name = "sw_xcfa_mode_sel", + }, + { + .mask = BIT(0), + .shift = 1, + .clc_name = "bus_rd_line_done_rdi2", + }, + { + .mask = BIT(0), + .shift = 2, + .clc_name = "bus_rd_line_done_rdi1", + }, + { + .mask = BIT(0), + .shift = 3, + .clc_name = "bus_rd_line_done_rdi0", + }, + { + .mask = BIT(0), + .shift = 4, + .clc_name = "down_count_flag", + }, + { + .mask = BIT(0), + .shift = 5, + .clc_name = "rdi2_upcount_flag", + }, + { + .mask = BIT(0), + .shift = 6, + .clc_name = "rdi1_upcount_flag", + }, + { + .mask = BIT(0), + .shift = 7, + .clc_name = "rdi0_upcount_flag", + }, + { + .mask = BIT(0) | 
BIT(1), + .shift = 8, + .clc_name = "rdi2_meta_pkts", + }, + { + .mask = BIT(0) | BIT(1), + .shift = 10, + .clc_name = "rdi1_meta_pkts", + }, + { + .mask = BIT(0) | BIT(1), + .shift = 12, + .clc_name = "rdi0_meta_pkts", + }, + { + .mask = BIT(1) | BIT(2) | BIT(3), + .shift = 14, + .clc_name = "i_rdi2_sample", + }, + { + .mask = BIT(0), + .shift = 18, + .clc_name = "i_rdi2_vld", + }, + { + .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), + .shift = 19, + .clc_name = "i_rdi1_sample", + }, + { + .mask = BIT(0), + .shift = 23, + .clc_name = "i_rdi1_vld", + }, + { + .mask = BIT(1) | BIT(2) | BIT(3), + .shift = 24, + .clc_name = "i_rdi0_sample", + }, + { + .mask = BIT(0), + .shift = 28, + .clc_name = "i_rdi0_vl", + }, +}; + +static struct cam_sfe_top_cc_testbus_info + sfe780_testbus2_info[] = { + { + .mask = BIT(0), + .shift = 0, + .clc_name = "meta_consumed_ipp", + }, + { + .mask = BIT(0), + .shift = 1, + .clc_name = "meta_consumed_bus_rd", + }, + { + .mask = BIT(0), + .shift = 2, + .clc_name = "o_rdi0_overflow_rdy", + }, + { + .mask = BIT(0), + .shift = 3, + .clc_name = "sw_single_dual_en", + }, + { + .mask = BIT(0), + .shift = 4, + .clc_name = "rd_rup_cond", + }, + { + .mask = BIT(0), + .shift = 5, + .clc_name = "bus_rd_rdy", + }, + { + .mask = BIT(0), + .shift = 6, + .clc_name = "next_state", + }, + { + .mask = BIT(0), + .shift = 7, + .clc_name = "curr_state", + }, + { + .mask = BIT(0), + .shift = 8, + .clc_name = "xcfa_mode_cpy", + }, + { + .mask = BIT(0), + .shift = 9, + .clc_name = "rd_kick_off_cond", + }, +}; + +static struct cam_sfe_mode sfe_780_mode[] = { + { + .value = 0x0, + .desc = "QCFA HDR/non-HDR mode", + }, + { + .value = 0x1, + .desc = "sHDR 1exp mode", + }, + { + .value = 0x2, + .desc = "sHDR 2exp mode", + }, + { + .value = 0x3, + .desc = "sHDR 3exp mode", + }, + { + .value = 0x4, + .desc = "Bayer offline mode", + }, + { + .value = 0x5, + .desc = "Bayer FS mode", + }, +}; + +static struct cam_sfe_top_err_irq_desc sfe_780_top_irq_err_desc[] = { + { + .bitmask 
= BIT(14), + .err_name = "PP_VIOLATION", + .desc = "CCIF protocol violation within any of the modules in pixel pipeline", + }, + { + .bitmask = BIT(15), + .err_name = "DIAG_VIOLATION", + .desc = "HBI is less than the minimum required HBI", + }, + { + .bitmask = BIT(17), + .err_name = "CONTEXT_CONTROLLER_VIOLATION", + .desc = "HW detects that there is third context entering SFE core", + }, + { + .bitmask = BIT(18), + .err_name = "CONTEXT_CONTROLLER_SWITCH_VIOLATION", + .desc = "The old context is not completed processing inside SFE.", + }, +}; + +static struct cam_sfe_top_debug_info + sfe780_clc_dbg_module_info[CAM_SFE_TOP_DBG_REG_MAX][8] = { + SFE_DBG_INFO_ARRAY_4bit( + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + SFE_DBG_INFO_ARRAY_4bit( + "irc_throttle", + "crc_irc", + "zsl_throttle", + "crc_zsl", + "comp", + "ls", + "crc_prev", + "hdrc_ch2" + ), + SFE_DBG_INFO_ARRAY_4bit( + "hdrc_ch1", + "hdrc_ch0", + "bayer_rs_0", + "stats_bhist_ch0", + "stats_bg_ch0", + "lsc_ch0", + "crc_ch0", + "bpc_pdpc_id" + ), + SFE_DBG_INFO_ARRAY_4bit( + "2x2_2x1_ch0", + "decomp_ch0", + "msb_align_ch0", + "bpc_pdpc_ch0", + "ch_gain_ch0", + "bayer_rs_ch1", + "stats_bhist_ch1", + "stats_bg_ch1" + ), + SFE_DBG_INFO_ARRAY_4bit( + "lsc_ch1", + "crc_ch1", + "msb_align_ch1", + "bpc_pdpc_1d_ch1", + "ch_gain_ch1", + "bayer_rs_ch2", + "stats_bhist_ch2", + "stats_bg_ch2" + ), + SFE_DBG_INFO_ARRAY_4bit( + "lsc_ch2", + "crc_ch2", + "msb_align_ch2", + "bpc_pdpc_1d_ch2", + "ch_gain_ch2", + "demux_ch2", + "demux_ch1", + "demux_ch0" + ), + SFE_DBG_INFO_ARRAY_4bit( + "sfe_demux_pp", + "sfe_rdi4", + "sfe_rdi3", + "sfe_rdi2", + "sfe_rdi1", + "sfe_rdi0", + "bayer_rs_ch2", + "stats_bhist_ch2" + ), + SFE_DBG_INFO_ARRAY_4bit( + "sfe_stats_bg_ch2", + "sfe_bayer_rs_ch1", + "sfe_stats_bhist_ch1", + "sfe_stats_bg_ch1", + "sfe_bayer_rs_ch0", + "sfe_stats_bhist_ch0", + 
"sfe_stats_bg_ch0", + "sfe_irc" + ), + SFE_DBG_INFO_ARRAY_4bit( + "sfe_zsl", + "sfe_throttle", + "sfe_pp_zsl", + "sfe_conv_ch12", + "sfe_conv_ch1", + "sfe_conv_ch0", + "sfe_fe_ch2", + "sfe_fe_ch1" + ), + SFE_DBG_INFO_ARRAY_4bit( + "sfe_fe_ch0", + "sfe_fe_ch0", + "sfe_fe_ch0", + "sfe_fe_ch0", + "sfe_fe_ch0", + "sfe_fe_ch0", + "sfe_fe_ch0", + "sfe_fe_ch0" + ), + SFE_DBG_INFO_ARRAY_4bit( + "rdi4", + "rdi3", + "rdi2", + "rdi1", + "rdi0", + "pixel", + "reserved", + "reserved" + ), +}; + +static struct cam_sfe_top_common_reg_offset sfe780_top_commong_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .stats_feature = 0x00000008, + .core_cgc_ctrl = 0x00000010, + .ahb_clk_ovd = 0x00000014, + .core_cfg = 0x000000CC, + .ipp_violation_status = 0x00000030, + .diag_config = 0x00000034, + .diag_sensor_status_0 = 0x00000038, + .diag_sensor_status_1 = 0x0000003C, + .diag_sensor_frame_cnt_status0 = 0x00000040, + .diag_sensor_frame_cnt_status1 = 0x00000044, + .stats_ch2_throttle_cfg = 0x000000B0, + .stats_ch1_throttle_cfg = 0x000000B4, + .stats_ch0_throttle_cfg = 0x000000B8, + .hdr_throttle_cfg = 0x000000C0, + .sfe_op_throttle_cfg = 0x000000C4, + .irc_throttle_cfg = 0x000000C8, + .sfe_single_dual_cfg = 0x000000D0, + .bus_overflow_status = 0x00000868, + .num_perf_counters = 2, + .perf_count_reg = { + { + .perf_count_cfg = 0x00000080, + .perf_pix_count = 0x00000084, + .perf_line_count = 0x00000088, + .perf_stall_count = 0x0000008C, + .perf_always_count = 0x00000090, + .perf_count_status = 0x00000094, + }, + { + .perf_count_cfg = 0x00000098, + .perf_pix_count = 0x0000009C, + .perf_line_count = 0x000000A0, + .perf_stall_count = 0x000000A4, + .perf_always_count = 0x000000A8, + .perf_count_status = 0x000000AC, + }, + }, + .top_debug_cfg = 0x0000007C, + .top_cc_test_bus_ctrl = 0x000001F0, + .lcr_supported = false, + .ir_supported = true, + .qcfa_only = false, + .num_sfe_mode = ARRAY_SIZE(sfe_780_mode), + .sfe_mode = sfe_780_mode, + .ipp_violation_mask = 0x4000, + 
.top_debug_testbus_reg = 11, + .top_cc_test_bus_supported = true, + .num_debug_registers = 18, + .top_debug = { + 0x0000004C, + 0x00000050, + 0x00000054, + 0x00000058, + 0x0000005C, + 0x00000060, + 0x00000064, + 0x00000068, + 0x0000006C, + 0x00000070, + 0x00000074, + 0x00000078, + 0x000000D4, + 0x000000D8, + 0x000000DC, + 0x000000E0, + 0x000000E4, + 0x000000E8, + }, +}; + +static struct cam_sfe_modules_common_reg_offset sfe780_modules_common_reg = { + .demux_module_cfg = 0x00003060, + .demux_xcfa_cfg = 0x00003064, + .demux_hdr_cfg = 0x00003074, + .hdrc_remo_mod_cfg = 0x00005860, + .xcfa_hdrc_remo_out_mux_cfg = 0x00005A74, + .hdrc_remo_xcfa_bin_cfg = 0x00005A78, +}; + +static struct cam_sfe_top_common_reg_data sfe_780_top_common_reg_data = { + .error_irq_mask = 0x6C000, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + .sensor_sel_shift = 0x1, +}; + +static struct cam_sfe_path_common_reg_data sfe_780_pix_reg_data = { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .subscribe_irq_mask = 0xC, +}; + +static struct cam_sfe_path_common_reg_data sfe_780_rdi0_reg_data = { + .sof_irq_mask = 0x10, + .eof_irq_mask = 0x20, + .subscribe_irq_mask = 0x30, +}; + +static struct cam_sfe_path_common_reg_data sfe_780_rdi1_reg_data = { + .sof_irq_mask = 0x40, + .eof_irq_mask = 0x80, + .subscribe_irq_mask = 0xC0, +}; + +static struct cam_sfe_path_common_reg_data sfe_780_rdi2_reg_data = { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .subscribe_irq_mask = 0x300, +}; + +static struct cam_sfe_path_common_reg_data sfe_780_rdi3_reg_data = { + .sof_irq_mask = 0x400, + .eof_irq_mask = 0x800, + .subscribe_irq_mask = 0xC00, +}; + +static struct cam_sfe_path_common_reg_data sfe_780_rdi4_reg_data = { + .sof_irq_mask = 0x1000, + .eof_irq_mask = 0x2000, + .subscribe_irq_mask = 0x3000, +}; + +static struct cam_sfe_top_hw_info sfe780_top_hw_info = { + .common_reg = &sfe780_top_commong_reg, + .modules_hw_info = &sfe780_modules_common_reg, + .common_reg_data = 
&sfe_780_top_common_reg_data, + .ipp_module_desc = sfe_780_mod_desc, + .wr_client_desc = sfe_780_wr_client_desc, + .pix_reg_data = &sfe_780_pix_reg_data, + .rdi_reg_data[0] = &sfe_780_rdi0_reg_data, + .rdi_reg_data[1] = &sfe_780_rdi1_reg_data, + .rdi_reg_data[2] = &sfe_780_rdi2_reg_data, + .rdi_reg_data[3] = &sfe_780_rdi3_reg_data, + .rdi_reg_data[4] = &sfe_780_rdi4_reg_data, + .num_inputs = 6, + .input_type = { + CAM_SFE_PIX_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + }, + .num_top_errors = ARRAY_SIZE(sfe_780_top_irq_err_desc), + .top_err_desc = sfe_780_top_irq_err_desc, + .num_clc_module = 11, + .clc_dbg_mod_info = &sfe780_clc_dbg_module_info, + .num_of_testbus = 2, + .test_bus_info = { + /* TEST BUS 1 INFO */ + { + .debugfs_val = SFE_DEBUG_ENABLE_TESTBUS1, + .enable = false, + .value = 0x1, + .size = ARRAY_SIZE(sfe780_testbus1_info), + .testbus = sfe780_testbus1_info, + }, + /* TEST BUS 2 INFO */ + { + .debugfs_val = SFE_DEBUG_ENABLE_TESTBUS2, + .enable = false, + .value = 0x3, + .size = ARRAY_SIZE(sfe780_testbus2_info), + .testbus = sfe780_testbus2_info, + }, + }, +}; + +static struct cam_irq_register_set sfe780_bus_rd_irq_reg[1] = { + { + .mask_reg_offset = 0x00000404, + .clear_reg_offset = 0x00000408, + .status_reg_offset = 0x00000410, + .set_reg_offset = 0x00000418, + }, +}; + +static struct cam_sfe_bus_rd_err_irq_desc sfe780_bus_rd_irq_err_desc[] = { + { + .bitmask = BIT(0), + .err_name = "INFO_CONS_VIOLATION", + .desc = "Clients have illegal programming, check CONS_VIOLATION_STATUS", + }, + { + .bitmask = BIT(31), + .err_name = "INFO_CCIF_VIOLATION", + }, +}; + +static struct cam_sfe_bus_rd_constraint_error_desc + sfe780_bus_rd_cons_error_desc[CAM_SFE_BUS_RD_CONS_ERR_MAX] = { + { + .bitmask = BIT(0), + .error_desc = "Image Addr Unalign Latch", + }, + { + .bitmask = BIT(1), + .error_desc = "Ubwc Addr Unalign Latch", + }, + { + .bitmask = BIT(2), + .error_desc = "Stride Unalign 
Latch", + }, + { + .bitmask = BIT(3), + .error_desc = "Y Unit Unalign Latch", + }, + { + .bitmask = BIT(4), + .error_desc = "X Unit Unalign Latch", + }, + { + .bitmask = BIT(5), + .error_desc = "Image width Unalign Latch", + }, + { + .bitmask = BIT(6), + .error_desc = "Image height Unalign Latch", + }, + { + .bitmask = BIT(7), + .error_desc = "Meta Stride Unalign Latch", + }, +}; + +static struct cam_sfe_bus_rd_constraint_error_info sfe780_bus_rd_constraint_error_info = { + .constraint_error_list = sfe780_bus_rd_cons_error_desc, + .cons_chk_en_val = 0xC, + .num_cons_err = 8, +}; + +static struct cam_sfe_bus_rd_hw_info sfe780_bus_rd_hw_info = { + .common_reg = { + .hw_version = 0x00000400, + .misr_reset = 0x0000041C, + .pwr_iso_cfg = 0x00000424, + .input_if_cmd = 0x00000414, + .test_bus_ctrl = 0x0000042C, + .security_cfg = 0x00000420, + .cons_violation_status = 0x00000434, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe780_bus_rd_irq_reg, + .global_irq_cmd_offset = 0x0000040C, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = 3, + .bus_client_reg = { + /* BUS Client 0 */ + { + .cfg = 0x00000450, + .image_addr = 0x00000458, + .buf_width = 0x0000045C, + .buf_height = 0x00000460, + .stride = 0x00000464, + .unpacker_cfg = 0x00000468, + .latency_buf_allocation = 0x0000047C, + .system_cache_cfg = 0x0000049C, + .addr_cfg = 0x000004A4, + .debug_status_cfg = 0x00000490, + .debug_status_0 = 0x00000494, + .debug_status_1 = 0x00000498, + .name = "Fetch0", + }, + /* BUS Client 1 */ + { + .cfg = 0x000004F0, + .image_addr = 0x000004F8, + .buf_width = 0x000004FC, + .buf_height = 0x00000500, + .stride = 0x00000504, + .unpacker_cfg = 0x00000508, + .latency_buf_allocation = 0x0000051C, + .system_cache_cfg = 0x0000053C, + .addr_cfg = 0x00000544, + .debug_status_cfg = 0x00000530, + .debug_status_0 = 0x00000534, + .debug_status_1 = 0x00000538, + .name = "Fetch1", + }, + /* BUS Client 2 */ + { + .cfg = 0x00000590, + .image_addr = 0x00000598, + .buf_width = 
0x0000059C, + .buf_height = 0x000005A0, + .stride = 0x000005A4, + .unpacker_cfg = 0x000005A8, + .latency_buf_allocation = 0x000005BC, + .system_cache_cfg = 0x000005DC, + .addr_cfg = 0x000005E4, + .debug_status_cfg = 0x000005D0, + .debug_status_0 = 0x000005D4, + .debug_status_1 = 0x000005D8, + .name = "Fetch2", + }, + }, + .num_bus_rd_resc = 3, + .sfe_bus_rd_info = { + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI0, + .mid[0] = 0, + .max_width = -1, + .max_height = -1, + }, + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI1, + .mid[0] = 1, + .max_width = -1, + .max_height = -1, + }, + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI2, + .mid[0] = 2, + .max_width = -1, + .max_height = -1, + }, + }, + .num_bus_rd_errors = ARRAY_SIZE(sfe780_bus_rd_irq_err_desc), + .bus_rd_err_desc = sfe780_bus_rd_irq_err_desc, + .top_irq_shift = 0x1, + .latency_buf_allocation = 2048, + .sys_cache_default_val = 0x20, + .irq_err_mask = 0x1, + .fs_sync_shift = 0x5, + .constraint_error_info = &sfe780_bus_rd_constraint_error_info, +}; + +static struct cam_sfe_bus_wr_err_irq_desc sfe780_bus_wr_irq_err_desc[] = { + { + .bitmask = BIT(26), + .err_name = "IPCC_FENCE_DATA_ERR", + .desc = "IPCC or FENCE Data was not available in the Input Fifo", + }, + { + .bitmask = BIT(27), + .err_name = "IPCC_FENCE_ADDR_ERR", + .desc = "IPCC or FENCE address fifo was empty and read was attempted", + }, + { + .bitmask = BIT(28), + .err_name = "CONS_VIOLATION", + .desc = "Programming of software registers violated the constraints", + }, + { + .bitmask = BIT(30), + .err_name = "VIOLATION", + .desc = "Client has a violation in ccif protocol at input", + }, + { + .bitmask = BIT(31), + .err_name = "IMAGE_SIZE_VIOLATION", + .desc = "Programmed image size is not same as image size from the CCIF", + }, +}; + +static struct cam_sfe_bus_wr_constraint_error_desc + sfe780_bus_wr_cons_error_desc[CAM_SFE_BUS_CONS_ERR_MAX] = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input not supported" + }, + { + .bitmask = BIT(1), + 
.error_description = "PPC 1x2 input not supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input not supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input not supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format not supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 format not supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 32 BPP format not supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 64 BPP format not supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack MIPI 20 format not supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 14 format not supported" + }, + { + .bitmask = BIT(10), + .error_description = "Pack MIPI 12 format not supported" + }, + { + .bitmask = BIT(11), + .error_description = "Pack MIPI 10 format not supported" + }, + { + .bitmask = BIT(12), + .error_description = "Pack 128 BPP format not supported" + }, + { + .bitmask = BIT(13), + .error_description = "UBWC NV12 format not supported" + }, + { + .bitmask = BIT(14), + .error_description = "UBWC NV12 4R format not supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC TP10 format not supported" + }, + { + .bitmask = BIT(16), + .error_description = "Frame based Mode not supported" + }, + { + .bitmask = BIT(17), + .error_description = "Index based Mode not supported" + }, + { + .bitmask = BIT(18), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = BIT(19), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(20), + .error_description = "FIFO frmheader addr unalign" + }, + { + .bitmask = BIT(21), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(22), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(23), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Stride 
unalign" + }, + { + .bitmask = BIT(25), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Image Width unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Image Height unalign" + }, + { + .bitmask = BIT(28), + .error_description = "Meta Stride unalign" + }, +}; + +static struct cam_sfe_bus_wr_constraint_error_info sfe780_bus_wr_constraint_error_info = { + .constraint_error_list = sfe780_bus_wr_cons_error_desc, + .num_cons_err = 29, + .img_addr_unalign_shift = 21, + .img_width_unalign_shift = 26, +}; + +static struct cam_irq_register_set sfe780_bus_wr_irq_reg[1] = { + { + .mask_reg_offset = 0x00000818, + .clear_reg_offset = 0x00000820, + .status_reg_offset = 0x00000828, + .set_reg_offset = 0x00000850, + }, +}; + +static struct cam_sfe_bus_wr_hw_info sfe780_bus_wr_hw_info = { + .common_reg = { + .hw_version = 0x00000800, + .cgc_ovd = 0x00000808, + .if_frameheader_cfg = { + 0x00000834, + 0x00000838, + 0x0000083C, + 0x00000840, + 0x00000844, + 0x00000848, + }, + .pwr_iso_cfg = 0x0000085C, + .overflow_status_clear = 0x00000860, + .ccif_violation_status = 0x00000864, + .overflow_status = 0x00000868, + .image_size_violation_status = 0x00000870, + .debug_status_top_cfg = 0x000008D4, + .debug_status_top = 0x000008D8, + .test_bus_ctrl = 0x000008DC, + .top_irq_mask_0 = 0x00000020, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe780_bus_wr_irq_reg, + .global_irq_cmd_offset = 0x00000830, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = 16, + .bus_client_reg = { + /* BUS Client 0 REMOSAIC */ + { + .cfg = 0x00000A00, + .image_addr = 0x00000A04, + .frame_incr = 0x00000A08, + .image_cfg_0 = 0x00000A0C, + .image_cfg_1 = 0x00000A10, + .image_cfg_2 = 0x00000A14, + .packer_cfg = 0x00000A18, + .frame_header_addr = 0x00000A20, + .frame_header_incr = 0x00000A24, + .frame_header_cfg = 0x00000A28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000A30, + .irq_subsample_pattern = 0x00000A34, + 
.framedrop_period = 0x00000A38, + .framedrop_pattern = 0x00000A3C, + .system_cache_cfg = 0x00000A68, + .addr_cfg = 0x00000A70, + .addr_status_0 = 0x00000A74, + .addr_status_1 = 0x00000A78, + .addr_status_2 = 0x00000A7C, + .addr_status_3 = 0x00000A80, + .debug_status_cfg = 0x00000A84, + .debug_status_0 = 0x00000A88, + .debug_status_1 = 0x00000A8C, + .mmu_prefetch_cfg = 0x00000A60, + .mmu_prefetch_max_offset = 0x00000A64, + .bw_limiter_addr = 0x00000A1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_0, + }, + /* BUS Client 1 IR OUT */ + { + .cfg = 0x00000B00, + .image_addr = 0x00000B04, + .frame_incr = 0x00000B08, + .image_cfg_0 = 0x00000B0C, + .image_cfg_1 = 0x00000B10, + .image_cfg_2 = 0x00000B14, + .packer_cfg = 0x00000B18, + .frame_header_addr = 0x00000B20, + .frame_header_incr = 0x00000B24, + .frame_header_cfg = 0x00000B28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000B30, + .irq_subsample_pattern = 0x00000B34, + .framedrop_period = 0x00000B38, + .framedrop_pattern = 0x00000B3C, + .system_cache_cfg = 0x00000B68, + .addr_cfg = 0x00000B70, + .addr_status_0 = 0x00000B74, + .addr_status_1 = 0x00000B78, + .addr_status_2 = 0x00000B7C, + .addr_status_3 = 0x00000B80, + .debug_status_cfg = 0x00000B84, + .debug_status_0 = 0x00000B88, + .debug_status_1 = 0x00000B8C, + .mmu_prefetch_cfg = 0x00000B60, + .mmu_prefetch_max_offset = 0x00000B64, + .bw_limiter_addr = 0x00000B1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_1, + }, + /* BUS Client 2 STATS_BE_0 */ + { + .cfg = 0x00000C00, + .image_addr = 0x00000C04, + .frame_incr = 0x00000C08, + .image_cfg_0 = 0x00000C0C, + .image_cfg_1 = 0x00000C10, + .image_cfg_2 = 0x00000C14, + .packer_cfg = 0x00000C18, + .frame_header_addr = 0x00000C20, + .frame_header_incr = 0x00000C24, + .frame_header_cfg = 0x00000C28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000C30, + .irq_subsample_pattern = 0x00000C34, + .framedrop_period = 0x00000C38, + .framedrop_pattern = 0x00000C3C, + .system_cache_cfg = 0x00000C68, + .addr_cfg = 0x00000C70, 
+ .addr_status_0 = 0x00000C74, + .addr_status_1 = 0x00000C78, + .addr_status_2 = 0x00000C7C, + .addr_status_3 = 0x00000C80, + .debug_status_cfg = 0x00000C84, + .debug_status_0 = 0x00000C88, + .debug_status_1 = 0x00000C8C, + .mmu_prefetch_cfg = 0x00000C60, + .mmu_prefetch_max_offset = 0x00000C64, + .bw_limiter_addr = 0x00000C1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_2, + }, + /* BUS Client 3 STATS_BHIST_0 */ + { + .cfg = 0x00000D00, + .image_addr = 0x00000D04, + .frame_incr = 0x00000D08, + .image_cfg_0 = 0x00000D0C, + .image_cfg_1 = 0x00000D10, + .image_cfg_2 = 0x00000D14, + .packer_cfg = 0x00000D18, + .frame_header_addr = 0x00000D20, + .frame_header_incr = 0x00000D24, + .frame_header_cfg = 0x00000D28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000D30, + .irq_subsample_pattern = 0x00000D34, + .framedrop_period = 0x00000D38, + .framedrop_pattern = 0x00000D3C, + .system_cache_cfg = 0x00000D68, + .addr_cfg = 0x00000D70, + .addr_status_0 = 0x00000D74, + .addr_status_1 = 0x00000D78, + .addr_status_2 = 0x00000D7C, + .addr_status_3 = 0x00000D80, + .debug_status_cfg = 0x00000D84, + .debug_status_0 = 0x00000D88, + .debug_status_1 = 0x00000D8C, + .mmu_prefetch_cfg = 0x00000D60, + .mmu_prefetch_max_offset = 0x00000D64, + .bw_limiter_addr = 0x00000D1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_2, + }, + /* BUS Client 4 STATS_BE_1 */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + .frame_header_cfg = 0x00000E28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00000E38, + .framedrop_pattern = 0x00000E3C, + .system_cache_cfg = 0x00000E68, + .addr_cfg = 0x00000E70, + .addr_status_0 = 0x00000E74, + .addr_status_1 = 0x00000E78, + .addr_status_2 = 0x00000E7C, + .addr_status_3 = 
0x00000E80, + .debug_status_cfg = 0x00000E84, + .debug_status_0 = 0x00000E88, + .debug_status_1 = 0x00000E8C, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 0x00000E64, + .bw_limiter_addr = 0x00000E1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_3, + }, + /* BUS Client 5 STATS_BHIST_1 */ + { + .cfg = 0x00000F00, + .image_addr = 0x00000F04, + .frame_incr = 0x00000F08, + .image_cfg_0 = 0x00000F0C, + .image_cfg_1 = 0x00000F10, + .image_cfg_2 = 0x00000F14, + .packer_cfg = 0x00000F18, + .frame_header_addr = 0x00000F20, + .frame_header_incr = 0x00000F24, + .frame_header_cfg = 0x00000F28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000F30, + .irq_subsample_pattern = 0x00000F34, + .framedrop_period = 0x00000F38, + .framedrop_pattern = 0x00000F3C, + .system_cache_cfg = 0x00000F68, + .addr_cfg = 0x00000F70, + .addr_status_0 = 0x00000F74, + .addr_status_1 = 0x00000F78, + .addr_status_2 = 0x00000F7C, + .addr_status_3 = 0x00000F80, + .debug_status_cfg = 0x00000F84, + .debug_status_0 = 0x00000F88, + .debug_status_1 = 0x00000F8C, + .mmu_prefetch_cfg = 0x00000F60, + .mmu_prefetch_max_offset = 0x00000F64, + .bw_limiter_addr = 0x00000F1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_3, + }, + /* BUS Client 6 STATS_BE_2 */ + { + .cfg = 0x00001000, + .image_addr = 0x00001004, + .frame_incr = 0x00001008, + .image_cfg_0 = 0x0000100C, + .image_cfg_1 = 0x00001010, + .image_cfg_2 = 0x00001014, + .packer_cfg = 0x00001018, + .frame_header_addr = 0x00001020, + .frame_header_incr = 0x00001024, + .frame_header_cfg = 0x00001028, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001030, + .irq_subsample_pattern = 0x00001034, + .framedrop_period = 0x00001038, + .framedrop_pattern = 0x0000103C, + .system_cache_cfg = 0x00001068, + .addr_cfg = 0x00001070, + .addr_status_0 = 0x00001074, + .addr_status_1 = 0x00001078, + .addr_status_2 = 0x0000107C, + .addr_status_3 = 0x00001080, + .debug_status_cfg = 0x00001084, + .debug_status_0 = 0x00001088, + .debug_status_1 = 0x0000108C, + 
.mmu_prefetch_cfg = 0x00001060, + .mmu_prefetch_max_offset = 0x00001064, + .bw_limiter_addr = 0x0000101C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_4, + }, + /* BUS Client 7 STATS_BHIST_2 */ + { + .cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + .image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + .packer_cfg = 0x00001118, + .frame_header_addr = 0x00001120, + .frame_header_incr = 0x00001124, + .frame_header_cfg = 0x00001128, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .framedrop_period = 0x00001138, + .framedrop_pattern = 0x0000113C, + .system_cache_cfg = 0x00001168, + .addr_cfg = 0x00001170, + .addr_status_0 = 0x00001174, + .addr_status_1 = 0x00001178, + .addr_status_2 = 0x0000117C, + .addr_status_3 = 0x00001180, + .debug_status_cfg = 0x00001184, + .debug_status_0 = 0x00001188, + .debug_status_1 = 0x0000118C, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .bw_limiter_addr = 0x0000111C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_4, + }, + /* BUS Client 8 STATS_RS_0 */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .framedrop_period = 0x00001238, + .framedrop_pattern = 0x0000123C, + .system_cache_cfg = 0x00001268, + .addr_cfg = 0x00001270, + .addr_status_0 = 0x00001274, + .addr_status_1 = 0x00001278, + .addr_status_2 = 0x0000127C, + .addr_status_3 = 0x00001280, + .debug_status_cfg = 0x00001284, + .debug_status_0 = 0x00001288, + .debug_status_1 = 0x0000128C, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .bw_limiter_addr = 0x0000121C, + .comp_group 
= CAM_SFE_BUS_WR_COMP_GRP_2, + }, + /* BUS Client 9 STATS_RS_1 */ + { + .cfg = 0x00001300, + .image_addr = 0x00001304, + .frame_incr = 0x00001308, + .image_cfg_0 = 0x0000130C, + .image_cfg_1 = 0x00001310, + .image_cfg_2 = 0x00001314, + .packer_cfg = 0x00001318, + .frame_header_addr = 0x00001320, + .frame_header_incr = 0x00001324, + .frame_header_cfg = 0x00001328, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001330, + .irq_subsample_pattern = 0x00001334, + .framedrop_period = 0x00001338, + .framedrop_pattern = 0x0000133C, + .system_cache_cfg = 0x00001368, + .addr_cfg = 0x00001370, + .addr_status_0 = 0x00001374, + .addr_status_1 = 0x00001378, + .addr_status_2 = 0x0000137C, + .addr_status_3 = 0x00001380, + .debug_status_cfg = 0x00001384, + .debug_status_0 = 0x00001388, + .debug_status_1 = 0x0000138C, + .mmu_prefetch_cfg = 0x00001360, + .mmu_prefetch_max_offset = 0x00001364, + .bw_limiter_addr = 0x0000131C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_3, + }, + /* BUS Client 10 STATS_RS_2 */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .frame_header_addr = 0x00001420, + .frame_header_incr = 0x00001424, + .frame_header_cfg = 0x00001428, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .system_cache_cfg = 0x00001468, + .addr_cfg = 0x00001470, + .addr_status_0 = 0x00001474, + .addr_status_1 = 0x00001478, + .addr_status_2 = 0x0000147C, + .addr_status_3 = 0x00001480, + .debug_status_cfg = 0x00001484, + .debug_status_0 = 0x00001488, + .debug_status_1 = 0x0000148C, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .bw_limiter_addr = 0x0000141C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_4, + }, + /* BUS Client 11 RDI0 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + 
.frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + .frame_header_incr = 0x00001524, + .frame_header_cfg = 0x00001528, + .line_done_cfg = 0x0000152C, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .system_cache_cfg = 0x00001568, + .addr_cfg = 0x00001570, + .addr_status_0 = 0x00001574, + .addr_status_1 = 0x00001578, + .addr_status_2 = 0x0000157C, + .addr_status_3 = 0x00001580, + .debug_status_cfg = 0x00001584, + .debug_status_0 = 0x00001588, + .debug_status_1 = 0x0000158C, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_5, + }, + /* BUS Client 12 RDI1 */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .line_done_cfg = 0x0000162C, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .addr_status_0 = 0x00001674, + .addr_status_1 = 0x00001678, + .addr_status_2 = 0x0000167C, + .addr_status_3 = 0x00001680, + .debug_status_cfg = 0x00001684, + .debug_status_0 = 0x00001688, + .debug_status_1 = 0x0000168C, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_6, + }, + /* BUS Client 13 RDI2 */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + 
.packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .line_done_cfg = 0x0000172C, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .addr_status_0 = 0x00001774, + .addr_status_1 = 0x00001778, + .addr_status_2 = 0x0000177C, + .addr_status_3 = 0x00001780, + .debug_status_cfg = 0x00001784, + .debug_status_0 = 0x00001788, + .debug_status_1 = 0x0000178C, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .bw_limiter_addr = 0x0000171C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_7, + }, + /* BUS Client 14 RDI3 */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .line_done_cfg = 0x0000182C, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .addr_status_0 = 0x00001874, + .addr_status_1 = 0x00001878, + .addr_status_2 = 0x0000187C, + .addr_status_3 = 0x00001880, + .debug_status_cfg = 0x00001884, + .debug_status_0 = 0x00001888, + .debug_status_1 = 0x0000188C, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .bw_limiter_addr = 0x0000181C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_8, + }, + /* BUS Client 15 RDI4 */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg 
= 0x00001928, + .line_done_cfg = 0x0000192C, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + .system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + .addr_status_0 = 0x00001974, + .addr_status_1 = 0x00001978, + .addr_status_2 = 0x0000197C, + .addr_status_3 = 0x00001980, + .debug_status_cfg = 0x00001984, + .debug_status_0 = 0x00001988, + .debug_status_1 = 0x0000198C, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .bw_limiter_addr = 0x0000191C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_9, + }, + }, + .num_out = 16, + .sfe_out_hw_info = { + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_1, + .mid[0] = 45, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 11, + .en_line_done = 1, + .name = "RDI_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_2, + .mid[0] = 46, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 12, + .en_line_done = 1, + .name = "RDI_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_3, + .mid[0] = 47, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 13, + .en_line_done = 1, + .name = "RDI_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_4, + .mid[0] = 48, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 14, + .name = "RDI_3", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI4, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_5, + .mid[0] = 49, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 15, + .name = "RDI_4", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RAW_DUMP, + .max_width = 9312, + .max_height = 6992, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 32, + .mid[1] = 33, + 
.num_mid = 2, + .num_wm = 1, + .wm_idx = 0, + .name = "REMOSIAC", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_IR, + .max_width = 9312, + .max_height = 6772, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 34, + .mid[1] = 35, + .num_wm = 1, + .wm_idx = 1, + .name = "IR_OUT", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_0, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 36, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 2, + .name = "STATS_BE_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_0, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 37, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 3, + .name = "STATS_BHIST_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_1, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 38, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 4, + .name = "STATS_BE_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_1, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 39, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 5, + .name = "STATS_BHIST_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_2, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 40, + .num_wm = 1, + .wm_idx = 6, + .name = "STATS_BE_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_2, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 41, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 7, + .name = "STATS_BHIST_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BAYER_RS_0, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 42, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 8, + .name = "STATS_RS_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BAYER_RS_1, + .max_width = 7296, + .max_height = 5472, + .source_group = 
CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 43, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 9, + .name = "STATS_RS_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BAYER_RS_2, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 44, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 10, + .name = "STATS_RS_2", + }, + }, + .constraint_error_info = &sfe780_bus_wr_constraint_error_info, + .num_bus_wr_errors = ARRAY_SIZE(sfe780_bus_wr_irq_err_desc), + .bus_wr_err_desc = sfe780_bus_wr_irq_err_desc, + .comp_done_mask = { + BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23), + BIT(24), BIT(25), BIT(26), + }, + .num_comp_grp = 10, + .line_done_cfg = 0x11, + .top_irq_shift = 0x0, + .max_out_res = CAM_ISP_SFE_OUT_RES_BASE + 17, + .pack_align_shift = 5, + .max_bw_counter_limit = 0xFF, + .sys_cache_default_val = 0x20, + .irq_err_mask = 0xD0000000, +}; + +static struct cam_irq_register_set sfe780_top_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000020, + .clear_reg_offset = 0x00000024, + .status_reg_offset = 0x00000028, + .set_reg_offset = 0x0000002C, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, +}; + +static struct cam_irq_controller_reg_info sfe780_top_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe780_top_irq_reg_set, + .global_irq_cmd_offset = 0x0000001C, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +struct cam_sfe_hw_info cam_sfe780_hw_info = { + .irq_reg_info = &sfe780_top_irq_reg_info, + + .bus_wr_version = CAM_SFE_BUS_WR_VER_1_0, + .bus_wr_hw_info = &sfe780_bus_wr_hw_info, + + .bus_rd_version = CAM_SFE_BUS_RD_VER_1_0, + .bus_rd_hw_info = &sfe780_bus_rd_hw_info, + + .top_version = CAM_SFE_TOP_VER_1_0, + .top_hw_info = &sfe780_top_hw_info, +}; + +#endif /* _CAM_SFE780_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe880.h 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe880.h new file mode 100644 index 0000000000..081923348f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe880.h @@ -0,0 +1,1902 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_SFE880_H_ +#define _CAM_SFE880_H_ +#include "cam_sfe_core.h" +#include "cam_sfe_bus.h" +#include "cam_sfe_bus_rd.h" +#include "cam_sfe_bus_wr.h" + +static struct cam_sfe_top_module_desc sfe_880_mod_desc[] = { + { + .id = 0, + .desc = "CRC_IRC", + }, + { + .id = 1, + .desc = "CRC_ZSL", + }, + { + .id = 2, + .desc = "COMP", + }, + { + .id = 3, + .desc = "LINE_SMOOTH", + }, + { + .id = 4, + .desc = "CRC_PREV", + }, + { + .id = 5, + .desc = "HDRC", + }, + { + .id = 6, + .desc = "DECOMP", + }, + { + .id = 7, + .desc = "BPC_PDPC", + }, + { + .id = 8, + .desc = "PDPC_BPC_ID_CH0", + }, + { + .id = 9, + .desc = "RS_CH0", + }, + { + .id = 10, + .desc = "BHIST_CH0", + }, + { + .id = 11, + .desc = "BG_CH0", + }, + { + .id = 12, + .desc = "LSC_CH0", + }, + { + .id = 13, + .desc = "CRC_CH0", + }, + { + .id = 14, + .desc = "CCIF_2x2_2x1", + }, + { + .id = 15, + .desc = "GAIN_CH0", + }, + { + .id = 16, + .desc = "PDPC_BPC_ID_CH1", + }, + { + .id = 17, + .desc = "RS_CH1", + }, + { + .id = 18, + .desc = "BHIST_CH1", + }, + { + .id = 19, + .desc = "BG_CH1", + }, + { + .id = 20, + .desc = "LSC_CH1", + }, + { + .id = 21, + .desc = "CRC_CH1", + }, + { + .id = 22, + .desc = "GAIN_CH1", + }, + { + .id = 23, + .desc = "PDPC_BPC_ID_CH2", + }, + { + .id = 24, + .desc = "RS_CH2", + }, + { + .id = 25, + .desc = "BHIST_CH2", + }, + { + .id = 26, + .desc = "BG_CH2", + }, + { + .id = 27, + .desc = "LSC_CH2", + }, + { + .id = 28, + .desc = "CRC_CH2", + }, + { + .id = 29, + .desc = "GAIN_CH2", + }, + { + .id = 30, + .desc = 
"XCFA_DEMUX", + }, + { + .id = 31, + .desc = "fcg_stats_ch2", + }, + { + .id = 32, + .desc = "fcg_stats_ch1", + }, + { + .id = 33, + .desc = "fcg_ch2", + }, + { + .id = 34, + .desc = "fcg_ch1", + }, + { + .id = 35, + .desc = "crc_hdr_stats", + }, +}; + +static struct cam_sfe_wr_client_desc sfe_880_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "REMOSAIC", + }, + { + .wm_id = 1, + .desc = "IR_OUT", + }, + { + .wm_id = 2, + .desc = "STATS_BE0", + }, + { + .wm_id = 3, + .desc = "STATS_BHIST0", + }, + { + .wm_id = 4, + .desc = "STATS_BE1", + }, + { .wm_id = 5, + .desc = "STATS_BHIST1", + }, + { + .wm_id = 6, + .desc = "STATS_BE2", + }, + { + .wm_id = 7, + .desc = "STATS_BHIST2", + }, + { + .wm_id = 8, + .desc = "STATS_RS0", + }, + { + .wm_id = 9, + .desc = "STATS_RS1", + }, + { + .wm_id = 10, + .desc = "STATS_RS2", + }, + { + .wm_id = 11, + .desc = "RDI_0", + }, + { + .wm_id = 12, + .desc = "RDI_1", + }, + { + .wm_id = 13, + .desc = "RDI_2", + }, + { + .wm_id = 14, + .desc = "RDI_3", + }, + { + .wm_id = 15, + .desc = "RDI_4", + }, + { + .wm_id = 16, + .desc = "HDR_STATS" + }, +}; + +static struct cam_sfe_top_cc_testbus_info + sfe880_testbus1_info[] = { + { + .mask = BIT(0), + .shift = 0, + .clc_name = "sw_xcfa_mode_sel", + }, + { + .mask = BIT(0), + .shift = 1, + .clc_name = "bus_rd_line_done_rdi2", + }, + { + .mask = BIT(0), + .shift = 2, + .clc_name = "bus_rd_line_done_rdi1", + }, + { + .mask = BIT(0), + .shift = 3, + .clc_name = "bus_rd_line_done_rdi0", + }, + { + .mask = BIT(0), + .shift = 4, + .clc_name = "down_count_flag", + }, + { + .mask = BIT(0), + .shift = 5, + .clc_name = "rdi2_upcount_flag", + }, + { + .mask = BIT(0), + .shift = 6, + .clc_name = "rdi1_upcount_flag", + }, + { + .mask = BIT(0), + .shift = 7, + .clc_name = "rdi0_upcount_flag", + }, + { + .mask = BIT(0) | BIT(1), + .shift = 8, + .clc_name = "rdi2_meta_pkts", + }, + { + .mask = BIT(0) | BIT(1), + .shift = 10, + .clc_name = "rdi1_meta_pkts", + }, + { + .mask = BIT(0) | BIT(1), + .shift = 12, + 
.clc_name = "rdi0_meta_pkts", + }, + { + .mask = BIT(1) | BIT(2) | BIT(3), + .shift = 14, + .clc_name = "i_rdi2_sample", + }, + { + .mask = BIT(0), + .shift = 18, + .clc_name = "i_rdi2_vld", + }, + { + .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), + .shift = 19, + .clc_name = "i_rdi1_sample", + }, + { + .mask = BIT(0), + .shift = 23, + .clc_name = "i_rdi1_vld", + }, + { + .mask = BIT(1) | BIT(2) | BIT(3), + .shift = 24, + .clc_name = "i_rdi0_sample", + }, + { + .mask = BIT(0), + .shift = 28, + .clc_name = "i_rdi0_vld", + }, +}; + +static struct cam_sfe_top_cc_testbus_info + sfe880_testbus2_info[] = { + { + .mask = BIT(0), + .shift = 0, + .clc_name = "meta_consumed_ipp", + }, + { + .mask = BIT(0), + .shift = 1, + .clc_name = "meta_consumed_bus_rd", + }, + { + .mask = BIT(0), + .shift = 2, + .clc_name = "o_rdi0_overflow_rdy", + }, + { + .mask = BIT(0), + .shift = 3, + .clc_name = "sw_single_dual_en", + }, + { + .mask = BIT(0), + .shift = 4, + .clc_name = "rd_rup_cond", + }, + { + .mask = BIT(0), + .shift = 5, + .clc_name = "bus_rd_rdy", + }, + { + .mask = BIT(0), + .shift = 6, + .clc_name = "next_state", + }, + { + .mask = BIT(0), + .shift = 7, + .clc_name = "curr_state", + }, + { + .mask = BIT(0), + .shift = 8, + .clc_name = "xcfa_mode_cpy", + }, + { + .mask = BIT(0), + .shift = 9, + .clc_name = "rd_kick_off_cond", + }, +}; + +static struct cam_sfe_mode sfe_880_mode[] = { + { + .value = 0x0, + .desc = "QCFA HDR/non-HDR mode", + }, + { + .value = 0x1, + .desc = "sHDR 1exp mode", + }, + { + .value = 0x2, + .desc = "sHDR 2exp mode", + }, + { + .value = 0x3, + .desc = "sHDR 3exp mode", + }, + { + .value = 0x4, + .desc = "Bayer offline mode", + }, + { + .value = 0x5, + .desc = "Bayer FS mode", + }, +}; + +static struct cam_sfe_top_err_irq_desc sfe_880_top_irq_err_desc[] = { + { + .bitmask = BIT(14), + .err_name = "PP_VIOLATION", + .desc = "CCIF protocol violation within modules in pixel pipeline", + }, + { + .bitmask = BIT(15), + .err_name = "DIAG_VIOLATION", + .desc = 
"Sensor: HBI is less than the minimum required HBI", + .debug = "Check Sensor config", + }, + { + .bitmask = BIT(17), + .err_name = "CONTEXT_CONTROLLER_VIOLATION", + .desc = + "HW detects that there is third context entering SFE core, possible hang downstream or incoming input is streaming faster", + }, + { + .bitmask = BIT(18), + .err_name = "CONTEXT_CONTROLLER_SWITCH_VIOLATION", + .desc = "Old context is not completed processing during switch mode", + }, +}; + +static struct cam_sfe_top_debug_info + sfe880_clc_dbg_module_info[CAM_SFE_TOP_DBG_REG_MAX][8] = { + SFE_DBG_INFO_ARRAY_4bit( + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + SFE_DBG_INFO_ARRAY_4bit( + "irc_throttle", + "crop_rnd_clamp_ir", + "zsl_throttle", + "crop_rnd_clamp_zsl", + "compdecomp_zsl", + "line_smooth", + "crop_rnd_clamp_prev", + "xcfa_hdrc_remo_ip2" + ), + SFE_DBG_INFO_ARRAY_4bit( + "xcfa_hdrc_remo_ip1", + "xcfa_hdrc_remo_ip0", + "stats_rs_ch0", + "stats_bhist_ch0", + "stats_awb_bg_ch0", + "lsc_ch0", + "crop_rnd_clamp_ch0", + "1d_pdpc_ch0" + ), + SFE_DBG_INFO_ARRAY_4bit( + "clc_2x2_2x1", + "compdecomp_prev", + "msb_align_ch0", + "xcfa_bpc_pdpc", + "channel_gains_ch0", + "stats_rs_ch1", + "stats_bhist_ch1", + "stats_awb_bg_ch1" + ), + SFE_DBG_INFO_ARRAY_4bit( + "lsc_ch1", + "crop_rnd_clamp_ch1", + "msb_align_ch1", + "1d_pdpc_ch1", + "channel_gains_ch1", + "stats_rs_ch2", + "stats_bhist_ch2", + "stats_awb_bg_ch2" + ), + SFE_DBG_INFO_ARRAY_4bit( + "lsc_ch2", + "crop_rnd_clamp_ch2", + "msb_align_ch2", + "1d_pdpc_ch2", + "channel_gains_ch2", + "xcfa_demux_fetch2", + "xcfa_demux_fetch1", + "xcfa_demux_fetch0" + ), + SFE_DBG_INFO_ARRAY_4bit( + "xcfa_demux_csid", + "sfe_hdr_stats_throttle", + "fcg_stats_ch2", + "fcg_stats_ch1", + "fcg_ch2", + "fcg_ch1", + "crop_rnd_clamp_hdr_stats", + "stats_rs_ch0_throttler" + ), + SFE_DBG_INFO_ARRAY_4bit( + 
"stats_bhist_ch0_throttler", + "stats_bg_ch0_throttler", + "stats_rs_ch1_throttler", + "stats_bhist_ch1_throttler", + "stats_bg_ch1_throttler", + "stats_rs_ch2_throttler", + "stats_bhist_ch2_throttler", + "stats_bg_ch2_throttler" + ), + SFE_DBG_INFO_ARRAY_4bit( + "sfe_rdi4", + "sfe_rdi3", + "sfe_rdi2", + "sfe_rdi1", + "sfe_rdi0", + "stats_rs2", + "stats_bhist2", + "stats_be2" + ), + SFE_DBG_INFO_ARRAY_4bit( + "stats_rs1", + "stats_bhist1", + "stats_be1", + "stats_rs0", + "stats_bhist0", + "stats_be0", + "ir_out", + "remosaic" + ), + SFE_DBG_INFO_ARRAY_4bit( + "hdr_stats", + "sfe_op_throttler", + "sfe_pp_zsl", + "ccif_sample_width_conv_hdr_remo", + "ccif_converter_ch2", + "ccif_converter_ch1", + "ccif_converter_ch0", + "fetch2" + ), + SFE_DBG_INFO_ARRAY_4bit( + "fetch1", + "fetch0", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved" + ), +}; + +static struct cam_sfe_top_common_reg_offset sfe880_top_commong_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .stats_feature = 0x00000008, + .core_cgc_ctrl = 0x00000010, + .ahb_clk_ovd = 0x00000014, + .core_cfg = 0x000000CC, + .ipp_violation_status = 0x00000030, + .diag_config = 0x00000034, + .diag_sensor_status_0 = 0x00000038, + .diag_sensor_status_1 = 0x0000003C, + .diag_sensor_frame_cnt_status0 = 0x00000040, + .diag_sensor_frame_cnt_status1 = 0x00000044, + .stats_ch2_throttle_cfg = 0x000000B0, + .stats_ch1_throttle_cfg = 0x000000B4, + .stats_ch0_throttle_cfg = 0x000000B8, + .hdr_throttle_cfg = 0x000000C0, + .sfe_op_throttle_cfg = 0x000000C4, + .irc_throttle_cfg = 0x000000C8, + .sfe_single_dual_cfg = 0x000000D0, + .bus_overflow_status = 0x00000868, + .num_perf_counters = 2, + .perf_count_reg = { + { + .perf_count_cfg = 0x00000080, + .perf_pix_count = 0x00000084, + .perf_line_count = 0x00000088, + .perf_stall_count = 0x0000008C, + .perf_always_count = 0x00000090, + .perf_count_status = 0x00000094, + }, + { + .perf_count_cfg = 0x00000098, + .perf_pix_count = 0x0000009C, + 
.perf_line_count = 0x000000A0, + .perf_stall_count = 0x000000A4, + .perf_always_count = 0x000000A8, + .perf_count_status = 0x000000AC, + }, + }, + .top_debug_cfg = 0x0000007C, + .top_cc_test_bus_ctrl = 0x000001F0, + .lcr_supported = false, + .ir_supported = true, + .qcfa_only = false, + .num_sfe_mode = ARRAY_SIZE(sfe_880_mode), + .sfe_mode = sfe_880_mode, + .ipp_violation_mask = 0x4000, + .top_debug_testbus_reg = 13, + .top_debug_nonccif_regstart_idx = 12, + .top_cc_test_bus_supported = true, + .num_debug_registers = 20, + .top_debug = { + 0x0000004C, + 0x00000050, + 0x00000054, + 0x00000058, + 0x0000005C, + 0x00000060, + 0x00000064, + 0x00000068, + 0x0000006C, + 0x00000070, + 0x00000074, + 0x00000078, + 0x000000EC, + 0x000000F0, + 0x000000F4, + 0x000000F8, + 0x000000FC, + 0x00000100, + 0x00000104, + 0x00000108, + }, +}; + +static struct cam_sfe_fcg_module_info sfe880_fcg_module_info = { + .max_fcg_ch_ctx = 2, + .max_fcg_predictions = 3, + .fcg_index_shift = 16, + .max_reg_val_pair_size = 16, + .fcg_type_size = 2, + .fcg_ch1_phase_index_cfg_0 = 0x00007270, + .fcg_ch1_phase_index_cfg_1 = 0x00007274, + .fcg_ch2_phase_index_cfg_0 = 0x00007470, + .fcg_ch2_phase_index_cfg_1 = 0x00007474, + .fcg_ch1_stats_phase_index_cfg_0 = 0x00007670, + .fcg_ch1_stats_phase_index_cfg_1 = 0x00007674, + .fcg_ch2_stats_phase_index_cfg_0 = 0x00007870, + .fcg_ch2_stats_phase_index_cfg_1 = 0x00007874, +}; + +static struct cam_sfe_modules_common_reg_offset sfe880_modules_common_reg = { + .demux_module_cfg = 0x00003060, + .demux_xcfa_cfg = 0x00003064, + .demux_hdr_cfg = 0x00003074, + .hdrc_remo_mod_cfg = 0x00005860, + .xcfa_hdrc_remo_out_mux_cfg = 0x00005A74, + .hdrc_remo_xcfa_bin_cfg = 0x00005A78, + .fcg_module_info = &sfe880_fcg_module_info, + .fcg_supported = true, +}; + +static struct cam_sfe_top_common_reg_data sfe_880_top_common_reg_data = { + .error_irq_mask = 0x6C000, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + .sensor_sel_shift = 0x1, +}; + +static struct 
cam_sfe_path_common_reg_data sfe_880_pix_reg_data = { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .subscribe_irq_mask = 0xC, +}; + +static struct cam_sfe_path_common_reg_data sfe_880_rdi0_reg_data = { + .sof_irq_mask = 0x10, + .eof_irq_mask = 0x20, + .subscribe_irq_mask = 0x30, +}; + +static struct cam_sfe_path_common_reg_data sfe_880_rdi1_reg_data = { + .sof_irq_mask = 0x40, + .eof_irq_mask = 0x80, + .subscribe_irq_mask = 0xC0, +}; + +static struct cam_sfe_path_common_reg_data sfe_880_rdi2_reg_data = { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .subscribe_irq_mask = 0x300, +}; + +static struct cam_sfe_path_common_reg_data sfe_880_rdi3_reg_data = { + .sof_irq_mask = 0x400, + .eof_irq_mask = 0x800, + .subscribe_irq_mask = 0xC00, +}; + +static struct cam_sfe_path_common_reg_data sfe_880_rdi4_reg_data = { + .sof_irq_mask = 0x1000, + .eof_irq_mask = 0x2000, + .subscribe_irq_mask = 0x3000, +}; + +static struct cam_sfe_top_hw_info sfe880_top_hw_info = { + .common_reg = &sfe880_top_commong_reg, + .modules_hw_info = &sfe880_modules_common_reg, + .common_reg_data = &sfe_880_top_common_reg_data, + .ipp_module_desc = sfe_880_mod_desc, + .wr_client_desc = sfe_880_wr_client_desc, + .pix_reg_data = &sfe_880_pix_reg_data, + .rdi_reg_data[0] = &sfe_880_rdi0_reg_data, + .rdi_reg_data[1] = &sfe_880_rdi1_reg_data, + .rdi_reg_data[2] = &sfe_880_rdi2_reg_data, + .rdi_reg_data[3] = &sfe_880_rdi3_reg_data, + .rdi_reg_data[4] = &sfe_880_rdi4_reg_data, + .num_inputs = 6, + .input_type = { + CAM_SFE_PIX_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + CAM_SFE_RDI_VER_1_0, + }, + .num_top_errors = ARRAY_SIZE(sfe_880_top_irq_err_desc), + .top_err_desc = sfe_880_top_irq_err_desc, + .num_clc_module = 12, + .clc_dbg_mod_info = &sfe880_clc_dbg_module_info, + .num_of_testbus = 2, + .test_bus_info = { + /* TEST BUS 1 INFO */ + { + .debugfs_val = SFE_DEBUG_ENABLE_TESTBUS1, + .enable = false, + .value = 0x1, + .size = 
ARRAY_SIZE(sfe880_testbus1_info), + .testbus = sfe880_testbus1_info, + }, + /* TEST BUS 2 INFO */ + { + .debugfs_val = SFE_DEBUG_ENABLE_TESTBUS2, + .enable = false, + .value = 0x3, + .size = ARRAY_SIZE(sfe880_testbus2_info), + .testbus = sfe880_testbus2_info, + }, + }, +}; + +static struct cam_irq_register_set sfe880_bus_rd_irq_reg[1] = { + { + .mask_reg_offset = 0x00000404, + .clear_reg_offset = 0x00000408, + .status_reg_offset = 0x00000410, + .set_reg_offset = 0x00000418, + }, +}; + +static struct cam_sfe_bus_rd_err_irq_desc sfe880_bus_rd_irq_err_desc[] = { + { + .bitmask = BIT(0), + .err_name = "INFO_CONS_VIOLATION", + .desc = "Clients have illegal programming, check CONS_VIOLATION_STATUS", + }, + { + .bitmask = BIT(31), + .err_name = "INFO_CCIF_VIOLATION", + }, +}; + +static struct cam_sfe_bus_rd_constraint_error_desc + sfe880_bus_rd_cons_error_desc[CAM_SFE_BUS_RD_CONS_ERR_MAX] = { + { + .bitmask = BIT(0), + .error_desc = "Image Addr Unalign Latch", + }, + { + .bitmask = BIT(1), + .error_desc = "Ubwc Addr Unalign Latch", + }, + { + .bitmask = BIT(2), + .error_desc = "Stride Unalign Latch", + }, + { + .bitmask = BIT(3), + .error_desc = "Y Unit Unalign Latch", + }, + { + .bitmask = BIT(4), + .error_desc = "X Unit Unalign Latch", + }, + { + .bitmask = BIT(5), + .error_desc = "Image width Unalign Latch", + }, + { + .bitmask = BIT(6), + .error_desc = "Image height Unalign Latch", + }, + { + .bitmask = BIT(7), + .error_desc = "Meta Stride Unalign Latch", + }, +}; + +static struct cam_sfe_bus_rd_constraint_error_info sfe880_bus_rd_constraint_error_info = { + .constraint_error_list = sfe880_bus_rd_cons_error_desc, + .num_cons_err = 8, + .cons_chk_en_val = 0xC, + .cons_chk_en_avail = true, +}; + +static struct cam_sfe_bus_rd_hw_info sfe880_bus_rd_hw_info = { + .common_reg = { + .hw_version = 0x00000400, + .misr_reset = 0x0000041C, + .pwr_iso_cfg = 0x00000424, + .input_if_cmd = 0x00000414, + .test_bus_ctrl = 0x0000042C, + .security_cfg = 0x00000420, + 
.cons_violation_status = 0x00000430, + .ccif_violation_status = 0x00000434, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe880_bus_rd_irq_reg, + .global_irq_cmd_offset = 0x0000040C, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = 3, + .bus_client_reg = { + /* BUS Client 0 */ + { + .cfg = 0x00000450, + .image_addr = 0x00000458, + .buf_width = 0x0000045C, + .buf_height = 0x00000460, + .stride = 0x00000464, + .unpacker_cfg = 0x00000468, + .latency_buf_allocation = 0x0000047C, + .system_cache_cfg = 0x00000490, + .addr_cfg = 0x00000498, + .debug_status_cfg = 0x000004A4, + .debug_status_0 = 0x000004A8, + .debug_status_1 = 0x000004AC, + .name = "Fetch0", + }, + /* BUS Client 1 */ + { + .cfg = 0x000004F0, + .image_addr = 0x000004F8, + .buf_width = 0x000004FC, + .buf_height = 0x00000500, + .stride = 0x00000504, + .unpacker_cfg = 0x00000508, + .latency_buf_allocation = 0x0000051C, + .system_cache_cfg = 0x00000530, + .addr_cfg = 0x00000538, + .debug_status_cfg = 0x00000544, + .debug_status_0 = 0x00000548, + .debug_status_1 = 0x0000054C, + .name = "Fetch1", + }, + /* BUS Client 2 */ + { + .cfg = 0x00000590, + .image_addr = 0x00000598, + .buf_width = 0x0000059C, + .buf_height = 0x000005A0, + .stride = 0x000005A4, + .unpacker_cfg = 0x000005A8, + .latency_buf_allocation = 0x000005BC, + .system_cache_cfg = 0x000005D0, + .addr_cfg = 0x000005D8, + .debug_status_cfg = 0x000005E4, + .debug_status_0 = 0x000005E8, + .debug_status_1 = 0x000005EC, + .name = "Fetch2", + }, + }, + .num_bus_rd_resc = 3, + .sfe_bus_rd_info = { + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI0, + .mid[0] = 0, + .max_width = -1, + .max_height = -1, + }, + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI1, + .mid[0] = 1, + .max_width = -1, + .max_height = -1, + }, + { + .sfe_bus_rd_type = CAM_SFE_BUS_RD_RDI2, + .mid[0] = 2, + .max_width = -1, + .max_height = -1, + }, + }, + .num_bus_rd_errors = ARRAY_SIZE(sfe880_bus_rd_irq_err_desc), + .bus_rd_err_desc = sfe880_bus_rd_irq_err_desc, + 
.top_irq_shift = 0x1, + .latency_buf_allocation = 2048, + .sys_cache_default_val = 0x20, + .irq_err_mask = 0x80000001, + .fs_sync_shift = 0x14, + .constraint_error_info = &sfe880_bus_rd_constraint_error_info, +}; + +static struct cam_sfe_bus_wr_err_irq_desc sfe880_bus_wr_irq_err_desc[] = { + { + .bitmask = BIT(26), + .err_name = "IPCC_FENCE_DATA_ERR", + .desc = "IPCC or FENCE Data was not available in the Input Fifo", + }, + { + .bitmask = BIT(27), + .err_name = "IPCC_FENCE_ADDR_ERR", + .desc = "IPCC or FENCE address fifo was empty and read was attempted", + }, + { + .bitmask = BIT(28), + .err_name = "CONS_VIOLATION", + .desc = "Programming of software registers violated the constraints", + }, + { + .bitmask = BIT(30), + .err_name = "VIOLATION", + .desc = "Client has a violation in ccif protocol at input", + }, + { + .bitmask = BIT(31), + .err_name = "IMAGE_SIZE_VIOLATION", + .desc = "Programmed image size is not same as image size from the CCIF", + }, +}; + +static struct cam_sfe_bus_wr_constraint_error_desc + sfe880_bus_wr_cons_error_desc[CAM_SFE_BUS_CONS_ERR_MAX] = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input not supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input not supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input not supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input not supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format not supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 BPP format not supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 24 BPP format not supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 32 BPP format not supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack 64 BPP format not supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 20 format not supported" + }, + { + .bitmask = BIT(10), + .error_description = 
"Pack MIPI 14 format not supported" + }, + { + .bitmask = BIT(11), + .error_description = "Pack MIPI 12 format not supported" + }, + { + .bitmask = BIT(12), + .error_description = "Pack MIPI 10 format not supported" + }, + { + .bitmask = BIT(13), + .error_description = "Pack 128 BPP format not supported" + }, + { + .bitmask = BIT(14), + .error_description = "UBWC P16 format not supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC P10 format not supported" + }, + { + .bitmask = BIT(16), + .error_description = "UBWC NV12 format not supported" + }, + { + .bitmask = BIT(17), + .error_description = "UBWC NV12 4R format not supported" + }, + { + .bitmask = BIT(18), + .error_description = "UBWC TP10 format not supported" + }, + { + .bitmask = BIT(19), + .error_description = "Frame based Mode not supported" + }, + { + .bitmask = BIT(20), + .error_description = "Index based Mode not supported" + }, + { + .bitmask = BIT(21), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = BIT(22), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(23), + .error_description = "FIFO frmheader addr unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(25), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Stride unalign" + }, + { + .bitmask = BIT(28), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(29), + .error_description = "Image Width unalign" + }, + { + .bitmask = BIT(30), + .error_description = "Image Height unalign" + }, + { + .bitmask = BIT(31), + .error_description = "Meta Stride unalign" + }, +}; + +static struct cam_sfe_bus_wr_constraint_error_info sfe880_bus_wr_constraint_error_info = { + .constraint_error_list = sfe880_bus_wr_cons_error_desc, + .num_cons_err = 32, + .img_width_unalign_shift = 29, +}; 
+ +static struct cam_irq_register_set sfe880_bus_wr_irq_reg[1] = { + { + .mask_reg_offset = 0x00000818, + .clear_reg_offset = 0x00000820, + .status_reg_offset = 0x00000828, + .set_reg_offset = 0x00000850, + }, +}; + +static struct cam_sfe_bus_wr_hw_info sfe880_bus_wr_hw_info = { + .common_reg = { + .hw_version = 0x00000800, + .cgc_ovd = 0x00000808, + .if_frameheader_cfg = { + 0x00000834, + 0x00000838, + 0x0000083C, + 0x00000840, + 0x00000844, + 0x00000848, + }, + .pwr_iso_cfg = 0x0000085C, + .overflow_status_clear = 0x00000860, + .ccif_violation_status = 0x00000864, + .overflow_status = 0x00000868, + .image_size_violation_status = 0x00000870, + .debug_status_top_cfg = 0x000008F0, + .debug_status_top = 0x000008F4, + .test_bus_ctrl = 0x00000994, + .top_irq_mask_0 = 0x00000020, + .qos_eos_cfg = 0x00000990, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe880_bus_wr_irq_reg, + .global_irq_cmd_offset = 0x00000830, + .global_clear_bitmask = 0x00000001, + }, + .num_perf_counters = 4, + .perf_cnt_status = 0x000008B4, + .perf_cnt_reg = { + { + .perf_cnt_cfg = 0x00000874, + .perf_cnt_val = 0x00000894, + }, + { + .perf_cnt_cfg = 0x00000878, + .perf_cnt_val = 0x00000898, + }, + { + .perf_cnt_cfg = 0x0000087C, + .perf_cnt_val = 0x0000089C, + }, + { + .perf_cnt_cfg = 0x00000880, + .perf_cnt_val = 0x000008A0, + }, + }, + }, + .num_client = 17, + .bus_client_reg = { + /* BUS Client 0 REMOSAIC */ + { + .cfg = 0x00000A00, + .image_addr = 0x00000A04, + .frame_incr = 0x00000A08, + .image_cfg_0 = 0x00000A0C, + .image_cfg_1 = 0x00000A10, + .image_cfg_2 = 0x00000A14, + .packer_cfg = 0x00000A18, + .frame_header_addr = 0x00000A20, + .frame_header_incr = 0x00000A24, + .frame_header_cfg = 0x00000A28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000A30, + .irq_subsample_pattern = 0x00000A34, + .framedrop_period = 0x00000A38, + .framedrop_pattern = 0x00000A3C, + .system_cache_cfg = 0x00000A68, + .addr_cfg = 0x00000A70, + .addr_status_0 = 0x00000A78, + .addr_status_1 = 
0x00000A7C, + .addr_status_2 = 0x00000A80, + .addr_status_3 = 0x00000A84, + .debug_status_cfg = 0x00000A88, + .debug_status_0 = 0x00000A8C, + .debug_status_1 = 0x00000A90, + .mmu_prefetch_cfg = 0x00000A60, + .mmu_prefetch_max_offset = 0x00000A64, + .bw_limiter_addr = 0x00000A1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_0, + }, + /* BUS Client 1 IR OUT */ + { + .cfg = 0x00000B00, + .image_addr = 0x00000B04, + .frame_incr = 0x00000B08, + .image_cfg_0 = 0x00000B0C, + .image_cfg_1 = 0x00000B10, + .image_cfg_2 = 0x00000B14, + .packer_cfg = 0x00000B18, + .frame_header_addr = 0x00000B20, + .frame_header_incr = 0x00000B24, + .frame_header_cfg = 0x00000B28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000B30, + .irq_subsample_pattern = 0x00000B34, + .framedrop_period = 0x00000B38, + .framedrop_pattern = 0x00000B3C, + .system_cache_cfg = 0x00000B68, + .addr_cfg = 0x00000B70, + .addr_status_0 = 0x00000B78, + .addr_status_1 = 0x00000B7C, + .addr_status_2 = 0x00000B80, + .addr_status_3 = 0x00000B84, + .debug_status_cfg = 0x00000B88, + .debug_status_0 = 0x00000B8C, + .debug_status_1 = 0x00000B90, + .mmu_prefetch_cfg = 0x00000B60, + .mmu_prefetch_max_offset = 0x00000B64, + .bw_limiter_addr = 0x00000B1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_1, + }, + /* BUS Client 2 STATS_BE_0 */ + { + .cfg = 0x00000C00, + .image_addr = 0x00000C04, + .frame_incr = 0x00000C08, + .image_cfg_0 = 0x00000C0C, + .image_cfg_1 = 0x00000C10, + .image_cfg_2 = 0x00000C14, + .packer_cfg = 0x00000C18, + .frame_header_addr = 0x00000C20, + .frame_header_incr = 0x00000C24, + .frame_header_cfg = 0x00000C28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000C30, + .irq_subsample_pattern = 0x00000C34, + .framedrop_period = 0x00000C38, + .framedrop_pattern = 0x00000C3C, + .system_cache_cfg = 0x00000C68, + .addr_cfg = 0x00000C70, + .addr_status_0 = 0x00000C78, + .addr_status_1 = 0x00000C7C, + .addr_status_2 = 0x00000C80, + .addr_status_3 = 0x00000C84, + .debug_status_cfg = 0x00000C88, + .debug_status_0 = 
0x00000C8C, + .debug_status_1 = 0x00000C90, + .mmu_prefetch_cfg = 0x00000C60, + .mmu_prefetch_max_offset = 0x00000C64, + .bw_limiter_addr = 0x00000C1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_2, + }, + /* BUS Client 3 STATS_BHIST_0 */ + { + .cfg = 0x00000D00, + .image_addr = 0x00000D04, + .frame_incr = 0x00000D08, + .image_cfg_0 = 0x00000D0C, + .image_cfg_1 = 0x00000D10, + .image_cfg_2 = 0x00000D14, + .packer_cfg = 0x00000D18, + .frame_header_addr = 0x00000D20, + .frame_header_incr = 0x00000D24, + .frame_header_cfg = 0x00000D28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000D30, + .irq_subsample_pattern = 0x00000D34, + .framedrop_period = 0x00000D38, + .framedrop_pattern = 0x00000D3C, + .system_cache_cfg = 0x00000D68, + .addr_cfg = 0x00000D70, + .addr_status_0 = 0x00000D78, + .addr_status_1 = 0x00000D7C, + .addr_status_2 = 0x00000D80, + .addr_status_3 = 0x00000D84, + .debug_status_cfg = 0x00000D88, + .debug_status_0 = 0x00000D8C, + .debug_status_1 = 0x00000D90, + .mmu_prefetch_cfg = 0x00000D60, + .mmu_prefetch_max_offset = 0x00000D64, + .bw_limiter_addr = 0x00000D1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_2, + }, + /* BUS Client 4 STATS_BE_1 */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + .frame_header_cfg = 0x00000E28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00000E38, + .framedrop_pattern = 0x00000E3C, + .system_cache_cfg = 0x00000E68, + .addr_cfg = 0x00000E70, + .addr_status_0 = 0x00000E78, + .addr_status_1 = 0x00000E7C, + .addr_status_2 = 0x00000E80, + .addr_status_3 = 0x00000E84, + .debug_status_cfg = 0x00000E88, + .debug_status_0 = 0x00000E8C, + .debug_status_1 = 0x00000E90, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 0x00000E64, + 
.bw_limiter_addr = 0x00000E1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_3, + }, + /* BUS Client 5 STATS_BHIST_1 */ + { + .cfg = 0x00000F00, + .image_addr = 0x00000F04, + .frame_incr = 0x00000F08, + .image_cfg_0 = 0x00000F0C, + .image_cfg_1 = 0x00000F10, + .image_cfg_2 = 0x00000F14, + .packer_cfg = 0x00000F18, + .frame_header_addr = 0x00000F20, + .frame_header_incr = 0x00000F24, + .frame_header_cfg = 0x00000F28, + .line_done_cfg = 0, + .irq_subsample_period = 0x00000F30, + .irq_subsample_pattern = 0x00000F34, + .framedrop_period = 0x00000F38, + .framedrop_pattern = 0x00000F3C, + .system_cache_cfg = 0x00000F68, + .addr_cfg = 0x00000F70, + .addr_status_0 = 0x00000F78, + .addr_status_1 = 0x00000F7C, + .addr_status_2 = 0x00000F80, + .addr_status_3 = 0x00000F84, + .debug_status_cfg = 0x00000F88, + .debug_status_0 = 0x00000F8C, + .debug_status_1 = 0x00000F90, + .mmu_prefetch_cfg = 0x00000F60, + .mmu_prefetch_max_offset = 0x00000F64, + .bw_limiter_addr = 0x00000F1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_3, + }, + /* BUS Client 6 STATS_BE_2 */ + { + .cfg = 0x00001000, + .image_addr = 0x00001004, + .frame_incr = 0x00001008, + .image_cfg_0 = 0x0000100C, + .image_cfg_1 = 0x00001010, + .image_cfg_2 = 0x00001014, + .packer_cfg = 0x00001018, + .frame_header_addr = 0x00001020, + .frame_header_incr = 0x00001024, + .frame_header_cfg = 0x00001028, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001030, + .irq_subsample_pattern = 0x00001034, + .framedrop_period = 0x00001038, + .framedrop_pattern = 0x0000103C, + .system_cache_cfg = 0x00001068, + .addr_cfg = 0x00001070, + .addr_status_0 = 0x00001078, + .addr_status_1 = 0x0000107C, + .addr_status_2 = 0x00001080, + .addr_status_3 = 0x00001084, + .debug_status_cfg = 0x00001088, + .debug_status_0 = 0x0000108C, + .debug_status_1 = 0x00001090, + .mmu_prefetch_cfg = 0x00001060, + .mmu_prefetch_max_offset = 0x00001064, + .bw_limiter_addr = 0x0000101C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_4, + }, + /* BUS Client 7 STATS_BHIST_2 */ + { + 
.cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + .image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + .packer_cfg = 0x00001118, + .frame_header_addr = 0x00001120, + .frame_header_incr = 0x00001124, + .frame_header_cfg = 0x00001128, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .framedrop_period = 0x00001138, + .framedrop_pattern = 0x0000113C, + .system_cache_cfg = 0x00001168, + .addr_cfg = 0x00001170, + .addr_status_0 = 0x00001178, + .addr_status_1 = 0x0000117C, + .addr_status_2 = 0x00001180, + .addr_status_3 = 0x00001184, + .debug_status_cfg = 0x00001188, + .debug_status_0 = 0x0000118C, + .debug_status_1 = 0x00001190, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .bw_limiter_addr = 0x0000111C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_4, + }, + /* BUS Client 8 STATS_RS_0 */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .framedrop_period = 0x00001238, + .framedrop_pattern = 0x0000123C, + .system_cache_cfg = 0x00001268, + .addr_cfg = 0x00001270, + .addr_status_0 = 0x00001278, + .addr_status_1 = 0x0000127C, + .addr_status_2 = 0x00001280, + .addr_status_3 = 0x00001284, + .debug_status_cfg = 0x00001288, + .debug_status_0 = 0x0000128C, + .debug_status_1 = 0x00001290, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .bw_limiter_addr = 0x0000121C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_2, + }, + /* BUS Client 9 STATS_RS_1 */ + { + .cfg = 0x00001300, + .image_addr = 0x00001304, + .frame_incr = 0x00001308, + .image_cfg_0 = 0x0000130C, + .image_cfg_1 = 
0x00001310, + .image_cfg_2 = 0x00001314, + .packer_cfg = 0x00001318, + .frame_header_addr = 0x00001320, + .frame_header_incr = 0x00001324, + .frame_header_cfg = 0x00001328, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001330, + .irq_subsample_pattern = 0x00001334, + .framedrop_period = 0x00001338, + .framedrop_pattern = 0x0000133C, + .system_cache_cfg = 0x00001368, + .addr_cfg = 0x00001370, + .addr_status_0 = 0x00001378, + .addr_status_1 = 0x0000137C, + .addr_status_2 = 0x00001380, + .addr_status_3 = 0x00001384, + .debug_status_cfg = 0x00001388, + .debug_status_0 = 0x0000138C, + .debug_status_1 = 0x00001390, + .mmu_prefetch_cfg = 0x00001360, + .mmu_prefetch_max_offset = 0x00001364, + .bw_limiter_addr = 0x0000131C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_3, + }, + /* BUS Client 10 STATS_RS_2 */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .frame_header_addr = 0x00001420, + .frame_header_incr = 0x00001424, + .frame_header_cfg = 0x00001428, + .line_done_cfg = 0, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .system_cache_cfg = 0x00001468, + .addr_cfg = 0x00001470, + .addr_status_0 = 0x00001478, + .addr_status_1 = 0x0000147C, + .addr_status_2 = 0x00001480, + .addr_status_3 = 0x00001484, + .debug_status_cfg = 0x00001488, + .debug_status_0 = 0x0000148C, + .debug_status_1 = 0x00001490, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .bw_limiter_addr = 0x0000141C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_4, + }, + /* BUS Client 11 RDI0 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + .frame_header_incr = 
0x00001524, + .frame_header_cfg = 0x00001528, + .line_done_cfg = 0x0000152C, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .system_cache_cfg = 0x00001568, + .addr_cfg = 0x00001570, + .addr_status_0 = 0x00001578, + .addr_status_1 = 0x0000157C, + .addr_status_2 = 0x00001580, + .addr_status_3 = 0x00001584, + .debug_status_cfg = 0x00001588, + .debug_status_0 = 0x0000158C, + .debug_status_1 = 0x00001590, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_5, + }, + /* BUS Client 12 RDI1 */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .line_done_cfg = 0x0000162C, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .addr_status_0 = 0x00001678, + .addr_status_1 = 0x0000167C, + .addr_status_2 = 0x00001680, + .addr_status_3 = 0x00001684, + .debug_status_cfg = 0x00001688, + .debug_status_0 = 0x0000168C, + .debug_status_1 = 0x00001690, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_6, + }, + /* BUS Client 13 RDI2 */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .line_done_cfg = 0x0000172C, + .irq_subsample_period = 0x00001730, 
+ .irq_subsample_pattern = 0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .addr_status_0 = 0x00001778, + .addr_status_1 = 0x0000177C, + .addr_status_2 = 0x00001780, + .addr_status_3 = 0x00001784, + .debug_status_cfg = 0x00001788, + .debug_status_0 = 0x0000178C, + .debug_status_1 = 0x00001790, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .bw_limiter_addr = 0x0000171C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_7, + }, + /* BUS Client 14 RDI3 */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .line_done_cfg = 0x0000182C, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .addr_status_0 = 0x00001878, + .addr_status_1 = 0x0000187C, + .addr_status_2 = 0x00001880, + .addr_status_3 = 0x00001884, + .debug_status_cfg = 0x00001888, + .debug_status_0 = 0x0000188C, + .debug_status_1 = 0x00001890, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .bw_limiter_addr = 0x0000181C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_8, + }, + /* BUS Client 15 RDI4 */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .line_done_cfg = 0x0000192C, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + 
.system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + .addr_status_0 = 0x00001978, + .addr_status_1 = 0x0000197C, + .addr_status_2 = 0x00001980, + .addr_status_3 = 0x00001984, + .debug_status_cfg = 0x00001988, + .debug_status_0 = 0x0000198C, + .debug_status_1 = 0x00001990, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .bw_limiter_addr = 0x0000191C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_9, + }, + /* BUS Client 16 HDR_STATS */ + { + .cfg = 0x00001A00, + .image_addr = 0x00001A04, + .frame_incr = 0x00001A08, + .image_cfg_0 = 0x00001A0C, + .image_cfg_1 = 0x00001A10, + .image_cfg_2 = 0x00001A14, + .packer_cfg = 0x00001A18, + .frame_header_addr = 0x00001A20, + .frame_header_incr = 0x00001A24, + .frame_header_cfg = 0x00001A28, + .irq_subsample_period = 0x00001A30, + .irq_subsample_pattern = 0x00001A34, + .framedrop_period = 0x00001A38, + .framedrop_pattern = 0x00001A3C, + .system_cache_cfg = 0x00001A68, + .addr_cfg = 0x00001A70, + .addr_status_0 = 0x00001A78, + .addr_status_1 = 0x00001A7C, + .addr_status_2 = 0x00001A80, + .addr_status_3 = 0x00001A84, + .debug_status_cfg = 0x00001A88, + .debug_status_0 = 0x00001A8C, + .debug_status_1 = 0x00001A90, + .mmu_prefetch_cfg = 0x00001A60, + .mmu_prefetch_max_offset = 0x00001A64, + .bw_limiter_addr = 0x00001A1C, + .comp_group = CAM_SFE_BUS_WR_COMP_GRP_10, + }, + }, + .num_out = 17, + .sfe_out_hw_info = { + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_1, + .mid[0] = 45, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 11, + .en_line_done = 1, + .name = "RDI_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_2, + .mid[0] = 46, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 12, + .en_line_done = 1, + .name = "RDI_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = 
CAM_SFE_BUS_WR_SRC_GRP_3, + .mid[0] = 47, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 13, + .en_line_done = 1, + .name = "RDI_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_4, + .mid[0] = 48, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 14, + .name = "RDI_3", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RDI4, + .max_width = -1, + .max_height = -1, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_5, + .mid[0] = 49, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 15, + .name = "RDI_4", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_RAW_DUMP, + .max_width = 9312, + .max_height = 6992, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 32, + .mid[1] = 33, + .num_mid = 2, + .num_wm = 1, + .wm_idx = 0, + .name = "REMOSIAC", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_IR, + .max_width = 9312, + .max_height = 6772, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 34, + .mid[1] = 35, + .num_wm = 1, + .wm_idx = 1, + .name = "IR_OUT", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_0, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 36, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 2, + .name = "STATS_BE_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_0, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 37, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 3, + .name = "STATS_BHIST_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_1, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 38, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 4, + .name = "STATS_BE_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_1, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 39, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 5, + .name = "STATS_BHIST_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BE_2, + 
.max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 40, + .num_wm = 1, + .wm_idx = 6, + .name = "STATS_BE_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BHIST_2, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 41, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 7, + .name = "STATS_BHIST_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BAYER_RS_0, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 42, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 8, + .name = "STATS_RS_0", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BAYER_RS_1, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 43, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 9, + .name = "STATS_RS_1", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_BAYER_RS_2, + .max_width = 7296, + .max_height = 5472, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 44, + .num_mid = 1, + .num_wm = 1, + .wm_idx = 10, + .name = "STATS_RS_2", + }, + { + .sfe_out_type = CAM_SFE_BUS_SFE_OUT_HDR_STATS, + .max_width = 9312, + .max_height = 6772, + .source_group = CAM_SFE_BUS_WR_SRC_GRP_0, + .mid[0] = 50, + .mid[1] = 51, + .num_mid = 2, + .num_wm = 1, + .wm_idx = 16, + .name = "HDR_STATS" + }, + }, + .constraint_error_info = &sfe880_bus_wr_constraint_error_info, + .num_bus_wr_errors = ARRAY_SIZE(sfe880_bus_wr_irq_err_desc), + .bus_wr_err_desc = sfe880_bus_wr_irq_err_desc, + .comp_done_mask = { + BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(23), + BIT(24), BIT(25), BIT(26), BIT(27), BIT(22), + }, + .num_comp_grp = 11, + .line_done_cfg = 0x11, + .top_irq_shift = 0x0, + .max_out_res = CAM_ISP_SFE_OUT_RES_BASE + 18, + .pack_align_shift = 5, + .max_bw_counter_limit = 0xFF, + .sys_cache_default_val = 0x20, + .irq_err_mask = 0xD0000000, +}; + +static struct cam_irq_register_set sfe880_top_irq_reg_set[1] = { + { + .mask_reg_offset = 0x00000020, + 
.clear_reg_offset = 0x00000024, + .status_reg_offset = 0x00000028, + .set_reg_offset = 0x0000002C, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, +}; + +static struct cam_irq_controller_reg_info sfe880_top_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = sfe880_top_irq_reg_set, + .global_irq_cmd_offset = 0x0000001C, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +struct cam_sfe_hw_info cam_sfe880_hw_info = { + .irq_reg_info = &sfe880_top_irq_reg_info, + + .bus_wr_version = CAM_SFE_BUS_WR_VER_1_0, + .bus_wr_hw_info = &sfe880_bus_wr_hw_info, + + .bus_rd_version = CAM_SFE_BUS_RD_VER_1_0, + .bus_rd_hw_info = &sfe880_bus_rd_hw_info, + + .top_version = CAM_SFE_TOP_VER_1_0, + .top_hw_info = &sfe880_top_hw_info, +}; + +#endif /* _CAM_SFE880_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_core.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_core.c new file mode 100644 index 0000000000..a75cdcdbef --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_core.c @@ -0,0 +1,591 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include "cam_tasklet_util.h" +#include "cam_sfe_hw_intf.h" +#include "cam_sfe_soc.h" +#include "cam_sfe_core.h" +#include "cam_debug_util.h" + +static const char drv_name[] = "sfe"; + +int cam_sfe_get_hw_caps(void *device_priv, + void *get_hw_cap_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_sfe_reset(void *device_priv, + void *reset_core_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_sfe_read(void *device_priv, + void *read_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_sfe_write(void *device_priv, + void *write_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_sfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *sfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sfe_hw_core_info *core_info = NULL; + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_SFE, "Invalid arguments"); + return -EINVAL; + } + + mutex_lock(&sfe_hw->hw_mutex); + sfe_hw->open_count++; + if (sfe_hw->open_count > 1) { + mutex_unlock(&sfe_hw->hw_mutex); + CAM_DBG(CAM_SFE, "SFE has already been initialized cnt: %d", + sfe_hw->open_count); + return 0; + } + mutex_unlock(&sfe_hw->hw_mutex); + + soc_info = &sfe_hw->soc_info; + core_info = (struct cam_sfe_hw_core_info *)sfe_hw->core_info; + + /* Turn ON Regulators, Clocks and other SOC resources */ + rc = cam_sfe_enable_soc_resources(soc_info); + if (rc) { + CAM_ERR(CAM_SFE, "Enable SOC failed"); + rc = -EFAULT; + goto deinit_hw; + } + + CAM_DBG(CAM_SFE, "SFE SOC resource enabled"); + + if (core_info->sfe_top->hw_ops.init) { + rc = core_info->sfe_top->hw_ops.init(core_info->sfe_top->top_priv, NULL, 0); + if (rc) { + CAM_ERR(CAM_SFE, "Top init failed rc=%d", rc); + goto deinit_hw; + } + } + + if (core_info->sfe_bus_wr->hw_ops.init) { + rc = core_info->sfe_bus_wr->hw_ops.init(core_info->sfe_bus_wr->bus_priv, NULL, 0); + if (rc) { + CAM_ERR(CAM_SFE, "Bus WR init failed rc=%d", rc); + goto deinit_hw; + } + } + + if 
(core_info->sfe_bus_rd->hw_ops.init) { + rc = core_info->sfe_bus_rd->hw_ops.init(core_info->sfe_bus_rd->bus_priv, NULL, 0); + if (rc) { + CAM_ERR(CAM_SFE, "Bus RD init failed rc=%d", rc); + goto deinit_hw; + } + } + + /* + * Async Reset as part of power ON + * Any register write in bus_wr_init/bus_rd_init + * will be cleared by sync reset in CSID. + * + */ + + sfe_hw->hw_state = CAM_HW_STATE_POWER_UP; + return rc; + +deinit_hw: + cam_sfe_deinit_hw(hw_priv, NULL, 0); + return rc; +} + +int cam_sfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *sfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sfe_hw_core_info *core_info = NULL; + int rc = 0; + + + if (!hw_priv) { + CAM_ERR(CAM_SFE, "Invalid arguments"); + return -EINVAL; + } + + mutex_lock(&sfe_hw->hw_mutex); + if (!sfe_hw->open_count) { + mutex_unlock(&sfe_hw->hw_mutex); + CAM_ERR(CAM_SFE, "Unbalanced deinit"); + return -EFAULT; + } + + sfe_hw->open_count--; + if (sfe_hw->open_count) { + mutex_unlock(&sfe_hw->hw_mutex); + CAM_DBG(CAM_SFE, "open_cnt non-zero: %d", + sfe_hw->open_count); + return 0; + } + mutex_unlock(&sfe_hw->hw_mutex); + + soc_info = &sfe_hw->soc_info; + core_info = (struct cam_sfe_hw_core_info *)sfe_hw->core_info; + + if (core_info->sfe_bus_rd->hw_ops.deinit) { + rc = core_info->sfe_bus_rd->hw_ops.deinit(core_info->sfe_bus_rd->bus_priv, NULL, 0); + if (rc) + CAM_ERR(CAM_SFE, "Bus RD deinit failed rc=%d", rc); + } + + if (core_info->sfe_bus_wr->hw_ops.deinit) { + rc = core_info->sfe_bus_wr->hw_ops.deinit(core_info->sfe_bus_wr->bus_priv, NULL, 0); + if (rc) + CAM_ERR(CAM_SFE, "Bus WR deinit failed rc=%d", rc); + } + + if (core_info->sfe_top->hw_ops.deinit) { + rc = core_info->sfe_top->hw_ops.deinit(core_info->sfe_top->top_priv, NULL, 0); + if (rc) + CAM_ERR(CAM_SFE, "Top deinit failed rc=%d", rc); + } + + /* Turn OFF Regulators, Clocks and other SOC resources */ + CAM_DBG(CAM_SFE, "Disable SFE SOC resource"); + rc = 
cam_sfe_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_SFE, "Disable SOC failed"); + + sfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + + CAM_DBG(CAM_SFE, "SFE deinit done rc: %d", rc); + return rc; +} + +int cam_sfe_test_irq_line(void *hw_priv) +{ + struct cam_hw_info *sfe_hw = hw_priv; + struct cam_sfe_hw_core_info *core_info; + int rc = 0, local_ret; + + if (!hw_priv) { + CAM_ERR(CAM_SFE, "Invalid arguments"); + return -EINVAL; + } + + core_info = sfe_hw->core_info; + + local_ret = cam_sfe_init_hw(sfe_hw, NULL, 0); + if (local_ret) { + CAM_ERR(CAM_SFE, "SFE:%d failed to init hw", sfe_hw->soc_info.index); + return local_ret; + } + + local_ret = cam_irq_controller_test_irq_line(core_info->sfe_irq_controller, "SFE:%d", + sfe_hw->soc_info.index); + if (local_ret) { + CAM_ERR(CAM_SFE, "failed to test SFE:%d", sfe_hw->soc_info.index); + rc = local_ret; + } + + local_ret = cam_sfe_deinit_hw(sfe_hw, NULL, 0); + if (local_ret) { + CAM_ERR(CAM_SFE, "SFE:%d failed to deinit hw", sfe_hw->soc_info.index); + return local_ret; + } + + return rc; +} + +int cam_sfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size) +{ + struct cam_sfe_hw_core_info *core_info = NULL; + struct cam_hw_info *sfe_hw = hw_priv; + struct cam_sfe_acquire_args *acquire; + int rc = -ENODEV; + + if (!hw_priv || !reserve_args || (arg_size != + sizeof(struct cam_sfe_acquire_args))) { + CAM_ERR(CAM_SFE, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct cam_sfe_hw_core_info *)sfe_hw->core_info; + acquire = (struct cam_sfe_acquire_args *)reserve_args; + + CAM_DBG(CAM_SFE, "SFE acquire for res type: %d", + acquire->rsrc_type); + + mutex_lock(&sfe_hw->hw_mutex); + if (acquire->rsrc_type == CAM_ISP_RESOURCE_SFE_IN) + rc = core_info->sfe_top->hw_ops.reserve( + core_info->sfe_top->top_priv, + reserve_args, arg_size); + else if (acquire->rsrc_type == CAM_ISP_RESOURCE_SFE_OUT) + rc = core_info->sfe_bus_wr->hw_ops.reserve( + core_info->sfe_bus_wr->bus_priv, acquire, + 
sizeof(*acquire)); + else if (acquire->rsrc_type == CAM_ISP_RESOURCE_SFE_RD) + rc = core_info->sfe_bus_rd->hw_ops.reserve( + core_info->sfe_bus_rd->bus_priv, acquire, + sizeof(*acquire)); + else + CAM_ERR(CAM_SFE, "Invalid SFE res_type: %d", + acquire->rsrc_type); + mutex_unlock(&sfe_hw->hw_mutex); + + return rc; +} + +int cam_sfe_release(void *hw_priv, void *release_args, uint32_t arg_size) +{ + struct cam_sfe_hw_core_info *core_info = NULL; + struct cam_hw_info *sfe_hw = hw_priv; + struct cam_isp_resource_node *sfe_res; + int rc = -ENODEV; + + if (!hw_priv || !release_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_SFE, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct cam_sfe_hw_core_info *) sfe_hw->core_info; + sfe_res = (struct cam_isp_resource_node *) release_args; + + CAM_DBG(CAM_SFE, "SFE release for res type: %d", + sfe_res->res_type); + + mutex_lock(&sfe_hw->hw_mutex); + if (sfe_res->res_type == CAM_ISP_RESOURCE_SFE_IN) + rc = core_info->sfe_top->hw_ops.release( + core_info->sfe_top->top_priv, sfe_res, + sizeof(struct cam_isp_resource_node)); + else if (sfe_res->res_type == CAM_ISP_RESOURCE_SFE_OUT) + rc = core_info->sfe_bus_wr->hw_ops.release( + core_info->sfe_bus_wr->bus_priv, sfe_res, + sizeof(*sfe_res)); + else if (sfe_res->res_type == CAM_ISP_RESOURCE_SFE_RD) + rc = core_info->sfe_bus_rd->hw_ops.release( + core_info->sfe_bus_rd->bus_priv, sfe_res, + sizeof(*sfe_res)); + else + CAM_ERR(CAM_SFE, "Invalid SFE res type: %d", + sfe_res->res_type); + mutex_unlock(&sfe_hw->hw_mutex); + + return rc; +} + +int cam_sfe_start(void *hw_priv, void *start_args, uint32_t arg_size) +{ + struct cam_sfe_hw_core_info *core_info = NULL; + struct cam_hw_info *sfe_hw = hw_priv; + struct cam_isp_resource_node *sfe_res; + int rc; + + if (!hw_priv || !start_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_SFE, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct 
cam_sfe_hw_core_info *)sfe_hw->core_info; + sfe_res = (struct cam_isp_resource_node *)start_args; + core_info->tasklet_info = sfe_res->tasklet_info; + + mutex_lock(&sfe_hw->hw_mutex); + if (sfe_res->res_type == CAM_ISP_RESOURCE_SFE_IN) { + rc = core_info->sfe_top->hw_ops.start( + core_info->sfe_top->top_priv, sfe_res, + sizeof(struct cam_isp_resource_node)); + if (rc) + CAM_ERR(CAM_SFE, "Failed to start SFE IN rc: %d", rc); + } else if (sfe_res->res_type == CAM_ISP_RESOURCE_SFE_OUT) { + rc = core_info->sfe_bus_wr->hw_ops.start(sfe_res, NULL, 0); + if (rc) + CAM_ERR(CAM_SFE, "Failed to start SFE BUS WR rc: %d", + rc); + } else if (sfe_res->res_type == CAM_ISP_RESOURCE_SFE_RD) { + rc = core_info->sfe_bus_rd->hw_ops.start(sfe_res, + NULL, 0); + if (rc) + CAM_ERR(CAM_SFE, "Failed to start SFE BUS RD rc: %d", + rc); + } else { + CAM_ERR(CAM_SFE, "Invalid SFE res type:%d", + sfe_res->res_type); + rc = -EINVAL; + } + + mutex_unlock(&sfe_hw->hw_mutex); + CAM_DBG(CAM_SFE, + "Start for SFE res type: %u res id: %u res_state: %d rc: %d", + sfe_res->res_type, sfe_res->res_id, + sfe_res->res_state, rc); + return rc; +} + +int cam_sfe_stop(void *hw_priv, void *stop_args, uint32_t arg_size) +{ + struct cam_sfe_hw_core_info *core_info = NULL; + struct cam_hw_info *sfe_hw = hw_priv; + struct cam_isp_resource_node *sfe_res; + int rc = -EINVAL; + + if (!hw_priv || !stop_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_SFE, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct cam_sfe_hw_core_info *)sfe_hw->core_info; + sfe_res = (struct cam_isp_resource_node *)stop_args; + mutex_lock(&sfe_hw->hw_mutex); + + if (sfe_res->res_type == CAM_ISP_RESOURCE_SFE_IN) + rc = core_info->sfe_top->hw_ops.stop( + core_info->sfe_top->top_priv, sfe_res, + sizeof(struct cam_isp_resource_node)); + else if (sfe_res->res_type == CAM_ISP_RESOURCE_SFE_OUT) + rc = core_info->sfe_bus_wr->hw_ops.stop(sfe_res, NULL, 0); + else if (sfe_res->res_type == 
CAM_ISP_RESOURCE_SFE_RD) + rc = core_info->sfe_bus_rd->hw_ops.stop(sfe_res, NULL, 0); + else + CAM_ERR(CAM_SFE, "Invalid SFE res type: %d", sfe_res->res_type); + + mutex_unlock(&sfe_hw->hw_mutex); + CAM_DBG(CAM_SFE, + "Stop for SFE res type: %u res id: %u res_state: %d rc: %d", + sfe_res->res_type, sfe_res->res_id, + sfe_res->res_state, rc); + + return rc; +} + +int cam_sfe_process_cmd(void *hw_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *sfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sfe_hw_core_info *core_info = NULL; + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_SFE, "Invalid arguments"); + return -EINVAL; + } + + soc_info = &sfe_hw->soc_info; + core_info = (struct cam_sfe_hw_core_info *)sfe_hw->core_info; + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_CHANGE_BASE: + case CAM_ISP_HW_CMD_CLOCK_UPDATE: + case CAM_ISP_HW_CMD_BW_UPDATE_V2: + case CAM_ISP_HW_CMD_BW_CONTROL: + case CAM_ISP_HW_CMD_CORE_CONFIG: + case CAM_ISP_HW_NOTIFY_OVERFLOW: + case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE: + case CAM_ISP_HW_CMD_FCG_CONFIG: + rc = core_info->sfe_top->hw_ops.process_cmd( + core_info->sfe_top->top_priv, cmd_type, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_BUF_UPDATE: + case CAM_ISP_HW_CMD_BUF_UPDATE: + case CAM_ISP_HW_CMD_GET_HFR_UPDATE: + case CAM_ISP_HW_CMD_STRIPE_UPDATE: + case CAM_ISP_HW_CMD_WM_CONFIG_UPDATE: + case CAM_ISP_HW_CMD_GET_WM_SECURE_MODE: + case CAM_ISP_HW_SFE_SYS_CACHE_WM_CONFIG: + case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG: + case CAM_ISP_HW_SFE_BUS_MINI_DUMP: + case CAM_ISP_HW_USER_DUMP: + case CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR: + case CAM_ISP_HW_CMD_READ_RST_PERF_CNTRS: + rc = core_info->sfe_bus_wr->hw_ops.process_cmd( + core_info->sfe_bus_wr->bus_priv, cmd_type, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_HFR_UPDATE_RM: + case CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM: + case CAM_ISP_HW_CMD_BUF_UPDATE_RM: + case CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD: + case 
CAM_ISP_HW_SFE_SYS_CACHE_RM_CONFIG: + case CAM_ISP_HW_CMD_RM_ENABLE_DISABLE: + case CAM_ISP_HW_CMD_GET_RM_SECURE_MODE: + rc = core_info->sfe_bus_rd->hw_ops.process_cmd( + core_info->sfe_bus_rd->bus_priv, cmd_type, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_UNMASK_BUS_WR_IRQ: + /* Not supported */ + break; + case CAM_ISP_HW_CMD_SET_SFE_DEBUG_CFG: + /* propagate to SFE top */ + core_info->sfe_top->hw_ops.process_cmd( + core_info->sfe_top->top_priv, cmd_type, + cmd_args, arg_size); + fallthrough; + case CAM_ISP_HW_CMD_GET_RES_FOR_MID: + case CAM_ISP_HW_CMD_DUMP_BUS_INFO: + case CAM_ISP_HW_CMD_IRQ_INJECTION: + case CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION: + /* propagate to SFE bus wr */ + core_info->sfe_bus_wr->hw_ops.process_cmd( + core_info->sfe_bus_wr->bus_priv, cmd_type, + cmd_args, arg_size); + + /* propagate to SFE bus rd */ + core_info->sfe_bus_rd->hw_ops.process_cmd( + core_info->sfe_bus_rd->bus_priv, cmd_type, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_QUERY_CAP: + /* propagate to SFE top */ + core_info->sfe_top->hw_ops.process_cmd( + core_info->sfe_top->top_priv, cmd_type, + cmd_args, arg_size); + + /* propagate to SFE bus wr */ + core_info->sfe_bus_wr->hw_ops.process_cmd( + core_info->sfe_bus_wr->bus_priv, cmd_type, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA: + *((struct cam_hw_soc_info **)cmd_args) = soc_info; + rc = 0; + break; + default: + CAM_ERR(CAM_SFE, "Invalid cmd type: %d", cmd_type); + rc = -EINVAL; + break; + } + + return rc; +} + +irqreturn_t cam_sfe_irq(int irq_num, void *data) +{ + struct cam_hw_info *sfe_hw; + struct cam_sfe_hw_core_info *core_info; + + if (!data) + return IRQ_NONE; + + sfe_hw = (struct cam_hw_info *)data; + core_info = (struct cam_sfe_hw_core_info *)sfe_hw->core_info; + + return cam_irq_controller_handle_irq(irq_num, + core_info->sfe_irq_controller, CAM_IRQ_EVT_GROUP_0); +} + +int cam_sfe_core_init( + struct cam_sfe_hw_core_info *core_info, + struct cam_hw_soc_info *soc_info, + 
struct cam_hw_intf *hw_intf, + struct cam_sfe_hw_info *sfe_hw_info) +{ + int rc = -EINVAL; + + rc = cam_irq_controller_init(drv_name, + CAM_SOC_GET_REG_MAP_START(soc_info, SFE_CORE_BASE_IDX), + sfe_hw_info->irq_reg_info, &core_info->sfe_irq_controller); + if (rc) { + CAM_ERR(CAM_SFE, "SFE irq controller init failed"); + return rc; + } + + rc = cam_sfe_top_init(sfe_hw_info->top_version, soc_info, hw_intf, + sfe_hw_info->top_hw_info, core_info->sfe_irq_controller, + &core_info->sfe_top); + if (rc) { + CAM_ERR(CAM_SFE, "SFE top init failed rc: %d", rc); + goto deinit_controller; + } + + rc = cam_sfe_bus_init(sfe_hw_info->bus_wr_version, BUS_TYPE_SFE_WR, + soc_info, hw_intf, sfe_hw_info->bus_wr_hw_info, + core_info->sfe_irq_controller, + &core_info->sfe_bus_wr); + if (rc) { + CAM_ERR(CAM_SFE, "SFE bus wr init failed rc: %d", rc); + goto deinit_top; + } + + rc = cam_sfe_bus_init(sfe_hw_info->bus_rd_version, BUS_TYPE_SFE_RD, + soc_info, hw_intf, sfe_hw_info->bus_rd_hw_info, + core_info->sfe_irq_controller, + &core_info->sfe_bus_rd); + if (rc) { + CAM_ERR(CAM_SFE, "SFE bus rd init failed rc: %d", rc); + goto deinit_bus_wr; + } + + spin_lock_init(&core_info->spin_lock); + CAM_DBG(CAM_SFE, "SFE device [%u] INIT success", + hw_intf->hw_idx); + return rc; + +deinit_bus_wr: + cam_sfe_bus_deinit(sfe_hw_info->bus_wr_version, + BUS_TYPE_SFE_WR, &core_info->sfe_bus_wr); +deinit_top: + cam_sfe_top_deinit(sfe_hw_info->top_version, + &core_info->sfe_top); +deinit_controller: + if (cam_irq_controller_deinit(&core_info->sfe_irq_controller)) + CAM_ERR(CAM_SFE, + "Error cam_irq_controller_deinit failed rc=%d", rc); + + return rc; +} + +int cam_sfe_core_deinit( + struct cam_sfe_hw_core_info *core_info, + struct cam_sfe_hw_info *sfe_hw_info) +{ + int rc = -EINVAL; + unsigned long flags; + + spin_lock_irqsave(&core_info->spin_lock, flags); + + rc = cam_sfe_bus_deinit(sfe_hw_info->bus_rd_version, + BUS_TYPE_SFE_RD, &core_info->sfe_bus_rd); + if (rc) + CAM_ERR(CAM_SFE, + "SFE bus rd deinit 
failed rc: %d", rc); + + rc = cam_sfe_bus_deinit(sfe_hw_info->bus_wr_version, + BUS_TYPE_SFE_WR, &core_info->sfe_bus_wr); + if (rc) + CAM_ERR(CAM_SFE, + "SFE bus wr deinit failed rc: %d", rc); + + rc = cam_sfe_top_deinit(sfe_hw_info->top_version, + &core_info->sfe_top); + if (rc) + CAM_ERR(CAM_SFE, + "SFE top deinit failed rc: %d", rc); + + rc = cam_irq_controller_deinit(&core_info->sfe_irq_controller); + if (rc) + CAM_ERR(CAM_SFE, + "Error cam_irq_controller_deinit failed rc=%d", rc); + + spin_unlock_irqrestore(&core_info->spin_lock, flags); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_core.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_core.h new file mode 100644 index 0000000000..a8894aa208 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_core.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_SFE_CORE_H_ +#define _CAM_SFE_CORE_H_ + +#include +#include "cam_hw_intf.h" +#include "cam_sfe_hw_intf.h" +#include "cam_sfe_bus.h" +#include "cam_sfe_top.h" + +#define CAM_SFE_EVT_MAX 256 + +struct cam_sfe_hw_info { + struct cam_irq_controller_reg_info *irq_reg_info; + + uint32_t bus_wr_version; + void *bus_wr_hw_info; + + uint32_t bus_rd_version; + void *bus_rd_hw_info; + + uint32_t top_version; + void *top_hw_info; +}; + +struct cam_sfe_hw_core_info { + struct cam_sfe_hw_info *sfe_hw_info; + struct cam_sfe_top *sfe_top; + struct cam_sfe_bus *sfe_bus_wr; + struct cam_sfe_bus *sfe_bus_rd; + void *sfe_irq_controller; + void *tasklet_info; + spinlock_t spin_lock; +}; + +/* + * Debug config to enable interrupts and debug features + */ +#define SFE_DEBUG_ENABLE_SOF_EOF_IRQ BIT(0) +#define SFE_DEBUG_ENABLE_SENSOR_DIAG_INFO BIT(1) +#define SFE_DEBUG_ENABLE_FRAME_COUNTER BIT(2) +#define SFE_DEBUG_ENABLE_RD_DONE_IRQ BIT(3) +#define SFE_DEBUG_DISABLE_MMU_PREFETCH BIT(4) +#define SFE_DEBUG_ENABLE_TESTBUS1 BIT(8) +#define SFE_DEBUG_ENABLE_TESTBUS2 BIT(9) + +/* Reserve 4 bits for future test-busses in debug config */ +#define SFE_DEBUG_ENABLE_TESTBUS_RESERVED1 BIT(10) +#define SFE_DEBUG_ENABLE_TESTBUS_RESERVED2 BIT(11) +#define SFE_DEBUG_ENABLE_TESTBUS_RESERVED3 BIT(12) +#define SFE_DEBUG_ENABLE_TESTBUS_RESERVED4 BIT(13) + +int cam_sfe_get_hw_caps(void *device_priv, + void *get_hw_cap_args, uint32_t arg_size); +int cam_sfe_reset(void *device_priv, + void *reset_core_args, uint32_t arg_size); +int cam_sfe_read(void *device_priv, + void *read_args, uint32_t arg_size); +int cam_sfe_write(void *device_priv, + void *write_args, uint32_t arg_size); +int cam_sfe_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_sfe_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size); +int cam_sfe_reserve(void *device_priv, + void *reserve_args, uint32_t arg_size); +int cam_sfe_release(void *device_priv, + void *reserve_args, uint32_t 
arg_size); +int cam_sfe_start(void *device_priv, + void *start_args, uint32_t arg_size); +int cam_sfe_stop(void *device_priv, + void *stop_args, uint32_t arg_size); +int cam_sfe_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +int cam_sfe_test_irq_line(void *hw_priv); + +irqreturn_t cam_sfe_irq(int irq_num, void *data); + +int cam_sfe_core_init(struct cam_sfe_hw_core_info *core_info, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + struct cam_sfe_hw_info *sfe_hw_info); + +int cam_sfe_core_deinit(struct cam_sfe_hw_core_info *core_info, + struct cam_sfe_hw_info *sfe_hw_info); + +#endif /* _CAM_SFE_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_dev.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_dev.c new file mode 100644 index 0000000000..a68330cba9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_dev.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "cam_sfe_dev.h" +#include "cam_sfe_core.h" +#include "cam_sfe_soc.h" +#include "cam_sfe680.h" +#include "cam_sfe780.h" +#include "cam_sfe880.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static struct cam_isp_hw_intf_data cam_sfe_hw_list[CAM_SFE_HW_NUM_MAX]; +static uint32_t g_num_sfe_hws; + +static int cam_sfe_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + + struct cam_hw_info *sfe_info = NULL; + struct cam_hw_intf *sfe_hw_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_sfe_hw_core_info *core_info = NULL; + struct cam_sfe_hw_info *hw_info = NULL; + struct platform_device *pdev = NULL; + struct cam_sfe_soc_private *soc_priv; + uint32_t sfe_dev_idx; + int i, rc = 0; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + pdev = to_platform_device(dev); + + rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &sfe_dev_idx); + if (rc) { + CAM_ERR(CAM_SFE, "Failed to read cell-index of SFE HW, rc: %d", rc); + goto end; + } + + if (!cam_cpas_is_feature_supported(CAM_CPAS_SFE_FUSE, BIT(sfe_dev_idx), NULL)) { + CAM_DBG(CAM_SFE, "SFE:%d is not supported", sfe_dev_idx); + goto end; + } + + sfe_hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!sfe_hw_intf) { + rc = -ENOMEM; + goto end; + } + + sfe_info = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!sfe_info) { + rc = -ENOMEM; + goto free_sfe_hw_intf; + } + + sfe_info->soc_info.pdev = pdev; + sfe_info->soc_info.dev = &pdev->dev; + sfe_info->soc_info.dev_name = pdev->name; + sfe_hw_intf->hw_priv = sfe_info; + sfe_hw_intf->hw_idx = sfe_dev_idx; + sfe_hw_intf->hw_ops.get_hw_caps = cam_sfe_get_hw_caps; + sfe_hw_intf->hw_ops.init = cam_sfe_init_hw; + sfe_hw_intf->hw_ops.deinit = cam_sfe_deinit_hw; + 
sfe_hw_intf->hw_ops.reset = cam_sfe_reset; + sfe_hw_intf->hw_ops.reserve = cam_sfe_reserve; + sfe_hw_intf->hw_ops.release = cam_sfe_release; + sfe_hw_intf->hw_ops.start = cam_sfe_start; + sfe_hw_intf->hw_ops.stop = cam_sfe_stop; + sfe_hw_intf->hw_ops.read = cam_sfe_read; + sfe_hw_intf->hw_ops.write = cam_sfe_write; + sfe_hw_intf->hw_ops.process_cmd = cam_sfe_process_cmd; + sfe_hw_intf->hw_ops.test_irq_line = cam_sfe_test_irq_line; + sfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_SFE; + + CAM_DBG(CAM_SFE, "SFE component bind type %d index %d", + sfe_hw_intf->hw_type, sfe_hw_intf->hw_idx); + + platform_set_drvdata(pdev, sfe_hw_intf); + + sfe_info->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_hw_core_info), + GFP_KERNEL); + if (!sfe_info->core_info) { + CAM_DBG(CAM_SFE, "Failed to alloc for core"); + rc = -ENOMEM; + goto free_sfe_hw; + } + core_info = (struct cam_sfe_hw_core_info *)sfe_info->core_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_SFE, "Of_match Failed"); + rc = -EINVAL; + goto free_core_info; + } + hw_info = (struct cam_sfe_hw_info *)match_dev->data; + core_info->sfe_hw_info = hw_info; + + rc = cam_sfe_init_soc_resources(&sfe_info->soc_info, cam_sfe_irq, + sfe_info); + if (rc < 0) { + CAM_ERR(CAM_SFE, "Failed to init soc rc=%d", rc); + goto free_core_info; + } + + rc = cam_sfe_core_init(core_info, &sfe_info->soc_info, + sfe_hw_intf, hw_info); + if (rc < 0) { + CAM_ERR(CAM_SFE, "Failed to init core rc=%d", rc); + goto deinit_soc; + } + + sfe_info->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&sfe_info->hw_mutex); + spin_lock_init(&sfe_info->hw_lock); + init_completion(&sfe_info->hw_complete); + + if (sfe_hw_intf->hw_idx < CAM_SFE_HW_NUM_MAX) + cam_sfe_hw_list[sfe_hw_intf->hw_idx].hw_intf = sfe_hw_intf; + + soc_priv = sfe_info->soc_info.soc_private; + cam_sfe_hw_list[sfe_hw_intf->hw_idx].num_hw_pid = soc_priv->num_pid; + for (i = 0; i < soc_priv->num_pid; i++) + 
cam_sfe_hw_list[sfe_hw_intf->hw_idx].hw_pid[i] = + soc_priv->pid[i]; + + sfe_info->soc_info.hw_id = CAM_HW_ID_SFE0 + sfe_info->soc_info.index; + rc = cam_vmvm_populate_hw_instance_info(&sfe_info->soc_info, NULL, NULL); + if (rc) { + CAM_ERR(CAM_SFE, " hw instance populate failed: %d", rc); + goto deinit_soc; + } + + CAM_DBG(CAM_SFE, "SFE%d bound successfully", + sfe_hw_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +deinit_soc: + if (cam_sfe_deinit_soc_resources(&sfe_info->soc_info)) + CAM_ERR(CAM_SFE, "Failed to deinit soc"); +free_core_info: + CAM_MEM_FREE(sfe_info->core_info); +free_sfe_hw: + CAM_MEM_FREE(sfe_info); +free_sfe_hw_intf: + CAM_MEM_FREE(sfe_hw_intf); +end: + return rc; +} + +static void cam_sfe_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + + struct cam_hw_info *sfe_info = NULL; + struct cam_hw_intf *sfe_hw_intf = NULL; + struct cam_sfe_hw_core_info *core_info = NULL; + struct platform_device *pdev = NULL; + int rc = 0; + + pdev = to_platform_device(dev); + sfe_hw_intf = platform_get_drvdata(pdev); + if (!sfe_hw_intf) { + CAM_ERR(CAM_SFE, "Error! 
No data in pdev"); + return; + } + + CAM_DBG(CAM_SFE, "SFE component unbound type %d index %d", + sfe_hw_intf->hw_type, sfe_hw_intf->hw_idx); + + if (sfe_hw_intf->hw_idx < CAM_SFE_HW_NUM_MAX) + cam_sfe_hw_list[sfe_hw_intf->hw_idx].hw_intf = NULL; + + sfe_info = sfe_hw_intf->hw_priv; + if (!sfe_info) { + CAM_ERR(CAM_SFE, "HW data is NULL"); + goto free_sfe_hw_intf; + } + + core_info = (struct cam_sfe_hw_core_info *)sfe_info->core_info; + if (!core_info) { + CAM_ERR(CAM_SFE, "core data NULL"); + goto deinit_soc; + } + + rc = cam_sfe_core_deinit(core_info, core_info->sfe_hw_info); + if (rc < 0) + CAM_ERR(CAM_SFE, "Failed to deinit core rc=%d", rc); + + CAM_MEM_FREE(sfe_info->core_info); + +deinit_soc: + rc = cam_sfe_deinit_soc_resources(&sfe_info->soc_info); + if (rc < 0) + CAM_ERR(CAM_SFE, "Failed to deinit soc rc=%d", rc); + + mutex_destroy(&sfe_info->hw_mutex); + CAM_MEM_FREE(sfe_info); + + CAM_DBG(CAM_SFE, "SFE%d remove successful", sfe_hw_intf->hw_idx); + +free_sfe_hw_intf: + CAM_MEM_FREE(sfe_hw_intf); +} + +const static struct component_ops cam_sfe_component_ops = { + .bind = cam_sfe_component_bind, + .unbind = cam_sfe_component_unbind, +}; + +void cam_sfe_get_num_hws(uint32_t *sfe_num) +{ + if (sfe_num) + *sfe_num = g_num_sfe_hws; + else + CAM_ERR(CAM_SFE, "Invalid argument, g_num_sfe_hws: %u", g_num_sfe_hws); +} + +int cam_sfe_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_SFE, "Adding SFE component"); + g_num_sfe_hws++; + + rc = component_add(&pdev->dev, &cam_sfe_component_ops); + if (rc) + CAM_ERR(CAM_SFE, "failed to add component rc: %d", rc); + + return rc; +} + +int cam_sfe_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_sfe_component_ops); + return 0; +} + +int cam_sfe_hw_init(struct cam_isp_hw_intf_data **sfe_hw, uint32_t hw_idx) +{ + int rc = 0; + + if (cam_sfe_hw_list[hw_idx].hw_intf) { + *sfe_hw = &cam_sfe_hw_list[hw_idx]; + rc = 0; + } else { + *sfe_hw = NULL; + rc = -ENODEV; + } + + return rc; +} + 
+static const struct of_device_id cam_sfe_dt_match[] = { + { + .compatible = "qcom,sfe680", + .data = &cam_sfe680_hw_info, + }, + { + .compatible = "qcom,sfe780", + .data = &cam_sfe780_hw_info, + }, + { + .compatible = "qcom,sfe880", + .data = &cam_sfe880_hw_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_sfe_dt_match); + +struct platform_driver cam_sfe_driver = { + .probe = cam_sfe_probe, + .remove = cam_sfe_remove, + .driver = { + .name = "cam_sfe", + .owner = THIS_MODULE, + .of_match_table = cam_sfe_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_sfe_init_module(void) +{ + return platform_driver_register(&cam_sfe_driver); +} + + +void cam_sfe_exit_module(void) +{ + platform_driver_unregister(&cam_sfe_driver); +} + +MODULE_DESCRIPTION("CAM SFE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_dev.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_dev.h new file mode 100644 index 0000000000..d00bcc6144 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_dev.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_SFE_DEV_H_ +#define _CAM_SFE_DEV_H_ + +/** + * @brief : API to register SFE hw to platform framework. + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_init_module(void); + +/** + * @brief : API to remove SFE Hw from platform framework. 
+ * @Return: 0: Success + * Non-zero: Failure + */ +void cam_sfe_exit_module(void); + +#endif /* _CAM_SFE_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_soc.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_soc.c new file mode 100644 index 0000000000..250b787518 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_soc.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_cpas_api.h" +#include "cam_sfe_soc.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +static int cam_sfe_get_dt_properties(struct cam_hw_soc_info *soc_info) +{ + struct cam_sfe_soc_private *soc_private = soc_info->soc_private; + struct platform_device *pdev = soc_info->pdev; + uint32_t num_pid = 0; + int i, rc = 0; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_SFE, "Error get DT properties failed rc=%d", rc); + goto end; + } + + rc = of_property_read_u32(pdev->dev.of_node, "rt-wrapper-base", + &soc_private->rt_wrapper_base); + if (rc) { + soc_private->rt_wrapper_base = 0; + CAM_DBG(CAM_ISP, "rc: %d Error reading rt_wrapper_base for core_idx: %u", + rc, soc_info->index); + rc = 0; + } + + soc_private->num_pid = 0; + num_pid = of_property_count_u32_elems(pdev->dev.of_node, "cam_hw_pid"); + CAM_DBG(CAM_SFE, "sfe:%d pid count %d", soc_info->index, num_pid); + + if (num_pid <= 0 || num_pid > CAM_ISP_HW_MAX_PID_VAL) + goto end; + + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(pdev->dev.of_node, "cam_hw_pid", i, + &soc_private->pid[i]); + + soc_private->num_pid = num_pid; +end: + return rc; +} + +static int cam_sfe_request_platform_resource( + struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler_func, void *data) 
+{ + int rc = 0, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, irq_handler_func, &(irq_data[0])); + if (rc) + CAM_ERR(CAM_SFE, + "Error Request platform resource failed rc=%d", rc); + + return rc; +} + +static int cam_sfe_release_platform_resource(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_ERR(CAM_SFE, + "Error Release platform resource failed rc=%d", rc); + + return rc; +} + +int cam_sfe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler_func, void *irq_data) +{ + int rc = 0; + struct cam_sfe_soc_private *soc_private; + struct cam_cpas_register_params cpas_register_param; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_soc_private), + GFP_KERNEL); + if (!soc_private) + return -ENOMEM; + + soc_info->soc_private = soc_private; + rc = cam_cpas_get_cpas_hw_version(&soc_private->cpas_version); + if (rc) { + CAM_ERR(CAM_SFE, "Error! 
Invalid cpas version rc=%d", rc); + goto free_soc_private; + } + soc_info->hw_version = soc_private->cpas_version; + + rc = cam_sfe_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_SFE, "Error Get DT properties failed rc=%d", rc); + goto free_soc_private; + } + + rc = cam_cpas_query_drv_enable(NULL, &soc_info->is_clk_drv_en); + if (rc) { + CAM_ERR(CAM_SFE, "Failed to query DRV enable rc:%d", rc); + goto free_soc_private; + } + + rc = cam_sfe_request_platform_resource(soc_info, + irq_handler_func, irq_data); + if (rc < 0) { + CAM_ERR(CAM_SFE, + "Error Request platform resources failed rc=%d", rc); + goto free_soc_private; + } + + strscpy(cpas_register_param.identifier, "sfe", + CAM_HW_IDENTIFIER_LENGTH); + cpas_register_param.cell_index = soc_info->index; + cpas_register_param.dev = soc_info->dev; + cpas_register_param.cam_cpas_client_cb = NULL; + cpas_register_param.userdata = soc_info; + rc = cam_cpas_register_client(&cpas_register_param); + if (rc) { + CAM_ERR(CAM_SFE, "CPAS registration failed rc=%d", rc); + goto release_soc; + } else { + soc_private->cpas_handle = cpas_register_param.client_handle; + } + + return rc; + +release_soc: + cam_soc_util_release_platform_resource(soc_info); +free_soc_private: + CAM_MEM_FREE(soc_private); + return rc; +} + +int cam_sfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_sfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_SFE, "Error soc_info NULL"); + return -ENODEV; + } + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_SFE, "Error! 
soc_private NULL"); + return -ENODEV; + } + + rc = cam_cpas_unregister_client(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_SFE, "CPAS unregistration failed rc=%d", rc); + + rc = cam_sfe_release_platform_resource(soc_info); + if (rc < 0) + CAM_ERR(CAM_SFE, + "Error Release platform resources failed rc=%d", rc); + + CAM_MEM_FREE(soc_private); + return rc; +} + +int cam_sfe_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_sfe_soc_private *soc_private; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + + if (!soc_info) { + CAM_ERR(CAM_SFE, "Error! Invalid params"); + rc = -EINVAL; + goto end; + } + soc_private = soc_info->soc_private; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 1; + axi_vote.axi_path[0].path_data_type = CAM_CPAS_API_PATH_DATA_STD_START; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + + rc = cam_cpas_start(soc_private->cpas_handle, + &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_SFE, "CPAS start failed rc=%d", rc); + rc = -EFAULT; + goto end; + } + + rc = cam_cpas_query_drv_enable(NULL, &soc_info->is_clk_drv_en); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to query DRV enable rc:%d", rc); + return rc; + } + + /* + * using sfe index - assuming csid and sfe are one to one map on chipsets where + * cesta is present. + */ + rc = cam_soc_util_enable_platform_resource(soc_info, + (soc_info->is_clk_drv_en ? 
soc_info->index : CAM_CLK_SW_CLIENT_IDX), true, + soc_info->lowest_clk_level, true); + if (rc) { + CAM_ERR(CAM_SFE, "Enable platform failed rc=%d", rc); + goto stop_cpas; + } + + return rc; + +stop_cpas: + cam_cpas_stop(soc_private->cpas_handle); +end: + return rc; +} + +int cam_sfe_soc_enable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name) +{ + return -EPERM; +} + +int cam_sfe_soc_disable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name) +{ + return -EPERM; +} + +int cam_sfe_disable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_sfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_SFE, "Invalid params"); + rc = -EINVAL; + return rc; + } + + soc_private = soc_info->soc_private; + + rc = cam_soc_util_disable_platform_resource(soc_info, + (soc_info->is_clk_drv_en ? soc_info->index : CAM_CLK_SW_CLIENT_IDX), true, true); + if (rc) + CAM_ERR(CAM_SFE, "Disable platform failed rc=%d", rc); + + rc = cam_cpas_stop(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_SFE, "CPAS stop failed rc=%d", rc); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_soc.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_soc.h new file mode 100644 index 0000000000..53fa5296f7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/cam_sfe_soc.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_SFE_SOC_H_ +#define _CAM_SFE_SOC_H_ + +#include "cam_soc_util.h" +#include "cam_isp_hw.h" + +/* + * struct cam_sfe_soc_private: + * + * @Brief: Private SOC data specific to SFE HW Driver + * + * @cpas_handle: Handle returned on registering with CPAS driver + * This handle is used for all further interface + * with CPAS. 
+ * @cpas_version: CPAS version + * @* @rt_wrapper_base: Base address of the RT-Wrapper if the hw is in rt-wrapper + * @num_pid: Number of pids of sfe + * @pid: SFE pid values list + */ +struct cam_sfe_soc_private { + uint32_t cpas_handle; + uint32_t cpas_version; + uint32_t rt_wrapper_base; + uint32_t num_pid; + uint32_t pid[CAM_ISP_HW_MAX_PID_VAL]; +}; + +/* + * cam_sfe_init_soc_resources() + * + * @Brief: Initialize SOC resources including private data + * + * @soc_info: Device soc information + * @handler: Irq handler function pointer + * @irq_data: Irq handler function Callback data + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t sfe_irq_handler, void *irq_data); + +/* + * cam_sfe_deinit_soc_resources() + * + * @Brief: Deinitialize SOC resources including private data + * + * @soc_info: Device soc information + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info); + +/* + * cam_sfe_enable_soc_resources() + * + * @brief: Enable regulator, irq resources, Clocks + * + * @soc_info: Device soc information + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_enable_soc_resources(struct cam_hw_soc_info *soc_info); + +/* + * cam_sfe_disable_soc_resources() + * + * @brief: Disable regulator, irq resources, Clocks + * + * @soc_info: Device soc information + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_disable_soc_resources(struct cam_hw_soc_info *soc_info); + +/* + * cam_sfe_soc_enable_clk() + * + * @brief: Enable clock wsfe given name + * + * @soc_info: Device soc information + * @clk_name: Name of clock to enable + * + * @Return: 0: Success + * Non-zero: Failure + */ +extern int cam_sfe_soc_enable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name); + +/* + * cam_sfe_soc_disable_clk() + * + * @brief: Disable clock wsfe given name + * + * @soc_info: Device soc 
information + * @clk_name: Name of clock to enable + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_soc_disable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name); + +#endif /* _CAM_SFE_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus.c new file mode 100644 index 0000000000..eb7df348a6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_sfe_bus.h" +#include "cam_sfe_bus_rd.h" +#include "cam_sfe_bus_wr.h" +#include "cam_debug_util.h" + +int cam_sfe_bus_init( + uint32_t bus_version, + int bus_type, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *sfe_irq_controller, + struct cam_sfe_bus **sfe_bus) +{ + int rc = -ENODEV; + + switch (bus_type) { + case BUS_TYPE_SFE_WR: + switch (bus_version) { + case CAM_SFE_BUS_WR_VER_1_0: + rc = cam_sfe_bus_wr_init(soc_info, hw_intf, + bus_hw_info, sfe_irq_controller, sfe_bus); + break; + default: + CAM_ERR(CAM_SFE, "Unsupported Bus WR Version 0x%x", + bus_version); + break; + } + break; + case BUS_TYPE_SFE_RD: + switch (bus_version) { + case CAM_SFE_BUS_RD_VER_1_0: + rc = cam_sfe_bus_rd_init(soc_info, hw_intf, + bus_hw_info, sfe_irq_controller, sfe_bus); + break; + default: + CAM_ERR(CAM_SFE, "Unsupported Bus RD Version 0x%x", + bus_version); + break; + } + break; + default: + CAM_ERR(CAM_SFE, "Unsupported Bus type %d", bus_type); + break; + } + + return rc; +} + +int cam_sfe_bus_deinit( + uint32_t bus_version, + int bus_type, + struct cam_sfe_bus **sfe_bus) +{ + int rc = -ENODEV; + + switch (bus_type) { + 
case BUS_TYPE_SFE_WR: + switch (bus_version) { + case CAM_SFE_BUS_WR_VER_1_0: + rc = cam_sfe_bus_wr_deinit(sfe_bus); + break; + default: + CAM_ERR(CAM_SFE, "Unsupported Bus WR Version 0x%x", + bus_version); + break; + } + break; + case BUS_TYPE_SFE_RD: + switch (bus_version) { + case CAM_SFE_BUS_RD_VER_1_0: + rc = cam_sfe_bus_rd_deinit(sfe_bus); + break; + default: + CAM_ERR(CAM_SFE, "Unsupported Bus RD Version 0x%x", + bus_version); + break; + } + break; + default: + CAM_ERR(CAM_SFE, "Unsupported Bus type %d", bus_type); + break; + } + + return rc; +} + +static inline int __cam_sfe_bus_validate_alloc_type( + uint32_t alloc_type) { + + if ((alloc_type >= CACHE_ALLOC_NONE) && + (alloc_type <= CACHE_ALLOC_TBH_ALLOC)) + return 1; + else + return 0; +} + +void cam_sfe_bus_parse_cache_cfg( + bool is_read, + uint32_t debug_val, + struct cam_sfe_bus_cache_dbg_cfg *dbg_cfg) +{ + uint32_t scratch_alloc_shift = 0, buf_alloc_shift = 0; + uint32_t scratch_cfg, buf_cfg, alloc_type; + + if (debug_val == DISABLE_CACHING_FOR_ALL) { + dbg_cfg->disable_all = true; + goto end; + } + + if (is_read) { + scratch_alloc_shift = CACHE_SCRATCH_RD_ALLOC_SHIFT; + buf_alloc_shift = CACHE_BUF_RD_ALLOC_SHIFT; + } else { + scratch_alloc_shift = CACHE_SCRATCH_WR_ALLOC_SHIFT; + buf_alloc_shift = CACHE_BUF_WR_ALLOC_SHIFT; + } + + scratch_cfg = (debug_val >> CACHE_SCRATCH_DEBUG_SHIFT) & 0xF; + buf_cfg = (debug_val >> CACHE_BUF_DEBUG_SHIFT) & 0xF; + dbg_cfg->print_cache_cfg = (bool)(debug_val >> CACHE_BUF_PRINT_DBG_SHIFT); + + /* Check for scratch cfg */ + if (scratch_cfg == 0xF) { + dbg_cfg->disable_for_scratch = true; + } else if (scratch_cfg == 1) { + alloc_type = + (debug_val >> scratch_alloc_shift) & 0xF; + if (__cam_sfe_bus_validate_alloc_type(alloc_type)) { + dbg_cfg->scratch_alloc = alloc_type; + dbg_cfg->scratch_dbg_cfg = true; + } + dbg_cfg->disable_for_scratch = false; + } else { + /* Reset to default */ + dbg_cfg->disable_for_scratch = false; + dbg_cfg->scratch_dbg_cfg = false; + } + + /* 
Check for buf cfg */ + if (buf_cfg == 0xF) { + dbg_cfg->disable_for_buf = true; + } else if (buf_cfg == 1) { + alloc_type = + (debug_val >> buf_alloc_shift) & 0xF; + if (__cam_sfe_bus_validate_alloc_type(alloc_type)) { + dbg_cfg->buf_alloc = alloc_type; + dbg_cfg->buf_dbg_cfg = true; + } + dbg_cfg->disable_for_buf = false; + } else { + /* Reset to default */ + dbg_cfg->disable_for_buf = false; + dbg_cfg->buf_dbg_cfg = false; + } + + /* Reset to default */ + dbg_cfg->disable_all = false; + +end: + return; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.c new file mode 100644 index 0000000000..5fe6d9fe09 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.c @@ -0,0 +1,2259 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include + +#include + +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_hw_intf.h" +#include "cam_ife_hw_mgr.h" +#include "cam_sfe_hw_intf.h" +#include "cam_tasklet_util.h" +#include "cam_sfe_bus.h" +#include "cam_sfe_bus_rd.h" +#include "cam_sfe_core.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +static const char drv_name[] = "sfe_bus_rd"; + +#define MAX_BUF_UPDATE_REG_NUM \ + (sizeof(struct cam_sfe_bus_rd_reg_offset_bus_client)/4) + +#define MAX_REG_VAL_PAIR_SIZE \ + (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES) + +#define CAM_SFE_BUS_RD_PAYLOAD_MAX 16 + +enum cam_sfe_bus_rd_unpacker_format { + BUS_RD_UNPACKER_FMT_PLAIN_128 = 0x0, + BUS_RD_UNPACKER_FMT_PLAIN_8 = 0x1, + BUS_RD_UNPACKER_FMT_PLAIN_16_10BPP = 0x2, + BUS_RD_UNPACKER_FMT_PLAIN_16_12BPP = 0x3, + BUS_RD_UNPACKER_FMT_PLAIN_16_14BPP = 0x4, + BUS_RD_UNPACKER_FMT_PLAIN_32_20BPP = 0x5, + BUS_RD_UNPACKER_FMT_ARGB_10 = 0x6, + BUS_RD_UNPACKER_FMT_ARGB_12 = 0x7, + BUS_RD_UNPACKER_FMT_ARGB_14 = 0x8, + BUS_RD_UNPACKER_FMT_PLAIN_32 = 0x9, + BUS_RD_UNPACKER_FMT_PLAIN_64 = 0xA, + BUS_RD_UNPACKER_FMT_TP_10 = 0xB, + BUS_RD_UNPACKER_FMT_MIPI8 = 0xC, + BUS_RD_UNPACKER_FMT_MIPI10 = 0xD, + BUS_RD_UNPACKER_FMT_MIPI12 = 0xE, + BUS_RD_UNPACKER_FMT_MIPI14 = 0xF, + BUS_RD_UNPACKER_FMT_PLAIN_16_16BPP = 0x10, + BUS_RD_UNPACKER_FMT_PLAIN_128_ODD_EVEN = 0x11, + BUS_RD_UNPACKER_FMT_PLAIN_8_ODD_EVEN = 0x12, + BUS_RD_UNPACKER_FMT_MAX = 0x13, +}; + +struct cam_sfe_bus_rd_common_data { + uint32_t core_index; + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_sfe_bus_rd_reg_offset_common *common_reg; + struct cam_hw_soc_info *soc_info; + uint32_t io_buf_update[ + MAX_REG_VAL_PAIR_SIZE]; + void *bus_irq_controller; + void *sfe_irq_controller; + spinlock_t spin_lock; + struct list_head free_payload_list; + struct cam_sfe_bus_rd_irq_evt_payload evt_payload[ + CAM_SFE_BUS_RD_PAYLOAD_MAX]; + 
cam_hw_mgr_event_cb_func event_cb; + uint32_t sfe_debug_cfg; + uint32_t irq_err_mask; + + struct cam_sfe_bus_cache_dbg_cfg cache_dbg_cfg; + uint32_t cons_chk_en_val; + bool cons_chk_en_avail; + bool err_irq_subscribe; +}; + +struct cam_sfe_bus_rd_rm_resource_data { + struct cam_sfe_bus_rd_common_data *common_data; + struct cam_sfe_bus_rd_reg_offset_bus_client *hw_regs; + void *ctx; + uint32_t index; + uint32_t min_vbi; + uint32_t fs_mode; + uint32_t hbi_count; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t format; + uint32_t latency_buf_allocation; + uint32_t unpacker_cfg; + uint32_t burst_len; + uint32_t en_cfg; + uint32_t input_if_cmd; + uint32_t cache_cfg; + uint32_t current_scid; + uint32_t offset; + bool enable_caching; + bool enable_disable_cfg_done; +}; + +struct cam_sfe_bus_rd_data { + uint32_t bus_rd_type; + struct cam_sfe_bus_rd_common_data *common_data; + struct cam_sfe_bus_rd_priv *bus_priv; + + uint32_t num_rm; + struct cam_isp_resource_node *rm_res[PLANE_MAX]; + uint32_t format; + uint32_t max_width; + uint32_t max_height; + struct cam_cdm_utils_ops *cdm_util_ops; + void *priv; + uint32_t secure_mode; + uint32_t fs_sync_enable; + bool is_offline; +}; + +struct cam_sfe_bus_rd_priv { + struct cam_sfe_bus_rd_common_data common_data; + uint32_t num_client; + uint32_t num_bus_rd_resc; + + struct cam_isp_resource_node bus_client[ + CAM_SFE_BUS_RD_MAX_CLIENTS]; + struct cam_isp_resource_node sfe_bus_rd[ + CAM_SFE_BUS_RD_MAX]; + struct cam_sfe_bus_rd_hw_info *bus_rd_hw_info; + int irq_handle; + int error_irq_handle; + void *tasklet_info; + uint32_t top_irq_shift; + uint32_t latency_buf_allocation; + uint32_t sys_cache_default_cfg; +}; + +static void cam_sfe_bus_rd_pxls_to_bytes(uint32_t pxls, uint32_t fmt, + uint32_t *bytes) +{ + switch (fmt) { + case BUS_RD_UNPACKER_FMT_PLAIN_128: + *bytes = pxls * 16; + break; + case BUS_RD_UNPACKER_FMT_PLAIN_8: + case BUS_RD_UNPACKER_FMT_PLAIN_8_ODD_EVEN: + case BUS_RD_UNPACKER_FMT_PLAIN_16_10BPP: + 
case BUS_RD_UNPACKER_FMT_PLAIN_16_12BPP: + case BUS_RD_UNPACKER_FMT_PLAIN_16_14BPP: + case BUS_RD_UNPACKER_FMT_PLAIN_16_16BPP: + case BUS_RD_UNPACKER_FMT_ARGB_10: + case BUS_RD_UNPACKER_FMT_ARGB_12: + case BUS_RD_UNPACKER_FMT_ARGB_14: + *bytes = pxls * 2; + break; + case BUS_RD_UNPACKER_FMT_PLAIN_32_20BPP: + case BUS_RD_UNPACKER_FMT_PLAIN_32: + *bytes = pxls * 4; + break; + case BUS_RD_UNPACKER_FMT_PLAIN_64: + *bytes = pxls * 8; + break; + case BUS_RD_UNPACKER_FMT_TP_10: + *bytes = ALIGNUP(pxls, 3) * 4 / 3; + break; + case BUS_RD_UNPACKER_FMT_MIPI8: + *bytes = pxls; + break; + case BUS_RD_UNPACKER_FMT_MIPI10: + *bytes = ALIGNUP(pxls * 5, 4) / 4; + break; + case BUS_RD_UNPACKER_FMT_MIPI12: + *bytes = ALIGNUP(pxls * 3, 2) / 2; + break; + case BUS_RD_UNPACKER_FMT_MIPI14: + *bytes = ALIGNUP(pxls * 7, 4) / 4; + break; + default: + CAM_ERR(CAM_SFE, "Invalid unpacker_fmt:%d", fmt); + break; + } +} + +static enum cam_sfe_bus_rd_unpacker_format + cam_sfe_bus_get_unpacker_fmt(uint32_t unpack_fmt) +{ + switch (unpack_fmt) { + case CAM_FORMAT_PLAIN128: + return BUS_RD_UNPACKER_FMT_PLAIN_128; + case CAM_FORMAT_PLAIN8: + return BUS_RD_UNPACKER_FMT_PLAIN_8; + case CAM_FORMAT_PLAIN16_10: + return BUS_RD_UNPACKER_FMT_PLAIN_16_10BPP; + case CAM_FORMAT_PLAIN16_12: + return BUS_RD_UNPACKER_FMT_PLAIN_16_12BPP; + case CAM_FORMAT_PLAIN16_14: + return BUS_RD_UNPACKER_FMT_PLAIN_16_14BPP; + case CAM_FORMAT_PLAIN32_20: + return BUS_RD_UNPACKER_FMT_PLAIN_32_20BPP; + case CAM_FORMAT_ARGB_10: + return BUS_RD_UNPACKER_FMT_ARGB_10; + case CAM_FORMAT_ARGB_12: + return BUS_RD_UNPACKER_FMT_ARGB_12; + case CAM_FORMAT_ARGB_14: + return BUS_RD_UNPACKER_FMT_ARGB_14; + case CAM_FORMAT_PLAIN32: + case CAM_FORMAT_ARGB: + return BUS_RD_UNPACKER_FMT_PLAIN_32; + case CAM_FORMAT_PLAIN64: + case CAM_FORMAT_ARGB_16: + case CAM_FORMAT_PD10: + return BUS_RD_UNPACKER_FMT_PLAIN_64; + case CAM_FORMAT_TP10: + return BUS_RD_UNPACKER_FMT_TP_10; + case CAM_FORMAT_MIPI_RAW_8: + return BUS_RD_UNPACKER_FMT_MIPI8; + case 
CAM_FORMAT_MIPI_RAW_10: + return BUS_RD_UNPACKER_FMT_MIPI10; + case CAM_FORMAT_MIPI_RAW_12: + return BUS_RD_UNPACKER_FMT_MIPI12; + case CAM_FORMAT_MIPI_RAW_14: + return BUS_RD_UNPACKER_FMT_MIPI14; + case CAM_FORMAT_PLAIN16_16: + return BUS_RD_UNPACKER_FMT_PLAIN_16_16BPP; + case CAM_FORMAT_PLAIN8_SWAP: + return BUS_RD_UNPACKER_FMT_PLAIN_8_ODD_EVEN; + default: + return BUS_RD_UNPACKER_FMT_MAX; + } +} + +static enum cam_sfe_bus_rd_type + cam_sfe_bus_get_bus_rd_res_id(uint32_t res_type) +{ + switch (res_type) { + case CAM_ISP_SFE_IN_RD_0: + return CAM_SFE_BUS_RD_RDI0; + case CAM_ISP_SFE_IN_RD_1: + return CAM_SFE_BUS_RD_RDI1; + case CAM_ISP_SFE_IN_RD_2: + return CAM_SFE_BUS_RD_RDI2; + default: + return CAM_SFE_BUS_RD_MAX; + } +} + +static int cam_sfe_bus_get_num_rm( + enum cam_sfe_bus_rd_type res_type) +{ + switch (res_type) { + case CAM_SFE_BUS_RD_RDI0: + case CAM_SFE_BUS_RD_RDI1: + case CAM_SFE_BUS_RD_RDI2: + return 1; + default: + CAM_ERR(CAM_SFE, "Unsupported resource_type %u", res_type); + return -EINVAL; + } +} + +static int cam_sfe_bus_get_rm_idx( + enum cam_sfe_bus_rd_type sfe_bus_rd_res_id, + enum cam_sfe_bus_plane_type plane) +{ + int rm_idx = -1; + + switch (sfe_bus_rd_res_id) { + case CAM_SFE_BUS_RD_RDI0: + switch (plane) { + case PLANE_Y: + rm_idx = 0; + break; + default: + break; + } + break; + case CAM_SFE_BUS_RD_RDI1: + switch (plane) { + case PLANE_Y: + rm_idx = 1; + break; + default: + break; + } + break; + case CAM_SFE_BUS_RD_RDI2: + switch (plane) { + case PLANE_Y: + rm_idx = 2; + break; + default: + break; + } + break; + default: + break; + } + + return rm_idx; +} + +static int cam_sfe_bus_rd_get_evt_payload( + struct cam_sfe_bus_rd_common_data *common_data, + struct cam_sfe_bus_rd_irq_evt_payload **evt_payload) +{ + int rc = 0; + + spin_lock(&common_data->spin_lock); + + if (list_empty(&common_data->free_payload_list)) { + CAM_ERR_RATE_LIMIT(CAM_SFE, + "No free BUS RD event payload"); + *evt_payload = NULL; + rc = -ENODEV; + goto done; + } + + 
*evt_payload = list_first_entry(&common_data->free_payload_list, + struct cam_sfe_bus_rd_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); + +done: + spin_unlock(&common_data->spin_lock); + return rc; +} + +static int cam_sfe_bus_rd_put_evt_payload( + struct cam_sfe_bus_rd_common_data *common_data, + struct cam_sfe_bus_rd_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!common_data) { + CAM_ERR(CAM_SFE, "Invalid param common_data NULL"); + return -EINVAL; + } + + if (*evt_payload == NULL) { + CAM_ERR(CAM_SFE, "No payload to put"); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_sfe_bus_rd_irq_evt_payload); + spin_lock_irqsave(&common_data->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &common_data->free_payload_list); + spin_unlock_irqrestore(&common_data->spin_lock, flags); + + *evt_payload = NULL; + + return 0; +} + +static int cam_sfe_bus_rd_handle_irq( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_sfe_bus_rd_priv *bus_priv; + int rc = 0; + + bus_priv = th_payload->handler_priv; + CAM_DBG(CAM_SFE, "Top Bus RD IRQ Received"); + + rc = cam_irq_controller_handle_irq(evt_id, + bus_priv->common_data.bus_irq_controller, CAM_IRQ_EVT_GROUP_0); + + return (rc == IRQ_HANDLED) ? 
0 : -EINVAL; +} + +static int cam_sfe_bus_rd_print_dimensions( + enum cam_sfe_bus_rd_type bus_rd_res_id, + struct cam_sfe_bus_rd_priv *bus_rd_priv) +{ + int i; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_sfe_bus_rd_data *rsrc_data = NULL; + struct cam_sfe_bus_rd_rm_resource_data *rm_data; + struct cam_sfe_bus_rd_common_data *common_data; + + rsrc_node = &bus_rd_priv->sfe_bus_rd[bus_rd_res_id]; + rsrc_data = rsrc_node->res_priv; + + for (i = 0; i < rsrc_data->num_rm; i++) { + rm_data = rsrc_data->rm_res[i]->res_priv; + + if (!rm_data) { + CAM_WARN(CAM_SFE, "Unsupported SFE RD %d", bus_rd_res_id); + continue; + } + + common_data = rm_data->common_data; + CAM_INFO(CAM_SFE, + "SFE: %u RD: %u res_id: 0x%x width: %d height: %d stride: %d unpacker: 0x%x addr: 0x%x", + bus_rd_priv->common_data.core_index, i, rsrc_node->res_id, + rm_data->width, rm_data->height, rm_data->stride, rm_data->unpacker_cfg, + cam_io_r_mb(common_data->mem_base + rm_data->hw_regs->image_addr)); + } + + return 0; +} + +static void cam_sfe_bus_rd_print_constraint_error(struct cam_sfe_bus_rd_priv *bus_priv, + uint32_t cons_err, uint8_t *rm_name, uint32_t bus_rd_resc_type) +{ + struct cam_sfe_bus_rd_constraint_error_info *cons_err_info; + int i; + + cons_err_info = bus_priv->bus_rd_hw_info->constraint_error_info; + for (i = 0; i < cons_err_info->num_cons_err; i++) { + if (cons_err_info->constraint_error_list[i].bitmask & cons_err) + CAM_ERR(CAM_SFE, "RD resource type: RDI%u, Read master: %s, Error_desc: %s", + bus_rd_resc_type, rm_name, + cons_err_info->constraint_error_list[i].error_desc); + } +} + +static void cam_sfe_bus_rd_get_constraint_error(struct cam_sfe_bus_rd_priv *bus_priv, + uint32_t constraint_err_status) +{ + struct cam_isp_resource_node *sfe_bus_rd = NULL; + struct cam_sfe_bus_rd_data *sfe_bus_rd_data = NULL; + struct cam_sfe_bus_rd_rm_resource_data *rm_rsrc_data = NULL; + uint32_t bus_rd_resc_type, cons_err; + uint8_t *rm_name; + int i, j; + + for (i = 0; i < 
bus_priv->num_bus_rd_resc; i++) { + bus_rd_resc_type = + bus_priv->bus_rd_hw_info->sfe_bus_rd_info[i].sfe_bus_rd_type; + if (bus_rd_resc_type < 0 || + bus_rd_resc_type >= CAM_SFE_BUS_RD_MAX) { + CAM_ERR(CAM_SFE, "Invalid SFE RD resource type:%d", + bus_rd_resc_type); + return; + } + + sfe_bus_rd = &bus_priv->sfe_bus_rd[bus_rd_resc_type]; + if (!sfe_bus_rd || !sfe_bus_rd->res_priv) { + CAM_DBG(CAM_ISP, "SFE bus rd:%d in resc node or data is NULL", i); + continue; + } + + sfe_bus_rd_data = sfe_bus_rd->res_priv; + for (j = 0; j < sfe_bus_rd_data->num_rm; j++) { + rm_rsrc_data = sfe_bus_rd_data->rm_res[j]->res_priv; + if (!rm_rsrc_data) + continue; + + if (!(constraint_err_status & BIT(rm_rsrc_data->index))) + continue; + + rm_name = sfe_bus_rd_data->rm_res[j]->res_name; + cons_err = cam_io_r_mb( + bus_priv->common_data.mem_base + + rm_rsrc_data->hw_regs->debug_status_0); + if (!cons_err) + continue; + + cam_sfe_bus_rd_print_constraint_error(bus_priv, + cons_err, rm_name, bus_rd_resc_type); + } + } +} + +static int cam_sfe_bus_acquire_rm( + struct cam_sfe_bus_rd_priv *bus_rd_priv, + void *tasklet, + void *ctx, + enum cam_sfe_bus_rd_type sfe_bus_rd_res_id, + enum cam_sfe_bus_plane_type plane, + struct cam_isp_resource_node **rm_res, + uint32_t unpacker_fmt) +{ + uint32_t rm_idx = 0; + struct cam_isp_resource_node *rm_res_local = NULL; + struct cam_sfe_bus_rd_rm_resource_data *rsrc_data = NULL; + + *rm_res = NULL; + + rm_idx = cam_sfe_bus_get_rm_idx(sfe_bus_rd_res_id, plane); + if (rm_idx < 0 || rm_idx >= bus_rd_priv->num_client) { + CAM_ERR(CAM_SFE, "Unsupported SFE RM:%d plane:%d", + sfe_bus_rd_res_id, plane); + return -EINVAL; + } + + rm_res_local = &bus_rd_priv->bus_client[rm_idx]; + if (rm_res_local->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_DBG(CAM_SFE, "SFE:%u RM:%u res not available state:%d", + bus_rd_priv->common_data.core_index, rm_idx, + rm_res_local->res_state); + return -EALREADY; + } + rm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; 
+ rm_res_local->tasklet_info = tasklet; + + rsrc_data = rm_res_local->res_priv; + rsrc_data->ctx = ctx; + rsrc_data->unpacker_cfg = + cam_sfe_bus_get_unpacker_fmt(unpacker_fmt); + if (rsrc_data->unpacker_cfg == BUS_RD_UNPACKER_FMT_MAX) { + CAM_ERR(CAM_SFE, "SFE:%u RM:%u Invalid unpacker fmt:%u", + bus_rd_priv->common_data.core_index, rm_idx, unpacker_fmt); + return -EINVAL; + } + + rsrc_data->latency_buf_allocation = + bus_rd_priv->latency_buf_allocation; + rsrc_data->enable_caching = false; + rsrc_data->enable_disable_cfg_done = false; + rsrc_data->offset = 0; + /* Default register value */ + rsrc_data->cache_cfg = 0x20; + + *rm_res = rm_res_local; + + CAM_DBG(CAM_SFE, "SFE:%d RM:%d Acquired", + rsrc_data->common_data->core_index, rsrc_data->index); + return 0; +} + +static int cam_sfe_bus_release_rm(void *bus_priv, + struct cam_isp_resource_node *rm_res) +{ + struct cam_sfe_bus_rd_rm_resource_data *rsrc_data = + rm_res->res_priv; + + rsrc_data->width = 0; + rsrc_data->height = 0; + rsrc_data->stride = 0; + rsrc_data->format = 0; + rsrc_data->unpacker_cfg = 0; + rsrc_data->burst_len = 0; + rsrc_data->en_cfg = 0; + rsrc_data->enable_caching = false; + rsrc_data->offset = 0; + + rm_res->tasklet_info = NULL; + rm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + CAM_DBG(CAM_SFE, "SFE:%d RM:%d released", + rsrc_data->common_data->core_index, rsrc_data->index); + return 0; +} + +static int cam_sfe_bus_start_rm(struct cam_isp_resource_node *rm_res) +{ + uint32_t width_in_bytes = 0; + struct cam_sfe_bus_rd_rm_resource_data *rm_data; + struct cam_sfe_bus_rd_common_data *common_data; + uint32_t core_cfg_mask; + + rm_data = rm_res->res_priv; + common_data = rm_data->common_data; + + cam_sfe_bus_rd_pxls_to_bytes(rm_data->width, + rm_data->unpacker_cfg, &width_in_bytes); + + cam_io_w_mb(width_in_bytes, common_data->mem_base + + rm_data->hw_regs->buf_width); + cam_io_w_mb(rm_data->height, common_data->mem_base + + rm_data->hw_regs->buf_height); + 
cam_io_w_mb(rm_data->stride, common_data->mem_base + + rm_data->hw_regs->stride); + cam_io_w_mb(rm_data->unpacker_cfg, common_data->mem_base + + rm_data->hw_regs->unpacker_cfg); + cam_io_w_mb(rm_data->latency_buf_allocation, common_data->mem_base + + rm_data->hw_regs->latency_buf_allocation); + + /* Ignore if already configured via CDM */ + if (!rm_data->enable_disable_cfg_done) { + core_cfg_mask = CAM_SFE_BUS_RD_EN_CLIENT_CFG; + if (common_data->cons_chk_en_avail) + core_cfg_mask |= CAM_SFE_BUS_RD_EN_CONS_CHK_CFG; + cam_io_w_mb(core_cfg_mask, + common_data->mem_base + rm_data->hw_regs->cfg); + } + + /* Enable constraint error detection */ + cam_io_w_mb(common_data->cons_chk_en_val, + common_data->mem_base + rm_data->hw_regs->debug_status_cfg); + + rm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + CAM_DBG(CAM_SFE, + "Start SFE:%d RM:%d offset:0x%X width:%d [in bytes: %u] height:%d unpack_fmt:%d stride:%d latency_buf_alloc:%u", + rm_data->common_data->core_index, rm_data->index, + rm_data->offset, rm_data->width, width_in_bytes, + rm_data->height, rm_data->unpacker_cfg, rm_data->stride, + rm_data->latency_buf_allocation); + + return 0; +} + +static int cam_sfe_bus_stop_rm(struct cam_isp_resource_node *rm_res) +{ + struct cam_sfe_bus_rd_rm_resource_data *rsrc_data = + rm_res->res_priv; + struct cam_sfe_bus_rd_common_data *common_data = + rsrc_data->common_data; + + /* Disable RM */ + cam_io_w_mb(0x0, common_data->mem_base + rsrc_data->hw_regs->cfg); + + rm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + rsrc_data->enable_disable_cfg_done = false; + + CAM_DBG(CAM_SFE, "SFE:%d RM:%d stopped", + rsrc_data->common_data->core_index, rsrc_data->index); + + return 0; +} + +static int cam_sfe_bus_init_rm_resource(uint32_t index, + struct cam_sfe_bus_rd_priv *bus_rd_priv, + struct cam_sfe_bus_rd_hw_info *bus_rd_hw_info, + struct cam_isp_resource_node *rm_res) +{ + struct cam_sfe_bus_rd_rm_resource_data *rsrc_data; + uint8_t *name; + + rsrc_data = 
CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus_rd_rm_resource_data), + GFP_KERNEL); + if (!rsrc_data) { + CAM_DBG(CAM_SFE, "Failed to alloc SFE:%d RM res priv", + bus_rd_priv->common_data.core_index); + return -ENOMEM; + } + rm_res->res_priv = rsrc_data; + + rsrc_data->index = index; + rsrc_data->hw_regs = &bus_rd_hw_info->bus_client_reg[index]; + rsrc_data->common_data = &bus_rd_priv->common_data; + + rm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&rm_res->list); + + rm_res->start = cam_sfe_bus_start_rm; + rm_res->stop = cam_sfe_bus_stop_rm; + rm_res->hw_intf = bus_rd_priv->common_data.hw_intf; + name = bus_rd_priv->bus_rd_hw_info->bus_client_reg[index].name; + if (name) + scnprintf(rm_res->res_name, CAM_ISP_RES_NAME_LEN, + "%s", name); + + return 0; +} + +static int cam_sfe_bus_deinit_rm_resource( + struct cam_isp_resource_node *rm_res) +{ + struct cam_sfe_bus_rd_rm_resource_data *rsrc_data; + + rm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&rm_res->list); + + rm_res->start = NULL; + rm_res->stop = NULL; + rm_res->top_half_handler = NULL; + rm_res->bottom_half_handler = NULL; + rm_res->hw_intf = NULL; + + rsrc_data = rm_res->res_priv; + rm_res->res_priv = NULL; + if (!rsrc_data) + return -ENOMEM; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static int cam_sfe_bus_rd_out_done_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc = 0, i; + struct cam_isp_resource_node *sfe_rd = NULL; + struct cam_sfe_bus_rd_data *rsrc_data = NULL; + struct cam_sfe_bus_rd_irq_evt_payload *evt_payload; + + sfe_rd = th_payload->handler_priv; + if (!sfe_rd) { + CAM_ERR_RATE_LIMIT(CAM_SFE, "No SFE RD resource"); + return -ENODEV; + } + + rsrc_data = sfe_rd->res_priv; + + CAM_DBG(CAM_SFE, "SFE:%d Bus IRQ status_0: 0x%X", + rsrc_data->common_data->core_index, + th_payload->evt_status_arr[0]); + + rc = cam_sfe_bus_rd_get_evt_payload(rsrc_data->common_data, + &evt_payload); + if (rc) { + 
CAM_INFO_RATE_LIMIT(CAM_SFE, + "Failed to get payload for SFE:%d Bus IRQ status_0: 0x%X status_1: 0x%X", + rsrc_data->common_data->core_index, + th_payload->evt_status_arr[0]); + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + + evt_payload->core_index = rsrc_data->common_data->core_index; + evt_payload->evt_id = evt_id; + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + th_payload->evt_payload_priv = evt_payload; + return rc; +} + +static int cam_sfe_bus_rd_out_done_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + int rc = -EINVAL; + uint32_t status = 0; + struct cam_isp_resource_node *sfe_rd = handler_priv; + struct cam_sfe_bus_rd_data *rsrc_data = sfe_rd->res_priv; + struct cam_sfe_bus_rd_irq_evt_payload *evt_payload = evt_payload_priv; + + status = evt_payload->irq_reg_val[CAM_SFE_IRQ_BUS_RD_REG_STATUS0]; + cam_sfe_bus_rd_put_evt_payload(rsrc_data->common_data, &evt_payload); + + if (status & CAM_SFE_BUS_RD_IRQ_RUP_DONE) + CAM_DBG(CAM_SFE, "Received SFE:%d BUS RD RUP", + rsrc_data->common_data->core_index); + + if (status & CAM_SFE_BUS_RD_IRQ_RD0_BUF_DONE) + CAM_DBG(CAM_SFE, "Received SFE:%d BUS RD0 BUF DONE", + rsrc_data->common_data->core_index); + + if (status & CAM_SFE_BUS_RD_IRQ_RD1_BUF_DONE) + CAM_DBG(CAM_SFE, "Received SFE:%d BUS RD1 BUF DONE", + rsrc_data->common_data->core_index); + + if (status & CAM_SFE_BUS_RD_IRQ_RD2_BUF_DONE) + CAM_DBG(CAM_SFE, "Received SFE:%d BUS RD2 BUF DONE", + rsrc_data->common_data->core_index); + + return rc; +} + +static int cam_sfe_bus_rd_handle_err_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int i, rc = 0; + struct cam_sfe_bus_rd_priv *bus_priv; + struct cam_sfe_bus_rd_irq_evt_payload *evt_payload = NULL; + + bus_priv = th_payload->handler_priv; + rc = cam_sfe_bus_rd_get_evt_payload( + &bus_priv->common_data, &evt_payload); + if (rc) + return rc; + + for (i = 0; i < 
th_payload->num_registers; i++) { + evt_payload->irq_reg_val[i] = + th_payload->evt_status_arr[i]; + CAM_DBG(CAM_SFE, "SFE:%d Bus RD IRQ status_%d: 0x%x", + bus_priv->common_data.core_index, i, + th_payload->evt_status_arr[i]); + } + + evt_payload->constraint_violation = cam_io_r_mb( + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->cons_violation_status); + + if (bus_priv->common_data.irq_err_mask + & CAM_SFE_BUS_RD_IRQ_CCIF_VIOLATION) { + evt_payload->ccif_violation = cam_io_r_mb( + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->ccif_violation_status); + } + cam_irq_controller_disable_all(bus_priv->common_data.bus_irq_controller); + + cam_isp_hw_get_timestamp(&evt_payload->ts); + th_payload->evt_payload_priv = evt_payload; + return 0; +} + +static int cam_sfe_bus_rd_get_err_port_info(struct cam_sfe_bus_rd_priv *bus_priv, + uint32_t err_status, void **rsrc_data_priv, uint32_t *err_res_id) +{ + struct cam_sfe_bus_rd_rm_resource_data *rm_rsrc_data; + struct cam_isp_resource_node *sfe_bus_rd; + struct cam_sfe_bus_rd_data *sfe_bus_rd_data; + uint32_t bus_rd_resc_type; + int i, j; + + for (i = 0; i < bus_priv->num_bus_rd_resc; i++) { + bus_rd_resc_type = + bus_priv->bus_rd_hw_info->sfe_bus_rd_info[i].sfe_bus_rd_type; + if (bus_rd_resc_type < 0 || + (bus_rd_resc_type >= CAM_SFE_BUS_RD_MAX)) { + CAM_ERR(CAM_SFE, "Invalid SFE RD resource type:%d", + bus_rd_resc_type); + return -EINVAL; + } + + sfe_bus_rd = &bus_priv->sfe_bus_rd[bus_rd_resc_type]; + if (!sfe_bus_rd || !sfe_bus_rd->res_priv) + continue; + + if (sfe_bus_rd->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) + continue; + + sfe_bus_rd_data = sfe_bus_rd->res_priv; + *rsrc_data_priv = sfe_bus_rd_data->priv; + for (j = 0; j < sfe_bus_rd_data->num_rm; j++) { + rm_rsrc_data = sfe_bus_rd_data->rm_res[j]->res_priv; + if (!rm_rsrc_data) + continue; + + if (err_status & BIT(rm_rsrc_data->index)) { + *err_res_id = sfe_bus_rd->res_id; + return 0; + } + } + } + return 0; +} + 
+static int cam_sfe_bus_rd_handle_err_irq_bottom_half( + void *handler_priv, void *evt_payload_priv) +{ + int rc = 0; + struct cam_sfe_bus_rd_irq_evt_payload *evt_payload; + struct cam_sfe_bus_rd_priv *bus_priv; + struct cam_sfe_bus_rd_common_data *common_data = NULL; + struct cam_isp_hw_event_info evt_info; + struct cam_isp_hw_error_event_info err_evt_info; + uint32_t status = 0; + uint32_t violation_status = 0; + void *rsrc_data_priv = NULL; + uint32_t bus_rd_res_id = CAM_SFE_BUS_RD_MAX; + + if (!handler_priv || !evt_payload_priv) + return -EINVAL; + + bus_priv = (struct cam_sfe_bus_rd_priv *)handler_priv; + evt_payload = (struct cam_sfe_bus_rd_irq_evt_payload *)evt_payload_priv; + common_data = &bus_priv->common_data; + status = evt_payload->irq_reg_val[CAM_SFE_IRQ_BUS_RD_REG_STATUS0]; + + if (!(status & bus_priv->common_data.irq_err_mask)) + return 0; + + if (status & CAM_SFE_BUS_RD_IRQ_CONS_VIOLATION) { + CAM_ERR(CAM_SFE, "SFE:[%u] Constraint Violation occurred at [%llu: %09llu]", + bus_priv->common_data.core_index, + evt_payload->ts.mono_time.tv_sec, + evt_payload->ts.mono_time.tv_nsec); + cam_sfe_bus_rd_get_constraint_error(bus_priv, evt_payload->constraint_violation); + } + + if (status & CAM_SFE_BUS_RD_IRQ_CCIF_VIOLATION) { + CAM_ERR(CAM_SFE, "SFE:[%d] CCIF Violation occurred at [%llu: %09llu]", + bus_priv->common_data.core_index, + evt_payload->ts.mono_time.tv_sec, + evt_payload->ts.mono_time.tv_nsec); + CAM_ERR(CAM_SFE, "Violation status 0x%x", evt_payload->ccif_violation); + } + + violation_status = evt_payload->constraint_violation | evt_payload->ccif_violation; + + cam_sfe_bus_rd_put_evt_payload(common_data, &evt_payload); + + rc = cam_sfe_bus_rd_get_err_port_info(bus_priv, violation_status, + &rsrc_data_priv, &bus_rd_res_id); + if (rc < 0) + CAM_ERR(CAM_SFE, "Failed to get err port info, violation_status = 0x%x", + violation_status); + + if (!common_data->event_cb) + return 0; + + evt_info.hw_type = CAM_ISP_HW_TYPE_SFE; + evt_info.hw_idx = 
bus_priv->common_data.core_index; + evt_info.res_type = CAM_ISP_RESOURCE_SFE_RD; + evt_info.res_id = bus_rd_res_id; + err_evt_info.err_type = CAM_SFE_IRQ_STATUS_VIOLATION; + evt_info.event_data = (void *)&err_evt_info; + + if (!rsrc_data_priv) { + CAM_WARN(CAM_SFE, + "SFE:[%d] bus rd error notification failed, cb data is NULL", + bus_priv->common_data.core_index); + return -EINVAL; + } + + common_data->event_cb(rsrc_data_priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + + return 0; +} + +static int cam_sfe_bus_subscribe_error_irq( + struct cam_sfe_bus_rd_priv *bus_priv) +{ + uint32_t sfe_top_irq_mask[CAM_SFE_IRQ_REGISTERS_MAX] = {0}; + uint32_t bus_rd_err_irq_mask[CAM_SFE_BUS_RD_IRQ_REGISTERS_MAX] = {0}; + + /* Subscribe top IRQ */ + sfe_top_irq_mask[0] = (1 << bus_priv->top_irq_shift); + + bus_priv->irq_handle = cam_irq_controller_subscribe_irq( + bus_priv->common_data.sfe_irq_controller, + CAM_IRQ_PRIORITY_0, + sfe_top_irq_mask, + bus_priv, + cam_sfe_bus_rd_handle_irq, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->irq_handle < 1) { + CAM_ERR(CAM_SFE, + "Failed to subscribe TOP IRQ for BUS RD"); + bus_priv->irq_handle = 0; + return -EFAULT; + } + + cam_irq_controller_register_dependent(bus_priv->common_data.sfe_irq_controller, + bus_priv->common_data.bus_irq_controller, sfe_top_irq_mask); + + if (bus_priv->tasklet_info != NULL) { + bus_rd_err_irq_mask[0] = bus_priv->common_data.irq_err_mask; + + bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq( + bus_priv->common_data.bus_irq_controller, + CAM_IRQ_PRIORITY_0, + bus_rd_err_irq_mask, + bus_priv, + cam_sfe_bus_rd_handle_err_irq_top_half, + cam_sfe_bus_rd_handle_err_irq_bottom_half, + bus_priv->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->error_irq_handle < 1) { + CAM_ERR(CAM_SFE, "Failed to subscribe error IRQ"); + bus_priv->error_irq_handle = 0; + return -EFAULT; + } + } + + bus_priv->common_data.err_irq_subscribe = true; + CAM_DBG(CAM_SFE, "BUS RD 
error irq subscribed"); + return 0; +} + +static int cam_sfe_bus_rd_get_secure_mode(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *secure_mode = cmd_args; + struct cam_sfe_bus_rd_data *rsrc_data = NULL; + uint32_t *mode; + + rsrc_data = (struct cam_sfe_bus_rd_data *) + secure_mode->res->res_priv; + mode = (uint32_t *)secure_mode->data; + *mode = (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) ? + true : false; + + return 0; +} + +static int cam_sfe_bus_acquire_bus_rd(void *bus_priv, void *acquire_args, + uint32_t args_size) +{ + int rc = -ENODEV; + int i; + enum cam_sfe_bus_rd_type bus_rd_res_id; + int num_rm; + struct cam_sfe_bus_rd_priv *bus_rd_priv = + bus_priv; + struct cam_sfe_acquire_args *acq_args = acquire_args; + struct cam_sfe_hw_sfe_bus_rd_acquire_args *bus_rd_acquire_args; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_sfe_bus_rd_data *rsrc_data = NULL; + + if (!bus_priv || !acquire_args) { + CAM_ERR(CAM_SFE, "Invalid Param"); + return -EINVAL; + } + + bus_rd_acquire_args = &acq_args->sfe_rd; + + bus_rd_res_id = cam_sfe_bus_get_bus_rd_res_id( + bus_rd_acquire_args->res_id); + if (bus_rd_res_id == CAM_SFE_BUS_RD_MAX) + return -ENODEV; + + num_rm = cam_sfe_bus_get_num_rm(bus_rd_res_id); + if (num_rm < 1) + return -EINVAL; + + rsrc_node = &bus_rd_priv->sfe_bus_rd[bus_rd_res_id]; + if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_SFE, "SFE:%d RM:0x%x not available state:%d", + bus_rd_priv->common_data.core_index, + acq_args->rsrc_type, rsrc_node->res_state); + return -EBUSY; + } + + rsrc_node->res_id = bus_rd_acquire_args->res_id; + rsrc_data = rsrc_node->res_priv; + + CAM_DBG(CAM_SFE, "SFE:%d acquire RD type:0x%x res_id 0x%x", + rsrc_data->common_data->core_index, acq_args->rsrc_type, rsrc_node->res_id); + + rsrc_data->num_rm = num_rm; + rsrc_node->tasklet_info = acq_args->tasklet; + bus_rd_priv->tasklet_info = acq_args->tasklet; + rsrc_node->cdm_ops = 
bus_rd_acquire_args->cdm_ops; + rsrc_data->cdm_util_ops = bus_rd_acquire_args->cdm_ops; + rsrc_data->common_data->event_cb = acq_args->event_cb; + rsrc_data->priv = acq_args->priv; + rsrc_data->is_offline = bus_rd_acquire_args->is_offline; + rsrc_data->bus_priv = bus_rd_priv; + + /* Enable FS sync for sHDR & FS */ + if (!rsrc_data->is_offline) + rsrc_data->fs_sync_enable = 1; + + if (!rsrc_data->secure_mode) { + rsrc_data->secure_mode = + bus_rd_acquire_args->secure_mode; + } else if (rsrc_data->secure_mode != + bus_rd_acquire_args->secure_mode) { + CAM_ERR(CAM_SFE, + "Current Mode %u Requesting Mode %u", + rsrc_data->secure_mode, + bus_rd_acquire_args->secure_mode); + return -EINVAL; + } + + for (i = 0; i < num_rm; i++) { + rc = cam_sfe_bus_acquire_rm(bus_rd_priv, + acq_args->tasklet, + acq_args->priv, + bus_rd_res_id, + i, + &rsrc_data->rm_res[i], + bus_rd_acquire_args->unpacker_fmt); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d RM:%d acquire failed rc:%d", + rsrc_data->common_data->core_index, + bus_rd_res_id, rc); + goto release_rm; + } + } + + rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + bus_rd_acquire_args->rsrc_node = rsrc_node; + + CAM_DBG(CAM_SFE, "SFE:%d acquire RD 0x%x successful", + rsrc_data->common_data->core_index, acq_args->rsrc_type); + return rc; + +release_rm: + for (i--; i >= 0; i--) + cam_sfe_bus_release_rm(bus_rd_priv, rsrc_data->rm_res[i]); + return rc; +} + +static int cam_sfe_bus_release_bus_rd(void *bus_priv, void *release_args, + uint32_t args_size) +{ + uint32_t i; + struct cam_isp_resource_node *sfe_bus_rd = NULL; + struct cam_sfe_bus_rd_data *rsrc_data = NULL; + + if (!bus_priv || !release_args) { + CAM_ERR(CAM_SFE, "Invalid input bus_priv %pK release_args %pK", + bus_priv, release_args); + return -EINVAL; + } + + sfe_bus_rd = release_args; + rsrc_data = sfe_bus_rd->res_priv; + + if (sfe_bus_rd->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_SFE, + "SFE:%d RD type:0x%x invalid resource state:%d", + 
rsrc_data->common_data->core_index, + sfe_bus_rd->res_id, sfe_bus_rd->res_state); + } + + for (i = 0; i < rsrc_data->num_rm; i++) + cam_sfe_bus_release_rm(bus_priv, rsrc_data->rm_res[i]); + rsrc_data->num_rm = 0; + + sfe_bus_rd->tasklet_info = NULL; + sfe_bus_rd->cdm_ops = NULL; + rsrc_data->cdm_util_ops = NULL; + rsrc_data->secure_mode = 0; + + if (sfe_bus_rd->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) + sfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + return 0; +} + +static int cam_sfe_bus_start_bus_rd( + void *hw_priv, void *stop_hw_args, uint32_t arg_size) +{ + int rc = -1, i; + const uint32_t buf_done_shift = 2; + uint32_t bus_rd_done_irq_mask[1] = {0}; + struct cam_isp_resource_node *sfe_bus_rd = NULL; + struct cam_sfe_bus_rd_data *rsrc_data = NULL; + struct cam_sfe_bus_rd_priv *bus_priv = NULL; + struct cam_sfe_bus_rd_common_data *common_data = NULL; + + if (!hw_priv) { + CAM_ERR(CAM_SFE, "Invalid input"); + return -EINVAL; + } + + sfe_bus_rd = (struct cam_isp_resource_node *)hw_priv; + rsrc_data = sfe_bus_rd->res_priv; + bus_priv = rsrc_data->bus_priv; + common_data = rsrc_data->common_data; + + CAM_DBG(CAM_SFE, "SFE:%d start RD type:0x%x", + common_data->hw_intf->hw_idx, sfe_bus_rd->res_id); + + if (sfe_bus_rd->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_SFE, "Invalid resource state:%d", + sfe_bus_rd->res_state); + return -EACCES; + } + + /* subscribe error irqs */ + if (!bus_priv->common_data.err_irq_subscribe) { + rc = cam_sfe_bus_subscribe_error_irq(bus_priv); + if (rc) + return rc; + } + + if (!rsrc_data->is_offline) { + uint32_t if_cmd_val = (rsrc_data->fs_sync_enable << + bus_priv->bus_rd_hw_info->fs_sync_shift); + + cam_io_w_mb(if_cmd_val, common_data->mem_base + + common_data->common_reg->input_if_cmd); + CAM_DBG(CAM_SFE, "SFE:%d fs_sync for RD:0x%x configured with val: 0x%x", + common_data->hw_intf->hw_idx, sfe_bus_rd->res_id, if_cmd_val); + } + + if (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) + 
cam_io_w_mb(1, common_data->mem_base + + common_data->common_reg->security_cfg); + + for (i = 0; i < rsrc_data->num_rm; i++) + rc = cam_sfe_bus_start_rm(rsrc_data->rm_res[i]); + + /* Remove after driver stabilizes */ + common_data->sfe_debug_cfg |= + SFE_DEBUG_ENABLE_RD_DONE_IRQ; + + if (common_data->sfe_debug_cfg & + SFE_DEBUG_ENABLE_RD_DONE_IRQ) { + + /* Subscribe RUP */ + bus_rd_done_irq_mask[0] |= 0x2; + + /* Subscribe buf done */ + bus_rd_done_irq_mask[0] |= + (1 << (rsrc_data->bus_rd_type + buf_done_shift)); + sfe_bus_rd->irq_handle = cam_irq_controller_subscribe_irq( + common_data->bus_irq_controller, + CAM_IRQ_PRIORITY_2, + bus_rd_done_irq_mask, + sfe_bus_rd, + cam_sfe_bus_rd_out_done_top_half, + cam_sfe_bus_rd_out_done_bottom_half, + sfe_bus_rd->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (sfe_bus_rd->irq_handle < 1) { + CAM_ERR(CAM_SFE, + "Failed to subscribe RUP/BUF done IRQ"); + sfe_bus_rd->irq_handle = 0; + return -EFAULT; + } + } + + sfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static int cam_sfe_bus_stop_bus_rd( + void *hw_priv, void *stop_hw_args, uint32_t arg_size) +{ + int rc = 0, i; + struct cam_isp_resource_node *sfe_bus_rd = NULL; + struct cam_sfe_bus_rd_data *rsrc_data = NULL; + struct cam_sfe_bus_rd_priv *bus_priv = NULL; + + if (!hw_priv) { + CAM_ERR(CAM_SFE, "Invalid input"); + return -EINVAL; + } + + sfe_bus_rd = (struct cam_isp_resource_node *)hw_priv; + rsrc_data = sfe_bus_rd->res_priv; + bus_priv = rsrc_data->bus_priv; + + if (sfe_bus_rd->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE || + sfe_bus_rd->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_SFE, "SFE: %d res_id: 0x%x state: %d", + rsrc_data->common_data->core_index, sfe_bus_rd->res_id, + sfe_bus_rd->res_state); + return rc; + } + for (i = 0; i < rsrc_data->num_rm; i++) + rc = cam_sfe_bus_stop_rm(rsrc_data->rm_res[i]); + + sfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + /* Unsubscribe RUP & Buf done */ 
+ if (sfe_bus_rd->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + rsrc_data->common_data->bus_irq_controller, + sfe_bus_rd->irq_handle); + if (rc) + CAM_ERR(CAM_SFE, "Failed to unsubscribe rup/buf done irq"); + sfe_bus_rd->irq_handle = 0; + } + + /* Unsubscribe error irqs */ + if (bus_priv->common_data.err_irq_subscribe) { + if (bus_priv->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + bus_priv->common_data.sfe_irq_controller, + bus_priv->irq_handle); + if (rc) + CAM_ERR(CAM_SFE, "Failed to unsubscribe top irq"); + bus_priv->irq_handle = 0; + cam_irq_controller_unregister_dependent( + bus_priv->common_data.sfe_irq_controller, + bus_priv->common_data.bus_irq_controller); + } + + if (bus_priv->error_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + bus_priv->common_data.bus_irq_controller, + bus_priv->error_irq_handle); + if (rc) + CAM_ERR(CAM_SFE, "Failed to unsubscribe error irq"); + bus_priv->error_irq_handle = 0; + } + bus_priv->common_data.err_irq_subscribe = false; + } + + CAM_DBG(CAM_SFE, "SFE:%d stopped Bus RD:0x%x", + rsrc_data->common_data->core_index, + sfe_bus_rd->res_id); + return rc; +} + +static int cam_sfe_bus_init_sfe_bus_read_resource( + uint32_t index, + struct cam_sfe_bus_rd_priv *bus_rd_priv, + struct cam_sfe_bus_rd_hw_info *bus_rd_hw_info) +{ + struct cam_isp_resource_node *sfe_bus_rd = NULL; + struct cam_sfe_bus_rd_data *rsrc_data = NULL; + int rc = 0; + int32_t sfe_bus_rd_resc_type = + bus_rd_hw_info->sfe_bus_rd_info[index].sfe_bus_rd_type; + + if (sfe_bus_rd_resc_type < 0 || + sfe_bus_rd_resc_type >= CAM_SFE_BUS_RD_MAX) { + CAM_ERR(CAM_SFE, "Init SFE RD failed, Invalid type=%d", + sfe_bus_rd_resc_type); + return -EINVAL; + } + + sfe_bus_rd = &bus_rd_priv->sfe_bus_rd[sfe_bus_rd_resc_type]; + if (sfe_bus_rd->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE || + sfe_bus_rd->res_priv) { + CAM_ERR(CAM_SFE, + "Error. 
Looks like same resource is init again"); + return -EFAULT; + } + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus_rd_data), + GFP_KERNEL); + if (!rsrc_data) { + rc = -ENOMEM; + return rc; + } + + sfe_bus_rd->res_priv = rsrc_data; + + sfe_bus_rd->res_type = CAM_ISP_RESOURCE_SFE_RD; + sfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&sfe_bus_rd->list); + + rsrc_data->bus_rd_type = + bus_rd_hw_info->sfe_bus_rd_info[index].sfe_bus_rd_type; + rsrc_data->common_data = &bus_rd_priv->common_data; + rsrc_data->max_width = + bus_rd_hw_info->sfe_bus_rd_info[index].max_width; + rsrc_data->max_height = + bus_rd_hw_info->sfe_bus_rd_info[index].max_height; + rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE; + sfe_bus_rd->hw_intf = bus_rd_priv->common_data.hw_intf; + + return 0; +} + +static int cam_sfe_bus_deinit_sfe_bus_rd_resource( + struct cam_isp_resource_node *sfe_bus_rd_res) +{ + struct cam_sfe_bus_rd_data *rsrc_data = + sfe_bus_rd_res->res_priv; + + if (sfe_bus_rd_res->res_state == + CAM_ISP_RESOURCE_STATE_UNAVAILABLE) { + /* + * This is not error. It can happen if the resource is + * never supported in the HW. 
+ */ + CAM_DBG(CAM_SFE, "HW%d Res %d already deinitialized"); + return 0; + } + + sfe_bus_rd_res->start = NULL; + sfe_bus_rd_res->stop = NULL; + sfe_bus_rd_res->top_half_handler = NULL; + sfe_bus_rd_res->bottom_half_handler = NULL; + sfe_bus_rd_res->hw_intf = NULL; + + sfe_bus_rd_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&sfe_bus_rd_res->list); + sfe_bus_rd_res->res_priv = NULL; + + if (!rsrc_data) + return -ENOMEM; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +/* + * API similar to cam_sfe_bus_rd_update_rm() with the + * only change being config is done via AHB instead of CDM + */ +static int cam_sfe_bus_rd_config_rm(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_sfe_bus_rd_priv *bus_priv; + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_sfe_bus_rd_data *sfe_bus_rd_data = NULL; + struct cam_sfe_bus_rd_rm_resource_data *rm_data = NULL; + struct cam_sfe_bus_cache_dbg_cfg *cache_dbg_cfg = NULL; + uint32_t width = 0, height = 0, stride = 0, width_in_bytes = 0; + uint32_t i, img_addr = 0, img_offset = 0; + uint32_t curr_cache_cfg = 0; + dma_addr_t iova; + + bus_priv = (struct cam_sfe_bus_rd_priv *) priv; + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + cache_dbg_cfg = &bus_priv->common_data.cache_dbg_cfg; + + sfe_bus_rd_data = (struct cam_sfe_bus_rd_data *) + update_buf->res->res_priv; + + if (!sfe_bus_rd_data) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + CAM_DBG(CAM_SFE, "num of RM: %d scratch_buf_cfg: %s", + sfe_bus_rd_data->num_rm, + update_buf->use_scratch_cfg ? "true" : "false"); + if (update_buf->rm_update->num_buf != sfe_bus_rd_data->num_rm) { + CAM_ERR(CAM_SFE, + "Failed! 
Invalid number buffers:%d required:%d", + update_buf->rm_update->num_buf, + sfe_bus_rd_data->num_rm); + return -EINVAL; + } + + for (i = 0; i < sfe_bus_rd_data->num_rm; i++) { + rm_data = sfe_bus_rd_data->rm_res[i]->res_priv; + + stride = update_buf->rm_update->stride; + iova = update_buf->rm_update->image_buf[i] + rm_data->offset; + if (rm_data->width && rm_data->height) + { + width = rm_data->width; + height = rm_data->height; + } else { + width = update_buf->rm_update->width; + height = update_buf->rm_update->height; + } + + if (cam_smmu_is_expanded_memory()) { + img_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET(iova); + img_addr = CAM_36BIT_INTF_GET_IOVA_BASE(iova); + } else { + img_addr = iova; + } + + /* update size register */ + cam_sfe_bus_rd_pxls_to_bytes(width, + rm_data->unpacker_cfg, &width_in_bytes); + rm_data->height = height; + rm_data->width = width; + curr_cache_cfg = rm_data->cache_cfg; + rm_data->cache_cfg = bus_priv->sys_cache_default_cfg; + if ((!cache_dbg_cfg->disable_for_scratch) && + (rm_data->enable_caching)) { + rm_data->cache_cfg = + rm_data->current_scid << 8; + rm_data->cache_cfg |= (3 << 4); + if (cache_dbg_cfg->scratch_dbg_cfg) + rm_data->cache_cfg |= cache_dbg_cfg->scratch_alloc; + else + rm_data->cache_cfg |= CACHE_ALLOC_FORGET; + + if (cache_dbg_cfg->print_cache_cfg && + (curr_cache_cfg != rm_data->cache_cfg)) { + CAM_INFO(CAM_SFE, + "SFE:%d Scratch Buff RM:%d current_scid:%d cache_cfg:0x%x", + rm_data->common_data->core_index, + rm_data->index, rm_data->current_scid, rm_data->cache_cfg); + } + } + + cam_io_w_mb(rm_data->cache_cfg, + rm_data->common_data->mem_base + + rm_data->hw_regs->system_cache_cfg); + CAM_DBG(CAM_SFE, "SFE:%d RM:%d cache_cfg:0x%x", + rm_data->common_data->core_index, + rm_data->index, rm_data->cache_cfg); + + cam_io_w_mb(width_in_bytes, + rm_data->common_data->mem_base + + rm_data->hw_regs->buf_width); + cam_io_w_mb(height, + rm_data->common_data->mem_base + + rm_data->hw_regs->buf_height); + + CAM_DBG(CAM_SFE, 
"SFE:%d RM:%d width:%d [in bytes: %d] height:%d", + rm_data->common_data->core_index, + rm_data->index, width, width_in_bytes, height); + + rm_data->stride = stride; + cam_io_w_mb(stride, + rm_data->common_data->mem_base + + rm_data->hw_regs->stride); + CAM_DBG(CAM_SFE, "SFE:%d RM:%d image_stride:%d", + rm_data->common_data->core_index, + rm_data->index, stride); + + cam_io_w_mb( + img_addr, + rm_data->common_data->mem_base + + rm_data->hw_regs->image_addr); + + CAM_DBG(CAM_SFE, "SFE:%d RM:%d image_address:0x%x offset: 0x%x", + rm_data->common_data->core_index, rm_data->index, + img_addr, rm_data->offset); + if (cam_smmu_is_expanded_memory()) + CAM_DBG(CAM_SFE, "SFE:%d RM:%d image address offset: 0x%x", + rm_data->common_data->core_index, + rm_data->index, + img_offset); + cam_io_w_mb(img_offset, + rm_data->common_data->mem_base + + rm_data->hw_regs->addr_cfg); + } + + return 0; +} + +static int cam_sfe_bus_rd_update_rm(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_sfe_bus_rd_priv *bus_priv; + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_buf_io_cfg *io_cfg = NULL; + struct cam_sfe_bus_rd_data *sfe_bus_rd_data = NULL; + struct cam_sfe_bus_rd_rm_resource_data *rm_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + struct cam_sfe_bus_cache_dbg_cfg *cache_dbg_cfg = NULL; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t width = 0, height = 0, stride = 0, width_in_bytes = 0; + uint32_t i, j, size = 0, img_addr = 0, img_offset = 0; + dma_addr_t iova; + uint32_t curr_cache_cfg = 0; + + bus_priv = (struct cam_sfe_bus_rd_priv *) priv; + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + cache_dbg_cfg = &bus_priv->common_data.cache_dbg_cfg; + + sfe_bus_rd_data = (struct cam_sfe_bus_rd_data *) + update_buf->res->res_priv; + if (!sfe_bus_rd_data || !sfe_bus_rd_data->cdm_util_ops) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + cdm_util_ops = sfe_bus_rd_data->cdm_util_ops; + CAM_DBG(CAM_SFE, 
"#of RM: %d scratch_buf_cfg: %s", + sfe_bus_rd_data->num_rm, + update_buf->use_scratch_cfg ? "true" : "false"); + if (update_buf->rm_update->num_buf != sfe_bus_rd_data->num_rm) { + CAM_ERR(CAM_SFE, + "Failed! Invalid number buffers:%d required:%d", + update_buf->rm_update->num_buf, + sfe_bus_rd_data->num_rm); + return -EINVAL; + } + + reg_val_pair = &sfe_bus_rd_data->common_data->io_buf_update[0]; + if (!update_buf->use_scratch_cfg) + io_cfg = update_buf->rm_update->io_cfg; + + for (i = 0, j = 0; i < sfe_bus_rd_data->num_rm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_SFE, + "reg_val_pair %d exceeds the array limit %lu", + j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + rm_data = sfe_bus_rd_data->rm_res[i]->res_priv; + if (update_buf->use_scratch_cfg) { + stride = update_buf->rm_update->stride; + width = update_buf->rm_update->width; + height = update_buf->rm_update->height; + } else { + stride = io_cfg->planes[i].plane_stride; + width = io_cfg->planes[i].width; + height = io_cfg->planes[i].height; + } + + /* If width & height updated in blob, use that */ + if (rm_data->width && rm_data->height) { + width = rm_data->width; + height = rm_data->height; + } + + iova = update_buf->rm_update->image_buf[i] + rm_data->offset; + if (cam_smmu_is_expanded_memory()) { + img_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET(iova); + img_addr = CAM_36BIT_INTF_GET_IOVA_BASE(iova); + } else { + img_addr = iova; + } + + /* update size register */ + cam_sfe_bus_rd_pxls_to_bytes(width, + rm_data->unpacker_cfg, &width_in_bytes); + rm_data->height = height; + rm_data->width = width; + + curr_cache_cfg = rm_data->cache_cfg; + rm_data->cache_cfg = bus_priv->sys_cache_default_cfg; + if (rm_data->enable_caching) { + if ((cache_dbg_cfg->disable_for_scratch) && + (update_buf->use_scratch_cfg)) + goto skip_cache_cfg; + + if ((cache_dbg_cfg->disable_for_buf) && + (!update_buf->use_scratch_cfg)) + goto skip_cache_cfg; + + rm_data->cache_cfg = + 
rm_data->current_scid << 8; + rm_data->cache_cfg |= (3 << 4); + if ((update_buf->use_scratch_cfg) && + (cache_dbg_cfg->scratch_dbg_cfg)) { + rm_data->cache_cfg |= cache_dbg_cfg->scratch_alloc; + } else if ((!update_buf->use_scratch_cfg) && + (cache_dbg_cfg->buf_dbg_cfg)) { + rm_data->cache_cfg |= cache_dbg_cfg->buf_alloc; + } else { + if (update_buf->use_scratch_cfg) + rm_data->cache_cfg |= CACHE_ALLOC_FORGET; + else + rm_data->cache_cfg |= CACHE_ALLOC_DEALLOC; + } + + if (cache_dbg_cfg->print_cache_cfg && + (curr_cache_cfg != rm_data->cache_cfg)) { + CAM_INFO(CAM_SFE, "SFE:%d RM:%d current_scid:%d cache_cfg:0x%x", + rm_data->common_data->core_index, + rm_data->index, rm_data->current_scid, rm_data->cache_cfg); + } + } + +skip_cache_cfg: + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + rm_data->hw_regs->system_cache_cfg, + rm_data->cache_cfg); + CAM_DBG(CAM_SFE, "SFE:%d RM:%d cache_cfg:0x%x", + rm_data->common_data->core_index, + rm_data->index, reg_val_pair[j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + rm_data->hw_regs->buf_width, width_in_bytes); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + rm_data->hw_regs->buf_height, height); + CAM_DBG(CAM_SFE, "SFE:%d RM:%d width:%d [in bytes: %d] height:%d", + rm_data->common_data->core_index, + rm_data->index, rm_data->width, + width_in_bytes, rm_data->height); + + rm_data->stride = stride; + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + rm_data->hw_regs->stride, rm_data->stride); + CAM_DBG(CAM_SFE, "SFE:%d RM:%d image_stride:%d", + rm_data->common_data->core_index, + rm_data->index, reg_val_pair[j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + rm_data->hw_regs->image_addr, img_addr); + if (cam_smmu_is_expanded_memory()) + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + rm_data->hw_regs->addr_cfg, img_offset); + CAM_DBG(CAM_SFE, "SFE:%d RM:%d image_address:0x%x 
image_offset:0x%x", + rm_data->common_data->core_index, + rm_data->index, img_addr, img_offset); + } + + num_regval_pairs = j / 2; + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > update_buf->cmd.size) { + CAM_ERR(CAM_SFE, + "Failed! Buf size:%d insufficient, expected size:%d", + update_buf->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + update_buf->cmd.cmd_buf_addr, num_regval_pairs, + reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + update_buf->cmd.used_bytes = size * 4; + } else { + update_buf->cmd.used_bytes = 0; + CAM_DBG(CAM_SFE, + "No reg val pairs. num_rms: %u", + sfe_bus_rd_data->num_rm); + } + + return 0; +} + +static int cam_sfe_bus_rd_update_rm_core_cfg( + void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *cmd_update; + struct cam_sfe_bus_rd_data *sfe_bus_rd_data = NULL; + struct cam_sfe_bus_rd_rm_resource_data *rm_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + bool enable_disable; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0, i, j, size = 0; + uint32_t hw_cfg = 0; + + cmd_update = (struct cam_isp_hw_get_cmd_update *) cmd_args; + enable_disable = *(bool *)cmd_update->data; + + sfe_bus_rd_data = (struct cam_sfe_bus_rd_data *) + cmd_update->res->res_priv; + + if (!sfe_bus_rd_data) { + CAM_ERR(CAM_SFE, "Invalid SFE rd data: %pK", + sfe_bus_rd_data); + return -EINVAL; + } + + if (enable_disable) { + hw_cfg = CAM_SFE_BUS_RD_EN_CLIENT_CFG; + if (sfe_bus_rd_data->common_data->cons_chk_en_avail) + hw_cfg |= CAM_SFE_BUS_RD_EN_CONS_CHK_CFG; + } + + cdm_util_ops = sfe_bus_rd_data->cdm_util_ops; + if (!cdm_util_ops) { + CAM_ERR(CAM_SFE, "Invalid cdm ops: %pK", + cdm_util_ops); + return -EINVAL; + } + + reg_val_pair = &sfe_bus_rd_data->common_data->io_buf_update[0]; + for (i = 0, j = 0; i < sfe_bus_rd_data->num_rm; i++) { 
+ if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_SFE, + "reg_val_pair %d exceeds the array limit %lu", + j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + rm_data = sfe_bus_rd_data->rm_res[i]->res_priv; + /* To avoid AHB write @ stream on */ + rm_data->enable_disable_cfg_done = true; + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + rm_data->hw_regs->cfg, hw_cfg); + CAM_DBG(CAM_SFE, "SFE:%d RM:%d cfg:0x%x", + rm_data->common_data->core_index, + rm_data->index, reg_val_pair[j-1]); + } + + num_regval_pairs = j / 2; + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > cmd_update->cmd.size) { + CAM_ERR(CAM_SFE, + "Failed! Buf size:%d insufficient, expected size:%d", + cmd_update->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + cmd_update->cmd.cmd_buf_addr, num_regval_pairs, + reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + cmd_update->cmd.used_bytes = size * 4; + } else { + cmd_update->cmd.used_bytes = 0; + CAM_DBG(CAM_SFE, + "No reg val pairs. 
num_rms: %u", + sfe_bus_rd_data->num_rm); + } + + return 0; +} + +static int cam_sfe_bus_rd_get_res_for_mid( + struct cam_sfe_bus_rd_priv *bus_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_sfe_bus_rd_hw_info *hw_info; + struct cam_isp_hw_get_cmd_update *cmd_update = cmd_args; + struct cam_isp_hw_get_res_for_mid *get_res = NULL; + int i, j; + + get_res = (struct cam_isp_hw_get_res_for_mid *)cmd_update->data; + if (!get_res) { + CAM_ERR(CAM_SFE, + "invalid get resource for mid paramas"); + return -EINVAL; + } + + hw_info = bus_priv->bus_rd_hw_info; + for (i = 0; i < bus_priv->num_bus_rd_resc; i++) { + for (j = 0; j < CAM_SFE_BUS_MAX_MID_PER_PORT; j++) { + if (hw_info->sfe_bus_rd_info[i].mid[j] == get_res->mid) + goto end; + } + } + + /* + * Do not update out_res_id in case of no match. + * Correct value will be dumped in hw mgr + */ + if (i == bus_priv->num_bus_rd_resc) { + CAM_INFO(CAM_SFE, "mid:%d does not match with any read resource", get_res->mid); + return 0; + } + +end: + CAM_INFO(CAM_SFE, "match mid :%d read resource: 0x%x found", + get_res->mid, bus_priv->sfe_bus_rd[i].res_id); + get_res->out_res_id = bus_priv->sfe_bus_rd[i].res_id; + return 0; +} + +static int cam_sfe_bus_rd_irq_inject( + void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_sfe_bus_rd_priv *bus_priv = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sfe_bus_rd_hw_info *bus_rd_hw_info = NULL; + struct cam_irq_controller_reg_info *irq_reg_info = NULL; + struct cam_irq_register_set *inject_reg = NULL; + struct cam_isp_irq_inject_param *inject_params = NULL; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + bus_priv = (struct cam_sfe_bus_rd_priv *)priv; + soc_info = bus_priv->common_data.soc_info; + bus_rd_hw_info = (struct cam_sfe_bus_rd_hw_info *)bus_priv->bus_rd_hw_info; + irq_reg_info = &bus_rd_hw_info->common_reg.irq_reg_info; + inject_reg = irq_reg_info->irq_reg_set; + inject_params = (struct cam_isp_irq_inject_param 
*)cmd_args; + + if (!inject_reg) { + CAM_INFO(CAM_SFE, "Invalid inject_reg"); + return -EINVAL; + } + + if (inject_params->reg_unit == + CAM_ISP_SFE_0_BUS_RD_INPUT_IF_IRQ_SET_REG) { + cam_io_w_mb(inject_params->irq_mask, + soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base + + inject_reg->set_reg_offset); + cam_io_w_mb(0x10, soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base + + irq_reg_info->global_irq_cmd_offset); + CAM_INFO(CAM_SFE, "Injected : irq_mask %#x set_reg_offset %#x", + inject_params->irq_mask, inject_reg->set_reg_offset); + } + return 0; +} + +static int cam_sfe_bus_rd_dump_irq_desc( + void *priv, void *cmd_args, uint32_t arg_size) +{ + int i, offset = 0; + struct cam_sfe_bus_rd_priv *bus_priv = NULL; + struct cam_sfe_bus_rd_hw_info *bus_rd_hw_info = NULL; + struct cam_isp_irq_inject_param *inject_params = NULL; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + bus_priv = (struct cam_sfe_bus_rd_priv *)priv; + bus_rd_hw_info = (struct cam_sfe_bus_rd_hw_info *)bus_priv->bus_rd_hw_info; + inject_params = (struct cam_isp_irq_inject_param *)cmd_args; + + if (inject_params->reg_unit == + CAM_ISP_SFE_0_BUS_RD_INPUT_IF_IRQ_SET_REG) { + offset += scnprintf(inject_params->line_buf + offset, + LINE_BUFFER_LEN - offset, + "Printing executable IRQ for hw_type: SFE reg_unit: %d\n", + inject_params->reg_unit); + + for (i = 0; i < bus_rd_hw_info->num_bus_rd_errors; i++) + offset += scnprintf(inject_params->line_buf + offset, + LINE_BUFFER_LEN - offset, "%#12x : %s - %s\n", + bus_rd_hw_info->bus_rd_err_desc[i].bitmask, + bus_rd_hw_info->bus_rd_err_desc[i].err_name, + bus_rd_hw_info->bus_rd_err_desc[i].desc); + } + return 0; +} + +static int cam_sfe_bus_init_hw(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_sfe_bus_rd_priv *bus_priv = hw_priv; + + if (!hw_priv) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + CAM_DBG(CAM_SFE, "SFE:%d bus-rd hw-version:0x%x", + bus_priv->common_data.core_index, + 
cam_io_r_mb(bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->hw_version)); + + return 0; +} + +static int cam_sfe_bus_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size) +{ + return 0; +} + +static int cam_sfe_bus_rd_config_update( + void *priv, void *cmd_args, uint32_t arg_size) +{ + int i; + struct cam_isp_hw_get_cmd_update *rm_config_update; + struct cam_isp_vfe_wm_config *rm_config = NULL; + struct cam_sfe_bus_rd_data *sfe_bus_rd_data = NULL; + struct cam_sfe_bus_rd_rm_resource_data *rm_data = NULL; + + rm_config_update = (struct cam_isp_hw_get_cmd_update *) cmd_args; + rm_config = (struct cam_isp_vfe_wm_config *) + rm_config_update->data; + + sfe_bus_rd_data = (struct cam_sfe_bus_rd_data *) + rm_config_update->res->res_priv; + + if (!sfe_bus_rd_data) { + CAM_ERR(CAM_SFE, "Failed! Invalid data"); + return -EINVAL; + } + + for (i = 0; i < sfe_bus_rd_data->num_rm; i++) { + rm_data = sfe_bus_rd_data->rm_res[i]->res_priv; + + rm_data->offset = rm_config->offset; + rm_data->width = rm_config->width; + rm_data->height = rm_config->height; + CAM_DBG(CAM_SFE, + "SFE: %u RM: %d offset: %u width: %u height: %u", + rm_data->index, rm_data->index, rm_data->offset, + rm_data->width, rm_data->height); + } + + return 0; +} + +static int cam_sfe_bus_rd_cache_config(void *priv, void *cmd_args, + uint32_t arg_size) +{ + int i; + struct cam_sfe_bus_rd_priv *bus_priv; + struct cam_isp_sfe_bus_sys_cache_config *cache_cfg; + struct cam_sfe_bus_rd_data *sfe_bus_rd_data = NULL; + struct cam_sfe_bus_rd_rm_resource_data *rm_data = NULL; + + bus_priv = (struct cam_sfe_bus_rd_priv *)priv; + cache_cfg = (struct cam_isp_sfe_bus_sys_cache_config *)cmd_args; + + sfe_bus_rd_data = (struct cam_sfe_bus_rd_data *) + cache_cfg->res->res_priv; + + if (!sfe_bus_rd_data) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + if (bus_priv->common_data.cache_dbg_cfg.disable_all) + return 0; + + for (i = 0; i < sfe_bus_rd_data->num_rm; i++) { + rm_data = 
(struct cam_sfe_bus_rd_rm_resource_data *) + sfe_bus_rd_data->rm_res[i]->res_priv; + rm_data->enable_caching = cache_cfg->use_cache; + rm_data->current_scid = cache_cfg->scid; + cache_cfg->rd_cfg_done = true; + + CAM_DBG(CAM_SFE, "SFE:%d RM:%d cache_enable:%s scid:%u", + rm_data->common_data->core_index, + rm_data->index, + (rm_data->enable_caching ? "true" : "false"), + rm_data->current_scid); + } + + return 0; +} + +static int cam_sfe_bus_rd_set_debug_cfg( + void *priv, void *cmd_args) +{ + struct cam_sfe_bus_rd_priv *bus_priv = + (struct cam_sfe_bus_rd_priv *) priv; + struct cam_sfe_debug_cfg_params *debug_cfg; + + debug_cfg = (struct cam_sfe_debug_cfg_params *)cmd_args; + + if (debug_cfg->cache_config) + cam_sfe_bus_parse_cache_cfg(true, + debug_cfg->u.cache_cfg.sfe_cache_dbg, + &bus_priv->common_data.cache_dbg_cfg); + else + bus_priv->common_data.sfe_debug_cfg = + debug_cfg->u.dbg_cfg.sfe_debug_cfg; + + return 0; +} + +static int cam_sfe_bus_rd_process_cmd( + void *priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + + if (!priv || !cmd_args) { + CAM_ERR_RATE_LIMIT(CAM_SFE, + "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM: + rc = cam_sfe_bus_rd_update_rm(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BUF_UPDATE_RM: + rc = cam_sfe_bus_rd_config_rm(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_RM_SECURE_MODE: + rc = cam_sfe_bus_rd_get_secure_mode(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD: + rc = cam_sfe_bus_rd_config_update(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_SET_SFE_DEBUG_CFG: + rc = cam_sfe_bus_rd_set_debug_cfg(priv, cmd_args); + break; + case CAM_ISP_HW_SFE_SYS_CACHE_RM_CONFIG: + rc = cam_sfe_bus_rd_cache_config(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_RM_ENABLE_DISABLE: + rc = cam_sfe_bus_rd_update_rm_core_cfg(priv, cmd_args, arg_size); + break; + case 
CAM_ISP_HW_CMD_GET_RES_FOR_MID: + rc = cam_sfe_bus_rd_get_res_for_mid(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_DUMP_BUS_INFO: { + struct cam_isp_hw_event_info *event_info = + (struct cam_isp_hw_event_info *)cmd_args; + enum cam_sfe_bus_rd_type bus_rd_res_id; + + bus_rd_res_id = cam_sfe_bus_get_bus_rd_res_id( + event_info->res_id); + + /* Skip if it's not a read resource */ + if (bus_rd_res_id == CAM_SFE_BUS_RD_MAX) { + CAM_DBG(CAM_SFE, + "Not a SFE read res: 0x%x - skip dump", + event_info->res_id); + rc = 0; + break; + } + + rc = cam_sfe_bus_rd_print_dimensions(bus_rd_res_id, + (struct cam_sfe_bus_rd_priv *)priv); + break; + } + case CAM_ISP_HW_CMD_IRQ_INJECTION: + rc = cam_sfe_bus_rd_irq_inject(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION: + rc = cam_sfe_bus_rd_dump_irq_desc(priv, cmd_args, arg_size); + break; + default: + CAM_ERR_RATE_LIMIT(CAM_SFE, + "Invalid SFE BUS RD command type: %d", + cmd_type); + break; + } + + return rc; +} + +int cam_sfe_bus_rd_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *sfe_irq_controller, + struct cam_sfe_bus **sfe_bus) +{ + int i, rc = 0; + struct cam_sfe_bus_rd_priv *bus_priv = NULL; + struct cam_sfe_bus *sfe_bus_local; + struct cam_sfe_bus_rd_hw_info *bus_rd_hw_info = bus_hw_info; + + if (!soc_info || !hw_intf || !bus_hw_info) { + CAM_ERR(CAM_SFE, + "Invalid_params soc_info:%pK hw_intf:%pK hw_info:%pK data:%pK", + soc_info, hw_intf, bus_rd_hw_info); + rc = -EINVAL; + goto end; + } + + sfe_bus_local = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus), GFP_KERNEL); + if (!sfe_bus_local) { + CAM_DBG(CAM_SFE, "Failed to alloc for sfe_bus"); + rc = -ENOMEM; + goto end; + } + + bus_priv = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus_rd_priv), + GFP_KERNEL); + if (!bus_priv) { + CAM_DBG(CAM_SFE, "Failed to alloc for sfe_bus_priv"); + rc = -ENOMEM; + goto free_bus_local; + } + + sfe_bus_local->bus_priv = bus_priv; + bus_priv->num_client = 
bus_rd_hw_info->num_client; + bus_priv->num_bus_rd_resc = + bus_rd_hw_info->num_bus_rd_resc; + bus_priv->common_data.core_index = soc_info->index; + bus_priv->common_data.mem_base = + CAM_SOC_GET_REG_MAP_START(soc_info, SFE_CORE_BASE_IDX); + bus_priv->common_data.hw_intf = hw_intf; + bus_priv->common_data.sfe_irq_controller = sfe_irq_controller; + bus_priv->common_data.common_reg = &bus_rd_hw_info->common_reg; + bus_priv->common_data.irq_err_mask = bus_rd_hw_info->irq_err_mask; + bus_priv->common_data.cons_chk_en_avail = + bus_rd_hw_info->constraint_error_info->cons_chk_en_avail; + bus_priv->common_data.cons_chk_en_val = + bus_rd_hw_info->constraint_error_info->cons_chk_en_val; + bus_priv->common_data.soc_info = soc_info; + bus_priv->top_irq_shift = bus_rd_hw_info->top_irq_shift; + bus_priv->latency_buf_allocation = bus_rd_hw_info->latency_buf_allocation; + bus_priv->sys_cache_default_cfg = bus_rd_hw_info->sys_cache_default_val; + bus_priv->bus_rd_hw_info = bus_rd_hw_info; + + rc = cam_irq_controller_init(drv_name, + bus_priv->common_data.mem_base, + &bus_rd_hw_info->common_reg.irq_reg_info, + &bus_priv->common_data.bus_irq_controller); + if (rc) { + CAM_ERR(CAM_SFE, "IRQ controller init failed"); + goto free_bus_priv; + } + + for (i = 0; i < bus_priv->num_client; i++) { + rc = cam_sfe_bus_init_rm_resource(i, bus_priv, bus_hw_info, + &bus_priv->bus_client[i]); + if (rc < 0) { + CAM_ERR(CAM_SFE, "Init RM failed for client:%d, rc=%d", + i, rc); + goto deinit_rm; + } + } + + for (i = 0; i < bus_priv->num_bus_rd_resc; i++) { + rc = cam_sfe_bus_init_sfe_bus_read_resource(i, bus_priv, + bus_rd_hw_info); + if (rc < 0) { + CAM_ERR(CAM_SFE, "Init SFE RD failed for client:%d, rc=%d", + i, rc); + goto deinit_sfe_bus_rd; + } + } + + spin_lock_init(&bus_priv->common_data.spin_lock); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + for (i = 0; i < CAM_SFE_BUS_RD_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + 
list_add_tail(&bus_priv->common_data.evt_payload[i].list, + &bus_priv->common_data.free_payload_list); + } + + sfe_bus_local->hw_ops.reserve = cam_sfe_bus_acquire_bus_rd; + sfe_bus_local->hw_ops.release = cam_sfe_bus_release_bus_rd; + sfe_bus_local->hw_ops.start = cam_sfe_bus_start_bus_rd; + sfe_bus_local->hw_ops.stop = cam_sfe_bus_stop_bus_rd; + sfe_bus_local->hw_ops.init = cam_sfe_bus_init_hw; + sfe_bus_local->hw_ops.deinit = cam_sfe_bus_deinit_hw; + sfe_bus_local->top_half_handler = cam_sfe_bus_rd_handle_irq; + sfe_bus_local->bottom_half_handler = NULL; + sfe_bus_local->hw_ops.process_cmd = cam_sfe_bus_rd_process_cmd; + + *sfe_bus = sfe_bus_local; + bus_priv->common_data.sfe_debug_cfg = 0; + + return rc; + +deinit_sfe_bus_rd: + for (--i; i >= 0; i--) + cam_sfe_bus_deinit_sfe_bus_rd_resource( + &bus_priv->sfe_bus_rd[i]); +deinit_rm: + if (i < 0) + i = bus_priv->num_client; + for (--i; i >= 0; i--) + cam_sfe_bus_deinit_rm_resource(&bus_priv->bus_client[i]); + +free_bus_priv: + CAM_MEM_FREE(sfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(sfe_bus_local); + +end: + return rc; +} + +int cam_sfe_bus_rd_deinit( + struct cam_sfe_bus **sfe_bus) +{ + int i, rc = 0; + unsigned long flags; + struct cam_sfe_bus_rd_priv *bus_priv = NULL; + struct cam_sfe_bus *sfe_bus_local; + + if (!sfe_bus || !*sfe_bus) { + CAM_ERR(CAM_SFE, "Invalid input"); + return -EINVAL; + } + sfe_bus_local = *sfe_bus; + + bus_priv = sfe_bus_local->bus_priv; + if (!bus_priv) { + CAM_ERR(CAM_SFE, "bus_priv is NULL"); + rc = -ENODEV; + goto free_bus_local; + } + + spin_lock_irqsave(&bus_priv->common_data.spin_lock, flags); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + for (i = 0; i < CAM_SFE_BUS_RD_PAYLOAD_MAX; i++) + INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + spin_unlock_irqrestore(&bus_priv->common_data.spin_lock, flags); + + for (i = 0; i < bus_priv->num_client; i++) { + rc = cam_sfe_bus_deinit_rm_resource(&bus_priv->bus_client[i]); + if (rc < 0) + 
CAM_ERR(CAM_SFE, + "Deinit RM failed rc=%d", rc); + } + + for (i = 0; i < bus_priv->num_bus_rd_resc; i++) { + rc = cam_sfe_bus_deinit_sfe_bus_rd_resource( + &bus_priv->sfe_bus_rd[i]); + if (rc < 0) + CAM_ERR(CAM_SFE, + "Deinit SFE RD failed rc=%d", rc); + } + + rc = cam_irq_controller_deinit( + &bus_priv->common_data.bus_irq_controller); + if (rc) + CAM_ERR(CAM_SFE, + "Deinit IRQ Controller failed rc=%d", rc); + + CAM_MEM_FREE(sfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(sfe_bus_local); + *sfe_bus = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.h new file mode 100644 index 0000000000..b92059e098 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SFE_BUS_RD_H_ +#define _CAM_SFE_BUS_RD_H_ + +#include "cam_sfe_bus.h" + +#define CAM_SFE_BUS_RD_MAX_CLIENTS 3 +#define CAM_SFE_BUS_RD_CONS_ERR_MAX 32 + +/* IRQ status bits */ +#define CAM_SFE_BUS_RD_IRQ_CONS_VIOLATION BIT(0) +#define CAM_SFE_BUS_RD_IRQ_RUP_DONE BIT(1) +#define CAM_SFE_BUS_RD_IRQ_RD0_BUF_DONE BIT(2) +#define CAM_SFE_BUS_RD_IRQ_RD1_BUF_DONE BIT(3) +#define CAM_SFE_BUS_RD_IRQ_RD2_BUF_DONE BIT(4) +#define CAM_SFE_BUS_RD_IRQ_CCIF_VIOLATION BIT(31) + +/* Client core config selection */ +#define CAM_SFE_BUS_RD_EN_CLIENT_CFG BIT(0) +#define CAM_SFE_BUS_RD_EN_CONS_CHK_CFG BIT(2) + +enum cam_sfe_bus_rd_type { + CAM_SFE_BUS_RD_RDI0, + CAM_SFE_BUS_RD_RDI1, + CAM_SFE_BUS_RD_RDI2, + CAM_SFE_BUS_RD_MAX, +}; + +/* + * struct cam_sfe_bus_rd_err_irq_desc: + * + * @Brief: Bus rd error irq description + */ +struct cam_sfe_bus_rd_err_irq_desc { + uint32_t bitmask; + char *err_name; + char *desc; +}; + +/* + * struct cam_sfe_bus_rd_constraint_error_desc: + * + * @Brief: Constraint error description + */ +struct cam_sfe_bus_rd_constraint_error_desc { + uint32_t bitmask; + char *error_desc; +}; + +/* + * struct cam_sfe_bus_rd_constraint_error_info + * + * @Brief: Constraint error info + * + * @constraint_error_list: Error description for various constraint errors. + * @num_cons_err: Number of constraint violation errors. + * @cons_chk_en_val: Cons checker config value for debug select + * @cons_chk_en_avail: Indicate if there is an option to enable constraint checker + * violation in core cfg reg for each client. 
+ */
+struct cam_sfe_bus_rd_constraint_error_info {
+	struct cam_sfe_bus_rd_constraint_error_desc
+		*constraint_error_list;
+	uint32_t num_cons_err;
+	uint32_t cons_chk_en_val;
+	bool cons_chk_en_avail;
+};
+
+/*
+ * struct cam_sfe_bus_rd_reg_offset_common:
+ *
+ * @Brief: Common registers across BUS RD Clients
+ */
+struct cam_sfe_bus_rd_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t misr_reset;
+	uint32_t pwr_iso_cfg;
+	uint32_t input_if_cmd;
+	uint32_t test_bus_ctrl;
+	uint32_t security_cfg;
+	uint32_t cons_violation_status;
+	uint32_t ccif_violation_status;
+	struct cam_irq_controller_reg_info irq_reg_info;
+};
+
+/*
+ * struct cam_sfe_bus_rd_reg_offset_bus_client:
+ *
+ * @Brief: Register offsets for BUS RD Clients
+ */
+struct cam_sfe_bus_rd_reg_offset_bus_client {
+	uint32_t cfg;
+	uint32_t image_addr;
+	uint32_t buf_width;
+	uint32_t buf_height;
+	uint32_t stride;
+	uint32_t unpacker_cfg;
+	uint32_t latency_buf_allocation;
+	uint32_t system_cache_cfg;
+	uint32_t addr_cfg;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+	char *name;
+};
+
+/*
+ * struct cam_sfe_bus_rd_info:
+ *
+ * @Brief: HW capability of SFE Bus RD Client
+ */
+struct cam_sfe_bus_rd_info {
+	enum cam_sfe_bus_rd_type sfe_bus_rd_type;
+	uint32_t mid[CAM_SFE_BUS_MAX_MID_PER_PORT];
+	uint32_t max_width;
+	uint32_t max_height;
+};
+
+/*
+ * struct cam_sfe_bus_rd_hw_info:
+ *
+ * @Brief: HW register info for entire Bus
+ *
+ * @common_reg: Common register details
+ * @num_client: Number of bus rd clients
+ * @bus_client_reg: Bus client register info
+ * @num_bus_rd_resc: Number of SFE BUS RD masters
+ * @sfe_bus_rd_info: SFE bus rd client info
+ * @num_bus_rd_errors: Number of entries in @bus_rd_err_desc
+ * @bus_rd_err_desc: Bus rd error irq descriptions
+ * @top_irq_shift: Top irq shift val
+ * @latency_buf_allocation: latency buf allocation
+ * @sys_cache_default_val: System cache default config
+ * @irq_err_mask: IRQ error mask
+ * @fs_sync_shift: Shift to enable FS sync
+ * @constraint_error_info: constraint violation errors info
+ */
+struct
cam_sfe_bus_rd_hw_info { + struct cam_sfe_bus_rd_reg_offset_common common_reg; + uint32_t num_client; + struct cam_sfe_bus_rd_reg_offset_bus_client + bus_client_reg[CAM_SFE_BUS_RD_MAX_CLIENTS]; + uint32_t num_bus_rd_resc; + struct cam_sfe_bus_rd_info + sfe_bus_rd_info[CAM_SFE_BUS_RD_MAX]; + uint32_t num_bus_rd_errors; + struct cam_sfe_bus_rd_err_irq_desc *bus_rd_err_desc; + uint32_t top_irq_shift; + uint32_t latency_buf_allocation; + uint32_t sys_cache_default_val; + uint32_t irq_err_mask; + uint32_t fs_sync_shift; + struct cam_sfe_bus_rd_constraint_error_info *constraint_error_info; +}; + +/* + * cam_sfe_bus_rd_init() + * + * @Brief: Initialize Bus layer + * + * @soc_info: Soc Information for the associated HW + * @hw_intf: HW Interface of HW to which this resource belongs + * @bus_hw_info: BUS HW info that contains details of BUS registers + * @sfe_bus: Pointer to sfe_bus structure which will be filled + * and returned on successful initialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_bus_rd_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *sfe_irq_controller, + struct cam_sfe_bus **sfe_bus); + +/* + * cam_sfe_bus_rd_deinit() + * + * @Brief: Deinitialize Bus layer + * + * @sfe_bus: Pointer to sfe_bus structure to deinitialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_bus_rd_deinit(struct cam_sfe_bus **sfe_bus); + +#endif /* _CAM_SFE_BUS_RD_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.c new file mode 100644 index 0000000000..08c262b4c0 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.c @@ -0,0 +1,3801 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include + +#include + +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_hw_intf.h" +#include "cam_ife_hw_mgr.h" +#include "cam_sfe_hw_intf.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_sfe_bus_wr.h" +#include "cam_sfe_core.h" +#include "cam_sfe_soc.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_trace.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +static const char drv_name[] = "sfe_bus_wr"; + +#define CAM_SFE_BUS_WR_PAYLOAD_MAX 256 + +#define CAM_SFE_RDI_BUS_DEFAULT_WIDTH 0xFFFF +#define CAM_SFE_RDI_BUS_DEFAULT_STRIDE 0xFFFF + +#define MAX_BUF_UPDATE_REG_NUM \ + (sizeof(struct cam_sfe_bus_reg_offset_bus_client) / 4) +#define MAX_REG_VAL_PAIR_SIZE \ + (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES) + +enum cam_sfe_bus_wr_packer_format { + PACKER_FMT_PLAIN_128, + PACKER_FMT_PLAIN_8, + PACKER_FMT_PLAIN_8_ODD_EVEN, + PACKER_FMT_PLAIN_8_LSB_MSB_10, + PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN, + PACKER_FMT_PLAIN_16_10BPP, + PACKER_FMT_PLAIN_16_12BPP, + PACKER_FMT_PLAIN_16_14BPP, + PACKER_FMT_PLAIN_16_16BPP, + PACKER_FMT_PLAIN_32, + PACKER_FMT_PLAIN_64, + PACKER_FMT_TP_10, + PACKER_FMT_MIPI10, + PACKER_FMT_MIPI12, + PACKER_FMT_MIPI14, + PACKER_FMT_MIPI20, + PACKER_FMT_PLAIN_32_20BPP, + PACKER_FMT_MAX, +}; + +enum cam_sfe_bus_wr_wm_mode { + CAM_SFE_WM_LINE_BASED_MODE, + CAM_SFE_WM_FRAME_BASED_MODE, + CAM_SFE_WM_INDEX_BASED_MODE, +}; + +struct cam_sfe_bus_wr_common_data { + uint32_t core_index; + uint32_t hw_version; + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + void *sfe_irq_controller; + void *buf_done_controller; + void *bus_irq_controller; + struct cam_sfe_bus_reg_offset_common *common_reg; + uint32_t io_buf_update[ + MAX_REG_VAL_PAIR_SIZE]; + struct list_head free_payload_list; + spinlock_t spin_lock; + struct 
cam_sfe_bus_wr_irq_evt_payload evt_payload[ + CAM_SFE_BUS_WR_PAYLOAD_MAX]; + struct mutex bus_mutex; + uint32_t secure_mode; + uint32_t num_sec_out; + uint32_t addr_no_sync; + uint32_t line_done_cfg; + uint32_t pack_align_shift; + uint32_t max_bw_counter_limit; + bool err_irq_subscribe; + cam_hw_mgr_event_cb_func event_cb; + uint32_t irq_err_mask; + + uint32_t sys_cache_default_cfg; + uint32_t sfe_debug_cfg; + uint32_t perf_cnt_cfg[CAM_SFE_PERF_CNT_MAX]; + struct cam_sfe_bus_cache_dbg_cfg cache_dbg_cfg; + struct cam_hw_soc_info *soc_info; + uint32_t cntr; + bool perf_cnt_en; +}; + +struct cam_sfe_wr_scratch_buf_info { + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t slice_height; + dma_addr_t io_addr; +}; + +struct cam_sfe_bus_wr_wm_resource_data { + uint32_t index; + struct cam_sfe_bus_wr_common_data *common_data; + struct cam_sfe_bus_reg_offset_bus_client *hw_regs; + struct cam_sfe_wr_scratch_buf_info scratch_buf_info; + + + uint32_t offset; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t format; + enum cam_sfe_bus_wr_packer_format pack_fmt; + enum cam_sfe_bus_wr_wm_mode wm_mode; + + uint32_t h_init; + + uint32_t irq_subsample_period; + uint32_t irq_subsample_pattern; + uint32_t framedrop_period; + uint32_t framedrop_pattern; + + uint32_t en_cfg; + uint32_t is_dual; + + uint32_t acquired_width; + uint32_t acquired_height; + + uint32_t cache_cfg; + int32_t current_scid; + bool enable_caching; + bool init_cfg_done; + bool hfr_cfg_done; + bool use_wm_pack; + bool en_line_done; +}; + +struct cam_sfe_bus_wr_comp_grp_data { + enum cam_sfe_bus_wr_comp_grp_type comp_grp_type; + uint32_t comp_done_mask; + struct cam_sfe_bus_wr_common_data *common_data; + + uint32_t is_master; + uint32_t is_dual; + uint32_t dual_slave_core; + uint32_t intra_client_mask; + uint32_t addr_sync_mode; + uint32_t composite_mask; + + uint32_t acquire_dev_cnt; + uint32_t irq_trigger_cnt; +}; + +struct cam_sfe_bus_wr_out_data { + uint32_t out_type; + uint32_t 
source_group; + struct cam_sfe_bus_wr_common_data *common_data; + struct cam_sfe_bus_wr_priv *bus_priv; + + uint32_t num_wm; + struct cam_isp_resource_node *wm_res; + + struct cam_isp_resource_node *comp_grp; + struct list_head sfe_out_list; + + uint32_t is_master; + uint32_t is_dual; + + uint32_t format; + uint32_t max_width; + uint32_t max_height; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t secure_mode; + void *priv; +}; + +struct cam_sfe_bus_wr_priv { + struct cam_sfe_bus_wr_common_data common_data; + uint32_t num_client; + uint32_t num_out; + uint32_t max_out_res; + uint32_t num_comp_grp; + uint32_t top_irq_shift; + + struct cam_isp_resource_node *comp_grp; + struct cam_isp_resource_node *sfe_out; + + int bus_irq_handle; + int error_irq_handle; + void *tasklet_info; + struct cam_sfe_bus_wr_constraint_error_info *constraint_error_info; + struct cam_sfe_bus_sfe_out_hw_info *sfe_out_hw_info; + struct cam_sfe_bus_wr_hw_info *bus_wr_hw_info; +}; + +static int cam_sfe_bus_subscribe_error_irq( + struct cam_sfe_bus_wr_priv *bus_priv); + +static inline int32_t cam_sfe_bus_wr_get_out_type( + struct cam_sfe_bus_wr_priv *bus_priv, + int32_t index) +{ + int32_t sfe_out_type = CAM_SFE_BUS_SFE_OUT_MAX; + + if (bus_priv && bus_priv->sfe_out_hw_info) + sfe_out_type = + bus_priv->sfe_out_hw_info[index].sfe_out_type; + + return sfe_out_type; +} + +static bool cam_sfe_bus_can_be_secure(uint32_t out_type) +{ + switch (out_type) { + case CAM_SFE_BUS_SFE_OUT_RAW_DUMP: + case CAM_SFE_BUS_SFE_OUT_RDI0: + case CAM_SFE_BUS_SFE_OUT_RDI1: + case CAM_SFE_BUS_SFE_OUT_RDI2: + case CAM_SFE_BUS_SFE_OUT_RDI3: + case CAM_SFE_BUS_SFE_OUT_RDI4: + case CAM_SFE_BUS_SFE_OUT_IR: + return true; + case CAM_SFE_BUS_SFE_OUT_LCR: + case CAM_SFE_BUS_SFE_OUT_BE_0: + case CAM_SFE_BUS_SFE_OUT_BHIST_0: + case CAM_SFE_BUS_SFE_OUT_BE_1: + case CAM_SFE_BUS_SFE_OUT_BHIST_1: + case CAM_SFE_BUS_SFE_OUT_BE_2: + case CAM_SFE_BUS_SFE_OUT_BHIST_2: + case CAM_SFE_BUS_SFE_OUT_BAYER_RS_0: + case 
CAM_SFE_BUS_SFE_OUT_BAYER_RS_1: + case CAM_SFE_BUS_SFE_OUT_BAYER_RS_2: + case CAM_SFE_BUS_SFE_OUT_HDR_STATS: + default: + return false; + } +} + +static enum cam_sfe_bus_sfe_out_type + cam_sfe_bus_wr_get_out_res_id(uint32_t res_type) +{ + switch (res_type) { + case CAM_ISP_SFE_OUT_RES_RAW_DUMP: + return CAM_SFE_BUS_SFE_OUT_RAW_DUMP; + case CAM_ISP_SFE_OUT_RES_RDI_0: + return CAM_SFE_BUS_SFE_OUT_RDI0; + case CAM_ISP_SFE_OUT_RES_RDI_1: + return CAM_SFE_BUS_SFE_OUT_RDI1; + case CAM_ISP_SFE_OUT_RES_RDI_2: + return CAM_SFE_BUS_SFE_OUT_RDI2; + case CAM_ISP_SFE_OUT_RES_RDI_3: + return CAM_SFE_BUS_SFE_OUT_RDI3; + case CAM_ISP_SFE_OUT_RES_RDI_4: + return CAM_SFE_BUS_SFE_OUT_RDI4; + case CAM_ISP_SFE_OUT_BE_STATS_0: + return CAM_SFE_BUS_SFE_OUT_BE_0; + case CAM_ISP_SFE_OUT_BHIST_STATS_0: + return CAM_SFE_BUS_SFE_OUT_BHIST_0; + case CAM_ISP_SFE_OUT_BE_STATS_1: + return CAM_SFE_BUS_SFE_OUT_BE_1; + case CAM_ISP_SFE_OUT_BHIST_STATS_1: + return CAM_SFE_BUS_SFE_OUT_BHIST_1; + case CAM_ISP_SFE_OUT_BE_STATS_2: + return CAM_SFE_BUS_SFE_OUT_BE_2; + case CAM_ISP_SFE_OUT_BHIST_STATS_2: + return CAM_SFE_BUS_SFE_OUT_BHIST_2; + case CAM_ISP_SFE_OUT_RES_LCR: + return CAM_SFE_BUS_SFE_OUT_LCR; + case CAM_ISP_SFE_OUT_RES_IR: + return CAM_SFE_BUS_SFE_OUT_IR; + case CAM_ISP_SFE_OUT_BAYER_RS_STATS_0: + return CAM_SFE_BUS_SFE_OUT_BAYER_RS_0; + case CAM_ISP_SFE_OUT_BAYER_RS_STATS_1: + return CAM_SFE_BUS_SFE_OUT_BAYER_RS_1; + case CAM_ISP_SFE_OUT_BAYER_RS_STATS_2: + return CAM_SFE_BUS_SFE_OUT_BAYER_RS_2; + case CAM_ISP_SFE_OUT_HDR_STATS: + return CAM_SFE_BUS_SFE_OUT_HDR_STATS; + default: + return CAM_SFE_BUS_SFE_OUT_MAX; + } +} + +bool cam_sfe_is_mipi_pcking_needed( + struct cam_sfe_bus_wr_priv *bus_priv, + int wm_index) +{ + int i; + + for(i = 0; i < bus_priv->num_out; i++) + { + if (((wm_index == bus_priv->sfe_out_hw_info[i].wm_idx) && + (bus_priv->sfe_out_hw_info[i].sfe_out_type == + CAM_SFE_BUS_SFE_OUT_RAW_DUMP)) || + ((wm_index == bus_priv->sfe_out_hw_info[i].wm_idx) && + 
(bus_priv->sfe_out_hw_info[i].sfe_out_type == + CAM_SFE_BUS_SFE_OUT_IR))) + return true; + } + + return false; +} + +static enum cam_sfe_bus_wr_packer_format + cam_sfe_bus_get_packer_fmt( + struct cam_sfe_bus_wr_priv *bus_priv, + uint32_t out_fmt, + int wm_index) +{ + bool is_mipi_packing = + cam_sfe_is_mipi_pcking_needed(bus_priv, wm_index); + + switch (out_fmt) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_PLAIN16_8: + case CAM_FORMAT_PLAIN128: + case CAM_FORMAT_PD8: + return PACKER_FMT_PLAIN_128; + case CAM_FORMAT_MIPI_RAW_10: + if (is_mipi_packing) + return PACKER_FMT_MIPI10; + else + return PACKER_FMT_PLAIN_128; + case CAM_FORMAT_MIPI_RAW_12: + if (is_mipi_packing) + return PACKER_FMT_MIPI12; + else + return PACKER_FMT_PLAIN_128; + case CAM_FORMAT_MIPI_RAW_14: + if (is_mipi_packing) + return PACKER_FMT_MIPI14; + else + return PACKER_FMT_PLAIN_128; + case CAM_FORMAT_MIPI_RAW_20: + if (is_mipi_packing) + return PACKER_FMT_MIPI20; + else + return PACKER_FMT_PLAIN_128; + case CAM_FORMAT_PLAIN8: + return PACKER_FMT_PLAIN_8; + case CAM_FORMAT_NV12: + case CAM_FORMAT_UBWC_NV12: + case CAM_FORMAT_UBWC_NV12_4R: + case CAM_FORMAT_Y_ONLY: + return PACKER_FMT_PLAIN_8_LSB_MSB_10; + case CAM_FORMAT_PLAIN16_10: + return PACKER_FMT_PLAIN_16_10BPP; + case CAM_FORMAT_PLAIN16_12: + return PACKER_FMT_PLAIN_16_12BPP; + case CAM_FORMAT_PLAIN16_14: + return PACKER_FMT_PLAIN_16_14BPP; + case CAM_FORMAT_PLAIN16_16: + return PACKER_FMT_PLAIN_16_16BPP; + case CAM_FORMAT_PLAIN32: + case CAM_FORMAT_ARGB: + return PACKER_FMT_PLAIN_32; + case CAM_FORMAT_PLAIN64: + case CAM_FORMAT_ARGB_16: + case CAM_FORMAT_PD10: + return PACKER_FMT_PLAIN_64; + case CAM_FORMAT_UBWC_TP10: + case CAM_FORMAT_TP10: + return PACKER_FMT_TP_10; + default: + return PACKER_FMT_MAX; + } +} + +static void cam_sfe_bus_wr_print_constraint_errors( + struct cam_sfe_bus_wr_priv *bus_priv, + uint8_t *wm_name, + uint32_t constraint_errors) +{ + struct 
cam_sfe_bus_wr_constraint_error_info *constraint_err_info; + uint32_t i; + + constraint_err_info = bus_priv->constraint_error_info; + for (i = 0; i < constraint_err_info->num_cons_err; i++) { + if (constraint_err_info->constraint_error_list[i].bitmask & constraint_errors) + CAM_ERR(CAM_SFE, "Write Master: %s, Error_desc: %s", wm_name, + constraint_err_info->constraint_error_list[i].error_description); + } +} + +static void cam_sfe_bus_wr_get_constraint_errors( + bool *skip_error_notify, + struct cam_sfe_bus_wr_priv *bus_priv, + uint32_t *cons_err) +{ + uint32_t i, j, sfe_out_type; + uint8_t *wm_name = NULL; + struct cam_isp_resource_node *out_rsrc_node = NULL; + struct cam_sfe_bus_wr_out_data *out_rsrc_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + struct cam_sfe_bus_wr_common_data *common_data = NULL; + struct cam_sfe_bus_wr_constraint_error_info *cons_err_info = NULL; + bool out_rsrc_is_rdi; + + cons_err_info = bus_priv->constraint_error_info; + + for (i = 0; i < bus_priv->num_out; i++) { + sfe_out_type = cam_sfe_bus_wr_get_out_type(bus_priv, i); + if ((sfe_out_type < 0) || + (sfe_out_type >= CAM_SFE_BUS_SFE_OUT_MAX)) { + CAM_ERR(CAM_SFE, "Invalid sfe out type:%d", + sfe_out_type); + return; + } + + out_rsrc_node = &bus_priv->sfe_out[sfe_out_type]; + if (!out_rsrc_node || !out_rsrc_node->res_priv) { + CAM_DBG(CAM_ISP, + "SFE out:%d out rsrc node or data is NULL", i); + continue; + } + + out_rsrc_data = out_rsrc_node->res_priv; + common_data = out_rsrc_data->common_data; + + if ((out_rsrc_data->out_type >= CAM_SFE_BUS_SFE_OUT_RDI0) && + (out_rsrc_data->out_type <= CAM_SFE_BUS_SFE_OUT_RDI4)) + out_rsrc_is_rdi = true; + else + out_rsrc_is_rdi = false; + + for (j = 0; j < out_rsrc_data->num_wm; j++) { + wm_data = out_rsrc_data->wm_res[j].res_priv; + wm_name = out_rsrc_data->wm_res[j].res_name; + if (!wm_data) + continue; + + *cons_err = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->debug_status_1); + if (*cons_err == 0) + continue; 
+ + /* + * Due to HW bug in constraint checker which signals false alert, + * skip image addr unalign constraint error for RDI WMs on + * SFE v780 or older and skip image width unalign constraint error + * for any WM when programmed in frame base mode. + */ + + if (out_rsrc_is_rdi && (*cons_err & + BIT(cons_err_info->img_addr_unalign_shift))) { + *skip_error_notify = true; + CAM_DBG(CAM_SFE, + "Ignoring Image Addr Unalign error on SFE[%u] out rsrc: %u WM: %s", + common_data->core_index, out_rsrc_data->out_type, wm_name); + continue; + } + + if ((*cons_err & BIT(cons_err_info->img_width_unalign_shift)) && + (wm_data->en_cfg & BIT(16))) { + *skip_error_notify = true; + CAM_DBG(CAM_SFE, + "Ignoring Image Width Unalign error on SFE[%u] out rsrc: %u WM: %s in frame based mode", + common_data->core_index, out_rsrc_data->out_type, wm_name); + continue; + } + + cam_sfe_bus_wr_print_constraint_errors( + bus_priv, wm_name, *cons_err); + } + } +} + +static inline void cam_sfe_bus_config_rdi_wm_frame_based_mode( + struct cam_sfe_bus_wr_wm_resource_data *rsrc_data) +{ + rsrc_data->width = CAM_SFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = CAM_SFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; +} + +static int cam_sfe_bus_config_rdi_wm( + struct cam_sfe_bus_wr_wm_resource_data *rsrc_data) +{ + + rsrc_data->pack_fmt = PACKER_FMT_PLAIN_128; + switch (rsrc_data->format) { + case CAM_FORMAT_MIPI_RAW_10: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP((rsrc_data->width * 5) / 4, 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + + if (rsrc_data->use_wm_pack) { + rsrc_data->pack_fmt = PACKER_FMT_MIPI10; + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) + rsrc_data->width = 
ALIGNUP((rsrc_data->acquired_width), 8); + } + break; + case CAM_FORMAT_MIPI_RAW_6: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP((rsrc_data->width * 3) / 4, 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + case CAM_FORMAT_MIPI_RAW_8: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP(rsrc_data->width, 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + case CAM_FORMAT_MIPI_RAW_12: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP((rsrc_data->width * 3) / 2, 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + + if (rsrc_data->use_wm_pack) { + rsrc_data->pack_fmt = PACKER_FMT_MIPI12; + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) + rsrc_data->width = ALIGNUP((rsrc_data->acquired_width), 8); + } + break; + case CAM_FORMAT_MIPI_RAW_14: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP((rsrc_data->width * 7) / 2, 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + if (rsrc_data->use_wm_pack) { + rsrc_data->pack_fmt = PACKER_FMT_MIPI14; + if (rsrc_data->wm_mode == 
CAM_SFE_WM_LINE_BASED_MODE) + rsrc_data->width = ALIGNUP((rsrc_data->acquired_width), 8); + } + break; + case CAM_FORMAT_MIPI_RAW_16: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP((rsrc_data->width * 2), 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + case CAM_FORMAT_MIPI_RAW_20: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP((rsrc_data->width * 5) / 2, 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + case CAM_FORMAT_PLAIN128: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP((rsrc_data->width * 16), 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + case CAM_FORMAT_PLAIN32_20: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->width = + ALIGNUP((rsrc_data->width * 4), 16) / 16; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + case CAM_FORMAT_PLAIN8: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->en_cfg = 0x1; + rsrc_data->stride = rsrc_data->width * 2; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + 
cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 2, 16) / 16; + rsrc_data->en_cfg = 0x1; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + case CAM_FORMAT_PLAIN64: + if (rsrc_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 8, 16) / 16; + rsrc_data->en_cfg = 0x1; + } else if (rsrc_data->wm_mode == CAM_SFE_WM_FRAME_BASED_MODE) { + cam_sfe_bus_config_rdi_wm_frame_based_mode(rsrc_data); + } else { + CAM_WARN(CAM_SFE, "No index mode support for SFE WM: %u", + rsrc_data->index); + } + break; + default: + CAM_ERR(CAM_SFE, "Unsupported RDI format %d", + rsrc_data->format); + return -EINVAL; + } + + return 0; +} + +static int cam_sfe_bus_acquire_wm( + struct cam_sfe_bus_wr_priv *bus_priv, + struct cam_sfe_hw_sfe_out_acquire_args *out_acq_args, + void *tasklet, + enum cam_sfe_bus_sfe_out_type sfe_out_res_id, + enum cam_sfe_bus_plane_type plane, + struct cam_isp_resource_node *wm_res, + uint32_t *comp_done_mask, + enum cam_sfe_bus_wr_comp_grp_type *comp_grp_id) +{ + int32_t wm_idx = 0, rc; + struct cam_sfe_bus_wr_wm_resource_data *rsrc_data = NULL; + char wm_mode[50] = {0}; + + if (wm_res->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_SFE, "WM:%d not available state:%d", + wm_idx, wm_res->res_state); + return -EALREADY; + } + + rsrc_data = wm_res->res_priv; + wm_idx = rsrc_data->index; + rsrc_data->format = out_acq_args->out_port_info->format; + rsrc_data->use_wm_pack = out_acq_args->use_wm_pack; + 
rsrc_data->pack_fmt = cam_sfe_bus_get_packer_fmt(bus_priv, + rsrc_data->format, wm_idx); + + rsrc_data->width = out_acq_args->out_port_info->width; + rsrc_data->height = out_acq_args->out_port_info->height; + rsrc_data->acquired_width = out_acq_args->out_port_info->width; + rsrc_data->acquired_height = out_acq_args->out_port_info->height; + rsrc_data->is_dual = out_acq_args->is_dual; + rsrc_data->enable_caching = false; + rsrc_data->offset = 0; + + /* RDI0-4 line based mode by default */ + if (sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_RDI0 || + sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_RDI1 || + sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_RDI2 || + sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_RDI3 || + sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_RDI4) + rsrc_data->wm_mode = CAM_SFE_WM_LINE_BASED_MODE; + + /* Set WM offset value to default */ + rsrc_data->offset = 0; + CAM_DBG(CAM_SFE, "WM:%d width %d height %d", rsrc_data->index, + rsrc_data->width, rsrc_data->height); + + if ((sfe_out_res_id >= CAM_SFE_BUS_SFE_OUT_RDI0) && + (sfe_out_res_id <= CAM_SFE_BUS_SFE_OUT_RDI4)) { + rc = cam_sfe_bus_config_rdi_wm(rsrc_data); + if (rc) + return rc; + } else if (sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_RAW_DUMP) { + rsrc_data->stride = rsrc_data->width; + rsrc_data->en_cfg = 0x1; + switch (rsrc_data->format) { + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + /* LSB aligned */ + rsrc_data->pack_fmt |= + (1 << bus_priv->common_data.pack_align_shift); + break; + default: + break; + } + } else if (sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_IR) { + rsrc_data->stride = rsrc_data->width; + rsrc_data->en_cfg = 0x1; + switch (rsrc_data->format) { + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + /* LSB aligned */ + rsrc_data->pack_fmt |= + (1 << bus_priv->common_data.pack_align_shift); + break; + default: + break; + } + } else if ((sfe_out_res_id >= CAM_SFE_BUS_SFE_OUT_BE_0) 
&& + (sfe_out_res_id <= CAM_SFE_BUS_SFE_OUT_BAYER_RS_2)) { + rsrc_data->width = 0; + rsrc_data->height = 0; + rsrc_data->stride = 1; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + } else if (sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_LCR) { + switch (rsrc_data->format) { + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + rsrc_data->stride = ALIGNUP(rsrc_data->width * 2, 8); + rsrc_data->en_cfg = 0x1; + /* LSB aligned */ + rsrc_data->pack_fmt |= + (1 << bus_priv->common_data.pack_align_shift); + break; + default: + CAM_ERR(CAM_SFE, "Invalid format %d out_type:%d", + rsrc_data->format, sfe_out_res_id); + return -EINVAL; + } + } else if (sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_HDR_STATS) { + rsrc_data->en_cfg = 0x1; + rsrc_data->stride = rsrc_data->width; + switch (rsrc_data->format) { + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + /* LSB aligned */ + rsrc_data->pack_fmt |= + (1 << bus_priv->common_data.pack_align_shift); + break; + default: + break; + } + } else { + CAM_ERR(CAM_SFE, "Invalid out_type:%d requested", + sfe_out_res_id); + return -EINVAL; + } + + *comp_grp_id = rsrc_data->hw_regs->comp_group; + *comp_done_mask |= (1 << sfe_out_res_id); + + switch (rsrc_data->en_cfg) { + case 0x1: + strscpy(wm_mode, "line-based", sizeof(wm_mode)); + break; + case ((0x1 << 16) | 0x1): + strscpy(wm_mode, "frame-based", sizeof(wm_mode)); + break; + case ((0x2 << 16) | 0x1): + strscpy(wm_mode, "index-based", sizeof(wm_mode)); + break; + } + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + wm_res->tasklet_info = tasklet; + + CAM_DBG(CAM_SFE, + "SFE:%d WM:%d %s processed width:%d height:%d format:0x%X pack_fmt 0x%x %s", + rsrc_data->common_data->core_index, rsrc_data->index, + wm_res->res_name, rsrc_data->width, rsrc_data->height, + rsrc_data->format, rsrc_data->pack_fmt, wm_mode); + return 0; +} + +static int cam_sfe_bus_release_wm(void 
*bus_priv, + struct cam_isp_resource_node *wm_res) +{ + struct cam_sfe_bus_wr_wm_resource_data *rsrc_data = + wm_res->res_priv; + + rsrc_data->offset = 0; + rsrc_data->width = 0; + rsrc_data->height = 0; + rsrc_data->stride = 0; + rsrc_data->format = 0; + rsrc_data->pack_fmt = 0; + rsrc_data->irq_subsample_period = 0; + rsrc_data->irq_subsample_pattern = 0; + rsrc_data->framedrop_period = 0; + rsrc_data->framedrop_pattern = 0; + rsrc_data->h_init = 0; + rsrc_data->init_cfg_done = false; + rsrc_data->hfr_cfg_done = false; + rsrc_data->en_cfg = 0; + rsrc_data->is_dual = 0; + rsrc_data->enable_caching = false; + rsrc_data->offset = 0; + + wm_res->tasklet_info = NULL; + wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + CAM_DBG(CAM_SFE, "SFE:%d Release WM:%d", + rsrc_data->common_data->core_index, rsrc_data->index); + + return 0; +} + +static int cam_sfe_bus_start_wm(struct cam_isp_resource_node *wm_res) +{ + int j; + const uint32_t image_cfg_height_shift_val = 16; + const uint32_t enable_debug_status_1 = 11 << 8; + struct cam_sfe_bus_wr_wm_resource_data *rsrc_data = + wm_res->res_priv; + struct cam_sfe_bus_wr_common_data *common_data = + rsrc_data->common_data; + + cam_io_w((rsrc_data->height << image_cfg_height_shift_val) + | rsrc_data->width, common_data->mem_base + + rsrc_data->hw_regs->image_cfg_0); + cam_io_w(rsrc_data->pack_fmt, + common_data->mem_base + rsrc_data->hw_regs->packer_cfg); + + /* configure line_done_cfg for RDI0-2 */ + if (rsrc_data->en_line_done) { + CAM_DBG(CAM_SFE, "configure line_done_cfg 0x%x for WM: %d", + rsrc_data->common_data->line_done_cfg, + rsrc_data->index); + cam_io_w_mb(rsrc_data->common_data->line_done_cfg, + common_data->mem_base + + rsrc_data->hw_regs->line_done_cfg); + } + + if (!(common_data->sfe_debug_cfg & SFE_DEBUG_DISABLE_MMU_PREFETCH)) { + cam_io_w_mb(1, common_data->mem_base + + rsrc_data->hw_regs->mmu_prefetch_cfg); + cam_io_w_mb(0xFFFFFFFF, common_data->mem_base + + rsrc_data->hw_regs->mmu_prefetch_max_offset); + 
	/* Tail of cam_sfe_bus_start_wm() — the function header is above this chunk. */
	CAM_DBG(CAM_SFE, "SFE: %u WM: %u MMU prefetch enabled",
		rsrc_data->common_data->core_index,
		rsrc_data->index);
	}

	/* Enable WM */
	cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base +
		rsrc_data->hw_regs->cfg);

	/* Enable constraint error detection */
	cam_io_w_mb(enable_debug_status_1,
		common_data->mem_base +
		rsrc_data->hw_regs->debug_status_cfg);

	CAM_DBG(CAM_SFE,
		"Start SFE:%u WM:%d %s offset:0x%X en_cfg:0x%X width:%d height:%d",
		rsrc_data->common_data->core_index, rsrc_data->index,
		wm_res->res_name, (uint32_t) rsrc_data->hw_regs->cfg,
		rsrc_data->en_cfg, rsrc_data->width, rsrc_data->height);
	CAM_DBG(CAM_SFE, "WM:%d pk_fmt:%d stride:%d",
		rsrc_data->index, rsrc_data->pack_fmt & PACKER_FMT_MAX,
		rsrc_data->stride);

	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
	common_data->cntr = 0;

	/*
	 * Program performance counters once for the whole bus: the first WM
	 * started writes every configured counter and latches perf_cnt_en so
	 * subsequent WM starts skip this block.
	 */
	if (!common_data->perf_cnt_en) {
		for (j = 0; j < CAM_SFE_PERF_CNT_MAX; j++) {
			if (!common_data->perf_cnt_cfg[j])
				continue;

			cam_io_w_mb(common_data->perf_cnt_cfg[j],
				common_data->mem_base +
				common_data->common_reg->perf_cnt_reg[j].perf_cnt_cfg);
			common_data->perf_cnt_en = true;
			CAM_DBG(CAM_ISP, "SFE:%u perf_cnt_%d:0x%x",
				rsrc_data->common_data->core_index,
				j, common_data->perf_cnt_cfg[j]);
		}
	}
	return 0;
}

/*
 * Stop a write-master: disable the client in HW (write 0 to its cfg
 * register), drop it back to RESERVED state, and clear the per-WM
 * init/HFR config flags plus the shared perf-counter latch so counters
 * get reprogrammed on the next start.
 */
static int cam_sfe_bus_stop_wm(struct cam_isp_resource_node *wm_res)
{
	struct cam_sfe_bus_wr_wm_resource_data *rsrc_data =
		wm_res->res_priv;
	struct cam_sfe_bus_wr_common_data *common_data =
		rsrc_data->common_data;

	/* Disable WM */
	cam_io_w_mb(0x0, common_data->mem_base + rsrc_data->hw_regs->cfg);
	CAM_DBG(CAM_SFE, "Stop SFE:%d WM:%d %s",
		rsrc_data->common_data->core_index, rsrc_data->index,
		wm_res->res_name);

	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	rsrc_data->init_cfg_done = false;
	rsrc_data->hfr_cfg_done = false;
	common_data->perf_cnt_en = false;

	return 0;
}

/*
 * Per-WM done IRQ handlers are intentionally stubbed out: buf-done
 * handling happens at the sfe_out/comp-grp level, so a direct WM done
 * event is treated as a programming error (-EPERM).
 */
static int cam_sfe_bus_handle_wm_done_top_half(uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	return -EPERM;
}

static int cam_sfe_bus_handle_wm_done_bottom_half(void *wm_node,
	void *evt_payload_priv)
{
	return -EPERM;
}

/*
 * Allocate and initialize one WM resource node. Also hands back the
 * composite group this client belongs to (looked up from the bus client
 * register table) via @comp_grp.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int cam_sfe_bus_init_wm_resource(uint32_t index,
	struct cam_sfe_bus_wr_priv *bus_priv,
	struct cam_sfe_bus_wr_hw_info *hw_info,
	struct cam_isp_resource_node *wm_res,
	struct cam_isp_resource_node **comp_grp,
	uint8_t *wm_name,
	bool en_line_done)
{
	struct cam_sfe_bus_wr_wm_resource_data *rsrc_data;

	rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus_wr_wm_resource_data),
		GFP_KERNEL);
	if (!rsrc_data) {
		/* NOTE(review): allocation failure is only logged at debug level */
		CAM_DBG(CAM_SFE, "Failed to alloc for WM res priv");
		return -ENOMEM;
	}

	wm_res->res_priv = rsrc_data;

	rsrc_data->index = index;
	rsrc_data->en_line_done = en_line_done;
	rsrc_data->hw_regs = &hw_info->bus_client_reg[index];
	rsrc_data->common_data = &bus_priv->common_data;
	*comp_grp = &bus_priv->comp_grp[(&hw_info->bus_client_reg[index])->comp_group];

	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
	INIT_LIST_HEAD(&wm_res->list);

	wm_res->start = cam_sfe_bus_start_wm;
	wm_res->stop = cam_sfe_bus_stop_wm;
	wm_res->top_half_handler = cam_sfe_bus_handle_wm_done_top_half;
	wm_res->bottom_half_handler =
		cam_sfe_bus_handle_wm_done_bottom_half;
	wm_res->hw_intf = bus_priv->common_data.hw_intf;
	if (wm_name)
		scnprintf(wm_res->res_name, CAM_ISP_RES_NAME_LEN, "%s",
			wm_name);

	return 0;
}

/*
 * Tear down a WM resource node: clear its ops/state and free the private
 * data allocated in cam_sfe_bus_init_wm_resource().
 * Returns -ENOMEM if the node had no private data to free.
 */
static int cam_sfe_bus_deinit_wm_resource(
	struct cam_isp_resource_node *wm_res)
{
	struct cam_sfe_bus_wr_wm_resource_data *rsrc_data;

	wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
	INIT_LIST_HEAD(&wm_res->list);

	wm_res->start = NULL;
	wm_res->stop = NULL;
	wm_res->top_half_handler = NULL;
	wm_res->bottom_half_handler = NULL;
	wm_res->hw_intf = NULL;

	rsrc_data = wm_res->res_priv;
	wm_res->res_priv = NULL;
	if (!rsrc_data)
		return -ENOMEM;
	CAM_MEM_FREE(rsrc_data);

	return 0;
}

/* OR this WM's done-bit into the composite group's aggregate mask. */
static void cam_sfe_bus_add_wm_to_comp_grp(
	struct cam_isp_resource_node *comp_grp,
	uint32_t composite_mask)
{
	struct cam_sfe_bus_wr_comp_grp_data *rsrc_data = comp_grp->res_priv;

	rsrc_data->composite_mask |= composite_mask;
}

/*
 * Acquire (or re-acquire) a composite group. First acquirer configures
 * dual/master mode and address-sync; later acquirers just bump the
 * refcount. Re-acquiring a group that is already STREAMING is rejected
 * with -EBUSY since the composite mask cannot change at runtime.
 */
static int cam_sfe_bus_acquire_comp_grp(
	struct cam_sfe_bus_wr_priv *bus_priv,
	struct cam_isp_out_port_generic_info *out_port_info,
	void *tasklet,
	uint32_t is_dual,
	uint32_t is_master,
	struct cam_isp_resource_node *comp_grp,
	enum cam_sfe_bus_wr_comp_grp_type comp_grp_id)
{
	int rc = 0;
	struct cam_sfe_bus_wr_comp_grp_data *rsrc_data = NULL;

	rsrc_data = comp_grp->res_priv;

	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		rsrc_data->intra_client_mask = 0x1;
		comp_grp->tasklet_info = tasklet;
		comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;

		rsrc_data->is_master = is_master;
		rsrc_data->is_dual = is_dual;

		/* Slave core uses address-sync mode; master drives addresses */
		if (is_master)
			rsrc_data->addr_sync_mode = 0;
		else
			rsrc_data->addr_sync_mode = 1;

	} else {
		rsrc_data = comp_grp->res_priv;
		/* Do not support runtime change in composite mask */
		if (comp_grp->res_state ==
			CAM_ISP_RESOURCE_STATE_STREAMING) {
			CAM_ERR(CAM_SFE, "Invalid State %d comp_grp:%u",
				comp_grp->res_state,
				rsrc_data->comp_grp_type);
			return -EBUSY;
		}
	}

	CAM_DBG(CAM_SFE, "Acquire SFE:%d comp_grp:%u",
		rsrc_data->common_data->core_index, rsrc_data->comp_grp_type);

	rsrc_data->acquire_dev_cnt++;

	return rc;
}

/*
 * Release one reference on a composite group; when the refcount drops to
 * zero the group's masks/sync settings are reset and it returns to the
 * AVAILABLE state. Releasing while STREAMING is an error (-EBUSY);
 * releasing an already-free group logs an error but returns 0.
 */
static int cam_sfe_bus_release_comp_grp(
	struct cam_sfe_bus_wr_priv *bus_priv,
	struct cam_isp_resource_node *in_comp_grp)
{
	struct cam_sfe_bus_wr_comp_grp_data *in_rsrc_data = NULL;

	if (!in_comp_grp) {
		CAM_ERR(CAM_SFE, "Invalid Params comp_grp %pK", in_comp_grp);
		return -EINVAL;
	}

	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		CAM_ERR(CAM_SFE, "Already released comp_grp");
		return 0;
	}

	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
		CAM_ERR(CAM_SFE, "Invalid State %d",
			in_comp_grp->res_state);
		return -EBUSY;
	}

	in_rsrc_data = in_comp_grp->res_priv;
	CAM_DBG(CAM_SFE, "Release SFE:%d comp_grp:%u",
		bus_priv->common_data.core_index,
		in_rsrc_data->comp_grp_type);

	in_rsrc_data->acquire_dev_cnt--;
	if (in_rsrc_data->acquire_dev_cnt == 0) {

		in_rsrc_data->dual_slave_core = CAM_SFE_CORE_MAX;
		in_rsrc_data->addr_sync_mode = 0;
		in_rsrc_data->composite_mask = 0;

		in_comp_grp->tasklet_info = NULL;
		in_comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
	}

	return 0;
}

/*
 * Start a composite group: report its buf-done IRQ mask to the caller
 * via @bus_irq_reg_mask[0] and mark the group STREAMING. Idempotent —
 * a second start while already streaming is a no-op.
 */
static int cam_sfe_bus_start_comp_grp(
	struct cam_isp_resource_node *comp_grp,
	uint32_t *bus_irq_reg_mask)
{
	int rc = 0;
	struct cam_sfe_bus_wr_comp_grp_data *rsrc_data = NULL;

	rsrc_data = comp_grp->res_priv;

	CAM_DBG(CAM_SFE,
		"Start SFE:%d comp_grp:%d streaming state:%d comp_mask:0x%X",
		rsrc_data->common_data->core_index,
		rsrc_data->comp_grp_type, comp_grp->res_state,
		rsrc_data->composite_mask);

	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
		return 0;

	/* CSID buf done register */
	bus_irq_reg_mask[0] = rsrc_data->comp_done_mask;

	CAM_DBG(CAM_SFE, "Start Done SFE:%d comp_grp:%d buf_done_mask:0x%x",
		rsrc_data->common_data->core_index,
		rsrc_data->comp_grp_type,
		bus_irq_reg_mask[0]);

	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	return rc;
}

/* Stop a composite group: simply drop it back to the RESERVED state. */
static inline int cam_sfe_bus_stop_comp_grp(
	struct cam_isp_resource_node *comp_grp)
{
	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;

	return 0;
}

/*
 * Allocate and initialize one composite-group resource node, wiring its
 * done-mask from @hw_info. Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static int cam_sfe_bus_wr_init_comp_grp(uint32_t index,
	struct cam_hw_soc_info *soc_info,
	struct cam_sfe_bus_wr_priv *bus_priv,
	struct cam_sfe_bus_wr_hw_info *hw_info,
	struct cam_isp_resource_node *comp_grp)
{
	struct cam_sfe_bus_wr_comp_grp_data *rsrc_data = NULL;

	rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus_wr_comp_grp_data),
		GFP_KERNEL);
	if (!rsrc_data)
		return -ENOMEM;

	comp_grp->res_priv = rsrc_data;

	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

	rsrc_data->comp_grp_type = index;
	rsrc_data->common_data = &bus_priv->common_data;
	rsrc_data->dual_slave_core = CAM_SFE_CORE_MAX;
	rsrc_data->comp_done_mask = hw_info->comp_done_mask[index];

	comp_grp->top_half_handler = NULL;
	comp_grp->hw_intf = bus_priv->common_data.hw_intf;

	return 0;
}

/*
 * Tear down a composite-group resource node and free its private data.
 * Returns -ENODEV if the node had no private data.
 */
static int cam_sfe_bus_deinit_comp_grp(
	struct cam_isp_resource_node *comp_grp)
{
	struct cam_sfe_bus_wr_comp_grp_data *rsrc_data =
		comp_grp->res_priv;

	comp_grp->start = NULL;
	comp_grp->stop = NULL;
	comp_grp->top_half_handler = NULL;
	comp_grp->bottom_half_handler = NULL;
	comp_grp->hw_intf = NULL;

	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;

	comp_grp->res_priv = NULL;

	if (!rsrc_data) {
		CAM_ERR(CAM_SFE, "comp_grp_priv is NULL");
		return -ENODEV;
	}
	CAM_MEM_FREE(rsrc_data);

	return 0;
}

/*
 * Process-cmd helper: report whether the given out resource was acquired
 * in secure mode. Writes true/false into the caller-supplied u32 at
 * secure_mode->data.
 */
static int cam_sfe_bus_wr_get_secure_mode(void *priv, void *cmd_args,
	uint32_t arg_size)
{
	struct cam_isp_hw_get_cmd_update *secure_mode = cmd_args;
	struct cam_sfe_bus_wr_out_data *rsrc_data;
	uint32_t *mode;

	rsrc_data = (struct cam_sfe_bus_wr_out_data *)
		secure_mode->res->res_priv;
	mode = (uint32_t *)secure_mode->data;
	*mode = (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) ?
		true : false;

	return 0;
}

/*
 * Acquire an SFE out resource: resolve the out type, acquire each WM it
 * owns, then acquire the composite group those WMs report into. Secure
 * mode is reference-counted under bus_mutex — every secure-capable out
 * port on the bus must be acquired with the same secure/non-secure mode,
 * otherwise the acquire fails with -EINVAL. On WM acquire failure,
 * already-acquired WMs are released.
 */
static int cam_sfe_bus_acquire_sfe_out(void *priv, void *acquire_args,
	uint32_t args_size)
{
	int rc = -ENODEV;
	int i;
	enum cam_sfe_bus_sfe_out_type sfe_out_res_id;
	struct cam_sfe_bus_wr_priv *bus_priv = priv;
	struct cam_sfe_acquire_args *acq_args = acquire_args;
	struct cam_sfe_hw_sfe_out_acquire_args *out_acquire_args;
	struct cam_isp_resource_node *rsrc_node = NULL;
	struct cam_sfe_bus_wr_out_data *rsrc_data = NULL;
	uint32_t secure_caps = 0, mode;
	enum cam_sfe_bus_wr_comp_grp_type comp_grp_id;
	uint32_t client_done_mask = 0;

	if (!bus_priv || !acquire_args) {
		CAM_ERR(CAM_SFE, "Invalid Param");
		return -EINVAL;
	}

	comp_grp_id = CAM_SFE_BUS_WR_COMP_GRP_MAX;
	out_acquire_args = &acq_args->sfe_out;

	CAM_DBG(CAM_SFE, "SFE:%d Acquire out_type:0x%X",
		bus_priv->common_data.core_index,
		out_acquire_args->out_port_info->res_type);

	sfe_out_res_id = cam_sfe_bus_wr_get_out_res_id(
		out_acquire_args->out_port_info->res_type);
	if (sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_MAX)
		return -ENODEV;

	rsrc_node = &bus_priv->sfe_out[sfe_out_res_id];
	if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		CAM_ERR(CAM_SFE,
			"SFE:%d out_type:0x%X resource not available state:%d",
			bus_priv->common_data.core_index,
			sfe_out_res_id, rsrc_node->res_state);
		return -EBUSY;
	}

	if (!acq_args->buf_done_controller) {
		CAM_ERR(CAM_SFE, "Invalid buf done controller");
		return -EINVAL;
	}

	rsrc_data = rsrc_node->res_priv;
	rsrc_data->common_data->buf_done_controller =
		acq_args->buf_done_controller;
	rsrc_data->common_data->event_cb = acq_args->event_cb;
	rsrc_data->priv = acq_args->priv;
	rsrc_data->bus_priv = bus_priv;

	secure_caps = cam_sfe_bus_can_be_secure(
		rsrc_data->out_type);
	mode = out_acquire_args->out_port_info->secure_mode;
	mutex_lock(&rsrc_data->common_data->bus_mutex);
	if (secure_caps) {
		/* First secure-capable out port fixes the bus-wide mode */
		if (!rsrc_data->common_data->num_sec_out) {
			rsrc_data->secure_mode = mode;
			rsrc_data->common_data->secure_mode = mode;
		} else {
			if (mode == rsrc_data->common_data->secure_mode) {
				rsrc_data->secure_mode =
					rsrc_data->common_data->secure_mode;
			} else {
				rc = -EINVAL;
				CAM_ERR_RATE_LIMIT(CAM_SFE,
					"Mismatch: Acquire mode[%d], drvr mode[%d]",
					rsrc_data->common_data->secure_mode,
					mode);
				mutex_unlock(
					&rsrc_data->common_data->bus_mutex);
				return rc;
			}
		}
		rsrc_data->common_data->num_sec_out++;
	}
	mutex_unlock(&rsrc_data->common_data->bus_mutex);

	bus_priv->tasklet_info = acq_args->tasklet;
	rsrc_node->is_rdi_primary_res = false;
	rsrc_node->res_id = out_acquire_args->out_port_info->res_type;
	rsrc_node->tasklet_info = acq_args->tasklet;
	rsrc_node->cdm_ops = out_acquire_args->cdm_ops;
	rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops;
	rsrc_data->format = out_acquire_args->out_port_info->format;

	/* Acquire WM and retrieve COMP GRP ID */
	for (i = 0; i < rsrc_data->num_wm; i++) {
		rc = cam_sfe_bus_acquire_wm(bus_priv,
			out_acquire_args,
			acq_args->tasklet,
			sfe_out_res_id,
			i,
			&rsrc_data->wm_res[i],
			&client_done_mask,
			&comp_grp_id);
		if (rc) {
			CAM_ERR(CAM_SFE,
				"Failed to acquire WM SFE:%d out_type:%d rc:%d",
				rsrc_data->common_data->core_index,
				sfe_out_res_id, rc);
			goto release_wm;
		}
	}

	/* Acquire composite group using COMP GRP ID */
	rc = cam_sfe_bus_acquire_comp_grp(bus_priv,
		out_acquire_args->out_port_info,
		acq_args->tasklet,
		out_acquire_args->is_dual,
		out_acquire_args->is_master,
		rsrc_data->comp_grp,
		comp_grp_id);
	if (rc) {
		CAM_ERR(CAM_SFE,
			"Failed to acquire comp_grp SFE:%d out_type:%d rc:%d",
			rsrc_data->common_data->core_index,
			sfe_out_res_id, rc);
		goto release_wm;
	}

	rsrc_data->is_dual = out_acquire_args->is_dual;
	rsrc_data->is_master = out_acquire_args->is_master;

	cam_sfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp,
		client_done_mask);

	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	out_acquire_args->rsrc_node = rsrc_node;
	out_acquire_args->comp_grp_id = comp_grp_id;

	CAM_DBG(CAM_SFE, "Acquire successful");
	return rc;

release_wm:
	/* Unwind WMs acquired before the failure */
	for (i--; i >= 0; i--)
		cam_sfe_bus_release_wm(bus_priv,
			&rsrc_data->wm_res[i]);

	return rc;
}

/*
 * Release an SFE out resource: release its WMs and composite group,
 * clear the CDM/tasklet hooks, and drop the secure-mode refcount taken
 * at acquire time (resetting the bus-wide mode to non-secure when the
 * last secure out goes away).
 */
static int cam_sfe_bus_release_sfe_out(void *bus_priv, void *release_args,
	uint32_t args_size)
{
	uint32_t i;
	struct cam_isp_resource_node *sfe_out = NULL;
	struct cam_sfe_bus_wr_out_data *rsrc_data = NULL;
	uint32_t secure_caps = 0;

	if (!bus_priv || !release_args) {
		CAM_ERR(CAM_SFE, "Invalid input bus_priv %pK release_args %pK",
			bus_priv, release_args);
		return -EINVAL;
	}

	sfe_out = release_args;
	rsrc_data = sfe_out->res_priv;

	/* Log-only check: release proceeds even from an unexpected state */
	if (sfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
		CAM_ERR(CAM_SFE,
			"Invalid resource state:%d SFE:%d out_type:0x%X",
			sfe_out->res_state, rsrc_data->common_data->core_index,
			sfe_out->res_id);
	}

	for (i = 0; i < rsrc_data->num_wm; i++)
		cam_sfe_bus_release_wm(bus_priv, &rsrc_data->wm_res[i]);

	if (rsrc_data->comp_grp)
		cam_sfe_bus_release_comp_grp(bus_priv,
			rsrc_data->comp_grp);

	sfe_out->tasklet_info = NULL;
	sfe_out->cdm_ops = NULL;
	rsrc_data->cdm_util_ops = NULL;

	secure_caps = cam_sfe_bus_can_be_secure(rsrc_data->out_type);
	mutex_lock(&rsrc_data->common_data->bus_mutex);
	if (secure_caps) {
		if (rsrc_data->secure_mode ==
			rsrc_data->common_data->secure_mode) {
			rsrc_data->common_data->num_sec_out--;
			rsrc_data->secure_mode =
				CAM_SECURE_MODE_NON_SECURE;
		} else {
			/*
			 * The validity of the mode is properly
			 * checked while acquiring the output port.
			 * not expected to reach here, unless there is
			 * some corruption.
			 */
			CAM_ERR(CAM_SFE, "driver[%d],resource[%d] mismatch",
				rsrc_data->common_data->secure_mode,
				rsrc_data->secure_mode);
		}

		if (!rsrc_data->common_data->num_sec_out)
			rsrc_data->common_data->secure_mode =
				CAM_SECURE_MODE_NON_SECURE;
	}
	mutex_unlock(&rsrc_data->common_data->bus_mutex);

	if (sfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
		sfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

	return 0;
}

/*
 * Pop a free IRQ event payload from the bus free list (spinlocked).
 * Fails with -EPERM if error IRQs were never subscribed (bus not
 * started) or -ENODEV when the free pool is exhausted.
 */
static int cam_sfe_bus_wr_get_evt_payload(
	struct cam_sfe_bus_wr_common_data *common_data,
	struct cam_sfe_bus_wr_irq_evt_payload **evt_payload)
{
	int rc;

	spin_lock(&common_data->spin_lock);

	if (!common_data->err_irq_subscribe) {
		CAM_ERR_RATE_LIMIT(CAM_SFE, "SFE:%d Bus uninitialized",
			common_data->core_index);
		*evt_payload = NULL;
		rc = -EPERM;
		goto done;
	}

	if (list_empty(&common_data->free_payload_list)) {
		CAM_ERR_RATE_LIMIT(CAM_SFE, "No free BUS event payload");
		*evt_payload = NULL;
		rc = -ENODEV;
		goto done;
	}

	*evt_payload = list_first_entry(&common_data->free_payload_list,
		struct cam_sfe_bus_wr_irq_evt_payload, list);
	list_del_init(&(*evt_payload)->list);
	rc = 0;
done:
	spin_unlock(&common_data->spin_lock);
	return rc;
}

/*
 * Return an IRQ event payload to the free list and NULL out the caller's
 * pointer. The payload is only re-queued while the bus is still live
 * (err_irq_subscribe set); otherwise it is silently dropped.
 */
static int cam_sfe_bus_wr_put_evt_payload(
	struct cam_sfe_bus_wr_common_data *common_data,
	struct cam_sfe_bus_wr_irq_evt_payload **evt_payload)
{
	unsigned long flags;

	if (!common_data) {
		CAM_ERR(CAM_SFE, "Invalid param common_data NULL");
		return -EINVAL;
	}

	if (*evt_payload == NULL) {
		CAM_ERR(CAM_SFE, "No payload to put");
		return -EINVAL;
	}

	CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_sfe_bus_wr_irq_evt_payload);
	spin_lock_irqsave(&common_data->spin_lock, flags);
	if (common_data->err_irq_subscribe)
		list_add_tail(&(*evt_payload)->list, &common_data->free_payload_list);

	spin_unlock_irqrestore(&common_data->spin_lock, flags);

	*evt_payload = NULL;

	CAM_DBG(CAM_SFE, "Exit");
	return 0;
}

static int
cam_sfe_bus_start_sfe_out( + struct cam_isp_resource_node *sfe_out) +{ + int rc = 0, i; + struct cam_sfe_bus_wr_out_data *rsrc_data = NULL; + struct cam_sfe_bus_wr_priv *bus_priv; + struct cam_sfe_bus_wr_common_data *common_data = NULL; + uint32_t bus_irq_reg_mask[1]; + + if (!sfe_out) { + CAM_ERR(CAM_SFE, "Invalid input"); + return -EINVAL; + } + + rsrc_data = sfe_out->res_priv; + bus_priv = rsrc_data->bus_priv; + common_data = rsrc_data->common_data; + + if (sfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_SFE, + "Invalid resource state:%d SFE:%d out_type:0x%X", + sfe_out->res_state, rsrc_data->common_data->core_index, + rsrc_data->out_type); + return -EACCES; + } + + /* subscribe when first out rsrc is streamed on */ + if (!bus_priv->common_data.err_irq_subscribe) { + rc = cam_sfe_bus_subscribe_error_irq(bus_priv); + if (rc) + return rc; + } + + CAM_DBG(CAM_SFE, "Start SFE:%d out_type:0x%X", + rsrc_data->common_data->core_index, rsrc_data->out_type); + + for (i = 0; i < rsrc_data->num_wm; i++) { + rc = cam_sfe_bus_start_wm(&rsrc_data->wm_res[i]); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Start Failed for out_type:0x%X", + sfe_out->res_state, rsrc_data->common_data->core_index, + rsrc_data->out_type); + + return rc; + } + } + + memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask)); + rc = cam_sfe_bus_start_comp_grp(rsrc_data->comp_grp, + bus_irq_reg_mask); + + if (rsrc_data->is_dual && !rsrc_data->is_master) + goto end; + + sfe_out->irq_handle = cam_irq_controller_subscribe_irq( + common_data->buf_done_controller, + CAM_IRQ_PRIORITY_1, + bus_irq_reg_mask, + sfe_out, + sfe_out->top_half_handler, + sfe_out->bottom_half_handler, + sfe_out->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (sfe_out->irq_handle < 1) { + CAM_ERR(CAM_SFE, "Subscribe IRQ failed for sfe out_res: %d", + sfe_out->res_id); + sfe_out->irq_handle = 0; + return -EFAULT; + } +end: + sfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static 
int cam_sfe_bus_stop_sfe_out(
	struct cam_isp_resource_node *sfe_out)
{
	int rc = 0, i;
	struct cam_sfe_bus_wr_out_data *rsrc_data = NULL;
	struct cam_sfe_bus_wr_priv *bus_priv;
	struct cam_sfe_bus_wr_common_data *common_data = NULL;

	if (!sfe_out) {
		CAM_ERR(CAM_SFE, "Invalid input");
		return -EINVAL;
	}

	rsrc_data = sfe_out->res_priv;
	bus_priv = rsrc_data->bus_priv;
	common_data = rsrc_data->common_data;

	/* Already stopped (or never started) — nothing to do */
	if (sfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
		sfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
		CAM_DBG(CAM_SFE, "Stop SFE:%d out_type:0x%X state:%d",
			rsrc_data->common_data->core_index, rsrc_data->out_type,
			sfe_out->res_state);
		return rc;
	}

	rc = cam_sfe_bus_stop_comp_grp(rsrc_data->comp_grp);

	for (i = 0; i < rsrc_data->num_wm; i++)
		rc = cam_sfe_bus_stop_wm(&rsrc_data->wm_res[i]);

	if (sfe_out->irq_handle) {
		cam_irq_controller_unsubscribe_irq(
			common_data->buf_done_controller,
			sfe_out->irq_handle);
		sfe_out->irq_handle = 0;
	}

	/*
	 * Unsubscribe error irq when first
	 * out rsrc is streamed off
	 */
	if (bus_priv->common_data.err_irq_subscribe) {
		if (bus_priv->error_irq_handle) {
			rc = cam_irq_controller_unsubscribe_irq(
				bus_priv->common_data.bus_irq_controller,
				bus_priv->error_irq_handle);
			if (rc)
				CAM_WARN(CAM_SFE, "failed to unsubscribe error irqs");
			bus_priv->error_irq_handle = 0;
		}

		if (bus_priv->bus_irq_handle) {
			rc = cam_irq_controller_unsubscribe_irq(
				bus_priv->common_data.sfe_irq_controller,
				bus_priv->bus_irq_handle);
			if (rc)
				CAM_WARN(CAM_SFE, "failed to unsubscribe top irq");
			bus_priv->bus_irq_handle = 0;
			cam_irq_controller_unregister_dependent(
				bus_priv->common_data.sfe_irq_controller,
				bus_priv->common_data.bus_irq_controller);
		}

		bus_priv->common_data.err_irq_subscribe = false;
	}

	sfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	return rc;
}

/*
 * Top half for an SFE out buf-done IRQ: grab an event payload, stamp it
 * with timestamp/core/event id and the raw status registers, and — when
 * the comp-grp done bit is set — latch the last consumed address from
 * the WM's addr_status_0 register for the bottom half.
 */
static int cam_sfe_bus_handle_sfe_out_done_top_half(
	uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	int32_t rc;
	int i;
	struct cam_isp_resource_node *sfe_out = NULL;
	struct cam_sfe_bus_wr_out_data *rsrc_data = NULL;
	struct cam_sfe_bus_wr_irq_evt_payload *evt_payload;
	struct cam_sfe_bus_wr_comp_grp_data *resource_data;
	struct cam_sfe_bus_wr_wm_resource_data *wm_rsrc_data = NULL;
	uint32_t status_0;

	sfe_out = th_payload->handler_priv;
	if (!sfe_out) {
		CAM_ERR_RATE_LIMIT(CAM_SFE, "No resource");
		return -ENODEV;
	}

	rsrc_data = sfe_out->res_priv;
	resource_data = rsrc_data->comp_grp->res_priv;

	CAM_DBG(CAM_SFE, "SFE:%d Bus IRQ status_0: 0x%X",
		rsrc_data->common_data->core_index,
		th_payload->evt_status_arr[0]);

	rc = cam_sfe_bus_wr_get_evt_payload(rsrc_data->common_data,
		&evt_payload);
	if (rc) {
		CAM_INFO_RATE_LIMIT(CAM_SFE,
			"Failed to get payload for SFE:%d Bus IRQ status_0: 0x%X",
			rsrc_data->common_data->core_index,
			th_payload->evt_status_arr[0]);
		return rc;
	}

	cam_isp_hw_get_timestamp(&evt_payload->ts);

	evt_payload->core_index = rsrc_data->common_data->core_index;
	evt_payload->evt_id = evt_id;

	for (i = 0; i < th_payload->num_registers; i++)
		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];

	th_payload->evt_payload_priv = evt_payload;

	status_0 = th_payload->evt_status_arr[CAM_SFE_IRQ_BUS_WR_REG_STATUS0];

	if (status_0 & resource_data->comp_done_mask) {
		/* All SFE out ports have single WM */
		wm_rsrc_data = rsrc_data->wm_res->res_priv;
		evt_payload->last_consumed_addr = cam_io_r_mb(
			wm_rsrc_data->common_data->mem_base +
			wm_rsrc_data->hw_regs->addr_status_0);
		trace_cam_log_event("bufdone", "bufdone_IRQ",
			status_0, resource_data->comp_grp_type);
	}

	return rc;
}

/*
 * Bottom-half helper for one composite group: check the captured
 * status_0 against the group's done mask. Returns
 * CAM_SFE_IRQ_STATUS_SUCCESS when the done bit fired (and tags the
 * payload CAM_ISP_HW_EVENT_DONE), CAM_SFE_IRQ_STATUS_ERR otherwise.
 * Always reports the group's type via @comp_grp_id on the mask hit path.
 */
static int cam_sfe_bus_handle_comp_done_bottom_half(
	void *handler_priv,
	void *evt_payload_priv,
	uint32_t *comp_grp_id)
{
	int rc = CAM_SFE_IRQ_STATUS_ERR;
	struct cam_isp_resource_node *comp_grp = handler_priv;
	struct cam_sfe_bus_wr_irq_evt_payload *evt_payload = evt_payload_priv;
	struct cam_sfe_bus_wr_comp_grp_data *rsrc_data = comp_grp->res_priv;
	uint32_t *cam_sfe_irq_regs;
	uint32_t status_0;

	if (!evt_payload)
		return rc;

	/* Only the master core processes done events in dual mode */
	if (rsrc_data->is_dual && (!rsrc_data->is_master)) {
		CAM_ERR(CAM_SFE, "Invalid comp_grp:%u is_master:%u",
			rsrc_data->comp_grp_type, rsrc_data->is_master);
		return rc;
	}

	cam_sfe_irq_regs = evt_payload->irq_reg_val;
	status_0 = cam_sfe_irq_regs[CAM_SFE_IRQ_BUS_WR_REG_STATUS0];

	if (status_0 & rsrc_data->comp_done_mask) {
		evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
		rc = CAM_SFE_IRQ_STATUS_SUCCESS;
	}

	CAM_DBG(CAM_SFE, "SFE:%d comp_grp:%d Bus IRQ status_0: 0x%X rc:%d",
		rsrc_data->common_data->core_index, rsrc_data->comp_grp_type,
		status_0, rc);

	*comp_grp_id = rsrc_data->comp_grp_type;

	return rc;
}

/*
 * Bottom half for an SFE out buf-done: delegate to the comp-grp handler
 * and, on success, deliver a CAM_ISP_HW_EVENT_DONE (with comp-grp id and
 * last consumed address) to the registered event callback. The event
 * payload is returned to the free pool in all cases.
 */
static int cam_sfe_bus_handle_sfe_out_done_bottom_half(
	void *handler_priv,
	void *evt_payload_priv)
{
	int rc = -EINVAL;
	uint32_t evt_id = 0;
	struct cam_isp_resource_node *sfe_out = handler_priv;
	struct cam_sfe_bus_wr_out_data *rsrc_data = sfe_out->res_priv;
	struct cam_sfe_bus_wr_irq_evt_payload *evt_payload = evt_payload_priv;
	struct cam_isp_hw_event_info evt_info;
	struct cam_isp_hw_bufdone_event_info bufdone_evt_info = {0};
	void *ctx = NULL;
	uint32_t comp_grp_id = 0;

	rc = cam_sfe_bus_handle_comp_done_bottom_half(
		rsrc_data->comp_grp, evt_payload_priv, &comp_grp_id);
	CAM_DBG(CAM_SFE, "SFE:%d out_type:0x%x comp_grp_id:%d rc:%d",
		rsrc_data->common_data->core_index,
		rsrc_data->out_type, comp_grp_id, rc);

	ctx = rsrc_data->priv;

	switch (rc) {
	case CAM_SFE_IRQ_STATUS_SUCCESS:
		evt_id = evt_payload->evt_id;

		evt_info.res_type = sfe_out->res_type;
		evt_info.hw_idx = sfe_out->hw_intf->hw_idx;
		evt_info.hw_type = CAM_ISP_HW_TYPE_SFE;
		evt_info.res_id = sfe_out->res_id;
		bufdone_evt_info.res_id = sfe_out->res_id;
		bufdone_evt_info.comp_grp_id = comp_grp_id;
		bufdone_evt_info.last_consumed_addr =
			evt_payload->last_consumed_addr;
		evt_info.event_data = (void *)&bufdone_evt_info;

		if (rsrc_data->common_data->event_cb)
			rsrc_data->common_data->event_cb(ctx, evt_id,
				(void *)&evt_info);
		break;
	default:
		break;
	}

	cam_sfe_bus_wr_put_evt_payload(rsrc_data->common_data, &evt_payload);

	return rc;
}

/*
 * Allocate and initialize one SFE out resource (and its single WM) from
 * the hw_info table entry at @index. Returns 0 on success, negative
 * errno on invalid type, double init, or allocation/WM-init failure.
 * NOTE(review): on wm_res alloc or WM-init failure the earlier
 * allocations are not freed here — presumably cleaned up by the caller's
 * deinit path; confirm.
 */
static int cam_sfe_bus_init_sfe_out_resource(
	uint32_t index,
	struct cam_sfe_bus_wr_priv *bus_priv,
	struct cam_sfe_bus_wr_hw_info *hw_info)
{
	struct cam_isp_resource_node *sfe_out = NULL;
	struct cam_sfe_bus_wr_out_data *rsrc_data = NULL;
	int rc = 0, i = 0;
	int32_t sfe_out_type =
		cam_sfe_bus_wr_get_out_type(bus_priv, index);

	if (sfe_out_type < 0 ||
		sfe_out_type >= CAM_SFE_BUS_SFE_OUT_MAX) {
		CAM_ERR(CAM_SFE, "Init SFE Out failed, Invalid type=%d",
			sfe_out_type);
		return -EINVAL;
	}

	sfe_out = &bus_priv->sfe_out[sfe_out_type];
	if (sfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE ||
		sfe_out->res_priv) {
		CAM_ERR(CAM_SFE, "sfe_out_type %d has already been initialized",
			sfe_out_type);
		return -EFAULT;
	}

	rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus_wr_out_data),
		GFP_KERNEL);
	if (!rsrc_data) {
		rc = -ENOMEM;
		return rc;
	}

	sfe_out->res_priv = rsrc_data;

	sfe_out->res_type = CAM_ISP_RESOURCE_SFE_OUT;
	sfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
	INIT_LIST_HEAD(&sfe_out->list);

	rsrc_data->source_group =
		hw_info->sfe_out_hw_info[index].source_group;
	rsrc_data->out_type =
		hw_info->sfe_out_hw_info[index].sfe_out_type;
	rsrc_data->common_data = &bus_priv->common_data;
	rsrc_data->max_width =
		hw_info->sfe_out_hw_info[index].max_width;
	rsrc_data->max_height =
		hw_info->sfe_out_hw_info[index].max_height;
	rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE;
	rsrc_data->num_wm = hw_info->sfe_out_hw_info[index].num_wm;

	rsrc_data->wm_res = CAM_MEM_ZALLOC((sizeof(struct cam_isp_resource_node) *
		rsrc_data->num_wm), GFP_KERNEL);
	if (!rsrc_data->wm_res) {
		CAM_ERR(CAM_SFE, "Failed to alloc for wm_res");
		return -ENOMEM;
	}

	/* All SFE clients have 1 WM hence i = 0 always */
	rc = cam_sfe_bus_init_wm_resource(
		hw_info->sfe_out_hw_info[index].wm_idx,
		bus_priv, hw_info,
		&rsrc_data->wm_res[i],
		&rsrc_data->comp_grp,
		hw_info->sfe_out_hw_info[index].name,
		hw_info->sfe_out_hw_info[index].en_line_done);
	if (rc < 0) {
		CAM_ERR(CAM_SFE, "SFE:%d init WM:%d failed rc:%d",
			bus_priv->common_data.core_index, i, rc);
		return rc;
	}

	sfe_out->start = cam_sfe_bus_start_sfe_out;
	sfe_out->stop = cam_sfe_bus_stop_sfe_out;
	sfe_out->top_half_handler =
		cam_sfe_bus_handle_sfe_out_done_top_half;
	sfe_out->bottom_half_handler =
		cam_sfe_bus_handle_sfe_out_done_bottom_half;
	sfe_out->process_cmd = NULL;
	sfe_out->hw_intf = bus_priv->common_data.hw_intf;
	sfe_out->irq_handle = 0;

	return 0;
}

/*
 * Tear down one SFE out resource: clear its ops, deinit each WM it owns,
 * and free the private data. A resource still in UNAVAILABLE state was
 * never supported on this HW and is silently skipped.
 */
static int cam_sfe_bus_deinit_sfe_out_resource(
	struct cam_isp_resource_node *sfe_out)
{
	struct cam_sfe_bus_wr_out_data *rsrc_data = sfe_out->res_priv;
	int rc = 0, i = 0;

	if (sfe_out->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) {
		/*
		 * This is not error. It can happen if the resource is
		 * never supported in the HW.
		 */
		return 0;
	}

	sfe_out->start = NULL;
	sfe_out->stop = NULL;
	sfe_out->top_half_handler = NULL;
	sfe_out->bottom_half_handler = NULL;
	sfe_out->hw_intf = NULL;
	sfe_out->irq_handle = 0;

	sfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
	INIT_LIST_HEAD(&sfe_out->list);
	sfe_out->res_priv = NULL;

	if (!rsrc_data)
		return -ENOMEM;

	rsrc_data->comp_grp = NULL;
	for (i = 0; i < rsrc_data->num_wm; i++) {
		rc = cam_sfe_bus_deinit_wm_resource(&rsrc_data->wm_res[i]);
		if (rc < 0)
			CAM_ERR(CAM_SFE,
				"SFE:%d deinit WM:%d failed rc:%d",
				rsrc_data->common_data->core_index, i, rc);
	}

	CAM_MEM_FREE(rsrc_data);

	return 0;
}

/*
 * Debug dump for one WM: read the four address-status registers and log
 * them alongside the WM's configured/acquired dimensions and packing.
 */
static inline void __cam_sfe_bus_wr_print_wm_info(
	struct cam_sfe_bus_wr_wm_resource_data *wm_data)
{
	uint32_t addr_status0, addr_status1, addr_status2, addr_status3;

	addr_status0 = cam_io_r_mb(wm_data->common_data->mem_base +
		wm_data->hw_regs->addr_status_0);
	addr_status1 = cam_io_r_mb(wm_data->common_data->mem_base +
		wm_data->hw_regs->addr_status_1);
	addr_status2 = cam_io_r_mb(wm_data->common_data->mem_base +
		wm_data->hw_regs->addr_status_2);
	addr_status3 = cam_io_r_mb(wm_data->common_data->mem_base +
		wm_data->hw_regs->addr_status_3);

	CAM_INFO(CAM_SFE,
		"SFE:%u WM:%u width:%u height:%u stride:%u x_init:%u en_cfg:%u acquired width:%u height:%u pack_cfg: 0x%x",
		wm_data->common_data->core_index, wm_data->index,
		wm_data->width, wm_data->height,
		wm_data->stride, wm_data->h_init,
		wm_data->en_cfg, wm_data->acquired_width,
		wm_data->acquired_height, wm_data->pack_fmt);

	CAM_INFO(CAM_SFE,
		"SFE:%u WM:%u last_consumed_image_addr:0x%x last_consumed_frame_header:0x%x fifo_word_cnt:%d [FH + Image] current_image_addr:0x%x",
		wm_data->common_data->hw_intf->hw_idx, wm_data->index,
		addr_status0, addr_status1, addr_status2, addr_status3);
}

/*
 * Dump dimensions/status for every WM of the given out resource,
 * skipping any WM whose index is outside the bus client range.
 */
static int cam_sfe_bus_wr_print_dimensions(
	enum cam_sfe_bus_sfe_out_type sfe_out_res_id,
	struct cam_sfe_bus_wr_priv *bus_priv)
{
	struct cam_isp_resource_node *rsrc_node = NULL;
	struct cam_isp_resource_node *wm_res = NULL;
	struct cam_sfe_bus_wr_out_data *rsrc_data = NULL;
	struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL;
	int i, wm_idx;

	rsrc_node = &bus_priv->sfe_out[sfe_out_res_id];
	rsrc_data = rsrc_node->res_priv;
	for (i = 0; i < rsrc_data->num_wm; i++) {
		wm_res = &rsrc_data->wm_res[i];
		wm_data = (struct cam_sfe_bus_wr_wm_resource_data *)
			wm_res->res_priv;
		wm_idx = wm_data->index;
		if (wm_idx < 0 || wm_idx >= bus_priv->num_client) {
			CAM_WARN(CAM_SFE, "Unsupported SFE out %d", sfe_out_res_id);
			continue;
		}

		__cam_sfe_bus_wr_print_wm_info(wm_data);
	}
	return 0;
}

/* Head of cam_sfe_bus_mini_dump() — the function continues past this chunk. */
static int cam_sfe_bus_mini_dump(
	struct cam_sfe_bus_wr_priv *bus_priv,
	void *cmd_args)
{
	struct cam_isp_resource_node *rsrc_node = NULL;
	struct cam_sfe_bus_wr_out_data *rsrc_data = NULL;
	struct cam_sfe_bus_wr_wm_resource_data *wm = NULL;
	struct cam_sfe_bus_mini_dump_data *md;
	struct cam_sfe_bus_wm_mini_dump *md_wm;
	struct cam_hw_mini_dump_args *md_args;
	struct cam_hw_info *hw_info = NULL;
	uint32_t bytes_written = 0;
	uint32_t i, j, k = 0;

	if (!bus_priv || !cmd_args) {
		CAM_ERR(CAM_ISP, "Invalid bus private data");
		return -EINVAL;
	}

	hw_info = (struct cam_hw_info *)bus_priv->common_data.hw_intf->hw_priv;
	md_args = (struct cam_hw_mini_dump_args *)cmd_args;

	if (sizeof(*md) > md_args->len) {
		md_args->bytes_written = 0;
		return 0;
	}

	md = (struct cam_sfe_bus_mini_dump_data *)md_args->start_addr;
	md->clk_rate = cam_soc_util_get_applied_src_clk(&hw_info->soc_info, true);
	md->hw_idx = bus_priv->common_data.hw_intf->hw_idx;
	md->hw_state = hw_info->hw_state;
	bytes_written += sizeof(*md);
	md->wm = (struct cam_sfe_bus_wm_mini_dump *)
		((uint8_t *)md_args->start_addr + bytes_written);

	for (i = 0; i < bus_priv->num_out; i++) {
		rsrc_node = &bus_priv->sfe_out[i];
		rsrc_data = rsrc_node->res_priv;
		if (!rsrc_data)
			continue;

		for (j = 
0; j < rsrc_data->num_wm; j++) { + if (bytes_written + sizeof(*md_wm) > md_args->len) + goto end; + + md_wm = &md->wm[k]; + wm = rsrc_data->wm_res[j].res_priv; + md_wm->width = wm->width; + md_wm->index = wm->index; + md_wm->height = wm->height; + md_wm->stride = wm->stride; + md_wm->en_cfg = wm->en_cfg; + md_wm->h_init = wm->h_init; + md_wm->format = wm->format; + md_wm->acquired_width = wm->acquired_width; + md_wm->acquired_height = wm->acquired_height; + md_wm->state = rsrc_node->res_state; + scnprintf(md_wm->name, CAM_ISP_RES_NAME_LEN, + "%s", rsrc_data->wm_res[j].res_name); + k++; + bytes_written += sizeof(*md_wm); + } + } +end: + md->num_client = k; + md_args->bytes_written = bytes_written; + return 0; +} + +static void *cam_sfe_bus_wr_user_dump_info( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_sfe_bus_wr_wm_resource_data *wm = NULL; + uint32_t *addr; + uint32_t addr_status0; + uint32_t addr_status1; + uint32_t addr_status2; + uint32_t addr_status3; + + wm = (struct cam_sfe_bus_wr_wm_resource_data *)dump_struct; + + addr_status0 = cam_io_r_mb(wm->common_data->mem_base + + wm->hw_regs->addr_status_0); + addr_status1 = cam_io_r_mb(wm->common_data->mem_base + + wm->hw_regs->addr_status_1); + addr_status2 = cam_io_r_mb(wm->common_data->mem_base + + wm->hw_regs->addr_status_2); + addr_status3 = cam_io_r_mb(wm->common_data->mem_base + + wm->hw_regs->addr_status_3); + + addr = (uint32_t *)addr_ptr; + + *addr++ = wm->common_data->hw_intf->hw_idx; + *addr++ = wm->index; + *addr++ = addr_status0; + *addr++ = addr_status1; + *addr++ = addr_status2; + *addr++ = addr_status3; + + return addr; +} + +static int cam_sfe_bus_wr_user_dump( + struct cam_sfe_bus_wr_priv *bus_priv, + void *cmd_args) +{ + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_sfe_bus_wr_out_data *rsrc_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm = NULL; + struct cam_hw_info *hw_info = NULL; + struct cam_isp_hw_dump_args *dump_args; + uint32_t i, j = 0; + int rc = 0; 
+ int32_t sfe_out_type; + + + if (!bus_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid bus private data"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)bus_priv->common_data.hw_intf->hw_priv; + dump_args = (struct cam_isp_hw_dump_args *)cmd_args; + + if (hw_info->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_WARN(CAM_ISP, + "SFE BUS powered down, continuing"); + return -EINVAL; + } + + rc = cam_common_user_dump_helper(dump_args, cam_common_user_dump_clock, + hw_info, sizeof(uint64_t), "CLK_RATE_PRINT:"); + + if (rc) { + CAM_ERR(CAM_ISP, "SFE BUS WR: Clock dump failed, rc:%d", rc); + return rc; + } + + for (i = 0; i < bus_priv->num_out; i++) { + sfe_out_type = cam_sfe_bus_wr_get_out_type(bus_priv, i); + if ((sfe_out_type < 0) || + (sfe_out_type >= CAM_SFE_BUS_SFE_OUT_MAX)) { + CAM_ERR(CAM_SFE, "Invalid sfe out type:%d", + sfe_out_type); + return -EINVAL; + } + + rsrc_node = &bus_priv->sfe_out[sfe_out_type]; + if (rsrc_node->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, + "SFE BUS WR: path inactive res ID: %d, continuing", + rsrc_node->res_id); + continue; + } + + rsrc_data = rsrc_node->res_priv; + if (!rsrc_data) + continue; + for (j = 0; j < rsrc_data->num_wm; j++) { + + wm = rsrc_data->wm_res[j].res_priv; + if (!wm) + continue; + + rc = cam_common_user_dump_helper(dump_args, cam_sfe_bus_wr_user_dump_info, + wm, sizeof(uint32_t), "SFE_BUS_CLIENT.%s.%d:", + rsrc_data->wm_res[j].res_name, + rsrc_data->common_data->core_index); + + if (rc) { + CAM_ERR(CAM_ISP, "SFE BUS WR: Info dump failed, rc:%d", rc); + return rc; + } + } + } + return 0; +} + +static int cam_sfe_bus_wr_handle_bus_irq(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_sfe_bus_wr_priv *bus_priv; + int rc = 0; + + bus_priv = th_payload->handler_priv; + rc = cam_irq_controller_handle_irq(evt_id, + bus_priv->common_data.bus_irq_controller, CAM_IRQ_EVT_GROUP_0); + return (rc == IRQ_HANDLED) ? 
0 : -EINVAL; +} + +static int cam_sfe_bus_wr_handle_err_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + + int i = 0, rc = 0; + struct cam_sfe_bus_wr_priv *bus_priv = + th_payload->handler_priv; + struct cam_sfe_bus_wr_irq_evt_payload *evt_payload; + + CAM_ERR_RATE_LIMIT(CAM_ISP, "SFE:%d BUS Err IRQ", + bus_priv->common_data.core_index); + + for (i = 0; i < th_payload->num_registers; i++) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "SFE:%d BUS IRQ status_%d: 0x%X", + bus_priv->common_data.core_index, i, + th_payload->evt_status_arr[i]); + } + cam_irq_controller_disable_irq( + bus_priv->common_data.bus_irq_controller, + bus_priv->error_irq_handle); + + rc = cam_sfe_bus_wr_get_evt_payload(&bus_priv->common_data, + &evt_payload); + if (rc) + return rc; + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + evt_payload->core_index = bus_priv->common_data.core_index; + + evt_payload->ccif_violation_status = cam_io_r_mb( + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->ccif_violation_status); + + evt_payload->image_size_violation_status = cam_io_r_mb( + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->image_size_violation_status); + + cam_isp_hw_get_timestamp(&evt_payload->ts); + th_payload->evt_payload_priv = evt_payload; + + return rc; +} + +static int cam_sfe_bus_wr_get_err_port_info(struct cam_sfe_bus_wr_priv *bus_priv, + uint32_t err_status, void **rsrc_data_priv, uint64_t *out_port) +{ + int i, j, wm_idx, sfe_out_type; + struct cam_isp_resource_node *sfe_out = NULL; + struct cam_sfe_bus_wr_out_data *rsrc_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + + for (i = 0; i < bus_priv->num_out; i++) { + sfe_out_type = cam_sfe_bus_wr_get_out_type(bus_priv, i); + if ((sfe_out_type < 0) || + (sfe_out_type >= CAM_SFE_BUS_SFE_OUT_MAX)) { + CAM_ERR(CAM_SFE, "Invalid sfe out type:%d", + sfe_out_type); + return -EINVAL; + } + + sfe_out 
= &bus_priv->sfe_out[sfe_out_type]; + if (!sfe_out || !sfe_out->res_priv) + continue; + + if (sfe_out->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) + continue; + + rsrc_data = (struct cam_sfe_bus_wr_out_data *)sfe_out->res_priv; + *rsrc_data_priv = rsrc_data->priv; + + for (j = 0; j < rsrc_data->num_wm; j++) { + wm_data = (struct cam_sfe_bus_wr_wm_resource_data *) + rsrc_data->wm_res[j].res_priv; + if (!wm_data) + continue; + + wm_idx = wm_data->index; + if (wm_idx < 0 || wm_idx >= bus_priv->num_client) { + CAM_ERR(CAM_SFE, "Unsupported SFE out %d", + wm_idx); + return -EINVAL; + } + + if (err_status & BIT(wm_idx)) { + *out_port |= BIT_ULL(sfe_out->res_id & 0xFF); + __cam_sfe_bus_wr_print_wm_info(wm_data); + } + } + } + return 0; +} + +static int cam_sfe_bus_wr_handle_err_irq_bottom_half( + void *handler_priv, void *evt_payload_priv) +{ + int idx = -1; + int rc = 0; + uint32_t status = 0; + uint32_t constraint_errors_status = 0; + uint32_t violation_status = 0; + uint64_t out_port_mask = 0; + bool skip_err_notify = false; + struct cam_sfe_bus_wr_priv *bus_priv = handler_priv; + struct cam_sfe_bus_wr_common_data *common_data; + struct cam_isp_hw_event_info evt_info; + struct cam_sfe_bus_wr_irq_evt_payload *evt_payload = evt_payload_priv; + void *rsrc_data_priv = NULL; + + if (!handler_priv || !evt_payload_priv) + return -EINVAL; + + common_data = &bus_priv->common_data; + + status = evt_payload->irq_reg_val[CAM_SFE_IRQ_BUS_WR_REG_STATUS0]; + + if (status & CAM_SFE_BUS_WR_IRQ_CCIF_VIOLATION) { + CAM_ERR(CAM_SFE, "SFE[%u] CCIF protocol violation occurred at [%llu: %09llu]", + bus_priv->common_data.core_index, + evt_payload->ts.mono_time.tv_sec, + evt_payload->ts.mono_time.tv_nsec); + CAM_ERR(CAM_SFE, "Violation status: 0x%x", evt_payload->ccif_violation_status); + } + + if (status & CAM_SFE_BUS_WR_IRQ_IMAGE_SIZE_VIOLATION) { + CAM_ERR(CAM_SFE, + "SFE[%d] IMAGE_SIZE_VIOLATION occurred at [%llu: %09llu]", + bus_priv->common_data.core_index, + 
evt_payload->ts.mono_time.tv_sec, + evt_payload->ts.mono_time.tv_nsec); + CAM_ERR(CAM_SFE, + "Sensor: Programmed image size is not same as actual image size from input"); + CAM_ERR(CAM_SFE, "Debug: Check SW programming/sensor config"); + CAM_ERR(CAM_SFE, "Violation status: 0x%x", + evt_payload->image_size_violation_status); + } + + if (status & CAM_SFE_BUS_WR_IRQ_CONS_VIOLATION) { + CAM_ERR(CAM_SFE, + "SFE[%u] CONSTRAINT_VIOLATION occurred at [%llu: %09llu]", + bus_priv->common_data.core_index, + evt_payload->ts.mono_time.tv_sec, + evt_payload->ts.mono_time.tv_nsec); + cam_sfe_bus_wr_get_constraint_errors(&skip_err_notify, bus_priv, + &constraint_errors_status); + } + + violation_status = evt_payload->ccif_violation_status | + evt_payload->image_size_violation_status | constraint_errors_status; + + rc = cam_sfe_bus_wr_get_err_port_info(bus_priv, violation_status, + &rsrc_data_priv, &out_port_mask); + if (rc < 0) + CAM_ERR(CAM_SFE, "Failed to get err port info, violation_status = %d", + violation_status); + + cam_sfe_bus_wr_put_evt_payload(common_data, &evt_payload); + + if (!skip_err_notify) { + struct cam_isp_hw_error_event_info err_evt_info; + + evt_info.hw_idx = common_data->core_index; + evt_info.hw_type = CAM_ISP_HW_TYPE_SFE; + evt_info.res_type = CAM_ISP_RESOURCE_SFE_OUT; + err_evt_info.err_type = CAM_SFE_IRQ_STATUS_VIOLATION; + evt_info.event_data = (void *)&err_evt_info; + + if (!rsrc_data_priv) { + CAM_WARN(CAM_SFE, + "SFE:[%d] out error notification failed, cb data is NULL", + bus_priv->common_data.core_index); + return -EINVAL; + } + + if (!common_data->event_cb) + return 0; + + if (!out_port_mask) { + /* No valid res_id found */ + evt_info.res_id = CAM_SFE_BUS_SFE_OUT_MAX; + common_data->event_cb(rsrc_data_priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + return 0; + } + + while (out_port_mask) { + idx++; + if (!(out_port_mask & 0x1)) { + out_port_mask >>= 1; + continue; + } + + evt_info.res_id = CAM_ISP_SFE_OUT_RES_BASE + idx; + 
common_data->event_cb(rsrc_data_priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + out_port_mask >>= 1; + } + } + return 0; +} + +static int cam_sfe_bus_subscribe_error_irq( + struct cam_sfe_bus_wr_priv *bus_priv) +{ + struct cam_sfe_bus_wr_common_data *common_data; + uint32_t top_irq_reg_mask[CAM_SFE_IRQ_REGISTERS_MAX] = {0}; + uint32_t bus_wr_error_irq_mask[CAM_SFE_BUS_WR_IRQ_REGISTERS_MAX] = {0}; + + /* Subscribe top IRQ */ + top_irq_reg_mask[0] = (1 << bus_priv->top_irq_shift); + + common_data = &bus_priv->common_data; + + bus_priv->bus_irq_handle = cam_irq_controller_subscribe_irq( + common_data->sfe_irq_controller, + CAM_IRQ_PRIORITY_0, + top_irq_reg_mask, + bus_priv, + cam_sfe_bus_wr_handle_bus_irq, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->bus_irq_handle < 1) { + CAM_ERR(CAM_SFE, "Failed to subscribe BUS TOP IRQ"); + bus_priv->bus_irq_handle = 0; + return -EFAULT; + } + + cam_irq_controller_register_dependent(common_data->sfe_irq_controller, + common_data->bus_irq_controller, top_irq_reg_mask); + + if (bus_priv->tasklet_info != NULL) { + bus_wr_error_irq_mask[0] = common_data->irq_err_mask; + + bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq( + common_data->bus_irq_controller, + CAM_IRQ_PRIORITY_0, + bus_wr_error_irq_mask, + bus_priv, + cam_sfe_bus_wr_handle_err_irq_top_half, + cam_sfe_bus_wr_handle_err_irq_bottom_half, + bus_priv->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->error_irq_handle < 1) { + CAM_ERR(CAM_SFE, "Failed to subscribe BUS Error IRQ"); + bus_priv->error_irq_handle = 0; + return -EFAULT; + } + } + + common_data->err_irq_subscribe = true; + CAM_DBG(CAM_SFE, "BUS WR error irq subscribed"); + return 0; +} + +static int cam_sfe_bus_wr_update_wm(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_sfe_bus_wr_priv *bus_priv; + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_buf_io_cfg *io_cfg = NULL; + struct cam_sfe_bus_wr_out_data *sfe_out_data = 
NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + struct cam_sfe_bus_cache_dbg_cfg *cache_dbg_cfg = NULL; + uint32_t *reg_val_pair; + uint32_t img_addr = 0, img_offset = 0; + uint32_t num_regval_pairs = 0; + uint32_t i, j, size = 0; + uint32_t frame_inc = 0, val; + uint32_t stride = 0, slice_h = 0; + dma_addr_t iova; + uint32_t curr_cache_cfg = 0; + + bus_priv = (struct cam_sfe_bus_wr_priv *) priv; + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + cache_dbg_cfg = &bus_priv->common_data.cache_dbg_cfg; + + sfe_out_data = (struct cam_sfe_bus_wr_out_data *) + update_buf->res->res_priv; + if (!sfe_out_data || !sfe_out_data->cdm_util_ops) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + cdm_util_ops = sfe_out_data->cdm_util_ops; + if (update_buf->wm_update->num_buf != sfe_out_data->num_wm) { + CAM_ERR(CAM_SFE, + "Failed! Invalid number buffers:%d required:%d", + update_buf->wm_update->num_buf, sfe_out_data->num_wm); + return -EINVAL; + } + + reg_val_pair = &sfe_out_data->common_data->io_buf_update[0]; + if (update_buf->use_scratch_cfg) + CAM_DBG(CAM_SFE, "Using scratch buf config"); + else + io_cfg = update_buf->wm_update->io_cfg; + + for (i = 0, j = 0; i < sfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_SFE, + "reg_val_pair %d exceeds the array limit %zu", + j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + wm_data = (struct cam_sfe_bus_wr_wm_resource_data *) + sfe_out_data->wm_res[i].res_priv; + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->cfg, wm_data->en_cfg); + CAM_DBG(CAM_SFE, "WM:%d %s en_cfg 0x%X", + wm_data->index, sfe_out_data->wm_res[i].res_name, + reg_val_pair[j-1]); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->packer_cfg, wm_data->pack_fmt); + CAM_DBG(CAM_SFE, "WM:%d %s packer_fmt 0x%X", + wm_data->index, 
sfe_out_data->wm_res[i].res_name, + reg_val_pair[j-1]); + + curr_cache_cfg = wm_data->cache_cfg; + wm_data->cache_cfg = bus_priv->common_data.sys_cache_default_cfg; + if (wm_data->enable_caching) { + if ((cache_dbg_cfg->disable_for_scratch) && + (update_buf->use_scratch_cfg)) + goto skip_cache_cfg; + + if ((cache_dbg_cfg->disable_for_buf) && + (!update_buf->use_scratch_cfg)) + goto skip_cache_cfg; + + wm_data->cache_cfg = + wm_data->current_scid << 8; + wm_data->cache_cfg |= 3 << 4; + if ((update_buf->use_scratch_cfg) && + (cache_dbg_cfg->scratch_dbg_cfg)) + wm_data->cache_cfg |= cache_dbg_cfg->scratch_alloc; + else if ((!update_buf->use_scratch_cfg) && + (cache_dbg_cfg->buf_dbg_cfg)) + wm_data->cache_cfg |= cache_dbg_cfg->buf_alloc; + else + wm_data->cache_cfg |= CACHE_ALLOC_ALLOC; + + if (cache_dbg_cfg->print_cache_cfg && + (curr_cache_cfg != wm_data->cache_cfg)) { + CAM_INFO(CAM_SFE, "SFE:%d WM:%d current_scid:%d cache_cfg:0x%x", + wm_data->common_data->core_index, + wm_data->index, wm_data->current_scid, wm_data->cache_cfg); + } + } + +skip_cache_cfg: + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->system_cache_cfg, + wm_data->cache_cfg); + CAM_DBG(CAM_SFE, "WM:%d cache_cfg:0x%x", + wm_data->index, reg_val_pair[j-1]); + + val = (wm_data->height << 16) | wm_data->width; + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_cfg_0, val); + CAM_DBG(CAM_SFE, "WM:%d image height and width 0x%X", + wm_data->index, reg_val_pair[j-1]); + + /* For initial configuration program all bus registers */ + if (update_buf->use_scratch_cfg) { + stride = update_buf->wm_update->stride; + slice_h = update_buf->wm_update->slice_height; + } else { + stride = io_cfg->planes[i].plane_stride; + slice_h = io_cfg->planes[i].slice_height; + } + + val = stride; + CAM_DBG(CAM_SFE, "before stride %d", val); + val = ALIGNUP(val, 16); + if (val != stride) + CAM_DBG(CAM_SFE, "Warning stride %u expected %u", + 
stride, val); + + if (wm_data->stride != val || !wm_data->init_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_cfg_2, + stride); + wm_data->stride = val; + CAM_DBG(CAM_SFE, "WM:%d image stride 0x%X", + wm_data->index, reg_val_pair[j-1]); + } + + frame_inc = stride * slice_h; + + if (!(wm_data->en_cfg & (0x3 << 16))) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_cfg_1, wm_data->h_init); + CAM_DBG(CAM_SFE, "WM:%d h_init 0x%X", + wm_data->index, reg_val_pair[j-1]); + } + + /* WM Image address */ + iova = update_buf->wm_update->image_buf[i] + + wm_data->offset; + update_buf->wm_update->image_buf_offset[i] = + wm_data->offset; + + if (cam_smmu_is_expanded_memory()) { + img_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET(iova); + img_addr = CAM_36BIT_INTF_GET_IOVA_BASE(iova); + /* Align frame inc to 256 bytes */ + frame_inc = CAM_36BIT_INTF_GET_IOVA_BASE(frame_inc); + + /* Only write to offset register in 36-bit enabled HW */ + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->addr_cfg, img_offset); + CAM_DBG(CAM_SFE, "WM:%d image offset 0x%X", + wm_data->index, img_offset); + } else { + img_addr = iova; + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_addr, img_addr); + + CAM_DBG(CAM_SFE, "WM:%d image address 0x%x", wm_data->index, img_addr); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->frame_incr, frame_inc); + CAM_DBG(CAM_SFE, "WM:%d frame_inc %d expanded mem: %s", + wm_data->index, reg_val_pair[j-1], + CAM_BOOL_TO_YESNO(cam_smmu_is_expanded_memory)); + + /* enable the WM */ + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->cfg, + wm_data->en_cfg); + + /* set initial configuration done */ + if (!wm_data->init_cfg_done) + wm_data->init_cfg_done = true; + } + + num_regval_pairs = j / 2; + + if 
(num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > update_buf->cmd.size) { + CAM_ERR(CAM_SFE, + "Failed! Buf size:%d insufficient, expected size:%d", + update_buf->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + update_buf->cmd.cmd_buf_addr, num_regval_pairs, + reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + update_buf->cmd.used_bytes = size * 4; + } else { + CAM_DBG(CAM_SFE, + "No reg val pairs. num_wms: %u", + sfe_out_data->num_wm); + update_buf->cmd.used_bytes = 0; + } + + return 0; +} + +/* + * API similar to cam_sfe_bus_wr_update_wm() with the + * only change being config is done via AHB instead of CDM + */ +static int cam_sfe_bus_wr_config_wm(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_sfe_bus_wr_priv *bus_priv; + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_sfe_bus_wr_out_data *sfe_out_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + struct cam_sfe_bus_cache_dbg_cfg *cache_dbg_cfg = NULL; + uint32_t i; + uint32_t frame_inc = 0, val, img_addr = 0, img_offset = 0; + uint32_t stride = 0, slice_h = 0; + dma_addr_t iova; + uint32_t curr_cache_cfg = 0; + + bus_priv = (struct cam_sfe_bus_wr_priv *) priv; + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + cache_dbg_cfg = &bus_priv->common_data.cache_dbg_cfg; + + sfe_out_data = (struct cam_sfe_bus_wr_out_data *) + update_buf->res->res_priv; + + if (!sfe_out_data) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + if (update_buf->wm_update->num_buf != sfe_out_data->num_wm) { + CAM_ERR(CAM_SFE, + "Failed! 
Invalid number buffers:%d required:%d", + update_buf->wm_update->num_buf, sfe_out_data->num_wm); + return -EINVAL; + } + + for (i = 0; i < sfe_out_data->num_wm; i++) { + wm_data = (struct cam_sfe_bus_wr_wm_resource_data *) + sfe_out_data->wm_res[i].res_priv; + + val = (wm_data->height << 16) | wm_data->width; + cam_io_w_mb(val, + wm_data->common_data->mem_base + + wm_data->hw_regs->image_cfg_0); + CAM_DBG(CAM_SFE, "WM:%d image height and width 0x%X", + wm_data->index, val); + + /* For initial configuration program all bus registers */ + stride = update_buf->wm_update->stride; + slice_h = update_buf->wm_update->slice_height; + + val = stride; + CAM_DBG(CAM_SFE, "before stride %d", val); + val = ALIGNUP(val, 16); + if (val != stride && + val != wm_data->stride) + CAM_WARN(CAM_SFE, "Warning stride %u expected %u", + stride, val); + + if (wm_data->stride != val || !wm_data->init_cfg_done) { + cam_io_w_mb(stride, + wm_data->common_data->mem_base + + wm_data->hw_regs->image_cfg_2); + wm_data->stride = val; + CAM_DBG(CAM_SFE, "WM:%d image stride 0x%X", + wm_data->index, val); + } + + frame_inc = stride * slice_h; + + if (!(wm_data->en_cfg & (0x3 << 16))) { + cam_io_w_mb(wm_data->h_init, + wm_data->common_data->mem_base + + wm_data->hw_regs->image_cfg_1); + CAM_DBG(CAM_SFE, "WM:%d h_init 0x%X", + wm_data->index, wm_data->h_init); + } + + /* WM Image address */ + iova = update_buf->wm_update->image_buf[i] + + wm_data->offset; + + if (cam_smmu_is_expanded_memory()) { + img_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET(iova); + img_addr = CAM_36BIT_INTF_GET_IOVA_BASE(iova); + frame_inc = CAM_36BIT_INTF_GET_IOVA_BASE(frame_inc); + + CAM_DBG(CAM_SFE, "WM:%d image address offset: 0x%x", + wm_data->index, img_offset); + cam_io_w_mb(img_offset, + wm_data->common_data->mem_base + wm_data->hw_regs->addr_cfg); + } else { + img_addr = iova; + } + + CAM_DBG(CAM_SFE, "WM:%d image address: 0x%x, offset: 0x%x", + wm_data->index, img_addr, wm_data->offset); + + cam_io_w_mb(img_addr, + 
wm_data->common_data->mem_base + wm_data->hw_regs->image_addr); + cam_io_w_mb(frame_inc, + wm_data->common_data->mem_base + + wm_data->hw_regs->frame_incr); + + CAM_DBG(CAM_SFE, "WM:%d frame_inc: %d expanded_mem: %s", + wm_data->index, frame_inc, + CAM_BOOL_TO_YESNO(cam_smmu_is_expanded_memory)); + + curr_cache_cfg = wm_data->cache_cfg; + wm_data->cache_cfg = bus_priv->common_data.sys_cache_default_cfg; + if ((!cache_dbg_cfg->disable_for_scratch) && + (wm_data->enable_caching)) { + wm_data->cache_cfg = + wm_data->current_scid << 8; + wm_data->cache_cfg |= 3 << 4; + if (cache_dbg_cfg->scratch_dbg_cfg) + wm_data->cache_cfg |= cache_dbg_cfg->scratch_alloc; + else + wm_data->cache_cfg |= CACHE_ALLOC_ALLOC; + + if (cache_dbg_cfg->print_cache_cfg && + (curr_cache_cfg != wm_data->cache_cfg)) { + CAM_INFO(CAM_SFE, + "SFE:%d Scratch Buff WM:%d current_scid:%d cache_cfg:0x%x", + wm_data->common_data->core_index, + wm_data->index, wm_data->current_scid, wm_data->cache_cfg); + } + } + + cam_io_w_mb(wm_data->cache_cfg, + wm_data->common_data->mem_base + + wm_data->hw_regs->system_cache_cfg); + CAM_DBG(CAM_SFE, "WM:%d cache_cfg:0x%x", + wm_data->index, wm_data->cache_cfg); + + cam_io_w_mb(wm_data->pack_fmt, + wm_data->common_data->mem_base + + wm_data->hw_regs->packer_cfg); + CAM_DBG(CAM_SFE, "WM:%d packer_cfg 0x%X", + wm_data->index, wm_data->pack_fmt); + + /* enable the WM */ + cam_io_w_mb(wm_data->en_cfg, + wm_data->common_data->mem_base + + wm_data->hw_regs->cfg); + CAM_DBG(CAM_SFE, "WM:%d en_cfg 0x%X", + wm_data->index, wm_data->en_cfg); + + /* set initial configuration done */ + if (!wm_data->init_cfg_done) + wm_data->init_cfg_done = true; + } + + return 0; +} + +static int cam_sfe_bus_wr_update_hfr(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *update_hfr; + struct cam_sfe_bus_wr_out_data *sfe_out_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + struct cam_isp_port_hfr_config *hfr_cfg = NULL; + struct 
cam_cdm_utils_ops *cdm_util_ops; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t i, j, size = 0; + + update_hfr = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + sfe_out_data = (struct cam_sfe_bus_wr_out_data *) + update_hfr->res->res_priv; + + if (!sfe_out_data || !sfe_out_data->cdm_util_ops) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + cdm_util_ops = sfe_out_data->cdm_util_ops; + reg_val_pair = &sfe_out_data->common_data->io_buf_update[0]; + hfr_cfg = (struct cam_isp_port_hfr_config *)update_hfr->data; + + for (i = 0, j = 0; i < sfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_SFE, + "reg_val_pair %d exceeds the array limit %zu", + j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + wm_data = sfe_out_data->wm_res[i].res_priv; + if ((wm_data->framedrop_pattern != + hfr_cfg->framedrop_pattern) || + !wm_data->hfr_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->framedrop_pattern, + hfr_cfg->framedrop_pattern); + wm_data->framedrop_pattern = hfr_cfg->framedrop_pattern; + CAM_DBG(CAM_SFE, "WM:%d framedrop pattern 0x%X", + wm_data->index, wm_data->framedrop_pattern); + } + + if (wm_data->framedrop_period != hfr_cfg->framedrop_period || + !wm_data->hfr_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->framedrop_period, + hfr_cfg->framedrop_period); + wm_data->framedrop_period = hfr_cfg->framedrop_period; + CAM_DBG(CAM_SFE, "WM:%d framedrop period 0x%X", + wm_data->index, wm_data->framedrop_period); + } + + if (wm_data->irq_subsample_period != hfr_cfg->subsample_period + || !wm_data->hfr_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->irq_subsample_period, + hfr_cfg->subsample_period); + wm_data->irq_subsample_period = + hfr_cfg->subsample_period; + CAM_DBG(CAM_SFE, "WM:%d irq subsample period 0x%X", + 
wm_data->index, wm_data->irq_subsample_period); + } + + if (wm_data->irq_subsample_pattern != hfr_cfg->subsample_pattern + || !wm_data->hfr_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->irq_subsample_pattern, + hfr_cfg->subsample_pattern); + wm_data->irq_subsample_pattern = + hfr_cfg->subsample_pattern; + CAM_DBG(CAM_SFE, "WM:%d irq subsample pattern 0x%X", + wm_data->index, wm_data->irq_subsample_pattern); + } + + /* set initial configuration done */ + if (!wm_data->hfr_cfg_done) + wm_data->hfr_cfg_done = true; + } + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > update_hfr->cmd.size) { + CAM_ERR(CAM_SFE, + "Failed! Buf size:%d insufficient, expected size:%d", + update_hfr->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + update_hfr->cmd.cmd_buf_addr, num_regval_pairs, + reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + update_hfr->cmd.used_bytes = size * 4; + } else { + CAM_DBG(CAM_SFE, + "No reg val pairs. num_wms: %u", + sfe_out_data->num_wm); + update_hfr->cmd.used_bytes = 0; + } + + return 0; +} + +static int cam_sfe_bus_wr_update_stripe_cfg(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_sfe_bus_wr_priv *bus_priv; + struct cam_isp_hw_dual_isp_update_args *stripe_args; + struct cam_sfe_bus_wr_out_data *sfe_out_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + struct cam_isp_dual_stripe_config *stripe_config; + uint32_t outport_id, ports_plane_idx, i; + + bus_priv = (struct cam_sfe_bus_wr_priv *) priv; + stripe_args = (struct cam_isp_hw_dual_isp_update_args *)cmd_args; + + sfe_out_data = (struct cam_sfe_bus_wr_out_data *) + stripe_args->res->res_priv; + + if (!sfe_out_data) { + CAM_ERR(CAM_SFE, "Failed! 
Invalid data"); + return -EINVAL; + } + + outport_id = stripe_args->res->res_id & 0xFF; + if (stripe_args->res->res_id < CAM_ISP_SFE_OUT_RES_BASE || + stripe_args->res->res_id >= bus_priv->max_out_res) + return 0; + + ports_plane_idx = (stripe_args->split_id * + (stripe_args->dual_cfg->num_ports * CAM_PACKET_MAX_PLANES)) + + (outport_id * CAM_PACKET_MAX_PLANES); + for (i = 0; i < sfe_out_data->num_wm; i++) { + wm_data = sfe_out_data->wm_res[i].res_priv; + stripe_config = (struct cam_isp_dual_stripe_config *) + &stripe_args->dual_cfg->stripes_flex[ports_plane_idx + i]; + wm_data->width = stripe_config->width; + + /* + * UMD sends buffer offset address as offset for clients + * programmed to operate in frame/index based mode and h_init + * value as offset for clients programmed to operate in line + * based mode. + */ + + if (wm_data->en_cfg & (0x3 << 16)) + wm_data->offset = stripe_config->offset; + else + wm_data->h_init = stripe_config->offset; + + CAM_DBG(CAM_SFE, + "out_type:0x%X WM:%d width:%d offset:0x%X h_init:%d", + stripe_args->res->res_id, wm_data->index, + wm_data->width, wm_data->offset, wm_data->h_init); + } + + return 0; +} + +static int cam_sfe_bus_wr_update_wm_config( + void *cmd_args) +{ + int i; + enum cam_sfe_bus_wr_packer_format packer_fmt = PACKER_FMT_MAX; + struct cam_isp_hw_get_cmd_update *wm_config_update; + struct cam_sfe_bus_wr_out_data *sfe_out_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + struct cam_isp_vfe_wm_config *wm_config = NULL; + + if (!cmd_args) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + wm_config_update = cmd_args; + sfe_out_data = wm_config_update->res->res_priv; + wm_config = (struct cam_isp_vfe_wm_config *) + wm_config_update->data; + + if (!sfe_out_data || !sfe_out_data->cdm_util_ops || !wm_config) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + for (i = 0; i < sfe_out_data->num_wm; i++) { + wm_data = sfe_out_data->wm_res[i].res_priv; + + if (wm_config->wm_mode > 
CAM_SFE_WM_INDEX_BASED_MODE) { + CAM_ERR(CAM_SFE, "Invalid wm_mode: 0x%X WM:%d", + wm_config->wm_mode, wm_data->index); + return -EINVAL; + } + + wm_data->en_cfg = (wm_config->wm_mode << 16) | + (wm_config->virtual_frame_en << 1) | 0x1; + wm_data->width = wm_config->width; + + if (wm_config->packer_format) { + packer_fmt = cam_sfe_bus_get_packer_fmt(sfe_out_data->bus_priv, + wm_config->packer_format, wm_data->index); + + /* Reconfigure only for valid packer fmt */ + if (packer_fmt != PACKER_FMT_MAX) { + switch (wm_config->packer_format) { + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + packer_fmt |= + (1 << wm_data->common_data->pack_align_shift); + break; + default: + break; + } + wm_data->pack_fmt = packer_fmt; + } + } + + if ((sfe_out_data->out_type >= CAM_SFE_BUS_SFE_OUT_RDI0) && + (sfe_out_data->out_type <= CAM_SFE_BUS_SFE_OUT_RDI4)) { + wm_data->wm_mode = wm_config->wm_mode; + + /* + * Update width based on format for line based mode only + * Image size ignored for frame based mode + * Index based not supported currently + */ + if (wm_data->wm_mode == CAM_SFE_WM_LINE_BASED_MODE) + cam_sfe_bus_config_rdi_wm(wm_data); + } + + if (i == PLANE_C) + wm_data->height = wm_config->height / 2; + else + wm_data->height = wm_config->height; + + wm_data->offset = wm_config->offset; + CAM_DBG(CAM_SFE, + "WM:%d en_cfg:0x%X height:%d width:%d offset:%u packer_fmt: 0x%x", + wm_data->index, wm_data->en_cfg, wm_data->height, + wm_data->width, wm_data->offset, wm_data->pack_fmt); + } + + return 0; +} + +static int cam_sfe_bus_wr_update_bw_limiter( + void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *wm_config_update; + struct cam_sfe_bus_wr_out_data *sfe_out_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + struct cam_isp_wm_bw_limiter_config *wm_bw_limit_cfg = NULL; + uint32_t counter_limit = 0, reg_val = 
0; + uint32_t *reg_val_pair, num_regval_pairs = 0; + uint32_t i, j, size = 0; + + wm_config_update = (struct cam_isp_hw_get_cmd_update *) cmd_args; + wm_bw_limit_cfg = (struct cam_isp_wm_bw_limiter_config *) + wm_config_update->data; + + sfe_out_data = (struct cam_sfe_bus_wr_out_data *) + wm_config_update->res->res_priv; + if (!sfe_out_data || !sfe_out_data->cdm_util_ops) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + cdm_util_ops = sfe_out_data->cdm_util_ops; + reg_val_pair = &sfe_out_data->common_data->io_buf_update[0]; + for (i = 0, j = 0; i < sfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - (MAX_BUF_UPDATE_REG_NUM * 2))) { + CAM_ERR(CAM_SFE, + "reg_val_pair %d exceeds the array limit %zu for WM idx %d", + j, MAX_REG_VAL_PAIR_SIZE, i); + return -ENOMEM; + } + + /* Num WMs needs to match max planes */ + if (i >= CAM_PACKET_MAX_PLANES) { + CAM_WARN(CAM_SFE, + "Num of WMs: %d exceeded max planes", i); + goto add_reg_pair; + } + + wm_data = (struct cam_sfe_bus_wr_wm_resource_data *) + sfe_out_data->wm_res[i].res_priv; + if (!wm_data->hw_regs->bw_limiter_addr) { + CAM_ERR(CAM_SFE, + "WM: %d %s has no support for bw limiter", + wm_data->index, sfe_out_data->wm_res[i].res_name); + return -EINVAL; + } + + counter_limit = wm_bw_limit_cfg->counter_limit[i]; + + /* Validate max counter limit */ + if (counter_limit > + wm_data->common_data->max_bw_counter_limit) { + CAM_WARN(CAM_SFE, + "Invalid counter limit: 0x%x capping to max: 0x%x", + wm_bw_limit_cfg->counter_limit[i], + wm_data->common_data->max_bw_counter_limit); + counter_limit = wm_data->common_data->max_bw_counter_limit; + } + + if (wm_bw_limit_cfg->enable_limiter && counter_limit) { + reg_val = 1; + reg_val |= (counter_limit << 1); + } else { + reg_val = 0; + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->bw_limiter_addr, reg_val); + CAM_DBG(CAM_SFE, "WM: %d %s bw_limter: 0x%x", + wm_data->index, sfe_out_data->wm_res[i].res_name, + 
reg_val_pair[j-1]); + } + +add_reg_pair: + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > wm_config_update->cmd.size) { + CAM_ERR(CAM_SFE, + "Failed! Buf size:%d insufficient, expected size:%d", + wm_config_update->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + wm_config_update->cmd.cmd_buf_addr, num_regval_pairs, + reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + wm_config_update->cmd.used_bytes = size * 4; + } else { + CAM_DBG(CAM_SFE, + "No reg val pairs. num_wms: %u", + sfe_out_data->num_wm); + wm_config_update->cmd.used_bytes = 0; + } + + return 0; +} + +static int cam_sfe_bus_wr_get_res_for_mid( + struct cam_sfe_bus_wr_priv *bus_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *cmd_update = cmd_args; + struct cam_isp_hw_get_res_for_mid *get_res = NULL; + int i, j; + + get_res = (struct cam_isp_hw_get_res_for_mid *)cmd_update->data; + if (!get_res) { + CAM_ERR(CAM_SFE, + "invalid get resource for mid paramas"); + return -EINVAL; + } + + for (i = 0; i < bus_priv->num_out; i++) { + + for (j = 0; j < bus_priv->sfe_out_hw_info[i].num_mid; j++) { + if (bus_priv->sfe_out_hw_info[i].mid[j] == get_res->mid) + goto end; + } + } + /* + * Do not update out_res_id in case of no match. 
+ * Correct value will be dumped in hw mgr + */ + if (i == bus_priv->num_out) { + CAM_INFO(CAM_SFE, "mid:%d does not match with any out resource", get_res->mid); + return 0; + } + +end: + CAM_INFO(CAM_SFE, "match mid :%d out resource: %s 0x%x found", + get_res->mid, bus_priv->sfe_out_hw_info[i].name, + bus_priv->sfe_out[i].res_id); + get_res->out_res_id = bus_priv->sfe_out[i].res_id; + return 0; +} + +static int cam_sfe_bus_wr_irq_inject( + void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_sfe_bus_wr_priv *bus_priv = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sfe_bus_wr_hw_info *bus_wr_hw_info = NULL; + struct cam_irq_controller_reg_info *irq_reg_info = NULL; + struct cam_irq_register_set *inject_reg = NULL; + struct cam_isp_irq_inject_param *inject_params = NULL; + + bus_priv = (struct cam_sfe_bus_wr_priv *)priv; + soc_info = bus_priv->common_data.soc_info; + bus_wr_hw_info = (struct cam_sfe_bus_wr_hw_info *)bus_priv->bus_wr_hw_info; + irq_reg_info = &bus_wr_hw_info->common_reg.irq_reg_info; + inject_reg = irq_reg_info->irq_reg_set; + inject_params = (struct cam_isp_irq_inject_param *)cmd_args; + + if (!inject_params || !inject_reg) { + CAM_INFO(CAM_SFE, "Invalid inject_params or inject_reg"); + return -EINVAL; + } + + if (inject_params->reg_unit == + CAM_ISP_SFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG) { + + cam_io_w_mb(inject_params->irq_mask, + soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base + + inject_reg->set_reg_offset); + cam_io_w_mb(0x10, soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base + + irq_reg_info->global_irq_cmd_offset); + CAM_INFO(CAM_SFE, "Injected : irq_mask %#x set_reg_offset %#x", + inject_params->irq_mask, inject_reg->set_reg_offset); + } + return 0; +} + +static int cam_sfe_bus_wr_dump_irq_desc( + void *priv, void *cmd_args, uint32_t arg_size) +{ + int i, offset = 0; + struct cam_sfe_bus_wr_priv *bus_priv = NULL; + struct cam_sfe_bus_wr_hw_info *bus_wr_hw_info = NULL; + struct cam_isp_irq_inject_param *inject_params = NULL; 
+ + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + bus_priv = (struct cam_sfe_bus_wr_priv *)priv; + bus_wr_hw_info = (struct cam_sfe_bus_wr_hw_info *)bus_priv->bus_wr_hw_info; + inject_params = (struct cam_isp_irq_inject_param *)cmd_args; + + if (inject_params->reg_unit == + CAM_ISP_SFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG) { + offset += scnprintf(inject_params->line_buf + offset, + LINE_BUFFER_LEN - offset, + "Printing executable IRQ for hw_type: SFE reg_unit: %d\n", + inject_params->reg_unit); + + for (i = 0; i < bus_wr_hw_info->num_bus_wr_errors; i++) + offset += scnprintf(inject_params->line_buf + offset, + LINE_BUFFER_LEN - offset, "%#12x : %s - %s\n", + bus_wr_hw_info->bus_wr_err_desc[i].bitmask, + bus_wr_hw_info->bus_wr_err_desc[i].err_name, + bus_wr_hw_info->bus_wr_err_desc[i].desc); + } + return 0; +} + +static int cam_sfe_bus_wr_start_hw(void *hw_priv, + void *start_hw_args, uint32_t arg_size) +{ + return cam_sfe_bus_start_sfe_out(hw_priv); +} + +static int cam_sfe_bus_wr_stop_hw(void *hw_priv, + void *stop_hw_args, uint32_t arg_size) +{ + return cam_sfe_bus_stop_sfe_out(hw_priv); +} + +static int cam_sfe_bus_wr_init_hw(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_sfe_bus_wr_priv *bus_priv = hw_priv; + + if (!hw_priv) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + CAM_DBG(CAM_SFE, "SFE:%d bus-wr hw-version:0x%x", + bus_priv->common_data.core_index, + cam_io_r_mb(bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->hw_version)); + + return 0; +} + +static int cam_sfe_bus_wr_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size) +{ + return 0; +} + +static int cam_sfe_bus_wr_cache_config( + void *priv, void *cmd_args, + uint32_t arg_size) +{ + int i; + struct cam_sfe_bus_wr_priv *bus_priv; + struct cam_isp_sfe_bus_sys_cache_config *cache_cfg; + struct cam_sfe_bus_wr_out_data *sfe_out_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm_data = NULL; + + 
+ bus_priv = (struct cam_sfe_bus_wr_priv *)priv; + cache_cfg = (struct cam_isp_sfe_bus_sys_cache_config *)cmd_args; + + sfe_out_data = (struct cam_sfe_bus_wr_out_data *) + cache_cfg->res->res_priv; + + if (!sfe_out_data) { + CAM_ERR(CAM_SFE, "Invalid data"); + return -EINVAL; + } + + if (bus_priv->common_data.cache_dbg_cfg.disable_all) + return 0; + + for (i = 0; i < sfe_out_data->num_wm; i++) { + wm_data = (struct cam_sfe_bus_wr_wm_resource_data *) + sfe_out_data->wm_res[i].res_priv; + wm_data->enable_caching = cache_cfg->use_cache; + wm_data->current_scid = cache_cfg->scid; + cache_cfg->wr_cfg_done = true; + + CAM_DBG(CAM_SFE, "SFE:%d WM:%d cache_enable:%s scid:%u", + wm_data->common_data->core_index, + wm_data->index, + (wm_data->enable_caching ? "true" : "false"), + wm_data->current_scid); + } + + return 0; +} + +static int cam_sfe_bus_wr_set_debug_cfg( + void *priv, void *cmd_args) +{ + int i; + struct cam_sfe_bus_wr_priv *bus_priv = + (struct cam_sfe_bus_wr_priv *) priv; + struct cam_sfe_debug_cfg_params *debug_cfg; + + debug_cfg = (struct cam_sfe_debug_cfg_params *)cmd_args; + + if (debug_cfg->cache_config) + cam_sfe_bus_parse_cache_cfg(false, + debug_cfg->u.cache_cfg.sfe_cache_dbg, + &bus_priv->common_data.cache_dbg_cfg); + else + bus_priv->common_data.sfe_debug_cfg = + debug_cfg->u.dbg_cfg.sfe_debug_cfg; + + for (i = 0; i < CAM_SFE_PERF_CNT_MAX; i++) { + bus_priv->common_data.perf_cnt_cfg[i] = + debug_cfg->u.dbg_cfg.sfe_bus_wr_perf_counter_val[i]; + } + + return 0; +} + +static int cam_sfe_bus_read_rst_perf_cntrs( + struct cam_sfe_bus_wr_priv *bus_priv) +{ + int i; + bool print = false; + uint32_t val, status; + size_t len = 0; + uint8_t log_buf[256]; + struct cam_sfe_bus_wr_common_data *common_data = &bus_priv->common_data; + + if (!common_data->perf_cnt_en) + return 0; + + common_data->cntr++; + for (i = 0; i < CAM_SFE_PERF_CNT_MAX; i++) { + status = cam_io_r_mb(common_data->mem_base + + common_data->common_reg->perf_cnt_status); + if (!(status & 
BIT(i))) + continue; + + val = cam_io_r_mb(common_data->mem_base + + common_data->common_reg->perf_cnt_reg[i].perf_cnt_val); + + cam_io_w_mb(common_data->perf_cnt_cfg[i], + common_data->mem_base + + common_data->common_reg->perf_cnt_reg[i].perf_cnt_cfg); + print = true; + CAM_INFO_BUF(CAM_SFE, log_buf, 256, &len, "cnt%d: 0x%x ", i, val); + } + + if (print) + CAM_INFO(CAM_ISP, + "SFE%u Frame: %u Perf counters c%s", + common_data->core_index, common_data->cntr, log_buf); + + return 0; +} + +static uint32_t cam_sfe_bus_get_last_consumed_addr( + struct cam_sfe_bus_wr_priv *bus_priv, + uint32_t res_type) +{ + uint32_t last_consumed_addr; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_sfe_bus_wr_out_data *rsrc_data = NULL; + struct cam_sfe_bus_wr_wm_resource_data *wm_rsrc_data = NULL; + enum cam_sfe_bus_sfe_out_type res_id; + + res_id = cam_sfe_bus_wr_get_out_res_id(res_type); + + if (res_id >= CAM_SFE_BUS_SFE_OUT_MAX) { + CAM_ERR(CAM_ISP, "invalid res id:%u", res_id); + return 0; + } + + rsrc_node = &bus_priv->sfe_out[res_id]; + rsrc_data = rsrc_node->res_priv; + + /* All SFE out ports have single WM */ + wm_rsrc_data = rsrc_data->wm_res->res_priv; + last_consumed_addr = cam_io_r_mb( + wm_rsrc_data->common_data->mem_base + + wm_rsrc_data->hw_regs->addr_status_0); + + return last_consumed_addr; +} + +static int cam_sfe_bus_wr_process_cmd( + void *priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + struct cam_sfe_bus_wr_priv *bus_priv; + struct cam_isp_hw_done_event_data *done; + + if (!priv || !cmd_args) { + CAM_ERR_RATE_LIMIT(CAM_SFE, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_BUF_UPDATE: + rc = cam_sfe_bus_wr_update_wm(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BUF_UPDATE: + rc = cam_sfe_bus_wr_config_wm(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_HFR_UPDATE: + rc = cam_sfe_bus_wr_update_hfr(priv, cmd_args, arg_size); + break; + 
case CAM_ISP_HW_CMD_GET_WM_SECURE_MODE: + rc = cam_sfe_bus_wr_get_secure_mode(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_STRIPE_UPDATE: + rc = cam_sfe_bus_wr_update_stripe_cfg(priv, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ: + rc = 0; + break; + case CAM_ISP_HW_CMD_DUMP_BUS_INFO: { + struct cam_isp_hw_event_info *event_info = + (struct cam_isp_hw_event_info *)cmd_args; + enum cam_sfe_bus_sfe_out_type sfe_out_res_id; + + sfe_out_res_id = + cam_sfe_bus_wr_get_out_res_id(event_info->res_id); + + /* Skip if not write resource */ + if (sfe_out_res_id == CAM_SFE_BUS_SFE_OUT_MAX) { + CAM_DBG(CAM_SFE, + "No a SFE write res: 0x%x - skip dump", + event_info->res_id); + rc = 0; + break; + } + + rc = cam_sfe_bus_wr_print_dimensions( + sfe_out_res_id, (struct cam_sfe_bus_wr_priv *)priv); + break; + } + case CAM_ISP_HW_SFE_BUS_MINI_DUMP: { + bus_priv = (struct cam_sfe_bus_wr_priv *) priv; + + rc = cam_sfe_bus_mini_dump(bus_priv, cmd_args); + break; + } + case CAM_ISP_HW_USER_DUMP: { + bus_priv = (struct cam_sfe_bus_wr_priv *) priv; + + rc = cam_sfe_bus_wr_user_dump(bus_priv, cmd_args); + break; + } + case CAM_ISP_HW_CMD_WM_CONFIG_UPDATE: + rc = cam_sfe_bus_wr_update_wm_config(cmd_args); + break; + case CAM_ISP_HW_CMD_QUERY_CAP: { + struct cam_isp_hw_cap *sfe_bus_cap; + + bus_priv = (struct cam_sfe_bus_wr_priv *) priv; + sfe_bus_cap = (struct cam_isp_hw_cap *) cmd_args; + sfe_bus_cap->max_out_res_type = bus_priv->max_out_res; + sfe_bus_cap->num_wr_perf_counters = + bus_priv->common_data.common_reg->num_perf_counters; + rc = 0; + break; + } + case CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR: + bus_priv = (struct cam_sfe_bus_wr_priv *) priv; + done = (struct cam_isp_hw_done_event_data *) cmd_args; + done->last_consumed_addr = cam_sfe_bus_get_last_consumed_addr( + bus_priv, done->resource_handle); + break; + case CAM_ISP_HW_SFE_SYS_CACHE_WM_CONFIG: + rc = cam_sfe_bus_wr_cache_config(priv, cmd_args, arg_size); + break; + case 
CAM_ISP_HW_CMD_SET_SFE_DEBUG_CFG: + rc = cam_sfe_bus_wr_set_debug_cfg(priv, cmd_args); + break; + case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG: + rc = cam_sfe_bus_wr_update_bw_limiter(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_RES_FOR_MID: + rc = cam_sfe_bus_wr_get_res_for_mid(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_IRQ_INJECTION: + rc = cam_sfe_bus_wr_irq_inject(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION: + rc = cam_sfe_bus_wr_dump_irq_desc(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_READ_RST_PERF_CNTRS: { + bus_priv = (struct cam_sfe_bus_wr_priv *) priv; + rc = cam_sfe_bus_read_rst_perf_cntrs(bus_priv); + } + break; + default: + CAM_ERR_RATE_LIMIT(CAM_SFE, "Invalid HW command type:%d", + cmd_type); + break; + } + + return rc; +} + +int cam_sfe_bus_wr_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *sfe_irq_controller, + struct cam_sfe_bus **sfe_bus) +{ + int i, rc = 0; + struct cam_sfe_bus_wr_priv *bus_priv = NULL; + struct cam_sfe_bus *sfe_bus_local; + struct cam_sfe_bus_wr_hw_info *hw_info = bus_hw_info; + + CAM_DBG(CAM_SFE, "Enter"); + + if (!soc_info || !hw_intf || !bus_hw_info) { + CAM_ERR(CAM_SFE, + "Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK", + soc_info, hw_intf, bus_hw_info); + rc = -EINVAL; + goto end; + } + + sfe_bus_local = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus), GFP_KERNEL); + if (!sfe_bus_local) { + CAM_DBG(CAM_SFE, "Failed to alloc for sfe_bus"); + rc = -ENOMEM; + goto end; + } + + bus_priv = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_bus_wr_priv), + GFP_KERNEL); + if (!bus_priv) { + CAM_DBG(CAM_SFE, "Failed to alloc for sfe_bus_priv"); + rc = -ENOMEM; + goto free_bus_local; + } + sfe_bus_local->bus_priv = bus_priv; + + bus_priv->num_client = hw_info->num_client; + bus_priv->num_out = hw_info->num_out; + bus_priv->max_out_res = hw_info->max_out_res; + bus_priv->num_comp_grp = hw_info->num_comp_grp; + 
bus_priv->top_irq_shift = hw_info->top_irq_shift; + bus_priv->common_data.num_sec_out = 0; + bus_priv->common_data.secure_mode = CAM_SECURE_MODE_NON_SECURE; + bus_priv->common_data.core_index = soc_info->index; + bus_priv->common_data.mem_base = + CAM_SOC_GET_REG_MAP_START(soc_info, SFE_CORE_BASE_IDX); + bus_priv->common_data.hw_intf = hw_intf; + bus_priv->common_data.common_reg = &hw_info->common_reg; + bus_priv->common_data.line_done_cfg = hw_info->line_done_cfg; + bus_priv->common_data.pack_align_shift = hw_info->pack_align_shift; + bus_priv->common_data.max_bw_counter_limit = hw_info->max_bw_counter_limit; + bus_priv->common_data.err_irq_subscribe = false; + bus_priv->common_data.sfe_irq_controller = sfe_irq_controller; + bus_priv->common_data.irq_err_mask = hw_info->irq_err_mask; + bus_priv->common_data.sys_cache_default_cfg = hw_info->sys_cache_default_val; + bus_priv->common_data.soc_info = soc_info; + bus_priv->constraint_error_info = hw_info->constraint_error_info; + bus_priv->sfe_out_hw_info = hw_info->sfe_out_hw_info; + bus_priv->bus_wr_hw_info = hw_info; + rc = cam_cpas_get_cpas_hw_version(&bus_priv->common_data.hw_version); + if (rc) { + CAM_ERR(CAM_SFE, "Failed to get hw_version rc:%d", rc); + goto free_bus_priv; + } + + bus_priv->comp_grp = CAM_MEM_ZALLOC((sizeof(struct cam_isp_resource_node) * + bus_priv->num_comp_grp), GFP_KERNEL); + if (!bus_priv->comp_grp) { + CAM_ERR(CAM_SFE, "Failed to alloc for bus comp groups"); + rc = -ENOMEM; + goto free_bus_priv; + } + + bus_priv->sfe_out = CAM_MEM_ZALLOC((sizeof(struct cam_isp_resource_node) * + CAM_SFE_BUS_SFE_OUT_MAX), GFP_KERNEL); + if (!bus_priv->sfe_out) { + CAM_ERR(CAM_SFE, "Failed to alloc for bus out res"); + rc = -ENOMEM; + goto free_comp_grp; + } + + mutex_init(&bus_priv->common_data.bus_mutex); + rc = cam_irq_controller_init(drv_name, + bus_priv->common_data.mem_base, + &hw_info->common_reg.irq_reg_info, + &bus_priv->common_data.bus_irq_controller); + if (rc) { + CAM_ERR(CAM_SFE, "Init 
bus_irq_controller failed"); + goto free_sfe_out; + } + + + for (i = 0; i < bus_priv->num_comp_grp; i++) { + rc = cam_sfe_bus_wr_init_comp_grp(i, soc_info, + bus_priv, bus_hw_info, + &bus_priv->comp_grp[i]); + if (rc < 0) { + CAM_ERR(CAM_SFE, "SFE:%d init comp_grp:%d failed rc:%d", + bus_priv->common_data.core_index, i, rc); + goto deinit_comp_grp; + } + } + + for (i = 0; i < bus_priv->num_out; i++) { + rc = cam_sfe_bus_init_sfe_out_resource(i, bus_priv, + bus_hw_info); + if (rc < 0) { + CAM_ERR(CAM_SFE, + "SFE:%d init out_type:0x%X failed rc:%d", + bus_priv->common_data.core_index, i, rc); + goto deinit_sfe_out; + } + } + + spin_lock_init(&bus_priv->common_data.spin_lock); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + + for (i = 0; i < CAM_SFE_BUS_WR_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + list_add_tail(&bus_priv->common_data.evt_payload[i].list, + &bus_priv->common_data.free_payload_list); + } + + sfe_bus_local->hw_ops.reserve = cam_sfe_bus_acquire_sfe_out; + sfe_bus_local->hw_ops.release = cam_sfe_bus_release_sfe_out; + sfe_bus_local->hw_ops.start = cam_sfe_bus_wr_start_hw; + sfe_bus_local->hw_ops.stop = cam_sfe_bus_wr_stop_hw; + sfe_bus_local->hw_ops.init = cam_sfe_bus_wr_init_hw; + sfe_bus_local->hw_ops.deinit = cam_sfe_bus_wr_deinit_hw; + sfe_bus_local->top_half_handler = NULL; + sfe_bus_local->bottom_half_handler = NULL; + sfe_bus_local->hw_ops.process_cmd = cam_sfe_bus_wr_process_cmd; + bus_priv->bus_irq_handle = 0; + bus_priv->common_data.sfe_debug_cfg = 0; + *sfe_bus = sfe_bus_local; + + CAM_DBG(CAM_SFE, "Exit"); + return rc; + +deinit_sfe_out: + for (--i; i >= 0; i--) + cam_sfe_bus_deinit_sfe_out_resource(&bus_priv->sfe_out[i]); + +deinit_comp_grp: + if (i < 0) + i = bus_priv->num_comp_grp; + for (--i; i >= 0; i--) + cam_sfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]); + +free_sfe_out: + CAM_MEM_FREE(bus_priv->sfe_out); + +free_comp_grp: + CAM_MEM_FREE(bus_priv->comp_grp); + +free_bus_priv: + 
CAM_MEM_FREE(sfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(sfe_bus_local); + +end: + return rc; +} + +int cam_sfe_bus_wr_deinit( + struct cam_sfe_bus **sfe_bus) +{ + int i, rc = 0; + struct cam_sfe_bus_wr_priv *bus_priv = NULL; + struct cam_sfe_bus *sfe_bus_local; + unsigned long flags; + + if (!sfe_bus || !*sfe_bus) { + CAM_ERR(CAM_SFE, "Invalid input"); + return -EINVAL; + } + sfe_bus_local = *sfe_bus; + + bus_priv = sfe_bus_local->bus_priv; + if (!bus_priv) { + CAM_ERR(CAM_SFE, "bus_priv is NULL"); + rc = -ENODEV; + goto free_bus_local; + } + + spin_lock_irqsave(&bus_priv->common_data.spin_lock, flags); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + for (i = 0; i < CAM_SFE_BUS_WR_PAYLOAD_MAX; i++) + INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + bus_priv->common_data.err_irq_subscribe = false; + spin_unlock_irqrestore(&bus_priv->common_data.spin_lock, flags); + + for (i = 0; i < bus_priv->num_comp_grp; i++) { + rc = cam_sfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]); + if (rc < 0) + CAM_ERR(CAM_SFE, + "SFE:%d deinit comp_grp:%d failed rc:%d", + bus_priv->common_data.core_index, i, rc); + } + + for (i = 0; i < bus_priv->num_out; i++) { + rc = cam_sfe_bus_deinit_sfe_out_resource( + &bus_priv->sfe_out[i]); + if (rc < 0) + CAM_ERR(CAM_SFE, + "SFE:%d deinit out_type:0x%X failed rc:%d", + bus_priv->common_data.core_index, i, rc); + } + + CAM_MEM_FREE(bus_priv->comp_grp); + CAM_MEM_FREE(bus_priv->sfe_out); + + mutex_destroy(&bus_priv->common_data.bus_mutex); + + CAM_MEM_FREE(sfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(sfe_bus_local); + + *sfe_bus = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.h new file mode 100644 index 0000000000..1fc02869f4 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.h
@@ -0,0 +1,314 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */


#ifndef _CAM_SFE_BUS_WR_H_
#define _CAM_SFE_BUS_WR_H_

#include "cam_sfe_hw_intf.h"
#include "cam_sfe_bus.h"

#define CAM_SFE_BUS_WR_MAX_CLIENTS 17
#define CAM_SFE_BUS_WR_MAX_SUB_GRPS 6
#define CAM_SFE_BUS_CONS_ERR_MAX 32

#define CAM_SFE_BUS_WR_IRQ_CONS_VIOLATION BIT(28)
#define CAM_SFE_BUS_WR_IRQ_CCIF_VIOLATION BIT(30)
#define CAM_SFE_BUS_WR_IRQ_IMAGE_SIZE_VIOLATION BIT(31)

/* Bus wr source groups */
enum cam_sfe_bus_wr_src_grp {
	CAM_SFE_BUS_WR_SRC_GRP_0,
	CAM_SFE_BUS_WR_SRC_GRP_1,
	CAM_SFE_BUS_WR_SRC_GRP_2,
	CAM_SFE_BUS_WR_SRC_GRP_3,
	CAM_SFE_BUS_WR_SRC_GRP_4,
	CAM_SFE_BUS_WR_SRC_GRP_5,
	CAM_SFE_BUS_WR_SRC_GRP_MAX,
};

/* Bus wr composite (buf done grouping) groups */
enum cam_sfe_bus_wr_comp_grp_type {
	CAM_SFE_BUS_WR_COMP_GRP_0,
	CAM_SFE_BUS_WR_COMP_GRP_1,
	CAM_SFE_BUS_WR_COMP_GRP_2,
	CAM_SFE_BUS_WR_COMP_GRP_3,
	CAM_SFE_BUS_WR_COMP_GRP_4,
	CAM_SFE_BUS_WR_COMP_GRP_5,
	CAM_SFE_BUS_WR_COMP_GRP_6,
	CAM_SFE_BUS_WR_COMP_GRP_7,
	CAM_SFE_BUS_WR_COMP_GRP_8,
	CAM_SFE_BUS_WR_COMP_GRP_9,
	CAM_SFE_BUS_WR_COMP_GRP_10,
	CAM_SFE_BUS_WR_COMP_GRP_MAX,
};

/* SFE bus write out port types */
enum cam_sfe_bus_sfe_out_type {
	CAM_SFE_BUS_SFE_OUT_RDI0,
	CAM_SFE_BUS_SFE_OUT_RDI1,
	CAM_SFE_BUS_SFE_OUT_RDI2,
	CAM_SFE_BUS_SFE_OUT_RDI3,
	CAM_SFE_BUS_SFE_OUT_RDI4,
	CAM_SFE_BUS_SFE_OUT_RAW_DUMP,
	CAM_SFE_BUS_SFE_OUT_LCR,
	CAM_SFE_BUS_SFE_OUT_BE_0,
	CAM_SFE_BUS_SFE_OUT_BHIST_0,
	CAM_SFE_BUS_SFE_OUT_BE_1,
	CAM_SFE_BUS_SFE_OUT_BHIST_1,
	CAM_SFE_BUS_SFE_OUT_BE_2,
	CAM_SFE_BUS_SFE_OUT_BHIST_2,
	CAM_SFE_BUS_SFE_OUT_BAYER_RS_0,
	CAM_SFE_BUS_SFE_OUT_BAYER_RS_1,
	CAM_SFE_BUS_SFE_OUT_BAYER_RS_2,
	CAM_SFE_BUS_SFE_OUT_IR,
	CAM_SFE_BUS_SFE_OUT_HDR_STATS,
	CAM_SFE_BUS_SFE_OUT_MAX,
};

/*
 * struct cam_sfe_bus_wr_err_irq_desc:
 *
 * @Brief: Bus wr error irq description
 */
struct cam_sfe_bus_wr_err_irq_desc {
	uint32_t bitmask;
	char *err_name;
	char *desc;
};

/*
 * struct cam_sfe_bus_wr_constraint_error_desc:
 *
 * @Brief: Constraint error desc
 */
struct cam_sfe_bus_wr_constraint_error_desc {
	uint32_t bitmask;
	char *error_description;
};

/*
 * @brief: Constraint error info
 *
 * @constraint_error_list: Error description for various constraint errors
 * @num_cons_err: Number of constraint errors
 * @img_addr_unalign_shift: shift for image address unalign error
 * @img_width_unalign_shift: shift for image width unalign error
 *
 */
struct cam_sfe_bus_wr_constraint_error_info {
	struct cam_sfe_bus_wr_constraint_error_desc *constraint_error_list;
	uint32_t num_cons_err;
	uint32_t img_addr_unalign_shift;
	uint32_t img_width_unalign_shift;
};

/* Register offsets of one performance counter (cfg + current value) */
struct cam_sfe_bus_wr_perf_cnt_hw_info {
	uint32_t perf_cnt_cfg;
	uint32_t perf_cnt_val;
};

/*
 * struct cam_sfe_bus_reg_offset_common:
 *
 * @Brief: Common registers across all BUS Clients
 */
struct cam_sfe_bus_reg_offset_common {
	uint32_t hw_version;
	uint32_t cgc_ovd;
	uint32_t if_frameheader_cfg[CAM_SFE_BUS_WR_MAX_SUB_GRPS];
	uint32_t pwr_iso_cfg;
	uint32_t overflow_status_clear;
	uint32_t ccif_violation_status;
	uint32_t overflow_status;
	uint32_t image_size_violation_status;
	uint32_t debug_status_top_cfg;
	uint32_t debug_status_top;
	uint32_t test_bus_ctrl;
	uint32_t top_irq_mask_0;
	uint32_t qos_eos_cfg;
	struct cam_irq_controller_reg_info irq_reg_info;
	uint32_t num_perf_counters;
	uint32_t perf_cnt_status;
	struct cam_sfe_bus_wr_perf_cnt_hw_info perf_cnt_reg[CAM_SFE_PERF_CNT_MAX];
};

/*
 * struct cam_sfe_bus_reg_offset_bus_client:
 *
 * @Brief: Register offsets for BUS Clients
 */
struct cam_sfe_bus_reg_offset_bus_client {
	uint32_t cfg;
	uint32_t image_addr;
	uint32_t frame_incr;
	uint32_t image_cfg_0;
	uint32_t image_cfg_1;
	uint32_t image_cfg_2;
	uint32_t packer_cfg;
	uint32_t frame_header_addr;
	uint32_t frame_header_incr;
	uint32_t frame_header_cfg;
	uint32_t line_done_cfg;
	uint32_t irq_subsample_period;
	uint32_t irq_subsample_pattern;
	uint32_t framedrop_period;
	uint32_t framedrop_pattern;
	uint32_t system_cache_cfg;
	uint32_t addr_cfg;
	uint32_t addr_status_0;
	uint32_t addr_status_1;
	uint32_t addr_status_2;
	uint32_t addr_status_3;
	uint32_t debug_status_cfg;
	uint32_t debug_status_0;
	uint32_t debug_status_1;
	uint32_t mmu_prefetch_cfg;
	uint32_t mmu_prefetch_max_offset;
	uint32_t bw_limiter_addr;
	uint32_t comp_group;
};

/*
 * struct cam_sfe_bus_sfe_out_hw_info:
 *
 * @Brief: HW capability of SFE Bus Client
 */
struct cam_sfe_bus_sfe_out_hw_info {
	enum cam_sfe_bus_sfe_out_type sfe_out_type;
	uint32_t max_width;
	uint32_t max_height;
	uint32_t source_group;
	uint32_t mid[CAM_SFE_BUS_MAX_MID_PER_PORT];
	uint32_t num_mid;
	uint32_t num_wm;
	uint32_t wm_idx;
	uint32_t en_line_done;
	uint8_t *name;
};

/*
 * struct cam_sfe_bus_wr_hw_info:
 *
 * @Brief: HW register info for entire Bus
 *
 * @common_reg: Common register details
 * @num_client: Total number of write clients
 * @bus_client_reg: Bus client register info
 * @num_out: Number of out resources
 * @sfe_out_hw_info: SFE output capability
 * @constraint_error_info: Constraint Error information
 * @num_bus_wr_errors: Number of entries in bus_wr_err_desc
 * @bus_wr_err_desc: Bus wr error irq descriptions
 * @comp_done_mask: List of buf done mask shift values for
 * each comp grp
 * @num_comp_grp: Number of composite groups
 * @line_done_cfg: Line done cfg for wr/rd sync
 * @top_irq_shift: Mask shift for top level BUS WR irq
 * @max_out_res: maximum number of sfe out res in uapi
 * @pack_align_shift: Packer format alignment bit shift
 * @max_bw_counter_limit: Max BW counter limit
 * @sys_cache_default_val: System cache default config
 * @irq_err_mask: IRQ error mask
 */
struct cam_sfe_bus_wr_hw_info {
	struct cam_sfe_bus_reg_offset_common common_reg;
	uint32_t num_client;
	struct cam_sfe_bus_reg_offset_bus_client
		bus_client_reg[CAM_SFE_BUS_WR_MAX_CLIENTS];
	uint32_t num_out;
	struct cam_sfe_bus_sfe_out_hw_info
		sfe_out_hw_info[CAM_SFE_BUS_SFE_OUT_MAX];
	struct cam_sfe_bus_wr_constraint_error_info
		*constraint_error_info;
	uint32_t num_bus_wr_errors;
	struct cam_sfe_bus_wr_err_irq_desc *bus_wr_err_desc;
	uint32_t comp_done_mask[CAM_SFE_BUS_WR_COMP_GRP_MAX];
	uint32_t num_comp_grp;
	uint32_t line_done_cfg;
	uint32_t top_irq_shift;
	uint32_t max_out_res;
	uint32_t pack_align_shift;
	uint32_t max_bw_counter_limit;
	uint32_t sys_cache_default_val;
	uint32_t irq_err_mask;
};

/**
 * struct cam_sfe_bus_wm_mini_dump - SFE WM data
 *
 * @width Width
 * @height Height
 * @stride stride
 * @h_init init offset for line based mode
 * @acquired_width acquired width
 * @acquired_height acquired height
 * @en_cfg Enable flag
 * @format format
 * @index Index
 * @state state
 * @name Res name
 */
struct cam_sfe_bus_wm_mini_dump {
	uint32_t width;
	uint32_t height;
	uint32_t stride;
	uint32_t h_init;
	uint32_t acquired_width;
	uint32_t acquired_height;
	uint32_t en_cfg;
	uint32_t format;
	uint32_t index;
	uint32_t state;
	uint8_t name[CAM_ISP_RES_NAME_LEN];
};

/**
 * struct cam_sfe_bus_mini_dump_data - SFE bus mini dump data
 *
 * @wm: Write Master client information
 * @clk_rate: Clock rate
 * @num_client: Num client
 * @hw_state: hw state
 * @hw_idx: Hw index
 */
struct cam_sfe_bus_mini_dump_data {
	struct cam_sfe_bus_wm_mini_dump *wm;
	uint64_t clk_rate;
	uint32_t num_client;
	uint8_t hw_state;
	uint8_t hw_idx;
};

/*
 * cam_sfe_bus_wr_init()
 *
 * @Brief: Initialize Bus layer
 *
 * @soc_info: Soc Information for the associated HW
 * @hw_intf: HW Interface of HW to which this resource belongs
 * @bus_hw_info: BUS HW info that contains details of BUS registers
 * @sfe_irq_controller: SFE irq controller
 * @sfe_bus: Pointer to sfe_bus structure which will be filled
 * and returned on successful initialize
 *
 * @Return: 0: Success
* Non-zero: Failure + */ +int cam_sfe_bus_wr_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *sfe_irq_controller, + struct cam_sfe_bus **sfe_bus); + +/* + * cam_sfe_bus_wr_deinit() + * + * @Brief: Deinitialize Bus layer + * + * @sfe_bus: Pointer to sfe_bus structure to deinitialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_bus_wr_deinit(struct cam_sfe_bus **sfe_bus); + +#endif /* _CAM_SFE_BUS_WR_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/include/cam_sfe_bus.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/include/cam_sfe_bus.h new file mode 100644 index 0000000000..75903e503c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/include/cam_sfe_bus.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SFE_BUS_H_ +#define _CAM_SFE_BUS_H_ + +#include "cam_sfe_hw_intf.h" + +#define CAM_SFE_BUS_WR_VER_1_0 0x1000 +#define CAM_SFE_BUS_RD_VER_1_0 0x1000 +#define CAM_SFE_BUS_MAX_MID_PER_PORT 4 + +#define ALIGNUP(value, alignment) \ + ((value + alignment - 1) / alignment * alignment) + +#define CACHE_ALLOC_NONE 0 +#define CACHE_ALLOC_ALLOC 1 +#define CACHE_ALLOC_ALLOC_CLEAN 2 +#define CACHE_ALLOC_ALLOC_TRANS 3 +#define CACHE_ALLOC_CLEAN 5 +#define CACHE_ALLOC_DEALLOC 6 +#define CACHE_ALLOC_FORGET 7 +#define CACHE_ALLOC_TBH_ALLOC 8 + +#define DISABLE_CACHING_FOR_ALL 0xFFFFFF +#define CACHE_SCRATCH_RD_ALLOC_SHIFT 0 +#define CACHE_SCRATCH_WR_ALLOC_SHIFT 4 +#define CACHE_SCRATCH_DEBUG_SHIFT 8 +#define CACHE_BUF_RD_ALLOC_SHIFT 12 +#define CACHE_BUF_WR_ALLOC_SHIFT 16 +#define CACHE_BUF_DEBUG_SHIFT 20 +#define CACHE_BUF_PRINT_DBG_SHIFT 26 + +enum cam_sfe_bus_plane_type { + PLANE_Y, + PLANE_C, + PLANE_MAX, +}; + +enum cam_sfe_bus_type { + BUS_TYPE_SFE_WR, + BUS_TYPE_SFE_RD, + BUS_TYPE_SFE_MAX, +}; + +/* + * struct cam_sfe_bus_cache_dbg_cfg: + * + * @Brief: Bus cache debug cfg + * + * @scratch_alloc: Alloc type for scratch + * @buf_alloc: Alloc type for actual buffer + * @disable_all: Disable caching for all [scratch/snapshot] + * @disable_for_scratch: Disable caching for scratch + * @scratch_dbg_cfg: Scratch alloc configured + * @disable_for_buf: Disable caching for buffer + * @buf_dbg_cfg: Buf alloc configured + * @print_cache_cfg: Print cache cfg + */ +struct cam_sfe_bus_cache_dbg_cfg { + uint32_t scratch_alloc; + uint32_t buf_alloc; + bool disable_all; + bool disable_for_scratch; + bool scratch_dbg_cfg; + bool disable_for_buf; + bool buf_dbg_cfg; + bool print_cache_cfg; +}; + +/* + * struct cam_sfe_bus: + * + * @Brief: Bus interface structure + * + * @bus_priv: Private data of BUS + * @hw_ops: Hardware interface functions + * @top_half_handler: Top Half handler function + * @bottom_half_handler: Bottom Half handler function + */ +struct cam_sfe_bus { + 
void *bus_priv; + struct cam_hw_ops hw_ops; + CAM_IRQ_HANDLER_TOP_HALF top_half_handler; + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler; +}; + +/* + * cam_sfe_bus_init() + * + * @Brief: Initialize Bus layer + * + * @bus_version: Version of BUS to initialize + * @bus_type: Bus Type RD/WR + * @soc_info: Soc Information for the associated HW + * @hw_intf: HW Interface of HW to which this resource belongs + * @bus_hw_info: BUS HW info that contains details of BUS registers + * @sfe_irq_controller: SFE irq controller + * @sfe_bus: Pointer to sfe_bus structure which will be filled + * and returned on successful initialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_bus_init( + uint32_t bus_version, + int bus_type, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *sfe_irq_controller, + struct cam_sfe_bus **sfe_bus); + +/* + * cam_sfe_bus_deinit() + * + * @Brief: Deinitialize Bus layer + * + * @bus_version: Version of BUS to deinitialize + * @sfe_bus: Pointer to sfe_bus structure to deinitialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_sfe_bus_deinit( + uint32_t bus_version, + int bus_type, + struct cam_sfe_bus **sfe_bus); + + +/* + * cam_sfe_bus_parse_cache_cfg() + * + * @Brief: Parse SFE debug config + * + * @is_read: If set it's RM + * @debug_val: Debug val to be parsed + * @dbg_cfg: Debug cfg of RM/WM + * + */ +void cam_sfe_bus_parse_cache_cfg( + bool is_read, + uint32_t debug_val, + struct cam_sfe_bus_cache_dbg_cfg *dbg_cfg); + +#endif /* _CAM_SFE_BUS_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.c new file mode 100644 index 0000000000..3c87e28b09 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.c @@ -0,0 +1,2491 @@ +// SPDX-License-Identifier: GPL-2.0-only 
+/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_io_util.h" +#include "cam_cdm_util.h" +#include "cam_sfe_hw_intf.h" +#include "cam_tasklet_util.h" +#include "cam_sfe_top.h" +#include "cam_debug_util.h" +#include "cam_sfe_soc.h" +#include "cam_sfe_core.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" + +struct cam_sfe_core_cfg { + uint32_t mode_sel; + uint32_t ops_mode_cfg; + uint32_t fs_mode_cfg; +}; + +struct cam_sfe_top_common_data { + struct cam_hw_soc_info *soc_info; + struct cam_hw_intf *hw_intf; + struct cam_sfe_top_common_reg_offset *common_reg; + struct cam_sfe_top_common_reg_data *common_reg_data; + struct cam_irq_controller *sfe_irq_controller; + struct cam_sfe_top_irq_evt_payload evt_payload[CAM_SFE_EVT_MAX]; + struct list_head free_payload_list; +}; + +struct cam_sfe_top_perf_counter_cfg { + uint32_t perf_counter_val; + bool dump_counter; +}; + +struct cam_sfe_top_priv { + struct cam_sfe_top_common_data common_data; + struct cam_isp_resource_node in_rsrc[CAM_SFE_TOP_IN_PORT_MAX]; + uint32_t num_in_ports; + unsigned long applied_clk_rate; + unsigned long req_clk_rate[CAM_SFE_TOP_IN_PORT_MAX]; + uint32_t last_bw_counter; + uint32_t last_clk_counter; + uint64_t total_bw_applied; + struct cam_axi_vote agg_incoming_vote; + struct cam_axi_vote req_axi_vote[CAM_SFE_TOP_IN_PORT_MAX]; + struct cam_axi_vote last_bw_vote[CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ]; + uint64_t last_total_bw_vote[CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ]; + unsigned long last_clk_vote[CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ]; + enum cam_clk_bw_state clk_state; + enum cam_clk_bw_state bw_state; + enum cam_isp_bw_control_action axi_vote_control[CAM_SFE_TOP_IN_PORT_MAX]; + struct cam_axi_vote applied_axi_vote; + struct cam_sfe_core_cfg core_cfg; + uint32_t sfe_debug_cfg; + uint32_t sensor_sel_diag_cfg; + struct cam_sfe_top_perf_counter_cfg 
perf_counters[CAM_SFE_PERF_COUNTER_MAX]; + uint32_t cc_testbus_sel_cfg; + int error_irq_handle; + uint16_t reserve_cnt; + uint16_t start_stop_cnt; + void *priv_per_stream; + spinlock_t spin_lock; + cam_hw_mgr_event_cb_func event_cb; + struct cam_sfe_wr_client_desc *wr_client_desc; + struct cam_sfe_top_hw_info *hw_info; + uint32_t num_clc_module; + struct cam_sfe_top_debug_info (*clc_dbg_mod_info)[CAM_SFE_TOP_DBG_REG_MAX][8]; + bool skip_clk_data_rst; +}; + +struct cam_sfe_path_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_sfe_top_priv *top_priv; + struct cam_sfe_top_common_reg_offset *common_reg; + struct cam_sfe_top_common_reg_data *common_reg_data; + struct cam_sfe_modules_common_reg_offset *modules_reg; + struct cam_sfe_path_common_reg_data *path_reg_data; + struct cam_hw_soc_info *soc_info; + uint32_t min_hblank_cnt; + int sof_eof_handle; +}; + +static int cam_sfe_top_apply_clock_start_stop(struct cam_sfe_top_priv *top_priv); + +static int cam_sfe_top_apply_bw_start_stop(struct cam_sfe_top_priv *top_priv); + +static void cam_sfe_top_print_ipp_violation_info(struct cam_sfe_top_priv *top_priv, + uint32_t violation_status); + +static const char *cam_sfe_top_clk_bw_state_to_string(uint32_t state) +{ + switch (state) { + case CAM_CLK_BW_STATE_UNCHANGED: + return "UNCHANGED"; + case CAM_CLK_BW_STATE_INCREASE: + return "INCREASE"; + case CAM_CLK_BW_STATE_DECREASE: + return "DECREASE"; + default: + return "Invalid State"; + } +} + +static int cam_sfe_top_set_axi_bw_vote( + struct cam_sfe_top_priv *top_priv, + struct cam_axi_vote *final_bw_vote, uint64_t total_bw_new_vote, bool start_stop, + uint64_t request_id) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sfe_soc_private *soc_private = NULL; + int i, j; + + soc_info = top_priv->common_data.soc_info; + soc_private = (struct cam_sfe_soc_private *)soc_info->soc_private; + + CAM_DBG(CAM_PERF, "SFE:%d Sending final BW to cpas bw_state:%s bw_vote:%llu req_id:%ld", + 
top_priv->common_data.hw_intf->hw_idx, + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + total_bw_new_vote, (start_stop ? -1 : request_id)); + rc = cam_cpas_update_axi_vote(soc_private->cpas_handle, + final_bw_vote); + if (!rc) { + memcpy(&top_priv->applied_axi_vote, + final_bw_vote, + sizeof(struct cam_axi_vote)); + top_priv->total_bw_applied = total_bw_new_vote; + } else { + CAM_ERR(CAM_PERF, "BW request failed, rc=%d", rc); + for (i = 0; i < final_bw_vote->num_paths; i++) { + CAM_INFO(CAM_PERF, + "sfe[%d] : Applied BW Vote : [%s][%s][%s] [%llu %llu %llu]", + top_priv->common_data.hw_intf->hw_idx, + cam_cpas_axi_util_path_type_to_string( + final_bw_vote->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + final_bw_vote->axi_path[i].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + final_bw_vote->axi_path[i].vote_level), + final_bw_vote->axi_path[i].camnoc_bw, + final_bw_vote->axi_path[i].mnoc_ab_bw, + final_bw_vote->axi_path[i].mnoc_ib_bw); + } + + for (i = 0; i < CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; i++) { + for (j = 0; j < top_priv->last_bw_vote[i].num_paths; j++) { + CAM_INFO(CAM_PERF, + "sfe[%d] : History[%d] BW Vote : [%s][%s] [%s] [%llu %llu %llu]", + top_priv->common_data.hw_intf->hw_idx, i, + cam_cpas_axi_util_path_type_to_string( + top_priv->last_bw_vote[i].axi_path[j].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + top_priv->last_bw_vote[i].axi_path[j].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + top_priv->last_bw_vote[i].axi_path[j].vote_level), + top_priv->last_bw_vote[i].axi_path[j].camnoc_bw, + top_priv->last_bw_vote[i].axi_path[j].mnoc_ab_bw, + top_priv->last_bw_vote[i].axi_path[j].mnoc_ib_bw); + } + } + + } + + return rc; + +} + +static int cam_sfe_top_set_hw_clk_rate(struct cam_sfe_top_priv *top_priv, + unsigned long final_clk_rate, bool start_stop, uint64_t request_id, bool is_drv_config_en) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sfe_soc_private 
*soc_private = NULL; + struct cam_ahb_vote ahb_vote; + int rc = 0, clk_lvl = -1; + unsigned long cesta_clk_rate_high, cesta_clk_rate_low; + int cesta_client_idx = -1; + uint32_t lowest_clk_lvl; + + soc_info = top_priv->common_data.soc_info; + soc_private = (struct cam_sfe_soc_private *)soc_info->soc_private; + + lowest_clk_lvl = soc_info->lowest_clk_level; + cesta_clk_rate_high = final_clk_rate; + cesta_clk_rate_low = final_clk_rate; + + if (soc_info->is_clk_drv_en) { + cesta_client_idx = top_priv->common_data.hw_intf->hw_idx; + if (is_drv_config_en) + cesta_clk_rate_low = + soc_info->clk_rate[lowest_clk_lvl][soc_info->src_clk_idx]; + else + cesta_clk_rate_low = final_clk_rate; + } + + CAM_DBG(CAM_PERF, + "Applying SFE:%d Clock name=%s idx=%d cesta_client_idx:%d req clk[high low]=[%lu %lu] req_id=%ld", + top_priv->common_data.hw_intf->hw_idx, soc_info->clk_name[soc_info->src_clk_idx], + soc_info->src_clk_idx, cesta_client_idx, cesta_clk_rate_high, cesta_clk_rate_low, + (start_stop ? -1 : request_id)); + + rc = cam_soc_util_set_src_clk_rate(soc_info, cesta_client_idx, cesta_clk_rate_high, + cesta_clk_rate_low); + if (!rc) { + top_priv->applied_clk_rate = cesta_clk_rate_high; + rc = cam_soc_util_get_clk_level(soc_info, cesta_clk_rate_low, + soc_info->src_clk_idx, &clk_lvl); + if (rc) { + CAM_WARN(CAM_ISP, + "Failed to get clk level for %s with clk_rate %lu src_idx %d rc %d", + soc_info->dev_name, cesta_clk_rate_high, + soc_info->src_clk_idx, rc); + rc = 0; + goto end; + } + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = clk_lvl; + cam_cpas_update_ahb_vote(soc_private->cpas_handle, &ahb_vote); + } else { + CAM_ERR(CAM_ISP, + "SFE:%d cesta_client_idx:%d Failed to set the req clk rate[high low]: [%llu %llu] rc:%d", + top_priv->common_data.hw_intf->hw_idx, cesta_client_idx, + cesta_clk_rate_high, cesta_clk_rate_low, rc); + } + +end: + return rc; +} + +static void cam_sfe_top_check_module_status( + uint32_t num_reg, uint32_t *reg_val, + struct 
cam_sfe_top_debug_info (*status_list)[CAM_SFE_TOP_DBG_REG_MAX][8]) +{ + bool found = false; + uint32_t i, j, val = 0; + size_t len = 0; + uint8_t log_buf[1024]; + + if (!status_list) + return; + + for (i = 0; i < num_reg; i++) { + /* Check for ideal values */ + if ((reg_val[i] == 0) || (reg_val[i] == 0x55555555)) + continue; + + for (j = 0; j < 8; j++) { + val = reg_val[i] >> (*status_list)[i][j].shift; + val &= 0xF; + if (val == 0 || val == 5) + continue; + + CAM_INFO_BUF(CAM_SFE, log_buf, 1024, &len, "%s [I:%u V:%u R:%u]", + (*status_list)[i][j].clc_name, + ((val >> 2) & 1), ((val >> 1) & 1), (val & 1)); + found = true; + } + if (found) + CAM_INFO_RATE_LIMIT(CAM_SFE, "Check config for Debug%u - %s", + i, log_buf); + len = 0; + found = false; + } +} + +static void cam_sfe_top_print_cc_test_bus( + struct cam_sfe_top_priv *top_priv) +{ + struct cam_sfe_top_common_data *common_data = &top_priv->common_data; + struct cam_hw_soc_info *soc_info = common_data->soc_info; + void __iomem *mem_base = + soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base; + struct cam_sfe_top_cc_testbus_info *testbus_info; + uint32_t dbg_testbus_reg = common_data->common_reg->top_debug_testbus_reg; + uint32_t size = 0, reg_val = 0, val = 0, i = 0; + size_t len = 0; + uint8_t log_buf[1024] = {0}; + + reg_val = cam_io_r(mem_base + + common_data->common_reg->top_debug[dbg_testbus_reg]); + + for (i = 0; i < top_priv->hw_info->num_of_testbus; i++) { + if (top_priv->cc_testbus_sel_cfg == + top_priv->hw_info->test_bus_info[i].value) { + size = top_priv->hw_info->test_bus_info[i].size; + testbus_info = + top_priv->hw_info->test_bus_info[i].testbus; + break; + } + } + + if (i == top_priv->hw_info->num_of_testbus) { + CAM_WARN(CAM_SFE, + "Unexpected value[%d] is programed in test_bus_ctrl reg", + top_priv->cc_testbus_sel_cfg); + return; + } + + for (i = 0; i < size; i++) { + val = reg_val >> testbus_info[i].shift; + val &= testbus_info[i].mask; + if (val) + CAM_INFO_BUF(CAM_SFE, log_buf, 1024, &len, "%s 
[val:%u]", + testbus_info[i].clc_name, val); + } + + CAM_INFO(CAM_SFE, "SFE_TOP_DEBUG_%d val %d config %s", + common_data->common_reg->top_debug_testbus_reg, + reg_val, log_buf); +} + +static void cam_sfe_top_dump_perf_counters( + const char *event, + const char *res_name, + struct cam_sfe_top_priv *top_priv) +{ + int i; + void __iomem *mem_base; + struct cam_sfe_top_common_data *common_data; + struct cam_hw_soc_info *soc_info; + struct cam_sfe_top_common_reg_offset *common_reg; + + common_data = &top_priv->common_data; + common_reg = common_data->common_reg; + soc_info = common_data->soc_info; + mem_base = soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base; + + for (i = 0; i < top_priv->common_data.common_reg->num_perf_counters; i++) { + if (top_priv->perf_counters[i].dump_counter) { + CAM_INFO(CAM_SFE, + "SFE [%u] on %s %s counter: %d pixel_cnt: %d line_cnt: %d stall_cnt: %d always_cnt: %d status: 0x%x", + common_data->hw_intf->hw_idx, res_name, event, (i + 1), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_pix_count), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_line_count), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_stall_count), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_always_count), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_count_status)); + } + } +} + +static void cam_sfe_top_print_debug_reg_info( + struct cam_sfe_top_priv *top_priv) +{ + void __iomem *mem_base; + struct cam_sfe_top_common_data *common_data; + struct cam_hw_soc_info *soc_info; + uint32_t *reg_val = NULL; + uint32_t num_reg = 0; + int i = 0, j; + + common_data = &top_priv->common_data; + soc_info = common_data->soc_info; + num_reg = common_data->common_reg->num_debug_registers; + mem_base = soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base; + reg_val = CAM_MEM_ZALLOC_ARRAY(num_reg, sizeof(uint32_t), GFP_KERNEL); + if (!reg_val) + return; + + while (i < num_reg) { + for (j = 0; j < 4 && i < num_reg; j++, i++) { 
+ reg_val[i] = cam_io_r(mem_base + + common_data->common_reg->top_debug[i]); + } + CAM_INFO(CAM_SFE, "Debug%u: 0x%x Debug%u: 0x%x Debug%u: 0x%x Debug%u: 0x%x", + (i - 4), reg_val[i - 4], (i - 3), reg_val[i - 3], + (i - 2), reg_val[i - 2], (i - 1), reg_val[i - 1]); + } + + cam_sfe_top_check_module_status(top_priv->num_clc_module, + reg_val, top_priv->clc_dbg_mod_info); + + if (common_data->common_reg->top_cc_test_bus_supported && + top_priv->cc_testbus_sel_cfg) + cam_sfe_top_print_cc_test_bus(top_priv); + + CAM_MEM_FREE(reg_val); + + cam_sfe_top_dump_perf_counters("ERROR", "", top_priv); +} + +static struct cam_axi_vote *cam_sfe_top_delay_bw_reduction( + struct cam_sfe_top_priv *top_priv, + uint64_t *to_be_applied_bw) +{ + uint32_t i; + int vote_idx = -1; + uint64_t max_bw = 0; + + for (i = 0; i < CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; i++) { + if (top_priv->last_total_bw_vote[i] >= max_bw) { + vote_idx = i; + max_bw = top_priv->last_total_bw_vote[i]; + } + } + + *to_be_applied_bw = max_bw; + + return &top_priv->last_bw_vote[vote_idx]; +} + +int cam_sfe_top_calc_axi_bw_vote(struct cam_sfe_top_priv *top_priv, + bool start_stop, struct cam_axi_vote **to_be_applied_axi_vote, + uint64_t *total_bw_new_vote, uint64_t request_id) +{ + int rc = 0; + uint32_t i; + uint32_t num_paths = 0; + bool bw_unchanged = true; + struct cam_axi_vote *final_bw_vote = NULL; + + if (top_priv->num_in_ports > CAM_SFE_TOP_IN_PORT_MAX) { + CAM_ERR(CAM_SFE, "Invalid number of ports %d, max %d", + top_priv->num_in_ports, CAM_SFE_TOP_IN_PORT_MAX); + rc = -EINVAL; + goto end; + } + + memset(&top_priv->agg_incoming_vote, 0, sizeof(struct cam_axi_vote)); + for (i = 0; i < top_priv->num_in_ports; i++) { + if (top_priv->axi_vote_control[i] == + CAM_ISP_BW_CONTROL_INCLUDE) { + if (num_paths + + top_priv->req_axi_vote[i].num_paths > + CAM_CPAS_MAX_PATHS_PER_CLIENT) { + CAM_ERR(CAM_PERF, + "Required paths(%d) more than max(%d)", + num_paths + + top_priv->req_axi_vote[i].num_paths, + 
CAM_CPAS_MAX_PATHS_PER_CLIENT); + rc = -EINVAL; + goto end; + } + + memcpy(&top_priv->agg_incoming_vote.axi_path[num_paths], + &top_priv->req_axi_vote[i].axi_path[0], + top_priv->req_axi_vote[i].num_paths * + sizeof( + struct cam_cpas_axi_per_path_bw_vote)); + num_paths += top_priv->req_axi_vote[i].num_paths; + } + } + + top_priv->agg_incoming_vote.num_paths = num_paths; + + for (i = 0; i < top_priv->agg_incoming_vote.num_paths; i++) { + CAM_DBG(CAM_PERF, + "sfe[%d] : New BW Vote : counter[%d] [%s][%s][%s] [%llu %llu %llu]", + top_priv->common_data.hw_intf->hw_idx, + top_priv->last_bw_counter, + cam_cpas_axi_util_path_type_to_string( + top_priv->agg_incoming_vote.axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + top_priv->agg_incoming_vote.axi_path[i].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + top_priv->agg_incoming_vote.axi_path[i].vote_level), + top_priv->agg_incoming_vote.axi_path[i].camnoc_bw, + top_priv->agg_incoming_vote.axi_path[i].mnoc_ab_bw, + top_priv->agg_incoming_vote.axi_path[i].mnoc_ib_bw); + + *total_bw_new_vote += top_priv->agg_incoming_vote.axi_path[i].camnoc_bw; + } + + memcpy(&top_priv->last_bw_vote[top_priv->last_bw_counter], &top_priv->agg_incoming_vote, + sizeof(struct cam_axi_vote)); + top_priv->last_total_bw_vote[top_priv->last_bw_counter] = *total_bw_new_vote; + top_priv->last_bw_counter = (top_priv->last_bw_counter + 1) % + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; + + if (*total_bw_new_vote != top_priv->total_bw_applied) + bw_unchanged = false; + + CAM_DBG(CAM_PERF, + "applied_total=%lld, new_total=%lld unchanged=%d, start_stop=%d req_id=%ld", + top_priv->total_bw_applied, + *total_bw_new_vote, bw_unchanged, start_stop, (start_stop ? 
-1 : request_id)); + + if (bw_unchanged) { + CAM_DBG(CAM_PERF, "BW config unchanged"); + *to_be_applied_axi_vote = NULL; + top_priv->bw_state = CAM_CLK_BW_STATE_UNCHANGED; + goto end; + } + + if (start_stop) { + /* need to vote current request immediately */ + final_bw_vote = &top_priv->agg_incoming_vote; + /* Reset everything, we can start afresh */ + memset(top_priv->last_bw_vote, 0, sizeof(struct cam_axi_vote) * + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ); + memset(top_priv->last_total_bw_vote, 0, sizeof(uint64_t) * + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ); + top_priv->last_bw_counter = 0; + top_priv->last_bw_vote[top_priv->last_bw_counter] = top_priv->agg_incoming_vote; + top_priv->last_total_bw_vote[top_priv->last_bw_counter] = *total_bw_new_vote; + top_priv->last_bw_counter = (top_priv->last_bw_counter + 1) % + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; + } else { + /* + * Find max bw request in last few frames. This will be the bw + * that we want to vote to CPAS now. + */ + final_bw_vote = cam_sfe_top_delay_bw_reduction(top_priv, total_bw_new_vote); + if (*total_bw_new_vote == 0) + CAM_DBG(CAM_PERF, "to_be_applied_axi_vote is 0, req_id:%llu", request_id); + } + + for (i = 0; i < final_bw_vote->num_paths; i++) { + CAM_DBG(CAM_PERF, + "sfe[%d] : Apply BW Vote : [%s][%s][%s] [%llu %llu %llu]", + top_priv->common_data.hw_intf->hw_idx, + cam_cpas_axi_util_path_type_to_string( + final_bw_vote->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + final_bw_vote->axi_path[i].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + final_bw_vote->axi_path[i].vote_level), + final_bw_vote->axi_path[i].camnoc_bw, + final_bw_vote->axi_path[i].mnoc_ab_bw, + final_bw_vote->axi_path[i].mnoc_ib_bw); + } + + if (*total_bw_new_vote == top_priv->total_bw_applied) { + CAM_DBG(CAM_PERF, "SFE:%d Final BW Unchanged after delay", + top_priv->common_data.hw_intf->hw_idx); + top_priv->bw_state = CAM_CLK_BW_STATE_UNCHANGED; + *to_be_applied_axi_vote = NULL; + goto end; + } else 
if (*total_bw_new_vote > top_priv->total_bw_applied) { + top_priv->bw_state = CAM_CLK_BW_STATE_INCREASE; + } else { + top_priv->bw_state = CAM_CLK_BW_STATE_DECREASE; + } + + CAM_DBG(CAM_PERF, + "sfe[%d] : Delayed update: applied_total=%lld new_total=%lld, start_stop=%d bw_state=%s req_id=%ld", + top_priv->common_data.hw_intf->hw_idx, top_priv->total_bw_applied, + *total_bw_new_vote, start_stop, + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + (start_stop ? -1 : request_id)); + + *to_be_applied_axi_vote = final_bw_vote; + +end: + return rc; +} + +int cam_sfe_top_bw_update(struct cam_sfe_soc_private *soc_private, + struct cam_sfe_top_priv *top_priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_sfe_bw_update_args *bw_update = NULL; + struct cam_isp_resource_node *res = NULL; + int rc = 0; + int i; + + bw_update = (struct cam_sfe_bw_update_args *)cmd_args; + res = bw_update->node_res; + + if (!res || !res->hw_intf || !res->hw_intf->hw_priv) + return -EINVAL; + + + if (res->res_type != CAM_ISP_RESOURCE_SFE_IN || + res->res_id >= CAM_ISP_HW_SFE_IN_MAX) { + CAM_ERR(CAM_SFE, "SFE:%d Invalid res_type:%d res id%d", + res->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + if (top_priv->num_in_ports > CAM_SFE_TOP_IN_PORT_MAX) { + CAM_ERR(CAM_SFE, "Invalid number of ports %d, max %d", + top_priv->num_in_ports, CAM_SFE_TOP_IN_PORT_MAX); + return -EINVAL; + } + + for (i = 0; i < top_priv->num_in_ports; i++) { + if (top_priv->in_rsrc[i].res_id == res->res_id) { + memcpy(&top_priv->req_axi_vote[i], + &bw_update->sfe_vote, + sizeof(struct cam_axi_vote)); + top_priv->axi_vote_control[i] = + CAM_ISP_BW_CONTROL_INCLUDE; + break; + } + } + + return rc; +} + +int cam_sfe_top_bw_control(struct cam_sfe_soc_private *soc_private, + struct cam_sfe_top_priv *top_priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_bw_control_args *bw_ctrl = NULL; + struct cam_isp_resource_node *res = NULL; + struct cam_hw_info *hw_info = NULL; + int rc = 0; 
+ int i; + + bw_ctrl = (struct cam_isp_bw_control_args *)cmd_args; + res = bw_ctrl->node_res; + + if (!res || !res->hw_intf->hw_priv) + return -EINVAL; + + hw_info = res->hw_intf->hw_priv; + + if (res->res_type != CAM_ISP_RESOURCE_SFE_IN || + res->res_id >= CAM_ISP_HW_SFE_IN_MAX) { + CAM_ERR(CAM_SFE, "SFE:%d Invalid res_type:%d res id%d", + res->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + if (top_priv->num_in_ports > CAM_SFE_TOP_IN_PORT_MAX) { + CAM_ERR(CAM_SFE, "Invalid number of ports %d, max %d", + top_priv->num_in_ports, CAM_SFE_TOP_IN_PORT_MAX); + return -EINVAL; + } + + for (i = 0; i < top_priv->num_in_ports; i++) { + if (top_priv->in_rsrc[i].res_id == res->res_id) { + top_priv->axi_vote_control[i] = bw_ctrl->action; + break; + } + } + + if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR_RATE_LIMIT(CAM_SFE, + "SFE:%d Not ready to set BW yet :%d", + res->hw_intf->hw_idx, + hw_info->hw_state); + } else { + rc = cam_sfe_top_apply_bw_start_stop(top_priv); + } + + return rc; +} + +static int cam_sfe_top_core_cfg( + struct cam_sfe_top_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_sfe_core_config_args *sfe_core_cfg = NULL; + + if ((!cmd_args) || + (arg_size != sizeof(struct cam_sfe_core_config_args))) { + CAM_ERR(CAM_SFE, "Invalid inputs"); + return -EINVAL; + } + + sfe_core_cfg = (struct cam_sfe_core_config_args *)cmd_args; + top_priv->core_cfg.mode_sel = + sfe_core_cfg->core_config.mode_sel; + top_priv->core_cfg.fs_mode_cfg = + sfe_core_cfg->core_config.fs_mode_cfg; + top_priv->core_cfg.ops_mode_cfg = + sfe_core_cfg->core_config.ops_mode_cfg; + + return 0; +} + +static inline void cam_sfe_top_delay_clk_reduction( + struct cam_sfe_top_priv *top_priv, + unsigned long *max_clk) +{ + int i; + + for (i = 0; i < CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; i++) { + if (top_priv->last_clk_vote[i] > (*max_clk)) + *max_clk = top_priv->last_clk_vote[i]; + } +} + +int cam_sfe_top_calc_hw_clk_rate( + struct cam_sfe_top_priv 
*top_priv, bool start_stop, + unsigned long *final_clk_rate, uint64_t request_id) +{ + int i, rc = 0; + unsigned long max_req_clk_rate = 0; + + for (i = 0; i < top_priv->num_in_ports; i++) { + if (top_priv->req_clk_rate[i] > max_req_clk_rate) + max_req_clk_rate = top_priv->req_clk_rate[i]; + } + + if (start_stop && !top_priv->skip_clk_data_rst) { + /* need to vote current clk immediately */ + *final_clk_rate = max_req_clk_rate; + /* Reset everything, we can start afresh */ + memset(top_priv->last_clk_vote, 0, sizeof(uint64_t) * + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ); + top_priv->last_clk_counter = 0; + top_priv->last_clk_vote[top_priv->last_clk_counter] = max_req_clk_rate; + top_priv->last_clk_counter = (top_priv->last_clk_counter + 1) % + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; + } else { + top_priv->last_clk_vote[top_priv->last_clk_counter] = max_req_clk_rate; + top_priv->last_clk_counter = (top_priv->last_clk_counter + 1) % + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; + + /* Find max clk request in last few requests */ + cam_sfe_top_delay_clk_reduction(top_priv, final_clk_rate); + if (!(*final_clk_rate)) { + CAM_ERR(CAM_PERF, "Final clock rate is zero"); + return -EINVAL; + } + } + + if (*final_clk_rate == top_priv->applied_clk_rate) + top_priv->clk_state = CAM_CLK_BW_STATE_UNCHANGED; + else if (*final_clk_rate > top_priv->applied_clk_rate) + top_priv->clk_state = CAM_CLK_BW_STATE_INCREASE; + else + top_priv->clk_state = CAM_CLK_BW_STATE_DECREASE; + + CAM_DBG(CAM_PERF, "SFE:%d Clock state:%s hw_clk_rate:%llu req_id:%ld", + top_priv->common_data.hw_intf->hw_idx, + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + top_priv->applied_clk_rate, (start_stop ? 
-1 : request_id)); + + return rc; +} + + +static int cam_sfe_top_get_base( + struct cam_sfe_top_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t mem_base = 0; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_sfe_soc_private *soc_private; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_SFE, "Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res || !top_priv || + !top_priv->common_data.soc_info) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + soc_private = top_priv->common_data.soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "soc_private is null"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_SFE, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_changebase(); + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_SFE, "buf size: %d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE( + top_priv->common_data.soc_info, + SFE_CORE_BASE_IDX); + if (mem_base == -1) { + CAM_ERR(CAM_SFE, "failed to get mem_base, index: %d num_reg_map: %u", + SFE_CORE_BASE_IDX, top_priv->common_data.soc_info->num_reg_map); + return -EINVAL; + } + + if (cdm_args->cdm_id == CAM_CDM_RT) { + if (!soc_private->rt_wrapper_base) { + CAM_ERR(CAM_ISP, "rt_wrapper_base_addr is null"); + return -EINVAL; + } + mem_base -= soc_private->rt_wrapper_base; + } + + CAM_DBG(CAM_SFE, "core %d mem_base 0x%x, CDM Id: %d", + top_priv->common_data.soc_info->index, mem_base, + cdm_args->cdm_id); + + cdm_util_ops->cdm_write_changebase(cdm_args->cmd.cmd_buf_addr, mem_base); + cdm_args->cmd.used_bytes = (size * 4); + + return 0; +} + +static int cam_sfe_top_clock_update( + struct cam_sfe_top_priv *top_priv, + void *cmd_args, 
uint32_t arg_size) +{ + struct cam_sfe_clock_update_args *clk_update = NULL; + struct cam_isp_resource_node *res = NULL; + int i; + + if (arg_size != sizeof(struct cam_sfe_clock_update_args)) { + CAM_ERR(CAM_SFE, "Invalid cmd size"); + return -EINVAL; + } + + clk_update = + (struct cam_sfe_clock_update_args *)cmd_args; + if (!clk_update || !clk_update->node_res || !top_priv || + !top_priv->common_data.soc_info) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + res = clk_update->node_res; + + if (!res || !res->hw_intf->hw_priv) { + CAM_ERR(CAM_PERF, "Invalid inputs"); + return -EINVAL; + } + + + if (res->res_type != CAM_ISP_RESOURCE_SFE_IN || + res->res_id >= CAM_ISP_HW_SFE_IN_MAX) { + CAM_ERR(CAM_PERF, + "SFE: %d Invalid res_type: %d res_id: %d", + res->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + for (i = 0; i < top_priv->num_in_ports; i++) { + if (top_priv->in_rsrc[i].res_id == res->res_id) { + top_priv->req_clk_rate[i] = clk_update->clk_rate; + break; + } + } + + return 0; +} + +static int cam_sfe_set_top_debug( + struct cam_sfe_top_priv *top_priv, + void *cmd_args) +{ + int i; + uint32_t max_counters = top_priv->common_data.common_reg->num_perf_counters; + struct cam_sfe_debug_cfg_params *debug_cfg; + + debug_cfg = (struct cam_sfe_debug_cfg_params *)cmd_args; + if (!debug_cfg->cache_config) { + top_priv->sfe_debug_cfg = debug_cfg->u.dbg_cfg.sfe_debug_cfg; + top_priv->sensor_sel_diag_cfg = debug_cfg->u.dbg_cfg.sfe_sensor_sel; + + if (debug_cfg->u.dbg_cfg.num_counters <= max_counters) + for (i = 0; i < max_counters; i++) + top_priv->perf_counters[i].perf_counter_val = + debug_cfg->u.dbg_cfg.sfe_perf_counter_val[i]; + } + + return 0; +} + +static int cam_sfe_top_handle_overflow( + struct cam_sfe_top_priv *top_priv, void *cmd_args) +{ + struct cam_sfe_top_common_data *common_data; + struct cam_hw_soc_info *soc_info; + uint32_t bus_overflow_status, violation_status, tmp; + uint32_t i = 0; + struct cam_isp_hw_overflow_info 
*overflow_info = NULL; + + overflow_info = (struct cam_isp_hw_overflow_info *)cmd_args; + common_data = &top_priv->common_data; + soc_info = common_data->soc_info; + + bus_overflow_status = cam_io_r(soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base + + top_priv->common_data.common_reg->bus_overflow_status); + violation_status = cam_io_r(soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base + + top_priv->common_data.common_reg->ipp_violation_status); + + CAM_ERR(CAM_ISP, + "SFE%d src_clk_rate sw_client:%lu hw_client:[%lu %lu] bus_overflow_status:%s violation: %s", + soc_info->index, soc_info->applied_src_clk_rates.sw_client, + soc_info->applied_src_clk_rates.hw_client[soc_info->index].high, + soc_info->applied_src_clk_rates.hw_client[soc_info->index].low, + CAM_BOOL_TO_YESNO(bus_overflow_status), CAM_BOOL_TO_YESNO(violation_status)); + + tmp = bus_overflow_status; + while (tmp) { + if (tmp & 0x1) + CAM_ERR(CAM_ISP, "SFE Bus Overflow %s ", + top_priv->wr_client_desc[i].desc); + tmp = tmp >> 1; + i++; + } + + if (violation_status) + cam_sfe_top_print_ipp_violation_info(top_priv, violation_status); + cam_sfe_top_print_debug_reg_info(top_priv); + + if (bus_overflow_status) { + cam_cpas_log_votes(false); + overflow_info->is_bus_overflow = true; + } + + return 0; +} + +static int cam_sfe_top_apply_clk_bw_update(struct cam_sfe_top_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *hw_info = NULL; + struct cam_hw_intf *hw_intf = NULL; + struct cam_axi_vote *to_be_applied_axi_vote = NULL; + struct cam_isp_apply_clk_bw_args *clk_bw_args = NULL; + unsigned long final_clk_rate = 0; + uint64_t total_bw_new_vote = 0; + uint64_t request_id; + int rc = 0; + + if (arg_size != sizeof(struct cam_isp_apply_clk_bw_args)) { + CAM_ERR(CAM_SFE, "Invalid arg size: %u", arg_size); + return -EINVAL; + } + + clk_bw_args = (struct cam_isp_apply_clk_bw_args *)cmd_args; + request_id = clk_bw_args->request_id; + hw_intf = clk_bw_args->hw_intf; + if (!hw_intf) { + CAM_ERR(CAM_SFE, 
"Invalid hw_intf"); + return -EINVAL; + } + + hw_info = hw_intf->hw_priv; + if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_DBG(CAM_PERF, + "SFE:%d Not ready to set clocks yet :%d", + hw_intf->hw_idx, hw_info->hw_state); + goto end; + } + + if (clk_bw_args->skip_clk_data_rst) { + top_priv->skip_clk_data_rst = true; + CAM_DBG(CAM_SFE, "SFE:%u requested to avoid clk data rst", + hw_intf->hw_idx); + return 0; + } + + rc = cam_sfe_top_calc_hw_clk_rate(top_priv, false, &final_clk_rate, request_id); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Failed in calculating clock rate rc=%d", + hw_intf->hw_idx, rc); + goto end; + } + + rc = cam_sfe_top_calc_axi_bw_vote(top_priv, false, + &to_be_applied_axi_vote, &total_bw_new_vote, request_id); + if (rc) { + CAM_ERR(CAM_SFE, "SFE:%d Failed in calculating bw vote rc=%d", + hw_intf->hw_idx, rc); + goto end; + } + + if ((!to_be_applied_axi_vote) && (top_priv->bw_state != CAM_CLK_BW_STATE_UNCHANGED)) { + CAM_ERR(CAM_PERF, "SFE:%d Invalid BW vote for state:%s", hw_intf->hw_idx, + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state)); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_PERF, + "SFE:%d APPLY CLK/BW req_id:%ld clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, request_id, + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + + /* Determine BW and clock voting sequence according to state */ + if ((top_priv->clk_state == CAM_CLK_BW_STATE_UNCHANGED) && + (top_priv->bw_state == CAM_CLK_BW_STATE_UNCHANGED)) { + goto end; + } else if (top_priv->clk_state == CAM_CLK_BW_STATE_UNCHANGED) { + rc = cam_sfe_top_set_axi_bw_vote(top_priv, to_be_applied_axi_vote, + total_bw_new_vote, false, request_id); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Failed in voting final bw:%llu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, total_bw_new_vote, + 
cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + } else if (top_priv->bw_state == CAM_CLK_BW_STATE_UNCHANGED) { + rc = cam_sfe_top_set_hw_clk_rate(top_priv, final_clk_rate, false, request_id, + clk_bw_args->is_drv_config_en); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Failed in voting final clk:%lu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, final_clk_rate, + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + } else if (top_priv->clk_state == CAM_CLK_BW_STATE_INCREASE) { + /* Set BW first, followed by Clock */ + rc = cam_sfe_top_set_axi_bw_vote(top_priv, to_be_applied_axi_vote, + total_bw_new_vote, false, request_id); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Failed in voting final bw:%llu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, total_bw_new_vote, + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + + rc = cam_sfe_top_set_hw_clk_rate(top_priv, final_clk_rate, false, request_id, + clk_bw_args->is_drv_config_en); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Failed in voting final clk:%lu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, final_clk_rate, + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + } else if (top_priv->clk_state == CAM_CLK_BW_STATE_DECREASE) { + /* Set Clock first, followed by BW */ + rc = cam_sfe_top_set_hw_clk_rate(top_priv, final_clk_rate, false, request_id, + clk_bw_args->is_drv_config_en); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Failed in voting final 
clk:%lu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, final_clk_rate, + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + + rc = cam_sfe_top_set_axi_bw_vote(top_priv, to_be_applied_axi_vote, + total_bw_new_vote, false, request_id); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Failed in voting final bw:%llu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, total_bw_new_vote, + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + } else { + CAM_ERR(CAM_SFE, + "Invalid state to apply CLK/BW clk_state:%s bw_state:%s is_drv_config_en:%s", + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state), + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + rc = -EINVAL; + goto end; + } + + if (top_priv->clk_state != CAM_CLK_BW_STATE_UNCHANGED) + clk_bw_args->clock_updated = true; + +end: + top_priv->clk_state = CAM_CLK_BW_STATE_INIT; + top_priv->bw_state = CAM_CLK_BW_STATE_INIT; + return rc; +} + +static int cam_sfe_top_apply_clock_start_stop(struct cam_sfe_top_priv *top_priv) +{ + int rc = 0; + unsigned long final_clk_rate = 0; + + rc = cam_sfe_top_calc_hw_clk_rate(top_priv, true, &final_clk_rate, 0); + if (rc) { + CAM_ERR(CAM_SFE, + "SFE:%d Failed in calculating clock rate rc=%d", + top_priv->common_data.hw_intf->hw_idx, rc); + goto end; + } + + if (top_priv->clk_state == CAM_CLK_BW_STATE_UNCHANGED) + goto end; + + rc = cam_sfe_top_set_hw_clk_rate(top_priv, final_clk_rate, true, 0, false); + if (rc) { + CAM_ERR(CAM_SFE, "SFE:%d Failed in voting final clk:%lu clk_state:%s", + top_priv->common_data.hw_intf->hw_idx, final_clk_rate, + cam_sfe_top_clk_bw_state_to_string(top_priv->clk_state)); + goto end; + } + +end: + 
top_priv->clk_state = CAM_CLK_BW_STATE_INIT; + top_priv->skip_clk_data_rst = false; + return rc; +} + +static int cam_sfe_top_apply_bw_start_stop(struct cam_sfe_top_priv *top_priv) +{ + int rc = 0; + uint64_t total_bw_new_vote = 0; + struct cam_axi_vote *to_be_applied_axi_vote = NULL; + + rc = cam_sfe_top_calc_axi_bw_vote(top_priv, true, + &to_be_applied_axi_vote, &total_bw_new_vote, 0); + if (rc) { + CAM_ERR(CAM_SFE, "SFE:%d Failed in calculating bw vote rc=%d", + top_priv->common_data.hw_intf->hw_idx, rc); + goto end; + } + + if (top_priv->bw_state == CAM_CLK_BW_STATE_UNCHANGED) + goto end; + + rc = cam_sfe_top_set_axi_bw_vote(top_priv, to_be_applied_axi_vote, total_bw_new_vote, + true, 0); + if (rc) { + CAM_ERR(CAM_SFE, "SFE:%d Failed in voting final bw:%llu bw_state:%s", + top_priv->common_data.hw_intf->hw_idx, total_bw_new_vote, + cam_sfe_top_clk_bw_state_to_string(top_priv->bw_state)); + goto end; + } + +end: + top_priv->bw_state = CAM_CLK_BW_STATE_INIT; + return rc; +} + +static int cam_sfe_top_apply_fcg_update( + struct cam_sfe_top_priv *top_priv, + struct cam_isp_hw_fcg_update *fcg_update, + struct cam_cdm_utils_ops *cdm_util_ops) +{ + struct cam_isp_fcg_config_internal *fcg_config; + struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx; + struct cam_isp_predict_fcg_config_internal *fcg_pr; + struct cam_sfe_top_hw_info *hw_info; + struct cam_sfe_fcg_module_info *fcg_module_info; + uint32_t size, fcg_index_shift; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + int rc = 0, i, j = 0; + + if (!top_priv || !fcg_update || (fcg_update->prediction_idx == 0)) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + hw_info = top_priv->hw_info; + fcg_config = (struct cam_isp_fcg_config_internal *)fcg_update->data; + if (!fcg_config || !hw_info) { + CAM_ERR(CAM_SFE, "Invalid config params"); + return -EINVAL; + } + + fcg_module_info = hw_info->modules_hw_info->fcg_module_info; + if (!fcg_module_info) { + CAM_ERR(CAM_SFE, "Invalid FCG common 
data"); + return -EINVAL; + } + + if (fcg_config->num_ch_ctx > CAM_ISP_MAX_FCG_CH_CTXS) { + CAM_ERR(CAM_SFE, "out of bound %d", + fcg_config->num_ch_ctx); + return -EINVAL; + } + + reg_val_pair = CAM_MEM_ZALLOC_ARRAY(fcg_module_info->max_reg_val_pair_size, + sizeof(uint32_t), + GFP_KERNEL); + if (!reg_val_pair) { + CAM_ERR(CAM_SFE, "Failed allocating memory for reg val pair"); + return -ENOMEM; + } + + fcg_index_shift = fcg_module_info->fcg_index_shift; + + for (i = 0, j = 0; i < fcg_config->num_ch_ctx; i++) { + if (j >= fcg_module_info->max_reg_val_pair_size) { + CAM_ERR(CAM_SFE, "reg_val_pair %d exceeds the array limit %u", + j, fcg_module_info->max_reg_val_pair_size); + rc = -ENOMEM; + goto free_mem; + } + + fcg_ch_ctx = &fcg_config->ch_ctx_fcg_configs[i]; + if (!fcg_ch_ctx) { + CAM_ERR(CAM_SFE, "Failed in FCG channel/context dereference"); + rc = -EINVAL; + goto free_mem; + } + + fcg_pr = &fcg_ch_ctx->predicted_fcg_configs[ + fcg_update->prediction_idx - 1]; + if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_PHASE) { + switch (fcg_ch_ctx->fcg_ch_ctx_id) { + case CAM_ISP_FCG_MASK_CH1: + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_ch1_phase_index_cfg_0, + (fcg_pr->phase_index_r | + fcg_pr->phase_index_g << fcg_index_shift)); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_ch1_phase_index_cfg_1, + fcg_pr->phase_index_b); + CAM_DBG(CAM_SFE, + "Program FCG registers for SFE channel 0x%x, phase_index_cfg_0: %u, phase_index_cfg_1: %u", + fcg_ch_ctx->fcg_ch_ctx_id, + (fcg_pr->phase_index_r | + (fcg_pr->phase_index_g << fcg_index_shift)), + fcg_pr->phase_index_b); + break; + case CAM_ISP_FCG_MASK_CH2: + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_ch2_phase_index_cfg_0, + (fcg_pr->phase_index_r | + fcg_pr->phase_index_g << fcg_index_shift)); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + 
fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_ch2_phase_index_cfg_1, + fcg_pr->phase_index_b); + CAM_DBG(CAM_SFE, + "Program FCG registers for SFE channel 0x%x, phase_index_cfg_0: %u, phase_index_cfg_1: %u", + fcg_ch_ctx->fcg_ch_ctx_id, + (fcg_pr->phase_index_r | + (fcg_pr->phase_index_g << fcg_index_shift)), + fcg_pr->phase_index_b); + break; + default: + CAM_ERR(CAM_SFE, "Unsupported channel id: 0x%x", + fcg_ch_ctx->fcg_ch_ctx_id); + rc = -EINVAL; + goto free_mem; + } + } + + if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_STATS) { + switch (fcg_ch_ctx->fcg_ch_ctx_id) { + case CAM_ISP_FCG_MASK_CH1: + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_ch1_stats_phase_index_cfg_0, + (fcg_pr->stats_index_r | + fcg_pr->stats_index_g << fcg_index_shift)); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_ch1_stats_phase_index_cfg_1, + fcg_pr->stats_index_b); + CAM_DBG(CAM_SFE, + "Program FCG registers for SFE channel 0x%x, stats_index_cfg_0: %u, stats_index_cfg_1: %u", + fcg_ch_ctx->fcg_ch_ctx_id, + (fcg_pr->phase_index_r | + (fcg_pr->phase_index_g << fcg_index_shift)), + fcg_pr->phase_index_b); + break; + case CAM_ISP_FCG_MASK_CH2: + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_ch2_stats_phase_index_cfg_0, + (fcg_pr->stats_index_r | + fcg_pr->stats_index_g << fcg_index_shift)); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_ch2_stats_phase_index_cfg_1, + fcg_pr->stats_index_b); + CAM_DBG(CAM_SFE, + "Program FCG registers for SFE channel 0x%x, stats_index_cfg_0: %u, stats_index_cfg_1: %u", + fcg_ch_ctx->fcg_ch_ctx_id, + (fcg_pr->phase_index_r | + (fcg_pr->phase_index_g << fcg_index_shift)), + fcg_pr->phase_index_b); + break; + default: + CAM_ERR(CAM_SFE, "Unsupported channel id: 0x%x", + 
fcg_ch_ctx->fcg_ch_ctx_id); + rc = -EINVAL; + goto free_mem; + } + } + } + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + if ((size * 4) != fcg_update->cmd_size) { + CAM_ERR(CAM_SFE, + "Failed! Buf size:%d is wrong, expected size: %d", + fcg_update->cmd_size, size * 4); + rc = -ENOMEM; + goto free_mem; + } + + cdm_util_ops->cdm_write_regrandom( + (uint32_t *)fcg_update->cmd_buf_addr, + num_regval_pairs, reg_val_pair); + } else { + CAM_WARN(CAM_SFE, "No reg val pairs"); + } + +free_mem: + CAM_MEM_FREE(reg_val_pair); + return rc; +} + +static int cam_sfe_top_get_fcg_buf_size( + struct cam_sfe_top_priv *top_priv, + struct cam_isp_hw_fcg_get_size *fcg_get_size, + struct cam_cdm_utils_ops *cdm_util_ops) +{ + struct cam_sfe_top_hw_info *hw_info; + struct cam_sfe_modules_common_reg_offset *modules_hw_info; + uint32_t num_types; + + if (!top_priv) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + hw_info = top_priv->hw_info; + if (!hw_info) { + CAM_ERR(CAM_SFE, "Invalid config params"); + return -EINVAL; + } + + modules_hw_info = hw_info->modules_hw_info; + if (!modules_hw_info) { + CAM_ERR(CAM_SFE, "Invalid modules hw info"); + return -EINVAL; + } + + if (!modules_hw_info->fcg_supported) { + fcg_get_size->fcg_supported = false; + CAM_DBG(CAM_SFE, "FCG is not supported by hardware"); + return 0; + } + + fcg_get_size->fcg_supported = true; + num_types = fcg_get_size->num_types; + if (num_types == 0) { + CAM_ERR(CAM_SFE, "Number of types(STATS/PHASE) requested is empty"); + return -EINVAL; + } + + fcg_get_size->kmd_size = + cdm_util_ops->cdm_required_size_reg_random( + num_types * modules_hw_info->fcg_module_info->fcg_type_size); + return 0; +} + +static int cam_sfe_top_fcg_config( + struct cam_sfe_top_priv *top_priv, + void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_fcg_cmd *fcg_cmd; + struct cam_cdm_utils_ops *cdm_util_ops; + int rc; + + if (arg_size != 
sizeof(struct cam_isp_hw_fcg_cmd)) { + CAM_ERR(CAM_SFE, "Invalid cmd size, arg_size: %u, expected size: %u", + arg_size, sizeof(struct cam_isp_hw_fcg_cmd)); + return -EINVAL; + } + + fcg_cmd = (struct cam_isp_hw_fcg_cmd *) cmd_args; + if (!fcg_cmd || !fcg_cmd->res) { + CAM_ERR(CAM_SFE, "Invalid cmd args"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)fcg_cmd->res->cdm_ops; + if (!cdm_util_ops) { + CAM_ERR(CAM_SFE, "Invalid CDM ops"); + return -EINVAL; + } + + if (fcg_cmd->get_size_flag) { + struct cam_isp_hw_fcg_get_size *fcg_get_size; + + fcg_get_size = &fcg_cmd->u.fcg_get_size; + rc = cam_sfe_top_get_fcg_buf_size(top_priv, fcg_get_size, cdm_util_ops); + } else { + struct cam_isp_hw_fcg_update *fcg_update; + + fcg_update = &fcg_cmd->u.fcg_update; + rc = cam_sfe_top_apply_fcg_update(top_priv, fcg_update, cdm_util_ops); + } + + return rc; +} + +int cam_sfe_top_process_cmd(void *priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_sfe_top_priv *top_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sfe_soc_private *soc_private = NULL; + + if (!priv) { + CAM_ERR(CAM_SFE, "Invalid top_priv"); + return -EINVAL; + } + + top_priv = (struct cam_sfe_top_priv *) priv; + soc_info = top_priv->common_data.soc_info; + soc_private = soc_info->soc_private; + + if (!soc_private) { + CAM_ERR(CAM_SFE, "soc private is NULL"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_CHANGE_BASE: + rc = cam_sfe_top_get_base(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CLOCK_UPDATE: + rc = cam_sfe_top_clock_update(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_BW_UPDATE_V2: + rc = cam_sfe_top_bw_update(soc_private, top_priv, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BW_CONTROL: + rc = cam_sfe_top_bw_control(soc_private, top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_CORE_CONFIG: + rc = cam_sfe_top_core_cfg(top_priv, cmd_args, + 
arg_size); + break; + case CAM_ISP_HW_CMD_SET_SFE_DEBUG_CFG: + rc = cam_sfe_set_top_debug(top_priv, cmd_args); + break; + case CAM_ISP_HW_NOTIFY_OVERFLOW: + rc = cam_sfe_top_handle_overflow(top_priv, cmd_args); + break; + case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE: + rc = cam_sfe_top_apply_clk_bw_update(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_FCG_CONFIG: + rc = cam_sfe_top_fcg_config(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_QUERY_CAP: { + struct cam_isp_hw_cap *sfe_cap; + struct cam_sfe_fcg_module_info *fcg_module_info; + + sfe_cap = (struct cam_isp_hw_cap *) cmd_args; + sfe_cap->num_perf_counters = + top_priv->common_data.common_reg->num_perf_counters; + if (top_priv->hw_info->modules_hw_info->fcg_supported) { + fcg_module_info = + top_priv->hw_info->modules_hw_info->fcg_module_info; + sfe_cap->fcg_supported = true; + sfe_cap->max_fcg_ch_ctx = + fcg_module_info->max_fcg_ch_ctx; + sfe_cap->max_fcg_predictions = + fcg_module_info->max_fcg_predictions; + } + } + break; + default: + CAM_ERR(CAM_SFE, "Invalid cmd type: %d", cmd_type); + rc = -EINVAL; + break; + } + + return rc; +} + +int cam_sfe_top_reserve(void *device_priv, + void *reserve_args, uint32_t arg_size) +{ + struct cam_sfe_top_priv *top_priv; + struct cam_sfe_acquire_args *args; + struct cam_sfe_hw_sfe_in_acquire_args *acquire_args; + int rc = -EINVAL, i; + + if (!device_priv || !reserve_args) { + CAM_ERR(CAM_SFE, + "Error invalid input arguments"); + return rc; + } + + top_priv = (struct cam_sfe_top_priv *)device_priv; + args = (struct cam_sfe_acquire_args *)reserve_args; + acquire_args = &args->sfe_in; + + if (top_priv->reserve_cnt) { + if (top_priv->priv_per_stream != args->priv) { + CAM_DBG(CAM_SFE, + "Acquiring same SFE[%u] HW res: %u for different streams", + top_priv->common_data.hw_intf->hw_idx, acquire_args->res_id); + rc = -EINVAL; + return rc; + } + } + + for (i = 0; i < CAM_SFE_TOP_IN_PORT_MAX; i++) { + CAM_DBG(CAM_SFE, "i: %d res_id: %d state: %d", i, + 
acquire_args->res_id, top_priv->in_rsrc[i].res_state); + + if ((top_priv->in_rsrc[i].res_id == acquire_args->res_id) && + (top_priv->in_rsrc[i].res_state == + CAM_ISP_RESOURCE_STATE_AVAILABLE)) { + CAM_DBG(CAM_SFE, + "SFE [%u] for rsrc: %u acquired", + top_priv->in_rsrc[i].hw_intf->hw_idx, + acquire_args->res_id); + + /* Acquire ownership */ + if (top_priv->reserve_cnt == 0) { + rc = cam_vmrm_soc_acquire_resources(CAM_HW_ID_SFE0 + + top_priv->in_rsrc[i].hw_intf->hw_idx); + if (rc) { + CAM_ERR(CAM_SFE, "SFE[%u] acquire ownership failed", + top_priv->in_rsrc[i].hw_intf->hw_idx); + return rc; + } + } + + top_priv->in_rsrc[i].cdm_ops = acquire_args->cdm_ops; + top_priv->in_rsrc[i].tasklet_info = args->tasklet; + top_priv->in_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_RESERVED; + acquire_args->rsrc_node = + &top_priv->in_rsrc[i]; + rc = 0; + break; + } + } + + if (!rc) { + top_priv->reserve_cnt++; + top_priv->priv_per_stream = args->priv; + top_priv->event_cb = args->event_cb; + } + + return rc; +} + +int cam_sfe_top_release(void *device_priv, + void *release_args, uint32_t arg_size) +{ + struct cam_sfe_top_priv *top_priv; + struct cam_isp_resource_node *in_res; + int rc = 0; + + if (!device_priv || !release_args) { + CAM_ERR(CAM_SFE, "Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_sfe_top_priv *)device_priv; + in_res = (struct cam_isp_resource_node *)release_args; + + CAM_DBG(CAM_SFE, + "Release for SFE [%u] resource id: %u in state: %d", + in_res->hw_intf->hw_idx, in_res->res_id, + in_res->res_state); + if (in_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_SFE, "SFE [%u] invalid res_state: %d", + in_res->hw_intf->hw_idx, in_res->res_state); + return -EINVAL; + } + + in_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + in_res->cdm_ops = NULL; + in_res->tasklet_info = NULL; + if (top_priv->reserve_cnt) + top_priv->reserve_cnt--; + + if (!top_priv->reserve_cnt) { + top_priv->priv_per_stream = NULL; + top_priv->event_cb 
= NULL; + rc = cam_vmrm_soc_release_resources(CAM_HW_ID_SFE0 + in_res->hw_intf->hw_idx); + if (rc) { + CAM_ERR(CAM_SFE, "SFE[%u] vmrm soc release resources failed", + in_res->hw_intf->hw_idx); + } + } + + return rc; +} + +static int cam_sfe_top_get_evt_payload( + struct cam_sfe_top_priv *top_priv, + struct cam_sfe_top_irq_evt_payload **evt_payload) +{ + int rc = 0; + + spin_lock(&top_priv->spin_lock); + if (list_empty(&top_priv->common_data.free_payload_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free CAMIF LITE event payload"); + *evt_payload = NULL; + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry( + &top_priv->common_data.free_payload_list, + struct cam_sfe_top_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); + +done: + spin_unlock(&top_priv->spin_lock); + return rc; +} + +static int cam_sfe_top_put_evt_payload( + struct cam_sfe_top_priv *top_priv, + struct cam_sfe_top_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!top_priv) { + CAM_ERR(CAM_SFE, "Invalid param core_info NULL"); + return -EINVAL; + } + if (*evt_payload == NULL) { + CAM_ERR(CAM_SFE, "No payload to put"); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_sfe_top_irq_evt_payload); + spin_lock_irqsave(&top_priv->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &top_priv->common_data.free_payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&top_priv->spin_lock, flags); + + CAM_DBG(CAM_SFE, "Done"); + return 0; +} + +static int cam_sfe_top_handle_err_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int rc = 0, i; + uint32_t irq_status = 0; + void __iomem *base = NULL; + struct cam_sfe_top_priv *top_priv; + struct cam_sfe_top_irq_evt_payload *evt_payload; + + top_priv = th_payload->handler_priv; + irq_status = th_payload->evt_status_arr[0]; + + CAM_DBG(CAM_SFE, "Top error IRQ Received: 0x%x", irq_status); + + base = 
top_priv->common_data.soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base; + if (irq_status & top_priv->common_data.common_reg_data->error_irq_mask) { + cam_irq_controller_disable_all( + top_priv->common_data.sfe_irq_controller); + } + + rc = cam_sfe_top_get_evt_payload(top_priv, &evt_payload); + if (rc) + return rc; + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = + th_payload->evt_status_arr[i]; + + cam_isp_hw_get_timestamp(&evt_payload->ts); + evt_payload->violation_status = + cam_io_r(base + + top_priv->common_data.common_reg->ipp_violation_status); + + th_payload->evt_payload_priv = evt_payload; + + return rc; +} + +static int cam_sfe_top_handle_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int rc = 0, i; + uint32_t irq_status = 0; + struct cam_sfe_top_priv *top_priv; + struct cam_isp_resource_node *res; + struct cam_sfe_path_data *path_data; + struct cam_sfe_top_irq_evt_payload *evt_payload; + + res = th_payload->handler_priv; + path_data = res->res_priv; + top_priv = path_data->top_priv; + + rc = cam_sfe_top_get_evt_payload(top_priv, &evt_payload); + if (rc) + return rc; + + irq_status = th_payload->evt_status_arr[0]; + CAM_DBG(CAM_SFE, "SFE top irq status: 0x%x", + irq_status); + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = + th_payload->evt_status_arr[i]; + + cam_isp_hw_get_timestamp(&evt_payload->ts); + th_payload->evt_payload_priv = evt_payload; + return rc; +} + +void cam_sfe_top_sel_frame_counter( + uint32_t res_id, uint32_t *val, + bool read_counter, + struct cam_sfe_path_data *path_data) +{ + const uint32_t frame_cnt_shift = 0x4; + uint32_t frame_cnt0 = 0, frame_cnt1 = 0; + struct cam_sfe_top_common_reg_offset *common_reg = NULL; + + if (read_counter) { + common_reg = path_data->common_reg; + frame_cnt0 = cam_io_r(path_data->mem_base + + common_reg->diag_sensor_frame_cnt_status0); + frame_cnt1 = cam_io_r(path_data->mem_base + + 
common_reg->diag_sensor_frame_cnt_status1); + } + + switch (res_id) { + case CAM_ISP_HW_SFE_IN_PIX: + *val |= (1 << frame_cnt_shift); + if (read_counter) + CAM_INFO(CAM_SFE, "IPP frame_cnt 0x%x", + frame_cnt0 & 0xFF); + break; + case CAM_ISP_HW_SFE_IN_RDI0: + *val |= (1 << (frame_cnt_shift + 1)); + if (read_counter) + CAM_INFO(CAM_SFE, "RDI0 frame_cnt 0x%x", + (frame_cnt0 >> 16) & 0xFF); + break; + case CAM_ISP_HW_SFE_IN_RDI1: + *val |= (1 << (frame_cnt_shift + 2)); + if (read_counter) + CAM_INFO(CAM_SFE, "RDI1 frame_cnt 0x%x", + (frame_cnt0 >> 24) & 0xFF); + break; + case CAM_ISP_HW_SFE_IN_RDI2: + *val |= (1 << (frame_cnt_shift + 3)); + if (read_counter) + CAM_INFO(CAM_SFE, "RDI2 frame_cnt 0x%x", + frame_cnt1 & 0xFF); + break; + case CAM_ISP_HW_SFE_IN_RDI3: + *val |= (1 << (frame_cnt_shift + 4)); + if (read_counter) + CAM_INFO(CAM_SFE, "RDI3 frame_cnt 0x%x", + (frame_cnt1 >> 16) & 0xFF); + break; + case CAM_ISP_HW_SFE_IN_RDI4: + *val |= (1 << (frame_cnt_shift + 5)); + if (read_counter) + CAM_INFO(CAM_SFE, "RDI4 frame_cnt 0x%x", + (frame_cnt1 >> 24) & 0xFF); + break; + default: + break; + } +} + +static void cam_sfe_top_print_ipp_violation_info( + struct cam_sfe_top_priv *top_priv, + uint32_t violation_status) +{ + struct cam_sfe_top_common_data *common_data = &top_priv->common_data; + struct cam_hw_soc_info *soc_info = common_data->soc_info; + uint32_t val = violation_status; + + if (top_priv->hw_info->ipp_module_desc) + CAM_ERR(CAM_SFE, "SFE[%u] IPP Violation Module id: [%u %s]", + soc_info->index, + top_priv->hw_info->ipp_module_desc[val].id, + top_priv->hw_info->ipp_module_desc[val].desc); + +} + +static void cam_sfe_top_print_top_irq_error( + struct cam_sfe_top_priv *top_priv, + struct cam_sfe_top_irq_evt_payload *payload, + uint32_t irq_status, + uint32_t violation_status) +{ + uint32_t i = 0; + + for (i = 0; i < top_priv->hw_info->num_top_errors; i++) { + if (top_priv->hw_info->top_err_desc[i].bitmask & irq_status) { + CAM_ERR(CAM_SFE, "SFE[%u] %s occurred 
at [%llu: %09llu]", + top_priv->common_data.soc_info->index, + top_priv->hw_info->top_err_desc[i].err_name, + payload->ts.mono_time.tv_sec, + payload->ts.mono_time.tv_nsec); + CAM_ERR(CAM_SFE, "%s", top_priv->hw_info->top_err_desc[i].desc); + if (top_priv->hw_info->top_err_desc[i].debug) + CAM_ERR(CAM_SFE, "Debug: %s", + top_priv->hw_info->top_err_desc[i].debug); + } + } + + if (irq_status & top_priv->common_data.common_reg->ipp_violation_mask) + cam_sfe_top_print_ipp_violation_info(top_priv, violation_status); + +} + +static int cam_sfe_top_handle_err_irq_bottom_half( + void *handler_priv, void *evt_payload_priv) +{ + int i; + uint32_t viol_sts; + uint32_t irq_status[CAM_SFE_IRQ_REGISTERS_MAX] = {0}; + enum cam_sfe_hw_irq_status ret = CAM_SFE_IRQ_STATUS_MAX; + struct cam_isp_hw_event_info evt_info; + struct cam_sfe_top_priv *top_priv = handler_priv; + struct cam_sfe_top_irq_evt_payload *payload = evt_payload_priv; + + for (i = 0; i < CAM_SFE_IRQ_REGISTERS_MAX; i++) + irq_status[i] = payload->irq_reg_val[i]; + + evt_info.hw_idx = top_priv->common_data.soc_info->index; + evt_info.hw_type = CAM_ISP_HW_TYPE_SFE; + evt_info.res_type = CAM_ISP_RESOURCE_SFE_IN; + evt_info.reg_val = 0; + + if (irq_status[0] & top_priv->common_data.common_reg_data->error_irq_mask) { + struct cam_isp_hw_error_event_info err_evt_info; + + viol_sts = payload->violation_status; + CAM_INFO(CAM_SFE, "Violation status 0x%x", viol_sts); + cam_sfe_top_print_top_irq_error(top_priv, payload, + irq_status[0], viol_sts); + err_evt_info.err_type = CAM_SFE_IRQ_STATUS_VIOLATION; + evt_info.event_data = (void *)&err_evt_info; + cam_sfe_top_print_debug_reg_info(top_priv); + if (top_priv->event_cb) + top_priv->event_cb(top_priv->priv_per_stream, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + + ret = CAM_SFE_IRQ_STATUS_VIOLATION; + } + + cam_sfe_top_put_evt_payload(top_priv, &payload); + + return ret; +} + +static int cam_sfe_top_handle_irq_bottom_half( + void *handler_priv, void *evt_payload_priv) +{ + int 
i; + uint32_t val0, val1, frame_cnt = 0, offset0, offset1; + uint32_t irq_status[CAM_SFE_IRQ_REGISTERS_MAX] = {0}; + enum cam_sfe_hw_irq_status ret = CAM_SFE_IRQ_STATUS_MAX; + struct cam_isp_resource_node *res = handler_priv; + struct cam_sfe_path_data *path_data = res->res_priv; + struct cam_sfe_top_priv *top_priv = path_data->top_priv; + struct cam_sfe_top_irq_evt_payload *payload = evt_payload_priv; + + for (i = 0; i < CAM_SFE_IRQ_REGISTERS_MAX; i++) + irq_status[i] = payload->irq_reg_val[i]; + + if (irq_status[0] & path_data->path_reg_data->subscribe_irq_mask) { + if (irq_status[0] & path_data->path_reg_data->sof_irq_mask) { + CAM_DBG(CAM_SFE, "SFE:%d Received %s SOF", + res->hw_intf->hw_idx, + res->res_name); + offset0 = path_data->common_reg->diag_sensor_status_0; + offset1 = path_data->common_reg->diag_sensor_status_1; + /* check for any debug info at SOF */ + if (top_priv->sfe_debug_cfg & + SFE_DEBUG_ENABLE_SENSOR_DIAG_INFO) { + val0 = cam_io_r(path_data->mem_base + + offset0); + val1 = cam_io_r(path_data->mem_base + + offset1); + CAM_INFO(CAM_SFE, + "SFE:%d HBI: 0x%x VBI: 0x%x NEQ_HBI: %s HBI_MIN_ERR: %s", + res->hw_intf->hw_idx, (val0 & 0x3FFF), val1, + (val0 & (0x4000) ? "TRUE" : "FALSE"), + (val0 & (0x8000) ? 
"TRUE" : "FALSE")); + } + + if (top_priv->sfe_debug_cfg & + SFE_DEBUG_ENABLE_FRAME_COUNTER) + cam_sfe_top_sel_frame_counter( + res->res_id, &frame_cnt, + true, path_data); + + cam_sfe_top_dump_perf_counters("SOF", res->res_name, top_priv); + } + + if (irq_status[0] & + path_data->path_reg_data->eof_irq_mask) { + CAM_DBG(CAM_SFE, "SFE:%d Received %s EOF", + res->hw_intf->hw_idx, + res->res_name); + + cam_sfe_top_dump_perf_counters("EOF", res->res_name, top_priv); + } + ret = CAM_SFE_IRQ_STATUS_SUCCESS; + } + + cam_sfe_top_put_evt_payload(top_priv, &payload); + + return ret; +} + +int cam_sfe_top_start( + void *priv, void *start_args, uint32_t arg_size) +{ + int rc = -EINVAL; + uint32_t val = 0, diag_cfg = 0; + bool debug_cfg_enabled = false; + struct cam_sfe_top_priv *top_priv; + struct cam_isp_resource_node *sfe_res; + struct cam_hw_info *hw_info = NULL; + struct cam_sfe_path_data *path_data; + uint32_t error_mask[CAM_SFE_IRQ_REGISTERS_MAX]; + uint32_t sof_eof_mask[CAM_SFE_IRQ_REGISTERS_MAX]; + uint32_t core_cfg = 0, i = 0; + + if (!priv || !start_args) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + top_priv = (struct cam_sfe_top_priv *)priv; + sfe_res = (struct cam_isp_resource_node *) start_args; + + hw_info = (struct cam_hw_info *)sfe_res->hw_intf->hw_priv; + if (!hw_info) { + CAM_ERR(CAM_SFE, "Invalid input"); + return rc; + } + + if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_SFE, "SFE HW [%u] not powered up", + hw_info->soc_info.index); + rc = -EPERM; + return rc; + } + + path_data = (struct cam_sfe_path_data *)sfe_res->res_priv; + rc = cam_sfe_top_apply_clock_start_stop(top_priv); + if (rc) { + CAM_ERR(CAM_SFE, "SFE:%d Failed in applying start clock rc:%d", + hw_info->soc_info.index, rc); + return rc; + } + + rc = cam_sfe_top_apply_bw_start_stop(top_priv); + if (rc) { + CAM_ERR(CAM_SFE, "SFE:%d Failed in applying start bw rc:%d", + hw_info->soc_info.index, rc); + return rc; + } + + core_cfg = cam_io_r_mb(path_data->mem_base 
+ + path_data->common_reg->core_cfg); + + /* core cfg updated via CDM */ + CAM_DBG(CAM_SFE, "SFE HW [%u] core_cfg: 0x%x", + hw_info->soc_info.index, core_cfg); + + for (i = 0; i < path_data->common_reg->num_sfe_mode; i++) { + if (path_data->common_reg->sfe_mode[i].value == + core_cfg) { + CAM_DBG(CAM_SFE, "SFE HW [%u] : [%s]", + hw_info->soc_info.index, + path_data->common_reg->sfe_mode[i].desc); + break; + } + } + + /* First resource stream on */ + if (!top_priv->start_stop_cnt) { + + /* Enable debug cfg registers */ + cam_io_w(path_data->common_reg_data->top_debug_cfg_en, + path_data->mem_base + + path_data->common_reg->top_debug_cfg); + + /* Enables the context controller testbus*/ + if (path_data->common_reg->top_cc_test_bus_supported) { + for (i = 0; i < top_priv->hw_info->num_of_testbus; i++) { + if ((top_priv->sfe_debug_cfg & + top_priv->hw_info->test_bus_info[i].debugfs_val) || + top_priv->hw_info->test_bus_info[i].enable) { + top_priv->cc_testbus_sel_cfg = + top_priv->hw_info->test_bus_info[i].value; + break; + } + } + + if (top_priv->cc_testbus_sel_cfg) + cam_io_w(top_priv->cc_testbus_sel_cfg, + path_data->mem_base + + path_data->common_reg->top_cc_test_bus_ctrl); + } + + for (i = 0; i < top_priv->common_data.common_reg->num_perf_counters; i++) { + if (!top_priv->perf_counters[i].perf_counter_val) + continue; + + top_priv->perf_counters[i].dump_counter = true; + cam_io_w_mb(top_priv->perf_counters[i].perf_counter_val, + path_data->mem_base + + path_data->common_reg->perf_count_reg[i].perf_count_cfg); + CAM_DBG(CAM_SFE, "SFE [%u] perf_count_%d: 0x%x", + hw_info->soc_info.index, (i + 1), + top_priv->perf_counters[i].perf_counter_val); + } + } + + /* Enable sensor diag info */ + if (top_priv->sfe_debug_cfg & + SFE_DEBUG_ENABLE_SENSOR_DIAG_INFO) { + if ((top_priv->sensor_sel_diag_cfg) && + (top_priv->sensor_sel_diag_cfg < + CAM_SFE_TOP_IN_PORT_MAX)) + val |= top_priv->sensor_sel_diag_cfg << + path_data->common_reg_data->sensor_sel_shift; + debug_cfg_enabled = 
true; + } + + if (top_priv->sfe_debug_cfg & SFE_DEBUG_ENABLE_FRAME_COUNTER) { + cam_sfe_top_sel_frame_counter(sfe_res->res_id, &val, + false, path_data); + debug_cfg_enabled = true; + } + + if (debug_cfg_enabled) { + diag_cfg = cam_io_r(path_data->mem_base + + path_data->common_reg->diag_config); + diag_cfg |= val; + diag_cfg |= path_data->common_reg_data->enable_diagnostic_hw; + CAM_DBG(CAM_SFE, "Diag config 0x%x", diag_cfg); + cam_io_w(diag_cfg, + path_data->mem_base + + path_data->common_reg->diag_config); + } + + error_mask[0] = path_data->common_reg_data->error_irq_mask; + /* Enable error IRQ by default once */ + if (!top_priv->error_irq_handle) { + top_priv->error_irq_handle = cam_irq_controller_subscribe_irq( + top_priv->common_data.sfe_irq_controller, + CAM_IRQ_PRIORITY_0, + error_mask, + top_priv, + cam_sfe_top_handle_err_irq_top_half, + cam_sfe_top_handle_err_irq_bottom_half, + sfe_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (top_priv->error_irq_handle < 1) { + CAM_ERR(CAM_SFE, "Failed to subscribe Top IRQ"); + top_priv->error_irq_handle = 0; + return -EFAULT; + } + } + + /* Remove after driver stabilizes */ + top_priv->sfe_debug_cfg |= SFE_DEBUG_ENABLE_SOF_EOF_IRQ; + + if ((top_priv->sfe_debug_cfg & SFE_DEBUG_ENABLE_SOF_EOF_IRQ) || + (debug_cfg_enabled)) { + if (!path_data->sof_eof_handle) { + sof_eof_mask[0] = + path_data->path_reg_data->subscribe_irq_mask; + path_data->sof_eof_handle = + cam_irq_controller_subscribe_irq( + top_priv->common_data.sfe_irq_controller, + CAM_IRQ_PRIORITY_1, + sof_eof_mask, + sfe_res, + cam_sfe_top_handle_irq_top_half, + cam_sfe_top_handle_irq_bottom_half, + sfe_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (path_data->sof_eof_handle < 1) { + CAM_ERR(CAM_SFE, + "Failed to subscribe SOF/EOF IRQ"); + path_data->sof_eof_handle = 0; + return -EFAULT; + } + } + } + + sfe_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + top_priv->start_stop_cnt++; + return 0; +} + +int 
cam_sfe_top_stop( + void *priv, void *stop_args, uint32_t arg_size) +{ + int i; + bool debug_cfg_disable = false; + uint32_t val = 0, diag_cfg = 0; + struct cam_sfe_top_priv *top_priv; + struct cam_isp_resource_node *sfe_res; + struct cam_sfe_path_data *path_data; + + if (!priv || !stop_args) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + top_priv = (struct cam_sfe_top_priv *)priv; + sfe_res = (struct cam_isp_resource_node *) stop_args; + path_data = sfe_res->res_priv; + + if (sfe_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED || + sfe_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) + return 0; + + /* Unsubscribe for IRQs */ + sfe_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + for (i = 0; i < CAM_SFE_TOP_IN_PORT_MAX; i++) { + if (top_priv->in_rsrc[i].res_id == sfe_res->res_id) { + if (!top_priv->skip_clk_data_rst) + top_priv->req_clk_rate[i] = 0; + memset(&top_priv->req_axi_vote[i], 0, + sizeof(struct cam_axi_vote)); + top_priv->axi_vote_control[i] = + CAM_ISP_BW_CONTROL_EXCLUDE; + break; + } + } + + if (path_data->sof_eof_handle > 0) { + cam_irq_controller_unsubscribe_irq( + top_priv->common_data.sfe_irq_controller, + path_data->sof_eof_handle); + path_data->sof_eof_handle = 0; + } + + if (top_priv->sfe_debug_cfg & SFE_DEBUG_ENABLE_FRAME_COUNTER) { + cam_sfe_top_sel_frame_counter(sfe_res->res_id, &val, + false, path_data); + debug_cfg_disable = true; + } + + if (top_priv->start_stop_cnt) + top_priv->start_stop_cnt--; + + if (!top_priv->start_stop_cnt && + ((top_priv->sfe_debug_cfg & + SFE_DEBUG_ENABLE_FRAME_COUNTER) || + (top_priv->sfe_debug_cfg & + SFE_DEBUG_ENABLE_SENSOR_DIAG_INFO))) { + val |= path_data->common_reg_data->enable_diagnostic_hw; + debug_cfg_disable = true; + } + + if (debug_cfg_disable) { + diag_cfg = cam_io_r(path_data->mem_base + + path_data->common_reg->diag_config); + diag_cfg &= ~val; + cam_io_w(diag_cfg, + path_data->mem_base + + path_data->common_reg->diag_config); + } + + /* + * Reset clk rate & unsubscribe 
error irq + * when all resources are streamed off + */ + if (!top_priv->start_stop_cnt) { + if (!top_priv->skip_clk_data_rst) + top_priv->applied_clk_rate = 0; + + if (top_priv->error_irq_handle > 0) { + cam_irq_controller_unsubscribe_irq( + top_priv->common_data.sfe_irq_controller, + top_priv->error_irq_handle); + top_priv->error_irq_handle = 0; + } + + /* Reset perf counters at stream off */ + for (i = 0; i < top_priv->common_data.common_reg->num_perf_counters; i++) { + if (top_priv->perf_counters[i].dump_counter) + cam_io_w_mb(0x0, path_data->mem_base + + path_data->common_reg->perf_count_reg[i].perf_count_cfg); + top_priv->perf_counters[i].dump_counter = false; + } + } + + return 0; +} + +int cam_sfe_top_init_hw(void *priv, void *init_hw_args, uint32_t arg_size) +{ + struct cam_sfe_top_priv *top_priv = priv; + void __iomem *mem_base; + + if (!priv) { + CAM_ERR(CAM_SFE, "Invalid args"); + return -EINVAL; + } + + mem_base = top_priv->common_data.soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base; + + CAM_DBG(CAM_SFE, "SFE:%d hw-version:0x%x", + top_priv->common_data.hw_intf->hw_idx, + cam_io_r_mb(mem_base + top_priv->hw_info->common_reg->hw_version)); + + return 0; +} + +int cam_sfe_top_init( + uint32_t hw_version, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *sfe_irq_controller, + struct cam_sfe_top **sfe_top_ptr) +{ + int i, j, rc = 0; + struct cam_sfe_top_priv *top_priv = NULL; + struct cam_sfe_path_data *path_data = NULL; + struct cam_sfe_top *sfe_top; + struct cam_sfe_top_hw_info *sfe_top_hw_info = + (struct cam_sfe_top_hw_info *)top_hw_info; + + sfe_top = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_top), GFP_KERNEL); + if (!sfe_top) { + CAM_DBG(CAM_SFE, "Error, Failed to alloc for sfe_top"); + rc = -ENOMEM; + goto end; + } + + top_priv = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_top_priv), + GFP_KERNEL); + if (!top_priv) { + rc = -ENOMEM; + goto free_sfe_top; + } + + sfe_top->top_priv = top_priv; + 
top_priv->common_data.sfe_irq_controller = sfe_irq_controller; + if (sfe_top_hw_info->num_inputs > CAM_SFE_TOP_IN_PORT_MAX) { + CAM_ERR(CAM_SFE, + "Invalid number of input resources: %d max: %d", + sfe_top_hw_info->num_inputs, + CAM_SFE_TOP_IN_PORT_MAX); + rc = -EINVAL; + goto free_top_priv; + } + + top_priv->num_in_ports = sfe_top_hw_info->num_inputs; + + CAM_DBG(CAM_SFE, + "Initializing SFE [%u] top with hw_version: 0x%x", + hw_intf->hw_idx, hw_version); + for (i = 0, j = 0; i < top_priv->num_in_ports && + j < CAM_SFE_RDI_MAX; i++) { + top_priv->in_rsrc[i].res_type = + CAM_ISP_RESOURCE_SFE_IN; + top_priv->in_rsrc[i].hw_intf = hw_intf; + top_priv->in_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_AVAILABLE; + top_priv->req_clk_rate[i] = 0; + + if (sfe_top_hw_info->input_type[i] == + CAM_SFE_PIX_VER_1_0) { + top_priv->in_rsrc[i].res_id = + CAM_ISP_HW_SFE_IN_PIX; + + path_data = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_path_data), + GFP_KERNEL); + if (!path_data) { + CAM_DBG(CAM_SFE, + "Failed to alloc SFE [%u] pix data", + hw_intf->hw_idx); + goto deinit_resources; + } + top_priv->in_rsrc[i].res_priv = path_data; + path_data->mem_base = + soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base; + path_data->path_reg_data = + sfe_top_hw_info->pix_reg_data; + path_data->common_reg = sfe_top_hw_info->common_reg; + path_data->common_reg_data = + sfe_top_hw_info->common_reg_data; + path_data->modules_reg = + sfe_top_hw_info->modules_hw_info; + path_data->top_priv = top_priv; + path_data->hw_intf = hw_intf; + path_data->soc_info = soc_info; + scnprintf(top_priv->in_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "PIX"); + } else if (sfe_top_hw_info->input_type[i] == + CAM_SFE_RDI_VER_1_0) { + top_priv->in_rsrc[i].res_id = + CAM_ISP_HW_SFE_IN_RDI0 + j; + + path_data = CAM_MEM_ZALLOC(sizeof(struct cam_sfe_path_data), + GFP_KERNEL); + if (!path_data) { + CAM_DBG(CAM_SFE, + "Failed to alloc SFE [%u] rdi data res_id: %u", + hw_intf->hw_idx, + (CAM_ISP_HW_SFE_IN_RDI0 + j)); + goto deinit_resources; 
+ } + + scnprintf(top_priv->in_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "RDI%d", j); + + top_priv->in_rsrc[i].res_priv = path_data; + + path_data->mem_base = + soc_info->reg_map[SFE_CORE_BASE_IDX].mem_base; + path_data->hw_intf = hw_intf; + path_data->common_reg = sfe_top_hw_info->common_reg; + path_data->common_reg_data = + sfe_top_hw_info->common_reg_data; + path_data->modules_reg = + sfe_top_hw_info->modules_hw_info; + path_data->soc_info = soc_info; + path_data->top_priv = top_priv; + path_data->path_reg_data = + sfe_top_hw_info->rdi_reg_data[j++]; + } else { + CAM_WARN(CAM_SFE, "Invalid SFE input type: %u", + sfe_top_hw_info->input_type[i]); + } + } + + top_priv->common_data.soc_info = soc_info; + top_priv->common_data.hw_intf = hw_intf; + top_priv->common_data.common_reg = + sfe_top_hw_info->common_reg; + top_priv->common_data.common_reg_data = sfe_top_hw_info->common_reg_data; + top_priv->hw_info = sfe_top_hw_info; + top_priv->wr_client_desc = sfe_top_hw_info->wr_client_desc; + top_priv->num_clc_module = sfe_top_hw_info->num_clc_module; + top_priv->clc_dbg_mod_info = sfe_top_hw_info->clc_dbg_mod_info; + top_priv->sfe_debug_cfg = 0; + top_priv->cc_testbus_sel_cfg = 0; + + sfe_top->hw_ops.process_cmd = cam_sfe_top_process_cmd; + sfe_top->hw_ops.start = cam_sfe_top_start; + sfe_top->hw_ops.stop = cam_sfe_top_stop; + sfe_top->hw_ops.reserve = cam_sfe_top_reserve; + sfe_top->hw_ops.release = cam_sfe_top_release; + sfe_top->hw_ops.init = cam_sfe_top_init_hw; + + spin_lock_init(&top_priv->spin_lock); + INIT_LIST_HEAD(&top_priv->common_data.free_payload_list); + for (i = 0; i < CAM_SFE_EVT_MAX; i++) { + INIT_LIST_HEAD(&top_priv->common_data.evt_payload[i].list); + list_add_tail(&top_priv->common_data.evt_payload[i].list, + &top_priv->common_data.free_payload_list); + } + + *sfe_top_ptr = sfe_top; + + return rc; + +deinit_resources: + for (--i; i >= 0; i--) { + top_priv->in_rsrc[i].start = NULL; + top_priv->in_rsrc[i].stop = NULL; + top_priv->in_rsrc[i].process_cmd 
= NULL; + top_priv->in_rsrc[i].top_half_handler = NULL; + top_priv->in_rsrc[i].bottom_half_handler = NULL; + + if (!top_priv->in_rsrc[i].res_priv) + continue; + + CAM_MEM_FREE(top_priv->in_rsrc[i].res_priv); + top_priv->in_rsrc[i].res_priv = NULL; + top_priv->in_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + } +free_top_priv: + CAM_MEM_FREE(sfe_top->top_priv); + sfe_top->top_priv = NULL; +free_sfe_top: + CAM_MEM_FREE(sfe_top); +end: + *sfe_top_ptr = NULL; + return rc; +} + +int cam_sfe_top_deinit( + uint32_t hw_version, + struct cam_sfe_top **sfe_top_ptr) +{ + int i, rc = 0; + unsigned long flags; + struct cam_sfe_top *sfe_top; + struct cam_sfe_top_priv *top_priv; + + if (!sfe_top_ptr) { + CAM_ERR(CAM_SFE, "Error Invalid input"); + return -ENODEV; + } + + sfe_top = *sfe_top_ptr; + if (!sfe_top) { + CAM_ERR(CAM_SFE, "Error sfe top NULL"); + return -ENODEV; + } + + top_priv = sfe_top->top_priv; + if (!top_priv) { + CAM_ERR(CAM_SFE, "Error sfe top priv NULL"); + rc = -ENODEV; + goto free_sfe_top; + } + + CAM_DBG(CAM_SFE, + "Deinit SFE [%u] top with hw_version 0x%x", + top_priv->common_data.hw_intf->hw_idx, + hw_version); + + spin_lock_irqsave(&top_priv->spin_lock, flags); + INIT_LIST_HEAD(&top_priv->common_data.free_payload_list); + for (i = 0; i < CAM_SFE_EVT_MAX; i++) + INIT_LIST_HEAD( + &top_priv->common_data.evt_payload[i].list); + spin_unlock_irqrestore(&top_priv->spin_lock, flags); + + for (i = 0; i < CAM_SFE_TOP_IN_PORT_MAX; i++) { + top_priv->in_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + + top_priv->in_rsrc[i].start = NULL; + top_priv->in_rsrc[i].stop = NULL; + top_priv->in_rsrc[i].process_cmd = NULL; + top_priv->in_rsrc[i].top_half_handler = NULL; + top_priv->in_rsrc[i].bottom_half_handler = NULL; + + if (!top_priv->in_rsrc[i].res_priv) { + CAM_ERR(CAM_SFE, "Error res_priv is NULL"); + continue; + } + + CAM_MEM_FREE(top_priv->in_rsrc[i].res_priv); + top_priv->in_rsrc[i].res_priv = NULL; + } + + CAM_MEM_FREE(sfe_top->top_priv); + 
+free_sfe_top: + CAM_MEM_FREE(sfe_top); + *sfe_top_ptr = NULL; + + return rc; +} + + diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.h new file mode 100644 index 0000000000..fbe40b4af2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_SFE_TOP_H_ +#define _CAM_SFE_TOP_H_ + +#include "cam_cpas_api.h" + +#define CAM_SFE_PIX_VER_1_0 0x10 +#define CAM_SFE_RDI_VER_1_0 0x1000 +#define CAM_SFE_TOP_VER_1_0 0x10000 + +#define CAM_SFE_TOP_IN_PORT_MAX 6 +#define CAM_SFE_RDI_MAX 5 + +#define CAM_SHIFT_TOP_CORE_CFG_MODE_SEL 2 +#define CAM_SHIFT_TOP_CORE_CFG_OPS_MODE_CFG 1 +#define CAM_SHIFT_TOP_CORE_CFG_FS_MODE_CFG 0 + +#define CAM_SFE_TOP_DBG_REG_MAX 20 +#define CAM_SFE_TOP_TESTBUS_MAX 2 +#define CAM_SFE_PERF_COUNTER_MAX 2 + +struct cam_sfe_top_module_desc { + uint32_t id; + uint8_t *desc; +}; + +struct cam_sfe_top { + void *top_priv; + struct cam_hw_ops hw_ops; +}; + +struct cam_sfe_mode { + int value; + char *desc; +}; + +struct cam_sfe_wr_client_desc { + uint32_t wm_id; + uint8_t *desc; +}; + +struct cam_sfe_top_err_irq_desc { + uint32_t bitmask; + char *err_name; + char *desc; + char *debug; +}; + +struct cam_sfe_top_debug_info { + uint32_t shift; + char *clc_name; +}; + +struct cam_sfe_top_cc_testbus_info { + uint32_t mask; + uint32_t shift; + char *clc_name; +}; + +struct cam_sfe_testbus_info { + uint32_t debugfs_val; + bool enable; + uint32_t value; + uint32_t size; + struct cam_sfe_top_cc_testbus_info *testbus; +}; + +struct cam_sfe_top_perf_count_reg_offset { + uint32_t perf_count_cfg; + uint32_t perf_pix_count; + 
uint32_t perf_line_count; + uint32_t perf_stall_count; + uint32_t perf_always_count; + uint32_t perf_count_status; +}; + +struct cam_sfe_top_common_reg_offset { + uint32_t hw_version; + uint32_t hw_capability; + uint32_t stats_feature; + uint32_t core_cgc_ctrl; + uint32_t ahb_clk_ovd; + uint32_t core_cfg; + uint32_t ipp_violation_status; + uint32_t diag_config; + uint32_t diag_sensor_status_0; + uint32_t diag_sensor_status_1; + uint32_t diag_sensor_frame_cnt_status0; + uint32_t diag_sensor_frame_cnt_status1; + uint32_t stats_ch2_throttle_cfg; + uint32_t stats_ch1_throttle_cfg; + uint32_t stats_ch0_throttle_cfg; + uint32_t lcr_throttle_cfg; + uint32_t hdr_throttle_cfg; + uint32_t sfe_op_throttle_cfg; + uint32_t irc_throttle_cfg; + uint32_t sfe_single_dual_cfg; + uint32_t bus_overflow_status; + uint32_t num_perf_counters; + struct cam_sfe_top_perf_count_reg_offset + perf_count_reg[CAM_SFE_PERF_COUNTER_MAX]; + uint32_t top_debug_cfg; + uint32_t top_cc_test_bus_ctrl; + bool lcr_supported; + bool ir_supported; + bool qcfa_only; + struct cam_sfe_mode *sfe_mode; + uint32_t num_sfe_mode; + uint32_t ipp_violation_mask; + uint32_t top_debug_testbus_reg; + uint32_t top_debug_nonccif_regstart_idx; + uint32_t top_cc_test_bus_supported; + uint32_t num_debug_registers; + uint32_t top_debug[CAM_SFE_TOP_DBG_REG_MAX]; +}; + +struct cam_sfe_fcg_module_info { + uint32_t max_fcg_ch_ctx; + uint32_t max_fcg_predictions; + uint32_t fcg_index_shift; + uint32_t max_reg_val_pair_size; + uint32_t fcg_type_size; + uint32_t fcg_ch1_phase_index_cfg_0; + uint32_t fcg_ch1_phase_index_cfg_1; + uint32_t fcg_ch2_phase_index_cfg_0; + uint32_t fcg_ch2_phase_index_cfg_1; + uint32_t fcg_ch1_stats_phase_index_cfg_0; + uint32_t fcg_ch1_stats_phase_index_cfg_1; + uint32_t fcg_ch2_stats_phase_index_cfg_0; + uint32_t fcg_ch2_stats_phase_index_cfg_1; +}; + +struct cam_sfe_modules_common_reg_offset { + uint32_t demux_module_cfg; + uint32_t demux_xcfa_cfg; + uint32_t demux_hdr_cfg; + uint32_t demux_lcr_sel; + 
uint32_t hdrc_remo_mod_cfg; + uint32_t hdrc_remo_xcfa_bin_cfg; + uint32_t xcfa_hdrc_remo_out_mux_cfg; + struct cam_sfe_fcg_module_info *fcg_module_info; + bool fcg_supported; +}; + +struct cam_sfe_top_common_reg_data { + uint32_t error_irq_mask; + uint32_t enable_diagnostic_hw; + uint32_t top_debug_cfg_en; + uint32_t sensor_sel_shift; +}; + +struct cam_sfe_path_common_reg_data { + uint32_t sof_irq_mask; + uint32_t eof_irq_mask; + uint32_t subscribe_irq_mask; +}; + +struct cam_sfe_top_hw_info { + struct cam_sfe_top_common_reg_offset *common_reg; + struct cam_sfe_modules_common_reg_offset *modules_hw_info; + struct cam_sfe_top_common_reg_data *common_reg_data; + struct cam_sfe_top_module_desc *ipp_module_desc; + struct cam_sfe_wr_client_desc *wr_client_desc; + struct cam_sfe_path_common_reg_data *pix_reg_data; + struct cam_sfe_path_common_reg_data *rdi_reg_data[CAM_SFE_RDI_MAX]; + uint32_t num_inputs; + uint32_t input_type[ + CAM_SFE_TOP_IN_PORT_MAX]; + uint32_t num_top_errors; + struct cam_sfe_top_err_irq_desc *top_err_desc; + uint32_t num_clc_module; + struct cam_sfe_top_debug_info (*clc_dbg_mod_info)[CAM_SFE_TOP_DBG_REG_MAX][8]; + uint32_t num_of_testbus; + struct cam_sfe_testbus_info test_bus_info[CAM_SFE_TOP_TESTBUS_MAX]; +}; + +int cam_sfe_top_init( + uint32_t hw_version, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *sfe_irq_controller, + struct cam_sfe_top **sfe_top); + +int cam_sfe_top_deinit( + uint32_t hw_version, + struct cam_sfe_top **sfe_top); + +#define SFE_DBG_INFO(shift_val, name) {.shift = shift_val, .clc_name = name} + +#define SFE_DBG_INFO_ARRAY_4bit(name1, name2, name3, name4, name5, name6, name7, name8) \ + { \ + SFE_DBG_INFO(0, name1), \ + SFE_DBG_INFO(4, name2), \ + SFE_DBG_INFO(8, name3), \ + SFE_DBG_INFO(12, name4), \ + SFE_DBG_INFO(16, name5), \ + SFE_DBG_INFO(20, name6), \ + SFE_DBG_INFO(24, name7), \ + SFE_DBG_INFO(28, name8), \ + } + +#endif /* _CAM_SFE_TOP_H_ */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid.c new file mode 100644 index 0000000000..61c9e60acd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + + +#include +#include "cam_tfe_csid_core.h" +#include "cam_tfe_csid530.h" +#include "cam_tfe_csid640.h" +#include "cam_tfe_csid_dev.h" +#include "camera_main.h" + +#define CAM_TFE_CSID_DRV_NAME "tfe_csid" + +static const struct of_device_id cam_tfe_csid_dt_match[] = { + { + .compatible = "qcom,csid530", + .data = &cam_tfe_csid530_hw_info, + }, + { + .compatible = "qcom,csid640", + .data = &cam_tfe_csid640_hw_info, + }, + {} +}; + +MODULE_DEVICE_TABLE(of, cam_tfe_csid_dt_match); + +struct platform_driver cam_tfe_csid_driver = { + .probe = cam_tfe_csid_probe, + .remove = cam_tfe_csid_remove, + .driver = { + .name = CAM_TFE_CSID_DRV_NAME, + .of_match_table = cam_tfe_csid_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_tfe_csid_init_module(void) +{ + return platform_driver_register(&cam_tfe_csid_driver); +} + +void cam_tfe_csid_exit_module(void) +{ + platform_driver_unregister(&cam_tfe_csid_driver); +} + +MODULE_DESCRIPTION("CAM TFE_CSID driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid.h new file mode 100644 index 0000000000..3edc187662 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_TFE_CSID_H_ +#define _CAM_TFE_CSID_H_ + +int cam_tfe_csid_init_module(void); +void cam_tfe_csid_exit_module(void); + +#endif /*_CAM_TFE_CSID_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid530.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid530.h new file mode 100644 index 0000000000..89bd203746 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid530.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_TFE_CSID_530_H_ +#define _CAM_TFE_CSID_530_H_ + +#include "cam_tfe_csid_core.h" + +#define CAM_TFE_CSID_VERSION_V530 0x50030000 + +static struct cam_tfe_csid_pxl_reg_offset cam_tfe_csid_530_ipp_reg_offset = { + .csid_pxl_irq_status_addr = 0x30, + .csid_pxl_irq_mask_addr = 0x34, + .csid_pxl_irq_clear_addr = 0x38, + .csid_pxl_irq_set_addr = 0x3c, + + .csid_pxl_cfg0_addr = 0x200, + .csid_pxl_cfg1_addr = 0x204, + .csid_pxl_ctrl_addr = 0x208, + .csid_pxl_hcrop_addr = 0x21c, + .csid_pxl_vcrop_addr = 0x220, + .csid_pxl_rst_strobes_addr = 0x240, + .csid_pxl_status_addr = 0x254, + .csid_pxl_misr_val_addr = 0x258, + .csid_pxl_timestamp_curr0_sof_addr = 0x290, + .csid_pxl_timestamp_curr1_sof_addr = 0x294, + .csid_pxl_timestamp_perv0_sof_addr = 0x298, + .csid_pxl_timestamp_perv1_sof_addr = 0x29c, + .csid_pxl_timestamp_curr0_eof_addr = 0x2a0, + .csid_pxl_timestamp_curr1_eof_addr = 0x2a4, + .csid_pxl_timestamp_perv0_eof_addr = 0x2a8, + .csid_pxl_timestamp_perv1_eof_addr = 0x2ac, + .csid_pxl_err_recovery_cfg0_addr = 0x2d0, + .csid_pxl_err_recovery_cfg1_addr = 0x2d4, + .csid_pxl_err_recovery_cfg2_addr = 0x2d8, + /* configurations */ + .pix_store_en_shift_val = 7, + .early_eof_en_shift_val = 29, + .halt_master_sel_shift = 
4, + .halt_mode_shift = 2, + .halt_master_sel_master_val = 3, + .halt_master_sel_slave_val = 0, + .binning_supported = 0, + .is_multi_vc_dt_supported = false, +}; + +static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_530_rdi_0_reg_offset = { + .csid_rdi_irq_status_addr = 0x40, + .csid_rdi_irq_mask_addr = 0x44, + .csid_rdi_irq_clear_addr = 0x48, + .csid_rdi_irq_set_addr = 0x4c, + + .csid_rdi_cfg0_addr = 0x300, + .csid_rdi_cfg1_addr = 0x304, + .csid_rdi_ctrl_addr = 0x308, + .csid_rdi_rst_strobes_addr = 0x340, + .csid_rdi_status_addr = 0x350, + .csid_rdi_misr_val0_addr = 0x354, + .csid_rdi_misr_val1_addr = 0x358, + .csid_rdi_timestamp_curr0_sof_addr = 0x390, + .csid_rdi_timestamp_curr1_sof_addr = 0x394, + .csid_rdi_timestamp_prev0_sof_addr = 0x398, + .csid_rdi_timestamp_prev1_sof_addr = 0x39c, + .csid_rdi_timestamp_curr0_eof_addr = 0x3a0, + .csid_rdi_timestamp_curr1_eof_addr = 0x3a4, + .csid_rdi_timestamp_prev0_eof_addr = 0x3a8, + .csid_rdi_timestamp_prev1_eof_addr = 0x3ac, + .csid_rdi_err_recovery_cfg0_addr = 0x3b0, + .csid_rdi_err_recovery_cfg1_addr = 0x3b4, + .csid_rdi_err_recovery_cfg2_addr = 0x3b8, + .csid_rdi_byte_cntr_ping_addr = 0x3e0, + .csid_rdi_byte_cntr_pong_addr = 0x3e4, + /* configurations */ + .is_multi_vc_dt_supported = false, +}; + +static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_530_rdi_1_reg_offset = { + .csid_rdi_irq_status_addr = 0x50, + .csid_rdi_irq_mask_addr = 0x54, + .csid_rdi_irq_clear_addr = 0x58, + .csid_rdi_irq_set_addr = 0x5c, + + .csid_rdi_cfg0_addr = 0x400, + .csid_rdi_cfg1_addr = 0x404, + .csid_rdi_ctrl_addr = 0x408, + .csid_rdi_rst_strobes_addr = 0x440, + .csid_rdi_status_addr = 0x450, + .csid_rdi_misr_val0_addr = 0x454, + .csid_rdi_misr_val1_addr = 0x458, + .csid_rdi_timestamp_curr0_sof_addr = 0x490, + .csid_rdi_timestamp_curr1_sof_addr = 0x494, + .csid_rdi_timestamp_prev0_sof_addr = 0x498, + .csid_rdi_timestamp_prev1_sof_addr = 0x49c, + .csid_rdi_timestamp_curr0_eof_addr = 0x4a0, + .csid_rdi_timestamp_curr1_eof_addr = 
0x4a4, + .csid_rdi_timestamp_prev0_eof_addr = 0x4a8, + .csid_rdi_timestamp_prev1_eof_addr = 0x4ac, + .csid_rdi_err_recovery_cfg0_addr = 0x4b0, + .csid_rdi_err_recovery_cfg1_addr = 0x4b4, + .csid_rdi_err_recovery_cfg2_addr = 0x4b8, + .csid_rdi_byte_cntr_ping_addr = 0x4e0, + .csid_rdi_byte_cntr_pong_addr = 0x4e4, + /* configurations */ + .is_multi_vc_dt_supported = false, +}; + +static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_530_rdi_2_reg_offset = { + .csid_rdi_irq_status_addr = 0x60, + .csid_rdi_irq_mask_addr = 0x64, + .csid_rdi_irq_clear_addr = 0x68, + .csid_rdi_irq_set_addr = 0x6c, + + .csid_rdi_cfg0_addr = 0x500, + .csid_rdi_cfg1_addr = 0x504, + .csid_rdi_ctrl_addr = 0x508, + .csid_rdi_rst_strobes_addr = 0x540, + .csid_rdi_status_addr = 0x550, + .csid_rdi_misr_val0_addr = 0x554, + .csid_rdi_misr_val1_addr = 0x558, + .csid_rdi_timestamp_curr0_sof_addr = 0x590, + .csid_rdi_timestamp_curr1_sof_addr = 0x594, + .csid_rdi_timestamp_prev0_sof_addr = 0x598, + .csid_rdi_timestamp_prev1_sof_addr = 0x59c, + .csid_rdi_timestamp_curr0_eof_addr = 0x5a0, + .csid_rdi_timestamp_curr1_eof_addr = 0x5a4, + .csid_rdi_timestamp_prev0_eof_addr = 0x5a8, + .csid_rdi_timestamp_prev1_eof_addr = 0x5ac, + .csid_rdi_err_recovery_cfg0_addr = 0x5b0, + .csid_rdi_err_recovery_cfg1_addr = 0x5b4, + .csid_rdi_err_recovery_cfg2_addr = 0x5b8, + .csid_rdi_byte_cntr_ping_addr = 0x5e0, + .csid_rdi_byte_cntr_pong_addr = 0x5e4, + /* configurations */ + .is_multi_vc_dt_supported = false, +}; + +static struct cam_tfe_csid_csi2_rx_reg_offset + cam_tfe_csid_530_csi2_reg_offset = { + .csid_csi2_rx_irq_status_addr = 0x20, + .csid_csi2_rx_irq_mask_addr = 0x24, + .csid_csi2_rx_irq_clear_addr = 0x28, + .csid_csi2_rx_irq_set_addr = 0x2c, + + /*CSI2 rx control */ + .phy_sel_base = 1, + .csid_csi2_rx_cfg0_addr = 0x100, + .csid_csi2_rx_cfg1_addr = 0x104, + .csid_csi2_rx_capture_ctrl_addr = 0x108, + .csid_csi2_rx_rst_strobes_addr = 0x110, + .csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr = 0x120, + 
.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr = 0x124, + .csid_csi2_rx_captured_short_pkt_0_addr = 0x128, + .csid_csi2_rx_captured_short_pkt_1_addr = 0x12c, + .csid_csi2_rx_captured_long_pkt_0_addr = 0x130, + .csid_csi2_rx_captured_long_pkt_1_addr = 0x134, + .csid_csi2_rx_captured_long_pkt_ftr_addr = 0x138, + .csid_csi2_rx_captured_cphy_pkt_hdr_addr = 0x13c, + .csid_csi2_rx_total_pkts_rcvd_addr = 0x160, + .csid_csi2_rx_stats_ecc_addr = 0x164, + .csid_csi2_rx_total_crc_err_addr = 0x168, + + .phy_tpg_base_id = 0, + .csi2_rst_srb_all = 0x3FFF, + .csi2_rst_done_shift_val = 27, + .csi2_irq_mask_all = 0xFFFFFFF, + .csi2_misr_enable_shift_val = 6, + .csi2_capture_long_pkt_en_shift = 0, + .csi2_capture_short_pkt_en_shift = 1, + .csi2_capture_cphy_pkt_en_shift = 2, + .csi2_capture_long_pkt_dt_shift = 4, + .csi2_capture_long_pkt_vc_shift = 10, + .csi2_capture_short_pkt_vc_shift = 12, + .csi2_capture_cphy_pkt_dt_shift = 14, + .csi2_capture_cphy_pkt_vc_shift = 20, + .csi2_rx_phy_num_mask = 0x7, + .csi2_rx_long_pkt_hdr_rst_stb_shift = 0x1, + .csi2_rx_short_pkt_hdr_rst_stb_shift = 0x2, + .csi2_rx_cphy_pkt_hdr_rst_stb_shift = 0x3, + .need_to_sel_tpg_mux = false, +}; + +static struct cam_tfe_csid_common_reg_offset + cam_tfe_csid_530_cmn_reg_offset = { + .csid_hw_version_addr = 0x0, + .csid_cfg0_addr = 0x4, + .csid_ctrl_addr = 0x8, + .csid_rst_strobes_addr = 0x10, + + .csid_test_bus_ctrl_addr = 0x14, + .csid_top_irq_status_addr = 0x70, + .csid_top_irq_mask_addr = 0x74, + .csid_top_irq_clear_addr = 0x78, + .csid_top_irq_set_addr = 0x7c, + .csid_irq_cmd_addr = 0x80, + + /*configurations */ + .major_version = 5, + .minor_version = 3, + .version_incr = 0, + .num_rdis = 3, + .num_pix = 1, + .csid_reg_rst_stb = 1, + .csid_rst_stb = 0x1e, + .csid_rst_stb_sw_all = 0x1f, + .ipp_path_rst_stb_all = 0x17, + .rdi_path_rst_stb_all = 0x97, + .path_rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .fmt_shift_val = 12, + 
.plain_fmt_shit_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .crop_shift = 16, + .ipp_irq_mask_all = 0x3FFFF, + .rdi_irq_mask_all = 0x3FFFF, + .top_tfe2_pix_pipe_fuse_reg = 0xFE4, + .top_tfe2_fuse_reg = 0xFE8, + .format_measure_support = false, +}; + +static struct cam_tfe_csid_reg_offset cam_tfe_csid_530_reg_offset = { + .cmn_reg = &cam_tfe_csid_530_cmn_reg_offset, + .csi2_reg = &cam_tfe_csid_530_csi2_reg_offset, + .ipp_reg = &cam_tfe_csid_530_ipp_reg_offset, + .rdi_reg = { + &cam_tfe_csid_530_rdi_0_reg_offset, + &cam_tfe_csid_530_rdi_1_reg_offset, + &cam_tfe_csid_530_rdi_2_reg_offset, + }, +}; + +static struct cam_tfe_csid_hw_info cam_tfe_csid530_hw_info = { + .csid_reg = &cam_tfe_csid_530_reg_offset, + .hw_dts_version = CAM_TFE_CSID_VERSION_V530, +}; + +#endif /*_CAM_TFE_CSID_530_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid640.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid640.h new file mode 100644 index 0000000000..582a998417 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid640.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_TFE_CSID_640_H_ +#define _CAM_TFE_CSID_640_H_ + +#include "cam_tfe_csid_core.h" + +#define CAM_TFE_CSID_VERSION_V640 0x60040000 + +static struct cam_tfe_csid_pxl_reg_offset cam_tfe_csid_640_ipp_reg_offset = { + .csid_pxl_irq_status_addr = 0x30, + .csid_pxl_irq_mask_addr = 0x34, + .csid_pxl_irq_clear_addr = 0x38, + .csid_pxl_irq_set_addr = 0x3c, + + .csid_pxl_cfg0_addr = 0x200, + .csid_pxl_cfg1_addr = 0x204, + .csid_pxl_ctrl_addr = 0x208, + .csid_pxl_hcrop_addr = 0x21c, + .csid_pxl_vcrop_addr = 0x220, + .csid_pxl_rst_strobes_addr = 0x240, + .csid_pxl_status_addr = 0x254, + .csid_pxl_misr_val_addr = 0x258, + .csid_pxl_timestamp_curr0_sof_addr = 0x290, + .csid_pxl_timestamp_curr1_sof_addr = 0x294, + .csid_pxl_timestamp_perv0_sof_addr = 0x298, + .csid_pxl_timestamp_perv1_sof_addr = 0x29c, + .csid_pxl_timestamp_curr0_eof_addr = 0x2a0, + .csid_pxl_timestamp_curr1_eof_addr = 0x2a4, + .csid_pxl_timestamp_perv0_eof_addr = 0x2a8, + .csid_pxl_timestamp_perv1_eof_addr = 0x2ac, + .csid_pxl_err_recovery_cfg0_addr = 0x2d0, + .csid_pxl_err_recovery_cfg1_addr = 0x2d4, + .csid_pxl_err_recovery_cfg2_addr = 0x2d8, + .csid_pxl_multi_vcdt_cfg0_addr = 0x2dc, + .csid_pxl_format_measure_cfg0_addr = 0x270, + .csid_pxl_format_measure_cfg1_addr = 0x274, + .csid_pxl_format_measure0_addr = 0x278, + .csid_pxl_format_measure1_addr = 0x27c, + .csid_pxl_format_measure2_addr = 0x280, + + /* configurations */ + .pix_store_en_shift_val = 7, + .early_eof_en_shift_val = 29, + .halt_master_sel_shift = 4, + .halt_mode_shift = 2, + .halt_master_sel_master_val = 3, + .halt_master_sel_slave_val = 0, + .binning_supported = 3, + .bin_qcfa_en_shift_val = 30, + .bin_en_shift_val = 2, + .is_multi_vc_dt_supported = true, + .format_measure_en_shift_val = 0, + .measure_en_hbi_vbi_cnt_val = 0xc, +}; + +static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_640_rdi_0_reg_offset = { + .csid_rdi_irq_status_addr = 0x40, + .csid_rdi_irq_mask_addr = 0x44, + .csid_rdi_irq_clear_addr = 0x48, + 
.csid_rdi_irq_set_addr = 0x4c, + + .csid_rdi_cfg0_addr = 0x300, + .csid_rdi_cfg1_addr = 0x304, + .csid_rdi_ctrl_addr = 0x308, + .csid_rdi_rst_strobes_addr = 0x340, + .csid_rdi_status_addr = 0x350, + .csid_rdi_misr_val0_addr = 0x354, + .csid_rdi_misr_val1_addr = 0x358, + .csid_rdi_timestamp_curr0_sof_addr = 0x390, + .csid_rdi_timestamp_curr1_sof_addr = 0x394, + .csid_rdi_timestamp_prev0_sof_addr = 0x398, + .csid_rdi_timestamp_prev1_sof_addr = 0x39c, + .csid_rdi_timestamp_curr0_eof_addr = 0x3a0, + .csid_rdi_timestamp_curr1_eof_addr = 0x3a4, + .csid_rdi_timestamp_prev0_eof_addr = 0x3a8, + .csid_rdi_timestamp_prev1_eof_addr = 0x3ac, + .csid_rdi_err_recovery_cfg0_addr = 0x3b0, + .csid_rdi_err_recovery_cfg1_addr = 0x3b4, + .csid_rdi_err_recovery_cfg2_addr = 0x3b8, + .csid_rdi_byte_cntr_ping_addr = 0x3e0, + .csid_rdi_byte_cntr_pong_addr = 0x3e4, + .csid_rdi_multi_vcdt_cfg0_addr = 0x3bc, + .csid_rdi_format_measure_cfg0_addr = 0x370, + .csid_rdi_format_measure_cfg1_addr = 0x374, + .csid_rdi_format_measure0_addr = 0x378, + .csid_rdi_format_measure1_addr = 0x37c, + .csid_rdi_format_measure2_addr = 0x380, + + /* configurations */ + .is_multi_vc_dt_supported = true, + .format_measure_en_shift_val = 0, + .measure_en_hbi_vbi_cnt_val = 0xc, +}; + +static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_640_rdi_1_reg_offset = { + .csid_rdi_irq_status_addr = 0x50, + .csid_rdi_irq_mask_addr = 0x54, + .csid_rdi_irq_clear_addr = 0x58, + .csid_rdi_irq_set_addr = 0x5c, + + .csid_rdi_cfg0_addr = 0x400, + .csid_rdi_cfg1_addr = 0x404, + .csid_rdi_ctrl_addr = 0x408, + .csid_rdi_rst_strobes_addr = 0x440, + .csid_rdi_status_addr = 0x450, + .csid_rdi_misr_val0_addr = 0x454, + .csid_rdi_misr_val1_addr = 0x458, + .csid_rdi_timestamp_curr0_sof_addr = 0x490, + .csid_rdi_timestamp_curr1_sof_addr = 0x494, + .csid_rdi_timestamp_prev0_sof_addr = 0x498, + .csid_rdi_timestamp_prev1_sof_addr = 0x49c, + .csid_rdi_timestamp_curr0_eof_addr = 0x4a0, + .csid_rdi_timestamp_curr1_eof_addr = 0x4a4, + 
.csid_rdi_timestamp_prev0_eof_addr = 0x4a8, + .csid_rdi_timestamp_prev1_eof_addr = 0x4ac, + .csid_rdi_err_recovery_cfg0_addr = 0x4b0, + .csid_rdi_err_recovery_cfg1_addr = 0x4b4, + .csid_rdi_err_recovery_cfg2_addr = 0x4b8, + .csid_rdi_byte_cntr_ping_addr = 0x4e0, + .csid_rdi_byte_cntr_pong_addr = 0x4e4, + .csid_rdi_multi_vcdt_cfg0_addr = 0x4bc, + .csid_rdi_format_measure_cfg0_addr = 0x470, + .csid_rdi_format_measure_cfg1_addr = 0x474, + .csid_rdi_format_measure0_addr = 0x478, + .csid_rdi_format_measure1_addr = 0x47c, + .csid_rdi_format_measure2_addr = 0x480, + + /* configurations */ + .is_multi_vc_dt_supported = true, + .format_measure_en_shift_val = 0, + .measure_en_hbi_vbi_cnt_val = 0xc, +}; + +static struct cam_tfe_csid_rdi_reg_offset cam_tfe_csid_640_rdi_2_reg_offset = { + .csid_rdi_irq_status_addr = 0x60, + .csid_rdi_irq_mask_addr = 0x64, + .csid_rdi_irq_clear_addr = 0x68, + .csid_rdi_irq_set_addr = 0x6c, + + .csid_rdi_cfg0_addr = 0x500, + .csid_rdi_cfg1_addr = 0x504, + .csid_rdi_ctrl_addr = 0x508, + .csid_rdi_rst_strobes_addr = 0x540, + .csid_rdi_status_addr = 0x550, + .csid_rdi_misr_val0_addr = 0x554, + .csid_rdi_misr_val1_addr = 0x558, + .csid_rdi_timestamp_curr0_sof_addr = 0x590, + .csid_rdi_timestamp_curr1_sof_addr = 0x594, + .csid_rdi_timestamp_prev0_sof_addr = 0x598, + .csid_rdi_timestamp_prev1_sof_addr = 0x59c, + .csid_rdi_timestamp_curr0_eof_addr = 0x5a0, + .csid_rdi_timestamp_curr1_eof_addr = 0x5a4, + .csid_rdi_timestamp_prev0_eof_addr = 0x5a8, + .csid_rdi_timestamp_prev1_eof_addr = 0x5ac, + .csid_rdi_err_recovery_cfg0_addr = 0x5b0, + .csid_rdi_err_recovery_cfg1_addr = 0x5b4, + .csid_rdi_err_recovery_cfg2_addr = 0x5b8, + .csid_rdi_byte_cntr_ping_addr = 0x5e0, + .csid_rdi_byte_cntr_pong_addr = 0x5e4, + .csid_rdi_multi_vcdt_cfg0_addr = 0x5bc, + .csid_rdi_format_measure_cfg0_addr = 0x570, + .csid_rdi_format_measure_cfg1_addr = 0x574, + .csid_rdi_format_measure0_addr = 0x578, + .csid_rdi_format_measure1_addr = 0x57c, + .csid_rdi_format_measure2_addr = 
0x580, + + /* configurations */ + .is_multi_vc_dt_supported = true, + .format_measure_en_shift_val = 0, + .measure_en_hbi_vbi_cnt_val = 0xc, +}; + +static struct cam_tfe_csid_csi2_rx_reg_offset + cam_tfe_csid_640_csi2_reg_offset = { + .csid_csi2_rx_irq_status_addr = 0x20, + .csid_csi2_rx_irq_mask_addr = 0x24, + .csid_csi2_rx_irq_clear_addr = 0x28, + .csid_csi2_rx_irq_set_addr = 0x2c, + + /*CSI2 rx control */ + .phy_sel_base = 1, + .csid_csi2_rx_cfg0_addr = 0x100, + .csid_csi2_rx_cfg1_addr = 0x104, + .csid_csi2_rx_capture_ctrl_addr = 0x108, + .csid_csi2_rx_rst_strobes_addr = 0x110, + .csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr = 0x120, + .csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr = 0x124, + .csid_csi2_rx_captured_short_pkt_0_addr = 0x128, + .csid_csi2_rx_captured_short_pkt_1_addr = 0x12c, + .csid_csi2_rx_captured_long_pkt_0_addr = 0x130, + .csid_csi2_rx_captured_long_pkt_1_addr = 0x134, + .csid_csi2_rx_captured_long_pkt_ftr_addr = 0x138, + .csid_csi2_rx_captured_cphy_pkt_hdr_addr = 0x13c, + .csid_csi2_rx_total_pkts_rcvd_addr = 0x160, + .csid_csi2_rx_stats_ecc_addr = 0x164, + .csid_csi2_rx_total_crc_err_addr = 0x168, + + .phy_tpg_base_id = 1, + .csi2_rst_srb_all = 0x3FFF, + .csi2_rst_done_shift_val = 27, + .csi2_irq_mask_all = 0xFFFFFFF, + .csi2_misr_enable_shift_val = 6, + .csi2_capture_long_pkt_en_shift = 0, + .csi2_capture_short_pkt_en_shift = 1, + .csi2_capture_cphy_pkt_en_shift = 2, + .csi2_capture_long_pkt_dt_shift = 4, + .csi2_capture_long_pkt_vc_shift = 10, + .csi2_capture_short_pkt_vc_shift = 12, + .csi2_capture_cphy_pkt_dt_shift = 14, + .csi2_capture_cphy_pkt_vc_shift = 20, + .csi2_rx_phy_num_mask = 0x7, + .csi2_rx_long_pkt_hdr_rst_stb_shift = 0x1, + .csi2_rx_short_pkt_hdr_rst_stb_shift = 0x2, + .csi2_rx_cphy_pkt_hdr_rst_stb_shift = 0x3, + .need_to_sel_tpg_mux = true, +}; + +static struct cam_tfe_csid_common_reg_offset + cam_tfe_csid_640_cmn_reg_offset = { + .csid_hw_version_addr = 0x0, + .csid_cfg0_addr = 0x4, + .csid_ctrl_addr = 0x8, + 
.csid_rst_strobes_addr = 0x10, + + .csid_test_bus_ctrl_addr = 0x14, + .csid_top_irq_status_addr = 0x70, + .csid_top_irq_mask_addr = 0x74, + .csid_top_irq_clear_addr = 0x78, + .csid_top_irq_set_addr = 0x7c, + .csid_irq_cmd_addr = 0x80, + + /*configurations */ + .major_version = 5, + .minor_version = 3, + .version_incr = 0, + .num_rdis = 3, + .num_pix = 1, + .csid_reg_rst_stb = 1, + .csid_rst_stb = 0x1e, + .csid_rst_stb_sw_all = 0x1f, + .ipp_path_rst_stb_all = 0x17, + .rdi_path_rst_stb_all = 0x97, + .path_rst_done_shift_val = 1, + .path_en_shift_val = 31, + .dt_id_shift_val = 27, + .vc_shift_val = 22, + .dt_shift_val = 16, + .vc1_shift_val = 2, + .dt1_shift_val = 7, + .multi_vc_dt_en_shift_val = 0, + .fmt_shift_val = 12, + .plain_fmt_shit_val = 10, + .crop_v_en_shift_val = 6, + .crop_h_en_shift_val = 5, + .crop_shift = 16, + .ipp_irq_mask_all = 0x3FFFF, + .rdi_irq_mask_all = 0x3FFFF, + .top_tfe2_pix_pipe_fuse_reg = 0xFE4, + .top_tfe2_fuse_reg = 0xFE8, + .format_measure_support = true, + .format_measure_height_shift_val = 16, + .format_measure_height_mask_val = 0xe, + .format_measure_width_mask_val = 0x10, + .sync_clk = true, +}; + +static struct cam_tfe_csid_reg_offset cam_tfe_csid_640_reg_offset = { + .cmn_reg = &cam_tfe_csid_640_cmn_reg_offset, + .csi2_reg = &cam_tfe_csid_640_csi2_reg_offset, + .ipp_reg = &cam_tfe_csid_640_ipp_reg_offset, + .rdi_reg = { + &cam_tfe_csid_640_rdi_0_reg_offset, + &cam_tfe_csid_640_rdi_1_reg_offset, + &cam_tfe_csid_640_rdi_2_reg_offset, + }, +}; + +static struct cam_tfe_csid_hw_info cam_tfe_csid640_hw_info = { + .csid_reg = &cam_tfe_csid_640_reg_offset, + .hw_dts_version = CAM_TFE_CSID_VERSION_V640, +}; + +#endif /*_CAM_TFE_CSID_640_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c new file mode 100644 index 0000000000..ce6a160847 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c @@ -0,0 +1,4104 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "cam_tfe_csid_core.h" +#include "cam_csid_ppi_core.h" +#include "cam_isp_hw.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_subdev.h" +#include "cam_tasklet_util.h" +#include "cam_common_util.h" +#include "cam_tfe_csid_hw_intf.h" +#include +#include "cam_cpas_hw_intf.h" +#include "cam_mem_mgr_api.h" + +/* Timeout value in msec */ +#define TFE_CSID_TIMEOUT 1000 + +/* Timeout values in usec */ +#define CAM_TFE_CSID_TIMEOUT_SLEEP_US 1000 +#define CAM_TFE_CSID_TIMEOUT_ALL_US 100000 + +/* + * Constant Factors needed to change QTimer ticks to nanoseconds + * QTimer Freq = 19.2 MHz + * Time(us) = ticks/19.2 + * Time(ns) = ticks/19.2 * 1000 + */ +#define CAM_TFE_CSID_QTIMER_MUL_FACTOR 10000 +#define CAM_TFE_CSID_QTIMER_DIV_FACTOR 192 + +/* Max number of sof irq's triggered in case of SOF freeze */ +#define CAM_TFE_CSID_IRQ_SOF_DEBUG_CNT_MAX 12 + +/* Max CSI Rx irq error count threshold value */ +#define CAM_TFE_CSID_MAX_IRQ_ERROR_COUNT 5 + +static int cam_tfe_csid_is_ipp_format_supported( + uint32_t in_format) +{ + int rc = -EINVAL; + + switch (in_format) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + rc = 0; + break; + default: + break; + } + return rc; +} + +static int cam_tfe_csid_get_format_rdi( + uint32_t in_format, uint32_t out_format, + uint32_t *decode_fmt, uint32_t *plain_fmt) +{ + int rc = 0; + + switch (in_format) { + case CAM_FORMAT_MIPI_RAW_6: + switch (out_format) { + case 
CAM_FORMAT_MIPI_RAW_6: + *decode_fmt = 0xf; + break; + case CAM_FORMAT_PLAIN8: + *decode_fmt = 0x0; + *plain_fmt = 0x0; + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_8: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_PLAIN128: + *decode_fmt = 0xf; + break; + case CAM_FORMAT_PLAIN8: + *decode_fmt = 0x1; + *plain_fmt = 0x0; + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_10: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_PLAIN128: + *decode_fmt = 0xf; + break; + case CAM_FORMAT_PLAIN16_10: + *decode_fmt = 0x2; + *plain_fmt = 0x1; + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_12: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_12: + *decode_fmt = 0xf; + break; + case CAM_FORMAT_PLAIN16_12: + *decode_fmt = 0x3; + *plain_fmt = 0x1; + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_14: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_14: + *decode_fmt = 0xf; + break; + case CAM_FORMAT_PLAIN16_14: + *decode_fmt = 0x4; + *plain_fmt = 0x1; + break; + default: + rc = -EINVAL; + break; + } + break; + case CAM_FORMAT_MIPI_RAW_16: + switch (out_format) { + case CAM_FORMAT_MIPI_RAW_16: + *decode_fmt = 0xf; + break; + case CAM_FORMAT_PLAIN16_16: + *decode_fmt = 0x5; + *plain_fmt = 0x1; + break; + default: + rc = -EINVAL; + break; + } + break; + default: + rc = -EINVAL; + break; + } + + if (rc) + CAM_ERR(CAM_ISP, "Unsupported format pair in %d out %d", + in_format, out_format); + + return rc; +} + +static int cam_tfe_csid_get_format_ipp( + uint32_t in_format, + uint32_t *decode_fmt, uint32_t *plain_fmt) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "input format:%d", + in_format); + + switch (in_format) { + case CAM_FORMAT_MIPI_RAW_6: + *decode_fmt = 0; + *plain_fmt = 0; + break; + case CAM_FORMAT_MIPI_RAW_8: + *decode_fmt = 0x1; + *plain_fmt = 0; + break; + case CAM_FORMAT_MIPI_RAW_10: 
+ *decode_fmt = 0x2; + *plain_fmt = 0x1; + break; + case CAM_FORMAT_MIPI_RAW_12: + *decode_fmt = 0x3; + *plain_fmt = 0x1; + break; + default: + CAM_ERR(CAM_ISP, "Unsupported format %d", + in_format); + rc = -EINVAL; + } + + CAM_DBG(CAM_ISP, "decode_fmt:%d plain_fmt:%d", + *decode_fmt, *plain_fmt); + + return rc; +} + +static int cam_tfe_match_vc_dt_pair(int32_t *vc, uint32_t *dt, + uint32_t num_valid_vc_dt, struct cam_tfe_csid_cid_data *cid_data) +{ + int i; + + if (num_valid_vc_dt == 0 || num_valid_vc_dt > CAM_ISP_TFE_VC_DT_CFG) { + CAM_ERR(CAM_ISP, "invalid num_valid_vc_dt: %d", + num_valid_vc_dt); + return -EINVAL; + } + + for (i = 0; i < num_valid_vc_dt; i++) { + if (vc[i] != cid_data->vc_dt[i].vc || + dt[i] != cid_data->vc_dt[i].dt) + return -EINVAL; + } + + return 0; +} + +static void cam_tfe_csid_enable_path_for_init_frame_drop( + struct cam_tfe_csid_hw *csid_hw, + int res_id) +{ + struct cam_tfe_csid_path_cfg *path_data; + const struct cam_tfe_csid_pxl_reg_offset *pxl_reg = NULL; + const struct cam_tfe_csid_rdi_reg_offset *rdi_reg = NULL; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_isp_resource_node *res; + uint32_t val; + + if (!csid_hw) { + CAM_WARN(CAM_ISP, "csid_hw cannot be NULL"); + return; + } + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res_id == CAM_TFE_CSID_PATH_RES_IPP) { + res = &csid_hw->ipp_res; + pxl_reg = csid_reg->ipp_reg; + } else if (res_id >= CAM_TFE_CSID_PATH_RES_RDI_0 && + res_id <= CAM_TFE_CSID_PATH_RES_RDI_2) { + res = &csid_hw->rdi_res[res_id]; + rdi_reg = csid_reg->rdi_reg[res_id]; + } else { + CAM_ERR(CAM_ISP, "Invalid res_id"); + return; + } + + path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv; + + if (!path_data || !path_data->init_frame_drop) + return; + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) + return; + + path_data->res_sof_cnt++; + if ((path_data->res_sof_cnt + 1) < + path_data->res_sof_cnt) { + 
CAM_WARN(CAM_ISP, "Res %d sof count overflow %d", + res_id, path_data->res_sof_cnt); + return; + } + + CAM_DBG(CAM_ISP, "CSID:%d res_id %d SOF cnt:%d init_frame_drop:%d", + csid_hw->hw_intf->hw_idx, res_id, path_data->res_sof_cnt, + path_data->init_frame_drop); + + if ((path_data->res_sof_cnt == + path_data->init_frame_drop) && + pxl_reg) { + CAM_DBG(CAM_ISP, "CSID:%d Enabling pixel IPP Path", + csid_hw->hw_intf->hw_idx); + if (path_data->sync_mode != + CAM_ISP_HW_SYNC_SLAVE) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY; + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + } + + if (!(csid_hw->csid_debug & + TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_irq_mask_addr); + val &= ~(TFE_CSID_PATH_INFO_INPUT_SOF); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_irq_mask_addr); + } + } else if ((path_data->res_sof_cnt == + path_data->init_frame_drop) && rdi_reg) { + CAM_DBG(CAM_ISP, "Enabling RDI %d Path", res_id); + cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY, + soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_ctrl_addr); + if (!(csid_hw->csid_debug & + TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_irq_mask_addr); + val &= ~(TFE_CSID_PATH_INFO_INPUT_SOF); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_irq_mask_addr); + } + } +} + +static bool cam_tfe_csid_check_path_active(struct cam_tfe_csid_hw *csid_hw) +{ + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + uint32_t i; + uint32_t path_status = 1; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + /* check the IPP path status */ + if (csid_reg->cmn_reg->num_pix) { + path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base + + 
csid_reg->ipp_reg->csid_pxl_status_addr); + CAM_DBG(CAM_ISP, "CSID:%d IPP path status:%d", + csid_hw->hw_intf->hw_idx, path_status); + /* if status is 0 then it is active */ + if (!path_status) + goto end; + } + + /* Check the RDI path status */ + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->csid_rdi_status_addr); + CAM_DBG(CAM_ISP, "CSID:%d RDI:%d path status:%d", + csid_hw->hw_intf->hw_idx, i, path_status); + /* if status is 0 then it is active */ + if (!path_status) + goto end; + } + +end: + /* if status is 0 then path is active */ + return path_status ? false : true; +} + +static void cam_tfe_csid_reset_path_data( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + struct cam_tfe_csid_path_cfg *path_data = NULL; + + if (!csid_hw || !res) { + CAM_WARN(CAM_ISP, "csid_hw or res cannot be NULL"); + return; + } + path_data = res->res_priv; + + if (path_data) { + path_data->init_frame_drop = 0; + path_data->res_sof_cnt = 0; + } +} + +static int cam_tfe_csid_cid_get(struct cam_tfe_csid_hw *csid_hw, + int32_t *vc, uint32_t *dt, uint32_t num_valid_vc_dt, uint32_t *cid) +{ + uint32_t i = 0, j = 0; + + if (num_valid_vc_dt == 0 || num_valid_vc_dt > CAM_ISP_TFE_VC_DT_CFG) { + CAM_ERR(CAM_ISP, "CSID:%d invalid num_valid_vc_dt: %d", + csid_hw->hw_intf->hw_idx, num_valid_vc_dt); + return -EINVAL; + } + + + /* Return already reserved CID if the VC/DT matches */ + for (i = 0; i < CAM_TFE_CSID_CID_MAX; i++) { + if (csid_hw->cid_res[i].cnt >= 1) { + if (!cam_tfe_match_vc_dt_pair(vc, dt, num_valid_vc_dt, + &csid_hw->cid_res[i])) { + csid_hw->cid_res[i].cnt++; + *cid = i; + CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated", + csid_hw->hw_intf->hw_idx, i); + return 0; + } + } + } + + for (i = 0; i < CAM_TFE_CSID_CID_MAX; i++) { + if (!csid_hw->cid_res[i].cnt) { + for (j = 0; j < num_valid_vc_dt; j++) { + csid_hw->cid_res[i].vc_dt[j].vc = vc[j]; + 
csid_hw->cid_res[i].vc_dt[j].dt = dt[j]; + csid_hw->cid_res[i].num_valid_vc_dt++; + csid_hw->cid_res[i].cnt++; + } + *cid = i; + CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated", + csid_hw->hw_intf->hw_idx, i); + return 0; + } + } + + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Free cid is not available", + csid_hw->hw_intf->hw_idx); + /* Dump CID values */ + for (i = 0; i < CAM_TFE_CSID_CID_MAX; i++) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CID:%d vc:%d dt:%d cnt:%d", + csid_hw->hw_intf->hw_idx, i, + csid_hw->cid_res[i].vc_dt[0].vc, + csid_hw->cid_res[i].vc_dt[0].dt, + csid_hw->cid_res[i].cnt); + } + return -EINVAL; +} + +static int cam_tfe_csid_global_reset(struct cam_tfe_csid_hw *csid_hw) +{ + struct cam_hw_soc_info *soc_info; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_tfe_csid_path_cfg *path_data = NULL; + int rc = 0; + uint32_t val = 0, i; + uint32_t status; + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = csid_hw->csid_info->csid_reg; + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "CSID:%d Csid reset", csid_hw->hw_intf->hw_idx); + + /* Mask all interrupts */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_top_irq_mask_addr); + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr); + + if (csid_hw->pxl_pipe_enable) + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_irq_mask_addr); + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr); + + /* clear all interrupts */ + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_top_irq_clear_addr); + + cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all, + soc_info->reg_map[0].mem_base + + 
csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr); + + if (csid_hw->pxl_pipe_enable) + cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_irq_clear_addr); + + for (i = 0 ; i < csid_reg->cmn_reg->num_rdis; i++) + cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_irq_cmd_addr); + + cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base + + csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr); + + /* perform the top CSID HW registers reset */ + cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb, + soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_rst_strobes_addr); + + rc = cam_common_read_poll_timeout(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_top_irq_status_addr, + CAM_TFE_CSID_TIMEOUT_SLEEP_US, CAM_TFE_CSID_TIMEOUT_ALL_US, + 0x1, 0x1, &status); + + if (rc < 0) { + CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d", + csid_hw->hw_intf->hw_idx, rc); + rc = -ETIMEDOUT; + } + + status = cam_io_r(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_top_irq_status_addr); + CAM_DBG(CAM_ISP, "Status reg %d", status); + + /* perform the SW registers reset */ + reinit_completion(&csid_hw->csid_top_complete); + cam_io_w_mb(csid_reg->cmn_reg->csid_reg_rst_stb, + soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_rst_strobes_addr); + + rc = cam_common_wait_for_completion_timeout( + &csid_hw->csid_top_complete, + msecs_to_jiffies(TFE_CSID_TIMEOUT)); + if (rc <= 0) { + CAM_ERR(CAM_ISP, "CSID:%d soft reg reset fail rc = %d", + csid_hw->hw_intf->hw_idx, rc); + if (rc == 0) + rc = -ETIMEDOUT; + } else + rc = 0; + + usleep_range(3000, 3010); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr); + if (val != 0) + CAM_ERR(CAM_ISP, "CSID:%d IRQ value after reset rc = %d", + 
csid_hw->hw_intf->hw_idx, val); + csid_hw->error_irq_count = 0; + csid_hw->prev_boot_timestamp = 0; + + if (csid_hw->pxl_pipe_enable) { + path_data = (struct cam_tfe_csid_path_cfg *) + csid_hw->ipp_res.res_priv; + path_data->res_sof_cnt = 0; + } + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + path_data = (struct cam_tfe_csid_path_cfg *) + csid_hw->rdi_res[i].res_priv; + path_data->res_sof_cnt = 0; + } + + return rc; +} + +static int cam_tfe_csid_path_reset(struct cam_tfe_csid_hw *csid_hw, + struct cam_tfe_csid_reset_cfg_args *reset) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info; + struct cam_isp_resource_node *res; + const struct cam_tfe_csid_reg_offset *csid_reg; + uint32_t reset_strb_addr, reset_strb_val, val, id; + struct completion *complete; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + res = reset->node_res; + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid hw state :%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + if (res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_ISP, "CSID:%d resource:%d", + csid_hw->hw_intf->hw_idx, res->res_id); + + if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) { + if (!csid_reg->ipp_reg) { + CAM_ERR(CAM_ISP, "CSID:%d IPP not supported :%d", + csid_hw->hw_intf->hw_idx, + res->res_id); + return -EINVAL; + } + + reset_strb_addr = csid_reg->ipp_reg->csid_pxl_rst_strobes_addr; + complete = &csid_hw->csid_ipp_complete; + reset_strb_val = csid_reg->cmn_reg->ipp_path_rst_stb_all; + + /* Enable path reset done interrupt */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_irq_mask_addr); + val |= TFE_CSID_PATH_INFO_RST_DONE; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_irq_mask_addr); + } else { + id 
= res->res_id; + if (!csid_reg->rdi_reg[id]) { + CAM_ERR(CAM_ISP, "CSID:%d RDI res not supported :%d", + csid_hw->hw_intf->hw_idx, + res->res_id); + return -EINVAL; + } + + reset_strb_addr = + csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr; + complete = + &csid_hw->csid_rdin_complete[id]; + reset_strb_val = csid_reg->cmn_reg->rdi_path_rst_stb_all; + + /* Enable path reset done interrupt */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr); + val |= TFE_CSID_PATH_INFO_RST_DONE; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr); + } + + reinit_completion(complete); + + /* Reset the corresponding tfe csid path */ + cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base + + reset_strb_addr); + + rc = cam_common_wait_for_completion_timeout(complete, + msecs_to_jiffies(TFE_CSID_TIMEOUT)); + if (rc <= 0) { + CAM_ERR(CAM_ISP, "CSID:%d Res id %d fail rc = %d", + csid_hw->hw_intf->hw_idx, + res->res_id, rc); + if (rc == 0) + rc = -ETIMEDOUT; + } + +end: + return rc; +} + +static int cam_tfe_csid_cid_reserve(struct cam_tfe_csid_hw *csid_hw, + struct cam_tfe_csid_hw_reserve_resource_args *cid_reserv, + uint32_t *cid_value) +{ + int i, rc = 0; + const struct cam_tfe_csid_reg_offset *csid_reg; + + CAM_DBG(CAM_ISP, + "CSID:%d res_id:0x%x Lane type:%d lane_num:%d dt:%d vc:%d", + csid_hw->hw_intf->hw_idx, + cid_reserv->in_port->res_id, + cid_reserv->in_port->lane_type, + cid_reserv->in_port->lane_num, + cid_reserv->in_port->dt[0], + cid_reserv->in_port->vc[0]); + + if (cid_reserv->in_port->res_id >= CAM_ISP_TFE_IN_RES_MAX) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid phy sel %d", + csid_hw->hw_intf->hw_idx, + cid_reserv->in_port->res_id); + rc = -EINVAL; + goto end; + } + + if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid lane type %d", + csid_hw->hw_intf->hw_idx, + cid_reserv->in_port->lane_type); + rc = -EINVAL; + goto end; + } + + 
if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_DPHY && + cid_reserv->in_port->lane_num > 4)) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid lane num %d", + csid_hw->hw_intf->hw_idx, + cid_reserv->in_port->lane_num); + rc = -EINVAL; + goto end; + } + + if (cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY && + cid_reserv->in_port->lane_num > 3) { + CAM_ERR(CAM_ISP, " CSID:%d Invalid lane type %d & num %d", + csid_hw->hw_intf->hw_idx, + cid_reserv->in_port->lane_type, + cid_reserv->in_port->lane_num); + rc = -EINVAL; + goto end; + } + + /* CSID CSI2 v1.1 supports 4 vc */ + for (i = 0; i < cid_reserv->in_port->num_valid_vc_dt; i++) { + if (cid_reserv->in_port->dt[i] > 0x3f || + cid_reserv->in_port->vc[i] > 0x3) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d", + csid_hw->hw_intf->hw_idx, + cid_reserv->in_port->vc[i], + cid_reserv->in_port->dt[i]); + rc = -EINVAL; + goto end; + } + } + + if (csid_hw->csi2_reserve_cnt == UINT_MAX) { + CAM_ERR(CAM_ISP, + "CSID%d reserve cnt reached max", + csid_hw->hw_intf->hw_idx); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_ISP, "Reserve_cnt %u", csid_hw->csi2_reserve_cnt); + + if (csid_hw->csi2_reserve_cnt) { + /* current configure res type should match requested res type */ + if (csid_hw->in_res_id != cid_reserv->in_port->res_id) { + rc = -EINVAL; + goto end; + } + + if (csid_hw->csi2_rx_cfg.lane_cfg != + cid_reserv->in_port->lane_cfg || + csid_hw->csi2_rx_cfg.lane_type != + cid_reserv->in_port->lane_type || + csid_hw->csi2_rx_cfg.lane_num != + cid_reserv->in_port->lane_num) { + rc = -EINVAL; + goto end; + } + } + + rc = cam_tfe_csid_cid_get(csid_hw, + cid_reserv->in_port->vc, + cid_reserv->in_port->dt, + cid_reserv->in_port->num_valid_vc_dt, + cid_value); + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%d CID Reserve failed res_id %d", + csid_hw->hw_intf->hw_idx, + cid_reserv->in_port->res_id); + goto end; + } + + if (!csid_hw->csi2_reserve_cnt) { + csid_hw->in_res_id = cid_reserv->in_port->res_id; + + 
csid_hw->csi2_rx_cfg.lane_cfg = + cid_reserv->in_port->lane_cfg; + csid_hw->csi2_rx_cfg.lane_type = + cid_reserv->in_port->lane_type; + csid_hw->csi2_rx_cfg.lane_num = + cid_reserv->in_port->lane_num; + + switch (cid_reserv->in_port->res_id) { + case CAM_ISP_TFE_IN_RES_TPG: + csid_hw->csi2_rx_cfg.phy_sel = 0; + break; + case CAM_ISP_TFE_IN_RES_CPHY_TPG_0: + case CAM_ISP_TFE_IN_RES_CPHY_TPG_1: + case CAM_ISP_TFE_IN_RES_CPHY_TPG_2: + csid_reg = csid_hw->csid_info->csid_reg; + csid_hw->csi2_rx_cfg.phy_sel = + ((cid_reserv->in_port->res_id & 0xFF) - + CAM_ISP_TFE_IN_RES_CPHY_TPG_0) + + csid_reg->csi2_reg->phy_tpg_base_id; + break; + default: + csid_hw->csi2_rx_cfg.phy_sel = + (cid_reserv->in_port->res_id & 0xFF) - 1; + break; + } + } + + csid_hw->csi2_reserve_cnt++; + CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired reserv cnt:%d", + csid_hw->hw_intf->hw_idx, *cid_value, + csid_hw->csi2_reserve_cnt); + +end: + return rc; +} + +static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw, + struct cam_tfe_csid_hw_reserve_resource_args *reserve) +{ + int i, rc = 0; + struct cam_tfe_csid_path_cfg *path_data; + struct cam_isp_resource_node *res; + uint32_t cid_value; + + /* CSID CSI2 v2.0 supports 4 vc */ + for (i = 0; i < reserve->in_port->num_valid_vc_dt; i++) { + if (reserve->in_port->dt[i] > 0x3f || + reserve->in_port->vc[i] > 0x3 || + (reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d", + csid_hw->hw_intf->hw_idx, + reserve->in_port->vc[i], + reserve->in_port->dt[i], + reserve->sync_mode); + rc = -EINVAL; + goto end; + } + } + + switch (reserve->res_id) { + case CAM_TFE_CSID_PATH_RES_IPP: + if (csid_hw->ipp_res.res_state != + CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_DBG(CAM_ISP, + "CSID:%d IPP resource not available %d", + csid_hw->hw_intf->hw_idx, + csid_hw->ipp_res.res_state); + rc = -EINVAL; + goto end; + } + + if (cam_tfe_csid_is_ipp_format_supported( + reserve->in_port->format)) { + CAM_ERR(CAM_ISP, + "CSID:%d 
res id:%d un support format %d", + csid_hw->hw_intf->hw_idx, reserve->res_id, + reserve->in_port->format); + rc = -EINVAL; + goto end; + } + rc = cam_tfe_csid_cid_reserve(csid_hw, reserve, &cid_value); + if (rc) + goto end; + + /* assign the IPP resource */ + res = &csid_hw->ipp_res; + CAM_DBG(CAM_ISP, + "CSID:%d IPP resource:%d acquired successfully", + csid_hw->hw_intf->hw_idx, res->res_id); + + break; + + case CAM_TFE_CSID_PATH_RES_RDI_0: + case CAM_TFE_CSID_PATH_RES_RDI_1: + case CAM_TFE_CSID_PATH_RES_RDI_2: + if (csid_hw->rdi_res[reserve->res_id].res_state != + CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, + "CSID:%d RDI:%d resource not available %d", + csid_hw->hw_intf->hw_idx, + reserve->res_id, + csid_hw->rdi_res[reserve->res_id].res_state); + rc = -EINVAL; + goto end; + } + + rc = cam_tfe_csid_cid_reserve(csid_hw, reserve, &cid_value); + if (rc) + goto end; + + res = &csid_hw->rdi_res[reserve->res_id]; + CAM_DBG(CAM_ISP, + "CSID:%d RDI resource:%d acquire success", + csid_hw->hw_intf->hw_idx, + res->res_id); + + break; + default: + CAM_ERR(CAM_ISP, "CSID:%d Invalid res id:%d", + csid_hw->hw_intf->hw_idx, reserve->res_id); + rc = -EINVAL; + goto end; + } + + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv; + + CAM_DBG(CAM_ISP, "sensor width:%d height:%d fps:%d vbi:%d hbi:%d", + reserve->in_port->sensor_width, + reserve->in_port->sensor_height, + reserve->in_port->sensor_fps, + reserve->in_port->sensor_vbi, + reserve->in_port->sensor_hbi); + path_data->sensor_width = reserve->in_port->sensor_width; + path_data->sensor_height = reserve->in_port->sensor_height; + path_data->sensor_fps = reserve->in_port->sensor_fps; + path_data->sensor_hbi = reserve->in_port->sensor_vbi; + path_data->sensor_vbi = reserve->in_port->sensor_hbi; + + path_data->cid = cid_value; + path_data->in_format = reserve->in_port->format; + path_data->out_format = reserve->out_port->format; + path_data->sync_mode = 
reserve->sync_mode; + path_data->height = reserve->in_port->height; + path_data->start_line = reserve->in_port->line_start; + path_data->end_line = reserve->in_port->line_end; + path_data->usage_type = reserve->in_port->usage_type; + + path_data->bayer_bin = reserve->in_port->bayer_bin; + path_data->qcfa_bin = reserve->in_port->qcfa_bin; + + csid_hw->event_cb = reserve->event_cb; + csid_hw->event_cb_priv = reserve->event_cb_prv; + + if (path_data->qcfa_bin) { + if (!cam_cpas_is_feature_supported(CAM_CPAS_QCFA_BINNING_ENABLE, + CAM_CPAS_HW_IDX_ANY, NULL)) { + CAM_ERR(CAM_ISP, "QCFA bin not supported!"); + rc = -EINVAL; + goto end; + } + } + + /* Enable crop only for ipp */ + if (reserve->res_id == CAM_TFE_CSID_PATH_RES_IPP) + path_data->crop_enable = true; + + CAM_DBG(CAM_ISP, + "Res id: %d height:%d line_start %d line_end %d crop_en %d", + reserve->res_id, reserve->in_port->height, + reserve->in_port->line_start, reserve->in_port->line_end, + path_data->crop_enable); + + path_data->num_valid_vc_dt = 0; + + for (i = 0; i < reserve->in_port->num_valid_vc_dt; i++) { + path_data->vc_dt[i].vc = reserve->in_port->vc[i]; + path_data->vc_dt[i].dt = reserve->in_port->dt[i]; + path_data->num_valid_vc_dt++; + } + + if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) { + path_data->start_pixel = reserve->in_port->left_start; + path_data->end_pixel = reserve->in_port->left_end; + path_data->width = reserve->in_port->left_width; + CAM_DBG(CAM_ISP, "CSID:%d master:startpixel 0x%x endpixel:0x%x", + csid_hw->hw_intf->hw_idx, path_data->start_pixel, + path_data->end_pixel); + CAM_DBG(CAM_ISP, "CSID:%d master:line start:0x%x line end:0x%x", + csid_hw->hw_intf->hw_idx, path_data->start_line, + path_data->end_line); + } else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) { + path_data->master_idx = reserve->master_idx; + CAM_DBG(CAM_ISP, "CSID:%d master_idx=%d", + csid_hw->hw_intf->hw_idx, path_data->master_idx); + path_data->start_pixel = reserve->in_port->right_start; + 
path_data->end_pixel = reserve->in_port->right_end; + path_data->width = reserve->in_port->right_width; + CAM_DBG(CAM_ISP, "CSID:%d slave:start:0x%x end:0x%x width 0x%x", + csid_hw->hw_intf->hw_idx, path_data->start_pixel, + path_data->end_pixel, path_data->width); + CAM_DBG(CAM_ISP, "CSID:%d slave:line start:0x%x line end:0x%x", + csid_hw->hw_intf->hw_idx, path_data->start_line, + path_data->end_line); + } else { + path_data->width = reserve->in_port->left_width; + path_data->start_pixel = reserve->in_port->left_start; + path_data->end_pixel = reserve->in_port->left_end; + CAM_DBG(CAM_ISP, "Res id: %d left width %d start: %d stop:%d", + reserve->res_id, reserve->in_port->left_width, + reserve->in_port->left_start, + reserve->in_port->left_end); + } + + CAM_DBG(CAM_ISP, "Res %d width %d height %d", reserve->res_id, + path_data->width, path_data->height); + reserve->node_res = res; + +end: + return rc; +} + +static int cam_tfe_csid_enable_csi2( + struct cam_tfe_csid_hw *csid_hw) +{ + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_csid_ppi_cfg ppi_lane_cfg; + uint32_t val = 0; + uint32_t ppi_index = 0, rc; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + CAM_DBG(CAM_ISP, "CSID:%d config csi2 rx", + csid_hw->hw_intf->hw_idx); + + /* rx cfg0 */ + val = 0; + val = (csid_hw->csi2_rx_cfg.lane_num - 1) | + (csid_hw->csi2_rx_cfg.lane_cfg << 4) | + (csid_hw->csi2_rx_cfg.lane_type << 24); + val |= (csid_hw->csi2_rx_cfg.phy_sel & + csid_reg->csi2_reg->csi2_rx_phy_num_mask) << 20; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr); + + if (csid_hw->in_res_id >= CAM_ISP_TFE_IN_RES_CPHY_TPG_0 && + csid_hw->in_res_id <= CAM_ISP_TFE_IN_RES_CPHY_TPG_2 && + csid_reg->csi2_reg->need_to_sel_tpg_mux) { + cam_cpas_enable_tpg_mux_sel(csid_hw->in_res_id - + CAM_ISP_TFE_IN_RES_CPHY_TPG_0); + } + + /* rx cfg1 */ + val = (1 << 
csid_reg->csi2_reg->csi2_misr_enable_shift_val); + + /* enable packet ecc correction */ + val |= 1; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr); + + /* Enable the CSI2 rx inerrupts */ + val = TFE_CSID_CSI2_RX_INFO_RST_DONE | + TFE_CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW | + TFE_CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW | + TFE_CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW | + TFE_CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW | + TFE_CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION | + TFE_CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION | + TFE_CSID_CSI2_RX_ERROR_CRC | + TFE_CSID_CSI2_RX_ERROR_ECC | + TFE_CSID_CSI2_RX_ERROR_MMAPPED_VC_DT | + TFE_CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW | + TFE_CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME | + TFE_CSID_CSI2_RX_ERROR_CPHY_PH_CRC; + + /* Enable the interrupt based on csid debug info set */ + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOT_IRQ) + val |= TFE_CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED | + TFE_CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED | + TFE_CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED | + TFE_CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED; + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOT_IRQ) + val |= TFE_CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED | + TFE_CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED | + TFE_CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED | + TFE_CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED; + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) + val |= TFE_CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED; + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) + val |= TFE_CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED; + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) + val |= TFE_CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr); + /* + * There is one to one mapping for ppi index with phy index + * we do not always update phy sel equal to phy number,for some + * targets "phy_sel = phy_num + 1", and for 
some targets it is + * "phy_sel = phy_num", ppi_index should be updated accordingly + */ + ppi_index = csid_hw->csi2_rx_cfg.phy_sel - csid_reg->csi2_reg->phy_sel_base; + + if (csid_hw->ppi_hw_intf[ppi_index] && csid_hw->ppi_enable) { + ppi_lane_cfg.lane_type = csid_hw->csi2_rx_cfg.lane_type; + ppi_lane_cfg.lane_num = csid_hw->csi2_rx_cfg.lane_num; + ppi_lane_cfg.lane_cfg = csid_hw->csi2_rx_cfg.lane_cfg; + + CAM_DBG(CAM_ISP, "ppi_index to init %d", ppi_index); + rc = csid_hw->ppi_hw_intf[ppi_index]->hw_ops.init( + csid_hw->ppi_hw_intf[ppi_index]->hw_priv, + &ppi_lane_cfg, + sizeof(struct cam_csid_ppi_cfg)); + if (rc < 0) { + CAM_ERR(CAM_ISP, "PPI:%d Init Failed", + ppi_index); + return rc; + } + } + + + return 0; +} + +static int cam_tfe_csid_disable_csi2( + struct cam_tfe_csid_hw *csid_hw) +{ + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + uint32_t ppi_index = 0, rc; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + CAM_DBG(CAM_ISP, "CSID:%d Disable csi2 rx", + csid_hw->hw_intf->hw_idx); + + /* Disable the CSI2 rx inerrupts */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr); + + /* Reset the Rx CFG registers */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr); + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr); + + ppi_index = csid_hw->csi2_rx_cfg.phy_sel - csid_reg->csi2_reg->phy_sel_base; + if (csid_hw->ppi_hw_intf[ppi_index] && csid_hw->ppi_enable) { + /* De-Initialize the PPI bridge */ + CAM_DBG(CAM_ISP, "ppi_index to de-init %d\n", ppi_index); + rc = csid_hw->ppi_hw_intf[ppi_index]->hw_ops.deinit( + csid_hw->ppi_hw_intf[ppi_index]->hw_priv, + NULL, 0); + if (rc < 0) { + CAM_ERR(CAM_ISP, "PPI:%d De-Init Failed", ppi_index); + return rc; + } + } + + return 0; +} + +static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw *csid_hw) +{ + int rc = 
0; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_tfe_csid_path_cfg *path_data = NULL; + uint32_t i, val, clk_lvl; + unsigned long flags; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + /* overflow check before increment */ + if (csid_hw->hw_info->open_count == UINT_MAX) { + CAM_ERR(CAM_ISP, "CSID:%d Open count reached max", + csid_hw->hw_intf->hw_idx); + return -EINVAL; + } + + /* Increment ref Count */ + csid_hw->hw_info->open_count++; + if (csid_hw->hw_info->open_count > 1) { + CAM_DBG(CAM_ISP, "CSID hw has already been enabled"); + return rc; + } + + CAM_DBG(CAM_ISP, "CSID:%d init CSID HW", + csid_hw->hw_intf->hw_idx); + + rc = cam_soc_util_get_clk_level(soc_info, csid_hw->clk_rate, + soc_info->src_clk_idx, &clk_lvl); + CAM_DBG(CAM_ISP, "CSID clock lvl %u", clk_lvl); + + rc = cam_tfe_csid_enable_soc_resources(soc_info, clk_lvl); + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed", + csid_hw->hw_intf->hw_idx); + goto err; + } + + csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP; + /* Disable the top IRQ interrupt */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_top_irq_mask_addr); + /* Reset CSID top */ + rc = cam_tfe_csid_global_reset(csid_hw); + if (rc) + goto disable_soc; + + /* clear all interrupts */ + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_top_irq_clear_addr); + + cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr); + + if (csid_hw->pxl_pipe_enable) + cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_irq_clear_addr); + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) + cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all, + soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr); + + cam_io_w_mb(1, 
soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_irq_cmd_addr); + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_hw_version_addr); + CAM_DBG(CAM_ISP, "CSID:%d CSID HW version: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + /* enable the csi2 rx */ + rc = cam_tfe_csid_enable_csi2(csid_hw); + if (rc) + goto disable_soc; + + spin_lock_irqsave(&csid_hw->spin_lock, flags); + csid_hw->fatal_err_detected = false; + csid_hw->device_enabled = 1; + spin_unlock_irqrestore(&csid_hw->spin_lock, flags); + + if (csid_hw->pxl_pipe_enable ) { + path_data = (struct cam_tfe_csid_path_cfg *) + csid_hw->ipp_res.res_priv; + path_data->res_sof_cnt = 0; + } + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + path_data = (struct cam_tfe_csid_path_cfg *) + csid_hw->rdi_res[i].res_priv; + path_data->res_sof_cnt = 0; + } + + return rc; + +disable_soc: + cam_tfe_csid_disable_soc_resources(soc_info); + csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN; +err: + csid_hw->hw_info->open_count--; + return rc; +} + +static int cam_tfe_csid_disable_hw(struct cam_tfe_csid_hw *csid_hw) +{ + int rc = -EINVAL; + struct cam_hw_soc_info *soc_info; + const struct cam_tfe_csid_reg_offset *csid_reg; + unsigned long flags; + + /* Check for refcount */ + if (!csid_hw->hw_info->open_count) { + CAM_WARN(CAM_ISP, "Unbalanced disable_hw"); + return rc; + } + + /* Decrement ref Count */ + csid_hw->hw_info->open_count--; + + if (csid_hw->hw_info->open_count) { + rc = 0; + return rc; + } + + soc_info = &csid_hw->hw_info->soc_info; + csid_reg = csid_hw->csid_info->csid_reg; + + /* Disable the csi2 */ + cam_tfe_csid_disable_csi2(csid_hw); + + CAM_DBG(CAM_ISP, "%s:Calling Global Reset", __func__); + cam_tfe_csid_global_reset(csid_hw); + CAM_DBG(CAM_ISP, "%s:Global Reset Done", __func__); + + CAM_DBG(CAM_ISP, "CSID:%d De-init CSID HW", + csid_hw->hw_intf->hw_idx); + + /* Disable the top IRQ interrupt */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + 
csid_reg->cmn_reg->csid_top_irq_mask_addr); + + rc = cam_tfe_csid_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed", + csid_hw->hw_intf->hw_idx); + + spin_lock_irqsave(&csid_hw->spin_lock, flags); + csid_hw->device_enabled = 0; + spin_unlock_irqrestore(&csid_hw->spin_lock, flags); + csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN; + csid_hw->error_irq_count = 0; + csid_hw->prev_boot_timestamp = 0; + + return rc; +} + +static int cam_tfe_csid_init_config_pxl_path( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + struct cam_tfe_csid_path_cfg *path_data; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_tfe_csid_pxl_reg_offset *pxl_reg = NULL; + uint32_t decode_format = 0, plain_format = 0, val = 0; + + path_data = (struct cam_tfe_csid_path_cfg *) res->res_priv; + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + pxl_reg = csid_reg->ipp_reg; + if (!pxl_reg) { + CAM_ERR(CAM_ISP, "CSID:%d IPP :%d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "Config IPP Path"); + rc = cam_tfe_csid_get_format_ipp(path_data->in_format, + &decode_format, &plain_format); + if (rc) + return rc; + + /* + * configure Pxl path and enable the time stamp capture. 
+ * enable the HW measurement blocks + */ + val = (path_data->vc_dt[0].vc << csid_reg->cmn_reg->vc_shift_val) | + (path_data->vc_dt[0].dt << csid_reg->cmn_reg->dt_shift_val) | + (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) | + (decode_format << csid_reg->cmn_reg->fmt_shift_val) | + (path_data->crop_enable << + csid_reg->cmn_reg->crop_h_en_shift_val) | + (path_data->crop_enable << + csid_reg->cmn_reg->crop_v_en_shift_val) | + (1 << 1); + + if (pxl_reg->binning_supported && (path_data->qcfa_bin || + path_data->bayer_bin)) { + + CAM_DBG(CAM_ISP, + "Set Binning mode, binning_supported: %d, qcfa_bin: %d, bayer_bin: %d", + pxl_reg->binning_supported, path_data->qcfa_bin, + path_data->bayer_bin); + + if (path_data->bayer_bin && !(pxl_reg->binning_supported & + CAM_TFE_CSID_BIN_BAYER)) { + CAM_ERR(CAM_ISP, + "Bayer bin is not supported! binning_supported: %d", + pxl_reg->binning_supported); + return -EINVAL; + } + + if (path_data->qcfa_bin && !(pxl_reg->binning_supported & + CAM_TFE_CSID_BIN_QCFA)) { + CAM_ERR(CAM_ISP, + "QCFA bin is not supported! 
binning_supported: %d", + pxl_reg->binning_supported); + return -EINVAL; + } + + if (path_data->qcfa_bin && path_data->bayer_bin) { + CAM_ERR(CAM_ISP, + "Bayer bin and QCFA bin could not be enabled together!"); + return -EINVAL; + } + + if (path_data->bayer_bin) + val |= 1 << pxl_reg->bin_en_shift_val; + + if (path_data->qcfa_bin) { + val |= 1 << pxl_reg->bin_qcfa_en_shift_val; + val |= 1 << pxl_reg->bin_en_shift_val; + } + } + + if (csid_reg->cmn_reg->format_measure_support && + (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO)) + val |= (1 << pxl_reg->format_measure_en_shift_val); + + val |= (1 << pxl_reg->pix_store_en_shift_val); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + + if (pxl_reg->is_multi_vc_dt_supported && + (path_data->num_valid_vc_dt > 1)) { + val = ((path_data->vc_dt[1].vc << + csid_reg->cmn_reg->vc1_shift_val) | + (path_data->vc_dt[1].dt << + csid_reg->cmn_reg->dt1_shift_val) | + 1 << csid_reg->cmn_reg->multi_vc_dt_en_shift_val); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_multi_vcdt_cfg0_addr); + } + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg1_addr); + + /* select the post irq sub sample strobe for time stamp capture */ + val |= TFE_CSID_TIMESTAMP_STB_POST_IRQ; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg1_addr); + + if (path_data->crop_enable) { + val = (((path_data->end_pixel & 0xFFFF) << + csid_reg->cmn_reg->crop_shift) | + (path_data->start_pixel & 0xFFFF)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_hcrop_addr); + CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + val = (((path_data->end_line & 0xFFFF) << + csid_reg->cmn_reg->crop_shift) | + (path_data->start_line & 0xFFFF)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_vcrop_addr); + CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x", + 
csid_hw->hw_intf->hw_idx, val); + + /* Enable generating early eof strobe based on crop config */ + if (!(csid_hw->csid_debug & TFE_CSID_DEBUG_DISABLE_EARLY_EOF)) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + val |= (1 << pxl_reg->early_eof_en_shift_val); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + } + } + + if (csid_reg->cmn_reg->format_measure_support && + (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO)) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_format_measure_cfg0_addr); + val |= pxl_reg->measure_en_hbi_vbi_cnt_val; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_format_measure_cfg0_addr); + } + + /* Enable the Pxl path */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + val |= (1 << csid_reg->cmn_reg->path_en_shift_val); + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + + /* Enable Error Detection Overflow ctrl mode: 2 -> Detect overflow */ + val = 0x9; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_err_recovery_cfg0_addr); + + /* configure the rx packet capture based on csid debug set */ + val = 0; + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) + val = ((1 << + csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) | + (path_data->vc_dt[0].vc << + csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift)); + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) + val |= ((1 << + csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) | + (path_data->vc_dt[0].dt << + csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) | + (path_data->vc_dt[0].vc << + csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift)); + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) + val |= ((1 << + csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) | + (path_data->vc_dt[0].dt << + 
csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) | + (path_data->vc_dt[0].vc << + csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift)); + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr); + CAM_DBG(CAM_ISP, "rx capture control value 0x%x", val); + + res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW; + + return rc; +} + +static int cam_tfe_csid_deinit_pxl_path( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_tfe_csid_pxl_reg_offset *pxl_reg = NULL; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + pxl_reg = csid_reg->ipp_reg; + if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) { + CAM_ERR(CAM_ISP, + "CSID:%d IPP Res type %d res_id:%d in wrong state %d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_state); + rc = -EINVAL; + } + + if (!pxl_reg) { + CAM_ERR(CAM_ISP, "CSID:%d IPP %d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + rc = -EINVAL; + goto end; + } + + /* Disable Error Recovery */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_err_recovery_cfg0_addr); + +end: + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + return rc; +} + +static int cam_tfe_csid_enable_pxl_path( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_tfe_csid_path_cfg *path_data; + const struct cam_tfe_csid_pxl_reg_offset *pxl_reg = NULL; + uint32_t val = 0; + + path_data = (struct cam_tfe_csid_path_cfg *) res->res_priv; + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + pxl_reg = csid_reg->ipp_reg; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) { + CAM_ERR(CAM_ISP, + "CSID:%d IPP path res type:%d res_id:%d Invalid 
state%d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + if (!pxl_reg) { + CAM_ERR(CAM_ISP, "CSID:%d IPP resid: %d not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "Enable IPP path"); + + /* Set master or slave path */ + if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) + /* Set halt mode as master */ + val = (TFE_CSID_HALT_MODE_MASTER << + pxl_reg->halt_mode_shift) | + (pxl_reg->halt_master_sel_master_val << + pxl_reg->halt_master_sel_shift); + else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + /* Set halt mode as slave and set master idx */ + val = (TFE_CSID_HALT_MODE_SLAVE << pxl_reg->halt_mode_shift); + else + /* Default is internal halt mode */ + val = 0; + + /* + * Resume at frame boundary if Master or No Sync. + * Slave will get resume command from Master. + */ + if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER || + path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) + val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + + CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x", + csid_hw->hw_intf->hw_idx, val); + + /* Enable the required pxl path interrupts */ + val = TFE_CSID_PATH_INFO_RST_DONE | + TFE_CSID_PATH_ERROR_FIFO_OVERFLOW | + TFE_CSID_PATH_IPP_ERROR_CCIF_VIOLATION | + TFE_CSID_PATH_IPP_OVERFLOW_IRQ; + + if (csid_reg->cmn_reg->format_measure_support) { + val |= TFE_CSID_PATH_ERROR_PIX_COUNT | + TFE_CSID_PATH_ERROR_LINE_COUNT; + } + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ) + val |= TFE_CSID_PATH_INFO_INPUT_SOF; + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ) + val |= TFE_CSID_PATH_INFO_INPUT_EOF; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_irq_mask_addr); + + CAM_DBG(CAM_ISP, "Enable IPP IRQ mask 0x%x", val); + + res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + return 0; +} + +static int 
cam_tfe_csid_change_pxl_halt_mode( + struct cam_tfe_csid_hw *csid_hw, + struct cam_tfe_csid_hw_halt_args *csid_halt) +{ + uint32_t val = 0; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_tfe_csid_pxl_reg_offset *pxl_reg; + struct cam_isp_resource_node *res; + + res = csid_halt->node_res; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_id != CAM_TFE_CSID_PATH_RES_IPP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res id %d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, "CSID:%d Res:%d in invalid state:%d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_state); + return -EINVAL; + } + + pxl_reg = csid_reg->ipp_reg; + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_irq_mask_addr); + + /* configure Halt for slave */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + val &= ~0xC; + val |= (csid_halt->halt_mode << 2); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + CAM_DBG(CAM_ISP, "CSID:%d IPP path Res halt mode:%d configured:%x", + csid_hw->hw_intf->hw_idx, csid_halt->halt_mode, val); + + return 0; +} + +static int cam_tfe_csid_disable_pxl_path( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res, + enum cam_tfe_csid_halt_cmd stop_cmd) +{ + int rc = 0; + uint32_t val = 0; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_tfe_csid_path_cfg *path_data; + const struct cam_tfe_csid_pxl_reg_offset *pxl_reg; + + path_data = (struct cam_tfe_csid_path_cfg *) res->res_priv; + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + 
} + + if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW || + res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_state); + return rc; + } + + pxl_reg = csid_reg->ipp_reg; + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_DBG(CAM_ISP, "CSID:%d IPP path Res:%d Invalid state%d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_state); + return -EINVAL; + } + + if (!pxl_reg) { + CAM_ERR(CAM_ISP, "CSID:%d IPP %d is not supported on HW", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + if (stop_cmd != CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY && + stop_cmd != CAM_TFE_CSID_HALT_IMMEDIATELY) { + CAM_ERR(CAM_ISP, + "CSID:%d IPP path un supported stop command:%d", + csid_hw->hw_intf->hw_idx, stop_cmd); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "CSID:%d res_id:%d IPP path", + csid_hw->hw_intf->hw_idx, res->res_id); + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_irq_mask_addr); + + if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER || + path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) { + /* configure Halt */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + val &= ~0x3; + val |= stop_cmd; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + } + + if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE && + stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) { + /* configure Halt for slave */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + val &= ~0xF; + val |= stop_cmd; + val |= (TFE_CSID_HALT_MODE_MASTER << 2); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_ctrl_addr); + } + + return rc; +} + +static int cam_tfe_csid_init_config_rdi_path( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + struct cam_tfe_csid_path_cfg *path_data; + const struct 
cam_tfe_csid_reg_offset *csid_reg; + const struct cam_tfe_csid_rdi_reg_offset *rdi_reg; + struct cam_hw_soc_info *soc_info; + uint32_t path_format = 0, plain_fmt = 0, val = 0, id; + + path_data = (struct cam_tfe_csid_path_cfg *) res->res_priv; + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + id = res->res_id; + if (!csid_reg->rdi_reg[id]) { + CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW", + csid_hw->hw_intf->hw_idx, id); + return -EINVAL; + } + + rdi_reg = csid_reg->rdi_reg[id]; + rc = cam_tfe_csid_get_format_rdi(path_data->in_format, + path_data->out_format, &path_format, &plain_fmt); + if (rc) + return rc; + + /* + * RDI path config and enable the time stamp capture + * Enable the measurement blocks + */ + val = (path_data->vc_dt[0].vc << csid_reg->cmn_reg->vc_shift_val) | + (path_data->vc_dt[0].dt << csid_reg->cmn_reg->dt_shift_val) | + (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) | + (path_format << csid_reg->cmn_reg->fmt_shift_val) | + (plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) | + (1 << 2) | 1; + + if (csid_reg->cmn_reg->format_measure_support && + (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO)) + val |= (1 << rdi_reg->format_measure_en_shift_val); + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr); + + if (csid_reg->rdi_reg[id]->is_multi_vc_dt_supported && + (path_data->num_valid_vc_dt > 1)) { + val = ((path_data->vc_dt[1].vc << + csid_reg->cmn_reg->vc1_shift_val) | + (path_data->vc_dt[1].dt << + csid_reg->cmn_reg->dt1_shift_val) | + (1 << csid_reg->cmn_reg->multi_vc_dt_en_shift_val)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_multi_vcdt_cfg0_addr); + } + + /* select the post irq sub sample strobe for time stamp capture */ + cam_io_w_mb(TFE_CSID_TIMESTAMP_STB_POST_IRQ, + soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr); + + /* Enable Error Detection, Overflow 
ctrl mode: 2 -> Detect overflow */ + val = 0x9; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_err_recovery_cfg0_addr); + + /* Configure the halt mode */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr); + + if (csid_reg->cmn_reg->format_measure_support && + (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO)) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_format_measure_cfg0_addr); + val |= rdi_reg->measure_en_hbi_vbi_cnt_val; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_format_measure_cfg0_addr); + } + + /* Enable the RPP path */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr); + val |= (1 << csid_reg->cmn_reg->path_en_shift_val); + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr); + + /* configure the rx packet capture based on csid debug set */ + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) + val = ((1 << + csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) | + (path_data->vc_dt[0].vc << + csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift)); + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) + val |= ((1 << + csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) | + (path_data->vc_dt[0].dt << + csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) | + (path_data->vc_dt[0].vc << + csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift)); + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) + val |= ((1 << + csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) | + (path_data->vc_dt[0].dt << + csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) | + (path_data->vc_dt[0].vc << + csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr); + + res->res_state = 
CAM_ISP_RESOURCE_STATE_INIT_HW; + + return rc; +} + +static int cam_tfe_csid_deinit_rdi_path( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + int rc = 0; + uint32_t id; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + id = res->res_id; + + if (res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2 || + res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW || + !csid_reg->rdi_reg[id]) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res id%d state:%d", + csid_hw->hw_intf->hw_idx, res->res_id, + res->res_state); + return -EINVAL; + } + + /* Disable Error Recovery */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_err_recovery_cfg0_addr); + + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + return rc; +} + +static int cam_tfe_csid_enable_rdi_path( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_tfe_csid_path_cfg *path_data; + uint32_t id, val; + bool path_active = false; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + id = res->res_id; + path_data = (struct cam_tfe_csid_path_cfg *) res->res_priv; + + if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW || + res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2 || + !csid_reg->rdi_reg[id]) { + CAM_ERR(CAM_ISP, + "CSID:%d invalid res type:%d res_id:%d state%d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_state); + return -EINVAL; + } + + /*Drop one frame extra on RDI for dual TFE use case */ + if (path_data->usage_type == CAM_ISP_TFE_IN_RES_USAGE_DUAL) + path_data->init_frame_drop = 1; + + /*resume at frame boundary */ + if (!path_data->init_frame_drop) { + CAM_DBG(CAM_ISP, "Start RDI:%d path", id); + /* resume at frame boundary */ + cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY, 
+ soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr); + } else { + path_active = cam_tfe_csid_check_path_active(csid_hw); + if (path_active) + cam_io_w_mb(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY, + soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr); + + CAM_DBG(CAM_ISP, + "CSID:%d %s RDI:%d path frame drop %d", + csid_hw->hw_intf->hw_idx, + path_active ? "Starting" : "Not Starting", id, + path_data->init_frame_drop); + } + + /* Enable the required RDI interrupts */ + val = TFE_CSID_PATH_INFO_RST_DONE | TFE_CSID_PATH_ERROR_FIFO_OVERFLOW | + TFE_CSID_PATH_RDI_ERROR_CCIF_VIOLATION | + TFE_CSID_PATH_RDI_OVERFLOW_IRQ; + + if (csid_reg->cmn_reg->format_measure_support) { + val |= TFE_CSID_PATH_ERROR_PIX_COUNT | + TFE_CSID_PATH_ERROR_LINE_COUNT; + } + + if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ) || + (path_data->init_frame_drop && !path_active)) + val |= TFE_CSID_PATH_INFO_INPUT_SOF; + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ) + val |= TFE_CSID_PATH_INFO_INPUT_EOF; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr); + + res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + return 0; +} + +static int cam_tfe_csid_disable_rdi_path( + struct cam_tfe_csid_hw *csid_hw, + struct cam_isp_resource_node *res, + enum cam_tfe_csid_halt_cmd stop_cmd) +{ + int rc = 0; + uint32_t id, val = 0; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + struct cam_tfe_csid_path_cfg *path_data; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + id = res->res_id; + path_data = (struct cam_tfe_csid_path_cfg *) res->res_priv; + + if ((res->res_id > CAM_TFE_CSID_PATH_RES_RDI_2) || + (!csid_reg->rdi_reg[res->res_id])) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d", + csid_hw->hw_intf->hw_idx, res->res_id); + return -EINVAL; + } + + if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW || 
+ res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CSID:%d Res:%d already in stopped state:%d", + csid_hw->hw_intf->hw_idx, + res->res_id, res->res_state); + return rc; + } + + if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CSID:%d Res:%d Invalid res_state%d", + csid_hw->hw_intf->hw_idx, res->res_id, + res->res_state); + return -EINVAL; + } + + if (stop_cmd != CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY && + stop_cmd != CAM_TFE_CSID_HALT_IMMEDIATELY) { + CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d", + csid_hw->hw_intf->hw_idx, stop_cmd); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "CSID:%d res_id:%d", + csid_hw->hw_intf->hw_idx, res->res_id); + + path_data->init_frame_drop = 0; + path_data->res_sof_cnt = 0; + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr); + + /* Halt the RDI path */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr); + val &= ~0x3; + val |= stop_cmd; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr); + + return rc; +} + +static int cam_tfe_csid_poll_stop_status( + struct cam_tfe_csid_hw *csid_hw, + uint32_t res_mask) +{ + int rc = 0; + uint32_t csid_status_addr = 0, val = 0, res_id = 0; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + for (; res_id < CAM_TFE_CSID_PATH_RES_MAX; res_id++, res_mask >>= 1) { + if ((res_mask & 0x1) == 0) + continue; + val = 0; + + if (res_id == CAM_TFE_CSID_PATH_RES_IPP) { + csid_status_addr = + csid_reg->ipp_reg->csid_pxl_status_addr; + + if (csid_hw->ipp_res.res_state != + CAM_ISP_RESOURCE_STATE_STREAMING) + continue; + + } else { + csid_status_addr = + csid_reg->rdi_reg[res_id]->csid_rdi_status_addr; + + if (csid_hw->rdi_res[res_id].res_state != + 
CAM_ISP_RESOURCE_STATE_STREAMING) + continue; + + } + + CAM_DBG(CAM_ISP, "start polling CSID:%d res_id:%d", + csid_hw->hw_intf->hw_idx, res_id); + + rc = cam_common_read_poll_timeout( + soc_info->reg_map[0].mem_base + + csid_status_addr, + CAM_TFE_CSID_TIMEOUT_SLEEP_US, + CAM_TFE_CSID_TIMEOUT_ALL_US, + 0x1, 0x1, &val); + + if (rc < 0) { + CAM_ERR(CAM_ISP, "CSID:%d res:%d halt failed rc %d", + csid_hw->hw_intf->hw_idx, res_id, rc); + rc = -ETIMEDOUT; + break; + } + CAM_DBG(CAM_ISP, "End polling CSID:%d res_id:%d", + csid_hw->hw_intf->hw_idx, res_id); + } + + return rc; +} + +static int __cam_tfe_csid_read_timestamp(void __iomem *base, + uint32_t msb_offset, uint32_t lsb_offset, uint64_t *timestamp) +{ + uint32_t lsb, msb, tmp, torn = 0; + + msb = cam_io_r_mb(base + msb_offset); + do { + tmp = msb; + torn++; + lsb = cam_io_r_mb(base + lsb_offset); + msb = cam_io_r_mb(base + msb_offset); + } while (tmp != msb); + + *timestamp = msb; + *timestamp = (*timestamp << 32) | lsb; + + return (torn > 1); +} + +static int cam_tfe_csid_get_time_stamp( + struct cam_tfe_csid_hw *csid_hw, void *cmd_args) +{ + struct cam_tfe_csid_get_time_stamp_args *time_stamp; + struct cam_isp_resource_node *res; + const struct cam_tfe_csid_reg_offset *csid_reg; + struct cam_hw_soc_info *soc_info; + const struct cam_tfe_csid_rdi_reg_offset *rdi_reg; + struct timespec64 ts; + uint32_t id, torn, prev_torn; + uint64_t time_delta; + + time_stamp = (struct cam_tfe_csid_get_time_stamp_args *)cmd_args; + res = time_stamp->node_res; + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH || + res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d", + csid_hw->hw_intf->hw_idx, + 
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
		torn = __cam_tfe_csid_read_timestamp(
			soc_info->reg_map[0].mem_base,
			csid_reg->ipp_reg->csid_pxl_timestamp_curr1_sof_addr,
			csid_reg->ipp_reg->csid_pxl_timestamp_curr0_sof_addr,
			&time_stamp->time_stamp_val);
		if (time_stamp->get_prev_timestamp) {
			/*
			 * NOTE(review): prev_torn is assigned here but never
			 * read afterwards (only torn is logged) — set but
			 * unused.
			 */
			prev_torn = __cam_tfe_csid_read_timestamp(
				soc_info->reg_map[0].mem_base,
				csid_reg->ipp_reg->csid_pxl_timestamp_perv1_sof_addr,
				csid_reg->ipp_reg->csid_pxl_timestamp_perv0_sof_addr,
				&time_stamp->prev_time_stamp_val);
		}
	} else {
		id = res->res_id;
		rdi_reg = csid_reg->rdi_reg[id];
		torn = __cam_tfe_csid_read_timestamp(
			soc_info->reg_map[0].mem_base,
			rdi_reg->csid_rdi_timestamp_curr1_sof_addr,
			rdi_reg->csid_rdi_timestamp_curr0_sof_addr,
			&time_stamp->time_stamp_val);
		if (time_stamp->get_prev_timestamp) {
			prev_torn = __cam_tfe_csid_read_timestamp(
				soc_info->reg_map[0].mem_base,
				rdi_reg->csid_rdi_timestamp_prev1_sof_addr,
				rdi_reg->csid_rdi_timestamp_prev0_sof_addr,
				&time_stamp->prev_time_stamp_val);
		}
	}

	/* Convert raw qtimer ticks to ns using the mul/div factors */
	time_stamp->time_stamp_val = mul_u64_u32_div(
		time_stamp->time_stamp_val,
		CAM_TFE_CSID_QTIMER_MUL_FACTOR,
		CAM_TFE_CSID_QTIMER_DIV_FACTOR);

	if (time_stamp->get_prev_timestamp) {
		time_stamp->prev_time_stamp_val = mul_u64_u32_div(
			time_stamp->prev_time_stamp_val,
			CAM_TFE_CSID_QTIMER_MUL_FACTOR,
			CAM_TFE_CSID_QTIMER_DIV_FACTOR);
	}

	if (!csid_hw->prev_boot_timestamp) {
		/* First call: anchor boot timestamp to current boottime */
		ktime_get_boottime_ts64(&ts);
		time_stamp->boot_timestamp =
			(uint64_t)((ts.tv_sec * 1000000000) +
			ts.tv_nsec);
		csid_hw->prev_qtimer_ts = 0;
		CAM_DBG(CAM_ISP, "timestamp:%lld",
			time_stamp->boot_timestamp);
	} else {
		/* Subsequent calls: extrapolate from the qtimer delta */
		time_delta = time_stamp->time_stamp_val -
			csid_hw->prev_qtimer_ts;

		if (csid_hw->prev_boot_timestamp >
			U64_MAX - time_delta) {
			CAM_WARN(CAM_ISP, "boottimestamp overflowed");
			CAM_INFO(CAM_ISP,
				"currQTimer %lx prevQTimer %lx prevBootTimer %lx torn %d",
				time_stamp->time_stamp_val,
				csid_hw->prev_qtimer_ts,
				csid_hw->prev_boot_timestamp, torn);
			return -EINVAL;
		}

		time_stamp->boot_timestamp =
			csid_hw->prev_boot_timestamp + time_delta;
	}

	CAM_DBG(CAM_ISP,
		"currQTimer %lx prevQTimer %lx currBootTimer %lx prevBootTimer %lx torn %d",
		time_stamp->time_stamp_val,
		csid_hw->prev_qtimer_ts, time_stamp->boot_timestamp,
		csid_hw->prev_boot_timestamp, torn);

	/* Remember values so the next call can extrapolate */
	csid_hw->prev_qtimer_ts = time_stamp->time_stamp_val;
	csid_hw->prev_boot_timestamp = time_stamp->boot_timestamp;

	return 0;
}

/*
 * Read and log the measured horizontal/vertical blanking intervals for the
 * given IPP or RDI path from the format-measure registers.
 */
static int cam_tfe_csid_print_hbi_vbi(
	struct cam_tfe_csid_hw *csid_hw,
	struct cam_isp_resource_node *res)
{
	const struct cam_tfe_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info *soc_info;
	const struct cam_tfe_csid_rdi_reg_offset *rdi_reg;
	uint32_t hbi = 0, vbi = 0;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
		res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
		CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
			csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
			csid_hw->hw_intf->hw_idx,
			csid_hw->hw_info->hw_state);
		return -EINVAL;
	}

	if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) {
		hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_pxl_format_measure1_addr);
		vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_pxl_format_measure2_addr);
	} else if ((res->res_id >= CAM_TFE_CSID_PATH_RES_RDI_0) &&
		(res->res_id <= CAM_TFE_CSID_PATH_RES_RDI_2)) {
		rdi_reg = csid_reg->rdi_reg[res->res_id];

		hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			rdi_reg->csid_rdi_format_measure1_addr);
		vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			rdi_reg->csid_rdi_format_measure2_addr);
	} else {
		CAM_ERR(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
			csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
		return -EINVAL;
	}

	CAM_INFO(CAM_ISP,
		"CSID[%u] Resource[id:%d name:%s hbi 0x%x vbi 0x%x]",
		csid_hw->hw_intf->hw_idx, res->res_id, res->res_name, hbi, vbi);

	return 0;
}

/* Store the user-supplied debug bitmask on the CSID HW instance. */
static int cam_tfe_csid_set_csid_debug(struct cam_tfe_csid_hw *csid_hw,
	void *cmd_args)
{
	uint32_t *csid_debug;

	csid_debug = (uint32_t *) cmd_args;
	csid_hw->csid_debug = *csid_debug;
	CAM_DBG(CAM_ISP, "CSID:%d set csid debug value:%d",
		csid_hw->hw_intf->hw_idx, csid_hw->csid_debug);

	return 0;
}

/*
 * Fill the caller's cam_tfe_csid_hw_caps from the register-offset table
 * (number of RDIs, pixel-pipe availability, HW version fields).
 */
static int cam_tfe_csid_get_hw_caps(void *hw_priv,
	void *get_hw_cap_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_tfe_csid_hw_caps *hw_caps;
	struct cam_tfe_csid_hw *csid_hw;
	struct cam_hw_info *csid_hw_info;
	const struct cam_tfe_csid_reg_offset *csid_reg;

	if (!hw_priv || !get_hw_cap_args) {
		CAM_ERR(CAM_ISP, "CSID: Invalid args");
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
	csid_reg = csid_hw->csid_info->csid_reg;
	hw_caps = (struct cam_tfe_csid_hw_caps *) get_hw_cap_args;

	hw_caps->num_rdis = csid_reg->cmn_reg->num_rdis;
	hw_caps->num_pix = csid_hw->pxl_pipe_enable;
	hw_caps->major_version = csid_reg->cmn_reg->major_version;
	hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
	hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
	hw_caps->sync_clk = csid_reg->cmn_reg->sync_clk;

	CAM_DBG(CAM_ISP,
		"CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d",
		csid_hw->hw_intf->hw_idx, hw_caps->num_rdis,
		hw_caps->num_pix, hw_caps->major_version,
		hw_caps->minor_version, hw_caps->version_incr);

	return rc;
}

/*
 * Dispatch a reset request to either the global-reset or path-reset
 * handler based on reset_type.
 */
static int cam_tfe_csid_reset(void *hw_priv,
	void *reset_args, uint32_t arg_size)
{
	struct cam_tfe_csid_hw *csid_hw;
	struct cam_hw_info *csid_hw_info;
	struct cam_tfe_csid_reset_cfg_args *reset;
	int rc = 0;

	if (!hw_priv || !reset_args || (arg_size !=
		sizeof(struct cam_tfe_csid_reset_cfg_args))) {
		CAM_ERR(CAM_ISP, "CSID:Invalid args");
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
	reset = (struct cam_tfe_csid_reset_cfg_args *)reset_args;

	switch (reset->reset_type) {
	case CAM_TFE_CSID_RESET_GLOBAL:
		rc = cam_tfe_csid_global_reset(csid_hw);
		break;
	case CAM_TFE_CSID_RESET_PATH:
		rc = cam_tfe_csid_path_reset(csid_hw, reset);
		break;
	default:
		CAM_ERR(CAM_ISP, "CSID:Invalid reset type :%d",
			reset->reset_type);
		rc = -EINVAL;
		break;
	}

	return rc;
}

/*
 * Reserve a pixel-path resource on this CSID. Validates the resource type
 * and delegates to cam_tfe_csid_path_reserve() under the HW mutex.
 */
static int cam_tfe_csid_reserve(void *hw_priv,
	void *reserve_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_tfe_csid_hw *csid_hw;
	struct cam_hw_info *csid_hw_info;
	struct cam_tfe_csid_hw_reserve_resource_args *reserv;

	if (!hw_priv || !reserve_args || (arg_size !=
		sizeof(struct cam_tfe_csid_hw_reserve_resource_args))) {
		CAM_ERR(CAM_ISP, "CSID: Invalid args");
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
	reserv = (struct cam_tfe_csid_hw_reserve_resource_args *)reserve_args;

	if (reserv->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type :%d",
			csid_hw->hw_intf->hw_idx, reserv->res_type);
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "res_type %d, CSID: %u",
		reserv->res_type, csid_hw->hw_intf->hw_idx);

	mutex_lock(&csid_hw->hw_info->hw_mutex);
	rc = cam_tfe_csid_path_reserve(csid_hw, reserv);
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}

/*
 * Release a previously reserved pixel-path resource: drop the CID and
 * CSI2 reserve counts, clear the RX config when the last user goes away,
 * and return the resource to AVAILABLE state.
 */
static int cam_tfe_csid_release(void *hw_priv,
	void *release_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_tfe_csid_hw *csid_hw;
	struct cam_hw_info *csid_hw_info;
	struct cam_isp_resource_node *res;
	struct cam_tfe_csid_path_cfg *path_data;

	if (!hw_priv || !release_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		CAM_ERR(CAM_ISP, "CSID: Invalid args");
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
	res = (struct cam_isp_resource_node *)release_args;

	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
			csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	mutex_lock(&csid_hw->hw_info->hw_mutex);
	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_id >= CAM_TFE_CSID_PATH_RES_MAX)) {
		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
			csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		goto end;
	}

	/*
	 * NOTE(review): the event callback is cleared unconditionally here,
	 * even while other resources may still be reserved on this CSID —
	 * confirm this is intended.
	 */
	csid_hw->event_cb = NULL;
	csid_hw->event_cb_priv = NULL;

	if ((res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) ||
		(res->res_state >= CAM_ISP_RESOURCE_STATE_STREAMING)) {
		CAM_WARN(CAM_ISP,
			"CSID:%d res type:%d Res %d in state %d",
			csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id,
			res->res_state);
		goto end;
	}

	CAM_DBG(CAM_ISP, "CSID:%d res type :%d Resource id:%d",
		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);

	path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv;
	if (csid_hw->cid_res[path_data->cid].cnt)
		csid_hw->cid_res[path_data->cid].cnt--;

	if (csid_hw->csi2_reserve_cnt)
		csid_hw->csi2_reserve_cnt--;

	/* Last reference gone: forget the CSI2 RX configuration */
	if (!csid_hw->csi2_reserve_cnt)
		memset(&csid_hw->csi2_rx_cfg, 0,
			sizeof(struct cam_tfe_csid_csi2_rx_cfg));

	CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d",
		csid_hw->hw_intf->hw_idx,
		res->res_id, csid_hw->cid_res[path_data->cid].cnt,
		csid_hw->csi2_reserve_cnt);

	res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

	cam_tfe_csid_reset_path_data(csid_hw, res);

end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}

/*
 * Issue a SW-register-retaining CSID reset: mask/clear top IRQs, strobe
 * the reset bit, then poll the top IRQ status for the reset-done bit
 * (bit 0). Returns 0 on completion, negative errno on poll timeout.
 */
static int cam_tfe_csid_reset_retain_sw_reg(
	struct cam_tfe_csid_hw *csid_hw)
{
	int rc = 0;
	uint32_t status;
	const struct cam_tfe_csid_reg_offset *csid_reg =
		csid_hw->csid_info->csid_reg;
	struct cam_hw_soc_info *soc_info;

	soc_info = &csid_hw->hw_info->soc_info;

	/* Mask top interrupts */
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_mask_addr);
	/* clear the top interrupt first */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_clear_addr);
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_irq_cmd_addr);

	/* Strobe the reset */
	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
		soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_rst_strobes_addr);

	rc = cam_common_read_poll_timeout(soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_status_addr,
		CAM_TFE_CSID_TIMEOUT_SLEEP_US, CAM_TFE_CSID_TIMEOUT_ALL_US,
		0x1, 0x1, &status);

	if (rc < 0) {
		CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
			csid_hw->hw_intf->hw_idx, rc);
		status = cam_io_r(soc_info->reg_map[0].mem_base +
			csid_reg->cmn_reg->csid_top_irq_status_addr);
		CAM_DBG(CAM_ISP, "Status reg %d", status);
	} else {
		CAM_DBG(CAM_ISP, "CSID:%d hw reset completed %d",
			csid_hw->hw_intf->hw_idx, rc);
		rc = 0;
	}

	/* Clear the reset-done interrupt we just consumed */
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_top_irq_clear_addr);
	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
		csid_reg->cmn_reg->csid_irq_cmd_addr);

	return rc;
}

/*
 * Init a pixel-path resource: power up the CSID core, program the path
 * configuration and perform a SW-retaining reset before marking the
 * device enabled.
 */
static int cam_tfe_csid_init_hw(void *hw_priv,
	void *init_args, uint32_t arg_size)
{
	int rc = 0;
	struct cam_tfe_csid_hw *csid_hw;
	struct cam_hw_info *csid_hw_info;
	struct cam_isp_resource_node *res;
	const struct cam_tfe_csid_reg_offset *csid_reg;
	unsigned long flags;

	if (!hw_priv || !init_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		CAM_ERR(CAM_ISP, "CSID: Invalid args");
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
	res =
(struct cam_isp_resource_node *)init_args; + csid_reg = csid_hw->csid_info->csid_reg; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res type state %d", + csid_hw->hw_intf->hw_idx, + res->res_type); + return -EINVAL; + } + + mutex_lock(&csid_hw->hw_info->hw_mutex); + if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH && + res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res tpe:%d res id%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + rc = -EINVAL; + goto end; + } + + if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) && + (res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) { + CAM_ERR(CAM_ISP, + "CSID:%d res type:%d res_id:%dInvalid state %d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id, res->res_state); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d", + csid_hw->hw_intf->hw_idx, res->res_type, res->res_id); + + /* Initialize the csid hardware */ + rc = cam_tfe_csid_enable_hw(csid_hw); + if (rc) + goto end; + + if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) + rc = cam_tfe_csid_init_config_pxl_path(csid_hw, res); + else + rc = cam_tfe_csid_init_config_rdi_path(csid_hw, res); + + rc = cam_tfe_csid_reset_retain_sw_reg(csid_hw); + if (rc < 0) + CAM_ERR(CAM_ISP, "CSID: Failed in SW reset"); + + if (rc) + cam_tfe_csid_disable_hw(csid_hw); + + spin_lock_irqsave(&csid_hw->spin_lock, flags); + csid_hw->device_enabled = 1; + spin_unlock_irqrestore(&csid_hw->spin_lock, flags); +end: + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return rc; +} + +static int cam_tfe_csid_deinit_hw(void *hw_priv, + void *deinit_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_tfe_csid_hw *csid_hw; + struct cam_hw_info *csid_hw_info; + struct cam_isp_resource_node *res; + + if (!hw_priv || !deinit_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "CSID:Invalid arguments"); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, 
		"Enter");
	res = (struct cam_isp_resource_node *)deinit_args;
	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;

	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) {
		CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d",
			csid_hw->hw_intf->hw_idx,
			res->res_type);
		return -EINVAL;
	}

	mutex_lock(&csid_hw->hw_info->hw_mutex);
	/* RESERVED means the path was never (or already de-) initialized */
	if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state",
			csid_hw->hw_intf->hw_idx,
			res->res_id);
		goto end;
	}

	CAM_DBG(CAM_ISP, "De-Init IPP Path: %d", res->res_id);

	if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
		rc = cam_tfe_csid_deinit_pxl_path(csid_hw, res);
	else
		rc = cam_tfe_csid_deinit_rdi_path(csid_hw, res);

	/* Disable CSID HW */
	CAM_DBG(CAM_ISP, "Disabling CSID Hw");
	cam_tfe_csid_disable_hw(csid_hw);
	CAM_DBG(CAM_ISP, "%s: Exit", __func__);

end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
	return rc;
}

/*
 * Start streaming on a pixel-path resource: enable the IPP or RDI path
 * and reset the SOF IRQ debug bookkeeping.
 */
static int cam_tfe_csid_start(void *hw_priv, void *start_args,
	uint32_t arg_size)
{
	int rc = 0;
	struct cam_tfe_csid_hw *csid_hw;
	struct cam_hw_info *csid_hw_info;
	struct cam_isp_resource_node *res;
	const struct cam_tfe_csid_reg_offset *csid_reg;

	if (!hw_priv || !start_args ||
		(arg_size != sizeof(struct cam_isp_resource_node))) {
		CAM_ERR(CAM_ISP, "CSID: Invalid args");
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
	res = (struct cam_isp_resource_node *)start_args;
	csid_reg = csid_hw->csid_info->csid_reg;

	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
		res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) {
		CAM_DBG(CAM_ISP, "CSID:%d Invalid res tpe:%d res id:%d",
			csid_hw->hw_intf->hw_idx, res->res_type,
			res->res_id);
		rc = -EINVAL;
		goto end;
	}

	/* Reset sof irq debug fields */
	csid_hw->sof_irq_triggered = false;
	csid_hw->irq_debug_cnt = 0;

CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res_id:%d", + csid_hw->hw_intf->hw_idx, res->res_type, res->res_id); + + switch (res->res_type) { + case CAM_ISP_RESOURCE_PIX_PATH: + if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) + rc = cam_tfe_csid_enable_pxl_path(csid_hw, res); + else + rc = cam_tfe_csid_enable_rdi_path(csid_hw, res); + break; + default: + CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d", + csid_hw->hw_intf->hw_idx, res->res_type); + break; + } +end: + return rc; +} + +int cam_tfe_csid_halt(struct cam_tfe_csid_hw *csid_hw, + void *halt_args) +{ + struct cam_isp_resource_node *res; + struct cam_tfe_csid_hw_halt_args *csid_halt; + int rc = 0; + + if (!csid_hw || !halt_args) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + + csid_halt = (struct cam_tfe_csid_hw_halt_args *)halt_args; + + /* Change the halt mode */ + res = csid_halt->node_res; + CAM_DBG(CAM_ISP, "CSID:%d res_type %d res_id %d", + csid_hw->hw_intf->hw_idx, + res->res_type, res->res_id); + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid res type %d", + csid_hw->hw_intf->hw_idx, + res->res_type); + return -EINVAL; + } + + switch (res->res_id) { + case CAM_TFE_CSID_PATH_RES_IPP: + rc = cam_tfe_csid_change_pxl_halt_mode(csid_hw, csid_halt); + break; + default: + CAM_DBG(CAM_ISP, "CSID:%d res_id %d", + csid_hw->hw_intf->hw_idx, + res->res_id); + break; + } + + return rc; +} + +static int cam_tfe_csid_stop(void *hw_priv, + void *stop_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_tfe_csid_hw *csid_hw; + struct cam_hw_info *csid_hw_info; + struct cam_isp_resource_node *res; + struct cam_tfe_csid_hw_stop_args *csid_stop; + uint32_t i; + uint32_t res_mask = 0; + + if (!hw_priv || !stop_args || + (arg_size != sizeof(struct cam_tfe_csid_hw_stop_args))) { + CAM_ERR(CAM_ISP, "CSID: Invalid args"); + return -EINVAL; + } + csid_stop = (struct cam_tfe_csid_hw_stop_args *) stop_args; + + if (!csid_stop->num_res) { + CAM_ERR(CAM_ISP, "CSID: Invalid 
args");
		return -EINVAL;
	}

	csid_hw_info = (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
	CAM_DBG(CAM_ISP, "CSID:%d num_res %d",
		csid_hw->hw_intf->hw_idx,
		csid_stop->num_res);

	/* Stop the resource first */
	for (i = 0; i < csid_stop->num_res; i++) {
		res = csid_stop->node_res[i];
		CAM_DBG(CAM_ISP, "CSID:%d res_type %d res_id %d",
			csid_hw->hw_intf->hw_idx,
			res->res_type, res->res_id);
		switch (res->res_type) {
		case CAM_ISP_RESOURCE_PIX_PATH:
			res_mask |= (1 << res->res_id);
			if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
				rc = cam_tfe_csid_disable_pxl_path(csid_hw,
					res, csid_stop->stop_cmd);
			else
				rc = cam_tfe_csid_disable_rdi_path(csid_hw,
					res, csid_stop->stop_cmd);

			break;
		default:
			CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
				csid_hw->hw_intf->hw_idx,
				res->res_type);
			break;
		}
	}

	/* Then wait for all halted paths to ack the stop */
	if (res_mask)
		rc = cam_tfe_csid_poll_stop_status(csid_hw, res_mask);

	for (i = 0; i < csid_stop->num_res; i++) {
		res = csid_stop->node_res[i];
		res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
	}

	CAM_DBG(CAM_ISP, "%s: Exit", __func__);
	return rc;
}

/* Direct register read via hw_intf is not supported on TFE CSID. */
static int cam_tfe_csid_read(void *hw_priv,
	void *read_args, uint32_t arg_size)
{
	CAM_ERR(CAM_ISP, "CSID: un supported");
	return -EINVAL;
}

/* Direct register write via hw_intf is not supported on TFE CSID. */
static int cam_tfe_csid_write(void *hw_priv,
	void *write_args, uint32_t arg_size)
{
	CAM_ERR(CAM_ISP, "CSID: un supported");
	return -EINVAL;
}

/*
 * Enable/disable the input-SOF info IRQ on every active path (IPP and all
 * RDIs) for SOF-freeze debugging; cmd_args points at 1 (enable) or 0.
 */
static int cam_tfe_csid_sof_irq_debug(
	struct cam_tfe_csid_hw *csid_hw, void *cmd_args)
{
	int i = 0;
	uint32_t val = 0;
	bool sof_irq_enable = false;
	const struct cam_tfe_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info *soc_info;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (*((uint32_t *)cmd_args) == 1)
		sof_irq_enable = true;

	if (csid_hw->hw_info->hw_state ==
		CAM_HW_STATE_POWER_DOWN) {
		CAM_WARN(CAM_ISP,
			"CSID:%d powered down unable to %s sof irq",
			csid_hw->hw_intf->hw_idx,
			sof_irq_enable ? "enable" : "disable");
		return 0;
	}

	/* Only touch the mask of paths that already have IRQs enabled */
	if (csid_reg->ipp_reg) {
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_pxl_irq_mask_addr);

		if (val) {
			if (sof_irq_enable)
				val |= TFE_CSID_PATH_INFO_INPUT_SOF;
			else
				val &= ~TFE_CSID_PATH_INFO_INPUT_SOF;

			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
				csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
			val = 0;
		}
	}

	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
		if (val) {
			if (sof_irq_enable)
				val |= TFE_CSID_PATH_INFO_INPUT_SOF;
			else
				val &= ~TFE_CSID_PATH_INFO_INPUT_SOF;

			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
				csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
			val = 0;
		}
	}

	if (sof_irq_enable) {
		csid_hw->csid_debug |= TFE_CSID_DEBUG_ENABLE_SOF_IRQ;
		csid_hw->sof_irq_triggered = true;
	} else {
		csid_hw->csid_debug &= ~TFE_CSID_DEBUG_ENABLE_SOF_IRQ;
		csid_hw->sof_irq_triggered = false;
	}

	if (!in_irq())
		CAM_INFO(CAM_ISP, "SOF freeze: CSID:%d SOF irq %s",
			csid_hw->hw_intf->hw_idx,
			sof_irq_enable ? "enabled" : "disabled");

	return 0;
}

/* Record the requested CSID clock rate on the HW instance (no HW write). */
static int cam_tfe_csid_set_csid_clock(
	struct cam_tfe_csid_hw *csid_hw, void *cmd_args)
{
	struct cam_tfe_csid_clock_update_args *clk_update = NULL;

	if (!csid_hw)
		return -EINVAL;

	clk_update =
		(struct cam_tfe_csid_clock_update_args *)cmd_args;

	csid_hw->clk_rate = clk_update->clk_rate;
	CAM_DBG(CAM_ISP, "CSID clock rate %llu", csid_hw->clk_rate);

	return 0;
}

/* Log the currently applied SW-client source clock rate. */
static int cam_tfe_csid_dump_csid_clock(
	struct cam_tfe_csid_hw *csid_hw, void *cmd_args)
{
	struct cam_hw_soc_info *soc_info;

	if (!csid_hw)
		return -EINVAL;

	soc_info = &csid_hw->hw_info->soc_info;

	CAM_INFO(CAM_ISP, "CSID:%d sw_client clk rate:%lu ",
		csid_hw->hw_intf->hw_idx,
		soc_info->applied_src_clk_rates.sw_client);

	return 0;
}

/*
 * Raise the CSID source clock at runtime if the requested rate exceeds
 * the currently applied one; on return *clk_rate holds the applied rate.
 * NOTE(review): *clk_rate is unsigned long but is printed with %llu —
 * confirm the format matches the type on 32-bit builds.
 */
static int cam_tfe_csid_set_csid_clock_dynamically(
	struct cam_tfe_csid_hw *csid_hw, void *cmd_args)
{
	struct cam_hw_soc_info *soc_info;
	unsigned long *clk_rate;
	int rc = 0;

	soc_info = &csid_hw->hw_info->soc_info;
	clk_rate = (unsigned long *)cmd_args;

	CAM_DBG(CAM_ISP, "CSID[%u] clock rate requested: %llu curr: %llu",
		csid_hw->hw_intf->hw_idx, *clk_rate, soc_info->applied_src_clk_rates.sw_client);

	/* Never lower the clock here; only bump it when needed */
	if (*clk_rate <= soc_info->applied_src_clk_rates.sw_client)
		goto end;

	rc = cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, *clk_rate, 0);
	if (rc) {
		CAM_ERR(CAM_ISP,
			"unable to set clock dynamically rate:%llu", *clk_rate);
		return rc;
	}
end:
	*clk_rate = soc_info->applied_src_clk_rates.sw_client;
	CAM_DBG(CAM_ISP, "CSID[%u] new clock rate %llu",
		csid_hw->hw_intf->hw_idx, soc_info->applied_src_clk_rates.sw_client);

	return rc;
}

/*
 * Dump the key configuration registers and the acquired path geometry for
 * the given IPP or RDI resource to the kernel log.
 */
static int cam_tfe_csid_get_regdump(struct cam_tfe_csid_hw *csid_hw,
	void *cmd_args)
{
	struct cam_tfe_csid_reg_offset *csid_reg;
	struct cam_hw_soc_info *soc_info;
	struct cam_isp_resource_node *res;
	struct cam_tfe_csid_path_cfg *path_data;
	uint32_t id;
	int i, val;

	csid_reg = (struct 
cam_tfe_csid_reg_offset *) + csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + res = (struct cam_isp_resource_node *)cmd_args; + path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH || + res->res_id >= CAM_TFE_CSID_PATH_RES_MAX) { + CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP) { + CAM_INFO(CAM_ISP, "Dumping CSID:%d IPP registers ", + csid_hw->hw_intf->hw_idx); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_cfg0_addr); + CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x", + csid_reg->ipp_reg->csid_pxl_cfg0_addr, val); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_cfg1_addr); + CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x", + csid_reg->ipp_reg->csid_pxl_cfg1_addr, val); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_ctrl_addr); + CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x", + csid_reg->ipp_reg->csid_pxl_ctrl_addr, val); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_hcrop_addr); + CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x", + csid_reg->ipp_reg->csid_pxl_hcrop_addr, val); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_vcrop_addr); + CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x", + csid_reg->ipp_reg->csid_pxl_vcrop_addr, val); + } else { + id = res->res_id; + CAM_INFO(CAM_ISP, "Dumping CSID:%d RDI:%d registers ", + csid_hw->hw_intf->hw_idx, id); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr); + CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x", + 
csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr, val); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr); + CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x", + csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr, val); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr); + CAM_INFO(CAM_ISP, "offset 0x%x=0x08%x", + csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr, val); + } + CAM_INFO(CAM_ISP, + "start pix:%d end pix:%d start line:%d end line:%d w:%d h:%d", + path_data->start_pixel, path_data->end_pixel, + path_data->start_line, path_data->end_line, + path_data->width, path_data->height); + CAM_INFO(CAM_ISP, + "clock:%d crop_enable:%d num of vc_dt:%d informat:%d outformat:%d", + path_data->clk_rate, path_data->crop_enable, + path_data->num_valid_vc_dt, + path_data->in_format, path_data->out_format); + for (i = 0; i < path_data->num_valid_vc_dt; i++) { + CAM_INFO(CAM_ISP, "vc[%d]: %d, dt[%d]: %d", + i, path_data->vc_dt[i].vc, i, path_data->vc_dt[i].dt); + } + + return 0; +} + +static int cam_tfe_csid_dump_hw( + struct cam_tfe_csid_hw *csid_hw, void *cmd_args) +{ + int i; + uint8_t *dst; + uint32_t *addr, *start; + uint64_t *clk_addr, *clk_start; + uint32_t min_len; + uint32_t num_reg; + uint32_t reg_size = 0; + size_t remain_len; + struct cam_isp_hw_dump_header *hdr; + struct cam_isp_hw_dump_args *dump_args = + (struct cam_isp_hw_dump_args *)cmd_args; + struct cam_hw_soc_info *soc_info; + + if (!dump_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + if (!dump_args->cpu_addr || !dump_args->buf_len) { + CAM_ERR(CAM_ISP, + "Invalid params %pK %zu", + (void *)dump_args->cpu_addr, + dump_args->buf_len); + return -EINVAL; + } + + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_ISP, + "Dump offset overshoot offset %zu buf_len %zu", + dump_args->offset, dump_args->buf_len); + return -ENOSPC; + } + + soc_info = &csid_hw->hw_info->soc_info; + if (dump_args->is_dump_all) + reg_size = 
soc_info->reg_map[0].size; + + min_len = reg_size + + sizeof(struct cam_isp_hw_dump_header) + + (sizeof(uint32_t) * CAM_TFE_CSID_DUMP_MISC_NUM_WORDS); + remain_len = dump_args->buf_len - dump_args->offset; + + if (remain_len < min_len) { + CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu, min %u", + remain_len, min_len); + return -ENOSPC; + } + + mutex_lock(&csid_hw->hw_info->hw_mutex); + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return -EINVAL; + } + + if (!dump_args->is_dump_all) + goto dump_bw; + + dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_isp_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "CSID_REG:"); + addr = (uint32_t *)(dst + sizeof(struct cam_isp_hw_dump_header)); + start = addr; + num_reg = soc_info->reg_map[0].size/4; + hdr->word_size = sizeof(uint32_t); + *addr = soc_info->index; + addr++; + + for (i = 0; i < num_reg; i++) { + addr[0] = soc_info->mem_block[0]->start + (i*4); + addr[1] = cam_io_r(soc_info->reg_map[0].mem_base + + (i*4)); + addr += 2; + } + + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_isp_hw_dump_header); +dump_bw: + dst = (char *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_isp_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "CSID_CLK_RATE:"); + clk_addr = (uint64_t *)(dst + + sizeof(struct cam_isp_hw_dump_header)); + clk_start = clk_addr; + hdr->word_size = sizeof(uint64_t); + *clk_addr++ = csid_hw->clk_rate; + hdr->size = hdr->word_size * (clk_addr - clk_start); + dump_args->offset += hdr->size + + sizeof(struct cam_isp_hw_dump_header); + CAM_DBG(CAM_ISP, "offset %zu", dump_args->offset); + mutex_unlock(&csid_hw->hw_info->hw_mutex); + return 0; +} + +static int cam_tfe_csid_log_acquire_data( + struct 
cam_tfe_csid_hw *csid_hw, void *cmd_args) +{ + struct cam_isp_resource_node *res = + (struct cam_isp_resource_node *)cmd_args; + struct cam_tfe_csid_path_cfg *path_data; + struct cam_hw_soc_info *soc_info; + const struct cam_tfe_csid_reg_offset *csid_reg; + uint32_t byte_cnt_ping, byte_cnt_pong; + + path_data = (struct cam_tfe_csid_path_cfg *)res->res_priv; + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, + "CSID:%d invalid res id:%d res type: %d state:%d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_type, + res->res_state); + return -EINVAL; + } + + /* Dump all the acquire data for this */ + CAM_INFO(CAM_ISP, + "CSID:%d res id:%d type:%d state:%d in f:%d out f:%d st pix:%d end pix:%d st line:%d end line:%d", + csid_hw->hw_intf->hw_idx, res->res_id, res->res_type, + res->res_type, path_data->in_format, path_data->out_format, + path_data->start_pixel, path_data->end_pixel, + path_data->start_line, path_data->end_line); + + if (res->res_id >= CAM_TFE_CSID_PATH_RES_RDI_0 && + res->res_id <= CAM_TFE_CSID_PATH_RES_RDI_2) { + /* read total number of bytes transmitted through RDI */ + byte_cnt_ping = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[res->res_id]->csid_rdi_byte_cntr_ping_addr); + byte_cnt_pong = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[res->res_id]->csid_rdi_byte_cntr_pong_addr); + CAM_INFO(CAM_ISP, + "CSID:%d res id:%d byte cnt val ping:%d pong:%d", + csid_hw->hw_intf->hw_idx, res->res_id, + byte_cnt_ping, byte_cnt_pong); + } + + return 0; + +} + +static int cam_tfe_csid_process_cmd(void *hw_priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_tfe_csid_hw *csid_hw; + struct cam_hw_info *csid_hw_info; + struct cam_isp_resource_node *res = NULL; + + if (!hw_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "CSID: Invalid arguments"); + return -EINVAL; + } + + csid_hw_info 
= (struct cam_hw_info *)hw_priv;
	csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;

	switch (cmd_type) {
	case CAM_TFE_CSID_CMD_GET_TIME_STAMP:
		rc = cam_tfe_csid_get_time_stamp(csid_hw, cmd_args);

		if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
			res = ((struct cam_tfe_csid_get_time_stamp_args *)cmd_args)->node_res;
			cam_tfe_csid_print_hbi_vbi(csid_hw, res);
		}
		break;
	case CAM_TFE_CSID_SET_CSID_DEBUG:
		rc = cam_tfe_csid_set_csid_debug(csid_hw, cmd_args);
		break;
	case CAM_TFE_CSID_SOF_IRQ_DEBUG:
		rc = cam_tfe_csid_sof_irq_debug(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE:
		rc = cam_tfe_csid_set_csid_clock(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_CSID_CLOCK_DUMP:
		rc = cam_tfe_csid_dump_csid_clock(csid_hw, cmd_args);
		break;
	case CAM_TFE_CSID_CMD_GET_REG_DUMP:
		rc = cam_tfe_csid_get_regdump(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_DUMP_HW:
		rc = cam_tfe_csid_dump_hw(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_CSID_CHANGE_HALT_MODE:
		rc = cam_tfe_csid_halt(csid_hw, cmd_args);
		break;
	case CAM_TFE_CSID_LOG_ACQUIRE_DATA:
		rc = cam_tfe_csid_log_acquire_data(csid_hw, cmd_args);
		break;
	case CAM_ISP_HW_CMD_DYNAMIC_CLOCK_UPDATE:
		rc = cam_tfe_csid_set_csid_clock_dynamically(csid_hw, cmd_args);
		break;
	default:
		CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
			csid_hw->hw_intf->hw_idx, cmd_type);
		rc = -EINVAL;
		break;
	}

	return rc;
}

/*
 * Take an event payload off the free list. Returns -ENOMEM when the pool
 * is exhausted.
 * NOTE(review): this side uses spin_lock() while the put side uses
 * spin_lock_irqsave() — presumably get is only called from IRQ context;
 * confirm against the callers.
 */
static int cam_tfe_csid_get_evt_payload(
	struct cam_tfe_csid_hw *csid_hw,
	struct cam_csid_evt_payload **evt_payload)
{

	spin_lock(&csid_hw->spin_lock);

	if (list_empty(&csid_hw->free_payload_list)) {
		*evt_payload = NULL;
		spin_unlock(&csid_hw->spin_lock);
		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload core %d",
			csid_hw->hw_intf->hw_idx);
		return -ENOMEM;
	}

	*evt_payload = list_first_entry(&csid_hw->free_payload_list,
		struct cam_csid_evt_payload, list);
	list_del_init(&(*evt_payload)->list);
	spin_unlock(&csid_hw->spin_lock);

	return 0;
}

/* Return an event payload to the free list and NULL the caller's pointer. */
static int cam_tfe_csid_put_evt_payload(
	struct cam_tfe_csid_hw *csid_hw,
	struct cam_csid_evt_payload **evt_payload)
{
	unsigned long flags;

	if (*evt_payload == NULL) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid payload core %d",
			csid_hw->hw_intf->hw_idx);
		return -EINVAL;
	}

	CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_csid_evt_payload);
	spin_lock_irqsave(&csid_hw->spin_lock, flags);
	list_add_tail(&(*evt_payload)->list, &csid_hw->free_payload_list);
	*evt_payload = NULL;
	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);

	return 0;
}

/*
 * Bottom-half worker for CSID events: logs SOF-debug info or the raw IRQ
 * status, and forwards fatal CSID errors (once) to the registered event
 * callback. Always recycles the payload before returning.
 */
static int cam_tfe_csid_evt_bottom_half_handler(
	void *handler_priv,
	void *evt_payload_priv)
{
	struct cam_tfe_csid_hw *csid_hw;
	struct cam_csid_evt_payload *evt_payload;
	const struct cam_tfe_csid_reg_offset *csid_reg;
	struct cam_isp_hw_error_event_info err_evt_info;
	struct cam_isp_hw_event_info event_info;
	int i;
	int rc = 0;

	if (!handler_priv || !evt_payload_priv) {
		CAM_ERR(CAM_ISP,
			"Invalid Param handler_priv %pK evt_payload_priv %pK",
			handler_priv, evt_payload_priv);
		return 0;
	}

	csid_hw = (struct cam_tfe_csid_hw *)handler_priv;
	evt_payload = (struct cam_csid_evt_payload *)evt_payload_priv;
	csid_reg = csid_hw->csid_info->csid_reg;

	if (!csid_hw->event_cb || !csid_hw->event_cb_priv) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"hw_idx %d Invalid args %pK %pK",
			csid_hw->hw_intf->hw_idx,
			csid_hw->event_cb,
			csid_hw->event_cb_priv);
		goto end;
	}

	if (csid_hw->event_cb_priv != evt_payload->priv) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"hw_idx %d priv mismatch %pK, %pK",
			csid_hw->hw_intf->hw_idx,
			csid_hw->event_cb_priv,
			evt_payload->priv);
		goto end;
	}

	if (csid_hw->sof_irq_triggered && (evt_payload->evt_type ==
		CAM_ISP_HW_ERROR_NONE)) {
		if (evt_payload->irq_status[TFE_CSID_IRQ_REG_IPP] &
			TFE_CSID_PATH_INFO_INPUT_SOF) {
			CAM_INFO_RATE_LIMIT(CAM_ISP,
				"CSID:%d IPP SOF received",
				csid_hw->hw_intf->hw_idx);
		}

		for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
			if (evt_payload->irq_status[i] &
				TFE_CSID_PATH_INFO_INPUT_SOF)
				CAM_INFO_RATE_LIMIT(CAM_ISP,
					"CSID:%d RDI:%d SOF received",
					csid_hw->hw_intf->hw_idx, i);
		}
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"CSID %d err %d phy %d irq status TOP: 0x%x RX: 0x%x IPP: 0x%x RDI0: 0x%x RDI1: 0x%x RDI2: 0x%x",
			csid_hw->hw_intf->hw_idx,
			evt_payload->evt_type,
			csid_hw->csi2_rx_cfg.phy_sel,
			evt_payload->irq_status[TFE_CSID_IRQ_REG_TOP],
			evt_payload->irq_status[TFE_CSID_IRQ_REG_RX],
			evt_payload->irq_status[TFE_CSID_IRQ_REG_IPP],
			evt_payload->irq_status[TFE_CSID_IRQ_REG_RDI0],
			evt_payload->irq_status[TFE_CSID_IRQ_REG_RDI1],
			evt_payload->irq_status[TFE_CSID_IRQ_REG_RDI2]);
	}
	/* this hunk can be extended to handle more cases
	 * which we want to offload to bottom half from
	 * irq handlers
	 */
	err_evt_info.err_type = evt_payload->evt_type;
	event_info.hw_idx = evt_payload->hw_idx;

	switch (evt_payload->evt_type) {
	case CAM_ISP_HW_ERROR_CSID_FATAL:
		/* Report a fatal error only once per occurrence */
		if (csid_hw->fatal_err_detected)
			break;
		event_info.event_data = (void *)&err_evt_info;
		csid_hw->fatal_err_detected = true;
		rc = csid_hw->event_cb(NULL,
			CAM_ISP_HW_EVENT_ERROR, (void *)&event_info);
		break;

	default:
		CAM_DBG(CAM_ISP, "CSID[%d] error type %d",
			csid_hw->hw_intf->hw_idx,
			evt_payload->evt_type);
		break;
	}
end:
	cam_tfe_csid_put_evt_payload(csid_hw, &evt_payload);
	return 0;
}

/*
 * IRQ-context helper: grab a free event payload for the given error type
 * and queue it to the bottom half (continues past the end of this chunk).
 */
static int cam_tfe_csid_handle_hw_err_irq(
	struct cam_tfe_csid_hw *csid_hw,
	int evt_type,
	uint32_t *irq_status)
{
	int rc = 0;
	int i;
	void *bh_cmd = NULL;
	struct cam_csid_evt_payload *evt_payload;

	CAM_DBG(CAM_ISP, "CSID[%d] error %d",
		csid_hw->hw_intf->hw_idx, evt_type);

	rc = cam_tfe_csid_get_evt_payload(csid_hw, &evt_payload);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"No free payload core %d",
			csid_hw->hw_intf->hw_idx);
		return rc;
	}

	rc = 
tasklet_bh_api.get_bh_payload_func(csid_hw->tasklet, &bh_cmd); + if (rc || !bh_cmd) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CSID[%d] Can not get cmd for tasklet, evt_type %d", + csid_hw->hw_intf->hw_idx, + evt_type); + return rc; + } + + evt_payload->evt_type = evt_type; + evt_payload->priv = csid_hw->event_cb_priv; + evt_payload->hw_idx = csid_hw->hw_intf->hw_idx; + + for (i = 0; i < TFE_CSID_IRQ_REG_MAX; i++) + evt_payload->irq_status[i] = irq_status[i]; + + tasklet_bh_api.bottom_half_enqueue_func(csid_hw->tasklet, + bh_cmd, + csid_hw, + evt_payload, + cam_tfe_csid_evt_bottom_half_handler); + + return rc; +} + +irqreturn_t cam_tfe_csid_irq(int irq_num, void *data) +{ + struct cam_tfe_csid_hw *csid_hw; + struct cam_hw_soc_info *soc_info; + const struct cam_tfe_csid_reg_offset *csid_reg; + const struct cam_tfe_csid_pxl_reg_offset *ipp_reg; + const struct cam_tfe_csid_rdi_reg_offset *rdi_reg; + const struct cam_tfe_csid_common_reg_offset *cmn_reg; + const struct cam_tfe_csid_csi2_rx_reg_offset *csi2_reg; + uint32_t irq_status[TFE_CSID_IRQ_REG_MAX]; + bool fatal_err_detected = false, is_error_irq = false; + uint32_t sof_irq_debug_en = 0, log_en = 0; + unsigned long flags; + uint32_t i, val, val1; + uint32_t data_idx; + + if (!data) { + CAM_ERR(CAM_ISP, "CSID: Invalid arguments"); + return IRQ_HANDLED; + } + + csid_hw = (struct cam_tfe_csid_hw *)data; + data_idx = csid_hw->csi2_rx_cfg.phy_sel - 1; + CAM_DBG(CAM_ISP, "CSID %d IRQ Handling", csid_hw->hw_intf->hw_idx); + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + csi2_reg = csid_reg->csi2_reg; + + /* read */ + irq_status[TFE_CSID_IRQ_REG_TOP] = + cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_top_irq_status_addr); + + irq_status[TFE_CSID_IRQ_REG_RX] = + cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr); + + if (csid_hw->pxl_pipe_enable) + irq_status[TFE_CSID_IRQ_REG_IPP] = + 
cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_irq_status_addr); + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) + irq_status[i] = + cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr); + + /* clear */ + cam_io_w_mb(irq_status[TFE_CSID_IRQ_REG_TOP], + soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_top_irq_clear_addr); + + cam_io_w_mb(irq_status[TFE_CSID_IRQ_REG_RX], + soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr); + + if (csid_hw->pxl_pipe_enable) + cam_io_w_mb(irq_status[TFE_CSID_IRQ_REG_IPP], + soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_irq_clear_addr); + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + cam_io_w_mb(irq_status[i], + soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr); + } + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + csid_reg->cmn_reg->csid_irq_cmd_addr); + + + /* Software register reset complete*/ + if (irq_status[TFE_CSID_IRQ_REG_TOP]) + complete(&csid_hw->csid_top_complete); + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) + complete(&csid_hw->csid_csi2_complete); + + spin_lock_irqsave(&csid_hw->spin_lock, flags); + if (csid_hw->device_enabled == 1) { + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) { + fatal_err_detected = true; + goto handle_fatal_error; + } + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) { + fatal_err_detected = true; + goto handle_fatal_error; + } + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) { + fatal_err_detected = true; + goto handle_fatal_error; + } + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) { + fatal_err_detected = true; + goto handle_fatal_error; + } + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) 
+ csid_hw->error_irq_count++; + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) + csid_hw->error_irq_count++; + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) + csid_hw->error_irq_count++; + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME) + csid_hw->error_irq_count++; + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_CRC) + is_error_irq = true; + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_ECC) + is_error_irq = true; + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_ERROR_MMAPPED_VC_DT) + is_error_irq = true; + } +handle_fatal_error: + spin_unlock_irqrestore(&csid_hw->spin_lock, flags); + + if (csid_hw->error_irq_count || fatal_err_detected) + is_error_irq = true; + + if (csid_hw->error_irq_count > + CAM_TFE_CSID_MAX_IRQ_ERROR_COUNT) { + fatal_err_detected = true; + csid_hw->error_irq_count = 0; + } + + if (fatal_err_detected) { + /* Reset the Rx CFG registers */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr); + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr); + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr); + /* phy_sel starts from 1 and should never be zero*/ + if (csid_hw->csi2_rx_cfg.phy_sel > 0) { + cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE, + CAM_SUBDEV_MESSAGE_REG_DUMP, (void *)&data_idx); + } + cam_tfe_csid_handle_hw_err_irq(csid_hw, + CAM_ISP_HW_ERROR_CSID_FATAL, irq_status); + } + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOT_IRQ) { + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d PHY_DL0_EOT_CAPTURED", + csid_hw->hw_intf->hw_idx); + } + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d 
PHY_DL1_EOT_CAPTURED", + csid_hw->hw_intf->hw_idx); + } + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d PHY_DL2_EOT_CAPTURED", + csid_hw->hw_intf->hw_idx); + } + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d PHY_DL3_EOT_CAPTURED", + csid_hw->hw_intf->hw_idx); + } + } + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOT_IRQ) { + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d PHY_DL0_SOT_CAPTURED", + csid_hw->hw_intf->hw_idx); + } + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d PHY_DL1_SOT_CAPTURED", + csid_hw->hw_intf->hw_idx); + } + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d PHY_DL2_SOT_CAPTURED", + csid_hw->hw_intf->hw_idx); + } + if (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d PHY_DL3_SOT_CAPTURED", + csid_hw->hw_intf->hw_idx); + } + } + + if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) && + (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d LONG_PKT_CAPTURED", + csid_hw->hw_intf->hw_idx); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->csid_csi2_rx_captured_long_pkt_0_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d long packet VC :%d DT:%d WC:%d", + csid_hw->hw_intf->hw_idx, + (val >> 22), ((val >> 16) & 0x3F), (val & 0xFFFF)); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->csid_csi2_rx_captured_long_pkt_1_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d long packet ECC :%d", + csid_hw->hw_intf->hw_idx, val); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base 
+ + csi2_reg->csid_csi2_rx_captured_long_pkt_ftr_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d long pkt cal CRC:%d expected CRC:%d", + csid_hw->hw_intf->hw_idx, (val >> 16), (val & 0xFFFF)); + /* reset long pkt strobe to capture next long packet */ + val = (1 << csi2_reg->csi2_rx_long_pkt_hdr_rst_stb_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csi2_reg->csid_csi2_rx_rst_strobes_addr); + } + if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) && + (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d SHORT_PKT_CAPTURED", + csid_hw->hw_intf->hw_idx); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->csid_csi2_rx_captured_short_pkt_0_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d short pkt VC :%d DT:%d LC:%d", + csid_hw->hw_intf->hw_idx, + (val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF)); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->csid_csi2_rx_captured_short_pkt_1_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d short packet ECC :%d", + csid_hw->hw_intf->hw_idx, val); + /* reset short pkt strobe to capture next short packet */ + val = (1 << csi2_reg->csi2_rx_short_pkt_hdr_rst_stb_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csi2_reg->csid_csi2_rx_rst_strobes_addr); + } + + if ((csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) && + (irq_status[TFE_CSID_IRQ_REG_RX] & + TFE_CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PKT_HDR_CAPTURED", + csid_hw->hw_intf->hw_idx); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csi2_reg->csid_csi2_rx_captured_cphy_pkt_hdr_addr); + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d cphy packet VC :%d DT:%d WC:%d", + csid_hw->hw_intf->hw_idx, + (val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF)); + /* reset cphy pkt strobe to capture next short packet */ + val = (1 << csi2_reg->csi2_rx_cphy_pkt_hdr_rst_stb_shift); + cam_io_w_mb(val, 
soc_info->reg_map[0].mem_base + + csi2_reg->csid_csi2_rx_rst_strobes_addr); + } + + if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_RST_IRQ_LOG) { + + if (irq_status[TFE_CSID_IRQ_REG_IPP] & + BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID IPP reset complete"); + + if (irq_status[TFE_CSID_IRQ_REG_TOP]) + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID TOP reset complete"); + + if (irq_status[TFE_CSID_IRQ_REG_RX] & + BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID RX reset complete"); + } + + /* read the IPP errors */ + if (csid_hw->pxl_pipe_enable) { + /* IPP reset done bit */ + if (irq_status[TFE_CSID_IRQ_REG_IPP] & + BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) { + CAM_DBG(CAM_ISP, "CSID IPP reset complete"); + complete(&csid_hw->csid_ipp_complete); + } + + if ((irq_status[TFE_CSID_IRQ_REG_IPP] & + TFE_CSID_PATH_INFO_INPUT_SOF) && + (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) { + if (!csid_hw->sof_irq_triggered) + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d IPP SOF received", + csid_hw->hw_intf->hw_idx); + else + log_en = 1; + + if (csid_hw->sof_irq_triggered) + csid_hw->irq_debug_cnt++; + } + + if ((irq_status[TFE_CSID_IRQ_REG_IPP] & + TFE_CSID_PATH_INFO_INPUT_EOF) && + (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received", + csid_hw->hw_intf->hw_idx); + } + + if (irq_status[TFE_CSID_IRQ_REG_IPP] & + TFE_CSID_PATH_ERROR_FIFO_OVERFLOW) { + /* Stop IPP path immediately */ + cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY, + soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_ctrl_addr); + is_error_irq = true; + } + + if (irq_status[TFE_CSID_IRQ_REG_IPP] & + TFE_CSID_PATH_IPP_ERROR_CCIF_VIOLATION) + is_error_irq = true; + + if ((irq_status[TFE_CSID_IRQ_REG_IPP] & + TFE_CSID_PATH_ERROR_PIX_COUNT) || + (irq_status[TFE_CSID_IRQ_REG_IPP] & + TFE_CSID_PATH_ERROR_LINE_COUNT)) { + ipp_reg = csid_reg->ipp_reg; + cmn_reg = 
csid_reg->cmn_reg; + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + ipp_reg->csid_pxl_format_measure0_addr); + val1 = cam_io_r_mb(soc_info->reg_map[0].mem_base + + ipp_reg->csid_pxl_format_measure_cfg1_addr); + + CAM_ERR(CAM_ISP, + "Pix/Line count error for CSID: %d IPP path, Expected:: height: %d, width: %d and Actual:: height: %d width %d", + csid_hw->hw_intf->hw_idx, + ((val1 >> + cmn_reg->format_measure_height_shift_val) & + cmn_reg->format_measure_height_mask_val), + val1 & + cmn_reg->format_measure_width_mask_val, + ((val >> + cmn_reg->format_measure_height_shift_val) & + cmn_reg->format_measure_height_mask_val), + val & + cmn_reg->format_measure_width_mask_val); + } + + } + + for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) { + + if ((irq_status[i] & + BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) && + (csid_hw->csid_debug & + TFE_CSID_DEBUG_ENABLE_RST_IRQ_LOG)) + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d RDI%d reset complete", + csid_hw->hw_intf->hw_idx, i); + + if (irq_status[i] & + BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) { + CAM_DBG(CAM_ISP, "CSID:%d RDI%d reset complete", + csid_hw->hw_intf->hw_idx, i); + complete(&csid_hw->csid_rdin_complete[i]); + } + + if (irq_status[i] & TFE_CSID_PATH_INFO_INPUT_SOF) + cam_tfe_csid_enable_path_for_init_frame_drop(csid_hw, i); + + if ((irq_status[i] & TFE_CSID_PATH_INFO_INPUT_SOF) && + (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_SOF_IRQ)) { + if (!csid_hw->sof_irq_triggered) + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d RDI:%d SOF received", + csid_hw->hw_intf->hw_idx, i); + else + log_en = 1; + + if (csid_hw->sof_irq_triggered) + csid_hw->irq_debug_cnt++; + } + + if ((irq_status[i] & TFE_CSID_PATH_INFO_INPUT_EOF) && + (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOF_IRQ)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "CSID:%d RDI:%d EOF received", + csid_hw->hw_intf->hw_idx, i); + } + + if (irq_status[i] & TFE_CSID_PATH_ERROR_FIFO_OVERFLOW) { + /* Stop RDI path immediately */ + is_error_irq = true; + 
cam_io_w_mb(CAM_TFE_CSID_HALT_IMMEDIATELY, + soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr); + } + + if ((irq_status[i] & TFE_CSID_PATH_RDI_OVERFLOW_IRQ) || + (irq_status[i] & + TFE_CSID_PATH_RDI_ERROR_CCIF_VIOLATION)) + is_error_irq = true; + + if ((irq_status[i] & TFE_CSID_PATH_ERROR_PIX_COUNT) || + (irq_status[i] & TFE_CSID_PATH_ERROR_LINE_COUNT)) { + rdi_reg = csid_reg->rdi_reg[i]; + cmn_reg = csid_reg->cmn_reg; + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_format_measure0_addr); + val1 = cam_io_r_mb(soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_format_measure_cfg1_addr); + + CAM_ERR(CAM_ISP, + "Pix/Line count error for CSID:%d RDI:%d path, Expected:: height: %d, width: %d and Actual:: height: %d width %d", + csid_hw->hw_intf->hw_idx, i, + ((val1 >> + cmn_reg->format_measure_height_shift_val) & + cmn_reg->format_measure_height_mask_val), + val1 & + cmn_reg->format_measure_width_mask_val, + ((val >> + cmn_reg->format_measure_height_shift_val) & + cmn_reg->format_measure_height_mask_val), + val & + cmn_reg->format_measure_width_mask_val); + } + } + + if (is_error_irq || log_en) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "CSID %d irq status TOP: 0x%x RX: 0x%x IPP: 0x%x", + csid_hw->hw_intf->hw_idx, + irq_status[TFE_CSID_IRQ_REG_TOP], + irq_status[TFE_CSID_IRQ_REG_RX], + irq_status[TFE_CSID_IRQ_REG_IPP]); + CAM_ERR_RATE_LIMIT(CAM_ISP, + "RDI0: 0x%x RDI1: 0x%x RDI2: 0x%x CSID clk:%d", + irq_status[TFE_CSID_IRQ_REG_RDI0], + irq_status[TFE_CSID_IRQ_REG_RDI1], + irq_status[TFE_CSID_IRQ_REG_RDI2], + soc_info->applied_src_clk_rates.sw_client); + + cam_tfe_csid_handle_hw_err_irq(csid_hw, + CAM_ISP_HW_ERROR_NONE, irq_status); + } + + if (csid_hw->irq_debug_cnt >= CAM_TFE_CSID_IRQ_SOF_DEBUG_CNT_MAX) { + cam_tfe_csid_sof_irq_debug(csid_hw, &sof_irq_debug_en); + csid_hw->irq_debug_cnt = 0; + } + + CAM_DBG(CAM_ISP, "IRQ Handling exit"); + return IRQ_HANDLED; +} + +int cam_tfe_csid_hw_probe_init(struct cam_hw_intf 
*csid_hw_intf, + uint32_t csid_idx) +{ + int rc = -EINVAL; + uint32_t i, j, val, clk_lvl; + struct cam_tfe_csid_path_cfg *path_data; + struct cam_hw_info *csid_hw_info; + struct cam_tfe_csid_hw *tfe_csid_hw = NULL; + const struct cam_tfe_csid_reg_offset *csid_reg; + + if (csid_idx >= CAM_TFE_CSID_HW_NUM_MAX) { + CAM_ERR(CAM_ISP, "Invalid csid index:%d", csid_idx); + return rc; + } + + csid_hw_info = (struct cam_hw_info *) csid_hw_intf->hw_priv; + tfe_csid_hw = (struct cam_tfe_csid_hw *) csid_hw_info->core_info; + + tfe_csid_hw->hw_intf = csid_hw_intf; + tfe_csid_hw->hw_info = csid_hw_info; + csid_reg = tfe_csid_hw->csid_info->csid_reg; + + CAM_DBG(CAM_ISP, "type %d index %d", + tfe_csid_hw->hw_intf->hw_type, csid_idx); + + tfe_csid_hw->device_enabled = 0; + tfe_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&tfe_csid_hw->hw_info->hw_mutex); + spin_lock_init(&tfe_csid_hw->hw_info->hw_lock); + spin_lock_init(&tfe_csid_hw->spin_lock); + init_completion(&tfe_csid_hw->hw_info->hw_complete); + + init_completion(&tfe_csid_hw->csid_top_complete); + init_completion(&tfe_csid_hw->csid_csi2_complete); + init_completion(&tfe_csid_hw->csid_ipp_complete); + for (i = 0; i < CAM_TFE_CSID_RDI_MAX; i++) + init_completion(&tfe_csid_hw->csid_rdin_complete[i]); + + rc = cam_tfe_csid_init_soc_resources(&tfe_csid_hw->hw_info->soc_info, + cam_tfe_csid_irq, tfe_csid_hw); + if (rc < 0) { + CAM_ERR(CAM_ISP, "CSID:%d Failed to init_soc", csid_idx); + goto err; + } + rc = cam_soc_util_get_clk_level(&tfe_csid_hw->hw_info->soc_info, + tfe_csid_hw->clk_rate, + tfe_csid_hw->hw_info->soc_info.src_clk_idx, &clk_lvl); + CAM_DBG(CAM_ISP, "CSID clock lvl %u", clk_lvl); + + rc = cam_tfe_csid_enable_soc_resources(&tfe_csid_hw->hw_info->soc_info, + clk_lvl); + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed", + tfe_csid_hw->hw_intf->hw_idx); + goto err; + } + + tfe_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_tfe_csid_get_hw_caps; + tfe_csid_hw->hw_intf->hw_ops.init = 
cam_tfe_csid_init_hw; + tfe_csid_hw->hw_intf->hw_ops.deinit = cam_tfe_csid_deinit_hw; + tfe_csid_hw->hw_intf->hw_ops.reset = cam_tfe_csid_reset; + tfe_csid_hw->hw_intf->hw_ops.reserve = cam_tfe_csid_reserve; + tfe_csid_hw->hw_intf->hw_ops.release = cam_tfe_csid_release; + tfe_csid_hw->hw_intf->hw_ops.start = cam_tfe_csid_start; + tfe_csid_hw->hw_intf->hw_ops.stop = cam_tfe_csid_stop; + tfe_csid_hw->hw_intf->hw_ops.read = cam_tfe_csid_read; + tfe_csid_hw->hw_intf->hw_ops.write = cam_tfe_csid_write; + tfe_csid_hw->hw_intf->hw_ops.process_cmd = cam_tfe_csid_process_cmd; + + /* reset the cid values */ + for (i = 0; i < CAM_TFE_CSID_CID_MAX; i++) { + for (j = 0; j < CAM_ISP_TFE_VC_DT_CFG ; j++) { + tfe_csid_hw->cid_res[i].vc_dt[j].vc = 0; + tfe_csid_hw->cid_res[i].vc_dt[j].dt = 0; + } + tfe_csid_hw->cid_res[i].num_valid_vc_dt = 0; + tfe_csid_hw->cid_res[i].cnt = 0; + } + + if (tfe_csid_hw->hw_intf->hw_idx == 2) { + val = cam_io_r_mb( + tfe_csid_hw->hw_info->soc_info.reg_map[1].mem_base + + csid_reg->cmn_reg->top_tfe2_fuse_reg); + if (val) { + CAM_INFO(CAM_ISP, "TFE 2 is not supported by hardware"); + + rc = cam_tfe_csid_disable_soc_resources( + &tfe_csid_hw->hw_info->soc_info); + if (rc) + CAM_ERR(CAM_ISP, + "CSID:%d Disable CSID SOC failed", + tfe_csid_hw->hw_intf->hw_idx); + else + rc = -EINVAL; + goto err; + } + } + + val = cam_io_r_mb( + tfe_csid_hw->hw_info->soc_info.reg_map[1].mem_base + + csid_reg->cmn_reg->top_tfe2_pix_pipe_fuse_reg); + + /* Initialize the IPP resources */ + if (!(val && (tfe_csid_hw->hw_intf->hw_idx == 2))) { + CAM_DBG(CAM_ISP, "initializing the pix path"); + + tfe_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH; + tfe_csid_hw->ipp_res.res_id = CAM_TFE_CSID_PATH_RES_IPP; + tfe_csid_hw->ipp_res.res_state = + CAM_ISP_RESOURCE_STATE_AVAILABLE; + tfe_csid_hw->ipp_res.hw_intf = tfe_csid_hw->hw_intf; + path_data = CAM_MEM_ZALLOC(sizeof(*path_data), + GFP_KERNEL); + if (!path_data) { + rc = -ENOMEM; + goto err; + } + 
tfe_csid_hw->ipp_res.res_priv = path_data; + tfe_csid_hw->pxl_pipe_enable = 1; + } + + /* Initialize the RDI resource */ + for (i = 0; i < tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis; + i++) { + /* res type is from RDI 0 to RDI2 */ + tfe_csid_hw->rdi_res[i].res_type = + CAM_ISP_RESOURCE_PIX_PATH; + tfe_csid_hw->rdi_res[i].res_id = i; + tfe_csid_hw->rdi_res[i].res_state = + CAM_ISP_RESOURCE_STATE_AVAILABLE; + tfe_csid_hw->rdi_res[i].hw_intf = tfe_csid_hw->hw_intf; + + path_data = CAM_MEM_ZALLOC(sizeof(*path_data), + GFP_KERNEL); + if (!path_data) { + rc = -ENOMEM; + goto err; + } + tfe_csid_hw->rdi_res[i].res_priv = path_data; + } + + rc = cam_tasklet_init(&tfe_csid_hw->tasklet, tfe_csid_hw, csid_idx); + if (rc) { + CAM_ERR(CAM_ISP, "Unable to create CSID tasklet rc %d", rc); + goto err; + } + + INIT_LIST_HEAD(&tfe_csid_hw->free_payload_list); + for (i = 0; i < CAM_CSID_EVT_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&tfe_csid_hw->evt_payload[i].list); + list_add_tail(&tfe_csid_hw->evt_payload[i].list, + &tfe_csid_hw->free_payload_list); + } + + tfe_csid_hw->csid_debug = 0; + tfe_csid_hw->error_irq_count = 0; + tfe_csid_hw->prev_boot_timestamp = 0; + + rc = cam_tfe_csid_disable_soc_resources( + &tfe_csid_hw->hw_info->soc_info); + if (rc) { + CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed", + tfe_csid_hw->hw_intf->hw_idx); + goto err; + } + + /* Check if ppi bridge is present or not? 
*/ + tfe_csid_hw->ppi_enable = of_property_read_bool( + csid_hw_info->soc_info.pdev->dev.of_node, + "ppi-enable"); + + if (!tfe_csid_hw->ppi_enable) + return 0; + + /* Initialize the PPI bridge */ + for (i = 0; i < CAM_CSID_PPI_HW_MAX; i++) { + rc = cam_csid_ppi_hw_init(&tfe_csid_hw->ppi_hw_intf[i], i); + if (rc < 0) { + CAM_INFO(CAM_ISP, "PPI init failed for PPI %d", i); + rc = 0; + break; + } + } + + return 0; +err: + if (rc) { + CAM_MEM_FREE(tfe_csid_hw->ipp_res.res_priv); + for (i = 0; i < + tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis; + i++) + CAM_MEM_FREE(tfe_csid_hw->rdi_res[i].res_priv); + } + + return rc; +} + + +int cam_tfe_csid_hw_deinit(struct cam_tfe_csid_hw *tfe_csid_hw) +{ + int rc = -EINVAL; + uint32_t i; + + if (!tfe_csid_hw) { + CAM_ERR(CAM_ISP, "Invalid param"); + return rc; + } + + /* release the privdate data memory from resources */ + CAM_MEM_FREE(tfe_csid_hw->ipp_res.res_priv); + + for (i = 0; i < + tfe_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis; + i++) { + CAM_MEM_FREE(tfe_csid_hw->rdi_res[i].res_priv); + } + + cam_tfe_csid_deinit_soc_resources(&tfe_csid_hw->hw_info->soc_info); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h new file mode 100644 index 0000000000..bcda7d489d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h @@ -0,0 +1,526 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_TFE_CSID_HW_H_ +#define _CAM_TFE_CSID_HW_H_ + +#include "cam_hw.h" +#include "cam_tfe_csid_hw_intf.h" +#include "cam_tfe_csid_soc.h" +#include "cam_csid_ppi_core.h" + +#define CAM_TFE_CSID_CID_MAX 4 + +/* Each word is taken as uint32_t, for dumping uint64_t count as 2 words + * 1. soc_index + * 2. clk_rate --> uint64_t -> 2 words + */ +#define CAM_TFE_CSID_DUMP_MISC_NUM_WORDS 3 + +#define TFE_CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED BIT(0) +#define TFE_CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED BIT(1) +#define TFE_CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED BIT(2) +#define TFE_CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED BIT(3) +#define TFE_CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED BIT(4) +#define TFE_CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED BIT(5) +#define TFE_CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED BIT(6) +#define TFE_CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED BIT(7) +#define TFE_CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED BIT(8) +#define TFE_CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED BIT(9) +#define TFE_CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED BIT(10) +#define TFE_CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION BIT(11) +#define TFE_CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION BIT(12) +#define TFE_CSID_CSI2_RX_ERROR_CPHY_PH_CRC BIT(13) +#define TFE_CSID_CSI2_RX_WARNING_ECC BIT(14) +#define TFE_CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW BIT(15) +#define TFE_CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW BIT(16) +#define TFE_CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW BIT(17) +#define TFE_CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW BIT(18) +#define TFE_CSID_CSI2_RX_ERROR_CRC BIT(19) +#define TFE_CSID_CSI2_RX_ERROR_ECC BIT(20) +#define TFE_CSID_CSI2_RX_ERROR_MMAPPED_VC_DT BIT(21) +#define TFE_CSID_CSI2_RX_ERROR_UNMAPPED_VC_DT BIT(22) +#define TFE_CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW BIT(23) +#define TFE_CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME BIT(24) +#define TFE_CSID_CSI2_RX_INFO_RST_DONE BIT(27) + +#define TFE_CSID_PATH_INFO_RST_DONE BIT(1) +#define TFE_CSID_PATH_ERROR_FIFO_OVERFLOW BIT(2) +#define TFE_CSID_PATH_INFO_INPUT_EOF 
BIT(9) +#define TFE_CSID_PATH_INFO_INPUT_EOL BIT(10) +#define TFE_CSID_PATH_INFO_INPUT_SOL BIT(11) +#define TFE_CSID_PATH_INFO_INPUT_SOF BIT(12) +#define TFE_CSID_PATH_ERROR_PIX_COUNT BIT(13) +#define TFE_CSID_PATH_ERROR_LINE_COUNT BIT(14) +#define TFE_CSID_PATH_IPP_ERROR_CCIF_VIOLATION BIT(15) +#define TFE_CSID_PATH_IPP_OVERFLOW_IRQ BIT(16) +#define TFE_CSID_PATH_IPP_FRAME_DROP BIT(17) +#define TFE_CSID_PATH_RDI_FRAME_DROP BIT(16) +#define TFE_CSID_PATH_RDI_OVERFLOW_IRQ BIT(17) +#define TFE_CSID_PATH_RDI_ERROR_CCIF_VIOLATION BIT(18) + +/* + * Debug values enable the corresponding interrupts and debug logs provide + * necessary information + */ +#define TFE_CSID_DEBUG_ENABLE_SOF_IRQ BIT(0) +#define TFE_CSID_DEBUG_ENABLE_EOF_IRQ BIT(1) +#define TFE_CSID_DEBUG_ENABLE_SOT_IRQ BIT(2) +#define TFE_CSID_DEBUG_ENABLE_EOT_IRQ BIT(3) +#define TFE_CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE BIT(4) +#define TFE_CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE BIT(5) +#define TFE_CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE BIT(6) +#define TFE_CSID_DEBUG_ENABLE_HBI_VBI_INFO BIT(7) +#define TFE_CSID_DEBUG_DISABLE_EARLY_EOF BIT(8) +#define TFE_CSID_DEBUG_ENABLE_RST_IRQ_LOG BIT(9) + +#define CAM_CSID_EVT_PAYLOAD_MAX 10 + +/* Binning supported masks. Binning support changes for specific paths + * and also for targets. With the mask, we handle the supported features + * in reg files and handle in code accordingly. 
+ */ + +#define CAM_TFE_CSID_BIN_BAYER BIT(0) +#define CAM_TFE_CSID_BIN_QCFA BIT(1) + +/* enum cam_csid_path_halt_mode select the path halt mode control */ +enum cam_tfe_csid_path_halt_mode { + TFE_CSID_HALT_MODE_INTERNAL, + TFE_CSID_HALT_MODE_GLOBAL, + TFE_CSID_HALT_MODE_MASTER, + TFE_CSID_HALT_MODE_SLAVE, +}; + +/** + *enum cam_csid_path_timestamp_stb_sel - select the sof/eof strobes used to + * capture the timestamp + */ +enum cam_tfe_csid_path_timestamp_stb_sel { + TFE_CSID_TIMESTAMP_STB_PRE_HALT, + TFE_CSID_TIMESTAMP_STB_POST_HALT, + TFE_CSID_TIMESTAMP_STB_POST_IRQ, + TFE_CSID_TIMESTAMP_STB_MAX, +}; + +struct cam_tfe_csid_pxl_reg_offset { + /* Pxl path register offsets*/ + uint32_t csid_pxl_irq_status_addr; + uint32_t csid_pxl_irq_mask_addr; + uint32_t csid_pxl_irq_clear_addr; + uint32_t csid_pxl_irq_set_addr; + + uint32_t csid_pxl_cfg0_addr; + uint32_t csid_pxl_cfg1_addr; + uint32_t csid_pxl_ctrl_addr; + uint32_t csid_pxl_hcrop_addr; + uint32_t csid_pxl_vcrop_addr; + uint32_t csid_pxl_rst_strobes_addr; + uint32_t csid_pxl_status_addr; + uint32_t csid_pxl_misr_val_addr; + uint32_t csid_pxl_timestamp_curr0_sof_addr; + uint32_t csid_pxl_timestamp_curr1_sof_addr; + uint32_t csid_pxl_timestamp_perv0_sof_addr; + uint32_t csid_pxl_timestamp_perv1_sof_addr; + uint32_t csid_pxl_timestamp_curr0_eof_addr; + uint32_t csid_pxl_timestamp_curr1_eof_addr; + uint32_t csid_pxl_timestamp_perv0_eof_addr; + uint32_t csid_pxl_timestamp_perv1_eof_addr; + uint32_t csid_pxl_err_recovery_cfg0_addr; + uint32_t csid_pxl_err_recovery_cfg1_addr; + uint32_t csid_pxl_err_recovery_cfg2_addr; + uint32_t csid_pxl_multi_vcdt_cfg0_addr; + uint32_t csid_pxl_format_measure_cfg0_addr; + uint32_t csid_pxl_format_measure_cfg1_addr; + uint32_t csid_pxl_format_measure0_addr; + uint32_t csid_pxl_format_measure1_addr; + uint32_t csid_pxl_format_measure2_addr; + + /* configuration */ + uint32_t pix_store_en_shift_val; + uint32_t early_eof_en_shift_val; + uint32_t halt_master_sel_shift; + uint32_t 
halt_mode_shift; + uint32_t halt_master_sel_master_val; + uint32_t halt_master_sel_slave_val; + uint32_t binning_supported; + uint32_t bin_qcfa_en_shift_val; + uint32_t bin_en_shift_val; + uint32_t format_measure_en_shift_val; + uint32_t measure_en_hbi_vbi_cnt_val; + bool is_multi_vc_dt_supported; +}; + +struct cam_tfe_csid_rdi_reg_offset { + uint32_t csid_rdi_irq_status_addr; + uint32_t csid_rdi_irq_mask_addr; + uint32_t csid_rdi_irq_clear_addr; + uint32_t csid_rdi_irq_set_addr; + + /*RDI N register address */ + uint32_t csid_rdi_cfg0_addr; + uint32_t csid_rdi_cfg1_addr; + uint32_t csid_rdi_ctrl_addr; + uint32_t csid_rdi_rst_strobes_addr; + uint32_t csid_rdi_status_addr; + uint32_t csid_rdi_misr_val0_addr; + uint32_t csid_rdi_misr_val1_addr; + uint32_t csid_rdi_timestamp_curr0_sof_addr; + uint32_t csid_rdi_timestamp_curr1_sof_addr; + uint32_t csid_rdi_timestamp_prev0_sof_addr; + uint32_t csid_rdi_timestamp_prev1_sof_addr; + uint32_t csid_rdi_timestamp_curr0_eof_addr; + uint32_t csid_rdi_timestamp_curr1_eof_addr; + uint32_t csid_rdi_timestamp_prev0_eof_addr; + uint32_t csid_rdi_timestamp_prev1_eof_addr; + uint32_t csid_rdi_err_recovery_cfg0_addr; + uint32_t csid_rdi_err_recovery_cfg1_addr; + uint32_t csid_rdi_err_recovery_cfg2_addr; + uint32_t csid_rdi_byte_cntr_ping_addr; + uint32_t csid_rdi_byte_cntr_pong_addr; + uint32_t csid_rdi_multi_vcdt_cfg0_addr; + uint32_t csid_rdi_format_measure_cfg0_addr; + uint32_t csid_rdi_format_measure_cfg1_addr; + uint32_t csid_rdi_format_measure0_addr; + uint32_t csid_rdi_format_measure1_addr; + uint32_t csid_rdi_format_measure2_addr; + + /* configuration */ + uint32_t packing_format; + uint32_t format_measure_en_shift_val; + uint32_t measure_en_hbi_vbi_cnt_val; + bool is_multi_vc_dt_supported; +}; + +struct cam_tfe_csid_csi2_rx_reg_offset { + uint32_t csid_csi2_rx_irq_status_addr; + uint32_t csid_csi2_rx_irq_mask_addr; + uint32_t csid_csi2_rx_irq_clear_addr; + uint32_t csid_csi2_rx_irq_set_addr; + uint32_t csid_csi2_rx_cfg0_addr; 
+ uint32_t csid_csi2_rx_cfg1_addr; + uint32_t csid_csi2_rx_capture_ctrl_addr; + uint32_t csid_csi2_rx_rst_strobes_addr; + uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr; + uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr; + uint32_t csid_csi2_rx_captured_short_pkt_0_addr; + uint32_t csid_csi2_rx_captured_short_pkt_1_addr; + uint32_t csid_csi2_rx_captured_long_pkt_0_addr; + uint32_t csid_csi2_rx_captured_long_pkt_1_addr; + uint32_t csid_csi2_rx_captured_long_pkt_ftr_addr; + uint32_t csid_csi2_rx_captured_cphy_pkt_hdr_addr; + uint32_t csid_csi2_rx_total_pkts_rcvd_addr; + uint32_t csid_csi2_rx_stats_ecc_addr; //no + uint32_t csid_csi2_rx_total_crc_err_addr; + + /*configurations */ + uint32_t phy_tpg_base_id; + uint32_t phy_sel_base; + uint32_t csi2_rst_srb_all; + uint32_t csi2_rst_done_shift_val; + uint32_t csi2_irq_mask_all; + uint32_t csi2_misr_enable_shift_val; + uint32_t csi2_vc_mode_shift_val; + uint32_t csi2_capture_long_pkt_en_shift; + uint32_t csi2_capture_short_pkt_en_shift; + uint32_t csi2_capture_cphy_pkt_en_shift; + uint32_t csi2_capture_long_pkt_dt_shift; + uint32_t csi2_capture_long_pkt_vc_shift; + uint32_t csi2_capture_short_pkt_vc_shift; + uint32_t csi2_capture_cphy_pkt_dt_shift; + uint32_t csi2_capture_cphy_pkt_vc_shift; + uint32_t csi2_rx_phy_num_mask; + uint32_t csi2_rx_long_pkt_hdr_rst_stb_shift; + uint32_t csi2_rx_short_pkt_hdr_rst_stb_shift; + uint32_t csi2_rx_cphy_pkt_hdr_rst_stb_shift; + bool need_to_sel_tpg_mux; +}; + +struct cam_tfe_csid_common_reg_offset { + /* MIPI CSID registers */ + uint32_t csid_hw_version_addr; + uint32_t csid_cfg0_addr; + uint32_t csid_ctrl_addr; + uint32_t csid_rst_strobes_addr; + + uint32_t csid_test_bus_ctrl_addr; + uint32_t csid_top_irq_status_addr; + uint32_t csid_top_irq_mask_addr; + uint32_t csid_top_irq_clear_addr; + uint32_t csid_top_irq_set_addr; + uint32_t csid_irq_cmd_addr; + + /*configurations */ + uint32_t major_version; + uint32_t minor_version; + uint32_t version_incr; + uint32_t num_rdis; + 
uint32_t num_pix; + uint32_t csid_reg_rst_stb; + uint32_t csid_rst_stb; + uint32_t csid_rst_stb_sw_all; + uint32_t ipp_path_rst_stb_all; + uint32_t rdi_path_rst_stb_all; + uint32_t path_rst_done_shift_val; + uint32_t path_en_shift_val; + uint32_t dt_id_shift_val; + uint32_t vc_shift_val; + uint32_t dt_shift_val; + uint32_t vc1_shift_val; + uint32_t dt1_shift_val; + uint32_t multi_vc_dt_en_shift_val; + uint32_t fmt_shift_val; + uint32_t plain_fmt_shit_val; + uint32_t crop_v_en_shift_val; + uint32_t crop_h_en_shift_val; + uint32_t crop_shift; + uint32_t ipp_irq_mask_all; + uint32_t rdi_irq_mask_all; + uint32_t top_tfe2_pix_pipe_fuse_reg; + uint32_t top_tfe2_fuse_reg; + uint32_t format_measure_height_shift_val; + uint32_t format_measure_height_mask_val; + uint32_t format_measure_width_mask_val; + bool format_measure_support; + bool sync_clk; +}; + +/** + * struct cam_tfe_csid_reg_offset- CSID instance register info + * + * @cmn_reg: csid common registers info + * @ipp_reg: ipp register offset information + * @ppp_reg: ppp register offset information + * @rdi_reg: rdi register offset information + * + */ +struct cam_tfe_csid_reg_offset { + const struct cam_tfe_csid_common_reg_offset *cmn_reg; + const struct cam_tfe_csid_csi2_rx_reg_offset *csi2_reg; + const struct cam_tfe_csid_pxl_reg_offset *ipp_reg; + const struct cam_tfe_csid_rdi_reg_offset *rdi_reg[CAM_TFE_CSID_RDI_MAX]; +}; + +/** + * struct cam_tfe_csid_hw_info- CSID HW info + * + * @csid_reg: csid register offsets + * @hw_dts_version: HW DTS version + * @csid_max_clk: maximim csid clock + * + */ +struct cam_tfe_csid_hw_info { + const struct cam_tfe_csid_reg_offset *csid_reg; + uint32_t hw_dts_version; + uint32_t csid_max_clk; +}; + +/** + * struct cam_tfe_csid_csi2_rx_cfg- csid csi2 rx configuration data + * @phy_sel: input resource type for sensor only + * @lane_type: lane type: c-phy or d-phy + * @lane_num : active lane number + * @lane_cfg: lane configurations: 4 bits per lane + * + */ +struct 
cam_tfe_csid_csi2_rx_cfg { + uint32_t phy_sel; + uint32_t lane_type; + uint32_t lane_num; + uint32_t lane_cfg; +}; + +/** + * struct vc_dt_data- VC DT data + * + * @vc: VC data + * @dt: DT data + * + */ +struct vc_dt_data { + uint32_t vc; + uint32_t dt; +}; + +/** + * struct cam_tfe_csid_cid_data- cid configuration private data + * + * @vc_dt: VC DT data + * @num_valid_vc_dt: Number of VC-DTs + * @cnt: Cid resource reference count. + * + */ +struct cam_tfe_csid_cid_data { + struct vc_dt_data vc_dt[CAM_ISP_TFE_VC_DT_CFG]; + uint32_t num_valid_vc_dt; + uint32_t cnt; +}; + +/** + * struct cam_tfe_csid_path_cfg- csid path configuration details. It is stored + * as private data for IPP/ RDI paths + * @vc_dt : VC DT data + * @num_valid_vc_dt: Number of valid VC-DTs + * @cid cid number, it is same as DT_ID number in HW + * @in_format: input decode format + * @out_format: output format + * @crop_enable: crop is enable or disabled, if enabled + * then remaining parameters are valid. + * @start_pixel: start pixel + * @end_pixel: end_pixel + * @width: width + * @start_line: start line + * @end_line: end_line + * @height: heigth + * @sync_mode: Applicable for IPP/RDI path reservation + * Reserving the path for master IPP or slave IPP + * master (set value 1), Slave ( set value 2) + * for RDI, set mode to none + * @master_idx: For Slave reservation, Give master TFE instance Index. + * Slave will synchronize with master Start and stop + * operations + * @clk_rate: Clock rate + * @sensor_width: Sensor width in pixel + * @sensor_height: Sensor height in pixel + * @sensor_fps: Sensor fps + * @sensor_hbi: Sensor horizontal blanking interval + * @sensor_vbi: Sensor vertical blanking interval + * @bayer_bin: Bayer binning + * @qcfa_bin: Quad-CFA binning + * @usage_type: dual or single tfe information + * @init_frame_drop init frame drop value. In dual ife case rdi need to drop + * one more frame than pix. + * @res_sof_cnt path resource sof count value. 
it used for initial + * frame drop + * + */ +struct cam_tfe_csid_path_cfg { + struct vc_dt_data vc_dt[CAM_ISP_TFE_VC_DT_CFG]; + uint32_t num_valid_vc_dt; + uint32_t cid; + uint32_t in_format; + uint32_t out_format; + bool crop_enable; + uint32_t start_pixel; + uint32_t end_pixel; + uint32_t width; + uint32_t start_line; + uint32_t end_line; + uint32_t height; + enum cam_isp_hw_sync_mode sync_mode; + uint32_t master_idx; + uint64_t clk_rate; + uint32_t sensor_width; + uint32_t sensor_height; + uint32_t sensor_fps; + uint32_t sensor_hbi; + uint32_t sensor_vbi; + uint32_t bayer_bin; + uint32_t qcfa_bin; + uint32_t usage_type; + uint32_t init_frame_drop; + uint32_t res_sof_cnt; +}; + +/** + * struct cam_csid_evt_payload- payload for csid hw event + * @list : list head + * @evt_type : Event type from CSID + * @irq_status : IRQ Status register + * @hw_idx : Hw index + * @priv : Private data of payload + */ +struct cam_csid_evt_payload { + struct list_head list; + uint32_t evt_type; + uint32_t irq_status[TFE_CSID_IRQ_REG_MAX]; + uint32_t hw_idx; + void *priv; +}; + +/** + * struct cam_tfe_csid_hw- csid hw device resources data + * + * @hw_intf: contain the csid hw interface information + * @hw_info: csid hw device information + * @csid_info: csid hw specific information + * @tasklet: tasklet to handle csid errors + * @free_payload_list: list head for payload + * @evt_payload: Event payload to be passed to tasklet + * @in_res_id: csid in resource type + * @csi2_rx_cfg: csi2 rx decoder configuration for csid + * @csi2_rx_reserve_cnt: csi2 reservations count value + * @ipp_res: image pixel path resource + * @rdi_res: raw dump image path resources + * @cid_res: cid resources values + * @csid_top_reset_complete: csid top reset completion + * @csid_csi2_reset_complete: csi2 reset completion + * @csid_ipp_reset_complete: ipp reset completion + * @csid_ppp_complete: ppp reset completion + * @csid_rdin_reset_complete: rdi n completion + * @csid_debug: csid debug information to 
enable the SOT, EOT, + * SOF, EOF, measure etc in the csid hw + * @clk_rate Clock rate + * @sof_irq_triggered: Flag is set on receiving event to enable sof irq + * incase of SOF freeze. + * @irq_debug_cnt: Counter to track sof irq's when above flag is set. + * @error_irq_count Error IRQ count, if continuous error irq comes + * need to stop the CSID and mask interrupts. + * @device_enabled Device enabled will set once CSID powered on and + * initial configuration are done. + * @lock_state csid spin lock + * @fatal_err_detected flag to indicate fatal errror is reported + * @event_cb: Callback function to hw mgr in case of hw events + * @event_cb_priv: Context data + * @ppi_hw_intf interface to ppi hardware + * @ppi_enabled flag to specify if the hardware has ppi bridge + * or not + * @prev_boot_timestamp previous frame bootime stamp + * @prev_qtimer_ts previous frame qtimer csid timestamp + * @sync_clk sync clocks such that freq(TFE)>freq(CSID)>freq(CSIPHY) + * + */ +struct cam_tfe_csid_hw { + struct cam_hw_intf *hw_intf; + struct cam_hw_info *hw_info; + struct cam_tfe_csid_hw_info *csid_info; + void *tasklet; + struct list_head free_payload_list; + struct cam_csid_evt_payload evt_payload[CAM_CSID_EVT_PAYLOAD_MAX]; + uint32_t in_res_id; + struct cam_tfe_csid_csi2_rx_cfg csi2_rx_cfg; + uint32_t csi2_reserve_cnt; + uint32_t pxl_pipe_enable; + struct cam_isp_resource_node ipp_res; + struct cam_isp_resource_node rdi_res[CAM_TFE_CSID_RDI_MAX]; + struct cam_tfe_csid_cid_data cid_res[CAM_TFE_CSID_CID_MAX]; + struct completion csid_top_complete; + struct completion csid_csi2_complete; + struct completion csid_ipp_complete; + struct completion csid_rdin_complete[CAM_TFE_CSID_RDI_MAX]; + uint64_t csid_debug; + uint64_t clk_rate; + bool sof_irq_triggered; + uint32_t irq_debug_cnt; + uint32_t error_irq_count; + uint32_t device_enabled; + spinlock_t spin_lock; + bool fatal_err_detected; + cam_hw_mgr_event_cb_func event_cb; + void *event_cb_priv; + struct cam_hw_intf 
*ppi_hw_intf[CAM_CSID_PPI_HW_MAX]; + bool ppi_enable; + uint64_t prev_boot_timestamp; + uint64_t prev_qtimer_ts; + bool sync_clk; +}; + +int cam_tfe_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf, + uint32_t csid_idx); + +int cam_tfe_csid_hw_deinit(struct cam_tfe_csid_hw *tfe_csid_hw); + +#endif /* _CAM_TFE_CSID_HW_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.c new file mode 100644 index 0000000000..7bf294c63d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include "cam_tfe_csid_core.h" +#include "cam_tfe_csid_dev.h" +#include "cam_tfe_csid_hw_intf.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static struct cam_hw_intf *cam_tfe_csid_hw_list[CAM_TFE_CSID_HW_NUM_MAX] = { + 0, 0, 0}; + +static int cam_tfe_csid_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + + struct cam_hw_intf *csid_hw_intf; + struct cam_hw_info *csid_hw_info; + struct cam_tfe_csid_hw *csid_dev = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_tfe_csid_hw_info *csid_hw_data = NULL; + uint32_t csid_dev_idx; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + CAM_DBG(CAM_ISP, "probe called"); + + csid_hw_intf = CAM_MEM_ZALLOC(sizeof(*csid_hw_intf), GFP_KERNEL); + if (!csid_hw_intf) { + rc = -ENOMEM; + goto err; + } + + csid_hw_info = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), 
GFP_KERNEL); + if (!csid_hw_info) { + rc = -ENOMEM; + goto free_hw_intf; + } + + csid_dev = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_csid_hw), GFP_KERNEL); + if (!csid_dev) { + rc = -ENOMEM; + goto free_hw_info; + } + + /* get tfe csid hw index */ + of_property_read_u32(pdev->dev.of_node, "cell-index", &csid_dev_idx); + /* get tfe csid hw information */ + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_ISP, "No matching table for the tfe csid hw"); + rc = -EINVAL; + goto free_dev; + } + + csid_hw_intf->hw_idx = csid_dev_idx; + csid_hw_intf->hw_type = CAM_ISP_HW_TYPE_TFE_CSID; + csid_hw_intf->hw_priv = csid_hw_info; + + csid_hw_info->core_info = csid_dev; + csid_hw_info->soc_info.pdev = pdev; + csid_hw_info->soc_info.dev = &pdev->dev; + csid_hw_info->soc_info.dev_name = pdev->name; + csid_hw_info->soc_info.index = csid_dev_idx; + + csid_hw_data = (struct cam_tfe_csid_hw_info *)match_dev->data; + /* need to setup the pdev before call the tfe hw probe init */ + csid_dev->csid_info = csid_hw_data; + + rc = cam_tfe_csid_hw_probe_init(csid_hw_intf, csid_dev_idx); + if (rc) + goto free_dev; + + platform_set_drvdata(pdev, csid_dev); + CAM_DBG(CAM_ISP, "CSID:%d probe successful", + csid_hw_intf->hw_idx); + + if (csid_hw_intf->hw_idx < CAM_TFE_CSID_HW_NUM_MAX) + cam_tfe_csid_hw_list[csid_hw_intf->hw_idx] = csid_hw_intf; + else + goto free_dev; + + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return 0; + +free_dev: + CAM_MEM_FREE(csid_dev); +free_hw_info: + CAM_MEM_FREE(csid_hw_info); +free_hw_intf: + CAM_MEM_FREE(csid_hw_intf); +err: + return rc; +} + +void cam_tfe_csid_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_tfe_csid_hw *csid_dev = NULL; + struct cam_hw_intf *csid_hw_intf; + struct cam_hw_info *csid_hw_info; + struct platform_device *pdev = to_platform_device(dev); + + 
csid_dev = (struct cam_tfe_csid_hw *)platform_get_drvdata(pdev); + + if (!csid_dev) { + CAM_ERR(CAM_ISP, "Error No data in csid_dev"); + return; + } + + csid_hw_intf = csid_dev->hw_intf; + csid_hw_info = csid_dev->hw_info; + + CAM_DBG(CAM_ISP, "CSID:%d remove", + csid_dev->hw_intf->hw_idx); + + cam_tfe_csid_hw_deinit(csid_dev); + + /*release the csid device memory */ + CAM_MEM_FREE(csid_dev); + CAM_MEM_FREE(csid_hw_info); + CAM_MEM_FREE(csid_hw_intf); +} + +const static struct component_ops cam_tfe_csid_component_ops = { + .bind = cam_tfe_csid_component_bind, + .unbind = cam_tfe_csid_component_unbind, +}; + +int cam_tfe_csid_probe(struct platform_device *pdev) +{ + int rc = 0; + + rc = component_add(&pdev->dev, &cam_tfe_csid_component_ops); + if (rc) + CAM_ERR(CAM_ISP, "failed to add component rc: %d", rc); + + return rc; +} + +int cam_tfe_csid_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_tfe_csid_component_ops); + return 0; +} + +int cam_tfe_csid_hw_init(struct cam_hw_intf **tfe_csid_hw, + uint32_t hw_idx) +{ + int rc = 0; + + if (cam_tfe_csid_hw_list[hw_idx]) { + *tfe_csid_hw = cam_tfe_csid_hw_list[hw_idx]; + } else { + *tfe_csid_hw = NULL; + rc = -1; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.h new file mode 100644 index 0000000000..3193d64455 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_dev.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_TFE_CSID_DEV_H_ +#define _CAM_TFE_CSID_DEV_H_ + +#include "cam_isp_hw.h" + +irqreturn_t cam_tfe_csid_irq(int irq_num, void *data); + +int cam_tfe_csid_probe(struct platform_device *pdev); +int cam_tfe_csid_remove(struct platform_device *pdev); + +#endif /*_CAM_TFE_CSID_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.c new file mode 100644 index 0000000000..34e8b254e7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include +#include "cam_tfe_csid_soc.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +int cam_tfe_csid_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t csid_irq_handler, void *data) +{ + int rc = 0, i; + struct cam_cpas_register_params cpas_register_param; + struct cam_tfe_csid_soc_private *soc_private; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_csid_soc_private), + GFP_KERNEL); + if (!soc_private) + return -ENOMEM; + + soc_info->soc_private = soc_private; + + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) + return rc; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + /* Need to see if we want post process the clock list */ + rc = cam_soc_util_request_platform_resource(soc_info, csid_irq_handler, &(irq_data[0])); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Error Request platform resources failed rc=%d", rc); + goto free_soc_private; + } + + memset(&cpas_register_param, 0, sizeof(cpas_register_param)); + 
strscpy(cpas_register_param.identifier, "csid", + CAM_HW_IDENTIFIER_LENGTH); + cpas_register_param.cell_index = soc_info->index; + cpas_register_param.dev = soc_info->dev; + rc = cam_cpas_register_client(&cpas_register_param); + if (rc) { + CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc); + goto release_soc; + } else { + soc_private->cpas_handle = cpas_register_param.client_handle; + } + + return rc; + +release_soc: + cam_soc_util_release_platform_resource(soc_info); +free_soc_private: + CAM_MEM_FREE(soc_private); + + return rc; +} + +int cam_tfe_csid_deinit_soc_resources( + struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_tfe_csid_soc_private *soc_private; + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error soc_private NULL"); + return -ENODEV; + } + + rc = cam_cpas_unregister_client(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc); + + rc = cam_soc_util_release_platform_resource(soc_info); + + return rc; +} + +int cam_tfe_csid_enable_soc_resources( + struct cam_hw_soc_info *soc_info, enum cam_vote_level clk_level) +{ + int rc = 0; + struct cam_tfe_csid_soc_private *soc_private; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + + soc_private = soc_info->soc_private; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 1; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE; + + axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW; + + CAM_DBG(CAM_ISP, "csid camnoc_bw:%lld mnoc_ab_bw:%lld mnoc_ib_bw:%lld ", + axi_vote.axi_path[0].camnoc_bw, + axi_vote.axi_path[0].mnoc_ab_bw, + axi_vote.axi_path[0].mnoc_ib_bw); + + rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote); + if (rc) 
{ + CAM_ERR(CAM_ISP, "Error CPAS start failed"); + rc = -EFAULT; + goto end; + } + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + clk_level, true); + if (rc) { + CAM_ERR(CAM_ISP, "enable platform failed"); + goto stop_cpas; + } + + return rc; + +stop_cpas: + cam_cpas_stop(soc_private->cpas_handle); +end: + return rc; +} + +int cam_tfe_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_tfe_csid_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + return -EINVAL; + } + soc_private = soc_info->soc_private; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) + CAM_ERR(CAM_ISP, "Disable platform failed"); + + rc = cam_cpas_stop(soc_private->cpas_handle); + if (rc) { + CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc); + return rc; + } + + return rc; +} + +int cam_tfe_csid_enable_tfe_force_clock_on(struct cam_hw_soc_info *soc_info, + uint32_t cpas_tfe_base_offset) +{ + int rc = 0; + struct cam_tfe_csid_soc_private *soc_private; + uint32_t cpass_tfe_force_clk_offset; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + return -EINVAL; + } + + soc_private = soc_info->soc_private; + cpass_tfe_force_clk_offset = + cpas_tfe_base_offset + (0x4 * soc_info->index); + rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REGBASE_CPASTOP, + cpass_tfe_force_clk_offset, 1, 1); + + if (rc) + CAM_ERR(CAM_ISP, "CPASS set TFE:%d Force clock On failed", + soc_info->index); + else + CAM_DBG(CAM_ISP, "CPASS set TFE:%d Force clock On", + soc_info->index); + + return rc; +} + +int cam_tfe_csid_disable_tfe_force_clock_on(struct cam_hw_soc_info *soc_info, + uint32_t cpas_tfe_base_offset) +{ + int rc = 0; + struct cam_tfe_csid_soc_private *soc_private; + uint32_t cpass_tfe_force_clk_offset; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + return -EINVAL; + } + + soc_private = 
soc_info->soc_private; + cpass_tfe_force_clk_offset = + cpas_tfe_base_offset + (0x4 * soc_info->index); + rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REGBASE_CPASTOP, + cpass_tfe_force_clk_offset, 1, 0); + + if (rc) + CAM_ERR(CAM_ISP, "CPASS set TFE:%d Force clock Off failed", + soc_info->index); + else + CAM_DBG(CAM_ISP, "CPASS set TFE:%d Force clock off", + soc_info->index); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.h new file mode 100644 index 0000000000..98f7a909ee --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_soc.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_TFE_CSID_SOC_H_ +#define _CAM_TFE_CSID_SOC_H_ + +#include "cam_isp_hw.h" + +/* + * struct cam_csid_soc_private: + * + * @Brief: Private SOC data specific to CSID HW Driver + * + * @cpas_handle: Handle returned on registering with CPAS driver. + * This handle is used for all further interface + * with CPAS. 
+ */ +struct cam_tfe_csid_soc_private { + uint32_t cpas_handle; +}; + +/** + * struct csid_device_soc_info - CSID SOC info object + * + * @csi_vdd_voltage: Csi vdd voltage value + * + */ +struct cam_tfe_csid_device_soc_info { + int csi_vdd_voltage; +}; + +/** + * cam_tfe_csid_init_soc_resources() + * + * @brief: Csid initialization function for the soc info + * + * @soc_info: Soc info structure pointer + * @csid_irq_handler: Irq handler function to be registered + * @irq_data: Irq data for the callback function + * + */ +int cam_tfe_csid_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t csid_irq_handler, void *irq_data); + + +/** + * cam_tfe_csid_deinit_soc_resources() + * + * @brief: Csid de initialization function for the soc info + * + * @soc_info: Soc info structure pointer + * + */ +int cam_tfe_csid_deinit_soc_resources(struct cam_hw_soc_info *soc_info); + +/** + * cam_tfe_csid_enable_soc_resources() + * + * @brief: Csid soc resource enable function + * + * @soc_info: Soc info structure pointer + * @clk_lvl: Vote level to start with + * + */ +int cam_tfe_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info, + uint32_t clk_lvl); + +/** + * cam_tfe_csid_disable_soc_resources() + * + * @brief: Csid soc resource disable function + * + * @soc_info: Soc info structure pointer + * + */ +int cam_tfe_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info); + +/** + * cam_tfe_csid_enable_tfe_force_clock() + * + * @brief: If csid testgen used for dual isp case, before + * starting csid test gen, enable tfe force clock on + * through cpas + * + * @soc_info: Soc info structure pointer + * @cpas_tfe_base_offset: Cpas tfe force clock base reg offset value + * + */ +int cam_tfe_csid_enable_tfe_force_clock_on(struct cam_hw_soc_info *soc_info, + uint32_t cpas_tfe_base_offset); + +/** + * cam_tfe_csid_disable_tfe_force_clock_on() + * + * @brief: Disable the TFE force clock on after dual ISP + * CSID test gen stop + * + * @soc_info: Soc info 
structure pointer + * @cpas_tfe_base_offset: Cpas tfe force clock base reg offset value + * + */ +int cam_tfe_csid_disable_tfe_force_clock_on(struct cam_hw_soc_info *soc_info, + uint32_t cpas_tfe_base_offset); + +/** + * cam_tfe_csid_get_vote_level() + * + * @brief: Get the vote level from clock rate + * + * @soc_info: Soc info structure pointer + * @clock_rate Clock rate + * + */ +uint32_t cam_tfe_csid_get_vote_level(struct cam_hw_soc_info *soc_info, + uint64_t clock_rate); + +#endif /* _CAM_TFE_CSID_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe.c new file mode 100644 index 0000000000..33999c4b95 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include "cam_tfe530.h" +#include "cam_tfe640.h" +#include "cam_tfe_hw_intf.h" +#include "cam_tfe_core.h" +#include "cam_tfe_dev.h" +#include "camera_main.h" + +static const struct of_device_id cam_tfe_dt_match[] = { + { + .compatible = "qcom,tfe530", + .data = &cam_tfe530, + }, + { + .compatible = "qcom,tfe640", + .data = &cam_tfe640, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_tfe_dt_match); + +struct platform_driver cam_tfe_driver = { + .probe = cam_tfe_probe, + .remove = cam_tfe_remove, + .driver = { + .name = "cam_tfe", + .of_match_table = cam_tfe_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_tfe_init_module(void) +{ + return platform_driver_register(&cam_tfe_driver); +} + +void cam_tfe_exit_module(void) +{ + platform_driver_unregister(&cam_tfe_driver); +} + +MODULE_DESCRIPTION("CAM TFE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe530.h 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe530.h new file mode 100644 index 0000000000..19982cb03e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe530.h @@ -0,0 +1,930 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + + +#ifndef _CAM_TFE530_H_ +#define _CAM_TFE530_H_ +#include "cam_tfe_core.h" +#include "cam_tfe_bus.h" + + +static struct cam_tfe_top_reg_offset_common tfe530_top_commong_reg = { + .hw_version = 0x00001000, + .hw_capability = 0x00001004, + .lens_feature = 0x00001008, + .stats_feature = 0x0000100C, + .zoom_feature = 0x00001010, + .global_reset_cmd = 0x00001014, + .core_cgc_ctrl = 0x00001018, + .ahb_cgc_ctrl = 0x0000101C, + .core_cfg_0 = 0x00001024, + .core_cfg_1 = 0x00001028, + .reg_update_cmd = 0x0000102C, + .diag_config = 0x00001060, + .diag_sensor_status_0 = 0x00001064, + .diag_sensor_status_1 = 0x00001068, + .diag_sensor_frame_cnt_status = 0x0000106C, + .violation_status = 0x00001070, + .stats_throttle_cnt_cfg_0 = 0x00001074, + .stats_throttle_cnt_cfg_1 = 0x00001078, + .num_debug_reg = 4, + .debug_reg = { + 0x000010A0, + 0x000010A4, + 0x000010A8, + 0x000010AC, + }, + .debug_cfg = 0x000010DC, + .num_perf_cfg = 1, + .perf_cfg = { + { + .perf_cnt_cfg = 0x000010E0, + .perf_pixel_count = 0x000010E4, + .perf_line_count = 0x000010E8, + .perf_stall_count = 0x000010EC, + .perf_always_count = 0x000010F0, + .perf_count_status = 0x000010F4, + }, + }, + .diag_min_hbi_error_shift = 15, + .diag_neq_hbi_shift = 14, + .diag_sensor_hbi_mask = 0x3FFF, + .serializer_supported = false, +}; + +static struct cam_tfe_camif_reg tfe530_camif_reg = { + .hw_version = 0x00001200, + .hw_status = 0x00001204, + .module_cfg = 0x00001260, + .pdaf_raw_crop_width_cfg = 0x00001268, + .pdaf_raw_crop_height_cfg = 0x0000126C, + .line_skip_pattern 
= 0x00001270, + .pixel_skip_pattern = 0x00001274, + .period_cfg = 0x00001278, + .irq_subsample_pattern = 0x0000127C, + .epoch_irq_cfg = 0x00001280, + .debug_1 = 0x000013F0, + .debug_0 = 0x000013F4, + .test_bus_ctrl = 0x000013F8, + .spare = 0x000013F8, + .reg_update_cmd = 0x0000102C, +}; + +static struct cam_tfe_camif_reg_data tfe530_camif_reg_data = { + .extern_reg_update_mask = 0x00000001, + .dual_tfe_pix_en_shift = 0x00000001, + .extern_reg_update_shift = 0x0, + .camif_pd_rdi2_src_sel_shift = 0x2, + .dual_tfe_sync_sel_shift = 18, + .delay_line_en_shift = 8, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x7000000, + .module_enable_shift = 0, + .pix_out_enable_shift = 8, + .pdaf_output_enable_shift = 9, + .dsp_mode_shift = 0, + .dsp_mode_mask = 0, + .dsp_en_shift = 0, + .dsp_en_mask = 0, + .reg_update_cmd_data = 0x1, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .epoch1_irq_mask = 0x00000008, + .eof_irq_mask = 0x00000002, + .reg_update_irq_mask = 0x00000001, + .error_irq_mask0 = 0x00010100, + .error_irq_mask2 = 0x00000023, + .subscribe_irq_mask = { + 0x00000000, + 0x00000007, + 0x00000000, + }, + .enable_diagnostic_hw = 0x1, + .perf_cnt_start_cmd_shift = 0, + .perf_cnt_continuous_shift = 2, + .perf_client_sel_shift = 8, + .perf_window_start_shift = 16, + .perf_window_end_shift = 20, +}; + +static struct cam_tfe_rdi_reg tfe530_rdi0_reg = { + .rdi_hw_version = 0x00001400, + .rdi_hw_status = 0x00001404, + .rdi_module_config = 0x00001460, + .rdi_skip_period = 0x00001468, + .rdi_irq_subsample_pattern = 0x0000146C, + .rdi_epoch_irq = 0x00001470, + .rdi_debug_1 = 0x000015F0, + .rdi_debug_0 = 0x000015F4, + .rdi_test_bus_ctrl = 0x000015F8, + .rdi_spare = 0x000015FC, + .reg_update_cmd = 0x0000102C, +}; + +static struct cam_tfe_rdi_reg_data tfe530_rdi0_reg_data = { + .reg_update_cmd_data = 0x2, + .epoch_line_cfg = 0x00140014, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x07000000, + .rdi_out_enable_shift = 0, 
+ + .sof_irq_mask = 0x00000010, + .epoch0_irq_mask = 0x00000040, + .epoch1_irq_mask = 0x00000080, + .eof_irq_mask = 0x00000020, + .error_irq_mask0 = 0x00020200, + .error_irq_mask2 = 0x00000004, + .subscribe_irq_mask = { + 0x00000000, + 0x00000030, + 0x00000000, + }, + .enable_diagnostic_hw = 0x1, + .diag_sensor_sel = 0x1, + .diag_sensor_shift = 0x1, +}; + +static struct cam_tfe_rdi_reg tfe530_rdi1_reg = { + .rdi_hw_version = 0x00001600, + .rdi_hw_status = 0x00001604, + .rdi_module_config = 0x00001660, + .rdi_skip_period = 0x00001668, + .rdi_irq_subsample_pattern = 0x0000166C, + .rdi_epoch_irq = 0x00001670, + .rdi_debug_1 = 0x000017F0, + .rdi_debug_0 = 0x000017F4, + .rdi_test_bus_ctrl = 0x000017F8, + .rdi_spare = 0x000017FC, + .reg_update_cmd = 0x0000102C, +}; + +static struct cam_tfe_rdi_reg_data tfe530_rdi1_reg_data = { + .reg_update_cmd_data = 0x4, + .epoch_line_cfg = 0x00140014, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x07000000, + .rdi_out_enable_shift = 0, + + .sof_irq_mask = 0x00000100, + .epoch0_irq_mask = 0x00000400, + .epoch1_irq_mask = 0x00000800, + .eof_irq_mask = 0x00000200, + .error_irq_mask0 = 0x00040400, + .error_irq_mask2 = 0x00000008, + .subscribe_irq_mask = { + 0x00000000, + 0x00000300, + 0x00000000, + }, + .enable_diagnostic_hw = 0x1, + .diag_sensor_sel = 0x2, + .diag_sensor_shift = 0x1, +}; + +static struct cam_tfe_rdi_reg tfe530_rdi2_reg = { + .rdi_hw_version = 0x00001800, + .rdi_hw_status = 0x00001804, + .rdi_module_config = 0x00001860, + .rdi_skip_period = 0x00001868, + .rdi_irq_subsample_pattern = 0x0000186C, + .rdi_epoch_irq = 0x00001870, + .rdi_debug_1 = 0x000019F0, + .rdi_debug_0 = 0x000019F4, + .rdi_test_bus_ctrl = 0x000019F8, + .rdi_spare = 0x000019FC, + .reg_update_cmd = 0x0000102C, +}; + +static struct cam_tfe_rdi_reg_data tfe530_rdi2_reg_data = { + .reg_update_cmd_data = 0x8, + .epoch_line_cfg = 0x00140014, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x07000000, + .rdi_out_enable_shift = 0, + + .sof_irq_mask = 
0x00001000, + .epoch0_irq_mask = 0x00004000, + .epoch1_irq_mask = 0x00008000, + .eof_irq_mask = 0x00002000, + .error_irq_mask0 = 0x00080800, + .error_irq_mask2 = 0x00000004, + .subscribe_irq_mask = { + 0x00000000, + 0x00003000, + 0x00000000, + }, + .enable_diagnostic_hw = 0x1, + .diag_sensor_sel = 0x3, + .diag_sensor_shift = 0x1, + +}; + +static struct cam_tfe_clc_hw_status tfe530_clc_hw_info[CAM_TFE_MAX_CLC] = { + { + .name = "CLC_CAMIF", + .hw_status_reg = 0x1204, + }, + { + .name = "CLC_RDI0_CAMIF", + .hw_status_reg = 0x1404, + }, + { + .name = "CLC_RDI1_CAMIF", + .hw_status_reg = 0x1604, + }, + { + .name = "CLC_RDI2_CAMIF", + .hw_status_reg = 0x1804, + }, + { + .name = "CLC_CHANNEL_GAIN", + .hw_status_reg = 0x2604, + }, + { + .name = "CLC_LENS_ROLL_OFF", + .hw_status_reg = 0x2804, + }, + { + .name = "CLC_WB_BDS", + .hw_status_reg = 0x2A04, + }, + { + .name = "CLC_STATS_BHIST", + .hw_status_reg = 0x2C04, + }, + { + .name = "CLC_STATS_TINTLESS_BG", + .hw_status_reg = 0x2E04, + }, + { + .name = "CLC_STATS_BAF", + .hw_status_reg = 0x3004, + }, + { + .name = "CLC_STATS_AWB_BG", + .hw_status_reg = 0x3204, + }, + { + .name = "CLC_STATS_AEC_BG", + .hw_status_reg = 0x3404, + }, + { + .name = "CLC_STATS_RAW_OUT", + .hw_status_reg = 0x3604, + }, + { + .name = "CLC_STATS_CROP_POST_BDS", + .hw_status_reg = 0x3804, + }, +}; + +static struct cam_tfe_top_hw_info tfe530_top_hw_info = { + .common_reg = &tfe530_top_commong_reg, + .camif_hw_info = { + .camif_reg = &tfe530_camif_reg, + .reg_data = &tfe530_camif_reg_data, + }, + .rdi_hw_info = { + { + .rdi_reg = &tfe530_rdi0_reg, + .reg_data = &tfe530_rdi0_reg_data, + }, + { + .rdi_reg = &tfe530_rdi1_reg, + .reg_data = &tfe530_rdi1_reg_data, + }, + { + .rdi_reg = &tfe530_rdi2_reg, + .reg_data = &tfe530_rdi2_reg_data, + }, + }, + .in_port = { + CAM_TFE_CAMIF_VER_1_0, + CAM_TFE_RDI_VER_1_0, + CAM_TFE_RDI_VER_1_0, + CAM_TFE_RDI_VER_1_0 + }, + .reg_dump_data = { + .num_reg_dump_entries = 19, + .num_lut_dump_entries = 0, + 
.bus_start_addr = 0x2000, + .bus_write_top_end_addr = 0x2120, + .bus_client_start_addr = 0x2200, + .bus_client_offset = 0x100, + .num_bus_clients = 10, + .reg_entry = { + { + .start_offset = 0x1000, + .end_offset = 0x10F4, + }, + { + .start_offset = 0x1260, + .end_offset = 0x1280, + }, + { + .start_offset = 0x13F0, + .end_offset = 0x13FC, + }, + { + .start_offset = 0x1460, + .end_offset = 0x1470, + }, + { + .start_offset = 0x15F0, + .end_offset = 0x15FC, + }, + { + .start_offset = 0x1660, + .end_offset = 0x1670, + }, + { + .start_offset = 0x17F0, + .end_offset = 0x17FC, + }, + { + .start_offset = 0x1860, + .end_offset = 0x1870, + }, + { + .start_offset = 0x19F0, + .end_offset = 0x19FC, + }, + { + .start_offset = 0x2660, + .end_offset = 0x2694, + }, + { + .start_offset = 0x2860, + .end_offset = 0x2884, + }, + { + .start_offset = 0x2A60, + .end_offset = 0X2B34, + }, + { + .start_offset = 0x2C60, + .end_offset = 0X2C80, + }, + { + .start_offset = 0x2E60, + .end_offset = 0X2E7C, + }, + { + .start_offset = 0x3060, + .end_offset = 0X3110, + }, + { + .start_offset = 0x3260, + .end_offset = 0X3278, + }, + { + .start_offset = 0x3460, + .end_offset = 0X3478, + }, + { + .start_offset = 0x3660, + .end_offset = 0X3684, + }, + { + .start_offset = 0x3860, + .end_offset = 0X3884, + }, + }, + .lut_entry = { + { + .lut_word_size = 1, + .lut_bank_sel = 0x40, + .lut_addr_size = 180, + .dmi_reg_offset = 0x2800, + }, + { + .lut_word_size = 1, + .lut_bank_sel = 0x41, + .lut_addr_size = 180, + .dmi_reg_offset = 0x3000, + }, + }, + }, +}; + +static struct cam_tfe_bus_hw_info tfe530_bus_hw_info = { + .common_reg = { + .hw_version = 0x00001A00, + .cgc_ovd = 0x00001A08, + .comp_cfg_0 = 0x00001A0C, + .comp_cfg_1 = 0x00001A10, + .frameheader_cfg = { + 0x00001A34, + 0x00001A38, + 0x00001A3C, + 0x00001A40, + }, + .pwr_iso_cfg = 0x00001A5C, + .overflow_status_clear = 0x00001A60, + .ccif_violation_status = 0x00001A64, + .overflow_status = 0x00001A68, + .image_size_violation_status = 0x00001A70, + 
.perf_count_cfg = { + 0x00001A74, + 0x00001A78, + 0x00001A7C, + 0x00001A80, + 0x00001A84, + 0x00001A88, + 0x00001A8C, + 0x00001A90, + }, + .perf_count_val = { + 0x00001A94, + 0x00001A98, + 0x00001A9C, + 0x00001AA0, + 0x00001AA4, + 0x00001AA8, + 0x00001AAC, + 0x00001AB0, + }, + .perf_count_status = 0x00001AB4, + .debug_status_top_cfg = 0x00001AD4, + .debug_status_top = 0x00001AD8, + .test_bus_ctrl = 0x00001ADC, + .irq_mask = { + 0x00001A18, + 0x00001A1C, + }, + .irq_clear = { + 0x00001A20, + 0x00001A24, + }, + .irq_status = { + 0x00001A28, + 0x00001A2C, + }, + .irq_cmd = 0x00001A30, + .cons_violation_shift = 28, + .violation_shift = 30, + .image_size_violation = 31, + }, + .num_client = 10, + .bus_client_reg = { + /* BUS Client 0 BAYER */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .bw_limit = 0x00001C1C, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .framedrop_period = 0x00001C38, + .framedrop_pattern = 0x00001C3C, + .addr_status_0 = 0x00001C68, + .addr_status_1 = 0x00001C6C, + .addr_status_2 = 0x00001C70, + .addr_status_3 = 0x00001C74, + .debug_status_cfg = 0x00001C78, + .debug_status_0 = 0x00001C7C, + .debug_status_1 = 0x00001C80, + .comp_group = CAM_TFE_BUS_COMP_GRP_0, + .client_name = "BAYER", + }, + /* BUS Client 1 IDEAL RAW*/ + { + .cfg = 0x00001D00, + .image_addr = 0x00001D04, + .frame_incr = 0x00001D08, + .image_cfg_0 = 0x00001D0C, + .image_cfg_1 = 0x00001D10, + .image_cfg_2 = 0x00001D14, + .packer_cfg = 0x00001D18, + .bw_limit = 0x00001D1C, + .frame_header_addr = 0x00001D20, + .frame_header_incr = 0x00001D24, + .frame_header_cfg = 0x00001D28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00001D30, + .irq_subsample_pattern = 
0x00001D34, + .framedrop_period = 0x00001D38, + .framedrop_pattern = 0x00001D3C, + .addr_status_0 = 0x00001D68, + .addr_status_1 = 0x00001D6C, + .addr_status_2 = 0x00001D70, + .addr_status_3 = 0x00001D74, + .debug_status_cfg = 0x00001D78, + .debug_status_0 = 0x00001D7C, + .debug_status_1 = 0x00001D80, + .comp_group = CAM_TFE_BUS_COMP_GRP_1, + .client_name = "IDEAL_RAW", + }, + /* BUS Client 2 Stats BE Tintless */ + { + .cfg = 0x00001E00, + .image_addr = 0x00001E04, + .frame_incr = 0x00001E08, + .image_cfg_0 = 0x00001E0C, + .image_cfg_1 = 0x00001E10, + .image_cfg_2 = 0x00001E14, + .packer_cfg = 0x00001E18, + .bw_limit = 0x00001E1C, + .frame_header_addr = 0x00001E20, + .frame_header_incr = 0x00001E24, + .frame_header_cfg = 0x00001E28, + .line_done_cfg = 0x00001E00, + .irq_subsample_period = 0x00001E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00001E38, + .framedrop_pattern = 0x00001E3C, + .addr_status_0 = 0x00001E68, + .addr_status_1 = 0x00001E6C, + .addr_status_2 = 0x00001E70, + .addr_status_3 = 0x00001E74, + .debug_status_cfg = 0x00001E78, + .debug_status_0 = 0x00001E7C, + .debug_status_1 = 0x00001E80, + .comp_group = CAM_TFE_BUS_COMP_GRP_2, + .client_name = "STATS BE TINTLESS", + }, + /* BUS Client 3 Stats Bhist */ + { + .cfg = 0x00001F00, + .image_addr = 0x00001F04, + .frame_incr = 0x00001F08, + .image_cfg_0 = 0x00001F0C, + .image_cfg_1 = 0x00001F10, + .image_cfg_2 = 0x00001F14, + .packer_cfg = 0x00001F18, + .bw_limit = 0x00001F1C, + .frame_header_addr = 0x00001F20, + .frame_header_incr = 0x00001F24, + .frame_header_cfg = 0x00001F28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00001F30, + .irq_subsample_pattern = 0x00001F34, + .framedrop_period = 0x00001F38, + .framedrop_pattern = 0x00001F3C, + .addr_status_0 = 0x00001F68, + .addr_status_1 = 0x00001F6C, + .addr_status_2 = 0x00001F70, + .addr_status_3 = 0x00001F74, + .debug_status_cfg = 0x00001F78, + .debug_status_0 = 0x00001F7C, + .debug_status_1 = 0x00001F80, + .comp_group = 
CAM_TFE_BUS_COMP_GRP_2, + .client_name = "STATS BHIST", + }, + /* BUS Client 4 Stats AWB BG */ + { + .cfg = 0x00002000, + .image_addr = 0x00002004, + .frame_incr = 0x00002008, + .image_cfg_0 = 0x0000200C, + .image_cfg_1 = 0x00002010, + .image_cfg_2 = 0x00002014, + .packer_cfg = 0x00002018, + .bw_limit = 0x0000201C, + .frame_header_addr = 0x00002020, + .frame_header_incr = 0x00002024, + .frame_header_cfg = 0x00002028, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00002030, + .irq_subsample_pattern = 0x00002034, + .framedrop_period = 0x00002038, + .framedrop_pattern = 0x0000203C, + .addr_status_0 = 0x00002068, + .addr_status_1 = 0x0000206C, + .addr_status_2 = 0x00002070, + .addr_status_3 = 0x00002074, + .debug_status_cfg = 0x00002078, + .debug_status_0 = 0x0000207C, + .debug_status_1 = 0x00002080, + .comp_group = CAM_TFE_BUS_COMP_GRP_3, + .client_name = "STATS AWB BG", + }, + /* BUS Client 5 Stats AEC BG */ + { + .cfg = 0x00002100, + .image_addr = 0x00002104, + .frame_incr = 0x00002108, + .image_cfg_0 = 0x0000210C, + .image_cfg_1 = 0x00002110, + .image_cfg_2 = 0x00002114, + .packer_cfg = 0x00002118, + .bw_limit = 0x0000211C, + .frame_header_addr = 0x00002120, + .frame_header_incr = 0x00002124, + .frame_header_cfg = 0x00002128, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00002130, + .irq_subsample_pattern = 0x00002134, + .framedrop_period = 0x00002138, + .framedrop_pattern = 0x0000213C, + .addr_status_0 = 0x00002168, + .addr_status_1 = 0x0000216C, + .addr_status_2 = 0x00002170, + .addr_status_3 = 0x00002174, + .debug_status_cfg = 0x00002178, + .debug_status_0 = 0x0000217C, + .debug_status_1 = 0x00002180, + .comp_group = CAM_TFE_BUS_COMP_GRP_3, + .client_name = "STATS AEC BG", + }, + /* BUS Client 6 Stats BAF */ + { + .cfg = 0x00002200, + .image_addr = 0x00002204, + .frame_incr = 0x00002208, + .image_cfg_0 = 0x0000220C, + .image_cfg_1 = 0x00002210, + .image_cfg_2 = 0x00002214, + .packer_cfg = 0x00002218, + .bw_limit = 0x0000221C, + 
.frame_header_addr = 0x00002220, + .frame_header_incr = 0x00002224, + .frame_header_cfg = 0x00002228, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00002230, + .irq_subsample_pattern = 0x00002234, + .framedrop_period = 0x00002238, + .framedrop_pattern = 0x0000223C, + .addr_status_0 = 0x00002268, + .addr_status_1 = 0x0000226C, + .addr_status_2 = 0x00002270, + .addr_status_3 = 0x00002274, + .debug_status_cfg = 0x00002278, + .debug_status_0 = 0x0000227C, + .debug_status_1 = 0x00002280, + .comp_group = CAM_TFE_BUS_COMP_GRP_4, + .client_name = "STATS BAF", + }, + /* BUS Client 7 RDI0 */ + { + .cfg = 0x00002300, + .image_addr = 0x00002304, + .frame_incr = 0x00002308, + .image_cfg_0 = 0x0000230C, + .image_cfg_1 = 0x00002310, + .image_cfg_2 = 0x00002314, + .packer_cfg = 0x00002318, + .bw_limit = 0x0000231C, + .frame_header_addr = 0x00002320, + .frame_header_incr = 0x00002324, + .frame_header_cfg = 0x00002328, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00002330, + .irq_subsample_pattern = 0x00002334, + .framedrop_period = 0x00002338, + .framedrop_pattern = 0x0000233C, + .addr_status_0 = 0x00002368, + .addr_status_1 = 0x0000236C, + .addr_status_2 = 0x00002370, + .addr_status_3 = 0x00002374, + .debug_status_cfg = 0x00002378, + .debug_status_0 = 0x0000237C, + .debug_status_1 = 0x00002380, + .comp_group = CAM_TFE_BUS_COMP_GRP_5, + .client_name = "RDI0", + }, + /* BUS Client 8 RDI1 */ + { + .cfg = 0x00002400, + .image_addr = 0x00002404, + .frame_incr = 0x00002408, + .image_cfg_0 = 0x0000240C, + .image_cfg_1 = 0x00002410, + .image_cfg_2 = 0x00002414, + .packer_cfg = 0x00002418, + .bw_limit = 0x0000241C, + .frame_header_addr = 0x00002420, + .frame_header_incr = 0x00002424, + .frame_header_cfg = 0x00002428, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00002430, + .irq_subsample_pattern = 0x00002434, + .framedrop_period = 0x00002438, + .framedrop_pattern = 0x0000243C, + .addr_status_0 = 0x00002468, + .addr_status_1 = 0x0000246C, + 
.addr_status_2 = 0x00002470, + .addr_status_3 = 0x00002474, + .debug_status_cfg = 0x00002478, + .debug_status_0 = 0x0000247C, + .debug_status_1 = 0x00002480, + .comp_group = CAM_TFE_BUS_COMP_GRP_6, + .client_name = "RDI1", + }, + /* BUS Client 9 PDAF/RDI2*/ + { + .cfg = 0x00002500, + .image_addr = 0x00002504, + .frame_incr = 0x00002508, + .image_cfg_0 = 0x0000250C, + .image_cfg_1 = 0x00002510, + .image_cfg_2 = 0x00002514, + .packer_cfg = 0x00002518, + .bw_limit = 0x0000251C, + .frame_header_addr = 0x00002520, + .frame_header_incr = 0x00002524, + .frame_header_cfg = 0x00002528, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00002530, + .irq_subsample_pattern = 0x00002534, + .framedrop_period = 0x00002538, + .framedrop_pattern = 0x0000253C, + .addr_status_0 = 0x00002568, + .addr_status_1 = 0x0000256C, + .addr_status_2 = 0x00002570, + .addr_status_3 = 0x00002574, + .debug_status_cfg = 0x00002578, + .debug_status_0 = 0x0000257C, + .debug_status_1 = 0x00002580, + .comp_group = CAM_TFE_BUS_COMP_GRP_7, + .client_name = "RDI2/PADF", + }, + }, + .num_out = 11, + .tfe_out_hw_info = { + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_5, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_1, + .mid[0] = 23, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_6, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_2, + .mid[0] = 24, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_7, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_3, + .mid[0] = 25, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_FULL, + .max_width = 4096, + .max_height = 4096, + .composite_group = CAM_TFE_BUS_COMP_GRP_0, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 16, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_RAW_DUMP, + .max_width = 4096, + .max_height = 4096, + .composite_group = 
CAM_TFE_BUS_COMP_GRP_1, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 17, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_PDAF, + .max_width = 4096, + .max_height = 4096, + .composite_group = CAM_TFE_BUS_COMP_GRP_7, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_3, + .mid[0] = 25, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_3, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 21, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_2, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 19, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_2, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 18, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_3, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 20, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_BF, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_4, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 22, + }, + }, + .num_comp_grp = 8, + .max_wm_per_comp_grp = 2, + .comp_done_shift = 8, + .top_bus_wr_irq_shift = 1, + .comp_buf_done_mask = 0xFF00, + .comp_rup_done_mask = 0xF, + .bus_irq_error_mask = { + 0xD0000000, + 0x00000000, + }, + .support_consumed_addr = true, + .pdaf_rdi2_mux_en = true, + .rdi_width = 64, + .max_bw_counter_limit = 0xFF, + .counter_limit_shift = 1, + .counter_limit_mask = 0xF, +}; + +struct cam_tfe_hw_info cam_tfe530 = { + .top_irq_mask = { + 0x00001034, + 0x00001038, + 0x0000103C, + }, + .top_irq_clear = { + 0x00001040, + 0x00001044, + 0x00001048, + }, + .top_irq_status = { + 0x0000104C, + 0x00001050, + 0x00001054, + }, + .top_irq_cmd = 0x00001030, + .global_clear_bitmask = 0x00000001, + + 
.bus_irq_mask = { + 0x00001A18, + 0x00001A1C, + }, + .bus_irq_clear = { + 0x00001A20, + 0x00001A24, + }, + .bus_irq_status = { + 0x00001A28, + 0x00001A2C, + }, + .bus_irq_cmd = 0x00001A30, + .bus_violation_reg = 0x00001A64, + .bus_overflow_reg = 0x00001A68, + .bus_image_size_vilation_reg = 0x1A70, + .bus_overflow_clear_cmd = 0x1A60, + .debug_status_top = 0x1AD8, + + .reset_irq_mask = { + 0x00000001, + 0x00000000, + 0x00000000, + }, + .error_irq_mask = { + 0x000F0F00, + 0x00000000, + 0x0000003F, + }, + .bus_reg_irq_mask = { + 0x00000002, + 0x00000000, + }, + .bus_error_irq_mask = { + 0xC0000000, + 0x00000000, + }, + + .num_clc = 14, + .clc_hw_status_info = tfe530_clc_hw_info, + .bus_version = CAM_TFE_BUS_1_0, + .bus_hw_info = &tfe530_bus_hw_info, + + .top_version = CAM_TFE_TOP_1_0, + .top_hw_info = &tfe530_top_hw_info, +}; + +#endif /* _CAM_TFE530_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe640.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe640.h new file mode 100644 index 0000000000..9b60c47491 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe640.h @@ -0,0 +1,1245 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#ifndef _CAM_TFE640_H_ +#define _CAM_TFE640_H_ +#include "cam_tfe_core.h" +#include "cam_tfe_bus.h" + + +static struct cam_tfe_top_reg_offset_common tfe640_top_commong_reg = { + .hw_version = 0x00001800, + .hw_capability = 0x00001804, + .lens_feature = 0x00001808, + .stats_feature = 0x0000180C, + .zoom_feature = 0x00001810, + .global_reset_cmd = 0x00001814, + .core_cgc_ctrl = 0x00001818, + .ahb_cgc_ctrl = 0x0000181C, + .core_cfg_0 = 0x00001824, + .reg_update_cmd = 0x0000182C, + .diag_config = 0x00001860, + .diag_sensor_status_0 = 0x00001864, + .diag_sensor_status_1 = 0x00001868, + .diag_sensor_frame_cnt_status = 0x0000186C, + .violation_status = 0x00001870, + .stats_throttle_cnt_cfg_0 = 0x00001874, + .stats_throttle_cnt_cfg_1 = 0x00001878, + .num_debug_reg = 10, + .debug_reg = { + 0x000018A0, + 0x000018A4, + 0x000018A8, + 0x000018AC, + 0x000018B0, + 0x000018B4, + 0x000018B8, + 0x000018BC, + 0x000018C0, + 0x000018C4, + }, + .debug_cfg = 0x000018DC, + .num_perf_cfg = 2, + .perf_cfg = { + { + .perf_cnt_cfg = 0x000018E0, + .perf_pixel_count = 0x000018E4, + .perf_line_count = 0x000018E8, + .perf_stall_count = 0x000018EC, + .perf_always_count = 0x000018F0, + .perf_count_status = 0x000018F4, + }, + { + .perf_cnt_cfg = 0x000018F8, + .perf_pixel_count = 0x000018FC, + .perf_line_count = 0x00001900, + .perf_stall_count = 0x00001904, + .perf_always_count = 0x00001908, + .perf_count_status = 0x0000190C, + }, + }, + .diag_min_hbi_error_shift = 15, + .diag_neq_hbi_shift = 14, + .diag_sensor_hbi_mask = 0x3FFF, + .serializer_supported = true, +}; + +static struct cam_tfe_camif_reg tfe640_camif_reg = { + .hw_version = 0x00001C00, + .hw_status = 0x00001C04, + .module_cfg = 0x00001C60, + .pdaf_raw_crop_width_cfg = 0x00001C68, + .pdaf_raw_crop_height_cfg = 0x00001C6C, + .line_skip_pattern = 0x00001C70, + .pixel_skip_pattern = 0x00001C74, + .period_cfg = 0x00001C78, + .irq_subsample_pattern = 0x00001C7C, + .epoch_irq_cfg = 0x00001C80, + .debug_1 = 0x00001DF0, + .debug_0 = 
0x00001DF4, + .test_bus_ctrl = 0x00001DF8, + .spare = 0x00001DFC, + .reg_update_cmd = 0x0000182C, +}; + +static struct cam_tfe_camif_reg_data tfe640_camif_reg_data = { + .extern_reg_update_mask = 0x00000001, + .dual_tfe_pix_en_shift = 0x00000001, + .extern_reg_update_shift = 0x0, + .dual_tfe_sync_sel_shift = 18, + .delay_line_en_shift = 8, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x7000000, + .module_enable_shift = 0, + .pix_out_enable_shift = 8, + .pdaf_output_enable_shift = 9, + .dsp_mode_shift = 0, + .dsp_mode_mask = 0, + .dsp_en_shift = 0, + .dsp_en_mask = 0, + .reg_update_cmd_data = 0x1, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .epoch1_irq_mask = 0x00000008, + .eof_irq_mask = 0x00000002, + .reg_update_irq_mask = 0x00000001, + .error_irq_mask0 = 0x00010100, + .error_irq_mask2 = 0x00000023, + .subscribe_irq_mask = { + 0x00000000, + 0x00000007, + 0x00000000, + }, + .enable_diagnostic_hw = 0x1, + .perf_cnt_start_cmd_shift = 0, + .perf_cnt_continuous_shift = 2, + .perf_client_sel_shift = 8, + .perf_window_start_shift = 16, + .perf_window_end_shift = 20, + .ai_c_srl_en_shift = 11, + .ds16_c_srl_en_shift = 10, + .ds4_c_srl_en_shift = 9, +}; + +static struct cam_tfe_rdi_reg tfe640_rdi0_reg = { + .rdi_hw_version = 0x00001E00, + .rdi_hw_status = 0x00001E04, + .rdi_module_config = 0x00001E60, + .rdi_skip_period = 0x00001E68, + .rdi_irq_subsample_pattern = 0x00001E6C, + .rdi_epoch_irq = 0x00001E70, + .rdi_debug_1 = 0x00001FF0, + .rdi_debug_0 = 0x00001FF4, + .rdi_test_bus_ctrl = 0x00001FF8, + .rdi_spare = 0x00001FFC, + .reg_update_cmd = 0x0000182C, +}; + +static struct cam_tfe_rdi_reg_data tfe640_rdi0_reg_data = { + .reg_update_cmd_data = 0x2, + .epoch_line_cfg = 0x00140014, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x07000000, + .rdi_out_enable_shift = 0, + + .sof_irq_mask = 0x00000010, + .epoch0_irq_mask = 0x00000040, + .epoch1_irq_mask = 0x00000080, + .eof_irq_mask = 0x00000020, + 
.error_irq_mask0 = 0x00020200, + .error_irq_mask2 = 0x00000004, + .subscribe_irq_mask = { + 0x00000000, + 0x00000030, + 0x00000000, + }, + .enable_diagnostic_hw = 0x1, + .diag_sensor_sel = 0x1, + .diag_sensor_shift = 0x1, +}; + +static struct cam_tfe_rdi_reg tfe640_rdi1_reg = { + .rdi_hw_version = 0x00002000, + .rdi_hw_status = 0x00002004, + .rdi_module_config = 0x00002060, + .rdi_skip_period = 0x00002068, + .rdi_irq_subsample_pattern = 0x0000206C, + .rdi_epoch_irq = 0x00002070, + .rdi_debug_1 = 0x000021F0, + .rdi_debug_0 = 0x000021F4, + .rdi_test_bus_ctrl = 0x000021F8, + .rdi_spare = 0x000021FC, + .reg_update_cmd = 0x0000182C, +}; + +static struct cam_tfe_rdi_reg_data tfe640_rdi1_reg_data = { + .reg_update_cmd_data = 0x4, + .epoch_line_cfg = 0x00140014, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x07000000, + .rdi_out_enable_shift = 0, + + .sof_irq_mask = 0x00000100, + .epoch0_irq_mask = 0x00000400, + .epoch1_irq_mask = 0x00000800, + .eof_irq_mask = 0x00000200, + .error_irq_mask0 = 0x00040400, + .error_irq_mask2 = 0x00000008, + .subscribe_irq_mask = { + 0x00000000, + 0x00000300, + 0x00000000, + }, + .enable_diagnostic_hw = 0x1, + .diag_sensor_sel = 0x2, + .diag_sensor_shift = 0x1, +}; + +static struct cam_tfe_rdi_reg tfe640_rdi2_reg = { + .rdi_hw_version = 0x00002200, + .rdi_hw_status = 0x00002204, + .rdi_module_config = 0x00002260, + .rdi_skip_period = 0x00002268, + .rdi_irq_subsample_pattern = 0x0000226C, + .rdi_epoch_irq = 0x00002270, + .rdi_debug_1 = 0x000023F0, + .rdi_debug_0 = 0x000023F4, + .rdi_test_bus_ctrl = 0x000023F8, + .rdi_spare = 0x000023FC, + .reg_update_cmd = 0x0000182C, +}; + +static struct cam_tfe_rdi_reg_data tfe640_rdi2_reg_data = { + .reg_update_cmd_data = 0x8, + .epoch_line_cfg = 0x00140014, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x07000000, + .rdi_out_enable_shift = 0, + + .sof_irq_mask = 0x00001000, + .epoch0_irq_mask = 0x00004000, + .epoch1_irq_mask = 0x00008000, + .eof_irq_mask = 0x00002000, + .error_irq_mask0 = 
0x00080800, + .error_irq_mask2 = 0x00000004, + .subscribe_irq_mask = { + 0x00000000, + 0x00003000, + 0x00000000, + }, + .enable_diagnostic_hw = 0x1, + .diag_sensor_sel = 0x3, + .diag_sensor_shift = 0x1, + +}; + +static struct cam_tfe_clc_hw_status tfe640_clc_hw_info[CAM_TFE_MAX_CLC] = { + { + .name = "CLC_CAMIF", + .hw_status_reg = 0x1C04, + }, + { + .name = "CLC_RDI0_CAMIF", + .hw_status_reg = 0x1E04, + }, + { + .name = "CLC_RDI1_CAMIF", + .hw_status_reg = 0x2004, + }, + { + .name = "CLC_RDI2_CAMIF", + .hw_status_reg = 0x2204, + }, + { + .name = "CLC_CHANNEL_GAIN", + .hw_status_reg = 0x6004, + }, + { + .name = "CLC_BPC_PDPC", + .hw_status_reg = 0x6204, + }, + { + .name = "CLC_LCS", + .hw_status_reg = 0x6404, + }, + { + .name = "CLC_SHARED_LB", + .hw_status_reg = 0x6604, + }, + { + .name = "CLC_WB_BDS", + .hw_status_reg = 0x6804, + }, + { + .name = "CLC_CROP_RND_CLAMP_POST_BDS", + .hw_status_reg = 0x6A04, + }, + { + .name = "CLC_BLS", + .hw_status_reg = 0x6C04, + }, + { + .name = "CLC_BAYER_GLUT", + .hw_status_reg = 0x6E04, + }, + { + .name = "CLC_BAYER_DS4", + .hw_status_reg = 0x7004, + }, + { + .name = "CLC_COLOR_XFORM_DS4", + .hw_status_reg = 0x7204, + }, + { + .name = "CLC_CHROMA_DS2", + .hw_status_reg = 0x7404, + }, + { + .name = "CLC_CROP_RND_CLAMP_Y_DS4", + .hw_status_reg = 0x7604, + }, + { + .name = "CLC_CROP_RND_CLAMP_C_DS4", + .hw_status_reg = 0x7804, + }, + { + .name = "CLC_R2PD_DS4", + .hw_status_reg = 0x7A04, + }, + { + .name = "CLC_DOWNSCALE_4TO1_Y", + .hw_status_reg = 0x7C04, + }, + { + .name = "CLC_DOWNSCALE_4TO1_C", + .hw_status_reg = 0x7E04, + }, + { + .name = "CLC_CROP_RND_CLAMP_Y_DS16", + .hw_status_reg = 0x8004, + }, + { + .name = "CLC_CROP_RND_CLAMP_C_DS16", + .hw_status_reg = 0x8204, + }, + { + .name = "CLC_R2PD_DS16", + .hw_status_reg = 0x8404, + }, + { + .name = "CLC_WB_GAIN", + .hw_status_reg = 0x8604, + }, + { + .name = "CLC_BAYER_DS2", + .hw_status_reg = 0x8804, + }, + { + .name = "CLC_GTM", + .hw_status_reg = 0x8A04, + }, + { + .name = 
"CLC_COLOR_XFORM_AI_DS", + .hw_status_reg = 0x8C04, + }, + { + .name = "CLC_DOWNSCALE_MN_Y", + .hw_status_reg = 0x8E04, + }, + { + .name = "CLC_DOWNSCALE_MN_C", + .hw_status_reg = 0x9004, + }, + { + .name = "CLC_CROP_RND_CLAMP_Y_AI_DS", + .hw_status_reg = 0x9204, + }, + { + .name = "CLC_CROP_RND_CLAMP_C_AI_DS", + .hw_status_reg = 0x9404, + }, + { + .name = "CLC_CROP_RND_CLAMP_IDEAL_RAW", + .hw_status_reg = 0x9604, + }, + { + .name = "CLC_ABF", + .hw_status_reg = 0x9804, + }, + { + .name = "CLC_STATS_BG", + .hw_status_reg = 0x9A04, + }, + { + .name = "CLC_STATS_BHIST", + .hw_status_reg = 0x9C04, + }, + { + .name = "CLC_STATS_AWB_BG", + .hw_status_reg = 0x9E04, + }, + { + .name = "CLC_STATS_AEC_BG", + .hw_status_reg = 0xA004, + }, + { + .name = "CLC_STATS_BAF", + .hw_status_reg = 0xA204, + }, + { + .name = "CLC_STATS_RS", + .hw_status_reg = 0xA404, + }, +}; + +static struct cam_tfe_top_hw_info tfe640_top_hw_info = { + .common_reg = &tfe640_top_commong_reg, + .camif_hw_info = { + .camif_reg = &tfe640_camif_reg, + .reg_data = &tfe640_camif_reg_data, + }, + .rdi_hw_info = { + { + .rdi_reg = &tfe640_rdi0_reg, + .reg_data = &tfe640_rdi0_reg_data, + }, + { + .rdi_reg = &tfe640_rdi1_reg, + .reg_data = &tfe640_rdi1_reg_data, + }, + { + .rdi_reg = &tfe640_rdi2_reg, + .reg_data = &tfe640_rdi2_reg_data, + }, + }, + .in_port = { + CAM_TFE_CAMIF_VER_1_0, + CAM_TFE_RDI_VER_1_0, + CAM_TFE_RDI_VER_1_0, + CAM_TFE_RDI_VER_1_0 + }, + .reg_dump_data = { + .num_reg_dump_entries = 19, + .num_lut_dump_entries = 0, + .bus_start_addr = 0x2000, + .bus_write_top_end_addr = 0x2120, + .bus_client_start_addr = 0x2200, + .bus_client_offset = 0x100, + .num_bus_clients = 10, + .reg_entry = { + { + .start_offset = 0x1000, + .end_offset = 0x10F4, + }, + { + .start_offset = 0x1260, + .end_offset = 0x1280, + }, + { + .start_offset = 0x13F0, + .end_offset = 0x13FC, + }, + { + .start_offset = 0x1460, + .end_offset = 0x1470, + }, + { + .start_offset = 0x15F0, + .end_offset = 0x15FC, + }, + { + 
.start_offset = 0x1660, + .end_offset = 0x1670, + }, + { + .start_offset = 0x17F0, + .end_offset = 0x17FC, + }, + { + .start_offset = 0x1860, + .end_offset = 0x1870, + }, + { + .start_offset = 0x19F0, + .end_offset = 0x19FC, + }, + { + .start_offset = 0x2660, + .end_offset = 0x2694, + }, + { + .start_offset = 0x2860, + .end_offset = 0x2884, + }, + { + .start_offset = 0x2A60, + .end_offset = 0X2B34, + }, + { + .start_offset = 0x2C60, + .end_offset = 0X2C80, + }, + { + .start_offset = 0x2E60, + .end_offset = 0X2E7C, + }, + { + .start_offset = 0x3060, + .end_offset = 0X3110, + }, + { + .start_offset = 0x3260, + .end_offset = 0X3278, + }, + { + .start_offset = 0x3460, + .end_offset = 0X3478, + }, + { + .start_offset = 0x3660, + .end_offset = 0X3684, + }, + { + .start_offset = 0x3860, + .end_offset = 0X3884, + }, + }, + .lut_entry = { + { + .lut_word_size = 1, + .lut_bank_sel = 0x40, + .lut_addr_size = 180, + .dmi_reg_offset = 0x2800, + }, + { + .lut_word_size = 1, + .lut_bank_sel = 0x41, + .lut_addr_size = 180, + .dmi_reg_offset = 0x3000, + }, + }, + }, +}; + +static struct cam_tfe_bus_hw_info tfe640_bus_hw_info = { + .common_reg = { + .hw_version = 0x00003000, + .cgc_ovd = 0x00003008, + .comp_cfg_0 = 0x0000300C, + .comp_cfg_1 = 0x00003010, + .frameheader_cfg = { + 0x00003034, + 0x00003038, + 0x0000303C, + 0x00003040, + }, + .pwr_iso_cfg = 0x0000305C, + .overflow_status_clear = 0x00003060, + .ccif_violation_status = 0x00003064, + .overflow_status = 0x00003068, + .image_size_violation_status = 0x00003070, + .perf_count_cfg = { + 0x00003074, + 0x00003078, + 0x0000307C, + 0x00003080, + 0x00003084, + 0x00003088, + 0x0000308C, + 0x00003090, + }, + .perf_count_val = { + 0x00003094, + 0x00003098, + 0x0000309C, + 0x000030A0, + 0x000030A4, + 0x000030A8, + 0x000030AC, + 0x000030B0, + }, + .perf_count_status = 0x000030B4, + .debug_status_top_cfg = 0x000030D4, + .debug_status_top = 0x000030D8, + .test_bus_ctrl = 0x000030DC, + .irq_mask = { + 0x00003018, + 0x0000301C, + }, + 
.irq_clear = { + 0x00003020, + 0x00003024, + }, + .irq_status = { + 0x00003028, + 0x0000302C, + }, + .irq_cmd = 0x00003030, + .cons_violation_shift = 28, + .violation_shift = 30, + .image_size_violation = 31, + }, + .num_client = 16, + .bus_client_reg = { + /* BUS Client 0 BAYER */ + { + .cfg = 0x00003200, + .image_addr = 0x00003204, + .frame_incr = 0x00003208, + .image_cfg_0 = 0x0000320C, + .image_cfg_1 = 0x00003210, + .image_cfg_2 = 0x00003214, + .packer_cfg = 0x00003218, + .bw_limit = 0x0000321C, + .frame_header_addr = 0x00003220, + .frame_header_incr = 0x00003224, + .frame_header_cfg = 0x00003228, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003230, + .irq_subsample_pattern = 0x00003234, + .framedrop_period = 0x00003238, + .framedrop_pattern = 0x0000323C, + .addr_status_0 = 0x00003268, + .addr_status_1 = 0x0000326C, + .addr_status_2 = 0x00003270, + .addr_status_3 = 0x00003274, + .debug_status_cfg = 0x00003278, + .debug_status_0 = 0x0000327C, + .debug_status_1 = 0x00003280, + .comp_group = CAM_TFE_BUS_COMP_GRP_0, + .client_name = "BAYER", + }, + /* BUS Client 1 IDEAL RAW*/ + { + .cfg = 0x00003300, + .image_addr = 0x00003304, + .frame_incr = 0x00003308, + .image_cfg_0 = 0x0000330C, + .image_cfg_1 = 0x00003310, + .image_cfg_2 = 0x00003314, + .packer_cfg = 0x00003318, + .bw_limit = 0x0000331C, + .frame_header_addr = 0x00003320, + .frame_header_incr = 0x00003324, + .frame_header_cfg = 0x00003328, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003330, + .irq_subsample_pattern = 0x00003334, + .framedrop_period = 0x00003338, + .framedrop_pattern = 0x0000333C, + .addr_status_0 = 0x00003368, + .addr_status_1 = 0x0000336C, + .addr_status_2 = 0x00003370, + .addr_status_3 = 0x00003374, + .debug_status_cfg = 0x00003378, + .debug_status_0 = 0x0000337C, + .debug_status_1 = 0x00003380, + .comp_group = CAM_TFE_BUS_COMP_GRP_1, + .client_name = "IDEAL_RAW", + }, + /* BUS Client 2 Stats BE Tintless */ + { + .cfg = 0x00003400, + .image_addr = 
0x00003404, + .frame_incr = 0x00003408, + .image_cfg_0 = 0x0000340C, + .image_cfg_1 = 0x00003410, + .image_cfg_2 = 0x00003414, + .packer_cfg = 0x00003418, + .bw_limit = 0x0000341C, + .frame_header_addr = 0x00003420, + .frame_header_incr = 0x00003424, + .frame_header_cfg = 0x00003428, + .line_done_cfg = 0x00003400, + .irq_subsample_period = 0x00003430, + .irq_subsample_pattern = 0x00003434, + .framedrop_period = 0x00003438, + .framedrop_pattern = 0x0000343C, + .addr_status_0 = 0x00003468, + .addr_status_1 = 0x0000346C, + .addr_status_2 = 0x00003470, + .addr_status_3 = 0x00003474, + .debug_status_cfg = 0x00003478, + .debug_status_0 = 0x0000347C, + .debug_status_1 = 0x00003480, + .comp_group = CAM_TFE_BUS_COMP_GRP_2, + .client_name = "STATS BE TINTLESS", + }, + /* BUS Client 3 Stats Bhist */ + { + .cfg = 0x00003500, + .image_addr = 0x00003504, + .frame_incr = 0x00003508, + .image_cfg_0 = 0x0000350C, + .image_cfg_1 = 0x00003510, + .image_cfg_2 = 0x00003514, + .packer_cfg = 0x00003518, + .bw_limit = 0x0000351C, + .frame_header_addr = 0x00003520, + .frame_header_incr = 0x00003524, + .frame_header_cfg = 0x00003528, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003530, + .irq_subsample_pattern = 0x00003534, + .framedrop_period = 0x00003538, + .framedrop_pattern = 0x0000353C, + .addr_status_0 = 0x00003568, + .addr_status_1 = 0x0000356C, + .addr_status_2 = 0x00003570, + .addr_status_3 = 0x00003574, + .debug_status_cfg = 0x00003578, + .debug_status_0 = 0x0000357C, + .debug_status_1 = 0x00003580, + .comp_group = CAM_TFE_BUS_COMP_GRP_2, + .client_name = "STATS BHIST", + }, + /* BUS Client 4 Stats AWB BG */ + { + .cfg = 0x00003600, + .image_addr = 0x00003604, + .frame_incr = 0x00003608, + .image_cfg_0 = 0x0000360C, + .image_cfg_1 = 0x00003610, + .image_cfg_2 = 0x00003614, + .packer_cfg = 0x00003618, + .bw_limit = 0x0000361C, + .frame_header_addr = 0x00003620, + .frame_header_incr = 0x00003624, + .frame_header_cfg = 0x00003628, + .line_done_cfg = 0x00000000, + 
.irq_subsample_period = 0x00003630, + .irq_subsample_pattern = 0x00003634, + .framedrop_period = 0x00003638, + .framedrop_pattern = 0x0000363C, + .addr_status_0 = 0x00003668, + .addr_status_1 = 0x0000366C, + .addr_status_2 = 0x00003670, + .addr_status_3 = 0x00003674, + .debug_status_cfg = 0x00003678, + .debug_status_0 = 0x0000367C, + .debug_status_1 = 0x00003680, + .comp_group = CAM_TFE_BUS_COMP_GRP_3, + .client_name = "STATS AWB BG", + }, + /* BUS Client 5 Stats AEC BG */ + { + .cfg = 0x00003700, + .image_addr = 0x00003704, + .frame_incr = 0x00003708, + .image_cfg_0 = 0x0000370C, + .image_cfg_1 = 0x00003710, + .image_cfg_2 = 0x00003714, + .packer_cfg = 0x00003718, + .bw_limit = 0x0000371C, + .frame_header_addr = 0x00003720, + .frame_header_incr = 0x00003724, + .frame_header_cfg = 0x00003728, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003730, + .irq_subsample_pattern = 0x00003734, + .framedrop_period = 0x00003738, + .framedrop_pattern = 0x0000373C, + .addr_status_0 = 0x00003768, + .addr_status_1 = 0x0000376C, + .addr_status_2 = 0x00003770, + .addr_status_3 = 0x00003774, + .debug_status_cfg = 0x00003778, + .debug_status_0 = 0x0000377C, + .debug_status_1 = 0x00003780, + .comp_group = CAM_TFE_BUS_COMP_GRP_3, + .client_name = "STATS AEC BG", + }, + /* BUS Client 6 Stats BAF */ + { + .cfg = 0x00003800, + .image_addr = 0x00003804, + .frame_incr = 0x00003808, + .image_cfg_0 = 0x0000380C, + .image_cfg_1 = 0x00003810, + .image_cfg_2 = 0x00003814, + .packer_cfg = 0x00003818, + .bw_limit = 0x0000381C, + .frame_header_addr = 0x00003820, + .frame_header_incr = 0x00003824, + .frame_header_cfg = 0x00003828, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003830, + .irq_subsample_pattern = 0x00003834, + .framedrop_period = 0x00003838, + .framedrop_pattern = 0x0000383C, + .addr_status_0 = 0x00003868, + .addr_status_1 = 0x0000386C, + .addr_status_2 = 0x00003870, + .addr_status_3 = 0x00003874, + .debug_status_cfg = 0x00003878, + .debug_status_0 = 
0x0000387C, + .debug_status_1 = 0x00003880, + .comp_group = CAM_TFE_BUS_COMP_GRP_4, + .client_name = "STATS BAF", + }, + /* BUS Client 7 RDI0 */ + { + .cfg = 0x00003900, + .image_addr = 0x00003904, + .frame_incr = 0x00003908, + .image_cfg_0 = 0x0000390C, + .image_cfg_1 = 0x00003910, + .image_cfg_2 = 0x00003914, + .packer_cfg = 0x00003918, + .bw_limit = 0x0000391C, + .frame_header_addr = 0x00003920, + .frame_header_incr = 0x00003924, + .frame_header_cfg = 0x00003928, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003930, + .irq_subsample_pattern = 0x00003934, + .framedrop_period = 0x00003938, + .framedrop_pattern = 0x0000393C, + .addr_status_0 = 0x00003968, + .addr_status_1 = 0x0000396C, + .addr_status_2 = 0x00003970, + .addr_status_3 = 0x00003974, + .debug_status_cfg = 0x00003978, + .debug_status_0 = 0x0000397C, + .debug_status_1 = 0x00003980, + .comp_group = CAM_TFE_BUS_COMP_GRP_5, + .client_name = "RDI0", + }, + /* BUS Client 8 RDI1 */ + { + .cfg = 0x00003A00, + .image_addr = 0x00003A04, + .frame_incr = 0x00003A08, + .image_cfg_0 = 0x00003A0C, + .image_cfg_1 = 0x00003A10, + .image_cfg_2 = 0x00003A14, + .packer_cfg = 0x00003A18, + .bw_limit = 0x00003A1C, + .frame_header_addr = 0x00003A20, + .frame_header_incr = 0x00003A24, + .frame_header_cfg = 0x00003A28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003A30, + .irq_subsample_pattern = 0x00003A34, + .framedrop_period = 0x00003A38, + .framedrop_pattern = 0x00003A3C, + .addr_status_0 = 0x00003A68, + .addr_status_1 = 0x00003A6C, + .addr_status_2 = 0x00003A70, + .addr_status_3 = 0x00003A74, + .debug_status_cfg = 0x00003A78, + .debug_status_0 = 0x00003A7C, + .debug_status_1 = 0x00003A80, + .comp_group = CAM_TFE_BUS_COMP_GRP_6, + .client_name = "RDI1", + }, + /* BUS Client 9 RDI2 */ + { + .cfg = 0x00003B00, + .image_addr = 0x00003B04, + .frame_incr = 0x00003B08, + .image_cfg_0 = 0x00003B0C, + .image_cfg_1 = 0x00003B10, + .image_cfg_2 = 0x00003B14, + .packer_cfg = 0x00003B18, + .bw_limit = 
0x00003B1C, + .frame_header_addr = 0x00003B20, + .frame_header_incr = 0x00003B24, + .frame_header_cfg = 0x00003B28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003B30, + .irq_subsample_pattern = 0x00003B34, + .framedrop_period = 0x00003B38, + .framedrop_pattern = 0x00003B3C, + .addr_status_0 = 0x00003B68, + .addr_status_1 = 0x00003B6C, + .addr_status_2 = 0x00003B70, + .addr_status_3 = 0x00003B74, + .debug_status_cfg = 0x00003B78, + .debug_status_0 = 0x00003B7C, + .debug_status_1 = 0x00003B80, + .comp_group = CAM_TFE_BUS_COMP_GRP_7, + .client_name = "RDI2", + }, + /* BUS Client 10 PDAF */ + { + .cfg = 0x00003C00, + .image_addr = 0x00003C04, + .frame_incr = 0x00003C08, + .image_cfg_0 = 0x00003C0C, + .image_cfg_1 = 0x00003C10, + .image_cfg_2 = 0x00003C14, + .packer_cfg = 0x00003C18, + .bw_limit = 0x00003C1C, + .frame_header_addr = 0x00003C20, + .frame_header_incr = 0x00003C24, + .frame_header_cfg = 0x00003C28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003C30, + .irq_subsample_pattern = 0x00003C34, + .framedrop_period = 0x00003C38, + .framedrop_pattern = 0x00003C3C, + .addr_status_0 = 0x00003C68, + .addr_status_1 = 0x00003C6C, + .addr_status_2 = 0x00003C70, + .addr_status_3 = 0x00003C74, + .debug_status_cfg = 0x00003C78, + .debug_status_0 = 0x00003C7C, + .debug_status_1 = 0x00003C80, + .comp_group = CAM_TFE_BUS_COMP_GRP_8, + .client_name = "PDAF", + }, + /* BUS Client 11 DS4 */ + { + .cfg = 0x00003D00, + .image_addr = 0x00003D04, + .frame_incr = 0x00003D08, + .image_cfg_0 = 0x00003D0C, + .image_cfg_1 = 0x00003D10, + .image_cfg_2 = 0x00003D14, + .packer_cfg = 0x00003D18, + .bw_limit = 0x00003D1C, + .frame_header_addr = 0x00003D20, + .frame_header_incr = 0x00003D24, + .frame_header_cfg = 0x00003D28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003D30, + .irq_subsample_pattern = 0x00003D34, + .framedrop_period = 0x00003D38, + .framedrop_pattern = 0x00003D3C, + .addr_status_0 = 0x00003D68, + .addr_status_1 = 0x00003D6C, + 
.addr_status_2 = 0x00003D70, + .addr_status_3 = 0x00003D74, + .debug_status_cfg = 0x00003D78, + .debug_status_0 = 0x00003D7C, + .debug_status_1 = 0x00003D80, + .comp_group = CAM_TFE_BUS_COMP_GRP_0, + .client_name = "DS4", + }, + /* BUS Client 12 DS16 */ + { + .cfg = 0x00003E00, + .image_addr = 0x00003E04, + .frame_incr = 0x00003E08, + .image_cfg_0 = 0x00003E0C, + .image_cfg_1 = 0x00003E10, + .image_cfg_2 = 0x00003E14, + .packer_cfg = 0x00003E18, + .bw_limit = 0x00003E1C, + .frame_header_addr = 0x00003E20, + .frame_header_incr = 0x00003E24, + .frame_header_cfg = 0x00003E28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003E30, + .irq_subsample_pattern = 0x00003E34, + .framedrop_period = 0x00003E38, + .framedrop_pattern = 0x00003E3C, + .addr_status_0 = 0x00003E68, + .addr_status_1 = 0x00003E6C, + .addr_status_2 = 0x00003E70, + .addr_status_3 = 0x00003E74, + .debug_status_cfg = 0x00003E78, + .debug_status_0 = 0x00003E7C, + .debug_status_1 = 0x00003E80, + .comp_group = CAM_TFE_BUS_COMP_GRP_0, + .client_name = "DS16", + }, + /* BUS Client 13 AI-Y */ + { + .cfg = 0x00003F00, + .image_addr = 0x00003F04, + .frame_incr = 0x00003F08, + .image_cfg_0 = 0x00003F0C, + .image_cfg_1 = 0x00003F10, + .image_cfg_2 = 0x00003F14, + .packer_cfg = 0x00003F18, + .bw_limit = 0x00003F1C, + .frame_header_addr = 0x00003F20, + .frame_header_incr = 0x00003F24, + .frame_header_cfg = 0x00003F28, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00003F30, + .irq_subsample_pattern = 0x00003F34, + .framedrop_period = 0x00003F38, + .framedrop_pattern = 0x00003F3C, + .addr_status_0 = 0x00003F68, + .addr_status_1 = 0x00003F6C, + .addr_status_2 = 0x00003F70, + .addr_status_3 = 0x00003F74, + .debug_status_cfg = 0x00003F78, + .debug_status_0 = 0x00003F7C, + .debug_status_1 = 0x00003F80, + .comp_group = CAM_TFE_BUS_COMP_GRP_9, + .client_name = "AI-Y", + }, + /* BUS Client 14 AI-C */ + { + .cfg = 0x00004000, + .image_addr = 0x00004004, + .frame_incr = 0x00004008, + .image_cfg_0 = 
0x0000400C, + .image_cfg_1 = 0x00004010, + .image_cfg_2 = 0x00004014, + .packer_cfg = 0x00004018, + .bw_limit = 0x0000401C, + .frame_header_addr = 0x00004020, + .frame_header_incr = 0x00004024, + .frame_header_cfg = 0x00004028, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00004030, + .irq_subsample_pattern = 0x00004034, + .framedrop_period = 0x00004038, + .framedrop_pattern = 0x0000403C, + .addr_status_0 = 0x00004068, + .addr_status_1 = 0x0000406C, + .addr_status_2 = 0x00004070, + .addr_status_3 = 0x00004074, + .debug_status_cfg = 0x00004078, + .debug_status_0 = 0x0000407C, + .debug_status_1 = 0x00004080, + .comp_group = CAM_TFE_BUS_COMP_GRP_9, + .client_name = "AI-C", + }, + /* BUS Client 15 Stats RS */ + { + .cfg = 0x00004100, + .image_addr = 0x00004104, + .frame_incr = 0x00004108, + .image_cfg_0 = 0x0000410C, + .image_cfg_1 = 0x00004110, + .image_cfg_2 = 0x00004114, + .packer_cfg = 0x00004118, + .bw_limit = 0x0000411C, + .frame_header_addr = 0x00004120, + .frame_header_incr = 0x00004124, + .frame_header_cfg = 0x00004128, + .line_done_cfg = 0x00000000, + .irq_subsample_period = 0x00004130, + .irq_subsample_pattern = 0x00004134, + .framedrop_period = 0x00004138, + .framedrop_pattern = 0x0000413C, + .addr_status_0 = 0x00004168, + .addr_status_1 = 0x0000416C, + .addr_status_2 = 0x00004170, + .addr_status_3 = 0x00004174, + .debug_status_cfg = 0x00004178, + .debug_status_0 = 0x0000417C, + .debug_status_1 = 0x00004180, + .comp_group = CAM_TFE_BUS_COMP_GRP_10, + .client_name = "STATS RS", + }, + }, + .num_out = 15, + .tfe_out_hw_info = { + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_5, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_1, + .mid[0] = 4, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_6, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_2, + .mid[0] = 5, + }, + { + .tfe_out_id = 
CAM_TFE_BUS_TFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_7, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_3, + .mid[0] = 6, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_FULL, + .max_width = 4096, + .max_height = 4096, + .composite_group = CAM_TFE_BUS_COMP_GRP_0, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 16, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_RAW_DUMP, + .max_width = 4096, + .max_height = 4096, + .composite_group = CAM_TFE_BUS_COMP_GRP_1, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 7, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_PDAF, + .max_width = 4096, + .max_height = 4096, + .composite_group = CAM_TFE_BUS_COMP_GRP_8, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 26, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_3, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 21, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_2, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 18, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_2, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 17, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_3, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 19, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_BF, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_4, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 21, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_STATS_RS, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_10, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 27, + }, + { + .tfe_out_id = 
CAM_TFE_BUS_TFE_OUT_DS4, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_0, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 22, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_DS16, + .max_width = -1, + .max_height = -1, + .composite_group = CAM_TFE_BUS_COMP_GRP_0, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 23, + }, + { + .tfe_out_id = CAM_TFE_BUS_TFE_OUT_AI, + .max_width = 1920, + .max_height = 1920, + .composite_group = CAM_TFE_BUS_COMP_GRP_9, + .rup_group_id = CAM_TFE_BUS_RUP_GRP_0, + .mid[0] = 24, + .mid[1] = 25, + }, + }, + .num_comp_grp = 11, + .max_wm_per_comp_grp = 3, + .comp_done_shift = 8, + .top_bus_wr_irq_shift = 1, + .comp_buf_done_mask = 0x7FF00, + .comp_rup_done_mask = 0xF, + .bus_irq_error_mask = { + 0xD0000000, + 0x00000000, + }, + .support_consumed_addr = true, + .pdaf_rdi2_mux_en = false, + .rdi_width = 128, + .max_bw_counter_limit = 0xFF, + .counter_limit_shift = 1, + .counter_limit_mask = 0xF, +}; + +struct cam_tfe_hw_info cam_tfe640 = { + .top_irq_mask = { + 0x00001834, + 0x00001838, + 0x0000183C, + }, + .top_irq_clear = { + 0x00001840, + 0x00001844, + 0x00001848, + }, + .top_irq_status = { + 0x0000184C, + 0x00001850, + 0x00001854, + }, + .top_irq_cmd = 0x00001830, + .global_clear_bitmask = 0x00000001, + .bus_irq_mask = { + 0x00003018, + 0x0000301C, + }, + .bus_irq_clear = { + 0x00003020, + 0x00003024, + }, + .bus_irq_status = { + 0x00003028, + 0x0000302C, + }, + .bus_irq_cmd = 0x00003030, + .bus_violation_reg = 0x00003064, + .bus_overflow_reg = 0x00003068, + .bus_image_size_vilation_reg = 0x3070, + .bus_overflow_clear_cmd = 0x3060, + .debug_status_top = 0x30D8, + + .reset_irq_mask = { + 0x00000001, + 0x00000000, + 0x00000000, + }, + .error_irq_mask = { + 0x000F0F00, + 0x00000000, + 0x0000003F, + }, + .bus_reg_irq_mask = { + 0x00000002, + 0x00000000, + }, + .bus_error_irq_mask = { + 0xC0000000, + 0x00000000, + }, + + .num_clc = 39, + .clc_hw_status_info = tfe640_clc_hw_info, + .bus_version = 
CAM_TFE_BUS_1_0, + .bus_hw_info = &tfe640_bus_hw_info, + + .top_version = CAM_TFE_TOP_1_0, + .top_hw_info = &tfe640_top_hw_info, +}; + +#endif /* _CAM_TFE640__H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.c new file mode 100644 index 0000000000..31a2ebee52 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.c @@ -0,0 +1,2832 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_hw_intf.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_tfe_hw_intf.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_tfe_bus.h" +#include "cam_tfe_irq.h" +#include "cam_tfe_soc.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_tfe_csid_hw_intf.h" +#include "cam_mem_mgr_api.h" + +static const char drv_name[] = "tfe_bus"; + +#define CAM_TFE_BUS_IRQ_REG0 0 +#define CAM_TFE_BUS_IRQ_REG1 1 + +#define CAM_TFE_BUS_PAYLOAD_MAX 256 + +#define CAM_TFE_RDI_BUS_DEFAULT_WIDTH 0xFFFF +#define CAM_TFE_RDI_BUS_DEFAULT_STRIDE 0xFFFF + +#define MAX_BUF_UPDATE_REG_NUM \ + (sizeof(struct cam_tfe_bus_reg_offset_bus_client) / 4) +#define MAX_REG_VAL_PAIR_SIZE \ + (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES) + + +enum cam_tfe_bus_packer_format { + PACKER_FMT_PLAIN_128, + PACKER_FMT_PLAIN_8, + PACKER_FMT_PLAIN_8_ODD_EVEN, + PACKER_FMT_PLAIN_8_LSB_MSB_10, + PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN, + PACKER_FMT_PLAIN_16_10BPP, + PACKER_FMT_PLAIN_16_12BPP, + PACKER_FMT_PLAIN_16_14BPP, + PACKER_FMT_PLAIN_16_16BPP, + PACKER_FMT_PLAIN_32, + PACKER_FMT_PLAIN_64, + 
PACKER_FMT_TP_10, + PACKET_FMT_MIPI10, + PACKET_FMT_MIPI12, + PACKER_FMT_MAX, +}; + +struct cam_tfe_bus_common_data { + uint32_t core_index; + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + void *tfe_core_data; + struct cam_tfe_bus_reg_offset_common *common_reg; + uint32_t io_buf_update[MAX_REG_VAL_PAIR_SIZE]; + + spinlock_t spin_lock; + struct mutex bus_mutex; + uint32_t secure_mode; + uint32_t num_sec_out; + uint32_t comp_done_shift; + uint32_t rdi_width; + bool is_lite; + bool support_consumed_addr; + cam_hw_mgr_event_cb_func event_cb; + bool rup_irq_enable[CAM_TFE_BUS_RUP_GRP_MAX]; + bool pdaf_rdi2_mux_en; + uint32_t max_bw_counter_limit; + uint32_t counter_limit_shift; + uint32_t counter_limit_mask; +}; + +struct cam_tfe_bus_wm_resource_data { + uint32_t index; + uint32_t out_id; + struct cam_tfe_bus_common_data *common_data; + struct cam_tfe_bus_reg_offset_bus_client *hw_regs; + + uint32_t offset; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t format; + uint32_t pack_fmt; + uint32_t burst_len; + uint32_t mode; + + uint32_t irq_subsample_period; + uint32_t irq_subsample_pattern; + uint32_t framedrop_period; + uint32_t framedrop_pattern; + + uint32_t en_cfg; + uint32_t image_addr_offset; + uint32_t is_dual; + + uint32_t acquired_width; + uint32_t acquired_height; + uint32_t acquired_stride; + bool limiter_blob_status; +}; + +struct cam_tfe_bus_comp_grp_data { + enum cam_tfe_bus_comp_grp_id comp_grp_id; + struct cam_tfe_bus_common_data *common_data; + + uint32_t is_master; + uint32_t is_dual; + uint32_t addr_sync_mode; + uint32_t composite_mask; + + uint32_t acquire_dev_cnt; + uint32_t source_grp; + uint32_t max_wm_per_comp_grp; + + struct cam_isp_resource_node **out_rsrc; +}; + +struct cam_tfe_bus_tfe_out_data { + uint32_t out_id; + uint32_t composite_group; + uint32_t rup_group_id; + uint32_t source_group; + struct cam_tfe_bus_common_data *common_data; + struct cam_tfe_bus_priv *bus_priv; + + uint32_t num_wm; + struct 
cam_isp_resource_node *wm_res[PLANE_MAX]; + + struct cam_isp_resource_node *comp_grp; + struct list_head tfe_out_list; + + uint32_t is_master; + uint32_t is_dual; + + uint32_t format; + uint32_t max_width; + uint32_t max_height; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t secure_mode; + void *priv; + cam_hw_mgr_event_cb_func event_cb; + uint32_t mid[CAM_TFE_BUS_MAX_MID_PER_PORT]; +}; + +struct cam_tfe_bus_priv { + struct cam_tfe_bus_common_data common_data; + uint32_t num_client; + uint32_t num_out; + uint32_t num_comp_grp; + uint32_t max_wm_per_comp_grp; + uint32_t top_bus_wr_irq_shift; + + struct cam_isp_resource_node bus_client[CAM_TFE_BUS_MAX_CLIENTS]; + struct cam_isp_resource_node comp_grp[CAM_TFE_BUS_COMP_GRP_MAX]; + struct cam_isp_resource_node tfe_out[CAM_TFE_BUS_TFE_OUT_MAX]; + + struct list_head free_comp_grp; + struct list_head used_comp_grp; + + void *tasklet_info; + uint32_t comp_buf_done_mask; + uint32_t comp_rup_done_mask; + uint32_t bus_irq_error_mask[CAM_TFE_BUS_IRQ_REGISTERS_MAX]; +}; + +static bool cam_tfe_bus_can_be_secure(uint32_t out_id) +{ + switch (out_id) { + case CAM_TFE_BUS_TFE_OUT_FULL: + case CAM_TFE_BUS_TFE_OUT_RAW_DUMP: + case CAM_TFE_BUS_TFE_OUT_RDI0: + case CAM_TFE_BUS_TFE_OUT_RDI1: + case CAM_TFE_BUS_TFE_OUT_RDI2: + case CAM_TFE_BUS_TFE_OUT_DS4: + case CAM_TFE_BUS_TFE_OUT_DS16: + case CAM_TFE_BUS_TFE_OUT_AI: + return true; + + case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE: + case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST: + case CAM_TFE_BUS_TFE_OUT_STATS_TL_BG: + case CAM_TFE_BUS_TFE_OUT_STATS_BF: + case CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG: + case CAM_TFE_BUS_TFE_OUT_STATS_RS: + default: + return false; + } +} + +static enum cam_tfe_bus_tfe_out_id + cam_tfe_bus_get_out_res_id(uint32_t out_res_id) +{ + switch (out_res_id) { + case CAM_ISP_TFE_OUT_RES_FULL: + return CAM_TFE_BUS_TFE_OUT_FULL; + case CAM_ISP_TFE_OUT_RES_RAW_DUMP: + return CAM_TFE_BUS_TFE_OUT_RAW_DUMP; + case CAM_ISP_TFE_OUT_RES_PDAF: + return CAM_TFE_BUS_TFE_OUT_PDAF; + case 
CAM_ISP_TFE_OUT_RES_RDI_0: + return CAM_TFE_BUS_TFE_OUT_RDI0; + case CAM_ISP_TFE_OUT_RES_RDI_1: + return CAM_TFE_BUS_TFE_OUT_RDI1; + case CAM_ISP_TFE_OUT_RES_RDI_2: + return CAM_TFE_BUS_TFE_OUT_RDI2; + case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE: + return CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE; + case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST: + return CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST; + case CAM_ISP_TFE_OUT_RES_STATS_TL_BG: + return CAM_TFE_BUS_TFE_OUT_STATS_TL_BG; + case CAM_ISP_TFE_OUT_RES_STATS_BF: + return CAM_TFE_BUS_TFE_OUT_STATS_BF; + case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG: + return CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG; + case CAM_ISP_TFE_OUT_RES_STATS_RS: + return CAM_TFE_BUS_TFE_OUT_STATS_RS; + case CAM_ISP_TFE_OUT_RES_DS4: + return CAM_TFE_BUS_TFE_OUT_DS4; + case CAM_ISP_TFE_OUT_RES_DS16: + return CAM_TFE_BUS_TFE_OUT_DS16; + case CAM_ISP_TFE_OUT_RES_AI: + return CAM_TFE_BUS_TFE_OUT_AI; + default: + return CAM_TFE_BUS_TFE_OUT_MAX; + } +} + +static int cam_tfe_bus_get_num_wm( + enum cam_tfe_bus_tfe_out_id out_res_id, + uint32_t format) +{ + switch (out_res_id) { + case CAM_TFE_BUS_TFE_OUT_RDI0: + case CAM_TFE_BUS_TFE_OUT_RDI1: + case CAM_TFE_BUS_TFE_OUT_RDI2: + switch (format) { + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN128: + return 1; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_PDAF: + switch (format) { + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + return 1; + default: + break; + } + break; + + case CAM_TFE_BUS_TFE_OUT_FULL: + switch (format) { + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN16_10: + case 
CAM_FORMAT_PLAIN16_12: + return 1; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_RAW_DUMP: + switch (format) { + case CAM_FORMAT_ARGB_14: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + return 1; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_DS4: + switch (format) { + case CAM_FORMAT_PD10: + return 1; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_DS16: + switch (format) { + case CAM_FORMAT_PD10: + return 1; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_AI: + switch (format) { + case CAM_FORMAT_NV12: + case CAM_FORMAT_TP10: + case CAM_FORMAT_PD10: + return 2; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE: + case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST: + case CAM_TFE_BUS_TFE_OUT_STATS_TL_BG: + case CAM_TFE_BUS_TFE_OUT_STATS_BF: + case CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG: + switch (format) { + case CAM_FORMAT_PLAIN64: + return 1; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_STATS_RS: + switch (format) { + case CAM_FORMAT_PLAIN32: + case CAM_FORMAT_PLAIN32_20: + return 1; + default: + break; + } + break; + default: + break; + } + + CAM_ERR(CAM_ISP, "Unsupported format %u for resource id %u", + format, out_res_id); + + return -EINVAL; +} + +static int cam_tfe_bus_get_wm_idx( + enum cam_tfe_bus_tfe_out_id tfe_out_res_id, + enum cam_tfe_bus_plane_type plane, + bool pdaf_rdi2_mux_en) +{ + int wm_idx = -1; + + switch (tfe_out_res_id) { + case CAM_TFE_BUS_TFE_OUT_RDI0: + switch (plane) { + case PLANE_Y: + wm_idx = 7; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_RDI1: + switch (plane) { + case PLANE_Y: + wm_idx = 8; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_RDI2: + switch (plane) { + case PLANE_Y: + wm_idx = 9; + break; + default: + break; + } + break; + case 
CAM_TFE_BUS_TFE_OUT_PDAF: + switch (plane) { + case PLANE_Y: + if (pdaf_rdi2_mux_en) + wm_idx = 9; + else + wm_idx = 10; + break; + default: + break; + } + break; + + case CAM_TFE_BUS_TFE_OUT_FULL: + switch (plane) { + case PLANE_Y: + wm_idx = 0; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_RAW_DUMP: + switch (plane) { + case PLANE_Y: + wm_idx = 1; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE: + switch (plane) { + case PLANE_Y: + wm_idx = 5; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST: + switch (plane) { + case PLANE_Y: + wm_idx = 3; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG: + switch (plane) { + case PLANE_Y: + wm_idx = 4; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_STATS_TL_BG: + switch (plane) { + case PLANE_Y: + wm_idx = 2; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_STATS_BF: + switch (plane) { + case PLANE_Y: + wm_idx = 6; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_STATS_RS: + switch (plane) { + case PLANE_Y: + wm_idx = 15; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_DS4: + switch (plane) { + case PLANE_Y: + wm_idx = 11; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_DS16: + switch (plane) { + case PLANE_Y: + wm_idx = 12; + break; + default: + break; + } + break; + case CAM_TFE_BUS_TFE_OUT_AI: + switch (plane) { + case PLANE_Y: + wm_idx = 13; + break; + case PLANE_C: + wm_idx = 14; + break; + default: + break; + } + break; + default: + break; + } + + return wm_idx; +} + +static enum cam_tfe_bus_packer_format + cam_tfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index) +{ + switch (out_fmt) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_16: + case 
CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_PLAIN16_8: + case CAM_FORMAT_PLAIN128: + case CAM_FORMAT_PD8: + return PACKER_FMT_PLAIN_128; + case CAM_FORMAT_PLAIN8: + return PACKER_FMT_PLAIN_8; + case CAM_FORMAT_Y_ONLY: + return PACKER_FMT_PLAIN_8_LSB_MSB_10; + case CAM_FORMAT_PLAIN16_10: + return PACKER_FMT_PLAIN_16_10BPP; + case CAM_FORMAT_PLAIN16_12: + return PACKER_FMT_PLAIN_16_12BPP; + case CAM_FORMAT_PLAIN16_14: + return PACKER_FMT_PLAIN_16_14BPP; + case CAM_FORMAT_PLAIN16_16: + return PACKER_FMT_PLAIN_16_16BPP; + case CAM_FORMAT_ARGB: + return PACKER_FMT_PLAIN_32; + case CAM_FORMAT_PLAIN64: + case CAM_FORMAT_PD10: + return PACKER_FMT_PLAIN_64; + case CAM_FORMAT_TP10: + return PACKER_FMT_TP_10; + default: + return PACKER_FMT_MAX; + } +} + +static int cam_tfe_bus_acquire_rdi_wm( + struct cam_tfe_bus_wm_resource_data *rsrc_data) +{ + int pack_fmt = 0; + int rdi_width = rsrc_data->common_data->rdi_width; + + if (rdi_width == 64) + pack_fmt = 0xa; + else if (rdi_width == 128) + pack_fmt = 0x0; + + switch (rsrc_data->format) { + case CAM_FORMAT_MIPI_RAW_6: + rsrc_data->pack_fmt = pack_fmt; + if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 6, rdi_width) / + rdi_width; + rsrc_data->en_cfg = 0x1; + } else { + rsrc_data->width = + CAM_TFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = + CAM_TFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + } + break; + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_PLAIN8: + rsrc_data->pack_fmt = pack_fmt; + if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 8, rdi_width) / + rdi_width; + rsrc_data->en_cfg = 0x1; + } else { + rsrc_data->width = + CAM_TFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = + CAM_TFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + } + break; + case CAM_FORMAT_MIPI_RAW_10: + rsrc_data->pack_fmt = pack_fmt; + if 
(rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 10, rdi_width) / + rdi_width; + rsrc_data->en_cfg = 0x1; + } else { + rsrc_data->width = + CAM_TFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = + CAM_TFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + } + break; + case CAM_FORMAT_MIPI_RAW_12: + rsrc_data->pack_fmt = pack_fmt; + if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 12, rdi_width) / + rdi_width; + rsrc_data->en_cfg = 0x1; + } else { + rsrc_data->width = + CAM_TFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = + CAM_TFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + } + break; + case CAM_FORMAT_MIPI_RAW_14: + rsrc_data->pack_fmt = pack_fmt; + if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 14, rdi_width) / + rdi_width; + rsrc_data->en_cfg = 0x1; + } else { + rsrc_data->width = + CAM_TFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = + CAM_TFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + } + break; + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_PLAIN16_16: + rsrc_data->pack_fmt = pack_fmt; + if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 16, rdi_width) / + rdi_width; + rsrc_data->en_cfg = 0x1; + } else { + rsrc_data->width = + CAM_TFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = + CAM_TFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + } + break; + + case CAM_FORMAT_PLAIN128: + case CAM_FORMAT_PLAIN64: + rsrc_data->pack_fmt = pack_fmt; + if (rsrc_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE) { + rsrc_data->width = + ALIGNUP(rsrc_data->width * 64, rdi_width) / + 
rdi_width; + rsrc_data->en_cfg = 0x1; + } else { + rsrc_data->width = + CAM_TFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = + CAM_TFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + } + break; + default: + CAM_ERR(CAM_ISP, "Unsupported RDI:%d format %d", + rsrc_data->index, rsrc_data->format); + return -EINVAL; + } + + return 0; +} + +static int cam_tfe_bus_acquire_wm( + struct cam_tfe_bus_priv *bus_priv, + struct cam_isp_tfe_out_port_generic_info *out_port_info, + struct cam_isp_resource_node **wm_res, + void *tasklet, + enum cam_tfe_bus_tfe_out_id tfe_out_res_id, + enum cam_tfe_bus_plane_type plane, + uint32_t *client_done_mask, + uint32_t is_dual, + enum cam_tfe_bus_comp_grp_id *comp_grp_id) +{ + struct cam_isp_resource_node *wm_res_local = NULL; + struct cam_tfe_bus_wm_resource_data *rsrc_data = NULL; + uint32_t wm_idx = 0; + int rc = 0; + + *wm_res = NULL; + /* No need to allocate for BUS TFE OUT to WM is fixed. */ + wm_idx = cam_tfe_bus_get_wm_idx(tfe_out_res_id, plane, + bus_priv->common_data.pdaf_rdi2_mux_en); + if (wm_idx < 0 || wm_idx >= bus_priv->num_client) { + CAM_ERR(CAM_ISP, "Unsupported TFE out %d plane %d", + tfe_out_res_id, plane); + return -EINVAL; + } + + wm_res_local = &bus_priv->bus_client[wm_idx]; + if (wm_res_local->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, "WM:%d not available state:%d", + wm_idx, wm_res_local->res_state); + return -EALREADY; + } + wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + wm_res_local->tasklet_info = tasklet; + + rsrc_data = wm_res_local->res_priv; + rsrc_data->format = out_port_info->format; + rsrc_data->pack_fmt = cam_tfe_bus_get_packer_fmt(rsrc_data->format, + wm_idx); + + rsrc_data->width = out_port_info->width; + rsrc_data->height = out_port_info->height; + rsrc_data->stride = out_port_info->stride; + rsrc_data->mode = out_port_info->wm_mode; + rsrc_data->out_id = tfe_out_res_id; + + /* + * Store the acquire width, height 
separately. For frame based ports + * width and height modified again + */ + rsrc_data->acquired_width = out_port_info->width; + rsrc_data->acquired_height = out_port_info->height; + rsrc_data->acquired_stride = out_port_info->stride; + + rsrc_data->is_dual = is_dual; + /* Set WM offset value to default */ + rsrc_data->offset = 0; + + if (((rsrc_data->index >= 7) && (rsrc_data->index <= 9)) && + (tfe_out_res_id != CAM_TFE_BUS_TFE_OUT_PDAF)) { + /* WM 7-9 refers to RDI 0/ RDI 1/RDI 2 */ + rc = cam_tfe_bus_acquire_rdi_wm(rsrc_data); + if (rc) + return rc; + + } else if (rsrc_data->index == 0 || rsrc_data->index == 1 || + (tfe_out_res_id == CAM_TFE_BUS_TFE_OUT_PDAF) || + (rsrc_data->index >= 11 && rsrc_data->index <= 14)) { + /* WM 0 FULL_OUT WM 1 IDEAL RAW, CAM_TFE_BUS_TFE_OUT_PDAF for pdaf */ + switch (rsrc_data->format) { + case CAM_FORMAT_MIPI_RAW_8: + rsrc_data->pack_fmt = 0x1; + break; + case CAM_FORMAT_MIPI_RAW_10: + rsrc_data->pack_fmt = 0xc; + break; + case CAM_FORMAT_MIPI_RAW_12: + rsrc_data->pack_fmt = 0xd; + break; + case CAM_FORMAT_PLAIN8: + rsrc_data->pack_fmt = 0x1; + break; + case CAM_FORMAT_PLAIN16_10: + rsrc_data->pack_fmt = 0x5; + rsrc_data->pack_fmt |= 0x10; + break; + case CAM_FORMAT_PLAIN16_12: + rsrc_data->pack_fmt = 0x6; + rsrc_data->pack_fmt |= 0x10; + break; + case CAM_FORMAT_PD10: + rsrc_data->pack_fmt = 0x0; + rsrc_data->width = DIV_ROUND_UP(rsrc_data->width, 4); + rsrc_data->height /= 2; + break; + case CAM_FORMAT_NV21: + case CAM_FORMAT_NV12: + case CAM_FORMAT_Y_ONLY: + rsrc_data->pack_fmt = 0x1; + switch (plane) { + case PLANE_C: + rsrc_data->height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + break; + default: + CAM_ERR(CAM_ISP, "Invalid format %d", + rsrc_data->format); + return -EINVAL; + } + rsrc_data->en_cfg = 0x1; + } else if ((rsrc_data->index >= 2 && rsrc_data->index <= 6) || + (rsrc_data->index == 15)) { + /* WM 2-6 and 15 are stats */ + 
rsrc_data->width = 0; + rsrc_data->height = 0; + rsrc_data->stride = 1; + rsrc_data->en_cfg = (0x1 << 16) | 0x1; + + /*RS state packet format*/ + if (rsrc_data->index == 15) + rsrc_data->pack_fmt = 0x9; + } else { + CAM_ERR(CAM_ISP, "Invalid WM:%d requested", rsrc_data->index); + return -EINVAL; + } + + *wm_res = wm_res_local; + *comp_grp_id = rsrc_data->hw_regs->comp_group; + *client_done_mask |= (1 << wm_idx); + + CAM_DBG(CAM_ISP, + "WM:%d processed width:%d height:%d format:0x%x comp_group:%d packt format:0x%x wm mode:%d", + rsrc_data->index, rsrc_data->width, rsrc_data->height, + rsrc_data->format, *comp_grp_id, rsrc_data->pack_fmt, + rsrc_data->mode); + return 0; +} + +static int cam_tfe_bus_release_wm(void *bus_priv, + struct cam_isp_resource_node *wm_res) +{ + struct cam_tfe_bus_wm_resource_data *rsrc_data = wm_res->res_priv; + + rsrc_data->offset = 0; + rsrc_data->width = 0; + rsrc_data->height = 0; + rsrc_data->stride = 0; + rsrc_data->format = 0; + rsrc_data->pack_fmt = 0; + rsrc_data->burst_len = 0; + rsrc_data->irq_subsample_period = 0; + rsrc_data->irq_subsample_pattern = 0; + rsrc_data->framedrop_period = 0; + rsrc_data->framedrop_pattern = 0; + rsrc_data->en_cfg = 0; + rsrc_data->is_dual = 0; + rsrc_data->limiter_blob_status = false; + + wm_res->tasklet_info = NULL; + wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + CAM_DBG(CAM_ISP, "TFE:%dRelease WM:%d", + rsrc_data->common_data->core_index, rsrc_data->index); + + return 0; +} + +static int cam_tfe_bus_start_wm(struct cam_isp_resource_node *wm_res) +{ + struct cam_tfe_bus_wm_resource_data *rsrc_data = + wm_res->res_priv; + struct cam_tfe_bus_common_data *common_data = + rsrc_data->common_data; + + /* Skip to overwrite if wm bandwidth limiter blob already sent */ + if (!rsrc_data->limiter_blob_status) + cam_io_w(rsrc_data->common_data->counter_limit_mask, + common_data->mem_base + rsrc_data->hw_regs->bw_limit); + + cam_io_w((rsrc_data->height << 16) | rsrc_data->width, + 
common_data->mem_base + rsrc_data->hw_regs->image_cfg_0); + cam_io_w(rsrc_data->pack_fmt, + common_data->mem_base + rsrc_data->hw_regs->packer_cfg); + + /* Configure stride for RDIs on full TFE and TFE lite */ + if ((rsrc_data->index > 6) && + ((rsrc_data->mode != CAM_ISP_TFE_WM_LINE_BASED_MODE) && + (rsrc_data->out_id != CAM_TFE_BUS_TFE_OUT_PDAF))) { + cam_io_w_mb(rsrc_data->stride, (common_data->mem_base + + rsrc_data->hw_regs->image_cfg_2)); + CAM_DBG(CAM_ISP, "WM:%d configure stride reg :0x%x", + rsrc_data->index, + rsrc_data->stride); + } + + /* Enable WM */ + cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base + + rsrc_data->hw_regs->cfg); + + CAM_DBG(CAM_ISP, "TFE:%d WM:%d width = %d, height = %d", + common_data->core_index, rsrc_data->index, + rsrc_data->width, rsrc_data->height); + CAM_DBG(CAM_ISP, "WM:%d pk_fmt = %d", rsrc_data->index, + rsrc_data->pack_fmt); + CAM_DBG(CAM_ISP, "WM:%d stride = %d, burst len = %d", + rsrc_data->index, rsrc_data->stride, 0xf); + CAM_DBG(CAM_ISP, "TFE:%d Start WM:%d offset 0x%x val 0x%x", + common_data->core_index, rsrc_data->index, + (uint32_t) rsrc_data->hw_regs->cfg, rsrc_data->en_cfg); + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + return 0; +} + +static int cam_tfe_bus_stop_wm(struct cam_isp_resource_node *wm_res) +{ + struct cam_tfe_bus_wm_resource_data *rsrc_data = + wm_res->res_priv; + struct cam_tfe_bus_common_data *common_data = + rsrc_data->common_data; + + /* Disable WM */ + cam_io_w_mb(0x0, common_data->mem_base + rsrc_data->hw_regs->cfg); + CAM_DBG(CAM_ISP, "TFE:%d Stop WM:%d", + rsrc_data->common_data->core_index, rsrc_data->index); + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + return 0; +} + +static int cam_tfe_bus_init_wm_resource(uint32_t index, + struct cam_tfe_bus_priv *bus_priv, + struct cam_tfe_bus_hw_info *hw_info, + struct cam_isp_resource_node *wm_res) +{ + struct cam_tfe_bus_wm_resource_data *rsrc_data; + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct 
cam_tfe_bus_wm_resource_data), + GFP_KERNEL); + if (!rsrc_data) { + CAM_DBG(CAM_ISP, "Failed to alloc for WM res priv"); + return -ENOMEM; + } + wm_res->res_priv = rsrc_data; + + rsrc_data->index = index; + rsrc_data->hw_regs = &hw_info->bus_client_reg[index]; + rsrc_data->common_data = &bus_priv->common_data; + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&wm_res->list); + + wm_res->start = cam_tfe_bus_start_wm; + wm_res->stop = cam_tfe_bus_stop_wm; + wm_res->hw_intf = bus_priv->common_data.hw_intf; + + return 0; +} + +static int cam_tfe_bus_deinit_wm_resource( + struct cam_isp_resource_node *wm_res) +{ + struct cam_tfe_bus_wm_resource_data *rsrc_data; + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&wm_res->list); + + wm_res->start = NULL; + wm_res->stop = NULL; + wm_res->top_half_handler = NULL; + wm_res->bottom_half_handler = NULL; + wm_res->hw_intf = NULL; + + rsrc_data = wm_res->res_priv; + wm_res->res_priv = NULL; + if (!rsrc_data) + return -ENOMEM; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static void cam_tfe_bus_add_wm_to_comp_grp( + struct cam_isp_resource_node *comp_grp, + uint32_t composite_mask) +{ + struct cam_tfe_bus_comp_grp_data *rsrc_data = comp_grp->res_priv; + + if (rsrc_data) + rsrc_data->composite_mask |= composite_mask; + else + CAM_ERR(CAM_ISP, "Invalid rsrc data"); +} + +static bool cam_tfe_bus_match_comp_grp( + struct cam_tfe_bus_priv *bus_priv, + struct cam_isp_resource_node **comp_grp, + uint32_t comp_grp_id) +{ + struct cam_tfe_bus_comp_grp_data *rsrc_data = NULL; + struct cam_isp_resource_node *comp_grp_local = NULL; + + list_for_each_entry(comp_grp_local, + &bus_priv->used_comp_grp, list) { + rsrc_data = comp_grp_local->res_priv; + if (rsrc_data->comp_grp_id == comp_grp_id) { + /* Match found */ + *comp_grp = comp_grp_local; + return true; + } + } + + list_for_each_entry(comp_grp_local, + &bus_priv->free_comp_grp, list) { + rsrc_data = comp_grp_local->res_priv; + if 
(rsrc_data->comp_grp_id == comp_grp_id) { + /* Match found */ + *comp_grp = comp_grp_local; + list_del(&comp_grp_local->list); + list_add_tail(&comp_grp_local->list, + &bus_priv->used_comp_grp); + return false; + } + } + + *comp_grp = NULL; + return false; +} + +static int cam_tfe_bus_acquire_comp_grp( + struct cam_tfe_bus_priv *bus_priv, + struct cam_isp_tfe_out_port_generic_info *out_port_info, + void *tasklet, + uint32_t is_dual, + uint32_t is_master, + struct cam_isp_resource_node **comp_grp, + enum cam_tfe_bus_comp_grp_id comp_grp_id, + struct cam_isp_resource_node *out_rsrc, + uint32_t source_group) +{ + int rc = 0; + struct cam_isp_resource_node *comp_grp_local = NULL; + struct cam_tfe_bus_comp_grp_data *rsrc_data = NULL; + bool previously_acquired = false; + + if (comp_grp_id >= CAM_TFE_BUS_COMP_GRP_0 && + comp_grp_id < CAM_TFE_BUS_COMP_GRP_MAX) { + /* Check if matching comp_grp has already been acquired */ + previously_acquired = cam_tfe_bus_match_comp_grp( + bus_priv, &comp_grp_local, comp_grp_id); + } + + if (!comp_grp_local) { + CAM_ERR(CAM_ISP, "Invalid comp_grp_id:%d", comp_grp_id); + return -ENODEV; + } + + rsrc_data = comp_grp_local->res_priv; + if (rsrc_data->acquire_dev_cnt >= rsrc_data->max_wm_per_comp_grp) { + CAM_ERR(CAM_ISP, "Many acquires comp_grp_id:%d", comp_grp_id); + return -ENODEV; + } + + if (!previously_acquired) { + comp_grp_local->tasklet_info = tasklet; + comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + rsrc_data->is_master = is_master; + rsrc_data->is_dual = is_dual; + + if (is_master) + rsrc_data->addr_sync_mode = 0; + else + rsrc_data->addr_sync_mode = 1; + } else { + rsrc_data = comp_grp_local->res_priv; + /* Do not support runtime change in composite mask */ + if (comp_grp_local->res_state == + CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u", + comp_grp_local->res_state, + rsrc_data->comp_grp_id); + return -EBUSY; + } + } + + CAM_DBG(CAM_ISP, "rsrc_data %x acquire_dev_cnt 
%d", rsrc_data, rsrc_data->acquire_dev_cnt); + rsrc_data->source_grp = source_group; + rsrc_data->out_rsrc[rsrc_data->acquire_dev_cnt] = out_rsrc; + rsrc_data->acquire_dev_cnt++; + *comp_grp = comp_grp_local; + CAM_DBG(CAM_ISP, "Acquire comp_grp id:%u", rsrc_data->comp_grp_id); + + return rc; +} + +static int cam_tfe_bus_release_comp_grp( + struct cam_tfe_bus_priv *bus_priv, + struct cam_isp_resource_node *comp_grp) +{ + struct cam_isp_resource_node *comp_grp_local = NULL; + struct cam_tfe_bus_comp_grp_data *comp_rsrc_data = NULL; + int match_found = 0; + + if (!comp_grp) { + CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", comp_grp); + return -EINVAL; + } + + if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, "Already released Comp Grp"); + return 0; + } + + if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, "Invalid State %d", + comp_grp->res_state); + return -EBUSY; + } + + comp_rsrc_data = comp_grp->res_priv; + CAM_DBG(CAM_ISP, "Comp Grp id %u", comp_rsrc_data->comp_grp_id); + + list_for_each_entry(comp_grp_local, &bus_priv->used_comp_grp, list) { + if (comp_grp_local == comp_grp) { + match_found = 1; + break; + } + } + + if (!match_found) { + CAM_ERR(CAM_ISP, "Could not find comp_grp_id:%u", + comp_rsrc_data->comp_grp_id); + return -ENODEV; + } + + comp_rsrc_data->acquire_dev_cnt--; + if (comp_rsrc_data->acquire_dev_cnt == 0) { + list_del(&comp_grp_local->list); + + comp_rsrc_data->addr_sync_mode = 0; + comp_rsrc_data->composite_mask = 0; + + comp_grp_local->tasklet_info = NULL; + comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + list_add_tail(&comp_grp_local->list, &bus_priv->free_comp_grp); + CAM_DBG(CAM_ISP, "Comp Grp id %u released", + comp_rsrc_data->comp_grp_id); + } + + return 0; +} + +static int cam_tfe_bus_start_comp_grp( + struct cam_isp_resource_node *comp_grp) +{ + int rc = 0; + uint32_t val; + struct cam_tfe_bus_comp_grp_data *rsrc_data = NULL; + struct 
cam_tfe_bus_common_data *common_data = NULL; + uint32_t bus_irq_reg_mask_0 = 0; + + rsrc_data = comp_grp->res_priv; + common_data = rsrc_data->common_data; + + CAM_DBG(CAM_ISP, "TFE:%d comp_grp_id:%d streaming state:%d mask:0x%x", + common_data->core_index, rsrc_data->comp_grp_id, + comp_grp->res_state, rsrc_data->composite_mask); + + if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + return 0; + + if (rsrc_data->is_dual) { + if (rsrc_data->is_master) { + val = cam_io_r(common_data->mem_base + + common_data->common_reg->comp_cfg_0); + val |= (0x1 << (rsrc_data->comp_grp_id + 16)); + cam_io_w_mb(val, common_data->mem_base + + common_data->common_reg->comp_cfg_0); + + val = cam_io_r(common_data->mem_base + + common_data->common_reg->comp_cfg_1); + val |= (0x1 << rsrc_data->comp_grp_id); + cam_io_w_mb(val, common_data->mem_base + + common_data->common_reg->comp_cfg_1); + } else { + val = cam_io_r(common_data->mem_base + + common_data->common_reg->comp_cfg_0); + val |= (0x1 << rsrc_data->comp_grp_id); + val |= (0x1 << (rsrc_data->comp_grp_id + 16)); + cam_io_w(val, common_data->mem_base + + common_data->common_reg->comp_cfg_0); + + val = cam_io_r(common_data->mem_base + + common_data->common_reg->comp_cfg_1); + val |= (0x1 << rsrc_data->comp_grp_id); + cam_io_w(val, common_data->mem_base + + common_data->common_reg->comp_cfg_1); + } + } + + if (rsrc_data->is_dual && !rsrc_data->is_master) + goto end; + + /* Update the composite done mask in bus irq mask*/ + bus_irq_reg_mask_0 = cam_io_r(common_data->mem_base + + common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]); + bus_irq_reg_mask_0 |= (0x1 << (rsrc_data->comp_grp_id + + rsrc_data->common_data->comp_done_shift)); + cam_io_w_mb(bus_irq_reg_mask_0, common_data->mem_base + + common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]); + + CAM_DBG(CAM_ISP, "TFE:%d start COMP_GRP:%d bus_irq_mask_0 0x%x", + common_data->core_index, rsrc_data->comp_grp_id, + bus_irq_reg_mask_0); + +end: + comp_grp->res_state = 
CAM_ISP_RESOURCE_STATE_STREAMING; + + return rc; +} + +static int cam_tfe_bus_stop_comp_grp( + struct cam_isp_resource_node *comp_grp) +{ + struct cam_tfe_bus_comp_grp_data *rsrc_data = NULL; + struct cam_tfe_bus_common_data *common_data = NULL; + uint32_t bus_irq_reg_mask_0 = 0; + + if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) + return 0; + + rsrc_data = (struct cam_tfe_bus_comp_grp_data *)comp_grp->res_priv; + common_data = rsrc_data->common_data; + + /* Update the composite done mask in bus irq mask*/ + bus_irq_reg_mask_0 = cam_io_r(common_data->mem_base + + common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]); + bus_irq_reg_mask_0 &= ~(0x1 << (rsrc_data->comp_grp_id + + rsrc_data->common_data->comp_done_shift)); + cam_io_w_mb(bus_irq_reg_mask_0, common_data->mem_base + + common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]); + comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + return 0; +} + +static int cam_tfe_bus_init_comp_grp(uint32_t index, + struct cam_hw_soc_info *soc_info, + struct cam_tfe_bus_priv *bus_priv, + struct cam_tfe_bus_hw_info *hw_info, + struct cam_isp_resource_node *comp_grp) +{ + struct cam_tfe_bus_comp_grp_data *rsrc_data = NULL; + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_bus_comp_grp_data), + GFP_KERNEL); + if (!rsrc_data) + return -ENOMEM; + + comp_grp->res_priv = rsrc_data; + + comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&comp_grp->list); + + comp_grp->res_id = index; + rsrc_data->comp_grp_id = index; + rsrc_data->common_data = &bus_priv->common_data; + rsrc_data->max_wm_per_comp_grp = + bus_priv->max_wm_per_comp_grp; + + rsrc_data->out_rsrc = CAM_MEM_ZALLOC(sizeof(struct cam_isp_resource_node *) * + rsrc_data->max_wm_per_comp_grp, GFP_KERNEL); + if (!rsrc_data->out_rsrc) + return -ENOMEM; + + list_add_tail(&comp_grp->list, &bus_priv->free_comp_grp); + + comp_grp->hw_intf = bus_priv->common_data.hw_intf; + + return 0; +} + +static int cam_tfe_bus_deinit_comp_grp( + 
struct cam_isp_resource_node *comp_grp) +{ + struct cam_tfe_bus_comp_grp_data *rsrc_data = + comp_grp->res_priv; + + comp_grp->start = NULL; + comp_grp->stop = NULL; + comp_grp->top_half_handler = NULL; + comp_grp->bottom_half_handler = NULL; + comp_grp->hw_intf = NULL; + + list_del_init(&comp_grp->list); + comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + + comp_grp->res_priv = NULL; + + if (!rsrc_data) { + CAM_ERR(CAM_ISP, "comp_grp_priv is NULL"); + return -ENODEV; + } + CAM_MEM_FREE(rsrc_data->out_rsrc); + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static int cam_tfe_bus_get_secure_mode(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *secure_mode = cmd_args; + struct cam_tfe_bus_tfe_out_data *rsrc_data; + uint32_t *mode; + + rsrc_data = (struct cam_tfe_bus_tfe_out_data *) + secure_mode->res->res_priv; + mode = (uint32_t *)secure_mode->data; + *mode = (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) ? + true : false; + + return 0; +} + +static int cam_tfe_bus_acquire_tfe_out(void *priv, void *acquire_args, + uint32_t args_size) +{ + struct cam_tfe_bus_priv *bus_priv = priv; + struct cam_tfe_acquire_args *acq_args = acquire_args; + struct cam_tfe_hw_tfe_out_acquire_args *out_acquire_args; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_tfe_bus_tfe_out_data *rsrc_data = NULL; + enum cam_tfe_bus_tfe_out_id tfe_out_res_id; + enum cam_tfe_bus_comp_grp_id comp_grp_id; + int i, rc = -ENODEV; + uint32_t secure_caps = 0, mode; + uint32_t format, num_wm, client_done_mask = 0; + + if (!bus_priv || !acquire_args) { + CAM_ERR(CAM_ISP, "Invalid Param"); + return -EINVAL; + } + + out_acquire_args = &acq_args->tfe_out; + format = out_acquire_args->out_port_info->format; + + CAM_DBG(CAM_ISP, "resid 0x%x fmt:%d, sec mode:%d wm mode:%d", + out_acquire_args->out_port_info->res_id, format, + out_acquire_args->out_port_info->secure_mode, + out_acquire_args->out_port_info->wm_mode); + CAM_DBG(CAM_ISP, "width:%d, 
height:%d stride:%d", + out_acquire_args->out_port_info->width, + out_acquire_args->out_port_info->height, + out_acquire_args->out_port_info->stride); + + tfe_out_res_id = cam_tfe_bus_get_out_res_id( + out_acquire_args->out_port_info->res_id); + if (tfe_out_res_id == CAM_TFE_BUS_TFE_OUT_MAX) + return -ENODEV; + + num_wm = cam_tfe_bus_get_num_wm(tfe_out_res_id, format); + if (num_wm < 1) + return -EINVAL; + + rsrc_node = &bus_priv->tfe_out[tfe_out_res_id]; + if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d", + tfe_out_res_id, rsrc_node->res_state); + return -EBUSY; + } + + rsrc_data = rsrc_node->res_priv; + rsrc_data->common_data->event_cb = acq_args->event_cb; + rsrc_data->event_cb = acq_args->event_cb; + rsrc_data->priv = acq_args->priv; + rsrc_data->bus_priv = bus_priv; + + secure_caps = cam_tfe_bus_can_be_secure(rsrc_data->out_id); + mode = out_acquire_args->out_port_info->secure_mode; + mutex_lock(&rsrc_data->common_data->bus_mutex); + if (secure_caps) { + if (!rsrc_data->common_data->num_sec_out) { + rsrc_data->secure_mode = mode; + rsrc_data->common_data->secure_mode = mode; + } else { + if (mode == rsrc_data->common_data->secure_mode) { + rsrc_data->secure_mode = + rsrc_data->common_data->secure_mode; + } else { + rc = -EINVAL; + CAM_ERR_RATE_LIMIT(CAM_ISP, + "Mismatch: Acquire mode[%d], drvr mode[%d]", + rsrc_data->common_data->secure_mode, + mode); + mutex_unlock( + &rsrc_data->common_data->bus_mutex); + return -EINVAL; + } + } + rsrc_data->common_data->num_sec_out++; + } + mutex_unlock(&rsrc_data->common_data->bus_mutex); + + bus_priv->tasklet_info = acq_args->tasklet; + rsrc_data->num_wm = num_wm; + rsrc_node->is_rdi_primary_res = 0; + rsrc_node->res_id = out_acquire_args->out_port_info->res_id; + rsrc_node->cdm_ops = out_acquire_args->cdm_ops; + rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops; + + /* Acquire WM and retrieve COMP GRP ID */ + for (i = 0; i < num_wm; i++) { + 
rc = cam_tfe_bus_acquire_wm(bus_priv, + out_acquire_args->out_port_info, + &rsrc_data->wm_res[i], + acq_args->tasklet, + tfe_out_res_id, + i, &client_done_mask, + out_acquire_args->is_dual, + &comp_grp_id); + if (rc) { + CAM_ERR(CAM_ISP, + "TFE:%d WM acquire failed for Out %d rc=%d", + rsrc_data->common_data->core_index, + tfe_out_res_id, rc); + goto release_wm; + } + } + + /* Acquire composite group using COMP GRP ID */ + rc = cam_tfe_bus_acquire_comp_grp(bus_priv, + out_acquire_args->out_port_info, + acq_args->tasklet, + out_acquire_args->is_dual, + out_acquire_args->is_master, + &rsrc_data->comp_grp, + comp_grp_id, + rsrc_node, + rsrc_data->source_group); + if (rc) { + CAM_ERR(CAM_ISP, + "TFE%d Comp_Grp acquire fail for Out %d rc=%d", + rsrc_data->common_data->core_index, + tfe_out_res_id, rc); + return rc; + } + + rsrc_data->is_dual = out_acquire_args->is_dual; + rsrc_data->is_master = out_acquire_args->is_master; + + cam_tfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp, + client_done_mask); + + rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + out_acquire_args->rsrc_node = rsrc_node; + out_acquire_args->comp_grp_id = comp_grp_id; + return rc; + +release_wm: + for (i--; i >= 0; i--) + cam_tfe_bus_release_wm(bus_priv, rsrc_data->wm_res[i]); + + if (rsrc_data->comp_grp) + cam_tfe_bus_release_comp_grp(bus_priv, rsrc_data->comp_grp); + + return rc; +} + +static int cam_tfe_bus_release_tfe_out(void *priv, void *release_args, + uint32_t args_size) +{ + struct cam_tfe_bus_priv *bus_priv = priv; + struct cam_isp_resource_node *tfe_out = NULL; + struct cam_tfe_bus_tfe_out_data *rsrc_data = NULL; + uint32_t secure_caps = 0; + uint32_t i; + + if (!bus_priv || !release_args) { + CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK", + bus_priv, release_args); + return -EINVAL; + } + + tfe_out = (struct cam_isp_resource_node *)release_args; + rsrc_data = (struct cam_tfe_bus_tfe_out_data *)tfe_out->res_priv; + + if (tfe_out->res_state != 
CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Invalid resource state:%d res id:%d", + tfe_out->res_state, tfe_out->res_id); + } + + for (i = 0; i < rsrc_data->num_wm; i++) + cam_tfe_bus_release_wm(bus_priv, rsrc_data->wm_res[i]); + + rsrc_data->num_wm = 0; + + if (rsrc_data->comp_grp) + cam_tfe_bus_release_comp_grp(bus_priv, rsrc_data->comp_grp); + + rsrc_data->comp_grp = NULL; + + tfe_out->tasklet_info = NULL; + tfe_out->cdm_ops = NULL; + rsrc_data->cdm_util_ops = NULL; + + secure_caps = cam_tfe_bus_can_be_secure(rsrc_data->out_id); + mutex_lock(&rsrc_data->common_data->bus_mutex); + if (secure_caps) { + if (rsrc_data->secure_mode == + rsrc_data->common_data->secure_mode) { + rsrc_data->common_data->num_sec_out--; + rsrc_data->secure_mode = + CAM_SECURE_MODE_NON_SECURE; + } else { + /* + * The validity of the mode is properly + * checked while acquiring the output port. + * not expected to reach here, unless there is + * some corruption. + */ + CAM_ERR(CAM_ISP, "driver[%d],resource[%d] mismatch", + rsrc_data->common_data->secure_mode, + rsrc_data->secure_mode); + } + + if (!rsrc_data->common_data->num_sec_out) + rsrc_data->common_data->secure_mode = + CAM_SECURE_MODE_NON_SECURE; + } + mutex_unlock(&rsrc_data->common_data->bus_mutex); + + if (tfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) + tfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + return 0; +} + +static int cam_tfe_bus_start_tfe_out(void *hw_priv, + void *start_hw_args, uint32_t arg_size) +{ + struct cam_isp_resource_node *tfe_out = hw_priv; + struct cam_tfe_bus_tfe_out_data *rsrc_data = NULL; + struct cam_tfe_bus_common_data *common_data = NULL; + uint32_t bus_irq_reg_mask_0 = 0; + uint32_t rup_group_id = 0; + int rc = 0, i; + + if (!tfe_out) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + + rsrc_data = tfe_out->res_priv; + common_data = rsrc_data->common_data; + rup_group_id = rsrc_data->rup_group_id; + + CAM_DBG(CAM_ISP, "TFE:%d Start resource index %d", + 
common_data->core_index, rsrc_data->out_id); + + if (tfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "TFE:%d Invalid resource state:%d", + common_data->core_index, tfe_out->res_state); + return -EACCES; + } + + for (i = 0; i < rsrc_data->num_wm; i++) + rc = cam_tfe_bus_start_wm(rsrc_data->wm_res[i]); + + rc = cam_tfe_bus_start_comp_grp(rsrc_data->comp_grp); + + if (rsrc_data->is_dual && !rsrc_data->is_master && + !tfe_out->is_rdi_primary_res) + goto end; + + if (common_data->rup_irq_enable[rup_group_id]) + goto end; + + /* Update the composite regupdate mask in bus irq mask*/ + bus_irq_reg_mask_0 = cam_io_r(common_data->mem_base + + common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]); + bus_irq_reg_mask_0 |= (0x1 << rup_group_id); + cam_io_w_mb(bus_irq_reg_mask_0, common_data->mem_base + + common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]); + common_data->rup_irq_enable[rup_group_id] = true; + +end: + tfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static int cam_tfe_bus_stop_tfe_out(void *hw_priv, + void *stop_hw_args, uint32_t arg_size) +{ + struct cam_isp_resource_node *tfe_out = hw_priv; + struct cam_tfe_bus_tfe_out_data *rsrc_data = NULL; + struct cam_tfe_bus_common_data *common_data = NULL; + uint32_t bus_irq_reg_mask_0 = 0, rup_group = 0; + int rc = 0, i; + + if (!tfe_out) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + + rsrc_data = tfe_out->res_priv; + common_data = rsrc_data->common_data; + rup_group = rsrc_data->rup_group_id; + + if (tfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE || + tfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, "tfe_out res_state is %d", tfe_out->res_state); + return rc; + } + + rc = cam_tfe_bus_stop_comp_grp(rsrc_data->comp_grp); + + for (i = 0; i < rsrc_data->num_wm; i++) + rc = cam_tfe_bus_stop_wm(rsrc_data->wm_res[i]); + + + if (!common_data->rup_irq_enable[rup_group]) + goto end; + + /* disable composite regupdate 
mask in bus irq mask register*/ + bus_irq_reg_mask_0 = cam_io_r(common_data->mem_base + + common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]); + bus_irq_reg_mask_0 &= ~(0x1 << rup_group); + cam_io_w_mb(bus_irq_reg_mask_0, common_data->mem_base + + common_data->common_reg->irq_mask[CAM_TFE_BUS_IRQ_REG0]); + common_data->rup_irq_enable[rup_group] = false; + +end: + tfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + return rc; +} + +static int cam_tfe_bus_init_tfe_out_resource(uint32_t index, + struct cam_tfe_bus_priv *bus_priv, + struct cam_tfe_bus_hw_info *hw_info) +{ + struct cam_isp_resource_node *tfe_out = NULL; + struct cam_tfe_bus_tfe_out_data *rsrc_data = NULL; + int rc = 0; + int32_t tfe_out_id = hw_info->tfe_out_hw_info[index].tfe_out_id; + int i; + + if (tfe_out_id < 0 || + tfe_out_id >= CAM_TFE_BUS_TFE_OUT_MAX) { + CAM_ERR(CAM_ISP, "Init TFE Out failed, Invalid type=%d", + tfe_out_id); + return -EINVAL; + } + + tfe_out = &bus_priv->tfe_out[tfe_out_id]; + if (tfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE || + tfe_out->res_priv) { + CAM_ERR(CAM_ISP, "tfe_out_id %d has already been initialized", + tfe_out_id); + return -EFAULT; + } + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_bus_tfe_out_data), + GFP_KERNEL); + if (!rsrc_data) { + rc = -ENOMEM; + return rc; + } + + tfe_out->res_priv = rsrc_data; + + tfe_out->res_type = CAM_ISP_RESOURCE_TFE_OUT; + tfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&tfe_out->list); + + rsrc_data->composite_group = + hw_info->tfe_out_hw_info[index].composite_group; + rsrc_data->rup_group_id = + hw_info->tfe_out_hw_info[index].rup_group_id; + rsrc_data->out_id = + hw_info->tfe_out_hw_info[index].tfe_out_id; + rsrc_data->common_data = &bus_priv->common_data; + rsrc_data->max_width = + hw_info->tfe_out_hw_info[index].max_width; + rsrc_data->max_height = + hw_info->tfe_out_hw_info[index].max_height; + rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE; + + for (i = 0; i < 
CAM_TFE_BUS_MAX_MID_PER_PORT; i++) + rsrc_data->mid[i] = hw_info->tfe_out_hw_info[index].mid[i]; + + tfe_out->hw_intf = bus_priv->common_data.hw_intf; + + return 0; +} + +static int cam_tfe_bus_deinit_tfe_out_resource( + struct cam_isp_resource_node *tfe_out) +{ + struct cam_tfe_bus_tfe_out_data *rsrc_data = tfe_out->res_priv; + + if (tfe_out->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) { + /* + * This is not error. It can happen if the resource is + * never supported in the HW. + */ + CAM_DBG(CAM_ISP, "HW%d Res %d already deinitialized"); + return 0; + } + + tfe_out->start = NULL; + tfe_out->stop = NULL; + tfe_out->top_half_handler = NULL; + tfe_out->bottom_half_handler = NULL; + tfe_out->hw_intf = NULL; + + tfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&tfe_out->list); + tfe_out->res_priv = NULL; + + if (!rsrc_data) + return -ENOMEM; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static const char *cam_tfe_bus_rup_type( + uint32_t group_id) +{ + switch (group_id) { + case CAM_ISP_HW_TFE_IN_CAMIF: + return "CAMIF RUP"; + case CAM_ISP_HW_TFE_IN_RDI0: + return "RDI0 RUP"; + case CAM_ISP_HW_TFE_IN_RDI1: + return "RDI1 RUP"; + case CAM_ISP_HW_TFE_IN_RDI2: + return "RDI2 RUP"; + default: + return "invalid rup group"; + } +} +static int cam_tfe_bus_rup_bottom_half( + struct cam_tfe_bus_priv *bus_priv, + struct cam_tfe_irq_evt_payload *evt_payload) +{ + struct cam_tfe_bus_common_data *common_data; + struct cam_tfe_bus_tfe_out_data *out_rsrc_data; + struct cam_isp_hw_event_info evt_info; + uint32_t i, j; + + common_data = &bus_priv->common_data; + evt_info.hw_idx = bus_priv->common_data.core_index; + evt_info.res_type = CAM_ISP_RESOURCE_TFE_OUT; + + for (i = 0; i < CAM_TFE_BUS_RUP_GRP_MAX; i++) { + if (!(evt_payload->bus_irq_val[0] & + bus_priv->comp_rup_done_mask)) + break; + + if (evt_payload->bus_irq_val[0] & BIT(i)) { + for (j = 0; j < CAM_TFE_BUS_TFE_OUT_MAX; j++) { + out_rsrc_data = + (struct cam_tfe_bus_tfe_out_data *) + 
bus_priv->tfe_out[j].res_priv; + if ((out_rsrc_data->rup_group_id == i) && + (bus_priv->tfe_out[j].res_state == + CAM_ISP_RESOURCE_STATE_STREAMING)) + break; + } + + if (j == CAM_TFE_BUS_TFE_OUT_MAX) { + CAM_ERR(CAM_ISP, + "TFE:%d out rsc active status[0]:0x%x", + bus_priv->common_data.core_index, + evt_payload->bus_irq_val[0]); + continue; + } + + CAM_DBG(CAM_ISP, "TFE:%d Received %s", + bus_priv->common_data.core_index, + cam_tfe_bus_rup_type(i)); + evt_info.res_id = i; + if (out_rsrc_data->event_cb) { + out_rsrc_data->event_cb( + out_rsrc_data->priv, + CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + /* reset the rup bit */ + evt_payload->bus_irq_val[0] &= ~BIT(i); + } else + CAM_ERR(CAM_ISP, + "TFE:%d No event cb id:%lld evt id:%d", + bus_priv->common_data.core_index, + out_rsrc_data->out_id, evt_info.res_id); + } + } + + return 0; +} + +static uint32_t cam_tfe_bus_get_last_consumed_addr( + struct cam_tfe_bus_priv *bus_priv, + uint32_t out_id) +{ + uint32_t val = 0; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_tfe_bus_tfe_out_data *rsrc_data = NULL; + struct cam_tfe_bus_wm_resource_data *wm_rsrc_data = NULL; + + if (out_id >= CAM_TFE_BUS_TFE_OUT_MAX) { + CAM_ERR(CAM_ISP, "invalid out_id:%u", out_id); + return 0; + } + + rsrc_node = &bus_priv->tfe_out[out_id]; + rsrc_data = rsrc_node->res_priv; + wm_rsrc_data = rsrc_data->wm_res[PLANE_Y]->res_priv; + + val = cam_io_r_mb( + wm_rsrc_data->common_data->mem_base + + wm_rsrc_data->hw_regs->addr_status_0); + + return val; +} + +static int cam_tfe_bus_bufdone_bottom_half( + struct cam_tfe_bus_priv *bus_priv, + struct cam_tfe_irq_evt_payload *evt_payload) +{ + struct cam_tfe_bus_common_data *common_data; + struct cam_tfe_bus_tfe_out_data *out_rsrc_data; + struct cam_isp_hw_event_info evt_info; + struct cam_isp_resource_node *out_rsrc = NULL; + struct cam_tfe_bus_comp_grp_data *comp_rsrc_data; + struct cam_isp_hw_bufdone_event_info bufdone_evt_info = {0}; + uint32_t i; + + common_data = 
&bus_priv->common_data; + + for (i = 0; i < bus_priv->num_comp_grp; i++) { + if (!(evt_payload->bus_irq_val[0] & + bus_priv->comp_buf_done_mask)) + break; + + comp_rsrc_data = (struct cam_tfe_bus_comp_grp_data *) + bus_priv->comp_grp[i].res_priv; + + if (evt_payload->bus_irq_val[0] & + BIT(comp_rsrc_data->comp_grp_id + + bus_priv->common_data.comp_done_shift)) { + out_rsrc = comp_rsrc_data->out_rsrc[0]; + out_rsrc_data = out_rsrc->res_priv; + evt_info.res_type = out_rsrc->res_type; + evt_info.hw_idx = out_rsrc->hw_intf->hw_idx; + evt_info.res_id = out_rsrc->res_id; + bufdone_evt_info.res_id = out_rsrc->res_id; + bufdone_evt_info.comp_grp_id = comp_rsrc_data->comp_grp_id; + bufdone_evt_info.last_consumed_addr = + cam_tfe_bus_get_last_consumed_addr( + out_rsrc_data->bus_priv, + out_rsrc_data->out_id); + evt_info.event_data = (void *)&bufdone_evt_info; + + if (out_rsrc_data->event_cb) + out_rsrc_data->event_cb(out_rsrc_data->priv, + CAM_ISP_HW_EVENT_DONE, + (void *)&evt_info); + } + + evt_payload->bus_irq_val[0] &= + BIT(comp_rsrc_data->comp_grp_id + + bus_priv->common_data.comp_done_shift); + } + + return 0; +} + +static void cam_tfe_bus_error_bottom_half( + struct cam_tfe_bus_priv *bus_priv, + struct cam_tfe_irq_evt_payload *evt_payload) +{ + struct cam_tfe_bus_wm_resource_data *rsrc_data; + struct cam_tfe_bus_reg_offset_common *common_reg; + uint32_t i, overflow_status, image_size_violation_status; + uint32_t ccif_violation_status; + + common_reg = bus_priv->common_data.common_reg; + + CAM_INFO(CAM_ISP, "BUS IRQ[0]:0x%x BUS IRQ[1]:0x%x", + evt_payload->bus_irq_val[0], evt_payload->bus_irq_val[1]); + + overflow_status = cam_io_r_mb(bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->overflow_status); + + image_size_violation_status = cam_io_r_mb( + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->image_size_violation_status); + + ccif_violation_status = cam_io_r_mb(bus_priv->common_data.mem_base + + 
bus_priv->common_data.common_reg->ccif_violation_status); + + CAM_INFO(CAM_ISP, + "ccif violation status:0x%x image size violation:0x%x overflow status:0x%x", + ccif_violation_status, + image_size_violation_status, + overflow_status); + + /* Check the bus errors */ + if (evt_payload->bus_irq_val[0] & BIT(common_reg->cons_violation_shift)) + CAM_INFO(CAM_ISP, "CONS_VIOLATION"); + + if (evt_payload->bus_irq_val[0] & BIT(common_reg->violation_shift)) + CAM_INFO(CAM_ISP, "VIOLATION"); + + if (evt_payload->bus_irq_val[0] & + BIT(common_reg->image_size_violation)) { + CAM_INFO(CAM_ISP, "IMAGE_SIZE_VIOLATION val :0x%x", + evt_payload->image_size_violation_status); + + for (i = 0; i < bus_priv->num_client; i++) { + if (!(evt_payload->image_size_violation_status >> i)) + break; + + if (evt_payload->image_size_violation_status & BIT(i)) { + rsrc_data = bus_priv->bus_client[i].res_priv; + CAM_INFO(CAM_ISP, + "WM:%d width 0x%x height:0x%x format:%d stride:0x%x offset:0x%x encfg:0x%x", + i, + rsrc_data->acquired_width, + rsrc_data->acquired_height, + rsrc_data->format, + rsrc_data->acquired_stride, + rsrc_data->offset, + rsrc_data->en_cfg); + + CAM_INFO(CAM_ISP, + "WM:%d current width 0x%x height:0x%x stride:0x%x", + i, + rsrc_data->width, + rsrc_data->height, + rsrc_data->stride); + + } + } + } + + if (overflow_status) { + for (i = 0; i < bus_priv->num_client; i++) { + + if (!(evt_payload->overflow_status >> i)) + break; + + if (evt_payload->overflow_status & BIT(i)) { + rsrc_data = bus_priv->bus_client[i].res_priv; + CAM_INFO(CAM_ISP, + "WM:%d %s BUS OVERFLOW width0x%x height:0x%x format:%d stride:0x%x offset:0x%x encfg:%x", + i, + rsrc_data->hw_regs->client_name, + rsrc_data->acquired_width, + rsrc_data->acquired_height, + rsrc_data->format, + rsrc_data->acquired_stride, + rsrc_data->offset, + rsrc_data->en_cfg); + + CAM_INFO(CAM_ISP, + "WM:%d current width:0x%x height:0x%x stride:0x%x", + i, + rsrc_data->width, + rsrc_data->height, + rsrc_data->stride); + } + } + } +} + 
+static int cam_tfe_bus_bottom_half(void *priv, + bool rup_process, struct cam_tfe_irq_evt_payload *evt_payload, + bool error_process) +{ + struct cam_tfe_bus_priv *bus_priv; + uint32_t val; + + if (!priv || !evt_payload) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid priv param"); + return -EINVAL; + } + bus_priv = (struct cam_tfe_bus_priv *) priv; + + if (error_process) { + cam_tfe_bus_error_bottom_half(bus_priv, evt_payload); + goto end; + } + + /* if bus errors are there, mask all bus errors */ + if (evt_payload->bus_irq_val[0] & bus_priv->bus_irq_error_mask[0]) { + val = cam_io_r(bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->irq_mask[0]); + val &= ~bus_priv->bus_irq_error_mask[0]; + cam_io_w(val, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->irq_mask[0]); + + } + + if (rup_process) { + if (evt_payload->bus_irq_val[0] & + bus_priv->comp_rup_done_mask) + cam_tfe_bus_rup_bottom_half(bus_priv, evt_payload); + } else { + if (evt_payload->bus_irq_val[0] & + bus_priv->comp_buf_done_mask) + cam_tfe_bus_bufdone_bottom_half(bus_priv, evt_payload); + } + +end: + return 0; + +} + +static int cam_tfe_bus_update_wm(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_tfe_bus_priv *bus_priv; + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_buf_io_cfg *io_cfg; + struct cam_tfe_bus_tfe_out_data *tfe_out_data = NULL; + struct cam_tfe_bus_wm_resource_data *wm_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t i, j, size = 0; + uint32_t frame_inc = 0, val; + + bus_priv = (struct cam_tfe_bus_priv *) priv; + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + tfe_out_data = (struct cam_tfe_bus_tfe_out_data *) + update_buf->res->res_priv; + + cdm_util_ops = tfe_out_data->cdm_util_ops; + + if (!tfe_out_data || !cdm_util_ops) { + CAM_ERR(CAM_ISP, "Failed! 
Invalid data"); + return -EINVAL; + } + + if (update_buf->wm_update->num_buf != tfe_out_data->num_wm) { + CAM_ERR(CAM_ISP, + "Failed! Invalid number buffers:%d required:%d", + update_buf->wm_update->num_buf, tfe_out_data->num_wm); + return -EINVAL; + } + + reg_val_pair = &tfe_out_data->common_data->io_buf_update[0]; + io_cfg = update_buf->wm_update->io_cfg; + + for (i = 0, j = 0; i < tfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_ISP, + "reg_val_pair %d exceeds the array limit %zu", + j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + wm_data = tfe_out_data->wm_res[i]->res_priv; + /* update width register */ + val = ((wm_data->height << 16) | (wm_data->width & 0xFFFF)); + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->image_cfg_0, val); + CAM_DBG(CAM_ISP, "WM:%d image height and width 0x%x", + wm_data->index, reg_val_pair[j-1]); + + val = wm_data->offset; + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->image_cfg_1, val); + CAM_DBG(CAM_ISP, "WM:%d xinit 0x%x", + wm_data->index, reg_val_pair[j-1]); + + if ((wm_data->index < 7) || ((wm_data->index >= 7) && + (wm_data->mode == CAM_ISP_TFE_WM_LINE_BASED_MODE)) || + (wm_data->out_id == CAM_TFE_BUS_TFE_OUT_PDAF) || + (wm_data->index >= 11 && wm_data->index <= 15)) { + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->image_cfg_2, + io_cfg->planes[i].plane_stride); + wm_data->stride = io_cfg->planes[i].plane_stride; + CAM_DBG(CAM_ISP, "WM %d image stride 0x%x", + wm_data->index, reg_val_pair[j-1]); + } + + frame_inc = io_cfg->planes[i].plane_stride * + io_cfg->planes[i].slice_height; + + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->image_addr, + update_buf->wm_update->image_buf[i]); + CAM_DBG(CAM_ISP, "WM %d image address 0x%x", + wm_data->index, reg_val_pair[j-1]); + update_buf->wm_update->image_buf_offset[i] = 0; + + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->frame_incr, 
frame_inc); + CAM_DBG(CAM_ISP, "WM %d frame_inc %d", + wm_data->index, reg_val_pair[j-1]); + + /* enable the WM */ + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->cfg, + wm_data->en_cfg); + } + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > update_buf->cmd.size) { + CAM_ERR(CAM_ISP, + "Failed! Buf size:%d insufficient, expected size:%d", + update_buf->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + update_buf->cmd.cmd_buf_addr, + num_regval_pairs, reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + update_buf->cmd.used_bytes = size * 4; + } else { + update_buf->cmd.used_bytes = 0; + CAM_DBG(CAM_ISP, + "No reg val pairs. num_wms: %u", + tfe_out_data->num_wm); + } + + return 0; +} + +static int cam_tfe_bus_update_hfr(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_tfe_bus_priv *bus_priv; + struct cam_isp_hw_get_cmd_update *update_hfr; + struct cam_tfe_bus_tfe_out_data *tfe_out_data = NULL; + struct cam_tfe_bus_wm_resource_data *wm_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + struct cam_isp_tfe_port_hfr_config *hfr_cfg = NULL; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t i, j, size = 0; + + bus_priv = (struct cam_tfe_bus_priv *) priv; + update_hfr = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + tfe_out_data = (struct cam_tfe_bus_tfe_out_data *) + update_hfr->res->res_priv; + + cdm_util_ops = tfe_out_data->cdm_util_ops; + + if (!tfe_out_data || !cdm_util_ops) { + CAM_ERR(CAM_ISP, "Failed! 
Invalid data"); + return -EINVAL; + } + + reg_val_pair = &tfe_out_data->common_data->io_buf_update[0]; + hfr_cfg = (struct cam_isp_tfe_port_hfr_config *)update_hfr->data; + + for (i = 0, j = 0; i < tfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_ISP, + "reg_val_pair %d exceeds the array limit %zu", + j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + wm_data = tfe_out_data->wm_res[i]->res_priv; + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->framedrop_pattern, + hfr_cfg->framedrop_pattern); + wm_data->framedrop_pattern = hfr_cfg->framedrop_pattern; + CAM_DBG(CAM_ISP, "WM:%d framedrop pattern 0x%x", + wm_data->index, wm_data->framedrop_pattern); + + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->framedrop_period, + hfr_cfg->framedrop_period); + wm_data->framedrop_period = hfr_cfg->framedrop_period; + CAM_DBG(CAM_ISP, "WM:%d framedrop period 0x%x", + wm_data->index, wm_data->framedrop_period); + + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->irq_subsample_period, + hfr_cfg->subsample_period); + wm_data->irq_subsample_period = hfr_cfg->subsample_period; + CAM_DBG(CAM_ISP, "WM:%d irq subsample period 0x%x", + wm_data->index, wm_data->irq_subsample_period); + + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, + wm_data->hw_regs->irq_subsample_pattern, + hfr_cfg->subsample_pattern); + wm_data->irq_subsample_pattern = hfr_cfg->subsample_pattern; + CAM_DBG(CAM_ISP, "WM:%d irq subsample pattern 0x%x", + wm_data->index, wm_data->irq_subsample_pattern); + } + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > update_hfr->cmd.size) { + CAM_ERR(CAM_ISP, + "Failed! 
Buf size:%d insufficient, expected size:%d", + update_hfr->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + update_hfr->cmd.cmd_buf_addr, + num_regval_pairs, reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + update_hfr->cmd.used_bytes = size * 4; + } else { + update_hfr->cmd.used_bytes = 0; + CAM_DBG(CAM_ISP, + "No reg val pairs. num_wms: %u", + tfe_out_data->num_wm); + } + + return 0; +} + +static int cam_tfe_bus_update_stripe_cfg(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_tfe_bus_priv *bus_priv; + struct cam_tfe_dual_update_args *stripe_args; + struct cam_tfe_bus_tfe_out_data *tfe_out_data = NULL; + struct cam_tfe_bus_wm_resource_data *wm_data = NULL; + struct cam_isp_tfe_dual_stripe_config *stripe_config; + uint32_t i; + + bus_priv = (struct cam_tfe_bus_priv *) priv; + stripe_args = (struct cam_tfe_dual_update_args *)cmd_args; + + tfe_out_data = (struct cam_tfe_bus_tfe_out_data *) + stripe_args->res->res_priv; + + if (!tfe_out_data) { + CAM_ERR(CAM_ISP, "Failed! 
Invalid data"); + return -EINVAL; + } + + if (stripe_args->res->res_id < CAM_ISP_TFE_OUT_RES_BASE || + stripe_args->res->res_id >= CAM_ISP_TFE_OUT_RES_MAX) + return 0; + + stripe_config = (struct cam_isp_tfe_dual_stripe_config *) + stripe_args->stripe_config; + + for (i = 0; i < tfe_out_data->num_wm; i++) { + stripe_config = &stripe_args->stripe_config[i]; + wm_data = tfe_out_data->wm_res[i]->res_priv; + wm_data->width = stripe_config->width; + wm_data->offset = stripe_config->offset; + CAM_DBG(CAM_ISP, "id:%x WM:%d width:0x%x offset:0x%x", + stripe_args->res->res_id, wm_data->index, + wm_data->width, wm_data->offset); + } + + return 0; +} + +static int cam_tfe_bus_get_res_id_for_mid( + struct cam_tfe_bus_priv *bus_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_tfe_bus_tfe_out_data *tfe_out_data = NULL; + struct cam_isp_hw_get_cmd_update *cmd_update = + (struct cam_isp_hw_get_cmd_update *)cmd_args; + struct cam_isp_hw_get_res_for_mid *get_res = NULL; + int i, j; + + get_res = (struct cam_isp_hw_get_res_for_mid *)cmd_update->data; + if (!get_res) { + CAM_ERR(CAM_ISP, + "invalid get resource for mid paramas"); + return -EINVAL; + } + + for (i = 0; i < bus_priv->num_out; i++) { + tfe_out_data = (struct cam_tfe_bus_tfe_out_data *) + bus_priv->tfe_out[i].res_priv; + + if (!tfe_out_data) + continue; + + for (j = 0; j < CAM_TFE_BUS_MAX_MID_PER_PORT; j++) { + if (tfe_out_data->mid[j] == get_res->mid) + goto end; + } + } + + if (i == bus_priv->num_out) { + CAM_ERR(CAM_ISP, + "mid:%d does not match with any out resource", + get_res->mid); + get_res->out_res_id = 0; + return -EINVAL; + } + +end: + CAM_INFO(CAM_ISP, "match mid :%d out resource:%d found", + get_res->mid, bus_priv->tfe_out[i].res_id); + get_res->out_res_id = bus_priv->tfe_out[i].res_id; + return 0; +} + +static int cam_tfe_bus_update_bw_limiter( + void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_tfe_bus_priv *bus_priv; + struct cam_isp_hw_get_cmd_update *wm_config_update; + struct 
cam_tfe_bus_tfe_out_data *tfe_out_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + struct cam_tfe_bus_wm_resource_data *wm_data = NULL; + struct cam_isp_tfe_wm_bw_limiter_config *wm_bw_limit_cfg = NULL; + uint32_t counter_limit = 0, reg_val = 0; + uint32_t *reg_val_pair, num_regval_pairs = 0; + uint32_t i, j, size = 0; + + bus_priv = (struct cam_tfe_bus_priv *) priv; + wm_config_update = (struct cam_isp_hw_get_cmd_update *) cmd_args; + wm_bw_limit_cfg = (struct cam_isp_tfe_wm_bw_limiter_config *) wm_config_update->data; + + tfe_out_data = (struct cam_tfe_bus_tfe_out_data *) wm_config_update->res->res_priv; + if (!tfe_out_data || !tfe_out_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid data"); + return -EINVAL; + } + + cdm_util_ops = tfe_out_data->cdm_util_ops; + reg_val_pair = &tfe_out_data->common_data->io_buf_update[0]; + for (i = 0, j = 0; i < tfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - (MAX_BUF_UPDATE_REG_NUM * 2))) { + CAM_ERR(CAM_ISP, + "reg_val_pair %d exceeds the array limit %zu for WM idx %d", + j, MAX_REG_VAL_PAIR_SIZE, i); + return -ENOMEM; + } + + /* Num WMs needs to match max planes */ + if (i >= CAM_PACKET_MAX_PLANES) { + CAM_WARN(CAM_ISP, "Num of WMs: %d exceeded max planes", i); + goto add_reg_pair; + } + + wm_data = (struct cam_tfe_bus_wm_resource_data *) tfe_out_data->wm_res[i]->res_priv; + if (!wm_data->hw_regs->bw_limit) { + CAM_ERR(CAM_ISP, + "WM: %d %s has no support for bw limiter", + wm_data->index, tfe_out_data->wm_res[i]->res_name); + return -EINVAL; + } + + counter_limit = wm_bw_limit_cfg->counter_limit[i]; + + /* Validate max counter limit */ + if (counter_limit > wm_data->common_data->max_bw_counter_limit) { + CAM_WARN(CAM_ISP, "Invalid counter limit: 0x%x capping to max: 0x%x", + wm_bw_limit_cfg->counter_limit[i], + wm_data->common_data->max_bw_counter_limit); + counter_limit = wm_data->common_data->max_bw_counter_limit; + } + + if (wm_bw_limit_cfg->enable_limiter && counter_limit) { + reg_val = 1; + 
reg_val |= (counter_limit << wm_data->common_data->counter_limit_shift); + } else { + reg_val = 0; + } + + wm_data->limiter_blob_status = true; + + CAM_TFE_ADD_REG_VAL_PAIR(reg_val_pair, j, wm_data->hw_regs->bw_limit, reg_val); + CAM_DBG(CAM_ISP, "WM: %d for %s bw_limter: 0x%x", + wm_data->index, tfe_out_data->wm_res[i]->res_name, reg_val_pair[j-1]); + } + +add_reg_pair: + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random(num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > wm_config_update->cmd.size) { + CAM_ERR(CAM_ISP, "Failed! Buf size:%d insufficient, expected size:%d", + wm_config_update->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + wm_config_update->cmd.cmd_buf_addr, num_regval_pairs, reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + wm_config_update->cmd.used_bytes = size * 4; + } else { + CAM_DBG(CAM_ISP, "No reg val pairs. 
num_wms: %u", tfe_out_data->num_wm); + wm_config_update->cmd.used_bytes = 0; + } + + return 0; +} + +static int cam_tfe_bus_dump_bus_info( + struct cam_tfe_bus_priv *bus_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_tfe_bus_tfe_out_data *tfe_out_data = NULL; + struct cam_isp_hw_get_cmd_update *cmd_update = + (struct cam_isp_hw_get_cmd_update *)cmd_args; + struct cam_tfe_bus_wm_resource_data *wm_data; + struct cam_tfe_bus_common_data *common_data; + uint32_t i, addr_status0, addr_status1, addr_status2, addr_status3; + + tfe_out_data = (struct cam_tfe_bus_tfe_out_data *) + cmd_update->res->res_priv; + common_data = tfe_out_data->common_data; + + for (i = 0; i < tfe_out_data->num_wm; i++) { + wm_data = tfe_out_data->wm_res[i]->res_priv; + addr_status0 = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->addr_status_0); + addr_status1 = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->addr_status_1); + addr_status2 = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->addr_status_2); + addr_status3 = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->addr_status_3); + CAM_INFO(CAM_ISP, + "TFE:%d WM:%d %s last consumed addr:0x%x last frame addr:0x%x fifo cnt:0x%x cur clt addr:0x%x", + common_data->hw_intf->hw_idx, + wm_data->index, + wm_data->hw_regs->client_name, + addr_status0, + addr_status1, + addr_status2, + addr_status3); + + CAM_INFO(CAM_ISP, + "WM:%d %s width0x%x height:0x%x format:%d stride:0x%x offset:0x%x encfg:%x", + wm_data->index, + wm_data->hw_regs->client_name, + wm_data->acquired_width, + wm_data->acquired_height, + wm_data->format, + wm_data->acquired_stride, + wm_data->offset, + wm_data->en_cfg); + + CAM_INFO(CAM_ISP, + "WM:%d current width:0x%x height:0x%x stride:0x%x", + wm_data->index, + wm_data->width, + wm_data->height, + wm_data->stride); + } + + for (i = 0; i < bus_priv->num_client; i++) { + wm_data = bus_priv->bus_client[i].res_priv; + /* disable WM */ + cam_io_w_mb(0, common_data->mem_base + + 
wm_data->hw_regs->cfg); + + } + return 0; +} + +static int cam_tfe_bus_init_hw(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_tfe_bus_priv *bus_priv = hw_priv; + uint32_t i, top_irq_reg_mask[3] = {0}; + int rc = -EINVAL; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + top_irq_reg_mask[0] = (1 << bus_priv->top_bus_wr_irq_shift); + + rc = cam_tfe_irq_config(bus_priv->common_data.tfe_core_data, + top_irq_reg_mask, CAM_TFE_TOP_IRQ_REG_NUM, true); + if (rc) + return rc; + + /* configure the error irq */ + for (i = 0; i < CAM_TFE_BUS_IRQ_REGISTERS_MAX; i++) + cam_io_w(bus_priv->bus_irq_error_mask[i], + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->irq_mask[i]); + + return 0; +} + +static int cam_tfe_bus_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size) +{ + struct cam_tfe_bus_priv *bus_priv = hw_priv; + uint32_t top_irq_reg_mask[3] = {0}; + int rc = 0; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Error: Invalid args"); + return -EINVAL; + } + top_irq_reg_mask[0] = (1 << bus_priv->top_bus_wr_irq_shift); + rc = cam_tfe_irq_config(bus_priv->common_data.tfe_core_data, + top_irq_reg_mask, CAM_TFE_TOP_IRQ_REG_NUM, false); + if (rc) + return rc; + + /* configure the error irq */ + cam_io_w(0, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->irq_mask[0]); + + cam_io_w_mb(0, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->irq_mask[1]); + + return rc; +} + +static int cam_tfe_bus_process_cmd(void *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + struct cam_tfe_bus_priv *bus_priv; + int rc = -EINVAL; + uint32_t i, val; + bool *support_consumed_addr; + bool *pdaf_rdi2_mux_en; + struct cam_isp_hw_done_event_data *done; + + if (!priv || !cmd_args) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_BUF_UPDATE: + rc = cam_tfe_bus_update_wm(priv, 
cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_HFR_UPDATE: + rc = cam_tfe_bus_update_hfr(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_WM_SECURE_MODE: + rc = cam_tfe_bus_get_secure_mode(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_STRIPE_UPDATE: + rc = cam_tfe_bus_update_stripe_cfg(priv, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ: + bus_priv = (struct cam_tfe_bus_priv *) priv; + /* disable the bus error interrupts */ + for (i = 0; i < CAM_TFE_BUS_IRQ_REGISTERS_MAX; i++) { + val = cam_io_r(bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->irq_mask[i]); + val &= ~bus_priv->bus_irq_error_mask[i]; + cam_io_w(val, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->irq_mask[i]); + } + break; + case CAM_ISP_HW_CMD_IS_CONSUMED_ADDR_SUPPORT: + bus_priv = (struct cam_tfe_bus_priv *) priv; + support_consumed_addr = (bool *)cmd_args; + *support_consumed_addr = + bus_priv->common_data.support_consumed_addr; + break; + case CAM_ISP_HW_CMD_GET_RES_FOR_MID: + rc = cam_tfe_bus_get_res_id_for_mid(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_DUMP_BUS_INFO: + rc = cam_tfe_bus_dump_bus_info(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_IS_PDAF_RDI2_MUX_EN: + bus_priv = (struct cam_tfe_bus_priv *) priv; + pdaf_rdi2_mux_en = (bool *)cmd_args; + *pdaf_rdi2_mux_en = bus_priv->common_data.pdaf_rdi2_mux_en; + break; + case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG: + rc = cam_tfe_bus_update_bw_limiter(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR: + bus_priv = (struct cam_tfe_bus_priv *) priv; + done = (struct cam_isp_hw_done_event_data *) cmd_args; + done->last_consumed_addr = cam_tfe_bus_get_last_consumed_addr( + bus_priv, done->resource_handle); + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d", + cmd_type); + break; + } + + return rc; +} + +int cam_tfe_bus_init( + struct cam_hw_soc_info *soc_info, + 
struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *core_data, + struct cam_tfe_bus **tfe_bus) +{ + int i, rc = 0; + struct cam_tfe_bus_priv *bus_priv = NULL; + struct cam_tfe_bus *tfe_bus_local; + struct cam_tfe_bus_hw_info *hw_info = bus_hw_info; + + if (!soc_info || !hw_intf || !bus_hw_info) { + CAM_ERR(CAM_ISP, + "Invalid params soc_info:%pK hw_intf:%pK hw_info%pK", + soc_info, hw_intf, bus_hw_info); + rc = -EINVAL; + goto end; + } + + tfe_bus_local = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_bus), GFP_KERNEL); + if (!tfe_bus_local) { + CAM_DBG(CAM_ISP, "Failed to alloc for tfe_bus"); + rc = -ENOMEM; + goto end; + } + + bus_priv = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_bus_priv), + GFP_KERNEL); + if (!bus_priv) { + CAM_DBG(CAM_ISP, "Failed to alloc for tfe_bus_priv"); + rc = -ENOMEM; + goto free_bus_local; + } + tfe_bus_local->bus_priv = bus_priv; + + bus_priv->num_client = hw_info->num_client; + bus_priv->num_out = hw_info->num_out; + bus_priv->num_comp_grp = hw_info->num_comp_grp; + bus_priv->max_wm_per_comp_grp = hw_info->max_wm_per_comp_grp; + bus_priv->top_bus_wr_irq_shift = hw_info->top_bus_wr_irq_shift; + bus_priv->common_data.comp_done_shift = hw_info->comp_done_shift; + + bus_priv->common_data.num_sec_out = 0; + bus_priv->common_data.secure_mode = CAM_SECURE_MODE_NON_SECURE; + bus_priv->common_data.core_index = soc_info->index; + bus_priv->common_data.mem_base = + CAM_SOC_GET_REG_MAP_START(soc_info, TFE_CORE_BASE_IDX); + bus_priv->common_data.hw_intf = hw_intf; + bus_priv->common_data.tfe_core_data = core_data; + bus_priv->common_data.common_reg = &hw_info->common_reg; + bus_priv->comp_buf_done_mask = hw_info->comp_buf_done_mask; + bus_priv->comp_rup_done_mask = hw_info->comp_rup_done_mask; + bus_priv->common_data.support_consumed_addr = + hw_info->support_consumed_addr; + bus_priv->common_data.pdaf_rdi2_mux_en = hw_info->pdaf_rdi2_mux_en; + bus_priv->common_data.rdi_width = hw_info->rdi_width; + bus_priv->common_data.max_bw_counter_limit = 
hw_info->max_bw_counter_limit; + bus_priv->common_data.counter_limit_shift = hw_info->counter_limit_shift; + bus_priv->common_data.counter_limit_mask = hw_info->counter_limit_mask; + + for (i = 0; i < CAM_TFE_BUS_IRQ_REGISTERS_MAX; i++) + bus_priv->bus_irq_error_mask[i] = + hw_info->bus_irq_error_mask[i]; + + if (strnstr(soc_info->compatible, "lite", + strlen(soc_info->compatible)) != NULL) + bus_priv->common_data.is_lite = true; + else + bus_priv->common_data.is_lite = false; + + for (i = 0; i < CAM_TFE_BUS_RUP_GRP_MAX; i++) + bus_priv->common_data.rup_irq_enable[i] = false; + + mutex_init(&bus_priv->common_data.bus_mutex); + + INIT_LIST_HEAD(&bus_priv->free_comp_grp); + INIT_LIST_HEAD(&bus_priv->used_comp_grp); + + for (i = 0; i < bus_priv->num_client; i++) { + rc = cam_tfe_bus_init_wm_resource(i, bus_priv, bus_hw_info, + &bus_priv->bus_client[i]); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Init WM failed rc=%d", rc); + goto deinit_wm; + } + } + + for (i = 0; i < bus_priv->num_comp_grp; i++) { + rc = cam_tfe_bus_init_comp_grp(i, soc_info, + bus_priv, bus_hw_info, + &bus_priv->comp_grp[i]); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc); + goto deinit_comp_grp; + } + } + + for (i = 0; i < bus_priv->num_out; i++) { + rc = cam_tfe_bus_init_tfe_out_resource(i, bus_priv, + bus_hw_info); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Init TFE Out failed rc=%d", rc); + goto deinit_tfe_out; + } + } + + spin_lock_init(&bus_priv->common_data.spin_lock); + + tfe_bus_local->hw_ops.reserve = cam_tfe_bus_acquire_tfe_out; + tfe_bus_local->hw_ops.release = cam_tfe_bus_release_tfe_out; + tfe_bus_local->hw_ops.start = cam_tfe_bus_start_tfe_out; + tfe_bus_local->hw_ops.stop = cam_tfe_bus_stop_tfe_out; + tfe_bus_local->hw_ops.init = cam_tfe_bus_init_hw; + tfe_bus_local->hw_ops.deinit = cam_tfe_bus_deinit_hw; + tfe_bus_local->bottom_half_handler = cam_tfe_bus_bottom_half; + tfe_bus_local->hw_ops.process_cmd = cam_tfe_bus_process_cmd; + + *tfe_bus = tfe_bus_local; + + return rc; + 
+deinit_tfe_out: + if (i < 0) + i = CAM_TFE_BUS_TFE_OUT_MAX; + for (--i; i >= 0; i--) + cam_tfe_bus_deinit_tfe_out_resource(&bus_priv->tfe_out[i]); + +deinit_comp_grp: + if (i < 0) + i = bus_priv->num_comp_grp; + for (--i; i >= 0; i--) + cam_tfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]); + +deinit_wm: + if (i < 0) + i = bus_priv->num_client; + for (--i; i >= 0; i--) + cam_tfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]); + + CAM_MEM_FREE(tfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(tfe_bus_local); + +end: + return rc; +} + +int cam_tfe_bus_deinit( + struct cam_tfe_bus **tfe_bus) +{ + int i, rc = 0; + struct cam_tfe_bus_priv *bus_priv = NULL; + struct cam_tfe_bus *tfe_bus_local; + + if (!tfe_bus || !*tfe_bus) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + tfe_bus_local = *tfe_bus; + bus_priv = tfe_bus_local->bus_priv; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "bus_priv is NULL"); + rc = -ENODEV; + goto free_bus_local; + } + + for (i = 0; i < bus_priv->num_client; i++) { + rc = cam_tfe_bus_deinit_wm_resource( + &bus_priv->bus_client[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Deinit WM failed rc=%d", rc); + } + + for (i = 0; i < bus_priv->num_comp_grp; i++) { + rc = cam_tfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Deinit Comp Grp failed rc=%d", rc); + } + + for (i = 0; i < CAM_TFE_BUS_TFE_OUT_MAX; i++) { + rc = cam_tfe_bus_deinit_tfe_out_resource( + &bus_priv->tfe_out[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Deinit TFE Out failed rc=%d", rc); + } + + INIT_LIST_HEAD(&bus_priv->free_comp_grp); + INIT_LIST_HEAD(&bus_priv->used_comp_grp); + + mutex_destroy(&bus_priv->common_data.bus_mutex); + CAM_MEM_FREE(tfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(tfe_bus_local); + + *tfe_bus = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.h 
new file mode 100644 index 0000000000..218952fe73 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_bus.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + + +#ifndef _CAM_TFE_BUS_H_ +#define _CAM_TFE_BUS_H_ + +#include "cam_soc_util.h" +#include "cam_isp_hw.h" +#include "cam_tfe_hw_intf.h" + +#define CAM_TFE_BUS_MAX_CLIENTS 16 +#define CAM_TFE_BUS_MAX_SUB_GRPS 4 +#define CAM_TFE_BUS_MAX_PERF_CNT_REG 8 +#define CAM_TFE_BUS_MAX_IRQ_REGISTERS 2 +#define CAM_TFE_BUS_CLIENT_NAME_MAX_LENGTH 32 + +#define CAM_TFE_BUS_1_0 0x1000 +#define CAM_TFE_BUS_MAX_MID_PER_PORT 2 + + +#define CAM_TFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val) \ + do { \ + buf_array[(index)++] = offset; \ + buf_array[(index)++] = val; \ + } while (0) + +#define ALIGNUP(value, alignment) \ + ((value + alignment - 1) / alignment * alignment) + +typedef int (*CAM_BUS_HANDLER_BOTTOM_HALF)(void *bus_priv, + bool rup_process, struct cam_tfe_irq_evt_payload *evt_payload, + bool error_process); + +enum cam_tfe_bus_plane_type { + PLANE_Y, + PLANE_C, + PLANE_MAX, +}; + + +enum cam_tfe_bus_tfe_core_id { + CAM_TFE_BUS_TFE_CORE_0, + CAM_TFE_BUS_TFE_CORE_1, + CAM_TFE_BUS_TFE_CORE_2, + CAM_TFE_BUS_TFE_CORE_MAX, +}; + +enum cam_tfe_bus_comp_grp_id { + CAM_TFE_BUS_COMP_GRP_0, + CAM_TFE_BUS_COMP_GRP_1, + CAM_TFE_BUS_COMP_GRP_2, + CAM_TFE_BUS_COMP_GRP_3, + CAM_TFE_BUS_COMP_GRP_4, + CAM_TFE_BUS_COMP_GRP_5, + CAM_TFE_BUS_COMP_GRP_6, + CAM_TFE_BUS_COMP_GRP_7, + CAM_TFE_BUS_COMP_GRP_8, + CAM_TFE_BUS_COMP_GRP_9, + CAM_TFE_BUS_COMP_GRP_10, + CAM_TFE_BUS_COMP_GRP_MAX, +}; + +enum cam_tfe_bus_rup_grp_id { + CAM_TFE_BUS_RUP_GRP_0, + CAM_TFE_BUS_RUP_GRP_1, + CAM_TFE_BUS_RUP_GRP_2, + CAM_TFE_BUS_RUP_GRP_3, + CAM_TFE_BUS_RUP_GRP_MAX, +}; + +enum cam_tfe_bus_tfe_out_id { + CAM_TFE_BUS_TFE_OUT_RDI0, + 
CAM_TFE_BUS_TFE_OUT_RDI1, + CAM_TFE_BUS_TFE_OUT_RDI2, + CAM_TFE_BUS_TFE_OUT_FULL, + CAM_TFE_BUS_TFE_OUT_RAW_DUMP, + CAM_TFE_BUS_TFE_OUT_PDAF, + CAM_TFE_BUS_TFE_OUT_STATS_HDR_BE, + CAM_TFE_BUS_TFE_OUT_STATS_HDR_BHIST, + CAM_TFE_BUS_TFE_OUT_STATS_TL_BG, + CAM_TFE_BUS_TFE_OUT_STATS_AWB_BG, + CAM_TFE_BUS_TFE_OUT_STATS_BF, + CAM_TFE_BUS_TFE_OUT_STATS_RS, + CAM_TFE_BUS_TFE_OUT_DS4, + CAM_TFE_BUS_TFE_OUT_DS16, + CAM_TFE_BUS_TFE_OUT_AI, + CAM_TFE_BUS_TFE_OUT_MAX, +}; + +/* + * struct cam_tfe_bus_reg_offset_common: + * + * @Brief: Common registers across all BUS Clients + */ +struct cam_tfe_bus_reg_offset_common { + uint32_t hw_version; + uint32_t cgc_ovd; + uint32_t comp_cfg_0; + uint32_t comp_cfg_1; + uint32_t frameheader_cfg[4]; + uint32_t pwr_iso_cfg; + uint32_t overflow_status_clear; + uint32_t ccif_violation_status; + uint32_t overflow_status; + uint32_t image_size_violation_status; + uint32_t perf_count_cfg[CAM_TFE_BUS_MAX_PERF_CNT_REG]; + uint32_t perf_count_val[CAM_TFE_BUS_MAX_PERF_CNT_REG]; + uint32_t perf_count_status; + uint32_t debug_status_top_cfg; + uint32_t debug_status_top; + uint32_t test_bus_ctrl; + uint32_t irq_mask[CAM_TFE_BUS_IRQ_REGISTERS_MAX]; + uint32_t irq_clear[CAM_TFE_BUS_IRQ_REGISTERS_MAX]; + uint32_t irq_status[CAM_TFE_BUS_IRQ_REGISTERS_MAX]; + uint32_t irq_cmd; + /* common register data */ + uint32_t cons_violation_shift; + uint32_t violation_shift; + uint32_t image_size_violation; +}; + +/* + * struct cam_tfe_bus_reg_offset_bus_client: + * + * @Brief: Register offsets for BUS Clients + */ +struct cam_tfe_bus_reg_offset_bus_client { + uint32_t cfg; + uint32_t image_addr; + uint32_t frame_incr; + uint32_t image_cfg_0; + uint32_t image_cfg_1; + uint32_t image_cfg_2; + uint32_t packer_cfg; + uint32_t bw_limit; + uint32_t frame_header_addr; + uint32_t frame_header_incr; + uint32_t frame_header_cfg; + uint32_t line_done_cfg; + uint32_t irq_subsample_period; + uint32_t irq_subsample_pattern; + uint32_t framedrop_period; + uint32_t framedrop_pattern; 
+ uint32_t addr_status_0; + uint32_t addr_status_1; + uint32_t addr_status_2; + uint32_t addr_status_3; + uint32_t debug_status_cfg; + uint32_t debug_status_0; + uint32_t debug_status_1; + uint32_t comp_group; + /*bus data */ + uint8_t client_name[CAM_TFE_BUS_CLIENT_NAME_MAX_LENGTH]; +}; + +/* + * struct cam_tfe_bus_tfe_out_hw_info: + * + * @Brief: HW capability of TFE Bus Client + * tfe_out_id Tfe out port id + * max_width Max width supported by the outport + * max_height Max height supported by outport + * composite_group Out port composite group id + * rup_group_id Reg update group of outport id + * mid: ouport mid value + */ +struct cam_tfe_bus_tfe_out_hw_info { + enum cam_tfe_bus_tfe_out_id tfe_out_id; + uint32_t max_width; + uint32_t max_height; + uint32_t composite_group; + uint32_t rup_group_id; + uint32_t mid[CAM_TFE_BUS_MAX_MID_PER_PORT]; +}; + +/* + * struct cam_tfe_bus_hw_info: + * + * @Brief: HW register info for entire Bus + * + * @common_reg: Common register details + * @num_client: Total number of write clients + * @bus_client_reg: Bus client register info + * @tfe_out_hw_info: TFE output capability + * @num_comp_grp: Number of composite group + * @max_wm_per_comp_grp: Max number of wm associated with one composite group + * @comp_done_shift: Mask shift for comp done mask + * @top_bus_wr_irq_shift: Mask shift for top level BUS WR irq + * @comp_buf_done_mask: Composite buf done bits mask + * @comp_rup_done_mask: Reg update done mask + * @bus_irq_error_mask: Bus irq error mask bits + * @rdi_width: RDI WM width + * @support_consumed_addr: Indicate if bus support consumed address + * @pdaf_rdi2_mux_en: Indicate is PDAF is muxed with RDI2 + * @max_bw_counter_limit: Max BW counter limit + * @counter_limit_shift: Mask shift for BW counter limit + * @counter_limit_mask: Default Mask of BW limit counter + */ +struct cam_tfe_bus_hw_info { + struct cam_tfe_bus_reg_offset_common common_reg; + uint32_t num_client; + struct cam_tfe_bus_reg_offset_bus_client + 
bus_client_reg[CAM_TFE_BUS_MAX_CLIENTS]; + uint32_t num_out; + struct cam_tfe_bus_tfe_out_hw_info + tfe_out_hw_info[CAM_TFE_BUS_TFE_OUT_MAX]; + uint32_t num_comp_grp; + uint32_t max_wm_per_comp_grp; + uint32_t comp_done_shift; + uint32_t top_bus_wr_irq_shift; + uint32_t comp_buf_done_mask; + uint32_t comp_rup_done_mask; + uint32_t bus_irq_error_mask[CAM_TFE_BUS_IRQ_REGISTERS_MAX]; + uint32_t rdi_width; + bool support_consumed_addr; + bool pdaf_rdi2_mux_en; + uint32_t max_bw_counter_limit; + uint32_t counter_limit_shift; + uint32_t counter_limit_mask; +}; + +/* + * struct cam_tfe_bus: + * + * @Brief: Bus interface structure + * + * @bus_priv: Private data of bus + * @hw_ops: Hardware interface functions + * @bottom_half_handler: Bottom Half handler function + */ +struct cam_tfe_bus { + void *bus_priv; + struct cam_hw_ops hw_ops; + CAM_BUS_HANDLER_BOTTOM_HALF bottom_half_handler; +}; + +/* + * cam_tfe_bus_init() + * + * @Brief: Initialize Bus layer + * + * @soc_info: Soc Information for the associated HW + * @hw_intf: HW Interface of HW to which this resource belongs + * @bus_hw_info: BUS HW info that contains details of BUS registers + * @core_data: Core data pointer used for top irq config + * @tfe_bus: Pointer to tfe_bus structure which will be filled + * and returned on successful initialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_bus_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *core_data, + struct cam_tfe_bus **tfe_bus); + +/* + * cam_tfe_bus_deinit() + * + * @Brief: Deinitialize Bus layer + * + * @tfe_bus: Pointer to tfe_bus structure to deinitialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_bus_deinit(struct cam_tfe_bus **tfe_bus); + +#endif /* _CAM_TFE_BUS_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c new 
file mode 100644 index 0000000000..736216f709 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c @@ -0,0 +1,3062 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "cam_cdm_util.h" +#include "cam_tasklet_util.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_tfe_soc.h" +#include "cam_tfe_core.h" +#include "cam_tfe_bus.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_compat.h" +#include "cam_common_util.h" +#include "cam_tfe_csid_hw_intf.h" +#include "cam_mem_mgr_api.h" + +static const char drv_name[] = "tfe"; + +#define CAM_TFE_HW_RESET_HW_AND_REG_VAL 0x1 +#define CAM_TFE_HW_RESET_HW_VAL 0x10000 +#define CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES 3 +#define CAM_TFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2 +#define CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES 3 + +struct cam_tfe_top_common_data { + struct cam_hw_soc_info *soc_info; + struct cam_hw_intf *hw_intf; + struct cam_tfe_top_reg_offset_common *common_reg; + struct cam_tfe_reg_dump_data *reg_dump_data; +}; + +struct cam_tfe_top_priv { + struct cam_tfe_top_common_data common_data; + struct cam_isp_resource_node in_rsrc[CAM_TFE_TOP_IN_PORT_MAX]; + unsigned long hw_clk_rate; + struct cam_axi_vote applied_axi_vote; + struct cam_axi_vote req_axi_vote[CAM_TFE_TOP_IN_PORT_MAX]; + unsigned long req_clk_rate[CAM_TFE_TOP_IN_PORT_MAX]; + struct cam_axi_vote last_vote[CAM_TFE_TOP_IN_PORT_MAX * + CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES]; + uint32_t last_counter; + uint64_t total_bw_applied; + enum cam_tfe_bw_control_action + axi_vote_control[CAM_TFE_TOP_IN_PORT_MAX]; + uint32_t irq_prepared_mask[3]; + void *tasklet_info; + struct timespec64 sof_ts; + struct timespec64 epoch_ts; + struct timespec64 eof_ts; + struct timespec64 error_ts; + 
uint32_t top_debug; +}; + +struct cam_tfe_camif_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_tfe_top_reg_offset_common *common_reg; + struct cam_tfe_camif_reg *camif_reg; + struct cam_tfe_camif_reg_data *reg_data; + struct cam_hw_soc_info *soc_info; + + + cam_hw_mgr_event_cb_func event_cb; + void *priv; + enum cam_isp_hw_sync_mode sync_mode; + uint32_t dsp_mode; + uint32_t pix_pattern; + uint32_t left_first_pixel; + uint32_t left_last_pixel; + uint32_t right_first_pixel; + uint32_t right_last_pixel; + uint32_t first_line; + uint32_t last_line; + bool enable_sof_irq_debug; + uint32_t irq_debug_cnt; + uint32_t camif_debug; + uint32_t camif_pd_enable; + uint32_t dual_tfe_sync_sel; + uint32_t hbi_value; + uint32_t vbi_value; + uint32_t qcfa_bin; + uint32_t bayer_bin; + uint32_t core_cfg; +}; + +struct cam_tfe_rdi_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_tfe_top_reg_offset_common *common_reg; + struct cam_tfe_rdi_reg *rdi_reg; + struct cam_tfe_rdi_reg_data *reg_data; + cam_hw_mgr_event_cb_func event_cb; + void *priv; + enum cam_isp_hw_sync_mode sync_mode; + uint32_t pix_pattern; + uint32_t left_first_pixel; + uint32_t left_last_pixel; + uint32_t first_line; + uint32_t last_line; +}; + +static int cam_tfe_validate_pix_pattern(uint32_t pattern) +{ + int rc; + + switch (pattern) { + case CAM_ISP_TFE_PATTERN_BAYER_RGRGRG: + case CAM_ISP_TFE_PATTERN_BAYER_GRGRGR: + case CAM_ISP_TFE_PATTERN_BAYER_BGBGBG: + case CAM_ISP_TFE_PATTERN_BAYER_GBGBGB: + case CAM_ISP_TFE_PATTERN_YUV_YCBYCR: + case CAM_ISP_TFE_PATTERN_YUV_YCRYCB: + case CAM_ISP_TFE_PATTERN_YUV_CBYCRY: + case CAM_ISP_TFE_PATTERN_YUV_CRYCBY: + rc = 0; + break; + default: + CAM_ERR(CAM_ISP, "Error Invalid pix pattern:%d", pattern); + rc = -EINVAL; + break; + } + return rc; +} + +static int cam_tfe_get_evt_payload(struct cam_tfe_hw_core_info *core_info, + struct cam_tfe_irq_evt_payload **evt_payload) +{ + spin_lock(&core_info->spin_lock); + if 
(list_empty(&core_info->free_payload_list)) { + *evt_payload = NULL; + spin_unlock(&core_info->spin_lock); + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core id 0x%x", + core_info->core_index); + return -ENODEV; + } + + *evt_payload = list_first_entry(&core_info->free_payload_list, + struct cam_tfe_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); + spin_unlock(&core_info->spin_lock); + + return 0; +} + +int cam_tfe_put_evt_payload(void *core_info, + struct cam_tfe_irq_evt_payload **evt_payload) +{ + struct cam_tfe_hw_core_info *tfe_core_info = core_info; + unsigned long flags; + + if (!core_info) { + CAM_ERR(CAM_ISP, "Invalid param core_info NULL"); + return -EINVAL; + } + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "No payload to put"); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_tfe_irq_evt_payload); + spin_lock_irqsave(&tfe_core_info->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &tfe_core_info->free_payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&tfe_core_info->spin_lock, flags); + + return 0; +} + +int cam_tfe_get_hw_caps(void *hw_priv, void *get_hw_cap_args, + uint32_t arg_size) +{ + return -EPERM; +} + +void cam_tfe_get_timestamp(struct cam_isp_timestamp *time_stamp) +{ + struct timespec64 ts; + + ktime_get_boottime_ts64(&ts); + time_stamp->mono_time.tv_sec = ts.tv_sec; + time_stamp->mono_time.tv_nsec = ts.tv_nsec; +} + +int cam_tfe_irq_config(void *tfe_core_data, + uint32_t *irq_mask, uint32_t num_reg, bool enable) +{ + struct cam_tfe_hw_core_info *core_info; + struct cam_tfe_top_priv *top_priv; + struct cam_hw_soc_info *soc_info; + void __iomem *mem_base; + bool need_lock; + unsigned long flags = 0; + uint32_t i, val; + + if (!tfe_core_data) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "Invalid core data"); + return -EINVAL; + } + + core_info = (struct cam_tfe_hw_core_info *)tfe_core_data; + top_priv = (struct cam_tfe_top_priv *)core_info->top_priv; + soc_info = (struct 
cam_hw_soc_info *)top_priv->common_data.soc_info; + mem_base = soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base; + + need_lock = !in_irq(); + if (need_lock) + spin_lock_irqsave(&core_info->spin_lock, flags); + + for (i = 0; i < num_reg; i++) { + val = cam_io_r_mb(mem_base + + core_info->tfe_hw_info->top_irq_mask[i]); + if (enable) + val |= irq_mask[i]; + else + val &= ~irq_mask[i]; + cam_io_w_mb(val, mem_base + + core_info->tfe_hw_info->top_irq_mask[i]); + } + if (need_lock) + spin_unlock_irqrestore(&core_info->spin_lock, flags); + + return 0; +} + +static void cam_tfe_log_tfe_in_debug_status( + struct cam_tfe_top_priv *top_priv) +{ + void __iomem *mem_base; + struct cam_tfe_camif_data *camif_data; + struct cam_tfe_rdi_data *rdi_data; + uint32_t i, val_0, val_1; + + mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base; + + for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { + if ((top_priv->in_rsrc[i].res_state != + CAM_ISP_RESOURCE_STATE_STREAMING)) + continue; + + if (top_priv->in_rsrc[i].res_id == CAM_ISP_HW_TFE_IN_CAMIF) { + camif_data = (struct cam_tfe_camif_data *) + top_priv->in_rsrc[i].res_priv; + val_0 = cam_io_r(mem_base + + camif_data->camif_reg->debug_0); + val_1 = cam_io_r(mem_base + + camif_data->camif_reg->debug_1); + CAM_INFO(CAM_ISP, + "camif debug1:0x%x Height:0x%x, width:0x%x", + val_1, + ((val_0 >> 16) & 0x1FFF), + (val_0 & 0x1FFF)); + CAM_INFO(CAM_ISP, + "Acquired sync mode:%d left start pxl:0x%x end_pixel:0x%x", + camif_data->sync_mode, + camif_data->left_first_pixel, + camif_data->left_last_pixel); + + if (camif_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + CAM_INFO(CAM_ISP, + "sync mode:%d right start pxl:0x%x end_pixel:0x%x", + camif_data->sync_mode, + camif_data->right_first_pixel, + camif_data->right_last_pixel); + + CAM_INFO(CAM_ISP, + "Acquired line start:0x%x line end:0x%x", + camif_data->first_line, + camif_data->last_line); + CAM_INFO(CAM_ISP, "vbi_value:0x%x hbi_value:0x%x", + camif_data->vbi_value, + camif_data->hbi_value); + + } 
else if ((top_priv->in_rsrc[i].res_id >= + CAM_ISP_HW_TFE_IN_RDI0) || + (top_priv->in_rsrc[i].res_id <= + CAM_ISP_HW_TFE_IN_RDI2)) { + rdi_data = (struct cam_tfe_rdi_data *) + top_priv->in_rsrc[i].res_priv; + val_0 = cam_io_r(mem_base + + rdi_data->rdi_reg->rdi_debug_0); + val_1 = cam_io_r(mem_base + + rdi_data->rdi_reg->rdi_debug_1); + CAM_INFO(CAM_ISP, + "RDI res id:%d debug1:0x%x Height:0x%x, width:0x%x", + top_priv->in_rsrc[i].res_id, + val_1, ((val_0 >> 16) & 0x1FFF), + (val_0 & 0x1FFF)); + CAM_INFO(CAM_ISP, + "sync mode:%d left start pxl:0x%x end_pixel:0x%x", + rdi_data->sync_mode, + rdi_data->left_first_pixel, + rdi_data->left_last_pixel); + CAM_INFO(CAM_ISP, + "sync mode:%d line start:0x%x line end:0x%x", + rdi_data->sync_mode, + rdi_data->first_line, + rdi_data->last_line); + } + } +} +static void cam_tfe_log_error_irq_status( + struct cam_tfe_hw_core_info *core_info, + struct cam_tfe_top_priv *top_priv, + struct cam_tfe_irq_evt_payload *evt_payload) +{ + struct cam_tfe_hw_info *hw_info; + void __iomem *mem_base; + struct cam_hw_soc_info *soc_info; + struct cam_tfe_soc_private *soc_private; + + struct cam_tfe_clc_hw_status *clc_hw_status; + struct timespec64 ts; + uint32_t i, val_0, val_1, val_2, val_3; + + + ktime_get_boottime_ts64(&ts); + hw_info = core_info->tfe_hw_info; + mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base; + soc_info = top_priv->common_data.soc_info; + soc_private = top_priv->common_data.soc_info->soc_private; + + CAM_INFO(CAM_ISP, "current monotonic timestamp:[%lld.%06lld]", + ts.tv_sec, ts.tv_nsec/1000); + CAM_INFO(CAM_ISP, + "ERROR timestamp:[%lld.%06lld] SOF timestamp:[%lld.%06lld] EPOCH timestamp:[%lld.%06lld] EOF timestamp:[%lld.%06lld]", + top_priv->error_ts.tv_sec, + top_priv->error_ts.tv_nsec/1000, + top_priv->sof_ts.tv_sec, + top_priv->sof_ts.tv_nsec/1000, + top_priv->epoch_ts.tv_sec, + top_priv->epoch_ts.tv_nsec/1000, + top_priv->eof_ts.tv_sec, + top_priv->eof_ts.tv_nsec/1000); + + CAM_INFO(CAM_ISP, "TOP 
IRQ[0]:0x%x IRQ[1]:0x%x IRQ[2]:0x%x", + evt_payload->irq_reg_val[0], evt_payload->irq_reg_val[1], + evt_payload->irq_reg_val[2]); + + for (i = 0; i < top_priv->common_data.common_reg->num_debug_reg; i++) { + val_0 = cam_io_r(mem_base + + top_priv->common_data.common_reg->debug_reg[i]); + CAM_INFO(CAM_ISP, "Top debug [%d]:0x%x", i, val_0); + } + + cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle); + + for (i = 0; i < top_priv->common_data.common_reg->num_perf_cfg; i++) { + val_0 = cam_io_r(mem_base + + top_priv->common_data.common_reg->perf_cfg[i].perf_pixel_count); + + val_1 = cam_io_r(mem_base + + top_priv->common_data.common_reg->perf_cfg[i].perf_line_count); + + val_2 = cam_io_r(mem_base + + top_priv->common_data.common_reg->perf_cfg[i].perf_stall_count); + + val_3 = cam_io_r(mem_base + + top_priv->common_data.common_reg->perf_cfg[i].perf_always_count); + + CAM_INFO(CAM_ISP, + "Top perf cnt [%d] pix:0x%x line:0x%x stall:0x%x always:0x%x", + i, val_0, val_1, val_2, val_3); + } + + clc_hw_status = hw_info->clc_hw_status_info; + for (i = 0; i < hw_info->num_clc; i++) { + val_0 = cam_io_r(mem_base + + clc_hw_status[i].hw_status_reg); + if (val_0) + CAM_INFO(CAM_ISP, + "CLC HW status :name:%s offset:0x%x value:0x%x", + clc_hw_status[i].name, + clc_hw_status[i].hw_status_reg, + val_0); + } + + cam_tfe_log_tfe_in_debug_status(top_priv); + + /* Check the overflow errors */ + if (evt_payload->irq_reg_val[0] & hw_info->error_irq_mask[0]) { + if (evt_payload->irq_reg_val[0] & BIT(8)) + CAM_INFO(CAM_ISP, "PP_FRAME_DROP"); + + if (evt_payload->irq_reg_val[0] & BIT(9)) + CAM_INFO(CAM_ISP, "RDI0_FRAME_DROP"); + + if (evt_payload->irq_reg_val[0] & BIT(10)) + CAM_INFO(CAM_ISP, "RDI1_FRAME_DROP"); + + if (evt_payload->irq_reg_val[0] & BIT(11)) + CAM_INFO(CAM_ISP, "RDI2_FRAME_DROP"); + + if (evt_payload->irq_reg_val[0] & BIT(16)) + CAM_INFO(CAM_ISP, "PP_OVERFLOW"); + + if (evt_payload->irq_reg_val[0] & BIT(17)) + CAM_INFO(CAM_ISP, "RDI0_OVERFLOW"); + + if 
(evt_payload->irq_reg_val[0] & BIT(18)) + CAM_INFO(CAM_ISP, "RDI1_OVERFLOW"); + + if (evt_payload->irq_reg_val[0] & BIT(19)) + CAM_INFO(CAM_ISP, "RDI2_OVERFLOW"); + } + + /* Check the violation errors */ + if (evt_payload->irq_reg_val[2] & hw_info->error_irq_mask[2]) { + if (evt_payload->irq_reg_val[2] & BIT(0)) + CAM_INFO(CAM_ISP, "PP_CAMIF_VIOLATION"); + + if (evt_payload->irq_reg_val[2] & BIT(1)) + CAM_INFO(CAM_ISP, "PP_VIOLATION"); + + if (evt_payload->irq_reg_val[2] & BIT(2)) + CAM_INFO(CAM_ISP, "RDI0_CAMIF_VIOLATION"); + + if (evt_payload->irq_reg_val[2] & BIT(3)) + CAM_INFO(CAM_ISP, "RDI1_CAMIF_VIOLATION"); + + if (evt_payload->irq_reg_val[2] & BIT(4)) + CAM_INFO(CAM_ISP, "RDI2_CAMIF_VIOLATION"); + + if (evt_payload->irq_reg_val[2] & BIT(5)) + CAM_INFO(CAM_ISP, "DIAG_VIOLATION"); + + val_0 = cam_io_r(mem_base + + top_priv->common_data.common_reg->violation_status); + CAM_INFO(CAM_ISP, "TOP Violation status:0x%x", val_0); + } + + core_info->tfe_bus->bottom_half_handler( + core_info->tfe_bus->bus_priv, false, evt_payload, true); + + CAM_INFO(CAM_ISP, + "TFE clock rate:%d TFE total bw applied:%lld", + top_priv->hw_clk_rate, + top_priv->total_bw_applied); + + cam_cpas_log_votes(false); +} + +static int cam_tfe_error_irq_bottom_half( + struct cam_tfe_hw_core_info *core_info, + struct cam_tfe_top_priv *top_priv, + struct cam_tfe_irq_evt_payload *evt_payload, + cam_hw_mgr_event_cb_func event_cb, + void *event_cb_priv) +{ + struct cam_isp_hw_error_event_info err_evt_info; + struct cam_isp_hw_event_info evt_info; + struct cam_tfe_hw_info *hw_info; + uint32_t error_detected = 0; + + hw_info = core_info->tfe_hw_info; + evt_info.hw_idx = core_info->core_index; + evt_info.res_type = CAM_ISP_RESOURCE_TFE_IN; + + if (evt_payload->irq_reg_val[0] & hw_info->error_irq_mask[0]) { + err_evt_info.err_type = CAM_TFE_IRQ_STATUS_OVERFLOW; + error_detected = 1; + } + + if ((evt_payload->bus_irq_val[0] & hw_info->bus_error_irq_mask[0]) || + (evt_payload->irq_reg_val[2] & 
hw_info->error_irq_mask[2])) { + err_evt_info.err_type = CAM_TFE_IRQ_STATUS_VIOLATION; + error_detected = 1; + } + + if (error_detected) { + evt_info.event_data = (void *)&err_evt_info; + top_priv->error_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + top_priv->error_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + + cam_tfe_log_error_irq_status(core_info, top_priv, evt_payload); + if (event_cb) + event_cb(event_cb_priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + else + CAM_ERR(CAM_ISP, "TFE:%d invalid eventcb:", + core_info->core_index); + } + + return 0; +} + +static int cam_tfe_rdi_irq_bottom_half( + struct cam_tfe_top_priv *top_priv, + struct cam_isp_resource_node *rdi_node, + bool epoch_process, + struct cam_tfe_irq_evt_payload *evt_payload) +{ + struct cam_tfe_rdi_data *rdi_priv; + struct cam_isp_hw_event_info evt_info; + struct cam_hw_info *hw_info; + struct cam_tfe_top_reg_offset_common *common_reg; + uint32_t val, val2; + + rdi_priv = (struct cam_tfe_rdi_data *)rdi_node->res_priv; + hw_info = rdi_node->hw_intf->hw_priv; + + evt_info.hw_idx = rdi_node->hw_intf->hw_idx; + evt_info.res_id = rdi_node->res_id; + evt_info.res_type = rdi_node->res_type; + + if ((!epoch_process) && (evt_payload->irq_reg_val[1] & + rdi_priv->reg_data->eof_irq_mask)) { + CAM_DBG(CAM_ISP, "Received EOF"); + top_priv->eof_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + top_priv->eof_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + + if (rdi_priv->event_cb) + rdi_priv->event_cb(rdi_priv->priv, + CAM_ISP_HW_EVENT_EOF, (void *)&evt_info); + } + + if ((!epoch_process) && (evt_payload->irq_reg_val[1] & + rdi_priv->reg_data->sof_irq_mask)) { + CAM_DBG(CAM_ISP, "Received SOF"); + top_priv->sof_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + top_priv->sof_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + + if (rdi_priv->event_cb) + rdi_priv->event_cb(rdi_priv->priv, + CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); + + if (top_priv->top_debug & + 
CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) { + common_reg = rdi_priv->common_reg; + val = cam_io_r(rdi_priv->mem_base + + common_reg->diag_sensor_status_0); + val2 = cam_io_r(rdi_priv->mem_base + + common_reg->diag_sensor_status_1); + CAM_INFO(CAM_ISP, + "TFE:%d diag sensor hbi min error:%d neq hbi:%d HBI:%d VBI:%d", + rdi_node->hw_intf->hw_idx, + ((val >> common_reg->diag_min_hbi_error_shift) + & 0x1), + ((val >> common_reg->diag_neq_hbi_shift) & 0x1), + (val & common_reg->diag_sensor_hbi_mask), + val2); + } + } + + if (epoch_process && (evt_payload->irq_reg_val[1] & + rdi_priv->reg_data->epoch0_irq_mask)) { + CAM_DBG(CAM_ISP, "Received EPOCH0"); + top_priv->epoch_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + top_priv->epoch_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + + if (rdi_priv->event_cb) + rdi_priv->event_cb(rdi_priv->priv, + CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info); + } + + return 0; +} + +static int cam_tfe_camif_irq_bottom_half( + struct cam_tfe_top_priv *top_priv, + struct cam_isp_resource_node *camif_node, + bool epoch_process, + struct cam_tfe_irq_evt_payload *evt_payload) +{ + struct cam_tfe_camif_data *camif_priv; + struct cam_isp_hw_event_info evt_info; + struct cam_hw_info *hw_info; + struct cam_tfe_top_reg_offset_common *common_reg; + uint32_t val, val2; + + camif_priv = camif_node->res_priv; + hw_info = camif_node->hw_intf->hw_priv; + evt_info.hw_idx = camif_node->hw_intf->hw_idx; + evt_info.res_id = camif_node->res_id; + evt_info.res_type = camif_node->res_type; + + if ((!epoch_process) && (evt_payload->irq_reg_val[1] & + camif_priv->reg_data->eof_irq_mask)) { + CAM_DBG(CAM_ISP, "Received EOF"); + + top_priv->eof_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + top_priv->eof_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_EOF, (void *)&evt_info); + } + + if ((!epoch_process) && (evt_payload->irq_reg_val[1] & + 
camif_priv->reg_data->sof_irq_mask)) { + if ((camif_priv->enable_sof_irq_debug) && + (camif_priv->irq_debug_cnt <= + CAM_TFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF"); + + camif_priv->irq_debug_cnt++; + if (camif_priv->irq_debug_cnt == + CAM_TFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) { + camif_priv->enable_sof_irq_debug = + false; + camif_priv->irq_debug_cnt = 0; + } + } else + CAM_DBG(CAM_ISP, "Received SOF"); + + top_priv->sof_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + top_priv->sof_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); + + if (top_priv->top_debug & + CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) { + common_reg = camif_priv->common_reg; + val = cam_io_r(camif_priv->mem_base + + common_reg->diag_sensor_status_0); + val2 = cam_io_r(camif_priv->mem_base + + common_reg->diag_sensor_status_1); + CAM_INFO(CAM_ISP, + "TFE:%d diag sensor hbi min error:%d neq hbi:%d HBI:%d VBI:%d", + camif_node->hw_intf->hw_idx, + ((val >> common_reg->diag_min_hbi_error_shift) + & 0x1), + ((val >> common_reg->diag_neq_hbi_shift) & 0x1), + (val & common_reg->diag_sensor_hbi_mask), + val2); + } + } + + if (epoch_process && (evt_payload->irq_reg_val[1] & + camif_priv->reg_data->epoch0_irq_mask)) { + CAM_DBG(CAM_ISP, "Received EPOCH"); + + top_priv->epoch_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + top_priv->epoch_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info); + } + + return 0; +} + +static int cam_tfe_irq_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + struct cam_tfe_hw_core_info *core_info; + struct cam_tfe_top_priv *top_priv; + struct cam_tfe_irq_evt_payload *evt_payload; + struct cam_tfe_camif_data *camif_priv; + struct cam_tfe_rdi_data *rdi_priv; + cam_hw_mgr_event_cb_func event_cb = NULL; + void 
*event_cb_priv = NULL; + uint32_t i; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, + "Invalid params handle_priv:%pK, evt_payload_priv:%pK", + handler_priv, evt_payload_priv); + return 0; + } + + core_info = (struct cam_tfe_hw_core_info *)handler_priv; + top_priv = (struct cam_tfe_top_priv *)core_info->top_priv; + evt_payload = evt_payload_priv; + + /* process sof and eof */ + for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { + if ((top_priv->in_rsrc[i].res_id == + CAM_ISP_HW_TFE_IN_CAMIF) && + (top_priv->in_rsrc[i].res_state == + CAM_ISP_RESOURCE_STATE_STREAMING)) { + camif_priv = (struct cam_tfe_camif_data *) + top_priv->in_rsrc[i].res_priv; + event_cb = camif_priv->event_cb; + event_cb_priv = camif_priv->priv; + + if (camif_priv->reg_data->subscribe_irq_mask[1] & + evt_payload->irq_reg_val[1]) + cam_tfe_camif_irq_bottom_half(top_priv, + &top_priv->in_rsrc[i], false, + evt_payload); + + } else if ((top_priv->in_rsrc[i].res_id >= + CAM_ISP_HW_TFE_IN_RDI0) && + (top_priv->in_rsrc[i].res_id <= + CAM_ISP_HW_TFE_IN_RDI2) && + (top_priv->in_rsrc[i].res_state == + CAM_ISP_RESOURCE_STATE_STREAMING) && + top_priv->in_rsrc[i].is_rdi_primary_res) { + rdi_priv = (struct cam_tfe_rdi_data *) + top_priv->in_rsrc[i].res_priv; + event_cb = rdi_priv->event_cb; + event_cb_priv = rdi_priv->priv; + + if (rdi_priv->reg_data->subscribe_irq_mask[1] & + evt_payload->irq_reg_val[1]) + cam_tfe_rdi_irq_bottom_half(top_priv, + &top_priv->in_rsrc[i], false, + evt_payload); + } + } + + /* process the irq errors */ + cam_tfe_error_irq_bottom_half(core_info, top_priv, evt_payload, + event_cb, event_cb_priv); + + /* process the reg update in the bus */ + if (evt_payload->irq_reg_val[0] & + core_info->tfe_hw_info->bus_reg_irq_mask[0]) { + core_info->tfe_bus->bottom_half_handler( + core_info->tfe_bus->bus_priv, true, evt_payload, false); + } + + /* process the epoch */ + for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { + if ((top_priv->in_rsrc[i].res_id == + CAM_ISP_HW_TFE_IN_CAMIF) 
&& + (top_priv->in_rsrc[i].res_state == + CAM_ISP_RESOURCE_STATE_STREAMING)) { + camif_priv = (struct cam_tfe_camif_data *) + top_priv->in_rsrc[i].res_priv; + if (camif_priv->reg_data->subscribe_irq_mask[1] & + evt_payload->irq_reg_val[1]) + cam_tfe_camif_irq_bottom_half(top_priv, + &top_priv->in_rsrc[i], true, + evt_payload); + } else if ((top_priv->in_rsrc[i].res_id >= + CAM_ISP_HW_TFE_IN_RDI0) && + (top_priv->in_rsrc[i].res_id <= + CAM_ISP_HW_TFE_IN_RDI2) && + (top_priv->in_rsrc[i].res_state == + CAM_ISP_RESOURCE_STATE_STREAMING)) { + rdi_priv = (struct cam_tfe_rdi_data *) + top_priv->in_rsrc[i].res_priv; + if (rdi_priv->reg_data->subscribe_irq_mask[1] & + evt_payload->irq_reg_val[1]) + cam_tfe_rdi_irq_bottom_half(top_priv, + &top_priv->in_rsrc[i], true, + evt_payload); + } + } + + /* process the bufone */ + if (evt_payload->irq_reg_val[0] & + core_info->tfe_hw_info->bus_reg_irq_mask[0]) { + core_info->tfe_bus->bottom_half_handler( + core_info->tfe_bus->bus_priv, false, evt_payload, + false); + } + + cam_tfe_put_evt_payload(core_info, &evt_payload); + + return 0; +} + +static int cam_tfe_irq_err_top_half( + struct cam_tfe_hw_core_info *core_info, + void __iomem *mem_base, + uint32_t *top_irq_status, + uint32_t *bus_irq_status) +{ + uint32_t i; + + if ((top_irq_status[0] & core_info->tfe_hw_info->error_irq_mask[0]) || + (top_irq_status[2] & + core_info->tfe_hw_info->error_irq_mask[2]) || + (bus_irq_status[0] & + core_info->tfe_hw_info->bus_error_irq_mask[0])) { + CAM_ERR(CAM_ISP, + "Encountered Error: tfe:%d: Irq_status0=0x%x status2=0x%x", + core_info->core_index, top_irq_status[0], + top_irq_status[2]); + CAM_ERR(CAM_ISP, + "Encountered Error: tfe:%d:BUS Irq_status0=0x%x", + core_info->core_index, bus_irq_status[0]); + + for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) + cam_io_w(0, mem_base + + core_info->tfe_hw_info->top_irq_mask[i]); + + cam_io_w_mb(core_info->tfe_hw_info->global_clear_bitmask, + mem_base + core_info->tfe_hw_info->top_irq_cmd); + } + + return 
0; +} + +irqreturn_t cam_tfe_irq(int irq_num, void *data) +{ + struct cam_hw_info *tfe_hw; + struct cam_tfe_hw_core_info *core_info; + struct cam_tfe_top_priv *top_priv; + void __iomem *mem_base; + struct cam_tfe_irq_evt_payload *evt_payload; + uint32_t top_irq_status[CAM_TFE_TOP_IRQ_REG_NUM] = {0}; + uint32_t bus_irq_status[CAM_TFE_BUS_MAX_IRQ_REGISTERS] = {0}; + uint32_t i, ccif_violation = 0, overflow_status = 0; + uint32_t image_sz_violation = 0; + void *bh_cmd = NULL; + int rc = -EINVAL; + + if (!data) + return IRQ_NONE; + + tfe_hw = (struct cam_hw_info *)data; + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + top_priv = (struct cam_tfe_top_priv *)core_info->top_priv; + mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base; + + if (tfe_hw->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_ERR(CAM_ISP, "TFE:%d hw is not powered up", + core_info->core_index); + return IRQ_HANDLED; + } + + spin_lock(&core_info->spin_lock); + for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) + top_irq_status[i] = cam_io_r(mem_base + + core_info->tfe_hw_info->top_irq_status[i]); + + for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) + cam_io_w(top_irq_status[i], mem_base + + core_info->tfe_hw_info->top_irq_clear[i]); + + cam_io_w_mb(core_info->tfe_hw_info->global_clear_bitmask, + mem_base + core_info->tfe_hw_info->top_irq_cmd); + + CAM_DBG(CAM_ISP, "TFE:%d IRQ status_0:0x%x status_1:0x%x status_2:0x%x", + core_info->core_index, top_irq_status[0], + top_irq_status[1], top_irq_status[2]); + + if (top_irq_status[0] & core_info->tfe_hw_info->bus_reg_irq_mask[0]) { + for (i = 0; i < CAM_TFE_BUS_MAX_IRQ_REGISTERS; i++) + bus_irq_status[i] = cam_io_r(mem_base + + core_info->tfe_hw_info->bus_irq_status[i]); + + for (i = 0; i < CAM_TFE_BUS_MAX_IRQ_REGISTERS; i++) + cam_io_w(bus_irq_status[i], mem_base + + core_info->tfe_hw_info->bus_irq_clear[i]); + + ccif_violation = cam_io_r(mem_base + + core_info->tfe_hw_info->bus_violation_reg); + overflow_status = cam_io_r(mem_base + + 
core_info->tfe_hw_info->bus_overflow_reg); + image_sz_violation = cam_io_r(mem_base + + core_info->tfe_hw_info->bus_image_size_vilation_reg); + + cam_io_w(core_info->tfe_hw_info->global_clear_bitmask, + mem_base + core_info->tfe_hw_info->bus_irq_cmd); + + CAM_DBG(CAM_ISP, "TFE:%d BUS IRQ status_0:0x%x status_1:0x%x", + core_info->core_index, bus_irq_status[0], + bus_irq_status[1]); + } + spin_unlock(&core_info->spin_lock); + + /* check reset */ + if ((top_irq_status[0] & core_info->tfe_hw_info->reset_irq_mask[0]) || + (top_irq_status[1] & + core_info->tfe_hw_info->reset_irq_mask[1]) || + (top_irq_status[2] & + core_info->tfe_hw_info->reset_irq_mask[2])) { + /* Reset ack */ + complete(&core_info->reset_complete); + return IRQ_HANDLED; + } + + /* Check the irq errors */ + cam_tfe_irq_err_top_half(core_info, mem_base, top_irq_status, + bus_irq_status); + + rc = cam_tfe_get_evt_payload(core_info, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "No tasklet_cmd is free in queue"); + CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status2=0x%x", + top_irq_status[0], top_irq_status[2]); + goto end; + } + + cam_tfe_get_timestamp(&evt_payload->ts); + + for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) + evt_payload->irq_reg_val[i] = top_irq_status[i]; + + for (i = 0; i < CAM_TFE_BUS_MAX_IRQ_REGISTERS; i++) + evt_payload->bus_irq_val[i] = bus_irq_status[i]; + + evt_payload->ccif_violation_status = ccif_violation; + evt_payload->overflow_status = overflow_status; + evt_payload->image_size_violation_status = image_sz_violation; + + evt_payload->core_index = core_info->core_index; + evt_payload->core_info = core_info; + + rc = tasklet_bh_api.get_bh_payload_func( + top_priv->tasklet_info, &bh_cmd); + if (rc || !bh_cmd) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "No payload, IRQ handling frozen"); + cam_tfe_put_evt_payload(core_info, &evt_payload); + goto end; + } + + tasklet_bh_api.bottom_half_enqueue_func( + top_priv->tasklet_info, + bh_cmd, + core_info, + evt_payload, + 
cam_tfe_irq_bottom_half); + +end: + return IRQ_HANDLED; + +} + +static int cam_tfe_top_set_hw_clk_rate( + struct cam_tfe_top_priv *top_priv) +{ + struct cam_hw_soc_info *soc_info = NULL; + int i, rc = 0; + unsigned long max_clk_rate = 0; + + soc_info = top_priv->common_data.soc_info; + + for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { + if (top_priv->req_clk_rate[i] > max_clk_rate) + max_clk_rate = top_priv->req_clk_rate[i]; + } + if (max_clk_rate == top_priv->hw_clk_rate) + return 0; + + CAM_DBG(CAM_ISP, "TFE:%d Clock name=%s idx=%d clk=%llu", + top_priv->common_data.soc_info->index, + soc_info->clk_name[soc_info->src_clk_idx], + soc_info->src_clk_idx, max_clk_rate); + + rc = cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, max_clk_rate, 0); + + if (!rc) + top_priv->hw_clk_rate = max_clk_rate; + else + CAM_ERR(CAM_ISP, "TFE:%d set src clock rate:%lld failed, rc=%d", + top_priv->common_data.soc_info->index, max_clk_rate, rc); + + return rc; +} + +static int cam_tfe_top_dynamic_clock_update( + struct cam_tfe_top_priv *top_priv, + void *cmd_args, + uint32_t arg_size) +{ + struct cam_hw_soc_info *soc_info; + unsigned long *clk_rate; + int rc = 0; + + soc_info = top_priv->common_data.soc_info; + clk_rate = (unsigned long *)cmd_args; + CAM_DBG(CAM_ISP, "TFE[%u] clock rate requested: %llu curr: %llu", + top_priv->common_data.hw_intf->hw_idx, *clk_rate, + soc_info->applied_src_clk_rates.sw_client); + + if (*clk_rate <= top_priv->hw_clk_rate) + goto end; + + rc = cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, *clk_rate, 0); + if (!rc) { + top_priv->hw_clk_rate = *clk_rate; + } else { + CAM_ERR(CAM_ISP, + "unable to set clock dynamically rate: %llu", + *clk_rate); + return rc; + } +end: + *clk_rate = soc_info->applied_src_clk_rates.sw_client; + CAM_DBG(CAM_ISP, "TFE[%u] new clock rate %llu", + top_priv->common_data.hw_intf->hw_idx, soc_info->applied_src_clk_rates.sw_client); + + return rc; +} + +static struct cam_axi_vote 
*cam_tfe_top_delay_bw_reduction(
	struct cam_tfe_top_priv *top_priv,
	uint64_t *to_be_applied_bw)
{
	/*
	 * Scan the ring of votes recorded over the last
	 * CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES frames (per input port) and
	 * return the entry whose summed camnoc_bw is largest, writing that
	 * sum to *to_be_applied_bw. Returns NULL if the history is empty
	 * (all totals zero) or on u64 summation overflow.
	 */
	uint32_t i, j;
	int vote_idx = -1;
	uint64_t max_bw = 0;
	uint64_t total_bw;
	struct cam_axi_vote *curr_l_vote;

	for (i = 0; i < (CAM_TFE_TOP_IN_PORT_MAX *
		CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
		total_bw = 0;
		curr_l_vote = &top_priv->last_vote[i];
		for (j = 0; j < curr_l_vote->num_paths; j++) {
			/* Guard the running sum against u64 wrap-around */
			if (total_bw >
				(U64_MAX -
				curr_l_vote->axi_path[j].camnoc_bw)) {
				CAM_ERR(CAM_ISP, "Overflow at idx: %d", j);
				return NULL;
			}

			total_bw += curr_l_vote->axi_path[j].camnoc_bw;
		}

		if (total_bw > max_bw) {
			vote_idx = i;
			max_bw = total_bw;
		}
	}

	if (vote_idx < 0)
		return NULL;

	*to_be_applied_bw = max_bw;

	return &top_priv->last_vote[vote_idx];
}

/*
 * cam_tfe_top_set_axi_bw_vote() - aggregate the per-input-port AXI
 * bandwidth requests and vote the result to CPAS.
 *
 * Builds an aggregate vote from every port marked
 * CAM_TFE_BW_CONTROL_INCLUDE, records it in the last_vote history ring,
 * and applies it. With start_stop=true (stream start/stop) the new
 * aggregate is applied immediately and the history reset; otherwise the
 * maximum over recent history is applied (delayed BW reduction), so the
 * vote only shrinks after several quiet frames. Votes to CPAS only when
 * the path count or total bandwidth actually changed.
 */
static int cam_tfe_top_set_axi_bw_vote(
	struct cam_tfe_top_priv *top_priv,
	bool start_stop)
{
	struct cam_axi_vote *agg_vote = NULL;
	struct cam_axi_vote *to_be_applied_axi_vote = NULL;
	struct cam_hw_soc_info *soc_info = top_priv->common_data.soc_info;
	struct cam_tfe_soc_private *soc_private = soc_info->soc_private;
	int rc = 0;
	uint32_t i;
	uint32_t num_paths = 0;
	uint64_t total_bw_new_vote = 0;
	bool bw_unchanged = true;
	bool apply_bw_update = false;

	if (!soc_private) {
		CAM_ERR(CAM_ISP, "Error soc_private NULL");
		return -EINVAL;
	}

	/* Heap allocation: struct cam_axi_vote is too large for the stack */
	agg_vote = CAM_MEM_ZALLOC(sizeof(struct cam_axi_vote), GFP_KERNEL);
	if (!agg_vote) {
		CAM_ERR(CAM_ISP, "Out of memory");
		return -ENOMEM;
	}

	/* Concatenate axi paths of every included port into one vote */
	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
		if (top_priv->axi_vote_control[i] ==
			CAM_TFE_BW_CONTROL_INCLUDE) {
			if (num_paths +
				top_priv->req_axi_vote[i].num_paths >
				CAM_CPAS_MAX_PATHS_PER_CLIENT) {
				CAM_ERR(CAM_ISP,
					"Required paths(%d) more than max(%d)",
					num_paths +
					top_priv->req_axi_vote[i].num_paths,
					CAM_CPAS_MAX_PATHS_PER_CLIENT);
				rc = -EINVAL;
				goto free_mem;
			}

			memcpy(&agg_vote->axi_path[num_paths],
				&top_priv->req_axi_vote[i].axi_path[0],
				top_priv->req_axi_vote[i].num_paths *
				sizeof(
				struct cam_cpas_axi_per_path_bw_vote));
			num_paths += top_priv->req_axi_vote[i].num_paths;
		}
	}

	agg_vote->num_paths = num_paths;

	for (i = 0; i < agg_vote->num_paths; i++) {
		CAM_DBG(CAM_PERF,
			"tfe[%d] : New BW Vote : counter[%d] [%s][%s] [%llu %llu %llu]",
			top_priv->common_data.hw_intf->hw_idx,
			top_priv->last_counter,
			cam_cpas_axi_util_path_type_to_string(
			agg_vote->axi_path[i].path_data_type),
			cam_cpas_axi_util_trans_type_to_string(
			agg_vote->axi_path[i].transac_type),
			agg_vote->axi_path[i].camnoc_bw,
			agg_vote->axi_path[i].mnoc_ab_bw,
			agg_vote->axi_path[i].mnoc_ib_bw);

		total_bw_new_vote += agg_vote->axi_path[i].camnoc_bw;
	}

	/* Record the new aggregate in the circular vote history */
	memcpy(&top_priv->last_vote[top_priv->last_counter], agg_vote,
		sizeof(struct cam_axi_vote));
	top_priv->last_counter = (top_priv->last_counter + 1) %
		(CAM_TFE_TOP_IN_PORT_MAX *
		CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES);

	if ((agg_vote->num_paths != top_priv->applied_axi_vote.num_paths) ||
		(total_bw_new_vote != top_priv->total_bw_applied))
		bw_unchanged = false;

	CAM_DBG(CAM_PERF,
		"tfe[%d] : applied_total=%lld, new_total=%lld unchanged=%d, start_stop=%d",
		top_priv->common_data.hw_intf->hw_idx,
		top_priv->total_bw_applied, total_bw_new_vote,
		bw_unchanged, start_stop);

	if (bw_unchanged) {
		CAM_DBG(CAM_ISP, "BW config unchanged");
		rc = 0;
		goto free_mem;
	}

	if (start_stop) {
		/* need to vote current request immediately */
		to_be_applied_axi_vote = agg_vote;
		/* Reset everything, we can start afresh */
		memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
			(CAM_TFE_TOP_IN_PORT_MAX *
			CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES));
		top_priv->last_counter = 0;
		top_priv->last_vote[top_priv->last_counter] = *agg_vote;
		top_priv->last_counter = (top_priv->last_counter + 1) %
			(CAM_TFE_TOP_IN_PORT_MAX *
			CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES);
	} else {
		/*
		 * Find max bw request in last few frames. This will be the bw
		 * that we want to vote to CPAS now.
		 */
		to_be_applied_axi_vote =
			cam_tfe_top_delay_bw_reduction(top_priv,
			&total_bw_new_vote);
		if (!to_be_applied_axi_vote) {
			CAM_ERR(CAM_ISP, "to_be_applied_axi_vote is NULL");
			rc = -EINVAL;
			goto free_mem;
		}
	}

	for (i = 0; i < to_be_applied_axi_vote->num_paths; i++) {
		CAM_DBG(CAM_PERF,
			"tfe[%d] : Apply BW Vote : [%s][%s] [%llu %llu %llu]",
			top_priv->common_data.hw_intf->hw_idx,
			cam_cpas_axi_util_path_type_to_string(
			to_be_applied_axi_vote->axi_path[i].path_data_type),
			cam_cpas_axi_util_trans_type_to_string(
			to_be_applied_axi_vote->axi_path[i].transac_type),
			to_be_applied_axi_vote->axi_path[i].camnoc_bw,
			to_be_applied_axi_vote->axi_path[i].mnoc_ab_bw,
			to_be_applied_axi_vote->axi_path[i].mnoc_ib_bw);
	}

	if ((to_be_applied_axi_vote->num_paths !=
		top_priv->applied_axi_vote.num_paths) ||
		(total_bw_new_vote != top_priv->total_bw_applied))
		apply_bw_update = true;

	CAM_DBG(CAM_PERF,
		"tfe[%d] : Delayed update: applied_total=%lld, new_total=%lld apply_bw_update=%d, start_stop=%d",
		top_priv->common_data.hw_intf->hw_idx,
		top_priv->total_bw_applied, total_bw_new_vote,
		apply_bw_update, start_stop);

	if (apply_bw_update) {
		rc = cam_cpas_update_axi_vote(soc_private->cpas_handle,
			to_be_applied_axi_vote);
		if (!rc) {
			/* Cache what was applied for the next comparison */
			memcpy(&top_priv->applied_axi_vote,
				to_be_applied_axi_vote,
				sizeof(struct cam_axi_vote));
			top_priv->total_bw_applied = total_bw_new_vote;
		} else {
			CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
		}
	}

free_mem:
	CAM_MEM_ZFREE((void *)agg_vote, sizeof(struct cam_axi_vote));
	agg_vote = NULL;
	return rc;
}

/*
 * cam_tfe_top_get_base() - compose a CDM change-base command that points
 * the CDM at this TFE's register block. Validates argument size and CDM
 * ops, checks the destination command buffer is large enough, then
 * writes the change-base and records used_bytes. (Body continues on the
 * next chunk.)
 */
static int cam_tfe_top_get_base(struct cam_tfe_top_priv *top_priv,
	void *cmd_args, uint32_t arg_size)
{
	uint32_t size = 0;
	uint32_t mem_base = 0;
	struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
	struct cam_cdm_utils_ops *cdm_util_ops = NULL;

	if (arg_size != sizeof(struct
cam_isp_hw_get_cmd_update)) {
		CAM_ERR(CAM_ISP, "Error Invalid cmd size");
		return -EINVAL;
	}

	if (!cdm_args || !cdm_args->res || !top_priv ||
		!top_priv->common_data.soc_info) {
		CAM_ERR(CAM_ISP, "Error Invalid args");
		return -EINVAL;
	}

	cdm_util_ops =
		(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;

	if (!cdm_util_ops) {
		CAM_ERR(CAM_ISP, "Invalid CDM ops");
		return -EINVAL;
	}

	size = cdm_util_ops->cdm_required_size_changebase();
	/* since cdm returns dwords, we need to convert it into bytes */
	if ((size * 4) > cdm_args->cmd.size) {
		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
			cdm_args->cmd.size, size);
		return -EINVAL;
	}

	/*
	 * NOTE(review): mem_base is uint32_t but is compared against -1
	 * (the macro's failure sentinel) — relies on implicit conversion
	 * to 0xFFFFFFFF; confirm the macro's error return convention.
	 */
	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
		top_priv->common_data.soc_info, TFE_CORE_BASE_IDX);
	if (mem_base == -1) {
		CAM_ERR(CAM_ISP, "failed to get mem_base, index: %d num_reg_map: %u",
			TFE_CORE_BASE_IDX, top_priv->common_data.soc_info->num_reg_map);
		return -EINVAL;
	}

	cdm_util_ops->cdm_write_changebase(
		cdm_args->cmd.cmd_buf_addr, mem_base);
	cdm_args->cmd.used_bytes = (size * 4);

	return 0;
}

/*
 * cam_tfe_top_get_reg_update() - compose a CDM reg-random command that
 * triggers a reg-update (RUP) for the given input resource. Selects the
 * reg-update register/value pair from the CAMIF or RDI0..RDI2 private
 * data depending on res_id, and writes one register/value pair into the
 * caller's command buffer.
 */
static int cam_tfe_top_get_reg_update(
	struct cam_tfe_top_priv *top_priv,
	void *cmd_args, uint32_t arg_size)
{
	uint32_t size = 0;
	uint32_t reg_val_pair[2];
	struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
	struct cam_cdm_utils_ops *cdm_util_ops = NULL;
	struct cam_tfe_camif_data *camif_rsrc_data = NULL;
	struct cam_tfe_rdi_data *rdi_rsrc_data = NULL;
	struct cam_isp_resource_node *in_res;

	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
		CAM_ERR(CAM_ISP, "Invalid cmd size");
		return -EINVAL;
	}

	if (!cdm_args || !cdm_args->res) {
		CAM_ERR(CAM_ISP, "Invalid args");
		return -EINVAL;
	}

	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;

	if (!cdm_util_ops) {
		CAM_ERR(CAM_ISP, "Invalid CDM ops");
		return -EINVAL;
	}

	in_res = cdm_args->res;
	size = cdm_util_ops->cdm_required_size_reg_random(1);
	/* since cdm returns dwords, we need to convert it into bytes */
	if ((size * 4) > cdm_args->cmd.size) {
		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
			cdm_args->cmd.size, size);
		return -EINVAL;
	}

	/*
	 * NOTE(review): if res_id matches neither CAMIF nor RDI0..RDI2,
	 * reg_val_pair is used uninitialized below — callers presumably
	 * only pass valid input resources; confirm.
	 */
	if (in_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
		camif_rsrc_data = in_res->res_priv;
		reg_val_pair[0] = camif_rsrc_data->camif_reg->reg_update_cmd;
		reg_val_pair[1] =
			camif_rsrc_data->reg_data->reg_update_cmd_data;
	} else if ((in_res->res_id >= CAM_ISP_HW_TFE_IN_RDI0) &&
		(in_res->res_id <= CAM_ISP_HW_TFE_IN_RDI2)) {
		rdi_rsrc_data = in_res->res_priv;
		reg_val_pair[0] = rdi_rsrc_data->rdi_reg->reg_update_cmd;
		reg_val_pair[1] = rdi_rsrc_data->reg_data->reg_update_cmd_data;
	}

	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
		1, reg_val_pair);

	cdm_args->cmd.used_bytes = size * 4;

	return 0;
}

/*
 * cam_tfe_top_clock_update() - record a per-port clock rate request and,
 * if the hardware is powered up, apply the new maximum immediately via
 * cam_tfe_top_set_hw_clk_rate(). When powered down the request is only
 * cached (applied later at stream start).
 */
static int cam_tfe_top_clock_update(
	struct cam_tfe_top_priv *top_priv,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_tfe_clock_update_args *clk_update = NULL;
	struct cam_isp_resource_node *res = NULL;
	struct cam_hw_info *hw_info = NULL;
	int i, rc = 0;

	clk_update =
		(struct cam_tfe_clock_update_args *)cmd_args;
	res = clk_update->node_res;

	if (!res || !res->hw_intf->hw_priv) {
		CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
		return -EINVAL;
	}

	hw_info = res->hw_intf->hw_priv;

	if (res->res_type != CAM_ISP_RESOURCE_TFE_IN ||
		res->res_id >= CAM_ISP_HW_TFE_IN_MAX) {
		CAM_ERR(CAM_ISP, "TFE:%d Invalid res_type:%d res id%d",
			res->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	/* Stash the request against the matching input port slot */
	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
		if (top_priv->in_rsrc[i].res_id == res->res_id) {
			top_priv->req_clk_rate[i] = clk_update->clk_rate;
			break;
		}
	}

	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_DBG(CAM_ISP,
			"TFE:%d Not ready to set clocks yet :%d",
			res->hw_intf->hw_idx,
			hw_info->hw_state);
	} else
		rc = cam_tfe_top_set_hw_clk_rate(top_priv);

	return rc;
}

/*
 * cam_tfe_top_bw_update() - record a per-port AXI bandwidth request
 * (marking the port INCLUDE) and, if powered up, re-aggregate and vote
 * through cam_tfe_top_set_axi_bw_vote() with delayed reduction
 * (start_stop=false).
 */
static int cam_tfe_top_bw_update(
	struct cam_tfe_top_priv *top_priv,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_tfe_bw_update_args *bw_update = NULL;
	struct cam_isp_resource_node *res = NULL;
	struct cam_hw_info *hw_info = NULL;
	int rc = 0;
	int i;

	bw_update = (struct cam_tfe_bw_update_args *)cmd_args;
	res = bw_update->node_res;

	if (!res || !res->hw_intf || !res->hw_intf->hw_priv)
		return -EINVAL;

	hw_info = res->hw_intf->hw_priv;

	if (res->res_type != CAM_ISP_RESOURCE_TFE_IN ||
		res->res_id >= CAM_ISP_HW_TFE_IN_MAX) {
		CAM_ERR(CAM_ISP, "TFE:%d Invalid res_type:%d res id%d",
			res->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	/*
	 * NOTE(review): this loop bounds on CAM_ISP_HW_TFE_IN_MAX while the
	 * sibling loops in clock_update/bw_control use
	 * CAM_TFE_TOP_IN_PORT_MAX for the same in_rsrc[]/req_axi_vote[]
	 * arrays — confirm the two constants are equal or that the arrays
	 * are sized for CAM_ISP_HW_TFE_IN_MAX, otherwise this can read/write
	 * out of bounds.
	 */
	for (i = 0; i < CAM_ISP_HW_TFE_IN_MAX; i++) {
		if (top_priv->in_rsrc[i].res_id == res->res_id) {
			memcpy(&top_priv->req_axi_vote[i], &bw_update->isp_vote,
				sizeof(struct cam_axi_vote));
			top_priv->axi_vote_control[i] =
				CAM_TFE_BW_CONTROL_INCLUDE;
			break;
		}
	}

	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"TFE:%d Not ready to set BW yet :%d",
			res->hw_intf->hw_idx,
			hw_info->hw_state);
	} else {
		rc = cam_tfe_top_set_axi_bw_vote(top_priv, false);
	}

	return rc;
}

/*
 * cam_tfe_top_bw_control() - include/exclude a port from the aggregate
 * AXI vote (action from cam_tfe_bw_control_args) and, if powered up,
 * re-vote immediately (start_stop=true, i.e. no delayed reduction).
 */
static int cam_tfe_top_bw_control(
	struct cam_tfe_top_priv *top_priv,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_tfe_bw_control_args *bw_ctrl = NULL;
	struct cam_isp_resource_node *res = NULL;
	struct cam_hw_info *hw_info = NULL;
	int rc = 0;
	int i;

	bw_ctrl = (struct cam_tfe_bw_control_args *)cmd_args;
	res = bw_ctrl->node_res;

	if (!res || !res->hw_intf->hw_priv)
		return -EINVAL;

	hw_info = res->hw_intf->hw_priv;

	if (res->res_type != CAM_ISP_RESOURCE_TFE_IN ||
		res->res_id >= CAM_ISP_HW_TFE_IN_MAX) {
		CAM_ERR(CAM_ISP, "TFE:%d Invalid res_type:%d res id%d",
			res->hw_intf->hw_idx, res->res_type,
			res->res_id);
		return -EINVAL;
	}

	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
		if (top_priv->in_rsrc[i].res_id == res->res_id) {
			top_priv->axi_vote_control[i] = bw_ctrl->action;
			break;
		}
	}

	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"TFE:%d Not ready to set BW yet :%d",
			res->hw_intf->hw_idx,
			hw_info->hw_state);
	} else {
		rc = cam_tfe_top_set_axi_bw_vote(top_priv, true);
	}

	return rc;
}

/*
 * cam_tfe_top_get_reg_dump() - dump TFE top/bus/LUT registers to the
 * kernel log (CAM_INFO) for debugging. Skips resources that are only
 * reserved/available (HW not streaming). Registers are read four at a
 * time and printed per 16-byte row. (Body continues on the next chunk.)
 */
static int cam_tfe_top_get_reg_dump(
	struct cam_tfe_top_priv *top_priv,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_isp_hw_get_cmd_update *reg_dump_cmd = cmd_args;
	struct cam_tfe_soc_private *soc_private;
	struct cam_tfe_reg_dump_data *reg_dump_data;
	struct cam_hw_soc_info *soc_info;
	void __iomem *mem_base;
	int i, j, num_reg_dump_entries;
	uint32_t val_0, val_1, val_2, val_3, wm_offset, start_offset;
	uint32_t end_offset, lut_word_size, lut_size, lut_bank_sel, lut_dmi_reg;

	if (!reg_dump_cmd) {
		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
		return -EINVAL;
	}

	/* Nothing to dump unless the resource actually reached the HW */
	if ((reg_dump_cmd->res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
		(reg_dump_cmd->res->res_state ==
		CAM_ISP_RESOURCE_STATE_AVAILABLE))
		return 0;

	soc_info = top_priv->common_data.soc_info;
	soc_private = top_priv->common_data.soc_info->soc_private;
	mem_base = soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base;
	CAM_INFO(CAM_ISP, "dump tfe:%d registers",
		top_priv->common_data.hw_intf->hw_idx);

	reg_dump_data = top_priv->common_data.reg_dump_data;
	num_reg_dump_entries = reg_dump_data->num_reg_dump_entries;
	for (i = 0; i < num_reg_dump_entries; i++) {
		start_offset = reg_dump_data->reg_entry[i].start_offset;
		end_offset = reg_dump_data->reg_entry[i].end_offset;

		/* Print 4 registers (16 bytes) per log line */
		for (j = start_offset; (j + 0xc) <= end_offset; j += 0x10) {
			val_0 = cam_io_r_mb(mem_base + j);
			val_1 = cam_io_r_mb(mem_base + j + 4);
			val_2 = cam_io_r_mb(mem_base + j + 0x8);
			val_3 = cam_io_r_mb(mem_base + j + 0xc);
			CAM_INFO(CAM_ISP, "0x%04x=0x%08x 0x%08x 0x%08x 0x%08x",
				j, val_0, val_1, val_2, val_3);
		}
	}
	/* LUT dump: select each bank via the DMI config register, then
	 * stream words out of the fixed DMI data register.
	 * NOTE(review): 0xC24/0xC28/0xC30 appear to be DMI cfg/addr/data
	 * registers — confirm against the TFE register map.
	 */
	num_reg_dump_entries = reg_dump_data->num_lut_dump_entries;
	for (i = 0; i < num_reg_dump_entries; i++) {
		lut_bank_sel = reg_dump_data->lut_entry[i].lut_bank_sel;
		lut_size = reg_dump_data->lut_entry[i].lut_addr_size;
		lut_word_size = reg_dump_data->lut_entry[i].lut_word_size;
		lut_dmi_reg = reg_dump_data->lut_entry[i].dmi_reg_offset;

		cam_io_w_mb(lut_bank_sel, mem_base + lut_dmi_reg + 4);
		cam_io_w_mb(0, mem_base + 0xC28);

		for (j = 0; j < lut_size; j++) {
			val_0 = cam_io_r_mb(mem_base + 0xC30);
			CAM_INFO(CAM_ISP, "Bank%d:0x%x LO: 0x%x",
				lut_bank_sel, j, val_0);
		}
	}
	/* No mem selected */
	cam_io_w_mb(0, mem_base + 0xC24);
	cam_io_w_mb(0, mem_base + 0xC28);

	start_offset = reg_dump_data->bus_start_addr;
	end_offset = reg_dump_data->bus_write_top_end_addr;

	CAM_INFO(CAM_ISP, "bus start addr:0x%x end_offset:0x%x",
		start_offset, end_offset);

	/* Bus top registers, 16 bytes per log line */
	for (i = start_offset; (i + 0xc) <= end_offset; i += 0x10) {
		val_0 = cam_io_r_mb(mem_base + i);
		val_1 = cam_io_r_mb(mem_base + i + 4);
		val_2 = cam_io_r_mb(mem_base + i + 0x8);
		val_3 = cam_io_r_mb(mem_base + i + 0xc);
		CAM_INFO(CAM_ISP, "0x%04x=0x%08x 0x%08x 0x%08x 0x%08x",
			i, val_0, val_1, val_2, val_3);
	}

	wm_offset = reg_dump_data->bus_client_start_addr;

	CAM_INFO(CAM_ISP, "bus wm offset:0x%x",
		wm_offset);

	/* Per write-master client: two register windows (0x0-0x3c, 0x60-0x80) */
	for (j = 0; j < reg_dump_data->num_bus_clients; j++) {
		for (i = 0x0; (i + 0xc) <= 0x3C; i += 0x10) {
			val_0 = cam_io_r_mb(mem_base + wm_offset + i);
			val_1 = cam_io_r_mb(mem_base + wm_offset + i + 4);
			val_2 = cam_io_r_mb(mem_base + wm_offset + i + 0x8);
			val_3 = cam_io_r_mb(mem_base + wm_offset + i + 0xc);
			CAM_INFO(CAM_ISP, "0x%04x=0x%08x 0x%08x 0x%08x 0x%08x",
				(wm_offset + i), val_0, val_1, val_2, val_3);
		}
		for (i = 0x60; (i + 0xc) <= 0x80; i += 0x10) {
			val_0 = cam_io_r_mb(mem_base + wm_offset + i);
			val_1 = cam_io_r_mb(mem_base + wm_offset + i + 4);
			val_2 = cam_io_r_mb(mem_base + wm_offset + i + 0x8);
			val_3 = cam_io_r_mb(mem_base +
				wm_offset + i + 0xc);
			CAM_INFO(CAM_ISP, "0x%04x=0x%08x 0x%08x 0x%08x 0x%08x",
				(wm_offset + i), val_0, val_1, val_2, val_3);
		}
		wm_offset += reg_dump_data->bus_client_offset;
	}

	cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle);

	/* dump the clock votings */
	CAM_INFO(CAM_ISP, "TFE:%d clk=%ld",
		top_priv->common_data.hw_intf->hw_idx,
		top_priv->hw_clk_rate);

	return 0;
}

/*
 * cam_tfe_hw_dump() - serialize TFE registers, LUT contents and
 * clock/BW state into a caller-supplied dump buffer.
 *
 * Layout: a sequence of cam_isp_hw_dump_header-tagged sections
 * ("TFE_REG:" as offset/value uint32 pairs, one "LUT_REG:" per LUT
 * bank, "TFE_CLK_RATE_BW:" as two uint64 words, and an empty
 * "TFE_NIU_MAXWR:" section). Register/LUT sections are emitted only
 * when dump_args->is_dump_all is set. Validates buffer space up front
 * (-ENOSPC) and requires powered-up HW (-EPERM); serialized against
 * power transitions by hw_mutex. dump_args->offset is advanced past all
 * data written.
 */
static int cam_tfe_hw_dump(
	struct cam_tfe_hw_core_info *core_info,
	void *cmd_args,
	uint32_t arg_size)
{
	int i, j;
	uint8_t *dst;
	uint32_t reg_start_offset;
	uint32_t reg_dump_size = 0;
	uint32_t lut_dump_size = 0;
	uint32_t num_lut_dump_entries = 0;
	uint32_t num_reg;
	uint32_t lut_word_size, lut_size;
	uint32_t lut_bank_sel, lut_dmi_reg;
	void __iomem *reg_base;
	void __iomem *mem_base;
	uint32_t *addr, *start;
	uint64_t *clk_waddr, *clk_wstart;
	size_t remain_len;
	uint32_t min_len;
	struct cam_hw_info *tfe_hw_info;
	struct cam_hw_soc_info *soc_info;
	struct cam_tfe_top_priv *top_priv;
	struct cam_tfe_soc_private *soc_private;
	struct cam_tfe_reg_dump_data *reg_dump_data;
	struct cam_isp_hw_dump_header *hdr;
	struct cam_isp_hw_dump_args *dump_args =
		(struct cam_isp_hw_dump_args *)cmd_args;

	if (!dump_args || !core_info) {
		CAM_ERR(CAM_ISP, "Invalid args");
		return -EINVAL;
	}

	if (!dump_args->cpu_addr || !dump_args->buf_len) {
		CAM_ERR(CAM_ISP,
			"Invalid params %pK %zu",
			(void *)dump_args->cpu_addr,
			dump_args->buf_len);
		return -EINVAL;
	}

	if (dump_args->buf_len <= dump_args->offset) {
		CAM_WARN(CAM_ISP,
			"Dump offset overshoot offset %zu buf_len %zu",
			dump_args->offset, dump_args->buf_len);
		return -ENOSPC;
	}

	top_priv = (struct cam_tfe_top_priv *)core_info->top_priv;
	tfe_hw_info =
		(struct cam_hw_info *)(top_priv->common_data.hw_intf->hw_priv);
	reg_dump_data = top_priv->common_data.reg_dump_data;
	soc_info = top_priv->common_data.soc_info;
	soc_private =
		top_priv->common_data.soc_info->soc_private;
	mem_base = soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base;

	if (dump_args->is_dump_all) {

		/* Dump registers size */
		for (i = 0; i < reg_dump_data->num_reg_dump_entries; i++)
			reg_dump_size +=
				(reg_dump_data->reg_entry[i].end_offset -
				reg_dump_data->reg_entry[i].start_offset);

		/*
		 * We dump the offset as well, so the total size dumped becomes
		 * multiplied by 2
		 */
		reg_dump_size *= 2;

		/* LUT dump size */
		for (i = 0; i < reg_dump_data->num_lut_dump_entries; i++)
			lut_dump_size +=
				((reg_dump_data->lut_entry[i].lut_addr_size) *
				(reg_dump_data->lut_entry[i].lut_word_size/8));

		num_lut_dump_entries = reg_dump_data->num_lut_dump_entries;
	}

	/* Minimum len comprises of:
	 * lut_dump_size + reg_dump_size + sizeof dump_header +
	 * (num_lut_dump_entries--> represents number of banks) +
	 * (misc number of words) * sizeof(uint32_t)
	 */
	min_len = lut_dump_size + reg_dump_size +
		sizeof(struct cam_isp_hw_dump_header) +
		(num_lut_dump_entries * sizeof(uint32_t)) +
		(sizeof(uint32_t) * CAM_TFE_CORE_DUMP_MISC_NUM_WORDS);

	remain_len = dump_args->buf_len - dump_args->offset;
	if (remain_len < min_len) {
		CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu, min %u",
			remain_len, min_len);
		return -ENOSPC;
	}

	/* Hold hw_mutex so the HW cannot power down mid-dump */
	mutex_lock(&tfe_hw_info->hw_mutex);
	if (tfe_hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_ERR(CAM_ISP, "TFE:%d HW not powered up",
			core_info->core_index);
		mutex_unlock(&tfe_hw_info->hw_mutex);
		return -EPERM;
	}

	if (!dump_args->is_dump_all)
		goto dump_bw;

	/* "TFE_REG:" section: soc index, then (offset, value) pairs */
	dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset;
	hdr = (struct cam_isp_hw_dump_header *)dst;
	hdr->word_size = sizeof(uint32_t);
	scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "TFE_REG:");
	addr = (uint32_t *)(dst + sizeof(struct cam_isp_hw_dump_header));
	start = addr;
	*addr++ = soc_info->index;
	for (i = 0; i < reg_dump_data->num_reg_dump_entries; i++) {
		num_reg =
			(reg_dump_data->reg_entry[i].end_offset -
			reg_dump_data->reg_entry[i].start_offset)/4;
		reg_start_offset = reg_dump_data->reg_entry[i].start_offset;
		reg_base = mem_base + reg_start_offset;
		for (j = 0; j < num_reg; j++) {
			/* Absolute physical address, then register value */
			addr[0] =
				soc_info->mem_block[TFE_CORE_BASE_IDX]->start +
				reg_start_offset + (j*4);
			addr[1] = cam_io_r(reg_base + (j*4));
			addr += 2;
		}
	}

	/* Dump bus top registers */
	num_reg = (reg_dump_data->bus_write_top_end_addr -
		reg_dump_data->bus_start_addr)/4;
	reg_base = mem_base + reg_dump_data->bus_start_addr;
	reg_start_offset = soc_info->mem_block[TFE_CORE_BASE_IDX]->start +
		reg_dump_data->bus_start_addr;
	for (i = 0; i < num_reg; i++) {
		addr[0] = reg_start_offset + (i*4);
		addr[1] = cam_io_r(reg_base + (i*4));
		addr += 2;
	}

	/* Dump bus clients */
	reg_base = mem_base + reg_dump_data->bus_client_start_addr;
	reg_start_offset = soc_info->mem_block[TFE_CORE_BASE_IDX]->start +
		reg_dump_data->bus_client_start_addr;
	for (j = 0; j < reg_dump_data->num_bus_clients; j++) {

		for (i = 0; i <= 0x3c; i += 4) {
			addr[0] = reg_start_offset + i;
			addr[1] = cam_io_r(reg_base + i);
			addr += 2;
		}
		/*
		 * NOTE(review): this window uses (i*4) although i already
		 * steps byte offsets 0x60..0x80 — inconsistent with the
		 * 0x0..0x3c loop above and with the 0x60..0x80 window in
		 * cam_tfe_top_get_reg_dump(), which use i directly. Confirm
		 * whether (i*4) is intended here.
		 */
		for (i = 0x60; i <= 0x80; i += 4) {
			addr[0] = reg_start_offset + (i*4);
			addr[1] = cam_io_r(reg_base + (i*4));
			addr += 2;
		}
		reg_base += reg_dump_data->bus_client_offset;
		reg_start_offset += reg_dump_data->bus_client_offset;
	}

	hdr->size = hdr->word_size * (addr - start);
	dump_args->offset += hdr->size +
		sizeof(struct cam_isp_hw_dump_header);

	/* Dump LUT entries */
	for (i = 0; i < reg_dump_data->num_lut_dump_entries; i++) {

		lut_bank_sel = reg_dump_data->lut_entry[i].lut_bank_sel;
		lut_size = reg_dump_data->lut_entry[i].lut_addr_size;
		lut_word_size = reg_dump_data->lut_entry[i].lut_word_size;
		lut_dmi_reg = reg_dump_data->lut_entry[i].dmi_reg_offset;
		dst = (char *)dump_args->cpu_addr + dump_args->offset;
		hdr = (struct cam_isp_hw_dump_header *)dst;
		scnprintf(hdr->tag,
			CAM_ISP_HW_DUMP_TAG_MAX_LEN, "LUT_REG:");
		hdr->word_size = lut_word_size/8;
		addr = (uint32_t *)(dst +
			sizeof(struct cam_isp_hw_dump_header));
		start = addr;
		*addr++ = lut_bank_sel;
		/* Select the bank, then stream words from the DMI data reg */
		cam_io_w_mb(lut_bank_sel, mem_base + lut_dmi_reg + 4);
		cam_io_w_mb(0, mem_base + 0xC28);
		for (j = 0; j < lut_size; j++) {
			*addr = cam_io_r_mb(mem_base + 0xc30);
			addr++;
		}
		hdr->size = hdr->word_size * (addr - start);
		dump_args->offset += hdr->size +
			sizeof(struct cam_isp_hw_dump_header);
	}
	/* Deselect DMI memory */
	cam_io_w_mb(0, mem_base + 0xC24);
	cam_io_w_mb(0, mem_base + 0xC28);

dump_bw:
	/* "TFE_CLK_RATE_BW:" section: applied clock rate and total BW */
	dst = (char *)dump_args->cpu_addr + dump_args->offset;
	hdr = (struct cam_isp_hw_dump_header *)dst;
	scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "TFE_CLK_RATE_BW:");
	clk_waddr = (uint64_t *)(dst +
		sizeof(struct cam_isp_hw_dump_header));
	clk_wstart = clk_waddr;
	hdr->word_size = sizeof(uint64_t);
	*clk_waddr++ = top_priv->hw_clk_rate;
	*clk_waddr++ = top_priv->total_bw_applied;

	hdr->size = hdr->word_size * (clk_waddr - clk_wstart);
	dump_args->offset += hdr->size +
		sizeof(struct cam_isp_hw_dump_header);

	/*
	 * NOTE(review): this "TFE_NIU_MAXWR:" section writes no payload
	 * (addr == start, so hdr->size is 0) — an intentionally empty
	 * placeholder or dead code; confirm.
	 */
	dst = (char *)dump_args->cpu_addr + dump_args->offset;
	hdr = (struct cam_isp_hw_dump_header *)dst;
	scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "TFE_NIU_MAXWR:");
	addr = (uint32_t *)(dst +
		sizeof(struct cam_isp_hw_dump_header));
	start = addr;
	hdr->word_size = sizeof(uint32_t);

	hdr->size = hdr->word_size * (addr - start);
	dump_args->offset += hdr->size +
		sizeof(struct cam_isp_hw_dump_header);
	mutex_unlock(&tfe_hw_info->hw_mutex);

	CAM_DBG(CAM_ISP, "offset %zu", dump_args->offset);
	return 0;
}

/*
 * cam_tfe_camif_irq_reg_dump() - log the raw TOP and BUS IRQ status
 * registers for debugging. Refuses (returns 0 after an error log) when
 * the resource has not been started. (Body continues on the next chunk.)
 */
static int cam_tfe_camif_irq_reg_dump(
	struct cam_tfe_hw_core_info *core_info,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_tfe_top_priv *top_priv;
	struct cam_isp_hw_get_cmd_update *cmd_update;
	struct cam_isp_resource_node *camif_res = NULL;
	void __iomem *mem_base;
	uint32_t i;

	int rc = 0;

	if (!cmd_args) {
		CAM_ERR(CAM_ISP, "Error! Invalid input arguments\n");
		return -EINVAL;
	}
	top_priv = (struct cam_tfe_top_priv *)core_info->top_priv;
	cmd_update = (struct cam_isp_hw_get_cmd_update *)cmd_args;
	camif_res = cmd_update->res;
	mem_base = top_priv->common_data.soc_info->reg_map[0].mem_base;
	/* Only meaningful once the resource has been started on HW */
	if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
		(camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)) {
		CAM_ERR(CAM_ISP, "Error! Invalid state\n");
		return 0;
	}

	for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) {
		CAM_INFO(CAM_ISP,
			"Core Id =%d TOP IRQ status[%d ] val 0x%x",
			core_info->core_index, i,
			cam_io_r_mb(mem_base +
				core_info->tfe_hw_info->top_irq_status[i]));
	}

	for (i = 0; i < CAM_TFE_BUS_MAX_IRQ_REGISTERS; i++) {
		CAM_INFO(CAM_ISP,
			"Core Id =%d BUS IRQ status[%d ] val:0x%x",
			core_info->core_index, i,
			cam_io_r_mb(mem_base +
				core_info->tfe_hw_info->bus_irq_status[i]));
	}

	return rc;
}

/*
 * cam_tfe_set_top_debug() - store the debug bitmask (passed via
 * cmd_args) into top_priv->top_debug; consulted later, e.g. to enable
 * sensor diagnostics at CAMIF start.
 */
int cam_tfe_set_top_debug(struct cam_tfe_hw_core_info *core_info,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_tfe_top_priv *top_priv;
	uint32_t *debug_val;

	if (!cmd_args) {
		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
		return -EINVAL;
	}

	top_priv = (struct cam_tfe_top_priv *)core_info->top_priv;
	debug_val = (uint32_t *)cmd_args;
	top_priv->top_debug = *debug_val;

	CAM_DBG(CAM_ISP, "TFE:%d top debug set:%d",
		core_info->core_index,
		top_priv->top_debug);

	return 0;
}


/*
 * cam_tfe_top_reserve() - acquire the TFE input resource matching
 * acquire_args->res_id if it is AVAILABLE.
 *
 * Validates the pixel pattern, copies the acquire parameters into the
 * CAMIF or RDI private data (CAMIF additionally gets dual-TFE sync,
 * PD-enable, binning and core_cfg; TPG input forces vbi_value to 0),
 * marks the slot RESERVED and returns it via acquire_args->rsrc_node.
 * Returns -EINVAL when no matching available port exists.
 */
int cam_tfe_top_reserve(void *device_priv,
	void *reserve_args, uint32_t arg_size)
{
	struct cam_tfe_top_priv *top_priv;
	struct cam_tfe_acquire_args *args;
	struct cam_tfe_hw_tfe_in_acquire_args *acquire_args;
	struct cam_tfe_camif_data *camif_data;
	struct cam_tfe_rdi_data *rdi_data;
	uint32_t i;
	int rc = -EINVAL;

	if (!device_priv || !reserve_args) {
		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
		return -EINVAL;
	}

	top_priv = (struct cam_tfe_top_priv *)device_priv;
	args = (struct cam_tfe_acquire_args *)reserve_args;
	acquire_args = &args->tfe_in;

	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
		CAM_DBG(CAM_ISP, "i :%d res_id:%d state:%d", i,
			acquire_args->res_id, top_priv->in_rsrc[i].res_state);

		if ((top_priv->in_rsrc[i].res_id == acquire_args->res_id) &&
			(top_priv->in_rsrc[i].res_state ==
			CAM_ISP_RESOURCE_STATE_AVAILABLE)) {
			rc = cam_tfe_validate_pix_pattern(
				acquire_args->in_port->pix_pattern);
			if (rc)
				return rc;

			if (acquire_args->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
				camif_data = (struct cam_tfe_camif_data *)
					top_priv->in_rsrc[i].res_priv;
				camif_data->pix_pattern =
					acquire_args->in_port->pix_pattern;
				camif_data->dsp_mode =
					acquire_args->in_port->dsp_mode;
				camif_data->left_first_pixel =
					acquire_args->in_port->left_start;
				camif_data->left_last_pixel =
					acquire_args->in_port->left_end;
				camif_data->right_first_pixel =
					acquire_args->in_port->right_start;
				camif_data->right_last_pixel =
					acquire_args->in_port->right_end;
				camif_data->first_line =
					acquire_args->in_port->line_start;
				camif_data->last_line =
					acquire_args->in_port->line_end;
				/* TPG input has no sensor VBI to honor */
				if (acquire_args->in_port->res_id ==
					CAM_ISP_TFE_IN_RES_TPG)
					camif_data->vbi_value = 0;
				else
					camif_data->vbi_value =
						acquire_args->in_port->sensor_vbi;
				camif_data->hbi_value =
					acquire_args->in_port->sensor_hbi;
				camif_data->camif_pd_enable =
					acquire_args->camif_pd_enable;
				camif_data->dual_tfe_sync_sel =
					acquire_args->dual_tfe_sync_sel_idx;
				camif_data->sync_mode = acquire_args->sync_mode;
				camif_data->event_cb = args->event_cb;
				camif_data->priv = args->priv;
				camif_data->qcfa_bin =
					acquire_args->in_port->qcfa_bin;
				camif_data->bayer_bin =
					acquire_args->in_port->bayer_bin;
				camif_data->core_cfg =
					acquire_args->in_port->core_cfg;

				CAM_DBG(CAM_ISP,
					"TFE:%d pix_pattern:%d dsp_mode=%d",
					top_priv->in_rsrc[i].hw_intf->hw_idx,
					camif_data->pix_pattern,
					camif_data->dsp_mode);
			} else {
				rdi_data = (struct cam_tfe_rdi_data *)
					top_priv->in_rsrc[i].res_priv;
				rdi_data->pix_pattern =
					acquire_args->in_port->pix_pattern;
				rdi_data->sync_mode = acquire_args->sync_mode;
				rdi_data->event_cb = args->event_cb;
				rdi_data->priv = args->priv;
				rdi_data->left_first_pixel =
					acquire_args->in_port->left_start;
				rdi_data->left_last_pixel =
					acquire_args->in_port->left_end;
				rdi_data->first_line =
					acquire_args->in_port->line_start;
				rdi_data->last_line =
					acquire_args->in_port->line_end;
			}

			top_priv->in_rsrc[i].cdm_ops = acquire_args->cdm_ops;
			top_priv->in_rsrc[i].tasklet_info = args->tasklet;
			top_priv->in_rsrc[i].res_state =
				CAM_ISP_RESOURCE_STATE_RESERVED;
			top_priv->tasklet_info = args->tasklet;
			acquire_args->rsrc_node =
				&top_priv->in_rsrc[i];
			rc = 0;
			break;
		}
	}

	return rc;
}

/*
 * cam_tfe_top_release() - return a reserved (or further-progressed)
 * input resource to AVAILABLE and clear its per-acquire state. Rejects
 * resources that were never reserved.
 */
int cam_tfe_top_release(void *device_priv,
	void *release_args, uint32_t arg_size)
{
	struct cam_tfe_top_priv *top_priv;
	struct cam_isp_resource_node *in_res;

	if (!device_priv || !release_args) {
		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
		return -EINVAL;
	}

	top_priv = (struct cam_tfe_top_priv *)device_priv;
	in_res = (struct cam_isp_resource_node *)release_args;

	CAM_DBG(CAM_ISP, "TFE:%d resource id:%d in state %d",
		in_res->hw_intf->hw_idx, in_res->res_id,
		in_res->res_state);
	if (in_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
		CAM_ERR(CAM_ISP, "TFE:%d Error Resource Invalid res_state :%d",
			in_res->hw_intf->hw_idx, in_res->res_state);
		return -EINVAL;
	}
	in_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
	in_res->cdm_ops = NULL;
	in_res->tasklet_info = NULL;
	in_res->is_rdi_primary_res = 0;

	return 0;
}

/*
 * cam_tfe_camif_resource_start() - program the CAMIF path for streaming.
 *
 * Sequence: build and write core_cfg_0 (dual-TFE sync, PDAF RDI2 mux,
 * delay-line CLC, serializer bits from core_cfg), apply a Titan-540
 * V100-specific core_cfg_1 tweak, program the epoch IRQ line config
 * (epoch0 at roughly mid-frame including VBI, clamped to the frame and
 * halved when binning), issue a reg-update, optionally enable sensor
 * diagnostics per top_debug, subscribe IRQs (master/single TFE only),
 * and start the perf counters and top debug registers. Marks the
 * resource STREAMING.
 */
static int cam_tfe_camif_resource_start(
	struct cam_tfe_hw_core_info *core_info,
	struct cam_isp_resource_node *camif_res)
{
	struct cam_tfe_camif_data *rsrc_data;
	struct cam_tfe_soc_private *soc_private;
	struct cam_tfe_top_priv *top_priv;
	uint32_t val = 0;
	uint32_t epoch0_irq_mask;
	uint32_t epoch1_irq_mask;
	uint32_t computed_epoch_line_cfg;
	uint32_t camera_hw_version = 0;
	struct cam_hw_intf *tfe_device;
	bool pdaf_rdi2_mux_en = false;

	if (!camif_res || !core_info) {
		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
		return -EINVAL;
	}

	if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
		CAM_ERR(CAM_ISP, "TFE:%d Error Invalid camif res res_state:%d",
			core_info->core_index, camif_res->res_state);
		return -EINVAL;
	}

	rsrc_data = (struct cam_tfe_camif_data *)camif_res->res_priv;
	soc_private = rsrc_data->soc_info->soc_private;
	top_priv = (struct cam_tfe_top_priv *)core_info->top_priv;

	if (!soc_private) {
		CAM_ERR(CAM_ISP, "TFE:%d Error soc_private NULL",
			core_info->core_index);
		return -ENODEV;
	}

	/* Config tfe core */
	val = 0;
	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
		val = (1 << rsrc_data->reg_data->extern_reg_update_shift);

	if ((rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
		(rsrc_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)) {
		val |= (1 << rsrc_data->reg_data->dual_tfe_pix_en_shift);
		val |= ((rsrc_data->dual_tfe_sync_sel + 1) <<
			rsrc_data->reg_data->dual_tfe_sync_sel_shift);
	}

	tfe_device = rsrc_data->hw_intf;

	/* Ask the HW layer whether PDAF is muxed onto RDI2 */
	tfe_device->hw_ops.process_cmd(tfe_device->hw_priv,
		CAM_ISP_HW_CMD_IS_PDAF_RDI2_MUX_EN,
		&pdaf_rdi2_mux_en,
		sizeof(pdaf_rdi2_mux_en));

	if (pdaf_rdi2_mux_en && !rsrc_data->camif_pd_enable)
		val |= (1 << rsrc_data->reg_data->camif_pd_rdi2_src_sel_shift);

	/* enables the Delay Line CLC in the pixel pipeline */
	val |= BIT(rsrc_data->reg_data->delay_line_en_shift);

	if (rsrc_data->common_reg->serializer_supported) {
		val |= rsrc_data->core_cfg &
			(1 << rsrc_data->reg_data->ai_c_srl_en_shift);

		val |= rsrc_data->core_cfg &
			(1 << rsrc_data->reg_data->ds16_c_srl_en_shift);

		val |= rsrc_data->core_cfg &
			(1 << rsrc_data->reg_data->ds4_c_srl_en_shift);
	}


	cam_io_w_mb(val, rsrc_data->mem_base +
		rsrc_data->common_reg->core_cfg_0);

	CAM_DBG(CAM_ISP, "TFE:%d core_cfg 0 val:0x%x", core_info->core_index,
		val);

	if (cam_cpas_get_cpas_hw_version(&camera_hw_version))
		CAM_ERR(CAM_ISP, "Failed to get HW version");

	/* Titan 540 v100 workaround: clear bit 0 of core_cfg_1 */
	if (camera_hw_version == CAM_CPAS_TITAN_540_V100) {
		val = cam_io_r(rsrc_data->mem_base +
			rsrc_data->common_reg->core_cfg_1);
		val &= ~BIT(0);
		cam_io_w_mb(val, rsrc_data->mem_base +
			rsrc_data->common_reg->core_cfg_1);
		CAM_DBG(CAM_ISP, "TFE:%d core_cfg 1 val:0x%x",
			core_info->core_index, val);
	}

	/* Epoch config */
	epoch0_irq_mask = (((rsrc_data->last_line + rsrc_data->vbi_value) -
		rsrc_data->first_line) / 2);
	if (epoch0_irq_mask > (rsrc_data->last_line - rsrc_data->first_line))
		epoch0_irq_mask = (rsrc_data->last_line -
			rsrc_data->first_line);

	/* Binned output halves the effective line count */
	if (rsrc_data->bayer_bin || rsrc_data->qcfa_bin)
		epoch0_irq_mask >>= 1;

	epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg &
		0xFFFF;
	computed_epoch_line_cfg = (epoch0_irq_mask << 16) |
		epoch1_irq_mask;
	cam_io_w_mb(computed_epoch_line_cfg,
		rsrc_data->mem_base +
		rsrc_data->camif_reg->epoch_irq_cfg);
	CAM_DBG(CAM_ISP, "TFE:%d first_line: %u\n"
		"last_line: %u\n"
		"epoch_line_cfg: 0x%x",
		core_info->core_index,
		rsrc_data->first_line,
		rsrc_data->last_line,
		computed_epoch_line_cfg);

	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	/* Reg Update */
	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
		rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd);
	CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx,
		rsrc_data->reg_data->reg_update_cmd_data);

	/* Disable sof irq debug flag */
	rsrc_data->enable_sof_irq_debug = false;
	rsrc_data->irq_debug_cnt = 0;

	if (top_priv->top_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
		val = cam_io_r_mb(rsrc_data->mem_base +
			rsrc_data->common_reg->diag_config);
		val |= rsrc_data->reg_data->enable_diagnostic_hw;
		cam_io_w_mb(val, rsrc_data->mem_base +
			rsrc_data->common_reg->diag_config);
	}

	/* Enable the irq only for master or single tfe usecase */
	if (rsrc_data->sync_mode != CAM_ISP_HW_SYNC_SLAVE)
		cam_tfe_irq_config(core_info, rsrc_data->reg_data->subscribe_irq_mask,
			CAM_TFE_TOP_IRQ_REG_NUM, true);

	/* Program perf counters */
	val = (1 << rsrc_data->reg_data->perf_cnt_start_cmd_shift) |
		(1 << rsrc_data->reg_data->perf_cnt_continuous_shift) |
		(1 << rsrc_data->reg_data->perf_client_sel_shift) |
		(1 << rsrc_data->reg_data->perf_window_start_shift) |
		(2 << rsrc_data->reg_data->perf_window_end_shift);
	cam_io_w_mb(val,
		rsrc_data->mem_base +
		rsrc_data->common_reg->perf_cfg[0].perf_cnt_cfg);
	CAM_DBG(CAM_ISP, "TFE:%d perf_cfg val:%d", core_info->core_index,
		val);

	/* Enable the top debug registers */
	cam_io_w_mb(0x1,
		rsrc_data->mem_base + rsrc_data->common_reg->debug_cfg);

	CAM_DBG(CAM_ISP, "Start Camif TFE %d Done", core_info->core_index);
	return 0;
}

/*
 * cam_tfe_top_start() - start streaming on an input resource: apply the
 * pending clock rate and AXI BW votes, then start either the CAMIF path
 * (via cam_tfe_camif_resource_start) or an RDI path (module config,
 * epoch config, reg-update, optional IRQ subscribe for the primary RDI).
 *
 * NOTE(review): error paths are inconsistent — the power check uses
 * "goto end" while clk/BW failures return directly; the "end:" label
 * lies beyond this chunk. The function continues past the visible text.
 */
int cam_tfe_top_start(struct cam_tfe_hw_core_info *core_info,
	void *start_args, uint32_t arg_size)
{
	struct cam_tfe_top_priv *top_priv;
	struct cam_isp_resource_node *in_res;
	struct cam_hw_info *hw_info = NULL;
	struct cam_tfe_rdi_data *rsrc_rdi_data;
	uint32_t val;
	int rc = 0;

	if (!start_args) {
		CAM_ERR(CAM_ISP, "TFE:%d Error Invalid input arguments",
			core_info->core_index);
		return -EINVAL;
	}

	top_priv = (struct cam_tfe_top_priv *)core_info->top_priv;
	in_res = (struct cam_isp_resource_node *)start_args;
	hw_info = (struct cam_hw_info *)in_res->hw_intf->hw_priv;

	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
		CAM_ERR(CAM_ISP, "TFE:%d HW not powered up",
			core_info->core_index);
		rc = -EPERM;
		goto end;
	}

	/* Apply the clock rates cached by cam_tfe_top_clock_update() */
	rc = cam_tfe_top_set_hw_clk_rate(top_priv);
	if (rc) {
		CAM_ERR(CAM_ISP, "TFE:%d set_hw_clk_rate failed, rc=%d",
			hw_info->soc_info.index, rc);
		return rc;
	}

	/* Vote the aggregated BW immediately (stream start) */
	rc = cam_tfe_top_set_axi_bw_vote(top_priv, true);
	if (rc) {
		CAM_ERR(CAM_ISP, "TFE:%d set_axi_bw_vote failed, rc=%d",
			core_info->core_index, rc);
		return rc;
	}

	if (in_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) {
		cam_tfe_camif_resource_start(core_info, in_res);
	} else if (in_res->res_id >= CAM_ISP_HW_TFE_IN_RDI0 &&
		in_res->res_id <= CAM_ISP_HW_TFE_IN_RDI2) {
		rsrc_rdi_data = (struct cam_tfe_rdi_data *) in_res->res_priv;
		val = (rsrc_rdi_data->pix_pattern <<
			rsrc_rdi_data->reg_data->pixel_pattern_shift);

		val |= (1 << rsrc_rdi_data->reg_data->rdi_out_enable_shift);
		cam_io_w_mb(val, rsrc_rdi_data->mem_base +
			rsrc_rdi_data->rdi_reg->rdi_module_config);

		/* Epoch config */
		cam_io_w_mb(rsrc_rdi_data->reg_data->epoch_line_cfg,
			rsrc_rdi_data->mem_base +
			rsrc_rdi_data->rdi_reg->rdi_epoch_irq);

		/* Reg Update */
		cam_io_w_mb(rsrc_rdi_data->reg_data->reg_update_cmd_data,
			rsrc_rdi_data->mem_base +
			rsrc_rdi_data->rdi_reg->reg_update_cmd);
		in_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

		/* Enable the irq */
		if (in_res->is_rdi_primary_res)
			cam_tfe_irq_config(core_info,
				rsrc_rdi_data->reg_data->subscribe_irq_mask,
				CAM_TFE_TOP_IRQ_REG_NUM, true);

		if (top_priv->top_debug &
			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
			/* NOTE(review): statement continues past this chunk */
			val
= cam_io_r_mb(rsrc_rdi_data->mem_base + + rsrc_rdi_data->common_reg->diag_config); + val |= ((rsrc_rdi_data->reg_data->enable_diagnostic_hw)| + (rsrc_rdi_data->reg_data->diag_sensor_sel << + rsrc_rdi_data->reg_data->diag_sensor_shift)); + cam_io_w_mb(val, rsrc_rdi_data->mem_base + + rsrc_rdi_data->common_reg->diag_config); + } + + CAM_DBG(CAM_ISP, "TFE:%d Start RDI %d", core_info->core_index, + in_res->res_id - CAM_ISP_HW_TFE_IN_RDI0); + } + + core_info->irq_err_config_cnt++; + if (core_info->irq_err_config_cnt == 1) { + cam_tfe_irq_config(core_info, + core_info->tfe_hw_info->error_irq_mask, + CAM_TFE_TOP_IRQ_REG_NUM, true); + top_priv->error_ts.tv_sec = 0; + top_priv->error_ts.tv_nsec = 0; + top_priv->sof_ts.tv_sec = 0; + top_priv->sof_ts.tv_nsec = 0; + top_priv->epoch_ts.tv_sec = 0; + top_priv->epoch_ts.tv_nsec = 0; + top_priv->eof_ts.tv_sec = 0; + top_priv->eof_ts.tv_nsec = 0; + } + +end: + return rc; +} + +int cam_tfe_top_stop(struct cam_tfe_hw_core_info *core_info, + void *stop_args, uint32_t arg_size) +{ + struct cam_tfe_top_priv *top_priv; + struct cam_isp_resource_node *in_res; + struct cam_hw_info *hw_info = NULL; + struct cam_tfe_camif_data *camif_data; + struct cam_tfe_rdi_data *rsrc_rdi_data; + uint32_t val = 0; + int i, rc = 0; + + if (!stop_args) { + CAM_ERR(CAM_ISP, "TFE:%d Error Invalid input arguments", + core_info->core_index); + return -EINVAL; + } + + top_priv = (struct cam_tfe_top_priv *)core_info->top_priv; + in_res = (struct cam_isp_resource_node *)stop_args; + hw_info = (struct cam_hw_info *)in_res->hw_intf->hw_priv; + + if (in_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED || + in_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) + return 0; + + if (in_res->res_id == CAM_ISP_HW_TFE_IN_CAMIF) { + camif_data = (struct cam_tfe_camif_data *)in_res->res_priv; + + cam_io_w_mb(0, camif_data->mem_base + + camif_data->camif_reg->module_cfg); + + cam_tfe_irq_config(core_info, + camif_data->reg_data->subscribe_irq_mask, + CAM_TFE_TOP_IRQ_REG_NUM, 
false); + + if (in_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + in_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + val = cam_io_r_mb(camif_data->mem_base + + camif_data->common_reg->diag_config); + if (val & camif_data->reg_data->enable_diagnostic_hw) { + val &= ~camif_data->reg_data->enable_diagnostic_hw; + cam_io_w_mb(val, camif_data->mem_base + + camif_data->common_reg->diag_config); + } + } else if ((in_res->res_id >= CAM_ISP_HW_TFE_IN_RDI0) && + (in_res->res_id <= CAM_ISP_HW_TFE_IN_RDI2)) { + rsrc_rdi_data = (struct cam_tfe_rdi_data *) in_res->res_priv; + cam_io_w_mb(0x0, rsrc_rdi_data->mem_base + + rsrc_rdi_data->rdi_reg->rdi_module_config); + + if (in_res->is_rdi_primary_res) + cam_tfe_irq_config(core_info, + rsrc_rdi_data->reg_data->subscribe_irq_mask, + CAM_TFE_TOP_IRQ_REG_NUM, false); + + if (in_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + in_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + } else { + CAM_ERR(CAM_ISP, "TFE:%d Invalid res id:%d", + core_info->core_index, in_res->res_id); + return -EINVAL; + } + + if (!rc) { + for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { + if (top_priv->in_rsrc[i].res_id == in_res->res_id) { + top_priv->req_clk_rate[i] = 0; + memset(&top_priv->req_axi_vote[i], 0, + sizeof(struct cam_axi_vote)); + top_priv->axi_vote_control[i] = + CAM_TFE_BW_CONTROL_EXCLUDE; + break; + } + } + } + + core_info->irq_err_config_cnt--; + if (!core_info->irq_err_config_cnt) + cam_tfe_irq_config(core_info, + core_info->tfe_hw_info->error_irq_mask, + CAM_TFE_TOP_IRQ_REG_NUM, false); + + return rc; +} + +int cam_tfe_top_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + struct cam_tfe_hw_core_info *core_info) +{ + struct cam_tfe_top_priv *top_priv = NULL; + struct cam_tfe_top_hw_info *hw_info = top_hw_info; + struct cam_tfe_soc_private *soc_private = NULL; + struct cam_tfe_camif_data *camif_priv = NULL; + struct cam_tfe_rdi_data *rdi_priv = NULL; + int i, j, rc = 0; + + 
top_priv = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_top_priv), + GFP_KERNEL); + if (!top_priv) { + CAM_DBG(CAM_ISP, "TFE:%DError Failed to alloc for tfe_top_priv", + core_info->core_index); + rc = -ENOMEM; + goto end; + } + core_info->top_priv = top_priv; + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "TFE:%d Error soc_private NULL", + core_info->core_index); + rc = -ENODEV; + goto free_tfe_top_priv; + } + + top_priv->hw_clk_rate = 0; + memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) * + (CAM_TFE_TOP_IN_PORT_MAX * + CAM_TFE_DELAY_BW_REDUCTION_NUM_FRAMES)); + top_priv->last_counter = 0; + + for (i = 0, j = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { + top_priv->in_rsrc[i].res_type = CAM_ISP_RESOURCE_TFE_IN; + top_priv->in_rsrc[i].hw_intf = hw_intf; + top_priv->in_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_AVAILABLE; + top_priv->req_clk_rate[i] = 0; + memset(&top_priv->req_axi_vote[i], 0, + sizeof(struct cam_axi_vote)); + top_priv->axi_vote_control[i] = + CAM_TFE_BW_CONTROL_EXCLUDE; + + if (hw_info->in_port[i] == CAM_TFE_CAMIF_VER_1_0) { + top_priv->in_rsrc[i].res_id = + CAM_ISP_HW_TFE_IN_CAMIF; + + camif_priv = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_camif_data), + GFP_KERNEL); + if (!camif_priv) { + CAM_DBG(CAM_ISP, + "TFE:%dError Failed to alloc for camif_priv", + core_info->core_index); + goto free_tfe_top_priv; + } + + top_priv->in_rsrc[i].res_priv = camif_priv; + + camif_priv->mem_base = + soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base; + camif_priv->camif_reg = + hw_info->camif_hw_info.camif_reg; + camif_priv->common_reg = hw_info->common_reg; + camif_priv->reg_data = + hw_info->camif_hw_info.reg_data; + camif_priv->hw_intf = hw_intf; + camif_priv->soc_info = soc_info; + + } else if (hw_info->in_port[i] == + CAM_TFE_RDI_VER_1_0) { + top_priv->in_rsrc[i].res_id = + CAM_ISP_HW_TFE_IN_RDI0 + j; + + rdi_priv = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_rdi_data), + GFP_KERNEL); + if (!rdi_priv) { + CAM_DBG(CAM_ISP, + "TFE:%d Error 
Failed to alloc for rdi_priv", + core_info->core_index); + goto deinit_resources; + } + + top_priv->in_rsrc[i].res_priv = rdi_priv; + + rdi_priv->mem_base = + soc_info->reg_map[TFE_CORE_BASE_IDX].mem_base; + rdi_priv->hw_intf = hw_intf; + rdi_priv->common_reg = hw_info->common_reg; + rdi_priv->rdi_reg = + hw_info->rdi_hw_info[j].rdi_reg; + rdi_priv->reg_data = + hw_info->rdi_hw_info[j++].reg_data; + } else { + CAM_WARN(CAM_ISP, "TFE:%d Invalid inport type: %u", + core_info->core_index, hw_info->in_port[i]); + } + } + + top_priv->common_data.soc_info = soc_info; + top_priv->common_data.hw_intf = hw_intf; + top_priv->common_data.common_reg = hw_info->common_reg; + top_priv->common_data.reg_dump_data = &hw_info->reg_dump_data; + + return rc; + +deinit_resources: + for (--i; i >= 0; i--) { + + top_priv->in_rsrc[i].start = NULL; + top_priv->in_rsrc[i].stop = NULL; + top_priv->in_rsrc[i].process_cmd = NULL; + top_priv->in_rsrc[i].top_half_handler = NULL; + top_priv->in_rsrc[i].bottom_half_handler = NULL; + + if (!top_priv->in_rsrc[i].res_priv) + continue; + + CAM_MEM_FREE(top_priv->in_rsrc[i].res_priv); + top_priv->in_rsrc[i].res_priv = NULL; + top_priv->in_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + } +free_tfe_top_priv: + CAM_MEM_FREE(core_info->top_priv); + core_info->top_priv = NULL; +end: + return rc; +} + + +int cam_tfe_top_deinit(struct cam_tfe_top_priv *top_priv) +{ + int i, rc = 0; + + if (!top_priv) { + CAM_ERR(CAM_ISP, "Error Invalid input"); + return -EINVAL; + } + + for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { + top_priv->in_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + + top_priv->in_rsrc[i].start = NULL; + top_priv->in_rsrc[i].stop = NULL; + top_priv->in_rsrc[i].process_cmd = NULL; + top_priv->in_rsrc[i].top_half_handler = NULL; + top_priv->in_rsrc[i].bottom_half_handler = NULL; + + if (!top_priv->in_rsrc[i].res_priv) { + CAM_ERR(CAM_ISP, "Error res_priv is NULL"); + return -ENODEV; + } + + 
CAM_MEM_FREE(top_priv->in_rsrc[i].res_priv); + top_priv->in_rsrc[i].res_priv = NULL; + } + + return rc; +} + +int cam_tfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size) +{ + struct cam_hw_info *tfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tfe_hw_core_info *core_info = NULL; + struct cam_tfe_top_priv *top_priv = NULL; + struct cam_tfe_hw_info *hw_info = NULL; + void __iomem *mem_base; + uint32_t *reset_reg_args = reset_core_args; + uint32_t i, reset_reg_val, irq_status[3]; + int rc; + + CAM_DBG(CAM_ISP, "Enter"); + + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + soc_info = &tfe_hw->soc_info; + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + top_priv = core_info->top_priv; + hw_info = core_info->tfe_hw_info; + mem_base = tfe_hw->soc_info.reg_map[TFE_CORE_BASE_IDX].mem_base; + + for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) + irq_status[i] = cam_io_r(mem_base + + core_info->tfe_hw_info->top_irq_status[i]); + + for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) + cam_io_w(irq_status[i], mem_base + + core_info->tfe_hw_info->top_irq_clear[i]); + + cam_io_w_mb(core_info->tfe_hw_info->global_clear_bitmask, + mem_base + core_info->tfe_hw_info->top_irq_cmd); + + /* Mask all irq registers */ + for (i = 0; i < CAM_TFE_TOP_IRQ_REG_NUM; i++) + cam_io_w(0, mem_base + + core_info->tfe_hw_info->top_irq_mask[i]); + + cam_tfe_irq_config(core_info, hw_info->reset_irq_mask, + CAM_TFE_TOP_IRQ_REG_NUM, true); + + reinit_completion(&core_info->reset_complete); + + CAM_DBG(CAM_ISP, "calling RESET on tfe %d", soc_info->index); + + switch (*reset_reg_args) { + case CAM_TFE_HW_RESET_HW_AND_REG: + reset_reg_val = CAM_TFE_HW_RESET_HW_AND_REG_VAL; + break; + default: + reset_reg_val = CAM_TFE_HW_RESET_HW_VAL; + break; + } + + cam_io_w_mb(reset_reg_val, mem_base + + top_priv->common_data.common_reg->global_reset_cmd); + + CAM_DBG(CAM_ISP, "TFE:%d waiting for tfe reset complete", + 
core_info->core_index); + /* Wait for Completion or Timeout of 100ms */ + rc = cam_common_wait_for_completion_timeout( + &core_info->reset_complete, + msecs_to_jiffies(100)); + if (rc <= 0) { + CAM_ERR(CAM_ISP, "TFE:%d Error Reset Timeout", + core_info->core_index); + rc = -ETIMEDOUT; + } else { + rc = 0; + CAM_DBG(CAM_ISP, "TFE:%d reset complete done (%d)", + core_info->core_index, rc); + } + + CAM_DBG(CAM_ISP, "TFE:%d reset complete done (%d)", + core_info->core_index, rc); + + cam_tfe_irq_config(core_info, hw_info->reset_irq_mask, + CAM_TFE_TOP_IRQ_REG_NUM, false); + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +int cam_tfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *tfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tfe_hw_core_info *core_info = NULL; + struct cam_tfe_top_priv *top_priv; + void __iomem *mem_base; + int i, rc = 0; + unsigned long max_clk_rate = 0; + uint32_t reset_core_args = + CAM_TFE_HW_RESET_HW_AND_REG; + + CAM_DBG(CAM_ISP, "Enter"); + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + soc_info = &tfe_hw->soc_info; + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + top_priv = (struct cam_tfe_top_priv *)core_info->top_priv; + + mutex_lock(&tfe_hw->hw_mutex); + tfe_hw->open_count++; + if (tfe_hw->open_count > 1) { + mutex_unlock(&tfe_hw->hw_mutex); + CAM_DBG(CAM_ISP, "TFE:%d has already been initialized cnt %d", + core_info->core_index, tfe_hw->open_count); + return 0; + } + mutex_unlock(&tfe_hw->hw_mutex); + /* read clock value based on clock blob received */ + for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) { + if (top_priv->req_clk_rate[i] > max_clk_rate) + max_clk_rate = top_priv->req_clk_rate[i]; + } + + /* Turn ON Regulators, Clocks and other SOC resources */ + rc = cam_tfe_enable_soc_resources(soc_info, max_clk_rate); + if (rc) { + CAM_ERR(CAM_ISP, "Enable SOC failed"); + rc = -EFAULT; + goto decrement_open_cnt; + } + 
tfe_hw->hw_state = CAM_HW_STATE_POWER_UP; + + mem_base = tfe_hw->soc_info.reg_map[TFE_CORE_BASE_IDX].mem_base; + CAM_DBG(CAM_ISP, "TFE:%d Enable soc done", core_info->core_index); + + /* Do HW Reset */ + rc = cam_tfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t)); + if (rc) { + CAM_ERR(CAM_ISP, "TFE:%d Reset Failed rc=%d", + core_info->core_index, rc); + goto disable_soc; + } + + top_priv->hw_clk_rate = max_clk_rate; + core_info->irq_err_config_cnt = 0; + core_info->irq_err_config = false; + rc = core_info->tfe_bus->hw_ops.init(core_info->tfe_bus->bus_priv, + NULL, 0); + if (rc) { + CAM_ERR(CAM_ISP, "TFE:%d Top HW init Failed rc=%d", + core_info->core_index, rc); + goto disable_soc; + } + + return rc; + +disable_soc: + cam_tfe_disable_soc_resources(soc_info); + tfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + +decrement_open_cnt: + mutex_lock(&tfe_hw->hw_mutex); + tfe_hw->open_count--; + mutex_unlock(&tfe_hw->hw_mutex); + return rc; +} + +int cam_tfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *tfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tfe_hw_core_info *core_info = NULL; + int rc = 0; + uint32_t reset_core_args = + CAM_TFE_HW_RESET_HW_AND_REG; + + CAM_DBG(CAM_ISP, "Enter"); + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + soc_info = &tfe_hw->soc_info; + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + + mutex_lock(&tfe_hw->hw_mutex); + if (!tfe_hw->open_count) { + mutex_unlock(&tfe_hw->hw_mutex); + CAM_ERR_RATE_LIMIT(CAM_ISP, "TFE:%d Error Unbalanced deinit", + core_info->core_index); + return -EFAULT; + } + tfe_hw->open_count--; + if (tfe_hw->open_count) { + mutex_unlock(&tfe_hw->hw_mutex); + CAM_DBG(CAM_ISP, "TFE:%d open_cnt non-zero =%d", + core_info->core_index, tfe_hw->open_count); + return 0; + } + mutex_unlock(&tfe_hw->hw_mutex); + + rc = core_info->tfe_bus->hw_ops.deinit(core_info->tfe_bus->bus_priv, + NULL, 0); + if (rc) + 
CAM_ERR(CAM_ISP, "TFE:%d Bus HW deinit Failed rc=%d", + core_info->core_index, rc); + + rc = cam_tfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t)); + + /* Turn OFF Regulators, Clocks and other SOC resources */ + CAM_DBG(CAM_ISP, "TFE:%d Disable SOC resource", core_info->core_index); + rc = cam_tfe_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_ISP, " TFE:%d Disable SOC failed", + core_info->core_index); + + tfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +int cam_tfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size) +{ + struct cam_tfe_hw_core_info *core_info = NULL; + struct cam_hw_info *tfe_hw = hw_priv; + struct cam_tfe_acquire_args *acquire; + int rc = -ENODEV; + + if (!hw_priv || !reserve_args || (arg_size != + sizeof(struct cam_tfe_acquire_args))) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + acquire = (struct cam_tfe_acquire_args *)reserve_args; + + CAM_DBG(CAM_ISP, "TFE:%d acquire res type: %d", + core_info->core_index, acquire->rsrc_type); + mutex_lock(&tfe_hw->hw_mutex); + if (acquire->rsrc_type == CAM_ISP_RESOURCE_TFE_IN) { + rc = cam_tfe_top_reserve(core_info->top_priv, + reserve_args, arg_size); + } else if (acquire->rsrc_type == CAM_ISP_RESOURCE_TFE_OUT) { + rc = core_info->tfe_bus->hw_ops.reserve( + core_info->tfe_bus->bus_priv, acquire, + sizeof(*acquire)); + } else { + CAM_ERR(CAM_ISP, "TFE:%d Invalid res type:%d", + core_info->core_index, acquire->rsrc_type); + } + + mutex_unlock(&tfe_hw->hw_mutex); + + return rc; +} + + +int cam_tfe_release(void *hw_priv, void *release_args, uint32_t arg_size) +{ + struct cam_tfe_hw_core_info *core_info = NULL; + struct cam_hw_info *tfe_hw = hw_priv; + struct cam_isp_resource_node *isp_res; + int rc = -ENODEV; + + if (!hw_priv || !release_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + 
return -EINVAL; + } + + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + isp_res = (struct cam_isp_resource_node *) release_args; + + mutex_lock(&tfe_hw->hw_mutex); + if (isp_res->res_type == CAM_ISP_RESOURCE_TFE_IN) + rc = cam_tfe_top_release(core_info->top_priv, isp_res, + sizeof(*isp_res)); + else if (isp_res->res_type == CAM_ISP_RESOURCE_TFE_OUT) { + rc = core_info->tfe_bus->hw_ops.release( + core_info->tfe_bus->bus_priv, isp_res, + sizeof(*isp_res)); + } else { + CAM_ERR(CAM_ISP, "TFE:%d Invalid res type:%d", + core_info->core_index, isp_res->res_type); + } + + mutex_unlock(&tfe_hw->hw_mutex); + + return rc; +} + +int cam_tfe_start(void *hw_priv, void *start_args, uint32_t arg_size) +{ + struct cam_tfe_hw_core_info *core_info = NULL; + struct cam_hw_info *tfe_hw = hw_priv; + struct cam_isp_resource_node *start_res; + + int rc = 0; + + if (!hw_priv || !start_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + start_res = (struct cam_isp_resource_node *)start_args; + core_info->tasklet_info = start_res->tasklet_info; + + mutex_lock(&tfe_hw->hw_mutex); + if (start_res->res_type == CAM_ISP_RESOURCE_TFE_IN) { + rc = cam_tfe_top_start(core_info, start_args, + arg_size); + if (rc) + CAM_ERR(CAM_ISP, "TFE:%d Start failed. 
type:%d", + core_info->core_index, start_res->res_type); + + } else if (start_res->res_type == CAM_ISP_RESOURCE_TFE_OUT) { + rc = core_info->tfe_bus->hw_ops.start(start_res, NULL, 0); + } else { + CAM_ERR(CAM_ISP, "TFE:%d Invalid res type:%d", + core_info->core_index, start_res->res_type); + rc = -EFAULT; + } + + mutex_unlock(&tfe_hw->hw_mutex); + + return rc; +} + +int cam_tfe_stop(void *hw_priv, void *stop_args, uint32_t arg_size) +{ + struct cam_tfe_hw_core_info *core_info = NULL; + struct cam_hw_info *tfe_hw = hw_priv; + struct cam_isp_resource_node *isp_res; + int rc = -EINVAL; + + if (!hw_priv || !stop_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + isp_res = (struct cam_isp_resource_node *)stop_args; + + mutex_lock(&tfe_hw->hw_mutex); + if (isp_res->res_type == CAM_ISP_RESOURCE_TFE_IN) { + rc = cam_tfe_top_stop(core_info, isp_res, + sizeof(struct cam_isp_resource_node)); + } else if (isp_res->res_type == CAM_ISP_RESOURCE_TFE_OUT) { + rc = core_info->tfe_bus->hw_ops.stop(isp_res, NULL, 0); + } else { + CAM_ERR(CAM_ISP, "TFE:%d Invalid res type:%d", + core_info->core_index, isp_res->res_type); + } + + CAM_DBG(CAM_ISP, "TFE:%d stopped res type:%d res id:%d res_state:%d ", + core_info->core_index, isp_res->res_type, + isp_res->res_id, isp_res->res_state); + + mutex_unlock(&tfe_hw->hw_mutex); + + return rc; +} + +int cam_tfe_read(void *hw_priv, void *read_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_tfe_write(void *hw_priv, void *write_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_tfe_process_cmd(void *hw_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *tfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tfe_hw_core_info *core_info = NULL; + struct cam_tfe_hw_info *hw_info = NULL; + + int rc = 0; + + if (!hw_priv) { + 
CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + soc_info = &tfe_hw->soc_info; + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + hw_info = core_info->tfe_hw_info; + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_CHANGE_BASE: + rc = cam_tfe_top_get_base(core_info->top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_tfe_top_get_reg_update(core_info->top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CLOCK_UPDATE: + rc = cam_tfe_top_clock_update(core_info->top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_BW_UPDATE_V2: + rc = cam_tfe_top_bw_update(core_info->top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_BW_CONTROL: + rc = cam_tfe_top_bw_control(core_info->top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_GET_REG_DUMP: + rc = cam_tfe_top_get_reg_dump(core_info->top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP: + rc = cam_tfe_camif_irq_reg_dump(core_info, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA: + *((struct cam_hw_soc_info **)cmd_args) = soc_info; + break; + case CAM_ISP_HW_CMD_DUMP_HW: + rc = cam_tfe_hw_dump(core_info, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG: + rc = cam_tfe_set_top_debug(core_info, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_DYNAMIC_CLOCK_UPDATE: + rc = cam_tfe_top_dynamic_clock_update(core_info->top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_GET_BUF_UPDATE: + case CAM_ISP_HW_CMD_GET_HFR_UPDATE: + case CAM_ISP_HW_CMD_STRIPE_UPDATE: + case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ: + case CAM_ISP_HW_CMD_GET_WM_SECURE_MODE: + case CAM_ISP_HW_CMD_IS_CONSUMED_ADDR_SUPPORT: + case CAM_ISP_HW_CMD_GET_RES_FOR_MID: + case CAM_ISP_HW_CMD_DUMP_BUS_INFO: + case CAM_ISP_HW_CMD_IS_PDAF_RDI2_MUX_EN: + case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG: + case CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR: + rc = 
core_info->tfe_bus->hw_ops.process_cmd( + core_info->tfe_bus->bus_priv, cmd_type, cmd_args, + arg_size); + break; + default: + CAM_ERR(CAM_ISP, "TFE:%d Invalid cmd type:%d", + core_info->core_index, cmd_type); + rc = -EINVAL; + break; + } + return rc; +} + +int cam_tfe_core_init(struct cam_tfe_hw_core_info *core_info, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + struct cam_tfe_hw_info *tfe_hw_info) +{ + int rc = -EINVAL; + int i; + + rc = cam_tfe_top_init(soc_info, hw_intf, tfe_hw_info->top_hw_info, + core_info); + if (rc) { + CAM_ERR(CAM_ISP, "TFE:%d Error cam_tfe_top_init failed", + core_info->core_index); + goto end; + } + + rc = cam_tfe_bus_init(soc_info, hw_intf, + tfe_hw_info->bus_hw_info, core_info, + &core_info->tfe_bus); + if (rc) { + CAM_ERR(CAM_ISP, "TFE:%d Error cam_tfe_bus_init failed", + core_info->core_index); + goto deinit_top; + } + + INIT_LIST_HEAD(&core_info->free_payload_list); + for (i = 0; i < CAM_TFE_EVT_MAX; i++) { + INIT_LIST_HEAD(&core_info->evt_payload[i].list); + list_add_tail(&core_info->evt_payload[i].list, + &core_info->free_payload_list); + } + + core_info->irq_err_config = false; + core_info->irq_err_config_cnt = 0; + spin_lock_init(&core_info->spin_lock); + init_completion(&core_info->reset_complete); + + return rc; + +deinit_top: + cam_tfe_top_deinit(core_info->top_priv); + +end: + return rc; +} + +int cam_tfe_core_deinit(struct cam_tfe_hw_core_info *core_info, + struct cam_tfe_hw_info *tfe_hw_info) +{ + int rc = -EINVAL; + int i; + unsigned long flags; + + spin_lock_irqsave(&core_info->spin_lock, flags); + + INIT_LIST_HEAD(&core_info->free_payload_list); + for (i = 0; i < CAM_TFE_EVT_MAX; i++) + INIT_LIST_HEAD(&core_info->evt_payload[i].list); + + rc = cam_tfe_bus_deinit(&core_info->tfe_bus); + if (rc) + CAM_ERR(CAM_ISP, "TFE:%d Error cam_tfe_bus_deinit failed rc=%d", + core_info->core_index, rc); + + rc = cam_tfe_top_deinit(core_info->top_priv); + CAM_MEM_FREE(core_info->top_priv); + core_info->top_priv = 
NULL; + + if (rc) + CAM_ERR(CAM_ISP, "Error cam_tfe_top_deinit failed rc=%d", rc); + + spin_unlock_irqrestore(&core_info->spin_lock, flags); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.h new file mode 100644 index 0000000000..0ef9aa7e27 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + */ + + +#ifndef _CAM_TFE_CORE_H_ +#define _CAM_TFE_CORE_H_ + +#include +#include "cam_hw_intf.h" +#include "cam_tfe_bus.h" +#include "cam_tfe_hw_intf.h" +#include "cam_tfe_irq.h" + +#define CAM_TFE_CAMIF_VER_1_0 0x10 +#define CAM_TFE_RDI_VER_1_0 0x1000 +#define CAM_TFE_TOP_1_0 0x1000 +#define CAM_TFE_TOP_IN_PORT_MAX 4 +#define CAM_TFE_RDI_MAX 4 + +#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS BIT(0) +#define CAM_TFE_EVT_MAX 256 + +#define CAM_TFE_MAX_REG_DUMP_ENTRIES 20 +#define CAM_TFE_MAX_LUT_DUMP_ENTRIES 10 + +#define CAM_TFE_MAX_CLC 40 +#define CAM_TFE_CLC_NAME_LENGTH_MAX 32 +#define CAM_TFE_MAX_DEBUG_REG 10 +#define CAM_TFE_MAX_PERF_CNT 2 + +/*we take each word as uint32_t, for dumping uint64_t count as 2 words + * soc index + * clk_rate--> uint64_t--> count as 2 words + * BW--> uint64_t --> count as 2 words + * MAX_NIU + */ +#define CAM_TFE_CORE_DUMP_MISC_NUM_WORDS 4 + +enum cam_tfe_lut_word_size { + CAM_TFE_LUT_WORD_SIZE_32, + CAM_TFE_LUT_WORD_SIZE_64, + CAM_TFE_LUT_WORD_SIZE_MAX, +}; + +struct cam_tfe_reg_dump_entry { + uint32_t start_offset; + uint32_t end_offset; +}; + +struct cam_tfe_lut_dump_entry { + enum cam_tfe_lut_word_size lut_word_size; + uint32_t lut_bank_sel; + uint32_t lut_addr_size; + uint32_t dmi_reg_offset; +}; +struct cam_tfe_reg_dump_data { + uint32_t num_reg_dump_entries; + uint32_t num_lut_dump_entries; + 
uint32_t bus_start_addr; + uint32_t bus_write_top_end_addr; + uint32_t bus_client_start_addr; + uint32_t bus_client_offset; + uint32_t num_bus_clients; + struct cam_tfe_reg_dump_entry + reg_entry[CAM_TFE_MAX_REG_DUMP_ENTRIES]; + struct cam_tfe_lut_dump_entry + lut_entry[CAM_TFE_MAX_LUT_DUMP_ENTRIES]; +}; + +struct cam_tfe_top_reg_perf_cfg { + uint32_t perf_cnt_cfg; + uint32_t perf_pixel_count; + uint32_t perf_line_count; + uint32_t perf_stall_count; + uint32_t perf_always_count; + uint32_t perf_count_status; +}; + +struct cam_tfe_top_reg_offset_common { + uint32_t hw_version; + uint32_t hw_capability; + uint32_t lens_feature; + uint32_t stats_feature; + uint32_t zoom_feature; + uint32_t global_reset_cmd; + uint32_t core_cgc_ctrl; + uint32_t ahb_cgc_ctrl; + uint32_t core_cfg_0; + uint32_t core_cfg_1; + uint32_t reg_update_cmd; + uint32_t diag_config; + uint32_t diag_sensor_status_0; + uint32_t diag_sensor_status_1; + uint32_t diag_sensor_frame_cnt_status; + uint32_t violation_status; + uint32_t stats_throttle_cnt_cfg_0; + uint32_t stats_throttle_cnt_cfg_1; + uint32_t num_debug_reg; + uint32_t debug_reg[CAM_TFE_MAX_DEBUG_REG]; + uint32_t debug_cfg; + uint32_t num_perf_cfg; + struct cam_tfe_top_reg_perf_cfg perf_cfg[CAM_TFE_MAX_PERF_CNT]; + + /*reg data */ + uint32_t diag_min_hbi_error_shift; + uint32_t diag_neq_hbi_shift; + uint32_t diag_sensor_hbi_mask; + + /* configuration */ + bool serializer_supported; +}; + +struct cam_tfe_camif_reg { + uint32_t hw_version; + uint32_t hw_status; + uint32_t module_cfg; + uint32_t pdaf_raw_crop_width_cfg; + uint32_t pdaf_raw_crop_height_cfg; + uint32_t line_skip_pattern; + uint32_t pixel_skip_pattern; + uint32_t period_cfg; + uint32_t irq_subsample_pattern; + uint32_t epoch_irq_cfg; + uint32_t debug_1; + uint32_t debug_0; + uint32_t test_bus_ctrl; + uint32_t spare; + uint32_t reg_update_cmd; +}; + +struct cam_tfe_camif_reg_data { + uint32_t extern_reg_update_mask; + uint32_t dual_tfe_pix_en_shift; + uint32_t 
extern_reg_update_shift; + uint32_t camif_pd_rdi2_src_sel_shift; + uint32_t dual_tfe_sync_sel_shift; + uint32_t delay_line_en_shift; + + uint32_t pixel_pattern_shift; + uint32_t pixel_pattern_mask; + uint32_t module_enable_shift; + uint32_t pix_out_enable_shift; + uint32_t pdaf_output_enable_shift; + + uint32_t dsp_mode_shift; + uint32_t dsp_mode_mask; + uint32_t dsp_en_shift; + uint32_t dsp_en_mask; + + uint32_t reg_update_cmd_data; + uint32_t epoch_line_cfg; + uint32_t sof_irq_mask; + uint32_t epoch0_irq_mask; + uint32_t epoch1_irq_mask; + uint32_t eof_irq_mask; + uint32_t reg_update_irq_mask; + uint32_t error_irq_mask0; + uint32_t error_irq_mask2; + uint32_t subscribe_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM]; + + uint32_t enable_diagnostic_hw; + uint32_t perf_cnt_start_cmd_shift; + uint32_t perf_cnt_continuous_shift; + uint32_t perf_client_sel_shift; + uint32_t perf_window_start_shift; + uint32_t perf_window_end_shift; + + uint32_t ai_c_srl_en_shift; + uint32_t ds16_c_srl_en_shift; + uint32_t ds4_c_srl_en_shift; +}; + +struct cam_tfe_camif_hw_info { + struct cam_tfe_camif_reg *camif_reg; + struct cam_tfe_camif_reg_data *reg_data; +}; + +struct cam_tfe_rdi_reg { + uint32_t rdi_hw_version; + uint32_t rdi_hw_status; + uint32_t rdi_module_config; + uint32_t rdi_skip_period; + uint32_t rdi_irq_subsample_pattern; + uint32_t rdi_epoch_irq; + uint32_t rdi_debug_1; + uint32_t rdi_debug_0; + uint32_t rdi_test_bus_ctrl; + uint32_t rdi_spare; + uint32_t reg_update_cmd; +}; + +struct cam_tfe_rdi_reg_data { + uint32_t reg_update_cmd_data; + uint32_t epoch_line_cfg; + + uint32_t pixel_pattern_shift; + uint32_t pixel_pattern_mask; + uint32_t rdi_out_enable_shift; + + uint32_t sof_irq_mask; + uint32_t epoch0_irq_mask; + uint32_t epoch1_irq_mask; + uint32_t eof_irq_mask; + uint32_t error_irq_mask0; + uint32_t error_irq_mask2; + uint32_t subscribe_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM]; + uint32_t enable_diagnostic_hw; + uint32_t diag_sensor_sel; + uint32_t diag_sensor_shift; +}; + +struct 
cam_tfe_clc_hw_status { + uint8_t name[CAM_TFE_CLC_NAME_LENGTH_MAX]; + uint32_t hw_status_reg; +}; + +struct cam_tfe_rdi_hw_info { + struct cam_tfe_rdi_reg *rdi_reg; + struct cam_tfe_rdi_reg_data *reg_data; +}; + +struct cam_tfe_top_hw_info { + struct cam_tfe_top_reg_offset_common *common_reg; + struct cam_tfe_camif_hw_info camif_hw_info; + struct cam_tfe_rdi_hw_info rdi_hw_info[CAM_TFE_RDI_MAX]; + uint32_t in_port[CAM_TFE_TOP_IN_PORT_MAX]; + struct cam_tfe_reg_dump_data reg_dump_data; +}; + +struct cam_tfe_hw_info { + uint32_t top_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM]; + uint32_t top_irq_clear[CAM_TFE_TOP_IRQ_REG_NUM]; + uint32_t top_irq_status[CAM_TFE_TOP_IRQ_REG_NUM]; + uint32_t top_irq_cmd; + uint32_t global_clear_bitmask; + + uint32_t bus_irq_mask[CAM_TFE_BUS_MAX_IRQ_REGISTERS]; + uint32_t bus_irq_clear[CAM_TFE_BUS_MAX_IRQ_REGISTERS]; + uint32_t bus_irq_status[CAM_TFE_BUS_MAX_IRQ_REGISTERS]; + uint32_t bus_irq_cmd; + + uint32_t bus_violation_reg; + uint32_t bus_overflow_reg; + uint32_t bus_image_size_vilation_reg; + uint32_t bus_overflow_clear_cmd; + uint32_t debug_status_top; + + uint32_t reset_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM]; + uint32_t error_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM]; + uint32_t bus_reg_irq_mask[CAM_TFE_BUS_MAX_IRQ_REGISTERS]; + uint32_t bus_error_irq_mask[CAM_TFE_BUS_MAX_IRQ_REGISTERS]; + + uint32_t num_clc; + struct cam_tfe_clc_hw_status *clc_hw_status_info; + + uint32_t top_version; + void *top_hw_info; + + uint32_t bus_version; + void *bus_hw_info; +}; + +struct cam_tfe_hw_core_info { + uint32_t core_index; + struct cam_tfe_hw_info *tfe_hw_info; + void *top_priv; + struct cam_tfe_bus *tfe_bus; + void *tasklet_info; + struct cam_tfe_irq_evt_payload evt_payload[CAM_TFE_EVT_MAX]; + struct list_head free_payload_list; + bool irq_err_config; + uint32_t irq_err_config_cnt; + spinlock_t spin_lock; + struct completion reset_complete; +}; + +int cam_tfe_get_hw_caps(void *device_priv, + void *get_hw_cap_args, uint32_t arg_size); +int cam_tfe_init_hw(void 
*device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_tfe_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size); +int cam_tfe_reset(void *device_priv, + void *reset_core_args, uint32_t arg_size); +int cam_tfe_reserve(void *device_priv, + void *reserve_args, uint32_t arg_size); +int cam_tfe_release(void *device_priv, + void *reserve_args, uint32_t arg_size); +int cam_tfe_start(void *device_priv, + void *start_args, uint32_t arg_size); +int cam_tfe_stop(void *device_priv, + void *stop_args, uint32_t arg_size); +int cam_tfe_read(void *device_priv, + void *read_args, uint32_t arg_size); +int cam_tfe_write(void *device_priv, + void *write_args, uint32_t arg_size); +int cam_tfe_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); + +irqreturn_t cam_tfe_irq(int irq_num, void *data); + +int cam_tfe_core_init(struct cam_tfe_hw_core_info *core_info, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + struct cam_tfe_hw_info *tfe_hw_info); + +int cam_tfe_core_deinit(struct cam_tfe_hw_core_info *core_info, + struct cam_tfe_hw_info *tfe_hw_info); + +#endif /* _CAM_TFE_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.c new file mode 100644 index 0000000000..d67577cfd3 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include "cam_tfe_dev.h" +#include "cam_tfe_core.h" +#include "cam_tfe_soc.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" + +static struct cam_isp_hw_intf_data cam_tfe_hw_list[CAM_TFE_HW_NUM_MAX]; + +static int cam_tfe_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *tfe_hw = NULL; + struct cam_hw_intf *tfe_hw_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_tfe_hw_core_info *core_info = NULL; + struct cam_tfe_hw_info *hw_info = NULL; + struct cam_tfe_soc_private *tfe_soc_priv; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + uint32_t i; + + tfe_hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!tfe_hw_intf) { + rc = -ENOMEM; + goto end; + } + + of_property_read_u32(pdev->dev.of_node, + "cell-index", &tfe_hw_intf->hw_idx); + + tfe_hw = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!tfe_hw) { + rc = -ENOMEM; + goto free_tfe_hw_intf; + } + + tfe_hw->soc_info.pdev = pdev; + tfe_hw->soc_info.dev = &pdev->dev; + tfe_hw->soc_info.dev_name = pdev->name; + tfe_hw_intf->hw_priv = tfe_hw; + tfe_hw_intf->hw_ops.get_hw_caps = cam_tfe_get_hw_caps; + tfe_hw_intf->hw_ops.init = cam_tfe_init_hw; + tfe_hw_intf->hw_ops.deinit = cam_tfe_deinit_hw; + tfe_hw_intf->hw_ops.reset = cam_tfe_reset; + tfe_hw_intf->hw_ops.reserve = cam_tfe_reserve; + tfe_hw_intf->hw_ops.release = cam_tfe_release; + tfe_hw_intf->hw_ops.start = cam_tfe_start; + tfe_hw_intf->hw_ops.stop = cam_tfe_stop; + tfe_hw_intf->hw_ops.read = cam_tfe_read; + tfe_hw_intf->hw_ops.write = cam_tfe_write; + tfe_hw_intf->hw_ops.process_cmd = cam_tfe_process_cmd; + tfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_TFE; + + CAM_DBG(CAM_ISP, "TFE component bind,type %d index %d", + tfe_hw_intf->hw_type, tfe_hw_intf->hw_idx); + + platform_set_drvdata(pdev, tfe_hw_intf); + + tfe_hw->core_info = CAM_MEM_ZALLOC(sizeof(struct 
cam_tfe_hw_core_info), + GFP_KERNEL); + if (!tfe_hw->core_info) { + CAM_DBG(CAM_ISP, "Failed to alloc for core"); + rc = -ENOMEM; + goto free_tfe_hw; + } + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_ISP, "Of_match Failed"); + rc = -EINVAL; + goto free_core_info; + } + hw_info = (struct cam_tfe_hw_info *)match_dev->data; + core_info->tfe_hw_info = hw_info; + core_info->core_index = tfe_hw_intf->hw_idx; + + rc = cam_tfe_init_soc_resources(&tfe_hw->soc_info, cam_tfe_irq, + tfe_hw); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Failed to init soc rc=%d", rc); + goto free_core_info; + } + + rc = cam_tfe_core_init(core_info, &tfe_hw->soc_info, + tfe_hw_intf, hw_info); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Failed to init core rc=%d", rc); + goto deinit_soc; + } + + tfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&tfe_hw->hw_mutex); + spin_lock_init(&tfe_hw->hw_lock); + init_completion(&tfe_hw->hw_complete); + + if (tfe_hw_intf->hw_idx < CAM_TFE_HW_NUM_MAX) + cam_tfe_hw_list[tfe_hw_intf->hw_idx].hw_intf = tfe_hw_intf; + else { + CAM_ERR(CAM_ISP, "HW index:%d is wrong max HW idx:%d", + tfe_hw_intf->hw_idx, CAM_TFE_HW_NUM_MAX); + goto deinit_soc; + } + + tfe_soc_priv = tfe_hw->soc_info.soc_private; + cam_tfe_hw_list[tfe_hw_intf->hw_idx].num_hw_pid = tfe_soc_priv->num_pid; + for (i = 0; i < tfe_soc_priv->num_pid; i++) + cam_tfe_hw_list[tfe_hw_intf->hw_idx].hw_pid[i] = + tfe_soc_priv->pid[i]; + + cam_tfe_init_hw(tfe_hw, NULL, 0); + cam_tfe_deinit_hw(tfe_hw, NULL, 0); + + CAM_DBG(CAM_ISP, "TFE:%d component bound successfully", + tfe_hw_intf->hw_idx); + return rc; + +deinit_soc: + if (cam_tfe_deinit_soc_resources(&tfe_hw->soc_info)) + CAM_ERR(CAM_ISP, "Failed to deinit soc"); +free_core_info: + CAM_MEM_FREE(tfe_hw->core_info); +free_tfe_hw: + CAM_MEM_FREE(tfe_hw); +free_tfe_hw_intf: + CAM_MEM_FREE(tfe_hw_intf); +end: + return rc; +} + +static void 
cam_tfe_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *tfe_hw = NULL; + struct cam_hw_intf *tfe_hw_intf = NULL; + struct cam_tfe_hw_core_info *core_info = NULL; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + + tfe_hw_intf = platform_get_drvdata(pdev); + if (!tfe_hw_intf) { + CAM_ERR(CAM_ISP, "Error! No data in pdev"); + return; + } + + CAM_DBG(CAM_ISP, "type %d index %d", + tfe_hw_intf->hw_type, tfe_hw_intf->hw_idx); + + if (tfe_hw_intf->hw_idx < CAM_TFE_HW_NUM_MAX) + cam_tfe_hw_list[tfe_hw_intf->hw_idx].hw_intf = NULL; + + tfe_hw = tfe_hw_intf->hw_priv; + if (!tfe_hw) { + CAM_ERR(CAM_ISP, "Error! HW data is NULL"); + goto free_tfe_hw_intf; + } + + core_info = (struct cam_tfe_hw_core_info *)tfe_hw->core_info; + if (!core_info) { + CAM_ERR(CAM_ISP, "Error! core data NULL"); + goto deinit_soc; + } + + rc = cam_tfe_core_deinit(core_info, core_info->tfe_hw_info); + if (rc < 0) + CAM_ERR(CAM_ISP, "Failed to deinit core rc=%d", rc); + + CAM_MEM_FREE(tfe_hw->core_info); + +deinit_soc: + rc = cam_tfe_deinit_soc_resources(&tfe_hw->soc_info); + if (rc < 0) + CAM_ERR(CAM_ISP, "Failed to deinit soc rc=%d", rc); + + mutex_destroy(&tfe_hw->hw_mutex); + CAM_MEM_FREE(tfe_hw); + + CAM_DBG(CAM_ISP, "TFE%d component unbound", tfe_hw_intf->hw_idx); + +free_tfe_hw_intf: + CAM_MEM_FREE(tfe_hw_intf); +} + +const static struct component_ops cam_tfe_component_ops = { + .bind = cam_tfe_component_bind, + .unbind = cam_tfe_component_unbind, +}; + +int cam_tfe_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ISP, "Adding TFE component"); + rc = component_add(&pdev->dev, &cam_tfe_component_ops); + if (rc) + CAM_ERR(CAM_ISP, "failed to add component rc: %d", rc); + + return rc; +} + +int cam_tfe_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_tfe_component_ops); + return 0; +} + +int cam_tfe_hw_init(struct cam_isp_hw_intf_data **tfe_hw_intf, + uint32_t hw_idx) +{ + int 
rc = 0; + + if (cam_tfe_hw_list[hw_idx].hw_intf) { + *tfe_hw_intf = &cam_tfe_hw_list[hw_idx]; + rc = 0; + } else { + *tfe_hw_intf = NULL; + rc = -ENODEV; + } + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.h new file mode 100644 index 0000000000..63ae1b17d7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_dev.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_TFE_DEV_H_ +#define _CAM_TFE_DEV_H_ + +#include + +/* + * cam_tfe_probe() + * + * @brief: Driver probe function called on Boot + * + * @pdev: Platform Device pointer + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_probe(struct platform_device *pdev); + +/* + * cam_tfe_remove() + * + * @brief: Driver remove function + * + * @pdev: Platform Device pointer + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_remove(struct platform_device *pdev); + +int cam_tfe_init_module(void); +void cam_tfe_exit_module(void); + +#endif /* _CAM_TFE_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_irq.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_irq.h new file mode 100644 index 0000000000..dba62d7104 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_irq.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_TFE_IRQ_H_ +#define _CAM_TFE_IRQ_H_ + +#include + +#define CAM_TFE_TOP_IRQ_REG_NUM 3 + +/* + * cam_tfe_irq_config() + * + * @brief: Tfe hw irq configuration + * + * @tfe_core_data: tfe core pointer + * @irq_mask: Irq mask for enable interrupts or disable + * @num_reg: Number irq mask registers + * @enable: enable = 1, enable the given irq mask interrupts + * enable = 0 disable the given irq mask interrupts + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_irq_config(void *tfe_core_data, + uint32_t *irq_mask, uint32_t num_reg, bool enable); + + +#endif /* _CAM_TFE_IRQ_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.c new file mode 100644 index 0000000000..a726ff87bf --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include "cam_cpas_api.h" +#include "cam_tfe_soc.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +static bool cam_tfe_cpas_cb(uint32_t client_handle, void *userdata, + struct cam_cpas_irq_data *irq_data) +{ + bool error_handled = false; + + if (!irq_data) + return error_handled; + + CAM_DBG(CAM_ISP, "CPSS error type=%d ", + irq_data->irq_type); + + return error_handled; +} + +int cam_tfe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t tfe_irq_handler, void *data) +{ + struct cam_tfe_soc_private *soc_private; + struct cam_cpas_register_params cpas_register_param; + int rc = 0, i = 0, num_pid = 0; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_tfe_soc_private), + GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_ISP, "Error! soc_private Alloc Failed"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_ISP, "Error! 
get DT properties failed rc=%d", rc); + goto free_soc_private; + } + + /* set some default values */ + soc_private->num_pid = 0; + + num_pid = of_property_count_u32_elems(soc_info->pdev->dev.of_node, + "cam_hw_pid"); + CAM_DBG(CAM_CPAS, "tfe:%d pid count %d", soc_info->index, num_pid); + + if (num_pid <= 0 || num_pid > CAM_ISP_HW_MAX_PID_VAL) + goto clk_option; + + for (i = 0; i < num_pid; i++) { + of_property_read_u32_index(soc_info->pdev->dev.of_node, + "cam_hw_pid", i, &soc_private->pid[i]); + CAM_INFO(CAM_CPAS, "tfe:%d I:%d pid %d", soc_info->index, + i, soc_private->pid[i]); + } + + soc_private->num_pid = num_pid; + +clk_option: + + rc = cam_soc_util_get_option_clk_by_name(soc_info, CAM_TFE_DSP_CLK_NAME, + &soc_private->dsp_clk_index); + if (rc) + CAM_WARN(CAM_ISP, "Option clk get failed with rc %d", rc); + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, tfe_irq_handler, &(irq_data[0])); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Error! Request platform resources failed rc=%d", rc); + goto free_soc_private; + } + + memset(&cpas_register_param, 0, sizeof(cpas_register_param)); + strscpy(cpas_register_param.identifier, "tfe", + CAM_HW_IDENTIFIER_LENGTH); + cpas_register_param.cell_index = soc_info->index; + cpas_register_param.dev = soc_info->dev; + cpas_register_param.cam_cpas_client_cb = cam_tfe_cpas_cb; + cpas_register_param.userdata = soc_info; + rc = cam_cpas_register_client(&cpas_register_param); + if (rc) { + CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc); + goto release_soc; + } + + soc_private->cpas_handle = cpas_register_param.client_handle; + + return rc; + +release_soc: + cam_soc_util_release_platform_resource(soc_info); +free_soc_private: + CAM_MEM_FREE(soc_private); + + return rc; +} + +int cam_tfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_tfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error! 
soc_info NULL"); + return -ENODEV; + } + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error! soc_private NULL"); + return -ENODEV; + } + rc = cam_cpas_unregister_client(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_ISP, "CPAS0 unregistration failed rc=%d", rc); + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_ERR(CAM_ISP, + "Error! Release platform resource failed rc=%d", rc); + + + if (soc_private->dsp_clk_index != -1) { + rc = cam_soc_util_put_optional_clk(soc_info, + soc_private->dsp_clk_index); + if (rc) + CAM_ERR(CAM_ISP, + "Error Put dsp clk failed rc=%d", rc); + } + + CAM_MEM_FREE(soc_private); + + return rc; +} + +int cam_tfe_enable_soc_resources( + struct cam_hw_soc_info *soc_info, + unsigned long max_clk_rate) +{ + int rc = 0; + struct cam_tfe_soc_private *soc_private; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + int32_t apply_level = CAM_LOWSVS_VOTE; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error! Invalid params"); + rc = -EINVAL; + goto end; + } + soc_private = soc_info->soc_private; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 1; + axi_vote.axi_path[0].path_data_type = CAM_CPAS_API_PATH_DATA_STD_START; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + + rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_ISP, "Error! CPAS0 start failed rc=%d", rc); + rc = -EFAULT; + goto end; + } + + if (max_clk_rate) { + rc = cam_soc_util_get_clk_level(soc_info, max_clk_rate, soc_info->src_clk_idx, + &apply_level); + if (rc) + CAM_ERR(CAM_ISP, "Error! 
getting clk level"); + } + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + apply_level, true); + + if (rc) { + CAM_ERR(CAM_ISP, "Error! enable platform failed rc=%d", rc); + goto stop_cpas; + } + + return rc; + +stop_cpas: + cam_cpas_stop(soc_private->cpas_handle); +end: + return rc; +} + +int cam_tfe_soc_enable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name) +{ + int rc = 0; + struct cam_tfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + rc = -EINVAL; + return rc; + } + soc_private = soc_info->soc_private; + + if (strcmp(clk_name, CAM_TFE_DSP_CLK_NAME) == 0) { + rc = cam_soc_util_clk_enable(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_private->dsp_clk_index, 0); + if (rc) + CAM_ERR(CAM_ISP, + "Error enable dsp clk failed rc=%d", rc); + } + + return rc; +} + +int cam_tfe_soc_disable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name) +{ + int rc = 0; + struct cam_tfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + rc = -EINVAL; + return rc; + } + soc_private = soc_info->soc_private; + + if (strcmp(clk_name, CAM_TFE_DSP_CLK_NAME) == 0) { + rc = cam_soc_util_clk_disable(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_private->dsp_clk_index); + if (rc) + CAM_ERR(CAM_ISP, + "Error enable dsp clk failed rc=%d", rc); + } + + return rc; +} + + +int cam_tfe_disable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_tfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error! Invalid params"); + rc = -EINVAL; + return rc; + } + soc_private = soc_info->soc_private; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) { + CAM_ERR(CAM_ISP, "Disable platform failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_stop(soc_private->cpas_handle); + if (rc) { + CAM_ERR(CAM_ISP, "Error! 
CPAS stop failed rc=%d", rc); + return rc; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.h new file mode 100644 index 0000000000..11ae1591cb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_soc.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_TFE_SOC_H_ +#define _CAM_TFE_SOC_H_ + +#include "cam_soc_util.h" +#include "cam_isp_hw.h" + +#define CAM_TFE_DSP_CLK_NAME "tfe_dsp_clk" + +enum cam_cpas_handle_id { + CAM_CPAS_HANDLE_CAMIF, + CAM_CPAS_HANDLE_RAW, + CAM_CPAS_HANDLE_MAX, +}; + +/* + * struct cam_tfe_soc_private: + * + * @Brief: Private SOC data specific to TFE HW Driver + * + * @cpas_handle: Handle returned on registering with CPAS driver. + * This handle is used for all further interface + * with CPAS. 
+ * @cpas_version: Has cpas version read from Hardware + * @dsp_clk_index: DSP clk index in optional clocks + * @num_pid: number of pids of tfe + * @pid: TFE pid value list + */ +struct cam_tfe_soc_private { + uint32_t cpas_handle; + uint32_t cpas_version; + int32_t dsp_clk_index; + uint32_t num_pid; + uint32_t pid[CAM_ISP_HW_MAX_PID_VAL]; +}; + +/* + * cam_tfe_init_soc_resources() + * + * @Brief: Initialize SOC resources including private data + * + * @soc_info: Device soc information + * @handler: Irq handler function pointer + * @irq_data: Irq handler function Callback data + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t tfe_irq_handler, void *irq_data); + +/* + * cam_tfe_deinit_soc_resources() + * + * @Brief: Deinitialize SOC resources including private data + * + * @soc_info: Device soc information + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info); + +/* + * cam_tfe_enable_soc_resources() + * + * @brief: Enable regulator, irq resources, start CPAS + * + * @soc_info: Device soc information + * + * @max_clk_rate: Max clock rate value + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_enable_soc_resources(struct cam_hw_soc_info *soc_info, + unsigned long max_clk_rate); + +/* + * cam_tfe_disable_soc_resources() + * + * @brief: Disable regulator, irq resources, stop CPAS + * + * @soc_info: Device soc information + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_disable_soc_resources(struct cam_hw_soc_info *soc_info); + +/* + * cam_tfe_soc_enable_clk() + * + * @brief: Enable clock with given name + * + * @soc_info: Device soc information + * @clk_name: Name of clock to enable + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_soc_enable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name); + +/* + * cam_tfe_soc_disable_dsp_clk() + * + * @brief: 
Disable clock with given name + * + * @soc_info: Device soc information + * @clk_name: Name of clock to enable + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_tfe_soc_disable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name); + +#endif /* _CAM_TFE_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c new file mode 100644 index 0000000000..6d641c3ea4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c @@ -0,0 +1,726 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "cam_tasklet_util.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_vfe_soc.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus.h" +#include "cam_vfe_top.h" +#include "cam_ife_hw_mgr.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_common_util.h" + +static const char drv_name[] = "vfe"; + +int cam_vfe_get_hw_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size) +{ + struct cam_hw_info *vfe_dev = hw_priv; + struct cam_vfe_hw_core_info *core_info = NULL; + int rc = 0; + + CAM_DBG(CAM_ISP, "Enter"); + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + core_info = (struct cam_vfe_hw_core_info *)vfe_dev->core_info; + + if (core_info->vfe_top->hw_ops.get_hw_caps) + core_info->vfe_top->hw_ops.get_hw_caps( + core_info->vfe_top->top_priv, + get_hw_cap_args, arg_size); + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +int cam_vfe_reset_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc = -EINVAL; + struct cam_hw_info *vfe_hw; + + vfe_hw = th_payload->handler_priv; + + 
CAM_DBG(CAM_ISP, "TOP_IRQ_STATUS_0 = 0x%x", + th_payload->evt_status_arr[CAM_IFE_IRQ_CAMIF_REG_STATUS0]); + + complete(&vfe_hw->hw_complete); + + return rc; +} + +int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *vfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_hw_core_info *core_info = NULL; + struct cam_isp_resource_node *isp_res = NULL; + int rc = 0; + uint32_t reset_core_args = + CAM_VFE_HW_RESET_HW_AND_REG; + + CAM_DBG(CAM_ISP, "Enter"); + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + mutex_lock(&vfe_hw->hw_mutex); + vfe_hw->open_count++; + if (vfe_hw->open_count > 1) { + mutex_unlock(&vfe_hw->hw_mutex); + CAM_DBG(CAM_ISP, "VFE has already been initialized cnt %d", + vfe_hw->open_count); + return 0; + } + mutex_unlock(&vfe_hw->hw_mutex); + + soc_info = &vfe_hw->soc_info; + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + + /* Turn ON Regulators, Clocks and other SOC resources */ + rc = cam_vfe_enable_soc_resources(soc_info); + if (rc) { + CAM_ERR(CAM_ISP, "Enable SOC failed"); + rc = -EFAULT; + goto decrement_open_cnt; + } + + isp_res = (struct cam_isp_resource_node *)init_hw_args; + if (isp_res && isp_res->init) { + rc = isp_res->init(isp_res, NULL, 0); + if (rc) { + CAM_ERR(CAM_ISP, "init Failed rc=%d", rc); + goto disable_soc; + } + } + + CAM_DBG(CAM_ISP, "Enable soc done"); + + /* Do HW Reset */ + rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t)); + if (rc) { + CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc); + goto deinint_vfe_res; + } + + rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv, + NULL, 0); + if (rc) { + CAM_ERR(CAM_ISP, "Bus HW init Failed rc=%d", rc); + goto deinint_vfe_res; + } + + rc = core_info->vfe_top->hw_ops.init(core_info->vfe_top->top_priv, + NULL, 0); + if (rc) { + CAM_ERR(CAM_ISP, "Top HW init Failed rc=%d", rc); + goto deinint_vfe_res; + } + + if (core_info->vfe_rd_bus) { + rc = 
core_info->vfe_rd_bus->hw_ops.init( + core_info->vfe_rd_bus->bus_priv, + NULL, 0); + if (rc) { + CAM_ERR(CAM_ISP, "Bus RD HW init Failed rc=%d", rc); + goto deinint_vfe_res; + } + } + + vfe_hw->hw_state = CAM_HW_STATE_POWER_UP; + return rc; + +deinint_vfe_res: + if (isp_res && isp_res->deinit) + isp_res->deinit(isp_res, NULL, 0); +disable_soc: + cam_vfe_disable_soc_resources(soc_info); +decrement_open_cnt: + mutex_lock(&vfe_hw->hw_mutex); + vfe_hw->open_count--; + mutex_unlock(&vfe_hw->hw_mutex); + return rc; +} + +int cam_vfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *vfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_hw_core_info *core_info = NULL; + struct cam_isp_resource_node *isp_res = NULL; + int rc = 0; + uint32_t reset_core_args = + CAM_VFE_HW_RESET_HW_AND_REG; + + CAM_DBG(CAM_ISP, "Enter"); + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + isp_res = (struct cam_isp_resource_node *)deinit_hw_args; + if (isp_res && isp_res->deinit) { + rc = isp_res->deinit(isp_res, NULL, 0); + if (rc) + CAM_ERR(CAM_ISP, "deinit failed"); + } + + mutex_lock(&vfe_hw->hw_mutex); + if (!vfe_hw->open_count) { + mutex_unlock(&vfe_hw->hw_mutex); + CAM_ERR(CAM_ISP, "Error. 
Unbalanced deinit"); + return -EFAULT; + } + vfe_hw->open_count--; + if (vfe_hw->open_count) { + mutex_unlock(&vfe_hw->hw_mutex); + CAM_DBG(CAM_ISP, "open_cnt non-zero =%d", vfe_hw->open_count); + return 0; + } + mutex_unlock(&vfe_hw->hw_mutex); + + soc_info = &vfe_hw->soc_info; + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + + rc = core_info->vfe_bus->hw_ops.deinit(core_info->vfe_bus->bus_priv, + NULL, 0); + if (rc) + CAM_ERR(CAM_ISP, "Bus HW deinit Failed rc=%d", rc); + + if (core_info->vfe_rd_bus) { + rc = core_info->vfe_rd_bus->hw_ops.deinit( + core_info->vfe_rd_bus->bus_priv, + NULL, 0); + if (rc) + CAM_ERR(CAM_ISP, "Bus HW deinit Failed rc=%d", rc); + } + + rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t)); + + /* Turn OFF Regulators, Clocks and other SOC resources */ + CAM_DBG(CAM_ISP, "Disable SOC resource, rc: %d", rc); + rc = cam_vfe_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_ISP, "Disable SOC failed"); + + vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +int cam_vfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size) +{ + struct cam_hw_info *vfe_hw = hw_priv; + struct cam_vfe_hw_core_info *core_info; + struct cam_vfe_irq_hw_info *irq_info; + uint32_t top_reset_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + int rc = 0; + + CAM_DBG(CAM_ISP, "Enter"); + + + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + irq_info = core_info->vfe_hw_info->irq_hw_info; + + if(!(irq_info->supported_irq & CAM_VFE_HW_IRQ_CAP_RESET)) + goto skip_reset; + + memset(top_reset_irq_reg_mask, 0, sizeof(top_reset_irq_reg_mask)); + top_reset_irq_reg_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] = + irq_info->reset_mask; + + irq_info->reset_irq_handle = cam_irq_controller_subscribe_irq( + core_info->vfe_irq_controller, + CAM_IRQ_PRIORITY_0, + top_reset_irq_reg_mask, + vfe_hw, + 
cam_vfe_reset_irq_top_half, + NULL, NULL, NULL, CAM_IRQ_EVT_GROUP_0); + + if (irq_info->reset_irq_handle < 1) { + CAM_ERR(CAM_ISP, "subscribe irq controller failed"); + irq_info->reset_irq_handle = 0; + return -EFAULT; + } + + reinit_completion(&vfe_hw->hw_complete); + + CAM_DBG(CAM_ISP, "Calling RESET on VFE"); + + core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv, + reset_core_args, arg_size); + + /* Wait for Completion or Timeout of 500ms */ + rc = cam_common_wait_for_completion_timeout( + &vfe_hw->hw_complete, 500); + + if (!rc) + CAM_ERR(CAM_ISP, "Reset Timeout"); + else + CAM_DBG(CAM_ISP, "Reset complete (%d)", rc); + + rc = cam_irq_controller_unsubscribe_irq( + core_info->vfe_irq_controller, + irq_info->reset_irq_handle); + if (rc) + CAM_ERR(CAM_ISP, "Error. Unsubscribe failed"); + irq_info->reset_irq_handle = 0; + +skip_reset: + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp) +{ + struct timespec64 ts; + + ktime_get_boottime_ts64(&ts); + time_stamp->mono_time.tv_sec = ts.tv_sec; + time_stamp->mono_time.tv_nsec = ts.tv_nsec; +} + +int cam_vfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size) +{ + struct cam_vfe_hw_core_info *core_info = NULL; + struct cam_hw_info *vfe_hw = hw_priv; + struct cam_vfe_acquire_args *acquire; + int rc = -ENODEV; + + if (!hw_priv || !reserve_args || (arg_size != + sizeof(struct cam_vfe_acquire_args))) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + acquire = (struct cam_vfe_acquire_args *)reserve_args; + + CAM_DBG(CAM_ISP, "acq res type: %d", acquire->rsrc_type); + mutex_lock(&vfe_hw->hw_mutex); + if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_IN) { + rc = core_info->vfe_top->hw_ops.reserve( + core_info->vfe_top->top_priv, + acquire, sizeof(*acquire)); + } else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_OUT) { + rc = core_info->vfe_bus->hw_ops.reserve( 
+ core_info->vfe_bus->bus_priv, acquire, + sizeof(*acquire)); + } else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_BUS_RD) { + if (core_info->vfe_rd_bus) + rc = core_info->vfe_rd_bus->hw_ops.reserve( + core_info->vfe_rd_bus->bus_priv, acquire, + sizeof(*acquire)); + } else + CAM_ERR(CAM_ISP, "Invalid res type:%d", acquire->rsrc_type); + + mutex_unlock(&vfe_hw->hw_mutex); + + return rc; +} + +int cam_vfe_release(void *hw_priv, void *release_args, uint32_t arg_size) +{ + struct cam_vfe_hw_core_info *core_info = NULL; + struct cam_hw_info *vfe_hw = hw_priv; + struct cam_isp_resource_node *isp_res; + int rc = -ENODEV; + + if (!hw_priv || !release_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + isp_res = (struct cam_isp_resource_node *) release_args; + + mutex_lock(&vfe_hw->hw_mutex); + if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) + rc = core_info->vfe_top->hw_ops.release( + core_info->vfe_top->top_priv, isp_res, + sizeof(*isp_res)); + else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) + rc = core_info->vfe_bus->hw_ops.release( + core_info->vfe_bus->bus_priv, isp_res, + sizeof(*isp_res)); + else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_BUS_RD) { + if (core_info->vfe_rd_bus) + rc = core_info->vfe_rd_bus->hw_ops.release( + core_info->vfe_rd_bus->bus_priv, isp_res, + sizeof(*isp_res)); + } else { + CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type); + } + + mutex_unlock(&vfe_hw->hw_mutex); + + return rc; +} + + +int cam_vfe_start(void *hw_priv, void *start_args, uint32_t arg_size) +{ + struct cam_vfe_hw_core_info *core_info = NULL; + struct cam_hw_info *vfe_hw = hw_priv; + struct cam_isp_resource_node *isp_res; + int rc = 0; + + if (!hw_priv || !start_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + core_info = 
(struct cam_vfe_hw_core_info *)vfe_hw->core_info; + isp_res = (struct cam_isp_resource_node *)start_args; + core_info->tasklet_info = isp_res->tasklet_info; + + mutex_lock(&vfe_hw->hw_mutex); + if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) { + rc = core_info->vfe_top->hw_ops.start( + core_info->vfe_top->top_priv, isp_res, + sizeof(struct cam_isp_resource_node)); + + if (rc) + CAM_ERR(CAM_ISP, "Failed to start VFE IN"); + } else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) { + rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0); + + if (rc) + CAM_ERR(CAM_ISP, "Failed to start VFE OUT"); + } else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_BUS_RD) { + if (core_info->vfe_rd_bus) { + rc = core_info->vfe_rd_bus->hw_ops.start(isp_res, + NULL, 0); + + if (rc) + CAM_ERR(CAM_ISP, "Failed to start BUS RD"); + } + } else { + CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type); + rc = -EFAULT; + } + + mutex_unlock(&vfe_hw->hw_mutex); + + return rc; +} + +int cam_vfe_stop(void *hw_priv, void *stop_args, uint32_t arg_size) +{ + struct cam_vfe_hw_core_info *core_info = NULL; + struct cam_vfe_irq_hw_info *irq_info = NULL; + struct cam_hw_info *vfe_hw = hw_priv; + struct cam_isp_resource_node *isp_res; + int rc = -EINVAL; + + if (!hw_priv || !stop_args || + (arg_size != sizeof(struct cam_isp_resource_node))) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + isp_res = (struct cam_isp_resource_node *)stop_args; + irq_info = core_info->vfe_hw_info->irq_hw_info; + + mutex_lock(&vfe_hw->hw_mutex); + if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) { + rc = core_info->vfe_top->hw_ops.stop( + core_info->vfe_top->top_priv, isp_res, + sizeof(struct cam_isp_resource_node)); + } else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) { + rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0); + } else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_BUS_RD) { + if 
(core_info->vfe_rd_bus) + rc = core_info->vfe_rd_bus->hw_ops.stop(isp_res, + NULL, 0); + } else { + CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type); + } + + if (irq_info->reset_irq_handle > 0) { + cam_irq_controller_unsubscribe_irq( + core_info->vfe_irq_controller, + irq_info->reset_irq_handle); + irq_info->reset_irq_handle = 0; + } + + mutex_unlock(&vfe_hw->hw_mutex); + + return rc; +} + +int cam_vfe_read(void *hw_priv, void *read_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_vfe_write(void *hw_priv, void *write_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *vfe_hw = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_hw_core_info *core_info = NULL; + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + soc_info = &vfe_hw->soc_info; + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_CHANGE_BASE: + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + case CAM_ISP_HW_CMD_CLOCK_UPDATE: + case CAM_ISP_HW_CMD_BW_UPDATE: + case CAM_ISP_HW_CMD_BW_CONTROL: + case CAM_ISP_HW_CMD_CORE_CONFIG: + case CAM_ISP_HW_CMD_BW_UPDATE_V2: + case CAM_ISP_HW_CMD_DUMP_HW: + case CAM_ISP_HW_CMD_ADD_WAIT: + case CAM_ISP_HW_CMD_ADD_WAIT_TRIGGER: + case CAM_ISP_HW_CMD_CAMIF_DATA: + case CAM_ISP_HW_NOTIFY_OVERFLOW: + case CAM_ISP_HW_CMD_BLANKING_UPDATE: + case CAM_ISP_HW_CMD_FE_UPDATE_IN_RD: + case CAM_ISP_HW_CMD_GET_PATH_PORT_MAP: + case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE: + case CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE: + case CAM_ISP_HW_CMD_RDI_LCR_CFG: + case CAM_ISP_HW_CMD_GET_SET_PRIM_SOF_TS_ADDR: + case CAM_ISP_HW_CMD_FCG_CONFIG: + case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG: + rc = core_info->vfe_top->hw_ops.process_cmd( + core_info->vfe_top->top_priv, cmd_type, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_GET_BUF_UPDATE: + case 
CAM_ISP_HW_CMD_GET_HFR_UPDATE: + case CAM_ISP_HW_CMD_STRIPE_UPDATE: + case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ: + case CAM_ISP_HW_CMD_UBWC_UPDATE: + case CAM_ISP_HW_CMD_UBWC_UPDATE_V2: + case CAM_ISP_HW_CMD_WM_CONFIG_UPDATE: + case CAM_ISP_HW_CMD_GET_WM_SECURE_MODE: + case CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR: + case CAM_ISP_HW_CMD_UNMASK_BUS_WR_IRQ: + case CAM_ISP_HW_CMD_DUMP_BUS_INFO: + case CAM_ISP_HW_CMD_GET_RES_FOR_MID: + case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG: + case CAM_ISP_HW_IFE_BUS_MINI_DUMP: + case CAM_ISP_HW_CMD_BUF_UPDATE: + case CAM_ISP_HW_USER_DUMP: + case CAM_ISP_HW_CMD_MC_CTXT_SEL: + case CAM_ISP_HW_CMD_IRQ_INJECTION: + case CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION: + case CAM_ISP_HW_CMD_UBWC_UPDATE_V3: + case CAM_ISP_HW_CMD_WM_CONFIG_UPDATE_V2: + case CAM_ISP_HW_CMD_READ_RST_PERF_CNTRS: + rc = core_info->vfe_bus->hw_ops.process_cmd( + core_info->vfe_bus->bus_priv, cmd_type, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_GET_HFR_UPDATE_RM: + case CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM: + case CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD: + if (core_info->vfe_rd_bus) + rc = core_info->vfe_rd_bus->hw_ops.process_cmd( + core_info->vfe_rd_bus->bus_priv, cmd_type, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA: + *((struct cam_hw_soc_info **)cmd_args) = soc_info; + rc = 0; + break; + case CAM_ISP_HW_CMD_QUERY_CAP: + case CAM_ISP_HW_CMD_IFE_DEBUG_CFG: + /* forward to bus and top */ + core_info->vfe_bus->hw_ops.process_cmd( + core_info->vfe_bus->bus_priv, cmd_type, cmd_args, + arg_size); + + core_info->vfe_top->hw_ops.process_cmd( + core_info->vfe_top->top_priv, cmd_type, cmd_args, + arg_size); + break; + default: + CAM_ERR(CAM_ISP, "Invalid cmd type:%d", cmd_type); + rc = -EINVAL; + break; + } + return rc; +} + +irqreturn_t cam_vfe_irq(int irq_num, void *data) +{ + struct cam_hw_info *vfe_hw; + struct cam_vfe_hw_core_info *core_info; + + if (!data) + return IRQ_NONE; + + vfe_hw = (struct cam_hw_info *)data; + core_info = (struct 
cam_vfe_hw_core_info *)vfe_hw->core_info; + + return cam_irq_controller_handle_irq(irq_num, + core_info->vfe_irq_controller, CAM_IRQ_EVT_GROUP_0); +} + +int cam_vfe_test_irq_line(void *hw_priv) +{ + struct cam_hw_info *vfe_hw = hw_priv; + void *vfe_irq_ctrl; + int rc = 0, local_ret; + + if (!hw_priv) { + CAM_ERR(CAM_ISP, "invalid argument"); + return -EINVAL; + } + + vfe_irq_ctrl = ((struct cam_vfe_hw_core_info *)vfe_hw->core_info)->vfe_irq_controller; + local_ret = cam_vfe_init_hw(vfe_hw, NULL, 0); + if (local_ret) { + CAM_ERR(CAM_ISP, "VFE:%d failed to init hw", vfe_hw->soc_info.index); + return local_ret; + } + + local_ret = cam_irq_controller_test_irq_line(vfe_irq_ctrl, "VFE:%d", + vfe_hw->soc_info.index); + if (local_ret) { + CAM_ERR(CAM_ISP, "VFE:%d IRQ line test failed", vfe_hw->soc_info.index); + rc = local_ret; + } + + local_ret = cam_vfe_deinit_hw(vfe_hw, NULL, 0); + if (local_ret) { + CAM_ERR(CAM_ISP, "VFE:%d failed to deinit hw", vfe_hw->soc_info.index); + return local_ret; + } + + return rc; +} + +int cam_vfe_core_init(struct cam_vfe_hw_core_info *core_info, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + struct cam_vfe_hw_info *vfe_hw_info) +{ + int rc = -EINVAL; + struct cam_vfe_soc_private *soc_private = NULL; + + CAM_DBG(CAM_ISP, "Enter"); + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Invalid soc_private"); + return -ENODEV; + } + + rc = cam_irq_controller_init(drv_name, + CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX), + vfe_hw_info->irq_hw_info->top_irq_reg, + &core_info->vfe_irq_controller); + if (rc) { + CAM_ERR(CAM_ISP, + "Error, cam_irq_controller_init failed rc = %d", rc); + return rc; + } + + rc = cam_vfe_top_init(vfe_hw_info->top_version, soc_info, hw_intf, + vfe_hw_info->top_hw_info, core_info->vfe_irq_controller, + &core_info->vfe_top); + if (rc) { + CAM_ERR(CAM_ISP, "Error, cam_vfe_top_init failed rc = %d", rc); + goto deinit_controller; + } + + rc = 
cam_vfe_bus_init(vfe_hw_info->bus_version, BUS_TYPE_WR, + soc_info, hw_intf, vfe_hw_info->bus_hw_info, + core_info->vfe_irq_controller, &core_info->vfe_bus); + if (rc) { + CAM_ERR(CAM_ISP, "Error, cam_vfe_bus_init failed rc = %d", rc); + goto deinit_top; + } + + /* Probe fetch engine only if it exists - 0x0 is not a valid version */ + if (vfe_hw_info->bus_rd_version) { + rc = cam_vfe_bus_init(vfe_hw_info->bus_rd_version, BUS_TYPE_RD, + soc_info, hw_intf, vfe_hw_info->bus_rd_hw_info, + core_info->vfe_irq_controller, &core_info->vfe_rd_bus); + if (rc) { + CAM_WARN(CAM_ISP, "Error, RD cam_vfe_bus_init failed"); + rc = 0; + } + CAM_DBG(CAM_ISP, "vfe_bus_rd %pK hw_idx %d", + core_info->vfe_rd_bus, hw_intf->hw_idx); + } + + spin_lock_init(&core_info->spin_lock); + + return rc; + +deinit_top: + cam_vfe_top_deinit(vfe_hw_info->top_version, + &core_info->vfe_top); + +deinit_controller: + cam_irq_controller_deinit(&core_info->vfe_irq_controller); + + return rc; +} + +int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info, + struct cam_vfe_hw_info *vfe_hw_info) +{ + int rc = -EINVAL; + unsigned long flags; + + spin_lock_irqsave(&core_info->spin_lock, flags); + + rc = cam_vfe_bus_deinit(vfe_hw_info->bus_version, + &core_info->vfe_bus); + if (rc) + CAM_ERR(CAM_ISP, "Error cam_vfe_bus_deinit failed rc=%d", rc); + + rc = cam_vfe_top_deinit(vfe_hw_info->top_version, + &core_info->vfe_top); + if (rc) + CAM_ERR(CAM_ISP, "Error cam_vfe_top_deinit failed rc=%d", rc); + + rc = cam_irq_controller_deinit(&core_info->vfe_irq_controller); + if (rc) + CAM_ERR(CAM_ISP, + "Error cam_irq_controller_deinit failed rc=%d", rc); + + spin_unlock_irqrestore(&core_info->spin_lock, flags); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h new file mode 100644 index 0000000000..4ece6f7e84 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_VFE_CORE_H_ +#define _CAM_VFE_CORE_H_ + +#include +#include "cam_hw_intf.h" +#include "cam_vfe_top.h" +#include "cam_vfe_bus.h" +#include "cam_vfe_hw_intf.h" + +#define CAM_VFE_HW_IRQ_CAP_SOF BIT(0) +#define CAM_VFE_HW_IRQ_CAP_EPOCH_0 BIT(1) +#define CAM_VFE_HW_IRQ_CAP_EPOCH_1 BIT(2) +#define CAM_VFE_HW_IRQ_CAP_RUP BIT(3) +#define CAM_VFE_HW_IRQ_CAP_BUF_DONE BIT(4) +#define CAM_VFE_HW_IRQ_CAP_EOF BIT(5) +#define CAM_VFE_HW_IRQ_CAP_RESET BIT(6) + +#define CAM_VFE_HW_IRQ_CAP_INT_CSID 0x7F +#define CAM_VFE_HW_IRQ_CAP_LITE_INT_CSID 0x79 +#define CAM_VFE_HW_IRQ_CAP_EXT_CSID 0x27 +#define CAM_VFE_HW_IRQ_CAP_LITE_EXT_CSID 0x21 + +struct cam_vfe_irq_hw_info { + int reset_irq_handle; + uint32_t reset_mask; + struct cam_irq_controller_reg_info *top_irq_reg; + uint32_t supported_irq; +}; + +struct cam_vfe_hw_info { + struct cam_vfe_irq_hw_info *irq_hw_info; + + uint32_t bus_version; + void *bus_hw_info; + + uint32_t bus_rd_version; + void *bus_rd_hw_info; + + uint32_t top_version; + void *top_hw_info; + uint32_t camif_version; + void *camif_reg; + + uint32_t camif_lite_version; + void *camif_lite_reg; +}; + +#define CAM_VFE_EVT_MAX 256 + +struct cam_vfe_hw_core_info { + struct cam_vfe_hw_info *vfe_hw_info; + void *vfe_irq_controller; + struct cam_vfe_top *vfe_top; + struct cam_vfe_bus *vfe_bus; + struct cam_vfe_bus *vfe_rd_bus; + void *tasklet_info; + spinlock_t spin_lock; +}; + +int cam_vfe_get_hw_caps(void *device_priv, + void *get_hw_cap_args, uint32_t arg_size); +int cam_vfe_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_vfe_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size); +int cam_vfe_reset(void *device_priv, + void *reset_core_args, uint32_t arg_size); +int cam_vfe_reserve(void 
*device_priv, + void *reserve_args, uint32_t arg_size); +int cam_vfe_release(void *device_priv, + void *reserve_args, uint32_t arg_size); +int cam_vfe_start(void *device_priv, + void *start_args, uint32_t arg_size); +int cam_vfe_stop(void *device_priv, + void *stop_args, uint32_t arg_size); +int cam_vfe_read(void *device_priv, + void *read_args, uint32_t arg_size); +int cam_vfe_write(void *device_priv, + void *write_args, uint32_t arg_size); +int cam_vfe_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +int cam_vfe_test_irq_line(void *hw_priv); + +irqreturn_t cam_vfe_irq(int irq_num, void *data); + +int cam_vfe_core_init(struct cam_vfe_hw_core_info *core_info, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + struct cam_vfe_hw_info *vfe_hw_info); + +int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info, + struct cam_vfe_hw_info *vfe_hw_info); + +#endif /* _CAM_VFE_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c new file mode 100644 index 0000000000..e3158e56e5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#include +#include +#include +#include + +#include "cam_vfe_core.h" +#include "cam_vfe_soc.h" +#include "cam_debug_util.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" +#include +#include "cam_req_mgr_dev.h" + +static struct cam_isp_hw_intf_data cam_vfe_hw_list[CAM_VFE_HW_NUM_MAX]; +static uint32_t g_num_ife_hws, g_num_ife_lite_hws; + +static int cam_vfe_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *vfe_hw = NULL; + struct cam_hw_intf *vfe_hw_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_vfe_hw_core_info *core_info = NULL; + struct cam_vfe_hw_info *hw_info = NULL; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct cam_vfe_soc_private *vfe_soc_priv; + uint32_t vfe_dev_idx; + uint32_t i; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + + rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &vfe_dev_idx); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to read cell-index of IFE HW, rc: %d", rc); + goto end; + } + + if (!cam_cpas_is_feature_supported(CAM_CPAS_ISP_FUSE, BIT(vfe_dev_idx), NULL) || + !cam_cpas_is_feature_supported(CAM_CPAS_ISP_LITE_FUSE, + BIT(vfe_dev_idx), NULL)) { + CAM_DBG(CAM_ISP, "IFE:%d is not supported", vfe_dev_idx); + goto end; + } + + vfe_hw_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!vfe_hw_intf) { + rc = -ENOMEM; + goto end; + } + + vfe_hw = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!vfe_hw) { + rc = -ENOMEM; + goto free_vfe_hw_intf; + } + + vfe_hw->soc_info.pdev = pdev; + vfe_hw->soc_info.dev = &pdev->dev; + vfe_hw->soc_info.dev_name = pdev->name; + vfe_hw_intf->hw_priv = vfe_hw; + vfe_hw_intf->hw_idx = vfe_dev_idx; + vfe_hw_intf->hw_ops.get_hw_caps = cam_vfe_get_hw_caps; + vfe_hw_intf->hw_ops.init = cam_vfe_init_hw; + vfe_hw_intf->hw_ops.deinit = cam_vfe_deinit_hw; + vfe_hw_intf->hw_ops.reset = cam_vfe_reset; + 
vfe_hw_intf->hw_ops.reserve = cam_vfe_reserve; + vfe_hw_intf->hw_ops.release = cam_vfe_release; + vfe_hw_intf->hw_ops.start = cam_vfe_start; + vfe_hw_intf->hw_ops.stop = cam_vfe_stop; + vfe_hw_intf->hw_ops.read = cam_vfe_read; + vfe_hw_intf->hw_ops.write = cam_vfe_write; + vfe_hw_intf->hw_ops.process_cmd = cam_vfe_process_cmd; + vfe_hw_intf->hw_ops.test_irq_line = cam_vfe_test_irq_line; + vfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_VFE; + + CAM_DBG(CAM_ISP, "VFE component bind, type %d index %d", + vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx); + + platform_set_drvdata(pdev, vfe_hw_intf); + + vfe_hw->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_hw_core_info), + GFP_KERNEL); + if (!vfe_hw->core_info) { + CAM_DBG(CAM_ISP, "Failed to alloc for core"); + rc = -ENOMEM; + goto free_vfe_hw; + } + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_ISP, "Of_match Failed"); + rc = -EINVAL; + goto free_core_info; + } + hw_info = (struct cam_vfe_hw_info *)match_dev->data; + core_info->vfe_hw_info = hw_info; + + rc = cam_vfe_init_soc_resources(&vfe_hw->soc_info, cam_vfe_irq, + vfe_hw); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Failed to init soc rc=%d", rc); + goto free_core_info; + } + + rc = cam_vfe_core_init(core_info, &vfe_hw->soc_info, + vfe_hw_intf, hw_info); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Failed to init core rc=%d", rc); + goto deinit_soc; + } + + vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&vfe_hw->hw_mutex); + spin_lock_init(&vfe_hw->hw_lock); + init_completion(&vfe_hw->hw_complete); + + if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX) + cam_vfe_hw_list[vfe_hw_intf->hw_idx].hw_intf = vfe_hw_intf; + + vfe_soc_priv = vfe_hw->soc_info.soc_private; + cam_vfe_hw_list[vfe_hw_intf->hw_idx].num_hw_pid = vfe_soc_priv->num_pid; + for (i = 0; i < vfe_soc_priv->num_pid; i++) + cam_vfe_hw_list[vfe_hw_intf->hw_idx].hw_pid[i] = + vfe_soc_priv->pid[i]; + + 
vfe_hw->soc_info.hw_id = CAM_HW_ID_IFE0 + vfe_hw->soc_info.index; + rc = cam_vmvm_populate_hw_instance_info(&vfe_hw->soc_info, NULL, NULL); + if (rc) { + CAM_ERR(CAM_ISP, " hw instance populate failed: %d", rc); + goto deinit_soc; + } + + CAM_DBG(CAM_ISP, "VFE:%d component bound successfully", + vfe_hw_intf->hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return rc; + +deinit_soc: + if (cam_vfe_deinit_soc_resources(&vfe_hw->soc_info)) + CAM_ERR(CAM_ISP, "Failed to deinit soc"); +free_core_info: + CAM_MEM_FREE(vfe_hw->core_info); +free_vfe_hw: + CAM_MEM_FREE(vfe_hw); +free_vfe_hw_intf: + CAM_MEM_FREE(vfe_hw_intf); +end: + return rc; +} + +static void cam_vfe_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *vfe_hw = NULL; + struct cam_hw_intf *vfe_hw_intf = NULL; + struct cam_vfe_hw_core_info *core_info = NULL; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + + vfe_hw_intf = platform_get_drvdata(pdev); + if (!vfe_hw_intf) { + CAM_ERR(CAM_ISP, "Error! No data in pdev"); + return; + } + + CAM_DBG(CAM_ISP, "VFE component unbind, type %d index %d", + vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx); + + if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX) + cam_vfe_hw_list[vfe_hw_intf->hw_idx].hw_intf = NULL; + + vfe_hw = vfe_hw_intf->hw_priv; + if (!vfe_hw) { + CAM_ERR(CAM_ISP, "Error! HW data is NULL"); + goto free_vfe_hw_intf; + } + + core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info; + if (!core_info) { + CAM_ERR(CAM_ISP, "Error! 
core data NULL"); + goto deinit_soc; + } + + rc = cam_vfe_core_deinit(core_info, core_info->vfe_hw_info); + if (rc < 0) + CAM_ERR(CAM_ISP, "Failed to deinit core rc=%d", rc); + + CAM_MEM_FREE(vfe_hw->core_info); + +deinit_soc: + rc = cam_vfe_deinit_soc_resources(&vfe_hw->soc_info); + if (rc < 0) + CAM_ERR(CAM_ISP, "Failed to deinit soc rc=%d", rc); + + mutex_destroy(&vfe_hw->hw_mutex); + CAM_MEM_FREE(vfe_hw); + + CAM_DBG(CAM_ISP, "VFE%d component unbound", vfe_hw_intf->hw_idx); + +free_vfe_hw_intf: + CAM_MEM_FREE(vfe_hw_intf); +} + +const static struct component_ops cam_vfe_component_ops = { + .bind = cam_vfe_component_bind, + .unbind = cam_vfe_component_unbind, +}; + +void cam_vfe_get_num_ifes(uint32_t *num_ifes) +{ + if (num_ifes) + *num_ifes = g_num_ife_hws; + else + CAM_ERR(CAM_ISP, "Invalid argument, g_num_ife_hws: %u", g_num_ife_hws); +} + +void cam_vfe_get_num_ife_lites(uint32_t *num_ife_lites) +{ + if (num_ife_lites) + *num_ife_lites = g_num_ife_lite_hws; + else + CAM_ERR(CAM_ISP, "Invalid argument, g_num_ife_lite_hws: %u", g_num_ife_lite_hws); +} + +int cam_vfe_probe(struct platform_device *pdev) +{ + int rc = 0; + const char *compatible_name; + struct device_node *of_node = NULL; + + CAM_DBG(CAM_ISP, "Adding VFE component"); + of_node = pdev->dev.of_node; + + rc = of_property_read_string_index(of_node, "compatible", 0, + (const char **)&compatible_name); + if (rc) + CAM_ERR(CAM_ISP, "No compatible string present for: %s, rc: %d", + pdev->name, rc); + + if (strnstr(compatible_name, "lite", strlen(compatible_name)) != NULL) + g_num_ife_lite_hws++; + else if (strnstr(compatible_name, "vfe", strlen(compatible_name)) != NULL) + g_num_ife_hws++; + else if (strnstr(compatible_name, "mc_tfe", strlen(compatible_name)) != NULL) + g_num_ife_hws++; + else + CAM_ERR(CAM_ISP, "Failed to increment number of IFEs/IFE-LITEs"); + + rc = component_add(&pdev->dev, &cam_vfe_component_ops); + if (rc) + CAM_ERR(CAM_ISP, "failed to add component rc: %d", rc); + + return rc; +} + 
+int cam_vfe_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_vfe_component_ops); + return 0; +} + +int cam_vfe_hw_init(struct cam_isp_hw_intf_data **vfe_hw_intf, + uint32_t hw_idx) +{ + int rc = 0; + + if (cam_vfe_hw_list[hw_idx].hw_intf) { + *vfe_hw_intf = &cam_vfe_hw_list[hw_idx]; + rc = 0; + } else { + CAM_ERR(CAM_ISP, "inval param"); + *vfe_hw_intf = NULL; + rc = -ENODEV; + } + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h new file mode 100644 index 0000000000..c2d78b69c6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_VFE_DEV_H_ +#define _CAM_VFE_DEV_H_ + +#include + +/* + * cam_vfe_probe() + * + * @brief: Driver probe function called on Boot + * + * @pdev: Platform Device pointer + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_probe(struct platform_device *pdev); + +/* + * cam_vfe_remove() + * + * @brief: Driver remove function + * + * @pdev: Platform Device pointer + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_remove(struct platform_device *pdev); + +#endif /* _CAM_VFE_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c new file mode 100644 index 0000000000..e46d7c6287 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include "cam_cpas_api.h" +#include "cam_vfe_soc.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +static bool cam_vfe_cpas_cb(uint32_t client_handle, void *userdata, + struct cam_cpas_irq_data *irq_data) +{ + bool error_handled = false; + + if (!irq_data) + return error_handled; + + switch (irq_data->irq_type) { + case CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR: + case CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR: + case CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR: + case CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR: + CAM_ERR_RATE_LIMIT(CAM_ISP, + "IFE UBWC Encode error type=%d status=%x", + irq_data->irq_type, + irq_data->u.enc_err.encerr_status.value); + error_handled = true; + break; + default: + break; + } + + return error_handled; +} + +static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info) +{ + int rc = 0, num_ubwc_cfg = 0, i = 0, num_pid = 0; + struct device_node *of_node = NULL; + struct platform_device *pdev = NULL; + struct cam_vfe_soc_private *vfe_soc_private; + + pdev = soc_info->pdev; + of_node = pdev->dev.of_node; + vfe_soc_private = soc_info->soc_private; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_ISP, "Error! 
get DT properties failed rc=%d", rc); + return rc; + } + + rc = of_property_read_u32(of_node, "rt-wrapper-base", &vfe_soc_private->rt_wrapper_base); + if (rc) { + vfe_soc_private->rt_wrapper_base = 0; + CAM_DBG(CAM_ISP, "rc: %d Error reading rt_wrapper_base for core_idx: %u", + rc, soc_info->index); + rc = 0; + } + + vfe_soc_private->is_ife_lite = false; + if (strnstr(soc_info->compatible, "lite", + strlen(soc_info->compatible)) != NULL) { + vfe_soc_private->is_ife_lite = true; + goto pid; + } + + if (!of_property_read_bool(of_node, "ubwc-static-cfg")) { + CAM_DBG(CAM_ISP, "ubwc-static-cfg not supported"); + goto pid; + } + + num_ubwc_cfg = of_property_count_u32_elems(of_node, + "ubwc-static-cfg"); + + if (num_ubwc_cfg < 0 || num_ubwc_cfg > UBWC_STATIC_CONFIG_MAX) { + CAM_ERR(CAM_ISP, "wrong num_ubwc_cfg: %d", + num_ubwc_cfg); + rc = num_ubwc_cfg; + goto pid; + } + + for (i = 0; i < num_ubwc_cfg; i++) { + rc = of_property_read_u32_index(of_node, + "ubwc-static-cfg", i, + &vfe_soc_private->ubwc_static_ctrl[i]); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "unable to read ubwc static config"); + } + } +pid: + /* set some default values */ + vfe_soc_private->num_pid = 0; + + num_pid = of_property_count_u32_elems(pdev->dev.of_node, "cam_hw_pid"); + CAM_DBG(CAM_CPAS, "vfe:%d pid count %d", soc_info->index, num_pid); + + if (num_pid <= 0 || num_pid > CAM_ISP_HW_MAX_PID_VAL) + goto end; + + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(pdev->dev.of_node, "cam_hw_pid", i, + &vfe_soc_private->pid[i]); + + vfe_soc_private->num_pid = num_pid; + +end: + + return rc; +} + +static int cam_vfe_request_platform_resource( + struct cam_hw_soc_info *soc_info, + irq_handler_t vfe_irq_handler, void *data) +{ + int rc = 0, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, vfe_irq_handler, &(irq_data[0])); + if (rc) + CAM_ERR(CAM_ISP, + "Error! 
Request platform resource failed rc=%d", rc); + + return rc; +} + +static int cam_vfe_release_platform_resource(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_ERR(CAM_ISP, + "Error! Release platform resource failed rc=%d", rc); + + return rc; +} + +int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t vfe_irq_handler, void *irq_data) +{ + int rc = 0; + struct cam_vfe_soc_private *soc_private; + struct cam_cpas_register_params cpas_register_param; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_soc_private), + GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_ISP, "Error! soc_private Alloc Failed"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + + rc = cam_cpas_get_cpas_hw_version(&soc_private->cpas_version); + if (rc) { + CAM_ERR(CAM_ISP, "Error! Invalid cpas version rc=%d", rc); + goto free_soc_private; + } + soc_info->hw_version = soc_private->cpas_version; + + rc = cam_vfe_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Error! Get DT properties failed rc=%d", rc); + goto free_soc_private; + } + + if (!soc_private->is_ife_lite) { + rc = cam_cpas_query_drv_enable(NULL, &soc_info->is_clk_drv_en); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to query DRV enable rc:%d", rc); + goto free_soc_private; + } + } + + rc = cam_soc_util_get_option_clk_by_name(soc_info, CAM_VFE_DSP_CLK_NAME, + &soc_private->dsp_clk_index); + if (rc) + CAM_DBG(CAM_ISP, "Option clk get failed with rc %d", rc); + + rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler, + irq_data); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "Error! 
Request platform resources failed rc=%d", rc); + goto free_soc_private; + } + + strscpy(cpas_register_param.identifier, "ife", + CAM_HW_IDENTIFIER_LENGTH); + cpas_register_param.cell_index = soc_info->index; + cpas_register_param.dev = soc_info->dev; + cpas_register_param.cam_cpas_client_cb = cam_vfe_cpas_cb; + cpas_register_param.userdata = soc_info; + + rc = cam_cpas_register_client(&cpas_register_param); + if (rc) { + CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc); + goto release_soc; + } else { + soc_private->cpas_handle = cpas_register_param.client_handle; + } + return rc; + +release_soc: + cam_soc_util_release_platform_resource(soc_info); +free_soc_private: + CAM_MEM_FREE(soc_private); + + return rc; +} + +int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_vfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error! soc_info NULL"); + return -ENODEV; + } + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error! soc_private NULL"); + return -ENODEV; + } + + rc = cam_cpas_unregister_client(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc); + + rc = cam_vfe_release_platform_resource(soc_info); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Error! Release platform resources failed rc=%d", rc); + + if (soc_private->dsp_clk_index != -1) { + rc = cam_soc_util_put_optional_clk(soc_info, + soc_private->dsp_clk_index); + if (rc) + CAM_ERR(CAM_ISP, + "Error Put dsp clk failed rc=%d", rc); + } + CAM_MEM_FREE(soc_private); + + return rc; +} + +int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_vfe_soc_private *soc_private; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error! 
Invalid params"); + rc = -EINVAL; + goto end; + } + + soc_private = soc_info->soc_private; + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 1; + + axi_vote.axi_path[0].path_data_type = CAM_CPAS_API_PATH_DATA_STD_START; + if (!soc_private->is_ife_lite) + axi_vote.axi_path[0].vote_level = CAM_CPAS_VOTE_LEVEL_LOW; + + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_RT_AXI_BW; + + rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_ISP, "Error! CPAS start failed rc=%d", rc); + rc = -EFAULT; + goto end; + } + + if (!soc_private->is_ife_lite) { + rc = cam_cpas_query_drv_enable(NULL, &soc_info->is_clk_drv_en); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to query DRV enable rc:%d", rc); + return rc; + } + } + + rc = cam_soc_util_enable_platform_resource(soc_info, + (soc_info->is_clk_drv_en ? soc_info->index : CAM_CLK_SW_CLIENT_IDX), true, + soc_info->lowest_clk_level, true); + if (rc) { + CAM_ERR(CAM_ISP, "Error! 
enable platform failed rc=%d", rc); + goto stop_cpas; + } + + return rc; + +stop_cpas: + cam_cpas_stop(soc_private->cpas_handle); +end: + return rc; +} + +int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name) +{ + int rc = 0; + struct cam_vfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + rc = -EINVAL; + goto end; + } + soc_private = soc_info->soc_private; + + if (!strcmp(clk_name, CAM_VFE_DSP_CLK_NAME)) { + if (soc_private->dsp_clk_index == -1) { + CAM_ERR(CAM_ISP, + "DSP clock not supported for vfe: %d", soc_info->index); + rc = -EPERM; + goto end; + } + + rc = cam_soc_util_clk_enable(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_private->dsp_clk_index, 0); + if (rc) + CAM_ERR(CAM_ISP, + "Error enable dsp clk failed rc=%d", rc); + } + +end: + return rc; +} + +int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name) +{ + int rc = 0; + struct cam_vfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error Invalid params"); + rc = -EINVAL; + goto end; + } + soc_private = soc_info->soc_private; + + if (!strcmp(clk_name, CAM_VFE_DSP_CLK_NAME)) { + if (soc_private->dsp_clk_index == -1) { + CAM_ERR(CAM_ISP, + "DSP clock not supported for vfe: %d", soc_info->index); + rc = -EPERM; + goto end; + } + + rc = cam_soc_util_clk_disable(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_private->dsp_clk_index); + if (rc) + CAM_ERR(CAM_ISP, + "Error disable dsp clk failed rc=%d", rc); + } + +end: + return rc; +} + + +int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct cam_vfe_soc_private *soc_private; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Error! Invalid params"); + rc = -EINVAL; + return rc; + } + + soc_private = soc_info->soc_private; + + rc = cam_soc_util_disable_platform_resource(soc_info, + (soc_info->is_clk_drv_en ? 
soc_info->index : CAM_CLK_SW_CLIENT_IDX), true, true); + if (rc) { + CAM_ERR(CAM_ISP, "Disable platform failed rc=%d", rc); + return rc; + } + + rc = cam_cpas_stop(soc_private->cpas_handle); + if (rc) { + CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc); + return rc; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h new file mode 100644 index 0000000000..d93e34b8fd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_VFE_SOC_H_ +#define _CAM_VFE_SOC_H_ + +#include "cam_soc_util.h" +#include "cam_isp_hw.h" + +#define CAM_VFE_DSP_CLK_NAME "ife_dsp_clk" + +#define UBWC_STATIC_CONFIG_MAX 2 + +/* + * struct cam_vfe_soc_private: + * + * @Brief: Private SOC data specific to VFE HW Driver + * + * @cpas_handle: Handle returned on registering with CPAS driver. + * This handle is used for all further interface + * with CPAS. 
+ * @cpas_version: Has cpas version read from Hardware + * @rt_wrapper_base: Base address of the RT-Wrapper if the hw is in rt-wrapper + * @dsp_clk_index: DSP clk index in optional clocks + * @ubwc_static_ctrl: UBWC static control configuration + * @is_ife_lite: Flag to indicate full vs lite IFE + * @ife_clk_src: IFE source clock + * @num_pid: Number of pids of ife + * @pid: IFE pid values list + */ +struct cam_vfe_soc_private { + uint32_t cpas_handle; + uint32_t cpas_version; + uint32_t rt_wrapper_base; + int32_t dsp_clk_index; + uint32_t ubwc_static_ctrl[UBWC_STATIC_CONFIG_MAX]; + bool is_ife_lite; + uint64_t ife_clk_src; + uint32_t num_pid; + uint32_t pid[CAM_ISP_HW_MAX_PID_VAL]; +}; + +/* + * cam_vfe_init_soc_resources() + * + * @Brief: Initialize SOC resources including private data + * + * @soc_info: Device soc information + * @handler: Irq handler function pointer + * @irq_data: Irq handler function Callback data + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t vfe_irq_handler, void *irq_data); + +/* + * cam_vfe_deinit_soc_resources() + * + * @Brief: Deinitialize SOC resources including private data + * + * @soc_info: Device soc information + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info); + +/* + * cam_vfe_enable_soc_resources() + * + * @brief: Enable regulator, irq resources, start CPAS + * + * @soc_info: Device soc information + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info); + +/* + * cam_vfe_disable_soc_resources() + * + * @brief: Disable regulator, irq resources, stop CPAS + * + * @soc_info: Device soc information + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info); + +/* + * cam_vfe_soc_enable_clk() + * + * @brief: Enable clock with given name + * + 
* @soc_info: Device soc information + * @clk_name: Name of clock to enable + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name); + +/* + * cam_vfe_soc_disable_dsp_clk() + * + * @brief: Disable clock with given name + * + * @soc_info: Device soc information + * @clk_name: Name of clock to enable + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info, + const char *clk_name); + +#endif /* _CAM_VFE_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe1080.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe1080.h new file mode 100644 index 0000000000..fe53a175ee --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe1080.h @@ -0,0 +1,2939 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_TFE1080_H_ +#define _CAM_TFE1080_H_ +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_TFE_1080_NUM_TOP_DBG_REG 17 +#define CAM_TFE_1080_NUM_BAYER_DBG_REG 11 +#define CAM_TFE_BUS_VER3_1080_MAX_CLIENTS 28 + +static struct cam_vfe_top_ver4_module_desc tfe1080_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_STATS_AWB_BG_TINTLESS", + }, + { + .id = 1, + .desc = "CLC_STATS_AWB_BG_AE", + }, + { + .id = 2, + .desc = "CLC_STATS_BHIST_AEC", + }, + { + .id = 3, + .desc = "CLC_STATS_RS", + }, + { + .id = 4, + .desc = "CLC_STATS_BFW_AWB", + }, + { + .id = 5, + .desc = "CLC_STATS_AWB_BG_AWB", + }, + { + .id = 6, + .desc = "CLC_STATS_BHIST_AF", + }, + { + .id = 7, + .desc = "CLC_STATS_AWB_BG_ALSC", + }, + { + .id = 8, + .desc = "CLC_STATS_BHIST_TMC", + }, + { + .id = 9, + .desc = "CLC_COMPDECOMP_FD", + }, + { + .id = 10, + .desc = "CLC_BLS", + }, + { + .id = 11, + .desc = "CLC_COLOR_CORRECT", + }, + { + .id = 12, + .desc = "CLC_GTM", + }, + { + .id = 13, + .desc = "CLC_GLUT", + }, + { + .id = 14, + .desc = "CLC_COLOR_XFORM", + }, + { + .id = 15, + .desc = "CLC_DOWNSCALE_MN_Y", + }, + { + .id = 16, + .desc = "CLC_DOWNSCALE_MN_C", + }, + { + .id = 17, + .desc = "CLC_CROP_RND_CLAMP_FD_Y", + }, + { + .id = 18, + .desc = "CLC_CROP_RND_CLAMP_FD_C", + }, + { + .id = 19, + .desc = "CLC_BDS2_DEMO", + }, + { + .id = 20, + .desc = "CLC_PUNCH_BDS2", + }, + { + .id = 21, + .desc = "CLC_PUNCH_DS4_MUX", + }, + { + .id = 22, + .desc = "CLC_BAYER_DS_4_DS4", + }, + { + .id = 23, + .desc = "CLC_CROP_RND_CLAMP_DS4", + }, + { + .id = 24, + .desc = "CLC_PUNCH_DS16", + }, + { + .id = 25, + .desc = "CLC_BAYER_DS_4_DS16", + }, + { + .id = 26, + .desc = "CLC_CROP_RND_CLAMP_DS16", + }, + { + .id = 27, + .desc = "CLC_CROP_RND_CLAMP_DS2", + }, + { + .id = 28, + .desc = "CLC_RCS_DS2", + }, + { + .id = 29, + .desc = "CLC_CROP_RND_CLAMP_FULL_OUT", + }, + { + .id = 30, + .desc = "CLC_COMPDECOMP_BYPASS", + 
}, + { + .id = 31, + .desc = "CLC_CROP_RND_CLAMP_BYPASS", + }, + { + .id = 32, + .desc = "CLC_RCS_FULL_OUT", + }, + /** + * Main_PP status register does not capture CLC_HAF violation, + * but debug_err_vec + timestamp feature does. Since both use + * the same violation ID table, we add it here + */ + { + .id = 33, + .desc = "CLC_HAF", + }, +}; + +struct cam_vfe_top_ver4_module_desc tfe1080_bayer_mod_desc[] = { + { + .id = 0, + .desc = "CLC_DEMUX", + }, + { + .id = 1, + .desc = "CLC_UNIV_CHANNEL_GAINS", + }, + { + .id = 2, + .desc = "CLC_QPDR", + }, + { + .id = 3, + .desc = "CLC_BPC_PDPC_GIC", + }, + { + .id = 4, + .desc = "CLC_PDPC_BPC_1D", + }, + { + .id = 5, + .desc = "CLC_ABF_BINC", + }, + { + .id = 6, + .desc = "CLC_CHANNEL_GAINS", + }, + { + .id = 7, + .desc = "CLC_LSC", + }, + { + .id = 8, + .desc = "CLC_FCG", + }, + { + .id = 9, + .desc = "CLC_WB_GAIN", + }, + { + .id = 10, + .desc = "CLC_COMPDECOMP_BAYER", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc tfe1080_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "FULL", + }, + { + .wm_id = 1, + .desc = "DS4_Y", + }, + { + .wm_id = 2, + .desc = "DS4_C", + }, + { + .wm_id = 3, + .desc = "DS16_Y", + }, + { + .wm_id = 4, + .desc = "DS16_C", + }, + { + .wm_id = 5, + .desc = "DS2_Y", + }, + { + .wm_id = 6, + .desc = "DS2_C", + }, + { + .wm_id = 7, + .desc = "FD_Y", + }, + { + .wm_id = 8, + .desc = "FD_C", + }, + { + .wm_id = 9, + .desc = "PIXEL_RAW", + }, + { + .wm_id = 10, + .desc = "AEC_BG", + }, + { + .wm_id = 11, + .desc = "STATS_AEC_BHIST", + }, + { + .wm_id = 12, + .desc = "STATS_TINTLESS_BG", + }, + { + .wm_id = 13, + .desc = "STATS_AWB_BG", + }, + { + .wm_id = 14, + .desc = "STATS_AWB_BFW", + }, + { + .wm_id = 15, + .desc = "STATS_AF_BHIST", + }, + { + .wm_id = 16, + .desc = "STATS_ALSC_BG", + }, + { + .wm_id = 17, + .desc = "STATS_FLICKER_BAYERRS", + }, + { + .wm_id = 18, + .desc = "STATS_TMC_BHIST", + }, + { + .wm_id = 19, + .desc = "PDAF_0_STATS", + }, + { + .wm_id = 20, + .desc = 
"PDAF_1_PREPROCESS_2PD", + }, + { + .wm_id = 21, + .desc = "PDAF_2_PARSED_DATA", + }, + { + .wm_id = 22, + .desc = "PDAF_3_CAF", + }, + { + .wm_id = 23, + .desc = "RDI0", + }, + { + .wm_id = 24, + .desc = "RDI1", + }, + { + .wm_id = 25, + .desc = "RDI2", + }, + { + .wm_id = 26, + .desc = "RDI3", + }, + { + .wm_id = 27, + .desc = "RDI4", + }, +}; + +static struct cam_vfe_top_ver4_top_err_irq_desc tfe1080_top_irq_err_desc[] = { + { + .bitmask = BIT(2), + .err_name = "BAYER_HM violation", + .desc = "CLC CCIF Violation", + }, + { + .bitmask = BIT(24), + .err_name = "DYNAMIC PDAF SWITCH VIOLATION", + .desc = + "HAF RDI exposure select changes dynamically, the common vbi is insufficient", + }, + { + .bitmask = BIT(25), + .err_name = "HAF violation", + .desc = "CLC_HAF Violation", + }, + { + .bitmask = BIT(26), + .err_name = "PP VIOLATION", + .desc = "CCIF protocol violation", + }, + { + .bitmask = BIT(27), + .err_name = "DIAG VIOLATION", + .desc = "Sensor: The HBI at TFE input is less than the spec (64 cycles)", + .debug = "Check sensor config", + }, +}; + +static struct cam_vfe_top_ver4_pdaf_violation_desc tfe1080_haf_violation_desc[] = { + { + .bitmask = BIT(0), + .desc = "Sim monitor 1 violation - SAD output", + }, + { + .bitmask = BIT(1), + .desc = "Sim monitor 2 violation - pre-proc output", + }, + { + .bitmask = BIT(2), + .desc = "Sim monitor 3 violation - parsed output", + }, + { + .bitmask = BIT(3), + .desc = "Sim monitor 4 violation - CAF output", + }, + { + .bitmask = BIT(4), + .desc = "PDAF constraint violation", + }, + { + .bitmask = BIT(5), + .desc = "CAF constraint violation", + }, +}; + +static struct cam_irq_register_set tfe1080_top_irq_reg_set = { + .mask_reg_offset = 0x000001D4, + .clear_reg_offset = 0x000001D8, + .status_reg_offset = 0x000001DC, + .set_reg_offset = 0x000001E0, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), +}; + +static struct cam_irq_controller_reg_info tfe1080_top_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = 
&tfe1080_top_irq_reg_set, + .global_irq_cmd_offset = 0x000001D0, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t tfe1080_top_debug_reg[] = { + 0x000004EC, + 0x000004F0, + 0x000004F4, + 0x000004F8, + 0x000004FC, + 0x00000500, + 0x00000504, + 0x00000508, + 0x0000050C, + 0x00000510, + 0x00000514, + 0x00000518, + 0x0000051C, + 0x00000520, + 0x00000524, + 0x00000528, + 0x0000052C, +}; + +static uint32_t tfe1080_bayer_debug_reg[] = { + 0x0000E4E8, + 0x0000E4EC, + 0x0000E4F0, + 0x0000E4F4, + 0x0000E4F8, + 0x0000E4FC, + 0x0000E500, + 0x0000E504, + 0x0000E508, + 0x0000E50C, +}; + +static struct cam_vfe_top_ver4_reg_offset_common tfe1080_top_common_reg = { + .hw_version = 0x00000000, + .core_cgc_ovd_0 = 0x00000100, + .ahb_cgc_ovd = 0x00000108, + .core_mux_cfg = 0x00000110, + .pdaf_input_cfg_0 = 0x00000114, + .pdaf_input_cfg_1 = 0x00000118, + .stats_throttle_cfg_0 = 0x00000150, + .stats_throttle_cfg_1 = 0x00000154, + .stats_throttle_cfg_2 = 0x00000158, + .core_cfg_4 = 0x0000015C, + .pdaf_parsed_throttle_cfg = 0x00000160, + .fd_y_throttle_cfg = 0x00000168, + .fd_c_throttle_cfg = 0x0000016C, + .ds16_g_throttle_cfg = 0x00000170, + .ds16_br_throttle_cfg = 0x00000174, + .ds4_g_throttle_cfg = 0x00000178, + .ds4_br_throttle_cfg = 0x0000017C, + .ds2_g_throttle_cfg = 0x00000180, + .ds2_br_throttle_cfg = 0x00000184, + .full_out_throttle_cfg = 0x00000188, + .diag_config = 0x0000039C, + .global_reset_cmd = 0x000001D0, + .diag_sensor_status = {0x000003A0, 0x000003A4, 0x000003A8, 0x000003AC}, + .diag_frm_cnt_status = {0x000003B0, 0x000003B4, 0x000003B8}, + .ipp_violation_status = 0x00000248, + .bayer_violation_status = 0x0000E248, + .pdaf_violation_status = 0x00009304, + .dsp_status = 0x0, + .bus_violation_status = 0x00001064, + .bus_overflow_status = 0x00001068, + .num_perf_counters = 8, + .perf_count_reg = { + { + .perf_count_cfg = 0x000003DC, + .perf_count_cfg_mc = 0x000003E0, + .perf_pix_count = 
0x000003E4, + .perf_line_count = 0x000003E8, + .perf_stall_count = 0x000003EC, + .perf_always_count = 0x000003F0, + .perf_count_status = 0x000003F4, + }, + { + .perf_count_cfg = 0x000003F8, + .perf_count_cfg_mc = 0x000003FC, + .perf_pix_count = 0x00000400, + .perf_line_count = 0x00000404, + .perf_stall_count = 0x00000408, + .perf_always_count = 0x0000040C, + .perf_count_status = 0x00000410, + }, + { + .perf_count_cfg = 0x00000414, + .perf_count_cfg_mc = 0x00000418, + .perf_pix_count = 0x0000041C, + .perf_line_count = 0x00000420, + .perf_stall_count = 0x00000424, + .perf_always_count = 0x00000428, + .perf_count_status = 0x0000042C, + }, + { + .perf_count_cfg = 0x00000430, + .perf_count_cfg_mc = 0x00000434, + .perf_pix_count = 0x00000438, + .perf_line_count = 0x0000043C, + .perf_stall_count = 0x00000440, + .perf_always_count = 0x00000444, + .perf_count_status = 0x00000448, + }, + /* Bayer perf count regs from here onwards */ + { + .perf_count_cfg = 0x0000E3DC, + .perf_count_cfg_mc = 0x0000E3E0, + .perf_pix_count = 0x0000E3E4, + .perf_line_count = 0x0000E3E8, + .perf_stall_count = 0x0000E3EC, + .perf_always_count = 0x0000E3F0, + .perf_count_status = 0x0000E3F4, + }, + { + .perf_count_cfg = 0x0000E3F8, + .perf_count_cfg_mc = 0x0000E3FC, + .perf_pix_count = 0x0000E400, + .perf_line_count = 0x0000E404, + .perf_stall_count = 0x0000E408, + .perf_always_count = 0x0000E40C, + .perf_count_status = 0x0000E410, + }, + { + .perf_count_cfg = 0x0000E414, + .perf_count_cfg_mc = 0x0000E418, + .perf_pix_count = 0x0000E41C, + .perf_line_count = 0x0000E420, + .perf_stall_count = 0x0000E424, + .perf_always_count = 0x0000E428, + .perf_count_status = 0x0000E42C, + }, + { + .perf_count_cfg = 0x0000E430, + .perf_count_cfg_mc = 0x0000E434, + .perf_pix_count = 0x0000E438, + .perf_line_count = 0x0000E43C, + .perf_stall_count = 0x0000E440, + .perf_always_count = 0x0000E444, + .perf_count_status = 0x0000E448, + }, + }, + .top_debug_err_vec_irq = {0x000004E4, 0x000004E8}, + 
.top_debug_err_vec_ts_lb = 0x000004DC, + .top_debug_err_vec_ts_mb = 0x000004E0, + .bayer_debug_err_vec_irq = {0x0000E4E4, 0x0}, + .bayer_debug_err_vec_ts_lb = 0x0000E4DC, + .bayer_debug_err_vec_ts_mb = 0x0000E4E0, + .top_debug_cfg = 0x00000548, + .bayer_debug_cfg = 0x0000E518, + .num_top_debug_reg = CAM_TFE_1080_NUM_TOP_DBG_REG, + .top_debug = tfe1080_top_debug_reg, + .num_bayer_debug_reg = CAM_TFE_1080_NUM_BAYER_DBG_REG, + .bayer_debug = tfe1080_bayer_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS0, + .capabilities = CAM_VFE_COMMON_CAP_SKIP_CORE_CFG | + CAM_VFE_COMMON_CAP_CORE_MUX_CFG | + CAM_VFE_COMMON_CAP_DEBUG_ERR_VEC, +}; + +static struct cam_vfe_ver4_path_reg_data tfe1080_ipp_common_reg_data = { + .sof_irq_mask = 0x150, + .eof_irq_mask = 0x2A0, + .error_irq_mask = 0xF000005, + .ipp_violation_mask = 0x4000000, + .bayer_violation_mask = 0x4, + .pdaf_violation_mask = 0x2000000, + .diag_violation_mask = 0x8000000, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0xF000, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + .is_mc_path = true, + /* SOF and EOF mask combined for each context */ + .frm_irq_hw_ctxt_mask = { + 0x30, + 0xC0, + 0x300, + }, +}; + +static struct cam_vfe_ver4_path_reg_data tfe1080_pdlib_reg_data = { + .sof_irq_mask = 0x400, + .eof_irq_mask = 0x800, + .diag_sensor_sel_mask = 0x8, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, +}; + +static struct cam_vfe_ver4_path_reg_data tfe1080_vfe_full_rdi_reg_data[5] = { + { + .sof_irq_mask = 0x1000, + .eof_irq_mask = 0x2000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0xA, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x4000, + .eof_irq_mask = 0x8000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0xC, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x10000, + .eof_irq_mask = 
0x20000, + .error_irq_mask = 0x0, + .enable_diagnostic_hw = 0x1, + .diag_sensor_sel_mask = 0xE, + .diag_frm_count_mask_0 = 0x200, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x40000, + .eof_irq_mask = 0x80000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x10, + .diag_frm_count_mask_0 = 0x400, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x100000, + .eof_irq_mask = 0x200000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x12, + .diag_frm_count_mask_0 = 0x800, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, +}; + +struct cam_vfe_ver4_path_hw_info + tfe1080_rdi_hw_info_arr[] = { + { + .common_reg = &tfe1080_top_common_reg, + .reg_data = &tfe1080_vfe_full_rdi_reg_data[0], + }, + { + .common_reg = &tfe1080_top_common_reg, + .reg_data = &tfe1080_vfe_full_rdi_reg_data[1], + }, + { + .common_reg = &tfe1080_top_common_reg, + .reg_data = &tfe1080_vfe_full_rdi_reg_data[2], + }, + { + .common_reg = &tfe1080_top_common_reg, + .reg_data = &tfe1080_vfe_full_rdi_reg_data[3], + }, + { + .common_reg = &tfe1080_top_common_reg, + .reg_data = &tfe1080_vfe_full_rdi_reg_data[4], + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info tfe1080_top_dbg_reg_info[ + CAM_TFE_1080_NUM_TOP_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit("test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + { + VFE_DBG_INFO_WITH_IDLE(0, "STATS_AWB_BG_TINTLESS", + 0x00000530, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(4, "STATS_AWB_BG_AE", + 0x00000530, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(8, "STATS_BHIST_AEC", + 0x00000530, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(12, "STATS_RS", + 0x00000530, (BIT(9) | BIT(10) | BIT(11))), + VFE_DBG_INFO_WITH_IDLE(16, "STATS_BFW_AWB", + 0x00000530, (BIT(12) | BIT(13) | BIT(14))), + VFE_DBG_INFO_WITH_IDLE(20, "STATS_AWB_BG_AWB", + 0x00000530, 
(BIT(15) | BIT(16) | BIT(17))), + VFE_DBG_INFO_WITH_IDLE(24, "STATS_BHIST_AF", + 0x00000530, (BIT(18) | BIT(19) | BIT(20))), + VFE_DBG_INFO_WITH_IDLE(28, "STATS_AWB_BG_ALSC", + 0x00000530, (BIT(21) | BIT(22) | BIT(23))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "STATS_BHIST_TMC", + 0x00000530, (BIT(24) | BIT(25) | BIT(26))), + VFE_DBG_INFO_WITH_IDLE(4, "compdecomp_fd", + 0x00000530, BIT(27)), + VFE_DBG_INFO_WITH_IDLE(8, "color_correct", + 0x00000530, BIT(28)), + VFE_DBG_INFO_WITH_IDLE(12, "gtm", + 0x00000530, BIT(29)), + VFE_DBG_INFO_WITH_IDLE(16, "glut", + 0x00000530, BIT(30)), + VFE_DBG_INFO_WITH_IDLE(20, "color_xform", + 0x00000530, BIT(31)), + VFE_DBG_INFO_WITH_IDLE(24, "downscale_mn_y", + 0x00000534, BIT(0)), + VFE_DBG_INFO_WITH_IDLE(28, "downscale_mn_c", + 0x00000534, BIT(1)), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "crop_rnd_clamp_fd_y", + 0x00000534, BIT(2)), + VFE_DBG_INFO_WITH_IDLE(4, "crop_rnd_clamp_fd_c", + 0x00000534, BIT(3)), + VFE_DBG_INFO_WITH_IDLE(8, "bds2_demo", + 0x00000534, (BIT(4) | BIT(5) | BIT(6))), + VFE_DBG_INFO_WITH_IDLE(12, "punch_bds2", + 0x00000534, (BIT(7) | BIT(8) | BIT(9))), + VFE_DBG_INFO_WITH_IDLE(16, "punch_ds4_mux", + 0x00000534, (BIT(10) | BIT(11) | BIT(12))), + VFE_DBG_INFO_WITH_IDLE(20, "bayer_ds_4_ds4", + 0x00000534, (BIT(13) | BIT(14) | BIT(15))), + VFE_DBG_INFO_WITH_IDLE(24, "crop_rnd_clamp_ds4", + 0x00000534, (BIT(16) | BIT(17) | BIT(18))), + VFE_DBG_INFO_WITH_IDLE(28, "punch_ds16", + 0x00000534, (BIT(19) | BIT(20) | BIT(21))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "bayer_ds_4_ds16", + 0x00000534, (BIT(22) | BIT(23) | BIT(24))), + VFE_DBG_INFO_WITH_IDLE(4, "crop_rnd_clamp_ds16", + 0x00000534, (BIT(25) | BIT(26) | BIT(27))), + VFE_DBG_INFO_WITH_IDLE(8, "crop_rnd_clamp_ds2", + 0x00000534, (BIT(28) | BIT(29) | BIT(30))), + VFE_DBG_INFO_WITH_IDLE(12, "clc_haf", + 0x00000534, BIT(31)), + VFE_DBG_INFO_WITH_IDLE(16, "clc_rcs_ds2", + 0x00000538, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(20, "clc_crop_rnd_clamp_full_out", + 
0x00000538, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(24, "clc_compdecomp_bypass", + 0x00000538, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(28, "clc_crop_rnd_clamp_bypass", + 0x00000538, (BIT(9) | BIT(10) | BIT(11))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "clc_rcs_full_out", + 0x00000538, (BIT(12) | BIT(13) | BIT(14))), + VFE_DBG_INFO_WITH_IDLE(4, "clc_haf", + 0x00000538, BIT(15)), + VFE_DBG_INFO_WITH_IDLE(8, "csid_tfe_ipp", + 0x00000538, (BIT(16) | BIT(17) | BIT(18))), + VFE_DBG_INFO_WITH_IDLE(12, "ppp_repeater", + 0x00000538, BIT(19)), + VFE_DBG_INFO_WITH_IDLE(16, "stats_awb_bg_tintless_throttle", + 0x00000538, (BIT(20) | BIT(21) | BIT(22))), + VFE_DBG_INFO_WITH_IDLE(20, "stats_awb_bg_ae_throttle", + 0x00000538, (BIT(23) | BIT(24) | BIT(25))), + VFE_DBG_INFO_WITH_IDLE(24, "stats_ae_bhist_throttle", + 0x00000538, (BIT(26) | BIT(27) | BIT(28))), + VFE_DBG_INFO_WITH_IDLE(28, "stats_bayer_rs_throttle", + 0x00000538, (BIT(29) | BIT(30) | BIT(31))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "stats_bayer_bfw_throttle", + 0x0000053C, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(4, "stats_awb_bg_awb_throttle", + 0x0000053C, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(8, "stats_bhist_af_throttle", + 0x0000053C, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(12, "full_out_throttle", + 0x0000053C, (BIT(9) | BIT(10) | BIT(11))), + VFE_DBG_INFO_WITH_IDLE(16, "ds4_out_y_throttle", + 0x0000053C, (BIT(12) | BIT(13) | BIT(14))), + VFE_DBG_INFO_WITH_IDLE(20, "ds4_out_c_throttle", + 0x0000053C, (BIT(15) | BIT(16) | BIT(17))), + VFE_DBG_INFO_WITH_IDLE(24, "ds16_out_y_throttle", + 0x0000053C, (BIT(18) | BIT(19) | BIT(20))), + VFE_DBG_INFO_WITH_IDLE(28, "ds16_out_c_throttle", + 0x0000053C, (BIT(21) | BIT(22) | BIT(23))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "ds2_out_y_throttle", + 0x0000053C, (BIT(24) | BIT(25) | BIT(26))), + VFE_DBG_INFO_WITH_IDLE(4, "ds2_out_c_throttle", + 0x0000053C, (BIT(27) | BIT(28) | BIT(29))), + 
VFE_DBG_INFO_WITH_IDLE(8, "raw_out_throttle", + 0x00000540, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(12, "fd_out_y_throttle", + 0x00000540, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(16, "fd_out_c_throttle", + 0x00000540, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(20, "haf_sad_stats_throttle", + 0x0000053C, BIT(30)), + VFE_DBG_INFO_WITH_IDLE(24, "haf_caf_stats_throttle", + 0x0000053C, BIT(31)), + VFE_DBG_INFO_WITH_IDLE(28, "haf_parsed_throttle", + 0x00000540, BIT(9)), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "haf_pre_processed", + 0x00000540, BIT(10)), + VFE_DBG_INFO(4, "full_out"), + VFE_DBG_INFO(8, "ubwc_stats"), + VFE_DBG_INFO(12, "ds4_out_y"), + VFE_DBG_INFO(16, "ds4_out_c"), + VFE_DBG_INFO(20, "ds16_out_y"), + VFE_DBG_INFO(24, "ds16_out_c"), + VFE_DBG_INFO(28, "ds2_out_y"), + }, + VFE_DBG_INFO_ARRAY_4bit( + "ubwc_stats", + "ds2_out_c", + "fd_out_y", + "fd_out_c", + "raw_out", + "stats_awb_bg_ae", + "stats_ae_bhist", + "stats_awb_bg_tintless" + ), + { + VFE_DBG_INFO_WITH_IDLE(0, "stats_awb_bg_alsc", + 0x00000540, (BIT(20) | BIT(21) | BIT(22))), + VFE_DBG_INFO(4, "stats_throttle_to_bus_awb_bg_awb"), + VFE_DBG_INFO(8, "stats_throttle_to_bus_bayer_bfw"), + VFE_DBG_INFO(12, "stats_throttle_to_bus_bhist_af"), + VFE_DBG_INFO(16, "stats_throttle_to_bus_awb_bg_alsc"), + VFE_DBG_INFO(20, "stats_throttle_to_bus_bayer_rs"), + VFE_DBG_INFO(24, "stats_throttle_to_bus_bhist_tmc"), + VFE_DBG_INFO(28, "stats_throttle_to_bus_sad"), + + }, + VFE_DBG_INFO_ARRAY_4bit( + "tfe_haf_processed_to_bus", + "tfe_haf_parsed_to_bus", + "tfe_stats_throttle_to_bus", + "rdi0_splitter_to_bus_wr", + "rdi1_splitter_to_bus_wr", + "rdi2_splitter_to_bus_wr", + "rdi3_splitter_to_bus_wr", + "rdi4_splitter_to_bus_wr" + ), + { + VFE_DBG_INFO_WITH_IDLE(0, "stats_bhist_tmc_throttle", + 0x00000540, (BIT(23) | BIT(24) | BIT(25))), + VFE_DBG_INFO_WITH_IDLE(4, "clc_bls", + 0x00000544, BIT(1)), + VFE_DBG_INFO(8, "reserved"), + VFE_DBG_INFO(12, "reserved"), + VFE_DBG_INFO(16, 
"reserved"), + VFE_DBG_INFO(20, "reserved"), + VFE_DBG_INFO(24, "reserved"), + VFE_DBG_INFO(28, "reserved"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info tfe1080_bayer_dbg_reg_info[ + CAM_TFE_1080_NUM_BAYER_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit("test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + { + VFE_DBG_INFO_WITH_IDLE(0, "clc_demux_w0", + 0x0000E510, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(4, "clc_bpc_pdpc_gic_w0", + 0x0000E510, (BIT(3) 
| BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(8, "clc_pdpc_bpc_1d_w0", + 0x0000E510, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(12, "clc_abf_binc_w0", + 0x0000E510, (BIT(9) | BIT(10) | BIT(11))), + VFE_DBG_INFO_WITH_IDLE(16, "clc_channel_gains_w0", + 0x0000E510, (BIT(12) | BIT(13) | BIT(14))), + VFE_DBG_INFO_WITH_IDLE(20, "clc_lsc_w3", + 0x0000E510, (BIT(15) | BIT(16) | BIT(17))), + VFE_DBG_INFO_WITH_IDLE(24, "clc_fcg_w2", + 0x0000E510, (BIT(18) | BIT(19) | BIT(20))), + VFE_DBG_INFO_WITH_IDLE(28, "clc_wb_gain_w6", + 0x0000E510, (BIT(21) | BIT(22) | BIT(23))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "clc_compdecomp_bayer_w0", + 0x0000E510, (BIT(24) | BIT(25) | BIT(26))), + VFE_DBG_INFO_WITH_IDLE(4, "reserved", + 0x0000E510, BIT(27)), + VFE_DBG_INFO_WITH_IDLE(8, "clc_univ_channel_gains", + 0x0000E510, BIT(28)), + VFE_DBG_INFO_WITH_IDLE(12, "clc_qpdr", + 0x0000E510, BIT(29)), + VFE_DBG_INFO(16, "reserved"), + VFE_DBG_INFO(20, "reserved"), + VFE_DBG_INFO(24, "reserved"), + VFE_DBG_INFO(28, "reserved"), + }, + VFE_DBG_INFO_ARRAY_4bit( + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved" + ), + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, 
"non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_info tfe1080_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFFFF, + .name = "SENSOR_VBI_IPP_CONTEXT_0", + }, + { + .bitmask = 0x10000000, + .name = "SENSOR_VBI_IPP_MIN_ERROR", + }, + { + .bitmask = 0xFFFF, + .name = "SENSOR_VBI_IPP_CONTEXT_1", + }, + { + .bitmask = 0xFFFF0000, + .name = "SENSOR_VBI_IPP_CONTEXT_2", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_PPP_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_3_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_4_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_IPP_CONTEXT0_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_IPP_CONTEXT1_PIPE", + }, + { + .bitmask = 0xFF0000, + 
.name = "FRAME_CNT_IPP_CONTEXT2_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_IPP_ALL_CONTEXT_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields tfe1080_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &tfe1080_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &tfe1080_diag_reg_info[3], + }, + { + .num_fields = 2, + .field = &tfe1080_diag_reg_info[4], + }, + { + .num_fields = 2, + .field = &tfe1080_diag_reg_info[6], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields tfe1080_diag_frame_field[] = { + { + .num_fields = 4, + .field = &tfe1080_diag_reg_info[8], + }, + { + .num_fields = 2, + .field = &tfe1080_diag_reg_info[12], + }, + { + .num_fields = 4, + .field = &tfe1080_diag_reg_info[14], + }, +}; + +static struct cam_vfe_ver4_fcg_module_info tfe1080_fcg_module_info = { + .max_fcg_ch_ctx = 3, + .max_fcg_predictions = 3, + .fcg_index_shift = 16, + .max_reg_val_pair_size = 6, + .fcg_type_size = 2, + .fcg_phase_index_cfg_0 = 0x00010470, + .fcg_phase_index_cfg_1 = 0x00010474, + .fcg_reg_ctxt_shift = 0x0, + .fcg_reg_ctxt_sel = 0x000105F4, + .fcg_reg_ctxt_mask = 0x7, +}; + +static struct cam_vfe_top_ver4_hw_info tfe1080_top_hw_info = { + .common_reg = &tfe1080_top_common_reg, + .vfe_full_hw_info = { + .common_reg = &tfe1080_top_common_reg, + .reg_data = &tfe1080_ipp_common_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &tfe1080_top_common_reg, + .reg_data = &tfe1080_pdlib_reg_data, + }, + .rdi_hw_info = tfe1080_rdi_hw_info_arr, + .wr_client_desc = tfe1080_wr_client_desc, + .ipp_module_desc = tfe1080_ipp_mod_desc, + .bayer_module_desc = tfe1080_bayer_mod_desc, + .num_mux = 7, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_PDLIB_VER_1_0, + }, + .num_path_port_map = 3, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, 
CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA}, + }, + .num_rdi = ARRAY_SIZE(tfe1080_rdi_hw_info_arr), + .num_top_errors = ARRAY_SIZE(tfe1080_top_irq_err_desc), + .top_err_desc = tfe1080_top_irq_err_desc, + .num_pdaf_violation_errors = ARRAY_SIZE(tfe1080_haf_violation_desc), + .pdaf_violation_desc = tfe1080_haf_violation_desc, + .top_debug_reg_info = &tfe1080_top_dbg_reg_info, + .bayer_debug_reg_info = &tfe1080_bayer_dbg_reg_info, + .fcg_module_info = &tfe1080_fcg_module_info, + .fcg_mc_supported = true, + .diag_sensor_info = tfe1080_diag_sensor_field, + .diag_frame_info = tfe1080_diag_frame_field, +}; + +static struct cam_irq_register_set tfe1080_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x00001018, + .clear_reg_offset = 0x00001020, + .status_reg_offset = 0x00001028, + }, + { + .mask_reg_offset = 0x0000101C, + .clear_reg_offset = 0x00001024, + .status_reg_offset = 0x0000102C, + }, +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe1080_ubwc_regs_client_0 = { + .meta_addr = 0x00001540, + .meta_cfg = 0x00001544, + .mode_cfg = 0x00001548, + .stats_ctrl = 0x0000154C, + .ctrl_2 = 0x00001550, + .lossy_thresh0 = 0x00001554, + .lossy_thresh1 = 0x00001558, + .off_lossy_var = 0x0000155C, + .bw_limit = 0x0000151C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe1080_ubwc_regs_client_1 = { + .meta_addr = 0x00001640, + .meta_cfg = 0x00001644, + .mode_cfg = 0x00001648, + .stats_ctrl = 0x0000164C, + .ctrl_2 = 0x00001650, + .lossy_thresh0 = 0x00001654, + .lossy_thresh1 = 0x00001658, + .off_lossy_var = 0x0000165C, + .bw_limit = 0x0000161C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe1080_ubwc_regs_client_2 = { + .meta_addr = 0x00001740, + .meta_cfg = 0x00001744, + .mode_cfg = 0x00001748, + .stats_ctrl = 0x0000174C, + .ctrl_2 = 0x00001750, + .lossy_thresh0 = 0x00001754, + .lossy_thresh1 = 0x00001758, + 
.off_lossy_var = 0x0000175C, + .bw_limit = 0x0000171C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe1080_ubwc_regs_client_3 = { + .meta_addr = 0x00001840, + .meta_cfg = 0x00001844, + .mode_cfg = 0x00001848, + .stats_ctrl = 0x0000184C, + .ctrl_2 = 0x00001850, + .lossy_thresh0 = 0x00001854, + .lossy_thresh1 = 0x00001858, + .off_lossy_var = 0x0000185C, + .bw_limit = 0x0000181C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe1080_ubwc_regs_client_4 = { + .meta_addr = 0x00001940, + .meta_cfg = 0x00001944, + .mode_cfg = 0x00001948, + .stats_ctrl = 0x0000194C, + .ctrl_2 = 0x00001950, + .lossy_thresh0 = 0x00001954, + .lossy_thresh1 = 0x00001958, + .off_lossy_var = 0x0000195C, + .bw_limit = 0x0000191C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe1080_ubwc_regs_client_5 = { + .meta_addr = 0x00001A40, + .meta_cfg = 0x00001A44, + .mode_cfg = 0x00001A48, + .stats_ctrl = 0x00001A4C, + .ctrl_2 = 0x00001A50, + .lossy_thresh0 = 0x00001A54, + .lossy_thresh1 = 0x00001A58, + .off_lossy_var = 0x00001A5C, + .bw_limit = 0x00001A1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe1080_ubwc_regs_client_6 = { + .meta_addr = 0x00001B40, + .meta_cfg = 0x00001B44, + .mode_cfg = 0x00001B48, + .stats_ctrl = 0x00001B4C, + .ctrl_2 = 0x00001B50, + .lossy_thresh0 = 0x00001B54, + .lossy_thresh1 = 0x00001B58, + .off_lossy_var = 0x00001B5C, + .bw_limit = 0x00001B1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe1080_ubwc_regs_client_20 = { + .meta_addr = 0x00002940, + .meta_cfg = 0x00002944, + .mode_cfg = 0x00002948, + .stats_ctrl = 0x0000294C, + .ctrl_2 = 0x00002950, + .lossy_thresh0 = 0x00002954, + .lossy_thresh1 = 0x00002958, + .off_lossy_var = 0x0000295C, + .bw_limit = 0x0000291C, + .ubwc_comp_en_bit = BIT(1), +}; + +static uint32_t 
tfe1080_out_port_mid[][12] = { + {56}, + {57}, + {58}, + {59}, + {60}, + {32, 34, 36, 33, 35, 37}, + {44, 46, 48, 45, 47, 49, 50, 52, 54, 51, 53, 55}, + {38, 40, 42, 39, 41, 43, 44, 46, 48, 45, 47, 49}, + {32, 34, 36, 33, 35, 37, 38, 40, 42, 39, 41, 43}, + {56, 57, 58}, + {50, 52, 54, 51, 53, 55}, + {32, 33, 34}, + {35, 36, 37}, + {38, 39, 40}, + {41, 42, 43}, + {44, 45, 46}, + {47, 48, 49}, + {50, 51, 52}, + {53, 54, 55}, + {56, 57, 58}, + {59}, + {61, 62}, + {60}, + {59}, +}; + +static struct cam_vfe_bus_ver3_hw_info tfe1080_bus_hw_info = { + .common_reg = { + .hw_version = 0x00001000, + .cgc_ovd = 0x00001008, + .ctxt_sel = 0x00001124, + .ubwc_static_ctrl = 0x00001058, + .pwr_iso_cfg = 0x0000105C, + .overflow_status_clear = 0x00001060, + .ccif_violation_status = 0x00001064, + .overflow_status = 0x00001068, + .image_size_violation_status = 0x00001070, + .debug_status_top_cfg = 0x000010F0, + .debug_status_top = 0x000010F4, + .test_bus_ctrl = 0x00001128, + .mc_read_sel_shift = 0x5, + .mc_write_sel_shift = 0x0, + .mc_ctxt_mask = 0x7, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 2, + .irq_reg_set = tfe1080_bus_irq_reg, + .global_irq_cmd_offset = 0x00001030, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = CAM_TFE_BUS_VER3_1080_MAX_CLIENTS, + .bus_client_reg = { + /* BUS Client 0 FULL */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + .frame_header_incr = 0x00001524, + .frame_header_cfg = 0x00001528, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 
0x00001568, + .addr_cfg = 0x00001570, + .ctxt_cfg = 0x00001578, + .addr_status_0 = 0x00001590, + .addr_status_1 = 0x00001594, + .addr_status_2 = 0x00001598, + .addr_status_3 = 0x0000159C, + .debug_status_cfg = 0x0000157C, + .debug_status_0 = 0x00001580, + .debug_status_1 = 0x00001584, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe1080_ubwc_regs_client_0, + .supported_formats = BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10)| + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN8) | BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + .rcs_en_mask = 0x200, + }, + /* BUS Client 1 DS4_Y */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .ctxt_cfg = 0x00001678, + .addr_status_0 = 0x00001690, + .addr_status_1 = 0x00001694, + .addr_status_2 = 0x00001698, + .addr_status_3 = 0x0000169C, + .debug_status_cfg = 0x0000167C, + .debug_status_0 = 0x00001680, + .debug_status_1 = 0x00001684, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe1080_ubwc_regs_client_1, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 2 
DS4_C */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .ctxt_cfg = 0x00001778, + .addr_status_0 = 0x00001790, + .addr_status_1 = 0x00001794, + .addr_status_2 = 0x00001798, + .addr_status_3 = 0x0000179C, + .debug_status_cfg = 0x0000177C, + .debug_status_0 = 0x00001780, + .debug_status_1 = 0x00001784, + .bw_limiter_addr = 0x0000171C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe1080_ubwc_regs_client_2, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 3 DS16_Y */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .ctxt_cfg = 0x00001878, + .addr_status_0 = 0x00001890, + .addr_status_1 = 0x00001894, + .addr_status_2 = 0x00001898, + .addr_status_3 = 0x0000189C, + .debug_status_cfg = 0x0000187C, + .debug_status_0 = 0x00001880, + .debug_status_1 = 0x00001884, + .bw_limiter_addr = 
0x0000181C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe1080_ubwc_regs_client_3, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 4 DS16_C */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + .ctxt_cfg = 0x00001978, + .addr_status_0 = 0x00001990, + .addr_status_1 = 0x00001994, + .addr_status_2 = 0x00001998, + .addr_status_3 = 0x0000199C, + .debug_status_cfg = 0x0000197C, + .debug_status_0 = 0x00001980, + .debug_status_1 = 0x00001984, + .bw_limiter_addr = 0x0000191C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe1080_ubwc_regs_client_4, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 5 DS2_Y */ + { + .cfg = 0x00001A00, + .image_addr = 0x00001A04, + .frame_incr = 0x00001A08, + .image_cfg_0 = 0x00001A0C, + .image_cfg_1 = 0x00001A10, + .image_cfg_2 = 0x00001A14, + .packer_cfg = 0x00001A18, + .frame_header_addr = 0x00001A20, + .frame_header_incr = 0x00001A24, + .frame_header_cfg = 0x00001A28, + .irq_subsample_period = 0x00001A30, + .irq_subsample_pattern = 0x00001A34, + .framedrop_period = 0x00001A38, + .framedrop_pattern = 0x00001A3C, + .mmu_prefetch_cfg = 0x00001A60, + .mmu_prefetch_max_offset = 0x00001A64, + .system_cache_cfg = 0x00001A68, + .addr_cfg = 
0x00001A70, + .ctxt_cfg = 0x00001A78, + .addr_status_0 = 0x00001A90, + .addr_status_1 = 0x00001A94, + .addr_status_2 = 0x00001A98, + .addr_status_3 = 0x00001A9C, + .debug_status_cfg = 0x00001A7C, + .debug_status_0 = 0x00001A80, + .debug_status_1 = 0x00001A84, + .bw_limiter_addr = 0x00001A1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe1080_ubwc_regs_client_5, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10), + .rcs_en_mask = 0x200, + }, + /* BUS Client 6 DS2_C */ + { + .cfg = 0x00001B00, + .image_addr = 0x00001B04, + .frame_incr = 0x00001B08, + .image_cfg_0 = 0x00001B0C, + .image_cfg_1 = 0x00001B10, + .image_cfg_2 = 0x00001B14, + .packer_cfg = 0x00001B18, + .frame_header_addr = 0x00001B20, + .frame_header_incr = 0x00001B24, + .frame_header_cfg = 0x00001B28, + .irq_subsample_period = 0x00001B30, + .irq_subsample_pattern = 0x00001B34, + .framedrop_period = 0x00001B38, + .framedrop_pattern = 0x00001B3C, + .mmu_prefetch_cfg = 0x00001B60, + .mmu_prefetch_max_offset = 0x00001B64, + .system_cache_cfg = 0x00001B68, + .addr_cfg = 0x00001B70, + .ctxt_cfg = 0x00001B78, + .addr_status_0 = 0x00001B90, + .addr_status_1 = 0x00001B94, + .addr_status_2 = 0x00001B98, + .addr_status_3 = 0x00001B9C, + .debug_status_cfg = 0x00001B7C, + .debug_status_0 = 0x00001B80, + .debug_status_1 = 0x00001B84, + .bw_limiter_addr = 0x00001B1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe1080_ubwc_regs_client_6, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | 
BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10), + + .rcs_en_mask = 0x200, + }, + /* BUS Client 7 FD_Y */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .framedrop_period = 0x00001C38, + .framedrop_pattern = 0x00001C3C, + .mmu_prefetch_cfg = 0x00001C60, + .mmu_prefetch_max_offset = 0x00001C64, + .system_cache_cfg = 0x00001C68, + .addr_cfg = 0x00001C70, + .ctxt_cfg = 0x00001C78, + .addr_status_0 = 0x00001C90, + .addr_status_1 = 0x00001C94, + .addr_status_2 = 0x00001C98, + .addr_status_3 = 0x00001C9C, + .debug_status_cfg = 0x00001C7C, + .debug_status_0 = 0x00001C80, + .debug_status_1 = 0x00001C84, + .bw_limiter_addr = 0x00001C1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12), + }, + /* BUS Client 8 FD_C */ + { + .cfg = 0x00001D00, + .image_addr = 0x00001D04, + .frame_incr = 0x00001D08, + .image_cfg_0 = 0x00001D0C, + .image_cfg_1 = 0x00001D10, + .image_cfg_2 = 0x00001D14, + .packer_cfg = 0x00001D18, + .frame_header_addr = 0x00001D20, + .frame_header_incr = 0x00001D24, + .frame_header_cfg = 0x00001D28, + .irq_subsample_period = 0x00001D30, + .irq_subsample_pattern = 0x00001D34, + .framedrop_period = 0x00001D38, + .framedrop_pattern = 0x00001D3C, + .mmu_prefetch_cfg = 0x00001D60, + .mmu_prefetch_max_offset = 0x00001D64, + .system_cache_cfg = 0x00001D68, + .addr_cfg = 0x00001D70, + .ctxt_cfg = 
0x00001D78, + .addr_status_0 = 0x00001D90, + .addr_status_1 = 0x00001D94, + .addr_status_2 = 0x00001D98, + .addr_status_3 = 0x00001D9C, + .debug_status_cfg = 0x00001D7C, + .debug_status_0 = 0x00001D80, + .debug_status_1 = 0x00001D84, + .bw_limiter_addr = 0x00001D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12), + }, + /* BUS Client 9 PIXEL RAW */ + { + .cfg = 0x00001E00, + .image_addr = 0x00001E04, + .frame_incr = 0x00001E08, + .image_cfg_0 = 0x00001E0C, + .image_cfg_1 = 0x00001E10, + .image_cfg_2 = 0x00001E14, + .packer_cfg = 0x00001E18, + .frame_header_addr = 0x00001E20, + .frame_header_incr = 0x00001E24, + .frame_header_cfg = 0x00001E28, + .irq_subsample_period = 0x00001E30, + .irq_subsample_pattern = 0x00001E34, + .framedrop_period = 0x00001E38, + .framedrop_pattern = 0x00001E3C, + .mmu_prefetch_cfg = 0x00001E60, + .mmu_prefetch_max_offset = 0x00001E64, + .system_cache_cfg = 0x00001E68, + .addr_cfg = 0x00001E70, + .ctxt_cfg = 0x00001E78, + .addr_status_0 = 0x00001E90, + .addr_status_1 = 0x00001E94, + .addr_status_2 = 0x00001E98, + .addr_status_3 = 0x00001E9C, + .debug_status_cfg = 0x00001E7C, + .debug_status_0 = 0x00001E80, + .debug_status_1 = 0x00001E84, + .bw_limiter_addr = 0x00001E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14), + }, + /* BUS Client 10 STATS_AEC_BE */ + { + .cfg = 0x00001F00, + .image_addr = 0x00001F04, + .frame_incr = 0x00001F08, + .image_cfg_0 = 0x00001F0C, + .image_cfg_1 = 0x00001F10, + .image_cfg_2 = 0x00001F14, + .packer_cfg = 0x00001F18, + .frame_header_addr = 
0x00001F20, + .frame_header_incr = 0x00001F24, + .frame_header_cfg = 0x00001F28, + .irq_subsample_period = 0x00001F30, + .irq_subsample_pattern = 0x00001F34, + .framedrop_period = 0x00001F38, + .framedrop_pattern = 0x00001F3C, + .mmu_prefetch_cfg = 0x00001F60, + .mmu_prefetch_max_offset = 0x00001F64, + .system_cache_cfg = 0x00001F68, + .addr_cfg = 0x00001F70, + .ctxt_cfg = 0x00001F78, + .addr_status_0 = 0x00001F90, + .addr_status_1 = 0x00001F94, + .addr_status_2 = 0x00001F98, + .addr_status_3 = 0x00001F9C, + .debug_status_cfg = 0x00001F7C, + .debug_status_0 = 0x00001F80, + .debug_status_1 = 0x00001F84, + .bw_limiter_addr = 0x00001F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 11 STATS_AEC_BHIST */ + { + .cfg = 0x00002000, + .image_addr = 0x00002004, + .frame_incr = 0x00002008, + .image_cfg_0 = 0x0000200C, + .image_cfg_1 = 0x00002010, + .image_cfg_2 = 0x00002014, + .packer_cfg = 0x00002018, + .frame_header_addr = 0x00002020, + .frame_header_incr = 0x00002024, + .frame_header_cfg = 0x00002028, + .irq_subsample_period = 0x00002030, + .irq_subsample_pattern = 0x00002034, + .framedrop_period = 0x00002038, + .framedrop_pattern = 0x0000203C, + .mmu_prefetch_cfg = 0x00002060, + .mmu_prefetch_max_offset = 0x00002064, + .system_cache_cfg = 0x00002068, + .addr_cfg = 0x00002070, + .ctxt_cfg = 0x00002078, + .addr_status_0 = 0x00002090, + .addr_status_1 = 0x00002094, + .addr_status_2 = 0x00002098, + .addr_status_3 = 0x0000209C, + .debug_status_cfg = 0x0000207C, + .debug_status_0 = 0x00002080, + .debug_status_1 = 0x00002084, + .bw_limiter_addr = 0x0000201C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 12 STATS_TINTLESS_BG */ + { + .cfg = 0x00002100, + .image_addr = 0x00002104, + .frame_incr = 0x00002108, + .image_cfg_0 = 0x0000210C, + .image_cfg_1 = 0x00002110, + .image_cfg_2 = 0x00002114, + 
.packer_cfg = 0x00002118, + .frame_header_addr = 0x00002120, + .frame_header_incr = 0x00002124, + .frame_header_cfg = 0x00002128, + .irq_subsample_period = 0x00002130, + .irq_subsample_pattern = 0x00002134, + .framedrop_period = 0x00002138, + .framedrop_pattern = 0x0000213C, + .mmu_prefetch_cfg = 0x00002160, + .mmu_prefetch_max_offset = 0x00002164, + .system_cache_cfg = 0x00002168, + .addr_cfg = 0x00002170, + .ctxt_cfg = 0x00002178, + .addr_status_0 = 0x00002190, + .addr_status_1 = 0x00002194, + .addr_status_2 = 0x00002198, + .addr_status_3 = 0x0000219C, + .debug_status_cfg = 0x0000217C, + .debug_status_0 = 0x00002180, + .debug_status_1 = 0x00002184, + .bw_limiter_addr = 0x0000211C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 13 STATS_AWB_BG */ + { + .cfg = 0x00002200, + .image_addr = 0x00002204, + .frame_incr = 0x00002208, + .image_cfg_0 = 0x0000220C, + .image_cfg_1 = 0x00002210, + .image_cfg_2 = 0x00002214, + .packer_cfg = 0x00002218, + .frame_header_addr = 0x00002220, + .frame_header_incr = 0x00002224, + .frame_header_cfg = 0x00002228, + .irq_subsample_period = 0x00002230, + .irq_subsample_pattern = 0x00002234, + .framedrop_period = 0x00002238, + .framedrop_pattern = 0x0000223C, + .mmu_prefetch_cfg = 0x00002260, + .mmu_prefetch_max_offset = 0x00002264, + .system_cache_cfg = 0x00002268, + .addr_cfg = 0x00002270, + .ctxt_cfg = 0x00002278, + .addr_status_0 = 0x00002290, + .addr_status_1 = 0x00002294, + .addr_status_2 = 0x00002298, + .addr_status_3 = 0x0000229C, + .debug_status_cfg = 0x0000227C, + .debug_status_0 = 0x00002280, + .debug_status_1 = 0x00002284, + .bw_limiter_addr = 0x0000221C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 14 STATS_AWB_BFW */ + { + .cfg = 0x00002300, + .image_addr = 0x00002304, + .frame_incr = 0x00002308, + .image_cfg_0 = 0x0000230C, + .image_cfg_1 = 
0x00002310, + .image_cfg_2 = 0x00002314, + .packer_cfg = 0x00002318, + .frame_header_addr = 0x00002320, + .frame_header_incr = 0x00002324, + .frame_header_cfg = 0x00002328, + .irq_subsample_period = 0x00002330, + .irq_subsample_pattern = 0x00002334, + .framedrop_period = 0x00002338, + .framedrop_pattern = 0x0000233C, + .mmu_prefetch_cfg = 0x00002360, + .mmu_prefetch_max_offset = 0x00002364, + .system_cache_cfg = 0x00002368, + .addr_cfg = 0x00002370, + .ctxt_cfg = 0x00002378, + .addr_status_0 = 0x00002390, + .addr_status_1 = 0x00002394, + .addr_status_2 = 0x00002398, + .addr_status_3 = 0x0000239C, + .debug_status_cfg = 0x0000237C, + .debug_status_0 = 0x00002380, + .debug_status_1 = 0x00002384, + .bw_limiter_addr = 0x0000231C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 15 STATS_AF_BHIST */ + { + .cfg = 0x00002400, + .image_addr = 0x00002404, + .frame_incr = 0x00002408, + .image_cfg_0 = 0x0000240C, + .image_cfg_1 = 0x00002410, + .image_cfg_2 = 0x00002414, + .packer_cfg = 0x00002418, + .frame_header_addr = 0x00002420, + .frame_header_incr = 0x00002424, + .frame_header_cfg = 0x00002428, + .irq_subsample_period = 0x00002430, + .irq_subsample_pattern = 0x00002434, + .framedrop_period = 0x00002438, + .framedrop_pattern = 0x0000243C, + .mmu_prefetch_cfg = 0x00002460, + .mmu_prefetch_max_offset = 0x00002464, + .system_cache_cfg = 0x00002468, + .addr_cfg = 0x00002470, + .ctxt_cfg = 0x00002478, + .addr_status_0 = 0x00002490, + .addr_status_1 = 0x00002494, + .addr_status_2 = 0x00002498, + .addr_status_3 = 0x0000249C, + .debug_status_cfg = 0x0000247C, + .debug_status_0 = 0x00002480, + .debug_status_1 = 0x00002484, + .bw_limiter_addr = 0x0000241C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 16 STATS_ALSC_BG */ + { + .cfg = 0x00002500, + .image_addr = 0x00002504, + .frame_incr = 0x00002508, + 
.image_cfg_0 = 0x0000250C, + .image_cfg_1 = 0x00002510, + .image_cfg_2 = 0x00002514, + .packer_cfg = 0x00002518, + .frame_header_addr = 0x00002520, + .frame_header_incr = 0x00002524, + .frame_header_cfg = 0x00002528, + .irq_subsample_period = 0x00002530, + .irq_subsample_pattern = 0x00002534, + .framedrop_period = 0x00002538, + .framedrop_pattern = 0x0000253C, + .mmu_prefetch_cfg = 0x00002560, + .mmu_prefetch_max_offset = 0x00002564, + .system_cache_cfg = 0x00002568, + .addr_cfg = 0x00002570, + .ctxt_cfg = 0x00002578, + .addr_status_0 = 0x00002590, + .addr_status_1 = 0x00002594, + .addr_status_2 = 0x00002598, + .addr_status_3 = 0x0000259C, + .debug_status_cfg = 0x0000257C, + .debug_status_0 = 0x00002580, + .debug_status_1 = 0x00002584, + .bw_limiter_addr = 0x0000251C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 17 STATS_FLICKER_BAYERS */ + { + .cfg = 0x00002600, + .image_addr = 0x00002604, + .frame_incr = 0x00002608, + .image_cfg_0 = 0x0000260C, + .image_cfg_1 = 0x00002610, + .image_cfg_2 = 0x00002614, + .packer_cfg = 0x00002618, + .frame_header_addr = 0x00002620, + .frame_header_incr = 0x00002624, + .frame_header_cfg = 0x00002628, + .irq_subsample_period = 0x00002630, + .irq_subsample_pattern = 0x00002634, + .framedrop_period = 0x00002638, + .framedrop_pattern = 0x0000263C, + .mmu_prefetch_cfg = 0x00002660, + .mmu_prefetch_max_offset = 0x00002664, + .system_cache_cfg = 0x00002668, + .addr_cfg = 0x00002670, + .ctxt_cfg = 0x00002678, + .addr_status_0 = 0x00002690, + .addr_status_1 = 0x00002694, + .addr_status_2 = 0x00002698, + .addr_status_3 = 0x0000269C, + .debug_status_cfg = 0x0000267C, + .debug_status_0 = 0x00002680, + .debug_status_1 = 0x00002684, + .bw_limiter_addr = 0x0000261C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN32), + }, + /* BUS Client 18 STATS_TMC_BHIST */ + { + .cfg = 0x00002700, + 
.image_addr = 0x00002704, + .frame_incr = 0x00002708, + .image_cfg_0 = 0x0000270C, + .image_cfg_1 = 0x00002710, + .image_cfg_2 = 0x00002714, + .packer_cfg = 0x00002718, + .frame_header_addr = 0x00002720, + .frame_header_incr = 0x00002724, + .frame_header_cfg = 0x00002728, + .irq_subsample_period = 0x00002730, + .irq_subsample_pattern = 0x00002734, + .framedrop_period = 0x00002738, + .framedrop_pattern = 0x0000273C, + .mmu_prefetch_cfg = 0x00002760, + .mmu_prefetch_max_offset = 0x00002764, + .system_cache_cfg = 0x00002768, + .addr_cfg = 0x00002770, + .ctxt_cfg = 0x00002778, + .addr_status_0 = 0x00002790, + .addr_status_1 = 0x00002794, + .addr_status_2 = 0x00002798, + .addr_status_3 = 0x0000279C, + .debug_status_cfg = 0x0000277C, + .debug_status_0 = 0x00002780, + .debug_status_1 = 0x00002784, + .bw_limiter_addr = 0x0000271C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 19 PDAF_0 */ /* Note: PDAF_SAD == 2PD*/ + { + .cfg = 0x00002800, + .image_addr = 0x00002804, + .frame_incr = 0x00002808, + .image_cfg_0 = 0x0000280C, + .image_cfg_1 = 0x00002810, + .image_cfg_2 = 0x00002814, + .packer_cfg = 0x00002818, + .frame_header_addr = 0x00002820, + .frame_header_incr = 0x00002824, + .frame_header_cfg = 0x00002828, + .irq_subsample_period = 0x00002830, + .irq_subsample_pattern = 0x00002834, + .framedrop_period = 0x00002838, + .framedrop_pattern = 0x0000283C, + .mmu_prefetch_cfg = 0x00002860, + .mmu_prefetch_max_offset = 0x00002864, + .system_cache_cfg = 0x00002868, + .addr_cfg = 0x00002870, + .ctxt_cfg = 0x00002878, + .addr_status_0 = 0x00002890, + .addr_status_1 = 0x00002894, + .addr_status_2 = 0x00002898, + .addr_status_3 = 0x0000289C, + .debug_status_cfg = 0x0000287C, + .debug_status_0 = 0x00002880, + .debug_status_1 = 0x00002884, + .bw_limiter_addr = 0x0000281C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, 
+ /* BUS Client 20 PDAF_1 */ + { + .cfg = 0x00002900, + .image_addr = 0x00002904, + .frame_incr = 0x00002908, + .image_cfg_0 = 0x0000290C, + .image_cfg_1 = 0x00002910, + .image_cfg_2 = 0x00002914, + .packer_cfg = 0x00002918, + .frame_header_addr = 0x00002920, + .frame_header_incr = 0x00002924, + .frame_header_cfg = 0x00002928, + .irq_subsample_period = 0x00002930, + .irq_subsample_pattern = 0x00002934, + .framedrop_period = 0x00002938, + .framedrop_pattern = 0x0000293C, + .mmu_prefetch_cfg = 0x00002960, + .mmu_prefetch_max_offset = 0x00002964, + .system_cache_cfg = 0x00002968, + .addr_cfg = 0x00002970, + .ctxt_cfg = 0x00002978, + .addr_status_0 = 0x00002990, + .addr_status_1 = 0x00002994, + .addr_status_2 = 0x00002998, + .addr_status_3 = 0x0000299C, + .debug_status_cfg = 0x0000297C, + .debug_status_0 = 0x00002980, + .debug_status_1 = 0x00002984, + .bw_limiter_addr = 0x0000291C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = &tfe1080_ubwc_regs_client_20, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_UBWC_P016), + }, + /* BUS Client 21 PDAF_2 */ + { + .cfg = 0x00002A00, + .image_addr = 0x00002A04, + .frame_incr = 0x00002A08, + .image_cfg_0 = 0x00002A0C, + .image_cfg_1 = 0x00002A10, + .image_cfg_2 = 0x00002A14, + .packer_cfg = 0x00002A18, + .frame_header_addr = 0x00002A20, + .frame_header_incr = 0x00002A24, + .frame_header_cfg = 0x00002A28, + .irq_subsample_period = 0x00002A30, + .irq_subsample_pattern = 0x00002A34, + .framedrop_period = 0x00002A38, + .framedrop_pattern = 0x00002A3C, + .mmu_prefetch_cfg = 0x00002A60, + .mmu_prefetch_max_offset = 0x00002A64, + .system_cache_cfg = 0x00002A68, + .addr_cfg = 0x00002A70, + .ctxt_cfg = 0x00002A78, + .addr_status_0 = 0x00002A90, + .addr_status_1 = 0x00002A94, + .addr_status_2 = 0x00002A98, + .addr_status_3 = 0x00002A9C, + .debug_status_cfg = 
0x00002A7C, + .debug_status_0 = 0x00002A80, + .debug_status_1 = 0x00002A84, + .bw_limiter_addr = 0x00002A1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16), + }, + /* BUS Client 22 PDAF_3 */ + { + .cfg = 0x00002B00, + .image_addr = 0x00002B04, + .frame_incr = 0x00002B08, + .image_cfg_0 = 0x00002B0C, + .image_cfg_1 = 0x00002B10, + .image_cfg_2 = 0x00002B14, + .packer_cfg = 0x00002B18, + .frame_header_addr = 0x00002B20, + .frame_header_incr = 0x00002B24, + .frame_header_cfg = 0x00002B28, + .irq_subsample_period = 0x00002B30, + .irq_subsample_pattern = 0x00002B34, + .framedrop_period = 0x00002B38, + .framedrop_pattern = 0x00002B3C, + .mmu_prefetch_cfg = 0x00002B60, + .mmu_prefetch_max_offset = 0x00002B64, + .system_cache_cfg = 0x00002B68, + .addr_cfg = 0x00002B70, + .ctxt_cfg = 0x00002B78, + .addr_status_0 = 0x00002B90, + .addr_status_1 = 0x00002B94, + .addr_status_2 = 0x00002B98, + .addr_status_3 = 0x00002B9C, + .debug_status_cfg = 0x00002B7C, + .debug_status_0 = 0x00002B80, + .debug_status_1 = 0x00002B84, + .bw_limiter_addr = 0x00002B1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 23 RDI_0 */ + { + .cfg = 0x00002C00, + .image_addr = 0x00002C04, + .frame_incr = 0x00002C08, + .image_cfg_0 = 0x00002C0C, + .image_cfg_1 = 0x00002C10, + .image_cfg_2 = 0x00002C14, + .packer_cfg = 0x00002C18, + .frame_header_addr = 0x00002C20, + .frame_header_incr = 0x00002C24, + .frame_header_cfg = 0x00002C28, + .irq_subsample_period = 0x00002C30, + .irq_subsample_pattern = 0x00002C34, + .framedrop_period = 0x00002C38, + .framedrop_pattern = 0x00002C3C, + .mmu_prefetch_cfg = 0x00002C60, + .mmu_prefetch_max_offset = 0x00002C64, + .system_cache_cfg = 0x00002C68, + 
.addr_cfg = 0x00002C70, + .ctxt_cfg = 0x00002C78, + .addr_status_0 = 0x00002C90, + .addr_status_1 = 0x00002C94, + .addr_status_2 = 0x00002C98, + .addr_status_3 = 0x00002C9C, + .debug_status_cfg = 0x00002C7C, + .debug_status_0 = 0x00002C80, + .debug_status_1 = 0x00002C84, + .bw_limiter_addr = 0x00002C1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_5, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 24 RDI_1 */ + { + .cfg = 0x00002D00, + .image_addr = 0x00002D04, + .frame_incr = 0x00002D08, + .image_cfg_0 = 0x00002D0C, + .image_cfg_1 = 0x00002D10, + .image_cfg_2 = 0x00002D14, + .packer_cfg = 0x00002D18, + .frame_header_addr = 0x00002D20, + .frame_header_incr = 0x00002D24, + .frame_header_cfg = 0x00002D28, + .irq_subsample_period = 0x00002D30, + .irq_subsample_pattern = 0x00002D34, + .framedrop_period = 0x00002D38, + .framedrop_pattern = 0x00002D3C, + .mmu_prefetch_cfg = 0x00002D60, + .mmu_prefetch_max_offset = 0x00002D64, + .system_cache_cfg = 0x00002D68, + .addr_cfg = 0x00002D70, + .ctxt_cfg = 0x00002D78, + .addr_status_0 = 0x00002D90, + .addr_status_1 = 0x00002D94, + .addr_status_2 = 0x00002D98, + .addr_status_3 = 0x00002D9C, + .debug_status_cfg = 0x00002D7C, + .debug_status_0 = 0x00002D80, + .debug_status_1 = 0x00002D84, + .bw_limiter_addr = 0x00002D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + 
BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 25 RDI_2 */ + { + .cfg = 0x00002E00, + .image_addr = 0x00002E04, + .frame_incr = 0x00002E08, + .image_cfg_0 = 0x00002E0C, + .image_cfg_1 = 0x00002E10, + .image_cfg_2 = 0x00002E14, + .packer_cfg = 0x00002E18, + .frame_header_addr = 0x00002E20, + .frame_header_incr = 0x00002E24, + .frame_header_cfg = 0x00002E28, + .irq_subsample_period = 0x00002E30, + .irq_subsample_pattern = 0x00002E34, + .framedrop_period = 0x00002E38, + .framedrop_pattern = 0x00002E3C, + .mmu_prefetch_cfg = 0x00002E60, + .mmu_prefetch_max_offset = 0x00002E64, + .system_cache_cfg = 0x00002E68, + .addr_cfg = 0x00002E70, + .ctxt_cfg = 0x00002E78, + .addr_status_0 = 0x00002E90, + .addr_status_1 = 0x00002E94, + .addr_status_2 = 0x00002E98, + .addr_status_3 = 0x00002E9C, + .debug_status_cfg = 0x00002E7C, + .debug_status_0 = 0x00002E80, + .debug_status_1 = 0x00002E84, + .bw_limiter_addr = 0x00002E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 26 RDI_3 */ + { + .cfg = 0x00002F00, + .image_addr = 0x00002F04, + .frame_incr = 0x00002F08, + .image_cfg_0 = 0x00002F0C, + .image_cfg_1 = 0x00002F10, + .image_cfg_2 = 0x00002F14, + .packer_cfg = 0x00002F18, + .frame_header_addr = 0x00002F20, + .frame_header_incr = 0x00002F24, + .frame_header_cfg = 0x00002F28, + .irq_subsample_period = 0x00002F30, + .irq_subsample_pattern = 0x00002F34, + .framedrop_period = 0x00002F38, + .framedrop_pattern = 0x00002F3C, + .mmu_prefetch_cfg = 0x00002F60, + 
.mmu_prefetch_max_offset = 0x00002F64, + .system_cache_cfg = 0x00002F68, + .addr_cfg = 0x00002F70, + .ctxt_cfg = 0x00002F78, + .addr_status_0 = 0x00002F90, + .addr_status_1 = 0x00002F94, + .addr_status_2 = 0x00002F98, + .addr_status_3 = 0x00002F9C, + .debug_status_cfg = 0x00002F7C, + .debug_status_0 = 0x00002F80, + .debug_status_1 = 0x00002F84, + .bw_limiter_addr = 0x00002F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_8, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 27 RDI_4 */ + { + .cfg = 0x00003000, + .image_addr = 0x00003004, + .frame_incr = 0x00003008, + .image_cfg_0 = 0x0000300C, + .image_cfg_1 = 0x00003010, + .image_cfg_2 = 0x00003014, + .packer_cfg = 0x00003018, + .frame_header_addr = 0x00003020, + .frame_header_incr = 0x00003024, + .frame_header_cfg = 0x00003028, + .irq_subsample_period = 0x00003030, + .irq_subsample_pattern = 0x00003034, + .framedrop_period = 0x00003038, + .framedrop_pattern = 0x0000303C, + .mmu_prefetch_cfg = 0x00003060, + .mmu_prefetch_max_offset = 0x00003064, + .system_cache_cfg = 0x00003068, + .addr_cfg = 0x00003070, + .ctxt_cfg = 0x00003078, + .addr_status_0 = 0x00003090, + .addr_status_1 = 0x00003094, + .addr_status_2 = 0x00003098, + .addr_status_3 = 0x0000309C, + .debug_status_cfg = 0x0000307C, + .debug_status_0 = 0x00003080, + .debug_status_1 = 0x00003084, + .bw_limiter_addr = 0x0000301C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_9, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + }, + .num_out = 24, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .mid = tfe1080_out_port_mid[0], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 23, + }, + .name = { + "RDI_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .mid 
= tfe1080_out_port_mid[1], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 24, + }, + .name = { + "RDI_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .mid = tfe1080_out_port_mid[2], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 25, + }, + .name = { + "RDI_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_5, + .mid = tfe1080_out_port_mid[3], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 26, + }, + .name = { + "RDI_3", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI4, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_6, + .mid = tfe1080_out_port_mid[4], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 27, + }, + .name = { + "RDI_4", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL, + .max_width = 4672, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[5], + .num_mid = 6, + .num_wm = 1, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 0, + }, + .name = { + "FULL", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4, + .max_width = 1168, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[6], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 1, + 2, + }, + .name = { + "DS4_Y", + "DS4_C" + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16, + .max_width = 292, + .max_height = 1024, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[7], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 3, + 4, + }, + .name = { + "DS16_Y", + "DS16_C", + }, + }, + 
{ + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS2, + .max_width = 4672, + .max_height = 8192, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[8], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .wm_idx = { + 5, + 6, + }, + .name = { + "DS2_Y", + "DS2_C", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD, + .max_width = 9312, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[9], + .num_mid = 3, + .num_wm = 2, + .line_based = 1, + .cntxt_cfg_except = true, + .wm_idx = { + 7, + 8, + }, + .name = { + "FD_Y", + "FD_C", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP, + .max_width = 4672, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[10], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 9, + }, + .name = { + "PIXEL_RAW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[11], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 10, + }, + .name = { + "STATS_AEC_BE", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[12], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 11, + }, + .name = { + "STATS_BHIST", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[13], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 12, + }, + .name = { + "STATS_TL_BG", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG, + 
.max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[14], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 13, + }, + .name = { + "STATS_AWB_BG", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[15], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 14, + }, + .name = { + "AWB_BFW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AF_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[16], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 15, + }, + .name = { + "AF_BHIST", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_ALSC, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[17], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 16, + }, + .name = { + "ALSC_BG", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[18], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 17, + }, + .name = { + "STATS_RS", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TMC_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe1080_out_port_mid[19], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 18, + }, + .name = { + "STATS_TMC_BHIST", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD, + .max_width = 14592, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = 
tfe1080_out_port_mid[20], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 19, + }, + .name = { + "PDAF_0_2PD", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe1080_out_port_mid[21], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 20, + }, + .name = { + "PDAF_1_PREPROCESS_2PD", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe1080_out_port_mid[22], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 21, + }, + .name = { + "PDAF_2_PARSED_DATA", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe1080_out_port_mid[23], + .num_mid = 1, + .num_wm = 1, + .mc_based = false, + .mc_grp_shift = 4, + .wm_idx = { + 22, + }, + .name = { + "STATS_CAF", + }, + }, + }, + .num_cons_err = 32, + .constraint_error_list = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input Not Supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input Not Supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input Not Supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input Not Supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format Not Supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 BPP format Not Supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 24 BPP format Not Supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 32 BPP format Not Supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack 64 BPP format Not Supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 20 format Not Supported" + }, + { + .bitmask = BIT(10), + 
.error_description = "Pack MIPI 14 format Not Supported" + }, + { + .bitmask = BIT(11), + .error_description = "Pack MIPI 12 format Not Supported" + }, + { + .bitmask = BIT(12), + .error_description = "Pack MIPI 10 format Not Supported" + }, + { + .bitmask = BIT(13), + .error_description = "Pack 128 BPP format Not Supported" + }, + { + .bitmask = BIT(14), + .error_description = "UBWC P016 format Not Supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC P010 format Not Supported" + }, + { + .bitmask = BIT(16), + .error_description = "UBWC NV12 format Not Supported" + }, + { + .bitmask = BIT(17), + .error_description = "UBWC NV12 4R format Not Supported" + }, + { + .bitmask = BIT(18), + .error_description = "UBWC TP10 format Not Supported" + }, + { + .bitmask = BIT(19), + .error_description = "Frame based Mode Not Supported" + }, + { + .bitmask = BIT(20), + .error_description = "Index based Mode Not Supported" + }, + { + .bitmask = BIT(21), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = BIT(22), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(23), + .error_description = "FIFO framehdr addr unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(25), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Stride unalign" + }, + { + .bitmask = BIT(28), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(29), + .error_description = "Image Width unalign", + }, + { + .bitmask = BIT(30), + .error_description = "Image Height unalign", + }, + { + .bitmask = BIT(31), + .error_description = "Meta Stride unalign", + }, + }, + .num_comp_grp = 10, + .support_consumed_addr = true, + .mc_comp_done_mask = { + BIT(24), 0x0, BIT(25), 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, + }, + .comp_done_mask = { + 0x7, BIT(3), 
0x70, BIT(7), BIT(8), BIT(16), + BIT(17), BIT(18), BIT(19), BIT(20), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 43, + .pack_align_shift = 5, + .max_bw_counter_limit = 0xFF, + .skip_regdump = true, + .skip_regdump_start_offset = 0x1000, + .skip_regdump_stop_offset = 0x309C, +}; + +static struct cam_vfe_irq_hw_info tfe1080_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_EXT_CSID, + .top_irq_reg = &tfe1080_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_tfe1080_hw_info = { + .irq_hw_info = &tfe1080_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &tfe1080_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &tfe1080_top_hw_info, +}; +#endif /* _CAM_TFE1080_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe970.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe970.h new file mode 100644 index 0000000000..3ec624744e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe970.h @@ -0,0 +1,2180 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_TFE970_H_ +#define _CAM_TFE970_H_ +#include "cam_vfe_core.h" +#include "cam_tfe980.h" + +#define CAM_TFE_BUS_VER3_970_MAX_CLIENTS 24 + +static struct cam_vfe_top_ver4_module_desc tfe970_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_STATS_AWB_BG_TINTLESS", + }, + { + .id = 1, + .desc = "CLC_STATS_AWB_BG_AE", + }, + { + .id = 2, + .desc = "CLC_STATS_BHIST_AEC", + }, + { + .id = 3, + .desc = "CLC_STATS_RS", + }, + { + .id = 4, + .desc = "CLC_STATS_BFW_AWB", + }, + { + .id = 5, + .desc = "CLC_STATS_AWB_BG_AWB", + }, + { + .id = 6, + .desc = "CLC_STATS_BHIST_AF", + }, + { + .id = 7, + .desc = "CLC_STATS_AWB_BG_ALSC", + }, + { + .id = 8, + .desc = "CLC_STATS_BHIST_TMC", + }, + { + .id = 9, + .desc = "CLC_COMPDECOMP_FD", + }, + { + .id = 10, + .desc = "CLC_COLOR_CORRECT", + }, + { + .id = 11, + .desc = "CLC_GTM", + }, + { + .id = 12, + .desc = "CLC_GLUT", + }, + { + .id = 13, + .desc = "CLC_COLOR_XFORM", + }, + { + .id = 14, + .desc = "CLC_DOWNSCALE_MN_Y", + }, + { + .id = 15, + .desc = "CLC_DOWNSCALE_MN_C", + }, + { + .id = 16, + .desc = "CLC_CROP_RND_CLAMP_FD_Y", + }, + { + .id = 17, + .desc = "CLC_CROP_RND_CLAMP_FD_C", + }, + { + .id = 18, + .desc = "CLC_BDS2_DEMO", + }, + { + .id = 19, + .desc = "CLC_PUNCH_BDS2", + }, + { + .id = 20, + .desc = "CLC_PUNCH_DS4_MUX", + }, + { + .id = 21, + .desc = "CLC_BAYER_DS_4_DS4", + }, + { + .id = 22, + .desc = "CLC_CROP_RND_CLAMP_DS4" + }, + { + .id = 23, + .desc = "CLC_PUNCH_DS16" + }, + { + .id = 24, + .desc = "CLC_BAYER_DS_4_DS16", + }, + { + .id = 25, + .desc = "CLC_CROP_RND_CLAMP_DS16", + }, + { + .id = 26, + .desc = "CLC_CROP_RND_CLAMP_DS2", + }, + { + .id = 27, + .desc = "CLC_RCS_DS2", + }, + { + .id = 28, + .desc = "CLC_CROP_RND_CLAMP_FULL_OUT", + }, + { + .id = 29, + .desc = "CLC_COMPDECOMP_BYPASS", + }, + { + .id = 30, + .desc = "CLC_CROP_RND_CLAMP_BYPASS", + }, + { + .id = 31, + .desc = "CLC_RCS_FULL_OUT", + }, +}; + +struct cam_vfe_top_ver4_module_desc tfe970_bayer_mod_desc[] = { + { + .id = 0, + 
.desc = "CLC_DEMUX", + }, + { + .id = 1, + .desc = "CLC_BPC_PDPC_GIC", + }, + { + .id = 2, + .desc = "CLC_PDPC_BPC_1D", + }, + { + .id = 3, + .desc = "CLC_ABF_BINC", + }, + { + .id = 4, + .desc = "CLC_CHANNEL_GAINS", + }, + { + .id = 5, + .desc = "CLC_LSC", + }, + { + .id = 6, + .desc = "CLC_FCG", + }, + { + .id = 7, + .desc = "CLC_WB_GAIN", + }, + { + .id = 8, + .desc = "CLC_COMPDECOMP_BAYER", + }, + { + .id = 9, + .desc = "CLC_CROP_RND_CLAMP_WIRC", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc tfe970_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "VIDEO", + }, + { + .wm_id = 1, + .desc = "DS4_Y", + }, + { + .wm_id = 2, + .desc = "DS4_C", + }, + { + .wm_id = 3, + .desc = "DS16_Y", + }, + { + .wm_id = 4, + .desc = "DS16_C", + }, + { + .wm_id = 5, + .desc = "DS2_Y", + }, + { + .wm_id = 6, + .desc = "DS2_C", + }, + { + .wm_id = 7, + .desc = "FD_Y", + }, + { + .wm_id = 8, + .desc = "FD_C", + }, + { + .wm_id = 9, + .desc = "STATS_AEC_BE", + }, + { + .wm_id = 10, + .desc = "STATS_AEC_BHIST", + }, + { + .wm_id = 11, + .desc = "STATS_TINTLESS_BG", + }, + { + .wm_id = 12, + .desc = "STATS_AWB_BG", + }, + { + .wm_id = 13, + .desc = "STATS_AWB_BFW", + }, + { + .wm_id = 14, + .desc = "STATS_FLICKER", + }, + { + .wm_id = 15, + .desc = "STATS_TMC_BHIST", + }, + { + .wm_id = 16, + .desc = "PDAF_0_STATS", + }, + { + .wm_id = 17, + .desc = "PDAF_1_PREPROCESS_2PD", + }, + { + .wm_id = 18, + .desc = "PDAF_2_PARSED_DATA", + }, + { + .wm_id = 19, + .desc = "PDAF_3_CAF", + }, + { + .wm_id = 20, + .desc = "RDI0", + }, + { + .wm_id = 21, + .desc = "RDI1", + }, + { + .wm_id = 22, + .desc = "RDI2", + }, + { + .wm_id = 23, + .desc = "RDI3", + }, +}; + +static struct cam_vfe_top_ver4_top_err_irq_desc tfe970_top_irq_err_desc[] = { + { + .bitmask = BIT(2), + .err_name = "BAYER_HM violation", + .desc = "CLC CCIF Violation", + }, + { + .bitmask = BIT(24), + .err_name = "DYNAMIC PDAF SWITCH VIOLATION", + .desc = + "HAF RDI exposure select changes dynamically, the common vbi is 
insufficient", + }, + { + .bitmask = BIT(25), + .err_name = "HAF violation", + .desc = "CLC_HAF Violation", + }, + { + .bitmask = BIT(26), + .err_name = "PP VIOLATION", + .desc = "CCIF protocol violation", + }, + { + .bitmask = BIT(27), + .err_name = "DIAG VIOLATION", + .desc = "Sensor: The HBI at TFE input is less than the spec (64 cycles)", + .debug = "Check sensor config", + }, +}; + +static struct cam_vfe_top_ver4_pdaf_violation_desc tfe970_haf_violation_desc[] = { + { + .bitmask = BIT(0), + .desc = "Sim monitor 1 violation - SAD output", + }, + { + .bitmask = BIT(1), + .desc = "Sim monitor 2 violation - pre-proc output", + }, + { + .bitmask = BIT(2), + .desc = "Sim monitor 3 violation - parsed output", + }, + { + .bitmask = BIT(3), + .desc = "Sim monitor 4 violation - CAF output", + }, + { + .bitmask = BIT(4), + .desc = "PDAF constraint violation", + }, + { + .bitmask = BIT(5), + .desc = "CAF constraint violation", + }, + { + .bitmask = BIT(6), + .desc = "RDI-PD protocol violation", + }, +}; + +static uint32_t tfe970_top_debug_reg[] = { + 0x00000190, + 0x00000194, + 0x00000198, + 0x0000019C, + 0x000001A0, + 0x000001A4, + 0x000001A8, + 0x000001AC, + 0x000001B0, + 0x000001B4, + 0x000001B8, + 0x000001BC, + 0x000001C0, + 0x000001C4, + 0x000001C8, + 0x000001CC, + 0x000001D0, +}; + +static uint32_t tfe970_bayer_debug_reg[] = { + 0x0000C1BC, + 0x0000C1C0, + 0x0000C1C4, + 0x0000C1C8, + 0x0000C1CC, + 0x0000C1D0, + 0x0000C1D4, + 0x0000C1D8, + 0x0000C1DC, + 0x0000C1E0, +}; + +static struct cam_vfe_top_ver4_reg_offset_common tfe970_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .main_feature = 0x00000008, + .bayer_feature = 0x0000000C, + .stats_feature = 0x00000010, + .fd_feature = 0x00000014, + .core_cgc_ovd_0 = 0x00000018, + .ahb_cgc_ovd = 0x00000020, + .core_mux_cfg = 0x00000024, + .pdaf_input_cfg_0 = 0x00000028, + .pdaf_input_cfg_1 = 0x0000002C, + .stats_throttle_cfg_0 = 0x00000030, + .stats_throttle_cfg_1 = 0x00000034, + 
.stats_throttle_cfg_2 = 0x00000038, + .core_cfg_4 = 0x0000003C, + .pdaf_parsed_throttle_cfg = 0x00000040, + .wirc_throttle_cfg = 0x00000044, + .fd_y_throttle_cfg = 0x00000048, + .fd_c_throttle_cfg = 0x0000004C, + .ds16_g_throttle_cfg = 0x00000050, + .ds16_br_throttle_cfg = 0x00000054, + .ds4_g_throttle_cfg = 0x00000058, + .ds4_br_throttle_cfg = 0x0000005C, + .ds2_g_throttle_cfg = 0x00000060, + .ds2_br_throttle_cfg = 0x00000064, + .full_out_throttle_cfg = 0x00000068, + .diag_config = 0x00000094, + .global_reset_cmd = 0x0000007C, + .diag_sensor_status = {0x00000098, 0x0000009C}, + .diag_frm_cnt_status = {0x000000A0, 0x000000A4, 0x000000A8}, + .ipp_violation_status = 0x00000090, + .bayer_violation_status = 0x0000C024, + .pdaf_violation_status = 0x00009304, + .bus_violation_status = 0x00000864, + .bus_overflow_status = 0x00000868, + .num_perf_counters = 8, + .perf_count_reg = { + { + .perf_count_cfg = 0x000000AC, + .perf_count_cfg_mc = 0x000000B0, + .perf_pix_count = 0x000000B4, + .perf_line_count = 0x000000B8, + .perf_stall_count = 0x000000BC, + .perf_always_count = 0x000000C0, + .perf_count_status = 0x000000C4, + }, + { + .perf_count_cfg = 0x000000C8, + .perf_count_cfg_mc = 0x000000CC, + .perf_pix_count = 0x000000D0, + .perf_line_count = 0x000000D4, + .perf_stall_count = 0x000000D8, + .perf_always_count = 0x000000DC, + .perf_count_status = 0x000000E0, + }, + { + .perf_count_cfg = 0x000000E4, + .perf_count_cfg_mc = 0x000000E8, + .perf_pix_count = 0x000000EC, + .perf_line_count = 0x000000F0, + .perf_stall_count = 0x000000F4, + .perf_always_count = 0x000000F8, + .perf_count_status = 0x000000FC, + }, + { + .perf_count_cfg = 0x00000100, + .perf_count_cfg_mc = 0x00000104, + .perf_pix_count = 0x00000108, + .perf_line_count = 0x0000010C, + .perf_stall_count = 0x00000110, + .perf_always_count = 0x00000114, + .perf_count_status = 0x00000118, + }, + /* Bayer per count regs from here onwards */ + { + .perf_count_cfg = 0x0000C028, + .perf_count_cfg_mc = 0x0000C02C, + 
.perf_pix_count = 0x0000C030, + .perf_line_count = 0x0000C034, + .perf_stall_count = 0x0000C038, + .perf_always_count = 0x0000C03C, + .perf_count_status = 0x0000C040, + }, + { + .perf_count_cfg = 0x0000C044, + .perf_count_cfg_mc = 0x0000C048, + .perf_pix_count = 0x0000C04C, + .perf_line_count = 0x0000C050, + .perf_stall_count = 0x0000C054, + .perf_always_count = 0x0000C058, + .perf_count_status = 0x0000C05C, + }, + { + .perf_count_cfg = 0x0000C060, + .perf_count_cfg_mc = 0x0000C064, + .perf_pix_count = 0x0000C068, + .perf_line_count = 0x0000C06C, + .perf_stall_count = 0x0000C070, + .perf_always_count = 0x0000C074, + .perf_count_status = 0x0000C078, + }, + { + .perf_count_cfg = 0x0000C07C, + .perf_count_cfg_mc = 0x0000C080, + .perf_pix_count = 0x0000C084, + .perf_line_count = 0x0000C088, + .perf_stall_count = 0x0000C08C, + .perf_always_count = 0x0000C090, + .perf_count_status = 0x0000C094, + }, + }, + .top_debug_cfg = 0x000001EC, + .bayer_debug_cfg = 0x0000C1EC, + .num_top_debug_reg = CAM_TFE_980_NUM_TOP_DBG_REG, + .top_debug = tfe970_top_debug_reg, + .num_bayer_debug_reg = CAM_TFE_980_NUM_BAYER_DBG_REG, + .bayer_debug = tfe970_bayer_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS0, + .capabilities = CAM_VFE_COMMON_CAP_SKIP_CORE_CFG | + CAM_VFE_COMMON_CAP_CORE_MUX_CFG, +}; + +static struct cam_vfe_ver4_path_reg_data tfe970_ipp_common_reg_data = { + .sof_irq_mask = 0x150, + .eof_irq_mask = 0x2A0, + .error_irq_mask = 0xF000004, + .ipp_violation_mask = 0x4000000, + .bayer_violation_mask = 0x4, + .pdaf_violation_mask = 0x2000000, + .diag_violation_mask = 0x8000000, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0xF000, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + .is_mc_path = true, + /* SOF and EOF mask combined for each context */ + .frm_irq_hw_ctxt_mask = { + 0x30, + 0xC0, + 0x300, + }, +}; + +static struct cam_vfe_ver4_path_reg_data tfe970_pdlib_reg_data = { + .sof_irq_mask = 0x400, + .eof_irq_mask = 0x800, + 
.diag_sensor_sel_mask = 0x8, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x40, + .top_debug_cfg_en = 3, +}; + +static struct cam_vfe_ver4_path_reg_data tfe970_vfe_full_rdi_reg_data[5] = { + { + .sof_irq_mask = 0x1000, + .eof_irq_mask = 0x2000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0xA, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x4000, + .eof_irq_mask = 0x8000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0xC, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x10000, + .eof_irq_mask = 0x20000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0xE, + .diag_frm_count_mask_0 = 0x200, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x40000, + .eof_irq_mask = 0x80000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x10, + .diag_frm_count_mask_0 = 0x400, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, +}; + +struct cam_vfe_ver4_path_hw_info + tfe970_rdi_hw_info_arr[] = { + { + .common_reg = &tfe970_top_common_reg, + .reg_data = &tfe970_vfe_full_rdi_reg_data[0], + }, + { + .common_reg = &tfe970_top_common_reg, + .reg_data = &tfe970_vfe_full_rdi_reg_data[1], + }, + { + .common_reg = &tfe970_top_common_reg, + .reg_data = &tfe970_vfe_full_rdi_reg_data[2], + }, + { + .common_reg = &tfe970_top_common_reg, + .reg_data = &tfe970_vfe_full_rdi_reg_data[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_info tfe970_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_PPP_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { 
+ .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_3_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_IPP_CONTEXT0_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_IPP_CONTEXT1_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_IPP_ALL_CONTEXT_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields tfe970_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &tfe970_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &tfe970_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields tfe970_diag_frame_field[] = { + { + .num_fields = 4, + .field = &tfe970_diag_reg_info[4], + }, + { + .num_fields = 1, + .field = &tfe970_diag_reg_info[8], + }, + { + .num_fields = 3, + .field = &tfe970_diag_reg_info[10], + }, +}; + +static struct cam_vfe_ver4_fcg_module_info tfe970_fcg_module_info = { + .max_fcg_ch_ctx = 2, + .max_fcg_predictions = 3, + .fcg_index_shift = 16, + .max_reg_val_pair_size = 6, + .fcg_type_size = 2, + .fcg_phase_index_cfg_0 = 0x0000DE70, + .fcg_phase_index_cfg_1 = 0x0000DE74, + .fcg_reg_ctxt_shift = 0x0, + .fcg_reg_ctxt_sel = 0x0000DFF4, + .fcg_reg_ctxt_mask = 0x7, +}; + +static struct cam_vfe_top_ver4_hw_info tfe970_top_hw_info = { + .common_reg = &tfe970_top_common_reg, + .vfe_full_hw_info = { + .common_reg = &tfe970_top_common_reg, + .reg_data = &tfe970_ipp_common_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &tfe970_top_common_reg, + .reg_data = &tfe970_pdlib_reg_data, + }, + .rdi_hw_info = tfe970_rdi_hw_info_arr, + .wr_client_desc = tfe970_wr_client_desc, + .ipp_module_desc = tfe970_ipp_mod_desc, + .bayer_module_desc = tfe970_bayer_mod_desc, + .num_mux = 6, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_PDLIB_VER_1_0, + }, + .num_path_port_map = 3, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + 
{CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA}, + }, + .num_rdi = ARRAY_SIZE(tfe970_rdi_hw_info_arr), + .num_top_errors = ARRAY_SIZE(tfe970_top_irq_err_desc), + .top_err_desc = tfe970_top_irq_err_desc, + .num_pdaf_violation_errors = ARRAY_SIZE(tfe970_haf_violation_desc), + .pdaf_violation_desc = tfe970_haf_violation_desc, + .top_debug_reg_info = &tfe980_top_dbg_reg_info, + .bayer_debug_reg_info = &tfe980_bayer_dbg_reg_info, + .fcg_module_info = &tfe970_fcg_module_info, + .fcg_mc_supported = true, + .diag_sensor_info = tfe970_diag_sensor_field, + .diag_frame_info = tfe970_diag_frame_field, +}; + +static struct cam_irq_register_set tfe970_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x00000818, + .clear_reg_offset = 0x00000820, + .status_reg_offset = 0x00000828, + }, + { + .mask_reg_offset = 0x0000081C, + .clear_reg_offset = 0x00000824, + .status_reg_offset = 0x0000082C, + }, +}; + +static uint32_t tfe970_out_port_mid[][12] = { + {28}, + {29}, + {30}, + {31}, + {16, 17, 18, 19}, + {16, 17, 18, 19, 20, 21}, + {22, 23, 24, 25, 26, 27}, + {20, 21, 22, 23, 24, 25, 26, 27}, + {8, 9, 10}, + {16, 17}, + {18, 19}, + {20, 21}, + {22, 23}, + {24, 25}, + {26, 27}, + {28, 29}, + {11}, + {12}, + {13}, + {14}, +}; + +static struct cam_vfe_bus_ver3_hw_info tfe970_bus_hw_info = { + .common_reg = { + .hw_version = 0x00000800, + .cgc_ovd = 0x00000808, + .ctxt_sel = 0x00000924, + .ubwc_static_ctrl = 0x00000858, + .pwr_iso_cfg = 0x0000085C, + .overflow_status_clear = 0x00000860, + .ccif_violation_status = 0x00000864, + .overflow_status = 0x00000868, + .image_size_violation_status = 0x00000870, + .debug_status_top_cfg = 0x000008F0, + .debug_status_top = 0x000008F4, + .test_bus_ctrl = 0x00000928, + .mc_read_sel_shift = 0x5, + .mc_write_sel_shift = 0x0, + .mc_ctxt_mask = 0x7, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + 
.irq_reg_info = { + .num_registers = 2, + .irq_reg_set = tfe970_bus_irq_reg, + .global_irq_cmd_offset = 0x00000830, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = CAM_TFE_BUS_VER3_970_MAX_CLIENTS, + .bus_client_reg = { + /* BUS Client 0 FULL */ + { + .cfg = 0x00000D00, + .image_addr = 0x00000D04, + .frame_incr = 0x00000D08, + .image_cfg_0 = 0x00000D0C, + .image_cfg_1 = 0x00000D10, + .image_cfg_2 = 0x00000D14, + .packer_cfg = 0x00000D18, + .frame_header_addr = 0x00000D20, + .frame_header_incr = 0x00000D24, + .frame_header_cfg = 0x00000D28, + .irq_subsample_period = 0x00000D30, + .irq_subsample_pattern = 0x00000D34, + .framedrop_period = 0x00000D38, + .framedrop_pattern = 0x00000D3C, + .mmu_prefetch_cfg = 0x00000D60, + .mmu_prefetch_max_offset = 0x00000D64, + .system_cache_cfg = 0x00000D68, + .addr_cfg = 0x00000D70, + .ctxt_cfg = 0x00000D78, + .addr_status_0 = 0x00000D90, + .addr_status_1 = 0x00000D94, + .addr_status_2 = 0x00000D98, + .addr_status_3 = 0x00000D9C, + .debug_status_cfg = 0x00000D7C, + .debug_status_0 = 0x00000D80, + .debug_status_1 = 0x00000D84, + .bw_limiter_addr = 0x00000D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_0, + .supported_formats = BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10)| + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN8) | BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + .rcs_en_mask = 0x200, + }, + /* BUS Client 1 DS4_Y */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + .frame_header_cfg = 0x00000E28, + .irq_subsample_period = 
0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00000E38, + .framedrop_pattern = 0x00000E3C, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 0x00000E64, + .system_cache_cfg = 0x00000E68, + .addr_cfg = 0x00000E70, + .ctxt_cfg = 0x00000E78, + .addr_status_0 = 0x00000E90, + .addr_status_1 = 0x00000E94, + .addr_status_2 = 0x00000E98, + .addr_status_3 = 0x00000E9C, + .debug_status_cfg = 0x00000E7C, + .debug_status_0 = 0x00000E80, + .debug_status_1 = 0x00000E84, + .bw_limiter_addr = 0x00000E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 2 DS4_C */ + { + .cfg = 0x00000F00, + .image_addr = 0x00000F04, + .frame_incr = 0x00000F08, + .image_cfg_0 = 0x00000F0C, + .image_cfg_1 = 0x00000F10, + .image_cfg_2 = 0x00000F14, + .packer_cfg = 0x00000F18, + .frame_header_addr = 0x00000F20, + .frame_header_incr = 0x00000F24, + .frame_header_cfg = 0x00000F28, + .irq_subsample_period = 0x00000F30, + .irq_subsample_pattern = 0x00000F34, + .framedrop_period = 0x00000F38, + .framedrop_pattern = 0x00000F3C, + .mmu_prefetch_cfg = 0x00000F60, + .mmu_prefetch_max_offset = 0x00000F64, + .system_cache_cfg = 0x00000F68, + .addr_cfg = 0x00000F70, + .ctxt_cfg = 0x00000F78, + .addr_status_0 = 0x00000F90, + .addr_status_1 = 0x00000F94, + .addr_status_2 = 0x00000F98, + .addr_status_3 = 0x00000F9C, + .debug_status_cfg = 0x00000F7C, + .debug_status_0 = 0x00000F80, + .debug_status_1 = 0x00000F84, + .bw_limiter_addr = 0x00000F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 3 DS16_Y */ + { + .cfg = 0x00001000, + .image_addr = 0x00001004, + .frame_incr = 0x00001008, + .image_cfg_0 
= 0x0000100C, + .image_cfg_1 = 0x00001010, + .image_cfg_2 = 0x00001014, + .packer_cfg = 0x00001018, + .frame_header_addr = 0x00001020, + .frame_header_incr = 0x00001024, + .frame_header_cfg = 0x00001028, + .irq_subsample_period = 0x00001030, + .irq_subsample_pattern = 0x00001034, + .framedrop_period = 0x00001038, + .framedrop_pattern = 0x0000103C, + .mmu_prefetch_cfg = 0x00001060, + .mmu_prefetch_max_offset = 0x00001064, + .system_cache_cfg = 0x00001068, + .addr_cfg = 0x00001070, + .ctxt_cfg = 0x00001078, + .addr_status_0 = 0x00001090, + .addr_status_1 = 0x00001094, + .addr_status_2 = 0x00001098, + .addr_status_3 = 0x0000109C, + .debug_status_cfg = 0x0000107C, + .debug_status_0 = 0x00001080, + .debug_status_1 = 0x00001084, + .bw_limiter_addr = 0x0000101C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 4 DS16_C */ + { + .cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + .image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + .packer_cfg = 0x00001118, + .frame_header_addr = 0x00001120, + .frame_header_incr = 0x00001124, + .frame_header_cfg = 0x00001128, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .framedrop_period = 0x00001138, + .framedrop_pattern = 0x0000113C, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .system_cache_cfg = 0x00001168, + .addr_cfg = 0x00001170, + .ctxt_cfg = 0x00001178, + .addr_status_0 = 0x00001190, + .addr_status_1 = 0x00001194, + .addr_status_2 = 0x00001198, + .addr_status_3 = 0x0000119C, + .debug_status_cfg = 0x0000117C, + .debug_status_0 = 0x00001180, + .debug_status_1 = 0x00001184, + .bw_limiter_addr = 0x0000111C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + 
BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 5 DS2_Y */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .framedrop_period = 0x00001238, + .framedrop_pattern = 0x0000123C, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .system_cache_cfg = 0x00001268, + .addr_cfg = 0x00001270, + .ctxt_cfg = 0x00001278, + .addr_status_0 = 0x00001290, + .addr_status_1 = 0x00001294, + .addr_status_2 = 0x00001298, + .addr_status_3 = 0x0000129C, + .debug_status_cfg = 0x0000127C, + .debug_status_0 = 0x00001280, + .debug_status_1 = 0x00001284, + .bw_limiter_addr = 0x0000121C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_5, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10), + .rcs_en_mask = 0x200, + }, + /* BUS Client 6 DS2_C */ + { + .cfg = 0x00001300, + .image_addr = 0x00001304, + .frame_incr = 0x00001308, + .image_cfg_0 = 0x0000130C, + .image_cfg_1 = 0x00001310, + .image_cfg_2 = 0x00001314, + .packer_cfg = 0x00001318, + .frame_header_addr = 0x00001320, + .frame_header_incr = 0x00001324, + .frame_header_cfg = 0x00001328, + .irq_subsample_period = 0x00001330, + .irq_subsample_pattern = 0x00001334, + 
.framedrop_period = 0x00001338, + .framedrop_pattern = 0x0000133C, + .mmu_prefetch_cfg = 0x00001360, + .mmu_prefetch_max_offset = 0x00001364, + .system_cache_cfg = 0x00001368, + .addr_cfg = 0x00001370, + .ctxt_cfg = 0x00001378, + .addr_status_0 = 0x00001390, + .addr_status_1 = 0x00001394, + .addr_status_2 = 0x00001398, + .addr_status_3 = 0x0000139C, + .debug_status_cfg = 0x0000137C, + .debug_status_0 = 0x00001380, + .debug_status_1 = 0x00001384, + .bw_limiter_addr = 0x0000131C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_6, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10), + + .rcs_en_mask = 0x200, + }, + /* BUS Client 7 FD_Y */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .frame_header_addr = 0x00001420, + .frame_header_incr = 0x00001424, + .frame_header_cfg = 0x00001428, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .system_cache_cfg = 0x00001468, + .addr_cfg = 0x00001470, + .ctxt_cfg = 0x00001478, + .addr_status_0 = 0x00001490, + .addr_status_1 = 0x00001494, + .addr_status_2 = 0x00001498, + .addr_status_3 = 0x0000149C, + .debug_status_cfg = 0x0000147C, + .debug_status_0 = 0x00001480, + .debug_status_1 = 0x00001484, + .bw_limiter_addr = 0x0000141C, + .comp_group = 
CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12), + }, + /* BUS Client 8 FD_C */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + .frame_header_incr = 0x00001524, + .frame_header_cfg = 0x00001528, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 0x00001568, + .addr_cfg = 0x00001570, + .ctxt_cfg = 0x00001578, + .addr_status_0 = 0x00001590, + .addr_status_1 = 0x00001594, + .addr_status_2 = 0x00001598, + .addr_status_3 = 0x0000159C, + .debug_status_cfg = 0x0000157C, + .debug_status_0 = 0x00001580, + .debug_status_1 = 0x00001584, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12), + }, + /* BUS Client 9 STATS_AEC_BE */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .ctxt_cfg = 0x00001678, + .addr_status_0 = 0x00001690, + .addr_status_1 = 0x00001694, + .addr_status_2 = 0x00001698, + .addr_status_3 = 0x0000169C, + 
.debug_status_cfg = 0x0000167C, + .debug_status_0 = 0x00001680, + .debug_status_1 = 0x00001684, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 10 STATS_AEC_BHIST */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .ctxt_cfg = 0x00001778, + .addr_status_0 = 0x00001790, + .addr_status_1 = 0x00001794, + .addr_status_2 = 0x00001798, + .addr_status_3 = 0x0000179C, + .debug_status_cfg = 0x0000177C, + .debug_status_0 = 0x00001780, + .debug_status_1 = 0x00001784, + .bw_limiter_addr = 0x0000171C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 11 STATS_TINTLESS_BG */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .ctxt_cfg = 0x00001878, + .addr_status_0 = 0x00001890, + .addr_status_1 = 0x00001894, + 
.addr_status_2 = 0x00001898, + .addr_status_3 = 0x0000189C, + .debug_status_cfg = 0x0000187C, + .debug_status_0 = 0x00001880, + .debug_status_1 = 0x00001884, + .bw_limiter_addr = 0x0000181C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 12 STATS_AWB_BG */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + .ctxt_cfg = 0x00001978, + .addr_status_0 = 0x00001990, + .addr_status_1 = 0x00001994, + .addr_status_2 = 0x00001998, + .addr_status_3 = 0x0000199C, + .debug_status_cfg = 0x0000197C, + .debug_status_0 = 0x00001980, + .debug_status_1 = 0x00001984, + .bw_limiter_addr = 0x0000191C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 13 STATS_AWB_BFW */ + { + .cfg = 0x00001A00, + .image_addr = 0x00001A04, + .frame_incr = 0x00001A08, + .image_cfg_0 = 0x00001A0C, + .image_cfg_1 = 0x00001A10, + .image_cfg_2 = 0x00001A14, + .packer_cfg = 0x00001A18, + .frame_header_addr = 0x00001A20, + .frame_header_incr = 0x00001A24, + .frame_header_cfg = 0x00001A28, + .irq_subsample_period = 0x00001A30, + .irq_subsample_pattern = 0x00001A34, + .framedrop_period = 0x00001A38, + .framedrop_pattern = 0x00001A3C, + .mmu_prefetch_cfg = 0x00001A60, + .mmu_prefetch_max_offset = 0x00001A64, + .system_cache_cfg = 0x00001A68, + .addr_cfg = 0x00001A70, + .ctxt_cfg = 0x00001A78, + .addr_status_0 = 
0x00001A90, + .addr_status_1 = 0x00001A94, + .addr_status_2 = 0x00001A98, + .addr_status_3 = 0x00001A9C, + .debug_status_cfg = 0x00001A7C, + .debug_status_0 = 0x00001A80, + .debug_status_1 = 0x00001A84, + .bw_limiter_addr = 0x00001A1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 14 STATS_FLICKER_BAYERS */ + { + .cfg = 0x00001B00, + .image_addr = 0x00001B04, + .frame_incr = 0x00001B08, + .image_cfg_0 = 0x00001B0C, + .image_cfg_1 = 0x00001B10, + .image_cfg_2 = 0x00001B14, + .packer_cfg = 0x00001B18, + .frame_header_addr = 0x00001B20, + .frame_header_incr = 0x00001B24, + .frame_header_cfg = 0x00001B28, + .irq_subsample_period = 0x00001B30, + .irq_subsample_pattern = 0x00001B34, + .framedrop_period = 0x00001B38, + .framedrop_pattern = 0x00001B3C, + .mmu_prefetch_cfg = 0x00001B60, + .mmu_prefetch_max_offset = 0x00001B64, + .system_cache_cfg = 0x00001B68, + .addr_cfg = 0x00001B70, + .ctxt_cfg = 0x00001B78, + .addr_status_0 = 0x00001B90, + .addr_status_1 = 0x00001B94, + .addr_status_2 = 0x00001B98, + .addr_status_3 = 0x00001B9C, + .debug_status_cfg = 0x00001B7C, + .debug_status_0 = 0x00001B80, + .debug_status_1 = 0x00001B84, + .bw_limiter_addr = 0x00001B1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN32), + }, + /* BUS Client 15 STATS_TMC_BHIST */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .framedrop_period = 0x00001C38, + .framedrop_pattern = 0x00001C3C, + .mmu_prefetch_cfg = 0x00001C60, + .mmu_prefetch_max_offset = 0x00001C64, + .system_cache_cfg = 0x00001C68, + .addr_cfg = 
0x00001C70, + .ctxt_cfg = 0x00001C78, + .addr_status_0 = 0x00001C90, + .addr_status_1 = 0x00001C94, + .addr_status_2 = 0x00001C98, + .addr_status_3 = 0x00001C9C, + .debug_status_cfg = 0x00001C7C, + .debug_status_0 = 0x00001C80, + .debug_status_1 = 0x00001C84, + .bw_limiter_addr = 0x00001C1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 16 PDAF_0 */ /* Note: PDAF_SAD == 2PD*/ + { + .cfg = 0x00001D00, + .image_addr = 0x00001D04, + .frame_incr = 0x00001D08, + .image_cfg_0 = 0x00001D0C, + .image_cfg_1 = 0x00001D10, + .image_cfg_2 = 0x00001D14, + .packer_cfg = 0x00001D18, + .frame_header_addr = 0x00001D20, + .frame_header_incr = 0x00001D24, + .frame_header_cfg = 0x00001D28, + .irq_subsample_period = 0x00001D30, + .irq_subsample_pattern = 0x00001D34, + .framedrop_period = 0x00001D38, + .framedrop_pattern = 0x00001D3C, + .mmu_prefetch_cfg = 0x00001D60, + .mmu_prefetch_max_offset = 0x00001D64, + .system_cache_cfg = 0x00001D68, + .addr_cfg = 0x00001D70, + .ctxt_cfg = 0x00001D78, + .addr_status_0 = 0x00001D90, + .addr_status_1 = 0x00001D94, + .addr_status_2 = 0x00001D98, + .addr_status_3 = 0x00001D9C, + .debug_status_cfg = 0x00001D7C, + .debug_status_0 = 0x00001D80, + .debug_status_1 = 0x00001D84, + .bw_limiter_addr = 0x00001D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 17 PDAF_1 */ + { + .cfg = 0x00001E00, + .image_addr = 0x00001E04, + .frame_incr = 0x00001E08, + .image_cfg_0 = 0x00001E0C, + .image_cfg_1 = 0x00001E10, + .image_cfg_2 = 0x00001E14, + .packer_cfg = 0x00001E18, + .frame_header_addr = 0x00001E20, + .frame_header_incr = 0x00001E24, + .frame_header_cfg = 0x00001E28, + .irq_subsample_period = 0x00001E30, + .irq_subsample_pattern = 0x00001E34, + .framedrop_period = 0x00001E38, + .framedrop_pattern = 0x00001E3C, + .mmu_prefetch_cfg = 0x00001E60, + .mmu_prefetch_max_offset = 
0x00001E64, + .system_cache_cfg = 0x00001E68, + .addr_cfg = 0x00001E70, + .ctxt_cfg = 0x00001E78, + .addr_status_0 = 0x00001E90, + .addr_status_1 = 0x00001E94, + .addr_status_2 = 0x00001E98, + .addr_status_3 = 0x00001E9C, + .debug_status_cfg = 0x00001E7C, + .debug_status_0 = 0x00001E80, + .debug_status_1 = 0x00001E84, + .bw_limiter_addr = 0x00001E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_UBWC_P016), + }, + /* BUS Client 18 PDAF_2 */ + { + .cfg = 0x00001F00, + .image_addr = 0x00001F04, + .frame_incr = 0x00001F08, + .image_cfg_0 = 0x00001F0C, + .image_cfg_1 = 0x00001F10, + .image_cfg_2 = 0x00001F14, + .packer_cfg = 0x00001F18, + .frame_header_addr = 0x00001F20, + .frame_header_incr = 0x00001F24, + .frame_header_cfg = 0x00001F28, + .irq_subsample_period = 0x00001F30, + .irq_subsample_pattern = 0x00001F34, + .framedrop_period = 0x00001F38, + .framedrop_pattern = 0x00001F3C, + .mmu_prefetch_cfg = 0x00001F60, + .mmu_prefetch_max_offset = 0x00001F64, + .system_cache_cfg = 0x00001F68, + .addr_cfg = 0x00001F70, + .ctxt_cfg = 0x00001F78, + .addr_status_0 = 0x00001F90, + .addr_status_1 = 0x00001F94, + .addr_status_2 = 0x00001F98, + .addr_status_3 = 0x00001F9C, + .debug_status_cfg = 0x00001F7C, + .debug_status_0 = 0x00001F80, + .debug_status_1 = 0x00001F84, + .bw_limiter_addr = 0x00001F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16), + + }, + /* BUS Client 19 PDAF_3 */ + { + .cfg = 0x00002000, + .image_addr = 0x00002004, + .frame_incr = 0x00002008, + .image_cfg_0 = 0x0000200C, + .image_cfg_1 = 
0x00002010, + .image_cfg_2 = 0x00002014, + .packer_cfg = 0x00002018, + .frame_header_addr = 0x00002020, + .frame_header_incr = 0x00002024, + .frame_header_cfg = 0x00002028, + .irq_subsample_period = 0x00002030, + .irq_subsample_pattern = 0x00002034, + .framedrop_period = 0x00002038, + .framedrop_pattern = 0x0000203C, + .mmu_prefetch_cfg = 0x00002060, + .mmu_prefetch_max_offset = 0x00002064, + .system_cache_cfg = 0x00002068, + .addr_cfg = 0x00002070, + .ctxt_cfg = 0x00002078, + .addr_status_0 = 0x00002090, + .addr_status_1 = 0x00002094, + .addr_status_2 = 0x00002098, + .addr_status_3 = 0x0000209C, + .debug_status_cfg = 0x0000207C, + .debug_status_0 = 0x00002080, + .debug_status_1 = 0x00002084, + .bw_limiter_addr = 0x0000201C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 20 RDI_0 */ + { + .cfg = 0x00002100, + .image_addr = 0x00002104, + .frame_incr = 0x00002108, + .image_cfg_0 = 0x0000210C, + .image_cfg_1 = 0x00002110, + .image_cfg_2 = 0x00002114, + .packer_cfg = 0x00002118, + .frame_header_addr = 0x00002120, + .frame_header_incr = 0x00002124, + .frame_header_cfg = 0x00002128, + .irq_subsample_period = 0x00002130, + .irq_subsample_pattern = 0x00002134, + .framedrop_period = 0x00002138, + .framedrop_pattern = 0x0000213C, + .mmu_prefetch_cfg = 0x00002160, + .mmu_prefetch_max_offset = 0x00002164, + .system_cache_cfg = 0x00002168, + .addr_cfg = 0x00002170, + .ctxt_cfg = 0x00002178, + .addr_status_0 = 0x00002190, + .addr_status_1 = 0x00002194, + .addr_status_2 = 0x00002198, + .addr_status_3 = 0x0000219C, + .debug_status_cfg = 0x0000217C, + .debug_status_0 = 0x00002180, + .debug_status_1 = 0x00002184, + .bw_limiter_addr = 0x0000211C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_5, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | 
BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 21 RDI_1 */ + { + .cfg = 0x00002200, + .image_addr = 0x00002204, + .frame_incr = 0x00002208, + .image_cfg_0 = 0x0000220C, + .image_cfg_1 = 0x00002210, + .image_cfg_2 = 0x00002214, + .packer_cfg = 0x00002218, + .frame_header_addr = 0x00002220, + .frame_header_incr = 0x00002224, + .frame_header_cfg = 0x00002228, + .irq_subsample_period = 0x00002230, + .irq_subsample_pattern = 0x00002234, + .framedrop_period = 0x00002238, + .framedrop_pattern = 0x0000223C, + .mmu_prefetch_cfg = 0x00002260, + .mmu_prefetch_max_offset = 0x00002264, + .system_cache_cfg = 0x00002268, + .addr_cfg = 0x00002270, + .ctxt_cfg = 0x00002278, + .addr_status_0 = 0x00002290, + .addr_status_1 = 0x00002294, + .addr_status_2 = 0x00002298, + .addr_status_3 = 0x0000229C, + .debug_status_cfg = 0x0000227C, + .debug_status_0 = 0x00002280, + .debug_status_1 = 0x00002284, + .bw_limiter_addr = 0x0000221C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 22 RDI_2 */ + { + .cfg = 0x00002300, + .image_addr = 0x00002304, + .frame_incr = 0x00002308, + .image_cfg_0 = 0x0000230C, + .image_cfg_1 = 0x00002310, + .image_cfg_2 = 0x00002314, + .packer_cfg = 0x00002318, + .frame_header_addr = 0x00002320, + .frame_header_incr = 0x00002324, + .frame_header_cfg = 0x00002328, + .irq_subsample_period = 0x00002330, + .irq_subsample_pattern = 0x00002334, + 
.framedrop_period = 0x00002338, + .framedrop_pattern = 0x0000233C, + .mmu_prefetch_cfg = 0x00002360, + .mmu_prefetch_max_offset = 0x00002364, + .system_cache_cfg = 0x00002368, + .addr_cfg = 0x00002370, + .ctxt_cfg = 0x00002378, + .addr_status_0 = 0x00002390, + .addr_status_1 = 0x00002394, + .addr_status_2 = 0x00002398, + .addr_status_3 = 0x0000239C, + .debug_status_cfg = 0x0000237C, + .debug_status_0 = 0x00002380, + .debug_status_1 = 0x00002384, + .bw_limiter_addr = 0x0000231C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 23 RDI_3 */ + { + .cfg = 0x00002400, + .image_addr = 0x00002404, + .frame_incr = 0x00002408, + .image_cfg_0 = 0x0000240C, + .image_cfg_1 = 0x00002410, + .image_cfg_2 = 0x00002414, + .packer_cfg = 0x00002418, + .frame_header_addr = 0x00002420, + .frame_header_incr = 0x00002424, + .frame_header_cfg = 0x00002428, + .irq_subsample_period = 0x00002430, + .irq_subsample_pattern = 0x00002434, + .framedrop_period = 0x00002438, + .framedrop_pattern = 0x0000243C, + .mmu_prefetch_cfg = 0x00002460, + .mmu_prefetch_max_offset = 0x00002464, + .system_cache_cfg = 0x00002468, + .addr_cfg = 0x00002470, + .ctxt_cfg = 0x00002478, + .addr_status_0 = 0x00002490, + .addr_status_1 = 0x00002494, + .addr_status_2 = 0x00002498, + .addr_status_3 = 0x0000249C, + .debug_status_cfg = 0x0000247C, + .debug_status_0 = 0x00002480, + .debug_status_1 = 0x00002484, + .bw_limiter_addr = 0x0000241C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_8, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + }, + .num_out = 20, + .vfe_out_hw_info = 
{ + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .mid = tfe970_out_port_mid[0], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 20, + }, + .name = { + "RDI_0", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .mid = tfe970_out_port_mid[1], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 21, + }, + .name = { + "RDI_1", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .mid = tfe970_out_port_mid[2], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 22, + }, + .name = { + "RDI_2", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_5, + .mid = tfe970_out_port_mid[3], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 23, + }, + .name = { + "RDI_3", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL, + .max_width = 4672, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[4], + .num_mid = 6, + .num_wm = 1, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 0, + }, + .name = { + "FULL", + }, + .pid_mask = 0x7, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4, + .max_width = 1168, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[5], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 1, + 2, + }, + .name = { + "DS4_Y", + "DS4_C" + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16, + .max_width = 292, 
+ .max_height = 1024, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[6], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 3, + 4, + }, + .name = { + "DS16_Y", + "DS16_C", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS2, + .max_width = 4672, + .max_height = 8192, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[7], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .wm_idx = { + 5, + 6, + }, + .name = { + "DS2_Y", + "DS2_C", + }, + .pid_mask = 0x7, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD, + .max_width = 9312, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[8], + .num_mid = 3, + .num_wm = 2, + .line_based = 1, + .cntxt_cfg_except = true, + .wm_idx = { + 7, + 8, + }, + .name = { + "FD_Y", + "FD_C", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[9], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 9, + }, + .name = { + "STATS_AEC_BE", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[10], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 10, + }, + .name = { + "STATS_BHIST", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[11], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 11, + }, + .name = { + "STATS_TL_BG", + }, + .pid_mask = 0x70, + }, + { + 
.vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[12], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 12, + }, + .name = { + "STATS_AWB_BG", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[13], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 13, + }, + .name = { + "AWB_BFW", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[14], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 14, + }, + .name = { + "STATS_RS", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TMC_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe970_out_port_mid[15], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 15, + }, + .name = { + "STATS_TMC_BHIST", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD, + .max_width = 14592, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe970_out_port_mid[16], + .num_mid = 1, + .num_wm = 1, + .early_done_mask = BIT(28), + .wm_idx = { + 16, + }, + .name = { + "PDAF_0_2PD", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe970_out_port_mid[17], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 17, + }, + .name = { + "PDAF_1_PREPROCESS_2PD", + }, + .pid_mask = 0x70000, + }, + { + 
.vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe970_out_port_mid[18], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 18, + }, + .name = { + "PDAF_2_PARSED_DATA", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe970_out_port_mid[19], + .num_mid = 1, + .num_wm = 1, + .early_done_mask = BIT(29), + .mc_based = false, + .mc_grp_shift = 4, + .wm_idx = { + 19, + }, + .name = { + "STATS_CAF", + }, + .pid_mask = 0x70000, + }, + }, + .num_cons_err = 32, + .constraint_error_list = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input Not Supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input Not Supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input Not Supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input Not Supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format Not Supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 BPP format Not Supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 24 BPP format Not Supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 32 BPP format Not Supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack 64 BPP format Not Supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 20 format Not Supported" + }, + { + .bitmask = BIT(10), + .error_description = "Pack MIPI 14 format Not Supported" + }, + { + .bitmask = BIT(11), + .error_description = "Pack MIPI 12 format Not Supported" + }, + { + .bitmask = BIT(12), + .error_description = "Pack MIPI 10 format Not Supported" + }, + { + .bitmask = BIT(13), + .error_description = "Pack 128 BPP format Not Supported" + }, + { + .bitmask = BIT(14), + 
.error_description = "UBWC P016 format Not Supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC P010 format Not Supported" + }, + { + .bitmask = BIT(16), + .error_description = "UBWC NV12 format Not Supported" + }, + { + .bitmask = BIT(17), + .error_description = "UBWC NV12 4R format Not Supported" + }, + { + .bitmask = BIT(18), + .error_description = "UBWC TP10 format Not Supported" + }, + { + .bitmask = BIT(19), + .error_description = "Frame based Mode Not Supported" + }, + { + .bitmask = BIT(20), + .error_description = "Index based Mode Not Supported" + }, + { + .bitmask = BIT(21), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = BIT(22), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(23), + .error_description = "FIFO framehdr addr unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(25), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Stride unalign" + }, + { + .bitmask = BIT(28), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(29), + .error_description = "Image Width unalign", + }, + { + .bitmask = BIT(30), + .error_description = "Image Height unalign", + }, + { + .bitmask = BIT(31), + .error_description = "Meta Stride unalign", + }, + }, + .num_comp_grp = 9, + .support_consumed_addr = true, + .mc_comp_done_mask = { + BIT(24), 0x0, BIT(25), 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, + }, + .comp_done_mask = { + 0x3, BIT(3), 0x70, BIT(7), BIT(8), BIT(16), + BIT(17), BIT(18), BIT(19), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 43, + .pack_align_shift = 5, + .max_bw_counter_limit = 0xFF, + .skip_regdump = true, + .skip_regdump_start_offset = 0x800, + .skip_regdump_stop_offset = 0x209C, +}; + +static struct cam_vfe_hw_info cam_tfe970_hw_info = { + 
.irq_hw_info = &tfe980_irq_hw_info, /* NOTE(review): TFE970 reuses the TFE980 IRQ info while bus/top are TFE970-specific - confirm intentional */ + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &tfe970_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &tfe970_top_hw_info, +}; +#endif /* _CAM_TFE970_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe975.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe975.h new file mode 100644 index 0000000000..d629239a7e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe975.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_TFE975_H_ +#define _CAM_TFE975_H_ +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" +#include "cam_tfe980.h" + +static struct cam_vfe_hw_info cam_tfe975_hw_info = { + .irq_hw_info = &tfe980_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &tfe980_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &tfe980_top_hw_info, +}; +#endif /* _CAM_TFE975_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe980.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe980.h new file mode 100644 index 0000000000..dd3426b27a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_tfe980.h @@ -0,0 +1,2958 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_TFE980_H_ +#define _CAM_TFE980_H_ +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_TFE_980_NUM_TOP_DBG_REG 17 +#define CAM_TFE_980_NUM_BAYER_DBG_REG 10 +#define CAM_TFE_BUS_VER3_980_MAX_CLIENTS 28 + +static struct cam_vfe_top_ver4_module_desc tfe980_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_STATS_AWB_BG_TINTLESS", + }, + { + .id = 1, + .desc = "CLC_STATS_AWB_BG_AE", + }, + { + .id = 2, + .desc = "CLC_STATS_BHIST_AEC", + }, + { + .id = 3, + .desc = "CLC_STATS_RS", + }, + { + .id = 4, + .desc = "CLC_STATS_BFW_AWB", + }, + { + .id = 5, + .desc = "CLC_STATS_AWB_BG_AWB", + }, + { + .id = 6, + .desc = "CLC_STATS_BHIST_AF", + }, + { + .id = 7, + .desc = "CLC_STATS_AWB_BG_ALSC", + }, + { + .id = 8, + .desc = "CLC_STATS_BHIST_TMC", + }, + { + .id = 9, + .desc = "CLC_COMPDECOMP_FD", + }, + { + .id = 10, + .desc = "CLC_COLOR_CORRECT", + }, + { + .id = 11, + .desc = "CLC_GTM", + }, + { + .id = 12, + .desc = "CLC_GLUT", + }, + { + .id = 13, + .desc = "CLC_COLOR_XFORM", + }, + { + .id = 14, + .desc = "CLC_DOWNSCALE_MN_Y", + }, + { + .id = 15, + .desc = "CLC_DOWNSCALE_MN_C", + }, + { + .id = 16, + .desc = "CLC_CROP_RND_CLAMP_FD_Y", + }, + { + .id = 17, + .desc = "CLC_CROP_RND_CLAMP_FD_C", + }, + { + .id = 18, + .desc = "CLC_BDS2_DEMO", + }, + { + .id = 19, + .desc = "CLC_PUNCH_BDS2", + }, + { + .id = 20, + .desc = "CLC_PUNCH_DS4_MUX", + }, + { + .id = 21, + .desc = "CLC_BAYER_DS_4_DS4", + }, + { + .id = 22, + .desc = "CLC_CROP_RND_CLAMP_DS4" + }, + { + .id = 23, + .desc = "CLC_PUNCH_DS16" + }, + { + .id = 24, + .desc = "CLC_BAYER_DS_4_DS16", + }, + { + .id = 25, + .desc = "CLC_CROP_RND_CLAMP_DS16", + }, + { + .id = 26, + .desc = "CLC_CROP_RND_CLAMP_DS2", + }, + { + .id = 27, + .desc = "CLC_RCS_DS2", + }, + { + .id = 28, + .desc = "CLC_CROP_RND_CLAMP_FULL_OUT", + }, + { + .id = 29, + .desc = "CLC_COMPDECOMP_BYPASS", + }, + { + .id = 30, + .desc = 
"CLC_CROP_RND_CLAMP_BYPASS", + }, + { + .id = 31, + .desc = "CLC_RCS_FULL_OUT", + }, +}; + +struct cam_vfe_top_ver4_module_desc tfe980_bayer_mod_desc[] = { + { + .id = 0, + .desc = "CLC_DEMUX", + }, + { + .id = 1, + .desc = "CLC_BPC_PDPC_GIC", + }, + { + .id = 2, + .desc = "CLC_PDPC_BPC_1D", + }, + { + .id = 3, + .desc = "CLC_ABF_BINC", + }, + { + .id = 4, + .desc = "CLC_CHANNEL_GAINS", + }, + { + .id = 5, + .desc = "CLC_LSC", + }, + { + .id = 6, + .desc = "CLC_FCG", + }, + { + .id = 7, + .desc = "CLC_WB_GAIN", + }, + { + .id = 8, + .desc = "CLC_COMPDECOMP_BAYER", + }, + { + .id = 9, + .desc = "CLC_CROP_RND_CLAMP_WIRC", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc tfe980_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "VIDEO", + }, + { + .wm_id = 1, + .desc = "DS4_Y", + }, + { + .wm_id = 2, + .desc = "DS4_C", + }, + { + .wm_id = 3, + .desc = "DS16_Y", + }, + { + .wm_id = 4, + .desc = "DS16_C", + }, + { + .wm_id = 5, + .desc = "DS2_Y", + }, + { + .wm_id = 6, + .desc = "DS2_C", + }, + { + .wm_id = 7, + .desc = "FD_Y", + }, + { + .wm_id = 8, + .desc = "FD_C", + }, + { + .wm_id = 9, + .desc = "IR_OUT", + }, + { + .wm_id = 10, + .desc = "STATS_AEC_BE", + }, + { + .wm_id = 11, + .desc = "STATS_AEC_BHIST", + }, + { + .wm_id = 12, + .desc = "STATS_TINTLESS_BG", + }, + { + .wm_id = 13, + .desc = "STATS_AWB_BG", + }, + { + .wm_id = 14, + .desc = "STATS_AWB_BFW", + }, + { + .wm_id = 15, + .desc = "STATS_AF_BHIST", + }, + { + .wm_id = 16, + .desc = "STATS_ALSC_BG", + }, + { + .wm_id = 17, + .desc = "STATS_FLICKER", + }, + { + .wm_id = 18, + .desc = "STATS_TMC_BHIST", + }, + { + .wm_id = 19, + .desc = "PDAF_0_STATS", + }, + { + .wm_id = 20, + .desc = "PDAF_1_PREPROCESS_2PD", + }, + { + .wm_id = 21, + .desc = "PDAF_2_PARSED_DATA", + }, + { + .wm_id = 22, + .desc = "PDAF_3_CAF", + }, + { + .wm_id = 23, + .desc = "RDI0", + }, + { + .wm_id = 24, + .desc = "RDI1", + }, + { + .wm_id = 25, + .desc = "RDI2", + }, + { + .wm_id = 26, + .desc = "RDI3", + }, + { + .wm_id = 
27, + .desc = "RDI4", + }, +}; + +static struct cam_vfe_top_ver4_top_err_irq_desc tfe980_top_irq_err_desc[] = { + { + .bitmask = BIT(2), + .err_name = "BAYER_HM violation", + .desc = "CLC CCIF Violation", + }, + { + .bitmask = BIT(24), + .err_name = "DYNAMIC PDAF SWITCH VIOLATION", + .desc = + "HAF RDI exposure select changes dynamically, the common vbi is insufficient", + }, + { + .bitmask = BIT(25), + .err_name = "HAF violation", + .desc = "CLC_HAF Violation", + }, + { + .bitmask = BIT(26), + .err_name = "PP VIOLATION", + .desc = "CCIF protocol violation", + }, + { + .bitmask = BIT(27), + .err_name = "DIAG VIOLATION", + .desc = "Sensor: The HBI at TFE input is less than the spec (64 cycles)", + .debug = "Check sensor config", + }, +}; + +static struct cam_vfe_top_ver4_pdaf_violation_desc tfe980_haf_violation_desc[] = { + { + .bitmask = BIT(0), + .desc = "Sim monitor 1 violation - SAD output", + }, + { + .bitmask = BIT(1), + .desc = "Sim monitor 2 violation - pre-proc output", + }, + { + .bitmask = BIT(2), + .desc = "Sim monitor 3 violation - parsed output", + }, + { + .bitmask = BIT(3), + .desc = "Sim monitor 4 violation - CAF output", + }, + { + .bitmask = BIT(4), + .desc = "PDAF constraint violation", + }, + { + .bitmask = BIT(5), + .desc = "CAF constraint violation", + }, +}; + +static struct cam_irq_register_set tfe980_top_irq_reg_set = { + .mask_reg_offset = 0x00000080, + .clear_reg_offset = 0x00000084, + .status_reg_offset = 0x00000088, + .set_reg_offset = 0x0000008C, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), +}; + +static struct cam_irq_controller_reg_info tfe980_top_irq_reg_info = { + .num_registers = 1, + .irq_reg_set = &tfe980_top_irq_reg_set, + .global_irq_cmd_offset = 0x0000007C, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t tfe980_top_debug_reg[] = { + 0x00000190, + 0x00000194, + 0x00000198, + 0x0000019C, + 0x000001A0, + 0x000001A4, + 0x000001A8, + 
0x000001AC, + 0x000001B0, + 0x000001B4, + 0x000001B8, + 0x000001BC, + 0x000001C0, + 0x000001C4, + 0x000001C8, + 0x000001CC, + 0x000001D0, +}; + +static uint32_t tfe980_bayer_debug_reg[] = { + 0x0000C1BC, + 0x0000C1C0, + 0x0000C1C4, + 0x0000C1C8, + 0x0000C1CC, + 0x0000C1D0, + 0x0000C1D4, + 0x0000C1D8, + 0x0000C1DC, + 0x0000C1E0, +}; + +static struct cam_vfe_top_ver4_reg_offset_common tfe980_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .main_feature = 0x00000008, + .bayer_feature = 0x0000000C, + .stats_feature = 0x00000010, + .fd_feature = 0x00000014, + .core_cgc_ovd_0 = 0x00000018, + .ahb_cgc_ovd = 0x00000020, + .core_mux_cfg = 0x00000024, + .pdaf_input_cfg_0 = 0x00000028, + .pdaf_input_cfg_1 = 0x0000002C, + .stats_throttle_cfg_0 = 0x00000030, + .stats_throttle_cfg_1 = 0x00000034, + .stats_throttle_cfg_2 = 0x00000038, + .core_cfg_4 = 0x0000003C, + .pdaf_parsed_throttle_cfg = 0x00000040, + .wirc_throttle_cfg = 0x00000044, + .fd_y_throttle_cfg = 0x00000048, + .fd_c_throttle_cfg = 0x0000004C, + .ds16_g_throttle_cfg = 0x00000050, + .ds16_br_throttle_cfg = 0x00000054, + .ds4_g_throttle_cfg = 0x00000058, + .ds4_br_throttle_cfg = 0x0000005C, + .ds2_g_throttle_cfg = 0x00000060, + .ds2_br_throttle_cfg = 0x00000064, + .full_out_throttle_cfg = 0x00000068, + .diag_config = 0x00000094, + .global_reset_cmd = 0x0000007C, + .diag_sensor_status = {0x00000098, 0x0000009C}, + .diag_frm_cnt_status = {0x000000A0, 0x000000A4, 0x000000A8}, + .ipp_violation_status = 0x00000090, + .bayer_violation_status = 0x0000C024, + .pdaf_violation_status = 0x00009304, + .dsp_status = 0x0000006C, + .bus_violation_status = 0x00000864, + .bus_overflow_status = 0x00000868, + .num_perf_counters = 8, + .perf_count_reg = { + { + .perf_count_cfg = 0x000000AC, + .perf_count_cfg_mc = 0x000000B0, + .perf_pix_count = 0x000000B4, + .perf_line_count = 0x000000B8, + .perf_stall_count = 0x000000BC, + .perf_always_count = 0x000000C0, + .perf_count_status = 0x000000C4, + }, + { + 
.perf_count_cfg = 0x000000C8, + .perf_count_cfg_mc = 0x000000CC, + .perf_pix_count = 0x000000D0, + .perf_line_count = 0x000000D4, + .perf_stall_count = 0x000000D8, + .perf_always_count = 0x000000DC, + .perf_count_status = 0x000000E0, + }, + { + .perf_count_cfg = 0x000000E4, + .perf_count_cfg_mc = 0x000000E8, + .perf_pix_count = 0x000000EC, + .perf_line_count = 0x000000F0, + .perf_stall_count = 0x000000F4, + .perf_always_count = 0x000000F8, + .perf_count_status = 0x000000FC, + }, + { + .perf_count_cfg = 0x00000100, + .perf_count_cfg_mc = 0x00000104, + .perf_pix_count = 0x00000108, + .perf_line_count = 0x0000010C, + .perf_stall_count = 0x00000110, + .perf_always_count = 0x00000114, + .perf_count_status = 0x00000118, + }, + /* Bayer perf count regs from here onwards */ + { + .perf_count_cfg = 0x0000C028, + .perf_count_cfg_mc = 0x0000C02C, + .perf_pix_count = 0x0000C030, + .perf_line_count = 0x0000C034, + .perf_stall_count = 0x0000C038, + .perf_always_count = 0x0000C03C, + .perf_count_status = 0x0000C040, + }, + { + .perf_count_cfg = 0x0000C044, + .perf_count_cfg_mc = 0x0000C048, + .perf_pix_count = 0x0000C04C, + .perf_line_count = 0x0000C050, + .perf_stall_count = 0x0000C054, + .perf_always_count = 0x0000C058, + .perf_count_status = 0x0000C05C, + }, + { + .perf_count_cfg = 0x0000C060, + .perf_count_cfg_mc = 0x0000C064, + .perf_pix_count = 0x0000C068, + .perf_line_count = 0x0000C06C, + .perf_stall_count = 0x0000C070, + .perf_always_count = 0x0000C074, + .perf_count_status = 0x0000C078, + }, + { + .perf_count_cfg = 0x0000C07C, + .perf_count_cfg_mc = 0x0000C080, + .perf_pix_count = 0x0000C084, + .perf_line_count = 0x0000C088, + .perf_stall_count = 0x0000C08C, + .perf_always_count = 0x0000C090, + .perf_count_status = 0x0000C094, + }, + }, + .top_debug_cfg = 0x000001EC, + .bayer_debug_cfg = 0x0000C1EC, + .num_top_debug_reg = CAM_TFE_980_NUM_TOP_DBG_REG, + .top_debug = tfe980_top_debug_reg, + .num_bayer_debug_reg = CAM_TFE_980_NUM_BAYER_DBG_REG, + .bayer_debug = 
tfe980_bayer_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS0, + .capabilities = CAM_VFE_COMMON_CAP_SKIP_CORE_CFG | + CAM_VFE_COMMON_CAP_CORE_MUX_CFG, +}; + +static struct cam_vfe_ver4_path_reg_data tfe980_ipp_common_reg_data = { + .sof_irq_mask = 0x150, + .eof_irq_mask = 0x2A0, + .error_irq_mask = 0xF000004, + .ipp_violation_mask = 0x4000000, + .bayer_violation_mask = 0x4, + .pdaf_violation_mask = 0x2000000, + .diag_violation_mask = 0x8000000, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0xF000, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + .is_mc_path = true, + /* SOF and EOF mask combined for each context */ + .frm_irq_hw_ctxt_mask = { + 0x30, + 0xC0, + 0x300, + }, +}; + +static struct cam_vfe_ver4_path_reg_data tfe980_pdlib_reg_data = { + .sof_irq_mask = 0x400, + .eof_irq_mask = 0x800, + .diag_sensor_sel_mask = 0x8, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x40, + .top_debug_cfg_en = 3, +}; + +static struct cam_vfe_ver4_path_reg_data tfe980_vfe_full_rdi_reg_data[5] = { + { + .sof_irq_mask = 0x1000, + .eof_irq_mask = 0x2000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0xA, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x4000, + .eof_irq_mask = 0x8000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0xC, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x10000, + .eof_irq_mask = 0x20000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0xE, + .diag_frm_count_mask_0 = 0x200, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x40000, + .eof_irq_mask = 0x80000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x10, + .diag_frm_count_mask_0 = 0x400, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x100000, + .eof_irq_mask = 0x200000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 
0x12, + .diag_frm_count_mask_0 = 0x800, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, +}; + +struct cam_vfe_ver4_path_hw_info + tfe980_rdi_hw_info_arr[] = { + { + .common_reg = &tfe980_top_common_reg, + .reg_data = &tfe980_vfe_full_rdi_reg_data[0], + }, + { + .common_reg = &tfe980_top_common_reg, + .reg_data = &tfe980_vfe_full_rdi_reg_data[1], + }, + { + .common_reg = &tfe980_top_common_reg, + .reg_data = &tfe980_vfe_full_rdi_reg_data[2], + }, + { + .common_reg = &tfe980_top_common_reg, + .reg_data = &tfe980_vfe_full_rdi_reg_data[3], + }, + { + .common_reg = &tfe980_top_common_reg, + .reg_data = &tfe980_vfe_full_rdi_reg_data[4], + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info tfe980_top_dbg_reg_info[ + CAM_TFE_980_NUM_TOP_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit("test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + { + VFE_DBG_INFO_WITH_IDLE(0, "STATS_AWB_BG_TINTLESS", + 0x000001D4, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(4, "STATS_AWB_BG_AE", + 0x000001D4, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(8, "STATS_BHIST_AEC", + 0x000001D4, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(12, "STATS_RS", + 0x000001D4, (BIT(9) | BIT(10) | BIT(11))), + VFE_DBG_INFO_WITH_IDLE(16, "STATS_BFW_AWB", + 0x000001D4, (BIT(12) | BIT(13) | BIT(14))), + VFE_DBG_INFO_WITH_IDLE(20, "STATS_AWB_BG_AWB", + 0x000001D4, (BIT(15) | BIT(16) | BIT(17))), + VFE_DBG_INFO_WITH_IDLE(24, "STATS_BHIST_AF", + 0x000001D4, (BIT(18) | BIT(19) | BIT(20))), + VFE_DBG_INFO_WITH_IDLE(28, "STATS_AWB_BG_ALSC", + 0x000001D4, (BIT(21) | BIT(22) | BIT(23))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "STATS_BHIST_TMC", + 0x000001D4, (BIT(24) | BIT(25) | BIT(26))), + VFE_DBG_INFO_WITH_IDLE(4, "compdecomp_fd", + 0x000001D4, BIT(27)), + VFE_DBG_INFO_WITH_IDLE(8, "color_correct", + 0x000001D4, BIT(28)), + VFE_DBG_INFO_WITH_IDLE(12, 
"gtm", + 0x000001D4, BIT(29)), + VFE_DBG_INFO_WITH_IDLE(16, "glut", + 0x000001D4, BIT(30)), + VFE_DBG_INFO_WITH_IDLE(20, "color_xform", + 0x000001D4, BIT(31)), + VFE_DBG_INFO_WITH_IDLE(24, "downscale_mn_y", + 0x000001D8, BIT(0)), + VFE_DBG_INFO_WITH_IDLE(28, "downscale_mn_c", + 0x000001D8, BIT(1)), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "crop_rnd_clamp_fd_y", + 0x000001D8, BIT(2)), + VFE_DBG_INFO_WITH_IDLE(4, "crop_rnd_clamp_fd_c", + 0x000001D8, BIT(3)), + VFE_DBG_INFO_WITH_IDLE(8, "bds2_demo", + 0x000001D8, (BIT(4) | BIT(5) | BIT(6))), + VFE_DBG_INFO_WITH_IDLE(12, "punch_bds2", + 0x000001D8, (BIT(7) | BIT(8) | BIT(9))), + VFE_DBG_INFO_WITH_IDLE(16, "punch_ds4_mux", + 0x000001D8, (BIT(10) | BIT(11) | BIT(12))), + VFE_DBG_INFO_WITH_IDLE(20, "bayer_ds_4_ds4", + 0x000001D8, (BIT(13) | BIT(14) | BIT(15))), + VFE_DBG_INFO_WITH_IDLE(24, "crop_rnd_clamp_ds4", + 0x000001D8, (BIT(16) | BIT(17) | BIT(18))), + VFE_DBG_INFO_WITH_IDLE(28, "punch_ds16", + 0x000001D8, (BIT(19) | BIT(20) | BIT(21))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "bayer_ds_4_ds16", + 0x000001D8, (BIT(22) | BIT(23) | BIT(24))), + VFE_DBG_INFO_WITH_IDLE(4, "crop_rnd_clamp_ds16", + 0x000001D8, (BIT(25) | BIT(26) | BIT(27))), + VFE_DBG_INFO_WITH_IDLE(8, "crop_rnd_clamp_ds2", + 0x000001D8, (BIT(28) | BIT(29) | BIT(30))), + VFE_DBG_INFO_WITH_IDLE(12, "clc_haf", + 0x000001D8, BIT(31)), + VFE_DBG_INFO_WITH_IDLE(16, "clc_rcs_ds2", + 0x000001DC, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(20, "clc_crop_rnd_clamp_full_out", + 0x000001DC, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(24, "clc_compdecomp_bypass", + 0x000001DC, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(28, "clc_crop_rnd_clamp_bypass", + 0x000001DC, (BIT(9) | BIT(10) | BIT(11))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "clc_rcs_full_out", + 0x000001DC, (BIT(12) | BIT(13) | BIT(14))), + VFE_DBG_INFO_WITH_IDLE(4, "clc_haf", + 0x000001DC, BIT(15)), + VFE_DBG_INFO_WITH_IDLE(8, "csid_tfe_ipp", + 0x000001DC, (BIT(16) | BIT(17) | 
BIT(18))), + VFE_DBG_INFO_WITH_IDLE(12, "ppp_repeater", + 0x000001DC, BIT(19)), + VFE_DBG_INFO_WITH_IDLE(16, "stats_awb_bg_tintless_throttle", + 0x000001DC, (BIT(20) | BIT(21) | BIT(22))), + VFE_DBG_INFO_WITH_IDLE(20, "stats_awb_bg_ae_throttle", + 0x000001DC, (BIT(23) | BIT(24) | BIT(25))), + VFE_DBG_INFO_WITH_IDLE(24, "stats_ae_bhist_throttle", + 0x000001DC, (BIT(26) | BIT(27) | BIT(28))), + VFE_DBG_INFO_WITH_IDLE(28, "stats_bayer_rs_throttle", + 0x000001DC, (BIT(29) | BIT(30) | BIT(31))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "stats_bayer_bfw_throttle", + 0x000001E0, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(4, "stats_awb_bg_awb_throttle", + 0x000001E0, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(8, "stats_bhist_af_throttle", + 0x000001E0, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(12, "full_out_throttle", + 0x000001E0, (BIT(9) | BIT(10) | BIT(11))), + VFE_DBG_INFO_WITH_IDLE(16, "ds4_out_y_throttle", + 0x000001E0, (BIT(12) | BIT(13) | BIT(14))), + VFE_DBG_INFO_WITH_IDLE(20, "ds4_out_c_throttle", + 0x000001E0, (BIT(15) | BIT(16) | BIT(17))), + VFE_DBG_INFO_WITH_IDLE(24, "ds16_out_y_throttle", + 0x000001E0, (BIT(18) | BIT(19) | BIT(20))), + VFE_DBG_INFO_WITH_IDLE(28, "ds16_out_c_throttle", + 0x000001E0, (BIT(21) | BIT(22) | BIT(23))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "ds2_out_y_throttle", + 0x000001E0, (BIT(24) | BIT(25) | BIT(26))), + VFE_DBG_INFO_WITH_IDLE(4, "ds2_out_c_throttle", + 0x000001E0, (BIT(27) | BIT(28) | BIT(29))), + VFE_DBG_INFO_WITH_IDLE(8, "tfe_w_ir_throttle", + 0x000001E4, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(12, "fd_out_y_throttle", + 0x000001E4, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(16, "fd_out_c_throttle", + 0x000001E4, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(20, "haf_sad_stats_throttle", + 0x000001E0, BIT(30)), + VFE_DBG_INFO_WITH_IDLE(24, "haf_caf_stats_throttle", + 0x000001E0, BIT(31)), + VFE_DBG_INFO_WITH_IDLE(28, "haf_parsed_throttle", + 0x000001E4, BIT(9)), 
+ }, + { + VFE_DBG_INFO_WITH_IDLE(0, "haf_pre_processed", + 0x000001E4, BIT(10)), + VFE_DBG_INFO(4, "full_out"), + VFE_DBG_INFO(8, "ubwc_stats"), + VFE_DBG_INFO(12, "ds4_out_y"), + VFE_DBG_INFO(16, "ds4_out_c"), + VFE_DBG_INFO(20, "ds16_out_y"), + VFE_DBG_INFO(24, "ds16_out_c"), + VFE_DBG_INFO(28, "ds2_out_y"), + }, + VFE_DBG_INFO_ARRAY_4bit( + "ubwc_stats", + "ds2_out_c", + "fd_out_y", + "fd_out_c", + "raw_out", + "stats_awb_bg_ae", + "stats_ae_bhist", + "stats_awb_bg_tintless" + ), + { + VFE_DBG_INFO_WITH_IDLE(0, "stats_awb_bg_alsc", + 0x000001E4, (BIT(20) | BIT(21) | BIT(22))), + VFE_DBG_INFO(4, "stats_throttle_to_bus_awb_bg_awb"), + VFE_DBG_INFO(8, "stats_throttle_to_bus_bayer_bfw"), + VFE_DBG_INFO(12, "stats_throttle_to_bus_bhist_af"), + VFE_DBG_INFO(16, "stats_throttle_to_bus_awb_bg_alsc"), + VFE_DBG_INFO(20, "stats_throttle_to_bus_bayer_rs"), + VFE_DBG_INFO(24, "stats_throttle_to_bus_bhist_tmc"), + VFE_DBG_INFO(28, "stats_throttle_to_bus_sad"), + + }, + VFE_DBG_INFO_ARRAY_4bit( + "tfe_haf_processed_to_bus", + "tfe_haf_parsed_to_bus", + "tfe_stats_throttle_to_bus", + "rdi0_splitter_to_bus_wr", + "rdi1_splitter_to_bus_wr", + "rdi2_splitter_to_bus_wr", + "rdi3_splitter_to_bus_wr", + "rdi4_splitter_to_bus_wr" + ), + { + VFE_DBG_INFO_WITH_IDLE(0, "stats_bhist_tmc_throttle", + 0x000001E4, (BIT(23) | BIT(24) | BIT(25))), + VFE_DBG_INFO(4, "reserved"), + VFE_DBG_INFO(8, "reserved"), + VFE_DBG_INFO(12, "reserved"), + VFE_DBG_INFO(16, "reserved"), + VFE_DBG_INFO(20, "reserved"), + VFE_DBG_INFO(24, "reserved"), + VFE_DBG_INFO(28, "reserved"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + 
VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info tfe980_bayer_dbg_reg_info[ + CAM_TFE_980_NUM_BAYER_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit("test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + { + VFE_DBG_INFO_WITH_IDLE(0, "clc_demux_w0", + 0x0000C1E4, (BIT(0) | BIT(1) | BIT(2))), + VFE_DBG_INFO_WITH_IDLE(4, "clc_bpc_pdpc_gic_w0", + 0x0000C1E4, (BIT(3) | BIT(4) | BIT(5))), + VFE_DBG_INFO_WITH_IDLE(8, "clc_pdpc_bpc_1d_w0", + 0x0000C1E4, (BIT(6) | BIT(7) | BIT(8))), + VFE_DBG_INFO_WITH_IDLE(12, "clc_abf_binc_w0", + 0x0000C1E4, (BIT(9) | BIT(10) | BIT(11))), + VFE_DBG_INFO_WITH_IDLE(16, "clc_channel_gains_w0", + 0x0000C1E4, (BIT(12) | BIT(13) | BIT(14))), + VFE_DBG_INFO_WITH_IDLE(20, "clc_lsc_w3", + 0x0000C1E4, (BIT(15) | BIT(16) | BIT(17))), + VFE_DBG_INFO_WITH_IDLE(24, "clc_fcg_w2", + 0x0000C1E4, (BIT(18) | BIT(19) | BIT(20))), + VFE_DBG_INFO_WITH_IDLE(28, "clc_wb_gain_w6", + 
0x0000C1E4, (BIT(21) | BIT(22) | BIT(23))), + }, + { + VFE_DBG_INFO_WITH_IDLE(0, "clc_compdecomp_bayer_w0", + 0x0000C1E4, (BIT(24) | BIT(25) | BIT(26))), + VFE_DBG_INFO_WITH_IDLE(4, "clc_crop_rnd_clamp_wirc_w10", + 0x0000C1E4, BIT(27)), + VFE_DBG_INFO(8, "reserved"), + VFE_DBG_INFO(12, "reserved"), + VFE_DBG_INFO(16, "reserved"), + VFE_DBG_INFO(20, "reserved"), + VFE_DBG_INFO(24, "reserved"), + VFE_DBG_INFO(28, "reserved"), + }, + VFE_DBG_INFO_ARRAY_4bit( + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved" + ), + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + VFE_DBG_INFO(32, "non_ccif_0"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + VFE_DBG_INFO(32, "non_ccif_1"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + VFE_DBG_INFO(32, "non_ccif_2"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, 
"non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + VFE_DBG_INFO(32, "non_ccif_3"), + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_info tfe980_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_PPP_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_3_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_4_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_IPP_CONTEXT0_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_IPP_CONTEXT1_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_IPP_CONTEXT2_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_IPP_ALL_CONTEXT_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields tfe980_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &tfe980_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &tfe980_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields tfe980_diag_frame_field[] = { + { + .num_fields = 4, + .field = &tfe980_diag_reg_info[4], + }, + { + .num_fields = 2, + .field = &tfe980_diag_reg_info[8], + }, + { + .num_fields = 4, + .field = &tfe980_diag_reg_info[10], + }, +}; + +static struct cam_vfe_ver4_fcg_module_info tfe980_fcg_module_info = { + .max_fcg_ch_ctx = 3, + .max_fcg_predictions = 3, + .fcg_index_shift = 16, + .max_reg_val_pair_size = 6, + .fcg_type_size = 2, + .fcg_phase_index_cfg_0 = 0x0000DE70, + 
.fcg_phase_index_cfg_1 = 0x0000DE74, + .fcg_reg_ctxt_shift = 0x0, + .fcg_reg_ctxt_sel = 0x0000DFF4, + .fcg_reg_ctxt_mask = 0x7, +}; + +static struct cam_vfe_top_ver4_hw_info tfe980_top_hw_info = { + .common_reg = &tfe980_top_common_reg, + .vfe_full_hw_info = { + .common_reg = &tfe980_top_common_reg, + .reg_data = &tfe980_ipp_common_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &tfe980_top_common_reg, + .reg_data = &tfe980_pdlib_reg_data, + }, + .rdi_hw_info = tfe980_rdi_hw_info_arr, + .wr_client_desc = tfe980_wr_client_desc, + .ipp_module_desc = tfe980_ipp_mod_desc, + .bayer_module_desc = tfe980_bayer_mod_desc, + .num_mux = 7, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_PDLIB_VER_1_0, + }, + .num_path_port_map = 3, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA}, + }, + .num_rdi = ARRAY_SIZE(tfe980_rdi_hw_info_arr), + .num_top_errors = ARRAY_SIZE(tfe980_top_irq_err_desc), + .top_err_desc = tfe980_top_irq_err_desc, + .num_pdaf_violation_errors = ARRAY_SIZE(tfe980_haf_violation_desc), + .pdaf_violation_desc = tfe980_haf_violation_desc, + .top_debug_reg_info = &tfe980_top_dbg_reg_info, + .bayer_debug_reg_info = &tfe980_bayer_dbg_reg_info, + .fcg_module_info = &tfe980_fcg_module_info, + .fcg_mc_supported = true, + .diag_sensor_info = tfe980_diag_sensor_field, + .diag_frame_info = tfe980_diag_frame_field, +}; + +static struct cam_irq_register_set tfe980_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x00000818, + .clear_reg_offset = 0x00000820, + .status_reg_offset = 0x00000828, + }, + { + .mask_reg_offset = 0x0000081C, + .clear_reg_offset = 0x00000824, + .status_reg_offset = 0x0000082C, + }, +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe980_ubwc_regs_client_0 = { + 
.meta_addr = 0x00000D40, + .meta_cfg = 0x00000D44, + .mode_cfg = 0x00000D48, + .stats_ctrl = 0x00000D4C, + .ctrl_2 = 0x00000D50, + .lossy_thresh0 = 0x00000D54, + .lossy_thresh1 = 0x00000D58, + .off_lossy_var = 0x00000D5C, + .bw_limit = 0x00000D1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe980_ubwc_regs_client_1 = { + .meta_addr = 0x00000E40, + .meta_cfg = 0x00000E44, + .mode_cfg = 0x00000E48, + .stats_ctrl = 0x00000E4C, + .ctrl_2 = 0x00000E50, + .lossy_thresh0 = 0x00000E54, + .lossy_thresh1 = 0x00000E58, + .off_lossy_var = 0x00000E5C, + .bw_limit = 0x00000E1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe980_ubwc_regs_client_2 = { + .meta_addr = 0x00000F40, + .meta_cfg = 0x00000F44, + .mode_cfg = 0x00000F48, + .stats_ctrl = 0x00000F4C, + .ctrl_2 = 0x00000F50, + .lossy_thresh0 = 0x00000F54, + .lossy_thresh1 = 0x00000F58, + .off_lossy_var = 0x00000F5C, + .bw_limit = 0x00000F1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe980_ubwc_regs_client_3 = { + .meta_addr = 0x00001040, + .meta_cfg = 0x00001044, + .mode_cfg = 0x00001048, + .stats_ctrl = 0x0000104C, + .ctrl_2 = 0x00001050, + .lossy_thresh0 = 0x00001054, + .lossy_thresh1 = 0x00001058, + .off_lossy_var = 0x0000105C, + .bw_limit = 0x0000101C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe980_ubwc_regs_client_4 = { + .meta_addr = 0x00001140, + .meta_cfg = 0x00001144, + .mode_cfg = 0x00001148, + .stats_ctrl = 0x0000114C, + .ctrl_2 = 0x00001150, + .lossy_thresh0 = 0x00001154, + .lossy_thresh1 = 0x00001158, + .off_lossy_var = 0x0000115C, + .bw_limit = 0x0000111C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe980_ubwc_regs_client_5 = { + .meta_addr = 0x00001240, + .meta_cfg = 0x00001244, + .mode_cfg = 0x00001248, + .stats_ctrl = 0x0000124C, + .ctrl_2 = 0x00001250, + 
.lossy_thresh0 = 0x00001254, + .lossy_thresh1 = 0x00001258, + .off_lossy_var = 0x0000125C, + .bw_limit = 0x0000121C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe980_ubwc_regs_client_6 = { + .meta_addr = 0x00001340, + .meta_cfg = 0x00001344, + .mode_cfg = 0x00001348, + .stats_ctrl = 0x0000134C, + .ctrl_2 = 0x00001350, + .lossy_thresh0 = 0x00001354, + .lossy_thresh1 = 0x00001358, + .off_lossy_var = 0x0000135C, + .bw_limit = 0x0000131C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + tfe980_ubwc_regs_client_20 = { + .meta_addr = 0x00002140, + .meta_cfg = 0x00002144, + .mode_cfg = 0x00002148, + .stats_ctrl = 0x0000214C, + .ctrl_2 = 0x00002150, + .lossy_thresh0 = 0x00002154, + .lossy_thresh1 = 0x00002158, + .off_lossy_var = 0x0000215C, + .bw_limit = 0x0000211C, + .ubwc_comp_en_bit = BIT(1), +}; + +static uint32_t tfe980_out_port_mid[][12] = { + {58}, + {59}, + {60}, + {61}, + {62}, + {32, 34, 36, 33, 35, 37}, + {32, 34, 36, 33, 35, 37, 38, 40, 42, 39, 41, 43}, + {44, 46, 48, 45, 47, 49, 50, 52, 54, 51, 53, 55}, + {38, 40, 42, 39, 41, 43, 44, 46, 48, 45, 47, 49}, + {8, 9, 10}, + {56, 57}, + {32, 33, 34}, + {35, 36, 37}, + {38, 39, 40}, + {41, 42, 43}, + {44, 45, 46}, + {47, 48, 49}, + {50, 51, 52}, + {53, 54, 55}, + {56, 57, 58}, + {11}, + {50, 51}, + {12}, + {13}, +}; + +static struct cam_vfe_bus_ver3_hw_info tfe980_bus_hw_info = { + .common_reg = { + .hw_version = 0x00000800, + .cgc_ovd = 0x00000808, + .ctxt_sel = 0x00000924, + .ubwc_static_ctrl = 0x00000858, + .pwr_iso_cfg = 0x0000085C, + .overflow_status_clear = 0x00000860, + .ccif_violation_status = 0x00000864, + .overflow_status = 0x00000868, + .image_size_violation_status = 0x00000870, + .debug_status_top_cfg = 0x000008F0, + .debug_status_top = 0x000008F4, + .test_bus_ctrl = 0x00000928, + .mc_read_sel_shift = 0x5, + .mc_write_sel_shift = 0x0, + .mc_ctxt_mask = 0x7, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 
0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 2, + .irq_reg_set = tfe980_bus_irq_reg, + .global_irq_cmd_offset = 0x00000830, + .global_clear_bitmask = 0x00000001, + }, + .num_perf_counters = 8, + .perf_cnt_status = 0x000008B4, + .perf_cnt_reg = { + { + .perf_cnt_cfg = 0x00000874, + .perf_cnt_val = 0x00000894, + }, + { + .perf_cnt_cfg = 0x00000878, + .perf_cnt_val = 0x00000898, + }, + { + .perf_cnt_cfg = 0x0000087C, + .perf_cnt_val = 0x0000089C, + }, + { + .perf_cnt_cfg = 0x00000880, + .perf_cnt_val = 0x000008A0, + }, + { + .perf_cnt_cfg = 0x00000884, + .perf_cnt_val = 0x000008A4, + }, + { + .perf_cnt_cfg = 0x00000888, + .perf_cnt_val = 0x000008A8, + }, + { + .perf_cnt_cfg = 0x0000088C, + .perf_cnt_val = 0x000008AC, + }, + { + .perf_cnt_cfg = 0x00000890, + .perf_cnt_val = 0x000008B0, + }, + }, + }, + .num_client = CAM_TFE_BUS_VER3_980_MAX_CLIENTS, + .bus_client_reg = { + /* BUS Client 0 FULL */ + { + .cfg = 0x00000D00, + .image_addr = 0x00000D04, + .frame_incr = 0x00000D08, + .image_cfg_0 = 0x00000D0C, + .image_cfg_1 = 0x00000D10, + .image_cfg_2 = 0x00000D14, + .packer_cfg = 0x00000D18, + .frame_header_addr = 0x00000D20, + .frame_header_incr = 0x00000D24, + .frame_header_cfg = 0x00000D28, + .irq_subsample_period = 0x00000D30, + .irq_subsample_pattern = 0x00000D34, + .framedrop_period = 0x00000D38, + .framedrop_pattern = 0x00000D3C, + .mmu_prefetch_cfg = 0x00000D60, + .mmu_prefetch_max_offset = 0x00000D64, + .system_cache_cfg = 0x00000D68, + .addr_cfg = 0x00000D70, + .ctxt_cfg = 0x00000D78, + .addr_status_0 = 0x00000D90, + .addr_status_1 = 0x00000D94, + .addr_status_2 = 0x00000D98, + .addr_status_3 = 0x00000D9C, + .debug_status_cfg = 0x00000D7C, + .debug_status_0 = 0x00000D80, + .debug_status_1 = 0x00000D84, + .bw_limiter_addr = 0x00000D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_0, + .supported_formats = BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10)| + 
BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN8) | BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + .rcs_en_mask = 0x200, + }, + /* BUS Client 1 DS4_Y */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + .frame_header_cfg = 0x00000E28, + .irq_subsample_period = 0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00000E38, + .framedrop_pattern = 0x00000E3C, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 0x00000E64, + .system_cache_cfg = 0x00000E68, + .addr_cfg = 0x00000E70, + .ctxt_cfg = 0x00000E78, + .addr_status_0 = 0x00000E90, + .addr_status_1 = 0x00000E94, + .addr_status_2 = 0x00000E98, + .addr_status_3 = 0x00000E9C, + .debug_status_cfg = 0x00000E7C, + .debug_status_0 = 0x00000E80, + .debug_status_1 = 0x00000E84, + .bw_limiter_addr = 0x00000E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_1, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 2 DS4_C */ + { + .cfg = 0x00000F00, + .image_addr = 0x00000F04, + .frame_incr = 0x00000F08, + .image_cfg_0 = 0x00000F0C, + .image_cfg_1 = 0x00000F10, + .image_cfg_2 = 0x00000F14, + .packer_cfg = 0x00000F18, + .frame_header_addr = 0x00000F20, + .frame_header_incr = 0x00000F24, + .frame_header_cfg = 0x00000F28, + .irq_subsample_period = 0x00000F30, + .irq_subsample_pattern = 0x00000F34, + .framedrop_period = 0x00000F38, + .framedrop_pattern = 0x00000F3C, + 
.mmu_prefetch_cfg = 0x00000F60, + .mmu_prefetch_max_offset = 0x00000F64, + .system_cache_cfg = 0x00000F68, + .addr_cfg = 0x00000F70, + .ctxt_cfg = 0x00000F78, + .addr_status_0 = 0x00000F90, + .addr_status_1 = 0x00000F94, + .addr_status_2 = 0x00000F98, + .addr_status_3 = 0x00000F9C, + .debug_status_cfg = 0x00000F7C, + .debug_status_0 = 0x00000F80, + .debug_status_1 = 0x00000F84, + .bw_limiter_addr = 0x00000F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_2, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 3 DS16_Y */ + { + .cfg = 0x00001000, + .image_addr = 0x00001004, + .frame_incr = 0x00001008, + .image_cfg_0 = 0x0000100C, + .image_cfg_1 = 0x00001010, + .image_cfg_2 = 0x00001014, + .packer_cfg = 0x00001018, + .frame_header_addr = 0x00001020, + .frame_header_incr = 0x00001024, + .frame_header_cfg = 0x00001028, + .irq_subsample_period = 0x00001030, + .irq_subsample_pattern = 0x00001034, + .framedrop_period = 0x00001038, + .framedrop_pattern = 0x0000103C, + .mmu_prefetch_cfg = 0x00001060, + .mmu_prefetch_max_offset = 0x00001064, + .system_cache_cfg = 0x00001068, + .addr_cfg = 0x00001070, + .ctxt_cfg = 0x00001078, + .addr_status_0 = 0x00001090, + .addr_status_1 = 0x00001094, + .addr_status_2 = 0x00001098, + .addr_status_3 = 0x0000109C, + .debug_status_cfg = 0x0000107C, + .debug_status_0 = 0x00001080, + .debug_status_1 = 0x00001084, + .bw_limiter_addr = 0x0000101C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_3, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 4 DS16_C */ + { + .cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + .image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + 
.packer_cfg = 0x00001118, + .frame_header_addr = 0x00001120, + .frame_header_incr = 0x00001124, + .frame_header_cfg = 0x00001128, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .framedrop_period = 0x00001138, + .framedrop_pattern = 0x0000113C, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .system_cache_cfg = 0x00001168, + .addr_cfg = 0x00001170, + .ctxt_cfg = 0x00001178, + .addr_status_0 = 0x00001190, + .addr_status_1 = 0x00001194, + .addr_status_2 = 0x00001198, + .addr_status_3 = 0x0000119C, + .debug_status_cfg = 0x0000117C, + .debug_status_0 = 0x00001180, + .debug_status_1 = 0x00001184, + .bw_limiter_addr = 0x0000111C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_4, + .supported_formats = BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10), + }, + /* BUS Client 5 DS2_Y */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .framedrop_period = 0x00001238, + .framedrop_pattern = 0x0000123C, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .system_cache_cfg = 0x00001268, + .addr_cfg = 0x00001270, + .ctxt_cfg = 0x00001278, + .addr_status_0 = 0x00001290, + .addr_status_1 = 0x00001294, + .addr_status_2 = 0x00001298, + .addr_status_3 = 0x0000129C, + .debug_status_cfg = 0x0000127C, + .debug_status_0 = 0x00001280, + .debug_status_1 = 0x00001284, + .bw_limiter_addr = 0x0000121C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_5, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) 
| BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10), + .rcs_en_mask = 0x200, + }, + /* BUS Client 6 DS2_C */ + { + .cfg = 0x00001300, + .image_addr = 0x00001304, + .frame_incr = 0x00001308, + .image_cfg_0 = 0x0000130C, + .image_cfg_1 = 0x00001310, + .image_cfg_2 = 0x00001314, + .packer_cfg = 0x00001318, + .frame_header_addr = 0x00001320, + .frame_header_incr = 0x00001324, + .frame_header_cfg = 0x00001328, + .irq_subsample_period = 0x00001330, + .irq_subsample_pattern = 0x00001334, + .framedrop_period = 0x00001338, + .framedrop_pattern = 0x0000133C, + .mmu_prefetch_cfg = 0x00001360, + .mmu_prefetch_max_offset = 0x00001364, + .system_cache_cfg = 0x00001368, + .addr_cfg = 0x00001370, + .ctxt_cfg = 0x00001378, + .addr_status_0 = 0x00001390, + .addr_status_1 = 0x00001394, + .addr_status_2 = 0x00001398, + .addr_status_3 = 0x0000139C, + .debug_status_cfg = 0x0000137C, + .debug_status_0 = 0x00001380, + .debug_status_1 = 0x00001384, + .bw_limiter_addr = 0x0000131C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &tfe980_ubwc_regs_client_6, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_GBR_TP10) | + BIT_ULL(CAM_FORMAT_GBR_UBWC_TP10) | + BIT_ULL(CAM_FORMAT_BAYER_UBWC_TP10), + + .rcs_en_mask = 0x200, + }, + /* BUS Client 7 FD_Y */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + 
.image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .frame_header_addr = 0x00001420, + .frame_header_incr = 0x00001424, + .frame_header_cfg = 0x00001428, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .system_cache_cfg = 0x00001468, + .addr_cfg = 0x00001470, + .ctxt_cfg = 0x00001478, + .addr_status_0 = 0x00001490, + .addr_status_1 = 0x00001494, + .addr_status_2 = 0x00001498, + .addr_status_3 = 0x0000149C, + .debug_status_cfg = 0x0000147C, + .debug_status_0 = 0x00001480, + .debug_status_1 = 0x00001484, + .bw_limiter_addr = 0x0000141C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12), + }, + /* BUS Client 8 FD_C */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + .frame_header_incr = 0x00001524, + .frame_header_cfg = 0x00001528, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 0x00001568, + .addr_cfg = 0x00001570, + .ctxt_cfg = 0x00001578, + .addr_status_0 = 0x00001590, + .addr_status_1 = 0x00001594, + .addr_status_2 = 0x00001598, + .addr_status_3 = 0x0000159C, + .debug_status_cfg = 0x0000157C, + .debug_status_0 = 0x00001580, + .debug_status_1 = 0x00001584, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_NV21) | + 
BIT_ULL(CAM_FORMAT_NV12), + }, + /* BUS Client 9 IR */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .ctxt_cfg = 0x00001678, + .addr_status_0 = 0x00001690, + .addr_status_1 = 0x00001694, + .addr_status_2 = 0x00001698, + .addr_status_3 = 0x0000169C, + .debug_status_cfg = 0x0000167C, + .debug_status_0 = 0x00001680, + .debug_status_1 = 0x00001684, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14), + }, + /* BUS Client 10 STATS_AEC_BE */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .ctxt_cfg = 0x00001778, + 
.addr_status_0 = 0x00001790, + .addr_status_1 = 0x00001794, + .addr_status_2 = 0x00001798, + .addr_status_3 = 0x0000179C, + .debug_status_cfg = 0x0000177C, + .debug_status_0 = 0x00001780, + .debug_status_1 = 0x00001784, + .bw_limiter_addr = 0x0000171C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 11 STATS_AEC_BHIST */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .ctxt_cfg = 0x00001878, + .addr_status_0 = 0x00001890, + .addr_status_1 = 0x00001894, + .addr_status_2 = 0x00001898, + .addr_status_3 = 0x0000189C, + .debug_status_cfg = 0x0000187C, + .debug_status_0 = 0x00001880, + .debug_status_1 = 0x00001884, + .bw_limiter_addr = 0x0000181C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 12 STATS_TINTLESS_BG */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + 
.addr_cfg = 0x00001970, + .ctxt_cfg = 0x00001978, + .addr_status_0 = 0x00001990, + .addr_status_1 = 0x00001994, + .addr_status_2 = 0x00001998, + .addr_status_3 = 0x0000199C, + .debug_status_cfg = 0x0000197C, + .debug_status_0 = 0x00001980, + .debug_status_1 = 0x00001984, + .bw_limiter_addr = 0x0000191C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 13 STATS_AWB_BG */ + { + .cfg = 0x00001A00, + .image_addr = 0x00001A04, + .frame_incr = 0x00001A08, + .image_cfg_0 = 0x00001A0C, + .image_cfg_1 = 0x00001A10, + .image_cfg_2 = 0x00001A14, + .packer_cfg = 0x00001A18, + .frame_header_addr = 0x00001A20, + .frame_header_incr = 0x00001A24, + .frame_header_cfg = 0x00001A28, + .irq_subsample_period = 0x00001A30, + .irq_subsample_pattern = 0x00001A34, + .framedrop_period = 0x00001A38, + .framedrop_pattern = 0x00001A3C, + .mmu_prefetch_cfg = 0x00001A60, + .mmu_prefetch_max_offset = 0x00001A64, + .system_cache_cfg = 0x00001A68, + .addr_cfg = 0x00001A70, + .ctxt_cfg = 0x00001A78, + .addr_status_0 = 0x00001A90, + .addr_status_1 = 0x00001A94, + .addr_status_2 = 0x00001A98, + .addr_status_3 = 0x00001A9C, + .debug_status_cfg = 0x00001A7C, + .debug_status_0 = 0x00001A80, + .debug_status_1 = 0x00001A84, + .bw_limiter_addr = 0x00001A1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 14 STATS_AWB_BFW */ + { + .cfg = 0x00001B00, + .image_addr = 0x00001B04, + .frame_incr = 0x00001B08, + .image_cfg_0 = 0x00001B0C, + .image_cfg_1 = 0x00001B10, + .image_cfg_2 = 0x00001B14, + .packer_cfg = 0x00001B18, + .frame_header_addr = 0x00001B20, + .frame_header_incr = 0x00001B24, + .frame_header_cfg = 0x00001B28, + .irq_subsample_period = 0x00001B30, + .irq_subsample_pattern = 0x00001B34, + .framedrop_period = 0x00001B38, + .framedrop_pattern = 0x00001B3C, + .mmu_prefetch_cfg = 0x00001B60, + .mmu_prefetch_max_offset = 
0x00001B64, + .system_cache_cfg = 0x00001B68, + .addr_cfg = 0x00001B70, + .ctxt_cfg = 0x00001B78, + .addr_status_0 = 0x00001B90, + .addr_status_1 = 0x00001B94, + .addr_status_2 = 0x00001B98, + .addr_status_3 = 0x00001B9C, + .debug_status_cfg = 0x00001B7C, + .debug_status_0 = 0x00001B80, + .debug_status_1 = 0x00001B84, + .bw_limiter_addr = 0x00001B1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 15 STATS_AF_BHIST */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .framedrop_period = 0x00001C38, + .framedrop_pattern = 0x00001C3C, + .mmu_prefetch_cfg = 0x00001C60, + .mmu_prefetch_max_offset = 0x00001C64, + .system_cache_cfg = 0x00001C68, + .addr_cfg = 0x00001C70, + .ctxt_cfg = 0x00001C78, + .addr_status_0 = 0x00001C90, + .addr_status_1 = 0x00001C94, + .addr_status_2 = 0x00001C98, + .addr_status_3 = 0x00001C9C, + .debug_status_cfg = 0x00001C7C, + .debug_status_0 = 0x00001C80, + .debug_status_1 = 0x00001C84, + .bw_limiter_addr = 0x00001C1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 16 STATS_ALSC_BG */ + { + .cfg = 0x00001D00, + .image_addr = 0x00001D04, + .frame_incr = 0x00001D08, + .image_cfg_0 = 0x00001D0C, + .image_cfg_1 = 0x00001D10, + .image_cfg_2 = 0x00001D14, + .packer_cfg = 0x00001D18, + .frame_header_addr = 0x00001D20, + .frame_header_incr = 0x00001D24, + .frame_header_cfg = 0x00001D28, + .irq_subsample_period = 0x00001D30, + .irq_subsample_pattern = 0x00001D34, + .framedrop_period = 0x00001D38, + .framedrop_pattern = 0x00001D3C, + .mmu_prefetch_cfg 
= 0x00001D60, + .mmu_prefetch_max_offset = 0x00001D64, + .system_cache_cfg = 0x00001D68, + .addr_cfg = 0x00001D70, + .ctxt_cfg = 0x00001D78, + .addr_status_0 = 0x00001D90, + .addr_status_1 = 0x00001D94, + .addr_status_2 = 0x00001D98, + .addr_status_3 = 0x00001D9C, + .debug_status_cfg = 0x00001D7C, + .debug_status_0 = 0x00001D80, + .debug_status_1 = 0x00001D84, + .bw_limiter_addr = 0x00001D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 17 STATS_FLICKER_BAYERS */ + { + .cfg = 0x00001E00, + .image_addr = 0x00001E04, + .frame_incr = 0x00001E08, + .image_cfg_0 = 0x00001E0C, + .image_cfg_1 = 0x00001E10, + .image_cfg_2 = 0x00001E14, + .packer_cfg = 0x00001E18, + .frame_header_addr = 0x00001E20, + .frame_header_incr = 0x00001E24, + .frame_header_cfg = 0x00001E28, + .irq_subsample_period = 0x00001E30, + .irq_subsample_pattern = 0x00001E34, + .framedrop_period = 0x00001E38, + .framedrop_pattern = 0x00001E3C, + .mmu_prefetch_cfg = 0x00001E60, + .mmu_prefetch_max_offset = 0x00001E64, + .system_cache_cfg = 0x00001E68, + .addr_cfg = 0x00001E70, + .ctxt_cfg = 0x00001E78, + .addr_status_0 = 0x00001E90, + .addr_status_1 = 0x00001E94, + .addr_status_2 = 0x00001E98, + .addr_status_3 = 0x00001E9C, + .debug_status_cfg = 0x00001E7C, + .debug_status_0 = 0x00001E80, + .debug_status_1 = 0x00001E84, + .bw_limiter_addr = 0x00001E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN32), + }, + /* BUS Client 18 STATS_TMC_BHIST */ + { + .cfg = 0x00001F00, + .image_addr = 0x00001F04, + .frame_incr = 0x00001F08, + .image_cfg_0 = 0x00001F0C, + .image_cfg_1 = 0x00001F10, + .image_cfg_2 = 0x00001F14, + .packer_cfg = 0x00001F18, + .frame_header_addr = 0x00001F20, + .frame_header_incr = 0x00001F24, + .frame_header_cfg = 0x00001F28, + .irq_subsample_period = 0x00001F30, + .irq_subsample_pattern = 0x00001F34, + .framedrop_period = 0x00001F38, + 
.framedrop_pattern = 0x00001F3C, + .mmu_prefetch_cfg = 0x00001F60, + .mmu_prefetch_max_offset = 0x00001F64, + .system_cache_cfg = 0x00001F68, + .addr_cfg = 0x00001F70, + .ctxt_cfg = 0x00001F78, + .addr_status_0 = 0x00001F90, + .addr_status_1 = 0x00001F94, + .addr_status_2 = 0x00001F98, + .addr_status_3 = 0x00001F9C, + .debug_status_cfg = 0x00001F7C, + .debug_status_0 = 0x00001F80, + .debug_status_1 = 0x00001F84, + .bw_limiter_addr = 0x00001F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 19 PDAF_0 */ /* Note: PDAF_SAD == 2PD*/ + { + .cfg = 0x00002000, + .image_addr = 0x00002004, + .frame_incr = 0x00002008, + .image_cfg_0 = 0x0000200C, + .image_cfg_1 = 0x00002010, + .image_cfg_2 = 0x00002014, + .packer_cfg = 0x00002018, + .frame_header_addr = 0x00002020, + .frame_header_incr = 0x00002024, + .frame_header_cfg = 0x00002028, + .irq_subsample_period = 0x00002030, + .irq_subsample_pattern = 0x00002034, + .framedrop_period = 0x00002038, + .framedrop_pattern = 0x0000203C, + .mmu_prefetch_cfg = 0x00002060, + .mmu_prefetch_max_offset = 0x00002064, + .system_cache_cfg = 0x00002068, + .addr_cfg = 0x00002070, + .ctxt_cfg = 0x00002078, + .addr_status_0 = 0x00002090, + .addr_status_1 = 0x00002094, + .addr_status_2 = 0x00002098, + .addr_status_3 = 0x0000209C, + .debug_status_cfg = 0x0000207C, + .debug_status_0 = 0x00002080, + .debug_status_1 = 0x00002084, + .bw_limiter_addr = 0x0000201C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 20 PDAF_1 */ + { + .cfg = 0x00002100, + .image_addr = 0x00002104, + .frame_incr = 0x00002108, + .image_cfg_0 = 0x0000210C, + .image_cfg_1 = 0x00002110, + .image_cfg_2 = 0x00002114, + .packer_cfg = 0x00002118, + .frame_header_addr = 0x00002120, + .frame_header_incr = 0x00002124, + .frame_header_cfg = 0x00002128, + .irq_subsample_period = 0x00002130, + 
.irq_subsample_pattern = 0x00002134, + .framedrop_period = 0x00002138, + .framedrop_pattern = 0x0000213C, + .mmu_prefetch_cfg = 0x00002160, + .mmu_prefetch_max_offset = 0x00002164, + .system_cache_cfg = 0x00002168, + .addr_cfg = 0x00002170, + .ctxt_cfg = 0x00002178, + .addr_status_0 = 0x00002190, + .addr_status_1 = 0x00002194, + .addr_status_2 = 0x00002198, + .addr_status_3 = 0x0000219C, + .debug_status_cfg = 0x0000217C, + .debug_status_0 = 0x00002180, + .debug_status_1 = 0x00002184, + .bw_limiter_addr = 0x0000211C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = &tfe980_ubwc_regs_client_20, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_UBWC_P016) | BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 21 PDAF_2 */ + { + .cfg = 0x00002200, + .image_addr = 0x00002204, + .frame_incr = 0x00002208, + .image_cfg_0 = 0x0000220C, + .image_cfg_1 = 0x00002210, + .image_cfg_2 = 0x00002214, + .packer_cfg = 0x00002218, + .frame_header_addr = 0x00002220, + .frame_header_incr = 0x00002224, + .frame_header_cfg = 0x00002228, + .irq_subsample_period = 0x00002230, + .irq_subsample_pattern = 0x00002234, + .framedrop_period = 0x00002238, + .framedrop_pattern = 0x0000223C, + .mmu_prefetch_cfg = 0x00002260, + .mmu_prefetch_max_offset = 0x00002264, + .system_cache_cfg = 0x00002268, + .addr_cfg = 0x00002270, + .ctxt_cfg = 0x00002278, + .addr_status_0 = 0x00002290, + .addr_status_1 = 0x00002294, + .addr_status_2 = 0x00002298, + .addr_status_3 = 0x0000229C, + .debug_status_cfg = 0x0000227C, + .debug_status_0 = 0x00002280, + .debug_status_1 = 0x00002284, + .bw_limiter_addr = 0x0000221C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | 
BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 22 PDAF_3 */ + { + .cfg = 0x00002300, + .image_addr = 0x00002304, + .frame_incr = 0x00002308, + .image_cfg_0 = 0x0000230C, + .image_cfg_1 = 0x00002310, + .image_cfg_2 = 0x00002314, + .packer_cfg = 0x00002318, + .frame_header_addr = 0x00002320, + .frame_header_incr = 0x00002324, + .frame_header_cfg = 0x00002328, + .irq_subsample_period = 0x00002330, + .irq_subsample_pattern = 0x00002334, + .framedrop_period = 0x00002338, + .framedrop_pattern = 0x0000233C, + .mmu_prefetch_cfg = 0x00002360, + .mmu_prefetch_max_offset = 0x00002364, + .system_cache_cfg = 0x00002368, + .addr_cfg = 0x00002370, + .ctxt_cfg = 0x00002378, + .addr_status_0 = 0x00002390, + .addr_status_1 = 0x00002394, + .addr_status_2 = 0x00002398, + .addr_status_3 = 0x0000239C, + .debug_status_cfg = 0x0000237C, + .debug_status_0 = 0x00002380, + .debug_status_1 = 0x00002384, + .bw_limiter_addr = 0x0000231C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 23 RDI_0 */ + { + .cfg = 0x00002400, + .image_addr = 0x00002404, + .frame_incr = 0x00002408, + .image_cfg_0 = 0x0000240C, + .image_cfg_1 = 0x00002410, + .image_cfg_2 = 0x00002414, + .packer_cfg = 0x00002418, + .frame_header_addr = 0x00002420, + .frame_header_incr = 0x00002424, + .frame_header_cfg = 0x00002428, + .irq_subsample_period = 0x00002430, + .irq_subsample_pattern = 0x00002434, + .framedrop_period = 0x00002438, + .framedrop_pattern = 0x0000243C, + .mmu_prefetch_cfg = 0x00002460, + .mmu_prefetch_max_offset = 0x00002464, + .system_cache_cfg = 0x00002468, + .addr_cfg = 0x00002470, + .ctxt_cfg = 0x00002478, + .addr_status_0 = 0x00002490, + .addr_status_1 = 0x00002494, + .addr_status_2 = 0x00002498, + .addr_status_3 = 0x0000249C, + .debug_status_cfg = 0x0000247C, + .debug_status_0 = 0x00002480, + .debug_status_1 = 0x00002484, + .bw_limiter_addr 
= 0x0000241C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_5, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 24 RDI_1 */ + { + .cfg = 0x00002500, + .image_addr = 0x00002504, + .frame_incr = 0x00002508, + .image_cfg_0 = 0x0000250C, + .image_cfg_1 = 0x00002510, + .image_cfg_2 = 0x00002514, + .packer_cfg = 0x00002518, + .frame_header_addr = 0x00002520, + .frame_header_incr = 0x00002524, + .frame_header_cfg = 0x00002528, + .irq_subsample_period = 0x00002530, + .irq_subsample_pattern = 0x00002534, + .framedrop_period = 0x00002538, + .framedrop_pattern = 0x0000253C, + .mmu_prefetch_cfg = 0x00002560, + .mmu_prefetch_max_offset = 0x00002564, + .system_cache_cfg = 0x00002568, + .addr_cfg = 0x00002570, + .ctxt_cfg = 0x00002578, + .addr_status_0 = 0x00002590, + .addr_status_1 = 0x00002594, + .addr_status_2 = 0x00002598, + .addr_status_3 = 0x0000259C, + .debug_status_cfg = 0x0000257C, + .debug_status_0 = 0x00002580, + .debug_status_1 = 0x00002584, + .bw_limiter_addr = 0x0000251C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 25 RDI_2 */ + { + .cfg = 0x00002600, + .image_addr = 0x00002604, + .frame_incr = 
0x00002608, + .image_cfg_0 = 0x0000260C, + .image_cfg_1 = 0x00002610, + .image_cfg_2 = 0x00002614, + .packer_cfg = 0x00002618, + .frame_header_addr = 0x00002620, + .frame_header_incr = 0x00002624, + .frame_header_cfg = 0x00002628, + .irq_subsample_period = 0x00002630, + .irq_subsample_pattern = 0x00002634, + .framedrop_period = 0x00002638, + .framedrop_pattern = 0x0000263C, + .mmu_prefetch_cfg = 0x00002660, + .mmu_prefetch_max_offset = 0x00002664, + .system_cache_cfg = 0x00002668, + .addr_cfg = 0x00002670, + .ctxt_cfg = 0x00002678, + .addr_status_0 = 0x00002690, + .addr_status_1 = 0x00002694, + .addr_status_2 = 0x00002698, + .addr_status_3 = 0x0000269C, + .debug_status_cfg = 0x0000267C, + .debug_status_0 = 0x00002680, + .debug_status_1 = 0x00002684, + .bw_limiter_addr = 0x0000261C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN16_10) | + BIT_ULL(CAM_FORMAT_PLAIN16_12) | BIT_ULL(CAM_FORMAT_PLAIN16_14) | + BIT_ULL(CAM_FORMAT_PLAIN16_16) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 26 RDI_3 */ + { + .cfg = 0x00002700, + .image_addr = 0x00002704, + .frame_incr = 0x00002708, + .image_cfg_0 = 0x0000270C, + .image_cfg_1 = 0x00002710, + .image_cfg_2 = 0x00002714, + .packer_cfg = 0x00002718, + .frame_header_addr = 0x00002720, + .frame_header_incr = 0x00002724, + .frame_header_cfg = 0x00002728, + .irq_subsample_period = 0x00002730, + .irq_subsample_pattern = 0x00002734, + .framedrop_period = 0x00002738, + .framedrop_pattern = 0x0000273C, + .mmu_prefetch_cfg = 0x00002760, + .mmu_prefetch_max_offset = 0x00002764, + .system_cache_cfg = 0x00002768, + .addr_cfg = 0x00002770, + .ctxt_cfg = 0x00002778, + .addr_status_0 = 0x00002790, + .addr_status_1 = 0x00002794, + 
.addr_status_2 = 0x00002798, + .addr_status_3 = 0x0000279C, + .debug_status_cfg = 0x0000277C, + .debug_status_0 = 0x00002780, + .debug_status_1 = 0x00002784, + .bw_limiter_addr = 0x0000271C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_8, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 27 RDI_4 */ + { + .cfg = 0x00002800, + .image_addr = 0x00002804, + .frame_incr = 0x00002808, + .image_cfg_0 = 0x0000280C, + .image_cfg_1 = 0x00002810, + .image_cfg_2 = 0x00002814, + .packer_cfg = 0x00002818, + .frame_header_addr = 0x00002820, + .frame_header_incr = 0x00002824, + .frame_header_cfg = 0x00002828, + .irq_subsample_period = 0x00002830, + .irq_subsample_pattern = 0x00002834, + .framedrop_period = 0x00002838, + .framedrop_pattern = 0x0000283C, + .mmu_prefetch_cfg = 0x00002860, + .mmu_prefetch_max_offset = 0x00002864, + .system_cache_cfg = 0x00002868, + .addr_cfg = 0x00002870, + .ctxt_cfg = 0x00002878, + .addr_status_0 = 0x00002890, + .addr_status_1 = 0x00002894, + .addr_status_2 = 0x00002898, + .addr_status_3 = 0x0000289C, + .debug_status_cfg = 0x0000287C, + .debug_status_0 = 0x00002880, + .debug_status_1 = 0x00002884, + .bw_limiter_addr = 0x0000281C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_9, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN128), + }, + }, + .num_out = 24, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .mid = tfe980_out_port_mid[0], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 23, + }, + .name = { + "RDI_0", + }, + .pid_mask = 0x6400, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .mid = tfe980_out_port_mid[1], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 24, + }, + .name = { + "RDI_1", + }, + .pid_mask = 0x6400, + }, + { + 
.vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .mid = tfe980_out_port_mid[2], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 25, + }, + .name = { + "RDI_2", + }, + .pid_mask = 0x6400, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_5, + .mid = tfe980_out_port_mid[3], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 26, + }, + .name = { + "RDI_3", + }, + .pid_mask = 0x6400, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI4, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_6, + .mid = tfe980_out_port_mid[4], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 27, + }, + .name = { + "RDI_4", + }, + .pid_mask = 0x6400, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL, + .max_width = 4672, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[5], + .num_mid = 6, + .num_wm = 1, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 0, + }, + .name = { + "FULL", + }, + .pid_mask = 0x1300, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4, + .max_width = 1168, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[6], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 1, + 2, + }, + .name = { + "DS4_Y", + "DS4_C" + }, + .pid_mask = 0x6400, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16, + .max_width = 292, + .max_height = 1024, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[7], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .mc_grp_shift = 0, + .wm_idx = { + 3, + 4, + }, + .name = { + "DS16_Y", + "DS16_C", + }, + .pid_mask = 0x6400, + }, + { + 
.vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS2, + .max_width = 4672, + .max_height = 8192, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[8], + .num_mid = 12, + .num_wm = 2, + .line_based = 1, + .mc_based = true, + .wm_idx = { + 5, + 6, + }, + .name = { + "DS2_Y", + "DS2_C", + }, + .pid_mask = 0x1300, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD, + .max_width = 9312, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[9], + .num_mid = 3, + .num_wm = 2, + .line_based = 1, + .cntxt_cfg_except = true, + .wm_idx = { + 7, + 8, + }, + .name = { + "FD_Y", + "FD_C", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_IR, + .max_width = 9312, + .max_height = 8192, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[10], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .cntxt_cfg_except = true, + .wm_idx = { + 9, + }, + .name = { + "IR", + }, + .pid_mask = 0x6400, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[11], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 10, + }, + .name = { + "STATS_AEC_BE", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[12], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 11, + }, + .name = { + "STATS_BHIST", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[13], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 12, + }, + .name = { + "STATS_TL_BG", + }, + 
.pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[14], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 13, + }, + .name = { + "STATS_AWB_BG", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[15], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 14, + }, + .name = { + "AWB_BFW", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AF_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[16], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 15, + }, + .name = { + "AF_BHIST", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_ALSC, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[17], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 16, + }, + .name = { + "ALSC_BG", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[18], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 17, + }, + .name = { + "STATS_RS", + }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TMC_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = tfe980_out_port_mid[19], + .num_mid = 3, + .num_wm = 1, + .mc_based = true, + .mc_grp_shift = 4, + .wm_idx = { + 18, + }, + .name = { + "STATS_TMC_BHIST", 
+ }, + .pid_mask = 0x70, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD, + .max_width = 14592, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe980_out_port_mid[20], + .num_mid = 1, + .num_wm = 1, + .early_done_mask = BIT(28), + .wm_idx = { + 19, + }, + .name = { + "PDAF_0_2PD", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe980_out_port_mid[21], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 20, + }, + .name = { + "PDAF_1_PREPROCESS_2PD", + }, + .pid_mask = 0x1300, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe980_out_port_mid[22], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 21, + }, + .name = { + "PDAF_2_PARSED_DATA", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = tfe980_out_port_mid[23], + .num_mid = 1, + .num_wm = 1, + .early_done_mask = BIT(29), + .mc_based = false, + .mc_grp_shift = 4, + .wm_idx = { + 22, + }, + .name = { + "STATS_CAF", + }, + .pid_mask = 0x70000, + }, + }, + .num_cons_err = 32, + .constraint_error_list = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input Not Supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input Not Supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input Not Supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input Not Supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format Not Supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 BPP format Not Supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 24 BPP 
format Not Supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 32 BPP format Not Supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack 64 BPP format Not Supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 20 format Not Supported" + }, + { + .bitmask = BIT(10), + .error_description = "Pack MIPI 14 format Not Supported" + }, + { + .bitmask = BIT(11), + .error_description = "Pack MIPI 12 format Not Supported" + }, + { + .bitmask = BIT(12), + .error_description = "Pack MIPI 10 format Not Supported" + }, + { + .bitmask = BIT(13), + .error_description = "Pack 128 BPP format Not Supported" + }, + { + .bitmask = BIT(14), + .error_description = "UBWC P016 format Not Supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC P010 format Not Supported" + }, + { + .bitmask = BIT(16), + .error_description = "UBWC NV12 format Not Supported" + }, + { + .bitmask = BIT(17), + .error_description = "UBWC NV12 4R format Not Supported" + }, + { + .bitmask = BIT(18), + .error_description = "UBWC TP10 format Not Supported" + }, + { + .bitmask = BIT(19), + .error_description = "Frame based Mode Not Supported" + }, + { + .bitmask = BIT(20), + .error_description = "Index based Mode Not Supported" + }, + { + .bitmask = BIT(21), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = BIT(22), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(23), + .error_description = "FIFO framehdr addr unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(25), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Stride unalign" + }, + { + .bitmask = BIT(28), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(29), + .error_description = "Image Width unalign", + }, + { + .bitmask = 
BIT(30), + .error_description = "Image Height unalign", + }, + { + .bitmask = BIT(31), + .error_description = "Meta Stride unalign", + }, + }, + .num_comp_grp = 10, + .support_consumed_addr = true, + .mc_comp_done_mask = { + BIT(24), 0x0, BIT(25), 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, + }, + .comp_done_mask = { + 0x7, BIT(3), 0x70, BIT(7), BIT(8), BIT(16), + BIT(17), BIT(18), BIT(19), BIT(20), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 43, + .pack_align_shift = 5, + .max_bw_counter_limit = 0xFF, + .skip_regdump = true, + .skip_regdump_start_offset = 0x800, + .skip_regdump_stop_offset = 0x209C, +}; + +static struct cam_vfe_irq_hw_info tfe980_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_EXT_CSID, + .top_irq_reg = &tfe980_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_tfe980_hw_info = { + .irq_hw_info = &tfe980_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &tfe980_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &tfe980_top_hw_info, +}; +#endif /* _CAM_TFE980_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe.c new file mode 100644 index 0000000000..a39d2689a9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/module.h> +#include "cam_vfe170.h" +#include "cam_vfe170_150.h" +#include "cam_vfe175.h" +#include "cam_vfe175_130.h" +#include "cam_vfe480.h" +#include "cam_vfe570.h" +#include "cam_vfe580.h" +#include "cam_vfe680.h" +#include "cam_vfe680_110.h" +#include "cam_vfe780.h" +#include "cam_vfe880.h" +#include "cam_tfe980.h" +#include "cam_tfe975.h" +#include "cam_tfe970.h" +#include "cam_tfe1080.h" +#include "cam_vfe_lite17x.h" +#include "cam_vfe_lite48x.h" +#include "cam_vfe_lite68x.h" +#include "cam_vfe_lite78x.h" +#include "cam_vfe_lite88x.h" +#include "cam_vfe_lite98x.h" +#include "cam_vfe_lite97x.h" +#include "cam_vfe_lite108x.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_core.h" +#include "cam_vfe_dev.h" +#include "camera_main.h" + +static const struct of_device_id cam_vfe_dt_match[] = { + { + .compatible = "qcom,vfe170", + .data = &cam_vfe170_hw_info, + }, + { + .compatible = "qcom,vfe170_150", + .data = &cam_vfe170_150_hw_info, + }, + { + .compatible = "qcom,vfe175", + .data = &cam_vfe175_hw_info, + }, + { + .compatible = "qcom,vfe175_130", + .data = &cam_vfe175_130_hw_info, + }, + { + .compatible = "qcom,vfe480", + .data = &cam_vfe480_hw_info, + }, + { + .compatible = "qcom,vfe570", + .data = &cam_vfe570_hw_info, + }, + { + .compatible = "qcom,vfe580", + .data = &cam_vfe580_hw_info, + }, + { + .compatible = "qcom,vfe680", + .data = &cam_vfe680_hw_info, + }, + { + .compatible = "qcom,vfe680_110", + .data = &cam_vfe680_110_hw_info, + }, + { + .compatible = "qcom,vfe780", + .data = &cam_vfe780_hw_info, + }, + { + .compatible = "qcom,vfe880", + .data = &cam_vfe880_hw_info, + }, + { + .compatible = "qcom,mc_tfe980", + .data = &cam_tfe980_hw_info, + }, + { + .compatible = "qcom,mc_tfe970", + .data = &cam_tfe970_hw_info, + }, + { + .compatible = "qcom,mc_tfe975", + .data = &cam_tfe975_hw_info, + }, + { + .compatible = "qcom,mc_tfe1080", + .data = &cam_tfe1080_hw_info, + }, + { + .compatible = "qcom,vfe-lite170", + .data = &cam_vfe_lite17x_hw_info, + }, + 
{ + .compatible = "qcom,vfe-lite175", + .data = &cam_vfe_lite17x_hw_info, + }, + { + .compatible = "qcom,vfe-lite480", + .data = &cam_vfe_lite48x_hw_info, + }, + { + .compatible = "qcom,vfe-lite570", + .data = &cam_vfe_lite48x_hw_info, + }, + { + .compatible = "qcom,vfe-lite580", + .data = &cam_vfe_lite48x_hw_info, + }, + { + .compatible = "qcom,vfe-lite680", + .data = &cam_vfe_lite68x_hw_info, + }, + { + .compatible = "qcom,vfe-lite680_110", + .data = &cam_vfe_lite68x_hw_info, + }, + { + .compatible = "qcom,vfe-lite780", + .data = &cam_vfe_lite78x_hw_info, + }, + { + .compatible = "qcom,vfe-lite880", + .data = &cam_vfe_lite88x_hw_info, + }, + { + .compatible = "qcom,vfe-lite980", + .data = &cam_vfe_lite98x_hw_info, + }, + { + .compatible = "qcom,vfe-lite970", + .data = &cam_vfe_lite97x_hw_info, + }, + { + .compatible = "qcom,vfe-lite975", + .data = &cam_vfe_lite97x_hw_info, + }, + { + .compatible = "qcom,vfe-lite1080", + .data = &cam_vfe_lite108x_hw_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_vfe_dt_match); + +struct platform_driver cam_vfe_driver = { + .probe = cam_vfe_probe, + .remove = cam_vfe_remove, + .driver = { + .name = "cam_vfe", + .owner = THIS_MODULE, + .of_match_table = cam_vfe_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_vfe_init_module(void) +{ + return platform_driver_register(&cam_vfe_driver); +} + + +void cam_vfe_exit_module(void) +{ + platform_driver_unregister(&cam_vfe_driver); +} + +MODULE_DESCRIPTION("CAM VFE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe.h new file mode 100644 index 0000000000..5c2d1ef0eb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_H_ +#define _CAM_VFE_H_ + +/** + * @brief : API to register VFE hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_vfe_init_module(void); + +/** + * @brief : API to remove VFE Hw from platform framework. + */ +void cam_vfe_exit_module(void); + +#endif /* _CAM_VFE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h new file mode 100644 index 0000000000..1e13e9a9cd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h @@ -0,0 +1,894 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_VFE170_H_ +#define _CAM_VFE170_H_ + +#include "cam_vfe_camif_ver2.h" +#include "cam_vfe_bus_ver2.h" +#include "cam_irq_controller.h" +#include "cam_vfe_top_ver2.h" +#include "cam_vfe_core.h" + +static struct cam_irq_register_set vfe170_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x0000005C, + .clear_reg_offset = 0x00000064, + .status_reg_offset = 0x0000006C, + }, + { + .mask_reg_offset = 0x00000060, + .clear_reg_offset = 0x00000068, + .status_reg_offset = 0x00000070, + }, +}; + +static struct cam_irq_controller_reg_info vfe170_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe170_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000058, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_vfe_camif_ver2_reg vfe170_camif_reg = { + .camif_cmd = 0x00000478, + .camif_config = 0x0000047C, + .line_skip_pattern = 0x00000488, + .pixel_skip_pattern = 0x0000048C, + .skip_period = 0x00000490, + .irq_subsample_pattern = 0x0000049C, + .epoch_irq = 0x000004A0, + .raw_crop_width_cfg = 0x00000CE4, + .raw_crop_height_cfg = 0x00000CE8, + .reg_update_cmd = 
0x000004AC, + .vfe_diag_config = 0x00000C48, + .vfe_diag_sensor_status = 0x00000C4C, +}; + +static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = { + .raw_crop_first_pixel_shift = 16, + .raw_crop_first_pixel_mask = 0xFFFF, + .raw_crop_last_pixel_shift = 0x0, + .raw_crop_last_pixel_mask = 0x3FFF, + .raw_crop_first_line_shift = 16, + .raw_crop_first_line_mask = 0xFFFF, + .raw_crop_last_line_shift = 0, + .raw_crop_last_line_mask = 0x3FFF, + .input_mux_sel_shift = 5, + .input_mux_sel_mask = 0x3, + .extern_reg_update_shift = 4, + .extern_reg_update_mask = 1, + .pixel_pattern_shift = 0, + .pixel_pattern_mask = 0x7, + .dsp_mode_shift = 23, + .dsp_mode_mask = 0x1, + .dsp_en_shift = 3, + .dsp_en_mask = 0x1, + .reg_update_cmd_data = 0x1, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .reg_update_irq_mask = 0x00000010, + .eof_irq_mask = 0x00000002, + .error_irq_mask0 = 0x0003FC00, + .error_irq_mask1 = 0x0FFF7E80, + .enable_diagnostic_hw = 0x1, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = { + .reset = 0x0000001C, + .cgc_ovd = 0x0000002C, + .enable = 0x00000040, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_170_reg = { + .reset = 0x00000020, + .cgc_ovd = 0x00000030, + .enable = 0x00000044, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl color_170_reg = { + .reset = 0x00000024, + .cgc_ovd = 0x00000034, + .enable = 0x00000048, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_170_reg = { + .reset = 0x00000028, + .cgc_ovd = 0x00000038, + .enable = 0x0000004C, +}; + +static struct cam_vfe_top_ver2_reg_offset_common vfe170_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .global_reset_cmd = 0x00000018, + .module_ctrl = { + &lens_170_reg, + &stats_170_reg, + &color_170_reg, + &zoom_170_reg, + }, + 
.bus_cgc_ovd = 0x0000003C, + .core_cfg = 0x00000050, + .three_D_cfg = 0x00000054, + .violation_status = 0x0000007C, + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_ver2_reg vfe170_rdi_reg = { + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_reg_data vfe_170_rdi_0_data = { + .reg_update_cmd_data = 0x2, + .sof_irq_mask = 0x8000000, + .reg_update_irq_mask = 0x20, +}; + +static struct cam_vfe_rdi_reg_data vfe_170_rdi_1_data = { + .reg_update_cmd_data = 0x4, + .sof_irq_mask = 0x10000000, + .reg_update_irq_mask = 0x40, +}; + +static struct cam_vfe_rdi_reg_data vfe_170_rdi_2_data = { + .reg_update_cmd_data = 0x8, + .sof_irq_mask = 0x20000000, + .reg_update_irq_mask = 0x80, +}; + +struct cam_vfe_top_dump_data vfe170_dump_data = { + .num_reg_dump_entries = 2, + .num_lut_dump_entries = 1, + .dmi_cfg = 0xc24, + .dmi_addr = 0xc28, + .dmi_data_path_hi = 0xc2C, + .dmi_data_path_lo = 0xc30, + .reg_entry = { + { + .reg_dump_start = 0x0, + .reg_dump_end = 0x1164, + }, + { + .reg_dump_start = 0x2000, + .reg_dump_end = 0x397C, + }, + }, + .lut_entry = { + { + .lut_word_size = 64, + .lut_bank_sel = 0x40, + .lut_addr_size = 180, + }, + }, +}; + +static struct cam_vfe_rdi_overflow_status vfe170_rdi_irq_status = { + .rdi0_overflow_mask = 0x8, + .rdi1_overflow_mask = 0x10, + .rdi2_overflow_mask = 0x18, + .rdi3_overflow_mask = 0x20, + .rdi_overflow_mask = 0x3c, +}; + +static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = { + .common_reg = &vfe170_top_common_reg, + .camif_hw_info = { + .common_reg = &vfe170_top_common_reg, + .camif_reg = &vfe170_camif_reg, + .reg_data = &vfe_170_camif_reg_data, + }, + .camif_lite_hw_info = { + .common_reg = NULL, + .camif_lite_reg = NULL, + .reg_data = NULL, + }, + .rdi_hw_info = { + .common_reg = &vfe170_top_common_reg, + .rdi_reg = &vfe170_rdi_reg, + .rdi_irq_status = &vfe170_rdi_irq_status, + .reg_data = { + &vfe_170_rdi_0_data, + &vfe_170_rdi_1_data, + &vfe_170_rdi_2_data, + NULL, + }, + }, + .num_mux = 4, + 
.mux_type = { + CAM_VFE_CAMIF_VER_2_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .dump_data = &vfe170_dump_data, +}; + +static struct cam_irq_register_set vfe170_bus_irq_reg[3] = { + { + .mask_reg_offset = 0x00002044, + .clear_reg_offset = 0x00002050, + .status_reg_offset = 0x0000205C, + }, + { + .mask_reg_offset = 0x00002048, + .clear_reg_offset = 0x00002054, + .status_reg_offset = 0x00002060, + }, + { + .mask_reg_offset = 0x0000204C, + .clear_reg_offset = 0x00002058, + .status_reg_offset = 0x00002064, + }, +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_3 = { + .tile_cfg = 0x0000252C, + .h_init = 0x00002530, + .v_init = 0x00002534, + .meta_addr = 0x00002538, + .meta_offset = 0x0000253C, + .meta_stride = 0x00002540, + .mode_cfg_0 = 0x00002544, + .bw_limit = 0x000025A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_4 = { + .tile_cfg = 0x0000262C, + .h_init = 0x00002630, + .v_init = 0x00002634, + .meta_addr = 0x00002638, + .meta_offset = 0x0000263C, + .meta_stride = 0x00002640, + .mode_cfg_0 = 0x00002644, + .bw_limit = 0x000026A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = { + .common_reg = { + .hw_version = 0x00002000, + .hw_capability = 0x00002004, + .sw_reset = 0x00002008, + .cgc_ovd = 0x0000200C, + .pwr_iso_cfg = 0x000020CC, + .dual_master_comp_cfg = 0x00002028, + .irq_reg_info = { + .num_registers = 3, + .irq_reg_set = vfe170_bus_irq_reg, + .global_irq_cmd_offset = 0x00002068, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, + .comp_error_status = 0x0000206C, + .comp_ovrwr_status = 0x00002070, + .dual_comp_error_status = 0x00002074, + .dual_comp_ovrwr_status = 0x00002078, + .addr_sync_cfg = 0x0000207C, + .addr_sync_frame_hdr = 0x00002080, + .addr_sync_no_sync = 0x00002084, + .debug_status_cfg = 0x0000226C, + .debug_status_0 = 0x00002270, + .top_irq_mask_0 
= 0x0000005C, + }, + .num_client = 20, + .bus_client_reg = { + /* BUS Client 0 */ + { + .status0 = 0x00002200, + .status1 = 0x00002204, + .cfg = 0x00002208, + .header_addr = 0x0000220C, + .header_cfg = 0x00002210, + .image_addr = 0x00002214, + .image_addr_offset = 0x00002218, + .buffer_width_cfg = 0x0000221C, + .buffer_height_cfg = 0x00002220, + .packer_cfg = 0x00002224, + .stride = 0x00002228, + .irq_subsample_period = 0x00002248, + .irq_subsample_pattern = 0x0000224C, + .framedrop_period = 0x00002250, + .framedrop_pattern = 0x00002254, + .frame_inc = 0x00002258, + .burst_limit = 0x0000225C, + .ubwc_regs = NULL, + }, + /* BUS Client 1 */ + { + .status0 = 0x00002300, + .status1 = 0x00002304, + .cfg = 0x00002308, + .header_addr = 0x0000230C, + .header_cfg = 0x00002310, + .image_addr = 0x00002314, + .image_addr_offset = 0x00002318, + .buffer_width_cfg = 0x0000231C, + .buffer_height_cfg = 0x00002320, + .packer_cfg = 0x00002324, + .stride = 0x00002328, + .irq_subsample_period = 0x00002348, + .irq_subsample_pattern = 0x0000234C, + .framedrop_period = 0x00002350, + .framedrop_pattern = 0x00002354, + .frame_inc = 0x00002358, + .burst_limit = 0x0000235C, + .ubwc_regs = NULL, + }, + /* BUS Client 2 */ + { + .status0 = 0x00002400, + .status1 = 0x00002404, + .cfg = 0x00002408, + .header_addr = 0x0000240C, + .header_cfg = 0x00002410, + .image_addr = 0x00002414, + .image_addr_offset = 0x00002418, + .buffer_width_cfg = 0x0000241C, + .buffer_height_cfg = 0x00002420, + .packer_cfg = 0x00002424, + .stride = 0x00002428, + .irq_subsample_period = 0x00002448, + .irq_subsample_pattern = 0x0000244C, + .framedrop_period = 0x00002450, + .framedrop_pattern = 0x00002454, + .frame_inc = 0x00002458, + .burst_limit = 0x0000245C, + .ubwc_regs = NULL, + }, + /* BUS Client 3 */ + { + .status0 = 0x00002500, + .status1 = 0x00002504, + .cfg = 0x00002508, + .header_addr = 0x0000250C, + .header_cfg = 0x00002510, + .image_addr = 0x00002514, + .image_addr_offset = 0x00002518, + .buffer_width_cfg = 
0x0000251C, + .buffer_height_cfg = 0x00002520, + .packer_cfg = 0x00002524, + .stride = 0x00002528, + .irq_subsample_period = 0x00002548, + .irq_subsample_pattern = 0x0000254C, + .framedrop_period = 0x00002550, + .framedrop_pattern = 0x00002554, + .frame_inc = 0x00002558, + .burst_limit = 0x0000255C, + .ubwc_regs = &ubwc_regs_client_3, + }, + /* BUS Client 4 */ + { + .status0 = 0x00002600, + .status1 = 0x00002604, + .cfg = 0x00002608, + .header_addr = 0x0000260C, + .header_cfg = 0x00002610, + .image_addr = 0x00002614, + .image_addr_offset = 0x00002618, + .buffer_width_cfg = 0x0000261C, + .buffer_height_cfg = 0x00002620, + .packer_cfg = 0x00002624, + .stride = 0x00002628, + .irq_subsample_period = 0x00002648, + .irq_subsample_pattern = 0x0000264C, + .framedrop_period = 0x00002650, + .framedrop_pattern = 0x00002654, + .frame_inc = 0x00002658, + .burst_limit = 0x0000265C, + .ubwc_regs = &ubwc_regs_client_4, + }, + /* BUS Client 5 */ + { + .status0 = 0x00002700, + .status1 = 0x00002704, + .cfg = 0x00002708, + .header_addr = 0x0000270C, + .header_cfg = 0x00002710, + .image_addr = 0x00002714, + .image_addr_offset = 0x00002718, + .buffer_width_cfg = 0x0000271C, + .buffer_height_cfg = 0x00002720, + .packer_cfg = 0x00002724, + .stride = 0x00002728, + .irq_subsample_period = 0x00002748, + .irq_subsample_pattern = 0x0000274C, + .framedrop_period = 0x00002750, + .framedrop_pattern = 0x00002754, + .frame_inc = 0x00002758, + .burst_limit = 0x0000275C, + .ubwc_regs = NULL, + }, + /* BUS Client 6 */ + { + .status0 = 0x00002800, + .status1 = 0x00002804, + .cfg = 0x00002808, + .header_addr = 0x0000280C, + .header_cfg = 0x00002810, + .image_addr = 0x00002814, + .image_addr_offset = 0x00002818, + .buffer_width_cfg = 0x0000281C, + .buffer_height_cfg = 0x00002820, + .packer_cfg = 0x00002824, + .stride = 0x00002828, + .irq_subsample_period = 0x00002848, + .irq_subsample_pattern = 0x0000284C, + .framedrop_period = 0x00002850, + .framedrop_pattern = 0x00002854, + .frame_inc = 0x00002858, + 
.burst_limit = 0x0000285C, + .ubwc_regs = NULL, + }, + /* BUS Client 7 */ + { + .status0 = 0x00002900, + .status1 = 0x00002904, + .cfg = 0x00002908, + .header_addr = 0x0000290C, + .header_cfg = 0x00002910, + .image_addr = 0x00002914, + .image_addr_offset = 0x00002918, + .buffer_width_cfg = 0x0000291C, + .buffer_height_cfg = 0x00002920, + .packer_cfg = 0x00002924, + .stride = 0x00002928, + .irq_subsample_period = 0x00002948, + .irq_subsample_pattern = 0x0000294C, + .framedrop_period = 0x00002950, + .framedrop_pattern = 0x00002954, + .frame_inc = 0x00002958, + .burst_limit = 0x0000295C, + .ubwc_regs = NULL, + }, + /* BUS Client 8 */ + { + .status0 = 0x00002A00, + .status1 = 0x00002A04, + .cfg = 0x00002A08, + .header_addr = 0x00002A0C, + .header_cfg = 0x00002A10, + .image_addr = 0x00002A14, + .image_addr_offset = 0x00002A18, + .buffer_width_cfg = 0x00002A1C, + .buffer_height_cfg = 0x00002A20, + .packer_cfg = 0x00002A24, + .stride = 0x00002A28, + .irq_subsample_period = 0x00002A48, + .irq_subsample_pattern = 0x00002A4C, + .framedrop_period = 0x00002A50, + .framedrop_pattern = 0x00002A54, + .frame_inc = 0x00002A58, + .burst_limit = 0x00002A5C, + .ubwc_regs = NULL, + }, + /* BUS Client 9 */ + { + .status0 = 0x00002B00, + .status1 = 0x00002B04, + .cfg = 0x00002B08, + .header_addr = 0x00002B0C, + .header_cfg = 0x00002B10, + .image_addr = 0x00002B14, + .image_addr_offset = 0x00002B18, + .buffer_width_cfg = 0x00002B1C, + .buffer_height_cfg = 0x00002B20, + .packer_cfg = 0x00002B24, + .stride = 0x00002B28, + .irq_subsample_period = 0x00002B48, + .irq_subsample_pattern = 0x00002B4C, + .framedrop_period = 0x00002B50, + .framedrop_pattern = 0x00002B54, + .frame_inc = 0x00002B58, + .burst_limit = 0x00002B5C, + .ubwc_regs = NULL, + }, + /* BUS Client 10 */ + { + .status0 = 0x00002C00, + .status1 = 0x00002C04, + .cfg = 0x00002C08, + .header_addr = 0x00002C0C, + .header_cfg = 0x00002C10, + .image_addr = 0x00002C14, + .image_addr_offset = 0x00002C18, + .buffer_width_cfg = 0x00002C1C, 
+ .buffer_height_cfg = 0x00002C20, + .packer_cfg = 0x00002C24, + .stride = 0x00002C28, + .irq_subsample_period = 0x00002C48, + .irq_subsample_pattern = 0x00002C4C, + .framedrop_period = 0x00002C50, + .framedrop_pattern = 0x00002C54, + .frame_inc = 0x00002C58, + .burst_limit = 0x00002C5C, + .ubwc_regs = NULL, + }, + /* BUS Client 11 */ + { + .status0 = 0x00002D00, + .status1 = 0x00002D04, + .cfg = 0x00002D08, + .header_addr = 0x00002D0C, + .header_cfg = 0x00002D10, + .image_addr = 0x00002D14, + .image_addr_offset = 0x00002D18, + .buffer_width_cfg = 0x00002D1C, + .buffer_height_cfg = 0x00002D20, + .packer_cfg = 0x00002D24, + .stride = 0x00002D28, + .irq_subsample_period = 0x00002D48, + .irq_subsample_pattern = 0x00002D4C, + .framedrop_period = 0x00002D50, + .framedrop_pattern = 0x00002D54, + .frame_inc = 0x00002D58, + .burst_limit = 0x00002D5C, + .ubwc_regs = NULL, + }, + /* BUS Client 12 */ + { + .status0 = 0x00002E00, + .status1 = 0x00002E04, + .cfg = 0x00002E08, + .header_addr = 0x00002E0C, + .header_cfg = 0x00002E10, + .image_addr = 0x00002E14, + .image_addr_offset = 0x00002E18, + .buffer_width_cfg = 0x00002E1C, + .buffer_height_cfg = 0x00002E20, + .packer_cfg = 0x00002E24, + .stride = 0x00002E28, + .irq_subsample_period = 0x00002E48, + .irq_subsample_pattern = 0x00002E4C, + .framedrop_period = 0x00002E50, + .framedrop_pattern = 0x00002E54, + .frame_inc = 0x00002E58, + .burst_limit = 0x00002E5C, + .ubwc_regs = NULL, + }, + /* BUS Client 13 */ + { + .status0 = 0x00002F00, + .status1 = 0x00002F04, + .cfg = 0x00002F08, + .header_addr = 0x00002F0C, + .header_cfg = 0x00002F10, + .image_addr = 0x00002F14, + .image_addr_offset = 0x00002F18, + .buffer_width_cfg = 0x00002F1C, + .buffer_height_cfg = 0x00002F20, + .packer_cfg = 0x00002F24, + .stride = 0x00002F28, + .irq_subsample_period = 0x00002F48, + .irq_subsample_pattern = 0x00002F4C, + .framedrop_period = 0x00002F50, + .framedrop_pattern = 0x00002F54, + .frame_inc = 0x00002F58, + .burst_limit = 0x00002F5C, + .ubwc_regs 
= NULL, + }, + /* BUS Client 14 */ + { + .status0 = 0x00003000, + .status1 = 0x00003004, + .cfg = 0x00003008, + .header_addr = 0x0000300C, + .header_cfg = 0x00003010, + .image_addr = 0x00003014, + .image_addr_offset = 0x00003018, + .buffer_width_cfg = 0x0000301C, + .buffer_height_cfg = 0x00003020, + .packer_cfg = 0x00003024, + .stride = 0x00003028, + .irq_subsample_period = 0x00003048, + .irq_subsample_pattern = 0x0000304C, + .framedrop_period = 0x00003050, + .framedrop_pattern = 0x00003054, + .frame_inc = 0x00003058, + .burst_limit = 0x0000305C, + .ubwc_regs = NULL, + }, + /* BUS Client 15 */ + { + .status0 = 0x00003100, + .status1 = 0x00003104, + .cfg = 0x00003108, + .header_addr = 0x0000310C, + .header_cfg = 0x00003110, + .image_addr = 0x00003114, + .image_addr_offset = 0x00003118, + .buffer_width_cfg = 0x0000311C, + .buffer_height_cfg = 0x00003120, + .packer_cfg = 0x00003124, + .stride = 0x00003128, + .irq_subsample_period = 0x00003148, + .irq_subsample_pattern = 0x0000314C, + .framedrop_period = 0x00003150, + .framedrop_pattern = 0x00003154, + .frame_inc = 0x00003158, + .burst_limit = 0x0000315C, + .ubwc_regs = NULL, + }, + /* BUS Client 16 */ + { + .status0 = 0x00003200, + .status1 = 0x00003204, + .cfg = 0x00003208, + .header_addr = 0x0000320C, + .header_cfg = 0x00003210, + .image_addr = 0x00003214, + .image_addr_offset = 0x00003218, + .buffer_width_cfg = 0x0000321C, + .buffer_height_cfg = 0x00003220, + .packer_cfg = 0x00003224, + .stride = 0x00003228, + .irq_subsample_period = 0x00003248, + .irq_subsample_pattern = 0x0000324C, + .framedrop_period = 0x00003250, + .framedrop_pattern = 0x00003254, + .frame_inc = 0x00003258, + .burst_limit = 0x0000325C, + .ubwc_regs = NULL, + }, + /* BUS Client 17 */ + { + .status0 = 0x00003300, + .status1 = 0x00003304, + .cfg = 0x00003308, + .header_addr = 0x0000330C, + .header_cfg = 0x00003310, + .image_addr = 0x00003314, + .image_addr_offset = 0x00003318, + .buffer_width_cfg = 0x0000331C, + .buffer_height_cfg = 0x00003320, + 
.packer_cfg = 0x00003324, + .stride = 0x00003328, + .irq_subsample_period = 0x00003348, + .irq_subsample_pattern = 0x0000334C, + .framedrop_period = 0x00003350, + .framedrop_pattern = 0x00003354, + .frame_inc = 0x00003358, + .burst_limit = 0x0000335C, + .ubwc_regs = NULL, + }, + /* BUS Client 18 */ + { + .status0 = 0x00003400, + .status1 = 0x00003404, + .cfg = 0x00003408, + .header_addr = 0x0000340C, + .header_cfg = 0x00003410, + .image_addr = 0x00003414, + .image_addr_offset = 0x00003418, + .buffer_width_cfg = 0x0000341C, + .buffer_height_cfg = 0x00003420, + .packer_cfg = 0x00003424, + .stride = 0x00003428, + .irq_subsample_period = 0x00003448, + .irq_subsample_pattern = 0x0000344C, + .framedrop_period = 0x00003450, + .framedrop_pattern = 0x00003454, + .frame_inc = 0x00003458, + .burst_limit = 0x0000345C, + .ubwc_regs = NULL, + }, + /* BUS Client 19 */ + { + .status0 = 0x00003500, + .status1 = 0x00003504, + .cfg = 0x00003508, + .header_addr = 0x0000350C, + .header_cfg = 0x00003510, + .image_addr = 0x00003514, + .image_addr_offset = 0x00003518, + .buffer_width_cfg = 0x0000351C, + .buffer_height_cfg = 0x00003520, + .packer_cfg = 0x00003524, + .stride = 0x00003528, + .irq_subsample_period = 0x00003548, + .irq_subsample_pattern = 0x0000354C, + .framedrop_period = 0x00003550, + .framedrop_pattern = 0x00003554, + .frame_inc = 0x00003558, + .burst_limit = 0x0000355C, + .ubwc_regs = NULL, + }, + }, + .comp_grp_reg = { + /* CAM_VFE_BUS_VER2_COMP_GRP_0 */ + { + .comp_mask = 0x00002010, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_1 */ + { + .comp_mask = 0x00002014, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_2 */ + { + .comp_mask = 0x00002018, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_3 */ + { + .comp_mask = 0x0000201C, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_4 */ + { + .comp_mask = 0x00002020, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_5 */ + { + .comp_mask = 0x00002024, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */ + { + .comp_mask = 0x0000202C, + .addr_sync_mask = 0x00002088, + }, + /* 
CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */ + { + .comp_mask = 0x00002030, + .addr_sync_mask = 0x0000208C, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */ + { + .comp_mask = 0x00002034, + .addr_sync_mask = 0x00002090, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */ + { + .comp_mask = 0x00002038, + .addr_sync_mask = 0x00002094, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */ + { + .comp_mask = 0x0000203C, + .addr_sync_mask = 0x00002098, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */ + { + .comp_mask = 0x00002040, + .addr_sync_mask = 0x0000209C, + }, + }, + .num_out = 18, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL, + .max_width = 4096, + .max_height = 4096, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FD, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_PDAF, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + 
.max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST, + .max_width = -1, + .max_height = -1, + }, + }, + .top_irq_shift = 9, + .support_consumed_addr = false, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 19, +}; + +static struct cam_vfe_irq_hw_info vfe170_irq_hw_info = { + .reset_mask = BIT(31), + .supported_irq = CAM_VFE_HW_IRQ_CAP_INT_CSID, + .top_irq_reg = &vfe170_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe170_hw_info = { + .irq_hw_info = &vfe170_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_2_0, + .bus_hw_info = &vfe170_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_2_0, + .top_hw_info = &vfe170_top_hw_info, + + .camif_version = CAM_VFE_CAMIF_VER_2_0, + .camif_reg = &vfe170_camif_reg, + + .camif_lite_version = 0, + .camif_lite_reg = NULL, + +}; + +#endif /* _CAM_VFE170_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170_150.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170_150.h new file mode 100644 index 0000000000..4ecdce2ad9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170_150.h @@ -0,0 +1,862 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE170_150_H_ +#define _CAM_VFE170_150_H_ + +#include "cam_vfe_camif_ver2.h" +#include "cam_vfe_bus_ver2.h" +#include "cam_irq_controller.h" +#include "cam_vfe_top_ver2.h" +#include "cam_vfe_core.h" + +static struct cam_irq_register_set vfe170_150_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x0000005C, + .clear_reg_offset = 0x00000064, + .status_reg_offset = 0x0000006C, + }, + { + .mask_reg_offset = 0x00000060, + .clear_reg_offset = 0x00000068, + .status_reg_offset = 0x00000070, + }, +}; + +static struct cam_irq_controller_reg_info vfe170_150_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe170_150_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000058, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_vfe_camif_ver2_reg vfe170_150_camif_reg = { + .dual_vfe_sync = 0x00000100, + .camif_cmd = 0x00000478, + .camif_config = 0x0000047C, + .line_skip_pattern = 0x00000488, + .pixel_skip_pattern = 0x0000048C, + .skip_period = 0x00000490, + .irq_subsample_pattern = 0x0000049C, + .epoch_irq = 0x000004A0, + .raw_crop_width_cfg = 0x00000CE4, + .raw_crop_height_cfg = 0x00000CE8, + .reg_update_cmd = 0x000004AC, + .vfe_diag_config = 0x00000C48, + .vfe_diag_sensor_status = 0x00000C4C, +}; + +static struct cam_vfe_camif_reg_data vfe_170_150_camif_reg_data = { + .raw_crop_first_pixel_shift = 16, + .raw_crop_first_pixel_mask = 0xFFFF, + .raw_crop_last_pixel_shift = 0x0, + .raw_crop_last_pixel_mask = 0x3FFF, + .raw_crop_first_line_shift = 16, + .raw_crop_first_line_mask = 0xFFFF, + .raw_crop_last_line_shift = 0, + .raw_crop_last_line_mask = 0x3FFF, + .input_mux_sel_shift = 5, + .input_mux_sel_mask = 0x3, + .extern_reg_update_shift = 4, + .extern_reg_update_mask = 1, + .pixel_pattern_shift = 0, + .pixel_pattern_mask = 0x7, + .dsp_mode_shift = 23, + .dsp_mode_mask = 0x1, + .dsp_en_shift = 3, + .dsp_en_mask = 0x1, + .reg_update_cmd_data = 0x1, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 
0x00000001, + .epoch0_irq_mask = 0x00000004, + .reg_update_irq_mask = 0x00000010, + .eof_irq_mask = 0x00000002, + .error_irq_mask0 = 0x0003FC00, + .error_irq_mask1 = 0x0FFF7E80, + .enable_diagnostic_hw = 0x1, + .dual_vfe_sync_mask = 0x3, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_150_reg = { + .reset = 0x0000001C, + .cgc_ovd = 0x0000002C, + .enable = 0x00000040, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_170_150_reg = { + .reset = 0x00000020, + .cgc_ovd = 0x00000030, + .enable = 0x00000044, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl color_170_150_reg = { + .reset = 0x00000024, + .cgc_ovd = 0x00000034, + .enable = 0x00000048, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_170_150_reg = { + .reset = 0x00000028, + .cgc_ovd = 0x00000038, + .enable = 0x0000004C, +}; + +static struct cam_vfe_top_ver2_reg_offset_common vfe170_150_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .global_reset_cmd = 0x00000018, + .module_ctrl = { + &lens_170_150_reg, + &stats_170_150_reg, + &color_170_150_reg, + &zoom_170_150_reg, + }, + .bus_cgc_ovd = 0x0000003C, + .core_cfg = 0x00000050, + .three_D_cfg = 0x00000054, + .violation_status = 0x0000007C, + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_ver2_reg vfe170_150_rdi_reg = { + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_reg_data vfe_170_150_rdi_0_data = { + .reg_update_cmd_data = 0x2, + .sof_irq_mask = 0x8000000, + .reg_update_irq_mask = 0x20, +}; + +static struct cam_vfe_rdi_reg_data vfe_170_150_rdi_1_data = { + .reg_update_cmd_data = 0x4, + .sof_irq_mask = 0x10000000, + .reg_update_irq_mask = 0x40, +}; + +static struct cam_vfe_rdi_reg_data vfe_170_150_rdi_2_data = { + .reg_update_cmd_data = 0x8, + .sof_irq_mask = 0x20000000, + .reg_update_irq_mask = 0x80, +}; + 
+static struct cam_vfe_top_ver2_hw_info vfe170_150_top_hw_info = { + .common_reg = &vfe170_150_top_common_reg, + .camif_hw_info = { + .common_reg = &vfe170_150_top_common_reg, + .camif_reg = &vfe170_150_camif_reg, + .reg_data = &vfe_170_150_camif_reg_data, + }, + .camif_lite_hw_info = { + .common_reg = NULL, + .camif_lite_reg = NULL, + .reg_data = NULL, + }, + .rdi_hw_info = { + .common_reg = &vfe170_150_top_common_reg, + .rdi_reg = &vfe170_150_rdi_reg, + .reg_data = { + &vfe_170_150_rdi_0_data, + &vfe_170_150_rdi_1_data, + &vfe_170_150_rdi_2_data, + NULL, + }, + }, + .num_mux = 4, + .mux_type = { + CAM_VFE_CAMIF_VER_2_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, +}; + +static struct cam_irq_register_set vfe170_150_bus_irq_reg[3] = { + { + .mask_reg_offset = 0x00002044, + .clear_reg_offset = 0x00002050, + .status_reg_offset = 0x0000205C, + }, + { + .mask_reg_offset = 0x00002048, + .clear_reg_offset = 0x00002054, + .status_reg_offset = 0x00002060, + }, + { + .mask_reg_offset = 0x0000204C, + .clear_reg_offset = 0x00002058, + .status_reg_offset = 0x00002064, + }, +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_client + vfe_170_150_ubwc_regs_client_3 = { + .tile_cfg = 0x0000252C, + .h_init = 0x00002530, + .v_init = 0x00002534, + .meta_addr = 0x00002538, + .meta_offset = 0x0000253C, + .meta_stride = 0x00002540, + .mode_cfg_0 = 0x00002544, + .bw_limit = 0x000025A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_client + vfe_170_150_ubwc_regs_client_4 = { + .tile_cfg = 0x0000262C, + .h_init = 0x00002630, + .v_init = 0x00002634, + .meta_addr = 0x00002638, + .meta_offset = 0x0000263C, + .meta_stride = 0x00002640, + .mode_cfg_0 = 0x00002644, + .bw_limit = 0x000026A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_hw_info vfe170_150_bus_hw_info = { + .common_reg = { + .hw_version = 0x00002000, + .hw_capability = 0x00002004, + .sw_reset = 0x00002008, + .cgc_ovd = 0x0000200C, + 
.pwr_iso_cfg = 0x000020CC, + .dual_master_comp_cfg = 0x00002028, + .irq_reg_info = { + .num_registers = 3, + .irq_reg_set = vfe170_150_bus_irq_reg, + .global_irq_cmd_offset = 0x00002068, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, + .comp_error_status = 0x0000206C, + .comp_ovrwr_status = 0x00002070, + .dual_comp_error_status = 0x00002074, + .dual_comp_ovrwr_status = 0x00002078, + .addr_sync_cfg = 0x0000207C, + .addr_sync_frame_hdr = 0x00002080, + .addr_sync_no_sync = 0x00002084, + .debug_status_cfg = 0x0000226C, + .debug_status_0 = 0x00002270, + }, + .num_client = 20, + .bus_client_reg = { + /* BUS Client 0 */ + { + .status0 = 0x00002200, + .status1 = 0x00002204, + .cfg = 0x00002208, + .header_addr = 0x0000220C, + .header_cfg = 0x00002210, + .image_addr = 0x00002214, + .image_addr_offset = 0x00002218, + .buffer_width_cfg = 0x0000221C, + .buffer_height_cfg = 0x00002220, + .packer_cfg = 0x00002224, + .stride = 0x00002228, + .irq_subsample_period = 0x00002248, + .irq_subsample_pattern = 0x0000224C, + .framedrop_period = 0x00002250, + .framedrop_pattern = 0x00002254, + .frame_inc = 0x00002258, + .burst_limit = 0x0000225C, + .ubwc_regs = NULL, + }, + /* BUS Client 1 */ + { + .status0 = 0x00002300, + .status1 = 0x00002304, + .cfg = 0x00002308, + .header_addr = 0x0000230C, + .header_cfg = 0x00002310, + .image_addr = 0x00002314, + .image_addr_offset = 0x00002318, + .buffer_width_cfg = 0x0000231C, + .buffer_height_cfg = 0x00002320, + .packer_cfg = 0x00002324, + .stride = 0x00002328, + .irq_subsample_period = 0x00002348, + .irq_subsample_pattern = 0x0000234C, + .framedrop_period = 0x00002350, + .framedrop_pattern = 0x00002354, + .frame_inc = 0x00002358, + .burst_limit = 0x0000235C, + .ubwc_regs = NULL, + }, + /* BUS Client 2 */ + { + .status0 = 0x00002400, + .status1 = 0x00002404, + .cfg = 0x00002408, + .header_addr = 0x0000240C, + .header_cfg = 0x00002410, + .image_addr = 0x00002414, + .image_addr_offset = 0x00002418, + .buffer_width_cfg = 
0x0000241C, + .buffer_height_cfg = 0x00002420, + .packer_cfg = 0x00002424, + .stride = 0x00002428, + .irq_subsample_period = 0x00002448, + .irq_subsample_pattern = 0x0000244C, + .framedrop_period = 0x00002450, + .framedrop_pattern = 0x00002454, + .frame_inc = 0x00002458, + .burst_limit = 0x0000245C, + .ubwc_regs = NULL, + }, + /* BUS Client 3 */ + { + .status0 = 0x00002500, + .status1 = 0x00002504, + .cfg = 0x00002508, + .header_addr = 0x0000250C, + .header_cfg = 0x00002510, + .image_addr = 0x00002514, + .image_addr_offset = 0x00002518, + .buffer_width_cfg = 0x0000251C, + .buffer_height_cfg = 0x00002520, + .packer_cfg = 0x00002524, + .stride = 0x00002528, + .irq_subsample_period = 0x00002548, + .irq_subsample_pattern = 0x0000254C, + .framedrop_period = 0x00002550, + .framedrop_pattern = 0x00002554, + .frame_inc = 0x00002558, + .burst_limit = 0x0000255C, + .ubwc_regs = + &vfe_170_150_ubwc_regs_client_3, + }, + /* BUS Client 4 */ + { + .status0 = 0x00002600, + .status1 = 0x00002604, + .cfg = 0x00002608, + .header_addr = 0x0000260C, + .header_cfg = 0x00002610, + .image_addr = 0x00002614, + .image_addr_offset = 0x00002618, + .buffer_width_cfg = 0x0000261C, + .buffer_height_cfg = 0x00002620, + .packer_cfg = 0x00002624, + .stride = 0x00002628, + .irq_subsample_period = 0x00002648, + .irq_subsample_pattern = 0x0000264C, + .framedrop_period = 0x00002650, + .framedrop_pattern = 0x00002654, + .frame_inc = 0x00002658, + .burst_limit = 0x0000265C, + .ubwc_regs = + &vfe_170_150_ubwc_regs_client_4, + }, + /* BUS Client 5 */ + { + .status0 = 0x00002700, + .status1 = 0x00002704, + .cfg = 0x00002708, + .header_addr = 0x0000270C, + .header_cfg = 0x00002710, + .image_addr = 0x00002714, + .image_addr_offset = 0x00002718, + .buffer_width_cfg = 0x0000271C, + .buffer_height_cfg = 0x00002720, + .packer_cfg = 0x00002724, + .stride = 0x00002728, + .irq_subsample_period = 0x00002748, + .irq_subsample_pattern = 0x0000274C, + .framedrop_period = 0x00002750, + .framedrop_pattern = 0x00002754, + 
.frame_inc = 0x00002758, + .burst_limit = 0x0000275C, + .ubwc_regs = NULL, + }, + /* BUS Client 6 */ + { + .status0 = 0x00002800, + .status1 = 0x00002804, + .cfg = 0x00002808, + .header_addr = 0x0000280C, + .header_cfg = 0x00002810, + .image_addr = 0x00002814, + .image_addr_offset = 0x00002818, + .buffer_width_cfg = 0x0000281C, + .buffer_height_cfg = 0x00002820, + .packer_cfg = 0x00002824, + .stride = 0x00002828, + .irq_subsample_period = 0x00002848, + .irq_subsample_pattern = 0x0000284C, + .framedrop_period = 0x00002850, + .framedrop_pattern = 0x00002854, + .frame_inc = 0x00002858, + .burst_limit = 0x0000285C, + .ubwc_regs = NULL, + }, + /* BUS Client 7 */ + { + .status0 = 0x00002900, + .status1 = 0x00002904, + .cfg = 0x00002908, + .header_addr = 0x0000290C, + .header_cfg = 0x00002910, + .image_addr = 0x00002914, + .image_addr_offset = 0x00002918, + .buffer_width_cfg = 0x0000291C, + .buffer_height_cfg = 0x00002920, + .packer_cfg = 0x00002924, + .stride = 0x00002928, + .irq_subsample_period = 0x00002948, + .irq_subsample_pattern = 0x0000294C, + .framedrop_period = 0x00002950, + .framedrop_pattern = 0x00002954, + .frame_inc = 0x00002958, + .burst_limit = 0x0000295C, + .ubwc_regs = NULL, + }, + /* BUS Client 8 */ + { + .status0 = 0x00002A00, + .status1 = 0x00002A04, + .cfg = 0x00002A08, + .header_addr = 0x00002A0C, + .header_cfg = 0x00002A10, + .image_addr = 0x00002A14, + .image_addr_offset = 0x00002A18, + .buffer_width_cfg = 0x00002A1C, + .buffer_height_cfg = 0x00002A20, + .packer_cfg = 0x00002A24, + .stride = 0x00002A28, + .irq_subsample_period = 0x00002A48, + .irq_subsample_pattern = 0x00002A4C, + .framedrop_period = 0x00002A50, + .framedrop_pattern = 0x00002A54, + .frame_inc = 0x00002A58, + .burst_limit = 0x00002A5C, + .ubwc_regs = NULL, + }, + /* BUS Client 9 */ + { + .status0 = 0x00002B00, + .status1 = 0x00002B04, + .cfg = 0x00002B08, + .header_addr = 0x00002B0C, + .header_cfg = 0x00002B10, + .image_addr = 0x00002B14, + .image_addr_offset = 0x00002B18, + 
.buffer_width_cfg = 0x00002B1C, + .buffer_height_cfg = 0x00002B20, + .packer_cfg = 0x00002B24, + .stride = 0x00002B28, + .irq_subsample_period = 0x00002B48, + .irq_subsample_pattern = 0x00002B4C, + .framedrop_period = 0x00002B50, + .framedrop_pattern = 0x00002B54, + .frame_inc = 0x00002B58, + .burst_limit = 0x00002B5C, + .ubwc_regs = NULL, + }, + /* BUS Client 10 */ + { + .status0 = 0x00002C00, + .status1 = 0x00002C04, + .cfg = 0x00002C08, + .header_addr = 0x00002C0C, + .header_cfg = 0x00002C10, + .image_addr = 0x00002C14, + .image_addr_offset = 0x00002C18, + .buffer_width_cfg = 0x00002C1C, + .buffer_height_cfg = 0x00002C20, + .packer_cfg = 0x00002C24, + .stride = 0x00002C28, + .irq_subsample_period = 0x00002C48, + .irq_subsample_pattern = 0x00002C4C, + .framedrop_period = 0x00002C50, + .framedrop_pattern = 0x00002C54, + .frame_inc = 0x00002C58, + .burst_limit = 0x00002C5C, + .ubwc_regs = NULL, + }, + /* BUS Client 11 */ + { + .status0 = 0x00002D00, + .status1 = 0x00002D04, + .cfg = 0x00002D08, + .header_addr = 0x00002D0C, + .header_cfg = 0x00002D10, + .image_addr = 0x00002D14, + .image_addr_offset = 0x00002D18, + .buffer_width_cfg = 0x00002D1C, + .buffer_height_cfg = 0x00002D20, + .packer_cfg = 0x00002D24, + .stride = 0x00002D28, + .irq_subsample_period = 0x00002D48, + .irq_subsample_pattern = 0x00002D4C, + .framedrop_period = 0x00002D50, + .framedrop_pattern = 0x00002D54, + .frame_inc = 0x00002D58, + .burst_limit = 0x00002D5C, + .ubwc_regs = NULL, + }, + /* BUS Client 12 */ + { + .status0 = 0x00002E00, + .status1 = 0x00002E04, + .cfg = 0x00002E08, + .header_addr = 0x00002E0C, + .header_cfg = 0x00002E10, + .image_addr = 0x00002E14, + .image_addr_offset = 0x00002E18, + .buffer_width_cfg = 0x00002E1C, + .buffer_height_cfg = 0x00002E20, + .packer_cfg = 0x00002E24, + .stride = 0x00002E28, + .irq_subsample_period = 0x00002E48, + .irq_subsample_pattern = 0x00002E4C, + .framedrop_period = 0x00002E50, + .framedrop_pattern = 0x00002E54, + .frame_inc = 0x00002E58, + 
.burst_limit = 0x00002E5C, + .ubwc_regs = NULL, + }, + /* BUS Client 13 */ + { + .status0 = 0x00002F00, + .status1 = 0x00002F04, + .cfg = 0x00002F08, + .header_addr = 0x00002F0C, + .header_cfg = 0x00002F10, + .image_addr = 0x00002F14, + .image_addr_offset = 0x00002F18, + .buffer_width_cfg = 0x00002F1C, + .buffer_height_cfg = 0x00002F20, + .packer_cfg = 0x00002F24, + .stride = 0x00002F28, + .irq_subsample_period = 0x00002F48, + .irq_subsample_pattern = 0x00002F4C, + .framedrop_period = 0x00002F50, + .framedrop_pattern = 0x00002F54, + .frame_inc = 0x00002F58, + .burst_limit = 0x00002F5C, + .ubwc_regs = NULL, + }, + /* BUS Client 14 */ + { + .status0 = 0x00003000, + .status1 = 0x00003004, + .cfg = 0x00003008, + .header_addr = 0x0000300C, + .header_cfg = 0x00003010, + .image_addr = 0x00003014, + .image_addr_offset = 0x00003018, + .buffer_width_cfg = 0x0000301C, + .buffer_height_cfg = 0x00003020, + .packer_cfg = 0x00003024, + .stride = 0x00003028, + .irq_subsample_period = 0x00003048, + .irq_subsample_pattern = 0x0000304C, + .framedrop_period = 0x00003050, + .framedrop_pattern = 0x00003054, + .frame_inc = 0x00003058, + .burst_limit = 0x0000305C, + .ubwc_regs = NULL, + }, + /* BUS Client 15 */ + { + .status0 = 0x00003100, + .status1 = 0x00003104, + .cfg = 0x00003108, + .header_addr = 0x0000310C, + .header_cfg = 0x00003110, + .image_addr = 0x00003114, + .image_addr_offset = 0x00003118, + .buffer_width_cfg = 0x0000311C, + .buffer_height_cfg = 0x00003120, + .packer_cfg = 0x00003124, + .stride = 0x00003128, + .irq_subsample_period = 0x00003148, + .irq_subsample_pattern = 0x0000314C, + .framedrop_period = 0x00003150, + .framedrop_pattern = 0x00003154, + .frame_inc = 0x00003158, + .burst_limit = 0x0000315C, + .ubwc_regs = NULL, + }, + /* BUS Client 16 */ + { + .status0 = 0x00003200, + .status1 = 0x00003204, + .cfg = 0x00003208, + .header_addr = 0x0000320C, + .header_cfg = 0x00003210, + .image_addr = 0x00003214, + .image_addr_offset = 0x00003218, + .buffer_width_cfg = 
0x0000321C, + .buffer_height_cfg = 0x00003220, + .packer_cfg = 0x00003224, + .stride = 0x00003228, + .irq_subsample_period = 0x00003248, + .irq_subsample_pattern = 0x0000324C, + .framedrop_period = 0x00003250, + .framedrop_pattern = 0x00003254, + .frame_inc = 0x00003258, + .burst_limit = 0x0000325C, + .ubwc_regs = NULL, + }, + /* BUS Client 17 */ + { + .status0 = 0x00003300, + .status1 = 0x00003304, + .cfg = 0x00003308, + .header_addr = 0x0000330C, + .header_cfg = 0x00003310, + .image_addr = 0x00003314, + .image_addr_offset = 0x00003318, + .buffer_width_cfg = 0x0000331C, + .buffer_height_cfg = 0x00003320, + .packer_cfg = 0x00003324, + .stride = 0x00003328, + .irq_subsample_period = 0x00003348, + .irq_subsample_pattern = 0x0000334C, + .framedrop_period = 0x00003350, + .framedrop_pattern = 0x00003354, + .frame_inc = 0x00003358, + .burst_limit = 0x0000335C, + .ubwc_regs = NULL, + }, + /* BUS Client 18 */ + { + .status0 = 0x00003400, + .status1 = 0x00003404, + .cfg = 0x00003408, + .header_addr = 0x0000340C, + .header_cfg = 0x00003410, + .image_addr = 0x00003414, + .image_addr_offset = 0x00003418, + .buffer_width_cfg = 0x0000341C, + .buffer_height_cfg = 0x00003420, + .packer_cfg = 0x00003424, + .stride = 0x00003428, + .irq_subsample_period = 0x00003448, + .irq_subsample_pattern = 0x0000344C, + .framedrop_period = 0x00003450, + .framedrop_pattern = 0x00003454, + .frame_inc = 0x00003458, + .burst_limit = 0x0000345C, + .ubwc_regs = NULL, + }, + /* BUS Client 19 */ + { + .status0 = 0x00003500, + .status1 = 0x00003504, + .cfg = 0x00003508, + .header_addr = 0x0000350C, + .header_cfg = 0x00003510, + .image_addr = 0x00003514, + .image_addr_offset = 0x00003518, + .buffer_width_cfg = 0x0000351C, + .buffer_height_cfg = 0x00003520, + .packer_cfg = 0x00003524, + .stride = 0x00003528, + .irq_subsample_period = 0x00003548, + .irq_subsample_pattern = 0x0000354C, + .framedrop_period = 0x00003550, + .framedrop_pattern = 0x00003554, + .frame_inc = 0x00003558, + .burst_limit = 0x0000355C, 
+ .ubwc_regs = NULL, + }, + }, + .comp_grp_reg = { + /* CAM_VFE_BUS_VER2_COMP_GRP_0 */ + { + .comp_mask = 0x00002010, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_1 */ + { + .comp_mask = 0x00002014, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_2 */ + { + .comp_mask = 0x00002018, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_3 */ + { + .comp_mask = 0x0000201C, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_4 */ + { + .comp_mask = 0x00002020, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_5 */ + { + .comp_mask = 0x00002024, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */ + { + .comp_mask = 0x0000202C, + .addr_sync_mask = 0x00002088, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */ + { + .comp_mask = 0x00002030, + .addr_sync_mask = 0x0000208C, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */ + { + .comp_mask = 0x00002034, + .addr_sync_mask = 0x00002090, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */ + { + .comp_mask = 0x00002038, + .addr_sync_mask = 0x00002094, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */ + { + .comp_mask = 0x0000203C, + .addr_sync_mask = 0x00002098, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */ + { + .comp_mask = 0x00002040, + .addr_sync_mask = 0x0000209C, + }, + }, + .num_out = 18, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL, + .max_width = 4096, + .max_height = 4096, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FD, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type 
= CAM_VFE_BUS_VER2_VFE_OUT_PDAF, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST, + .max_width = -1, + .max_height = -1, + }, + }, + .support_consumed_addr = false, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 19, +}; + +static struct cam_vfe_irq_hw_info vfe170_150_irq_hw_info = { + .reset_mask = BIT(31), + .supported_irq = CAM_VFE_HW_IRQ_CAP_INT_CSID, + .top_irq_reg = &vfe170_150_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe170_150_hw_info = { + .irq_hw_info = &vfe170_150_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_2_0, + .bus_hw_info = &vfe170_150_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_2_0, + .top_hw_info = &vfe170_150_top_hw_info, + + .camif_version = CAM_VFE_CAMIF_VER_2_0, + .camif_reg = &vfe170_150_camif_reg, + + .camif_lite_version = 0, + .camif_lite_reg = NULL, + +}; + +#endif /* _CAM_vfe170_150_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h new file mode 100644 index 
0000000000..98b8abe7be --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h @@ -0,0 +1,1057 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_VFE175_H_ +#define _CAM_VFE175_H_ + +#include "cam_vfe_camif_ver2.h" +#include "cam_vfe_camif_lite_ver2.h" +#include "cam_vfe_bus_ver2.h" +#include "cam_irq_controller.h" +#include "cam_vfe_top_ver2.h" +#include "cam_vfe_core.h" + +static struct cam_irq_register_set vfe175_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x0000005C, + .clear_reg_offset = 0x00000064, + .status_reg_offset = 0x0000006C, + }, + { + .mask_reg_offset = 0x00000060, + .clear_reg_offset = 0x00000068, + .status_reg_offset = 0x00000070, + }, +}; + +static struct cam_irq_controller_reg_info vfe175_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe175_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000058, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_vfe_camif_ver2_reg vfe175_camif_reg = { + .camif_cmd = 0x00000478, + .camif_config = 0x0000047C, + .line_skip_pattern = 0x00000488, + .pixel_skip_pattern = 0x0000048C, + .skip_period = 0x00000490, + .irq_subsample_pattern = 0x0000049C, + .epoch_irq = 0x000004A0, + .raw_crop_width_cfg = 0x00000CE4, + .raw_crop_height_cfg = 0x00000CE8, + .reg_update_cmd = 0x000004AC, + .vfe_diag_config = 0x00000C48, + .vfe_diag_sensor_status = 0x00000C4C, +}; + +static struct cam_vfe_camif_reg_data vfe_175_camif_reg_data = { + .raw_crop_first_pixel_shift = 16, + .raw_crop_first_pixel_mask = 0xFFFF, + .raw_crop_last_pixel_shift = 0x0, + .raw_crop_last_pixel_mask = 0x3FFF, + .raw_crop_first_line_shift = 16, + .raw_crop_first_line_mask = 0xFFFF, + .raw_crop_last_line_shift = 0, + .raw_crop_last_line_mask = 0x3FFF, + .input_mux_sel_shift = 5, + .input_mux_sel_mask = 0x3, + .extern_reg_update_shift = 4, + 
.extern_reg_update_mask = 1, + .pixel_pattern_shift = 0, + .pixel_pattern_mask = 0x7, + .dsp_mode_shift = 23, + .dsp_mode_mask = 0x1, + .dsp_en_shift = 3, + .dsp_en_mask = 0x1, + .reg_update_cmd_data = 0x1, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .reg_update_irq_mask = 0x00000010, + .eof_irq_mask = 0x00000002, + .error_irq_mask0 = 0x0003FC00, + .error_irq_mask1 = 0xEFFF7E80, + .subscribe_irq_mask0 = 0x00000017, + .subscribe_irq_mask1 = 0x00000000, + .enable_diagnostic_hw = 0x1, +}; + +static struct cam_vfe_camif_lite_ver2_reg vfe175_camif_lite_reg = { + .camif_lite_cmd = 0x00000FC0, + .camif_lite_config = 0x00000FC4, + .lite_skip_period = 0x00000FC8, + .lite_irq_subsample_pattern = 0x00000FCC, + .lite_epoch_irq = 0x00000FD0, + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_camif_lite_ver2_reg_data vfe175_camif_lite_reg_data = { + .dual_pd_reg_update_cmd_data = 0x20, + .lite_epoch_line_cfg = 0x00140014, + .lite_sof_irq_mask = 0x00040000, + .lite_epoch0_irq_mask = 0x00100000, + .dual_pd_reg_upd_irq_mask = 0x04000000, + .lite_eof_irq_mask = 0x00080000, + .lite_err_irq_mask0 = 0x00400000, + .lite_err_irq_mask1 = 0x00004100, + .lite_subscribe_irq_mask0 = 0x001C0000, + .lite_subscribe_irq_mask1 = 0x0, + .extern_reg_update_shift = 4, + .dual_pd_path_sel_shift = 24, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_175_reg = { + .reset = 0x0000001C, + .cgc_ovd = 0x0000002C, + .enable = 0x00000040, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_175_reg = { + .reset = 0x00000020, + .cgc_ovd = 0x00000030, + .enable = 0x00000044, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl color_175_reg = { + .reset = 0x00000024, + .cgc_ovd = 0x00000034, + .enable = 0x00000048, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_175_reg = { + .reset = 0x00000028, + .cgc_ovd = 0x00000038, + .enable = 0x0000004C, +}; + +static struct 
cam_vfe_top_ver2_reg_offset_common vfe175_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .global_reset_cmd = 0x00000018, + .module_ctrl = { + &lens_175_reg, + &stats_175_reg, + &color_175_reg, + &zoom_175_reg, + }, + .bus_cgc_ovd = 0x0000003C, + .core_cfg = 0x00000050, + .three_D_cfg = 0x00000054, + .violation_status = 0x0000007C, + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_ver2_reg vfe175_rdi_reg = { + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_common_reg_data vfe175_rdi_reg_data = { + .subscribe_irq_mask0 = 0x780001E0, + .subscribe_irq_mask1 = 0x0, + .error_irq_mask0 = 0x0, + .error_irq_mask1 = 0x3C, +}; + +static struct cam_vfe_rdi_reg_data vfe_175_rdi_0_data = { + .reg_update_cmd_data = 0x2, + .sof_irq_mask = 0x8000000, + .reg_update_irq_mask = 0x20, +}; + +static struct cam_vfe_rdi_reg_data vfe_175_rdi_1_data = { + .reg_update_cmd_data = 0x4, + .sof_irq_mask = 0x10000000, + .reg_update_irq_mask = 0x40, +}; + +static struct cam_vfe_rdi_reg_data vfe_175_rdi_2_data = { + .reg_update_cmd_data = 0x8, + .sof_irq_mask = 0x20000000, + .reg_update_irq_mask = 0x80, +}; + +struct cam_vfe_top_dump_data vfe175_dump_data = { + .num_reg_dump_entries = 2, + .num_lut_dump_entries = 1, + .dmi_cfg = 0xc24, + .dmi_addr = 0xc28, + .dmi_data_path_hi = 0xc2C, + .dmi_data_path_lo = 0xc30, + .reg_entry = { + { + .reg_dump_start = 0x0, + .reg_dump_end = 0x1164, + }, + { + .reg_dump_start = 0x2000, + .reg_dump_end = 0x397C, + }, + }, + .lut_entry = { + { + .lut_word_size = 64, + .lut_bank_sel = 0x40, + .lut_addr_size = 180, + }, + }, +}; + +static struct cam_vfe_top_ver2_hw_info vfe175_top_hw_info = { + .common_reg = &vfe175_top_common_reg, + .camif_hw_info = { + .common_reg = &vfe175_top_common_reg, + .camif_reg = &vfe175_camif_reg, + .reg_data = &vfe_175_camif_reg_data, + }, + 
.camif_lite_hw_info = { + .common_reg = &vfe175_top_common_reg, + .camif_lite_reg = &vfe175_camif_lite_reg, + .reg_data = &vfe175_camif_lite_reg_data, + }, + .rdi_hw_info = { + .common_reg = &vfe175_top_common_reg, + .rdi_reg = &vfe175_rdi_reg, + .common_reg_data = &vfe175_rdi_reg_data, + .reg_data = { + &vfe_175_rdi_0_data, + &vfe_175_rdi_1_data, + &vfe_175_rdi_2_data, + NULL, + }, + }, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_2_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_CAMIF_LITE_VER_2_0, + }, + .dump_data = &vfe175_dump_data, +}; + +static struct cam_irq_register_set vfe175_bus_irq_reg[3] = { + { + .mask_reg_offset = 0x00002044, + .clear_reg_offset = 0x00002050, + .status_reg_offset = 0x0000205C, + }, + { + .mask_reg_offset = 0x00002048, + .clear_reg_offset = 0x00002054, + .status_reg_offset = 0x00002060, + }, + { + .mask_reg_offset = 0x0000204C, + .clear_reg_offset = 0x00002058, + .status_reg_offset = 0x00002064, + }, +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client + vfe175_ubwc_regs_client_3 = { + .tile_cfg = 0x0000252C, + .h_init = 0x00002530, + .v_init = 0x00002534, + .meta_addr = 0x00002538, + .meta_offset = 0x0000253C, + .meta_stride = 0x00002540, + .mode_cfg_0 = 0x00002544, + .mode_cfg_1 = 0x000025A4, + .bw_limit = 0x000025A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client + vfe175_ubwc_regs_client_4 = { + .tile_cfg = 0x0000262C, + .h_init = 0x00002630, + .v_init = 0x00002634, + .meta_addr = 0x00002638, + .meta_offset = 0x0000263C, + .meta_stride = 0x00002640, + .mode_cfg_0 = 0x00002644, + .mode_cfg_1 = 0x000026A4, + .bw_limit = 0x000026A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client + vfe175_ubwc_regs_client_20 = { + .tile_cfg = 0x0000362C, + .h_init = 0x00003630, + .v_init = 0x00003634, + .meta_addr = 0x00003638, + .meta_offset = 0x0000363C, + .meta_stride = 0x00003640, + .mode_cfg_0 = 
0x00003644, + .mode_cfg_1 = 0x000036A4, + .bw_limit = 0x000036A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client + vfe175_ubwc_regs_client_21 = { + .tile_cfg = 0x0000372C, + .h_init = 0x00003730, + .v_init = 0x00003734, + .meta_addr = 0x00003738, + .meta_offset = 0x0000373C, + .meta_stride = 0x00003740, + .mode_cfg_0 = 0x00003744, + .mode_cfg_1 = 0x000037A4, + .bw_limit = 0x000037A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_hw_info vfe175_bus_hw_info = { + .common_reg = { + .hw_version = 0x00002000, + .hw_capability = 0x00002004, + .sw_reset = 0x00002008, + .cgc_ovd = 0x0000200C, + .pwr_iso_cfg = 0x000020CC, + .dual_master_comp_cfg = 0x00002028, + .irq_reg_info = { + .num_registers = 3, + .irq_reg_set = vfe175_bus_irq_reg, + .global_irq_cmd_offset = 0x00002068, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, + .comp_error_status = 0x0000206C, + .comp_ovrwr_status = 0x00002070, + .dual_comp_error_status = 0x00002074, + .dual_comp_ovrwr_status = 0x00002078, + .addr_sync_cfg = 0x0000207C, + .addr_sync_frame_hdr = 0x00002080, + .addr_sync_no_sync = 0x00002084, + .debug_status_cfg = 0x0000226C, + .debug_status_0 = 0x00002270, + .top_irq_mask_0 = 0x0000005C, + }, + .num_client = 24, + .bus_client_reg = { + /* BUS Client 0 */ + { + .status0 = 0x00002200, + .status1 = 0x00002204, + .cfg = 0x00002208, + .header_addr = 0x0000220C, + .header_cfg = 0x00002210, + .image_addr = 0x00002214, + .image_addr_offset = 0x00002218, + .buffer_width_cfg = 0x0000221C, + .buffer_height_cfg = 0x00002220, + .packer_cfg = 0x00002224, + .stride = 0x00002228, + .irq_subsample_period = 0x00002248, + .irq_subsample_pattern = 0x0000224C, + .framedrop_period = 0x00002250, + .framedrop_pattern = 0x00002254, + .frame_inc = 0x00002258, + .burst_limit = 0x0000225C, + .ubwc_regs = NULL, + }, + /* BUS Client 1 */ + { + .status0 = 0x00002300, + .status1 = 0x00002304, + .cfg = 0x00002308, + .header_addr = 
0x0000230C, + .header_cfg = 0x00002310, + .image_addr = 0x00002314, + .image_addr_offset = 0x00002318, + .buffer_width_cfg = 0x0000231C, + .buffer_height_cfg = 0x00002320, + .packer_cfg = 0x00002324, + .stride = 0x00002328, + .irq_subsample_period = 0x00002348, + .irq_subsample_pattern = 0x0000234C, + .framedrop_period = 0x00002350, + .framedrop_pattern = 0x00002354, + .frame_inc = 0x00002358, + .burst_limit = 0x0000235C, + .ubwc_regs = NULL, + }, + /* BUS Client 2 */ + { + .status0 = 0x00002400, + .status1 = 0x00002404, + .cfg = 0x00002408, + .header_addr = 0x0000240C, + .header_cfg = 0x00002410, + .image_addr = 0x00002414, + .image_addr_offset = 0x00002418, + .buffer_width_cfg = 0x0000241C, + .buffer_height_cfg = 0x00002420, + .packer_cfg = 0x00002424, + .stride = 0x00002428, + .irq_subsample_period = 0x00002448, + .irq_subsample_pattern = 0x0000244C, + .framedrop_period = 0x00002450, + .framedrop_pattern = 0x00002454, + .frame_inc = 0x00002458, + .burst_limit = 0x0000245C, + .ubwc_regs = NULL, + }, + /* BUS Client 3 */ + { + .status0 = 0x00002500, + .status1 = 0x00002504, + .cfg = 0x00002508, + .header_addr = 0x0000250C, + .header_cfg = 0x00002510, + .image_addr = 0x00002514, + .image_addr_offset = 0x00002518, + .buffer_width_cfg = 0x0000251C, + .buffer_height_cfg = 0x00002520, + .packer_cfg = 0x00002524, + .stride = 0x00002528, + .irq_subsample_period = 0x00002548, + .irq_subsample_pattern = 0x0000254C, + .framedrop_period = 0x00002550, + .framedrop_pattern = 0x00002554, + .frame_inc = 0x00002558, + .burst_limit = 0x0000255C, + .ubwc_regs = &vfe175_ubwc_regs_client_3, + }, + /* BUS Client 4 */ + { + .status0 = 0x00002600, + .status1 = 0x00002604, + .cfg = 0x00002608, + .header_addr = 0x0000260C, + .header_cfg = 0x00002610, + .image_addr = 0x00002614, + .image_addr_offset = 0x00002618, + .buffer_width_cfg = 0x0000261C, + .buffer_height_cfg = 0x00002620, + .packer_cfg = 0x00002624, + .stride = 0x00002628, + .irq_subsample_period = 0x00002648, + 
.irq_subsample_pattern = 0x0000264C, + .framedrop_period = 0x00002650, + .framedrop_pattern = 0x00002654, + .frame_inc = 0x00002658, + .burst_limit = 0x0000265C, + .ubwc_regs = &vfe175_ubwc_regs_client_4, + }, + /* BUS Client 5 */ + { + .status0 = 0x00002700, + .status1 = 0x00002704, + .cfg = 0x00002708, + .header_addr = 0x0000270C, + .header_cfg = 0x00002710, + .image_addr = 0x00002714, + .image_addr_offset = 0x00002718, + .buffer_width_cfg = 0x0000271C, + .buffer_height_cfg = 0x00002720, + .packer_cfg = 0x00002724, + .stride = 0x00002728, + .irq_subsample_period = 0x00002748, + .irq_subsample_pattern = 0x0000274C, + .framedrop_period = 0x00002750, + .framedrop_pattern = 0x00002754, + .frame_inc = 0x00002758, + .burst_limit = 0x0000275C, + .ubwc_regs = NULL, + }, + /* BUS Client 6 */ + { + .status0 = 0x00002800, + .status1 = 0x00002804, + .cfg = 0x00002808, + .header_addr = 0x0000280C, + .header_cfg = 0x00002810, + .image_addr = 0x00002814, + .image_addr_offset = 0x00002818, + .buffer_width_cfg = 0x0000281C, + .buffer_height_cfg = 0x00002820, + .packer_cfg = 0x00002824, + .stride = 0x00002828, + .irq_subsample_period = 0x00002848, + .irq_subsample_pattern = 0x0000284C, + .framedrop_period = 0x00002850, + .framedrop_pattern = 0x00002854, + .frame_inc = 0x00002858, + .burst_limit = 0x0000285C, + .ubwc_regs = NULL, + }, + /* BUS Client 7 */ + { + .status0 = 0x00002900, + .status1 = 0x00002904, + .cfg = 0x00002908, + .header_addr = 0x0000290C, + .header_cfg = 0x00002910, + .image_addr = 0x00002914, + .image_addr_offset = 0x00002918, + .buffer_width_cfg = 0x0000291C, + .buffer_height_cfg = 0x00002920, + .packer_cfg = 0x00002924, + .stride = 0x00002928, + .irq_subsample_period = 0x00002948, + .irq_subsample_pattern = 0x0000294C, + .framedrop_period = 0x00002950, + .framedrop_pattern = 0x00002954, + .frame_inc = 0x00002958, + .burst_limit = 0x0000295C, + .ubwc_regs = NULL, + }, + /* BUS Client 8 */ + { + .status0 = 0x00002A00, + .status1 = 0x00002A04, + .cfg = 
0x00002A08, + .header_addr = 0x00002A0C, + .header_cfg = 0x00002A10, + .image_addr = 0x00002A14, + .image_addr_offset = 0x00002A18, + .buffer_width_cfg = 0x00002A1C, + .buffer_height_cfg = 0x00002A20, + .packer_cfg = 0x00002A24, + .stride = 0x00002A28, + .irq_subsample_period = 0x00002A48, + .irq_subsample_pattern = 0x00002A4C, + .framedrop_period = 0x00002A50, + .framedrop_pattern = 0x00002A54, + .frame_inc = 0x00002A58, + .burst_limit = 0x00002A5C, + .ubwc_regs = NULL, + }, + /* BUS Client 9 */ + { + .status0 = 0x00002B00, + .status1 = 0x00002B04, + .cfg = 0x00002B08, + .header_addr = 0x00002B0C, + .header_cfg = 0x00002B10, + .image_addr = 0x00002B14, + .image_addr_offset = 0x00002B18, + .buffer_width_cfg = 0x00002B1C, + .buffer_height_cfg = 0x00002B20, + .packer_cfg = 0x00002B24, + .stride = 0x00002B28, + .irq_subsample_period = 0x00002B48, + .irq_subsample_pattern = 0x00002B4C, + .framedrop_period = 0x00002B50, + .framedrop_pattern = 0x00002B54, + .frame_inc = 0x00002B58, + .burst_limit = 0x00002B5C, + .ubwc_regs = NULL, + }, + /* BUS Client 10 */ + { + .status0 = 0x00002C00, + .status1 = 0x00002C04, + .cfg = 0x00002C08, + .header_addr = 0x00002C0C, + .header_cfg = 0x00002C10, + .image_addr = 0x00002C14, + .image_addr_offset = 0x00002C18, + .buffer_width_cfg = 0x00002C1C, + .buffer_height_cfg = 0x00002C20, + .packer_cfg = 0x00002C24, + .stride = 0x00002C28, + .irq_subsample_period = 0x00002C48, + .irq_subsample_pattern = 0x00002C4C, + .framedrop_period = 0x00002C50, + .framedrop_pattern = 0x00002C54, + .frame_inc = 0x00002C58, + .burst_limit = 0x00002C5C, + .ubwc_regs = NULL, + }, + /* BUS Client 11 */ + { + .status0 = 0x00002D00, + .status1 = 0x00002D04, + .cfg = 0x00002D08, + .header_addr = 0x00002D0C, + .header_cfg = 0x00002D10, + .image_addr = 0x00002D14, + .image_addr_offset = 0x00002D18, + .buffer_width_cfg = 0x00002D1C, + .buffer_height_cfg = 0x00002D20, + .packer_cfg = 0x00002D24, + .stride = 0x00002D28, + .irq_subsample_period = 0x00002D48, + 
.irq_subsample_pattern = 0x00002D4C, + .framedrop_period = 0x00002D50, + .framedrop_pattern = 0x00002D54, + .frame_inc = 0x00002D58, + .burst_limit = 0x00002D5C, + .ubwc_regs = NULL, + }, + /* BUS Client 12 */ + { + .status0 = 0x00002E00, + .status1 = 0x00002E04, + .cfg = 0x00002E08, + .header_addr = 0x00002E0C, + .header_cfg = 0x00002E10, + .image_addr = 0x00002E14, + .image_addr_offset = 0x00002E18, + .buffer_width_cfg = 0x00002E1C, + .buffer_height_cfg = 0x00002E20, + .packer_cfg = 0x00002E24, + .stride = 0x00002E28, + .irq_subsample_period = 0x00002E48, + .irq_subsample_pattern = 0x00002E4C, + .framedrop_period = 0x00002E50, + .framedrop_pattern = 0x00002E54, + .frame_inc = 0x00002E58, + .burst_limit = 0x00002E5C, + .ubwc_regs = NULL, + }, + /* BUS Client 13 */ + { + .status0 = 0x00002F00, + .status1 = 0x00002F04, + .cfg = 0x00002F08, + .header_addr = 0x00002F0C, + .header_cfg = 0x00002F10, + .image_addr = 0x00002F14, + .image_addr_offset = 0x00002F18, + .buffer_width_cfg = 0x00002F1C, + .buffer_height_cfg = 0x00002F20, + .packer_cfg = 0x00002F24, + .stride = 0x00002F28, + .irq_subsample_period = 0x00002F48, + .irq_subsample_pattern = 0x00002F4C, + .framedrop_period = 0x00002F50, + .framedrop_pattern = 0x00002F54, + .frame_inc = 0x00002F58, + .burst_limit = 0x00002F5C, + .ubwc_regs = NULL, + }, + /* BUS Client 14 */ + { + .status0 = 0x00003000, + .status1 = 0x00003004, + .cfg = 0x00003008, + .header_addr = 0x0000300C, + .header_cfg = 0x00003010, + .image_addr = 0x00003014, + .image_addr_offset = 0x00003018, + .buffer_width_cfg = 0x0000301C, + .buffer_height_cfg = 0x00003020, + .packer_cfg = 0x00003024, + .stride = 0x00003028, + .irq_subsample_period = 0x00003048, + .irq_subsample_pattern = 0x0000304C, + .framedrop_period = 0x00003050, + .framedrop_pattern = 0x00003054, + .frame_inc = 0x00003058, + .burst_limit = 0x0000305C, + .ubwc_regs = NULL, + }, + /* BUS Client 15 */ + { + .status0 = 0x00003100, + .status1 = 0x00003104, + .cfg = 0x00003108, + .header_addr = 
0x0000310C, + .header_cfg = 0x00003110, + .image_addr = 0x00003114, + .image_addr_offset = 0x00003118, + .buffer_width_cfg = 0x0000311C, + .buffer_height_cfg = 0x00003120, + .packer_cfg = 0x00003124, + .stride = 0x00003128, + .irq_subsample_period = 0x00003148, + .irq_subsample_pattern = 0x0000314C, + .framedrop_period = 0x00003150, + .framedrop_pattern = 0x00003154, + .frame_inc = 0x00003158, + .burst_limit = 0x0000315C, + .ubwc_regs = NULL, + }, + /* BUS Client 16 */ + { + .status0 = 0x00003200, + .status1 = 0x00003204, + .cfg = 0x00003208, + .header_addr = 0x0000320C, + .header_cfg = 0x00003210, + .image_addr = 0x00003214, + .image_addr_offset = 0x00003218, + .buffer_width_cfg = 0x0000321C, + .buffer_height_cfg = 0x00003220, + .packer_cfg = 0x00003224, + .stride = 0x00003228, + .irq_subsample_period = 0x00003248, + .irq_subsample_pattern = 0x0000324C, + .framedrop_period = 0x00003250, + .framedrop_pattern = 0x00003254, + .frame_inc = 0x00003258, + .burst_limit = 0x0000325C, + .ubwc_regs = NULL, + }, + /* BUS Client 17 */ + { + .status0 = 0x00003300, + .status1 = 0x00003304, + .cfg = 0x00003308, + .header_addr = 0x0000330C, + .header_cfg = 0x00003310, + .image_addr = 0x00003314, + .image_addr_offset = 0x00003318, + .buffer_width_cfg = 0x0000331C, + .buffer_height_cfg = 0x00003320, + .packer_cfg = 0x00003324, + .stride = 0x00003328, + .irq_subsample_period = 0x00003348, + .irq_subsample_pattern = 0x0000334C, + .framedrop_period = 0x00003350, + .framedrop_pattern = 0x00003354, + .frame_inc = 0x00003358, + .burst_limit = 0x0000335C, + .ubwc_regs = NULL, + }, + /* BUS Client 18 */ + { + .status0 = 0x00003400, + .status1 = 0x00003404, + .cfg = 0x00003408, + .header_addr = 0x0000340C, + .header_cfg = 0x00003410, + .image_addr = 0x00003414, + .image_addr_offset = 0x00003418, + .buffer_width_cfg = 0x0000341C, + .buffer_height_cfg = 0x00003420, + .packer_cfg = 0x00003424, + .stride = 0x00003428, + .irq_subsample_period = 0x00003448, + .irq_subsample_pattern = 0x0000344C, 
+ .framedrop_period = 0x00003450, + .framedrop_pattern = 0x00003454, + .frame_inc = 0x00003458, + .burst_limit = 0x0000345C, + .ubwc_regs = NULL, + }, + /* BUS Client 19 */ + { + .status0 = 0x00003500, + .status1 = 0x00003504, + .cfg = 0x00003508, + .header_addr = 0x0000350C, + .header_cfg = 0x00003510, + .image_addr = 0x00003514, + .image_addr_offset = 0x00003518, + .buffer_width_cfg = 0x0000351C, + .buffer_height_cfg = 0x00003520, + .packer_cfg = 0x00003524, + .stride = 0x00003528, + .irq_subsample_period = 0x00003548, + .irq_subsample_pattern = 0x0000354C, + .framedrop_period = 0x00003550, + .framedrop_pattern = 0x00003554, + .frame_inc = 0x00003558, + .burst_limit = 0x0000355C, + .ubwc_regs = NULL, + }, + /* BUS Client 20 */ + { + .status0 = 0x00003600, + .status1 = 0x00003604, + .cfg = 0x00003608, + .header_addr = 0x0000360C, + .header_cfg = 0x00003610, + .image_addr = 0x00003614, + .image_addr_offset = 0x00003618, + .buffer_width_cfg = 0x0000361C, + .buffer_height_cfg = 0x00003620, + .packer_cfg = 0x00003624, + .stride = 0x00003628, + .irq_subsample_period = 0x00003648, + .irq_subsample_pattern = 0x0000364C, + .framedrop_period = 0x00003650, + .framedrop_pattern = 0x00003654, + .frame_inc = 0x00003658, + .burst_limit = 0x0000365C, + .ubwc_regs = &vfe175_ubwc_regs_client_20, + }, + /* BUS Client 21 */ + { + .status0 = 0x00003700, + .status1 = 0x00003704, + .cfg = 0x00003708, + .header_addr = 0x0000370C, + .header_cfg = 0x00003710, + .image_addr = 0x00003714, + .image_addr_offset = 0x00003718, + .buffer_width_cfg = 0x0000371C, + .buffer_height_cfg = 0x00003720, + .packer_cfg = 0x00003724, + .stride = 0x00003728, + .irq_subsample_period = 0x00003748, + .irq_subsample_pattern = 0x0000374C, + .framedrop_period = 0x00003750, + .framedrop_pattern = 0x00003754, + .frame_inc = 0x00003758, + .burst_limit = 0x0000375C, + .ubwc_regs = &vfe175_ubwc_regs_client_21, + }, + /* BUS Client 22 */ + { + .status0 = 0x00003800, + .status1 = 0x00003804, + .cfg = 0x00003808, + 
.header_addr = 0x0000380C, + .header_cfg = 0x00003810, + .image_addr = 0x00003814, + .image_addr_offset = 0x00003818, + .buffer_width_cfg = 0x0000381C, + .buffer_height_cfg = 0x00003820, + .packer_cfg = 0x00003824, + .stride = 0x00003828, + .irq_subsample_period = 0x00003848, + .irq_subsample_pattern = 0x0000384C, + .framedrop_period = 0x00003850, + .framedrop_pattern = 0x00003854, + .frame_inc = 0x00003858, + .burst_limit = 0x0000385C, + .ubwc_regs = NULL, + }, + /* BUS Client 23 */ + { + .status0 = 0x00003900, + .status1 = 0x00003904, + .cfg = 0x00003908, + .header_addr = 0x0000390C, + .header_cfg = 0x00003910, + .image_addr = 0x00003914, + .image_addr_offset = 0x00003918, + .buffer_width_cfg = 0x0000391C, + .buffer_height_cfg = 0x00003920, + .packer_cfg = 0x00003924, + .stride = 0x00003928, + .irq_subsample_period = 0x00003948, + .irq_subsample_pattern = 0x0000394C, + .framedrop_period = 0x00003950, + .framedrop_pattern = 0x00003954, + .frame_inc = 0x00003958, + .burst_limit = 0x0000395C, + .ubwc_regs = NULL, + }, + }, + .comp_grp_reg = { + /* CAM_VFE_BUS_VER2_COMP_GRP_0 */ + { + .comp_mask = 0x00002010, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_1 */ + { + .comp_mask = 0x00002014, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_2 */ + { + .comp_mask = 0x00002018, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_3 */ + { + .comp_mask = 0x0000201C, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_4 */ + { + .comp_mask = 0x00002020, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_5 */ + { + .comp_mask = 0x00002024, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */ + { + .comp_mask = 0x0000202C, + .addr_sync_mask = 0x00002088, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */ + { + .comp_mask = 0x00002030, + .addr_sync_mask = 0x0000208C, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */ + { + .comp_mask = 0x00002034, + .addr_sync_mask = 0x00002090, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */ + { + .comp_mask = 0x00002038, + .addr_sync_mask = 0x00002094, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */ + { + .comp_mask = 
0x0000203C, + .addr_sync_mask = 0x00002098, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */ + { + .comp_mask = 0x00002040, + .addr_sync_mask = 0x0000209C, + }, + }, + .num_out = 22, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL, + .max_width = 4096, + .max_height = 4096, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FD, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_PDAF, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + 
CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP, + .max_width = 4096, + .max_height = 4096, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_2PD, + .max_width = 1920, + .max_height = 1080, + }, + }, + .top_irq_shift = 9, + .support_consumed_addr = false, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 23, +}; + +static struct cam_vfe_irq_hw_info vfe175_irq_hw_info = { + .reset_mask = BIT(31), + .supported_irq = CAM_VFE_HW_IRQ_CAP_INT_CSID, + .top_irq_reg = &vfe175_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe175_hw_info = { + .irq_hw_info = &vfe175_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_2_0, + .bus_hw_info = &vfe175_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_2_0, + .top_hw_info = &vfe175_top_hw_info, + + .camif_version = CAM_VFE_CAMIF_VER_2_0, + .camif_reg = &vfe175_camif_reg, + + .camif_lite_version = CAM_VFE_CAMIF_LITE_VER_2_0, + .camif_lite_reg = &vfe175_camif_lite_reg, + +}; + +#endif /* _CAM_VFE175_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h new file mode 100644 index 0000000000..b6282b6d42 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h @@ -0,0 +1,1176 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE175_130_H_ +#define _CAM_VFE175_130_H_ + +#include "cam_vfe_camif_ver2.h" +#include "cam_vfe_camif_lite_ver2.h" +#include "cam_vfe_bus_ver2.h" +#include "cam_vfe_bus_rd_ver1.h" +#include "cam_irq_controller.h" +#include "cam_vfe_top_ver2.h" +#include "cam_vfe_core.h" + +static struct cam_irq_register_set vfe175_130_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x0000005C, + .clear_reg_offset = 0x00000064, + .status_reg_offset = 0x0000006C, + }, + { + .mask_reg_offset = 0x00000060, + .clear_reg_offset = 0x00000068, + .status_reg_offset = 0x00000070, + }, +}; + +static struct cam_vfe_rdi_overflow_status vfe175_130_rdi_irq_status = { + .rdi0_overflow_mask = 0x8, + .rdi1_overflow_mask = 0x10, + .rdi2_overflow_mask = 0x18, + .rdi3_overflow_mask = 0x20, + .rdi_overflow_mask = 0x3c, +}; + +static struct cam_irq_controller_reg_info vfe175_130_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe175_130_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000058, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_vfe_camif_ver2_reg vfe175_130_camif_reg = { + .camif_cmd = 0x00000478, + .camif_config = 0x0000047C, + .line_skip_pattern = 0x00000488, + .pixel_skip_pattern = 0x0000048C, + .skip_period = 0x00000490, + .irq_subsample_pattern = 0x0000049C, + .epoch_irq = 0x000004A0, + .raw_crop_width_cfg = 0x00000CE4, + .raw_crop_height_cfg = 0x00000CE8, + .reg_update_cmd = 0x000004AC, + .vfe_diag_config = 0x00000C48, + .vfe_diag_sensor_status = 0x00000C4C, +}; + +static struct cam_vfe_camif_reg_data vfe_175_130_camif_reg_data = { + .raw_crop_first_pixel_shift = 16, + .raw_crop_first_pixel_mask = 0xFFFF, + .raw_crop_last_pixel_shift = 0x0, + .raw_crop_last_pixel_mask = 0x3FFF, + .raw_crop_first_line_shift = 16, + .raw_crop_first_line_mask = 0xFFFF, + .raw_crop_last_line_shift = 0, + .raw_crop_last_line_mask = 0x3FFF, + .input_mux_sel_shift = 5, + .input_mux_sel_mask = 0x3, + .extern_reg_update_shift = 4, 
+ .extern_reg_update_mask = 1, + .pixel_pattern_shift = 0, + .pixel_pattern_mask = 0x7, + .dsp_mode_shift = 23, + .dsp_mode_mask = 0x1, + .dsp_en_shift = 3, + .dsp_en_mask = 0x1, + .reg_update_cmd_data = 0x1, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .reg_update_irq_mask = 0x00000010, + .eof_irq_mask = 0x00000002, + .error_irq_mask0 = 0x0003FC00, + .error_irq_mask1 = 0xEFFF7E80, + .subscribe_irq_mask0 = 0x00000017, + .subscribe_irq_mask1 = 0x00000000, + .enable_diagnostic_hw = 0x1, +}; + +static struct cam_vfe_fe_ver1_reg vfe175_130_fe_reg = { + .camif_cmd = 0x00000478, + .camif_config = 0x0000047C, + .line_skip_pattern = 0x00000488, + .pixel_skip_pattern = 0x0000048C, + .skip_period = 0x00000490, + .irq_subsample_pattern = 0x0000049C, + .epoch_irq = 0x000004A0, + .raw_crop_width_cfg = 0x00000CE4, + .raw_crop_height_cfg = 0x00000CE8, + .reg_update_cmd = 0x000004AC, + .vfe_diag_config = 0x00000C48, + .vfe_diag_sensor_status = 0x00000C4C, + .fe_cfg = 0x00000084, +}; + +static struct cam_vfe_fe_reg_data vfe_175_130_fe_reg_data = { + .raw_crop_first_pixel_shift = 16, + .raw_crop_first_pixel_mask = 0xFFFF, + .raw_crop_last_pixel_shift = 0x0, + .raw_crop_last_pixel_mask = 0x3FFF, + .raw_crop_first_line_shift = 16, + .raw_crop_first_line_mask = 0xFFFF, + .raw_crop_last_line_shift = 0, + .raw_crop_last_line_mask = 0x3FFF, + .input_mux_sel_shift = 5, + .input_mux_sel_mask = 0x3, + .extern_reg_update_shift = 4, + .extern_reg_update_mask = 1, + .pixel_pattern_shift = 0, + .pixel_pattern_mask = 0x7, + .dsp_mode_shift = 23, + .dsp_mode_mask = 0x1, + .dsp_en_shift = 3, + .dsp_en_mask = 0x1, + .reg_update_cmd_data = 0x1, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .reg_update_irq_mask = 0x00000010, + .eof_irq_mask = 0x00000002, + .error_irq_mask0 = 0x0003FC00, + .error_irq_mask1 = 0xEFFF7E80, + .enable_diagnostic_hw = 0x1, + .fe_mux_data = 0x2, + .hbi_cnt_shift = 0x8, +}; + 
+static struct cam_vfe_camif_lite_ver2_reg vfe175_130_camif_lite_reg = { + .camif_lite_cmd = 0x00000FC0, + .camif_lite_config = 0x00000FC4, + .lite_skip_period = 0x00000FC8, + .lite_irq_subsample_pattern = 0x00000FCC, + .lite_epoch_irq = 0x00000FD0, + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_camif_lite_ver2_reg_data + vfe175_130_camif_lite_reg_data = { + .dual_pd_reg_update_cmd_data = 0x20, + .lite_epoch_line_cfg = 0x00140014, + .lite_sof_irq_mask = 0x00040000, + .lite_epoch0_irq_mask = 0x00100000, + .dual_pd_reg_upd_irq_mask = 0x04000000, + .lite_eof_irq_mask = 0x00080000, + .lite_err_irq_mask0 = 0x00400000, + .lite_err_irq_mask1 = 0x00004100, + .extern_reg_update_shift = 4, + .dual_pd_path_sel_shift = 24, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_175_130_reg = { + .reset = 0x0000001C, + .cgc_ovd = 0x0000002C, + .enable = 0x00000040, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_175_130_reg = { + .reset = 0x00000020, + .cgc_ovd = 0x00000030, + .enable = 0x00000044, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl color_175_130_reg = { + .reset = 0x00000024, + .cgc_ovd = 0x00000034, + .enable = 0x00000048, +}; + +static struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_175_130_reg = { + .reset = 0x00000028, + .cgc_ovd = 0x00000038, + .enable = 0x0000004C, +}; + +static struct cam_vfe_top_ver2_reg_offset_common vfe175_130_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .global_reset_cmd = 0x00000018, + .module_ctrl = { + &lens_175_130_reg, + &stats_175_130_reg, + &color_175_130_reg, + &zoom_175_130_reg, + }, + .bus_cgc_ovd = 0x0000003C, + .core_cfg = 0x00000050, + .three_D_cfg = 0x00000054, + .violation_status = 0x0000007C, + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_ver2_reg vfe175_130_rdi_reg = { + .reg_update_cmd = 
0x000004AC, +}; + +static struct cam_vfe_rdi_common_reg_data vfe175_130_rdi_reg_data = { + .subscribe_irq_mask0 = 0x780001E0, + .subscribe_irq_mask1 = 0x0, + .error_irq_mask0 = 0x0, + .error_irq_mask1 = 0x3C, +}; + +static struct cam_vfe_rdi_reg_data vfe_175_130_rdi_0_data = { + .reg_update_cmd_data = 0x2, + .sof_irq_mask = 0x8000000, + .reg_update_irq_mask = 0x20, +}; + +static struct cam_vfe_rdi_reg_data vfe_175_130_rdi_1_data = { + .reg_update_cmd_data = 0x4, + .sof_irq_mask = 0x10000000, + .reg_update_irq_mask = 0x40, +}; + +static struct cam_vfe_rdi_reg_data vfe_175_130_rdi_2_data = { + .reg_update_cmd_data = 0x8, + .sof_irq_mask = 0x20000000, + .reg_update_irq_mask = 0x80, +}; + +struct cam_vfe_top_dump_data vfe175_130_dump_data = { + .num_reg_dump_entries = 2, + .num_lut_dump_entries = 1, + .dmi_cfg = 0xc24, + .dmi_addr = 0xc28, + .dmi_data_path_hi = 0xc2C, + .dmi_data_path_lo = 0xc30, + .reg_entry = { + { + .reg_dump_start = 0x0, + .reg_dump_end = 0x1164, + }, + { + .reg_dump_start = 0x2000, + .reg_dump_end = 0x397C, + }, + }, + .lut_entry = { + { + .lut_word_size = 64, + .lut_bank_sel = 0x40, + .lut_addr_size = 180, + }, + }, +}; + +static struct cam_vfe_top_ver2_hw_info vfe175_130_top_hw_info = { + .common_reg = &vfe175_130_top_common_reg, + .camif_hw_info = { + .common_reg = &vfe175_130_top_common_reg, + .camif_reg = &vfe175_130_camif_reg, + .reg_data = &vfe_175_130_camif_reg_data, + }, + .camif_lite_hw_info = { + .common_reg = &vfe175_130_top_common_reg, + .camif_lite_reg = &vfe175_130_camif_lite_reg, + .reg_data = &vfe175_130_camif_lite_reg_data, + }, + .rdi_hw_info = { + .common_reg = &vfe175_130_top_common_reg, + .rdi_reg = &vfe175_130_rdi_reg, + .common_reg_data = &vfe175_130_rdi_reg_data, + .rdi_irq_status = &vfe175_130_rdi_irq_status, + .reg_data = { + &vfe_175_130_rdi_0_data, + &vfe_175_130_rdi_1_data, + &vfe_175_130_rdi_2_data, + NULL, + }, + }, + .fe_hw_info = { + .common_reg = &vfe175_130_top_common_reg, + .fe_reg = &vfe175_130_fe_reg, + 
.reg_data = &vfe_175_130_fe_reg_data, + }, + .num_mux = 6, + .mux_type = { + CAM_VFE_CAMIF_VER_2_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_CAMIF_LITE_VER_2_0, + CAM_VFE_IN_RD_VER_1_0, + }, + .dump_data = &vfe175_130_dump_data, +}; + +static struct cam_irq_register_set vfe175_130_bus_rd_irq_reg[1] = { + { + .mask_reg_offset = 0x00005010, + .clear_reg_offset = 0x00005014, + .status_reg_offset = 0x0000501C, + }, +}; + +static struct cam_irq_register_set vfe175_130_bus_irq_reg[3] = { + { + .mask_reg_offset = 0x00002044, + .clear_reg_offset = 0x00002050, + .status_reg_offset = 0x0000205C, + }, + { + .mask_reg_offset = 0x00002048, + .clear_reg_offset = 0x00002054, + .status_reg_offset = 0x00002060, + }, + { + .mask_reg_offset = 0x0000204C, + .clear_reg_offset = 0x00002058, + .status_reg_offset = 0x00002064, + }, +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client + vfe175_130_ubwc_regs_client_3 = { + .tile_cfg = 0x0000252C, + .h_init = 0x00002530, + .v_init = 0x00002534, + .meta_addr = 0x00002538, + .meta_offset = 0x0000253C, + .meta_stride = 0x00002540, + .mode_cfg_0 = 0x00002544, + .mode_cfg_1 = 0x000025A4, + .bw_limit = 0x000025A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client + vfe175_130_ubwc_regs_client_4 = { + .tile_cfg = 0x0000262C, + .h_init = 0x00002630, + .v_init = 0x00002634, + .meta_addr = 0x00002638, + .meta_offset = 0x0000263C, + .meta_stride = 0x00002640, + .mode_cfg_0 = 0x00002644, + .mode_cfg_1 = 0x000026A4, + .bw_limit = 0x000026A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client + vfe175_130_ubwc_regs_client_20 = { + .tile_cfg = 0x0000362C, + .h_init = 0x00003630, + .v_init = 0x00003634, + .meta_addr = 0x00003638, + .meta_offset = 0x0000363C, + .meta_stride = 0x00003640, + .mode_cfg_0 = 0x00003644, + .mode_cfg_1 = 0x000036A4, + .bw_limit = 0x000036A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct 
cam_vfe_bus_ver2_reg_offset_ubwc_3_client + vfe175_130_ubwc_regs_client_21 = { + .tile_cfg = 0x0000372C, + .h_init = 0x00003730, + .v_init = 0x00003734, + .meta_addr = 0x00003738, + .meta_offset = 0x0000373C, + .meta_stride = 0x00003740, + .mode_cfg_0 = 0x00003744, + .mode_cfg_1 = 0x000037A4, + .bw_limit = 0x000037A0, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_rd_ver1_hw_info vfe175_130_bus_rd_hw_info = { + .common_reg = { + .hw_version = 0x00005000, + .hw_capability = 0x00005004, + .sw_reset = 0x00005008, + .cgc_ovd = 0x0000500C, + .pwr_iso_cfg = 0x000050CC, + .input_if_cmd = 0x00005020, + .test_bus_ctrl = 0x00005048, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = vfe175_130_bus_rd_irq_reg, + .global_irq_cmd_offset = 0x00005018, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, + }, + .num_client = 1, + .bus_client_reg = { + /* BUS Client 0 */ + { + .cfg = 0x00005050, + .image_addr = 0x00005058, + .buf_size = 0x0000505C, + .stride = 0x00005060, + .unpacker_cfg = 0x00005064, + .latency_buf_allocation = 0x00005078, + .burst_limit = 0x00005080, + }, + }, + .num_bus_rd_resc = 1, + .vfe_bus_rd_hw_info = { + { + .vfe_bus_rd_type = CAM_VFE_BUS_RD_VER1_VFE_BUSRD_RDI0, + .max_width = -1, + .max_height = -1, + }, + }, + .top_irq_shift = 23, +}; + +static struct cam_vfe_bus_ver2_hw_info vfe175_130_bus_hw_info = { + .common_reg = { + .hw_version = 0x00002000, + .hw_capability = 0x00002004, + .sw_reset = 0x00002008, + .cgc_ovd = 0x0000200C, + .pwr_iso_cfg = 0x000020CC, + .dual_master_comp_cfg = 0x00002028, + .irq_reg_info = { + .num_registers = 3, + .irq_reg_set = vfe175_130_bus_irq_reg, + .global_irq_cmd_offset = 0x00002068, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, + .comp_error_status = 0x0000206C, + .comp_ovrwr_status = 0x00002070, + .dual_comp_error_status = 0x00002074, + .dual_comp_ovrwr_status = 0x00002078, + .addr_sync_cfg = 0x0000207C, + .addr_sync_frame_hdr = 0x00002080, + 
.addr_sync_no_sync = 0x00002084, + .debug_status_cfg = 0x0000226C, + .debug_status_0 = 0x00002270, + .top_irq_mask_0 = 0x0000005C, + }, + .num_client = 24, + .bus_client_reg = { + /* BUS Client 0 */ + { + .status0 = 0x00002200, + .status1 = 0x00002204, + .cfg = 0x00002208, + .header_addr = 0x0000220C, + .header_cfg = 0x00002210, + .image_addr = 0x00002214, + .image_addr_offset = 0x00002218, + .buffer_width_cfg = 0x0000221C, + .buffer_height_cfg = 0x00002220, + .packer_cfg = 0x00002224, + .stride = 0x00002228, + .irq_subsample_period = 0x00002248, + .irq_subsample_pattern = 0x0000224C, + .framedrop_period = 0x00002250, + .framedrop_pattern = 0x00002254, + .frame_inc = 0x00002258, + .burst_limit = 0x0000225C, + .ubwc_regs = NULL, + }, + /* BUS Client 1 */ + { + .status0 = 0x00002300, + .status1 = 0x00002304, + .cfg = 0x00002308, + .header_addr = 0x0000230C, + .header_cfg = 0x00002310, + .image_addr = 0x00002314, + .image_addr_offset = 0x00002318, + .buffer_width_cfg = 0x0000231C, + .buffer_height_cfg = 0x00002320, + .packer_cfg = 0x00002324, + .stride = 0x00002328, + .irq_subsample_period = 0x00002348, + .irq_subsample_pattern = 0x0000234C, + .framedrop_period = 0x00002350, + .framedrop_pattern = 0x00002354, + .frame_inc = 0x00002358, + .burst_limit = 0x0000235C, + .ubwc_regs = NULL, + }, + /* BUS Client 2 */ + { + .status0 = 0x00002400, + .status1 = 0x00002404, + .cfg = 0x00002408, + .header_addr = 0x0000240C, + .header_cfg = 0x00002410, + .image_addr = 0x00002414, + .image_addr_offset = 0x00002418, + .buffer_width_cfg = 0x0000241C, + .buffer_height_cfg = 0x00002420, + .packer_cfg = 0x00002424, + .stride = 0x00002428, + .irq_subsample_period = 0x00002448, + .irq_subsample_pattern = 0x0000244C, + .framedrop_period = 0x00002450, + .framedrop_pattern = 0x00002454, + .frame_inc = 0x00002458, + .burst_limit = 0x0000245C, + .ubwc_regs = NULL, + }, + /* BUS Client 3 */ + { + .status0 = 0x00002500, + .status1 = 0x00002504, + .cfg = 0x00002508, + .header_addr = 0x0000250C, + 
.header_cfg = 0x00002510, + .image_addr = 0x00002514, + .image_addr_offset = 0x00002518, + .buffer_width_cfg = 0x0000251C, + .buffer_height_cfg = 0x00002520, + .packer_cfg = 0x00002524, + .stride = 0x00002528, + .irq_subsample_period = 0x00002548, + .irq_subsample_pattern = 0x0000254C, + .framedrop_period = 0x00002550, + .framedrop_pattern = 0x00002554, + .frame_inc = 0x00002558, + .burst_limit = 0x0000255C, + .ubwc_regs = + &vfe175_130_ubwc_regs_client_3, + }, + /* BUS Client 4 */ + { + .status0 = 0x00002600, + .status1 = 0x00002604, + .cfg = 0x00002608, + .header_addr = 0x0000260C, + .header_cfg = 0x00002610, + .image_addr = 0x00002614, + .image_addr_offset = 0x00002618, + .buffer_width_cfg = 0x0000261C, + .buffer_height_cfg = 0x00002620, + .packer_cfg = 0x00002624, + .stride = 0x00002628, + .irq_subsample_period = 0x00002648, + .irq_subsample_pattern = 0x0000264C, + .framedrop_period = 0x00002650, + .framedrop_pattern = 0x00002654, + .frame_inc = 0x00002658, + .burst_limit = 0x0000265C, + .ubwc_regs = + &vfe175_130_ubwc_regs_client_4, + }, + /* BUS Client 5 */ + { + .status0 = 0x00002700, + .status1 = 0x00002704, + .cfg = 0x00002708, + .header_addr = 0x0000270C, + .header_cfg = 0x00002710, + .image_addr = 0x00002714, + .image_addr_offset = 0x00002718, + .buffer_width_cfg = 0x0000271C, + .buffer_height_cfg = 0x00002720, + .packer_cfg = 0x00002724, + .stride = 0x00002728, + .irq_subsample_period = 0x00002748, + .irq_subsample_pattern = 0x0000274C, + .framedrop_period = 0x00002750, + .framedrop_pattern = 0x00002754, + .frame_inc = 0x00002758, + .burst_limit = 0x0000275C, + .ubwc_regs = NULL, + }, + /* BUS Client 6 */ + { + .status0 = 0x00002800, + .status1 = 0x00002804, + .cfg = 0x00002808, + .header_addr = 0x0000280C, + .header_cfg = 0x00002810, + .image_addr = 0x00002814, + .image_addr_offset = 0x00002818, + .buffer_width_cfg = 0x0000281C, + .buffer_height_cfg = 0x00002820, + .packer_cfg = 0x00002824, + .stride = 0x00002828, + .irq_subsample_period = 0x00002848, 
+ .irq_subsample_pattern = 0x0000284C, + .framedrop_period = 0x00002850, + .framedrop_pattern = 0x00002854, + .frame_inc = 0x00002858, + .burst_limit = 0x0000285C, + .ubwc_regs = NULL, + }, + /* BUS Client 7 */ + { + .status0 = 0x00002900, + .status1 = 0x00002904, + .cfg = 0x00002908, + .header_addr = 0x0000290C, + .header_cfg = 0x00002910, + .image_addr = 0x00002914, + .image_addr_offset = 0x00002918, + .buffer_width_cfg = 0x0000291C, + .buffer_height_cfg = 0x00002920, + .packer_cfg = 0x00002924, + .stride = 0x00002928, + .irq_subsample_period = 0x00002948, + .irq_subsample_pattern = 0x0000294C, + .framedrop_period = 0x00002950, + .framedrop_pattern = 0x00002954, + .frame_inc = 0x00002958, + .burst_limit = 0x0000295C, + .ubwc_regs = NULL, + }, + /* BUS Client 8 */ + { + .status0 = 0x00002A00, + .status1 = 0x00002A04, + .cfg = 0x00002A08, + .header_addr = 0x00002A0C, + .header_cfg = 0x00002A10, + .image_addr = 0x00002A14, + .image_addr_offset = 0x00002A18, + .buffer_width_cfg = 0x00002A1C, + .buffer_height_cfg = 0x00002A20, + .packer_cfg = 0x00002A24, + .stride = 0x00002A28, + .irq_subsample_period = 0x00002A48, + .irq_subsample_pattern = 0x00002A4C, + .framedrop_period = 0x00002A50, + .framedrop_pattern = 0x00002A54, + .frame_inc = 0x00002A58, + .burst_limit = 0x00002A5C, + .ubwc_regs = NULL, + }, + /* BUS Client 9 */ + { + .status0 = 0x00002B00, + .status1 = 0x00002B04, + .cfg = 0x00002B08, + .header_addr = 0x00002B0C, + .header_cfg = 0x00002B10, + .image_addr = 0x00002B14, + .image_addr_offset = 0x00002B18, + .buffer_width_cfg = 0x00002B1C, + .buffer_height_cfg = 0x00002B20, + .packer_cfg = 0x00002B24, + .stride = 0x00002B28, + .irq_subsample_period = 0x00002B48, + .irq_subsample_pattern = 0x00002B4C, + .framedrop_period = 0x00002B50, + .framedrop_pattern = 0x00002B54, + .frame_inc = 0x00002B58, + .burst_limit = 0x00002B5C, + .ubwc_regs = NULL, + }, + /* BUS Client 10 */ + { + .status0 = 0x00002C00, + .status1 = 0x00002C04, + .cfg = 0x00002C08, + .header_addr = 
0x00002C0C, + .header_cfg = 0x00002C10, + .image_addr = 0x00002C14, + .image_addr_offset = 0x00002C18, + .buffer_width_cfg = 0x00002C1C, + .buffer_height_cfg = 0x00002C20, + .packer_cfg = 0x00002C24, + .stride = 0x00002C28, + .irq_subsample_period = 0x00002C48, + .irq_subsample_pattern = 0x00002C4C, + .framedrop_period = 0x00002C50, + .framedrop_pattern = 0x00002C54, + .frame_inc = 0x00002C58, + .burst_limit = 0x00002C5C, + .ubwc_regs = NULL, + }, + /* BUS Client 11 */ + { + .status0 = 0x00002D00, + .status1 = 0x00002D04, + .cfg = 0x00002D08, + .header_addr = 0x00002D0C, + .header_cfg = 0x00002D10, + .image_addr = 0x00002D14, + .image_addr_offset = 0x00002D18, + .buffer_width_cfg = 0x00002D1C, + .buffer_height_cfg = 0x00002D20, + .packer_cfg = 0x00002D24, + .stride = 0x00002D28, + .irq_subsample_period = 0x00002D48, + .irq_subsample_pattern = 0x00002D4C, + .framedrop_period = 0x00002D50, + .framedrop_pattern = 0x00002D54, + .frame_inc = 0x00002D58, + .burst_limit = 0x00002D5C, + .ubwc_regs = NULL, + }, + /* BUS Client 12 */ + { + .status0 = 0x00002E00, + .status1 = 0x00002E04, + .cfg = 0x00002E08, + .header_addr = 0x00002E0C, + .header_cfg = 0x00002E10, + .image_addr = 0x00002E14, + .image_addr_offset = 0x00002E18, + .buffer_width_cfg = 0x00002E1C, + .buffer_height_cfg = 0x00002E20, + .packer_cfg = 0x00002E24, + .stride = 0x00002E28, + .irq_subsample_period = 0x00002E48, + .irq_subsample_pattern = 0x00002E4C, + .framedrop_period = 0x00002E50, + .framedrop_pattern = 0x00002E54, + .frame_inc = 0x00002E58, + .burst_limit = 0x00002E5C, + .ubwc_regs = NULL, + }, + /* BUS Client 13 */ + { + .status0 = 0x00002F00, + .status1 = 0x00002F04, + .cfg = 0x00002F08, + .header_addr = 0x00002F0C, + .header_cfg = 0x00002F10, + .image_addr = 0x00002F14, + .image_addr_offset = 0x00002F18, + .buffer_width_cfg = 0x00002F1C, + .buffer_height_cfg = 0x00002F20, + .packer_cfg = 0x00002F24, + .stride = 0x00002F28, + .irq_subsample_period = 0x00002F48, + .irq_subsample_pattern = 0x00002F4C, 
+ .framedrop_period = 0x00002F50, + .framedrop_pattern = 0x00002F54, + .frame_inc = 0x00002F58, + .burst_limit = 0x00002F5C, + .ubwc_regs = NULL, + }, + /* BUS Client 14 */ + { + .status0 = 0x00003000, + .status1 = 0x00003004, + .cfg = 0x00003008, + .header_addr = 0x0000300C, + .header_cfg = 0x00003010, + .image_addr = 0x00003014, + .image_addr_offset = 0x00003018, + .buffer_width_cfg = 0x0000301C, + .buffer_height_cfg = 0x00003020, + .packer_cfg = 0x00003024, + .stride = 0x00003028, + .irq_subsample_period = 0x00003048, + .irq_subsample_pattern = 0x0000304C, + .framedrop_period = 0x00003050, + .framedrop_pattern = 0x00003054, + .frame_inc = 0x00003058, + .burst_limit = 0x0000305C, + .ubwc_regs = NULL, + }, + /* BUS Client 15 */ + { + .status0 = 0x00003100, + .status1 = 0x00003104, + .cfg = 0x00003108, + .header_addr = 0x0000310C, + .header_cfg = 0x00003110, + .image_addr = 0x00003114, + .image_addr_offset = 0x00003118, + .buffer_width_cfg = 0x0000311C, + .buffer_height_cfg = 0x00003120, + .packer_cfg = 0x00003124, + .stride = 0x00003128, + .irq_subsample_period = 0x00003148, + .irq_subsample_pattern = 0x0000314C, + .framedrop_period = 0x00003150, + .framedrop_pattern = 0x00003154, + .frame_inc = 0x00003158, + .burst_limit = 0x0000315C, + .ubwc_regs = NULL, + }, + /* BUS Client 16 */ + { + .status0 = 0x00003200, + .status1 = 0x00003204, + .cfg = 0x00003208, + .header_addr = 0x0000320C, + .header_cfg = 0x00003210, + .image_addr = 0x00003214, + .image_addr_offset = 0x00003218, + .buffer_width_cfg = 0x0000321C, + .buffer_height_cfg = 0x00003220, + .packer_cfg = 0x00003224, + .stride = 0x00003228, + .irq_subsample_period = 0x00003248, + .irq_subsample_pattern = 0x0000324C, + .framedrop_period = 0x00003250, + .framedrop_pattern = 0x00003254, + .frame_inc = 0x00003258, + .burst_limit = 0x0000325C, + .ubwc_regs = NULL, + }, + /* BUS Client 17 */ + { + .status0 = 0x00003300, + .status1 = 0x00003304, + .cfg = 0x00003308, + .header_addr = 0x0000330C, + .header_cfg = 
0x00003310, + .image_addr = 0x00003314, + .image_addr_offset = 0x00003318, + .buffer_width_cfg = 0x0000331C, + .buffer_height_cfg = 0x00003320, + .packer_cfg = 0x00003324, + .stride = 0x00003328, + .irq_subsample_period = 0x00003348, + .irq_subsample_pattern = 0x0000334C, + .framedrop_period = 0x00003350, + .framedrop_pattern = 0x00003354, + .frame_inc = 0x00003358, + .burst_limit = 0x0000335C, + .ubwc_regs = NULL, + }, + /* BUS Client 18 */ + { + .status0 = 0x00003400, + .status1 = 0x00003404, + .cfg = 0x00003408, + .header_addr = 0x0000340C, + .header_cfg = 0x00003410, + .image_addr = 0x00003414, + .image_addr_offset = 0x00003418, + .buffer_width_cfg = 0x0000341C, + .buffer_height_cfg = 0x00003420, + .packer_cfg = 0x00003424, + .stride = 0x00003428, + .irq_subsample_period = 0x00003448, + .irq_subsample_pattern = 0x0000344C, + .framedrop_period = 0x00003450, + .framedrop_pattern = 0x00003454, + .frame_inc = 0x00003458, + .burst_limit = 0x0000345C, + .ubwc_regs = NULL, + }, + /* BUS Client 19 */ + { + .status0 = 0x00003500, + .status1 = 0x00003504, + .cfg = 0x00003508, + .header_addr = 0x0000350C, + .header_cfg = 0x00003510, + .image_addr = 0x00003514, + .image_addr_offset = 0x00003518, + .buffer_width_cfg = 0x0000351C, + .buffer_height_cfg = 0x00003520, + .packer_cfg = 0x00003524, + .stride = 0x00003528, + .irq_subsample_period = 0x00003548, + .irq_subsample_pattern = 0x0000354C, + .framedrop_period = 0x00003550, + .framedrop_pattern = 0x00003554, + .frame_inc = 0x00003558, + .burst_limit = 0x0000355C, + .ubwc_regs = NULL, + }, + /* BUS Client 20 */ + { + .status0 = 0x00003600, + .status1 = 0x00003604, + .cfg = 0x00003608, + .header_addr = 0x0000360C, + .header_cfg = 0x00003610, + .image_addr = 0x00003614, + .image_addr_offset = 0x00003618, + .buffer_width_cfg = 0x0000361C, + .buffer_height_cfg = 0x00003620, + .packer_cfg = 0x00003624, + .stride = 0x00003628, + .irq_subsample_period = 0x00003648, + .irq_subsample_pattern = 0x0000364C, + .framedrop_period = 
0x00003650, + .framedrop_pattern = 0x00003654, + .frame_inc = 0x00003658, + .burst_limit = 0x0000365C, + .ubwc_regs = + &vfe175_130_ubwc_regs_client_20, + }, + /* BUS Client 21 */ + { + .status0 = 0x00003700, + .status1 = 0x00003704, + .cfg = 0x00003708, + .header_addr = 0x0000370C, + .header_cfg = 0x00003710, + .image_addr = 0x00003714, + .image_addr_offset = 0x00003718, + .buffer_width_cfg = 0x0000371C, + .buffer_height_cfg = 0x00003720, + .packer_cfg = 0x00003724, + .stride = 0x00003728, + .irq_subsample_period = 0x00003748, + .irq_subsample_pattern = 0x0000374C, + .framedrop_period = 0x00003750, + .framedrop_pattern = 0x00003754, + .frame_inc = 0x00003758, + .burst_limit = 0x0000375C, + .ubwc_regs = + &vfe175_130_ubwc_regs_client_21, + }, + /* BUS Client 22 */ + { + .status0 = 0x00003800, + .status1 = 0x00003804, + .cfg = 0x00003808, + .header_addr = 0x0000380C, + .header_cfg = 0x00003810, + .image_addr = 0x00003814, + .image_addr_offset = 0x00003818, + .buffer_width_cfg = 0x0000381C, + .buffer_height_cfg = 0x00003820, + .packer_cfg = 0x00003824, + .stride = 0x00003828, + .irq_subsample_period = 0x00003848, + .irq_subsample_pattern = 0x0000384C, + .framedrop_period = 0x00003850, + .framedrop_pattern = 0x00003854, + .frame_inc = 0x00003858, + .burst_limit = 0x0000385C, + .ubwc_regs = NULL, + }, + /* BUS Client 23 */ + { + .status0 = 0x00003900, + .status1 = 0x00003904, + .cfg = 0x00003908, + .header_addr = 0x0000390C, + .header_cfg = 0x00003910, + .image_addr = 0x00003914, + .image_addr_offset = 0x00003918, + .buffer_width_cfg = 0x0000391C, + .buffer_height_cfg = 0x00003920, + .packer_cfg = 0x00003924, + .stride = 0x00003928, + .irq_subsample_period = 0x00003948, + .irq_subsample_pattern = 0x0000394C, + .framedrop_period = 0x00003950, + .framedrop_pattern = 0x00003954, + .frame_inc = 0x00003958, + .burst_limit = 0x0000395C, + .ubwc_regs = NULL, + }, + }, + .comp_grp_reg = { + /* CAM_VFE_BUS_VER2_COMP_GRP_0 */ + { + .comp_mask = 0x00002010, + }, + /* 
CAM_VFE_BUS_VER2_COMP_GRP_1 */ + { + .comp_mask = 0x00002014, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_2 */ + { + .comp_mask = 0x00002018, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_3 */ + { + .comp_mask = 0x0000201C, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_4 */ + { + .comp_mask = 0x00002020, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_5 */ + { + .comp_mask = 0x00002024, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */ + { + .comp_mask = 0x0000202C, + .addr_sync_mask = 0x00002088, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */ + { + .comp_mask = 0x00002030, + .addr_sync_mask = 0x0000208C, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */ + { + .comp_mask = 0x00002034, + .addr_sync_mask = 0x00002090, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */ + { + .comp_mask = 0x00002038, + .addr_sync_mask = 0x00002094, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */ + { + .comp_mask = 0x0000203C, + .addr_sync_mask = 0x00002098, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */ + { + .comp_mask = 0x00002040, + .addr_sync_mask = 0x0000209C, + }, + }, + .num_out = 22, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL, + .max_width = 4096, + .max_height = 4096, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FD, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_PDAF, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + 
CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP, + .max_width = 4096, + .max_height = 4096, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP, + .max_width = 1920, + .max_height = 1080, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_2PD, + .max_width = 1920, + .max_height = 1080, + }, + }, + .top_irq_shift = 9, + .support_consumed_addr = false, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 23, +}; + +static struct cam_vfe_irq_hw_info vfe175_130_irq_hw_info = { + .reset_mask = BIT(31), + .supported_irq = CAM_VFE_HW_IRQ_CAP_INT_CSID, + .top_irq_reg = &vfe175_130_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe175_130_hw_info = { + .irq_hw_info = &vfe175_130_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_2_0, + .bus_hw_info = &vfe175_130_bus_hw_info, + + .bus_rd_version = CAM_VFE_BUS_RD_VER_1_0, + .bus_rd_hw_info = &vfe175_130_bus_rd_hw_info, + + .top_version = CAM_VFE_TOP_VER_2_0, + .top_hw_info = 
&vfe175_130_top_hw_info, + + .camif_version = CAM_VFE_CAMIF_VER_2_0, + .camif_reg = &vfe175_130_camif_reg, + + .camif_lite_version = CAM_VFE_CAMIF_LITE_VER_2_0, + .camif_lite_reg = &vfe175_130_camif_lite_reg, + +}; + +#endif /* _CAM_VFE175_130_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h new file mode 100644 index 0000000000..e3c33e2826 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h @@ -0,0 +1,1828 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + + +#ifndef _CAM_VFE480_H_ +#define _CAM_VFE480_H_ +#include "cam_vfe_camif_ver3.h" +#include "cam_vfe_top_ver3.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_VFE_BUS_VER3_480_MAX_CLIENTS 26 + +static struct cam_irq_register_set vfe480_top_irq_reg_set[3] = { + { + .mask_reg_offset = 0x0000003C, + .clear_reg_offset = 0x00000048, + .status_reg_offset = 0x00000054, + }, + { + .mask_reg_offset = 0x00000040, + .clear_reg_offset = 0x0000004C, + .status_reg_offset = 0x00000058, + }, + { + .mask_reg_offset = 0x00000044, + .clear_reg_offset = 0x00000050, + .status_reg_offset = 0x0000005C, + }, +}; + +static struct cam_irq_controller_reg_info vfe480_top_irq_reg_info = { + .num_registers = 3, + .irq_reg_set = vfe480_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000038, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_vfe_camif_ver3_pp_clc_reg vfe480_camif_reg = { + .hw_version = 0x00002600, + .hw_status = 0x00002604, + .module_cfg = 0x00002660, + .pdaf_raw_crop_width_cfg = 0x00002668, + .pdaf_raw_crop_height_cfg = 0x0000266C, + 
.line_skip_pattern = 0x00002670, + .pixel_skip_pattern = 0x00002674, + .period_cfg = 0x00002678, + .irq_subsample_pattern = 0x0000267C, + .epoch_irq_cfg = 0x00002680, + .debug_1 = 0x000027F0, + .debug_0 = 0x000027F4, + .test_bus_ctrl = 0x000027F8, + .spare = 0x000027FC, + .reg_update_cmd = 0x00000034, +}; + +static struct cam_vfe_camif_ver3_reg_data vfe_480_camif_reg_data = { + .pp_extern_reg_update_shift = 4, + .dual_pd_extern_reg_update_shift = 17, + .extern_reg_update_mask = 1, + .dual_ife_pix_en_shift = 3, + .operating_mode_shift = 11, + .input_mux_sel_shift = 5, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x7, + .dsp_mode_shift = 24, + .dsp_mode_mask = 0x1, + .dsp_en_shift = 23, + .dsp_en_mask = 0x1, + .reg_update_cmd_data = 0x41, + .epoch_line_cfg = 0x00000014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .epoch1_irq_mask = 0x00000008, + .eof_irq_mask = 0x00000002, + .error_irq_mask0 = 0x82000200, + .error_irq_mask2 = 0x30301F80, + .subscribe_irq_mask1 = 0x00000007, + .frame_id_irq_mask = 0x400, + .enable_diagnostic_hw = 0x1, + .pp_camif_cfg_en_shift = 0, + .pp_camif_cfg_ife_out_en_shift = 8, + .top_debug_cfg_en = 1, +}; + +static struct cam_vfe_top_ver3_reg_offset_common vfe480_top_common_reg = { + .hw_version = 0x00000000, + .titan_version = 0x00000004, + .hw_capability = 0x00000008, + .lens_feature = 0x0000000C, + .stats_feature = 0x00000010, + .color_feature = 0x00000014, + .zoom_feature = 0x00000018, + .global_reset_cmd = 0x0000001C, + .core_cfg_0 = 0x0000002C, + .core_cfg_1 = 0x00000030, + .reg_update_cmd = 0x00000034, + .violation_status = 0x00000074, + .core_cgc_ovd_0 = 0x00000020, + .core_cgc_ovd_1 = 0x00000094, + .ahb_cgc_ovd = 0x00000024, + .noc_cgc_ovd = 0x00000028, + .trigger_cdm_events = 0x00000090, + .custom_frame_idx = 0x00000110, + .dsp_status = 0x0000007C, + .diag_config = 0x00000064, + .diag_sensor_status_0 = 0x00000068, + .diag_sensor_status_1 = 0x00000098, + .bus_overflow_status = 0x0000AA68, + .top_debug_cfg 
= 0x000000DC, + .top_debug_0 = 0x00000080, + .top_debug_1 = 0x00000084, + .top_debug_2 = 0x00000088, + .top_debug_3 = 0x0000008C, + .top_debug_4 = 0x0000009C, + .top_debug_5 = 0x000000A0, + .top_debug_6 = 0x000000A4, + .top_debug_7 = 0x000000A8, + .top_debug_8 = 0x000000AC, + .top_debug_9 = 0x000000B0, + .top_debug_10 = 0x000000B4, + .top_debug_11 = 0x000000B8, + .top_debug_12 = 0x000000BC, + .top_debug_13 = 0x000000C0, +}; + +static struct cam_vfe_camif_lite_ver3_reg vfe480_camif_rdi[3] = { + { + .lite_hw_version = 0x9A00, + .lite_hw_status = 0x9A04, + .lite_module_config = 0x9A60, + .lite_skip_period = 0x9A68, + .lite_irq_subsample_pattern = 0x9A6C, + .lite_epoch_irq = 0x9A70, + .lite_debug_1 = 0x9BF0, + .lite_debug_0 = 0x9BF4, + .lite_test_bus_ctrl = 0x9BF8, + .camif_lite_spare = 0x9BFC, + .reg_update_cmd = 0x34, + }, + { + .lite_hw_version = 0x9C00, + .lite_hw_status = 0x9C04, + .lite_module_config = 0x9C60, + .lite_skip_period = 0x9C68, + .lite_irq_subsample_pattern = 0x9C6C, + .lite_epoch_irq = 0x9C70, + .lite_debug_1 = 0x9DF0, + .lite_debug_0 = 0x9DF4, + .lite_test_bus_ctrl = 0x9DF8, + .camif_lite_spare = 0x9DFC, + .reg_update_cmd = 0x34, + }, + { + .lite_hw_version = 0x9E00, + .lite_hw_status = 0x9E04, + .lite_module_config = 0x9E60, + .lite_skip_period = 0x9E68, + .lite_irq_subsample_pattern = 0x9E6C, + .lite_epoch_irq = 0x9E70, + .lite_debug_1 = 0x9FF0, + .lite_debug_0 = 0x9FF4, + .lite_test_bus_ctrl = 0x9FF8, + .camif_lite_spare = 0x9FFC, + .reg_update_cmd = 0x34, + }, +}; + +static struct cam_vfe_camif_lite_ver3_reg_data vfe480_camif_rdi_reg_data[3] = { + { + .extern_reg_update_shift = 0, + .reg_update_cmd_data = 0x2, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x10, + .epoch0_irq_mask = 0x40, + .epoch1_irq_mask = 0x80, + .eof_irq_mask = 0x20, + .error_irq_mask0 = 0x20000000, + .error_irq_mask2 = 0x20000, + .subscribe_irq_mask1 = 0x30, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 1, + }, + { + .extern_reg_update_shift = 0, + .reg_update_cmd_data 
= 0x4, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x100, + .epoch0_irq_mask = 0x400, + .epoch1_irq_mask = 0x800, + .eof_irq_mask = 0x200, + .error_irq_mask0 = 0x10000000, + .error_irq_mask2 = 0x40000, + .subscribe_irq_mask1 = 0x300, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 1, + }, + { + .extern_reg_update_shift = 0, + .reg_update_cmd_data = 0x8, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x1000, + .epoch0_irq_mask = 0x4000, + .epoch1_irq_mask = 0x8000, + .eof_irq_mask = 0x2000, + .error_irq_mask0 = 0x8000000, + .error_irq_mask2 = 0x80000, + .subscribe_irq_mask1 = 0x3000, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 1, + }, +}; + +static struct cam_vfe_camif_lite_ver3_reg vfe480_camif_lcr = { + .lite_hw_version = 0xA000, + .lite_hw_status = 0xA004, + .lite_module_config = 0xA060, + .lite_skip_period = 0xA068, + .lite_irq_subsample_pattern = 0xA06C, + .lite_epoch_irq = 0xA070, + .lite_debug_1 = 0xA1F0, + .lite_debug_0 = 0xA1F4, + .lite_test_bus_ctrl = 0xA1F8, + .camif_lite_spare = 0xA1FC, + .reg_update_cmd = 0x0034, +}; + +static struct cam_vfe_camif_lite_ver3_reg_data vfe480_camif_lcr_reg_data = { + .extern_reg_update_shift = 16, + .reg_update_cmd_data = 0x40, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 0x100000, + .epoch0_irq_mask = 0x400000, + .epoch1_irq_mask = 0x800000, + .eof_irq_mask = 0x200000, + .error_irq_mask0 = 0x0, + .error_irq_mask2 = 0x18000, + .subscribe_irq_mask1 = 0x300000, + .enable_diagnostic_hw = 0x1, +}; + +static struct cam_vfe_camif_lite_ver3_reg vfe480_camif_pd = { + .lite_hw_version = 0xA400, + .lite_hw_status = 0xA404, + .lite_module_config = 0xA460, + .lite_skip_period = 0xA468, + .lite_irq_subsample_pattern = 0xA46C, + .lite_epoch_irq = 0xA470, + .lite_debug_1 = 0xA5F0, + .lite_debug_0 = 0xA5F4, + .lite_test_bus_ctrl = 0xA5F8, + .camif_lite_spare = 0xA5FC, + .reg_update_cmd = 0x0034, +}; + +static struct cam_vfe_camif_lite_ver3_reg_data vfe480_camif_pd_reg_data = { + .extern_reg_update_shift = 17, + 
.operating_mode_shift = 13, + .input_mux_sel_shift = 31, + .reg_update_cmd_data = 0x20, + .epoch_line_cfg = 0x00140014, + .sof_irq_mask = 0x10000, + .epoch0_irq_mask = 0x40000, + .epoch1_irq_mask = 0x80000, + .eof_irq_mask = 0x20000, + .error_irq_mask0 = 0x40000000, + .error_irq_mask2 = 0x6000, + .subscribe_irq_mask1 = 0x30000, + .enable_diagnostic_hw = 0x1, +}; + +struct cam_vfe_camif_lite_ver3_hw_info rdi_hw_info_arr[CAM_VFE_RDI_VER2_MAX] = { + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[0], + .reg_data = &vfe480_camif_rdi_reg_data[0], + }, + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[1], + .reg_data = &vfe480_camif_rdi_reg_data[1], + }, + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[2], + .reg_data = &vfe480_camif_rdi_reg_data[2], + }, +}; + +static struct cam_vfe_top_ver3_hw_info vfe480_top_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_reg = &vfe480_camif_reg, + .reg_data = &vfe_480_camif_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_pd, + .reg_data = &vfe480_camif_pd_reg_data, + }, + .rdi_hw_info[0] = &rdi_hw_info_arr[0], + .rdi_hw_info[1] = &rdi_hw_info_arr[1], + .rdi_hw_info[2] = &rdi_hw_info_arr[2], + .lcr_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_lcr, + .reg_data = &vfe480_camif_lcr_reg_data, + }, + .num_mux = 6, + .mux_type = { + CAM_VFE_CAMIF_VER_3_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_PDLIB_VER_1_0, + CAM_VFE_LCR_VER_1_0, + }, + .num_path_port_map = 2, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_LCR, CAM_ISP_IFE_OUT_RES_LCR}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + }, +}; + +static struct cam_irq_register_set vfe480_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x0000AA18, + .clear_reg_offset = 0x0000AA20, + 
.status_reg_offset = 0x0000AA28, + }, + { + .mask_reg_offset = 0x0000AA1C, + .clear_reg_offset = 0x0000AA24, + .status_reg_offset = 0x0000AA2C, + }, +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe480_ubwc_regs_client_0 = { + .meta_addr = 0x0000AC40, + .meta_cfg = 0x0000AC44, + .mode_cfg = 0x0000AC48, + .stats_ctrl = 0x0000AC4C, + .ctrl_2 = 0x0000AC50, + .lossy_thresh0 = 0x0000AC54, + .lossy_thresh1 = 0x0000AC58, + .off_lossy_var = 0x0000AC5C, + .bw_limit = 0x0000AC1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe480_ubwc_regs_client_1 = { + .meta_addr = 0x0000AD40, + .meta_cfg = 0x0000AD44, + .mode_cfg = 0x0000AD48, + .stats_ctrl = 0x0000AD4C, + .ctrl_2 = 0x0000AD50, + .lossy_thresh0 = 0x0000AD54, + .lossy_thresh1 = 0x0000AD58, + .off_lossy_var = 0x0000AD5C, + .bw_limit = 0x0000AD1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe480_ubwc_regs_client_4 = { + .meta_addr = 0x0000B040, + .meta_cfg = 0x0000B044, + .mode_cfg = 0x0000B048, + .stats_ctrl = 0x0000B04C, + .ctrl_2 = 0x0000B050, + .lossy_thresh0 = 0x0000B054, + .lossy_thresh1 = 0x0000B058, + .off_lossy_var = 0x0000B05C, + .bw_limit = 0x0000B01C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe480_ubwc_regs_client_5 = { + .meta_addr = 0x0000B140, + .meta_cfg = 0x0000B144, + .mode_cfg = 0x0000B148, + .stats_ctrl = 0x0000B14C, + .ctrl_2 = 0x0000B150, + .lossy_thresh0 = 0x0000B154, + .lossy_thresh1 = 0x0000B158, + .off_lossy_var = 0x0000B15C, + .bw_limit = 0x0000B11C, + .ubwc_comp_en_bit = BIT(1), +}; + +static uint32_t vfe480_out_port_mid[][4] = { + {8, 0, 0, 0}, + {9, 0, 0, 0}, + {10, 0, 0, 0}, + {32, 33, 34, 35}, + {16, 0, 0, 0}, + {17, 0, 0, 0}, + {11, 12, 0, 0}, + {20, 21, 22, 0}, + {25, 26, 0, 0}, + {40, 0, 0, 0}, + {41, 0, 0, 0}, + {42, 0, 0, 0}, + {43, 0, 0, 0}, + {44, 0, 0, 0}, + {45, 0, 0, 0}, + {46, 0, 0, 0}, + {47, 0, 0, 0}, + {48, 0, 0, 
0}, + {36, 37, 38, 39}, + {18, 0, 0, 0}, + {19, 0, 0, 0}, + {23, 24, 0, 0}, + {27, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe480_bus_hw_info = { + .common_reg = { + .hw_version = 0x0000AA00, + .cgc_ovd = 0x0000AA08, + .comp_cfg_0 = 0x0000AA0C, + .comp_cfg_1 = 0x0000AA10, + .if_frameheader_cfg = { + 0x0000AA34, + 0x0000AA38, + 0x0000AA3C, + 0x0000AA40, + 0x0000AA44, + 0x0000AA48, + }, + .ubwc_static_ctrl = 0x0000AA58, + .pwr_iso_cfg = 0x0000AA5C, + .overflow_status_clear = 0x0000AA60, + .ccif_violation_status = 0x0000AA64, + .overflow_status = 0x0000AA68, + .image_size_violation_status = 0x0000AA70, + .debug_status_top_cfg = 0x0000AAD4, + .debug_status_top = 0x0000AAD8, + .test_bus_ctrl = 0x0000AADC, + .top_irq_mask_0 = 0x0000003C, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe480_bus_irq_reg, + .global_irq_cmd_offset = 0x0000AA30, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = CAM_VFE_BUS_VER3_480_MAX_CLIENTS, + .bus_client_reg = { + /* BUS Client 0 FULL Y */ + { + .cfg = 0x0000AC00, + .image_addr = 0x0000AC04, + .frame_incr = 0x0000AC08, + .image_cfg_0 = 0x0000AC0C, + .image_cfg_1 = 0x0000AC10, + .image_cfg_2 = 0x0000AC14, + .packer_cfg = 0x0000AC18, + .frame_header_addr = 0x0000AC20, + .frame_header_incr = 0x0000AC24, + .frame_header_cfg = 0x0000AC28, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000AC30, + .irq_subsample_pattern = 0x0000AC34, + .framedrop_period = 0x0000AC38, + .framedrop_pattern = 0x0000AC3C, + .system_cache_cfg = 0x0000AC60, + .burst_limit = 0x0000AC64, + .addr_status_0 = 0x0000AC68, + .addr_status_1 = 0x0000AC6C, + .addr_status_2 = 0x0000AC70, + .addr_status_3 = 0x0000AC74, + .debug_status_cfg = 0x0000AC78, + .debug_status_0 = 0x0000AC7C, + .debug_status_1 = 0x0000AC80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &vfe480_ubwc_regs_client_0, 
+ .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 1 FULL C */ + { + .cfg = 0x0000AD00, + .image_addr = 0x0000AD04, + .frame_incr = 0x0000AD08, + .image_cfg_0 = 0x0000AD0C, + .image_cfg_1 = 0x0000AD10, + .image_cfg_2 = 0x0000AD14, + .packer_cfg = 0x0000AD18, + .frame_header_addr = 0x0000AD20, + .frame_header_incr = 0x0000AD24, + .frame_header_cfg = 0x0000AD28, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000AD30, + .irq_subsample_pattern = 0x0000AD34, + .framedrop_period = 0x0000AD38, + .framedrop_pattern = 0x0000AD3C, + .system_cache_cfg = 0x0000AD60, + .burst_limit = 0x0000AD64, + .addr_status_0 = 0x0000AD68, + .addr_status_1 = 0x0000AD6C, + .addr_status_2 = 0x0000AD70, + .addr_status_3 = 0x0000AD74, + .debug_status_cfg = 0x0000AD78, + .debug_status_0 = 0x0000AD7C, + .debug_status_1 = 0x0000AD80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &vfe480_ubwc_regs_client_1, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 2 DS4 */ + { + .cfg = 0x0000AE00, + .image_addr = 0x0000AE04, + .frame_incr = 0x0000AE08, + .image_cfg_0 = 0x0000AE0C, + .image_cfg_1 = 0x0000AE10, + .image_cfg_2 = 0x0000AE14, + .packer_cfg = 0x0000AE18, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000AE30, + .irq_subsample_pattern = 0x0000AE34, + .framedrop_period = 0x0000AE38, + .framedrop_pattern = 0x0000AE3C, + .system_cache_cfg = 0x0000AE60, + .burst_limit = 0x0000AE64, + .addr_status_0 = 0x0000AE68, + .addr_status_1 = 0x0000AE6C, + .addr_status_2 = 0x0000AE70, + .addr_status_3 = 0x0000AE74, 
+ .debug_status_cfg = 0x0000AE78, + .debug_status_0 = 0x0000AE7C, + .debug_status_1 = 0x0000AE80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 3 DS16 */ + { + .cfg = 0x0000AF00, + .image_addr = 0x0000AF04, + .frame_incr = 0x0000AF08, + .image_cfg_0 = 0x0000AF0C, + .image_cfg_1 = 0x0000AF10, + .image_cfg_2 = 0x0000AF14, + .packer_cfg = 0x0000AF18, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000AF30, + .irq_subsample_pattern = 0x0000AF34, + .framedrop_period = 0x0000AF38, + .framedrop_pattern = 0x0000AF3C, + .system_cache_cfg = 0x0000AF60, + .burst_limit = 0x0000AF64, + .addr_status_0 = 0x0000AF68, + .addr_status_1 = 0x0000AF6C, + .addr_status_2 = 0x0000AF70, + .addr_status_3 = 0x0000AF74, + .debug_status_cfg = 0x0000AF78, + .debug_status_0 = 0x0000AF7C, + .debug_status_1 = 0x0000AF80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 4 DISP Y */ + { + .cfg = 0x0000B000, + .image_addr = 0x0000B004, + .frame_incr = 0x0000B008, + .image_cfg_0 = 0x0000B00C, + .image_cfg_1 = 0x0000B010, + .image_cfg_2 = 0x0000B014, + .packer_cfg = 0x0000B018, + .frame_header_addr = 0x0000B020, + .frame_header_incr = 0x0000B024, + .frame_header_cfg = 0x0000B028, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B030, + .irq_subsample_pattern = 0x0000B034, + .framedrop_period = 0x0000B038, + .framedrop_pattern = 0x0000B03C, + .system_cache_cfg = 0x0000B060, + .burst_limit = 0x0000B064, + .addr_status_0 = 0x0000B068, + .addr_status_1 = 0x0000B06C, + .addr_status_2 = 0x0000B070, + .addr_status_3 = 0x0000B074, + .debug_status_cfg = 0x0000B078, + .debug_status_0 = 0x0000B07C, + .debug_status_1 = 0x0000B080, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = &vfe480_ubwc_regs_client_4, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + 
BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 5 DISP C */ + { + .cfg = 0x0000B100, + .image_addr = 0x0000B104, + .frame_incr = 0x0000B108, + .image_cfg_0 = 0x0000B10C, + .image_cfg_1 = 0x0000B110, + .image_cfg_2 = 0x0000B114, + .packer_cfg = 0x0000B118, + .frame_header_addr = 0x0000B120, + .frame_header_incr = 0x0000B124, + .frame_header_cfg = 0x0000B128, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B130, + .irq_subsample_pattern = 0x0000B134, + .framedrop_period = 0x0000B138, + .framedrop_pattern = 0x0000B13C, + .system_cache_cfg = 0x0000B160, + .burst_limit = 0x0000B164, + .addr_status_0 = 0x0000B168, + .addr_status_1 = 0x0000B16C, + .addr_status_2 = 0x0000B170, + .addr_status_3 = 0x0000B174, + .debug_status_cfg = 0x0000B178, + .debug_status_0 = 0x0000B17C, + .debug_status_1 = 0x0000B180, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = &vfe480_ubwc_regs_client_5, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 6 DISP DS4 */ + { + .cfg = 0x0000B200, + .image_addr = 0x0000B204, + .frame_incr = 0x0000B208, + .image_cfg_0 = 0x0000B20C, + .image_cfg_1 = 0x0000B210, + .image_cfg_2 = 0x0000B214, + .packer_cfg = 0x0000B218, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B230, + .irq_subsample_pattern = 0x0000B234, + .framedrop_period = 0x0000B238, + .framedrop_pattern = 0x0000B23C, + .system_cache_cfg = 0x0000B260, + .burst_limit = 0x0000B264, + .addr_status_0 = 0x0000B268, + .addr_status_1 = 0x0000B26C, + .addr_status_2 = 0x0000B270, + .addr_status_3 = 0x0000B274, + .debug_status_cfg = 0x0000B278, + .debug_status_0 = 0x0000B27C, + .debug_status_1 = 0x0000B280, + .comp_group = 
CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 7 DISP DS16 */ + { + .cfg = 0x0000B300, + .image_addr = 0x0000B304, + .frame_incr = 0x0000B308, + .image_cfg_0 = 0x0000B30C, + .image_cfg_1 = 0x0000B310, + .image_cfg_2 = 0x0000B314, + .packer_cfg = 0x0000B318, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B330, + .irq_subsample_pattern = 0x0000B334, + .framedrop_period = 0x0000B338, + .framedrop_pattern = 0x0000B33C, + .system_cache_cfg = 0x0000B360, + .burst_limit = 0x0000B364, + .addr_status_0 = 0x0000B368, + .addr_status_1 = 0x0000B36C, + .addr_status_2 = 0x0000B370, + .addr_status_3 = 0x0000B374, + .debug_status_cfg = 0x0000B378, + .debug_status_0 = 0x0000B37C, + .debug_status_1 = 0x0000B380, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 8 FD Y */ + { + .cfg = 0x0000B400, + .image_addr = 0x0000B404, + .frame_incr = 0x0000B408, + .image_cfg_0 = 0x0000B40C, + .image_cfg_1 = 0x0000B410, + .image_cfg_2 = 0x0000B414, + .packer_cfg = 0x0000B418, + .frame_header_addr = 0x0000B420, + .frame_header_incr = 0x0000B424, + .frame_header_cfg = 0x0000B428, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B430, + .irq_subsample_pattern = 0x0000B434, + .framedrop_period = 0x0000B438, + .framedrop_pattern = 0x0000B43C, + .system_cache_cfg = 0x0000B460, + .burst_limit = 0x0000B464, + .addr_status_0 = 0x0000B468, + .addr_status_1 = 0x0000B46C, + .addr_status_2 = 0x0000B470, + .addr_status_3 = 0x0000B474, + .debug_status_cfg = 0x0000B478, + .debug_status_0 = 0x0000B47C, + .debug_status_1 = 0x0000B480, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + 
BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 9 FD C */ + { + .cfg = 0x0000B500, + .image_addr = 0x0000B504, + .frame_incr = 0x0000B508, + .image_cfg_0 = 0x0000B50C, + .image_cfg_1 = 0x0000B510, + .image_cfg_2 = 0x0000B514, + .packer_cfg = 0x0000B518, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B530, + .irq_subsample_pattern = 0x0000B534, + .framedrop_period = 0x0000B538, + .framedrop_pattern = 0x0000B53C, + .system_cache_cfg = 0x0000B560, + .burst_limit = 0x0000B564, + .addr_status_0 = 0x0000B568, + .addr_status_1 = 0x0000B56C, + .addr_status_2 = 0x0000B570, + .addr_status_3 = 0x0000B574, + .debug_status_cfg = 0x0000B578, + .debug_status_0 = 0x0000B57C, + .debug_status_1 = 0x0000B580, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 10 PIXEL RAW */ + { + .cfg = 0x0000B600, + .image_addr = 0x0000B604, + .frame_incr = 0x0000B608, + .image_cfg_0 = 0x0000B60C, + .image_cfg_1 = 0x0000B610, + .image_cfg_2 = 0x0000B614, + .packer_cfg = 0x0000B618, + .frame_header_addr = 0x0000B620, + .frame_header_incr = 0x0000B624, + .frame_header_cfg = 0x0000B628, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B630, + .irq_subsample_pattern = 0x0000B634, + .framedrop_period = 0x0000B638, + .framedrop_pattern = 0x0000B63C, + .system_cache_cfg = 0x0000B660, + .burst_limit = 0x0000B664, + .addr_status_0 = 0x0000B668, + .addr_status_1 = 0x0000B66C, + .addr_status_2 = 0x0000B670, + .addr_status_3 = 0x0000B674, + .debug_status_cfg = 0x0000B678, + .debug_status_0 = 0x0000B67C, + .debug_status_1 = 0x0000B680, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | 
BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | + BIT_ULL(CAM_FORMAT_PLAIN8) | BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 11 CAMIF PD */ + { + .cfg = 0x0000B700, + .image_addr = 0x0000B704, + .frame_incr = 0x0000B708, + .image_cfg_0 = 0x0000B70C, + .image_cfg_1 = 0x0000B710, + .image_cfg_2 = 0x0000B714, + .packer_cfg = 0x0000B718, + .frame_header_addr = 0x0000B720, + .frame_header_incr = 0x0000B724, + .frame_header_cfg = 0x0000B728, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B730, + .irq_subsample_pattern = 0x0000B734, + .framedrop_period = 0x0000B738, + .framedrop_pattern = 0x0000B73C, + .system_cache_cfg = 0x0000B760, + .burst_limit = 0x0000B764, + .addr_status_0 = 0x0000B768, + .addr_status_1 = 0x0000B76C, + .addr_status_2 = 0x0000B770, + .addr_status_3 = 0x0000B774, + .debug_status_cfg = 0x0000B778, + .debug_status_0 = 0x0000B77C, + .debug_status_1 = 0x0000B780, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_PLAIN128) | BIT_ULL(CAM_FORMAT_PLAIN8), + }, + /* BUS Client 12 STATS HDR BE */ + { + .cfg = 0x0000B800, + .image_addr = 0x0000B804, + .frame_incr = 0x0000B808, + .image_cfg_0 = 0x0000B80C, + .image_cfg_1 = 0x0000B810, + .image_cfg_2 = 0x0000B814, + .packer_cfg = 0x0000B818, + .frame_header_addr = 0x0000B820, + .frame_header_incr = 0x0000B824, + .frame_header_cfg = 0x0000B828, + .line_done_cfg 
= 0, + .irq_subsample_period = 0x0000B830, + .irq_subsample_pattern = 0x0000B834, + .framedrop_period = 0x0000B838, + .framedrop_pattern = 0x0000B83C, + .system_cache_cfg = 0x0000B860, + .burst_limit = 0x0000B864, + .addr_status_0 = 0x0000B868, + .addr_status_1 = 0x0000B86C, + .addr_status_2 = 0x0000B870, + .addr_status_3 = 0x0000B874, + .debug_status_cfg = 0x0000B878, + .debug_status_0 = 0x0000B87C, + .debug_status_1 = 0x0000B880, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_5, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 13 STATS HDR BHIST */ + { + .cfg = 0x0000B900, + .image_addr = 0x0000B904, + .frame_incr = 0x0000B908, + .image_cfg_0 = 0x0000B90C, + .image_cfg_1 = 0x0000B910, + .image_cfg_2 = 0x0000B914, + .packer_cfg = 0x0000B918, + .frame_header_addr = 0x0000B920, + .frame_header_incr = 0x0000B924, + .frame_header_cfg = 0x0000B928, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000B930, + .irq_subsample_pattern = 0x0000B934, + .framedrop_period = 0x0000B938, + .framedrop_pattern = 0x0000B93C, + .system_cache_cfg = 0x0000B960, + .burst_limit = 0x0000B964, + .addr_status_0 = 0x0000B968, + .addr_status_1 = 0x0000B96C, + .addr_status_2 = 0x0000B970, + .addr_status_3 = 0x0000B974, + .debug_status_cfg = 0x0000B978, + .debug_status_0 = 0x0000B97C, + .debug_status_1 = 0x0000B980, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_5, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 14 STATS TINTLESS BG */ + { + .cfg = 0x0000BA00, + .image_addr = 0x0000BA04, + .frame_incr = 0x0000BA08, + .image_cfg_0 = 0x0000BA0C, + .image_cfg_1 = 0x0000BA10, + .image_cfg_2 = 0x0000BA14, + .packer_cfg = 0x0000BA18, + .frame_header_addr = 0x0000BA20, + .frame_header_incr = 0x0000BA24, + .frame_header_cfg = 0x0000BA28, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000BA30, + .irq_subsample_pattern = 0x0000BA34, + .framedrop_period = 0x0000BA38, + .framedrop_pattern = 0x0000BA3C, + .system_cache_cfg = 0x0000BA60, + 
.burst_limit = 0x0000BA64, + .addr_status_0 = 0x0000BA68, + .addr_status_1 = 0x0000BA6C, + .addr_status_2 = 0x0000BA70, + .addr_status_3 = 0x0000BA74, + .debug_status_cfg = 0x0000BA78, + .debug_status_0 = 0x0000BA7C, + .debug_status_1 = 0x0000BA80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 15 STATS AWB BG */ + { + .cfg = 0x0000BB00, + .image_addr = 0x0000BB04, + .frame_incr = 0x0000BB08, + .image_cfg_0 = 0x0000BB0C, + .image_cfg_1 = 0x0000BB10, + .image_cfg_2 = 0x0000BB14, + .packer_cfg = 0x0000BB18, + .frame_header_addr = 0x0000BB20, + .frame_header_incr = 0x0000BB24, + .frame_header_cfg = 0x0000BB28, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000BB30, + .irq_subsample_pattern = 0x0000BB34, + .framedrop_period = 0x0000BB38, + .framedrop_pattern = 0x0000BB3C, + .system_cache_cfg = 0x0000BB60, + .burst_limit = 0x0000BB64, + .addr_status_0 = 0x0000BB68, + .addr_status_1 = 0x0000BB6C, + .addr_status_2 = 0x0000BB70, + .addr_status_3 = 0x0000BB74, + .debug_status_cfg = 0x0000BB78, + .debug_status_0 = 0x0000BB7C, + .debug_status_1 = 0x0000BB80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 16 STATS BHIST */ + { + .cfg = 0x0000BC00, + .image_addr = 0x0000BC04, + .frame_incr = 0x0000BC08, + .image_cfg_0 = 0x0000BC0C, + .image_cfg_1 = 0x0000BC10, + .image_cfg_2 = 0x0000BC14, + .packer_cfg = 0x0000BC18, + .frame_header_addr = 0x0000BC20, + .frame_header_incr = 0x0000BC24, + .frame_header_cfg = 0x0000BC28, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000BC30, + .irq_subsample_pattern = 0x0000BC34, + .framedrop_period = 0x0000BC38, + .framedrop_pattern = 0x0000BC3C, + .system_cache_cfg = 0x0000BC60, + .burst_limit = 0x0000BC64, + .addr_status_0 = 0x0000BC68, + .addr_status_1 = 0x0000BC6C, + .addr_status_2 = 0x0000BC70, + .addr_status_3 = 0x0000BC74, + .debug_status_cfg = 0x0000BC78, + 
.debug_status_0 = 0x0000BC7C, + .debug_status_1 = 0x0000BC80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 17 STATS RS */ + { + .cfg = 0x0000BD00, + .image_addr = 0x0000BD04, + .frame_incr = 0x0000BD08, + .image_cfg_0 = 0x0000BD0C, + .image_cfg_1 = 0x0000BD10, + .image_cfg_2 = 0x0000BD14, + .packer_cfg = 0x0000BD18, + .frame_header_addr = 0x0000BD20, + .frame_header_incr = 0x0000BD24, + .frame_header_cfg = 0x0000BD28, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000BD30, + .irq_subsample_pattern = 0x0000BD34, + .framedrop_period = 0x0000BD38, + .framedrop_pattern = 0x0000BD3C, + .system_cache_cfg = 0x0000BD60, + .burst_limit = 0x0000BD64, + .addr_status_0 = 0x0000BD68, + .addr_status_1 = 0x0000BD6C, + .addr_status_2 = 0x0000BD70, + .addr_status_3 = 0x0000BD74, + .debug_status_cfg = 0x0000BD78, + .debug_status_0 = 0x0000BD7C, + .debug_status_1 = 0x0000BD80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 18 STATS CS */ + { + .cfg = 0x0000BE00, + .image_addr = 0x0000BE04, + .frame_incr = 0x0000BE08, + .image_cfg_0 = 0x0000BE0C, + .image_cfg_1 = 0x0000BE10, + .image_cfg_2 = 0x0000BE14, + .packer_cfg = 0x0000BE18, + .frame_header_addr = 0x0000BE20, + .frame_header_incr = 0x0000BE24, + .frame_header_cfg = 0x0000BE28, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000BE30, + .irq_subsample_pattern = 0x0000BE34, + .framedrop_period = 0x0000BE38, + .framedrop_pattern = 0x0000BE3C, + .system_cache_cfg = 0x0000BE60, + .burst_limit = 0x0000BE64, + .addr_status_0 = 0x0000BE68, + .addr_status_1 = 0x0000BE6C, + .addr_status_2 = 0x0000BE70, + .addr_status_3 = 0x0000BE74, + .debug_status_cfg = 0x0000BE78, + .debug_status_0 = 0x0000BE7C, + .debug_status_1 = 0x0000BE80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 19 STATS 
IHIST */ + { + .cfg = 0x0000BF00, + .image_addr = 0x0000BF04, + .frame_incr = 0x0000BF08, + .image_cfg_0 = 0x0000BF0C, + .image_cfg_1 = 0x0000BF10, + .image_cfg_2 = 0x0000BF14, + .packer_cfg = 0x0000BF18, + .frame_header_addr = 0x0000BF20, + .frame_header_incr = 0x0000BF24, + .frame_header_cfg = 0x0000BF28, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000BF30, + .irq_subsample_pattern = 0x0000BF34, + .framedrop_period = 0x0000BF38, + .framedrop_pattern = 0x0000BF3C, + .system_cache_cfg = 0x0000BF60, + .burst_limit = 0x0000BF64, + .addr_status_0 = 0x0000BF68, + .addr_status_1 = 0x0000BF6C, + .addr_status_2 = 0x0000BF70, + .addr_status_3 = 0x0000BF74, + .debug_status_cfg = 0x0000BF78, + .debug_status_0 = 0x0000BF7C, + .debug_status_1 = 0x0000BF80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 20 STATS BF */ + { + .cfg = 0x0000C000, + .image_addr = 0x0000C004, + .frame_incr = 0x0000C008, + .image_cfg_0 = 0x0000C00C, + .image_cfg_1 = 0x0000C010, + .image_cfg_2 = 0x0000C014, + .packer_cfg = 0x0000C018, + .frame_header_addr = 0x0000C020, + .frame_header_incr = 0x0000C024, + .frame_header_cfg = 0x0000C028, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000C030, + .irq_subsample_pattern = 0x0000C034, + .framedrop_period = 0x0000C038, + .framedrop_pattern = 0x0000C03C, + .system_cache_cfg = 0x0000C060, + .burst_limit = 0x0000C064, + .addr_status_0 = 0x0000C068, + .addr_status_1 = 0x0000C06C, + .addr_status_2 = 0x0000C070, + .addr_status_3 = 0x0000C074, + .debug_status_cfg = 0x0000C078, + .debug_status_0 = 0x0000C07C, + .debug_status_1 = 0x0000C080, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_8, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 21 PDAF V2.0 */ + { + .cfg = 0x0000C100, + .image_addr = 0x0000C104, + .frame_incr = 0x0000C108, + .image_cfg_0 = 0x0000C10C, + .image_cfg_1 = 0x0000C110, + .image_cfg_2 = 0x0000C114, + .packer_cfg = 
0x0000C118, + .frame_header_addr = 0x0000C120, + .frame_header_incr = 0x0000C124, + .frame_header_cfg = 0x0000C128, + .line_done_cfg = 0x0000C12C, + .irq_subsample_period = 0x0000C130, + .irq_subsample_pattern = 0x0000C134, + .framedrop_period = 0x0000C138, + .framedrop_pattern = 0x0000C13C, + .system_cache_cfg = 0x0000C160, + .burst_limit = 0x0000C164, + .addr_status_0 = 0x0000C168, + .addr_status_1 = 0x0000C16C, + .addr_status_2 = 0x0000C170, + .addr_status_3 = 0x0000C174, + .debug_status_cfg = 0x0000C178, + .debug_status_0 = 0x0000C17C, + .debug_status_1 = 0x0000C180, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_9, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 22 LCR */ + { + .cfg = 0x0000C200, + .image_addr = 0x0000C204, + .frame_incr = 0x0000C208, + .image_cfg_0 = 0x0000C20C, + .image_cfg_1 = 0x0000C210, + .image_cfg_2 = 0x0000C214, + .packer_cfg = 0x0000C218, + .frame_header_addr = 0x0000C220, + .frame_header_incr = 0x0000C224, + .frame_header_cfg = 0x0000C228, + .line_done_cfg = 0, + .irq_subsample_period = 0x0000C230, + .irq_subsample_pattern = 0x0000C234, + .framedrop_period = 0x0000C238, + .framedrop_pattern = 0x0000C23C, + .system_cache_cfg = 0x0000C260, + .burst_limit = 0x0000C264, + .addr_status_0 = 0x0000C268, + .addr_status_1 = 0x0000C26C, + .addr_status_2 = 0x0000C270, + .addr_status_3 = 0x0000C274, + .debug_status_cfg = 0x0000C278, + .debug_status_0 = 0x0000C27C, + .debug_status_1 = 0x0000C280, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_10, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 23 RDI0 */ + { + .cfg = 0x0000C300, + .image_addr = 0x0000C304, + .frame_incr = 0x0000C308, + .image_cfg_0 = 0x0000C30C, + .image_cfg_1 = 0x0000C310, + .image_cfg_2 = 0x0000C314, + .packer_cfg = 0x0000C318, + .frame_header_addr = 0x0000C320, + .frame_header_incr = 0x0000C324, + .frame_header_cfg = 0x0000C328, + .line_done_cfg = 0x0000C32C, + .irq_subsample_period = 0x0000C330, + 
.irq_subsample_pattern = 0x0000C334, + .framedrop_period = 0x0000C338, + .framedrop_pattern = 0x0000C33C, + .system_cache_cfg = 0x0000C360, + .burst_limit = 0x0000C364, + .addr_status_0 = 0x0000C368, + .addr_status_1 = 0x0000C36C, + .addr_status_2 = 0x0000C370, + .addr_status_3 = 0x0000C374, + .debug_status_cfg = 0x0000C378, + .debug_status_0 = 0x0000C37C, + .debug_status_1 = 0x0000C380, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_11, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 24 RDI1 */ + { + .cfg = 0x0000C400, + .image_addr = 0x0000C404, + .frame_incr = 0x0000C408, + .image_cfg_0 = 0x0000C40C, + .image_cfg_1 = 0x0000C410, + .image_cfg_2 = 0x0000C414, + .packer_cfg = 0x0000C418, + .frame_header_addr = 0x0000C420, + .frame_header_incr = 0x0000C424, + .frame_header_cfg = 0x0000C428, + .line_done_cfg = 0x0000C42C, + .irq_subsample_period = 0x0000C430, + .irq_subsample_pattern = 0x0000C434, + .framedrop_period = 0x0000C438, + .framedrop_pattern = 0x0000C43C, + .system_cache_cfg = 0x0000C460, + .burst_limit = 0x0000C464, + .addr_status_0 = 0x0000C468, + .addr_status_1 = 0x0000C46C, + .addr_status_2 = 0x0000C470, + .addr_status_3 = 0x0000C474, + .debug_status_cfg = 0x0000C478, + .debug_status_0 = 0x0000C47C, + .debug_status_1 = 0x0000C480, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_12, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) 
| BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 25 RDI2 */ + { + .cfg = 0x0000C500, + .image_addr = 0x0000C504, + .frame_incr = 0x0000C508, + .image_cfg_0 = 0x0000C50C, + .image_cfg_1 = 0x0000C510, + .image_cfg_2 = 0x0000C514, + .packer_cfg = 0x0000C518, + .frame_header_addr = 0x0000C520, + .frame_header_incr = 0x0000C524, + .frame_header_cfg = 0x0000C528, + .line_done_cfg = 0x0000C52C, + .irq_subsample_period = 0x0000C530, + .irq_subsample_pattern = 0x0000C534, + .framedrop_period = 0x0000C538, + .framedrop_pattern = 0x0000C53C, + .system_cache_cfg = 0x0000C560, + .burst_limit = 0x0000C564, + .addr_status_0 = 0x0000C568, + .addr_status_1 = 0x0000C56C, + .addr_status_2 = 0x0000C570, + .addr_status_3 = 0x0000C574, + .debug_status_cfg = 0x0000C578, + .debug_status_0 = 0x0000C57C, + .debug_status_1 = 0x0000C580, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_13, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + }, + .num_out = 23, + .vfe_out_hw_info = 
{ + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .mid = vfe480_out_port_mid[0], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 23, + }, + .name = { + "RDI_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .mid = vfe480_out_port_mid[1], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 24, + }, + .name = { + "RDI_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_5, + .mid = vfe480_out_port_mid[2], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 25, + }, + .name = { + "RDI_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL, + .max_width = 4096, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[3], + .num_mid = 4, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 0, + 1, + }, + .name = { + "FULL_Y", + "FULL_C", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[4], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 2, + }, + .name = { + "DS_4", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[5], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 3, + }, + .name = { + "DS_16", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[6], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 10, + }, + .name = { + "PIXEL_RAW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD, + .max_width = 
1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[7], + .num_mid = 3, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 8, + 9, + }, + .name = { + "FD_Y", + "FD_C", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PDAF, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[8], + .num_mid = 2, + .num_wm = 1, + .wm_idx = { + 11, + }, + .name = { + "PDAF", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[9], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 12, + }, + .name = { + "STATS_HDR_BE", + }, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[10], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 13, + }, + .name = { + "STATS_HDR_BHIST", + }, + + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[11], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 14, + }, + .name = { + "STATS_TL_BG", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[12], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 20, + }, + .name = { + "STATS_BF", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[13], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 15, + }, + .name = { + "STATS_AWB_BGB", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = 
CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[14], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 16, + }, + .name = { + "STATS_BHIST", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[15], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 17, + }, + .name = { + "STATS_RS", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[16], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 18, + }, + .name = { + "STATS_CS", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[17], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 19, + }, + .name = { + "IHIST", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP, + .max_width = 4096, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[18], + .num_mid = 4, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 4, + 5, + }, + .name = { + "FULL_DISP_Y", + "FULL_DISP_C", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + + .mid = vfe480_out_port_mid[19], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 6, + }, + .name = { + "DISP_DS_4", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe480_out_port_mid[20], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 7, + }, + .name = { + "DISP_DS_16", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD, + .max_width = 1920, + .max_height = 1080, + .source_group = 
CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe480_out_port_mid[21], + .num_mid = 2, + .num_wm = 1, + .wm_idx = { + 21, + }, + .name = { + "2PD", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LCR, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .mid = vfe480_out_port_mid[22], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 22, + }, + .name = { + "LCR", + }, + }, + }, + .num_cons_err = 21, + .constraint_error_list = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input not supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input not supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input not supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input not supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format not supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 BPP format not supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 32 BPP format not supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 64 BPP format not supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack 128 BPP format not supported" + }, + { + .bitmask = BIT(9), + .error_description = "UBWC NV12 format not supported" + }, + { + .bitmask = BIT(10), + .error_description = "UBWC NV12 4R format not supported" + }, + { + .bitmask = BIT(11), + .error_description = "UBWC TP10 format not supported" + }, + { + .bitmask = BIT(12), + .error_description = "Frame based Mode not supported" + }, + { + .bitmask = BIT(13), + .error_description = "Index based Mode not supported" + }, + { + .bitmask = BIT(14), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(16), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(17), + .error_description = "X Initialization 
unalign"
		},
		{
			.bitmask = BIT(18),
			.error_description = "Image Width unalign"
		},
		{
			.bitmask = BIT(19),
			.error_description = "Image Height unalign"
		},
		{
			.bitmask = BIT(20),
			.error_description = "Meta Stride unalign"
		},
	},

	/* 14 composite groups; one buf-done IRQ bit per group (BIT(6)..BIT(19),
	 * 14 entries, matching .num_comp_grp). */
	.num_comp_grp = 14,
	.comp_done_mask = {
		BIT(6), BIT(7), BIT(8), BIT(9), BIT(10),
		BIT(11), BIT(12), BIT(13), BIT(14),
		BIT(15), BIT(16), BIT(17), BIT(18), BIT(19),
	},
	.top_irq_shift = 7,
	.support_consumed_addr = true,
	.max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 25,
	.supported_irq = CAM_VFE_HW_IRQ_CAP_BUF_DONE | CAM_VFE_HW_IRQ_CAP_RUP,
	.comp_cfg_needed = true,
	.pack_align_shift = 4,
	.support_burst_limit = true,
};

/*
 * IRQ register bank for the VFE480 bus-read (fetch) engine: a single
 * mask/clear/status register set in the 0xA810 offset block.
 */
static struct cam_irq_register_set vfe480_bus_rd_irq_reg[1] = {
	{
		.mask_reg_offset   = 0x0000A810,
		.clear_reg_offset  = 0x0000A814,
		.status_reg_offset = 0x0000A81C,
	},
};

/*
 * VFE480 bus-read (BUS_RD ver1) hardware description: common register
 * offsets, one read client, and a single RDI0 fetch resource.
 */
static struct cam_vfe_bus_rd_ver1_hw_info vfe480_bus_rd_hw_info = {
	.common_reg = {
		.hw_version    = 0x0000A800,
		.hw_capability = 0x0000A804,
		.sw_reset      = 0x0000A808,
		.cgc_ovd       = 0x0000A80C,
		.pwr_iso_cfg   = 0x0000A834,
		.input_if_cmd  = 0x0000A820,
		.test_bus_ctrl = 0x0000A848,
		.irq_reg_info = {
			.num_registers = 1,
			.irq_reg_set = vfe480_bus_rd_irq_reg,
			.global_irq_cmd_offset = 0x0000A818,
			.global_clear_bitmask  = 0x00000001,
			.clear_all_bitmask     = 0xFFFFFFFF,
		},
	},
	.num_client = 1,
	.bus_client_reg = {
		/* BUS Client 0 (the only read client) */
		{
			.cfg                    = 0x0000A850,
			.image_addr             = 0x0000A858,
			.buf_size               = 0x0000A85C,
			.stride                 = 0x0000A860,
			.unpacker_cfg           = 0x0000A864,
			.latency_buf_allocation = 0x0000A878,
			.burst_limit            = 0x0000A880,
		},
	},
	.num_bus_rd_resc = 1,
	.vfe_bus_rd_hw_info = {
		{
			.vfe_bus_rd_type = CAM_VFE_BUS_RD_VER1_VFE_BUSRD_RDI0,
			/* -1 presumably means "no size restriction" -- confirm
			 * against the bus_rd driver's interpretation. */
			.max_width  = -1,
			.max_height = -1,
		},
	},
	.top_irq_shift = 8,
};

/* Top-level IRQ description for VFE480 (CSID-interrupt capable). */
static struct cam_vfe_irq_hw_info vfe480_irq_hw_info = {
	.reset_mask    = BIT(0),
	.supported_irq = CAM_VFE_HW_IRQ_CAP_INT_CSID,
	.top_irq_reg   = &vfe480_top_irq_reg_info,
+}; + +static struct cam_vfe_hw_info cam_vfe480_hw_info = { + .irq_hw_info = &vfe480_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe480_bus_hw_info, + + .bus_rd_version = CAM_VFE_BUS_RD_VER_1_0, + .bus_rd_hw_info = &vfe480_bus_rd_hw_info, + + .top_version = CAM_VFE_TOP_VER_3_0, + .top_hw_info = &vfe480_top_hw_info, +}; + +#endif /* _CAM_VFE480_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe570.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe570.h new file mode 100644 index 0000000000..29df235d8d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe570.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + + +#ifndef _CAM_VFE570_H_ +#define _CAM_VFE570_H_ +#include "cam_vfe480.h" +#include "cam_vfe_top_ver3.h" +#include "cam_vfe_core.h" + +static struct cam_vfe_camif_ver3_reg_data vfe_570_camif_reg_data = { + .pp_extern_reg_update_shift = 4, + .dual_pd_extern_reg_update_shift = 17, + .extern_reg_update_mask = 1, + .dual_ife_pix_en_shift = 3, + .dual_ife_sync_sel_shift = 18, + .operating_mode_shift = 11, + .input_mux_sel_shift = 5, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x7, + .dsp_mode_shift = 24, + .dsp_mode_mask = 0x1, + .dsp_en_shift = 23, + .dsp_en_mask = 0x1, + .reg_update_cmd_data = 0x41, + .epoch_line_cfg = 0x00000014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .epoch1_irq_mask = 0x00000008, + .eof_irq_mask = 0x00000002, + .error_irq_mask0 = 0x82000200, + .error_irq_mask2 = 0x30301F80, + .subscribe_irq_mask1 = 0x00000007, + .enable_diagnostic_hw = 0x1, + .pp_camif_cfg_en_shift = 0, + .pp_camif_cfg_ife_out_en_shift = 8, + .top_debug_cfg_en = 1, + .dual_vfe_sync_mask = 0x3, + .input_bayer_fmt = 0, + .input_yuv_fmt = 1, + +}; + +static struct 
cam_vfe_camif_lite_ver3_reg_data vfe570_camif_rdi1_reg_data = { + .extern_reg_update_shift = 0, + .input_mux_sel_shift = 7, + .reg_update_cmd_data = 0x4, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x100, + .epoch0_irq_mask = 0x400, + .epoch1_irq_mask = 0x800, + .eof_irq_mask = 0x200, + .error_irq_mask0 = 0x10000000, + .error_irq_mask2 = 0x40000, + .subscribe_irq_mask1 = 0x300, + .enable_diagnostic_hw = 0x1, +}; + +struct cam_vfe_camif_lite_ver3_hw_info + vfe570_rdi_hw_info_arr[CAM_VFE_RDI_VER2_MAX] = { + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[0], + .reg_data = &vfe480_camif_rdi_reg_data[0], + }, + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[1], + .reg_data = &vfe570_camif_rdi1_reg_data, + }, + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[2], + .reg_data = &vfe480_camif_rdi_reg_data[2], + }, +}; + +static struct cam_vfe_top_ver3_hw_info vfe570_top_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_reg = &vfe480_camif_reg, + .reg_data = &vfe_570_camif_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_pd, + .reg_data = &vfe480_camif_pd_reg_data, + }, + .rdi_hw_info[0] = &vfe570_rdi_hw_info_arr[0], + .rdi_hw_info[1] = &vfe570_rdi_hw_info_arr[1], + .rdi_hw_info[2] = &vfe570_rdi_hw_info_arr[2], + .lcr_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_lcr, + .reg_data = &vfe480_camif_lcr_reg_data, + }, + .num_mux = 6, + .mux_type = { + CAM_VFE_CAMIF_VER_3_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_PDLIB_VER_1_0, + CAM_VFE_LCR_VER_1_0, + }, + .num_path_port_map = 2, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_LCR, CAM_ISP_IFE_OUT_RES_LCR}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + }, +}; + +static struct cam_vfe_hw_info cam_vfe570_hw_info = { + 
.irq_hw_info = &vfe480_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe480_bus_hw_info, + + .bus_rd_version = CAM_VFE_BUS_RD_VER_1_0, + .bus_rd_hw_info = &vfe480_bus_rd_hw_info, + + .top_version = CAM_VFE_TOP_VER_3_0, + .top_hw_info = &vfe570_top_hw_info, +}; + +#endif /* _CAM_VFE570_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe580.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe580.h new file mode 100644 index 0000000000..0a7b89884c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe580.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + */ + + +#ifndef _CAM_VFE580_H_ +#define _CAM_VFE580_H_ +#include "cam_vfe480.h" +#include "cam_vfe_top_ver3.h" +#include "cam_vfe_core.h" + +static struct cam_vfe_camif_ver3_reg_data vfe_580_camif_reg_data = { + .pp_extern_reg_update_shift = 4, + .dual_pd_extern_reg_update_shift = 17, + .extern_reg_update_mask = 1, + .dual_ife_pix_en_shift = 3, + .dual_ife_sync_sel_shift = 18, + .operating_mode_shift = 11, + .input_mux_sel_shift = 5, + .pixel_pattern_shift = 24, + .pixel_pattern_mask = 0x7, + .dsp_mode_shift = 24, + .dsp_mode_mask = 0x1, + .dsp_en_shift = 23, + .dsp_en_mask = 0x1, + .reg_update_cmd_data = 0x41, + .epoch_line_cfg = 0x00000014, + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x00000004, + .epoch1_irq_mask = 0x00000008, + .eof_irq_mask = 0x00000002, + .error_irq_mask0 = 0x82000200, + .error_irq_mask2 = 0x30301F80, + .subscribe_irq_mask1 = 0x00000007, + .enable_diagnostic_hw = 0x1, + .pp_camif_cfg_en_shift = 0, + .pp_camif_cfg_ife_out_en_shift = 8, + .top_debug_cfg_en = 1, + .dual_vfe_sync_mask = 0x3, + .input_bayer_fmt = 0, + .input_yuv_fmt = 1, + +}; + +static struct cam_vfe_camif_lite_ver3_reg_data vfe580_camif_rdi1_reg_data = { + 
.extern_reg_update_shift = 0, + .input_mux_sel_shift = 7, + .reg_update_cmd_data = 0x4, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x100, + .epoch0_irq_mask = 0x400, + .epoch1_irq_mask = 0x800, + .eof_irq_mask = 0x200, + .error_irq_mask0 = 0x10000000, + .error_irq_mask2 = 0x40000, + .subscribe_irq_mask1 = 0x300, + .enable_diagnostic_hw = 0x1, +}; + +struct cam_vfe_camif_lite_ver3_hw_info + vfe580_rdi_hw_info_arr[CAM_VFE_RDI_VER2_MAX] = { + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[0], + .reg_data = &vfe480_camif_rdi_reg_data[0], + }, + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[1], + .reg_data = &vfe580_camif_rdi1_reg_data, + }, + { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_rdi[2], + .reg_data = &vfe480_camif_rdi_reg_data[2], + }, +}; + +static struct cam_vfe_top_ver3_hw_info vfe580_top_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_reg = &vfe480_camif_reg, + .reg_data = &vfe_580_camif_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_pd, + .reg_data = &vfe480_camif_pd_reg_data, + }, + .rdi_hw_info[0] = &vfe580_rdi_hw_info_arr[0], + .rdi_hw_info[1] = &vfe580_rdi_hw_info_arr[1], + .rdi_hw_info[2] = &vfe580_rdi_hw_info_arr[2], + .lcr_hw_info = { + .common_reg = &vfe480_top_common_reg, + .camif_lite_reg = &vfe480_camif_lcr, + .reg_data = &vfe480_camif_lcr_reg_data, + }, + .num_mux = 6, + .mux_type = { + CAM_VFE_CAMIF_VER_3_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_PDLIB_VER_1_0, + CAM_VFE_LCR_VER_1_0, + }, + .num_path_port_map = 2, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_LCR, CAM_ISP_IFE_OUT_RES_LCR}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + }, +}; + +static struct cam_vfe_hw_info cam_vfe580_hw_info = { + .irq_hw_info = &vfe480_irq_hw_info, + + .bus_version = 
CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe480_bus_hw_info, + + .bus_rd_version = CAM_VFE_BUS_RD_VER_1_0, + .bus_rd_hw_info = &vfe480_bus_rd_hw_info, + + .top_version = CAM_VFE_TOP_VER_3_0, + .top_hw_info = &vfe580_top_hw_info, +}; + +#endif /* _CAM_VFE580_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe680.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe680.h new file mode 100644 index 0000000000..ce1e5eeb8c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe680.h @@ -0,0 +1,2498 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_VFE680_H_ +#define _CAM_VFE680_H_ +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_VFE_BUS_VER3_680_MAX_CLIENTS 28 +#define CAM_VFE_680_NUM_DBG_REG 17 + +static struct cam_vfe_top_ver4_module_desc vfe680_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_DEMUX", + }, + { + .id = 1, + .desc = "CLC_CHANNEL_GAIN", + }, + { + .id = 2, + .desc = "CLC_BPC_PDPC", + }, + { + .id = 3, + .desc = "CLC_BINCORRECT", + }, + { + .id = 4, + .desc = "CLC_COMPDECOMP", + }, + { + .id = 5, + .desc = "CLC_LSC", + }, + { + .id = 6, + .desc = "CLC_WB_GAIN", + }, + { + .id = 7, + .desc = "CLC_GIC", + }, + { + .id = 8, + .desc = "CLC_BPC_ABF", + }, + { + .id = 9, + .desc = "CLC_BLS", + }, + { + .id = 10, + .desc = "CLC_BAYER_GTM", + }, + { + .id = 11, + .desc = "CLC_BAYER_LTM", + }, + { + .id = 12, + .desc = "CLC_LCAC", + }, + { + .id = 13, + .desc = "CLC_DEMOSAIC", + }, + { + .id = 14, + .desc = "CLC_COLOR_CORRECT", + }, + { + .id = 15, + .desc = "CLC_GTM", + }, + { + .id = 16, + .desc = "CLC_GLUT", + }, + { + .id = 17, + .desc = "CLC_COLOR_TRANSFORM", + }, 
+ { + .id = 18, + .desc = "CLC_UVG", + }, + { + .id = 19, + .desc = "CLC_PREPROCESSOR", + }, + { + .id = 20, + .desc = "CLC_CHROMA_UP", + }, + { + .id = 21, + .desc = "CLC_SPARSE_PD_EXT", + }, + { + .id = 22, + .desc = "CLC_LCR", + }, + { + .id = 23, + .desc = "CLC_COMPDECOMP_HVX_TX", + }, + { + .id = 24, + .desc = "CLC_COMPDECOMP_HVX_RX", + }, + { + .id = 25, + .desc = "CLC_GTM_FD_OUT", + }, + { + .id = 26, + .desc = "CLC_CROP_RND_CLAMP_PIXEL_RAW_OUT", + }, + { + .id = 27, + .desc = "CLC_DOWNSCALE_MN_Y_FD_OUT", + }, + { + .id = 28, + .desc = "CLC_DOWNSCALE_MN_C_FD_OUT", + }, + { + .id = 29, + .desc = "CLC_CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_FD_OUT", + }, + { + .id = 30, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_FD_OUT", + }, + { + .id = 31, + .desc = "CLC_DOWNSCALE_MN_Y_DISP_OUT", + }, + { + .id = 32, + .desc = "CLC_DOWNSCALE_MN_C_DISP_OUT", + }, + { + .id = 33, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_DISP_OUT", + }, + { + .id = 34, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_DISP_OUT", + }, + { + .id = 35, + .desc = "CLC_DOWNSCALE_4TO1_Y_DISP_DS4_OUT", + }, + { + .id = 36, + .desc = "CLC_DOWNSCALE_4TO1_C_DISP_DS4_OUT", + }, + { + .id = 37, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS4_OUT", + }, + { + .id = 38, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS4_OUT", + }, + { + .id = 39, + .desc = "CLC_DOWNSCALE_4TO1_Y_DISP_DS16_OUT", + }, + { + .id = 40, + .desc = "CLC_DOWNSCALE_4TO1_C_DISP_DS16_OUT", + }, + { + .id = 41, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS16_OUT", + }, + { + .id = 42, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS16_OUT", + }, + { + .id = 43, + .desc = "CLC_DOWNSCALE_MN_Y_VID_OUT", + }, + { + .id = 44, + .desc = "CLC_DOWNSCALE_MN_C_VID_OUT", + }, + { + .id = 45, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_VID_OUT", + }, + { + .id = 46, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_VID_OUT", + }, + { + .id = 47, + .desc = "CLC_DSX_Y_VID_OUT", + }, 
+ { + .id = 48, + .desc = "CLC_DSX_C_VID_OUT", + }, + { + .id = 49, + .desc = "CLC_CROP_RND_CLAMP_POST_DSX_Y_VID_OUT", + }, + { + .id = 50, + .desc = "CLC_CROP_RND_CLAMP_POST_DSX_C_VID_OUT", + }, + { + .id = 51, + .desc = "CLC_DOWNSCALE_4TO1_Y_VID_DS16_OUT", + }, + { + .id = 52, + .desc = "CLC_DOWNSCALE_4TO1_C_VID_DS16_OUT", + }, + { + .id = 53, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_VID_DS16_OUT", + }, + { + .id = 54, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_VID_DS16_OUT", + }, + { + .id = 55, + .desc = "CLC_STATS_AEC_BE", + }, + { + .id = 56, + .desc = "CLC_STATS_AEC_BHIST", + }, + { + .id = 57, + .desc = "CLC_STATS_BHIST", + }, + { + .id = 58, + .desc = "CLC_STATS_TINTLESS_BG", + }, + { + .id = 59, + .desc = "CLC_STATS_AWB_BG", + }, + { + .id = 60, + .desc = "CLC_STATS_BFW", + }, + { + .id = 61, + .desc = "CLC_STATS_BAF", + }, + { + .id = 62, + .desc = "CLC_STATS_RS", + }, + { + .id = 63, + .desc = "CLC_STATS_IHIST", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc vfe680_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "VIDEO_FULL_Y", + }, + { + .wm_id = 1, + .desc = "VIDEO_FULL_C", + }, + { + .wm_id = 2, + .desc = "VIDEO_DS_4:1", + }, + { + .wm_id = 3, + .desc = "VIDEO_DS_16:1", + }, + { + .wm_id = 4, + .desc = "DISPLAY_FULL_Y", + }, + { + .wm_id = 5, + .desc = "DISPLAY_FULL_C", + }, + { + .wm_id = 6, + .desc = "DISPLAY_DS_4:1", + }, + { + .wm_id = 7, + .desc = "DISPLAY_DS_16:1", + }, + { + .wm_id = 8, + .desc = "FD_Y", + }, + { + .wm_id = 9, + .desc = "FD_C", + }, + { + .wm_id = 10, + .desc = "PIXEL_RAW", + }, + { + .wm_id = 11, + .desc = "STATS_BE0", + }, + { + .wm_id = 12, + .desc = "STATS_BHIST0", + }, + { + .wm_id = 13, + .desc = "STATS_TINTLESS_BG", + }, + { + .wm_id = 14, + .desc = "STATS_AWB_BG", + }, + { + .wm_id = 15, + .desc = "STATS_AWB_BFW", + }, + { + .wm_id = 16, + .desc = "STATS_BAF", + }, + { + .wm_id = 17, + .desc = "STATS_BHIST", + }, + { + .wm_id = 18, + .desc = "STATS_RS", + }, + { + .wm_id = 19, + .desc = 
"STATS_IHIST", + }, + { + .wm_id = 20, + .desc = "SPARSE_PD", + }, + { + .wm_id = 21, + .desc = "PDAF_V2.0_PD_DATA", + }, + { + .wm_id = 22, + .desc = "PDAF_V2.0_SAD", + }, + { + .wm_id = 23, + .desc = "LCR", + }, + { + .wm_id = 24, + .desc = "RDI0", + }, + { + .wm_id = 25, + .desc = "RDI1", + }, + { + .wm_id = 26, + .desc = "RDI2", + }, + { + .wm_id = 27, + .desc = "LTM_STATS", + }, +}; + +static struct cam_vfe_top_ver4_top_err_irq_desc vfe680_top_irq_err_desc[] = { + { + .bitmask = BIT(4), + .err_name = "PP VIOLATION", + .desc = "", + }, + { + .bitmask = BIT(6), + .err_name = "PDAF VIOLATION", + .desc = "", + }, + { + .bitmask = BIT(12), + .err_name = "DSP IFE PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the output Data", + }, + { + .bitmask = BIT(13), + .err_name = "IFE DSP TX PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the outgoing data to the DSP interface", + }, + { + .bitmask = BIT(14), + .err_name = "DSP IFE RX PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the incoming data from DSP before processed", + }, + { + .bitmask = BIT(15), + .err_name = "DSP TX FIFO OVERFLOW", + .desc = "Overflow on DSP interface TX path FIFO", + }, + { + .bitmask = BIT(16), + .err_name = "DSP RX FIFO OVERFLOW", + .desc = "Overflow on DSP interface RX path FIFO", + }, + { + .bitmask = BIT(17), + .err_name = "DSP ERROR VIOLATION", + .desc = "When DSP sends a error signal", + }, + { + .bitmask = BIT(18), + .err_name = "DIAG VIOLATION", + .desc = "HBI is less than the minimum required HBI", + }, +}; + +static struct cam_vfe_top_ver4_pdaf_violation_desc vfe680_pdaf_violation_desc[] = { + { + .bitmask = BIT(0), + .desc = "Sim monitor 1 violation - SAD output", + }, + { + .bitmask = BIT(1), + .desc = "Sim monitor 2 violation - pre-proc output, LSB aligned", + }, +}; + +static struct cam_irq_register_set vfe680_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x00000034, + .clear_reg_offset = 0x0000003c, + .status_reg_offset = 0x00000044, + 
.set_reg_offset = 0x0000004C, + .test_set_val = BIT(1), + .test_sub_val = BIT(0), + }, + { + .mask_reg_offset = 0x00000038, + .clear_reg_offset = 0x00000040, + .status_reg_offset = 0x00000048, + }, +}; + +static struct cam_irq_controller_reg_info vfe680_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe680_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000030, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t vfe680_top_debug_reg[] = { + 0x000000A0, + 0x000000A4, + 0x000000A8, + 0x000000AC, + 0x000000B0, + 0x000000B4, + 0x000000B8, + 0x000000BC, + 0x000000C0, + 0x000000C4, + 0x000000C8, + 0x000000CC, + 0x000000D0, + 0x000000D4, + 0x000000D8, + 0x000000DC, + 0x000000E0, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe680_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .core_cfg_0 = 0x00000024, + .core_cfg_1 = 0x00000028, + .core_cfg_2 = 0x0000002C, + .global_reset_cmd = 0x00000030, + .diag_config = 0x00000050, + .diag_sensor_status = {0x00000054, 0x00000058}, + .diag_frm_cnt_status = {0x0000005C, 0x00000060}, + .ipp_violation_status = 0x00000064, + .pdaf_violation_status = 0x00000404, + .core_cfg_3 = 0x00000068, + .core_cgc_ovd_0 = 0x00000018, + .core_cgc_ovd_1 = 0x0000001C, + .ahb_cgc_ovd = 0x00000020, + .dsp_status = 0x0000006C, + .stats_throttle_cfg_0 = 0x00000070, + .stats_throttle_cfg_1 = 0x00000074, + .stats_throttle_cfg_2 = 0x00000078, + .core_cfg_4 = 0x00000080, + .core_cfg_5 = 0x00000084, + .core_cfg_6 = 0x00000088, + .period_cfg = 0x0000008C, + .irq_sub_pattern_cfg = 0x00000090, + .epoch0_pattern_cfg = 0x00000094, + .epoch1_pattern_cfg = 0x00000098, + .epoch_height_cfg = 0x0000009C, + .bus_violation_status = 0x00000C64, + .bus_overflow_status = 0x00000C68, + .num_perf_counters = 2, + .perf_count_reg 
= {
+		{
+			.perf_count_cfg    = 0x00000100,
+			.perf_pix_count    = 0x00000104,
+			.perf_line_count   = 0x00000108,
+			.perf_stall_count  = 0x0000010C,
+			.perf_always_count = 0x00000110,
+			.perf_count_status = 0x00000114,
+		},
+		{
+			.perf_count_cfg    = 0x00000118,
+			.perf_pix_count    = 0x0000011C,
+			.perf_line_count   = 0x00000120,
+			.perf_stall_count  = 0x00000124,
+			.perf_always_count = 0x00000128,
+			.perf_count_status = 0x0000012C,
+		},
+	},
+	.top_debug_cfg            = 0x000000FC,
+	.num_top_debug_reg        = CAM_VFE_680_NUM_DBG_REG,
+	.top_debug                = vfe680_top_debug_reg,
+	.frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS1,
+};
+
+/*
+ * IRQ/violation mask data for the VFE680 pixel-processing (full) path,
+ * consumed by the ver4 top driver alongside the RDI/PDLIB entries below.
+ */
+static struct cam_vfe_ver4_path_reg_data vfe_pp_common_reg_data = {
+	.sof_irq_mask          = 0x00000001,
+	.epoch0_irq_mask       = 0x10000,
+	.epoch1_irq_mask       = 0x20000,
+	.eof_irq_mask          = 0x00000002,
+	.error_irq_mask        = 0x7F1D0,
+	.enable_diagnostic_hw  = 0x1,
+	.top_debug_cfg_en      = 3,
+	.ipp_violation_mask    = 0x10,
+	.pdaf_violation_mask   = 0x40,
+	.diag_violation_mask   = 0x40000,
+	.diag_sensor_sel_mask  = 0x0,
+	/*
+	 * Fix: .diag_frm_count_mask_0 was designated twice (first 0x3f0,
+	 * then 0x10). C initializer semantics keep only the last value, so
+	 * the 0x3f0 entry was dead and triggered -Woverride-init; the
+	 * effective 0x10 (bit 4) is kept — consistent with the per-path
+	 * frame-count bits used by RDI0-2 (0x40/0x80/0x100) and PDLIB (0x20).
+	 */
+	.diag_frm_count_mask_0 = 0x10,
+};
+
+static struct cam_vfe_ver4_path_reg_data vfe680_vfe_full_rdi_reg_data[3] = {
+	{
+		.sof_irq_mask          = 0x100,
+		.eof_irq_mask          = 0x200,
+		.error_irq_mask        = 0x0,
+		.diag_sensor_sel_mask  = 0x2,
+		.diag_frm_count_mask_0 = 0x40,
+		.enable_diagnostic_hw  = 0x1,
+		.top_debug_cfg_en      = 3,
+	},
+	{
+		.sof_irq_mask          = 0x400,
+		.eof_irq_mask          = 0x800,
+		.error_irq_mask        = 0x0,
+		.diag_sensor_sel_mask  = 0x4,
+		.diag_frm_count_mask_0 = 0x80,
+		.enable_diagnostic_hw  = 0x1,
+		.top_debug_cfg_en      = 3,
+	},
+	{
+		.sof_irq_mask          = 0x1000,
+		.eof_irq_mask          = 0x2000,
+		.error_irq_mask        = 0x0,
+		.diag_sensor_sel_mask  = 0x6,
+		.diag_frm_count_mask_0 = 0x100,
+		.enable_diagnostic_hw  = 0x1,
+		.top_debug_cfg_en      = 3,
+	},
+};
+
+static struct cam_vfe_ver4_path_reg_data vfe680_pdlib_reg_data = {
+	.sof_irq_mask          = 0x4,
+	.eof_irq_mask          = 0x8,
+	.error_irq_mask        = 0x0,
+	.diag_sensor_sel_mask  = 0x8,
+	.diag_frm_count_mask_0 = 0x20,
.enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, +}; + +struct cam_vfe_ver4_path_hw_info + vfe680_rdi_hw_info_arr[] = { + { + .common_reg = &vfe680_top_common_reg, + .reg_data = &vfe680_vfe_full_rdi_reg_data[0], + }, + { + .common_reg = &vfe680_top_common_reg, + .reg_data = &vfe680_vfe_full_rdi_reg_data[1], + }, + { + .common_reg = &vfe680_top_common_reg, + .reg_data = &vfe680_vfe_full_rdi_reg_data[2], + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info vfe680_dbg_reg_info[CAM_VFE_680_NUM_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit("test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "STATS_IHIST", + "STATS_RS", + "STATS_BAF", + "GTM_BHIST", + "TINTLESS_BG", + "STATS_BFW", + "STATS_BG", + "STATS_BHIST" + ), + VFE_DBG_INFO_ARRAY_4bit( + "STATS_BE", + "R2PD_DS16_C_VID", + "R2PD_DS16_Y_VID", + "crop_rnd_clamp_post_downscale_C_DS16_VID", + "4to1_C_DS16_VID", + "crop_rnd_clamp_post_downscale_Y_DS16_VID", + "4to1_Y_DS16_VID", + "crop_rnd_clamp_post_dsx_C_VID" + ), + VFE_DBG_INFO_ARRAY_4bit( + "R2PD_DS4_VID_C", + "R2PD_DS4_VID_Y", + "DSX_C", + "crop_rnd_clamp_post_dsx_Y_VID", + "DSX_Y", + "crop_rnd_clamp_post_downscale_mn_C_VID", + "downscale_mn_C_VID", + "crop_rnd_clamp_post_downscale_mn_Y_VID" + ), + VFE_DBG_INFO_ARRAY_4bit( + "MNDS_Y_VID", + "R2PD_DS16_C_DISP", + "R2PD_DS16_Y_DISP", + "crop_rnd_clamp_post_downscale_C_DS16_DISP", + "4to1_C_DS16_DISP", + "crop_rnd_clamp_post_downscale_Y_DS16_DISP", + "4to1_Y_DS16_DISP", + "R2PD_DS4_C_DISP" + ), + VFE_DBG_INFO_ARRAY_4bit( + "R2PD_DS4_Y_DISP", + "crop_rnd_clamp_post_downscale_C_DS4_DISP", + "4to1_C_DS4_DISP", + "crop_rnd_clamp_post_downscale_Y_DS4_DISP", + "4to1_Y_DS4_DISP", + "crop_rnd_clamp_post_downscale_mn_C_DISP", + "downscale_mn_C_DISP", + "crop_rnd_clamp_post_downscale_mn_Y_DISP" + ), + VFE_DBG_INFO_ARRAY_4bit( + "downscale_mn_Y_DISP", + 
"crop_rnd_clamp_post_downscale_mn_C_FD", + "downscale_mn_C_FD", + "crop_rnd_clamp_post_downscale_mn_Y_FD", + "downscale_mn_Y_FD", + "gtm_fd_out", + "uvg", + "color_xform" + ), + VFE_DBG_INFO_ARRAY_4bit( + "glut", + "gtm", + "color_correct", + "demosaic", + "hvx_tap2", + "lcac", + "bayer_ltm", + "bayer_gtm" + ), + VFE_DBG_INFO_ARRAY_4bit( + "bls", + "bpc_abf", + "gic", + "wb_gain", + "lsc", + "compdecomp_hxv_rx", + "compdecomp_hxv_tx", + "hvx_tap1" + ), + VFE_DBG_INFO_ARRAY_4bit( + "decompand", + "reserved", + "bincorrect", + "bpc_pdpc", + "channel_gain", + "bayer_argb_ccif_converter", + "crop_rnd_clamp_pre_argb_packer", + "chroma_up_uv" + ), + VFE_DBG_INFO_ARRAY_4bit( + "chroma_up_y", + "demux", + "hxv_tap0", + "preprocess", + "sparse_pd_ext", + "lcr", + "bayer_ltm_bus_wr", + "RDI2" + ), + VFE_DBG_INFO_ARRAY_4bit( + "RDI1", + "RDI0", + "lcr_bus_wr", + "pdaf_sad_bus_wr", + "pd_data_bus_wr", + "sparse_pd_bus_wr", + "ihist_bus_wr", + "flicker_rs_bus_wr" + ), + VFE_DBG_INFO_ARRAY_4bit( + "gtm_bhist_bus_wr", + "baf_bus_wr", + "bfw_bus_wr", + "bg_bus_wr", + "tintless_bg_bus_wr", + "bhist_bus_wr", + "be_bus_wr", + "pixel_raw_bus_wr" + ), + VFE_DBG_INFO_ARRAY_4bit( + "fd_c_bus_wr", + "fd_y_bus_wr", + "disp_ds16_bus_wr", + "disp_ds4_bus_wr", + "disp_c_bus_wr", + "disp_y_bus_wr", + "vid_ds16_bus_Wr", + "vid_ds4_bus_Wr" + ), + VFE_DBG_INFO_ARRAY_4bit( + "vid_c_bus_wr", + "vid_y_bus_wr", + "CLC_PDAF", + "PIX_PP", + "reserved", + "reserved", + "reserved", + "reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved" + ), +}; + +static struct cam_vfe_top_ver4_diag_reg_info vfe680_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask 
= 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_PIXEL_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_PDAF_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_DSP_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe680_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &vfe680_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &vfe680_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe680_diag_frame_field[] = { + { + .num_fields = 4, + .field = &vfe680_diag_reg_info[4], + }, + { + .num_fields = 2, + .field = &vfe680_diag_reg_info[8], + }, +}; + +static struct cam_vfe_top_ver4_hw_info vfe680_top_hw_info = { + .common_reg = &vfe680_top_common_reg, + .vfe_full_hw_info = { + .common_reg = &vfe680_top_common_reg, + .reg_data = &vfe_pp_common_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &vfe680_top_common_reg, + .reg_data = &vfe680_pdlib_reg_data, + }, + .rdi_hw_info = vfe680_rdi_hw_info_arr, + .wr_client_desc = vfe680_wr_client_desc, + .ipp_module_desc = vfe680_ipp_mod_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_PDLIB_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .num_path_port_map = 2, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD} + }, + .num_rdi = ARRAY_SIZE(vfe680_rdi_hw_info_arr), + .num_top_errors = ARRAY_SIZE(vfe680_top_irq_err_desc), + .top_err_desc = vfe680_top_irq_err_desc, + .num_pdaf_violation_errors = ARRAY_SIZE(vfe680_pdaf_violation_desc), + .pdaf_violation_desc = 
vfe680_pdaf_violation_desc, + .top_debug_reg_info = &vfe680_dbg_reg_info, + .diag_sensor_info = vfe680_diag_sensor_field, + .diag_frame_info = vfe680_diag_frame_field, +}; + +static struct cam_irq_register_set vfe680_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x00000C18, + .clear_reg_offset = 0x00000C20, + .status_reg_offset = 0x00000C28, + .set_reg_offset = 0x00000C50, + }, + { + .mask_reg_offset = 0x00000C1C, + .clear_reg_offset = 0x00000C24, + .status_reg_offset = 0x00000C2C, + .set_reg_offset = 0x00000C54, + }, +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe680_ubwc_regs_client_0 = { + .meta_addr = 0x00000E40, + .meta_cfg = 0x00000E44, + .mode_cfg = 0x00000E48, + .stats_ctrl = 0x00000E4C, + .ctrl_2 = 0x00000E50, + .lossy_thresh0 = 0x00000E54, + .lossy_thresh1 = 0x00000E58, + .off_lossy_var = 0x00000E5C, + .bw_limit = 0x00000E1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe680_ubwc_regs_client_1 = { + .meta_addr = 0x00000F40, + .meta_cfg = 0x00000F44, + .mode_cfg = 0x00000F48, + .stats_ctrl = 0x00000F4C, + .ctrl_2 = 0x00000F50, + .lossy_thresh0 = 0x00000F54, + .lossy_thresh1 = 0x00000F58, + .off_lossy_var = 0x00000F5C, + .bw_limit = 0x00000F1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe680_ubwc_regs_client_4 = { + .meta_addr = 0x00001240, + .meta_cfg = 0x00001244, + .mode_cfg = 0x00001248, + .stats_ctrl = 0x0000124C, + .ctrl_2 = 0x00001250, + .lossy_thresh0 = 0x00001254, + .lossy_thresh1 = 0x00001258, + .off_lossy_var = 0x0000125C, + .bw_limit = 0x0000121C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe680_ubwc_regs_client_5 = { + .meta_addr = 0x00001340, + .meta_cfg = 0x00001344, + .mode_cfg = 0x00001348, + .stats_ctrl = 0x0000134C, + .ctrl_2 = 0x00001350, + .lossy_thresh0 = 0x00001354, + .lossy_thresh1 = 0x00001358, + .off_lossy_var = 0x0000135C, + .bw_limit = 0x0000131C, + 
.ubwc_comp_en_bit = BIT(1), +}; + +static uint32_t vfe680_out_port_mid[][4] = { + {18, 0, 0, 0}, + {19, 0, 0, 0}, + {20, 0, 0, 0}, + {8, 9, 10, 11}, + {32, 0, 0, 0}, + {33, 0, 0, 0}, + {16, 17, 0, 0}, + {36, 37, 38, 0}, + {4, 0, 0, 0}, + {41, 0, 0, 0}, + {44, 0, 0, 0}, + {42, 0, 0, 0}, + {40, 0, 0, 0}, + {46, 0, 0, 0}, + {47, 0, 0, 0}, + {12, 13, 14, 15}, + {34, 0, 0, 0}, + {35, 0, 0, 0}, + {5, 6, 0, 0}, + {48, 0, 0, 0}, + {43, 0, 0, 0}, + {7, 0, 0, 0}, + {39, 0, 0, 0}, + {49, 50, 0, 0}, + {45, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe680_bus_hw_info = { + .common_reg = { + .hw_version = 0x00000C00, + .cgc_ovd = 0x00000C08, + .if_frameheader_cfg = { + 0x00000C34, + 0x00000C38, + 0x00000C3C, + 0x00000C40, + 0x00000C44, + }, + .ubwc_static_ctrl = 0x00000C58, + .pwr_iso_cfg = 0x00000C5C, + .overflow_status_clear = 0x00000C60, + .ccif_violation_status = 0x00000C64, + .overflow_status = 0x00000C68, + .image_size_violation_status = 0x00000C70, + .debug_status_top_cfg = 0x00000CD4, + .debug_status_top = 0x00000CD8, + .test_bus_ctrl = 0x00000CDC, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe680_bus_irq_reg, + .global_irq_cmd_offset = 0x00000C30, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = CAM_VFE_BUS_VER3_680_MAX_CLIENTS, + .bus_client_reg = { + /* BUS Client 0 FULL Y */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + .frame_header_cfg = 0x00000E28, + .irq_subsample_period = 0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00000E38, + .framedrop_pattern = 0x00000E3C, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 0x00000E64, + 
.system_cache_cfg = 0x00000E68, + .addr_status_0 = 0x00000E70, + .addr_status_1 = 0x00000E74, + .addr_status_2 = 0x00000E78, + .addr_status_3 = 0x00000E7C, + .debug_status_cfg = 0x00000E80, + .debug_status_0 = 0x00000E84, + .debug_status_1 = 0x00000E88, + .bw_limiter_addr = 0x00000E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &vfe680_ubwc_regs_client_0, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 1 FULL C */ + { + .cfg = 0x00000F00, + .image_addr = 0x00000F04, + .frame_incr = 0x00000F08, + .image_cfg_0 = 0x00000F0C, + .image_cfg_1 = 0x00000F10, + .image_cfg_2 = 0x00000F14, + .packer_cfg = 0x00000F18, + .frame_header_addr = 0x00000F20, + .frame_header_incr = 0x00000F24, + .frame_header_cfg = 0x00000F28, + .irq_subsample_period = 0x00000F30, + .irq_subsample_pattern = 0x00000F34, + .framedrop_period = 0x00000F38, + .framedrop_pattern = 0x00000F3C, + .mmu_prefetch_cfg = 0x00000F60, + .mmu_prefetch_max_offset = 0x00000F64, + .system_cache_cfg = 0x00000F68, + .addr_status_0 = 0x00000F70, + .addr_status_1 = 0x00000F74, + .addr_status_2 = 0x00000F78, + .addr_status_3 = 0x00000F7C, + .debug_status_cfg = 0x00000F80, + .debug_status_0 = 0x00000F84, + .debug_status_1 = 0x00000F88, + .bw_limiter_addr = 0x00000F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &vfe680_ubwc_regs_client_1, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 2 DS4 */ + { + .cfg = 0x00001000, + .image_addr = 0x00001004, + .frame_incr = 0x00001008, + .image_cfg_0 = 0x0000100C, + 
.image_cfg_1 = 0x00001010, + .image_cfg_2 = 0x00001014, + .packer_cfg = 0x00001018, + .irq_subsample_period = 0x00001030, + .irq_subsample_pattern = 0x00001034, + .framedrop_period = 0x00001038, + .framedrop_pattern = 0x0000103C, + .mmu_prefetch_cfg = 0x00001060, + .mmu_prefetch_max_offset = 0x00001064, + .system_cache_cfg = 0x00001068, + .addr_status_0 = 0x00001070, + .addr_status_1 = 0x00001074, + .addr_status_2 = 0x00001078, + .addr_status_3 = 0x0000107C, + .debug_status_cfg = 0x00001080, + .debug_status_0 = 0x00001084, + .debug_status_1 = 0x00001088, + .bw_limiter_addr = 0x0000101C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 3 DS16 */ + { + .cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + .image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + .packer_cfg = 0x00001118, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .framedrop_period = 0x00001138, + .framedrop_pattern = 0x0000113C, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .system_cache_cfg = 0x00001168, + .addr_status_0 = 0x00001170, + .addr_status_1 = 0x00001174, + .addr_status_2 = 0x00001178, + .addr_status_3 = 0x0000117C, + .debug_status_cfg = 0x00001180, + .debug_status_0 = 0x00001184, + .debug_status_1 = 0x00001188, + .bw_limiter_addr = 0x0000111C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 4 DISP Y */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .framedrop_period = 0x00001238, + 
.framedrop_pattern = 0x0000123C, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .system_cache_cfg = 0x00001268, + .addr_status_0 = 0x00001270, + .addr_status_1 = 0x00001274, + .addr_status_2 = 0x00001278, + .addr_status_3 = 0x0000127C, + .debug_status_cfg = 0x00001280, + .debug_status_0 = 0x00001284, + .debug_status_1 = 0x00001288, + .bw_limiter_addr = 0x0000121C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = &vfe680_ubwc_regs_client_4, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 5 DISP C */ + { + .cfg = 0x00001300, + .image_addr = 0x00001304, + .frame_incr = 0x00001308, + .image_cfg_0 = 0x0000130C, + .image_cfg_1 = 0x00001310, + .image_cfg_2 = 0x00001314, + .packer_cfg = 0x00001318, + .frame_header_addr = 0x00001320, + .frame_header_incr = 0x00001324, + .frame_header_cfg = 0x00001328, + .irq_subsample_period = 0x00001330, + .irq_subsample_pattern = 0x00001334, + .framedrop_period = 0x00001338, + .framedrop_pattern = 0x0000133C, + .mmu_prefetch_cfg = 0x00001360, + .mmu_prefetch_max_offset = 0x00001364, + .system_cache_cfg = 0x00001368, + .addr_status_0 = 0x00001370, + .addr_status_1 = 0x00001374, + .addr_status_2 = 0x00001378, + .addr_status_3 = 0x0000137C, + .debug_status_cfg = 0x00001380, + .debug_status_0 = 0x00001384, + .debug_status_1 = 0x00001388, + .bw_limiter_addr = 0x0000131C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = &vfe680_ubwc_regs_client_5, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 6 DISP DS4 */ + { 
+ .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .system_cache_cfg = 0x00001468, + .addr_status_0 = 0x00001470, + .addr_status_1 = 0x00001474, + .addr_status_2 = 0x00001478, + .addr_status_3 = 0x0000147C, + .debug_status_cfg = 0x00001480, + .debug_status_0 = 0x00001484, + .debug_status_1 = 0x00001488, + .bw_limiter_addr = 0x0000141C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 7 DISP DS16 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 0x00001568, + .addr_status_0 = 0x00001570, + .addr_status_1 = 0x00001574, + .addr_status_2 = 0x00001578, + .addr_status_3 = 0x0000157C, + .debug_status_cfg = 0x00001580, + .debug_status_0 = 0x00001584, + .debug_status_1 = 0x00001588, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 8 FD Y */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + 
.irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_status_0 = 0x00001670, + .addr_status_1 = 0x00001674, + .addr_status_2 = 0x00001678, + .addr_status_3 = 0x0000167C, + .debug_status_cfg = 0x00001680, + .debug_status_0 = 0x00001684, + .debug_status_1 = 0x00001688, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 9 FD C */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_status_0 = 0x00001770, + .addr_status_1 = 0x00001774, + .addr_status_2 = 0x00001778, + .addr_status_3 = 0x0000177C, + .debug_status_cfg = 0x00001780, + .debug_status_0 = 0x00001784, + .debug_status_1 = 0x00001788, + .bw_limiter_addr = 0x0000171C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 10 PIXEL RAW */ + { + .cfg = 0x00001800, + .image_addr = 
0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_status_0 = 0x00001870, + .addr_status_1 = 0x00001874, + .addr_status_2 = 0x00001878, + .addr_status_3 = 0x0000187C, + .debug_status_cfg = 0x00001880, + .debug_status_0 = 0x00001884, + .debug_status_1 = 0x00001888, + .bw_limiter_addr = 0x0000181C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | + BIT_ULL(CAM_FORMAT_PLAIN8) | BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 11 STATS BE 0 */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + 
.addr_status_0 = 0x00001970, + .addr_status_1 = 0x00001974, + .addr_status_2 = 0x00001978, + .addr_status_3 = 0x0000197C, + .debug_status_cfg = 0x00001980, + .debug_status_0 = 0x00001984, + .debug_status_1 = 0x00001988, + .bw_limiter_addr = 0x0000191C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 12 STATS BHIST 0 */ + { + .cfg = 0x00001A00, + .image_addr = 0x00001A04, + .frame_incr = 0x00001A08, + .image_cfg_0 = 0x00001A0C, + .image_cfg_1 = 0x00001A10, + .image_cfg_2 = 0x00001A14, + .packer_cfg = 0x00001A18, + .frame_header_addr = 0x00001A20, + .frame_header_incr = 0x00001A24, + .frame_header_cfg = 0x00001A28, + .irq_subsample_period = 0x00001A30, + .irq_subsample_pattern = 0x00001A34, + .framedrop_period = 0x00001A38, + .framedrop_pattern = 0x00001A3C, + .mmu_prefetch_cfg = 0x00001A60, + .mmu_prefetch_max_offset = 0x00001A64, + .system_cache_cfg = 0x00001A68, + .addr_status_0 = 0x00001A70, + .addr_status_1 = 0x00001A74, + .addr_status_2 = 0x00001A78, + .addr_status_3 = 0x00001A7C, + .debug_status_cfg = 0x00001A80, + .debug_status_0 = 0x00001A84, + .debug_status_1 = 0x00001A88, + .bw_limiter_addr = 0x00001A1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 13 STATS TINTLESS BG */ + { + .cfg = 0x00001B00, + .image_addr = 0x00001B04, + .frame_incr = 0x00001B08, + .image_cfg_0 = 0x00001B0C, + .image_cfg_1 = 0x00001B10, + .image_cfg_2 = 0x00001B14, + .packer_cfg = 0x00001B18, + .frame_header_addr = 0x00001B20, + .frame_header_incr = 0x00001B24, + .frame_header_cfg = 0x00001B28, + .irq_subsample_period = 0x00001B30, + .irq_subsample_pattern = 0x00001B34, + .framedrop_period = 0x00001B38, + .framedrop_pattern = 0x00001B3C, + .mmu_prefetch_cfg = 0x00001B60, + .mmu_prefetch_max_offset = 0x00001B64, + .system_cache_cfg = 0x00001B68, + .addr_status_0 = 0x00001B70, + .addr_status_1 = 0x00001B74, + 
.addr_status_2 = 0x00001B78, + .addr_status_3 = 0x00001B7C, + .debug_status_cfg = 0x00001B80, + .debug_status_0 = 0x00001B84, + .debug_status_1 = 0x00001B88, + .bw_limiter_addr = 0x00001B1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_5, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 14 STATS AWB BG */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .framedrop_period = 0x00001C38, + .framedrop_pattern = 0x00001C3C, + .mmu_prefetch_cfg = 0x00001C60, + .mmu_prefetch_max_offset = 0x00001C64, + .system_cache_cfg = 0x00001C68, + .addr_status_0 = 0x00001C70, + .addr_status_1 = 0x00001C74, + .addr_status_2 = 0x00001C78, + .addr_status_3 = 0x00001C7C, + .debug_status_cfg = 0x00001C80, + .debug_status_0 = 0x00001C84, + .debug_status_1 = 0x00001C88, + .bw_limiter_addr = 0x00001C1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 15 STATS AWB BFW */ + { + .cfg = 0x00001D00, + .image_addr = 0x00001D04, + .frame_incr = 0x00001D08, + .image_cfg_0 = 0x00001D0C, + .image_cfg_1 = 0x00001D10, + .image_cfg_2 = 0x00001D14, + .packer_cfg = 0x00001D18, + .frame_header_addr = 0x00001D20, + .frame_header_incr = 0x00001D24, + .frame_header_cfg = 0x00001D28, + .irq_subsample_period = 0x00001D30, + .irq_subsample_pattern = 0x00001D34, + .framedrop_period = 0x00001D38, + .framedrop_pattern = 0x00001D3C, + .mmu_prefetch_cfg = 0x00001D60, + .mmu_prefetch_max_offset = 0x00001D64, + .system_cache_cfg = 0x00001D68, + .addr_status_0 = 0x00001D70, + .addr_status_1 = 0x00001D74, + .addr_status_2 = 0x00001D78, + .addr_status_3 = 0x00001D7C, + .debug_status_cfg 
= 0x00001D80, + .debug_status_0 = 0x00001D84, + .debug_status_1 = 0x00001D88, + .bw_limiter_addr = 0x00001D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 16 STATS BAF */ + { + .cfg = 0x00001E00, + .image_addr = 0x00001E04, + .frame_incr = 0x00001E08, + .image_cfg_0 = 0x00001E0C, + .image_cfg_1 = 0x00001E10, + .image_cfg_2 = 0x00001E14, + .packer_cfg = 0x00001E18, + .frame_header_addr = 0x00001E20, + .frame_header_incr = 0x00001E24, + .frame_header_cfg = 0x00001E28, + .irq_subsample_period = 0x00001E30, + .irq_subsample_pattern = 0x00001E34, + .framedrop_period = 0x00001E38, + .framedrop_pattern = 0x00001E3C, + .mmu_prefetch_cfg = 0x00001E60, + .mmu_prefetch_max_offset = 0x00001E64, + .system_cache_cfg = 0x00001E68, + .addr_status_0 = 0x00001E70, + .addr_status_1 = 0x00001E74, + .addr_status_2 = 0x00001E78, + .addr_status_3 = 0x00001E7C, + .debug_status_cfg = 0x00001E80, + .debug_status_0 = 0x00001E84, + .debug_status_1 = 0x00001E88, + .bw_limiter_addr = 0x00001E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 17 STATS BHIST */ + { + .cfg = 0x00001F00, + .image_addr = 0x00001F04, + .frame_incr = 0x00001F08, + .image_cfg_0 = 0x00001F0C, + .image_cfg_1 = 0x00001F10, + .image_cfg_2 = 0x00001F14, + .packer_cfg = 0x00001F18, + .frame_header_addr = 0x00001F20, + .frame_header_incr = 0x00001F24, + .frame_header_cfg = 0x00001F28, + .irq_subsample_period = 0x00001F30, + .irq_subsample_pattern = 0x00001F34, + .framedrop_period = 0x00001F38, + .framedrop_pattern = 0x00001F3C, + .mmu_prefetch_cfg = 0x00001F60, + .mmu_prefetch_max_offset = 0x00001F64, + .system_cache_cfg = 0x00001F68, + .addr_status_0 = 0x00001F70, + .addr_status_1 = 0x00001F74, + .addr_status_2 = 0x00001F78, + .addr_status_3 = 0x00001F7C, + .debug_status_cfg = 0x00001F80, + .debug_status_0 = 0x00001F84, + .debug_status_1 = 
0x00001F88, + .bw_limiter_addr = 0x00001F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_8, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 18 STATS RS */ + { + .cfg = 0x00002000, + .image_addr = 0x00002004, + .frame_incr = 0x00002008, + .image_cfg_0 = 0x0000200C, + .image_cfg_1 = 0x00002010, + .image_cfg_2 = 0x00002014, + .packer_cfg = 0x00002018, + .frame_header_addr = 0x00002020, + .frame_header_incr = 0x00002024, + .frame_header_cfg = 0x00002028, + .irq_subsample_period = 0x00002030, + .irq_subsample_pattern = 0x00002034, + .framedrop_period = 0x00002038, + .framedrop_pattern = 0x0000203C, + .mmu_prefetch_cfg = 0x00002060, + .mmu_prefetch_max_offset = 0x00002064, + .system_cache_cfg = 0x00002068, + .addr_status_0 = 0x00002070, + .addr_status_1 = 0x00002074, + .addr_status_2 = 0x00002078, + .addr_status_3 = 0x0000207C, + .debug_status_cfg = 0x00002080, + .debug_status_0 = 0x00002084, + .debug_status_1 = 0x00002088, + .bw_limiter_addr = 0x0000201C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_9, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 19 STATS IHIST */ + { + .cfg = 0x00002100, + .image_addr = 0x00002104, + .frame_incr = 0x00002108, + .image_cfg_0 = 0x0000210C, + .image_cfg_1 = 0x00002110, + .image_cfg_2 = 0x00002114, + .packer_cfg = 0x00002118, + .frame_header_addr = 0x00002120, + .frame_header_incr = 0x00002124, + .frame_header_cfg = 0x00002128, + .irq_subsample_period = 0x00002130, + .irq_subsample_pattern = 0x00002134, + .framedrop_period = 0x00002138, + .framedrop_pattern = 0x0000213C, + .mmu_prefetch_cfg = 0x00002160, + .mmu_prefetch_max_offset = 0x00002164, + .system_cache_cfg = 0x00002168, + .addr_status_0 = 0x00002170, + .addr_status_1 = 0x00002174, + .addr_status_2 = 0x00002178, + .addr_status_3 = 0x0000217C, + .debug_status_cfg = 0x00002180, + .debug_status_0 = 0x00002184, + .debug_status_1 = 0x00002188, + .bw_limiter_addr = 0x0000211C, + .comp_group = 
CAM_VFE_BUS_VER3_COMP_GRP_10, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 20 SPARSE PD */ + { + .cfg = 0x00002200, + .image_addr = 0x00002204, + .frame_incr = 0x00002208, + .image_cfg_0 = 0x0000220C, + .image_cfg_1 = 0x00002210, + .image_cfg_2 = 0x00002214, + .packer_cfg = 0x00002218, + .frame_header_addr = 0x00002220, + .frame_header_incr = 0x00002224, + .frame_header_cfg = 0x00002228, + .irq_subsample_period = 0x00002230, + .irq_subsample_pattern = 0x00002234, + .framedrop_period = 0x00002238, + .framedrop_pattern = 0x0000223C, + .mmu_prefetch_cfg = 0x00002260, + .mmu_prefetch_max_offset = 0x00002264, + .system_cache_cfg = 0x00002268, + .addr_status_0 = 0x00002270, + .addr_status_1 = 0x00002274, + .addr_status_2 = 0x00002278, + .addr_status_3 = 0x0000227C, + .debug_status_cfg = 0x00002280, + .debug_status_0 = 0x00002284, + .debug_status_1 = 0x00002288, + .bw_limiter_addr = 0x0000221C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_12, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN8) | BIT_ULL(CAM_FORMAT_PLAIN8_10), + }, + /* BUS Client 21 PDAF V2.0 PD DATA - 2PD */ + { + .cfg = 0x00002300, + .image_addr = 0x00002304, + .frame_incr = 0x00002308, + .image_cfg_0 = 0x0000230C, + .image_cfg_1 = 0x00002310, + .image_cfg_2 = 0x00002314, + .packer_cfg = 0x00002318, + .frame_header_addr = 0x00002320, + .frame_header_incr = 0x00002324, + .frame_header_cfg = 0x00002328, + .irq_subsample_period = 0x00002330, + .irq_subsample_pattern = 0x00002334, + .framedrop_period = 0x00002338, + .framedrop_pattern = 0x0000233C, + .mmu_prefetch_cfg = 0x00002360, + .mmu_prefetch_max_offset = 0x00002364, + .system_cache_cfg = 0x00002368, + .addr_status_0 = 0x00002370, + .addr_status_1 = 0x00002374, + .addr_status_2 = 0x00002378, + .addr_status_3 = 0x0000237C, + 
.debug_status_cfg = 0x00002380, + .debug_status_0 = 0x00002384, + .debug_status_1 = 0x00002388, + .bw_limiter_addr = 0x0000231C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_13, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16), + }, + /* BUS Client 22 PDAF V2.0 SAD STATS */ + { + .cfg = 0x00002400, + .image_addr = 0x00002404, + .frame_incr = 0x00002408, + .image_cfg_0 = 0x0000240C, + .image_cfg_1 = 0x00002410, + .image_cfg_2 = 0x00002414, + .packer_cfg = 0x00002418, + .frame_header_addr = 0x00002420, + .frame_header_incr = 0x00002424, + .frame_header_cfg = 0x00002428, + .irq_subsample_period = 0x00002430, + .irq_subsample_pattern = 0x00002434, + .framedrop_period = 0x00002438, + .framedrop_pattern = 0x0000243C, + .mmu_prefetch_cfg = 0x00002460, + .mmu_prefetch_max_offset = 0x00002464, + .system_cache_cfg = 0x00002468, + .addr_status_0 = 0x00002470, + .addr_status_1 = 0x00002474, + .addr_status_2 = 0x00002478, + .addr_status_3 = 0x0000247C, + .debug_status_cfg = 0x00002480, + .debug_status_0 = 0x00002484, + .debug_status_1 = 0x00002488, + .bw_limiter_addr = 0x0000241C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_13, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 23 LCR */ + { + .cfg = 0x00002500, + .image_addr = 0x00002504, + .frame_incr = 0x00002508, + .image_cfg_0 = 0x0000250C, + .image_cfg_1 = 0x00002510, + .image_cfg_2 = 0x00002514, + .packer_cfg = 0x00002518, + .frame_header_addr = 0x00002520, + .frame_header_incr = 0x00002524, + .frame_header_cfg = 0x00002528, + .irq_subsample_period = 0x00002530, + .irq_subsample_pattern = 0x00002534, + .framedrop_period = 0x00002538, + .framedrop_pattern = 0x0000253C, + .mmu_prefetch_cfg = 0x00002560, + .mmu_prefetch_max_offset = 0x00002564, + .system_cache_cfg = 0x00002568, + .addr_status_0 = 0x00002570, + .addr_status_1 = 
0x00002574, + .addr_status_2 = 0x00002578, + .addr_status_3 = 0x0000257C, + .debug_status_cfg = 0x00002580, + .debug_status_0 = 0x00002584, + .debug_status_1 = 0x00002588, + .bw_limiter_addr = 0x0000251C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_11, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 24 RDI0 */ + { + .cfg = 0x00002600, + .image_addr = 0x00002604, + .frame_incr = 0x00002608, + .image_cfg_0 = 0x0000260C, + .image_cfg_1 = 0x00002610, + .image_cfg_2 = 0x00002614, + .packer_cfg = 0x00002618, + .frame_header_addr = 0x00002620, + .frame_header_incr = 0x00002624, + .frame_header_cfg = 0x00002628, + .irq_subsample_period = 0x00002630, + .irq_subsample_pattern = 0x00002634, + .framedrop_period = 0x00002638, + .framedrop_pattern = 0x0000263C, + .mmu_prefetch_cfg = 0x00002660, + .mmu_prefetch_max_offset = 0x00002664, + .system_cache_cfg = 0x00002668, + .addr_status_0 = 0x00002670, + .addr_status_1 = 0x00002674, + .addr_status_2 = 0x00002678, + .addr_status_3 = 0x0000267C, + .debug_status_cfg = 0x00002680, + .debug_status_0 = 0x00002684, + .debug_status_1 = 0x00002688, + .bw_limiter_addr = 0x0000261C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_14, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 25 RDI1 */ + { + .cfg = 0x00002700, + .image_addr = 0x00002704, + .frame_incr = 0x00002708, + .image_cfg_0 = 0x0000270C, + .image_cfg_1 = 0x00002710, + .image_cfg_2 
= 0x00002714, + .packer_cfg = 0x00002718, + .frame_header_addr = 0x00002720, + .frame_header_incr = 0x00002724, + .frame_header_cfg = 0x00002728, + .irq_subsample_period = 0x00002730, + .irq_subsample_pattern = 0x00002734, + .framedrop_period = 0x00002738, + .framedrop_pattern = 0x0000273C, + .mmu_prefetch_cfg = 0x00002760, + .mmu_prefetch_max_offset = 0x00002764, + .system_cache_cfg = 0x00002768, + .addr_status_0 = 0x00002770, + .addr_status_1 = 0x00002774, + .addr_status_2 = 0x00002778, + .addr_status_3 = 0x0000277C, + .debug_status_cfg = 0x00002780, + .debug_status_0 = 0x00002784, + .debug_status_1 = 0x00002788, + .bw_limiter_addr = 0x0000271C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_15, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 26 RDI2 */ + { + .cfg = 0x00002800, + .image_addr = 0x00002804, + .frame_incr = 0x00002808, + .image_cfg_0 = 0x0000280C, + .image_cfg_1 = 0x00002810, + .image_cfg_2 = 0x00002814, + .packer_cfg = 0x00002818, + .frame_header_addr = 0x00002820, + .frame_header_incr = 0x00002824, + .frame_header_cfg = 0x00002828, + .irq_subsample_period = 0x00002830, + .irq_subsample_pattern = 0x00002834, + .framedrop_period = 0x00002838, + .framedrop_pattern = 0x0000283C, + .mmu_prefetch_cfg = 0x00002860, + .mmu_prefetch_max_offset = 0x00002864, + .system_cache_cfg = 0x00002868, + .addr_status_0 = 0x00002870, + .addr_status_1 = 0x00002874, + .addr_status_2 = 0x00002878, + 
.addr_status_3 = 0x0000287C, + .debug_status_cfg = 0x00002880, + .debug_status_0 = 0x00002884, + .debug_status_1 = 0x00002888, + .bw_limiter_addr = 0x0000281C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_16, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 27 LTM STATS */ + { + .cfg = 0x00002900, + .image_addr = 0x00002904, + .frame_incr = 0x00002908, + .image_cfg_0 = 0x0000290C, + .image_cfg_1 = 0x00002910, + .image_cfg_2 = 0x00002914, + .packer_cfg = 0x00002918, + .frame_header_addr = 0x00002920, + .frame_header_incr = 0x00002924, + .frame_header_cfg = 0x00002928, + .irq_subsample_period = 0x00002930, + .irq_subsample_pattern = 0x00002934, + .framedrop_period = 0x00002938, + .framedrop_pattern = 0x0000293C, + .mmu_prefetch_cfg = 0x00002960, + .mmu_prefetch_max_offset = 0x00002964, + .system_cache_cfg = 0x00002968, + .addr_status_0 = 0x00002970, + .addr_status_1 = 0x00002974, + .addr_status_2 = 0x00002978, + .addr_status_3 = 0x0000297C, + .debug_status_cfg = 0x00002980, + .debug_status_0 = 0x00002984, + .debug_status_1 = 0x00002988, + .bw_limiter_addr = 0x0000291C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN32), + }, + }, + .num_out = 25, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .mid = 
vfe680_out_port_mid[0], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 24, + }, + .name = { + "RDI_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .mid = vfe680_out_port_mid[1], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 25, + }, + .name = { + "RDI_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .mid = vfe680_out_port_mid[2], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 26, + }, + .name = { + "RDI_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL, + .max_width = 4096, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[3], + .num_mid = 4, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 0, + 1, + }, + .name = { + "FULL_Y", + "FULL_C", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[4], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 2, + }, + .name = { + "DS_4", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[5], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 3, + }, + .name = { + "DS_16", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[6], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 10, + }, + .name = { + "PIXEL_RAW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = 
vfe680_out_port_mid[7], + .num_mid = 3, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 8, + 9, + }, + .name = { + "FD_Y", + "FD_C", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_SPARSE_PD, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[8], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 20, + }, + .name = { + "PDAF", + }, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[9], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 13, + }, + .name = { + "STATS_TL_BG", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + + .mid = vfe680_out_port_mid[10], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 16, + }, + .name = { + "STATS_BF", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[11], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 14, + }, + .name = { + "STATS_AWB_BGB", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[12], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 12, + }, + .name = { + "STATS_BHIST", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[13], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 18, + }, + .name = { + "STATS_RS", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[14], + .num_mid = 1, + 
.num_wm = 1, + .wm_idx = { + 19, + }, + .name = { + "STATS_IHIST", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP, + .max_width = 4096, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[15], + .num_mid = 4, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 4, + 5, + }, + .name = { + "FULL_DISP_Y", + "FULL_DISP_C", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[16], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 6, + }, + .name = { + "DISP_DS_4", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[17], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 7, + }, + .name = { + "DISP_DS_16", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe680_out_port_mid[18], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 21, + }, + .name = { + "2PD", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LCR, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[19], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 23, + }, + .name = { + "LCR", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[20], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 15, + }, + .name = { + "AWB_BFW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe680_out_port_mid[21], + .num_mid = 1, 
+ .num_wm = 1, + .wm_idx = { + 22, + }, + .name = { + "2PD_STATS", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[22], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 11, + }, + .name = { + "AEC_BE", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[23], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 27, + }, + .name = { + "LTM", + }, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe680_out_port_mid[24], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 17, + }, + .name = { + "GTM_BHIST", + }, + }, + }, + .num_cons_err = 29, + .constraint_error_list = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input not supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input not supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input not supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input not supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format not supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 format not supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 32 BPP format not supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 64 BPP format not supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack MIPI 20 format not supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 14 format not supported" + }, + { + .bitmask = BIT(10), + .error_description = "Pack MIPI 12 format not supported" + }, + { + .bitmask = BIT(11), + .error_description = "Pack MIPI 10 format not supported" 
+ }, + { + .bitmask = BIT(12), + .error_description = "Pack 128 BPP format not supported" + }, + { + .bitmask = BIT(13), + .error_description = "UBWC NV12 format not supported" + }, + { + .bitmask = BIT(14), + .error_description = "UBWC NV12 4R format not supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC TP10 format not supported" + }, + { + .bitmask = BIT(16), + .error_description = "Frame based Mode not supported" + }, + { + .bitmask = BIT(17), + .error_description = "Index based Mode not supported" + }, + { + .bitmask = BIT(18), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = BIT(19), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(20), + .error_description = "FIFO frmheader addr unalign" + }, + { + .bitmask = BIT(21), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(22), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(23), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Stride unalign" + }, + { + .bitmask = BIT(25), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Image Width unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Image Height unalign" + }, + { + .bitmask = BIT(28), + .error_description = "Meta Stride unalign" + }, + }, + .num_comp_grp = 17, + .support_consumed_addr = true, + .comp_done_mask = { + BIT(0), BIT(1), BIT(2), BIT(3), + BIT(4), BIT(5), BIT(6), BIT(7), + BIT(8), BIT(9), BIT(10), BIT(11), + BIT(12), BIT(13), BIT(14), BIT(15), BIT(16), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 33, + .pack_align_shift = 5, + .max_bw_counter_limit = 0xFF, +}; + +static struct cam_vfe_irq_hw_info vfe680_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_EXT_CSID, + .top_irq_reg = &vfe680_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe680_hw_info = { + 
.irq_hw_info = &vfe680_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe680_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe680_top_hw_info, +}; + +#endif /* _CAM_VFE680_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe680_110.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe680_110.h new file mode 100644 index 0000000000..6eb52ff691 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe680_110.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_VFE680_110_H_ +#define _CAM_VFE680_110_H_ +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_VFE_680_110_NUM_DBG_REG 17 + +static uint32_t vfe680_110_top_debug_reg[] = { + 0x000000A0, + 0x000000A4, + 0x000000A8, + 0x000000AC, + 0x000000B0, + 0x000000B4, + 0x000000B8, + 0x000000BC, + 0x000000C0, + 0x000000C4, + 0x000000C8, + 0x000000CC, + 0x000000D0, + 0x000000D4, + 0x000000D8, + 0x000000DC, + 0x000000E0, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe680_110_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .core_cfg_0 = 0x00000024, + .core_cfg_1 = 0x00000028, + .core_cfg_2 = 0x0000002C, + .global_reset_cmd = 0x00000030, + .diag_config = 0x00000050, + .diag_sensor_status = {0x00000054, 0x00000058}, + .diag_frm_cnt_status = {0x0000005C, 0x00000060}, + .ipp_violation_status = 0x00000064, + .pdaf_violation_status = 0x00000404, + .core_cfg_3 = 0x00000068, + .core_cgc_ovd_0 = 0x00000018, + 
.core_cgc_ovd_1 = 0x0000001C, + .ahb_cgc_ovd = 0x00000020, + .dsp_status = 0x0000006C, + .stats_throttle_cfg_0 = 0x00000070, + .stats_throttle_cfg_1 = 0x00000074, + .stats_throttle_cfg_2 = 0x00000078, + .core_cfg_4 = 0x00000080, + .core_cfg_5 = 0x00000084, + .core_cfg_6 = 0x00000088, + .period_cfg = 0x0000008C, + .irq_sub_pattern_cfg = 0x00000090, + .epoch0_pattern_cfg = 0x00000094, + .epoch1_pattern_cfg = 0x00000098, + .epoch_height_cfg = 0x0000009C, + .bus_violation_status = 0x00000C64, + .bus_overflow_status = 0x00000C68, + .top_debug_cfg = 0x000000FC, + .num_top_debug_reg = CAM_VFE_680_110_NUM_DBG_REG, + .top_debug = vfe680_110_top_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS1, +}; + +struct cam_vfe_ver4_path_hw_info + vfe680_110_rdi_hw_info_arr[] = { + { + .common_reg = &vfe680_110_top_common_reg, + .reg_data = &vfe680_vfe_full_rdi_reg_data[0], + }, + { + .common_reg = &vfe680_110_top_common_reg, + .reg_data = &vfe680_vfe_full_rdi_reg_data[1], + }, + { + .common_reg = &vfe680_110_top_common_reg, + .reg_data = &vfe680_vfe_full_rdi_reg_data[2], + }, +}; + +static struct cam_vfe_top_ver4_hw_info vfe680_110_top_hw_info = { + .common_reg = &vfe680_110_top_common_reg, + .vfe_full_hw_info = { + .common_reg = &vfe680_110_top_common_reg, + .reg_data = &vfe_pp_common_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &vfe680_110_top_common_reg, + .reg_data = &vfe680_pdlib_reg_data, + }, + .rdi_hw_info = vfe680_110_rdi_hw_info_arr, + .wr_client_desc = vfe680_wr_client_desc, + .ipp_module_desc = vfe680_ipp_mod_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_PDLIB_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .num_path_port_map = 2, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD} + }, + .num_rdi = ARRAY_SIZE(vfe680_110_rdi_hw_info_arr), + .num_top_errors = ARRAY_SIZE(vfe680_top_irq_err_desc), + 
.top_err_desc = vfe680_top_irq_err_desc, + .num_pdaf_violation_errors = ARRAY_SIZE(vfe680_pdaf_violation_desc), + .pdaf_violation_desc = vfe680_pdaf_violation_desc, + .top_debug_reg_info = &vfe680_dbg_reg_info, + .diag_sensor_info = vfe680_diag_sensor_field, + .diag_frame_info = vfe680_diag_frame_field, +}; + +static struct cam_vfe_hw_info cam_vfe680_110_hw_info = { + .irq_hw_info = &vfe680_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe680_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe680_110_top_hw_info, +}; + +#endif /* _CAM_VFE680_110_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe780.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe780.h new file mode 100644 index 0000000000..98f641d06f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe780.h @@ -0,0 +1,2593 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_VFE780_H_ +#define _CAM_VFE780_H_ +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_VFE_BUS_VER3_780_MAX_CLIENTS 27 +#define CAM_VFE_780_NUM_DBG_REG 17 + +static struct cam_vfe_top_ver4_module_desc vfe780_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_DEMUX", + }, + { + .id = 1, + .desc = "CLC_CHANNEL_GAIN", + }, + { + .id = 2, + .desc = "CLC_BPC_PDPC", + }, + { + .id = 3, + .desc = "CLC_BINCORRECT", + }, + { + .id = 4, + .desc = "CLC_COMPDECOMP", + }, + { + .id = 5, + .desc = "CLC_LSC", + }, + { + .id = 6, + .desc = "CLC_WB_GAIN", + }, + { + .id = 7, + .desc = "CLC_GIC", + }, + { + .id = 8, + .desc = "CLC_BPC_ABF", + }, + { + .id = 9, + .desc = "CLC_BLS", + }, + { + .id = 10, + .desc = "CLC_BAYER_GTM", + }, + { + .id = 11, + .desc = "CLC_BAYER_LTM", + }, + { + .id = 12, + .desc = "CLC_LCAC", + }, + { + .id = 13, + .desc = "CLC_DEMOSAIC", + }, + { + .id = 14, + .desc = "CLC_COLOR_CORRECT", + }, + { + .id = 15, + .desc = "CLC_GTM", + }, + { + .id = 16, + .desc = "CLC_GLUT", + }, + { + .id = 17, + .desc = "CLC_COLOR_TRANSFORM", + }, + { + .id = 18, + .desc = "CLC_UVG", + }, + { + .id = 19, + .desc = "CLC_PREPROCESSOR", + }, + { + .id = 20, + .desc = "CLC_CHROMA_UP", + }, + { + .id = 21, + .desc = "Reserved", + }, + { + .id = 22, + .desc = "Reserved", + }, + { + .id = 23, + .desc = "CLC_COMPDECOMP_HVX_TX", + }, + { + .id = 24, + .desc = "CLC_COMPDECOMP_HVX_RX", + }, + { + .id = 25, + .desc = "CLC_GTM_FD_OUT", + }, + { + .id = 26, + .desc = "CLC_CROP_RND_CLAMP_PIXEL_RAW_OUT", + }, + { + .id = 27, + .desc = "CLC_DOWNSCALE_MN_Y_FD_OUT", + }, + { + .id = 28, + .desc = "CLC_DOWNSCALE_MN_C_FD_OUT", + }, + { + .id = 29, + .desc = "CLC_CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_FD_OUT", + }, + { + .id = 30, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_FD_OUT", + }, + { + .id = 31, + .desc = "CLC_DOWNSCALE_MN_Y_DISP_OUT", + }, + { + .id = 32, + .desc = 
"CLC_DOWNSCALE_MN_C_DISP_OUT", + }, + { + .id = 33, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_DISP_OUT", + }, + { + .id = 34, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_DISP_OUT", + }, + { + .id = 35, + .desc = "CLC_DOWNSCALE_4TO1_Y_DISP_DS4_OUT", + }, + { + .id = 36, + .desc = "CLC_DOWNSCALE_4TO1_C_DISP_DS4_OUT", + }, + { + .id = 37, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS4_OUT", + }, + { + .id = 38, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS4_OUT", + }, + { + .id = 39, + .desc = "CLC_DOWNSCALE_4TO1_Y_DISP_DS16_OUT", + }, + { + .id = 40, + .desc = "CLC_DOWNSCALE_4TO1_C_DISP_DS16_OUT", + }, + { + .id = 41, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS16_OUT", + }, + { + .id = 42, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS16_OUT", + }, + { + .id = 43, + .desc = "CLC_DOWNSCALE_MN_Y_VID_OUT", + }, + { + .id = 44, + .desc = "CLC_DOWNSCALE_MN_C_VID_OUT", + }, + { + .id = 45, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_VID_OUT", + }, + { + .id = 46, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_VID_OUT", + }, + { + .id = 47, + .desc = "CLC_DSX_Y_VID_OUT", + }, + { + .id = 48, + .desc = "CLC_DSX_C_VID_OUT", + }, + { + .id = 49, + .desc = "CLC_CROP_RND_CLAMP_POST_DSX_Y_VID_OUT", + }, + { + .id = 50, + .desc = "CLC_CROP_RND_CLAMP_POST_DSX_C_VID_OUT", + }, + { + .id = 51, + .desc = "CLC_DOWNSCALE_4TO1_Y_VID_DS16_OUT", + }, + { + .id = 52, + .desc = "CLC_DOWNSCALE_4TO1_C_VID_DS16_OUT", + }, + { + .id = 53, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_VID_DS16_OUT", + }, + { + .id = 54, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_VID_DS16_OUT", + }, + { + .id = 55, + .desc = "CLC_STATS_AEC_BE", + }, + { + .id = 56, + .desc = "CLC_STATS_AEC_BHIST", + }, + { + .id = 57, + .desc = "CLC_STATS_BHIST", + }, + { + .id = 58, + .desc = "CLC_STATS_TINTLESS_BG", + }, + { + .id = 59, + .desc = "CLC_STATS_AWB_BG", + }, + { + .id = 60, + .desc = "CLC_STATS_BFW", + }, + { + .id = 61, + 
.desc = "CLC_STATS_RS", + }, + { + .id = 62, + .desc = "CLC_STATS_IHIST", + }, + { + .id = 63, + .desc = "Reserved", + }, + { + .id = 64, + .desc = "CLC_PDPC_BPC_1D", + }, + { + .id = 65, + .desc = "CLC_PDPC_BPC_1D_POST_LSC", + }, + { + .id = 66, + .desc = "CLC_STATS_CAF", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc vfe780_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "VIDEO_FULL_Y", + }, + { + .wm_id = 1, + .desc = "VIDEO_FULL_C", + }, + { + .wm_id = 2, + .desc = "VIDEO_DS_4:1", + }, + { + .wm_id = 3, + .desc = "VIDEO_DS_16:1", + }, + { + .wm_id = 4, + .desc = "DISPLAY_FULL_Y", + }, + { + .wm_id = 5, + .desc = "DISPLAY_FULL_C", + }, + { + .wm_id = 6, + .desc = "DISPLAY_DS_4:1", + }, + { + .wm_id = 7, + .desc = "DISPLAY_DS_16:1", + }, + { + .wm_id = 8, + .desc = "FD_Y", + }, + { + .wm_id = 9, + .desc = "FD_C", + }, + { + .wm_id = 10, + .desc = "PIXEL_RAW", + }, + { + .wm_id = 11, + .desc = "STATS_BE0", + }, + { + .wm_id = 12, + .desc = "STATS_BHIST0", + }, + { + .wm_id = 13, + .desc = "STATS_TINTLESS_BG", + }, + { + .wm_id = 14, + .desc = "STATS_AWB_BG", + }, + { + .wm_id = 15, + .desc = "STATS_AWB_BFW", + }, + { + .wm_id = 16, + .desc = "STATS_CAF", + }, + { + .wm_id = 17, + .desc = "STATS_BHIST", + }, + { + .wm_id = 18, + .desc = "STATS_BAYER_RS", + }, + { + .wm_id = 19, + .desc = "STATS_IHIST", + }, + { + .wm_id = 20, + .desc = "PDAF_0_2PD", + }, + { + .wm_id = 21, + .desc = "PDAF_1_PREPROCESS_2PD", + }, + { + .wm_id = 22, + .desc = "PDAF_2_PARSED_DATA", + }, + { + .wm_id = 23, + .desc = "RDI0", + }, + { + .wm_id = 24, + .desc = "RDI1", + }, + { + .wm_id = 25, + .desc = "RDI2", + }, + { + .wm_id = 26, + .desc = "LTM_STATS", + }, +}; + +static struct cam_vfe_top_ver4_top_err_irq_desc vfe780_top_irq_err_desc[] = { + { + .bitmask = BIT(4), + .err_name = "PP VIOLATION", + .desc = "", + }, + { + .bitmask = BIT(6), + .err_name = "PDAF VIOLATION", + .desc = "", + }, + { + .bitmask = BIT(7), + .err_name = "DYNAMIC PDAF SWITCH VIOLATION", + .desc = "PD 
exposure changes dynamically and the sensor gap is not large enough", + }, + { + .bitmask = BIT(8), + .err_name = "LCR PD INPUT TIMING PROTOCOL VIOLATION", + .desc = "Input timing protocol on the LCR and PD path is not met", + }, + { + .bitmask = BIT(12), + .err_name = "DSP IFE PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the output Data", + }, + { + .bitmask = BIT(13), + .err_name = "IFE DSP TX PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the outgoing data to the DSP interface", + }, + { + .bitmask = BIT(14), + .err_name = "DSP IFE RX PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the incoming data from DSP before processed", + }, + { + .bitmask = BIT(15), + .err_name = "DSP TX FIFO OVERFLOW", + .desc = "Overflow on DSP interface TX path FIFO", + }, + { + .bitmask = BIT(16), + .err_name = "DSP RX FIFO OVERFLOW", + .desc = "Overflow on DSP interface RX path FIFO", + }, + { + .bitmask = BIT(17), + .err_name = "DSP ERROR VIOLATION", + .desc = "When DSP sends a error signal", + }, + { + .bitmask = BIT(18), + .err_name = "DIAG VIOLATION", + .desc = "HBI is less than the minimum required HBI", + }, +}; + +static struct cam_vfe_top_ver4_pdaf_violation_desc vfe780_pdaf_violation_desc[] = { + { + .bitmask = BIT(0), + .desc = "Sim monitor 1 violation - SAD output", + }, + { + .bitmask = BIT(1), + .desc = "Sim monitor 2 violation - pre-proc output", + }, + { + .bitmask = BIT(2), + .desc = "Sim monitor 3 violation - parsed output", + }, + { + .bitmask = BIT(3), + .desc = "Constraint violation", + }, +}; + +static struct cam_vfe_top_ver4_pdaf_lcr_res_info vfe780_pdaf_lcr_res_mask[] = { + { + .res_id = CAM_ISP_HW_VFE_IN_RDI0, + .val = 0, + }, + { + .res_id = CAM_ISP_HW_VFE_IN_RDI1, + .val = 1, + }, + { + .res_id = CAM_ISP_HW_VFE_IN_RDI2, + .val= 2, + }, +}; + +static struct cam_irq_register_set vfe780_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x00000034, + .clear_reg_offset = 0x0000003C, + .status_reg_offset = 0x00000044, + 
.set_reg_offset = 0x0000004C, + .test_set_val = BIT(1), + .test_sub_val = BIT(0), + }, + { + .mask_reg_offset = 0x00000038, + .clear_reg_offset = 0x00000040, + .status_reg_offset = 0x00000048, + }, +}; + +static struct cam_irq_controller_reg_info vfe780_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe780_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000030, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t vfe780_top_debug_reg[] = { + 0x000000A0, + 0x000000A4, + 0x000000A8, + 0x000000AC, + 0x000000B0, + 0x000000B4, + 0x000000B8, + 0x000000BC, + 0x000000C0, + 0x000000C4, + 0x000000C8, + 0x000000CC, + 0x000000D0, + 0x000000D4, + 0x000000D8, + 0x000000DC, + 0x000000E0, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe780_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .core_cfg_0 = 0x00000024, + .core_cfg_1 = 0x00000028, + .core_cfg_2 = 0x0000002C, + .global_reset_cmd = 0x00000030, + .diag_config = 0x00000050, + .diag_sensor_status = {0x00000054, 0x00000058}, + .diag_frm_cnt_status = {0x0000005C, 0x00000060}, + .ipp_violation_status = 0x00000064, + .pdaf_violation_status = 0x00000404, + .core_cgc_ovd_0 = 0x00000018, + .core_cgc_ovd_1 = 0x0000001C, + .ahb_cgc_ovd = 0x00000020, + .dsp_status = 0x0000006C, + .stats_throttle_cfg_0 = 0x00000070, + .stats_throttle_cfg_1 = 0x00000074, + .stats_throttle_cfg_2 = 0x00000078, + .core_cfg_4 = 0x00000080, + .core_cfg_5 = 0x00000084, + .core_cfg_6 = 0x00000088, + .period_cfg = 0x0000008C, + .irq_sub_pattern_cfg = 0x00000090, + .epoch0_pattern_cfg = 0x00000094, + .epoch1_pattern_cfg = 0x00000098, + .epoch_height_cfg = 0x0000009C, + .bus_violation_status = 0x00000C64, + .bus_overflow_status = 0x00000C68, + .num_perf_counters = 2, + .perf_count_reg = { + { + .perf_count_cfg = 
0x00000100, + .perf_pix_count = 0x00000104, + .perf_line_count = 0x00000108, + .perf_stall_count = 0x0000010C, + .perf_always_count = 0x00000110, + .perf_count_status = 0x00000114, + }, + { + .perf_count_cfg = 0x00000118, + .perf_pix_count = 0x0000011C, + .perf_line_count = 0x00000120, + .perf_stall_count = 0x00000124, + .perf_always_count = 0x00000128, + .perf_count_status = 0x0000012C, + }, + }, + .top_debug_cfg = 0x000000FC, + .num_top_debug_reg = CAM_VFE_780_NUM_DBG_REG, + .pdaf_input_cfg_0 = 0x00000130, + .pdaf_input_cfg_1 = 0x00000134, + .top_debug = vfe780_top_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS1, +}; + +static struct cam_vfe_ver4_path_reg_data vfe780_pp_common_reg_data = { + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x10000, + .epoch1_irq_mask = 0x20000, + .eof_irq_mask = 0x00000002, + .error_irq_mask = 0x7F1D0, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + .ipp_violation_mask = 0x10, + .pdaf_violation_mask = 0x40, + .diag_violation_mask = 0x40000, + .diag_frm_count_mask_0 = 0x3f0, + .diag_sensor_sel_mask = 0x0, + .diag_frm_count_mask_0 = 0x10, +}; + +static struct cam_vfe_ver4_path_reg_data vfe780_vfe_full_rdi_reg_data[3] = { + { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x2, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x400, + .eof_irq_mask = 0x800, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x4, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x1000, + .eof_irq_mask = 0x2000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, +}; + +static struct cam_vfe_ver4_path_reg_data vfe780_pdlib_reg_data = { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x8, + 
.diag_frm_count_mask_0 = 0x20, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, +}; + +struct cam_vfe_ver4_path_hw_info + vfe780_rdi_hw_info_arr[] = { + { + .common_reg = &vfe780_top_common_reg, + .reg_data = &vfe780_vfe_full_rdi_reg_data[0], + }, + { + .common_reg = &vfe780_top_common_reg, + .reg_data = &vfe780_vfe_full_rdi_reg_data[1], + }, + { + .common_reg = &vfe780_top_common_reg, + .reg_data = &vfe780_vfe_full_rdi_reg_data[2], + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info vfe780_dbg_reg_info[CAM_VFE_780_NUM_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit("test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "STATS_IHIST", + "STATS_RS", + "STATS_CAF", + "GTM_BHIST", + "TINTLESS_BG", + "STATS_BFW", + "STATS_BG", + "STATS_BHIST" + ), + VFE_DBG_INFO_ARRAY_4bit( + "STATS_BE", + "R2PD_DS16_C_VID", + "R2PD_DS16_Y_VID", + "crop_rnd_clamp_post_downscale_C_DS16_VID", + "4to1_C_DS16_VID", + "crop_rnd_clamp_post_downscale_Y_DS16_VID", + "4to1_Y_DS16_VID", + "crop_rnd_clamp_post_dsx_C_VID" + ), + VFE_DBG_INFO_ARRAY_4bit( + "R2PD_DS4_VID_C", + "R2PD_DS4_VID_Y", + "DSX_C", + "crop_rnd_clamp_post_dsx_Y_VID", + "DSX_Y", + "crop_rnd_clamp_post_downscale_mn_C_VID", + "downscale_mn_C_VID", + "crop_rnd_clamp_post_downscale_mn_Y_VID" + ), + VFE_DBG_INFO_ARRAY_4bit( + "MNDS_Y_VID", + "R2PD_DS16_C_DISP", + "R2PD_DS16_Y_DISP", + "crop_rnd_clamp_post_downscale_C_DS16_DISP", + "4to1_C_DS16_DISP", + "crop_rnd_clamp_post_downscale_Y_DS16_DISP", + "4to1_Y_DS16_DISP", + "R2PD_DS4_C_DISP" + ), + VFE_DBG_INFO_ARRAY_4bit( + "R2PD_DS4_Y_DISP", + "crop_rnd_clamp_post_downscale_C_DS4_DISP", + "4to1_C_DS4_DISP", + "crop_rnd_clamp_post_downscale_Y_DS4_DISP", + "4to1_Y_DS4_DISP", + "crop_rnd_clamp_post_downscale_mn_C_DISP", + "downscale_mn_C_DISP", + "crop_rnd_clamp_post_downscale_mn_Y_DISP" + ), + VFE_DBG_INFO_ARRAY_4bit( + 
"downscale_mn_Y_DISP",
+		"crop_rnd_clamp_post_downscale_mn_C_FD",
+		"downscale_mn_C_FD",
+		"crop_rnd_clamp_post_downscale_mn_Y_FD",
+		"downscale_mn_Y_FD",
+		"gtm_fd_out",
+		"uvg",
+		"color_xform"
+	),
+	VFE_DBG_INFO_ARRAY_4bit(
+		"glut",
+		"gtm",
+		"color_correct",
+		"demosaic",
+		"hvx_tap2",
+		"lcac",
+		"bayer_ltm",
+		"bayer_gtm"
+	),
+	VFE_DBG_INFO_ARRAY_4bit(
+		"bls",
+		"bpc_abf",
+		"gic",
+		"wb_gain",
+		"lsc",
+		"compdecomp_hxv_rx",
+		"compdecomp_hxv_tx",
+		"hvx_tap1"
+	),
+	VFE_DBG_INFO_ARRAY_4bit(
+		"decompand",
+		"reserved",
+		"bincorrect",
+		"bpc_pdpc",
+		"channel_gain",
+		"bayer_argb_ccif_converter",
+		"crop_rnd_clamp_pre_argb_packer",
+		"chroma_up_uv"
+	),
+	VFE_DBG_INFO_ARRAY_4bit(
+		"chroma_up_y",
+		"demux",
+		"hxv_tap0",
+		"preprocess",
+		"reserved",
+		"reserved",
+		"bayer_ltm_bus_wr",
+		"RDI2"
+	),
+	VFE_DBG_INFO_ARRAY_4bit(
+		"RDI1",
+		"RDI0",
+		"reserved",
+		"pdaf_2_bus_wr",
+		"pdaf_1_bus_wr",
+		"pdaf_0_bus_wr", /* fixed: was duplicate "pdaf_1_bus_wr"; list descends 2,1,0 per the three PDAF write clients (wm 20-22) */
+		"ihist_bus_wr",
+		"flicker_rs_bus_wr"
+	),
+	VFE_DBG_INFO_ARRAY_4bit(
+		"gtm_bhist_bus_wr",
+		"caf_bus_wr",
+		"bfw_bus_wr",
+		"bg_bus_wr",
+		"tintless_bg_bus_wr",
+		"bhist_bus_wr",
+		"be_bus_wr",
+		"pixel_raw_bus_wr"
+	),
+	VFE_DBG_INFO_ARRAY_4bit(
+		"fd_c_bus_wr",
+		"fd_y_bus_wr",
+		"disp_ds16_bus_wr",
+		"disp_ds4_bus_wr",
+		"disp_c_bus_wr",
+		"disp_y_bus_wr",
+		"vid_ds16_bus_Wr",
+		"vid_ds4_bus_Wr"
+	),
+	VFE_DBG_INFO_ARRAY_4bit(
+		"vid_c_bus_wr",
+		"vid_y_bus_wr",
+		"CLC_PDAF",
+		"PIX_PP",
+		"reserved",
+		"reserved",
+		"reserved",
+		"reserved"
+	),
+	{
+		/* needs to be parsed separately, doesn't conform to I, V, R */
+		VFE_DBG_INFO(32, "lcr_pd_timing_debug"),
+		VFE_DBG_INFO(32, "lcr_pd_timing_debug"),
+		VFE_DBG_INFO(32, "lcr_pd_timing_debug"),
+		VFE_DBG_INFO(32, "lcr_pd_timing_debug"),
+		VFE_DBG_INFO(32, "lcr_pd_timing_debug"),
+		VFE_DBG_INFO(32, "lcr_pd_timing_debug"),
+		VFE_DBG_INFO(32, "lcr_pd_timing_debug"),
+		VFE_DBG_INFO(32, "lcr_pd_timing_debug"),
+	},
+	VFE_DBG_INFO_ARRAY_4bit(
+		"r2pd_reserved",
+		"r2pd_reserved",
+
"r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved" + ), +}; + +static struct cam_vfe_top_ver4_diag_reg_info vfe780_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_PIXEL_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_PDAF_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_DSP_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe780_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &vfe780_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &vfe780_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe780_diag_frame_field[] = { + { + .num_fields = 4, + .field = &vfe780_diag_reg_info[4], + }, + { + .num_fields = 2, + .field = &vfe780_diag_reg_info[8], + }, +}; + +static struct cam_vfe_top_ver4_hw_info vfe780_top_hw_info = { + .common_reg = &vfe780_top_common_reg, + .vfe_full_hw_info = { + .common_reg = &vfe780_top_common_reg, + .reg_data = &vfe780_pp_common_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &vfe780_top_common_reg, + .reg_data = &vfe780_pdlib_reg_data, + }, + .rdi_hw_info = vfe780_rdi_hw_info_arr, + .wr_client_desc = vfe780_wr_client_desc, + .ipp_module_desc = vfe780_ipp_mod_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_PDLIB_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .num_path_port_map = 3, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, 
CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA}, + }, + .num_rdi = ARRAY_SIZE(vfe780_rdi_hw_info_arr), + .num_top_errors = ARRAY_SIZE(vfe780_top_irq_err_desc), + .top_err_desc = vfe780_top_irq_err_desc, + .num_pdaf_violation_errors = ARRAY_SIZE(vfe780_pdaf_violation_desc), + .pdaf_violation_desc = vfe780_pdaf_violation_desc, + .top_debug_reg_info = &vfe780_dbg_reg_info, + .pdaf_lcr_res_mask = vfe780_pdaf_lcr_res_mask, + .num_pdaf_lcr_res = ARRAY_SIZE(vfe780_pdaf_lcr_res_mask), + .diag_sensor_info = vfe780_diag_sensor_field, + .diag_frame_info = vfe780_diag_frame_field, +}; + +static struct cam_irq_register_set vfe780_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x00000C18, + .clear_reg_offset = 0x00000C20, + .status_reg_offset = 0x00000C28, + .set_reg_offset = 0x00000C50, + }, + { + .mask_reg_offset = 0x00000C1C, + .clear_reg_offset = 0x00000C24, + .status_reg_offset = 0x00000C2C, + .set_reg_offset = 0x00000C54, + }, +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe780_ubwc_regs_client_0 = { + .meta_addr = 0x00000E40, + .meta_cfg = 0x00000E44, + .mode_cfg = 0x00000E48, + .stats_ctrl = 0x00000E4C, + .ctrl_2 = 0x00000E50, + .lossy_thresh0 = 0x00000E54, + .lossy_thresh1 = 0x00000E58, + .off_lossy_var = 0x00000E5C, + .bw_limit = 0x00000E1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe780_ubwc_regs_client_1 = { + .meta_addr = 0x00000F40, + .meta_cfg = 0x00000F44, + .mode_cfg = 0x00000F48, + .stats_ctrl = 0x00000F4C, + .ctrl_2 = 0x00000F50, + .lossy_thresh0 = 0x00000F54, + .lossy_thresh1 = 0x00000F58, + .off_lossy_var = 0x00000F5C, + .bw_limit = 0x00000F1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe780_ubwc_regs_client_4 = { + .meta_addr = 0x00001240, + .meta_cfg = 0x00001244, + .mode_cfg = 0x00001248, + .stats_ctrl = 0x0000124C, + .ctrl_2 = 0x00001250, + .lossy_thresh0 = 0x00001254, + 
.lossy_thresh1 = 0x00001258, + .off_lossy_var = 0x0000125C, + .bw_limit = 0x0000121C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe780_ubwc_regs_client_5 = { + .meta_addr = 0x00001340, + .meta_cfg = 0x00001344, + .mode_cfg = 0x00001348, + .stats_ctrl = 0x0000134C, + .ctrl_2 = 0x00001350, + .lossy_thresh0 = 0x00001354, + .lossy_thresh1 = 0x00001358, + .off_lossy_var = 0x0000135C, + .bw_limit = 0x0000131C, + .ubwc_comp_en_bit = BIT(1), +}; + +static uint32_t vfe780_out_port_mid[][4] = { + {34, 0, 0, 0}, + {35, 0, 0, 0}, + {36, 0, 0, 0}, + {16, 17, 18, 19}, + {20, 0, 0, 0}, + {21, 0, 0, 0}, + {32, 33, 0, 0}, + {28, 29, 30}, + {8, 0, 0, 0}, + {18, 0, 0, 0}, + {21, 0, 0, 0}, + {19, 0, 0, 0}, + {17, 0, 0, 0}, + {23, 0, 0, 0}, + {24, 0, 0, 0}, + {22, 23, 24, 25}, + {26, 0, 0, 0}, + {27, 0, 0, 0}, + {9, 0, 0, 0}, + {20, 0, 0, 0}, + {10, 0, 0, 0}, + {16, 0, 0, 0}, + {25, 26, 0, 0}, + {22, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_err_irq_desc vfe780_bus_irq_err_desc_0[] = { + { + .bitmask = BIT(26), + .err_name = "IPCC_FENCE_DATA_ERR", + .desc = "IPCC or FENCE Data was not available in the Input Fifo", + }, + { + .bitmask = BIT(27), + .err_name = "IPCC_FENCE_ADDR_ERR", + .desc = "IPCC or FENCE address fifo was empty and read was attempted", + }, + { + .bitmask = BIT(28), + .err_name = "CONS_VIOLATION", + .desc = "Programming of software registers violated the constraints", + }, + { + .bitmask = BIT(30), + .err_name = "VIOLATION", + .desc = "Client has a violation in ccif protocol at input", + }, + { + .bitmask = BIT(31), + .err_name = "IMAGE_SIZE_VIOLATION", + .desc = "Programmed image size is not same as image size from the CCIF", + }, +}; + +static struct cam_vfe_bus_ver3_err_irq_desc vfe780_bus_irq_err_desc_1[] = { + { + .bitmask = BIT(28), + .err_name = "EARLY_DONE", + .desc = "Buf done for each client. 
Early done irq for clients STATS_BAF", + }, + { + .bitmask = BIT(29), + .err_name = "EARLY_DONE", + .desc = "Buf done for each client. Early done irq for clients STATS_BAF", + }, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe780_bus_hw_info = { + .common_reg = { + .hw_version = 0x00000C00, + .cgc_ovd = 0x00000C08, + .if_frameheader_cfg = { + 0x00000C34, + 0x00000C38, + 0x00000C3C, + 0x00000C40, + 0x00000C44, + }, + .ubwc_static_ctrl = 0x00000C58, + .pwr_iso_cfg = 0x00000C5C, + .overflow_status_clear = 0x00000C60, + .ccif_violation_status = 0x00000C64, + .overflow_status = 0x00000C68, + .image_size_violation_status = 0x00000C70, + .debug_status_top_cfg = 0x00000CD4, + .debug_status_top = 0x00000CD8, + .test_bus_ctrl = 0x00000CDC, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe780_bus_irq_reg, + .global_irq_cmd_offset = 0x00000C30, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = CAM_VFE_BUS_VER3_780_MAX_CLIENTS, + .bus_client_reg = { + /* BUS Client 0 FULL Y */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + .frame_header_cfg = 0x00000E28, + .irq_subsample_period = 0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00000E38, + .framedrop_pattern = 0x00000E3C, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 0x00000E64, + .system_cache_cfg = 0x00000E68, + .addr_cfg = 0x00000E70, + .addr_status_0 = 0x00000E74, + .addr_status_1 = 0x00000E78, + .addr_status_2 = 0x00000E7C, + .addr_status_3 = 0x00000E80, + .debug_status_cfg = 0x00000E84, + .debug_status_0 = 0x00000E88, + .debug_status_1 = 0x00000E8C, + .bw_limiter_addr = 0x00000E1C, + .comp_group = 
CAM_VFE_BUS_VER3_COMP_GRP_0,
+			.ubwc_regs = &vfe780_ubwc_regs_client_0,
+			.supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) |
+				BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) |
+				BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) |
+				BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) |
+				BIT_ULL(CAM_FORMAT_PLAIN16_10),
+		},
+		/* BUS Client 1 FULL C */
+		{
+			.cfg = 0x00000F00,
+			.image_addr = 0x00000F04,
+			.frame_incr = 0x00000F08,
+			.image_cfg_0 = 0x00000F0C,
+			.image_cfg_1 = 0x00000F10,
+			.image_cfg_2 = 0x00000F14,
+			.packer_cfg = 0x00000F18,
+			.frame_header_addr = 0x00000F20,
+			.frame_header_incr = 0x00000F24,
+			.frame_header_cfg = 0x00000F28,
+			.irq_subsample_period = 0x00000F30,
+			.irq_subsample_pattern = 0x00000F34,
+			.framedrop_period = 0x00000F38,
+			.framedrop_pattern = 0x00000F3C,
+			.mmu_prefetch_cfg = 0x00000F60,
+			.mmu_prefetch_max_offset = 0x00000F64,
+			.system_cache_cfg = 0x00000F68,
+			.addr_cfg = 0x00000F70, /* BUGFIX: was 0x00000E70, a copy-paste of client 0's offset; client 1 registers live in the 0x00000F__ bank */
+			.addr_status_0 = 0x00000F74,
+			.addr_status_1 = 0x00000F78,
+			.addr_status_2 = 0x00000F7C,
+			.addr_status_3 = 0x00000F80,
+			.debug_status_cfg = 0x00000F84,
+			.debug_status_0 = 0x00000F88,
+			.debug_status_1 = 0x00000F8C,
+			.bw_limiter_addr = 0x00000F1C,
+			.comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0,
+			.ubwc_regs = &vfe780_ubwc_regs_client_1,
+			.supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) |
+				BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) |
+				BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) |
+				BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) |
+				BIT_ULL(CAM_FORMAT_PLAIN16_10),
+		},
+		/* BUS Client 2 DS4 */
+		{
+			.cfg = 0x00001000,
+			.image_addr = 0x00001004,
+			.frame_incr = 0x00001008,
+			.image_cfg_0 = 0x0000100C,
+			.image_cfg_1 = 0x00001010,
+			.image_cfg_2 = 0x00001014,
+			.packer_cfg = 0x00001018,
+			.irq_subsample_period = 0x00001030,
+			.irq_subsample_pattern = 0x00001034,
+			.framedrop_period = 0x00001038,
+			.framedrop_pattern = 0x0000103C,
+			.mmu_prefetch_cfg = 0x00001060,
+
.mmu_prefetch_max_offset = 0x00001064, + .system_cache_cfg = 0x00001068, + .addr_cfg = 0x00001070, + .addr_status_0 = 0x00001074, + .addr_status_1 = 0x00001078, + .addr_status_2 = 0x0000107C, + .addr_status_3 = 0x00001080, + .debug_status_cfg = 0x00001084, + .debug_status_0 = 0x00001088, + .debug_status_1 = 0x0000108C, + .bw_limiter_addr = 0x0000101C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 3 DS16 */ + { + .cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + .image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + .packer_cfg = 0x00001118, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .framedrop_period = 0x00001138, + .framedrop_pattern = 0x0000113C, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .system_cache_cfg = 0x00001168, + .addr_cfg = 0x00001170, + .addr_status_0 = 0x00001174, + .addr_status_1 = 0x00001178, + .addr_status_2 = 0x0000117C, + .addr_status_3 = 0x00001180, + .debug_status_cfg = 0x00001184, + .debug_status_0 = 0x00001188, + .debug_status_1 = 0x0000118C, + .bw_limiter_addr = 0x0000111C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 4 DISP Y */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .framedrop_period = 0x00001238, + .framedrop_pattern = 0x0000123C, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .system_cache_cfg = 0x00001268, + .addr_cfg = 0x00001270, + .addr_status_0 = 0x00001274, + .addr_status_1 = 
0x00001278, + .addr_status_2 = 0x0000127C, + .addr_status_3 = 0x00001280, + .debug_status_cfg = 0x00001284, + .debug_status_0 = 0x00001288, + .debug_status_1 = 0x0000128C, + .bw_limiter_addr = 0x0000121C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = &vfe780_ubwc_regs_client_4, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 5 DISP C */ + { + .cfg = 0x00001300, + .image_addr = 0x00001304, + .frame_incr = 0x00001308, + .image_cfg_0 = 0x0000130C, + .image_cfg_1 = 0x00001310, + .image_cfg_2 = 0x00001314, + .packer_cfg = 0x00001318, + .frame_header_addr = 0x00001320, + .frame_header_incr = 0x00001324, + .frame_header_cfg = 0x00001328, + .irq_subsample_period = 0x00001330, + .irq_subsample_pattern = 0x00001334, + .framedrop_period = 0x00001338, + .framedrop_pattern = 0x0000133C, + .mmu_prefetch_cfg = 0x00001360, + .mmu_prefetch_max_offset = 0x00001364, + .system_cache_cfg = 0x00001368, + .addr_cfg = 0x00001370, + .addr_status_0 = 0x00001374, + .addr_status_1 = 0x00001378, + .addr_status_2 = 0x0000137C, + .addr_status_3 = 0x00001380, + .debug_status_cfg = 0x00001384, + .debug_status_0 = 0x00001388, + .debug_status_1 = 0x0000138C, + .bw_limiter_addr = 0x0000131C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = &vfe780_ubwc_regs_client_5, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 6 DISP DS4 */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + 
.packer_cfg = 0x00001418, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .system_cache_cfg = 0x00001468, + .addr_cfg = 0x00001470, + .addr_status_0 = 0x00001474, + .addr_status_1 = 0x00001478, + .addr_status_2 = 0x0000147C, + .addr_status_3 = 0x00001480, + .debug_status_cfg = 0x00001484, + .debug_status_0 = 0x00001488, + .debug_status_1 = 0x0000148C, + .bw_limiter_addr = 0x0000141C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 7 DISP DS16 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 0x00001568, + .addr_cfg = 0x00001570, + .addr_status_0 = 0x00001574, + .addr_status_1 = 0x00001578, + .addr_status_2 = 0x0000157C, + .addr_status_3 = 0x00001580, + .debug_status_cfg = 0x00001584, + .debug_status_0 = 0x00001588, + .debug_status_1 = 0x0000158C, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 8 FD Y */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + 
.framedrop_pattern = 0x0000163C, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .addr_status_0 = 0x00001674, + .addr_status_1 = 0x00001678, + .addr_status_2 = 0x0000167C, + .addr_status_3 = 0x00001680, + .debug_status_cfg = 0x00001684, + .debug_status_0 = 0x00001688, + .debug_status_1 = 0x0000168C, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 9 FD C */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .addr_status_0 = 0x00001774, + .addr_status_1 = 0x00001778, + .addr_status_2 = 0x0000177C, + .addr_status_3 = 0x00001780, + .debug_status_cfg = 0x00001784, + .debug_status_0 = 0x00001788, + .debug_status_1 = 0x0000178C, + .bw_limiter_addr = 0x0000171C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 10 PIXEL RAW */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 
0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .addr_status_0 = 0x00001874, + .addr_status_1 = 0x00001878, + .addr_status_2 = 0x0000187C, + .addr_status_3 = 0x00001880, + .debug_status_cfg = 0x00001884, + .debug_status_0 = 0x00001888, + .debug_status_1 = 0x0000188C, + .bw_limiter_addr = 0x0000181C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | + BIT_ULL(CAM_FORMAT_PLAIN8) | BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 11 STATS BE 0 */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + 
.addr_status_0 = 0x00001974, + .addr_status_1 = 0x00001978, + .addr_status_2 = 0x0000197C, + .addr_status_3 = 0x00001980, + .debug_status_cfg = 0x00001984, + .debug_status_0 = 0x00001988, + .debug_status_1 = 0x0000198C, + .bw_limiter_addr = 0x0000191C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 12 STATS BHIST 0 */ + { + .cfg = 0x00001A00, + .image_addr = 0x00001A04, + .frame_incr = 0x00001A08, + .image_cfg_0 = 0x00001A0C, + .image_cfg_1 = 0x00001A10, + .image_cfg_2 = 0x00001A14, + .packer_cfg = 0x00001A18, + .frame_header_addr = 0x00001A20, + .frame_header_incr = 0x00001A24, + .frame_header_cfg = 0x00001A28, + .irq_subsample_period = 0x00001A30, + .irq_subsample_pattern = 0x00001A34, + .framedrop_period = 0x00001A38, + .framedrop_pattern = 0x00001A3C, + .mmu_prefetch_cfg = 0x00001A60, + .mmu_prefetch_max_offset = 0x00001A64, + .system_cache_cfg = 0x00001A68, + .addr_cfg = 0x00001A70, + .addr_status_0 = 0x00001A74, + .addr_status_1 = 0x00001A78, + .addr_status_2 = 0x00001A7C, + .addr_status_3 = 0x00001A80, + .debug_status_cfg = 0x00001A84, + .debug_status_0 = 0x00001A88, + .debug_status_1 = 0x00001A8C, + .bw_limiter_addr = 0x00001A1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 13 STATS TINTLESS BG */ + { + .cfg = 0x00001B00, + .image_addr = 0x00001B04, + .frame_incr = 0x00001B08, + .image_cfg_0 = 0x00001B0C, + .image_cfg_1 = 0x00001B10, + .image_cfg_2 = 0x00001B14, + .packer_cfg = 0x00001B18, + .frame_header_addr = 0x00001B20, + .frame_header_incr = 0x00001B24, + .frame_header_cfg = 0x00001B28, + .irq_subsample_period = 0x00001B30, + .irq_subsample_pattern = 0x00001B34, + .framedrop_period = 0x00001B38, + .framedrop_pattern = 0x00001B3C, + .mmu_prefetch_cfg = 0x00001B60, + .mmu_prefetch_max_offset = 0x00001B64, + .system_cache_cfg = 0x00001B68, + .addr_cfg = 0x00001B70, + .addr_status_0 = 
0x00001B74, + .addr_status_1 = 0x00001B78, + .addr_status_2 = 0x00001B7C, + .addr_status_3 = 0x00001B80, + .debug_status_cfg = 0x00001B84, + .debug_status_0 = 0x00001B88, + .debug_status_1 = 0x00001B8C, + .bw_limiter_addr = 0x00001B1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_5, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 14 STATS AWB BG */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .framedrop_period = 0x00001C38, + .framedrop_pattern = 0x00001C3C, + .mmu_prefetch_cfg = 0x00001C60, + .mmu_prefetch_max_offset = 0x00001C64, + .system_cache_cfg = 0x00001C68, + .addr_cfg = 0x00001C70, + .addr_status_0 = 0x00001C74, + .addr_status_1 = 0x00001C78, + .addr_status_2 = 0x00001C7C, + .addr_status_3 = 0x00001C80, + .debug_status_cfg = 0x00001C84, + .debug_status_0 = 0x00001C88, + .debug_status_1 = 0x00001C8C, + .bw_limiter_addr = 0x00001C1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 15 STATS AWB BFW */ + { + .cfg = 0x00001D00, + .image_addr = 0x00001D04, + .frame_incr = 0x00001D08, + .image_cfg_0 = 0x00001D0C, + .image_cfg_1 = 0x00001D10, + .image_cfg_2 = 0x00001D14, + .packer_cfg = 0x00001D18, + .frame_header_addr = 0x00001D20, + .frame_header_incr = 0x00001D24, + .frame_header_cfg = 0x00001D28, + .irq_subsample_period = 0x00001D30, + .irq_subsample_pattern = 0x00001D34, + .framedrop_period = 0x00001D38, + .framedrop_pattern = 0x00001D3C, + .mmu_prefetch_cfg = 0x00001D60, + .mmu_prefetch_max_offset = 0x00001D64, + .system_cache_cfg = 0x00001D68, + .addr_cfg = 0x00001D70, + .addr_status_0 = 0x00001D74, + .addr_status_1 
= 0x00001D78, + .addr_status_2 = 0x00001D7C, + .addr_status_3 = 0x00001D80, + .debug_status_cfg = 0x00001D84, + .debug_status_0 = 0x00001D88, + .debug_status_1 = 0x00001D8C, + .bw_limiter_addr = 0x00001D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 16 STATS CAF */ + { + .cfg = 0x00001E00, + .image_addr = 0x00001E04, + .frame_incr = 0x00001E08, + .image_cfg_0 = 0x00001E0C, + .image_cfg_1 = 0x00001E10, + .image_cfg_2 = 0x00001E14, + .packer_cfg = 0x00001E18, + .frame_header_addr = 0x00001E20, + .frame_header_incr = 0x00001E24, + .frame_header_cfg = 0x00001E28, + .irq_subsample_period = 0x00001E30, + .irq_subsample_pattern = 0x00001E34, + .framedrop_period = 0x00001E38, + .framedrop_pattern = 0x00001E3C, + .mmu_prefetch_cfg = 0x00001E60, + .mmu_prefetch_max_offset = 0x00001E64, + .system_cache_cfg = 0x00001E68, + .addr_cfg = 0x00001E70, + .addr_status_0 = 0x00001E74, + .addr_status_1 = 0x00001E78, + .addr_status_2 = 0x00001E7C, + .addr_status_3 = 0x00001E80, + .debug_status_cfg = 0x00001E84, + .debug_status_0 = 0x00001E88, + .debug_status_1 = 0x00001E8C, + .bw_limiter_addr = 0x00001E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 17 STATS BHIST */ + { + .cfg = 0x00001F00, + .image_addr = 0x00001F04, + .frame_incr = 0x00001F08, + .image_cfg_0 = 0x00001F0C, + .image_cfg_1 = 0x00001F10, + .image_cfg_2 = 0x00001F14, + .packer_cfg = 0x00001F18, + .frame_header_addr = 0x00001F20, + .frame_header_incr = 0x00001F24, + .frame_header_cfg = 0x00001F28, + .irq_subsample_period = 0x00001F30, + .irq_subsample_pattern = 0x00001F34, + .framedrop_period = 0x00001F38, + .framedrop_pattern = 0x00001F3C, + .mmu_prefetch_cfg = 0x00001F60, + .mmu_prefetch_max_offset = 0x00001F64, + .system_cache_cfg = 0x00001F68, + .addr_cfg = 0x00001F70, + .addr_status_0 = 0x00001F74, + .addr_status_1 = 0x00001F78, + 
.addr_status_2 = 0x00001F7C, + .addr_status_3 = 0x00001F80, + .debug_status_cfg = 0x00001F84, + .debug_status_0 = 0x00001F88, + .debug_status_1 = 0x00001F8C, + .bw_limiter_addr = 0x00001F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_8, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 18 STATS BAYER RS */ + { + .cfg = 0x00002000, + .image_addr = 0x00002004, + .frame_incr = 0x00002008, + .image_cfg_0 = 0x0000200C, + .image_cfg_1 = 0x00002010, + .image_cfg_2 = 0x00002014, + .packer_cfg = 0x00002018, + .frame_header_addr = 0x00002020, + .frame_header_incr = 0x00002024, + .frame_header_cfg = 0x00002028, + .irq_subsample_period = 0x00002030, + .irq_subsample_pattern = 0x00002034, + .framedrop_period = 0x00002038, + .framedrop_pattern = 0x0000203C, + .mmu_prefetch_cfg = 0x00002060, + .mmu_prefetch_max_offset = 0x00002064, + .system_cache_cfg = 0x00002068, + .addr_cfg = 0x00002070, + .addr_status_0 = 0x00002074, + .addr_status_1 = 0x00002078, + .addr_status_2 = 0x0000207C, + .addr_status_3 = 0x00002080, + .debug_status_cfg = 0x00002084, + .debug_status_0 = 0x00002088, + .debug_status_1 = 0x0000208C, + .bw_limiter_addr = 0x0000201C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_9, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 19 STATS IHIST */ + { + .cfg = 0x00002100, + .image_addr = 0x00002104, + .frame_incr = 0x00002108, + .image_cfg_0 = 0x0000210C, + .image_cfg_1 = 0x00002110, + .image_cfg_2 = 0x00002114, + .packer_cfg = 0x00002118, + .frame_header_addr = 0x00002120, + .frame_header_incr = 0x00002124, + .frame_header_cfg = 0x00002128, + .irq_subsample_period = 0x00002130, + .irq_subsample_pattern = 0x00002134, + .framedrop_period = 0x00002138, + .framedrop_pattern = 0x0000213C, + .mmu_prefetch_cfg = 0x00002160, + .mmu_prefetch_max_offset = 0x00002164, + .system_cache_cfg = 0x00002168, + .addr_cfg = 0x00002170, + .addr_status_0 = 0x00002174, + .addr_status_1 = 0x00002178, + .addr_status_2 = 0x0000217C, 
+ .addr_status_3 = 0x00002180, + .debug_status_cfg = 0x00002184, + .debug_status_0 = 0x00002188, + .debug_status_1 = 0x0000218C, + .bw_limiter_addr = 0x0000211C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_10, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 20 PDAF_0_2PD */ + { + .cfg = 0x00002200, + .image_addr = 0x00002204, + .frame_incr = 0x00002208, + .image_cfg_0 = 0x0000220C, + .image_cfg_1 = 0x00002210, + .image_cfg_2 = 0x00002214, + .packer_cfg = 0x00002218, + .frame_header_addr = 0x00002220, + .frame_header_incr = 0x00002224, + .frame_header_cfg = 0x00002228, + .irq_subsample_period = 0x00002230, + .irq_subsample_pattern = 0x00002234, + .framedrop_period = 0x00002238, + .framedrop_pattern = 0x0000223C, + .mmu_prefetch_cfg = 0x00002260, + .mmu_prefetch_max_offset = 0x00002264, + .system_cache_cfg = 0x00002268, + .addr_cfg = 0x00002270, + .addr_status_0 = 0x00002274, + .addr_status_1 = 0x00002278, + .addr_status_2 = 0x0000227C, + .addr_status_3 = 0x00002280, + .debug_status_cfg = 0x00002284, + .debug_status_0 = 0x00002288, + .debug_status_1 = 0x0000228C, + .bw_limiter_addr = 0x0000221C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_11, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 21 PDAF V2.0 PD DATA PDAF_1_PREPROCESS_2PD */ + { + .cfg = 0x00002300, + .image_addr = 0x00002304, + .frame_incr = 0x00002308, + .image_cfg_0 = 0x0000230C, + .image_cfg_1 = 0x00002310, + .image_cfg_2 = 0x00002314, + .packer_cfg = 0x00002318, + .frame_header_addr = 0x00002320, + .frame_header_incr = 0x00002324, + .frame_header_cfg = 0x00002328, + .irq_subsample_period = 0x00002330, + .irq_subsample_pattern = 0x00002334, + .framedrop_period = 0x00002338, + .framedrop_pattern = 0x0000233C, + .mmu_prefetch_cfg = 0x00002360, + .mmu_prefetch_max_offset = 0x00002364, + .system_cache_cfg = 0x00002368, + .addr_cfg = 0x00002370, + .addr_status_0 = 0x00002374, + .addr_status_1 = 0x00002378, + .addr_status_2 = 0x0000237C, + 
.addr_status_3 = 0x00002380, + .debug_status_cfg = 0x00002384, + .debug_status_0 = 0x00002388, + .debug_status_1 = 0x0000238C, + .bw_limiter_addr = 0x0000231C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_11, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16), + }, + /* BUS Client 22 PDAF V2.0 PDAF_2_PARSED_DATA */ + { + .cfg = 0x00002400, + .image_addr = 0x00002404, + .frame_incr = 0x00002408, + .image_cfg_0 = 0x0000240C, + .image_cfg_1 = 0x00002410, + .image_cfg_2 = 0x00002414, + .packer_cfg = 0x00002418, + .frame_header_addr = 0x00002420, + .frame_header_incr = 0x00002424, + .frame_header_cfg = 0x00002428, + .irq_subsample_period = 0x00002430, + .irq_subsample_pattern = 0x00002434, + .framedrop_period = 0x00002438, + .framedrop_pattern = 0x0000243C, + .mmu_prefetch_cfg = 0x00002460, + .mmu_prefetch_max_offset = 0x00002464, + .system_cache_cfg = 0x00002468, + .addr_cfg = 0x00002470, + .addr_status_0 = 0x00002474, + .addr_status_1 = 0x00002478, + .addr_status_2 = 0x0000247C, + .addr_status_3 = 0x00002480, + .debug_status_cfg = 0x00002484, + .debug_status_0 = 0x00002488, + .debug_status_1 = 0x0000248C, + .bw_limiter_addr = 0x0000241C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_11, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16), + }, + /* BUS Client 23 RDI0 */ + { + .cfg = 0x00002500, + .image_addr = 0x00002504, + .frame_incr = 0x00002508, + .image_cfg_0 = 0x0000250C, + .image_cfg_1 = 0x00002510, + .image_cfg_2 = 0x00002514, + .packer_cfg = 0x00002518, + .frame_header_addr = 0x00002520, + .frame_header_incr = 0x00002524, + .frame_header_cfg = 0x00002528, + .line_done_cfg = 0x0000252C, + .irq_subsample_period = 0x00002530, + .irq_subsample_pattern = 
0x00002534, + .framedrop_period = 0x00002538, + .framedrop_pattern = 0x0000253C, + .mmu_prefetch_cfg = 0x00002560, + .mmu_prefetch_max_offset = 0x00002564, + .system_cache_cfg = 0x00002568, + .addr_cfg = 0x00002570, + .addr_status_0 = 0x00002574, + .addr_status_1 = 0x00002578, + .addr_status_2 = 0x0000257C, + .addr_status_3 = 0x00002580, + .debug_status_cfg = 0x00002584, + .debug_status_0 = 0x00002588, + .debug_status_1 = 0x0000258C, + .bw_limiter_addr = 0x0000251C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_12, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 24 RDI1 */ + { + .cfg = 0x00002600, + .image_addr = 0x00002604, + .frame_incr = 0x00002608, + .image_cfg_0 = 0x0000260C, + .image_cfg_1 = 0x00002610, + .image_cfg_2 = 0x00002614, + .packer_cfg = 0x00002618, + .frame_header_addr = 0x00002620, + .frame_header_incr = 0x00002624, + .frame_header_cfg = 0x00002628, + .line_done_cfg = 0x0000262C, + .irq_subsample_period = 0x00002630, + .irq_subsample_pattern = 0x00002634, + .framedrop_period = 0x00002638, + .framedrop_pattern = 0x0000263C, + .mmu_prefetch_cfg = 0x00002660, + .mmu_prefetch_max_offset = 0x00002664, + .system_cache_cfg = 0x00002668, + .addr_cfg = 0x00002670, + .addr_status_0 = 0x00002674, + .addr_status_1 = 0x00002678, + .addr_status_2 = 0x0000267C, + .addr_status_3 = 0x00002680, + .debug_status_cfg = 0x00002684, + .debug_status_0 = 0x00002688, + .debug_status_1 = 0x0000268C, + 
.bw_limiter_addr = 0x0000261C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_13, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 25 RDI2 */ + { + .cfg = 0x00002700, + .image_addr = 0x00002704, + .frame_incr = 0x00002708, + .image_cfg_0 = 0x0000270C, + .image_cfg_1 = 0x00002710, + .image_cfg_2 = 0x00002714, + .packer_cfg = 0x00002718, + .frame_header_addr = 0x00002720, + .frame_header_incr = 0x00002724, + .frame_header_cfg = 0x00002728, + .line_done_cfg = 0x0000272C, + .irq_subsample_period = 0x00002730, + .irq_subsample_pattern = 0x00002734, + .framedrop_period = 0x00002738, + .framedrop_pattern = 0x0000273C, + .mmu_prefetch_cfg = 0x00002760, + .mmu_prefetch_max_offset = 0x00002764, + .system_cache_cfg = 0x00002768, + .addr_cfg = 0x00002770, + .addr_status_0 = 0x00002774, + .addr_status_1 = 0x00002778, + .addr_status_2 = 0x0000277C, + .addr_status_3 = 0x00002780, + .debug_status_cfg = 0x00002784, + .debug_status_0 = 0x00002788, + .debug_status_1 = 0x0000278C, + .bw_limiter_addr = 0x0000271C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_14, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + 
BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 26 LTM STATS */ + { + .cfg = 0x00002800, + .image_addr = 0x00002804, + .frame_incr = 0x00002808, + .image_cfg_0 = 0x0000280C, + .image_cfg_1 = 0x00002810, + .image_cfg_2 = 0x00002814, + .packer_cfg = 0x00002818, + .frame_header_addr = 0x00002820, + .frame_header_incr = 0x00002824, + .frame_header_cfg = 0x00002828, + .irq_subsample_period = 0x00002830, + .irq_subsample_pattern = 0x00002834, + .framedrop_period = 0x00002838, + .framedrop_pattern = 0x0000283C, + .mmu_prefetch_cfg = 0x00002860, + .mmu_prefetch_max_offset = 0x00002864, + .system_cache_cfg = 0x00002868, + .addr_cfg = 0x00002870, + .addr_status_0 = 0x00002874, + .addr_status_1 = 0x00002878, + .addr_status_2 = 0x0000287C, + .addr_status_3 = 0x00002880, + .debug_status_cfg = 0x00002884, + .debug_status_0 = 0x00002888, + .debug_status_1 = 0x0000288C, + .bw_limiter_addr = 0x0000281C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN32), + }, + }, + .num_out = 24, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .mid = vfe780_out_port_mid[0], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 23, + }, + .name = { + "RDI_0", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .mid = vfe780_out_port_mid[1], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 24, + }, + .name = { + "RDI_1", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = 16384, + 
.max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .mid = vfe780_out_port_mid[2], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 25, + }, + .name = { + "RDI_2", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL, + .max_width = 4928, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[3], + .num_mid = 4, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 0, + 1, + }, + .name = { + "FULL_Y", + "FULL_C", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4, + .max_width = 1696, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[4], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 2, + }, + .name = { + "DS_4", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16, + .max_width = 424, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[5], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 3, + }, + .name = { + "DS_16", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP, + .max_width = 7296, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[6], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 10, + }, + .name = { + "PIXEL_RAW", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD, + .max_width = 2304, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[7], + .num_mid = 3, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 8, + 9, + }, + .name = { + "FD_Y", + "FD_C", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD, + .max_width = 14592, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe780_out_port_mid[8], + .num_mid = 1, + .num_wm = 1, + 
.wm_idx = { + 20, + }, + .name = { + "PDAF_0_2PD", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[9], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 13, + }, + .name = { + "STATS_TL_BG", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[10], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 16, + }, + .name = { + "STATS_BF", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[11], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 14, + }, + .name = { + "STATS_AWB_BGB", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[12], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 12, + }, + .name = { + "STATS_BHIST", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[13], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 18, + }, + .name = { + "STATS_RS", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[14], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 19, + }, + .name = { + "STATS_IHIST", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP, + .max_width = 4928, + .max_height = 4096, + .source_group = 
CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[15], + .num_mid = 4, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 4, + 5, + }, + .name = { + "FULL_DISP_Y", + "FULL_DISP_C", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP, + .max_width = 1232, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[16], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 6, + }, + .name = { + "DISP_DS_4", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP, + .max_width = 308, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[17], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 7, + }, + .name = { + "DISP_DS_16", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe780_out_port_mid[18], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 21, + }, + .name = { + "PDAF_1_PREPROCESS_2PD", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[19], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 15, + }, + .name = { + "AWB_BFW", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe780_out_port_mid[20], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 22, + }, + .name = { + "PDAF_2_PARSED_DATA", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[21], + .num_mid = 1, + 
.num_wm = 1, + .wm_idx = { + 11, + }, + .name = { + "AEC_BE", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[22], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 26, + }, + .name = { + "LTM", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe780_out_port_mid[23], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 17, + }, + .name = { + "GTM_BHIST", + }, + .pid_mask = 0x700000, + }, + }, + + .num_cons_err = 29, + .constraint_error_list = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input Not Supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input Not Supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input Not Supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input Not Supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format Not Supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 BPP format Not Supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 32 BPP format Not Supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 64 BPP format Not Supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack MIPI 20 format Not Supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 14 format Not Supported" + }, + { + .bitmask = BIT(10), + .error_description = "Pack MIPI 12 format Not Supported" + }, + { + .bitmask = BIT(11), + .error_description = "Pack MIPI 10 format Not Supported" + }, + { + .bitmask = BIT(12), + .error_description = "Pack 128 BPP format Not Supported" + }, + { + .bitmask = BIT(13), + .error_description = "UBWC NV12 format Not Supported" + }, + { + .bitmask = 
BIT(14), + .error_description = "UBWC NV12 4R format Not Supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC TP10 format Not Supported" + }, + { + .bitmask = BIT(16), + .error_description = "Frame based Mode Not Supported" + }, + { + .bitmask = BIT(17), + .error_description = "Index based Mode Not Supported" + }, + { + .bitmask = BIT(18), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = BIT(19), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(20), + .error_description = "FIFO framehdr addr unalign" + }, + { + .bitmask = BIT(21), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(22), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(23), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Stride unalign" + }, + { + .bitmask = BIT(25), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Image Width unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Image Height unalign" + }, + { + .bitmask = BIT(28), + .error_description = "Meta Stride unalign" + }, + }, + .num_bus_errors_0 = ARRAY_SIZE(vfe780_bus_irq_err_desc_0), + .num_bus_errors_1 = ARRAY_SIZE(vfe780_bus_irq_err_desc_1), + .bus_err_desc_0 = vfe780_bus_irq_err_desc_0, + .bus_err_desc_1 = vfe780_bus_irq_err_desc_1, + .num_comp_grp = 15, + .support_consumed_addr = true, + .comp_done_mask = { + BIT(0), BIT(1), BIT(2), BIT(3), BIT(4), + BIT(5), BIT(6), BIT(7), BIT(8), BIT(9), BIT(10), + BIT(13), BIT(14), BIT(15), BIT(16), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 36, + .pack_align_shift = 5, + .max_bw_counter_limit = 0xFF, +}; + +static struct cam_vfe_irq_hw_info vfe780_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_EXT_CSID, + .top_irq_reg = &vfe780_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe780_hw_info = { + 
.irq_hw_info = &vfe780_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe780_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe780_top_hw_info, +}; + +#endif /* _CAM_VFE780_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe880.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe880.h new file mode 100644 index 0000000000..fb724648ca --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe880.h @@ -0,0 +1,2738 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_VFE880_H_ +#define _CAM_VFE880_H_ +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_VFE_BUS_VER3_880_MAX_CLIENTS 27 +#define CAM_VFE_880_NUM_DBG_REG 19 + +static struct cam_vfe_top_ver4_module_desc vfe880_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_DEMUX", + }, + { + .id = 1, + .desc = "CLC_CHANNEL_GAIN", + }, + { + .id = 2, + .desc = "CLC_BPC_PDPC", + }, + { + .id = 3, + .desc = "CLC_BINCORRECT", + }, + { + .id = 4, + .desc = "CLC_COMPDECOMP", + }, + { + .id = 5, + .desc = "CLC_LSC", + }, + { + .id = 6, + .desc = "CLC_WB_GAIN", + }, + { + .id = 7, + .desc = "CLC_GIC", + }, + { + .id = 8, + .desc = "CLC_BPC_ABF", + }, + { + .id = 9, + .desc = "CLC_BLS", + }, + { + .id = 10, + .desc = "CLC_BAYER_GTM", + }, + { + .id = 11, + .desc = "CLC_BAYER_LTM", + }, + { + .id = 12, + .desc = "CLC_LCAC", + }, + { + .id = 13, + .desc = "CLC_DEMOSAIC", + }, + { + .id = 14, + .desc = "CLC_COLOR_CORRECT", + }, + { + .id = 15, + .desc = "CLC_GTM", + }, + { + .id = 16, + .desc = "CLC_GLUT", + }, + { + .id = 17, + .desc = "CLC_COLOR_TRANSFORM", + }, + { + .id = 18, + .desc = 
"CLC_UVG", + }, + { + .id = 19, + .desc = "CLC_PREPROCESSOR", + }, + { + .id = 20, + .desc = "CLC_CHROMA_UP", + }, + { + .id = 21, + .desc = "Reserved", + }, + { + .id = 22, + .desc = "Reserved", + }, + { + .id = 23, + .desc = "CLC_COMPDECOMP_HVX_TX", + }, + { + .id = 24, + .desc = "CLC_COMPDECOMP_HVX_RX", + }, + { + .id = 25, + .desc = "CLC_GTM_FD_OUT", + }, + { + .id = 26, + .desc = "CLC_CROP_RND_CLAMP_PIXEL_RAW_OUT", + }, + { + .id = 27, + .desc = "CLC_DOWNSCALE_MN_Y_FD_OUT", + }, + { + .id = 28, + .desc = "CLC_DOWNSCALE_MN_C_FD_OUT", + }, + { + .id = 29, + .desc = "CLC_CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_FD_OUT", + }, + { + .id = 30, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_FD_OUT", + }, + { + .id = 31, + .desc = "CLC_DOWNSCALE_MN_Y_DISP_OUT", + }, + { + .id = 32, + .desc = "CLC_DOWNSCALE_MN_C_DISP_OUT", + }, + { + .id = 33, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_DISP_OUT", + }, + { + .id = 34, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_DISP_OUT", + }, + { + .id = 35, + .desc = "CLC_DOWNSCALE_4TO1_Y_DISP_DS4_OUT", + }, + { + .id = 36, + .desc = "CLC_DOWNSCALE_4TO1_C_DISP_DS4_OUT", + }, + { + .id = 37, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS4_OUT", + }, + { + .id = 38, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS4_OUT", + }, + { + .id = 39, + .desc = "CLC_DOWNSCALE_4TO1_Y_DISP_DS16_OUT", + }, + { + .id = 40, + .desc = "CLC_DOWNSCALE_4TO1_C_DISP_DS16_OUT", + }, + { + .id = 41, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS16_OUT", + }, + { + .id = 42, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS16_OUT", + }, + { + .id = 43, + .desc = "CLC_DOWNSCALE_MN_Y_VID_OUT", + }, + { + .id = 44, + .desc = "CLC_DOWNSCALE_MN_C_VID_OUT", + }, + { + .id = 45, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_VID_OUT", + }, + { + .id = 46, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_VID_OUT", + }, + { + .id = 47, + .desc = "CLC_DSX_Y_VID_OUT", + }, + { + .id = 48, + .desc = 
"CLC_DSX_C_VID_OUT", + }, + { + .id = 49, + .desc = "CLC_CROP_RND_CLAMP_POST_DSX_Y_VID_OUT", + }, + { + .id = 50, + .desc = "CLC_CROP_RND_CLAMP_POST_DSX_C_VID_OUT", + }, + { + .id = 51, + .desc = "CLC_DOWNSCALE_4TO1_Y_VID_DS16_OUT", + }, + { + .id = 52, + .desc = "CLC_DOWNSCALE_4TO1_C_VID_DS16_OUT", + }, + { + .id = 53, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_VID_DS16_OUT", + }, + { + .id = 54, + .desc = "CLC_CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_VID_DS16_OUT", + }, + { + .id = 55, + .desc = "CLC_STATS_AEC_BE", + }, + { + .id = 56, + .desc = "CLC_STATS_AEC_BHIST", + }, + { + .id = 57, + .desc = "CLC_STATS_BHIST", + }, + { + .id = 58, + .desc = "CLC_STATS_TINTLESS_BG", + }, + { + .id = 59, + .desc = "CLC_STATS_AWB_BG", + }, + { + .id = 60, + .desc = "CLC_STATS_BFW", + }, + { + .id = 61, + .desc = "CLC_STATS_RS", + }, + { + .id = 62, + .desc = "CLC_STATS_IHIST", + }, + { + .id = 63, + .desc = "Reserved", + }, + { + .id = 64, + .desc = "CLC_PDPC_BPC_1D", + }, + { + .id = 65, + .desc = "CLC_PDPC_BPC_1D_POST_LSC", + }, + { + .id = 66, + .desc = "CLC_STATS_CAF", + }, + { + .id = 67, + .desc = "CLC_FCG", + }, + { + .id = 68, + .desc = "CLC_PDPC_BPC_1D_ALSC_STATS", + }, + { + .id = 69, + .desc = "CLC_PDPC_BPC_1D_POST_FCG", + }, + { + .id = 70, + .desc = "CLC_STATS_ALSC_BG", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc vfe880_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "VIDEO_FULL_Y", + }, + { + .wm_id = 1, + .desc = "VIDEO_FULL_C", + }, + { + .wm_id = 2, + .desc = "VIDEO_DS_4:1", + }, + { + .wm_id = 3, + .desc = "VIDEO_DS_16:1", + }, + { + .wm_id = 4, + .desc = "DISPLAY_FULL_Y", + }, + { + .wm_id = 5, + .desc = "DISPLAY_FULL_C", + }, + { + .wm_id = 6, + .desc = "DISPLAY_DS_4:1", + }, + { + .wm_id = 7, + .desc = "DISPLAY_DS_16:1", + }, + { + .wm_id = 8, + .desc = "FD_Y", + }, + { + .wm_id = 9, + .desc = "FD_C", + }, + { + .wm_id = 10, + .desc = "PIXEL_RAW", + }, + { + .wm_id = 11, + .desc = "STATS_BE0", + }, + { + .wm_id = 12, + .desc = 
"STATS_BHIST0", + }, + { + .wm_id = 13, + .desc = "STATS_TINTLESS_BG", + }, + { + .wm_id = 14, + .desc = "STATS_AWB_BG", + }, + { + .wm_id = 15, + .desc = "STATS_AWB_BFW", + }, + { + .wm_id = 16, + .desc = "STATS_CAF", + }, + { + .wm_id = 17, + .desc = "STATS_BHIST", + }, + { + .wm_id = 18, + .desc = "STATS_BAYER_RS", + }, + { + .wm_id = 19, + .desc = "STATS_IHIST", + }, + { + .wm_id = 20, + .desc = "PDAF_0_2PD", + }, + { + .wm_id = 21, + .desc = "PDAF_1_PREPROCESS_2PD", + }, + { + .wm_id = 22, + .desc = "PDAF_2_PARSED_DATA", + }, + { + .wm_id = 23, + .desc = "RDI0", + }, + { + .wm_id = 24, + .desc = "RDI1", + }, + { + .wm_id = 25, + .desc = "RDI2", + }, + { + .wm_id = 26, + .desc = "LTM_STATS", + }, + { + .wm_id = 27, + .desc = "STATS_ALSC", + }, +}; + +static struct cam_vfe_top_ver4_top_err_irq_desc vfe880_top_irq_err_desc[] = { + { + .bitmask = BIT(4), + .err_name = "PP VIOLATION", + .desc = "CLC CCIF violation", + }, + { + .bitmask = BIT(6), + .err_name = "PDAF VIOLATION", + .desc = "CLC PDAF violation", + }, + { + .bitmask = BIT(7), + .err_name = "DYNAMIC PDAF SWITCH VIOLATION", + .desc = "PD exposure select changes dynamically, the common vbi is insufficient", + }, + { + .bitmask = BIT(8), + .err_name = "LCR PD INPUT TIMING PROTOCOL VIOLATION", + .desc = + "Sensor/SW: Input protocol timing on the LCR and PD path is not met, protocol expects SOF of LCR data to come before PD SOF, and LCR payload should only come after PD SOF", + }, + { + .bitmask = BIT(12), + .err_name = "DSP IFE PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the output Data", + }, + { + .bitmask = BIT(13), + .err_name = "IFE DSP TX PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the outgoing data to the DSP interface", + }, + { + .bitmask = BIT(14), + .err_name = "DSP IFE RX PROTOCOL VIOLATION", + .desc = "CCIF protocol violation on the incoming data from DSP before processed", + }, + { + .bitmask = BIT(15), + .err_name = "DSP TX FIFO OVERFLOW", + .desc = "Overflow on 
DSP interface TX path FIFO", + }, + { + .bitmask = BIT(16), + .err_name = "DSP RX FIFO OVERFLOW", + .desc = "Overflow on DSP interface RX path FIFO", + }, + { + .bitmask = BIT(17), + .err_name = "DSP ERROR VIOLATION", + .desc = "When DSP sends a error signal", + }, + { + .bitmask = BIT(18), + .err_name = "DIAG VIOLATION", + .desc = "Sensor: The HBI at IFE input is less than the spec", + .debug = "Check sensor config", + }, +}; + +static struct cam_vfe_top_ver4_pdaf_violation_desc vfe880_pdaf_violation_desc[] = { + { + .bitmask = BIT(0), + .desc = "Sim monitor 1 violation - SAD output", + }, + { + .bitmask = BIT(1), + .desc = "Sim monitor 2 violation - pre-proc output", + }, + { + .bitmask = BIT(2), + .desc = "Sim monitor 3 violation - parsed output", + }, + { + .bitmask = BIT(3), + .desc = "Constraint violation", + }, +}; + +static struct cam_vfe_top_ver4_pdaf_lcr_res_info vfe880_pdaf_lcr_res_mask[] = { + { + .res_id = CAM_ISP_HW_VFE_IN_RDI0, + .val = 0, + }, + { + .res_id = CAM_ISP_HW_VFE_IN_RDI1, + .val = 1, + }, + { + .res_id = CAM_ISP_HW_VFE_IN_RDI2, + .val = 2, + }, +}; + +static struct cam_irq_register_set vfe880_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x00000034, + .clear_reg_offset = 0x0000003C, + .status_reg_offset = 0x00000044, + .set_reg_offset = 0x0000004C, + .test_set_val = BIT(1), + .test_sub_val = BIT(0), + }, + { + .mask_reg_offset = 0x00000038, + .clear_reg_offset = 0x00000040, + .status_reg_offset = 0x00000048, + }, +}; + +static struct cam_irq_controller_reg_info vfe880_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe880_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000030, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t vfe880_top_debug_reg[] = { + 0x000000A0, + 0x000000A4, + 0x000000A8, + 0x000000AC, + 0x000000B0, + 0x000000B4, + 0x000000B8, + 0x000000BC, + 0x000000C0, + 0x000000C4, + 0x000000C8, + 0x000000CC, + 0x000000D0, + 0x000000D4, 
+ 0x000000D8, + 0x000000DC, + 0x000000E0, + 0x000000E4, + 0x000000E8, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe880_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .core_cfg_0 = 0x00000024, + .core_cfg_1 = 0x00000028, + .core_cfg_2 = 0x0000002C, + .global_reset_cmd = 0x00000030, + .diag_config = 0x00000050, + .diag_sensor_status = {0x00000054, 0x00000058}, + .diag_frm_cnt_status = {0x0000005C, 0x00000060}, + .ipp_violation_status = 0x00000064, + .pdaf_violation_status = 0x00000404, + .core_cgc_ovd_0 = 0x00000018, + .core_cgc_ovd_1 = 0x0000001C, + .ahb_cgc_ovd = 0x00000020, + .dsp_status = 0x0000006C, + .stats_throttle_cfg_0 = 0x00000070, + .stats_throttle_cfg_1 = 0x00000074, + .stats_throttle_cfg_2 = 0x00000078, + .core_cfg_4 = 0x00000080, + .core_cfg_5 = 0x00000084, + .core_cfg_6 = 0x00000088, + .period_cfg = 0x0000008C, + .irq_sub_pattern_cfg = 0x00000090, + .epoch0_pattern_cfg = 0x00000094, + .epoch1_pattern_cfg = 0x00000098, + .epoch_height_cfg = 0x0000009C, + .bus_violation_status = 0x00000C64, + .bus_overflow_status = 0x00000C68, + .num_perf_counters = 2, + .perf_count_reg = { + { + .perf_count_cfg = 0x00000100, + .perf_pix_count = 0x00000104, + .perf_line_count = 0x00000108, + .perf_stall_count = 0x0000010C, + .perf_always_count = 0x00000110, + .perf_count_status = 0x00000114, + }, + { + .perf_count_cfg = 0x00000118, + .perf_pix_count = 0x0000011C, + .perf_line_count = 0x00000120, + .perf_stall_count = 0x00000124, + .perf_always_count = 0x00000128, + .perf_count_status = 0x0000012C, + }, + }, + .top_debug_cfg = 0x000000FC, + .num_top_debug_reg = CAM_VFE_880_NUM_DBG_REG, + .pdaf_input_cfg_0 = 0x00000130, + .pdaf_input_cfg_1 = 0x00000134, + .top_debug = vfe880_top_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS1, +}; + +static struct cam_vfe_ver4_path_reg_data 
vfe880_pp_common_reg_data = { + .sof_irq_mask = 0x00000001, + .epoch0_irq_mask = 0x10000, + .epoch1_irq_mask = 0x20000, + .eof_irq_mask = 0x00000002, + .error_irq_mask = 0x7F1D0, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + .ipp_violation_mask = 0x10, + .pdaf_violation_mask = 0x40, + .diag_violation_mask = 0x40000, + .diag_sensor_sel_mask = 0x0, + .diag_frm_count_mask_0 = 0x10, +}; + +static struct cam_vfe_ver4_path_reg_data vfe880_vfe_full_rdi_reg_data[3] = { + { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x2, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x400, + .eof_irq_mask = 0x800, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x4, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, + { + .sof_irq_mask = 0x1000, + .eof_irq_mask = 0x2000, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, + }, +}; + +static struct cam_vfe_ver4_path_reg_data vfe880_pdlib_reg_data = { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x8, + .diag_frm_count_mask_0 = 0x20, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 3, +}; + +struct cam_vfe_ver4_path_hw_info + vfe880_rdi_hw_info_arr[] = { + { + .common_reg = &vfe880_top_common_reg, + .reg_data = &vfe880_vfe_full_rdi_reg_data[0], + }, + { + .common_reg = &vfe880_top_common_reg, + .reg_data = &vfe880_vfe_full_rdi_reg_data[1], + }, + { + .common_reg = &vfe880_top_common_reg, + .reg_data = &vfe880_vfe_full_rdi_reg_data[2], + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info vfe880_dbg_reg_info[CAM_VFE_880_NUM_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit("test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + 
"test_bus_reserved", + "test_bus_reserved" + ), + VFE_DBG_INFO_ARRAY_4bit( + "STATS_IHIST", + "STATS_RS", + "STATS_CAF", + "GTM_BHIST", + "TINTLESS_BG", + "STATS_BFW", + "STATS_BG", + "STATS_BHIST" + ), + VFE_DBG_INFO_ARRAY_4bit( + "STATS_BE", + "R2PD_DS16_C_VID", + "R2PD_DS16_Y_VID", + "crop_rnd_clamp_post_downscale_C_DS16_VID", + "4to1_C_DS16_VID", + "crop_rnd_clamp_post_downscale_Y_DS16_VID", + "4to1_Y_DS16_VID", + "crop_rnd_clamp_post_dsx_C_VID" + ), + VFE_DBG_INFO_ARRAY_4bit( + "R2PD_DS4_VID_C", + "R2PD_DS4_VID_Y", + "DSX_C", + "crop_rnd_clamp_post_dsx_Y_VID", + "DSX_Y", + "crop_rnd_clamp_post_downscale_mn_C_VID", + "downscale_mn_C_VID", + "crop_rnd_clamp_post_downscale_mn_Y_VID" + ), + VFE_DBG_INFO_ARRAY_4bit( + "MNDS_Y_VID", + "R2PD_DS16_C_DISP", + "R2PD_DS16_Y_DISP", + "crop_rnd_clamp_post_downscale_C_DS16_DISP", + "4to1_C_DS16_DISP", + "crop_rnd_clamp_post_downscale_Y_DS16_DISP", + "4to1_Y_DS16_DISP", + "R2PD_DS4_C_DISP" + ), + VFE_DBG_INFO_ARRAY_4bit( + "R2PD_DS4_Y_DISP", + "crop_rnd_clamp_post_downscale_C_DS4_DISP", + "4to1_C_DS4_DISP", + "crop_rnd_clamp_post_downscale_Y_DS4_DISP", + "4to1_Y_DS4_DISP", + "crop_rnd_clamp_post_downscale_mn_C_DISP", + "downscale_mn_C_DISP", + "crop_rnd_clamp_post_downscale_mn_Y_DISP" + ), + VFE_DBG_INFO_ARRAY_4bit( + "downscale_mn_Y_DISP", + "crop_rnd_clamp_post_downscale_mn_C_FD", + "downscale_mn_C_FD", + "crop_rnd_clamp_post_downscale_mn_Y_FD", + "downscale_mn_Y_FD", + "gtm_fd_out", + "uvg", + "color_xform" + ), + VFE_DBG_INFO_ARRAY_4bit( + "glut", + "gtm", + "color_correct", + "demosaic", + "hvx_tap2", + "lcac", + "bayer_ltm", + "bayer_gtm" + ), + VFE_DBG_INFO_ARRAY_4bit( + "bls", + "bpc_abf", + "gic", + "wb_gain", + "lsc", + "compdecomp_hxv_rx", + "compdecomp_hxv_tx", + "hvx_tap1" + ), + VFE_DBG_INFO_ARRAY_4bit( + "decompand", + "reserved", + "bincorrect", + "bpc_pdpc", + "channel_gain", + "bayer_argb_ccif_converter", + "crop_rnd_clamp_pre_argb_packer", + "chroma_up_uv" + ), + VFE_DBG_INFO_ARRAY_4bit( + "chroma_up_y", + 
"demux", + "hxv_tap0", + "preprocess", + "reserved", + "reserved", + "bayer_ltm_bus_wr", + "RDI2" + ), + VFE_DBG_INFO_ARRAY_4bit( + "RDI1", + "RDI0", + "reserved", + "pdaf_2_bus_wr", + "pdaf_1_bus_wr", + "pdaf_1_bus_wr", + "ihist_bus_wr", + "flicker_rs_bus_wr" + ), + VFE_DBG_INFO_ARRAY_4bit( + "gtm_bhist_bus_wr", + "caf_bus_wr", + "bfw_bus_wr", + "bg_bus_wr", + "tintless_bg_bus_wr", + "bhist_bus_wr", + "be_bus_wr", + "pixel_raw_bus_wr" + ), + VFE_DBG_INFO_ARRAY_4bit( + "fd_c_bus_wr", + "fd_y_bus_wr", + "disp_ds16_bus_wr", + "disp_ds4_bus_wr", + "disp_c_bus_wr", + "disp_y_bus_wr", + "vid_ds16_bus_Wr", + "vid_ds4_bus_Wr" + ), + VFE_DBG_INFO_ARRAY_4bit( + "vid_c_bus_wr", + "vid_y_bus_wr", + "CLC_PDAF", + "PIX_PP", + "reserved", + "reserved", + "reserved", + "reserved" + ), + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "lcr_pd_timing_debug"), + VFE_DBG_INFO(32, "lcr_pd_timing_debug"), + VFE_DBG_INFO(32, "lcr_pd_timing_debug"), + VFE_DBG_INFO(32, "lcr_pd_timing_debug"), + VFE_DBG_INFO(32, "lcr_pd_timing_debug"), + VFE_DBG_INFO(32, "lcr_pd_timing_debug"), + VFE_DBG_INFO(32, "lcr_pd_timing_debug"), + VFE_DBG_INFO(32, "lcr_pd_timing_debug"), + }, + VFE_DBG_INFO_ARRAY_4bit( + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved", + "r2pd_reserved" + ), + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "lcr_pd_monitor"), + VFE_DBG_INFO(32, "lcr_pd_monitor"), + VFE_DBG_INFO(32, "lcr_pd_monitor"), + VFE_DBG_INFO(32, "lcr_pd_monitor"), + VFE_DBG_INFO(32, "lcr_pd_monitor"), + VFE_DBG_INFO(32, "lcr_pd_monitor"), + VFE_DBG_INFO(32, "lcr_pd_monitor"), + VFE_DBG_INFO(32, "lcr_pd_monitor"), + }, + { + /* needs to be parsed separately, doesn't conform to I, V, R */ + VFE_DBG_INFO(32, "bus_wr_src_idle"), + VFE_DBG_INFO(32, "bus_wr_src_idle"), + VFE_DBG_INFO(32, "bus_wr_src_idle"), + VFE_DBG_INFO(32, "bus_wr_src_idle"), + 
VFE_DBG_INFO(32, "bus_wr_src_idle"), + VFE_DBG_INFO(32, "bus_wr_src_idle"), + VFE_DBG_INFO(32, "bus_wr_src_idle"), + VFE_DBG_INFO(32, "bus_wr_src_idle"), + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_info vfe880_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_PIXEL_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_PDAF_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_DSP_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe880_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &vfe880_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &vfe880_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe880_diag_frame_field[] = { + { + .num_fields = 4, + .field = &vfe880_diag_reg_info[4], + }, + { + .num_fields = 2, + .field = &vfe880_diag_reg_info[8], + }, +}; + +static struct cam_vfe_ver4_fcg_module_info vfe880_fcg_module_info = { + .max_fcg_ch_ctx = 1, + .max_fcg_predictions = 3, + .fcg_index_shift = 16, + .max_reg_val_pair_size = 4, + .fcg_type_size = 2, + .fcg_phase_index_cfg_0 = 0xC870, + .fcg_phase_index_cfg_1 = 0xC874, +}; + +static struct cam_vfe_top_ver4_hw_info vfe880_top_hw_info = { + .common_reg = &vfe880_top_common_reg, + .vfe_full_hw_info = { + .common_reg = &vfe880_top_common_reg, + .reg_data = &vfe880_pp_common_reg_data, + }, + .pdlib_hw_info = { + .common_reg = &vfe880_top_common_reg, + .reg_data = &vfe880_pdlib_reg_data, + }, + .rdi_hw_info = vfe880_rdi_hw_info_arr, + .wr_client_desc = vfe880_wr_client_desc, + .ipp_module_desc = 
vfe880_ipp_mod_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_PDLIB_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .num_path_port_map = 3, + .path_port_map = { + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD}, + {CAM_ISP_HW_VFE_IN_PDLIB, CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA}, + }, + .num_rdi = ARRAY_SIZE(vfe880_rdi_hw_info_arr), + .num_top_errors = ARRAY_SIZE(vfe880_top_irq_err_desc), + .top_err_desc = vfe880_top_irq_err_desc, + .num_pdaf_violation_errors = ARRAY_SIZE(vfe880_pdaf_violation_desc), + .pdaf_violation_desc = vfe880_pdaf_violation_desc, + .top_debug_reg_info = &vfe880_dbg_reg_info, + .pdaf_lcr_res_mask = vfe880_pdaf_lcr_res_mask, + .num_pdaf_lcr_res = ARRAY_SIZE(vfe880_pdaf_lcr_res_mask), + .fcg_module_info = &vfe880_fcg_module_info, + .fcg_supported = true, + .diag_sensor_info = vfe880_diag_sensor_field, + .diag_frame_info = vfe880_diag_frame_field, +}; + +static struct cam_irq_register_set vfe880_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x00000C18, + .clear_reg_offset = 0x00000C20, + .status_reg_offset = 0x00000C28, + .set_reg_offset = 0x00000C50, + }, + { + .mask_reg_offset = 0x00000C1C, + .clear_reg_offset = 0x00000C24, + .status_reg_offset = 0x00000C2C, + .set_reg_offset = 0x00000C54, + }, +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe880_ubwc_regs_client_0 = { + .meta_addr = 0x00000E40, + .meta_cfg = 0x00000E44, + .mode_cfg = 0x00000E48, + .stats_ctrl = 0x00000E4C, + .ctrl_2 = 0x00000E50, + .lossy_thresh0 = 0x00000E54, + .lossy_thresh1 = 0x00000E58, + .off_lossy_var = 0x00000E5C, + .bw_limit = 0x00000E1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe880_ubwc_regs_client_1 = { + .meta_addr = 0x00000F40, + .meta_cfg = 0x00000F44, + .mode_cfg = 0x00000F48, + .stats_ctrl = 0x00000F4C, + .ctrl_2 = 0x00000F50, + .lossy_thresh0 = 0x00000F54, + 
.lossy_thresh1 = 0x00000F58, + .off_lossy_var = 0x00000F5C, + .bw_limit = 0x00000F1C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe880_ubwc_regs_client_4 = { + .meta_addr = 0x00001240, + .meta_cfg = 0x00001244, + .mode_cfg = 0x00001248, + .stats_ctrl = 0x0000124C, + .ctrl_2 = 0x00001250, + .lossy_thresh0 = 0x00001254, + .lossy_thresh1 = 0x00001258, + .off_lossy_var = 0x0000125C, + .bw_limit = 0x0000121C, + .ubwc_comp_en_bit = BIT(1), +}; + +static struct cam_vfe_bus_ver3_reg_offset_ubwc_client + vfe880_ubwc_regs_client_5 = { + .meta_addr = 0x00001340, + .meta_cfg = 0x00001344, + .mode_cfg = 0x00001348, + .stats_ctrl = 0x0000134C, + .ctrl_2 = 0x00001350, + .lossy_thresh0 = 0x00001354, + .lossy_thresh1 = 0x00001358, + .off_lossy_var = 0x0000135C, + .bw_limit = 0x0000131C, + .ubwc_comp_en_bit = BIT(1), +}; + +static uint32_t vfe880_out_port_mid[][4] = { + {34, 0, 0, 0}, + {35, 0, 0, 0}, + {36, 0, 0, 0}, + {8, 9, 10, 11}, + {11, 0, 0, 0}, + {12, 0, 0, 0}, + {32, 33, 0, 0}, + {27, 28, 29, 0}, + {8, 0, 0, 0}, + {18, 0, 0, 0}, + {21, 0, 0, 0}, + {19, 0, 0, 0}, + {17, 0, 0, 0}, + {23, 0, 0, 0}, + {24, 0, 0, 0}, + {12, 13, 14, 15}, + {13, 0, 0, 0}, + {14, 0, 0, 0}, + {9, 0, 0, 0}, + {20, 0, 0, 0}, + {10, 0, 0, 0}, + {16, 0, 0, 0}, + {25, 26, 0, 0}, + {22, 0, 0, 0}, + {30, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_err_irq_desc vfe880_bus_irq_err_desc_0[] = { + { + .bitmask = BIT(26), + .err_name = "IPCC_FENCE_DATA_ERR", + .desc = "IPCC or FENCE Data was not available in the Input Fifo", + }, + { + .bitmask = BIT(27), + .err_name = "IPCC_FENCE_ADDR_ERR", + .desc = "IPCC or FENCE address fifo was empty and read was attempted", + }, + { + .bitmask = BIT(28), + .err_name = "CONS_VIOLATION", + .desc = "Programming of software registers violated the constraints", + }, + { + .bitmask = BIT(30), + .err_name = "VIOLATION", + .desc = "Client has a violation in ccif protocol at input", + }, + { + .bitmask = BIT(31), + .err_name = 
"IMAGE_SIZE_VIOLATION", + .desc = "Programmed image size is not same as image size from the CCIF", + }, +}; + +static struct cam_vfe_bus_ver3_err_irq_desc vfe880_bus_irq_err_desc_1[] = { + { + .bitmask = BIT(28), + .err_name = "EARLY_DONE", + .desc = "Buf done for each client. Early done irq for clients STATS_BAF", + }, + { + .bitmask = BIT(29), + .err_name = "EARLY_DONE", + .desc = "Buf done for each client. Early done irq for clients STATS_BAF", + }, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe880_bus_hw_info = { + .common_reg = { + .hw_version = 0x00000C00, + .cgc_ovd = 0x00000C08, + .if_frameheader_cfg = { + 0x00000C34, + 0x00000C38, + 0x00000C3C, + 0x00000C40, + 0x00000C44, + }, + .ubwc_static_ctrl = 0x00000C58, + .pwr_iso_cfg = 0x00000C5C, + .overflow_status_clear = 0x00000C60, + .ccif_violation_status = 0x00000C64, + .overflow_status = 0x00000C68, + .image_size_violation_status = 0x00000C70, + .debug_status_top_cfg = 0x00000CF0, + .debug_status_top = 0x00000CF4, + .test_bus_ctrl = 0x00000CDC, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe880_bus_irq_reg, + .global_irq_cmd_offset = 0x00000C30, + .global_clear_bitmask = 0x00000001, + }, + .num_perf_counters = 8, + .perf_cnt_status = 0x00000CB4, + .perf_cnt_reg = { + { + .perf_cnt_cfg = 0x00000C74, + .perf_cnt_val = 0x00000C94, + }, + { + .perf_cnt_cfg = 0x00000C78, + .perf_cnt_val = 0x00000C98, + }, + { + .perf_cnt_cfg = 0x00000C7C, + .perf_cnt_val = 0x00000C9C, + }, + { + .perf_cnt_cfg = 0x00000C80, + .perf_cnt_val = 0x00000CA0, + }, + { + .perf_cnt_cfg = 0x00000C84, + .perf_cnt_val = 0x00000CA4, + }, + { + .perf_cnt_cfg = 0x00000C88, + .perf_cnt_val = 0x00000CA8, + }, + { + .perf_cnt_cfg = 0x00000C8C, + .perf_cnt_val = 0x00000CAC, + }, + { + .perf_cnt_cfg = 0x00000C90, + .perf_cnt_val = 0x00000CB0, + }, + }, + }, + .num_client = 
CAM_VFE_BUS_VER3_880_MAX_CLIENTS, + .bus_client_reg = { + /* BUS Client 0 FULL Y */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + .frame_header_cfg = 0x00000E28, + .irq_subsample_period = 0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .framedrop_period = 0x00000E38, + .framedrop_pattern = 0x00000E3C, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 0x00000E64, + .system_cache_cfg = 0x00000E68, + .addr_cfg = 0x00000E70, + .addr_status_0 = 0x00000E78, + .addr_status_1 = 0x00000E7C, + .addr_status_2 = 0x00000E80, + .addr_status_3 = 0x00000E84, + .debug_status_cfg = 0x00000E88, + .debug_status_0 = 0x00000E8C, + .debug_status_1 = 0x00000E90, + .bw_limiter_addr = 0x00000E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &vfe880_ubwc_regs_client_0, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 1 FULL C */ + { + .cfg = 0x00000F00, + .image_addr = 0x00000F04, + .frame_incr = 0x00000F08, + .image_cfg_0 = 0x00000F0C, + .image_cfg_1 = 0x00000F10, + .image_cfg_2 = 0x00000F14, + .packer_cfg = 0x00000F18, + .frame_header_addr = 0x00000F20, + .frame_header_incr = 0x00000F24, + .frame_header_cfg = 0x00000F28, + .irq_subsample_period = 0x00000F30, + .irq_subsample_pattern = 0x00000F34, + .framedrop_period = 0x00000F38, + .framedrop_pattern = 0x00000F3C, + .mmu_prefetch_cfg = 0x00000F60, + .mmu_prefetch_max_offset = 0x00000F64, + .system_cache_cfg = 0x00000F68, + .addr_cfg = 0x00000E70, + .addr_status_0 = 0x00000F78, + .addr_status_1 = 0x00000F7C, + .addr_status_2 = 0x00000F80, + 
.addr_status_3 = 0x00000F84, + .debug_status_cfg = 0x00000F88, + .debug_status_0 = 0x00000F8C, + .debug_status_1 = 0x00000F90, + .bw_limiter_addr = 0x00000F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = &vfe880_ubwc_regs_client_1, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 2 DS4 */ + { + .cfg = 0x00001000, + .image_addr = 0x00001004, + .frame_incr = 0x00001008, + .image_cfg_0 = 0x0000100C, + .image_cfg_1 = 0x00001010, + .image_cfg_2 = 0x00001014, + .packer_cfg = 0x00001018, + .irq_subsample_period = 0x00001030, + .irq_subsample_pattern = 0x00001034, + .framedrop_period = 0x00001038, + .framedrop_pattern = 0x0000103C, + .mmu_prefetch_cfg = 0x00001060, + .mmu_prefetch_max_offset = 0x00001064, + .system_cache_cfg = 0x00001068, + .addr_cfg = 0x00001070, + .addr_status_0 = 0x00001078, + .addr_status_1 = 0x0000107C, + .addr_status_2 = 0x00001080, + .addr_status_3 = 0x00001084, + .debug_status_cfg = 0x00001088, + .debug_status_0 = 0x0000108C, + .debug_status_1 = 0x00001090, + .bw_limiter_addr = 0x0000101C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 3 DS16 */ + { + .cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + .image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + .packer_cfg = 0x00001118, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .framedrop_period = 0x00001138, + .framedrop_pattern = 0x0000113C, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .system_cache_cfg = 0x00001168, + .addr_cfg = 0x00001170, + .addr_status_0 = 0x00001178, + .addr_status_1 = 0x0000117C, + .addr_status_2 = 0x00001180, + 
.addr_status_3 = 0x00001184, + .debug_status_cfg = 0x00001188, + .debug_status_0 = 0x0000118C, + .debug_status_1 = 0x00001190, + .bw_limiter_addr = 0x0000111C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 4 DISP Y */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .framedrop_period = 0x00001238, + .framedrop_pattern = 0x0000123C, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .system_cache_cfg = 0x00001268, + .addr_cfg = 0x00001270, + .addr_status_0 = 0x00001278, + .addr_status_1 = 0x0000127C, + .addr_status_2 = 0x00001280, + .addr_status_3 = 0x00001284, + .debug_status_cfg = 0x00001288, + .debug_status_0 = 0x0000128C, + .debug_status_1 = 0x00001290, + .bw_limiter_addr = 0x0000121C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = &vfe880_ubwc_regs_client_4, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + + }, + /* BUS Client 5 DISP C */ + { + .cfg = 0x00001300, + .image_addr = 0x00001304, + .frame_incr = 0x00001308, + .image_cfg_0 = 0x0000130C, + .image_cfg_1 = 0x00001310, + .image_cfg_2 = 0x00001314, + .packer_cfg = 0x00001318, + .frame_header_addr = 0x00001320, + .frame_header_incr = 0x00001324, + .frame_header_cfg = 0x00001328, + .irq_subsample_period = 0x00001330, + .irq_subsample_pattern = 0x00001334, + .framedrop_period = 0x00001338, + .framedrop_pattern = 0x0000133C, + .mmu_prefetch_cfg 
= 0x00001360, + .mmu_prefetch_max_offset = 0x00001364, + .system_cache_cfg = 0x00001368, + .addr_cfg = 0x00001370, + .addr_status_0 = 0x00001378, + .addr_status_1 = 0x0000137C, + .addr_status_2 = 0x00001380, + .addr_status_3 = 0x00001384, + .debug_status_cfg = 0x00001388, + .debug_status_0 = 0x0000138C, + .debug_status_1 = 0x00001390, + .bw_limiter_addr = 0x0000131C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = &vfe880_ubwc_regs_client_5, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 6 DISP DS4 */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .system_cache_cfg = 0x00001468, + .addr_cfg = 0x00001470, + .addr_status_0 = 0x00001478, + .addr_status_1 = 0x0000147C, + .addr_status_2 = 0x00001480, + .addr_status_3 = 0x00001484, + .debug_status_cfg = 0x00001488, + .debug_status_0 = 0x0000148C, + .debug_status_1 = 0x00001490, + .bw_limiter_addr = 0x0000141C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 7 DISP DS16 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + 
.mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 0x00001568, + .addr_cfg = 0x00001570, + .addr_status_0 = 0x00001578, + .addr_status_1 = 0x0000157C, + .addr_status_2 = 0x00001580, + .addr_status_3 = 0x00001584, + .debug_status_cfg = 0x00001588, + .debug_status_0 = 0x0000158C, + .debug_status_1 = 0x00001590, + .bw_limiter_addr = 0x0000151C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 8 FD Y */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .addr_status_0 = 0x00001678, + .addr_status_1 = 0x0000167C, + .addr_status_2 = 0x00001680, + .addr_status_3 = 0x00001684, + .debug_status_cfg = 0x00001688, + .debug_status_0 = 0x0000168C, + .debug_status_1 = 0x00001690, + .bw_limiter_addr = 0x0000161C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 9 FD C */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 
0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .addr_status_0 = 0x00001778, + .addr_status_1 = 0x0000177C, + .addr_status_2 = 0x00001780, + .addr_status_3 = 0x00001784, + .debug_status_cfg = 0x00001788, + .debug_status_0 = 0x0000178C, + .debug_status_1 = 0x00001790, + .bw_limiter_addr = 0x0000171C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_UBWC_NV12) | + BIT_ULL(CAM_FORMAT_UBWC_NV12_4R) | BIT_ULL(CAM_FORMAT_NV21) | + BIT_ULL(CAM_FORMAT_NV12) | BIT_ULL(CAM_FORMAT_Y_ONLY) | + BIT_ULL(CAM_FORMAT_UBWC_TP10) | BIT_ULL(CAM_FORMAT_TP10) | + BIT_ULL(CAM_FORMAT_PLAIN16_10), + }, + /* BUS Client 10 PIXEL RAW */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .addr_status_0 = 0x00001878, + .addr_status_1 = 0x0000187C, + .addr_status_2 = 0x00001880, + .addr_status_3 = 0x00001884, + .debug_status_cfg = 0x00001888, + .debug_status_0 = 0x0000188C, + .debug_status_1 = 0x00001890, + .bw_limiter_addr = 0x0000181C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | 
BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | + BIT_ULL(CAM_FORMAT_PLAIN8) | BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_PLAIN128), + }, + /* BUS Client 11 STATS BE 0 */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + .addr_status_0 = 0x00001978, + .addr_status_1 = 0x0000197C, + .addr_status_2 = 0x00001980, + .addr_status_3 = 0x00001984, + .debug_status_cfg = 0x00001988, + .debug_status_0 = 0x0000198C, + .debug_status_1 = 0x00001990, + .bw_limiter_addr = 0x0000191C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 12 STATS BHIST 0 */ + { + .cfg = 0x00001A00, + .image_addr = 0x00001A04, + .frame_incr = 0x00001A08, + .image_cfg_0 = 0x00001A0C, + .image_cfg_1 = 0x00001A10, + .image_cfg_2 = 0x00001A14, + .packer_cfg = 0x00001A18, + .frame_header_addr = 0x00001A20, + .frame_header_incr = 0x00001A24, + .frame_header_cfg = 0x00001A28, + .irq_subsample_period = 0x00001A30, + .irq_subsample_pattern = 0x00001A34, + .framedrop_period = 0x00001A38, + .framedrop_pattern = 0x00001A3C, + .mmu_prefetch_cfg = 0x00001A60, + .mmu_prefetch_max_offset = 0x00001A64, + .system_cache_cfg = 0x00001A68, + .addr_cfg = 0x00001A70, + .addr_status_0 = 0x00001A78, + .addr_status_1 = 0x00001A7C, + 
.addr_status_2 = 0x00001A80, + .addr_status_3 = 0x00001A84, + .debug_status_cfg = 0x00001A88, + .debug_status_0 = 0x00001A8C, + .debug_status_1 = 0x00001A90, + .bw_limiter_addr = 0x00001A1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 13 STATS TINTLESS BG */ + { + .cfg = 0x00001B00, + .image_addr = 0x00001B04, + .frame_incr = 0x00001B08, + .image_cfg_0 = 0x00001B0C, + .image_cfg_1 = 0x00001B10, + .image_cfg_2 = 0x00001B14, + .packer_cfg = 0x00001B18, + .frame_header_addr = 0x00001B20, + .frame_header_incr = 0x00001B24, + .frame_header_cfg = 0x00001B28, + .irq_subsample_period = 0x00001B30, + .irq_subsample_pattern = 0x00001B34, + .framedrop_period = 0x00001B38, + .framedrop_pattern = 0x00001B3C, + .mmu_prefetch_cfg = 0x00001B60, + .mmu_prefetch_max_offset = 0x00001B64, + .system_cache_cfg = 0x00001B68, + .addr_cfg = 0x00001B70, + .addr_status_0 = 0x00001B78, + .addr_status_1 = 0x00001B7C, + .addr_status_2 = 0x00001B80, + .addr_status_3 = 0x00001B84, + .debug_status_cfg = 0x00001B88, + .debug_status_0 = 0x00001B8C, + .debug_status_1 = 0x00001B90, + .bw_limiter_addr = 0x00001B1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_5, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 14 STATS AWB BG */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .framedrop_period = 0x00001C38, + .framedrop_pattern = 0x00001C3C, + .mmu_prefetch_cfg = 0x00001C60, + .mmu_prefetch_max_offset = 0x00001C64, + .system_cache_cfg = 0x00001C68, + .addr_cfg = 0x00001C70, + .addr_status_0 = 0x00001C78, + .addr_status_1 = 0x00001C7C, + .addr_status_2 = 
0x00001C80, + .addr_status_3 = 0x00001C84, + .debug_status_cfg = 0x00001C88, + .debug_status_0 = 0x00001C8C, + .debug_status_1 = 0x00001C90, + .bw_limiter_addr = 0x00001C1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 15 STATS AWB BFW */ + { + .cfg = 0x00001D00, + .image_addr = 0x00001D04, + .frame_incr = 0x00001D08, + .image_cfg_0 = 0x00001D0C, + .image_cfg_1 = 0x00001D10, + .image_cfg_2 = 0x00001D14, + .packer_cfg = 0x00001D18, + .frame_header_addr = 0x00001D20, + .frame_header_incr = 0x00001D24, + .frame_header_cfg = 0x00001D28, + .irq_subsample_period = 0x00001D30, + .irq_subsample_pattern = 0x00001D34, + .framedrop_period = 0x00001D38, + .framedrop_pattern = 0x00001D3C, + .mmu_prefetch_cfg = 0x00001D60, + .mmu_prefetch_max_offset = 0x00001D64, + .system_cache_cfg = 0x00001D68, + .addr_cfg = 0x00001D70, + .addr_status_0 = 0x00001D78, + .addr_status_1 = 0x00001D7C, + .addr_status_2 = 0x00001D80, + .addr_status_3 = 0x00001D84, + .debug_status_cfg = 0x00001D88, + .debug_status_0 = 0x00001D8C, + .debug_status_1 = 0x00001D90, + .bw_limiter_addr = 0x00001D1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_6, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + /* BUS Client 16 STATS CAF */ + { + .cfg = 0x00001E00, + .image_addr = 0x00001E04, + .frame_incr = 0x00001E08, + .image_cfg_0 = 0x00001E0C, + .image_cfg_1 = 0x00001E10, + .image_cfg_2 = 0x00001E14, + .packer_cfg = 0x00001E18, + .frame_header_addr = 0x00001E20, + .frame_header_incr = 0x00001E24, + .frame_header_cfg = 0x00001E28, + .irq_subsample_period = 0x00001E30, + .irq_subsample_pattern = 0x00001E34, + .framedrop_period = 0x00001E38, + .framedrop_pattern = 0x00001E3C, + .mmu_prefetch_cfg = 0x00001E60, + .mmu_prefetch_max_offset = 0x00001E64, + .system_cache_cfg = 0x00001E68, + .addr_cfg = 0x00001E70, + .addr_status_0 = 0x00001E78, + .addr_status_1 = 0x00001E7C, + .addr_status_2 = 0x00001E80, + 
.addr_status_3 = 0x00001E84, + .debug_status_cfg = 0x00001E88, + .debug_status_0 = 0x00001E8C, + .debug_status_1 = 0x00001E90, + .bw_limiter_addr = 0x00001E1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_7, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 17 STATS BHIST */ + { + .cfg = 0x00001F00, + .image_addr = 0x00001F04, + .frame_incr = 0x00001F08, + .image_cfg_0 = 0x00001F0C, + .image_cfg_1 = 0x00001F10, + .image_cfg_2 = 0x00001F14, + .packer_cfg = 0x00001F18, + .frame_header_addr = 0x00001F20, + .frame_header_incr = 0x00001F24, + .frame_header_cfg = 0x00001F28, + .irq_subsample_period = 0x00001F30, + .irq_subsample_pattern = 0x00001F34, + .framedrop_period = 0x00001F38, + .framedrop_pattern = 0x00001F3C, + .mmu_prefetch_cfg = 0x00001F60, + .mmu_prefetch_max_offset = 0x00001F64, + .system_cache_cfg = 0x00001F68, + .addr_cfg = 0x00001F70, + .addr_status_0 = 0x00001F78, + .addr_status_1 = 0x00001F7C, + .addr_status_2 = 0x00001F80, + .addr_status_3 = 0x00001F84, + .debug_status_cfg = 0x00001F88, + .debug_status_0 = 0x00001F8C, + .debug_status_1 = 0x00001F90, + .bw_limiter_addr = 0x00001F1C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_8, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 18 STATS BAYER RS */ + { + .cfg = 0x00002000, + .image_addr = 0x00002004, + .frame_incr = 0x00002008, + .image_cfg_0 = 0x0000200C, + .image_cfg_1 = 0x00002010, + .image_cfg_2 = 0x00002014, + .packer_cfg = 0x00002018, + .frame_header_addr = 0x00002020, + .frame_header_incr = 0x00002024, + .frame_header_cfg = 0x00002028, + .irq_subsample_period = 0x00002030, + .irq_subsample_pattern = 0x00002034, + .framedrop_period = 0x00002038, + .framedrop_pattern = 0x0000203C, + .mmu_prefetch_cfg = 0x00002060, + .mmu_prefetch_max_offset = 0x00002064, + .system_cache_cfg = 0x00002068, + .addr_cfg = 0x00002070, + .addr_status_0 = 0x00002078, + .addr_status_1 = 0x0000207C, + .addr_status_2 = 0x00002080, + .addr_status_3 = 0x00002084, 
+ .debug_status_cfg = 0x00002088, + .debug_status_0 = 0x0000208C, + .debug_status_1 = 0x00002090, + .bw_limiter_addr = 0x0000201C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_9, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 19 STATS IHIST */ + { + .cfg = 0x00002100, + .image_addr = 0x00002104, + .frame_incr = 0x00002108, + .image_cfg_0 = 0x0000210C, + .image_cfg_1 = 0x00002110, + .image_cfg_2 = 0x00002114, + .packer_cfg = 0x00002118, + .frame_header_addr = 0x00002120, + .frame_header_incr = 0x00002124, + .frame_header_cfg = 0x00002128, + .irq_subsample_period = 0x00002130, + .irq_subsample_pattern = 0x00002134, + .framedrop_period = 0x00002138, + .framedrop_pattern = 0x0000213C, + .mmu_prefetch_cfg = 0x00002160, + .mmu_prefetch_max_offset = 0x00002164, + .system_cache_cfg = 0x00002168, + .addr_cfg = 0x00002170, + .addr_status_0 = 0x00002178, + .addr_status_1 = 0x0000217C, + .addr_status_2 = 0x00002180, + .addr_status_3 = 0x00002184, + .debug_status_cfg = 0x00002188, + .debug_status_0 = 0x0000218C, + .debug_status_1 = 0x00002190, + .bw_limiter_addr = 0x0000211C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_10, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 20 PDAF_0_2PD */ + { + .cfg = 0x00002200, + .image_addr = 0x00002204, + .frame_incr = 0x00002208, + .image_cfg_0 = 0x0000220C, + .image_cfg_1 = 0x00002210, + .image_cfg_2 = 0x00002214, + .packer_cfg = 0x00002218, + .frame_header_addr = 0x00002220, + .frame_header_incr = 0x00002224, + .frame_header_cfg = 0x00002228, + .irq_subsample_period = 0x00002230, + .irq_subsample_pattern = 0x00002234, + .framedrop_period = 0x00002238, + .framedrop_pattern = 0x0000223C, + .mmu_prefetch_cfg = 0x00002260, + .mmu_prefetch_max_offset = 0x00002264, + .system_cache_cfg = 0x00002268, + .addr_cfg = 0x00002270, + .addr_status_0 = 0x00002278, + .addr_status_1 = 0x0000227C, + .addr_status_2 = 0x00002280, + .addr_status_3 = 0x00002284, + .debug_status_cfg = 
0x00002288, + .debug_status_0 = 0x0000228C, + .debug_status_1 = 0x00002290, + .bw_limiter_addr = 0x0000221C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_11, + .ubwc_regs = NULL, + .supported_formats = 0xFFFFFFFFFFFFFFFF, + }, + /* BUS Client 21 PDAF V2.0 PD DATA PDAF_1_PREPROCESS_2PD */ + { + .cfg = 0x00002300, + .image_addr = 0x00002304, + .frame_incr = 0x00002308, + .image_cfg_0 = 0x0000230C, + .image_cfg_1 = 0x00002310, + .image_cfg_2 = 0x00002314, + .packer_cfg = 0x00002318, + .frame_header_addr = 0x00002320, + .frame_header_incr = 0x00002324, + .frame_header_cfg = 0x00002328, + .irq_subsample_period = 0x00002330, + .irq_subsample_pattern = 0x00002334, + .framedrop_period = 0x00002338, + .framedrop_pattern = 0x0000233C, + .mmu_prefetch_cfg = 0x00002360, + .mmu_prefetch_max_offset = 0x00002364, + .system_cache_cfg = 0x00002368, + .addr_cfg = 0x00002370, + .addr_status_0 = 0x00002378, + .addr_status_1 = 0x0000237C, + .addr_status_2 = 0x00002380, + .addr_status_3 = 0x00002384, + .debug_status_cfg = 0x00002388, + .debug_status_0 = 0x0000238C, + .debug_status_1 = 0x00002390, + .bw_limiter_addr = 0x0000231C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_11, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16), + }, + /* BUS Client 22 PDAF V2.0 PDAF_2_PARSED_DATA */ + { + .cfg = 0x00002400, + .image_addr = 0x00002404, + .frame_incr = 0x00002408, + .image_cfg_0 = 0x0000240C, + .image_cfg_1 = 0x00002410, + .image_cfg_2 = 0x00002414, + .packer_cfg = 0x00002418, + .frame_header_addr = 0x00002420, + .frame_header_incr = 0x00002424, + .frame_header_cfg = 0x00002428, + .irq_subsample_period = 0x00002430, + .irq_subsample_pattern = 0x00002434, + .framedrop_period = 0x00002438, + .framedrop_pattern = 0x0000243C, + .mmu_prefetch_cfg = 0x00002460, + .mmu_prefetch_max_offset = 0x00002464, + .system_cache_cfg = 0x00002468, + 
.addr_cfg = 0x00002470, + .addr_status_0 = 0x00002478, + .addr_status_1 = 0x0000247C, + .addr_status_2 = 0x00002480, + .addr_status_3 = 0x00002484, + .debug_status_cfg = 0x00002488, + .debug_status_0 = 0x0000248C, + .debug_status_1 = 0x00002490, + .bw_limiter_addr = 0x0000241C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_11, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN16_8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16), + }, + /* BUS Client 23 RDI0 */ + { + .cfg = 0x00002500, + .image_addr = 0x00002504, + .frame_incr = 0x00002508, + .image_cfg_0 = 0x0000250C, + .image_cfg_1 = 0x00002510, + .image_cfg_2 = 0x00002514, + .packer_cfg = 0x00002518, + .frame_header_addr = 0x00002520, + .frame_header_incr = 0x00002524, + .frame_header_cfg = 0x00002528, + .line_done_cfg = 0x0000252C, + .irq_subsample_period = 0x00002530, + .irq_subsample_pattern = 0x00002534, + .framedrop_period = 0x00002538, + .framedrop_pattern = 0x0000253C, + .mmu_prefetch_cfg = 0x00002560, + .mmu_prefetch_max_offset = 0x00002564, + .system_cache_cfg = 0x00002568, + .addr_cfg = 0x00002570, + .addr_status_0 = 0x00002578, + .addr_status_1 = 0x0000257C, + .addr_status_2 = 0x00002580, + .addr_status_3 = 0x00002584, + .debug_status_cfg = 0x00002588, + .debug_status_0 = 0x0000258C, + .debug_status_1 = 0x00002590, + .bw_limiter_addr = 0x0000251C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_12, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | 
BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 24 RDI1 */ + { + .cfg = 0x00002600, + .image_addr = 0x00002604, + .frame_incr = 0x00002608, + .image_cfg_0 = 0x0000260C, + .image_cfg_1 = 0x00002610, + .image_cfg_2 = 0x00002614, + .packer_cfg = 0x00002618, + .frame_header_addr = 0x00002620, + .frame_header_incr = 0x00002624, + .frame_header_cfg = 0x00002628, + .line_done_cfg = 0x0000262C, + .irq_subsample_period = 0x00002630, + .irq_subsample_pattern = 0x00002634, + .framedrop_period = 0x00002638, + .framedrop_pattern = 0x0000263C, + .mmu_prefetch_cfg = 0x00002660, + .mmu_prefetch_max_offset = 0x00002664, + .system_cache_cfg = 0x00002668, + .addr_cfg = 0x00002670, + .addr_status_0 = 0x00002678, + .addr_status_1 = 0x0000267C, + .addr_status_2 = 0x00002680, + .addr_status_3 = 0x00002684, + .debug_status_cfg = 0x00002688, + .debug_status_0 = 0x0000268C, + .debug_status_1 = 0x00002690, + .bw_limiter_addr = 0x0000261C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_13, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 25 RDI2 */ + { + .cfg = 0x00002700, + .image_addr = 0x00002704, + .frame_incr = 0x00002708, + .image_cfg_0 = 0x0000270C, + .image_cfg_1 = 0x00002710, + .image_cfg_2 = 0x00002714, + .packer_cfg = 0x00002718, + .frame_header_addr = 0x00002720, + .frame_header_incr = 0x00002724, + .frame_header_cfg = 0x00002728, + 
.line_done_cfg = 0x0000272C, + .irq_subsample_period = 0x00002730, + .irq_subsample_pattern = 0x00002734, + .framedrop_period = 0x00002738, + .framedrop_pattern = 0x0000273C, + .mmu_prefetch_cfg = 0x00002760, + .mmu_prefetch_max_offset = 0x00002764, + .system_cache_cfg = 0x00002768, + .addr_cfg = 0x00002770, + .addr_status_0 = 0x00002778, + .addr_status_1 = 0x0000277C, + .addr_status_2 = 0x00002780, + .addr_status_3 = 0x00002784, + .debug_status_cfg = 0x00002788, + .debug_status_0 = 0x0000278C, + .debug_status_1 = 0x00002790, + .bw_limiter_addr = 0x0000271C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_14, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 26 LTM STATS */ + { + .cfg = 0x00002800, + .image_addr = 0x00002804, + .frame_incr = 0x00002808, + .image_cfg_0 = 0x0000280C, + .image_cfg_1 = 0x00002810, + .image_cfg_2 = 0x00002814, + .packer_cfg = 0x00002818, + .frame_header_addr = 0x00002820, + .frame_header_incr = 0x00002824, + .frame_header_cfg = 0x00002828, + .irq_subsample_period = 0x00002830, + .irq_subsample_pattern = 0x00002834, + .framedrop_period = 0x00002838, + .framedrop_pattern = 0x0000283C, + .mmu_prefetch_cfg = 0x00002860, + .mmu_prefetch_max_offset = 0x00002864, + .system_cache_cfg = 0x00002868, + .addr_cfg = 0x00002870, + .addr_status_0 = 0x00002878, + .addr_status_1 = 0x0000287C, + .addr_status_2 = 0x00002880, + .addr_status_3 = 0x00002884, + .debug_status_cfg = 0x00002888, + 
.debug_status_0 = 0x0000288C, + .debug_status_1 = 0x00002890, + .bw_limiter_addr = 0x0000281C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN32), + }, + /* BUS Client 27 ALSC BG */ + { + .cfg = 0x00002900, + .image_addr = 0x00002904, + .frame_incr = 0x00002908, + .image_cfg_0 = 0x0000290C, + .image_cfg_1 = 0x00002910, + .image_cfg_2 = 0x00002914, + .packer_cfg = 0x00002918, + .frame_header_addr = 0x00002920, + .frame_header_incr = 0x00002924, + .frame_header_cfg = 0x00002928, + .irq_subsample_period = 0x00002930, + .irq_subsample_pattern = 0x00002934, + .framedrop_period = 0x00002938, + .framedrop_pattern = 0x0000293C, + .mmu_prefetch_cfg = 0x00002960, + .mmu_prefetch_max_offset = 0x00002964, + .system_cache_cfg = 0x00002968, + .addr_cfg = 0x00002970, + .addr_status_0 = 0x00002978, + .addr_status_1 = 0x0000297C, + .addr_status_2 = 0x00002980, + .addr_status_3 = 0x00002984, + .debug_status_cfg = 0x00002988, + .debug_status_0 = 0x0000298C, + .debug_status_1 = 0x00002990, + .bw_limiter_addr = 0x0000291C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_15, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + }, + .num_out = 25, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .mid = vfe880_out_port_mid[0], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 23, + }, + .name = { + "RDI_0", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = 16384, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .mid = vfe880_out_port_mid[1], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 24, + }, + .name = { + "RDI_1", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = 16384, + .max_height = 16384, + .source_group = 
CAM_VFE_BUS_VER3_SRC_GRP_4, + .mid = vfe880_out_port_mid[2], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 25, + }, + .name = { + "RDI_2", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL, + .max_width = 4928, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[3], + .num_mid = 4, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 0, + 1, + }, + .name = { + "FULL_Y", + "FULL_C", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4, + .max_width = 1696, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[4], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 2, + }, + .name = { + "DS_4", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16, + .max_width = 424, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[5], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 3, + }, + .name = { + "DS_16", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP, + .max_width = 7296, + .max_height = 16384, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[6], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 10, + }, + .name = { + "PIXEL_RAW", + }, + .pid_mask = 0x700, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD, + .max_width = 2304, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[7], + .num_mid = 3, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 8, + 9, + }, + .name = { + "FD_Y", + "FD_C", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD, + .max_width = 14592, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe880_out_port_mid[8], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 20, + }, + .name = { + 
"PDAF_0_2PD", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[9], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 13, + }, + .name = { + "STATS_TL_BG", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[10], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 16, + }, + .name = { + "STATS_BF", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[11], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 14, + }, + .name = { + "STATS_AWB_BGB", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[12], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 12, + }, + .name = { + "STATS_BHIST", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[13], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 18, + }, + .name = { + "STATS_RS", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[14], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 19, + }, + .name = { + "STATS_IHIST", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP, + .max_width = 4928, + .max_height = 4096, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = 
vfe880_out_port_mid[15], + .num_mid = 4, + .num_wm = 2, + .line_based = 1, + .wm_idx = { + 4, + 5, + }, + .name = { + "FULL_DISP_Y", + "FULL_DISP_C", + }, + .pid_mask = 0x70000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP, + .max_width = 1232, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[16], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 6, + }, + .name = { + "DISP_DS_4", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP, + .max_width = 308, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[17], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 7, + }, + .name = { + "DISP_DS_16", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe880_out_port_mid[18], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 21, + }, + .name = { + "PDAF_1_PREPROCESS_2PD", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[19], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 15, + }, + .name = { + "AWB_BFW", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe880_out_port_mid[20], + .num_mid = 1, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 22, + }, + .name = { + "PDAF_2_PARSED_DATA", + }, + .pid_mask = 0x7000000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[21], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 11, + 
}, + .name = { + "AEC_BE", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[22], + .num_mid = 2, + .num_wm = 1, + .line_based = 1, + .wm_idx = { + 26, + }, + .name = { + "LTM", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[23], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 17, + }, + .name = { + "GTM_BHIST", + }, + .pid_mask = 0x700000, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_STATS_ALSC, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe880_out_port_mid[24], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 27, + }, + .name = { + "STATS_ALSC", + }, + .pid_mask = 0x700000, + }, + }, + + .num_cons_err = 29, + .constraint_error_list = { + { + .bitmask = BIT(0), + .error_description = "PPC 1x1 input Not Supported" + }, + { + .bitmask = BIT(1), + .error_description = "PPC 1x2 input Not Supported" + }, + { + .bitmask = BIT(2), + .error_description = "PPC 2x1 input Not Supported" + }, + { + .bitmask = BIT(3), + .error_description = "PPC 2x2 input Not Supported" + }, + { + .bitmask = BIT(4), + .error_description = "Pack 8 BPP format Not Supported" + }, + { + .bitmask = BIT(5), + .error_description = "Pack 16 BPP format Not Supported" + }, + { + .bitmask = BIT(6), + .error_description = "Pack 32 BPP format Not Supported" + }, + { + .bitmask = BIT(7), + .error_description = "Pack 64 BPP format Not Supported" + }, + { + .bitmask = BIT(8), + .error_description = "Pack MIPI 20 format Not Supported" + }, + { + .bitmask = BIT(9), + .error_description = "Pack MIPI 14 format Not Supported" + }, + { + .bitmask = BIT(10), + .error_description = "Pack MIPI 12 format Not Supported" + }, + { + .bitmask = 
BIT(11), + .error_description = "Pack MIPI 10 format Not Supported" + }, + { + .bitmask = BIT(12), + .error_description = "Pack 128 BPP format Not Supported" + }, + { + .bitmask = BIT(13), + .error_description = "UBWC NV12 format Not Supported" + }, + { + .bitmask = BIT(14), + .error_description = "UBWC NV12 4R format Not Supported" + }, + { + .bitmask = BIT(15), + .error_description = "UBWC TP10 format Not Supported" + }, + { + .bitmask = BIT(16), + .error_description = "Frame based Mode Not Supported" + }, + { + .bitmask = BIT(17), + .error_description = "Index based Mode Not Supported" + }, + { + .bitmask = BIT(18), + .error_description = "FIFO image addr unalign" + }, + { + .bitmask = BIT(19), + .error_description = "FIFO ubwc addr unalign" + }, + { + .bitmask = BIT(20), + .error_description = "FIFO framehdr addr unalign" + }, + { + .bitmask = BIT(21), + .error_description = "Image address unalign" + }, + { + .bitmask = BIT(22), + .error_description = "UBWC address unalign" + }, + { + .bitmask = BIT(23), + .error_description = "Frame Header address unalign" + }, + { + .bitmask = BIT(24), + .error_description = "Stride unalign" + }, + { + .bitmask = BIT(25), + .error_description = "X Initialization unalign" + }, + { + .bitmask = BIT(26), + .error_description = "Image Width unalign" + }, + { + .bitmask = BIT(27), + .error_description = "Image Height unalign" + }, + { + .bitmask = BIT(28), + .error_description = "Meta Stride unalign" + }, + }, + .num_bus_errors_0 = ARRAY_SIZE(vfe880_bus_irq_err_desc_0), + .num_bus_errors_1 = ARRAY_SIZE(vfe880_bus_irq_err_desc_1), + .bus_err_desc_0 = vfe880_bus_irq_err_desc_0, + .bus_err_desc_1 = vfe880_bus_irq_err_desc_1, + .num_comp_grp = 16, + .support_consumed_addr = true, + .comp_done_mask = { + BIT(0), BIT(1), BIT(2), BIT(3), BIT(4), + BIT(5), BIT(6), BIT(7), BIT(8), + BIT(9), BIT(10), BIT(13), + BIT(14), BIT(15), BIT(16), BIT(11), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 37, + .pack_align_shift 
= 5, + .max_bw_counter_limit = 0xFF, +}; + +static struct cam_vfe_irq_hw_info vfe880_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_EXT_CSID, + .top_irq_reg = &vfe880_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe880_hw_info = { + .irq_hw_info = &vfe880_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe880_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe880_top_hw_info, +}; + +#endif /* _CAM_VFE880_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite108x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite108x.h new file mode 100644 index 0000000000..b333f03d72 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite108x.h @@ -0,0 +1,709 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#ifndef _CAM_VFE_LITE108X_H_ +#define _CAM_VFE_LITE108X_H_ +#include "cam_vfe_camif_ver3.h" +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_VFE_108X_NUM_DBG_REG 5 + +static struct cam_vfe_top_ver4_module_desc vfe_lite108x_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_BLS", + }, + { + .id = 1, + .desc = "CLC_GLUT", + }, + { + .id = 2, + .desc = "CLC_STATS_BG", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc vfe_lite108x_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "RDI_0", + }, + { + .wm_id = 1, + .desc = "RDI_1", + }, + { + .wm_id = 2, + .desc = "RDI_2", + }, + { + .wm_id = 3, + .desc = "RDI_3", + }, + { + .wm_id = 4, + .desc = "GAMMA", + }, + { + .wm_id = 5, + .desc = "BE", + }, +}; + +/* All the Offsets are relative to ifelite start address */ + +static struct cam_irq_register_set vfe_lite108x_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x00000164, + .clear_reg_offset = 0x00000174, + .status_reg_offset = 0x00000154, + .set_reg_offset = 0x00000184, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + { + .mask_reg_offset = 0x00000168, + .clear_reg_offset = 0x00000178, + .status_reg_offset = 0x00000158, + }, +}; + +static struct cam_irq_controller_reg_info vfe_lite108x_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe_lite108x_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000188, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t vfe_lite108x_top_debug_reg[] = { + 0x0000038C, + 0x00000390, + 0x00000394, + 0x00000398, + 0x0000039C, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe_lite108x_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x0, + .core_cgc_ovd_0 = 0x00000104, + .ahb_cgc_ovd = 0x00000108, + .core_cfg_0 = 0x00000114, + .diag_config = 0x00000254, + .diag_config_1 = 0x00000258, + .diag_sensor_status = 
{0x0000025C, 0x00000260}, + .diag_frm_cnt_status = {0x00000264, 0x00000268}, + .ipp_violation_status = 0x000002A4, + .bus_violation_status = 0x00000864, + .bus_overflow_status = 0x00000868, + .top_debug_cfg = 0x000003DC, + .num_top_debug_reg = CAM_VFE_108X_NUM_DBG_REG, + .top_debug = vfe_lite108x_top_debug_reg, +}; + +static struct cam_vfe_ver4_path_reg_data vfe_lite108x_ipp_reg_data = { + .sof_irq_mask = 0x1, + .eof_irq_mask = 0x2, + .error_irq_mask = 0x6, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + .ipp_violation_mask = 0x10, + .diag_violation_mask = 0x4, + .diag_sensor_sel_mask = 0x40, + .diag_frm_count_mask_1 = 0x100, +}; + +static struct cam_vfe_ver4_path_reg_data vfe_lite108x_rdi_reg_data[4] = { + + { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x0, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x10, + .eof_irq_mask = 0x20, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x2, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x40, + .eof_irq_mask = 0x80, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x4, + .diag_frm_count_mask_0 = 0x200, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0x400, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, +}; + +static struct cam_vfe_ver4_path_hw_info + vfe_lite108x_rdi_hw_info[] = { + { + .common_reg = &vfe_lite108x_top_common_reg, + .reg_data = &vfe_lite108x_rdi_reg_data[0], + }, + { + .common_reg = &vfe_lite108x_top_common_reg, + .reg_data = &vfe_lite108x_rdi_reg_data[1], + }, + { + .common_reg = &vfe_lite108x_top_common_reg, + .reg_data = &vfe_lite108x_rdi_reg_data[2], + }, + { + .common_reg = &vfe_lite108x_top_common_reg, + .reg_data = 
&vfe_lite108x_rdi_reg_data[3], + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info vfe108x_dbg_reg_info[CAM_VFE_108X_NUM_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit( + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + { + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + }, + VFE_DBG_INFO_ARRAY_4bit( + "PP_THROTTLE", + "STATS_BG_THROTTLE", + "STATS_BG", + "BLS", + "GLUT", + "unused", + "unused", + "unused" + ), + VFE_DBG_INFO_ARRAY_4bit( + "RDI_0", + "RDI_1", + "RDI_2", + "RDI_3", + "PP_STATS_BG", + "PP_GLUT", + "PP_STATS_BG", + "PP_GLUT" + ), + VFE_DBG_INFO_ARRAY_4bit( + "unused", + "unused", + "unused", + "unused", + "unused", + "unused", + "unused", + "unused" + ), +}; + +static struct cam_vfe_top_ver4_diag_reg_info vfe_lite108x_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_3_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_IPP_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite108x_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &vfe_lite108x_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &vfe_lite108x_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields 
vfe_lite108x_diag_frame_field[] = { + { + .num_fields = 4, + .field = &vfe_lite108x_diag_reg_info[4], + }, + { + .num_fields = 1, + .field = &vfe_lite108x_diag_reg_info[8], + }, +}; + +static struct cam_vfe_top_ver4_hw_info vfe_lite108x_top_hw_info = { + .common_reg = &vfe_lite108x_top_common_reg, + .rdi_hw_info = vfe_lite108x_rdi_hw_info, + .vfe_full_hw_info = { + .common_reg = &vfe_lite108x_top_common_reg, + .reg_data = &vfe_lite108x_ipp_reg_data, + }, + .ipp_module_desc = vfe_lite108x_ipp_mod_desc, + .wr_client_desc = vfe_lite108x_wr_client_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .top_debug_reg_info = &vfe108x_dbg_reg_info, + .num_rdi = ARRAY_SIZE(vfe_lite108x_rdi_hw_info), + .diag_sensor_info = vfe_lite108x_diag_sensor_field, + .diag_frame_info = vfe_lite108x_diag_frame_field, +}; + +static struct cam_irq_register_set vfe_lite108x_bus_irq_reg[1] = { + { + .mask_reg_offset = 0x00000818, + .clear_reg_offset = 0x00000820, + .status_reg_offset = 0x00000828, + }, +}; + +static uint32_t vfe_lite108x_out_port_mid[][4] = { + {32, 0, 0, 0}, + {33, 0, 0, 0}, + {34, 0, 0, 0}, + {35, 0, 0, 0}, + {36, 0, 0, 0}, + {37, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe_lite108x_bus_hw_info = { + .common_reg = { + .hw_version = 0x00000800, + .cgc_ovd = 0x00000808, + .pwr_iso_cfg = 0x0000085C, + .overflow_status_clear = 0x00000860, + .ccif_violation_status = 0x00000864, + .overflow_status = 0x00000868, + .image_size_violation_status = 0x00000870, + .debug_status_top_cfg = 0x000008F0, + .debug_status_top = 0x000008F4, + .test_bus_ctrl = 0x00000928, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = vfe_lite108x_bus_irq_reg, + .global_irq_cmd_offset = 0x00000830, + .global_clear_bitmask = 0x00000001, + }, + }, 
+ .num_client = 6, + .bus_client_reg = { + /* BUS Client 0 RDI0 */ + { + .cfg = 0x00000D00, + .image_addr = 0x00000D04, + .frame_incr = 0x00000D08, + .image_cfg_0 = 0x00000D0C, + .image_cfg_1 = 0x00000D10, + .image_cfg_2 = 0x00000D14, + .packer_cfg = 0x00000D18, + .frame_header_addr = 0x00000D20, + .frame_header_incr = 0x00000D24, + .frame_header_cfg = 0x00000D28, + .line_done_cfg = 0x00000D2C, + .irq_subsample_period = 0x00000D30, + .irq_subsample_pattern = 0x00000D34, + .mmu_prefetch_cfg = 0x00000D60, + .mmu_prefetch_max_offset = 0x00000D64, + .system_cache_cfg = 0x00000D68, + .addr_cfg = 0x00000D70, + .addr_status_0 = 0x00000D90, + .addr_status_1 = 0x00000D94, + .addr_status_2 = 0x00000D98, + .addr_status_3 = 0x00000D9C, + .debug_status_cfg = 0x00000D7C, + .debug_status_0 = 0x00000D80, + .debug_status_1 = 0x00000D84, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 1 RDI1 */ + { + .cfg = 0x00000E00, + .image_addr = 0x00000E04, + .frame_incr = 0x00000E08, + .image_cfg_0 = 0x00000E0C, + .image_cfg_1 = 0x00000E10, + .image_cfg_2 = 0x00000E14, + .packer_cfg = 0x00000E18, + .frame_header_addr = 0x00000E20, + .frame_header_incr = 0x00000E24, + .frame_header_cfg = 0x00000E28, + .line_done_cfg = 0x00000E2C, + .irq_subsample_period = 0x00000E30, + .irq_subsample_pattern = 0x00000E34, + .mmu_prefetch_cfg = 0x00000E60, + .mmu_prefetch_max_offset = 
0x00000E64, + .system_cache_cfg = 0x00000E68, + .addr_cfg = 0x00000E70, + .addr_status_0 = 0x00000E90, + .addr_status_1 = 0x00000E94, + .addr_status_2 = 0x00000E98, + .addr_status_3 = 0x00000E9C, + .debug_status_cfg = 0x00000E7C, + .debug_status_0 = 0x00000E80, + .debug_status_1 = 0x00000E84, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 2 RDI2 */ + { + .cfg = 0x00000F00, + .image_addr = 0x00000F04, + .frame_incr = 0x00000F08, + .image_cfg_0 = 0x00000F0C, + .image_cfg_1 = 0x00000F10, + .image_cfg_2 = 0x00000F14, + .packer_cfg = 0x00000F18, + .frame_header_addr = 0x00000F20, + .frame_header_incr = 0x00000F24, + .frame_header_cfg = 0x00000F28, + .line_done_cfg = 0x00000F2C, + .irq_subsample_period = 0x00000F30, + .irq_subsample_pattern = 0x00000F34, + .mmu_prefetch_cfg = 0x00000F60, + .mmu_prefetch_max_offset = 0x00000F64, + .system_cache_cfg = 0x00000F68, + .addr_cfg = 0x00000F70, + .addr_status_0 = 0x00000F90, + .addr_status_1 = 0x00000F94, + .addr_status_2 = 0x00000F98, + .addr_status_3 = 0x00000F9C, + .debug_status_cfg = 0x00000F7C, + .debug_status_0 = 0x00000F80, + .debug_status_1 = 0x00000F84, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) 
| + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 3 RDI3 */ + { + .cfg = 0x00001000, + .image_addr = 0x00001004, + .frame_incr = 0x00001008, + .image_cfg_0 = 0x0000100C, + .image_cfg_1 = 0x00001010, + .image_cfg_2 = 0x00001014, + .packer_cfg = 0x00001018, + .frame_header_addr = 0x00001020, + .frame_header_incr = 0x00001024, + .frame_header_cfg = 0x00001028, + .line_done_cfg = 0x0000102C, + .irq_subsample_period = 0x00001030, + .irq_subsample_pattern = 0x00001034, + .mmu_prefetch_cfg = 0x00001060, + .mmu_prefetch_max_offset = 0x00001064, + .system_cache_cfg = 0x00001068, + .addr_cfg = 0x00001070, + .addr_status_0 = 0x00001090, + .addr_status_1 = 0x00001094, + .addr_status_2 = 0x00001098, + .addr_status_3 = 0x0000109C, + .debug_status_cfg = 0x0000107C, + .debug_status_0 = 0x00001080, + .debug_status_1 = 0x00001084, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 4 Gamma */ + { + .cfg = 0x00001100, + .image_addr = 0x00001104, + .frame_incr = 0x00001108, + .image_cfg_0 = 0x0000110C, + 
.image_cfg_1 = 0x00001110, + .image_cfg_2 = 0x00001114, + .packer_cfg = 0x00001118, + .frame_header_addr = 0x00001120, + .frame_header_incr = 0x00001124, + .frame_header_cfg = 0x00001128, + .line_done_cfg = 0x0000112C, + .irq_subsample_period = 0x00001130, + .irq_subsample_pattern = 0x00001134, + .mmu_prefetch_cfg = 0x00001160, + .mmu_prefetch_max_offset = 0x00001164, + .system_cache_cfg = 0x00001168, + .addr_cfg = 0x00001170, + .addr_status_0 = 0x00001190, + .addr_status_1 = 0x00001194, + .addr_status_2 = 0x00001198, + .addr_status_3 = 0x0000119C, + .debug_status_cfg = 0x0000117C, + .debug_status_0 = 0x00001180, + .debug_status_1 = 0x00001184, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 5 Stats BE */ + { + .cfg = 0x00001200, + .image_addr = 0x00001204, + .frame_incr = 0x00001208, + .image_cfg_0 = 0x0000120C, + .image_cfg_1 = 0x00001210, + .image_cfg_2 = 0x00001214, + .packer_cfg = 0x00001218, + .frame_header_addr = 0x00001220, + .frame_header_incr = 0x00001224, + .frame_header_cfg = 0x00001228, + .line_done_cfg = 0x0000122C, + .irq_subsample_period = 0x00001230, + .irq_subsample_pattern = 0x00001234, + .mmu_prefetch_cfg = 0x00001260, + .mmu_prefetch_max_offset = 0x00001264, + .system_cache_cfg = 0x00001268, + .addr_cfg = 0x00001270, + .addr_status_0 = 0x00001290, + .addr_status_1 = 0x00001294, + .addr_status_2 
= 0x00001298, + .addr_status_3 = 0x0000129C, + .debug_status_cfg = 0x0000127C, + .debug_status_0 = 0x00001280, + .debug_status_1 = 0x00001284, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + }, + .num_out = 6, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite108x_out_port_mid[0], + .num_mid = 1, + .wm_idx = { + 0, + }, + .name = { + "LITE_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite108x_out_port_mid[1], + .num_mid = 1, + .wm_idx = { + 1, + }, + .name = { + "LITE_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite108x_out_port_mid[2], + .num_mid = 1, + .wm_idx = { + 2, + }, + .name = { + "LITE_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite108x_out_port_mid[3], + .num_mid = 1, + .wm_idx = { + 3, + }, + .name = { + "LITE_3", + }, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe_lite108x_out_port_mid[4], + .num_mid = 1, + .wm_idx = { + 4, + }, + .name = { + "PREPROCESS_RAW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe_lite108x_out_port_mid[5], + .num_mid = 1, + .wm_idx = { + 5, + }, + .name = { + 
"STATS_BG", + }, + }, + }, + .num_comp_grp = 5, + .support_consumed_addr = true, + .comp_done_mask = { + BIT(0), BIT(1), BIT(2), BIT(3), BIT(4), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 34, +}; + +static struct cam_vfe_irq_hw_info vfe_lite108x_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_LITE_EXT_CSID, + .top_irq_reg = &vfe_lite108x_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe_lite108x_hw_info = { + .irq_hw_info = &vfe_lite108x_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe_lite108x_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe_lite108x_top_hw_info, +}; + +#endif /* _CAM_VFE_LITE108X_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h new file mode 100644 index 0000000000..0aeafb9bd8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h @@ -0,0 +1,359 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_LITE17X_H_ +#define _CAM_VFE_LITE17X_H_ + +#include "cam_vfe_bus_ver2.h" +#include "cam_irq_controller.h" +#include "cam_vfe_top_ver2.h" +#include "cam_vfe_core.h" + +static struct cam_irq_register_set vfe17x_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x0000005C, + .clear_reg_offset = 0x00000064, + .status_reg_offset = 0x0000006C, + }, + { + .mask_reg_offset = 0x00000060, + .clear_reg_offset = 0x00000068, + .status_reg_offset = 0x00000070, + }, +}; + +static struct cam_irq_controller_reg_info vfe17x_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe17x_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000058, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_vfe_top_ver2_reg_offset_common vfe17x_top_common_reg = { + .hw_version = 0x00000000, + .hw_capability = 0x00000004, + .lens_feature = 0x00000008, + .stats_feature = 0x0000000C, + .color_feature = 0x00000010, + .zoom_feature = 0x00000014, + .global_reset_cmd = 0x00000018, + .module_ctrl = { + NULL, + NULL, + NULL, + NULL, + }, + .bus_cgc_ovd = 0x0000003C, + .core_cfg = 0x00000000, + .three_D_cfg = 0x00000000, + .violation_status = 0x0000007C, + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_ver2_reg vfe17x_rdi_reg = { + .reg_update_cmd = 0x000004AC, +}; + +static struct cam_vfe_rdi_common_reg_data vfe17x_rdi_reg_data = { + .subscribe_irq_mask0 = 0x780001E0, + .subscribe_irq_mask1 = 0x0, + .error_irq_mask0 = 0x0, + .error_irq_mask1 = 0x3C, +}; + +static struct cam_vfe_rdi_reg_data vfe17x_rdi_0_data = { + .reg_update_cmd_data = 0x2, + .sof_irq_mask = 0x8000000, + .reg_update_irq_mask = 0x20, +}; + +static struct cam_vfe_rdi_reg_data vfe17x_rdi_1_data = { + .reg_update_cmd_data = 0x4, + .sof_irq_mask = 0x10000000, + .reg_update_irq_mask = 0x40, +}; + +static struct cam_vfe_rdi_reg_data vfe17x_rdi_2_data = { + .reg_update_cmd_data = 0x8, + .sof_irq_mask = 0x20000000, + .reg_update_irq_mask = 0x80, +}; + +static 
struct cam_vfe_rdi_reg_data vfe17x_rdi_3_data = { + .reg_update_cmd_data = 0x10, + .sof_irq_mask = 0x40000000, + .reg_update_irq_mask = 0x100, +}; + +static struct cam_vfe_rdi_overflow_status vfe17x_rdi_irq_status = { + .rdi0_overflow_mask = 0x8, + .rdi1_overflow_mask = 0x10, + .rdi2_overflow_mask = 0x18, + .rdi3_overflow_mask = 0x20, + .rdi_overflow_mask = 0x3c, +}; + +static struct cam_vfe_top_ver2_hw_info vfe17x_top_hw_info = { + .common_reg = &vfe17x_top_common_reg, + .camif_hw_info = { + .common_reg = NULL, + .camif_reg = NULL, + .reg_data = NULL, + }, + .rdi_hw_info = { + .common_reg = &vfe17x_top_common_reg, + .rdi_reg = &vfe17x_rdi_reg, + .common_reg_data = &vfe17x_rdi_reg_data, + .reg_data = { + &vfe17x_rdi_0_data, + &vfe17x_rdi_1_data, + &vfe17x_rdi_2_data, + &vfe17x_rdi_3_data, + }, + .rdi_irq_status = &vfe17x_rdi_irq_status, + }, + .num_mux = 4, + .mux_type = { + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, +}; + +static struct cam_irq_register_set vfe17x_bus_irq_reg[3] = { + { + .mask_reg_offset = 0x00002044, + .clear_reg_offset = 0x00002050, + .status_reg_offset = 0x0000205C, + }, + { + .mask_reg_offset = 0x00002048, + .clear_reg_offset = 0x00002054, + .status_reg_offset = 0x00002060, + }, + { + .mask_reg_offset = 0x0000204C, + .clear_reg_offset = 0x00002058, + .status_reg_offset = 0x00002064, + }, +}; + +static struct cam_vfe_bus_ver2_hw_info vfe17x_bus_hw_info = { + .common_reg = { + .hw_version = 0x00002000, + .hw_capability = 0x00002004, + .sw_reset = 0x00002008, + .cgc_ovd = 0x0000200C, + .pwr_iso_cfg = 0x000020CC, + .dual_master_comp_cfg = 0x00002028, + .irq_reg_info = { + .num_registers = 3, + .irq_reg_set = vfe17x_bus_irq_reg, + .global_irq_cmd_offset = 0x00002068, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, + }, + .comp_error_status = 0x0000206C, + .comp_ovrwr_status = 0x00002070, + .dual_comp_error_status = 0x00002074, + .dual_comp_ovrwr_status = 0x00002078, + 
.addr_sync_cfg = 0x0000207C, + .addr_sync_frame_hdr = 0x00002080, + .addr_sync_no_sync = 0x00002084, + .top_irq_mask_0 = 0x0000005C, + }, + .num_client = 4, + .bus_client_reg = { + /* BUS Client 0 */ + { + .status0 = 0x00002200, + .status1 = 0x00002204, + .cfg = 0x00002208, + .header_addr = 0x0000220C, + .header_cfg = 0x00002210, + .image_addr = 0x00002214, + .image_addr_offset = 0x00002218, + .buffer_width_cfg = 0x0000221C, + .buffer_height_cfg = 0x00002220, + .packer_cfg = 0x00002224, + .stride = 0x00002228, + .irq_subsample_period = 0x00002248, + .irq_subsample_pattern = 0x0000224C, + .framedrop_period = 0x00002250, + .framedrop_pattern = 0x00002254, + .frame_inc = 0x00002258, + .burst_limit = 0x0000225C, + .ubwc_regs = NULL, + }, + /* BUS Client 1 */ + { + .status0 = 0x00002300, + .status1 = 0x00002304, + .cfg = 0x00002308, + .header_addr = 0x0000230C, + .header_cfg = 0x00002310, + .image_addr = 0x00002314, + .image_addr_offset = 0x00002318, + .buffer_width_cfg = 0x0000231C, + .buffer_height_cfg = 0x00002320, + .packer_cfg = 0x00002324, + .stride = 0x00002328, + .irq_subsample_period = 0x00002348, + .irq_subsample_pattern = 0x0000234C, + .framedrop_period = 0x00002350, + .framedrop_pattern = 0x00002354, + .frame_inc = 0x00002358, + .burst_limit = 0x0000235C, + .ubwc_regs = NULL, + }, + /* BUS Client 2 */ + { + .status0 = 0x00002400, + .status1 = 0x00002404, + .cfg = 0x00002408, + .header_addr = 0x0000240C, + .header_cfg = 0x00002410, + .image_addr = 0x00002414, + .image_addr_offset = 0x00002418, + .buffer_width_cfg = 0x0000241C, + .buffer_height_cfg = 0x00002420, + .packer_cfg = 0x00002424, + .stride = 0x00002428, + .irq_subsample_period = 0x00002448, + .irq_subsample_pattern = 0x0000244C, + .framedrop_period = 0x00002450, + .framedrop_pattern = 0x00002454, + .frame_inc = 0x00002458, + .burst_limit = 0x0000245C, + .ubwc_regs = NULL, + }, + /* BUS Client 3 */ + { + .status0 = 0x00002500, + .status1 = 0x00002504, + .cfg = 0x00002508, + .header_addr = 0x0000250C, 
+ .header_cfg = 0x00002510, + .image_addr = 0x00002514, + .image_addr_offset = 0x00002518, + .buffer_width_cfg = 0x0000251C, + .buffer_height_cfg = 0x00002520, + .packer_cfg = 0x00002524, + .stride = 0x00002528, + .irq_subsample_period = 0x00002548, + .irq_subsample_pattern = 0x0000254C, + .framedrop_period = 0x00002550, + .framedrop_pattern = 0x00002554, + .frame_inc = 0x00002558, + .burst_limit = 0x0000255C, + .ubwc_regs = NULL, + }, + }, + .comp_grp_reg = { + /* CAM_VFE_BUS_VER2_COMP_GRP_0 */ + { + .comp_mask = 0x00002010, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_1 */ + { + .comp_mask = 0x00002014, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_2 */ + { + .comp_mask = 0x00002018, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_3 */ + { + .comp_mask = 0x0000201C, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_4 */ + { + .comp_mask = 0x00002020, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_5 */ + { + .comp_mask = 0x00002024, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */ + { + .comp_mask = 0x0000202C, + .addr_sync_mask = 0x00002088, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */ + { + .comp_mask = 0x00002030, + .addr_sync_mask = 0x0000208C, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */ + { + .comp_mask = 0x00002034, + .addr_sync_mask = 0x00002090, + + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */ + { + .comp_mask = 0x00002038, + .addr_sync_mask = 0x00002094, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */ + { + .comp_mask = 0x0000203C, + .addr_sync_mask = 0x00002098, + }, + /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */ + { + .comp_mask = 0x00002040, + .addr_sync_mask = 0x0000209C, + }, + }, + .num_out = 4, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + }, + }, + 
.top_irq_shift = 9, + .support_consumed_addr = false, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 23, +}; + +static struct cam_vfe_irq_hw_info vfe17x_irq_hw_info = { + .reset_mask = BIT(31), + .supported_irq = CAM_VFE_HW_IRQ_CAP_LITE_INT_CSID, + .top_irq_reg = &vfe17x_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe_lite17x_hw_info = { + .irq_hw_info = &vfe17x_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_2_0, + .bus_hw_info = &vfe17x_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_2_0, + .top_hw_info = &vfe17x_top_hw_info, + +}; + +#endif /* _CAM_VFE_LITE17X_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h new file mode 100644 index 0000000000..ad24674af2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h @@ -0,0 +1,510 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
/* NOTE(review): fused unified-diff hunk containing the body of the new file cam_vfe_lite48x.h (VFE-lite bus ver3 / top ver3 tables: top IRQ regs, CAMIF-lite RDI paths, 4 bus write-master clients, 4 RDI output ports) followed by the diff header and the head of cam_vfe_lite68x.h. Patch text is byte-identical except one fix, marked below. The guard macro uses a lowercase 'x' (_CAM_VFE_LITE48x_H_) unlike LITE17X/LITE108X — self-consistent with its #define/#endif, so left as-is. */
+ */ + +#ifndef _CAM_VFE_LITE48x_H_ +#define _CAM_VFE_LITE48x_H_ + +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" +#include "cam_vfe_top_ver3.h" +#include "cam_vfe_core.h" + +static struct cam_irq_register_set vfe48x_top_irq_reg_set[3] = { + { + .mask_reg_offset = 0x00000028, + .clear_reg_offset = 0x00000034, + .status_reg_offset = 0x00000040, + }, + { + .mask_reg_offset = 0x0000002C, + .clear_reg_offset = 0x00000038, + .status_reg_offset = 0x00000044, + }, + { + .mask_reg_offset = 0x00000030, + .clear_reg_offset = 0x0000003C, + .status_reg_offset = 0x00000048, + }, +}; + +static struct cam_irq_controller_reg_info vfe48x_top_irq_reg_info = { + .num_registers = 3, + .irq_reg_set = vfe48x_top_irq_reg_set, + .global_irq_cmd_offset = 0x00000024, + .global_clear_bitmask = 0x00000001, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static struct cam_vfe_top_ver3_reg_offset_common vfe48x_top_common_reg = { + .hw_version = 0x00000000, + .titan_version = 0x00000004, + .hw_capability = 0x00000008, + .global_reset_cmd = 0x0000000C, + .core_cgc_ovd_0 = 0x00000010, + .ahb_cgc_ovd = 0x00000014, + .noc_cgc_ovd = 0x00000018, + .reg_update_cmd = 0x00000020, + .diag_config = 0x00000050, + .diag_sensor_status_0 = 0x00000054, + .bus_overflow_status = 0x00001A68, + .top_debug_cfg = 0x00000074, + .top_debug_0 = 0x0000005C, + .top_debug_1 = 0x00000068, + .top_debug_2 = 0x0000006C, + .top_debug_3 = 0x00000070, +}; + +static struct cam_vfe_camif_lite_ver3_reg vfe48x_camif_rdi[4] = { + { + .lite_hw_version = 0x1200, + .lite_hw_status = 0x1204, + .lite_module_config = 0x1260, + .lite_skip_period = 0x1268, + .lite_irq_subsample_pattern = 0x126C, + .lite_epoch_irq = 0x1270, + .lite_debug_1 = 0x13F0, + .lite_debug_0 = 0x13F4, + .lite_test_bus_ctrl = 0x13F8, + .camif_lite_spare = 0x13FC, + .reg_update_cmd = 0x0020, + }, + { + .lite_hw_version = 0x1400, + .lite_hw_status = 0x1404, + .lite_module_config = 0x1460, + .lite_skip_period = 0x1468, + .lite_irq_subsample_pattern = 0x146C, + 
/* CAMIF-lite RDI register banks 1-3 (0x1400/0x1600/0x1800), sharing reg_update_cmd 0x0020. */
.lite_epoch_irq = 0x1470, + .lite_debug_1 = 0x15F0, + .lite_debug_0 = 0x15F4, + .lite_test_bus_ctrl = 0x15F8, + .camif_lite_spare = 0x15FC, + .reg_update_cmd = 0x0020, + }, + { + .lite_hw_version = 0x1600, + .lite_hw_status = 0x1604, + .lite_module_config = 0x1660, + .lite_skip_period = 0x1668, + .lite_irq_subsample_pattern = 0x166C, + .lite_epoch_irq = 0x1670, + .lite_debug_1 = 0x17F0, + .lite_debug_0 = 0x17F4, + .lite_test_bus_ctrl = 0x17F8, + .camif_lite_spare = 0x17FC, + .reg_update_cmd = 0x0020, + }, + { + .lite_hw_version = 0x1800, + .lite_hw_status = 0x1804, + .lite_module_config = 0x1860, + .lite_skip_period = 0x1868, + .lite_irq_subsample_pattern = 0x186C, + .lite_epoch_irq = 0x1870, + .lite_debug_1 = 0x19F0, + .lite_debug_0 = 0x19F4, + .lite_test_bus_ctrl = 0x19F8, + .camif_lite_spare = 0x19FC, + .reg_update_cmd = 0x0020, + }, +}; + +static struct cam_vfe_camif_lite_ver3_reg_data vfe48x_camif_rdi_reg_data[4] = { + { + .extern_reg_update_shift = 0, + .reg_update_cmd_data = 0x1, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x1, + .epoch0_irq_mask = 0x4, + .epoch1_irq_mask = 0x8, + .eof_irq_mask = 0x02, + .error_irq_mask0 = 0x1, + .error_irq_mask2 = 0x100, + .subscribe_irq_mask1 = 0x3, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x1, + }, + { + .extern_reg_update_shift = 0, + .reg_update_cmd_data = 0x2, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x10, + .epoch0_irq_mask = 0x40, + .epoch1_irq_mask = 0x80, + .eof_irq_mask = 0x20, + .error_irq_mask0 = 0x2, + .error_irq_mask2 = 0x200, + .subscribe_irq_mask1 = 0x30, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x1, + }, + { + .extern_reg_update_shift = 0, + .reg_update_cmd_data = 0x4, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x100, + .epoch0_irq_mask = 0x400, + .epoch1_irq_mask = 0x800, + .eof_irq_mask = 0x200, + .error_irq_mask0 = 0x4, + .error_irq_mask2 = 0x400, + .subscribe_irq_mask1 = 0x300, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x1, + }, + { + .extern_reg_update_shift = 0, + 
/* RDI3 IRQ mask data; per-path hw info wiring the four CAMIF-lite banks to the common top regs; top hw info with 4 RDI muxes; 2 bus IRQ registers; MID table (one MID per RDI: 16..19); start of the ver3 bus hw info. */
.reg_update_cmd_data = 0x8, + .epoch_line_cfg = 0x0, + .sof_irq_mask = 0x1000, + .epoch0_irq_mask = 0x4000, + .epoch1_irq_mask = 0x8000, + .eof_irq_mask = 0x2000, + .error_irq_mask0 = 0x8, + .error_irq_mask2 = 0x800, + .subscribe_irq_mask1 = 0x3000, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x1, + }, +}; + +static struct cam_vfe_camif_lite_ver3_hw_info + vfe48x_rdi_hw_info[CAM_VFE_RDI_VER2_MAX] = { + { + .common_reg = &vfe48x_top_common_reg, + .camif_lite_reg = &vfe48x_camif_rdi[0], + .reg_data = &vfe48x_camif_rdi_reg_data[0], + }, + { + .common_reg = &vfe48x_top_common_reg, + .camif_lite_reg = &vfe48x_camif_rdi[1], + .reg_data = &vfe48x_camif_rdi_reg_data[1], + }, + { + .common_reg = &vfe48x_top_common_reg, + .camif_lite_reg = &vfe48x_camif_rdi[2], + .reg_data = &vfe48x_camif_rdi_reg_data[2], + }, + { + .common_reg = &vfe48x_top_common_reg, + .camif_lite_reg = &vfe48x_camif_rdi[3], + .reg_data = &vfe48x_camif_rdi_reg_data[3], + }, +}; + +static struct cam_vfe_top_ver3_hw_info vfe48x_top_hw_info = { + .common_reg = &vfe48x_top_common_reg, + .rdi_hw_info[0] = &vfe48x_rdi_hw_info[0], + .rdi_hw_info[1] = &vfe48x_rdi_hw_info[1], + .rdi_hw_info[2] = &vfe48x_rdi_hw_info[2], + .rdi_hw_info[3] = &vfe48x_rdi_hw_info[3], + .num_mux = 4, + .mux_type = { + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, +}; + +static struct cam_irq_register_set vfe48x_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x00001A18, + .clear_reg_offset = 0x00001A20, + .status_reg_offset = 0x00001A28, + }, + { + .mask_reg_offset = 0x00001A1C, + .clear_reg_offset = 0x00001A24, + .status_reg_offset = 0x00001A2C, + }, +}; + +static uint32_t vfe48x_out_port_mid[][4] = { + {16, 0, 0, 0}, + {17, 0, 0, 0}, + {18, 0, 0, 0}, + {19, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe48x_bus_hw_info = { + .common_reg = { + .hw_version = 0x00001A00, + .cgc_ovd = 0x00001A08, + .if_frameheader_cfg = { + 0x00001A34, + 0x00001A38, + 0x00001A3C, + 
/* Bus common regs (continued) and write-master client 0 (RDI0 bank at 0x1C00) incl. supported-format bitmask. */
0x00001A40, + }, + .pwr_iso_cfg = 0x00001A5C, + .overflow_status_clear = 0x00001A60, + .ccif_violation_status = 0x00001A64, + .overflow_status = 0x00001A68, + .image_size_violation_status = 0x00001A70, + .debug_status_top_cfg = 0x00001AD4, + .debug_status_top = 0x00001AD8, + .test_bus_ctrl = 0x00001ADC, + .top_irq_mask_0 = 0x00000028, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe48x_bus_irq_reg, + .global_irq_cmd_offset = 0x00001A30, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = 4, + .bus_client_reg = { + /* RDI 0 */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .line_done_cfg = 0x00001C2C, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .framedrop_period = 0x00001C38, + .framedrop_pattern = 0x00001C3C, + .system_cache_cfg = 0x00001C60, + .burst_limit = 0x00001C64, + .addr_status_0 = 0x00001C68, + .addr_status_1 = 0x00001C6C, + .addr_status_2 = 0x00001C70, + .addr_status_3 = 0x00001C74, + .debug_status_cfg = 0x00001C78, + .debug_status_0 = 0x00001C7C, + .debug_status_1 = 0x00001C80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + 
/* Write-master clients 1 and 2 (RDI1/RDI2 banks at 0x1D00/0x1E00), same format set. */
BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* RDI 1 */ + { + .cfg = 0x00001D00, + .image_addr = 0x00001D04, + .frame_incr = 0x00001D08, + .image_cfg_0 = 0x00001D0C, + .image_cfg_1 = 0x00001D10, + .image_cfg_2 = 0x00001D14, + .packer_cfg = 0x00001D18, + .frame_header_addr = 0x00001D20, + .frame_header_incr = 0x00001D24, + .frame_header_cfg = 0x00001D28, + .line_done_cfg = 0x00001D2C, + .irq_subsample_period = 0x00001D30, + .irq_subsample_pattern = 0x00001D34, + .framedrop_period = 0x00001D38, + .framedrop_pattern = 0x00001D3C, + .system_cache_cfg = 0x00001D60, + .burst_limit = 0x00001D64, + .addr_status_0 = 0x00001D68, + .addr_status_1 = 0x00001D6C, + .addr_status_2 = 0x00001D70, + .addr_status_3 = 0x00001D74, + .debug_status_cfg = 0x00001D78, + .debug_status_0 = 0x00001D7C, + .debug_status_1 = 0x00001D80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* RDI 2 */ + { + .cfg = 0x00001E00, + .image_addr = 0x00001E04, + .frame_incr = 0x00001E08, + .image_cfg_0 = 0x00001E0C, + .image_cfg_1 = 0x00001E10, + .image_cfg_2 = 0x00001E14, + .packer_cfg = 0x00001E18, + .frame_header_addr = 0x00001E20, + .frame_header_incr = 0x00001E24, + .frame_header_cfg = 0x00001E28, + .line_done_cfg = 0x00001E2C, + .irq_subsample_period = 0x00001E30, + .irq_subsample_pattern = 0x00001E34, + 
/* Remainder of client 2 and client 3 (RDI3 bank at 0x1F00). */
.framedrop_period = 0x00001E38, + .framedrop_pattern = 0x00001E3C, + .system_cache_cfg = 0x00001E60, + .burst_limit = 0x00001E64, + .addr_status_0 = 0x00001E68, + .addr_status_1 = 0x00001E6C, + .addr_status_2 = 0x00001E70, + .addr_status_3 = 0x00001E74, + .debug_status_cfg = 0x00001E78, + .debug_status_0 = 0x00001E7C, + .debug_status_1 = 0x00001E80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* RDI 3 */ + { + .cfg = 0x00001F00, + .image_addr = 0x00001F04, + .frame_incr = 0x00001F08, + .image_cfg_0 = 0x00001F0C, + .image_cfg_1 = 0x00001F10, + .image_cfg_2 = 0x00001F14, + .packer_cfg = 0x00001F18, + .frame_header_addr = 0x00001F20, + .frame_header_incr = 0x00001F24, + .frame_header_cfg = 0x00001F28, + .line_done_cfg = 0x00001F2C, + .irq_subsample_period = 0x00001F30, + .irq_subsample_pattern = 0x00001F34, + .framedrop_period = 0x00001F38, + .framedrop_pattern = 0x00001F3C, + .system_cache_cfg = 0x00001F60, + .burst_limit = 0x00001F64, + .addr_status_0 = 0x00001F68, + .addr_status_1 = 0x00001F6C, + .addr_status_2 = 0x00001F70, + .addr_status_3 = 0x00001F74, + .debug_status_cfg = 0x00001F78, + .debug_status_0 = 0x00001F7C, + .debug_status_1 = 0x00001F80, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + 
/* The 4 output-port descriptors (one WM each, comp-done bits 4..7), then end of lite48x and the diff header + head of cam_vfe_lite68x.h (module/WM descriptor tables and the first top IRQ register). */
BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + }, + .num_out = 4, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .mid = vfe48x_out_port_mid[0], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 0, + }, + .name = { + "LITE_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .mid = vfe48x_out_port_mid[1], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 1, + }, + .name = { + "LITE_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .mid = vfe48x_out_port_mid[2], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 2, + }, + .name = { + "LITE_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .mid = vfe48x_out_port_mid[3], + .num_mid = 1, + .num_wm = 1, + .wm_idx = { + 3, + }, + .name = { + "LITE_3", + }, + }, + }, + .num_comp_grp = 4, + .comp_done_mask = { + BIT(4), BIT(5), BIT(6), BIT(7), + }, + .top_irq_shift = 4, + .support_consumed_addr = true, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 25, + .supported_irq = CAM_VFE_HW_IRQ_CAP_BUF_DONE | CAM_VFE_HW_IRQ_CAP_RUP, + .comp_cfg_needed = true, +}; + +static struct cam_vfe_irq_hw_info vfe48x_irq_hw_info = { + .reset_mask = BIT(17), + .supported_irq = CAM_VFE_HW_IRQ_CAP_LITE_INT_CSID, + .top_irq_reg 
/* FIX(review): vfe68x_top_irq_reg_set was declared with size [3] but has exactly two initializers, and its controller info sets .num_registers = 2 (see the following hunk); siblings size the array to match the entry count (vfe17x [2]/2, vfe48x [3]/3). Changed [3] -> [2] so the declared size matches the initialized/used entries; the third element was zero-filled and never read. */
= &vfe48x_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe_lite48x_hw_info = { + .irq_hw_info = &vfe48x_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe48x_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_3_0, + .top_hw_info = &vfe48x_top_hw_info, + +}; + +#endif /* _CAM_VFE_LITE48x_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite68x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite68x.h new file mode 100644 index 0000000000..0741f38206 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite68x.h @@ -0,0 +1,720 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + + +#ifndef _CAM_VFE_LITE68X_H_ +#define _CAM_VFE_LITE68X_H_ +#include "cam_vfe_camif_ver3.h" +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_VFE_68X_NUM_DBG_REG 5 + +static struct cam_vfe_top_ver4_module_desc vfe68x_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_BLS", + }, + { + .id = 1, + .desc = "CLC_GLUT", + }, + { + .id = 2, + .desc = "CLC_STATS_BG", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc vfe680x_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "RDI_0", + }, + { + .wm_id = 1, + .desc = "RDI_1", + }, + { + .wm_id = 2, + .desc = "RDI_2", + }, + { + .wm_id = 3, + .desc = "RDI_3", + }, + { + .wm_id = 4, + .desc = "GAMMA", + }, + { + .wm_id = 5, + .desc = "BE", + }, +}; + +static struct cam_irq_register_set vfe68x_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x00001024, + .clear_reg_offset = 0x0000102C, + .status_reg_offset = 0x0000101C, + .set_reg_offset = 0x00001034, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + { + 
/* NOTE(review): fused unified-diff hunk continuing cam_vfe_lite68x.h: second entry of the top IRQ register set, IRQ controller info (.num_registers = 2, matching the two initialized reg-set entries), top debug registers, top ver4 common regs, IPP/RDI path IRQ-mask data, debug/diag descriptor tables, top hw info, bus IRQ regs, MID table (8..13), and the start of the ver3 bus hw info. The bus hw info runs past this view and is left untouched. NOTE(review): identifiers mix vfe68x_/vfe680x_ prefixes (e.g. vfe680x_wr_client_desc, vfe680x_bus_irq_reg) — declarations and uses are consistent, so left as-is; confirm intended naming upstream. All patch text below is byte-identical; only these standalone review comments are added. */
.mask_reg_offset = 0x00001028, + .clear_reg_offset = 0x00001030, + .status_reg_offset = 0x00001020, + }, +}; + +static struct cam_irq_controller_reg_info vfe68x_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe68x_top_irq_reg_set, + .global_irq_cmd_offset = 0x00001038, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t vfe68x_top_debug_reg[] = { + 0x0000105C, + 0x00001060, + 0x00001064, + 0x00001068, + 0x0000106C, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe68x_top_common_reg = { + .hw_version = 0x00001000, + .hw_capability = 0x00001004, + .core_cgc_ovd_0 = 0x00001014, + .ahb_cgc_ovd = 0x00001018, + .core_cfg_0 = 0x0000103C, + .diag_config = 0x00001040, + .diag_sensor_status = {0x00001044, 0x00001048}, + .diag_frm_cnt_status = {0x0000104C, 0x00001050}, + .ipp_violation_status = 0x00001054, + .bus_violation_status = 0x00001264, + .bus_overflow_status = 0x00001268, + .top_debug_cfg = 0x00001074, + .num_top_debug_reg = CAM_VFE_68X_NUM_DBG_REG, + .top_debug = vfe68x_top_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS1, +}; + +static struct cam_vfe_ver4_path_reg_data vfe68x_ipp_reg_data = +{ + .sof_irq_mask = 0x1, + .eof_irq_mask = 0x2, + .error_irq_mask = 0x6, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + .ipp_violation_mask = 0x10, + .diag_violation_mask = 0x4, + .diag_sensor_sel_mask = 0x8, + .diag_frm_count_mask_0 = 0x200, +}; + +static struct cam_vfe_ver4_path_reg_data vfe68x_rdi_reg_data[4] = { + { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x0, + .diag_frm_count_mask_0 = 0x20, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x10, + .eof_irq_mask = 0x20, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x2, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x40, 
/* RDI2/RDI3 path data, per-RDI hw info, top debug-register bit descriptors, and diag register fields (sensor HBI/VBI and per-pipe frame counters). */
+ .eof_irq_mask = 0x80, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x4, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, +}; + +static struct cam_vfe_ver4_path_hw_info + vfe68x_rdi_hw_info[] = { + { + .common_reg = &vfe68x_top_common_reg, + .reg_data = &vfe68x_rdi_reg_data[0], + }, + { + .common_reg = &vfe68x_top_common_reg, + .reg_data = &vfe68x_rdi_reg_data[1], + }, + { + .common_reg = &vfe68x_top_common_reg, + .reg_data = &vfe68x_rdi_reg_data[2], + }, + { + .common_reg = &vfe68x_top_common_reg, + .reg_data = &vfe68x_rdi_reg_data[3], + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info vfe68x_dbg_reg_info[CAM_VFE_68X_NUM_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit( + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + { + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + }, + VFE_DBG_INFO_ARRAY_4bit( + "PP_THROTTLE", + "STATS_BG_THROTTLE", + "STATS_BG", + "BLS", + "GLUT", + "unused", + "unused", + "unused" + ), + VFE_DBG_INFO_ARRAY_4bit( + "RDI_0", + "RDI_1", + "RDI_2", + "RDI_3", + "PP_STATS_BG", + "PP_GLUT", + "PP_STATS_BG", + "PP_GLUT" + ), + VFE_DBG_INFO_ARRAY_4bit( + "unused", + "unused", + "unused", + "unused", + "unused", + "unused", + "unused", + "unused" + ), +}; + +static struct cam_vfe_top_ver4_diag_reg_info vfe_lite68x_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name 
/* Diag field groupings, top hw info (1 IPP + 4 RDI muxes), bus IRQ register pair, and the MID table. */
= "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_3_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_IPP_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite68x_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &vfe_lite68x_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &vfe_lite68x_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite68x_diag_frame_field[] = { + { + .num_fields = 4, + .field = &vfe_lite68x_diag_reg_info[4], + }, + { + .num_fields = 1, + .field = &vfe_lite68x_diag_reg_info[8], + }, +}; + +static struct cam_vfe_top_ver4_hw_info vfe68x_top_hw_info = { + .common_reg = &vfe68x_top_common_reg, + .rdi_hw_info = vfe68x_rdi_hw_info, + .vfe_full_hw_info = { + .common_reg = &vfe68x_top_common_reg, + .reg_data = &vfe68x_ipp_reg_data, + }, + .ipp_module_desc = vfe68x_ipp_mod_desc, + .wr_client_desc = vfe680x_wr_client_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .num_rdi = ARRAY_SIZE(vfe68x_rdi_hw_info), + .top_debug_reg_info = &vfe68x_dbg_reg_info, + .diag_sensor_info = vfe_lite68x_diag_sensor_field, + .diag_frame_info = vfe_lite68x_diag_frame_field, +}; + +static struct cam_irq_register_set vfe680x_bus_irq_reg[2] = { + { + .mask_reg_offset = 0x00001218, + .clear_reg_offset = 0x00001220, + .status_reg_offset = 0x00001228, + }, + { + .mask_reg_offset = 0x0000121C, + .clear_reg_offset = 0x00001224, + .status_reg_offset = 0x0000122C, + }, +}; + +static uint32_t vfe680x_out_port_mid[][4] = { + {8, 0, 0, 0}, + {9, 0, 0, 0}, + {10, 0, 0, 
/* Start of vfe680x_bus_hw_info (ver3 bus: common regs, then 6 write-master client banks at 0x1400..). This definition continues beyond the visible chunk — left byte-identical. */
0}, + {11, 0, 0, 0}, + {12, 0, 0, 0}, + {13, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe680x_bus_hw_info = { + .common_reg = { + .hw_version = 0x00001200, + .cgc_ovd = 0x00001208, + .if_frameheader_cfg = { + 0x00001234, + 0x00001238, + 0x0000123C, + 0x00001240, + 0x00001244, + }, + .pwr_iso_cfg = 0x0000125C, + .overflow_status_clear = 0x00001260, + .ccif_violation_status = 0x00001264, + .overflow_status = 0x00001268, + .image_size_violation_status = 0x00001270, + .debug_status_top_cfg = 0x000012D4, + .debug_status_top = 0x000012D8, + .test_bus_ctrl = 0x000012DC, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe680x_bus_irq_reg, + .global_irq_cmd_offset = 0x00001230, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = 6, + .bus_client_reg = { + /* BUS Client 0 RDI0 */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .frame_header_addr = 0x00001420, + .frame_header_incr = 0x00001424, + .frame_header_cfg = 0x00001428, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .framedrop_period = 0x00001438, + .framedrop_pattern = 0x0000143C, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .system_cache_cfg = 0x00001468, + .addr_status_0 = 0x00001470, + .addr_status_1 = 0x00001474, + .addr_status_2 = 0x00001478, + .addr_status_3 = 0x0000147C, + .debug_status_cfg = 0x00001480, + .debug_status_0 = 0x00001484, + .debug_status_1 = 0x00001488, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + 
BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 1 RDI1 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + .frame_header_incr = 0x00001524, + .frame_header_cfg = 0x00001528, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .framedrop_period = 0x00001538, + .framedrop_pattern = 0x0000153C, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 0x00001568, + .addr_status_0 = 0x00001570, + .addr_status_1 = 0x00001574, + .addr_status_2 = 0x00001578, + .addr_status_3 = 0x0000157C, + .debug_status_cfg = 0x00001580, + .debug_status_0 = 0x00001584, + .debug_status_1 = 0x00001588, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 2 RDI2 */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 
0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .framedrop_period = 0x00001638, + .framedrop_pattern = 0x0000163C, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_status_0 = 0x00001670, + .addr_status_1 = 0x00001674, + .addr_status_2 = 0x00001678, + .addr_status_3 = 0x0000167C, + .debug_status_cfg = 0x00001680, + .debug_status_0 = 0x00001684, + .debug_status_1 = 0x00001688, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 3 RDI3 */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .framedrop_period = 0x00001738, + .framedrop_pattern = 0x0000173C, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_status_0 = 0x00001770, + .addr_status_1 = 0x00001774, + .addr_status_2 
= 0x00001778, + .addr_status_3 = 0x0000177C, + .debug_status_cfg = 0x00001780, + .debug_status_0 = 0x00001784, + .debug_status_1 = 0x00001788, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 4 Gamma */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .framedrop_period = 0x00001838, + .framedrop_pattern = 0x0000183C, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_status_0 = 0x00001870, + .addr_status_1 = 0x00001874, + .addr_status_2 = 0x00001878, + .addr_status_3 = 0x0000187C, + .debug_status_cfg = 0x00001880, + .debug_status_0 = 0x00001884, + .debug_status_1 = 0x00001888, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) 
| + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 5 Stats BE */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .framedrop_period = 0x00001938, + .framedrop_pattern = 0x0000193C, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + .addr_status_0 = 0x00001970, + .addr_status_1 = 0x00001974, + .addr_status_2 = 0x00001978, + .addr_status_3 = 0x0000197C, + .debug_status_cfg = 0x00001980, + .debug_status_0 = 0x00001984, + .debug_status_1 = 0x00001988, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + }, + .num_out = 6, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .num_wm = 1, + .mid = vfe680x_out_port_mid[0], + .num_mid = 1, + .line_based = 1, + .wm_idx = { + 0, + }, + .name = { + "LITE_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .num_wm = 1, + .mid = vfe680x_out_port_mid[1], + .num_mid = 1, + .line_based = 1, + .wm_idx = { + 1, + }, + .name = { + "LITE_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .num_wm = 1, + .mid = 
vfe680x_out_port_mid[2], + .num_mid = 1, + .line_based = 1, + .wm_idx = { + 2, + }, + .name = { + "LITE_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .num_wm = 1, + .mid = vfe680x_out_port_mid[3], + .num_mid = 1, + .line_based = 1, + .wm_idx = { + 3, + }, + .name = { + "LITE_3", + }, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe680x_out_port_mid[4], + .num_mid = 1, + .wm_idx = { + 4, + }, + .name = { + "PREPROCESS_RAW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe680x_out_port_mid[5], + .num_mid = 1, + .wm_idx = { + 5, + }, + .name = { + "STATS_BG", + }, + }, + }, + .num_comp_grp = 5, + .support_consumed_addr = true, + .comp_done_mask = { + BIT(0), BIT(1), BIT(2), BIT(3), BIT(4), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 33, +}; + +static struct cam_vfe_irq_hw_info vfe68x_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_LITE_EXT_CSID, + .top_irq_reg = &vfe68x_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe_lite68x_hw_info = { + .irq_hw_info = &vfe68x_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe680x_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe68x_top_hw_info, +}; + +#endif /* _CAM_VFE_LITE68X_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite78x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite78x.h new file mode 100644 index 0000000000..d71d887f9d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite78x.h 
@@ -0,0 +1,715 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + + +#ifndef _CAM_VFE_LITE78X_H_ +#define _CAM_VFE_LITE78X_H_ +#include "cam_vfe_camif_ver3.h" +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" + +#define CAM_VFE_78X_NUM_DBG_REG 5 + +static struct cam_vfe_top_ver4_module_desc vfe_lite78x_ipp_mod_desc[] = { + { + .id = 0, + .desc = "CLC_BLS", + }, + { + .id = 1, + .desc = "CLC_GLUT", + }, + { + .id = 2, + .desc = "CLC_STATS_BG", + }, +}; + +static struct cam_vfe_top_ver4_wr_client_desc vfe_lite78x_wr_client_desc[] = { + { + .wm_id = 0, + .desc = "RDI_0", + }, + { + .wm_id = 1, + .desc = "RDI_1", + }, + { + .wm_id = 2, + .desc = "RDI_2", + }, + { + .wm_id = 3, + .desc = "RDI_3", + }, + { + .wm_id = 4, + .desc = "GAMMA", + }, + { + .wm_id = 5, + .desc = "BE", + }, +}; + +static struct cam_irq_register_set vfe_lite78x_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x00001024, + .clear_reg_offset = 0x0000102C, + .status_reg_offset = 0x0000101C, + .set_reg_offset = 0x00001034, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + { + .mask_reg_offset = 0x00001028, + .clear_reg_offset = 0x00001030, + .status_reg_offset = 0x00001020, + }, +}; + +static struct cam_irq_controller_reg_info vfe_lite78x_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe_lite78x_top_irq_reg_set, + .global_irq_cmd_offset = 0x00001038, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t vfe_lite78x_top_debug_reg[] = { + 0x0000105C, + 0x00001060, + 0x00001064, + 0x00001068, + 0x0000106C, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe_lite78x_top_common_reg = { + .hw_version = 0x00001000, + .hw_capability = 0x00001004, + .core_cgc_ovd_0 = 
0x00001014, + .ahb_cgc_ovd = 0x00001018, + .core_cfg_0 = 0x0000103C, + .diag_config = 0x00001040, + .diag_sensor_status = {0x00001044, 0x00001048}, + .diag_frm_cnt_status = {0x0000104C, 0x00001050}, + .ipp_violation_status = 0x00001054, + .bus_violation_status = 0x00001264, + .bus_overflow_status = 0x00001268, + .top_debug_cfg = 0x00001074, + .num_top_debug_reg = CAM_VFE_78X_NUM_DBG_REG, + .top_debug = vfe_lite78x_top_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS1, +}; + +static struct cam_vfe_ver4_path_reg_data vfe_lite78x_ipp_reg_data = +{ + .sof_irq_mask = 0x1, + .eof_irq_mask = 0x2, + .error_irq_mask = 0x6, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + .ipp_violation_mask = 0x10, + .diag_violation_mask = 0x4, + .diag_sensor_sel_mask = 0x8, + .diag_frm_count_mask_0 = 0x200, +}; + +static struct cam_vfe_ver4_path_reg_data vfe_lite78x_rdi_reg_data[4] = { + { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x0, + .diag_frm_count_mask_0 = 0x20, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x10, + .eof_irq_mask = 0x20, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x2, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x40, + .eof_irq_mask = 0x80, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x4, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, +}; + +static struct cam_vfe_ver4_path_hw_info + vfe_lite78x_rdi_hw_info[] = { + { + .common_reg = &vfe_lite78x_top_common_reg, + .reg_data = &vfe_lite78x_rdi_reg_data[0], + }, + { + .common_reg = &vfe_lite78x_top_common_reg, + .reg_data = &vfe_lite78x_rdi_reg_data[1], + }, + 
{ + .common_reg = &vfe_lite78x_top_common_reg, + .reg_data = &vfe_lite78x_rdi_reg_data[2], + }, + { + .common_reg = &vfe_lite78x_top_common_reg, + .reg_data = &vfe_lite78x_rdi_reg_data[3], + }, +}; + +static struct cam_vfe_top_ver4_debug_reg_info vfe78x_dbg_reg_info[CAM_VFE_78X_NUM_DBG_REG][8] = { + VFE_DBG_INFO_ARRAY_4bit( + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved", + "test_bus_reserved" + ), + { + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + VFE_DBG_INFO(32, "non-CLC info"), + }, + VFE_DBG_INFO_ARRAY_4bit( + "PP_THROTTLE", + "STATS_BG_THROTTLE", + "STATS_BG", + "BLS", + "GLUT", + "unused", + "unused", + "unused" + ), + VFE_DBG_INFO_ARRAY_4bit( + "RDI_0", + "RDI_1", + "RDI_2", + "RDI_3", + "PP_STATS_BG", + "PP_GLUT", + "PP_STATS_BG", + "PP_GLUT" + ), + VFE_DBG_INFO_ARRAY_4bit( + "unused", + "unused", + "unused", + "unused", + "unused", + "unused", + "unused", + "unused" + ), +}; + +static struct cam_vfe_top_ver4_diag_reg_info vfe_lite78x_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_3_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_IPP_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite78x_diag_sensor_field[] = { + { + .num_fields = 3, + .field = 
&vfe_lite78x_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &vfe_lite78x_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite78x_diag_frame_field[] = { + { + .num_fields = 4, + .field = &vfe_lite78x_diag_reg_info[4], + }, + { + .num_fields = 1, + .field = &vfe_lite78x_diag_reg_info[8], + }, +}; + +static struct cam_vfe_top_ver4_hw_info vfe_lite78x_top_hw_info = { + .common_reg = &vfe_lite78x_top_common_reg, + .rdi_hw_info = vfe_lite78x_rdi_hw_info, + .vfe_full_hw_info = { + .common_reg = &vfe_lite78x_top_common_reg, + .reg_data = &vfe_lite78x_ipp_reg_data, + }, + .ipp_module_desc = vfe_lite78x_ipp_mod_desc, + .wr_client_desc = vfe_lite78x_wr_client_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .top_debug_reg_info = &vfe78x_dbg_reg_info, + .num_rdi = ARRAY_SIZE(vfe_lite78x_rdi_hw_info), + .diag_sensor_info = vfe_lite78x_diag_sensor_field, + .diag_frame_info = vfe_lite78x_diag_frame_field, +}; + +static struct cam_irq_register_set vfe_lite78x_bus_irq_reg[1] = { + { + .mask_reg_offset = 0x00001218, + .clear_reg_offset = 0x00001220, + .status_reg_offset = 0x00001228, + }, +}; + +static uint32_t vfe_lite78x_out_port_mid[][4] = { + {8, 0, 0, 0}, + {9, 0, 0, 0}, + {10, 0, 0, 0}, + {11, 0, 0, 0}, + {12, 0, 0, 0}, + {13, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe_lite78x_bus_hw_info = { + .common_reg = { + .hw_version = 0x00001200, + .cgc_ovd = 0x00001208, + .if_frameheader_cfg = { + 0x00001234, + 0x00001238, + 0x0000123C, + 0x00001240, + 0x00001244, + }, + .pwr_iso_cfg = 0x0000125C, + .overflow_status_clear = 0x00001260, + .ccif_violation_status = 0x00001264, + .overflow_status = 0x00001268, + .image_size_violation_status = 0x00001270, + .debug_status_top_cfg = 0x000012D4, + .debug_status_top = 0x000012D8, + .test_bus_ctrl = 0x000012DC, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + 
.wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = vfe_lite78x_bus_irq_reg, + .global_irq_cmd_offset = 0x00001230, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = 6, + .bus_client_reg = { + /* BUS Client 0 RDI0 */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .frame_header_addr = 0x00001420, + .frame_header_incr = 0x00001424, + .frame_header_cfg = 0x00001428, + .line_done_cfg = 0x0000142C, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .system_cache_cfg = 0x00001468, + .addr_cfg = 0x00001470, + .addr_status_0 = 0x00001474, + .addr_status_1 = 0x00001478, + .addr_status_2 = 0x0000147C, + .addr_status_3 = 0x00001480, + .debug_status_cfg = 0x00001484, + .debug_status_0 = 0x00001488, + .debug_status_1 = 0x0000148C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 1 RDI1 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + 
.frame_header_incr = 0x00001524, + .frame_header_cfg = 0x00001528, + .line_done_cfg = 0x0000152C, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 0x00001568, + .addr_cfg = 0x00001570, + .addr_status_0 = 0x00001574, + .addr_status_1 = 0x00001578, + .addr_status_2 = 0x0000157C, + .addr_status_3 = 0x00001580, + .debug_status_cfg = 0x00001584, + .debug_status_0 = 0x00001588, + .debug_status_1 = 0x0000158C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 2 RDI2 */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .line_done_cfg = 0x0000162C, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .mmu_prefetch_cfg = 0x00001660, + .mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .addr_status_0 = 0x00001674, + .addr_status_1 = 0x00001678, + .addr_status_2 = 0x0000167C, + .addr_status_3 = 0x00001680, + .debug_status_cfg = 0x00001684, + .debug_status_0 = 0x00001688, + .debug_status_1 = 0x0000168C, + .comp_group = 
CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 3 RDI3 */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .line_done_cfg = 0x0000172C, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .addr_status_0 = 0x00001774, + .addr_status_1 = 0x00001778, + .addr_status_2 = 0x0000177C, + .addr_status_3 = 0x00001780, + .debug_status_cfg = 0x00001784, + .debug_status_0 = 0x00001788, + .debug_status_1 = 0x0000178C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | 
BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 4 Gamma */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .line_done_cfg = 0x0000182C, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .addr_status_0 = 0x00001874, + .addr_status_1 = 0x00001878, + .addr_status_2 = 0x0000187C, + .addr_status_3 = 0x00001880, + .debug_status_cfg = 0x00001884, + .debug_status_0 = 0x00001888, + .debug_status_1 = 0x0000188C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 5 Stats BE */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .line_done_cfg = 0x0000192C, + .irq_subsample_period = 0x00001930, + 
.irq_subsample_pattern = 0x00001934, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + .addr_status_0 = 0x00001974, + .addr_status_1 = 0x00001978, + .addr_status_2 = 0x0000197C, + .addr_status_3 = 0x00001980, + .debug_status_cfg = 0x00001984, + .debug_status_0 = 0x00001988, + .debug_status_1 = 0x0000198C, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + }, + .num_out = 6, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite78x_out_port_mid[0], + .num_mid = 1, + .wm_idx = { + 0, + }, + .name = { + "LITE_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite78x_out_port_mid[1], + .num_mid = 1, + .wm_idx = { + 1, + }, + .name = { + "LITE_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite78x_out_port_mid[2], + .num_mid = 1, + .wm_idx = { + 2, + }, + .name = { + "LITE_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite78x_out_port_mid[3], + .num_mid = 1, + .wm_idx = { + 3, + }, + .name = { + "LITE_3", + }, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe_lite78x_out_port_mid[4], + .num_mid = 1, + .wm_idx = { + 4, + }, + .name = { + "PREPROCESS_RAW", + }, + }, + { + 
.vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe_lite78x_out_port_mid[5], + .num_mid = 1, + .wm_idx = { + 5, + }, + .name = { + "STATS_BG", + }, + }, + }, + .num_comp_grp = 5, + .support_consumed_addr = true, + .comp_done_mask = { + BIT(0), BIT(1), BIT(2), BIT(3), BIT(4), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 34, +}; + +static struct cam_vfe_irq_hw_info vfe_lite78x_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_LITE_EXT_CSID, + .top_irq_reg = &vfe_lite78x_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe_lite78x_hw_info = { + .irq_hw_info = &vfe_lite78x_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe_lite78x_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe_lite78x_top_hw_info, +}; + +#endif /* _CAM_VFE_LITE78X_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite88x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite88x.h new file mode 100644 index 0000000000..809c88cf27 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite88x.h @@ -0,0 +1,622 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#ifndef _CAM_VFE_LITE88X_H_ +#define _CAM_VFE_LITE88X_H_ +#include "cam_vfe_camif_ver3.h" +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" +#include "cam_vfe_lite78x.h" + +#define CAM_VFE_88X_NUM_DBG_REG 5 + +/* Offsets might not match due to csid secure regs at beginning of reg space */ + +static struct cam_irq_register_set vfe_lite88x_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x00001024, + .clear_reg_offset = 0x0000102C, + .status_reg_offset = 0x0000101C, + .set_reg_offset = 0x00001034, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + { + .mask_reg_offset = 0x00001028, + .clear_reg_offset = 0x00001030, + .status_reg_offset = 0x00001020, + }, +}; + +static struct cam_irq_controller_reg_info vfe_lite88x_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe_lite88x_top_irq_reg_set, + .global_irq_cmd_offset = 0x00001038, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t vfe_lite88x_top_debug_reg[] = { + 0x0000105C, + 0x00001060, + 0x00001064, + 0x00001068, + 0x0000106C, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe_lite88x_top_common_reg = { + .hw_version = 0x00001000, + .hw_capability = 0x00001004, + .core_cgc_ovd_0 = 0x00001014, + .ahb_cgc_ovd = 0x00001018, + .core_cfg_0 = 0x0000103C, + .diag_config = 0x00001040, + .diag_sensor_status = {0x00001044, 0x00001048}, + .diag_frm_cnt_status = {0x0000104C, 0x00001050}, + .ipp_violation_status = 0x00001054, + .bus_violation_status = 0x00001264, + .bus_overflow_status = 0x00001268, + .top_debug_cfg = 0x00001074, + .num_top_debug_reg = CAM_VFE_88X_NUM_DBG_REG, + .top_debug = vfe_lite88x_top_debug_reg, + .frame_timing_irq_reg_idx = CAM_IFE_IRQ_CAMIF_REG_STATUS1, +}; + +static struct cam_vfe_ver4_path_reg_data vfe_lite88x_ipp_reg_data = { + .sof_irq_mask = 0x1, + .eof_irq_mask = 0x2, + .error_irq_mask = 0x6, + 
.enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + .ipp_violation_mask = 0x10, + .diag_violation_mask = 0x4, + .diag_sensor_sel_mask = 0x8, + .diag_frm_count_mask_0 = 0x200, +}; + +static struct cam_vfe_ver4_path_reg_data vfe_lite88x_rdi_reg_data[4] = { + { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x0, + .diag_frm_count_mask_0 = 0x20, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x10, + .eof_irq_mask = 0x20, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x2, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x40, + .eof_irq_mask = 0x80, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x4, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, +}; + +static struct cam_vfe_ver4_path_hw_info + vfe_lite88x_rdi_hw_info[] = { + { + .common_reg = &vfe_lite88x_top_common_reg, + .reg_data = &vfe_lite88x_rdi_reg_data[0], + }, + { + .common_reg = &vfe_lite88x_top_common_reg, + .reg_data = &vfe_lite88x_rdi_reg_data[1], + }, + { + .common_reg = &vfe_lite88x_top_common_reg, + .reg_data = &vfe_lite88x_rdi_reg_data[2], + }, + { + .common_reg = &vfe_lite88x_top_common_reg, + .reg_data = &vfe_lite88x_rdi_reg_data[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_info vfe_lite88x_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + 
.bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_2_PIPE", + }, + { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_3_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_IPP_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite88x_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &vfe_lite88x_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &vfe_lite88x_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite88x_diag_frame_field[] = { + { + .num_fields = 4, + .field = &vfe_lite88x_diag_reg_info[4], + }, + { + .num_fields = 1, + .field = &vfe_lite88x_diag_reg_info[8], + }, +}; + +static struct cam_vfe_top_ver4_hw_info vfe_lite88x_top_hw_info = { + .common_reg = &vfe_lite88x_top_common_reg, + .rdi_hw_info = vfe_lite88x_rdi_hw_info, + .vfe_full_hw_info = { + .common_reg = &vfe_lite88x_top_common_reg, + .reg_data = &vfe_lite88x_ipp_reg_data, + }, + .ipp_module_desc = vfe_lite78x_ipp_mod_desc, + .wr_client_desc = vfe_lite78x_wr_client_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .top_debug_reg_info = &vfe78x_dbg_reg_info, + .num_rdi = ARRAY_SIZE(vfe_lite88x_rdi_hw_info), + .diag_sensor_info = vfe_lite88x_diag_sensor_field, + .diag_frame_info = vfe_lite88x_diag_frame_field, +}; + +static struct cam_irq_register_set vfe_lite88x_bus_irq_reg[1] = { + { + .mask_reg_offset = 0x00001218, + .clear_reg_offset = 0x00001220, + .status_reg_offset = 0x00001228, + }, +}; + +static uint32_t vfe_lite88x_out_port_mid[][4] = { + {8, 0, 0, 0}, + {9, 0, 0, 0}, + {10, 0, 0, 0}, + {11, 0, 0, 0}, + {12, 0, 0, 0}, + {13, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe_lite88x_bus_hw_info = { + .common_reg = { + .hw_version = 0x00001200, + .cgc_ovd = 0x00001208, + .if_frameheader_cfg = { + 0x00001234, + 0x00001238, + 0x0000123C, + 0x00001240, + 0x00001244, + }, + .pwr_iso_cfg = 0x0000125C, + 
.overflow_status_clear = 0x00001260, + .ccif_violation_status = 0x00001264, + .overflow_status = 0x00001268, + .image_size_violation_status = 0x00001270, + .debug_status_top_cfg = 0x000012F0, + .debug_status_top = 0x000012F4, + .test_bus_ctrl = 0x00001394, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = vfe_lite88x_bus_irq_reg, + .global_irq_cmd_offset = 0x00001230, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = 6, + .bus_client_reg = { + /* BUS Client 0 RDI0 */ + { + .cfg = 0x00001400, + .image_addr = 0x00001404, + .frame_incr = 0x00001408, + .image_cfg_0 = 0x0000140C, + .image_cfg_1 = 0x00001410, + .image_cfg_2 = 0x00001414, + .packer_cfg = 0x00001418, + .frame_header_addr = 0x00001420, + .frame_header_incr = 0x00001424, + .frame_header_cfg = 0x00001428, + .line_done_cfg = 0x0000142C, + .irq_subsample_period = 0x00001430, + .irq_subsample_pattern = 0x00001434, + .mmu_prefetch_cfg = 0x00001460, + .mmu_prefetch_max_offset = 0x00001464, + .system_cache_cfg = 0x00001468, + .addr_cfg = 0x00001470, + .addr_status_0 = 0x00001478, + .addr_status_1 = 0x0000147C, + .addr_status_2 = 0x00001480, + .addr_status_3 = 0x00001484, + .debug_status_cfg = 0x00001488, + .debug_status_0 = 0x0000148C, + .debug_status_1 = 0x00001490, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + 
BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 1 RDI1 */ + { + .cfg = 0x00001500, + .image_addr = 0x00001504, + .frame_incr = 0x00001508, + .image_cfg_0 = 0x0000150C, + .image_cfg_1 = 0x00001510, + .image_cfg_2 = 0x00001514, + .packer_cfg = 0x00001518, + .frame_header_addr = 0x00001520, + .frame_header_incr = 0x00001524, + .frame_header_cfg = 0x00001528, + .line_done_cfg = 0x0000152C, + .irq_subsample_period = 0x00001530, + .irq_subsample_pattern = 0x00001534, + .mmu_prefetch_cfg = 0x00001560, + .mmu_prefetch_max_offset = 0x00001564, + .system_cache_cfg = 0x00001568, + .addr_cfg = 0x00001570, + .addr_status_0 = 0x00001578, + .addr_status_1 = 0x0000157C, + .addr_status_2 = 0x00001580, + .addr_status_3 = 0x00001584, + .debug_status_cfg = 0x00001588, + .debug_status_0 = 0x0000158C, + .debug_status_1 = 0x00001590, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 2 RDI2 */ + { + .cfg = 0x00001600, + .image_addr = 0x00001604, + .frame_incr = 0x00001608, + .image_cfg_0 = 0x0000160C, + .image_cfg_1 = 0x00001610, + .image_cfg_2 = 0x00001614, + .packer_cfg = 0x00001618, + .frame_header_addr = 0x00001620, + .frame_header_incr = 0x00001624, + .frame_header_cfg = 0x00001628, + .line_done_cfg = 0x0000162C, + .irq_subsample_period = 0x00001630, + .irq_subsample_pattern = 0x00001634, + .mmu_prefetch_cfg = 0x00001660, + 
.mmu_prefetch_max_offset = 0x00001664, + .system_cache_cfg = 0x00001668, + .addr_cfg = 0x00001670, + .addr_status_0 = 0x00001678, + .addr_status_1 = 0x0000167C, + .addr_status_2 = 0x00001680, + .addr_status_3 = 0x00001684, + .debug_status_cfg = 0x00001688, + .debug_status_0 = 0x0000168C, + .debug_status_1 = 0x00001690, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 3 RDI3 */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .line_done_cfg = 0x0000172C, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .addr_status_0 = 0x00001778, + .addr_status_1 = 0x0000177C, + .addr_status_2 = 0x00001780, + .addr_status_3 = 0x00001784, + .debug_status_cfg = 0x00001788, + .debug_status_0 = 0x0000178C, + .debug_status_1 = 0x00001790, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | 
BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 4 Gamma */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .line_done_cfg = 0x0000182C, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .addr_status_0 = 0x00001878, + .addr_status_1 = 0x0000187C, + .addr_status_2 = 0x00001880, + .addr_status_3 = 0x00001884, + .debug_status_cfg = 0x00001888, + .debug_status_0 = 0x0000188C, + .debug_status_1 = 0x00001890, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN64) | + BIT_ULL(CAM_FORMAT_YUV422_10), + }, + /* BUS Client 5 Stats BE */ + { + .cfg = 0x00001900, + .image_addr = 
0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .line_done_cfg = 0x0000192C, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .mmu_prefetch_cfg = 0x00001960, + .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + .addr_status_0 = 0x00001978, + .addr_status_1 = 0x0000197C, + .addr_status_2 = 0x00001980, + .addr_status_3 = 0x00001984, + .debug_status_cfg = 0x00001988, + .debug_status_0 = 0x0000198C, + .debug_status_1 = 0x00001990, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + }, + .num_out = 6, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite88x_out_port_mid[0], + .num_mid = 1, + .wm_idx = { + 0, + }, + .name = { + "LITE_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite88x_out_port_mid[1], + .num_mid = 1, + .wm_idx = { + 1, + }, + .name = { + "LITE_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite88x_out_port_mid[2], + .num_mid = 1, + .wm_idx = { + 2, + }, + .name = { + "LITE_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_4, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite88x_out_port_mid[3], + .num_mid = 1, + .wm_idx = { + 3, + }, + 
.name = { + "LITE_3", + }, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe_lite88x_out_port_mid[4], + .num_mid = 1, + .wm_idx = { + 4, + }, + .name = { + "PREPROCESS_RAW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe_lite88x_out_port_mid[5], + .num_mid = 1, + .wm_idx = { + 5, + }, + .name = { + "STATS_BG", + }, + }, + }, + .num_comp_grp = 5, + .support_consumed_addr = true, + .comp_done_mask = { + BIT(0), BIT(1), BIT(2), BIT(3), BIT(4), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 34, +}; + +static struct cam_vfe_irq_hw_info vfe_lite88x_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_LITE_EXT_CSID, + .top_irq_reg = &vfe_lite88x_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe_lite88x_hw_info = { + .irq_hw_info = &vfe_lite88x_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe_lite88x_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe_lite88x_top_hw_info, +}; + +#endif /* _CAM_VFE_LITE88X_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite97x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite97x.h new file mode 100644 index 0000000000..c44b80cf3a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite97x.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_LITE97X_H_ +#define _CAM_VFE_LITE97X_H_ +#include "cam_vfe_camif_ver3.h" +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" +#include "cam_vfe_lite78x.h" +#include "cam_vfe_lite98x.h" + +static struct cam_vfe_hw_info cam_vfe_lite97x_hw_info = { + .irq_hw_info = &vfe_lite98x_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe_lite98x_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe_lite98x_top_hw_info, +}; + +#endif /* _CAM_VFE_LITE97X_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite98x.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite98x.h new file mode 100644 index 0000000000..2e7a7abe43 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite98x.h @@ -0,0 +1,624 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#ifndef _CAM_VFE_LITE98X_H_ +#define _CAM_VFE_LITE98X_H_ +#include "cam_vfe_camif_ver3.h" +#include "cam_vfe_top_ver4.h" +#include "cam_vfe_core.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_irq_controller.h" +#include "cam_vfe_lite78x.h" + +#define CAM_VFE_98X_NUM_DBG_REG 5 + +/* Offsets might not match due to csid secure regs at beginning of reg space */ + +static struct cam_irq_register_set vfe_lite98x_top_irq_reg_set[2] = { + { + .mask_reg_offset = 0x00001024, + .clear_reg_offset = 0x0000102C, + .status_reg_offset = 0x0000101C, + .set_reg_offset = 0x00001034, + .test_set_val = BIT(0), + .test_sub_val = BIT(0), + }, + { + .mask_reg_offset = 0x00001028, + .clear_reg_offset = 0x00001030, + .status_reg_offset = 0x00001020, + }, +}; + +static struct cam_irq_controller_reg_info vfe_lite98x_top_irq_reg_info = { + .num_registers = 2, + .irq_reg_set = vfe_lite98x_top_irq_reg_set, + .global_irq_cmd_offset = 0x00001038, + .global_clear_bitmask = 0x00000001, + .global_set_bitmask = 0x00000010, + .clear_all_bitmask = 0xFFFFFFFF, +}; + +static uint32_t vfe_lite98x_top_debug_reg[] = { + 0x0000105C, + 0x00001060, + 0x00001064, + 0x00001068, + 0x0000106C, +}; + +static struct cam_vfe_top_ver4_reg_offset_common vfe_lite98x_top_common_reg = { + .hw_version = 0x00001000, + .hw_capability = 0x00001004, + .core_cgc_ovd_0 = 0x00001014, + .ahb_cgc_ovd = 0x00001018, + .core_cfg_0 = 0x0000103C, + .diag_config = 0x00001040, + .diag_sensor_status = {0x00001044, 0x00001048}, + .diag_frm_cnt_status = {0x0000104C, 0x00001050}, + .ipp_violation_status = 0x00001054, + .bus_violation_status = 0x00001264, + .bus_overflow_status = 0x00001268, + .top_debug_cfg = 0x00001074, + .num_top_debug_reg = CAM_VFE_98X_NUM_DBG_REG, + .top_debug = vfe_lite98x_top_debug_reg, +}; + +static struct cam_vfe_ver4_path_reg_data vfe_lite98x_ipp_reg_data = { + .sof_irq_mask = 0x1, + .eof_irq_mask = 0x2, + .error_irq_mask = 0x6, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + 
.ipp_violation_mask = 0x10, + .diag_violation_mask = 0x4, + .diag_sensor_sel_mask = 0x8, + .diag_frm_count_mask_0 = 0x200, +}; + +static struct cam_vfe_ver4_path_reg_data vfe_lite98x_rdi_reg_data[4] = { + { + .sof_irq_mask = 0x4, + .eof_irq_mask = 0x8, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x0, + .diag_frm_count_mask_0 = 0x20, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x10, + .eof_irq_mask = 0x20, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x2, + .diag_frm_count_mask_0 = 0x40, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x40, + .eof_irq_mask = 0x80, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x4, + .diag_frm_count_mask_0 = 0x80, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, + { + .sof_irq_mask = 0x100, + .eof_irq_mask = 0x200, + .error_irq_mask = 0x0, + .diag_sensor_sel_mask = 0x6, + .diag_frm_count_mask_0 = 0x100, + .enable_diagnostic_hw = 0x1, + .top_debug_cfg_en = 0x3, + }, +}; + +static struct cam_vfe_ver4_path_hw_info + vfe_lite98x_rdi_hw_info[] = { + { + .common_reg = &vfe_lite98x_top_common_reg, + .reg_data = &vfe_lite98x_rdi_reg_data[0], + }, + { + .common_reg = &vfe_lite98x_top_common_reg, + .reg_data = &vfe_lite98x_rdi_reg_data[1], + }, + { + .common_reg = &vfe_lite98x_top_common_reg, + .reg_data = &vfe_lite98x_rdi_reg_data[2], + }, + { + .common_reg = &vfe_lite98x_top_common_reg, + .reg_data = &vfe_lite98x_rdi_reg_data[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_info vfe_lite98x_diag_reg_info[] = { + { + .bitmask = 0x3FFF, + .name = "SENSOR_HBI", + }, + { + .bitmask = 0x4000, + .name = "SENSOR_NEQ_HBI", + }, + { + .bitmask = 0x8000, + .name = "SENSOR_HBI_MIN_ERROR", + }, + { + .bitmask = 0xFFFFFF, + .name = "SENSOR_VBI", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_RDI_0_PIPE", + }, + { + .bitmask = 0xFF00, + .name = "FRAME_CNT_RDI_1_PIPE", + }, + { + .bitmask = 0xFF0000, + .name = "FRAME_CNT_RDI_2_PIPE", + }, 
+ { + .bitmask = 0xFF000000, + .name = "FRAME_CNT_RDI_3_PIPE", + }, + { + .bitmask = 0xFF, + .name = "FRAME_CNT_IPP_PIPE", + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite98x_diag_sensor_field[] = { + { + .num_fields = 3, + .field = &vfe_lite98x_diag_reg_info[0], + }, + { + .num_fields = 1, + .field = &vfe_lite98x_diag_reg_info[3], + }, +}; + +static struct cam_vfe_top_ver4_diag_reg_fields vfe_lite98x_diag_frame_field[] = { + { + .num_fields = 4, + .field = &vfe_lite98x_diag_reg_info[4], + }, + { + .num_fields = 1, + .field = &vfe_lite98x_diag_reg_info[8], + }, +}; + +static struct cam_vfe_top_ver4_hw_info vfe_lite98x_top_hw_info = { + .common_reg = &vfe_lite98x_top_common_reg, + .rdi_hw_info = vfe_lite98x_rdi_hw_info, + .vfe_full_hw_info = { + .common_reg = &vfe_lite98x_top_common_reg, + .reg_data = &vfe_lite98x_ipp_reg_data, + }, + .ipp_module_desc = vfe_lite78x_ipp_mod_desc, + .wr_client_desc = vfe_lite78x_wr_client_desc, + .num_mux = 5, + .mux_type = { + CAM_VFE_CAMIF_VER_4_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + CAM_VFE_RDI_VER_1_0, + }, + .top_debug_reg_info = &vfe78x_dbg_reg_info, + .num_rdi = ARRAY_SIZE(vfe_lite98x_rdi_hw_info), + .diag_sensor_info = vfe_lite98x_diag_sensor_field, + .diag_frame_info = vfe_lite98x_diag_frame_field, +}; + +static struct cam_irq_register_set vfe_lite98x_bus_irq_reg[1] = { + { + .mask_reg_offset = 0x00001218, + .clear_reg_offset = 0x00001220, + .status_reg_offset = 0x00001228, + }, +}; + +static uint32_t vfe_lite98x_out_port_mid[][4] = { + {8, 0, 0, 0}, + {9, 0, 0, 0}, + {10, 0, 0, 0}, + {11, 0, 0, 0}, + {12, 0, 0, 0}, + {13, 0, 0, 0}, +}; + +static struct cam_vfe_bus_ver3_hw_info vfe_lite98x_bus_hw_info = { + .common_reg = { + .hw_version = 0x00001200, + .cgc_ovd = 0x00001208, + .if_frameheader_cfg = { + 0x00001234, + 0x00001238, + 0x0000123C, + 0x00001240, + 0x00001244, + }, + .pwr_iso_cfg = 0x0000125C, + .overflow_status_clear = 0x00001260, + .ccif_violation_status = 
0x00001264, + .overflow_status = 0x00001268, + .image_size_violation_status = 0x00001270, + .debug_status_top_cfg = 0x000012F0, + .debug_status_top = 0x000012F4, + .test_bus_ctrl = 0x00001328, + .wm_mode_shift = 16, + .wm_mode_val = { 0x0, 0x1, 0x2 }, + .wm_en_shift = 0, + .frmheader_en_shift = 2, + .virtual_frm_en_shift = 1, + .irq_reg_info = { + .num_registers = 1, + .irq_reg_set = vfe_lite98x_bus_irq_reg, + .global_irq_cmd_offset = 0x00001230, + .global_clear_bitmask = 0x00000001, + }, + }, + .num_client = 6, + .bus_client_reg = { + /* BUS Client 0 RDI0 */ + { + .cfg = 0x00001700, + .image_addr = 0x00001704, + .frame_incr = 0x00001708, + .image_cfg_0 = 0x0000170C, + .image_cfg_1 = 0x00001710, + .image_cfg_2 = 0x00001714, + .packer_cfg = 0x00001718, + .frame_header_addr = 0x00001720, + .frame_header_incr = 0x00001724, + .frame_header_cfg = 0x00001728, + .line_done_cfg = 0x0000172C, + .irq_subsample_period = 0x00001730, + .irq_subsample_pattern = 0x00001734, + .mmu_prefetch_cfg = 0x00001760, + .mmu_prefetch_max_offset = 0x00001764, + .system_cache_cfg = 0x00001768, + .addr_cfg = 0x00001770, + .addr_status_0 = 0x00001790, + .addr_status_1 = 0x00001794, + .addr_status_2 = 0x00001798, + .addr_status_3 = 0x0000179C, + .debug_status_cfg = 0x0000177C, + .debug_status_0 = 0x00001780, + .debug_status_1 = 0x00001784, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_1, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10) | + 
BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 1 RDI1 */ + { + .cfg = 0x00001800, + .image_addr = 0x00001804, + .frame_incr = 0x00001808, + .image_cfg_0 = 0x0000180C, + .image_cfg_1 = 0x00001810, + .image_cfg_2 = 0x00001814, + .packer_cfg = 0x00001818, + .frame_header_addr = 0x00001820, + .frame_header_incr = 0x00001824, + .frame_header_cfg = 0x00001828, + .line_done_cfg = 0x0000182C, + .irq_subsample_period = 0x00001830, + .irq_subsample_pattern = 0x00001834, + .mmu_prefetch_cfg = 0x00001860, + .mmu_prefetch_max_offset = 0x00001864, + .system_cache_cfg = 0x00001868, + .addr_cfg = 0x00001870, + .addr_status_0 = 0x00001890, + .addr_status_1 = 0x00001894, + .addr_status_2 = 0x00001898, + .addr_status_3 = 0x0000189C, + .debug_status_cfg = 0x0000187C, + .debug_status_0 = 0x00001880, + .debug_status_1 = 0x00001884, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_2, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10)| + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 2 RDI2 */ + { + .cfg = 0x00001900, + .image_addr = 0x00001904, + .frame_incr = 0x00001908, + .image_cfg_0 = 0x0000190C, + .image_cfg_1 = 0x00001910, + .image_cfg_2 = 0x00001914, + .packer_cfg = 0x00001918, + .frame_header_addr = 0x00001920, + .frame_header_incr = 0x00001924, + .frame_header_cfg = 0x00001928, + .line_done_cfg = 0x0000192C, + .irq_subsample_period = 0x00001930, + .irq_subsample_pattern = 0x00001934, + .mmu_prefetch_cfg = 0x00001960, 
+ .mmu_prefetch_max_offset = 0x00001964, + .system_cache_cfg = 0x00001968, + .addr_cfg = 0x00001970, + .addr_status_0 = 0x00001990, + .addr_status_1 = 0x00001994, + .addr_status_2 = 0x00001998, + .addr_status_3 = 0x0000199C, + .debug_status_cfg = 0x0000197C, + .debug_status_0 = 0x00001980, + .debug_status_1 = 0x00001984, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_3, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10)| + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 3 RDI3 */ + { + .cfg = 0x00001A00, + .image_addr = 0x00001A04, + .frame_incr = 0x00001A08, + .image_cfg_0 = 0x00001A0C, + .image_cfg_1 = 0x00001A10, + .image_cfg_2 = 0x00001A14, + .packer_cfg = 0x00001A18, + .frame_header_addr = 0x00001A20, + .frame_header_incr = 0x00001A24, + .frame_header_cfg = 0x00001A28, + .line_done_cfg = 0x00001A2C, + .irq_subsample_period = 0x00001A30, + .irq_subsample_pattern = 0x00001A34, + .mmu_prefetch_cfg = 0x00001A60, + .mmu_prefetch_max_offset = 0x00001A64, + .system_cache_cfg = 0x00001A68, + .addr_cfg = 0x00001A70, + .addr_status_0 = 0x00001A90, + .addr_status_1 = 0x00001A94, + .addr_status_2 = 0x00001A98, + .addr_status_3 = 0x00001A9C, + .debug_status_cfg = 0x00001A7C, + .debug_status_0 = 0x00001A80, + .debug_status_1 = 0x00001A84, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_4, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | 
BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN64) | BIT_ULL(CAM_FORMAT_YUV422_10)| + BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 4 Gamma */ + { + .cfg = 0x00001B00, + .image_addr = 0x00001B04, + .frame_incr = 0x00001B08, + .image_cfg_0 = 0x00001B0C, + .image_cfg_1 = 0x00001B10, + .image_cfg_2 = 0x00001B14, + .packer_cfg = 0x00001B18, + .frame_header_addr = 0x00001B20, + .frame_header_incr = 0x00001B24, + .frame_header_cfg = 0x00001B28, + .line_done_cfg = 0x00001B2C, + .irq_subsample_period = 0x00001B30, + .irq_subsample_pattern = 0x00001B34, + .mmu_prefetch_cfg = 0x00001B60, + .mmu_prefetch_max_offset = 0x00001B64, + .system_cache_cfg = 0x00001B68, + .addr_cfg = 0x00001B70, + .addr_status_0 = 0x00001B90, + .addr_status_1 = 0x00001B94, + .addr_status_2 = 0x00001B98, + .addr_status_3 = 0x00001B9C, + .debug_status_cfg = 0x00001B7C, + .debug_status_0 = 0x00001B80, + .debug_status_1 = 0x00001B84, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_MIPI_RAW_10) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_6) | BIT_ULL(CAM_FORMAT_MIPI_RAW_8) | + BIT_ULL(CAM_FORMAT_YUV422) | BIT_ULL(CAM_FORMAT_MIPI_RAW_12) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_14) | BIT_ULL(CAM_FORMAT_MIPI_RAW_16) | + BIT_ULL(CAM_FORMAT_MIPI_RAW_20) | BIT_ULL(CAM_FORMAT_PLAIN128) | + BIT_ULL(CAM_FORMAT_PLAIN32_20) | BIT_ULL(CAM_FORMAT_PLAIN8) | + BIT_ULL(CAM_FORMAT_PLAIN16_10) | BIT_ULL(CAM_FORMAT_PLAIN16_12) | + BIT_ULL(CAM_FORMAT_PLAIN16_14) | BIT_ULL(CAM_FORMAT_PLAIN16_16) | + BIT_ULL(CAM_FORMAT_PLAIN16_8) | BIT_ULL(CAM_FORMAT_PLAIN64) | + 
BIT_ULL(CAM_FORMAT_YUV422_10) | BIT_ULL(CAM_FORMAT_PLAIN16_10_LSB), + }, + /* BUS Client 5 Stats BE */ + { + .cfg = 0x00001C00, + .image_addr = 0x00001C04, + .frame_incr = 0x00001C08, + .image_cfg_0 = 0x00001C0C, + .image_cfg_1 = 0x00001C10, + .image_cfg_2 = 0x00001C14, + .packer_cfg = 0x00001C18, + .frame_header_addr = 0x00001C20, + .frame_header_incr = 0x00001C24, + .frame_header_cfg = 0x00001C28, + .line_done_cfg = 0x00001C2C, + .irq_subsample_period = 0x00001C30, + .irq_subsample_pattern = 0x00001C34, + .mmu_prefetch_cfg = 0x00001C60, + .mmu_prefetch_max_offset = 0x00001C64, + .system_cache_cfg = 0x00001C68, + .addr_cfg = 0x00001C70, + .addr_status_0 = 0x00001C90, + .addr_status_1 = 0x00001C94, + .addr_status_2 = 0x00001C98, + .addr_status_3 = 0x00001C9C, + .debug_status_cfg = 0x00001C7C, + .debug_status_0 = 0x00001C80, + .debug_status_1 = 0x00001C84, + .comp_group = CAM_VFE_BUS_VER3_COMP_GRP_0, + .ubwc_regs = NULL, + .supported_formats = BIT_ULL(CAM_FORMAT_PLAIN64), + }, + }, + .num_out = 6, + .vfe_out_hw_info = { + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_1, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite98x_out_port_mid[0], + .num_mid = 1, + .wm_idx = { + 0, + }, + .name = { + "LITE_0", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_2, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite98x_out_port_mid[1], + .num_mid = 1, + .wm_idx = { + 1, + }, + .name = { + "LITE_1", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_3, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite98x_out_port_mid[2], + .num_mid = 1, + .wm_idx = { + 2, + }, + .name = { + "LITE_2", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + .max_width = -1, + .max_height = -1, + .source_group = 
CAM_VFE_BUS_VER3_SRC_GRP_4, + .num_wm = 1, + .line_based = 1, + .mid = vfe_lite98x_out_port_mid[3], + .num_mid = 1, + .wm_idx = { + 3, + }, + .name = { + "LITE_3", + }, + }, + { + .vfe_out_type = + CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW, + .max_width = 1920, + .max_height = 1080, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe_lite98x_out_port_mid[4], + .num_mid = 1, + .wm_idx = { + 4, + }, + .name = { + "PREPROCESS_RAW", + }, + }, + { + .vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG, + .max_width = -1, + .max_height = -1, + .source_group = CAM_VFE_BUS_VER3_SRC_GRP_0, + .num_wm = 1, + .mid = vfe_lite98x_out_port_mid[5], + .num_mid = 1, + .wm_idx = { + 5, + }, + .name = { + "STATS_BG", + }, + }, + }, + .num_comp_grp = 5, + .support_consumed_addr = true, + .comp_done_mask = { + BIT(0), BIT(1), BIT(2), BIT(3), BIT(4), + }, + .top_irq_shift = 0, + .max_out_res = CAM_ISP_IFE_OUT_RES_BASE + 34, +}; + +static struct cam_vfe_irq_hw_info vfe_lite98x_irq_hw_info = { + .reset_mask = 0, + .supported_irq = CAM_VFE_HW_IRQ_CAP_LITE_EXT_CSID, + .top_irq_reg = &vfe_lite98x_top_irq_reg_info, +}; + +static struct cam_vfe_hw_info cam_vfe_lite98x_hw_info = { + .irq_hw_info = &vfe_lite98x_irq_hw_info, + + .bus_version = CAM_VFE_BUS_VER_3_0, + .bus_hw_info = &vfe_lite98x_bus_hw_info, + + .top_version = CAM_VFE_TOP_VER_4_0, + .top_hw_info = &vfe_lite98x_top_hw_info, +}; + +#endif /* _CAM_VFE_LITE98X_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c new file mode 100644 index 0000000000..2b902d753a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include "cam_vfe_bus.h" +#include "cam_vfe_bus_ver1.h" +#include "cam_vfe_bus_ver2.h" +#include "cam_vfe_bus_rd_ver1.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_debug_util.h" + +int cam_vfe_bus_init(uint32_t bus_version, + int bus_type, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus) +{ + int rc = -ENODEV; + + switch (bus_type) { + case BUS_TYPE_WR: + switch (bus_version) { + case CAM_VFE_BUS_VER_2_0: + rc = cam_vfe_bus_ver2_init(soc_info, hw_intf, + bus_hw_info, vfe_irq_controller, vfe_bus); + break; + case CAM_VFE_BUS_VER_3_0: + rc = cam_vfe_bus_ver3_init(soc_info, hw_intf, + bus_hw_info, vfe_irq_controller, vfe_bus); + break; + default: + CAM_ERR(CAM_ISP, "Unsupported Bus WR Version 0x%x", + bus_version); + break; + } + break; + case BUS_TYPE_RD: + switch (bus_version) { + case CAM_VFE_BUS_RD_VER_1_0: + /* Call vfe bus rd init function */ + rc = cam_vfe_bus_rd_ver1_init(soc_info, hw_intf, + bus_hw_info, vfe_irq_controller, vfe_bus); + break; + default: + CAM_ERR(CAM_ISP, "Unsupported Bus RD Version 0x%x", + bus_version); + break; + } + break; + default: + CAM_ERR(CAM_ISP, "Unsupported Bus type %d", bus_type); + break; + } + + return rc; +} + +int cam_vfe_bus_deinit(uint32_t bus_version, + struct cam_vfe_bus **vfe_bus) +{ + int rc = -ENODEV; + + switch (bus_version) { + case CAM_VFE_BUS_VER_2_0: + rc = cam_vfe_bus_ver2_deinit(vfe_bus); + break; + case CAM_VFE_BUS_VER_3_0: + rc = cam_vfe_bus_ver3_deinit(vfe_bus); + break; + default: + CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version); + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c new file mode 100644 index 0000000000..dd017dafed --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c @@ -0,0 +1,1392 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

/*
 * NOTE(review): the angle-bracket header names of the next three system
 * includes were lost when this patch was extracted (likely eaten as
 * markup). Restore them from the upstream file — do not guess here.
 */
#include
#include

#include

#include "cam_io_util.h"
#include "cam_debug_util.h"
#include "cam_cdm_util.h"
#include "cam_hw_intf.h"
#include "cam_ife_hw_mgr.h"
#include "cam_vfe_hw_intf.h"
#include "cam_irq_controller.h"
#include "cam_tasklet_util.h"
#include "cam_vfe_bus.h"
#include "cam_vfe_bus_rd_ver1.h"
#include "cam_vfe_core.h"
#include "cam_debug_util.h"
#include "cam_cpas_api.h"
#include "cam_vmrm_interface.h"
#include "cam_mem_mgr_api.h"

static const char drv_name[] = "vfe_bus_rd";

/* Round value up to the next multiple of alignment (integer math). */
#define ALIGNUP(value, alignment) \
	((value + alignment - 1) / alignment * alignment)

/* Number of 32-bit registers in one bus-read client register block. */
#define MAX_BUF_UPDATE_REG_NUM \
	(sizeof(struct cam_vfe_bus_rd_ver1_reg_offset_bus_client)/4)

/* Capacity (in words) of the offset/value scratch used for CDM commands. */
#define MAX_REG_VAL_PAIR_SIZE \
	(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)

/* Default latency buffer allocation programmed at RM acquire time. */
#define BUS_RD_VER1_DEFAULT_LATENCY_BUF_ALLOC 512

/* HW encodings programmed into the read-master unpacker_cfg register. */
enum cam_vfe_bus_rd_ver1_unpacker_format {
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_128 = 0x0,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_8 = 0x1,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_10BPP = 0x2,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_12BPP = 0x3,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_14BPP = 0x4,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_32_20BPP = 0x5,
	BUS_RD_VER1_UNPACKER_FMT_ARGB_10 = 0x6,
	BUS_RD_VER1_UNPACKER_FMT_ARGB_12 = 0x7,
	BUS_RD_VER1_UNPACKER_FMT_ARGB_14 = 0x8,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_32 = 0x9,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_64 = 0xA,
	BUS_RD_VER1_UNPACKER_FMT_TP_10 = 0xB,
	BUS_RD_VER1_UNPACKER_FMT_MIPI8 = 0xC,
	BUS_RD_VER1_UNPACKER_FMT_MIPI10 = 0xD,
	BUS_RD_VER1_UNPACKER_FMT_MIPI12 = 0xE,
	BUS_RD_VER1_UNPACKER_FMT_MIPI14 = 0xF,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_16BPP = 0x10,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_128_ODD_EVEN = 0x11,
	BUS_RD_VER1_UNPACKER_FMT_PLAIN_8_ODD_EVEN = 0x12,
	BUS_RD_VER1_UNPACKER_FMT_MAX = 0x13,
};

/* State shared by all read-master clients of one VFE bus-read instance. */
struct cam_vfe_bus_rd_ver1_common_data {
	uint32_t core_index;
	void __iomem *mem_base;
	struct cam_hw_intf *hw_intf;
	void *bus_irq_controller;
	void *vfe_irq_controller;
	struct cam_vfe_bus_rd_ver1_reg_offset_common *common_reg;
	/* Scratch for building CDM reg/value command pairs */
	uint32_t io_buf_update[
		MAX_REG_VAL_PAIR_SIZE];

	struct list_head free_payload_list;
	spinlock_t spin_lock;
	struct mutex bus_mutex;
	uint32_t secure_mode;
	uint32_t num_sec_out;
	/* Frame-sync config cached for start; see update_fs_cfg */
	uint32_t fs_sync_enable;
	uint32_t go_cmd_sel;
	cam_hw_mgr_event_cb_func event_cb;
};

/* Per read-master (RM) client state mirroring its register block. */
struct cam_vfe_bus_rd_ver1_rm_resource_data {
	uint32_t index;
	struct cam_vfe_bus_rd_ver1_common_data *common_data;
	struct cam_vfe_bus_rd_ver1_reg_offset_bus_client *hw_regs;
	void *ctx;

	bool init_cfg_done;

	uint32_t offset;

	uint32_t min_vbi;
	uint32_t fs_mode;
	uint32_t hbi_count;
	uint32_t width;
	uint32_t height;
	uint32_t stride;
	uint32_t format;
	uint32_t latency_buf_allocation;
	uint32_t unpacker_cfg;
	uint32_t burst_len;

	uint32_t go_cmd_sel;
	uint32_t fs_sync_enable;
	uint32_t fs_line_sync_en;

	uint32_t en_cfg;
	uint32_t is_dual;
	uint32_t img_addr;
	uint32_t input_if_cmd;
};

/* Per bus-read path (logical resource spanning one or more RMs). */
struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data {
	uint32_t bus_rd_type;
	struct cam_vfe_bus_rd_ver1_common_data *common_data;

	uint32_t num_rm;
	struct cam_isp_resource_node *rm_res[PLANE_MAX];

	struct cam_isp_resource_node *comp_grp;
	enum cam_isp_hw_sync_mode dual_comp_sync_mode;
	uint32_t dual_hw_alternate_vfe_id;
	struct list_head vfe_bus_rd_list;

	uint32_t format;
	uint32_t max_width;
	uint32_t max_height;
	struct cam_cdm_utils_ops *cdm_util_ops;
	uint32_t secure_mode;
	int irq_handle;
	void *priv;
	/* Last IRQ status word captured by the top half */
	uint32_t status;
	bool is_offline;
};

/* Top-level private data of the bus-read HW layer. */
struct cam_vfe_bus_rd_ver1_priv {
	struct cam_vfe_bus_rd_ver1_common_data common_data;
	uint32_t num_client;
	uint32_t 
num_bus_rd_resc;

	struct cam_isp_resource_node bus_client[
		CAM_VFE_BUS_RD_VER1_MAX_CLIENTS];
	struct cam_isp_resource_node vfe_bus_rd[
		CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX];

	int irq_handle;
	void *tasklet_info;
	uint32_t top_irq_shift;
};

static int cam_vfe_bus_rd_process_cmd(
	struct cam_isp_resource_node *priv,
	uint32_t cmd_type, void *cmd_args, uint32_t arg_size);

/*
 * Convert a pixel count to a byte count for the given HW unpacker format.
 * On an unknown format, logs an error and leaves *bytes unmodified
 * (NOTE(review): callers pass a live field as *bytes, so a bad format
 * silently keeps the stale value — confirm this is intended).
 */
static void cam_vfe_bus_rd_pxls_to_bytes(uint32_t pxls, uint32_t fmt,
	uint32_t *bytes)
{
	switch (fmt) {
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_128:
		*bytes = pxls * 16;
		break;
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_8:
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_8_ODD_EVEN:
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_10BPP:
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_12BPP:
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_14BPP:
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_16BPP:
	case BUS_RD_VER1_UNPACKER_FMT_ARGB_10:
	case BUS_RD_VER1_UNPACKER_FMT_ARGB_12:
	case BUS_RD_VER1_UNPACKER_FMT_ARGB_14:
		*bytes = pxls * 2;
		break;
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_32_20BPP:
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_32:
		*bytes = pxls * 4;
		break;
	case BUS_RD_VER1_UNPACKER_FMT_PLAIN_64:
		*bytes = pxls * 8;
		break;
	case BUS_RD_VER1_UNPACKER_FMT_TP_10:
		/* TP10: 3 pixels packed into 4 bytes */
		*bytes = ALIGNUP(pxls, 3) * 4 / 3;
		break;
	case BUS_RD_VER1_UNPACKER_FMT_MIPI8:
		*bytes = pxls;
		break;
	case BUS_RD_VER1_UNPACKER_FMT_MIPI10:
		/* MIPI10: 4 pixels packed into 5 bytes */
		*bytes = ALIGNUP(pxls * 5, 4) / 4;
		break;
	case BUS_RD_VER1_UNPACKER_FMT_MIPI12:
		/* MIPI12: 2 pixels packed into 3 bytes */
		*bytes = ALIGNUP(pxls * 3, 2) / 2;
		break;
	case BUS_RD_VER1_UNPACKER_FMT_MIPI14:
		/* MIPI14: 4 pixels packed into 7 bytes */
		*bytes = ALIGNUP(pxls * 7, 4) / 4;
		break;
	default:
		CAM_ERR(CAM_ISP, "Invalid unpacker_fmt:%d", fmt);
		break;
	}
}

/* Map a UAPI CAM_FORMAT_* code to the HW unpacker encoding (MAX = invalid). */
static enum cam_vfe_bus_rd_ver1_unpacker_format
	cam_vfe_bus_get_unpacker_fmt(uint32_t unpack_fmt)
{
	switch (unpack_fmt) {
	case CAM_FORMAT_PLAIN128:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_128;
	case CAM_FORMAT_PLAIN8:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_8;
	case CAM_FORMAT_PLAIN16_10:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_10BPP;
	case CAM_FORMAT_PLAIN16_12:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_12BPP;
	case CAM_FORMAT_PLAIN16_14:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_14BPP;
	case CAM_FORMAT_PLAIN32_20:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_32_20BPP;
	case CAM_FORMAT_ARGB_10:
		return BUS_RD_VER1_UNPACKER_FMT_ARGB_10;
	case CAM_FORMAT_ARGB_12:
		return BUS_RD_VER1_UNPACKER_FMT_ARGB_12;
	case CAM_FORMAT_ARGB_14:
		return BUS_RD_VER1_UNPACKER_FMT_ARGB_14;
	case CAM_FORMAT_PLAIN32:
	case CAM_FORMAT_ARGB:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_32;
	case CAM_FORMAT_PLAIN64:
	case CAM_FORMAT_ARGB_16:
	case CAM_FORMAT_PD10:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_64;
	case CAM_FORMAT_TP10:
		return BUS_RD_VER1_UNPACKER_FMT_TP_10;
	case CAM_FORMAT_MIPI_RAW_8:
		return BUS_RD_VER1_UNPACKER_FMT_MIPI8;
	case CAM_FORMAT_MIPI_RAW_10:
		return BUS_RD_VER1_UNPACKER_FMT_MIPI10;
	case CAM_FORMAT_MIPI_RAW_12:
		return BUS_RD_VER1_UNPACKER_FMT_MIPI12;
	case CAM_FORMAT_MIPI_RAW_14:
		return BUS_RD_VER1_UNPACKER_FMT_MIPI14;
	case CAM_FORMAT_PLAIN16_16:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_16_16BPP;
	case CAM_FORMAT_PLAIN8_SWAP:
		return BUS_RD_VER1_UNPACKER_FMT_PLAIN_8_ODD_EVEN;
	default:
		return BUS_RD_VER1_UNPACKER_FMT_MAX;
	}
}

/* Map an ISP resource type to the bus-read resource id (MAX = unsupported). */
static enum cam_vfe_bus_rd_ver1_vfe_bus_rd_type
	cam_vfe_bus_get_bus_rd_res_id(uint32_t res_type)
{
	switch (res_type) {
	case CAM_ISP_RESOURCE_VFE_BUS_RD:
		return CAM_VFE_BUS_RD_VER1_VFE_BUSRD_RDI0;
	default:
		return CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX;
	}
}

/* Number of read masters backing a bus-read resource (-EINVAL if unknown). */
static int cam_vfe_bus_get_num_rm(
	enum cam_vfe_bus_rd_ver1_vfe_bus_rd_type res_type)
{
	switch (res_type) {
	case CAM_VFE_BUS_RD_VER1_VFE_BUSRD_RDI0:
		return 1;
	default:
		CAM_ERR(CAM_ISP, "Unsupported resource_type %u", res_type);
		return -EINVAL;
	}
}

/*
 * IRQ top half: latch the first status word into the resource so the
 * tasklet bottom half can classify it outside interrupt context.
 */
static int cam_vfe_bus_rd_handle_irq_top_half(uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	struct cam_isp_resource_node *bus_rd = NULL;
	struct 
cam_vfe_bus_rd_ver1_vfe_bus_rd_data *rsrc_data = NULL;

	bus_rd = th_payload->handler_priv;
	if (!bus_rd) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
		return -ENODEV;
	}
	rsrc_data = bus_rd->res_priv;
	/* Latched here, consumed by the bottom half */
	rsrc_data->status = th_payload->evt_status_arr[0];

	CAM_DBG(CAM_ISP, "VFE:%d Bus RD IRQ status_0: 0x%X",
		rsrc_data->common_data->core_index,
		th_payload->evt_status_arr[0]);

	return 0;
}

/*
 * IRQ bottom half: classify the latched status word.
 * Bit 1 = reg-update, bit 2 = buf done (debug only); bit 16 = CCIF
 * violation, reported as CAM_VFE_IRQ_STATUS_VIOLATION.
 */
static int cam_vfe_bus_rd_handle_irq_bottom_half(
	void *handler_priv, void *evt_payload_priv)
{
	struct cam_isp_resource_node *bus_rd = NULL;
	struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *rsrc_data = NULL;

	bus_rd = (struct cam_isp_resource_node *)handler_priv;
	if (!bus_rd) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
		return -ENODEV;
	}
	rsrc_data = bus_rd->res_priv;

	if (rsrc_data->status & 0x2)
		CAM_DBG(CAM_ISP, "Received VFE:%d BUS RD RUP",
			rsrc_data->common_data->core_index);
	else if (rsrc_data->status & 0x4)
		CAM_DBG(CAM_ISP, "Received VFE:%d BUS RD BUF DONE",
			rsrc_data->common_data->core_index);
	else if (rsrc_data->status & 0x10000) {
		CAM_ERR(CAM_ISP, "Received VFE:%d BUS RD CCIF Violation",
			rsrc_data->common_data->core_index);
		return CAM_VFE_IRQ_STATUS_VIOLATION;
	}

	return CAM_VFE_IRQ_STATUS_SUCCESS;
}

/* Fixed resource-id/plane -> RM index mapping (-1 if unsupported). */
static int cam_vfe_bus_get_rm_idx(
	enum cam_vfe_bus_rd_ver1_vfe_bus_rd_type vfe_bus_rd_res_id,
	enum cam_vfe_bus_plane_type plane)
{
	int rm_idx = -1;

	switch (vfe_bus_rd_res_id) {
	case CAM_VFE_BUS_RD_VER1_VFE_BUSRD_RDI0:
		switch (plane) {
		case PLANE_Y:
			rm_idx = 0;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return rm_idx;
}

/*
 * Reserve one read-master client for the given resource/plane, take SOC
 * ownership via VMRM, and seed its per-client defaults (unpacker format,
 * latency buffer size, zero offset).
 *
 * @rm_res:           [out] reserved RM resource node
 * @client_done_mask: [out] done-IRQ bit for this client (bit rm_idx + 2)
 *
 * @Return: 0 on success; -EINVAL for an unmapped RM, -EALREADY if the RM
 *          is not in the AVAILABLE state, or VMRM acquire error.
 */
static int cam_vfe_bus_acquire_rm(
	struct cam_vfe_bus_rd_ver1_priv *ver1_bus_rd_priv,
	void *tasklet,
	void *ctx,
	enum cam_vfe_bus_rd_ver1_vfe_bus_rd_type vfe_bus_rd_res_id,
	enum cam_vfe_bus_plane_type plane,
	struct cam_isp_resource_node **rm_res,
	uint32_t *client_done_mask,
	uint32_t is_dual,
	uint32_t unpacker_fmt)
{
	uint32_t rm_idx = 0;
	struct cam_isp_resource_node *rm_res_local = NULL;
	struct cam_vfe_bus_rd_ver1_rm_resource_data *rsrc_data = NULL;
	int rc = 0;

	*rm_res = NULL;
	*client_done_mask = 0;

	/* RM index is a fixed mapping per read path; nothing is allocated.
	 * (get_rm_idx returns -1 on failure; stored unsigned it wraps large
	 * and is rejected by the bound check below.)
	 */
	rm_idx = cam_vfe_bus_get_rm_idx(vfe_bus_rd_res_id, plane);
	if (rm_idx >= ver1_bus_rd_priv->num_client) {
		CAM_ERR(CAM_ISP, "Unsupported VFE RM:%d plane:%d",
			vfe_bus_rd_res_id, plane);
		return -EINVAL;
	}

	rm_res_local = &ver1_bus_rd_priv->bus_client[rm_idx];
	if (rm_res_local->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		CAM_ERR(CAM_ISP, "VFE:%d RM:%d res not available state:%d",
			ver1_bus_rd_priv->common_data.core_index, rm_idx,
			rm_res_local->res_state);
		return -EALREADY;
	}

	/* Acquire ownership */
	rc = cam_vmrm_soc_acquire_resources(
		CAM_HW_ID_IFE0 + ver1_bus_rd_priv->common_data.core_index);
	if (rc) {
		CAM_ERR(CAM_ISP, "VFE[%u] acquire ownership failed",
			ver1_bus_rd_priv->common_data.core_index);
		return rc;
	}

	rm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	rm_res_local->tasklet_info = tasklet;

	rsrc_data = rm_res_local->res_priv;
	rsrc_data->ctx = ctx;
	rsrc_data->is_dual = is_dual;
	rsrc_data->unpacker_cfg = cam_vfe_bus_get_unpacker_fmt(unpacker_fmt);
	rsrc_data->latency_buf_allocation =
		BUS_RD_VER1_DEFAULT_LATENCY_BUF_ALLOC;
	/* Set RM offset value to default */
	rsrc_data->offset = 0;

	*client_done_mask = (1 << (rm_idx + 2));
	*rm_res = rm_res_local;

	CAM_DBG(CAM_ISP, "VFE:%d RM:%d Acquired",
		rsrc_data->common_data->core_index, rsrc_data->index);
	return 0;
}

/*
 * Release a reserved RM: clear its cached config, return it to the
 * AVAILABLE state, then drop the VMRM SOC ownership taken at acquire.
 */
static int cam_vfe_bus_release_rm(void *bus_priv,
	struct cam_isp_resource_node *rm_res)
{
	int rc = 0;
	struct cam_vfe_bus_rd_ver1_rm_resource_data *rsrc_data =
		rm_res->res_priv;

	rsrc_data->offset = 0;
	rsrc_data->width = 0;
	rsrc_data->height = 0;
	rsrc_data->stride = 0;
	rsrc_data->format = 0;
	rsrc_data->unpacker_cfg = 0;
	rsrc_data->burst_len = 0;
	rsrc_data->init_cfg_done = false;
	rsrc_data->en_cfg = 0;
	rsrc_data->is_dual = 0;

	rm_res->tasklet_info = NULL;
	rm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

	rc = cam_vmrm_soc_release_resources(
		CAM_HW_ID_IFE0 + rsrc_data->common_data->core_index);
	if (rc) {
		CAM_ERR(CAM_ISP, "VFE[%u] vmrm soc release resources failed",
			rsrc_data->common_data->core_index);
		return rc;
	}

	CAM_DBG(CAM_ISP, "VFE:%d RM:%d released",
		rsrc_data->common_data->core_index, rsrc_data->index);
	return rc;
}

/*
 * Program the RM registers from the cached config and enable the client.
 * buf_size packs width into bits [15:0] and height into bits [31:16].
 *
 * NOTE(review): the stride register is written with rm_data->width, not
 * rm_data->stride — confirm whether the HW expects width-in-bytes here
 * (width is converted to bytes in update_rm) or this should be stride.
 */
static int cam_vfe_bus_start_rm(struct cam_isp_resource_node *rm_res)
{
	struct cam_vfe_bus_rd_ver1_rm_resource_data *rm_data;
	struct cam_vfe_bus_rd_ver1_common_data *common_data;
	uint32_t buf_size;

	rm_data = rm_res->res_priv;
	common_data = rm_data->common_data;

	buf_size = ((rm_data->width)&(0x0000FFFF)) |
		((rm_data->height<<16)&(0xFFFF0000));
	cam_io_w_mb(buf_size, common_data->mem_base +
		rm_data->hw_regs->buf_size);
	cam_io_w_mb(rm_data->width, common_data->mem_base +
		rm_data->hw_regs->stride);
	cam_io_w_mb(rm_data->unpacker_cfg, common_data->mem_base +
		rm_data->hw_regs->unpacker_cfg);
	cam_io_w_mb(rm_data->latency_buf_allocation, common_data->mem_base +
		rm_data->hw_regs->latency_buf_allocation);
	cam_io_w_mb(0x1, common_data->mem_base + rm_data->hw_regs->cfg);

	rm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	CAM_DBG(CAM_ISP,
		"Start VFE:%d RM:%d offset:0x%X en_cfg:0x%X width:%d height:%d",
		rm_data->common_data->core_index, rm_data->index,
		(uint32_t) rm_data->hw_regs->cfg, rm_data->en_cfg,
		rm_data->width, rm_data->height);
	CAM_DBG(CAM_ISP, "RM:%d pk_fmt:%d stride:%d", rm_data->index,
		rm_data->unpacker_cfg, rm_data->stride);

	return 0;
}

/* Disable the RM client in HW and drop it back to the RESERVED state. */
static int cam_vfe_bus_stop_rm(struct cam_isp_resource_node *rm_res)
{
	int rc = 0;
	struct cam_vfe_bus_rd_ver1_rm_resource_data *rsrc_data =
		rm_res->res_priv;
	struct cam_vfe_bus_rd_ver1_common_data *common_data =
		rsrc_data->common_data;

	/* 
Disable RM */ + cam_io_w_mb(0x0, common_data->mem_base + rsrc_data->hw_regs->cfg); + + rm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + rsrc_data->init_cfg_done = false; + + CAM_DBG(CAM_ISP, "VFE:%d RM:%d stopped", + rsrc_data->common_data->core_index, rsrc_data->index); + + return rc; +} + +static int cam_vfe_bus_init_rm_resource(uint32_t index, + struct cam_vfe_bus_rd_ver1_priv *ver1_bus_rd_priv, + struct cam_vfe_bus_rd_ver1_hw_info *bus_rd_hw_info, + struct cam_isp_resource_node *rm_res) +{ + struct cam_vfe_bus_rd_ver1_rm_resource_data *rsrc_data; + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_rd_ver1_rm_resource_data), + GFP_KERNEL); + if (!rsrc_data) { + CAM_DBG(CAM_ISP, "Failed to alloc VFE:%d RM res priv", + ver1_bus_rd_priv->common_data.core_index); + return -ENOMEM; + } + rm_res->res_priv = rsrc_data; + + rsrc_data->index = index; + rsrc_data->hw_regs = &bus_rd_hw_info->bus_client_reg[index]; + rsrc_data->common_data = &ver1_bus_rd_priv->common_data; + + rm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&rm_res->list); + + rm_res->start = cam_vfe_bus_start_rm; + rm_res->stop = cam_vfe_bus_stop_rm; + rm_res->hw_intf = ver1_bus_rd_priv->common_data.hw_intf; + + return 0; +} + +static int cam_vfe_bus_deinit_rm_resource( + struct cam_isp_resource_node *rm_res) +{ + struct cam_vfe_bus_rd_ver1_rm_resource_data *rsrc_data; + + rm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&rm_res->list); + + rm_res->start = NULL; + rm_res->stop = NULL; + rm_res->top_half_handler = NULL; + rm_res->bottom_half_handler = NULL; + rm_res->hw_intf = NULL; + + rsrc_data = rm_res->res_priv; + rm_res->res_priv = NULL; + if (!rsrc_data) + return -ENOMEM; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static int cam_vfe_bus_rd_get_secure_mode(void *priv, void *cmd_args, + uint32_t arg_size) +{ + return 0; +} + +static int cam_vfe_bus_acquire_vfe_bus_rd(void *bus_priv, void *acquire_args, + uint32_t args_size) +{ + int rc = 
-ENODEV; + int i; + enum cam_vfe_bus_rd_ver1_vfe_bus_rd_type bus_rd_res_id; + int num_rm; + uint32_t client_done_mask; + struct cam_vfe_bus_rd_ver1_priv *ver1_bus_rd_priv = + bus_priv; + struct cam_vfe_acquire_args *acq_args = acquire_args; + struct cam_vfe_hw_vfe_bus_rd_acquire_args *bus_rd_acquire_args; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *rsrc_data = NULL; + + if (!bus_priv || !acquire_args) { + CAM_ERR(CAM_ISP, "Invalid Param"); + return -EINVAL; + } + + bus_rd_acquire_args = &acq_args->vfe_bus_rd; + + bus_rd_res_id = cam_vfe_bus_get_bus_rd_res_id( + acq_args->rsrc_type); + if (bus_rd_res_id == CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX) + return -ENODEV; + + num_rm = cam_vfe_bus_get_num_rm(bus_rd_res_id); + if (num_rm < 1) + return -EINVAL; + + rsrc_node = &ver1_bus_rd_priv->vfe_bus_rd[bus_rd_res_id]; + if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, "VFE:%d RM:0x%x not available state:%d", + ver1_bus_rd_priv->common_data.core_index, + acq_args->rsrc_type, rsrc_node->res_state); + return -EBUSY; + } + + rsrc_node->res_id = acq_args->rsrc_type; + rsrc_data = rsrc_node->res_priv; + + CAM_DBG(CAM_ISP, "VFE:%d acquire RD type:0x%x", + rsrc_data->common_data->core_index, acq_args->rsrc_type); + + rsrc_data->num_rm = num_rm; + rsrc_node->tasklet_info = acq_args->tasklet; + ver1_bus_rd_priv->tasklet_info = acq_args->tasklet; + rsrc_node->cdm_ops = bus_rd_acquire_args->cdm_ops; + rsrc_data->cdm_util_ops = bus_rd_acquire_args->cdm_ops; + rsrc_data->common_data->event_cb = acq_args->event_cb; + rsrc_data->priv = acq_args->priv; + rsrc_data->is_offline = bus_rd_acquire_args->is_offline; + + for (i = 0; i < num_rm; i++) { + rc = cam_vfe_bus_acquire_rm(ver1_bus_rd_priv, + acq_args->tasklet, + acq_args->priv, + bus_rd_res_id, + i, + &rsrc_data->rm_res[i], + &client_done_mask, + bus_rd_acquire_args->is_dual, + bus_rd_acquire_args->unpacker_fmt); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d RM:%d 
acquire failed rc:%d", + rsrc_data->common_data->core_index, + bus_rd_res_id, rc); + goto release_rm; + } + } + + rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + bus_rd_acquire_args->rsrc_node = rsrc_node; + + CAM_DBG(CAM_ISP, "VFE:%d acquire RD 0x%x successful", + rsrc_data->common_data->core_index, acq_args->rsrc_type); + return rc; + +release_rm: + for (i--; i >= 0; i--) + cam_vfe_bus_release_rm(ver1_bus_rd_priv, rsrc_data->rm_res[i]); + return rc; +} + +static int cam_vfe_bus_release_vfe_bus_rd(void *bus_priv, void *release_args, + uint32_t args_size) +{ + uint32_t i; + struct cam_isp_resource_node *vfe_bus_rd = NULL; + struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *rsrc_data = NULL; + + if (!bus_priv || !release_args) { + CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK", + bus_priv, release_args); + return -EINVAL; + } + + vfe_bus_rd = release_args; + rsrc_data = vfe_bus_rd->res_priv; + + if (vfe_bus_rd->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, + "VFE:%d RD type:0x%x invalid resource state:%d", + rsrc_data->common_data->core_index, + vfe_bus_rd->res_id, vfe_bus_rd->res_state); + } + + for (i = 0; i < rsrc_data->num_rm; i++) + cam_vfe_bus_release_rm(bus_priv, rsrc_data->rm_res[i]); + rsrc_data->num_rm = 0; + + vfe_bus_rd->tasklet_info = NULL; + vfe_bus_rd->cdm_ops = NULL; + rsrc_data->cdm_util_ops = NULL; + + if (vfe_bus_rd->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) + vfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + return 0; +} + +static int cam_vfe_bus_start_vfe_bus_rd( + struct cam_isp_resource_node *vfe_bus_rd) +{ + int rc = 0, i; + struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *rsrc_data = NULL; + struct cam_vfe_bus_rd_ver1_common_data *common_data = NULL; + uint32_t irq_reg_mask[1] = {0x6}, val = 0; + + if (!vfe_bus_rd) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + + rsrc_data = vfe_bus_rd->res_priv; + common_data = rsrc_data->common_data; + + CAM_DBG(CAM_ISP, "VFE:%d start RD 
type:0x%x", vfe_bus_rd->res_id); + + if (vfe_bus_rd->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Invalid resource state:%d", + vfe_bus_rd->res_state); + return -EACCES; + } + + if (!rsrc_data->is_offline) { + val = (common_data->fs_sync_enable << 5) | + (common_data->go_cmd_sel << 4); + cam_io_w_mb(val, common_data->mem_base + + common_data->common_reg->input_if_cmd); + } + + for (i = 0; i < rsrc_data->num_rm; i++) + rc = cam_vfe_bus_start_rm(rsrc_data->rm_res[i]); + + rsrc_data->irq_handle = cam_irq_controller_subscribe_irq( + common_data->bus_irq_controller, + CAM_IRQ_PRIORITY_1, + irq_reg_mask, + vfe_bus_rd, + cam_vfe_bus_rd_handle_irq_top_half, + cam_vfe_bus_rd_handle_irq_bottom_half, + vfe_bus_rd->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (rsrc_data->irq_handle < 1) { + CAM_ERR(CAM_ISP, "Failed to subscribe BUS RD IRQ"); + rsrc_data->irq_handle = 0; + return -EFAULT; + } + + vfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static int cam_vfe_bus_stop_vfe_bus_rd( + struct cam_isp_resource_node *vfe_bus_rd) +{ + int rc = 0, i; + struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *rsrc_data = NULL; + + if (!vfe_bus_rd) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + + rsrc_data = vfe_bus_rd->res_priv; + + if (vfe_bus_rd->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE || + vfe_bus_rd->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, "VFE:%d Bus RD 0x%x state: %d", + rsrc_data->common_data->core_index, vfe_bus_rd->res_id, + vfe_bus_rd->res_state); + return rc; + } + for (i = 0; i < rsrc_data->num_rm; i++) + rc = cam_vfe_bus_stop_rm(rsrc_data->rm_res[i]); + + if (rsrc_data->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + rsrc_data->common_data->bus_irq_controller, + rsrc_data->irq_handle); + rsrc_data->irq_handle = 0; + } + + vfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + CAM_DBG(CAM_ISP, "VFE:%d stopped Bus RD:0x%x", + 
rsrc_data->common_data->core_index, + vfe_bus_rd->res_id); + return rc; +} + +static int cam_vfe_bus_init_vfe_bus_read_resource(uint32_t index, + struct cam_vfe_bus_rd_ver1_priv *bus_rd_priv, + struct cam_vfe_bus_rd_ver1_hw_info *bus_rd_hw_info) +{ + struct cam_isp_resource_node *vfe_bus_rd = NULL; + struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *rsrc_data = NULL; + int rc = 0; + int32_t vfe_bus_rd_resc_type = + bus_rd_hw_info->vfe_bus_rd_hw_info[index].vfe_bus_rd_type; + + if (vfe_bus_rd_resc_type < 0 || + vfe_bus_rd_resc_type >= CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX) { + CAM_ERR(CAM_ISP, "Init VFE Out failed, Invalid type=%d", + vfe_bus_rd_resc_type); + return -EINVAL; + } + + vfe_bus_rd = &bus_rd_priv->vfe_bus_rd[vfe_bus_rd_resc_type]; + if (vfe_bus_rd->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE || + vfe_bus_rd->res_priv) { + CAM_ERR(CAM_ISP, + "Error. Looks like same resource is init again"); + return -EFAULT; + } + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data), + GFP_KERNEL); + if (!rsrc_data) { + rc = -ENOMEM; + return rc; + } + + vfe_bus_rd->res_priv = rsrc_data; + + vfe_bus_rd->res_type = CAM_ISP_RESOURCE_VFE_BUS_RD; + vfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&vfe_bus_rd->list); + + rsrc_data->bus_rd_type = + bus_rd_hw_info->vfe_bus_rd_hw_info[index].vfe_bus_rd_type; + rsrc_data->common_data = &bus_rd_priv->common_data; + rsrc_data->max_width = + bus_rd_hw_info->vfe_bus_rd_hw_info[index].max_width; + rsrc_data->max_height = + bus_rd_hw_info->vfe_bus_rd_hw_info[index].max_height; + rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE; + + vfe_bus_rd->start = cam_vfe_bus_start_vfe_bus_rd; + vfe_bus_rd->stop = cam_vfe_bus_stop_vfe_bus_rd; + vfe_bus_rd->process_cmd = cam_vfe_bus_rd_process_cmd; + vfe_bus_rd->hw_intf = bus_rd_priv->common_data.hw_intf; + + return 0; +} + +static int cam_vfe_bus_deinit_vfe_bus_rd_resource( + struct cam_isp_resource_node *vfe_bus_rd_res) +{ + struct 
cam_vfe_bus_ver2_vfe_out_data *rsrc_data = + vfe_bus_rd_res->res_priv; + + if (vfe_bus_rd_res->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) { + /* + * This is not error. It can happen if the resource is + * never supported in the HW. + */ + CAM_DBG(CAM_ISP, "HW%d Res %d already deinitialized"); + return 0; + } + + vfe_bus_rd_res->start = NULL; + vfe_bus_rd_res->stop = NULL; + vfe_bus_rd_res->top_half_handler = NULL; + vfe_bus_rd_res->bottom_half_handler = NULL; + vfe_bus_rd_res->hw_intf = NULL; + + vfe_bus_rd_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&vfe_bus_rd_res->list); + vfe_bus_rd_res->res_priv = NULL; + + if (!rsrc_data) + return -ENOMEM; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static int cam_vfe_bus_rd_ver1_handle_irq(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_vfe_bus_rd_ver1_priv *bus_priv; + int rc = 0; + + bus_priv = th_payload->handler_priv; + CAM_DBG(CAM_ISP, "Top Bus RD IRQ Received"); + + rc = cam_irq_controller_handle_irq(evt_id, + bus_priv->common_data.bus_irq_controller, CAM_IRQ_EVT_GROUP_0); + + return (rc == IRQ_HANDLED) ? 0 : -EINVAL; +} + +static int cam_vfe_bus_rd_update_rm(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_buf_io_cfg *io_cfg; + struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *vfe_bus_rd_data = NULL; + struct cam_vfe_bus_rd_ver1_rm_resource_data *rm_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t i, j, size = 0; + uint32_t buf_size = 0; + + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + vfe_bus_rd_data = (struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *) + update_buf->res->res_priv; + + if (!vfe_bus_rd_data || !vfe_bus_rd_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Failed! 
Invalid data");
		return -EINVAL;
	}

	cdm_util_ops = vfe_bus_rd_data->cdm_util_ops;

	CAM_DBG(CAM_ISP, "#of RM: %d", vfe_bus_rd_data->num_rm);
	if (update_buf->rm_update->num_buf != vfe_bus_rd_data->num_rm) {
		CAM_ERR(CAM_ISP,
			"Failed! Invalid number buffers:%d required:%d",
			update_buf->rm_update->num_buf,
			vfe_bus_rd_data->num_rm);
		return -EINVAL;
	}

	reg_val_pair = &vfe_bus_rd_data->common_data->io_buf_update[0];
	io_cfg = update_buf->rm_update->io_cfg;

	/* One (offset, value) pair per register; j counts words written */
	for (i = 0, j = 0; i < vfe_bus_rd_data->num_rm; i++) {
		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
			CAM_ERR(CAM_ISP,
				"reg_val_pair %d exceeds the array limit %lu",
				j, MAX_REG_VAL_PAIR_SIZE);
			return -ENOMEM;
		}

		rm_data = vfe_bus_rd_data->rm_res[i]->res_priv;

		/* update size register: width converted to bytes per the
		 * unpacker format; buf_size packs width[15:0]/height[31:16]
		 */
		cam_vfe_bus_rd_pxls_to_bytes(io_cfg->planes[i].width,
			rm_data->unpacker_cfg, &rm_data->width);
		rm_data->height = io_cfg->planes[i].height;

		buf_size = ((rm_data->width)&(0x0000FFFF)) |
			((rm_data->height<<16)&(0xFFFF0000));

		CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair,
			MAX_REG_VAL_PAIR_SIZE, j,
			rm_data->hw_regs->buf_size, buf_size);
		CAM_DBG(CAM_ISP, "VFE:%d RM:%d image_size:0x%X",
			rm_data->common_data->core_index,
			rm_data->index, reg_val_pair[j-1]);

		rm_data->stride = io_cfg->planes[i].plane_stride;
		CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair,
			MAX_REG_VAL_PAIR_SIZE, j,
			rm_data->hw_regs->stride, rm_data->stride);
		CAM_DBG(CAM_ISP, "VFE:%d RM:%d image_stride:0x%X",
			rm_data->common_data->core_index,
			rm_data->index, reg_val_pair[j-1]);

		CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair,
			MAX_REG_VAL_PAIR_SIZE, j,
			rm_data->hw_regs->image_addr,
			update_buf->rm_update->image_buf[i] +
				rm_data->offset);
		CAM_DBG(CAM_ISP, "VFE:%d RM:%d image_address:0x%X",
			rm_data->common_data->core_index,
			rm_data->index, reg_val_pair[j-1]);
		rm_data->img_addr = reg_val_pair[j-1];

	}

	num_regval_pairs = j / 2;

	if (num_regval_pairs) {
		size = cdm_util_ops->cdm_required_size_reg_random(
			num_regval_pairs);

		/* cdm util returns dwords, need to convert to bytes */
		if ((size * 4) > update_buf->cmd.size) {
			CAM_ERR(CAM_ISP,
				"Failed! Buf size:%d insufficient, expected size:%d",
				update_buf->cmd.size, size);
			return -ENOMEM;
		}

		cdm_util_ops->cdm_write_regrandom(
			update_buf->cmd.cmd_buf_addr,
			num_regval_pairs, reg_val_pair);

		/* cdm util returns dwords, need to convert to bytes */
		update_buf->cmd.used_bytes = size * 4;
	} else {
		update_buf->cmd.used_bytes = 0;
		CAM_DBG(CAM_ISP,
			"No reg val pairs. num_rms: %u",
			vfe_bus_rd_data->num_rm);
	}

	return 0;
}

/* HFR update for the RD path: nothing to program; intentional no-op. */
static int cam_vfe_bus_rd_update_hfr(void *priv, void *cmd_args,
	uint32_t arg_size)
{
	return 0;
}

/*
 * Cache the fetch-engine (frame-sync) configuration on every RM of the
 * path and on the common data; applied to HW at start time.
 */
static int cam_vfe_bus_rd_update_fs_cfg(void *priv, void *cmd_args,
	uint32_t arg_size)
{
	struct cam_vfe_bus_rd_ver1_priv *bus_priv;
	struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *vfe_bus_rd_data = NULL;
	struct cam_vfe_bus_rd_ver1_rm_resource_data *rm_data = NULL;
	struct cam_vfe_fe_update_args *fe_upd_args;
	struct cam_fe_config *fe_cfg;
	int i = 0;

	bus_priv = (struct cam_vfe_bus_rd_ver1_priv *) priv;
	fe_upd_args = (struct cam_vfe_fe_update_args *)cmd_args;

	vfe_bus_rd_data = (struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *)
		fe_upd_args->node_res->res_priv;

	if (!vfe_bus_rd_data || !vfe_bus_rd_data->cdm_util_ops) {
		CAM_ERR(CAM_ISP, "Failed! 
Invalid data"); + return -EINVAL; + } + + fe_cfg = &fe_upd_args->fe_config; + + for (i = 0; i < vfe_bus_rd_data->num_rm; i++) { + + rm_data = vfe_bus_rd_data->rm_res[i]->res_priv; + + rm_data->format = fe_cfg->format; + rm_data->unpacker_cfg = fe_cfg->unpacker_cfg; + rm_data->latency_buf_allocation = fe_cfg->latency_buf_size; + rm_data->stride = fe_cfg->stride; + rm_data->go_cmd_sel = fe_cfg->go_cmd_sel; + rm_data->fs_sync_enable = fe_cfg->fs_sync_enable; + rm_data->hbi_count = fe_cfg->hbi_count; + rm_data->fs_line_sync_en = fe_cfg->fs_line_sync_en; + rm_data->fs_mode = fe_cfg->fs_mode; + rm_data->min_vbi = fe_cfg->min_vbi; + + CAM_DBG(CAM_ISP, + "VFE:%d RM:%d format:0x%x unpacker_cfg:0x%x", + rm_data->format, rm_data->unpacker_cfg); + CAM_DBG(CAM_ISP, + "latency_buf_alloc:0x%x stride:0x%x go_cmd_sel:0x%x", + rm_data->latency_buf_allocation, rm_data->stride, + rm_data->go_cmd_sel); + CAM_DBG(CAM_ISP, + "fs_sync_en:%d hbi_cnt:0x%x fs_mode:0x%x min_vbi:0x%x", + rm_data->fs_sync_enable, rm_data->hbi_count, + rm_data->fs_mode, rm_data->min_vbi); + } + bus_priv->common_data.fs_sync_enable = fe_cfg->fs_sync_enable; + bus_priv->common_data.go_cmd_sel = fe_cfg->go_cmd_sel; + return 0; +} + +static int cam_vfe_bus_rd_add_go_cmd(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *rd_data; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + uint32_t reg_val_pair[2 * CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX]; + struct cam_vfe_bus_rd_ver1_rm_resource_data *rsrc_data; + int i = 0; + uint32_t val = 0, size = 0, offset = 0; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "invalid ars size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + rd_data = (struct cam_vfe_bus_rd_ver1_vfe_bus_rd_data *) priv; + + cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if 
(!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + for (i = 0; i < rd_data->num_rm; i++) { + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, + "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + rsrc_data = rd_data->rm_res[i]->res_priv; + offset = rsrc_data->common_data->common_reg->input_if_cmd; + val = cam_io_r_mb(rsrc_data->common_data->mem_base + offset); + val |= 0x1; + reg_val_pair[i * 2] = offset; + reg_val_pair[i * 2 + 1] = val; + CAM_DBG(CAM_ISP, "VFE:%d Bus RD go_cmd: 0x%x offset 0x%x", + rd_data->common_data->core_index, + reg_val_pair[i * 2 + 1], reg_val_pair[i * 2]); + } + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + + cdm_args->cmd.used_bytes = size * 4; + + return 0; + +} + +static int cam_vfe_bus_start_hw(void *hw_priv, + void *start_hw_args, uint32_t arg_size) +{ + return cam_vfe_bus_start_vfe_bus_rd(hw_priv); +} + +static int cam_vfe_bus_stop_hw(void *hw_priv, + void *stop_hw_args, uint32_t arg_size) +{ + return cam_vfe_bus_stop_vfe_bus_rd(hw_priv); +} + +static int cam_vfe_bus_init_hw(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_vfe_bus_rd_ver1_priv *bus_priv = hw_priv; + uint32_t top_irq_reg_mask[3] = {0}; + uint32_t offset = 0; + struct cam_vfe_bus_rd_ver1_reg_offset_common *common_reg; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + common_reg = bus_priv->common_data.common_reg; + top_irq_reg_mask[0] = (1 << bus_priv->top_irq_shift); + + bus_priv->irq_handle = cam_irq_controller_subscribe_irq( + bus_priv->common_data.vfe_irq_controller, + CAM_IRQ_PRIORITY_2, + top_irq_reg_mask, + bus_priv, + cam_vfe_bus_rd_ver1_handle_irq, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->irq_handle < 1) { + CAM_ERR(CAM_ISP, "Failed 
to subscribe BUS IRQ"); + bus_priv->irq_handle = 0; + return -EFAULT; + } + + /* no clock gating at bus input */ + offset = common_reg->cgc_ovd; + cam_io_w_mb(0x1, bus_priv->common_data.mem_base + offset); + + /* BUS_RD_TEST_BUS_CTRL */ + offset = common_reg->test_bus_ctrl; + cam_io_w_mb(0x0, bus_priv->common_data.mem_base + offset); + + return 0; +} + +static int cam_vfe_bus_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size) +{ + struct cam_vfe_bus_rd_ver1_priv *bus_priv = hw_priv; + int rc = 0; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Error: Invalid args"); + return -EINVAL; + } + + if (bus_priv->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + bus_priv->common_data.vfe_irq_controller, + bus_priv->irq_handle); + bus_priv->irq_handle = 0; + } + + return rc; +} + +static int __cam_vfe_bus_rd_process_cmd(void *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + return cam_vfe_bus_rd_process_cmd(priv, cmd_type, cmd_args, arg_size); +} + +static int cam_vfe_bus_rd_process_cmd( + struct cam_isp_resource_node *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + + if (!priv || !cmd_args) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_BUF_UPDATE_RM: + rc = cam_vfe_bus_rd_update_rm(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_HFR_UPDATE_RM: + rc = cam_vfe_bus_rd_update_hfr(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_RM_SECURE_MODE: + rc = cam_vfe_bus_rd_get_secure_mode(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD: + rc = cam_vfe_bus_rd_update_fs_cfg(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_FE_TRIGGER_CMD: + rc = cam_vfe_bus_rd_add_go_cmd(priv, cmd_args, arg_size); + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d", + cmd_type); + break; + } + + return rc; +} + +int cam_vfe_bus_rd_ver1_init( + struct 
cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus) +{ + int i, rc = 0; + struct cam_vfe_bus_rd_ver1_priv *bus_priv = NULL; + struct cam_vfe_bus *vfe_bus_local; + struct cam_vfe_bus_rd_ver1_hw_info *bus_rd_hw_info = bus_hw_info; + + if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) { + CAM_ERR(CAM_ISP, + "Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK", + soc_info, hw_intf, bus_rd_hw_info); + CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller); + rc = -EINVAL; + goto end; + } + + vfe_bus_local = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus), GFP_KERNEL); + if (!vfe_bus_local) { + CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus"); + rc = -ENOMEM; + goto end; + } + + bus_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_rd_ver1_priv), + GFP_KERNEL); + if (!bus_priv) { + CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv"); + rc = -ENOMEM; + goto free_bus_local; + } + + vfe_bus_local->bus_priv = bus_priv; + + bus_priv->num_client = bus_rd_hw_info->num_client; + bus_priv->num_bus_rd_resc = + bus_rd_hw_info->num_bus_rd_resc; + bus_priv->common_data.num_sec_out = 0; + bus_priv->common_data.secure_mode = CAM_SECURE_MODE_NON_SECURE; + bus_priv->common_data.core_index = soc_info->index; + bus_priv->common_data.mem_base = + CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX); + bus_priv->common_data.hw_intf = hw_intf; + bus_priv->common_data.vfe_irq_controller = vfe_irq_controller; + bus_priv->common_data.common_reg = &bus_rd_hw_info->common_reg; + bus_priv->top_irq_shift = bus_rd_hw_info->top_irq_shift; + + mutex_init(&bus_priv->common_data.bus_mutex); + + rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base, + &bus_rd_hw_info->common_reg.irq_reg_info, + &bus_priv->common_data.bus_irq_controller); + if (rc) { + CAM_ERR(CAM_ISP, "cam_irq_controller_init failed"); + goto free_bus_priv; + } + + for (i = 0; i < bus_priv->num_client; i++) { + rc = 
cam_vfe_bus_init_rm_resource(i, bus_priv, bus_hw_info, + &bus_priv->bus_client[i]); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Init RM failed for client:%d, rc=%d", + i, rc); + goto deinit_rm; + } + } + + for (i = 0; i < bus_priv->num_bus_rd_resc; i++) { + rc = cam_vfe_bus_init_vfe_bus_read_resource(i, bus_priv, + bus_rd_hw_info); + if (rc < 0) { + CAM_ERR(CAM_ISP, "Init VFE Out failed for client:%d, rc=%d", + i, rc); + goto deinit_vfe_bus_rd; + } + } + + spin_lock_init(&bus_priv->common_data.spin_lock); + + vfe_bus_local->hw_ops.reserve = cam_vfe_bus_acquire_vfe_bus_rd; + vfe_bus_local->hw_ops.release = cam_vfe_bus_release_vfe_bus_rd; + vfe_bus_local->hw_ops.start = cam_vfe_bus_start_hw; + vfe_bus_local->hw_ops.stop = cam_vfe_bus_stop_hw; + vfe_bus_local->hw_ops.init = cam_vfe_bus_init_hw; + vfe_bus_local->hw_ops.deinit = cam_vfe_bus_deinit_hw; + vfe_bus_local->top_half_handler = cam_vfe_bus_rd_ver1_handle_irq; + vfe_bus_local->bottom_half_handler = NULL; + vfe_bus_local->hw_ops.process_cmd = __cam_vfe_bus_rd_process_cmd; + + *vfe_bus = vfe_bus_local; + + return rc; + +deinit_vfe_bus_rd: + for (--i; i >= 0; i--) + cam_vfe_bus_deinit_vfe_bus_rd_resource( + &bus_priv->vfe_bus_rd[i]); +deinit_rm: + if (i < 0) + i = bus_priv->num_client; + for (--i; i >= 0; i--) + cam_vfe_bus_deinit_rm_resource(&bus_priv->bus_client[i]); + +free_bus_priv: + CAM_MEM_FREE(vfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(vfe_bus_local); + +end: + return rc; +} + +int cam_vfe_bus_rd_bus_ver1_deinit( + struct cam_vfe_bus **vfe_bus) +{ + int i, rc = 0; + struct cam_vfe_bus_rd_ver1_priv *bus_priv = NULL; + struct cam_vfe_bus *vfe_bus_local; + + if (!vfe_bus || !*vfe_bus) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + vfe_bus_local = *vfe_bus; + + bus_priv = vfe_bus_local->bus_priv; + if (!bus_priv) { + CAM_ERR(CAM_ISP, "bus_priv is NULL"); + rc = -ENODEV; + goto free_bus_local; + } + + for (i = 0; i < bus_priv->num_client; i++) { + rc = 
cam_vfe_bus_deinit_rm_resource(&bus_priv->bus_client[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Deinit RM failed rc=%d", rc); + } + for (i = 0; i < CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX; i++) { + rc = cam_vfe_bus_deinit_vfe_bus_rd_resource( + &bus_priv->vfe_bus_rd[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Deinit VFE Out failed rc=%d", rc); + } + + rc = cam_irq_controller_deinit( + &bus_priv->common_data.bus_irq_controller); + if (rc) + CAM_ERR(CAM_ISP, + "Deinit IRQ Controller failed rc=%d", rc); + + mutex_destroy(&bus_priv->common_data.bus_mutex); + CAM_MEM_FREE(vfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(vfe_bus_local); + + *vfe_bus = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.h new file mode 100644 index 0000000000..90229b179c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_BUS_R_VER1_H_ +#define _CAM_VFE_BUS_R_VER1_H_ + +#include "cam_irq_controller.h" +#include "cam_vfe_bus.h" + +#define CAM_VFE_BUS_RD_VER1_MAX_CLIENTS 1 + +enum cam_vfe_bus_rd_ver1_vfe_core_id { + CAM_VFE_BUS_RD_VER1_VFE_CORE_0, + CAM_VFE_BUS_RD_VER1_VFE_CORE_1, + CAM_VFE_BUS_RD_VER1_VFE_CORE_MAX, +}; + +enum cam_vfe_bus_rd_ver1_comp_grp_type { + CAM_VFE_BUS_RD_VER1_COMP_GRP_0, + CAM_VFE_BUS_RD_VER1_COMP_GRP_MAX, +}; + + +enum cam_vfe_bus_rd_ver1_vfe_bus_rd_type { + CAM_VFE_BUS_RD_VER1_VFE_BUSRD_RDI0, + CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX, +}; + +/* + * struct cam_vfe_bus_rd_ver1_reg_offset_common: + * + * @Brief: Common registers across all BUS Clients + */ +struct cam_vfe_bus_rd_ver1_reg_offset_common { + uint32_t hw_version; + uint32_t hw_capability; + uint32_t sw_reset; + uint32_t cgc_ovd; + uint32_t pwr_iso_cfg; + uint32_t input_if_cmd; + uint32_t test_bus_ctrl; + struct cam_irq_controller_reg_info irq_reg_info; +}; + +/* + * struct cam_vfe_bus_rd_ver1_reg_offset_bus_client: + * + * @Brief: Register offsets for BUS Clients + */ +struct cam_vfe_bus_rd_ver1_reg_offset_bus_client { + uint32_t cfg; + uint32_t image_addr; + uint32_t buf_size; + uint32_t stride; + uint32_t unpacker_cfg; + uint32_t latency_buf_allocation; + uint32_t burst_limit; +}; + +/* + * struct cam_vfe_bus_rd_ver1_vfe_bus_hw_info: + * + * @Brief: HW capability of VFE Bus Client + */ +struct cam_vfe_bus_rd_ver1_vfe_bus_hw_info { + enum cam_vfe_bus_rd_ver1_vfe_bus_rd_type vfe_bus_rd_type; + uint32_t max_width; + uint32_t max_height; +}; + +/* + * struct cam_vfe_bus_rd_ver1_hw_info: + * + * @Brief: HW register info for entire Bus + * + * @common_reg: Common register details + * @bus_client_reg: Bus client register info + * @comp_reg_grp: Composite group register info + * @vfe_out_hw_info: VFE output capability + */ +struct cam_vfe_bus_rd_ver1_hw_info { + struct cam_vfe_bus_rd_ver1_reg_offset_common common_reg; + uint32_t num_client; + struct 
cam_vfe_bus_rd_ver1_reg_offset_bus_client + bus_client_reg[CAM_VFE_BUS_RD_VER1_MAX_CLIENTS]; + uint32_t num_bus_rd_resc; + struct cam_vfe_bus_rd_ver1_vfe_bus_hw_info + vfe_bus_rd_hw_info[CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX]; + uint32_t top_irq_shift; +}; + +/* + * cam_vfe_bus_rd_ver1_init() + * + * @Brief: Initialize Bus layer + * + * @soc_info: Soc Information for the associated HW + * @hw_intf: HW Interface of HW to which this resource belongs + * @bus_hw_info: BUS HW info that contains details of BUS registers + * @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top + * level IRQs + * @vfe_bus: Pointer to vfe_bus structure which will be filled + * and returned on successful initialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_bus_rd_ver1_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus); + +/* + * cam_vfe_bus_rd_bus_ver1_deinit() + * + * @Brief: Deinitialize Bus layer + * + * @vfe_bus: Pointer to vfe_bus structure to deinitialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_bus_rd_bus_ver1_deinit(struct cam_vfe_bus **vfe_bus); + +#endif /* _CAM_VFE_BUS_R_VER1_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h new file mode 100644 index 0000000000..2783ddccf2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_BUS_VER1_H_ +#define _CAM_VFE_BUS_VER1_H_ + +enum cam_vfe_bus_ver1_pingpong_id { + CAM_VFE_BUS_VER1_PING, + CAM_VFE_BUS_VER1_PONG, + CAM_VFE_BUS_VER1_PINGPONG_MAX, +}; + +enum cam_vfe_bus_ver1_wm_type { + CAM_VFE_BUS_WM_TYPE_IMAGE, + CAM_VFE_BUS_WM_TYPE_STATS, + CAM_VFE_BUS_WM_TYPE_MAX, +}; + +enum cam_vfe_bus_ver1_comp_grp_type { + CAM_VFE_BUS_VER1_COMP_GRP_IMG0, + CAM_VFE_BUS_VER1_COMP_GRP_IMG1, + CAM_VFE_BUS_VER1_COMP_GRP_IMG2, + CAM_VFE_BUS_VER1_COMP_GRP_IMG3, + CAM_VFE_BUS_VER1_COMP_GRP_STATS0, + CAM_VFE_BUS_VER1_COMP_GRP_STATS1, + CAM_VFE_BUS_VER1_COMP_GRP_MAX, +}; + +struct cam_vfe_bus_ver1_common_reg { + uint32_t cmd_offset; + uint32_t cfg_offset; + uint32_t io_fmt_offset; + uint32_t argb_cfg_offset; + uint32_t xbar_cfg0_offset; + uint32_t xbar_cfg1_offset; + uint32_t xbar_cfg2_offset; + uint32_t xbar_cfg3_offset; + uint32_t ping_pong_status_reg; +}; + +struct cam_vfe_bus_ver1_wm_reg { + uint32_t wm_cfg_offset; + uint32_t ping_addr_offset; + uint32_t ping_max_addr_offset; + uint32_t pong_addr_offset; + uint32_t pong_max_addr_offset; + uint32_t addr_cfg_offset; + uint32_t ub_cfg_offset; + uint32_t image_size_offset; + uint32_t buffer_cfg_offset; + uint32_t framedrop_pattern_offset; + uint32_t irq_subsample_pattern_offset; + uint32_t ping_pong_status_bit; /* 0 - 31 */ + uint32_t composite_bit; /* 0 -31 */ +}; + +struct cam_vfe_bus_ver1_wm_resource_data { + uint32_t index; + uint32_t wm_type; + uint32_t res_type; + + uint32_t offset; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t scanline; + + uint32_t burst_len; + + uint32_t framedrop_period; + uint32_t framedrop_pattern; + + uint32_t buf_valid[CAM_VFE_BUS_VER1_PINGPONG_MAX]; + uint32_t ub_size; + uint32_t ub_offset; + + struct cam_vfe_bus_ver1_wm_reg hw_regs; +}; + +struct cam_vfe_bus_ver1_comp_grp_reg { + enum cam_vfe_bus_ver1_comp_grp_type comp_grp_type; + uint32_t comp_grp_offset; +}; + +struct cam_vfe_bus_ver1_comp_grp { + struct cam_vfe_bus_ver1_comp_grp_reg 
reg_info; + struct list_head wm_list; + uint32_t cur_bit_mask; +}; + +/* + * cam_vfe_bus_ver1_init() + * + * @Brief: Initialize Bus layer + * + * @mem_base: Mapped base address of register space + * @hw_intf: HW Interface of HW to which this resource belongs + * @bus_hw_info: BUS HW info that contains details of BUS registers + * @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top + * level IRQs + * @vfe_bus: Pointer to vfe_bus structure which will be filled + * and returned on successful initialize + */ +int cam_vfe_bus_ver1_init( + void __iomem *mem_base, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus); + +#endif /* _CAM_VFE_BUS_VER1_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c new file mode 100644 index 0000000000..d044eada2c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c @@ -0,0 +1,4039 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_hw_intf.h" +#include "cam_ife_hw_mgr.h" +#include "cam_vfe_hw_intf.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_vfe_bus.h" +#include "cam_vfe_bus_ver2.h" +#include "cam_vfe_core.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_mem_mgr_api.h" + +static const char drv_name[] = "vfe_bus"; + +#define CAM_VFE_BUS_IRQ_REG0 0 +#define CAM_VFE_BUS_IRQ_REG1 1 +#define CAM_VFE_BUS_IRQ_REG2 2 +#define CAM_VFE_BUS_IRQ_MAX 3 + +#define CAM_VFE_BUS_VER2_PAYLOAD_MAX 256 + +#define CAM_VFE_BUS_SET_DEBUG_REG 0x82 + +#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH 0xFF01 +#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE 0xFF01 +#define CAM_VFE_BUS_INTRA_CLIENT_MASK 0x3 +#define CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT 8 +#define CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL \ + ((1 << CAM_VFE_BUS_VER2_MAX_CLIENTS) - 1) + +#define MAX_BUF_UPDATE_REG_NUM \ + ((sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client) + \ + sizeof(struct cam_vfe_bus_ver2_reg_offset_ubwc_client))/4) +#define MAX_REG_VAL_PAIR_SIZE \ + (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES) + +static uint32_t bus_error_irq_mask[3] = { + 0x7800, + 0x0000, + 0x0040, +}; + +enum cam_vfe_bus_packer_format { + PACKER_FMT_PLAIN_128 = 0x0, + PACKER_FMT_PLAIN_8 = 0x1, + PACKER_FMT_PLAIN_16_10BPP = 0x2, + PACKER_FMT_PLAIN_16_12BPP = 0x3, + PACKER_FMT_PLAIN_16_14BPP = 0x4, + PACKER_FMT_PLAIN_16_16BPP = 0x5, + PACKER_FMT_ARGB_10 = 0x6, + PACKER_FMT_ARGB_12 = 0x7, + PACKER_FMT_ARGB_14 = 0x8, + PACKER_FMT_PLAIN_32_20BPP = 0x9, + PACKER_FMT_PLAIN_64 = 0xA, + PACKER_FMT_TP_10 = 0xB, + PACKER_FMT_PLAIN_32_32BPP = 0xC, + PACKER_FMT_PLAIN_8_ODD_EVEN = 0xD, + PACKER_FMT_PLAIN_8_LSB_MSB_10 = 0xE, + PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN = 0xF, + PACKER_FMT_MAX = 0xF, +}; + +enum cam_vfe_bus_comp_grp_id { + CAM_VFE_BUS_COMP_GROUP_NONE = -EINVAL, + 
CAM_VFE_BUS_COMP_GROUP_ID_0 = 0x0, + CAM_VFE_BUS_COMP_GROUP_ID_1 = 0x1, + CAM_VFE_BUS_COMP_GROUP_ID_2 = 0x2, + CAM_VFE_BUS_COMP_GROUP_ID_3 = 0x3, + CAM_VFE_BUS_COMP_GROUP_ID_4 = 0x4, + CAM_VFE_BUS_COMP_GROUP_ID_5 = 0x5, +}; + +struct cam_vfe_bus_ver2_common_data { + uint32_t core_index; + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + void *bus_irq_controller; + void *vfe_irq_controller; + struct cam_vfe_bus_ver2_reg_offset_common *common_reg; + uint32_t io_buf_update[ + MAX_REG_VAL_PAIR_SIZE]; + + struct cam_vfe_bus_irq_evt_payload evt_payload[ + CAM_VFE_BUS_VER2_PAYLOAD_MAX]; + struct list_head free_payload_list; + spinlock_t spin_lock; + struct mutex bus_mutex; + uint32_t secure_mode; + uint32_t num_sec_out; + uint32_t addr_no_sync; + cam_hw_mgr_event_cb_func event_cb; + bool hw_init; + bool support_consumed_addr; + bool disable_ubwc_comp; +}; + +struct cam_vfe_bus_ver2_wm_resource_data { + uint32_t index; + struct cam_vfe_bus_ver2_common_data *common_data; + struct cam_vfe_bus_ver2_reg_offset_bus_client *hw_regs; + + bool init_cfg_done; + bool hfr_cfg_done; + + uint32_t offset; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t format; + enum cam_vfe_bus_packer_format pack_fmt; + + uint32_t burst_len; + + uint32_t en_ubwc; + bool ubwc_updated; + uint32_t packer_cfg; + uint32_t tile_cfg; + uint32_t h_init; + uint32_t v_init; + uint32_t ubwc_meta_stride; + uint32_t ubwc_mode_cfg_0; + uint32_t ubwc_mode_cfg_1; + uint32_t ubwc_meta_offset; + + uint32_t irq_subsample_period; + uint32_t irq_subsample_pattern; + uint32_t framedrop_period; + uint32_t framedrop_pattern; + + uint32_t en_cfg; + uint32_t is_dual; + uint32_t ubwc_lossy_threshold_0; + uint32_t ubwc_lossy_threshold_1; + uint32_t ubwc_bandwidth_limit; + uint32_t acquired_width; + uint32_t acquired_height; +}; + +struct cam_vfe_bus_ver2_comp_grp_data { + enum cam_vfe_bus_ver2_comp_grp_type comp_grp_type; + struct cam_vfe_bus_ver2_common_data *common_data; + struct 
cam_vfe_bus_ver2_reg_offset_comp_grp *hw_regs; + + uint32_t comp_grp_local_idx; + uint32_t unique_id; + + uint32_t is_master; + uint32_t dual_slave_core; + uint32_t intra_client_mask; + uint32_t composite_mask; + uint32_t addr_sync_mode; + + uint32_t acquire_dev_cnt; +}; + +struct cam_vfe_bus_ver2_vfe_out_data { + uint32_t out_type; + struct cam_vfe_bus_ver2_common_data *common_data; + + uint32_t num_wm; + struct cam_isp_resource_node *wm_res[PLANE_MAX]; + + struct cam_isp_resource_node *comp_grp; + enum cam_isp_hw_sync_mode dual_comp_sync_mode; + uint32_t dual_hw_alternate_vfe_id; + struct list_head vfe_out_list; + + uint32_t format; + uint32_t max_width; + uint32_t max_height; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t secure_mode; + void *priv; + uint32_t mid[CAM_VFE_BUS_VER2_MAX_MID_PER_PORT]; +}; + +struct cam_vfe_bus_ver2_priv { + struct cam_vfe_bus_ver2_common_data common_data; + uint32_t num_client; + uint32_t num_out; + uint32_t top_irq_shift; + + struct cam_isp_resource_node bus_client[CAM_VFE_BUS_VER2_MAX_CLIENTS]; + struct cam_isp_resource_node comp_grp[CAM_VFE_BUS_VER2_COMP_GRP_MAX]; + struct cam_isp_resource_node vfe_out[CAM_VFE_BUS_VER2_VFE_OUT_MAX]; + + struct list_head free_comp_grp; + struct list_head free_dual_comp_grp; + struct list_head used_comp_grp; + + int irq_handle; + int error_irq_handle; + void *tasklet_info; + uint32_t max_out_res; +}; + +static int cam_vfe_bus_process_cmd( + struct cam_isp_resource_node *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size); + +static int cam_vfe_bus_get_evt_payload( + struct cam_vfe_bus_ver2_common_data *common_data, + struct cam_vfe_bus_irq_evt_payload **evt_payload) +{ + int rc; + + spin_lock(&common_data->spin_lock); + + if (!common_data->hw_init) { + *evt_payload = NULL; + CAM_ERR_RATE_LIMIT(CAM_ISP, "VFE:%d Bus uninitialized", + common_data->core_index); + rc = -EPERM; + goto done; + } + + if (list_empty(&common_data->free_payload_list)) { + *evt_payload = NULL; + 
CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload"); + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry(&common_data->free_payload_list, + struct cam_vfe_bus_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); + rc = 0; +done: + spin_unlock(&common_data->spin_lock); + return rc; +} + +static enum cam_vfe_bus_comp_grp_id + cam_vfe_bus_comp_grp_id_convert(uint32_t comp_grp) +{ + switch (comp_grp) { + case CAM_ISP_RES_COMP_GROUP_ID_0: + return CAM_VFE_BUS_COMP_GROUP_ID_0; + case CAM_ISP_RES_COMP_GROUP_ID_1: + return CAM_VFE_BUS_COMP_GROUP_ID_1; + case CAM_ISP_RES_COMP_GROUP_ID_2: + return CAM_VFE_BUS_COMP_GROUP_ID_2; + case CAM_ISP_RES_COMP_GROUP_ID_3: + return CAM_VFE_BUS_COMP_GROUP_ID_3; + case CAM_ISP_RES_COMP_GROUP_ID_4: + return CAM_VFE_BUS_COMP_GROUP_ID_4; + case CAM_ISP_RES_COMP_GROUP_ID_5: + return CAM_VFE_BUS_COMP_GROUP_ID_5; + case CAM_ISP_RES_COMP_GROUP_NONE: + default: + return CAM_VFE_BUS_COMP_GROUP_NONE; + } +} + +static enum cam_vfe_bus_ver2_comp_grp_type + cam_vfe_bus_dual_comp_grp_id_convert(uint32_t comp_grp) +{ + switch (comp_grp) { + case CAM_VFE_BUS_COMP_GROUP_ID_0: + return CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0; + case CAM_VFE_BUS_COMP_GROUP_ID_1: + return CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1; + case CAM_VFE_BUS_COMP_GROUP_ID_2: + return CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2; + case CAM_VFE_BUS_COMP_GROUP_ID_3: + return CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3; + case CAM_VFE_BUS_COMP_GROUP_ID_4: + return CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4; + case CAM_VFE_BUS_COMP_GROUP_ID_5: + return CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5; + case CAM_VFE_BUS_COMP_GROUP_NONE: + default: + return CAM_VFE_BUS_VER2_COMP_GRP_MAX; + } +} + +static int cam_vfe_bus_put_evt_payload( + struct cam_vfe_bus_ver2_common_data *common_data, + struct cam_vfe_bus_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!common_data) { + CAM_ERR(CAM_ISP, "Invalid param core_info NULL"); + return -EINVAL; + } + + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "No payload to 
put"); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_vfe_bus_irq_evt_payload); + spin_lock_irqsave(&common_data->spin_lock, flags); + if (common_data->hw_init) + list_add_tail(&(*evt_payload)->list, &common_data->free_payload_list); + + spin_unlock_irqrestore(&common_data->spin_lock, flags); + + *evt_payload = NULL; + + CAM_DBG(CAM_ISP, "Done"); + return 0; +} + +static int cam_vfe_bus_ver2_get_intra_client_mask( + enum cam_vfe_bus_ver2_vfe_core_id dual_slave_core, + enum cam_vfe_bus_ver2_vfe_core_id current_core, + uint32_t *intra_client_mask) +{ + int rc = 0; + uint32_t camera_hw_version = 0; + uint32_t version_based_intra_client_mask = 0x1; + + *intra_client_mask = 0; + + + if (dual_slave_core == current_core) { + CAM_ERR(CAM_ISP, + "Invalid params. Same core as Master and Slave"); + return -EINVAL; + } + + rc = cam_cpas_get_cpas_hw_version(&camera_hw_version); + + CAM_DBG(CAM_ISP, "CPAS VERSION %d", camera_hw_version); + + switch (camera_hw_version) { + case CAM_CPAS_TITAN_170_V100: + version_based_intra_client_mask = 0x3; + break; + default: + version_based_intra_client_mask = 0x1; + break; + } + + + switch (current_core) { + case CAM_VFE_BUS_VER2_VFE_CORE_0: + switch (dual_slave_core) { + case CAM_VFE_BUS_VER2_VFE_CORE_1: + case CAM_VFE_BUS_VER2_VFE_CORE_2: + *intra_client_mask = version_based_intra_client_mask; + break; + default: + CAM_ERR(CAM_ISP, "Invalid value for slave core %u", + dual_slave_core); + rc = -EINVAL; + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_CORE_1: + switch (dual_slave_core) { + case CAM_VFE_BUS_VER2_VFE_CORE_0: + case CAM_VFE_BUS_VER2_VFE_CORE_2: + *intra_client_mask = version_based_intra_client_mask; + break; + default: + CAM_ERR(CAM_ISP, "Invalid value for slave core %u", + dual_slave_core); + rc = -EINVAL; + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_CORE_2: + switch (dual_slave_core) { + case CAM_VFE_BUS_VER2_VFE_CORE_0: + case CAM_VFE_BUS_VER2_VFE_CORE_1: + *intra_client_mask = 
version_based_intra_client_mask; + break; + default: + CAM_ERR(CAM_ISP, "Invalid value for slave core %u", + dual_slave_core); + rc = -EINVAL; + break; + } + break; + default: + CAM_ERR(CAM_ISP, + "Invalid value for master core %u", current_core); + rc = -EINVAL; + break; + } + + return rc; +} + +static bool cam_vfe_bus_can_be_secure(uint32_t out_type) +{ + switch (out_type) { + case CAM_VFE_BUS_VER2_VFE_OUT_FULL: + case CAM_VFE_BUS_VER2_VFE_OUT_DS4: + case CAM_VFE_BUS_VER2_VFE_OUT_DS16: + case CAM_VFE_BUS_VER2_VFE_OUT_FD: + case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP: + case CAM_VFE_BUS_VER2_VFE_OUT_RDI0: + case CAM_VFE_BUS_VER2_VFE_OUT_RDI1: + case CAM_VFE_BUS_VER2_VFE_OUT_RDI2: + case CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP: + case CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP: + case CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP: + return true; + + case CAM_VFE_BUS_VER2_VFE_OUT_PDAF: + case CAM_VFE_BUS_VER2_VFE_OUT_2PD: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST: + default: + return false; + } +} + +static enum cam_vfe_bus_ver2_vfe_out_type + cam_vfe_bus_get_out_res_id(uint32_t res_type) +{ + switch (res_type) { + case CAM_ISP_IFE_OUT_RES_FULL: + return CAM_VFE_BUS_VER2_VFE_OUT_FULL; + case CAM_ISP_IFE_OUT_RES_DS4: + return CAM_VFE_BUS_VER2_VFE_OUT_DS4; + case CAM_ISP_IFE_OUT_RES_DS16: + return CAM_VFE_BUS_VER2_VFE_OUT_DS16; + case CAM_ISP_IFE_OUT_RES_FD: + return CAM_VFE_BUS_VER2_VFE_OUT_FD; + case CAM_ISP_IFE_OUT_RES_RAW_DUMP: + return CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP; + case CAM_ISP_IFE_OUT_RES_PDAF: + return CAM_VFE_BUS_VER2_VFE_OUT_PDAF; + case CAM_ISP_IFE_OUT_RES_2PD: + return CAM_VFE_BUS_VER2_VFE_OUT_2PD; + case 
CAM_ISP_IFE_OUT_RES_RDI_0: + return CAM_VFE_BUS_VER2_VFE_OUT_RDI0; + case CAM_ISP_IFE_OUT_RES_RDI_1: + return CAM_VFE_BUS_VER2_VFE_OUT_RDI1; + case CAM_ISP_IFE_OUT_RES_RDI_2: + return CAM_VFE_BUS_VER2_VFE_OUT_RDI2; + case CAM_ISP_IFE_OUT_RES_RDI_3: + return CAM_VFE_BUS_VER2_VFE_OUT_RDI3; + case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE; + case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST; + case CAM_ISP_IFE_OUT_RES_STATS_TL_BG: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG; + case CAM_ISP_IFE_OUT_RES_STATS_BF: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF; + case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG; + case CAM_ISP_IFE_OUT_RES_STATS_BHIST: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST; + case CAM_ISP_IFE_OUT_RES_STATS_RS: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS; + case CAM_ISP_IFE_OUT_RES_STATS_CS: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS; + case CAM_ISP_IFE_OUT_RES_STATS_IHIST: + return CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST; + case CAM_ISP_IFE_OUT_RES_FULL_DISP: + return CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP; + case CAM_ISP_IFE_OUT_RES_DS4_DISP: + return CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP; + case CAM_ISP_IFE_OUT_RES_DS16_DISP: + return CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP; + default: + return CAM_VFE_BUS_VER2_VFE_OUT_MAX; + } +} + +static int cam_vfe_bus_get_num_wm( + enum cam_vfe_bus_ver2_vfe_out_type res_type, + uint32_t format) +{ + switch (res_type) { + case CAM_VFE_BUS_VER2_VFE_OUT_RDI0: + case CAM_VFE_BUS_VER2_VFE_OUT_RDI1: + case CAM_VFE_BUS_VER2_VFE_OUT_RDI2: + case CAM_VFE_BUS_VER2_VFE_OUT_RDI3: + switch (format) { + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_DPCM_10_6_10: + case CAM_FORMAT_DPCM_10_8_10: + case CAM_FORMAT_DPCM_12_6_12: + case CAM_FORMAT_DPCM_12_8_12: + case 
CAM_FORMAT_DPCM_14_8_14: + case CAM_FORMAT_DPCM_14_10_14: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN32_20: + case CAM_FORMAT_PLAIN128: + return 1; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_FULL: + case CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP: + switch (format) { + case CAM_FORMAT_NV21: + case CAM_FORMAT_NV12: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_TP10: + case CAM_FORMAT_UBWC_NV12: + case CAM_FORMAT_UBWC_NV12_4R: + case CAM_FORMAT_UBWC_TP10: + case CAM_FORMAT_UBWC_P010: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + return 2; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_FD: + switch (format) { + case CAM_FORMAT_NV21: + case CAM_FORMAT_NV12: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_TP10: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + return 2; + case CAM_FORMAT_Y_ONLY: + return 1; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_DS4: + case CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP: + case CAM_VFE_BUS_VER2_VFE_OUT_DS16: + case CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP: + switch (format) { + case CAM_FORMAT_PD8: + case CAM_FORMAT_PD10: + return 1; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP: + switch (format) { + case CAM_FORMAT_ARGB_14: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + return 1; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_PDAF: + switch (format) { + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + return 1; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_2PD: + switch (format) { + case CAM_FORMAT_PLAIN16_8: + 
case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN64: + return 1; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS: + switch (format) { + case CAM_FORMAT_PLAIN64: + return 1; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS: + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST: + switch (format) { + case CAM_FORMAT_PLAIN16_16: + return 1; + default: + break; + } + break; + default: + break; + } + + CAM_ERR(CAM_ISP, "Unsupported format %u for resource_type %u", + format, res_type); + + return -EINVAL; +} + +static int cam_vfe_bus_get_wm_idx( + enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id, + enum cam_vfe_bus_plane_type plane) +{ + int wm_idx = -1; + + switch (vfe_out_res_id) { + case CAM_VFE_BUS_VER2_VFE_OUT_RDI0: + switch (plane) { + case PLANE_Y: + wm_idx = 0; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_RDI1: + switch (plane) { + case PLANE_Y: + wm_idx = 1; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_RDI2: + switch (plane) { + case PLANE_Y: + wm_idx = 2; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_RDI3: + switch (plane) { + case PLANE_Y: + wm_idx = 3; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_FULL: + switch (plane) { + case PLANE_Y: + wm_idx = 3; + break; + case PLANE_C: + wm_idx = 4; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_DS4: + switch (plane) { + case PLANE_Y: + wm_idx = 5; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_DS16: + switch (plane) { + 
case PLANE_Y: + wm_idx = 6; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_FD: + switch (plane) { + case PLANE_Y: + wm_idx = 7; + break; + case PLANE_C: + wm_idx = 8; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP: + switch (plane) { + case PLANE_Y: + wm_idx = 9; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_PDAF: + case CAM_VFE_BUS_VER2_VFE_OUT_2PD: + switch (plane) { + case PLANE_Y: + wm_idx = 10; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE: + switch (plane) { + case PLANE_Y: + wm_idx = 11; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST: + switch (plane) { + case PLANE_Y: + wm_idx = 12; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG: + switch (plane) { + case PLANE_Y: + wm_idx = 13; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF: + switch (plane) { + case PLANE_Y: + wm_idx = 14; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG: + switch (plane) { + case PLANE_Y: + wm_idx = 15; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST: + switch (plane) { + case PLANE_Y: + wm_idx = 16; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS: + switch (plane) { + case PLANE_Y: + wm_idx = 17; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS: + switch (plane) { + case PLANE_Y: + wm_idx = 18; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST: + switch (plane) { + case PLANE_Y: + wm_idx = 19; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP: + switch (plane) { + case PLANE_Y: + wm_idx = 20; + break; + case PLANE_C: + wm_idx = 21; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP: + switch (plane) 
{ + case PLANE_Y: + wm_idx = 22; + break; + default: + break; + } + break; + case CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP: + switch (plane) { + case PLANE_Y: + wm_idx = 23; + break; + default: + break; + } + break; + + default: + break; + } + + return wm_idx; +} + +static enum cam_vfe_bus_packer_format + cam_vfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index) +{ + switch (out_fmt) { + case CAM_FORMAT_NV21: + if ((wm_index == 4) || (wm_index == 6) || (wm_index == 21)) + return PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN; + fallthrough; + case CAM_FORMAT_NV12: + case CAM_FORMAT_UBWC_NV12: + case CAM_FORMAT_UBWC_NV12_4R: + case CAM_FORMAT_Y_ONLY: + return PACKER_FMT_PLAIN_8_LSB_MSB_10; + case CAM_FORMAT_PLAIN16_16: + return PACKER_FMT_PLAIN_16_16BPP; + case CAM_FORMAT_PLAIN64: + return PACKER_FMT_PLAIN_64; + case CAM_FORMAT_PLAIN8: + return PACKER_FMT_PLAIN_8; + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + return PACKER_FMT_PLAIN_16_10BPP; + case CAM_FORMAT_PLAIN16_12: + return PACKER_FMT_PLAIN_16_12BPP; + case CAM_FORMAT_PLAIN16_14: + return PACKER_FMT_PLAIN_16_14BPP; + case CAM_FORMAT_PLAIN32_20: + return PACKER_FMT_PLAIN_32_20BPP; + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_PLAIN16_8: + case CAM_FORMAT_PLAIN128: + case CAM_FORMAT_PD8: + case CAM_FORMAT_PD10: + return PACKER_FMT_PLAIN_128; + case CAM_FORMAT_UBWC_TP10: + case CAM_FORMAT_TP10: + return PACKER_FMT_TP_10; + case CAM_FORMAT_ARGB_14: + return PACKER_FMT_ARGB_14; + default: + return PACKER_FMT_MAX; + } + return PACKER_FMT_MAX; +} + +static int cam_vfe_bus_acquire_wm( + struct cam_vfe_bus_ver2_priv *ver2_bus_priv, + struct cam_isp_out_port_generic_info *out_port_info, + void *tasklet, + enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id, + enum cam_vfe_bus_plane_type plane, + struct cam_isp_resource_node 
**wm_res, + uint32_t *client_done_mask, + uint32_t is_dual) +{ + uint32_t wm_idx = 0; + struct cam_isp_resource_node *wm_res_local = NULL; + struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data = NULL; + + *wm_res = NULL; + *client_done_mask = 0; + + /* No need to allocate for BUS VER2. VFE OUT to WM is fixed. */ + wm_idx = cam_vfe_bus_get_wm_idx(vfe_out_res_id, plane); + if (wm_idx < 0 || wm_idx >= ver2_bus_priv->num_client) { + CAM_ERR(CAM_ISP, "Unsupported VFE out %d plane %d", + vfe_out_res_id, plane); + return -EINVAL; + } + + wm_res_local = &ver2_bus_priv->bus_client[wm_idx]; + if (wm_res_local->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, "WM res not available state:%d", + wm_res_local->res_state); + return -EALREADY; + } + wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + wm_res_local->tasklet_info = tasklet; + + rsrc_data = wm_res_local->res_priv; + rsrc_data->format = out_port_info->format; + rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format, + wm_idx); + + rsrc_data->width = out_port_info->width; + rsrc_data->height = out_port_info->height; + rsrc_data->acquired_width = out_port_info->width; + rsrc_data->acquired_height = out_port_info->height; + rsrc_data->is_dual = is_dual; + /* Set WM offset value to default */ + rsrc_data->offset = 0; + CAM_DBG(CAM_ISP, "WM %d width %d height %d", rsrc_data->index, + rsrc_data->width, rsrc_data->height); + + if (rsrc_data->index < 3) { + /* Write master 0-2 refers to RDI 0/ RDI 1/RDI 2 */ + switch (rsrc_data->format) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_PLAIN128: + rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->pack_fmt = 0x0; + rsrc_data->en_cfg = 0x3; + break; + case 
CAM_FORMAT_PLAIN8: + rsrc_data->en_cfg = 0x1; + rsrc_data->pack_fmt = 0x1; + rsrc_data->width = rsrc_data->width * 2; + rsrc_data->stride = rsrc_data->width; + break; + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN32_20: + rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->height = 0; + rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->pack_fmt = 0x0; + rsrc_data->en_cfg = 0x3; + break; + case CAM_FORMAT_PLAIN64: + rsrc_data->en_cfg = 0x1; + rsrc_data->pack_fmt = 0xA; + break; + default: + CAM_ERR(CAM_ISP, "Unsupported RDI format %d", + rsrc_data->format); + return -EINVAL; + } + } else if ((rsrc_data->index < 5) || + (rsrc_data->index == 7) || (rsrc_data->index == 8) || + (rsrc_data->index == 20) || (rsrc_data->index == 21)) { + /* + * Write master 3, 4 - for Full OUT , 7-8 FD OUT, + * WM 20-21 = FULL_DISP + */ + switch (rsrc_data->format) { + case CAM_FORMAT_UBWC_NV12_4R: + rsrc_data->en_ubwc = 1; + rsrc_data->width = ALIGNUP(rsrc_data->width, 64); + switch (plane) { + case PLANE_C: + rsrc_data->height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + break; + case CAM_FORMAT_UBWC_NV12: + rsrc_data->en_ubwc = 1; + /* Fall through for NV12 */ + fallthrough; + case CAM_FORMAT_NV21: + case CAM_FORMAT_NV12: + case CAM_FORMAT_Y_ONLY: + switch (plane) { + case PLANE_C: + rsrc_data->height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + break; + case CAM_FORMAT_UBWC_TP10: + rsrc_data->en_ubwc = 1; + rsrc_data->width = + ALIGNUP(rsrc_data->width, 48) * 4 / 3; + switch (plane) { + case PLANE_C: + rsrc_data->height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + break; + case CAM_FORMAT_TP10: + 
rsrc_data->width = + ALIGNUP(rsrc_data->width, 3) * 4 / 3; + switch (plane) { + case PLANE_C: + rsrc_data->height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + break; + case CAM_FORMAT_PLAIN16_10_LSB: + rsrc_data->pack_fmt |= 0x10; + switch (plane) { + case PLANE_C: + rsrc_data->height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + rsrc_data->width *= 2; + break; + case CAM_FORMAT_PLAIN16_10: + switch (plane) { + case PLANE_C: + rsrc_data->height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + rsrc_data->width *= 2; + break; + default: + CAM_ERR(CAM_ISP, "Invalid format %d", + rsrc_data->format); + return -EINVAL; + } + rsrc_data->en_cfg = 0x1; + } else if (rsrc_data->index >= 11 && rsrc_data->index < 20) { + /* Write master 11 - 19 stats */ + rsrc_data->width = 0; + rsrc_data->height = 0; + rsrc_data->stride = 1; + rsrc_data->en_cfg = 0x3; + } else if (rsrc_data->index == 10) { + /* Write master 10 - PDAF/2PD */ + rsrc_data->width = 0; + rsrc_data->height = 0; + rsrc_data->stride = 1; + rsrc_data->en_cfg = 0x3; + if (vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_PDAF) + /* LSB aligned */ + rsrc_data->pack_fmt |= 0x10; + } else if (rsrc_data->index == 9) { + /* Write master 9 - Raw dump */ + rsrc_data->width = rsrc_data->width * 2; + rsrc_data->stride = rsrc_data->width; + rsrc_data->en_cfg = 0x1; + /* LSB aligned */ + rsrc_data->pack_fmt |= 0x10; + } else { + /* Write master 5-6 DS ports */ + uint32_t align_width; + + rsrc_data->width = rsrc_data->width * 4; + rsrc_data->height = rsrc_data->height / 2; + rsrc_data->en_cfg = 0x1; + CAM_DBG(CAM_ISP, "before width %d", rsrc_data->width); + align_width = ALIGNUP(rsrc_data->width, 16); + if (align_width != rsrc_data->width) { + CAM_WARN(CAM_ISP, + "Override width %u with expected %u", + 
rsrc_data->width, align_width); + rsrc_data->width = align_width; + } + } + + *client_done_mask = (1 << wm_idx); + *wm_res = wm_res_local; + + CAM_DBG(CAM_ISP, "WM %d: processed width %d, processed height %d", + rsrc_data->index, rsrc_data->width, rsrc_data->height); + return 0; +} + +static int cam_vfe_bus_release_wm(void *bus_priv, + struct cam_isp_resource_node *wm_res) +{ + struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data = + wm_res->res_priv; + + rsrc_data->offset = 0; + rsrc_data->width = 0; + rsrc_data->height = 0; + rsrc_data->stride = 0; + rsrc_data->format = 0; + rsrc_data->pack_fmt = 0; + rsrc_data->burst_len = 0; + rsrc_data->irq_subsample_period = 0; + rsrc_data->irq_subsample_pattern = 0; + rsrc_data->framedrop_period = 0; + rsrc_data->framedrop_pattern = 0; + rsrc_data->packer_cfg = 0; + rsrc_data->en_ubwc = 0; + rsrc_data->tile_cfg = 0; + rsrc_data->h_init = 0; + rsrc_data->v_init = 0; + rsrc_data->ubwc_meta_stride = 0; + rsrc_data->ubwc_mode_cfg_0 = 0; + rsrc_data->ubwc_mode_cfg_1 = 0; + rsrc_data->ubwc_meta_offset = 0; + rsrc_data->init_cfg_done = false; + rsrc_data->hfr_cfg_done = false; + rsrc_data->en_cfg = 0; + rsrc_data->is_dual = 0; + + rsrc_data->ubwc_lossy_threshold_0 = 0; + rsrc_data->ubwc_lossy_threshold_1 = 0; + rsrc_data->ubwc_bandwidth_limit = 0; + wm_res->tasklet_info = NULL; + wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + return 0; +} + +static int cam_vfe_bus_start_wm( + struct cam_isp_resource_node *wm_res, + uint32_t *bus_irq_reg_mask) +{ + int rc = 0, val = 0; + struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data = + wm_res->res_priv; + struct cam_vfe_bus_ver2_common_data *common_data = + rsrc_data->common_data; + uint32_t camera_hw_version; + uint32_t disable_ubwc_comp = rsrc_data->common_data->disable_ubwc_comp; + + cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit); + + cam_io_w_mb(rsrc_data->width, + common_data->mem_base + rsrc_data->hw_regs->buffer_width_cfg); + 
cam_io_w(rsrc_data->height, + common_data->mem_base + rsrc_data->hw_regs->buffer_height_cfg); + cam_io_w(rsrc_data->pack_fmt, + common_data->mem_base + rsrc_data->hw_regs->packer_cfg); + + /* Configure stride for RDIs */ + if (rsrc_data->index < 3) + cam_io_w_mb(rsrc_data->stride, (common_data->mem_base + + rsrc_data->hw_regs->stride)); + + bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] = (1 << rsrc_data->index); + + /* enable ubwc if needed*/ + if (rsrc_data->en_ubwc) { + rc = cam_cpas_get_cpas_hw_version(&camera_hw_version); + if (rc) { + CAM_ERR(CAM_ISP, "failed to get HW version rc=%d", rc); + return rc; + } + if ((camera_hw_version > CAM_CPAS_TITAN_NONE) && + (camera_hw_version < CAM_CPAS_TITAN_175_V100) && + (camera_hw_version != CAM_CPAS_TITAN_165_V100)) { + struct cam_vfe_bus_ver2_reg_offset_ubwc_client + *ubwc_regs; + + ubwc_regs = + (struct + cam_vfe_bus_ver2_reg_offset_ubwc_client *) + rsrc_data->hw_regs->ubwc_regs; + if (!ubwc_regs) { + CAM_ERR(CAM_ISP, "ubwc_regs is NULL"); + return -EINVAL; + } + val = cam_io_r_mb(common_data->mem_base + + ubwc_regs->mode_cfg_0); + val |= 0x1; + if (disable_ubwc_comp) { + val &= ~ubwc_regs->ubwc_comp_en_bit; + CAM_DBG(CAM_ISP, + "Force disable UBWC compression, ubwc_mode_cfg: 0x%x", + val); + } + cam_io_w_mb(val, common_data->mem_base + + ubwc_regs->mode_cfg_0); + } else if ((camera_hw_version == CAM_CPAS_TITAN_175_V100) || + (camera_hw_version == CAM_CPAS_TITAN_175_V101) || + (camera_hw_version == CAM_CPAS_TITAN_175_V120) || + (camera_hw_version == CAM_CPAS_TITAN_175_V130) || + (camera_hw_version == CAM_CPAS_TITAN_165_V100)) { + struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client + *ubwc_regs; + + ubwc_regs = + (struct + cam_vfe_bus_ver2_reg_offset_ubwc_3_client *) + rsrc_data->hw_regs->ubwc_regs; + if (!ubwc_regs) { + CAM_ERR(CAM_ISP, "ubwc_regs is NULL"); + return -EINVAL; + } + val = cam_io_r_mb(common_data->mem_base + + ubwc_regs->mode_cfg_0); + val |= 0x1; + if (disable_ubwc_comp) { + val &= ~ubwc_regs->ubwc_comp_en_bit; + 
CAM_DBG(CAM_ISP, + "Force disable UBWC compression, ubwc_mode_cfg: 0x%x", + val); + } + cam_io_w_mb(val, common_data->mem_base + + ubwc_regs->mode_cfg_0); + } else { + CAM_ERR(CAM_ISP, "Invalid HW version: %d", + camera_hw_version); + return -EINVAL; + } + } + /* enabling Wm configuratons are taken care in update_wm(). + * i.e enable wm only if io buffers are allocated + */ + + CAM_DBG(CAM_ISP, "WM res %d width = %d, height = %d", rsrc_data->index, + rsrc_data->width, rsrc_data->height); + CAM_DBG(CAM_ISP, "WM res %d pk_fmt = %d", rsrc_data->index, + rsrc_data->pack_fmt & PACKER_FMT_MAX); + CAM_DBG(CAM_ISP, "WM res %d stride = %d, burst len = %d", + rsrc_data->index, rsrc_data->stride, 0xf); + CAM_DBG(CAM_ISP, "enable WM res %d offset 0x%x val 0x%x", + rsrc_data->index, (uint32_t) rsrc_data->hw_regs->cfg, + rsrc_data->en_cfg); + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + return rc; +} + +static int cam_vfe_bus_stop_wm(struct cam_isp_resource_node *wm_res) +{ + int rc = 0; + struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data = + wm_res->res_priv; + struct cam_vfe_bus_ver2_common_data *common_data = + rsrc_data->common_data; + + /* Disable WM */ + cam_io_w_mb(0x0, + common_data->mem_base + rsrc_data->hw_regs->cfg); + + /* Disable all register access, reply on global reset */ + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + rsrc_data->init_cfg_done = false; + rsrc_data->hfr_cfg_done = false; + + return rc; +} + +static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + return -EPERM; +} + +static int cam_vfe_bus_handle_wm_done_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + int rc = CAM_VFE_IRQ_STATUS_ERR; + struct cam_isp_resource_node *wm_res = handler_priv; + struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv; + struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data = + (wm_res == NULL) ? 
NULL : wm_res->res_priv; + uint32_t *cam_ife_irq_regs; + uint32_t status_reg; + + if (!evt_payload || !wm_res || !rsrc_data) + return rc; + + CAM_DBG(CAM_ISP, "addr of evt_payload = %llx core index:0x%x", + (uint64_t)evt_payload, evt_payload->core_index); + CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]); + CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]); + CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]); + CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]); + CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x", + evt_payload->irq_reg_val[4]); + CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x", + evt_payload->irq_reg_val[5]); + CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x", + evt_payload->irq_reg_val[6]); + + cam_ife_irq_regs = evt_payload->irq_reg_val; + status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1]; + + if (status_reg & BIT(rsrc_data->index)) { + cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1] &= + ~BIT(rsrc_data->index); + rc = CAM_VFE_IRQ_STATUS_SUCCESS; + evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE; + } + CAM_DBG(CAM_ISP, "status_reg %x rc %d wm_idx %d", + status_reg, rc, rsrc_data->index); + + return rc; +} + + +static int cam_vfe_bus_err_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv; + struct cam_vfe_bus_ver2_priv *bus_priv = handler_priv; + struct cam_vfe_bus_ver2_common_data *common_data; + struct cam_isp_hw_event_info evt_info; + struct cam_isp_hw_error_event_info err_evt_info; + uint32_t val = 0; + + if (!handler_priv || !evt_payload_priv) + return -EINVAL; + + evt_payload = evt_payload_priv; + common_data = &bus_priv->common_data; + + val = evt_payload->debug_status_0; + CAM_ERR(CAM_ISP, "Bus Violation: debug_status_0 = 0x%x", val); + + if (val & 0x01) + CAM_INFO(CAM_ISP, "RDI 0 violation"); + + if (val & 0x02) + CAM_INFO(CAM_ISP, "RDI 1 violation"); + + if (val & 0x04) + 
CAM_INFO(CAM_ISP, "RDI 2 violation"); + + if (val & 0x08) + CAM_INFO(CAM_ISP, "VID Y 1:1 UBWC violation"); + + if (val & 0x010) + CAM_INFO(CAM_ISP, "VID C 1:1 UBWC violation"); + + if (val & 0x020) + CAM_INFO(CAM_ISP, "VID YC 4:1 violation"); + + if (val & 0x040) + CAM_INFO(CAM_ISP, "VID YC 16:1 violation"); + + if (val & 0x080) + CAM_INFO(CAM_ISP, "FD Y violation"); + + if (val & 0x0100) + CAM_INFO(CAM_ISP, "FD C violation"); + + if (val & 0x0200) + CAM_INFO(CAM_ISP, "RAW DUMP violation"); + + if (val & 0x0400) + CAM_INFO(CAM_ISP, "PDAF violation"); + + if (val & 0x0800) + CAM_INFO(CAM_ISP, "STATs HDR BE violation"); + + if (val & 0x01000) + CAM_INFO(CAM_ISP, "STATs HDR BHIST violation"); + + if (val & 0x02000) + CAM_INFO(CAM_ISP, "STATs TINTLESS BG violation"); + + if (val & 0x04000) + CAM_INFO(CAM_ISP, "STATs BF violation"); + + if (val & 0x08000) + CAM_INFO(CAM_ISP, "STATs AWB BG UBWC violation"); + + if (val & 0x010000) + CAM_INFO(CAM_ISP, "STATs BHIST violation"); + + if (val & 0x020000) + CAM_INFO(CAM_ISP, "STATs RS violation"); + + if (val & 0x040000) + CAM_INFO(CAM_ISP, "STATs CS violation"); + + if (val & 0x080000) + CAM_INFO(CAM_ISP, "STATs IHIST violation"); + + if (val & 0x0100000) + CAM_INFO(CAM_ISP, "DISP Y 1:1 UBWC violation"); + + if (val & 0x0200000) + CAM_INFO(CAM_ISP, "DISP C 1:1 UBWC violation"); + + if (val & 0x0400000) + CAM_INFO(CAM_ISP, "DISP YC 4:1 violation"); + + if (val & 0x0800000) + CAM_INFO(CAM_ISP, "DISP YC 16:1 violation"); + + cam_vfe_bus_put_evt_payload(common_data, &evt_payload); + + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.hw_idx = common_data->core_index; + evt_info.res_type = CAM_ISP_RESOURCE_VFE_OUT; + evt_info.res_id = CAM_VFE_BUS_VER2_VFE_OUT_MAX; + + err_evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION; + evt_info.event_data = (void *)&err_evt_info; + + if (common_data->event_cb) + common_data->event_cb(NULL, CAM_ISP_HW_EVENT_ERROR, + (void *)&evt_info); + + return 0; +} + +static int 
cam_vfe_bus_init_wm_resource(uint32_t index, + struct cam_vfe_bus_ver2_priv *ver2_bus_priv, + struct cam_vfe_bus_ver2_hw_info *ver2_hw_info, + struct cam_isp_resource_node *wm_res) +{ + struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data; + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_ver2_wm_resource_data), + GFP_KERNEL); + if (!rsrc_data) { + CAM_DBG(CAM_ISP, "Failed to alloc for WM res priv"); + return -ENOMEM; + } + wm_res->res_priv = rsrc_data; + + rsrc_data->index = index; + rsrc_data->hw_regs = &ver2_hw_info->bus_client_reg[index]; + rsrc_data->common_data = &ver2_bus_priv->common_data; + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&wm_res->list); + + wm_res->start = NULL; + wm_res->stop = NULL; + wm_res->top_half_handler = cam_vfe_bus_handle_wm_done_top_half; + wm_res->bottom_half_handler = cam_vfe_bus_handle_wm_done_bottom_half; + wm_res->hw_intf = ver2_bus_priv->common_data.hw_intf; + + return 0; +} + +static int cam_vfe_bus_deinit_wm_resource( + struct cam_isp_resource_node *wm_res) +{ + struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data; + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&wm_res->list); + + wm_res->start = NULL; + wm_res->stop = NULL; + wm_res->top_half_handler = NULL; + wm_res->bottom_half_handler = NULL; + wm_res->hw_intf = NULL; + + rsrc_data = wm_res->res_priv; + wm_res->res_priv = NULL; + if (!rsrc_data) + return -ENOMEM; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static void cam_vfe_bus_add_wm_to_comp_grp( + struct cam_isp_resource_node *comp_grp, + uint32_t composite_mask) +{ + struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = comp_grp->res_priv; + + rsrc_data->composite_mask |= composite_mask; +} + +static void cam_vfe_bus_match_comp_grp( + struct cam_vfe_bus_ver2_priv *ver2_bus_priv, + struct cam_isp_resource_node **comp_grp, + uint32_t comp_grp_local_idx, + uint32_t unique_id) +{ + struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL; + struct 
cam_isp_resource_node *comp_grp_local = NULL; + + list_for_each_entry(comp_grp_local, + &ver2_bus_priv->used_comp_grp, list) { + rsrc_data = comp_grp_local->res_priv; + if (rsrc_data->comp_grp_local_idx == comp_grp_local_idx && + rsrc_data->unique_id == unique_id) { + /* Match found */ + *comp_grp = comp_grp_local; + return; + } + } + + *comp_grp = NULL; +} + +static int cam_vfe_bus_get_free_dual_comp_grp( + struct cam_vfe_bus_ver2_priv *ver2_bus_priv, + struct cam_isp_resource_node **comp_grp, + uint32_t comp_grp_local_idx) +{ + struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL; + struct cam_isp_resource_node *dual_comp_grp_local = NULL; + struct cam_isp_resource_node *dual_comp_grp_local_temp = NULL; + int32_t dual_comp_grp_idx = 0; + int rc = -EINVAL; + + dual_comp_grp_idx = cam_vfe_bus_dual_comp_grp_id_convert(comp_grp_local_idx); + + CAM_DBG(CAM_ISP, "dual_comp_grp_idx :%d", dual_comp_grp_idx); + + list_for_each_entry_safe(dual_comp_grp_local, dual_comp_grp_local_temp, + &ver2_bus_priv->free_dual_comp_grp, list) { + rsrc_data = dual_comp_grp_local->res_priv; + CAM_DBG(CAM_ISP, "current grp type : %d expected :%d", + rsrc_data->comp_grp_type, dual_comp_grp_idx); + if (dual_comp_grp_idx != rsrc_data->comp_grp_type) { + continue; + } else { + list_del_init(&dual_comp_grp_local->list); + *comp_grp = dual_comp_grp_local; + return 0; + } + } + + return rc; +} + +static int cam_vfe_bus_acquire_comp_grp( + struct cam_vfe_bus_ver2_priv *ver2_bus_priv, + struct cam_isp_out_port_generic_info *out_port_info, + void *tasklet, + uint32_t unique_id, + uint32_t is_dual, + uint32_t is_master, + enum cam_vfe_bus_ver2_vfe_core_id dual_slave_core, + struct cam_isp_resource_node **comp_grp) +{ + int rc = 0; + uint32_t bus_comp_grp_id; + struct cam_isp_resource_node *comp_grp_local = NULL; + struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL; + + bus_comp_grp_id = cam_vfe_bus_comp_grp_id_convert( + out_port_info->comp_grp_id); + /* Perform match only if there is valid 
comp grp request */ + if (out_port_info->comp_grp_id != CAM_ISP_RES_COMP_GROUP_NONE) { + /* Check if matching comp_grp already acquired */ + cam_vfe_bus_match_comp_grp(ver2_bus_priv, &comp_grp_local, + bus_comp_grp_id, unique_id); + } + + if (!comp_grp_local) { + /* First find a free group */ + if (is_dual) { + CAM_DBG(CAM_ISP, "Acquire dual comp group"); + if (list_empty(&ver2_bus_priv->free_dual_comp_grp)) { + CAM_ERR(CAM_ISP, "No Free Composite Group"); + return -ENODEV; + } + rc = cam_vfe_bus_get_free_dual_comp_grp( + ver2_bus_priv, &comp_grp_local, bus_comp_grp_id); + if (rc || !comp_grp_local) { + CAM_ERR(CAM_ISP, + "failed to acquire dual comp grp for :%d rc :%d", + bus_comp_grp_id, rc); + return rc; + } + rsrc_data = comp_grp_local->res_priv; + rc = cam_vfe_bus_ver2_get_intra_client_mask( + dual_slave_core, + comp_grp_local->hw_intf->hw_idx, + &rsrc_data->intra_client_mask); + if (rc) + return rc; + } else { + CAM_DBG(CAM_ISP, "Acquire comp group"); + if (list_empty(&ver2_bus_priv->free_comp_grp)) { + CAM_ERR(CAM_ISP, "No Free Composite Group"); + return -ENODEV; + } + comp_grp_local = list_first_entry( + &ver2_bus_priv->free_comp_grp, + struct cam_isp_resource_node, list); + rsrc_data = comp_grp_local->res_priv; + } + + list_del(&comp_grp_local->list); + comp_grp_local->tasklet_info = tasklet; + comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + rsrc_data->is_master = is_master; + rsrc_data->composite_mask = 0; + rsrc_data->unique_id = unique_id; + rsrc_data->comp_grp_local_idx = bus_comp_grp_id; + + if (is_master) + rsrc_data->addr_sync_mode = 0; + else + rsrc_data->addr_sync_mode = 1; + + list_add_tail(&comp_grp_local->list, + &ver2_bus_priv->used_comp_grp); + + } else { + rsrc_data = comp_grp_local->res_priv; + /* Do not support runtime change in composite mask */ + if (comp_grp_local->res_state == + CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u", + comp_grp_local->res_state, + 
rsrc_data->comp_grp_type); + return -EBUSY; + } + } + + CAM_DBG(CAM_ISP, "Comp Grp type %u", rsrc_data->comp_grp_type); + + rsrc_data->acquire_dev_cnt++; + *comp_grp = comp_grp_local; + + return rc; +} + +static int cam_vfe_bus_release_comp_grp( + struct cam_vfe_bus_ver2_priv *ver2_bus_priv, + struct cam_isp_resource_node *in_comp_grp) +{ + struct cam_isp_resource_node *comp_grp = NULL; + struct cam_vfe_bus_ver2_comp_grp_data *in_rsrc_data = NULL; + int match_found = 0; + + if (!in_comp_grp) { + CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", in_comp_grp); + return -EINVAL; + } + + if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, "Already released Comp Grp"); + return 0; + } + + if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) { + CAM_ERR(CAM_ISP, "Invalid State %d", + in_comp_grp->res_state); + return -EBUSY; + } + + in_rsrc_data = in_comp_grp->res_priv; + CAM_DBG(CAM_ISP, "Comp Grp type %u", in_rsrc_data->comp_grp_type); + + list_for_each_entry(comp_grp, &ver2_bus_priv->used_comp_grp, list) { + if (comp_grp == in_comp_grp) { + match_found = 1; + break; + } + } + + if (!match_found) { + CAM_ERR(CAM_ISP, "Could not find matching Comp Grp type %u", + in_rsrc_data->comp_grp_type); + return -ENODEV; + } + + in_rsrc_data->acquire_dev_cnt--; + if (in_rsrc_data->acquire_dev_cnt == 0) { + list_del(&comp_grp->list); + + in_rsrc_data->unique_id = 0; + in_rsrc_data->comp_grp_local_idx = CAM_VFE_BUS_COMP_GROUP_NONE; + in_rsrc_data->composite_mask = 0; + in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX; + in_rsrc_data->addr_sync_mode = 0; + + comp_grp->tasklet_info = NULL; + comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + if (in_rsrc_data->comp_grp_type >= + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 && + in_rsrc_data->comp_grp_type <= + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) + list_add_tail(&comp_grp->list, + &ver2_bus_priv->free_dual_comp_grp); + else if (in_rsrc_data->comp_grp_type >= + CAM_VFE_BUS_VER2_COMP_GRP_0 
&& + in_rsrc_data->comp_grp_type <= + CAM_VFE_BUS_VER2_COMP_GRP_5) + list_add_tail(&comp_grp->list, + &ver2_bus_priv->free_comp_grp); + } + + return 0; +} + +static int cam_vfe_bus_start_comp_grp( + struct cam_isp_resource_node *comp_grp, + uint32_t *bus_irq_reg_mask) +{ + int rc = 0; + uint32_t addr_sync_cfg; + struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = + comp_grp->res_priv; + struct cam_vfe_bus_ver2_common_data *common_data = + rsrc_data->common_data; + + CAM_DBG(CAM_ISP, "comp group id:%d streaming state:%d", + rsrc_data->comp_grp_type, comp_grp->res_state); + + cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base + + rsrc_data->hw_regs->comp_mask); + if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + return 0; + + CAM_DBG(CAM_ISP, "composite_mask is 0x%x", rsrc_data->composite_mask); + CAM_DBG(CAM_ISP, "composite_mask addr 0x%x", + rsrc_data->hw_regs->comp_mask); + + if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 && + rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) { + int dual_comp_grp = (rsrc_data->comp_grp_type - + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0); + + if (rsrc_data->is_master) { + int intra_client_en = cam_io_r_mb( + common_data->mem_base + + common_data->common_reg->dual_master_comp_cfg); + + /* + * 2 Bits per comp_grp. 
Hence left shift by + * comp_grp * 2 + */ + intra_client_en |= + (rsrc_data->intra_client_mask << + (dual_comp_grp * 2)); + + cam_io_w_mb(intra_client_en, common_data->mem_base + + common_data->common_reg->dual_master_comp_cfg); + + bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG2] = + (1 << dual_comp_grp); + } + + CAM_DBG(CAM_ISP, "addr_sync_mask addr 0x%x", + rsrc_data->hw_regs->addr_sync_mask); + cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base + + rsrc_data->hw_regs->addr_sync_mask); + + addr_sync_cfg = cam_io_r_mb(common_data->mem_base + + common_data->common_reg->addr_sync_cfg); + addr_sync_cfg |= (rsrc_data->addr_sync_mode << dual_comp_grp); + /* + * 2 Bits per dual_comp_grp. dual_comp_grp stats at bit number + * 8. Hence left shift cdual_comp_grp dual comp_grp * 2 and + * add 8 + */ + addr_sync_cfg |= + (rsrc_data->intra_client_mask << + ((dual_comp_grp * 2) + + CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT)); + cam_io_w_mb(addr_sync_cfg, common_data->mem_base + + common_data->common_reg->addr_sync_cfg); + + common_data->addr_no_sync &= ~(rsrc_data->composite_mask); + cam_io_w_mb(common_data->addr_no_sync, common_data->mem_base + + common_data->common_reg->addr_sync_no_sync); + CAM_DBG(CAM_ISP, "addr_sync_cfg: 0x%x addr_no_sync_cfg: 0x%x", + addr_sync_cfg, common_data->addr_no_sync); + } else { + /* IRQ bits for COMP GRP start at 5. 
So add 5 to the shift */ + bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG0] = + (1 << (rsrc_data->comp_grp_type + 5)); + } + + CAM_DBG(CAM_ISP, "VFE start COMP_GRP%d", rsrc_data->comp_grp_type); + + comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + return rc; +} + +static int cam_vfe_bus_stop_comp_grp(struct cam_isp_resource_node *comp_grp) +{ + int rc = 0; + + comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + return rc; +} + +static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + return -EPERM; +} + +static int cam_vfe_bus_handle_comp_done_bottom_half(void *handler_priv, + void *evt_payload_priv, uint32_t *grp_id) +{ + int rc = CAM_VFE_IRQ_STATUS_ERR; + struct cam_isp_resource_node *comp_grp = handler_priv; + struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv; + struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = comp_grp->res_priv; + uint32_t *cam_ife_irq_regs; + uint32_t status_reg; + uint32_t comp_err_reg; + uint32_t comp_grp_id; + + CAM_DBG(CAM_ISP, "comp grp type %d", rsrc_data->comp_grp_type); + + if (!evt_payload) + return rc; + + cam_ife_irq_regs = evt_payload->irq_reg_val; + + switch (rsrc_data->comp_grp_type) { + case CAM_VFE_BUS_VER2_COMP_GRP_0: + case CAM_VFE_BUS_VER2_COMP_GRP_1: + case CAM_VFE_BUS_VER2_COMP_GRP_2: + case CAM_VFE_BUS_VER2_COMP_GRP_3: + case CAM_VFE_BUS_VER2_COMP_GRP_4: + case CAM_VFE_BUS_VER2_COMP_GRP_5: + comp_grp_id = (rsrc_data->comp_grp_type - + CAM_VFE_BUS_VER2_COMP_GRP_0); + + /* Check for Regular composite error */ + status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0]; + + comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_COMP_ERR]; + if ((status_reg & BIT(11)) && + (comp_err_reg & rsrc_data->composite_mask)) { + /* Check for Regular composite error */ + evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR; + rc = CAM_VFE_IRQ_STATUS_ERR_COMP; + break; + } + + comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_COMP_OWRT]; + /* Check for Regular 
composite Overwrite */ + if ((status_reg & BIT(12)) && + (comp_err_reg & rsrc_data->composite_mask)) { + evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR; + rc = CAM_VFE_IRQ_STATUS_COMP_OWRT; + break; + } + + /* Regular Composite SUCCESS */ + if (status_reg & BIT(comp_grp_id + 5)) { + evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE; + rc = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + CAM_DBG(CAM_ISP, "status reg = 0x%x, bit index = %d rc %d", + status_reg, (comp_grp_id + 5), rc); + break; + + case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0: + case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1: + case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2: + case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3: + case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4: + case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5: + comp_grp_id = (rsrc_data->comp_grp_type - + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0); + + /* Check for DUAL composite error */ + status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2]; + + comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_ERR]; + if ((status_reg & BIT(6)) && + (comp_err_reg & rsrc_data->composite_mask)) { + /* Check for DUAL composite error */ + evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR; + rc = CAM_VFE_IRQ_STATUS_ERR_COMP; + break; + } + + /* Check for Dual composite Overwrite */ + comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT]; + if ((status_reg & BIT(7)) && + (comp_err_reg & rsrc_data->composite_mask)) { + evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR; + rc = CAM_VFE_IRQ_STATUS_COMP_OWRT; + break; + } + + /* DUAL Composite SUCCESS */ + if (status_reg & BIT(comp_grp_id)) { + evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE; + rc = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + break; + default: + rc = CAM_VFE_IRQ_STATUS_ERR; + CAM_ERR(CAM_ISP, "Invalid comp_grp_type %u", + rsrc_data->comp_grp_type); + break; + } + + *grp_id = rsrc_data->comp_grp_type; + + return rc; +} + +static int cam_vfe_bus_init_comp_grp(uint32_t index, + struct cam_vfe_bus_ver2_priv *ver2_bus_priv, + struct cam_vfe_bus_ver2_hw_info *ver2_hw_info, + struct 
cam_isp_resource_node *comp_grp) +{ + struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL; + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_ver2_comp_grp_data), + GFP_KERNEL); + if (!rsrc_data) + return -ENOMEM; + + comp_grp->res_priv = rsrc_data; + + comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&comp_grp->list); + + rsrc_data->comp_grp_type = index; + rsrc_data->common_data = &ver2_bus_priv->common_data; + rsrc_data->hw_regs = &ver2_hw_info->comp_grp_reg[index]; + rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX; + + if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 && + rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) + list_add_tail(&comp_grp->list, + &ver2_bus_priv->free_dual_comp_grp); + else if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 + && rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5) + list_add_tail(&comp_grp->list, &ver2_bus_priv->free_comp_grp); + + comp_grp->start = NULL; + comp_grp->stop = NULL; + comp_grp->top_half_handler = cam_vfe_bus_handle_comp_done_top_half; + comp_grp->bottom_half_handler = NULL; + comp_grp->hw_intf = ver2_bus_priv->common_data.hw_intf; + + return 0; +} + +static int cam_vfe_bus_deinit_comp_grp( + struct cam_isp_resource_node *comp_grp) +{ + struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = + comp_grp->res_priv; + + comp_grp->start = NULL; + comp_grp->stop = NULL; + comp_grp->top_half_handler = NULL; + comp_grp->bottom_half_handler = NULL; + comp_grp->hw_intf = NULL; + + list_del_init(&comp_grp->list); + comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + + comp_grp->res_priv = NULL; + + if (!rsrc_data) { + CAM_ERR(CAM_ISP, "comp_grp_priv is NULL"); + return -ENODEV; + } + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static int cam_vfe_bus_get_secure_mode(void *priv, void *cmd_args, + uint32_t arg_size) +{ + + struct cam_isp_hw_get_cmd_update *secure_mode = cmd_args; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data; + 
uint32_t *mode; + + rsrc_data = (struct cam_vfe_bus_ver2_vfe_out_data *) + secure_mode->res->res_priv; + mode = (uint32_t *)secure_mode->data; + *mode = + (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) ? + true : false; + + return 0; +} + +static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args, + uint32_t args_size) +{ + int rc = -ENODEV; + int i; + enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id; + uint32_t format; + int num_wm; + uint32_t client_done_mask; + struct cam_vfe_bus_ver2_priv *ver2_bus_priv = bus_priv; + struct cam_vfe_acquire_args *acq_args = acquire_args; + struct cam_vfe_hw_vfe_out_acquire_args *out_acquire_args; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL; + uint32_t secure_caps = 0, mode; + + if (!bus_priv || !acquire_args) { + CAM_ERR(CAM_ISP, "Invalid Param"); + return -EINVAL; + } + + out_acquire_args = &acq_args->vfe_out; + format = out_acquire_args->out_port_info->format; + + CAM_DBG(CAM_ISP, "Acquiring resource type 0x%x", + out_acquire_args->out_port_info->res_type); + + vfe_out_res_id = cam_vfe_bus_get_out_res_id( + out_acquire_args->out_port_info->res_type); + if (vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_MAX) + return -ENODEV; + + num_wm = cam_vfe_bus_get_num_wm(vfe_out_res_id, format); + if (num_wm < 1) + return -EINVAL; + + rsrc_node = &ver2_bus_priv->vfe_out[vfe_out_res_id]; + if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d", + vfe_out_res_id, rsrc_node->res_state); + return -EBUSY; + } + + rsrc_data = rsrc_node->res_priv; + rsrc_data->common_data->event_cb = acq_args->event_cb; + rsrc_data->common_data->disable_ubwc_comp = + out_acquire_args->disable_ubwc_comp; + rsrc_data->priv = acq_args->priv; + + secure_caps = cam_vfe_bus_can_be_secure(rsrc_data->out_type); + mode = out_acquire_args->out_port_info->secure_mode; + mutex_lock(&rsrc_data->common_data->bus_mutex); + if 
(secure_caps) { + if (!rsrc_data->common_data->num_sec_out) { + rsrc_data->secure_mode = mode; + rsrc_data->common_data->secure_mode = mode; + } else { + if (mode == rsrc_data->common_data->secure_mode) { + rsrc_data->secure_mode = + rsrc_data->common_data->secure_mode; + } else { + rc = -EINVAL; + CAM_ERR_RATE_LIMIT(CAM_ISP, + "Mismatch: Acquire mode[%d], drvr mode[%d]", + rsrc_data->common_data->secure_mode, + mode); + mutex_unlock( + &rsrc_data->common_data->bus_mutex); + return rc; + } + } + rsrc_data->common_data->num_sec_out++; + } + mutex_unlock(&rsrc_data->common_data->bus_mutex); + + ver2_bus_priv->tasklet_info = acq_args->tasklet; + rsrc_data->num_wm = num_wm; + rsrc_node->res_id = out_acquire_args->out_port_info->res_type; + rsrc_node->tasklet_info = acq_args->tasklet; + rsrc_node->cdm_ops = out_acquire_args->cdm_ops; + rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops; + + /* Reserve Composite Group */ + if (num_wm > 1 || (out_acquire_args->is_dual) || + (out_acquire_args->out_port_info->comp_grp_id > + CAM_ISP_RES_COMP_GROUP_NONE && + out_acquire_args->out_port_info->comp_grp_id < + CAM_ISP_RES_COMP_GROUP_ID_MAX)) { + + rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv, + out_acquire_args->out_port_info, + acq_args->tasklet, + out_acquire_args->unique_id, + out_acquire_args->is_dual, + out_acquire_args->is_master, + out_acquire_args->dual_slave_core, + &rsrc_data->comp_grp); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE%d Comp_Grp acquire fail for Out %d rc=%d", + rsrc_data->common_data->core_index, + vfe_out_res_id, rc); + return rc; + } + } + + /* Reserve WM */ + for (i = 0; i < num_wm; i++) { + rc = cam_vfe_bus_acquire_wm(ver2_bus_priv, + out_acquire_args->out_port_info, + acq_args->tasklet, + vfe_out_res_id, + i, + &rsrc_data->wm_res[i], + &client_done_mask, + out_acquire_args->is_dual); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE%d WM acquire failed for Out %d rc=%d", + rsrc_data->common_data->core_index, + vfe_out_res_id, rc); + goto release_wm; + } + + if 
(rsrc_data->comp_grp) + cam_vfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp, + client_done_mask); + } + + rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + out_acquire_args->rsrc_node = rsrc_node; + + CAM_DBG(CAM_ISP, "Acquire successful"); + return rc; + +release_wm: + for (i--; i >= 0; i--) + cam_vfe_bus_release_wm(ver2_bus_priv, rsrc_data->wm_res[i]); + + cam_vfe_bus_release_comp_grp(ver2_bus_priv, + rsrc_data->comp_grp); + + return rc; +} + +static int cam_vfe_bus_release_vfe_out(void *bus_priv, void *release_args, + uint32_t args_size) +{ + uint32_t i; + struct cam_isp_resource_node *vfe_out = NULL; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL; + uint32_t secure_caps = 0; + + if (!bus_priv || !release_args) { + CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK", + bus_priv, release_args); + return -EINVAL; + } + + vfe_out = release_args; + rsrc_data = vfe_out->res_priv; + + if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Invalid resource state:%d", + vfe_out->res_state); + } + + for (i = 0; i < rsrc_data->num_wm; i++) + cam_vfe_bus_release_wm(bus_priv, rsrc_data->wm_res[i]); + rsrc_data->num_wm = 0; + + if (rsrc_data->comp_grp) + cam_vfe_bus_release_comp_grp(bus_priv, rsrc_data->comp_grp); + rsrc_data->comp_grp = NULL; + + vfe_out->tasklet_info = NULL; + vfe_out->cdm_ops = NULL; + rsrc_data->cdm_util_ops = NULL; + + secure_caps = cam_vfe_bus_can_be_secure(rsrc_data->out_type); + mutex_lock(&rsrc_data->common_data->bus_mutex); + if (secure_caps) { + if (rsrc_data->secure_mode == + rsrc_data->common_data->secure_mode) { + rsrc_data->common_data->num_sec_out--; + rsrc_data->secure_mode = + CAM_SECURE_MODE_NON_SECURE; + } else { + /* + * The validity of the mode is properly + * checked while acquiring the output port. + * not expected to reach here, unless there is + * some corruption. 
+ */ + CAM_ERR(CAM_ISP, "driver[%d],resource[%d] mismatch", + rsrc_data->common_data->secure_mode, + rsrc_data->secure_mode); + } + + if (!rsrc_data->common_data->num_sec_out) + rsrc_data->common_data->secure_mode = + CAM_SECURE_MODE_NON_SECURE; + } + mutex_unlock(&rsrc_data->common_data->bus_mutex); + + if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + return 0; +} + +static int cam_vfe_bus_start_vfe_out( + struct cam_isp_resource_node *vfe_out) +{ + int rc = 0, i; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver2_common_data *common_data = NULL; + uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX]; + + if (!vfe_out) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + + rsrc_data = vfe_out->res_priv; + common_data = rsrc_data->common_data; + + CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type); + + if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Invalid resource state:%d", + vfe_out->res_state); + return -EACCES; + } + + memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask)); + for (i = 0; i < rsrc_data->num_wm; i++) + rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i], + bus_irq_reg_mask); + + if (rsrc_data->comp_grp) { + memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask)); + rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp, + bus_irq_reg_mask); + } + + vfe_out->irq_handle = cam_irq_controller_subscribe_irq( + common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1, + bus_irq_reg_mask, vfe_out, vfe_out->top_half_handler, + vfe_out->bottom_half_handler, vfe_out->tasklet_info, + &tasklet_bh_api, CAM_IRQ_EVT_GROUP_0); + + if (vfe_out->irq_handle < 1) { + CAM_ERR(CAM_ISP, "Subscribe IRQ failed for res_id %d", + vfe_out->res_id); + vfe_out->irq_handle = 0; + return -EFAULT; + } + + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static int cam_vfe_bus_stop_vfe_out( + struct 
cam_isp_resource_node *vfe_out) +{ + int rc = 0, i; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver2_common_data *common_data = NULL; + + if (!vfe_out) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + + rsrc_data = vfe_out->res_priv; + common_data = rsrc_data->common_data; + + if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE || + vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, "vfe_out res_state is %d", vfe_out->res_state); + return rc; + } + + if (rsrc_data->comp_grp) + rc = cam_vfe_bus_stop_comp_grp(rsrc_data->comp_grp); + + for (i = 0; i < rsrc_data->num_wm; i++) + rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]); + + if (vfe_out->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + common_data->bus_irq_controller, + vfe_out->irq_handle); + vfe_out->irq_handle = 0; + } + + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + return rc; +} + +static int cam_vfe_bus_handle_vfe_out_done_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *vfe_out = NULL; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_irq_evt_payload *evt_payload; + + vfe_out = th_payload->handler_priv; + if (!vfe_out) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource"); + return -ENODEV; + } + + rsrc_data = vfe_out->res_priv; + + CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]); + CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]); + CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]); + + rc = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "No tasklet_cmd is free in queue"); + CAM_ERR_RATE_LIMIT(CAM_ISP, + "IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x", + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1], + th_payload->evt_status_arr[2]); + + return rc; + } + + 
cam_isp_hw_get_timestamp(&evt_payload->ts); + + evt_payload->core_index = rsrc_data->common_data->core_index; + evt_payload->evt_id = evt_id; + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + th_payload->evt_payload_priv = evt_payload; + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +static int cam_vfe_bus_handle_vfe_out_done_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + int rc = -EINVAL; + struct cam_isp_resource_node *vfe_out = handler_priv; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv; + struct cam_isp_hw_event_info evt_info; + struct cam_isp_hw_bufdone_event_info bufdone_evt_info = {0}; + void *ctx = NULL; + uint32_t evt_id = 0; + uint32_t comp_grp_id = 0; + struct cam_vfe_bus_irq_evt_payload *evt_payload = + evt_payload_priv; + + /* + * If this resource has Composite Group then we only handle + * Composite done. We acquire Composite if number of WM > 1. + * So Else case is only one individual buf_done = WM[0]. 
+ */ + if (rsrc_data->comp_grp) { + rc = cam_vfe_bus_handle_comp_done_bottom_half( + rsrc_data->comp_grp, evt_payload_priv, &comp_grp_id); + } else { + rc = rsrc_data->wm_res[0]->bottom_half_handler( + rsrc_data->wm_res[0], evt_payload_priv); + } + + ctx = rsrc_data->priv; + + CAM_DBG(CAM_ISP, "vfe_out %d rc %d", rsrc_data->out_type, rc); + + switch (rc) { + case CAM_VFE_IRQ_STATUS_SUCCESS: + evt_id = evt_payload->evt_id; + + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.res_type = vfe_out->res_type; + evt_info.hw_idx = vfe_out->hw_intf->hw_idx; + if (rsrc_data->comp_grp) { + bufdone_evt_info.res_id = vfe_out->res_id; + bufdone_evt_info.comp_grp_id = comp_grp_id; + } else + bufdone_evt_info.res_id = vfe_out->res_id; + + evt_info.event_data = (void *)&bufdone_evt_info; + if (rsrc_data->common_data->event_cb) + rsrc_data->common_data->event_cb(ctx, + evt_id, (void *)&evt_info); + break; + default: + break; + } + + cam_vfe_bus_put_evt_payload(rsrc_data->common_data, &evt_payload); + + return rc; +} + +static int cam_vfe_bus_init_vfe_out_resource(uint32_t index, + struct cam_vfe_bus_ver2_priv *ver2_bus_priv, + struct cam_vfe_bus_ver2_hw_info *ver2_hw_info) +{ + struct cam_isp_resource_node *vfe_out = NULL; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL; + int rc = 0, i; + int32_t vfe_out_type = + ver2_hw_info->vfe_out_hw_info[index].vfe_out_type; + + if (vfe_out_type < 0 || + vfe_out_type >= CAM_VFE_BUS_VER2_VFE_OUT_MAX) { + CAM_ERR(CAM_ISP, "Init VFE Out failed, Invalid type=%d", + vfe_out_type); + return -EINVAL; + } + + vfe_out = &ver2_bus_priv->vfe_out[vfe_out_type]; + if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE || + vfe_out->res_priv) { + CAM_ERR(CAM_ISP, + "Error. 
Looks like same resource is init again"); + return -EFAULT; + } + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_ver2_vfe_out_data), + GFP_KERNEL); + if (!rsrc_data) { + rc = -ENOMEM; + return rc; + } + + vfe_out->res_priv = rsrc_data; + + vfe_out->res_type = CAM_ISP_RESOURCE_VFE_OUT; + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&vfe_out->list); + + rsrc_data->out_type = + ver2_hw_info->vfe_out_hw_info[index].vfe_out_type; + rsrc_data->common_data = &ver2_bus_priv->common_data; + rsrc_data->max_width = + ver2_hw_info->vfe_out_hw_info[index].max_width; + rsrc_data->max_height = + ver2_hw_info->vfe_out_hw_info[index].max_height; + rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE; + + vfe_out->start = cam_vfe_bus_start_vfe_out; + vfe_out->stop = cam_vfe_bus_stop_vfe_out; + vfe_out->top_half_handler = cam_vfe_bus_handle_vfe_out_done_top_half; + vfe_out->bottom_half_handler = + cam_vfe_bus_handle_vfe_out_done_bottom_half; + vfe_out->process_cmd = cam_vfe_bus_process_cmd; + vfe_out->hw_intf = ver2_bus_priv->common_data.hw_intf; + vfe_out->irq_handle = 0; + + for (i = 0; i < CAM_VFE_BUS_VER2_MAX_MID_PER_PORT; i++) + rsrc_data->mid[i] = ver2_hw_info->vfe_out_hw_info[index].mid[i]; + + return 0; +} + +static int cam_vfe_bus_deinit_vfe_out_resource( + struct cam_isp_resource_node *vfe_out) +{ + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv; + + if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) { + /* + * This is not error. It can happen if the resource is + * never supported in the HW. 
+ */ + CAM_DBG(CAM_ISP, "HW%d Res %d already deinitialized"); + return 0; + } + + vfe_out->start = NULL; + vfe_out->stop = NULL; + vfe_out->top_half_handler = NULL; + vfe_out->bottom_half_handler = NULL; + vfe_out->hw_intf = NULL; + vfe_out->irq_handle = 0; + + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&vfe_out->list); + vfe_out->res_priv = NULL; + + if (!rsrc_data) + return -ENOMEM; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static int cam_vfe_bus_ver2_handle_irq(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_vfe_bus_ver2_priv *bus_priv; + int rc = 0; + + bus_priv = th_payload->handler_priv; + CAM_DBG(CAM_ISP, "Enter"); + rc = cam_irq_controller_handle_irq(evt_id, + bus_priv->common_data.bus_irq_controller, CAM_IRQ_EVT_GROUP_0); + return (rc == IRQ_HANDLED) ? 0 : -EINVAL; +} + +static int cam_vfe_bus_error_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int i = 0, rc = 0; + struct cam_vfe_bus_ver2_priv *bus_priv = + th_payload->handler_priv; + struct cam_vfe_bus_irq_evt_payload *evt_payload; + + CAM_ERR_RATE_LIMIT(CAM_ISP, "Bus Err IRQ"); + for (i = 0; i < th_payload->num_registers; i++) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "vfe:%d: IRQ_Status%d: 0x%x", + bus_priv->common_data.core_index, i, + th_payload->evt_status_arr[i]); + } + cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller, + bus_priv->error_irq_handle); + + rc = cam_vfe_bus_get_evt_payload(&bus_priv->common_data, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Cannot get payload"); + return rc; + } + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + evt_payload->core_index = bus_priv->common_data.core_index; + evt_payload->evt_id = evt_id; + evt_payload->debug_status_0 = cam_io_r_mb( + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->debug_status_0); + + th_payload->evt_payload_priv = evt_payload; + + 
return rc; +} + +static void cam_vfe_bus_update_ubwc_meta_addr( + uint32_t *reg_val_pair, + uint32_t *j, + void *regs, + uint64_t image_buf) +{ + uint32_t camera_hw_version; + struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_regs; + struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *ubwc_3_regs; + int rc = 0; + + if (!regs || !reg_val_pair || !j) { + CAM_ERR(CAM_ISP, "Invalid args"); + goto end; + } + + rc = cam_cpas_get_cpas_hw_version(&camera_hw_version); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to get HW version rc: %d", rc); + goto end; + } else if ((camera_hw_version < CAM_CPAS_TITAN_170_V100) || + (camera_hw_version > CAM_CPAS_TITAN_175_V130)) { + CAM_ERR(CAM_ISP, "Invalid HW version: %d", + camera_hw_version); + goto end; + } + + switch (camera_hw_version) { + case CAM_CPAS_TITAN_170_V100: + case CAM_CPAS_TITAN_170_V110: + case CAM_CPAS_TITAN_170_V120: + case CAM_CPAS_TITAN_170_V200: + ubwc_regs = + (struct cam_vfe_bus_ver2_reg_offset_ubwc_client *)regs; + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->meta_addr, + image_buf); + break; + case CAM_CPAS_TITAN_175_V100: + case CAM_CPAS_TITAN_175_V101: + case CAM_CPAS_TITAN_175_V120: + case CAM_CPAS_TITAN_175_V130: + case CAM_CPAS_TITAN_165_V100: + ubwc_3_regs = + (struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *) + regs; + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_3_regs->meta_addr, + image_buf); + break; + default: + break; + } +end: + return; +} + +static int cam_vfe_bus_update_ubwc_3_regs( + struct cam_vfe_bus_ver2_wm_resource_data *wm_data, + uint32_t *reg_val_pair, uint32_t i, uint32_t *j) +{ + struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *ubwc_regs; + int rc = 0; + + if (!wm_data || !reg_val_pair || !j) { + CAM_ERR(CAM_ISP, "Invalid args"); + rc = -EINVAL; + goto end; + } + + ubwc_regs = (struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *) + wm_data->hw_regs->ubwc_regs; + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + 
MAX_REG_VAL_PAIR_SIZE, *j, + wm_data->hw_regs->packer_cfg, wm_data->packer_cfg); + CAM_DBG(CAM_ISP, "WM %d packer cfg 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + if (wm_data->is_dual) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->tile_cfg, wm_data->tile_cfg); + } else { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->tile_cfg, wm_data->tile_cfg); + CAM_DBG(CAM_ISP, "WM %d tile cfg 0x%x", + wm_data->index, reg_val_pair[*j-1]); + } + + if (wm_data->is_dual) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->h_init, wm_data->offset); + } else { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->h_init, wm_data->h_init); + CAM_DBG(CAM_ISP, "WM %d h_init 0x%x", + wm_data->index, reg_val_pair[*j-1]); + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->v_init, wm_data->v_init); + CAM_DBG(CAM_ISP, "WM %d v_init 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->meta_stride, wm_data->ubwc_meta_stride); + CAM_DBG(CAM_ISP, "WM %d meta stride 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + if (wm_data->common_data->disable_ubwc_comp) { + wm_data->ubwc_mode_cfg_0 &= ~ubwc_regs->ubwc_comp_en_bit; + CAM_DBG(CAM_ISP, + "Force disable UBWC compression on VFE:%d WM:%d", + wm_data->common_data->core_index, wm_data->index); + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->mode_cfg_0, wm_data->ubwc_mode_cfg_0); + CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_0 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->mode_cfg_1, wm_data->ubwc_mode_cfg_1); + CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_1 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + 
ubwc_regs->meta_offset, wm_data->ubwc_meta_offset); + CAM_DBG(CAM_ISP, "WM %d ubwc meta offset 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + if (wm_data->ubwc_bandwidth_limit) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->bw_limit, wm_data->ubwc_bandwidth_limit); + CAM_DBG(CAM_ISP, "WM %d ubwc bw limit 0x%x", + wm_data->index, wm_data->ubwc_bandwidth_limit); + } + +end: + return rc; +} + +static int cam_vfe_bus_update_ubwc_legacy_regs( + struct cam_vfe_bus_ver2_wm_resource_data *wm_data, + uint32_t camera_hw_version, uint32_t *reg_val_pair, + uint32_t i, uint32_t *j) +{ + struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_regs; + uint32_t ubwc_bw_limit = 0; + int rc = 0; + + if (!wm_data || !reg_val_pair || !j) { + CAM_ERR(CAM_ISP, "Invalid args"); + rc = -EINVAL; + goto end; + } + + ubwc_regs = (struct cam_vfe_bus_ver2_reg_offset_ubwc_client *) + wm_data->hw_regs->ubwc_regs; + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + wm_data->hw_regs->packer_cfg, wm_data->packer_cfg); + CAM_DBG(CAM_ISP, "WM %d packer cfg 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + if (wm_data->is_dual) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->tile_cfg, wm_data->tile_cfg); + } else { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->tile_cfg, wm_data->tile_cfg); + CAM_DBG(CAM_ISP, "WM %d tile cfg 0x%x", + wm_data->index, reg_val_pair[*j-1]); + } + + if (wm_data->is_dual) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->h_init, wm_data->offset); + } else { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->h_init, wm_data->h_init); + CAM_DBG(CAM_ISP, "WM %d h_init 0x%x", + wm_data->index, reg_val_pair[*j-1]); + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->v_init, wm_data->v_init); + CAM_DBG(CAM_ISP, "WM %d v_init 0x%x", + 
wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->meta_stride, wm_data->ubwc_meta_stride); + CAM_DBG(CAM_ISP, "WM %d meta stride 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + if (wm_data->common_data->disable_ubwc_comp) { + wm_data->ubwc_mode_cfg_0 &= ~ubwc_regs->ubwc_comp_en_bit; + CAM_DBG(CAM_ISP, + "Force disable UBWC compression on VFE:%d WM:%d", + wm_data->common_data->core_index, wm_data->index); + } + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->mode_cfg_0, wm_data->ubwc_mode_cfg_0); + CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_0 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->meta_offset, wm_data->ubwc_meta_offset); + CAM_DBG(CAM_ISP, "WM %d ubwc meta offset 0x%x", + wm_data->index, reg_val_pair[*j-1]); + + if (camera_hw_version == CAM_CPAS_TITAN_170_V110) { + switch (wm_data->format) { + case CAM_FORMAT_UBWC_TP10: + ubwc_bw_limit = 0x8 | BIT(0); + break; + case CAM_FORMAT_UBWC_NV12_4R: + ubwc_bw_limit = 0xB | BIT(0); + break; + default: + ubwc_bw_limit = 0; + break; + } + } + + if (ubwc_bw_limit) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->bw_limit, ubwc_bw_limit); + CAM_DBG(CAM_ISP, "WM %d ubwc bw limit 0x%x", + wm_data->index, ubwc_bw_limit); + } + +end: + return rc; +} + +static int cam_vfe_bus_update_ubwc_regs( + struct cam_vfe_bus_ver2_wm_resource_data *wm_data, + uint32_t *reg_val_pair, uint32_t i, uint32_t *j) +{ + uint32_t camera_hw_version; + int rc = 0; + + if (!wm_data || !reg_val_pair || !j) { + CAM_ERR(CAM_ISP, "Invalid args"); + rc = -EINVAL; + goto end; + } + + rc = cam_cpas_get_cpas_hw_version(&camera_hw_version); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to get HW version rc: %d", rc); + goto end; + } else if ((camera_hw_version <= CAM_CPAS_TITAN_NONE) || + (camera_hw_version >= CAM_CPAS_TITAN_MAX)) { + 
CAM_ERR(CAM_ISP, "Invalid HW version: %d", + camera_hw_version); + rc = -EINVAL; + goto end; + } + switch (camera_hw_version) { + case CAM_CPAS_TITAN_170_V100: + case CAM_CPAS_TITAN_170_V110: + case CAM_CPAS_TITAN_170_V120: + case CAM_CPAS_TITAN_170_V200: + rc = cam_vfe_bus_update_ubwc_legacy_regs( + wm_data, camera_hw_version, reg_val_pair, i, j); + break; + case CAM_CPAS_TITAN_175_V100: + case CAM_CPAS_TITAN_175_V101: + case CAM_CPAS_TITAN_175_V120: + case CAM_CPAS_TITAN_175_V130: + case CAM_CPAS_TITAN_165_V100: + rc = cam_vfe_bus_update_ubwc_3_regs( + wm_data, reg_val_pair, i, j); + break; + default: + break; + } + + if (rc) + CAM_ERR(CAM_ISP, "Failed to update ubwc regs rc:%d", rc); + +end: + return rc; +} + +static int cam_vfe_bus_update_wm(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_buf_io_cfg *io_cfg; + struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t i, j, k, size = 0; + uint32_t frame_inc = 0, val; + uint32_t loop_size = 0; + + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *) + update_buf->res->res_priv; + + if (!vfe_out_data || !vfe_out_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Failed! Invalid data"); + return -EINVAL; + } + + cdm_util_ops = vfe_out_data->cdm_util_ops; + + if (update_buf->wm_update->num_buf != vfe_out_data->num_wm) { + CAM_ERR(CAM_ISP, + "Failed! 
Invalid number buffers:%d required:%d", + update_buf->wm_update->num_buf, vfe_out_data->num_wm); + return -EINVAL; + } + + reg_val_pair = &vfe_out_data->common_data->io_buf_update[0]; + io_cfg = update_buf->wm_update->io_cfg; + + for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_ISP, + "reg_val_pair %d exceeds the array limit %zu", + j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + wm_data = vfe_out_data->wm_res[i]->res_priv; + /* update width register */ + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->buffer_width_cfg, + wm_data->width); + CAM_DBG(CAM_ISP, "WM %d image width 0x%x", + wm_data->index, reg_val_pair[j-1]); + + /* For initial configuration program all bus registers */ + val = io_cfg->planes[i].plane_stride; + CAM_DBG(CAM_ISP, "before stride %d", val); + val = ALIGNUP(val, 16); + if (val != io_cfg->planes[i].plane_stride && + val != wm_data->stride) + CAM_WARN(CAM_ISP, + "Warning stride %u expected %u", + io_cfg->planes[i].plane_stride, + val); + + if ((wm_data->stride != val || + !wm_data->init_cfg_done) && (wm_data->index >= 3)) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->stride, + io_cfg->planes[i].plane_stride); + wm_data->stride = val; + CAM_DBG(CAM_ISP, "WM %d image stride 0x%x", + wm_data->index, reg_val_pair[j-1]); + } + + if (wm_data->en_ubwc) { + if (!wm_data->hw_regs->ubwc_regs) { + CAM_ERR(CAM_ISP, + "No UBWC register to configure."); + return -EINVAL; + } + if (wm_data->ubwc_updated) { + wm_data->ubwc_updated = false; + cam_vfe_bus_update_ubwc_regs( + wm_data, reg_val_pair, i, &j); + } + + /* UBWC meta address */ + cam_vfe_bus_update_ubwc_meta_addr( + reg_val_pair, &j, + wm_data->hw_regs->ubwc_regs, + update_buf->wm_update->image_buf[i]); + CAM_DBG(CAM_ISP, "WM %d ubwc meta addr 0x%llx", + wm_data->index, + update_buf->wm_update->image_buf[i]); + } + + if 
(wm_data->en_ubwc) { + frame_inc = ALIGNUP(io_cfg->planes[i].plane_stride * + io_cfg->planes[i].slice_height, 4096); + frame_inc += io_cfg->planes[i].meta_size; + CAM_DBG(CAM_ISP, + "WM %d frm %d: ht: %d stride %d meta: %d", + wm_data->index, frame_inc, + io_cfg->planes[i].slice_height, + io_cfg->planes[i].plane_stride, + io_cfg->planes[i].meta_size); + } else { + frame_inc = io_cfg->planes[i].plane_stride * + io_cfg->planes[i].slice_height; + } + + if (wm_data->index < 3) + loop_size = wm_data->irq_subsample_period + 1; + else + loop_size = 1; + + /* WM Image address */ + for (k = 0; k < loop_size; k++) { + if (wm_data->en_ubwc) + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_addr, + update_buf->wm_update->image_buf[i] + + io_cfg->planes[i].meta_size + + k * frame_inc); + else + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_addr, + update_buf->wm_update->image_buf[i] + + wm_data->offset + k * frame_inc); + CAM_DBG(CAM_ISP, "WM %d image address 0x%x", + wm_data->index, reg_val_pair[j-1]); + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->frame_inc, frame_inc); + CAM_DBG(CAM_ISP, "WM %d frame_inc %d", + wm_data->index, reg_val_pair[j-1]); + + + /* enable the WM */ + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->cfg, + wm_data->en_cfg); + + /* set initial configuration done */ + if (!wm_data->init_cfg_done) + wm_data->init_cfg_done = true; + } + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > update_buf->cmd.size) { + CAM_ERR(CAM_ISP, + "Failed! 
Buf size:%d insufficient, expected size:%d", + update_buf->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + update_buf->cmd.cmd_buf_addr, + num_regval_pairs, reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + update_buf->cmd.used_bytes = size * 4; + } else { + update_buf->cmd.used_bytes = 0; + CAM_DBG(CAM_ISP, + "No reg val pairs. num_wms: %u", + vfe_out_data->num_wm); + } + + return 0; +} + +static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *update_hfr; + struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL; + struct cam_isp_port_hfr_config *hfr_cfg = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t i, j, size = 0; + + update_hfr = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *) + update_hfr->res->res_priv; + + if (!vfe_out_data || !vfe_out_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Failed! 
Invalid data"); + return -EINVAL; + } + + cdm_util_ops = vfe_out_data->cdm_util_ops; + + reg_val_pair = &vfe_out_data->common_data->io_buf_update[0]; + hfr_cfg = (struct cam_isp_port_hfr_config *)update_hfr->data; + + for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_ISP, + "reg_val_pair %d exceeds the array limit %zu", + j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + wm_data = vfe_out_data->wm_res[i]->res_priv; + + if (wm_data->index <= 2 && hfr_cfg->subsample_period > 3) { + CAM_ERR(CAM_ISP, + "RDI doesn't support irq subsample period %d", + hfr_cfg->subsample_period); + return -EINVAL; + } + + if ((wm_data->framedrop_pattern != + hfr_cfg->framedrop_pattern) || + !wm_data->hfr_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->framedrop_pattern, + hfr_cfg->framedrop_pattern); + wm_data->framedrop_pattern = hfr_cfg->framedrop_pattern; + CAM_DBG(CAM_ISP, "WM %d framedrop pattern 0x%x", + wm_data->index, wm_data->framedrop_pattern); + } + + if (wm_data->framedrop_period != hfr_cfg->framedrop_period || + !wm_data->hfr_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->framedrop_period, + hfr_cfg->framedrop_period); + wm_data->framedrop_period = hfr_cfg->framedrop_period; + CAM_DBG(CAM_ISP, "WM %d framedrop period 0x%x", + wm_data->index, wm_data->framedrop_period); + } + + if (wm_data->irq_subsample_period != hfr_cfg->subsample_period + || !wm_data->hfr_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->irq_subsample_period, + hfr_cfg->subsample_period); + wm_data->irq_subsample_period = + hfr_cfg->subsample_period; + CAM_DBG(CAM_ISP, "WM %d irq subsample period 0x%x", + wm_data->index, wm_data->irq_subsample_period); + } + + if (wm_data->irq_subsample_pattern != hfr_cfg->subsample_pattern + || !wm_data->hfr_cfg_done) { + 
CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->irq_subsample_pattern, + hfr_cfg->subsample_pattern); + wm_data->irq_subsample_pattern = + hfr_cfg->subsample_pattern; + CAM_DBG(CAM_ISP, "WM %d irq subsample pattern 0x%x", + wm_data->index, wm_data->irq_subsample_pattern); + } + + /* set initial configuration done */ + if (!wm_data->hfr_cfg_done) + wm_data->hfr_cfg_done = true; + } + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > update_hfr->cmd.size) { + CAM_ERR(CAM_ISP, + "Failed! Buf size:%d insufficient, expected size:%d", + update_hfr->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + update_hfr->cmd.cmd_buf_addr, + num_regval_pairs, reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + update_hfr->cmd.used_bytes = size * 4; + } else { + update_hfr->cmd.used_bytes = 0; + CAM_DBG(CAM_ISP, + "No reg val pairs. 
num_wms: %u", + vfe_out_data->num_wm); + } + + return 0; +} + +static int cam_vfe_bus_update_ubwc_config(void *cmd_args) +{ + struct cam_isp_hw_get_cmd_update *update_ubwc; + struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL; + struct cam_ubwc_plane_cfg_v1 *ubwc_plane_cfg = NULL; + uint32_t i; + int rc = 0; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + rc = -EINVAL; + goto end; + } + + update_ubwc = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *) + update_ubwc->res->res_priv; + + if (!vfe_out_data || !vfe_out_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid data"); + rc = -EINVAL; + goto end; + } + + ubwc_plane_cfg = (struct cam_ubwc_plane_cfg_v1 *)update_ubwc->data; + + for (i = 0; i < vfe_out_data->num_wm; i++) { + + wm_data = vfe_out_data->wm_res[i]->res_priv; + if (i > 0) + ubwc_plane_cfg++; + + if (!wm_data->hw_regs->ubwc_regs) { + CAM_ERR(CAM_ISP, + "No UBWC register to configure."); + rc = -EINVAL; + goto end; + } + + if (!wm_data->en_ubwc) { + CAM_ERR(CAM_ISP, "UBWC Disabled"); + rc = -EINVAL; + goto end; + } + + if (wm_data->packer_cfg != + ubwc_plane_cfg->packer_config || + !wm_data->init_cfg_done) { + wm_data->packer_cfg = ubwc_plane_cfg->packer_config; + wm_data->ubwc_updated = true; + } + + if ((!wm_data->is_dual) && ((wm_data->tile_cfg != + ubwc_plane_cfg->tile_config) + || !wm_data->init_cfg_done)) { + wm_data->tile_cfg = ubwc_plane_cfg->tile_config; + wm_data->ubwc_updated = true; + } + + if ((!wm_data->is_dual) && ((wm_data->h_init != + ubwc_plane_cfg->h_init) || + !wm_data->init_cfg_done)) { + wm_data->h_init = ubwc_plane_cfg->h_init; + wm_data->ubwc_updated = true; + } + + if (wm_data->v_init != ubwc_plane_cfg->v_init || + !wm_data->init_cfg_done) { + wm_data->v_init = ubwc_plane_cfg->v_init; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_meta_stride != + ubwc_plane_cfg->meta_stride || + 
!wm_data->init_cfg_done) { + wm_data->ubwc_meta_stride = ubwc_plane_cfg->meta_stride; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_mode_cfg_0 != + ubwc_plane_cfg->mode_config_0 || + !wm_data->init_cfg_done) { + wm_data->ubwc_mode_cfg_0 = + ubwc_plane_cfg->mode_config_0; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_mode_cfg_1 != + ubwc_plane_cfg->mode_config_1 || + !wm_data->init_cfg_done) { + wm_data->ubwc_mode_cfg_1 = + ubwc_plane_cfg->mode_config_1; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_meta_offset != + ubwc_plane_cfg->meta_offset || + !wm_data->init_cfg_done) { + wm_data->ubwc_meta_offset = ubwc_plane_cfg->meta_offset; + wm_data->ubwc_updated = true; + } + } + +end: + return rc; +} + +static int cam_vfe_bus_update_ubwc_config_v2(void *cmd_args) +{ + struct cam_isp_hw_get_cmd_update *update_ubwc; + struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL; + struct cam_vfe_generic_ubwc_config *ubwc_generic_cfg = NULL; + struct cam_vfe_generic_ubwc_plane_config *ubwc_generic_plane_cfg = NULL; + uint32_t i; + int rc = 0; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + rc = -EINVAL; + goto end; + } + + update_ubwc = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *) + update_ubwc->res->res_priv; + + if (!vfe_out_data || !vfe_out_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid data"); + rc = -EINVAL; + goto end; + } + + ubwc_generic_cfg = (struct cam_vfe_generic_ubwc_config *) + update_ubwc->data; + + for (i = 0; i < vfe_out_data->num_wm; i++) { + + wm_data = vfe_out_data->wm_res[i]->res_priv; + ubwc_generic_plane_cfg = &ubwc_generic_cfg->ubwc_plane_cfg[i]; + + if (!wm_data->hw_regs->ubwc_regs) { + CAM_ERR(CAM_ISP, + "No UBWC register to configure."); + rc = -EINVAL; + goto end; + } + + if (!wm_data->en_ubwc) { + CAM_ERR(CAM_ISP, "UBWC Disabled"); + rc = -EINVAL; + goto end; + } + + if 
(wm_data->packer_cfg != + ubwc_generic_plane_cfg->packer_config || + !wm_data->init_cfg_done) { + wm_data->packer_cfg = + ubwc_generic_plane_cfg->packer_config; + wm_data->ubwc_updated = true; + } + + if ((!wm_data->is_dual) && ((wm_data->tile_cfg != + ubwc_generic_plane_cfg->tile_config) + || !wm_data->init_cfg_done)) { + wm_data->tile_cfg = ubwc_generic_plane_cfg->tile_config; + wm_data->ubwc_updated = true; + } + + if ((!wm_data->is_dual) && ((wm_data->h_init != + ubwc_generic_plane_cfg->h_init) || + !wm_data->init_cfg_done)) { + wm_data->h_init = ubwc_generic_plane_cfg->h_init; + wm_data->ubwc_updated = true; + } + + if (wm_data->v_init != ubwc_generic_plane_cfg->v_init || + !wm_data->init_cfg_done) { + wm_data->v_init = ubwc_generic_plane_cfg->v_init; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_meta_stride != + ubwc_generic_plane_cfg->meta_stride || + !wm_data->init_cfg_done) { + wm_data->ubwc_meta_stride = + ubwc_generic_plane_cfg->meta_stride; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_mode_cfg_0 != + ubwc_generic_plane_cfg->mode_config_0 || + !wm_data->init_cfg_done) { + wm_data->ubwc_mode_cfg_0 = + ubwc_generic_plane_cfg->mode_config_0; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_mode_cfg_1 != + ubwc_generic_plane_cfg->mode_config_1 || + !wm_data->init_cfg_done) { + wm_data->ubwc_mode_cfg_1 = + ubwc_generic_plane_cfg->mode_config_1; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_meta_offset != + ubwc_generic_plane_cfg->meta_offset || + !wm_data->init_cfg_done) { + wm_data->ubwc_meta_offset = + ubwc_generic_plane_cfg->meta_offset; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_lossy_threshold_0 != + ubwc_generic_plane_cfg->lossy_threshold_0 || + !wm_data->init_cfg_done) { + wm_data->ubwc_lossy_threshold_0 = + ubwc_generic_plane_cfg->lossy_threshold_0; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_lossy_threshold_1 != + ubwc_generic_plane_cfg->lossy_threshold_1 || + 
!wm_data->init_cfg_done) { + wm_data->ubwc_lossy_threshold_1 = + ubwc_generic_plane_cfg->lossy_threshold_1; + wm_data->ubwc_updated = true; + } + + if (wm_data->ubwc_bandwidth_limit != + ubwc_generic_plane_cfg->bandwidth_limit || + !wm_data->init_cfg_done) { + wm_data->ubwc_bandwidth_limit = + ubwc_generic_plane_cfg->bandwidth_limit; + wm_data->ubwc_updated = true; + } + } + +end: + return rc; +} + + +static int cam_vfe_bus_update_stripe_cfg(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_vfe_bus_ver2_priv *bus_priv; + struct cam_isp_hw_dual_isp_update_args *stripe_args; + struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL; + struct cam_isp_dual_stripe_config *stripe_config; + uint32_t outport_id, ports_plane_idx, i; + + bus_priv = (struct cam_vfe_bus_ver2_priv *) priv; + stripe_args = (struct cam_isp_hw_dual_isp_update_args *)cmd_args; + + vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *) + stripe_args->res->res_priv; + + if (!vfe_out_data) { + CAM_ERR(CAM_ISP, "Failed! 
Invalid data"); + return -EINVAL; + } + + outport_id = stripe_args->res->res_id & 0xFF; + if (stripe_args->res->res_id < CAM_ISP_IFE_OUT_RES_BASE || + stripe_args->res->res_id >= bus_priv->max_out_res) + return 0; + + ports_plane_idx = (stripe_args->split_id * + (stripe_args->dual_cfg->num_ports * CAM_PACKET_MAX_PLANES)) + + (outport_id * CAM_PACKET_MAX_PLANES); + for (i = 0; i < vfe_out_data->num_wm; i++) { + wm_data = vfe_out_data->wm_res[i]->res_priv; + stripe_config = (struct cam_isp_dual_stripe_config *) + &stripe_args->dual_cfg->stripes_flex[ports_plane_idx + i]; + wm_data->width = stripe_config->width; + wm_data->offset = stripe_config->offset; + wm_data->tile_cfg = stripe_config->tileconfig; + CAM_DBG(CAM_ISP, "id:%x wm:%d width:0x%x offset:%x tilecfg:%x", + stripe_args->res->res_id, i, wm_data->width, + wm_data->offset, wm_data->tile_cfg); + } + + return 0; +} + +static int cam_vfe_bus_start_hw(void *hw_priv, + void *start_hw_args, uint32_t arg_size) +{ + return cam_vfe_bus_start_vfe_out(hw_priv); +} + +static int cam_vfe_bus_stop_hw(void *hw_priv, + void *stop_hw_args, uint32_t arg_size) +{ + return cam_vfe_bus_stop_vfe_out(hw_priv); +} + +static int cam_vfe_bus_init_hw(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv; + uint32_t top_irq_reg_mask[2] = {0}; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + if (bus_priv->common_data.hw_init) + return 0; + + top_irq_reg_mask[0] = (1 << bus_priv->top_irq_shift); + + bus_priv->irq_handle = cam_irq_controller_subscribe_irq( + bus_priv->common_data.vfe_irq_controller, + CAM_IRQ_PRIORITY_2, + top_irq_reg_mask, + bus_priv, + cam_vfe_bus_ver2_handle_irq, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->irq_handle < 1) { + CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ"); + bus_priv->irq_handle = 0; + return -EFAULT; + } + + if (bus_priv->tasklet_info != NULL) { + bus_priv->error_irq_handle = 
cam_irq_controller_subscribe_irq( + bus_priv->common_data.bus_irq_controller, + CAM_IRQ_PRIORITY_0, + bus_error_irq_mask, + bus_priv, + cam_vfe_bus_error_irq_top_half, + cam_vfe_bus_err_bottom_half, + bus_priv->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->error_irq_handle < 1) { + CAM_ERR(CAM_ISP, "Failed to subscribe BUS error IRQ %d", + bus_priv->error_irq_handle); + bus_priv->error_irq_handle = 0; + return -EFAULT; + } + } + + /*Set Debug Registers*/ + cam_io_w_mb(CAM_VFE_BUS_SET_DEBUG_REG, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->debug_status_cfg); + + /* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */ + cam_io_w_mb(0x0, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->addr_sync_frame_hdr); + + /* no clock gating at bus input */ + cam_io_w_mb(0xFFFFF, bus_priv->common_data.mem_base + 0x0000200C); + + /* BUS_WR_TEST_BUS_CTRL */ + cam_io_w_mb(0x0, bus_priv->common_data.mem_base + 0x0000211C); + + /* if addr_no_sync has default value then config the addr no sync reg */ + cam_io_w_mb(CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL, + bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->addr_sync_no_sync); + + bus_priv->common_data.hw_init = true; + + return 0; +} + +static int cam_vfe_bus_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size) +{ + struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv; + int rc = 0, i; + unsigned long flags; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Error: Invalid args"); + return -EINVAL; + } + + if (!bus_priv->common_data.hw_init) + return 0; + + if (bus_priv->error_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + bus_priv->common_data.bus_irq_controller, + bus_priv->error_irq_handle); + bus_priv->error_irq_handle = 0; + } + + if (bus_priv->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq( + bus_priv->common_data.vfe_irq_controller, + bus_priv->irq_handle); + bus_priv->irq_handle = 0; + } + + 
spin_lock_irqsave(&bus_priv->common_data.spin_lock, flags); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + list_add_tail(&bus_priv->common_data.evt_payload[i].list, + &bus_priv->common_data.free_payload_list); + } + bus_priv->common_data.hw_init = false; + spin_unlock_irqrestore(&bus_priv->common_data.spin_lock, flags); + + return rc; +} + +static int __cam_vfe_bus_process_cmd(void *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + return cam_vfe_bus_process_cmd(priv, cmd_type, cmd_args, arg_size); +} + +int cam_vfe_bus_dump_wm_data(void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_bus_ver2_priv *bus_priv = + (struct cam_vfe_bus_ver2_priv *) priv; + struct cam_isp_hw_event_info *event_info = + (struct cam_isp_hw_event_info *)cmd_args; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL; + int i, wm_idx; + enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id; + + vfe_out_res_id = cam_vfe_bus_get_out_res_id(event_info->res_id); + if (vfe_out_res_id >= CAM_VFE_BUS_VER2_VFE_OUT_MAX) { + CAM_ERR(CAM_ISP, "Unsupported res_id: %u", vfe_out_res_id); + return -EINVAL; + } + + rsrc_node = &bus_priv->vfe_out[vfe_out_res_id]; + rsrc_data = rsrc_node->res_priv; + for (i = 0; i < rsrc_data->num_wm; i++) { + wm_idx = cam_vfe_bus_get_wm_idx(vfe_out_res_id, i); + if (wm_idx < 0 || wm_idx >= bus_priv->num_client) { + CAM_ERR(CAM_ISP, "Unsupported VFE out %d", + vfe_out_res_id); + return -EINVAL; + } + wm_data = bus_priv->bus_client[wm_idx].res_priv; + CAM_INFO(CAM_ISP, + "VFE:%d WM:%d width:%u height:%u stride:%u x_init:%u en_cfg:%u acquired width:%u height:%u", + wm_data->common_data->core_index, wm_idx, + wm_data->width, + wm_data->height, + wm_data->stride, wm_data->h_init, + wm_data->en_cfg, + 
wm_data->acquired_width, + wm_data->acquired_height); + } + return 0; +} + +static int cam_vfe_bus_get_res_for_mid( + struct cam_vfe_bus_ver2_priv *bus_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_bus_ver2_vfe_out_data *out_data = NULL; + struct cam_isp_hw_get_cmd_update *cmd_update = cmd_args; + struct cam_isp_hw_get_res_for_mid *get_res = NULL; + int i, j; + + get_res = (struct cam_isp_hw_get_res_for_mid *)cmd_update->data; + if (!get_res) { + CAM_ERR(CAM_ISP, + "invalid get resource for mid paramas"); + return -EINVAL; + } + + for (i = 0; i < bus_priv->num_out; i++) { + out_data = (struct cam_vfe_bus_ver2_vfe_out_data *) + bus_priv->vfe_out[i].res_priv; + + if (!out_data) + continue; + + for (j = 0; j < CAM_VFE_BUS_VER2_MAX_MID_PER_PORT; j++) { + if (out_data->mid[j] == get_res->mid) + goto end; + } + } + + if (i == bus_priv->num_out) { + CAM_ERR(CAM_ISP, + "mid:%d does not match with any out resource", + get_res->mid); + get_res->out_res_id = 0; + return -EINVAL; + } + +end: + CAM_INFO(CAM_ISP, "match mid :%d out resource:0x%x found", + get_res->mid, bus_priv->vfe_out[i].res_id); + get_res->out_res_id = bus_priv->vfe_out[i].res_id; + return 0; +} + +static int cam_vfe_bus_process_cmd( + struct cam_isp_resource_node *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + struct cam_vfe_bus_ver2_priv *bus_priv; + uint32_t top_mask_0 = 0; + struct cam_isp_hw_cap *vfe_bus_cap; + + + if (!priv || !cmd_args) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_BUF_UPDATE: + rc = cam_vfe_bus_update_wm(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_HFR_UPDATE: + rc = cam_vfe_bus_update_hfr(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_WM_SECURE_MODE: + rc = cam_vfe_bus_get_secure_mode(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_STRIPE_UPDATE: + rc = cam_vfe_bus_update_stripe_cfg(priv, cmd_args, 
arg_size); + break; + case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ: + bus_priv = (struct cam_vfe_bus_ver2_priv *) priv; + if (bus_priv->error_irq_handle) { + CAM_DBG(CAM_ISP, "Mask off bus error irq handler"); + rc = cam_irq_controller_unsubscribe_irq( + bus_priv->common_data.bus_irq_controller, + bus_priv->error_irq_handle); + bus_priv->error_irq_handle = 0; + } + break; + case CAM_ISP_HW_CMD_UBWC_UPDATE: + rc = cam_vfe_bus_update_ubwc_config(cmd_args); + break; + case CAM_ISP_HW_CMD_DUMP_BUS_INFO: + rc = cam_vfe_bus_dump_wm_data(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_UBWC_UPDATE_V2: + rc = cam_vfe_bus_update_ubwc_config_v2(cmd_args); + break; + case CAM_ISP_HW_CMD_UNMASK_BUS_WR_IRQ: + bus_priv = (struct cam_vfe_bus_ver2_priv *) priv; + top_mask_0 = cam_io_r_mb(bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->top_irq_mask_0); + top_mask_0 |= (1 << bus_priv->top_irq_shift); + cam_io_w_mb(top_mask_0, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->top_irq_mask_0); + break; + case CAM_ISP_HW_CMD_GET_RES_FOR_MID: + bus_priv = (struct cam_vfe_bus_ver2_priv *) priv; + rc = cam_vfe_bus_get_res_for_mid(bus_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_QUERY_CAP: + bus_priv = (struct cam_vfe_bus_ver2_priv *) priv; + vfe_bus_cap = (struct cam_isp_hw_cap *) cmd_args; + vfe_bus_cap->max_out_res_type = bus_priv->max_out_res; + vfe_bus_cap->support_consumed_addr = + bus_priv->common_data.support_consumed_addr; + + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d", + cmd_type); + break; + } + + return rc; +} + +int cam_vfe_bus_ver2_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus) +{ + int i, rc = 0; + struct cam_vfe_bus_ver2_priv *bus_priv = NULL; + struct cam_vfe_bus *vfe_bus_local; + struct cam_vfe_bus_ver2_hw_info *ver2_hw_info = bus_hw_info; + + CAM_DBG(CAM_ISP, "Enter"); + + 
if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) {
		CAM_ERR(CAM_ISP,
			"Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK",
			soc_info, hw_intf, bus_hw_info);
		CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller);
		rc = -EINVAL;
		goto end;
	}

	/* NOTE(review): alloc failures below log at CAM_DBG, not CAM_ERR —
	 * presumably intentional for quiet probe retries; confirm.
	 */
	vfe_bus_local = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus), GFP_KERNEL);
	if (!vfe_bus_local) {
		CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus");
		rc = -ENOMEM;
		goto end;
	}

	bus_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_ver2_priv),
		GFP_KERNEL);
	if (!bus_priv) {
		CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv");
		rc = -ENOMEM;
		goto free_bus_local;
	}
	vfe_bus_local->bus_priv = bus_priv;

	/* Copy static capabilities from the per-version HW info table and
	 * initialize common data (register base, IRQ controller handle,
	 * secure-mode defaults) before any resource is created.
	 */
	bus_priv->num_client = ver2_hw_info->num_client;
	bus_priv->num_out = ver2_hw_info->num_out;
	bus_priv->top_irq_shift = ver2_hw_info->top_irq_shift;
	bus_priv->max_out_res = ver2_hw_info->max_out_res;
	bus_priv->common_data.num_sec_out = 0;
	bus_priv->common_data.secure_mode = CAM_SECURE_MODE_NON_SECURE;
	bus_priv->common_data.core_index = soc_info->index;
	bus_priv->common_data.mem_base =
		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX);
	bus_priv->common_data.hw_intf = hw_intf;
	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
	bus_priv->common_data.common_reg = &ver2_hw_info->common_reg;
	bus_priv->common_data.addr_no_sync =
		CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL;
	bus_priv->common_data.hw_init = false;
	bus_priv->common_data.support_consumed_addr =
		ver2_hw_info->support_consumed_addr;
	bus_priv->common_data.disable_ubwc_comp = false;

	mutex_init(&bus_priv->common_data.bus_mutex);

	/* Bus-local IRQ controller, distinct from the top-level VFE one */
	rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
		&ver2_hw_info->common_reg.irq_reg_info,
		&bus_priv->common_data.bus_irq_controller);
	if (rc) {
		CAM_ERR(CAM_ISP, "cam_irq_controller_init failed");
		goto free_bus_priv;
	}

	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
	INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
	INIT_LIST_HEAD(&bus_priv->used_comp_grp);

	/* Create WM clients, composite groups, then out resources; on any
	 * failure fall into the goto ladder below to unwind in reverse.
	 */
	for (i = 0; i < bus_priv->num_client; i++) {
		rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
			&bus_priv->bus_client[i]);
		if (rc < 0) {
			CAM_ERR(CAM_ISP, "Init WM failed rc=%d", rc);
			goto deinit_wm;
		}
	}

	for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
		rc = cam_vfe_bus_init_comp_grp(i, bus_priv, bus_hw_info,
			&bus_priv->comp_grp[i]);
		if (rc < 0) {
			CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc);
			goto deinit_comp_grp;
		}
	}

	for (i = 0; i < bus_priv->num_out; i++) {
		rc = cam_vfe_bus_init_vfe_out_resource(i, bus_priv,
			bus_hw_info);
		if (rc < 0) {
			CAM_ERR(CAM_ISP, "Init VFE Out failed rc=%d", rc);
			goto deinit_vfe_out;
		}
	}

	/* Pre-populate the free event-payload pool */
	spin_lock_init(&bus_priv->common_data.spin_lock);
	INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
	for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) {
		INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
		list_add_tail(&bus_priv->common_data.evt_payload[i].list,
			&bus_priv->common_data.free_payload_list);
	}

	/* Publish the hw_ops vtable callers use to drive this bus */
	vfe_bus_local->hw_ops.reserve      = cam_vfe_bus_acquire_vfe_out;
	vfe_bus_local->hw_ops.release      = cam_vfe_bus_release_vfe_out;
	vfe_bus_local->hw_ops.start        = cam_vfe_bus_start_hw;
	vfe_bus_local->hw_ops.stop         = cam_vfe_bus_stop_hw;
	vfe_bus_local->hw_ops.init         = cam_vfe_bus_init_hw;
	vfe_bus_local->hw_ops.deinit       = cam_vfe_bus_deinit_hw;
	vfe_bus_local->top_half_handler    = cam_vfe_bus_ver2_handle_irq;
	vfe_bus_local->bottom_half_handler = NULL;
	vfe_bus_local->hw_ops.process_cmd  = __cam_vfe_bus_process_cmd;

	*vfe_bus = vfe_bus_local;

	CAM_DBG(CAM_ISP, "Exit");
	return rc;

	/*
	 * Error unwind ladder: each label tears down the resources created
	 * before the failing loop iteration (i is the failing index, so
	 * --i first skips it), then falls through to the earlier stages.
	 */
deinit_vfe_out:
	for (--i; i >= 0; i--)
		cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);

deinit_comp_grp:
	/* i < 0 means we fell through from deinit_vfe_out with its loop
	 * exhausted; reset i so ALL comp groups get torn down.
	 */
	if (i < 0)
		i = CAM_VFE_BUS_VER2_COMP_GRP_MAX;
	for (--i; i >= 0; i--)
		cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);

deinit_wm:
	/* Same fall-through convention as above, for the WM clients */
	if (i < 0)
		i = bus_priv->num_client;
	for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]); + +free_bus_priv: + CAM_MEM_FREE(vfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(vfe_bus_local); + +end: + return rc; +} + +int cam_vfe_bus_ver2_deinit( + struct cam_vfe_bus **vfe_bus) +{ + int i, rc = 0; + struct cam_vfe_bus_ver2_priv *bus_priv = NULL; + struct cam_vfe_bus *vfe_bus_local; + unsigned long flags; + + if (!vfe_bus || !*vfe_bus) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + vfe_bus_local = *vfe_bus; + + bus_priv = vfe_bus_local->bus_priv; + if (!bus_priv) { + CAM_ERR(CAM_ISP, "bus_priv is NULL"); + rc = -ENODEV; + goto free_bus_local; + } + + spin_lock_irqsave(&bus_priv->common_data.spin_lock, flags); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) + INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + bus_priv->common_data.hw_init = false; + spin_unlock_irqrestore(&bus_priv->common_data.spin_lock, flags); + + for (i = 0; i < bus_priv->num_client; i++) { + rc = cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Deinit WM failed rc=%d", rc); + } + + for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) { + rc = cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Deinit Comp Grp failed rc=%d", rc); + } + + for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) { + rc = cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "Deinit VFE Out failed rc=%d", rc); + } + + INIT_LIST_HEAD(&bus_priv->free_comp_grp); + INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp); + INIT_LIST_HEAD(&bus_priv->used_comp_grp); + + rc = cam_irq_controller_deinit( + &bus_priv->common_data.bus_irq_controller); + if (rc) + CAM_ERR(CAM_ISP, + "Deinit IRQ Controller failed rc=%d", rc); + + mutex_destroy(&bus_priv->common_data.bus_mutex); + CAM_MEM_FREE(vfe_bus_local->bus_priv); + +free_bus_local: + 
CAM_MEM_FREE(vfe_bus_local); + + *vfe_bus = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h new file mode 100644 index 0000000000..0a2726b6c7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_VFE_BUS_VER2_H_ +#define _CAM_VFE_BUS_VER2_H_ + +#include "cam_irq_controller.h" +#include "cam_vfe_bus.h" + +#define CAM_VFE_BUS_VER2_MAX_CLIENTS 24 +#define CAM_VFE_BUS_VER2_MAX_MID_PER_PORT 4 + +enum cam_vfe_bus_ver2_vfe_core_id { + CAM_VFE_BUS_VER2_VFE_CORE_0, + CAM_VFE_BUS_VER2_VFE_CORE_1, + CAM_VFE_BUS_VER2_VFE_CORE_2, + CAM_VFE_BUS_VER2_VFE_CORE_MAX, +}; + +enum cam_vfe_bus_ver2_comp_grp_type { + CAM_VFE_BUS_VER2_COMP_GRP_0, + CAM_VFE_BUS_VER2_COMP_GRP_1, + CAM_VFE_BUS_VER2_COMP_GRP_2, + CAM_VFE_BUS_VER2_COMP_GRP_3, + CAM_VFE_BUS_VER2_COMP_GRP_4, + CAM_VFE_BUS_VER2_COMP_GRP_5, + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0, + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1, + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2, + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3, + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4, + CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5, + CAM_VFE_BUS_VER2_COMP_GRP_MAX, +}; + +enum cam_vfe_bus_ver2_vfe_out_type { + CAM_VFE_BUS_VER2_VFE_OUT_RDI0, + CAM_VFE_BUS_VER2_VFE_OUT_RDI1, + CAM_VFE_BUS_VER2_VFE_OUT_RDI2, + CAM_VFE_BUS_VER2_VFE_OUT_RDI3, + CAM_VFE_BUS_VER2_VFE_OUT_FULL, + CAM_VFE_BUS_VER2_VFE_OUT_DS4, + CAM_VFE_BUS_VER2_VFE_OUT_DS16, + CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP, + CAM_VFE_BUS_VER2_VFE_OUT_FD, + CAM_VFE_BUS_VER2_VFE_OUT_PDAF, + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE, + CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST, + CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG, + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF, + 
CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG, + CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST, + CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS, + CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS, + CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST, + CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP, + CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP, + CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP, + CAM_VFE_BUS_VER2_VFE_OUT_2PD, + CAM_VFE_BUS_VER2_VFE_OUT_MAX, +}; + +/* + * struct cam_vfe_bus_ver2_reg_offset_common: + * + * @Brief: Common registers across all BUS Clients + */ +struct cam_vfe_bus_ver2_reg_offset_common { + uint32_t hw_version; + uint32_t hw_capability; + uint32_t sw_reset; + uint32_t cgc_ovd; + uint32_t pwr_iso_cfg; + uint32_t dual_master_comp_cfg; + struct cam_irq_controller_reg_info irq_reg_info; + uint32_t comp_error_status; + uint32_t comp_ovrwr_status; + uint32_t dual_comp_error_status; + uint32_t dual_comp_ovrwr_status; + uint32_t addr_sync_cfg; + uint32_t addr_sync_frame_hdr; + uint32_t addr_sync_no_sync; + uint32_t debug_status_cfg; + uint32_t debug_status_0; + uint32_t top_irq_mask_0; +}; + +/* + * struct cam_vfe_bus_ver2_reg_offset_ubwc_client: + * + * @Brief: UBWC register offsets for BUS Clients + */ +struct cam_vfe_bus_ver2_reg_offset_ubwc_client { + uint32_t tile_cfg; + uint32_t h_init; + uint32_t v_init; + uint32_t meta_addr; + uint32_t meta_offset; + uint32_t meta_stride; + uint32_t mode_cfg_0; + uint32_t bw_limit; + uint32_t ubwc_comp_en_bit; +}; + +/* + * struct cam_vfe_bus_ver2_reg_offset_ubwc_client: + * + * @Brief: UBWC register offsets for BUS Clients + */ +struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client { + uint32_t tile_cfg; + uint32_t h_init; + uint32_t v_init; + uint32_t meta_addr; + uint32_t meta_offset; + uint32_t meta_stride; + uint32_t mode_cfg_0; + uint32_t mode_cfg_1; + uint32_t bw_limit; + uint32_t ubwc_comp_en_bit; +}; + + +/* + * struct cam_vfe_bus_ver2_reg_offset_bus_client: + * + * @Brief: Register offsets for BUS Clients + */ +struct cam_vfe_bus_ver2_reg_offset_bus_client { + uint32_t status0; + uint32_t 
status1; + uint32_t cfg; + uint32_t header_addr; + uint32_t header_cfg; + uint32_t image_addr; + uint32_t image_addr_offset; + uint32_t buffer_width_cfg; + uint32_t buffer_height_cfg; + uint32_t packer_cfg; + uint32_t stride; + uint32_t irq_subsample_period; + uint32_t irq_subsample_pattern; + uint32_t framedrop_period; + uint32_t framedrop_pattern; + uint32_t frame_inc; + uint32_t burst_limit; + void *ubwc_regs; +}; + +/* + * struct cam_vfe_bus_ver2_reg_offset_comp_grp: + * + * @Brief: Register offsets for Composite Group registers + * comp_mask: Comp group register address + * addr_sync_mask:Address sync group register address + */ +struct cam_vfe_bus_ver2_reg_offset_comp_grp { + uint32_t comp_mask; + uint32_t addr_sync_mask; +}; + +/* + * struct cam_vfe_bus_ver2_vfe_out_hw_info: + * + * @Brief: HW capability of VFE Bus Client + */ +struct cam_vfe_bus_ver2_vfe_out_hw_info { + enum cam_vfe_bus_ver2_vfe_out_type vfe_out_type; + uint32_t max_width; + uint32_t max_height; + uint32_t mid[CAM_VFE_BUS_VER2_MAX_MID_PER_PORT]; +}; + +/* + * struct cam_vfe_bus_ver2_hw_info: + * + * @Brief: HW register info for entire Bus + * + * @common_reg: Common register details + * @bus_client_reg: Bus client register info + * @comp_reg_grp: Composite group register info + * @vfe_out_hw_info: VFE output capability + * @top_irq_shift: Mask shift for top level BUS WR irq + * @support_consumed_addr: Indicate if bus support consumed address + * @max_out_res: Max vfe out resource value supported for hw + */ +struct cam_vfe_bus_ver2_hw_info { + struct cam_vfe_bus_ver2_reg_offset_common common_reg; + uint32_t num_client; + struct cam_vfe_bus_ver2_reg_offset_bus_client + bus_client_reg[CAM_VFE_BUS_VER2_MAX_CLIENTS]; + struct cam_vfe_bus_ver2_reg_offset_comp_grp + comp_grp_reg[CAM_VFE_BUS_VER2_COMP_GRP_MAX]; + uint32_t num_out; + struct cam_vfe_bus_ver2_vfe_out_hw_info + vfe_out_hw_info[CAM_VFE_BUS_VER2_VFE_OUT_MAX]; + uint32_t top_irq_shift; + bool support_consumed_addr; + uint32_t 
max_out_res; +}; + +/* + * cam_vfe_bus_ver2_init() + * + * @Brief: Initialize Bus layer + * + * @soc_info: Soc Information for the associated HW + * @hw_intf: HW Interface of HW to which this resource belongs + * @bus_hw_info: BUS HW info that contains details of BUS registers + * @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top + * level IRQs + * @vfe_bus: Pointer to vfe_bus structure which will be filled + * and returned on successful initialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_bus_ver2_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus); + +/* + * cam_vfe_bus_ver2_deinit() + * + * @Brief: Deinitialize Bus layer + * + * @vfe_bus: Pointer to vfe_bus structure to deinitialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_bus_ver2_deinit(struct cam_vfe_bus **vfe_bus); + +#endif /* _CAM_VFE_BUS_VER2_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c new file mode 100644 index 0000000000..2107c2fb79 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c @@ -0,0 +1,5514 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_hw_intf.h" +#include "cam_ife_hw_mgr.h" +#include "cam_vfe_hw_intf.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_vfe_bus_ver3.h" +#include "cam_vfe_core.h" +#include "cam_vfe_soc.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "cam_trace.h" +#include "cam_smmu_api.h" +#include "cam_common_util.h" +#include "cam_compat.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" + +static const char drv_name[] = "vfe_bus"; + +#define CAM_VFE_BUS_VER3_IRQ_REG0 0 +#define CAM_VFE_BUS_VER3_IRQ_REG1 1 +#define CAM_VFE_BUS_VER3_IRQ_MAX 2 + +#define CAM_VFE_BUS_VER3_PAYLOAD_MAX 256 + +#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH 0xFFFF +#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE 0xFFFF + +#define MAX_BUF_UPDATE_REG_NUM \ + ((sizeof(struct cam_vfe_bus_ver3_reg_offset_bus_client) + \ + sizeof(struct cam_vfe_bus_ver3_reg_offset_ubwc_client))/4) +#define MAX_REG_VAL_PAIR_SIZE \ + (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES) + +static uint32_t bus_error_irq_mask[2] = { + 0xD0000000, + 0x00000000, +}; + +enum cam_vfe_bus_ver3_packer_format { + PACKER_FMT_VER3_PLAIN_128, + PACKER_FMT_VER3_PLAIN_8, + PACKER_FMT_VER3_PLAIN_8_ODD_EVEN, + PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10, + PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10_ODD_EVEN, + PACKER_FMT_VER3_PLAIN_16_10BPP, + PACKER_FMT_VER3_PLAIN_16_12BPP, + PACKER_FMT_VER3_PLAIN_16_14BPP, + PACKER_FMT_VER3_PLAIN_16_16BPP, + PACKER_FMT_VER3_PLAIN_32, + PACKER_FMT_VER3_PLAIN_64, + PACKER_FMT_VER3_TP_10, + PACKER_FMT_VER3_MIPI10, + PACKER_FMT_VER3_MIPI12, + PACKER_FMT_VER3_MIPI14, + PACKER_FMT_VER3_MIPI20, + PACKER_FMT_VER3_PLAIN32_20BPP, + PACKER_FMT_VER3_MAX, +}; + +struct cam_vfe_bus_irq_violation_type { + bool hwpd_violation; +}; + +struct cam_vfe_bus_ver3_comp_grp_acquire_args { + enum cam_vfe_bus_ver3_comp_grp_type comp_grp_id; + uint64_t composite_mask; 
+}; + +struct cam_vfe_bus_ver3_common_data { + uint32_t core_index; + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + void *bus_irq_controller; + void *vfe_irq_controller; + void *buf_done_controller; + void *mc_comp_buf_done_controller; + void *priv; + struct cam_hw_soc_info *soc_info; + struct cam_vfe_bus_ver3_reg_offset_common *common_reg; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t io_buf_update[ + MAX_REG_VAL_PAIR_SIZE]; + + struct cam_vfe_bus_irq_evt_payload evt_payload[ + CAM_VFE_BUS_VER3_PAYLOAD_MAX]; + struct list_head free_payload_list; + spinlock_t spin_lock; + struct mutex bus_mutex; + uint32_t secure_mode; + uint32_t num_sec_out; + uint32_t addr_no_sync; + uint32_t supported_irq; + uint32_t perf_cnt_cfg[CAM_VFE_PERF_CNT_MAX]; + bool comp_config_needed; + bool is_lite; + bool hw_init; + bool support_consumed_addr; + bool support_burst_limit; + bool disable_ubwc_comp; + bool init_irq_subscribed; + bool disable_mmu_prefetch; + bool perf_cnt_en; + cam_hw_mgr_event_cb_func event_cb; + int rup_irq_handle[CAM_VFE_BUS_VER3_SRC_GRP_MAX]; + uint32_t pack_align_shift; + uint32_t max_bw_counter_limit; + uint32_t cntr; +}; + +struct cam_vfe_bus_ver3_wm_cfg_data { + uint32_t width; + uint32_t height; + uint32_t pack_fmt; + uint32_t format; + uint32_t stride; + uint32_t offset; + uint32_t h_init; + uint32_t en_cfg; + uint32_t image_offset; + uint32_t meta_offset; + bool updated; + bool rcs_en; +}; + +struct cam_vfe_bus_ver3_wm_ubwc_cfg_data { + bool ubwc_updated; + uint32_t ubwc_meta_addr; + uint32_t ubwc_meta_cfg; + uint32_t ubwc_mode_cfg; + uint32_t ubwc_stats_ctrl; + uint32_t ubwc_ctrl_2; + uint32_t ubwc_lossy_threshold_0; + uint32_t ubwc_lossy_threshold_1; + uint32_t ubwc_offset_lossy_variance; + uint32_t ubwc_bandwidth_limit; +}; + +struct cam_vfe_bus_ver3_wm_mc_data { + struct cam_vfe_bus_ver3_wm_cfg_data cfg; + struct cam_vfe_bus_ver3_wm_ubwc_cfg_data ubwc_cfg_data; + bool init_cfg_done; +}; + +struct cam_vfe_bus_ver3_wm_resource_data { + 
struct cam_vfe_bus_ver3_common_data *common_data; + struct cam_vfe_bus_ver3_reg_offset_bus_client *hw_regs; + struct cam_vfe_bus_ver3_wm_cfg_data cfg; + struct cam_vfe_bus_ver3_vfe_out_data *out_rsrc_data; + struct cam_vfe_bus_ver3_wm_mc_data *mc_data; + struct cam_vfe_bus_ver3_wm_ubwc_cfg_data ubwc_cfg_data; + enum cam_vfe_bus_wr_wm_mode wm_mode; + uint32_t burst_len; + uint32_t index; + uint32_t en_ubwc; + uint32_t packer_cfg; + uint32_t irq_subsample_period; + uint32_t irq_subsample_pattern; + uint32_t framedrop_period; + uint32_t framedrop_pattern; + uint32_t is_dual; + uint32_t acquired_width; + uint32_t acquired_height; + uint32_t default_line_based; + bool init_cfg_done; + bool hfr_cfg_done; + bool use_wm_pack; + bool update_wm_format; +}; + +struct cam_vfe_bus_ver3_comp_grp_data { + enum cam_vfe_bus_ver3_comp_grp_type comp_grp_type; + struct cam_vfe_bus_ver3_common_data *common_data; + + uint64_t composite_mask; + uint32_t comp_done_mask; + uint32_t mc_comp_done_mask; + uint32_t is_master; + uint32_t is_dual; + uint32_t dual_slave_core; + uint32_t intra_client_mask; + uint32_t addr_sync_mode; + + uint32_t acquire_dev_cnt; + uint32_t irq_trigger_cnt; + uint32_t ubwc_static_ctrl; +}; + +struct cam_vfe_bus_ver3_vfe_out_data { + uint32_t out_type; + uint32_t source_group; + struct cam_vfe_bus_ver3_common_data *common_data; + struct cam_vfe_bus_ver3_priv *bus_priv; + + uint32_t num_wm; + struct cam_isp_resource_node *wm_res; + + struct cam_isp_resource_node *comp_grp; + enum cam_isp_hw_sync_mode dual_comp_sync_mode; + uint32_t dual_hw_alternate_vfe_id; + struct list_head vfe_out_list; + + uint32_t is_master; + uint32_t is_dual; + + uint32_t format; + uint32_t max_width; + uint32_t max_height; + uint32_t secure_mode; + void *priv; + uint32_t *mid; + uint32_t num_mid; + uint32_t early_done_mask; + bool limiter_enabled; + bool mc_based; + bool cntxt_cfg_except; + uint32_t dst_hw_ctxt_id_mask; + int mc_comp_irq_handle; + int early_done_irq_handle; + uint64_t 
pid_mask; +}; + +struct cam_vfe_bus_ver3_priv { + struct cam_vfe_bus_ver3_common_data common_data; + uint32_t num_client; + uint32_t num_out; + uint32_t num_comp_grp; + uint32_t top_irq_shift; + + struct cam_isp_resource_node *bus_client; + struct cam_isp_resource_node *comp_grp; + struct cam_isp_resource_node *vfe_out; + uint32_t vfe_out_map_outtype[CAM_VFE_BUS_VER3_VFE_OUT_MAX]; + + int bus_irq_handle; + int rup_irq_handle; + int error_irq_handle; + void *tasklet_info; + uint32_t max_out_res; + uint32_t num_cons_err; + struct cam_vfe_constraint_error_info *constraint_error_list; + struct cam_vfe_bus_ver3_hw_info *bus_hw_info; +}; + +static int cam_vfe_bus_ver3_handle_mc_comp_done_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload); + +static void cam_vfe_bus_ver3_unsubscribe_init_irq( + struct cam_vfe_bus_ver3_priv *bus_priv); + +static int cam_vfe_bus_ver3_subscribe_init_irq( + struct cam_vfe_bus_ver3_priv *bus_priv); + +static int cam_vfe_bus_ver3_process_cmd( + struct cam_isp_resource_node *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size); + +static int cam_vfe_bus_ver3_config_ubwc_regs( + struct cam_vfe_bus_ver3_wm_resource_data *wm_data, + struct cam_vfe_bus_ver3_wm_ubwc_cfg_data *ubwc_cfg_data); + + +static const char *cam_vfe_bus_ver3_wm_mode_to_string(enum cam_vfe_bus_wr_wm_mode wm_mode) +{ + switch (wm_mode) { + case CAM_VFE_WM_LINE_BASED_MODE: + return "LINE_BASED"; + case CAM_VFE_WM_FRAME_BASED_MODE: + return "FRAME_BASED"; + case CAM_VFE_WM_INDEX_BASED_MODE: + return "INDEX_BASED"; + default: + return "INVALID_WM_MODE"; + } +} + +static int cam_vfe_bus_ver3_get_evt_payload( + struct cam_vfe_bus_ver3_common_data *common_data, + struct cam_vfe_bus_irq_evt_payload **evt_payload) +{ + int rc; + + spin_lock(&common_data->spin_lock); + + if (!common_data->hw_init) { + *evt_payload = NULL; + CAM_ERR_RATE_LIMIT(CAM_ISP, "VFE:%u Bus uninitialized", + common_data->core_index); + rc = -EPERM; + goto done; + } + + if 
(list_empty(&common_data->free_payload_list)) { + *evt_payload = NULL; + CAM_ERR_RATE_LIMIT(CAM_ISP, "VFE:%u No free BUS event payload", + common_data->core_index); + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry(&common_data->free_payload_list, + struct cam_vfe_bus_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); + rc = 0; +done: + spin_unlock(&common_data->spin_lock); + return rc; +} + +static int cam_vfe_bus_ver3_put_evt_payload( + struct cam_vfe_bus_ver3_common_data *common_data, + struct cam_vfe_bus_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!common_data) { + CAM_ERR(CAM_ISP, "Invalid param common_data NULL"); + return -EINVAL; + } + + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "VFE:%u No payload to put", common_data->core_index); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_vfe_bus_irq_evt_payload); + spin_lock_irqsave(&common_data->spin_lock, flags); + if (common_data->hw_init) + list_add_tail(&(*evt_payload)->list, &common_data->free_payload_list); + + spin_unlock_irqrestore(&common_data->spin_lock, flags); + *evt_payload = NULL; + + CAM_DBG(CAM_ISP, "VFE:%u Done", common_data->core_index); + return 0; +} + +static bool cam_vfe_bus_ver3_can_be_secure(uint32_t out_type) +{ + switch (out_type) { + case CAM_VFE_BUS_VER3_VFE_OUT_FULL: + case CAM_VFE_BUS_VER3_VFE_OUT_DS2: + case CAM_VFE_BUS_VER3_VFE_OUT_DS4: + case CAM_VFE_BUS_VER3_VFE_OUT_DS16: + case CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP: + case CAM_VFE_BUS_VER3_VFE_OUT_RDI0: + case CAM_VFE_BUS_VER3_VFE_OUT_RDI1: + case CAM_VFE_BUS_VER3_VFE_OUT_RDI2: + case CAM_VFE_BUS_VER3_VFE_OUT_RDI3: + case CAM_VFE_BUS_VER3_VFE_OUT_RDI4: + case CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP: + case CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP: + case CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP: + case CAM_VFE_BUS_VER3_VFE_OUT_2PD: + case CAM_VFE_BUS_VER3_VFE_OUT_LCR: + case CAM_VFE_BUS_VER3_VFE_OUT_PDAF: + case CAM_VFE_BUS_VER3_VFE_OUT_SPARSE_PD: + case 
CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD: + case CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED: + case CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW: + case CAM_VFE_BUS_VER3_VFE_OUT_IR: + return true; + + case CAM_VFE_BUS_VER3_VFE_OUT_FD: + return cam_secure_get_vfe_fd_port_config(); + + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_ALSC: + case CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE: + case CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_TMC_BHIST: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AF_BHIST: + case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BHIST: + default: + return false; + } +} + +static enum cam_vfe_bus_ver3_vfe_out_type + cam_vfe_bus_ver3_get_out_res_id_and_index( + struct cam_vfe_bus_ver3_priv *bus_priv, + uint32_t res_type, uint32_t *index) +{ + uint32_t vfe_out_type; + + switch (res_type) { + case CAM_ISP_IFE_OUT_RES_FULL: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL; + break; + case CAM_ISP_IFE_OUT_RES_DS4: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4; + break; + case CAM_ISP_IFE_OUT_RES_DS16: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16; + break; + case CAM_ISP_IFE_OUT_RES_FD: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD; + break; + case CAM_ISP_IFE_OUT_RES_RAW_DUMP: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP; + break; + case CAM_ISP_IFE_OUT_RES_2PD: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD; + break; + case 
CAM_ISP_IFE_OUT_RES_RDI_0: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0; + break; + case CAM_ISP_IFE_OUT_RES_RDI_1: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1; + break; + case CAM_ISP_IFE_OUT_RES_RDI_2: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2; + break; + case CAM_ISP_IFE_OUT_RES_RDI_3: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3; + break; + case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE; + break; + case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST; + break; + case CAM_ISP_IFE_OUT_RES_STATS_TL_BG: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG; + break; + case CAM_ISP_IFE_OUT_RES_STATS_BF: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF; + break; + case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG; + break; + case CAM_ISP_IFE_OUT_RES_STATS_BHIST: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST; + break; + case CAM_ISP_IFE_OUT_RES_STATS_RS: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS; + break; + case CAM_ISP_IFE_OUT_RES_STATS_CS: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS; + break; + case CAM_ISP_IFE_OUT_RES_STATS_IHIST: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST; + break; + case CAM_ISP_IFE_OUT_RES_FULL_DISP: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP; + break; + case CAM_ISP_IFE_OUT_RES_DS4_DISP: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP; + break; + case CAM_ISP_IFE_OUT_RES_DS16_DISP: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP; + break; + case CAM_ISP_IFE_OUT_RES_LCR: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LCR; + break; + case CAM_ISP_IFE_OUT_RES_AWB_BFW: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW; + break; + case CAM_ISP_IFE_OUT_RES_SPARSE_PD: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_SPARSE_PD; + break; + case CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD; + break; + case 
CAM_ISP_IFE_OUT_RES_STATS_AEC_BE: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE; + break; + case CAM_ISP_IFE_OUT_RES_LTM_STATS: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS; + break; + case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST; + break; + case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG; + break; + case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW; + break; + case CAM_ISP_IFE_OUT_RES_STATS_CAF: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF; + break; + case CAM_ISP_IFE_OUT_RES_STATS_BAYER_RS: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS; + break; + case CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED; + break; + case CAM_ISP_IFE_OUT_RES_STATS_ALSC: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_ALSC; + break; + case CAM_ISP_IFE_OUT_RES_DS2: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS2; + break; + case CAM_ISP_IFE_OUT_RES_IR: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_IR; + break; + case CAM_ISP_IFE_OUT_RES_RDI_4: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI4; + break; + case CAM_ISP_IFE_OUT_RES_STATS_TMC_BHIST: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TMC_BHIST; + break; + case CAM_ISP_IFE_OUT_RES_STATS_AF_BHIST: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AF_BHIST; + break; + case CAM_ISP_IFE_OUT_RES_STATS_AEC_BHIST: + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BHIST; + break; + default: + CAM_WARN(CAM_ISP, "VFE:%u Invalid isp res id: %d , assigning max", + bus_priv->common_data.core_index, res_type); + vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_MAX; + *index = CAM_VFE_BUS_VER3_VFE_OUT_MAX; + return vfe_out_type; + } + *index = bus_priv->vfe_out_map_outtype[vfe_out_type]; + + return vfe_out_type; +} + +static enum cam_vfe_bus_ver3_packer_format + cam_vfe_bus_ver3_get_packer_fmt(uint32_t out_fmt, int wm_index) +{ + switch 
(out_fmt) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_PLAIN16_8: + case CAM_FORMAT_PLAIN128: + case CAM_FORMAT_PD8: + return PACKER_FMT_VER3_PLAIN_128; + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_PLAIN8: + return PACKER_FMT_VER3_PLAIN_8; + case CAM_FORMAT_MIPI_RAW_10: + return PACKER_FMT_VER3_MIPI10; + case CAM_FORMAT_MIPI_RAW_12: + return PACKER_FMT_VER3_MIPI12; + case CAM_FORMAT_MIPI_RAW_14: + return PACKER_FMT_VER3_MIPI14; + case CAM_FORMAT_MIPI_RAW_20: + return PACKER_FMT_VER3_MIPI20; + case CAM_FORMAT_NV21: + if ((wm_index == 1) || (wm_index == 3) || (wm_index == 5)) + return PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10_ODD_EVEN; + fallthrough; + case CAM_FORMAT_NV12: + case CAM_FORMAT_UBWC_NV12: + case CAM_FORMAT_UBWC_NV12_4R: + case CAM_FORMAT_Y_ONLY: + return PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10; + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_10_LSB: + return PACKER_FMT_VER3_PLAIN_16_10BPP; + case CAM_FORMAT_PLAIN16_12: + return PACKER_FMT_VER3_PLAIN_16_12BPP; + case CAM_FORMAT_PLAIN16_14: + return PACKER_FMT_VER3_PLAIN_16_14BPP; + case CAM_FORMAT_PLAIN16_16: + return PACKER_FMT_VER3_PLAIN_16_16BPP; + case CAM_FORMAT_PLAIN32: + case CAM_FORMAT_ARGB: + return PACKER_FMT_VER3_PLAIN_32; + case CAM_FORMAT_PLAIN32_20: + return PACKER_FMT_VER3_PLAIN32_20BPP; + case CAM_FORMAT_PLAIN64: + case CAM_FORMAT_ARGB_16: + case CAM_FORMAT_PD10: + return PACKER_FMT_VER3_PLAIN_64; + case CAM_FORMAT_UBWC_TP10: + case CAM_FORMAT_GBR_UBWC_TP10: + case CAM_FORMAT_BAYER_UBWC_TP10: + case CAM_FORMAT_TP10: + case CAM_FORMAT_GBR_TP10: + return PACKER_FMT_VER3_TP_10; + default: + return PACKER_FMT_VER3_MAX; + } + return PACKER_FMT_VER3_MAX; +} + +static int cam_vfe_bus_ver3_handle_rup_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *vfe_out = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_irq_evt_payload *evt_payload; + uint32_t 
irq_status; + + vfe_out = th_payload->handler_priv; + if (!vfe_out) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource"); + return -ENODEV; + } + + rsrc_data = vfe_out->res_priv; + + CAM_DBG(CAM_ISP, "VFE:%u Bus IRQ status_0: 0x%X", + rsrc_data->common_data->core_index, + th_payload->evt_status_arr[0]); + + rc = cam_vfe_bus_ver3_get_evt_payload(rsrc_data->common_data, + &evt_payload); + + if (rc) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "VFE:%u Bus IRQ status_0: 0x%X", + rsrc_data->common_data->core_index, + th_payload->evt_status_arr[0]); + return rc; + } + + evt_payload->core_index = rsrc_data->common_data->core_index; + evt_payload->evt_id = evt_id; + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + irq_status = + th_payload->evt_status_arr[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0]; + + trace_cam_log_event("RUP", "RUP_IRQ", irq_status, 0); + + th_payload->evt_payload_priv = evt_payload; + + return rc; +} + +static void cam_vfe_bus_ver3_print_constraint_errors( + struct cam_vfe_bus_ver3_priv *bus_priv, + uint8_t *wm_name, + uint32_t constraint_errors) +{ + uint32_t i; + + CAM_DBG(CAM_ISP, "VFE[%u] Constraint violation bitflags: 0x%X", + bus_priv->common_data.core_index, constraint_errors); + + for (i = 0; i < bus_priv->num_cons_err; i++) { + if (bus_priv->constraint_error_list[i].bitmask & constraint_errors) { + CAM_ERR(CAM_ISP, "SW: Constraint Violation"); + CAM_ERR(CAM_ISP, + "Write master name: %s, violated constraint description: %s", + wm_name, bus_priv->constraint_error_list[i].error_description); + } + } +} + +static void cam_vfe_bus_ver3_get_constraint_errors( + struct cam_vfe_bus_ver3_priv *bus_priv) +{ + uint32_t i, j, constraint_errors; + uint8_t *wm_name = NULL; + struct cam_isp_resource_node *out_rsrc_node = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *out_rsrc_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + + for (i = 0; i < bus_priv->num_out; i++) { + out_rsrc_node = 
&bus_priv->vfe_out[i]; + if (!out_rsrc_node || !out_rsrc_node->res_priv) { + CAM_DBG(CAM_ISP, + "VFE:%u out:%d out rsrc node or data is NULL", + bus_priv->common_data.core_index, i); + continue; + } + out_rsrc_data = out_rsrc_node->res_priv; + for (j = 0; j < out_rsrc_data->num_wm; j++) { + wm_data = out_rsrc_data->wm_res[j].res_priv; + wm_name = out_rsrc_data->wm_res[j].res_name; + if (wm_data) { + constraint_errors = cam_io_r_mb( + bus_priv->common_data.mem_base + + wm_data->hw_regs->debug_status_1); + if (!constraint_errors) + continue; + + cam_vfe_bus_ver3_print_constraint_errors( + bus_priv, wm_name, constraint_errors); + } + } + } +} + +static int cam_vfe_bus_ver3_handle_rup_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + int ret = CAM_VFE_IRQ_STATUS_ERR; + struct cam_vfe_bus_irq_evt_payload *payload; + struct cam_isp_resource_node *vfe_out = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + struct cam_isp_hw_event_info evt_info; + uint32_t irq_status; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return ret; + } + + payload = evt_payload_priv; + vfe_out = handler_priv; + rsrc_data = vfe_out->res_priv; + + if (!rsrc_data->common_data->event_cb) { + CAM_ERR(CAM_ISP, "VFE:%u Callback to HW MGR not found", vfe_out->hw_intf->hw_idx); + return ret; + } + + irq_status = payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0]; + + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.hw_idx = rsrc_data->common_data->core_index; + evt_info.res_type = CAM_ISP_RESOURCE_VFE_IN; + + if (!rsrc_data->common_data->is_lite) { + if (irq_status & 0x1) { + CAM_DBG(CAM_ISP, "VFE:%u Received CAMIF RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_CAMIF; + rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + + if (irq_status & 0x2) { + CAM_DBG(CAM_ISP, "VFE:%u Received PDLIB RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_PDLIB; 
+ rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + + if (irq_status & 0x4) + CAM_DBG(CAM_ISP, "VFE:%u Received LCR RUP", + evt_info.hw_idx); + + if (irq_status & 0x8) { + CAM_DBG(CAM_ISP, "VFE:%u Received RDI0 RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI0; + rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + + if (irq_status & 0x10) { + CAM_DBG(CAM_ISP, "VFE:%u Received RDI1 RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI1; + rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + + if (irq_status & 0x20) { + CAM_DBG(CAM_ISP, "VFE:%u Received RDI2 RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI2; + rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + } else { + if (irq_status & 0x1) { + CAM_DBG(CAM_ISP, "VFE:%u Received RDI0 RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI0; + rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + + if (irq_status & 0x2) { + CAM_DBG(CAM_ISP, "VFE:%u Received RDI1 RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI1; + rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + + if (irq_status & 0x4) { + CAM_DBG(CAM_ISP, "VFE:%u Received RDI2 RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI2; + rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + + if (irq_status & 0x8) { + CAM_DBG(CAM_ISP, "VFE:%u Received RDI3 RUP", + evt_info.hw_idx); + evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI3; + rsrc_data->common_data->event_cb( + rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE, + (void *)&evt_info); + } + } + + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + 
+ CAM_DBG(CAM_ISP, + "VFE:%u Bus RUP IRQ status_0:0x%X rc:%d", + evt_info.hw_idx, CAM_ISP_HW_EVENT_REG_UPDATE, irq_status, ret); + + cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data, &payload); + + return ret; +} + +/* lsb alignemnt needs to be set only if the format has padding bits */ +static inline bool cam_vfe_bus_ver3_needs_lsb_alignment(uint32_t format) +{ + switch (format) { + case CAM_FORMAT_PLAIN16_8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN32_20: + case CAM_FORMAT_PLAIN16_10_LSB: + return true; + } + + return false; +} + +static inline void cam_vfe_bus_ver3_config_frame_based_rdi_wm( + struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data) +{ + rsrc_data->cfg.width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH; + rsrc_data->cfg.height = 0; + rsrc_data->cfg.stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE; + rsrc_data->cfg.en_cfg = + (rsrc_data->common_data->common_reg->wm_mode_val[CAM_VFE_WM_FRAME_BASED_MODE] << + rsrc_data->common_data->common_reg->wm_mode_shift) | + (1 << rsrc_data->common_data->common_reg->wm_en_shift); +} + +static int cam_vfe_bus_ver3_config_rdi_wm( + struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data) +{ + struct cam_vfe_bus_ver3_reg_offset_common *common_reg = rsrc_data->common_data->common_reg; + + rsrc_data->cfg.pack_fmt = PACKER_FMT_VER3_PLAIN_128; + + if (rsrc_data->wm_mode == CAM_VFE_WM_FRAME_BASED_MODE) + cam_vfe_bus_ver3_config_frame_based_rdi_wm(rsrc_data); + else if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] << + common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift); + else { + CAM_WARN(CAM_ISP, "No index mode %d is supported for VFE: %u WM: %u", + rsrc_data->wm_mode, + rsrc_data->common_data->core_index, + rsrc_data->index); + return 0; + } + + switch (rsrc_data->cfg.format) { + case CAM_FORMAT_MIPI_RAW_10: + if (rsrc_data->use_wm_pack) { + if (rsrc_data->wm_mode == 
CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width), 8); + + rsrc_data->cfg.pack_fmt = PACKER_FMT_VER3_MIPI10; + } else if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 5) / 4, 16) / 16; + + break; + case CAM_FORMAT_MIPI_RAW_6: + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 3) / 4, 16) / 16; + + break; + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_YUV422: + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP(rsrc_data->cfg.width, 16) / 16; + + break; + case CAM_FORMAT_MIPI_RAW_12: + if (rsrc_data->use_wm_pack) { + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width), 8); + + rsrc_data->cfg.pack_fmt = PACKER_FMT_VER3_MIPI12; + } else if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 3) / 2, 16) / 16; + + break; + case CAM_FORMAT_MIPI_RAW_14: + if (rsrc_data->use_wm_pack) { + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width), 8); + + rsrc_data->cfg.pack_fmt = PACKER_FMT_VER3_MIPI14; + } else if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 7) / 4, 16) / 16; + + break; + case CAM_FORMAT_MIPI_RAW_16: + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 2), 16) / 16; + + break; + case CAM_FORMAT_MIPI_RAW_20: + if (rsrc_data->use_wm_pack) { + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width), 8); + rsrc_data->cfg.pack_fmt = PACKER_FMT_VER3_MIPI20; + } else if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 5) / 2, 16) / 16; + + break; + case CAM_FORMAT_PLAIN128: + 
if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 16), 16) / 16; + + break; + case CAM_FORMAT_PLAIN32_20: + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 4), 16) / 16; + + break; + case CAM_FORMAT_PLAIN8: + rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[rsrc_data->wm_mode] << + common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift); + rsrc_data->cfg.stride = rsrc_data->cfg.width; + rsrc_data->cfg.width = ALIGNUP(rsrc_data->cfg.width, 16) / 16; + break; + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN16_10_LSB: + if (rsrc_data->use_wm_pack) { + rsrc_data->cfg.pack_fmt = cam_vfe_bus_ver3_get_packer_fmt( + rsrc_data->cfg.format, rsrc_data->index); + /* LSB aligned */ + rsrc_data->cfg.pack_fmt |= (1 << rsrc_data->common_data->pack_align_shift); + + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width), 8); + } else if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) + rsrc_data->cfg.width = ALIGNUP(rsrc_data->cfg.width * 2, 16) / 16; + + if (rsrc_data->cfg.format == CAM_FORMAT_PLAIN16_10_LSB) + rsrc_data->cfg.pack_fmt |= (1 << rsrc_data->common_data->pack_align_shift); + break; + case CAM_FORMAT_PLAIN64: + rsrc_data->cfg.width = ALIGNUP(rsrc_data->cfg.width * 8, 16) / 16; + rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] << + common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift); + break; + case CAM_FORMAT_YUV422_10: + if (rsrc_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) { + rsrc_data->cfg.en_cfg = + (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] << + common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift); + rsrc_data->cfg.width = ALIGNUP((rsrc_data->cfg.width * 5) / 4, 16) / 16; + } + + break; + default: + CAM_ERR(CAM_ISP, "VFE:%u 
Unsupported RDI format %d", + rsrc_data->common_data->core_index, rsrc_data->cfg.format); + return -EINVAL; + } + + return 0; +} + +static int cam_vfe_bus_ver3_config_ports_with_ubwc( + struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data, + enum cam_vfe_bus_ver3_vfe_out_type vfe_out_res_id, + enum cam_vfe_bus_plane_type plane) +{ + struct cam_vfe_bus_ver3_reg_offset_common *common_reg = rsrc_data->common_data->common_reg; + + switch (rsrc_data->cfg.format) { + case CAM_FORMAT_UBWC_NV12_4R: + case CAM_FORMAT_UBWC_NV12: + case CAM_FORMAT_UBWC_TP10: + case CAM_FORMAT_UBWC_P016: + case CAM_FORMAT_GBR_UBWC_TP10: + case CAM_FORMAT_BAYER_UBWC_TP10: + rsrc_data->en_ubwc = 1; + fallthrough; + /* For v980, plain and MIPI formats also supported */ + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_NV21: + case CAM_FORMAT_NV12: + case CAM_FORMAT_Y_ONLY: + case CAM_FORMAT_TP10: + case CAM_FORMAT_GBR_TP10: + switch (plane) { + case PLANE_C: + rsrc_data->cfg.height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + break; + case CAM_FORMAT_PLAIN16_10_LSB: + rsrc_data->cfg.pack_fmt |= (1 << rsrc_data->common_data->pack_align_shift); + switch (plane) { + case PLANE_C: + rsrc_data->cfg.height /= 2; + break; + case PLANE_Y: + break; + default: + CAM_ERR(CAM_ISP, "Invalid plane %d", plane); + return -EINVAL; + } + break; + case CAM_FORMAT_PD10: + /** + * For PD10 format, on Lanai and older, width and height + * halved for all planes + */ + rsrc_data->cfg.width /= 2; + rsrc_data->cfg.height /= 2; + break; + default: + CAM_ERR(CAM_ISP, "Invalid format %d out_type:%d", + rsrc_data->cfg.format, vfe_out_res_id); + return -EINVAL; + } + + rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] << + 
/*
 * cam_vfe_bus_ver3_config_port()
 *
 * Per-output-type WM configuration dispatcher. Picks the default WM mode
 * (line vs frame based), then programs width/height/stride/pack_fmt and
 * the en_cfg enable word in rsrc_data->cfg according to the output type.
 *
 * @ver3_bus_priv  : Bus private data (source of pack_align_shift)
 * @rsrc_data      : WM resource data whose cfg is updated in place
 * @vfe_out_res_id : Output resource type being configured
 * @plane          : Plane this WM serves (used by image-format paths)
 *
 * Return: 0 on success, negative errno from the helpers or -EINVAL for
 * an unknown output type.
 */
static int cam_vfe_bus_ver3_config_port(
	struct cam_vfe_bus_ver3_priv *ver3_bus_priv,
	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data,
	enum cam_vfe_bus_ver3_vfe_out_type vfe_out_res_id,
	enum cam_vfe_bus_plane_type plane)
{
	int rc = 0;
	struct cam_vfe_bus_ver3_reg_offset_common *common_reg = rsrc_data->common_data->common_reg;

	/* Default mode comes from the per-client static config */
	if (rsrc_data->default_line_based)
		rsrc_data->wm_mode = CAM_VFE_WM_LINE_BASED_MODE;
	else
		rsrc_data->wm_mode = CAM_VFE_WM_FRAME_BASED_MODE;

	switch (vfe_out_res_id) {
	case CAM_VFE_BUS_VER3_VFE_OUT_RDI0:
	case CAM_VFE_BUS_VER3_VFE_OUT_RDI1:
	case CAM_VFE_BUS_VER3_VFE_OUT_RDI2:
	case CAM_VFE_BUS_VER3_VFE_OUT_RDI3:
	case CAM_VFE_BUS_VER3_VFE_OUT_RDI4:
		rc = cam_vfe_bus_ver3_config_rdi_wm(rsrc_data);
		if (rc)
			return rc;

		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_FULL:
	case CAM_VFE_BUS_VER3_VFE_OUT_DS2:
	case CAM_VFE_BUS_VER3_VFE_OUT_DS4:
	case CAM_VFE_BUS_VER3_VFE_OUT_DS16:
	case CAM_VFE_BUS_VER3_VFE_OUT_FD:
	case CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP:
		/* Image paths that may carry UBWC data */
		rc = cam_vfe_bus_ver3_config_ports_with_ubwc(rsrc_data, vfe_out_res_id, plane);
		if (rc)
			return rc;

		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP:
	case CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP:
		/* DISP DS taps are half resolution, line based */
		rsrc_data->cfg.height = rsrc_data->cfg.height / 2;
		rsrc_data->cfg.width = rsrc_data->cfg.width / 2;
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_ALSC:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AF_BHIST:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_TMC_BHIST:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BHIST:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS:
	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST:
	case CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW:
		/* Stats clients: frame based, dimensions unused (stride 1) */
		rsrc_data->cfg.width = 0;
		rsrc_data->cfg.height = 0;
		rsrc_data->cfg.stride = 1;
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_FRAME_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF:
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_FRAME_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW:
		rsrc_data->cfg.width = 0;
		rsrc_data->cfg.height = 0;
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_FRAME_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_LCR:
	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED:
		/* 16bpp line based, stride aligned to 8 bytes */
		rsrc_data->cfg.stride = ALIGNUP(rsrc_data->cfg.width * 2, 8);
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		/* LSB aligned */
		rsrc_data->cfg.pack_fmt |= (1 << ver3_bus_priv->common_data.pack_align_shift);
		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP:
		if (cam_vfe_bus_ver3_needs_lsb_alignment(rsrc_data->cfg.pack_fmt)) {
			rsrc_data->cfg.pack_fmt |=
				(1 << ver3_bus_priv->common_data.pack_align_shift);
		}

		rsrc_data->cfg.stride = rsrc_data->cfg.width;
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_SPARSE_PD:
		rsrc_data->cfg.stride = ALIGNUP(rsrc_data->cfg.width * 2, 8);
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		if (rsrc_data->cfg.format != CAM_FORMAT_PLAIN8)
			/* LSB aligned */
			rsrc_data->cfg.pack_fmt |= (1 <<
				ver3_bus_priv->common_data.pack_align_shift);

		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD:
		rsrc_data->cfg.stride = ALIGNUP(rsrc_data->cfg.width * 2, 8);
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS:
		/* 32bpp line based, stride aligned to 16 bytes */
		rsrc_data->cfg.stride = ALIGNUP(rsrc_data->cfg.width * 4, 16);
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		break;
	case CAM_VFE_BUS_VER3_VFE_OUT_IR:
		rsrc_data->cfg.stride = rsrc_data->cfg.width;
		rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] <<
			common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift);
		/* LSB aligned */
		rsrc_data->cfg.pack_fmt |= (1 << ver3_bus_priv->common_data.pack_align_shift);
		break;
	default:
		CAM_ERR(CAM_ISP, "Invalid out_type:%d requested", vfe_out_res_id);
		return -EINVAL;
	}

	return rc;
}
case CAM_VFE_BUS_VER3_VFE_OUT_SPARSE_PD: + rsrc_data->cfg.stride = ALIGNUP(rsrc_data->cfg.width * 2, 8); + rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] << + common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift); + if (rsrc_data->cfg.format != CAM_FORMAT_PLAIN8) + /* LSB aligned */ + rsrc_data->cfg.pack_fmt |= (1 << + ver3_bus_priv->common_data.pack_align_shift); + + break; + case CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD: + rsrc_data->cfg.stride = ALIGNUP(rsrc_data->cfg.width * 2, 8); + rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] << + common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift); + break; + case CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS: + rsrc_data->cfg.stride = ALIGNUP(rsrc_data->cfg.width * 4, 16); + rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] << + common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift); + break; + case CAM_VFE_BUS_VER3_VFE_OUT_IR: + rsrc_data->cfg.stride = rsrc_data->cfg.width; + rsrc_data->cfg.en_cfg = (common_reg->wm_mode_val[CAM_VFE_WM_LINE_BASED_MODE] << + common_reg->wm_mode_shift) | (1 << common_reg->wm_en_shift); + /* LSB aligned */ + rsrc_data->cfg.pack_fmt |= (1 << ver3_bus_priv->common_data.pack_align_shift); + break; + default: + CAM_ERR(CAM_ISP, "Invalid out_type:%d requested", vfe_out_res_id); + return -EINVAL; + } + + return rc; +} + +static int cam_vfe_bus_ver3_acquire_wm( + struct cam_vfe_bus_ver3_priv *ver3_bus_priv, + struct cam_vfe_hw_vfe_out_acquire_args *out_acq_args, + void *tasklet, + enum cam_vfe_bus_ver3_vfe_out_type vfe_out_res_id, + enum cam_vfe_bus_plane_type plane, + struct cam_isp_resource_node *wm_res, + enum cam_vfe_bus_ver3_comp_grp_type *comp_grp_id) +{ + int32_t wm_idx = 0, rc = 0; + struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data = NULL; + + if (wm_res->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) { + CAM_ERR(CAM_ISP, "VFE:%u WM:%d not available state:%d", + wm_res->hw_intf->hw_idx, wm_idx, 
wm_res->res_state); + return -EALREADY; + } + + rsrc_data = wm_res->res_priv; + wm_idx = rsrc_data->index; + rsrc_data->cfg.format = out_acq_args->out_port_info->format; + rsrc_data->use_wm_pack = (out_acq_args->use_wm_pack || + out_acq_args->out_port_info->use_wm_pack); + rsrc_data->cfg.pack_fmt = cam_vfe_bus_ver3_get_packer_fmt(rsrc_data->cfg.format, + wm_idx); + + rsrc_data->cfg.width = out_acq_args->out_port_info->width; + rsrc_data->cfg.height = out_acq_args->out_port_info->height; + rsrc_data->acquired_width = out_acq_args->out_port_info->width; + rsrc_data->acquired_height = out_acq_args->out_port_info->height; + rsrc_data->is_dual = out_acq_args->is_dual; + /* Set WM offset value to default */ + rsrc_data->cfg.offset = 0; + rsrc_data->cfg.meta_offset = 0; + rsrc_data->cfg.image_offset = 0; + CAM_DBG(CAM_ISP, + "VFE:%u WM:%d width %d height %d, supported_format: 0x%llx, pack_fmt: %d use_wm_pack:%s", + wm_res->hw_intf->hw_idx, rsrc_data->index, + rsrc_data->cfg.width, rsrc_data->cfg.height, rsrc_data->hw_regs->supported_formats, + rsrc_data->cfg.pack_fmt, CAM_BOOL_TO_YESNO(rsrc_data->use_wm_pack)); + + if (!(rsrc_data->hw_regs->supported_formats & BIT_ULL(rsrc_data->cfg.format))) { + CAM_ERR(CAM_ISP, "Invalid format %d out_type:%d", + rsrc_data->cfg.format, vfe_out_res_id); + return -EINVAL; + } + + rc = cam_vfe_bus_ver3_config_port(ver3_bus_priv, rsrc_data, vfe_out_res_id, plane); + if (rc) { + CAM_ERR(CAM_ISP, "VFE:%u WM:%d %s Failed to configure port", + rsrc_data->common_data->core_index, rsrc_data->index, wm_res->res_name); + return rc; + } + + if ((rsrc_data->cfg.en_cfg >> rsrc_data->common_data->common_reg->wm_mode_shift) != + rsrc_data->common_data->common_reg->wm_mode_val[rsrc_data->wm_mode]) { + CAM_ERR(CAM_ISP, + "VFE:%u WM:%d %s en_cfg: 0x%x does not have the required wm mode: %s value", + rsrc_data->common_data->core_index, rsrc_data->index, wm_res->res_name, + rsrc_data->cfg.en_cfg, + cam_vfe_bus_ver3_wm_mode_to_string(rsrc_data->wm_mode)); + 
/*
 * cam_vfe_bus_ver3_release_wm()
 *
 * Undo an acquire_wm: reset all per-acquire WM configuration/state back
 * to defaults, clear the multi-context data if the out resource uses it,
 * return the node to AVAILABLE, and release VMRM ownership.
 *
 * @bus_priv : Bus private data (unused here beyond the signature)
 * @wm_res   : WM resource node being released
 *
 * Return: 0 on success, or the VMRM release error (note: the node is
 * already marked AVAILABLE before the VMRM call).
 */
static int cam_vfe_bus_ver3_release_wm(void *bus_priv,
	struct cam_isp_resource_node *wm_res)
{
	int rc = 0;
	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data =
		wm_res->res_priv;

	/* Reset every field written by acquire/config back to defaults */
	rsrc_data->cfg.offset = 0;
	rsrc_data->cfg.meta_offset = 0;
	rsrc_data->cfg.image_offset = 0;
	rsrc_data->cfg.width = 0;
	rsrc_data->cfg.height = 0;
	rsrc_data->cfg.stride = 0;
	rsrc_data->cfg.format = 0;
	rsrc_data->cfg.pack_fmt = 0;
	rsrc_data->cfg.rcs_en = false;
	rsrc_data->burst_len = 0;
	rsrc_data->irq_subsample_period = 0;
	rsrc_data->irq_subsample_pattern = 0;
	rsrc_data->framedrop_period = 0;
	rsrc_data->framedrop_pattern = 0;
	rsrc_data->packer_cfg = 0;
	rsrc_data->en_ubwc = 0;
	rsrc_data->cfg.h_init = 0;
	rsrc_data->init_cfg_done = false;
	rsrc_data->hfr_cfg_done = false;
	rsrc_data->cfg.en_cfg = 0;
	rsrc_data->is_dual = 0;
	memset(&rsrc_data->ubwc_cfg_data, 0, sizeof(struct cam_vfe_bus_ver3_wm_ubwc_cfg_data));

	/* mc_data only exists for MC-based/context-exception out resources */
	if (rsrc_data->out_rsrc_data->mc_based || rsrc_data->out_rsrc_data->cntxt_cfg_except)
		memset(rsrc_data->mc_data, 0, sizeof(struct cam_vfe_bus_ver3_wm_mc_data) *
			CAM_ISP_MULTI_CTXT_MAX);

	wm_res->tasklet_info = NULL;
	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

	rc = cam_vmrm_soc_release_resources(CAM_HW_ID_IFE0 + rsrc_data->common_data->core_index);
	if (rc) {
		CAM_ERR(CAM_ISP, "VFE[%u] vmrm soc release resources failed",
			rsrc_data->common_data->core_index);
		return rc;
	}

	CAM_DBG(CAM_ISP, "VFE:%u Release WM:%d %s",
		rsrc_data->common_data->core_index, rsrc_data->index,
		wm_res->res_name);

	return rc;
}
/*
 * cam_vfe_bus_ver3_start_wm_util()
 *
 * Program one WM client for streaming: packer format, optional MMU
 * prefetch, optional context-config mask, then the en_cfg enable write
 * (done last, with a memory barrier).
 *
 * @rsrc_data : WM resource data to program
 * @hw_ctxt_id: Hardware context being programmed; -1 for the non-MC
 *              path (currently unused inside this helper)
 *
 * Return: 0 (always succeeds today; int kept for future error paths).
 */
static int cam_vfe_bus_ver3_start_wm_util(
	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data, int hw_ctxt_id)
{
	struct cam_vfe_bus_ver3_common_data *common_data = rsrc_data->common_data;

	cam_io_w(rsrc_data->cfg.pack_fmt, common_data->mem_base + rsrc_data->hw_regs->packer_cfg);

	/* Validate for debugfs and mmu reg info for targets that don't list it */
	if (!(common_data->disable_mmu_prefetch) &&
		(rsrc_data->hw_regs->mmu_prefetch_cfg)) {
		cam_io_w_mb(1, common_data->mem_base +
			rsrc_data->hw_regs->mmu_prefetch_cfg);
		cam_io_w_mb(0xFFFFFFFF, common_data->mem_base +
			rsrc_data->hw_regs->mmu_prefetch_max_offset);
		CAM_DBG(CAM_ISP, "VFE: %u WM: %d MMU prefetch enabled",
			rsrc_data->common_data->core_index,
			rsrc_data->index);
	}

	if (rsrc_data->out_rsrc_data->mc_based || rsrc_data->out_rsrc_data->cntxt_cfg_except)
		cam_io_w_mb(rsrc_data->out_rsrc_data->dst_hw_ctxt_id_mask,
			common_data->mem_base + rsrc_data->hw_regs->ctxt_cfg);

	/* Enable WM */
	cam_io_w_mb(rsrc_data->cfg.en_cfg, common_data->mem_base + rsrc_data->hw_regs->cfg);

	return 0;
}

/*
 * cam_vfe_bus_ver3_start_wm()
 *
 * Start streaming on a WM resource. For multi-context out resources the
 * register context-select is saved, each destination context is selected
 * and programmed via start_wm_util(), then the select is restored;
 * otherwise a single util call programs the client. Also enables
 * constraint-error debug status and, first time through, the performance
 * counters configured in common_data.
 *
 * @wm_res : WM resource node to start
 *
 * Return: 0 on success, or the error from start_wm_util().
 */
static int cam_vfe_bus_ver3_start_wm(struct cam_isp_resource_node *wm_res)
{
	const uint32_t enable_debug_status_1 = 11 << 8;
	int i, j, rc = 0;
	uint32_t restore_ctxt_sel_val = 0;
	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data = wm_res->res_priv;
	struct cam_vfe_bus_ver3_common_data *common_data = rsrc_data->common_data;

	if (common_data->support_burst_limit)
		cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);

	if (rsrc_data->out_rsrc_data->mc_based || rsrc_data->out_rsrc_data->cntxt_cfg_except) {
		/* Save context select so it can be restored after programming */
		restore_ctxt_sel_val = cam_io_r_mb(common_data->mem_base +
			common_data->common_reg->ctxt_sel);
		for (i = 0; i < CAM_ISP_MULTI_CTXT_MAX; i++) {
			if (!(rsrc_data->out_rsrc_data->dst_hw_ctxt_id_mask & BIT(i)))
				continue;

			/* Program reg context select before programming multibank registers*/
			cam_io_w_mb(((i << common_data->common_reg->mc_write_sel_shift) |
				(i << common_data->common_reg->mc_read_sel_shift)),
				common_data->mem_base + common_data->common_reg->ctxt_sel);
			rc = cam_vfe_bus_ver3_start_wm_util(rsrc_data, i);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"VFE:%u Failed in configuring WM:%s hw_ctxt:%d for start",
					rsrc_data->common_data->core_index, wm_res->res_name, i);
				goto end;
			}
		}

		cam_io_w_mb(restore_ctxt_sel_val, common_data->mem_base +
			common_data->common_reg->ctxt_sel);
	} else {
		rc = cam_vfe_bus_ver3_start_wm_util(rsrc_data, -1);
		if (rc) {
			CAM_ERR(CAM_ISP, "VFE:%u Failed in configuring WM:%s for start",
				rsrc_data->common_data->core_index, wm_res->res_name);
			goto end;
		}
	}

	/* Enable constraint error detection */
	cam_io_w_mb(enable_debug_status_1,
		common_data->mem_base +
		rsrc_data->hw_regs->debug_status_cfg);

	CAM_DBG(CAM_ISP,
		"Start VFE:%u WM:%d %s offset:0x%X en_cfg:0x%X width:%d height:%d",
		rsrc_data->common_data->core_index, rsrc_data->index,
		wm_res->res_name, (uint32_t) rsrc_data->hw_regs->cfg,
		rsrc_data->cfg.en_cfg, rsrc_data->cfg.width, rsrc_data->cfg.height);
	CAM_DBG(CAM_ISP, "VFE:%u WM:%d pk_fmt:%d stride:%d burst len:%d hw_ctxt_mask:0x%x",
		rsrc_data->common_data->core_index, rsrc_data->index, rsrc_data->cfg.pack_fmt,
		rsrc_data->cfg.stride, 0xF, rsrc_data->out_rsrc_data->dst_hw_ctxt_id_mask);

	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
	common_data->cntr = 0;

	/* One-shot: program perf counters only if not already enabled */
	if (!common_data->perf_cnt_en) {
		for (j = 0; j < CAM_VFE_PERF_CNT_MAX; j++) {
			if (!common_data->perf_cnt_cfg[j])
				continue;

			cam_io_w_mb(common_data->perf_cnt_cfg[j],
				common_data->mem_base +
				common_data->common_reg->perf_cnt_reg[j].perf_cnt_cfg);
			common_data->perf_cnt_en = true;
			CAM_DBG(CAM_ISP, "VFE:%u perf_cnt_%d:0x%x offset: 0x%x",
				rsrc_data->common_data->core_index,
				j, common_data->perf_cnt_cfg[j],
				common_data->common_reg->perf_cnt_reg[j].perf_cnt_cfg);
		}
	}

end:
	return rc;
}
/*
 * cam_vfe_bus_ver3_stop_wm_util()
 *
 * Disable one WM client by clearing its cfg register.
 */
static inline void cam_vfe_bus_ver3_stop_wm_util(
	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data)
{
	/* Disable WM */
	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + rsrc_data->hw_regs->cfg);
}

/*
 * cam_vfe_bus_ver3_stop_wm()
 *
 * Stop streaming on a WM resource: disable the client (per hardware
 * context for MC-based out resources, selecting each context first),
 * return the node to RESERVED, and clear all one-shot config flags so a
 * later start reprograms everything. Also clears the shared perf-counter
 * enable flag.
 *
 * @wm_res : WM resource node to stop
 *
 * Return: 0 always.
 */
static int cam_vfe_bus_ver3_stop_wm(struct cam_isp_resource_node *wm_res)
{
	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data = wm_res->res_priv;
	struct cam_vfe_bus_ver3_common_data *common_data = rsrc_data->common_data;
	int i;

	if (rsrc_data->out_rsrc_data->mc_based || rsrc_data->out_rsrc_data->cntxt_cfg_except) {
		for (i = 0; i < CAM_ISP_MULTI_CTXT_MAX; i++) {
			if (!(rsrc_data->out_rsrc_data->dst_hw_ctxt_id_mask & BIT(i)))
				continue;

			/* Select the context before touching its banked cfg */
			cam_io_w_mb(i << rsrc_data->common_data->common_reg->mc_write_sel_shift,
				rsrc_data->common_data->mem_base +
				rsrc_data->common_data->common_reg->ctxt_sel);
			cam_vfe_bus_ver3_stop_wm_util(rsrc_data);
		}
	} else {
		cam_vfe_bus_ver3_stop_wm_util(rsrc_data);
	}

	CAM_DBG(CAM_ISP, "Stop VFE:%u WM:%d %s hw_ctxt_mask:0x%x",
		rsrc_data->common_data->core_index, rsrc_data->index,
		wm_res->res_name, rsrc_data->out_rsrc_data->dst_hw_ctxt_id_mask);

	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	rsrc_data->init_cfg_done = false;
	rsrc_data->hfr_cfg_done = false;
	rsrc_data->ubwc_cfg_data.ubwc_updated = false;
	rsrc_data->update_wm_format = false;

	if (rsrc_data->out_rsrc_data->mc_based || rsrc_data->out_rsrc_data->cntxt_cfg_except) {
		for (i = 0; i < CAM_ISP_MULTI_CTXT_MAX; i++) {
			rsrc_data->mc_data[i].init_cfg_done = false;
			rsrc_data->mc_data[i].ubwc_cfg_data.ubwc_updated = false;
			rsrc_data->mc_data[i].cfg.updated = false;
		}
	}

	common_data->perf_cnt_en = false;
	return 0;
}
rsrc_data->mc_data[i].init_cfg_done = false; + rsrc_data->mc_data[i].ubwc_cfg_data.ubwc_updated = false; + rsrc_data->mc_data[i].cfg.updated = false; + } + } + + common_data->perf_cnt_en = false; + return 0; +} + +static int cam_vfe_bus_ver3_handle_wm_done_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + return -EPERM; +} + +static int cam_vfe_bus_ver3_handle_wm_done_bottom_half(void *wm_node, + void *evt_payload_priv) +{ + return -EPERM; +} + +static int cam_vfe_bus_ver3_init_wm_resource(uint32_t index, + struct cam_vfe_bus_ver3_priv *ver3_bus_priv, + struct cam_vfe_bus_ver3_hw_info *ver3_hw_info, + struct cam_isp_resource_node *wm_res, + struct cam_isp_resource_node **comp_grp, + uint8_t *wm_name, + uint32_t line_based_config, + struct cam_vfe_bus_ver3_vfe_out_data *out_rsrc_data) +{ + struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data; + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_ver3_wm_resource_data), + GFP_KERNEL); + if (!rsrc_data) { + CAM_DBG(CAM_ISP, "VFE:%u Failed to alloc for WM res priv", + ver3_bus_priv->common_data.hw_intf->hw_idx); + return -ENOMEM; + } + + if (out_rsrc_data->mc_based || out_rsrc_data->cntxt_cfg_except) { + rsrc_data->mc_data = CAM_MEM_ZALLOC_ARRAY(CAM_ISP_MULTI_CTXT_MAX, + sizeof(struct cam_vfe_bus_ver3_wm_mc_data), GFP_KERNEL); + if (!rsrc_data->mc_data) { + CAM_ERR(CAM_ISP, "VFE:%u Failed to alloc for WM res mc data", + ver3_bus_priv->common_data.hw_intf->hw_idx); + return -ENOMEM; + } + } + + wm_res->res_priv = rsrc_data; + + rsrc_data->index = index; + rsrc_data->default_line_based = line_based_config; + rsrc_data->hw_regs = &ver3_hw_info->bus_client_reg[index]; + rsrc_data->common_data = &ver3_bus_priv->common_data; + rsrc_data->out_rsrc_data = out_rsrc_data; + *comp_grp = &ver3_bus_priv->comp_grp[(&ver3_hw_info->bus_client_reg[index])->comp_group]; + + wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&wm_res->list); + + wm_res->start = cam_vfe_bus_ver3_start_wm; + 
/*
 * cam_vfe_bus_ver3_deinit_wm_resource()
 *
 * Tear down one WM resource node: clear ops/state, then free the private
 * data (including mc_data when the out resource allocated it).
 *
 * Return: 0 on success, -ENOMEM if the node had no private data.
 * NOTE(review): -ENOMEM for "no priv" is unusual; -ENODEV may be more
 * accurate — confirm against callers before changing.
 */
static int cam_vfe_bus_ver3_deinit_wm_resource(
	struct cam_isp_resource_node *wm_res)
{
	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data;

	wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
	INIT_LIST_HEAD(&wm_res->list);

	wm_res->start = NULL;
	wm_res->stop = NULL;
	wm_res->top_half_handler = NULL;
	wm_res->bottom_half_handler = NULL;
	wm_res->hw_intf = NULL;

	rsrc_data = wm_res->res_priv;
	wm_res->res_priv = NULL;
	if (!rsrc_data)
		return -ENOMEM;

	if (rsrc_data->out_rsrc_data->mc_based || rsrc_data->out_rsrc_data->cntxt_cfg_except) {
		CAM_MEM_FREE(rsrc_data->mc_data);
		rsrc_data->mc_data = NULL;
	}

	CAM_MEM_FREE(rsrc_data);

	return 0;
}

/*
 * cam_vfe_bus_ver3_acquire_comp_grp()
 *
 * Acquire (or re-acquire) a composite group. First acquire sets up
 * tasklet, dual/master mode and addr-sync; later acquires only bump the
 * ref count and extend the composite mask. Changing the mask while the
 * group is STREAMING is rejected.
 *
 * Return: 0 on success, -EBUSY if the group is already streaming.
 */
static int cam_vfe_bus_ver3_acquire_comp_grp(
	struct cam_vfe_bus_ver3_priv *ver3_bus_priv,
	void *tasklet,
	uint32_t is_dual,
	uint32_t is_master,
	struct cam_isp_resource_node *comp_grp,
	struct cam_vfe_bus_ver3_comp_grp_acquire_args *comp_acq_args)
{
	int rc = 0;
	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = comp_grp->res_priv;

	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		/* First acquire of this group */
		rsrc_data->intra_client_mask = 0x1;
		comp_grp->tasklet_info = tasklet;
		comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;

		rsrc_data->is_master = is_master;
		rsrc_data->is_dual = is_dual;

		/* Dual-VFE slave uses address sync; master does not */
		if (is_master)
			rsrc_data->addr_sync_mode = 0;
		else
			rsrc_data->addr_sync_mode = 1;

	} else {
		rsrc_data = comp_grp->res_priv;
		/* Do not support runtime change in composite mask */
		if (comp_grp->res_state ==
			CAM_ISP_RESOURCE_STATE_STREAMING) {
			CAM_ERR(CAM_ISP, "VFE:%u Invalid State %d comp_grp:%u",
				rsrc_data->common_data->core_index, comp_grp->res_state,
				rsrc_data->comp_grp_type);
			return -EBUSY;
		}
	}

	CAM_DBG(CAM_ISP, "Acquire VFE:%u comp_grp:%u",
		rsrc_data->common_data->core_index, rsrc_data->comp_grp_type);

	rsrc_data->acquire_dev_cnt++;
	rsrc_data->composite_mask |= comp_acq_args->composite_mask;

	return rc;
}
/*
 * cam_vfe_bus_ver3_release_comp_grp()
 *
 * Drop one reference on a composite group; when the last reference goes
 * the group's dual/addr-sync/composite state is reset and the node
 * returns to AVAILABLE. Releasing an AVAILABLE group is a no-op (logged),
 * releasing a STREAMING group is rejected.
 *
 * Return: 0 on success or no-op, -EINVAL on NULL input, -EBUSY if
 * streaming.
 */
static int cam_vfe_bus_ver3_release_comp_grp(
	struct cam_vfe_bus_ver3_priv *ver3_bus_priv,
	struct cam_isp_resource_node *in_comp_grp)
{
	struct cam_vfe_bus_ver3_comp_grp_data *in_rsrc_data = NULL;

	if (!in_comp_grp) {
		CAM_ERR(CAM_ISP, "Invalid Params comp_grp %pK", in_comp_grp);
		return -EINVAL;
	}

	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		CAM_ERR(CAM_ISP, "VFE:%u Already released comp_grp",
			ver3_bus_priv->common_data.core_index);
		return 0;
	}

	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
		CAM_ERR(CAM_ISP, "VFE:%u Invalid State %d",
			ver3_bus_priv->common_data.core_index, in_comp_grp->res_state);
		return -EBUSY;
	}

	in_rsrc_data = in_comp_grp->res_priv;
	CAM_DBG(CAM_ISP, "Release VFE:%u comp_grp:%u",
		ver3_bus_priv->common_data.core_index,
		in_rsrc_data->comp_grp_type);

	in_rsrc_data->acquire_dev_cnt--;
	if (in_rsrc_data->acquire_dev_cnt == 0) {

		in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER3_VFE_CORE_MAX;
		in_rsrc_data->addr_sync_mode = 0;
		in_rsrc_data->composite_mask = 0;

		in_comp_grp->tasklet_info = NULL;
		in_comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

	}

	return 0;
}

/*
 * cam_vfe_bus_ver3_start_comp_grp()
 *
 * Put a composite group into streaming: for dual-VFE setups program the
 * comp_cfg_0/comp_cfg_1 master/slave bits (skipped when the target does
 * not need comp config), apply the UBWC static control word if set, and
 * report the buf-done IRQ mask (MC mask when present, else the plain
 * comp-done mask) back through bus_irq_reg_mask. Already-streaming
 * groups return immediately.
 *
 * @vfe_out_data     : Out resource owning the comp group
 * @bus_irq_reg_mask : [out] IRQ mask array; REG0 entry is filled here
 *
 * Return: 0.
 */
static int cam_vfe_bus_ver3_start_comp_grp(
	struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data, uint32_t *bus_irq_reg_mask)
{
	int rc = 0;
	uint32_t val;
	struct cam_isp_resource_node *comp_grp = vfe_out_data->comp_grp;
	struct cam_vfe_bus_ver3_comp_grp_data *comp_grp_rsrc_data =
		vfe_out_data->comp_grp->res_priv;
	struct cam_vfe_bus_ver3_common_data *common_data = comp_grp_rsrc_data->common_data;

	CAM_DBG(CAM_ISP,
		"Start VFE:%u comp_grp:%d streaming state:%d comp_mask:0x%lX",
		comp_grp_rsrc_data->common_data->core_index,
		comp_grp_rsrc_data->comp_grp_type, comp_grp->res_state,
		comp_grp_rsrc_data->composite_mask);

	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
		return 0;

	if (!common_data->comp_config_needed)
		goto skip_comp_cfg;

	if (comp_grp_rsrc_data->is_dual) {
		if (comp_grp_rsrc_data->is_master) {
			/* Master: set the +14 bit in cfg_0 and group bit in cfg_1 */
			val = cam_io_r_mb(common_data->mem_base +
				common_data->common_reg->comp_cfg_0);

			val |= (0x1 << (comp_grp_rsrc_data->comp_grp_type + 14));

			cam_io_w_mb(val, common_data->mem_base +
				common_data->common_reg->comp_cfg_0);

			val = cam_io_r_mb(common_data->mem_base +
				common_data->common_reg->comp_cfg_1);

			val |= (0x1 << comp_grp_rsrc_data->comp_grp_type);

			cam_io_w_mb(val, common_data->mem_base +
				common_data->common_reg->comp_cfg_1);
		} else {
			/* Slave: set both the group bit and the +14 bit in cfg_0 */
			val = cam_io_r_mb(common_data->mem_base +
				common_data->common_reg->comp_cfg_0);

			val |= (0x1 << comp_grp_rsrc_data->comp_grp_type);
			val |= (0x1 << (comp_grp_rsrc_data->comp_grp_type + 14));

			cam_io_w_mb(val, common_data->mem_base +
				common_data->common_reg->comp_cfg_0);

			val = cam_io_r_mb(common_data->mem_base +
				common_data->common_reg->comp_cfg_1);

			val |= (0x1 << comp_grp_rsrc_data->comp_grp_type);

			cam_io_w_mb(val, common_data->mem_base +
				common_data->common_reg->comp_cfg_1);
		}
	}

skip_comp_cfg:

	if (comp_grp_rsrc_data->ubwc_static_ctrl) {
		val = cam_io_r_mb(common_data->mem_base +
			common_data->common_reg->ubwc_static_ctrl);
		val |= comp_grp_rsrc_data->ubwc_static_ctrl;
		cam_io_w_mb(val, common_data->mem_base +
			common_data->common_reg->ubwc_static_ctrl);
	}

	/* Prefer the MC comp-done mask when the target provides one */
	if (comp_grp_rsrc_data->mc_comp_done_mask)
		bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0] = comp_grp_rsrc_data->mc_comp_done_mask;
	else
		bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0] = comp_grp_rsrc_data->comp_done_mask;

	CAM_DBG(CAM_ISP,
		"Start Done VFE:%u comp_grp:%d mc_based:%s bus_irq_mask_0: 0x%x comp_done_mask:0x%x mc_comp_done_mask:0x%x",
		comp_grp_rsrc_data->common_data->core_index, comp_grp_rsrc_data->comp_grp_type,
		CAM_BOOL_TO_YESNO(vfe_out_data->mc_based),
		bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0], comp_grp_rsrc_data->comp_done_mask,
		comp_grp_rsrc_data->mc_comp_done_mask);

	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	return rc;
}
/*
 * cam_vfe_bus_ver3_stop_comp_grp()
 *
 * Move a composite group back to RESERVED. No register writes here;
 * hardware disable happens per-WM in stop_wm().
 */
static int cam_vfe_bus_ver3_stop_comp_grp(
	struct cam_isp_resource_node *comp_grp)
{
	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;

	return 0;
}

/* Comp-done top half is handled elsewhere; this stub always rejects. */
static int cam_vfe_bus_ver3_handle_comp_done_top_half(uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	return -EPERM;
}

/*
 * cam_vfe_bus_ver3_handle_comp_done_bottom_half()
 *
 * Bottom-half comp-done evaluation for one out resource. On a dual
 * setup only the master evaluates. Declares CAM_ISP_HW_EVENT_DONE when
 * status_0 carries this group's comp-done bit, or its MC comp-done bit
 * for hw-context completions, or when the payload flags an early done.
 *
 * @handler_priv : struct cam_vfe_bus_ver3_vfe_out_data of the out res
 * @evt_payload  : IRQ event payload with latched register values
 * @comp_grp_id  : [out] this group's type, always reported
 *
 * Return: CAM_VFE_IRQ_STATUS_SUCCESS when done fired for this group,
 * CAM_VFE_IRQ_STATUS_ERR otherwise.
 */
static int cam_vfe_bus_ver3_handle_comp_done_bottom_half(
	void *handler_priv,
	struct cam_vfe_bus_irq_evt_payload *evt_payload,
	uint32_t *comp_grp_id)
{
	int rc = CAM_VFE_IRQ_STATUS_ERR;
	struct cam_vfe_bus_ver3_vfe_out_data *vfe_out = handler_priv;
	struct cam_isp_resource_node *comp_grp = vfe_out->comp_grp;
	struct cam_vfe_bus_ver3_comp_grp_data *comp_grp_rsrc_data = comp_grp->res_priv;
	uint32_t *cam_ife_irq_regs;
	uint32_t status_0;

	if (!evt_payload)
		return rc;

	/* In dual mode only the master owns comp-done evaluation */
	if (comp_grp_rsrc_data->is_dual && (!comp_grp_rsrc_data->is_master)) {
		CAM_ERR(CAM_ISP, "VFE:%u Invalid comp_grp:%u is_master:%u",
			comp_grp_rsrc_data->common_data->core_index,
			comp_grp_rsrc_data->comp_grp_type,
			comp_grp_rsrc_data->is_master);
		return rc;
	}

	cam_ife_irq_regs = evt_payload->irq_reg_val;
	status_0 = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];

	if ((status_0 & comp_grp_rsrc_data->comp_done_mask) ||
		(evt_payload->is_hw_ctxt_comp_done &&
		(status_0 & comp_grp_rsrc_data->mc_comp_done_mask)) || evt_payload->is_early_done) {
		evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
	}

	CAM_DBG(CAM_ISP,
		"VFE:%u comp_grp:%d is_hw_ctxt_comp_irq:%s comp_done_mask:0x%x mc_comp_done_mask:0x%x Bus IRQ status_0:0x%x rc:%d",
		comp_grp_rsrc_data->common_data->core_index, comp_grp_rsrc_data->comp_grp_type,
		CAM_BOOL_TO_YESNO(evt_payload->is_hw_ctxt_comp_done),
		comp_grp_rsrc_data->comp_done_mask, comp_grp_rsrc_data->mc_comp_done_mask,
		status_0, rc);

	*comp_grp_id = comp_grp_rsrc_data->comp_grp_type;

	return rc;
}
/*
 * cam_vfe_bus_ver3_init_comp_grp()
 *
 * Allocate and initialize private data for one composite group node.
 * Groups 0 and 1 additionally pick a UBWC static control word based on
 * the DDR type (LPDDR5/5X uses index 1, everything else index 0).
 *
 * @index         : Composite group index
 * @soc_info      : SOC info (source of ubwc_static_ctrl values)
 * @ver3_bus_priv : Bus private data
 * @ver3_hw_info  : Static per-target bus description (done masks)
 * @comp_grp      : Resource node to initialize
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int cam_vfe_bus_ver3_init_comp_grp(uint32_t index,
	struct cam_hw_soc_info *soc_info,
	struct cam_vfe_bus_ver3_priv *ver3_bus_priv,
	struct cam_vfe_bus_ver3_hw_info *ver3_hw_info,
	struct cam_isp_resource_node *comp_grp)
{
	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
	struct cam_vfe_soc_private *vfe_soc_private = soc_info->soc_private;
	int ddr_type = 0;

	rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_ver3_comp_grp_data),
		GFP_KERNEL);
	if (!rsrc_data)
		return -ENOMEM;

	comp_grp->res_priv = rsrc_data;

	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;

	rsrc_data->comp_grp_type = index;
	rsrc_data->common_data = &ver3_bus_priv->common_data;
	rsrc_data->dual_slave_core = CAM_VFE_BUS_VER3_VFE_CORE_MAX;
	rsrc_data->comp_done_mask = ver3_hw_info->comp_done_mask[index];
	rsrc_data->mc_comp_done_mask = ver3_hw_info->mc_comp_done_mask[index];

	/* UBWC static control only applies to comp groups 0 and 1 */
	if (rsrc_data->comp_grp_type != CAM_VFE_BUS_VER3_COMP_GRP_0 &&
		rsrc_data->comp_grp_type != CAM_VFE_BUS_VER3_COMP_GRP_1)
		rsrc_data->ubwc_static_ctrl = 0;
	else {
		ddr_type = cam_get_ddr_type();
		if ((ddr_type == DDR_TYPE_LPDDR5) ||
			(ddr_type == DDR_TYPE_LPDDR5X))
			rsrc_data->ubwc_static_ctrl =
				vfe_soc_private->ubwc_static_ctrl[1];
		else
			rsrc_data->ubwc_static_ctrl =
				vfe_soc_private->ubwc_static_ctrl[0];
	}


	comp_grp->top_half_handler = cam_vfe_bus_ver3_handle_comp_done_top_half;
	comp_grp->hw_intf = ver3_bus_priv->common_data.hw_intf;

	return 0;
}

/*
 * cam_vfe_bus_ver3_deinit_comp_grp()
 *
 * Tear down a composite group node: clear ops/state and free the
 * private data.
 *
 * Return: 0 on success, -ENODEV when the node had no private data.
 */
static int cam_vfe_bus_ver3_deinit_comp_grp(
	struct cam_isp_resource_node *comp_grp)
{
	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data =
		comp_grp->res_priv;

	comp_grp->start = NULL;
	comp_grp->stop = NULL;
	comp_grp->top_half_handler = NULL;
	comp_grp->bottom_half_handler = NULL;
	comp_grp->hw_intf = NULL;

	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;

	comp_grp->res_priv = NULL;

	if (!rsrc_data) {
		CAM_ERR(CAM_ISP, "comp_grp_priv is NULL");
		return -ENODEV;
	}
	CAM_MEM_FREE(rsrc_data);

	return 0;
}

/*
 * cam_vfe_bus_ver3_get_secure_mode()
 *
 * HW-manager command: report whether the out resource was acquired in
 * secure mode. Writes 1/0 into the caller-provided u32 in cmd_args.
 */
static int cam_vfe_bus_ver3_get_secure_mode(void *priv, void *cmd_args,
	uint32_t arg_size)
{
	struct cam_isp_hw_get_cmd_update *secure_mode = cmd_args;
	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data;
	uint32_t *mode;

	rsrc_data = (struct cam_vfe_bus_ver3_vfe_out_data *)
		secure_mode->res->res_priv;
	mode = (uint32_t *)secure_mode->data;
	*mode = (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) ?
		true : false;

	return 0;
}

/*
 * cam_vfe_bus_ver3_acquire_vfe_out()
 *
 * Acquire an output resource: map the requested res_type to an out node,
 * handle re-acquire of an already-reserved MC-based node by adding the
 * new hw-context to its mask, reconcile the bus-wide secure mode under
 * bus_mutex, trim num_wm for single-WM format cases (FD Y_ONLY, certain
 * DS2 formats), then acquire each WM and the composite group. WMs are
 * unwound on failure.
 *
 * Return: 0 on success, -EINVAL on bad params or secure-mode mismatch,
 * -ENODEV for unsupported res id, -EBUSY if unavailable, or an error
 * from WM/comp-group acquire.
 */
static int cam_vfe_bus_ver3_acquire_vfe_out(void *bus_priv, void *acquire_args,
	uint32_t args_size)
{
	int rc = -ENODEV;
	int i;
	enum cam_vfe_bus_ver3_vfe_out_type vfe_out_res_id;
	struct cam_vfe_bus_ver3_priv *ver3_bus_priv = bus_priv;
	struct cam_vfe_acquire_args *acq_args = acquire_args;
	struct cam_vfe_hw_vfe_out_acquire_args *out_acquire_args;
	struct cam_isp_resource_node *rsrc_node = NULL;
	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL;
	uint32_t secure_caps = 0, mode;
	struct cam_vfe_bus_ver3_comp_grp_acquire_args comp_acq_args = {0};
	uint32_t outmap_index = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
	struct cam_vfe_bus_ver3_comp_grp_data *comp_grp_rsrc_data = NULL;


	if (!bus_priv || !acquire_args) {
		CAM_ERR(CAM_ISP, "Invalid Param");
		return -EINVAL;
	}

	out_acquire_args = &acq_args->vfe_out;

	CAM_DBG(CAM_ISP, "VFE:%u Acquire out_type:0x%X use_hw_ctxt:%s hw_ctx_id:0x%x",
		ver3_bus_priv->common_data.core_index,
		out_acquire_args->out_port_info->res_type,
		CAM_BOOL_TO_YESNO(out_acquire_args->use_hw_ctxt),
		out_acquire_args->out_port_info->hw_context_id);

	vfe_out_res_id = cam_vfe_bus_ver3_get_out_res_id_and_index(
		ver3_bus_priv,
		out_acquire_args->out_port_info->res_type,
		&outmap_index);
	if ((vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_MAX) ||
		(outmap_index >= ver3_bus_priv->num_out)) {
		CAM_WARN(CAM_ISP,
			"VFE:%u target does not support req res id :0x%x outtype:%d index:%d",
			ver3_bus_priv->common_data.core_index,
			out_acquire_args->out_port_info->res_type,
			vfe_out_res_id, outmap_index);
		return -ENODEV;
	}

	rsrc_node = &ver3_bus_priv->vfe_out[outmap_index];
	rsrc_data = rsrc_node->res_priv;
	comp_grp_rsrc_data = rsrc_data->comp_grp->res_priv;
	if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
		/* MC-based node may be shared: just add the new hw context */
		if ((rsrc_data->mc_based || rsrc_data->cntxt_cfg_except) &&
			out_acquire_args->use_hw_ctxt &&
			!(rsrc_data->dst_hw_ctxt_id_mask &
			out_acquire_args->out_port_info->hw_context_id)) {
			rsrc_data->dst_hw_ctxt_id_mask |=
				out_acquire_args->out_port_info->hw_context_id;
			out_acquire_args->rsrc_node = rsrc_node;
			rc = 0;
			goto end;
		} else {
			CAM_ERR(CAM_ISP,
				"VFE:%u out_type:0x%X mc_cap:%s cntxt_except:%s resource not available state:%d dst_hw_ctxt_id_mask:0x%x req_ctxt_id:0x%x",
				ver3_bus_priv->common_data.core_index,
				vfe_out_res_id, CAM_BOOL_TO_YESNO(rsrc_data->mc_based),
				CAM_BOOL_TO_YESNO(rsrc_data->cntxt_cfg_except),
				rsrc_node->res_state, rsrc_data->dst_hw_ctxt_id_mask,
				out_acquire_args->out_port_info->hw_context_id);
			rc = -EBUSY;
			goto end;
		}
	}

	rsrc_data->common_data->event_cb = acq_args->event_cb;
	rsrc_data->common_data->priv = acq_args->priv;
	rsrc_data->common_data->disable_ubwc_comp =
		out_acquire_args->disable_ubwc_comp;
	rsrc_data->priv = acq_args->priv;
	rsrc_data->bus_priv = ver3_bus_priv;
	rsrc_data->limiter_enabled = false;
	comp_acq_args.composite_mask = (1ULL << vfe_out_res_id);

	if (out_acquire_args->use_hw_ctxt)
		rsrc_data->dst_hw_ctxt_id_mask |= out_acquire_args->out_port_info->hw_context_id;


	/* for some hw versions, buf done is not received from vfe but
	 * from IP external to VFE. In such case, we get the controller
	 * from hw manager and assign it here
	 */
	if (!(ver3_bus_priv->common_data.supported_irq & CAM_VFE_HW_IRQ_CAP_BUF_DONE)) {
		rsrc_data->common_data->buf_done_controller = acq_args->buf_done_controller;

		if (comp_grp_rsrc_data->mc_comp_done_mask)
			rsrc_data->common_data->mc_comp_buf_done_controller =
				acq_args->mc_comp_buf_done_controller;
	}

	/* Secure mode must agree across all secure-capable outputs */
	secure_caps = cam_vfe_bus_ver3_can_be_secure(
		rsrc_data->out_type);
	mode = out_acquire_args->out_port_info->secure_mode;
	mutex_lock(&rsrc_data->common_data->bus_mutex);
	if (secure_caps) {
		if (!rsrc_data->common_data->num_sec_out) {
			rsrc_data->secure_mode = mode;
			rsrc_data->common_data->secure_mode = mode;
		} else {
			if (mode == rsrc_data->common_data->secure_mode) {
				rsrc_data->secure_mode =
					rsrc_data->common_data->secure_mode;
			} else {
				rc = -EINVAL;
				CAM_ERR_RATE_LIMIT(CAM_ISP,
					"VFE:%u Mismatch: Acquire mode[%d], drvr mode[%d]",
					ver3_bus_priv->common_data.core_index,
					rsrc_data->common_data->secure_mode,
					mode);
				mutex_unlock(
					&rsrc_data->common_data->bus_mutex);
				return rc;
			}
		}
		rsrc_data->common_data->num_sec_out++;
	}
	mutex_unlock(&rsrc_data->common_data->bus_mutex);

	ver3_bus_priv->tasklet_info = acq_args->tasklet;
	rsrc_node->is_rdi_primary_res = false;
	rsrc_node->res_id = out_acquire_args->out_port_info->res_type;
	rsrc_node->tasklet_info = acq_args->tasklet;
	rsrc_node->cdm_ops = out_acquire_args->cdm_ops;
	rsrc_data->common_data->cdm_util_ops = out_acquire_args->cdm_ops;
	rsrc_data->format = out_acquire_args->out_port_info->format;

	/* Single-WM special cases */
	if ((rsrc_data->out_type == CAM_VFE_BUS_VER3_VFE_OUT_FD) &&
		(rsrc_data->format == CAM_FORMAT_Y_ONLY))
		rsrc_data->num_wm = 1;

	if (rsrc_data->out_type == CAM_VFE_BUS_VER3_VFE_OUT_DS2) {
		switch (rsrc_data->format) {
		case CAM_FORMAT_MIPI_RAW_10:
		case CAM_FORMAT_PLAIN16_10:
		case CAM_FORMAT_PLAIN16_12:
		case CAM_FORMAT_PLAIN16_14:
		case CAM_FORMAT_PLAIN16_16:
		case CAM_FORMAT_BAYER_UBWC_TP10:
		case CAM_FORMAT_PLAIN16_10_LSB:
			rsrc_data->num_wm = 1;
			break;
		default:
			break;
		}
	}

	/* Acquire WM and retrieve COMP GRP ID */
	for (i = 0; i < rsrc_data->num_wm; i++) {
		rc = cam_vfe_bus_ver3_acquire_wm(ver3_bus_priv,
			out_acquire_args,
			acq_args->tasklet,
			vfe_out_res_id,
			i,
			&rsrc_data->wm_res[i],
			&comp_acq_args.comp_grp_id);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"Failed to acquire WM VFE:%u out_type:%d rc:%d",
				rsrc_data->common_data->core_index,
				vfe_out_res_id, rc);
			goto release_wm;
		}
	}


	/* Acquire composite group using COMP GRP ID */
	rc = cam_vfe_bus_ver3_acquire_comp_grp(ver3_bus_priv,
		acq_args->tasklet,
		out_acquire_args->is_dual,
		out_acquire_args->is_master,
		rsrc_data->comp_grp,
		&comp_acq_args);
	if (rc) {
		/* NOTE(review): "out_typp" typo is in the shipped log string */
		CAM_ERR(CAM_ISP,
			"Failed to acquire comp_grp VFE:%u out_typp:%d rc:%d",
			rsrc_data->common_data->core_index,
			vfe_out_res_id, rc);
		goto release_wm;
	}

	rsrc_data->is_dual = out_acquire_args->is_dual;
	rsrc_data->is_master = out_acquire_args->is_master;

	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	out_acquire_args->rsrc_node = rsrc_node;
	out_acquire_args->comp_grp_id = comp_acq_args.comp_grp_id;

	CAM_DBG(CAM_ISP, "VFE:%u Acquire successful", rsrc_data->common_data->core_index);
	return rc;

release_wm:
	/* Unwind only the WMs acquired so far */
	for (i--; i >= 0; i--)
		cam_vfe_bus_ver3_release_wm(ver3_bus_priv,
			&rsrc_data->wm_res[i]);

end:
	return rc;
}

static int cam_vfe_bus_ver3_release_vfe_out(void *bus_priv, void *release_args,
	uint32_t args_size)
{
	uint32_t i;
	struct cam_isp_resource_node *vfe_out = NULL;
	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL;
	uint32_t secure_caps = 0;

	if (!bus_priv || !release_args) {
		CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
			bus_priv, release_args);
		return -EINVAL;
	}

	vfe_out = release_args;
	rsrc_data = vfe_out->res_priv;

	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_ERR(CAM_ISP, + "Invalid resource state:%d VFE:%u out_type:0x%X", + vfe_out->res_state, rsrc_data->common_data->core_index, + vfe_out->res_id); + } + + for (i = 0; i < rsrc_data->num_wm; i++) + cam_vfe_bus_ver3_release_wm(bus_priv, &rsrc_data->wm_res[i]); + + if ((rsrc_data->out_type == CAM_VFE_BUS_VER3_VFE_OUT_FD) && + (rsrc_data->format == CAM_FORMAT_Y_ONLY)) + rsrc_data->num_wm = 2; + + if (rsrc_data->out_type == CAM_VFE_BUS_VER3_VFE_OUT_DS2) { + switch (rsrc_data->format) { + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_BAYER_UBWC_TP10: + case CAM_FORMAT_PLAIN16_10_LSB: + rsrc_data->num_wm = 2; + break; + default: + break; + } + } + + if (rsrc_data->comp_grp) + cam_vfe_bus_ver3_release_comp_grp(bus_priv, + rsrc_data->comp_grp); + + + vfe_out->tasklet_info = NULL; + vfe_out->cdm_ops = NULL; + rsrc_data->dst_hw_ctxt_id_mask = 0; + + secure_caps = cam_vfe_bus_ver3_can_be_secure(rsrc_data->out_type); + mutex_lock(&rsrc_data->common_data->bus_mutex); + if (secure_caps) { + if (rsrc_data->secure_mode == + rsrc_data->common_data->secure_mode) { + rsrc_data->common_data->num_sec_out--; + rsrc_data->secure_mode = + CAM_SECURE_MODE_NON_SECURE; + } else { + /* + * The validity of the mode is properly + * checked while acquiring the output port. + * not expected to reach here, unless there is + * some corruption. 
+ */ + CAM_ERR(CAM_ISP, "VFE:%u driver[%d],resource[%d] mismatch", + rsrc_data->common_data->core_index, + rsrc_data->common_data->secure_mode, + rsrc_data->secure_mode); + } + + if (!rsrc_data->common_data->num_sec_out) + rsrc_data->common_data->secure_mode = + CAM_SECURE_MODE_NON_SECURE; + } + mutex_unlock(&rsrc_data->common_data->bus_mutex); + + if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + return 0; +} + +static int cam_vfe_bus_ver3_start_vfe_out( + struct cam_isp_resource_node *vfe_out) +{ + int rc = 0, i; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver3_common_data *common_data = NULL; + uint32_t bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX]; + uint32_t rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX]; + uint32_t source_group = 0; + struct cam_vfe_bus_ver3_comp_grp_data *comp_grp_rsrc_data = NULL; + + if (!vfe_out) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + + rsrc_data = vfe_out->res_priv; + comp_grp_rsrc_data = rsrc_data->comp_grp->res_priv; + common_data = rsrc_data->common_data; + source_group = rsrc_data->source_group; + + CAM_DBG(CAM_ISP, "Start VFE:%u out_type:0x%X", + rsrc_data->common_data->core_index, rsrc_data->out_type); + + if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, + "Invalid resource state:%d VFE:%u out_type:0x%X", + vfe_out->res_state, rsrc_data->common_data->core_index, + rsrc_data->out_type); + return -EACCES; + } + + /* subscribe when first out rsrc is streamed on */ + if (!rsrc_data->common_data->init_irq_subscribed) { + rc = cam_vfe_bus_ver3_subscribe_init_irq(rsrc_data->bus_priv); + if (rc) + return rc; + } + + for (i = 0; i < rsrc_data->num_wm; i++) { + rc = cam_vfe_bus_ver3_start_wm(&rsrc_data->wm_res[i]); + if (rc) + return rc; + } + + memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask)); + rc = cam_vfe_bus_ver3_start_comp_grp(rsrc_data, bus_irq_reg_mask); + + if 
(rsrc_data->is_dual && !rsrc_data->is_master) + goto end; + + if (comp_grp_rsrc_data->mc_comp_done_mask) { + if (!common_data->mc_comp_buf_done_controller) { + CAM_ERR(CAM_ISP, "MC comp buf done ctrler is NULL"); + return -EPERM; + } + + rsrc_data->mc_comp_irq_handle = cam_irq_controller_subscribe_irq( + common_data->mc_comp_buf_done_controller, + CAM_IRQ_PRIORITY_1, + bus_irq_reg_mask, + vfe_out, + cam_vfe_bus_ver3_handle_mc_comp_done_top_half, + vfe_out->bottom_half_handler, + vfe_out->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (rsrc_data->mc_comp_irq_handle < 1) { + CAM_ERR(CAM_ISP, "Subscribe MC comp IRQ failed for VFE out_res %d, VFE:%u", + vfe_out->res_id, rsrc_data->common_data->core_index); + rsrc_data->mc_comp_irq_handle = 0; + return -EFAULT; + } + } else { + vfe_out->irq_handle = cam_irq_controller_subscribe_irq( + common_data->buf_done_controller, + CAM_IRQ_PRIORITY_1, + bus_irq_reg_mask, + vfe_out, + vfe_out->top_half_handler, + vfe_out->bottom_half_handler, + vfe_out->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (vfe_out->irq_handle < 1) { + CAM_ERR(CAM_ISP, "Subscribe IRQ failed for VFE out_res %d, VFE:%u", + vfe_out->res_id, rsrc_data->common_data->core_index); + vfe_out->irq_handle = 0; + return -EFAULT; + } + } + + if (rsrc_data->early_done_mask) { + memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask)); + bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG1] = rsrc_data->early_done_mask; + + CAM_DBG(CAM_ISP, + "VFE:%u out_type:0x%X %s bus_irq_mask_0:0x%X for Early done", + rsrc_data->common_data->core_index, rsrc_data->out_type, + rsrc_data->wm_res[PLANE_Y].res_name, + bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG1]); + + rsrc_data->early_done_irq_handle = cam_irq_controller_subscribe_irq( + rsrc_data->bus_priv->common_data.bus_irq_controller, + CAM_IRQ_PRIORITY_1, + bus_irq_reg_mask, + vfe_out, + vfe_out->top_half_handler, + vfe_out->bottom_half_handler, + rsrc_data->bus_priv->tasklet_info, + &tasklet_bh_api, + 
CAM_IRQ_EVT_GROUP_0); + + if (rsrc_data->early_done_irq_handle < 1) { + CAM_ERR(CAM_ISP, "VFE:%u Failed to subscribe Early done IRQ", + rsrc_data->bus_priv->common_data.core_index); + rsrc_data->early_done_irq_handle = 0; + return -EFAULT; + } + } + + + if ((common_data->is_lite || source_group > CAM_VFE_BUS_VER3_SRC_GRP_0) + && !vfe_out->is_rdi_primary_res) + goto end; + + if ((common_data->supported_irq & CAM_VFE_HW_IRQ_CAP_RUP) && + (!common_data->rup_irq_handle[source_group])) { + memset(rup_irq_reg_mask, 0, sizeof(rup_irq_reg_mask)); + rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0] |= + 0x1 << source_group; + + CAM_DBG(CAM_ISP, + "VFE:%u out_type:0x%X bus_irq_mask_0:0x%X for RUP", + rsrc_data->common_data->core_index, rsrc_data->out_type, + rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0]); + + common_data->rup_irq_handle[source_group] = + cam_irq_controller_subscribe_irq( + common_data->bus_irq_controller, + CAM_IRQ_PRIORITY_0, + rup_irq_reg_mask, + vfe_out, + cam_vfe_bus_ver3_handle_rup_top_half, + cam_vfe_bus_ver3_handle_rup_bottom_half, + vfe_out->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_1); + + if (common_data->rup_irq_handle[source_group] < 1) { + CAM_ERR(CAM_ISP, "VFE:%u Failed to subscribe RUP IRQ", + rsrc_data->common_data->core_index); + common_data->rup_irq_handle[source_group] = 0; + return -EFAULT; + } + } + +end: + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + return rc; +} + +static int cam_vfe_bus_ver3_stop_vfe_out( + struct cam_isp_resource_node *vfe_out) +{ + int rc = 0, i; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver3_common_data *common_data = NULL; + + if (!vfe_out) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + + rsrc_data = vfe_out->res_priv; + common_data = rsrc_data->common_data; + + if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE || + vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, "Stop VFE:%u out_type:0x%X state:%d", + 
rsrc_data->common_data->core_index, rsrc_data->out_type, + vfe_out->res_state); + return rc; + } + + rc = cam_vfe_bus_ver3_stop_comp_grp(rsrc_data->comp_grp); + + for (i = 0; i < rsrc_data->num_wm; i++) + rc = cam_vfe_bus_ver3_stop_wm(&rsrc_data->wm_res[i]); + + if (common_data->rup_irq_handle[rsrc_data->source_group]) { + rc = cam_irq_controller_unsubscribe_irq( + common_data->bus_irq_controller, + common_data->rup_irq_handle[rsrc_data->source_group]); + common_data->rup_irq_handle[rsrc_data->source_group] = 0; + } + + if (vfe_out->irq_handle) { + rc = cam_irq_controller_unsubscribe_irq(common_data->buf_done_controller, + vfe_out->irq_handle); + vfe_out->irq_handle = 0; + } + + if (rsrc_data->mc_comp_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq(common_data->mc_comp_buf_done_controller, + rsrc_data->mc_comp_irq_handle); + rsrc_data->mc_comp_irq_handle = 0; + } + + if (rsrc_data->early_done_irq_handle) { + rc = cam_irq_controller_unsubscribe_irq(common_data->bus_irq_controller, + rsrc_data->early_done_irq_handle); + rsrc_data->early_done_irq_handle = 0; + } + + if (rsrc_data->common_data->init_irq_subscribed) + cam_vfe_bus_ver3_unsubscribe_init_irq(rsrc_data->bus_priv); + + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + return rc; +} + +static int cam_vfe_bus_ver3_out_done_top_half_util(uint32_t evt_id, + struct cam_irq_th_payload *th_payload, + struct cam_vfe_bus_irq_evt_payload *evt_payload, + struct cam_vfe_bus_ver3_vfe_out_data *out_rsrc_data) +{ + uint32_t status_0, status_1; + int i; + struct cam_vfe_bus_ver3_wm_resource_data *wm_rsrc_data = NULL; + struct cam_vfe_bus_ver3_comp_grp_data *comp_rsrc_data = + out_rsrc_data->comp_grp->res_priv; + + cam_isp_hw_get_timestamp(&evt_payload->ts); + + evt_payload->core_index = out_rsrc_data->common_data->core_index; + evt_payload->evt_id = evt_id; + wm_rsrc_data = out_rsrc_data->wm_res[PLANE_Y].res_priv; + + if (th_payload->num_registers <= CAM_IFE_BUS_IRQ_REGISTERS_MAX) { + for (i = 0; i < 
th_payload->num_registers; i++) { + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + CAM_DBG(CAM_ISP, "VFE:%u Bus IRQ status_%d: 0x%X", + out_rsrc_data->common_data->core_index, + i, th_payload->evt_status_arr[i]); + } + } else { + CAM_ERR(CAM_ISP, "Index out of bounds for num of registers %d, max allowed is %d", + th_payload->num_registers, CAM_IFE_BUS_IRQ_REGISTERS_MAX); + return -EINVAL; + } + + th_payload->evt_payload_priv = evt_payload; + + status_0 = th_payload->evt_status_arr[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0]; + + if (evt_payload->is_hw_ctxt_comp_done && (!comp_rsrc_data->mc_comp_done_mask)) { + CAM_ERR(CAM_ISP, "Invalid configuration for hw ctxt comp buf done"); + return -EPERM; + } + + if ((status_0 & comp_rsrc_data->comp_done_mask) || (evt_payload->is_hw_ctxt_comp_done && + (status_0 & comp_rsrc_data->mc_comp_done_mask))) { + evt_payload->last_consumed_addr = cam_io_r_mb(wm_rsrc_data->common_data->mem_base + + wm_rsrc_data->hw_regs->addr_status_0); + trace_cam_log_event("bufdone", "bufdone_IRQ", + status_0, comp_rsrc_data->comp_grp_type); + } + + if (th_payload->num_registers >= 2) { + status_1 = th_payload->evt_status_arr[CAM_IFE_IRQ_BUS_VER3_REG_STATUS1]; + if (status_1 & out_rsrc_data->early_done_mask) { + evt_payload->is_early_done = true; + trace_cam_log_event("earlydone", "earlydone_IRQ", + status_1, out_rsrc_data->wm_res[PLANE_Y].res_id); + } + } + + if (status_0 & 0x1) + trace_cam_log_event("UnexpectedRUP", "RUP_IRQ", status_0, 40); + + return 0; +} + +static int cam_vfe_bus_ver3_handle_vfe_out_done_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *vfe_out = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *out_rsrc_data = NULL; + struct cam_vfe_bus_irq_evt_payload *evt_payload; + + vfe_out = th_payload->handler_priv; + if (!vfe_out) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource"); + return -ENODEV; + } + + out_rsrc_data = vfe_out->res_priv; + + rc = 
cam_vfe_bus_ver3_get_evt_payload(out_rsrc_data->common_data, &evt_payload); + if (rc) { + for (i = 0; i < th_payload->num_registers; i++) + CAM_INFO_RATE_LIMIT(CAM_ISP, "VFE:%u Bus IRQ status_%d: 0x%X", + out_rsrc_data->common_data->core_index, i, + th_payload->evt_status_arr[i]); + return rc; + } + + evt_payload->is_hw_ctxt_comp_done = false; + rc = cam_vfe_bus_ver3_out_done_top_half_util(evt_id, th_payload, evt_payload, + out_rsrc_data); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to process out done top half rc:%d", rc); + return rc; + } + + + CAM_DBG(CAM_ISP, "VFE:%u Exit", out_rsrc_data->common_data->core_index); + return rc; +} + +static int cam_vfe_bus_ver3_handle_mc_comp_done_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *vfe_out = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *out_rsrc_data = NULL; + struct cam_vfe_bus_irq_evt_payload *evt_payload; + + vfe_out = th_payload->handler_priv; + if (!vfe_out) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource"); + return -ENODEV; + } + + out_rsrc_data = vfe_out->res_priv; + + CAM_DBG(CAM_ISP, "VFE:%u MC comp done res: %s", out_rsrc_data->common_data->core_index, + out_rsrc_data->wm_res[PLANE_Y].res_name); + rc = cam_vfe_bus_ver3_get_evt_payload(out_rsrc_data->common_data, &evt_payload); + if (rc) { + for (i = 0; i < th_payload->num_registers; i++) + CAM_INFO_RATE_LIMIT(CAM_ISP, "VFE:%u Bus IRQ status_%d: 0x%X", + out_rsrc_data->common_data->core_index, i, + th_payload->evt_status_arr[i]); + return rc; + } + + evt_payload->is_hw_ctxt_comp_done = true; + rc = cam_vfe_bus_ver3_out_done_top_half_util(evt_id, th_payload, evt_payload, + out_rsrc_data); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to process out done top half rc:%d", rc); + return rc; + } + + return rc; +} + +static int cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + int rc = -EINVAL; + struct cam_isp_resource_node *vfe_out = handler_priv; + 
struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = vfe_out->res_priv; + struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv; + struct cam_isp_hw_event_info evt_info = {0}; + struct cam_isp_hw_bufdone_event_info bufdone_evt_info = {0}; + void *ctx = NULL; + uint32_t evt_id = 0; + uint32_t comp_grp_id = 0; + + rc = cam_vfe_bus_ver3_handle_comp_done_bottom_half(rsrc_data, evt_payload, &comp_grp_id); + CAM_DBG(CAM_ISP, + "VFE:%u out_type:0x%x mc_based:%s cntxt_cfg_except:%s is_early_done:%s comp_grp_id:%d rc:%d", + rsrc_data->common_data->core_index, rsrc_data->out_type, + CAM_BOOL_TO_YESNO(rsrc_data->mc_based), + CAM_BOOL_TO_YESNO(rsrc_data->cntxt_cfg_except), + CAM_BOOL_TO_YESNO(evt_payload->is_early_done), comp_grp_id, rc); + + ctx = rsrc_data->priv; + + switch (rc) { + case CAM_VFE_IRQ_STATUS_SUCCESS: + evt_id = evt_payload->evt_id; + evt_info.res_type = vfe_out->res_type; + evt_info.hw_idx = vfe_out->hw_intf->hw_idx; + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.res_id = vfe_out->res_id; + bufdone_evt_info.res_id = vfe_out->res_id; + bufdone_evt_info.comp_grp_id = comp_grp_id; + bufdone_evt_info.last_consumed_addr = evt_payload->last_consumed_addr; + bufdone_evt_info.is_hw_ctxt_comp = evt_payload->is_hw_ctxt_comp_done; + bufdone_evt_info.is_early_done = evt_payload->is_early_done; + evt_info.event_data = (void *)&bufdone_evt_info; + + if (rsrc_data->common_data->event_cb) + rsrc_data->common_data->event_cb(ctx, evt_id, (void *)&evt_info); + + break; + default: + break; + } + + cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data, &evt_payload); + + return rc; +} + +static int cam_vfe_bus_ver3_init_vfe_out_resource(uint32_t index, + struct cam_vfe_bus_ver3_priv *ver3_bus_priv, + struct cam_vfe_bus_ver3_hw_info *ver3_hw_info) +{ + struct cam_isp_resource_node *vfe_out = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + int rc = 0, i = 0; + int32_t vfe_out_type = + ver3_hw_info->vfe_out_hw_info[index].vfe_out_type; + + if 
(vfe_out_type < 0 || + vfe_out_type >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) { + CAM_ERR(CAM_ISP, "Init VFE Out failed, Invalid type=%d", + vfe_out_type); + return -EINVAL; + } + + ver3_bus_priv->vfe_out_map_outtype[vfe_out_type] = index; + vfe_out = &ver3_bus_priv->vfe_out[index]; + if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE || + vfe_out->res_priv) { + CAM_ERR(CAM_ISP, "vfe_out_type %d has already been initialized", + vfe_out_type); + return -EFAULT; + } + + rsrc_data = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus_ver3_vfe_out_data), + GFP_KERNEL); + if (!rsrc_data) { + rc = -ENOMEM; + return rc; + } + + vfe_out->res_priv = rsrc_data; + + vfe_out->res_type = CAM_ISP_RESOURCE_VFE_OUT; + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + INIT_LIST_HEAD(&vfe_out->list); + + rsrc_data->source_group = + ver3_hw_info->vfe_out_hw_info[index].source_group; + rsrc_data->out_type = + ver3_hw_info->vfe_out_hw_info[index].vfe_out_type; + rsrc_data->common_data = &ver3_bus_priv->common_data; + rsrc_data->max_width = + ver3_hw_info->vfe_out_hw_info[index].max_width; + rsrc_data->max_height = + ver3_hw_info->vfe_out_hw_info[index].max_height; + rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE; + rsrc_data->num_wm = ver3_hw_info->vfe_out_hw_info[index].num_wm; + rsrc_data->mc_based = ver3_hw_info->vfe_out_hw_info[index].mc_based; + rsrc_data->early_done_mask = ver3_hw_info->vfe_out_hw_info[index].early_done_mask; + rsrc_data->cntxt_cfg_except = ver3_hw_info->vfe_out_hw_info[index].cntxt_cfg_except; + + rsrc_data->wm_res = CAM_MEM_ZALLOC((sizeof(struct cam_isp_resource_node) * + rsrc_data->num_wm), GFP_KERNEL); + if (!rsrc_data->wm_res) { + CAM_ERR(CAM_ISP, "Failed to alloc for wm_res"); + return -ENOMEM; + } + + for (i = 0; i < rsrc_data->num_wm; i++) { + rc = cam_vfe_bus_ver3_init_wm_resource( + ver3_hw_info->vfe_out_hw_info[index].wm_idx[i], + ver3_bus_priv, ver3_hw_info, + &rsrc_data->wm_res[i], + &rsrc_data->comp_grp, + 
ver3_hw_info->vfe_out_hw_info[index].name[i], + ver3_hw_info->vfe_out_hw_info[index].line_based, + rsrc_data); + if (rc < 0) { + CAM_ERR(CAM_ISP, "VFE:%u init WM:%d failed rc:%d", + ver3_bus_priv->common_data.core_index, i, rc); + return rc; + } + } + + rsrc_data->pid_mask = ver3_hw_info->vfe_out_hw_info[index].pid_mask; + + vfe_out->start = cam_vfe_bus_ver3_start_vfe_out; + vfe_out->stop = cam_vfe_bus_ver3_stop_vfe_out; + vfe_out->top_half_handler = + cam_vfe_bus_ver3_handle_vfe_out_done_top_half; + vfe_out->bottom_half_handler = + cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half; + vfe_out->process_cmd = cam_vfe_bus_ver3_process_cmd; + vfe_out->hw_intf = ver3_bus_priv->common_data.hw_intf; + vfe_out->irq_handle = 0; + + rsrc_data->mc_comp_irq_handle = 0; + rsrc_data->num_mid = ver3_hw_info->vfe_out_hw_info->num_mid; + rsrc_data->mid = ver3_hw_info->vfe_out_hw_info[index].mid; + + return 0; +} + +static int cam_vfe_bus_ver3_deinit_vfe_out_resource( + struct cam_isp_resource_node *vfe_out) +{ + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = vfe_out->res_priv; + int rc = 0, i = 0; + + if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) { + /* + * This is not error. It can happen if the resource is + * never supported in the HW. 
+ */ + return 0; + } + + vfe_out->start = NULL; + vfe_out->stop = NULL; + vfe_out->top_half_handler = NULL; + vfe_out->bottom_half_handler = NULL; + vfe_out->hw_intf = NULL; + vfe_out->irq_handle = 0; + + vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + INIT_LIST_HEAD(&vfe_out->list); + vfe_out->res_priv = NULL; + + if (!rsrc_data) + return -ENOMEM; + + for (i = 0; i < rsrc_data->num_wm; i++) { + rc = cam_vfe_bus_ver3_deinit_wm_resource(&rsrc_data->wm_res[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "VFE:%u deinit WM:%d failed rc:%d", + rsrc_data->common_data->core_index, i, rc); + } + + rsrc_data->wm_res = NULL; + rsrc_data->comp_grp = NULL; + rsrc_data->mc_comp_irq_handle = 0; + CAM_MEM_FREE(rsrc_data); + + return 0; +} + +static void cam_vfe_bus_ver3_print_wm_info( + struct cam_vfe_bus_ver3_wm_resource_data *wm_data, + struct cam_vfe_bus_ver3_common_data *common_data, + uint8_t *wm_name) +{ + uint32_t addr_status0, addr_status1, addr_status2, addr_status3, limiter; + + addr_status0 = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->addr_status_0); + addr_status1 = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->addr_status_1); + addr_status2 = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->addr_status_2); + addr_status3 = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->addr_status_3); + limiter = cam_io_r_mb(common_data->mem_base + + wm_data->hw_regs->bw_limiter_addr); + + CAM_INFO(CAM_ISP, + "VFE:%u WM:%d wm_name:%s width:%u height:%u stride:%u x_init:%u en_cfg:%u acquired width:%u height:%u", + wm_data->common_data->core_index, wm_data->index, wm_name, + wm_data->cfg.width, + wm_data->cfg.height, + wm_data->cfg.stride, wm_data->cfg.h_init, + wm_data->cfg.en_cfg, + wm_data->acquired_width, + wm_data->acquired_height); + CAM_INFO(CAM_ISP, + "VFE:%u WM:%u last consumed address:0x%x last frame addr:0x%x fifo cnt:0x%x current client address:0x%x limiter: 0x%x", + common_data->hw_intf->hw_idx, wm_data->index, + addr_status0, 
addr_status1, addr_status2, addr_status3, limiter); +} + +static int cam_vfe_bus_ver3_print_dimensions( + uint32_t res_id, + struct cam_vfe_bus_ver3_priv *bus_priv) +{ + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_vfe_bus_ver3_common_data *common_data = NULL; + int i; + uint8_t *wm_name = NULL; + enum cam_vfe_bus_ver3_vfe_out_type vfe_out_res_id = + CAM_VFE_BUS_VER3_VFE_OUT_MAX; + uint32_t outmap_index = CAM_VFE_BUS_VER3_VFE_OUT_MAX; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Invalid bus private data, res_id: %d", + res_id); + return -EINVAL; + } + + vfe_out_res_id = cam_vfe_bus_ver3_get_out_res_id_and_index(bus_priv, + res_id, &outmap_index); + + if ((vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_MAX) || + (outmap_index >= bus_priv->num_out)) { + CAM_WARN_RATE_LIMIT(CAM_ISP, + "target does not support req res id :0x%x outtype:%d index:%d", + res_id, + vfe_out_res_id, outmap_index); + return -EINVAL; + } + + rsrc_node = &bus_priv->vfe_out[outmap_index]; + rsrc_data = rsrc_node->res_priv; + if (!rsrc_data) { + CAM_ERR(CAM_ISP, "VFE out data is null, res_id: %d", + vfe_out_res_id); + return -EINVAL; + } + + for (i = 0; i < rsrc_data->num_wm; i++) { + wm_data = rsrc_data->wm_res[i].res_priv; + wm_name = rsrc_data->wm_res[i].res_name; + common_data = rsrc_data->common_data; + cam_vfe_bus_ver3_print_wm_info(wm_data, common_data, wm_name); + } + return 0; +} + +static int cam_vfe_bus_ver3_mini_dump( + struct cam_vfe_bus_ver3_priv *bus_priv, + void *cmd_args) +{ + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm = NULL; + struct cam_vfe_bus_ver3_mini_dump_data *md; + struct cam_vfe_bus_ver3_wm_mini_dump *md_wm; + struct cam_hw_mini_dump_args *md_args; + struct cam_hw_info *hw_info = NULL; + uint32_t bytes_written = 0; + uint32_t i, j, k = 0; 
+ + if (!bus_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid bus private data"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)bus_priv->common_data.hw_intf->hw_priv; + md_args = (struct cam_hw_mini_dump_args *)cmd_args; + + if (sizeof(*md) > md_args->len) { + md_args->bytes_written = 0; + return 0; + } + + md = (struct cam_vfe_bus_ver3_mini_dump_data *)md_args->start_addr; + md->clk_rate = cam_soc_util_get_applied_src_clk(&hw_info->soc_info, true); + md->hw_idx = bus_priv->common_data.hw_intf->hw_idx; + md->hw_state = hw_info->hw_state; + bytes_written += sizeof(*md); + md->wm = (struct cam_vfe_bus_ver3_wm_mini_dump *) + ((uint8_t *)md_args->start_addr + bytes_written); + + for (i = 0; i < bus_priv->num_out; i++) { + rsrc_node = &bus_priv->vfe_out[i]; + rsrc_data = rsrc_node->res_priv; + if (!rsrc_data) + continue; + + for (j = 0; j < rsrc_data->num_wm; j++) { + if (bytes_written + sizeof(*md_wm) > md_args->len) + goto end; + + md_wm = &md->wm[k]; + wm = rsrc_data->wm_res[j].res_priv; + md_wm->width = wm->cfg.width; + md_wm->index = wm->index; + md_wm->height = wm->cfg.height; + md_wm->stride = wm->cfg.stride; + md_wm->en_cfg = wm->cfg.en_cfg; + md_wm->h_init = wm->cfg.h_init; + md_wm->format = wm->cfg.format; + md_wm->acquired_width = wm->acquired_width; + md_wm->acquired_height = wm->acquired_height; + md_wm->state = rsrc_node->res_state; + scnprintf(md_wm->name, CAM_ISP_RES_NAME_LEN, + "%s", rsrc_data->wm_res[j].res_name); + k++; + bytes_written += sizeof(*md_wm); + } + } +end: + md->num_client = k; + md_args->bytes_written = bytes_written; + return 0; +} + +static void *cam_vfe_bus_ver3_user_dump_info( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_vfe_bus_ver3_wm_resource_data *wm = NULL; + uint32_t *addr; + uint32_t addr_status0; + uint32_t addr_status1; + uint32_t addr_status2; + uint32_t addr_status3; + + wm = (struct cam_vfe_bus_ver3_wm_resource_data *)dump_struct; + + addr_status0 = cam_io_r_mb(wm->common_data->mem_base + + 
wm->hw_regs->addr_status_0); + addr_status1 = cam_io_r_mb(wm->common_data->mem_base + + wm->hw_regs->addr_status_1); + addr_status2 = cam_io_r_mb(wm->common_data->mem_base + + wm->hw_regs->addr_status_2); + addr_status3 = cam_io_r_mb(wm->common_data->mem_base + + wm->hw_regs->addr_status_3); + + addr = (uint32_t *)addr_ptr; + + *addr++ = wm->common_data->hw_intf->hw_idx; + *addr++ = wm->index; + *addr++ = addr_status0; + *addr++ = addr_status1; + *addr++ = addr_status2; + *addr++ = addr_status3; + + return addr; +} + +static int cam_vfe_bus_ver3_user_dump( + struct cam_vfe_bus_ver3_priv *bus_priv, + void *cmd_args) +{ + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm = NULL; + struct cam_hw_info *hw_info = NULL; + struct cam_isp_hw_dump_args *dump_args; + uint32_t i, j = 0; + int rc = 0; + + + if (!bus_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "Bus private data NULL"); + return -EINVAL; + } + + hw_info = (struct cam_hw_info *)bus_priv->common_data.hw_intf->hw_priv; + dump_args = (struct cam_isp_hw_dump_args *)cmd_args; + + if (hw_info->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_WARN(CAM_ISP, + "VFE:%u BUS powered down", bus_priv->common_data.core_index); + return -EINVAL; + } + + rc = cam_common_user_dump_helper(dump_args, cam_common_user_dump_clock, + hw_info, sizeof(uint64_t), "CLK_RATE_PRINT:"); + + if (rc) { + CAM_ERR(CAM_ISP, "VFE:%u BUS VER3: Clock dump failed, rc: %d", + bus_priv->common_data.core_index, rc); + return rc; + } + + for (i = 0; i < bus_priv->num_out; i++) { + rsrc_node = &bus_priv->vfe_out[i]; + if (rsrc_node->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_DBG(CAM_ISP, + "VFE:%u BUS VER3: path inactive res ID: %d, continuing", + bus_priv->common_data.core_index, rsrc_node->res_id); + continue; + } + + rsrc_data = rsrc_node->res_priv; + if (!rsrc_data) + continue; + for (j = 0; j < rsrc_data->num_wm; j++) { + + wm = 
rsrc_data->wm_res[j].res_priv; + if (!wm) + continue; + + rc = cam_common_user_dump_helper(dump_args, cam_vfe_bus_ver3_user_dump_info, + wm, sizeof(uint32_t), "VFE_BUS_CLIENT.%s.%d:", + rsrc_data->wm_res[j].res_name, + rsrc_data->common_data->core_index); + + if (rc) { + CAM_ERR(CAM_ISP, "VFE:%u BUS VER3: Info dump failed, rc: %d", + bus_priv->common_data.core_index, rc); + return rc; + } + } + } + return 0; +} + +static int cam_vfe_bus_ver3_handle_bus_irq(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_vfe_bus_ver3_priv *bus_priv; + int rc = 0; + + bus_priv = th_payload->handler_priv; + CAM_DBG(CAM_ISP, "Enter VFE:%u", bus_priv->common_data.core_index); + rc = cam_irq_controller_handle_irq(evt_id, + bus_priv->common_data.bus_irq_controller, CAM_IRQ_EVT_GROUP_0); + return (rc == IRQ_HANDLED) ? 0 : -EINVAL; +} + +static int cam_vfe_bus_ver3_handle_rup_irq(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + struct cam_vfe_bus_ver3_priv *bus_priv; + int rc = 0; + + bus_priv = th_payload->handler_priv; + CAM_DBG(CAM_ISP, "Enter, VFE:%u", bus_priv->common_data.core_index); + rc = cam_irq_controller_handle_irq(evt_id, + bus_priv->common_data.bus_irq_controller, CAM_IRQ_EVT_GROUP_1); + return (rc == IRQ_HANDLED) ? 
0 : -EINVAL;
}

/*
 * Top-half (hard IRQ context) handler for BUS error interrupts.
 *
 * Logs the raw status registers, masks further error IRQs on this bus
 * controller, and snapshots all error state (status regs, CCIF and
 * image-size violation registers, timestamp) into an event payload for
 * the bottom half. Returns 0 on success or the error from payload
 * allocation; on failure no payload is handed to the bottom half.
 */
static int cam_vfe_bus_ver3_handle_err_irq_top_half(uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	int i = 0, rc = 0;
	struct cam_vfe_bus_ver3_priv *bus_priv =
		th_payload->handler_priv;
	struct cam_vfe_bus_irq_evt_payload *evt_payload;

	CAM_ERR(CAM_ISP, "VFE:%u BUS Err IRQ",
		bus_priv->common_data.core_index);
	for (i = 0; i < th_payload->num_registers; i++) {
		CAM_ERR(CAM_ISP, "VFE:%u BUS IRQ status_%d: 0x%X",
			bus_priv->common_data.core_index, i,
			th_payload->evt_status_arr[i]);
	}
	/* Disable the error IRQ so a stuck error line cannot storm
	 * before the bottom half has run. */
	cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller,
		bus_priv->error_irq_handle);

	rc = cam_vfe_bus_ver3_get_evt_payload(&bus_priv->common_data,
		&evt_payload);
	if (rc)
		return rc;

	for (i = 0; i < th_payload->num_registers; i++)
		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];

	evt_payload->core_index = bus_priv->common_data.core_index;

	/* Latch violation detail registers now — they may be clobbered by
	 * the time the (deferred) bottom half executes. */
	evt_payload->ccif_violation_status = cam_io_r_mb(
		bus_priv->common_data.mem_base +
		bus_priv->common_data.common_reg->ccif_violation_status);

	evt_payload->image_size_violation_status = cam_io_r_mb(
		bus_priv->common_data.mem_base +
		bus_priv->common_data.common_reg->image_size_violation_status);

	cam_isp_hw_get_timestamp(&evt_payload->ts);
	th_payload->evt_payload_priv = evt_payload;

	return rc;
}

/*
 * Scan every WM of every output resource and, for each WM whose bit is
 * set in @status, print its info and accumulate its out resource id into
 * @out_port (bitmask keyed by low byte of res_id). For 2PD / PDAF-parsed
 * outputs an "Image Size" error is additionally classified as an HWPD
 * violation in @violation_type.
 *
 * @error_type is compared literally against "Image Size" — callers pass
 * either "Image Size" or "CCIF".
 */
static void cam_vfe_check_violations(
	char *error_type,
	uint32_t status,
	struct cam_vfe_bus_ver3_priv *bus_priv,
	uint64_t *out_port,
	struct cam_vfe_bus_irq_violation_type *violation_type)
{
	int i, j;
	struct cam_isp_resource_node *rsrc_node = NULL;
	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL;
	struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL;
	struct cam_vfe_bus_ver3_common_data *common_data = NULL;
	uint8_t *wm_name = NULL;

	if (!bus_priv) {
		CAM_ERR(CAM_ISP, "Invalid bus private data");
		return;
	}

	for (i = 0; i < bus_priv->num_out; i++) {
		rsrc_node = &bus_priv->vfe_out[i];
		rsrc_data = rsrc_node->res_priv;
		if (!rsrc_data) {
			CAM_ERR(CAM_ISP, "VFE:%u out data is null, res_id: %d",
				bus_priv->common_data.core_index, i);
			return;
		}

		for (j = 0; j < rsrc_data->num_wm; j++) {
			wm_data = rsrc_data->wm_res[j].res_priv;
			common_data = rsrc_data->common_data;
			wm_name = rsrc_data->wm_res[j].res_name;

			/* One status bit per WM index */
			if (status & BIT(wm_data->index)) {
				cam_vfe_bus_ver3_print_wm_info(wm_data,
					common_data, wm_name);
				*out_port |= BIT_ULL(rsrc_node->res_id & 0xFF);

				/* check what type of violation it is*/
				switch (rsrc_data->out_type) {
				case CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD:
				case CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED:
					if (!strcmp(error_type, "Image Size"))
						violation_type->hwpd_violation = true;
					break;
				default:
					break;
				}
			}
		}
	}
}

/*
 * Bottom-half handler for BUS error IRQs. Decodes the latched status
 * (bit31 = image size violation, bit30 = CCIF violation, bit28 =
 * constraint violation), logs details, resolves which output ports were
 * hit, then notifies the event callback once per affected out resource
 * (or once with res_id = CAM_VFE_BUS_VER3_VFE_OUT_MAX if none resolved).
 * Always returns 0 once the payload pointers are validated.
 */
static int cam_vfe_bus_ver3_handle_err_irq_bottom_half(
	void *handler_priv, void *evt_payload_priv)
{
	struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv;
	struct cam_vfe_bus_ver3_priv *bus_priv = handler_priv;
	struct cam_vfe_bus_ver3_common_data *common_data;
	struct cam_isp_hw_event_info evt_info;
	struct cam_isp_hw_error_event_info err_evt_info;
	struct cam_vfe_bus_irq_violation_type violation_type;
	uint32_t status = 0, image_size_violation = 0, ccif_violation = 0, constraint_violation = 0;
	uint64_t out_port_mask = 0;
	int idx = -1;

	if (!handler_priv || !evt_payload_priv)
		return -EINVAL;

	common_data = &bus_priv->common_data;

	status = evt_payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
	image_size_violation = (status >> 31) & 0x1;
	ccif_violation = (status >> 30) & 0x1;
	constraint_violation = (status >> 28) & 0x1;

	CAM_ERR(CAM_ISP,
		"VFE[%u] BUS error image size violation: %s, CCIF violation: %s, Constraint violation: %s",
		bus_priv->common_data.core_index, CAM_BOOL_TO_YESNO(image_size_violation),
		CAM_BOOL_TO_YESNO(ccif_violation), CAM_BOOL_TO_YESNO(constraint_violation));

	memset(&evt_info, 0, sizeof(evt_info));
	memset(&err_evt_info, 0, sizeof(err_evt_info));
	memset(&violation_type, 0, sizeof(violation_type));

	if (image_size_violation || constraint_violation) {
		status = evt_payload->image_size_violation_status;
		/* Empty image-size status with the IRQ asserted means the
		 * root cause is a constraint violation instead. */
		if (!status) {
			CAM_ERR(CAM_ISP,
				"VFE[%u] CONSTRAINT_VIOLATION occurred at [%llu: %09llu]",
				bus_priv->common_data.core_index,
				evt_payload->ts.mono_time.tv_sec,
				evt_payload->ts.mono_time.tv_nsec);
			cam_vfe_bus_ver3_get_constraint_errors(bus_priv);
		} else {
			CAM_ERR(CAM_ISP,
				"VFE[%u] IMAGE_SIZE_VIOLATION occurred at [%llu: %09llu]",
				bus_priv->common_data.core_index,
				evt_payload->ts.mono_time.tv_sec,
				evt_payload->ts.mono_time.tv_nsec);
			CAM_ERR(CAM_ISP,
				"Sensor: Programmed image size is different as actual image size from input");
			CAM_ERR(CAM_ISP, "Debug: Check SW programming/sensor config");
			cam_vfe_check_violations("Image Size", status, bus_priv,
				&out_port_mask, &violation_type);
		}
	}

	if (ccif_violation) {
		status = evt_payload->ccif_violation_status;
		CAM_ERR(CAM_ISP, "VFE[%u] CCIF protocol violation occurred at [%llu: %09llu]",
			bus_priv->common_data.core_index,
			evt_payload->ts.mono_time.tv_sec,
			evt_payload->ts.mono_time.tv_nsec);
		CAM_ERR(CAM_ISP, "Violation status: 0x%x", status);
		cam_vfe_check_violations("CCIF", status, bus_priv,
			&out_port_mask, &violation_type);
	}

	if (image_size_violation && violation_type.hwpd_violation) {
		err_evt_info.err_mask |= CAM_VFE_IRQ_ERR_MASK_HWPD_VIOLATION;
		CAM_DBG(CAM_ISP, "HWPD image size violation");
	}

	/* Payload returned to the free pool before notifying — all needed
	 * data has already been copied into the locals/evt_info. */
	cam_vfe_bus_ver3_put_evt_payload(common_data, &evt_payload);

	evt_info.hw_idx = common_data->core_index;
	evt_info.res_type = CAM_ISP_RESOURCE_VFE_OUT;
	evt_info.hw_type = CAM_ISP_HW_TYPE_VFE;
	err_evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
	evt_info.event_data = (void *)&err_evt_info;

	if (!common_data->event_cb)
		return 0;

	if (!out_port_mask) {
		/* No valid res_id found */
		evt_info.res_id = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
		common_data->event_cb(common_data->priv, CAM_ISP_HW_EVENT_ERROR,
			(void *)&evt_info);
		return 0;
	}

	/* Walk the accumulated port mask LSB-first; bit position maps to
	 * CAM_ISP_IFE_OUT_RES_BASE + idx. */
	while (out_port_mask) {
		idx++;
		if (!(out_port_mask & 0x1)) {
			out_port_mask >>= 1;
			continue;
		}

		evt_info.res_id = CAM_ISP_IFE_OUT_RES_BASE + idx;
		common_data->event_cb(common_data->priv, CAM_ISP_HW_EVENT_ERROR,
			(void *)&evt_info);
		out_port_mask >>= 1;
	}
	return 0;
}

/*
 * Tear down all IRQ subscriptions made by
 * cam_vfe_bus_ver3_subscribe_init_irq(): bus error IRQ, top-level bus
 * IRQ (plus the dependent-controller registration), and the RUP IRQ.
 * Unsubscribe failures are only warned about; handles are cleared
 * regardless so the state stays consistent.
 */
static void cam_vfe_bus_ver3_unsubscribe_init_irq(
	struct cam_vfe_bus_ver3_priv *bus_priv)
{
	int rc = 0;

	if (bus_priv->error_irq_handle) {
		rc = cam_irq_controller_unsubscribe_irq(
			bus_priv->common_data.bus_irq_controller,
			bus_priv->error_irq_handle);
		if (rc)
			CAM_WARN(CAM_ISP, "VFE:%u failed to unsubscribe error irqs",
				bus_priv->common_data.core_index);

		bus_priv->error_irq_handle = 0;
	}

	if (bus_priv->bus_irq_handle) {
		rc = cam_irq_controller_unsubscribe_irq(
			bus_priv->common_data.vfe_irq_controller,
			bus_priv->bus_irq_handle);
		if (rc)
			CAM_WARN(CAM_ISP, "VFE:%u failed to unsubscribe top irq",
				bus_priv->common_data.core_index);

		bus_priv->bus_irq_handle = 0;
		/* Dependent registration was made together with the top-irq
		 * subscription; drop it in the same place. */
		cam_irq_controller_unregister_dependent(bus_priv->common_data.vfe_irq_controller,
			bus_priv->common_data.bus_irq_controller);
	}

	if (bus_priv->rup_irq_handle) {
		rc = cam_irq_controller_unsubscribe_irq(
			bus_priv->common_data.vfe_irq_controller,
			bus_priv->rup_irq_handle);
		if (rc)
			CAM_WARN(CAM_ISP, "VFE:%u failed to unsubscribe rup done irq",
				bus_priv->common_data.core_index);

		bus_priv->rup_irq_handle = 0;
	}

	bus_priv->common_data.init_irq_subscribed = false;
	CAM_DBG(CAM_ISP, "BUS init irq unsubscribed, VFE:%u", bus_priv->common_data.core_index);
}

/*
 * Subscribe the bus IRQs needed at init: top-level bus (buf_done) IRQ,
 * bus error IRQ (only when a tasklet is available for the bottom half),
 * and the RUP IRQ when the HW supports it. Mirrored by
 * cam_vfe_bus_ver3_unsubscribe_init_irq().
 */
static int cam_vfe_bus_ver3_subscribe_init_irq(
	struct cam_vfe_bus_ver3_priv *bus_priv)
{
	uint32_t top_irq_reg_mask[3] = {0};

	/* Subscribe top IRQ */
	top_irq_reg_mask[0] = (1 << bus_priv->top_irq_shift);

	bus_priv->bus_irq_handle = cam_irq_controller_subscribe_irq(
bus_priv->common_data.vfe_irq_controller, + CAM_IRQ_PRIORITY_4, + top_irq_reg_mask, + bus_priv, + cam_vfe_bus_ver3_handle_bus_irq, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->bus_irq_handle < 1) { + CAM_ERR(CAM_ISP, "VFE:%u Failed to subscribe BUS (buf_done) IRQ", + bus_priv->common_data.core_index); + bus_priv->bus_irq_handle = 0; + return -EFAULT; + } + + cam_irq_controller_register_dependent( + bus_priv->common_data.vfe_irq_controller, + bus_priv->common_data.bus_irq_controller, + top_irq_reg_mask); + + if (bus_priv->tasklet_info != NULL) { + bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq( + bus_priv->common_data.bus_irq_controller, + CAM_IRQ_PRIORITY_0, + bus_error_irq_mask, + bus_priv, + cam_vfe_bus_ver3_handle_err_irq_top_half, + cam_vfe_bus_ver3_handle_err_irq_bottom_half, + bus_priv->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->error_irq_handle < 1) { + CAM_ERR(CAM_ISP, "VFE:%u Failed to subscribe BUS Error IRQ", + bus_priv->common_data.core_index); + bus_priv->error_irq_handle = 0; + return -EFAULT; + } + } + + if (bus_priv->common_data.supported_irq & CAM_VFE_HW_IRQ_CAP_RUP) { + bus_priv->rup_irq_handle = cam_irq_controller_subscribe_irq( + bus_priv->common_data.vfe_irq_controller, + CAM_IRQ_PRIORITY_2, + top_irq_reg_mask, + bus_priv, + cam_vfe_bus_ver3_handle_rup_irq, + NULL, + NULL, + NULL, + CAM_IRQ_EVT_GROUP_0); + + if (bus_priv->rup_irq_handle < 1) { + CAM_ERR(CAM_ISP, "VFE:%u Failed to subscribe BUS (rup) IRQ", + bus_priv->common_data.core_index); + bus_priv->rup_irq_handle = 0; + return -EFAULT; + } + } + + bus_priv->common_data.init_irq_subscribed = true; + CAM_DBG(CAM_ISP, "VFE:%u BUS irq subscribed", bus_priv->common_data.core_index); + return 0; +} + +static void cam_vfe_bus_ver3_update_ubwc_meta_addr( + uint32_t *reg_val_pair, + uint32_t *j, + void *regs, + dma_addr_t image_buf) +{ + struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs; + uint32_t temp = 
cam_smmu_is_expanded_memory() ? + CAM_36BIT_INTF_GET_IOVA_BASE(image_buf) : image_buf; + + if (cam_smmu_is_expanded_memory() && + CAM_36BIT_INTF_GET_IOVA_OFFSET(image_buf)) { + CAM_ERR(CAM_ISP, "Error, address not aligned! offset:0x%x", + CAM_36BIT_INTF_GET_IOVA_OFFSET(image_buf)); + } + + if (!regs || !reg_val_pair || !j) { + CAM_ERR(CAM_ISP, "Invalid args"); + goto end; + } + + ubwc_regs = (struct cam_vfe_bus_ver3_reg_offset_ubwc_client *) regs; + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->meta_addr, temp); + +end: + return; +} + +static int cam_vfe_bus_ver3_update_ubwc_regs( + struct cam_vfe_bus_ver3_wm_resource_data *wm_data, + struct cam_vfe_bus_ver3_wm_ubwc_cfg_data *ubwc_cfg_data, + uint32_t *reg_val_pair, uint32_t i, uint32_t *j) +{ + struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs; + int rc = 0; + + if (!wm_data || !ubwc_cfg_data || !reg_val_pair || !j) { + CAM_ERR(CAM_ISP, "Invalid args"); + rc = -EINVAL; + goto end; + } + + ubwc_regs = (struct cam_vfe_bus_ver3_reg_offset_ubwc_client *) + wm_data->hw_regs->ubwc_regs; + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + wm_data->hw_regs->packer_cfg, wm_data->packer_cfg); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d packer cfg 0x%X", + wm_data->common_data->core_index, wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->meta_cfg, ubwc_cfg_data->ubwc_meta_cfg); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d meta stride 0x%X", + wm_data->common_data->core_index, wm_data->index, reg_val_pair[*j-1]); + + if (wm_data->common_data->disable_ubwc_comp) { + ubwc_cfg_data->ubwc_mode_cfg &= ~ubwc_regs->ubwc_comp_en_bit; + CAM_DBG(CAM_ISP, + "Force disable UBWC compression on VFE:%u WM:%d", + wm_data->common_data->core_index, wm_data->index); + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->mode_cfg, ubwc_cfg_data->ubwc_mode_cfg); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d 
ubwc_mode_cfg 0x%X", + wm_data->common_data->core_index, wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->ctrl_2, ubwc_cfg_data->ubwc_ctrl_2); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d ubwc_ctrl_2 0x%X", + wm_data->common_data->core_index, wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->lossy_thresh0, ubwc_cfg_data->ubwc_lossy_threshold_0); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d lossy_thresh0 0x%X", + wm_data->common_data->core_index, wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->lossy_thresh1, ubwc_cfg_data->ubwc_lossy_threshold_1); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d lossy_thresh1 0x%X", + wm_data->common_data->core_index, wm_data->index, reg_val_pair[*j-1]); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->off_lossy_var, ubwc_cfg_data->ubwc_offset_lossy_variance); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d off_lossy_var 0x%X", + wm_data->common_data->core_index, wm_data->index, reg_val_pair[*j-1]); + + /* + * If limit value >= 0xFFFF, limit configured by + * generic limiter blob + */ + if (ubwc_cfg_data->ubwc_bandwidth_limit < 0xFFFF) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, *j, + ubwc_regs->bw_limit, ubwc_cfg_data->ubwc_bandwidth_limit); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d ubwc bw limit 0x%X", + wm_data->common_data->core_index, wm_data->index, + ubwc_cfg_data->ubwc_bandwidth_limit); + } + +end: + return rc; +} + +static int cam_vfe_bus_ver3_config_ubwc_regs( + struct cam_vfe_bus_ver3_wm_resource_data *wm_data, + struct cam_vfe_bus_ver3_wm_ubwc_cfg_data *ubwc_cfg_data) +{ + struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs = NULL; + + + if (!wm_data || !ubwc_cfg_data) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + ubwc_regs = (struct cam_vfe_bus_ver3_reg_offset_ubwc_client *) 
wm_data->hw_regs->ubwc_regs; + + cam_io_w_mb(wm_data->packer_cfg, wm_data->common_data->mem_base + + wm_data->hw_regs->packer_cfg); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d packer cfg:0x%x", + wm_data->common_data->core_index, wm_data->index, wm_data->packer_cfg); + + cam_io_w_mb(ubwc_cfg_data->ubwc_meta_cfg, + wm_data->common_data->mem_base + ubwc_regs->meta_cfg); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d meta stride:0x%x", + wm_data->common_data->core_index, wm_data->index, + ubwc_cfg_data->ubwc_meta_cfg); + + if (wm_data->common_data->disable_ubwc_comp) { + ubwc_cfg_data->ubwc_mode_cfg &= ~ubwc_regs->ubwc_comp_en_bit; + CAM_DBG(CAM_ISP, + "Force disable UBWC compression on VFE:%u WM:%d", + wm_data->common_data->core_index, wm_data->index); + } + + cam_io_w_mb(ubwc_cfg_data->ubwc_mode_cfg, + wm_data->common_data->mem_base + ubwc_regs->mode_cfg); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d ubwc_mode_cfg:0x%x", + wm_data->common_data->core_index, wm_data->index, + ubwc_cfg_data->ubwc_mode_cfg); + + cam_io_w_mb(ubwc_cfg_data->ubwc_ctrl_2, + wm_data->common_data->mem_base + ubwc_regs->ctrl_2); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d ubwc_ctrl_2:0x%x", + wm_data->common_data->core_index, wm_data->index, + ubwc_cfg_data->ubwc_ctrl_2); + + cam_io_w_mb(ubwc_cfg_data->ubwc_lossy_threshold_0, + wm_data->common_data->mem_base + ubwc_regs->lossy_thresh0); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d lossy_thresh0: 0x%x", + wm_data->common_data->core_index, wm_data->index, + ubwc_cfg_data->ubwc_lossy_threshold_0); + + cam_io_w_mb(ubwc_cfg_data->ubwc_lossy_threshold_1, + wm_data->common_data->mem_base + ubwc_regs->lossy_thresh1); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d lossy_thresh0:0x%x", + wm_data->common_data->core_index, wm_data->index, + ubwc_cfg_data->ubwc_lossy_threshold_1); + + cam_io_w_mb(ubwc_cfg_data->ubwc_offset_lossy_variance, + wm_data->common_data->mem_base + ubwc_regs->off_lossy_var); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d off_lossy_var:0x%x", + wm_data->common_data->core_index, wm_data->index, + 
ubwc_cfg_data->ubwc_offset_lossy_variance); + + /* + * If limit value >= 0xFFFF, limit configured by + * generic limiter blob + */ + if (ubwc_cfg_data->ubwc_bandwidth_limit < 0xFFFF) { + cam_io_w_mb(ubwc_cfg_data->ubwc_bandwidth_limit, + wm_data->common_data->mem_base + ubwc_regs->bw_limit); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d ubwc bw limit:0x%x", + wm_data->common_data->core_index, wm_data->index, + ubwc_cfg_data->ubwc_bandwidth_limit); + } + + return 0; +} + +static int cam_vfe_bus_ver3_config_wm(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_vfe_bus_ver3_priv *bus_priv; + struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs; + uint32_t i, val, iova_addr, iova_offset, stride; + dma_addr_t iova; + + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + + vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *) + update_buf->res->res_priv; + if (!vfe_out_data) { + CAM_ERR(CAM_ISP, "Invalid data"); + return -EINVAL; + } + + if (!vfe_out_data->limiter_enabled) + CAM_WARN(CAM_ISP, + "Configuring scratch for VFE:%u out_type: %u, with no BW limiter enabled", + bus_priv->common_data.core_index, vfe_out_data->out_type); + + for (i = 0; i < vfe_out_data->num_wm; i++) { + wm_data = vfe_out_data->wm_res[i].res_priv; + ubwc_regs = (struct cam_vfe_bus_ver3_reg_offset_ubwc_client *) + wm_data->hw_regs->ubwc_regs; + + stride = update_buf->wm_update->stride; + val = stride; + val = ALIGNUP(val, 16); + if (val != stride && + val != wm_data->cfg.stride) + CAM_WARN(CAM_SFE, "VFE:%u Warning stride %u expected %u", + bus_priv->common_data.core_index, stride, val); + + if (wm_data->cfg.stride != val || !wm_data->init_cfg_done) { + cam_io_w_mb(stride, wm_data->common_data->mem_base + + wm_data->hw_regs->image_cfg_2); + wm_data->cfg.stride = 
val; + CAM_DBG(CAM_ISP, "VFE:%u WM:%d image stride 0x%x", + bus_priv->common_data.core_index, wm_data->index, stride); + } + + /* WM Image address */ + iova = update_buf->wm_update->image_buf[i]; + if (cam_smmu_is_expanded_memory()) { + iova_addr = CAM_36BIT_INTF_GET_IOVA_BASE(iova); + iova_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET(iova); + + cam_io_w_mb(iova_addr, wm_data->common_data->mem_base + + wm_data->hw_regs->image_addr); + cam_io_w_mb(iova_offset, wm_data->common_data->mem_base + + wm_data->hw_regs->addr_cfg); + + CAM_DBG(CAM_ISP, "VFE:%u WM:%d image address 0x%x 0x%x", + bus_priv->common_data.core_index, wm_data->index, + iova_addr, iova_offset); + } else { + iova_addr = iova; + cam_io_w_mb(iova_addr, wm_data->common_data->mem_base + + wm_data->hw_regs->image_addr); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d image address 0x%X", + bus_priv->common_data.core_index, wm_data->index, iova_addr); + } + + if (wm_data->en_ubwc) { + if (!wm_data->hw_regs->ubwc_regs) { + CAM_ERR(CAM_ISP, + "VFE:%u No UBWC register to configure for WM: %u", + bus_priv->common_data.core_index, wm_data->index); + return -EINVAL; + } + + if (wm_data->ubwc_cfg_data.ubwc_updated) { + wm_data->ubwc_cfg_data.ubwc_updated = false; + cam_vfe_bus_ver3_config_ubwc_regs(wm_data, &wm_data->ubwc_cfg_data); + } + + cam_io_w_mb(iova_addr, wm_data->common_data->mem_base + + ubwc_regs->meta_addr); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d meta address 0x%x", + bus_priv->common_data.core_index, wm_data->index, iova_addr); + } + + /* enable the WM */ + cam_io_w_mb(wm_data->cfg.en_cfg, wm_data->common_data->mem_base + + wm_data->hw_regs->cfg); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d en_cfg 0x%x", + bus_priv->common_data.core_index, wm_data->index, wm_data->cfg.en_cfg); + } + + return 0; +} + +static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *update_buf; + struct cam_vfe_bus_ver3_priv *bus_priv; + struct cam_buf_io_cfg *io_cfg = NULL; + struct 
cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + struct cam_vfe_bus_ver3_wm_cfg_data *cfg; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t i, j, size = 0; + int hw_cntxt_id = -1; + uint32_t frame_inc = 0, val; + uint32_t iova_addr, iova_offset, image_buf_offset = 0, stride, slice_h; + dma_addr_t iova; + + update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args; + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + + vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *) + update_buf->res->res_priv; + if (!vfe_out_data || !vfe_out_data->common_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid data"); + return -EINVAL; + } + + cdm_util_ops = vfe_out_data->common_data->cdm_util_ops; + if ((update_buf->wm_update->num_buf != vfe_out_data->num_wm) && + (!(update_buf->use_scratch_cfg))) { + CAM_ERR(CAM_ISP, + "VFE:%u Failed! Invalid number buffers:%d required:%d", + bus_priv->common_data.core_index, update_buf->wm_update->num_buf, + vfe_out_data->num_wm); + return -EINVAL; + } + + reg_val_pair = &vfe_out_data->common_data->io_buf_update[0]; + if (update_buf->use_scratch_cfg) { + CAM_DBG(CAM_ISP, "VFE:%u Using scratch for IFE out_type: %u", + bus_priv->common_data.core_index, vfe_out_data->out_type); + + if (!vfe_out_data->limiter_enabled) + CAM_WARN(CAM_ISP, + "Configuring scratch for VFE:%u out_type: %u, with no BW limiter enabled", + bus_priv->common_data.core_index, vfe_out_data->out_type); + } else { + io_cfg = update_buf->wm_update->io_cfg; + if (!io_cfg) { + CAM_ERR(CAM_ISP, "Invalid io cfg for wm update"); + return -EINVAL; + } + + hw_cntxt_id = io_cfg->flag ? 
(ffs(io_cfg->flag) - 1) : -1; + } + + if ((vfe_out_data->mc_based || vfe_out_data->cntxt_cfg_except) && + ((hw_cntxt_id < CAM_ISP_MULTI_CTXT_0) || + (hw_cntxt_id >= CAM_ISP_MULTI_CTXT_MAX))) { + CAM_ERR(CAM_ISP, "Invalid hw context id : %d for wm update", hw_cntxt_id); + return -EINVAL; + } + + for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_ISP, + "VFE:%u reg_val_pair %d exceeds the array limit %zu", + bus_priv->common_data.core_index, j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + wm_data = vfe_out_data->wm_res[i].res_priv; + + if ((vfe_out_data->mc_based || wm_data->out_rsrc_data->cntxt_cfg_except) + && (wm_data->mc_data[hw_cntxt_id].cfg.updated)) + cfg = &wm_data->mc_data[hw_cntxt_id].cfg; + else + cfg = &wm_data->cfg; + + /* Disable frame header in case it was previously enabled */ + if ((cfg->en_cfg) & (1 << wm_data->common_data->common_reg->frmheader_en_shift)) + cfg->en_cfg &= ~(1 << wm_data->common_data->common_reg->frmheader_en_shift); + + if (update_buf->wm_update->frame_header && + !update_buf->wm_update->fh_enabled && + wm_data->hw_regs->frame_header_addr) { + + cfg->en_cfg |= (1 << wm_data->common_data->common_reg->frmheader_en_shift); + update_buf->wm_update->fh_enabled = true; + if (cam_smmu_is_expanded_memory()) { + iova_addr = CAM_36BIT_INTF_GET_IOVA_BASE( + update_buf->wm_update->frame_header); + iova_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET( + update_buf->wm_update->frame_header); + } else { + iova_addr = update_buf->wm_update->frame_header; + iova_offset = 0; + } + + if (iova_offset) + CAM_ERR(CAM_ISP, "VFE:%u Error, address not aligned! 
offset:0x%x", + bus_priv->common_data.core_index, iova_offset); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->frame_header_addr, iova_addr); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->frame_header_cfg, + update_buf->wm_update->local_id); + CAM_DBG(CAM_ISP, + "VFE:%u WM: %d en_cfg 0x%x frame_header %pK local_id %u", + bus_priv->common_data.core_index, wm_data->index, cfg->en_cfg, + update_buf->wm_update->frame_header, + update_buf->wm_update->local_id); + } + + val = (cfg->height << 16) | cfg->width; + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_cfg_0, val); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d image height and width 0x%X wid:%d ht:%d", + bus_priv->common_data.core_index, wm_data->index, reg_val_pair[j-1], + cfg->width, cfg->height); + + /* For initial configuration program all bus registers */ + if (update_buf->use_scratch_cfg) { + stride = update_buf->wm_update->stride; + slice_h = update_buf->wm_update->slice_height; + } else { + stride = io_cfg->planes[i].plane_stride; + slice_h = io_cfg->planes[i].slice_height; + } + + val = stride; + CAM_DBG(CAM_ISP, "VFE:%u val before stride %d", + bus_priv->common_data.core_index, val); + val = ALIGNUP(val, 16); + if (val != stride) + CAM_DBG(CAM_ISP, "VFE:%u Warning stride %u expected %u", + bus_priv->common_data.core_index, stride, val); + + if (cfg->stride != val || !wm_data->init_cfg_done || + ((wm_data->out_rsrc_data->mc_based || + wm_data->out_rsrc_data->cntxt_cfg_except) && + !wm_data->mc_data[hw_cntxt_id].init_cfg_done)) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_cfg_2, stride); + cfg->stride = val; + CAM_DBG(CAM_ISP, "VFE:%u WM:%d image stride 0x%X", + bus_priv->common_data.core_index, wm_data->index, + reg_val_pair[j-1]); + } + + /* + * For write master format update case, we check whether + * the updated format matches with the io 
config format, + * to help debug the wrong programming on a target who + * doesn't support 2 decode formats on CSID. + */ + if (wm_data->update_wm_format) { + if ((!update_buf->use_scratch_cfg) && + (io_cfg->format != cfg->format)) + CAM_WARN(CAM_ISP, + "VFE:%u format mismatch, wm_data format:%u io_cfg format:%u", + bus_priv->common_data.core_index, + cfg->format, + io_cfg->format); + + val = cfg->pack_fmt; + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->packer_cfg, val); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d image pack_fmt %d", + bus_priv->common_data.core_index, + wm_data->index, + reg_val_pair[j-1]); + } + + if (wm_data->en_ubwc) { + if (!wm_data->hw_regs->ubwc_regs) { + CAM_ERR(CAM_ISP, + "VFE:%u No UBWC register to configure.", + bus_priv->common_data.core_index); + return -EINVAL; + } + + if ((wm_data->out_rsrc_data->mc_based || + wm_data->out_rsrc_data->cntxt_cfg_except) && + wm_data->mc_data[hw_cntxt_id].ubwc_cfg_data.ubwc_updated) { + wm_data->mc_data[hw_cntxt_id].ubwc_cfg_data.ubwc_updated = false; + cam_vfe_bus_ver3_update_ubwc_regs(wm_data, + &wm_data->mc_data[hw_cntxt_id].ubwc_cfg_data, + reg_val_pair, i, &j); + } else if (wm_data->ubwc_cfg_data.ubwc_updated) { + wm_data->ubwc_cfg_data.ubwc_updated = false; + cam_vfe_bus_ver3_update_ubwc_regs(wm_data, + &wm_data->ubwc_cfg_data, + reg_val_pair, i, &j); + + } + + /* UBWC meta address */ + cam_vfe_bus_ver3_update_ubwc_meta_addr( + reg_val_pair, &j, + wm_data->hw_regs->ubwc_regs, + update_buf->wm_update->image_buf[i] + cfg->meta_offset); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d ubwc meta_addr:0x%llx meta_off:0x%x", + bus_priv->common_data.core_index, wm_data->index, + update_buf->wm_update->image_buf[i] + cfg->meta_offset, + cfg->meta_offset); + } + + frame_inc = stride * slice_h; + if (wm_data->en_ubwc) { + frame_inc = ALIGNUP(stride * + slice_h, 4096); + + if (!update_buf->use_scratch_cfg) { + frame_inc += io_cfg->planes[i].meta_size; + CAM_DBG(CAM_ISP, + "VFE:%u WM:%d frm %d: 
slice_ht: %d stride %d meta: %d", + bus_priv->common_data.core_index, wm_data->index, + frame_inc, io_cfg->planes[i].slice_height, + io_cfg->planes[i].plane_stride, + io_cfg->planes[i].meta_size); + } + } + + if (wm_data->wm_mode == CAM_VFE_WM_LINE_BASED_MODE) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_cfg_1, cfg->h_init); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d h_init 0x%X", + bus_priv->common_data.core_index, wm_data->index, + reg_val_pair[j-1]); + } + + if ((wm_data->en_ubwc) && (!update_buf->use_scratch_cfg)) + image_buf_offset = io_cfg->planes[i].meta_size + cfg->image_offset; + else if ((wm_data->wm_mode == CAM_VFE_WM_FRAME_BASED_MODE) || + (wm_data->wm_mode == CAM_VFE_WM_INDEX_BASED_MODE)) + image_buf_offset = wm_data->cfg.offset; + else + image_buf_offset = cfg->image_offset; + + /* WM Image address */ + iova = update_buf->wm_update->image_buf[i] + + image_buf_offset; + + CAM_DBG(CAM_ISP, "VFE:%u WM:%d img_addr:0x%lx img_off:0x%x", + bus_priv->common_data.core_index, wm_data->index, iova, image_buf_offset); + + if (cam_smmu_is_expanded_memory()) { + iova_addr = CAM_36BIT_INTF_GET_IOVA_BASE(iova); + iova_offset = CAM_36BIT_INTF_GET_IOVA_OFFSET(iova); + + /* Align frame inc to 256 as well */ + frame_inc = CAM_36BIT_INTF_GET_IOVA_BASE(frame_inc); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_addr, iova_addr); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->addr_cfg, iova_offset); + + CAM_DBG(CAM_ISP, "VFE:%u WM:%d image address:0x%X image offset: 0x%X", + bus_priv->common_data.core_index, wm_data->index, + reg_val_pair[j-3], reg_val_pair[j-1]); + } else { + iova_addr = iova; + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->image_addr, iova_addr); + + CAM_DBG(CAM_ISP, "VFE:%u WM:%d image address 0x%X", + bus_priv->common_data.core_index, + wm_data->index, reg_val_pair[j-1]); + } + + 
update_buf->wm_update->image_buf_offset[i] = image_buf_offset; + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->frame_incr, frame_inc); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d frame_inc: %d expanded_mem: %s", + bus_priv->common_data.core_index, wm_data->index, reg_val_pair[j-1], + CAM_BOOL_TO_YESNO(cam_smmu_is_expanded_memory)); + + cfg->en_cfg |= cfg->rcs_en ? wm_data->hw_regs->rcs_en_mask : 0; + + /* enable the WM */ + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->cfg, cfg->en_cfg); + CAM_DBG(CAM_ISP, "VFE:%u WM:%d %s out_res_id:0x%x en_cfg 0x%X", + bus_priv->common_data.core_index, wm_data->index, + vfe_out_data->wm_res[i].res_name, update_buf->res->res_id, + reg_val_pair[j-1]); + + /* set initial configuration done */ + if (!wm_data->init_cfg_done) + wm_data->init_cfg_done = true; + + if ((wm_data->out_rsrc_data->mc_based || + wm_data->out_rsrc_data->cntxt_cfg_except) && + !wm_data->mc_data[hw_cntxt_id].init_cfg_done) + wm_data->mc_data[hw_cntxt_id].init_cfg_done = true; + } + + num_regval_pairs = j / 2; + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random(num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > update_buf->cmd.size) { + CAM_ERR(CAM_ISP, + "VFE:%u Failed! Buf size:%d insufficient, expected size:%d", + bus_priv->common_data.core_index, update_buf->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom(update_buf->cmd.cmd_buf_addr, num_regval_pairs, + reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + update_buf->cmd.used_bytes = size * 4; + } else { + CAM_DBG(CAM_ISP, "No reg val pairs. 
num_wms: %u VFE:%u", + vfe_out_data->num_wm, bus_priv->common_data.core_index); + update_buf->cmd.used_bytes = 0; + } + + return 0; +} + +static int cam_vfe_bus_ver3_update_hfr(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *update_hfr; + struct cam_vfe_bus_ver3_priv *bus_priv; + struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_isp_port_hfr_config *hfr_cfg = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + uint32_t i, j, size = 0; + + update_hfr = (struct cam_isp_hw_get_cmd_update *) cmd_args; + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + + vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *) + update_hfr->res->res_priv; + + if (!vfe_out_data || !vfe_out_data->common_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid data"); + return -EINVAL; + } + + cdm_util_ops = vfe_out_data->common_data->cdm_util_ops; + reg_val_pair = &vfe_out_data->common_data->io_buf_update[0]; + hfr_cfg = (struct cam_isp_port_hfr_config *)update_hfr->data; + + for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) { + CAM_ERR(CAM_ISP, + "VFE:%u reg_val_pair %d exceeds the array limit %zu", + bus_priv->common_data.core_index, j, MAX_REG_VAL_PAIR_SIZE); + return -ENOMEM; + } + + wm_data = vfe_out_data->wm_res[i].res_priv; + + /* Frame drop config is only applicable to full IFE */ + if (!bus_priv->common_data.is_lite) { + if ((wm_data->framedrop_pattern != + hfr_cfg->framedrop_pattern) || + !wm_data->hfr_cfg_done) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->framedrop_pattern, + hfr_cfg->framedrop_pattern); + wm_data->framedrop_pattern = hfr_cfg->framedrop_pattern; + CAM_DBG(CAM_ISP, "VFE:%u WM:%d framedrop pattern 0x%X", + bus_priv->common_data.core_index, wm_data->index, + 
					wm_data->framedrop_pattern);
			}

			if (wm_data->framedrop_period != hfr_cfg->framedrop_period ||
				!wm_data->hfr_cfg_done) {
				CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair,
					MAX_REG_VAL_PAIR_SIZE, j,
					wm_data->hw_regs->framedrop_period,
					hfr_cfg->framedrop_period);
				wm_data->framedrop_period = hfr_cfg->framedrop_period;
				CAM_DBG(CAM_ISP, "VFE:%u WM:%d framedrop period 0x%X",
					bus_priv->common_data.core_index, wm_data->index,
					wm_data->framedrop_period);
			}
		}

		/* IRQ subsampling applies on both full and lite IFE */
		if (wm_data->irq_subsample_period != hfr_cfg->subsample_period
			|| !wm_data->hfr_cfg_done) {
			CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair,
				MAX_REG_VAL_PAIR_SIZE, j,
				wm_data->hw_regs->irq_subsample_period,
				hfr_cfg->subsample_period);
			wm_data->irq_subsample_period =
				hfr_cfg->subsample_period;
			CAM_DBG(CAM_ISP, "VFE:%u WM:%d irq subsample period 0x%X",
				bus_priv->common_data.core_index, wm_data->index,
				wm_data->irq_subsample_period);
		}

		if (wm_data->irq_subsample_pattern != hfr_cfg->subsample_pattern
			|| !wm_data->hfr_cfg_done) {
			CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair,
				MAX_REG_VAL_PAIR_SIZE, j,
				wm_data->hw_regs->irq_subsample_pattern,
				hfr_cfg->subsample_pattern);
			wm_data->irq_subsample_pattern =
				hfr_cfg->subsample_pattern;
			CAM_DBG(CAM_ISP, "VFE:%u WM:%d irq subsample pattern 0x%X",
				bus_priv->common_data.core_index, wm_data->index,
				wm_data->irq_subsample_pattern);
		}

		/* set initial configuration done */
		if (!wm_data->hfr_cfg_done)
			wm_data->hfr_cfg_done = true;
	}

	num_regval_pairs = j / 2;

	if (num_regval_pairs) {
		size = cdm_util_ops->cdm_required_size_reg_random(
			num_regval_pairs);

		/* cdm util returns dwords, need to convert to bytes */
		if ((size * 4) > update_hfr->cmd.size) {
			CAM_ERR(CAM_ISP,
				"VFE:%u Failed! Buf size:%d insufficient, expected size:%d",
				bus_priv->common_data.core_index, update_hfr->cmd.size, size);
			return -ENOMEM;
		}

		cdm_util_ops->cdm_write_regrandom(
			update_hfr->cmd.cmd_buf_addr,
			num_regval_pairs, reg_val_pair);

		/* cdm util returns dwords, need to convert to bytes */
		update_hfr->cmd.used_bytes = size * 4;
	} else {
		update_hfr->cmd.used_bytes = 0;
		CAM_DBG(CAM_ISP,
			"VFE:%u No reg val pairs. num_wms: %u",
			bus_priv->common_data.core_index, vfe_out_data->num_wm);
	}

	return 0;
}

/*
 * Copy a generic UBWC plane config into a WM's cached UBWC cfg,
 * latching ubwc_updated whenever any field differs (or on first config,
 * i.e. !init_cfg_done) so the register-programming paths know a
 * (re)write is needed. h_init is only taken for non-dual WMs.
 */
static void cam_vfe_bus_ver3_update_wm_ubwc_data(
	struct cam_vfe_bus_ver3_wm_resource_data *wm_data,
	struct cam_vfe_bus_ver3_wm_ubwc_cfg_data *ubwc_cfg_data,
	struct cam_vfe_generic_ubwc_plane_config *ubwc_generic_plane_cfg,
	bool init_cfg_done)
{
	if (wm_data->packer_cfg !=
		ubwc_generic_plane_cfg->packer_config ||
		!init_cfg_done) {
		wm_data->packer_cfg =
			ubwc_generic_plane_cfg->packer_config;
		ubwc_cfg_data->ubwc_updated = true;
	}

	/* h_init is owned by the dual-VFE split logic for dual WMs */
	if ((!wm_data->is_dual) && ((wm_data->cfg.h_init !=
		ubwc_generic_plane_cfg->h_init) ||
		!init_cfg_done)) {
		wm_data->cfg.h_init = ubwc_generic_plane_cfg->h_init;
		ubwc_cfg_data->ubwc_updated = true;
	}

	if (ubwc_cfg_data->ubwc_meta_cfg !=
		ubwc_generic_plane_cfg->meta_stride ||
		!init_cfg_done) {
		ubwc_cfg_data->ubwc_meta_cfg =
			ubwc_generic_plane_cfg->meta_stride;
		ubwc_cfg_data->ubwc_updated = true;
	}

	if (ubwc_cfg_data->ubwc_mode_cfg !=
		ubwc_generic_plane_cfg->mode_config_0 ||
		!init_cfg_done) {
		ubwc_cfg_data->ubwc_mode_cfg =
			ubwc_generic_plane_cfg->mode_config_0;
		ubwc_cfg_data->ubwc_updated = true;
	}

	if (ubwc_cfg_data->ubwc_ctrl_2 !=
		ubwc_generic_plane_cfg->ctrl_2 ||
		!init_cfg_done) {
		ubwc_cfg_data->ubwc_ctrl_2 =
			ubwc_generic_plane_cfg->ctrl_2;
		ubwc_cfg_data->ubwc_updated = true;
	}

	if (ubwc_cfg_data->ubwc_lossy_threshold_0 !=
		ubwc_generic_plane_cfg->lossy_threshold_0 ||
		!init_cfg_done) {
		ubwc_cfg_data->ubwc_lossy_threshold_0 =
			ubwc_generic_plane_cfg->lossy_threshold_0;
		ubwc_cfg_data->ubwc_updated = true;
	}

	if (ubwc_cfg_data->ubwc_lossy_threshold_1 !=
		ubwc_generic_plane_cfg->lossy_threshold_1 ||
		!init_cfg_done) {
		ubwc_cfg_data->ubwc_lossy_threshold_1 =
			ubwc_generic_plane_cfg->lossy_threshold_1;
		ubwc_cfg_data->ubwc_updated = true;
	}

	if (ubwc_cfg_data->ubwc_offset_lossy_variance !=
		ubwc_generic_plane_cfg->lossy_var_offset ||
		!init_cfg_done) {
		ubwc_cfg_data->ubwc_offset_lossy_variance =
			ubwc_generic_plane_cfg->lossy_var_offset;
		ubwc_cfg_data->ubwc_updated = true;
	}

	if (ubwc_cfg_data->ubwc_bandwidth_limit !=
		ubwc_generic_plane_cfg->bandwidth_limit ||
		!init_cfg_done) {
		ubwc_cfg_data->ubwc_bandwidth_limit =
			ubwc_generic_plane_cfg->bandwidth_limit;
		ubwc_cfg_data->ubwc_updated = true;
	}
}

/* Apply a v2 generic UBWC config to each WM of an out resource.
 * (Continues beyond this chunk.) */
static int cam_vfe_bus_ver3_update_ubwc_config_v2(void *cmd_args)
{
	struct cam_isp_hw_get_cmd_update *update_ubwc;
	struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL;
	struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL;
	struct cam_vfe_generic_ubwc_config *ubwc_generic_cfg = NULL;
	struct cam_vfe_generic_ubwc_plane_config *ubwc_generic_plane_cfg = NULL;
	uint32_t i;
	int rc = 0;

	if (!cmd_args) {
		CAM_ERR(CAM_ISP, "Invalid args");
		rc = -EINVAL;
		goto end;
	}

	update_ubwc = (struct cam_isp_hw_get_cmd_update *) cmd_args;

	vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *)
		update_ubwc->res->res_priv;

	if (!vfe_out_data || !vfe_out_data->common_data->cdm_util_ops) {
		CAM_ERR(CAM_ISP, "Invalid data");
		rc = -EINVAL;
		goto end;
	}

	ubwc_generic_cfg = (struct cam_vfe_generic_ubwc_config *)
		update_ubwc->data;

	for (i = 0; i < vfe_out_data->num_wm; i++) {

		wm_data = vfe_out_data->wm_res[i].res_priv;
		ubwc_generic_plane_cfg = &ubwc_generic_cfg->ubwc_plane_cfg[i];

		if (!wm_data->hw_regs->ubwc_regs) {
			CAM_ERR(CAM_ISP,
				"VFE:%u No UBWC register to configure.",
				vfe_out_data->common_data->core_index);
+ rc = -EINVAL; + goto end; + } + + if (!wm_data->en_ubwc) { + CAM_ERR(CAM_ISP, "VFE:%u UBWC Disabled", + vfe_out_data->common_data->core_index); + rc = -EINVAL; + goto end; + } + + cam_vfe_bus_ver3_update_wm_ubwc_data(wm_data, &wm_data->ubwc_cfg_data, + ubwc_generic_plane_cfg, wm_data->init_cfg_done); + } + +end: + return rc; +} + +static int cam_vfe_bus_ver3_update_ubwc_config_v3(void *cmd_args) +{ + struct cam_isp_hw_get_cmd_update *update_ubwc; + struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_vfe_generic_ubwc_config *ubwc_generic_cfg = NULL; + struct cam_vfe_generic_ubwc_plane_config *ubwc_generic_plane_cfg = NULL; + uint32_t i, j; + int rc = 0; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + rc = -EINVAL; + goto end; + } + + update_ubwc = (struct cam_isp_hw_get_cmd_update *) cmd_args; + + vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *) + update_ubwc->res->res_priv; + + if (!vfe_out_data || !vfe_out_data->common_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid data"); + rc = -EINVAL; + goto end; + } + + ubwc_generic_cfg = (struct cam_vfe_generic_ubwc_config *) + update_ubwc->data; + + for (i = 0; i < vfe_out_data->num_wm; i++) { + + wm_data = vfe_out_data->wm_res[i].res_priv; + ubwc_generic_plane_cfg = &ubwc_generic_cfg->ubwc_plane_cfg[i]; + + if (!wm_data->hw_regs->ubwc_regs) { + CAM_ERR(CAM_ISP, + "VFE:%u No UBWC register to configure.", + vfe_out_data->common_data->core_index); + rc = -EINVAL; + goto end; + } + + if (!wm_data->en_ubwc) { + CAM_ERR(CAM_ISP, "VFE:%u UBWC Disabled", + vfe_out_data->common_data->core_index); + rc = -EINVAL; + goto end; + } + + if ((vfe_out_data->mc_based || vfe_out_data->cntxt_cfg_except) && + wm_data->mc_data) { + for (j = 0; j < CAM_ISP_MULTI_CTXT_MAX; j++) { + if (!(ubwc_generic_plane_cfg->hw_ctx_id_mask & BIT(j))) + continue; + + cam_vfe_bus_ver3_update_wm_ubwc_data(wm_data, + &wm_data->mc_data[j].ubwc_cfg_data, 
ubwc_generic_plane_cfg, + wm_data->mc_data[j].init_cfg_done); + } + } else { + cam_vfe_bus_ver3_update_wm_ubwc_data(wm_data, &wm_data->ubwc_cfg_data, + ubwc_generic_plane_cfg, wm_data->init_cfg_done); + } + } + +end: + return rc; +} + +static int cam_vfe_bus_ver3_update_stripe_cfg(void *priv, void *cmd_args, + uint32_t arg_size) +{ + struct cam_vfe_bus_ver3_priv *bus_priv; + struct cam_isp_hw_dual_isp_update_args *stripe_args; + struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_isp_dual_stripe_config *stripe_config; + uint32_t outport_id, ports_plane_idx, i; + + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + stripe_args = (struct cam_isp_hw_dual_isp_update_args *)cmd_args; + + vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *) + stripe_args->res->res_priv; + + if (!vfe_out_data) { + CAM_ERR(CAM_ISP, "Failed! Invalid data"); + return -EINVAL; + } + + outport_id = stripe_args->res->res_id & 0xFF; + if (stripe_args->res->res_id < CAM_ISP_IFE_OUT_RES_BASE || + stripe_args->res->res_id >= bus_priv->max_out_res) + return 0; + + ports_plane_idx = (stripe_args->split_id * + (stripe_args->dual_cfg->num_ports * CAM_PACKET_MAX_PLANES)) + + (outport_id * CAM_PACKET_MAX_PLANES); + for (i = 0; i < vfe_out_data->num_wm; i++) { + wm_data = vfe_out_data->wm_res[i].res_priv; + stripe_config = (struct cam_isp_dual_stripe_config *) + &stripe_args->dual_cfg->stripes_flex[ports_plane_idx + i]; + wm_data->cfg.width = stripe_config->width; + + /* + * UMD sends buffer offset address as offset for clients + * programmed to operate in frame/index based mode and h_init + * value as offset for clients programmed to operate in line + * based mode. 
+ */ + + if ((wm_data->wm_mode == CAM_VFE_WM_FRAME_BASED_MODE) || + (wm_data->wm_mode == CAM_VFE_WM_INDEX_BASED_MODE)) + wm_data->cfg.offset = stripe_config->offset; + else + wm_data->cfg.h_init = stripe_config->offset; + + CAM_DBG(CAM_ISP, + "VFE:%u out_type:0x%X WM:%d width:%d offset:0x%X h_init:%d", + vfe_out_data->common_data->core_index, stripe_args->res->res_id, + wm_data->index, wm_data->cfg.width, wm_data->cfg.offset, + wm_data->cfg.h_init); + } + + return 0; +} + +static int cam_vfe_bus_ver3_update_wm_config_v2( + void *cmd_args) +{ + int i; + struct cam_isp_hw_get_cmd_update *wm_config_update; + struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_isp_vfe_wm_config_v2 *wm_config = NULL; + struct cam_vfe_bus_ver3_wm_cfg_data *cfg; + struct cam_vfe_bus_ver3_reg_offset_common *common_reg = NULL; + int32_t hw_ctxt_id = 0; + unsigned long context_id_mask = 0; + bool update_wm_mode; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + wm_config_update = cmd_args; + vfe_out_data = wm_config_update->res->res_priv; + wm_config = (struct cam_isp_vfe_wm_config_v2 *) + wm_config_update->data; + + if (!vfe_out_data || !vfe_out_data->common_data->cdm_util_ops || !wm_config) { + CAM_ERR(CAM_ISP, "Invalid data"); + return -EINVAL; + } + + for (i = 0; i < vfe_out_data->num_wm; i++) { + context_id_mask = wm_config->context_id_mask; + cfg = NULL; + update_wm_mode = false; + wm_data = vfe_out_data->wm_res[i].res_priv; + common_reg = wm_data->common_data->common_reg; + if (vfe_out_data->mc_based) { + while (context_id_mask) { + hw_ctxt_id = ffs(context_id_mask) - 1; + if (hw_ctxt_id < 0 || hw_ctxt_id >= CAM_ISP_MULTI_CTXT_MAX) { + CAM_ERR(CAM_ISP, "VFE[%u] Invalid context id received %d", + vfe_out_data->common_data->core_index, + hw_ctxt_id); + return -EINVAL; + } + + clear_bit(hw_ctxt_id, &context_id_mask); + cfg = &wm_data->mc_data[hw_ctxt_id].cfg; + 
cfg->updated = true; + } + } else { + cfg = &wm_data->cfg; + } + + if (wm_config->wm_mode >= CAM_VFE_WM_MODE_MAX) { + CAM_ERR(CAM_ISP, "VFE:%u Invalid wm_mode: 0x%X WM:%d", + vfe_out_data->common_data->core_index, wm_config->wm_mode, + wm_data->index); + return -EINVAL; + } + + if (!cfg) { + CAM_ERR(CAM_ISP, "VFE:%u WM:%d config data is NULL", + vfe_out_data->common_data->core_index, wm_data->index); + return -EINVAL; + } + + cfg->h_init = wm_config->h_init; + cfg->width = wm_config->width; + if (wm_config->wm_mode != wm_data->wm_mode) { + wm_data->wm_mode = wm_config->wm_mode; + update_wm_mode = true; + } + + if (wm_config->enable) + cfg->en_cfg = ((wm_config->wm_mode << common_reg->wm_mode_shift) | + (wm_config->virtual_frame_en << common_reg->virtual_frm_en_shift) | + (1 << common_reg->wm_en_shift)); + else + cfg->en_cfg = ((wm_config->wm_mode << common_reg->wm_mode_shift) | + (wm_config->virtual_frame_en << common_reg->virtual_frm_en_shift)); + + /* + * Offset in cam_isp_vfe_wm_config_v2 blob is mapped to meta_offset + * for buffer alignment feature. Decision is based on param_mask BIT(0). + */ + cfg->meta_offset = 0; + if (i == PLANE_C) { + cfg->height = wm_config->height / 2; + cfg->image_offset = wm_config->offset_in_bytes / 2; + if (wm_config->param_mask & CAM_IFE_WM_BUF_ALLIGN_EN) + cfg->meta_offset = wm_config->offset / 2; + } else { + cfg->height = wm_config->height; + cfg->image_offset = wm_config->offset_in_bytes; + if (wm_config->param_mask & CAM_IFE_WM_BUF_ALLIGN_EN) + cfg->meta_offset = wm_config->offset; + } + + /* Per req configuring port/wm as lossy/loseless */ + cfg->rcs_en = (wm_config->param_mask & CAM_IFE_WM_RCS_EN); + + /* + * For RAW10/RAW12/RAW14 sensor mode seamless switch case, + * the format may be changed on RDIs, so we update RDIs WM data. 
+ */ + wm_data->update_wm_format = false; + if (wm_config->packer_format && (cfg->format != wm_config->packer_format)) { + cfg->format = wm_config->packer_format; + wm_data->update_wm_format = true; + } + + if ((vfe_out_data->out_type >= CAM_VFE_BUS_VER3_VFE_OUT_RDI0) && + (vfe_out_data->out_type <= CAM_VFE_BUS_VER3_VFE_OUT_RDI4) && + (wm_data->update_wm_format || update_wm_mode)) + cam_vfe_bus_ver3_config_rdi_wm(wm_data); + + CAM_DBG(CAM_ISP, + "VFE:%u WM:%d %s up_mode:%s up_fmt:%s en_cfg:0x%X ht:%d width:%d stride:%d", + vfe_out_data->common_data->core_index, wm_data->index, + vfe_out_data->wm_res[i].res_name, CAM_BOOL_TO_YESNO(update_wm_mode), + CAM_BOOL_TO_YESNO(wm_data->update_wm_format), cfg->en_cfg, cfg->height, + cfg->width, wm_data->cfg.stride); + CAM_DBG(CAM_ISP, + "VFE:%u WM:%d pack_fmt:%d ctx_mask:%u meta_of:0x%X h_init:0x%x img_of:0x%x", + vfe_out_data->common_data->core_index, wm_data->index, + wm_data->cfg.pack_fmt, wm_config->context_id_mask, cfg->meta_offset, + cfg->h_init, cfg->image_offset); + } + + return 0; + + +} + +static int cam_vfe_bus_ver3_update_wm_config( + void *cmd_args) +{ + int i; + struct cam_isp_hw_get_cmd_update *wm_config_update; + struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_isp_vfe_wm_config *wm_config = NULL; + struct cam_vfe_bus_ver3_reg_offset_common *common_reg = NULL; + enum cam_vfe_bus_ver3_packer_format packer_fmt = PACKER_FMT_VER3_MAX; + bool update_wm_mode; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + wm_config_update = cmd_args; + vfe_out_data = wm_config_update->res->res_priv; + wm_config = (struct cam_isp_vfe_wm_config *) + wm_config_update->data; + + if (!vfe_out_data || !vfe_out_data->common_data->cdm_util_ops || !wm_config) { + CAM_ERR(CAM_ISP, "Invalid data"); + return -EINVAL; + } + + for (i = 0; i < vfe_out_data->num_wm; i++) { + wm_data = vfe_out_data->wm_res[i].res_priv; + common_reg = 
wm_data->common_data->common_reg; + update_wm_mode = false; + + if (wm_config->wm_mode >= CAM_VFE_WM_MODE_MAX) { + CAM_ERR(CAM_ISP, "VFE:%u Invalid wm_mode: 0x%X WM:%d", + vfe_out_data->common_data->core_index, wm_config->wm_mode, + wm_data->index); + return -EINVAL; + } + + if (wm_config->wm_mode != wm_data->wm_mode) { + wm_data->wm_mode = wm_config->wm_mode; + update_wm_mode = true; + } + + wm_data->cfg.en_cfg = ((wm_config->wm_mode << common_reg->wm_mode_shift) | + (wm_config->virtual_frame_en << common_reg->virtual_frm_en_shift) | + (1 << common_reg->wm_en_shift)); + wm_data->cfg.width = wm_config->width; + + if (i == PLANE_C) + wm_data->cfg.height = wm_config->height / 2; + else + wm_data->cfg.height = wm_config->height; + + /* + * For RAW10/RAW12/RAW14 sensor mode seamless switch case, + * the format may be changed on RDIs, so we update RDIs WM data. + */ + wm_data->update_wm_format = false; + if ((wm_config->packer_format != CAM_FORMAT_BASE) && + (wm_data->cfg.format != wm_config->packer_format)) { + wm_data->update_wm_format = true; + wm_data->cfg.format = wm_config->packer_format; + packer_fmt = cam_vfe_bus_ver3_get_packer_fmt( + wm_config->packer_format, wm_data->index); + + /* Reconfigure only for valid packer fmt */ + if (packer_fmt != PACKER_FMT_VER3_MAX) { + /* LSB aligned for plain type format */ + switch (wm_config->packer_format) { + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN16_10_LSB: + packer_fmt |= + (1 << wm_data->common_data->pack_align_shift); + wm_data->cfg.pack_fmt = packer_fmt; + break; + default: + break; + } + } else { + CAM_ERR(CAM_ISP, "VFE:%u Invalid format:%d", + vfe_out_data->common_data->core_index, + wm_config->packer_format); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, + "VFE:%u WM:%d update format:%d pack_fmt:%d", + vfe_out_data->common_data->core_index, wm_data->index, + wm_config->packer_format, wm_data->cfg.pack_fmt); + } + + if 
((vfe_out_data->out_type >= CAM_VFE_BUS_VER3_VFE_OUT_RDI0) && + (vfe_out_data->out_type <= CAM_VFE_BUS_VER3_VFE_OUT_RDI4) && + (wm_data->update_wm_format || update_wm_mode)) + cam_vfe_bus_ver3_config_rdi_wm(wm_data); + + CAM_DBG(CAM_ISP, + "VFE:%u WM:%d %s update_mode:%s update_fmt:%s en_cfg:0x%X height:%d width:%d stride:%d", + vfe_out_data->common_data->core_index, wm_data->index, + vfe_out_data->wm_res[i].res_name, CAM_BOOL_TO_YESNO(update_wm_mode), + CAM_BOOL_TO_YESNO(wm_data->update_wm_format), wm_data->cfg.en_cfg, + wm_data->cfg.height, wm_data->cfg.width, wm_data->cfg.stride); + } + + return 0; +} + +static int cam_vfe_bus_update_bw_limiter( + void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *wm_config_update; + struct cam_vfe_bus_ver3_vfe_out_data *vfe_out_data = NULL; + struct cam_cdm_utils_ops *cdm_util_ops; + struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL; + struct cam_isp_wm_bw_limiter_config *wm_bw_limit_cfg = NULL; + uint32_t counter_limit = 0, reg_val = 0; + uint32_t *reg_val_pair, num_regval_pairs = 0; + uint32_t i, j, size = 0; + bool limiter_enabled = false; + + wm_config_update = (struct cam_isp_hw_get_cmd_update *) cmd_args; + wm_bw_limit_cfg = (struct cam_isp_wm_bw_limiter_config *) + wm_config_update->data; + + vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *) + wm_config_update->res->res_priv; + if (!vfe_out_data || !vfe_out_data->common_data->cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid data"); + return -EINVAL; + } + + cdm_util_ops = vfe_out_data->common_data->cdm_util_ops; + reg_val_pair = &vfe_out_data->common_data->io_buf_update[0]; + for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) { + if (j >= (MAX_REG_VAL_PAIR_SIZE - (MAX_BUF_UPDATE_REG_NUM * 2))) { + CAM_ERR(CAM_ISP, + "VFE:%u reg_val_pair %d exceeds the array limit %zu for WM idx %d", + vfe_out_data->common_data->core_index, j, + MAX_REG_VAL_PAIR_SIZE, i); + return -ENOMEM; + } + + /* Num WMs needs to match max planes */ + if (i >= 
CAM_PACKET_MAX_PLANES) { + CAM_WARN(CAM_ISP, + "VFE:%u Num of WMs: %d exceeded max planes", + vfe_out_data->common_data->core_index, i); + goto add_reg_pair; + } + + wm_data = (struct cam_vfe_bus_ver3_wm_resource_data *) + vfe_out_data->wm_res[i].res_priv; + if (!wm_data->hw_regs->bw_limiter_addr) { + CAM_ERR(CAM_ISP, + "VFE:%u WM: %d %s has no support for bw limiter", + vfe_out_data->common_data->core_index, wm_data->index, + vfe_out_data->wm_res[i].res_name); + return -EINVAL; + } + + counter_limit = wm_bw_limit_cfg->counter_limit[i]; + + /* Validate max counter limit */ + if (counter_limit > + wm_data->common_data->max_bw_counter_limit) { + CAM_WARN(CAM_ISP, + "VFE:%u Invalid counter limit: 0x%x capping to max: 0x%x", + vfe_out_data->common_data->core_index, + wm_bw_limit_cfg->counter_limit[i], + wm_data->common_data->max_bw_counter_limit); + counter_limit = + wm_data->common_data->max_bw_counter_limit; + } + + if (wm_bw_limit_cfg->enable_limiter && counter_limit) { + reg_val = 1; + reg_val |= (counter_limit << 1); + limiter_enabled = true; + } else { + reg_val = 0; + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + MAX_REG_VAL_PAIR_SIZE, j, + wm_data->hw_regs->bw_limiter_addr, reg_val); + CAM_DBG(CAM_ISP, "VFE:%u WM: %d for %s bw_limter: 0x%x", + vfe_out_data->common_data->core_index, wm_data->index, + vfe_out_data->wm_res[i].res_name, + reg_val_pair[j-1]); + } + +add_reg_pair: + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > wm_config_update->cmd.size) { + CAM_ERR(CAM_ISP, + "VFE:%u Failed! 
Buf size:%d insufficient, expected size:%d", + vfe_out_data->common_data->core_index, + wm_config_update->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + wm_config_update->cmd.cmd_buf_addr, num_regval_pairs, + reg_val_pair); + + /* cdm util returns dwords, need to convert to bytes */ + wm_config_update->cmd.used_bytes = size * 4; + } else { + CAM_DBG(CAM_ISP, + "VFE:%u No reg val pairs. num_wms: %u", + vfe_out_data->common_data->core_index, vfe_out_data->num_wm); + wm_config_update->cmd.used_bytes = 0; + } + + vfe_out_data->limiter_enabled = limiter_enabled; + return 0; +} + +static int cam_vfe_bus_ver3_mc_ctxt_sel( + void *priv, void *cmd_args, uint32_t arg_size) + +{ + struct cam_vfe_bus_ver3_priv *bus_priv; + struct cam_isp_hw_get_cmd_update *mc_config; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_vfe_bus_ver3_reg_offset_common *common_reg; + uint32_t reg_val[2], ctxt_id = 0; + uint32_t size = 0; + + if (!priv || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid args priv %pK cmd_args %pK", + priv, cmd_args); + return -EINVAL; + } + + bus_priv = (struct cam_vfe_bus_ver3_priv *)priv; + mc_config = (struct cam_isp_hw_get_cmd_update *)cmd_args; + ctxt_id = *((uint32_t *)mc_config->data); + + common_reg = bus_priv->common_data.common_reg; + reg_val[0] = common_reg->ctxt_sel; + reg_val[1] = (ctxt_id << common_reg->mc_write_sel_shift); + + CAM_DBG(CAM_ISP, "CTXT_SEL updated with ctxt_id: %u, val: 0x%x", + ctxt_id, reg_val[1]); + + cdm_util_ops = bus_priv->common_data.cdm_util_ops; + size = cdm_util_ops->cdm_required_size_reg_random(1); + + /* cdm util returns dwords, need to convert to bytes */ + if ((size * 4) > mc_config->cmd.size) { + CAM_ERR(CAM_ISP, + "Failed! 
Buf size:%d insufficient, expected size:%d", + mc_config->cmd.size, size); + return -ENOMEM; + } + + cdm_util_ops->cdm_write_regrandom( + mc_config->cmd.cmd_buf_addr, 1, reg_val); + + /* cdm util returns dwords, need to convert to bytes */ + mc_config->cmd.used_bytes = size * 4; + + return 0; +} + +static int cam_vfe_bus_ver3_irq_inject( + void *priv, void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_bus_ver3_priv *bus_priv = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_bus_ver3_hw_info *bus_hw_info = NULL; + struct cam_isp_irq_inject_param *inject_params = NULL; + struct cam_irq_register_set *inject_reg = NULL; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + bus_priv = (struct cam_vfe_bus_ver3_priv *)priv; + soc_info = bus_priv->common_data.soc_info; + bus_hw_info = (struct cam_vfe_bus_ver3_hw_info *)bus_priv->bus_hw_info; + inject_params = (struct cam_isp_irq_inject_param *)cmd_args; + + if (inject_params->reg_unit == + CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG) + inject_reg = &bus_hw_info->common_reg.irq_reg_info.irq_reg_set[0]; + else if (inject_params->reg_unit == + CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_1_REG) + inject_reg = &bus_hw_info->common_reg.irq_reg_info.irq_reg_set[1]; + else + return -EINVAL; + + if (!inject_reg) { + CAM_INFO(CAM_ISP, "Invalid inject_reg"); + return -EINVAL; + } + + cam_io_w_mb(inject_params->irq_mask, + soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base + + inject_reg->set_reg_offset); + cam_io_w_mb(0x10, soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base + + bus_hw_info->common_reg.irq_reg_info.global_irq_cmd_offset); + CAM_INFO(CAM_ISP, "Injected : irq_mask %#x set_reg_offset %#x", + inject_params->irq_mask, inject_reg->set_reg_offset); + + return 0; +} + +static int cam_vfe_bus_ver3_dump_irq_desc( + void *priv, void *cmd_args, uint32_t arg_size) +{ + int i, offset = 0; + int num_irq_desc = 0; + struct cam_vfe_bus_ver3_priv *bus_priv = NULL; + struct cam_vfe_bus_ver3_hw_info 
*bus_hw_info = NULL; + struct cam_isp_irq_inject_param *inject_params = NULL; + struct cam_vfe_bus_ver3_err_irq_desc *err_irq_desc = NULL; + + if (!cmd_args) { + CAM_ERR(CAM_ISP, "Invalid params"); + return -EINVAL; + } + + bus_priv = (struct cam_vfe_bus_ver3_priv *)priv; + bus_hw_info = (struct cam_vfe_bus_ver3_hw_info *)bus_priv->bus_hw_info; + inject_params = (struct cam_isp_irq_inject_param *)cmd_args; + + if (inject_params->reg_unit == + CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_0_REG) { + err_irq_desc = bus_hw_info->bus_err_desc_0; + num_irq_desc = bus_hw_info->num_bus_errors_0; + } else if (inject_params->reg_unit == + CAM_ISP_IFE_0_BUS_WR_INPUT_IF_IRQ_SET_1_REG) { + err_irq_desc = bus_hw_info->bus_err_desc_1; + num_irq_desc = bus_hw_info->num_bus_errors_1; + } else + return -EINVAL; + + offset += scnprintf(inject_params->line_buf + offset, + LINE_BUFFER_LEN - offset, + "Printing executable IRQ for hw_type: VFE reg_unit: %d\n", + inject_params->reg_unit); + + for (i = 0; i < num_irq_desc; i++) + offset += scnprintf(inject_params->line_buf + offset, + LINE_BUFFER_LEN - offset, "%#12x : %s - %s\n", + err_irq_desc[i].bitmask, + err_irq_desc[i].err_name, + err_irq_desc[i].desc); + + return 0; +} + +static int cam_vfe_bus_ver3_start_hw(void *hw_priv, + void *start_hw_args, uint32_t arg_size) +{ + return cam_vfe_bus_ver3_start_vfe_out(hw_priv); +} + +static int cam_vfe_bus_ver3_stop_hw(void *hw_priv, + void *stop_hw_args, uint32_t arg_size) +{ + return cam_vfe_bus_ver3_stop_vfe_out(hw_priv); +} + +static int cam_vfe_bus_ver3_init_hw(void *hw_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_vfe_bus_ver3_priv *bus_priv = hw_priv; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + if (bus_priv->common_data.hw_init) + return 0; + + /* We take the controller only if the buf done is supported on vfe side + * for some hw, it is taken from IP extenal to VFE like CSID + */ + if ((bus_priv->common_data.supported_irq & 
CAM_VFE_HW_IRQ_CAP_BUF_DONE)) + bus_priv->common_data.buf_done_controller = + bus_priv->common_data.bus_irq_controller; + + bus_priv->common_data.hw_init = true; + + CAM_DBG(CAM_ISP, "VFE:%u bus-wr hw-version:0x%x", + bus_priv->common_data.core_index, + cam_io_r_mb(bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->hw_version)); + + return 0; +} + +static int cam_vfe_bus_ver3_deinit_hw(void *hw_priv, + void *deinit_hw_args, uint32_t arg_size) +{ + struct cam_vfe_bus_ver3_priv *bus_priv = hw_priv; + int rc = 0, i; + unsigned long flags; + + if (!bus_priv) { + CAM_ERR(CAM_ISP, "Error: Invalid args"); + return -EINVAL; + } + + if (!bus_priv->common_data.hw_init) + return 0; + + spin_lock_irqsave(&bus_priv->common_data.spin_lock, flags); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + for (i = 0; i < CAM_VFE_BUS_VER3_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + list_add_tail(&bus_priv->common_data.evt_payload[i].list, + &bus_priv->common_data.free_payload_list); + } + bus_priv->common_data.hw_init = false; + spin_unlock_irqrestore(&bus_priv->common_data.spin_lock, flags); + + return rc; +} + +static int cam_vfe_bus_get_res_for_mid( + struct cam_vfe_bus_ver3_priv *bus_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_bus_ver3_vfe_out_data *out_data = NULL; + struct cam_isp_hw_get_cmd_update *cmd_update = cmd_args; + struct cam_isp_hw_get_res_for_mid *get_res = NULL; + uint32_t num_mid = 0, port_mid[CAM_VFE_BUS_VER3_VFE_OUT_MAX] = {0}; + int i, j; + bool pid_found = false; + + get_res = (struct cam_isp_hw_get_res_for_mid *)cmd_update->data; + if (!get_res) { + CAM_ERR(CAM_ISP, + "invalid get resource for mid paramas"); + return -EINVAL; + } + + for (i = 0; i < bus_priv->num_out; i++) { + out_data = (struct cam_vfe_bus_ver3_vfe_out_data *) + bus_priv->vfe_out[i].res_priv; + + if (!out_data) + continue; + + for (j = 0; j < out_data->num_mid; j++) { + if (out_data->mid[j] == get_res->mid) + 
port_mid[num_mid++] = i; + } + } + + if (!num_mid) { + CAM_ERR(CAM_ISP, + "VFE:%u mid:%d does not match with any out resource", + bus_priv->common_data.core_index, get_res->mid); + get_res->out_res_id = 0; + return -EINVAL; + } + + for (i = 0; i < num_mid; i++) { + out_data = (struct cam_vfe_bus_ver3_vfe_out_data *) + bus_priv->vfe_out[port_mid[i]].res_priv; + get_res->out_res_id = bus_priv->vfe_out[port_mid[i]].res_id; + if (out_data->pid_mask & (BIT(get_res->pid))) { + get_res->out_res_id = bus_priv->vfe_out[port_mid[i]].res_id; + pid_found = true; + CAM_INFO(CAM_ISP, "VFE:%u match mid:%d out_res:0x%x found, is pid found:%d", + bus_priv->common_data.core_index, get_res->mid, + bus_priv->vfe_out[port_mid[i]].res_id, pid_found); + break; + } + } + + if (!pid_found) { + CAM_ERR(CAM_ISP, "VFE:%u pid:%d not found pid_found:%d mid:%d", + bus_priv->common_data.core_index, get_res->pid, pid_found, get_res->mid); + return -EINVAL; + } + + return 0; +} + +static int __cam_vfe_bus_ver3_process_cmd(void *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + return cam_vfe_bus_ver3_process_cmd(priv, cmd_type, cmd_args, arg_size); +} + +static uint32_t cam_vfe_bus_ver3_get_last_consumed_addr( + struct cam_vfe_bus_ver3_priv *bus_priv, + uint32_t res_type) +{ + uint32_t last_consumed_addr = 0; + struct cam_isp_resource_node *rsrc_node = NULL; + struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL; + struct cam_vfe_bus_ver3_wm_resource_data *wm_rsrc_data = NULL; + enum cam_vfe_bus_ver3_vfe_out_type res_id; + uint32_t outmap_index = + CAM_VFE_BUS_VER3_VFE_OUT_MAX; + + res_id = cam_vfe_bus_ver3_get_out_res_id_and_index(bus_priv, + res_type, &outmap_index); + if ((res_id >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) || + (outmap_index >= bus_priv->num_out)) { + CAM_WARN(CAM_ISP, + "target does not support req res id :0x%x outtype:%d index:%d", + res_type, res_id, outmap_index); + return 0; + } + + rsrc_node = &bus_priv->vfe_out[outmap_index]; + rsrc_data = rsrc_node->res_priv; + 
wm_rsrc_data = rsrc_data->wm_res[PLANE_Y].res_priv; + last_consumed_addr = cam_io_r_mb( + wm_rsrc_data->common_data->mem_base + + wm_rsrc_data->hw_regs->addr_status_0); + + CAM_DBG(CAM_ISP, "VFE:%u res_type:0x%x res_id:0x%x last_consumed_addr:0x%x", + bus_priv->common_data.core_index, res_type, res_id, last_consumed_addr); + + return last_consumed_addr; +} + +static int cam_vfe_bus_ver3_read_rst_perf_cntrs( + struct cam_vfe_bus_ver3_priv *bus_priv) +{ + int i; + bool print = false; + uint32_t val, status; + size_t len = 0; + uint8_t log_buf[256]; + struct cam_vfe_bus_ver3_common_data *common_data = &bus_priv->common_data; + + if (!common_data->perf_cnt_en) + return 0; + + CAM_DBG(CAM_ISP, "IFE%u Checking perf count status", common_data->core_index); + common_data->cntr++; + for (i = 0; i < CAM_VFE_PERF_CNT_MAX; i++) { + + status = cam_io_r_mb(common_data->mem_base + + common_data->common_reg->perf_cnt_status); + if (!(status & BIT(i))) + continue; + + val = cam_io_r_mb(common_data->mem_base + + common_data->common_reg->perf_cnt_reg[i].perf_cnt_val); + + cam_io_w_mb(common_data->perf_cnt_cfg[i], + common_data->mem_base + + common_data->common_reg->perf_cnt_reg[i].perf_cnt_cfg); + CAM_INFO_BUF(CAM_ISP, log_buf, 256, &len, "cnt%d: 0x%x ", i, val); + print = true; + } + + if (print) + CAM_INFO(CAM_ISP, + "IFE%u Frame: %u Perf counters %s", + common_data->core_index, common_data->cntr, log_buf); + + return 0; +} + +static int cam_vfe_bus_ver3_process_cmd( + struct cam_isp_resource_node *priv, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + struct cam_vfe_bus_ver3_priv *bus_priv; + uint32_t top_mask_0 = 0; + struct cam_isp_hw_cap *vfe_bus_cap; + struct cam_isp_hw_done_event_data *done; + + if (!priv || !cmd_args) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_BUF_UPDATE: + rc = cam_vfe_bus_ver3_update_wm(priv, cmd_args, arg_size); + break; + case 
CAM_ISP_HW_CMD_GET_HFR_UPDATE: + rc = cam_vfe_bus_ver3_update_hfr(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_WM_SECURE_MODE: + rc = cam_vfe_bus_ver3_get_secure_mode(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_STRIPE_UPDATE: + rc = cam_vfe_bus_ver3_update_stripe_cfg(priv, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ: + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + if (bus_priv->error_irq_handle) { + CAM_DBG(CAM_ISP, "VFE:%u Mask off bus error irq handler", + priv->hw_intf->hw_idx); + rc = cam_irq_controller_unsubscribe_irq( + bus_priv->common_data.bus_irq_controller, + bus_priv->error_irq_handle); + bus_priv->error_irq_handle = 0; + } + break; + case CAM_ISP_HW_CMD_DUMP_BUS_INFO: { + struct cam_isp_hw_event_info *event_info; + + event_info = + (struct cam_isp_hw_event_info *)cmd_args; + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + + rc = cam_vfe_bus_ver3_print_dimensions( + event_info->res_id, bus_priv); + break; + } + case CAM_ISP_HW_IFE_BUS_MINI_DUMP: { + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + + rc = cam_vfe_bus_ver3_mini_dump(bus_priv, cmd_args); + break; + } + case CAM_ISP_HW_USER_DUMP: { + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + + rc = cam_vfe_bus_ver3_user_dump(bus_priv, cmd_args); + break; + } + case CAM_ISP_HW_CMD_UBWC_UPDATE_V2: + rc = cam_vfe_bus_ver3_update_ubwc_config_v2(cmd_args); + break; + case CAM_ISP_HW_CMD_UBWC_UPDATE_V3: + rc = cam_vfe_bus_ver3_update_ubwc_config_v3(cmd_args); + break; + case CAM_ISP_HW_CMD_WM_CONFIG_UPDATE: + rc = cam_vfe_bus_ver3_update_wm_config(cmd_args); + break; + case CAM_ISP_HW_CMD_WM_CONFIG_UPDATE_V2: + rc = cam_vfe_bus_ver3_update_wm_config_v2(cmd_args); + break; + + case CAM_ISP_HW_CMD_UNMASK_BUS_WR_IRQ: + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + top_mask_0 = cam_io_r_mb(bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->top_irq_mask_0); + top_mask_0 |= (1 << bus_priv->top_irq_shift); + 
cam_io_w_mb(top_mask_0, bus_priv->common_data.mem_base + + bus_priv->common_data.common_reg->top_irq_mask_0); + break; + case CAM_ISP_HW_CMD_GET_RES_FOR_MID: + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + rc = cam_vfe_bus_get_res_for_mid(bus_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_QUERY_CAP: + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + vfe_bus_cap = (struct cam_isp_hw_cap *) cmd_args; + vfe_bus_cap->max_out_res_type = bus_priv->max_out_res; + vfe_bus_cap->support_consumed_addr = + bus_priv->common_data.support_consumed_addr; + vfe_bus_cap->skip_regdump_data.skip_regdump = + bus_priv->bus_hw_info->skip_regdump; + vfe_bus_cap->skip_regdump_data.skip_regdump_start_offset = + bus_priv->bus_hw_info->skip_regdump_start_offset; + vfe_bus_cap->skip_regdump_data.skip_regdump_stop_offset = + bus_priv->bus_hw_info->skip_regdump_stop_offset; + vfe_bus_cap->num_wr_perf_counters = + bus_priv->bus_hw_info->common_reg.num_perf_counters; + rc = 0; + break; + case CAM_ISP_HW_CMD_GET_LAST_CONSUMED_ADDR: + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + done = (struct cam_isp_hw_done_event_data *) cmd_args; + done->last_consumed_addr = cam_vfe_bus_ver3_get_last_consumed_addr( + bus_priv, done->resource_handle); + if (done->last_consumed_addr) + rc = 0; + break; + case CAM_ISP_HW_CMD_IFE_DEBUG_CFG: { + int i; + struct cam_vfe_generic_debug_config *debug_cfg; + + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + debug_cfg = (struct cam_vfe_generic_debug_config *)cmd_args; + bus_priv->common_data.disable_mmu_prefetch = + debug_cfg->disable_ife_mmu_prefetch; + + for (i = 0; i < CAM_VFE_PERF_CNT_MAX; i++) { + bus_priv->common_data.perf_cnt_cfg[i] = + debug_cfg->vfe_bus_wr_perf_counter_val[i]; + } + + CAM_DBG(CAM_ISP, "IFE: %u bus WR prefetch %s", + bus_priv->common_data.core_index, + bus_priv->common_data.disable_mmu_prefetch ? 
+ "disabled" : "enabled"); + rc = 0; + } + break; + case CAM_ISP_HW_CMD_BUF_UPDATE: + rc = cam_vfe_bus_ver3_config_wm(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG: + rc = cam_vfe_bus_update_bw_limiter(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_MC_CTXT_SEL: + rc = cam_vfe_bus_ver3_mc_ctxt_sel(priv, cmd_args, arg_size); + break; + + case CAM_ISP_HW_CMD_IRQ_INJECTION: + rc = cam_vfe_bus_ver3_irq_inject(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_DUMP_IRQ_DESCRIPTION: + rc = cam_vfe_bus_ver3_dump_irq_desc(priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_READ_RST_PERF_CNTRS: { + bus_priv = (struct cam_vfe_bus_ver3_priv *) priv; + rc = cam_vfe_bus_ver3_read_rst_perf_cntrs(bus_priv); + } + break; + default: + CAM_ERR_RATE_LIMIT(CAM_ISP, "VFE:%u Invalid camif process command:%d", + priv->hw_intf->hw_idx, cmd_type); + break; + } + + return rc; +} + +int cam_vfe_bus_ver3_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus) +{ + int i, rc = 0; + struct cam_vfe_bus_ver3_priv *bus_priv = NULL; + struct cam_vfe_bus *vfe_bus_local; + struct cam_vfe_bus_ver3_hw_info *ver3_hw_info = bus_hw_info; + struct cam_vfe_soc_private *soc_private = NULL; + + CAM_DBG(CAM_ISP, "Enter"); + + if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) { + CAM_ERR(CAM_ISP, + "Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK", + soc_info, hw_intf, bus_hw_info); + CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller); + rc = -EINVAL; + goto end; + } + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Invalid soc_private"); + rc = -ENODEV; + goto end; + } + + vfe_bus_local = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_bus), GFP_KERNEL); + if (!vfe_bus_local) { + CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus"); + rc = -ENOMEM; + goto end; + } + + bus_priv = CAM_MEM_ZALLOC(sizeof(struct 
cam_vfe_bus_ver3_priv), + GFP_KERNEL); + if (!bus_priv) { + CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv"); + rc = -ENOMEM; + goto free_bus_local; + } + vfe_bus_local->bus_priv = bus_priv; + + bus_priv->num_client = ver3_hw_info->num_client; + bus_priv->num_out = ver3_hw_info->num_out; + bus_priv->num_comp_grp = ver3_hw_info->num_comp_grp; + bus_priv->top_irq_shift = ver3_hw_info->top_irq_shift; + bus_priv->max_out_res = ver3_hw_info->max_out_res; + bus_priv->common_data.num_sec_out = 0; + bus_priv->common_data.secure_mode = CAM_SECURE_MODE_NON_SECURE; + bus_priv->common_data.core_index = soc_info->index; + bus_priv->common_data.mem_base = + CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX); + bus_priv->common_data.hw_intf = hw_intf; + bus_priv->common_data.vfe_irq_controller = vfe_irq_controller; + bus_priv->common_data.common_reg = &ver3_hw_info->common_reg; + bus_priv->common_data.hw_init = false; + + bus_priv->common_data.is_lite = soc_private->is_ife_lite; + bus_priv->common_data.support_consumed_addr = + ver3_hw_info->support_consumed_addr; + bus_priv->common_data.support_burst_limit = + ver3_hw_info->support_burst_limit; + bus_priv->common_data.disable_ubwc_comp = false; + bus_priv->common_data.supported_irq = ver3_hw_info->supported_irq; + bus_priv->common_data.comp_config_needed = + ver3_hw_info->comp_cfg_needed; + bus_priv->common_data.init_irq_subscribed = false; + bus_priv->common_data.disable_mmu_prefetch = false; + bus_priv->common_data.pack_align_shift = + ver3_hw_info->pack_align_shift; + bus_priv->common_data.max_bw_counter_limit = + ver3_hw_info->max_bw_counter_limit; + bus_priv->num_cons_err = ver3_hw_info->num_cons_err; + bus_priv->constraint_error_list = ver3_hw_info->constraint_error_list; + bus_priv->common_data.soc_info = soc_info; + bus_priv->bus_hw_info = ver3_hw_info; + + if (bus_priv->num_out >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) { + CAM_ERR(CAM_ISP, "VFE:%u number of vfe out:%d more than max value:%d ", + 
bus_priv->common_data.core_index, bus_priv->num_out, + CAM_VFE_BUS_VER3_VFE_OUT_MAX); + rc = -EINVAL; + goto free_bus_priv; + } + + bus_priv->comp_grp = CAM_MEM_ZALLOC((sizeof(struct cam_isp_resource_node) * + bus_priv->num_comp_grp), GFP_KERNEL); + if (!bus_priv->comp_grp) { + CAM_ERR(CAM_ISP, "VFE:%u Failed to alloc for bus comp groups", + bus_priv->common_data.core_index); + rc = -ENOMEM; + goto free_bus_priv; + } + + bus_priv->vfe_out = CAM_MEM_ZALLOC((sizeof(struct cam_isp_resource_node) * + bus_priv->num_out), GFP_KERNEL); + if (!bus_priv->vfe_out) { + CAM_ERR(CAM_ISP, "VFE:%u Failed to alloc for bus out res", + bus_priv->common_data.core_index); + rc = -ENOMEM; + goto free_comp_grp; + } + + for (i = 0; i < CAM_VFE_BUS_VER3_SRC_GRP_MAX; i++) + bus_priv->common_data.rup_irq_handle[i] = 0; + + mutex_init(&bus_priv->common_data.bus_mutex); + + rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base, + &ver3_hw_info->common_reg.irq_reg_info, + &bus_priv->common_data.bus_irq_controller); + if (rc) { + CAM_ERR(CAM_ISP, "VFE:%u Init bus_irq_controller failed", + bus_priv->common_data.core_index); + goto free_vfe_out; + } + + for (i = 0; i < bus_priv->num_comp_grp; i++) { + rc = cam_vfe_bus_ver3_init_comp_grp(i, soc_info, + bus_priv, bus_hw_info, + &bus_priv->comp_grp[i]); + if (rc < 0) { + CAM_ERR(CAM_ISP, "VFE:%u init comp_grp:%d failed rc:%d", + bus_priv->common_data.core_index, i, rc); + goto deinit_comp_grp; + } + } + + for (i = 0; i < CAM_VFE_BUS_VER3_VFE_OUT_MAX; i++) + bus_priv->vfe_out_map_outtype[i] = + CAM_VFE_BUS_VER3_VFE_OUT_MAX; + + for (i = 0; i < bus_priv->num_out; i++) { + rc = cam_vfe_bus_ver3_init_vfe_out_resource(i, bus_priv, + bus_hw_info); + if (rc < 0) { + CAM_ERR(CAM_ISP, + "VFE:%u init out_type:0x%X failed rc:%d", + bus_priv->common_data.core_index, i, rc); + goto deinit_vfe_out; + } + } + + spin_lock_init(&bus_priv->common_data.spin_lock); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + for (i = 0; i < 
CAM_VFE_BUS_VER3_PAYLOAD_MAX; i++) { + INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + list_add_tail(&bus_priv->common_data.evt_payload[i].list, + &bus_priv->common_data.free_payload_list); + } + + vfe_bus_local->hw_ops.reserve = cam_vfe_bus_ver3_acquire_vfe_out; + vfe_bus_local->hw_ops.release = cam_vfe_bus_ver3_release_vfe_out; + vfe_bus_local->hw_ops.start = cam_vfe_bus_ver3_start_hw; + vfe_bus_local->hw_ops.stop = cam_vfe_bus_ver3_stop_hw; + vfe_bus_local->hw_ops.init = cam_vfe_bus_ver3_init_hw; + vfe_bus_local->hw_ops.deinit = cam_vfe_bus_ver3_deinit_hw; + vfe_bus_local->top_half_handler = NULL; + vfe_bus_local->bottom_half_handler = NULL; + vfe_bus_local->hw_ops.process_cmd = __cam_vfe_bus_ver3_process_cmd; + + *vfe_bus = vfe_bus_local; + + CAM_DBG(CAM_ISP, "Exit, VFE:%u", bus_priv->common_data.core_index); + return rc; + +deinit_vfe_out: + for (--i; i >= 0; i--) + cam_vfe_bus_ver3_deinit_vfe_out_resource(&bus_priv->vfe_out[i]); + +deinit_comp_grp: + if (i < 0) + i = bus_priv->num_comp_grp; + for (--i; i >= 0; i--) + cam_vfe_bus_ver3_deinit_comp_grp(&bus_priv->comp_grp[i]); + +free_vfe_out: + CAM_MEM_FREE(bus_priv->vfe_out); + +free_comp_grp: + CAM_MEM_FREE(bus_priv->comp_grp); + +free_bus_priv: + CAM_MEM_FREE(vfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(vfe_bus_local); + +end: + return rc; +} + +int cam_vfe_bus_ver3_deinit( + struct cam_vfe_bus **vfe_bus) +{ + int i, rc = 0; + struct cam_vfe_bus_ver3_priv *bus_priv = NULL; + struct cam_vfe_bus *vfe_bus_local; + unsigned long flags; + + if (!vfe_bus || !*vfe_bus) { + CAM_ERR(CAM_ISP, "Invalid input"); + return -EINVAL; + } + vfe_bus_local = *vfe_bus; + + bus_priv = vfe_bus_local->bus_priv; + if (!bus_priv) { + CAM_ERR(CAM_ISP, "bus_priv is NULL"); + rc = -ENODEV; + goto free_bus_local; + } + + spin_lock_irqsave(&bus_priv->common_data.spin_lock, flags); + INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list); + for (i = 0; i < CAM_VFE_BUS_VER3_PAYLOAD_MAX; i++) + 
INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list); + bus_priv->common_data.hw_init = false; + bus_priv->common_data.init_irq_subscribed = false; + spin_unlock_irqrestore(&bus_priv->common_data.spin_lock, flags); + + for (i = 0; i < bus_priv->num_comp_grp; i++) { + rc = cam_vfe_bus_ver3_deinit_comp_grp(&bus_priv->comp_grp[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "VFE:%u deinit comp_grp:%d failed rc:%d", + bus_priv->common_data.core_index, i, rc); + } + + for (i = 0; i < bus_priv->num_out; i++) { + rc = cam_vfe_bus_ver3_deinit_vfe_out_resource( + &bus_priv->vfe_out[i]); + if (rc < 0) + CAM_ERR(CAM_ISP, + "VFE:%u deinit out_type:0x%X failed rc:%d", + bus_priv->common_data.core_index, i, rc); + } + + + rc = cam_irq_controller_deinit( + &bus_priv->common_data.bus_irq_controller); + if (rc) + CAM_ERR(CAM_ISP, + "VFE:%u Deinit BUS IRQ Controller failed rc=%d", + bus_priv->common_data.core_index, rc); + + CAM_MEM_FREE(bus_priv->comp_grp); + CAM_MEM_FREE(bus_priv->vfe_out); + + mutex_destroy(&bus_priv->common_data.bus_mutex); + CAM_MEM_FREE(vfe_bus_local->bus_priv); + +free_bus_local: + CAM_MEM_FREE(vfe_bus_local); + + *vfe_bus = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h new file mode 100644 index 0000000000..778d571250 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h @@ -0,0 +1,392 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#ifndef _CAM_VFE_BUS_VER3_H_ +#define _CAM_VFE_BUS_VER3_H_ + +#include "cam_irq_controller.h" +#include "cam_vfe_bus.h" +#include "cam_vfe_hw_intf.h" + +#define CAM_VFE_BUS_VER3_MAX_SUB_GRPS 6 +#define CAM_VFE_BUS_VER3_MAX_MID_PER_PORT 4 +#define CAM_VFE_BUS_VER3_CONS_ERR_MAX 32 +#define CAM_VFE_BUS_VER3_MAX_CLIENTS 28 + +enum cam_vfe_bus_wr_wm_mode { + CAM_VFE_WM_LINE_BASED_MODE, + CAM_VFE_WM_FRAME_BASED_MODE, + CAM_VFE_WM_INDEX_BASED_MODE, + CAM_VFE_WM_MODE_MAX, +}; + +enum cam_vfe_bus_ver3_vfe_core_id { + CAM_VFE_BUS_VER3_VFE_CORE_0, + CAM_VFE_BUS_VER3_VFE_CORE_1, + CAM_VFE_BUS_VER3_VFE_CORE_MAX, +}; + +enum cam_vfe_bus_ver3_src_grp { + CAM_VFE_BUS_VER3_SRC_GRP_0, + CAM_VFE_BUS_VER3_SRC_GRP_1, + CAM_VFE_BUS_VER3_SRC_GRP_2, + CAM_VFE_BUS_VER3_SRC_GRP_3, + CAM_VFE_BUS_VER3_SRC_GRP_4, + CAM_VFE_BUS_VER3_SRC_GRP_5, + CAM_VFE_BUS_VER3_SRC_GRP_6, + CAM_VFE_BUS_VER3_SRC_GRP_MAX, +}; + +enum cam_vfe_bus_ver3_comp_grp_type { + CAM_VFE_BUS_VER3_COMP_GRP_0, + CAM_VFE_BUS_VER3_COMP_GRP_1, + CAM_VFE_BUS_VER3_COMP_GRP_2, + CAM_VFE_BUS_VER3_COMP_GRP_3, + CAM_VFE_BUS_VER3_COMP_GRP_4, + CAM_VFE_BUS_VER3_COMP_GRP_5, + CAM_VFE_BUS_VER3_COMP_GRP_6, + CAM_VFE_BUS_VER3_COMP_GRP_7, + CAM_VFE_BUS_VER3_COMP_GRP_8, + CAM_VFE_BUS_VER3_COMP_GRP_9, + CAM_VFE_BUS_VER3_COMP_GRP_10, + CAM_VFE_BUS_VER3_COMP_GRP_11, + CAM_VFE_BUS_VER3_COMP_GRP_12, + CAM_VFE_BUS_VER3_COMP_GRP_13, + CAM_VFE_BUS_VER3_COMP_GRP_14, + CAM_VFE_BUS_VER3_COMP_GRP_15, + CAM_VFE_BUS_VER3_COMP_GRP_16, + CAM_VFE_BUS_VER3_COMP_GRP_MAX, +}; + +enum cam_vfe_bus_ver3_vfe_out_type { + CAM_VFE_BUS_VER3_VFE_OUT_RDI0, + CAM_VFE_BUS_VER3_VFE_OUT_RDI1, + CAM_VFE_BUS_VER3_VFE_OUT_RDI2, + CAM_VFE_BUS_VER3_VFE_OUT_RDI3, + CAM_VFE_BUS_VER3_VFE_OUT_RDI4, + CAM_VFE_BUS_VER3_VFE_OUT_FULL, + CAM_VFE_BUS_VER3_VFE_OUT_DS4, + CAM_VFE_BUS_VER3_VFE_OUT_DS16, + CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP, + CAM_VFE_BUS_VER3_VFE_OUT_FD, + CAM_VFE_BUS_VER3_VFE_OUT_PDAF, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE, + 
CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST, + CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP, + CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP, + CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP, + CAM_VFE_BUS_VER3_VFE_OUT_2PD, + CAM_VFE_BUS_VER3_VFE_OUT_LCR, + CAM_VFE_BUS_VER3_VFE_OUT_SPARSE_PD, + CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW, + CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_2PD, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE, + CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_BG, + CAM_VFE_BUS_VER3_VFE_OUT_PREPROCESS_RAW, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_CAF, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_BAYER_RS, + CAM_VFE_BUS_VER3_VFE_OUT_PDAF_PARSED, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_ALSC, + CAM_VFE_BUS_VER3_VFE_OUT_DS2, + CAM_VFE_BUS_VER3_VFE_OUT_IR, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_AF_BHIST, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_TMC_BHIST, + CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BHIST, + CAM_VFE_BUS_VER3_VFE_OUT_MAX, +}; + +/* + * struct cam_vfe_bus_ver3_err_irq_desc: + * + * @Brief: Bus error irq description + */ +struct cam_vfe_bus_ver3_err_irq_desc { + uint32_t bitmask; + char *err_name; + char *desc; +}; + +/* + * struct cam_vfe_constraint_error_info: + * + * @Brief: Constraint error info + */ +struct cam_vfe_constraint_error_info { + uint32_t bitmask; + char *error_description; +}; + +struct cam_vfe_bus_perf_cnt_hw_info { + uint32_t perf_cnt_cfg; + uint32_t perf_cnt_val; +}; + +/* + * struct cam_vfe_bus_ver3_reg_offset_common: + * + * @Brief: Common registers across all BUS Clients + */ +struct cam_vfe_bus_ver3_reg_offset_common { + uint32_t hw_version; + uint32_t cgc_ovd; + uint32_t comp_cfg_0; + uint32_t comp_cfg_1; + uint32_t ctxt_sel; + uint32_t if_frameheader_cfg[CAM_VFE_BUS_VER3_MAX_SUB_GRPS]; 
+ uint32_t ubwc_static_ctrl; + uint32_t pwr_iso_cfg; + uint32_t overflow_status_clear; + uint32_t ccif_violation_status; + uint32_t overflow_status; + uint32_t image_size_violation_status; + uint32_t debug_status_top_cfg; + uint32_t debug_status_top; + uint32_t test_bus_ctrl; + uint32_t mc_read_sel_shift; + uint32_t mc_write_sel_shift; + uint32_t mc_ctxt_mask; + uint32_t wm_mode_shift; + uint32_t wm_mode_val[CAM_VFE_WM_MODE_MAX]; + uint32_t wm_en_shift; + uint32_t frmheader_en_shift; + uint32_t virtual_frm_en_shift; + uint32_t top_irq_mask_0; + struct cam_irq_controller_reg_info irq_reg_info; + uint32_t num_perf_counters; + uint32_t perf_cnt_status; + struct cam_vfe_bus_perf_cnt_hw_info perf_cnt_reg[CAM_VFE_PERF_CNT_MAX]; +}; + +/* + * struct cam_vfe_bus_ver3_reg_offset_ubwc_client: + * + * @Brief: UBWC register offsets for BUS Clients + */ +struct cam_vfe_bus_ver3_reg_offset_ubwc_client { + uint32_t meta_addr; + uint32_t meta_cfg; + uint32_t mode_cfg; + uint32_t stats_ctrl; + uint32_t ctrl_2; + uint32_t lossy_thresh0; + uint32_t lossy_thresh1; + uint32_t off_lossy_var; + uint32_t bw_limit; + uint32_t ubwc_comp_en_bit; +}; + +/* + * struct cam_vfe_bus_ver3_reg_offset_bus_client: + * + * @Brief: Register offsets for BUS Clients + */ +struct cam_vfe_bus_ver3_reg_offset_bus_client { + uint32_t cfg; + uint32_t image_addr; + uint32_t frame_incr; + uint32_t image_cfg_0; + uint32_t image_cfg_1; + uint32_t image_cfg_2; + uint32_t packer_cfg; + uint32_t frame_header_addr; + uint32_t frame_header_incr; + uint32_t frame_header_cfg; + uint32_t line_done_cfg; + uint32_t irq_subsample_period; + uint32_t irq_subsample_pattern; + uint32_t framedrop_period; + uint32_t framedrop_pattern; + uint32_t mmu_prefetch_cfg; + uint32_t mmu_prefetch_max_offset; + uint32_t addr_cfg; + uint32_t ctxt_cfg; + uint32_t burst_limit; + uint32_t system_cache_cfg; + void *ubwc_regs; + uint32_t addr_status_0; + uint32_t addr_status_1; + uint32_t addr_status_2; + uint32_t addr_status_3; + uint32_t 
debug_status_cfg; + uint32_t debug_status_0; + uint32_t debug_status_1; + uint32_t debug_status_ctxt; + uint32_t hw_ctxt_cfg; + uint32_t bw_limiter_addr; + uint32_t comp_group; + uint64_t supported_formats; + uint32_t rcs_en_mask; +}; + +/* + * struct cam_vfe_bus_ver3_vfe_out_hw_info: + * + * @Brief: HW capability of VFE Bus Client + */ +struct cam_vfe_bus_ver3_vfe_out_hw_info { + enum cam_vfe_bus_ver3_vfe_out_type vfe_out_type; + uint32_t max_width; + uint32_t max_height; + uint32_t source_group; + uint32_t *mid; + uint32_t num_mid; + uint32_t num_wm; + uint32_t line_based; + uint32_t wm_idx[PLANE_MAX]; + uint32_t mc_grp_shift; + uint32_t early_done_mask; + uint8_t *name[PLANE_MAX]; + uint64_t pid_mask; + bool mc_based; + bool cntxt_cfg_except; +}; + + +/* + * struct cam_vfe_bus_ver3_hw_info: + * + * @Brief: HW register info for entire Bus + * + * @common_reg: Common register details + * @num_client: Total number of write clients + * @bus_client_reg: Bus client register info + * @vfe_out_hw_info: VFE output capability + * @num_cons_err: Number of constraint errors in list + * @constraint_error_list: Static list of all constraint errors + * @num_comp_grp: Number of composite groups + * @comp_done_mask: Mask shift for comp done mask + * @mc_comp_done_mask: Mask shift for hw multi-context comp done irq + * @top_irq_shift: Mask shift for top level BUS WR irq + * @support_consumed_addr: Indicate if bus support consumed address + * @max_out_res: Max vfe out resource value supported for hw + * @supported_irq: Mask to indicate the IRQ supported + * @comp_cfg_needed: Composite group config is needed for hw + * @pack_align_shift: Shift value for alignment of packer format + * @max_bw_counter_limit: Max BW counter limit + */ +struct cam_vfe_bus_ver3_hw_info { + struct cam_vfe_bus_ver3_reg_offset_common common_reg; + uint32_t num_client; + struct cam_vfe_bus_ver3_reg_offset_bus_client + bus_client_reg[CAM_VFE_BUS_VER3_MAX_CLIENTS]; + uint32_t num_out; + struct 
cam_vfe_bus_ver3_vfe_out_hw_info + vfe_out_hw_info[CAM_VFE_BUS_VER3_VFE_OUT_MAX]; + uint32_t num_cons_err; + struct cam_vfe_constraint_error_info + constraint_error_list[CAM_VFE_BUS_VER3_CONS_ERR_MAX]; + uint32_t num_bus_errors_0; + uint32_t num_bus_errors_1; + struct cam_vfe_bus_ver3_err_irq_desc *bus_err_desc_0; + struct cam_vfe_bus_ver3_err_irq_desc *bus_err_desc_1; + uint32_t num_comp_grp; + uint32_t comp_done_mask[CAM_VFE_BUS_VER3_COMP_GRP_MAX]; + uint32_t mc_comp_done_mask[CAM_VFE_BUS_VER3_COMP_GRP_MAX]; + uint32_t top_irq_shift; + bool support_consumed_addr; + uint32_t max_out_res; + uint32_t supported_irq; + bool comp_cfg_needed; + uint32_t pack_align_shift; + uint32_t max_bw_counter_limit; + bool support_burst_limit; + bool skip_regdump; + uint32_t skip_regdump_start_offset; + uint32_t skip_regdump_stop_offset; +}; + +/** + * struct cam_vfe_bus_ver3_wm_mini_dump - VFE WM data + * + * @width Width + * @height Height + * @stride stride + * @h_init init height + * @acquired_width acquired width + * @acquired_height acquired height + * @en_cfg Enable flag + * @format format + * @index Index + * @state state + * @name Res name + */ +struct cam_vfe_bus_ver3_wm_mini_dump { + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t h_init; + uint32_t acquired_width; + uint32_t acquired_height; + uint32_t en_cfg; + uint32_t format; + uint32_t index; + uint32_t state; + uint8_t name[CAM_ISP_RES_NAME_LEN]; +}; + +/** + * struct cam_vfe_bus_ver3_mini_dump_data - VFE bus mini dump data + * + * @wm: Write Master client information + * @clk_rate: Clock rate + * @num_client: Num client + * @hw_state: hw statte + * @hw_idx: Hw index + */ +struct cam_vfe_bus_ver3_mini_dump_data { + struct cam_vfe_bus_ver3_wm_mini_dump *wm; + uint64_t clk_rate; + uint32_t num_client; + uint8_t hw_state; + uint8_t hw_idx; +}; + +/* + * cam_vfe_bus_ver3_init() + * + * @Brief: Initialize Bus layer + * + * @soc_info: Soc Information for the associated HW + * @hw_intf: HW Interface of HW 
to which this resource belongs + * @bus_hw_info: BUS HW info that contains details of BUS registers + * @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top + * level IRQs + * @vfe_bus: Pointer to vfe_bus structure which will be filled + * and returned on successful initialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_bus_ver3_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus); + +/* + * cam_vfe_bus_ver3_deinit() + * + * @Brief: Deinitialize Bus layer + * + * @vfe_bus: Pointer to vfe_bus structure to deinitialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_bus_ver3_deinit(struct cam_vfe_bus **vfe_bus); + +#endif /* _CAM_VFE_BUS_VER3_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h new file mode 100644 index 0000000000..8300e5813d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_BUS_H_ +#define _CAM_VFE_BUS_H_ + +#include <uapi/media/cam_isp.h> + +#include "cam_hw_intf.h" +#include "cam_isp_hw.h" + +#define CAM_VFE_BUS_VER_1_0 0x1000 +#define CAM_VFE_BUS_VER_2_0 0x2000 +#define CAM_VFE_BUS_VER_3_0 0x3000 + +#define CAM_VFE_BUS_RD_VER_1_0 0x1000 + +#define ALIGNUP(value, alignment) \ + ((value + alignment - 1) / alignment * alignment) + +enum cam_vfe_bus_plane_type { + PLANE_Y, + PLANE_C, + PLANE_MAX, +}; + +enum cam_vfe_bus_type { + BUS_TYPE_WR, + BUS_TYPE_RD, + BUS_TYPE_MAX, +}; + +/* + * struct cam_vfe_bus: + * + * @Brief: Bus interface structure + * + * @bus_priv: Private data of BUS + * @hw_ops: Hardware interface functions + * @top_half_handler: Top Half handler function + * @bottom_half_handler: Bottom Half handler function + */ +struct cam_vfe_bus { + void *bus_priv; + + struct cam_hw_ops hw_ops; + CAM_IRQ_HANDLER_TOP_HALF top_half_handler; + CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler; +}; + +/* + * cam_vfe_bus_init() + * + * @Brief: Initialize Bus layer + * + * @bus_version: Version of BUS to initialize + * @bus_type: Bus Type RD/WR + * @soc_info: Soc Information for the associated HW + * @hw_intf: HW Interface of HW to which this resource belongs + * @bus_hw_info: BUS HW info that contains details of BUS registers + * @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top + * level IRQs + * @vfe_bus: Pointer to vfe_bus structure which will be filled + * and returned on successful initialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int cam_vfe_bus_init(uint32_t bus_version, + int bus_type, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *bus_hw_info, + void *vfe_irq_controller, + struct cam_vfe_bus **vfe_bus); + +/* + * cam_vfe_bus_deinit() + * + * @Brief: Deinitialize Bus layer + * + * @bus_version: Version of BUS to deinitialize + * @vfe_bus: Pointer to vfe_bus structure to deinitialize + * + * @Return: 0: Success + * Non-zero: Failure + */ +int 
cam_vfe_bus_deinit(uint32_t bus_version, + struct cam_vfe_bus **vfe_bus); + +#endif /* _CAM_VFE_BUS_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c new file mode 100644 index 0000000000..ac16e08ebd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/slab.h> +#include <uapi/media/cam_isp.h> +#include "cam_io_util.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_isp_hw.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_soc.h" +#include "cam_vfe_top.h" +#include "cam_vfe_top_ver2.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_vfe_camif_lite_ver2.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_mem_mgr_api.h" + +struct cam_vfe_mux_camif_lite_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_vfe_camif_lite_ver2_reg *camif_lite_reg; + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_camif_lite_ver2_reg_data *reg_data; + struct cam_hw_soc_info *soc_info; + enum cam_isp_hw_sync_mode sync_mode; + + cam_hw_mgr_event_cb_func event_cb; + void *priv; + int irq_err_handle; + int irq_handle; + void *vfe_irq_controller; + struct cam_vfe_top_irq_evt_payload + evt_payload[CAM_VFE_CAMIF_LITE_EVT_MAX]; + struct list_head free_payload_list; + spinlock_t spin_lock; + struct timespec64 error_ts; +}; + +static int cam_vfe_camif_lite_get_evt_payload( + struct cam_vfe_mux_camif_lite_data *camif_lite_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + int rc = 0; + + spin_lock(&camif_lite_priv->spin_lock); + if 
(list_empty(&camif_lite_priv->free_payload_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload"); + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry(&camif_lite_priv->free_payload_list, + struct cam_vfe_top_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); + rc = 0; +done: + spin_unlock(&camif_lite_priv->spin_lock); + return rc; +} + +static int cam_vfe_camif_lite_put_evt_payload( + struct cam_vfe_mux_camif_lite_data *camif_lite_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!camif_lite_priv) { + CAM_ERR(CAM_ISP, "Invalid param core_info NULL"); + return -EINVAL; + } + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "No payload to put"); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_vfe_top_irq_evt_payload); + spin_lock_irqsave(&camif_lite_priv->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &camif_lite_priv->free_payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&camif_lite_priv->spin_lock, flags); + + CAM_DBG(CAM_ISP, "Done"); + return 0; +} + +static int cam_vfe_camif_lite_err_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *camif_lite_node; + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + bool error_flag = false; + + CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x", + th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]); + + camif_lite_node = th_payload->handler_priv; + camif_lite_priv = camif_lite_node->res_priv; + /* + * need to handle overflow condition here, otherwise irq storm + * will block everything + */ + if (th_payload->evt_status_arr[1] || (th_payload->evt_status_arr[0] & + camif_lite_priv->reg_data->lite_err_irq_mask0)) { + CAM_ERR(CAM_ISP, + "CAMIF LITE ERR VFE:%d IRQ STATUS_0=0x%x STATUS_1=0x%x", + camif_lite_node->hw_intf->hw_idx, + 
th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1]); + CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d", + camif_lite_node->hw_intf->hw_idx); + cam_irq_controller_disable_all( + camif_lite_priv->vfe_irq_controller); + error_flag = true; + } + + rc = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "No tasklet_cmd is free in queue"); + CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_1=0x%x", + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1]); + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + if (error_flag) { + camif_lite_priv->error_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + camif_lite_priv->error_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + } + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + evt_payload->irq_reg_val[i] = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->violation_status); + + if (error_flag) + CAM_INFO(CAM_ISP, "Violation status = 0x%x", + evt_payload->irq_reg_val[i]); + + th_payload->evt_payload_priv = evt_payload; + + return rc; +} + + +static int cam_vfe_camif_lite_get_reg_update( + struct cam_isp_resource_node *camif_lite_res, + void *cmd_args, + uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t reg_val_pair[2]; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_vfe_mux_camif_lite_data *rsrc_data = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm 
returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + rsrc_data = camif_lite_res->res_priv; + reg_val_pair[0] = rsrc_data->camif_lite_reg->reg_update_cmd; + reg_val_pair[1] = rsrc_data->reg_data->dual_pd_reg_update_cmd_data; + CAM_DBG(CAM_ISP, "CAMIF Lite reg_update_cmd %x offset %x", + reg_val_pair[1], reg_val_pair[0]); + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + + cdm_args->cmd.used_bytes = size * 4; + + return 0; +} + +int cam_vfe_camif_lite_ver2_acquire_resource( + struct cam_isp_resource_node *camif_lite_res, + void *acquire_param) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_data; + struct cam_vfe_acquire_args *acquire_data; + int rc = 0; + + if (!camif_lite_res) { + CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + camif_lite_data = (struct cam_vfe_mux_camif_lite_data *) + camif_lite_res->res_priv; + acquire_data = (struct cam_vfe_acquire_args *)acquire_param; + + camif_lite_data->sync_mode = acquire_data->vfe_in.sync_mode; + camif_lite_data->event_cb = acquire_data->event_cb; + camif_lite_data->priv = acquire_data->priv; + + CAM_DBG(CAM_ISP, "hw id:%d sync_mode=%d", + camif_lite_res->hw_intf->hw_idx, + camif_lite_data->sync_mode); + return rc; +} + +static int cam_vfe_camif_lite_resource_start( + struct cam_isp_resource_node *camif_lite_res) +{ + struct cam_vfe_mux_camif_lite_data *rsrc_data; + uint32_t val = 0; + int rc = 0; + uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + + if (!camif_lite_res) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + if (camif_lite_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Error! 
Invalid camif lite res res_state:%d", + camif_lite_res->res_state); + return -EINVAL; + } + + rsrc_data = (struct cam_vfe_mux_camif_lite_data *) + camif_lite_res->res_priv; + + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] = + rsrc_data->reg_data->lite_err_irq_mask0; + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] = + rsrc_data->reg_data->lite_err_irq_mask1; + + /* vfe core config */ + val = cam_io_r_mb(rsrc_data->mem_base + + rsrc_data->common_reg->core_cfg); + + if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + val |= (1 << rsrc_data->reg_data->extern_reg_update_shift); + + val |= (1 << rsrc_data->reg_data->dual_pd_path_sel_shift); + + cam_io_w_mb(val, rsrc_data->mem_base + + rsrc_data->common_reg->core_cfg); + + CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d", + camif_lite_res->hw_intf->hw_idx, val); + + /* epoch config with 20 line */ + cam_io_w_mb(rsrc_data->reg_data->lite_epoch_line_cfg, + rsrc_data->mem_base + + rsrc_data->camif_lite_reg->lite_epoch_irq); + + camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + /* Reg Update */ + cam_io_w_mb(rsrc_data->reg_data->dual_pd_reg_update_cmd_data, + rsrc_data->mem_base + + rsrc_data->camif_lite_reg->reg_update_cmd); + CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", + camif_lite_res->hw_intf->hw_idx, + rsrc_data->reg_data->dual_pd_reg_update_cmd_data); + + if (!rsrc_data->irq_err_handle) { + rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_0, + err_irq_mask, + camif_lite_res, + cam_vfe_camif_lite_err_irq_top_half, + camif_lite_res->bottom_half_handler, + camif_lite_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (rsrc_data->irq_err_handle < 1) { + CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure"); + rc = -ENOMEM; + rsrc_data->irq_err_handle = 0; + } + } + + rsrc_data->error_ts.tv_sec = 0; + rsrc_data->error_ts.tv_nsec = 0; + + CAM_DBG(CAM_ISP, "Start Camif Lite IFE %d Done", + camif_lite_res->hw_intf->hw_idx); + return rc; +} + 
+static int cam_vfe_camif_lite_resource_stop( + struct cam_isp_resource_node *camif_lite_res) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + int rc = 0; + + if (!camif_lite_res) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + if ((camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) || + (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)) + return 0; + + camif_lite_priv = (struct cam_vfe_mux_camif_lite_data *)camif_lite_res; + + if (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + if (camif_lite_priv->irq_handle) { + cam_irq_controller_unsubscribe_irq( + camif_lite_priv->vfe_irq_controller, + camif_lite_priv->irq_handle); + camif_lite_priv->irq_handle = 0; + } + + if (camif_lite_priv->irq_err_handle) { + cam_irq_controller_unsubscribe_irq( + camif_lite_priv->vfe_irq_controller, + camif_lite_priv->irq_err_handle); + camif_lite_priv->irq_err_handle = 0; + } + + return rc; +} + +static int cam_vfe_camif_lite_process_cmd( + struct cam_isp_resource_node *rsrc_node, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + + if (!rsrc_node || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_vfe_camif_lite_get_reg_update(rsrc_node, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG: + break; + case CAM_ISP_HW_CMD_BLANKING_UPDATE: + rc = 0; + break; + default: + CAM_ERR(CAM_ISP, + "unsupported process command:%d", cmd_type); + break; + } + + return rc; +} + +static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *camif_lite_node; + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + + camif_lite_node = 
th_payload->handler_priv; + camif_lite_priv = camif_lite_node->res_priv; + + CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]); + CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]); + + rc = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue"); + CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x", + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1]); + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + th_payload->evt_payload_priv = evt_payload; + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +static int cam_vfe_camif_lite_handle_irq_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + int ret = CAM_VFE_IRQ_STATUS_MAX; + struct cam_isp_resource_node *camif_lite_node; + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + struct cam_vfe_top_irq_evt_payload *payload; + struct cam_isp_hw_event_info evt_info; + uint32_t irq_status0; + uint32_t irq_status1; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + struct cam_isp_hw_error_event_info err_evt_info; + struct timespec64 ts; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return ret; + } + + camif_lite_node = handler_priv; + camif_lite_priv = camif_lite_node->res_priv; + payload = evt_payload_priv; + irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0]; + irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1]; + soc_info = camif_lite_priv->soc_info; + soc_private = + (struct cam_vfe_soc_private *)soc_info->soc_private; + + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.hw_idx = camif_lite_node->hw_intf->hw_idx; + evt_info.res_id = camif_lite_node->res_id; + evt_info.res_type = camif_lite_node->res_type; + + 
CAM_DBG(CAM_ISP, "irq_status_0 = 0x%x irq_status_1 = 0x%x", + irq_status0, irq_status1); + + if (irq_status0 & camif_lite_priv->reg_data->lite_sof_irq_mask) { + CAM_DBG(CAM_ISP, "VFE:%d CAMIF Lite Received SOF", + evt_info.hw_idx); + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (irq_status0 & camif_lite_priv->reg_data->lite_epoch0_irq_mask) { + CAM_DBG(CAM_ISP, "VFE:%d CAMIF Lite Received EPOCH", + evt_info.hw_idx); + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (irq_status0 & camif_lite_priv->reg_data->dual_pd_reg_upd_irq_mask) { + CAM_DBG(CAM_ISP, "VFE:%d CAMIF Lite Received REG_UPDATE_ACK", + evt_info.hw_idx); + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (irq_status0 & camif_lite_priv->reg_data->lite_eof_irq_mask) { + CAM_DBG(CAM_ISP, "VFE:%d CAMIF Lite Received EOF", + evt_info.hw_idx); + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if ((irq_status0 & camif_lite_priv->reg_data->lite_err_irq_mask0) || + (irq_status1 & camif_lite_priv->reg_data->lite_err_irq_mask1)) { + CAM_DBG(CAM_ISP, "VFE:%d CAMIF LITE Received ERROR", + evt_info.hw_idx); + + err_evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW; + evt_info.event_data = (void *)&err_evt_info; + + cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle); + + ktime_get_boottime_ts64(&ts); + CAM_INFO(CAM_ISP, + "current monotonic timestamp:[%lld.%09lld]", + ts.tv_sec, ts.tv_nsec); + CAM_INFO(CAM_ISP, + "ERROR timestamp:[%lld.%09lld]", + camif_lite_priv->error_ts.tv_sec, + camif_lite_priv->error_ts.tv_nsec); + CAM_INFO(CAM_ISP, "ife_clk_src:%lld", + soc_private->ife_clk_src); + + if (camif_lite_priv->event_cb) + camif_lite_priv->event_cb(camif_lite_priv->priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_OVERFLOW; + + cam_cpas_log_votes(false); + + } + + cam_vfe_camif_lite_put_evt_payload(camif_lite_priv, &payload); + + CAM_DBG(CAM_ISP, "returning status = %d", ret); + return ret; +} + +int cam_vfe_camif_lite_ver2_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info 
*soc_info, + void *camif_lite_hw_info, + struct cam_isp_resource_node *camif_lite_node, + void *vfe_irq_controller) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_priv = NULL; + struct cam_vfe_camif_lite_ver2_hw_info *camif_lite_info = + camif_lite_hw_info; + int i = 0; + + camif_lite_priv = CAM_MEM_ZALLOC(sizeof(*camif_lite_priv), + GFP_KERNEL); + if (!camif_lite_priv) + return -ENOMEM; + + camif_lite_node->res_priv = camif_lite_priv; + + camif_lite_priv->mem_base = + soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + camif_lite_priv->camif_lite_reg = camif_lite_info->camif_lite_reg; + camif_lite_priv->common_reg = camif_lite_info->common_reg; + camif_lite_priv->reg_data = camif_lite_info->reg_data; + camif_lite_priv->hw_intf = hw_intf; + camif_lite_priv->soc_info = soc_info; + camif_lite_priv->vfe_irq_controller = vfe_irq_controller; + + camif_lite_node->init = NULL; + camif_lite_node->deinit = NULL; + camif_lite_node->start = cam_vfe_camif_lite_resource_start; + camif_lite_node->stop = cam_vfe_camif_lite_resource_stop; + camif_lite_node->process_cmd = cam_vfe_camif_lite_process_cmd; + camif_lite_node->top_half_handler = + cam_vfe_camif_lite_handle_irq_top_half; + camif_lite_node->bottom_half_handler = + cam_vfe_camif_lite_handle_irq_bottom_half; + + spin_lock_init(&camif_lite_priv->spin_lock); + INIT_LIST_HEAD(&camif_lite_priv->free_payload_list); + for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++) { + INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list); + list_add_tail(&camif_lite_priv->evt_payload[i].list, + &camif_lite_priv->free_payload_list); + } + + return 0; +} + +int cam_vfe_camif_lite_ver2_deinit( + struct cam_isp_resource_node *camif_lite_node) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_priv = + camif_lite_node->res_priv; + int i = 0; + + if (!camif_lite_priv) { + CAM_WARN(CAM_ISP, "Error! 
camif_priv is NULL"); + return -ENODEV; + } + + INIT_LIST_HEAD(&camif_lite_priv->free_payload_list); + for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++) + INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list); + + camif_lite_node->start = NULL; + camif_lite_node->stop = NULL; + camif_lite_node->process_cmd = NULL; + camif_lite_node->top_half_handler = NULL; + camif_lite_node->bottom_half_handler = NULL; + camif_lite_node->res_priv = NULL; + + CAM_MEM_FREE(camif_lite_priv); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h new file mode 100644 index 0000000000..7813e55f50 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_CAMIF_LITE_VER2_H_ +#define _CAM_VFE_CAMIF_LITE_VER2_H_ + +#include "cam_isp_hw.h" +#include "cam_vfe_top.h" + +#define CAM_VFE_CAMIF_LITE_EVT_MAX 256 + +struct cam_vfe_camif_lite_ver2_reg { + uint32_t camif_lite_cmd; + uint32_t camif_lite_config; + uint32_t lite_skip_period; + uint32_t lite_irq_subsample_pattern; + uint32_t lite_epoch_irq; + uint32_t reg_update_cmd; +}; + +struct cam_vfe_camif_lite_ver2_reg_data { + uint32_t dual_pd_reg_update_cmd_data; + uint32_t lite_epoch_line_cfg; + uint32_t lite_sof_irq_mask; + uint32_t lite_epoch0_irq_mask; + uint32_t dual_pd_reg_upd_irq_mask; + uint32_t lite_eof_irq_mask; + uint32_t lite_err_irq_mask0; + uint32_t lite_err_irq_mask1; + uint32_t lite_subscribe_irq_mask0; + uint32_t lite_subscribe_irq_mask1; + uint32_t extern_reg_update_shift; + uint32_t dual_pd_path_sel_shift; +}; + +struct cam_vfe_camif_lite_ver2_hw_info { + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_camif_lite_ver2_reg *camif_lite_reg; + struct cam_vfe_camif_lite_ver2_reg_data *reg_data; +}; + +int cam_vfe_camif_lite_ver2_acquire_resource( + struct cam_isp_resource_node *camif_lite_res, + void *acquire_param); + +int cam_vfe_camif_lite_ver2_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *camif_lite_hw_info, + struct cam_isp_resource_node *camif_lite_node, + void *vfe_irq_controller); + +int cam_vfe_camif_lite_ver2_deinit( + struct cam_isp_resource_node *camif_node); + +#endif /* _CAM_VFE_CAMIF_LITE_VER2_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c new file mode 100644 index 0000000000..cc46df24f9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c @@ -0,0 +1,1333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * 
Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include "cam_io_util.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_isp_hw.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_soc.h" +#include "cam_vfe_top.h" +#include "cam_vfe_top_ver3.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_vfe_camif_lite_ver3.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_cpas_api.h" +#include "cam_mem_mgr_api.h" + +struct cam_vfe_mux_camif_lite_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_vfe_camif_lite_ver3_reg *camif_lite_reg; + struct cam_vfe_top_ver3_reg_offset_common *common_reg; + struct cam_vfe_camif_lite_ver3_reg_data *reg_data; + struct cam_hw_soc_info *soc_info; + enum cam_isp_hw_sync_mode sync_mode; + struct cam_vfe_camif_common_cfg cam_common_cfg; + + cam_hw_mgr_event_cb_func event_cb; + void *priv; + int irq_err_handle; + int irq_handle; + int sof_irq_handle; + void *vfe_irq_controller; + struct list_head free_payload_list; + spinlock_t spin_lock; + uint32_t camif_debug; + struct cam_vfe_top_irq_evt_payload + evt_payload[CAM_VFE_CAMIF_LITE_EVT_MAX]; + struct timespec64 sof_ts; + struct timespec64 epoch_ts; + struct timespec64 eof_ts; + struct timespec64 error_ts; +}; + +static int cam_vfe_camif_lite_get_evt_payload( + struct cam_vfe_mux_camif_lite_data *camif_lite_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + int rc = 0; + + spin_lock(&camif_lite_priv->spin_lock); + if (list_empty(&camif_lite_priv->free_payload_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free CAMIF LITE event payload"); + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry(&camif_lite_priv->free_payload_list, + struct cam_vfe_top_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); + rc = 0; +done: + spin_unlock(&camif_lite_priv->spin_lock); 
+ return rc; +} + +static int cam_vfe_camif_lite_put_evt_payload( + struct cam_vfe_mux_camif_lite_data *camif_lite_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!camif_lite_priv) { + CAM_ERR(CAM_ISP, "Invalid param core_info NULL"); + return -EINVAL; + } + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "No payload to put"); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_vfe_top_irq_evt_payload); + spin_lock_irqsave(&camif_lite_priv->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &camif_lite_priv->free_payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&camif_lite_priv->spin_lock, flags); + + CAM_DBG(CAM_ISP, "Done"); + return 0; +} + +static int cam_vfe_camif_lite_err_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *camif_lite_node; + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + struct cam_vfe_soc_private *soc_private = NULL; + bool error_flag = false; + + camif_lite_node = th_payload->handler_priv; + camif_lite_priv = camif_lite_node->res_priv; + + soc_private = camif_lite_priv->soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Invalid soc_private"); + return -ENODEV; + } + + /* + * need to handle overflow condition here, otherwise irq storm + * will block everything + */ + if (th_payload->evt_status_arr[2] || (th_payload->evt_status_arr[0] & + camif_lite_priv->reg_data->error_irq_mask0)) { + CAM_ERR(CAM_ISP, + "VFE:%d CAMIF LITE:%d Err IRQ status_1: 0x%X status_2: 0x%X", + camif_lite_node->hw_intf->hw_idx, + camif_lite_node->res_id, + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[2]); + CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d", + camif_lite_node->hw_intf->hw_idx); + cam_irq_controller_disable_all( + camif_lite_priv->vfe_irq_controller); + error_flag = true; + } + + rc 
= cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload); + if (rc) + return rc; + + cam_isp_hw_get_timestamp(&evt_payload->ts); + if (error_flag) { + camif_lite_priv->error_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + camif_lite_priv->error_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + } + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + evt_payload->irq_reg_val[i] = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->violation_status); + + evt_payload->irq_reg_val[++i] = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->bus_overflow_status); + + th_payload->evt_payload_priv = evt_payload; + + return rc; +} + + +static int cam_vfe_camif_lite_get_reg_update( + struct cam_isp_resource_node *camif_lite_res, + void *cmd_args, + uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t reg_val_pair[2]; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_vfe_mux_camif_lite_data *rsrc_data = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res) { + CAM_ERR(CAM_ISP, + "Invalid args: cdm args %pK", cdm_args); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "CAMIF LITE:%d %s get RUP", camif_lite_res->res_id, + camif_lite_res->res_name); + + cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + rsrc_data = camif_lite_res->res_priv; + reg_val_pair[0] = rsrc_data->camif_lite_reg->reg_update_cmd; + 
reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data; + CAM_DBG(CAM_ISP, "CAMIF LITE:%d %s reg_update_cmd 0x%X offset 0x%X", + camif_lite_res->res_id, camif_lite_res->res_name, + reg_val_pair[1], reg_val_pair[0]); + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + + cdm_args->cmd.used_bytes = size * 4; + + return 0; +} + +int cam_vfe_camif_lite_ver3_acquire_resource( + struct cam_isp_resource_node *camif_lite_res, + void *acquire_param) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_data; + struct cam_vfe_acquire_args *acquire_data; + + if (!camif_lite_res) { + CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + camif_lite_data = (struct cam_vfe_mux_camif_lite_data *) + camif_lite_res->res_priv; + acquire_data = (struct cam_vfe_acquire_args *)acquire_param; + + camif_lite_data->sync_mode = acquire_data->vfe_in.sync_mode; + camif_lite_data->event_cb = acquire_data->event_cb; + camif_lite_data->priv = acquire_data->priv; + camif_lite_res->is_rdi_primary_res = false; + CAM_DBG(CAM_ISP, "Acquired VFE:%d CAMIF LITE:%d %s sync_mode=%d", + camif_lite_res->hw_intf->hw_idx, + camif_lite_res->res_id, + camif_lite_res->res_name, + camif_lite_data->sync_mode); + return 0; +} + +static int cam_vfe_camif_lite_resource_start( + struct cam_isp_resource_node *camif_lite_res) +{ + struct cam_vfe_mux_camif_lite_data *rsrc_data; + struct cam_vfe_soc_private *soc_private = NULL; + uint32_t val = 0; + int rc = 0; + uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + uint32_t irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + + if (!camif_lite_res) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + if (camif_lite_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Invalid camif lite res res_state:%d", + camif_lite_res->res_state); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "CAMIF LITE:%d %s Start", camif_lite_res->res_id, + camif_lite_res->res_name); + + rsrc_data = (struct 
cam_vfe_mux_camif_lite_data *) + camif_lite_res->res_priv; + + soc_private = rsrc_data->soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Invalid soc_private"); + return -ENODEV; + } + + if (soc_private->is_ife_lite) + goto skip_core_cfg; + + /* vfe core config */ + val = cam_io_r_mb(rsrc_data->mem_base + + rsrc_data->common_reg->core_cfg_0); + + if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_LCR && + rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + val |= (1 << rsrc_data->reg_data->extern_reg_update_shift); + + if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB) { + val |= (1 << rsrc_data->reg_data->operating_mode_shift); + val |= (rsrc_data->cam_common_cfg.input_mux_sel_pdaf & 0x1) << + CAM_SHIFT_TOP_CORE_CFG_MUXSEL_PDAF; + } + + cam_io_w_mb(val, rsrc_data->mem_base + + rsrc_data->common_reg->core_cfg_0); + + CAM_DBG(CAM_ISP, "VFE:%d core_cfg val:%d", + camif_lite_res->hw_intf->hw_idx, val); + + /* epoch config */ + cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg, + rsrc_data->mem_base + + rsrc_data->camif_lite_reg->lite_epoch_irq); + + rsrc_data->error_ts.tv_sec = 0; + rsrc_data->error_ts.tv_nsec = 0; + rsrc_data->sof_ts.tv_sec = 0; + rsrc_data->sof_ts.tv_nsec = 0; + rsrc_data->epoch_ts.tv_sec = 0; + rsrc_data->epoch_ts.tv_nsec = 0; + rsrc_data->eof_ts.tv_sec = 0; + rsrc_data->eof_ts.tv_nsec = 0; + +skip_core_cfg: + + camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + /* Reg Update */ + cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data, + rsrc_data->mem_base + + rsrc_data->camif_lite_reg->reg_update_cmd); + + memset(err_irq_mask, 0, sizeof(err_irq_mask)); + memset(irq_mask, 0, sizeof(irq_mask)); + + val = cam_io_r(rsrc_data->mem_base + rsrc_data->common_reg->top_debug_cfg); + val |= rsrc_data->reg_data->top_debug_cfg_en; + cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->top_debug_cfg); + + if (!camif_lite_res->is_rdi_primary_res) + goto subscribe_err; + + irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] = + 
rsrc_data->reg_data->epoch0_irq_mask | + rsrc_data->reg_data->eof_irq_mask; + + if (!rsrc_data->irq_handle) { + rsrc_data->irq_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_3, + irq_mask, + camif_lite_res, + camif_lite_res->top_half_handler, + camif_lite_res->bottom_half_handler, + camif_lite_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (rsrc_data->irq_handle < 1) { + CAM_ERR(CAM_ISP, "IRQ handle subscribe failure"); + rc = -ENOMEM; + rsrc_data->irq_handle = 0; + } + } + + irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] = + rsrc_data->reg_data->sof_irq_mask; + + if (!rsrc_data->sof_irq_handle) { + rsrc_data->sof_irq_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_1, + irq_mask, + camif_lite_res, + camif_lite_res->top_half_handler, + camif_lite_res->bottom_half_handler, + camif_lite_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (rsrc_data->sof_irq_handle < 1) { + CAM_ERR(CAM_ISP, "IRQ handle subscribe failure"); + rc = -ENOMEM; + rsrc_data->sof_irq_handle = 0; + } + } + +subscribe_err: + + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] = + rsrc_data->reg_data->error_irq_mask0; + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS2] = + rsrc_data->reg_data->error_irq_mask2; + + if (!rsrc_data->irq_err_handle) { + rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_0, + err_irq_mask, + camif_lite_res, + cam_vfe_camif_lite_err_irq_top_half, + camif_lite_res->bottom_half_handler, + camif_lite_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (rsrc_data->irq_err_handle < 1) { + CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure"); + rc = -ENOMEM; + rsrc_data->irq_err_handle = 0; + } + } + + CAM_DBG(CAM_ISP, "VFE:%d CAMIF LITE:%d %s Start Done", + camif_lite_res->hw_intf->hw_idx, + camif_lite_res->res_id, camif_lite_res->res_name); + return rc; +} + +static int 
cam_vfe_camif_lite_reg_dump( + struct cam_isp_resource_node *camif_lite_res) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + struct cam_vfe_soc_private *soc_private = NULL; + uint32_t offset, val, wm_idx; + + if (!camif_lite_res) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + if ((camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) || + (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)) + return 0; + + camif_lite_priv = + (struct cam_vfe_mux_camif_lite_data *)camif_lite_res->res_priv; + + soc_private = camif_lite_priv->soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Invalid soc_private"); + return -ENODEV; + } + + CAM_INFO(CAM_ISP, "IFE:%d TOP", camif_lite_priv->hw_intf->hw_idx); + if (!soc_private->is_ife_lite) { + for (offset = 0x0; offset <= 0x1FC; offset += 0x4) { + if (offset == 0x1C || offset == 0x34 || + offset == 0x38 || offset == 0x90) + continue; + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + } + } else { + for (offset = 0x0; offset <= 0x74; offset += 0x4) { + if (offset == 0xC || offset == 0x20 || offset == 0x24) + continue; + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + } + } + + if (camif_lite_res->res_id != CAM_ISP_HW_VFE_IN_RDI0) + goto dump_rdi_1; + + CAM_INFO(CAM_ISP, "IFE:%d RDI0 CAMIF", + camif_lite_priv->hw_intf->hw_idx); + if (!soc_private->is_ife_lite) { + for (offset = 0x9A00; offset <= 0x9BFC; offset += 0x4) { + if (offset == 0x9A08) + offset = 0x9A60; + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0x9A60) + offset = 0x9A64; + else if (offset == 0x9A70) + offset = 0x9AEC; + } + } else { + for (offset = 0x1200; offset <= 0x13FC; offset += 0x4) { + if (offset == 0x1208) + offset = 0x1260; + val = 
cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0x1260) + offset = 0x1264; + else if (offset == 0x1270) + offset = 0x12EC; + } + } + + goto wr_dump; + +dump_rdi_1: + if (camif_lite_res->res_id != CAM_ISP_HW_VFE_IN_RDI1) + goto dump_rdi_2; + + CAM_INFO(CAM_ISP, "IFE:%d RDI1 CAMIF", + camif_lite_priv->hw_intf->hw_idx); + if (!soc_private->is_ife_lite) { + for (offset = 0x9C00; offset <= 0x9DFC; offset += 0x4) { + if (offset == 0x9C08) + offset = 0x9C60; + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0x9C60) + offset = 0x9C64; + else if (offset == 0x9C70) + offset = 0x9DEC; + } + } else { + for (offset = 0x1400; offset <= 0x15FC; offset += 0x4) { + if (offset == 0x1408) + offset = 0x1460; + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0x1460) + offset = 0x1464; + else if (offset == 0x1470) + offset = 0x15EC; + } + } + + goto wr_dump; + +dump_rdi_2: + if (camif_lite_res->res_id != CAM_ISP_HW_VFE_IN_RDI2) + goto dump_rdi_3; + + CAM_INFO(CAM_ISP, "IFE:%d RDI2 CAMIF", + camif_lite_priv->hw_intf->hw_idx); + if (!soc_private->is_ife_lite) { + for (offset = 0x9E00; offset <= 0x9FFC; offset += 0x4) { + if (offset == 0x9E08) + offset = 0x9E60; + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0x9E60) + offset = 0x9E64; + else if (offset == 0x9E70) + offset = 0x9FEC; + } + } else { + for (offset = 0x1600; offset <= 0x17FC; offset += 0x4) { + if (offset == 0x1608) + offset = 0x1660; + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0x1660) + offset = 0x1664; + else if (offset == 0x1670) + offset = 0x17EC; + } + } + + goto 
wr_dump; + +dump_rdi_3: + if (camif_lite_res->res_id != CAM_ISP_HW_VFE_IN_RDI3) + goto dump_pdlib; + + CAM_INFO(CAM_ISP, "IFE:%d RDI3 CAMIF", + camif_lite_priv->hw_intf->hw_idx); + if (soc_private->is_ife_lite) { + for (offset = 0x1800; offset <= 0x19FC; offset += 0x4) { + if (offset == 0x1808) + offset = 0x1860; + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0x1860) + offset = 0x1864; + else if (offset == 0x1870) + offset = 0x19EC; + } + } + + goto wr_dump; + +dump_pdlib: + if (camif_lite_res->res_id != CAM_ISP_HW_VFE_IN_PDLIB) + goto dump_lcr; + + CAM_INFO(CAM_ISP, "IFE:%d PDLIB CAMIF", + camif_lite_priv->hw_intf->hw_idx); + for (offset = 0xA400; offset <= 0xA5FC; offset += 0x4) { + if (offset == 0xA408) + offset = 0xA460; + val = cam_soc_util_r(camif_lite_priv->soc_info, 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0xA460) + offset = 0xA464; + else if (offset == 0xA470) + offset = 0xA5EC; + } + + CAM_INFO(CAM_ISP, "IFE:%d CLC PDLIB", + camif_lite_priv->hw_intf->hw_idx); + for (offset = 0xA600; offset <= 0xA718; offset += 0x4) { + val = cam_soc_util_r(camif_lite_priv->soc_info, 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", offset, val); + } + + goto wr_dump; + +dump_lcr: + CAM_INFO(CAM_ISP, "IFE:%d LCR CAMIF", camif_lite_priv->hw_intf->hw_idx); + for (offset = 0xA000; offset <= 0xA1FC; offset += 0x4) { + if (offset == 0xA008) + offset = 0xA060; + val = cam_soc_util_r(camif_lite_priv->soc_info, 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0xA060) + offset = 0xA064; + else if (offset == 0xA070) + offset = 0xA1EC; + } + + CAM_INFO(CAM_ISP, "IFE:%d CLC LCR", camif_lite_priv->hw_intf->hw_idx); + for (offset = 0xA200; offset <= 0xA3FC; offset += 0x4) { + if (offset == 0xA208) + offset = 0xA260; + else if (offset == 0xA288) + offset = 0xA3F8; + val = 
cam_soc_util_r(camif_lite_priv->soc_info, 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + if (offset == 0xA260) + offset = 0xA264; + else if (offset == 0xA280) + offset = 0xA1EC; + } + +wr_dump: + if (!soc_private->is_ife_lite) + goto end_dump; + + CAM_INFO(CAM_ISP, "IFE:%d LITE BUS WR", + camif_lite_priv->hw_intf->hw_idx); + for (offset = 0x1A00; offset <= 0x1AE0; offset += 0x4) { + val = cam_soc_util_r(camif_lite_priv->soc_info, 0, offset); + CAM_DBG(CAM_ISP, "offset 0x%X value 0x%X", offset, val); + } + + for (wm_idx = 0; wm_idx <= 3; wm_idx++) { + for (offset = 0x1C00 + 0x100 * wm_idx; offset < (0x1C00 + + 0x100 * wm_idx + 0x84); offset += 0x4) { + val = cam_soc_util_r(camif_lite_priv->soc_info, + 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", + offset, val); + } + } + +end_dump: + return 0; +} + +static int cam_vfe_camif_lite_resource_stop( + struct cam_isp_resource_node *camif_lite_res) +{ + struct cam_vfe_mux_camif_lite_data *rsrc_data; + int rc = 0; + + if (!camif_lite_res) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "VFE:%d CAMIF LITE:%d %s Stop", + camif_lite_res->hw_intf->hw_idx, + camif_lite_res->res_id, + camif_lite_res->res_name); + + if ((camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) || + (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)) + return 0; + + rsrc_data = + (struct cam_vfe_mux_camif_lite_data *)camif_lite_res->res_priv; + + /* Disable Camif */ + cam_io_w_mb(0x0, rsrc_data->mem_base + + rsrc_data->camif_lite_reg->lite_module_config); + + if (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + if (rsrc_data->irq_handle > 0) { + cam_irq_controller_unsubscribe_irq( + rsrc_data->vfe_irq_controller, + rsrc_data->irq_handle); + rsrc_data->irq_handle = 0; + } + + if (rsrc_data->sof_irq_handle > 0) { + cam_irq_controller_unsubscribe_irq( + 
rsrc_data->vfe_irq_controller, + rsrc_data->sof_irq_handle); + rsrc_data->sof_irq_handle = 0; + } + + if (rsrc_data->irq_err_handle > 0) { + cam_irq_controller_unsubscribe_irq( + rsrc_data->vfe_irq_controller, + rsrc_data->irq_err_handle); + rsrc_data->irq_err_handle = 0; + } + + return rc; +} + +static int cam_vfe_camif_lite_ver3_core_config( + struct cam_isp_resource_node *rsrc_node, void *cmd_args) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + struct cam_vfe_core_config_args *vfe_core_cfg = + (struct cam_vfe_core_config_args *)cmd_args; + + camif_lite_priv = + (struct cam_vfe_mux_camif_lite_data *)rsrc_node->res_priv; + camif_lite_priv->cam_common_cfg.vid_ds16_r2pd = + vfe_core_cfg->core_config.vid_ds16_r2pd; + camif_lite_priv->cam_common_cfg.vid_ds4_r2pd = + vfe_core_cfg->core_config.vid_ds4_r2pd; + camif_lite_priv->cam_common_cfg.disp_ds16_r2pd = + vfe_core_cfg->core_config.disp_ds16_r2pd; + camif_lite_priv->cam_common_cfg.disp_ds4_r2pd = + vfe_core_cfg->core_config.disp_ds4_r2pd; + camif_lite_priv->cam_common_cfg.dsp_streaming_tap_point = + vfe_core_cfg->core_config.dsp_streaming_tap_point; + camif_lite_priv->cam_common_cfg.ihist_src_sel = + vfe_core_cfg->core_config.ihist_src_sel; + camif_lite_priv->cam_common_cfg.hdr_be_src_sel = + vfe_core_cfg->core_config.hdr_be_src_sel; + camif_lite_priv->cam_common_cfg.hdr_bhist_src_sel = + vfe_core_cfg->core_config.hdr_bhist_src_sel; + camif_lite_priv->cam_common_cfg.input_mux_sel_pdaf = + vfe_core_cfg->core_config.input_mux_sel_pdaf; + camif_lite_priv->cam_common_cfg.input_mux_sel_pp = + vfe_core_cfg->core_config.input_mux_sel_pp; + + return 0; +} + +static int cam_vfe_camif_lite_process_cmd( + struct cam_isp_resource_node *rsrc_node, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + struct cam_vfe_mux_camif_lite_data *camif_lite_priv = NULL; + + if (!rsrc_node || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + 
case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_vfe_camif_lite_get_reg_update(rsrc_node, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CORE_CONFIG: + rc = cam_vfe_camif_lite_ver3_core_config(rsrc_node, cmd_args); + break; + case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG: + camif_lite_priv = (struct cam_vfe_mux_camif_lite_data *) + rsrc_node->res_priv; + camif_lite_priv->camif_debug = *((uint32_t *)cmd_args); + break; + case CAM_ISP_HW_CMD_BLANKING_UPDATE: + rc = 0; + break; + default: + CAM_ERR(CAM_ISP, + "unsupported process command:%d", cmd_type); + break; + } + + return rc; +} + +static void cam_vfe_camif_lite_overflow_debug_info( + struct cam_vfe_mux_camif_lite_data *camif_lite_priv) +{ + struct cam_vfe_soc_private *soc_private = NULL; + uint32_t val0, val1, val2, val3; + + soc_private = camif_lite_priv->soc_info->soc_private; + + val0 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_0); + val1 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_1); + val2 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_2); + val3 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_3); + CAM_INFO(CAM_ISP, + "status_0: 0x%X status_1: 0x%X status_2: 0x%X status_3: 0x%X", + val0, val1, val2, val3); + + if (soc_private->is_ife_lite) + return; + + val0 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_4); + val1 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_5); + val2 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_6); + val3 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_7); + CAM_INFO(CAM_ISP, + "status_4: 0x%X status_5: 0x%X status_6: 0x%X status_7: 0x%X", + val0, val1, val2, val3); + val0 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_8); + val1 = cam_io_r(camif_lite_priv->mem_base + + 
camif_lite_priv->common_reg->top_debug_9); + val2 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_10); + val3 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_11); + CAM_INFO(CAM_ISP, + "status_8: 0x%X status_9: 0x%X status_10: 0x%X status_11: 0x%X", + val0, val1, val2, val3); + val0 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_12); + val1 = cam_io_r(camif_lite_priv->mem_base + + camif_lite_priv->common_reg->top_debug_13); + CAM_INFO(CAM_ISP, "status_12: 0x%X status_13: 0x%X", + val0, val1); +} + +static void cam_vfe_camif_lite_print_status(uint32_t *status, + int err_type, struct cam_vfe_mux_camif_lite_data *camif_lite_priv) +{ + uint32_t violation_mask = 0x3F00, violation_status = 0; + uint32_t bus_overflow_status = 0, status_0 = 0, status_2 = 0; + struct cam_vfe_soc_private *soc_private = NULL; + + if (!status) { + CAM_ERR(CAM_ISP, "Invalid params"); + return; + } + + bus_overflow_status = status[CAM_IFE_IRQ_BUS_OVERFLOW_STATUS]; + violation_status = status[CAM_IFE_IRQ_VIOLATION_STATUS]; + status_0 = status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]; + status_2 = status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]; + soc_private = camif_lite_priv->soc_info->soc_private; + + if (soc_private->is_ife_lite) + goto ife_lite; + + if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW) { + if (status_0 & 0x200000) + CAM_INFO(CAM_ISP, "RDI2 FRAME DROP"); + + if (status_0 & 0x400000) + CAM_INFO(CAM_ISP, "RDI1 FRAME DROP"); + + if (status_0 & 0x800000) + CAM_INFO(CAM_ISP, "RDI0 FRAME DROP"); + + if (status_0 & 0x1000000) + CAM_INFO(CAM_ISP, "PD PIPE FRAME DROP"); + + if (status_0 & 0x8000000) + CAM_INFO(CAM_ISP, "RDI2 OVERFLOW"); + + if (status_0 & 0x10000000) + CAM_INFO(CAM_ISP, "RDI1 OVERFLOW"); + + if (status_0 & 0x20000000) + CAM_INFO(CAM_ISP, "RDI0 OVERFLOW"); + + if (status_0 & 0x40000000) { + CAM_INFO(CAM_ISP, "PD PIPE OVERFLOW"); + cam_cpas_log_votes(false); + } + } + + if (err_type == 
CAM_VFE_IRQ_STATUS_OVERFLOW && bus_overflow_status) { + if (bus_overflow_status & 0x0800) + CAM_INFO(CAM_ISP, "CAMIF PD BUS OVERFLOW"); + + if (bus_overflow_status & 0x0400000) + CAM_INFO(CAM_ISP, "LCR BUS OVERFLOW"); + + if (bus_overflow_status & 0x0800000) + CAM_INFO(CAM_ISP, "RDI0 BUS OVERFLOW"); + + if (bus_overflow_status & 0x01000000) + CAM_INFO(CAM_ISP, "RDI1 BUS OVERFLOW"); + + if (bus_overflow_status & 0x02000000) + CAM_INFO(CAM_ISP, "RDI2 BUS OVERFLOW"); + } + + if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW && !bus_overflow_status) { + CAM_INFO(CAM_ISP, "PDLIB / LCR Module hang"); + /* print debug registers */ + cam_vfe_camif_lite_overflow_debug_info(camif_lite_priv); + } + + if (err_type == CAM_VFE_IRQ_STATUS_VIOLATION) { + if (status_2 & 0x02000) + CAM_INFO(CAM_ISP, "PD CAMIF VIOLATION"); + + if (status_2 & 0x04000) + CAM_INFO(CAM_ISP, "PD VIOLATION"); + + if (status_2 & 0x08000) + CAM_INFO(CAM_ISP, "LCR CAMIF VIOLATION"); + + if (status_2 & 0x010000) + CAM_INFO(CAM_ISP, "LCR VIOLATION"); + + if (status_2 & 0x020000) + CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION"); + + if (status_2 & 0x040000) + CAM_INFO(CAM_ISP, "RDI1 CAMIF VIOLATION"); + + if (status_2 & 0x080000) + CAM_INFO(CAM_ISP, "RDI2 CAMIF VIOLATION"); + } + + if (err_type == CAM_VFE_IRQ_STATUS_VIOLATION && violation_status) { + if (violation_mask & violation_status) + CAM_INFO(CAM_ISP, "LCR VIOLATION Module ID:%d", + violation_mask & violation_status); + + violation_mask = 0x0F0000; + if (violation_mask & violation_status) + CAM_INFO(CAM_ISP, "PD VIOLATION Module ID:%d", + violation_mask & violation_status); + + } + + goto print_state; + +ife_lite: + if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW) { + if (status_0 & 0x100) + CAM_INFO(CAM_ISP, "RDI3 FRAME DROP"); + + if (status_0 & 0x80) + CAM_INFO(CAM_ISP, "RDI2 FRAME DROP"); + + if (status_0 & 0x40) + CAM_INFO(CAM_ISP, "RDI1 FRAME DROP"); + + if (status_0 & 0x20) + CAM_INFO(CAM_ISP, "RDI0 FRAME DROP"); + + if (status_0 & 0x8) + CAM_INFO(CAM_ISP, "RDI3 
OVERFLOW"); + + if (status_0 & 0x4) + CAM_INFO(CAM_ISP, "RDI2 OVERFLOW"); + + if (status_0 & 0x2) + CAM_INFO(CAM_ISP, "RDI1 OVERFLOW"); + + if (status_0 & 0x1) + CAM_INFO(CAM_ISP, "RDI0 OVERFLOW"); + } + + if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW && bus_overflow_status) { + if (bus_overflow_status & 0x01) + CAM_INFO(CAM_ISP, "RDI0 BUS OVERFLOW"); + + if (bus_overflow_status & 0x02) + CAM_INFO(CAM_ISP, "RDI1 BUS OVERFLOW"); + + if (bus_overflow_status & 0x04) + CAM_INFO(CAM_ISP, "RDI2 BUS OVERFLOW"); + + if (bus_overflow_status & 0x08) + CAM_INFO(CAM_ISP, "RDI3 BUS OVERFLOW"); + } + + if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW && !bus_overflow_status) { + CAM_INFO(CAM_ISP, "RDI hang"); + /* print debug registers */ + cam_vfe_camif_lite_overflow_debug_info(camif_lite_priv); + } + + if (err_type == CAM_VFE_IRQ_STATUS_VIOLATION) { + if (status_2 & 0x100) + CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION"); + + if (status_2 & 0x200) + CAM_INFO(CAM_ISP, "RDI1 CAMIF VIOLATION"); + + if (status_2 & 0x400) + CAM_INFO(CAM_ISP, "RDI2 CAMIF VIOLATION"); + + if (status_2 & 0x800) + CAM_INFO(CAM_ISP, "RDI3 CAMIF VIOLATION"); + } + +print_state: + cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle); + + CAM_INFO(CAM_ISP, "ife_clk_src:%lld", soc_private->ife_clk_src); + + if ((err_type == CAM_VFE_IRQ_STATUS_OVERFLOW) && + bus_overflow_status) + cam_cpas_log_votes(false); + +} + +static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *camif_lite_node; + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + + camif_lite_node = th_payload->handler_priv; + camif_lite_priv = camif_lite_node->res_priv; + + CAM_DBG(CAM_ISP, + "VFE:%d CAMIF LITE:%d %s IRQ status_0: 0x%X status_1: 0x%X status_2: 0x%X", + camif_lite_node->hw_intf->hw_idx, + camif_lite_node->res_id, + camif_lite_node->res_name, + th_payload->evt_status_arr[0], 
+ th_payload->evt_status_arr[1], + th_payload->evt_status_arr[2]); + + rc = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload); + if (rc) { + CAM_INFO_RATE_LIMIT(CAM_ISP, + "VFE:%d CAMIF LITE:%d IRQ status_0: 0x%X status_1: 0x%X status_2: 0x%X", + camif_lite_node->hw_intf->hw_idx, + camif_lite_node->res_id, + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1], + th_payload->evt_status_arr[2]); + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + th_payload->evt_payload_priv = evt_payload; + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +static int cam_vfe_camif_lite_handle_irq_bottom_half( + void *handler_priv, + void *evt_payload_priv) +{ + int ret = CAM_VFE_IRQ_STATUS_MAX; + struct cam_isp_resource_node *camif_lite_node; + struct cam_vfe_mux_camif_lite_data *camif_lite_priv; + struct cam_vfe_top_irq_evt_payload *payload; + struct cam_isp_hw_event_info evt_info; + struct cam_isp_hw_error_event_info err_evt_info; + struct cam_vfe_soc_private *soc_private = NULL; + uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX] = {0}; + int i = 0; + uint32_t status_0 = 0; + struct timespec64 ts; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return ret; + } + + camif_lite_node = handler_priv; + camif_lite_priv = camif_lite_node->res_priv; + payload = evt_payload_priv; + + soc_private = camif_lite_priv->soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Invalid soc_private"); + return -ENODEV; + } + + for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) + irq_status[i] = payload->irq_reg_val[i]; + + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.hw_idx = camif_lite_node->hw_intf->hw_idx; + evt_info.res_id = camif_lite_node->res_id; + evt_info.res_type = camif_lite_node->res_type; + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + + CAM_DBG(CAM_ISP, + "VFE:%d CAMIF LITE:%d %s IRQ 
status_0: 0x%X status_1: 0x%X status_2: 0x%X", + evt_info.hw_idx, evt_info.res_id, camif_lite_node->res_name, + irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0], + irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1], + irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]); + + if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1] + & camif_lite_priv->reg_data->sof_irq_mask) { + CAM_DBG(CAM_ISP, "VFE:%d CAMIF LITE:%d %s Received SOF", + evt_info.hw_idx, evt_info.res_id, + camif_lite_node->res_name); + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + camif_lite_priv->sof_ts.tv_sec = + payload->ts.mono_time.tv_sec; + camif_lite_priv->sof_ts.tv_nsec = + payload->ts.mono_time.tv_nsec; + + if (camif_lite_priv->event_cb) + camif_lite_priv->event_cb(camif_lite_priv->priv, + CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); + } + + if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1] + & camif_lite_priv->reg_data->epoch0_irq_mask) { + CAM_DBG(CAM_ISP, "VFE:%d CAMIF LITE:%d %s Received EPOCH", + evt_info.hw_idx, evt_info.res_id, + camif_lite_node->res_name); + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + camif_lite_priv->epoch_ts.tv_sec = + payload->ts.mono_time.tv_sec; + camif_lite_priv->epoch_ts.tv_nsec = + payload->ts.mono_time.tv_nsec; + + if (camif_lite_priv->event_cb) + camif_lite_priv->event_cb(camif_lite_priv->priv, + CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info); + } + + if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1] + & camif_lite_priv->reg_data->eof_irq_mask) { + CAM_DBG(CAM_ISP, "VFE:%d CAMIF LITE:%d %s Received EOF", + evt_info.hw_idx, evt_info.res_id, + camif_lite_node->res_name); + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + camif_lite_priv->eof_ts.tv_sec = + payload->ts.mono_time.tv_sec; + camif_lite_priv->eof_ts.tv_nsec = + payload->ts.mono_time.tv_nsec; + + if (camif_lite_priv->event_cb) + camif_lite_priv->event_cb(camif_lite_priv->priv, + CAM_ISP_HW_EVENT_EOF, (void *)&evt_info); + } + + status_0 = irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0] + & camif_lite_priv->reg_data->error_irq_mask0; + if (status_0) { + CAM_ERR(CAM_ISP, "VFE:%d 
Overflow", + camif_lite_node->hw_intf->hw_idx); + + err_evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW; + evt_info.event_data = (void *)&err_evt_info; + ktime_get_boottime_ts64(&ts); + CAM_INFO(CAM_ISP, + "current monotonic timestamp:[%lld.%09lld]", + ts.tv_sec, ts.tv_nsec); + CAM_INFO(CAM_ISP, + "ERROR timestamp:[%lld.%09lld]", + camif_lite_priv->error_ts.tv_sec, + camif_lite_priv->error_ts.tv_nsec); + + if (camif_lite_node->is_rdi_primary_res) + CAM_INFO(CAM_ISP, + "SOF timestamp:[%lld.%09lld] EPOCH timestamp:[%lld.%09lld] EOF timestamp:[%lld.%09lld]", + camif_lite_priv->sof_ts.tv_sec, + camif_lite_priv->sof_ts.tv_nsec, + camif_lite_priv->epoch_ts.tv_sec, + camif_lite_priv->epoch_ts.tv_nsec, + camif_lite_priv->eof_ts.tv_sec, + camif_lite_priv->eof_ts.tv_nsec); + + if (status_0 & 0x8000000) + evt_info.res_id = CAM_ISP_IFE_OUT_RES_RDI_2; + + if (status_0 & 0x10000000) + evt_info.res_id = CAM_ISP_IFE_OUT_RES_RDI_1; + + if (status_0 & 0x20000000) + evt_info.res_id = CAM_ISP_IFE_OUT_RES_RDI_0; + + if (camif_lite_priv->event_cb) + camif_lite_priv->event_cb(camif_lite_priv->priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_OVERFLOW; + + CAM_INFO(CAM_ISP, "ife_clk_src:%lld", + soc_private->ife_clk_src); + + cam_vfe_camif_lite_print_status(irq_status, ret, + camif_lite_priv); + + if (camif_lite_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP) + cam_vfe_camif_lite_reg_dump(camif_lite_node); + } + + if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]) { + CAM_ERR(CAM_ISP, "VFE:%d Violation", + camif_lite_node->hw_intf->hw_idx); + + err_evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION; + evt_info.event_data = (void *)&err_evt_info; + + if (camif_lite_priv->event_cb) + camif_lite_priv->event_cb(camif_lite_priv->priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_VIOLATION; + + CAM_INFO(CAM_ISP, "ife_clk_src:%lld", + soc_private->ife_clk_src); + + cam_vfe_camif_lite_print_status(irq_status, ret, + camif_lite_priv); + + if 
(camif_lite_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP) + cam_vfe_camif_lite_reg_dump(camif_lite_node); + } + + cam_vfe_camif_lite_put_evt_payload(camif_lite_priv, &payload); + + CAM_DBG(CAM_ISP, "returning status = %d", ret); + return ret; +} + +int cam_vfe_camif_lite_ver3_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *camif_lite_hw_info, + struct cam_isp_resource_node *camif_lite_node, + void *vfe_irq_controller) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_priv = NULL; + struct cam_vfe_camif_lite_ver3_hw_info *camif_lite_info = + camif_lite_hw_info; + int i = 0; + + CAM_DBG(CAM_ISP, "VFE:%d CAMIF LITE:%d %s Init", + camif_lite_node->hw_intf->hw_idx, camif_lite_node->res_id, + camif_lite_node->res_name); + + camif_lite_priv = CAM_MEM_ZALLOC(sizeof(*camif_lite_priv), + GFP_KERNEL); + if (!camif_lite_priv) + return -ENOMEM; + + camif_lite_node->res_priv = camif_lite_priv; + + camif_lite_priv->mem_base = + soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + camif_lite_priv->camif_lite_reg = camif_lite_info->camif_lite_reg; + camif_lite_priv->common_reg = camif_lite_info->common_reg; + camif_lite_priv->reg_data = camif_lite_info->reg_data; + camif_lite_priv->hw_intf = hw_intf; + camif_lite_priv->soc_info = soc_info; + camif_lite_priv->vfe_irq_controller = vfe_irq_controller; + + camif_lite_node->init = NULL; + camif_lite_node->deinit = NULL; + camif_lite_node->start = cam_vfe_camif_lite_resource_start; + camif_lite_node->stop = cam_vfe_camif_lite_resource_stop; + camif_lite_node->process_cmd = cam_vfe_camif_lite_process_cmd; + camif_lite_node->top_half_handler = + cam_vfe_camif_lite_handle_irq_top_half; + camif_lite_node->bottom_half_handler = + cam_vfe_camif_lite_handle_irq_bottom_half; + + spin_lock_init(&camif_lite_priv->spin_lock); + INIT_LIST_HEAD(&camif_lite_priv->free_payload_list); + for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++) { + INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list); + 
list_add_tail(&camif_lite_priv->evt_payload[i].list, + &camif_lite_priv->free_payload_list); + } + + return 0; +} + +int cam_vfe_camif_lite_ver3_deinit( + struct cam_isp_resource_node *camif_lite_node) +{ + struct cam_vfe_mux_camif_lite_data *camif_lite_priv = + camif_lite_node->res_priv; + int i = 0; + + if (!camif_lite_priv) { + CAM_ERR(CAM_ISP, "Error! camif_priv is NULL"); + return -ENODEV; + } + + CAM_DBG(CAM_ISP, "VFE:%d CAMIF LITE:%d %s Deinit", + camif_lite_node->hw_intf->hw_idx, camif_lite_node->res_id, + camif_lite_node->res_name); + + INIT_LIST_HEAD(&camif_lite_priv->free_payload_list); + for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++) + INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list); + + camif_lite_node->start = NULL; + camif_lite_node->stop = NULL; + camif_lite_node->process_cmd = NULL; + camif_lite_node->top_half_handler = NULL; + camif_lite_node->bottom_half_handler = NULL; + + camif_lite_node->res_priv = NULL; + + CAM_MEM_FREE(camif_lite_priv); + + return 0; +} + diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h new file mode 100644 index 0000000000..54a38bdf41 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_CAMIF_LITE_VER3_H_ +#define _CAM_VFE_CAMIF_LITE_VER3_H_ + +#include "cam_isp_hw.h" +#include "cam_vfe_top.h" + +#define CAM_VFE_RDI_VER2_MAX 4 +#define CAM_VFE_CAMIF_LITE_EVT_MAX 256 + +struct cam_vfe_camif_lite_ver3_reg { + uint32_t lite_hw_version; + uint32_t lite_hw_status; + uint32_t lite_module_config; + uint32_t lite_skip_period; + uint32_t lite_irq_subsample_pattern; + uint32_t lite_epoch_irq; + uint32_t lite_debug_1; + uint32_t lite_debug_0; + uint32_t lite_test_bus_ctrl; + uint32_t camif_lite_spare; + uint32_t reg_update_cmd; +}; + +struct cam_vfe_camif_lite_ver3_reg_data { + uint32_t extern_reg_update_shift; + uint32_t operating_mode_shift; + uint32_t input_mux_sel_shift; + uint32_t reg_update_cmd_data; + uint32_t epoch_line_cfg; + uint32_t sof_irq_mask; + uint32_t epoch0_irq_mask; + uint32_t epoch1_irq_mask; + uint32_t eof_irq_mask; + uint32_t error_irq_mask0; + uint32_t error_irq_mask2; + uint32_t subscribe_irq_mask1; + uint32_t enable_diagnostic_hw; + uint32_t top_debug_cfg_en; +}; + +struct cam_vfe_camif_lite_ver3_hw_info { + struct cam_vfe_top_ver3_reg_offset_common *common_reg; + struct cam_vfe_camif_lite_ver3_reg *camif_lite_reg; + struct cam_vfe_camif_lite_ver3_reg_data *reg_data; +}; + +int cam_vfe_camif_lite_ver3_acquire_resource( + struct cam_isp_resource_node *camif_lite_res, + void *acquire_param); + +int cam_vfe_camif_lite_ver3_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *camif_lite_hw_info, + struct cam_isp_resource_node *camif_lite_node, + void *vfe_irq_controller); + +int cam_vfe_camif_lite_ver3_deinit( + struct cam_isp_resource_node *camif_node); + +#endif /* _CAM_VFE_CAMIF_LITE_VER3_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c new file mode 100644 index 0000000000..c56f26b4d9 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c @@ -0,0 +1,999 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/slab.h> +#include <uapi/media/cam_isp.h> +#include "cam_io_util.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_isp_hw.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_soc.h" +#include "cam_vfe_top.h" +#include "cam_vfe_top_ver2.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_vfe_camif_ver2.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_cpas_api.h" +#include "cam_mem_mgr_api.h" + +#define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2 + +struct cam_vfe_mux_camif_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_vfe_camif_ver2_reg *camif_reg; + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_camif_reg_data *reg_data; + struct cam_hw_soc_info *soc_info; + + cam_hw_mgr_event_cb_func event_cb; + void *priv; + int irq_err_handle; + int irq_handle; + void *vfe_irq_controller; + struct cam_vfe_top_irq_evt_payload evt_payload[CAM_VFE_CAMIF_EVT_MAX]; + struct list_head free_payload_list; + spinlock_t spin_lock; + + enum cam_isp_hw_sync_mode sync_mode; + uint32_t dsp_mode; + uint32_t pix_pattern; + uint32_t first_pixel; + uint32_t first_line; + uint32_t last_pixel; + uint32_t last_line; + uint32_t hbi_value; + uint32_t vbi_value; + bool enable_sof_irq_debug; + uint32_t irq_debug_cnt; + uint32_t camif_debug; + uint32_t dual_hw_idx; + uint32_t is_dual; + struct timespec64 sof_ts; + struct timespec64 epoch_ts; + struct timespec64 eof_ts; + struct timespec64 error_ts; +}; + +static int cam_vfe_camif_get_evt_payload( + struct cam_vfe_mux_camif_data *camif_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + int rc = 0; + + spin_lock(&camif_priv->spin_lock); + 
if (list_empty(&camif_priv->free_payload_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload"); + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry(&camif_priv->free_payload_list, + struct cam_vfe_top_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); +done: + spin_unlock(&camif_priv->spin_lock); + return rc; +} + +static int cam_vfe_camif_put_evt_payload( + struct cam_vfe_mux_camif_data *camif_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!camif_priv) { + CAM_ERR(CAM_ISP, "Invalid param core_info NULL"); + return -EINVAL; + } + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "No payload to put"); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_vfe_top_irq_evt_payload); + spin_lock_irqsave(&camif_priv->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &camif_priv->free_payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&camif_priv->spin_lock, flags); + + CAM_DBG(CAM_ISP, "Done"); + return 0; +} + +static int cam_vfe_camif_err_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *camif_node; + struct cam_vfe_mux_camif_data *camif_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + bool error_flag = false; + + CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x", + th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]); + + camif_node = th_payload->handler_priv; + camif_priv = camif_node->res_priv; + /* + * need to handle overflow condition here, otherwise irq storm + * will block everything + */ + if (th_payload->evt_status_arr[1] || (th_payload->evt_status_arr[0] & + camif_priv->reg_data->error_irq_mask0)) { + CAM_ERR(CAM_ISP, + "Camif Error: vfe:%d: IRQ STATUS_0=0x%x STATUS_1=0x%x", + camif_node->hw_intf->hw_idx, + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1]); + CAM_ERR(CAM_ISP, "Stopping further IRQ processing from 
vfe=%d", + camif_node->hw_intf->hw_idx); + cam_irq_controller_disable_all( + camif_priv->vfe_irq_controller); + error_flag = true; + } + + rc = cam_vfe_camif_get_evt_payload(camif_priv, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "No tasklet_cmd is free in queue"); + CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_1=0x%x", + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1]); + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + if (error_flag) { + camif_priv->error_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + camif_priv->error_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + } + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + evt_payload->irq_reg_val[i] = cam_io_r(camif_priv->mem_base + + camif_priv->common_reg->violation_status); + + if (error_flag) + CAM_INFO(CAM_ISP, "Violation status = 0x%x", + evt_payload->irq_reg_val[2]); + + th_payload->evt_payload_priv = evt_payload; + + return rc; +} + +static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern) +{ + int rc; + + switch (pattern) { + case CAM_ISP_PATTERN_BAYER_RGRGRG: + case CAM_ISP_PATTERN_BAYER_GRGRGR: + case CAM_ISP_PATTERN_BAYER_BGBGBG: + case CAM_ISP_PATTERN_BAYER_GBGBGB: + case CAM_ISP_PATTERN_YUV_YCBYCR: + case CAM_ISP_PATTERN_YUV_YCRYCB: + case CAM_ISP_PATTERN_YUV_CBYCRY: + case CAM_ISP_PATTERN_YUV_CRYCBY: + rc = 0; + break; + default: + CAM_ERR(CAM_ISP, "Error! 
Invalid pix pattern:%d", pattern); + rc = -EINVAL; + break; + } + return rc; +} + +static int cam_vfe_camif_get_reg_update( + struct cam_isp_resource_node *camif_res, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t reg_val_pair[2]; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_vfe_mux_camif_data *rsrc_data = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + rsrc_data = camif_res->res_priv; + reg_val_pair[0] = rsrc_data->camif_reg->reg_update_cmd; + reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data; + CAM_DBG(CAM_ISP, "CAMIF reg_update_cmd %x offset %x", + reg_val_pair[1], reg_val_pair[0]); + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + + cdm_args->cmd.used_bytes = size * 4; + + return 0; +} + +int cam_vfe_camif_ver2_acquire_resource( + struct cam_isp_resource_node *camif_res, + void *acquire_param) +{ + struct cam_vfe_mux_camif_data *camif_data; + struct cam_vfe_acquire_args *acquire_data; + + int rc = 0; + + camif_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv; + acquire_data = (struct cam_vfe_acquire_args *)acquire_param; + + rc = cam_vfe_camif_validate_pix_pattern( + acquire_data->vfe_in.in_port->test_pattern); + if (rc) + return rc; + + camif_data->sync_mode = acquire_data->vfe_in.sync_mode; + 
camif_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern; + camif_data->dsp_mode = acquire_data->vfe_in.in_port->dsp_mode; + camif_data->first_pixel = acquire_data->vfe_in.in_port->left_start; + camif_data->last_pixel = acquire_data->vfe_in.in_port->left_stop; + camif_data->first_line = acquire_data->vfe_in.in_port->line_start; + camif_data->last_line = acquire_data->vfe_in.in_port->line_stop; + camif_data->event_cb = acquire_data->event_cb; + camif_data->priv = acquire_data->priv; + camif_data->is_dual = acquire_data->vfe_in.is_dual; + camif_data->hbi_value = 0; + camif_data->vbi_value = 0; + + if (acquire_data->vfe_in.is_dual) + camif_data->dual_hw_idx = + acquire_data->vfe_in.dual_hw_idx; + + CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d", + camif_res->hw_intf->hw_idx, + camif_data->pix_pattern, camif_data->dsp_mode); + return rc; +} + +static int cam_vfe_camif_resource_init( + struct cam_isp_resource_node *camif_res, + void *init_args, uint32_t arg_size) +{ + struct cam_vfe_mux_camif_data *camif_data; + struct cam_hw_soc_info *soc_info; + int rc = 0; + + if (!camif_res) { + CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + camif_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv; + + soc_info = camif_data->soc_info; + + if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME); + if (rc) + CAM_ERR(CAM_ISP, "failed to enable dsp clk"); + } + camif_data->sof_ts.tv_sec = 0; + camif_data->sof_ts.tv_nsec = 0; + camif_data->epoch_ts.tv_sec = 0; + camif_data->epoch_ts.tv_nsec = 0; + camif_data->eof_ts.tv_sec = 0; + camif_data->eof_ts.tv_nsec = 0; + camif_data->error_ts.tv_sec = 0; + camif_data->error_ts.tv_nsec = 0; + + return rc; +} + +static int cam_vfe_camif_resource_deinit( + struct cam_isp_resource_node *camif_res, + void *init_args, uint32_t arg_size) +{ + struct cam_vfe_mux_camif_data 
*camif_data; + struct cam_hw_soc_info *soc_info; + int rc = 0; + + if (!camif_res) { + CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + camif_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv; + + soc_info = camif_data->soc_info; + + if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + rc = cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME); + if (rc) + CAM_ERR(CAM_ISP, "failed to disable dsp clk"); + } + + return rc; +} + +static int cam_vfe_camif_resource_start( + struct cam_isp_resource_node *camif_res) +{ + struct cam_vfe_mux_camif_data *rsrc_data; + uint32_t val = 0; + uint32_t epoch0_irq_mask; + uint32_t epoch1_irq_mask; + uint32_t computed_epoch_line_cfg; + uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + uint32_t irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + uint32_t dual_vfe_sync_val; + struct cam_vfe_soc_private *soc_private; + + if (!camif_res) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Error! Invalid camif res res_state:%d", + camif_res->res_state); + return -EINVAL; + } + + rsrc_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv; + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] = + rsrc_data->reg_data->error_irq_mask0; + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] = + rsrc_data->reg_data->error_irq_mask1; + irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] = + rsrc_data->reg_data->subscribe_irq_mask0; + irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] = + rsrc_data->reg_data->subscribe_irq_mask1; + + soc_private = rsrc_data->soc_info->soc_private; + + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error! 
soc_private NULL"); + return -ENODEV; + } + + /*config vfe core*/ + val = (rsrc_data->pix_pattern << + rsrc_data->reg_data->pixel_pattern_shift); + if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + val |= (1 << rsrc_data->reg_data->extern_reg_update_shift); + + if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + /* DSP mode reg val is CAM_ISP_DSP_MODE - 1 */ + val |= (((rsrc_data->dsp_mode - 1) & + rsrc_data->reg_data->dsp_mode_mask) << + rsrc_data->reg_data->dsp_mode_shift); + val |= (0x1 << rsrc_data->reg_data->dsp_en_shift); + } + + cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->core_cfg); + + CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d", camif_res->hw_intf->hw_idx, + val); + + /* disable the CGC for stats */ + cam_io_w_mb(0xFFFFFFFF, rsrc_data->mem_base + + rsrc_data->common_reg->module_ctrl[ + CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd); + + /* epoch config */ + switch (soc_private->cpas_version) { + case CAM_CPAS_TITAN_170_V100: + case CAM_CPAS_TITAN_170_V110: + case CAM_CPAS_TITAN_170_V120: + case CAM_CPAS_TITAN_170_V200: + cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg, + rsrc_data->mem_base + + rsrc_data->camif_reg->epoch_irq); + break; + default: + epoch0_irq_mask = (((rsrc_data->last_line + + rsrc_data->vbi_value) - + rsrc_data->first_line) / 2); + if ((epoch0_irq_mask) > + (rsrc_data->last_line - rsrc_data->first_line)) + epoch0_irq_mask = rsrc_data->last_line - + rsrc_data->first_line; + + epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg & + 0xFFFF; + computed_epoch_line_cfg = (epoch0_irq_mask << 16) | + epoch1_irq_mask; + cam_io_w_mb(computed_epoch_line_cfg, + rsrc_data->mem_base + + rsrc_data->camif_reg->epoch_irq); + CAM_DBG(CAM_ISP, "first_line: %u\n" + "last_line: %u vbi: %u\n" + "epoch_line_cfg: 0x%x", + rsrc_data->first_line, + rsrc_data->last_line, + rsrc_data->vbi_value, + computed_epoch_line_cfg); + break; + } + + if (rsrc_data->is_dual && 
rsrc_data->reg_data->dual_vfe_sync_mask) { + dual_vfe_sync_val = (rsrc_data->dual_hw_idx & + rsrc_data->reg_data->dual_vfe_sync_mask) + 1; + cam_io_w_mb(dual_vfe_sync_val, rsrc_data->mem_base + + rsrc_data->camif_reg->dual_vfe_sync); + } + camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + /* Reg Update */ + cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data, + rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd); + CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx, + rsrc_data->reg_data->reg_update_cmd_data); + + /* disable sof irq debug flag */ + rsrc_data->enable_sof_irq_debug = false; + rsrc_data->irq_debug_cnt = 0; + + if (rsrc_data->camif_debug & + CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) { + val = cam_io_r_mb(rsrc_data->mem_base + + rsrc_data->camif_reg->vfe_diag_config); + val |= rsrc_data->reg_data->enable_diagnostic_hw; + cam_io_w_mb(val, rsrc_data->mem_base + + rsrc_data->camif_reg->vfe_diag_config); + } + + if ((rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) && + rsrc_data->is_dual) + goto subscribe_err; + + if (!rsrc_data->irq_handle) { + rsrc_data->irq_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_1, + irq_mask, + camif_res, + camif_res->top_half_handler, + camif_res->bottom_half_handler, + camif_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (rsrc_data->irq_handle < 1) { + CAM_ERR(CAM_ISP, "IRQ handle subscribe failure"); + rsrc_data->irq_handle = 0; + } + } + +subscribe_err: + if (!rsrc_data->irq_err_handle) { + rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_0, + err_irq_mask, + camif_res, + cam_vfe_camif_err_irq_top_half, + camif_res->bottom_half_handler, + camif_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (rsrc_data->irq_err_handle < 1) { + CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure"); + rsrc_data->irq_err_handle = 0; + } + } + + CAM_DBG(CAM_ISP, 
"Start Camif IFE %d Done", camif_res->hw_intf->hw_idx); + return 0; +} + +static int cam_vfe_camif_reg_dump( + struct cam_isp_resource_node *camif_res) +{ + struct cam_vfe_mux_camif_data *camif_priv; + struct cam_vfe_soc_private *soc_private; + uint32_t offset, val, wm_idx; + + if (!camif_res) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) || + (camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)) + return 0; + + camif_priv = (struct cam_vfe_mux_camif_data *)camif_res->res_priv; + for (offset = 0x0; offset < 0x1160; offset += 0x4) { + val = cam_soc_util_r(camif_priv->soc_info, 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%x value 0x%x", offset, val); + } + + for (offset = 0x2000; offset <= 0x20B8; offset += 0x4) { + val = cam_soc_util_r(camif_priv->soc_info, 0, offset); + CAM_INFO(CAM_ISP, "offset 0x%x value 0x%x", offset, val); + } + + for (wm_idx = 0; wm_idx <= 23; wm_idx++) { + for (offset = 0x2200 + 0x100 * wm_idx; + offset < 0x2278 + 0x100 * wm_idx; offset += 0x4) { + val = cam_soc_util_r(camif_priv->soc_info, 0, offset); + CAM_INFO(CAM_ISP, + "offset 0x%x value 0x%x", offset, val); + } + } + + soc_private = camif_priv->soc_info->soc_private; + cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle); + + return 0; +} + +static int cam_vfe_camif_resource_stop( + struct cam_isp_resource_node *camif_res) +{ + struct cam_vfe_mux_camif_data *camif_priv; + int rc = 0; + uint32_t val = 0; + + if (!camif_res) { + CAM_ERR(CAM_ISP, "Error! 
Invalid input arguments"); + return -EINVAL; + } + + if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED || + camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) + return 0; + + camif_priv = (struct cam_vfe_mux_camif_data *)camif_res->res_priv; + + if ((camif_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (camif_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + val = cam_io_r_mb(camif_priv->mem_base + + camif_priv->common_reg->core_cfg); + val &= (~(1 << camif_priv->reg_data->dsp_en_shift)); + cam_io_w_mb(val, camif_priv->mem_base + + camif_priv->common_reg->core_cfg); + } + + if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + val = cam_io_r_mb(camif_priv->mem_base + + camif_priv->camif_reg->vfe_diag_config); + if (val & camif_priv->reg_data->enable_diagnostic_hw) { + val &= ~camif_priv->reg_data->enable_diagnostic_hw; + cam_io_w_mb(val, camif_priv->mem_base + + camif_priv->camif_reg->vfe_diag_config); + } + + if (camif_priv->irq_handle) { + cam_irq_controller_unsubscribe_irq( + camif_priv->vfe_irq_controller, camif_priv->irq_handle); + camif_priv->irq_handle = 0; + } + + if (camif_priv->irq_err_handle) { + cam_irq_controller_unsubscribe_irq( + camif_priv->vfe_irq_controller, + camif_priv->irq_err_handle); + camif_priv->irq_err_handle = 0; + } + + return rc; +} + +static int cam_vfe_camif_sof_irq_debug( + struct cam_isp_resource_node *rsrc_node, void *cmd_args) +{ + struct cam_vfe_mux_camif_data *camif_priv; + uint32_t *enable_sof_irq = (uint32_t *)cmd_args; + + camif_priv = + (struct cam_vfe_mux_camif_data *)rsrc_node->res_priv; + + if (*enable_sof_irq == 1) + camif_priv->enable_sof_irq_debug = true; + else + camif_priv->enable_sof_irq_debug = false; + + return 0; +} + +int cam_vfe_camif_dump_timestamps( + struct cam_isp_resource_node *rsrc_node, void *cmd_args) +{ + struct cam_vfe_mux_camif_data *camif_priv = + (struct cam_vfe_mux_camif_data *)rsrc_node->res_priv; + + 
CAM_INFO(CAM_ISP, + "CAMIF ERROR timestamp:[%lld.%09lld] SOF timestamp:[%lld.%09lld] EPOCH timestamp:[%lld.%09lld] EOF timestamp:[%lld.%09lld]", + camif_priv->error_ts.tv_sec, + camif_priv->error_ts.tv_nsec, + camif_priv->sof_ts.tv_sec, + camif_priv->sof_ts.tv_nsec, + camif_priv->epoch_ts.tv_sec, + camif_priv->epoch_ts.tv_nsec, + camif_priv->eof_ts.tv_sec, + camif_priv->eof_ts.tv_nsec); + + return 0; +} + +static int cam_vfe_camif_blanking_update( + struct cam_isp_resource_node *rsrc_node, void *cmd_args) +{ + struct cam_vfe_mux_camif_data *camif_priv = + (struct cam_vfe_mux_camif_data *)rsrc_node->res_priv; + + struct cam_isp_blanking_config *blanking_config = + (struct cam_isp_blanking_config *)cmd_args; + + camif_priv->hbi_value = blanking_config->hbi; + camif_priv->vbi_value = blanking_config->vbi; + + CAM_DBG(CAM_ISP, "hbi:%d vbi:%d", + camif_priv->hbi_value, camif_priv->vbi_value); + return 0; +} + +static int cam_vfe_camif_process_cmd(struct cam_isp_resource_node *rsrc_node, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + struct cam_vfe_mux_camif_data *camif_priv = NULL; + + if (!rsrc_node || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_vfe_camif_get_reg_update(rsrc_node, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG: + rc = cam_vfe_camif_sof_irq_debug(rsrc_node, cmd_args); + break; + case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG: + camif_priv = + (struct cam_vfe_mux_camif_data *)rsrc_node->res_priv; + camif_priv->camif_debug = *((uint32_t *)cmd_args); + break; + case CAM_ISP_HW_CMD_CAMIF_DATA: + rc = cam_vfe_camif_dump_timestamps(rsrc_node, cmd_args); + break; + case CAM_ISP_HW_CMD_BLANKING_UPDATE: + rc = cam_vfe_camif_blanking_update(rsrc_node, cmd_args); + break; + case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA: + camif_priv = (struct cam_vfe_mux_camif_data *) + rsrc_node->res_priv; + *((struct cam_hw_soc_info 
**)cmd_args) = camif_priv->soc_info; + rc = 0; + break; + default: + CAM_ERR(CAM_ISP, + "unsupported process command:%d", cmd_type); + break; + } + + return rc; +} + +static int cam_vfe_camif_handle_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *camif_node; + struct cam_vfe_mux_camif_data *camif_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + + camif_node = th_payload->handler_priv; + camif_priv = camif_node->res_priv; + + CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]); + CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]); + + rc = cam_vfe_camif_get_evt_payload(camif_priv, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue"); + CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x", + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1]); + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + th_payload->evt_payload_priv = evt_payload; + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + int ret = CAM_VFE_IRQ_STATUS_MAX; + struct cam_isp_resource_node *camif_node; + struct cam_vfe_mux_camif_data *camif_priv; + struct cam_vfe_top_irq_evt_payload *payload; + struct cam_isp_hw_event_info evt_info; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + uint32_t irq_status0; + uint32_t irq_status1; + uint32_t val; + struct cam_isp_hw_error_event_info err_evt_info; + struct timespec64 ts; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return ret; + } + + camif_node = handler_priv; + camif_priv = camif_node->res_priv; + payload = evt_payload_priv; + irq_status0 = 
payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0]; + irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1]; + + soc_info = camif_priv->soc_info; + soc_private = (struct cam_vfe_soc_private *)soc_info->soc_private; + + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.hw_idx = camif_node->hw_intf->hw_idx; + evt_info.res_id = camif_node->res_id; + evt_info.res_type = camif_node->res_type; + + CAM_DBG(CAM_ISP, "irq_status_0 = 0x%x irq_status_1 = 0x%x", + irq_status0, irq_status1); + + if (irq_status0 & camif_priv->reg_data->eof_irq_mask) { + CAM_DBG(CAM_ISP, "Received EOF"); + camif_priv->eof_ts.tv_sec = + payload->ts.mono_time.tv_sec; + camif_priv->eof_ts.tv_nsec = + payload->ts.mono_time.tv_nsec; + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_EOF, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (irq_status0 & camif_priv->reg_data->sof_irq_mask) { + if ((camif_priv->enable_sof_irq_debug) && + (camif_priv->irq_debug_cnt <= + CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF"); + + camif_priv->irq_debug_cnt++; + if (camif_priv->irq_debug_cnt == + CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) { + camif_priv->enable_sof_irq_debug = + false; + camif_priv->irq_debug_cnt = 0; + } + } else { + CAM_DBG(CAM_ISP, "Received SOF"); + camif_priv->sof_ts.tv_sec = + payload->ts.mono_time.tv_sec; + camif_priv->sof_ts.tv_nsec = + payload->ts.mono_time.tv_nsec; + } + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) { + CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK"); + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) { + CAM_DBG(CAM_ISP, "Received 
EPOCH"); + camif_priv->epoch_ts.tv_sec = + payload->ts.mono_time.tv_sec; + camif_priv->epoch_ts.tv_nsec = + payload->ts.mono_time.tv_nsec; + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (irq_status0 & camif_priv->reg_data->error_irq_mask0) { + CAM_DBG(CAM_ISP, "Received ERROR"); + + err_evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW; + evt_info.event_data = (void *)&err_evt_info; + ktime_get_boottime_ts64(&ts); + CAM_INFO(CAM_ISP, + "current monotonic timestamp:[%lld.%09lld]", + ts.tv_sec, ts.tv_nsec); + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + + CAM_INFO(CAM_ISP, "Violation status = %x", + payload->irq_reg_val[2]); + + ret = CAM_VFE_IRQ_STATUS_OVERFLOW; + + CAM_INFO(CAM_ISP, "ife_clk_src:%lld", + soc_private->ife_clk_src); + + cam_cpas_log_votes(false); + + if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP) + cam_vfe_camif_reg_dump(camif_node->res_priv); + } + + if (irq_status1 & camif_priv->reg_data->error_irq_mask1) { + CAM_DBG(CAM_ISP, "Received ERROR"); + + err_evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW; + evt_info.event_data = (void *)&err_evt_info; + ktime_get_boottime_ts64(&ts); + CAM_INFO(CAM_ISP, + "current monotonic timestamp:[%lld.%09lld]", + ts.tv_sec, ts.tv_nsec); + + if (camif_priv->event_cb) + camif_priv->event_cb(camif_priv->priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + + CAM_INFO(CAM_ISP, "Violation status = %x", + payload->irq_reg_val[2]); + + ret = CAM_VFE_IRQ_STATUS_OVERFLOW; + + CAM_INFO(CAM_ISP, "ife_clk_src:%lld", + soc_private->ife_clk_src); + + cam_cpas_log_votes(false); + + if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP) + cam_vfe_camif_reg_dump(camif_node->res_priv); + } + + if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) { + val = cam_io_r(camif_priv->mem_base + + 
camif_priv->camif_reg->vfe_diag_sensor_status); + CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x", + camif_priv->mem_base, val); + } + + cam_vfe_camif_put_evt_payload(camif_priv, &payload); + + CAM_DBG(CAM_ISP, "returning status = %d", ret); + return ret; +} + +int cam_vfe_camif_ver2_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *camif_hw_info, + struct cam_isp_resource_node *camif_node, + void *vfe_irq_controller) +{ + struct cam_vfe_mux_camif_data *camif_priv = NULL; + struct cam_vfe_camif_ver2_hw_info *camif_info = camif_hw_info; + int i = 0; + + camif_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_mux_camif_data), + GFP_KERNEL); + if (!camif_priv) { + CAM_DBG(CAM_ISP, "Error! Failed to alloc for camif_priv"); + return -ENOMEM; + } + + camif_node->res_priv = camif_priv; + + camif_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + camif_priv->camif_reg = camif_info->camif_reg; + camif_priv->common_reg = camif_info->common_reg; + camif_priv->reg_data = camif_info->reg_data; + camif_priv->hw_intf = hw_intf; + camif_priv->soc_info = soc_info; + camif_priv->vfe_irq_controller = vfe_irq_controller; + + camif_node->init = cam_vfe_camif_resource_init; + camif_node->deinit = cam_vfe_camif_resource_deinit; + camif_node->start = cam_vfe_camif_resource_start; + camif_node->stop = cam_vfe_camif_resource_stop; + camif_node->process_cmd = cam_vfe_camif_process_cmd; + camif_node->top_half_handler = cam_vfe_camif_handle_irq_top_half; + camif_node->bottom_half_handler = cam_vfe_camif_handle_irq_bottom_half; + + spin_lock_init(&camif_priv->spin_lock); + INIT_LIST_HEAD(&camif_priv->free_payload_list); + for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) { + INIT_LIST_HEAD(&camif_priv->evt_payload[i].list); + list_add_tail(&camif_priv->evt_payload[i].list, + &camif_priv->free_payload_list); + } + + return 0; +} + +int cam_vfe_camif_ver2_deinit( + struct cam_isp_resource_node *camif_node) +{ + struct cam_vfe_mux_camif_data *camif_priv = 
camif_node->res_priv; + int i = 0; + + if (!camif_priv) { + CAM_ERR(CAM_ISP, "Error! camif_priv is NULL"); + return -ENODEV; + } + + INIT_LIST_HEAD(&camif_priv->free_payload_list); + for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) + INIT_LIST_HEAD(&camif_priv->evt_payload[i].list); + + camif_node->start = NULL; + camif_node->stop = NULL; + camif_node->process_cmd = NULL; + camif_node->top_half_handler = NULL; + camif_node->bottom_half_handler = NULL; + + camif_node->res_priv = NULL; + + + CAM_MEM_FREE(camif_priv); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h new file mode 100644 index 0000000000..26522594b9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_CAMIF_VER2_H_ +#define _CAM_VFE_CAMIF_VER2_H_ + +#include "cam_isp_hw.h" +#include "cam_vfe_top.h" + +struct cam_vfe_camif_ver2_reg { + uint32_t camif_cmd; + uint32_t camif_config; + uint32_t line_skip_pattern; + uint32_t pixel_skip_pattern; + uint32_t skip_period; + uint32_t irq_subsample_pattern; + uint32_t epoch_irq; + uint32_t raw_crop_width_cfg; + uint32_t raw_crop_height_cfg; + uint32_t reg_update_cmd; + uint32_t vfe_diag_config; + uint32_t vfe_diag_sensor_status; + uint32_t dual_vfe_sync; +}; + +struct cam_vfe_camif_reg_data { + uint32_t raw_crop_first_pixel_shift; + uint32_t raw_crop_first_pixel_mask; + + uint32_t raw_crop_last_pixel_shift; + uint32_t raw_crop_last_pixel_mask; + + uint32_t raw_crop_first_line_shift; + uint32_t raw_crop_first_line_mask; + + uint32_t raw_crop_last_line_shift; + uint32_t raw_crop_last_line_mask; + + uint32_t input_mux_sel_shift; + uint32_t input_mux_sel_mask; + uint32_t extern_reg_update_shift; + uint32_t extern_reg_update_mask; + + uint32_t pixel_pattern_shift; + uint32_t pixel_pattern_mask; + + uint32_t dsp_mode_shift; + uint32_t dsp_mode_mask; + uint32_t dsp_en_shift; + uint32_t dsp_en_mask; + + uint32_t reg_update_cmd_data; + uint32_t epoch_line_cfg; + uint32_t sof_irq_mask; + uint32_t epoch0_irq_mask; + uint32_t reg_update_irq_mask; + uint32_t eof_irq_mask; + uint32_t error_irq_mask0; + uint32_t error_irq_mask1; + uint32_t subscribe_irq_mask0; + uint32_t subscribe_irq_mask1; + + uint32_t enable_diagnostic_hw; + uint32_t dual_vfe_sync_mask; +}; + +struct cam_vfe_camif_ver2_hw_info { + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_camif_ver2_reg *camif_reg; + struct cam_vfe_camif_reg_data *reg_data; +}; + +int cam_vfe_camif_ver2_acquire_resource( + struct cam_isp_resource_node *camif_res, + void *acquire_param); + +int cam_vfe_camif_ver2_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *camif_hw_info, + struct cam_isp_resource_node 
*camif_node, + void *vfe_irq_controller); + +int cam_vfe_camif_ver2_deinit( + struct cam_isp_resource_node *camif_node); + +#endif /* _CAM_VFE_CAMIF_VER2_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c new file mode 100644 index 0000000000..353639055a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c @@ -0,0 +1,1609 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include "cam_io_util.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_isp_hw.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_soc.h" +#include "cam_vfe_top.h" +#include "cam_vfe_top_ver3.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_vfe_camif_ver3.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_cpas_api.h" +#include "cam_trace.h" +#include "cam_mem_mgr_api.h" + +#define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2 + +struct cam_vfe_mux_camif_ver3_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_vfe_camif_ver3_pp_clc_reg *camif_reg; + struct cam_vfe_top_ver3_reg_offset_common *common_reg; + struct cam_vfe_camif_ver3_reg_data *reg_data; + struct cam_hw_soc_info *soc_info; + struct cam_vfe_camif_common_cfg cam_common_cfg; + + cam_hw_mgr_event_cb_func event_cb; + void *priv; + int irq_err_handle; + int irq_handle; + int sof_irq_handle; + void *vfe_irq_controller; + struct cam_vfe_top_irq_evt_payload evt_payload[CAM_VFE_CAMIF_EVT_MAX]; + struct list_head free_payload_list; + spinlock_t spin_lock; + + enum cam_isp_hw_sync_mode sync_mode; + uint32_t dsp_mode; + uint32_t pix_pattern; + uint32_t first_pixel; 
+ uint32_t first_line; + uint32_t last_pixel; + uint32_t last_line; + uint32_t hbi_value; + uint32_t vbi_value; + bool enable_sof_irq_debug; + uint32_t irq_debug_cnt; + uint32_t camif_debug; + uint32_t horizontal_bin; + uint32_t qcfa_bin; + uint32_t dual_hw_idx; + uint32_t is_dual; + bool is_fe_enabled; + bool is_offline; + struct timespec64 sof_ts; + struct timespec64 epoch_ts; + struct timespec64 eof_ts; + struct timespec64 error_ts; +}; + +static int cam_vfe_camif_ver3_get_evt_payload( + struct cam_vfe_mux_camif_ver3_data *camif_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + int rc = 0; + + spin_lock(&camif_priv->spin_lock); + if (list_empty(&camif_priv->free_payload_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free CAMIF event payload"); + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry(&camif_priv->free_payload_list, + struct cam_vfe_top_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); +done: + spin_unlock(&camif_priv->spin_lock); + return rc; +} + +static int cam_vfe_camif_ver3_put_evt_payload( + struct cam_vfe_mux_camif_ver3_data *camif_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!camif_priv) { + CAM_ERR(CAM_ISP, "Invalid param core_info NULL"); + return -EINVAL; + } + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "No payload to put"); + return -EINVAL; + } + + CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_vfe_top_irq_evt_payload); + spin_lock_irqsave(&camif_priv->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &camif_priv->free_payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&camif_priv->spin_lock, flags); + + CAM_DBG(CAM_ISP, "Done"); + return 0; +} + +static int cam_vfe_camif_ver3_err_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *camif_node; + struct cam_vfe_mux_camif_ver3_data *camif_priv; + struct cam_vfe_top_irq_evt_payload 
*evt_payload; + bool error_flag = false; + + camif_node = th_payload->handler_priv; + camif_priv = camif_node->res_priv; + /* + * need to handle overflow condition here, otherwise irq storm + * will block everything + */ + if (th_payload->evt_status_arr[2] || (th_payload->evt_status_arr[0] & + camif_priv->reg_data->error_irq_mask0)) { + CAM_ERR(CAM_ISP, + "VFE:%d CAMIF Err IRQ status_0: 0x%X status_2: 0x%X", + camif_node->hw_intf->hw_idx, + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[2]); + CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d", + camif_node->hw_intf->hw_idx); + cam_irq_controller_disable_all( + camif_priv->vfe_irq_controller); + error_flag = true; + } + + rc = cam_vfe_camif_ver3_get_evt_payload(camif_priv, &evt_payload); + if (rc) + return rc; + + cam_isp_hw_get_timestamp(&evt_payload->ts); + if (error_flag) { + camif_priv->error_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + camif_priv->error_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + } + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + evt_payload->irq_reg_val[i] = cam_io_r(camif_priv->mem_base + + camif_priv->common_reg->violation_status); + + evt_payload->irq_reg_val[++i] = cam_io_r(camif_priv->mem_base + + camif_priv->common_reg->bus_overflow_status); + + th_payload->evt_payload_priv = evt_payload; + + return rc; +} + +static int cam_vfe_camif_ver3_validate_pix_pattern(uint32_t pattern, + struct cam_vfe_mux_camif_ver3_data *camif_data) +{ + int rc; + + switch (pattern) { + case CAM_ISP_PATTERN_BAYER_RGRGRG: + case CAM_ISP_PATTERN_BAYER_GRGRGR: + case CAM_ISP_PATTERN_BAYER_BGBGBG: + case CAM_ISP_PATTERN_BAYER_GBGBGB: + camif_data->cam_common_cfg.input_pp_fmt = + camif_data->reg_data->input_bayer_fmt; + rc = 0; + break; + case CAM_ISP_PATTERN_YUV_YCBYCR: + case CAM_ISP_PATTERN_YUV_YCRYCB: + case CAM_ISP_PATTERN_YUV_CBYCRY: + case CAM_ISP_PATTERN_YUV_CRYCBY: + rc = 0; + 
camif_data->cam_common_cfg.input_pp_fmt = + camif_data->reg_data->input_yuv_fmt; + break; + default: + CAM_ERR(CAM_ISP, "Error, Invalid pix pattern:%d", pattern); + rc = -EINVAL; + break; + } + return rc; +} + +static int cam_vfe_camif_ver3_get_reg_update( + struct cam_isp_resource_node *camif_res, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t reg_val_pair[2]; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_vfe_mux_camif_ver3_data *rsrc_data = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Invalid arg size: %d expected:%ld", + arg_size, sizeof(struct cam_isp_hw_get_cmd_update)); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res) { + CAM_ERR(CAM_ISP, "Invalid args: %pK", cdm_args); + return -EINVAL; + } + + cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, (size*4)); + return -EINVAL; + } + + rsrc_data = camif_res->res_priv; + reg_val_pair[0] = rsrc_data->camif_reg->reg_update_cmd; + reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data; + CAM_DBG(CAM_ISP, "VFE:%d CAMIF reg_update_cmd 0x%X offset 0x%X", + camif_res->hw_intf->hw_idx, + reg_val_pair[1], reg_val_pair[0]); + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + + cdm_args->cmd.used_bytes = size * 4; + + return 0; +} + +int cam_vfe_camif_ver3_acquire_resource( + struct cam_isp_resource_node *camif_res, + void *acquire_param) +{ + struct cam_vfe_mux_camif_ver3_data *camif_data; + struct cam_vfe_acquire_args *acquire_data; + int rc = 0; + + camif_data = (struct 
cam_vfe_mux_camif_ver3_data *) + camif_res->res_priv; + acquire_data = (struct cam_vfe_acquire_args *)acquire_param; + + rc = cam_vfe_camif_ver3_validate_pix_pattern( + acquire_data->vfe_in.in_port->test_pattern, camif_data); + + if (rc) { + CAM_ERR(CAM_ISP, "Validate pix pattern failed, rc = %d", rc); + return rc; + } + + camif_data->sync_mode = acquire_data->vfe_in.sync_mode; + camif_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern; + camif_data->dsp_mode = acquire_data->vfe_in.in_port->dsp_mode; + camif_data->first_pixel = acquire_data->vfe_in.in_port->left_start; + camif_data->last_pixel = acquire_data->vfe_in.in_port->left_stop; + camif_data->first_line = acquire_data->vfe_in.in_port->line_start; + camif_data->last_line = acquire_data->vfe_in.in_port->line_stop; + camif_data->is_fe_enabled = acquire_data->vfe_in.is_fe_enabled; + camif_data->is_offline = acquire_data->vfe_in.is_offline; + camif_data->is_dual = acquire_data->vfe_in.is_dual; + camif_data->event_cb = acquire_data->event_cb; + camif_data->priv = acquire_data->priv; + camif_data->qcfa_bin = acquire_data->vfe_in.in_port->qcfa_bin; + camif_data->horizontal_bin = + acquire_data->vfe_in.in_port->horizontal_bin; + camif_data->hbi_value = 0; + camif_data->vbi_value = 0; + + if (camif_data->is_dual) + camif_data->dual_hw_idx = acquire_data->vfe_in.dual_hw_idx; + + CAM_DBG(CAM_ISP, + "VFE:%d CAMIF pix_pattern:%d dsp_mode=%d is_dual:%d dual_hw_idx:%d", + camif_res->hw_intf->hw_idx, + camif_data->pix_pattern, camif_data->dsp_mode, + camif_data->is_dual, camif_data->dual_hw_idx); + + return rc; +} + +static int cam_vfe_camif_ver3_resource_init( + struct cam_isp_resource_node *camif_res, + void *init_args, uint32_t arg_size) +{ + struct cam_vfe_mux_camif_ver3_data *camif_data; + struct cam_hw_soc_info *soc_info; + int rc = 0; + + if (!camif_res) { + CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + camif_data = (struct cam_vfe_mux_camif_ver3_data *) + 
		camif_res->res_priv;

	soc_info = camif_data->soc_info;

	/* DSP clock is only needed when a DSP routing mode is configured */
	if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
		(camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
		rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
		if (rc)
			CAM_ERR(CAM_ISP,
				"failed to enable dsp clk, rc = %d", rc);
	}

	/* Clear cached SOF/EPOCH/EOF/error timestamps from any previous run */
	camif_data->sof_ts.tv_sec = 0;
	camif_data->sof_ts.tv_nsec = 0;
	camif_data->epoch_ts.tv_sec = 0;
	camif_data->epoch_ts.tv_nsec = 0;
	camif_data->eof_ts.tv_sec = 0;
	camif_data->eof_ts.tv_nsec = 0;
	camif_data->error_ts.tv_sec = 0;
	camif_data->error_ts.tv_nsec = 0;

	return rc;
}

/*
 * Counterpart of resource_init: disables the DSP clock if a DSP mode
 * between ONE_WAY and ROUND was configured. init_args/arg_size are
 * unused. Returns 0 or the clock-disable error code.
 */
static int cam_vfe_camif_ver3_resource_deinit(
	struct cam_isp_resource_node *camif_res,
	void *init_args, uint32_t arg_size)
{
	struct cam_vfe_mux_camif_ver3_data *camif_data;
	struct cam_hw_soc_info *soc_info;
	int rc = 0;

	if (!camif_res) {
		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
		return -EINVAL;
	}

	camif_data = (struct cam_vfe_mux_camif_ver3_data *)
		camif_res->res_priv;

	soc_info = camif_data->soc_info;

	if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
		(camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
		rc = cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
		if (rc)
			CAM_ERR(CAM_ISP, "failed to disable dsp clk");
	}

	return rc;
}

/*
 * Start streaming on the CAMIF resource:
 *  - programs top_debug_cfg and core_cfg_0 (DSP mode, dual-IFE sync,
 *    R2PD/stats source selects, operating mode, input mux),
 *  - programs the epoch IRQ line based on first/last line and VBI,
 *  - issues a reg-update (unless offline),
 *  - subscribes EPOCH/EOF, SOF and error IRQs on the VFE controller.
 * Resource must be in RESERVED state; moves it to STREAMING.
 * A dual-IFE slave skips the SOF/EPOCH subscriptions (master drives them)
 * and only subscribes the error IRQ (see subscribe_err label).
 */
static int cam_vfe_camif_ver3_resource_start(
	struct cam_isp_resource_node *camif_res)
{
	struct cam_vfe_mux_camif_ver3_data *rsrc_data;
	uint32_t val = 0;
	uint32_t epoch0_line_cfg;
	uint32_t epoch1_line_cfg;
	uint32_t computed_epoch_line_cfg;
	uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
	uint32_t irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
	struct cam_vfe_soc_private *soc_private;

	if (!camif_res) {
		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
		return -EINVAL;
	}

	if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
		CAM_ERR(CAM_ISP, "Error, Invalid camif res res_state:%d",
			camif_res->res_state);
		return -EINVAL;
	}

	memset(err_irq_mask, 0, sizeof(err_irq_mask));
	memset(irq_mask, 0, sizeof(irq_mask));

	rsrc_data = (struct cam_vfe_mux_camif_ver3_data *)camif_res->res_priv;

	soc_private = rsrc_data->soc_info->soc_private;

	if (!soc_private) {
		CAM_ERR(CAM_ISP, "Error, soc_private NULL");
		return -ENODEV;
	}

	/* config debug status registers */
	val = cam_io_r(rsrc_data->mem_base + rsrc_data->common_reg->top_debug_cfg);
	val |= rsrc_data->reg_data->top_debug_cfg_en;
	cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->top_debug_cfg);

	/* Read-modify-write of core_cfg_0 starts here; written back below */
	val = cam_io_r_mb(rsrc_data->mem_base +
		rsrc_data->common_reg->core_cfg_0);

	/* AF stitching by hw disabled by default
	 * PP CAMIF currently operates only in offline mode
	 */

	if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
		(rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
		/* DSP mode reg val is CAM_ISP_DSP_MODE - 1 */
		if (camif_res->hw_intf->hw_idx != CAM_ISP_HW_VFE_CORE_2) {
			val |= (((rsrc_data->dsp_mode - 1) &
				rsrc_data->reg_data->dsp_mode_mask) <<
				rsrc_data->reg_data->dsp_mode_shift);
			val |= (0x1 << rsrc_data->reg_data->dsp_en_shift);
		} else {
			/* HVX/DSP path is not wired up on IFE core 2 */
			CAM_ERR(CAM_ISP, "Error, HVX not available for IFE_%d",
				camif_res->hw_intf->hw_idx);
			return -EINVAL;
		}
	}

	/* Slave follows master's reg-update in dual-IFE mode */
	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
		val |= (1 << rsrc_data->reg_data->pp_extern_reg_update_shift);

	if ((rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
		(rsrc_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
		val |= (1 << rsrc_data->reg_data->dual_ife_pix_en_shift);

	/* R2PD bits are active-low in the register: invert the cached cfg */
	val |= (~rsrc_data->cam_common_cfg.vid_ds16_r2pd & 0x1) <<
		CAM_SHIFT_TOP_CORE_CFG_VID_DS16_R2PD;
	val |= (~rsrc_data->cam_common_cfg.vid_ds4_r2pd & 0x1) <<
		CAM_SHIFT_TOP_CORE_CFG_VID_DS4_R2PD;
	val |= (~rsrc_data->cam_common_cfg.disp_ds16_r2pd & 0x1) <<
		CAM_SHIFT_TOP_CORE_CFG_DISP_DS16_R2PD;
	val |= (~rsrc_data->cam_common_cfg.disp_ds4_r2pd & 0x1) <<
		CAM_SHIFT_TOP_CORE_CFG_DISP_DS4_R2PD;
	val |= (rsrc_data->cam_common_cfg.dsp_streaming_tap_point & 0x3) <<
		CAM_SHIFT_TOP_CORE_CFG_DSP_STREAMING;
	val |= (rsrc_data->cam_common_cfg.ihist_src_sel & 0x1) <<
		CAM_SHIFT_TOP_CORE_CFG_STATS_IHIST;
	val |= (rsrc_data->cam_common_cfg.hdr_be_src_sel & 0x1) <<
		CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BE;
	val |= (rsrc_data->cam_common_cfg.hdr_bhist_src_sel & 0x1) <<
		CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BHIST;
	val |= (rsrc_data->cam_common_cfg.input_mux_sel_pp & 0x3) <<
		CAM_SHIFT_TOP_CORE_CFG_INPUTMUX_PP;
	val |= (rsrc_data->cam_common_cfg.input_pp_fmt & 0x3) <<
		CAM_SHIFT_TOP_CORE_CFG_INPUT_PP_FMT;

	/* 0x2 = FE (fetch-engine) online mode, 0x1 = normal CAMIF mode */
	if (rsrc_data->is_fe_enabled && !rsrc_data->is_offline)
		val |= 0x2 << rsrc_data->reg_data->operating_mode_shift;
	else
		val |= 0x1 << rsrc_data->reg_data->operating_mode_shift;

	if (rsrc_data->is_dual && rsrc_data->reg_data->dual_vfe_sync_mask) {
		val |= (((rsrc_data->dual_hw_idx &
			rsrc_data->reg_data->dual_vfe_sync_mask) + 1) <<
			rsrc_data->reg_data->dual_ife_sync_sel_shift);
	}

	CAM_DBG(CAM_ISP, "VFE:%d TOP core_cfg: 0x%X",
		camif_res->hw_intf->hw_idx, val);

	cam_io_w_mb(val, rsrc_data->mem_base +
		rsrc_data->common_reg->core_cfg_0);

	/* epoch config */
	switch (soc_private->cpas_version) {
	case CAM_CPAS_TITAN_480_V100:
	case CAM_CPAS_TITAN_580_V100:
	case CAM_CPAS_TITAN_570_V100:
	case CAM_CPAS_TITAN_570_V200:
		/* Epoch0 at ~1/4 of (frame height + VBI), clamped to the
		 * frame midpoint so it always fires inside the frame.
		 */
		epoch0_line_cfg = ((rsrc_data->last_line +
			rsrc_data->vbi_value) -
			rsrc_data->first_line) / 4;
		if ((epoch0_line_cfg * 2) >
			(rsrc_data->last_line - rsrc_data->first_line))
			epoch0_line_cfg = (rsrc_data->last_line -
				rsrc_data->first_line)/2;
		/* epoch line cfg will still be configured at midpoint of the
		 * frame width. We use '/ 4' instead of '/ 2'
		 * cause it is multipixel path
		 */
		if (rsrc_data->horizontal_bin || rsrc_data->qcfa_bin)
			epoch0_line_cfg >>= 1;

		/* Pack epoch1 (upper 16 bits) and epoch0 (lower 16 bits) */
		epoch1_line_cfg = rsrc_data->reg_data->epoch_line_cfg &
			0xFFFF;
		computed_epoch_line_cfg = (epoch1_line_cfg << 16) |
			epoch0_line_cfg;
		cam_io_w_mb(computed_epoch_line_cfg,
			rsrc_data->mem_base +
			rsrc_data->camif_reg->epoch_irq_cfg);
		CAM_DBG(CAM_ISP, "epoch_line_cfg: 0x%X",
			computed_epoch_line_cfg);
		break;
	default:
		CAM_ERR(CAM_ISP, "Hardware version not proper: 0x%X",
			soc_private->cpas_version);
		return -EINVAL;
		break;
	}

	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;

	/* Reg Update */
	if (!rsrc_data->is_offline) {
		cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
			rsrc_data->mem_base +
			rsrc_data->camif_reg->reg_update_cmd);
		CAM_DBG(CAM_ISP, "VFE:%d CAMIF RUP val:0x%X",
			camif_res->hw_intf->hw_idx,
			rsrc_data->reg_data->reg_update_cmd_data);
	}

	/* disable sof irq debug flag */
	rsrc_data->enable_sof_irq_debug = false;
	rsrc_data->irq_debug_cnt = 0;

	if (rsrc_data->camif_debug &
		CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
		val = cam_io_r_mb(rsrc_data->mem_base +
			rsrc_data->common_reg->diag_config);
		val |= rsrc_data->reg_data->enable_diagnostic_hw;
		cam_io_w_mb(val, rsrc_data->mem_base +
			rsrc_data->common_reg->diag_config);
	}

	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
		rsrc_data->reg_data->error_irq_mask0;
	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS2] =
		rsrc_data->reg_data->error_irq_mask2;

	/* Dual-IFE slave: only the error IRQ is subscribed locally */
	if ((rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) &&
		rsrc_data->is_dual)
		goto subscribe_err;

	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
		rsrc_data->reg_data->epoch0_irq_mask |
		rsrc_data->reg_data->eof_irq_mask;

	if (!rsrc_data->irq_handle) {
		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
			rsrc_data->vfe_irq_controller,
			CAM_IRQ_PRIORITY_3,
			irq_mask,
			camif_res,
			camif_res->top_half_handler,
			camif_res->bottom_half_handler,
			camif_res->tasklet_info,
			&tasklet_bh_api,
			CAM_IRQ_EVT_GROUP_0);

		if (rsrc_data->irq_handle < 1) {
			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
			rsrc_data->irq_handle = 0;
		}
	}

	/* SOF gets its own higher-priority subscription; irq_mask is reused */
	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
		rsrc_data->reg_data->sof_irq_mask;
	if (rsrc_data->cam_common_cfg.input_mux_sel_pp & 0x3)
		irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
			rsrc_data->reg_data->frame_id_irq_mask;

	if (!rsrc_data->sof_irq_handle) {
		rsrc_data->sof_irq_handle = cam_irq_controller_subscribe_irq(
			rsrc_data->vfe_irq_controller,
			CAM_IRQ_PRIORITY_1,
			irq_mask,
			camif_res,
			camif_res->top_half_handler,
			camif_res->bottom_half_handler,
			camif_res->tasklet_info,
			&tasklet_bh_api,
			CAM_IRQ_EVT_GROUP_0);

		if (rsrc_data->sof_irq_handle < 1) {
			CAM_ERR(CAM_ISP, "SOF IRQ handle subscribe failure");
			rsrc_data->sof_irq_handle = 0;
		}
	}

subscribe_err:
	if (!rsrc_data->irq_err_handle) {
		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
			rsrc_data->vfe_irq_controller,
			CAM_IRQ_PRIORITY_0,
			err_irq_mask,
			camif_res,
			cam_vfe_camif_ver3_err_irq_top_half,
			camif_res->bottom_half_handler,
			camif_res->tasklet_info,
			&tasklet_bh_api,
			CAM_IRQ_EVT_GROUP_0);

		if (rsrc_data->irq_err_handle < 1) {
			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
			rsrc_data->irq_err_handle = 0;
		}
	}

	CAM_DBG(CAM_ISP, "VFE:%d CAMIF Start Done", camif_res->hw_intf->hw_idx);
	return 0;
}

/*
 * Dump a fixed set of VFE register ranges (TOP, PP CLC PREPROCESS/CAMIF/
 * Modules, BUS RD, BUS WR, per-WM client windows) via CAM_INFO logs.
 * No-op when the resource is still RESERVED or AVAILABLE (hw not running).
 * The in-loop `offset = ...` assignments skip reserved holes in the map.
 */
static int cam_vfe_camif_ver3_reg_dump(
	struct cam_isp_resource_node *camif_res)
{
	struct cam_vfe_mux_camif_ver3_data *camif_priv;
	uint32_t offset, val, wm_idx;

	if (!camif_res) {
		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
		return -EINVAL;
	}

	if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
		(camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
		return 0;

	camif_priv = (struct cam_vfe_mux_camif_ver3_data *)camif_res->res_priv;

	CAM_INFO(CAM_ISP, "IFE:%d TOP", camif_res->hw_intf->hw_idx);
	for (offset = 0x0; offset <= 0x1FC; offset += 0x4) {
		if (offset == 0x1C || offset == 0x34 ||
			offset == 0x38 || offset == 0x90)
			continue;
		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
		CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", offset, val);
	}

	CAM_INFO(CAM_ISP, "IFE:%d PP CLC PREPROCESS",
		camif_res->hw_intf->hw_idx);
	for (offset = 0x2200; offset <= 0x23FC; offset += 0x4) {
		if (offset == 0x2208)
			offset = 0x2260;
		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
		CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", offset, val);
		if (offset == 0x2260)
			offset = 0x23F4;
	}

	CAM_INFO(CAM_ISP, "IFE:%d PP CLC CAMIF", camif_res->hw_intf->hw_idx);
	for (offset = 0x2600; offset <= 0x27FC; offset += 0x4) {
		if (offset == 0x2608)
			offset = 0x2660;
		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
		CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", offset, val);
		if (offset == 0x2660)
			offset = 0x2664;
		else if (offset == 0x2680)
			offset = 0x27EC;
	}

	CAM_INFO(CAM_ISP, "IFE:%d PP CLC Modules", camif_res->hw_intf->hw_idx);
	for (offset = 0x2800; offset <= 0x8FFC; offset += 0x4) {
		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
		CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", offset, val);
	}

	CAM_INFO(CAM_ISP, "IFE:%d BUS RD", camif_res->hw_intf->hw_idx);
	for (offset = 0xA800; offset <= 0xA89C; offset += 0x4) {
		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
		CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X", offset, val);
		if (offset == 0xA838)
			offset = 0xA844;
		if (offset == 0xA864)
			offset = 0xA874;
		if (offset == 0xA878)
			offset = 0xA87C;
	}

	CAM_INFO(CAM_ISP, "IFE:%d BUS WR", camif_res->hw_intf->hw_idx);
	for (offset = 0xAA00; offset <= 0xAADC; offset += 0x4) {
		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
		/* NOTE(review): this range logs at DBG while all other
		 * ranges log at INFO - confirm whether intentional.
		 */
		CAM_DBG(CAM_ISP, "offset 0x%X value 0x%X", offset, val);
	}

	/* 26 write-master clients, 0x100 apart, 0x84 bytes dumped each */
	for (wm_idx = 0; wm_idx <= 25; wm_idx++) {
		for (offset = 0xAC00 + 0x100 * wm_idx;
			offset < 0xAC84 + 0x100 * wm_idx; offset += 0x4) {
			val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
			CAM_INFO(CAM_ISP, "offset 0x%X value 0x%X",
				offset, val);
		}
	}

	return 0;
}

/*
 * Stop the CAMIF resource: clears the DSP enable bit in core_cfg_0 (if a
 * DSP mode was active), moves STREAMING back to RESERVED, disables the
 * diagnostic hw bit if set, and unsubscribes all three IRQ handles.
 * No-op (returns 0) when the resource is RESERVED or AVAILABLE.
 */
static int cam_vfe_camif_ver3_resource_stop(
	struct cam_isp_resource_node *camif_res)
{
	struct cam_vfe_mux_camif_ver3_data *camif_priv;
	int rc = 0;
	uint32_t val = 0;

	if (!camif_res) {
		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
		return -EINVAL;
	}

	if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
		(camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
		return 0;

	camif_priv = (struct cam_vfe_mux_camif_ver3_data *)camif_res->res_priv;

	if ((camif_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
		(camif_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
		val = cam_io_r_mb(camif_priv->mem_base +
			camif_priv->common_reg->core_cfg_0);
		val &= (~(1 << camif_priv->reg_data->dsp_en_shift));
		cam_io_w_mb(val, camif_priv->mem_base +
			camif_priv->common_reg->core_cfg_0);
	}

	if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
		camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;

	/* Turn off sensor diagnostics if start() enabled them */
	val = cam_io_r_mb(camif_priv->mem_base +
		camif_priv->common_reg->diag_config);
	if (val & camif_priv->reg_data->enable_diagnostic_hw) {
		val &= ~camif_priv->reg_data->enable_diagnostic_hw;
		cam_io_w_mb(val, camif_priv->mem_base +
			camif_priv->common_reg->diag_config);
	}

	if (camif_priv->irq_handle) {
		cam_irq_controller_unsubscribe_irq(
			camif_priv->vfe_irq_controller, camif_priv->irq_handle);
		camif_priv->irq_handle = 0;
	}

	if (camif_priv->sof_irq_handle) {
		cam_irq_controller_unsubscribe_irq(
			camif_priv->vfe_irq_controller,
			camif_priv->sof_irq_handle);
		camif_priv->sof_irq_handle = 0;
	}

	if (camif_priv->irq_err_handle) {
		cam_irq_controller_unsubscribe_irq(
			camif_priv->vfe_irq_controller,
			camif_priv->irq_err_handle);
		camif_priv->irq_err_handle = 0;
	}

	return rc;
}
/*
 * Cache the user-supplied VFE core config into camif_priv->cam_common_cfg;
 * the cached values are applied to core_cfg_0 in resource_start().
 * Always returns 0 (cmd_args validity is checked by process_cmd).
 */
static int cam_vfe_camif_ver3_core_config(
	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
{
	struct cam_vfe_mux_camif_ver3_data *camif_priv;
	struct cam_vfe_core_config_args *vfe_core_cfg =
		(struct cam_vfe_core_config_args *)cmd_args;

	camif_priv =
		(struct cam_vfe_mux_camif_ver3_data *)rsrc_node->res_priv;
	camif_priv->cam_common_cfg.vid_ds16_r2pd =
		vfe_core_cfg->core_config.vid_ds16_r2pd;
	camif_priv->cam_common_cfg.vid_ds4_r2pd =
		vfe_core_cfg->core_config.vid_ds4_r2pd;
	camif_priv->cam_common_cfg.disp_ds16_r2pd =
		vfe_core_cfg->core_config.disp_ds16_r2pd;
	camif_priv->cam_common_cfg.disp_ds4_r2pd =
		vfe_core_cfg->core_config.disp_ds4_r2pd;
	camif_priv->cam_common_cfg.dsp_streaming_tap_point =
		vfe_core_cfg->core_config.dsp_streaming_tap_point;
	camif_priv->cam_common_cfg.ihist_src_sel =
		vfe_core_cfg->core_config.ihist_src_sel;
	camif_priv->cam_common_cfg.hdr_be_src_sel =
		vfe_core_cfg->core_config.hdr_be_src_sel;
	camif_priv->cam_common_cfg.hdr_bhist_src_sel =
		vfe_core_cfg->core_config.hdr_bhist_src_sel;
	camif_priv->cam_common_cfg.input_mux_sel_pdaf =
		vfe_core_cfg->core_config.input_mux_sel_pdaf;
	camif_priv->cam_common_cfg.input_mux_sel_pp =
		vfe_core_cfg->core_config.input_mux_sel_pp;

	return 0;
}

/*
 * Toggle the rate-limited SOF IRQ debug logging used by the bottom half.
 * *cmd_args == 1 enables it; anything else disables it. Returns 0.
 */
static int cam_vfe_camif_ver3_sof_irq_debug(
	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
{
	struct cam_vfe_mux_camif_ver3_data *camif_priv;
	uint32_t *enable_sof_irq = (uint32_t *)cmd_args;

	camif_priv =
		(struct cam_vfe_mux_camif_ver3_data *)rsrc_node->res_priv;

	if (*enable_sof_irq == 1)
		camif_priv->enable_sof_irq_debug = true;
	else
		camif_priv->enable_sof_irq_debug = false;

	return 0;
}

/*
 * Log the last cached ERROR/SOF/EPOCH/EOF timestamps (set by the IRQ
 * bottom half and cleared by resource_init). cmd_args is unused.
 */
int cam_vfe_camif_ver3_dump_timestamps(
	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
{
	struct cam_vfe_mux_camif_ver3_data *camif_priv =
		(struct cam_vfe_mux_camif_ver3_data *)rsrc_node->res_priv;

	CAM_INFO(CAM_ISP,
		"CAMIF ERROR timestamp:[%lld.%09lld] SOF timestamp:[%lld.%09lld] EPOCH timestamp:[%lld.%09lld] EOF timestamp:[%lld.%09lld]",
		camif_priv->error_ts.tv_sec,
		camif_priv->error_ts.tv_nsec,
		camif_priv->sof_ts.tv_sec,
		camif_priv->sof_ts.tv_nsec,
		camif_priv->epoch_ts.tv_sec,
		camif_priv->epoch_ts.tv_nsec,
		camif_priv->eof_ts.tv_sec,
		camif_priv->eof_ts.tv_nsec);

	return 0;
}

/*
 * Cache HBI/VBI blanking values; vbi_value feeds the epoch line
 * computation in resource_start(). Returns 0.
 */
static int cam_vfe_camif_ver3_blanking_update(
	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
{
	struct cam_vfe_mux_camif_ver3_data *camif_priv =
		(struct cam_vfe_mux_camif_ver3_data *)rsrc_node->res_priv;

	struct cam_isp_blanking_config *blanking_config =
		(struct cam_isp_blanking_config *)cmd_args;

	camif_priv->hbi_value = blanking_config->hbi;
	camif_priv->vbi_value = blanking_config->vbi;

	CAM_DBG(CAM_ISP, "hbi:%d vbi:%d",
		camif_priv->hbi_value, camif_priv->vbi_value);
	return 0;
}

/*
 * Dispatch ISP HW commands for this resource. Validates rsrc_node and
 * cmd_args once here; the individual handlers rely on that check.
 * Returns the handler's rc, 0 for the inline cases, or -EINVAL for an
 * unsupported cmd_type.
 */
static int cam_vfe_camif_ver3_process_cmd(
	struct cam_isp_resource_node *rsrc_node,
	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
{
	int rc = -EINVAL;
	struct cam_vfe_mux_camif_ver3_data *camif_priv = NULL;

	if (!rsrc_node || !cmd_args) {
		/* FIXME(review): "rsesource" typo in log string; left as-is
		 * since runtime strings are not changed in a doc pass.
		 */
		CAM_ERR(CAM_ISP,
			"Invalid input arguments rsesource node:%pK cmd_args:%pK",
			rsrc_node, cmd_args);
		return -EINVAL;
	}

	switch (cmd_type) {
	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
		rc = cam_vfe_camif_ver3_get_reg_update(rsrc_node, cmd_args,
			arg_size);
		break;
	case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
		rc = cam_vfe_camif_ver3_sof_irq_debug(rsrc_node, cmd_args);
		break;
	case CAM_ISP_HW_CMD_CORE_CONFIG:
		rc = cam_vfe_camif_ver3_core_config(rsrc_node, cmd_args);
		break;
	case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
		/* NOTE(review): rc stays -EINVAL on this path even though the
		 * debug value is applied - confirm callers ignore the rc.
		 */
		camif_priv = (struct cam_vfe_mux_camif_ver3_data *)
			rsrc_node->res_priv;
		camif_priv->camif_debug = *((uint32_t *)cmd_args);
		break;
	case CAM_ISP_HW_CMD_QUERY_REGSPACE_DATA:
		camif_priv = (struct cam_vfe_mux_camif_ver3_data *)
			rsrc_node->res_priv;
		*((struct cam_hw_soc_info **)cmd_args) = camif_priv->soc_info;
		rc = 0;
		break;
	case CAM_ISP_HW_CMD_CAMIF_DATA:
		rc = cam_vfe_camif_ver3_dump_timestamps(rsrc_node, cmd_args);
		break;
	case CAM_ISP_HW_CMD_BLANKING_UPDATE:
		rc = cam_vfe_camif_ver3_blanking_update(rsrc_node, cmd_args);
		break;
	default:
		CAM_ERR(CAM_ISP,
			"unsupported process command:%d", cmd_type);
		break;
	}

	return rc;
}


/*
 * Read and log top_debug_0..top_debug_13 in groups, used when an overflow
 * occurs without any bus-overflow status (module hang diagnosis).
 */
static void cam_vfe_camif_ver3_overflow_debug_info(
	struct cam_vfe_mux_camif_ver3_data *camif_priv)
{
	uint32_t val0, val1, val2, val3;

	val0 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_0);
	val1 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_1);
	val2 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_2);
	val3 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_3);
	CAM_INFO(CAM_ISP,
		"status_0: 0x%X status_1: 0x%X status_2: 0x%X status_3: 0x%X",
		val0, val1, val2, val3);

	val0 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_4);
	val1 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_5);
	val2 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_6);
	val3 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_7);
	CAM_INFO(CAM_ISP,
		"status_4: 0x%X status_5: 0x%X status_6: 0x%X status_7: 0x%X",
		val0, val1, val2, val3);

	val0 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_8);
	val1 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_9);
	val2 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_10);
	val3 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_11);
	CAM_INFO(CAM_ISP,
		"status_8: 0x%X status_9: 0x%X status_10: 0x%X status_11: 0x%X",
		val0, val1, val2, val3);

	val0 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_12);
	val1 = cam_io_r(camif_priv->mem_base +
		camif_priv->common_reg->top_debug_13);
	CAM_INFO(CAM_ISP, "status_12: 0x%X status_13: 0x%X",
		val0, val1);
}

/*
 * Decode and log error IRQ status for OVERFLOW and VIOLATION err_types:
 * per-bit bus-overflow sources, per-bit violation sources, and the
 * violating PP module id (violation status masked with 0x3F, decoded via
 * the switch below). Always ends by dumping CAMNOC buffer fill info and
 * the IFE clock source; logs CPAS votes on pixel-path overflow.
 */
static void cam_vfe_camif_ver3_print_status(uint32_t *status,
	int err_type, struct cam_vfe_mux_camif_ver3_data *camif_priv)
{
	uint32_t violation_mask = 0x3F, module_id = 0;
	uint32_t bus_overflow_status = 0, status_0 = 0, status_2 = 0;
	struct cam_vfe_soc_private *soc_private;

	if (!status) {
		CAM_ERR(CAM_ISP, "Invalid params");
		return;
	}

	bus_overflow_status = status[CAM_IFE_IRQ_BUS_OVERFLOW_STATUS];
	status_0 = status[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
	status_2 = status[CAM_IFE_IRQ_CAMIF_REG_STATUS2];

	if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW) {
		if (status_0 & 0x0200)
			CAM_INFO(CAM_ISP, "DSP OVERFLOW");

		if (status_0 & 0x2000000)
			CAM_INFO(CAM_ISP, "PIXEL PIPE FRAME DROP");

		if (status_0 & 0x80000000)
			CAM_INFO(CAM_ISP, "PIXEL PIPE OVERFLOW");
	}

	/* One log line per asserted bus-overflow client bit */
	if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW && bus_overflow_status) {
		if (bus_overflow_status & 0x01)
			CAM_INFO(CAM_ISP, "VID Y 1:1 BUS OVERFLOW");

		if (bus_overflow_status & 0x02)
			CAM_INFO(CAM_ISP, "VID C 1:1 BUS OVERFLOW");

		if (bus_overflow_status & 0x04)
			CAM_INFO(CAM_ISP, "VID YC 4:1 BUS OVERFLOW");

		if (bus_overflow_status & 0x08)
			CAM_INFO(CAM_ISP, "VID YC 16:1 BUS OVERFLOW");

		if (bus_overflow_status & 0x010)
			CAM_INFO(CAM_ISP, "DISP Y 1:1 BUS OVERFLOW");

		if (bus_overflow_status & 0x020)
			CAM_INFO(CAM_ISP, "DISP C 1:1 BUS OVERFLOW");

		if (bus_overflow_status & 0x040)
			CAM_INFO(CAM_ISP, "DISP YC 4:1 BUS OVERFLOW");

		if (bus_overflow_status & 0x080)
			CAM_INFO(CAM_ISP, "DISP YC 16:1 BUS OVERFLOW");

		if (bus_overflow_status & 0x0100)
			CAM_INFO(CAM_ISP, "FD Y BUS OVERFLOW");

		if (bus_overflow_status & 0x0200)
			CAM_INFO(CAM_ISP, "FD C BUS OVERFLOW");

		if (bus_overflow_status & 0x0400)
			CAM_INFO(CAM_ISP, "PIXEL RAW DUMP BUS OVERFLOW");

		/* NOTE(review): bit 0x0800 has no decode here - confirm
		 * against the bus WR client map.
		 */
		if (bus_overflow_status & 0x01000)
			CAM_INFO(CAM_ISP, "STATS HDR BE BUS OVERFLOW");

		if (bus_overflow_status & 0x02000)
			CAM_INFO(CAM_ISP, "STATS HDR BHIST BUS OVERFLOW");

		if (bus_overflow_status & 0x04000)
			CAM_INFO(CAM_ISP, "STATS TINTLESS BG BUS OVERFLOW");

		if (bus_overflow_status & 0x08000)
			CAM_INFO(CAM_ISP, "STATS AWB BG BUS OVERFLOW");

		if (bus_overflow_status & 0x010000)
			CAM_INFO(CAM_ISP, "STATS BHIST BUS OVERFLOW");

		if (bus_overflow_status & 0x020000)
			CAM_INFO(CAM_ISP, "STATS RS BUS OVERFLOW");

		if (bus_overflow_status & 0x040000)
			CAM_INFO(CAM_ISP, "STATS CS BUS OVERFLOW");

		if (bus_overflow_status & 0x080000)
			CAM_INFO(CAM_ISP, "STATS IHIST BUS OVERFLOW");

		if (bus_overflow_status & 0x0100000)
			CAM_INFO(CAM_ISP, "STATS BAF BUS OVERFLOW");

		if (bus_overflow_status & 0x0200000)
			CAM_INFO(CAM_ISP, "PDAF BUS OVERFLOW");
		goto print_state;
	}

	if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW && !bus_overflow_status) {
		CAM_INFO(CAM_ISP, "PIXEL PIPE Module hang");
		/* print debug registers */
		cam_vfe_camif_ver3_overflow_debug_info(camif_priv);
		goto print_state;
	}

	if (err_type == CAM_VFE_IRQ_STATUS_VIOLATION) {
		if (status_2 & 0x080)
			CAM_INFO(CAM_ISP, "DSP IFE PROTOCOL VIOLATION");

		if (status_2 & 0x0100)
			CAM_INFO(CAM_ISP, "IFE DSP TX PROTOCOL VIOLATION");

		if (status_2 & 0x0200)
			CAM_INFO(CAM_ISP, "DSP IFE RX PROTOCOL VIOLATION");

		if (status_2 & 0x0400)
			CAM_INFO(CAM_ISP, "PP PREPROCESS VIOLATION");

		if (status_2 & 0x0800)
			CAM_INFO(CAM_ISP, "PP CAMIF VIOLATION");

		if (status_2 & 0x01000)
			CAM_INFO(CAM_ISP, "PP VIOLATION");

		if (status_2 & 0x0100000)
			CAM_INFO(CAM_ISP,
				"DSP_TX_VIOLATION:overflow on DSP interface TX path FIFO");

		if (status_2 & 0x0200000)
			CAM_INFO(CAM_ISP,
				"DSP_RX_VIOLATION:overflow on DSP interface RX path FIFO");

		if (status_2 & 0x10000000)
			CAM_INFO(CAM_ISP, "DSP ERROR VIOLATION");

		if (status_2 & 0x20000000)
			CAM_INFO(CAM_ISP,
				"DIAG VIOLATION: HBI is less than the minimum required HBI");
	}

	if (err_type == CAM_VFE_IRQ_STATUS_VIOLATION &&
		status[CAM_IFE_IRQ_VIOLATION_STATUS]) {
		module_id =
			violation_mask & status[CAM_IFE_IRQ_VIOLATION_STATUS];
		CAM_INFO(CAM_ISP, "PIXEL PIPE VIOLATION Module ID:%d",
			module_id);

		/* Map the 6-bit module id to the PP CLC module name */
		switch (module_id) {
		case 0:
			CAM_INFO(CAM_ISP, "DEMUX");
			break;
		case 1:
			CAM_INFO(CAM_ISP, "CHROMA_UP");
			break;
		case 2:
			CAM_INFO(CAM_ISP, "PEDESTAL");
			break;
		case 3:
			CAM_INFO(CAM_ISP, "LINEARIZATION");
			break;
		case 4:
			CAM_INFO(CAM_ISP, "BPC_PDPC");
			break;
		case 5:
			CAM_INFO(CAM_ISP, "HDR_BINCORRECT");
			break;
		case 6:
			CAM_INFO(CAM_ISP, "ABF");
			break;
		case 7:
			CAM_INFO(CAM_ISP, "LSC");
			break;
		case 8:
			CAM_INFO(CAM_ISP, "DEMOSAIC");
			break;
		case 9:
			CAM_INFO(CAM_ISP, "COLOR_CORRECT");
			break;
		case 10:
			CAM_INFO(CAM_ISP, "GTM");
			break;
		case 11:
			CAM_INFO(CAM_ISP, "GLUT");
			break;
		case 12:
			CAM_INFO(CAM_ISP, "COLOR_XFORM");
			break;
		case 13:
			CAM_INFO(CAM_ISP, "CROP_RND_CLAMP_PIXEL_RAW_OUT");
			break;
		case 14:
			CAM_INFO(CAM_ISP, "DOWNSCALE_MN_Y_FD_OUT");
			break;
		case 15:
			CAM_INFO(CAM_ISP, "DOWNSCALE_MN_C_FD_OUT");
			break;
		case 16:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_FD_OUT");
			break;
		case 17:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_FD_OUT");
			break;
		case 18:
			CAM_INFO(CAM_ISP, "DOWNSCALE_MN_Y_DISP_OUT");
			break;
		case 19:
			CAM_INFO(CAM_ISP, "DOWNSCALE_MN_C_DISP_OUT");
			break;
		case 20:
			CAM_INFO(CAM_ISP,
				"module: CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_DISP_OUT");
			break;
		case 21:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_DISP_OUT");
			break;
		case 22:
			CAM_INFO(CAM_ISP, "DOWNSCALE_4TO1_Y_DISP_DS4_OUT");
			break;
		case 23:
			CAM_INFO(CAM_ISP, "DOWNSCALE_4TO1_C_DISP_DS4_OUT");
			break;
		case 24:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS4_OUT");
			break;
		case 25:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS4_OUT");
			break;
		case 26:
			CAM_INFO(CAM_ISP, "DOWNSCALE_4TO1_Y_DISP_DS16_OUT");
			break;
		case 27:
			CAM_INFO(CAM_ISP, "DOWNSCALE_4TO1_C_DISP_DS16_OUT");
			break;
		case 28:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS16_OUT");
			break;
		case 29:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS16_OUT");
			break;
		case 30:
			CAM_INFO(CAM_ISP, "DOWNSCALE_MN_Y_VID_OUT");
			break;
		case 31:
			CAM_INFO(CAM_ISP, "DOWNSCALE_MN_C_VID_OUT");
			break;
		case 32:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_VID_OUT");
			break;
		case 33:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_VID_OUT");
			break;
		case 34:
			CAM_INFO(CAM_ISP, "DSX_Y_VID_OUT");
			break;
		case 35:
			CAM_INFO(CAM_ISP, "DSX_C_VID_OUT");
			break;
		case 36:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DSX_Y_VID_OUT");
			break;
		case 37:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DSX_C_VID_OUT");
			break;
		case 38:
			CAM_INFO(CAM_ISP,
				"DOWNSCALE_4TO1_Y_VID_DS16_OUT");
			break;
		case 39:
			CAM_INFO(CAM_ISP,
				"DOWNSCALE_4TO1_C_VID_DS16_OUT");
			break;
		case 40:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_VID_DS16_OUT");
			break;
		case 41:
			CAM_INFO(CAM_ISP,
				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_VID_DS16_OUT");
			break;
		case 42:
			CAM_INFO(CAM_ISP, "BLS");
			break;
		case 43:
			CAM_INFO(CAM_ISP, "STATS_TINTLESS_BG");
			break;
		case 44:
			CAM_INFO(CAM_ISP, "STATS_HDR_BHIST");
			break;
		case 45:
			CAM_INFO(CAM_ISP, "STATS_HDR_BE");
			break;
		case 46:
			CAM_INFO(CAM_ISP, "STATS_AWB_BG");
			break;
		case 47:
			CAM_INFO(CAM_ISP, "STATS_BHIST");
			break;
		case 48:
			CAM_INFO(CAM_ISP, "STATS_BAF");
			break;
		case 49:
			CAM_INFO(CAM_ISP, "STATS_RS");
			break;
		case 50:
			CAM_INFO(CAM_ISP, "STATS_CS");
			break;
		case 51:
			CAM_INFO(CAM_ISP, "STATS_IHIST");
			break;
		default:
			CAM_ERR(CAM_ISP,
				"Invalid Module ID:%d", module_id);
			break;
		}
	}

print_state:
	soc_private = camif_priv->soc_info->soc_private;

	cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle);

	CAM_INFO(CAM_ISP, "ife_clk_src:%lld", soc_private->ife_clk_src);

	if ((err_type == CAM_VFE_IRQ_STATUS_OVERFLOW) &&
		((camif_priv->cam_common_cfg.input_mux_sel_pp & 0x3) ||
		(bus_overflow_status)))
		cam_cpas_log_votes(false);
}

/*
 * IRQ top half: grabs an event payload from the free list, timestamps it,
 * copies the raw IRQ status registers into it, reads the custom frame-id
 * register on EPOCH0 (when custom hw input mux is selected), and hands the
 * payload to the bottom half via th_payload->evt_payload_priv.
 * Returns non-zero (and drops the event) when no payload is available.
 */
static int cam_vfe_camif_ver3_handle_irq_top_half(uint32_t evt_id,
	struct cam_irq_th_payload *th_payload)
{
	int32_t rc;
	int i;
	struct cam_isp_resource_node *camif_node;
	struct cam_vfe_mux_camif_ver3_data *camif_priv;
	struct cam_vfe_top_irq_evt_payload *evt_payload;

	camif_node = th_payload->handler_priv;
	camif_priv = camif_node->res_priv;

	CAM_DBG(CAM_ISP,
		"VFE:%d CAMIF IRQ status_0: 0x%X status_1: 0x%X status_2: 0x%X",
		camif_node->hw_intf->hw_idx, th_payload->evt_status_arr[0],
		th_payload->evt_status_arr[1], th_payload->evt_status_arr[2]);

	rc = cam_vfe_camif_ver3_get_evt_payload(camif_priv, &evt_payload);
	if (rc) {
		/* Payload pool exhausted: log the raw status and drop */
		CAM_INFO_RATE_LIMIT(CAM_ISP,
			"VFE:%d CAMIF IRQ status_0: 0x%X status_1: 0x%X status_2: 0x%X",
			camif_node->hw_intf->hw_idx, th_payload->evt_status_arr[0],
			th_payload->evt_status_arr[1], th_payload->evt_status_arr[2]);
		return rc;
	}

	cam_isp_hw_get_timestamp(&evt_payload->ts);
	evt_payload->reg_val = 0;

	for (i = 0; i < th_payload->num_registers; i++)
		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];

	/* Read frame_id meta at every epoch if custom hw is enabled */
	if (evt_payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
		& camif_priv->reg_data->epoch0_irq_mask) {
		if ((camif_priv->common_reg->custom_frame_idx) &&
			(camif_priv->cam_common_cfg.input_mux_sel_pp & 0x3))
			evt_payload->reg_val = cam_io_r_mb(
				camif_priv->mem_base +
				camif_priv->common_reg->custom_frame_idx);
	}

	th_payload->evt_payload_priv = evt_payload;

	if (th_payload->evt_status_arr[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
		& camif_priv->reg_data->sof_irq_mask) {
		trace_cam_log_event("SOF", "TOP_HALF",
			th_payload->evt_status_arr[CAM_IFE_IRQ_CAMIF_REG_STATUS1],
			camif_node->hw_intf->hw_idx);
	}

	if (th_payload->evt_status_arr[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
		& camif_priv->reg_data->epoch0_irq_mask) {
		trace_cam_log_event("EPOCH0", "TOP_HALF",
			th_payload->evt_status_arr[CAM_IFE_IRQ_CAMIF_REG_STATUS1],
			camif_node->hw_intf->hw_idx);
	}

	if (th_payload->evt_status_arr[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
		& camif_priv->reg_data->eof_irq_mask) {
		trace_cam_log_event("EOF", "TOP_HALF",
			th_payload->evt_status_arr[CAM_IFE_IRQ_CAMIF_REG_STATUS1],
			camif_node->hw_intf->hw_idx);
	}

	CAM_DBG(CAM_ISP, "Exit");
	return rc;
}

/*
 * IRQ bottom half: decodes the payload copied by the top half, caches
 * SOF/EPOCH/EOF timestamps, notifies the event_cb for SOF/EPOCH/EOF and
 * for overflow/violation errors (with print_status + optional reg dump),
 * then returns the payload to the free list.
 * Return value is the last matching CAM_VFE_IRQ_STATUS_* code.
 */
static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
	void *evt_payload_priv)
{
	int ret = CAM_VFE_IRQ_STATUS_ERR;
	struct cam_isp_resource_node *camif_node;
	struct cam_vfe_mux_camif_ver3_data *camif_priv;
	struct cam_vfe_top_irq_evt_payload *payload;
	struct cam_isp_hw_event_info evt_info;
	struct cam_isp_hw_error_event_info err_evt_info;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_vfe_soc_private *soc_private = NULL;
	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX] = {0};
	struct timespec64 ts;
	uint32_t val = 0;
	int i = 0;

	if (!handler_priv || !evt_payload_priv) {
		CAM_ERR(CAM_ISP,
			"Invalid params handle_priv:%pK, evt_payload_priv:%pK",
			handler_priv, evt_payload_priv);
		return ret;
	}

	camif_node = handler_priv;
	camif_priv = camif_node->res_priv;
	payload = evt_payload_priv;

	soc_info = camif_priv->soc_info;
	soc_private =
		(struct cam_vfe_soc_private *)soc_info->soc_private;

	for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++)
		irq_status[i] = payload->irq_reg_val[i];

	evt_info.hw_type = CAM_ISP_HW_TYPE_VFE;
	evt_info.hw_idx = camif_node->hw_intf->hw_idx;
	evt_info.res_id = camif_node->res_id;
	evt_info.res_type = camif_node->res_type;
	evt_info.reg_val = 0;
	/* FIXME(review): hw_type is assigned twice; second write is redundant */
	evt_info.hw_type = CAM_ISP_HW_TYPE_VFE;

	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
		& camif_priv->reg_data->sof_irq_mask) {
		/* Rate-limited INFO logging for the first N SOFs when the
		 * SOF_IRQ_DEBUG command enabled it; otherwise normal DBG path
		 * which also caches the SOF timestamp.
		 */
		if ((camif_priv->enable_sof_irq_debug) &&
			(camif_priv->irq_debug_cnt <=
			CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
			CAM_INFO_RATE_LIMIT(CAM_ISP, "VFE:%d Received SOF",
				evt_info.hw_idx);

			camif_priv->irq_debug_cnt++;
			if (camif_priv->irq_debug_cnt ==
				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
				camif_priv->enable_sof_irq_debug =
					false;
				camif_priv->irq_debug_cnt = 0;
			}
		} else {
			CAM_DBG(CAM_ISP, "VFE:%d Received SOF",
				evt_info.hw_idx);
			camif_priv->sof_ts.tv_sec =
				payload->ts.mono_time.tv_sec;
			camif_priv->sof_ts.tv_nsec =
				payload->ts.mono_time.tv_nsec;
		}

		if (camif_priv->event_cb)
			camif_priv->event_cb(camif_priv->priv,
				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);

		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
	}

	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
		& camif_priv->reg_data->epoch0_irq_mask) {
		CAM_DBG(CAM_ISP, "VFE:%d Received EPOCH", evt_info.hw_idx);
		/* reg_val carries the custom frame id read in the top half */
		evt_info.reg_val = payload->reg_val;
		camif_priv->epoch_ts.tv_sec =
			payload->ts.mono_time.tv_sec;
		camif_priv->epoch_ts.tv_nsec =
			payload->ts.mono_time.tv_nsec;

		if (camif_priv->event_cb)
			camif_priv->event_cb(camif_priv->priv,
				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);

		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
	}

	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
		& camif_priv->reg_data->eof_irq_mask) {
		CAM_DBG(CAM_ISP, "VFE:%d Received EOF", evt_info.hw_idx);
		camif_priv->eof_ts.tv_sec =
			payload->ts.mono_time.tv_sec;
		camif_priv->eof_ts.tv_nsec =
			payload->ts.mono_time.tv_nsec;

		if (camif_priv->event_cb)
			camif_priv->event_cb(camif_priv->priv,
				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);

		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
	}

	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
		& camif_priv->reg_data->error_irq_mask0) {
		CAM_ERR(CAM_ISP, "VFE:%d Overflow", evt_info.hw_idx);

		ktime_get_boottime_ts64(&ts);
		CAM_INFO(CAM_ISP,
			"current monotonic timestamp:[%lld.%09lld]",
			ts.tv_sec, ts.tv_nsec);

		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
		err_evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
		evt_info.event_data = (void *)&err_evt_info;

		CAM_INFO(CAM_ISP, "ife_clk_src:%lld",
			soc_private->ife_clk_src);

		cam_vfe_camif_ver3_print_status(irq_status, ret, camif_priv);

		if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP)
			cam_vfe_camif_ver3_reg_dump(camif_node);

		if (camif_priv->event_cb)
			camif_priv->event_cb(camif_priv->priv,
				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
	}

	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
		& camif_priv->reg_data->frame_id_irq_mask) {
		val = cam_io_r_mb(camif_priv->mem_base +
			camif_priv->common_reg->custom_frame_idx);
		CAM_DBG(CAM_ISP,
			"VFE:%d Frame id change to: %u", evt_info.hw_idx,
			val);
	}

	/* Any bit in status2 is treated as a violation */
	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]) {
		CAM_ERR(CAM_ISP, "VFE:%d Violation", evt_info.hw_idx);

		ktime_get_boottime_ts64(&ts);
		CAM_INFO(CAM_ISP,
			"current monotonic timestamp:[%lld.%09lld]",
			ts.tv_sec, ts.tv_nsec);

		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
		err_evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
		evt_info.event_data = (void *)&err_evt_info;

		CAM_INFO(CAM_ISP, "ife_clk_src:%lld",
			soc_private->ife_clk_src);

		cam_vfe_camif_ver3_print_status(irq_status, ret, camif_priv);

		if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP)
			cam_vfe_camif_ver3_reg_dump(camif_node);

		if (camif_priv->event_cb)
			camif_priv->event_cb(camif_priv->priv,
				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
	}

	if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
		/* FIXME(review): format string has 2 specifiers (%d, 0x%X)
		 * but 3 value args (hw_idx, mem_base, register value);
		 * mem_base is consumed by %X and the diag value is dropped.
		 */
		CAM_DBG(CAM_ISP, "VFE:%d VFE_DIAG_SENSOR_STATUS: 0x%X",
			evt_info.hw_idx, camif_priv->mem_base,
			cam_io_r(camif_priv->mem_base +
			camif_priv->common_reg->diag_sensor_status_0));
	}

	cam_vfe_camif_ver3_put_evt_payload(camif_priv, &payload);

	CAM_DBG(CAM_ISP, "returning status = %d", ret);
	return ret;
}

/*
 * Allocate and wire up the CAMIF ver3 private data on camif_node:
 * caches register maps and hw info, installs the resource ops and IRQ
 * handlers, and builds the free event-payload list.
 * Returns 0 or -ENOMEM on allocation failure.
 */
int cam_vfe_camif_ver3_init(
	struct cam_hw_intf *hw_intf,
	struct cam_hw_soc_info *soc_info,
	void *camif_hw_info,
	struct cam_isp_resource_node *camif_node,
	void *vfe_irq_controller)
{
	struct cam_vfe_mux_camif_ver3_data *camif_priv = NULL;
	struct cam_vfe_camif_ver3_hw_info *camif_info = camif_hw_info;
	int i = 0;

	camif_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_mux_camif_ver3_data),
		GFP_KERNEL);
	if (!camif_priv)
		return -ENOMEM;

	camif_node->res_priv = camif_priv;

	camif_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
	camif_priv->camif_reg = camif_info->camif_reg;
	camif_priv->common_reg = camif_info->common_reg;
	camif_priv->reg_data = camif_info->reg_data;
	camif_priv->hw_intf = hw_intf;
	camif_priv->soc_info = soc_info;
	camif_priv->vfe_irq_controller = vfe_irq_controller;

	camif_node->init = cam_vfe_camif_ver3_resource_init;
	camif_node->deinit = cam_vfe_camif_ver3_resource_deinit;
	camif_node->start = cam_vfe_camif_ver3_resource_start;
	camif_node->stop = cam_vfe_camif_ver3_resource_stop;
	camif_node->process_cmd = cam_vfe_camif_ver3_process_cmd;
	camif_node->top_half_handler = cam_vfe_camif_ver3_handle_irq_top_half;
	camif_node->bottom_half_handler =
		cam_vfe_camif_ver3_handle_irq_bottom_half;
	spin_lock_init(&camif_priv->spin_lock);
	INIT_LIST_HEAD(&camif_priv->free_payload_list);
	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) {
		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
		list_add_tail(&camif_priv->evt_payload[i].list,
			&camif_priv->free_payload_list);
	}

	return 0;
}

/*
 * Tear down what cam_vfe_camif_ver3_init() set up: resets the payload
 * lists, clears the node's ops/handlers and res_priv, and frees the
 * private data. Returns 0, or -ENODEV on NULL node/priv.
 */
int cam_vfe_camif_ver3_deinit(
	struct cam_isp_resource_node *camif_node)
{
	struct cam_vfe_mux_camif_ver3_data *camif_priv;
	int i = 0;

	if (!camif_node) {
		CAM_ERR(CAM_ISP, "Error, camif_node is NULL %pK", camif_node);
		return -ENODEV;
	}

	camif_priv = camif_node->res_priv;

	/* FIXME(review): camif_priv is dereferenced by the INIT_LIST_HEAD
	 * calls below before the NULL check further down; if res_priv can
	 * be NULL this is a NULL-pointer dereference. The check should
	 * happen here instead.
	 */
	INIT_LIST_HEAD(&camif_priv->free_payload_list);
	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++)
		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);

	/* FIXME(review): duplicate assignment - camif_priv was already set
	 * from camif_node->res_priv above.
	 */
	camif_priv = camif_node->res_priv;

	camif_node->start = NULL;
	camif_node->stop = NULL;
	camif_node->process_cmd = NULL;
	camif_node->top_half_handler = NULL;
	camif_node->bottom_half_handler = NULL;
	camif_node->res_priv = NULL;

	if (!camif_priv) {
		CAM_ERR(CAM_ISP, "Error, camif_priv is NULL %pK", camif_priv);
		return -ENODEV;
	}

	CAM_MEM_FREE(camif_priv);

	return 0;
}
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
new file mode 100644
index 0000000000..3098de6ea7
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 */

#ifndef _CAM_VFE_CAMIF_VER3_H_
#define _CAM_VFE_CAMIF_VER3_H_

#include "cam_isp_hw.h"
#include "cam_vfe_top.h"

/* Register offsets for the PP CLC CAMIF block of a VFE ver3 core */
struct cam_vfe_camif_ver3_pp_clc_reg {
	uint32_t hw_version;
	uint32_t hw_status;
	uint32_t module_cfg;
	uint32_t pdaf_raw_crop_width_cfg;
	uint32_t pdaf_raw_crop_height_cfg;
	uint32_t line_skip_pattern;
	uint32_t pixel_skip_pattern;
	uint32_t period_cfg;
	uint32_t irq_subsample_pattern;
	uint32_t epoch_irq_cfg;
	uint32_t debug_1;
	uint32_t debug_0;
	uint32_t test_bus_ctrl;
	uint32_t spare;
	uint32_t reg_update_cmd;
};

/* Per-target bit shifts, masks and IRQ masks used by cam_vfe_camif_ver3.c */
struct cam_vfe_camif_ver3_reg_data {
	uint32_t pp_extern_reg_update_shift;
	uint32_t dual_pd_extern_reg_update_shift;
	uint32_t extern_reg_update_mask;
	uint32_t dual_ife_pix_en_shift;
	uint32_t dual_ife_sync_sel_shift;
	uint32_t operating_mode_shift;
	uint32_t input_mux_sel_shift;

	uint32_t pixel_pattern_shift;
	uint32_t pixel_pattern_mask;

	uint32_t dsp_mode_shift;
	uint32_t dsp_mode_mask;
	uint32_t dsp_en_shift;
	uint32_t dsp_en_mask;

	uint32_t reg_update_cmd_data;
	uint32_t epoch_line_cfg;
	uint32_t sof_irq_mask;
	uint32_t epoch0_irq_mask;
	uint32_t epoch1_irq_mask;
	uint32_t eof_irq_mask;
	uint32_t error_irq_mask0;
	uint32_t error_irq_mask2;
	uint32_t subscribe_irq_mask1;
	uint32_t frame_id_irq_mask;

	uint32_t enable_diagnostic_hw;
	uint32_t pp_camif_cfg_en_shift;
	uint32_t pp_camif_cfg_ife_out_en_shift;
	uint32_t top_debug_cfg_en;
	uint32_t dual_vfe_sync_mask;

	uint32_t input_bayer_fmt;
	uint32_t input_yuv_fmt;

};

/* Bundle of register tables handed to cam_vfe_camif_ver3_init() */
struct cam_vfe_camif_ver3_hw_info {
	struct cam_vfe_top_ver3_reg_offset_common *common_reg;
	struct cam_vfe_camif_ver3_pp_clc_reg *camif_reg;
	struct cam_vfe_camif_ver3_reg_data *reg_data;
};

int cam_vfe_camif_ver3_acquire_resource(
	struct cam_isp_resource_node *camif_res,
	void *acquire_param);

int cam_vfe_camif_ver3_init(
	struct cam_hw_intf *hw_intf,
	struct cam_hw_soc_info *soc_info,
	void *camif_hw_info,
	struct cam_isp_resource_node *camif_node,
	void *vfe_irq_controller);

int cam_vfe_camif_ver3_deinit(
	struct cam_isp_resource_node *camif_node);

#endif /* _CAM_VFE_CAMIF_VER3_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
new file mode 100644
index 0000000000..83b741a78d
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
@@ -0,0 +1,625 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#include + +#include + +#include "cam_io_util.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_isp_hw.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_soc.h" +#include "cam_vfe_top.h" +#include "cam_vfe_top_ver2.h" +#include "cam_vfe_fe_ver1.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_cpas_api.h" +#include "cam_mem_mgr_api.h" + +#define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2 + +struct cam_vfe_mux_fe_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_vfe_fe_ver1_reg *fe_reg; + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_fe_reg_data *reg_data; + struct cam_hw_soc_info *soc_info; + + enum cam_isp_hw_sync_mode sync_mode; + uint32_t dsp_mode; + uint32_t pix_pattern; + uint32_t first_pixel; + uint32_t first_line; + uint32_t last_pixel; + uint32_t last_line; + uint32_t hbi_value; + uint32_t vbi_value; + bool enable_sof_irq_debug; + uint32_t irq_debug_cnt; + uint32_t fe_cfg_data; + uint32_t hbi_count; +}; + +static int cam_vfe_fe_validate_pix_pattern(uint32_t pattern) +{ + int rc; + + switch (pattern) { + case CAM_ISP_PATTERN_BAYER_RGRGRG: + case CAM_ISP_PATTERN_BAYER_GRGRGR: + case CAM_ISP_PATTERN_BAYER_BGBGBG: + case CAM_ISP_PATTERN_BAYER_GBGBGB: + case CAM_ISP_PATTERN_YUV_YCBYCR: + case CAM_ISP_PATTERN_YUV_YCRYCB: + case CAM_ISP_PATTERN_YUV_CBYCRY: + case CAM_ISP_PATTERN_YUV_CRYCBY: + rc = 0; + break; + default: + CAM_ERR(CAM_ISP, "Error! 
Invalid pix pattern:%d", pattern); + rc = -EINVAL; + break; + } + return rc; +} + +static int cam_vfe_fe_update( + struct cam_isp_resource_node *fe_res, + void *cmd_data, uint32_t arg_size) +{ + struct cam_vfe_mux_fe_data *rsrc_data = NULL; + struct cam_vfe_fe_update_args *args = cmd_data; + uint32_t fe_cfg_data; + + if (arg_size != sizeof(struct cam_vfe_fe_update_args)) { + CAM_ERR(CAM_ISP, "Invalid cmd size"); + return -EINVAL; + } + + if (!args) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "fe_update->min_vbi = 0x%x", args->fe_config.min_vbi); + CAM_DBG(CAM_ISP, "fe_update->hbi_count = 0x%x", + args->fe_config.hbi_count); + CAM_DBG(CAM_ISP, "fe_update->fs_mode = 0x%x", args->fe_config.fs_mode); + CAM_DBG(CAM_ISP, "fe_update->fs_line_sync_en = 0x%x", + args->fe_config.fs_line_sync_en); + + fe_cfg_data = args->fe_config.min_vbi | + args->fe_config.fs_mode << 8 | + args->fe_config.fs_line_sync_en; + + rsrc_data = fe_res->res_priv; + rsrc_data->fe_cfg_data = fe_cfg_data; + rsrc_data->hbi_count = args->fe_config.hbi_count; + + CAM_DBG(CAM_ISP, "fe_cfg_data = 0x%x", fe_cfg_data); + return 0; +} + +static int cam_vfe_fe_get_reg_update( + struct cam_isp_resource_node *fe_res, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t reg_val_pair[2]; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_vfe_mux_fe_data *rsrc_data = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > 
cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + rsrc_data = fe_res->res_priv; + reg_val_pair[0] = rsrc_data->fe_reg->reg_update_cmd; + reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data; + CAM_DBG(CAM_ISP, "CAMIF reg_update_cmd 0x%x offset 0x%x", + reg_val_pair[1], reg_val_pair[0]); + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + + cdm_args->cmd.used_bytes = size * 4; + + return 0; +} + +int cam_vfe_fe_ver1_acquire_resource( + struct cam_isp_resource_node *fe_res, + void *acquire_param) +{ + struct cam_vfe_mux_fe_data *fe_data; + struct cam_vfe_acquire_args *acquire_data; + + int rc = 0; + + fe_data = (struct cam_vfe_mux_fe_data *)fe_res->res_priv; + acquire_data = (struct cam_vfe_acquire_args *)acquire_param; + + rc = cam_vfe_fe_validate_pix_pattern( + acquire_data->vfe_in.in_port->test_pattern); + if (rc) { + CAM_ERR(CAM_ISP, "pix validation failed: id:%d pix_pattern %d", + fe_res->hw_intf->hw_idx, + acquire_data->vfe_in.in_port->test_pattern); + return rc; + } + + fe_data->sync_mode = acquire_data->vfe_in.sync_mode; + fe_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern; + fe_data->dsp_mode = acquire_data->vfe_in.in_port->dsp_mode; + fe_data->first_pixel = acquire_data->vfe_in.in_port->left_start; + fe_data->last_pixel = acquire_data->vfe_in.in_port->left_stop; + fe_data->first_line = acquire_data->vfe_in.in_port->line_start; + fe_data->last_line = acquire_data->vfe_in.in_port->line_stop; + fe_data->hbi_value = 0; + fe_data->vbi_value = 0; + + CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d", + fe_res->hw_intf->hw_idx, + fe_data->pix_pattern, fe_data->dsp_mode); + return rc; +} + +static int cam_vfe_fe_resource_init( + struct cam_isp_resource_node *fe_res, + void *init_args, uint32_t arg_size) +{ + struct cam_vfe_mux_fe_data *fe_data; + struct cam_hw_soc_info *soc_info; + int rc = 0; + + if (!fe_res) { 
+ CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + fe_data = (struct cam_vfe_mux_fe_data *)fe_res->res_priv; + + soc_info = fe_data->soc_info; + + if ((fe_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (fe_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME); + if (rc) + CAM_ERR(CAM_ISP, "failed to enable dsp clk"); + } + + return rc; +} + +static int cam_vfe_fe_resource_deinit( + struct cam_isp_resource_node *fe_res, + void *init_args, uint32_t arg_size) +{ + struct cam_vfe_mux_fe_data *fe_data; + struct cam_hw_soc_info *soc_info; + int rc = 0; + + if (!fe_res) { + CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + fe_data = (struct cam_vfe_mux_fe_data *)fe_res->res_priv; + + soc_info = fe_data->soc_info; + + if ((fe_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (fe_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + rc = cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME); + if (rc) + CAM_ERR(CAM_ISP, "failed to disable dsp clk"); + } + + return rc; + +} + +static int cam_vfe_fe_resource_start( + struct cam_isp_resource_node *fe_res) +{ + struct cam_vfe_mux_fe_data *rsrc_data; + uint32_t val = 0; + uint32_t epoch0_irq_mask; + uint32_t epoch1_irq_mask; + uint32_t computed_epoch_line_cfg; + + if (!fe_res) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + if (fe_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Error! 
Invalid fe res res_state:%d", + fe_res->res_state); + return -EINVAL; + } + + rsrc_data = (struct cam_vfe_mux_fe_data *)fe_res->res_priv; + + /* config vfe core */ + val = (rsrc_data->pix_pattern << + rsrc_data->reg_data->pixel_pattern_shift); + if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) + val |= (1 << rsrc_data->reg_data->extern_reg_update_shift); + + if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + /* DSP mode reg val is CAM_ISP_DSP_MODE - 1 */ + val |= (((rsrc_data->dsp_mode - 1) & + rsrc_data->reg_data->dsp_mode_mask) << + rsrc_data->reg_data->dsp_mode_shift); + val |= (0x1 << rsrc_data->reg_data->dsp_en_shift); + } + + if (rsrc_data->fe_cfg_data) { + /*set Mux mode value to EXT_RD_PATH */ + val |= (rsrc_data->reg_data->fe_mux_data << + rsrc_data->reg_data->input_mux_sel_shift); + } + + if (rsrc_data->hbi_count) { + /*set hbi count*/ + val |= (rsrc_data->hbi_count << + rsrc_data->reg_data->hbi_cnt_shift); + } + cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->core_cfg); + + CAM_DBG(CAM_ISP, "hw id:%d core_cfg (off:0x%x, val:0x%x)", + fe_res->hw_intf->hw_idx, + rsrc_data->common_reg->core_cfg, + val); + + /* disable the CGC for stats */ + cam_io_w_mb(0xFFFFFFFF, rsrc_data->mem_base + + rsrc_data->common_reg->module_ctrl[ + CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd); + + /* epoch config */ + epoch0_irq_mask = (((rsrc_data->last_line + rsrc_data->vbi_value) - + rsrc_data->first_line) / 2); + if (epoch0_irq_mask > (rsrc_data->last_line - rsrc_data->first_line)) + epoch0_irq_mask = rsrc_data->last_line - rsrc_data->first_line; + + epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg & 0xFFFF; + computed_epoch_line_cfg = (epoch0_irq_mask << 16) | epoch1_irq_mask; + cam_io_w_mb(computed_epoch_line_cfg, + rsrc_data->mem_base + rsrc_data->fe_reg->epoch_irq); + CAM_DBG(CAM_ISP, + "first_line:0x%x last_line:0x%x vbi:0x%x epoch_line_cfg: 0x%x", + rsrc_data->first_line, rsrc_data->last_line, 
+ rsrc_data->vbi_value, computed_epoch_line_cfg); + + fe_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + /* Read Back cfg */ + cam_io_w_mb(rsrc_data->fe_cfg_data, + rsrc_data->mem_base + rsrc_data->fe_reg->fe_cfg); + CAM_DBG(CAM_ISP, "hw id:%d fe_cfg_data(off:0x%x val:0x%x)", + fe_res->hw_intf->hw_idx, + rsrc_data->fe_reg->fe_cfg, + rsrc_data->fe_cfg_data); + + /* Reg Update */ + cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data, + rsrc_data->mem_base + rsrc_data->fe_reg->reg_update_cmd); + CAM_DBG(CAM_ISP, "hw id:%d RUP (off:0x%x, val:0x%x)", + fe_res->hw_intf->hw_idx, + rsrc_data->fe_reg->reg_update_cmd, + rsrc_data->reg_data->reg_update_cmd_data); + + /* disable sof irq debug flag */ + rsrc_data->enable_sof_irq_debug = false; + rsrc_data->irq_debug_cnt = 0; + + CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", fe_res->hw_intf->hw_idx); + return 0; +} + +static int cam_vfe_fe_reg_dump( + struct cam_isp_resource_node *fe_res) +{ + struct cam_vfe_mux_fe_data *fe_priv; + struct cam_vfe_soc_private *soc_private; + int rc = 0, i; + uint32_t val = 0; + + if (!fe_res) { + CAM_ERR(CAM_ISP, "Error! 
Invalid input arguments"); + return -EINVAL; + } + + if ((fe_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) || + (fe_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)) + return 0; + + fe_priv = (struct cam_vfe_mux_fe_data *)fe_res->res_priv; + soc_private = fe_priv->soc_info->soc_private; + for (i = 0xA3C; i <= 0xA90; i += 4) { + val = cam_io_r_mb(fe_priv->mem_base + i); + CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val); + } + + for (i = 0xE0C; i <= 0xE3C; i += 4) { + val = cam_io_r_mb(fe_priv->mem_base + i); + CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val); + } + + for (i = 0x2000; i <= 0x20B8; i += 4) { + val = cam_io_r_mb(fe_priv->mem_base + i); + CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val); + } + + for (i = 0x2500; i <= 0x255C; i += 4) { + val = cam_io_r_mb(fe_priv->mem_base + i); + CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val); + } + + for (i = 0x2600; i <= 0x265C; i += 4) { + val = cam_io_r_mb(fe_priv->mem_base + i); + CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val); + } + + cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle); + + return rc; +} + +static int cam_vfe_fe_resource_stop( + struct cam_isp_resource_node *fe_res) +{ + struct cam_vfe_mux_fe_data *fe_priv; + int rc = 0; + uint32_t val = 0; + + if (!fe_res) { + CAM_ERR(CAM_ISP, "Error! 
Invalid input arguments"); + return -EINVAL; + } + + if (fe_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED || + fe_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) + return 0; + + fe_priv = (struct cam_vfe_mux_fe_data *)fe_res->res_priv; + + if ((fe_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (fe_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + val = cam_io_r_mb(fe_priv->mem_base + + fe_priv->common_reg->core_cfg); + val &= (~(1 << fe_priv->reg_data->dsp_en_shift)); + cam_io_w_mb(val, fe_priv->mem_base + + fe_priv->common_reg->core_cfg); + } + + if (fe_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + fe_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + return rc; +} + +static int cam_vfe_fe_sof_irq_debug( + struct cam_isp_resource_node *rsrc_node, void *cmd_args) +{ + struct cam_vfe_mux_fe_data *fe_priv; + uint32_t *enable_sof_irq = (uint32_t *)cmd_args; + + fe_priv = + (struct cam_vfe_mux_fe_data *)rsrc_node->res_priv; + + if (*enable_sof_irq == 1) + fe_priv->enable_sof_irq_debug = true; + else + fe_priv->enable_sof_irq_debug = false; + + return 0; +} + +static int cam_vfe_fe_blanking_update( + struct cam_isp_resource_node *rsrc_node, void *cmd_args) +{ + struct cam_vfe_mux_fe_data *fe_priv = + (struct cam_vfe_mux_fe_data *)rsrc_node->res_priv; + + struct cam_isp_blanking_config *blanking_config = + (struct cam_isp_blanking_config *)cmd_args; + + fe_priv->hbi_value = blanking_config->hbi; + fe_priv->vbi_value = blanking_config->vbi; + CAM_DBG(CAM_ISP, "hbi:%d vbi:%d", + fe_priv->hbi_value, fe_priv->vbi_value); + + return 0; +} + +static int cam_vfe_fe_process_cmd(struct cam_isp_resource_node *rsrc_node, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + + if (!rsrc_node || !cmd_args) { + CAM_ERR(CAM_ISP, "Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_vfe_fe_get_reg_update(rsrc_node, cmd_args, + arg_size); + break; + case 
CAM_ISP_HW_CMD_SOF_IRQ_DEBUG: + rc = cam_vfe_fe_sof_irq_debug(rsrc_node, cmd_args); + break; + case CAM_ISP_HW_CMD_FE_UPDATE_IN_RD: + rc = cam_vfe_fe_update(rsrc_node, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BLANKING_UPDATE: + rc = cam_vfe_fe_blanking_update(rsrc_node, cmd_args); + break; + default: + CAM_ERR(CAM_ISP, + "unsupported process command:%d", cmd_type); + break; + } + + return rc; +} + +static int cam_vfe_fe_handle_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + return -EPERM; +} + +static int cam_vfe_fe_handle_irq_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + int ret = CAM_VFE_IRQ_STATUS_SUCCESS; + struct cam_isp_resource_node *fe_node; + struct cam_vfe_mux_fe_data *fe_priv; + struct cam_vfe_top_irq_evt_payload *payload; + uint32_t irq_status0; + uint32_t irq_status1; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + ret = CAM_VFE_IRQ_STATUS_ERR; + return ret; + } + + fe_node = handler_priv; + fe_priv = fe_node->res_priv; + payload = evt_payload_priv; + irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0]; + irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1]; + + CAM_DBG(CAM_ISP, "event ID, irq_status_0 = 0x%x", irq_status0); + + if (irq_status0 & fe_priv->reg_data->sof_irq_mask) { + if ((fe_priv->enable_sof_irq_debug) && + (fe_priv->irq_debug_cnt <= + CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) { + CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF"); + + fe_priv->irq_debug_cnt++; + if (fe_priv->irq_debug_cnt == + CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) { + fe_priv->enable_sof_irq_debug = + false; + fe_priv->irq_debug_cnt = 0; + } + } else { + CAM_DBG(CAM_ISP, "Received SOF"); + } + } + + if (irq_status0 & fe_priv->reg_data->epoch0_irq_mask) + CAM_DBG(CAM_ISP, "Received EPOCH"); + + if (irq_status0 & fe_priv->reg_data->reg_update_irq_mask) + CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK"); + + if (irq_status0 & fe_priv->reg_data->eof_irq_mask) + 
CAM_DBG(CAM_ISP, "Received EOF"); + + if (irq_status1 & fe_priv->reg_data->error_irq_mask1) { + CAM_DBG(CAM_ISP, "Received ERROR"); + ret = CAM_VFE_IRQ_STATUS_ERR; + cam_vfe_fe_reg_dump(fe_node); + /* No HW mgr notification on error */ + } + + CAM_DBG(CAM_ISP, "returing status = %d", ret); + return ret; +} + +int cam_vfe_fe_ver1_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *fe_hw_info, + struct cam_isp_resource_node *fe_node) +{ + struct cam_vfe_mux_fe_data *fe_priv = NULL; + struct cam_vfe_fe_ver1_hw_info *fe_info = fe_hw_info; + + fe_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_mux_fe_data), + GFP_KERNEL); + if (!fe_priv) { + CAM_ERR(CAM_ISP, "Error! Failed to alloc for fe_priv"); + return -ENOMEM; + } + + fe_node->res_priv = fe_priv; + + fe_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + fe_priv->fe_reg = fe_info->fe_reg; + fe_priv->common_reg = fe_info->common_reg; + fe_priv->reg_data = fe_info->reg_data; + fe_priv->hw_intf = hw_intf; + fe_priv->soc_info = soc_info; + + fe_node->init = cam_vfe_fe_resource_init; + fe_node->deinit = cam_vfe_fe_resource_deinit; + fe_node->start = cam_vfe_fe_resource_start; + fe_node->stop = cam_vfe_fe_resource_stop; + fe_node->process_cmd = cam_vfe_fe_process_cmd; + fe_node->top_half_handler = cam_vfe_fe_handle_irq_top_half; + fe_node->bottom_half_handler = cam_vfe_fe_handle_irq_bottom_half; + + return 0; +} + +int cam_vfe_fe_ver1_deinit( + struct cam_isp_resource_node *fe_node) +{ + struct cam_vfe_mux_fe_data *fe_priv = fe_node->res_priv; + + fe_node->start = NULL; + fe_node->stop = NULL; + fe_node->process_cmd = NULL; + fe_node->top_half_handler = NULL; + fe_node->bottom_half_handler = NULL; + + fe_node->res_priv = NULL; + + if (!fe_priv) { + CAM_ERR(CAM_ISP, "Error! 
fe_priv is NULL"); + return -ENODEV; + } + + CAM_MEM_FREE(fe_priv); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.h new file mode 100644 index 0000000000..6974a568fd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_VFE_FE_VER1_H_ +#define _CAM_VFE_FE_VER1_H_ + +#include "cam_isp_hw.h" +#include "cam_vfe_top.h" + +struct cam_vfe_fe_ver1_reg { + uint32_t camif_cmd; + uint32_t camif_config; + uint32_t line_skip_pattern; + uint32_t pixel_skip_pattern; + uint32_t skip_period; + uint32_t irq_subsample_pattern; + uint32_t epoch_irq; + uint32_t raw_crop_width_cfg; + uint32_t raw_crop_height_cfg; + uint32_t reg_update_cmd; + uint32_t vfe_diag_config; + uint32_t vfe_diag_sensor_status; + uint32_t fe_cfg; +}; + +struct cam_vfe_fe_reg_data { + uint32_t raw_crop_first_pixel_shift; + uint32_t raw_crop_first_pixel_mask; + + uint32_t raw_crop_last_pixel_shift; + uint32_t raw_crop_last_pixel_mask; + + uint32_t raw_crop_first_line_shift; + uint32_t raw_crop_first_line_mask; + + uint32_t raw_crop_last_line_shift; + uint32_t raw_crop_last_line_mask; + + uint32_t input_mux_sel_shift; + uint32_t input_mux_sel_mask; + uint32_t extern_reg_update_shift; + uint32_t extern_reg_update_mask; + + uint32_t pixel_pattern_shift; + uint32_t pixel_pattern_mask; + + uint32_t dsp_mode_shift; + uint32_t dsp_mode_mask; + uint32_t dsp_en_shift; + uint32_t dsp_en_mask; + + uint32_t reg_update_cmd_data; + uint32_t epoch_line_cfg; + uint32_t sof_irq_mask; + uint32_t epoch0_irq_mask; + uint32_t reg_update_irq_mask; + uint32_t eof_irq_mask; + uint32_t error_irq_mask0; + uint32_t error_irq_mask1; + + uint32_t 
enable_diagnostic_hw; + uint32_t fe_mux_data; + uint32_t hbi_cnt_shift; +}; + +struct cam_vfe_fe_ver1_hw_info { + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_fe_ver1_reg *fe_reg; + struct cam_vfe_fe_reg_data *reg_data; +}; + +int cam_vfe_fe_ver1_acquire_resource( + struct cam_isp_resource_node *camif_res, + void *acquire_param); + +int cam_vfe_fe_ver1_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *camif_hw_info, + struct cam_isp_resource_node *camif_node); + +int cam_vfe_fe_ver1_deinit( + struct cam_isp_resource_node *camif_node); + +#endif /* _CAM_VFE_FE_VER1_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c new file mode 100644 index 0000000000..cb909beaf3 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include "cam_vfe_rdi.h" +#include "cam_isp_hw_mgr_intf.h" +#include "cam_isp_hw.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_top_ver2.h" +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "cam_irq_controller.h" +#include "cam_tasklet_util.h" +#include "cam_mem_mgr_api.h" + +struct cam_vfe_mux_rdi_data { + void __iomem *mem_base; + struct cam_hw_intf *hw_intf; + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_rdi_ver2_reg *rdi_reg; + struct cam_vfe_rdi_common_reg_data *rdi_common_reg_data; + struct cam_vfe_rdi_overflow_status *rdi_irq_status; + struct cam_vfe_rdi_reg_data *reg_data; + struct cam_hw_soc_info *soc_info; + + cam_hw_mgr_event_cb_func event_cb; + void *priv; + int irq_err_handle; + int irq_handle; + void *vfe_irq_controller; + struct cam_vfe_top_irq_evt_payload evt_payload[CAM_VFE_RDI_EVT_MAX]; + struct list_head free_payload_list; + spinlock_t spin_lock; + + enum cam_isp_hw_sync_mode sync_mode; + struct timespec64 sof_ts; + struct timespec64 error_ts; +}; + +static int cam_vfe_rdi_get_evt_payload( + struct cam_vfe_mux_rdi_data *rdi_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + int rc = 0; + + spin_lock(&rdi_priv->spin_lock); + if (list_empty(&rdi_priv->free_payload_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload"); + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry(&rdi_priv->free_payload_list, + struct cam_vfe_top_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); + rc = 0; +done: + spin_unlock(&rdi_priv->spin_lock); + return rc; +} + +static int cam_vfe_rdi_put_evt_payload( + struct cam_vfe_mux_rdi_data *rdi_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!rdi_priv) { + CAM_ERR(CAM_ISP, "Invalid param core_info NULL"); + return -EINVAL; + } + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "No payload to put"); + return -EINVAL; + } + + 
CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_vfe_top_irq_evt_payload); + spin_lock_irqsave(&rdi_priv->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &rdi_priv->free_payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&rdi_priv->spin_lock, flags); + + CAM_DBG(CAM_ISP, "Done"); + return 0; +} + +static int cam_vfe_rdi_err_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *rdi_node; + struct cam_vfe_mux_rdi_data *rdi_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + bool error_flag = false; + + CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]); + + rdi_node = th_payload->handler_priv; + rdi_priv = rdi_node->res_priv; + /* + * need to handle overflow condition here, otherwise irq storm + * will block everything + */ + if (th_payload->evt_status_arr[1]) { + CAM_ERR(CAM_ISP, + "RDI Error: vfe:%d: STATUS_1=0x%x", + rdi_node->hw_intf->hw_idx, + th_payload->evt_status_arr[1]); + CAM_ERR(CAM_ISP, "Stopping further IRQ processing from vfe=%d", + rdi_node->hw_intf->hw_idx); + cam_irq_controller_disable_all( + rdi_priv->vfe_irq_controller); + error_flag = true; + } + + rc = cam_vfe_rdi_get_evt_payload(rdi_priv, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, + "No tasklet_cmd is free in queue"); + CAM_ERR_RATE_LIMIT(CAM_ISP, "STATUS_1=0x%x", + th_payload->evt_status_arr[1]); + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + if (error_flag) { + rdi_priv->error_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + rdi_priv->error_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + } + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + evt_payload->irq_reg_val[i] = cam_io_r(rdi_priv->mem_base + + rdi_priv->common_reg->violation_status); + + if (error_flag) + CAM_INFO(CAM_ISP, "Violation status = 0x%x", + evt_payload->irq_reg_val[i]); + + 
th_payload->evt_payload_priv = evt_payload; + + return rc; +} + +static int cam_vfe_rdi_get_reg_update( + struct cam_isp_resource_node *rdi_res, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t reg_val_pair[2]; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_vfe_mux_rdi_data *rsrc_data = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Error - Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res) { + CAM_ERR(CAM_ISP, "Error - Invalid args"); + return -EINVAL; + } + + cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Error - Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, + "Error - buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size * 4); + return -EINVAL; + } + + rsrc_data = rdi_res->res_priv; + reg_val_pair[0] = rsrc_data->rdi_reg->reg_update_cmd; + reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data; + CAM_DBG(CAM_ISP, "RDI%d reg_update_cmd %x", + rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0, reg_val_pair[1]); + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + cdm_args->cmd.used_bytes = size * 4; + + return 0; +} + +int cam_vfe_rdi_ver2_acquire_resource( + struct cam_isp_resource_node *rdi_res, + void *acquire_param) +{ + struct cam_vfe_mux_rdi_data *rdi_data; + struct cam_vfe_acquire_args *acquire_data; + + rdi_data = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv; + acquire_data = (struct cam_vfe_acquire_args *)acquire_param; + + rdi_data->event_cb = acquire_data->event_cb; + rdi_data->priv = acquire_data->priv; + rdi_data->sync_mode = acquire_data->vfe_in.sync_mode; + rdi_res->is_rdi_primary_res = false; + 
+ return 0; +} + +static int cam_vfe_rdi_resource_start( + struct cam_isp_resource_node *rdi_res) +{ + struct cam_vfe_mux_rdi_data *rsrc_data; + int rc = 0; + uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + uint32_t rdi_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {0}; + + if (!rdi_res) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + if (rdi_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Error! Invalid rdi res res_state:%d", + rdi_res->res_state); + return -EINVAL; + } + + rsrc_data = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv; + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] = + rsrc_data->rdi_common_reg_data->error_irq_mask0; + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] = + rsrc_data->rdi_common_reg_data->error_irq_mask1; + + rdi_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + /* Reg Update */ + cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data, + rsrc_data->mem_base + rsrc_data->rdi_reg->reg_update_cmd); + + if (!rsrc_data->irq_err_handle) { + rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_0, + err_irq_mask, + rdi_res, + cam_vfe_rdi_err_irq_top_half, + rdi_res->bottom_half_handler, + rdi_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (rsrc_data->irq_err_handle < 1) { + CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure"); + rc = -ENOMEM; + rsrc_data->irq_err_handle = 0; + } + } + + if (!rdi_res->is_rdi_primary_res) + goto end; + + rdi_irq_mask[0] = + (rsrc_data->reg_data->reg_update_irq_mask | + rsrc_data->reg_data->sof_irq_mask); + + CAM_DBG(CAM_ISP, "RDI%d irq_mask 0x%x", + rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0, + rdi_irq_mask[0]); + + if (!rsrc_data->irq_handle) { + rsrc_data->irq_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_1, + rdi_irq_mask, + rdi_res, + rdi_res->top_half_handler, + rdi_res->bottom_half_handler, + rdi_res->tasklet_info, + 
&tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (rsrc_data->irq_handle < 1) { + CAM_ERR(CAM_ISP, "IRQ handle subscribe failure"); + rc = -ENOMEM; + rsrc_data->irq_handle = 0; + } + } + + rsrc_data->sof_ts.tv_sec = 0; + rsrc_data->sof_ts.tv_nsec = 0; + rsrc_data->error_ts.tv_sec = 0; + rsrc_data->error_ts.tv_nsec = 0; + + CAM_DBG(CAM_ISP, "Start RDI %d", + rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0); +end: + return rc; +} + + +static int cam_vfe_rdi_resource_stop( + struct cam_isp_resource_node *rdi_res) +{ + struct cam_vfe_mux_rdi_data *rdi_priv; + int rc = 0; + + if (!rdi_res) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + if (rdi_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED || + rdi_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) + return 0; + + rdi_priv = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv; + + if (rdi_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + rdi_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + if (rdi_priv->irq_handle) { + cam_irq_controller_unsubscribe_irq( + rdi_priv->vfe_irq_controller, rdi_priv->irq_handle); + rdi_priv->irq_handle = 0; + } + + if (rdi_priv->irq_err_handle) { + cam_irq_controller_unsubscribe_irq( + rdi_priv->vfe_irq_controller, rdi_priv->irq_err_handle); + rdi_priv->irq_err_handle = 0; + } + + return rc; +} + +static int cam_vfe_rdi_process_cmd(struct cam_isp_resource_node *rsrc_node, + uint32_t cmd_type, void *cmd_args, uint32_t arg_size) +{ + int rc = -EINVAL; + + if (!rsrc_node || !cmd_args) { + CAM_ERR(CAM_ISP, "Error! 
Invalid input arguments"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_vfe_rdi_get_reg_update(rsrc_node, cmd_args, + arg_size); + break; + default: + CAM_ERR(CAM_ISP, + "unsupported RDI process command:%d", cmd_type); + break; + } + + return rc; +} + +static int cam_vfe_rdi_handle_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *rdi_node; + struct cam_vfe_mux_rdi_data *rdi_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + + rdi_node = th_payload->handler_priv; + rdi_priv = rdi_node->res_priv; + + CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]); + CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]); + + rc = cam_vfe_rdi_get_evt_payload(rdi_priv, &evt_payload); + if (rc) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue"); + CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x", + th_payload->evt_status_arr[0], + th_payload->evt_status_arr[1]); + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + th_payload->evt_payload_priv = evt_payload; + + CAM_DBG(CAM_ISP, "Exit"); + return rc; +} + +static int cam_vfe_rdi_handle_irq_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + int ret = CAM_VFE_IRQ_STATUS_ERR; + struct cam_isp_resource_node *rdi_node; + struct cam_vfe_mux_rdi_data *rdi_priv; + struct cam_vfe_top_irq_evt_payload *payload; + struct cam_isp_hw_event_info evt_info; + struct cam_isp_hw_error_event_info err_evt_info; + uint32_t irq_status0; + uint32_t irq_status1; + uint32_t irq_rdi_status; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + struct timespec64 ts; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, "Invalid params"); + return ret; + } + + rdi_node = 
handler_priv; + rdi_priv = rdi_node->res_priv; + payload = evt_payload_priv; + soc_info = rdi_priv->soc_info; + soc_private = + (struct cam_vfe_soc_private *)soc_info->soc_private; + + irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0]; + irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1]; + + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.hw_idx = rdi_node->hw_intf->hw_idx; + evt_info.res_id = rdi_node->res_id; + evt_info.res_type = rdi_node->res_type; + + CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0); + + if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) { + CAM_DBG(CAM_ISP, "Received SOF"); + rdi_priv->sof_ts.tv_sec = + payload->ts.mono_time.tv_sec; + rdi_priv->sof_ts.tv_nsec = + payload->ts.mono_time.tv_nsec; + if (rdi_priv->event_cb) + rdi_priv->event_cb(rdi_priv->priv, + CAM_ISP_HW_EVENT_SOF, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) { + CAM_DBG(CAM_ISP, "Received REG UPDATE"); + + if (rdi_priv->event_cb) + rdi_priv->event_cb(rdi_priv->priv, + CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&evt_info); + + ret = CAM_VFE_IRQ_STATUS_SUCCESS; + } + + if (!rdi_priv->rdi_irq_status) + goto end; + + irq_rdi_status = + (irq_status1 & + rdi_priv->rdi_irq_status->rdi_overflow_mask); + if (irq_rdi_status) { + ktime_get_boottime_ts64(&ts); + CAM_INFO(CAM_ISP, + "current monotonic timestamp:[%lld.%09lld]", + ts.tv_sec, ts.tv_nsec); + + cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle); + + CAM_INFO(CAM_ISP, "ife_clk_src:%lld", + soc_private->ife_clk_src); + CAM_INFO(CAM_ISP, + "ERROR timestamp:[%lld.%09lld] SOF timestamp:[%lld.%09lld]", + rdi_priv->error_ts.tv_sec, + rdi_priv->error_ts.tv_nsec, + rdi_priv->sof_ts.tv_sec, + rdi_priv->sof_ts.tv_nsec); + + if (irq_rdi_status & + rdi_priv->rdi_irq_status->rdi0_overflow_mask) { + evt_info.res_id = CAM_ISP_IFE_OUT_RES_RDI_0; + } + else if (irq_rdi_status & + rdi_priv->rdi_irq_status->rdi1_overflow_mask) 
{ + evt_info.res_id = CAM_ISP_IFE_OUT_RES_RDI_1; + } + else if (irq_rdi_status & + rdi_priv->rdi_irq_status->rdi2_overflow_mask) { + evt_info.res_id = CAM_ISP_IFE_OUT_RES_RDI_2; + } + else if (irq_rdi_status & + rdi_priv->rdi_irq_status->rdi3_overflow_mask) { + evt_info.res_id = CAM_ISP_IFE_OUT_RES_RDI_3; + } + + err_evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW; + evt_info.event_data = (void *)&err_evt_info; + if (rdi_priv->event_cb) + rdi_priv->event_cb(rdi_priv->priv, + CAM_ISP_HW_EVENT_ERROR, + (void *)&evt_info); + cam_cpas_log_votes(false); + } +end: + cam_vfe_rdi_put_evt_payload(rdi_priv, &payload); + CAM_DBG(CAM_ISP, "returning status = %d", ret); + return ret; +} + +int cam_vfe_rdi_ver2_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *rdi_hw_info, + struct cam_isp_resource_node *rdi_node, + void *vfe_irq_controller) +{ + struct cam_vfe_mux_rdi_data *rdi_priv = NULL; + struct cam_vfe_rdi_ver2_hw_info *rdi_info = rdi_hw_info; + int i = 0; + + rdi_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_mux_rdi_data), + GFP_KERNEL); + if (!rdi_priv) { + CAM_DBG(CAM_ISP, "Error! 
Failed to alloc for rdi_priv"); + return -ENOMEM; + } + + rdi_node->res_priv = rdi_priv; + + rdi_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + rdi_priv->hw_intf = hw_intf; + rdi_priv->common_reg = rdi_info->common_reg; + rdi_priv->rdi_reg = rdi_info->rdi_reg; + rdi_priv->vfe_irq_controller = vfe_irq_controller; + rdi_priv->rdi_common_reg_data = rdi_info->common_reg_data; + rdi_priv->soc_info = soc_info; + rdi_priv->rdi_irq_status = rdi_info->rdi_irq_status; + + switch (rdi_node->res_id) { + case CAM_ISP_HW_VFE_IN_RDI0: + rdi_priv->reg_data = rdi_info->reg_data[0]; + break; + case CAM_ISP_HW_VFE_IN_RDI1: + rdi_priv->reg_data = rdi_info->reg_data[1]; + break; + case CAM_ISP_HW_VFE_IN_RDI2: + rdi_priv->reg_data = rdi_info->reg_data[2]; + break; + case CAM_ISP_HW_VFE_IN_RDI3: + if (rdi_info->reg_data[3]) { + rdi_priv->reg_data = rdi_info->reg_data[3]; + } else { + CAM_ERR(CAM_ISP, "Error! RDI3 is not supported"); + goto err_init; + } + break; + default: + CAM_DBG(CAM_ISP, "invalid Resource id:%d", rdi_node->res_id); + goto err_init; + } + + rdi_node->start = cam_vfe_rdi_resource_start; + rdi_node->stop = cam_vfe_rdi_resource_stop; + rdi_node->process_cmd = cam_vfe_rdi_process_cmd; + rdi_node->top_half_handler = cam_vfe_rdi_handle_irq_top_half; + rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half; + + spin_lock_init(&rdi_priv->spin_lock); + INIT_LIST_HEAD(&rdi_priv->free_payload_list); + for (i = 0; i < CAM_VFE_RDI_EVT_MAX; i++) { + INIT_LIST_HEAD(&rdi_priv->evt_payload[i].list); + list_add_tail(&rdi_priv->evt_payload[i].list, + &rdi_priv->free_payload_list); + } + + return 0; +err_init: + CAM_MEM_FREE(rdi_priv); + return -EINVAL; +} + +int cam_vfe_rdi_ver2_deinit( + struct cam_isp_resource_node *rdi_node) +{ + struct cam_vfe_mux_rdi_data *rdi_priv = rdi_node->res_priv; + int i = 0; + + if (!rdi_priv) { + CAM_ERR(CAM_ISP, "Error! 
rdi_priv NULL"); + return -ENODEV; + } + + INIT_LIST_HEAD(&rdi_priv->free_payload_list); + for (i = 0; i < CAM_VFE_RDI_EVT_MAX; i++) + INIT_LIST_HEAD(&rdi_priv->evt_payload[i].list); + + rdi_node->start = NULL; + rdi_node->stop = NULL; + rdi_node->process_cmd = NULL; + rdi_node->top_half_handler = NULL; + rdi_node->bottom_half_handler = NULL; + + rdi_node->res_priv = NULL; + + CAM_MEM_FREE(rdi_priv); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h new file mode 100644 index 0000000000..70a2cb756c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_VFE_RDI_H_ +#define _CAM_VFE_RDI_H_ + +#include "cam_isp_hw.h" +#include "cam_vfe_top.h" + +#define CAM_VFE_RDI_VER2_MAX 4 + +#define CAM_VFE_RDI_EVT_MAX 256 + +struct cam_vfe_rdi_ver2_reg { + uint32_t reg_update_cmd; +}; + +struct cam_vfe_rdi_overflow_status { + uint32_t rdi0_overflow_mask; + uint32_t rdi1_overflow_mask; + uint32_t rdi2_overflow_mask; + uint32_t rdi3_overflow_mask; + uint32_t rdi_overflow_mask; +}; + +struct cam_vfe_rdi_common_reg_data { + uint32_t subscribe_irq_mask0; + uint32_t subscribe_irq_mask1; + uint32_t error_irq_mask0; + uint32_t error_irq_mask1; + uint32_t error_irq_mask2; + uint32_t rdi_frame_drop_mask; +}; + +struct cam_vfe_rdi_reg_data { + uint32_t reg_update_cmd_data; + uint32_t sof_irq_mask; + uint32_t reg_update_irq_mask; +}; +struct cam_vfe_rdi_ver2_hw_info { + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_rdi_ver2_reg *rdi_reg; + struct cam_vfe_rdi_common_reg_data *common_reg_data; + struct cam_vfe_rdi_overflow_status *rdi_irq_status; + struct cam_vfe_rdi_reg_data 
*reg_data[CAM_VFE_RDI_VER2_MAX]; +}; + +int cam_vfe_rdi_ver2_acquire_resource( + struct cam_isp_resource_node *rdi_res, + void *acquire_param); + +int cam_vfe_rdi_ver2_init( + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *rdi_hw_info, + struct cam_isp_resource_node *rdi_node, + void *vfe_irq_controller); + +int cam_vfe_rdi_ver2_deinit( + struct cam_isp_resource_node *rdi_node); + +#endif /* _CAM_VFE_RDI_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c new file mode 100644 index 0000000000..58c09f28cd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#include "cam_vfe_top.h" +#include "cam_vfe_top_ver2.h" +#include "cam_vfe_top_ver3.h" +#include "cam_vfe_top_ver4.h" +#include "cam_debug_util.h" + +int cam_vfe_top_init(uint32_t top_version, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *vfe_irq_controller, + struct cam_vfe_top **vfe_top) +{ + int rc = -EINVAL; + + switch (top_version) { + case CAM_VFE_TOP_VER_2_0: + rc = cam_vfe_top_ver2_init(soc_info, hw_intf, top_hw_info, + vfe_irq_controller, vfe_top); + break; + case CAM_VFE_TOP_VER_3_0: + rc = cam_vfe_top_ver3_init(soc_info, hw_intf, top_hw_info, + vfe_irq_controller, vfe_top); + break; + case CAM_VFE_TOP_VER_4_0: + rc = cam_vfe_top_ver4_init(soc_info, hw_intf, top_hw_info, + vfe_irq_controller, vfe_top); + break; + default: + CAM_ERR(CAM_ISP, "Error! 
Unsupported Version %x", top_version); + break; + } + + return rc; +} + +int cam_vfe_top_deinit(uint32_t top_version, + struct cam_vfe_top **vfe_top) +{ + int rc = -EINVAL; + + switch (top_version) { + case CAM_VFE_TOP_VER_2_0: + rc = cam_vfe_top_ver2_deinit(vfe_top); + break; + case CAM_VFE_TOP_VER_3_0: + rc = cam_vfe_top_ver3_deinit(vfe_top); + break; + case CAM_VFE_TOP_VER_4_0: + rc = cam_vfe_top_ver4_deinit(vfe_top); + break; + default: + CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version); + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.c new file mode 100644 index 0000000000..f68f1f0884 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.c @@ -0,0 +1,791 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_vfe_top_common.h" +#include "cam_debug_util.h" + +static const char *cam_vfe_top_clk_bw_state_to_string(uint32_t state) +{ + switch (state) { + case CAM_CLK_BW_STATE_UNCHANGED: + return "UNCHANGED"; + case CAM_CLK_BW_STATE_INCREASE: + return "INCREASE"; + case CAM_CLK_BW_STATE_DECREASE: + return "DECREASE"; + default: + return "Invalid State"; + } +} +static int cam_vfe_top_set_axi_bw_vote(struct cam_vfe_top_priv_common *top_common, + struct cam_axi_vote *final_bw_vote, uint64_t total_bw_new_vote, bool start_stop, + uint64_t request_id) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + int i, j; + + soc_info = top_common->soc_info; + soc_private = (struct cam_vfe_soc_private *)soc_info->soc_private; + + CAM_DBG(CAM_PERF, "VFE:%d Sending final BW to cpas bw_state:%s bw_vote:%llu req_id:%ld", + top_common->hw_idx, cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + total_bw_new_vote, (start_stop ? -1 : request_id)); + rc = cam_cpas_update_axi_vote(soc_private->cpas_handle, + final_bw_vote); + if (!rc) { + memcpy(&top_common->applied_axi_vote, + final_bw_vote, + sizeof(struct cam_axi_vote)); + top_common->total_bw_applied = total_bw_new_vote; + } else { + CAM_ERR(CAM_PERF, + "VFE:%d BW request failed, req_id: %ld, final num_paths: %d, rc=%d", + top_common->hw_idx, (start_stop ? 
-1 : request_id), + final_bw_vote->num_paths, rc); + for (i = 0; i < final_bw_vote->num_paths; i++) { + CAM_INFO(CAM_PERF, + "ife[%d] : Applied BW Vote : [%s][%s][%s] [%llu %llu %llu]", + top_common->hw_idx, + cam_cpas_axi_util_path_type_to_string( + final_bw_vote->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + final_bw_vote->axi_path[i].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + final_bw_vote->axi_path[i].vote_level), + final_bw_vote->axi_path[i].camnoc_bw, + final_bw_vote->axi_path[i].mnoc_ab_bw, + final_bw_vote->axi_path[i].mnoc_ib_bw); + } + + for (i = 0; i < CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; i++) { + for (j = 0; j < top_common->last_bw_vote[i].num_paths; j++) { + CAM_INFO(CAM_PERF, + "ife[%d] : History[%d] BW Vote : [%s][%s] [%s] [%llu %llu %llu]", + top_common->hw_idx, i, + cam_cpas_axi_util_path_type_to_string( + top_common->last_bw_vote[i].axi_path[j].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + top_common->last_bw_vote[i].axi_path[j].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + top_common->last_bw_vote[i].axi_path[j].vote_level), + top_common->last_bw_vote[i].axi_path[j].camnoc_bw, + top_common->last_bw_vote[i].axi_path[j].mnoc_ab_bw, + top_common->last_bw_vote[i].axi_path[j].mnoc_ib_bw); + } + } + } + + return rc; + +} + +static int cam_vfe_top_set_hw_clk_rate(struct cam_vfe_top_priv_common *top_common, + unsigned long final_clk_rate, bool start_stop, uint64_t request_id, bool is_drv_config_en) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + struct cam_ahb_vote ahb_vote; + int rc = 0, clk_lvl = -1; + unsigned long cesta_clk_rate_high = 0, cesta_clk_rate_low = 0; + int cesta_client_idx = -1; + uint32_t lowest_clk_lvl; + + soc_info = top_common->soc_info; + soc_private = (struct cam_vfe_soc_private *)soc_info->soc_private; + + lowest_clk_lvl = soc_info->lowest_clk_level; + cesta_clk_rate_high = final_clk_rate; + cesta_clk_rate_low 
= final_clk_rate; + + if (soc_info->is_clk_drv_en) { + cesta_client_idx = top_common->hw_idx; + if (is_drv_config_en) + cesta_clk_rate_low = + soc_info->clk_rate[lowest_clk_lvl][soc_info->src_clk_idx]; + else + cesta_clk_rate_low = final_clk_rate; + } + + CAM_DBG(CAM_PERF, + "Applying VFE:%d Clock name=%s idx=%d cesta_client_idx:%d req clk[high low]=[%lu %lu] req_id=%ld", + top_common->hw_idx, soc_info->clk_name[soc_info->src_clk_idx], + soc_info->src_clk_idx, cesta_client_idx, cesta_clk_rate_high, cesta_clk_rate_low, + (start_stop ? -1 : request_id)); + + rc = cam_soc_util_set_src_clk_rate(soc_info, cesta_client_idx, cesta_clk_rate_high, + cesta_clk_rate_low); + if (!rc) { + top_common->applied_clk_rate = cesta_clk_rate_high; + rc = cam_soc_util_get_clk_level(soc_info, cesta_clk_rate_low, + soc_info->src_clk_idx, &clk_lvl); + if (rc) { + CAM_WARN(CAM_ISP, + "Failed to get clk level for %s with clk_rate %lu src_idx %d rc %d", + soc_info->dev_name, cesta_clk_rate_high, + soc_info->src_clk_idx, rc); + rc = 0; + goto end; + } + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = clk_lvl; + cam_cpas_update_ahb_vote(soc_private->cpas_handle, &ahb_vote); + } else { + CAM_ERR(CAM_ISP, + "VFE:%d cesta_client_idx:%d Failed to set the req clk rate[high low]: [%llu %llu] rc:%d", + top_common->hw_idx, cesta_client_idx, cesta_clk_rate_high, + cesta_clk_rate_low, rc); + } + +end: + return rc; +} + +static inline void cam_vfe_top_delay_clk_reduction( + struct cam_vfe_top_priv_common *top_common, + unsigned long *max_clk) +{ + int i; + + for (i = 0; i < CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; i++) { + if (top_common->last_clk_vote[i] > *max_clk) + *max_clk = top_common->last_clk_vote[i]; + } +} + +static int cam_vfe_top_calc_hw_clk_rate( + struct cam_vfe_top_priv_common *top_common, bool start_stop, + unsigned long *final_clk_rate, uint64_t request_id) +{ + int i, rc = 0; + unsigned long max_req_clk_rate = 0; + + for (i = 0; i < top_common->num_mux; i++) { + if 
(top_common->req_clk_rate[i] > max_req_clk_rate) + max_req_clk_rate = top_common->req_clk_rate[i]; + } + + if (start_stop && !top_common->skip_data_rst_on_stop) { + /* need to vote current clk immediately */ + *final_clk_rate = max_req_clk_rate; + /* Reset everything, we can start afresh */ + memset(top_common->last_clk_vote, 0, sizeof(uint64_t) * + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ); + top_common->last_clk_counter = 0; + top_common->last_clk_vote[top_common->last_clk_counter] = max_req_clk_rate; + top_common->last_clk_counter = (top_common->last_clk_counter + 1) % + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; + } else { + top_common->last_clk_vote[top_common->last_clk_counter] = + max_req_clk_rate; + top_common->last_clk_counter = (top_common->last_clk_counter + 1) % + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; + + /* Find max clk request in last few requests */ + cam_vfe_top_delay_clk_reduction(top_common, final_clk_rate); + if (!(*final_clk_rate)) { + CAM_ERR(CAM_PERF, "Final clock rate is zero"); + return -EINVAL; + } + } + + if (*final_clk_rate == top_common->applied_clk_rate) + top_common->clk_state = CAM_CLK_BW_STATE_UNCHANGED; + else if (*final_clk_rate > top_common->applied_clk_rate) + top_common->clk_state = CAM_CLK_BW_STATE_INCREASE; + else + top_common->clk_state = CAM_CLK_BW_STATE_DECREASE; + + CAM_DBG(CAM_PERF, "VFE:%d Clock state:%s applied_clk_rate:%llu req_id:%ld", + top_common->hw_idx, cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + top_common->applied_clk_rate, (start_stop ? 
-1 : request_id)); + + return rc; +} + +int cam_vfe_top_clock_update(struct cam_vfe_top_priv_common *top_common, + void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_clock_update_args *clk_update = NULL; + struct cam_isp_resource_node *res = NULL; + int i; + + clk_update = + (struct cam_vfe_clock_update_args *)cmd_args; + res = clk_update->node_res; + + if (!res || !res->hw_intf->hw_priv) { + CAM_ERR(CAM_PERF, "Invalid input res %pK", res); + return -EINVAL; + } + + if (res->res_type != CAM_ISP_RESOURCE_VFE_IN || + res->res_id >= CAM_ISP_HW_VFE_IN_MAX) { + CAM_ERR(CAM_PERF, "VFE:%d Invalid res_type:%d res id%d", + res->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + for (i = 0; i < top_common->num_mux; i++) { + if (top_common->mux_rsrc[i].res_id == res->res_id) { + top_common->req_clk_rate[i] = clk_update->clk_rate; + break; + } + } + + return 0; +} + +static struct cam_axi_vote *cam_vfe_top_delay_bw_reduction( + struct cam_vfe_top_priv_common *top_common, + uint64_t *to_be_applied_bw) +{ + uint32_t i; + int vote_idx = -1; + uint64_t max_bw = 0; + + for (i = 0; i < CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; i++) { + if (top_common->last_total_bw_vote[i] >= max_bw) { + vote_idx = i; + max_bw = top_common->last_total_bw_vote[i]; + } + } + + *to_be_applied_bw = max_bw; + + return &top_common->last_bw_vote[vote_idx]; +} + +static int cam_vfe_top_calc_axi_bw_vote( + struct cam_vfe_top_priv_common *top_common, bool start_stop, + struct cam_axi_vote **to_be_applied_axi_vote, uint64_t *total_bw_new_vote, + uint64_t request_id) +{ + int rc = 0; + uint32_t i; + uint32_t num_paths = 0; + bool bw_unchanged = true; + struct cam_axi_vote *final_bw_vote = NULL; + + if (top_common->num_mux > CAM_VFE_TOP_MUX_MAX) { + CAM_ERR(CAM_PERF, + "Number of Mux exceeds max, # Mux: %d > Limit: %d", + top_common->num_mux, CAM_VFE_TOP_MUX_MAX); + return -EINVAL; + } + + memset(&top_common->agg_incoming_vote, 0, sizeof(struct cam_axi_vote)); + for (i = 0; i < 
top_common->num_mux; i++) { + if (top_common->axi_vote_control[i] == + CAM_ISP_BW_CONTROL_INCLUDE) { + if (num_paths + + top_common->req_axi_vote[i].num_paths > + CAM_CPAS_MAX_PATHS_PER_CLIENT) { + CAM_ERR(CAM_PERF, + "Required paths(%d) more than max(%d)", + num_paths + + top_common->req_axi_vote[i].num_paths, + CAM_CPAS_MAX_PATHS_PER_CLIENT); + rc = -EINVAL; + goto end; + } + + memcpy(&top_common->agg_incoming_vote.axi_path[num_paths], + &top_common->req_axi_vote[i].axi_path[0], + top_common->req_axi_vote[i].num_paths * + sizeof( + struct cam_cpas_axi_per_path_bw_vote)); + num_paths += top_common->req_axi_vote[i].num_paths; + } + } + + top_common->agg_incoming_vote.num_paths = num_paths; + + for (i = 0; i < top_common->agg_incoming_vote.num_paths; i++) { + CAM_DBG(CAM_PERF, + "ife[%d] : New BW Vote : counter[%d] [%s][%s][%s] [%llu %llu %llu]", + top_common->hw_idx, + top_common->last_bw_counter, + cam_cpas_axi_util_path_type_to_string( + top_common->agg_incoming_vote.axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + top_common->agg_incoming_vote.axi_path[i].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + top_common->agg_incoming_vote.axi_path[i].vote_level), + top_common->agg_incoming_vote.axi_path[i].camnoc_bw, + top_common->agg_incoming_vote.axi_path[i].mnoc_ab_bw, + top_common->agg_incoming_vote.axi_path[i].mnoc_ib_bw); + + *total_bw_new_vote += top_common->agg_incoming_vote.axi_path[i].camnoc_bw; + } + + memcpy(&top_common->last_bw_vote[top_common->last_bw_counter], + &top_common->agg_incoming_vote, sizeof(struct cam_axi_vote)); + top_common->last_total_bw_vote[top_common->last_bw_counter] = *total_bw_new_vote; + top_common->last_bw_counter = (top_common->last_bw_counter + 1) % + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; + + if (*total_bw_new_vote != top_common->total_bw_applied) + bw_unchanged = false; + + CAM_DBG(CAM_PERF, + "ife[%d] : applied_total=%lld, new_total=%lld unchanged=%d, start_stop=%d req_id=%ld", + 
top_common->hw_idx, top_common->total_bw_applied, + *total_bw_new_vote, bw_unchanged, start_stop, (start_stop ? -1 : request_id)); + + if (bw_unchanged) { + CAM_DBG(CAM_PERF, "BW config unchanged"); + *to_be_applied_axi_vote = NULL; + top_common->bw_state = CAM_CLK_BW_STATE_UNCHANGED; + goto end; + } + + if (start_stop) { + /* need to vote current request immediately */ + final_bw_vote = &top_common->agg_incoming_vote; + /* Reset everything, we can start afresh */ + memset(top_common->last_bw_vote, 0, sizeof(struct cam_axi_vote) * + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ); + memset(top_common->last_total_bw_vote, 0, sizeof(uint64_t) * + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ); + top_common->last_bw_counter = 0; + top_common->last_bw_vote[top_common->last_bw_counter] = + top_common->agg_incoming_vote; + top_common->last_total_bw_vote[top_common->last_bw_counter] = *total_bw_new_vote; + top_common->last_bw_counter = (top_common->last_bw_counter + 1) % + CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ; + } else { + /* + * Find max bw request in last few frames. This will be the bw + * that we want to vote to CPAS now. 
+ */ + final_bw_vote = cam_vfe_top_delay_bw_reduction(top_common, total_bw_new_vote); + if (*total_bw_new_vote == 0) + CAM_WARN(CAM_PERF, "to_be_applied_axi_vote is 0, req_id:%llu", request_id); + } + + for (i = 0; i < final_bw_vote->num_paths; i++) { + CAM_DBG(CAM_PERF, + "ife[%d] : Apply BW Vote : [%s][%s][%s] [%llu %llu %llu]", + top_common->hw_idx, + cam_cpas_axi_util_path_type_to_string( + final_bw_vote->axi_path[i].path_data_type), + cam_cpas_axi_util_trans_type_to_string( + final_bw_vote->axi_path[i].transac_type), + cam_cpas_axi_util_drv_vote_lvl_to_string( + final_bw_vote->axi_path[i].vote_level), + final_bw_vote->axi_path[i].camnoc_bw, + final_bw_vote->axi_path[i].mnoc_ab_bw, + final_bw_vote->axi_path[i].mnoc_ib_bw); + } + + if (*total_bw_new_vote == top_common->total_bw_applied) { + CAM_DBG(CAM_PERF, "VFE:%d Final BW Unchanged after delay", top_common->hw_idx); + top_common->bw_state = CAM_CLK_BW_STATE_UNCHANGED; + *to_be_applied_axi_vote = NULL; + goto end; + } else if (*total_bw_new_vote > top_common->total_bw_applied) { + top_common->bw_state = CAM_CLK_BW_STATE_INCREASE; + } else { + top_common->bw_state = CAM_CLK_BW_STATE_DECREASE; + } + + CAM_DBG(CAM_PERF, + "ife[%d] : Delayed update: applied_total=%lld new_total=%lld start_stop=%d bw_state=%s req_id=%ld", + top_common->hw_idx, top_common->total_bw_applied, + *total_bw_new_vote, start_stop, + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + (start_stop ? 
-1 : request_id)); + + *to_be_applied_axi_vote = final_bw_vote; + +end: + return rc; +} + +int cam_vfe_top_bw_update_v2(struct cam_vfe_soc_private *soc_private, + struct cam_vfe_top_priv_common *top_common, void *cmd_args, + uint32_t arg_size) +{ + struct cam_vfe_bw_update_args_v2 *bw_update = NULL; + struct cam_isp_resource_node *res = NULL; + int rc = 0; + int i; + + bw_update = (struct cam_vfe_bw_update_args_v2 *)cmd_args; + res = bw_update->node_res; + + if (!res || !res->hw_intf || !res->hw_intf->hw_priv) + return -EINVAL; + + if (res->res_type != CAM_ISP_RESOURCE_VFE_IN || + res->res_id >= CAM_ISP_HW_VFE_IN_MAX) { + CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d", + res->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + for (i = 0; i < top_common->num_mux; i++) { + if (top_common->mux_rsrc[i].res_id == res->res_id) { + memcpy(&top_common->req_axi_vote[i], + &bw_update->isp_vote, + sizeof(struct cam_axi_vote)); + top_common->axi_vote_control[i] = + CAM_ISP_BW_CONTROL_INCLUDE; + break; + } + } + + return rc; +} + +int cam_vfe_top_bw_update(struct cam_vfe_soc_private *soc_private, + struct cam_vfe_top_priv_common *top_common, void *cmd_args, + uint32_t arg_size) +{ + struct cam_vfe_bw_update_args *bw_update = NULL; + struct cam_isp_resource_node *res = NULL; + int rc = 0; + int i; + struct cam_axi_vote *mux_axi_vote; + + bw_update = (struct cam_vfe_bw_update_args *)cmd_args; + res = bw_update->node_res; + + if (!res || !res->hw_intf || !res->hw_intf->hw_priv) + return -EINVAL; + + + CAM_DBG(CAM_PERF, "res_id=%d, BW=[%lld %lld]", + res->res_id, bw_update->camnoc_bw_bytes, + bw_update->external_bw_bytes); + + if (res->res_type != CAM_ISP_RESOURCE_VFE_IN || + res->res_id >= CAM_ISP_HW_VFE_IN_MAX) { + CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d", + res->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + for (i = 0; i < top_common->num_mux; i++) { + mux_axi_vote = &top_common->req_axi_vote[i]; + if 
(top_common->mux_rsrc[i].res_id == res->res_id) { + mux_axi_vote->num_paths = 1; + if ((res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) && + (res->res_id <= CAM_ISP_HW_VFE_IN_RDI3)) { + mux_axi_vote->axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_IFE_RDI0 + + (res->res_id - CAM_ISP_HW_VFE_IN_RDI0); + } else { + /* + * Vote all bw into VIDEO path as we cannot + * differentiate to which path this has to go + */ + mux_axi_vote->axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_IFE_VID; + } + + mux_axi_vote->axi_path[0].transac_type = + CAM_AXI_TRANSACTION_WRITE; + mux_axi_vote->axi_path[0].camnoc_bw = + bw_update->camnoc_bw_bytes; + mux_axi_vote->axi_path[0].mnoc_ab_bw = + bw_update->external_bw_bytes; + mux_axi_vote->axi_path[0].mnoc_ib_bw = + bw_update->external_bw_bytes; + + top_common->axi_vote_control[i] = + CAM_ISP_BW_CONTROL_INCLUDE; + break; + } + + } + + return rc; +} + +int cam_vfe_top_bw_control(struct cam_vfe_soc_private *soc_private, + struct cam_vfe_top_priv_common *top_common, void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_bw_control_args *bw_ctrl = NULL; + struct cam_isp_resource_node *res = NULL; + struct cam_hw_info *hw_info = NULL; + int rc = 0; + int i; + + bw_ctrl = (struct cam_isp_bw_control_args *)cmd_args; + res = bw_ctrl->node_res; + + if (!res || !res->hw_intf->hw_priv) + return -EINVAL; + + hw_info = res->hw_intf->hw_priv; + + if (res->res_type != CAM_ISP_RESOURCE_VFE_IN || + res->res_id >= CAM_ISP_HW_VFE_IN_MAX) { + CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d", + res->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + for (i = 0; i < top_common->num_mux; i++) { + if (top_common->mux_rsrc[i].res_id == res->res_id) { + top_common->axi_vote_control[i] = bw_ctrl->action; + break; + } + } + + if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) + return rc; + + return cam_vfe_top_apply_bw_start_stop(top_common); +} + +int cam_vfe_top_apply_clk_bw_update(struct cam_vfe_top_priv_common *top_common, + void *cmd_args, 
uint32_t arg_size) +{ + struct cam_hw_info *hw_info = NULL; + struct cam_hw_intf *hw_intf = NULL; + struct cam_axi_vote *to_be_applied_axi_vote = NULL; + struct cam_isp_apply_clk_bw_args *clk_bw_args = NULL; + unsigned long final_clk_rate = 0; + uint64_t total_bw_new_vote = 0; + uint64_t request_id; + int rc = 0; + bool drv_config_changed = false; + + if (arg_size != sizeof(struct cam_isp_apply_clk_bw_args)) { + CAM_ERR(CAM_ISP, "Invalid arg size: %u", arg_size); + return -EINVAL; + } + + clk_bw_args = (struct cam_isp_apply_clk_bw_args *)cmd_args; + request_id = clk_bw_args->request_id; + hw_intf = clk_bw_args->hw_intf; + if (!hw_intf) { + CAM_ERR(CAM_PERF, "Invalid hw_intf"); + return -EINVAL; + } + + hw_info = hw_intf->hw_priv; + if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_DBG(CAM_PERF, + "VFE:%d Not ready to set clocks yet :%d", + hw_intf->hw_idx, hw_info->hw_state); + goto end; + } + + if (clk_bw_args->skip_clk_data_rst) { + top_common->skip_data_rst_on_stop = true; + CAM_DBG(CAM_ISP, "VFE:%u requested to avoid clk data rst", hw_intf->hw_idx); + return 0; + } + + rc = cam_vfe_top_calc_hw_clk_rate(top_common, false, &final_clk_rate, request_id); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in calculating clock rate rc=%d", + hw_intf->hw_idx, rc); + goto end; + } + + rc = cam_vfe_top_calc_axi_bw_vote(top_common, false, + &to_be_applied_axi_vote, &total_bw_new_vote, request_id); + if (rc) { + CAM_ERR(CAM_ISP, "VFE:%d Failed in calculating bw vote rc=%d", + hw_intf->hw_idx, rc); + goto end; + } + + if ((!to_be_applied_axi_vote) && (top_common->bw_state != CAM_CLK_BW_STATE_UNCHANGED)) { + CAM_ERR(CAM_PERF, "VFE:%d Invalid BW vote for state:%s", hw_intf->hw_idx, + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state)); + rc = -EINVAL; + goto end; + } + + if (top_common->prev_drv_config_en != clk_bw_args->is_drv_config_en) { + drv_config_changed = true; + top_common->prev_drv_config_en = clk_bw_args->is_drv_config_en; + } + + CAM_DBG(CAM_PERF, + 
"VFE:%d APPLY CLK/BW req_id:%ld clk:%s bw:%s drv_conf_en:%s drv_conf_changed:%d", + hw_intf->hw_idx, request_id, + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en), drv_config_changed); + + /* Determine BW and clock voting sequence according to state */ + if ((top_common->clk_state == CAM_CLK_BW_STATE_UNCHANGED) && + (top_common->bw_state == CAM_CLK_BW_STATE_UNCHANGED)) { + if (!drv_config_changed) + goto end; + + rc = cam_vfe_top_set_hw_clk_rate(top_common, final_clk_rate, false, + request_id, clk_bw_args->is_drv_config_en); + } else if (top_common->clk_state == CAM_CLK_BW_STATE_UNCHANGED) { + rc = cam_vfe_top_set_axi_bw_vote(top_common, to_be_applied_axi_vote, + total_bw_new_vote, false, request_id); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in voting final bw:%llu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, total_bw_new_vote, + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + } else if (top_common->bw_state == CAM_CLK_BW_STATE_UNCHANGED) { + rc = cam_vfe_top_set_hw_clk_rate(top_common, final_clk_rate, false, request_id, + clk_bw_args->is_drv_config_en); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in voting final clk:%lu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, final_clk_rate, + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + } else if (top_common->clk_state == CAM_CLK_BW_STATE_INCREASE) { + /* Set BW first, followed by Clock */ + rc = cam_vfe_top_set_axi_bw_vote(top_common, to_be_applied_axi_vote, + total_bw_new_vote, false, request_id); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in voting final bw:%llu 
clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, total_bw_new_vote, + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + + rc = cam_vfe_top_set_hw_clk_rate(top_common, final_clk_rate, false, request_id, + clk_bw_args->is_drv_config_en); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in voting final clk:%lu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, final_clk_rate, + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + } else if (top_common->clk_state == CAM_CLK_BW_STATE_DECREASE) { + /* Set Clock first, followed by BW */ + rc = cam_vfe_top_set_hw_clk_rate(top_common, final_clk_rate, false, request_id, + clk_bw_args->is_drv_config_en); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in voting final clk:%lu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, final_clk_rate, + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + + rc = cam_vfe_top_set_axi_bw_vote(top_common, to_be_applied_axi_vote, + total_bw_new_vote, false, request_id); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in voting final bw:%llu clk_state:%s bw_state:%s is_drv_config_en:%s", + hw_intf->hw_idx, total_bw_new_vote, + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + goto end; + } + } else { + CAM_ERR(CAM_ISP, + "Invalid state to apply CLK/BW clk_state:%s bw_state:%s is_drv_config_en:%s", + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state), + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state), + 
CAM_BOOL_TO_YESNO(clk_bw_args->is_drv_config_en)); + rc = -EINVAL; + goto end; + } + + if (top_common->clk_state != CAM_CLK_BW_STATE_UNCHANGED || drv_config_changed) + clk_bw_args->clock_updated = true; + +end: + top_common->clk_state = CAM_CLK_BW_STATE_INIT; + top_common->bw_state = CAM_CLK_BW_STATE_INIT; + return rc; +} + +int cam_vfe_top_apply_clock_start_stop(struct cam_vfe_top_priv_common *top_common) +{ + int rc = 0; + unsigned long final_clk_rate = 0; + + rc = cam_vfe_top_calc_hw_clk_rate(top_common, true, &final_clk_rate, 0); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in calculating clock rate rc=%d", + top_common->hw_idx, rc); + goto end; + } + + if (top_common->clk_state == CAM_CLK_BW_STATE_UNCHANGED) + goto end; + + rc = cam_vfe_top_set_hw_clk_rate(top_common, final_clk_rate, true, 0, false); + if (rc) { + CAM_ERR(CAM_ISP, "VFE:%d Failed in voting final clk:%lu clk_state:%s", + top_common->hw_idx, final_clk_rate, + cam_vfe_top_clk_bw_state_to_string(top_common->clk_state)); + goto end; + } + +end: + top_common->clk_state = CAM_CLK_BW_STATE_INIT; + top_common->skip_data_rst_on_stop = false; + top_common->prev_drv_config_en = false; + return rc; +} + +int cam_vfe_top_apply_bw_start_stop(struct cam_vfe_top_priv_common *top_common) +{ + int rc = 0; + uint64_t total_bw_new_vote = 0; + struct cam_axi_vote *to_be_applied_axi_vote = NULL; + + rc = cam_vfe_top_calc_axi_bw_vote(top_common, true, &to_be_applied_axi_vote, + &total_bw_new_vote, 0); + if (rc) { + CAM_ERR(CAM_ISP, "VFE:%d Failed in calculating bw vote rc=%d", + top_common->hw_idx, rc); + goto end; + } + + if (top_common->bw_state == CAM_CLK_BW_STATE_UNCHANGED) + goto end; + + rc = cam_vfe_top_set_axi_bw_vote(top_common, to_be_applied_axi_vote, total_bw_new_vote, + true, 0); + if (rc) { + CAM_ERR(CAM_ISP, "VFE:%d Failed in voting final bw:%llu bw_state:%s", + top_common->hw_idx, total_bw_new_vote, + cam_vfe_top_clk_bw_state_to_string(top_common->bw_state)); + goto end; + } + +end: + 
top_common->bw_state = CAM_CLK_BW_STATE_INIT; + return rc; +} + diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.h new file mode 100644 index 0000000000..9f4dda8c5e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_common.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_VFE_TOP_COMMON_H_ +#define _CAM_VFE_TOP_COMMON_H_ + +#define CAM_VFE_TOP_MUX_MAX 7 + +#include "cam_cpas_api.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_soc.h" + +#define CAM_VFE_TOP_MAX_REG_DUMP_ENTRIES 70 + +#define CAM_VFE_TOP_MAX_LUT_DUMP_ENTRIES 6 + +#define CAM_VFE_TOP_LOG_BUF_LEN 1024 + +#define CAM_VFE_TOP_DEBUG_VEC_ERR_REGS 2 +#define CAM_VFE_TOP_DEBUG_VEC_FIFO_SIZE 4 +#define CAM_VFE_TOP_DEBUG_TIMESTAMP_IRQ_SEL_SHIFT 4 +#define CAM_VFE_TOP_DEBUG_TIMESTAMP_IRQ_CLEAR_SHIFT 6 + +struct cam_vfe_top_priv_common { + struct cam_isp_resource_node mux_rsrc[CAM_VFE_TOP_MUX_MAX]; + uint32_t num_mux; + uint32_t hw_idx; + struct cam_axi_vote applied_axi_vote; + struct cam_axi_vote agg_incoming_vote; + struct cam_axi_vote req_axi_vote[CAM_VFE_TOP_MUX_MAX]; + struct cam_axi_vote last_bw_vote[CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ]; + uint64_t last_total_bw_vote[CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ]; + uint32_t last_bw_counter; + unsigned long last_clk_vote[CAM_DELAY_CLK_BW_REDUCTION_NUM_REQ]; + uint32_t last_clk_counter; + uint64_t total_bw_applied; + enum cam_clk_bw_state clk_state; + enum cam_clk_bw_state bw_state; + uint32_t hw_version; + enum cam_isp_bw_control_action axi_vote_control[CAM_VFE_TOP_MUX_MAX]; + struct cam_hw_soc_info *soc_info; + unsigned long applied_clk_rate; + unsigned 
long req_clk_rate[CAM_VFE_TOP_MUX_MAX]; + bool skip_data_rst_on_stop; + bool prev_drv_config_en; + +}; + +struct cam_vfe_top_reg_dump_entry { + uint32_t reg_dump_start; + uint32_t reg_dump_end; +}; + +struct cam_vfe_top_lut_dump_entry { + uint32_t lut_word_size; + uint32_t lut_bank_sel; + uint32_t lut_addr_size; +}; + +struct cam_vfe_top_dump_data { + uint32_t num_reg_dump_entries; + uint32_t num_lut_dump_entries; + uint32_t dmi_cfg; + uint32_t dmi_addr; + uint32_t dmi_data_path_hi; + uint32_t dmi_data_path_lo; + struct cam_vfe_top_reg_dump_entry + reg_entry[CAM_VFE_TOP_MAX_REG_DUMP_ENTRIES]; + struct cam_vfe_top_lut_dump_entry + lut_entry[CAM_VFE_TOP_MAX_LUT_DUMP_ENTRIES]; +}; + +int cam_vfe_top_clock_update(struct cam_vfe_top_priv_common *top_common, + void *cmd_args, uint32_t arg_size); + +int cam_vfe_top_bw_update_v2(struct cam_vfe_soc_private *soc_private, + struct cam_vfe_top_priv_common *top_common, void *cmd_args, + uint32_t arg_size); + +int cam_vfe_top_bw_update(struct cam_vfe_soc_private *soc_private, + struct cam_vfe_top_priv_common *top_common, void *cmd_args, + uint32_t arg_size); + +int cam_vfe_top_bw_control(struct cam_vfe_soc_private *soc_private, + struct cam_vfe_top_priv_common *top_common, void *cmd_args, + uint32_t arg_size); + +int cam_vfe_top_apply_clk_bw_update( + struct cam_vfe_top_priv_common *top_common, void *cmd_args, + uint32_t arg_size); + +int cam_vfe_top_apply_clock_start_stop(struct cam_vfe_top_priv_common *top_common); + +int cam_vfe_top_apply_bw_start_stop(struct cam_vfe_top_priv_common *top_common); + +#endif /* _CAM_VFE_TOP_COMMON_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c new file mode 100644 index 0000000000..dfc813ba59 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c @@ -0,0 +1,1004 @@ 
+// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_io_util.h" +#include "cam_cdm_util.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_top.h" +#include "cam_vfe_top_ver2.h" +#include "cam_debug_util.h" +#include "cam_vfe_soc.h" +#include "cam_mem_mgr_api.h" + +#define CAM_VFE_HW_RESET_HW_AND_REG_VAL 0x00003F9F +#define CAM_VFE_HW_RESET_HW_VAL 0x00003F87 + +struct cam_vfe_top_ver2_common_data { + struct cam_hw_intf *hw_intf; + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_top_dump_data *dump_data; +}; + +struct cam_vfe_top_ver2_priv { + struct cam_vfe_top_ver2_common_data common_data; + struct cam_vfe_top_priv_common top_common; +}; + +static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t mem_base = 0; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Error! Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res || !top_priv || + !top_priv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "Error! 
Invalid args"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_changebase(); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE( + top_priv->top_common.soc_info, VFE_CORE_BASE_IDX); + if (mem_base == -1) { + CAM_ERR(CAM_ISP, "failed to get mem_base, index: %d num_reg_map: %u", + VFE_CORE_BASE_IDX, top_priv->top_common.soc_info->num_reg_map); + return -EINVAL; + } + + CAM_DBG(CAM_ISP, "core %d mem_base 0x%x", + top_priv->top_common.soc_info->index, mem_base); + + cdm_util_ops->cdm_write_changebase( + cdm_args->cmd.cmd_buf_addr, mem_base); + cdm_args->cmd.used_bytes = (size * 4); + + return 0; +} + +static int cam_vfe_top_fs_update( + struct cam_vfe_top_ver2_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_fe_update_args *cmd_update = cmd_args; + + if (cmd_update->node_res->process_cmd) + return cmd_update->node_res->process_cmd(cmd_update->node_res, + CAM_ISP_HW_CMD_FE_UPDATE_IN_RD, cmd_args, arg_size); + + return 0; +} + +static int cam_vfe_top_dump_info( + struct cam_vfe_top_ver2_priv *top_priv, uint32_t cmd_type) +{ + struct cam_hw_soc_info *soc_info = top_priv->top_common.soc_info; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Null soc_info"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_NOTIFY_OVERFLOW: + CAM_INFO_RATE_LIMIT(CAM_ISP, "VFE%d src_clk_rate:%luHz", + soc_info->index, soc_info->applied_src_clk_rates.sw_client); + break; + default: + CAM_ERR(CAM_ISP, "cmd_type: %u not supported", cmd_type); + break; + } + + return 0; +} + +static int cam_vfe_top_blanking_update(uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + 
struct cam_isp_blanking_config *blanking_config = NULL; + struct cam_isp_resource_node *node_res = NULL; + + blanking_config = + (struct cam_isp_blanking_config *)cmd_args; + node_res = blanking_config->node_res; + + if (!node_res) { + CAM_ERR(CAM_PERF, "Invalid input res %pK", node_res); + return -EINVAL; + } + + if (!node_res->process_cmd) { + CAM_ERR(CAM_PERF, "Invalid input res process_cmd %pK", + node_res->process_cmd); + return -EINVAL; + } + + return node_res->process_cmd(node_res, + cmd_type, cmd_args, arg_size); +} + +static int cam_vfe_top_mux_get_reg_update( + struct cam_vfe_top_ver2_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *cmd_update = cmd_args; + + if (cmd_update->res->process_cmd) + return cmd_update->res->process_cmd(cmd_update->res, + CAM_ISP_HW_CMD_GET_REG_UPDATE, cmd_args, arg_size); + + return -EINVAL; +} + +static int cam_vfe_top_wait_comp_event(struct cam_vfe_top_ver2_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Error! Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res || !top_priv || + !top_priv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "Error! 
Invalid args"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_comp_wait(); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + cdm_util_ops->cdm_write_wait_comp_event(cdm_args->cmd.cmd_buf_addr, + 0, 0x2); + cdm_args->cmd.used_bytes = (size * 4); + + return 0; +} + +static int cam_vfe_top_add_wait_trigger(struct cam_vfe_top_ver2_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t reg_val_pair[2]; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Error! Invalid cmd size"); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res || !top_priv || + !top_priv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "Error! 
Invalid args"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + reg_val_pair[0] = 0x90; + reg_val_pair[1] = 0x1; + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + cdm_args->cmd.used_bytes = (size * 4); + + return 0; +} + +static int cam_vfe_top_get_data( + struct cam_vfe_top_ver2_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_resource_node *res = cmd_args; + + if (res->process_cmd) + return res->process_cmd(res, + CAM_ISP_HW_CMD_CAMIF_DATA, cmd_args, arg_size); + + return -EINVAL; +} + +int cam_vfe_top_get_hw_caps(void *device_priv, + void *args, uint32_t arg_size) +{ + struct cam_vfe_hw_get_hw_cap *vfe_cap_info = NULL; + struct cam_vfe_top_ver2_priv *vfe_top_prv = NULL; + struct cam_vfe_soc_private *vfe_soc_private = NULL; + + if (!device_priv || !args) { + CAM_ERR(CAM_ISP, + "Invalid arguments device_priv:%p, args:%p", + device_priv, args); + return -EINVAL; + } + + vfe_cap_info = (struct cam_vfe_hw_get_hw_cap *)args; + vfe_top_prv = (struct cam_vfe_top_ver2_priv *)device_priv; + + if (!vfe_top_prv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "soc_info is null"); + return -EFAULT; + } + + vfe_soc_private = (struct cam_vfe_soc_private *) + vfe_top_prv->top_common.soc_info->soc_private; + + vfe_cap_info->is_lite = (vfe_soc_private->is_ife_lite) ? 
true : false; + vfe_cap_info->incr = + (vfe_top_prv->top_common.hw_version) & 0x00ffff; + vfe_cap_info->minor = + ((vfe_top_prv->top_common.hw_version) >> 16) & 0x0fff; + vfe_cap_info->major = + ((vfe_top_prv->top_common.hw_version) >> 28) & 0x000f; + + return 0; +} + +static int cam_vfe_hw_dump( + struct cam_vfe_top_ver2_priv *top_priv, + void *cmd_args, + uint32_t arg_size) +{ + int i, j; + uint8_t *dst; + uint32_t reg_start_offset; + uint32_t reg_dump_size = 0; + uint32_t lut_dump_size = 0; + uint32_t val; + uint32_t num_reg; + void __iomem *reg_base; + uint32_t *addr, *start; + size_t remain_len; + uint32_t min_len; + struct cam_hw_soc_info *soc_info; + struct cam_vfe_top_dump_data *dump_data; + struct cam_isp_hw_dump_header *hdr; + struct cam_isp_hw_dump_args *dump_args = + (struct cam_isp_hw_dump_args *)cmd_args; + + if (!dump_args) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + if (!dump_args->cpu_addr || !dump_args->buf_len) { + CAM_ERR(CAM_ISP, + "Invalid params %pK %zu", + (void *)dump_args->cpu_addr, + dump_args->buf_len); + return -EINVAL; + } + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_ISP, + "Dump offset overshoot offset %zu buf_len %zu", + dump_args->offset, dump_args->buf_len); + return -ENOSPC; + } + dump_data = top_priv->common_data.dump_data; + if (!dump_data) { + CAM_ERR(CAM_ISP, "Dump data not available"); + return -EINVAL; + } + + soc_info = top_priv->top_common.soc_info; + + /*Dump registers */ + for (i = 0; i < dump_data->num_reg_dump_entries; i++) + reg_dump_size += (dump_data->reg_entry[i].reg_dump_end - + dump_data->reg_entry[i].reg_dump_start); + /* + * We dump the offset as well, so the total size dumped becomes + * multiplied by 2 + */ + reg_dump_size *= 2; + for (i = 0; i < dump_data->num_lut_dump_entries; i++) + lut_dump_size += ((dump_data->lut_entry[i].lut_addr_size) * + (dump_data->lut_entry[i].lut_word_size/8)); + + /*Minimum len comprises of: + * soc_index + * lut_dump_size + reg_dump_size + 
sizeof dump_header + + * (num_lut_dump_entries--> represents number of banks) + */ + min_len = sizeof(uint32_t) + lut_dump_size + reg_dump_size + + sizeof(struct cam_isp_hw_dump_header) + + (dump_data->num_lut_dump_entries * sizeof(uint32_t)); + remain_len = dump_args->buf_len - dump_args->offset; + if (remain_len < min_len) { + CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu, min %u", + remain_len, min_len); + return -ENOSPC; + } + + dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_isp_hw_dump_header *)dst; + hdr->word_size = sizeof(uint32_t); + scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "VFE_REG:"); + addr = (uint32_t *)(dst + sizeof(struct cam_isp_hw_dump_header)); + start = addr; + *addr++ = soc_info->index; + for (i = 0; i < dump_data->num_reg_dump_entries; i++) { + num_reg = (dump_data->reg_entry[i].reg_dump_end - + dump_data->reg_entry[i].reg_dump_start)/4; + reg_start_offset = dump_data->reg_entry[i].reg_dump_start; + reg_base = soc_info->reg_map[0].mem_base + reg_start_offset; + for (j = 0; j < num_reg; j++) { + addr[0] = soc_info->mem_block[0]->start + + reg_start_offset + (j*4); + addr[1] = cam_io_r(reg_base + (j*4)); + addr += 2; + } + } + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_isp_hw_dump_header); + + /*dump LUT*/ + for (i = 0; i < dump_data->num_lut_dump_entries; i++) { + + dst = (char *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_isp_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "LUT_REG:"); + hdr->word_size = dump_data->lut_entry[i].lut_word_size/8; + addr = (uint32_t *)(dst + + sizeof(struct cam_isp_hw_dump_header)); + start = addr; + *addr++ = dump_data->lut_entry[i].lut_bank_sel; + val = 0x100 | dump_data->lut_entry[i].lut_bank_sel; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + dump_data->dmi_cfg); + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + dump_data->dmi_addr); + for (j = 0; j < 
dump_data->lut_entry[i].lut_addr_size; + j++) { + if (dump_data->lut_entry[i].lut_word_size == 64) { + addr[0] = cam_io_r( + soc_info->reg_map[0].mem_base + + dump_data->dmi_data_path_lo); + addr[1] = cam_io_r( + soc_info->reg_map[0].mem_base + + dump_data->dmi_data_path_hi); + addr += 2; + } else { + *addr = cam_io_r( + soc_info->reg_map[0].mem_base + + dump_data->dmi_data_path_lo); + addr++; + } + } + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_isp_hw_dump_header); + } + CAM_DBG(CAM_ISP, "offset %zu", dump_args->offset); + return 0; +} + +int cam_vfe_top_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver2_priv *top_priv = device_priv; + struct cam_vfe_top_ver2_common_data common_data = top_priv->common_data; + + top_priv->top_common.applied_clk_rate = 0; + + top_priv->top_common.hw_version = + cam_io_r_mb(top_priv->top_common.soc_info->reg_map[0].mem_base + + common_data.common_reg->hw_version); + + return 0; +} + +int cam_vfe_top_reset(void *device_priv, + void *reset_core_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver2_priv *top_priv = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL; + uint32_t *reset_reg_args = reset_core_args; + uint32_t reset_reg_val; + + if (!top_priv || !reset_reg_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + switch (*reset_reg_args) { + case CAM_VFE_HW_RESET_HW_AND_REG: + reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL; + break; + default: + reset_reg_val = CAM_VFE_HW_RESET_HW_VAL; + break; + } + + CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val); + soc_info = top_priv->top_common.soc_info; + reg_common = top_priv->common_data.common_reg; + + /* Mask All the IRQs except RESET */ + cam_io_w_mb((1 << 31), + CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 0x5C); + + /* Reset HW */ + cam_io_w_mb(reset_reg_val, + 
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + + reg_common->global_reset_cmd); + + CAM_DBG(CAM_ISP, "Reset HW exit"); + return 0; +} + +int cam_vfe_top_reserve(void *device_priv, + void *reserve_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver2_priv *top_priv; + struct cam_vfe_acquire_args *args; + struct cam_vfe_hw_vfe_in_acquire_args *acquire_args; + uint32_t i; + int rc = -EINVAL; + + if (!device_priv || !reserve_args) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver2_priv *)device_priv; + args = (struct cam_vfe_acquire_args *)reserve_args; + acquire_args = &args->vfe_in; + + + for (i = 0; i < top_priv->top_common.num_mux; i++) { + if (top_priv->top_common.mux_rsrc[i].res_id == + acquire_args->res_id && + top_priv->top_common.mux_rsrc[i].res_state == + CAM_ISP_RESOURCE_STATE_AVAILABLE) { + + if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) { + rc = cam_vfe_camif_ver2_acquire_resource( + &top_priv->top_common.mux_rsrc[i], + args); + if (rc) + break; + } + + if (acquire_args->res_id == + CAM_ISP_HW_VFE_IN_PDLIB) { + rc = cam_vfe_camif_lite_ver2_acquire_resource( + &top_priv->top_common.mux_rsrc[i], + args); + if (rc) + break; + } + + if ((acquire_args->res_id >= + CAM_ISP_HW_VFE_IN_RDI0) && + (acquire_args->res_id <= + CAM_ISP_HW_VFE_IN_RDI3)) { + rc = cam_vfe_rdi_ver2_acquire_resource( + &top_priv->top_common.mux_rsrc[i], + args); + if (rc) + break; + } + + if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_RD) { + rc = cam_vfe_fe_ver1_acquire_resource( + &top_priv->top_common.mux_rsrc[i], + args); + if (rc) + break; + } + + top_priv->top_common.mux_rsrc[i].cdm_ops = + acquire_args->cdm_ops; + top_priv->top_common.mux_rsrc[i].tasklet_info = + args->tasklet; + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_RESERVED; + acquire_args->rsrc_node = + &top_priv->top_common.mux_rsrc[i]; + + rc = 0; + break; + } + } + + return rc; + +} + +int cam_vfe_top_release(void 
*device_priv, + void *release_args, uint32_t arg_size) +{ + struct cam_isp_resource_node *mux_res; + + if (!device_priv || !release_args) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + mux_res = (struct cam_isp_resource_node *)release_args; + + CAM_DBG(CAM_ISP, "Resource in state %d", mux_res->res_state); + if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Error! Resource in Invalid res_state :%d", + mux_res->res_state); + return -EINVAL; + } + mux_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + return 0; +} + +int cam_vfe_top_start(void *device_priv, + void *start_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver2_priv *top_priv; + struct cam_isp_resource_node *mux_res; + struct cam_hw_info *hw_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + int rc = 0; + + if (!device_priv || !start_args) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver2_priv *)device_priv; + soc_info = top_priv->top_common.soc_info; + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error soc_private NULL"); + return -EINVAL; + } + + mux_res = (struct cam_isp_resource_node *)start_args; + hw_info = (struct cam_hw_info *)mux_res->hw_intf->hw_priv; + + if (hw_info->hw_state == CAM_HW_STATE_POWER_UP) { + rc = cam_vfe_top_apply_clock_start_stop(&top_priv->top_common); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in applying start clock rc:%d", + hw_info->soc_info.index, rc); + return rc; + } + + rc = cam_vfe_top_apply_bw_start_stop(&top_priv->top_common); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in applying start bw rc:%d", + hw_info->soc_info.index, rc); + return rc; + } + + if (mux_res->start) { + rc = mux_res->start(mux_res); + } else { + CAM_ERR(CAM_ISP, + "Invalid res id:%d", mux_res->res_id); + rc = -EINVAL; + } + } else { + CAM_ERR(CAM_ISP, "VFE HW not 
powered up"); + rc = -EPERM; + } + + return rc; +} + +int cam_vfe_top_stop(void *device_priv, + void *stop_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver2_priv *top_priv; + struct cam_isp_resource_node *mux_res; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + int i, rc = 0; + + if (!device_priv || !stop_args) { + CAM_ERR(CAM_ISP, "Error! Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver2_priv *)device_priv; + mux_res = (struct cam_isp_resource_node *)stop_args; + soc_info = top_priv->top_common.soc_info; + soc_private = soc_info->soc_private; + + if ((mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) || + (mux_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB) || + (mux_res->res_id == CAM_ISP_HW_VFE_IN_RD) || + ((mux_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) && + (mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3))) { + rc = mux_res->stop(mux_res); + } else { + CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id); + return -EINVAL; + } + + if (!rc) { + for (i = 0; i < top_priv->top_common.num_mux; i++) { + if (top_priv->top_common.mux_rsrc[i].res_id == + mux_res->res_id) { + top_priv->top_common.req_clk_rate[i] = 0; + top_priv->top_common.req_axi_vote[i] + .axi_path[0].camnoc_bw = 0; + top_priv->top_common.req_axi_vote[i] + .axi_path[0].mnoc_ab_bw = 0; + top_priv->top_common.req_axi_vote[i] + .axi_path[0].mnoc_ib_bw = 0; + top_priv->top_common.axi_vote_control[i] = + CAM_ISP_BW_CONTROL_EXCLUDE; + break; + } + } + } + soc_private->ife_clk_src = 0; + return rc; +} + +int cam_vfe_top_read(void *device_priv, + void *read_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_vfe_top_write(void *device_priv, + void *write_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_vfe_top_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_vfe_top_ver2_priv *top_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private 
*soc_private = NULL; + + if (!device_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "Error! Invalid arguments"); + return -EINVAL; + } + top_priv = (struct cam_vfe_top_ver2_priv *)device_priv; + soc_info = top_priv->top_common.soc_info; + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error soc_private NULL"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_CHANGE_BASE: + rc = cam_vfe_top_mux_get_base(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_vfe_top_mux_get_reg_update(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CAMIF_DATA: + rc = cam_vfe_top_get_data(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CLOCK_UPDATE: + rc = cam_vfe_top_clock_update(&top_priv->top_common, cmd_args, + arg_size); + break; + case CAM_ISP_HW_NOTIFY_OVERFLOW: + rc = cam_vfe_top_dump_info(top_priv, cmd_type); + break; + case CAM_ISP_HW_CMD_FE_UPDATE_IN_RD: + rc = cam_vfe_top_fs_update(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_BW_UPDATE: + rc = cam_vfe_top_bw_update(soc_private, &top_priv->top_common, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BW_UPDATE_V2: + rc = cam_vfe_top_bw_update_v2(soc_private, + &top_priv->top_common, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BW_CONTROL: + rc = cam_vfe_top_bw_control(soc_private, &top_priv->top_common, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_DUMP_HW: + rc = cam_vfe_hw_dump(top_priv, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_ADD_WAIT: + rc = cam_vfe_top_wait_comp_event(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_ADD_WAIT_TRIGGER: + rc = cam_vfe_top_add_wait_trigger(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BLANKING_UPDATE: + rc = cam_vfe_top_blanking_update(cmd_type, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE: + rc = cam_vfe_top_apply_clk_bw_update(&top_priv->top_common, cmd_args, arg_size); 
+ break; + default: + rc = -EINVAL; + CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type); + break; + } + + return rc; +} + +int cam_vfe_top_ver2_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *vfe_irq_controller, + struct cam_vfe_top **vfe_top_ptr) +{ + int i, j, rc = 0; + struct cam_vfe_top_ver2_priv *top_priv = NULL; + struct cam_vfe_top_ver2_hw_info *ver2_hw_info = top_hw_info; + struct cam_vfe_top *vfe_top; + + vfe_top = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_top), GFP_KERNEL); + if (!vfe_top) { + CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top"); + rc = -ENOMEM; + goto end; + } + + top_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_top_ver2_priv), + GFP_KERNEL); + if (!top_priv) { + CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top_priv"); + rc = -ENOMEM; + goto free_vfe_top; + } + + vfe_top->top_priv = top_priv; + top_priv->top_common.applied_clk_rate = 0; + if (ver2_hw_info->num_mux > CAM_VFE_TOP_MUX_MAX) { + CAM_ERR(CAM_ISP, "Invalid number of input rsrc: %d, max: %d", + ver2_hw_info->num_mux, CAM_VFE_TOP_MUX_MAX); + rc = -EINVAL; + goto free_top_priv; + } + + top_priv->top_common.num_mux = ver2_hw_info->num_mux; + + for (i = 0, j = 0; i < top_priv->top_common.num_mux; i++) { + top_priv->top_common.mux_rsrc[i].res_type = + CAM_ISP_RESOURCE_VFE_IN; + top_priv->top_common.mux_rsrc[i].hw_intf = hw_intf; + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_AVAILABLE; + top_priv->top_common.req_clk_rate[i] = 0; + + if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) { + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_CAMIF; + + rc = cam_vfe_camif_ver2_init(hw_intf, soc_info, + &ver2_hw_info->camif_hw_info, + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + if (rc) + goto deinit_resources; + } else if (ver2_hw_info->mux_type[i] == + CAM_VFE_CAMIF_LITE_VER_2_0) { + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_PDLIB; + + rc = 
cam_vfe_camif_lite_ver2_init(hw_intf, soc_info, + &ver2_hw_info->camif_lite_hw_info, + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + + if (rc) + goto deinit_resources; + } else if (ver2_hw_info->mux_type[i] == + CAM_VFE_RDI_VER_1_0) { + /* set the RDI resource id */ + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_RDI0 + j++; + + rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info, + &ver2_hw_info->rdi_hw_info, + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + if (rc) + goto deinit_resources; + } else if (ver2_hw_info->mux_type[i] == + CAM_VFE_IN_RD_VER_1_0) { + /* set the RD resource id */ + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_RD; + + rc = cam_vfe_fe_ver1_init(hw_intf, soc_info, + &ver2_hw_info->fe_hw_info, + &top_priv->top_common.mux_rsrc[i]); + if (rc) + goto deinit_resources; + } else { + CAM_WARN(CAM_ISP, "Invalid mux type: %u", + ver2_hw_info->mux_type[i]); + } + } + + vfe_top->hw_ops.get_hw_caps = cam_vfe_top_get_hw_caps; + vfe_top->hw_ops.init = cam_vfe_top_init_hw; + vfe_top->hw_ops.reset = cam_vfe_top_reset; + vfe_top->hw_ops.reserve = cam_vfe_top_reserve; + vfe_top->hw_ops.release = cam_vfe_top_release; + vfe_top->hw_ops.start = cam_vfe_top_start; + vfe_top->hw_ops.stop = cam_vfe_top_stop; + vfe_top->hw_ops.read = cam_vfe_top_read; + vfe_top->hw_ops.write = cam_vfe_top_write; + vfe_top->hw_ops.process_cmd = cam_vfe_top_process_cmd; + *vfe_top_ptr = vfe_top; + + top_priv->top_common.soc_info = soc_info; + top_priv->common_data.hw_intf = hw_intf; + top_priv->top_common.hw_idx = hw_intf->hw_idx; + top_priv->common_data.common_reg = ver2_hw_info->common_reg; + top_priv->common_data.dump_data = ver2_hw_info->dump_data; + + return rc; + +deinit_resources: + for (--i; i >= 0; i--) { + if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) { + if (cam_vfe_camif_ver2_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, "Camif Deinit failed"); + } else if (ver2_hw_info->mux_type[i] == + 
CAM_VFE_CAMIF_LITE_VER_2_0) { + if (cam_vfe_camif_lite_ver2_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, "Camif lite deinit failed"); + } else if (ver2_hw_info->mux_type[i] == + CAM_VFE_IN_RD_VER_1_0) { + if (cam_vfe_fe_ver1_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, "VFE FE deinit failed"); + } else { + if (cam_vfe_rdi_ver2_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, "RDI Deinit failed"); + } + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + } + +free_top_priv: + CAM_MEM_FREE(vfe_top->top_priv); +free_vfe_top: + CAM_MEM_FREE(vfe_top); +end: + return rc; +} + +int cam_vfe_top_ver2_deinit(struct cam_vfe_top **vfe_top_ptr) +{ + int i, rc = 0; + struct cam_vfe_top_ver2_priv *top_priv = NULL; + struct cam_vfe_top *vfe_top; + + if (!vfe_top_ptr) { + CAM_ERR(CAM_ISP, "Error! Invalid input"); + return -EINVAL; + } + + vfe_top = *vfe_top_ptr; + if (!vfe_top) { + CAM_ERR(CAM_ISP, "Error! vfe_top NULL"); + return -ENODEV; + } + + top_priv = vfe_top->top_priv; + if (!top_priv) { + CAM_ERR(CAM_ISP, "Error! 
vfe_top_priv NULL"); + rc = -ENODEV; + goto free_vfe_top; + } + + for (i = 0; i < top_priv->top_common.num_mux; i++) { + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + if (top_priv->top_common.mux_rsrc[i].res_type == + CAM_VFE_CAMIF_VER_2_0) { + rc = cam_vfe_camif_ver2_deinit( + &top_priv->top_common.mux_rsrc[i]); + if (rc) + CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d", + rc); + } else if (top_priv->top_common.mux_rsrc[i].res_type == + CAM_VFE_CAMIF_LITE_VER_2_0) { + rc = cam_vfe_camif_lite_ver2_deinit( + &top_priv->top_common.mux_rsrc[i]); + if (rc) + CAM_ERR(CAM_ISP, + "Camif lite deinit failed rc=%d", rc); + } else if (top_priv->top_common.mux_rsrc[i].res_type == + CAM_VFE_RDI_VER_1_0) { + rc = cam_vfe_rdi_ver2_deinit( + &top_priv->top_common.mux_rsrc[i]); + if (rc) + CAM_ERR(CAM_ISP, "RDI deinit failed rc=%d", rc); + } else if (top_priv->top_common.mux_rsrc[i].res_type == + CAM_VFE_IN_RD_VER_1_0) { + rc = cam_vfe_fe_ver1_deinit( + &top_priv->top_common.mux_rsrc[i]); + if (rc) + CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d", + rc); + } + } + + CAM_MEM_FREE(vfe_top->top_priv); + +free_vfe_top: + CAM_MEM_FREE(vfe_top); + *vfe_top_ptr = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h new file mode 100644 index 0000000000..65d01da159 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_VFE_TOP_VER2_H_ +#define _CAM_VFE_TOP_VER2_H_ + +#include "cam_vfe_camif_ver2.h" +#include "cam_vfe_camif_lite_ver2.h" +#include "cam_vfe_rdi.h" +#include "cam_vfe_fe_ver1.h" +#include "cam_vfe_top_common.h" + +enum cam_vfe_top_ver2_module_type { + CAM_VFE_TOP_VER2_MODULE_LENS, + CAM_VFE_TOP_VER2_MODULE_STATS, + CAM_VFE_TOP_VER2_MODULE_COLOR, + CAM_VFE_TOP_VER2_MODULE_ZOOM, + CAM_VFE_TOP_VER2_MODULE_MAX, +}; + +struct cam_vfe_top_ver2_reg_offset_module_ctrl { + uint32_t reset; + uint32_t cgc_ovd; + uint32_t enable; +}; + +struct cam_vfe_top_ver2_reg_offset_common { + uint32_t hw_version; + uint32_t hw_capability; + uint32_t lens_feature; + uint32_t stats_feature; + uint32_t color_feature; + uint32_t zoom_feature; + uint32_t global_reset_cmd; + struct cam_vfe_top_ver2_reg_offset_module_ctrl + *module_ctrl[CAM_VFE_TOP_VER2_MODULE_MAX]; + uint32_t bus_cgc_ovd; + uint32_t core_cfg; + uint32_t three_D_cfg; + uint32_t violation_status; + uint32_t reg_update_cmd; +}; + +struct cam_vfe_top_ver2_hw_info { + struct cam_vfe_top_ver2_reg_offset_common *common_reg; + struct cam_vfe_camif_ver2_hw_info camif_hw_info; + struct cam_vfe_camif_lite_ver2_hw_info camif_lite_hw_info; + struct cam_vfe_rdi_ver2_hw_info rdi_hw_info; + struct cam_vfe_fe_ver1_hw_info fe_hw_info; + struct cam_vfe_top_dump_data *dump_data; + uint32_t num_mux; + uint32_t mux_type[CAM_VFE_TOP_MUX_MAX]; +}; + +int cam_vfe_top_ver2_init(struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *vfe_irq_controller, + struct cam_vfe_top **vfe_top_ptr); + +int cam_vfe_top_ver2_deinit(struct cam_vfe_top **vfe_top); + +#endif /* _CAM_VFE_TOP_VER2_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c new file mode 100644 index 0000000000..838f84cf3c --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c @@ -0,0 +1,957 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_io_util.h" +#include "cam_cdm_util.h" +#include "cam_vfe_hw_intf.h" +#include "cam_vfe_top.h" +#include "cam_vfe_top_ver3.h" +#include "cam_debug_util.h" +#include "cam_vfe_soc.h" +#include "cam_mem_mgr_api.h" + +#define CAM_VFE_HW_RESET_HW_AND_REG_VAL 0x00000001 +#define CAM_VFE_HW_RESET_HW_VAL 0x00010000 +#define CAM_VFE_LITE_HW_RESET_AND_REG_VAL 0x00000002 +#define CAM_VFE_LITE_HW_RESET_HW_VAL 0x00000001 +#define CAM_CDM_WAIT_COMP_EVENT_BIT 0x2 + +struct cam_vfe_top_ver3_common_data { + struct cam_vfe_top_ver3_hw_info *hw_info; + struct cam_hw_intf *hw_intf; + struct cam_vfe_top_ver3_reg_offset_common *common_reg; +}; + +struct cam_vfe_top_ver3_priv { + struct cam_vfe_top_ver3_common_data common_data; + struct cam_vfe_top_priv_common top_common; +}; + +static int cam_vfe_top_ver3_get_path_port_map(struct cam_vfe_top_ver3_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_path_port_map *arg = cmd_args; + struct cam_vfe_top_ver3_hw_info *hw_info = top_priv->common_data.hw_info; + int i; + + for (i = 0; i < hw_info->num_path_port_map; i++) { + arg->entry[i][0] = hw_info->path_port_map[i][0]; + arg->entry[i][1] = hw_info->path_port_map[i][1]; + } + arg->num_entries = hw_info->num_path_port_map; + + return 0; +} + +static int cam_vfe_top_ver3_mux_get_base(struct cam_vfe_top_ver3_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t mem_base = 0; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Error, Invalid cmd size"); + 
return -EINVAL; + } + + if (!cdm_args || !cdm_args->res || !top_priv || + !top_priv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "Error, Invalid args"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_changebase(); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE( + top_priv->top_common.soc_info, VFE_CORE_BASE_IDX); + if (mem_base == -1) { + CAM_ERR(CAM_ISP, "failed to get mem_base, index: %d num_reg_map: %u", + VFE_CORE_BASE_IDX, top_priv->top_common.soc_info->num_reg_map); + return -EINVAL; + } + CAM_DBG(CAM_ISP, "core %d mem_base 0x%x", + top_priv->top_common.soc_info->index, mem_base); + + cdm_util_ops->cdm_write_changebase( + cdm_args->cmd.cmd_buf_addr, mem_base); + cdm_args->cmd.used_bytes = (size * 4); + + return 0; +} + +static int cam_vfe_top_fs_update( + struct cam_vfe_top_ver3_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_fe_update_args *cmd_update = cmd_args; + + if (cmd_update->node_res->process_cmd) + return cmd_update->node_res->process_cmd(cmd_update->node_res, + CAM_ISP_HW_CMD_FE_UPDATE_IN_RD, cmd_args, arg_size); + + return 0; +} + +static int cam_vfe_top_ver3_dump_info( + struct cam_vfe_top_ver3_priv *top_priv, uint32_t cmd_type) +{ + struct cam_hw_soc_info *soc_info = top_priv->top_common.soc_info; + + if (!soc_info) { + CAM_ERR(CAM_ISP, "Null soc_info"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_NOTIFY_OVERFLOW: + CAM_INFO_RATE_LIMIT(CAM_ISP, "VFE%d src_clk_rate:%luHz", + soc_info->index, soc_info->applied_src_clk_rates.sw_client); + break; + default: + CAM_ERR(CAM_ISP, "cmd_type: %u not supported", cmd_type); 
+ break; + } + + return 0; +} + + +static int cam_vfe_top_ver3_blanking_update(uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_blanking_config *blanking_config = NULL; + struct cam_isp_resource_node *node_res = NULL; + + blanking_config = + (struct cam_isp_blanking_config *)cmd_args; + node_res = blanking_config->node_res; + + if (!node_res) { + CAM_ERR(CAM_PERF, "Invalid input res %pK", node_res); + return -EINVAL; + } + + if (!node_res->process_cmd) { + CAM_ERR(CAM_PERF, "Invalid input res process_cmd %pK", + node_res->process_cmd); + return -EINVAL; + } + + return node_res->process_cmd(node_res, + cmd_type, cmd_args, arg_size); +} + +static int cam_vfe_core_config_control( + struct cam_vfe_top_ver3_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_core_config_args *core_config = cmd_args; + + if (core_config->node_res->process_cmd) + return core_config->node_res->process_cmd(core_config->node_res, + CAM_ISP_HW_CMD_CORE_CONFIG, cmd_args, arg_size); + + return -EINVAL; +} + +static int cam_vfe_top_ver3_mux_get_reg_update( + struct cam_vfe_top_ver3_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_get_cmd_update *cmd_update = cmd_args; + + if (cmd_update->res->process_cmd) + return cmd_update->res->process_cmd(cmd_update->res, + CAM_ISP_HW_CMD_GET_REG_UPDATE, cmd_args, arg_size); + + return -EINVAL; +} + +static int cam_vfe_top_wait_comp_event(struct cam_vfe_top_ver3_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Error, Invalid arg size = %d expected = %d", + arg_size, sizeof(struct cam_isp_hw_get_cmd_update)); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res || !top_priv || + !top_priv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "Error, Invalid args"); + return 
-EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_comp_wait(); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + cdm_util_ops->cdm_write_wait_comp_event(cdm_args->cmd.cmd_buf_addr, + 0, CAM_CDM_WAIT_COMP_EVENT_BIT); + cdm_args->cmd.used_bytes = (size * 4); + + return 0; +} + +static int cam_vfe_top_add_wait_trigger(struct cam_vfe_top_ver3_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + uint32_t size = 0; + uint32_t reg_val_pair[2]; + struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args; + struct cam_cdm_utils_ops *cdm_util_ops = NULL; + struct cam_vfe_top_ver3_reg_offset_common *reg_common = NULL; + uint32_t set_cdm_trigger_event; + + if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) { + CAM_ERR(CAM_ISP, "Error, Invalid arg size = %d expected = %d", + arg_size, sizeof(struct cam_isp_hw_get_cmd_update)); + return -EINVAL; + } + + if (!cdm_args || !cdm_args->res || !top_priv || + !top_priv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "Error, Invalid args"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops; + + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + size = cdm_util_ops->cdm_required_size_reg_random(1); + /* since cdm returns dwords, we need to convert it into bytes */ + if ((size * 4) > cdm_args->cmd.size) { + CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d", + cdm_args->cmd.size, size); + return -EINVAL; + } + + if (cdm_args->trigger_cdm_en == true) + set_cdm_trigger_event = 1; + else + set_cdm_trigger_event = 0; + + reg_common = top_priv->common_data.common_reg; + reg_val_pair[0] = 
reg_common->trigger_cdm_events; + reg_val_pair[1] = set_cdm_trigger_event; + + cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr, + 1, reg_val_pair); + cdm_args->cmd.used_bytes = (size * 4); + + return 0; +} + +static int cam_vfe_top_ver3_get_data( + struct cam_vfe_top_ver3_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_resource_node *res = cmd_args; + + if (res->process_cmd) + return res->process_cmd(res, + CAM_ISP_HW_CMD_CAMIF_DATA, cmd_args, arg_size); + + return -EINVAL; +} + +int cam_vfe_top_ver3_get_hw_caps(void *device_priv, + void *args, uint32_t arg_size) +{ + struct cam_vfe_hw_get_hw_cap *vfe_cap_info = NULL; + struct cam_vfe_top_ver3_priv *vfe_top_prv = NULL; + struct cam_vfe_soc_private *vfe_soc_private = NULL; + + if (!device_priv || !args) { + CAM_ERR(CAM_ISP, + "Invalid arguments device_priv:%p, args:%p", + device_priv, args); + return -EINVAL; + } + + vfe_cap_info = (struct cam_vfe_hw_get_hw_cap *)args; + vfe_top_prv = (struct cam_vfe_top_ver3_priv *)device_priv; + + if (!vfe_top_prv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "soc_info is null"); + return -EFAULT; + } + + vfe_soc_private = (struct cam_vfe_soc_private *) + vfe_top_prv->top_common.soc_info->soc_private; + + vfe_cap_info->is_lite = (vfe_soc_private->is_ife_lite) ? true : false; + vfe_cap_info->incr = + (vfe_top_prv->top_common.hw_version) & 0x00ffff; + vfe_cap_info->minor = + ((vfe_top_prv->top_common.hw_version) >> 16) & 0x0fff; + vfe_cap_info->major = + ((vfe_top_prv->top_common.hw_version) >> 28) & 0x000f; + + return 0; +} + +int cam_vfe_top_ver3_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver3_priv *top_priv = device_priv; + struct cam_vfe_top_ver3_common_data common_data = top_priv->common_data; + + top_priv->top_common.applied_clk_rate = 0; + + /** + * Auto clock gating is enabled by default, but no harm + * in setting the value we expect. 
+ */ + CAM_DBG(CAM_ISP, "Enabling clock gating at IFE top"); + + cam_soc_util_w_mb(top_priv->top_common.soc_info, VFE_CORE_BASE_IDX, + common_data.common_reg->core_cgc_ovd_0, 0x0); + + cam_soc_util_w_mb(top_priv->top_common.soc_info, VFE_CORE_BASE_IDX, + common_data.common_reg->core_cgc_ovd_1, 0x0); + + cam_soc_util_w_mb(top_priv->top_common.soc_info, VFE_CORE_BASE_IDX, + common_data.common_reg->ahb_cgc_ovd, 0x0); + + cam_soc_util_w_mb(top_priv->top_common.soc_info, VFE_CORE_BASE_IDX, + common_data.common_reg->noc_cgc_ovd, 0x0); + + top_priv->top_common.hw_version = + cam_io_r_mb(top_priv->top_common.soc_info->reg_map[0].mem_base + + common_data.common_reg->hw_version); + + return 0; +} + +int cam_vfe_top_ver3_reset(void *device_priv, + void *reset_core_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver3_priv *top_priv = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + struct cam_vfe_top_ver3_reg_offset_common *reg_common = NULL; + uint32_t *reset_reg_args = reset_core_args; + uint32_t reset_reg_val; + + if (!top_priv || !reset_reg_args) { + CAM_ERR(CAM_ISP, "Invalid arguments"); + return -EINVAL; + } + + soc_info = top_priv->top_common.soc_info; + reg_common = top_priv->common_data.common_reg; + + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Invalid soc_private"); + return -ENODEV; + } + + switch (*reset_reg_args) { + case CAM_VFE_HW_RESET_HW_AND_REG: + if (!soc_private->is_ife_lite) + reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL; + else + reset_reg_val = CAM_VFE_LITE_HW_RESET_AND_REG_VAL; + break; + default: + if (!soc_private->is_ife_lite) + reset_reg_val = CAM_VFE_HW_RESET_HW_VAL; + else + reset_reg_val = CAM_VFE_LITE_HW_RESET_HW_VAL; + break; + } + + CAM_DBG(CAM_ISP, "reset reg value: 0x%x", reset_reg_val); + + /* Mask All the IRQs except RESET */ + if (!soc_private->is_ife_lite) + cam_io_w_mb(0x00000001, + CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 
+ 0x3C); + else + cam_io_w_mb(0x00020000, + CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + + 0x28); + + /* Reset HW */ + cam_io_w_mb(reset_reg_val, + CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + + reg_common->global_reset_cmd); + + CAM_DBG(CAM_ISP, "Reset HW exit"); + return 0; +} + +int cam_vfe_top_ver3_reserve(void *device_priv, + void *reserve_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver3_priv *top_priv; + struct cam_vfe_acquire_args *args; + struct cam_vfe_hw_vfe_in_acquire_args *acquire_args; + uint32_t i; + int rc = -EINVAL; + + if (!device_priv || !reserve_args) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver3_priv *)device_priv; + args = (struct cam_vfe_acquire_args *)reserve_args; + acquire_args = &args->vfe_in; + + CAM_DBG(CAM_ISP, "res id %d", acquire_args->res_id); + + + for (i = 0; i < top_priv->top_common.num_mux; i++) { + if (top_priv->top_common.mux_rsrc[i].res_id == + acquire_args->res_id && + top_priv->top_common.mux_rsrc[i].res_state == + CAM_ISP_RESOURCE_STATE_AVAILABLE) { + + if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) { + rc = cam_vfe_camif_ver3_acquire_resource( + &top_priv->top_common.mux_rsrc[i], + args); + if (rc) + break; + } + + if (acquire_args->res_id >= CAM_ISP_HW_VFE_IN_RDI0 && + acquire_args->res_id < CAM_ISP_HW_VFE_IN_MAX) { + rc = cam_vfe_camif_lite_ver3_acquire_resource( + &top_priv->top_common.mux_rsrc[i], + args); + if (rc) + break; + } + + if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_RD) { + rc = cam_vfe_fe_ver1_acquire_resource( + &top_priv->top_common.mux_rsrc[i], + args); + if (rc) + break; + } + + top_priv->top_common.mux_rsrc[i].cdm_ops = + acquire_args->cdm_ops; + top_priv->top_common.mux_rsrc[i].tasklet_info = + args->tasklet; + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_RESERVED; + acquire_args->rsrc_node = + &top_priv->top_common.mux_rsrc[i]; + + rc = 0; + CAM_DBG(CAM_ISP, "VFE[%u] 
Res [id:%d name:%s] reserved", + top_priv->common_data.hw_intf->hw_idx, + acquire_args->res_id, + top_priv->top_common.mux_rsrc[i].res_name); + break; + } + } + + return rc; + +} + +int cam_vfe_top_ver3_release(void *device_priv, + void *release_args, uint32_t arg_size) +{ + struct cam_isp_resource_node *mux_res; + + if (!device_priv || !release_args) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + mux_res = (struct cam_isp_resource_node *)release_args; + + CAM_DBG(CAM_ISP, "%s Resource in state %d", mux_res->res_name, + mux_res->res_state); + if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "Error, Resource in Invalid res_state :%d", + mux_res->res_state); + return -EINVAL; + } + mux_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + return 0; +} + +int cam_vfe_top_ver3_start(void *device_priv, + void *start_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver3_priv *top_priv; + struct cam_isp_resource_node *mux_res; + struct cam_hw_info *hw_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + int rc = 0; + + if (!device_priv || !start_args) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver3_priv *)device_priv; + soc_info = top_priv->top_common.soc_info; + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error soc_private NULL"); + return -EINVAL; + } + + mux_res = (struct cam_isp_resource_node *)start_args; + hw_info = (struct cam_hw_info *)mux_res->hw_intf->hw_priv; + + if (hw_info->hw_state == CAM_HW_STATE_POWER_UP) { + rc = cam_vfe_top_apply_clock_start_stop(&top_priv->top_common); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in applying start clock rc:%d", + hw_info->soc_info.index, rc); + return rc; + } + + rc = cam_vfe_top_apply_bw_start_stop(&top_priv->top_common); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%d Failed in applying start bw 
rc:%d", + hw_info->soc_info.index, rc); + return rc; + } + + if (mux_res->start) { + rc = mux_res->start(mux_res); + } else { + CAM_ERR(CAM_ISP, + "Invalid res id:%d", mux_res->res_id); + rc = -EINVAL; + } + } else { + CAM_ERR(CAM_ISP, "VFE HW not powered up"); + rc = -EPERM; + } + + return rc; +} + +int cam_vfe_top_ver3_stop(void *device_priv, + void *stop_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver3_priv *top_priv; + struct cam_isp_resource_node *mux_res; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + int i, rc = 0; + + if (!device_priv || !stop_args) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver3_priv *)device_priv; + mux_res = (struct cam_isp_resource_node *)stop_args; + soc_info = top_priv->top_common.soc_info; + soc_private = soc_info->soc_private; + + if (mux_res->res_id < CAM_ISP_HW_VFE_IN_MAX) { + rc = mux_res->stop(mux_res); + } else { + CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id); + return -EINVAL; + } + + if (!rc) { + for (i = 0; i < top_priv->top_common.num_mux; i++) { + if (top_priv->top_common.mux_rsrc[i].res_id == + mux_res->res_id) { + top_priv->top_common.req_clk_rate[i] = 0; + memset(&top_priv->top_common.req_axi_vote[i], + 0, sizeof(struct cam_axi_vote)); + top_priv->top_common.axi_vote_control[i] = + CAM_ISP_BW_CONTROL_EXCLUDE; + break; + } + } + } + + soc_private->ife_clk_src = 0; + + return rc; +} + +int cam_vfe_top_ver3_read(void *device_priv, + void *read_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_vfe_top_ver3_write(void *device_priv, + void *write_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_vfe_top_ver3_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_vfe_top_ver3_priv *top_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + + if (!device_priv || !cmd_args) 
{ + CAM_ERR(CAM_ISP, "Error, Invalid arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver3_priv *)device_priv; + soc_info = top_priv->top_common.soc_info; + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "Error soc_private NULL"); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_CHANGE_BASE: + rc = cam_vfe_top_ver3_mux_get_base(top_priv, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_vfe_top_ver3_mux_get_reg_update(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CAMIF_DATA: + rc = cam_vfe_top_ver3_get_data(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CLOCK_UPDATE: + rc = cam_vfe_top_clock_update(&top_priv->top_common, cmd_args, + arg_size); + break; + case CAM_ISP_HW_NOTIFY_OVERFLOW: + rc = cam_vfe_top_ver3_dump_info(top_priv, cmd_type); + break; + case CAM_ISP_HW_CMD_FE_UPDATE_IN_RD: + rc = cam_vfe_top_fs_update(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_BW_UPDATE: + rc = cam_vfe_top_bw_update(soc_private, &top_priv->top_common, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BW_UPDATE_V2: + rc = cam_vfe_top_bw_update_v2(soc_private, + &top_priv->top_common, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BW_CONTROL: + rc = cam_vfe_top_bw_control(soc_private, &top_priv->top_common, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_CORE_CONFIG: + rc = cam_vfe_core_config_control(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_ADD_WAIT: + rc = cam_vfe_top_wait_comp_event(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_ADD_WAIT_TRIGGER: + rc = cam_vfe_top_add_wait_trigger(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BLANKING_UPDATE: + rc = cam_vfe_top_ver3_blanking_update(cmd_type, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_PATH_PORT_MAP: + rc = cam_vfe_top_ver3_get_path_port_map(top_priv, cmd_args, + arg_size); + break; + case 
CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE: + rc = cam_vfe_top_apply_clk_bw_update(&top_priv->top_common, cmd_args, arg_size); + break; + default: + rc = -EINVAL; + CAM_ERR(CAM_ISP, "Error, Invalid cmd:%d", cmd_type); + break; + } + + return rc; +} + +int cam_vfe_top_ver3_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *vfe_irq_controller, + struct cam_vfe_top **vfe_top_ptr) +{ + int i, j, rc = 0; + struct cam_vfe_top_ver3_priv *top_priv = NULL; + struct cam_vfe_top_ver3_hw_info *ver3_hw_info = top_hw_info; + struct cam_vfe_top *vfe_top; + + vfe_top = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_top), GFP_KERNEL); + if (!vfe_top) { + CAM_DBG(CAM_ISP, "Error, Failed to alloc for vfe_top"); + rc = -ENOMEM; + goto end; + } + + top_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_top_ver3_priv), + GFP_KERNEL); + if (!top_priv) { + CAM_DBG(CAM_ISP, "Error, Failed to alloc for vfe_top_priv"); + rc = -ENOMEM; + goto free_vfe_top; + } + + vfe_top->top_priv = top_priv; + top_priv->top_common.applied_clk_rate = 0; + if (ver3_hw_info->num_mux > CAM_VFE_TOP_MUX_MAX) { + CAM_ERR(CAM_ISP, "Invalid number of input rsrc: %d, max: %d", + ver3_hw_info->num_mux, CAM_VFE_TOP_MUX_MAX); + rc = -EINVAL; + goto free_top_priv; + } + + top_priv->top_common.num_mux = ver3_hw_info->num_mux; + top_priv->common_data.hw_info = top_hw_info; + + for (i = 0, j = 0; i < top_priv->top_common.num_mux && + j < CAM_VFE_RDI_VER2_MAX; i++) { + top_priv->top_common.mux_rsrc[i].res_type = + CAM_ISP_RESOURCE_VFE_IN; + top_priv->top_common.mux_rsrc[i].hw_intf = hw_intf; + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_AVAILABLE; + top_priv->top_common.req_clk_rate[i] = 0; + + if (ver3_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_3_0) { + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_CAMIF; + + rc = cam_vfe_camif_ver3_init(hw_intf, soc_info, + &ver3_hw_info->camif_hw_info, + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + 
scnprintf(top_priv->top_common.mux_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "CAMIF"); + if (rc) + goto deinit_resources; + } else if (ver3_hw_info->mux_type[i] == + CAM_VFE_PDLIB_VER_1_0) { + /* set the PDLIB resource id */ + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_PDLIB; + + rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info, + &ver3_hw_info->pdlib_hw_info, + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + scnprintf(top_priv->top_common.mux_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "PDLIB"); + if (rc) + goto deinit_resources; + } else if (ver3_hw_info->mux_type[i] == + CAM_VFE_IN_RD_VER_1_0) { + /* set the RD resource id */ + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_RD; + + rc = cam_vfe_fe_ver1_init(hw_intf, soc_info, + &ver3_hw_info->fe_hw_info, + &top_priv->top_common.mux_rsrc[i]); + scnprintf(top_priv->top_common.mux_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "IN_RD"); + if (rc) + goto deinit_resources; + } else if (ver3_hw_info->mux_type[i] == + CAM_VFE_RDI_VER_1_0) { + /* set the RDI resource id */ + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_RDI0 + j; + + scnprintf(top_priv->top_common.mux_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "RDI_%d", j); + + rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info, + ver3_hw_info->rdi_hw_info[j++], + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + if (rc) + goto deinit_resources; + } else if (ver3_hw_info->mux_type[i] == + CAM_VFE_LCR_VER_1_0) { + /* set the LCR resource id */ + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_LCR; + + rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info, + &ver3_hw_info->lcr_hw_info, + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + scnprintf(top_priv->top_common.mux_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "LCR"); + if (rc) + goto deinit_resources; + } else { + CAM_WARN(CAM_ISP, "Invalid mux type: %u", + ver3_hw_info->mux_type[i]); + } + } + + vfe_top->hw_ops.get_hw_caps = 
cam_vfe_top_ver3_get_hw_caps; + vfe_top->hw_ops.init = cam_vfe_top_ver3_init_hw; + vfe_top->hw_ops.reset = cam_vfe_top_ver3_reset; + vfe_top->hw_ops.reserve = cam_vfe_top_ver3_reserve; + vfe_top->hw_ops.release = cam_vfe_top_ver3_release; + vfe_top->hw_ops.start = cam_vfe_top_ver3_start; + vfe_top->hw_ops.stop = cam_vfe_top_ver3_stop; + vfe_top->hw_ops.read = cam_vfe_top_ver3_read; + vfe_top->hw_ops.write = cam_vfe_top_ver3_write; + vfe_top->hw_ops.process_cmd = cam_vfe_top_ver3_process_cmd; + *vfe_top_ptr = vfe_top; + + top_priv->top_common.soc_info = soc_info; + top_priv->common_data.hw_intf = hw_intf; + top_priv->top_common.hw_idx = hw_intf->hw_idx; + top_priv->common_data.common_reg = ver3_hw_info->common_reg; + + return rc; + +deinit_resources: + for (--i; i >= 0; i--) { + if (ver3_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_3_0) { + if (cam_vfe_camif_ver3_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, "Camif Deinit failed"); + } else if (ver3_hw_info->mux_type[i] == CAM_VFE_IN_RD_VER_1_0) { + if (cam_vfe_fe_ver1_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, "Camif fe Deinit failed"); + } else { + if (cam_vfe_camif_lite_ver3_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, + "Camif lite res id %d Deinit failed", + top_priv->top_common.mux_rsrc[i] + .res_id); + } + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + } + +free_top_priv: + CAM_MEM_FREE(vfe_top->top_priv); +free_vfe_top: + CAM_MEM_FREE(vfe_top); +end: + return rc; +} + +int cam_vfe_top_ver3_deinit(struct cam_vfe_top **vfe_top_ptr) +{ + int i, rc = 0; + struct cam_vfe_top_ver3_priv *top_priv = NULL; + struct cam_vfe_top *vfe_top; + + if (!vfe_top_ptr) { + CAM_ERR(CAM_ISP, "Error, Invalid input"); + return -EINVAL; + } + + vfe_top = *vfe_top_ptr; + if (!vfe_top) { + CAM_ERR(CAM_ISP, "Error, vfe_top NULL"); + return -ENODEV; + } + + top_priv = vfe_top->top_priv; + if (!top_priv) { + CAM_ERR(CAM_ISP, "Error, 
vfe_top_priv NULL"); + rc = -ENODEV; + goto free_vfe_top; + } + + for (i = 0; i < top_priv->top_common.num_mux; i++) { + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + if (top_priv->top_common.mux_rsrc[i].res_type == + CAM_VFE_CAMIF_VER_3_0) { + rc = cam_vfe_camif_ver3_deinit( + &top_priv->top_common.mux_rsrc[i]); + if (rc) + CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d", + rc); + } else if (top_priv->top_common.mux_rsrc[i].res_type == + CAM_VFE_IN_RD_VER_1_0) { + rc = cam_vfe_fe_ver1_deinit( + &top_priv->top_common.mux_rsrc[i]); + if (rc) + CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d", + rc); + } else { + rc = cam_vfe_camif_lite_ver3_deinit( + &top_priv->top_common.mux_rsrc[i]); + if (rc) + CAM_ERR(CAM_ISP, + "Camif lite res id %d Deinit failed", + top_priv->top_common.mux_rsrc[i] + .res_id); + } + } + + CAM_MEM_FREE(vfe_top->top_priv); + +free_vfe_top: + CAM_MEM_FREE(vfe_top); + *vfe_top_ptr = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h new file mode 100644 index 0000000000..5a65ede267 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
/*
 * cam_vfe_top_ver3_reg_offset_common - Register offsets (relative to the
 * VFE core base) for the common/top register block of a ver3 VFE.
 * Populated per-target by the hw_info tables.
 */
struct cam_vfe_top_ver3_reg_offset_common {
	/* Version / capability readout registers */
	uint32_t hw_version;
	uint32_t titan_version;
	uint32_t hw_capability;
	uint32_t lens_feature;
	uint32_t stats_feature;
	uint32_t color_feature;
	uint32_t zoom_feature;
	/* Reset control */
	uint32_t global_reset_cmd;
	/* Clock-gating override registers */
	uint32_t core_cgc_ovd_0;
	uint32_t core_cgc_ovd_1;
	uint32_t ahb_cgc_ovd;
	uint32_t noc_cgc_ovd;
	uint32_t bus_cgc_ovd;
	/* Core configuration and reg-update trigger */
	uint32_t core_cfg_0;
	uint32_t core_cfg_1;
	uint32_t reg_update_cmd;
	uint32_t trigger_cdm_events;
	/* Error / status registers */
	uint32_t violation_status;
	uint32_t custom_frame_idx;
	uint32_t dsp_status;
	/* Diagnostic sensor status */
	uint32_t diag_config;
	uint32_t diag_sensor_status_0;
	uint32_t diag_sensor_status_1;
	uint32_t bus_overflow_status;
	/* Debug status registers (top_debug_cfg selects, 0-13 report) */
	uint32_t top_debug_cfg;
	uint32_t top_debug_0;
	uint32_t top_debug_1;
	uint32_t top_debug_2;
	uint32_t top_debug_3;
	uint32_t top_debug_4;
	uint32_t top_debug_5;
	uint32_t top_debug_6;
	uint32_t top_debug_7;
	uint32_t top_debug_8;
	uint32_t top_debug_9;
	uint32_t top_debug_10;
	uint32_t top_debug_11;
	uint32_t top_debug_12;
	uint32_t top_debug_13;
};
/*
 * cam_vfe_top_ver3_hw_info - Per-target description of a ver3 VFE top:
 * common register map, per-path (camif / PDLIB / RDI / LCR / fetch-engine)
 * hw tables, and the mux/path-to-port mapping consumed at init time.
 */
struct cam_vfe_top_ver3_hw_info {
	/* Common/top register offsets for this target */
	struct cam_vfe_top_ver3_reg_offset_common  *common_reg;
	/* Per-input-path hardware tables */
	struct cam_vfe_camif_ver3_hw_info           camif_hw_info;
	struct cam_vfe_camif_lite_ver3_hw_info      pdlib_hw_info;
	struct cam_vfe_camif_lite_ver3_hw_info
		*rdi_hw_info[CAM_VFE_RDI_VER2_MAX];
	struct cam_vfe_camif_lite_ver3_hw_info      lcr_hw_info;
	struct cam_vfe_fe_ver1_hw_info              fe_hw_info;
	/* Number of valid entries in mux_type[] */
	uint32_t                                    num_mux;
	/* Number of valid rows in path_port_map[] */
	uint32_t                                    num_path_port_map;
	/* Mux resource type per slot (CAM_VFE_CAMIF_VER_3_0, etc.) */
	uint32_t mux_type[CAM_VFE_TOP_MUX_MAX];
	/* [i][0] = input path id, [i][1] = output port id */
	uint32_t path_port_map[CAM_ISP_HW_PATH_PORT_MAP_MAX][2];
};
/*
 * cam_vfe_top_ver4_priv - Private state of a ver4 VFE top block, stored in
 * cam_vfe_top::top_priv. Holds the shared top state plus ver4-only
 * bookkeeping (perf counters, SOF timestamp mirror addresses, diag config).
 */
struct cam_vfe_top_ver4_priv {
	struct cam_vfe_top_ver4_common_data      common_data;
	struct cam_vfe_top_priv_common           top_common;
	/* Set from IRQ context when a bus overflow is seen */
	atomic_t                                 overflow_pending;
	/* Scratch buffer for debug-status log formatting */
	uint8_t                                  log_buf[CAM_VFE_LEN_LOG_BUF];
	uint32_t                                 sof_cnt;
	/* Per-counter enable/value config written at start */
	struct cam_vfe_top_ver4_perf_counter_cfg perf_counters[CAM_VFE_PERF_CNT_MAX];
	/* CSID-provided qtimer mirror addresses for the primary SOF */
	struct cam_vfe_top_ver4_prim_sof_ts_reg_addr sof_ts_reg_addr;
	bool                                     enable_ife_frame_irqs;
	/* Cached CAMIF debug cfg (sensor diag enable bit lives here) */
	uint64_t                                 diag_config_debug_val_0;
};
/*
 * cam_vfe_top_ver4_pdaf_lcr_config - Emit CDM commands that program the
 * PDAF input-config registers for LCR/RDI routing.
 *
 * @top_priv: ver4 top private data.
 * @cmd_args: struct cam_isp_hw_get_cmd_update; ->data carries a
 *            struct cam_isp_lcr_rdi_cfg_args, ->res->cdm_ops the CDM helper
 *            vtable, ->cmd the destination command buffer.
 * @arg_size: unused here.
 *
 * Packs one (init: two) register/value pairs into the caller's command
 * buffer via cdm_write_regrandom and records the bytes consumed.
 * Returns 0 on success (including "PDAF LCR unsupported on this target"),
 * -EINVAL on bad args, missing CDM ops, unsupported src resource, or an
 * undersized command buffer.
 */
static int cam_vfe_top_ver4_pdaf_lcr_config(struct cam_vfe_top_ver4_priv *top_priv,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_vfe_top_ver4_hw_info         *hw_info;
	struct cam_isp_hw_get_cmd_update        *cdm_args = NULL;
	struct cam_cdm_utils_ops                *cdm_util_ops = NULL;
	uint32_t                                 i;
	uint32_t                                 reg_val_idx = 0;
	uint32_t                                 num_reg_vals;
	uint32_t                                 reg_val_pair[4];
	struct cam_isp_lcr_rdi_cfg_args         *cfg_args;
	size_t                                   size;

	if (!cmd_args || !top_priv) {
		CAM_ERR(CAM_ISP, "Error, Invalid args");
		return -EINVAL;
	}

	cdm_args = (struct cam_isp_hw_get_cmd_update *)cmd_args;
	if (!cdm_args->res) {
		CAM_ERR(CAM_ISP, "VFE:%u Error, Invalid res", top_priv->top_common.hw_idx);
		return -EINVAL;
	}

	hw_info = top_priv->common_data.hw_info;
	/* Not an error: some targets simply have no PDAF LCR resources */
	if (!hw_info->num_pdaf_lcr_res || !hw_info->pdaf_lcr_res_mask) {
		CAM_DBG(CAM_ISP, "VFE:%u PDAF LCR is not supported", top_priv->top_common.hw_idx);
		return 0;
	}

	cfg_args = (struct cam_isp_lcr_rdi_cfg_args *)cdm_args->data;
	cdm_util_ops =
		(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
	if (!cdm_util_ops) {
		CAM_ERR(CAM_ISP, "VFE:%u Invalid CDM ops", top_priv->top_common.hw_idx);
		return -EINVAL;
	}

	/* Find the mask entry for the requested IFE src resource */
	for (i = 0; i < hw_info->num_pdaf_lcr_res; i++)
		if (cfg_args->ife_src_res_id == hw_info->pdaf_lcr_res_mask[i].res_id)
			break;

	if (i == hw_info->num_pdaf_lcr_res) {
		CAM_ERR(CAM_ISP, "VFE:%u Res :%d src_res :%u is not supported for mux",
			top_priv->top_common.hw_idx, cfg_args->rdi_lcr_cfg->res_id,
			cfg_args->ife_src_res_id);
		return -EINVAL;
	}

	/* Initial config additionally clears pdaf_input_cfg_1 */
	if (cfg_args->is_init)
		num_reg_vals = 2;
	else
		num_reg_vals = 1;

	size = cdm_util_ops->cdm_required_size_reg_random(num_reg_vals);
	/* since cdm returns dwords, we need to convert it into bytes */
	if ((size * 4) > cdm_args->cmd.size) {
		CAM_ERR(CAM_ISP, "VFE:%u buf size:%d is not sufficient, expected: %d",
			top_priv->top_common.hw_idx, cdm_args->cmd.size, (size*4));
		return -EINVAL;
	}

	if (cfg_args->is_init) {
		reg_val_pair[reg_val_idx++] = hw_info->common_reg->pdaf_input_cfg_1;
		reg_val_pair[reg_val_idx++] = 0;
	}

	reg_val_pair[reg_val_idx++] = hw_info->common_reg->pdaf_input_cfg_0;
	reg_val_pair[reg_val_idx] = hw_info->pdaf_lcr_res_mask[i].val;
	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
		num_reg_vals, reg_val_pair);
	cdm_args->cmd.used_bytes = size * 4;

	return 0;
}
/*
 * cam_vfe_top_ver4_get_time_stamp - Read a 64-bit hardware timestamp from
 * a hi/lo register pair.
 *
 * @mem_base:          Mapped VFE core base.
 * @timestamp_hi_addr: Offset of the high 32 bits.
 * @timestamp_lo_addr: Offset of the low 32 bits.
 *
 * NOTE(review): mul_u64_u32_div(v, F, F) with the same factor on both
 * sides is an identity (multiply then divide by CAM_VFE_QTIMER_DIV_FACTOR)
 * — presumably a qtimer-to-ns conversion was intended here; confirm the
 * intended numerator/denominator against the qtimer clock rate.
 */
static uint64_t cam_vfe_top_ver4_get_time_stamp(void __iomem *mem_base,
	uint32_t timestamp_hi_addr, uint32_t timestamp_lo_addr)
{
	uint64_t timestamp_val, time_hi, time_lo;

	time_hi = cam_io_r_mb(mem_base + timestamp_hi_addr);
	time_lo = cam_io_r_mb(mem_base + timestamp_lo_addr);

	timestamp_val = (time_hi << 32) | time_lo;

	return mul_u64_u32_div(timestamp_val,
		CAM_VFE_QTIMER_DIV_FACTOR,
		CAM_VFE_QTIMER_DIV_FACTOR);
}
+static void cam_vfe_top_ver4_read_debug_err_vectors( + struct cam_vfe_mux_ver4_data *vfe_priv, + enum cam_vfe_top_ver4_debug_reg_type reg_type, + uint32_t irq_status) +{ + struct cam_vfe_top_ver4_module_desc *module_desc; + struct cam_vfe_top_ver4_priv *top_priv = vfe_priv->top_priv; + struct cam_vfe_top_ver4_common_data *common_data = &top_priv->common_data; + struct cam_hw_soc_info *soc_info; + void __iomem *base; + int i, j, k; + char *hm_type; + uint32_t temp, debug_cfg; + uint32_t debug_err_vec_ts_lb, debug_err_vec_ts_mb; + uint32_t *debug_err_vec_irq; + uint32_t debug_vec_error_reg[ + CAM_VFE_TOP_DEBUG_VEC_ERR_REGS] = {0}; + uint64_t timestamp; + size_t len = 0; + uint8_t log_buf[CAM_VFE_TOP_LOG_BUF_LEN]; + + switch (reg_type) { + case VFE_TOP_DEBUG_REG: + module_desc = common_data->hw_info->ipp_module_desc; + hm_type = "MAIN_PP"; + debug_err_vec_ts_lb = common_data->common_reg->top_debug_err_vec_ts_lb; + debug_err_vec_ts_mb = common_data->common_reg->top_debug_err_vec_ts_mb; + debug_err_vec_irq = common_data->common_reg->top_debug_err_vec_irq; + break; + case VFE_BAYER_DEBUG_REG: + module_desc = common_data->hw_info->bayer_module_desc; + hm_type = "BAYER"; + debug_err_vec_ts_lb = common_data->common_reg->bayer_debug_err_vec_ts_lb; + debug_err_vec_ts_mb = common_data->common_reg->bayer_debug_err_vec_ts_mb; + debug_err_vec_irq = common_data->common_reg->bayer_debug_err_vec_irq; + break; + default: + return; + } + + soc_info = top_priv->top_common.soc_info; + base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + /* Read existing debug cfg value so we don't overwite */ + debug_cfg = cam_io_r_mb(base + common_data->common_reg->top_debug_cfg); + + for (i = 0; i < CAM_VFE_TOP_DEBUG_VEC_FIFO_SIZE ; i++) { + cam_io_w_mb((debug_cfg | (i << CAM_VFE_TOP_DEBUG_TIMESTAMP_IRQ_SEL_SHIFT)), + base + common_data->common_reg->top_debug_cfg); + + timestamp = cam_vfe_top_ver4_get_time_stamp(base, debug_err_vec_ts_mb, + debug_err_vec_ts_lb); + + if (!timestamp) { + 
CAM_DBG(CAM_ISP, "Debug IRQ vectors already read, skip"); + return; + } + + for (j = 0; j < CAM_VFE_TOP_DEBUG_VEC_ERR_REGS; j++) { + if (debug_err_vec_irq[j] == 0) + break; + + temp = cam_io_r_mb(base + debug_err_vec_irq[j]); + temp ^= debug_vec_error_reg[j]; + debug_vec_error_reg[j] |= temp; + k = 0; + + while (temp) { + if (temp & 0x1) { + CAM_INFO_BUF(CAM_ISP, log_buf, CAM_VFE_TOP_LOG_BUF_LEN, + &len, "%s ", module_desc[k + (j * 32)].desc); + } + temp >>= 1; + k++; + } + } + CAM_INFO(CAM_ISP, + "%s HM CLC(s) error that occurred in time order %d at timestamp %lld: %s", + hm_type, i, timestamp, log_buf); + memset(log_buf, 0x0, sizeof(uint8_t) * CAM_VFE_TOP_LOG_BUF_LEN); + } + + cam_io_w_mb((debug_cfg | (0x1 << CAM_VFE_TOP_DEBUG_TIMESTAMP_IRQ_CLEAR_SHIFT)), + base + common_data->common_reg->top_debug_cfg); +} + +static void cam_vfe_top_ver4_print_error_irq_timestamps( + struct cam_vfe_mux_ver4_data *vfe_priv, + uint32_t irq_status) +{ + int i; + struct cam_vfe_top_ver4_priv *top_priv = vfe_priv->top_priv; + + if (!(top_priv->common_data.common_reg->capabilities & + CAM_VFE_COMMON_CAP_DEBUG_ERR_VEC)) + return; + + for (i = 0; i < VFE_DEBUG_REG_MAX; i++) + cam_vfe_top_ver4_read_debug_err_vectors(vfe_priv, i, irq_status); +} + +static void cam_vfe_top_ver4_check_module_idle( + struct cam_vfe_top_ver4_debug_reg_info *debug_reg, + struct cam_vfe_top_ver4_priv *top_priv, + uint32_t *idle_status, bool *is_mc) +{ + struct cam_vfe_top_ver4_reg_offset_common *common_reg; + struct cam_hw_soc_info *soc_info; + void __iomem *base; + uint32_t val, shift; + + if (unlikely(!debug_reg || !top_priv || !idle_status || !is_mc)) + return; + + if (!debug_reg->debug_idle_reg_addr || !debug_reg->debug_idle_bitmask) + return; + + soc_info = top_priv->top_common.soc_info; + common_reg = top_priv->common_data.common_reg; + base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + + val = cam_io_r_mb(base + debug_reg->debug_idle_reg_addr); + + shift = ffs(debug_reg->debug_idle_bitmask) - 1; + + 
*is_mc = !(debug_reg->debug_idle_bitmask && !(debug_reg->debug_idle_bitmask + & (debug_reg->debug_idle_bitmask - 1))); + + *idle_status = ((val & debug_reg->debug_idle_bitmask) >> shift); +} + +static void cam_vfe_top_ver4_check_module_status( + uint32_t num_reg, uint64_t *reg_val, + struct cam_vfe_top_ver4_priv *top_priv, + struct cam_vfe_top_ver4_debug_reg_info (*status_list)[][8]) +{ + bool found = false, is_mc; + uint32_t i, j, idle_status; + uint64_t val = 0; + size_t len = 0; + uint8_t line_buf[CAM_VFE_LEN_LOG_BUF], log_buf[1024]; + + if (!status_list) + return; + + for (i = 0; i < num_reg; i++) { + /* Check for ideal values */ + if ((reg_val[i] == 0) || (reg_val[i] == 0x55555555)) + continue; + + for (j = 0; j < 8; j++) { + val = reg_val[i] >> (*status_list)[i][j].shift; + val &= 0xF; + if (val == 0 || val == 5) + continue; + + cam_vfe_top_ver4_check_module_idle(&(*status_list)[i][j], top_priv, + &idle_status, &is_mc); + + snprintf(line_buf, CAM_VFE_LEN_LOG_BUF, + "\n\t%s [I:%llu V:%llu R:%llu] idle: 0x%x, is_mc: %s", + (*status_list)[i][j].clc_name, ((val >> 2) & 1), + ((val >> 1) & 1), (val & 1), idle_status, CAM_BOOL_TO_YESNO(is_mc)); + + strlcat(log_buf, line_buf, 1024); + found = true; + } + if (found) + CAM_INFO_RATE_LIMIT(CAM_ISP, "Check config for Debug%u - %s", i, log_buf); + len = 0; + found = false; + memset(log_buf, 0, sizeof(uint8_t)*1024); + } +} + +static void cam_vfe_top_dump_perf_counters( + const char *event, + const char *res_name, + struct cam_vfe_top_ver4_priv *top_priv) +{ + int i; + void __iomem *mem_base; + struct cam_hw_soc_info *soc_info; + struct cam_vfe_top_ver4_reg_offset_common *common_reg; + + soc_info = top_priv->top_common.soc_info; + common_reg = top_priv->common_data.common_reg; + mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + + for (i = 0; i < top_priv->common_data.common_reg->num_perf_counters; i++) { + if (top_priv->perf_counters[i].dump_counter) { + CAM_INFO(CAM_ISP, + "VFE [%u] on %s %s counter: %d 
pixel_cnt: %d line_cnt: %d stall_cnt: %d always_cnt: %d status: 0x%x", + top_priv->common_data.hw_intf->hw_idx, res_name, event, (i + 1), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_pix_count), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_line_count), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_stall_count), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_always_count), + cam_io_r_mb(mem_base + + common_reg->perf_count_reg[i].perf_count_status)); + } + } +} + +static void cam_vfe_top_ver4_print_debug_reg_status( + struct cam_vfe_top_ver4_priv *top_priv, + enum cam_vfe_top_ver4_debug_reg_type reg_type) +{ + struct cam_vfe_top_ver4_reg_offset_common *common_reg; + struct cam_vfe_top_ver4_debug_reg_info (*debug_reg_info)[][8]; + uint32_t val = 0; + uint32_t num_reg = 0; + uint32_t i = 0, j; + uint32_t *debug_reg; + size_t len = 0; + uint8_t *log_buf; + uint64_t reg_val[CAM_VFE_TOP_DBG_REG_MAX] = {0}; + struct cam_hw_soc_info *soc_info; + void __iomem *base; + char *reg_name; + + soc_info = top_priv->top_common.soc_info; + common_reg = top_priv->common_data.common_reg; + base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + log_buf = top_priv->log_buf; + + switch (reg_type) { + case VFE_TOP_DEBUG_REG: + debug_reg = common_reg->top_debug; + debug_reg_info = top_priv->common_data.hw_info->top_debug_reg_info; + num_reg = common_reg->num_top_debug_reg; + reg_name = "TOP"; + break; + case VFE_BAYER_DEBUG_REG: + debug_reg = common_reg->bayer_debug; + debug_reg_info = top_priv->common_data.hw_info->bayer_debug_reg_info; + num_reg = common_reg->num_bayer_debug_reg; + reg_name = "BAYER"; + break; + default: + return; + } + + if (!debug_reg || !debug_reg_info) + return; + + while (i < num_reg) { + for (j = 0; j < 4 && i < num_reg; j++, i++) { + val = cam_io_r(base + debug_reg[i]); + reg_val[i] = (uint64_t)val; + CAM_INFO_BUF(CAM_ISP, log_buf, CAM_VFE_LEN_LOG_BUF, &len, + "VFE[%u] status %2d : 0x%08x", 
soc_info->index, i, val); + } + CAM_INFO(CAM_ISP, "VFE[%u]: %s Debug Status: %s", + soc_info->index, reg_name, log_buf); + len = 0; + } + + cam_vfe_top_ver4_check_module_status(num_reg, reg_val, + top_priv, debug_reg_info); + +} + +static inline void cam_vfe_top_ver4_print_debug_regs( + struct cam_vfe_top_ver4_priv *top_priv) +{ + int i; + + for (i = 0; i < VFE_DEBUG_REG_MAX; i++) + cam_vfe_top_ver4_print_debug_reg_status(top_priv, i); + + cam_vfe_top_dump_perf_counters("ERROR", "", top_priv); +} + +static void cam_vfe_top_ver4_print_pdaf_violation_info( + struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload *payload, uint32_t desc_idx) +{ + struct cam_vfe_top_ver4_priv *top_priv; + struct cam_hw_soc_info *soc_info; + struct cam_vfe_top_ver4_common_data *common_data; + void __iomem *base; + uint32_t val = 0; + uint32_t i = 0; + + top_priv = vfe_priv->top_priv; + common_data = &top_priv->common_data; + soc_info = top_priv->top_common.soc_info; + base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + val = cam_io_r(base + + common_data->common_reg->pdaf_violation_status), + + CAM_DBG(CAM_ISP, "VFE[%u] PDAF HW Violation status 0x%x", soc_info->index, val); + + for (i = 0; i < common_data->hw_info->num_pdaf_violation_errors; i++) { + if (common_data->hw_info->pdaf_violation_desc[i].bitmask & val) { + CAM_ERR(CAM_ISP, "VFE[%u] %s occurred at [%llu: %09llu]", + soc_info->index, + common_data->hw_info->top_err_desc[desc_idx].err_name, + payload->ts.mono_time.tv_sec, + payload->ts.mono_time.tv_nsec); + CAM_ERR(CAM_ISP, "%s", common_data->hw_info->top_err_desc[desc_idx].desc); + CAM_ERR(CAM_ISP, "PDAF violation description: %s", + common_data->hw_info->pdaf_violation_desc[i].desc); + } + } +} + +static void cam_vfe_top_ver4_print_ipp_violation_info( + struct cam_vfe_top_ver4_priv *top_priv, + struct cam_vfe_top_irq_evt_payload *payload, uint32_t desc_idx) +{ + struct cam_hw_soc_info *soc_info; + struct cam_vfe_top_ver4_common_data *common_data; + 
void __iomem *base; + uint32_t val = 0; + + common_data = &top_priv->common_data; + soc_info = top_priv->top_common.soc_info; + base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + val = cam_io_r(base + + common_data->common_reg->ipp_violation_status); + + CAM_ERR(CAM_ISP, "VFE[%u] %s occurred at [%llu: %09llu]", + soc_info->index, + common_data->hw_info->top_err_desc[desc_idx].err_name, + payload->ts.mono_time.tv_sec, + payload->ts.mono_time.tv_nsec); + CAM_ERR(CAM_ISP, "%s", common_data->hw_info->top_err_desc[desc_idx].desc); + + if (common_data->hw_info->ipp_module_desc) + CAM_ERR(CAM_ISP, "IPP Violation Module id: [%u %s]", + common_data->hw_info->ipp_module_desc[val].id, + common_data->hw_info->ipp_module_desc[val].desc); + else + CAM_ERR(CAM_ISP, "IPP Violation status 0x%x", val); +} + +static void cam_vfe_top_ver4_print_bayer_violation_info( + struct cam_vfe_top_ver4_priv *top_priv, + struct cam_vfe_top_irq_evt_payload *payload, uint32_t desc_idx) +{ + struct cam_hw_soc_info *soc_info; + struct cam_vfe_top_ver4_common_data *common_data; + void __iomem *base; + uint32_t val = 0; + + common_data = &top_priv->common_data; + soc_info = top_priv->top_common.soc_info; + base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + val = cam_io_r(base + + common_data->common_reg->bayer_violation_status); + + CAM_ERR(CAM_ISP, "VFE[%u] %s occurred at [%llu: %09llu]", + soc_info->index, + common_data->hw_info->top_err_desc[desc_idx].err_name, + payload->ts.mono_time.tv_sec, + payload->ts.mono_time.tv_nsec); + CAM_ERR(CAM_ISP, "%s", common_data->hw_info->top_err_desc[desc_idx].desc); + + if (common_data->hw_info->bayer_module_desc) + CAM_ERR(CAM_ISP, "Bayer Violation Module id: [%u %s]", + common_data->hw_info->bayer_module_desc[val].id, + common_data->hw_info->bayer_module_desc[val].desc); + else + CAM_ERR(CAM_ISP, "Bayer Violation status 0x%x", val); +} + +static inline bool cam_vfe_is_diag_sensor_select(uint32_t diag_cfg, + struct cam_vfe_mux_ver4_data *vfe_priv) +{ + 
uint32_t val; + + val = diag_cfg & (vfe_priv->reg_data->diag_sensor_sel_mask); + + return (vfe_priv->reg_data->is_mc_path) ? (val <= CAM_ISP_MULTI_CTXT_MAX) : + (val != 0); +} + +static void cam_vfe_top_ver4_print_diag_sensor_frame_count_info( + struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload *payload, uint32_t desc_idx, + uint32_t res_id, bool is_error) +{ + struct cam_vfe_top_ver4_priv *top_priv; + struct cam_hw_soc_info *soc_info; + struct cam_vfe_top_ver4_common_data *common_data; + struct cam_vfe_top_ver4_diag_reg_info *field; + void __iomem *base; + uint32_t val, shift, diag_cfg0, diag_cfg1 = 0; + int i, j; + uint8_t log_buf[1024]; + size_t len = 0; + + top_priv = vfe_priv->top_priv; + common_data = &top_priv->common_data; + soc_info = top_priv->top_common.soc_info; + base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + + if (is_error) { + CAM_ERR(CAM_ISP, "VFE[%u] %s occurred at [%llu: %09llu]", + soc_info->index, + common_data->hw_info->top_err_desc[desc_idx].err_name, + payload->ts.mono_time.tv_sec, + payload->ts.mono_time.tv_nsec); + CAM_ERR(CAM_ISP, "%s", common_data->hw_info->top_err_desc[desc_idx].desc); + } + + if (!(top_priv->diag_config_debug_val_0 & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS)) + return; + + diag_cfg0 = cam_io_r_mb(base + common_data->common_reg->diag_config); + + if (common_data->common_reg->diag_config_1) + diag_cfg1 = cam_io_r_mb(base + common_data->common_reg->diag_config_1); + + if (!cam_vfe_is_diag_sensor_select(diag_cfg0, vfe_priv)) + goto print_frame_stats; + + for (i = 0; i < CAM_VFE_DIAG_SENSOR_STATUS_MAX; i++) { + if (!common_data->common_reg->diag_sensor_status[i]) + break; + + val = cam_io_r_mb(base + common_data->common_reg->diag_sensor_status[i]); + + for (j = 0; j < common_data->hw_info->diag_sensor_info[i].num_fields; j++) { + field = &common_data->hw_info->diag_sensor_info[i].field[j]; + shift = ffs(field->bitmask) - 1; + CAM_INFO_BUF(CAM_ISP, log_buf, 1024, &len, "%s: 0x%x, ", + 
field->name, ((val & field->bitmask) >> shift)); + } + + CAM_INFO(CAM_ISP, "VFE[%u] res_id: %d diag_sensor_status_%d: %s", + soc_info->index, res_id, i, log_buf); + + len = 0; + } + +print_frame_stats: + + if (!(diag_cfg0 && vfe_priv->reg_data->diag_frm_count_mask_0) && + (!diag_cfg1 || !(diag_cfg1 & vfe_priv->reg_data->diag_frm_count_mask_1))) + return; + + for (i = 0; i < CAM_VFE_DIAG_FRAME_COUNT_STATUS_MAX; i++) { + if (!common_data->common_reg->diag_frm_cnt_status[i]) + break; + + val = cam_io_r_mb(base + common_data->common_reg->diag_frm_cnt_status[i]); + + for (j = 0; j < common_data->hw_info->diag_frame_info[i].num_fields; j++) { + field = &common_data->hw_info->diag_frame_info[i].field[j]; + shift = ffs(field->bitmask) - 1; + CAM_INFO_BUF(CAM_ISP, log_buf, 1024, &len, "%s: 0x%x, ", + field->name, ((val & field->bitmask) >> shift)); + } + + CAM_INFO(CAM_ISP, "VFE[%u] res_id: %d diag_frame_count_status_%d: %s", + soc_info->index, res_id, i, log_buf); + + len = 0; + } + +} + +static void cam_vfe_top_ver4_print_top_irq_error( + struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload *payload, + uint32_t irq_status, uint32_t res_id) +{ + uint32_t i = 0; + struct cam_vfe_top_ver4_priv *top_priv; + struct cam_vfe_top_ver4_common_data *common_data; + + top_priv = vfe_priv->top_priv; + common_data = &top_priv->common_data; + + for (i = 0; i < common_data->hw_info->num_top_errors; i++) { + if (common_data->hw_info->top_err_desc[i].bitmask & irq_status) { + if (common_data->hw_info->top_err_desc[i].bitmask & + vfe_priv->reg_data->ipp_violation_mask) { + cam_vfe_top_ver4_print_ipp_violation_info(top_priv, payload, i); + continue; + } + + if (common_data->hw_info->top_err_desc[i].bitmask & + vfe_priv->reg_data->pdaf_violation_mask) { + cam_vfe_top_ver4_print_pdaf_violation_info(vfe_priv, payload, i); + continue; + } + + if (common_data->hw_info->top_err_desc[i].bitmask & + vfe_priv->reg_data->bayer_violation_mask) { + 
/*
 * cam_vfe_top_ver4_dump_timestamps - Log the error/SOF/EPOCH/EOF
 * timestamps of the mux resource matching res_id (for debug on overflow
 * or error).
 *
 * Selection: walks the mux resources that have an active frame IRQ.
 * A pixel-path resource matches on res_id directly (and is remembered as
 * camif_res). A non-pixel resource matches only if it is the primary RDI;
 * otherwise, if a pixel path was already seen, its timestamps are dumped
 * instead (the CAMIF timestamps are the most informative ones).
 *
 * Returns 0 (also when res_id is not found — only a debug log is
 * emitted), -EINVAL if a mux resource has no private data.
 */
int cam_vfe_top_ver4_dump_timestamps(struct cam_vfe_top_ver4_priv *top_priv, int  res_id)
{
	uint32_t                             i;
	struct cam_vfe_mux_ver4_data        *vfe_priv = NULL;
	struct cam_isp_resource_node        *res = NULL;
	struct cam_isp_resource_node        *camif_res = NULL;
	struct timespec64                    ts;

	for (i = 0; i < top_priv->top_common.num_mux; i++) {

		res = &top_priv->top_common.mux_rsrc[i];

		if (!res || !res->res_priv) {
			CAM_ERR_RATE_LIMIT(CAM_ISP, "VFE[%u] Invalid Resource",
				top_priv->common_data.hw_intf->hw_idx);
			return -EINVAL;
		}

		vfe_priv = res->res_priv;

		/* Only consider resources that are actually streaming */
		if (!vfe_priv->frame_irq_handle)
			continue;

		if (vfe_priv->is_pixel_path) {
			camif_res = res;
			if (res->res_id == res_id)
				break;
		} else {
			if (res->is_rdi_primary_res && res->res_id == res_id) {
				break;
			} else if (!res->is_rdi_primary_res && camif_res) {
				/* Fall back to the pixel path's timestamps */
				vfe_priv = camif_res->res_priv;
				break;
			}
		}
	}

	ktime_get_boottime_ts64(&ts);

	CAM_INFO(CAM_ISP, "VFE[%u] res: %u current_ts: %lld:%lld",
		top_priv->common_data.hw_intf->hw_idx, res_id, ts.tv_sec, ts.tv_nsec);

	if (i == top_priv->top_common.num_mux || !vfe_priv) {
		CAM_DBG(CAM_ISP, "VFE[%u] invalid res_id %d i:%d",
			top_priv->common_data.hw_intf->hw_idx, res_id, i);
		return 0;
	}

	CAM_INFO(CAM_ISP,
		"VFE[%u] CAMIF Error timestamp:[%lld.%09lld] SOF timestamp:[%lld.%09lld] EPOCH timestamp:[%lld.%09lld] EOF timestamp:[%lld.%09lld] epoch_factor: %u%%",
		vfe_priv->hw_intf->hw_idx,
		vfe_priv->error_ts.tv_sec, vfe_priv->error_ts.tv_nsec,
		vfe_priv->sof_ts.tv_sec, vfe_priv->sof_ts.tv_nsec,
		vfe_priv->epoch_ts.tv_sec, vfe_priv->epoch_ts.tv_nsec,
		vfe_priv->eof_ts.tv_sec, vfe_priv->eof_ts.tv_nsec,
		vfe_priv->epoch_factor);

	return 0;
}
CAM_BOOL_TO_YESNO(violation_status)); + + if (bus_overflow_status) { + overflow_info->is_bus_overflow = true; + CAM_INFO(CAM_ISP, "VFE[%u] Bus overflow status: 0x%x", + soc_info->index, bus_overflow_status); + } + + tmp = bus_overflow_status; + while (tmp) { + if (tmp & 0x1) + CAM_ERR(CAM_ISP, "VFE[%u] Bus Overflow %s", + soc_info->index, common_data->hw_info->wr_client_desc[i].desc); + tmp = tmp >> 1; + i++; + } + + cam_vfe_top_ver4_dump_timestamps(top_priv, res_id); + cam_cpas_dump_camnoc_buff_fill_info(soc_private->cpas_handle); + if (bus_overflow_status) + cam_cpas_log_votes(false); + + if (violation_status) + CAM_INFO(CAM_ISP, "VFE[%u] Bus violation status: 0x%x", + soc_info->index, violation_status); + + i = 0; + tmp = violation_status; + while (tmp) { + if (tmp & 0x1) + CAM_ERR(CAM_ISP, "VFE[%u] Bus Violation %s", + soc_info->index, common_data->hw_info->wr_client_desc[i].desc); + tmp = tmp >> 1; + i++; + } + + cam_vfe_top_ver4_print_debug_regs(top_priv); + + return 0; +} + +static int cam_vfe_core_config_control( + struct cam_vfe_top_ver4_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_vfe_core_config_args *vfe_core_cfg = cmd_args; + struct cam_isp_resource_node *rsrc_node = vfe_core_cfg->node_res; + struct cam_vfe_mux_ver4_data *vfe_priv = rsrc_node->res_priv; + + vfe_priv->cam_common_cfg.vid_ds16_r2pd = + vfe_core_cfg->core_config.vid_ds16_r2pd; + vfe_priv->cam_common_cfg.vid_ds4_r2pd = + vfe_core_cfg->core_config.vid_ds4_r2pd; + vfe_priv->cam_common_cfg.disp_ds16_r2pd = + vfe_core_cfg->core_config.disp_ds16_r2pd; + vfe_priv->cam_common_cfg.disp_ds4_r2pd = + vfe_core_cfg->core_config.disp_ds4_r2pd; + vfe_priv->cam_common_cfg.dsp_streaming_tap_point = + vfe_core_cfg->core_config.dsp_streaming_tap_point; + vfe_priv->cam_common_cfg.ihist_src_sel = + vfe_core_cfg->core_config.ihist_src_sel; + vfe_priv->cam_common_cfg.input_pp_fmt = + vfe_core_cfg->core_config.core_cfg_flag + & CAM_ISP_PARAM_CORE_CFG_PP_FORMAT; + 
vfe_priv->cam_common_cfg.hdr_mux_sel_pp = + vfe_core_cfg->core_config.core_cfg_flag + & CAM_ISP_PARAM_CORE_CFG_HDR_MUX_SEL; + + return 0; +} + +static int cam_vfe_init_config_update( + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_hw_init_config_update *init_cfg = cmd_args; + struct cam_isp_resource_node *rsrc_node = init_cfg->node_res; + struct cam_vfe_mux_ver4_data *mux_data = rsrc_node->res_priv; + + if (arg_size != sizeof(struct cam_isp_hw_init_config_update)) { + CAM_ERR(CAM_ISP, "VFE:%u Invalid args size expected: %zu actual: %zu", + rsrc_node->hw_intf->hw_idx, sizeof(struct cam_isp_hw_init_config_update), + arg_size); + return -EINVAL; + } + + mux_data->epoch_factor = + init_cfg->init_config->epoch_cfg.epoch_factor; + + CAM_DBG(CAM_ISP, + "VFE:%u Init Update for res_name: %s epoch_factor: %u%%", + rsrc_node->hw_intf->hw_idx, rsrc_node->res_name, mux_data->epoch_factor); + + return 0; +} + +static int cam_vfe_top_ver4_mux_get_reg_update( + struct cam_vfe_top_ver4_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + CAM_ERR(CAM_ISP, "VFE:%u Invalid request, Add RUP in CSID", + top_priv->common_data.hw_intf->hw_idx); + return -EINVAL; +} + +static int cam_vfe_top_ver4_get_data( + struct cam_vfe_top_ver4_priv *top_priv, + void *cmd_args, uint32_t arg_size) +{ + struct cam_isp_resource_node *res = cmd_args; + + if (res->process_cmd) + return res->process_cmd(res, + CAM_ISP_HW_CMD_CAMIF_DATA, cmd_args, arg_size); + + return -EINVAL; +} + +int cam_vfe_top_ver4_get_hw_caps(void *device_priv, void *args, uint32_t arg_size) +{ + struct cam_vfe_hw_get_hw_cap *vfe_cap_info = NULL; + struct cam_vfe_top_ver4_priv *vfe_top_prv = NULL; + struct cam_vfe_soc_private *soc_priv = NULL; + + if (!device_priv || !args) { + CAM_ERR(CAM_ISP, "Invalid arguments device_priv:%p, args:%p", device_priv, args); + return -EINVAL; + } + + vfe_cap_info = args; + vfe_top_prv = device_priv; + + if (!vfe_top_prv->top_common.soc_info) { + CAM_ERR(CAM_ISP, "soc info is null"); + 
return -EFAULT; + } + + soc_priv = (struct cam_vfe_soc_private *)vfe_top_prv->top_common.soc_info->soc_private; + + vfe_cap_info->is_lite = soc_priv->is_ife_lite; + vfe_cap_info->incr = (vfe_top_prv->top_common.hw_version) & 0x00ffff; + vfe_cap_info->minor = ((vfe_top_prv->top_common.hw_version) >> 16) & 0x0fff; + vfe_cap_info->major = (vfe_top_prv->top_common.hw_version) >> 28; + + return 0; +} + +int cam_vfe_top_ver4_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver4_priv *top_priv = device_priv; + struct cam_vfe_top_ver4_common_data common_data = top_priv->common_data; + + top_priv->top_common.applied_clk_rate = 0; + + top_priv->top_common.hw_version = cam_io_r_mb( + top_priv->top_common.soc_info->reg_map[0].mem_base + + common_data.common_reg->hw_version); + CAM_DBG(CAM_ISP, "VFE:%u hw-version:0x%x", + top_priv->top_common.hw_idx, + top_priv->top_common.hw_version); + + return 0; +} + +int cam_vfe_top_ver4_reset(void *device_priv, + void *reset_core_args, uint32_t arg_size) +{ + CAM_DBG(CAM_ISP, "Reset not supported"); + return 0; +} + +int cam_vfe_top_acquire_resource( + struct cam_isp_resource_node *vfe_full_res, + void *acquire_param) +{ + struct cam_vfe_mux_ver4_data *res_data; + struct cam_vfe_acquire_args *acquire_data; + int rc = 0; + + res_data = (struct cam_vfe_mux_ver4_data *) + vfe_full_res->res_priv; + acquire_data = (struct cam_vfe_acquire_args *)acquire_param; + + res_data->sync_mode = acquire_data->vfe_in.sync_mode; + res_data->event_cb = acquire_data->event_cb; + res_data->priv = acquire_data->priv; + + if (!res_data->is_pixel_path) + goto config_done; + + res_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern; + res_data->dsp_mode = acquire_data->vfe_in.in_port->dsp_mode; + res_data->first_pixel = acquire_data->vfe_in.in_port->left_start; + res_data->last_pixel = acquire_data->vfe_in.in_port->left_stop; + res_data->first_line = acquire_data->vfe_in.in_port->line_start; + 
res_data->last_line = acquire_data->vfe_in.in_port->line_stop; + res_data->is_fe_enabled = acquire_data->vfe_in.is_fe_enabled; + res_data->is_offline = acquire_data->vfe_in.is_offline; + res_data->is_dual = acquire_data->vfe_in.is_dual; + res_data->qcfa_bin = acquire_data->vfe_in.in_port->qcfa_bin; + res_data->horizontal_bin = + acquire_data->vfe_in.in_port->horizontal_bin; + res_data->vbi_value = 0; + res_data->hbi_value = 0; + res_data->sfe_binned_epoch_cfg = + acquire_data->vfe_in.in_port->sfe_binned_epoch_cfg; + res_data->handle_camif_irq = acquire_data->vfe_in.handle_camif_irq; + + if (res_data->is_dual) + res_data->dual_hw_idx = acquire_data->vfe_in.dual_hw_idx; + +config_done: + CAM_DBG(CAM_ISP, + "VFE:%u Res:[id:%d name:%s] dsp_mode:%d is_dual:%d dual_hw_idx:%d", + vfe_full_res->hw_intf->hw_idx, + vfe_full_res->res_id, + vfe_full_res->res_name, + res_data->dsp_mode, + res_data->is_dual, res_data->dual_hw_idx); + + return rc; +} + +int cam_vfe_top_ver4_reserve(void *device_priv, + void *reserve_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver4_priv *top_priv; + struct cam_vfe_acquire_args *args; + struct cam_vfe_hw_vfe_in_acquire_args *acquire_args; + struct cam_vfe_mux_ver4_data *vfe_priv = NULL; + uint32_t i; + int rc = -EINVAL; + + if (!device_priv || !reserve_args) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver4_priv *)device_priv; + args = (struct cam_vfe_acquire_args *)reserve_args; + acquire_args = &args->vfe_in; + + CAM_DBG(CAM_ISP, "VFE:%u res id %d", + top_priv->common_data.hw_intf->hw_idx, acquire_args->res_id); + + + for (i = 0; i < top_priv->top_common.num_mux; i++) { + if (top_priv->top_common.mux_rsrc[i].res_id == acquire_args->res_id) { + vfe_priv = (struct cam_vfe_mux_ver4_data *) + top_priv->top_common.mux_rsrc[i].res_priv; + + if (top_priv->top_common.mux_rsrc[i].res_state != + CAM_ISP_RESOURCE_STATE_AVAILABLE) { + if (acquire_args->res_id != CAM_ISP_HW_VFE_IN_CAMIF) 
{ + CAM_ERR(CAM_ISP, "VFE:%d Duplicate acquire for camif", + top_priv->common_data.hw_intf->hw_idx); + rc = -EINVAL; + break; + } + + if (!(vfe_priv->hw_ctxt_mask & acquire_args->hw_ctxt_mask)) { + CAM_DBG(CAM_ISP, + "VFE:%d Update hw ctxt mask: 0x%x for camif curr_mask_val: 0x%x", + top_priv->common_data.hw_intf->hw_idx, + acquire_args->hw_ctxt_mask, + vfe_priv->hw_ctxt_mask); + vfe_priv->hw_ctxt_mask |= acquire_args->hw_ctxt_mask; + acquire_args->rsrc_node = &top_priv->top_common.mux_rsrc[i]; + rc = 0; + } else { + CAM_ERR(CAM_ISP, + "VFE:%d Duplicate hw ctxt mask: 0x%x for camif curr_mask_val: 0x%x", + top_priv->common_data.hw_intf->hw_idx, + acquire_args->hw_ctxt_mask, + vfe_priv->hw_ctxt_mask); + rc = -EINVAL; + } + + break; + } + + if (((acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) || + (acquire_args->res_id >= CAM_ISP_HW_VFE_IN_RDI0)) && + (acquire_args->res_id < CAM_ISP_HW_VFE_IN_MAX)) { + rc = cam_vfe_top_acquire_resource( + &top_priv->top_common.mux_rsrc[i], + args); + if (rc) + break; + } + + /* Acquire ownership */ + rc = cam_vmrm_soc_acquire_resources( + CAM_HW_ID_IFE0 + top_priv->common_data.hw_intf->hw_idx); + if (rc) { + CAM_ERR(CAM_ISP, "VFE[%u] acquire ownership failed", + top_priv->common_data.hw_intf->hw_idx); + break; + } + + top_priv->top_common.mux_rsrc[i].cdm_ops = + acquire_args->cdm_ops; + top_priv->top_common.mux_rsrc[i].tasklet_info = + args->tasklet; + vfe_priv->hw_ctxt_mask = acquire_args->hw_ctxt_mask; + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_RESERVED; + acquire_args->rsrc_node = &top_priv->top_common.mux_rsrc[i]; + + rc = 0; + break; + } + } + + return rc; + +} + +int cam_vfe_top_ver4_release(void *device_priv, + void *release_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_isp_resource_node *mux_res; + struct cam_vfe_top_ver4_priv *top_priv; + struct cam_vfe_mux_ver4_data *vfe_priv = NULL; + + if (!device_priv || !release_args) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return 
-EINVAL; + } + + mux_res = (struct cam_isp_resource_node *)release_args; + top_priv = (struct cam_vfe_top_ver4_priv *)device_priv; + vfe_priv = (struct cam_vfe_mux_ver4_data *) mux_res->res_priv; + + CAM_DBG(CAM_ISP, "VFE:%u Resource in state %d", + top_priv->common_data.hw_intf->hw_idx, mux_res->res_state); + if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "VFE:%u Error, Resource in Invalid res_state :%d", + top_priv->common_data.hw_intf->hw_idx, mux_res->res_state); + return -EINVAL; + } + + memset(&vfe_priv->top_priv->sof_ts_reg_addr, 0, + sizeof(vfe_priv->top_priv->sof_ts_reg_addr)); + vfe_priv->hw_ctxt_mask = 0; + mux_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE; + + rc = cam_vmrm_soc_release_resources( + CAM_HW_ID_IFE0 + top_priv->common_data.hw_intf->hw_idx); + if (rc) { + CAM_ERR(CAM_ISP, "VFE[%u] vmrm soc release resources failed", + top_priv->common_data.hw_intf->hw_idx); + } + + return rc; +} + +int cam_vfe_top_ver4_start(void *device_priv, + void *start_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver4_priv *top_priv; + struct cam_isp_resource_node *mux_res; + struct cam_hw_info *hw_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_vfe_soc_private *soc_private = NULL; + int rc = 0, i; + + if (!device_priv || !start_args) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver4_priv *)device_priv; + soc_info = top_priv->top_common.soc_info; + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "VFE:%u Error soc_private NULL", + top_priv->common_data.hw_intf->hw_idx); + return -EINVAL; + } + + mux_res = (struct cam_isp_resource_node *)start_args; + hw_info = (struct cam_hw_info *)mux_res->hw_intf->hw_priv; + + if (hw_info->hw_state == CAM_HW_STATE_POWER_UP) { + rc = cam_vfe_top_apply_clock_start_stop(&top_priv->top_common); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%u Failed in applying start clock rc:%d", + 
hw_info->soc_info.index, rc); + return rc; + } + + rc = cam_vfe_top_apply_bw_start_stop(&top_priv->top_common); + if (rc) { + CAM_ERR(CAM_ISP, + "VFE:%u Failed in applying start bw rc:%d", + hw_info->soc_info.index, rc); + return rc; + } + + if (mux_res->start) { + rc = mux_res->start(mux_res); + } else { + CAM_ERR(CAM_ISP, + "VFE:%u Invalid res id:%d", + hw_info->soc_info.index, mux_res->res_id); + rc = -EINVAL; + } + + /* Perf counter config */ + for (i = 0; i < top_priv->common_data.common_reg->num_perf_counters; i++) { + if (!top_priv->perf_counters[i].perf_counter_val) + continue; + + top_priv->perf_counters[i].dump_counter = true; + cam_io_w_mb(top_priv->perf_counters[i].perf_counter_val, + soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base + + top_priv->common_data.common_reg->perf_count_reg[i].perf_count_cfg); + CAM_DBG(CAM_ISP, "VFE [%u] perf_count_%d: 0x%x", + hw_info->soc_info.index, (i + 1), + top_priv->perf_counters[i].perf_counter_val); + } + + if (top_priv->diag_config_debug_val_0 & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) { + CAM_DBG(CAM_ISP, "Setting diag_cfg register on VFE%u to: 0x%llx", + hw_info->soc_info.index, top_priv->diag_config_debug_val_0); + + cam_io_w_mb((uint32_t)top_priv->diag_config_debug_val_0, soc_info->reg_map[ + VFE_CORE_BASE_IDX].mem_base + + top_priv->common_data.common_reg->diag_config); + + if (top_priv->common_data.common_reg->diag_config_1 && + (top_priv->diag_config_debug_val_0 >> 32)) + cam_io_w_mb((uint32_t)(top_priv->diag_config_debug_val_0 >> 32), + soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base + + top_priv->common_data.common_reg->diag_config_1); + } + } else { + CAM_ERR(CAM_ISP, "VFE:%u HW not powered up", hw_info->soc_info.index); + rc = -EPERM; + } + + atomic_set(&top_priv->overflow_pending, 0); + return rc; +} + +int cam_vfe_top_ver4_stop(void *device_priv, + void *stop_args, uint32_t arg_size) +{ + struct cam_vfe_top_ver4_priv *top_priv; + struct cam_isp_resource_node *mux_res; + struct cam_hw_soc_info *soc_info = 
NULL; + void __iomem *base; + int i, rc = 0; + + if (!device_priv || !stop_args) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver4_priv *)device_priv; + soc_info = top_priv->top_common.soc_info; + mux_res = (struct cam_isp_resource_node *)stop_args; + base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + + if (mux_res->res_id < CAM_ISP_HW_VFE_IN_MAX) { + rc = mux_res->stop(mux_res); + } else { + CAM_ERR(CAM_ISP, "VFE:%u Invalid res id:%d", + top_priv->common_data.hw_intf->hw_idx, mux_res->res_id); + return -EINVAL; + } + + if (!rc) { + for (i = 0; i < top_priv->top_common.num_mux; i++) { + if (top_priv->top_common.mux_rsrc[i].res_id == + mux_res->res_id) { + if (!top_priv->top_common.skip_data_rst_on_stop) + top_priv->top_common.req_clk_rate[i] = 0; + memset(&top_priv->top_common.req_axi_vote[i], + 0, sizeof(struct cam_axi_vote)); + top_priv->top_common.axi_vote_control[i] = + CAM_ISP_BW_CONTROL_EXCLUDE; + break; + } + } + } + + /* Reset perf counters at stream off */ + for (i = 0; i < top_priv->common_data.common_reg->num_perf_counters; i++) { + if (top_priv->perf_counters[i].dump_counter) + cam_io_w_mb(0x0, + base + + top_priv->common_data.common_reg->perf_count_reg[i].perf_count_cfg); + top_priv->perf_counters[i].dump_counter = false; + } + + top_priv->diag_config_debug_val_0 = 0; + + if (top_priv->common_data.hw_info->num_pdaf_lcr_res) + cam_io_w(1, soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base + + top_priv->common_data.common_reg->pdaf_input_cfg_1); + + atomic_set(&top_priv->overflow_pending, 0); + return rc; +} + +int cam_vfe_top_ver4_read(void *device_priv, + void *read_args, uint32_t arg_size) +{ + return -EPERM; +} + +int cam_vfe_top_ver4_write(void *device_priv, + void *write_args, uint32_t arg_size) +{ + return -EPERM; +} + +static int cam_vfe_top_apply_fcg_update( + struct cam_vfe_top_ver4_priv *top_priv, + struct cam_isp_hw_fcg_update *fcg_update, + struct cam_cdm_utils_ops 
*cdm_util_ops) +{ + struct cam_isp_fcg_config_internal *fcg_config; + struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx; + struct cam_isp_predict_fcg_config_internal *fcg_pr; + struct cam_vfe_top_ver4_hw_info *hw_info; + struct cam_vfe_ver4_fcg_module_info *fcg_module_info; + uint32_t size, fcg_index_shift; + uint32_t *reg_val_pair; + uint32_t num_regval_pairs = 0; + int rc = 0, i, j = 0; + + if (!top_priv || !fcg_update || (fcg_update->prediction_idx == 0)) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + hw_info = top_priv->common_data.hw_info; + fcg_config = (struct cam_isp_fcg_config_internal *)fcg_update->data; + if (!hw_info || !fcg_config) { + CAM_ERR(CAM_ISP, "Invalid config params"); + return -EINVAL; + } + + fcg_module_info = hw_info->fcg_module_info; + if (!fcg_module_info) { + CAM_ERR(CAM_ISP, "Invalid FCG common data"); + return -EINVAL; + } + + if (fcg_config->num_ch_ctx > CAM_ISP_MAX_FCG_CH_CTXS) { + CAM_ERR(CAM_SFE, "out of bounds %d", + fcg_config->num_ch_ctx); + return -EINVAL; + } + + reg_val_pair = CAM_MEM_ZALLOC_ARRAY(fcg_module_info->max_reg_val_pair_size, + sizeof(uint32_t), + GFP_KERNEL); + if (!reg_val_pair) { + CAM_ERR(CAM_ISP, "Failed allocating memory for reg val pair"); + return -ENOMEM; + } + + fcg_index_shift = fcg_module_info->fcg_index_shift; + + for (i = 0, j = 0; i < fcg_config->num_ch_ctx; i++) { + if (j >= fcg_module_info->max_reg_val_pair_size) { + CAM_ERR(CAM_ISP, "reg_val_pair %d exceeds the array limit %u", + j, fcg_module_info->max_reg_val_pair_size); + rc = -ENOMEM; + goto free_mem; + } + + fcg_ch_ctx = &fcg_config->ch_ctx_fcg_configs[i]; + if (!fcg_ch_ctx) { + CAM_ERR(CAM_ISP, "Failed in FCG channel/context dereference"); + rc = -EINVAL; + goto free_mem; + } + + fcg_pr = &fcg_ch_ctx->predicted_fcg_configs[ + fcg_update->prediction_idx - 1]; + + /* For VFE/MC_TFE, only PHASE should be enabled */ + if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_PHASE) { + switch (fcg_ch_ctx->fcg_ch_ctx_id) { + /* 
Same value as CAM_ISP_FCG_MASK_CH0/1/2 to support both VFE and MC_TFE */ + case CAM_ISP_MULTI_CTXT0_MASK: + if (hw_info->fcg_mc_supported) { + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_reg_ctxt_sel, + (fcg_module_info->fcg_reg_ctxt_mask & + (fcg_ch_ctx->fcg_ch_ctx_id << + fcg_module_info->fcg_reg_ctxt_shift))); + CAM_DBG(CAM_ISP, + "Program FCG registers for MC_TFE, ch_ctx_id: 0x%x, sel_wr: 0x%x", + fcg_ch_ctx->fcg_ch_ctx_id, + (fcg_module_info->fcg_reg_ctxt_mask & + (fcg_ch_ctx->fcg_ch_ctx_id << + fcg_module_info->fcg_reg_ctxt_shift))); + } + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_phase_index_cfg_0, + fcg_pr->phase_index_r | + (fcg_pr->phase_index_g << fcg_index_shift)); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_phase_index_cfg_1, + fcg_pr->phase_index_b); + CAM_DBG(CAM_ISP, + "Program FCG registers for IFE/MC_TFE, ch_ctx_id: 0x%x, phase_index_cfg_0: %u, phase_index_cfg_1: %u", + fcg_ch_ctx->fcg_ch_ctx_id, + (fcg_pr->phase_index_r | + (fcg_pr->phase_index_g << fcg_index_shift)), + fcg_pr->phase_index_b); + break; + case CAM_ISP_MULTI_CTXT1_MASK: + case CAM_ISP_MULTI_CTXT2_MASK: + if (!hw_info->fcg_mc_supported) { + CAM_ERR(CAM_ISP, + "No support for multi context for FCG on ch_ctx_id: 0x%x", + fcg_ch_ctx->fcg_ch_ctx_id); + rc = -EINVAL; + goto free_mem; + } + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_reg_ctxt_sel, + (fcg_module_info->fcg_reg_ctxt_mask & + (fcg_ch_ctx->fcg_ch_ctx_id << + fcg_module_info->fcg_reg_ctxt_shift))); + CAM_DBG(CAM_ISP, + "Program FCG registers for MC_TFE, ch_ctx_id: 0x%x, sel_wr: 0x%x", + fcg_ch_ctx->fcg_ch_ctx_id, + (fcg_module_info->fcg_reg_ctxt_mask & + (fcg_ch_ctx->fcg_ch_ctx_id << + fcg_module_info->fcg_reg_ctxt_shift))); + + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + 
fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_phase_index_cfg_0, + fcg_pr->phase_index_r | + (fcg_pr->phase_index_g << fcg_index_shift)); + CAM_ISP_ADD_REG_VAL_PAIR(reg_val_pair, + fcg_module_info->max_reg_val_pair_size, j, + fcg_module_info->fcg_phase_index_cfg_1, + fcg_pr->phase_index_b); + CAM_DBG(CAM_ISP, + "Program FCG registers for MC_TFE, ch_ctx_id: 0x%x, phase_index_cfg_0: %u, phase_index_cfg_1: %u", + fcg_ch_ctx->fcg_ch_ctx_id, + (fcg_pr->phase_index_r | + (fcg_pr->phase_index_g << fcg_index_shift)), + fcg_pr->phase_index_b); + break; + default: + CAM_ERR(CAM_ISP, "Unsupported ch_ctx_id: 0x%x", + fcg_ch_ctx->fcg_ch_ctx_id); + rc = -EINVAL; + goto free_mem; + } + } + } + + num_regval_pairs = j / 2; + + if (num_regval_pairs) { + size = cdm_util_ops->cdm_required_size_reg_random( + num_regval_pairs); + + if ((size * 4) != fcg_update->cmd_size) { + CAM_ERR(CAM_ISP, + "Failed! Buf size:%d is wrong, expected size: %d", + fcg_update->cmd_size, size * 4); + rc = -ENOMEM; + goto free_mem; + } + + cdm_util_ops->cdm_write_regrandom( + (uint32_t *)fcg_update->cmd_buf_addr, + num_regval_pairs, reg_val_pair); + } else { + CAM_WARN(CAM_ISP, "No reg val pairs"); + } + +free_mem: + CAM_MEM_FREE(reg_val_pair); + return rc; +} + +static int cam_vfe_top_get_fcg_buf_size( + struct cam_vfe_top_ver4_priv *top_priv, + struct cam_isp_hw_fcg_get_size *fcg_get_size, + struct cam_cdm_utils_ops *cdm_util_ops) +{ + struct cam_vfe_top_ver4_hw_info *hw_info; + struct cam_vfe_ver4_fcg_module_info *fcg_module_info; + uint32_t num_types, num_reg_val; + + if (!top_priv) { + CAM_ERR(CAM_ISP, "Invalid args"); + return -EINVAL; + } + + hw_info = top_priv->common_data.hw_info; + if (!hw_info) { + CAM_ERR(CAM_ISP, "Invalid config params"); + return -EINVAL; + } + + if (!hw_info->fcg_supported && + !hw_info->fcg_mc_supported) { + fcg_get_size->fcg_supported = false; + CAM_DBG(CAM_ISP, "FCG is not supported by hardware"); + return 0; + } + + fcg_module_info = 
hw_info->fcg_module_info; + fcg_get_size->fcg_supported = true; + num_types = fcg_get_size->num_types; + if (num_types == 0) { + CAM_ERR(CAM_ISP, "Number of types(STATS/PHASE) requested is empty"); + return -EINVAL; + } + + num_reg_val = num_types * fcg_module_info->fcg_type_size; + + /* Count for wr_sel register in MC_TFE */ + if (hw_info->fcg_mc_supported) + num_reg_val += fcg_get_size->num_ctxs; + + fcg_get_size->kmd_size = + cdm_util_ops->cdm_required_size_reg_random(num_reg_val); + return 0; +} + +static int cam_vfe_top_fcg_config( + struct cam_vfe_top_ver4_priv *top_priv, + void *cmd_args, + uint32_t arg_size) +{ + struct cam_isp_hw_fcg_cmd *fcg_cmd; + struct cam_cdm_utils_ops *cdm_util_ops; + int rc; + + if (arg_size != sizeof(struct cam_isp_hw_fcg_cmd)) { + CAM_ERR(CAM_ISP, "Invalid cmd size, arg_size: %u, expected size: %u", + arg_size, sizeof(struct cam_isp_hw_fcg_cmd)); + return -EINVAL; + } + + fcg_cmd = (struct cam_isp_hw_fcg_cmd *) cmd_args; + if (!fcg_cmd || !fcg_cmd->res) { + CAM_ERR(CAM_ISP, "Invalid cmd args"); + return -EINVAL; + } + + cdm_util_ops = + (struct cam_cdm_utils_ops *)fcg_cmd->res->cdm_ops; + if (!cdm_util_ops) { + CAM_ERR(CAM_ISP, "Invalid CDM ops"); + return -EINVAL; + } + + if (fcg_cmd->get_size_flag) { + struct cam_isp_hw_fcg_get_size *fcg_get_size; + + fcg_get_size = &fcg_cmd->u.fcg_get_size; + rc = cam_vfe_top_get_fcg_buf_size(top_priv, fcg_get_size, cdm_util_ops); + } else { + struct cam_isp_hw_fcg_update *fcg_update; + + fcg_update = &fcg_cmd->u.fcg_update; + rc = cam_vfe_top_apply_fcg_update(top_priv, fcg_update, cdm_util_ops); + } + + return rc; +} + +static int cam_vfe_top_ver4_update_sof_debug( + void *cmd_args, + uint32_t arg_size) +{ + struct cam_vfe_enable_sof_irq_args *sof_irq_args = cmd_args; + struct cam_isp_resource_node *res; + struct cam_vfe_mux_ver4_data *mux_data; + bool enable_sof_irq; + uint32_t sof_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + + if (arg_size != sizeof(struct cam_vfe_enable_sof_irq_args)) { + 
CAM_ERR(CAM_ISP, "Invalid arg size expected: %zu actual: %zu", + sizeof(struct cam_vfe_enable_sof_irq_args), arg_size); + return -EINVAL; + } + + memset(sof_irq_mask, 0, sizeof(sof_irq_mask)); + + res = sof_irq_args->res; + enable_sof_irq = sof_irq_args->enable_sof_irq_debug; + mux_data = res->res_priv; + + if (mux_data->frame_irq_handle) { + CAM_DBG(CAM_ISP, + "Frame IRQ (including SOF) is enabled, no SOF IRQ registration is needed"); + return 0; + } + + sof_irq_mask[mux_data->common_reg->frame_timing_irq_reg_idx] = + mux_data->reg_data->sof_irq_mask; + + mux_data->enable_sof_irq_debug = enable_sof_irq; + if (mux_data->sof_irq_handle) + cam_irq_controller_update_irq( + mux_data->vfe_irq_controller, + mux_data->sof_irq_handle, + enable_sof_irq, sof_irq_mask); + return 0; +} + +int cam_vfe_top_ver4_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_vfe_top_ver4_priv *top_priv; + struct cam_hw_soc_info *soc_info; + struct cam_vfe_soc_private *soc_private; + + if (!device_priv || !cmd_args) { + CAM_ERR(CAM_ISP, "Error, Invalid arguments"); + return -EINVAL; + } + + top_priv = (struct cam_vfe_top_ver4_priv *)device_priv; + soc_info = top_priv->top_common.soc_info; + soc_private = soc_info->soc_private; + if (!soc_private) { + CAM_ERR(CAM_ISP, "VFE:%u Error soc_private NULL", + top_priv->common_data.hw_intf->hw_idx); + return -EINVAL; + } + + switch (cmd_type) { + case CAM_ISP_HW_CMD_GET_CHANGE_BASE: + rc = cam_vfe_top_ver4_mux_get_base(top_priv, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_REG_UPDATE: + rc = cam_vfe_top_ver4_mux_get_reg_update(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CAMIF_DATA: + rc = cam_vfe_top_ver4_get_data(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_CLOCK_UPDATE: + rc = cam_vfe_top_clock_update(&top_priv->top_common, cmd_args, + arg_size); + break; + case CAM_ISP_HW_NOTIFY_OVERFLOW: + atomic_set(&top_priv->overflow_pending, 1); 
+ rc = cam_vfe_top_ver4_print_overflow_debug_info(top_priv, + cmd_args); + break; + case CAM_ISP_HW_CMD_FE_UPDATE_IN_RD: + rc = cam_vfe_top_fs_update(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_BW_UPDATE: + rc = cam_vfe_top_bw_update(soc_private, &top_priv->top_common, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BW_UPDATE_V2: + rc = cam_vfe_top_bw_update_v2(soc_private, + &top_priv->top_common, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_BW_CONTROL: + rc = cam_vfe_top_bw_control(soc_private, &top_priv->top_common, + cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_CORE_CONFIG: + rc = cam_vfe_core_config_control(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_GET_PATH_PORT_MAP: + rc = cam_vfe_top_ver4_get_path_port_map(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE: + rc = cam_vfe_top_apply_clk_bw_update(&top_priv->top_common, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE: + rc = cam_vfe_init_config_update(cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_RDI_LCR_CFG: + rc = cam_vfe_top_ver4_pdaf_lcr_config(top_priv, cmd_args, + arg_size); + break; + case CAM_ISP_HW_CMD_QUERY_CAP: { + struct cam_isp_hw_cap *ife_cap; + + ife_cap = (struct cam_isp_hw_cap *) cmd_args; + ife_cap->num_perf_counters = + top_priv->common_data.common_reg->num_perf_counters; + if (top_priv->common_data.hw_info->fcg_supported || + top_priv->common_data.hw_info->fcg_mc_supported) { + ife_cap->fcg_supported = true; + ife_cap->max_fcg_ch_ctx = + top_priv->common_data.hw_info->fcg_module_info->max_fcg_ch_ctx; + ife_cap->max_fcg_predictions = + top_priv->common_data.hw_info->fcg_module_info->max_fcg_predictions; + } + } + break; + case CAM_ISP_HW_CMD_IFE_DEBUG_CFG: { + int i; + uint32_t max_counters = top_priv->common_data.common_reg->num_perf_counters; + struct cam_vfe_generic_debug_config *debug_cfg; + + debug_cfg = (struct cam_vfe_generic_debug_config *)cmd_args; + if 
(debug_cfg->num_counters <= max_counters) + for (i = 0; i < max_counters; i++) + top_priv->perf_counters[i].perf_counter_val = + debug_cfg->vfe_perf_counter_val[i]; + + top_priv->enable_ife_frame_irqs = debug_cfg->enable_ife_frame_irqs; + top_priv->diag_config_debug_val_0 = debug_cfg->diag_config; + } + break; + case CAM_ISP_HW_CMD_GET_SET_PRIM_SOF_TS_ADDR: { + struct cam_ife_csid_ts_reg_addr *sof_addr_args = + (struct cam_ife_csid_ts_reg_addr *)cmd_args; + + if (sof_addr_args->get_addr) { + CAM_ERR(CAM_ISP, "VFE does not support get of primary SOF ts addr"); + rc = -EINVAL; + } else + rc = cam_vfe_top_ver4_set_primary_sof_timer_reg_addr(top_priv, + sof_addr_args); + } + break; + case CAM_ISP_HW_CMD_FCG_CONFIG: + rc = cam_vfe_top_fcg_config(top_priv, cmd_args, arg_size); + break; + case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG: + rc = cam_vfe_top_ver4_update_sof_debug(cmd_args, arg_size); + break; + default: + rc = -EINVAL; + CAM_ERR(CAM_ISP, "VFE:%u Error, Invalid cmd:%d", + top_priv->common_data.hw_intf->hw_idx, cmd_type); + break; + } + + return rc; +} + +static int cam_vfe_get_evt_payload( + struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + int rc = 0; + + spin_lock(&vfe_priv->spin_lock); + if (list_empty(&vfe_priv->free_payload_list)) { + CAM_ERR_RATE_LIMIT(CAM_ISP, "No free VFE event payload"); + rc = -ENODEV; + goto done; + } + + *evt_payload = list_first_entry(&vfe_priv->free_payload_list, + struct cam_vfe_top_irq_evt_payload, list); + list_del_init(&(*evt_payload)->list); +done: + spin_unlock(&vfe_priv->spin_lock); + return rc; +} + +static int cam_vfe_top_put_evt_payload( + struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload **evt_payload) +{ + unsigned long flags; + + if (!vfe_priv) { + CAM_ERR(CAM_ISP, "Invalid param core_info NULL"); + return -EINVAL; + } + if (*evt_payload == NULL) { + CAM_ERR(CAM_ISP, "VFE:%u No payload to put", vfe_priv->hw_intf->hw_idx); + return -EINVAL; + } + + 
CAM_COMMON_SANITIZE_LIST_ENTRY((*evt_payload), struct cam_vfe_top_irq_evt_payload); + spin_lock_irqsave(&vfe_priv->spin_lock, flags); + list_add_tail(&(*evt_payload)->list, &vfe_priv->free_payload_list); + *evt_payload = NULL; + spin_unlock_irqrestore(&vfe_priv->spin_lock, flags); + + CAM_DBG(CAM_ISP, "VFE:%u Done", vfe_priv->hw_intf->hw_idx); + return 0; +} + +static int cam_vfe_handle_irq_top_half(uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc; + int i; + struct cam_isp_resource_node *vfe_res; + struct cam_vfe_mux_ver4_data *vfe_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + + vfe_res = th_payload->handler_priv; + vfe_priv = vfe_res->res_priv; + + for (i = 0; i < th_payload->num_registers; i++) + CAM_DBG(CAM_ISP, + "VFE:%u IRQ status_%u: 0x%X", + vfe_res->hw_intf->hw_idx, i, + th_payload->evt_status_arr[i]); + + rc = cam_vfe_get_evt_payload(vfe_priv, &evt_payload); + if (rc) { + for (i = 0; i < th_payload->num_registers; i++) + CAM_INFO_RATE_LIMIT(CAM_ISP, + "VFE:%u IRQ status_%u: 0x%X", + vfe_res->hw_intf->hw_idx, i, + th_payload->evt_status_arr[i]); + + return rc; + } + + cam_isp_hw_get_timestamp(&evt_payload->ts); + evt_payload->reg_val = 0; + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + th_payload->evt_payload_priv = evt_payload; + + if (th_payload->evt_status_arr[vfe_priv->common_reg->frame_timing_irq_reg_idx] + & vfe_priv->reg_data->sof_irq_mask) { + if (vfe_priv->top_priv->sof_ts_reg_addr.curr0_ts_addr && + vfe_priv->top_priv->sof_ts_reg_addr.curr1_ts_addr) { + evt_payload->ts.sof_ts = + cam_io_r_mb(vfe_priv->top_priv->sof_ts_reg_addr.curr1_ts_addr); + evt_payload->ts.sof_ts = (evt_payload->ts.sof_ts << 32) | + cam_io_r_mb(vfe_priv->top_priv->sof_ts_reg_addr.curr0_ts_addr); + } + + trace_cam_log_event("SOF", "TOP_HALF", + th_payload->evt_status_arr[vfe_priv->common_reg->frame_timing_irq_reg_idx], + vfe_res->hw_intf->hw_idx); + } + + if 
(th_payload->evt_status_arr[vfe_priv->common_reg->frame_timing_irq_reg_idx] + & vfe_priv->reg_data->epoch0_irq_mask) { + trace_cam_log_event("EPOCH0", "TOP_HALF", + th_payload->evt_status_arr[vfe_priv->common_reg->frame_timing_irq_reg_idx], + vfe_res->hw_intf->hw_idx); + } + + if (th_payload->evt_status_arr[vfe_priv->common_reg->frame_timing_irq_reg_idx] + & vfe_priv->reg_data->eof_irq_mask) { + trace_cam_log_event("EOF", "TOP_HALF", + th_payload->evt_status_arr[vfe_priv->common_reg->frame_timing_irq_reg_idx], + vfe_res->hw_intf->hw_idx); + } + + CAM_DBG(CAM_ISP, "VFE:%u Exit", vfe_res->hw_intf->hw_idx); + return rc; +} + +static void cam_vfe_irq_status_to_event(struct cam_vfe_mux_ver4_data *vfe_priv, + uint32_t irq_status, bool *sof, bool *epoch, bool *eof) +{ + *sof = (irq_status & vfe_priv->reg_data->sof_irq_mask); + *epoch = (irq_status & vfe_priv->reg_data->epoch0_irq_mask); + *eof = (irq_status & vfe_priv->reg_data->eof_irq_mask); +} + +static enum cam_vfe_top_ver4_fsm_state cam_vfe_top_ver4_fsm_next_state( + struct cam_isp_resource_node *res, + enum cam_vfe_top_ver4_fsm_state state) +{ + switch (state) { + case VFE_TOP_VER4_FSM_SOF: + return (res->is_rdi_primary_res) ? 
VFE_TOP_VER4_FSM_EOF : VFE_TOP_VER4_FSM_EPOCH; + case VFE_TOP_VER4_FSM_EPOCH: + return VFE_TOP_VER4_FSM_EOF; + case VFE_TOP_VER4_FSM_EOF: + return VFE_TOP_VER4_FSM_SOF; + default: + /* set to SOF to recover from incorrect state */ + return VFE_TOP_VER4_FSM_SOF; + } +} + +static const char *cam_vfe_top_ver4_fsm_state_to_string( + enum cam_vfe_top_ver4_fsm_state state) +{ + switch (state) { + case VFE_TOP_VER4_FSM_SOF: return "SOF"; + case VFE_TOP_VER4_FSM_EPOCH: return "EPOCH"; + case VFE_TOP_VER4_FSM_EOF: return "EOF"; + default: return "INVALID"; + } +} + +typedef int (*cam_vfe_handle_frame_irq_t)(struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload *payload, + struct cam_isp_hw_event_info *evt_info); + + +static int cam_vfe_handle_sof(struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload *payload, + struct cam_isp_hw_event_info *evt_info) +{ + if ((vfe_priv->enable_sof_irq_debug) && + (vfe_priv->irq_debug_cnt <= CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) { + CAM_INFO(CAM_ISP, "VFE:%u Received SOF at [%lld: %09lld]", + vfe_priv->hw_intf->hw_idx, + payload->ts.mono_time.tv_sec, + payload->ts.mono_time.tv_nsec); + + vfe_priv->irq_debug_cnt++; + if (vfe_priv->irq_debug_cnt == CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) { + struct cam_vfe_enable_sof_irq_args sof_irq_args; + + vfe_priv->irq_debug_cnt = 0; + + if (evt_info->res_id >= CAM_VFE_TOP_MUX_MAX) { + CAM_ERR(CAM_ISP, + "VFE:%u inval res_id for mux_rsrc:%d", + vfe_priv->hw_intf->hw_idx, evt_info->res_id); + return -EINVAL; + } + sof_irq_args.res = + &vfe_priv->top_priv->top_common.mux_rsrc[evt_info->res_id]; + sof_irq_args.enable_sof_irq_debug = false; + + cam_vfe_top_ver4_update_sof_debug((void *)(&sof_irq_args), + sizeof(sof_irq_args)); + } + } else { + uint32_t frm_irq_status = + payload->irq_reg_val[vfe_priv->common_reg->frame_timing_irq_reg_idx]; + int hw_ctxt_id = -1; + + if (vfe_priv->reg_data->is_mc_path) + hw_ctxt_id = 
cam_vfe_top_ver4_get_hw_ctxt_from_irq_status(vfe_priv, + frm_irq_status); + + CAM_DBG(CAM_ISP, "VFE:%u is_mc:%s hw_ctxt:%d Received SOF", + vfe_priv->hw_intf->hw_idx, + CAM_BOOL_TO_YESNO(vfe_priv->reg_data->is_mc_path), hw_ctxt_id); + vfe_priv->sof_ts.tv_sec = payload->ts.mono_time.tv_sec; + vfe_priv->sof_ts.tv_nsec = payload->ts.mono_time.tv_nsec; + } + vfe_priv->top_priv->sof_cnt++; + + return 0; +} + +static int cam_vfe_handle_epoch(struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload *payload, + struct cam_isp_hw_event_info *evt_info) +{ + CAM_DBG(CAM_ISP, "VFE:%u Received EPOCH", vfe_priv->hw_intf->hw_idx); + evt_info->reg_val = payload->reg_val; + vfe_priv->epoch_ts.tv_sec = payload->ts.mono_time.tv_sec; + vfe_priv->epoch_ts.tv_nsec = payload->ts.mono_time.tv_nsec; + + return 0; +} + +static int cam_vfe_handle_eof(struct cam_vfe_mux_ver4_data *vfe_priv, + struct cam_vfe_top_irq_evt_payload *payload, + struct cam_isp_hw_event_info *evt_info) +{ + uint32_t frm_irq_status = + payload->irq_reg_val[vfe_priv->common_reg->frame_timing_irq_reg_idx]; + int hw_ctxt_id = -1; + + if (vfe_priv->reg_data->is_mc_path) + hw_ctxt_id = cam_vfe_top_ver4_get_hw_ctxt_from_irq_status(vfe_priv, frm_irq_status); + + CAM_DBG(CAM_ISP, "VFE:%u is_mc:%s hw_ctxt:%d Received EOF", + vfe_priv->hw_intf->hw_idx, + CAM_BOOL_TO_YESNO(vfe_priv->reg_data->is_mc_path), hw_ctxt_id); + + vfe_priv->eof_ts.tv_sec = payload->ts.mono_time.tv_sec; + vfe_priv->eof_ts.tv_nsec = payload->ts.mono_time.tv_nsec; + + return 0; +} + +static int __cam_vfe_handle_frame_timing_irqs(struct cam_isp_resource_node *vfe_res, bool event, + enum cam_isp_hw_event_type event_type, cam_vfe_handle_frame_irq_t handle_irq_fn, + struct cam_vfe_top_irq_evt_payload *payload, struct cam_isp_hw_event_info *evt_info) +{ + struct cam_vfe_mux_ver4_data *vfe_priv = vfe_res->res_priv; + + if (!event) { + CAM_WARN(CAM_ISP, "VFE:%u missed %s", vfe_priv->hw_intf->hw_idx, + cam_isp_hw_evt_type_to_string(event_type)); 
+ } else { + handle_irq_fn(vfe_priv, payload, evt_info); + if (!(vfe_priv->top_priv->enable_ife_frame_irqs) + && vfe_priv->event_cb) + vfe_priv->event_cb(vfe_priv->priv, event_type, evt_info); + } + vfe_priv->fsm_state = cam_vfe_top_ver4_fsm_next_state(vfe_res, vfe_priv->fsm_state); + + return 0; +} + +static int cam_vfe_handle_frame_timing_irqs(struct cam_isp_resource_node *vfe_res, + uint32_t irq_status, struct cam_vfe_top_irq_evt_payload *payload, + struct cam_isp_hw_event_info *evt_info) +{ + struct cam_vfe_mux_ver4_data *vfe_priv = vfe_res->res_priv; + bool sof, epoch, eof; + int i, j; + + cam_vfe_irq_status_to_event(vfe_priv, irq_status, &sof, &epoch, &eof); + CAM_DBG(CAM_ISP, "VFE:%u SOF:%s EPOCH:%s EOF:%s", vfe_priv->hw_intf->hw_idx, + CAM_BOOL_TO_YESNO(sof), CAM_BOOL_TO_YESNO(epoch), CAM_BOOL_TO_YESNO(eof)); + + i = (sof ? 1 : 0) + (epoch ? 1 : 0) + (eof ? 1 : 0); + j = i; + + if (i == vfe_priv->n_frame_irqs) + CAM_WARN_RATE_LIMIT(CAM_ISP, "VFE:%u top-half delay", vfe_priv->hw_intf->hw_idx); + + while (i > 0) { + bool event; + enum cam_isp_hw_event_type event_type; + cam_vfe_handle_frame_irq_t handle_irq_fn; + + CAM_DBG_PR2(CAM_ISP, "VFE:%u enter state:%s (%d/%d)", vfe_priv->hw_intf->hw_idx, + cam_vfe_top_ver4_fsm_state_to_string(vfe_priv->fsm_state), i, j); + + switch (vfe_priv->fsm_state) { + case VFE_TOP_VER4_FSM_SOF: + event = sof; + event_type = CAM_ISP_HW_EVENT_SOF; + handle_irq_fn = cam_vfe_handle_sof; + break; + case VFE_TOP_VER4_FSM_EPOCH: + event = epoch; + event_type = CAM_ISP_HW_EVENT_EPOCH; + handle_irq_fn = cam_vfe_handle_epoch; + break; + case VFE_TOP_VER4_FSM_EOF: + event = eof; + event_type = CAM_ISP_HW_EVENT_EOF; + handle_irq_fn = cam_vfe_handle_eof; + break; + default: + CAM_ERR(CAM_ISP, "VFE:%u frame state machine in invalid state", + vfe_priv->hw_intf->hw_idx); + return -EINVAL; + } + + /* consume event */ + if (event) + i--; + + __cam_vfe_handle_frame_timing_irqs(vfe_res, event, event_type, handle_irq_fn, + payload, evt_info); + + 
CAM_DBG_PR2(CAM_ISP, "VFE:%u exit state:%s (%d/%d)", vfe_priv->hw_intf->hw_idx, + cam_vfe_top_ver4_fsm_state_to_string(vfe_priv->fsm_state), i, j); + } + + return CAM_VFE_IRQ_STATUS_SUCCESS; +} + +static int cam_vfe_handle_irq_bottom_half(void *handler_priv, + void *evt_payload_priv) +{ + int ret = CAM_VFE_IRQ_STATUS_ERR; + struct cam_isp_resource_node *vfe_res; + struct cam_vfe_mux_ver4_data *vfe_priv; + struct cam_vfe_top_irq_evt_payload *payload; + struct cam_isp_hw_event_info evt_info; + struct cam_isp_hw_error_event_info err_evt_info; + struct cam_isp_sof_ts_data sof_and_boot_time; + uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX] = {0}, frame_timing_mask; + int i = 0; + + if (!handler_priv || !evt_payload_priv) { + CAM_ERR(CAM_ISP, + "Invalid params handle_priv:%pK, evt_payload_priv:%pK", + handler_priv, evt_payload_priv); + return ret; + } + + vfe_res = handler_priv; + vfe_priv = vfe_res->res_priv; + payload = evt_payload_priv; + + if (atomic_read(&vfe_priv->top_priv->overflow_pending)) { + CAM_INFO(CAM_ISP, + "VFE:%u Handling overflow, Ignore bottom half", + vfe_res->hw_intf->hw_idx); + cam_vfe_top_put_evt_payload(vfe_priv, &payload); + return IRQ_HANDLED; + } + + for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) + irq_status[i] = payload->irq_reg_val[i]; + + sof_and_boot_time.boot_time = payload->ts.mono_time; + sof_and_boot_time.sof_ts = payload->ts.sof_ts; + + evt_info.hw_idx = vfe_res->hw_intf->hw_idx; + evt_info.hw_type = CAM_ISP_HW_TYPE_VFE; + evt_info.res_id = vfe_res->res_id; + evt_info.res_type = vfe_res->res_type; + evt_info.reg_val = 0; + evt_info.event_data = &sof_and_boot_time; + + frame_timing_mask = vfe_priv->reg_data->sof_irq_mask | + vfe_priv->reg_data->epoch0_irq_mask | + vfe_priv->reg_data->eof_irq_mask; + + if (irq_status[vfe_priv->common_reg->frame_timing_irq_reg_idx] & frame_timing_mask) { + ret = cam_vfe_handle_frame_timing_irqs(vfe_res, + irq_status[vfe_priv->common_reg->frame_timing_irq_reg_idx] & + frame_timing_mask, + payload, 
&evt_info); + } + + if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0] + & vfe_priv->reg_data->error_irq_mask) { + err_evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION; + evt_info.event_data = (void *)&err_evt_info; + + if (vfe_priv->event_cb) + vfe_priv->event_cb(vfe_priv->priv, + CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info); + + + cam_vfe_top_ver4_print_top_irq_error(vfe_priv, payload, + irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0], vfe_res->res_id); + + cam_vfe_top_ver4_print_error_irq_timestamps(vfe_priv, + irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]); + + cam_vfe_top_ver4_print_debug_regs(vfe_priv->top_priv); + + ret = CAM_VFE_IRQ_STATUS_ERR; + } + + if ((vfe_priv->top_priv->diag_config_debug_val_0 & + CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) && + (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0] & + vfe_priv->reg_data->sof_irq_mask)) { + cam_vfe_top_ver4_print_diag_sensor_frame_count_info(vfe_priv, + payload, 0, vfe_res->res_id, false); + } + + /* Perf counter dump */ + if (irq_status[vfe_priv->common_reg->frame_timing_irq_reg_idx] & + vfe_priv->reg_data->eof_irq_mask) + cam_vfe_top_dump_perf_counters("EOF", vfe_res->res_name, vfe_priv->top_priv); + + if (irq_status[vfe_priv->common_reg->frame_timing_irq_reg_idx] & + vfe_priv->reg_data->sof_irq_mask) + cam_vfe_top_dump_perf_counters("SOF", vfe_res->res_name, vfe_priv->top_priv); + + cam_vfe_top_put_evt_payload(vfe_priv, &payload); + + CAM_DBG(CAM_ISP, "VFE:%u returning status = %d", evt_info.hw_idx, ret); + return ret; +} + +static int cam_vfe_ver4_err_irq_top_half( + uint32_t evt_id, + struct cam_irq_th_payload *th_payload) +{ + int32_t rc = 0; + int i; + struct cam_isp_resource_node *vfe_res; + struct cam_vfe_mux_ver4_data *vfe_priv; + struct cam_vfe_top_irq_evt_payload *evt_payload; + bool error_flag = false; + + vfe_res = th_payload->handler_priv; + vfe_priv = vfe_res->res_priv; + /* + * need to handle overflow condition here, otherwise irq storm + * will block everything + */ + if ((th_payload->evt_status_arr[0] & + 
vfe_priv->reg_data->error_irq_mask)) { + CAM_ERR(CAM_ISP, + "VFE:%u Err IRQ status_0: 0x%X", + vfe_res->hw_intf->hw_idx, + th_payload->evt_status_arr[0]); + CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%u", + vfe_res->hw_intf->hw_idx); + cam_irq_controller_disable_all( + vfe_priv->vfe_irq_controller); + error_flag = true; + } + + rc = cam_vfe_get_evt_payload(vfe_priv, &evt_payload); + if (rc) + return rc; + + cam_isp_hw_get_timestamp(&evt_payload->ts); + if (error_flag) { + vfe_priv->error_ts.tv_sec = + evt_payload->ts.mono_time.tv_sec; + vfe_priv->error_ts.tv_nsec = + evt_payload->ts.mono_time.tv_nsec; + } + + for (i = 0; i < th_payload->num_registers; i++) + evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i]; + + th_payload->evt_payload_priv = evt_payload; + + return rc; +} + +static int cam_vfe_resource_start( + struct cam_isp_resource_node *vfe_res) +{ + struct cam_vfe_mux_ver4_data *rsrc_data; + uint32_t val = 0, epoch_factor = 50; + int rc = 0; + uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + uint32_t irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + uint32_t sof_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX]; + + if (!vfe_res) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + if (vfe_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) { + CAM_ERR(CAM_ISP, "VFE:%u Error, Invalid camif res res_state:%d", + vfe_res->hw_intf->hw_idx, vfe_res->res_state); + return -EINVAL; + } + + memset(err_irq_mask, 0, sizeof(err_irq_mask)); + memset(irq_mask, 0, sizeof(irq_mask)); + memset(sof_irq_mask, 0, sizeof(sof_irq_mask)); + + rsrc_data = (struct cam_vfe_mux_ver4_data *)vfe_res->res_priv; + + /* config debug status registers */ + cam_io_w_mb(rsrc_data->reg_data->top_debug_cfg_en, rsrc_data->mem_base + + rsrc_data->common_reg->top_debug_cfg); + + if (rsrc_data->is_lite || !rsrc_data->is_pixel_path || + (rsrc_data->common_reg->capabilities & CAM_VFE_COMMON_CAP_SKIP_CORE_CFG)) + goto skip_core_cfg; + + /* IFE top cfg programmed via CDM 
*/ + CAM_DBG(CAM_ISP, "VFE:%u TOP core_cfg0: 0x%x core_cfg1: 0x%x", + vfe_res->hw_intf->hw_idx, + cam_io_r_mb(rsrc_data->mem_base + + rsrc_data->common_reg->core_cfg_0), + cam_io_r_mb(rsrc_data->mem_base + + rsrc_data->common_reg->core_cfg_1)); + + /* % epoch factor from userland */ + if ((rsrc_data->epoch_factor) && (rsrc_data->epoch_factor <= 100)) + epoch_factor = rsrc_data->epoch_factor; + + val = ((rsrc_data->last_line + rsrc_data->vbi_value) - + rsrc_data->first_line) * epoch_factor / 100; + + if (val > rsrc_data->last_line) + val = rsrc_data->last_line; + + if (rsrc_data->horizontal_bin || rsrc_data->qcfa_bin || + rsrc_data->sfe_binned_epoch_cfg) + val >>= 1; + + cam_io_w_mb(val, rsrc_data->mem_base + + rsrc_data->common_reg->epoch_height_cfg); + CAM_DBG(CAM_ISP, + "VFE:%u height [0x%x : 0x%x] vbi_val: 0x%x epoch_factor: %u%% epoch_line_cfg: 0x%x", + vfe_res->hw_intf->hw_idx, rsrc_data->first_line, rsrc_data->last_line, + rsrc_data->vbi_value, epoch_factor, val); + +skip_core_cfg: + + if (rsrc_data->common_reg->capabilities & CAM_VFE_COMMON_CAP_CORE_MUX_CFG) + CAM_DBG(CAM_ISP, "VFE:%u TOP core_mux_cfg: 0x%x", + vfe_res->hw_intf->hw_idx, + cam_io_r_mb(rsrc_data->mem_base + rsrc_data->common_reg->core_mux_cfg)); + + vfe_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING; + + /* reset sof count */ + rsrc_data->top_priv->sof_cnt = 0; + + /* disable sof irq debug flag */ + rsrc_data->enable_sof_irq_debug = false; + rsrc_data->irq_debug_cnt = 0; + + /* Skip subscribing to timing irqs in these scenarios: + * Debug config is not enabled for IFE frame timing IRQs, and + * 1. Resource is dual IFE slave + * 2. Resource is not primary RDI + * 3. non-sfe use cases, such cases are taken care in CSID. 
+ */ + + if (!(rsrc_data->top_priv->enable_ife_frame_irqs) && + (((rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) && rsrc_data->is_dual) || + (!rsrc_data->is_pixel_path && !vfe_res->is_rdi_primary_res) || + !rsrc_data->handle_camif_irq)) + goto skip_frame_irq_subscribe; + + irq_mask[rsrc_data->common_reg->frame_timing_irq_reg_idx] = + rsrc_data->reg_data->sof_irq_mask | rsrc_data->reg_data->epoch0_irq_mask | + rsrc_data->reg_data->eof_irq_mask; + + rsrc_data->n_frame_irqs = + hweight32(irq_mask[rsrc_data->common_reg->frame_timing_irq_reg_idx]); + + if (!rsrc_data->frame_irq_handle) { + rsrc_data->frame_irq_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_1, + irq_mask, + vfe_res, + vfe_res->top_half_handler, + vfe_res->bottom_half_handler, + vfe_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (rsrc_data->frame_irq_handle < 1) { + CAM_ERR(CAM_ISP, "VFE:%u Frame IRQs handle subscribe failure", + vfe_res->hw_intf->hw_idx); + rc = -ENOMEM; + rsrc_data->frame_irq_handle = 0; + } + } + +skip_frame_irq_subscribe: + /* Subscribe SOF IRQ only if FRAME IRQs are not subscribed */ + if (!rsrc_data->frame_irq_handle) { + /* SOF IRQ mask is set to 0 intentionally at resource start */ + rsrc_data->sof_irq_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_1, + sof_irq_mask, + vfe_res, + vfe_res->top_half_handler, + vfe_res->bottom_half_handler, + vfe_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + if (rsrc_data->sof_irq_handle < 1) { + CAM_ERR(CAM_ISP, "VFE:%u SOF IRQ handle subscribe failed", + vfe_res->hw_intf->hw_idx); + rsrc_data->sof_irq_handle = 0; + return -ENOMEM; + } + } + + err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] = rsrc_data->reg_data->error_irq_mask; + + if (!rsrc_data->irq_err_handle) { + rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq( + rsrc_data->vfe_irq_controller, + CAM_IRQ_PRIORITY_0, + err_irq_mask, + vfe_res, + cam_vfe_ver4_err_irq_top_half, + 
vfe_res->bottom_half_handler, + vfe_res->tasklet_info, + &tasklet_bh_api, + CAM_IRQ_EVT_GROUP_0); + + if (rsrc_data->irq_err_handle < 1) { + CAM_ERR(CAM_ISP, "VFE:%u Error IRQ handle subscribe failure", + vfe_res->hw_intf->hw_idx); + rc = -ENOMEM; + rsrc_data->irq_err_handle = 0; + } + } + + rsrc_data->fsm_state = VFE_TOP_VER4_FSM_SOF; + + CAM_DBG(CAM_ISP, "VFE:%u Res: %s Start Done", + vfe_res->hw_intf->hw_idx, + vfe_res->res_name); + + return rc; +} + +static int cam_vfe_resource_stop( + struct cam_isp_resource_node *vfe_res) +{ + struct cam_vfe_mux_ver4_data *vfe_priv; + struct cam_vfe_top_ver4_priv *top_priv; + int rc = 0; + uint32_t val = 0; + + if (!vfe_res) { + CAM_ERR(CAM_ISP, "Error, Invalid input arguments"); + return -EINVAL; + } + + if ((vfe_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) || + (vfe_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)) + return 0; + + vfe_priv = (struct cam_vfe_mux_ver4_data *)vfe_res->res_priv; + top_priv = vfe_priv->top_priv; + + if (vfe_priv->is_lite || !vfe_priv->is_pixel_path || + (vfe_priv->common_reg->capabilities & CAM_VFE_COMMON_CAP_SKIP_CORE_CFG)) + goto skip_core_decfg; + + if ((vfe_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (vfe_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + val = cam_io_r_mb(vfe_priv->mem_base + + vfe_priv->common_reg->core_cfg_0); + val &= (~(1 << CAM_SHIFT_TOP_CORE_VER_4_CFG_DSP_EN)); + cam_io_w_mb(val, vfe_priv->mem_base + + vfe_priv->common_reg->core_cfg_0); + } + +skip_core_decfg: + if (vfe_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) + vfe_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + + if (vfe_priv->frame_irq_handle) { + cam_irq_controller_unsubscribe_irq( + vfe_priv->vfe_irq_controller, + vfe_priv->frame_irq_handle); + vfe_priv->frame_irq_handle = 0; + } + vfe_priv->n_frame_irqs = 0; + + if (vfe_priv->sof_irq_handle) { + cam_irq_controller_unsubscribe_irq( + vfe_priv->vfe_irq_controller, + vfe_priv->sof_irq_handle); + vfe_priv->sof_irq_handle = 0; + } + + if 
(vfe_priv->irq_err_handle) { + cam_irq_controller_unsubscribe_irq( + vfe_priv->vfe_irq_controller, + vfe_priv->irq_err_handle); + vfe_priv->irq_err_handle = 0; + } + + /* Skip epoch factor reset for internal recovery */ + if (!top_priv->top_common.skip_data_rst_on_stop) + vfe_priv->epoch_factor = 0; + + CAM_DBG(CAM_ISP, "VFE:%u Res: %s Stopped", + vfe_res->hw_intf->hw_idx, + vfe_res->res_name); + + return rc; +} + +static int cam_vfe_resource_init( + struct cam_isp_resource_node *vfe_res, + void *init_args, uint32_t arg_size) +{ + struct cam_vfe_mux_ver4_data *rsrc_data; + struct cam_hw_soc_info *soc_info; + int rc = 0; + + if (!vfe_res) { + CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + rsrc_data = vfe_res->res_priv; + soc_info = rsrc_data->soc_info; + + if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME); + if (rc) + CAM_ERR(CAM_ISP, + "VFE:%u failed to enable dsp clk, rc = %d", + vfe_res->hw_intf->hw_idx, rc); + } + + rsrc_data->sof_ts.tv_sec = 0; + rsrc_data->sof_ts.tv_nsec = 0; + rsrc_data->epoch_ts.tv_sec = 0; + rsrc_data->epoch_ts.tv_nsec = 0; + rsrc_data->eof_ts.tv_sec = 0; + rsrc_data->eof_ts.tv_nsec = 0; + rsrc_data->error_ts.tv_sec = 0; + rsrc_data->error_ts.tv_nsec = 0; + + CAM_DBG(CAM_ISP, "VFE:%u Res: %s Init Done", + vfe_res->hw_intf->hw_idx, + vfe_res->res_name); + + return rc; +} + +static int cam_vfe_resource_deinit( + struct cam_isp_resource_node *vfe_res, + void *deinit_args, uint32_t arg_size) +{ + struct cam_vfe_mux_ver4_data *rsrc_data; + struct cam_hw_soc_info *soc_info; + int rc = 0; + + if (!vfe_res) { + CAM_ERR(CAM_ISP, "Error Invalid input arguments"); + return -EINVAL; + } + + rsrc_data = vfe_res->res_priv; + soc_info = rsrc_data->soc_info; + + if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) && + (rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) { + rc = 
cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME); + if (rc) + CAM_ERR(CAM_ISP, "VFE:%u failed to disable dsp clk", + vfe_res->hw_intf->hw_idx); + } + + CAM_DBG(CAM_ISP, "VFE:%u Res: %s DeInit Done", + vfe_res->hw_intf->hw_idx, + vfe_res->res_name); + return rc; +} + +int cam_vfe_res_mux_init( + struct cam_vfe_top_ver4_priv *top_priv, + struct cam_hw_intf *hw_intf, + struct cam_hw_soc_info *soc_info, + void *vfe_hw_info, + struct cam_isp_resource_node *vfe_res, + void *vfe_irq_controller) +{ + struct cam_vfe_mux_ver4_data *vfe_priv = NULL; + struct cam_vfe_ver4_path_hw_info *hw_info = vfe_hw_info; + struct cam_vfe_soc_private *soc_priv = soc_info->soc_private; + int i; + + vfe_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_mux_ver4_data), + GFP_KERNEL); + if (!vfe_priv) + return -ENOMEM; + + vfe_res->res_priv = vfe_priv; + vfe_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base; + vfe_priv->common_reg = hw_info->common_reg; + vfe_priv->reg_data = hw_info->reg_data; + vfe_priv->hw_intf = hw_intf; + vfe_priv->is_lite = soc_priv->is_ife_lite; + vfe_priv->soc_info = soc_info; + vfe_priv->vfe_irq_controller = vfe_irq_controller; + vfe_priv->is_pixel_path = (vfe_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF); + vfe_priv->top_priv = top_priv; + + vfe_res->init = cam_vfe_resource_init; + vfe_res->deinit = cam_vfe_resource_deinit; + vfe_res->start = cam_vfe_resource_start; + vfe_res->stop = cam_vfe_resource_stop; + vfe_res->top_half_handler = cam_vfe_handle_irq_top_half; + vfe_res->bottom_half_handler = cam_vfe_handle_irq_bottom_half; + + spin_lock_init(&vfe_priv->spin_lock); + INIT_LIST_HEAD(&vfe_priv->free_payload_list); + for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) { + INIT_LIST_HEAD(&vfe_priv->evt_payload[i].list); + list_add_tail(&vfe_priv->evt_payload[i].list, + &vfe_priv->free_payload_list); + } + return 0; +} + +int cam_vfe_res_mux_deinit( + struct cam_isp_resource_node *vfe_res) +{ + struct cam_vfe_mux_ver4_data *vfe_priv; + int i = 0; + + if (!vfe_res) { + 
CAM_ERR(CAM_ISP, "Error, VFE Node Resource is NULL %pK", vfe_res); + return -ENODEV; + } + + vfe_priv = vfe_res->res_priv; + + vfe_res->init = NULL; + vfe_res->deinit = NULL; + vfe_res->start = NULL; + vfe_res->stop = NULL; + vfe_res->process_cmd = NULL; + vfe_res->top_half_handler = NULL; + vfe_res->bottom_half_handler = NULL; + vfe_res->res_priv = NULL; + + if (!vfe_priv) { + CAM_ERR(CAM_ISP, "VFE:%u vfe_priv is NULL %pK", vfe_res->hw_intf->hw_idx, vfe_priv); + return -ENODEV; + } + + INIT_LIST_HEAD(&vfe_priv->free_payload_list); + for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) + INIT_LIST_HEAD(&vfe_priv->evt_payload[i].list); + CAM_MEM_FREE(vfe_priv); + + return 0; +} + +int cam_vfe_top_ver4_init( + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *vfe_irq_controller, + struct cam_vfe_top **vfe_top_ptr) +{ + int i, j = 0, rc = 0; + struct cam_vfe_top_ver4_priv *top_priv = NULL; + struct cam_vfe_top_ver4_hw_info *hw_info = top_hw_info; + struct cam_vfe_top *vfe_top; + + vfe_top = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_top), GFP_KERNEL); + if (!vfe_top) { + CAM_DBG(CAM_ISP, "VFE:%u Error, Failed to alloc for vfe_top", hw_intf->hw_idx); + rc = -ENOMEM; + goto end; + } + + top_priv = CAM_MEM_ZALLOC(sizeof(struct cam_vfe_top_ver4_priv), + GFP_KERNEL); + if (!top_priv) { + CAM_DBG(CAM_ISP, "VFE:%u Error, Failed to alloc for vfe_top_priv", hw_intf->hw_idx); + rc = -ENOMEM; + goto free_vfe_top; + } + + vfe_top->top_priv = top_priv; + top_priv->top_common.applied_clk_rate = 0; + + if (hw_info->num_mux > CAM_VFE_TOP_MUX_MAX) { + CAM_ERR(CAM_ISP, "VFE:%u Invalid number of input rsrc: %d, max: %d", + hw_intf->hw_idx, hw_info->num_mux, CAM_VFE_TOP_MUX_MAX); + rc = -EINVAL; + goto free_top_priv; + } + + top_priv->top_common.num_mux = hw_info->num_mux; + + for (i = 0; i < top_priv->top_common.num_mux; i++) { + top_priv->top_common.mux_rsrc[i].res_type = + CAM_ISP_RESOURCE_VFE_IN; + top_priv->top_common.mux_rsrc[i].hw_intf = hw_intf; + 
top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_AVAILABLE; + top_priv->top_common.req_clk_rate[i] = 0; + + if (hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_4_0) { + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_CAMIF; + + rc = cam_vfe_res_mux_init(top_priv, + hw_intf, soc_info, + &hw_info->vfe_full_hw_info, + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + scnprintf(top_priv->top_common.mux_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "CAMIF"); + } else if (hw_info->mux_type[i] == + CAM_VFE_PDLIB_VER_1_0) { + /* set the PDLIB resource id */ + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_PDLIB; + + rc = cam_vfe_res_mux_init(top_priv, + hw_intf, soc_info, + &hw_info->pdlib_hw_info, + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + scnprintf(top_priv->top_common.mux_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "PDLIB"); + } else if (hw_info->mux_type[i] == + CAM_VFE_RDI_VER_1_0 && j < hw_info->num_rdi) { + /* set the RDI resource id */ + top_priv->top_common.mux_rsrc[i].res_id = + CAM_ISP_HW_VFE_IN_RDI0 + j; + + scnprintf(top_priv->top_common.mux_rsrc[i].res_name, + CAM_ISP_RES_NAME_LEN, "RDI_%d", j); + rc = cam_vfe_res_mux_init(top_priv, + hw_intf, soc_info, + &hw_info->rdi_hw_info[j++], + &top_priv->top_common.mux_rsrc[i], + vfe_irq_controller); + } else { + CAM_WARN(CAM_ISP, "VFE:%u Invalid mux type: %u", + hw_intf->hw_idx, hw_info->mux_type[i]); + } + if (rc) + goto deinit_resources; + } + + + vfe_top->hw_ops.get_hw_caps = cam_vfe_top_ver4_get_hw_caps; + vfe_top->hw_ops.init = cam_vfe_top_ver4_init_hw; + vfe_top->hw_ops.reset = cam_vfe_top_ver4_reset; + vfe_top->hw_ops.reserve = cam_vfe_top_ver4_reserve; + vfe_top->hw_ops.release = cam_vfe_top_ver4_release; + vfe_top->hw_ops.start = cam_vfe_top_ver4_start; + vfe_top->hw_ops.stop = cam_vfe_top_ver4_stop; + vfe_top->hw_ops.read = cam_vfe_top_ver4_read; + vfe_top->hw_ops.write = cam_vfe_top_ver4_write; + vfe_top->hw_ops.process_cmd = 
cam_vfe_top_ver4_process_cmd; + *vfe_top_ptr = vfe_top; + + top_priv->common_data.hw_info = hw_info; + top_priv->top_common.soc_info = soc_info; + top_priv->common_data.hw_intf = hw_intf; + top_priv->top_common.hw_idx = hw_intf->hw_idx; + top_priv->common_data.common_reg = hw_info->common_reg; + + if (top_priv->common_data.common_reg->num_perf_counters > CAM_VFE_PERF_CNT_MAX) { + CAM_ERR(CAM_ISP, "Invalid number of perf counters: %d max: %d", + top_priv->common_data.common_reg->num_perf_counters, + CAM_VFE_PERF_CNT_MAX); + rc = -EINVAL; + goto deinit_resources; + } + + return rc; + +deinit_resources: + + for (--i; i >= 0; i--) { + if (hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_4_0) { + if (cam_vfe_res_mux_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, "VFE:%u Camif Deinit failed", hw_intf->hw_idx); + } else { + if (cam_vfe_res_mux_deinit( + &top_priv->top_common.mux_rsrc[i])) + CAM_ERR(CAM_ISP, + "VFE:%u Camif lite res id %d Deinit failed", + hw_intf->hw_idx, top_priv->top_common.mux_rsrc[i] + .res_id); + } + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + } + + +free_top_priv: + CAM_MEM_FREE(vfe_top->top_priv); +free_vfe_top: + CAM_MEM_FREE(vfe_top); +end: + return rc; +} + +int cam_vfe_top_ver4_deinit(struct cam_vfe_top **vfe_top_ptr) +{ + int i, rc = 0; + struct cam_vfe_top_ver4_priv *top_priv = NULL; + struct cam_vfe_top *vfe_top; + + if (!vfe_top_ptr) { + CAM_ERR(CAM_ISP, "Error, Invalid input"); + return -EINVAL; + } + + vfe_top = *vfe_top_ptr; + if (!vfe_top) { + CAM_ERR(CAM_ISP, "Error, vfe_top NULL"); + return -ENODEV; + } + + top_priv = vfe_top->top_priv; + if (!top_priv) { + CAM_ERR(CAM_ISP, "Error, vfe_top_priv NULL"); + rc = -ENODEV; + goto free_vfe_top; + } + + for (i = 0; i < top_priv->top_common.num_mux; i++) { + top_priv->top_common.mux_rsrc[i].res_state = + CAM_ISP_RESOURCE_STATE_UNAVAILABLE; + rc = cam_vfe_res_mux_deinit(&top_priv->top_common.mux_rsrc[i]); + if (rc) + CAM_ERR(CAM_ISP, "VFE:%u 
Mux[%d] deinit failed rc=%d", + top_priv->common_data.hw_intf->hw_idx, i, rc); + } + + CAM_MEM_FREE(vfe_top->top_priv); + +free_vfe_top: + CAM_MEM_FREE(vfe_top); + *vfe_top_ptr = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver4.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver4.h new file mode 100644 index 0000000000..b58f2bd593 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver4.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_VFE_TOP_VER4_H_ +#define _CAM_VFE_TOP_VER4_H_ + +#include "cam_vfe_top_common.h" +#include "cam_isp_hw.h" +#include "cam_vfe_top.h" + +#define CAM_VFE_RDI_VER2_MAX 4 +#define CAM_VFE_CAMIF_LITE_EVT_MAX 256 +#define CAM_VFE_TOP_DBG_REG_MAX 19 +#define CAM_VFE_DIAG_SENSOR_STATUS_MAX 4 +#define CAM_VFE_DIAG_FRAME_COUNT_STATUS_MAX 3 + +struct cam_vfe_top_ver4_perf_count_reg_offset { + uint32_t perf_count_cfg; + uint32_t perf_count_cfg_mc; + uint32_t perf_pix_count; + uint32_t perf_line_count; + uint32_t perf_stall_count; + uint32_t perf_always_count; + uint32_t perf_count_status; +}; + +struct cam_vfe_top_ver4_reg_offset_common { + uint32_t hw_version; + uint32_t titan_version; + uint32_t hw_capability; + uint32_t main_feature; + uint32_t bayer_feature; + uint32_t lens_feature; + uint32_t stats_feature; + uint32_t color_feature; + uint32_t zoom_feature; + uint32_t fd_feature; + uint32_t global_reset_cmd; + uint32_t core_cgc_ovd_0; + uint32_t core_cgc_ovd_1; + uint32_t ahb_cgc_ovd; + uint32_t noc_cgc_ovd; + uint32_t bus_cgc_ovd; + uint32_t core_cfg_0; + uint32_t core_cfg_1; + uint32_t core_cfg_2; + uint32_t core_cfg_3; + uint32_t core_cfg_4; + uint32_t 
core_cfg_5; + uint32_t core_cfg_6; + uint32_t core_mux_cfg; + uint32_t period_cfg; + uint32_t reg_update_cmd; + uint32_t trigger_cdm_events; + uint32_t ipp_violation_status; + uint32_t bayer_violation_status; + uint32_t pdaf_violation_status; + uint32_t custom_frame_idx; + uint32_t dsp_status; + uint32_t diag_config; + uint32_t diag_config_1; + uint32_t diag_sensor_status[ + CAM_VFE_DIAG_SENSOR_STATUS_MAX]; + uint32_t diag_frm_cnt_status[ + CAM_VFE_DIAG_FRAME_COUNT_STATUS_MAX]; + uint32_t stats_throttle_cfg_0; + uint32_t stats_throttle_cfg_1; + uint32_t stats_throttle_cfg_2; + uint32_t pdaf_parsed_throttle_cfg; + uint32_t wirc_throttle_cfg; + uint32_t fd_y_throttle_cfg; + uint32_t fd_c_throttle_cfg; + uint32_t ds16_g_throttle_cfg; + uint32_t ds16_br_throttle_cfg; + uint32_t ds4_g_throttle_cfg; + uint32_t ds4_br_throttle_cfg; + uint32_t ds2_g_throttle_cfg; + uint32_t ds2_br_throttle_cfg; + uint32_t full_out_throttle_cfg; + uint32_t bus_violation_status; + uint32_t bus_overflow_status; + uint32_t irq_sub_pattern_cfg; + uint32_t epoch0_pattern_cfg; + uint32_t epoch1_pattern_cfg; + uint32_t epoch_height_cfg; + uint32_t num_perf_counters; + struct cam_vfe_top_ver4_perf_count_reg_offset + perf_count_reg[CAM_VFE_PERF_CNT_MAX]; + uint32_t top_debug_cfg; + uint32_t bayer_debug_cfg; + uint32_t top_debug_err_vec_irq[CAM_VFE_TOP_DEBUG_VEC_ERR_REGS]; + uint32_t top_debug_err_vec_ts_lb; + uint32_t top_debug_err_vec_ts_mb; + uint32_t bayer_debug_err_vec_irq[CAM_VFE_TOP_DEBUG_VEC_ERR_REGS]; + uint32_t bayer_debug_err_vec_ts_lb; + uint32_t bayer_debug_err_vec_ts_mb; + uint32_t pdaf_input_cfg_0; + uint32_t pdaf_input_cfg_1; + uint32_t num_top_debug_reg; + uint32_t num_bayer_debug_reg; + uint32_t *top_debug; + uint32_t *bayer_debug; + uint32_t frame_timing_irq_reg_idx; + uint32_t capabilities; +}; + +struct cam_vfe_top_common_cfg { + uint32_t vid_ds16_r2pd; + uint32_t vid_ds4_r2pd; + uint32_t disp_ds16_r2pd; + uint32_t disp_ds4_r2pd; + uint32_t dsp_streaming_tap_point; + uint32_t 
ihist_src_sel; + uint32_t input_pp_fmt; + uint32_t hdr_mux_sel_pp; +}; + +struct cam_vfe_top_ver4_module_desc { + uint32_t id; + uint8_t *desc; +}; + +struct cam_vfe_top_ver4_wr_client_desc { + uint32_t wm_id; + uint8_t *desc; +}; + +struct cam_vfe_top_ver4_top_err_irq_desc { + uint32_t bitmask; + char *err_name; + char *desc; + char *debug; +}; + +struct cam_vfe_top_ver4_pdaf_violation_desc { + uint32_t bitmask; + char *desc; +}; + +struct cam_vfe_top_ver4_bayer_violation_desc { + uint32_t bitmask; + char *desc; +}; + +struct cam_vfe_top_ver4_pdaf_lcr_res_info { + uint32_t res_id; + uint32_t val; +}; + +struct cam_vfe_ver4_path_hw_info { + struct cam_vfe_top_ver4_reg_offset_common *common_reg; + struct cam_vfe_ver4_path_reg_data *reg_data; +}; + +struct cam_vfe_top_ver4_debug_reg_info { + uint32_t shift; + char *clc_name; + uint32_t debug_idle_reg_addr; + uint32_t debug_idle_bitmask; +}; + +struct cam_vfe_top_ver4_diag_reg_info { + uint32_t bitmask; + char *name; +}; + +struct cam_vfe_top_ver4_diag_reg_fields { + uint32_t num_fields; + struct cam_vfe_top_ver4_diag_reg_info *field; +}; + + +struct cam_vfe_ver4_fcg_module_info { + uint32_t max_fcg_ch_ctx; + uint32_t max_fcg_predictions; + uint32_t fcg_index_shift; + uint32_t max_reg_val_pair_size; + uint32_t fcg_type_size; + uint32_t fcg_phase_index_cfg_0; + uint32_t fcg_phase_index_cfg_1; + uint32_t fcg_reg_ctxt_shift; + uint32_t fcg_reg_ctxt_mask; + uint32_t fcg_reg_ctxt_sel; +}; + +struct cam_vfe_top_ver4_hw_info { + struct cam_vfe_top_ver4_reg_offset_common *common_reg; + struct cam_vfe_ver4_path_hw_info vfe_full_hw_info; + struct cam_vfe_ver4_path_hw_info pdlib_hw_info; + struct cam_vfe_ver4_path_hw_info *rdi_hw_info; + struct cam_vfe_ver4_path_reg_data *reg_data; + struct cam_vfe_top_ver4_wr_client_desc *wr_client_desc; + struct cam_vfe_top_ver4_module_desc *ipp_module_desc; + struct cam_vfe_top_ver4_module_desc *bayer_module_desc; + uint32_t num_reg; + struct cam_vfe_top_ver4_debug_reg_info 
(*top_debug_reg_info)[][8]; + struct cam_vfe_top_ver4_debug_reg_info (*bayer_debug_reg_info)[][8]; + uint32_t num_mux; + uint32_t num_path_port_map; + uint32_t mux_type[CAM_VFE_TOP_MUX_MAX]; + uint32_t path_port_map[CAM_ISP_HW_PATH_PORT_MAP_MAX][2]; + uint32_t num_rdi; + uint32_t num_top_errors; + struct cam_vfe_top_ver4_top_err_irq_desc *top_err_desc; + uint32_t num_pdaf_violation_errors; + struct cam_vfe_top_ver4_pdaf_violation_desc *pdaf_violation_desc; + struct cam_vfe_top_ver4_pdaf_lcr_res_info *pdaf_lcr_res_mask; + uint32_t num_pdaf_lcr_res; + struct cam_vfe_ver4_fcg_module_info *fcg_module_info; + struct cam_vfe_top_ver4_diag_reg_fields *diag_sensor_info; + struct cam_vfe_top_ver4_diag_reg_fields *diag_frame_info; + bool fcg_supported; + bool fcg_mc_supported; +}; + +struct cam_vfe_ver4_path_reg_data { + uint32_t epoch_line_cfg; + uint32_t sof_irq_mask; + uint32_t epoch0_irq_mask; + uint32_t epoch1_irq_mask; + uint32_t eof_irq_mask; + uint32_t error_irq_mask; + uint32_t enable_diagnostic_hw; + uint32_t top_debug_cfg_en; + uint32_t ipp_violation_mask; + uint32_t bayer_violation_mask; + uint32_t pdaf_violation_mask; + uint32_t diag_violation_mask; + uint32_t diag_sensor_sel_mask; + uint32_t diag_frm_count_mask_0; + uint32_t diag_frm_count_mask_1; + bool is_mc_path; + uint32_t frm_irq_hw_ctxt_mask[CAM_ISP_MULTI_CTXT_MAX]; +}; + + +int cam_vfe_top_ver4_init(struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *vfe_irq_controller, + struct cam_vfe_top **vfe_top); + +int cam_vfe_top_ver4_deinit(struct cam_vfe_top **vfe_top); + +#define VFE_DBG_INFO(shift_val, name) {.shift = shift_val, .clc_name = name} + +#define VFE_DBG_INFO_ARRAY_4bit(name1, name2, name3, name4, name5, name6, name7, name8) \ + { \ + VFE_DBG_INFO(0, name1), \ + VFE_DBG_INFO(4, name2), \ + VFE_DBG_INFO(8, name3), \ + VFE_DBG_INFO(12, name4), \ + VFE_DBG_INFO(16, name5), \ + VFE_DBG_INFO(20, name6), \ + VFE_DBG_INFO(24, name7), \ + VFE_DBG_INFO(28, name8), \ 
+ } + +#define VFE_DBG_INFO_WITH_IDLE(shift_val, name, idle_addr, idle_bitmask)\ + { \ + .shift = shift_val, \ + .clc_name = name, \ + .debug_idle_reg_addr = idle_addr, \ + .debug_idle_bitmask = idle_bitmask \ + } + +#endif /* _CAM_VFE_TOP_VER4_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h new file mode 100644 index 0000000000..7d1b770aa3 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_VFE_TOP_H_ +#define _CAM_VFE_TOP_H_ + +#include "cam_hw_intf.h" +#include "cam_isp_hw.h" + +#define CAM_VFE_TOP_VER_1_0 0x100000 +#define CAM_VFE_TOP_VER_2_0 0x200000 +#define CAM_VFE_TOP_VER_3_0 0x300000 +#define CAM_VFE_TOP_VER_4_0 0x400000 + +#define CAM_VFE_CAMIF_VER_1_0 0x10 +#define CAM_VFE_CAMIF_VER_2_0 0x20 +#define CAM_VFE_CAMIF_VER_3_0 0x30 +#define CAM_VFE_CAMIF_VER_4_0 0x40 + +#define CAM_VFE_CAMIF_LITE_VER_2_0 0x02 + +#define CAM_VFE_RDI_VER_1_0 0x1000 +#define CAM_VFE_IN_RD_VER_1_0 0x2000 + +#define CAM_VFE_LCR_VER_1_0 0x100 +#define CAM_VFE_PDLIB_VER_1_0 0x10000 + +/* + * Debug values for camif module + */ +#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS BIT_ULL(0) +#define CAMIF_DEBUG_ENABLE_REG_DUMP BIT(1) +#define CAM_VFE_CAMIF_EVT_MAX 256 + +struct cam_vfe_top { + void *top_priv; + struct cam_hw_ops hw_ops; +}; + +int cam_vfe_top_init(uint32_t top_version, + struct cam_hw_soc_info *soc_info, + struct cam_hw_intf *hw_intf, + void *top_hw_info, + void *vfe_irq_controller, + struct cam_vfe_top **vfe_top); + +int cam_vfe_top_deinit(uint32_t top_version, + struct cam_vfe_top **vfe_top); + 
+#endif /* _CAM_VFE_TOP_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_context.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_context.c new file mode 100644 index 0000000000..9305623c8a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_context.c @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include + +#include +#include "cam_mem_mgr.h" +#include "cam_jpeg_context.h" +#include "cam_context_utils.h" +#include "cam_debug_util.h" +#include "cam_packet_util.h" +#include "cam_jpeg_hw_intf.h" + +static const char jpeg_dev_name[] = "cam-jpeg"; + +static int cam_jpeg_context_dump_active_request(void *data, void *args) +{ + + struct cam_context *ctx = (struct cam_context *)data; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args; + int rc = 0; + + if (!ctx || !pf_args) { + CAM_ERR(CAM_JPEG, "Invalid ctx %pK or pf args %pK", + ctx, pf_args); + return -EINVAL; + } + + CAM_INFO(CAM_JPEG, "iommu fault for jpeg ctx %d state %d", + ctx->ctx_id, ctx->state); + + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + CAM_INFO(CAM_JPEG, "Active req_id: %lld, ctx_id: %u", + req->request_id, ctx->ctx_id); + + rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data); + if (rc) + CAM_ERR(CAM_JPEG, "Failed to dump pf info ctx_id: %u state: %d", + ctx->ctx_id, ctx->state); + } + + if (pf_args->pf_context_info.ctx_found) { + /* Send PF notification to UMD if PF found on current CTX */ + rc = cam_context_send_pf_evt(ctx, pf_args); + if (rc) + CAM_ERR(CAM_JPEG, + "Failed to notify PF event to userspace rc: %d", rc); + } + + return rc; +} + +static int cam_jpeg_context_mini_dump(void *priv, 
void *args) +{ + int rc; + struct cam_context *ctx; + + if (!priv || args) { + CAM_ERR(CAM_ICP, "Invalid param priv %pK args %pK", priv, args); + return -EINVAL; + } + + ctx = (struct cam_context *)priv; + rc = cam_context_mini_dump(ctx, args); + if (rc) + CAM_ERR(CAM_JPEG, "Mini Dump failed %d", rc); + + return rc; +} + +static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *args) +{ + int rc; + + rc = cam_context_acquire_dev_to_hw(ctx, args); + if (rc) + CAM_ERR(CAM_JPEG, "Unable to Acquire device %d", rc); + else + ctx->state = CAM_CTX_ACQUIRED; + + return rc; +} + +static int __cam_jpeg_ctx_release_dev_in_acquired(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + cam_common_release_evt_params(ctx->dev_hdl); + + rc = cam_context_release_dev_to_hw(ctx, cmd); + if (rc) + CAM_ERR(CAM_JPEG, "Unable to release device %d", rc); + + ctx->state = CAM_CTX_AVAILABLE; + + return rc; +} + +static int __cam_jpeg_ctx_dump_dev_in_acquired( + struct cam_context *ctx, + struct cam_dump_req_cmd *cmd) +{ + int rc; + + rc = cam_context_dump_dev_to_hw(ctx, cmd); + if (rc) + CAM_ERR(CAM_JPEG, "Failed to dump device, rc=%d", rc); + + return rc; +} + +static int __cam_jpeg_ctx_flush_dev_in_acquired(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd) +{ + int rc; + struct cam_context_utils_flush_args flush_args; + + flush_args.cmd = cmd; + flush_args.flush_active_req = true; + + rc = cam_context_flush_dev_to_hw(ctx, &flush_args); + if (rc) + CAM_ERR(CAM_ICP, "Failed to flush device"); + + return rc; +} + +static int __cam_jpeg_ctx_config_dev_in_acquired(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + return cam_context_prepare_dev_to_hw(ctx, cmd); +} + +static int __cam_jpeg_ctx_handle_buf_done_in_acquired(void *ctx, void *done_evt_data) +{ + struct cam_jpeg_hw_buf_done_evt_data *buf_done = done_evt_data; + + return cam_context_buf_done_from_hw(ctx, buf_done->buf_done_data, + 
buf_done->evt_id); +} + +static int __cam_jpeg_ctx_handle_evt_inducement(void *ctx, void *inject_evt_arg) +{ + return cam_context_apply_evt_injection(ctx, inject_evt_arg); +} + +static int __cam_jpeg_ctx_handle_hw_event(void *ctx, + uint32_t evt_id, void *evt_data) +{ + int rc; + + if (!ctx || !evt_data) { + CAM_ERR(CAM_JPEG, "Invalid parameters ctx %s evt_data: %s", + CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_data)); + return -EINVAL; + } + + switch (evt_id) { + case CAM_JPEG_EVT_ID_BUF_DONE: + rc = __cam_jpeg_ctx_handle_buf_done_in_acquired(ctx, evt_data); + break; + case CAM_JPEG_EVT_ID_INDUCE_ERR: + rc = __cam_jpeg_ctx_handle_evt_inducement(ctx, evt_data); + break; + default: + CAM_ERR(CAM_JPEG, "Invalid event id: %u", evt_id); + rc = -EINVAL; + } + + return rc; +} + +static int __cam_jpeg_ctx_stop_dev_in_acquired(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_stop_dev_to_hw(ctx); + if (rc) { + CAM_ERR(CAM_JPEG, "Failed in Stop dev, rc=%d", rc); + return rc; + } + + return rc; +} + +static int cam_jpeg_context_validate_event_notify_injection(struct cam_context *ctx, + struct cam_hw_inject_evt_param *evt_params) +{ + int rc = 0; + uint32_t evt_type; + uint64_t req_id; + + req_id = evt_params->req_id; + evt_type = evt_params->u.evt_notify.evt_notify_type; + + switch (evt_type) { + case V4L_EVENT_CAM_REQ_MGR_NODE_EVENT: { + struct cam_hw_inject_node_evt_param *node_evt_params = + &evt_params->u.evt_notify.u.node_evt_params; + + switch (node_evt_params->event_type) { + case CAM_REQ_MGR_RETRY_EVENT: + break; + default: + CAM_ERR(CAM_JPEG, + "Invalid event type %u for node event injection event cause: %u req id: %llu ctx id: %u dev hdl: %d", + node_evt_params->event_type, node_evt_params->event_cause, + req_id, ctx->ctx_id, ctx->dev_hdl); + return -EINVAL; + } + + CAM_INFO(CAM_JPEG, + "Inject Node evt: event type: %u event cause: %u req id: %llu ctx id: %u dev hdl: %d", + node_evt_params->event_type, 
node_evt_params->event_cause, + req_id, ctx->ctx_id, ctx->dev_hdl); + break; + } + case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: { + struct cam_hw_inject_pf_evt_param *pf_evt_params = + &evt_params->u.evt_notify.u.pf_evt_params; + bool non_fatal_en; + + rc = cam_smmu_is_cb_non_fatal_fault_en(ctx->img_iommu_hdl, &non_fatal_en); + if (rc) { + CAM_ERR(CAM_JPEG, + "Fail to query whether device's cb has non-fatal enabled rc: %d", + rc); + return rc; + } + + if (!non_fatal_en) { + CAM_ERR(CAM_JPEG, + "Fail to inject page fault event notification. Page fault is fatal for JPEG"); + return -EINVAL; + } + + CAM_INFO(CAM_JPEG, + "Inject PF evt: req_id: %llu ctx id: %u dev hdl: %d ctx found: %hhu", + req_id, ctx->ctx_id, ctx->dev_hdl, pf_evt_params->ctx_found); + break; + } + default: + CAM_ERR(CAM_JPEG, "Event notification type not supported: %u", evt_type); + rc = -EINVAL; + } + + return rc; +} + +static int cam_jpeg_context_inject_evt(void *context, void *evt_args) +{ + struct cam_context *ctx = context; + struct cam_hw_inject_evt_param *evt_params = NULL; + struct cam_hw_inject_buffer_error_param *buf_err_params = NULL; + int rc = 0; + + if (!ctx || !evt_args) { + CAM_ERR(CAM_JPEG, + "invalid params ctx %s event args %s", + CAM_IS_NULL_TO_STR(ctx), CAM_IS_NULL_TO_STR(evt_args)); + return -EINVAL; + } + + evt_params = (struct cam_hw_inject_evt_param *)evt_args; + + if (evt_params->inject_id == CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE) { + buf_err_params = &evt_params->u.buf_err_evt; + if (buf_err_params->sync_error > CAM_SYNC_JPEG_EVENT_START || + buf_err_params->sync_error < CAM_SYNC_JPEG_EVENT_END) { + CAM_INFO(CAM_JPEG, "Inject buffer sync error %u ctx id: %u req id %llu", + buf_err_params->sync_error, ctx->ctx_id, evt_params->req_id); + } else { + CAM_ERR(CAM_JPEG, "Invalid buffer sync error %u", + buf_err_params->sync_error); + return -EINVAL; + } + } else { + rc = cam_jpeg_context_validate_event_notify_injection(ctx, evt_params); + if (rc) { + CAM_ERR(CAM_JPEG, + "Event 
notification injection failed validation rc: %d", rc); + return rc; + } + } + + if (ctx->hw_mgr_intf->hw_inject_evt) + ctx->hw_mgr_intf->hw_inject_evt(ctx->ctxt_to_hw_map, evt_args); + + return rc; +} + +/* top state machine */ +static struct cam_ctx_ops + cam_jpeg_ctx_state_machine[CAM_CTX_STATE_MAX] = { + /* Uninit */ + { + .ioctl_ops = { }, + .crm_ops = { }, + .irq_ops = NULL, + }, + /* Available */ + { + .ioctl_ops = { + .acquire_dev = __cam_jpeg_ctx_acquire_dev_in_available, + }, + .crm_ops = { }, + .irq_ops = NULL, + .mini_dump_ops = cam_jpeg_context_mini_dump, + }, + /* Acquired */ + { + .ioctl_ops = { + .release_dev = __cam_jpeg_ctx_release_dev_in_acquired, + .config_dev = __cam_jpeg_ctx_config_dev_in_acquired, + .stop_dev = __cam_jpeg_ctx_stop_dev_in_acquired, + .flush_dev = __cam_jpeg_ctx_flush_dev_in_acquired, + .dump_dev = __cam_jpeg_ctx_dump_dev_in_acquired, + }, + .crm_ops = { }, + .irq_ops = __cam_jpeg_ctx_handle_hw_event, + .pagefault_ops = cam_jpeg_context_dump_active_request, + .mini_dump_ops = cam_jpeg_context_mini_dump, + .evt_inject_ops = cam_jpeg_context_inject_evt, + }, + /* Ready */ + { + .ioctl_ops = {}, + }, + /* Flushed */ + { + .ioctl_ops = {}, + }, + /* Activated */ + { + .ioctl_ops = {}, + }, +}; + +int cam_jpeg_context_init(struct cam_jpeg_context *ctx, + struct cam_context *ctx_base, + struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, + int img_iommu_hdl) +{ + int rc; + int i; + + if (!ctx || !ctx_base) { + CAM_ERR(CAM_JPEG, "Invalid Context"); + rc = -EFAULT; + goto err; + } + + memset(ctx, 0, sizeof(*ctx)); + + ctx->base = ctx_base; + + for (i = 0; i < CAM_CTX_REQ_MAX; i++) + ctx->req_base[i].req_priv = &ctx->jpeg_req[i]; + + rc = cam_context_init(ctx_base, jpeg_dev_name, CAM_JPEG, ctx_id, + NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX, img_iommu_hdl); + if (rc) { + CAM_ERR(CAM_JPEG, "Camera Context Base init failed"); + goto err; + } + + ctx_base->state_machine = cam_jpeg_ctx_state_machine; + ctx_base->ctx_priv = ctx; + + 
ctx_base->max_hw_update_entries = CAM_CTX_CFG_MAX; + ctx_base->max_in_map_entries = CAM_CTX_CFG_MAX; + ctx_base->max_out_map_entries = CAM_CTX_CFG_MAX; +err: + return rc; +} + +int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx) +{ + if (!ctx || !ctx->base) { + CAM_ERR(CAM_JPEG, "Invalid params: %pK", ctx); + return -EINVAL; + } + + cam_context_deinit(ctx->base); + + memset(ctx, 0, sizeof(*ctx)); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_context.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_context.h new file mode 100644 index 0000000000..492cf1b99d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_context.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_JPEG_CONTEXT_H_ +#define _CAM_JPEG_CONTEXT_H_ + +#include + +#include "cam_context.h" +#include "cam_jpeg_hw_mgr_intf.h" + +#define CAM_JPEG_HW_EVENT_MAX 20 + +/** + * struct cam_jpeg_context - Jpeg context + * @base : Base jpeg cam context object + * @jpeg_req : Jpeg reguest data stored during prepare update + * @req_base : Common request structure + */ +struct cam_jpeg_context { + struct cam_context *base; + struct cam_jpeg_request_data jpeg_req[CAM_CTX_REQ_MAX]; + struct cam_ctx_request req_base[CAM_CTX_REQ_MAX]; +}; + +/* cam jpeg context irq handling function type */ +typedef int (*cam_jpeg_hw_event_cb_func)( + struct cam_jpeg_context *ctx_jpeg, + void *evt_data); + +/** + * struct cam_jpeg_ctx_irq_ops - Function table for handling IRQ callbacks + * + * @irq_ops: Array of handle function pointers. 
+ * + */ +struct cam_jpeg_ctx_irq_ops { + cam_jpeg_hw_event_cb_func irq_ops[CAM_JPEG_HW_EVENT_MAX]; +}; + +/** + * cam_jpeg_context_init() + * + * @brief: Initialization function for the JPEG context + * + * @ctx: JPEG context obj to be initialized + * @ctx_base: Context base from cam_context + * @hw_intf: JPEG hw manager interface + * @ctx_id: ID for this context + * @img_iommu_hdl: IOMMU HDL for image buffers + * + */ +int cam_jpeg_context_init(struct cam_jpeg_context *ctx, + struct cam_context *ctx_base, + struct cam_hw_mgr_intf *hw_intf, + uint32_t ctx_id, + int img_iommu_hdl); + +/** + * cam_jpeg_context_deinit() + * + * @brief: Deinitialize function for the JPEG context + * + * @ctx: JPEG context obj to be deinitialized + * + */ +int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx); + +#endif /* __CAM_JPEG_CONTEXT_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_dev.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_dev.c new file mode 100644 index 0000000000..a7e86c4e46 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_dev.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "cam_node.h" +#include "cam_hw_mgr_intf.h" +#include "cam_jpeg_hw_mgr_intf.h" +#include "cam_jpeg_dev.h" +#include "cam_debug_util.h" +#include "cam_smmu_api.h" +#include "camera_main.h" +#include "cam_common_util.h" +#include "cam_context_utils.h" +#include "cam_req_mgr_dev.h" + +#define CAM_JPEG_DEV_NAME "cam-jpeg" + +static struct cam_jpeg_dev g_jpeg_dev; + +static int cam_jpeg_dev_evt_inject_cb(void *inject_args) +{ + struct cam_common_inject_evt_param *inject_params = inject_args; + int i; + + for (i = 0; i < CAM_JPEG_CTX_MAX; i++) { + if (g_jpeg_dev.ctx[i].dev_hdl == inject_params->dev_hdl) { + cam_context_add_evt_inject(&g_jpeg_dev.ctx[i], + &inject_params->evt_params); + return 0; + } + } + + CAM_ERR(CAM_JPEG, "No dev hdl found %d", inject_params->dev_hdl); + return -EINVAL; +} + +static void cam_jpeg_dev_iommu_fault_handler( + struct cam_smmu_pf_info *pf_smmu_info) +{ + int i, rc; + struct cam_node *node = NULL; + struct cam_hw_dump_pf_args pf_args = {0}; + + if (!pf_smmu_info || !pf_smmu_info->token) { + CAM_ERR(CAM_JPEG, "invalid token in page handler cb"); + return; + } + + node = (struct cam_node *)pf_smmu_info->token; + + pf_args.pf_smmu_info = pf_smmu_info; + + for (i = 0; i < node->ctx_size; i++) { + cam_context_dump_pf_info(&(node->ctx_list[i]), &pf_args); + if (pf_args.pf_context_info.ctx_found) + /* found ctx and packet of the faulted address */ + break; + } + + if (i == node->ctx_size) { + /* Faulted ctx not found. 
But report PF to UMD anyway*/ + rc = cam_context_send_pf_evt(NULL, &pf_args); + if (rc) + CAM_ERR(CAM_JPEG, + "Failed to notify PF event to userspace rc: %d", rc); + } +} + +static void cam_jpeg_dev_mini_dump_cb(void *priv, void *args) +{ + struct cam_context *ctx = NULL; + + if (!priv || !args) { + CAM_ERR(CAM_JPEG, "Invalid param priv %pK %pK args", priv, args); + return; + } + + ctx = (struct cam_context *)priv; + cam_context_mini_dump_from_hw(ctx, args); +} + +static const struct of_device_id cam_jpeg_dt_match[] = { + { + .compatible = "qcom,cam-jpeg" + }, + { } +}; + +static int cam_jpeg_subdev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_LOCK); + + mutex_lock(&g_jpeg_dev.jpeg_mutex); + g_jpeg_dev.open_cnt++; + mutex_unlock(&g_jpeg_dev.jpeg_mutex); + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + + return 0; +} + +static int cam_jpeg_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + int rc = 0; + struct cam_node *node = v4l2_get_subdevdata(sd); + + mutex_lock(&g_jpeg_dev.jpeg_mutex); + if (g_jpeg_dev.open_cnt <= 0) { + CAM_DBG(CAM_JPEG, "JPEG subdev is already closed"); + rc = -EINVAL; + goto end; + } + + g_jpeg_dev.open_cnt--; + + if (!node) { + CAM_ERR(CAM_JPEG, "Node ptr is NULL"); + rc = -EINVAL; + goto end; + } + + if (g_jpeg_dev.open_cnt == 0) + cam_node_shutdown(node); + +end: + mutex_unlock(&g_jpeg_dev.jpeg_mutex); + return rc; +} + +static int cam_jpeg_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_JPEG, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + return cam_jpeg_subdev_close_internal(sd, fh); +} + +static const struct v4l2_subdev_internal_ops cam_jpeg_subdev_internal_ops = { + .close = cam_jpeg_subdev_close, + .open = cam_jpeg_subdev_open, +}; + +static int cam_jpeg_dev_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + 
int rc; + int i; + struct cam_hw_mgr_intf hw_mgr_intf; + struct cam_node *node; + int iommu_hdl = -1; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + g_jpeg_dev.sd.internal_ops = &cam_jpeg_subdev_internal_ops; + g_jpeg_dev.sd.close_seq_prior = CAM_SD_CLOSE_MEDIUM_PRIORITY; + rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME, + CAM_JPEG_DEVICE_TYPE); + if (rc) { + CAM_ERR(CAM_JPEG, "JPEG cam_subdev_probe failed %d", rc); + goto err; + } + node = (struct cam_node *)g_jpeg_dev.sd.token; + + rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node, + (uint64_t *)&hw_mgr_intf, &iommu_hdl, + cam_jpeg_dev_mini_dump_cb); + if (rc) { + CAM_ERR(CAM_JPEG, "Can not initialize JPEG HWmanager %d", rc); + goto unregister; + } + + for (i = 0; i < CAM_JPEG_CTX_MAX; i++) { + rc = cam_jpeg_context_init(&g_jpeg_dev.ctx_jpeg[i], + &g_jpeg_dev.ctx[i], + &node->hw_mgr_intf, + i, iommu_hdl); + if (rc) { + CAM_ERR(CAM_JPEG, "JPEG context init failed %d %d", + i, rc); + goto ctx_init_fail; + } + } + + cam_common_register_evt_inject_cb(cam_jpeg_dev_evt_inject_cb, + CAM_COMMON_EVT_INJECT_HW_JPEG); + + rc = cam_node_init(node, &hw_mgr_intf, g_jpeg_dev.ctx, CAM_JPEG_CTX_MAX, + CAM_JPEG_DEV_NAME); + if (rc) { + CAM_ERR(CAM_JPEG, "JPEG node init failed %d", rc); + goto ctx_init_fail; + } + + node->sd_handler = cam_jpeg_subdev_close_internal; + cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_jpeg_dev_iommu_fault_handler, node); + + mutex_init(&g_jpeg_dev.jpeg_mutex); + + CAM_DBG(CAM_JPEG, "Component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +ctx_init_fail: + for (--i; i >= 0; i--) + if (cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i])) + CAM_ERR(CAM_JPEG, "deinit fail %d %d", i, rc); +unregister: + if (cam_subdev_remove(&g_jpeg_dev.sd)) + 
CAM_ERR(CAM_JPEG, "remove fail %d", rc); +err: + return rc; +} + +static void cam_jpeg_dev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc; + int i; + + for (i = 0; i < CAM_CTX_MAX; i++) { + rc = cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]); + if (rc) + CAM_ERR(CAM_JPEG, "JPEG context %d deinit failed %d", + i, rc); + } + + rc = cam_subdev_remove(&g_jpeg_dev.sd); + if (rc) + CAM_ERR(CAM_JPEG, "Unregister failed %d", rc); +} + +const static struct component_ops cam_jpeg_dev_component_ops = { + .bind = cam_jpeg_dev_component_bind, + .unbind = cam_jpeg_dev_component_unbind, +}; + +static int cam_jpeg_dev_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_jpeg_dev_component_ops); + return 0; +} + +static int cam_jpeg_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_JPEG, "Adding JPEG component"); + rc = component_add(&pdev->dev, &cam_jpeg_dev_component_ops); + if (rc) + CAM_ERR(CAM_JPEG, "failed to add component rc: %d", rc); + + return rc; + +} + +struct platform_driver jpeg_driver = { + .probe = cam_jpeg_dev_probe, + .remove = cam_jpeg_dev_remove, + .driver = { + .name = "cam_jpeg", + .owner = THIS_MODULE, + .of_match_table = cam_jpeg_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_jpeg_dev_init_module(void) +{ + return platform_driver_register(&jpeg_driver); +} + +void cam_jpeg_dev_exit_module(void) +{ + platform_driver_unregister(&jpeg_driver); +} + +MODULE_DESCRIPTION("MSM JPEG driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_dev.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_dev.h new file mode 100644 index 0000000000..d468748d8f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/cam_jpeg_dev.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_JPEG_DEV_H_ +#define _CAM_JPEG_DEV_H_ + +#include "cam_subdev.h" +#include "cam_hw_mgr_intf.h" +#include "cam_context.h" +#include "cam_jpeg_context.h" + +/** + * struct cam_jpeg_dev - Camera JPEG V4l2 device node + * + * @sd: Commone camera subdevice node + * @node: Pointer to jpeg subdevice + * @ctx: JPEG base context storage + * @ctx_jpeg: JPEG private context storage + * @jpeg_mutex: Jpeg dev mutex + * @open_cnt: Open device count + */ +struct cam_jpeg_dev { + struct cam_subdev sd; + struct cam_node *node; + struct cam_context ctx[CAM_JPEG_CTX_MAX]; + struct cam_jpeg_context ctx_jpeg[CAM_JPEG_CTX_MAX]; + struct mutex jpeg_mutex; + int32_t open_cnt; +}; + +/** + * @brief : API to register JPEG dev to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_jpeg_dev_init_module(void); + +/** + * @brief : API to remove JPEG dev from platform framework. + */ +void cam_jpeg_dev_exit_module(void); +#endif /* __CAM_JPEG_DEV_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c new file mode 100644 index 0000000000..b1b66af872 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c @@ -0,0 +1,2372 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cam_packet_util.h" +#include "cam_hw.h" +#include "cam_jpeg_context.h" +#include "cam_req_mgr_dev.h" +#include "cam_hw_mgr_intf.h" +#include "cam_jpeg_hw_mgr_intf.h" +#include "cam_jpeg_hw_mgr.h" +#include "cam_smmu_api.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_workq.h" +#include "cam_cdm_intf_api.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cam_cpas_api.h" +#include "cam_sync_api.h" +#include "cam_presil_hw_access.h" + +#define CAM_JPEG_HW_ENTRIES_MAX 20 + +#define CAM_JPEG_MAX_NUM_CMD_BUFFS 5 + +#define CAM_JPEG_CHBASE_CMD_BUFF_IDX 0 +#define CAM_JPEG_CFG_CMD_BUFF_IDX 1 +#define CAM_JPEG_PARAM_CMD_BUFF_IDX 2 +#define CAM_JPEG_THUBMNAIL_SIZE_CMD_BUFF_IDX 3 + +#define CAM_JPEG_DEV_TYPE(type) ((type) == CAM_JPEG_DEV_TYPE_ENC ? "ENC" : "DMA") + +static struct cam_jpeg_hw_mgr g_jpeg_hw_mgr; + +static int32_t cam_jpeg_hw_mgr_sched_bottom_half(uint32_t irq_status, + int32_t result_size, void *data); +static int cam_jpeg_mgr_process_hw_update_entries(void *priv, void *data); +static int cam_jpeg_insert_cdm_change_base( + struct cam_hw_config_args *config_args, + struct cam_jpeg_hw_ctx_data *ctx_data, + struct cam_jpeg_hw_mgr *hw_mgr); + +static inline void cam_jpeg_mgr_move_req_to_free_list(struct cam_jpeg_hw_cfg_req *p_cfg_req) +{ + if (!p_cfg_req) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return; + } + + cam_mem_put_cpu_buf(p_cfg_req->hw_cfg_args.hw_update_entries[0].handle); + list_add_tail(&p_cfg_req->list, &g_jpeg_hw_mgr.free_req_list); +} + +static void cam_jpeg_mgr_apply_evt_injection(struct cam_hw_done_event_data *buf_done_data, + struct cam_jpeg_hw_ctx_data *ctx_data, bool *signal_fence_buffer) +{ + struct cam_hw_inject_evt_param *evt_inject_params = &ctx_data->evt_inject_params; + struct cam_common_evt_inject_data inject_evt; + + inject_evt.buf_done_data = buf_done_data; + 
inject_evt.evt_params = evt_inject_params; + + if (ctx_data->ctxt_event_cb) + ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_JPEG_EVT_ID_INDUCE_ERR, + &inject_evt); + + if (evt_inject_params->inject_id == CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE) + *signal_fence_buffer = false; + + evt_inject_params->is_valid = false; +} + +static int cam_jpeg_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + if (!blob_data || !user_data || !blob_size) { + CAM_ERR(CAM_JPEG, "Invalid arguments. blob_data: %p user_data: %p blob_size: %u", + blob_data, user_data, blob_size); + return -EINVAL; + } + + CAM_DBG(CAM_JPEG, "blob_type: %u, blob_size: %u", blob_type, blob_size); + + switch(blob_type) { + case CAM_JPEG_THUMBNAIL_SIZE_BLOB: + *((uint32_t *)user_data) = (uint32_t) *((uint32_t *)blob_data); + CAM_DBG(CAM_JPEG, "Thumbnail max size: %u bytes", *((uint32_t *)user_data)); + break; + default: + CAM_ERR(CAM_JPEG, "Invalid blob_tye: %u", blob_type); + break; + } + + return 0; +} + +static int cam_jpeg_add_command_buffers(struct cam_packet *packet, + struct cam_hw_prepare_update_args *prepare_args, + struct cam_jpeg_hw_ctx_data *ctx_data) +{ + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_jpeg_request_data *jpeg_request_data; + struct cam_kmd_buf_info kmd_buf; + unsigned int num_entry = 0; + unsigned int i; + int rc; + + if (!packet || !prepare_args || !ctx_data) { + CAM_ERR(CAM_JPEG, "Invalid args: packet: 0x%p, prepare_args: 0x%p, ctx_data: 0x%p", + packet, prepare_args, ctx_data); + return -EINVAL; + } + + jpeg_request_data = (struct cam_jpeg_request_data *) prepare_args->priv; + + if (!jpeg_request_data) { + CAM_ERR(CAM_JPEG, "prepare_args private is null"); + return -EINVAL; + } + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *)&packet->payload_flex + (packet->cmd_buf_offset / 4)); + + CAM_DBG(CAM_JPEG, + "Pkt: %pK req_id: %u cmd_desc: %pK Size: %lu, num_cmd_buffs: %d dev_type: %u", + (void *)packet, + 
packet->header.request_id, + (void *)cmd_desc, + sizeof(struct cam_cmd_buf_desc), + packet->num_cmd_buf, + ctx_data->jpeg_dev_acquire_info.dev_type); + + num_entry = prepare_args->num_hw_update_entries; + + rc = cam_packet_util_get_kmd_buffer(packet, &kmd_buf); + if (rc) { + CAM_ERR(CAM_JPEG, "get kmd buf failed %d", rc); + return rc; + } + + CAM_DBG(CAM_JPEG, "KMD Buffer: used_bytes: %u handle: 0x%x offset: 0x%x", + kmd_buf.used_bytes, kmd_buf.handle, kmd_buf.offset); + + /* fill kmd buf info into 1st hw update entry for change base*/ + prepare_args->hw_update_entries[num_entry].len = (uint32_t)kmd_buf.used_bytes; + prepare_args->hw_update_entries[num_entry].handle = (uint32_t)kmd_buf.handle; + prepare_args->hw_update_entries[num_entry].offset = (uint32_t)kmd_buf.offset; + num_entry++; + + jpeg_request_data->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + jpeg_request_data->request_id = packet->header.request_id; + jpeg_request_data->thumbnail_threshold_size = 0; + jpeg_request_data->out_size_mem_handle = 0; + jpeg_request_data->out_size_offset = 0; + + CAM_DBG(CAM_JPEG, + "Change_Base HW_Entry. 
Offset: 0x%x Length: %u mem_handle: 0x%x num_entry: %d", + prepare_args->hw_update_entries[num_entry].offset, + prepare_args->hw_update_entries[num_entry].len, + prepare_args->hw_update_entries[num_entry].handle, + num_entry); + + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + CAM_DBG(CAM_JPEG, + "Metadata: %u Offset: 0x%x Length: %u mem_handle: 0x%x num_entry: %d", + cmd_desc[i].meta_data, cmd_desc[i].offset, + cmd_desc[i].length, cmd_desc[i].mem_handle, + num_entry); + + switch(cmd_desc[i].meta_data) { + case CAM_JPEG_ENC_PACKET_CONFIG_DATA: + case CAM_JPEG_DMA_PACKET_PLANE0_CONFIG_DATA: + case CAM_JPEG_DMA_PACKET_PLANE1_CONFIG_DATA: + prepare_args->hw_update_entries[num_entry].len = + (uint32_t)cmd_desc[i].length; + prepare_args->hw_update_entries[num_entry].handle = + (uint32_t)cmd_desc[i].mem_handle; + prepare_args->hw_update_entries[num_entry].offset = + (uint32_t)cmd_desc[i].offset; + + num_entry++; + break; + case CAM_JPEG_PACKET_INOUT_PARAM: + jpeg_request_data->out_size_mem_handle = cmd_desc[i].mem_handle; + jpeg_request_data->out_size_offset = cmd_desc[i].offset; + break; + case CAM_JPEG_PACKET_GENERIC_BLOB: + rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i], + &cam_jpeg_generic_blob_handler, + (void *)&jpeg_request_data->thumbnail_threshold_size); + break; + default: + CAM_ERR(CAM_JPEG, "Invalid metadata"); + return -EINVAL; + } + } + + CAM_DBG(CAM_JPEG, "num_entry: %u", num_entry); + prepare_args->num_hw_update_entries = num_entry; + + return rc; +} + +static int cam_jpeg_process_next_hw_update(void *priv, void *data, + struct cam_hw_done_event_data *buf_data) +{ + int rc; + struct cam_jpeg_hw_mgr *hw_mgr = priv; + struct cam_hw_update_entry *cmd; + struct cam_cdm_bl_request *cdm_cmd; + struct cam_hw_config_args *config_args = NULL; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + uint32_t dev_type; + struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL; + uint32_t 
cdm_cfg_to_insert = 0; + uint32_t pass_num; + + if (!data || !priv) { + CAM_ERR(CAM_JPEG, "Invalid data"); + return -EINVAL; + } + + ctx_data = (struct cam_jpeg_hw_ctx_data *)data; + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0]; + config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args; + + if (!hw_mgr->devices[dev_type][0]->hw_ops.reset) { + CAM_ERR(CAM_JPEG, "op reset null "); + buf_data->evt_param = CAM_SYNC_JPEG_EVENT_INVLD_CMD; + rc = -EFAULT; + goto end_error; + } + rc = hw_mgr->devices[dev_type][0]->hw_ops.reset( + hw_mgr->devices[dev_type][0]->hw_priv, + NULL, 0); + if (rc) { + CAM_ERR(CAM_JPEG, "jpeg hw reset failed %d", rc); + buf_data->evt_param = CAM_SYNC_JPEG_EVENT_HW_RESET_FAILED; + goto end_error; + } + + cdm_cmd = ctx_data->cdm_cmd; + cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE; + cdm_cmd->flag = false; + cdm_cmd->userdata = NULL; + cdm_cmd->cookie = 0; + cdm_cmd->cmd_arrary_count = 0; + + /* insert cdm chage base cmd */ + rc = cam_jpeg_insert_cdm_change_base(config_args, + ctx_data, hw_mgr); + if (rc) { + CAM_ERR(CAM_JPEG, "insert change base failed %d", rc); + buf_data->evt_param = CAM_SYNC_JPEG_EVENT_CDM_CHANGE_BASE_ERR; + goto end_error; + } + + /* insert next cdm payload at index */ + /* for enc or dma 1st pass at index 1 */ + /* for dma 2nd pass at index 2*/ + if (p_cfg_req->num_hw_entry_processed == 0) { + cdm_cfg_to_insert = CAM_JPEG_CFG_CMD_BUFF_IDX; + pass_num = 1; + } else { + cdm_cfg_to_insert = p_cfg_req->num_hw_entry_processed + 1; + pass_num = 2; + } + + CAM_DBG(CAM_JPEG, "processed %d total %d using cfg entry %d for %pK", + p_cfg_req->num_hw_entry_processed, + config_args->num_hw_update_entries, + cdm_cfg_to_insert, + p_cfg_req); + + cmd = (config_args->hw_update_entries + cdm_cfg_to_insert); + cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle = + cmd->handle; + cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].offset = + cmd->offset; + 
cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].len = + cmd->len; + CAM_DBG(CAM_JPEG, "Entry: %d, hdl: 0x%x, offset: 0x%x, len: %d", + cdm_cmd->cmd_arrary_count, + cmd->handle, + cmd->offset, + cmd->len); + cdm_cmd->cmd_arrary_count++; + + rc = cam_cdm_submit_bls( + hw_mgr->cdm_info[dev_type][0].cdm_handle, + cdm_cmd); + if (rc) { + CAM_ERR(CAM_JPEG, "Failed to apply the configs %d", rc); + buf_data->evt_param = CAM_SYNC_JPEG_EVENT_CDM_CONFIG_ERR; + goto end_error; + } + + if (g_jpeg_hw_mgr.camnoc_misr_test) { + /* configure jpeg hw and camnoc misr */ + rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd( + hw_mgr->devices[dev_type][0]->hw_priv, + CAM_JPEG_CMD_CONFIG_HW_MISR, + &g_jpeg_hw_mgr.camnoc_misr_test, + sizeof(g_jpeg_hw_mgr.camnoc_misr_test)); + if (rc) { + CAM_ERR(CAM_JPEG, "Failed to apply the configs %d", + rc); + buf_data->evt_param = CAM_SYNC_JPEG_EVENT_MISR_CONFIG_ERR; + goto end_error; + } + } + + if (!hw_mgr->devices[dev_type][0]->hw_ops.start) { + CAM_ERR(CAM_JPEG, "op start null "); + buf_data->evt_param = CAM_SYNC_JPEG_EVENT_INVLD_CMD; + rc = -EINVAL; + goto end_error; + } + + CAM_TRACE(CAM_JPEG, "Start JPEG %s ctx %lld Req %llu Pass %d", + CAM_JPEG_DEV_TYPE(dev_type), + (uint64_t) ctx_data, + config_args->request_id, pass_num); + + rc = hw_mgr->devices[dev_type][0]->hw_ops.start( + hw_mgr->devices[dev_type][0]->hw_priv, NULL, 0); + if (rc) { + CAM_ERR(CAM_JPEG, "Failed to apply the configs %d", + rc); + buf_data->evt_param = CAM_SYNC_JPEG_EVENT_START_HW_ERR; + goto end_error; + } + + cam_cpas_notify_event("JPEG Submit", config_args->request_id); + + return 0; +end_error: + return rc; +} + +static int cam_jpeg_mgr_bottom_half_irq(void *priv, void *data) +{ + int rc = 0; + int32_t i; + uintptr_t dev_type = 0; + struct cam_jpeg_process_irq_work_data_t *task_data; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + struct cam_context *cam_ctx = NULL; + struct cam_hw_done_event_data buf_data; + struct cam_jpeg_set_irq_cb irq_cb; + struct 
cam_jpeg_irq_cb_data *irq_cb_data; + struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL; + struct crm_workq_task *task; + struct cam_jpeg_process_frame_work_data_t *wq_task_data; + struct cam_jpeg_request_data *jpeg_req; + struct cam_req_mgr_message v4l2_msg = {0}; + struct cam_ctx_request *req; + struct cam_jpeg_misr_dump_args misr_args; + struct cam_jpeg_hw_buf_done_evt_data jpeg_done_evt; + struct cam_jpeg_config_inout_param_info *inout_params; + uint32_t *cmd_buf_kaddr; + uintptr_t kaddr; + size_t len; + size_t inout_param_size; + + if (!data || !priv) { + CAM_ERR(CAM_JPEG, "Invalid data"); + return -EINVAL; + } + + task_data = data; + + irq_cb_data = (struct cam_jpeg_irq_cb_data *)task_data->data; + + ctx_data = (struct cam_jpeg_hw_ctx_data *)irq_cb_data->private_data; + + if (!ctx_data) { + CAM_ERR(CAM_JPEG, "No ctx data found!"); + return -EINVAL; + } + + if (!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is not in use"); + return -EINVAL; + } + + cam_ctx = (struct cam_context *) ctx_data->context_priv; + if (!cam_ctx) { + CAM_ERR(CAM_JPEG, "cam_ctx is null"); + return -EINVAL; + } + + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + + mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex); + + p_cfg_req = g_jpeg_hw_mgr.dev_hw_cfg_args[dev_type][0]; + + if (g_jpeg_hw_mgr.device_in_use[dev_type][0] == false || + p_cfg_req == NULL) { + CAM_ERR(CAM_JPEG, "irq for old request %d", rc); + mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex); + return -EINVAL; + } + + p_cfg_req->num_hw_entry_processed++; + CAM_DBG(CAM_JPEG, "dev_type: %u, hw_entry_processed %d", + dev_type, p_cfg_req->num_hw_entry_processed); + + if (g_jpeg_hw_mgr.camnoc_misr_test) { + misr_args.req_id = p_cfg_req->req_id; + misr_args.enable_bug = g_jpeg_hw_mgr.bug_on_misr; + CAM_DBG(CAM_JPEG, "req %lld bug is enabled for MISR :%d", + misr_args.req_id, misr_args.enable_bug); + + /* dump jpeg hw and camnoc misr */ + rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.process_cmd( + g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv, + 
CAM_JPEG_CMD_DUMP_HW_MISR_VAL, &misr_args, + sizeof(struct cam_jpeg_misr_dump_args)); + if (rc) + CAM_WARN_RATE_LIMIT(CAM_JPEG, "jpeg and camnoc hw misr enable failed %d", + rc); + } + + /* If we have processed just plane 1 for jpeg dma, + * send the configuration data for plane 1 as well.*/ + if (dev_type == CAM_JPEG_RES_TYPE_DMA) { + if ((task_data->u.is_dma_frame_done) && (p_cfg_req->num_hw_entry_processed < 2)) { + /* Processes next entry before freeing the device */ + rc = cam_jpeg_process_next_hw_update(priv, ctx_data, + &buf_data); + if (!rc) { + mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex); + return 0; + } + } + } + + jpeg_req = irq_cb_data->jpeg_req; + inout_param_size = sizeof(struct cam_jpeg_config_inout_param_info); + + if (jpeg_req->dev_type == CAM_JPEG_RES_TYPE_ENC) { + rc = cam_mem_get_cpu_buf(jpeg_req->out_size_mem_handle, + (uintptr_t *)&kaddr, &len); + if (!rc) { + if ((inout_param_size > len) || + (jpeg_req->out_size_offset >= (len - inout_param_size))) + CAM_ERR(CAM_JPEG, + "Inval off = %u cmd buf len = %zu inout_param_size = %d", + jpeg_req->out_size_offset, len, inout_param_size); + else { + cmd_buf_kaddr = (uint32_t *)kaddr; + cmd_buf_kaddr += (jpeg_req->out_size_offset / sizeof(uint32_t)); + inout_params = + (struct cam_jpeg_config_inout_param_info *)cmd_buf_kaddr; + inout_params->output_size = task_data->u.output_encode_size; + } + cam_mem_put_cpu_buf(jpeg_req->out_size_mem_handle); + } + else + CAM_ERR(CAM_JPEG, "Buffer pointer for inout param is null"); + + CAM_DBG(CAM_JPEG, "Encoded Size %d Thresold Size: %u", + task_data->u.output_encode_size, + jpeg_req->thumbnail_threshold_size); + + if (jpeg_req->thumbnail_threshold_size) { + if (task_data->u.output_encode_size > jpeg_req->thumbnail_threshold_size) { + CAM_DBG(CAM_JPEG, "Thumbnail max size: %u dev_type: %u", + jpeg_req->thumbnail_threshold_size, jpeg_req->dev_type); + v4l2_msg.session_hdl = cam_ctx->session_hdl; + v4l2_msg.u.node_msg.request_id = jpeg_req->request_id; + 
v4l2_msg.u.node_msg.link_hdl = cam_ctx->link_hdl; + v4l2_msg.u.node_msg.device_hdl = cam_ctx->dev_hdl; + v4l2_msg.u.node_msg.event_type = CAM_REQ_MGR_RETRY_EVENT; + v4l2_msg.u.node_msg.event_cause = + CAM_REQ_MGR_JPEG_THUBNAIL_SIZE_ERROR; + cam_req_mgr_notify_message(&v4l2_msg, + V4L_EVENT_CAM_REQ_MGR_NODE_EVENT, + V4L_EVENT_CAM_REQ_MGR_EVENT); + + for (i = 0; i < p_cfg_req->hw_cfg_args.num_out_map_entries; i++) { + cam_sync_put_obj_ref( + p_cfg_req->hw_cfg_args.out_map_entries[i].sync_id); + } + + spin_lock(&cam_ctx->lock); + if (!list_empty(&cam_ctx->active_req_list)) { + req = list_first_entry(&cam_ctx->active_req_list, + struct cam_ctx_request, list); + cam_smmu_buffer_tracker_putref(&req->buf_tracker); + list_del_init(&req->list); + list_add_tail(&req->list, &cam_ctx->free_req_list); + if (req->packet) { + cam_common_mem_free(req->packet); + req->packet = NULL; + } + } + spin_unlock(&cam_ctx->lock); + + goto exit; + } + } + } + + buf_data.num_handles = p_cfg_req->hw_cfg_args.num_out_map_entries; + for (i = 0; i < buf_data.num_handles; i++) { + buf_data.resource_handle[i] = + p_cfg_req->hw_cfg_args.out_map_entries[i].resource_handle; + } + + buf_data.request_id = jpeg_req->request_id; + jpeg_done_evt.evt_id = CAM_CTX_EVT_ID_SUCCESS; + jpeg_done_evt.buf_done_data = &buf_data; + ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_JPEG_EVT_ID_BUF_DONE, + &jpeg_done_evt); + +exit: + irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_sched_bottom_half; + irq_cb.irq_cb_data.private_data = NULL; + irq_cb.irq_cb_data.jpeg_req = NULL; + irq_cb.b_set_cb = false; + + if (!g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.process_cmd) { + CAM_ERR(CAM_JPEG, "process_cmd null"); + rc = -EINVAL; + goto err; + } + rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.process_cmd( + g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv, + CAM_JPEG_CMD_SET_IRQ_CB, + &irq_cb, sizeof(irq_cb)); + if (rc) { + CAM_ERR(CAM_JPEG, "CMD_SET_IRQ_CB failed %d", rc); + goto err; + } + + if 
(g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.deinit) { + rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.deinit( + g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv, NULL, 0); + if (rc) + CAM_ERR(CAM_JPEG, "Failed to Deinit %lu HW", dev_type); + } + + g_jpeg_hw_mgr.device_in_use[dev_type][0] = false; + g_jpeg_hw_mgr.dev_hw_cfg_args[dev_type][0] = NULL; + + task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame); + if (!task) { + CAM_ERR(CAM_JPEG, "no empty task"); + rc = -EINVAL; + goto err; + } + + wq_task_data = (struct cam_jpeg_process_frame_work_data_t *)task->payload; + if (!task_data) { + CAM_ERR(CAM_JPEG, "task_data is NULL"); + rc = -EINVAL; + goto err; + } + + wq_task_data->data = NULL; + wq_task_data->request_id = 0; + wq_task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE; + task->process_cb = cam_jpeg_mgr_process_hw_update_entries; + rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr, + CRM_TASK_PRIORITY_0); + if (rc) { + CAM_ERR(CAM_JPEG, "could not enque task %d", rc); + goto err; + } + + cam_jpeg_mgr_move_req_to_free_list(p_cfg_req); +err: + mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex); + return rc; +} + +static int cam_jpeg_hw_mgr_sched_bottom_half(uint32_t irq_status, int32_t irq_data, void *data) +{ + int32_t rc; + unsigned long flags; + struct crm_workq_task *task; + struct cam_jpeg_process_irq_work_data_t *task_data; + + spin_lock_irqsave(&g_jpeg_hw_mgr.hw_mgr_lock, flags); + task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_irq_cb); + if (!task) { + CAM_ERR(CAM_JPEG, "no empty task"); + spin_unlock_irqrestore(&g_jpeg_hw_mgr.hw_mgr_lock, flags); + return -ENOMEM; + } + + task_data = (struct cam_jpeg_process_irq_work_data_t *)task->payload; + task_data->data = data; + task_data->irq_status = irq_status; + task_data->u.irq_data = irq_data; + task_data->type = CAM_JPEG_WORKQ_TASK_MSG_TYPE; + task->process_cb = cam_jpeg_mgr_bottom_half_irq; + + rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr, + CRM_TASK_PRIORITY_0); + 
spin_unlock_irqrestore(&g_jpeg_hw_mgr.hw_mgr_lock, flags); + + return rc; +} + +static int cam_jpeg_mgr_get_free_ctx(struct cam_jpeg_hw_mgr *hw_mgr) +{ + int i = 0; + int num_ctx = CAM_JPEG_CTX_MAX; + + for (i = 0; i < num_ctx; i++) { + mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex); + if (hw_mgr->ctx_data[i].in_use == false) { + hw_mgr->ctx_data[i].in_use = true; + mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex); + break; + } + mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex); + } + + return i; +} + + +static int cam_jpeg_mgr_release_ctx( + struct cam_jpeg_hw_mgr *hw_mgr, struct cam_jpeg_hw_ctx_data *ctx_data) +{ + if (!ctx_data) { + CAM_ERR(CAM_JPEG, "invalid ctx_data %pK", ctx_data); + return -EINVAL; + } + + mutex_lock(&ctx_data->ctx_mutex); + if (!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is already un-used: %pK", ctx_data); + mutex_unlock(&ctx_data->ctx_mutex); + return -EINVAL; + } + + ctx_data->in_use = false; + memset(&ctx_data->evt_inject_params, 0, sizeof(struct cam_hw_inject_evt_param)); + + mutex_unlock(&ctx_data->ctx_mutex); + + return 0; +} + +static int cam_jpeg_insert_cdm_change_base( + struct cam_hw_config_args *config_args, + struct cam_jpeg_hw_ctx_data *ctx_data, + struct cam_jpeg_hw_mgr *hw_mgr) +{ + int rc = 0; + uint32_t dev_type; + struct cam_cdm_bl_request *cdm_cmd; + uint32_t size; + uint32_t mem_cam_base; + uintptr_t iova_addr; + uint32_t *ch_base_iova_addr; + size_t ch_base_len; + + rc = cam_mem_get_cpu_buf( + config_args->hw_update_entries[CAM_JPEG_CHBASE_CMD_BUFF_IDX].handle, + &iova_addr, &ch_base_len); + if (rc) { + CAM_ERR(CAM_JPEG, + "unable to get src buf info for cmd buf: %d", rc); + return rc; + } + + if (config_args->hw_update_entries[CAM_JPEG_CHBASE_CMD_BUFF_IDX].offset >= + ch_base_len) { + CAM_ERR(CAM_JPEG, "Not enough buf offset %d len %d", + config_args->hw_update_entries[CAM_JPEG_CHBASE_CMD_BUFF_IDX].offset, + ch_base_len); + cam_mem_put_cpu_buf( + config_args->hw_update_entries[CAM_JPEG_CHBASE_CMD_BUFF_IDX].handle); + return 
-EINVAL; + } + + ch_base_iova_addr = (uint32_t *)iova_addr; + ch_base_iova_addr = (ch_base_iova_addr + + (config_args->hw_update_entries[CAM_JPEG_CHBASE_CMD_BUFF_IDX].offset / + sizeof(uint32_t))); + + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + mem_cam_base = hw_mgr->cdm_reg_map[dev_type][0]->mem_cam_base; + size = + hw_mgr->cdm_info[dev_type][0].cdm_ops->cdm_required_size_changebase(); + hw_mgr->cdm_info[dev_type][0].cdm_ops->cdm_write_changebase( + ch_base_iova_addr, mem_cam_base); + + cdm_cmd = ctx_data->cdm_cmd; + cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle = + config_args->hw_update_entries[CAM_JPEG_CHBASE_CMD_BUFF_IDX].handle; + cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].offset = + config_args->hw_update_entries[CAM_JPEG_CHBASE_CMD_BUFF_IDX].offset; + cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].len = size * sizeof(uint32_t); + CAM_DBG(CAM_JPEG, "Entry: %d, hdl: 0x%x, offset: 0x%x, len: %d, addr: 0x%p", + cdm_cmd->cmd_arrary_count, + cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle, + cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].offset, + cdm_cmd->cmd_flex[cdm_cmd->cmd_arrary_count].len, + (void *)iova_addr); + cdm_cmd->cmd_arrary_count++; + cdm_cmd->gen_irq_arb = false; + + ch_base_iova_addr += size; + *ch_base_iova_addr = 0; + ch_base_iova_addr += size; + *ch_base_iova_addr = 0; + + cam_mem_put_cpu_buf( + config_args->hw_update_entries[CAM_JPEG_CHBASE_CMD_BUFF_IDX].handle); + + return rc; +} + +static int cam_jpeg_mgr_process_hw_update_entries(void *priv, void *data) +{ + int rc; + int i = 0; + uintptr_t request_id = 0; + uint32_t dev_type; + struct cam_jpeg_hw_mgr *hw_mgr = priv; + struct cam_hw_config_args *config_args = NULL; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + struct cam_jpeg_request_data *jpeg_req; + struct cam_jpeg_process_frame_work_data_t *task_data; + struct cam_jpeg_set_irq_cb irq_cb; + struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL; + struct cam_hw_done_event_data buf_data; + struct 
cam_jpeg_hw_buf_done_evt_data jpeg_done_evt; + bool event_inject = false; + bool signal_fence_buffer = true; + + task_data = (struct cam_jpeg_process_frame_work_data_t *)data; + if (!hw_mgr || !task_data) { + CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK", + hw_mgr, task_data); + return -EINVAL; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + if (list_empty(&hw_mgr->hw_config_req_list)) { + CAM_DBG(CAM_JPEG, "no available request"); + rc = -EFAULT; + goto end; + } + + p_cfg_req = list_first_entry(&hw_mgr->hw_config_req_list, + struct cam_jpeg_hw_cfg_req, list); + if (!p_cfg_req) { + CAM_ERR(CAM_JPEG, "no request"); + rc = -EFAULT; + goto end; + } + + if (false == hw_mgr->device_in_use[p_cfg_req->dev_type][0]) { + hw_mgr->device_in_use[p_cfg_req->dev_type][0] = true; + hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = p_cfg_req; + list_del_init(&p_cfg_req->list); + } else { + CAM_DBG(CAM_JPEG, "Not dequeing, just return"); + rc = -EFAULT; + goto end; + } + + config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args; + request_id = task_data->request_id; + jpeg_req = (struct cam_jpeg_request_data *)config_args->priv; + if (request_id != (uintptr_t)jpeg_req->request_id) { + CAM_DBG(CAM_JPEG, "Probably received req from Bottom half. 
req %zd %zd", + request_id, (uintptr_t)jpeg_req->request_id); + } + + if (!config_args->num_hw_update_entries) { + CAM_ERR(CAM_JPEG, "No hw update enteries are available"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + rc = -EINVAL; + goto end_unusedev; + } + + ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map; + if (!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is not in use"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + rc = -EINVAL; + goto end_unusedev; + } + + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + + CAM_DBG(CAM_JPEG, "req_id: %u, dev_type: %u", + request_id, dev_type); + + if (dev_type != p_cfg_req->dev_type) + CAM_WARN(CAM_JPEG, "dev types not same something wrong"); + + if (!hw_mgr->devices[dev_type][0]->hw_ops.init) { + CAM_ERR(CAM_JPEG, "hw op init null "); + rc = -EFAULT; + goto end_unusedev; + } + rc = hw_mgr->devices[dev_type][0]->hw_ops.init( + hw_mgr->devices[dev_type][0]->hw_priv, + ctx_data, + sizeof(struct cam_jpeg_hw_ctx_data)); + if (rc) { + CAM_ERR(CAM_JPEG, "Failed to Init %d HW", dev_type); + goto end_unusedev; + } + + if (ctx_data->evt_inject_params.is_valid && + ctx_data->evt_inject_params.req_id == request_id) { + event_inject = true; + goto end_callcb; + } + + irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_sched_bottom_half; + irq_cb.irq_cb_data.private_data = (void *)ctx_data; + irq_cb.irq_cb_data.jpeg_req = jpeg_req; + irq_cb.b_set_cb = true; + if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) { + CAM_ERR(CAM_JPEG, "op process_cmd null "); + buf_data.evt_param = CAM_SYNC_JPEG_EVENT_INVLD_CMD; + rc = -EFAULT; + goto end_callcb; + } + rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd( + hw_mgr->devices[dev_type][0]->hw_priv, + CAM_JPEG_CMD_SET_IRQ_CB, + &irq_cb, sizeof(irq_cb)); + if (rc) { + CAM_ERR(CAM_JPEG, "SET_IRQ_CB failed %d", rc); + buf_data.evt_param = CAM_SYNC_JPEG_EVENT_SET_IRQ_CB; + goto end_callcb; + } + + /* insert one of the cdm payloads */ + rc = cam_jpeg_process_next_hw_update(priv, ctx_data, 
&buf_data); + if (rc) { + CAM_ERR(CAM_JPEG, "next hw update failed %d", rc); + goto end_callcb; + } + + p_cfg_req->submit_timestamp = ktime_get(); + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; + +end_callcb: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + if (p_cfg_req) { + buf_data.num_handles = + config_args->num_out_map_entries; + for (i = 0; i < buf_data.num_handles; i++) { + buf_data.resource_handle[i] = + config_args->out_map_entries[i].resource_handle; + } + buf_data.request_id = (uintptr_t)jpeg_req->request_id; + if (event_inject) + cam_jpeg_mgr_apply_evt_injection(&buf_data, ctx_data, + &signal_fence_buffer); + + if (signal_fence_buffer) { + jpeg_done_evt.evt_id = CAM_CTX_EVT_ID_ERROR; + jpeg_done_evt.buf_done_data = &buf_data; + ctx_data->ctxt_event_cb(ctx_data->context_priv, + CAM_JPEG_EVT_ID_BUF_DONE, &jpeg_done_evt); + } + } +end_unusedev: + mutex_lock(&hw_mgr->hw_mgr_mutex); + hw_mgr->device_in_use[p_cfg_req->dev_type][0] = false; + hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = NULL; + +end: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args) +{ + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_config_args *config_args = config_hw_args; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + struct cam_jpeg_request_data *jpeg_req; + struct crm_workq_task *task; + struct cam_jpeg_process_frame_work_data_t *task_data; + struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL; + int rc; + + if (!hw_mgr || !config_args) { + CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK", + hw_mgr, config_args); + return -EINVAL; + } + + if (!config_args->num_hw_update_entries) { + CAM_ERR(CAM_JPEG, "No hw update enteries are available"); + return -EINVAL; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map; + if (!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is not in use"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 
-EINVAL; + } + + if (list_empty(&hw_mgr->free_req_list)) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_JPEG, "list empty"); + return -ENOMEM; + } + + p_cfg_req = list_first_entry(&hw_mgr->free_req_list, + struct cam_jpeg_hw_cfg_req, list); + list_del_init(&p_cfg_req->list); + + /* Update Currently Processing Config Request */ + p_cfg_req->hw_cfg_args = *config_args; + p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + + jpeg_req = (struct cam_jpeg_request_data *)config_args->priv; + p_cfg_req->req_id = (uintptr_t)jpeg_req->request_id; + p_cfg_req->num_hw_entry_processed = 0; + CAM_DBG(CAM_JPEG, "req_id: %u, dev_type: %d", + p_cfg_req->req_id, ctx_data->jpeg_dev_acquire_info.dev_type); + task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame); + if (!task) { + CAM_ERR(CAM_JPEG, "no empty task"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + rc = -ENOMEM; + goto err_after_dq_free_list; + } + + task_data = (struct cam_jpeg_process_frame_work_data_t *) + task->payload; + if (!task_data) { + CAM_ERR(CAM_JPEG, "task_data is NULL"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + rc = -EINVAL; + goto err_after_dq_free_list; + } + CAM_DBG(CAM_JPEG, "cfge %pK num %d", + p_cfg_req->hw_cfg_args.hw_update_entries, + p_cfg_req->hw_cfg_args.num_hw_update_entries); + + list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + task_data->data = config_args->priv; + task_data->request_id = (uintptr_t)jpeg_req->request_id; + task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE; + task->process_cb = cam_jpeg_mgr_process_hw_update_entries; + + rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr, + CRM_TASK_PRIORITY_0); + if (rc) { + CAM_ERR(CAM_JPEG, "failed to enqueue task %d", rc); + goto err_after_get_task; + } + + return rc; + +err_after_get_task: + list_del_init(&p_cfg_req->list); +err_after_dq_free_list: + cam_jpeg_mgr_move_req_to_free_list(p_cfg_req); + return rc; +} + +static int 
cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv, + void *prepare_hw_update_args) +{ + int rc, i, j, k; + struct cam_hw_prepare_update_args *prepare_args = + prepare_hw_update_args; + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + struct cam_packet *packet = NULL; + struct cam_buf_io_cfg *io_cfg_ptr = NULL; + + if (!prepare_args || !hw_mgr) { + CAM_ERR(CAM_JPEG, "Invalid args %pK %pK", + prepare_args, hw_mgr); + return -EINVAL; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + ctx_data = (struct cam_jpeg_hw_ctx_data *)prepare_args->ctxt_to_hw_map; + if (!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is not in use"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + packet = prepare_args->packet; + if (!packet) { + CAM_ERR(CAM_JPEG, "received packet is NULL"); + return -EINVAL; + } + + if (((packet->header.op_code & 0xff) != CAM_JPEG_OPCODE_ENC_UPDATE) && + ((packet->header.op_code + & 0xff) != CAM_JPEG_OPCODE_DMA_UPDATE)) { + CAM_ERR(CAM_JPEG, "Invalid Opcode in pkt: %d", + packet->header.op_code & 0xff); + return -EINVAL; + } + + rc = cam_packet_util_validate_packet(packet, prepare_args->remain_len); + if (rc) { + CAM_ERR(CAM_JPEG, "invalid packet %d", rc); + return rc; + } + + if (!packet->num_cmd_buf || + (packet->num_cmd_buf > CAM_JPEG_MAX_NUM_CMD_BUFFS) || + !packet->num_patches || !packet->num_io_configs || + (packet->num_io_configs > CAM_JPEG_IMAGE_MAX)) { + CAM_ERR(CAM_JPEG, + "wrong number of cmd/patch/io_configs info: %u %u %u", + packet->num_cmd_buf, packet->num_patches, + packet->num_io_configs); + return -EINVAL; + } + + rc = cam_packet_util_process_patches(packet, prepare_args->buf_tracker, + hw_mgr->iommu_hdl, -1, false); + if (rc) { + CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc); + return rc; + } + + io_cfg_ptr = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload_flex + + packet->io_configs_offset / 4); + CAM_DBG(CAM_JPEG, "Packet: %pK, 
io_cfg_ptr: %pK size: %lu req_id: %u dev_type: %d", + (void *)packet, + (void *)io_cfg_ptr, + sizeof(struct cam_buf_io_cfg), + packet->header.request_id, + ctx_data->jpeg_dev_acquire_info.dev_type); + + prepare_args->num_out_map_entries = 0; + + for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) { + if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) { + prepare_args->in_map_entries[j].resource_handle = + io_cfg_ptr[i].resource_type; + prepare_args->in_map_entries[j++].sync_id = + io_cfg_ptr[i].fence; + prepare_args->num_in_map_entries++; + } else { + prepare_args->out_map_entries[k].resource_handle = + io_cfg_ptr[i].resource_type; + prepare_args->out_map_entries[k++].sync_id = + io_cfg_ptr[i].fence; + prepare_args->num_out_map_entries++; + } + CAM_DBG(CAM_JPEG, "dir[%d]: %u, fence: %u resource_type %d ", + i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence, + io_cfg_ptr[i].resource_type); + } + + rc = cam_jpeg_add_command_buffers(packet, prepare_args, ctx_data); + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_JPEG, "Sending relevant buffers for request:%llu to presil", + packet->header.request_id); + rc = cam_presil_send_buffers_from_packet(packet, hw_mgr->iommu_hdl, + hw_mgr->cdm_iommu_hdl); + if (rc) { + CAM_ERR(CAM_JPEG, "Error sending buffers for request:%llu to presil", + packet->header.request_id); + return rc; + } + } + + CAM_DBG(CAM_JPEG, "will wait on input sync sync_id %d", + prepare_args->in_map_entries[0].sync_id); + + return rc; +} + +static void cam_jpeg_mgr_stop_deinit_dev(struct cam_jpeg_hw_mgr *hw_mgr, + struct cam_jpeg_hw_cfg_req *p_cfg_req, uint32_t dev_type) +{ + int rc = 0; + struct cam_jpeg_set_irq_cb irq_cb; + + /* stop reset Unregister CB and deinit */ + irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_sched_bottom_half; + irq_cb.irq_cb_data.private_data = NULL; + irq_cb.irq_cb_data.jpeg_req = NULL; + irq_cb.b_set_cb = false; + if (hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) { + rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd( + 
hw_mgr->devices[dev_type][0]->hw_priv, + CAM_JPEG_CMD_SET_IRQ_CB, + &irq_cb, sizeof(irq_cb)); + if (rc) + CAM_ERR(CAM_JPEG, "SET_IRQ_CB fail %d", rc); + } else { + CAM_ERR(CAM_JPEG, "process_cmd null %d", dev_type); + } + + if (hw_mgr->devices[dev_type][0]->hw_ops.stop) { + rc = hw_mgr->devices[dev_type][0]->hw_ops.stop( + hw_mgr->devices[dev_type][0]->hw_priv, + NULL, 0); + if (rc) + CAM_ERR(CAM_JPEG, "stop fail %d", rc); + } else { + CAM_ERR(CAM_JPEG, "op stop null %d", dev_type); + } + + if (hw_mgr->devices[dev_type][0]->hw_ops.deinit) { + rc = hw_mgr->devices[dev_type][0]->hw_ops.deinit( + hw_mgr->devices[dev_type][0]->hw_priv, + NULL, 0); + if (rc) + CAM_ERR(CAM_JPEG, "Failed to Deinit %d HW %d", + dev_type, rc); + } else { + CAM_ERR(CAM_JPEG, "op deinit null %d", dev_type); + } + + hw_mgr->device_in_use[dev_type][0] = false; + hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL; +} + +static int cam_jpeg_mgr_flush(void *hw_mgr_priv, + struct cam_jpeg_hw_ctx_data *ctx_data) +{ + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + uint32_t dev_type; + struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL; + struct cam_jpeg_hw_cfg_req *cfg_req = NULL, *req_temp = NULL; + + CAM_DBG(CAM_JPEG, "E: JPEG flush ctx"); + + if (!hw_mgr || !ctx_data) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + + p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0]; + + if (hw_mgr->device_in_use[dev_type][0] == true && + p_cfg_req != NULL) { + if ((struct cam_jpeg_hw_ctx_data *) + p_cfg_req->hw_cfg_args.ctxt_to_hw_map == ctx_data) { + cam_jpeg_mgr_stop_deinit_dev(hw_mgr, p_cfg_req, + dev_type); + list_del_init(&p_cfg_req->list); + cam_jpeg_mgr_move_req_to_free_list(p_cfg_req); + } + } + + list_for_each_entry_safe(cfg_req, req_temp, + &hw_mgr->hw_config_req_list, list) { + if ((struct cam_jpeg_hw_ctx_data *) + cfg_req->hw_cfg_args.ctxt_to_hw_map != ctx_data) + continue; + + list_del_init(&cfg_req->list); + 
cam_jpeg_mgr_move_req_to_free_list(cfg_req); + } + + CAM_DBG(CAM_JPEG, "X: JPEG flush ctx"); + + return 0; +} + +static int cam_jpeg_mgr_flush_req(void *hw_mgr_priv, + struct cam_jpeg_hw_ctx_data *ctx_data, + struct cam_hw_flush_args *flush_args) +{ + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_jpeg_hw_cfg_req *cfg_req = NULL; + struct cam_jpeg_hw_cfg_req *req_temp = NULL; + struct cam_jpeg_request_data *jpeg_req; + uintptr_t request_id = 0; + uint32_t dev_type; + struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL; + bool b_req_found = false; + + CAM_DBG(CAM_JPEG, "E: JPEG flush req"); + + if (!hw_mgr || !ctx_data || !flush_args) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + + if (flush_args->num_req_pending) + return 0; + + jpeg_req = (struct cam_jpeg_request_data *)flush_args->flush_req_active[0]; + if (!jpeg_req) { + CAM_ERR(CAM_JPEG, "Request data is null"); + return -EINVAL; + } + + request_id = (uintptr_t)jpeg_req->request_id; + + if (!flush_args->num_req_active) + return 0; + + if (request_id <= 0) { + CAM_ERR(CAM_JPEG, "Invalid red id %ld", request_id); + return -EINVAL; + } + + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + + p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0]; + if (hw_mgr->device_in_use[dev_type][0] == true && + p_cfg_req != NULL) { + if (((struct cam_jpeg_hw_ctx_data *) + p_cfg_req->hw_cfg_args.ctxt_to_hw_map == ctx_data) && + (p_cfg_req->req_id == request_id)) { + cam_jpeg_mgr_stop_deinit_dev(hw_mgr, p_cfg_req, + dev_type); + list_del_init(&p_cfg_req->list); + cam_jpeg_mgr_move_req_to_free_list(p_cfg_req); + b_req_found = true; + } + } + + list_for_each_entry_safe(cfg_req, req_temp, + &hw_mgr->hw_config_req_list, list) { + if ((struct cam_jpeg_hw_ctx_data *) + cfg_req->hw_cfg_args.ctxt_to_hw_map != ctx_data) + continue; + + if (cfg_req->req_id != request_id) + continue; + + list_del_init(&cfg_req->list); + cam_jpeg_mgr_move_req_to_free_list(cfg_req); + b_req_found = true; + break; + } + + if 
(!b_req_found) { + CAM_ERR(CAM_JPEG, "req not found %ld", request_id); + return -EINVAL; + } + + CAM_DBG(CAM_JPEG, "X: JPEG flush req"); + return 0; +} + +static int cam_jpeg_mgr_hw_flush(void *hw_mgr_priv, void *flush_hw_args) +{ + int rc = 0; + struct cam_hw_flush_args *flush_args = flush_hw_args; + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + + if (!hw_mgr || !flush_args || !flush_args->ctxt_to_hw_map) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + mutex_lock(&hw_mgr->hw_mgr_mutex); + + ctx_data = (struct cam_jpeg_hw_ctx_data *)flush_args->ctxt_to_hw_map; + if (!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is not in use"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) || + (flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) { + CAM_ERR(CAM_JPEG, "Invalid flush type: %d", + flush_args->flush_type); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + switch (flush_args->flush_type) { + case CAM_FLUSH_TYPE_ALL: + rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data); + if ((rc)) + CAM_ERR(CAM_JPEG, "Flush failed %d", rc); + break; + case CAM_FLUSH_TYPE_REQ: + rc = cam_jpeg_mgr_flush_req(hw_mgr_priv, ctx_data, flush_args); + break; + default: + CAM_ERR(CAM_JPEG, "Invalid flush type: %d", + flush_args->flush_type); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +} + +static int cam_jpeg_mgr_hw_stop(void *hw_mgr_priv, void *stop_hw_args) +{ + int rc; + struct cam_hw_stop_args *stop_args = + (struct cam_hw_stop_args *)stop_hw_args; + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + + if (!hw_mgr || !stop_args || !stop_args->ctxt_to_hw_map) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + mutex_lock(&hw_mgr->hw_mgr_mutex); + + ctx_data = (struct cam_jpeg_hw_ctx_data *)stop_args->ctxt_to_hw_map; + if 
(!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is not in use"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data); + if ((rc)) + CAM_ERR(CAM_JPEG, "flush failed %d", rc); + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +} + +static int cam_jpeg_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args) +{ + int rc; + struct cam_hw_release_args *release_hw = release_hw_args; + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + uint32_t dev_type; + + if (!hw_mgr || !release_hw || !release_hw->ctxt_to_hw_map) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + + ctx_data = (struct cam_jpeg_hw_ctx_data *)release_hw->ctxt_to_hw_map; + if (!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is not in use"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + + mutex_lock(&hw_mgr->hw_mgr_mutex); + if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 0) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_JPEG, "Error Unbalanced deinit"); + return -EFAULT; + } + + hw_mgr->cdm_info[dev_type][0].ref_cnt--; + if (!(hw_mgr->cdm_info[dev_type][0].ref_cnt)) { + if (cam_cdm_stream_off( + hw_mgr->cdm_info[dev_type][0].cdm_handle)) { + CAM_ERR(CAM_JPEG, "CDM stream off failed %d", + hw_mgr->cdm_info[dev_type][0].cdm_handle); + } + /* release cdm handle */ + cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle); + } + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + rc = cam_jpeg_mgr_release_ctx(hw_mgr, ctx_data); + if (rc) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_JPEG, "JPEG release ctx failed"); + CAM_MEM_FREE(ctx_data->cdm_cmd); + ctx_data->cdm_cmd = NULL; + + return -EINVAL; + } + + CAM_MEM_FREE(ctx_data->cdm_cmd); + ctx_data->cdm_cmd = NULL; + CAM_DBG(CAM_JPEG, "handle %llu", ctx_data); + + return rc; +} + +static int cam_jpeg_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args) +{ + int 
rc = 0; + int32_t ctx_id = 0; + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + struct cam_hw_acquire_args *args = acquire_hw_args; + struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info; + struct cam_cdm_acquire_data cdm_acquire; + uint32_t dev_type; + + if ((!hw_mgr_priv) || (!acquire_hw_args)) { + CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK", hw_mgr_priv, + acquire_hw_args); + return -EINVAL; + } + + if (args->num_acq > 1) { + CAM_ERR(CAM_JPEG, + "number of resources are wrong: %u", + args->num_acq); + return -EINVAL; + } + + if (copy_from_user(&jpeg_dev_acquire_info, + (void __user *)args->acquire_info, + sizeof(jpeg_dev_acquire_info))) { + CAM_ERR(CAM_JPEG, "copy failed"); + return -EFAULT; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + ctx_id = cam_jpeg_mgr_get_free_ctx(hw_mgr); + if (ctx_id >= CAM_JPEG_CTX_MAX) { + CAM_ERR(CAM_JPEG, "No free ctx space in hw_mgr"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EFAULT; + } + + ctx_data = &hw_mgr->ctx_data[ctx_id]; + + ctx_data->cdm_cmd = + CAM_MEM_ZALLOC(((sizeof(struct cam_cdm_bl_request)) + + ((CAM_JPEG_HW_ENTRIES_MAX - 1) * + sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL); + if (!ctx_data->cdm_cmd) { + rc = -ENOMEM; + goto jpeg_release_ctx; + } + + mutex_lock(&ctx_data->ctx_mutex); + ctx_data->jpeg_dev_acquire_info = jpeg_dev_acquire_info; + mutex_unlock(&ctx_data->ctx_mutex); + + if (ctx_data->jpeg_dev_acquire_info.dev_type >= + CAM_JPEG_RES_TYPE_MAX) { + rc = -EINVAL; + goto acq_cdm_hdl_failed; + } + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + CAM_DBG(CAM_JPEG, "ctx_id: %u, dev_type: %u", + ctx_id, dev_type); + if (!hw_mgr->cdm_info[dev_type][0].ref_cnt) { + + if (dev_type == CAM_JPEG_RES_TYPE_ENC) { + memcpy(cdm_acquire.identifier, + "jpegenc", sizeof("jpegenc")); + } else { + memcpy(cdm_acquire.identifier, + "jpegdma", sizeof("jpegdma")); + } + cdm_acquire.cell_index = 0; + cdm_acquire.handle = 0; + cdm_acquire.userdata = ctx_data; + if 
(hw_mgr->cdm_reg_map[dev_type][0]) { + cdm_acquire.base_array[0] = + hw_mgr->cdm_reg_map[dev_type][0]; + } + cdm_acquire.base_array_cnt = 1; + cdm_acquire.id = CAM_CDM_VIRTUAL; + cdm_acquire.cam_cdm_callback = NULL; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; + + rc = cam_cdm_acquire(&cdm_acquire); + if (rc) { + CAM_ERR(CAM_JPEG, "Failed to acquire the CDM HW %d", + rc); + rc = -EFAULT; + goto acq_cdm_hdl_failed; + } + hw_mgr->cdm_info[dev_type][0].cdm_handle = cdm_acquire.handle; + hw_mgr->cdm_info[dev_type][0].cdm_ops = cdm_acquire.ops; + hw_mgr->cdm_info[dev_type][0].ref_cnt++; + } else { + hw_mgr->cdm_info[dev_type][0].ref_cnt++; + } + + if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1) + if (cam_cdm_stream_on( + hw_mgr->cdm_info[dev_type][0].cdm_handle)) { + CAM_ERR(CAM_JPEG, "Can not start cdm (%d)!", + hw_mgr->cdm_info[dev_type][0].cdm_handle); + rc = -EFAULT; + goto start_cdm_hdl_failed; + } + + mutex_lock(&ctx_data->ctx_mutex); + ctx_data->context_priv = args->context_data; + + args->ctxt_to_hw_map = (void *)&(hw_mgr->ctx_data[ctx_id]); + + mutex_unlock(&ctx_data->ctx_mutex); + + hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb; + hw_mgr->ctx_data[ctx_id].mini_dump_cb = args->mini_dump_cb; + + if (copy_to_user((void __user *)args->acquire_info, + &jpeg_dev_acquire_info, + sizeof(jpeg_dev_acquire_info))) { + rc = -EFAULT; + goto copy_to_user_failed; + } + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + CAM_DBG(CAM_JPEG, "success ctx_data= %pK", ctx_data); + + return rc; + +copy_to_user_failed: + if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1) + cam_cdm_stream_off(hw_mgr->cdm_info[dev_type][0].cdm_handle); +start_cdm_hdl_failed: + if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1) + cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle); + hw_mgr->cdm_info[dev_type][0].ref_cnt--; +acq_cdm_hdl_failed: + CAM_MEM_FREE(ctx_data->cdm_cmd); +jpeg_release_ctx: + cam_jpeg_mgr_release_ctx(hw_mgr, ctx_data); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +} + 
+static int cam_jpeg_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args) +{ + int rc; + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_query_cap_cmd *query_cap = hw_caps_args; + + if (!hw_mgr_priv || !hw_caps_args) { + CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK", + hw_mgr_priv, hw_caps_args); + return -EINVAL; + } + + if (sizeof(struct cam_jpeg_query_cap_cmd) != query_cap->size) { + CAM_ERR(CAM_JPEG, + "Input query cap size:%u does not match expected query cap size: %u", + query_cap->size, sizeof(struct cam_jpeg_query_cap_cmd)); + return -EFAULT; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle), + &g_jpeg_hw_mgr.jpeg_caps, + sizeof(struct cam_jpeg_query_cap_cmd))) { + CAM_ERR(CAM_JPEG, "copy_to_user failed"); + rc = -EFAULT; + goto copy_error; + } + CAM_DBG(CAM_JPEG, "Success"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return 0; + +copy_error: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static void cam_req_mgr_process_workq_jpeg_command_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static void cam_req_mgr_process_workq_jpeg_message_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static int cam_jpeg_setup_workqs(void) +{ + int rc, i; + + rc = cam_req_mgr_workq_create( + "jpeg_command_queue", + CAM_JPEG_WORKQ_NUM_TASK, + &g_jpeg_hw_mgr.work_process_frame, + CRM_WORKQ_USAGE_NON_IRQ, 0, + cam_req_mgr_process_workq_jpeg_command_queue); + if (rc) { + CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc); + goto work_process_frame_failed; + } + + rc = cam_req_mgr_workq_create( + "jpeg_message_queue", + CAM_JPEG_WORKQ_NUM_TASK, + &g_jpeg_hw_mgr.work_process_irq_cb, + CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_workq_jpeg_message_queue); + if (rc) { + CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc); + goto work_process_irq_cb_failed; + } + + g_jpeg_hw_mgr.process_frame_work_data = + CAM_MEM_ZALLOC(sizeof(struct 
cam_jpeg_process_frame_work_data_t) * + CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL); + if (!g_jpeg_hw_mgr.process_frame_work_data) { + rc = -ENOMEM; + goto work_process_frame_data_failed; + } + + g_jpeg_hw_mgr.process_irq_cb_work_data = + CAM_MEM_ZALLOC(sizeof(struct cam_jpeg_process_irq_work_data_t) * + CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL); + if (!g_jpeg_hw_mgr.process_irq_cb_work_data) { + rc = -ENOMEM; + goto work_process_irq_cb_data_failed; + } + + for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++) + g_jpeg_hw_mgr.work_process_irq_cb->task.pool[i].payload = + &g_jpeg_hw_mgr.process_irq_cb_work_data[i]; + + for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++) + g_jpeg_hw_mgr.work_process_frame->task.pool[i].payload = + &g_jpeg_hw_mgr.process_frame_work_data[i]; + + INIT_LIST_HEAD(&g_jpeg_hw_mgr.hw_config_req_list); + INIT_LIST_HEAD(&g_jpeg_hw_mgr.free_req_list); + for (i = 0; i < CAM_JPEG_HW_CFG_Q_MAX; i++) { + INIT_LIST_HEAD(&(g_jpeg_hw_mgr.req_list[i].list)); + list_add_tail(&(g_jpeg_hw_mgr.req_list[i].list), + &(g_jpeg_hw_mgr.free_req_list)); + } + + return rc; + +work_process_irq_cb_data_failed: + CAM_MEM_FREE(g_jpeg_hw_mgr.process_frame_work_data); +work_process_frame_data_failed: + cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_irq_cb); +work_process_irq_cb_failed: + cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_frame); +work_process_frame_failed: + + return rc; +} + +static int cam_jpeg_init_devices(struct device_node *of_node, + uint32_t *p_num_enc_dev, + uint32_t *p_num_dma_dev) +{ + int count, i, rc; + uint32_t num_dev; + uint32_t num_dma_dev; + const char *name = NULL; + struct device_node *child_node = NULL; + struct platform_device *child_pdev = NULL; + struct cam_hw_intf *child_dev_intf = NULL; + struct cam_hw_info *enc_hw = NULL; + struct cam_hw_info *dma_hw = NULL; + struct cam_hw_soc_info *enc_soc_info = NULL; + struct cam_hw_soc_info *dma_soc_info = NULL; + + if (!p_num_enc_dev || !p_num_dma_dev) { + rc = -EINVAL; + goto num_dev_failed; + } + count 
= of_property_count_strings(of_node, "compat-hw-name"); + if (!count) { + CAM_ERR(CAM_JPEG, + "no compat hw found in dev tree, count = %d", + count); + rc = -EINVAL; + goto num_dev_failed; + } + + rc = of_property_read_u32(of_node, "num-jpeg-enc", &num_dev); + if (rc) { + CAM_ERR(CAM_JPEG, "read num enc devices failed %d", rc); + goto num_enc_failed; + } + g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC] = CAM_MEM_ZALLOC( + sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL); + if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]) { + rc = -ENOMEM; + CAM_ERR(CAM_JPEG, "getting number of dma dev nodes failed"); + goto num_enc_failed; + } + + rc = of_property_read_u32(of_node, "num-jpeg-dma", &num_dma_dev); + if (rc) { + CAM_ERR(CAM_JPEG, "get num dma dev nodes failed %d", rc); + goto num_dma_failed; + } + + g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA] = CAM_MEM_ZALLOC( + sizeof(struct cam_hw_intf *) * num_dma_dev, GFP_KERNEL); + if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]) { + rc = -ENOMEM; + goto num_dma_failed; + } + + for (i = 0; i < count; i++) { + rc = of_property_read_string_index(of_node, "compat-hw-name", + i, &name); + if (rc) { + CAM_ERR(CAM_JPEG, "getting dev object name failed"); + goto compat_hw_name_failed; + } + + child_node = of_find_node_by_name(NULL, name); + if (!child_node) { + CAM_ERR(CAM_JPEG, + "error! 
Cannot find node in dtsi %s", name); + rc = -ENODEV; + goto compat_hw_name_failed; + } + + child_pdev = of_find_device_by_node(child_node); + if (!child_pdev) { + CAM_ERR(CAM_JPEG, "failed to find device on bus %s", + child_node->name); + rc = -ENODEV; + of_node_put(child_node); + goto compat_hw_name_failed; + } + + child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata( + child_pdev); + if (!child_dev_intf) { + CAM_ERR(CAM_JPEG, "no child device"); + of_node_put(child_node); + rc = -ENODEV; + goto compat_hw_name_failed; + } + CAM_DBG(CAM_JPEG, "child_intf %pK type %d id %d", + child_dev_intf, + child_dev_intf->hw_type, + child_dev_intf->hw_idx); + + if ((child_dev_intf->hw_type == CAM_JPEG_DEV_ENC && + child_dev_intf->hw_idx >= num_dev) || + (child_dev_intf->hw_type == CAM_JPEG_DEV_DMA && + child_dev_intf->hw_idx >= num_dma_dev)) { + CAM_ERR(CAM_JPEG, "index out of range"); + rc = -ENODEV; + goto compat_hw_name_failed; + } + g_jpeg_hw_mgr.devices[child_dev_intf->hw_type] + [child_dev_intf->hw_idx] = child_dev_intf; + + of_node_put(child_node); + } + + enc_hw = (struct cam_hw_info *) + g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0]->hw_priv; + enc_soc_info = &enc_hw->soc_info; + g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_ENC][0] = + &enc_soc_info->reg_map[0]; + dma_hw = (struct cam_hw_info *) + g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]->hw_priv; + dma_soc_info = &dma_hw->soc_info; + g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_DMA][0] = + &dma_soc_info->reg_map[0]; + + (void) g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0]->hw_ops.process_cmd( + g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0]->hw_priv, + CAM_JPEG_CMD_GET_NUM_PID, + &g_jpeg_hw_mgr.num_pid[CAM_JPEG_DEV_ENC], + sizeof(uint32_t)); + + rc = g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]->hw_ops.process_cmd( + g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]->hw_priv, + CAM_JPEG_CMD_GET_NUM_PID, + &g_jpeg_hw_mgr.num_pid[CAM_JPEG_DEV_DMA], + sizeof(uint32_t)); + + *p_num_enc_dev = num_dev; + *p_num_dma_dev = num_dma_dev; + + return 
rc; + +compat_hw_name_failed: + CAM_MEM_FREE(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]); +num_dma_failed: + CAM_MEM_FREE(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]); +num_enc_failed: +num_dev_failed: + + return rc; +} + +static int cam_jpeg_mgr_hw_dump(void *hw_mgr_priv, void *dump_hw_args) +{ + int rc; + uint8_t *dst; + ktime_t cur_time; + size_t remain_len; + uint32_t min_len; + uint32_t dev_type; + uint64_t diff; + uint64_t *addr, *start; + struct timespec64 cur_ts; + struct timespec64 req_ts; + struct cam_jpeg_hw_mgr *hw_mgr; + struct cam_hw_dump_args *dump_args; + struct cam_jpeg_hw_cfg_req *p_cfg_req; + struct cam_jpeg_hw_ctx_data *ctx_data; + struct cam_jpeg_hw_dump_args jpeg_dump_args; + struct cam_jpeg_hw_dump_header *hdr; + + if (!hw_mgr_priv || !dump_hw_args) { + CAM_ERR(CAM_JPEG, "Invalid args %pK %pK", + hw_mgr_priv, dump_hw_args); + return -EINVAL; + } + + hw_mgr = hw_mgr_priv; + dump_args = (struct cam_hw_dump_args *)dump_hw_args; + ctx_data = (struct cam_jpeg_hw_ctx_data *)dump_args->ctxt_to_hw_map; + + if (!ctx_data) { + CAM_ERR(CAM_JPEG, "Invalid context"); + return -EINVAL; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + if (!ctx_data->in_use) { + CAM_ERR(CAM_JPEG, "ctx is not in use"); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + + if (true == hw_mgr->device_in_use[dev_type][0]) { + p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0]; + if (p_cfg_req && p_cfg_req->req_id == + (uintptr_t)dump_args->request_id) + goto hw_dump; + } + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 0; + +hw_dump: + cur_time = ktime_get(); + diff = ktime_us_delta(p_cfg_req->submit_timestamp, cur_time); + cur_ts = ktime_to_timespec64(cur_time); + req_ts = ktime_to_timespec64(p_cfg_req->submit_timestamp); + + if (diff < CAM_JPEG_RESPONSE_TIME_THRESHOLD) { + CAM_INFO(CAM_JPEG, + "No error req %lld req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + 
req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 0; + } + + CAM_INFO(CAM_JPEG, + "Error req %lld req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + rc = cam_mem_get_cpu_buf(dump_args->buf_handle, + &jpeg_dump_args.cpu_addr, &jpeg_dump_args.buf_len); + if (rc) { + CAM_ERR(CAM_JPEG, "Invalid handle %u rc %d", + dump_args->buf_handle, rc); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -rc; + } + + if (jpeg_dump_args.buf_len <= dump_args->offset) { + CAM_WARN(CAM_JPEG, "dump offset overshoot len %zu offset %zu", + jpeg_dump_args.buf_len, dump_args->offset); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return -ENOSPC; + } + + remain_len = jpeg_dump_args.buf_len - dump_args->offset; + min_len = sizeof(struct cam_jpeg_hw_dump_header) + + (CAM_JPEG_HW_DUMP_NUM_WORDS * sizeof(uint64_t)); + if (remain_len < min_len) { + CAM_WARN(CAM_JPEG, "dump buffer exhaust remain %zu min %u", + remain_len, min_len); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return -ENOSPC; + } + + dst = (uint8_t *)jpeg_dump_args.cpu_addr + dump_args->offset; + hdr = (struct cam_jpeg_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_JPEG_HW_DUMP_TAG_MAX_LEN, + "JPEG_REQ:"); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_jpeg_hw_dump_header)); + start = addr; + *addr++ = dump_args->request_id; + *addr++ = req_ts.tv_sec; + *addr++ = req_ts.tv_nsec/NSEC_PER_USEC; + *addr++ = cur_ts.tv_sec; + *addr++ = cur_ts.tv_nsec/NSEC_PER_USEC; + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_jpeg_hw_dump_header); + jpeg_dump_args.request_id = dump_args->request_id; + jpeg_dump_args.offset = dump_args->offset; + + if 
(hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) { + rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd( + hw_mgr->devices[dev_type][0]->hw_priv, + CAM_JPEG_CMD_HW_DUMP, + &jpeg_dump_args, sizeof(jpeg_dump_args)); + } + + CAM_DBG(CAM_JPEG, "Offset before %u after %u", + dump_args->offset, jpeg_dump_args.offset); + dump_args->offset = jpeg_dump_args.offset; + mutex_unlock(&hw_mgr->hw_mgr_mutex); + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; +} + +static void cam_jpeg_mgr_dump_pf_data( + struct cam_jpeg_hw_mgr *hw_mgr, + struct cam_hw_cmd_args *hw_cmd_args) +{ + struct cam_jpeg_hw_ctx_data *ctx_data; + struct cam_packet *packet; + struct cam_jpeg_match_pid_args jpeg_pid_mid_args; + struct cam_hw_dump_pf_args *pf_args; + uint32_t dev_type; + bool hw_pid_support = true; + int rc = 0; + struct cam_ctx_request *req_pf; + + ctx_data = (struct cam_jpeg_hw_ctx_data *)hw_cmd_args->ctxt_to_hw_map; + pf_args = hw_cmd_args->u.pf_cmd_args->pf_args; + req_pf = (struct cam_ctx_request *) + hw_cmd_args->u.pf_cmd_args->pf_req_info->req; + packet = (struct cam_packet *)req_pf->packet; + jpeg_pid_mid_args.fault_mid = pf_args->pf_smmu_info->mid; + jpeg_pid_mid_args.pid = pf_args->pf_smmu_info->pid; + dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; + + if (!hw_mgr->num_pid[dev_type]) { + hw_pid_support = false; + goto iodump; + } + + rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd( + hw_mgr->devices[dev_type][0]->hw_priv, + CAM_JPEG_CMD_MATCH_PID_MID, + &jpeg_pid_mid_args, sizeof(jpeg_pid_mid_args)); + if (rc) { + CAM_ERR(CAM_JPEG, "CAM_JPEG_CMD_MATCH_PID_MID failed %d", rc); + return; + } + + if (!jpeg_pid_mid_args.pid_match_found) { + CAM_INFO(CAM_JPEG, "This context data is not matched with pf pid and mid"); + return; + } + pf_args->pf_context_info.resource_type = jpeg_pid_mid_args.match_res; + +iodump: + cam_packet_util_dump_io_bufs(packet, hw_mgr->iommu_hdl, hw_mgr->iommu_sec_hdl, + pf_args, hw_pid_support); + + /* Dump JPEG registers for debug purpose 
*/ + if (dev_type == CAM_JPEG_RES_TYPE_DMA || + dev_type == CAM_JPEG_RES_TYPE_ENC) { + rc = hw_mgr->devices[dev_type][CAM_JPEG_MEM_BASE_INDEX]->hw_ops.process_cmd( + hw_mgr->devices[dev_type][CAM_JPEG_MEM_BASE_INDEX]->hw_priv, + CAM_JPEG_CMD_DUMP_DEBUG_REGS, + NULL, 0); + if (rc) + CAM_ERR(CAM_JPEG, "Invalid process_cmd ops"); + + } else { + CAM_ERR(CAM_JPEG, "Invalid dev_type %d", dev_type); + } +} + +static int cam_jpeg_mgr_cmd(void *hw_mgr_priv, void *cmd_args) +{ + int rc = 0; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + + if (!hw_mgr_priv || !cmd_args) { + CAM_ERR(CAM_JPEG, "Invalid arguments"); + return -EINVAL; + } + + switch (hw_cmd_args->cmd_type) { + case CAM_HW_MGR_CMD_DUMP_PF_INFO: + cam_jpeg_mgr_dump_pf_data(hw_mgr, hw_cmd_args); + break; + default: + CAM_ERR(CAM_JPEG, "Invalid cmd :%d", + hw_cmd_args->cmd_type); + } + + return rc; +} + +static unsigned long cam_jpeg_hw_mgr_mini_dump_cb(void *dst, unsigned long len, + void *priv_data) +{ + struct cam_jpeg_hw_mini_dump_req *md_req; + struct cam_jpeg_hw_mgr_mini_dump *md; + struct cam_jpeg_hw_ctx_mini_dump *ctx_md; + struct cam_jpeg_hw_ctx_data *ctx; + struct cam_jpeg_hw_mgr *hw_mgr; + struct cam_jpeg_hw_cfg_req *req; + struct cam_hw_mini_dump_args hw_dump_args; + uint32_t dev_type; + uint32_t i = 0; + unsigned long dumped_len = 0; + unsigned long remain_len = len; + + if (!dst || len < sizeof(*md)) { + CAM_ERR(CAM_JPEG, "Invalid params dst %pk len %lu", dst, len); + return 0; + } + + md = (struct cam_jpeg_hw_mgr_mini_dump *)dst; + md->num_context = 0; + hw_mgr = &g_jpeg_hw_mgr; + for (i = 0; i < CAM_JPEG_RES_TYPE_MAX; i++) { + if (hw_mgr->devices[i][0]->hw_ops.process_cmd) { + hw_mgr->devices[i][0]->hw_ops.process_cmd( + hw_mgr->devices[i][0]->hw_priv, + CAM_JPEG_CMD_MINI_DUMP, + &md->core[i], + sizeof(struct cam_jpeg_mini_dump_core_info)); + } + } + + dumped_len += sizeof(*md); + remain_len = len - dumped_len; + for (i = 0; i < CAM_JPEG_CTX_MAX; 
i++) {
		ctx = &hw_mgr->ctx_data[i];
		if (!ctx->in_use)
			continue;

		if (remain_len < sizeof(*ctx_md))
			goto end;

		md->num_context++;
		/* Per-context snapshot placed at the current write offset */
		ctx_md = (struct cam_jpeg_hw_ctx_mini_dump *)
			((uint8_t *)dst + dumped_len);
		md->ctx[i] = ctx_md;
		ctx_md->in_use = ctx->in_use;
		memcpy(&ctx_md->acquire_info, &ctx->jpeg_dev_acquire_info,
			sizeof(struct cam_jpeg_acquire_dev_info));
		dev_type = ctx->jpeg_dev_acquire_info.dev_type;
		req = hw_mgr->dev_hw_cfg_args[dev_type][0];
		if (req) {
			md_req = &md->cfg_req[dev_type];
			memcpy(&md_req->submit_timestamp, &req->submit_timestamp,
				sizeof(ktime_t));
			md_req->req_id = req->req_id;
			md_req->dev_type = req->dev_type;
			md_req->num_hw_entry_processed = req->num_hw_entry_processed;
		}

		/*
		 * NOTE(review): hw_dump_args.start_addr is computed from the
		 * same dumped_len as ctx_md above (dumped_len is never
		 * advanced past *ctx_md), so the context callback appears to
		 * write over the snapshot just written — confirm the intended
		 * buffer layout.
		 */
		hw_dump_args.len = remain_len;
		hw_dump_args.bytes_written = 0;
		hw_dump_args.start_addr = (void *)((uint8_t *)dst + dumped_len);
		hw_mgr->mini_dump_cb(ctx->context_priv, &hw_dump_args);
		/* Stop once the context dump would meet or exceed the buffer */
		if (dumped_len + hw_dump_args.bytes_written >= len)
			goto end;

		dumped_len += hw_dump_args.bytes_written;
		remain_len = len - dumped_len;
	}

end:
	return dumped_len;
}

/* debugfs knob: select camnoc misr for the read or write path */
static int cam_jpeg_set_camnoc_misr_test(void *data, u64 val)
{
	g_jpeg_hw_mgr.camnoc_misr_test = val;
	return 0;
}

static int cam_jpeg_get_camnoc_misr_test(void *data, u64 *val)
{
	*val = g_jpeg_hw_mgr.camnoc_misr_test;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(camnoc_misr_test, cam_jpeg_get_camnoc_misr_test,
	cam_jpeg_set_camnoc_misr_test, "%08llu");

/* debugfs knob: BUG on misr mismatch when set */
static int cam_jpeg_set_bug_on_misr(void *data, u64 val)
{
	g_jpeg_hw_mgr.bug_on_misr = val;
	return 0;
}

static int cam_jpeg_get_bug_on_misr(void *data, u64 *val)
{
	*val = g_jpeg_hw_mgr.bug_on_misr;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(bug_on_misr_mismatch, cam_jpeg_get_bug_on_misr,
	cam_jpeg_set_bug_on_misr, "%08llu");

#ifdef CONFIG_CAM_TEST_IRQ_LINE

/* Exercise the IRQ line of every probed JPEG core (test builds only) */
static int cam_jpeg_test_irq_line(void)
{
	struct cam_hw_intf *hw_intf;
	int rc = -EINVAL, i, j;

	for (i = 0; i <
CAM_JPEG_DEV_PER_TYPE_MAX; i++) { + for (j = 0; j < CAM_JPEG_DEV_MAX; j++) { + hw_intf = g_jpeg_hw_mgr.devices[j][i]; + if (hw_intf && hw_intf->hw_ops.test_irq_line) { + rc = hw_intf->hw_ops.test_irq_line(hw_intf->hw_priv); + if (rc) + CAM_ERR(CAM_JPEG, + "failed to verify IRQ line for JPEG-%s[%d]", + CAM_JPEG_DEV_TYPE(j), i); + } + } + } + + return rc; +} + +#else + +static int cam_jpeg_test_irq_line(void) +{ + CAM_ERR(CAM_JPEG, "IRQ line verification disabled!"); + return -EPERM; +} + +#endif + +#if (defined(CONFIG_CAM_TEST_IRQ_LINE) && defined(CONFIG_CAM_TEST_IRQ_LINE_AT_PROBE)) + +static int cam_jpeg_test_irq_line_at_probe(void) +{ + return cam_jpeg_test_irq_line(); +} + +#else + +static int cam_jpeg_test_irq_line_at_probe(void) +{ + return 0; +} + +#endif + +static int cam_jpeg_set_irq_line_test(void *data, u64 val) +{ + cam_jpeg_test_irq_line(); + return 0; +} + +static int cam_jpeg_get_irq_line_test(void *data, u64 *val) +{ + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(cam_jpeg_irq_line_test, cam_jpeg_get_irq_line_test, + cam_jpeg_set_irq_line_test, "%08llu"); + +static int cam_jpeg_mgr_create_debugfs_entry(void) +{ + int rc = 0; + struct dentry *dbgfileptr = NULL; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("jpeg", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_JPEG, "DebugFS could not create directory!"); + return rc; + } + /* Store parent inode for cleanup in caller */ + g_jpeg_hw_mgr.dentry = dbgfileptr; + + debugfs_create_file("camnoc_misr_test", 0644, g_jpeg_hw_mgr.dentry, + NULL, &camnoc_misr_test); + + debugfs_create_file("bug_on_misr_mismatch", 0644, g_jpeg_hw_mgr.dentry, + NULL, &bug_on_misr_mismatch); + + debugfs_create_file("test_irq_line", 0644, g_jpeg_hw_mgr.dentry, + NULL, &cam_jpeg_irq_line_test); + + return rc; +} + +static void cam_jpeg_mgr_inject_evt(void *hw_mgr_priv, void *evt_args) +{ + struct cam_jpeg_hw_ctx_data *ctx_data = hw_mgr_priv; + struct cam_hw_inject_evt_param *evt_params = evt_args; + + if 
(!ctx_data || !evt_params) {
		CAM_ERR(CAM_JPEG, "Invalid parameters ctx data %s event params %s",
			CAM_IS_NULL_TO_STR(ctx_data), CAM_IS_NULL_TO_STR(evt_params));
		return;
	}

	memcpy(&ctx_data->evt_inject_params, evt_params,
		sizeof(struct cam_hw_inject_evt_param));

	ctx_data->evt_inject_params.is_valid = true;
}

/*
 * Initialise the JPEG hw manager: populate the caller's hw_mgr_intf ops
 * table, discover devices from DT, acquire iommu/cdm handles, fill the
 * cached capability structure and create work queues / debugfs entries.
 */
int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
	int *iommu_hdl, cam_jpeg_mini_dump_cb mini_dump_cb)
{
	int i, rc;
	uint32_t num_dev;
	uint32_t num_dma_dev;
	struct cam_hw_mgr_intf *hw_mgr_intf;
	struct cam_iommu_handle cdm_handles;

	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
	if (!of_node || !hw_mgr_intf) {
		CAM_ERR(CAM_JPEG, "Invalid args of_node %pK hw_mgr %pK",
			of_node, hw_mgr_intf);
		return -EINVAL;
	}

	/* Publish this manager's ops through the caller-owned intf struct */
	memset(hw_mgr_hdl, 0x0, sizeof(struct cam_hw_mgr_intf));
	hw_mgr_intf->hw_mgr_priv = &g_jpeg_hw_mgr;
	hw_mgr_intf->hw_get_caps = cam_jpeg_mgr_get_hw_caps;
	hw_mgr_intf->hw_acquire = cam_jpeg_mgr_acquire_hw;
	hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
	hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
	hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush;
	hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
	hw_mgr_intf->hw_cmd = cam_jpeg_mgr_cmd;
	hw_mgr_intf->hw_dump = cam_jpeg_mgr_hw_dump;
	hw_mgr_intf->hw_inject_evt = cam_jpeg_mgr_inject_evt;

	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
	spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);

	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
		mutex_init(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);

	rc = cam_jpeg_init_devices(of_node, &num_dev, &num_dma_dev);
	if (rc) {
		CAM_ERR(CAM_JPEG, "jpeg init devices %d", rc);
		goto smmu_get_failed;
	}

	rc = cam_smmu_get_handle("jpeg", &g_jpeg_hw_mgr.iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_JPEG, "jpeg get iommu handle failed %d", rc);
		goto smmu_get_failed;
	}

	CAM_DBG(CAM_JPEG, "mmu handle :%d", g_jpeg_hw_mgr.iommu_hdl);

	rc = cam_cdm_get_iommu_handle("jpegenc", &cdm_handles);
	if (rc) {
		CAM_ERR(CAM_JPEG, "acquire cdm iommu handle Fail %d", rc);
		g_jpeg_hw_mgr.cdm_iommu_hdl = -1;
		g_jpeg_hw_mgr.cdm_iommu_hdl_secure = -1;
		goto cdm_iommu_failed;
	}
	g_jpeg_hw_mgr.cdm_iommu_hdl = cdm_handles.non_secure;
	g_jpeg_hw_mgr.cdm_iommu_hdl_secure = cdm_handles.secure;

	/*
	 * NOTE(review): iommu_sec_hdl is published in jpeg_caps below but no
	 * secure smmu handle is acquired in this function — confirm it is
	 * set elsewhere before use.
	 */
	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.non_secure =
		g_jpeg_hw_mgr.iommu_hdl;
	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.secure =
		g_jpeg_hw_mgr.iommu_sec_hdl;
	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.non_secure =
		g_jpeg_hw_mgr.cdm_iommu_hdl;
	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.secure =
		g_jpeg_hw_mgr.cdm_iommu_hdl_secure;
	g_jpeg_hw_mgr.jpeg_caps.num_enc = num_dev;
	g_jpeg_hw_mgr.jpeg_caps.num_dma = num_dma_dev;
	/* Hard-coded hw revision 4.2.0 advertised for both core types */
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.major = 4;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.minor = 2;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.incr = 0;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.reserved = 0;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.major = 4;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.minor = 2;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.incr = 0;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.reserved = 0;

	g_jpeg_hw_mgr.mini_dump_cb = mini_dump_cb;

	rc = cam_jpeg_setup_workqs();
	if (rc) {
		CAM_ERR(CAM_JPEG, "setup work qs failed %d", rc);
		goto cdm_iommu_failed;
	}

	if (iommu_hdl)
		*iommu_hdl = g_jpeg_hw_mgr.iommu_hdl;

	cam_common_register_mini_dump_cb(cam_jpeg_hw_mgr_mini_dump_cb, "CAM_JPEG",
		NULL);
	cam_jpeg_mgr_create_debugfs_entry();
	cam_jpeg_test_irq_line_at_probe();

	return 0;

cdm_iommu_failed:
	cam_smmu_destroy_handle(g_jpeg_hw_mgr.iommu_hdl);
	g_jpeg_hw_mgr.iommu_hdl = 0;
smmu_get_failed:
	mutex_destroy(&g_jpeg_hw_mgr.hw_mgr_mutex);
	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
	mutex_destroy(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);

	return rc;
}
diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
new file mode 100644
index 0000000000..189543370d
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -0,0 +1,227 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef CAM_JPEG_HW_MGR_H
#define CAM_JPEG_HW_MGR_H

/*
 * NOTE(review): the <...> targets of the next three #include lines were
 * lost during extraction (likely linux/ headers) — restore them from the
 * original file.
 */
#include
#include
#include

#include "cam_jpeg_hw_intf.h"
#include "cam_hw_mgr_intf.h"
#include "cam_hw_intf.h"
#include "cam_req_mgr_workq.h"
#include "cam_mem_mgr.h"

#define CAM_JPEG_WORKQ_NUM_TASK 30
#define CAM_JPEG_WORKQ_TASK_CMD_TYPE 1
#define CAM_JPEG_WORKQ_TASK_MSG_TYPE 2
#define CAM_JPEG_HW_CFG_Q_MAX 50

/*
 * Response time threshold in ms beyond which a request is not expected
 * to be with JPEG hw
 * NOTE(review): cam_jpeg_mgr_hw_dump() compares this value against a
 * microsecond delta (ktime_us_delta) — confirm whether the unit here
 * should read "us" rather than "ms".
 */
#define CAM_JPEG_RESPONSE_TIME_THRESHOLD 100000

/**
 * struct cam_jpeg_process_frame_work_data_t
 *
 * @type: Task type
 * @data: Pointer to command data
 * @request_id: Request id
 */
struct cam_jpeg_process_frame_work_data_t {
	uint32_t type;
	void *data;
	uintptr_t request_id;
};

/**
 * struct cam_jpeg_process_irq_work_data_t
 *
 * @type : Task type
 * @output_encode_size : Result size of encoder
 * @is_dma_frame_done : Flag to indicate whether the dma frame done was received or not
 * @irq_status : IRQ status
 * @data : Pointer to message data
 */
struct cam_jpeg_process_irq_work_data_t {
	uint32_t type;
	/* Per-core result payload; which member is valid depends on @type */
	union core_data {
		int32_t output_encode_size;
		int32_t is_dma_frame_done;
		int32_t irq_data;
	} u;
	uint32_t irq_status;
	void *data;
};

/**
 * struct cam_jpeg_hw_cdm_info_t
 *
 * @ref_cnt: Ref count of how many times device type is acquired
 * @cdm_handle: Cdm handle
 * @cdm_ops: Cdm ops struct
 */
struct cam_jpeg_hw_cdm_info_t {
	int ref_cnt;
	uint32_t cdm_handle;
	struct cam_cdm_utils_ops *cdm_ops;
};

/**
 * struct cam_jpeg_hw_cfg_req
 *
 * @list: List head
 * @hw_cfg_args: Hw config args
 * @dev_type: Dev type for cfg request
 * @req_id: Request Id
 * @submit_timestamp: Timestamp of submitting request
 * @num_hw_entry_processed: Cdm payloads already processed
 */
struct cam_jpeg_hw_cfg_req {
	struct list_head list;
	struct cam_hw_config_args hw_cfg_args;
	uint32_t dev_type;
	uintptr_t req_id;
	ktime_t submit_timestamp;
	uint32_t num_hw_entry_processed;
};

/**
 * struct cam_jpeg_hw_ctx_data
 *
 * @context_priv: Context private data, cam_context from
 *     acquire.
 * @ctx_mutex: Mutex for context
 * @jpeg_dev_acquire_info: Acquire device info
 * @ctxt_event_cb: Context callback function
 * @in_use: Flag for context usage
 * @wait_complete: Completion info
 * @cdm_cmd: Cdm cmd submitted for that context.
 * @mini_dump_cb: Mini dump cb
 * @evt_inject_params: Event injection params for hw_mgr_ctx
 */
struct cam_jpeg_hw_ctx_data {
	void *context_priv;
	struct mutex ctx_mutex;
	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
	cam_hw_event_cb_func ctxt_event_cb;
	bool in_use;
	struct completion wait_complete;
	struct cam_cdm_bl_request *cdm_cmd;
	cam_ctx_mini_dump_cb_func mini_dump_cb;
	struct cam_hw_inject_evt_param evt_inject_params;
};

/**
 * struct cam_jpeg_hw_mgr
 * @hw_mgr_mutex: Mutex for JPEG hardware manager
 * @hw_mgr_lock: Spinlock for JPEG hardware manager
 * @ctx_data: Context data
 * @jpeg_caps: JPEG capabilities
 * @iommu_hdl: Non secure IOMMU handle
 * @iommu_sec_hdl: Secure IOMMU handle
 * @work_process_frame: Work queue for hw config requests
 * @work_process_irq_cb: Work queue for processing IRQs.
 * @process_frame_work_data: Work data pool for hw config
 *     requests
 * @process_irq_cb_work_data: Work data pool for irq requests
 * @cdm_iommu_hdl: Iommu handle received from cdm
 * @cdm_iommu_hdl_secure: Secure iommu handle received from cdm
 * @dentry: Debugfs entry
 * @camnoc_misr_test: debugfs entry to select camnoc_misr for read or write path
 * @bug_on_misr: enable/disable bug on when misr mismatch is seen
 * @devices: Core hw Devices of JPEG hardware manager
 * @cdm_info: Cdm info for each core device.
 * @cdm_reg_map: Regmap of each device for cdm.
 * @device_in_use: Flag device being used for an active request
 * @dev_hw_cfg_args: Current cfg request per core dev
 * @hw_config_req_list: Pending hw update requests list
 * @free_req_list: Free nodes for above list
 * @req_list: Nodes of hw update list
 * @num_pid: num of pids supported in the device
 * @mini_dump_cb: Mini dump cb
 */
struct cam_jpeg_hw_mgr {
	struct mutex hw_mgr_mutex;
	spinlock_t hw_mgr_lock;
	struct cam_jpeg_hw_ctx_data ctx_data[CAM_JPEG_CTX_MAX];
	struct cam_jpeg_query_cap_cmd jpeg_caps;
	int32_t iommu_hdl;
	int32_t iommu_sec_hdl;
	struct cam_req_mgr_core_workq *work_process_frame;
	struct cam_req_mgr_core_workq *work_process_irq_cb;
	struct cam_jpeg_process_frame_work_data_t *process_frame_work_data;
	struct cam_jpeg_process_irq_work_data_t *process_irq_cb_work_data;
	int cdm_iommu_hdl;
	int cdm_iommu_hdl_secure;
	struct dentry *dentry;
	u64 camnoc_misr_test;
	u64 bug_on_misr;

	struct cam_hw_intf **devices[CAM_JPEG_DEV_TYPE_MAX];
	struct cam_jpeg_hw_cdm_info_t cdm_info[CAM_JPEG_DEV_TYPE_MAX]
		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
	struct cam_soc_reg_map *cdm_reg_map[CAM_JPEG_DEV_TYPE_MAX]
		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
	uint32_t device_in_use[CAM_JPEG_DEV_TYPE_MAX]
		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
	struct cam_jpeg_hw_cfg_req *dev_hw_cfg_args[CAM_JPEG_DEV_TYPE_MAX]
		[CAM_JPEG_NUM_DEV_PER_RES_MAX];

	struct list_head hw_config_req_list;
	struct list_head free_req_list;
	struct cam_jpeg_hw_cfg_req req_list[CAM_JPEG_HW_CFG_Q_MAX];
	uint32_t num_pid[CAM_JPEG_DEV_TYPE_MAX];
	cam_jpeg_mini_dump_cb mini_dump_cb;
};

/**
 * struct cam_jpeg_hw_mini_dump_req
 *
 * @submit_timestamp: Time stamp of request submit
 * @req_id: Request Id
 * @dev_type: Dev type
 * @num_hw_entry_processed: Num of hw entry processed
 */
struct cam_jpeg_hw_mini_dump_req {
	ktime_t submit_timestamp;
	uintptr_t req_id;
	uint32_t dev_type;
	uint32_t num_hw_entry_processed;
};

/**
 * struct cam_jpeg_hw_ctx_mini_dump
 *
* @acquire_info: Acquire info + * @hw_ctx: hw context info + * @in_use: flag to indicate if in use + */ +struct cam_jpeg_hw_ctx_mini_dump { + struct cam_jpeg_acquire_dev_info acquire_info; + struct cam_hw_mini_dump_info hw_ctx; + bool in_use; +}; + +/** + * struct cam_jpeg_hw_mgr_mini_dump + * + * @ctx: Context info array + * @cfg_req: Cfg req + * @core: core info + * @num_context: Num of context + */ +struct cam_jpeg_hw_mgr_mini_dump { + struct cam_jpeg_hw_ctx_mini_dump *ctx[CAM_JPEG_CTX_MAX]; + struct cam_jpeg_hw_mini_dump_req cfg_req[CAM_JPEG_DEV_TYPE_MAX]; + struct cam_jpeg_mini_dump_core_info core[CAM_JPEG_RES_TYPE_MAX]; + uint32_t num_context; +}; +#endif /* CAM_JPEG_HW_MGR_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h new file mode 100644 index 0000000000..ec09bea926 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_HW_INTF_H +#define CAM_JPEG_HW_INTF_H + +#include "cam_cpas_api.h" + +#define CAM_JPEG_DEV_PER_TYPE_MAX 1 + +#define CAM_JPEG_CMD_BUF_MAX_SIZE 128 +#define CAM_JPEG_MSG_BUF_MAX_SIZE CAM_JPEG_CMD_BUF_MAX_SIZE + +#define JPEG_VOTE 640000000 + +#define CAM_JPEG_HW_DUMP_TAG_MAX_LEN 128 +#define CAM_JPEG_HW_DUMP_NUM_WORDS 5 +#define CAM_JPEG_HW_MAX_NUM_PID 2 +#define CAM_JPEG_CAMNOC_MISR_VAL_ROW 2 +#define CAM_JPEG_CAMNOC_MISR_VAL_COL 4 +#define CAM_JPEG_ENC_MISR_VAL_NUM 3 +#define CAM_JPEG_MISR_ID_LOW_RD 1 +#define CAM_JPEG_MISR_ID_LOW_WR 2 +#define CAM_JPEG_MEM_BASE_INDEX 0 + +/** + * struct cam_jpeg_irq_cb_data - Data that gets passed from IRQ when the cb function is called + * @private_data : Void * for privat data + * @jpeg_req : Jpeg reguest data stored during prepare update + */ +struct cam_jpeg_irq_cb_data { + void *private_data; + struct cam_jpeg_request_data *jpeg_req; +}; + +struct cam_jpeg_set_irq_cb { + int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status, int32_t result_size, void *data); + struct cam_jpeg_irq_cb_data irq_cb_data; + uint32_t b_set_cb; +}; + +struct cam_jpeg_hw_dump_args { + uint64_t request_id; + uintptr_t cpu_addr; + size_t offset; + size_t buf_len; +}; + +struct cam_jpeg_hw_dump_header { + uint8_t tag[CAM_JPEG_HW_DUMP_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +struct cam_jpeg_match_pid_args { + uint32_t pid; + uint32_t fault_mid; + bool pid_match_found; + uint32_t match_res; +}; + +struct cam_jpeg_mini_dump_core_info { + uint32_t framedone; + uint32_t resetdone; + uint32_t iserror; + uint32_t stopdone; + uint32_t open_count; + int32_t ref_count; + uint32_t core_state; + uint32_t hw_state; +}; + +/** + * struct cam_jpeg_misr_dump_args + * @req_id : Request Id + * @enable_bug : This flag indicates whether BUG_ON(1) has to be called or not on MISR mismatch + */ +struct cam_jpeg_misr_dump_args { + uint32_t req_id; + bool enable_bug; +}; + +/** + * struct cam_jpeg_hw_buf_done_evt_data + * @buf_done_data: buf 
done payload + * @evt_id: event id (success/cancel/error) + */ +struct cam_jpeg_hw_buf_done_evt_data { + void *buf_done_data; + uint32_t evt_id; +}; + +enum cam_jpeg_cmd_type { + CAM_JPEG_CMD_CDM_CFG, + CAM_JPEG_CMD_SET_IRQ_CB, + CAM_JPEG_CMD_HW_DUMP, + CAM_JPEG_CMD_GET_NUM_PID, + CAM_JPEG_CMD_MATCH_PID_MID, + CAM_JPEG_CMD_MINI_DUMP, + CAM_JPEG_CMD_CONFIG_HW_MISR, + CAM_JPEG_CMD_DUMP_HW_MISR_VAL, + CAM_JPEG_CMD_DUMP_DEBUG_REGS, + CAM_JPEG_CMD_MAX, +}; + +enum cam_jpeg_hw_event_type { + CAM_JPEG_EVT_ID_BUF_DONE, + CAM_JPEG_EVT_ID_INDUCE_ERR, +}; + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h new file mode 100644 index 0000000000..c6168f62f5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_HW_MGR_INTF_H +#define CAM_JPEG_HW_MGR_INTF_H + +#include +#include +#include + +#define CAM_JPEG_CTX_MAX 24 + +enum cam_jpeg_hw_type { + CAM_JPEG_DEV_ENC, + CAM_JPEG_DEV_DMA, + CAM_JPEG_DEV_MAX, +}; + +/** + * struct cam_jpeg_request_data - Jpeg request data received from command buffers + * @dev_type : Jpeg device type(ENC vs DMA) + * @request_id : Request ID + * @thumbnail_threshold_size : Threshold size for thumbnail image + * @out_size_mem_handle : handle to the buffer to share encoded output size with userspace + * @out_size_offset : offset to memory where out_size_mem_handle is stored + */ +struct cam_jpeg_request_data { + uint32_t dev_type; + uint64_t request_id; + uint32_t thumbnail_threshold_size; + __s32 out_size_mem_handle; + uint32_t out_size_offset; +}; + +typedef void (*cam_jpeg_mini_dump_cb)(void *priv, void *dst); + +int cam_jpeg_hw_mgr_init(struct device_node *of_node, + uint64_t *hw_mgr_hdl, int *iommu_hdl, + cam_jpeg_mini_dump_cb mini_dump_cb); + +#endif /* CAM_JPEG_HW_MGR_INTF_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_165_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_165_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..2c6965125c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_165_hw_info_ver_4_2_0.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_DMA_165_HW_INFO_VER_4_2_0_H +#define CAM_JPEG_DMA_165_HW_INFO_VER_4_2_0_H + +#define CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE (1 << 0) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RD_BUF_DONE (1 << 1) +#define CAM_JPEGDMA_HW_IRQ_STATUS_WR_BUF_DONE (1 << 5) +#define CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT (1 << 9) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE (1 << 10) + +#define CAM_JPEG_HW_MASK_SCALE_ENABLE 0x1 + +#define CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE +#define CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK \ + CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE + +static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_165_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x14, + .int_status = 0x10, + .int_mask = 0x0C, + .hw_cmd = 0x1C, + .reset_cmd = 0x08, + .encode_size = 0x180, + .core_cfg = 0x18, + .misr_cfg0 = 0x160, + .misr_cfg1 = 0x164, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x32083, + .hw_cmd_stop = 0x00000004, + .misr_cfg0 = 0x506, + }, + .int_status = { + .framedone = CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK, + .iserror = 0x0, + .stopdone = CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT, + .scale_enable = CAM_JPEG_HW_MASK_SCALE_ENABLE, + .scale_enable_shift = 0x4, + }, + .camnoc_misr_reg_offset = { + .main_ctl = 0x5908, + .id_mask_low = 0x5920, + .id_value_low = 0x5918, + .misc_ctl = 0x5910, + .sigdata0 = 0x5950, + }, + .camnoc_misr_reg_val = { + .main_ctl = 0x7, + .id_mask_low = 0xFC0, + .id_value_low_rd = 0xD00, + .id_value_low_wr = 0xD42, + .misc_ctl_start = 0x1, + .misc_ctl_stop = 0x2, + }, + .max_misr = 3, + .max_misr_rd = 4, + .max_misr_wr = 4, + .camnoc_misr_sigdata = 4, + .master_we_sel = 2, + .misr_rd_word_sel = 4, + .camnoc_misr_support = 1, +}; + +#endif /* CAM_JPEG_DMA_165_HW_INFO_VER_4_2_0_H */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_580_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_580_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..837cd3ef8a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_580_hw_info_ver_4_2_0.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_JPEG_DMA_580_HW_INFO_VER_4_2_0_H +#define CAM_JPEG_DMA_580_HW_INFO_VER_4_2_0_H + +#define CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE (1 << 0) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RD_BUF_DONE (1 << 1) +#define CAM_JPEGDMA_HW_IRQ_STATUS_WR_BUF_DONE (1 << 5) +#define CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT (1 << 9) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE (1 << 10) + +#define CAM_JPEG_HW_MASK_SCALE_ENABLE 0x1 + +#define CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE +#define CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK \ + CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE + +static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_580_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x14, + .int_status = 0x10, + .int_mask = 0x0C, + .hw_cmd = 0x1C, + .reset_cmd = 0x08, + .encode_size = 0x180, + .core_cfg = 0x18, + .misr_cfg0 = 0x160, + .misr_cfg1 = 0x164, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x32083, + .hw_cmd_stop = 0x00000004, + .misr_cfg0 = 0x506, + }, + .int_status = { + .framedone = CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK, + .iserror = 0x0, + .stopdone = CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT, + .scale_enable = CAM_JPEG_HW_MASK_SCALE_ENABLE, + .scale_enable_shift = 0x4, + }, + .camnoc_misr_reg_offset = { + .main_ctl = 0x3608, + 
.id_mask_low = 0x3620, + .id_value_low = 0x3618, + .misc_ctl = 0x3610, + .sigdata0 = 0x3650, + }, + .camnoc_misr_reg_val = { + .main_ctl = 0x7, + .id_mask_low = 0xFC0, + .id_value_low_rd = 0xD00, + .id_value_low_wr = 0xD42, + .misc_ctl_start = 0x1, + .misc_ctl_stop = 0x2, + }, + .max_misr = 3, + .max_misr_rd = 4, + .max_misr_wr = 4, + .camnoc_misr_sigdata = 4, + .master_we_sel = 2, + .misr_rd_word_sel = 4, + .camnoc_misr_support = 1, +}; + +#endif /* CAM_JPEG_DMA_580_HW_INFO_VER_4_2_0_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_680_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_680_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..260a1d55a2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_680_hw_info_ver_4_2_0.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_DMA_680_HW_INFO_VER_4_2_0_H +#define CAM_JPEG_DMA_680_HW_INFO_VER_4_2_0_H + +#define CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE (1 << 0) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RD_BUF_DONE (1 << 1) +#define CAM_JPEGDMA_HW_IRQ_STATUS_WR_BUF_DONE (1 << 5) +#define CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT (1 << 9) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE (1 << 10) + +#define CAM_JPEG_HW_MASK_SCALE_ENABLE 0x1 + +#define CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE +#define CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK \ + CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE + +static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_680_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x14, + .int_status = 0x10, + .int_mask = 0x0C, + .hw_cmd = 0x1C, + .reset_cmd = 0x08, + .encode_size = 0x180, + .core_cfg = 0x18, + .misr_cfg0 = 0x160, + .misr_cfg1 = 0x164, + }, + .debug_reg_offset = { + .top_offset = 0x0, + .top_range = 0x19, + .we_offset = 0xB8, + .we_range = 0x9, + .we_qos_offset = 0x144, + .we_qos_range = 0x1C, + .perf_offset = 0xFEC, + .perf_range = 0x5, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x32083, + .hw_cmd_stop = 0x00000004, + .misr_cfg0 = 0x506, + }, + .int_status = { + .framedone = CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK, + .iserror = 0x0, + .stopdone = CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT, + .scale_enable = CAM_JPEG_HW_MASK_SCALE_ENABLE, + .scale_enable_shift = 0x4, + }, + .camnoc_misr_reg_offset = { + .main_ctl = 0x8108, + .id_mask_low = 0x8120, + .id_value_low = 0x8118, + .misc_ctl = 0x8110, + .sigdata0 = 0x8150, + }, + .camnoc_misr_reg_val = { + .main_ctl = 0x7, + .id_mask_low = 0xFC0, + .id_value_low_rd = 0xD00, + .id_value_low_wr = 0xD42, + .misc_ctl_start = 0x1, + .misc_ctl_stop = 0x2, + }, + .max_misr = 3, + .max_misr_rd = 4, + .max_misr_wr = 4, 
+ .camnoc_misr_sigdata = 4, + .master_we_sel = 2, + .misr_rd_word_sel = 4, + .camnoc_misr_support = 1, +}; + +#endif /* CAM_JPEG_DMA_680_HW_INFO_VER_4_2_0_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_780_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_780_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..eb11a6fe25 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_780_hw_info_ver_4_2_0.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_JPEG_DMA_780_HW_INFO_VER_4_2_0_H +#define CAM_JPEG_DMA_780_HW_INFO_VER_4_2_0_H + +#define CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE (1 << 0) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RD_BUF_DONE (1 << 1) +#define CAM_JPEGDMA_HW_IRQ_STATUS_WR_BUF_DONE (1 << 5) +#define CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT (1 << 9) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE (1 << 10) + +#define CAM_JPEG_HW_MASK_SCALE_ENABLE 0x1 + +#define CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE +#define CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK \ + CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE + +static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_780_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x14, + .int_status = 0x10, + .int_mask = 0x0C, + .hw_cmd = 0x1C, + .reset_cmd = 0x08, + .encode_size = 0x180, + .core_cfg = 0x18, + .misr_cfg0 = 0x160, + .misr_cfg1 = 0x164, + }, + .debug_reg_offset = { + .top_offset = 0x0, + .top_range = 0x19, + .we_offset = 0xB8, + .we_range = 0x9, + .we_qos_offset = 0x144, + .we_qos_range = 0x1C, + .perf_offset = 0xFEC, + .perf_range = 0x5, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 
0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x32083, + .hw_cmd_stop = 0x00000004, + .misr_cfg0 = 0x506, + }, + .int_status = { + .framedone = CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK, + .iserror = 0x0, + .stopdone = CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT, + .scale_enable = CAM_JPEG_HW_MASK_SCALE_ENABLE, + .scale_enable_shift = 0x4, + }, + .camnoc_misr_reg_offset = { + .main_ctl = 0x2C88, + .id_mask_low = 0x2CA0, + .id_value_low = 0x2C98, + .misc_ctl = 0x2C90, + }, + .camnoc_misr_reg_val = { + .main_ctl = 0x3, + .id_mask_low = 0xFC0, + .id_value_low_rd = 0xD00, + .id_value_low_wr = 0xD42, + .misc_ctl_start = 0x1, + .misc_ctl_stop = 0x2, + }, + .max_misr = 3, + .max_misr_rd = 4, + .max_misr_wr = 4, + .camnoc_misr_sigdata = 0, + .master_we_sel = 2, + .misr_rd_word_sel = 4, + .camnoc_misr_support = 0, +}; + +#endif /* CAM_JPEG_DMA_780_HW_INFO_VER_4_2_0_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..b70cdc346f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_hw_info_ver_4_2_0.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_DMA_HW_INFO_VER_4_2_0_H +#define CAM_JPEG_DMA_HW_INFO_VER_4_2_0_H + +#define CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE (1 << 0) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RD_BUF_DONE (1 << 1) +#define CAM_JPEGDMA_HW_IRQ_STATUS_WR_BUF_DONE (1 << 5) +#define CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT (1 << 9) +#define CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE (1 << 10) + +#define CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE +#define CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK \ + CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE + +static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x14, + .int_status = 0x10, + .int_mask = 0x0C, + .hw_cmd = 0x1C, + .reset_cmd = 0x08, + .encode_size = 0x180, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x32083, + .hw_cmd_stop = 0x00000004, + }, + .int_status = { + .framedone = CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK, + .iserror = 0x0, + .stopdone = CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT, + }, + .camnoc_misr_support = 0, +}; + +#endif /* CAM_JPEG_DMA_HW_INFO_VER_4_2_0_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c new file mode 100644 index 0000000000..99b6a8eb2c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c @@ -0,0 +1,815 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "jpeg_dma_core.h" +#include "jpeg_dma_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_jpeg_hw_intf.h" +#include "cam_jpeg_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" + +#define CAM_JPEG_HW_IRQ_IS_FRAME_DONE(jpeg_irq_status, hi) \ + ((jpeg_irq_status) & (hi)->int_status.framedone) +#define CAM_JPEG_HW_IRQ_IS_RESET_ACK(jpeg_irq_status, hi) \ + ((jpeg_irq_status) & (hi)->int_status.resetdone) +#define CAM_JPEG_HW_IRQ_IS_STOP_DONE(jpeg_irq_status, hi) \ + ((jpeg_irq_status) & (hi)->int_status.stopdone) + +#define CAM_JPEG_DMA_RESET_TIMEOUT msecs_to_jiffies(500) + +int cam_jpeg_dma_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_dma_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + int rc; + + if (!device_priv) { + CAM_ERR(CAM_JPEG, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &jpeg_dma_dev->soc_info; + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + + if (!soc_info || !core_info) { + CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + mutex_lock(&core_info->core_mutex); + if (++core_info->ref_count > 1) { + mutex_unlock(&core_info->core_mutex); + return 0; + } + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 2; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_READ; + axi_vote.axi_path[0].camnoc_bw = JPEG_VOTE; + axi_vote.axi_path[0].mnoc_ab_bw = JPEG_VOTE; + axi_vote.axi_path[0].mnoc_ib_bw = JPEG_VOTE; + 
axi_vote.axi_path[1].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[1].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[1].camnoc_bw = JPEG_VOTE; + axi_vote.axi_path[1].mnoc_ab_bw = JPEG_VOTE; + axi_vote.axi_path[1].mnoc_ib_bw = JPEG_VOTE; + + + rc = cam_cpas_start(core_info->cpas_handle, + &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_JPEG, "cpass start failed: %d", rc); + goto cpas_failed; + } + + rc = cam_jpeg_dma_enable_soc_resources(soc_info); + if (rc) { + CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc); + goto soc_failed; + } + + mutex_unlock(&core_info->core_mutex); + + return 0; + +soc_failed: + cam_cpas_stop(core_info->cpas_handle); +cpas_failed: + --core_info->ref_count; + mutex_unlock(&core_info->core_mutex); + + return rc; +} + +int cam_jpeg_dma_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_dma_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + int rc; + + if (!device_priv) { + CAM_ERR(CAM_JPEG, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &jpeg_dma_dev->soc_info; + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + if (!soc_info || !core_info) { + CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + mutex_lock(&core_info->core_mutex); + if (--core_info->ref_count > 0) { + mutex_unlock(&core_info->core_mutex); + return 0; + } + + if (core_info->ref_count < 0) { + CAM_ERR(CAM_JPEG, "ref cnt %d", core_info->ref_count); + core_info->ref_count = 0; + mutex_unlock(&core_info->core_mutex); + return -EFAULT; + } + + rc = cam_jpeg_dma_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_JPEG, "soc disable failed %d", rc); + + rc = cam_cpas_stop(core_info->cpas_handle); + if (rc) + CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc); + + mutex_unlock(&core_info->core_mutex); + + return 0; +} + +irqreturn_t 
cam_jpeg_dma_irq(int irq_num, void *data) +{ + struct cam_hw_info *jpeg_dma_dev = data; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + uint32_t irq_status = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_dma_device_hw_info *hw_info = NULL; + void __iomem *mem_base; + + if (!jpeg_dma_dev) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return IRQ_HANDLED; + } + soc_info = &jpeg_dma_dev->soc_info; + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + hw_info = core_info->jpeg_dma_hw_info; + mem_base = soc_info->reg_map[0].mem_base; + + irq_status = cam_io_r_mb(mem_base + + core_info->jpeg_dma_hw_info->reg_offset.int_status); + cam_io_w_mb(irq_status, + soc_info->reg_map[0].mem_base + + core_info->jpeg_dma_hw_info->reg_offset.int_clr); + CAM_DBG(CAM_JPEG, "irq_num: %d irq_status: 0x%x , core_state: %d", + irq_num, irq_status, core_info->core_state); + if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status, hw_info)) { + spin_lock(&jpeg_dma_dev->hw_lock); + if (core_info->core_state == CAM_JPEG_DMA_CORE_READY) { + CAM_TRACE(CAM_JPEG, "DMA FrameDone IRQ"); + CAM_DBG(CAM_JPEG, "frane_done"); + core_info->core_state = CAM_JPEG_DMA_CORE_RESETTING_ON_DONE; + cam_io_w_mb(hw_info->reg_val.reset_cmd, + mem_base + hw_info->reg_offset.reset_cmd); + } else { + CAM_WARN(CAM_JPEG, "unexpected frame done "); + core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; + } + spin_unlock(&jpeg_dma_dev->hw_lock); + } + + if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status, hw_info)) { + spin_lock(&jpeg_dma_dev->hw_lock); + if (core_info->core_state == CAM_JPEG_DMA_CORE_RESETTING) { + core_info->core_state = CAM_JPEG_DMA_CORE_READY; + complete(&jpeg_dma_dev->hw_complete); + CAM_DBG(CAM_JPEG, "JPEG DMA %s reset done", + jpeg_dma_dev->soc_info.dev_name); + } else if (core_info->core_state == CAM_JPEG_DMA_CORE_RESETTING_ON_DONE) { + if (core_info->irq_cb.jpeg_hw_mgr_cb) { + core_info->irq_cb.jpeg_hw_mgr_cb(irq_status, 1, + (void 
*)&core_info->irq_cb.irq_cb_data); + } else { + CAM_WARN(CAM_JPEG, "unexpected frame done"); + } + core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; + } else { + CAM_ERR(CAM_JPEG, "unexpected reset irq"); + } + spin_unlock(&jpeg_dma_dev->hw_lock); + } + + if (CAM_JPEG_HW_IRQ_IS_STOP_DONE(irq_status, hw_info)) { + spin_lock(&jpeg_dma_dev->hw_lock); + if (core_info->core_state == CAM_JPEG_DMA_CORE_ABORTING) { + core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; + complete(&jpeg_dma_dev->hw_complete); + if (core_info->irq_cb.jpeg_hw_mgr_cb) { + core_info->irq_cb.jpeg_hw_mgr_cb(irq_status, 0, + (void *)&core_info->irq_cb.irq_cb_data); + } + } else { + CAM_ERR(CAM_JPEG, "unexpected abort irq"); + } + spin_unlock(&jpeg_dma_dev->hw_lock); + } + + return IRQ_HANDLED; +} + +int cam_jpeg_dma_reset_hw(void *data, + void *start_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_dma_dev = data; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_dma_device_hw_info *hw_info = NULL; + void __iomem *mem_base; + unsigned long rem_jiffies; + int rc = 0; + + if (!jpeg_dma_dev) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + /* maskdisable.clrirq.maskenable.resetcmd */ + soc_info = &jpeg_dma_dev->soc_info; + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + hw_info = core_info->jpeg_dma_hw_info; + mem_base = soc_info->reg_map[0].mem_base; + + mutex_lock(&core_info->core_mutex); + spin_lock(&jpeg_dma_dev->hw_lock); + if (core_info->core_state == CAM_JPEG_DMA_CORE_RESETTING) { + CAM_ERR(CAM_JPEG, "dma alrady resetting"); + spin_unlock(&jpeg_dma_dev->hw_lock); + mutex_unlock(&core_info->core_mutex); + return 0; + } + + reinit_completion(&jpeg_dma_dev->hw_complete); + + core_info->core_state = CAM_JPEG_DMA_CORE_RESETTING; + spin_unlock(&jpeg_dma_dev->hw_lock); + + cam_io_w_mb(hw_info->reg_val.int_mask_disable_all, + mem_base + hw_info->reg_offset.int_mask); + 
cam_io_w_mb(hw_info->reg_val.int_clr_clearall, + mem_base + hw_info->reg_offset.int_clr); + cam_io_w_mb(hw_info->reg_val.int_mask_enable_all, + mem_base + hw_info->reg_offset.int_mask); + cam_io_w_mb(hw_info->reg_val.reset_cmd, + mem_base + hw_info->reg_offset.reset_cmd); + + rem_jiffies = cam_common_wait_for_completion_timeout( + &jpeg_dma_dev->hw_complete, + CAM_JPEG_DMA_RESET_TIMEOUT); + if (!rem_jiffies) { + CAM_ERR(CAM_JPEG, "dma error Reset Timeout"); + core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; + rc = -ETIMEDOUT; + } + + mutex_unlock(&core_info->core_mutex); + return rc; +} + +int cam_jpeg_dma_test_irq_line(void *data) +{ + struct cam_hw_info *jpeg_dma_dev = data; + int rc; + + if (!data) { + CAM_ERR(CAM_JPEG, "invalid args"); + return -EINVAL; + } + + rc = cam_jpeg_dma_init_hw(data, NULL, 0); + if (rc) { + CAM_ERR(CAM_JPEG, "failed to init hw (rc=%d)", rc); + return rc; + } + + rc = cam_jpeg_dma_reset_hw(data, NULL, 0); + if (rc) + CAM_ERR(CAM_JPEG, "failed to trigger reset irq (rc=%d)", rc); + else + CAM_INFO(CAM_JPEG, "verified JPEG DMA (%s) IRQ line", + jpeg_dma_dev->soc_info.dev_name); + + rc = cam_jpeg_dma_deinit_hw(data, NULL, 0); + if (rc) + CAM_ERR(CAM_JPEG, "failed to de-init hw (rc=%d)", rc); + + return 0; +} + +int cam_jpeg_dma_start_hw(void *data, + void *start_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_dma_dev = data; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_dma_device_hw_info *hw_info = NULL; + void __iomem *mem_base; + + if (!jpeg_dma_dev) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + + soc_info = &jpeg_dma_dev->soc_info; + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + hw_info = core_info->jpeg_dma_hw_info; + mem_base = soc_info->reg_map[0].mem_base; + + if (core_info->core_state != CAM_JPEG_DMA_CORE_READY) { + CAM_ERR(CAM_JPEG, "Error not ready"); + return -EINVAL; + } + + CAM_DBG(CAM_JPEG, 
"Starting DMA"); + cam_io_w_mb(0x00000601, + mem_base + hw_info->reg_offset.int_mask); + cam_io_w_mb(hw_info->reg_val.hw_cmd_start, + mem_base + hw_info->reg_offset.hw_cmd); + + return 0; +} + +int cam_jpeg_dma_stop_hw(void *data, + void *stop_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_dma_dev = data; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_dma_device_hw_info *hw_info = NULL; + void __iomem *mem_base; + unsigned long rem_jiffies; + + if (!jpeg_dma_dev) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + soc_info = &jpeg_dma_dev->soc_info; + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + hw_info = core_info->jpeg_dma_hw_info; + mem_base = soc_info->reg_map[0].mem_base; + + mutex_lock(&core_info->core_mutex); + spin_lock(&jpeg_dma_dev->hw_lock); + if (core_info->core_state == CAM_JPEG_DMA_CORE_ABORTING) { + CAM_ERR(CAM_JPEG, "alrady stopping"); + spin_unlock(&jpeg_dma_dev->hw_lock); + mutex_unlock(&core_info->core_mutex); + return 0; + } + + reinit_completion(&jpeg_dma_dev->hw_complete); + core_info->core_state = CAM_JPEG_DMA_CORE_ABORTING; + spin_unlock(&jpeg_dma_dev->hw_lock); + + cam_io_w_mb(hw_info->reg_val.hw_cmd_stop, + mem_base + hw_info->reg_offset.hw_cmd); + + rem_jiffies = cam_common_wait_for_completion_timeout( + &jpeg_dma_dev->hw_complete, + CAM_JPEG_DMA_RESET_TIMEOUT); + if (!rem_jiffies) { + CAM_ERR(CAM_JPEG, "error Reset Timeout"); + core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; + } + + mutex_unlock(&core_info->core_mutex); + return 0; +} + +static int cam_jpeg_dma_mini_dump(struct cam_hw_info *dev, void *args) { + + struct cam_jpeg_mini_dump_core_info *md; + struct cam_jpeg_dma_device_hw_info *hw_info; + struct cam_jpeg_dma_device_core_info *core_info; + + if (!dev || !args) { + CAM_ERR(CAM_JPEG, "Invalid params dev %pK args %pK", dev, args); + return -EINVAL; + } + + core_info = (struct 
cam_jpeg_dma_device_core_info *)dev->core_info; + hw_info = core_info->jpeg_dma_hw_info; + md = (struct cam_jpeg_mini_dump_core_info *)args; + + md->framedone = hw_info->int_status.framedone; + md->resetdone = hw_info->int_status.resetdone; + md->iserror = hw_info->int_status.iserror; + md->stopdone = hw_info->int_status.stopdone; + md->open_count = dev->open_count; + md->hw_state = dev->hw_state; + md->ref_count = core_info->ref_count; + md->core_state = core_info->core_state; + return 0; +} + +int cam_jpeg_dma_dump_camnoc_misr_val(struct cam_jpeg_dma_device_hw_info *hw_info, + struct cam_hw_soc_info *soc_info, void *cmd_args) +{ + void __iomem *dma_mem_base = NULL; + void __iomem *camnoc_mem_base = NULL; + struct cam_jpeg_misr_dump_args *pmisr_args; + int32_t camnoc_misr_val[CAM_JPEG_CAMNOC_MISR_VAL_ROW][ + CAM_JPEG_CAMNOC_MISR_VAL_COL] = {{0}}; + int i, rc = 0; + int32_t val; + uint32_t index = 0; + bool mismatch = false; + + dma_mem_base = soc_info->reg_map[0].mem_base; + camnoc_mem_base = soc_info->reg_map[1].mem_base; + pmisr_args = (struct cam_jpeg_misr_dump_args *)cmd_args; + if (!pmisr_args) { + CAM_ERR(CAM_JPEG, "Invalid command argument"); + return -EINVAL; + } + val = cam_io_r_mb(dma_mem_base + hw_info->reg_offset.core_cfg); + index = (val >> hw_info->int_status.scale_enable_shift) & + hw_info->int_status.scale_enable; + CAM_DBG(CAM_JPEG, "index %d", index); + + for (i = 0; i < hw_info->camnoc_misr_sigdata; i++) { + camnoc_misr_val[index][i] = cam_io_r_mb(camnoc_mem_base + + hw_info->camnoc_misr_reg_offset.sigdata0 + (i * 8)); + if (hw_info->prev_camnoc_misr_val[index][i] != + camnoc_misr_val[index][i]) + mismatch = true; + } + if (mismatch && (pmisr_args->req_id != 1)) { + CAM_ERR(CAM_JPEG, + "CAMNOC DMA_MISR MISMATCH [req:%d][i:%d][index:%d]\n" + "curr:0x%x %x %x %x prev:0x%x %x %x %x isbug:%d", + pmisr_args->req_id, i, index, + camnoc_misr_val[index][3], camnoc_misr_val[index][2], + camnoc_misr_val[index][1], camnoc_misr_val[index][0], + 
hw_info->prev_camnoc_misr_val[index][3], + hw_info->prev_camnoc_misr_val[index][2], + hw_info->prev_camnoc_misr_val[index][1], + hw_info->prev_camnoc_misr_val[index][0], + pmisr_args->enable_bug); + if (pmisr_args->enable_bug) + BUG_ON(1); + } + CAM_DBG(CAM_JPEG, + "CAMNOC DMA_MISR req:%d SigData:0x %x %x %x %x", + pmisr_args->req_id, + camnoc_misr_val[index][3], camnoc_misr_val[index][2], + camnoc_misr_val[index][1], camnoc_misr_val[index][0]); + for (i = 0; i < hw_info->camnoc_misr_sigdata; i++) + hw_info->prev_camnoc_misr_val[index][i] = + camnoc_misr_val[index][i]; + /* stop misr : cam_noc_cam_noc_0_req_link_misrprb_MiscCtl_Low */ + cam_io_w_mb(hw_info->camnoc_misr_reg_val.misc_ctl_stop, + camnoc_mem_base + hw_info->camnoc_misr_reg_offset.misc_ctl); + + return rc; +} + +int cam_jpeg_dma_dump_hw_misr_val(struct cam_jpeg_dma_device_hw_info *hw_info, + struct cam_hw_soc_info *soc_info, void *cmd_args) +{ + void __iomem *dma_mem_base = NULL; + struct cam_jpeg_misr_dump_args *pmisr_args; + int32_t dma_wr_misr_val[CAM_JPEG_CAMNOC_MISR_VAL_ROW][ + CAM_JPEG_CAMNOC_MISR_VAL_COL] = {{0}}; + int32_t dma_rd_misr_val[CAM_JPEG_CAMNOC_MISR_VAL_ROW][ + CAM_JPEG_CAMNOC_MISR_VAL_COL] = {{0}}; + int offset, i, rc = 0; + int32_t val; + uint32_t index = 0; + bool mismatch = false; + + dma_mem_base = soc_info->reg_map[0].mem_base; + pmisr_args = (struct cam_jpeg_misr_dump_args *)cmd_args; + if (!pmisr_args) { + CAM_ERR(CAM_JPEG, "Invalid command argument"); + return -EINVAL; + } + val = cam_io_r_mb(dma_mem_base + hw_info->reg_offset.core_cfg); + index = (val >> hw_info->int_status.scale_enable_shift) & + hw_info->int_status.scale_enable; + CAM_DBG(CAM_JPEG, "index %d", index); + + /* After the session is complete, read back the MISR values. 
+ * fetch engine MISR values + */ + offset = hw_info->reg_offset.misr_cfg1; + for (i = 0; i < hw_info->max_misr_rd; i++) { + val = i << hw_info->misr_rd_word_sel; + cam_io_w_mb(val, dma_mem_base + offset); + dma_rd_misr_val[index][i] = cam_io_r_mb(dma_mem_base + + offset + 0x4); + if (hw_info->prev_dma_rd_misr_val[index][i] != dma_rd_misr_val[index][i]) + mismatch = true; + } + if (mismatch && (pmisr_args->req_id != 1)) { + CAM_ERR(CAM_JPEG, + "CAMNOC DMA_RD_MISR MISMATCH [req:%d][index:%d][i:%d]\n" + "curr:0x%x %x %x %x prev:0x%x %x %x %x isbug:%d", + pmisr_args->req_id, index, i, + dma_rd_misr_val[index][3], dma_rd_misr_val[index][2], + dma_rd_misr_val[index][1], dma_rd_misr_val[index][0], + hw_info->prev_dma_rd_misr_val[index][3], + hw_info->prev_dma_rd_misr_val[index][2], + hw_info->prev_dma_rd_misr_val[index][1], + hw_info->prev_dma_rd_misr_val[index][0], + pmisr_args->enable_bug); + if (pmisr_args->enable_bug) + BUG_ON(1); + } + + CAM_DBG(CAM_JPEG, + "CORE JPEG DMA RD MISR: 0x%x %x %x %x", + dma_rd_misr_val[index][3], dma_rd_misr_val[index][2], + dma_rd_misr_val[index][1], dma_rd_misr_val[index][0]); + + mismatch = false; + for (i = 0; i < hw_info->max_misr_rd; i++) { + hw_info->prev_dma_rd_misr_val[index][i] = + dma_rd_misr_val[index][i]; + } + + /* write engine MISR values */ + for (i = 0; i < hw_info->max_misr_wr; i++) { + val = hw_info->master_we_sel | (i << hw_info->misr_rd_word_sel); + cam_io_w_mb(val, dma_mem_base + offset); + dma_wr_misr_val[index][i] = cam_io_r_mb(dma_mem_base + + offset + 0x4); + if (hw_info->prev_dma_wr_misr_val[index][i] != + dma_wr_misr_val[index][i]) + mismatch = true; + } + if (mismatch && (pmisr_args->req_id != 1)) { + CAM_ERR(CAM_JPEG, + "CAMNOC DMA_WR_MISR MISMATCH [req:%d][index:%d][i:%d]\n" + "curr:0x%x %x %x %x prev:0x%x %x %x %x isbug:%d", + pmisr_args->req_id, index, i, + dma_wr_misr_val[index][3], dma_wr_misr_val[index][2], + dma_wr_misr_val[index][1], dma_wr_misr_val[index][0], + 
hw_info->prev_dma_wr_misr_val[index][3], + hw_info->prev_dma_wr_misr_val[index][2], + hw_info->prev_dma_wr_misr_val[index][1], + hw_info->prev_dma_wr_misr_val[index][0], + pmisr_args->enable_bug); + if (pmisr_args->enable_bug) + BUG_ON(1); + } + CAM_DBG(CAM_JPEG, + "CORE JPEG DMA WR MISR: 0x%x %x %x %x", + dma_wr_misr_val[index][3], dma_wr_misr_val[index][2], + dma_wr_misr_val[index][1], dma_wr_misr_val[index][0]); + + for (i = 0; i < hw_info->max_misr_wr; i++) { + hw_info->prev_dma_wr_misr_val[index][i] = + dma_wr_misr_val[index][i]; + } + + return rc; +} + +int cam_jpeg_dma_config_cmanoc_hw_misr(struct cam_jpeg_dma_device_hw_info *hw_info, + struct cam_hw_soc_info *soc_info, void *cmd_args) +{ + void __iomem *dma_mem_base = NULL; + void __iomem *camnoc_mem_base = NULL; + uint32_t *camnoc_misr_test = NULL; + int val = 0; + + dma_mem_base = soc_info->reg_map[0].mem_base; + camnoc_mem_base = soc_info->reg_map[1].mem_base; + if (!camnoc_mem_base) { + CAM_ERR(CAM_JPEG, "Invalid camnoc base address"); + return -EINVAL; + } + camnoc_misr_test = (uint32_t *)cmd_args; + if (!camnoc_misr_test) { + CAM_ERR(CAM_JPEG, "Invalid command argument"); + return -EINVAL; + } + /* enable FE and WE with sample data mode */ + cam_io_w_mb(hw_info->reg_val.misr_cfg0, dma_mem_base + + hw_info->reg_offset.misr_cfg0); + + /* cam_noc_cam_noc_0_req_link_misrprb_MainCtl_Low + * enable CRC generation on both RD, WR and transaction payload + */ + cam_io_w_mb(hw_info->camnoc_misr_reg_val.main_ctl, camnoc_mem_base + + hw_info->camnoc_misr_reg_offset.main_ctl); + /* cam_noc_cam_noc_0_req_link_misrprb_IdMask_Low */ + cam_io_w_mb(hw_info->camnoc_misr_reg_val.main_ctl, camnoc_mem_base + + hw_info->camnoc_misr_reg_offset.id_mask_low); + /* cam_noc_cam_noc_0_req_link_misrprb_IdValue_Low */ + switch (*camnoc_misr_test) { + case CAM_JPEG_MISR_ID_LOW_RD: + val = hw_info->camnoc_misr_reg_val.id_value_low_rd; + break; + case CAM_JPEG_MISR_ID_LOW_WR: + val = hw_info->camnoc_misr_reg_val.id_value_low_wr; + 
break; + default: + val = hw_info->camnoc_misr_reg_val.id_value_low_rd; + break; + } + cam_io_w_mb(val, camnoc_mem_base + + hw_info->camnoc_misr_reg_offset.id_value_low); + /* start/reset misr : cam_noc_cam_noc_0_req_link_misrprb_MiscCtl_Low */ + cam_io_w_mb(hw_info->camnoc_misr_reg_val.misc_ctl_start, + camnoc_mem_base + hw_info->camnoc_misr_reg_offset.misc_ctl); + CAM_DBG(CAM_JPEG, "DMA CAMNOC and HW MISR configured"); + + return 0; +} + +int cam_jpeg_dma_dump_debug_regs(struct cam_hw_info *jpeg_dma_dev) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + + soc_info = &jpeg_dma_dev->soc_info; + core_info = (struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->core_info; + + CAM_INFO(CAM_JPEG, "************ JPEG DMA REGISTER DUMP ************"); + + /* JPEG DMA TOP, Interrupt, core config, command registers & Fetch Engine Registers */ + cam_soc_util_reg_dump(soc_info, CAM_JPEG_MEM_BASE_INDEX, + core_info->jpeg_dma_hw_info->debug_reg_offset.top_offset, + core_info->jpeg_dma_hw_info->debug_reg_offset.top_range); + + /* Write Engine */ + cam_soc_util_reg_dump(soc_info, CAM_JPEG_MEM_BASE_INDEX, + core_info->jpeg_dma_hw_info->debug_reg_offset.we_offset, + core_info->jpeg_dma_hw_info->debug_reg_offset.we_range); + + /* + * WE qos cfg, test bus and debug regs, spare regs, bus misr, scale reg, core status regs + * & MMU prefetch regs + */ + cam_soc_util_reg_dump(soc_info, CAM_JPEG_MEM_BASE_INDEX, + core_info->jpeg_dma_hw_info->debug_reg_offset.we_qos_offset, + core_info->jpeg_dma_hw_info->debug_reg_offset.we_qos_range); + + /* Perf Registers */ + cam_soc_util_reg_dump(soc_info, CAM_JPEG_MEM_BASE_INDEX, + core_info->jpeg_dma_hw_info->debug_reg_offset.perf_offset, + core_info->jpeg_dma_hw_info->debug_reg_offset.perf_range); + + return 0; +} + +int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_dma_dev = device_priv; + struct 
cam_jpeg_dma_device_core_info *core_info = NULL; + struct cam_jpeg_dma_device_hw_info *hw_info = NULL; + struct cam_jpeg_match_pid_args *match_pid_mid = NULL; + uint32_t *num_pid = NULL; + struct cam_hw_soc_info *soc_info = NULL; + int i, rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_JPEG, "Invalid arguments"); + return -EINVAL; + } + + if (cmd_type >= CAM_JPEG_CMD_MAX) { + CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type); + return -EINVAL; + } + + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + + hw_info = core_info->jpeg_dma_hw_info; + soc_info = &jpeg_dma_dev->soc_info; + + + switch (cmd_type) { + case CAM_JPEG_CMD_SET_IRQ_CB: + { + struct cam_jpeg_set_irq_cb *irq_cb = cmd_args; + struct cam_jpeg_irq_cb_data *irq_cb_data; + + if (!cmd_args) { + CAM_ERR(CAM_JPEG, "cmd args NULL"); + return -EINVAL; + } + + irq_cb_data = &irq_cb->irq_cb_data; + spin_lock(&jpeg_dma_dev->hw_lock); + if (irq_cb->b_set_cb) { + core_info->irq_cb.jpeg_hw_mgr_cb = + irq_cb->jpeg_hw_mgr_cb; + core_info->irq_cb.irq_cb_data.jpeg_req = irq_cb_data->jpeg_req; + core_info->irq_cb.irq_cb_data.private_data = irq_cb_data->private_data; + } else { + core_info->irq_cb.jpeg_hw_mgr_cb = NULL; + core_info->irq_cb.irq_cb_data.jpeg_req = NULL; + core_info->irq_cb.irq_cb_data.private_data = NULL; + } + spin_unlock(&jpeg_dma_dev->hw_lock); + rc = 0; + break; + } + case CAM_JPEG_CMD_GET_NUM_PID: + if (!cmd_args) { + CAM_ERR(CAM_JPEG, "cmd args NULL"); + return -EINVAL; + } + + num_pid = (uint32_t *)cmd_args; + *num_pid = core_info->num_pid; + + break; + case CAM_JPEG_CMD_MATCH_PID_MID: + match_pid_mid = (struct cam_jpeg_match_pid_args *)cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_JPEG, "cmd args NULL"); + return -EINVAL; + } + + for (i = 0; i < core_info->num_pid; i++) { + if (core_info->pid[i] == match_pid_mid->pid) + break; + } + + if (i == core_info->num_pid) + match_pid_mid->pid_match_found = false; + else + match_pid_mid->pid_match_found = true; + + if 
(match_pid_mid->pid_match_found) { + if (match_pid_mid->fault_mid == core_info->rd_mid) { + match_pid_mid->match_res = + CAM_JPEG_DMA_INPUT_IMAGE; + } else if (match_pid_mid->fault_mid == + core_info->wr_mid) { + match_pid_mid->match_res = + CAM_JPEG_DMA_OUTPUT_IMAGE; + } else + match_pid_mid->pid_match_found = false; + } + + break; + case CAM_JPEG_CMD_MINI_DUMP: + rc = cam_jpeg_dma_mini_dump(jpeg_dma_dev, cmd_args); + break; + case CAM_JPEG_CMD_CONFIG_HW_MISR: + { + if (hw_info->camnoc_misr_support) + rc = cam_jpeg_dma_config_cmanoc_hw_misr(hw_info, soc_info, cmd_args); + else + CAM_DBG(CAM_JPEG, "camnoc misr is not supported"); + break; + } + case CAM_JPEG_CMD_DUMP_HW_MISR_VAL: + { + if (hw_info->camnoc_misr_support) { + rc = cam_jpeg_dma_dump_hw_misr_val(hw_info, soc_info, cmd_args); + if (rc) + break; + rc = cam_jpeg_dma_dump_camnoc_misr_val(hw_info, soc_info, cmd_args); + } else { + CAM_DBG(CAM_JPEG, "camnoc misr is not supported"); + } + break; + } + case CAM_JPEG_CMD_DUMP_DEBUG_REGS: + rc = cam_jpeg_dma_dump_debug_regs(jpeg_dma_dev); + break; + default: + rc = -EINVAL; + break; + } + if (rc) + CAM_ERR(CAM_JPEG, "error cmdtype %d rc = %d", cmd_type, rc); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h new file mode 100644 index 0000000000..93ebd5e89f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_DMA_CORE_H +#define CAM_JPEG_DMA_CORE_H + +#include +#include +#include +#include +#include + +#include "cam_jpeg_hw_intf.h" + +struct cam_jpeg_dma_reg_offsets { + uint32_t hw_version; + uint32_t int_status; + uint32_t int_clr; + uint32_t int_mask; + uint32_t hw_cmd; + uint32_t reset_cmd; + uint32_t encode_size; + uint32_t core_cfg; + uint32_t misr_cfg0; + uint32_t misr_cfg1; +}; + +struct cam_jpeg_dma_regval { + uint32_t int_clr_clearall; + uint32_t int_mask_disable_all; + uint32_t int_mask_enable_all; + uint32_t hw_cmd_start; + uint32_t reset_cmd; + uint32_t hw_cmd_stop; + uint32_t misr_cfg0; +}; + +struct cam_jpeg_dma_int_status { + uint32_t framedone; + uint32_t resetdone; + uint32_t iserror; + uint32_t stopdone; + uint32_t scale_enable; + uint32_t scale_enable_shift; +}; + +struct cam_jpeg_dma_camnoc_misr_reg_offset { + uint32_t main_ctl; + uint32_t id_mask_low; + uint32_t id_value_low; + uint32_t misc_ctl; + uint32_t sigdata0; +}; + +struct cam_jpeg_dma_camnoc_misr_reg_val { + uint32_t main_ctl; + uint32_t id_mask_low; + uint32_t id_value_low_rd; + uint32_t id_value_low_wr; + uint32_t misc_ctl_start; + uint32_t misc_ctl_stop; +}; + +struct cam_jpeg_dma_debug_regs_offset { + uint32_t top_offset; + uint32_t top_range; + uint32_t we_offset; + uint32_t we_range; + uint32_t we_qos_offset; + uint32_t we_qos_range; + uint32_t perf_offset; + uint32_t perf_range; +}; + +struct cam_jpeg_dma_device_hw_info { + struct cam_jpeg_dma_reg_offsets reg_offset; + struct cam_jpeg_dma_regval reg_val; + struct cam_jpeg_dma_debug_regs_offset debug_reg_offset; + struct cam_jpeg_dma_int_status int_status; + struct cam_jpeg_dma_camnoc_misr_reg_offset camnoc_misr_reg_offset; + struct cam_jpeg_dma_camnoc_misr_reg_val camnoc_misr_reg_val; + uint32_t max_misr; + uint32_t max_misr_rd; + uint32_t max_misr_wr; + uint32_t camnoc_misr_sigdata; + uint32_t master_we_sel; + uint32_t misr_rd_word_sel; + uint32_t camnoc_misr_support; + int32_t 
prev_dma_wr_misr_val[CAM_JPEG_CAMNOC_MISR_VAL_ROW][ + CAM_JPEG_CAMNOC_MISR_VAL_COL]; + int32_t prev_dma_rd_misr_val[CAM_JPEG_CAMNOC_MISR_VAL_ROW][ + CAM_JPEG_CAMNOC_MISR_VAL_COL]; + int32_t prev_camnoc_misr_val[CAM_JPEG_CAMNOC_MISR_VAL_ROW][ + CAM_JPEG_CAMNOC_MISR_VAL_COL]; +}; + +enum cam_jpeg_dma_core_state { + CAM_JPEG_DMA_CORE_NOT_READY, + CAM_JPEG_DMA_CORE_READY, + CAM_JPEG_DMA_CORE_RESETTING, + CAM_JPEG_DMA_CORE_ABORTING, + CAM_JPEG_DMA_CORE_RESETTING_ON_DONE, + CAM_JPEG_DMA_CORE_STATE_MAX, +}; + +struct cam_jpeg_dma_device_core_info { + enum cam_jpeg_dma_core_state core_state; + struct cam_jpeg_dma_device_hw_info *jpeg_dma_hw_info; + uint32_t cpas_handle; + struct cam_jpeg_set_irq_cb irq_cb; + int32_t ref_count; + struct mutex core_mutex; + uint32_t num_pid; + uint32_t pid[CAM_JPEG_HW_MAX_NUM_PID]; + uint32_t rd_mid; + uint32_t wr_mid; +}; + +int cam_jpeg_dma_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_jpeg_dma_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_jpeg_dma_start_hw(void *device_priv, + void *start_hw_args, uint32_t arg_size); +int cam_jpeg_dma_stop_hw(void *device_priv, + void *stop_hw_args, uint32_t arg_size); +int cam_jpeg_dma_reset_hw(void *device_priv, + void *reset_hw_args, uint32_t arg_size); +int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +int cam_jpeg_dma_test_irq_line(void *data); +irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data); + +/** + * @brief : API to register JPEG DMA hw to platform framework. + * @return 0 on success, or a negative error code on failure. + */ +int cam_jpeg_dma_init_module(void); + +/** + * @brief : API to remove JPEG DMA Hw from platform framework. 
+ */ +void cam_jpeg_dma_exit_module(void); +#endif /* CAM_JPEG_DMA_CORE_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c new file mode 100644 index 0000000000..61fe700d53 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "jpeg_dma_core.h" +#include "jpeg_dma_soc.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_io_util.h" +#include "cam_jpeg_hw_intf.h" +#include "cam_jpeg_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_jpeg_dma_hw_info_ver_4_2_0.h" +#include "cam_jpeg_dma_165_hw_info_ver_4_2_0.h" +#include "cam_jpeg_dma_580_hw_info_ver_4_2_0.h" +#include "cam_jpeg_dma_680_hw_info_ver_4_2_0.h" +#include "cam_jpeg_dma_780_hw_info_ver_4_2_0.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static int cam_jpeg_dma_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_jpeg_dma_device_core_info *core_info, + uint32_t hw_idx) +{ + struct cam_cpas_register_params cpas_register_params; + int rc; + + cpas_register_params.dev = soc_info->dev; + memcpy(cpas_register_params.identifier, "jpeg-dma", + sizeof("jpeg-dma")); + cpas_register_params.cam_cpas_client_cb = NULL; + cpas_register_params.cell_index = hw_idx; + cpas_register_params.userdata = NULL; + + rc = cam_cpas_register_client(&cpas_register_params); + if (rc) { + CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc); + return rc; + } + core_info->cpas_handle = cpas_register_params.client_handle; + + return rc; +} + +static int cam_jpeg_dma_unregister_cpas( 
+ struct cam_jpeg_dma_device_core_info *core_info) +{ + int rc; + + rc = cam_cpas_unregister_client(core_info->cpas_handle); + if (rc) + CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc); + core_info->cpas_handle = 0; + + return rc; +} + +static int cam_jpeg_dma_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *jpeg_dma_dev = NULL; + struct cam_hw_intf *jpeg_dma_dev_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + struct cam_jpeg_dma_device_hw_info *hw_info = NULL; + struct cam_jpeg_dma_soc_private *soc_private; + int i, rc; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + jpeg_dma_dev_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!jpeg_dma_dev_intf) + return -ENOMEM; + + of_property_read_u32(pdev->dev.of_node, + "cell-index", &jpeg_dma_dev_intf->hw_idx); + + jpeg_dma_dev = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!jpeg_dma_dev) { + rc = -ENOMEM; + goto error_alloc_dev; + } + jpeg_dma_dev->soc_info.pdev = pdev; + jpeg_dma_dev->soc_info.dev = &pdev->dev; + jpeg_dma_dev->soc_info.dev_name = pdev->name; + jpeg_dma_dev_intf->hw_priv = jpeg_dma_dev; + jpeg_dma_dev_intf->hw_ops.init = cam_jpeg_dma_init_hw; + jpeg_dma_dev_intf->hw_ops.deinit = cam_jpeg_dma_deinit_hw; + jpeg_dma_dev_intf->hw_ops.start = cam_jpeg_dma_start_hw; + jpeg_dma_dev_intf->hw_ops.stop = cam_jpeg_dma_stop_hw; + jpeg_dma_dev_intf->hw_ops.reset = cam_jpeg_dma_reset_hw; + jpeg_dma_dev_intf->hw_ops.process_cmd = cam_jpeg_dma_process_cmd; + jpeg_dma_dev_intf->hw_ops.test_irq_line = cam_jpeg_dma_test_irq_line; + jpeg_dma_dev_intf->hw_type = CAM_JPEG_DEV_DMA; + + platform_set_drvdata(pdev, jpeg_dma_dev_intf); + jpeg_dma_dev->core_info = + CAM_MEM_ZALLOC(sizeof(struct cam_jpeg_dma_device_core_info), + GFP_KERNEL); + if 
(!jpeg_dma_dev->core_info) { + rc = -ENOMEM; + goto error_alloc_core; + } + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_JPEG, " No jpeg_dma hardware info"); + rc = -EINVAL; + goto error_match_dev; + } + hw_info = (struct cam_jpeg_dma_device_hw_info *)match_dev->data; + core_info->jpeg_dma_hw_info = hw_info; + core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; + mutex_init(&core_info->core_mutex); + + rc = cam_jpeg_dma_init_soc_resources(&jpeg_dma_dev->soc_info, + cam_jpeg_dma_irq, + jpeg_dma_dev); + if (rc) { + CAM_ERR(CAM_JPEG, "failed to init_soc %d", rc); + goto error_init_soc; + } + + rc = cam_jpeg_dma_register_cpas(&jpeg_dma_dev->soc_info, + core_info, jpeg_dma_dev_intf->hw_idx); + if (rc) { + CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc); + goto error_reg_cpas; + } + jpeg_dma_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&jpeg_dma_dev->hw_mutex); + spin_lock_init(&jpeg_dma_dev->hw_lock); + init_completion(&jpeg_dma_dev->hw_complete); + CAM_DBG(CAM_JPEG, "JPEG-DMA component bound successfully"); + + soc_private = (struct cam_jpeg_dma_soc_private *) + jpeg_dma_dev->soc_info.soc_private; + + core_info->num_pid = soc_private->num_pid; + for (i = 0; i < soc_private->num_pid; i++) + core_info->pid[i] = soc_private->pid[i]; + + core_info->rd_mid = soc_private->rd_mid; + core_info->wr_mid = soc_private->wr_mid; + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +error_reg_cpas: + cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info); +error_init_soc: + mutex_destroy(&core_info->core_mutex); +error_match_dev: + CAM_MEM_FREE(jpeg_dma_dev->core_info); +error_alloc_core: + CAM_MEM_FREE(jpeg_dma_dev); +error_alloc_dev: + CAM_MEM_FREE(jpeg_dma_dev_intf); + + return rc; +} + +static void 
cam_jpeg_dma_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *jpeg_dma_dev = NULL; + struct cam_hw_intf *jpeg_dma_dev_intf = NULL; + struct cam_jpeg_dma_device_core_info *core_info = NULL; + int rc; + struct platform_device *pdev = to_platform_device(dev); + + jpeg_dma_dev_intf = platform_get_drvdata(pdev); + if (!jpeg_dma_dev_intf) { + CAM_ERR(CAM_JPEG, "error No data in pdev"); + return; + } + + jpeg_dma_dev = jpeg_dma_dev_intf->hw_priv; + if (!jpeg_dma_dev) { + CAM_ERR(CAM_JPEG, "error HW data is NULL"); + goto free_jpeg_hw_intf; + } + + core_info = (struct cam_jpeg_dma_device_core_info *) + jpeg_dma_dev->core_info; + if (!core_info) { + CAM_ERR(CAM_JPEG, "error core data NULL"); + goto deinit_soc; + } + + rc = cam_jpeg_dma_unregister_cpas(core_info); + if (rc) + CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc); + + mutex_destroy(&core_info->core_mutex); + CAM_MEM_FREE(core_info); + +deinit_soc: + rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info); + if (rc) + CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc); + + mutex_destroy(&jpeg_dma_dev->hw_mutex); + CAM_MEM_FREE(jpeg_dma_dev); + +free_jpeg_hw_intf: + CAM_MEM_FREE(jpeg_dma_dev_intf); +} + +const static struct component_ops cam_jpeg_dma_component_ops = { + .bind = cam_jpeg_dma_component_bind, + .unbind = cam_jpeg_dma_component_unbind, +}; + +static int cam_jpeg_dma_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_jpeg_dma_component_ops); + return 0; +} + +static int cam_jpeg_dma_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_JPEG, "Adding JPEG dma component"); + rc = component_add(&pdev->dev, &cam_jpeg_dma_component_ops); + if (rc) + CAM_ERR(CAM_JPEG, "failed to add component rc: %d", rc); + + return rc; +} + +static const struct of_device_id cam_jpeg_dma_dt_match[] = { + { + .compatible = "qcom,cam_jpeg_dma", + .data = &cam_jpeg_dma_hw_info, + }, + { + .compatible = 
"qcom,cam_jpeg_dma_165", + .data = &cam_jpeg_dma_165_hw_info, + }, + { + .compatible = "qcom,cam_jpeg_dma_580", + .data = &cam_jpeg_dma_580_hw_info, + }, + { + .compatible = "qcom,cam_jpeg_dma_680", + .data = &cam_jpeg_dma_680_hw_info, + }, + { + .compatible = "qcom,cam_jpeg_dma_780", + .data = &cam_jpeg_dma_780_hw_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_jpeg_dma_dt_match); + +struct platform_driver cam_jpeg_dma_driver = { + .probe = cam_jpeg_dma_probe, + .remove = cam_jpeg_dma_remove, + .driver = { + .name = "cam-jpeg-dma", + .owner = THIS_MODULE, + .of_match_table = cam_jpeg_dma_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_jpeg_dma_init_module(void) +{ + return platform_driver_register(&cam_jpeg_dma_driver); +} + +void cam_jpeg_dma_exit_module(void) +{ + platform_driver_unregister(&cam_jpeg_dma_driver); +} + +MODULE_DESCRIPTION("CAM JPEG_DMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c new file mode 100644 index 0000000000..1e199d02ed --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "jpeg_dma_soc.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t jpeg_dma_irq_handler, void *data) +{ + struct cam_jpeg_dma_soc_private *soc_private; + struct platform_device *pdev = NULL; + int num_pid = 0, i = 0; + int rc; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_jpeg_dma_soc_private), + GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_JPEG, "Error! soc_private Alloc Failed"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + pdev = soc_info->pdev; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) + return rc; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, + jpeg_dma_irq_handler, &(irq_data[0])); + if (rc) + CAM_ERR(CAM_JPEG, "init soc failed %d", rc); + + soc_private->num_pid = 0; + soc_private->rd_mid = UINT_MAX; + soc_private->wr_mid = UINT_MAX; + + num_pid = of_property_count_u32_elems(pdev->dev.of_node, "cam_hw_pid"); + CAM_DBG(CAM_JPEG, "jpeg:%d pid count %d", soc_info->index, num_pid); + + if (num_pid <= 0 || num_pid > CAM_JPEG_HW_MAX_NUM_PID) + goto end; + + soc_private->num_pid = num_pid; + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(pdev->dev.of_node, "cam_hw_pid", i, + &soc_private->pid[i]); + + of_property_read_u32(pdev->dev.of_node, + "cam_hw_rd_mid", &soc_private->rd_mid); + + of_property_read_u32(pdev->dev.of_node, + "cam_hw_wr_mid", &soc_private->wr_mid); + +end: + return rc; +} + +int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + CAM_SVS_VOTE, true); + if (rc) + CAM_ERR(CAM_JPEG, "enable platform failed %d", rc); + + return rc; +} + +int 
cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) + CAM_ERR(CAM_JPEG, "disable platform failed %d", rc); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h new file mode 100644 index 0000000000..418dd1efe3 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, 2021 The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_JPEG_DMA_SOC_H_ +#define _CAM_JPEG_DMA_SOC_H_ + +#include "cam_soc_util.h" +#include "cam_jpeg_hw_intf.h" + +/* + * struct cam_jpeg_dma_soc_private: + * + * @Brief: Private SOC data specific to JPEG DMA Driver + * + * @num_pid JPEG number of pids + * @pid: JPEG DMA pid value list + * @rd_mid: JPEG DMA inport read mid value + * @wr_mid: JPEG DMA outport write mid value + */ +struct cam_jpeg_dma_soc_private { + uint32_t num_pid; + uint32_t pid[CAM_JPEG_HW_MAX_NUM_PID]; + uint32_t rd_mid; + uint32_t wr_mid; +}; + +int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t jpeg_dma_irq_handler, void *irq_data); + +int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info); + +int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info); + +#endif /* _CAM_JPEG_DMA_SOC_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_165_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_165_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..110c6acfa9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_165_hw_info_ver_4_2_0.h @@ -0,0 +1,103 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_JPEG_ENC_165_HW_INFO_TITAN170_H +#define CAM_JPEG_ENC_165_HW_INFO_TITAN170_H + +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001 +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000 + +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000 +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a + +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK 0x8000000 +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_SHIFT 0x0000001b + +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800 +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b + +#define CAM_JPEG_HW_MASK_SCALE_ENABLE 0x1 + +#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26) +#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29) + +#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK +#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \ + CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK +#define CAM_JPEG_HW_MASK_COMP_ERR \ + (CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK) + +static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_165_hw_info = { + 
.reg_offset = { + .hw_version = 0x0, + .int_clr = 0x1c, + .int_status = 0x20, + .int_mask = 0x18, + .hw_cmd = 0x10, + .reset_cmd = 0x8, + .encode_size = 0x180, + .core_cfg = 0xc, + .misr_cfg = 0x2B4, + .misr_rd0 = 0x2B8, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x200320D3, + .hw_cmd_stop = 0x00000002, + .misr_cfg = 0x7, + }, + .int_status = { + .framedone = CAM_JPEG_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK, + .iserror = CAM_JPEG_HW_MASK_COMP_ERR, + .stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK, + .scale_enable = CAM_JPEG_HW_MASK_SCALE_ENABLE, + .scale_enable_shift = 0x7, + }, + .reg_dump = { + .start_offset = 0x0, + .end_offset = 0x33C, + }, + .camnoc_misr_reg_offset = { + .main_ctl = 0x5908, + .id_mask_low = 0x5920, + .id_value_low = 0x5918, + .misc_ctl = 0x5910, + .sigdata0 = 0x5950, + }, + .camnoc_misr_reg_val = { + .main_ctl = 0x7, + .id_mask_low = 0xFC0, + .id_value_low_rd = 0xD80, + .id_value_low_wr = 0xDC2, + .misc_ctl_start = 0x1, + .misc_ctl_stop = 0x2, + }, + .max_misr = 3, + .max_misr_rd = 4, + .camnoc_misr_sigdata = 4, + .camnoc_misr_support = 1, +}; + +#endif /* CAM_JPEG_ENC_165_HW_INFO_TITAN170_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_580_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_580_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..b0aa621b76 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_580_hw_info_ver_4_2_0.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_ENC_580_HW_INFO_TITAN170_H +#define CAM_JPEG_ENC_580_HW_INFO_TITAN170_H + +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001 +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000 + +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000 +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a + +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK 0x8000000 +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_SHIFT 0x0000001b + +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800 +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b + +#define CAM_JPEG_HW_MASK_SCALE_ENABLE 0x1 + +#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26) +#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29) + +#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK +#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \ + CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK +#define CAM_JPEG_HW_MASK_COMP_ERR \ + (CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK) + +static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_580_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x1c, + .int_status = 0x20, + .int_mask = 0x18, + .hw_cmd = 0x10, + .reset_cmd = 0x8, 
+ .encode_size = 0x180, + .core_cfg = 0xc, + .misr_cfg = 0x2B4, + .misr_rd0 = 0x2B8, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x200320D3, + .hw_cmd_stop = 0x00000002, + .misr_cfg = 0x7, + }, + .int_status = { + .framedone = CAM_JPEG_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK, + .iserror = CAM_JPEG_HW_MASK_COMP_ERR, + .stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK, + .scale_enable = CAM_JPEG_HW_MASK_SCALE_ENABLE, + .scale_enable_shift = 0x7, + }, + .reg_dump = { + .start_offset = 0x0, + .end_offset = 0x33C, + }, + .camnoc_misr_reg_offset = { + .main_ctl = 0x3608, + .id_mask_low = 0x3620, + .id_value_low = 0x3618, + .misc_ctl = 0x3610, + .sigdata0 = 0x3650, + }, + .camnoc_misr_reg_val = { + .main_ctl = 0x7, + .id_mask_low = 0xFC0, + .id_value_low_rd = 0xD80, + .id_value_low_wr = 0xDC2, + .misc_ctl_start = 0x1, + .misc_ctl_stop = 0x2, + }, + .max_misr = 3, + .max_misr_rd = 4, + .camnoc_misr_sigdata = 4, + .camnoc_misr_support = 1, +}; + +#endif /* CAM_JPEG_ENC_580_HW_INFO_TITAN170_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_680_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_680_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..42fac9a685 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_680_hw_info_ver_4_2_0.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_ENC_680_HW_INFO_TITAN170_H +#define CAM_JPEG_ENC_680_HW_INFO_TITAN170_H + +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001 +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000 + +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000 +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a + +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK 0x8000000 +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_SHIFT 0x0000001b + +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800 +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b + +#define CAM_JPEG_HW_MASK_SCALE_ENABLE 0x1 + +#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26) +#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29) + +#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK +#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \ + CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK +#define CAM_JPEG_HW_MASK_COMP_ERR \ + (CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK) + +static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_680_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x1c, + .int_status = 0x20, + .int_mask = 0x18, + .hw_cmd = 0x10, + .reset_cmd = 0x8, 
+ .encode_size = 0x180, + .core_cfg = 0xc, + .misr_cfg = 0x2B4, + .misr_rd0 = 0x2B8, + }, + .debug_reg_offset = { + .top_offset = 0x0, + .top_range = 0x24, + .we_offset = 0xC0, + .we_range = 0x31, + .scale_offset = 0x26C, + .scale_range = 0x35, + .perf_offset = 0xFEC, + .perf_range = 0x5, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x200320D3, + .hw_cmd_stop = 0x00000002, + .misr_cfg = 0x7, + }, + .int_status = { + .framedone = CAM_JPEG_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK, + .iserror = CAM_JPEG_HW_MASK_COMP_ERR, + .stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK, + .scale_enable = CAM_JPEG_HW_MASK_SCALE_ENABLE, + .scale_enable_shift = 0x7, + }, + .reg_dump = { + .start_offset = 0x0, + .end_offset = 0x33C, + }, + .camnoc_misr_reg_offset = { + .main_ctl = 0x8108, + .id_mask_low = 0x8120, + .id_value_low = 0x8118, + .misc_ctl = 0x8110, + .sigdata0 = 0x8150, + }, + .camnoc_misr_reg_val = { + .main_ctl = 0x7, + .id_mask_low = 0xFC0, + .id_value_low_rd = 0xD80, + .id_value_low_wr = 0xDC2, + .misc_ctl_start = 0x1, + .misc_ctl_stop = 0x2, + }, + .max_misr = 3, + .max_misr_rd = 4, + .camnoc_misr_sigdata = 4, + .camnoc_misr_support = 1, +}; + +#endif /* CAM_JPEG_ENC_680_HW_INFO_TITAN170_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_780_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_780_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..dc5cff8e80 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_780_hw_info_ver_4_2_0.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_ENC_780_HW_INFO_TITAN170_H +#define CAM_JPEG_ENC_780_HW_INFO_TITAN170_H + +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001 +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000 + +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000 +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a + +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK 0x8000000 +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_SHIFT 0x0000001b + +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800 +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b + +#define CAM_JPEG_HW_MASK_SCALE_ENABLE 0x1 + +#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26) +#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29) + +#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK +#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \ + CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK +#define CAM_JPEG_HW_MASK_COMP_ERR \ + (CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK) + +static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_780_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x1c, + .int_status = 0x20, + .int_mask = 0x18, + .hw_cmd = 0x10, + .reset_cmd = 0x8, 
+ .encode_size = 0x180, + .core_cfg = 0xc, + .misr_cfg = 0x2B4, + .misr_rd0 = 0x2B8, + }, + .debug_reg_offset = { + .top_offset = 0x0, + .top_range = 0x24, + .we_offset = 0xC0, + .we_range = 0x31, + .scale_offset = 0x26C, + .scale_range = 0x35, + .perf_offset = 0xFEC, + .perf_range = 0x5, + }, + .reg_val = { + .int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x200320D3, + .hw_cmd_stop = 0x00000002, + .misr_cfg = 0x7, + }, + .int_status = { + .framedone = CAM_JPEG_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK, + .iserror = CAM_JPEG_HW_MASK_COMP_ERR, + .stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK, + .scale_enable = CAM_JPEG_HW_MASK_SCALE_ENABLE, + .scale_enable_shift = 0x7, + }, + .reg_dump = { + .start_offset = 0x0, + .end_offset = 0x33C, + }, + .camnoc_misr_reg_offset = { + .main_ctl = 0x2C88, + .id_mask_low = 0x2CA0, + .id_value_low = 0x2C98, + .misc_ctl = 0x2C90, + }, + .camnoc_misr_reg_val = { + .main_ctl = 0x3, + .id_mask_low = 0xFC0, + .id_value_low_rd = 0xD80, + .id_value_low_wr = 0xDC2, + .misc_ctl_start = 0x1, + .misc_ctl_stop = 0x2, + }, + .max_misr = 3, + .max_misr_rd = 4, + .camnoc_misr_sigdata = 0, + .camnoc_misr_support = 0, +}; + +#endif /* CAM_JPEG_ENC_780_HW_INFO_TITAN170_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h new file mode 100644 index 0000000000..a51432052b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef CAM_JPEG_ENC_HW_INFO_TITAN170_H +#define CAM_JPEG_ENC_HW_INFO_TITAN170_H + +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001 +#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000 + +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000 +#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a + +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK 0x8000000 +#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_SHIFT 0x0000001b + +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800 +#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b + +#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25) +#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26) +#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29) + +#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \ + CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK +#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \ + CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK +#define CAM_JPEG_HW_MASK_COMP_ERR \ + (CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \ + CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \ + CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK) + +static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_hw_info = { + .reg_offset = { + .hw_version = 0x0, + .int_clr = 0x1c, + .int_status = 0x20, + .int_mask = 0x18, + .hw_cmd = 0x10, + .reset_cmd = 0x8, + .encode_size = 0x180, + }, + .reg_val = { + 
.int_clr_clearall = 0xFFFFFFFF, + .int_mask_disable_all = 0x00000000, + .int_mask_enable_all = 0xFFFFFFFF, + .hw_cmd_start = 0x00000001, + .reset_cmd = 0x200320D3, + .hw_cmd_stop = 0x00000002, + }, + .int_status = { + .framedone = CAM_JPEG_HW_MASK_COMP_FRAMEDONE, + .resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK, + .iserror = CAM_JPEG_HW_MASK_COMP_ERR, + .stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK, + }, + .reg_dump = { + .start_offset = 0x0, + .end_offset = 0x33C, + }, + .camnoc_misr_support = 0, +}; + +#endif /* CAM_JPEG_ENC_HW_INFO_TITAN170_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c new file mode 100644 index 0000000000..369ca4dead --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c @@ -0,0 +1,913 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "jpeg_enc_core.h" +#include "jpeg_enc_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_jpeg_hw_intf.h" +#include "cam_jpeg_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" + +#define CAM_JPEG_HW_IRQ_IS_FRAME_DONE(jpeg_irq_status, hi) \ + ((jpeg_irq_status) & (hi)->int_status.framedone) +#define CAM_JPEG_HW_IRQ_IS_RESET_ACK(jpeg_irq_status, hi) \ + ((jpeg_irq_status) & (hi)->int_status.resetdone) +#define CAM_JPEG_HW_IRQ_IS_ERR(jpeg_irq_status, hi) \ + ((jpeg_irq_status) & (hi)->int_status.iserror) +#define CAM_JPEG_HW_IRQ_IS_STOP_DONE(jpeg_irq_status, hi) \ + ((jpeg_irq_status) & (hi)->int_status.stopdone) + +#define CAM_JPEG_ENC_RESET_TIMEOUT msecs_to_jiffies(500) + +int cam_jpeg_enc_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_enc_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + unsigned long flags; + int rc; + + if (!device_priv) { + CAM_ERR(CAM_JPEG, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &jpeg_enc_dev->soc_info; + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + + if (!soc_info || !core_info) { + CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + mutex_lock(&core_info->core_mutex); + if (++core_info->ref_count > 1) { + mutex_unlock(&core_info->core_mutex); + return 0; + } + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 2; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_READ; + 
axi_vote.axi_path[0].camnoc_bw = JPEG_VOTE; + axi_vote.axi_path[0].mnoc_ab_bw = JPEG_VOTE; + axi_vote.axi_path[0].mnoc_ib_bw = JPEG_VOTE; + axi_vote.axi_path[1].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[1].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[1].camnoc_bw = JPEG_VOTE; + axi_vote.axi_path[1].mnoc_ab_bw = JPEG_VOTE; + axi_vote.axi_path[1].mnoc_ib_bw = JPEG_VOTE; + + + rc = cam_cpas_start(core_info->cpas_handle, + &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_JPEG, "cpass start failed: %d", rc); + goto cpas_failed; + } + + rc = cam_jpeg_enc_enable_soc_resources(soc_info); + if (rc) { + CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc); + goto soc_failed; + } + spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags); + jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_UP; + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + + mutex_unlock(&core_info->core_mutex); + + return 0; + +soc_failed: + cam_cpas_stop(core_info->cpas_handle); +cpas_failed: + --core_info->ref_count; + mutex_unlock(&core_info->core_mutex); + + return rc; +} + +int cam_jpeg_enc_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_enc_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + unsigned long flags; + int rc; + + if (!device_priv) { + CAM_ERR(CAM_JPEG, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &jpeg_enc_dev->soc_info; + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + if (!soc_info || !core_info) { + CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + mutex_lock(&core_info->core_mutex); + if (--core_info->ref_count > 0) { + mutex_unlock(&core_info->core_mutex); + return 0; + } + + if (core_info->ref_count < 0) { + CAM_ERR(CAM_JPEG, "ref cnt %d", core_info->ref_count); + core_info->ref_count = 0; + mutex_unlock(&core_info->core_mutex); + 
return -EFAULT; + } + + spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags); + jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + rc = cam_jpeg_enc_disable_soc_resources(soc_info); + if (rc) + CAM_ERR(CAM_JPEG, "soc disable failed %d", rc); + + rc = cam_cpas_stop(core_info->cpas_handle); + if (rc) + CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc); + + mutex_unlock(&core_info->core_mutex); + + return 0; +} + +irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data) +{ + struct cam_hw_info *jpeg_enc_dev = data; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + uint32_t irq_status = 0; + uint32_t encoded_size = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_enc_device_hw_info *hw_info = NULL; + void __iomem *mem_base; + + if (!jpeg_enc_dev) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return IRQ_HANDLED; + } + soc_info = &jpeg_enc_dev->soc_info; + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + hw_info = core_info->jpeg_enc_hw_info; + mem_base = soc_info->reg_map[0].mem_base; + + spin_lock(&jpeg_enc_dev->hw_lock); + if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_ERR(CAM_JPEG, "JPEG HW is in off state"); + spin_unlock(&jpeg_enc_dev->hw_lock); + return IRQ_HANDLED; + } + irq_status = cam_io_r_mb(mem_base + + core_info->jpeg_enc_hw_info->reg_offset.int_status); + + cam_io_w_mb(irq_status, + soc_info->reg_map[0].mem_base + + core_info->jpeg_enc_hw_info->reg_offset.int_clr); + spin_unlock(&jpeg_enc_dev->hw_lock); + + CAM_DBG(CAM_JPEG, "irq_num: %d irq_status: 0x%x , core_state: %d", + irq_num, irq_status, core_info->core_state); + + if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status, hw_info)) { + spin_lock(&jpeg_enc_dev->hw_lock); + if (core_info->core_state == CAM_JPEG_ENC_CORE_READY) { + CAM_TRACE(CAM_JPEG, "Ctx %lld ENC FrameDone IRQ", + core_info->irq_cb.irq_cb_data.private_data); + encoded_size = cam_io_r_mb(mem_base + + 
core_info->jpeg_enc_hw_info->reg_offset.encode_size); + if (core_info->irq_cb.jpeg_hw_mgr_cb) { + core_info->irq_cb.jpeg_hw_mgr_cb(irq_status, + encoded_size, + (void *)&core_info->irq_cb.irq_cb_data); + } else { + CAM_ERR(CAM_JPEG, "unexpected done, no cb"); + } + cam_cpas_notify_event("JPEG FrameDone", 0); + } else { + CAM_ERR(CAM_JPEG, "unexpected done irq"); + } + core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY; + spin_unlock(&jpeg_enc_dev->hw_lock); + } + if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status, hw_info)) { + spin_lock(&jpeg_enc_dev->hw_lock); + if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) { + core_info->core_state = CAM_JPEG_ENC_CORE_READY; + complete(&jpeg_enc_dev->hw_complete); + CAM_DBG(CAM_JPEG, "JPEG ENC (%s) reset done", + jpeg_enc_dev->soc_info.dev_name); + } else { + CAM_ERR(CAM_JPEG, "unexpected reset irq"); + } + spin_unlock(&jpeg_enc_dev->hw_lock); + } + if (CAM_JPEG_HW_IRQ_IS_STOP_DONE(irq_status, hw_info)) { + spin_lock(&jpeg_enc_dev->hw_lock); + if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) { + core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY; + complete(&jpeg_enc_dev->hw_complete); + if (core_info->irq_cb.jpeg_hw_mgr_cb) { + core_info->irq_cb.jpeg_hw_mgr_cb(irq_status, -1, + (void *)&core_info->irq_cb.irq_cb_data); + } + } else { + CAM_ERR(CAM_JPEG, "unexpected abort irq"); + } + spin_unlock(&jpeg_enc_dev->hw_lock); + } + /* Unexpected/unintended HW interrupt */ + if (CAM_JPEG_HW_IRQ_IS_ERR(irq_status, hw_info)) { + spin_lock(&jpeg_enc_dev->hw_lock); + core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY; + CAM_ERR_RATE_LIMIT(CAM_JPEG, + "error irq_num %d irq_status = %x , core_state %d", + irq_num, irq_status, core_info->core_state); + + if (core_info->irq_cb.jpeg_hw_mgr_cb) { + core_info->irq_cb.jpeg_hw_mgr_cb(irq_status, -1, + (void *)&core_info->irq_cb.irq_cb_data); + } + spin_unlock(&jpeg_enc_dev->hw_lock); + } + + return IRQ_HANDLED; +} + +int cam_jpeg_enc_reset_hw(void *data, + void *start_args, uint32_t 
arg_size) +{ + struct cam_hw_info *jpeg_enc_dev = data; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_enc_device_hw_info *hw_info = NULL; + void __iomem *mem_base; + unsigned long rem_jiffies; + unsigned long flags; + int rc = 0; + + if (!jpeg_enc_dev) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + /* maskdisable.clrirq.maskenable.resetcmd */ + soc_info = &jpeg_enc_dev->soc_info; + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + hw_info = core_info->jpeg_enc_hw_info; + mem_base = soc_info->reg_map[0].mem_base; + + mutex_lock(&core_info->core_mutex); + spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags); + if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_ERR(CAM_JPEG, "JPEG HW is in off state"); + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + mutex_unlock(&core_info->core_mutex); + return -EINVAL; + } + if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) { + CAM_ERR(CAM_JPEG, "alrady resetting"); + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + mutex_unlock(&core_info->core_mutex); + return 0; + } + + reinit_completion(&jpeg_enc_dev->hw_complete); + core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING; + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + + cam_io_w_mb(hw_info->reg_val.int_mask_disable_all, + mem_base + hw_info->reg_offset.int_mask); + cam_io_w_mb(hw_info->reg_val.int_clr_clearall, + mem_base + hw_info->reg_offset.int_clr); + cam_io_w_mb(hw_info->reg_val.int_mask_enable_all, + mem_base + hw_info->reg_offset.int_mask); + cam_io_w_mb(hw_info->reg_val.reset_cmd, + mem_base + hw_info->reg_offset.reset_cmd); + + rem_jiffies = cam_common_wait_for_completion_timeout( + &jpeg_enc_dev->hw_complete, + CAM_JPEG_ENC_RESET_TIMEOUT); + if (!rem_jiffies) { + CAM_ERR(CAM_JPEG, "error Reset Timeout"); + core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY; + rc = -ETIMEDOUT; + } + + 
mutex_unlock(&core_info->core_mutex); + return rc; +} + +int cam_jpeg_enc_test_irq_line(void *data) +{ + struct cam_hw_info *jpeg_enc_dev = data; + int rc; + + if (!data) { + CAM_ERR(CAM_JPEG, "invalid args"); + return -EINVAL; + } + + rc = cam_jpeg_enc_init_hw(data, NULL, 0); + if (rc) { + CAM_ERR(CAM_JPEG, "failed to init hw (rc=%d)", rc); + return rc; + } + + rc = cam_jpeg_enc_reset_hw(data, NULL, 0); + if (rc) + CAM_ERR(CAM_JPEG, "failed to trigger reset irq (rc=%d)", rc); + else + CAM_INFO(CAM_JPEG, "verified JPEG DMA (%s) IRQ line", + jpeg_enc_dev->soc_info.dev_name); + + rc = cam_jpeg_enc_deinit_hw(data, NULL, 0); + if (rc) + CAM_ERR(CAM_JPEG, "failed to de-init hw (rc=%d)", rc); + + return 0; +} + +int cam_jpeg_enc_start_hw(void *data, + void *start_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_enc_dev = data; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_enc_device_hw_info *hw_info = NULL; + void __iomem *mem_base; + unsigned long flags; + + if (!jpeg_enc_dev) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + + soc_info = &jpeg_enc_dev->soc_info; + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + hw_info = core_info->jpeg_enc_hw_info; + mem_base = soc_info->reg_map[0].mem_base; + + spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags); + if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_ERR(CAM_JPEG, "JPEG HW is in off state"); + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + return -EINVAL; + } + if (core_info->core_state != CAM_JPEG_ENC_CORE_READY) { + CAM_ERR(CAM_JPEG, "Error not ready: %d", core_info->core_state); + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + + CAM_DBG(CAM_JPEG, "Starting JPEG ENC"); + cam_io_w_mb(hw_info->reg_val.hw_cmd_start, + mem_base + hw_info->reg_offset.hw_cmd); + + return 0; +} + +int 
cam_jpeg_enc_stop_hw(void *data, + void *stop_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_enc_dev = data; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_enc_device_hw_info *hw_info = NULL; + void __iomem *mem_base; + unsigned long rem_jiffies; + unsigned long flags; + + if (!jpeg_enc_dev) { + CAM_ERR(CAM_JPEG, "Invalid args"); + return -EINVAL; + } + soc_info = &jpeg_enc_dev->soc_info; + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + hw_info = core_info->jpeg_enc_hw_info; + mem_base = soc_info->reg_map[0].mem_base; + + mutex_lock(&core_info->core_mutex); + spin_lock_irqsave(&jpeg_enc_dev->hw_lock, flags); + if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_ERR(CAM_JPEG, "JPEG HW is in off state"); + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + mutex_unlock(&core_info->core_mutex); + return -EINVAL; + } + if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) { + CAM_ERR(CAM_JPEG, "alrady stopping"); + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + mutex_unlock(&core_info->core_mutex); + return 0; + } + + reinit_completion(&jpeg_enc_dev->hw_complete); + core_info->core_state = CAM_JPEG_ENC_CORE_ABORTING; + spin_unlock_irqrestore(&jpeg_enc_dev->hw_lock, flags); + + cam_io_w_mb(hw_info->reg_val.hw_cmd_stop, + mem_base + hw_info->reg_offset.hw_cmd); + + rem_jiffies = cam_common_wait_for_completion_timeout( + &jpeg_enc_dev->hw_complete, + CAM_JPEG_ENC_RESET_TIMEOUT); + if (!rem_jiffies) { + CAM_ERR(CAM_JPEG, "error Reset Timeout"); + core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY; + } + + mutex_unlock(&core_info->core_mutex); + return 0; +} + +int cam_jpeg_enc_hw_dump( + struct cam_hw_info *jpeg_enc_dev, + struct cam_jpeg_hw_dump_args *dump_args) +{ + + int i; + uint8_t *dst; + uint32_t *addr, *start; + uint32_t num_reg, min_len; + uint32_t reg_start_offset; + size_t remain_len; + struct cam_hw_soc_info 
*soc_info; + struct cam_jpeg_hw_dump_header *hdr; + struct cam_jpeg_enc_device_hw_info *hw_info; + struct cam_jpeg_enc_device_core_info *core_info; + + soc_info = &jpeg_enc_dev->soc_info; + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + hw_info = core_info->jpeg_enc_hw_info; + mutex_lock(&core_info->core_mutex); + spin_lock(&jpeg_enc_dev->hw_lock); + + if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_ERR(CAM_JPEG, "JPEG HW is in off state"); + spin_unlock(&jpeg_enc_dev->hw_lock); + mutex_unlock(&core_info->core_mutex); + return -EINVAL; + } + + spin_unlock(&jpeg_enc_dev->hw_lock); + + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_JPEG, "dump buffer overshoot %zu %zu", + dump_args->buf_len, dump_args->offset); + mutex_unlock(&core_info->core_mutex); + return -ENOSPC; + } + + remain_len = dump_args->buf_len - dump_args->offset; + min_len = sizeof(struct cam_jpeg_hw_dump_header) + + soc_info->reg_map[0].size + sizeof(uint32_t); + if (remain_len < min_len) { + CAM_WARN(CAM_JPEG, "dump buffer exhaust %zu %u", + remain_len, min_len); + mutex_unlock(&core_info->core_mutex); + return -ENOSPC; + } + + dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_jpeg_hw_dump_header *)dst; + snprintf(hdr->tag, CAM_JPEG_HW_DUMP_TAG_MAX_LEN, + "JPEG_REG:"); + hdr->word_size = sizeof(uint32_t); + addr = (uint32_t *)(dst + sizeof(struct cam_jpeg_hw_dump_header)); + start = addr; + *addr++ = soc_info->index; + num_reg = (hw_info->reg_dump.end_offset - + hw_info->reg_dump.start_offset)/4; + reg_start_offset = hw_info->reg_dump.start_offset; + for (i = 0; i < num_reg; i++) { + *addr++ = soc_info->mem_block[0]->start + + reg_start_offset + i*4; + *addr++ = cam_io_r(soc_info->reg_map[0].mem_base + (i*4)); + } + + mutex_unlock(&core_info->core_mutex); + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_jpeg_hw_dump_header); + CAM_DBG(CAM_JPEG, "offset 
%zu", dump_args->offset); + + return 0; +} + +static int cam_jpeg_enc_mini_dump(struct cam_hw_info *dev, void *args) { + + struct cam_jpeg_mini_dump_core_info *md; + struct cam_jpeg_enc_device_hw_info *hw_info; + struct cam_jpeg_enc_device_core_info *core_info; + + if (!dev || !args) { + CAM_ERR(CAM_JPEG, "Invalid params priv %pK args %pK", dev, args); + return -EINVAL; + } + + core_info = (struct cam_jpeg_enc_device_core_info *)dev->core_info; + hw_info = core_info->jpeg_enc_hw_info; + md = (struct cam_jpeg_mini_dump_core_info *)args; + + md->framedone = hw_info->int_status.framedone; + md->resetdone = hw_info->int_status.resetdone; + md->iserror = hw_info->int_status.iserror; + md->stopdone = hw_info->int_status.stopdone; + md->open_count = dev->open_count; + md->hw_state = dev->hw_state; + md->ref_count = core_info->ref_count; + md->core_state = core_info->core_state; + return 0; +} + +int cam_jpeg_enc_dump_camnoc_misr_val(struct cam_jpeg_enc_device_hw_info *hw_info, + struct cam_hw_soc_info *soc_info, void *cmd_args) +{ + void __iomem *enc_mem_base = NULL; + void __iomem *camnoc_mem_base = NULL; + struct cam_jpeg_misr_dump_args *pmisr_args; + int32_t val; + uint32_t index = 0; + int i; + bool mismatch = false; + int32_t camnoc_misr_val[CAM_JPEG_CAMNOC_MISR_VAL_ROW][ + CAM_JPEG_CAMNOC_MISR_VAL_COL] = {{0}}; + + enc_mem_base = soc_info->reg_map[0].mem_base; + camnoc_mem_base = soc_info->reg_map[1].mem_base; + if (!camnoc_mem_base) { + CAM_ERR(CAM_JPEG, "Invalid camnoc base address"); + return -EINVAL; + } + pmisr_args = (struct cam_jpeg_misr_dump_args *)cmd_args; + if (!pmisr_args) { + CAM_ERR(CAM_JPEG, "Invalid command argument"); + return -EINVAL; + } + + val = cam_io_r_mb(enc_mem_base + hw_info->reg_offset.core_cfg); + index = (val >> hw_info->int_status.scale_enable_shift) & + hw_info->int_status.scale_enable; + CAM_DBG(CAM_JPEG, "index %d", index); + + for (i = 0; i < hw_info->camnoc_misr_sigdata; i++) { + camnoc_misr_val[index][i] = 
cam_io_r_mb(camnoc_mem_base + + hw_info->camnoc_misr_reg_offset.sigdata0 + (i * 8)); + if (hw_info->prev_camnoc_misr_val[index][i] != camnoc_misr_val[index][i]) + mismatch = true; + } + if (mismatch && (pmisr_args->req_id != 1)) { + CAM_ERR(CAM_JPEG, + "CAMNOC ENC_MISR MISMATCH [req:%d][i:%d][index:%d]\n" + "curr SigData:0x%x %x %x %x prev SigData:0x%x %x %x %x isbug:%d", + pmisr_args->req_id, i, index, + camnoc_misr_val[index][3], camnoc_misr_val[index][2], + camnoc_misr_val[index][1], camnoc_misr_val[index][0], + hw_info->prev_camnoc_misr_val[index][3], + hw_info->prev_camnoc_misr_val[index][2], + hw_info->prev_camnoc_misr_val[index][1], + hw_info->prev_camnoc_misr_val[index][0], pmisr_args->enable_bug); + if (pmisr_args->enable_bug) + BUG_ON(1); + } + CAM_DBG(CAM_JPEG, + "CAMNOC ENC MISR req:%d SigData:0x%x %x %x %x", + pmisr_args->req_id, + camnoc_misr_val[index][3], camnoc_misr_val[index][2], + camnoc_misr_val[index][1], camnoc_misr_val[index][0]); + for (i = 0; i < hw_info->camnoc_misr_sigdata; i++) + hw_info->prev_camnoc_misr_val[index][i] = camnoc_misr_val[index][i]; + /* stop misr : cam_noc_cam_noc_0_req_link_misrprb_MiscCtl_Low */ + cam_io_w_mb(hw_info->camnoc_misr_reg_val.misc_ctl_stop, + camnoc_mem_base + hw_info->camnoc_misr_reg_offset.misc_ctl); + return 0; +} + +int cam_jpeg_enc_dump_hw_misr_val(struct cam_jpeg_enc_device_hw_info *hw_info, + struct cam_hw_soc_info *soc_info, void *cmd_args) +{ + void __iomem *enc_mem_base = NULL; + void __iomem *camnoc_mem_base = NULL; + struct cam_jpeg_misr_dump_args *pmisr_args; + int32_t val; + uint32_t index = 0; + int offset, i, j; + bool mismatch = false; + int32_t enc_misr_val[CAM_JPEG_ENC_MISR_VAL_NUM][CAM_JPEG_CAMNOC_MISR_VAL_ROW][ + CAM_JPEG_CAMNOC_MISR_VAL_COL] = {{{0}}}; + + enc_mem_base = soc_info->reg_map[0].mem_base; + camnoc_mem_base = soc_info->reg_map[1].mem_base; + if (!camnoc_mem_base) { + CAM_ERR(CAM_JPEG, "Invalid camnoc base address"); + return -EINVAL; + } + pmisr_args = (struct 
cam_jpeg_misr_dump_args *)cmd_args; + if (!pmisr_args) { + CAM_ERR(CAM_JPEG, "Invalid command argument"); + return -EINVAL; + } + + val = cam_io_r_mb(enc_mem_base + hw_info->reg_offset.core_cfg); + index = (val >> hw_info->int_status.scale_enable_shift) & + hw_info->int_status.scale_enable; + CAM_DBG(CAM_JPEG, "index %d", index); + + for (i = 0; i < hw_info->max_misr; i++) { + offset = hw_info->reg_offset.misr_rd0 + (i * 0x10); + for (j = 0; j < hw_info->max_misr_rd; j++) { + enc_misr_val[i][index][j] = cam_io_r_mb(enc_mem_base + + offset + (j * 4)); + if (hw_info->prev_enc_misr_val[i][index][j] != + enc_misr_val[i][index][j]) + mismatch = true; + } + if (mismatch && (pmisr_args->req_id != 1)) { + CAM_ERR(CAM_JPEG, + "ENC_MISR RD MISMATCH [req:%d][i:%d][index:%d][j:%d]\n" + "curr:0x%x %x %x %x prev:0x%x %x %x %x isbug:%d", + pmisr_args->req_id, i, index, j, enc_misr_val[i][index][3], + enc_misr_val[i][index][2], enc_misr_val[i][index][1], + enc_misr_val[i][index][0], hw_info->prev_enc_misr_val[i][index][3], + hw_info->prev_enc_misr_val[i][index][2], + hw_info->prev_enc_misr_val[i][index][1], + hw_info->prev_enc_misr_val[i][index][0], pmisr_args->enable_bug); + if (pmisr_args->enable_bug) + BUG_ON(1); + } + CAM_DBG(CAM_JPEG, "ENC_MISR RD [req:%d][%d]: 0x%x %x %x %x", + pmisr_args->req_id, i, + enc_misr_val[i][index][3], enc_misr_val[i][index][2], + enc_misr_val[i][index][1], enc_misr_val[i][index][0]); + mismatch = false; + + for (j = 0; j < hw_info->max_misr_rd; j++) + hw_info->prev_enc_misr_val[i][index][j] = enc_misr_val[i][index][j]; + } + + return 0; +} + +int cam_jpeg_enc_config_cmanoc_hw_misr(struct cam_jpeg_enc_device_hw_info *hw_info, + struct cam_hw_soc_info *soc_info, void *cmd_args) +{ + void __iomem *enc_mem_base = NULL; + void __iomem *camnoc_mem_base = NULL; + uint32_t *camnoc_misr_test = NULL; + int val = 0; + + enc_mem_base = soc_info->reg_map[0].mem_base; + camnoc_mem_base = soc_info->reg_map[1].mem_base; + if (!camnoc_mem_base) { + 
CAM_ERR(CAM_JPEG, "Invalid camnoc base address"); + return -EINVAL; + } + camnoc_misr_test = (uint32_t *)cmd_args; + if (!camnoc_misr_test) { + CAM_ERR(CAM_JPEG, "Invalid command argument"); + return -EINVAL; + } + + /* enable all MISRs */ + cam_io_w_mb(hw_info->reg_val.misr_cfg, enc_mem_base + + hw_info->reg_offset.misr_cfg); + + /* cam_noc_cam_noc_0_req_link_misrprb_MainCtl_Low + * enable CRC generation on both RD, WR and transaction payload + */ + cam_io_w_mb(hw_info->camnoc_misr_reg_val.main_ctl, camnoc_mem_base + + hw_info->camnoc_misr_reg_offset.main_ctl); + + /* cam_noc_cam_noc_0_req_link_misrprb_IdMask_Low */ + cam_io_w_mb(hw_info->camnoc_misr_reg_val.main_ctl, camnoc_mem_base + + hw_info->camnoc_misr_reg_offset.id_mask_low); + + /* cam_noc_cam_noc_0_req_link_misrprb_IdValue_Low */ + switch (*camnoc_misr_test) { + case CAM_JPEG_MISR_ID_LOW_RD: + val = hw_info->camnoc_misr_reg_val.id_value_low_rd; + break; + case CAM_JPEG_MISR_ID_LOW_WR: + val = hw_info->camnoc_misr_reg_val.id_value_low_wr; + break; + default: + val = hw_info->camnoc_misr_reg_val.id_value_low_rd; + break; + } + cam_io_w_mb(val, camnoc_mem_base + + hw_info->camnoc_misr_reg_offset.id_value_low); + + /* start/reset misr : cam_noc_cam_noc_0_req_link_misrprb_MiscCtl_Low */ + cam_io_w_mb(hw_info->camnoc_misr_reg_val.misc_ctl_start, + camnoc_mem_base + hw_info->camnoc_misr_reg_offset.misc_ctl); + CAM_DBG(CAM_JPEG, "ENC CAMNOC MISR configured"); + + return 0; +} + +int cam_jpeg_enc_dump_debug_regs(struct cam_hw_info *jpeg_enc_dev) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + + soc_info = &jpeg_enc_dev->soc_info; + core_info = (struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->core_info; + + CAM_INFO(CAM_JPEG, "******** JPEG ENCODER REGISTER DUMP *********"); + + /* JPEG DMA TOP, Interrupt, core config, command registers & Fetch Engine Registers*/ + cam_soc_util_reg_dump(soc_info, CAM_JPEG_MEM_BASE_INDEX, + 
core_info->jpeg_enc_hw_info->debug_reg_offset.top_offset, + core_info->jpeg_enc_hw_info->debug_reg_offset.top_range); + + /* Write Engine & Encoder debug registers*/ + cam_soc_util_reg_dump(soc_info, CAM_JPEG_MEM_BASE_INDEX, + core_info->jpeg_enc_hw_info->debug_reg_offset.we_offset, + core_info->jpeg_enc_hw_info->debug_reg_offset.we_range); + + /* WE qos cfg, test bus, DMI, spare regs, bus misr, scale reg & MMU prefetch regs */ + cam_soc_util_reg_dump(soc_info, CAM_JPEG_MEM_BASE_INDEX, + core_info->jpeg_enc_hw_info->debug_reg_offset.scale_offset, + core_info->jpeg_enc_hw_info->debug_reg_offset.scale_range); + + /* Perf Registers */ + cam_soc_util_reg_dump(soc_info, CAM_JPEG_MEM_BASE_INDEX, + core_info->jpeg_enc_hw_info->debug_reg_offset.perf_offset, + core_info->jpeg_enc_hw_info->debug_reg_offset.perf_range); + return 0; +} + +int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *jpeg_enc_dev = device_priv; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + struct cam_jpeg_enc_device_hw_info *hw_info = NULL; + struct cam_jpeg_match_pid_args *match_pid_mid = NULL; + uint32_t *num_pid = NULL; + struct cam_hw_soc_info *soc_info = NULL; + int i, rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_JPEG, "Invalid arguments"); + return -EINVAL; + } + + if (cmd_type >= CAM_JPEG_CMD_MAX) { + CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type); + return -EINVAL; + } + + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + + hw_info = core_info->jpeg_enc_hw_info; + soc_info = &jpeg_enc_dev->soc_info; + + switch (cmd_type) { + case CAM_JPEG_CMD_SET_IRQ_CB: + { + struct cam_jpeg_set_irq_cb *irq_cb = cmd_args; + struct cam_jpeg_irq_cb_data *irq_cb_data; + + if (!cmd_args) { + CAM_ERR(CAM_JPEG, "cmd args NULL"); + return -EINVAL; + } + + irq_cb_data = &irq_cb->irq_cb_data; + spin_lock(&jpeg_enc_dev->hw_lock); + + if (irq_cb->b_set_cb) { + core_info->irq_cb.jpeg_hw_mgr_cb = 
irq_cb->jpeg_hw_mgr_cb; + core_info->irq_cb.irq_cb_data.private_data = irq_cb_data->private_data; + core_info->irq_cb.irq_cb_data.jpeg_req = irq_cb_data->jpeg_req; + } else { + core_info->irq_cb.jpeg_hw_mgr_cb = NULL; + core_info->irq_cb.irq_cb_data.private_data = NULL; + core_info->irq_cb.irq_cb_data.jpeg_req = NULL; + } + spin_unlock(&jpeg_enc_dev->hw_lock); + rc = 0; + break; + } + case CAM_JPEG_CMD_HW_DUMP: + { + rc = cam_jpeg_enc_hw_dump(jpeg_enc_dev, + cmd_args); + break; + } + case CAM_JPEG_CMD_GET_NUM_PID: + if (!cmd_args) { + CAM_ERR(CAM_JPEG, "cmd args NULL"); + return -EINVAL; + } + + num_pid = (uint32_t *)cmd_args; + *num_pid = core_info->num_pid; + + break; + case CAM_JPEG_CMD_MATCH_PID_MID: + match_pid_mid = (struct cam_jpeg_match_pid_args *)cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_JPEG, "cmd args NULL"); + return -EINVAL; + } + + for (i = 0 ; i < core_info->num_pid; i++) { + if (core_info->pid[i] == match_pid_mid->pid) + break; + } + + if (i == core_info->num_pid) + match_pid_mid->pid_match_found = false; + else + match_pid_mid->pid_match_found = true; + + if (match_pid_mid->pid_match_found) { + if (match_pid_mid->fault_mid == core_info->rd_mid) { + match_pid_mid->match_res = + CAM_JPEG_ENC_INPUT_IMAGE; + } else if (match_pid_mid->fault_mid == + core_info->wr_mid) { + match_pid_mid->match_res = + CAM_JPEG_ENC_OUTPUT_IMAGE; + } else + match_pid_mid->pid_match_found = false; + } + + break; + case CAM_JPEG_CMD_MINI_DUMP: + { + rc = cam_jpeg_enc_mini_dump(jpeg_enc_dev, cmd_args); + break; + } + case CAM_JPEG_CMD_CONFIG_HW_MISR: + { + if (hw_info->camnoc_misr_support) + rc = cam_jpeg_enc_config_cmanoc_hw_misr(hw_info, soc_info, cmd_args); + else + CAM_DBG(CAM_JPEG, "camnoc misr is not supported"); + break; + } + case CAM_JPEG_CMD_DUMP_HW_MISR_VAL: + { + if (hw_info->camnoc_misr_support) { + rc = cam_jpeg_enc_dump_hw_misr_val(hw_info, soc_info, cmd_args); + if (rc) + break; + rc = cam_jpeg_enc_dump_camnoc_misr_val(hw_info, soc_info, cmd_args); + } else 
{ + CAM_DBG(CAM_JPEG, "camnoc misr is not supported"); + } + break; + } + case CAM_JPEG_CMD_DUMP_DEBUG_REGS: + rc = cam_jpeg_enc_dump_debug_regs(jpeg_enc_dev); + break; + default: + rc = -EINVAL; + break; + } + if (rc) + CAM_ERR(CAM_JPEG, "error cmdtype %d rc = %d", cmd_type, rc); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h new file mode 100644 index 0000000000..3605211e77 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef CAM_JPEG_ENC_CORE_H +#define CAM_JPEG_ENC_CORE_H + +#include +#include +#include +#include + +#include "cam_jpeg_hw_intf.h" + +struct cam_jpeg_enc_reg_offsets { + uint32_t hw_version; + uint32_t int_status; + uint32_t int_clr; + uint32_t int_mask; + uint32_t hw_cmd; + uint32_t reset_cmd; + uint32_t encode_size; + uint32_t core_cfg; + uint32_t misr_cfg; + uint32_t misr_rd0; +}; + +struct cam_jpeg_enc_regval { + uint32_t int_clr_clearall; + uint32_t int_mask_disable_all; + uint32_t int_mask_enable_all; + uint32_t hw_cmd_start; + uint32_t reset_cmd; + uint32_t hw_cmd_stop; + uint32_t misr_cfg; +}; + +struct cam_jpeg_enc_int_status { + uint32_t framedone; + uint32_t resetdone; + uint32_t iserror; + uint32_t stopdone; + uint32_t scale_enable; + uint32_t scale_enable_shift; +}; + +struct cam_jpeg_enc_reg_dump { + uint32_t start_offset; + uint32_t end_offset; +}; + +struct cam_jpeg_enc_camnoc_misr_reg_offset { + uint32_t main_ctl; + uint32_t id_mask_low; + uint32_t id_value_low; + uint32_t misc_ctl; + uint32_t sigdata0; +}; + +struct cam_jpeg_enc_camnoc_misr_reg_val { + uint32_t main_ctl; + uint32_t id_mask_low; + 
uint32_t id_value_low_rd; + uint32_t id_value_low_wr; + uint32_t misc_ctl_start; + uint32_t misc_ctl_stop; +}; + +struct cam_jpeg_enc_debug_regs_offset { + uint32_t top_offset; + uint32_t top_range; + uint32_t we_offset; + uint32_t we_range; + uint32_t scale_offset; + uint32_t scale_range; + uint32_t perf_offset; + uint32_t perf_range; +}; + +struct cam_jpeg_enc_device_hw_info { + struct cam_jpeg_enc_reg_offsets reg_offset; + struct cam_jpeg_enc_debug_regs_offset debug_reg_offset; + struct cam_jpeg_enc_regval reg_val; + struct cam_jpeg_enc_int_status int_status; + struct cam_jpeg_enc_reg_dump reg_dump; + struct cam_jpeg_enc_camnoc_misr_reg_offset camnoc_misr_reg_offset; + struct cam_jpeg_enc_camnoc_misr_reg_val camnoc_misr_reg_val; + uint32_t max_misr; + uint32_t max_misr_rd; + uint32_t camnoc_misr_sigdata; + uint32_t camnoc_misr_support; + int32_t prev_camnoc_misr_val[ + CAM_JPEG_CAMNOC_MISR_VAL_ROW][CAM_JPEG_CAMNOC_MISR_VAL_COL]; + int32_t prev_enc_misr_val[CAM_JPEG_ENC_MISR_VAL_NUM][ + CAM_JPEG_CAMNOC_MISR_VAL_ROW][CAM_JPEG_CAMNOC_MISR_VAL_COL]; +}; + +enum cam_jpeg_enc_core_state { + CAM_JPEG_ENC_CORE_NOT_READY, + CAM_JPEG_ENC_CORE_READY, + CAM_JPEG_ENC_CORE_RESETTING, + CAM_JPEG_ENC_CORE_ABORTING, + CAM_JPEG_ENC_CORE_STATE_MAX, +}; + +struct cam_jpeg_enc_device_core_info { + enum cam_jpeg_enc_core_state core_state; + struct cam_jpeg_enc_device_hw_info *jpeg_enc_hw_info; + uint32_t cpas_handle; + struct cam_jpeg_set_irq_cb irq_cb; + int32_t ref_count; + struct mutex core_mutex; + uint32_t num_pid; + uint32_t pid[CAM_JPEG_HW_MAX_NUM_PID]; + uint32_t rd_mid; + uint32_t wr_mid; +}; + +int cam_jpeg_enc_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_jpeg_enc_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_jpeg_enc_start_hw(void *device_priv, + void *start_hw_args, uint32_t arg_size); +int cam_jpeg_enc_stop_hw(void *device_priv, + void *stop_hw_args, uint32_t arg_size); +int cam_jpeg_enc_reset_hw(void 
*device_priv, + void *reset_hw_args, uint32_t arg_size); +int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +int cam_jpeg_enc_test_irq_line(void *data); + +irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data); + +/** + * @brief : API to register JPEG ENC hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_jpeg_enc_init_module(void); + +/** + * @brief : API to remove JPEG ENC Hw from platform framework. + */ +void cam_jpeg_enc_exit_module(void); +#endif /* CAM_JPEG_ENC_CORE_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c new file mode 100644 index 0000000000..d49e838ae9 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "jpeg_enc_core.h" +#include "jpeg_enc_soc.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_io_util.h" +#include "cam_jpeg_hw_intf.h" +#include "cam_jpeg_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_jpeg_enc_hw_info_ver_4_2_0.h" +#include "cam_jpeg_enc_165_hw_info_ver_4_2_0.h" +#include "cam_jpeg_enc_580_hw_info_ver_4_2_0.h" +#include "cam_jpeg_enc_680_hw_info_ver_4_2_0.h" +#include "cam_jpeg_enc_780_hw_info_ver_4_2_0.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static int cam_jpeg_enc_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_jpeg_enc_device_core_info *core_info, + uint32_t hw_idx) +{ + struct cam_cpas_register_params cpas_register_params; + int rc; + + cpas_register_params.dev = soc_info->dev; + memcpy(cpas_register_params.identifier, "jpeg-enc", + sizeof("jpeg-enc")); + cpas_register_params.cam_cpas_client_cb = NULL; + cpas_register_params.cell_index = hw_idx; + cpas_register_params.userdata = NULL; + + rc = cam_cpas_register_client(&cpas_register_params); + if (rc) { + CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc); + return rc; + } + core_info->cpas_handle = cpas_register_params.client_handle; + + return rc; +} + +static int cam_jpeg_enc_unregister_cpas( + struct cam_jpeg_enc_device_core_info *core_info) +{ + int rc; + + rc = cam_cpas_unregister_client(core_info->cpas_handle); + if (rc) + CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc); + core_info->cpas_handle = 0; + + return rc; +} + +static int cam_jpeg_enc_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *jpeg_enc_dev = NULL; + struct cam_hw_intf *jpeg_enc_dev_intf = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + struct cam_jpeg_enc_device_hw_info *hw_info = NULL; + struct platform_device *pdev = 
to_platform_device(dev); + struct cam_jpeg_enc_soc_private *soc_private; + int i; + int rc; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + jpeg_enc_dev_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!jpeg_enc_dev_intf) + return -ENOMEM; + + of_property_read_u32(pdev->dev.of_node, + "cell-index", &jpeg_enc_dev_intf->hw_idx); + + jpeg_enc_dev = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!jpeg_enc_dev) { + rc = -ENOMEM; + goto error_alloc_dev; + } + jpeg_enc_dev->soc_info.pdev = pdev; + jpeg_enc_dev->soc_info.dev = &pdev->dev; + jpeg_enc_dev->soc_info.dev_name = pdev->name; + jpeg_enc_dev_intf->hw_priv = jpeg_enc_dev; + jpeg_enc_dev_intf->hw_ops.init = cam_jpeg_enc_init_hw; + jpeg_enc_dev_intf->hw_ops.deinit = cam_jpeg_enc_deinit_hw; + jpeg_enc_dev_intf->hw_ops.start = cam_jpeg_enc_start_hw; + jpeg_enc_dev_intf->hw_ops.stop = cam_jpeg_enc_stop_hw; + jpeg_enc_dev_intf->hw_ops.reset = cam_jpeg_enc_reset_hw; + jpeg_enc_dev_intf->hw_ops.process_cmd = cam_jpeg_enc_process_cmd; + jpeg_enc_dev_intf->hw_ops.test_irq_line = cam_jpeg_enc_test_irq_line; + jpeg_enc_dev_intf->hw_type = CAM_JPEG_DEV_ENC; + + platform_set_drvdata(pdev, jpeg_enc_dev_intf); + jpeg_enc_dev->core_info = + CAM_MEM_ZALLOC(sizeof(struct cam_jpeg_enc_device_core_info), + GFP_KERNEL); + if (!jpeg_enc_dev->core_info) { + rc = -ENOMEM; + goto error_alloc_core; + } + core_info = (struct cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_JPEG, " No jpeg_enc hardware info"); + rc = -EINVAL; + goto error_match_dev; + } + hw_info = (struct cam_jpeg_enc_device_hw_info *)match_dev->data; + core_info->jpeg_enc_hw_info = hw_info; + core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY; + mutex_init(&core_info->core_mutex); + + rc = cam_jpeg_enc_init_soc_resources(&jpeg_enc_dev->soc_info, + cam_jpeg_enc_irq, + 
jpeg_enc_dev); + if (rc) { + CAM_ERR(CAM_JPEG, " failed to init_soc %d", rc); + goto error_init_soc; + } + + rc = cam_jpeg_enc_register_cpas(&jpeg_enc_dev->soc_info, + core_info, jpeg_enc_dev_intf->hw_idx); + if (rc) { + CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc); + goto error_reg_cpas; + } + jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + mutex_init(&jpeg_enc_dev->hw_mutex); + spin_lock_init(&jpeg_enc_dev->hw_lock); + init_completion(&jpeg_enc_dev->hw_complete); + CAM_DBG(CAM_JPEG, "JPEG-Encoder component bound successfully"); + + soc_private = (struct cam_jpeg_enc_soc_private *) + jpeg_enc_dev->soc_info.soc_private; + + core_info->num_pid = soc_private->num_pid; + for (i = 0; i < soc_private->num_pid; i++) + core_info->pid[i] = soc_private->pid[i]; + + core_info->rd_mid = soc_private->rd_mid; + core_info->wr_mid = soc_private->wr_mid; + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +error_reg_cpas: + cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info); +error_init_soc: + mutex_destroy(&core_info->core_mutex); +error_match_dev: + CAM_MEM_FREE(jpeg_enc_dev->core_info); +error_alloc_core: + CAM_MEM_FREE(jpeg_enc_dev); +error_alloc_dev: + CAM_MEM_FREE(jpeg_enc_dev_intf); + + return rc; +} + +static void cam_jpeg_enc_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *jpeg_enc_dev = NULL; + struct cam_hw_intf *jpeg_enc_dev_intf = NULL; + struct cam_jpeg_enc_device_core_info *core_info = NULL; + int rc; + struct platform_device *pdev = to_platform_device(dev); + + jpeg_enc_dev_intf = platform_get_drvdata(pdev); + if (!jpeg_enc_dev_intf) { + CAM_ERR(CAM_JPEG, "error No data in pdev"); + return; + } + + jpeg_enc_dev = jpeg_enc_dev_intf->hw_priv; + if (!jpeg_enc_dev) { + CAM_ERR(CAM_JPEG, "error HW data is NULL"); + goto free_jpeg_hw_intf; + } + + core_info = (struct 
cam_jpeg_enc_device_core_info *) + jpeg_enc_dev->core_info; + if (!core_info) { + CAM_ERR(CAM_JPEG, "error core data NULL"); + goto deinit_soc; + } + + rc = cam_jpeg_enc_unregister_cpas(core_info); + if (rc) + CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc); + + mutex_destroy(&core_info->core_mutex); + CAM_MEM_FREE(core_info); + +deinit_soc: + rc = cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info); + if (rc) + CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc); + + mutex_destroy(&jpeg_enc_dev->hw_mutex); + CAM_MEM_FREE(jpeg_enc_dev); + +free_jpeg_hw_intf: + CAM_MEM_FREE(jpeg_enc_dev_intf); +} + +const static struct component_ops cam_jpeg_enc_component_ops = { + .bind = cam_jpeg_enc_component_bind, + .unbind = cam_jpeg_enc_component_unbind, +}; + +static int cam_jpeg_enc_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_jpeg_enc_component_ops); + return 0; +} + +static int cam_jpeg_enc_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_JPEG, "Adding JPEG component"); + rc = component_add(&pdev->dev, &cam_jpeg_enc_component_ops); + if (rc) + CAM_ERR(CAM_JPEG, "failed to add component rc: %d", rc); + + return rc; +} + +static const struct of_device_id cam_jpeg_enc_dt_match[] = { + { + .compatible = "qcom,cam_jpeg_enc", + .data = &cam_jpeg_enc_hw_info, + }, + { + .compatible = "qcom,cam_jpeg_enc_165", + .data = &cam_jpeg_enc_165_hw_info, + }, + { + .compatible = "qcom,cam_jpeg_enc_580", + .data = &cam_jpeg_enc_580_hw_info, + }, + { + .compatible = "qcom,cam_jpeg_enc_680", + .data = &cam_jpeg_enc_680_hw_info, + }, + { + .compatible = "qcom,cam_jpeg_enc_780", + .data = &cam_jpeg_enc_780_hw_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_jpeg_enc_dt_match); + +struct platform_driver cam_jpeg_enc_driver = { + .probe = cam_jpeg_enc_probe, + .remove = cam_jpeg_enc_remove, + .driver = { + .name = "cam-jpeg-enc", + .owner = THIS_MODULE, + .of_match_table = cam_jpeg_enc_dt_match, + .suppress_bind_attrs = true, + }, 
+}; + +int cam_jpeg_enc_init_module(void) +{ + return platform_driver_register(&cam_jpeg_enc_driver); +} + +void cam_jpeg_enc_exit_module(void) +{ + platform_driver_unregister(&cam_jpeg_enc_driver); +} + +MODULE_DESCRIPTION("CAM JPEG_ENC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c new file mode 100644 index 0000000000..de0b1e05ac --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "jpeg_enc_soc.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t jpeg_enc_irq_handler, void *data) +{ + struct cam_jpeg_enc_soc_private *soc_private; + struct platform_device *pdev = NULL; + int num_pid = 0, i = 0; + int rc; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_jpeg_enc_soc_private), + GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_ISP, "Error! 
soc_private Alloc Failed"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) + return rc; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, + jpeg_enc_irq_handler, &(irq_data[0])); + if (rc) + CAM_ERR(CAM_JPEG, "init soc failed %d", rc); + + soc_private->num_pid = 0; + soc_private->rd_mid = UINT_MAX; + soc_private->wr_mid = UINT_MAX; + + pdev = soc_info->pdev; + num_pid = of_property_count_u32_elems(pdev->dev.of_node, "cam_hw_pid"); + CAM_DBG(CAM_JPEG, "jpeg enc:%d pid count %d", soc_info->index, num_pid); + + if (num_pid <= 0 || num_pid > CAM_JPEG_HW_MAX_NUM_PID) + goto end; + + soc_private->num_pid = num_pid; + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(pdev->dev.of_node, "cam_hw_pid", i, + &soc_private->pid[i]); + + of_property_read_u32(pdev->dev.of_node, + "cam_hw_rd_mid", &soc_private->rd_mid); + + of_property_read_u32(pdev->dev.of_node, + "cam_hw_wr_mid", &soc_private->wr_mid); +end: + return rc; +} + +int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + CAM_SVS_VOTE, true); + if (rc) + CAM_ERR(CAM_JPEG, "enable platform failed %d", rc); + + return rc; +} + +int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) + CAM_ERR(CAM_JPEG, "disable platform failed %d", rc); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h new file mode 100644 index 0000000000..6e7938c87b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h @@ -0,0 +1,36 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, 2021 The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_JPEG_ENC_SOC_H_ +#define _CAM_JPEG_ENC_SOC_H_ + +#include "cam_soc_util.h" +#include "cam_jpeg_hw_intf.h" + +/* + * struct cam_jpeg_enc_soc_private: + * + * @Brief: Private SOC data specific to JPEG ENC Driver + * + * @num_pid JPEG number of pids + * @pid: JPEG enc pid value list + * @rd_mid: JPEG enc inport read mid value + * @wr_mid: JPEG enc outport write mid value + */ +struct cam_jpeg_enc_soc_private { + uint32_t num_pid; + uint32_t pid[CAM_JPEG_HW_MAX_NUM_PID]; + uint32_t rd_mid; + uint32_t wr_mid; +}; + +int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t jpeg_enc_irq_handler, void *irq_data); + +int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info); + +int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info); + +#endif /* _CAM_JPEG_ENC_SOC_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_context.c b/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_context.c new file mode 100644 index 0000000000..cc55892713 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_context.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include + +#include "cam_debug_util.h" +#include "cam_lrme_context.h" + +static const char lrme_dev_name[] = "cam-lrme"; + +static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *args) +{ + int rc = 0; + uintptr_t ctxt_to_hw_map = (uintptr_t)ctx->ctxt_to_hw_map; + struct cam_lrme_context *lrme_ctx = ctx->ctx_priv; + + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + + rc = cam_context_acquire_dev_to_hw(ctx, args); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to acquire"); + return rc; + } + + ctxt_to_hw_map |= (lrme_ctx->index << CAM_LRME_CTX_INDEX_SHIFT); + ctx->ctxt_to_hw_map = (void *)ctxt_to_hw_map; + + ctx->state = CAM_CTX_ACQUIRED; + + return rc; +} + +static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + + rc = cam_context_release_dev_to_hw(ctx, cmd); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to release"); + return rc; + } + + ctx->state = CAM_CTX_AVAILABLE; + + return rc; +} + +static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + + rc = cam_context_start_dev_to_hw(ctx, cmd); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to start"); + return rc; + } + + ctx->state = CAM_CTX_ACTIVATED; + + return rc; +} + +static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc; + + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + + rc = cam_context_prepare_dev_to_hw(ctx, cmd); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to config"); + return rc; + } + + return rc; +} + +static int __cam_lrme_ctx_dump_dev_in_activated( + struct cam_context *ctx, + struct cam_dump_req_cmd *cmd) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + + rc = cam_context_dump_dev_to_hw(ctx, cmd); + if 
(rc) + CAM_ERR(CAM_LRME, "Failed to dump device"); + + return rc; +} + +static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd) +{ + int rc; + struct cam_context_utils_flush_args flush_args; + + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + + flush_args.cmd = cmd; + flush_args.flush_active_req = true; + + rc = cam_context_flush_dev_to_hw(ctx, &flush_args); + if (rc) + CAM_ERR(CAM_LRME, "Failed to flush device"); + + return rc; +} +static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + + rc = cam_context_stop_dev_to_hw(ctx); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to stop dev"); + return rc; + } + + ctx->state = CAM_CTX_ACQUIRED; + + return rc; +} + +static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id); + + rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to stop"); + return rc; + } + + rc = cam_context_release_dev_to_hw(ctx, cmd); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to release"); + return rc; + } + + ctx->state = CAM_CTX_AVAILABLE; + + return rc; +} + +static int __cam_lrme_ctx_handle_irq_in_activated(void *context, + uint32_t evt_id, void *evt_data) +{ + int rc; + + CAM_DBG(CAM_LRME, "Enter"); + + rc = cam_context_buf_done_from_hw(context, evt_data, evt_id); + if (rc) { + CAM_ERR(CAM_LRME, "Failed in buf done, rc=%d", rc); + return rc; + } + + return rc; +} + +/* top state machine */ +static struct cam_ctx_ops + cam_lrme_ctx_state_machine[CAM_CTX_STATE_MAX] = { + /* Uninit */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Available */ + { + .ioctl_ops = { + .acquire_dev = __cam_lrme_ctx_acquire_dev_in_available, + }, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Acquired */ + { + .ioctl_ops = 
{ + .config_dev = __cam_lrme_ctx_config_dev_in_activated, + .release_dev = __cam_lrme_ctx_release_dev_in_acquired, + .start_dev = __cam_lrme_ctx_start_dev_in_acquired, + }, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Ready */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Flushed */ + { + .ioctl_ops = {}, + }, + /* Activate */ + { + .ioctl_ops = { + .config_dev = __cam_lrme_ctx_config_dev_in_activated, + .release_dev = __cam_lrme_ctx_release_dev_in_activated, + .stop_dev = __cam_lrme_ctx_stop_dev_in_activated, + .flush_dev = __cam_lrme_ctx_flush_dev_in_activated, + .dump_dev = __cam_lrme_ctx_dump_dev_in_activated, + }, + .crm_ops = {}, + .irq_ops = __cam_lrme_ctx_handle_irq_in_activated, + }, +}; + +int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx, + struct cam_context *base_ctx, + struct cam_hw_mgr_intf *hw_intf, + uint32_t index, + int img_iommu_hdl) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Enter"); + + if (!base_ctx || !lrme_ctx) { + CAM_ERR(CAM_LRME, "Invalid input"); + return -EINVAL; + } + + memset(lrme_ctx, 0, sizeof(*lrme_ctx)); + + rc = cam_context_init(base_ctx, lrme_dev_name, CAM_LRME, index, + NULL, hw_intf, lrme_ctx->req_base, CAM_CTX_REQ_MAX, img_iommu_hdl); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to init context"); + return rc; + } + lrme_ctx->base = base_ctx; + lrme_ctx->index = index; + base_ctx->ctx_priv = lrme_ctx; + base_ctx->state_machine = cam_lrme_ctx_state_machine; + + base_ctx->max_hw_update_entries = CAM_CTX_CFG_MAX; + base_ctx->max_in_map_entries = CAM_CTX_CFG_MAX; + base_ctx->max_out_map_entries = CAM_CTX_CFG_MAX; + + return rc; +} + +int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Enter"); + + if (!lrme_ctx) { + CAM_ERR(CAM_LRME, "No ctx to deinit"); + return -EINVAL; + } + + rc = cam_context_deinit(lrme_ctx->base); + + memset(lrme_ctx, 0, sizeof(*lrme_ctx)); + return rc; +} diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_context.h b/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_context.h new file mode 100644 index 0000000000..abc831f414 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_context.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_LRME_CONTEXT_H_ +#define _CAM_LRME_CONTEXT_H_ + +#include "cam_context.h" +#include "cam_context_utils.h" +#include "cam_hw_mgr_intf.h" +#include "cam_req_mgr_interface.h" + +#define CAM_LRME_CTX_INDEX_SHIFT 16 + +/** + * struct cam_lrme_context + * + * @base : Base context pointer for this LRME context + * @req_base : List of base request for this LRME context + */ +struct cam_lrme_context { + struct cam_context *base; + struct cam_ctx_request req_base[CAM_CTX_REQ_MAX]; + uint64_t index; +}; + +int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx, + struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf, + uint32_t index, int img_iommu_hdl); +int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx); + +#endif /* _CAM_LRME_CONTEXT_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_dev.c b/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_dev.c new file mode 100644 index 0000000000..eaab01ec7f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_dev.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "cam_subdev.h" +#include "cam_node.h" +#include "cam_lrme_context.h" +#include "cam_lrme_hw_mgr.h" +#include "cam_lrme_hw_mgr_intf.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +#define CAM_LRME_DEV_NAME "cam-lrme" + +/** + * struct cam_lrme_dev + * + * @sd : Subdev information + * @ctx : List of base contexts + * @lrme_ctx : List of LRME contexts + * @lock : Mutex for LRME subdev + * @open_cnt : Open count of LRME subdev + */ +struct cam_lrme_dev { + struct cam_subdev sd; + struct cam_context ctx[CAM_CTX_MAX]; + struct cam_lrme_context lrme_ctx[CAM_CTX_MAX]; + struct mutex lock; + uint32_t open_cnt; +}; + +static struct cam_lrme_dev *g_lrme_dev; + +static int cam_lrme_dev_buf_done_cb(void *ctxt_to_hw_map, uint32_t evt_id, + void *evt_data) +{ + uint64_t index; + struct cam_context *ctx; + int rc; + + index = CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map); + CAM_DBG(CAM_LRME, "ctx index %llu, evt_id %u\n", index, evt_id); + ctx = &g_lrme_dev->ctx[index]; + rc = ctx->irq_cb_intf(ctx, evt_id, evt_data); + if (rc) + CAM_ERR(CAM_LRME, "irq callback failed"); + + return rc; +} + +static int cam_lrme_dev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_lrme_dev *lrme_dev = g_lrme_dev; + + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_LOCK); + + if (!lrme_dev) { + CAM_ERR(CAM_LRME, + "LRME Dev not initialized, dev=%pK", lrme_dev); + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + return -ENODEV; + } + + mutex_lock(&lrme_dev->lock); + lrme_dev->open_cnt++; + mutex_unlock(&lrme_dev->lock); + + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + + return 0; +} + +static int cam_lrme_dev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + int rc = 0; + struct cam_lrme_dev *lrme_dev = g_lrme_dev; + struct cam_node *node = v4l2_get_subdevdata(sd); + + if (!lrme_dev) { + CAM_ERR(CAM_LRME, "Invalid args"); + return -ENODEV; + } + + 
mutex_lock(&lrme_dev->lock); + if (lrme_dev->open_cnt <= 0) { + CAM_DBG(CAM_LRME, "LRME subdev is already closed"); + rc = -EINVAL; + goto end; + } + + lrme_dev->open_cnt--; + if (!node) { + CAM_ERR(CAM_LRME, "Node is NULL"); + rc = -EINVAL; + goto end; + } + + if (lrme_dev->open_cnt == 0) + cam_node_shutdown(node); + +end: + mutex_unlock(&lrme_dev->lock); + return rc; +} + +static int cam_lrme_dev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_LRME, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + return cam_lrme_dev_close_internal(sd, fh); +} + +static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = { + .open = cam_lrme_dev_open, + .close = cam_lrme_dev_close, +}; + +static int cam_lrme_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc; + int i; + struct cam_hw_mgr_intf hw_mgr_intf; + struct cam_node *node; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + g_lrme_dev = CAM_MEM_ZALLOC(sizeof(struct cam_lrme_dev), GFP_KERNEL); + if (!g_lrme_dev) { + CAM_ERR(CAM_LRME, "No memory"); + return -ENOMEM; + } + g_lrme_dev->sd.internal_ops = &cam_lrme_subdev_internal_ops; + g_lrme_dev->sd.close_seq_prior = CAM_SD_CLOSE_MEDIUM_PRIORITY; + + mutex_init(&g_lrme_dev->lock); + + rc = cam_subdev_probe(&g_lrme_dev->sd, pdev, CAM_LRME_DEV_NAME, + CAM_LRME_DEVICE_TYPE); + if (rc) { + CAM_ERR(CAM_LRME, "LRME cam_subdev_probe failed"); + goto free_mem; + } + node = (struct cam_node *)g_lrme_dev->sd.token; + + rc = cam_lrme_hw_mgr_init(&hw_mgr_intf, cam_lrme_dev_buf_done_cb); + if (rc) { + CAM_ERR(CAM_LRME, "Can not initialized LRME HW manager"); + goto unregister; + } + + for (i = 0; i < CAM_CTX_MAX; i++) { + rc = cam_lrme_context_init(&g_lrme_dev->lrme_ctx[i], + &g_lrme_dev->ctx[i], + &node->hw_mgr_intf, i, -1); + if 
(rc) { + CAM_ERR(CAM_LRME, "LRME context init failed"); + goto deinit_ctx; + } + } + + rc = cam_node_init(node, &hw_mgr_intf, g_lrme_dev->ctx, CAM_CTX_MAX, + CAM_LRME_DEV_NAME); + if (rc) { + CAM_ERR(CAM_LRME, "LRME node init failed"); + goto deinit_ctx; + } + + node->sd_handler = cam_lrme_dev_close_internal; + CAM_DBG(CAM_LRME, "Component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return 0; + +deinit_ctx: + for (--i; i >= 0; i--) { + if (cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i])) + CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i); + } +unregister: + if (cam_subdev_remove(&g_lrme_dev->sd)) + CAM_ERR(CAM_LRME, "Failed in subdev remove"); +free_mem: + CAM_MEM_FREE(g_lrme_dev); + + return rc; +} + +static void cam_lrme_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + int rc = 0; + + for (i = 0; i < CAM_CTX_MAX; i++) { + rc = cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]); + if (rc) + CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i); + } + + rc = cam_lrme_hw_mgr_deinit(); + if (rc) + CAM_ERR(CAM_LRME, "Failed in hw mgr deinit, rc=%d", rc); + + rc = cam_subdev_remove(&g_lrme_dev->sd); + if (rc) + CAM_ERR(CAM_LRME, "Unregister failed rc: %d", rc); + + mutex_destroy(&g_lrme_dev->lock); + CAM_MEM_FREE(g_lrme_dev); + g_lrme_dev = NULL; +} + +const static struct component_ops cam_lrme_component_ops = { + .bind = cam_lrme_component_bind, + .unbind = cam_lrme_component_unbind, +}; + +static int cam_lrme_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Adding LRME component"); + rc = component_add(&pdev->dev, &cam_lrme_component_ops); + if (rc) + CAM_ERR(CAM_LRME, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_lrme_dev_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_lrme_component_ops); + return 0; +} + +static 
const struct of_device_id cam_lrme_dt_match[] = { + { + .compatible = "qcom,cam-lrme" + }, + {} +}; + +struct platform_driver cam_lrme_driver = { + .probe = cam_lrme_dev_probe, + .remove = cam_lrme_dev_remove, + .driver = { + .name = "cam_lrme", + .owner = THIS_MODULE, + .of_match_table = cam_lrme_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_lrme_dev_init_module(void) +{ + return platform_driver_register(&cam_lrme_driver); +} + +void cam_lrme_dev_exit_module(void) +{ + platform_driver_unregister(&cam_lrme_driver); +} + +MODULE_DESCRIPTION("MSM LRME driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_dev.h b/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_dev.h new file mode 100644 index 0000000000..e8f4dcfc72 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/cam_lrme_dev.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_LRME_DEV_H_ +#define _CAM_LRME_DEV_H_ + +/** + * @brief : API to register LRME dev to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_lrme_dev_init_module(void); + +/** + * @brief : API to remove LRME dev from platform framework. + */ +void cam_lrme_dev_exit_module(void); + +#endif /* _CAM_LRME_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c new file mode 100644 index 0000000000..7ef409baa5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c @@ -0,0 +1,1230 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_soc_util.h" +#include "cam_mem_mgr_api.h" +#include "cam_smmu_api.h" +#include "cam_packet_util.h" +#include "cam_lrme_context.h" +#include "cam_lrme_hw_intf.h" +#include "cam_lrme_hw_core.h" +#include "cam_lrme_hw_soc.h" +#include "cam_lrme_hw_mgr_intf.h" +#include "cam_lrme_hw_mgr.h" + +static struct cam_lrme_hw_mgr g_lrme_hw_mgr; + +static int cam_lrme_mgr_util_reserve_device(struct cam_lrme_hw_mgr *hw_mgr, + struct cam_lrme_acquire_args *lrme_acquire_args) +{ + int i, index = 0; + uint32_t min_ctx = UINT_MAX; + struct cam_lrme_device *hw_device = NULL; + + mutex_lock(&hw_mgr->hw_mgr_mutex); + if (!hw_mgr->device_count) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_LRME, "No device is registered"); + return -EINVAL; + } + + for (i = 0; i < hw_mgr->device_count && i < CAM_LRME_HW_MAX; i++) { + hw_device = &hw_mgr->hw_device[i]; + if (!hw_device->num_context) { + index = i; + break; + } + if (hw_device->num_context < min_ctx) { + min_ctx = hw_device->num_context; + index = i; + } + } + + hw_device = &hw_mgr->hw_device[index]; + hw_device->num_context++; + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + CAM_DBG(CAM_LRME, "reserve device index %d", index); + + return index; +} + +static int cam_lrme_mgr_util_get_device(struct cam_lrme_hw_mgr *hw_mgr, + uint32_t device_index, struct cam_lrme_device **hw_device) +{ + if (!hw_mgr) { + CAM_ERR(CAM_LRME, "invalid params hw_mgr %pK", hw_mgr); + return -EINVAL; + } + + if (device_index >= CAM_LRME_HW_MAX) { + CAM_ERR(CAM_LRME, "Wrong device index %d", device_index); + return -EINVAL; + } + + *hw_device = &hw_mgr->hw_device[device_index]; + + return 0; +} + +static int cam_lrme_mgr_util_packet_validate(struct cam_packet *packet, + size_t remain_len) +{ + struct cam_cmd_buf_desc *cmd_desc = NULL; + int i, rc; + + if (!packet) { + CAM_ERR(CAM_LRME, "Invalid args"); + return -EINVAL; + } + + CAM_DBG(CAM_LRME, "Packet 
request=%d, op_code=0x%x, size=%d, flags=%d", + packet->header.request_id, packet->header.op_code, + packet->header.size, packet->header.flags); + CAM_DBG(CAM_LRME, + "Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)", + packet->cmd_buf_offset, packet->num_cmd_buf, + packet->io_configs_offset, packet->num_io_configs); + CAM_DBG(CAM_LRME, + "Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)", + packet->patch_offset, packet->num_patches, + packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index); + + if (cam_packet_util_validate_packet(packet, remain_len)) { + CAM_ERR(CAM_LRME, "invalid packet:%d %d %d %d %d", + packet->kmd_cmd_buf_index, + packet->num_cmd_buf, packet->cmd_buf_offset, + packet->io_configs_offset, packet->header.size); + return -EINVAL; + } + + if (!packet->num_io_configs) { + CAM_ERR(CAM_LRME, "no io configs"); + return -EINVAL; + } + + if (!packet->num_cmd_buf) { + CAM_ERR(CAM_LRME, "no cmd bufs"); + return -EINVAL; + } + + cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)&packet->payload_flex + + packet->cmd_buf_offset); + + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (!cmd_desc[i].length) + continue; + + CAM_DBG(CAM_LRME, + "CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d", + i, + cmd_desc[i].mem_handle, cmd_desc[i].offset, + cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type, + cmd_desc[i].meta_data); + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) { + CAM_ERR(CAM_LRME, "Invalid cmd buffer %d", i); + return rc; + } + } + + return 0; +} + +static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl, + struct cam_hw_prepare_update_args *prepare, + struct cam_lrme_hw_io_buffer *input_buf, + struct cam_lrme_hw_io_buffer *output_buf, uint32_t io_buf_size, + struct list_head *buf_tracker) +{ + int rc = -EINVAL; + uint32_t num_in_buf, num_out_buf, i, j, plane; + struct cam_buf_io_cfg *io_cfg; + dma_addr_t 
io_addr[CAM_PACKET_MAX_PLANES]; + size_t size; + + num_in_buf = 0; + num_out_buf = 0; + io_cfg = (struct cam_buf_io_cfg *)((uint8_t *) + &prepare->packet->payload_flex + + prepare->packet->io_configs_offset); + + for (i = 0; i < prepare->packet->num_io_configs; i++) { + CAM_DBG(CAM_LRME, + "IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]", + i, io_cfg[i].mem_handle[0], io_cfg[i].direction, + io_cfg[i].resource_type, + io_cfg[i].fence, io_cfg[i].format); + + memset(io_addr, 0, sizeof(io_addr)); + for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) { + if (!io_cfg[i].mem_handle[plane]) + break; + + rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane], + iommu_hdl, &io_addr[plane], &size, NULL, buf_tracker); + if (rc) { + CAM_ERR(CAM_LRME, "Cannot get io buf for %d %d", + plane, rc); + return -ENOMEM; + } + + if ((size_t)io_cfg[i].offsets[plane] >= size) { + CAM_ERR(CAM_LRME, "Invalid plane offset: %zu", + (size_t)io_cfg[i].offsets[plane]); + return -EINVAL; + } + + io_addr[plane] += io_cfg[i].offsets[plane]; + + CAM_DBG(CAM_LRME, "IO Address[%d][%d] : %llu", + io_cfg[i].direction, plane, io_addr[plane]); + } + + switch (io_cfg[i].direction) { + case CAM_BUF_INPUT: { + if (num_in_buf >= io_buf_size) { + CAM_ERR(CAM_LRME, + "Invalid number of buffers %d %d %d", + num_in_buf, num_out_buf, io_buf_size); + return -EINVAL; + } + prepare->in_map_entries[num_in_buf].resource_handle = + io_cfg[i].resource_type; + prepare->in_map_entries[num_in_buf].sync_id = + io_cfg[i].fence; + + input_buf[num_in_buf].valid = true; + for (j = 0; j < plane; j++) + input_buf[num_in_buf].io_addr[j] = io_addr[j]; + input_buf[num_in_buf].num_plane = plane; + input_buf[num_in_buf].io_cfg = &io_cfg[i]; + + num_in_buf++; + break; + } + case CAM_BUF_OUTPUT: { + if (num_out_buf >= io_buf_size) { + CAM_ERR(CAM_LRME, + "Invalid number of buffers %d %d %d", + num_in_buf, num_out_buf, io_buf_size); + return -EINVAL; + } + prepare->out_map_entries[num_out_buf].resource_handle = + 
io_cfg[i].resource_type; + prepare->out_map_entries[num_out_buf].sync_id = + io_cfg[i].fence; + + output_buf[num_out_buf].valid = true; + for (j = 0; j < plane; j++) + output_buf[num_out_buf].io_addr[j] = io_addr[j]; + output_buf[num_out_buf].num_plane = plane; + output_buf[num_out_buf].io_cfg = &io_cfg[i]; + + num_out_buf++; + break; + } + default: + CAM_ERR(CAM_LRME, "Unsupported io direction %d", + io_cfg[i].direction); + return -EINVAL; + } + } + prepare->num_in_map_entries = num_in_buf; + prepare->num_out_map_entries = num_out_buf; + + return 0; +} + +static int cam_lrme_mgr_util_prepare_hw_update_entries( + struct cam_lrme_hw_mgr *hw_mgr, + struct cam_hw_prepare_update_args *prepare, + struct cam_lrme_hw_cmd_config_args *config_args, + struct cam_kmd_buf_info *kmd_buf_info) +{ + int i, rc = 0; + struct cam_lrme_device *hw_device = NULL; + uint32_t *kmd_buf_addr; + uint32_t num_entry; + uint32_t kmd_buf_max_size; + uint32_t kmd_buf_used_bytes = 0; + struct cam_hw_update_entry *hw_entry; + struct cam_cmd_buf_desc *cmd_desc = NULL; + + hw_device = config_args->hw_device; + if (!hw_device) { + CAM_ERR(CAM_LRME, "Invalid hw_device"); + return -EINVAL; + } + + kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr + + kmd_buf_info->used_bytes); + kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes; + + config_args->cmd_buf_addr = kmd_buf_addr; + config_args->size = kmd_buf_max_size; + config_args->config_buf_size = 0; + + if (hw_device->hw_intf.hw_ops.process_cmd) { + rc = hw_device->hw_intf.hw_ops.process_cmd( + hw_device->hw_intf.hw_priv, + CAM_LRME_HW_CMD_PREPARE_HW_UPDATE, + config_args, + sizeof(struct cam_lrme_hw_cmd_config_args)); + if (rc) { + CAM_ERR(CAM_LRME, + "Failed in CMD_PREPARE_HW_UPDATE %d", rc); + return rc; + } + } else { + CAM_ERR(CAM_LRME, "Can't find handle function"); + return -EINVAL; + } + + kmd_buf_used_bytes += config_args->config_buf_size; + + if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) { + 
CAM_ERR(CAM_LRME, "Invalid kmd used bytes %d (%d)", + kmd_buf_used_bytes, kmd_buf_max_size); + return -ENOMEM; + } + + hw_entry = prepare->hw_update_entries; + num_entry = 0; + + if (config_args->config_buf_size) { + if ((num_entry + 1) >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_LRME, "Insufficient HW entries :%d %d", + num_entry, prepare->max_hw_update_entries); + return -EINVAL; + } + + hw_entry[num_entry].handle = kmd_buf_info->handle; + hw_entry[num_entry].len = config_args->config_buf_size; + hw_entry[num_entry].offset = kmd_buf_info->offset; + + kmd_buf_info->used_bytes += config_args->config_buf_size; + kmd_buf_info->offset += config_args->config_buf_size; + num_entry++; + } + + cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *) + &prepare->packet->payload_flex + prepare->packet->cmd_buf_offset); + + for (i = 0; i < prepare->packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (!cmd_desc[i].length) + continue; + + if ((num_entry + 1) >= prepare->max_hw_update_entries) { + CAM_ERR(CAM_LRME, "Exceed max num of entry"); + return -EINVAL; + } + + hw_entry[num_entry].handle = cmd_desc[i].mem_handle; + hw_entry[num_entry].len = cmd_desc[i].length; + hw_entry[num_entry].offset = cmd_desc[i].offset; + num_entry++; + } + prepare->num_hw_update_entries = num_entry; + + CAM_DBG(CAM_LRME, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)", + prepare->num_hw_update_entries, prepare->num_in_map_entries, + prepare->num_out_map_entries); + + return rc; +} + +static void cam_lrme_mgr_util_put_frame_req( + struct list_head *src_list, + struct cam_lrme_frame_request *frame_req, + struct mutex *lock, + bool free_buffer) +{ + if (!frame_req) { + CAM_ERR(CAM_LRME, "Invalid frame_req"); + return; + } + + mutex_lock(lock); + if (free_buffer) + cam_mem_put_cpu_buf(frame_req->hw_update_entries[0]->handle); + + list_add_tail(&frame_req->frame_list, src_list); + mutex_unlock(lock); +} + +static int 
cam_lrme_mgr_util_get_frame_req( + struct list_head *src_list, + struct cam_lrme_frame_request **frame_req, + struct mutex *lock) +{ + int rc = 0; + struct cam_lrme_frame_request *req_ptr = NULL; + + mutex_lock(lock); + if (!list_empty(src_list)) { + req_ptr = list_first_entry(src_list, + struct cam_lrme_frame_request, frame_list); + list_del_init(&req_ptr->frame_list); + } else { + rc = -ENOENT; + } + *frame_req = req_ptr; + mutex_unlock(lock); + + return rc; +} + + +static int cam_lrme_mgr_util_submit_req(void *priv, void *data) +{ + struct cam_lrme_device *hw_device; + struct cam_lrme_hw_mgr *hw_mgr; + struct cam_lrme_frame_request *frame_req = NULL; + struct cam_lrme_hw_submit_args submit_args; + struct cam_lrme_mgr_work_data *work_data; + int rc; + int req_prio = 0; + + if (!priv) { + CAM_ERR(CAM_LRME, "worker doesn't have private data"); + return -EINVAL; + } + + hw_mgr = (struct cam_lrme_hw_mgr *)priv; + work_data = (struct cam_lrme_mgr_work_data *)data; + hw_device = work_data->hw_device; + + rc = cam_lrme_mgr_util_get_frame_req( + &hw_device->frame_pending_list_high, &frame_req, + &hw_device->high_req_lock); + + if (!frame_req) { + rc = cam_lrme_mgr_util_get_frame_req( + &hw_device->frame_pending_list_normal, &frame_req, + &hw_device->normal_req_lock); + if (frame_req) + req_prio = 1; + } + + if (!frame_req) { + CAM_DBG(CAM_LRME, "No pending request"); + return 0; + } + + if (hw_device->hw_intf.hw_ops.process_cmd) { + submit_args.hw_update_entries = frame_req->hw_update_entries; + submit_args.num_hw_update_entries = + frame_req->num_hw_update_entries; + submit_args.frame_req = frame_req; + + rc = hw_device->hw_intf.hw_ops.process_cmd( + hw_device->hw_intf.hw_priv, + CAM_LRME_HW_CMD_SUBMIT, + &submit_args, sizeof(struct cam_lrme_hw_submit_args)); + + if (rc == -EBUSY) + CAM_DBG(CAM_LRME, "device busy"); + else if (rc) + CAM_ERR(CAM_LRME, "submit request failed rc %d", rc); + if (rc) { + req_prio == 0 ? 
mutex_lock(&hw_device->high_req_lock) : + mutex_lock(&hw_device->normal_req_lock); + list_add(&frame_req->frame_list, + (req_prio == 0 ? + &hw_device->frame_pending_list_high : + &hw_device->frame_pending_list_normal)); + req_prio == 0 ? mutex_unlock(&hw_device->high_req_lock) : + mutex_unlock(&hw_device->normal_req_lock); + } + if (rc == -EBUSY) + rc = 0; + } else { + req_prio == 0 ? mutex_lock(&hw_device->high_req_lock) : + mutex_lock(&hw_device->normal_req_lock); + list_add(&frame_req->frame_list, + (req_prio == 0 ? + &hw_device->frame_pending_list_high : + &hw_device->frame_pending_list_normal)); + req_prio == 0 ? mutex_unlock(&hw_device->high_req_lock) : + mutex_unlock(&hw_device->normal_req_lock); + rc = -EINVAL; + } + + CAM_DBG(CAM_LRME, "End of submit, rc %d", rc); + + return rc; +} + +static int cam_lrme_mgr_util_schedule_frame_req( + struct cam_lrme_hw_mgr *hw_mgr, struct cam_lrme_device *hw_device) +{ + int rc = 0; + struct crm_workq_task *task; + struct cam_lrme_mgr_work_data *work_data; + + task = cam_req_mgr_workq_get_task(hw_device->work); + if (!task) { + CAM_ERR(CAM_LRME, "Can not get task for worker"); + return -ENOMEM; + } + + work_data = (struct cam_lrme_mgr_work_data *)task->payload; + work_data->hw_device = hw_device; + + task->process_cb = cam_lrme_mgr_util_submit_req; + CAM_DBG(CAM_LRME, "enqueue submit task"); + rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0); + + return rc; +} + +static int cam_lrme_mgr_util_release(struct cam_lrme_hw_mgr *hw_mgr, + uint32_t device_index) +{ + int rc = 0; + struct cam_lrme_device *hw_device; + + rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device); + if (rc) { + CAM_ERR(CAM_LRME, "Error in getting device %d", rc); + return rc; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + hw_device->num_context--; + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +} + +static int cam_lrme_mgr_cb(void *data, + struct cam_lrme_hw_cb_args *cb_args) +{ + struct cam_lrme_hw_mgr *hw_mgr = 
&g_lrme_hw_mgr; + int rc = 0; + uint32_t evt_id = CAM_CTX_EVT_ID_ERROR; + struct cam_lrme_frame_request *frame_req; + struct cam_lrme_device *hw_device; + + if (!data || !cb_args) { + CAM_ERR(CAM_LRME, "Invalid input args"); + return -EINVAL; + } + + hw_device = (struct cam_lrme_device *)data; + frame_req = cb_args->frame_req; + + if (cb_args->cb_type & CAM_LRME_CB_PUT_FRAME) { + memset(frame_req, 0x0, sizeof(*frame_req)); + INIT_LIST_HEAD(&frame_req->frame_list); + cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list, + frame_req, &hw_mgr->free_req_lock, true); + cb_args->cb_type &= ~CAM_LRME_CB_PUT_FRAME; + frame_req = NULL; + } + + if (cb_args->cb_type & CAM_LRME_CB_COMP_REG_UPDATE) { + cb_args->cb_type &= ~CAM_LRME_CB_COMP_REG_UPDATE; + CAM_DBG(CAM_LRME, "Reg update"); + } + + if (!frame_req) + return rc; + + if (cb_args->cb_type & CAM_LRME_CB_BUF_DONE) { + cb_args->cb_type &= ~CAM_LRME_CB_BUF_DONE; + evt_id = CAM_CTX_EVT_ID_SUCCESS; + } else if (cb_args->cb_type & CAM_LRME_CB_ERROR) { + cb_args->cb_type &= ~CAM_LRME_CB_ERROR; + evt_id = CAM_CTX_EVT_ID_ERROR; + } else { + CAM_ERR(CAM_LRME, "Wrong cb type %d, req %lld", + cb_args->cb_type, frame_req->req_id); + return -EINVAL; + } + + if (hw_mgr->event_cb) { + struct cam_hw_done_event_data buf_data; + + buf_data.request_id = frame_req->req_id; + buf_data.evt_param = (cb_args->cb_type & CAM_LRME_CB_ERROR) ? 
+ CAM_SYNC_LRME_EVENT_CB_ERROR : + CAM_SYNC_COMMON_EVENT_SUCCESS; + CAM_DBG(CAM_LRME, "frame req %llu, evt_id %d", + frame_req->req_id, evt_id); + rc = hw_mgr->event_cb(frame_req->ctxt_to_hw_map, + evt_id, &buf_data); + } else { + CAM_ERR(CAM_LRME, "No cb function"); + } + + memset(frame_req, 0x0, sizeof(*frame_req)); + INIT_LIST_HEAD(&frame_req->frame_list); + cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list, + frame_req, + &hw_mgr->free_req_lock, true); + + rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device); + + return rc; +} + +static int cam_lrme_mgr_get_caps(void *hw_mgr_priv, void *hw_get_caps_args) +{ + int rc = 0; + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_query_cap_cmd *args = hw_get_caps_args; + + if (sizeof(struct cam_lrme_query_cap_cmd) != args->size) { + CAM_ERR(CAM_LRME, + "sizeof(struct cam_query_cap_cmd) = %zu, args->size = %d", + sizeof(struct cam_query_cap_cmd), args->size); + return -EFAULT; + } + + if (copy_to_user(u64_to_user_ptr(args->caps_handle), + &(hw_mgr->lrme_caps), + sizeof(struct cam_lrme_query_cap_cmd))) { + CAM_ERR(CAM_LRME, "copy to user failed"); + return -EFAULT; + } + + return rc; +} + +static int cam_lrme_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args) +{ + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_acquire_args *args = + (struct cam_hw_acquire_args *)hw_acquire_args; + struct cam_lrme_acquire_args lrme_acquire_args; + uintptr_t device_index; + + if (!hw_mgr_priv || !args) { + CAM_ERR(CAM_LRME, + "Invalid input params hw_mgr_priv %pK, acquire_args %pK", + hw_mgr_priv, args); + return -EINVAL; + } + + if (copy_from_user(&lrme_acquire_args, + (void __user *)args->acquire_info, + sizeof(struct cam_lrme_acquire_args))) { + CAM_ERR(CAM_LRME, "Failed to copy acquire args from user"); + return -EFAULT; + } + + device_index = cam_lrme_mgr_util_reserve_device(hw_mgr, + &lrme_acquire_args); + CAM_DBG(CAM_LRME, "Get device id %llu", device_index); + + if 
(device_index >= hw_mgr->device_count) { + CAM_ERR(CAM_LRME, "Get wrong device id %lu", device_index); + return -EINVAL; + } + + /* device_index is the right 4 bit in ctxt_to_hw_map */ + args->ctxt_to_hw_map = (void *)device_index; + + return 0; +} + +static int cam_lrme_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args) +{ + int rc = 0; + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_release_args *args = + (struct cam_hw_release_args *)hw_release_args; + uint64_t device_index; + + if (!hw_mgr_priv || !hw_release_args) { + CAM_ERR(CAM_LRME, "Invalid arguments %pK, %pK", + hw_mgr_priv, hw_release_args); + return -EINVAL; + } + + device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map); + if (device_index >= hw_mgr->device_count) { + CAM_ERR(CAM_LRME, "Invalid device index %llu", device_index); + return -EPERM; + } + + rc = cam_lrme_mgr_util_release(hw_mgr, device_index); + if (rc) + CAM_ERR(CAM_LRME, "Failed in release device, rc=%d", rc); + + return rc; +} + +static int cam_lrme_mgr_hw_dump(void *hw_mgr_priv, void *hw_dump_args) +{ + struct cam_hw_dump_args *dump_args = hw_dump_args; + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_lrme_device *hw_device; + int rc = 0; + uint32_t device_index; + struct cam_lrme_hw_dump_args lrme_dump_args; + + device_index = CAM_LRME_DECODE_DEVICE_INDEX(dump_args->ctxt_to_hw_map); + if (device_index >= hw_mgr->device_count) { + CAM_ERR(CAM_LRME, "Invalid device index %d", device_index); + return -EPERM; + } + + CAM_DBG(CAM_LRME, "Start device index %d", device_index); + + rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to get hw device"); + return rc; + } + rc = cam_mem_get_cpu_buf(dump_args->buf_handle, + &lrme_dump_args.cpu_addr, + &lrme_dump_args.buf_len); + if (rc) { + CAM_ERR(CAM_LRME, "Invalid handle %u rc %d", + dump_args->buf_handle, rc); + return rc; + } + lrme_dump_args.offset = dump_args->offset; + 
lrme_dump_args.request_id = dump_args->request_id; + + rc = hw_device->hw_intf.hw_ops.process_cmd( + hw_device->hw_intf.hw_priv, + CAM_LRME_HW_CMD_DUMP, + &lrme_dump_args, + sizeof(struct cam_lrme_hw_dump_args)); + CAM_DBG(CAM_LRME, "Offset before %zu after %zu", + dump_args->offset, lrme_dump_args.offset); + dump_args->offset = lrme_dump_args.offset; + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; +} + +static int cam_lrme_mgr_hw_flush(void *hw_mgr_priv, void *hw_flush_args) +{ int rc = 0, i; + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_flush_args *args; + struct cam_lrme_device *hw_device; + struct cam_lrme_frame_request *frame_req = NULL, *req_to_flush = NULL; + struct cam_lrme_frame_request **req_list = NULL; + uint32_t device_index; + struct cam_lrme_hw_flush_args lrme_flush_args; + uint32_t priority; + + if (!hw_mgr_priv || !hw_flush_args) { + CAM_ERR(CAM_LRME, "Invalid args %pK %pK", + hw_mgr_priv, hw_flush_args); + return -EINVAL; + } + + args = (struct cam_hw_flush_args *)hw_flush_args; + device_index = ((uintptr_t)args->ctxt_to_hw_map & 0xF); + if (device_index >= hw_mgr->device_count) { + CAM_ERR(CAM_LRME, "Invalid device index %d", device_index); + return -EPERM; + } + + rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device); + if (rc) { + CAM_ERR(CAM_LRME, "Error in getting device %d", rc); + goto end; + } + + req_list = (struct cam_lrme_frame_request **)args->flush_req_pending; + for (i = 0; i < args->num_req_pending; i++) { + frame_req = req_list[i]; + memset(frame_req, 0x0, sizeof(*frame_req)); + cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list, + &frame_req->frame_list, &hw_mgr->free_req_lock, true); + } + + req_list = (struct cam_lrme_frame_request **)args->flush_req_active; + for (i = 0; i < args->num_req_active; i++) { + frame_req = req_list[i]; + priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map); + mutex_lock((priority == CAM_LRME_PRIORITY_HIGH) ? 
+ &hw_device->high_req_lock : + &hw_device->normal_req_lock); + if (!list_empty(&frame_req->frame_list)) { + list_del_init(&frame_req->frame_list); + cam_lrme_mgr_util_put_frame_req( + &hw_mgr->frame_free_list, + &frame_req->frame_list, + &hw_mgr->free_req_lock, true); + } else + req_to_flush = frame_req; + mutex_unlock((priority == CAM_LRME_PRIORITY_HIGH) ? + &hw_device->high_req_lock : + &hw_device->normal_req_lock); + } + if (!req_to_flush) + goto end; + if (hw_device->hw_intf.hw_ops.flush) { + lrme_flush_args.ctxt_to_hw_map = req_to_flush->ctxt_to_hw_map; + lrme_flush_args.flush_type = args->flush_type; + lrme_flush_args.req_to_flush = req_to_flush; + rc = hw_device->hw_intf.hw_ops.flush(hw_device->hw_intf.hw_priv, + &lrme_flush_args, + sizeof(lrme_flush_args)); + if (rc) { + CAM_ERR(CAM_LRME, "Failed in HW Stop %d", rc); + goto end; + } + } else { + CAM_ERR(CAM_LRME, "No stop ops"); + goto end; + } + +end: + return rc; +} + + +static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args) +{ + int rc = 0; + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_start_args *args = + (struct cam_hw_start_args *)hw_start_args; + struct cam_lrme_device *hw_device; + uint32_t device_index; + + if (!hw_mgr || !args) { + CAM_ERR(CAM_LRME, "Invalid input params"); + return -EINVAL; + } + + device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map); + if (device_index >= hw_mgr->device_count) { + CAM_ERR(CAM_LRME, "Invalid device index %d", device_index); + return -EPERM; + } + + CAM_DBG(CAM_LRME, "Start device index %d", device_index); + + rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to get hw device"); + return rc; + } + + if (hw_device->hw_intf.hw_ops.start) { + rc = hw_device->hw_intf.hw_ops.start( + hw_device->hw_intf.hw_priv, NULL, 0); + } else { + CAM_ERR(CAM_LRME, "Invalid start function"); + return -EINVAL; + } + + rc = hw_device->hw_intf.hw_ops.process_cmd( + 
hw_device->hw_intf.hw_priv, + CAM_LRME_HW_CMD_DUMP_REGISTER, + &g_lrme_hw_mgr.debugfs_entry.dump_register, + sizeof(bool)); + + return rc; +} + +static int cam_lrme_mgr_hw_stop(void *hw_mgr_priv, void *stop_args) +{ + int rc = 0; + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_stop_args *args = + (struct cam_hw_stop_args *)stop_args; + struct cam_lrme_device *hw_device; + uint32_t device_index; + + if (!hw_mgr_priv || !stop_args) { + CAM_ERR(CAM_LRME, "Invalid arguments"); + return -EINVAL; + } + + device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map); + if (device_index >= hw_mgr->device_count) { + CAM_ERR(CAM_LRME, "Invalid device index %d", device_index); + return -EPERM; + } + + CAM_DBG(CAM_LRME, "Stop device index %d", device_index); + + rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to get hw device"); + return rc; + } + + if (hw_device->hw_intf.hw_ops.stop) { + rc = hw_device->hw_intf.hw_ops.stop( + hw_device->hw_intf.hw_priv, NULL, 0); + if (rc) { + CAM_ERR(CAM_LRME, "Failed in HW stop %d", rc); + goto end; + } + } + +end: + return rc; +} + +static int cam_lrme_mgr_hw_prepare_update(void *hw_mgr_priv, + void *hw_prepare_update_args) +{ + int rc = 0, i; + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_prepare_update_args *args = + (struct cam_hw_prepare_update_args *)hw_prepare_update_args; + struct cam_lrme_device *hw_device; + struct cam_kmd_buf_info kmd_buf; + struct cam_lrme_hw_cmd_config_args config_args; + struct cam_lrme_frame_request *frame_req = NULL; + uint32_t device_index; + + if (!hw_mgr_priv || !hw_prepare_update_args) { + CAM_ERR(CAM_LRME, "Invalid args %pK %pK", + hw_mgr_priv, hw_prepare_update_args); + return -EINVAL; + } + + device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map); + if (device_index >= hw_mgr->device_count) { + CAM_ERR(CAM_LRME, "Invalid device index %d", device_index); + return -EPERM; + } + + rc = 
cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device); + if (rc) { + CAM_ERR(CAM_LRME, "Error in getting device %d", rc); + goto error; + } + + rc = cam_lrme_mgr_util_packet_validate(args->packet, args->remain_len); + if (rc) { + CAM_ERR(CAM_LRME, "Error in packet validation %d", rc); + goto error; + } + + rc = cam_packet_util_get_kmd_buffer(args->packet, &kmd_buf); + if (rc) { + CAM_ERR(CAM_LRME, "Error in get kmd buf buffer %d", rc); + goto error; + } + + CAM_DBG(CAM_LRME, + "KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d", + kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset, + kmd_buf.size, kmd_buf.used_bytes); + + rc = cam_packet_util_process_patches(args->packet, + hw_mgr->device_iommu.non_secure, hw_mgr->device_iommu.secure, false); + if (rc) { + CAM_ERR(CAM_LRME, "Patch packet failed, rc=%d", rc); + return rc; + } + + memset(&config_args, 0, sizeof(config_args)); + config_args.hw_device = hw_device; + + rc = cam_lrme_mgr_util_prepare_io_buffer( + hw_mgr->device_iommu.non_secure, args, + config_args.input_buf, config_args.output_buf, + CAM_LRME_MAX_IO_BUFFER, args->buf_tracker); + if (rc) { + CAM_ERR(CAM_LRME, "Error in prepare IO Buf %d", rc); + goto error; + } + /* Check port number */ + if (args->num_in_map_entries == 0 || args->num_out_map_entries == 0) { + CAM_ERR(CAM_LRME, "Error in port number in %d, out %d", + args->num_in_map_entries, args->num_out_map_entries); + rc = -EINVAL; + goto error; + } + + rc = cam_lrme_mgr_util_prepare_hw_update_entries(hw_mgr, args, + &config_args, &kmd_buf); + if (rc) { + CAM_ERR(CAM_LRME, "Error in hw update entries %d", rc); + goto error; + } + + rc = cam_lrme_mgr_util_get_frame_req(&hw_mgr->frame_free_list, + &frame_req, &hw_mgr->free_req_lock); + if (rc || !frame_req) { + CAM_ERR(CAM_LRME, "Can not get free frame request"); + goto error; + } + + frame_req->ctxt_to_hw_map = args->ctxt_to_hw_map; + frame_req->req_id = args->packet->header.request_id; + frame_req->hw_device = hw_device; + 
frame_req->num_hw_update_entries = args->num_hw_update_entries; + for (i = 0; i < args->num_hw_update_entries; i++) + frame_req->hw_update_entries[i] = args->hw_update_entries[i]; + + args->priv = frame_req; + + CAM_DBG(CAM_LRME, "FramePrepare : Frame[%lld]", frame_req->req_id); + + return 0; + +error: + return rc; +} + +static int cam_lrme_mgr_hw_config(void *hw_mgr_priv, + void *hw_config_args) +{ + int rc = 0; + struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_hw_config_args *args = + (struct cam_hw_config_args *)hw_config_args; + struct cam_lrme_frame_request *frame_req; + struct cam_lrme_device *hw_device = NULL; + enum cam_lrme_hw_mgr_ctx_priority priority; + + if (!hw_mgr_priv || !hw_config_args) { + CAM_ERR(CAM_LRME, "Invalid arguments, hw_mgr %pK, config %pK", + hw_mgr_priv, hw_config_args); + return -EINVAL; + } + + if (!args->num_hw_update_entries) { + CAM_ERR(CAM_LRME, "No hw update entries"); + return -EINVAL; + } + + frame_req = (struct cam_lrme_frame_request *)args->priv; + if (!frame_req) { + CAM_ERR(CAM_LRME, "No frame request"); + return -EINVAL; + } + + hw_device = frame_req->hw_device; + if (!hw_device) + return -EINVAL; + + priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map); + if (priority == CAM_LRME_PRIORITY_HIGH) { + cam_lrme_mgr_util_put_frame_req( + &hw_device->frame_pending_list_high, + &frame_req->frame_list, &hw_device->high_req_lock, false); + } else { + cam_lrme_mgr_util_put_frame_req( + &hw_device->frame_pending_list_normal, + &frame_req->frame_list, &hw_device->normal_req_lock, false); + } + + CAM_DBG(CAM_LRME, "schedule req %llu", frame_req->req_id); + rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device); + + return rc; +} + +static int cam_lrme_mgr_create_debugfs_entry(void) +{ + int rc = 0; + struct dentry *dbgfileptr = NULL; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("lrme", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_ISP,"DebugFS could not create directory!"); + return 
-ENOENT; + } + /* Store parent inode for cleanup in caller */ + g_lrme_hw_mgr.debugfs_entry.dentry = dbgfileptr; + + debugfs_create_bool("dump_register", 0644, g_lrme_hw_mgr.debugfs_entry.dentry, + &g_lrme_hw_mgr.debugfs_entry.dump_register); + + return 0; +} + +static void cam_req_mgr_process_workq_cam_lrme_device_submit_worker( + struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +int cam_lrme_mgr_register_device( + struct cam_hw_intf *lrme_hw_intf, + struct cam_iommu_handle *device_iommu, + struct cam_iommu_handle *cdm_iommu) +{ + struct cam_lrme_device *hw_device; + char buf[128]; + int i, rc; + + hw_device = &g_lrme_hw_mgr.hw_device[lrme_hw_intf->hw_idx]; + + g_lrme_hw_mgr.device_iommu = *device_iommu; + g_lrme_hw_mgr.cdm_iommu = *cdm_iommu; + + memcpy(&hw_device->hw_intf, lrme_hw_intf, sizeof(struct cam_hw_intf)); + + mutex_init(&hw_device->high_req_lock); + mutex_init(&hw_device->normal_req_lock); + INIT_LIST_HEAD(&hw_device->frame_pending_list_high); + INIT_LIST_HEAD(&hw_device->frame_pending_list_normal); + + rc = snprintf(buf, sizeof(buf), "cam_lrme_device_submit_worker%d", + lrme_hw_intf->hw_idx); + CAM_DBG(CAM_LRME, "Create submit workq for %s", buf); + rc = cam_req_mgr_workq_create(buf, + CAM_LRME_WORKQ_NUM_TASK, + &hw_device->work, CRM_WORKQ_USAGE_NON_IRQ, 0, + cam_req_mgr_process_workq_cam_lrme_device_submit_worker); + if (rc) { + CAM_ERR(CAM_LRME, + "Unable to create a worker, rc=%d", rc); + return rc; + } + + for (i = 0; i < CAM_LRME_WORKQ_NUM_TASK; i++) + hw_device->work->task.pool[i].payload = + &hw_device->work_data[i]; + + if (hw_device->hw_intf.hw_ops.process_cmd) { + struct cam_lrme_hw_cmd_set_cb cb_args; + + cb_args.cam_lrme_hw_mgr_cb = cam_lrme_mgr_cb; + cb_args.data = hw_device; + + rc = hw_device->hw_intf.hw_ops.process_cmd( + hw_device->hw_intf.hw_priv, + CAM_LRME_HW_CMD_REGISTER_CB, + &cb_args, sizeof(cb_args)); + if (rc) { + CAM_ERR(CAM_LRME, "Register cb failed"); + goto destroy_workqueue; + } + CAM_DBG(CAM_LRME, "cb 
registered"); + } + + if (hw_device->hw_intf.hw_ops.get_hw_caps) { + rc = hw_device->hw_intf.hw_ops.get_hw_caps( + hw_device->hw_intf.hw_priv, &hw_device->hw_caps, + sizeof(hw_device->hw_caps)); + if (rc) + CAM_ERR(CAM_LRME, "Get caps failed"); + } else { + CAM_ERR(CAM_LRME, "No get_hw_caps function"); + goto destroy_workqueue; + } + g_lrme_hw_mgr.lrme_caps.dev_caps[lrme_hw_intf->hw_idx] = + hw_device->hw_caps; + g_lrme_hw_mgr.device_count++; + g_lrme_hw_mgr.lrme_caps.device_iommu = g_lrme_hw_mgr.device_iommu; + g_lrme_hw_mgr.lrme_caps.cdm_iommu = g_lrme_hw_mgr.cdm_iommu; + g_lrme_hw_mgr.lrme_caps.num_devices = g_lrme_hw_mgr.device_count; + + hw_device->valid = true; + + CAM_DBG(CAM_LRME, "device registration done"); + return 0; + +destroy_workqueue: + cam_req_mgr_workq_destroy(&hw_device->work); + + return rc; +} + +int cam_lrme_mgr_deregister_device(int device_index) +{ + struct cam_lrme_device *hw_device; + + hw_device = &g_lrme_hw_mgr.hw_device[device_index]; + cam_req_mgr_workq_destroy(&hw_device->work); + mutex_destroy(&hw_device->high_req_lock); + mutex_destroy(&hw_device->normal_req_lock); + memset(hw_device, 0x0, sizeof(struct cam_lrme_device)); + g_lrme_hw_mgr.device_count--; + + return 0; +} + +int cam_lrme_hw_mgr_deinit(void) +{ + mutex_destroy(&g_lrme_hw_mgr.hw_mgr_mutex); + mutex_destroy(&g_lrme_hw_mgr.free_req_lock); + memset(&g_lrme_hw_mgr, 0x0, sizeof(g_lrme_hw_mgr)); + + return 0; +} + +int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, + cam_hw_event_cb_func cam_lrme_dev_buf_done_cb) +{ + int i, rc = 0; + struct cam_lrme_frame_request *frame_req; + + if (!hw_mgr_intf) + return -EINVAL; + + CAM_DBG(CAM_LRME, "device count %d", g_lrme_hw_mgr.device_count); + if (g_lrme_hw_mgr.device_count > CAM_LRME_HW_MAX) { + CAM_ERR(CAM_LRME, "Invalid count of devices"); + return -EINVAL; + } + + memset(hw_mgr_intf, 0, sizeof(*hw_mgr_intf)); + + mutex_init(&g_lrme_hw_mgr.hw_mgr_mutex); + mutex_init(&g_lrme_hw_mgr.free_req_lock); + 
INIT_LIST_HEAD(&g_lrme_hw_mgr.frame_free_list); + + /* Init hw mgr frame requests and add to free list */ + for (i = 0; i < CAM_CTX_REQ_MAX * CAM_CTX_MAX; i++) { + frame_req = &g_lrme_hw_mgr.frame_req[i]; + + memset(frame_req, 0x0, sizeof(*frame_req)); + INIT_LIST_HEAD(&frame_req->frame_list); + + list_add_tail(&frame_req->frame_list, + &g_lrme_hw_mgr.frame_free_list); + } + + hw_mgr_intf->hw_mgr_priv = &g_lrme_hw_mgr; + hw_mgr_intf->hw_get_caps = cam_lrme_mgr_get_caps; + hw_mgr_intf->hw_acquire = cam_lrme_mgr_hw_acquire; + hw_mgr_intf->hw_release = cam_lrme_mgr_hw_release; + hw_mgr_intf->hw_start = cam_lrme_mgr_hw_start; + hw_mgr_intf->hw_stop = cam_lrme_mgr_hw_stop; + hw_mgr_intf->hw_prepare_update = cam_lrme_mgr_hw_prepare_update; + hw_mgr_intf->hw_config = cam_lrme_mgr_hw_config; + hw_mgr_intf->hw_read = NULL; + hw_mgr_intf->hw_write = NULL; + hw_mgr_intf->hw_close = NULL; + hw_mgr_intf->hw_flush = cam_lrme_mgr_hw_flush; + + g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb; + hw_mgr_intf->hw_dump = cam_lrme_mgr_hw_dump; + + cam_lrme_mgr_create_debugfs_entry(); + CAM_DBG(CAM_LRME, "Hw mgr init done"); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h new file mode 100644 index 0000000000..d5220906dc --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_LRME_HW_MGR_H_ +#define _CAM_LRME_HW_MGR_H_ + +#include +#include + +#include +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_hw_mgr_intf.h" +#include "cam_req_mgr_workq.h" +#include "cam_lrme_hw_intf.h" +#include "cam_context.h" + +#define CAM_LRME_HW_MAX 1 +#define CAM_LRME_WORKQ_NUM_TASK 10 + +#define CAM_LRME_DECODE_DEVICE_INDEX(ctxt_to_hw_map) \ + ((uintptr_t)ctxt_to_hw_map & 0xF) + +#define CAM_LRME_DECODE_PRIORITY(ctxt_to_hw_map) \ + (((uintptr_t)ctxt_to_hw_map & 0xF0) >> 4) + +#define CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map) \ + ((uint64_t)(uintptr_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT) + +/** + * enum cam_lrme_hw_mgr_ctx_priority + * + * CAM_LRME_PRIORITY_HIGH : High priority client + * CAM_LRME_PRIORITY_NORMAL : Normal priority client + */ +enum cam_lrme_hw_mgr_ctx_priority { + CAM_LRME_PRIORITY_HIGH, + CAM_LRME_PRIORITY_NORMAL, +}; + +/** + * struct cam_lrme_mgr_work_data : HW Mgr work data + * + * @hw_device : Pointer to the hw device + */ +struct cam_lrme_mgr_work_data { + struct cam_lrme_device *hw_device; +}; + +/** + * struct cam_lrme_debugfs_entry : debugfs entry struct + * + * @dentry : entry of debugfs + * @dump_register : flag to dump registers + */ +struct cam_lrme_debugfs_entry { + struct dentry *dentry; + bool dump_register; +}; + +/** + * struct cam_lrme_device : LRME HW device + * + * @hw_caps : HW device's capabilities + * @hw_intf : HW device's interface information + * @num_context : Number of contexts using this device + * @valid : Whether this device is valid + * @work : HW device's work queue + * @work_data : HW device's work data + * @frame_pending_list_high : High priority request queue + * @frame_pending_list_normal : Normal priority request queue + * @high_req_lock : mutex of high priority queue + * @normal_req_lock : mutex of normal priority queue + */ +struct cam_lrme_device { + struct cam_lrme_dev_cap hw_caps; + struct cam_hw_intf 
hw_intf; + uint32_t num_context; + bool valid; + struct cam_req_mgr_core_workq *work; + struct cam_lrme_mgr_work_data work_data[CAM_LRME_WORKQ_NUM_TASK]; + struct list_head frame_pending_list_high; + struct list_head frame_pending_list_normal; + struct mutex high_req_lock; + struct mutex normal_req_lock; +}; + +/** + * struct cam_lrme_hw_mgr : LRME HW manager + * + * @device_count : Number of HW devices + * @frame_free_list : List of free frame request + * @hw_mgr_mutex : Mutex to protect HW manager data + * @free_req_lock : Mutex to protect frame_free_list + * @hw_device : List of HW devices + * @device_iommu : Device iommu + * @cdm_iommu : cdm iommu + * @frame_req : List of frame request to use + * @lrme_caps : LRME capabilities + * @event_cb : IRQ callback function + * @debugfs_entry : debugfs entry to set debug prop + */ +struct cam_lrme_hw_mgr { + uint32_t device_count; + struct list_head frame_free_list; + struct mutex hw_mgr_mutex; + struct mutex free_req_lock; + struct cam_lrme_device hw_device[CAM_LRME_HW_MAX]; + struct cam_iommu_handle device_iommu; + struct cam_iommu_handle cdm_iommu; + struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX]; + struct cam_lrme_query_cap_cmd lrme_caps; + cam_hw_event_cb_func event_cb; + struct cam_lrme_debugfs_entry debugfs_entry; +}; + +int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf, + struct cam_iommu_handle *device_iommu, + struct cam_iommu_handle *cdm_iommu); +int cam_lrme_mgr_deregister_device(int device_index); + +#endif /* _CAM_LRME_HW_MGR_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h new file mode 100644 index 0000000000..df6ea0eac7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux 
Foundation. All rights reserved. + */ + +#ifndef _CAM_LRME_HW_MGR_INTF_H_ +#define _CAM_LRME_HW_MGR_INTF_H_ + +#include + +#include "cam_debug_util.h" +#include "cam_hw_mgr_intf.h" + +int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, + cam_hw_event_cb_func cam_lrme_dev_buf_done_cb); +int cam_lrme_hw_mgr_deinit(void); + +#endif /* _CAM_LRME_HW_MGR_INTF_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c new file mode 100644 index 0000000000..56f2e0decd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c @@ -0,0 +1,1453 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_lrme_hw_core.h" +#include "cam_lrme_hw_soc.h" +#include "cam_smmu_api.h" + +static void cam_lrme_dump_registers(void __iomem *base) +{ + /* dump the clc registers */ + cam_io_dump(base, 0x60, (0xc0 - 0x60) / 0x4); + /* dump the fe and we registers */ + cam_io_dump(base, 0x200, (0x29c - 0x200) / 0x4); + cam_io_dump(base, 0x2f0, (0x330 - 0x2f0) / 0x4); + cam_io_dump(base, 0x500, (0x5b4 - 0x500) / 0x4); + cam_io_dump(base, 0x700, (0x778 - 0x700) / 0x4); + cam_io_dump(base, 0x800, (0x878 - 0x800) / 0x4); + /* dump lrme sw registers, interrupts */ + cam_io_dump(base, 0x900, (0x928 - 0x900) / 0x4); +} + +static int cam_lrme_dump_regs_to_buf( + struct cam_lrme_frame_request *req, + struct cam_hw_info *lrme_hw, + struct cam_lrme_hw_dump_args *dump_args) +{ + int i; + uint8_t *dst; + uint32_t *addr, *start; + uint32_t num_reg, min_len; + size_t remain_len; + struct cam_hw_soc_info *soc_info; + struct cam_lrme_hw_dump_header *hdr; + + if (!lrme_hw || !req || !dump_args) { + CAM_ERR(CAM_LRME, "Invalid params %pK, %pK, %pK", + 
lrme_hw, req, dump_args); + return -EINVAL; + } + soc_info = &lrme_hw->soc_info; + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_LRME, "dump buffer overshoot len %zu offset %zu", + dump_args->buf_len, dump_args->offset); + return -ENOSPC; + } + remain_len = dump_args->buf_len - dump_args->offset; + min_len = sizeof(struct cam_lrme_hw_dump_header) + + soc_info->reg_map[0].size + sizeof(uint32_t); + + if (remain_len < min_len) { + CAM_WARN(CAM_LRME, "dump buffer exhaust remain %zu min %u", + remain_len, min_len); + return -ENOSPC; + } + dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_lrme_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_LRME_HW_DUMP_TAG_MAX_LEN, + "LRME_REG:"); + hdr->word_size = sizeof(uint32_t); + addr = (uint32_t *)(dst + sizeof(struct cam_lrme_hw_dump_header)); + start = addr; + *addr++ = soc_info->index; + num_reg = soc_info->reg_map[0].size/4; + for (i = 0; i < num_reg; i++) { + *addr++ = soc_info->mem_block[0]->start + (i*4); + *addr++ = cam_io_r(soc_info->reg_map[0].mem_base + (i*4)); + } + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_lrme_hw_dump_header); + CAM_DBG(CAM_LRME, "offset %zu", dump_args->offset); + return 0; +} + +static int cam_lrme_hw_dump( + struct cam_hw_info *lrme_hw, + struct cam_lrme_hw_dump_args *dump_args) +{ + uint8_t *dst; + ktime_t cur_time; + size_t remain_len; + uint32_t min_len; + uint64_t diff; + uint64_t *addr, *start; + struct timespec64 cur_ts; + struct timespec64 req_ts; + struct cam_lrme_core *lrme_core; + struct cam_lrme_frame_request *req = NULL; + struct cam_lrme_hw_dump_header *hdr; + + mutex_lock(&lrme_hw->hw_mutex); + if (lrme_hw->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_DBG(CAM_LRME, "LRME HW is in off state"); + mutex_unlock(&lrme_hw->hw_mutex); + return 0; + } + + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + + if (lrme_core->req_submit && + lrme_core->req_submit->req_id == 
dump_args->request_id) + req = lrme_core->req_submit; + else if (lrme_core->req_proc && + lrme_core->req_proc->req_id == dump_args->request_id) + req = lrme_core->req_proc; + + if (!req) { + CAM_DBG(CAM_LRME, "LRME req %lld not with hw", + dump_args->request_id); + mutex_unlock(&lrme_hw->hw_mutex); + return 0; + } + + cur_time = ktime_get(); + diff = ktime_us_delta(req->submit_timestamp, cur_time); + cur_ts = ktime_to_timespec64(cur_time); + req_ts = ktime_to_timespec64(req->submit_timestamp); + + if (diff < CAM_LRME_RESPONSE_TIME_THRESHOLD) { + CAM_INFO(CAM_LRME, + "No error req %lld req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + mutex_unlock(&lrme_hw->hw_mutex); + return 0; + } + + CAM_INFO(CAM_LRME, + "Error req %lld req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_LRME, "dump buffer overshoot len %zu offset %zu", + dump_args->buf_len, dump_args->offset); + mutex_unlock(&lrme_hw->hw_mutex); + return 0; + } + + remain_len = dump_args->buf_len - dump_args->offset; + min_len = sizeof(struct cam_lrme_hw_dump_header) + + (CAM_LRME_HW_DUMP_NUM_WORDS * sizeof(uint64_t)); + + if (remain_len < min_len) { + CAM_WARN(CAM_LRME, "dump buffer exhaust remain %zu min %u", + remain_len, min_len); + mutex_unlock(&lrme_hw->hw_mutex); + return 0; + } + + dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_lrme_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_LRME_HW_DUMP_TAG_MAX_LEN, + "LRME_REQ:"); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_lrme_hw_dump_header)); + start = addr; + *addr++ = req->req_id; + *addr++ = req_ts.tv_sec; + *addr++ = req_ts.tv_nsec/NSEC_PER_USEC; + *addr++ = 
cur_ts.tv_sec; + *addr++ = cur_ts.tv_nsec/NSEC_PER_USEC; + hdr->size = hdr->word_size * (addr - start); + dump_args->offset += hdr->size + + sizeof(struct cam_lrme_hw_dump_header); + cam_lrme_dump_regs_to_buf(req, lrme_hw, dump_args); + mutex_unlock(&lrme_hw->hw_mutex); + return 0; +} + +static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer, + uint32_t *index, uint32_t reg_offset, uint32_t reg_value) +{ + buffer[(*index)++] = reg_offset; + buffer[(*index)++] = reg_value; +} + +static void cam_lrme_hw_util_fill_fe_reg(struct cam_lrme_hw_io_buffer *io_buf, + uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd, + struct cam_lrme_hw_info *hw_info) +{ + uint32_t reg_val; + + /* 1. config buffer size */ + if (io_buf->io_cfg->format == CAM_FORMAT_PLAIN16_10) + reg_val = io_buf->io_cfg->planes[0].width * 2; + else + reg_val = io_buf->io_cfg->planes[0].width; + reg_val |= (io_buf->io_cfg->planes[0].height << 16); + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_rd_reg.bus_client_reg[index].rd_buffer_size, + reg_val); + + CAM_DBG(CAM_LRME, + "width %d", io_buf->io_cfg->planes[0].width); + CAM_DBG(CAM_LRME, + "height %d", io_buf->io_cfg->planes[0].height); + + /* 2. config image address */ + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_rd_reg.bus_client_reg[index].addr_image, + io_buf->io_addr[0]); + + CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]); + + /* 3. config stride */ + reg_val = io_buf->io_cfg->planes[0].plane_stride; + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_rd_reg.bus_client_reg[index].rd_stride, + reg_val); + + CAM_DBG(CAM_LRME, "plane_stride %d", + io_buf->io_cfg->planes[0].plane_stride); + + /* 4. enable client */ + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_rd_reg.bus_client_reg[index].core_cfg, 0x1); + + /* 5. 
unpack_cfg */ + if (io_buf->io_cfg->format == CAM_FORMAT_PD10) + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, + 0x0); + else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY || + io_buf->io_cfg->format == CAM_FORMAT_PLAIN8 || + io_buf->io_cfg->format == CAM_FORMAT_NV12 || + io_buf->io_cfg->format == CAM_FORMAT_NV21) + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, + 0x1); + else if (io_buf->io_cfg->format == CAM_FORMAT_PLAIN16_10) + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, + 0x22); + else + CAM_ERR(CAM_LRME, "Unsupported format %d", + io_buf->io_cfg->format); +} + +static void cam_lrme_hw_util_fill_we_reg(struct cam_lrme_hw_io_buffer *io_buf, + uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd, + struct cam_lrme_hw_info *hw_info) +{ + /* config client mode */ + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_wr_reg.bus_client_reg[index].cfg, + 0x1); + + /* image address */ + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_wr_reg.bus_client_reg[index].addr_image, + io_buf->io_addr[0]); + CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]); + + /* buffer width and height */ + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_wr_reg.bus_client_reg[index].buffer_width_cfg, + io_buf->io_cfg->planes[0].width); + CAM_DBG(CAM_LRME, "width %d", io_buf->io_cfg->planes[0].width); + + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_wr_reg.bus_client_reg[index].buffer_height_cfg, + io_buf->io_cfg->planes[0].height); + CAM_DBG(CAM_LRME, "height %d", io_buf->io_cfg->planes[0].height); + + /* packer cfg */ + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_wr_reg.bus_client_reg[index].packer_cfg, + (index == 0) ? 
0x1 : 0x5); + + /* client stride */ + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd, + hw_info->bus_wr_reg.bus_client_reg[index].wr_stride, + io_buf->io_cfg->planes[0].plane_stride); + CAM_DBG(CAM_LRME, "plane_stride %d", + io_buf->io_cfg->planes[0].plane_stride); +} + + +static int cam_lrme_hw_util_process_config_hw(struct cam_hw_info *lrme_hw, + struct cam_lrme_hw_cmd_config_args *config_args) +{ + int i; + struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info; + struct cam_lrme_cdm_info *hw_cdm_info; + uint32_t *cmd_buf_addr = config_args->cmd_buf_addr; + uint32_t reg_val_pair[CAM_LRME_MAX_REG_PAIR_NUM]; + struct cam_lrme_hw_io_buffer *io_buf; + struct cam_lrme_hw_info *hw_info = + ((struct cam_lrme_core *)lrme_hw->core_info)->hw_info; + uint32_t num_cmd = 0; + uint32_t num_regval_pairs = 0; + uint32_t size; + uint32_t mem_base, available_size = config_args->size; + uint32_t output_res_mask = 0, input_res_mask = 0; + + + if (!cmd_buf_addr) { + CAM_ERR(CAM_LRME, "Invalid input args"); + return -EINVAL; + } + + hw_cdm_info = + ((struct cam_lrme_core *)lrme_hw->core_info)->hw_cdm_info; + + for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) { + io_buf = &config_args->input_buf[i]; + + if (io_buf->valid == false) + break; + + if (io_buf->io_cfg->direction != CAM_BUF_INPUT) { + CAM_ERR(CAM_LRME, "Incorrect direction %d %d", + io_buf->io_cfg->direction, CAM_BUF_INPUT); + return -EINVAL; + } + CAM_DBG(CAM_LRME, + "resource_type %d", io_buf->io_cfg->resource_type); + + switch (io_buf->io_cfg->resource_type) { + case CAM_LRME_IO_TYPE_TAR: + cam_lrme_hw_util_fill_fe_reg(io_buf, 0, reg_val_pair, + &num_cmd, hw_info); + + input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_TAR; + break; + case CAM_LRME_IO_TYPE_REF: + cam_lrme_hw_util_fill_fe_reg(io_buf, 1, reg_val_pair, + &num_cmd, hw_info); + + input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_REF; + break; + default: + CAM_ERR(CAM_LRME, "wrong resource_type %d", + io_buf->io_cfg->resource_type); + return -EINVAL; + } + } + + for (i = 0; 
i < CAM_LRME_BUS_RD_MAX_CLIENTS; i++) + if (!((input_res_mask >> i) & 0x1)) + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd, + hw_info->bus_rd_reg.bus_client_reg[i].core_cfg, + 0x0); + + for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) { + io_buf = &config_args->output_buf[i]; + + if (io_buf->valid == false) + break; + + if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) { + CAM_ERR(CAM_LRME, "Incorrect direction %d %d", + io_buf->io_cfg->direction, CAM_BUF_INPUT); + return -EINVAL; + } + + CAM_DBG(CAM_LRME, "resource_type %d", + io_buf->io_cfg->resource_type); + switch (io_buf->io_cfg->resource_type) { + case CAM_LRME_IO_TYPE_DS2: + cam_lrme_hw_util_fill_we_reg(io_buf, 0, reg_val_pair, + &num_cmd, hw_info); + + output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_DS2; + break; + case CAM_LRME_IO_TYPE_RES: + cam_lrme_hw_util_fill_we_reg(io_buf, 1, reg_val_pair, + &num_cmd, hw_info); + + output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_RES; + break; + + default: + CAM_ERR(CAM_LRME, "wrong resource_type %d", + io_buf->io_cfg->resource_type); + return -EINVAL; + } + } + + for (i = 0; i < CAM_LRME_BUS_RD_MAX_CLIENTS; i++) + if (!((output_res_mask >> i) & 0x1)) + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd, + hw_info->bus_wr_reg.bus_client_reg[i].cfg, 0x0); + + if (output_res_mask) { + /* write composite mask */ + cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd, + hw_info->bus_wr_reg.common_reg.composite_mask_0, + output_res_mask); + } + + size = hw_cdm_info->cdm_ops->cdm_required_size_changebase(); + if ((size * 4) > available_size) { + CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d", + available_size, size); + return -EINVAL; + } + + mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info, CAM_LRME_BASE_IDX); + if (mem_base == -1) { + CAM_ERR(CAM_LRME, "failed to get mem_base, index: %d num_reg_map: %u", + CAM_LRME_BASE_IDX, soc_info->num_reg_map); + return -EINVAL; + } + + hw_cdm_info->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base); + 
cmd_buf_addr += size; + available_size -= (size * 4); + + num_regval_pairs = num_cmd / 2; + + if (num_regval_pairs) { + size = hw_cdm_info->cdm_ops->cdm_required_size_reg_random( + num_regval_pairs); + + if ((size * 4) > available_size) { + CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d", + available_size, size); + return -ENOMEM; + } + + hw_cdm_info->cdm_ops->cdm_write_regrandom(cmd_buf_addr, + num_regval_pairs, reg_val_pair); + cmd_buf_addr += size; + available_size -= (size * 4); + + } else { + CAM_DBG(CAM_LRME, "No reg val pairs"); + } + + config_args->config_buf_size = config_args->size - available_size; + + return 0; +} + +static int cam_lrme_hw_util_submit_go(struct cam_hw_info *lrme_hw) +{ + struct cam_lrme_core *lrme_core; + struct cam_hw_soc_info *soc_info; + struct cam_lrme_hw_info *hw_info; + + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + hw_info = lrme_core->hw_info; + soc_info = &lrme_hw->soc_info; + + cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base + + hw_info->bus_rd_reg.common_reg.cmd); + + return 0; +} + +static int cam_lrme_hw_util_reset(struct cam_hw_info *lrme_hw, + uint32_t reset_type) +{ + struct cam_lrme_core *lrme_core; + struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info; + struct cam_lrme_hw_info *hw_info; + long time_left; + + lrme_core = lrme_hw->core_info; + hw_info = lrme_core->hw_info; + + switch (reset_type) { + case CAM_LRME_HW_RESET_TYPE_HW_RESET: + reinit_completion(&lrme_core->reset_complete); + cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base + + hw_info->titan_reg.top_rst_cmd); + time_left = cam_common_wait_for_completion_timeout( + &lrme_core->reset_complete, + msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT)); + if (time_left <= 0) { + CAM_ERR(CAM_LRME, + "HW reset wait failed time_left=%ld", + time_left); + return -ETIMEDOUT; + } + break; + case CAM_LRME_HW_RESET_TYPE_SW_RESET: + cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.sw_reset); + cam_io_w_mb(0x3, 
soc_info->reg_map[0].mem_base + + hw_info->bus_rd_reg.common_reg.sw_reset); + reinit_completion(&lrme_core->reset_complete); + cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base + + hw_info->titan_reg.top_rst_cmd); + time_left = cam_common_wait_for_completion_timeout( + &lrme_core->reset_complete, + msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT)); + if (time_left <= 0) { + CAM_ERR(CAM_LRME, + "SW reset wait failed time_left=%ld", + time_left); + return -ETIMEDOUT; + } + break; + } + + return 0; +} + +int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw, + struct cam_lrme_dev_cap *hw_caps) +{ + struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info; + struct cam_lrme_hw_info *hw_info = + ((struct cam_lrme_core *)lrme_hw->core_info)->hw_info; + uint32_t reg_value; + + if (!hw_info) { + CAM_ERR(CAM_LRME, "Invalid hw info data"); + return -EINVAL; + } + + reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base + + hw_info->clc_reg.clc_hw_version); + hw_caps->clc_hw_version.gen = + CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C); + hw_caps->clc_hw_version.rev = + CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10); + hw_caps->clc_hw_version.step = + CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0); + + reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base + + hw_info->bus_rd_reg.common_reg.hw_version); + hw_caps->bus_rd_hw_version.gen = + CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C); + hw_caps->bus_rd_hw_version.rev = + CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10); + hw_caps->bus_rd_hw_version.step = + CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0); + + reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.hw_version); + hw_caps->bus_wr_hw_version.gen = + CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C); + hw_caps->bus_wr_hw_version.rev = + CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10); + hw_caps->bus_wr_hw_version.step = + CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0); + + reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base + + 
hw_info->titan_reg.top_hw_version); + hw_caps->top_hw_version.gen = + CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C); + hw_caps->top_hw_version.rev = + CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10); + hw_caps->top_hw_version.step = + CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0); + + reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base + + hw_info->titan_reg.top_titan_version); + hw_caps->top_titan_version.gen = + CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C); + hw_caps->top_titan_version.rev = + CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10); + hw_caps->top_titan_version.step = + CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0); + + return 0; +} + +static int cam_lrme_hw_util_submit_req(struct cam_lrme_core *lrme_core, + struct cam_lrme_frame_request *frame_req) +{ + struct cam_lrme_cdm_info *hw_cdm_info = + lrme_core->hw_cdm_info; + struct cam_cdm_bl_request *cdm_cmd = hw_cdm_info->cdm_cmd; + struct cam_hw_update_entry *cmd; + int i, rc = 0; + + if (frame_req->num_hw_update_entries > 0) { + cdm_cmd->cmd_arrary_count = frame_req->num_hw_update_entries; + cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE; + cdm_cmd->flag = false; + cdm_cmd->userdata = NULL; + cdm_cmd->cookie = 0; + cdm_cmd->gen_irq_arb = false; + + for (i = 0; i <= frame_req->num_hw_update_entries; i++) { + cmd = (frame_req->hw_update_entries + i); + cdm_cmd->cmd_flex[i].bl_addr.mem_handle = cmd->handle; + cdm_cmd->cmd_flex[i].offset = cmd->offset; + cdm_cmd->cmd_flex[i].len = cmd->len; + cdm_cmd->cmd_flex[i].arbitrate = false; + } + + rc = cam_cdm_submit_bls(hw_cdm_info->cdm_handle, cdm_cmd); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to submit cdm commands"); + return -EINVAL; + } + } else { + CAM_ERR(CAM_LRME, "No hw update entry"); + rc = -EINVAL; + } + + return rc; +} + +static int cam_lrme_hw_util_flush_ctx(struct cam_hw_info *lrme_hw, + void *ctxt_to_hw_map) +{ + int rc = -ENODEV; + struct cam_lrme_core *lrme_core = lrme_hw->core_info; + struct cam_lrme_hw_cb_args cb_args; + struct 
cam_lrme_frame_request *req_proc, *req_submit; + struct cam_lrme_hw_submit_args submit_args; + + rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET); + if (rc) { + CAM_ERR(CAM_LRME, "reset failed"); + return rc; + } + + lrme_core->state = CAM_LRME_CORE_STATE_IDLE; + req_proc = lrme_core->req_proc; + req_submit = lrme_core->req_submit; + lrme_core->req_proc = NULL; + lrme_core->req_submit = NULL; + + if (req_submit && req_submit->ctxt_to_hw_map == ctxt_to_hw_map) { + cb_args.cb_type = CAM_LRME_CB_PUT_FRAME; + cb_args.frame_req = req_submit; + if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) + lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb( + lrme_core->hw_mgr_cb.data, &cb_args); + } else if (req_submit) { + submit_args.frame_req = req_submit; + submit_args.hw_update_entries = req_submit->hw_update_entries; + submit_args.num_hw_update_entries = + req_submit->num_hw_update_entries; + rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit); + if (rc) + CAM_ERR(CAM_LRME, "Submit failed"); + lrme_core->req_submit = req_submit; + cam_lrme_hw_util_submit_go(lrme_hw); + lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING; + } + + if (req_proc && req_proc->ctxt_to_hw_map == ctxt_to_hw_map) { + cb_args.cb_type = CAM_LRME_CB_PUT_FRAME; + cb_args.frame_req = req_proc; + if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) + lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb( + lrme_core->hw_mgr_cb.data, &cb_args); + } else if (req_proc) { + submit_args.frame_req = req_proc; + submit_args.hw_update_entries = req_proc->hw_update_entries; + submit_args.num_hw_update_entries = + req_proc->num_hw_update_entries; + rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc); + if (rc) + CAM_ERR(CAM_LRME, "Submit failed"); + lrme_core->req_submit = req_proc; + cam_lrme_hw_util_submit_go(lrme_hw); + lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING; + } + + return rc; +} + +static int cam_lrme_hw_util_flush_req(struct cam_hw_info *lrme_hw, + struct cam_lrme_frame_request *req_to_flush) +{ + int rc = 
-ENODEV; + struct cam_lrme_core *lrme_core = lrme_hw->core_info; + struct cam_lrme_hw_cb_args cb_args; + struct cam_lrme_frame_request *req_proc, *req_submit; + struct cam_lrme_hw_submit_args submit_args; + + rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET); + if (rc) { + CAM_ERR(CAM_LRME, "reset failed"); + return rc; + } + + lrme_core->state = CAM_LRME_CORE_STATE_IDLE; + req_proc = lrme_core->req_proc; + req_submit = lrme_core->req_submit; + lrme_core->req_proc = NULL; + lrme_core->req_submit = NULL; + + if (req_submit && req_submit == req_to_flush) { + cb_args.cb_type = CAM_LRME_CB_PUT_FRAME; + cb_args.frame_req = req_submit; + if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) + lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb( + lrme_core->hw_mgr_cb.data, &cb_args); + } else if (req_submit) { + submit_args.frame_req = req_submit; + submit_args.hw_update_entries = req_submit->hw_update_entries; + submit_args.num_hw_update_entries = + req_submit->num_hw_update_entries; + rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit); + if (rc) + CAM_ERR(CAM_LRME, "Submit failed"); + lrme_core->req_submit = req_submit; + cam_lrme_hw_util_submit_go(lrme_hw); + lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING; + } + + if (req_proc && req_proc == req_to_flush) { + cb_args.cb_type = CAM_LRME_CB_PUT_FRAME; + cb_args.frame_req = req_proc; + if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) + lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb( + lrme_core->hw_mgr_cb.data, &cb_args); + } else if (req_proc) { + submit_args.frame_req = req_proc; + submit_args.hw_update_entries = req_proc->hw_update_entries; + submit_args.num_hw_update_entries = + req_proc->num_hw_update_entries; + rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc); + if (rc) + CAM_ERR(CAM_LRME, "Submit failed"); + lrme_core->req_submit = req_proc; + cam_lrme_hw_util_submit_go(lrme_hw); + lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING; + } + + return rc; +} + + +static int cam_lrme_hw_util_process_err(struct 
cam_hw_info *lrme_hw) +{ + struct cam_lrme_core *lrme_core = lrme_hw->core_info; + struct cam_lrme_frame_request *req_proc, *req_submit; + struct cam_lrme_hw_cb_args cb_args; + int rc; + + req_proc = lrme_core->req_proc; + req_submit = lrme_core->req_submit; + cb_args.cb_type = CAM_LRME_CB_ERROR; + + if ((lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) && + (lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING) && + (lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND)) { + CAM_ERR(CAM_LRME, "Get error irq in wrong state %d", + lrme_core->state); + } + + cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base); + + CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery"); + lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY; + rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET); + if (rc) + CAM_ERR(CAM_LRME, "Failed to reset"); + + lrme_core->req_proc = NULL; + lrme_core->req_submit = NULL; + if (!rc) + lrme_core->state = CAM_LRME_CORE_STATE_IDLE; + + cb_args.frame_req = req_proc; + lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data, + &cb_args); + + cb_args.frame_req = req_submit; + lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data, + &cb_args); + + return rc; +} + +static int cam_lrme_hw_util_process_reg_update( + struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args) +{ + struct cam_lrme_core *lrme_core = lrme_hw->core_info; + int rc = 0; + + cb_args->cb_type |= CAM_LRME_CB_COMP_REG_UPDATE; + if (lrme_core->state == CAM_LRME_CORE_STATE_REQ_PENDING) { + lrme_core->state = CAM_LRME_CORE_STATE_PROCESSING; + } else { + CAM_ERR(CAM_LRME, "Reg update in wrong state %d", + lrme_core->state); + rc = cam_lrme_hw_util_process_err(lrme_hw); + if (rc) + CAM_ERR(CAM_LRME, "Failed to reset"); + return -EINVAL; + } + + lrme_core->req_proc = lrme_core->req_submit; + lrme_core->req_submit = NULL; + + if (lrme_core->dump_flag) + cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base); + + return 0; +} + +static int 
cam_lrme_hw_util_process_idle( + struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args) +{ + struct cam_lrme_core *lrme_core = lrme_hw->core_info; + int rc = 0; + + cb_args->cb_type |= CAM_LRME_CB_BUF_DONE; + switch (lrme_core->state) { + case CAM_LRME_CORE_STATE_REQ_PROC_PEND: + cam_lrme_hw_util_submit_go(lrme_hw); + lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING; + break; + + case CAM_LRME_CORE_STATE_PROCESSING: + lrme_core->state = CAM_LRME_CORE_STATE_IDLE; + break; + + default: + CAM_ERR(CAM_LRME, "Idle in wrong state %d", + lrme_core->state); + rc = cam_lrme_hw_util_process_err(lrme_hw); + return rc; + } + cb_args->frame_req = lrme_core->req_proc; + lrme_core->req_proc = NULL; + + return 0; +} + +void cam_lrme_set_irq(struct cam_hw_info *lrme_hw, + enum cam_lrme_irq_set set) +{ + struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info; + struct cam_lrme_core *lrme_core = lrme_hw->core_info; + struct cam_lrme_hw_info *hw_info = lrme_core->hw_info; + + switch (set) { + case CAM_LRME_IRQ_ENABLE: + cam_io_w_mb(0xFFFF, + soc_info->reg_map[0].mem_base + + hw_info->titan_reg.top_irq_mask); + cam_io_w_mb(0xFFFFF, + soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.irq_mask_0); + cam_io_w_mb(0xFFFFF, + soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.irq_mask_1); + cam_io_w_mb(0xFFFFF, + soc_info->reg_map[0].mem_base + + hw_info->bus_rd_reg.common_reg.irq_mask); + break; + + case CAM_LRME_IRQ_DISABLE: + cam_io_w_mb(0x0, + soc_info->reg_map[0].mem_base + + hw_info->titan_reg.top_irq_mask); + cam_io_w_mb(0x0, + soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.irq_mask_0); + cam_io_w_mb(0x0, + soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.irq_mask_1); + cam_io_w_mb(0x0, + soc_info->reg_map[0].mem_base + + hw_info->bus_rd_reg.common_reg.irq_mask); + break; + } +} + + +int cam_lrme_hw_process_irq(void *priv, void *data) +{ + struct cam_lrme_hw_work_data *work_data; + struct cam_hw_info *lrme_hw; 
+ struct cam_lrme_core *lrme_core; + int rc = 0; + uint32_t top_irq_status, fe_irq_status; + uint32_t *we_irq_status; + struct cam_lrme_hw_cb_args cb_args; + + if (!data || !priv) { + CAM_ERR(CAM_LRME, "Invalid data %pK %pK", data, priv); + return -EINVAL; + } + + memset(&cb_args, 0, sizeof(struct cam_lrme_hw_cb_args)); + lrme_hw = (struct cam_hw_info *)priv; + work_data = (struct cam_lrme_hw_work_data *)data; + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + top_irq_status = work_data->top_irq_status; + fe_irq_status = work_data->fe_irq_status; + we_irq_status = work_data->we_irq_status; + + CAM_DBG(CAM_LRME, + "top status %x, fe status %x, we status0 %x, we status1 %x", + top_irq_status, fe_irq_status, we_irq_status[0], + we_irq_status[1]); + CAM_DBG(CAM_LRME, "Current state %d", lrme_core->state); + + mutex_lock(&lrme_hw->hw_mutex); + + if (lrme_hw->hw_state == CAM_HW_STATE_POWER_DOWN) { + CAM_DBG(CAM_LRME, "LRME HW is in off state"); + goto end; + } + + if (top_irq_status & (1 << 3)) { + CAM_DBG(CAM_LRME, "Error"); + rc = cam_lrme_hw_util_process_err(lrme_hw); + if (rc) + CAM_ERR(CAM_LRME, "Process error failed"); + goto end; + } + + if (we_irq_status[0] & (1 << 1)) { + CAM_DBG(CAM_LRME, "reg update"); + rc = cam_lrme_hw_util_process_reg_update(lrme_hw, &cb_args); + if (rc) { + CAM_ERR(CAM_LRME, "Process reg_update failed"); + goto end; + } + } + + if (top_irq_status & (1 << 4)) { + CAM_DBG(CAM_LRME, "IDLE"); + if (!lrme_core->req_proc) { + CAM_DBG(CAM_LRME, "No frame request to process idle"); + goto end; + } + rc = cam_lrme_hw_util_process_idle(lrme_hw, &cb_args); + if (rc) { + CAM_ERR(CAM_LRME, "Process idle failed"); + goto end; + } + } + + if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) { + lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb( + lrme_core->hw_mgr_cb.data, &cb_args); + } else { + CAM_ERR(CAM_LRME, "No hw mgr cb"); + rc = -EINVAL; + } + +end: + mutex_unlock(&lrme_hw->hw_mutex); + return rc; +} + +int cam_lrme_hw_start(void *hw_priv, void 
*hw_start_args, uint32_t arg_size) +{ + struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv; + int rc = 0; + struct cam_lrme_core *lrme_core; + + if (!lrme_hw) { + CAM_ERR(CAM_LRME, + "Invalid input params, lrme_hw %pK", + lrme_hw); + return -EINVAL; + } + + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + + mutex_lock(&lrme_hw->hw_mutex); + + if (lrme_hw->open_count > 0) { + lrme_hw->open_count++; + CAM_DBG(CAM_LRME, "This device is activated before"); + goto unlock; + } + + rc = cam_lrme_soc_enable_resources(lrme_hw); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to enable soc resources"); + goto unlock; + } + + rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to reset hw"); + goto disable_soc; + } + + if (lrme_core->hw_cdm_info) { + struct cam_lrme_cdm_info *hw_cdm_info = + lrme_core->hw_cdm_info; + + rc = cam_cdm_stream_on(hw_cdm_info->cdm_handle); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to stream on cdm"); + goto disable_soc; + } + } + + lrme_hw->hw_state = CAM_HW_STATE_POWER_UP; + lrme_hw->open_count++; + lrme_core->state = CAM_LRME_CORE_STATE_IDLE; + + CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count); + mutex_unlock(&lrme_hw->hw_mutex); + return rc; + +disable_soc: + if (cam_lrme_soc_disable_resources(lrme_hw)) + CAM_ERR(CAM_LRME, "Error in disable soc resources"); +unlock: + CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count); + mutex_unlock(&lrme_hw->hw_mutex); + return rc; +} + +int cam_lrme_hw_stop(void *hw_priv, void *hw_stop_args, uint32_t arg_size) +{ + struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv; + int rc = 0; + struct cam_lrme_core *lrme_core; + + if (!lrme_hw) { + CAM_ERR(CAM_LRME, "Invalid argument"); + return -EINVAL; + } + + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + + mutex_lock(&lrme_hw->hw_mutex); + + if (lrme_hw->open_count == 0 || + lrme_hw->hw_state == CAM_HW_STATE_POWER_DOWN) { + mutex_unlock(&lrme_hw->hw_mutex); + 
CAM_ERR(CAM_LRME, "Error Unbalanced stop"); + return -EINVAL; + } + lrme_hw->open_count--; + + CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count); + + if (lrme_hw->open_count) + goto unlock; + + lrme_core->req_proc = NULL; + lrme_core->req_submit = NULL; + + if (lrme_core->hw_cdm_info) { + struct cam_lrme_cdm_info *hw_cdm_info = + lrme_core->hw_cdm_info; + + rc = cam_cdm_stream_off(hw_cdm_info->cdm_handle); + if (rc) { + CAM_ERR(CAM_LRME, + "Failed in CDM StreamOff, handle=0x%x, rc=%d", + hw_cdm_info->cdm_handle, rc); + goto unlock; + } + } + + rc = cam_lrme_soc_disable_resources(lrme_hw); + if (rc) + CAM_ERR(CAM_LRME, "Failed in Disable SOC, rc=%d", rc); + + lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + if (lrme_core->state == CAM_LRME_CORE_STATE_IDLE) { + lrme_core->state = CAM_LRME_CORE_STATE_INIT; + } else { + CAM_ERR(CAM_LRME, "HW in wrong state %d", lrme_core->state); + rc = -EINVAL; + } + +unlock: + mutex_unlock(&lrme_hw->hw_mutex); + return rc; +} + +int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args, + uint32_t arg_size) +{ + struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv; + struct cam_lrme_core *lrme_core; + struct cam_lrme_hw_submit_args *args = + (struct cam_lrme_hw_submit_args *)hw_submit_args; + int rc = 0; + struct cam_lrme_frame_request *frame_req; + + + if (!hw_priv || !hw_submit_args) { + CAM_ERR(CAM_LRME, "Invalid input"); + return -EINVAL; + } + + if (sizeof(struct cam_lrme_hw_submit_args) != arg_size) { + CAM_ERR(CAM_LRME, + "size of args %zu, arg_size %d", + sizeof(struct cam_lrme_hw_submit_args), arg_size); + return -EINVAL; + } + + frame_req = args->frame_req; + + mutex_lock(&lrme_hw->hw_mutex); + + if (lrme_hw->open_count == 0) { + CAM_ERR(CAM_LRME, "HW is not open"); + mutex_unlock(&lrme_hw->hw_mutex); + return -EINVAL; + } + + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + if (lrme_core->state != CAM_LRME_CORE_STATE_IDLE && + lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) { + 
mutex_unlock(&lrme_hw->hw_mutex); + CAM_DBG(CAM_LRME, "device busy, can not submit, state %d", + lrme_core->state); + return -EBUSY; + } + + if (lrme_core->req_submit != NULL) { + CAM_ERR(CAM_LRME, "req_submit is not NULL"); + return -EBUSY; + } + + rc = cam_lrme_hw_util_submit_req(lrme_core, frame_req); + if (rc) { + CAM_ERR(CAM_LRME, "Submit req failed"); + goto error; + } + + frame_req->submit_timestamp = ktime_get(); + + switch (lrme_core->state) { + case CAM_LRME_CORE_STATE_PROCESSING: + lrme_core->state = CAM_LRME_CORE_STATE_REQ_PROC_PEND; + break; + + case CAM_LRME_CORE_STATE_IDLE: + cam_lrme_hw_util_submit_go(lrme_hw); + lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING; + break; + + default: + CAM_ERR(CAM_LRME, "Wrong hw state"); + rc = -EINVAL; + goto error; + } + + lrme_core->req_submit = frame_req; + + mutex_unlock(&lrme_hw->hw_mutex); + CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu", + frame_req->req_id); + + return 0; + +error: + mutex_unlock(&lrme_hw->hw_mutex); + + return rc; + +} + +int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size) +{ + struct cam_hw_info *lrme_hw = hw_priv; + struct cam_lrme_core *lrme_core; + struct cam_lrme_hw_reset_args *lrme_reset_args = reset_core_args; + int rc; + + if (!hw_priv) { + CAM_ERR(CAM_LRME, "Invalid input args"); + return -EINVAL; + } + + if (!reset_core_args || + sizeof(struct cam_lrme_hw_reset_args) != arg_size) { + CAM_ERR(CAM_LRME, "Invalid reset args"); + return -EINVAL; + } + + lrme_core = lrme_hw->core_info; + + mutex_lock(&lrme_hw->hw_mutex); + if (lrme_core->state == CAM_LRME_CORE_STATE_RECOVERY) { + mutex_unlock(&lrme_hw->hw_mutex); + CAM_ERR(CAM_LRME, "Reset not allowed in %d state", + lrme_core->state); + return -EINVAL; + } + + lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY; + + rc = cam_lrme_hw_util_reset(lrme_hw, lrme_reset_args->reset_type); + if (rc) { + mutex_unlock(&lrme_hw->hw_mutex); + CAM_ERR(CAM_FD, "Failed to reset"); + return rc; + } + + 
lrme_core->state = CAM_LRME_CORE_STATE_IDLE; + + mutex_unlock(&lrme_hw->hw_mutex); + + return 0; +} + +int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size) +{ + struct cam_lrme_core *lrme_core = NULL; + struct cam_hw_info *lrme_hw = hw_priv; + struct cam_lrme_hw_flush_args *flush_args = + (struct cam_lrme_hw_flush_args *)hw_flush_args; + int rc = -ENODEV; + + if (!hw_priv) { + CAM_ERR(CAM_LRME, "Invalid arguments %pK", hw_priv); + return -EINVAL; + } + + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + + mutex_lock(&lrme_hw->hw_mutex); + + if (lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING && + lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING && + lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND) { + mutex_unlock(&lrme_hw->hw_mutex); + CAM_DBG(CAM_LRME, "Flush is not needed in %d state", + lrme_core->state); + return 0; + } + + if (!lrme_core->req_proc && !lrme_core->req_submit) { + mutex_unlock(&lrme_hw->hw_mutex); + CAM_DBG(CAM_LRME, "no req in device"); + return 0; + } + + switch (flush_args->flush_type) { + case CAM_FLUSH_TYPE_ALL: + if ((!lrme_core->req_submit || + lrme_core->req_submit->ctxt_to_hw_map != + flush_args->ctxt_to_hw_map) && + (!lrme_core->req_proc || + lrme_core->req_proc->ctxt_to_hw_map != + flush_args->ctxt_to_hw_map)) { + mutex_unlock(&lrme_hw->hw_mutex); + CAM_DBG(CAM_LRME, "hw running on different ctx"); + return 0; + } + rc = cam_lrme_hw_util_flush_ctx(lrme_hw, + flush_args->ctxt_to_hw_map); + if (rc) + CAM_ERR(CAM_LRME, "Flush all failed"); + break; + + case CAM_FLUSH_TYPE_REQ: + if ((!lrme_core->req_submit || + lrme_core->req_submit != flush_args->req_to_flush) && + (!lrme_core->req_proc || + lrme_core->req_proc != flush_args->req_to_flush)) { + mutex_unlock(&lrme_hw->hw_mutex); + CAM_DBG(CAM_LRME, "hw running on different ctx"); + return 0; + } + rc = cam_lrme_hw_util_flush_req(lrme_hw, + flush_args->req_to_flush); + if (rc) + CAM_ERR(CAM_LRME, "Flush req failed"); + break; + + default: + 
CAM_ERR(CAM_LRME, "Unsupported flush type"); + break; + } + + mutex_unlock(&lrme_hw->hw_mutex); + + return rc; +} + +int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args, + uint32_t arg_size) +{ + struct cam_hw_info *lrme_hw; + struct cam_lrme_core *lrme_core; + struct cam_lrme_dev_cap *lrme_hw_caps = + (struct cam_lrme_dev_cap *)get_hw_cap_args; + + if (!hw_priv || !get_hw_cap_args) { + CAM_ERR(CAM_LRME, "Invalid input pointers %pK %pK", + hw_priv, get_hw_cap_args); + return -EINVAL; + } + + lrme_hw = (struct cam_hw_info *)hw_priv; + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + *lrme_hw_caps = lrme_core->hw_caps; + + return 0; +} + +irqreturn_t cam_lrme_hw_irq(int irq_num, void *data) +{ + struct cam_hw_info *lrme_hw; + struct cam_lrme_core *lrme_core; + struct cam_hw_soc_info *soc_info; + struct cam_lrme_hw_info *hw_info; + struct crm_workq_task *task; + struct cam_lrme_hw_work_data *work_data; + uint32_t top_irq_status, fe_irq_status, we_irq_status0, we_irq_status1; + int rc; + + if (!data) { + CAM_ERR(CAM_LRME, "Invalid data in IRQ callback"); + return IRQ_NONE; + } + + lrme_hw = (struct cam_hw_info *)data; + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + soc_info = &lrme_hw->soc_info; + hw_info = lrme_core->hw_info; + + top_irq_status = cam_io_r_mb( + soc_info->reg_map[0].mem_base + + hw_info->titan_reg.top_irq_status); + CAM_DBG(CAM_LRME, "top_irq_status %x", top_irq_status); + cam_io_w_mb(top_irq_status, + soc_info->reg_map[0].mem_base + + hw_info->titan_reg.top_irq_clear); + top_irq_status &= CAM_LRME_TOP_IRQ_MASK; + + fe_irq_status = cam_io_r_mb( + soc_info->reg_map[0].mem_base + + hw_info->bus_rd_reg.common_reg.irq_status); + CAM_DBG(CAM_LRME, "fe_irq_status %x", fe_irq_status); + cam_io_w_mb(fe_irq_status, + soc_info->reg_map[0].mem_base + + hw_info->bus_rd_reg.common_reg.irq_clear); + fe_irq_status &= CAM_LRME_FE_IRQ_MASK; + + we_irq_status0 = cam_io_r_mb( + soc_info->reg_map[0].mem_base + + 
hw_info->bus_wr_reg.common_reg.irq_status_0); + CAM_DBG(CAM_LRME, "we_irq_status[0] %x", we_irq_status0); + cam_io_w_mb(we_irq_status0, + soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.irq_clear_0); + we_irq_status0 &= CAM_LRME_WE_IRQ_MASK_0; + + we_irq_status1 = cam_io_r_mb( + soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.irq_status_1); + CAM_DBG(CAM_LRME, "we_irq_status[1] %x", we_irq_status1); + cam_io_w_mb(we_irq_status1, + soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.irq_clear_1); + we_irq_status1 &= CAM_LRME_WE_IRQ_MASK_1; + + cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base + + hw_info->titan_reg.top_irq_cmd); + cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base + + hw_info->bus_wr_reg.common_reg.irq_cmd); + cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base + + hw_info->bus_rd_reg.common_reg.irq_cmd); + + if (top_irq_status & 0x1) { + complete(&lrme_core->reset_complete); + top_irq_status &= (~0x1); + } + + if (top_irq_status || fe_irq_status || + we_irq_status0 || we_irq_status1) { + task = cam_req_mgr_workq_get_task(lrme_core->work); + if (!task) { + CAM_ERR(CAM_LRME, "no empty task available"); + return IRQ_NONE; + } + work_data = (struct cam_lrme_hw_work_data *)task->payload; + work_data->top_irq_status = top_irq_status; + work_data->fe_irq_status = fe_irq_status; + work_data->we_irq_status[0] = we_irq_status0; + work_data->we_irq_status[1] = we_irq_status1; + task->process_cb = cam_lrme_hw_process_irq; + rc = cam_req_mgr_workq_enqueue_task(task, data, + CRM_TASK_PRIORITY_0); + if (rc) + CAM_ERR(CAM_LRME, + "Failed in enqueue work task, rc=%d", rc); + } + + return IRQ_HANDLED; +} + +int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv; + int rc = 0; + + switch (cmd_type) { + case CAM_LRME_HW_CMD_PREPARE_HW_UPDATE: { + struct cam_lrme_hw_cmd_config_args *config_args; + + config_args = (struct 
cam_lrme_hw_cmd_config_args *)cmd_args; + rc = cam_lrme_hw_util_process_config_hw(lrme_hw, config_args); + break; + } + + case CAM_LRME_HW_CMD_REGISTER_CB: { + struct cam_lrme_hw_cmd_set_cb *cb_args; + struct cam_lrme_device *hw_device; + struct cam_lrme_core *lrme_core = + (struct cam_lrme_core *)lrme_hw->core_info; + cb_args = (struct cam_lrme_hw_cmd_set_cb *)cmd_args; + lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb = + cb_args->cam_lrme_hw_mgr_cb; + lrme_core->hw_mgr_cb.data = cb_args->data; + hw_device = cb_args->data; + rc = 0; + break; + } + + case CAM_LRME_HW_CMD_SUBMIT: { + struct cam_lrme_hw_submit_args *submit_args; + + submit_args = (struct cam_lrme_hw_submit_args *)cmd_args; + rc = cam_lrme_hw_submit_req(hw_priv, + submit_args, arg_size); + break; + } + + case CAM_LRME_HW_CMD_DUMP_REGISTER: { + struct cam_lrme_core *lrme_core = + (struct cam_lrme_core *)lrme_hw->core_info; + lrme_core->dump_flag = *(bool *)cmd_args; + CAM_DBG(CAM_LRME, "dump_flag %d", lrme_core->dump_flag); + break; + } + + case CAM_LRME_HW_CMD_DUMP: { + struct cam_lrme_hw_dump_args *dump_args = + (struct cam_lrme_hw_dump_args *)cmd_args; + rc = cam_lrme_hw_dump(lrme_hw, dump_args); + break; + } + default: + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h new file mode 100644 index 0000000000..f766fd3f7d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h @@ -0,0 +1,470 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_LRME_HW_CORE_H_ +#define _CAM_LRME_HW_CORE_H_ + +#include +#include +#include +#include +#include + +#include "cam_common_util.h" +#include "cam_debug_util.h" +#include "cam_io_util.h" +#include "cam_cpas_api.h" +#include "cam_cdm_intf_api.h" +#include "cam_lrme_hw_intf.h" +#include "cam_lrme_hw_soc.h" +#include "cam_req_mgr_workq.h" + +#define CAM_LRME_HW_RESET_TIMEOUT 3000 + +#define CAM_LRME_BUS_RD_MAX_CLIENTS 2 +#define CAM_LRME_BUS_WR_MAX_CLIENTS 2 + +#define CAM_LRME_HW_WORKQ_NUM_TASK 30 + +#define CAM_LRME_TOP_IRQ_MASK 0x19 +#define CAM_LRME_WE_IRQ_MASK_0 0x2 +#define CAM_LRME_WE_IRQ_MASK_1 0x0 +#define CAM_LRME_FE_IRQ_MASK 0x0 + +#define CAM_LRME_MAX_REG_PAIR_NUM 60 + +#define CAM_LRME_RESPONSE_TIME_THRESHOLD 100000 +#define CAM_LRME_HW_DUMP_TAG_MAX_LEN 128 +#define CAM_LRME_HW_DUMP_NUM_WORDS 5 + +/** + * enum cam_lrme_irq_set + * + * @CAM_LRME_IRQ_ENABLE : Enable irqs + * @CAM_LRME_IRQ_DISABLE : Disable irqs + */ +enum cam_lrme_irq_set { + CAM_LRME_IRQ_ENABLE, + CAM_LRME_IRQ_DISABLE, +}; + +/** + * struct cam_lrme_cdm_info : information used to submit cdm command + * + * @cdm_handle : CDM handle for this device + * @cdm_ops : CDM ops + * @cdm_cmd : CDM command pointer + */ +struct cam_lrme_cdm_info { + uint32_t cdm_handle; + struct cam_cdm_utils_ops *cdm_ops; + struct cam_cdm_bl_request *cdm_cmd; +}; + +/** + * struct cam_lrme_hw_work_data : Work data for HW work queue + * + * @top_irq_status : Top registers irq status + * @fe_irq_status : FE engine irq status + * @we_irq_status : WE engine irq status + */ +struct cam_lrme_hw_work_data { + uint32_t top_irq_status; + uint32_t fe_irq_status; + uint32_t we_irq_status[2]; +}; + +/** + * enum cam_lrme_core_state : LRME core states + * + * @CAM_LRME_CORE_STATE_UNINIT : LRME is in uninit state + * @CAM_LRME_CORE_STATE_INIT : LRME is in init state after probe + * @ CAM_LRME_CORE_STATE_IDLE : LRME is in idle state. 
Hardware is in + * this state when no frame is processing + * or waiting for this core. + * @CAM_LRME_CORE_STATE_REQ_PENDING : LRME is in pending state. One frame is + * waiting for processing + * @CAM_LRME_CORE_STATE_PROCESSING : LRME is in processing state. HW manager + * can submit one more frame to HW + * @CAM_LRME_CORE_STATE_REQ_PROC_PEND : Indicate two frames are inside HW. + * @CAM_LRME_CORE_STATE_RECOVERY : Indicate core is in the process of reset + * @CAM_LRME_CORE_STATE_MAX : upper limit of states + */ +enum cam_lrme_core_state { + CAM_LRME_CORE_STATE_UNINIT, + CAM_LRME_CORE_STATE_INIT, + CAM_LRME_CORE_STATE_IDLE, + CAM_LRME_CORE_STATE_REQ_PENDING, + CAM_LRME_CORE_STATE_PROCESSING, + CAM_LRME_CORE_STATE_REQ_PROC_PEND, + CAM_LRME_CORE_STATE_RECOVERY, + CAM_LRME_CORE_STATE_MAX, +}; + +/** + * struct cam_lrme_core : LRME HW core information + * + * @hw_info : Pointer to base HW information structure + * @device_iommu : Device iommu handle + * @cdm_iommu : CDM iommu handle + * @hw_caps : Hardware capabilities + * @state : Hardware state + * @reset_complete : Reset completion + * @work : Hardware workqueue to handle irq events + * @work_data : Work data used by hardware workqueue + * @hw_mgr_cb : Hw manager callback + * @req_proc : Pointer to the processing frame request + * @req_submit : Pointer to the frame request waiting for processing + * @hw_cdm_info : CDM information used by this device + * @hw_idx : Hardware index + */ +struct cam_lrme_core { + struct cam_lrme_hw_info *hw_info; + struct cam_iommu_handle device_iommu; + struct cam_iommu_handle cdm_iommu; + struct cam_lrme_dev_cap hw_caps; + enum cam_lrme_core_state state; + struct completion reset_complete; + struct cam_req_mgr_core_workq *work; + struct cam_lrme_hw_work_data work_data[CAM_LRME_HW_WORKQ_NUM_TASK]; + struct cam_lrme_hw_cmd_set_cb hw_mgr_cb; + struct cam_lrme_frame_request *req_proc; + struct cam_lrme_frame_request *req_submit; + struct cam_lrme_cdm_info *hw_cdm_info; + uint32_t hw_idx; + 
bool dump_flag; +}; + +/** + * struct cam_lrme_bus_rd_reg_common : Offsets of FE common registers + * + * @hw_version : Offset of hw_version register + * @hw_capability : Offset of hw_capability register + * @sw_reset : Offset of sw_reset register + * @cgc_override : Offset of cgc_override register + * @irq_mask : Offset of irq_mask register + * @irq_clear : Offset of irq_clear register + * @irq_cmd : Offset of irq_cmd register + * @irq_status : Offset of irq_status register + * @cmd : Offset of cmd register + * @irq_set : Offset of irq_set register + * @misr_reset : Offset of misr_reset register + * @security_cfg : Offset of security_cfg register + * @pwr_iso_cfg : Offset of pwr_iso_cfg register + * @pwr_iso_seed : Offset of pwr_iso_seed register + * @test_bus_ctrl : Offset of test_bus_ctrl register + * @spare : Offset of spare register + */ +struct cam_lrme_bus_rd_reg_common { + uint32_t hw_version; + uint32_t hw_capability; + uint32_t sw_reset; + uint32_t cgc_override; + uint32_t irq_mask; + uint32_t irq_clear; + uint32_t irq_cmd; + uint32_t irq_status; + uint32_t cmd; + uint32_t irq_set; + uint32_t misr_reset; + uint32_t security_cfg; + uint32_t pwr_iso_cfg; + uint32_t pwr_iso_seed; + uint32_t test_bus_ctrl; + uint32_t spare; +}; + +/** + * struct cam_lrme_bus_wr_reg_common : Offset of WE common registers + * @hw_version : Offset of hw_version register + * @hw_capability : Offset of hw_capability register + * @sw_reset : Offset of sw_reset register + * @cgc_override : Offset of cgc_override register + * @misr_reset : Offset of misr_reset register + * @pwr_iso_cfg : Offset of pwr_iso_cfg register + * @test_bus_ctrl : Offset of test_bus_ctrl register + * @composite_mask_0 : Offset of composite_mask_0 register + * @irq_mask_0 : Offset of irq_mask_0 register + * @irq_mask_1 : Offset of irq_mask_1 register + * @irq_clear_0 : Offset of irq_clear_0 register + * @irq_clear_1 : Offset of irq_clear_1 register + * @irq_status_0 : Offset of irq_status_0 register + * 
@irq_status_1 : Offset of irq_status_1 register + * @irq_cmd : Offset of irq_cmd register + * @irq_set_0 : Offset of irq_set_0 register + * @irq_set_1 : Offset of irq_set_1 register + * @addr_fifo_status : Offset of addr_fifo_status register + * @frame_header_cfg0 : Offset of frame_header_cfg0 register + * @frame_header_cfg1 : Offset of frame_header_cfg1 register + * @spare : Offset of spare register + */ +struct cam_lrme_bus_wr_reg_common { + uint32_t hw_version; + uint32_t hw_capability; + uint32_t sw_reset; + uint32_t cgc_override; + uint32_t misr_reset; + uint32_t pwr_iso_cfg; + uint32_t test_bus_ctrl; + uint32_t composite_mask_0; + uint32_t irq_mask_0; + uint32_t irq_mask_1; + uint32_t irq_clear_0; + uint32_t irq_clear_1; + uint32_t irq_status_0; + uint32_t irq_status_1; + uint32_t irq_cmd; + uint32_t irq_set_0; + uint32_t irq_set_1; + uint32_t addr_fifo_status; + uint32_t frame_header_cfg0; + uint32_t frame_header_cfg1; + uint32_t spare; +}; + +/** + * struct cam_lrme_bus_rd_bus_client : Offset of FE registers + * + * @core_cfg : Offset of core_cfg register + * @ccif_meta_data : Offset of ccif_meta_data register + * @addr_image : Offset of addr_image register + * @rd_buffer_size : Offset of rd_buffer_size register + * @rd_stride : Offset of rd_stride register + * @unpack_cfg_0 : Offset of unpack_cfg_0 register + * @latency_buff_allocation : Offset of latency_buff_allocation register + * @burst_limit_cfg : Offset of burst_limit_cfg register + * @misr_cfg_0 : Offset of misr_cfg_0 register + * @misr_cfg_1 : Offset of misr_cfg_1 register + * @misr_rd_val : Offset of misr_rd_val register + * @debug_status_cfg : Offset of debug_status_cfg register + * @debug_status_0 : Offset of debug_status_0 register + * @debug_status_1 : Offset of debug_status_1 register + */ +struct cam_lrme_bus_rd_bus_client { + uint32_t core_cfg; + uint32_t ccif_meta_data; + uint32_t addr_image; + uint32_t rd_buffer_size; + uint32_t rd_stride; + uint32_t unpack_cfg_0; + uint32_t 
latency_buff_allocation; + uint32_t burst_limit_cfg; + uint32_t misr_cfg_0; + uint32_t misr_cfg_1; + uint32_t misr_rd_val; + uint32_t debug_status_cfg; + uint32_t debug_status_0; + uint32_t debug_status_1; +}; + +/** + * struct cam_lrme_bus_wr_bus_client : Offset of WE registers + * + * @status_0 : Offset of status_0 register + * @status_1 : Offset of status_1 register + * @cfg : Offset of cfg register + * @addr_frame_header : Offset of addr_frame_header register + * @frame_header_cfg : Offset of frame_header_cfg register + * @addr_image : Offset of addr_image register + * @addr_image_offset : Offset of addr_image_offset register + * @buffer_width_cfg : Offset of buffer_width_cfg register + * @buffer_height_cfg : Offset of buffer_height_cfg register + * @packer_cfg : Offset of packer_cfg register + * @wr_stride : Offset of wr_stride register + * @irq_subsample_cfg_period : Offset of irq_subsample_cfg_period register + * @irq_subsample_cfg_pattern : Offset of irq_subsample_cfg_pattern register + * @burst_limit_cfg : Offset of burst_limit_cfg register + * @misr_cfg : Offset of misr_cfg register + * @misr_rd_word_sel : Offset of misr_rd_word_sel register + * @misr_val : Offset of misr_val register + * @debug_status_cfg : Offset of debug_status_cfg register + * @debug_status_0 : Offset of debug_status_0 register + * @debug_status_1 : Offset of debug_status_1 register + */ +struct cam_lrme_bus_wr_bus_client { + uint32_t status_0; + uint32_t status_1; + uint32_t cfg; + uint32_t addr_frame_header; + uint32_t frame_header_cfg; + uint32_t addr_image; + uint32_t addr_image_offset; + uint32_t buffer_width_cfg; + uint32_t buffer_height_cfg; + uint32_t packer_cfg; + uint32_t wr_stride; + uint32_t irq_subsample_cfg_period; + uint32_t irq_subsample_cfg_pattern; + uint32_t burst_limit_cfg; + uint32_t misr_cfg; + uint32_t misr_rd_word_sel; + uint32_t misr_val; + uint32_t debug_status_cfg; + uint32_t debug_status_0; + uint32_t debug_status_1; +}; + +/** + * struct 
cam_lrme_bus_rd_hw_info : FE registers information + * + * @common_reg : FE common register + * @bus_client_reg : List of FE bus registers information + */ +struct cam_lrme_bus_rd_hw_info { + struct cam_lrme_bus_rd_reg_common common_reg; + struct cam_lrme_bus_rd_bus_client + bus_client_reg[CAM_LRME_BUS_RD_MAX_CLIENTS]; +}; + +/** + * struct cam_lrme_bus_wr_hw_info : WE engine registers information + * + * @common_reg : WE common register + * @bus_client_reg : List of WE bus registers information + */ +struct cam_lrme_bus_wr_hw_info { + struct cam_lrme_bus_wr_reg_common common_reg; + struct cam_lrme_bus_wr_bus_client + bus_client_reg[CAM_LRME_BUS_WR_MAX_CLIENTS]; +}; + +/** + * struct cam_lrme_clc_reg : Offset of clc registers + * + * @clc_hw_version : Offset of clc_hw_version register + * @clc_hw_status : Offset of clc_hw_status register + * @clc_hw_status_dbg : Offset of clc_hw_status_dbg register + * @clc_module_cfg : Offset of clc_module_cfg register + * @clc_moduleformat : Offset of clc_moduleformat register + * @clc_rangestep : Offset of clc_rangestep register + * @clc_offset : Offset of clc_offset register + * @clc_maxallowedsad : Offset of clc_maxallowedsad register + * @clc_minallowedtarmad : Offset of clc_minallowedtarmad register + * @clc_meaningfulsaddiff : Offset of clc_meaningfulsaddiff register + * @clc_minsaddiffdenom : Offset of clc_minsaddiffdenom register + * @clc_robustnessmeasuredistmap_0 : Offset of measuredistmap_0 register + * @clc_robustnessmeasuredistmap_1 : Offset of measuredistmap_1 register + * @clc_robustnessmeasuredistmap_2 : Offset of measuredistmap_2 register + * @clc_robustnessmeasuredistmap_3 : Offset of measuredistmap_3 register + * @clc_robustnessmeasuredistmap_4 : Offset of measuredistmap_4 register + * @clc_robustnessmeasuredistmap_5 : Offset of measuredistmap_5 register + * @clc_robustnessmeasuredistmap_6 : Offset of measuredistmap_6 register + * @clc_robustnessmeasuredistmap_7 : Offset of measuredistmap_7 register + * 
@clc_ds_crop_horizontal : Offset of clc_ds_crop_horizontal register + * @clc_ds_crop_vertical : Offset of clc_ds_crop_vertical register + * @clc_tar_pd_unpacker : Offset of clc_tar_pd_unpacker register + * @clc_ref_pd_unpacker : Offset of clc_ref_pd_unpacker register + * @clc_sw_override : Offset of clc_sw_override register + * @clc_tar_height : Offset of clc_tar_height register + * @clc_test_bus_ctrl : Offset of clc_test_bus_ctrl register + * @clc_spare : Offset of clc_spare register + */ +struct cam_lrme_clc_reg { + uint32_t clc_hw_version; + uint32_t clc_hw_status; + uint32_t clc_hw_status_dbg; + uint32_t clc_module_cfg; + uint32_t clc_moduleformat; + uint32_t clc_rangestep; + uint32_t clc_offset; + uint32_t clc_maxallowedsad; + uint32_t clc_minallowedtarmad; + uint32_t clc_meaningfulsaddiff; + uint32_t clc_minsaddiffdenom; + uint32_t clc_robustnessmeasuredistmap_0; + uint32_t clc_robustnessmeasuredistmap_1; + uint32_t clc_robustnessmeasuredistmap_2; + uint32_t clc_robustnessmeasuredistmap_3; + uint32_t clc_robustnessmeasuredistmap_4; + uint32_t clc_robustnessmeasuredistmap_5; + uint32_t clc_robustnessmeasuredistmap_6; + uint32_t clc_robustnessmeasuredistmap_7; + uint32_t clc_ds_crop_horizontal; + uint32_t clc_ds_crop_vertical; + uint32_t clc_tar_pd_unpacker; + uint32_t clc_ref_pd_unpacker; + uint32_t clc_sw_override; + uint32_t clc_tar_height; + uint32_t clc_ref_height; + uint32_t clc_test_bus_ctrl; + uint32_t clc_spare; +}; + +/** + * struct cam_lrme_titan_reg : Offset of LRME top registers + * + * @top_hw_version : Offset of top_hw_version register + * @top_titan_version : Offset of top_titan_version register + * @top_rst_cmd : Offset of top_rst_cmd register + * @top_core_clk_cfg : Offset of top_core_clk_cfg register + * @top_irq_status : Offset of top_irq_status register + * @top_irq_mask : Offset of top_irq_mask register + * @top_irq_clear : Offset of top_irq_clear register + * @top_irq_set : Offset of top_irq_set register + * @top_irq_cmd : Offset of 
top_irq_cmd register + * @top_violation_status : Offset of top_violation_status register + * @top_spare : Offset of top_spare register + */ +struct cam_lrme_titan_reg { + uint32_t top_hw_version; + uint32_t top_titan_version; + uint32_t top_rst_cmd; + uint32_t top_core_clk_cfg; + uint32_t top_irq_status; + uint32_t top_irq_mask; + uint32_t top_irq_clear; + uint32_t top_irq_set; + uint32_t top_irq_cmd; + uint32_t top_violation_status; + uint32_t top_spare; +}; + +/** + * struct cam_lrme_hw_info : LRME registers information + * + * @clc_reg : LRME CLC registers + * @bus_rd_reg : LRME FE registers + * @bus_wr_reg : LRME WE registers + * @titan_reg : LRME top registers + */ +struct cam_lrme_hw_info { + struct cam_lrme_clc_reg clc_reg; + struct cam_lrme_bus_rd_hw_info bus_rd_reg; + struct cam_lrme_bus_wr_hw_info bus_wr_reg; + struct cam_lrme_titan_reg titan_reg; +}; + +/** + * struct cam_lrme_hw_dump_header : LRME hw dump header + * + * @tag : LRME hw dump header tag + * @size : Size of data + * @word_size : size of each word + */ + +struct cam_lrme_hw_dump_header { + uint8_t tag[CAM_LRME_HW_DUMP_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +int cam_lrme_hw_process_irq(void *priv, void *data); +int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args, + uint32_t arg_size); +int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size); +int cam_lrme_hw_stop(void *hw_priv, void *stop_args, uint32_t arg_size); +int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args, + uint32_t arg_size); +irqreturn_t cam_lrme_hw_irq(int irq_num, void *data); +int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw, + struct cam_lrme_dev_cap *hw_caps); +int cam_lrme_hw_start(void *hw_priv, void *hw_init_args, uint32_t arg_size); +int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size); +void cam_lrme_set_irq(struct 
cam_hw_info *lrme_hw, enum cam_lrme_irq_set set); + +#endif /* _CAM_LRME_HW_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c new file mode 100644 index 0000000000..2a95c21485 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cam_subdev.h" +#include "cam_lrme_hw_intf.h" +#include "cam_lrme_hw_core.h" +#include "cam_lrme_hw_soc.h" +#include "cam_lrme_hw_reg.h" +#include "cam_req_mgr_workq.h" +#include "cam_lrme_hw_mgr.h" +#include "cam_mem_mgr_api.h" +#include "cam_smmu_api.h" +#include "camera_main.h" +#include "cam_req_mgr_dev.h" + +static int cam_lrme_hw_dev_util_cdm_acquire(struct cam_lrme_core *lrme_core, + struct cam_hw_info *lrme_hw) +{ + int rc, i; + struct cam_cdm_bl_request *cdm_cmd; + struct cam_cdm_acquire_data cdm_acquire; + struct cam_lrme_cdm_info *hw_cdm_info; + + hw_cdm_info = CAM_MEM_ZALLOC(sizeof(struct cam_lrme_cdm_info), + GFP_KERNEL); + if (!hw_cdm_info) { + CAM_ERR(CAM_LRME, "No memory for hw_cdm_info"); + return -ENOMEM; + } + + cdm_cmd = CAM_MEM_ZALLOC((sizeof(struct cam_cdm_bl_request) + + ((CAM_LRME_MAX_HW_ENTRIES - 1) * + sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL); + if (!cdm_cmd) { + CAM_ERR(CAM_LRME, "No memory for cdm_cmd"); + CAM_MEM_FREE(hw_cdm_info); + return -ENOMEM; + } + + memset(&cdm_acquire, 0, sizeof(cdm_acquire)); + strscpy(cdm_acquire.identifier, "lrmecdm", sizeof("lrmecdm")); + cdm_acquire.cell_index = lrme_hw->soc_info.index; + cdm_acquire.handle = 0; + cdm_acquire.userdata = hw_cdm_info; + cdm_acquire.cam_cdm_callback = NULL; 
+ cdm_acquire.id = CAM_CDM_VIRTUAL; + cdm_acquire.base_array_cnt = lrme_hw->soc_info.num_reg_map; + cdm_acquire.priority = CAM_CDM_BL_FIFO_0; + for (i = 0; i < lrme_hw->soc_info.num_reg_map; i++) + cdm_acquire.base_array[i] = &lrme_hw->soc_info.reg_map[i]; + + rc = cam_cdm_acquire(&cdm_acquire); + if (rc) { + CAM_ERR(CAM_LRME, "Can't acquire cdm"); + goto error; + } + + hw_cdm_info->cdm_cmd = cdm_cmd; + hw_cdm_info->cdm_ops = cdm_acquire.ops; + hw_cdm_info->cdm_handle = cdm_acquire.handle; + + lrme_core->hw_cdm_info = hw_cdm_info; + CAM_DBG(CAM_LRME, "cdm acquire done"); + + return 0; +error: + CAM_MEM_FREE(cdm_cmd); + CAM_MEM_FREE(hw_cdm_info); + return rc; +} + +static void cam_req_mgr_process_workq_cam_lrme_hw_worker(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static int cam_lrme_hw_dev_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_info *lrme_hw; + struct cam_hw_intf lrme_hw_intf; + struct cam_lrme_core *lrme_core; + const struct of_device_id *match_dev = NULL; + struct cam_lrme_hw_info *hw_info; + int rc, i; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + lrme_hw = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!lrme_hw) { + CAM_ERR(CAM_LRME, "No memory to create lrme_hw"); + return -ENOMEM; + } + + lrme_core = CAM_MEM_ZALLOC(sizeof(struct cam_lrme_core), GFP_KERNEL); + if (!lrme_core) { + CAM_ERR(CAM_LRME, "No memory to create lrme_core"); + CAM_MEM_FREE(lrme_hw); + return -ENOMEM; + } + + lrme_hw->core_info = lrme_core; + lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN; + lrme_hw->soc_info.pdev = pdev; + lrme_hw->soc_info.dev = &pdev->dev; + lrme_hw->soc_info.dev_name = pdev->name; + lrme_hw->open_count = 0; + lrme_core->state = CAM_LRME_CORE_STATE_INIT; + + mutex_init(&lrme_hw->hw_mutex); + spin_lock_init(&lrme_hw->hw_lock); + init_completion(&lrme_hw->hw_complete); + 
init_completion(&lrme_core->reset_complete); + + rc = cam_req_mgr_workq_create("cam_lrme_hw_worker", + CAM_LRME_HW_WORKQ_NUM_TASK, + &lrme_core->work, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_workq_cam_lrme_hw_worker); + if (rc) { + CAM_ERR(CAM_LRME, "Unable to create a workq, rc=%d", rc); + goto free_memory; + } + + for (i = 0; i < CAM_LRME_HW_WORKQ_NUM_TASK; i++) + lrme_core->work->task.pool[i].payload = + &lrme_core->work_data[i]; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev || !match_dev->data) { + CAM_ERR(CAM_LRME, "No Of_match data, %pK", match_dev); + rc = -EINVAL; + goto destroy_workqueue; + } + hw_info = (struct cam_lrme_hw_info *)match_dev->data; + lrme_core->hw_info = hw_info; + + rc = cam_lrme_soc_init_resources(&lrme_hw->soc_info, + cam_lrme_hw_irq, lrme_hw); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to init soc, rc=%d", rc); + goto destroy_workqueue; + } + + rc = cam_lrme_hw_dev_util_cdm_acquire(lrme_core, lrme_hw); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to acquire cdm"); + goto deinit_platform_res; + } + + rc = cam_smmu_get_handle("lrme", &lrme_core->device_iommu.non_secure); + if (rc) { + CAM_ERR(CAM_LRME, "Get iommu handle failed"); + goto release_cdm; + } + + rc = cam_lrme_hw_start(lrme_hw, NULL, 0); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to hw init, rc=%d", rc); + goto detach_smmu; + } + + rc = cam_lrme_hw_util_get_caps(lrme_hw, &lrme_core->hw_caps); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to get hw caps, rc=%d", rc); + if (cam_lrme_hw_stop(lrme_hw, NULL, 0)) + CAM_ERR(CAM_LRME, "Failed in hw deinit"); + goto detach_smmu; + } + + rc = cam_lrme_hw_stop(lrme_hw, NULL, 0); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to deinit hw, rc=%d", rc); + goto detach_smmu; + } + + lrme_core->hw_idx = lrme_hw->soc_info.index; + lrme_hw_intf.hw_priv = lrme_hw; + lrme_hw_intf.hw_idx = lrme_hw->soc_info.index; + lrme_hw_intf.hw_ops.get_hw_caps = cam_lrme_hw_get_caps; + lrme_hw_intf.hw_ops.init = NULL; + 
lrme_hw_intf.hw_ops.deinit = NULL; + lrme_hw_intf.hw_ops.reset = cam_lrme_hw_reset; + lrme_hw_intf.hw_ops.reserve = NULL; + lrme_hw_intf.hw_ops.release = NULL; + lrme_hw_intf.hw_ops.start = cam_lrme_hw_start; + lrme_hw_intf.hw_ops.stop = cam_lrme_hw_stop; + lrme_hw_intf.hw_ops.read = NULL; + lrme_hw_intf.hw_ops.write = NULL; + lrme_hw_intf.hw_ops.process_cmd = cam_lrme_hw_process_cmd; + lrme_hw_intf.hw_ops.flush = cam_lrme_hw_flush; + lrme_hw_intf.hw_type = CAM_HW_LRME; + + rc = cam_cdm_get_iommu_handle("lrmecdm", &lrme_core->cdm_iommu); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to acquire the CDM iommu handles"); + goto detach_smmu; + } + + rc = cam_lrme_mgr_register_device(&lrme_hw_intf, + &lrme_core->device_iommu, + &lrme_core->cdm_iommu); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to register device"); + goto detach_smmu; + } + + platform_set_drvdata(pdev, lrme_hw); + CAM_DBG(CAM_LRME, "HW:%d component bound successfully", + lrme_hw_intf.hw_idx); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +detach_smmu: + cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure); +release_cdm: + cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle); + CAM_MEM_FREE(lrme_core->hw_cdm_info->cdm_cmd); + CAM_MEM_FREE(lrme_core->hw_cdm_info); +deinit_platform_res: + if (cam_lrme_soc_deinit_resources(&lrme_hw->soc_info)) + CAM_ERR(CAM_LRME, "Failed in soc deinit"); + mutex_destroy(&lrme_hw->hw_mutex); +destroy_workqueue: + cam_req_mgr_workq_destroy(&lrme_core->work); +free_memory: + mutex_destroy(&lrme_hw->hw_mutex); + CAM_MEM_FREE(lrme_hw); + CAM_MEM_FREE(lrme_core); + + return rc; +} + +static void cam_lrme_hw_dev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct cam_hw_info *lrme_hw; + struct cam_lrme_core *lrme_core; + struct platform_device *pdev = to_platform_device(dev); + + lrme_hw = platform_get_drvdata(pdev); + if 
(!lrme_hw) { + CAM_ERR(CAM_LRME, "Invalid lrme_hw from fd_hw_intf"); + return; + } + + lrme_core = (struct cam_lrme_core *)lrme_hw->core_info; + if (!lrme_core) { + CAM_ERR(CAM_LRME, "Invalid lrme_core from fd_hw"); + goto deinit_platform_res; + } + + cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure); + cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle); + cam_lrme_mgr_deregister_device(lrme_core->hw_idx); + + CAM_MEM_FREE(lrme_core->hw_cdm_info->cdm_cmd); + CAM_MEM_FREE(lrme_core->hw_cdm_info); + CAM_MEM_FREE(lrme_core); + +deinit_platform_res: + rc = cam_lrme_soc_deinit_resources(&lrme_hw->soc_info); + if (rc) + CAM_ERR(CAM_LRME, "Error in LRME soc deinit, rc=%d", rc); + + mutex_destroy(&lrme_hw->hw_mutex); + CAM_MEM_FREE(lrme_hw); +} + +const static struct component_ops cam_lrme_hw_dev_component_ops = { + .bind = cam_lrme_hw_dev_component_bind, + .unbind = cam_lrme_hw_dev_component_unbind, +}; + +static int cam_lrme_hw_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_LRME, "Adding LRME HW component"); + rc = component_add(&pdev->dev, &cam_lrme_hw_dev_component_ops); + if (rc) + CAM_ERR(CAM_LRME, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_lrme_hw_dev_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_lrme_hw_dev_component_ops); + return 0; +} + +static const struct of_device_id cam_lrme_hw_dt_match[] = { + { + .compatible = "qcom,lrme", + .data = &cam_lrme10_hw_info, + }, + {} +}; + +MODULE_DEVICE_TABLE(of, cam_lrme_hw_dt_match); + +struct platform_driver cam_lrme_hw_driver = { + .probe = cam_lrme_hw_dev_probe, + .remove = cam_lrme_hw_dev_remove, + .driver = { + .name = "cam_lrme_hw", + .owner = THIS_MODULE, + .of_match_table = cam_lrme_hw_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_lrme_hw_init_module(void) +{ + return platform_driver_register(&cam_lrme_hw_driver); +} + +void cam_lrme_hw_exit_module(void) +{ + platform_driver_unregister(&cam_lrme_hw_driver); 
+} + +MODULE_DESCRIPTION("CAM LRME HW driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h new file mode 100644 index 0000000000..64eb9c1123 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_LRME_HW_INTF_H_ +#define _CAM_LRME_HW_INTF_H_ + +#include +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_soc_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "cam_subdev.h" +#include "cam_cpas_api.h" +#include "cam_hw_mgr_intf.h" +#include "cam_debug_util.h" + + +#define CAM_LRME_MAX_IO_BUFFER 2 +#define CAM_LRME_MAX_HW_ENTRIES 5 + +#define CAM_LRME_BASE_IDX 0 + +/** + * enum cam_lrme_hw_type : Enum for LRME HW type + * + * @CAM_HW_LRME : LRME HW type + */ +enum cam_lrme_hw_type { + CAM_HW_LRME, +}; + +/** + * enum cam_lrme_cb_type : HW manager call back type + * + * @CAM_LRME_CB_BUF_DONE : Indicate buf done has been generated + * @CAM_LRME_CB_COMP_REG_UPDATE : Indicate receiving WE comp reg update + * @CAM_LRME_CB_PUT_FRAME : Request HW manager to put back the frame + * @CAM_LRME_CB_ERROR : Indicate error irq has been generated + */ +enum cam_lrme_cb_type { + CAM_LRME_CB_BUF_DONE = 1, + CAM_LRME_CB_COMP_REG_UPDATE = 1 << 1, + CAM_LRME_CB_PUT_FRAME = 1 << 2, + CAM_LRME_CB_ERROR = 1 << 3, +}; + +/** + * enum cam_lrme_hw_cmd_type : HW CMD type + * + * @CAM_LRME_HW_CMD_PREPARE_HW_UPDATE : Prepare HW update + * @CAM_LRME_HW_CMD_REGISTER_CB : register HW manager callback + * @CAM_LRME_HW_CMD_SUBMIT : Submit frame to HW + * @CAM_LRME_HW_CMD_DUMP_REGISTER : dump register values + * @CAM_LRME_HW_CMD_DUMP : dump register values to buffer + */ 
+enum cam_lrme_hw_cmd_type { + CAM_LRME_HW_CMD_PREPARE_HW_UPDATE, + CAM_LRME_HW_CMD_REGISTER_CB, + CAM_LRME_HW_CMD_SUBMIT, + CAM_LRME_HW_CMD_DUMP_REGISTER, + CAM_LRME_HW_CMD_DUMP, +}; + +/** + * enum cam_lrme_hw_reset_type : Type of reset + * + * @CAM_LRME_HW_RESET_TYPE_HW_RESET : HW reset + * @CAM_LRME_HW_RESET_TYPE_SW_RESET : SW reset + */ +enum cam_lrme_hw_reset_type { + CAM_LRME_HW_RESET_TYPE_HW_RESET, + CAM_LRME_HW_RESET_TYPE_SW_RESET, +}; + +/** + *struct cam_lrme_frame_request : LRME frame request + * + * @frame_list : List head + * @req_id : Request ID + * @ctxt_to_hw_map : Information about context id, priority and device id + * @hw_device : Pointer to HW device + * @hw_update_entries : List of hw_update_entries + * @num_hw_update_entries : number of hw_update_entries + * @submit_timestamp : timestamp of submitting request with hw + */ +struct cam_lrme_frame_request { + struct list_head frame_list; + uint64_t req_id; + void *ctxt_to_hw_map; + struct cam_lrme_device *hw_device; + struct cam_hw_update_entry hw_update_entries[CAM_LRME_MAX_HW_ENTRIES]; + uint32_t num_hw_update_entries; + ktime_t submit_timestamp; +}; + +/** + * struct cam_lrme_hw_io_buffer : IO buffer information + * + * @valid : Indicate whether this IO config is valid + * @io_cfg : Pointer to IO configuration + * @num_buf : Number of buffers + * @num_plane : Number of planes + * @io_addr : List of IO address + */ +struct cam_lrme_hw_io_buffer { + bool valid; + struct cam_buf_io_cfg *io_cfg; + uint32_t num_buf; + uint32_t num_plane; + uint64_t io_addr[CAM_PACKET_MAX_PLANES]; +}; + +/** + * struct cam_lrme_hw_cmd_config_args : Args for prepare HW update + * + * @hw_device : Pointer to HW device + * @input_buf : List of input buffers + * @output_buf : List of output buffers + * @cmd_buf_addr : Pointer to available KMD buffer + * @size : Available KMD buffer size + * @config_buf_size : Size used to prepare update + */ +struct cam_lrme_hw_cmd_config_args { + struct cam_lrme_device *hw_device; + 
struct cam_lrme_hw_io_buffer input_buf[CAM_LRME_MAX_IO_BUFFER]; + struct cam_lrme_hw_io_buffer output_buf[CAM_LRME_MAX_IO_BUFFER]; + uint32_t *cmd_buf_addr; + uint32_t size; + uint32_t config_buf_size; +}; + +/** + * struct cam_lrme_hw_flush_args : Args for flush HW + * + * @ctxt_to_hw_map : Identity of context + * @req_to_flush : Pointer to the frame need to flush in + * case of single frame flush + * @flush_type : Flush type + */ +struct cam_lrme_hw_flush_args { + void *ctxt_to_hw_map; + struct cam_lrme_frame_request *req_to_flush; + uint32_t flush_type; +}; + +/** + * struct cam_lrme_hw_reset_args : Args for reset HW + * + * @reset_type : Enum cam_lrme_hw_reset_type + */ +struct cam_lrme_hw_reset_args { + uint32_t reset_type; +}; + +/** + * struct cam_lrme_hw_cb_args : HW manager callback args + * + * @cb_type : Callback event type + * @frame_req : Pointer to the frame associated with the cb + */ +struct cam_lrme_hw_cb_args { + uint32_t cb_type; + struct cam_lrme_frame_request *frame_req; +}; + +/** + * struct cam_lrme_hw_cmd_set_cb : Args for set callback function + * + * @cam_lrme_hw_mgr_cb : Callback function pointer + * @data : Data sent along with callback function + */ +struct cam_lrme_hw_cmd_set_cb { + int (*cam_lrme_hw_mgr_cb)(void *data, + struct cam_lrme_hw_cb_args *args); + void *data; +}; + +/** + * struct cam_lrme_hw_submit_args : Args for submit request + * + * @hw_update_entries : List of hw update entries used to program registers + * @num_hw_update_entries : Number of hw update entries + * @frame_req : Pointer to the frame request + */ +struct cam_lrme_hw_submit_args { + struct cam_hw_update_entry *hw_update_entries; + uint32_t num_hw_update_entries; + struct cam_lrme_frame_request *frame_req; +}; + +/** + * struct cam_lrme_hw_dump_args : Args for dump request + * + * @request_id : Issue request id + * @cpu_addr : start address of the target buffer + * @offset : offset of the buffer + * @buf_len : Length of target buffer + */ +struct 
cam_lrme_hw_dump_args { + uint64_t request_id; + uintptr_t cpu_addr; + size_t offset; + size_t buf_len; +}; + +/** + * @brief : API to register LRME hw to platform framework. + * @return 0 on success, or a negative error code on failure. + */ +int cam_lrme_hw_init_module(void); + +/** + * @brief : API to remove LRME Hw from platform framework. + */ +void cam_lrme_hw_exit_module(void); + +#endif /* _CAM_LRME_HW_INTF_H_ */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h new file mode 100644 index 0000000000..17eb25b0fa --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_LRME_HW_REG_H_ +#define _CAM_LRME_HW_REG_H_ + +#include "cam_lrme_hw_core.h" + +static struct cam_lrme_hw_info cam_lrme10_hw_info = { + .clc_reg = { + .clc_hw_version = 0x00000000, + .clc_hw_status = 0x00000004, + .clc_hw_status_dbg = 0x00000008, + .clc_module_cfg = 0x00000060, + .clc_moduleformat = 0x000000A8, + .clc_rangestep = 0x00000068, + .clc_offset = 0x0000006C, + .clc_maxallowedsad = 0x00000070, + .clc_minallowedtarmad = 0x00000074, + .clc_meaningfulsaddiff = 0x00000078, + .clc_minsaddiffdenom = 0x0000007C, + .clc_robustnessmeasuredistmap_0 = 0x00000080, + .clc_robustnessmeasuredistmap_1 = 0x00000084, + .clc_robustnessmeasuredistmap_2 = 0x00000088, + .clc_robustnessmeasuredistmap_3 = 0x0000008C, + .clc_robustnessmeasuredistmap_4 = 0x00000090, + .clc_robustnessmeasuredistmap_5 = 0x00000094, + .clc_robustnessmeasuredistmap_6 = 0x00000098, + .clc_robustnessmeasuredistmap_7 = 0x0000009C, + .clc_ds_crop_horizontal = 0x000000A0, + .clc_ds_crop_vertical = 0x000000A4, + .clc_tar_pd_unpacker = 0x000000AC, + .clc_ref_pd_unpacker = 
0x000000B0, + .clc_sw_override = 0x000000B4, + .clc_tar_height = 0x000000B8, + .clc_ref_height = 0x000000BC, + .clc_test_bus_ctrl = 0x000001F8, + .clc_spare = 0x000001FC, + }, + .bus_rd_reg = { + .common_reg = { + .hw_version = 0x00000200, + .hw_capability = 0x00000204, + .sw_reset = 0x00000208, + .cgc_override = 0x0000020C, + .irq_mask = 0x00000210, + .irq_clear = 0x00000214, + .irq_cmd = 0x00000218, + .irq_status = 0x0000021C, + .cmd = 0x00000220, + .irq_set = 0x00000224, + .misr_reset = 0x0000022C, + .security_cfg = 0x00000230, + .pwr_iso_cfg = 0x00000234, + .pwr_iso_seed = 0x00000238, + .test_bus_ctrl = 0x00000248, + .spare = 0x0000024C, + }, + .bus_client_reg = { + /* bus client 0 */ + { + .core_cfg = 0x00000250, + .ccif_meta_data = 0x00000254, + .addr_image = 0x00000258, + .rd_buffer_size = 0x0000025C, + .rd_stride = 0x00000260, + .unpack_cfg_0 = 0x00000264, + .latency_buff_allocation = 0x00000278, + .burst_limit_cfg = 0x00000280, + .misr_cfg_0 = 0x00000284, + .misr_cfg_1 = 0x00000288, + .misr_rd_val = 0x0000028C, + .debug_status_cfg = 0x00000290, + .debug_status_0 = 0x00000294, + .debug_status_1 = 0x00000298, + }, + /* bus client 1 */ + { + .core_cfg = 0x000002F0, + .ccif_meta_data = 0x000002F4, + .addr_image = 0x000002F8, + .rd_buffer_size = 0x000002FC, + .rd_stride = 0x00000300, + .unpack_cfg_0 = 0x00000304, + .latency_buff_allocation = 0x00000318, + .burst_limit_cfg = 0x00000320, + .misr_cfg_0 = 0x00000324, + .misr_cfg_1 = 0x00000328, + .misr_rd_val = 0x0000032C, + .debug_status_cfg = 0x00000330, + .debug_status_0 = 0x00000334, + .debug_status_1 = 0x00000338, + }, + }, + }, + .bus_wr_reg = { + .common_reg = { + .hw_version = 0x00000500, + .hw_capability = 0x00000504, + .sw_reset = 0x00000508, + .cgc_override = 0x0000050C, + .misr_reset = 0x000005C8, + .pwr_iso_cfg = 0x000005CC, + .test_bus_ctrl = 0x0000061C, + .composite_mask_0 = 0x00000510, + .irq_mask_0 = 0x00000544, + .irq_mask_1 = 0x00000548, + .irq_clear_0 = 0x00000550, + .irq_clear_1 = 0x00000554, + 
.irq_status_0 = 0x0000055C, + .irq_status_1 = 0x00000560, + .irq_cmd = 0x00000568, + .irq_set_0 = 0x000005BC, + .irq_set_1 = 0x000005C0, + .addr_fifo_status = 0x000005A8, + .frame_header_cfg0 = 0x000005AC, + .frame_header_cfg1 = 0x000005B0, + .spare = 0x00000620, + }, + .bus_client_reg = { + /* bus client 0 */ + { + .status_0 = 0x00000700, + .status_1 = 0x00000704, + .cfg = 0x00000708, + .addr_frame_header = 0x0000070C, + .frame_header_cfg = 0x00000710, + .addr_image = 0x00000714, + .addr_image_offset = 0x00000718, + .buffer_width_cfg = 0x0000071C, + .buffer_height_cfg = 0x00000720, + .packer_cfg = 0x00000724, + .wr_stride = 0x00000728, + .irq_subsample_cfg_period = 0x00000748, + .irq_subsample_cfg_pattern = 0x0000074C, + .burst_limit_cfg = 0x0000075C, + .misr_cfg = 0x00000760, + .misr_rd_word_sel = 0x00000764, + .misr_val = 0x00000768, + .debug_status_cfg = 0x0000076C, + .debug_status_0 = 0x00000770, + .debug_status_1 = 0x00000774, + }, + /* bus client 1 */ + { + .status_0 = 0x00000800, + .status_1 = 0x00000804, + .cfg = 0x00000808, + .addr_frame_header = 0x0000080C, + .frame_header_cfg = 0x00000810, + .addr_image = 0x00000814, + .addr_image_offset = 0x00000818, + .buffer_width_cfg = 0x0000081C, + .buffer_height_cfg = 0x00000820, + .packer_cfg = 0x00000824, + .wr_stride = 0x00000828, + .irq_subsample_cfg_period = 0x00000848, + .irq_subsample_cfg_pattern = 0x0000084C, + .burst_limit_cfg = 0x0000085C, + .misr_cfg = 0x00000860, + .misr_rd_word_sel = 0x00000864, + .misr_val = 0x00000868, + .debug_status_cfg = 0x0000086C, + .debug_status_0 = 0x00000870, + .debug_status_1 = 0x00000874, + }, + }, + }, + .titan_reg = { + .top_hw_version = 0x00000900, + .top_titan_version = 0x00000904, + .top_rst_cmd = 0x00000908, + .top_core_clk_cfg = 0x00000920, + .top_irq_status = 0x0000090C, + .top_irq_mask = 0x00000910, + .top_irq_clear = 0x00000914, + .top_irq_set = 0x00000918, + .top_irq_cmd = 0x0000091C, + .top_violation_status = 0x00000924, + .top_spare = 0x000009FC, + }, +}; + 
+#endif /* _CAM_LRME_HW_REG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c new file mode 100644 index 0000000000..c9762b545a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "cam_lrme_hw_core.h" +#include "cam_lrme_hw_soc.h" +#include "cam_mem_mgr_api.h" + +int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw) +{ + struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info; + struct cam_lrme_soc_private *soc_private = + (struct cam_lrme_soc_private *)soc_info->soc_private; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + int rc = 0; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 2; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_READ; + axi_vote.axi_path[0].camnoc_bw = 7200000; + axi_vote.axi_path[0].mnoc_ab_bw = 7200000; + axi_vote.axi_path[0].mnoc_ib_bw = 7200000; + axi_vote.axi_path[1].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[1].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[1].camnoc_bw = 7200000; + axi_vote.axi_path[1].mnoc_ab_bw = 7200000; + axi_vote.axi_path[1].mnoc_ib_bw = 7200000; + + rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to start cpas, rc %d", rc); + return -EFAULT; + } + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_info->lowest_clk_level, true); + if (rc) { + 
CAM_ERR(CAM_LRME, + "Failed to enable platform resource, rc %d", rc); + goto stop_cpas; + } + + cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_ENABLE); + + return rc; + +stop_cpas: + if (cam_cpas_stop(soc_private->cpas_handle)) + CAM_ERR(CAM_LRME, "Failed to stop cpas"); + + return rc; +} + +int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw) +{ + struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info; + struct cam_lrme_soc_private *soc_private; + int rc = 0; + + soc_private = soc_info->soc_private; + + cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_DISABLE); + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) { + CAM_ERR(CAM_LRME, "Failed to disable platform resource"); + return rc; + } + rc = cam_cpas_stop(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_LRME, "Failed to stop cpas"); + + return rc; +} + +int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler, void *private_data) +{ + struct cam_lrme_soc_private *soc_private; + struct cam_cpas_register_params cpas_register_param; + int rc, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_LRME, "Failed in get_dt_properties, rc=%d", rc); + return rc; + } + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = private_data; + + rc = cam_soc_util_request_platform_resource(soc_info, irq_handler, &(irq_data[0])); + if (rc) { + CAM_ERR(CAM_LRME, "Failed in request_platform_resource rc=%d", + rc); + return rc; + } + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_lrme_soc_private), GFP_KERNEL); + if (!soc_private) { + rc = -ENOMEM; + goto release_res; + } + soc_info->soc_private = soc_private; + + memset(&cpas_register_param, 0, sizeof(cpas_register_param)); + strscpy(cpas_register_param.identifier, + "lrmecpas", CAM_HW_IDENTIFIER_LENGTH); + cpas_register_param.cell_index = soc_info->index; + cpas_register_param.dev = &soc_info->pdev->dev; + 
cpas_register_param.userdata = private_data; + cpas_register_param.cam_cpas_client_cb = NULL; + + rc = cam_cpas_register_client(&cpas_register_param); + if (rc) { + CAM_ERR(CAM_LRME, "CPAS registration failed"); + goto free_soc_private; + } + soc_private->cpas_handle = cpas_register_param.client_handle; + CAM_DBG(CAM_LRME, "CPAS handle=%d", soc_private->cpas_handle); + + return rc; + +free_soc_private: + CAM_MEM_FREE(soc_info->soc_private); + soc_info->soc_private = NULL; +release_res: + cam_soc_util_release_platform_resource(soc_info); + + return rc; +} + +int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info) +{ + struct cam_lrme_soc_private *soc_private = + (struct cam_lrme_soc_private *)soc_info->soc_private; + int rc; + + rc = cam_cpas_unregister_client(soc_private->cpas_handle); + if (rc) + CAM_ERR(CAM_LRME, "Unregister cpas failed, handle=%d, rc=%d", + soc_private->cpas_handle, rc); + + rc = cam_soc_util_release_platform_resource(soc_info); + if (rc) + CAM_ERR(CAM_LRME, "release platform failed, rc=%d", rc); + + CAM_MEM_FREE(soc_info->soc_private); + soc_info->soc_private = NULL; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h new file mode 100644 index 0000000000..d77ac2bfdb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_LRME_HW_SOC_H_ +#define _CAM_LRME_HW_SOC_H_ + +#include "cam_soc_util.h" + +struct cam_lrme_soc_private { + uint32_t cpas_handle; +}; + +int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw); +int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw); +int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t irq_handler, void *private_data); +int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info); + +#endif /* _CAM_LRME_HW_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_context.c b/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_context.c new file mode 100644 index 0000000000..2eafce9b46 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_context.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "cam_sync_api.h" +#include "cam_node.h" +#include "cam_context.h" +#include "cam_context_utils.h" +#include "cam_ope_context.h" +#include "cam_req_mgr_util.h" +#include "cam_mem_mgr.h" +#include "cam_trace.h" +#include "cam_debug_util.h" +#include "cam_packet_util.h" + +static const char ope_dev_name[] = "cam-ope"; + +static int cam_ope_context_dump_active_request(void *data, void *args) +{ + struct cam_context *ctx = (struct cam_context *)data; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + struct cam_hw_dump_pf_args *pf_args = (struct cam_hw_dump_pf_args *)args; + int rc = 0; + + if (!ctx || !pf_args) { + CAM_ERR(CAM_OPE, "Invalid ctx %pK or pf args %pK", + ctx, pf_args); + return -EINVAL; + } + + CAM_INFO(CAM_OPE, "iommu fault for ope ctx %d state %d", + ctx->ctx_id, ctx->state); + + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + CAM_INFO(CAM_OPE, "Active req_id: %llu ctx_id: %u", + req->request_id, ctx->ctx_id); + + rc = cam_context_dump_pf_info_to_hw(ctx, pf_args, &req->pf_data); + if (rc) + CAM_ERR(CAM_OPE, "Failed to dump pf info ctx_id: %u state: %d", + ctx->ctx_id, ctx->state); + } + + if (pf_args->pf_context_info.ctx_found) { + /* Send PF notification to UMD if PF found on current CTX */ + rc = cam_context_send_pf_evt(ctx, pf_args); + if (rc) + CAM_ERR(CAM_OPE, + "Failed to notify PF event to userspace rc: %d", rc); + } + + return rc; +} + +static int __cam_ope_acquire_dev_in_available(struct cam_context *ctx, + struct cam_acquire_dev_cmd_unified *args) +{ + int rc; + + rc = cam_context_acquire_dev_to_hw(ctx, args); + if (!rc) { + ctx->state = CAM_CTX_ACQUIRED; + trace_cam_context_state("OPE", ctx); + } + + return rc; +} + +static int __cam_ope_release_dev_in_acquired(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_release_dev_to_hw(ctx, cmd); + if (rc) + 
CAM_ERR(CAM_OPE, "Unable to release device"); + + ctx->state = CAM_CTX_AVAILABLE; + trace_cam_context_state("OPE", ctx); + return rc; +} + +static int __cam_ope_start_dev_in_acquired(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_start_dev_to_hw(ctx, cmd); + if (!rc) { + ctx->state = CAM_CTX_READY; + trace_cam_context_state("OPE", ctx); + } + + return rc; +} + +static int __cam_ope_flush_dev_in_ready(struct cam_context *ctx, + struct cam_flush_dev_cmd *cmd) +{ + int rc; + struct cam_context_utils_flush_args flush_args; + + flush_args.cmd = cmd; + flush_args.flush_active_req = false; + + rc = cam_context_flush_dev_to_hw(ctx, &flush_args); + if (rc) + CAM_ERR(CAM_OPE, "Failed to flush device"); + + return rc; +} + +static int __cam_ope_dump_dev_in_ready(struct cam_context *ctx, + struct cam_dump_req_cmd *cmd) +{ + int rc; + + rc = cam_context_dump_dev_to_hw(ctx, cmd); + if (rc) + CAM_ERR(CAM_OPE, "Failed to dump device"); + + return rc; +} + +static int __cam_ope_config_dev_in_ready(struct cam_context *ctx, + struct cam_config_dev_cmd *cmd) +{ + int rc; + size_t len; + uintptr_t packet_addr; + + rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle, + &packet_addr, &len); + if (rc) { + CAM_ERR(CAM_OPE, "[%s][%d] Can not get packet address", + ctx->dev_name, ctx->ctx_id); + rc = -EINVAL; + return rc; + } + + rc = cam_context_prepare_dev_to_hw(ctx, cmd); + + if (rc) + CAM_ERR(CAM_OPE, "Failed to prepare device"); + + cam_mem_put_cpu_buf((int32_t) cmd->packet_handle); + return rc; +} + +static int __cam_ope_stop_dev_in_ready(struct cam_context *ctx, + struct cam_start_stop_dev_cmd *cmd) +{ + int rc; + + rc = cam_context_stop_dev_to_hw(ctx); + if (rc) + CAM_ERR(CAM_OPE, "Failed to stop device"); + + ctx->state = CAM_CTX_ACQUIRED; + trace_cam_context_state("OPE", ctx); + return rc; +} + +static int __cam_ope_release_dev_in_ready(struct cam_context *ctx, + struct cam_release_dev_cmd *cmd) +{ + int rc; + + rc = 
__cam_ope_stop_dev_in_ready(ctx, NULL); + if (rc) + CAM_ERR(CAM_OPE, "Failed to stop device"); + + rc = __cam_ope_release_dev_in_acquired(ctx, cmd); + if (rc) + CAM_ERR(CAM_OPE, "Failed to release device"); + + return rc; +} + +static int __cam_ope_handle_buf_done_in_ready(void *ctx, + uint32_t evt_id, void *done) +{ + return cam_context_buf_done_from_hw(ctx, done, evt_id); +} + +static struct cam_ctx_ops + cam_ope_ctx_state_machine[CAM_CTX_STATE_MAX] = { + /* Uninit */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Available */ + { + .ioctl_ops = { + .acquire_dev = __cam_ope_acquire_dev_in_available, + }, + .crm_ops = {}, + .irq_ops = NULL, + }, + /* Acquired */ + { + .ioctl_ops = { + .release_dev = __cam_ope_release_dev_in_acquired, + .start_dev = __cam_ope_start_dev_in_acquired, + .config_dev = __cam_ope_config_dev_in_ready, + .flush_dev = __cam_ope_flush_dev_in_ready, + .dump_dev = __cam_ope_dump_dev_in_ready, + }, + .crm_ops = {}, + .irq_ops = __cam_ope_handle_buf_done_in_ready, + .pagefault_ops = cam_ope_context_dump_active_request, + }, + /* Ready */ + { + .ioctl_ops = { + .stop_dev = __cam_ope_stop_dev_in_ready, + .release_dev = __cam_ope_release_dev_in_ready, + .config_dev = __cam_ope_config_dev_in_ready, + .flush_dev = __cam_ope_flush_dev_in_ready, + .dump_dev = __cam_ope_dump_dev_in_ready, + }, + .crm_ops = {}, + .irq_ops = __cam_ope_handle_buf_done_in_ready, + .pagefault_ops = cam_ope_context_dump_active_request, + }, + /* Flushed */ + { + .ioctl_ops = {}, + }, + /* Activated */ + { + .ioctl_ops = {}, + .crm_ops = {}, + .irq_ops = NULL, + .pagefault_ops = cam_ope_context_dump_active_request, + }, +}; + +int cam_ope_context_init(struct cam_ope_context *ctx, + struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id, int img_iommu_hdl) +{ + int rc; + + if ((!ctx) || (!ctx->base) || (!hw_intf)) { + CAM_ERR(CAM_OPE, "Invalid params: %pK %pK", ctx, hw_intf); + rc = -EINVAL; + goto err; + } + + rc = cam_context_init(ctx->base, ope_dev_name, 
CAM_OPE, ctx_id, + NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX, img_iommu_hdl); + if (rc) { + CAM_ERR(CAM_OPE, "Camera Context Base init failed"); + goto err; + } + + ctx->base->state_machine = cam_ope_ctx_state_machine; + ctx->base->ctx_priv = ctx; + ctx->ctxt_to_hw_map = NULL; + + ctx->base->max_hw_update_entries = CAM_CTX_CFG_MAX; + ctx->base->max_in_map_entries = CAM_CTX_CFG_MAX; + ctx->base->max_out_map_entries = CAM_CTX_CFG_MAX; +err: + return rc; +} + +int cam_ope_context_deinit(struct cam_ope_context *ctx) +{ + if ((!ctx) || (!ctx->base)) { + CAM_ERR(CAM_OPE, "Invalid params: %pK", ctx); + return -EINVAL; + } + + cam_context_deinit(ctx->base); + memset(ctx, 0, sizeof(*ctx)); + + return 0; +} + + diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_context.h b/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_context.h new file mode 100644 index 0000000000..e91c6a7e8b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_context.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_OPE_CONTEXT_H_ +#define _CAM_OPE_CONTEXT_H_ + +#include "cam_context.h" + +#define OPE_CTX_MAX 32 + +/** + * struct cam_ope_context - ope context + * @base: ope context object + * @state_machine: state machine for OPE context + * @req_base: common request structure + * @state: ope context state + * @ctxt_to_hw_map: context to FW handle mapping + */ +struct cam_ope_context { + struct cam_context *base; + struct cam_ctx_ops *state_machine; + struct cam_ctx_request req_base[CAM_CTX_REQ_MAX]; + uint32_t state; + void *ctxt_to_hw_map; +}; + +/** + * cam_ope_context_init() - OPE context init + * @ctx: Pointer to context + * @hw_intf: Pointer to OPE hardware interface + * @ctx_id: ID for this context + * @img_iommu_hdl: IOMMU HDL for image buffers + * + */ +int cam_ope_context_init(struct cam_ope_context *ctx, + struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id, int img_iommu_hdl); + +/** + * cam_ope_context_deinit() - OPE context deinit + * @ctx: Pointer to context + */ +int cam_ope_context_deinit(struct cam_ope_context *ctx); + +#endif /* _CAM_OPE_CONTEXT_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_subdev.c b/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_subdev.c new file mode 100644 index 0000000000..33f6cd6883 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/cam_ope_subdev.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_req_mgr_dev.h" +#include "cam_subdev.h" +#include "cam_node.h" +#include "cam_context.h" +#include "cam_ope_context.h" +#include "cam_ope_hw_mgr_intf.h" +#include "cam_hw_mgr_intf.h" +#include "cam_debug_util.h" +#include "cam_smmu_api.h" +#include "camera_main.h" +#include "cam_context_utils.h" +#include "cam_mem_mgr_api.h" + +#define OPE_DEV_NAME "cam-ope" + +struct cam_ope_subdev { + struct cam_subdev sd; + struct cam_node *node; + struct cam_context ctx[OPE_CTX_MAX]; + struct cam_ope_context ctx_ope[OPE_CTX_MAX]; + struct mutex ope_lock; + int32_t open_cnt; + int32_t reserved; +}; + +static struct cam_ope_subdev g_ope_dev; + +static void cam_ope_dev_iommu_fault_handler( + struct cam_smmu_pf_info *pf_smmu_info) +{ + int i, rc; + struct cam_node *node = NULL; + struct cam_hw_dump_pf_args pf_args = {0}; + + if (!pf_smmu_info || !pf_smmu_info->token) { + CAM_ERR(CAM_OPE, "invalid token in page handler cb"); + return; + } + + node = (struct cam_node *)pf_smmu_info->token; + pf_args.pf_smmu_info = pf_smmu_info; + + for (i = 0; i < node->ctx_size; i++) { + cam_context_dump_pf_info(&(node->ctx_list[i]), &pf_args); + if (pf_args.pf_context_info.ctx_found) + /* found ctx and packet of the faulted address */ + break; + } + + if (i == node->ctx_size) { + /* Faulted ctx not found. 
But report PF to UMD anyway*/ + rc = cam_context_send_pf_evt(NULL, &pf_args); + if (rc) + CAM_ERR(CAM_OPE, + "Failed to notify PF event to userspace rc: %d", rc); + } +} + +static int cam_ope_subdev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_hw_mgr_intf *hw_mgr_intf = NULL; + struct cam_node *node = v4l2_get_subdevdata(sd); + int rc = 0; + + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_LOCK); + + mutex_lock(&g_ope_dev.ope_lock); + if (g_ope_dev.open_cnt >= 1) { + CAM_ERR(CAM_OPE, "OPE subdev is already opened"); + rc = -EALREADY; + goto end; + } + + if (!node) { + CAM_ERR(CAM_OPE, "Invalid args"); + rc = -EINVAL; + goto end; + } + + hw_mgr_intf = &node->hw_mgr_intf; + rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL); + if (rc < 0) { + CAM_ERR(CAM_OPE, "OPE HW open failed: %d", rc); + goto end; + } + g_ope_dev.open_cnt++; + CAM_DBG(CAM_OPE, "OPE HW open success: %d", rc); +end: + mutex_unlock(&g_ope_dev.ope_lock); + cam_req_mgr_rwsem_read_op(CAM_SUBDEV_UNLOCK); + return rc; +} + +static int cam_ope_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + int rc = 0; + struct cam_hw_mgr_intf *hw_mgr_intf = NULL; + struct cam_node *node = v4l2_get_subdevdata(sd); + + mutex_lock(&g_ope_dev.ope_lock); + if (g_ope_dev.open_cnt <= 0) { + CAM_DBG(CAM_OPE, "OPE subdev is already closed"); + rc = -EINVAL; + goto end; + } + g_ope_dev.open_cnt--; + if (!node) { + CAM_ERR(CAM_OPE, "Invalid args"); + rc = -EINVAL; + goto end; + } + + hw_mgr_intf = &node->hw_mgr_intf; + if (!hw_mgr_intf) { + CAM_ERR(CAM_OPE, "hw_mgr_intf is not initialized"); + rc = -EINVAL; + goto end; + } + + rc = cam_node_shutdown(node); + if (rc < 0) { + CAM_ERR(CAM_OPE, "HW close failed"); + goto end; + } + CAM_DBG(CAM_OPE, "OPE HW close success: %d", rc); + +end: + mutex_unlock(&g_ope_dev.ope_lock); + return rc; +} + +static int cam_ope_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if 
(crm_active) { + CAM_DBG(CAM_OPE, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_ope_subdev_close_internal(sd, fh); +} + +const struct v4l2_subdev_internal_ops cam_ope_subdev_internal_ops = { + .open = cam_ope_subdev_open, + .close = cam_ope_subdev_close, +}; + +static int cam_ope_subdev_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0, i = 0; + struct cam_node *node; + struct cam_hw_mgr_intf *hw_mgr_intf; + int iommu_hdl = -1; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + CAM_DBG(CAM_OPE, "Binding OPE subdev component"); + if (!pdev) { + CAM_ERR(CAM_OPE, "pdev is NULL"); + return -EINVAL; + } + + g_ope_dev.sd.pdev = pdev; + g_ope_dev.sd.internal_ops = &cam_ope_subdev_internal_ops; + g_ope_dev.sd.close_seq_prior = CAM_SD_CLOSE_MEDIUM_PRIORITY; + rc = cam_subdev_probe(&g_ope_dev.sd, pdev, OPE_DEV_NAME, + CAM_OPE_DEVICE_TYPE); + if (rc) { + CAM_ERR(CAM_OPE, "OPE cam_subdev_probe failed:%d", rc); + return rc; + } + + node = (struct cam_node *) g_ope_dev.sd.token; + + hw_mgr_intf = CAM_MEM_ZALLOC(sizeof(*hw_mgr_intf), GFP_KERNEL); + if (!hw_mgr_intf) { + rc = -EINVAL; + goto hw_alloc_fail; + } + + rc = cam_ope_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf, + &iommu_hdl); + if (rc) { + CAM_ERR(CAM_OPE, "OPE HW manager init failed: %d", rc); + goto hw_init_fail; + } + + for (i = 0; i < OPE_CTX_MAX; i++) { + g_ope_dev.ctx_ope[i].base = &g_ope_dev.ctx[i]; + rc = cam_ope_context_init(&g_ope_dev.ctx_ope[i], + hw_mgr_intf, i, iommu_hdl); + if (rc) { + CAM_ERR(CAM_OPE, "OPE context init failed"); + goto ctx_fail; + } + } + + rc = cam_node_init(node, hw_mgr_intf, g_ope_dev.ctx, + OPE_CTX_MAX, OPE_DEV_NAME); + if (rc) { + CAM_ERR(CAM_OPE, "OPE node init failed"); + goto ctx_fail; + } + + cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_ope_dev_iommu_fault_handler, node); + + 
g_ope_dev.open_cnt = 0; + mutex_init(&g_ope_dev.ope_lock); + + node->sd_handler = cam_ope_subdev_close_internal; + CAM_DBG(CAM_OPE, "Subdev component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +ctx_fail: + for (--i; i >= 0; i--) + cam_ope_context_deinit(&g_ope_dev.ctx_ope[i]); +hw_init_fail: + CAM_MEM_FREE(hw_mgr_intf); +hw_alloc_fail: + cam_subdev_remove(&g_ope_dev.sd); + return rc; +} + +static void cam_ope_subdev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + struct v4l2_subdev *sd; + struct cam_subdev *subdev; + struct platform_device *pdev = to_platform_device(dev); + + if (!pdev) { + CAM_ERR(CAM_OPE, "pdev is NULL"); + return; + } + + sd = platform_get_drvdata(pdev); + if (!sd) { + CAM_ERR(CAM_OPE, "V4l2 subdev is NULL"); + return; + } + + subdev = v4l2_get_subdevdata(sd); + if (!subdev) { + CAM_ERR(CAM_OPE, "cam subdev is NULL"); + return; + } + + for (i = 0; i < OPE_CTX_MAX; i++) + cam_ope_context_deinit(&g_ope_dev.ctx_ope[i]); + + cam_node_deinit(g_ope_dev.node); + cam_subdev_remove(&g_ope_dev.sd); + mutex_destroy(&g_ope_dev.ope_lock); +} + +const static struct component_ops cam_ope_subdev_component_ops = { + .bind = cam_ope_subdev_component_bind, + .unbind = cam_ope_subdev_component_unbind, +}; + +static int cam_ope_subdev_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_OPE, "Adding OPE subdev component"); + rc = component_add(&pdev->dev, &cam_ope_subdev_component_ops); + if (rc) + CAM_ERR(CAM_OPE, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_ope_subdev_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_ope_subdev_component_ops); + return 0; +} + +static const struct of_device_id cam_ope_dt_match[] = { + {.compatible = "qcom,cam-ope"}, + {} +}; + + +struct platform_driver cam_ope_subdev_driver = { + 
.probe = cam_ope_subdev_probe, + .remove = cam_ope_subdev_remove, + .driver = { + .name = "cam_ope", + .of_match_table = cam_ope_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_ope_subdev_init_module(void) +{ + return platform_driver_register(&cam_ope_subdev_driver); +} + +void cam_ope_subdev_exit_module(void) +{ + platform_driver_unregister(&cam_ope_subdev_driver); +} + +MODULE_DESCRIPTION("MSM OPE driver"); +MODULE_LICENSE("GPL v2"); + diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c new file mode 100644 index 0000000000..26016ce2f7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c @@ -0,0 +1,4417 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cam_sync_api.h" +#include "cam_packet_util.h" +#include "cam_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_ope_hw_mgr_intf.h" +#include "cam_ope_hw_mgr.h" +#include "ope_hw.h" +#include "cam_smmu_api.h" +#include "cam_mem_mgr.h" +#include "cam_req_mgr_workq.h" +#include "cam_mem_mgr.h" +#include "cam_debug_util.h" +#include "cam_soc_util.h" +#include "cam_trace.h" +#include "cam_cpas_api.h" +#include "cam_common_util.h" +#include "cam_cdm_intf_api.h" +#include "cam_cdm_util.h" +#include "cam_cdm.h" +#include "ope_dev_intf.h" +#include "cam_compat.h" +#include "cam_mem_mgr_api.h" +#include "ope_core.h" + +static struct cam_ope_hw_mgr *ope_hw_mgr; + +static int cam_ope_req_timer_reset(struct cam_ope_ctx *ctx_data); + +static void cam_ope_mgr_dump_pf_data(struct cam_ope_hw_mgr *hw_mgr, + struct cam_hw_cmd_args *hw_cmd_args); 
+ +static int cam_ope_mgr_get_rsc_idx(struct cam_ope_ctx *ctx_data, + struct ope_io_buf_info *in_io_buf) +{ + int k = 0; + int rsc_idx = -EINVAL; + + if (in_io_buf->direction == CAM_BUF_INPUT) { + for (k = 0; k < OPE_IN_RES_MAX; k++) { + if (ctx_data->ope_acquire.in_res[k].res_id == + in_io_buf->resource_type) + break; + } + if (k == OPE_IN_RES_MAX) { + CAM_ERR(CAM_OPE, "Invalid res_id %d", + in_io_buf->resource_type); + goto end; + } + rsc_idx = k; + } else if (in_io_buf->direction == CAM_BUF_OUTPUT) { + for (k = 0; k < OPE_OUT_RES_MAX; k++) { + if (ctx_data->ope_acquire.out_res[k].res_id == + in_io_buf->resource_type) + break; + } + if (k == OPE_OUT_RES_MAX) { + CAM_ERR(CAM_OPE, "Invalid res_id %d", + in_io_buf->resource_type); + goto end; + } + rsc_idx = k; + } + +end: + return rsc_idx; +} + +static bool cam_ope_is_pending_request(struct cam_ope_ctx *ctx_data) +{ + return !bitmap_empty(ctx_data->bitmap, CAM_CTX_REQ_MAX); +} + +static int cam_ope_mgr_process_cmd(void *priv, void *data) +{ + int rc; + struct ope_cmd_work_data *task_data = NULL; + struct cam_ope_ctx *ctx_data; + struct cam_cdm_bl_request *cdm_cmd; + struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr; + + if (!data || !priv) { + CAM_ERR(CAM_OPE, "Invalid params%pK %pK", data, priv); + return -EINVAL; + } + + ctx_data = priv; + task_data = (struct ope_cmd_work_data *)data; + + mutex_lock(&hw_mgr->hw_mgr_mutex); + cdm_cmd = task_data->data; + + if (!cdm_cmd) { + CAM_ERR(CAM_OPE, "Invalid params%pK", cdm_cmd); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + if (ctx_data->ctx_state != OPE_CTX_STATE_ACQUIRED) { + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_OPE, "ctx id :%u is not in use", + ctx_data->ctx_id); + return -EINVAL; + } + + if (task_data->req_id <= ctx_data->last_flush_req) { + CAM_WARN(CAM_OPE, + "request %lld has been flushed, reject packet", + task_data->req_id, ctx_data->last_flush_req); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + if 
(!cam_ope_is_pending_request(ctx_data)) { + CAM_WARN(CAM_OPE, "no pending req, req %lld last flush %lld", + task_data->req_id, ctx_data->last_flush_req); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return -EINVAL; + } + + CAM_DBG(CAM_OPE, + "cam_cdm_submit_bls: handle 0x%x, ctx_id %d req %d cookie %d", + ctx_data->ope_cdm.cdm_handle, ctx_data->ctx_id, + task_data->req_id, cdm_cmd->cookie); + + if (task_data->req_id > ctx_data->last_flush_req) + ctx_data->last_flush_req = 0; + + cam_ope_req_timer_reset(ctx_data); + + rc = cam_cdm_submit_bls(ctx_data->ope_cdm.cdm_handle, cdm_cmd); + + if (!rc) + ctx_data->req_cnt++; + else + CAM_ERR(CAM_OPE, "submit failed for %lld", cdm_cmd->cookie); + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +} + +static int cam_ope_mgr_reset_hw(void) +{ + struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr; + int i, rc = 0; + + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RESET, + NULL, 0); + if (rc) { + CAM_ERR(CAM_OPE, "OPE Reset failed: %d", rc); + return rc; + } + } + + return rc; +} + +static void cam_ope_free_io_config(struct cam_ope_request *req) +{ + int i, j; + + for (i = 0; i < OPE_MAX_BATCH_SIZE; i++) { + for (j = 0; j < OPE_MAX_IO_BUFS; j++) { + if (req->io_buf[i][j]) { + CAM_MEM_ZFREE((void *)req->io_buf[i][j], + sizeof(struct ope_io_buf)); + req->io_buf[i][j] = NULL; + } + } + } +} + +static void cam_ope_device_timer_stop(struct cam_ope_hw_mgr *hw_mgr) +{ + if (hw_mgr->clk_info.watch_dog) { + hw_mgr->clk_info.watch_dog_reset_counter = 0; + crm_timer_exit(&hw_mgr->clk_info.watch_dog); + hw_mgr->clk_info.watch_dog = NULL; + } +} + +static void cam_ope_device_timer_reset(struct cam_ope_hw_mgr *hw_mgr) +{ + + if (hw_mgr->clk_info.watch_dog) { + CAM_DBG(CAM_OPE, "reset timer"); + crm_timer_reset(hw_mgr->clk_info.watch_dog); + hw_mgr->clk_info.watch_dog_reset_counter++; + } +} + +static int cam_ope_req_timer_modify(struct cam_ope_ctx *ctx_data, + 
int32_t expires) +{ + if (ctx_data->req_watch_dog) { + CAM_DBG(CAM_OPE, "stop timer : ctx_id = %d", ctx_data->ctx_id); + crm_timer_modify(ctx_data->req_watch_dog, expires); + } + return 0; +} + +static int cam_ope_req_timer_stop(struct cam_ope_ctx *ctx_data) +{ + if (ctx_data->req_watch_dog) { + CAM_DBG(CAM_OPE, "stop timer : ctx_id = %d", ctx_data->ctx_id); + ctx_data->req_watch_dog_reset_counter = 0; + crm_timer_exit(&ctx_data->req_watch_dog); + ctx_data->req_watch_dog = NULL; + } + return 0; +} + +static int cam_ope_req_timer_reset(struct cam_ope_ctx *ctx_data) +{ + if (ctx_data && ctx_data->req_watch_dog) { + ctx_data->req_watch_dog_reset_counter++; + CAM_DBG(CAM_OPE, "reset timer : ctx_id = %d, counter=%d", + ctx_data->ctx_id, + ctx_data->req_watch_dog_reset_counter); + crm_timer_reset(ctx_data->req_watch_dog); + } + + return 0; +} + +static int cam_ope_validate_frame_params(struct ope_frame_process *frame_process) +{ + int i, rc = 0; + + if (frame_process->batch_size > OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid batch: %d", + frame_process->batch_size); + return -EINVAL; + } + + for (i = 0; i < frame_process->batch_size; i++) { + if (frame_process->num_cmd_bufs[i] > OPE_MAX_CMD_BUFS) { + CAM_ERR(CAM_OPE, "Invalid num of cmd bufs for batch %d %d", + i, frame_process->num_cmd_bufs[i]); + return -EINVAL; + } + if (frame_process->frame_set[i].num_io_bufs > OPE_MAX_IO_BUFS) { + CAM_ERR(CAM_OPE, "Invalid num of bufs for batch %d %d", + i, frame_process->frame_set[i].num_io_bufs); + return -EINVAL; + } + } + return rc; +} +static int cam_ope_mgr_reapply_config(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, + struct cam_ope_request *ope_req) +{ + int rc = 0; + uint64_t request_id = 0; + struct crm_workq_task *task; + struct ope_cmd_work_data *task_data; + + request_id = ope_req->request_id; + CAM_DBG(CAM_OPE, "reapply req_id = %lld", request_id); + + task = cam_req_mgr_workq_get_task(ope_hw_mgr->cmd_work); + if (!task) { + CAM_ERR(CAM_OPE, "no 
empty task"); + return -ENOMEM; + } + + task_data = (struct ope_cmd_work_data *)task->payload; + task_data->data = (void *)ope_req->cdm_cmd; + task_data->req_id = request_id; + task_data->type = OPE_WORKQ_TASK_CMD_TYPE; + task->process_cb = cam_ope_mgr_process_cmd; + rc = cam_req_mgr_workq_enqueue_task(task, ctx_data, + CRM_TASK_PRIORITY_0); + + return rc; +} + +static int cam_get_valid_ctx_id(void) +{ + struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr; + int i; + + for (i = 0; i < OPE_CTX_MAX; i++) { + if (hw_mgr->ctx[i].ctx_state == OPE_CTX_STATE_ACQUIRED) + break; + } + + if (i == OPE_CTX_MAX) + return -EINVAL; + + return i; +} + +static int32_t cam_ope_mgr_process_msg(void *priv, void *data) +{ + struct ope_msg_work_data *task_data; + struct cam_ope_hw_mgr *hw_mgr; + struct cam_ope_ctx *ctx; + uint32_t irq_status; + int32_t ctx_id; + int rc = 0, i; + + if (!data || !priv) { + CAM_ERR(CAM_OPE, "Invalid data"); + return -EINVAL; + } + + task_data = data; + hw_mgr = priv; + irq_status = task_data->irq_status; + ctx_id = cam_get_valid_ctx_id(); + if (ctx_id < 0) { + CAM_ERR(CAM_OPE, "No valid context to handle error"); + return ctx_id; + } + + ctx = &hw_mgr->ctx[ctx_id]; + + /* Indicate about this error to CDM and reset OPE*/ + rc = cam_cdm_handle_error(ctx->ope_cdm.cdm_handle); + + mutex_lock(&ctx->ctx_mutex); + if (ctx->ctx_state != OPE_CTX_STATE_ACQUIRED) { + CAM_DBG(CAM_OPE, "ctx id: %d not in right state: %d", + ctx_id, ctx->ctx_state); + mutex_unlock(&ctx->ctx_mutex); + return -EINVAL; + } + + for (i = 0; i < hw_mgr->num_ope; i++) { + rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RESET, + NULL, 0); + if (rc) + CAM_ERR(CAM_OPE, "OPE Dev acquire failed: %d", rc); + } + + mutex_unlock(&ctx->ctx_mutex); + return rc; +} + +static int cam_ope_dump_hang_patches(struct cam_packet *packet, + struct cam_ope_hang_dump *dump) +{ + struct cam_patch_desc *patch_desc = NULL; + dma_addr_t iova_addr; + size_t src_buf_size; + int i, rc 
= 0; + int32_t iommu_hdl = ope_hw_mgr->iommu_hdl; + + /* process patch descriptor */ + patch_desc = (struct cam_patch_desc *) + ((uint32_t *) &packet->payload_flex + + packet->patch_offset/4); + + for (i = 0; i < packet->num_patches; i++) { + rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl, + iommu_hdl, &iova_addr, &src_buf_size, NULL, NULL); + if (rc < 0) { + CAM_ERR(CAM_UTIL, + "get src buf address failed for handle 0x%x", + patch_desc[i].src_buf_hdl); + return rc; + } + if (dump->num_bufs >= (OPE_MAX_BATCH_SIZE * OPE_MAX_CMD_BUFS)) { + CAM_ERR(CAM_OPE, "Invalid num_bufs entries"); + return -EINVAL; + } + dump->entries[dump->num_bufs].memhdl = + patch_desc[i].src_buf_hdl; + dump->entries[dump->num_bufs].iova = iova_addr; + dump->entries[dump->num_bufs].offset = patch_desc[i].src_offset; + dump->entries[dump->num_bufs].len = 0; + dump->entries[dump->num_bufs].size = src_buf_size; + dump->num_bufs++; + } + return rc; +} + +static int cam_ope_dump_direct(struct ope_cmd_buf_info *cmd_buf_info, + struct cam_ope_hang_dump *dump) +{ + dma_addr_t iova_addr; + size_t size; + int rc = 0; + + rc = cam_mem_get_io_buf(cmd_buf_info->mem_handle, + ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL, NULL); + if (rc < 0) { + CAM_ERR(CAM_UTIL, "get cmd buf addressfailed for handle 0x%x", + cmd_buf_info->mem_handle); + return rc; + } + dump->entries[dump->num_bufs].memhdl = cmd_buf_info->mem_handle; + dump->entries[dump->num_bufs].iova = iova_addr; + dump->entries[dump->num_bufs].offset = cmd_buf_info->offset; + dump->entries[dump->num_bufs].len = cmd_buf_info->length; + dump->entries[dump->num_bufs].size = size; + dump->num_bufs++; + return 0; +} + +static void cam_ope_dump_dmi(struct cam_ope_hang_dump *dump, uint32_t addr, + uint32_t length) +{ + int i; + uint32_t memhdl = 0, iova = 0, size; + + for (i = 0; i < dump->num_bufs; i++) { + if (dump->entries[i].iova + dump->entries[i].offset == addr) { + if (dump->entries[i].len == length) + goto end; + else if (dump->entries[i].len == 
0) { + dump->entries[i].len = length; + goto end; + } else { + iova = dump->entries[i].iova; + memhdl = dump->entries[i].memhdl; + size = dump->entries[i].size; + } + } + } + if (memhdl && iova) { + dump->entries[dump->num_bufs].memhdl = memhdl; + dump->entries[dump->num_bufs].iova = iova; + dump->entries[dump->num_bufs].offset = addr - iova; + dump->entries[dump->num_bufs].len = length; + dump->entries[dump->num_bufs].size = size; + dump->num_bufs++; + } +end: + return; +} + +static int cam_ope_mgr_put_cmd_buf(struct cam_packet *packet) +{ + int i = 0; + int rc; + struct cam_cmd_buf_desc *cmd_desc = NULL; + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *) &packet->payload_flex + packet->cmd_buf_offset/4); + + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (cmd_desc[i].type != CAM_CMD_BUF_GENERIC || + cmd_desc[i].meta_data == OPE_CMD_META_GENERIC_BLOB) + continue; + + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + } + + return 0; +} + +static int cam_ope_dump_indirect(struct ope_cmd_buf_info *cmd_buf_info, + struct cam_ope_hang_dump *dump) +{ + int rc = 0; + uintptr_t cpu_addr; + size_t buf_len; + uint32_t num_dmi; + struct cdm_dmi_cmd dmi_cmd; + uint32_t *print_ptr, print_idx; + + rc = cam_mem_get_cpu_buf(cmd_buf_info->mem_handle, + &cpu_addr, &buf_len); + if (rc || !cpu_addr || !buf_len) { + CAM_ERR(CAM_OPE, "get cmd buf fail 0x%x", + cmd_buf_info->mem_handle); + return rc; + } + rc = ope_validate_buff_offset(buf_len, cmd_buf_info); + if (rc) + goto put_buf; + + cpu_addr = cpu_addr + cmd_buf_info->offset; + + num_dmi = cmd_buf_info->length / + sizeof(struct cdm_dmi_cmd); + print_ptr = (uint32_t *)cpu_addr; + for (print_idx = 0; print_idx < num_dmi; print_idx++) { + memcpy(&dmi_cmd, (const void *)print_ptr, + sizeof(struct cdm_dmi_cmd)); + cam_ope_dump_dmi(dump, dmi_cmd.addr, dmi_cmd.length+1); + print_ptr += sizeof(struct cdm_dmi_cmd) / + sizeof(uint32_t); + } + +put_buf: + 
cam_mem_put_cpu_buf((int32_t) cmd_buf_info->mem_handle); + return rc; +} + +static int cam_ope_mgr_dump_cmd_buf(uint32_t *frame_process_addr, + struct cam_ope_hang_dump *dump) +{ + int rc = 0; + int i, j; + struct ope_frame_process *frame_process; + struct ope_cmd_buf_info *cmd_buf; + + frame_process = (struct ope_frame_process *)frame_process_addr; + rc = cam_ope_validate_frame_params(frame_process); + if (rc) + return rc; + for (i = 0; i < frame_process->batch_size; i++) { + for (j = 0; j < frame_process->num_cmd_bufs[i]; j++) { + cmd_buf = &frame_process->cmd_buf[i][j]; + if (dump->num_bufs >= (OPE_MAX_BATCH_SIZE * OPE_MAX_CMD_BUFS)) { + CAM_ERR(CAM_OPE, "Invalid num_bufs entries"); + return -EINVAL; + } + if (cmd_buf->type == OPE_CMD_BUF_TYPE_DIRECT) { + if (cmd_buf->cmd_buf_usage == OPE_CMD_BUF_DEBUG) + continue; + cam_ope_dump_direct(cmd_buf, dump); + } else if (cmd_buf->type == OPE_CMD_BUF_TYPE_INDIRECT) + cam_ope_dump_indirect(cmd_buf, dump); + } + } + return rc; +} + +static int cam_ope_mgr_dump_frame_set(uint32_t *frame_process_addr, + struct cam_ope_hang_dump *dump) +{ + int i, j, rc = 0; + dma_addr_t iova_addr; + size_t size; + struct ope_frame_process *frame_process; + struct ope_io_buf_info *io_buf; + struct cam_ope_buf_entry *buf_entry; + struct cam_ope_output_info *output_info; + + frame_process = (struct ope_frame_process *)frame_process_addr; + rc = cam_ope_validate_frame_params(frame_process); + if (rc) + return rc; + for (j = 0; j < frame_process->batch_size; j++) { + for (i = 0; i < frame_process->frame_set[j].num_io_bufs; i++) { + io_buf = &frame_process->frame_set[j].io_buf[i]; + rc = cam_mem_get_io_buf(io_buf->mem_handle[0], + ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL, NULL); + if (rc) { + CAM_ERR(CAM_OPE, "get io buf fail 0x%x", + io_buf->mem_handle[0]); + return rc; + } + buf_entry = &dump->entries[dump->num_bufs]; + buf_entry->memhdl = io_buf->mem_handle[0]; + buf_entry->iova = iova_addr; + buf_entry->offset = 
io_buf->plane_offset[0]; + buf_entry->len = size - io_buf->plane_offset[0]; + buf_entry->size = size; + dump->num_bufs++; + if (io_buf->direction == 2) { + if (dump->num_outputs >= + (OPE_MAX_BATCH_SIZE * OPE_OUT_RES_MAX)) { + CAM_ERR(CAM_OPE, "Invalid num_outputs"); + rc = -EINVAL; + break; + } + output_info = + &dump->outputs[dump->num_outputs]; + output_info->iova = iova_addr; + output_info->offset = io_buf->plane_offset[0]; + output_info->len = size - + io_buf->plane_offset[0]; + dump->num_outputs++; + } + } + } + return rc; +} + +static int cam_ope_dump_frame_process(struct cam_packet *packet, + struct cam_ope_hang_dump *dump) +{ + int rc = 0; + int i; + size_t len; + struct cam_cmd_buf_desc *cmd_desc = NULL; + uintptr_t cpu_addr = 0; + uint32_t *cpu_addr_local = NULL, *cpu_addr_u = NULL; + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *) &packet->payload_flex + packet->cmd_buf_offset/4); + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (cmd_desc[i].type != CAM_CMD_BUF_GENERIC || + cmd_desc[i].meta_data == OPE_CMD_META_GENERIC_BLOB) + continue; + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + &cpu_addr, &len); + if (rc || !cpu_addr || !len) { + CAM_ERR(CAM_OPE, "get cmd buf failed %x", + cmd_desc[i].mem_handle); + return rc; + } + if ((len <= cmd_desc[i].offset) || + (cmd_desc[i].size < cmd_desc[i].length) || + ((len - cmd_desc[i].offset) < + cmd_desc[i].length)) { + CAM_ERR(CAM_OPE, "Invalid offset or length"); + return -EINVAL; + } + + cpu_addr_u = (uint32_t *)(((uint8_t *)cpu_addr) + cmd_desc[i].offset); + cam_common_mem_kdup((void **)&cpu_addr_local, cpu_addr_u, cmd_desc[i].size); + break; + } + + if (!cpu_addr_u) { + CAM_ERR(CAM_OPE, "invalid number of cmd buf"); + return -EINVAL; + } + + cam_ope_mgr_dump_cmd_buf(cpu_addr_local, dump); + cam_ope_mgr_dump_frame_set(cpu_addr_local, dump); + cam_common_mem_free(cpu_addr_local); + cam_ope_mgr_put_cmd_buf(packet); + 
return rc; +} + +static int cam_ope_dump_bls(struct cam_ope_request *ope_req, + struct cam_ope_hang_dump *dump) +{ + struct cam_cdm_bl_request *cdm_cmd; + size_t size; + int i, rc; + dma_addr_t iova_addr; + + cdm_cmd = ope_req->cdm_cmd; + for (i = 0; i < cdm_cmd->cmd_arrary_count; i++) { + rc = cam_mem_get_io_buf(cdm_cmd->cmd_flex[i].bl_addr.mem_handle, + ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL, NULL); + if (rc) { + CAM_ERR(CAM_OPE, "get io buf fail 0x%x", + cdm_cmd->cmd_flex[i].bl_addr.mem_handle); + return rc; + } + if (dump->num_bls >= OPE_MAX_CDM_BLS) { + CAM_ERR(CAM_OPE, "Invalid num_bls"); + return -EINVAL; + } + dump->bl_entries[dump->num_bls].base = + (uint32_t)iova_addr + cdm_cmd->cmd_flex[i].offset; + dump->bl_entries[dump->num_bls].len = cdm_cmd->cmd_flex[i].len; + dump->bl_entries[dump->num_bls].arbitration = + cdm_cmd->cmd_flex[i].arbitrate; + dump->num_bls++; + } + return 0; +} + +static void cam_ope_dump_req_data(struct cam_ope_request *ope_req) +{ + struct cam_ope_hang_dump *dump; + struct cam_packet *packet = + (struct cam_packet *)ope_req->hang_data.packet; + + if (!ope_req->ope_debug_buf.cpu_addr || + ope_req->ope_debug_buf.len < sizeof(struct cam_ope_hang_dump) || + (ope_req->ope_debug_buf.offset + ope_req->ope_debug_buf.len) + > ope_req->ope_debug_buf.size) { + CAM_ERR(CAM_OPE, "Invalid debug buf, size %d %d len %d off %d", + sizeof(struct cam_ope_hang_dump), + ope_req->ope_debug_buf.size, + ope_req->ope_debug_buf.len, + ope_req->ope_debug_buf.offset); + return; + } + + dump = (struct cam_ope_hang_dump *)ope_req->ope_debug_buf.cpu_addr; + memset(dump, 0, sizeof(struct cam_ope_hang_dump)); + dump->num_bufs = 0; + cam_ope_dump_hang_patches(packet, dump); + cam_ope_dump_frame_process(packet, dump); + cam_ope_dump_bls(ope_req, dump); +} + +static bool cam_ope_check_req_delay(struct cam_ope_ctx *ctx_data, + uint64_t req_time) +{ + struct timespec64 ts; + uint64_t ts_ns; + + ktime_get_boottime_ts64(&ts); + ts_ns = (uint64_t)((ts.tv_sec * 
1000000000) + + ts.tv_nsec); + + if (ts_ns - req_time < + ((ctx_data->req_timer_timeout - + ctx_data->req_timer_timeout / 10) * 1000000)) { + CAM_INFO(CAM_OPE, "ctx: %d, ts_ns : %llu", + ctx_data->ctx_id, ts_ns); + cam_ope_req_timer_reset(ctx_data); + return true; + } + + return false; +} + +static int32_t cam_ope_process_request_timer(void *priv, void *data) +{ + struct ope_clk_work_data *clk_data = (struct ope_clk_work_data *)data; + struct cam_ope_ctx *ctx_data = (struct cam_ope_ctx *)clk_data->data; + struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr; + uint32_t id; + struct cam_hw_intf *dev_intf = NULL; + struct cam_ope_clk_info *clk_info; + struct cam_ope_dev_bw_update clk_update; + int i = 0; + int device_share_ratio = 1; + int path_index; + struct crm_workq_task *task; + struct ope_msg_work_data *task_data; + + if (!ctx_data) { + CAM_ERR(CAM_OPE, "ctx_data is NULL, failed to update clk"); + return -EINVAL; + } + + mutex_lock(&ctx_data->ctx_mutex); + if ((ctx_data->ctx_state != OPE_CTX_STATE_ACQUIRED) || + (ctx_data->req_watch_dog_reset_counter == 0)) { + CAM_DBG(CAM_OPE, "state %d counter = %d", ctx_data->ctx_state, + ctx_data->req_watch_dog_reset_counter); + mutex_unlock(&ctx_data->ctx_mutex); + return 0; + } + + if (cam_ope_is_pending_request(ctx_data)) { + + if (cam_ope_check_req_delay(ctx_data, + ctx_data->last_req_time)) { + mutex_unlock(&ctx_data->ctx_mutex); + return 0; + } + + if (cam_ope_check_req_delay(ctx_data, + ope_hw_mgr->last_callback_time)) { + CAM_WARN(CAM_OPE, + "ope ctx: %d stuck due to other contexts", + ctx_data->ctx_id); + mutex_unlock(&ctx_data->ctx_mutex); + return 0; + } + + if (!cam_cdm_detect_hang_error(ctx_data->ope_cdm.cdm_handle)) { + cam_ope_req_timer_reset(ctx_data); + mutex_unlock(&ctx_data->ctx_mutex); + return 0; + } + + /* Try checking ctx struck again */ + if (cam_ope_check_req_delay(ctx_data, + ope_hw_mgr->last_callback_time)) { + CAM_WARN(CAM_OPE, + "ope ctx: %d stuck due to other contexts", + ctx_data->ctx_id); + 
mutex_unlock(&ctx_data->ctx_mutex); + return 0; + } + + CAM_ERR(CAM_OPE, + "pending req at HW, ctx %d lrt %llu lct %llu", + ctx_data->ctx_id, ctx_data->last_req_time, + ope_hw_mgr->last_callback_time); + hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, + OPE_HW_DUMP_DEBUG, + NULL, 0); + + task = cam_req_mgr_workq_get_task(ope_hw_mgr->msg_work); + if (!task) { + CAM_ERR(CAM_OPE, "no empty task"); + mutex_unlock(&ctx_data->ctx_mutex); + return 0; + } + task_data = (struct ope_msg_work_data *)task->payload; + task_data->data = hw_mgr; + task_data->irq_status = 1; + task_data->type = OPE_WORKQ_TASK_MSG_TYPE; + task->process_cb = cam_ope_mgr_process_msg; + cam_req_mgr_workq_enqueue_task(task, ope_hw_mgr, + CRM_TASK_PRIORITY_0); + cam_ope_req_timer_reset(ctx_data); + mutex_unlock(&ctx_data->ctx_mutex); + return 0; + } + + cam_ope_req_timer_modify(ctx_data, ~0); + /* Remove context BW */ + dev_intf = hw_mgr->ope_dev_intf[0]; + if (!dev_intf) { + CAM_ERR(CAM_OPE, "OPE dev intf is NULL"); + mutex_unlock(&ctx_data->ctx_mutex); + return -EINVAL; + } + + clk_info = &hw_mgr->clk_info; + id = OPE_HW_BW_UPDATE; + device_share_ratio = hw_mgr->num_ope; + + clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC; + clk_update.ahb_vote.vote.freq = 0; + clk_update.ahb_vote_valid = false; + + /* + * Remove previous vote of this context from hw mgr first. + * hw_mgr_clk_info has all valid paths, with each path in its + * own index. 
BW that we wanted to vote now is after removing + * current context's vote from hw mgr consolidated vote + */ + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = ctx_data->clk_info.axi_path[i] + .path_data_type - + CAM_AXI_PATH_DATA_OPE_START_OFFSET; + + if (path_index >= CAM_OPE_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_OPE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i] + .path_data_type, + CAM_AXI_PATH_DATA_OPE_START_OFFSET, + CAM_OPE_MAX_PER_PATH_VOTES); + continue; + } + + clk_info->axi_path[path_index].camnoc_bw -= + ctx_data->clk_info.axi_path[i].camnoc_bw; + clk_info->axi_path[path_index].mnoc_ab_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + clk_info->axi_path[path_index].mnoc_ib_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + clk_info->axi_path[path_index].ddr_ab_bw -= + ctx_data->clk_info.axi_path[i].ddr_ab_bw; + clk_info->axi_path[path_index].ddr_ib_bw -= + ctx_data->clk_info.axi_path[i].ddr_ib_bw; + } + + memset(&ctx_data->clk_info.axi_path[0], 0, + CAM_OPE_MAX_PER_PATH_VOTES * + sizeof(struct cam_cpas_axi_per_path_bw_vote)); + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + + clk_update.axi_vote.num_paths = clk_info->num_paths; + memcpy(&clk_update.axi_vote.axi_path[0], + &clk_info->axi_path[0], + clk_update.axi_vote.num_paths * + sizeof(struct cam_cpas_axi_per_path_bw_vote)); + + if (device_share_ratio > 1) { + for (i = 0; i < clk_update.axi_vote.num_paths; i++) { + do_div( + clk_update.axi_vote.axi_path[i].camnoc_bw, + device_share_ratio); + do_div( + clk_update.axi_vote.axi_path[i].mnoc_ab_bw, + device_share_ratio); + do_div( + clk_update.axi_vote.axi_path[i].mnoc_ib_bw, + device_share_ratio); + do_div( + clk_update.axi_vote.axi_path[i].ddr_ab_bw, + device_share_ratio); + do_div( + clk_update.axi_vote.axi_path[i].ddr_ib_bw, + device_share_ratio); + } + } + + clk_update.axi_vote_valid = true; + dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id, + &clk_update, 
sizeof(clk_update)); + + CAM_DBG(CAM_OPE, "X :ctx_id = %d curr_fc = %u bc = %u", + ctx_data->ctx_id, ctx_data->clk_info.curr_fc, + ctx_data->clk_info.base_clk); + mutex_unlock(&ctx_data->ctx_mutex); + + return 0; +} + +static void cam_ope_req_timer_cb(struct timer_list *timer_data) +{ + unsigned long flags; + struct crm_workq_task *task; + struct ope_clk_work_data *task_data; + struct cam_req_mgr_timer *timer = + container_of(timer_data, struct cam_req_mgr_timer, sys_timer); + + spin_lock_irqsave(&ope_hw_mgr->hw_mgr_lock, flags); + task = cam_req_mgr_workq_get_task(ope_hw_mgr->timer_work); + if (!task) { + CAM_ERR(CAM_OPE, "no empty task"); + spin_unlock_irqrestore(&ope_hw_mgr->hw_mgr_lock, flags); + return; + } + + task_data = (struct ope_clk_work_data *)task->payload; + task_data->data = timer->parent; + task_data->type = OPE_WORKQ_TASK_MSG_TYPE; + task->process_cb = cam_ope_process_request_timer; + cam_req_mgr_workq_enqueue_task(task, ope_hw_mgr, + CRM_TASK_PRIORITY_0); + spin_unlock_irqrestore(&ope_hw_mgr->hw_mgr_lock, flags); +} + +static int cam_ope_start_req_timer(struct cam_ope_ctx *ctx_data) +{ + int rc = 0; + + rc = crm_timer_init(&ctx_data->req_watch_dog, + ctx_data->req_timer_timeout, ctx_data, &cam_ope_req_timer_cb); + if (rc) + CAM_ERR(CAM_OPE, "Failed to start timer"); + + ctx_data->req_watch_dog_reset_counter = 0; + + return rc; +} + +static int cam_ope_supported_clk_rates(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data) +{ + int i; + struct cam_hw_soc_info *soc_info; + struct cam_hw_intf *dev_intf = NULL; + struct cam_hw_info *dev = NULL; + + dev_intf = hw_mgr->ope_dev_intf[0]; + if (!dev_intf) { + CAM_ERR(CAM_OPE, "dev_intf is invalid"); + return -EINVAL; + } + + dev = (struct cam_hw_info *)dev_intf->hw_priv; + soc_info = &dev->soc_info; + + for (i = 0; i < CAM_MAX_VOTE; i++) { + ctx_data->clk_info.clk_rate[i] = + soc_info->clk_rate[i][soc_info->src_clk_idx]; + CAM_DBG(CAM_OPE, "clk_info[%d] = %d", + i, 
ctx_data->clk_info.clk_rate[i]); + } + + return 0; +} + +static int cam_ope_ctx_clk_info_init(struct cam_ope_ctx *ctx_data) +{ + int i; + + ctx_data->clk_info.curr_fc = 0; + ctx_data->clk_info.base_clk = 0; + ctx_data->clk_info.uncompressed_bw = 0; + ctx_data->clk_info.compressed_bw = 0; + + for (i = 0; i < CAM_OPE_MAX_PER_PATH_VOTES; i++) { + ctx_data->clk_info.axi_path[i].camnoc_bw = 0; + ctx_data->clk_info.axi_path[i].mnoc_ab_bw = 0; + ctx_data->clk_info.axi_path[i].mnoc_ib_bw = 0; + } + + cam_ope_supported_clk_rates(ope_hw_mgr, ctx_data); + + return 0; +} + +static int32_t cam_ope_deinit_idle_clk(void *priv, void *data) +{ + struct cam_ope_hw_mgr *hw_mgr = (struct cam_ope_hw_mgr *)priv; + struct ope_clk_work_data *task_data = (struct ope_clk_work_data *)data; + struct cam_ope_clk_info *clk_info = + (struct cam_ope_clk_info *)task_data->data; + uint32_t id; + uint32_t i; + struct cam_ope_ctx *ctx_data; + struct cam_hw_intf *dev_intf = NULL; + int rc = 0; + bool busy = false; + + clk_info->base_clk = 0; + clk_info->curr_clk = 0; + clk_info->over_clked = 0; + + mutex_lock(&hw_mgr->hw_mgr_mutex); + + for (i = 0; i < OPE_CTX_MAX; i++) { + ctx_data = &hw_mgr->ctx[i]; + mutex_lock(&ctx_data->ctx_mutex); + if (ctx_data->ctx_state == OPE_CTX_STATE_ACQUIRED) { + busy = cam_ope_is_pending_request(ctx_data); + if (busy) { + mutex_unlock(&ctx_data->ctx_mutex); + break; + } + cam_ope_ctx_clk_info_init(ctx_data); + } + mutex_unlock(&ctx_data->ctx_mutex); + } + + if (busy) { + cam_ope_device_timer_reset(hw_mgr); + rc = -EBUSY; + goto done; + } + + dev_intf = hw_mgr->ope_dev_intf[0]; + id = OPE_HW_CLK_DISABLE; + + CAM_DBG(CAM_OPE, "Disable %d", clk_info->hw_type); + + dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id, NULL, 0); + +done: + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static void cam_ope_device_timer_cb(struct timer_list *timer_data) +{ + unsigned long flags; + struct crm_workq_task *task; + struct ope_clk_work_data *task_data; + struct 
cam_req_mgr_timer *timer = + container_of(timer_data, struct cam_req_mgr_timer, sys_timer); + + spin_lock_irqsave(&ope_hw_mgr->hw_mgr_lock, flags); + task = cam_req_mgr_workq_get_task(ope_hw_mgr->timer_work); + if (!task) { + CAM_ERR(CAM_OPE, "no empty task"); + spin_unlock_irqrestore(&ope_hw_mgr->hw_mgr_lock, flags); + return; + } + + task_data = (struct ope_clk_work_data *)task->payload; + task_data->data = timer->parent; + task_data->type = OPE_WORKQ_TASK_MSG_TYPE; + task->process_cb = cam_ope_deinit_idle_clk; + cam_req_mgr_workq_enqueue_task(task, ope_hw_mgr, + CRM_TASK_PRIORITY_0); + spin_unlock_irqrestore(&ope_hw_mgr->hw_mgr_lock, flags); +} + +static int cam_ope_device_timer_start(struct cam_ope_hw_mgr *hw_mgr) +{ + int rc = 0; + int i; + + for (i = 0; i < CLK_HW_MAX; i++) { + if (!hw_mgr->clk_info.watch_dog) { + rc = crm_timer_init(&hw_mgr->clk_info.watch_dog, + OPE_DEVICE_IDLE_TIMEOUT, &hw_mgr->clk_info, + &cam_ope_device_timer_cb); + + if (rc) + CAM_ERR(CAM_OPE, "Failed to start timer %d", i); + + hw_mgr->clk_info.watch_dog_reset_counter = 0; + } + } + + return rc; +} + +static int cam_ope_get_actual_clk_rate_idx( + struct cam_ope_ctx *ctx_data, uint32_t base_clk) +{ + int i; + + for (i = 0; i < CAM_MAX_VOTE; i++) + if (ctx_data->clk_info.clk_rate[i] >= base_clk) + return i; + + /* + * Caller has to ensure returned index is within array + * size bounds while accessing that index. 
+ */ + + return i; +} + +static bool cam_ope_is_over_clk(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, + struct cam_ope_clk_info *hw_mgr_clk_info) +{ + int base_clk_idx; + int curr_clk_idx; + + base_clk_idx = cam_ope_get_actual_clk_rate_idx(ctx_data, + hw_mgr_clk_info->base_clk); + + curr_clk_idx = cam_ope_get_actual_clk_rate_idx(ctx_data, + hw_mgr_clk_info->curr_clk); + + CAM_DBG(CAM_OPE, "bc_idx = %d cc_idx = %d %d %d", + base_clk_idx, curr_clk_idx, hw_mgr_clk_info->base_clk, + hw_mgr_clk_info->curr_clk); + + if (curr_clk_idx > base_clk_idx) + return true; + + return false; +} + + +static int cam_ope_get_lower_clk_rate(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, uint32_t base_clk) +{ + int i; + + i = cam_ope_get_actual_clk_rate_idx(ctx_data, base_clk); + + while (i > 0) { + if (ctx_data->clk_info.clk_rate[i - 1]) + return ctx_data->clk_info.clk_rate[i - 1]; + i--; + } + + CAM_DBG(CAM_OPE, "Already clk at lower level"); + + return base_clk; +} + +static int cam_ope_get_next_clk_rate(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, uint32_t base_clk) +{ + int i; + + i = cam_ope_get_actual_clk_rate_idx(ctx_data, base_clk); + + while (i < CAM_MAX_VOTE - 1) { + if (ctx_data->clk_info.clk_rate[i + 1]) + return ctx_data->clk_info.clk_rate[i + 1]; + i++; + } + + CAM_DBG(CAM_OPE, "Already clk at higher level"); + + return base_clk; +} + +static int cam_ope_get_actual_clk_rate(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, uint32_t base_clk) +{ + int i; + + for (i = 0; i < CAM_MAX_VOTE; i++) + if (ctx_data->clk_info.clk_rate[i] >= base_clk) + return ctx_data->clk_info.clk_rate[i]; + + return base_clk; +} + +static int cam_ope_calc_total_clk(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_clk_info *hw_mgr_clk_info, uint32_t dev_type) +{ + int i; + struct cam_ope_ctx *ctx_data; + + hw_mgr_clk_info->base_clk = 0; + for (i = 0; i < OPE_CTX_MAX; i++) { + ctx_data = &hw_mgr->ctx[i]; + if (ctx_data->ctx_state == 
OPE_CTX_STATE_ACQUIRED) + hw_mgr_clk_info->base_clk += + ctx_data->clk_info.base_clk; + } + + return 0; +} + +static uint32_t cam_ope_mgr_calc_base_clk(uint32_t frame_cycles, + uint64_t budget) +{ + uint64_t mul = 1000000000; + uint64_t base_clk = frame_cycles * mul; + + do_div(base_clk, budget); + + CAM_DBG(CAM_OPE, "budget = %lld fc = %d ib = %lld base_clk = %lld", + budget, frame_cycles, + (long long)(frame_cycles * mul), base_clk); + + return base_clk; +} + +static bool cam_ope_update_clk_overclk_free(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, + struct cam_ope_clk_info *hw_mgr_clk_info, + struct cam_ope_clk_bw_request *clk_info, + uint32_t base_clk) +{ + int rc = false; + + /* + * In caseof no pending packets case + * 1. In caseof overclk cnt is less than threshold, increase + * overclk count and no update in the clock rate + * 2. In caseof overclk cnt is greater than or equal to threshold + * then lower clock rate by one level and update hw_mgr current + * clock value. + * a. In case of new clock rate greater than sum of clock + * rates, reset overclk count value to zero if it is + * overclock + * b. if it is less than sum of base clocks then go to next + * level of clock and make overclk count to zero + * c. 
if it is same as sum of base clock rates update overclock + * cnt to 0 + */ + if (hw_mgr_clk_info->over_clked < hw_mgr_clk_info->threshold) { + hw_mgr_clk_info->over_clked++; + rc = false; + } else { + hw_mgr_clk_info->curr_clk = + cam_ope_get_lower_clk_rate(hw_mgr, ctx_data, + hw_mgr_clk_info->curr_clk); + if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) { + if (cam_ope_is_over_clk(hw_mgr, ctx_data, + hw_mgr_clk_info)) + hw_mgr_clk_info->over_clked = 0; + } else if (hw_mgr_clk_info->curr_clk < + hw_mgr_clk_info->base_clk) { + hw_mgr_clk_info->curr_clk = + cam_ope_get_next_clk_rate(hw_mgr, ctx_data, + hw_mgr_clk_info->curr_clk); + hw_mgr_clk_info->over_clked = 0; + } else if (hw_mgr_clk_info->curr_clk == + hw_mgr_clk_info->base_clk) { + hw_mgr_clk_info->over_clked = 0; + } + rc = true; + } + + return rc; +} + +static bool cam_ope_update_clk_free(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, + struct cam_ope_clk_info *hw_mgr_clk_info, + struct cam_ope_clk_bw_request *clk_info, + uint32_t base_clk) +{ + int rc = false; + bool over_clocked = false; + + ctx_data->clk_info.curr_fc = clk_info->frame_cycles; + ctx_data->clk_info.base_clk = base_clk; + cam_ope_calc_total_clk(hw_mgr, hw_mgr_clk_info, + ctx_data->ope_acquire.dev_type); + + /* + * Current clock is not always sum of base clocks, due to + * clock scales update to next higher or lower levels, it + * equals to one of discrete clock values supported by hardware. + * So even current clock is higher than sum of base clocks, we + * can not consider it is over clocked. if it is greater than + * discrete clock level then only it is considered as over clock. + * 1. Handle over clock case + * 2. If current clock is less than sum of base clocks + * update current clock + * 3. 
If current clock is same as sum of base clocks no action + */ + + over_clocked = cam_ope_is_over_clk(hw_mgr, ctx_data, + hw_mgr_clk_info); + + if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk && + over_clocked) { + rc = cam_ope_update_clk_overclk_free(hw_mgr, ctx_data, + hw_mgr_clk_info, clk_info, base_clk); + } else if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) { + hw_mgr_clk_info->over_clked = 0; + rc = false; + } else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) { + hw_mgr_clk_info->curr_clk = cam_ope_get_actual_clk_rate(hw_mgr, + ctx_data, hw_mgr_clk_info->base_clk); + rc = true; + } + + return rc; +} + +static bool cam_ope_update_clk_busy(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, + struct cam_ope_clk_info *hw_mgr_clk_info, + struct cam_ope_clk_bw_request *clk_info, + uint32_t base_clk) +{ + uint32_t next_clk_level; + uint32_t actual_clk; + bool rc = false; + + /* 1. if current request frame cycles(fc) are more than previous + * frame fc + * Calculate the new base clock. + * if sum of base clocks are more than next available clk level + * Update clock rate, change curr_clk_rate to sum of base clock + * rates and make over_clked to zero + * else + * Update clock rate to next level, update curr_clk_rate and make + * overclked cnt to zero + * 2. if current fc is less than or equal to previous frame fc + * Still Bump up the clock to next available level + * if it is available, then update clock, make overclk cnt to + * zero. 
If the clock is already at highest clock rate then + * no need to update the clock + */ + ctx_data->clk_info.base_clk = base_clk; + hw_mgr_clk_info->over_clked = 0; + if (clk_info->frame_cycles > ctx_data->clk_info.curr_fc) { + cam_ope_calc_total_clk(hw_mgr, hw_mgr_clk_info, + ctx_data->ope_acquire.dev_type); + actual_clk = cam_ope_get_actual_clk_rate(hw_mgr, + ctx_data, base_clk); + if (hw_mgr_clk_info->base_clk > actual_clk) { + hw_mgr_clk_info->curr_clk = hw_mgr_clk_info->base_clk; + } else { + next_clk_level = cam_ope_get_next_clk_rate(hw_mgr, + ctx_data, hw_mgr_clk_info->curr_clk); + hw_mgr_clk_info->curr_clk = next_clk_level; + } + rc = true; + } else { + next_clk_level = + cam_ope_get_next_clk_rate(hw_mgr, ctx_data, + hw_mgr_clk_info->curr_clk); + if (hw_mgr_clk_info->curr_clk < next_clk_level) { + hw_mgr_clk_info->curr_clk = next_clk_level; + rc = true; + } + } + ctx_data->clk_info.curr_fc = clk_info->frame_cycles; + + return rc; +} + +static bool cam_ope_check_clk_update(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, int idx) +{ + bool busy = false, rc = false; + uint32_t base_clk; + struct cam_ope_clk_bw_request *clk_info; + uint64_t req_id; + struct cam_ope_clk_info *hw_mgr_clk_info; + + /* TODO: Have default clock rates update */ + /* TODO: Add support for debug clock updates */ + cam_ope_req_timer_reset(ctx_data); + cam_ope_device_timer_reset(hw_mgr); + hw_mgr_clk_info = &hw_mgr->clk_info; + req_id = ctx_data->req_list[idx]->request_id; + if (ctx_data->req_cnt > 1) + busy = true; + + CAM_DBG(CAM_OPE, "busy = %d req_id = %lld", busy, req_id); + + clk_info = &ctx_data->req_list[idx]->clk_info; + + /* Calculate base clk rate */ + base_clk = cam_ope_mgr_calc_base_clk( + clk_info->frame_cycles, clk_info->budget_ns); + ctx_data->clk_info.rt_flag = clk_info->rt_flag; + + if (busy) + rc = cam_ope_update_clk_busy(hw_mgr, ctx_data, + hw_mgr_clk_info, clk_info, base_clk); + else + rc = cam_ope_update_clk_free(hw_mgr, ctx_data, + hw_mgr_clk_info, 
clk_info, base_clk); + + CAM_DBG(CAM_OPE, "bc = %d cc = %d busy = %d overclk = %d uc = %d", + hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk, + busy, hw_mgr_clk_info->over_clked, rc); + + return rc; +} + +static int cam_ope_mgr_update_clk_rate(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data) +{ + struct cam_ope_dev_clk_update clk_upd_cmd; + int i; + + clk_upd_cmd.clk_rate = hw_mgr->clk_info.curr_clk; + + CAM_DBG(CAM_PERF, "clk_rate %u for dev_type %d", clk_upd_cmd.clk_rate, + ctx_data->ope_acquire.dev_type); + + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, + OPE_HW_CLK_UPDATE, + &clk_upd_cmd, sizeof(clk_upd_cmd)); + } + + return 0; +} + +static int cam_ope_mgr_calculate_num_path( + struct cam_ope_clk_bw_req_internal_v2 *clk_info, + struct cam_ope_ctx *ctx_data) +{ + int i, path_index = 0; + + for (i = 0; i < CAM_OPE_MAX_PER_PATH_VOTES; i++) { + if ((clk_info->axi_path[i].path_data_type < + CAM_AXI_PATH_DATA_OPE_START_OFFSET) || + (clk_info->axi_path[i].path_data_type > + CAM_AXI_PATH_DATA_OPE_MAX_OFFSET) || + ((clk_info->axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_OPE_START_OFFSET) >= + CAM_OPE_MAX_PER_PATH_VOTES)) { + CAM_DBG(CAM_OPE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i].path_data_type, + CAM_AXI_PATH_DATA_OPE_START_OFFSET, + CAM_OPE_MAX_PER_PATH_VOTES); + continue; + } + + path_index = clk_info->axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_OPE_START_OFFSET; + + CAM_DBG(CAM_OPE, + "clk_info: i[%d]: [%s %s] bw [%lld %lld] num_path: %d", + i, + cam_cpas_axi_util_trans_type_to_string( + clk_info->axi_path[i].transac_type), + cam_cpas_axi_util_path_type_to_string( + clk_info->axi_path[i].path_data_type), + clk_info->axi_path[i].camnoc_bw, + clk_info->axi_path[i].mnoc_ab_bw, + clk_info->num_paths); + } + + return (path_index+1); +} + +static bool cam_ope_update_bw_v2(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx 
*ctx_data, + struct cam_ope_clk_info *hw_mgr_clk_info, + struct cam_ope_clk_bw_req_internal_v2 *clk_info, + bool busy) +{ + int i, path_index; + bool update_required = true; + + /* + * If current request bandwidth is different from previous frames, then + * recalculate bandwidth of all contexts of same hardware and update + * voting of bandwidth + */ + + for (i = 0; i < clk_info->num_paths; i++) + CAM_DBG(CAM_OPE, "clk_info camnoc = %lld busy = %d", + clk_info->axi_path[i].camnoc_bw, busy); + + if (clk_info->num_paths == ctx_data->clk_info.num_paths) { + update_required = false; + for (i = 0; i < clk_info->num_paths; i++) { + if ((clk_info->axi_path[i].transac_type == + ctx_data->clk_info.axi_path[i].transac_type) && + (clk_info->axi_path[i].path_data_type == + ctx_data->clk_info.axi_path[i].path_data_type) && + (clk_info->axi_path[i].camnoc_bw == + ctx_data->clk_info.axi_path[i].camnoc_bw) && + (clk_info->axi_path[i].mnoc_ab_bw == + ctx_data->clk_info.axi_path[i].mnoc_ab_bw)) { + continue; + } else { + update_required = true; + break; + } + } + } + + if (!update_required) { + CAM_DBG(CAM_OPE, + "Incoming BW hasn't changed, no update required"); + return false; + } + + if (busy) { + for (i = 0; i < clk_info->num_paths; i++) { + if (ctx_data->clk_info.axi_path[i].camnoc_bw > + clk_info->axi_path[i].camnoc_bw) + return false; + } + } + + /* + * Remove previous vote of this context from hw mgr first. 
+ * hw_mgr_clk_info has all valid paths, with each path in its own index + */ + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = + ctx_data->clk_info.axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_OPE_START_OFFSET; + + if (path_index >= CAM_OPE_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_OPE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i].path_data_type, + CAM_AXI_PATH_DATA_OPE_START_OFFSET, + CAM_OPE_MAX_PER_PATH_VOTES); + continue; + } + + hw_mgr_clk_info->axi_path[path_index].camnoc_bw -= + ctx_data->clk_info.axi_path[i].camnoc_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw -= + ctx_data->clk_info.axi_path[i].ddr_ab_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw -= + ctx_data->clk_info.axi_path[i].ddr_ib_bw; + } + + ctx_data->clk_info.num_paths = + cam_ope_mgr_calculate_num_path(clk_info, ctx_data); + + memcpy(&ctx_data->clk_info.axi_path[0], + &clk_info->axi_path[0], + clk_info->num_paths * sizeof(struct cam_cpas_axi_per_path_bw_vote)); + + /* + * Add new vote of this context in hw mgr. 
 * hw_mgr_clk_info has all paths, with each path in its own index
 */
	for (i = 0; i < ctx_data->clk_info.num_paths; i++) {
		/* Slot in the consolidated table is fixed by the
		 * OPE-relative path data type.
		 */
		path_index =
			ctx_data->clk_info.axi_path[i].path_data_type -
			CAM_AXI_PATH_DATA_OPE_START_OFFSET;

		if (path_index >= CAM_OPE_MAX_PER_PATH_VOTES) {
			CAM_WARN(CAM_OPE,
				"Invalid path %d, start offset=%d, max=%d",
				ctx_data->clk_info.axi_path[i].path_data_type,
				CAM_AXI_PATH_DATA_OPE_START_OFFSET,
				CAM_OPE_MAX_PER_PATH_VOTES);
			continue;
		}

		/* Accumulate this context's bandwidth into the
		 * consolidated hw-mgr vote for the same path slot.
		 */
		hw_mgr_clk_info->axi_path[path_index].path_data_type =
			ctx_data->clk_info.axi_path[i].path_data_type;
		hw_mgr_clk_info->axi_path[path_index].transac_type =
			ctx_data->clk_info.axi_path[i].transac_type;
		hw_mgr_clk_info->axi_path[path_index].camnoc_bw +=
			ctx_data->clk_info.axi_path[i].camnoc_bw;
		hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw +=
			ctx_data->clk_info.axi_path[i].mnoc_ab_bw;
		hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw +=
			ctx_data->clk_info.axi_path[i].mnoc_ib_bw;
		hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw +=
			ctx_data->clk_info.axi_path[i].ddr_ab_bw;
		hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw +=
			ctx_data->clk_info.axi_path[i].ddr_ib_bw;
		CAM_DBG(CAM_OPE,
			"Consolidate Path Vote : Dev[%s] i[%d] path_idx[%d] : [%s %s] [%lld %lld]",
			ctx_data->ope_acquire.dev_name,
			i, path_index,
			cam_cpas_axi_util_trans_type_to_string(
			hw_mgr_clk_info->axi_path[path_index].transac_type),
			cam_cpas_axi_util_path_type_to_string(
			hw_mgr_clk_info->axi_path[path_index].path_data_type),
			hw_mgr_clk_info->axi_path[path_index].camnoc_bw,
			hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw);
	}

	/* Track the widest path set seen across contexts */
	if (hw_mgr_clk_info->num_paths < ctx_data->clk_info.num_paths)
		hw_mgr_clk_info->num_paths = ctx_data->clk_info.num_paths;

	return true;
}

/*
 * cam_ope_check_bw_update - fold the v2 bandwidth vote of request @idx of
 * @ctx_data into the hw-mgr consolidated clock info.
 *
 * Returns the value reported by cam_ope_update_bw_v2(), i.e. whether the
 * consolidated vote changed and a CPAS re-vote is required.
 */
static bool cam_ope_check_bw_update(struct cam_ope_hw_mgr *hw_mgr,
	struct cam_ope_ctx *ctx_data, int idx)
{
	bool busy = false, bw_updated = false;
	int i;
	struct cam_ope_clk_bw_req_internal_v2 *clk_info_v2;
	struct cam_ope_clk_info *hw_mgr_clk_info;
	uint64_t req_id;

	hw_mgr_clk_info = &hw_mgr->clk_info;
	/* NOTE(review): req_id is assigned but never used below */
	req_id = ctx_data->req_list[idx]->request_id;
	/* More than one outstanding request -> treat device as busy */
	if (ctx_data->req_cnt > 1)
		busy = true;

	clk_info_v2 = &ctx_data->req_list[idx]->clk_info_v2;

	bw_updated = cam_ope_update_bw_v2(hw_mgr, ctx_data,
		hw_mgr_clk_info, clk_info_v2, busy);

	for (i = 0; i < hw_mgr_clk_info->num_paths; i++) {
		CAM_DBG(CAM_OPE,
			"Final path_type: %s, transac_type: %s, camnoc_bw = %lld mnoc_ab_bw = %lld, mnoc_ib_bw = %lld, device: %s",
			cam_cpas_axi_util_path_type_to_string(
			hw_mgr_clk_info->axi_path[i].path_data_type),
			cam_cpas_axi_util_trans_type_to_string(
			hw_mgr_clk_info->axi_path[i].transac_type),
			hw_mgr_clk_info->axi_path[i].camnoc_bw,
			hw_mgr_clk_info->axi_path[i].mnoc_ab_bw,
			hw_mgr_clk_info->axi_path[i].mnoc_ib_bw,
			ctx_data->ope_acquire.dev_name);
	}

	return bw_updated;
}

/*
 * cam_ope_update_cpas_vote - push the consolidated hw-mgr bandwidth vote
 * to every OPE device via the OPE_HW_BW_UPDATE process_cmd.
 *
 * Sends an AXI-only vote: the AHB vote is filled but marked invalid.
 * Always returns 0.
 */
static int cam_ope_update_cpas_vote(struct cam_ope_hw_mgr *hw_mgr,
	struct cam_ope_ctx *ctx_data)
{
	int i = 0;
	struct cam_ope_clk_info *clk_info;
	struct cam_ope_dev_bw_update bw_update = {{0}, {0}, 0, 0};

	clk_info = &hw_mgr->clk_info;

	bw_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
	bw_update.ahb_vote.vote.freq = 0;
	bw_update.ahb_vote_valid = false;


	bw_update.axi_vote.num_paths = clk_info->num_paths;
	memcpy(&bw_update.axi_vote.axi_path[0],
		&clk_info->axi_path[0],
		bw_update.axi_vote.num_paths *
		sizeof(struct cam_cpas_axi_per_path_bw_vote));

	bw_update.axi_vote_valid = true;
	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
		/* NOTE(review): process_cmd return value is ignored here;
		 * a failed vote on one device is not reported to the caller.
		 */
		hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
			hw_mgr->ope_dev_intf[i]->hw_priv,
			OPE_HW_BW_UPDATE,
			&bw_update, sizeof(bw_update));
	}

	return 0;
}

/*
 * cam_ope_mgr_ope_clk_update - re-evaluate clock and bandwidth votes for
 * request @idx of @ctx_data and apply whichever changed.
 */
static int cam_ope_mgr_ope_clk_update(struct cam_ope_hw_mgr *hw_mgr,
	struct cam_ope_ctx *ctx_data, int idx)
{
	int rc = 0;

	if (cam_ope_check_clk_update(hw_mgr, ctx_data, idx))
		rc = cam_ope_mgr_update_clk_rate(hw_mgr, ctx_data);

	if (cam_ope_check_bw_update(hw_mgr,
		ctx_data, idx))
		rc |= cam_ope_update_cpas_vote(hw_mgr, ctx_data);

	return rc;
}

/*
 * cam_ope_ctx_cdm_callback - CDM completion callback for an OPE context.
 * @handle:   CDM handle the callback came from.
 * @userdata: the struct cam_ope_ctx registered at cam_cdm_acquire().
 * @status:   CDM BL status (success / resubmit / pagefault / flush / ...).
 * @cookie:   pointer to the request index (not read on PAGEFAULT).
 *
 * On success, resets the request/device timers; on HW_RESUBMIT, tries to
 * reapply the request; on any error, dumps the request, resets the HW and
 * maps the CDM status to a sync event.  In all completion paths the
 * request slot is freed and the context event callback is invoked.
 *
 * NOTE(review): on CAM_CDM_CB_STATUS_PAGEFAULT, req_id stays 0 and slot 0
 * of req_list is processed — presumably intentional (cookie is unusable on
 * a pagefault), but worth confirming.
 */
static void cam_ope_ctx_cdm_callback(uint32_t handle, void *userdata,
	enum cam_cdm_cb_status status, void *cookie)
{
	int rc = 0;
	struct cam_ope_ctx *ctx;
	struct cam_ope_request *ope_req;
	struct cam_hw_done_event_data buf_data;
	struct timespec64 ts;
	uint32_t evt_id = CAM_CTX_EVT_ID_SUCCESS;
	uint64_t req_id = 0;
	bool dump_flag = true;
	int i;

	if (!userdata) {
		CAM_ERR(CAM_OPE, "Invalid ctx from CDM callback");
		return;
	}

	/* cookie is only valid when the BL actually ran */
	if (status != CAM_CDM_CB_STATUS_PAGEFAULT)
		req_id = *(uint64_t *)cookie;

	if (req_id >= CAM_CTX_REQ_MAX) {
		CAM_ERR(CAM_OPE, "Invalid reqIdx = %u", req_id);
		return;
	}

	ctx = userdata;
	mutex_lock(&ctx->ctx_mutex);

	if (!test_bit(req_id, ctx->bitmap)) {
		CAM_ERR(CAM_OPE, "Req not present reqIdx = %u for ctx_id = %d",
			req_id, ctx->ctx_id);
		goto end;
	}

	ope_req = ctx->req_list[req_id];

	ktime_get_boottime_ts64(&ts);
	ope_hw_mgr->last_callback_time = (uint64_t)((ts.tv_sec * 1000000000) +
		ts.tv_nsec);

	CAM_DBG(CAM_REQ,
		"hdl=%x, udata=%pK, status=%d, cookie=%u",
		handle, userdata, status, req_id);
	CAM_DBG(CAM_REQ, "req_id= %llu ctx_id= %d lcb=%llu",
		ope_req->request_id, ctx->ctx_id,
		ope_hw_mgr->last_callback_time);

	if (ctx->ctx_state != OPE_CTX_STATE_ACQUIRED) {
		CAM_ERR(CAM_OPE, "ctx %u is in %d state",
			ctx->ctx_id, ctx->ctx_state);
		mutex_unlock(&ctx->ctx_mutex);
		return;
	}

	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
		CAM_DBG(CAM_OPE,
			"hdl=%x, udata=%pK, status=%d, cookie=%u req_id=%llu ctx_id=%d",
			handle, userdata, status, req_id,
			ope_req->request_id, ctx->ctx_id);
		cam_ope_req_timer_reset(ctx);
		cam_ope_device_timer_reset(ope_hw_mgr);
		buf_data.evt_param = CAM_SYNC_COMMON_EVENT_SUCCESS;
	} else if (status == CAM_CDM_CB_STATUS_HW_RESUBMIT) {
		CAM_INFO(CAM_OPE, "After reset of CDM and OPE, reapply req");
		buf_data.evt_param = CAM_SYNC_OPE_EVENT_HW_RESUBMIT;
		rc = cam_ope_mgr_reapply_config(ope_hw_mgr, ctx, ope_req);
		/* Reapply succeeded: keep the request alive, do not signal */
		if (!rc)
			goto end;
	} else {
		CAM_INFO(CAM_OPE,
			"CDM hdl=%x, udata=%pK, status=%d, cookie=%u req_id = %llu ctx_id=%d",
			handle, userdata, status, req_id,
			ope_req->request_id, ctx->ctx_id);
		CAM_INFO(CAM_OPE, "Rst of CDM and OPE for error reqid = %lld",
			ope_req->request_id);

		if (status != CAM_CDM_CB_STATUS_HW_FLUSH) {
			cam_ope_dump_req_data(ope_req);
			/* Dumped here already; suppress the optional dump below */
			dump_flag = false;

			CAM_INFO(CAM_OPE, "bach_size: %d", ctx->req_list[req_id]->num_batch);
			for (i = 0; i < ctx->req_list[req_id]->num_batch; i++)
				CAM_INFO(CAM_OPE, "i: %d num_stripes: %d",
					i, ctx->req_list[req_id]->num_stripes[i]);
		}
		rc = cam_ope_mgr_reset_hw();
		evt_id = CAM_CTX_EVT_ID_ERROR;

		/* Map CDM status to the sync event reported upward */
		if (status == CAM_CDM_CB_STATUS_PAGEFAULT)
			buf_data.evt_param = CAM_SYNC_OPE_EVENT_PAGE_FAULT;
		else if (status == CAM_CDM_CB_STATUS_HW_FLUSH)
			buf_data.evt_param = CAM_SYNC_OPE_EVENT_HW_FLUSH;
		else if (status == CAM_CDM_CB_STATUS_HW_RESET_DONE)
			buf_data.evt_param = CAM_SYNC_OPE_EVENT_HW_RESET_DONE;
		else if (status == CAM_CDM_CB_STATUS_HW_ERROR)
			buf_data.evt_param = CAM_SYNC_OPE_EVENT_HW_ERROR;
		else
			buf_data.evt_param = CAM_SYNC_OPE_EVENT_UNKNOWN;
	}

	if (ope_hw_mgr->dump_req_data_enable && dump_flag)
		cam_ope_dump_req_data(ope_req);

	ctx->req_cnt--;

	/* Release the request slot and notify the context layer */
	buf_data.request_id = ope_req->request_id;
	ope_req->request_id = 0;
	CAM_MEM_ZFREE((void *)ctx->req_list[req_id]->cdm_cmd,
		((sizeof(struct cam_cdm_bl_request)) +
		((OPE_MAX_CDM_BLS - 1) *
		sizeof(struct cam_cdm_bl_cmd))));
	ctx->req_list[req_id]->cdm_cmd = NULL;
	cam_ope_free_io_config(ctx->req_list[req_id]);
	CAM_MEM_ZFREE((void *)ctx->req_list[req_id],
		sizeof(struct cam_ope_request));
	ctx->req_list[req_id] = NULL;
	clear_bit(req_id, ctx->bitmap);
	ctx->ctxt_event_cb(ctx->context_priv, evt_id, &buf_data);

end:
	mutex_unlock(&ctx->ctx_mutex);
}

/*
 * cam_ope_hw_mgr_cb - IRQ callback from the OPE device layer.
 *
 * Runs in IRQ context: grabs a workq task under the hw-mgr spinlock and
 * defers the actual message processing to cam_ope_mgr_process_msg().
 * Returns 0 on success (or on NULL data), -ENOMEM when no task is free.
 */
int32_t cam_ope_hw_mgr_cb(uint32_t irq_status, void *data)
{
	int32_t rc = 0;
	unsigned long flags;
	struct cam_ope_hw_mgr *hw_mgr = data;
	struct crm_workq_task *task;
	struct ope_msg_work_data *task_data;

	if (!data) {
		CAM_ERR(CAM_OPE, "irq cb data is NULL");
		return rc;
	}

	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
	task = cam_req_mgr_workq_get_task(ope_hw_mgr->msg_work);
	if (!task) {
		CAM_ERR(CAM_OPE, "no empty task");
		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
		return -ENOMEM;
	}

	task_data = (struct ope_msg_work_data *)task->payload;
	task_data->data = hw_mgr;
	task_data->irq_status = irq_status;
	task_data->type = OPE_WORKQ_TASK_MSG_TYPE;
	task->process_cb = cam_ope_mgr_process_msg;
	rc = cam_req_mgr_workq_enqueue_task(task, ope_hw_mgr,
		CRM_TASK_PRIORITY_0);
	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);

	return rc;
}

/*
 * cam_ope_mgr_create_kmd_buf - run OPE_HW_PREPARE on every device for this
 * request and carve out the remainder of the KMD buffer for the CDM GenIRQ
 * command.
 *
 * Returns 0 on success, -ENOMEM when no space is left for GenIRQ, or the
 * device prepare error.
 */
static int cam_ope_mgr_create_kmd_buf(struct cam_ope_hw_mgr *hw_mgr,
	struct cam_packet *packet,
	struct cam_hw_prepare_update_args *prepare_args,
	struct cam_ope_ctx *ctx_data,
	struct cam_ope_request *ope_req,
	uint32_t *ope_cmd_buf_addr)
{
	int i, rc = 0;
	struct cam_ope_dev_prepare_req prepare_req;

	prepare_req.ctx_data = ctx_data;
	prepare_req.hw_mgr = hw_mgr;
	prepare_req.packet = packet;
	prepare_req.prepare_args = prepare_args;
	prepare_req.req_idx = ope_req->req_idx;
	prepare_req.kmd_buf_offset = 0;
	prepare_req.frame_process =
		(struct ope_frame_process *)ope_cmd_buf_addr;

	for (i = 0; i < ope_hw_mgr->num_ope; i++) {
		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
			hw_mgr->ope_dev_intf[i]->hw_priv,
			OPE_HW_PREPARE, &prepare_req, sizeof(prepare_req));
		if (rc) {
			CAM_ERR(CAM_OPE, "OPE Dev prepare failed: %d", rc);
			goto end;
		}
	}

	/* GenIRQ command lives in the KMD buffer after the prepared BLs */
	ope_req->genirq_buff_info.handle = ope_req->ope_kmd_buf.mem_handle;
	ope_req->genirq_buff_info.cpu_addr = (uint32_t *)ope_req->ope_kmd_buf.cpu_addr;
	ope_req->genirq_buff_info.offset = prepare_req.kmd_buf_offset;
	ope_req->genirq_buff_info.used_bytes = 0;
	ope_req->genirq_buff_info.size =
		ope_req->ope_kmd_buf.size -
		ope_req->ope_kmd_buf.len - prepare_req.kmd_buf_offset;

	/* NOTE(review): size is only checked for zero; if the subtraction
	 * above underflows (unsigned), a huge size would pass — confirm
	 * size >= len + kmd_buf_offset is guaranteed by the prepare step.
	 */
	if (!ope_req->genirq_buff_info.size) {
		CAM_ERR(CAM_OPE, "Not enough memory available for CDM GenIRQ Command");
		return -ENOMEM;
	}

end:
	return rc;
}

/*
 * cam_ope_mgr_process_io_cfg - build the in/out sync-object map entries
 * for request @req_idx from its already-parsed IO buffers.
 *
 * Input fences are collected (deduplicated, then merged into a single
 * sync object when more than one remains); output fences are copied into
 * prep_arg->out_map_entries.  Returns -EINVAL when no valid input fence
 * is found, or the cam_sync_merge() error.
 */
static int cam_ope_mgr_process_io_cfg(struct cam_ope_hw_mgr *hw_mgr,
	struct cam_packet *packet,
	struct cam_hw_prepare_update_args *prep_arg,
	struct cam_ope_ctx *ctx_data, uint32_t req_idx)
{
	int i, j = 0, k = 0, l, rc = 0;
	struct ope_io_buf *io_buf;
	int32_t sync_in_obj[CAM_MAX_IN_RES];
	int32_t merged_sync_in_obj;
	struct cam_ope_request *ope_request;

	ope_request = ctx_data->req_list[req_idx];
	prep_arg->num_out_map_entries = 0;
	prep_arg->num_in_map_entries = 0;

	/* NOTE(review): duplicate of the assignment a few lines above */
	ope_request = ctx_data->req_list[req_idx];
	CAM_DBG(CAM_OPE, "E: req_idx = %u %x", req_idx, packet);

	for (i = 0; i < ope_request->num_batch; i++) {
		for (l = 0; l < ope_request->num_io_bufs[i]; l++) {
			io_buf = ope_request->io_buf[i][l];
			if (io_buf->direction == CAM_BUF_INPUT) {
				if (io_buf->fence != -1) {
					if (j < CAM_MAX_IN_RES) {
						sync_in_obj[j++] =
							io_buf->fence;
						prep_arg->num_in_map_entries++;
					} else {
						CAM_ERR(CAM_OPE,
							"reached max in_res %d %d",
							io_buf->resource_type,
							ope_request->request_id);
					}
				} else {
					CAM_ERR(CAM_OPE, "Invalid fence %d %d",
						io_buf->resource_type,
						ope_request->request_id);
				}
			} else {
				if (io_buf->fence != -1) {
					prep_arg->out_map_entries[k].sync_id =
						io_buf->fence;
					k++;
					prep_arg->num_out_map_entries++;
				} else {
					/* LTM stats output legitimately has no fence */
					if (io_buf->resource_type
						!= OPE_OUT_RES_STATS_LTM) {
						CAM_ERR(CAM_OPE,
							"Invalid fence %d %d",
							io_buf->resource_type,
							ope_request->request_id);
					}
				}
			}
			CAM_DBG(CAM_REQ,
				"ctx_id: %u req_id: %llu dir[%d] %u, fence: %d",
				ctx_data->ctx_id, packet->header.request_id, i,
				io_buf->direction, io_buf->fence);
			CAM_DBG(CAM_REQ, "rsc_type = %u fmt = %d",
				io_buf->resource_type,
				io_buf->format);
		}
	}

	/* Drop duplicate input fences before merging */
	if (prep_arg->num_in_map_entries > 1 &&
		prep_arg->num_in_map_entries <= CAM_MAX_IN_RES)
		prep_arg->num_in_map_entries =
			cam_common_util_remove_duplicate_arr(
			sync_in_obj, prep_arg->num_in_map_entries);

	if (prep_arg->num_in_map_entries > 1 &&
		prep_arg->num_in_map_entries <= CAM_MAX_IN_RES) {
		rc = cam_sync_merge(&sync_in_obj[0],
			prep_arg->num_in_map_entries, &merged_sync_in_obj);
		if (rc) {
			prep_arg->num_out_map_entries = 0;
			prep_arg->num_in_map_entries = 0;
			return rc;
		}

		ope_request->in_resource = merged_sync_in_obj;

		prep_arg->in_map_entries[0].sync_id = merged_sync_in_obj;
		prep_arg->num_in_map_entries = 1;
		CAM_DBG(CAM_REQ, "ctx_id: %u req_id: %llu Merged Sync obj: %d",
			ctx_data->ctx_id, packet->header.request_id,
			merged_sync_in_obj);
	} else if (prep_arg->num_in_map_entries == 1) {
		prep_arg->in_map_entries[0].sync_id = sync_in_obj[0];
		prep_arg->num_in_map_entries = 1;
		ope_request->in_resource = 0;
		CAM_DBG(CAM_OPE, "fence = %d", sync_in_obj[0]);
	} else {
		CAM_DBG(CAM_OPE, "Invalid count of input fences, count: %d",
			prep_arg->num_in_map_entries);
		prep_arg->num_in_map_entries = 0;
		ope_request->in_resource = 0;
		rc = -EINVAL;
	}
	return rc;
}

/*
 * cam_ope_mgr_print_stripe_info - debug-log one stripe's geometry,
 * addressing and packing parameters.
 */
static void cam_ope_mgr_print_stripe_info(uint32_t batch,
	uint32_t io_buf, uint32_t plane, uint32_t stripe,
	struct ope_stripe_io *stripe_info, uint64_t iova_addr)
{
	CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d: E",
		batch, io_buf, plane, stripe);
	CAM_DBG(CAM_OPE, "width: %d s_h: %u s_s: %u",
		stripe_info->width, stripe_info->height,
		stripe_info->stride);
	CAM_DBG(CAM_OPE, "s_xinit = %u iova = %x s_loc = %u",
		stripe_info->x_init, iova_addr, stripe_info->s_location);
	CAM_DBG(CAM_OPE, "s_off = %u s_format = %u s_len = %u d_bus %d",
		stripe_info->offset, stripe_info->format,
		stripe_info->len, stripe_info->disable_bus);
	CAM_DBG(CAM_OPE, "s_align = %u s_pack = %u s_unpack = %u",
		stripe_info->alignment, stripe_info->pack_format,
		stripe_info->unpack_format);
	CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d: E",
		batch, io_buf, plane, stripe);
}

/*
 * cam_ope_mgr_process_cmd_io_buf_req - parse the IO-buffer section of the
 * frame-process blob into per-request ope_io_buf structures.
 *
 * First pass validates batch/buffer/plane/stripe counts; second pass
 * allocates io_buf entries, maps each plane (secure or non-secure IOMMU
 * handle chosen per buffer) and fills per-stripe addressing info.
 *
 * NOTE(review): several error returns inside the second pass leave
 * earlier io_buf allocations in place — presumably freed by the caller
 * via cam_ope_free_io_config(); confirm.
 */
static int cam_ope_mgr_process_cmd_io_buf_req(struct cam_ope_hw_mgr *hw_mgr,
	struct cam_packet *packet, struct cam_ope_ctx *ctx_data,
	uint32_t *frame_process_addr, size_t length, struct cam_ope_request *ope_request,
	struct list_head *buf_tracker)
{
	int rc = 0;
	int i, j, k, l;
	dma_addr_t iova_addr;
	size_t len;
	struct ope_frame_process *in_frame_process;
	struct ope_frame_set *in_frame_set;
	struct ope_io_buf_info *in_io_buf;
	struct ope_stripe_info *in_stripe_info;
	struct ope_io_buf *io_buf;
	struct ope_stripe_io *stripe_info;
	uint32_t alignment;
	uint32_t rsc_idx;
	uint32_t pack_format;
	uint32_t unpack_format;
	struct ope_in_res_info *in_res;
	struct ope_out_res_info *out_res;
	bool is_secure;

	in_frame_process = (struct ope_frame_process *)frame_process_addr;

	ope_request->num_batch = in_frame_process->batch_size;

	/* Validation-only pass over all counts before anything is allocated */
	for (i = 0; i < in_frame_process->batch_size; i++) {
		in_frame_set = &in_frame_process->frame_set[i];

		if (in_frame_set->num_io_bufs > OPE_MAX_IO_BUFS) {
			CAM_ERR(CAM_OPE, "Wrong number of io buffers: %u",
				in_frame_set->num_io_bufs);
			return -EINVAL;
		}

		for (j = 0; j < in_frame_set->num_io_bufs; j++) {
			in_io_buf = &in_frame_set->io_buf[j];
			if (in_io_buf->num_planes > OPE_MAX_PLANES) {
				CAM_ERR(CAM_OPE, "wrong number of planes: %u",
					in_io_buf->num_planes);
				return -EINVAL;
			}
			for (k = 0; k < in_io_buf->num_planes; k++) {
				if ((!in_io_buf->num_stripes[k]) ||
					(in_io_buf->num_stripes[k] > OPE_MAX_STRIPES)) {
					CAM_ERR(CAM_OPE, "num_stripes is invalid");
					return -EINVAL;
				}
				/* NOTE(review): this inner loop only assigns
				 * in_stripe_info and has no other effect —
				 * looks like dead code from an earlier version.
				 */
				for (l = 0; l < in_io_buf->num_stripes[k];
					l++) {
					in_stripe_info =
						&in_io_buf->stripe_info[k][l];
				}
			}
		}
	}

	/* Second pass: allocate and populate the per-request IO buffers */
	for (i = 0; i < ope_request->num_batch; i++) {
		in_frame_set = &in_frame_process->frame_set[i];

		if (in_frame_set->num_io_bufs > OPE_MAX_IO_BUFS) {
			CAM_ERR(CAM_OPE, "Wrong number of io buffers: %u",
				in_frame_set->num_io_bufs);
			return -EINVAL;
		}

		ope_request->num_io_bufs[i] = in_frame_set->num_io_bufs;
		for (j = 0; j < in_frame_set->num_io_bufs; j++) {
			in_io_buf = &in_frame_set->io_buf[j];
			ope_request->io_buf[i][j] =
				CAM_MEM_ZALLOC(sizeof(struct ope_io_buf), GFP_KERNEL);
			if (!ope_request->io_buf[i][j]) {
				CAM_ERR(CAM_OPE,
					"IO config allocation failure");
				cam_ope_free_io_config(ope_request);
				return -ENOMEM;
			}
			io_buf = ope_request->io_buf[i][j];
			if (in_io_buf->num_planes > OPE_MAX_PLANES) {
				CAM_ERR(CAM_OPE, "wrong number of planes: %u",
					in_io_buf->num_planes);
				return -EINVAL;
			}

			io_buf->num_planes = in_io_buf->num_planes;
			io_buf->resource_type = in_io_buf->resource_type;
			io_buf->direction = in_io_buf->direction;
			io_buf->fence = in_io_buf->fence;
			io_buf->format = in_io_buf->format;

			rc = cam_ope_mgr_get_rsc_idx(ctx_data, in_io_buf);
			if (rc < 0) {
				CAM_ERR(CAM_OPE, "Invalid rsc idx = %d", rc);
				return rc;
			}
			rsc_idx = rc;
			/* Packing parameters come from the acquire-time
			 * resource description matching this buffer.
			 */
			if (in_io_buf->direction == CAM_BUF_INPUT) {
				in_res =
					&ctx_data->ope_acquire.in_res[rsc_idx];
				alignment = in_res->alignment;
				unpack_format = in_res->unpacker_format;
				pack_format = 0;
				if (in_io_buf->pix_pattern >
					PIXEL_PATTERN_CRYCBY) {
					CAM_ERR(CAM_OPE,
						"Invalid pix pattern = %u",
						in_io_buf->pix_pattern);
					return -EINVAL;
				}
				io_buf->pix_pattern = in_io_buf->pix_pattern;
			} else if (in_io_buf->direction == CAM_BUF_OUTPUT) {
				out_res =
					&ctx_data->ope_acquire.out_res[rsc_idx];
				alignment = out_res->alignment;
				pack_format = out_res->packer_format;
				unpack_format = 0;
			}

			for (k = 0; k < in_io_buf->num_planes; k++) {
				if ((!in_io_buf->num_stripes[k]) ||
					(in_io_buf->num_stripes[k] > OPE_MAX_STRIPES)) {
					CAM_ERR(CAM_OPE, "num_stripes is invalid");
					return -EINVAL;
				}
				io_buf->num_stripes[k] =
					in_io_buf->num_stripes[k];
				/* Secure buffers map through the secure IOMMU handle */
				is_secure = cam_mem_is_secure_buf(
					in_io_buf->mem_handle[k]);
				if (is_secure)
					rc = cam_mem_get_io_buf(
						in_io_buf->mem_handle[k],
						hw_mgr->iommu_sec_hdl,
	*cmd_buf;
	bool is_kmd_buf_valid = false;

	frame_process = (struct ope_frame_process *)frame_process_addr;
	rc = cam_ope_validate_frame_params(frame_process);
	if (rc)
		goto end;

	CAM_DBG(CAM_OPE, "cmd buf for req id = %lld b_size = %d",
		packet->header.request_id, frame_process->batch_size);

	for (i = 0; i < frame_process->batch_size; i++) {
		CAM_DBG(CAM_OPE, "batch: %d count %d", i,
			frame_process->num_cmd_bufs[i]);
		for (j = 0; j < frame_process->num_cmd_bufs[i]; j++) {
			CAM_DBG(CAM_OPE, "batch: %d cmd_buf_idx :%d mem_hdl:%x",
				i, j, frame_process->cmd_buf[i][j].mem_handle);
			CAM_DBG(CAM_OPE, "size = %u scope = %d buf_type = %d",
				frame_process->cmd_buf[i][j].size,
				frame_process->cmd_buf[i][j].cmd_buf_scope,
				frame_process->cmd_buf[i][j].type);
			CAM_DBG(CAM_OPE, "usage = %d buffered = %d s_idx = %d",
				frame_process->cmd_buf[i][j].cmd_buf_usage,
				frame_process->cmd_buf[i][j].cmd_buf_buffered,
				frame_process->cmd_buf[i][j].stripe_idx);
		}
	}

	ope_request->num_batch = frame_process->batch_size;

	for (i = 0; i < frame_process->batch_size; i++) {
		for (j = 0; j < frame_process->num_cmd_bufs[i]; j++) {
			cmd_buf = &frame_process->cmd_buf[i][j];

			switch (cmd_buf->cmd_buf_scope) {
			case OPE_CMD_BUF_SCOPE_FRAME: {
				/* Frame-scope buffers are mapped to both the
				 * OPE and CDM IOMMU domains plus a CPU view.
				 */
				rc = cam_mem_get_io_buf(cmd_buf->mem_handle,
					hw_mgr->iommu_hdl, &iova_addr, &len, NULL,
					buf_tracker);
				if (rc) {
					CAM_ERR(CAM_OPE, "get cmd buffailed %x",
						hw_mgr->iommu_hdl);
					goto end;
				}

				iova_addr = iova_addr + cmd_buf->offset;

				rc = cam_mem_get_io_buf(cmd_buf->mem_handle,
					hw_mgr->iommu_cdm_hdl, &iova_cdm_addr, &len, NULL,
					buf_tracker);
				if (rc) {
					CAM_ERR(CAM_OPE, "get cmd buffailed %x",
						hw_mgr->iommu_hdl);
					goto end;
				}

				iova_cdm_addr = iova_cdm_addr + cmd_buf->offset;

				rc = cam_mem_get_cpu_buf(cmd_buf->mem_handle,
					&cpu_addr, &len);
				if (rc || !cpu_addr) {
					CAM_ERR(CAM_OPE, "get cmd buffailed %x",
						hw_mgr->iommu_hdl);
					goto end;
				}
				/* NOTE(review): this return leaks the CPU
				 * mapping taken just above — no
				 * cam_mem_put_cpu_buf() on this path.
				 */
				if ((len <= frame_process->cmd_buf[i][j].offset) ||
					(frame_process->cmd_buf[i][j].size <
					frame_process->cmd_buf[i][j].length) ||
					((len - frame_process->cmd_buf[i][j].offset) <
					frame_process->cmd_buf[i][j].length)) {
					CAM_ERR(CAM_OPE, "Invalid offset.");
					return -EINVAL;
				}
				cpu_addr = cpu_addr +
					frame_process->cmd_buf[i][j].offset;
				CAM_DBG(CAM_OPE, "Hdl %x size %d len %d off %d",
					cmd_buf->mem_handle, cmd_buf->size,
					cmd_buf->length,
					cmd_buf->offset);
				if (cmd_buf->cmd_buf_usage == OPE_CMD_BUF_KMD) {
					ope_request->ope_kmd_buf.mem_handle =
						cmd_buf->mem_handle;
					ope_request->ope_kmd_buf.cpu_addr =
						cpu_addr;
					ope_request->ope_kmd_buf.iova_addr =
						iova_addr;
					ope_request->ope_kmd_buf.iova_cdm_addr =
						iova_cdm_addr;
					ope_request->ope_kmd_buf.len = len;
					ope_request->ope_kmd_buf.offset =
						cmd_buf->offset;
					ope_request->ope_kmd_buf.size =
						cmd_buf->size;
					is_kmd_buf_valid = true;
					CAM_DBG(CAM_OPE, "kbuf:%x io:%x cdm:%x",
						ope_request->ope_kmd_buf.cpu_addr,
						ope_request->ope_kmd_buf.iova_addr,
						ope_request->ope_kmd_buf.iova_cdm_addr);
				} else if (cmd_buf->cmd_buf_usage ==
					OPE_CMD_BUF_DEBUG) {
					ope_request->ope_debug_buf.cpu_addr =
						cpu_addr;
					ope_request->ope_debug_buf.iova_addr =
						iova_addr;
					ope_request->ope_debug_buf.len =
						cmd_buf->length;
					ope_request->ope_debug_buf.size =
						len;
					ope_request->ope_debug_buf.offset =
						cmd_buf->offset;
					CAM_DBG(CAM_OPE, "dbg buf = %x",
						ope_request->ope_debug_buf.cpu_addr);
				}
				cam_mem_put_cpu_buf(cmd_buf->mem_handle);
				break;
			}
			case OPE_CMD_BUF_SCOPE_STRIPE: {
				/* Only count stripes/cmd-bufs here; the
				 * actual stripe data is handled elsewhere.
				 */
				uint32_t num_cmd_bufs = 0;
				uint32_t s_idx = 0;

				s_idx = cmd_buf->stripe_idx;
				/* NOTE(review): s_idx is uint32_t, so the
				 * "s_idx < 0" half of this check is always
				 * false (tautological comparison).
				 */
				if (s_idx < 0 || s_idx >= OPE_MAX_STRIPES) {
					CAM_ERR(CAM_OPE, "Invalid index.");
					return -EINVAL;
				}
				num_cmd_bufs =
					ope_request->num_stripe_cmd_bufs[i][s_idx];

				/* First cmd buf seen for this stripe index */
				if (!num_cmd_bufs)
					ope_request->num_stripes[i]++;

				ope_request->num_stripe_cmd_bufs[i][s_idx]++;
				break;
			}

			default:
				break;
			}
		}
	}


	for (i = 0; i < frame_process->batch_size; i++) {
		CAM_DBG(CAM_OPE, "num of stripes for batch %d is %d",
			i, ope_request->num_stripes[i]);
		for (j = 0; j < ope_request->num_stripes[i]; j++) {
			CAM_DBG(CAM_OPE, "cmd buffers for stripe: %d:%d is %d",
				i, j, ope_request->num_stripe_cmd_bufs[i][j]);
		}
	}

	/* Exactly one KMD-usage frame buffer is mandatory */
	if (!is_kmd_buf_valid) {
		CAM_DBG(CAM_OPE, "Invalid kmd buffer");
		rc = -EINVAL;
	}
end:
	return rc;
}

/*
 * cam_ope_mgr_process_cmd_desc - locate the OPE frame-process command
 * buffer among the packet's command descriptors, duplicate it into kernel
 * memory and hand it to the cmd-buf and io-buf parsers.
 *
 * On success *ope_cmd_buf_addr points at the kernel copy; on failure it is
 * reset to 0.
 *
 * NOTE(review): each matching descriptor overwrites cpu_addr_local without
 * freeing the previous duplicate, and the cam_mem_get_cpu_buf() references
 * taken in the loop are never released here — confirm ownership model.
 * Also, the "Invalid offset or length" path does goto end with rc still 0,
 * so that error is reported as success.
 */
static int cam_ope_mgr_process_cmd_desc(struct cam_ope_hw_mgr *hw_mgr,
	struct cam_packet *packet, struct cam_ope_ctx *ctx_data,
	uint32_t **ope_cmd_buf_addr, struct cam_ope_request *ope_request,
	struct list_head *buf_tracker)
{
	int rc = 0;
	int i;
	int num_cmd_buf = 0;
	size_t len;
	struct cam_cmd_buf_desc *cmd_desc = NULL;
	uintptr_t cpu_addr = 0;
	uint32_t *cpu_addr_local = NULL, *cpu_addr_u = NULL;

	cmd_desc = (struct cam_cmd_buf_desc *)
		((uint32_t *) &packet->payload_flex + packet->cmd_buf_offset/4);

	*ope_cmd_buf_addr = 0;
	for (i = 0; i < packet->num_cmd_buf; i++, num_cmd_buf++) {
		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
		if (rc)
			return rc;

		/* Skip generic-blob descriptors; they are parsed elsewhere */
		if (cmd_desc[i].type != CAM_CMD_BUF_GENERIC ||
			cmd_desc[i].meta_data == OPE_CMD_META_GENERIC_BLOB)
			continue;
		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
			&cpu_addr, &len);
		if (rc || !cpu_addr) {
			CAM_ERR(CAM_OPE, "get cmd buf failed %x",
				hw_mgr->iommu_hdl);
			goto end;
		}
		if ((len <= cmd_desc[i].offset) ||
			(cmd_desc[i].size < cmd_desc[i].length) ||
			((len - cmd_desc[i].offset) <
			cmd_desc[i].length)) {
			CAM_ERR(CAM_OPE, "Invalid offset or length");
			goto end;
		}

		/* Work on a kernel-side copy of the user command buffer */
		cpu_addr_u = (uint32_t *)(((uint8_t *)cpu_addr) + cmd_desc[i].offset);
		cam_common_mem_kdup((void **)&cpu_addr_local, cpu_addr_u, cmd_desc[i].size);
		*ope_cmd_buf_addr = cpu_addr_local;
	}

	if (!cpu_addr_u) {
		CAM_ERR(CAM_OPE, "invalid number of cmd buf");
		*ope_cmd_buf_addr = 0;
		return -EINVAL;
	}

	ope_request->request_id = packet->header.request_id;

	rc = cam_ope_mgr_process_cmd_buf_req(hw_mgr, packet, ctx_data,
		*ope_cmd_buf_addr, len, ope_request, buf_tracker);
	if (rc) {
		CAM_ERR(CAM_OPE, "Process OPE cmd request is failed: %d", rc);
		goto free_buf;
	}

	rc = cam_ope_mgr_process_cmd_io_buf_req(hw_mgr, packet, ctx_data,
		*ope_cmd_buf_addr, len, ope_request, buf_tracker);
	if (rc) {
		CAM_ERR(CAM_OPE, "Process OPE cmd io request is failed: %d",
			rc);
		goto free_buf;
	}

	return rc;

free_buf:
	cam_common_mem_free(cpu_addr_local);
end:
	*ope_cmd_buf_addr = 0;
	return rc;
}

/*
 * cam_ope_mgr_is_valid_inconfig - check that the packet's input IO
 * configs do not exceed OPE_IN_RES_MAX.
 */
static bool cam_ope_mgr_is_valid_inconfig(struct cam_packet *packet)
{
	int i, num_in_map_entries = 0;
	bool in_config_valid = false;
	struct cam_buf_io_cfg *io_cfg_ptr = NULL;

	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload_flex +
		packet->io_configs_offset/4);

	for (i = 0 ; i < packet->num_io_configs; i++)
		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT)
			num_in_map_entries++;

	if (num_in_map_entries <= OPE_IN_RES_MAX) {
		in_config_valid = true;
	} else {
		CAM_ERR(CAM_OPE, "In config entries(%u) more than allowed(%u)",
			num_in_map_entries, OPE_IN_RES_MAX);
	}

	CAM_DBG(CAM_OPE, "number of in_config info: %u %u %u %u",
		packet->num_io_configs, OPE_MAX_IO_BUFS,
		num_in_map_entries, OPE_IN_RES_MAX);

	return in_config_valid;
}

/*
 * cam_ope_mgr_is_valid_outconfig - check that the packet's output IO
 * configs do not exceed OPE_OUT_RES_MAX.
 */
static bool cam_ope_mgr_is_valid_outconfig(struct cam_packet *packet)
{
	int i, num_out_map_entries = 0;
	bool out_config_valid = false;
	struct cam_buf_io_cfg *io_cfg_ptr = NULL;

	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload_flex +
		packet->io_configs_offset/4);

	for (i = 0 ; i < packet->num_io_configs; i++)
		if (io_cfg_ptr[i].direction == CAM_BUF_OUTPUT)
			num_out_map_entries++;

	if (num_out_map_entries <= OPE_OUT_RES_MAX) {
		out_config_valid = true;
	} else {
		CAM_ERR(CAM_OPE, "Out config entries(%u) more than allowed(%u)",
			num_out_map_entries, OPE_OUT_RES_MAX);
	}

	CAM_DBG(CAM_OPE, "number of out_config info: %u %u %u %u",
		packet->num_io_configs, OPE_MAX_IO_BUFS,
		num_out_map_entries, OPE_OUT_RES_MAX);

	return out_config_valid;
}

/*
 * cam_ope_mgr_pkt_validation - sanity-check an incoming config packet:
 * opcode, io-config count, cmd-buffer count and in/out config limits.
 * Returns 0 when the packet is acceptable, -EINVAL otherwise.
 */
static int cam_ope_mgr_pkt_validation(struct cam_packet *packet)
{
	if ((packet->header.op_code & 0xff) !=
		OPE_OPCODE_CONFIG) {
		CAM_ERR(CAM_OPE, "Invalid Opcode in pkt: %d",
			packet->header.op_code & 0xff);
		return -EINVAL;
	}

	if (!packet->num_io_configs ||
		packet->num_io_configs > OPE_MAX_IO_BUFS) {
		CAM_ERR(CAM_OPE, "Invalid number of io configs: %d %d",
			OPE_MAX_IO_BUFS, packet->num_io_configs);
		return -EINVAL;
	}

	if (!packet->num_cmd_buf ||
		packet->num_cmd_buf > OPE_PACKET_MAX_CMD_BUFS) {
		CAM_ERR(CAM_OPE, "Invalid number of cmd buffers: %d %d",
			OPE_PACKET_MAX_CMD_BUFS, packet->num_cmd_buf);
		return -EINVAL;
	}

	if (!cam_ope_mgr_is_valid_inconfig(packet) ||
		!cam_ope_mgr_is_valid_outconfig(packet)) {
		return -EINVAL;
	}

	CAM_DBG(CAM_OPE, "number of cmd/patch info: %u %u %u %u",
		packet->num_cmd_buf,
		packet->num_io_configs, OPE_MAX_IO_BUFS,
		packet->num_patches);
	return 0;
}

/*
 * cam_ope_get_acquire_info - copy and validate the userspace acquire
 * structure into @ctx->ope_acquire.
 * Returns 0 on success, -EFAULT on copy/type errors, -EINVAL on bad
 * counts or mode.
 */
static int cam_ope_get_acquire_info(struct cam_ope_hw_mgr *hw_mgr,
	struct cam_hw_acquire_args *args,
	struct cam_ope_ctx *ctx)
{
	int i = 0;

	if (args->num_acq > 1) {
		CAM_ERR(CAM_OPE, "Invalid number of resources: %d",
			args->num_acq);
		return -EINVAL;
	}

	if (copy_from_user(&ctx->ope_acquire,
		(void __user *)args->acquire_info,
		sizeof(struct ope_acquire_dev_info))) {
		CAM_ERR(CAM_OPE, "Failed in acquire");
		return -EFAULT;
	}

	if (ctx->ope_acquire.secure_mode > CAM_SECURE_MODE_SECURE) {
		CAM_ERR(CAM_OPE, "Invalid mode:%d",
			ctx->ope_acquire.secure_mode);
		return -EINVAL;
	}

	if (ctx->ope_acquire.num_out_res > OPE_OUT_RES_MAX) {
		CAM_ERR(CAM_OPE, "num of out resources exceeding : %u",
			ctx->ope_acquire.num_out_res);
		return -EINVAL;
	}

	if (ctx->ope_acquire.num_in_res > OPE_IN_RES_MAX) {
		CAM_ERR(CAM_OPE, "num of in resources exceeding : %u",
			ctx->ope_acquire.num_in_res);
		return -EINVAL;
	}

	if (ctx->ope_acquire.dev_type >= OPE_DEV_TYPE_MAX) {
		CAM_ERR(CAM_OPE, "Invalid device type: %d",
			ctx->ope_acquire.dev_type);
		return -EFAULT;
	}

	if (ctx->ope_acquire.hw_type >= OPE_HW_TYPE_MAX) {
		CAM_ERR(CAM_OPE, "Invalid HW type: %d",
			ctx->ope_acquire.hw_type);
		return -EFAULT;
	}

	CAM_DBG(CAM_OPE, "top: %u %u %s %u %u %u %u %u",
		ctx->ope_acquire.hw_type, ctx->ope_acquire.dev_type,
		ctx->ope_acquire.dev_name,
		ctx->ope_acquire.nrt_stripes_for_arb,
		ctx->ope_acquire.secure_mode, ctx->ope_acquire.batch_size,
		ctx->ope_acquire.num_in_res, ctx->ope_acquire.num_out_res);

	for (i = 0; i < ctx->ope_acquire.num_in_res; i++) {
		CAM_DBG(CAM_OPE, "IN: %u %u %u %u %u %u %u %u",
			ctx->ope_acquire.in_res[i].res_id,
			ctx->ope_acquire.in_res[i].format,
			ctx->ope_acquire.in_res[i].width,
			ctx->ope_acquire.in_res[i].height,
			ctx->ope_acquire.in_res[i].alignment,
			ctx->ope_acquire.in_res[i].unpacker_format,
			ctx->ope_acquire.in_res[i].max_stripe_size,
			ctx->ope_acquire.in_res[i].fps);
	}

	for (i = 0; i < ctx->ope_acquire.num_out_res; i++) {
		CAM_DBG(CAM_OPE, "OUT: %u %u %u %u %u %u %u %u",
			ctx->ope_acquire.out_res[i].res_id,
			ctx->ope_acquire.out_res[i].format,
			ctx->ope_acquire.out_res[i].width,
			ctx->ope_acquire.out_res[i].height,
			ctx->ope_acquire.out_res[i].alignment,
			ctx->ope_acquire.out_res[i].packer_format,
			ctx->ope_acquire.out_res[i].subsample_period,
			ctx->ope_acquire.out_res[i].subsample_pattern);
	}

	return 0;
}

/*
 * cam_ope_get_free_ctx - claim the first free context slot.
 * Returns the slot index, or -EINVAL when none is free or the slot found
 * is not in the FREE state.  Caller holds the hw-mgr mutex.
 *
 * NOTE(review): find_first_zero_bit() returns an unsigned value, so the
 * "i < 0" half of the check below can never be true.
 */
static int cam_ope_get_free_ctx(struct cam_ope_hw_mgr *hw_mgr)
{
	int i;

	i = find_first_zero_bit(hw_mgr->ctx_bitmap, hw_mgr->ctx_bits);
	if (i >= OPE_CTX_MAX || i < 0) {
		CAM_ERR(CAM_OPE, "Invalid ctx id = %d", i);
		return -EINVAL;
	}

	mutex_lock(&hw_mgr->ctx[i].ctx_mutex);
	if (hw_mgr->ctx[i].ctx_state != OPE_CTX_STATE_FREE) {
		CAM_ERR(CAM_OPE, "Invalid ctx %d state %d",
			i, hw_mgr->ctx[i].ctx_state);
		mutex_unlock(&hw_mgr->ctx[i].ctx_mutex);
		return -EINVAL;
	}
	set_bit(i, hw_mgr->ctx_bitmap);
	mutex_unlock(&hw_mgr->ctx[i].ctx_mutex);

	return i;
}


/*
 * cam_ope_put_free_ctx - release context slot @ctx_id back to the free
 * pool.  Always returns 0 (an out-of-range id is logged and ignored).
 */
static int cam_ope_put_free_ctx(struct cam_ope_hw_mgr *hw_mgr, uint32_t ctx_id)
{
	if (ctx_id >= OPE_CTX_MAX) {
		CAM_ERR(CAM_OPE, "Invalid ctx_id: %d", ctx_id);
		return 0;
	}

	hw_mgr->ctx[ctx_id].ctx_state = OPE_CTX_STATE_FREE;
	clear_bit(ctx_id, hw_mgr->ctx_bitmap);

	return 0;
}

/*
 * cam_ope_mgr_get_hw_caps - QUERY_CAP handler: collect per-device HW
 * versions plus IOMMU handles and copy the result back to userspace.
 * Returns 0 on success, -EINVAL/-EFAULT on bad args or copy failures.
 */
static int cam_ope_mgr_get_hw_caps(void *hw_priv, void *hw_caps_args)
{
	struct cam_ope_hw_mgr *hw_mgr;
	struct cam_query_cap_cmd *query_cap = hw_caps_args;
	struct ope_hw_ver hw_ver;
	int rc = 0, i;

	if (!hw_priv || !hw_caps_args) {
		CAM_ERR(CAM_OPE, "Invalid args: %x %x", hw_priv, hw_caps_args);
		return -EINVAL;
	}

	if (sizeof(struct ope_query_cap_cmd) != query_cap->size) {
		CAM_ERR(CAM_OPE,
			"Input query cap size:%u does not match expected query cap size: %u",
			query_cap->size, sizeof(struct ope_query_cap_cmd));
		return -EFAULT;
	}

	hw_mgr = hw_priv;
	mutex_lock(&hw_mgr->hw_mgr_mutex);
	if (copy_from_user(&hw_mgr->ope_caps,
		u64_to_user_ptr(query_cap->caps_handle),
		sizeof(struct ope_query_cap_cmd))) {
		CAM_ERR(CAM_OPE, "copy_from_user failed: size = %d",
			sizeof(struct ope_query_cap_cmd));
		rc = -EFAULT;
		goto end;
	}

	for (i = 0; i < hw_mgr->num_ope; i++) {
		rc = hw_mgr->ope_dev_intf[i]->hw_ops.get_hw_caps(
			hw_mgr->ope_dev_intf[i]->hw_priv,
			&hw_ver, sizeof(hw_ver));
		if (rc)
			goto end;

		hw_mgr->ope_caps.hw_ver[i] = hw_ver;
	}

	hw_mgr->ope_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
	hw_mgr->ope_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
	hw_mgr->ope_caps.cdm_iommu_handle.non_secure = hw_mgr->iommu_cdm_hdl;
	hw_mgr->ope_caps.cdm_iommu_handle.secure = hw_mgr->iommu_sec_cdm_hdl;
	hw_mgr->ope_caps.num_ope = hw_mgr->num_ope;

	CAM_DBG(CAM_OPE, "iommu sec %d iommu ns %d cdm s %d cdm ns %d",
		hw_mgr->ope_caps.dev_iommu_handle.secure,
		hw_mgr->ope_caps.dev_iommu_handle.non_secure,
		hw_mgr->ope_caps.cdm_iommu_handle.secure,
		hw_mgr->ope_caps.cdm_iommu_handle.non_secure);

	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
		&hw_mgr->ope_caps, sizeof(struct ope_query_cap_cmd))) {
		CAM_ERR(CAM_OPE, "copy_to_user failed: size = %d",
			sizeof(struct ope_query_cap_cmd));
		rc = -EFAULT;
	}

end:
	mutex_unlock(&hw_mgr->hw_mgr_mutex);
	return rc;
}

/*
 * cam_ope_mgr_acquire_hw - ACQUIRE handler: claim a context slot, copy
 * the acquire info, acquire/stream-on a CDM handle and, on first context,
 * initialize the OPE devices, install the IRQ callback and reset the
 * clock/bandwidth bookkeeping.
 *
 * NOTE(review): on CAM_MEM_ZALLOC failure the code does goto end with rc
 * still 0 from the preceding success, so an OOM is reported as success —
 * confirm against the (out-of-view) end label handling.  Also the OOM log
 * uses the CAM_ISP tag in this OPE driver.
 */
static int cam_ope_mgr_acquire_hw(void *hw_priv, void *hw_acquire_args)
{
	int rc = 0, i;
	int ctx_id;
	struct cam_ope_hw_mgr *hw_mgr = hw_priv;
	struct cam_ope_ctx *ctx;
	struct cam_hw_acquire_args *args = hw_acquire_args;
	struct cam_ope_dev_acquire ope_dev_acquire;
	struct cam_ope_dev_release ope_dev_release;
	struct cam_cdm_acquire_data *cdm_acquire;
	struct cam_ope_dev_init init;
	struct cam_ope_dev_clk_update clk_update;
	struct cam_ope_dev_bw_update *bw_update;
	struct cam_ope_set_irq_cb irq_cb;
	struct cam_hw_info *dev = NULL;
	struct cam_hw_soc_info *soc_info = NULL;
	int32_t idx;

	if ((!hw_priv) || (!hw_acquire_args)) {
		CAM_ERR(CAM_OPE, "Invalid args: %x %x",
			hw_priv, hw_acquire_args);
		return -EINVAL;
	}

	mutex_lock(&hw_mgr->hw_mgr_mutex);
	ctx_id = cam_ope_get_free_ctx(hw_mgr);
	if (ctx_id < 0) {
		CAM_ERR(CAM_OPE, "No free ctx");
		mutex_unlock(&hw_mgr->hw_mgr_mutex);
		return ctx_id;
	}

	ctx = &hw_mgr->ctx[ctx_id];
	ctx->ctx_id = ctx_id;
	mutex_lock(&ctx->ctx_mutex);
	rc = cam_ope_get_acquire_info(hw_mgr, args, ctx);
	if (rc < 0) {
		CAM_ERR(CAM_OPE, "get_acquire info failed: %d", rc);
		goto end;
	}

	cdm_acquire = CAM_MEM_ZALLOC(sizeof(struct cam_cdm_acquire_data), GFP_KERNEL);
	if (!cdm_acquire) {
		CAM_ERR(CAM_ISP, "Out of memory");
		goto end;
	}
	strscpy(cdm_acquire->identifier, "ope", sizeof("ope"));
	/* RT contexts get the high-priority BL FIFO and a shorter timeout */
	if (ctx->ope_acquire.dev_type == OPE_DEV_TYPE_OPE_RT) {
		cdm_acquire->priority = CAM_CDM_BL_FIFO_3;
		ctx->req_timer_timeout = OPE_REQUEST_RT_TIMEOUT;
	}
	else if (ctx->ope_acquire.dev_type ==
		OPE_DEV_TYPE_OPE_NRT) {
		cdm_acquire->priority = CAM_CDM_BL_FIFO_0;
		ctx->req_timer_timeout = OPE_REQUEST_NRT_TIMEOUT;
	}
	else
		goto free_cdm_acquire;

	cdm_acquire->cell_index = 0;
	cdm_acquire->handle = 0;
	cdm_acquire->userdata = ctx;
	cdm_acquire->cam_cdm_callback = cam_ope_ctx_cdm_callback;
	cdm_acquire->id = CAM_CDM_VIRTUAL;
	cdm_acquire->base_array_cnt = 1;
	cdm_acquire->base_array[0] = hw_mgr->cdm_reg_map[OPE_DEV_OPE][0];

	rc = cam_cdm_acquire(cdm_acquire);
	if (rc) {
		CAM_ERR(CAM_OPE, "cdm_acquire is failed: %d", rc);
		goto cdm_acquire_failed;
	}

	ctx->ope_cdm.cdm_ops = cdm_acquire->ops;
	ctx->ope_cdm.cdm_handle = cdm_acquire->handle;

	rc = cam_cdm_stream_on(cdm_acquire->handle);
	if (rc) {
		CAM_ERR(CAM_OPE, "cdm stream on failure: %d", rc);
		goto cdm_stream_on_failure;
	}

	/* One-time device bring-up when the first context is acquired */
	if (!hw_mgr->ope_ctx_cnt) {
		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
			init.hfi_en = ope_hw_mgr->hfi_en;
			rc = hw_mgr->ope_dev_intf[i]->hw_ops.init(
				hw_mgr->ope_dev_intf[i]->hw_priv, &init,
				sizeof(init));
			if (rc) {
				CAM_ERR(CAM_OPE, "OPE Dev init failed: %d", rc);
				goto ope_dev_init_failure;
			}
		}

		/* Install IRQ CB */
		irq_cb.ope_hw_mgr_cb = cam_ope_hw_mgr_cb;
		irq_cb.data = hw_mgr;
		for (i = 0; i < ope_hw_mgr->num_ope; i++) {
			init.hfi_en = ope_hw_mgr->hfi_en;
			rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
				hw_mgr->ope_dev_intf[i]->hw_priv,
				OPE_HW_SET_IRQ_CB,
				&irq_cb, sizeof(irq_cb));
			if (rc) {
				CAM_ERR(CAM_OPE, "OPE Dev init failed: %d", rc);
				goto ope_irq_set_failed;
			}
		}

		dev = (struct cam_hw_info *)hw_mgr->ope_dev_intf[0]->hw_priv;
		soc_info = &dev->soc_info;
		idx = soc_info->src_clk_idx;

		/* Seed the clock bookkeeping from the TURBO corner */
		hw_mgr->clk_info.base_clk =
			soc_info->clk_rate[CAM_TURBO_VOTE][idx];
		hw_mgr->clk_info.threshold = 5;
		hw_mgr->clk_info.over_clked = 0;

		for (i = 0; i < CAM_OPE_MAX_PER_PATH_VOTES; i++) {
			hw_mgr->clk_info.axi_path[i].camnoc_bw = 0;
			hw_mgr->clk_info.axi_path[i].mnoc_ab_bw
= 0; + hw_mgr->clk_info.axi_path[i].mnoc_ib_bw = 0; + hw_mgr->clk_info.axi_path[i].ddr_ab_bw = 0; + hw_mgr->clk_info.axi_path[i].ddr_ib_bw = 0; + } + } + + ope_dev_acquire.ctx_id = ctx_id; + ope_dev_acquire.ope_acquire = &ctx->ope_acquire; + + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_ACQUIRE, + &ope_dev_acquire, sizeof(ope_dev_acquire)); + if (rc) { + CAM_ERR(CAM_OPE, "OPE Dev acquire failed: %d", rc); + goto ope_dev_acquire_failed; + } + } + + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + dev = (struct cam_hw_info *)hw_mgr->ope_dev_intf[i]->hw_priv; + soc_info = &dev->soc_info; + idx = soc_info->src_clk_idx; + clk_update.clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][idx]; + hw_mgr->clk_info.curr_clk = + soc_info->clk_rate[CAM_TURBO_VOTE][idx]; + + rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_CLK_UPDATE, + &clk_update, sizeof(clk_update)); + if (rc) { + CAM_ERR(CAM_OPE, "OPE Dev clk update failed: %d", rc); + goto ope_clk_update_failed; + } + } + + bw_update = CAM_MEM_ZALLOC(sizeof(struct cam_ope_dev_bw_update), GFP_KERNEL); + if (!bw_update) { + CAM_ERR(CAM_OPE, "Out of memory"); + goto ope_clk_update_failed; + } + bw_update->ahb_vote_valid = false; + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + bw_update->axi_vote.num_paths = 1; + bw_update->axi_vote_valid = true; + bw_update->axi_vote.axi_path[0].camnoc_bw = 600000000; + bw_update->axi_vote.axi_path[0].mnoc_ab_bw = 600000000; + bw_update->axi_vote.axi_path[0].mnoc_ib_bw = 600000000; + bw_update->axi_vote.axi_path[0].ddr_ab_bw = 600000000; + bw_update->axi_vote.axi_path[0].ddr_ib_bw = 600000000; + bw_update->axi_vote.axi_path[0].transac_type = + CAM_AXI_TRANSACTION_WRITE; + bw_update->axi_vote.axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_ALL; + rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_BW_UPDATE, + bw_update, 
sizeof(*bw_update)); + if (rc) { + CAM_ERR(CAM_OPE, "OPE Dev clk update failed: %d", rc); + goto free_bw_update; + } + } + + cam_ope_start_req_timer(ctx); + cam_ope_device_timer_start(hw_mgr); + hw_mgr->ope_ctx_cnt++; + ctx->context_priv = args->context_data; + args->ctxt_to_hw_map = ctx; + ctx->ctxt_event_cb = args->event_cb; + cam_ope_ctx_clk_info_init(ctx); + ctx->ctx_state = OPE_CTX_STATE_ACQUIRED; + CAM_MEM_FREE(cdm_acquire); + cdm_acquire = NULL; + CAM_MEM_FREE(bw_update); + bw_update = NULL; + + mutex_unlock(&ctx->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + CAM_INFO(CAM_OPE, "OPE: %d acquire succesfull rc %d", ctx_id, rc); + return rc; + +free_bw_update: + CAM_MEM_ZFREE((void *)bw_update, sizeof(struct cam_ope_dev_bw_update)); + bw_update = NULL; +ope_clk_update_failed: + ope_dev_release.ctx_id = ctx_id; + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + if (hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RELEASE, + &ope_dev_release, sizeof(ope_dev_release))) + CAM_ERR(CAM_OPE, "OPE Dev release failed"); + } +ope_dev_acquire_failed: + if (!hw_mgr->ope_ctx_cnt) { + irq_cb.ope_hw_mgr_cb = NULL; + irq_cb.data = hw_mgr; + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + init.hfi_en = ope_hw_mgr->hfi_en; + if (hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, + OPE_HW_SET_IRQ_CB, + &irq_cb, sizeof(irq_cb))) + CAM_ERR(CAM_OPE, + "OPE IRQ de register failed"); + } + } +ope_irq_set_failed: + if (!hw_mgr->ope_ctx_cnt) { + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + if (hw_mgr->ope_dev_intf[i]->hw_ops.deinit( + hw_mgr->ope_dev_intf[i]->hw_priv, NULL, 0)) + CAM_ERR(CAM_OPE, "OPE deinit fail"); + if (hw_mgr->ope_dev_intf[i]->hw_ops.stop( + hw_mgr->ope_dev_intf[i]->hw_priv, + NULL, 0)) + CAM_ERR(CAM_OPE, "OPE stop fail"); + } + } +ope_dev_init_failure: +cdm_stream_on_failure: + cam_cdm_release(cdm_acquire->handle); + ctx->ope_cdm.cdm_ops = NULL; + ctx->ope_cdm.cdm_handle = 0; + 
+cdm_acquire_failed: +free_cdm_acquire: + CAM_MEM_ZFREE((void *)cdm_acquire, sizeof(struct cam_cdm_acquire_data)); + cdm_acquire = NULL; +end: + args->ctxt_to_hw_map = NULL; + cam_ope_put_free_ctx(hw_mgr, ctx_id); + mutex_unlock(&ctx->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static int cam_ope_mgr_remove_bw(struct cam_ope_hw_mgr *hw_mgr, int ctx_id) +{ + int i, path_index, rc = 0; + struct cam_ope_ctx *ctx_data = NULL; + struct cam_ope_clk_info *hw_mgr_clk_info; + + ctx_data = &hw_mgr->ctx[ctx_id]; + hw_mgr_clk_info = &hw_mgr->clk_info; + + for (i = 0; i < ctx_data->clk_info.num_paths; i++) { + path_index = + ctx_data->clk_info.axi_path[i].path_data_type - + CAM_AXI_PATH_DATA_OPE_START_OFFSET; + + if (path_index >= CAM_OPE_MAX_PER_PATH_VOTES) { + CAM_WARN(CAM_OPE, + "Invalid path %d, start offset=%d, max=%d", + ctx_data->clk_info.axi_path[i].path_data_type, + CAM_AXI_PATH_DATA_OPE_START_OFFSET, + CAM_OPE_MAX_PER_PATH_VOTES); + continue; + } + + hw_mgr_clk_info->axi_path[path_index].camnoc_bw -= + ctx_data->clk_info.axi_path[i].camnoc_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ab_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ab_bw; + hw_mgr_clk_info->axi_path[path_index].mnoc_ib_bw -= + ctx_data->clk_info.axi_path[i].mnoc_ib_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ab_bw -= + ctx_data->clk_info.axi_path[i].ddr_ab_bw; + hw_mgr_clk_info->axi_path[path_index].ddr_ib_bw -= + ctx_data->clk_info.axi_path[i].ddr_ib_bw; + } + + rc = cam_ope_update_cpas_vote(hw_mgr, ctx_data); + + return rc; +} + +static int cam_ope_mgr_ope_clk_remove(struct cam_ope_hw_mgr *hw_mgr, int ctx_id) +{ + struct cam_ope_ctx *ctx_data = NULL; + struct cam_ope_clk_info *hw_mgr_clk_info; + + ctx_data = &hw_mgr->ctx[ctx_id]; + hw_mgr_clk_info = &hw_mgr->clk_info; + + if (hw_mgr_clk_info->base_clk >= ctx_data->clk_info.base_clk) + hw_mgr_clk_info->base_clk -= ctx_data->clk_info.base_clk; + + /* reset clock info */ + ctx_data->clk_info.curr_fc = 0; + 
ctx_data->clk_info.base_clk = 0; + + return 0; +} + +static int cam_ope_mgr_release_ctx(struct cam_ope_hw_mgr *hw_mgr, int ctx_id) +{ + int i = 0, rc = 0; + struct cam_ope_dev_release ope_dev_release; + + if (ctx_id >= OPE_CTX_MAX) { + CAM_ERR(CAM_OPE, "ctx_id is wrong: %d", ctx_id); + return -EINVAL; + } + + mutex_lock(&hw_mgr->ctx[ctx_id].ctx_mutex); + if (hw_mgr->ctx[ctx_id].ctx_state != + OPE_CTX_STATE_ACQUIRED) { + mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex); + CAM_DBG(CAM_OPE, "ctx id: %d not in right state: %d", + ctx_id, hw_mgr->ctx[ctx_id].ctx_state); + return 0; + } + + hw_mgr->ctx[ctx_id].ctx_state = OPE_CTX_STATE_RELEASE; + + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + ope_dev_release.ctx_id = ctx_id; + rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RELEASE, + &ope_dev_release, sizeof(ope_dev_release)); + if (rc) + CAM_ERR(CAM_OPE, "OPE Dev release failed: %d", rc); + } + + rc = cam_cdm_stream_off(hw_mgr->ctx[ctx_id].ope_cdm.cdm_handle); + if (rc) + CAM_ERR(CAM_OPE, "OPE CDM streamoff failed: %d", rc); + + rc = cam_cdm_release(hw_mgr->ctx[ctx_id].ope_cdm.cdm_handle); + if (rc) + CAM_ERR(CAM_OPE, "OPE CDM relase failed: %d", rc); + + + for (i = 0; i < CAM_CTX_REQ_MAX; i++) { + if (!hw_mgr->ctx[ctx_id].req_list[i]) + continue; + + if (hw_mgr->ctx[ctx_id].req_list[i]->cdm_cmd) { + CAM_MEM_ZFREE((void *)hw_mgr->ctx[ctx_id].req_list[i]->cdm_cmd, + ((sizeof(struct cam_cdm_bl_request)) + + ((OPE_MAX_CDM_BLS - 1) * + sizeof(struct cam_cdm_bl_cmd)))); + hw_mgr->ctx[ctx_id].req_list[i]->cdm_cmd = NULL; + } + cam_ope_free_io_config(hw_mgr->ctx[ctx_id].req_list[i]); + CAM_MEM_ZFREE((void *)hw_mgr->ctx[ctx_id].req_list[i], + sizeof(struct cam_ope_request)); + hw_mgr->ctx[ctx_id].req_list[i] = NULL; + clear_bit(i, hw_mgr->ctx[ctx_id].bitmap); + } + + cam_ope_req_timer_stop(&hw_mgr->ctx[ctx_id]); + hw_mgr->ctx[ctx_id].ope_cdm.cdm_handle = 0; + hw_mgr->ctx[ctx_id].req_cnt = 0; + hw_mgr->ctx[ctx_id].last_flush_req = 0; + 
cam_ope_put_free_ctx(hw_mgr, ctx_id); + + rc = cam_ope_mgr_ope_clk_remove(hw_mgr, ctx_id); + if (rc) + CAM_ERR(CAM_OPE, "OPE clk update failed: %d", rc); + + hw_mgr->ope_ctx_cnt--; + mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex); + CAM_DBG(CAM_OPE, "X: ctx_id = %d", ctx_id); + + return 0; +} + +static int cam_ope_mgr_release_hw(void *hw_priv, void *hw_release_args) +{ + int i, rc = 0; + int ctx_id = 0; + struct cam_hw_release_args *release_hw = hw_release_args; + struct cam_ope_hw_mgr *hw_mgr = hw_priv; + struct cam_ope_ctx *ctx_data = NULL; + struct cam_ope_set_irq_cb irq_cb; + struct cam_hw_intf *dev_intf; + + if (!release_hw || !hw_mgr) { + CAM_ERR(CAM_OPE, "Invalid args: %pK %pK", release_hw, hw_mgr); + return -EINVAL; + } + + ctx_data = release_hw->ctxt_to_hw_map; + if (!ctx_data) { + CAM_ERR(CAM_OPE, "NULL ctx data"); + return -EINVAL; + } + + ctx_id = ctx_data->ctx_id; + if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX) { + CAM_ERR(CAM_OPE, "Invalid ctx id: %d", ctx_id); + return -EINVAL; + } + + mutex_lock(&hw_mgr->ctx[ctx_id].ctx_mutex); + if (hw_mgr->ctx[ctx_id].ctx_state != OPE_CTX_STATE_ACQUIRED) { + CAM_DBG(CAM_OPE, "ctx is not in use: %d", ctx_id); + mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex); + return -EINVAL; + } + mutex_unlock(&hw_mgr->ctx[ctx_id].ctx_mutex); + + mutex_lock(&hw_mgr->hw_mgr_mutex); + rc = cam_ope_mgr_release_ctx(hw_mgr, ctx_id); + if (!hw_mgr->ope_ctx_cnt) { + CAM_DBG(CAM_OPE, "Last Release"); + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + dev_intf = hw_mgr->ope_dev_intf[i]; + irq_cb.ope_hw_mgr_cb = NULL; + irq_cb.data = NULL; + rc = dev_intf->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, + OPE_HW_SET_IRQ_CB, + &irq_cb, sizeof(irq_cb)); + if (rc) + CAM_ERR(CAM_OPE, "IRQ dereg failed: %d", rc); + } + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + dev_intf = hw_mgr->ope_dev_intf[i]; + rc = dev_intf->hw_ops.deinit( + hw_mgr->ope_dev_intf[i]->hw_priv, + NULL, 0); + if (rc) + CAM_ERR(CAM_OPE, "deinit failed: %d", rc); + } + 
cam_ope_device_timer_stop(hw_mgr); + } + + rc = cam_ope_mgr_remove_bw(hw_mgr, ctx_id); + if (rc) + CAM_ERR(CAM_OPE, "OPE remove bw failed: %d", rc); + + if (!hw_mgr->ope_ctx_cnt) { + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + dev_intf = hw_mgr->ope_dev_intf[i]; + rc = dev_intf->hw_ops.stop( + hw_mgr->ope_dev_intf[i]->hw_priv, + NULL, 0); + if (rc) + CAM_ERR(CAM_OPE, "stop failed: %d", rc); + } + } + + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + CAM_DBG(CAM_OPE, "Release done for ctx_id %d", ctx_id); + return rc; +} + + +static int cam_ope_packet_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + struct cam_ope_clk_bw_request *clk_info; + struct ope_clk_bw_request_v2 *soc_req_v2; + struct cam_ope_clk_bw_req_internal_v2 *clk_info_v2; + struct ope_cmd_generic_blob *blob; + struct cam_ope_ctx *ctx_data; + uint32_t index; + size_t clk_update_size; + int rc = 0, i; + + if (!blob_data || (blob_size == 0)) { + CAM_ERR(CAM_OPE, "Invalid blob info %pK %d", blob_data, + blob_size); + return -EINVAL; + } + + blob = (struct ope_cmd_generic_blob *)user_data; + ctx_data = blob->ctx; + index = blob->req_idx; + + switch (blob_type) { + case OPE_CMD_GENERIC_BLOB_CLK_V2: + if (blob_size < sizeof(struct ope_clk_bw_request_v2)) { + CAM_ERR(CAM_OPE, "Mismatch blob size %d expected %lu", + blob_size, + sizeof(struct ope_clk_bw_request_v2)); + return -EINVAL; + } + + soc_req_v2 = (struct ope_clk_bw_request_v2 *)blob_data; + if (soc_req_v2->num_paths > CAM_OPE_MAX_PER_PATH_VOTES || + soc_req_v2->num_paths == 0) { + CAM_ERR(CAM_OPE, "Invalid num paths: %u", + soc_req_v2->num_paths); + return -EINVAL; + } + + /* Check for integer overflow */ + if (soc_req_v2->num_paths != 1) { + if (sizeof(struct cam_axi_per_path_bw_vote) > + ((UINT_MAX - + sizeof(struct ope_clk_bw_request_v2)) / + (soc_req_v2->num_paths - 1))) { + CAM_ERR(CAM_OPE, + "Size exceeds limit paths:%u size per path:%lu", + soc_req_v2->num_paths - 1, + sizeof( + struct 
cam_axi_per_path_bw_vote)); + return -EINVAL; + } + } + + clk_update_size = sizeof(struct ope_clk_bw_request_v2) + + ((soc_req_v2->num_paths - 1) * + sizeof(struct cam_axi_per_path_bw_vote)); + if (blob_size < clk_update_size) { + CAM_ERR(CAM_OPE, "Invalid blob size: %u", + blob_size); + return -EINVAL; + } + + clk_info = &ctx_data->req_list[index]->clk_info; + clk_info_v2 = &ctx_data->req_list[index]->clk_info_v2; + clk_info_v2->budget_ns = soc_req_v2->budget_ns; + clk_info_v2->frame_cycles = soc_req_v2->frame_cycles; + clk_info_v2->rt_flag = soc_req_v2->rt_flag; + clk_info_v2->num_paths = soc_req_v2->num_paths; + + for (i = 0; i < soc_req_v2->num_paths; i++) { + clk_info_v2->axi_path[i].usage_data = + soc_req_v2->axi_path_flex[i].usage_data; + clk_info_v2->axi_path[i].transac_type = + soc_req_v2->axi_path_flex[i].transac_type; + clk_info_v2->axi_path[i].path_data_type = + soc_req_v2->axi_path_flex[i].path_data_type; + clk_info_v2->axi_path[i].vote_level = 0; + clk_info_v2->axi_path[i].camnoc_bw = soc_req_v2->axi_path_flex[i].camnoc_bw; + clk_info_v2->axi_path[i].mnoc_ab_bw = + soc_req_v2->axi_path_flex[i].mnoc_ab_bw; + clk_info_v2->axi_path[i].mnoc_ib_bw = + soc_req_v2->axi_path_flex[i].mnoc_ib_bw; + } + + /* Use v1 structure for clk fields */ + clk_info->budget_ns = clk_info_v2->budget_ns; + clk_info->frame_cycles = clk_info_v2->frame_cycles; + clk_info->rt_flag = clk_info_v2->rt_flag; + + CAM_DBG(CAM_OPE, "budget=%llu, frame_cycle=%llu, rt_flag=%d", + clk_info_v2->budget_ns, clk_info_v2->frame_cycles, + clk_info_v2->rt_flag); + break; + + default: + CAM_WARN(CAM_OPE, "Invalid blob type %d", blob_type); + break; + } + return rc; +} + +static int cam_ope_process_generic_cmd_buffer( + struct cam_packet *packet, + struct cam_ope_ctx *ctx_data, + int32_t index, + uint64_t *io_buf_addr) +{ + int i, rc = 0; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct ope_cmd_generic_blob cmd_generic_blob; + + cmd_generic_blob.ctx = ctx_data; + cmd_generic_blob.req_idx = 
index; + cmd_generic_blob.io_buf_addr = io_buf_addr; + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *) &packet->payload_flex + packet->cmd_buf_offset/4); + + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + if (!cmd_desc[i].length) + continue; + + if (cmd_desc[i].meta_data != OPE_CMD_META_GENERIC_BLOB) + continue; + + rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i], + cam_ope_packet_generic_blob_handler, &cmd_generic_blob); + if (rc) + CAM_ERR(CAM_OPE, "Failed in processing blobs %d", rc); + } + + return rc; +} + +static int cam_ope_mgr_prepare_hw_update(void *hw_priv, + void *hw_prepare_update_args) +{ + int rc = 0; + struct cam_packet *packet = NULL; + struct cam_ope_hw_mgr *hw_mgr = hw_priv; + struct cam_hw_prepare_update_args *prepare_args = + hw_prepare_update_args; + struct cam_ope_ctx *ctx_data = NULL; + uint32_t *ope_cmd_buf_addr; + uint32_t request_idx = 0; + struct cam_ope_request *ope_req; + struct timespec64 ts; + + if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) { + CAM_ERR(CAM_OPE, "Invalid args: %x %x", + prepare_args, hw_mgr); + return -EINVAL; + } + + ctx_data = prepare_args->ctxt_to_hw_map; + if (!ctx_data) { + CAM_ERR(CAM_OPE, "Invalid Context"); + return -EINVAL; + } + + mutex_lock(&ctx_data->ctx_mutex); + if (ctx_data->ctx_state != OPE_CTX_STATE_ACQUIRED) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_OPE, "ctx id %u is not acquired state: %d", + ctx_data->ctx_id, ctx_data->ctx_state); + return -EINVAL; + } + + packet = prepare_args->packet; + rc = cam_packet_util_validate_packet(packet, prepare_args->remain_len); + if (rc) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_OPE, + "packet validation failed: %d req_id: %d ctx: %d", + rc, packet->header.request_id, ctx_data->ctx_id); + return rc; + } + + rc = cam_ope_mgr_pkt_validation(packet); + if (rc) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_OPE, + "ope packet 
validation failed: %d req_id: %d ctx: %d", + rc, packet->header.request_id, ctx_data->ctx_id); + return -EINVAL; + } + + rc = cam_packet_util_process_patches(packet, prepare_args->buf_tracker, + hw_mgr->iommu_cdm_hdl, hw_mgr->iommu_sec_cdm_hdl, false); + if (rc) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_OPE, "Patching failed: %d req_id: %d ctx: %d", + rc, packet->header.request_id, ctx_data->ctx_id); + return -EINVAL; + } + + request_idx = find_first_zero_bit(ctx_data->bitmap, ctx_data->bits); + if (request_idx >= CAM_CTX_REQ_MAX || request_idx < 0) { + mutex_unlock(&ctx_data->ctx_mutex); + CAM_ERR(CAM_OPE, "Invalid ctx req slot = %d", request_idx); + return -EINVAL; + } + + ctx_data->req_list[request_idx] = + CAM_MEM_ZALLOC(sizeof(struct cam_ope_request), GFP_KERNEL); + if (!ctx_data->req_list[request_idx]) { + CAM_ERR(CAM_OPE, "mem allocation failed ctx:%d req_idx:%d", + ctx_data->ctx_id, request_idx); + rc = -ENOMEM; + goto req_mem_alloc_failed; + } + + ope_req = ctx_data->req_list[request_idx]; + ope_req->req_idx = request_idx; + ope_req->cdm_cmd = + CAM_MEM_ZALLOC(((sizeof(struct cam_cdm_bl_request)) + + ((OPE_MAX_CDM_BLS - 1) * + sizeof(struct cam_cdm_bl_cmd))), + GFP_KERNEL); + if (!ope_req->cdm_cmd) { + CAM_ERR(CAM_OPE, "Cdm mem alloc failed ctx:%d req_idx:%d", + ctx_data->ctx_id, request_idx); + rc = -ENOMEM; + goto req_cdm_mem_alloc_failed; + } + + rc = cam_ope_mgr_process_cmd_desc(hw_mgr, packet, + ctx_data, &ope_cmd_buf_addr, ope_req, prepare_args->buf_tracker); + if (rc) { + CAM_ERR(CAM_OPE, + "cmd desc processing failed :%d ctx: %d req_id:%d", + rc, ctx_data->ctx_id, packet->header.request_id); + goto end; + } + + rc = cam_ope_mgr_process_io_cfg(hw_mgr, packet, prepare_args, + ctx_data, request_idx); + if (rc) { + CAM_ERR(CAM_OPE, + "IO cfg processing failed: %d ctx: %d req_id:%d", + rc, ctx_data->ctx_id, packet->header.request_id); + goto free_buf; + } + + rc = cam_ope_mgr_create_kmd_buf(hw_mgr, packet, prepare_args, + ctx_data, ope_req, 
ope_cmd_buf_addr); + if (rc) { + CAM_ERR(CAM_OPE, + "create kmd buf failed: %d ctx: %d request_id:%d", + rc, ctx_data->ctx_id, packet->header.request_id); + goto free_buf; + } + rc = cam_ope_process_generic_cmd_buffer(packet, ctx_data, + request_idx, NULL); + if (rc) { + CAM_ERR(CAM_OPE, "Failed: %d ctx: %d req_id: %d req_idx: %d", + rc, ctx_data->ctx_id, packet->header.request_id, + request_idx); + goto free_buf; + } + + ope_req->cdm_cmd->genirq_buff = &ope_req->genirq_buff_info; + ope_req->hang_data.packet = packet; + prepare_args->num_hw_update_entries = 1; + prepare_args->hw_update_entries[0].addr = (uintptr_t)ope_req->cdm_cmd; + prepare_args->priv = ope_req; + + CAM_INFO(CAM_REQ, "OPE req %x num_batch %d", ope_req, ope_req->num_batch); + + ktime_get_boottime_ts64(&ts); + ctx_data->last_req_time = (uint64_t)((ts.tv_sec * 1000000000) + ts.tv_nsec); + CAM_DBG(CAM_REQ, "req_id= %llu ctx_id= %d lrt=%llu", + packet->header.request_id, ctx_data->ctx_id, + ctx_data->last_req_time); + cam_ope_req_timer_modify(ctx_data, ctx_data->req_timer_timeout); + set_bit(request_idx, ctx_data->bitmap); + cam_common_mem_free(ope_cmd_buf_addr); + cam_ope_mgr_put_cmd_buf(packet); + mutex_unlock(&ctx_data->ctx_mutex); + CAM_DBG(CAM_REQ, "Prepare Hw update Successful request_id: %d ctx: %d", + packet->header.request_id, ctx_data->ctx_id); + return rc; + +free_buf: + cam_common_mem_free(ope_cmd_buf_addr); +end: + cam_ope_mgr_put_cmd_buf(packet); + CAM_MEM_ZFREE((void *)ctx_data->req_list[request_idx]->cdm_cmd, + ((sizeof(struct cam_cdm_bl_request)) + + ((OPE_MAX_CDM_BLS - 1) * + sizeof(struct cam_cdm_bl_cmd)))); + ctx_data->req_list[request_idx]->cdm_cmd = NULL; +req_cdm_mem_alloc_failed: + CAM_MEM_ZFREE((void *)ctx_data->req_list[request_idx], + sizeof(struct cam_ope_request)); + ctx_data->req_list[request_idx] = NULL; +req_mem_alloc_failed: + clear_bit(request_idx, ctx_data->bitmap); + mutex_unlock(&ctx_data->ctx_mutex); + return rc; +} + +static int cam_ope_mgr_handle_config_err( + 
struct cam_hw_config_args *config_args, + struct cam_ope_ctx *ctx_data) +{ + struct cam_hw_done_event_data buf_data; + struct cam_ope_request *ope_req; + uint32_t req_idx; + + ope_req = config_args->priv; + + buf_data.request_id = ope_req->request_id; + buf_data.evt_param = CAM_SYNC_OPE_EVENT_CONFIG_ERR; + ctx_data->ctxt_event_cb(ctx_data->context_priv, CAM_CTX_EVT_ID_ERROR, + &buf_data); + + req_idx = ope_req->req_idx; + ope_req->request_id = 0; + CAM_MEM_ZFREE((void *)ctx_data->req_list[req_idx]->cdm_cmd, + ((sizeof(struct cam_cdm_bl_request)) + + ((OPE_MAX_CDM_BLS - 1) * + sizeof(struct cam_cdm_bl_cmd)))); + ctx_data->req_list[req_idx]->cdm_cmd = NULL; + cam_ope_free_io_config(ctx_data->req_list[req_idx]); + CAM_MEM_ZFREE((void *)ctx_data->req_list[req_idx], + sizeof(struct cam_ope_request)); + ctx_data->req_list[req_idx] = NULL; + clear_bit(req_idx, ctx_data->bitmap); + + return 0; +} + +static int cam_ope_mgr_enqueue_config(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, + struct cam_hw_config_args *config_args) +{ + int rc = 0; + uint64_t request_id = 0; + struct crm_workq_task *task; + struct ope_cmd_work_data *task_data; + struct cam_hw_update_entry *hw_update_entries; + struct cam_ope_request *ope_req = NULL; + + ope_req = config_args->priv; + request_id = config_args->request_id; + hw_update_entries = config_args->hw_update_entries; + + CAM_DBG(CAM_OPE, "req_id = %lld %pK", request_id, config_args->priv); + + task = cam_req_mgr_workq_get_task(ope_hw_mgr->cmd_work); + if (!task) { + CAM_ERR(CAM_OPE, "no empty task"); + return -ENOMEM; + } + + task_data = (struct ope_cmd_work_data *)task->payload; + task_data->data = (void *)hw_update_entries->addr; + task_data->req_id = request_id; + task_data->type = OPE_WORKQ_TASK_CMD_TYPE; + task->process_cb = cam_ope_mgr_process_cmd; + rc = cam_req_mgr_workq_enqueue_task(task, ctx_data, + CRM_TASK_PRIORITY_0); + + return rc; +} + +static int cam_ope_mgr_config_hw(void *hw_priv, void *hw_config_args) +{ + 
int rc = 0; + struct cam_ope_hw_mgr *hw_mgr = hw_priv; + struct cam_hw_config_args *config_args = hw_config_args; + struct cam_ope_ctx *ctx_data = NULL; + struct cam_ope_request *ope_req = NULL; + struct cam_cdm_bl_request *cdm_cmd; + + CAM_DBG(CAM_OPE, "E"); + if (!hw_mgr || !config_args) { + CAM_ERR(CAM_OPE, "Invalid arguments %pK %pK", + hw_mgr, config_args); + return -EINVAL; + } + + if (!config_args->num_hw_update_entries) { + CAM_ERR(CAM_OPE, "No hw update enteries are available"); + return -EINVAL; + } + + ctx_data = config_args->ctxt_to_hw_map; + mutex_lock(&hw_mgr->hw_mgr_mutex); + mutex_lock(&ctx_data->ctx_mutex); + if (ctx_data->ctx_state != OPE_CTX_STATE_ACQUIRED) { + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + CAM_ERR(CAM_OPE, "ctx id :%u is not in use", + ctx_data->ctx_id); + return -EINVAL; + } + + ope_req = config_args->priv; + cdm_cmd = (struct cam_cdm_bl_request *) config_args->hw_update_entries->addr; + cdm_cmd->cookie = ope_req->req_idx; + + cam_ope_mgr_ope_clk_update(hw_mgr, ctx_data, ope_req->req_idx); + ctx_data->req_list[ope_req->req_idx]->submit_timestamp = ktime_get(); + + if (ope_req->request_id <= ctx_data->last_flush_req) + CAM_WARN(CAM_OPE, + "Anomaly submitting flushed req %llu [last_flush %llu] in ctx %u", + ope_req->request_id, ctx_data->last_flush_req, + ctx_data->ctx_id); + + rc = cam_ope_mgr_enqueue_config(hw_mgr, ctx_data, config_args); + if (rc) + goto config_err; + + CAM_DBG(CAM_REQ, "req_id %llu, ctx_id %u io config", + ope_req->request_id, ctx_data->ctx_id); + + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + + return rc; +config_err: + cam_ope_mgr_handle_config_err(config_args, ctx_data); + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return rc; +} + +static int cam_ope_mgr_cmd(void *hw_mgr_priv, void *cmd_args) +{ + int rc = 0; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_ope_hw_mgr *hw_mgr = hw_mgr_priv; + + if 
(!hw_mgr_priv || !cmd_args) { + CAM_ERR(CAM_OPE, "Invalid arguments"); + return -EINVAL; + } + + switch (hw_cmd_args->cmd_type) { + case CAM_HW_MGR_CMD_DUMP_PF_INFO: + cam_ope_mgr_dump_pf_data(hw_mgr, hw_cmd_args); + break; + default: + CAM_ERR(CAM_OPE, "Invalid cmd: %d", + hw_cmd_args->cmd_type); + } + + return rc; +} + +static int cam_ope_mgr_hw_open_u(void *hw_priv, void *fw_download_args) +{ + struct cam_ope_hw_mgr *hw_mgr; + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_OPE, "Invalid args: %pK", hw_priv); + return -EINVAL; + } + + hw_mgr = hw_priv; + if (!hw_mgr->open_cnt) { + hw_mgr->open_cnt++; + } else { + rc = -EBUSY; + CAM_ERR(CAM_OPE, "Multiple opens are not supported"); + } + + return rc; +} + +static cam_ope_mgr_hw_close_u(void *hw_priv, void *hw_close_args) +{ + struct cam_ope_hw_mgr *hw_mgr; + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_OPE, "Invalid args: %pK", hw_priv); + return -EINVAL; + } + + hw_mgr = hw_priv; + if (!hw_mgr->open_cnt) { + rc = -EINVAL; + CAM_ERR(CAM_OPE, "device is already closed"); + } else { + hw_mgr->open_cnt--; + } + + return rc; +} + +static int cam_ope_mgr_flush_req(struct cam_ope_ctx *ctx_data, + struct cam_hw_flush_args *flush_args) +{ + int idx; + int64_t request_id; + + request_id = *(int64_t *)flush_args->flush_req_pending[0]; + for (idx = 0; idx < CAM_CTX_REQ_MAX; idx++) { + if (!ctx_data->req_list[idx]) + continue; + + if (ctx_data->req_list[idx]->request_id != request_id) + continue; + + ctx_data->req_list[idx]->request_id = 0; + CAM_MEM_ZFREE((void *)ctx_data->req_list[idx]->cdm_cmd, + ((sizeof(struct cam_cdm_bl_request)) + + ((OPE_MAX_CDM_BLS - 1) * + sizeof(struct cam_cdm_bl_cmd)))); + ctx_data->req_list[idx]->cdm_cmd = NULL; + cam_ope_free_io_config(ctx_data->req_list[idx]); + CAM_MEM_ZFREE((void *)ctx_data->req_list[idx], + sizeof(struct cam_ope_request)); + ctx_data->req_list[idx] = NULL; + clear_bit(idx, ctx_data->bitmap); + } + + return 0; +} + +static int cam_ope_mgr_flush_all(struct cam_ope_ctx 
*ctx_data, + struct cam_hw_flush_args *flush_args) +{ + int i, rc; + struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr; + + rc = cam_cdm_flush_hw(ctx_data->ope_cdm.cdm_handle); + + mutex_lock(&ctx_data->ctx_mutex); + for (i = 0; i < hw_mgr->num_ope; i++) { + rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd( + hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_RESET, + NULL, 0); + if (rc) + CAM_ERR(CAM_OPE, "OPE Dev reset failed: %d", rc); + } + + for (i = 0; i < CAM_CTX_REQ_MAX; i++) { + if (!ctx_data->req_list[i]) + continue; + + ctx_data->req_list[i]->request_id = 0; + CAM_MEM_ZFREE((void *)ctx_data->req_list[i]->cdm_cmd, + ((sizeof(struct cam_cdm_bl_request)) + + ((OPE_MAX_CDM_BLS - 1) * + sizeof(struct cam_cdm_bl_cmd)))); + ctx_data->req_list[i]->cdm_cmd = NULL; + cam_ope_free_io_config(ctx_data->req_list[i]); + CAM_MEM_ZFREE((void *)ctx_data->req_list[i], + sizeof(struct cam_ope_request)); + ctx_data->req_list[i] = NULL; + clear_bit(i, ctx_data->bitmap); + } + mutex_unlock(&ctx_data->ctx_mutex); + + return rc; +} + +static int cam_ope_mgr_hw_dump(void *hw_priv, void *hw_dump_args) +{ + struct cam_ope_ctx *ctx_data; + struct cam_ope_hw_mgr *hw_mgr = hw_priv; + struct cam_hw_dump_args *dump_args; + int idx; + ktime_t cur_time; + struct timespec64 cur_ts, req_ts; + uint64_t diff; + + if ((!hw_priv) || (!hw_dump_args)) { + CAM_ERR(CAM_OPE, "Invalid params %pK %pK", + hw_priv, hw_dump_args); + return -EINVAL; + } + + dump_args = (struct cam_hw_dump_args *)hw_dump_args; + ctx_data = dump_args->ctxt_to_hw_map; + + if (!ctx_data) { + CAM_ERR(CAM_OPE, "Invalid context"); + return -EINVAL; + } + + mutex_lock(&hw_mgr->hw_mgr_mutex); + mutex_lock(&ctx_data->ctx_mutex); + + CAM_INFO(CAM_OPE, "Req %lld", dump_args->request_id); + for (idx = 0; idx < CAM_CTX_REQ_MAX; idx++) { + if (!ctx_data->req_list[idx]) + continue; + + if (ctx_data->req_list[idx]->request_id == + dump_args->request_id) + break; + } + + /* no matching request found */ + if (idx == CAM_CTX_REQ_MAX) { + 
mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 0; + } + + cur_time = ktime_get(); + diff = ktime_us_delta(cur_time, + ctx_data->req_list[idx]->submit_timestamp); + cur_ts = ktime_to_timespec64(cur_time); + req_ts = ktime_to_timespec64(ctx_data->req_list[idx]->submit_timestamp); + + if (diff < (ctx_data->req_timer_timeout * 1000)) { + CAM_INFO(CAM_OPE, + "No Error req %llu req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 0; + } + + CAM_ERR(CAM_OPE, "Error req %llu req timestamp:[%lld.%06lld] curr timestamp:[%lld.%06lld]", + dump_args->request_id, + req_ts.tv_sec, + req_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, + cur_ts.tv_nsec/NSEC_PER_USEC); + + mutex_unlock(&ctx_data->ctx_mutex); + mutex_unlock(&hw_mgr->hw_mgr_mutex); + return 0; +} + +static int cam_ope_mgr_hw_flush(void *hw_priv, void *hw_flush_args) +{ + struct cam_hw_flush_args *flush_args = hw_flush_args; + struct cam_ope_ctx *ctx_data; + struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr; + + if ((!hw_priv) || (!hw_flush_args)) { + CAM_ERR(CAM_OPE, "Input params are Null"); + return -EINVAL; + } + + ctx_data = flush_args->ctxt_to_hw_map; + if (!ctx_data) { + CAM_ERR(CAM_OPE, "Ctx data is NULL"); + return -EINVAL; + } + + if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) || + (flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) { + CAM_ERR(CAM_OPE, "Invalid flush type: %d", + flush_args->flush_type); + return -EINVAL; + } + + switch (flush_args->flush_type) { + case CAM_FLUSH_TYPE_ALL: + mutex_lock(&hw_mgr->hw_mgr_mutex); + ctx_data->last_flush_req = flush_args->last_flush_req; + + CAM_DBG(CAM_REQ, "ctx_id %d Flush type %d last_flush_req %u", + ctx_data->ctx_id, flush_args->flush_type, + ctx_data->last_flush_req); + + cam_ope_mgr_flush_all(ctx_data, flush_args); + 
mutex_unlock(&hw_mgr->hw_mgr_mutex); + break; + case CAM_FLUSH_TYPE_REQ: + mutex_lock(&ctx_data->ctx_mutex); + if (flush_args->num_req_active) { + CAM_ERR(CAM_OPE, "Flush request is not supported"); + mutex_unlock(&ctx_data->ctx_mutex); + return -EINVAL; + } + if (flush_args->num_req_pending) + cam_ope_mgr_flush_req(ctx_data, flush_args); + mutex_unlock(&ctx_data->ctx_mutex); + break; + default: + CAM_ERR(CAM_OPE, "Invalid flush type: %d", + flush_args->flush_type); + return -EINVAL; + } + + return 0; +} + +static int cam_ope_mgr_alloc_devs(struct device_node *of_node) +{ + int rc; + uint32_t num_dev; + + rc = of_property_read_u32(of_node, "num-ope", &num_dev); + if (rc) { + CAM_ERR(CAM_OPE, "getting num of ope failed: %d", rc); + return -EINVAL; + } + + ope_hw_mgr->devices[OPE_DEV_OPE] = CAM_MEM_ZALLOC( + sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL); + if (!ope_hw_mgr->devices[OPE_DEV_OPE]) + return -ENOMEM; + + return 0; +} + +static int cam_ope_mgr_init_devs(struct device_node *of_node) +{ + int rc = 0; + int count, i; + const char *name = NULL; + struct device_node *child_node = NULL; + struct platform_device *child_pdev = NULL; + struct cam_hw_intf *child_dev_intf = NULL; + struct cam_hw_info *ope_dev; + struct cam_hw_soc_info *soc_info = NULL; + + rc = cam_ope_mgr_alloc_devs(of_node); + if (rc) + return rc; + + count = of_property_count_strings(of_node, "compat-hw-name"); + if (!count) { + CAM_ERR(CAM_OPE, "no compat hw found in dev tree, cnt = %d", + count); + rc = -EINVAL; + goto compat_hw_name_failed; + } + + for (i = 0; i < count; i++) { + rc = of_property_read_string_index(of_node, "compat-hw-name", + i, &name); + if (rc) { + CAM_ERR(CAM_OPE, "getting dev object name failed"); + goto compat_hw_name_failed; + } + + child_node = of_find_node_by_name(NULL, name); + if (!child_node) { + CAM_ERR(CAM_OPE, "Cannot find node in dtsi %s", name); + rc = -ENODEV; + goto compat_hw_name_failed; + } + + child_pdev = of_find_device_by_node(child_node); + if 
(!child_pdev) { + CAM_ERR(CAM_OPE, "failed to find device on bus %s", + child_node->name); + rc = -ENODEV; + of_node_put(child_node); + goto compat_hw_name_failed; + } + + child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata( + child_pdev); + if (!child_dev_intf) { + CAM_ERR(CAM_OPE, "no child device"); + of_node_put(child_node); + goto compat_hw_name_failed; + } + ope_hw_mgr->devices[child_dev_intf->hw_type] + [child_dev_intf->hw_idx] = child_dev_intf; + + if (!child_dev_intf->hw_ops.process_cmd) + goto compat_hw_name_failed; + + of_node_put(child_node); + } + + ope_hw_mgr->num_ope = count; + + for (i = 0; i < count; i++) { + ope_hw_mgr->ope_dev_intf[i] = + ope_hw_mgr->devices[OPE_DEV_OPE][i]; + ope_dev = ope_hw_mgr->ope_dev_intf[i]->hw_priv; + soc_info = &ope_dev->soc_info; + ope_hw_mgr->cdm_reg_map[i][0] = + soc_info->reg_map[0].mem_base; + } + + ope_hw_mgr->hfi_en = of_property_read_bool(of_node, "hfi_en"); + + return 0; +compat_hw_name_failed: + CAM_MEM_FREE(ope_hw_mgr->devices[OPE_DEV_OPE]); + ope_hw_mgr->devices[OPE_DEV_OPE] = NULL; + return rc; +} + +static void cam_req_mgr_process_ope_command_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static void cam_req_mgr_process_ope_msg_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static void cam_req_mgr_process_ope_timer_queue(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +static int cam_ope_mgr_create_wq(void) +{ + + int rc; + int i; + + rc = cam_req_mgr_workq_create("ope_command_queue", OPE_WORKQ_NUM_TASK, + &ope_hw_mgr->cmd_work, CRM_WORKQ_USAGE_NON_IRQ, + 0, cam_req_mgr_process_ope_command_queue); + if (rc) { + CAM_ERR(CAM_OPE, "unable to create a command worker"); + goto cmd_work_failed; + } + + rc = cam_req_mgr_workq_create("ope_message_queue", OPE_WORKQ_NUM_TASK, + &ope_hw_mgr->msg_work, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_ope_msg_queue); + if (rc) { + CAM_ERR(CAM_OPE, "unable to create a message worker"); + goto 
msg_work_failed; + } + + rc = cam_req_mgr_workq_create("ope_timer_queue", OPE_WORKQ_NUM_TASK, + &ope_hw_mgr->timer_work, CRM_WORKQ_USAGE_IRQ, 0, + cam_req_mgr_process_ope_timer_queue); + if (rc) { + CAM_ERR(CAM_OPE, "unable to create a timer worker"); + goto timer_work_failed; + } + + ope_hw_mgr->cmd_work_data = + CAM_MEM_ZALLOC(sizeof(struct ope_cmd_work_data) * OPE_WORKQ_NUM_TASK, + GFP_KERNEL); + if (!ope_hw_mgr->cmd_work_data) { + rc = -ENOMEM; + goto cmd_work_data_failed; + } + + ope_hw_mgr->msg_work_data = + CAM_MEM_ZALLOC(sizeof(struct ope_msg_work_data) * OPE_WORKQ_NUM_TASK, + GFP_KERNEL); + if (!ope_hw_mgr->msg_work_data) { + rc = -ENOMEM; + goto msg_work_data_failed; + } + + ope_hw_mgr->timer_work_data = + CAM_MEM_ZALLOC(sizeof(struct ope_clk_work_data) * OPE_WORKQ_NUM_TASK, + GFP_KERNEL); + if (!ope_hw_mgr->timer_work_data) { + rc = -ENOMEM; + goto timer_work_data_failed; + } + + for (i = 0; i < OPE_WORKQ_NUM_TASK; i++) + ope_hw_mgr->msg_work->task.pool[i].payload = + &ope_hw_mgr->msg_work_data[i]; + + for (i = 0; i < OPE_WORKQ_NUM_TASK; i++) + ope_hw_mgr->cmd_work->task.pool[i].payload = + &ope_hw_mgr->cmd_work_data[i]; + + for (i = 0; i < OPE_WORKQ_NUM_TASK; i++) + ope_hw_mgr->timer_work->task.pool[i].payload = + &ope_hw_mgr->timer_work_data[i]; + return 0; + + +timer_work_data_failed: + CAM_MEM_FREE(ope_hw_mgr->msg_work_data); +msg_work_data_failed: + CAM_MEM_FREE(ope_hw_mgr->cmd_work_data); +cmd_work_data_failed: + cam_req_mgr_workq_destroy(&ope_hw_mgr->timer_work); +timer_work_failed: + cam_req_mgr_workq_destroy(&ope_hw_mgr->msg_work); +msg_work_failed: + cam_req_mgr_workq_destroy(&ope_hw_mgr->cmd_work); +cmd_work_failed: + return rc; +} + +static int cam_ope_create_debug_fs(void) +{ + int rc; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("ope", &ope_hw_mgr->dentry); + if (rc) { + CAM_ERR(CAM_OPE, "failed to create dentry"); + return rc; + } + + debugfs_create_bool("frame_dump_enable", 0644, ope_hw_mgr->dentry, + 
&ope_hw_mgr->frame_dump_enable); + + debugfs_create_bool("dump_req_data_enable", 0644, ope_hw_mgr->dentry, + &ope_hw_mgr->dump_req_data_enable); + + return 0; +} + + +int cam_ope_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl, + int *iommu_hdl) +{ + int i, rc = 0, j; + struct cam_hw_mgr_intf *hw_mgr_intf; + struct cam_iommu_handle cdm_handles; + + if (!of_node || !hw_mgr_hdl) { + CAM_ERR(CAM_OPE, "Invalid args of_node %pK hw_mgr %pK", + of_node, hw_mgr_hdl); + return -EINVAL; + } + hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl; + + ope_hw_mgr = CAM_MEM_ZALLOC(sizeof(struct cam_ope_hw_mgr), GFP_KERNEL); + if (!ope_hw_mgr) { + CAM_ERR(CAM_OPE, "Unable to allocate mem for: size = %d", + sizeof(struct cam_ope_hw_mgr)); + return -ENOMEM; + } + + hw_mgr_intf->hw_mgr_priv = ope_hw_mgr; + hw_mgr_intf->hw_get_caps = cam_ope_mgr_get_hw_caps; + hw_mgr_intf->hw_acquire = cam_ope_mgr_acquire_hw; + hw_mgr_intf->hw_release = cam_ope_mgr_release_hw; + hw_mgr_intf->hw_start = NULL; + hw_mgr_intf->hw_stop = NULL; + hw_mgr_intf->hw_prepare_update = cam_ope_mgr_prepare_hw_update; + hw_mgr_intf->hw_config_stream_settings = NULL; + hw_mgr_intf->hw_config = cam_ope_mgr_config_hw; + hw_mgr_intf->hw_read = NULL; + hw_mgr_intf->hw_write = NULL; + hw_mgr_intf->hw_cmd = cam_ope_mgr_cmd; + hw_mgr_intf->hw_open = cam_ope_mgr_hw_open_u; + hw_mgr_intf->hw_close = cam_ope_mgr_hw_close_u; + hw_mgr_intf->hw_flush = cam_ope_mgr_hw_flush; + hw_mgr_intf->hw_dump = cam_ope_mgr_hw_dump; + + ope_hw_mgr->secure_mode = false; + mutex_init(&ope_hw_mgr->hw_mgr_mutex); + spin_lock_init(&ope_hw_mgr->hw_mgr_lock); + + for (i = 0; i < OPE_CTX_MAX; i++) { + ope_hw_mgr->ctx[i].bitmap_size = + BITS_TO_LONGS(CAM_CTX_REQ_MAX) * + sizeof(long); + ope_hw_mgr->ctx[i].bitmap = CAM_MEM_ZALLOC( + ope_hw_mgr->ctx[i].bitmap_size, GFP_KERNEL); + if (!ope_hw_mgr->ctx[i].bitmap) { + CAM_ERR(CAM_OPE, "bitmap allocation failed: size = %d", + ope_hw_mgr->ctx[i].bitmap_size); + rc = -ENOMEM; + goto 
ope_ctx_bitmap_failed; + } + ope_hw_mgr->ctx[i].bits = ope_hw_mgr->ctx[i].bitmap_size * + BITS_PER_BYTE; + mutex_init(&ope_hw_mgr->ctx[i].ctx_mutex); + } + + rc = cam_ope_mgr_init_devs(of_node); + if (rc) + goto dev_init_failed; + + ope_hw_mgr->ctx_bitmap_size = + BITS_TO_LONGS(OPE_CTX_MAX) * sizeof(long); + ope_hw_mgr->ctx_bitmap = CAM_MEM_ZALLOC(ope_hw_mgr->ctx_bitmap_size, + GFP_KERNEL); + if (!ope_hw_mgr->ctx_bitmap) { + rc = -ENOMEM; + goto ctx_bitmap_alloc_failed; + } + + ope_hw_mgr->ctx_bits = ope_hw_mgr->ctx_bitmap_size * + BITS_PER_BYTE; + + for (i = 0; i < OPE_DEV_MAX; i++) { + rc = cam_ope_hw_init(&ope_hw_mgr->ope_dev_data[i], i); + if (rc) + goto ctx_bitmap_alloc_failed; + } + + rc = cam_smmu_get_handle("ope", &ope_hw_mgr->iommu_hdl); + if (rc) { + CAM_ERR(CAM_OPE, "get mmu handle failed: %d", rc); + goto ope_get_hdl_failed; + } + + rc = cam_smmu_get_handle("cam-secure", &ope_hw_mgr->iommu_sec_hdl); + if (rc) { + CAM_ERR(CAM_OPE, "get secure mmu handle failed: %d", rc); + goto secure_hdl_failed; + } + + rc = cam_cdm_get_iommu_handle("ope", &cdm_handles); + if (rc) { + CAM_ERR(CAM_OPE, "ope cdm handle get is failed: %d", rc); + goto ope_cdm_hdl_failed; + } + + ope_hw_mgr->iommu_cdm_hdl = cdm_handles.non_secure; + ope_hw_mgr->iommu_sec_cdm_hdl = cdm_handles.secure; + CAM_DBG(CAM_OPE, "iommu hdls %x %x cdm %x %x", + ope_hw_mgr->iommu_hdl, ope_hw_mgr->iommu_sec_hdl, + ope_hw_mgr->iommu_cdm_hdl, + ope_hw_mgr->iommu_sec_cdm_hdl); + + rc = cam_ope_mgr_create_wq(); + if (rc) + goto ope_wq_create_failed; + + cam_ope_create_debug_fs(); + + if (iommu_hdl) + *iommu_hdl = ope_hw_mgr->iommu_hdl; + + return rc; + +ope_wq_create_failed: + ope_hw_mgr->iommu_cdm_hdl = -1; + ope_hw_mgr->iommu_sec_cdm_hdl = -1; +ope_cdm_hdl_failed: + cam_smmu_destroy_handle(ope_hw_mgr->iommu_sec_hdl); + ope_hw_mgr->iommu_sec_hdl = -1; +secure_hdl_failed: + cam_smmu_destroy_handle(ope_hw_mgr->iommu_hdl); + ope_hw_mgr->iommu_hdl = -1; +ope_get_hdl_failed: + CAM_MEM_ZFREE((void 
*)ope_hw_mgr->ctx_bitmap, + ope_hw_mgr->ctx_bitmap_size); + ope_hw_mgr->ctx_bitmap = NULL; + ope_hw_mgr->ctx_bitmap_size = 0; + ope_hw_mgr->ctx_bits = 0; +ctx_bitmap_alloc_failed: + CAM_MEM_ZFREE((void *)ope_hw_mgr->devices[OPE_DEV_OPE], + (ope_hw_mgr->num_ope) * (sizeof(struct cam_hw_intf *))); + ope_hw_mgr->devices[OPE_DEV_OPE] = NULL; +dev_init_failed: +ope_ctx_bitmap_failed: + mutex_destroy(&ope_hw_mgr->hw_mgr_mutex); + for (j = i - 1; j >= 0; j--) { + mutex_destroy(&ope_hw_mgr->ctx[j].ctx_mutex); + CAM_MEM_ZFREE((void *)ope_hw_mgr->ctx[j].bitmap, + ope_hw_mgr->ctx[j].bitmap_size); + ope_hw_mgr->ctx[j].bitmap = NULL; + ope_hw_mgr->ctx[j].bitmap_size = 0; + ope_hw_mgr->ctx[j].bits = 0; + } + CAM_MEM_ZFREE((void *)ope_hw_mgr, sizeof(struct cam_ope_hw_mgr)); + ope_hw_mgr = NULL; + + return rc; +} + +static void cam_ope_mgr_dump_pf_data( + struct cam_ope_hw_mgr *hw_mgr, + struct cam_hw_cmd_args *hw_cmd_args) +{ + struct cam_ope_ctx *ctx_data; + struct cam_packet *packet; + struct cam_buf_io_cfg *io_cfg = NULL; + struct cam_ope_request *ope_request; + struct ope_io_buf *io_buf = NULL; + struct cam_ope_match_pid_args ope_pid_mid_args; + struct cam_hw_dump_pf_args *pf_args; + struct cam_ctx_request *req_pf; + + int device_idx; + bool *ctx_found; + bool io_buf_found = false; + int i, j; + bool hw_pid_support = true; + bool hw_id_found = false; + uint32_t *resource_type; + int stripe_num; + int rc = 0; + + ctx_data = (struct cam_ope_ctx *)hw_cmd_args->ctxt_to_hw_map; + pf_args = hw_cmd_args->u.pf_cmd_args->pf_args; + req_pf = (struct cam_ctx_request *) + hw_cmd_args->u.pf_cmd_args->pf_req_info->req; + packet = (struct cam_packet *)req_pf->packet; + ope_request = hw_cmd_args->u.pf_cmd_args->pf_req_info->req; + + ope_pid_mid_args.fault_mid = pf_args->pf_smmu_info->mid; + ope_pid_mid_args.fault_pid = pf_args->pf_smmu_info->pid; + ctx_found = &(pf_args->pf_context_info.ctx_found); + resource_type = &(pf_args->pf_context_info.resource_type); + + if (ctx_data->pf_mid_found) + 
goto stripedump; + + if (*ctx_found && *resource_type) { + hw_pid_support = false; + goto stripedump; + } else { + *ctx_found = false; + } + + for (i = 0; i < ope_hw_mgr->num_ope; i++) { + if (!ope_hw_mgr->ope_dev_data[i]->hw_intf) + continue; + for (j = 0; j < ope_hw_mgr->ope_dev_data[i]->num_hw_pid; j++) { + if (ope_hw_mgr->ope_dev_data[i]->hw_pid[j] == + pf_args->pf_smmu_info->pid) { + hw_id_found = true; + device_idx = i; + *ctx_found = true; + break; + } + } + if (hw_id_found) + break; + } + + if (i == ope_hw_mgr->num_ope) { + CAM_INFO(CAM_OPE, + "PID:%d is not matching with any OPE HW PIDs ctx id:%d", + pf_args->pf_smmu_info->pid, ctx_data->ctx_id); + cam_packet_util_put_packet_addr(pf_req_info->packet_handle); + return; + } + + ope_pid_mid_args.device_idx = device_idx; + + rc = hw_mgr->devices[OPE_DEV_OPE][device_idx]->hw_ops.process_cmd( + hw_mgr->devices[OPE_DEV_OPE][device_idx]->hw_priv, + OPE_HW_MATCH_PID_MID, + &ope_pid_mid_args, sizeof(ope_pid_mid_args)); + if (rc) { + CAM_ERR(CAM_OPE, + "CAM_OPE_CMD_MATCH_PID_MID failed %d", rc); + cam_packet_util_put_packet_addr(pf_req_info->packet_handle); + return; + } + + *resource_type = ope_pid_mid_args.match_res; + CAM_INFO(CAM_OPE, "Fault port %d", *resource_type); + +stripedump: + io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload_flex + + packet->io_configs_offset / 4); + + if (!ope_request) + goto iodump; + + CAM_INFO(CAM_OPE, "req_id %d Num of batches %d", + ope_request->request_id, ope_request->num_batch); + + for (i = 0; i < ope_request->num_batch; i++) { + for (j = 0; j < ope_request->num_io_bufs[i]; j++) { + io_buf = ope_request->io_buf[i][j]; + if (io_buf) { + if (io_buf->resource_type == *resource_type) { + io_buf_found = true; + break; + } + } + } + if (io_buf_found) + break; + } + +iodump: + cam_packet_util_dump_io_bufs(packet, hw_mgr->iommu_hdl, + hw_mgr->iommu_sec_hdl, pf_args, hw_pid_support); + + if (io_buf_found) { + for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) { + if (j >= 
OPE_MAX_PLANES) { + CAM_INFO(CAM_OPE, "Invalid plane idx: %d", j); + break; + } + + for (stripe_num = 0; stripe_num < io_buf->num_stripes[j]; stripe_num++) { + CAM_INFO(CAM_OPE, + "pln_num %d stripe_num %d width %d height %d stride %d iovaddr 0x%llx", + j, stripe_num, io_buf->s_io[j][stripe_num].width, + io_buf->s_io[j][stripe_num].height, + io_buf->s_io[j][stripe_num].stride, + io_buf->s_io[j][stripe_num].iova_addr); + } + } + } + cam_packet_util_put_packet_addr(pf_req_info->packet_handle); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.h new file mode 100644 index 0000000000..57786f25d8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.h @@ -0,0 +1,630 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_OPE_HW_MGR_H +#define CAM_OPE_HW_MGR_H + +#include +#include +#include +#include "ope_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_req_mgr_workq.h" +#include "cam_mem_mgr.h" +#include "cam_smmu_api.h" +#include "cam_soc_util.h" +#include "cam_req_mgr_timer.h" +#include "cam_context.h" +#include "ope_hw.h" +#include "cam_cdm_intf_api.h" +#include "cam_req_mgr_timer.h" + +#define OPE_CTX_MAX 32 +#define CAM_FRAME_CMD_MAX 20 + + +#define OPE_WORKQ_NUM_TASK 100 +#define OPE_WORKQ_TASK_CMD_TYPE 1 +#define OPE_WORKQ_TASK_MSG_TYPE 2 + +#define OPE_PACKET_SIZE 0 +#define OPE_PACKET_TYPE 1 +#define OPE_PACKET_OPCODE 2 + +#define OPE_PACKET_MAX_CMD_BUFS 4 + +#define OPE_MAX_OUTPUT_SUPPORTED 8 +#define OPE_MAX_INPUT_SUPPORTED 3 + +#define OPE_FRAME_PROCESS_SUCCESS 0 +#define OPE_FRAME_PROCESS_FAILURE 1 + +#define OPE_CTX_STATE_FREE 0 +#define OPE_CTX_STATE_IN_USE 1 +#define OPE_CTX_STATE_ACQUIRED 2 +#define OPE_CTX_STATE_RELEASE 3 + +#define OPE_CMDS OPE_MAX_CMD_BUFS +#define CAM_MAX_IN_RES 8 + +#define OPE_MAX_CDM_BLS 32 + +#define CAM_OPE_MAX_PER_PATH_VOTES 6 +#define CAM_OPE_BW_CONFIG_UNKNOWN 0 +#define CAM_OPE_BW_CONFIG_V2 2 + +#define CLK_HW_OPE 0x0 +#define CLK_HW_MAX 0x1 + +#define OPE_DEVICE_IDLE_TIMEOUT 400 +#define OPE_REQUEST_RT_TIMEOUT 200 +#define OPE_REQUEST_NRT_TIMEOUT 400 + +/** + * struct cam_ope_clk_bw_request_v2 + * @budget_ns: Time required to process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @reserved: Reserved for future use + * @num_paths: Number of paths for per path bw vote + * @axi_path: Per path vote info for OPE + */ +struct cam_ope_clk_bw_req_internal_v2 { + uint64_t budget_ns; + uint32_t frame_cycles; + uint32_t rt_flag; + uint32_t reserved; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_OPE_MAX_PER_PATH_VOTES]; +}; + +/** + * struct cam_ope_clk_bw_request + * @budget_ns: Time required to 
process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @uncompressed_bw: Bandwidth required to process frame + * @compressed_bw: Compressed bandwidth to process frame + */ +struct cam_ope_clk_bw_request { + uint64_t budget_ns; + uint32_t frame_cycles; + uint32_t rt_flag; + uint64_t uncompressed_bw; + uint64_t compressed_bw; +}; + +/** + * struct cam_ctx_clk_info + * @curr_fc: Context latest request frame cycles + * @rt_flag: Flag to indicate real time request + * @base_clk: Base clock to process the request + * @reserved: Reserved field + * @uncompressed_bw: Current bandwidth voting + * @compressed_bw: Current compressed bandwidth voting + * @clk_rate: Supported clock rates for the context + * @num_paths: Number of valid AXI paths + * @axi_path: ctx based per path bw vote + */ +struct cam_ctx_clk_info { + uint32_t curr_fc; + uint32_t rt_flag; + uint32_t base_clk; + uint32_t reserved; + uint64_t uncompressed_bw; + uint64_t compressed_bw; + int32_t clk_rate[CAM_MAX_VOTE]; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_OPE_MAX_PER_PATH_VOTES]; +}; + +/** + * struct ope_cmd_generic_blob + * @ctx: Current context info + * @req_info_idx: Index used for request + * @io_buf_addr: pointer to io buffer address + */ +struct ope_cmd_generic_blob { + struct cam_ope_ctx *ctx; + uint32_t req_idx; + uint64_t *io_buf_addr; +}; + +/** + * struct cam_ope_clk_info + * @base_clk: Base clock to process request + * @curr_clk: Current clock of hadrware + * @threshold: Threshold for overclk count + * @over_clked: Over clock count + * @uncompressed_bw: Current bandwidth voting + * @compressed_bw: Current compressed bandwidth voting + * @num_paths: Number of AXI vote paths + * @axi_path: Current per path bw vote info + * @hw_type: IPE/BPS device type + * @watch_dog: watchdog timer handle + * @watch_dog_reset_counter: Counter for watch dog reset + */ +struct cam_ope_clk_info { + uint32_t 
base_clk; + uint32_t curr_clk; + uint32_t threshold; + uint32_t over_clked; + uint64_t uncompressed_bw; + uint64_t compressed_bw; + uint32_t num_paths; + struct cam_cpas_axi_per_path_bw_vote axi_path[CAM_OPE_MAX_PER_PATH_VOTES]; + uint32_t hw_type; + struct cam_req_mgr_timer *watch_dog; + uint32_t watch_dog_reset_counter; +}; + +/** + * struct ope_cmd_work_data + * + * @type: Type of work data + * @data: Private data + * @req_id: Request Id + */ +struct ope_cmd_work_data { + uint32_t type; + void *data; + int64_t req_id; +}; + +/** + * struct ope_msg_work_data + * + * @type: Type of work data + * @data: Private data + * @irq_status: IRQ status + */ +struct ope_msg_work_data { + uint32_t type; + void *data; + uint32_t irq_status; +}; + +/** + * struct ope_clk_work_data + * + * @type: Type of work data + * @data: Private data + */ +struct ope_clk_work_data { + uint32_t type; + void *data; +}; + +/** + * struct cdm_dmi_cmd + * + * @length: Number of bytes in LUT + * @reserved: reserved bits + * @cmd: Command ID (CDMCmd) + * @addr: Address of the LUT in memory + * @DMIAddr: Address of the target DMI config register + * @DMISel: DMI identifier + */ +struct cdm_dmi_cmd { + unsigned int length : 16; + unsigned int reserved : 8; + unsigned int cmd : 8; + unsigned int addr; + unsigned int DMIAddr : 24; + unsigned int DMISel : 8; +} __attribute__((__packed__)); + +/** + * struct ope_debug_buffer + * + * @cpu_addr: CPU address + * @iova_addr: IOVA address + * @len: Buffer length + * @size: Buffer Size + * @offset: buffer offset + */ +struct ope_debug_buffer { + uintptr_t cpu_addr; + dma_addr_t iova_addr; + size_t len; + uint32_t size; + uint32_t offset; +}; + +/** + * struct ope_kmd_buffer + * + * @mem_handle: Memory handle + * @cpu_addr: CPU address + * @iova_addr: IOVA address + * @iova_cdm_addr: CDM IOVA address + * @offset: Offset of buffer + * @len: Buffer length + * @size: Buffer Size + */ +struct ope_kmd_buffer { + uint32_t mem_handle; + uintptr_t cpu_addr; + 
dma_addr_t iova_addr; + dma_addr_t iova_cdm_addr; + uint32_t offset; + size_t len; + uint32_t size; +}; + +struct ope_stripe_settings { + uintptr_t cpu_addr; + dma_addr_t iova_addr; + size_t len; + uint32_t size; + uint32_t buf_type; + uint32_t type_buffered; +}; + +/** + * struct ope_pass_settings + * + * @cpu_addr: CPU address + * @iova_addr: IOVA address + * @len: Buffer length + * @size: Buffer Size + * @idx: Pass Index + * @buf_type: Direct/Indirect type + * @type_buffered: SB/DB types + */ +struct ope_pass_settings { + uintptr_t cpu_addr; + dma_addr_t iova_addr; + size_t len; + uint32_t size; + uint32_t idx; + uint32_t buf_type; + uint32_t type_buffered; +}; + +/** + * struct ope_frame_settings + * + * @cpu_addr: CPU address + * @iova_addr: IOVA address + * @offset: offset + * @len: Buffer length + * @size: Buffer Size + * @buf_type: Direct/Indirect type + * @type_buffered: SB/DB types + * @prefecth_disable: Disable prefetch + */ +struct ope_frame_settings { + uintptr_t cpu_addr; + dma_addr_t iova_addr; + uint32_t offset; + size_t len; + uint32_t size; + uint32_t buf_type; + uint32_t type_buffered; + uint32_t prefecth_disable; +}; + +/** + * struct ope_stripe_io + * + * @format: Stripe format + * @s_location: Stripe location + * @cpu_addr: Stripe CPU address + * @iova_addr: Stripe IOVA address + * @width: Stripe width + * @height: Stripe height + * @stride: Stripe stride + * @unpack_format: Unpack format + * @pack_format: Packing format + * @alignment: Stripe alignment + * @offset: Stripe offset + * @x_init: X_init + * @subsample_period: Subsample period + * @subsample_pattern: Subsample pattern + * @len: Stripe buffer length + * @disable_bus: disable bus for the stripe + */ +struct ope_stripe_io { + uint32_t format; + uint32_t s_location; + uintptr_t cpu_addr; + dma_addr_t iova_addr; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t unpack_format; + uint32_t pack_format; + uint32_t alignment; + uint32_t offset; + uint32_t x_init; + uint32_t 
subsample_period; + uint32_t subsample_pattern; + size_t len; + uint32_t disable_bus; +}; + +/** + * struct ope_io_buf + * + * @direction: Direction of a buffer + * @resource_type: Resource type of IO Buffer + * @format: Format + * @fence: Fence + * @num_planes: Number of planes + * @num_stripes: Number of stripes + * @s_io: Stripe info + */ +struct ope_io_buf { + uint32_t direction; + uint32_t resource_type; + uint32_t format; + uint32_t fence; + uint32_t num_planes; + uint32_t pix_pattern; + uint32_t num_stripes[OPE_MAX_PLANES]; + struct ope_stripe_io s_io[OPE_MAX_PLANES][OPE_MAX_STRIPES]; +}; + +/** + * struct cam_ope_request + * + * @request_id: Request Id + * @req_idx: Index in request list + * @state: Request state + * @num_batch: Number of batches + * @num_cmd_bufs: Number of command buffers + * @num_frame_bufs: Number of frame buffers + * @num_pass_bufs: Number of pass Buffers + * @num_stripes: Number of Stripes + * @num_io_bufs: Number of IO Buffers + * @in_resource: Input resource + * @num_stripe_cmd_bufs: Command buffers per stripe + * @ope_kmd_buf: KMD buffer for OPE programming + * @ope_debug_buf: Debug buffer + * @io_buf: IO config info of a request + * @cdm_cmd: CDM command for OPE CDM + * @clk_info: Clock Info V1 + * @clk_info_v2: Clock Info V2 + * @hang_data: Debug data for HW error + * @submit_timestamp: Submit timestamp to hw + */ +struct cam_ope_request { + uint64_t request_id; + uint32_t req_idx; + uint32_t state; + uint32_t num_batch; + uint32_t num_cmd_bufs; + uint32_t num_frame_bufs; + uint32_t num_pass_bufs; + uint32_t num_stripes[OPE_MAX_BATCH_SIZE]; + uint32_t num_io_bufs[OPE_MAX_BATCH_SIZE]; + uint32_t in_resource; + uint8_t num_stripe_cmd_bufs[OPE_MAX_BATCH_SIZE][OPE_MAX_STRIPES]; + struct ope_kmd_buffer ope_kmd_buf; + struct ope_debug_buffer ope_debug_buf; + struct cam_kmd_buf_info genirq_buff_info; + struct ope_io_buf *io_buf[OPE_MAX_BATCH_SIZE][OPE_MAX_IO_BUFS]; + struct cam_cdm_bl_request *cdm_cmd; + struct cam_ope_clk_bw_request 
clk_info; + struct cam_ope_clk_bw_req_internal_v2 clk_info_v2; + struct cam_hw_mgr_pf_request_info hang_data; + ktime_t submit_timestamp; +}; + +/** + * struct cam_ope_cdm + * + * @cdm_handle: OPE CDM Handle + * @cdm_ops: OPE CDM Operations + */ +struct cam_ope_cdm { + uint32_t cdm_handle; + struct cam_cdm_utils_ops *cdm_ops; +}; + +/** + * struct cam_ope_ctx + * + * @context_priv: Private data of context + * @bitmap: Context bit map + * @bitmap_size: Context bit map size + * @bits: Context bit map bits + * @ctx_id: Context ID + * @ctx_state: State of a context + * @req_cnt: Requests count + * @ctx_mutex: Mutex for context + * @acquire_dev_cmd: Cam acquire command + * @ope_acquire: OPE acquire command + * @ctxt_event_cb: Callback of a context + * @req_list: Request List + * @ope_cdm: OPE CDM info + * @last_req_time: Timestamp of last request + * @req_watch_dog: Watchdog for requests + * @req_watch_dog_reset_counter: Request reset counter + * @clk_info: OPE Ctx clock info + * @clk_watch_dog: Clock watchdog + * @clk_watch_dog_reset_counter: Reset counter + * @last_flush_req: last flush req for this ctx + * @req_timer_timeout: req timer timeout value + */ +struct cam_ope_ctx { + void *context_priv; + size_t bitmap_size; + void *bitmap; + size_t bits; + uint32_t ctx_id; + uint32_t ctx_state; + uint32_t req_cnt; + struct mutex ctx_mutex; + struct cam_acquire_dev_cmd acquire_dev_cmd; + struct ope_acquire_dev_info ope_acquire; + cam_hw_event_cb_func ctxt_event_cb; + struct cam_ope_request *req_list[CAM_CTX_REQ_MAX]; + struct cam_ope_cdm ope_cdm; + uint64_t last_req_time; + struct cam_req_mgr_timer *req_watch_dog; + uint32_t req_watch_dog_reset_counter; + struct cam_ctx_clk_info clk_info; + struct cam_req_mgr_timer *clk_watch_dog; + uint32_t clk_watch_dog_reset_counter; + uint64_t last_flush_req; + bool pf_mid_found; + uint64_t req_timer_timeout; +}; + +/** + * struct cam_ope_hw_intf_data - OPE hw intf data + * + * @Brief: ope hw intf pointer and pid list data + * + * 
@devices: ope hw intf pointer + * @num_devices: Number of OPE devices + * @num_hw_pid: Number of pids for this hw + * @hw_pid: ope hw pid values + * + */ +struct cam_ope_hw_intf_data { + struct cam_hw_intf *hw_intf; + uint32_t num_hw_pid; + uint32_t hw_pid[OPE_DEV_MAX]; +}; + +/** + * struct cam_ope_hw_mgr + * + * @open_cnt: OPE device open count + * @ope_ctx_cnt: Open context count + * @hw_mgr_mutex: Mutex for HW manager + * @hw_mgr_lock: Spinlock for HW manager + * @hfi_en: Flag for HFI + * @iommu_hdl: OPE Handle + * @iommu_sec_hdl: OPE Handle for secure + * @iommu_cdm_hdl: CDM Handle + * @iommu_sec_cdm_hdl: CDM Handle for secure + * @num_ope: Number of OPE + * @secure_mode: Mode of OPE operation + * @ctx_bitmap: Context bit map + * @ctx_bitmap_size: Context bit map size + * @ctx_bits: Context bit map bits + * @ctx: OPE context + * @devices: OPE devices + * @ope_dev_data: OPE device specific data + * @ope_caps: OPE capabilities + * @cmd_work: Command work + * @msg_work: Message work + * @timer_work: Timer work + * @cmd_work_data: Command work data + * @msg_work_data: Message work data + * @timer_work_data: Timer work data + * @ope_dev_intf: OPE device interface + * @cdm_reg_map: OPE CDM register map + * @clk_info: OPE clock Info for HW manager + * @dentry: Pointer to OPE debugfs directory + * @frame_dump_enable: OPE frame setting dump enablement + * @dump_req_data_enable: OPE hang dump enablement + */ +struct cam_ope_hw_mgr { + int32_t open_cnt; + uint32_t ope_ctx_cnt; + struct mutex hw_mgr_mutex; + spinlock_t hw_mgr_lock; + bool hfi_en; + int32_t iommu_hdl; + int32_t iommu_sec_hdl; + int32_t iommu_cdm_hdl; + int32_t iommu_sec_cdm_hdl; + uint32_t num_ope; + bool secure_mode; + void *ctx_bitmap; + size_t ctx_bitmap_size; + size_t ctx_bits; + struct cam_ope_ctx ctx[OPE_CTX_MAX]; + struct cam_hw_intf **devices[OPE_DEV_MAX]; + struct cam_ope_hw_intf_data *ope_dev_data[OPE_DEV_MAX]; + struct ope_query_cap_cmd ope_caps; + uint64_t last_callback_time; + + struct 
cam_req_mgr_core_workq *cmd_work; + struct cam_req_mgr_core_workq *msg_work; + struct cam_req_mgr_core_workq *timer_work; + struct ope_cmd_work_data *cmd_work_data; + struct ope_msg_work_data *msg_work_data; + struct ope_clk_work_data *timer_work_data; + struct cam_hw_intf *ope_dev_intf[OPE_DEV_MAX]; + struct cam_soc_reg_map *cdm_reg_map[OPE_DEV_MAX][OPE_BASE_MAX]; + struct cam_ope_clk_info clk_info; + struct dentry *dentry; + bool frame_dump_enable; + bool dump_req_data_enable; +}; + +/** + * struct cam_ope_buf_entry + * + * @fd: FD of cmd buffer + * @memhdl: Memhandle of cmd buffer + * @iova: IOVA address of cmd buffer + * @offset: Offset of cmd buffer + * @len: Length of cmd buffer + * @size: Size of cmd buffer + */ +struct cam_ope_buf_entry { + uint32_t fd; + uint64_t memhdl; + uint64_t iova; + uint64_t offset; + uint64_t len; + uint64_t size; +}; + +/** + * struct cam_ope_bl_entry + * + * @base: Base IOVA address of BL + * @len: Length of BL + * @arbitration: Arbitration bit + */ +struct cam_ope_bl_entry { + uint32_t base; + uint32_t len; + uint32_t arbitration; +}; + +/** + * struct cam_ope_output_info + * + * @iova: IOVA address of output buffer + * @offset: Offset of buffer + * @len: Length of buffer + */ +struct cam_ope_output_info { + uint64_t iova; + uint64_t offset; + uint64_t len; +}; + +/** + * struct cam_ope_hang_dump + * + * @num_bls: count of BLs for request + * @num_bufs: Count of buffer related to request + * @num_outputs: Count of output beffers + * @entries: Buffers info + * @bl_entries: BLs info + * @outputs: Output info + */ +struct cam_ope_hang_dump { + uint32_t num_bls; + uint32_t num_bufs; + uint64_t num_outputs; + struct cam_ope_buf_entry entries[OPE_MAX_BATCH_SIZE * OPE_MAX_CMD_BUFS]; + struct cam_ope_bl_entry bl_entries[OPE_MAX_CDM_BLS]; + struct cam_ope_output_info outputs + [OPE_MAX_BATCH_SIZE * OPE_OUT_RES_MAX]; +}; +#endif /* CAM_OPE_HW_MGR_H */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr_intf.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr_intf.h new file mode 100644 index 0000000000..b5abaf48e0 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr_intf.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef CAM_OPE_HW_MGR_INTF_H +#define CAM_OPE_HW_MGR_INTF_H + +#include +#include +#include +#include "cam_ope_hw_mgr.h" + +int cam_ope_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl, + int *iommu_hdl); + +int cam_ope_hw_init(struct cam_ope_hw_intf_data **ope_hw_intf_data, + uint32_t hw_idx); + +#endif /* CAM_OPE_HW_MGR_INTF_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.c b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.c new file mode 100644 index 0000000000..955ab11b5e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.c @@ -0,0 +1,884 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "ope_core.h" +#include "ope_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "cam_cdm_util.h" +#include "ope_hw.h" +#include "ope_dev_intf.h" +#include "ope_bus_rd.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +static struct ope_bus_rd *bus_rd; + +enum cam_ope_bus_unpacker_format { + UNPACKER_FMT_PLAIN_128 = 0x0, + UNPACKER_FMT_PLAIN_8 = 0x1, + UNPACKER_FMT_PLAIN_16_10BPP = 0x2, + UNPACKER_FMT_PLAIN_16_12BPP = 0x3, + UNPACKER_FMT_PLAIN_16_14BPP = 0x4, + UNPACKER_FMT_PLAIN_32_20BPP = 0x5, + UNPACKER_FMT_ARGB_16_10BPP = 0x6, + UNPACKER_FMT_ARGB_16_12BPP = 0x7, + UNPACKER_FMT_ARGB_16_14BPP = 0x8, + UNPACKER_FMT_PLAIN_32 = 0x9, + UNPACKER_FMT_PLAIN_64 = 0xA, + UNPACKER_FMT_TP_10 = 0xB, + UNPACKER_FMT_MIPI_8 = 0xC, + UNPACKER_FMT_MIPI_10 = 0xD, + UNPACKER_FMT_MIPI_12 = 0xE, + UNPACKER_FMT_MIPI_14 = 0xF, + UNPACKER_FMT_PLAIN_16_16BPP = 0x10, + UNPACKER_FMT_PLAIN_128_ODD_EVEN = 0x11, + UNPACKER_FMT_PLAIN_8_ODD_EVEN = 0x12, + UNPACKER_FMT_MAX = 0x13, +}; + +static int cam_ope_bus_rd_in_port_idx(uint32_t input_port_id) +{ + int i; + + for (i = 0; i < OPE_IN_RES_MAX; i++) + if (bus_rd->in_port_to_rm[i].input_port_id == + input_port_id) + return i; + + return -EINVAL; +} + +static int cam_ope_bus_rd_combo_idx(uint32_t format) +{ + int rc = -EINVAL; + + switch (format) { + case CAM_FORMAT_YUV422: + case CAM_FORMAT_NV21: + case CAM_FORMAT_NV12: + rc = BUS_RD_YUV; + break; + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + case CAM_FORMAT_MIPI_RAW_10: + case CAM_FORMAT_MIPI_RAW_12: + case CAM_FORMAT_MIPI_RAW_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_QTI_RAW_8: + case CAM_FORMAT_QTI_RAW_10: + case CAM_FORMAT_QTI_RAW_12: + case 
CAM_FORMAT_QTI_RAW_14: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN16_8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN32_20: + case CAM_FORMAT_PLAIN64: + case CAM_FORMAT_PLAIN128: + rc = BUS_RD_BAYER; + break; + default: + break; + } + + CAM_DBG(CAM_OPE, "Input format = %u rc = %d", + format, rc); + return rc; +} + +static int cam_ope_bus_is_rm_enabled( + struct cam_ope_request *ope_request, + uint32_t batch_idx, + uint32_t rm_id) +{ + int i, k; + int32_t combo_idx; + struct ope_io_buf *io_buf; + struct ope_bus_in_port_to_rm *in_port_to_rm; + + if (batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx); + return -EINVAL; + } + + for (i = 0; i < ope_request->num_io_bufs[batch_idx]; i++) { + io_buf = ope_request->io_buf[batch_idx][i]; + if (io_buf->direction != CAM_BUF_INPUT) + continue; + in_port_to_rm = + &bus_rd->in_port_to_rm[io_buf->resource_type - 1]; + combo_idx = cam_ope_bus_rd_combo_idx(io_buf->format); + if (combo_idx < 0) { + CAM_ERR(CAM_OPE, "Invalid combo_idx"); + return -EINVAL; + } + for (k = 0; k < io_buf->num_planes; k++) { + if (rm_id == + in_port_to_rm->rm_port_id[combo_idx][k]) + return true; + } + } + + return false; +} + +static uint32_t *cam_ope_bus_rd_update(struct ope_hw *ope_hw_info, + int32_t ctx_id, uint32_t *kmd_buf, int batch_idx, + int io_idx, struct cam_ope_dev_prepare_req *prepare, + int32_t *num_stripes) +{ + int k, l, m; + uint32_t idx; + int32_t combo_idx; + uint32_t req_idx, count = 0, temp; + uint32_t temp_reg[128] = {0}; + uint32_t rm_id, header_size; + uint32_t rsc_type; + uint32_t *next_buff_addr = NULL; + struct cam_hw_prepare_update_args *prepare_args; + struct cam_ope_ctx *ctx_data; + struct cam_ope_request *ope_request; + struct ope_io_buf *io_buf; + struct ope_stripe_io *stripe_io; + struct ope_bus_rd_ctx *bus_rd_ctx; + struct cam_ope_bus_rd_reg *rd_reg; + struct 
cam_ope_bus_rd_client_reg *rd_reg_client; + struct cam_ope_bus_rd_reg_val *rd_reg_val; + struct cam_ope_bus_rd_client_reg_val *rd_res_val_client; + struct ope_bus_in_port_to_rm *in_port_to_rm; + struct ope_bus_rd_io_port_cdm_info *io_port_cdm; + struct cam_cdm_utils_ops *cdm_ops; + struct ope_bus_rd_io_port_info *io_port_info; + + + if (ctx_id < 0 || !prepare) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, prepare); + return NULL; + } + + if (batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx); + return NULL; + } + + if (io_idx >= OPE_MAX_IO_BUFS) { + CAM_ERR(CAM_OPE, "Invalid IO idx: %d", io_idx); + return NULL; + } + + prepare_args = prepare->prepare_args; + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + ope_request = ctx_data->req_list[req_idx]; + CAM_DBG(CAM_OPE, "req_idx = %d req_id = %lld KMDbuf %x offset %d", + req_idx, ope_request->request_id, + kmd_buf, prepare->kmd_buf_offset); + bus_rd_ctx = bus_rd->bus_rd_ctx[ctx_id]; + io_port_info = &bus_rd_ctx->io_port_info; + rd_reg = ope_hw_info->bus_rd_reg; + rd_reg_val = ope_hw_info->bus_rd_reg_val; + io_buf = ope_request->io_buf[batch_idx][io_idx]; + + CAM_DBG(CAM_OPE, + "req_idx = %d req_id = %lld KMDbuf 0x%x offset %d rsc %d", + req_idx, ope_request->request_id, + kmd_buf, prepare->kmd_buf_offset, + io_buf->resource_type); + CAM_DBG(CAM_OPE, "batch:%d iobuf:%d direction:%d", + batch_idx, io_idx, io_buf->direction); + io_port_cdm = + &bus_rd_ctx->io_port_cdm_batch.io_port_cdm[batch_idx]; + in_port_to_rm = + &bus_rd->in_port_to_rm[io_buf->resource_type - 1]; + combo_idx = cam_ope_bus_rd_combo_idx(io_buf->format); + if (combo_idx < 0) { + CAM_ERR(CAM_OPE, "Invalid combo_idx"); + return NULL; + } + + for (k = 0; k < io_buf->num_planes; k++) { + for (l = 0; l < io_buf->num_stripes[k]; l++) { + *num_stripes = io_buf->num_stripes[k]; + stripe_io = &io_buf->s_io[k][l]; + rsc_type = io_buf->resource_type - 1; + /* frame 
level info */ + /* stripe level info */ + rm_id = in_port_to_rm->rm_port_id[combo_idx][k]; + rd_reg_client = &rd_reg->rd_clients[rm_id]; + rd_res_val_client = &rd_reg_val->rd_clients[rm_id]; + + /* security cfg */ + temp_reg[count++] = rd_reg->offset + + rd_reg->security_cfg; + temp_reg[count++] = + ctx_data->ope_acquire.secure_mode; + + /* enable client */ + temp_reg[count++] = rd_reg->offset + + rd_reg_client->core_cfg; + temp_reg[count++] = 1; + + /* ccif meta data */ + temp_reg[count++] = rd_reg->offset + + rd_reg_client->ccif_meta_data; + temp = 0; + temp |= stripe_io->s_location & + rd_res_val_client->stripe_location_mask; + temp |= (io_buf->pix_pattern & + rd_res_val_client->pix_pattern_mask) << + rd_res_val_client->pix_pattern_shift; + temp_reg[count++] = temp; + + /* Address of the Image */ + temp_reg[count++] = rd_reg->offset + + rd_reg_client->img_addr; + temp_reg[count++] = stripe_io->iova_addr; + + /* Buffer size */ + temp_reg[count++] = rd_reg->offset + + rd_reg_client->img_cfg; + temp = 0; + temp = stripe_io->height; + temp |= + (stripe_io->width & + rd_res_val_client->img_width_mask) << + rd_res_val_client->img_width_shift; + temp_reg[count++] = temp; + + /* stride */ + temp_reg[count++] = rd_reg->offset + + rd_reg_client->stride; + temp_reg[count++] = stripe_io->stride; + + /* + * In case of NV12, change the unpacker format of + * chroma plane to odd even byte swapped format. 
+ */ + if (k == 1 && stripe_io->format == CAM_FORMAT_NV12) + stripe_io->unpack_format = + UNPACKER_FMT_PLAIN_8_ODD_EVEN; + + /* Unpack cfg : Mode and alignment */ + temp_reg[count++] = rd_reg->offset + + rd_reg_client->unpack_cfg; + temp = 0; + temp |= (stripe_io->unpack_format & + rd_res_val_client->mode_mask) << + rd_res_val_client->mode_shift; + temp |= (stripe_io->alignment & + rd_res_val_client->alignment_mask) << + rd_res_val_client->alignment_shift; + temp_reg[count++] = temp; + + /* latency buffer allocation */ + temp_reg[count++] = rd_reg->offset + + rd_reg_client->latency_buf_allocation; + temp_reg[count++] = io_port_info->latency_buf_size; + + header_size = cdm_ops->cdm_get_cmd_header_size( + CAM_CDM_CMD_REG_RANDOM); + idx = io_port_cdm->num_s_cmd_bufs[l]; + io_port_cdm->s_cdm_info[l][idx].len = sizeof(temp) * + (count + header_size); + io_port_cdm->s_cdm_info[l][idx].offset = + prepare->kmd_buf_offset; + io_port_cdm->s_cdm_info[l][idx].addr = kmd_buf; + io_port_cdm->num_s_cmd_bufs[l]++; + + next_buff_addr = cdm_ops->cdm_write_regrandom( + kmd_buf, count/2, temp_reg); + if (next_buff_addr > kmd_buf) + prepare->kmd_buf_offset += + ((count + header_size) * sizeof(temp)); + kmd_buf = next_buff_addr; + CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d", + batch_idx, io_idx, k, l); + for (m = 0; m < count; m += 2) + CAM_DBG(CAM_OPE, "%d: off: 0x%x val: 0x%x", + m, temp_reg[m], temp_reg[m+1]); + CAM_DBG(CAM_OPE, "kmd_buf:%x offset:%d", + kmd_buf, prepare->kmd_buf_offset); + CAM_DBG(CAM_OPE, "RD cmdbufs:%d off:%d len %d", + io_port_cdm->num_s_cmd_bufs[l], + io_port_cdm->s_cdm_info[l][idx].offset, + io_port_cdm->s_cdm_info[l][idx].len); + count = 0; + } + } + + return kmd_buf; +} + +static uint32_t *cam_ope_bus_rm_disable(struct ope_hw *ope_hw_info, + int32_t ctx_id, struct cam_ope_dev_prepare_req *prepare, + int batch_idx, int rm_idx, + uint32_t *kmd_buf, uint32_t num_stripes) +{ + int l; + uint32_t idx; + uint32_t req_idx; + uint32_t temp_reg[128]; + uint32_t count = 0; 
+ uint32_t temp = 0; + uint32_t header_size; + uint32_t *next_buff_addr = NULL; + struct cam_ope_ctx *ctx_data; + struct ope_bus_rd_ctx *bus_rd_ctx; + struct cam_ope_bus_rd_reg *rd_reg; + struct cam_ope_bus_rd_client_reg *rd_reg_client; + struct ope_bus_rd_io_port_cdm_batch *io_port_cdm_batch; + struct ope_bus_rd_io_port_cdm_info *io_port_cdm; + struct cam_cdm_utils_ops *cdm_ops; + + + if (ctx_id < 0 || !prepare) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, prepare); + return NULL; + } + + if (batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx); + return NULL; + } + + if (rm_idx >= MAX_RD_CLIENTS) { + CAM_ERR(CAM_OPE, "Invalid read client: %d", rm_idx); + return NULL; + } + + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + bus_rd_ctx = bus_rd->bus_rd_ctx[ctx_id]; + io_port_cdm_batch = &bus_rd_ctx->io_port_cdm_batch; + rd_reg = ope_hw_info->bus_rd_reg; + + CAM_DBG(CAM_OPE, + "kmd_buf = 0x%x req_idx = %d offset = %d rd_idx %d b %d", + kmd_buf, req_idx, prepare->kmd_buf_offset, rm_idx, batch_idx); + + io_port_cdm = + &bus_rd_ctx->io_port_cdm_batch.io_port_cdm[batch_idx]; + + for (l = 0; l < num_stripes; l++) { + /* stripe level info */ + rd_reg_client = &rd_reg->rd_clients[rm_idx]; + + /* Core cfg: enable, Mode */ + temp_reg[count++] = rd_reg->offset + + rd_reg_client->core_cfg; + temp_reg[count++] = 0; + + header_size = cdm_ops->cdm_get_cmd_header_size( + CAM_CDM_CMD_REG_RANDOM); + idx = io_port_cdm->num_s_cmd_bufs[l]; + io_port_cdm->s_cdm_info[l][idx].len = + sizeof(temp) * (count + header_size); + io_port_cdm->s_cdm_info[l][idx].offset = + prepare->kmd_buf_offset; + io_port_cdm->s_cdm_info[l][idx].addr = kmd_buf; + io_port_cdm->num_s_cmd_bufs[l]++; + + next_buff_addr = cdm_ops->cdm_write_regrandom( + kmd_buf, count/2, temp_reg); + if (next_buff_addr > kmd_buf) + prepare->kmd_buf_offset += ((count + header_size) * + sizeof(temp)); + kmd_buf = next_buff_addr; + 
CAM_DBG(CAM_OPE, "RD cmd bufs = %d", + io_port_cdm->num_s_cmd_bufs[l]); + CAM_DBG(CAM_OPE, "stripe %d off:%d len:%d", + l, io_port_cdm->s_cdm_info[l][idx].offset, + io_port_cdm->s_cdm_info[l][idx].len); + count = 0; + } + + prepare->rd_cdm_batch = &bus_rd_ctx->io_port_cdm_batch; + + return kmd_buf; +} + +static int cam_ope_bus_rd_prepare(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + int i, j; + int32_t combo_idx; + uint32_t req_idx, count = 0, temp; + uint32_t temp_reg[32] = {0}; + uint32_t header_size; + uint32_t *kmd_buf; + uint32_t *next_buff_addr = NULL; + int is_rm_enabled; + struct cam_ope_dev_prepare_req *prepare; + struct cam_ope_ctx *ctx_data; + struct cam_ope_request *ope_request; + struct ope_io_buf *io_buf; + struct ope_bus_rd_ctx *bus_rd_ctx; + struct cam_ope_bus_rd_reg *rd_reg; + struct cam_ope_bus_rd_reg_val *rd_reg_val; + struct ope_bus_rd_io_port_cdm_batch *io_port_cdm_batch; + struct ope_bus_rd_io_port_cdm_info *io_port_cdm = NULL; + struct cam_cdm_utils_ops *cdm_ops; + int32_t num_stripes = 0; + + if (ctx_id < 0 || !data) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data); + return -EINVAL; + } + prepare = data; + + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + ope_request = ctx_data->req_list[req_idx]; + kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr + + prepare->kmd_buf_offset; + CAM_DBG(CAM_OPE, "req_idx = %d req_id = %lld", + req_idx, ope_request->request_id); + CAM_DBG(CAM_OPE, "KMD buf and offset = %x %d", + kmd_buf, prepare->kmd_buf_offset); + bus_rd_ctx = bus_rd->bus_rd_ctx[ctx_id]; + io_port_cdm_batch = + &bus_rd_ctx->io_port_cdm_batch; + memset(io_port_cdm_batch, 0, + sizeof(struct ope_bus_rd_io_port_cdm_batch)); + rd_reg = ope_hw_info->bus_rd_reg; + rd_reg_val = ope_hw_info->bus_rd_reg_val; + + for (i = 0; i < ope_request->num_batch; i++) { + for (j = 0; j < ope_request->num_io_bufs[i]; j++) { + io_buf = ope_request->io_buf[i][j]; + 
if (io_buf->direction != CAM_BUF_INPUT) + continue; + + CAM_DBG(CAM_OPE, "batch:%d iobuf:%d direction:%d", + i, j, io_buf->direction); + io_port_cdm = + &bus_rd_ctx->io_port_cdm_batch.io_port_cdm[i]; + + combo_idx = cam_ope_bus_rd_combo_idx(io_buf->format); + if (combo_idx < 0) { + CAM_ERR(CAM_OPE, "Invalid combo_idx"); + return combo_idx; + } + + kmd_buf = cam_ope_bus_rd_update(ope_hw_info, + ctx_id, kmd_buf, i, j, prepare, + &num_stripes); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + } + } + + if (!io_port_cdm) { + rc = -EINVAL; + goto end; + } + + /* Disable RMs which are not enabled */ + for (i = 0; i < ope_request->num_batch; i++) { + for (j = 0; j < rd_reg_val->num_clients; j++) { + is_rm_enabled = cam_ope_bus_is_rm_enabled( + ope_request, i, j); + if (is_rm_enabled < 0) { + rc = -EINVAL; + goto end; + } + if (is_rm_enabled) + continue; + + kmd_buf = cam_ope_bus_rm_disable(ope_hw_info, + ctx_id, prepare, i, j, + kmd_buf, num_stripes); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + } + } + + /* Go command */ + count = 0; + temp_reg[count++] = rd_reg->offset + + rd_reg->input_if_cmd; + temp = 0; + temp |= rd_reg_val->go_cmd; + temp_reg[count++] = temp; + + header_size = + cdm_ops->cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM); + for (i = 0; i < ope_request->num_batch; i++) { + io_port_cdm = + &bus_rd_ctx->io_port_cdm_batch.io_port_cdm[i]; + io_port_cdm->go_cmd_addr = kmd_buf; + io_port_cdm->go_cmd_len = + sizeof(temp) * (count + header_size); + io_port_cdm->go_cmd_offset = + prepare->kmd_buf_offset; + } + next_buff_addr = cdm_ops->cdm_write_regrandom( + kmd_buf, count/2, temp_reg); + if (next_buff_addr > kmd_buf) + prepare->kmd_buf_offset += + ((count + header_size) * sizeof(temp)); + kmd_buf = next_buff_addr; + CAM_DBG(CAM_OPE, "kmd_buf:%x,offset:%d", + kmd_buf, prepare->kmd_buf_offset); + CAM_DBG(CAM_OPE, "t_reg:%xcount: %d size:%d", + temp_reg, count, header_size); + prepare->rd_cdm_batch = &bus_rd_ctx->io_port_cdm_batch; + +end: + return rc; 
+} + +static int cam_ope_bus_rd_release(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + + if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX) { + CAM_ERR(CAM_OPE, "Invalid data: %d", ctx_id); + return -EINVAL; + } + + vfree(bus_rd->bus_rd_ctx[ctx_id]); + bus_rd->bus_rd_ctx[ctx_id] = NULL; + + return rc; +} + +static int cam_ope_bus_rd_acquire(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0, i; + struct ope_acquire_dev_info *in_acquire; + struct ope_bus_rd_ctx *bus_rd_ctx; + struct ope_bus_in_port_to_rm *in_port_to_rm; + struct cam_ope_bus_rd_reg_val *bus_rd_reg_val; + int combo_idx; + int in_port_idx; + + + if (ctx_id < 0 || !data || !ope_hw_info || ctx_id >= OPE_CTX_MAX) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x %x", + ctx_id, data, ope_hw_info); + return -EINVAL; + } + + bus_rd->bus_rd_ctx[ctx_id] = vzalloc(sizeof(struct ope_bus_rd_ctx)); + if (!bus_rd->bus_rd_ctx[ctx_id]) { + CAM_ERR(CAM_OPE, "Out of memory"); + return -ENOMEM; + } + + bus_rd->bus_rd_ctx[ctx_id]->ope_acquire = data; + in_acquire = data; + bus_rd_ctx = bus_rd->bus_rd_ctx[ctx_id]; + bus_rd_ctx->num_in_ports = in_acquire->num_in_res; + bus_rd_ctx->security_flag = in_acquire->secure_mode; + bus_rd_reg_val = ope_hw_info->bus_rd_reg_val; + + for (i = 0; i < in_acquire->num_in_res; i++) { + if (!in_acquire->in_res[i].width) + continue; + + CAM_DBG(CAM_OPE, "i = %d format = %u width = %x height = %x", + i, in_acquire->in_res[i].format, + in_acquire->in_res[i].width, + in_acquire->in_res[i].height); + CAM_DBG(CAM_OPE, "pix_pattern:%u alignment:%u unpack_format:%u", + in_acquire->in_res[i].pixel_pattern, + in_acquire->in_res[i].alignment, + in_acquire->in_res[i].unpacker_format); + CAM_DBG(CAM_OPE, "max_stripe = %u fps = %u", + in_acquire->in_res[i].max_stripe_size, + in_acquire->in_res[i].fps); + + in_port_idx = cam_ope_bus_rd_in_port_idx(i + 1); + if (in_port_idx < 0) { + CAM_ERR(CAM_OPE, "Invalid in_port_idx: %d", i + 1); + rc = -EINVAL; + goto end; + } + + 
in_port_to_rm = &bus_rd->in_port_to_rm[in_port_idx]; + combo_idx = cam_ope_bus_rd_combo_idx( + in_acquire->in_res[i].format); + if (combo_idx < 0) { + CAM_ERR(CAM_OPE, "Invalid format: %d", + in_acquire->in_res[i].format); + rc = -EINVAL; + goto end; + } + + if (!in_port_to_rm->num_rm[combo_idx]) { + CAM_ERR(CAM_OPE, "Invalid format for Input port"); + rc = -EINVAL; + goto end; + } + + bus_rd_ctx->io_port_info.input_port_id[i] = + in_acquire->in_res[i].res_id; + bus_rd_ctx->io_port_info.input_format_type[i] = + in_acquire->in_res[i].format; + if (in_acquire->in_res[i].pixel_pattern > + PIXEL_PATTERN_CRYCBY) { + CAM_ERR(CAM_OPE, "Invalid pix pattern = %u", + in_acquire->in_res[i].pixel_pattern); + rc = -EINVAL; + goto end; + } + + bus_rd_ctx->io_port_info.pixel_pattern[i] = + in_acquire->in_res[i].pixel_pattern; + bus_rd_ctx->io_port_info.latency_buf_size = + bus_rd_reg_val->latency_buf_size; + + CAM_DBG(CAM_OPE, "i:%d port_id = %u format %u pix_pattern = %u", + i, bus_rd_ctx->io_port_info.input_port_id[i], + bus_rd_ctx->io_port_info.input_format_type[i], + bus_rd_ctx->io_port_info.pixel_pattern[i]); + CAM_DBG(CAM_OPE, "latency_buf_size = %u", + bus_rd_ctx->io_port_info.latency_buf_size); + } + +end: + return rc; +} + +static int cam_ope_bus_rd_init(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + struct cam_ope_bus_rd_reg_val *bus_rd_reg_val; + struct cam_ope_bus_rd_reg *bus_rd_reg; + struct cam_ope_dev_init *dev_init = data; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + + bus_rd_reg_val = ope_hw_info->bus_rd_reg_val; + bus_rd_reg = ope_hw_info->bus_rd_reg; + bus_rd_reg->base = dev_init->core_info->ope_hw_info->ope_bus_rd_base; + + /* OPE SW RESET */ + init_completion(&bus_rd->reset_complete); + + /* enable interrupt mask */ + cam_io_w_mb(bus_rd_reg_val->irq_mask, + ope_hw_info->bus_rd_reg->base + bus_rd_reg->irq_mask); + + cam_io_w_mb(bus_rd_reg_val->sw_reset, + ope_hw_info->bus_rd_reg->base 
+ bus_rd_reg->sw_reset); + + rc = cam_common_wait_for_completion_timeout( + &bus_rd->reset_complete, msecs_to_jiffies(30000)); + + if (!rc || rc < 0) { + CAM_ERR(CAM_OPE, "reset error result = %d", rc); + if (!rc) + rc = -ETIMEDOUT; + } else { + rc = 0; + } + + cam_io_w_mb(bus_rd_reg_val->irq_mask, + ope_hw_info->bus_rd_reg->base + bus_rd_reg->irq_mask); + + return rc; +} + +static int cam_ope_bus_rd_probe(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0, i, j, combo_idx, k; + struct cam_ope_bus_rd_reg_val *bus_rd_reg_val; + struct cam_ope_bus_rd_reg *bus_rd_reg; + struct ope_bus_in_port_to_rm *in_port_to_rm; + uint32_t input_port_idx; + uint32_t rm_idx; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + bus_rd = CAM_MEM_ZALLOC(sizeof(struct ope_bus_rd), GFP_KERNEL); + if (!bus_rd) { + CAM_ERR(CAM_OPE, "Out of memory"); + return -ENOMEM; + } + bus_rd->ope_hw_info = ope_hw_info; + bus_rd_reg_val = ope_hw_info->bus_rd_reg_val; + bus_rd_reg = ope_hw_info->bus_rd_reg; + + for (i = 0; i < bus_rd_reg_val->num_clients; i++) { + input_port_idx = + bus_rd_reg_val->rd_clients[i].input_port_id - 1; + in_port_to_rm = &bus_rd->in_port_to_rm[input_port_idx]; + if (bus_rd_reg_val->rd_clients[i].format_type & + BUS_RD_COMBO_BAYER_MASK) { + combo_idx = BUS_RD_BAYER; + rm_idx = in_port_to_rm->num_rm[combo_idx]; + in_port_to_rm->input_port_id = + bus_rd_reg_val->rd_clients[i].input_port_id; + in_port_to_rm->rm_port_id[combo_idx][rm_idx] = + bus_rd_reg_val->rd_clients[i].rm_port_id; + if (!in_port_to_rm->num_rm[combo_idx]) + in_port_to_rm->num_combos++; + in_port_to_rm->num_rm[combo_idx]++; + } + if (bus_rd_reg_val->rd_clients[i].format_type & + BUS_RD_COMBO_YUV_MASK) { + combo_idx = BUS_RD_YUV; + rm_idx = in_port_to_rm->num_rm[combo_idx]; + in_port_to_rm->input_port_id = + bus_rd_reg_val->rd_clients[i].input_port_id; + in_port_to_rm->rm_port_id[combo_idx][rm_idx] = + bus_rd_reg_val->rd_clients[i].rm_port_id; + if 
(!in_port_to_rm->num_rm[combo_idx]) + in_port_to_rm->num_combos++; + in_port_to_rm->num_rm[combo_idx]++; + } + } + + for (i = 0; i < OPE_IN_RES_MAX; i++) { + in_port_to_rm = &bus_rd->in_port_to_rm[i]; + CAM_DBG(CAM_OPE, "input port id = %d num_combos = %d", + in_port_to_rm->input_port_id, + in_port_to_rm->num_combos); + for (j = 0; j < in_port_to_rm->num_combos; j++) { + CAM_DBG(CAM_OPE, "combo idx = %d num_rms = %d", + j, in_port_to_rm->num_rm[j]); + for (k = 0; k < in_port_to_rm->num_rm[j]; k++) { + CAM_DBG(CAM_OPE, "rm port id = %d", + in_port_to_rm->rm_port_id[j][k]); + } + } + } + + return rc; +} + +static int cam_ope_bus_rd_isr(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + uint32_t irq_status; + struct cam_ope_bus_rd_reg *bus_rd_reg; + struct cam_ope_bus_rd_reg_val *bus_rd_reg_val; + struct cam_ope_irq_data *irq_data = data; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + + bus_rd_reg = ope_hw_info->bus_rd_reg; + bus_rd_reg_val = ope_hw_info->bus_rd_reg_val; + + /* Read and Clear Top Interrupt status */ + irq_status = cam_io_r_mb(bus_rd_reg->base + bus_rd_reg->irq_status); + cam_io_w_mb(irq_status, + bus_rd_reg->base + bus_rd_reg->irq_clear); + + cam_io_w_mb(bus_rd_reg_val->irq_set_clear, + bus_rd_reg->base + bus_rd_reg->irq_cmd); + + if (irq_status & bus_rd_reg_val->rst_done) { + complete(&bus_rd->reset_complete); + CAM_DBG(CAM_OPE, "ope bus rd reset done"); + } + + if ((irq_status & bus_rd_reg_val->violation) == + bus_rd_reg_val->violation) { + irq_data->error = 1; + CAM_ERR(CAM_OPE, "ope bus rd CCIF vioalation"); + } + + return rc; +} + +int cam_ope_bus_rd_process(struct ope_hw *ope_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data) +{ + int rc = -EINVAL; + + switch (cmd_id) { + case OPE_HW_PROBE: + CAM_DBG(CAM_OPE, "OPE_HW_PROBE: E"); + rc = cam_ope_bus_rd_probe(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_PROBE: X"); + break; + case OPE_HW_INIT: + CAM_DBG(CAM_OPE, 
"OPE_HW_INIT: E"); + rc = cam_ope_bus_rd_init(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_INIT: X"); + break; + case OPE_HW_ACQUIRE: + CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: E"); + rc = cam_ope_bus_rd_acquire(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: X"); + break; + case OPE_HW_RELEASE: + CAM_DBG(CAM_OPE, "OPE_HW_RELEASE: E"); + rc = cam_ope_bus_rd_release(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_RELEASE: X"); + break; + case OPE_HW_PREPARE: + CAM_DBG(CAM_OPE, "OPE_HW_PREPARE: E"); + rc = cam_ope_bus_rd_prepare(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_PREPARE: X"); + break; + case OPE_HW_ISR: + rc = cam_ope_bus_rd_isr(ope_hw_info, 0, data); + break; + case OPE_HW_DEINIT: + case OPE_HW_START: + case OPE_HW_STOP: + case OPE_HW_FLUSH: + case OPE_HW_CLK_UPDATE: + case OPE_HW_BW_UPDATE: + case OPE_HW_RESET: + case OPE_HW_SET_IRQ_CB: + rc = 0; + CAM_DBG(CAM_OPE, "Unhandled cmds: %d", cmd_id); + break; + default: + CAM_ERR(CAM_OPE, "Unsupported cmd: %d", cmd_id); + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.h new file mode 100644 index 0000000000..d6ebb3313c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_rd/ope_bus_rd.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef OPE_BUS_RD_H +#define OPE_BUS_RD_H + +#include +#include +#include +#include "ope_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_soc_util.h" +#include "cam_context.h" +#include "cam_ope_context.h" +#include "cam_ope_hw_mgr.h" + +/** + * struct ope_bus_rd_cdm_info + * + * @offset: Offset + * @addr: Address + * @len: Length + */ +struct ope_bus_rd_cdm_info { + uint32_t offset; + uint32_t *addr; + uint32_t len; +}; + +/** + * struct ope_bus_rd_io_port_cdm_info + * + * @num_frames_cmds: Number of frame commands + * @f_cdm_info: Frame cdm info + * @num_stripes: Number of stripes + * @num_s_cmd_bufs: Number of stripe commands + * @s_cdm_info: Stripe cdm info + * @go_cmd_addr: GO command address + * @go_cmd_len: GO command length + */ +struct ope_bus_rd_io_port_cdm_info { + uint32_t num_frames_cmds; + struct ope_bus_rd_cdm_info f_cdm_info[MAX_RD_CLIENTS]; + uint32_t num_stripes; + uint32_t num_s_cmd_bufs[OPE_MAX_STRIPES]; + struct ope_bus_rd_cdm_info s_cdm_info[OPE_MAX_STRIPES][MAX_RD_CLIENTS]; + uint32_t go_cmd_offset; + uint32_t *go_cmd_addr; + uint32_t go_cmd_len; +}; + +/** + * struct ope_bus_rd_io_port_cdm_batch + * + * num_batch: Number of batches + * io_port_cdm: CDM IO Port Info + */ +struct ope_bus_rd_io_port_cdm_batch { + uint32_t num_batch; + struct ope_bus_rd_io_port_cdm_info io_port_cdm[OPE_MAX_BATCH_SIZE]; +}; + +/** + * struct ope_bus_rd_rm + * + * @rm_port_id: RM port ID + * @format_type: Format type + */ +struct ope_bus_rd_rm { + uint32_t rm_port_id; + uint32_t format_type; +}; + +/** + * struct ope_bus_in_port_to_rm + * + * @input_port_id: Intput port ID + * @num_combos: Number of combos + * @num_rm: Number of RMs + * @rm_port_id: RM port Id + */ +struct ope_bus_in_port_to_rm { + uint32_t input_port_id; + uint32_t num_combos; + uint32_t num_rm[BUS_RD_COMBO_MAX]; + uint32_t rm_port_id[BUS_RD_COMBO_MAX][MAX_RD_CLIENTS]; +}; + +/** + * struct ope_bus_rd_io_port_info + * + * @pixel_pattern: Pixel pattern + * 
@input_port_id: Port Id + * @input_format_type: Format type + * @latency_buf_size: Latency buffer size + */ +struct ope_bus_rd_io_port_info { + uint32_t pixel_pattern[OPE_IN_RES_MAX]; + uint32_t input_port_id[OPE_IN_RES_MAX]; + uint32_t input_format_type[OPE_IN_RES_MAX]; + uint32_t latency_buf_size; +}; + +/** + * struct ope_bus_rd_ctx + * + * @ope_acquire: OPE acquire structure + * @security_flag: security flag + * @num_in_ports: Number of in ports + * @io_port_info: IO port info + * @io_port_cdm_batch: IO port cdm info + */ +struct ope_bus_rd_ctx { + struct ope_acquire_dev_info *ope_acquire; + bool security_flag; + uint32_t num_in_ports; + struct ope_bus_rd_io_port_info io_port_info; + struct ope_bus_rd_io_port_cdm_batch io_port_cdm_batch; +}; + +/** + * struct ope_bus_rd + * + * @ope_hw_info: OPE hardware info + * @in_port_to_rm: IO port to RM mapping + * @bus_rd_ctx: RM context + */ +struct ope_bus_rd { + struct ope_hw *ope_hw_info; + struct ope_bus_in_port_to_rm in_port_to_rm[OPE_IN_RES_MAX]; + struct ope_bus_rd_ctx *bus_rd_ctx[OPE_CTX_MAX]; + struct completion reset_complete; +}; +#endif /* OPE_BUS_RD_H */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.c b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.c new file mode 100644 index 0000000000..9736e6375b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.c @@ -0,0 +1,811 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "ope_core.h" +#include "ope_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "ope_hw.h" +#include "ope_dev_intf.h" +#include "ope_bus_wr.h" +#include "cam_cdm_util.h" +#include "cam_mem_mgr_api.h" + +static struct ope_bus_wr *wr_info; + +enum cam_ope_bus_packer_format { + PACKER_FMT_PLAIN_128 = 0x0, + PACKER_FMT_PLAIN_8 = 0x1, + PACKER_FMT_PLAIN_8_ODD_EVEN = 0x2, + PACKER_FMT_PLAIN_8_LSB_MSB_10 = 0x3, + PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN = 0x4, + PACKER_FMT_PLAIN_16_10BPP = 0x5, + PACKER_FMT_PLAIN_16_12BPP = 0x6, + PACKER_FMT_PLAIN_16_14BPP = 0x7, + PACKER_FMT_PLAIN_16_16BPP = 0x8, + PACKER_FMT_PLAIN_32 = 0x9, + PACKER_FMT_PLAIN_64 = 0xA, + PACKER_FMT_TP_10 = 0xB, + PACKER_FMT_MIPI_10 = 0xC, + PACKER_FMT_MIPI_12 = 0xD, + PACKER_FMT_MAX = 0xE, +}; + +static int cam_ope_bus_en_port_idx( + struct cam_ope_request *ope_request, + uint32_t batch_idx, + uint32_t output_port_id) +{ + int i; + struct ope_io_buf *io_buf; + + if (batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx); + return -EINVAL; + } + + for (i = 0; i < ope_request->num_io_bufs[batch_idx]; i++) { + io_buf = ope_request->io_buf[batch_idx][i]; + if (io_buf->direction != CAM_BUF_OUTPUT) + continue; + if (io_buf->resource_type == output_port_id) + return i; + } + + return -EINVAL; +} +static int cam_ope_bus_wr_out_port_idx(uint32_t output_port_id) +{ + int i; + + for (i = 0; i < OPE_OUT_RES_MAX; i++) + if (wr_info->out_port_to_wm[i].output_port_id == output_port_id) + return i; + + return -EINVAL; +} + + +static int cam_ope_bus_wr_subsample( + struct cam_ope_ctx *ctx_data, + struct ope_hw *ope_hw_info, + struct cam_ope_bus_wr_client_reg *wr_reg_client, + struct ope_io_buf *io_buf, + uint32_t *temp_reg, 
uint32_t count, + int plane_idx, int stripe_idx) +{ + int k, l; + struct cam_ope_bus_wr_reg *wr_reg; + struct cam_ope_bus_wr_reg_val *wr_reg_val; + + wr_reg = ope_hw_info->bus_wr_reg; + wr_reg_val = ope_hw_info->bus_wr_reg_val; + + if (plane_idx >= OPE_MAX_PLANES) { + CAM_ERR(CAM_OPE, "Invalid plane idx: %d", plane_idx); + return count; + } + k = plane_idx; + l = stripe_idx; + + /* subsample period and pattern */ + if ((ctx_data->ope_acquire.dev_type == + OPE_DEV_TYPE_OPE_RT) && l == 0) { + temp_reg[count++] = wr_reg->offset + + wr_reg_client->subsample_period; + temp_reg[count++] = io_buf->num_stripes[k]; + + temp_reg[count++] = wr_reg->offset + + wr_reg_client->subsample_pattern; + temp_reg[count++] = 1 << + (io_buf->num_stripes[k] - 1); + } else if ((ctx_data->ope_acquire.dev_type == + OPE_DEV_TYPE_OPE_NRT) && + ((l % + ctx_data->ope_acquire.nrt_stripes_for_arb) == + 0)) { + if (io_buf->num_stripes[k] >= + (l + + ctx_data->ope_acquire.nrt_stripes_for_arb)){ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->subsample_period; + temp_reg[count++] = + ctx_data->ope_acquire.nrt_stripes_for_arb; + + temp_reg[count++] = wr_reg->offset + + wr_reg_client->subsample_pattern; + temp_reg[count++] = 1 << + (ctx_data->ope_acquire.nrt_stripes_for_arb - + 1); + } else { + temp_reg[count++] = wr_reg->offset + + wr_reg_client->subsample_period; + temp_reg[count++] = io_buf->num_stripes[k] - l; + + /* subsample pattern */ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->subsample_pattern; + temp_reg[count++] = 1 << (io_buf->num_stripes[k] - + l - 1); + } + } + return count; +} + +static int cam_ope_bus_wr_release(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + + if (ctx_id < 0 || ctx_id >= OPE_CTX_MAX) { + CAM_ERR(CAM_OPE, "Invalid data: %d", ctx_id); + return -EINVAL; + } + + vfree(wr_info->bus_wr_ctx[ctx_id]); + wr_info->bus_wr_ctx[ctx_id] = NULL; + + return rc; +} + +static uint32_t *cam_ope_bus_wr_update(struct ope_hw *ope_hw_info, + 
int32_t ctx_id, struct cam_ope_dev_prepare_req *prepare, + int batch_idx, int io_idx, + uint32_t *kmd_buf, uint32_t *num_stripes) +{ + int k, l, m, out_port_idx; + uint32_t idx; + uint32_t num_wm_ports; + uint32_t comb_idx; + uint32_t req_idx; + uint32_t temp_reg[128]; + uint32_t count = 0; + uint32_t temp = 0; + uint32_t wm_port_id; + uint32_t header_size; + uint32_t *next_buff_addr = NULL; + struct cam_hw_prepare_update_args *prepare_args; + struct cam_ope_ctx *ctx_data; + struct cam_ope_request *ope_request; + struct ope_io_buf *io_buf; + struct ope_stripe_io *stripe_io; + struct ope_bus_wr_ctx *bus_wr_ctx; + struct cam_ope_bus_wr_reg *wr_reg; + struct cam_ope_bus_wr_client_reg *wr_reg_client; + struct cam_ope_bus_wr_reg_val *wr_reg_val; + struct cam_ope_bus_wr_client_reg_val *wr_res_val_client; + struct ope_bus_out_port_to_wm *out_port_to_wm; + struct ope_bus_wr_io_port_cdm_batch *io_port_cdm_batch; + struct ope_bus_wr_io_port_cdm_info *io_port_cdm; + struct cam_cdm_utils_ops *cdm_ops; + + + if (ctx_id < 0 || !prepare) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, prepare); + return NULL; + } + + if (batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx); + return NULL; + } + + if (io_idx >= OPE_MAX_IO_BUFS) { + CAM_ERR(CAM_OPE, "Invalid IO idx: %d", io_idx); + return NULL; + } + + prepare_args = prepare->prepare_args; + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + ope_request = ctx_data->req_list[req_idx]; + bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id]; + io_port_cdm_batch = &bus_wr_ctx->io_port_cdm_batch; + wr_reg = ope_hw_info->bus_wr_reg; + wr_reg_val = ope_hw_info->bus_wr_reg_val; + + CAM_DBG(CAM_OPE, "kmd_buf = %x req_idx = %d req_id = %lld offset = %d", + kmd_buf, req_idx, ope_request->request_id, + prepare->kmd_buf_offset); + + io_buf = ope_request->io_buf[batch_idx][io_idx]; + CAM_DBG(CAM_OPE, "batch = %d io buf num = %d dir = %d rsc %d", + batch_idx, io_idx, 
io_buf->direction, io_buf->resource_type); + + io_port_cdm = + &bus_wr_ctx->io_port_cdm_batch.io_port_cdm[batch_idx]; + out_port_idx = + cam_ope_bus_wr_out_port_idx(io_buf->resource_type); + if (out_port_idx < 0) { + CAM_ERR(CAM_OPE, "Invalid idx for rsc type: %d", + io_buf->resource_type); + return NULL; + } + out_port_to_wm = &wr_info->out_port_to_wm[out_port_idx]; + comb_idx = BUS_WR_YUV; + num_wm_ports = out_port_to_wm->num_wm[comb_idx]; + + for (k = 0; k < io_buf->num_planes; k++) { + *num_stripes = io_buf->num_stripes[k]; + for (l = 0; l < io_buf->num_stripes[k]; l++) { + stripe_io = &io_buf->s_io[k][l]; + CAM_DBG(CAM_OPE, "comb_idx = %d p_idx = %d s_idx = %d", + comb_idx, k, l); + /* frame level info */ + /* stripe level info */ + wm_port_id = out_port_to_wm->wm_port_id[comb_idx][k]; + wr_reg_client = &wr_reg->wr_clients[wm_port_id]; + wr_res_val_client = &wr_reg_val->wr_clients[wm_port_id]; + + /* Core cfg: enable, Mode */ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->core_cfg; + temp = 0; + if (!stripe_io->disable_bus) + temp = wr_res_val_client->core_cfg_en; + temp |= ((wr_res_val_client->mode & + wr_res_val_client->mode_mask) << + wr_res_val_client->mode_shift); + temp_reg[count++] = temp; + + /* Address of the Image */ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->img_addr; + temp_reg[count++] = stripe_io->iova_addr; + + /* Buffer size */ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->img_cfg; + temp = 0; + temp = stripe_io->width; + temp |= (stripe_io->height & + wr_res_val_client->height_mask) << + wr_res_val_client->height_shift; + temp_reg[count++] = temp; + + /* x_init */ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->x_init; + temp_reg[count++] = stripe_io->x_init; + + /* stride */ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->stride; + temp_reg[count++] = stripe_io->stride; + + /* pack cfg : Format and alignment */ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->pack_cfg; + temp = 0; + + /* 
+ * In case of NV12, change the packer format of chroma + * plane to odd even byte swapped format + */ + + if (k == 1 && stripe_io->format == CAM_FORMAT_NV12) + stripe_io->pack_format = + PACKER_FMT_PLAIN_8_ODD_EVEN; + + temp |= ((stripe_io->pack_format & + wr_res_val_client->format_mask) << + wr_res_val_client->format_shift); + temp |= ((stripe_io->alignment & + wr_res_val_client->alignment_mask) << + wr_res_val_client->alignment_shift); + temp_reg[count++] = temp; + + /* subsample period and pattern */ + count = cam_ope_bus_wr_subsample( + ctx_data, ope_hw_info, + wr_reg_client, io_buf, + temp_reg, count, k, l); + + header_size = cdm_ops->cdm_get_cmd_header_size( + CAM_CDM_CMD_REG_RANDOM); + idx = io_port_cdm->num_s_cmd_bufs[l]; + io_port_cdm->s_cdm_info[l][idx].len = + sizeof(temp) * (count + header_size); + io_port_cdm->s_cdm_info[l][idx].offset = + prepare->kmd_buf_offset; + io_port_cdm->s_cdm_info[l][idx].addr = kmd_buf; + io_port_cdm->num_s_cmd_bufs[l]++; + + next_buff_addr = cdm_ops->cdm_write_regrandom( + kmd_buf, count/2, temp_reg); + if (next_buff_addr > kmd_buf) + prepare->kmd_buf_offset += + ((count + header_size) * sizeof(temp)); + kmd_buf = next_buff_addr; + + CAM_DBG(CAM_OPE, "b:%d io:%d p:%d s:%d", + batch_idx, io_idx, k, l); + for (m = 0; m < count; m += 2) + CAM_DBG(CAM_OPE, "%d: off: 0x%x val: 0x%x", + m, temp_reg[m], temp_reg[m+1]); + CAM_DBG(CAM_OPE, "kmdbuf:%x, offset:%d", + kmd_buf, prepare->kmd_buf_offset); + CAM_DBG(CAM_OPE, "WR cmd bufs = %d off:%d len:%d", + io_port_cdm->num_s_cmd_bufs[l], + io_port_cdm->s_cdm_info[l][idx].offset, + io_port_cdm->s_cdm_info[l][idx].len); + count = 0; + } + } + + return kmd_buf; +} + +static uint32_t *cam_ope_bus_wm_disable(struct ope_hw *ope_hw_info, + int32_t ctx_id, struct cam_ope_dev_prepare_req *prepare, + int batch_idx, int io_idx, + uint32_t *kmd_buf, uint32_t num_stripes) +{ + int k, l; + uint32_t idx; + uint32_t num_wm_ports; + uint32_t comb_idx; + uint32_t req_idx; + uint32_t temp_reg[128]; + 
uint32_t count = 0; + uint32_t temp = 0; + uint32_t wm_port_id; + uint32_t header_size; + uint32_t *next_buff_addr = NULL; + struct cam_ope_ctx *ctx_data; + struct ope_bus_wr_ctx *bus_wr_ctx; + struct cam_ope_bus_wr_reg *wr_reg; + struct cam_ope_bus_wr_client_reg *wr_reg_client; + struct ope_bus_out_port_to_wm *out_port_to_wm; + struct ope_bus_wr_io_port_cdm_batch *io_port_cdm_batch; + struct ope_bus_wr_io_port_cdm_info *io_port_cdm; + struct cam_cdm_utils_ops *cdm_ops; + + + if (ctx_id < 0 || !prepare) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, prepare); + return NULL; + } + + if (batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid batch idx: %d", batch_idx); + return NULL; + } + + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id]; + io_port_cdm_batch = &bus_wr_ctx->io_port_cdm_batch; + wr_reg = ope_hw_info->bus_wr_reg; + + CAM_DBG(CAM_OPE, + "kmd_buf = %x req_idx = %d offset = %d out_idx %d b %d", + kmd_buf, req_idx, prepare->kmd_buf_offset, io_idx, batch_idx); + + io_port_cdm = + &bus_wr_ctx->io_port_cdm_batch.io_port_cdm[batch_idx]; + out_port_to_wm = &wr_info->out_port_to_wm[io_idx]; + comb_idx = BUS_WR_YUV; + num_wm_ports = out_port_to_wm->num_wm[comb_idx]; + + for (k = 0; k < num_wm_ports; k++) { + for (l = 0; l < num_stripes; l++) { + /* frame level info */ + /* stripe level info */ + wm_port_id = out_port_to_wm->wm_port_id[comb_idx][k]; + wr_reg_client = &wr_reg->wr_clients[wm_port_id]; + + /* Core cfg: enable, Mode */ + temp_reg[count++] = wr_reg->offset + + wr_reg_client->core_cfg; + temp_reg[count++] = 0; + + header_size = cdm_ops->cdm_get_cmd_header_size( + CAM_CDM_CMD_REG_RANDOM); + idx = io_port_cdm->num_s_cmd_bufs[l]; + io_port_cdm->s_cdm_info[l][idx].len = + sizeof(temp) * (count + header_size); + io_port_cdm->s_cdm_info[l][idx].offset = + prepare->kmd_buf_offset; + io_port_cdm->s_cdm_info[l][idx].addr = kmd_buf; + 
io_port_cdm->num_s_cmd_bufs[l]++; + + next_buff_addr = cdm_ops->cdm_write_regrandom( + kmd_buf, count/2, temp_reg); + + if (next_buff_addr > kmd_buf) + prepare->kmd_buf_offset += + ((count + header_size) * sizeof(temp)); + kmd_buf = next_buff_addr; + + CAM_DBG(CAM_OPE, "WR cmd bufs = %d", + io_port_cdm->num_s_cmd_bufs[l]); + CAM_DBG(CAM_OPE, "s:%d off:%d len:%d", + l, io_port_cdm->s_cdm_info[l][idx].offset, + io_port_cdm->s_cdm_info[l][idx].len); + count = 0; + } + } + + prepare->wr_cdm_batch = &bus_wr_ctx->io_port_cdm_batch; + + return kmd_buf; +} + +static int cam_ope_bus_wr_prepare(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + int i, j = 0; + uint32_t req_idx; + uint32_t *kmd_buf; + struct cam_ope_dev_prepare_req *prepare; + struct cam_ope_ctx *ctx_data; + struct cam_ope_request *ope_request; + struct ope_io_buf *io_buf; + uint32_t temp; + int io_buf_idx; + uint32_t num_stripes = 0; + struct ope_bus_wr_io_port_cdm_batch *io_port_cdm_batch; + struct ope_bus_wr_ctx *bus_wr_ctx; + + if (ctx_id < 0 || !data) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data); + return -EINVAL; + } + prepare = data; + ctx_data = prepare->ctx_data; + req_idx = prepare->req_idx; + bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id]; + + ope_request = ctx_data->req_list[req_idx]; + kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr + + (prepare->kmd_buf_offset / sizeof(temp)); + + + CAM_DBG(CAM_OPE, "kmd_buf = %x req_idx = %d req_id = %lld offset = %d", + kmd_buf, req_idx, ope_request->request_id, + prepare->kmd_buf_offset); + + io_port_cdm_batch = &wr_info->bus_wr_ctx[ctx_id]->io_port_cdm_batch; + memset(io_port_cdm_batch, 0, + sizeof(struct ope_bus_wr_io_port_cdm_batch)); + + for (i = 0; i < ope_request->num_batch; i++) { + for (j = 0; j < ope_request->num_io_bufs[i]; j++) { + io_buf = ope_request->io_buf[i][j]; + CAM_DBG(CAM_OPE, "batch = %d io buf num = %d dir = %d", + i, j, io_buf->direction); + if (io_buf->direction != CAM_BUF_OUTPUT) + continue; + 
+ kmd_buf = cam_ope_bus_wr_update(ope_hw_info, + ctx_id, prepare, i, j, + kmd_buf, &num_stripes); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + } + } + + /* Disable WMs which are not enabled */ + for (i = 0; i < ope_request->num_batch; i++) { + for (j = OPE_OUT_RES_VIDEO; j <= OPE_OUT_RES_MAX; j++) { + io_buf_idx = cam_ope_bus_en_port_idx(ope_request, i, j); + if (io_buf_idx >= 0) + continue; + + io_buf_idx = cam_ope_bus_wr_out_port_idx(j); + if (io_buf_idx < 0) { + CAM_ERR(CAM_OPE, "Invalid idx for rsc type:%d", + j); + return io_buf_idx; + } + kmd_buf = cam_ope_bus_wm_disable(ope_hw_info, + ctx_id, prepare, i, io_buf_idx, + kmd_buf, num_stripes); + } + } + prepare->wr_cdm_batch = &bus_wr_ctx->io_port_cdm_batch; + +end: + return rc; +} + +static int cam_ope_bus_wr_acquire(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0, i; + struct ope_acquire_dev_info *in_acquire; + struct ope_bus_wr_ctx *bus_wr_ctx; + struct ope_bus_out_port_to_wm *out_port_to_wr; + int combo_idx; + int out_port_idx; + + if (ctx_id < 0 || !data || ctx_id >= OPE_CTX_MAX) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data); + return -EINVAL; + } + + wr_info->bus_wr_ctx[ctx_id] = vzalloc(sizeof(struct ope_bus_wr_ctx)); + if (!wr_info->bus_wr_ctx[ctx_id]) { + CAM_ERR(CAM_OPE, "Out of memory"); + return -ENOMEM; + } + + wr_info->bus_wr_ctx[ctx_id]->ope_acquire = data; + in_acquire = data; + bus_wr_ctx = wr_info->bus_wr_ctx[ctx_id]; + bus_wr_ctx->num_out_ports = in_acquire->num_out_res; + bus_wr_ctx->security_flag = in_acquire->secure_mode; + + for (i = 0; i < in_acquire->num_out_res; i++) { + if (!in_acquire->out_res[i].width) + continue; + + CAM_DBG(CAM_OPE, "i = %d format = %u width = %x height = %x", + i, in_acquire->out_res[i].format, + in_acquire->out_res[i].width, + in_acquire->out_res[i].height); + CAM_DBG(CAM_OPE, "pix_pattern:%u alignment:%u packer_format:%u", + in_acquire->out_res[i].pixel_pattern, + in_acquire->out_res[i].alignment, + 
in_acquire->out_res[i].packer_format); + CAM_DBG(CAM_OPE, "subsample_period = %u subsample_pattern = %u", + in_acquire->out_res[i].subsample_period, + in_acquire->out_res[i].subsample_pattern); + + out_port_idx = + cam_ope_bus_wr_out_port_idx(in_acquire->out_res[i].res_id); + if (out_port_idx < 0) { + CAM_DBG(CAM_OPE, "Invalid in_port_idx: %d", + in_acquire->out_res[i].res_id); + rc = -EINVAL; + goto end; + } + out_port_to_wr = &wr_info->out_port_to_wm[out_port_idx]; + combo_idx = BUS_WR_YUV; + if (!out_port_to_wr->num_wm[combo_idx]) { + CAM_DBG(CAM_OPE, "Invalid format for Input port"); + rc = -EINVAL; + goto end; + } + + bus_wr_ctx->io_port_info.output_port_id[i] = + in_acquire->out_res[i].res_id; + bus_wr_ctx->io_port_info.output_format_type[i] = + in_acquire->out_res[i].format; + if (in_acquire->out_res[i].pixel_pattern > + PIXEL_PATTERN_CRYCBY) { + CAM_DBG(CAM_OPE, "Invalid pix pattern = %u", + in_acquire->out_res[i].pixel_pattern); + rc = -EINVAL; + goto end; + } + + bus_wr_ctx->io_port_info.pixel_pattern[i] = + in_acquire->out_res[i].pixel_pattern; + bus_wr_ctx->io_port_info.latency_buf_size = 4096; + CAM_DBG(CAM_OPE, "i:%d port_id = %u format %u pix_pattern = %u", + i, bus_wr_ctx->io_port_info.output_port_id[i], + bus_wr_ctx->io_port_info.output_format_type[i], + bus_wr_ctx->io_port_info.pixel_pattern[i]); + CAM_DBG(CAM_OPE, "latency_buf_size = %u", + bus_wr_ctx->io_port_info.latency_buf_size); + } + +end: + return rc; +} + +static int cam_ope_bus_wr_init(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + struct cam_ope_bus_wr_reg_val *bus_wr_reg_val; + struct cam_ope_bus_wr_reg *bus_wr_reg; + struct cam_ope_dev_init *dev_init = data; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + + wr_info->ope_hw_info = ope_hw_info; + bus_wr_reg_val = ope_hw_info->bus_wr_reg_val; + bus_wr_reg = ope_hw_info->bus_wr_reg; + bus_wr_reg->base = dev_init->core_info->ope_hw_info->ope_bus_wr_base; + + 
cam_io_w_mb(bus_wr_reg_val->irq_mask_0, + ope_hw_info->bus_wr_reg->base + bus_wr_reg->irq_mask_0); + cam_io_w_mb(bus_wr_reg_val->irq_mask_1, + ope_hw_info->bus_wr_reg->base + bus_wr_reg->irq_mask_1); + + return rc; +} + +static int cam_ope_bus_wr_probe(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0, i, j, combo_idx, k; + struct cam_ope_bus_wr_reg_val *bus_wr_reg_val; + struct ope_bus_out_port_to_wm *out_port_to_wm; + uint32_t output_port_idx; + uint32_t wm_idx; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + wr_info = CAM_MEM_ZALLOC(sizeof(struct ope_bus_wr), GFP_KERNEL); + if (!wr_info) { + CAM_ERR(CAM_OPE, "Out of memory"); + return -ENOMEM; + } + + wr_info->ope_hw_info = ope_hw_info; + bus_wr_reg_val = ope_hw_info->bus_wr_reg_val; + + for (i = 0; i < bus_wr_reg_val->num_clients; i++) { + output_port_idx = + bus_wr_reg_val->wr_clients[i].output_port_id - 1; + out_port_to_wm = &wr_info->out_port_to_wm[output_port_idx]; + combo_idx = BUS_WR_YUV; + wm_idx = out_port_to_wm->num_wm[combo_idx]; + out_port_to_wm->output_port_id = + bus_wr_reg_val->wr_clients[i].output_port_id; + out_port_to_wm->wm_port_id[combo_idx][wm_idx] = + bus_wr_reg_val->wr_clients[i].wm_port_id; + if (!out_port_to_wm->num_wm[combo_idx]) + out_port_to_wm->num_combos++; + out_port_to_wm->num_wm[combo_idx]++; + } + + for (i = 0; i < OPE_OUT_RES_MAX; i++) { + out_port_to_wm = &wr_info->out_port_to_wm[i]; + CAM_DBG(CAM_OPE, "output port id = %d num_combos = %d", + out_port_to_wm->output_port_id, + out_port_to_wm->num_combos); + for (j = 0; j < out_port_to_wm->num_combos; j++) { + CAM_DBG(CAM_OPE, "combo idx = %d num_wms = %d", + j, out_port_to_wm->num_wm[j]); + for (k = 0; k < out_port_to_wm->num_wm[j]; k++) { + CAM_DBG(CAM_OPE, "wm port id = %d", + out_port_to_wm->wm_port_id[j][k]); + } + } + } + + return rc; +} + +static int cam_ope_bus_wr_isr(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + uint32_t 
irq_status_0, irq_status_1, violation_status; + struct cam_ope_bus_wr_reg *bus_wr_reg; + struct cam_ope_bus_wr_reg_val *bus_wr_reg_val; + struct cam_ope_irq_data *irq_data = data; + + if (!ope_hw_info || !irq_data) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + + bus_wr_reg = ope_hw_info->bus_wr_reg; + bus_wr_reg_val = ope_hw_info->bus_wr_reg_val; + + /* Read and Clear Top Interrupt status */ + irq_status_0 = cam_io_r_mb(bus_wr_reg->base + bus_wr_reg->irq_status_0); + irq_status_1 = cam_io_r_mb(bus_wr_reg->base + bus_wr_reg->irq_status_1); + cam_io_w_mb(irq_status_0, + bus_wr_reg->base + bus_wr_reg->irq_clear_0); + cam_io_w_mb(irq_status_1, + bus_wr_reg->base + bus_wr_reg->irq_clear_1); + + cam_io_w_mb(bus_wr_reg_val->irq_set_clear, + bus_wr_reg->base + bus_wr_reg->irq_cmd); + + if (irq_status_0 & bus_wr_reg_val->cons_violation) { + irq_data->error = 1; + CAM_ERR(CAM_OPE, "ope bus wr cons_violation"); + } + + if (irq_status_0 & bus_wr_reg_val->violation) { + irq_data->error = 1; + violation_status = cam_io_r_mb(bus_wr_reg->base + + bus_wr_reg->violation_status); + CAM_ERR(CAM_OPE, + "ope bus wr violation, violation_status 0x%x", + violation_status); + } + + if (irq_status_0 & bus_wr_reg_val->img_size_violation) { + irq_data->error = 1; + violation_status = cam_io_r_mb(bus_wr_reg->base + + bus_wr_reg->image_size_violation_status); + CAM_ERR(CAM_OPE, + "ope bus wr img_size_violation, violation_status 0x%x", + violation_status); + } + + return rc; +} + +int cam_ope_bus_wr_process(struct ope_hw *ope_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data) +{ + int rc = 0; + + switch (cmd_id) { + case OPE_HW_PROBE: + CAM_DBG(CAM_OPE, "OPE_HW_PROBE: E"); + rc = cam_ope_bus_wr_probe(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_PROBE: X"); + break; + case OPE_HW_INIT: + CAM_DBG(CAM_OPE, "OPE_HW_INIT: E"); + rc = cam_ope_bus_wr_init(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_INIT: X"); + break; + case OPE_HW_ACQUIRE: + 
CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: E"); + rc = cam_ope_bus_wr_acquire(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: X"); + break; + case OPE_HW_RELEASE: + CAM_DBG(CAM_OPE, "OPE_HW_RELEASE: E"); + rc = cam_ope_bus_wr_release(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_RELEASE: X"); + break; + case OPE_HW_PREPARE: + CAM_DBG(CAM_OPE, "OPE_HW_PREPARE: E"); + rc = cam_ope_bus_wr_prepare(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_PREPARE: X"); + break; + case OPE_HW_DEINIT: + case OPE_HW_START: + case OPE_HW_STOP: + case OPE_HW_FLUSH: + case OPE_HW_CLK_UPDATE: + case OPE_HW_BW_UPDATE: + case OPE_HW_RESET: + case OPE_HW_SET_IRQ_CB: + rc = 0; + CAM_DBG(CAM_OPE, "Unhandled cmds: %d", cmd_id); + break; + case OPE_HW_ISR: + rc = cam_ope_bus_wr_isr(ope_hw_info, 0, data); + break; + default: + CAM_ERR(CAM_OPE, "Unsupported cmd: %d", cmd_id); + break; + } + + return rc; +} + diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.h new file mode 100644 index 0000000000..ed9b8cca68 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/bus_wr/ope_bus_wr.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef OPE_BUS_WR_H +#define OPE_BUS_WR_H + +#include +#include +#include +#include "ope_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_soc_util.h" +#include "cam_context.h" +#include "cam_ope_context.h" +#include "cam_ope_hw_mgr.h" + +/** + * struct ope_bus_wr_cdm_info + * + * @offset: Offset + * @addr: Address + * @len: Length + */ +struct ope_bus_wr_cdm_info { + uint32_t offset; + uint32_t *addr; + uint32_t len; +}; + +/** + * struct ope_bus_wr_io_port_cdm_info + * + * @num_frames_cmds: Number of frame commands + * @f_cdm_info: Frame cdm info + * @num_stripes: Number of stripes + * @num_s_cmd_bufs: Number of stripe commands + * @s_cdm_info: Stripe cdm info + * @go_cmd_addr: GO command address + * @go_cmd_len: GO command length + */ +struct ope_bus_wr_io_port_cdm_info { + uint32_t num_frames_cmds; + struct ope_bus_wr_cdm_info f_cdm_info[MAX_WR_CLIENTS]; + uint32_t num_stripes; + uint32_t num_s_cmd_bufs[OPE_MAX_STRIPES]; + struct ope_bus_wr_cdm_info s_cdm_info[OPE_MAX_STRIPES][MAX_WR_CLIENTS]; + uint32_t *go_cmd_addr; + uint32_t go_cmd_len; +}; + +/** + * struct ope_bus_wr_io_port_cdm_batch + * + * num_batch: Number of batches + * io_port_cdm: CDM IO Port Info + */ +struct ope_bus_wr_io_port_cdm_batch { + uint32_t num_batch; + struct ope_bus_wr_io_port_cdm_info io_port_cdm[OPE_MAX_BATCH_SIZE]; +}; + +/** + * struct ope_bus_wr_wm + * + * @wm_port_id: WM port ID + * @format_type: Format type + */ +struct ope_bus_wr_wm { + uint32_t wm_port_id; + uint32_t format_type; +}; + +/** + * struct ope_bus_out_port_to_wm + * + * @output_port_id: Output port ID + * @num_combos: Number of combos + * @num_wm: Number of WMs + * @wm_port_id: WM port Id + */ +struct ope_bus_out_port_to_wm { + uint32_t output_port_id; + uint32_t num_combos; + uint32_t num_wm[BUS_WR_COMBO_MAX]; + uint32_t wm_port_id[BUS_WR_COMBO_MAX][MAX_WR_CLIENTS]; +}; + +/** + * struct ope_bus_wr_io_port_info + * + * @pixel_pattern: Pixel pattern + * @output_port_id: Port Id + 
* @output_format_type: Format type + * @latency_buf_size: Latency buffer size + */ +struct ope_bus_wr_io_port_info { + uint32_t pixel_pattern[OPE_OUT_RES_MAX]; + uint32_t output_port_id[OPE_OUT_RES_MAX]; + uint32_t output_format_type[OPE_OUT_RES_MAX]; + uint32_t latency_buf_size; +}; + +/** + * struct ope_bus_wr_ctx + * + * @ope_acquire: OPE acquire structure + * @security_flag: security flag + * @num_out_ports: Number of out ports + * @io_port_info: IO port info + * @io_port_cdm_batch: IO port cdm info + */ +struct ope_bus_wr_ctx { + struct ope_acquire_dev_info *ope_acquire; + bool security_flag; + uint32_t num_out_ports; + struct ope_bus_wr_io_port_info io_port_info; + struct ope_bus_wr_io_port_cdm_batch io_port_cdm_batch; +}; + +/** + * struct ope_bus_wr + * + * @ope_hw_info: OPE hardware info + * @out_port_to_wm: IO port to WM mapping + * @bus_wr_ctx: WM context + */ +struct ope_bus_wr { + struct ope_hw *ope_hw_info; + struct ope_bus_out_port_to_wm out_port_to_wm[OPE_OUT_RES_MAX]; + struct ope_bus_wr_ctx *bus_wr_ctx[OPE_CTX_MAX]; +}; + +#endif /* OPE_BUS_WR_H */ + diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c new file mode 100644 index 0000000000..7c9cb160d7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c @@ -0,0 +1,1851 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "ope_core.h" +#include "ope_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "ope_hw.h" +#include "ope_dev_intf.h" +#include "cam_cdm_util.h" +#include "ope_bus_rd.h" +#include "ope_bus_wr.h" +#include "cam_compat.h" + +static int cam_ope_caps_vote(struct cam_ope_device_core_info *core_info, + struct cam_ope_dev_bw_update *cpas_vote) +{ + int rc = 0; + + if (cpas_vote->ahb_vote_valid) + rc = cam_cpas_update_ahb_vote(core_info->cpas_handle, + &cpas_vote->ahb_vote); + if (cpas_vote->axi_vote_valid) + rc = cam_cpas_update_axi_vote(core_info->cpas_handle, + &cpas_vote->axi_vote); + if (rc) + CAM_ERR(CAM_OPE, "cpas vote is failed: %d", rc); + + return rc; +} + +int cam_ope_get_hw_caps(void *hw_priv, void *get_hw_cap_args, + uint32_t arg_size) +{ + struct cam_hw_info *ope_dev = hw_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ope_device_core_info *core_info = NULL; + struct ope_hw_ver *ope_hw_ver; + struct cam_ope_top_reg_val *top_reg_val; + + if (!hw_priv) { + CAM_ERR(CAM_OPE, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &ope_dev->soc_info; + core_info = (struct cam_ope_device_core_info *)ope_dev->core_info; + + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_OPE, "soc_info = %x core_info = %x", + soc_info, core_info); + return -EINVAL; + } + + if (!get_hw_cap_args) { + CAM_ERR(CAM_OPE, "Invalid caps"); + return -EINVAL; + } + + top_reg_val = core_info->ope_hw_info->ope_hw->top_reg_val; + ope_hw_ver = get_hw_cap_args; + ope_hw_ver->hw_type = core_info->hw_type; + ope_hw_ver->hw_ver.major = + (core_info->hw_version & top_reg_val->major_mask) >> + top_reg_val->major_shift; + ope_hw_ver->hw_ver.minor = + (core_info->hw_version & top_reg_val->minor_mask) >> + 
top_reg_val->minor_shift; + ope_hw_ver->hw_ver.incr = + (core_info->hw_version & top_reg_val->incr_mask) >> + top_reg_val->incr_shift; + + return 0; +} + +int cam_ope_start(void *hw_priv, void *start_args, uint32_t arg_size) +{ + return 0; +} + +int cam_ope_stop(void *hw_priv, void *start_args, uint32_t arg_size) +{ + struct cam_hw_info *ope_dev = hw_priv; + struct cam_ope_device_core_info *core_info = NULL; + int rc = 0; + + if (!hw_priv) { + CAM_ERR(CAM_OPE, "Invalid cam_dev_info"); + return -EINVAL; + } + + core_info = (struct cam_ope_device_core_info *)ope_dev->core_info; + if (!core_info) { + CAM_ERR(CAM_OPE, "core_info = %pK", core_info); + return -EINVAL; + } + + if (core_info->cpas_start) { + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_OPE, "cpas stop is failed"); + else + core_info->cpas_start = false; + } + + return rc; +} + +int cam_ope_flush(void *hw_priv, void *flush_args, uint32_t arg_size) +{ + return 0; +} + +static int cam_ope_dev_process_init(struct ope_hw *ope_hw, + void *cmd_args) +{ + int rc = 0; + + rc = cam_ope_top_process(ope_hw, 0, OPE_HW_INIT, cmd_args); + if (rc) + goto top_init_fail; + + rc = cam_ope_bus_rd_process(ope_hw, 0, OPE_HW_INIT, cmd_args); + if (rc) + goto bus_rd_init_fail; + + rc = cam_ope_bus_wr_process(ope_hw, 0, OPE_HW_INIT, cmd_args); + if (rc) + goto bus_wr_init_fail; + + return rc; + +bus_wr_init_fail: + rc = cam_ope_bus_rd_process(ope_hw, 0, + OPE_HW_DEINIT, NULL); +bus_rd_init_fail: + rc = cam_ope_top_process(ope_hw, 0, + OPE_HW_DEINIT, NULL); +top_init_fail: + return rc; +} + +static int cam_ope_process_init(struct ope_hw *ope_hw, + void *cmd_args, bool hfi_en) +{ + if (!hfi_en) + return cam_ope_dev_process_init(ope_hw, cmd_args); + + CAM_ERR(CAM_OPE, "hfi_en is not supported"); + return -EINVAL; +} + +int cam_ope_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *ope_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ope_device_core_info 
*core_info = NULL; + struct cam_ope_cpas_vote *cpas_vote; + int rc = 0; + struct cam_ope_dev_init *init; + struct ope_hw *ope_hw; + + if (!device_priv) { + CAM_ERR(CAM_OPE, "Invalid cam_dev_info"); + rc = -EINVAL; + goto end; + } + + soc_info = &ope_dev->soc_info; + core_info = (struct cam_ope_device_core_info *)ope_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_OPE, "soc_info = %pK core_info = %pK", + soc_info, core_info); + rc = -EINVAL; + goto end; + } + ope_hw = core_info->ope_hw_info->ope_hw; + + cpas_vote = CAM_MEM_ZALLOC(sizeof(struct cam_ope_cpas_vote), GFP_KERNEL); + if (!cpas_vote) { + CAM_ERR(CAM_ISP, "Out of memory"); + rc = -ENOMEM; + goto end; + } + + cpas_vote->ahb_vote.type = CAM_VOTE_ABSOLUTE; + cpas_vote->ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + cpas_vote->axi_vote.num_paths = 1; + cpas_vote->axi_vote.axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_ALL; + cpas_vote->axi_vote.axi_path[0].transac_type = + CAM_AXI_TRANSACTION_WRITE; + cpas_vote->axi_vote.axi_path[0].camnoc_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote->axi_vote.axi_path[0].mnoc_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote->axi_vote.axi_path[0].mnoc_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote->axi_vote.axi_path[0].ddr_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote->axi_vote.axi_path[0].ddr_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote->ahb_vote, &cpas_vote->axi_vote); + if (rc) { + CAM_ERR(CAM_OPE, "cpass start failed: %d", rc); + goto free_cpas_vote; + } + core_info->cpas_start = true; + + rc = cam_ope_enable_soc_resources(soc_info); + if (rc) + goto enable_soc_resource_failed; + else + core_info->clk_enable = true; + + init = init_hw_args; + + core_info->ope_hw_info->hfi_en = init->hfi_en; + init->core_info = core_info; + rc = cam_ope_process_init(ope_hw, init_hw_args, init->hfi_en); + if (rc) + goto process_init_failed; + else + goto free_cpas_vote; + +process_init_failed: + if 
(cam_ope_disable_soc_resources(soc_info, core_info->clk_enable)) + CAM_ERR(CAM_OPE, "disable soc resource failed"); +enable_soc_resource_failed: + if (cam_cpas_stop(core_info->cpas_handle)) + CAM_ERR(CAM_OPE, "cpas stop is failed"); + else + core_info->cpas_start = false; +free_cpas_vote: + CAM_MEM_ZFREE((void *)cpas_vote, sizeof(struct cam_ope_cpas_vote)); + cpas_vote = NULL; +end: + return rc; +} + +int cam_ope_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) +{ + struct cam_hw_info *ope_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ope_device_core_info *core_info = NULL; + int rc = 0; + + if (!device_priv) { + CAM_ERR(CAM_OPE, "Invalid cam_dev_info"); + return -EINVAL; + } + + soc_info = &ope_dev->soc_info; + core_info = (struct cam_ope_device_core_info *)ope_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_OPE, "soc_info = %pK core_info = %pK", + soc_info, core_info); + return -EINVAL; + } + + rc = cam_ope_disable_soc_resources(soc_info, core_info->clk_enable); + if (rc) + CAM_ERR(CAM_OPE, "soc disable is failed : %d", rc); + core_info->clk_enable = false; + + return rc; +} + +static int cam_ope_dev_process_dump_debug_reg(struct ope_hw *ope_hw) +{ + int rc = 0; + + rc = cam_ope_top_process(ope_hw, -1, + OPE_HW_DUMP_DEBUG, NULL); + + return rc; +} + +static int cam_ope_dev_process_reset(struct ope_hw *ope_hw, void *cmd_args) +{ + int rc = 0; + + rc = cam_ope_top_process(ope_hw, -1, + OPE_HW_RESET, NULL); + + return rc; +} + +static int cam_ope_dev_process_release(struct ope_hw *ope_hw, void *cmd_args) +{ + int rc = 0; + struct cam_ope_dev_release *ope_dev_release; + + ope_dev_release = cmd_args; + rc = cam_ope_top_process(ope_hw, ope_dev_release->ctx_id, + OPE_HW_RELEASE, NULL); + + rc |= cam_ope_bus_rd_process(ope_hw, ope_dev_release->ctx_id, + OPE_HW_RELEASE, NULL); + + rc |= cam_ope_bus_wr_process(ope_hw, ope_dev_release->ctx_id, + OPE_HW_RELEASE, NULL); + + return rc; +} + +static int 
cam_ope_dev_process_acquire(struct ope_hw *ope_hw, void *cmd_args) +{ + int rc = 0; + struct cam_ope_dev_acquire *ope_dev_acquire; + + if (!cmd_args || !ope_hw) { + CAM_ERR(CAM_OPE, "Invalid arguments: %pK %pK", + cmd_args, ope_hw); + return -EINVAL; + } + + ope_dev_acquire = cmd_args; + rc = cam_ope_top_process(ope_hw, ope_dev_acquire->ctx_id, + OPE_HW_ACQUIRE, ope_dev_acquire->ope_acquire); + if (rc) + goto top_acquire_fail; + + rc = cam_ope_bus_rd_process(ope_hw, ope_dev_acquire->ctx_id, + OPE_HW_ACQUIRE, ope_dev_acquire->ope_acquire); + if (rc) + goto bus_rd_acquire_fail; + + rc = cam_ope_bus_wr_process(ope_hw, ope_dev_acquire->ctx_id, + OPE_HW_ACQUIRE, ope_dev_acquire->ope_acquire); + if (rc) + goto bus_wr_acquire_fail; + + return 0; + +bus_wr_acquire_fail: + cam_ope_bus_rd_process(ope_hw, ope_dev_acquire->ctx_id, + OPE_HW_RELEASE, ope_dev_acquire->ope_acquire); +bus_rd_acquire_fail: + cam_ope_top_process(ope_hw, ope_dev_acquire->ctx_id, + OPE_HW_RELEASE, ope_dev_acquire->ope_acquire); +top_acquire_fail: + return rc; +} + +static int cam_ope_dev_prepare_cdm_request( + struct cam_ope_hw_mgr *hw_mgr, + struct cam_hw_prepare_update_args *prepare_args, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + uint32_t kmd_buf_offset, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req, + uint32_t len, bool arbitrate) +{ + int i; + struct cam_ope_request *ope_request; + struct cam_cdm_bl_request *cdm_cmd; + uint32_t *kmd_buf; + + ope_request = ctx_data->req_list[req_idx]; + cdm_cmd = ope_request->cdm_cmd; + kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr + + kmd_buf_offset; + + cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE; + cdm_cmd->flag = true; + cdm_cmd->userdata = ctx_data; + cdm_cmd->cookie = req_idx; + cdm_cmd->gen_irq_arb = true; + + i = cdm_cmd->cmd_arrary_count; + cdm_cmd->cmd_flex[i].bl_addr.mem_handle = ope_request->ope_kmd_buf.mem_handle; + cdm_cmd->cmd_flex[i].offset = kmd_buf_offset + ope_request->ope_kmd_buf.offset; + cdm_cmd->cmd_flex[i].len = 
len; + cdm_cmd->cmd_flex[i].arbitrate = arbitrate; + cdm_cmd->cmd_flex[i].enable_debug_gen_irq = false; + + cdm_cmd->cmd_arrary_count++; + + CAM_DBG(CAM_OPE, "CDM cmd:Req idx = %d req_id = %lld array cnt = %d", + cdm_cmd->cookie, ope_request->request_id, + cdm_cmd->cmd_arrary_count); + CAM_DBG(CAM_OPE, "CDM cmd:mem_hdl = %d offset = %d len = %d, iova 0x%x", + ope_request->ope_kmd_buf.mem_handle, kmd_buf_offset, len, + cdm_cmd->cmd_flex[i].bl_addr.hw_iova); + + return 0; +} + + +static int dump_dmi_cmd(uint32_t print_idx, + uint32_t *print_ptr, struct cdm_dmi_cmd *dmi_cmd, + uint32_t *temp) +{ + CAM_DBG(CAM_OPE, "%d:dma_ptr:%x l:%d", + print_idx, print_ptr, + dmi_cmd->length); + CAM_DBG(CAM_OPE, "%d:cmd:%hhx addr:%x", + print_ptr, dmi_cmd->cmd, + dmi_cmd->addr); + CAM_DBG(CAM_OPE, "%d: dmiadr:%x sel:%d", + print_idx, dmi_cmd->DMIAddr, + dmi_cmd->DMISel); + CAM_DBG(CAM_OPE, "%d: %x %x %x", + print_idx, + temp[0], temp[1], temp[2]); + + return 0; +} + +static int dump_direct_cmd(uint32_t print_idx, + uint32_t *print_ptr, + struct ope_frame_process *frm_proc, + int batch_idx, int cmd_buf_idx) +{ + int len; + + if (cmd_buf_idx >= OPE_MAX_CMD_BUFS || + batch_idx >= OPE_MAX_BATCH_SIZE) + return 0; + + len = frm_proc->cmd_buf[batch_idx][cmd_buf_idx].length / 4; + CAM_DBG(CAM_OPE, "Frame DB : direct: E"); + for (print_idx = 0; print_idx < len; print_idx++) + CAM_DBG(CAM_OPE, "%d: %x", print_idx, print_ptr[print_idx]); + CAM_DBG(CAM_OPE, "Frame DB : direct: X"); + + return 0; +} + +static int dump_frame_cmd(struct ope_frame_process *frm_proc, + int i, int j, uint64_t iova_addr, uint32_t *kmd_buf, uint32_t buf_len) +{ + if (j >= OPE_MAX_CMD_BUFS || i >= OPE_MAX_BATCH_SIZE) + return 0; + + CAM_DBG(CAM_OPE, "Frame DB:scope:%d buffer:%d type:%d", + frm_proc->cmd_buf[i][j].cmd_buf_scope, + frm_proc->cmd_buf[i][j].cmd_buf_buffered, + frm_proc->cmd_buf[i][j].type); + CAM_DBG(CAM_OPE, "kmdbuf:%x memhdl:%x iova:%x %pK", + kmd_buf, + frm_proc->cmd_buf[i][j].mem_handle, + iova_addr, 
iova_addr); + CAM_DBG(CAM_OPE, "buflen:%d len:%d offset:%d", + buf_len, + frm_proc->cmd_buf[i][j].length, + frm_proc->cmd_buf[i][j].offset); + + return 0; +} + +static int dump_stripe_cmd(struct ope_frame_process *frm_proc, + uint32_t stripe_idx, int i, int k, uint64_t iova_addr, + uint32_t *kmd_buf, uint32_t buf_len) +{ + if (k >= OPE_MAX_CMD_BUFS) + return 0; + + CAM_DBG(CAM_OPE, "Stripe:%d scope:%d buffer:%d", + stripe_idx, + frm_proc->cmd_buf[i][k].cmd_buf_scope, + frm_proc->cmd_buf[i][k].cmd_buf_buffered); + CAM_DBG(CAM_OPE, "type:%d kmdbuf:%x memhdl:%x", + frm_proc->cmd_buf[i][k].type, kmd_buf, + frm_proc->cmd_buf[i][k].mem_handle); + CAM_DBG(CAM_OPE, "iova:%x %pK buflen:%d len:%d", + iova_addr, iova_addr, buf_len, + frm_proc->cmd_buf[i][k].length); + CAM_DBG(CAM_OPE, "offset:%d", + frm_proc->cmd_buf[i][k].offset); + return 0; +} + +int ope_validate_buff_offset(size_t buf_len, + struct ope_cmd_buf_info *cmd_buf) +{ + if ((buf_len <= cmd_buf->offset) || + (cmd_buf->size < cmd_buf->length) || + ((buf_len - cmd_buf->offset) < cmd_buf->length)) { + CAM_ERR(CAM_OPE, "invalid offset:0x%x, mem_hdl:0x%x buf_len:%llu", + cmd_buf->offset, + cmd_buf->mem_handle, + cmd_buf->length); + + return -EINVAL; + } else + return 0; +} + +static uint32_t *ope_create_frame_cmd_batch(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + uint32_t *kmd_buf, uint32_t buffered, int batch_idx, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req) +{ + int rc = 0, i, j; + uint32_t temp[3]; + struct cam_ope_request *ope_request; + struct cdm_dmi_cmd *dmi_cmd; + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info; + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info; + struct ope_frame_process *frm_proc; + dma_addr_t iova_addr; + uintptr_t cpu_addr; + size_t buf_len; + uint32_t print_idx; + uint32_t *print_ptr; + int num_dmi = 0; + struct cam_cdm_utils_ops *cdm_ops; + + frm_proc = ope_dev_prepare_req->frame_process; + ope_request = ctx_data->req_list[req_idx]; + 
cdm_ops = ctx_data->ope_cdm.cdm_ops; + wr_cdm_info = + &ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0]; + rd_cdm_info = + &ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0]; + + if (batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid input: %d", batch_idx); + return NULL; + } + i = batch_idx; + + for (j = 0; j < frm_proc->num_cmd_bufs[i]; j++) { + if (frm_proc->cmd_buf[i][j].cmd_buf_scope != + OPE_CMD_BUF_SCOPE_FRAME) + continue; + + if (frm_proc->cmd_buf[i][j].cmd_buf_usage == + OPE_CMD_BUF_KMD || + frm_proc->cmd_buf[i][j].cmd_buf_usage == + OPE_CMD_BUF_DEBUG) + continue; + + if (frm_proc->cmd_buf[i][j].cmd_buf_buffered != + buffered) + continue; + + if (!frm_proc->cmd_buf[i][j].mem_handle) + continue; + + rc = cam_mem_get_io_buf( + frm_proc->cmd_buf[i][j].mem_handle, + hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL, NULL); + if (rc || !iova_addr || !buf_len) { + CAM_ERR(CAM_OPE, "get cmd buf failed %x", + hw_mgr->iommu_hdl); + return NULL; + } + + rc = ope_validate_buff_offset(buf_len, &frm_proc->cmd_buf[i][j]); + if (rc) + return NULL; + + iova_addr = iova_addr + frm_proc->cmd_buf[i][j].offset; + + rc = cam_mem_get_cpu_buf( + frm_proc->cmd_buf[i][j].mem_handle, + &cpu_addr, &buf_len); + if (rc || !cpu_addr || !buf_len) { + CAM_ERR(CAM_OPE, "get cmd buf failed %x", + hw_mgr->iommu_hdl); + return NULL; + } + + rc = ope_validate_buff_offset(buf_len, &frm_proc->cmd_buf[i][j]); + if (rc) + return NULL; + + cpu_addr = cpu_addr + frm_proc->cmd_buf[i][j].offset; + if (frm_proc->cmd_buf[i][j].type == + OPE_CMD_BUF_TYPE_DIRECT) { + kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf, + iova_addr, + frm_proc->cmd_buf[i][j].length); + print_ptr = (uint32_t *)cpu_addr; + dump_direct_cmd(print_idx, print_ptr, + frm_proc, i, j); + } else { + num_dmi = frm_proc->cmd_buf[i][j].length / + sizeof(struct cdm_dmi_cmd); + CAM_DBG(CAM_OPE, "Frame DB : In direct: E"); + print_ptr = (uint32_t *)cpu_addr; + for (print_idx = 0; + print_idx < num_dmi; print_idx++) { + 
memcpy(temp, (const void *)print_ptr, + sizeof(struct cdm_dmi_cmd)); + dmi_cmd = (struct cdm_dmi_cmd *)temp; + if (!dmi_cmd->addr) { + CAM_ERR(CAM_OPE, "Null dmi cmd addr"); + cam_mem_put_cpu_buf(frm_proc->cmd_buf[i][j].mem_handle); + return NULL; + } + + kmd_buf = cdm_ops->cdm_write_dmi( + kmd_buf, + 0, dmi_cmd->DMIAddr, + dmi_cmd->DMISel, dmi_cmd->addr, + dmi_cmd->length); + if (hw_mgr->frame_dump_enable) + dump_dmi_cmd(print_idx, + print_ptr, dmi_cmd, temp); + print_ptr += + sizeof(struct cdm_dmi_cmd) / + sizeof(uint32_t); + } + CAM_DBG(CAM_OPE, "Frame DB : In direct: X"); + } + if (hw_mgr->frame_dump_enable) + dump_frame_cmd(frm_proc, i, j, + iova_addr, kmd_buf, buf_len); + + cam_mem_put_cpu_buf(frm_proc->cmd_buf[i][j].mem_handle); + } + return kmd_buf; + +} + +static uint32_t *ope_create_frame_wr(struct cam_ope_ctx *ctx_data, + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info, + uint32_t *kmd_buf, struct cam_ope_request *ope_request) +{ + struct cam_cdm_utils_ops *cdm_ops; + int i; + + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + for (i = 0; i < wr_cdm_info->num_frames_cmds; i++) { + kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf, + (uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr + + wr_cdm_info->f_cdm_info[i].offset, + wr_cdm_info->f_cdm_info[i].len); + CAM_DBG(CAM_OPE, "FrameWR:i:%d kmdbuf:%x len:%d iova:%x %pK", + i, kmd_buf, wr_cdm_info->f_cdm_info[i].len, + ope_request->ope_kmd_buf.iova_cdm_addr, + ope_request->ope_kmd_buf.iova_cdm_addr); + } + return kmd_buf; +} + +static uint32_t *ope_create_frame_rd(struct cam_ope_ctx *ctx_data, + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info, + uint32_t *kmd_buf, struct cam_ope_request *ope_request) +{ + struct cam_cdm_utils_ops *cdm_ops; + int i; + + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + /* Frame 0 RD */ + for (i = 0; i < rd_cdm_info->num_frames_cmds; i++) { + kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf, + (uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr + + rd_cdm_info->f_cdm_info[i].offset, + 
rd_cdm_info->f_cdm_info[i].len); + CAM_DBG(CAM_OPE, "FrameRD:i:%d kmdbuf:%x len:%d iova:%x %pK", + i, kmd_buf, rd_cdm_info->f_cdm_info[i].len, + ope_request->ope_kmd_buf.iova_cdm_addr, + ope_request->ope_kmd_buf.iova_cdm_addr); + } + return kmd_buf; +} + +static uint32_t *ope_create_frame_cmd(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + uint32_t *kmd_buf, uint32_t buffered, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req) +{ + int rc = 0, i, j; + uint32_t temp[3]; + struct cam_ope_request *ope_request; + struct cdm_dmi_cmd *dmi_cmd; + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info; + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info; + struct ope_frame_process *frm_proc; + dma_addr_t iova_addr; + uintptr_t cpu_addr; + size_t buf_len; + uint32_t print_idx; + uint32_t *print_ptr; + int num_dmi = 0; + struct cam_cdm_utils_ops *cdm_ops; + + frm_proc = ope_dev_prepare_req->frame_process; + ope_request = ctx_data->req_list[req_idx]; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + wr_cdm_info = + &ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0]; + rd_cdm_info = + &ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0]; + + for (i = 0; i < frm_proc->batch_size; i++) { + for (j = 0; j < frm_proc->num_cmd_bufs[i]; j++) { + if (frm_proc->cmd_buf[i][j].cmd_buf_scope != + OPE_CMD_BUF_SCOPE_FRAME) + continue; + + if (frm_proc->cmd_buf[i][j].cmd_buf_usage == + OPE_CMD_BUF_KMD || + frm_proc->cmd_buf[i][j].cmd_buf_usage == + OPE_CMD_BUF_DEBUG) + continue; + + if (frm_proc->cmd_buf[i][j].cmd_buf_buffered != + buffered) + continue; + + if (!frm_proc->cmd_buf[i][j].mem_handle) + continue; + + rc = cam_mem_get_io_buf( + frm_proc->cmd_buf[i][j].mem_handle, + hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL, NULL); + if (rc) { + CAM_ERR(CAM_OPE, "get cmd buf failed %x", + hw_mgr->iommu_hdl); + return NULL; + } + rc = ope_validate_buff_offset(buf_len, &frm_proc->cmd_buf[i][j]); + if (rc) + return NULL; + iova_addr = iova_addr + 
frm_proc->cmd_buf[i][j].offset; + + rc = cam_mem_get_cpu_buf( + frm_proc->cmd_buf[i][j].mem_handle, + &cpu_addr, &buf_len); + if (rc || !cpu_addr || !buf_len) { + CAM_ERR(CAM_OPE, "get cmd buf failed %x", + hw_mgr->iommu_hdl); + return NULL; + } + rc = ope_validate_buff_offset(buf_len, &frm_proc->cmd_buf[i][j]); + if (rc) + return NULL; + cpu_addr = cpu_addr + frm_proc->cmd_buf[i][j].offset; + if (frm_proc->cmd_buf[i][j].type == + OPE_CMD_BUF_TYPE_DIRECT) { + kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf, + iova_addr, + frm_proc->cmd_buf[i][j].length); + print_ptr = (uint32_t *)cpu_addr; + if (hw_mgr->frame_dump_enable) + dump_direct_cmd(print_idx, print_ptr, + frm_proc, i, j); + } else { + num_dmi = frm_proc->cmd_buf[i][j].length / + sizeof(struct cdm_dmi_cmd); + CAM_DBG(CAM_OPE, "Frame DB : In direct: E"); + print_ptr = (uint32_t *)cpu_addr; + for (print_idx = 0; + print_idx < num_dmi; print_idx++) { + memcpy(temp, (const void *)print_ptr, + sizeof(struct cdm_dmi_cmd)); + dmi_cmd = (struct cdm_dmi_cmd *)temp; + if (!dmi_cmd->addr) { + CAM_ERR(CAM_OPE, + "Null dmi cmd addr"); + cam_mem_put_cpu_buf( + frm_proc->cmd_buf[i][j].mem_handle); + return NULL; + } + + kmd_buf = cdm_ops->cdm_write_dmi( + kmd_buf, + 0, dmi_cmd->DMIAddr, + dmi_cmd->DMISel, dmi_cmd->addr, + dmi_cmd->length); + if (hw_mgr->frame_dump_enable) + dump_dmi_cmd(print_idx, + print_ptr, dmi_cmd, + temp); + print_ptr += + sizeof(struct cdm_dmi_cmd) / + sizeof(uint32_t); + } + CAM_DBG(CAM_OPE, "Frame DB : In direct: X"); + } + if (hw_mgr->frame_dump_enable) + dump_frame_cmd(frm_proc, i, j, + iova_addr, kmd_buf, buf_len); + + cam_mem_put_cpu_buf(frm_proc->cmd_buf[i][j].mem_handle); + } + } + return kmd_buf; +} + +static uint32_t *ope_create_stripe_cmd(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, + uint32_t *kmd_buf, + int batch_idx, + int s_idx, + uint32_t stripe_idx, + struct ope_frame_process *frm_proc) +{ + int rc = 0, i, j, k; + uint32_t temp[3]; + struct cdm_dmi_cmd *dmi_cmd; + 
dma_addr_t iova_addr; + uintptr_t cpu_addr; + size_t buf_len; + uint32_t print_idx; + uint32_t *print_ptr; + int num_dmi = 0; + struct cam_cdm_utils_ops *cdm_ops; + uint32_t reg_val_pair[2]; + struct cam_hw_info *ope_dev; + struct cam_ope_device_core_info *core_info; + struct ope_hw *ope_hw; + struct cam_ope_top_reg *top_reg; + + if (s_idx >= OPE_MAX_CMD_BUFS || + batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid inputs: %d %d", + batch_idx, s_idx); + return NULL; + } + + i = batch_idx; + j = s_idx; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + /* cmd buffer stripes */ + for (k = 0; k < frm_proc->num_cmd_bufs[i]; k++) { + if (frm_proc->cmd_buf[i][k].cmd_buf_scope != + OPE_CMD_BUF_SCOPE_STRIPE) + continue; + + if (frm_proc->cmd_buf[i][k].stripe_idx != + stripe_idx) + continue; + + if (!frm_proc->cmd_buf[i][k].mem_handle) + continue; + + CAM_DBG(CAM_OPE, "process stripe %d", stripe_idx); + rc = cam_mem_get_io_buf(frm_proc->cmd_buf[i][k].mem_handle, + hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL, NULL); + if (rc || !buf_len || !iova_addr) { + CAM_DBG(CAM_OPE, "get cmd buf fail %x", + hw_mgr->iommu_hdl); + return NULL; + } + rc = ope_validate_buff_offset(buf_len, &frm_proc->cmd_buf[i][k]); + if (rc) + return NULL; + iova_addr = iova_addr + frm_proc->cmd_buf[i][k].offset; + rc = cam_mem_get_cpu_buf(frm_proc->cmd_buf[i][k].mem_handle, + &cpu_addr, &buf_len); + if (rc || !cpu_addr || !buf_len) { + CAM_DBG(CAM_OPE, "get cmd buf fail %x", + hw_mgr->iommu_hdl); + return NULL; + } + rc = ope_validate_buff_offset(buf_len, &frm_proc->cmd_buf[i][k]); + if (rc) + return NULL; + cpu_addr = cpu_addr + frm_proc->cmd_buf[i][k].offset; + + if (frm_proc->cmd_buf[i][k].type == OPE_CMD_BUF_TYPE_DIRECT) { + kmd_buf = cdm_ops->cdm_write_indirect( + kmd_buf, + iova_addr, + frm_proc->cmd_buf[i][k].length); + print_ptr = (uint32_t *)cpu_addr; + CAM_DBG(CAM_OPE, "Stripe:%d direct:E", + stripe_idx); + if (hw_mgr->frame_dump_enable) + dump_direct_cmd(print_idx, print_ptr, + 
frm_proc, i, k); + CAM_DBG(CAM_OPE, "Stripe:%d direct:X", stripe_idx); + } else if (frm_proc->cmd_buf[i][k].type == + OPE_CMD_BUF_TYPE_INDIRECT) { + num_dmi = frm_proc->cmd_buf[i][j].length / + sizeof(struct cdm_dmi_cmd); + CAM_DBG(CAM_OPE, "Stripe:%d Indirect:E", stripe_idx); + print_ptr = (uint32_t *)cpu_addr; + for (print_idx = 0; print_idx < num_dmi; print_idx++) { + memcpy(temp, (const void *)print_ptr, + sizeof(struct cdm_dmi_cmd)); + dmi_cmd = (struct cdm_dmi_cmd *)temp; + if (!dmi_cmd->addr) { + CAM_ERR(CAM_OPE, "Null dmi cmd addr"); + cam_mem_put_cpu_buf(frm_proc->cmd_buf[i][k].mem_handle); + return NULL; + } + + kmd_buf = cdm_ops->cdm_write_dmi(kmd_buf, + 0, dmi_cmd->DMIAddr, dmi_cmd->DMISel, + dmi_cmd->addr, dmi_cmd->length); + if (hw_mgr->frame_dump_enable) + dump_dmi_cmd(print_idx, + print_ptr, dmi_cmd, temp); + print_ptr += sizeof(struct cdm_dmi_cmd) / + sizeof(uint32_t); + } + CAM_DBG(CAM_OPE, "Stripe:%d Indirect:X", stripe_idx); + } + + if (hw_mgr->frame_dump_enable) + dump_stripe_cmd(frm_proc, stripe_idx, i, k, + iova_addr, kmd_buf, buf_len); + + cam_mem_put_cpu_buf(frm_proc->cmd_buf[i][k].mem_handle); + } + + ope_dev = hw_mgr->ope_dev_intf[0]->hw_priv; + core_info = (struct cam_ope_device_core_info *)ope_dev->core_info; + ope_hw = core_info->ope_hw_info->ope_hw; + top_reg = ope_hw->top_reg; + + reg_val_pair[0] = top_reg->offset + top_reg->scratch_reg; + reg_val_pair[1] = stripe_idx; + kmd_buf = cdm_ops->cdm_write_regrandom(kmd_buf, 1, reg_val_pair); + + return kmd_buf; +} + +static uint32_t *ope_create_stripe_wr(struct cam_ope_ctx *ctx_data, + uint32_t stripe_idx, struct ope_bus_wr_io_port_cdm_info *wr_cdm_info, + struct cam_ope_request *ope_request, uint32_t *kmd_buf) +{ + struct cam_cdm_utils_ops *cdm_ops; + int k; + + if (stripe_idx >= OPE_MAX_STRIPES) { + CAM_ERR(CAM_OPE, "invalid s_idx = %d", stripe_idx); + return NULL; + } + + cdm_ops = ctx_data->ope_cdm.cdm_ops; + for (k = 0; k < wr_cdm_info->num_s_cmd_bufs[stripe_idx]; k++) { + kmd_buf = 
cdm_ops->cdm_write_indirect(kmd_buf, + (uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr + + wr_cdm_info->s_cdm_info[stripe_idx][k].offset, + wr_cdm_info->s_cdm_info[stripe_idx][k].len); + CAM_DBG(CAM_OPE, "WR stripe:%d %d kmdbuf:%x", + stripe_idx, k, kmd_buf); + CAM_DBG(CAM_OPE, "offset:%d len:%d iova:%x %pK", + wr_cdm_info->s_cdm_info[stripe_idx][k].offset, + wr_cdm_info->s_cdm_info[stripe_idx][k].len, + ope_request->ope_kmd_buf.iova_cdm_addr, + ope_request->ope_kmd_buf.iova_cdm_addr); + } + return kmd_buf; +} + +static uint32_t *ope_create_stripe_rd(struct cam_ope_ctx *ctx_data, + uint32_t stripe_idx, struct ope_bus_rd_io_port_cdm_info *rd_cdm_info, + struct cam_ope_request *ope_request, uint32_t *kmd_buf) +{ + struct cam_cdm_utils_ops *cdm_ops; + int k; + + if (stripe_idx >= OPE_MAX_STRIPES) { + CAM_ERR(CAM_OPE, "invalid s_idx = %d", stripe_idx); + return NULL; + } + + cdm_ops = ctx_data->ope_cdm.cdm_ops; + for (k = 0; k < rd_cdm_info->num_s_cmd_bufs[stripe_idx]; k++) { + kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf, + (uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr + + rd_cdm_info->s_cdm_info[stripe_idx][k].offset, + rd_cdm_info->s_cdm_info[stripe_idx][k].len); + CAM_DBG(CAM_OPE, "WR stripe:%d %d kmdbuf:%x", + stripe_idx, k, kmd_buf); + CAM_DBG(CAM_OPE, "offset:%d len:%d iova:%x %pK", + rd_cdm_info->s_cdm_info[stripe_idx][k].offset, + rd_cdm_info->s_cdm_info[stripe_idx][k].len, + ope_request->ope_kmd_buf.iova_cdm_addr, + ope_request->ope_kmd_buf.iova_cdm_addr); + } + return kmd_buf; +} + +static uint32_t *ope_create_stripes_batch(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + uint32_t *kmd_buf, int batch_idx, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req) +{ + int i, j; + struct cam_ope_request *ope_request; + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info; + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info; + struct ope_frame_process *frm_proc; + uint32_t stripe_idx = 0; + struct cam_cdm_utils_ops *cdm_ops; + + 
frm_proc = ope_dev_prepare_req->frame_process; + ope_request = ctx_data->req_list[req_idx]; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + if (batch_idx >= OPE_MAX_BATCH_SIZE) { + CAM_ERR(CAM_OPE, "Invalid input: %d", batch_idx); + return NULL; + } + i = batch_idx; + /* Stripes */ + + wr_cdm_info = + &ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[i]; + rd_cdm_info = + &ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[i]; + for (j = 0; j < ope_request->num_stripes[i]; j++) { + /* cmd buffer stripes */ + kmd_buf = ope_create_stripe_cmd(hw_mgr, ctx_data, + kmd_buf, i, j, stripe_idx, frm_proc); + if (!kmd_buf) + goto end; + + /* WR stripes */ + kmd_buf = ope_create_stripe_wr(ctx_data, stripe_idx, + wr_cdm_info, ope_request, kmd_buf); + if (!kmd_buf) + goto end; + + /* RD stripes */ + kmd_buf = ope_create_stripe_rd(ctx_data, stripe_idx, + rd_cdm_info, ope_request, kmd_buf); + if (!kmd_buf) + goto end; + + /* add go command */ + kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf, + (uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr + + rd_cdm_info->go_cmd_offset, + rd_cdm_info->go_cmd_len); + + CAM_DBG(CAM_OPE, "Go cmd for stripe:%d kmd_buf:%x", + stripe_idx, kmd_buf); + CAM_DBG(CAM_OPE, "iova:%x %pK", + ope_request->ope_kmd_buf.iova_cdm_addr, + ope_request->ope_kmd_buf.iova_cdm_addr); + + /* wait for RUP done */ + kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf, + OPE_WAIT_COMP_RUP, 0x0); + CAM_DBG(CAM_OPE, "wait RUP cmd stripe:%d kmd_buf:%x", + stripe_idx, kmd_buf); + stripe_idx++; + } + +end: + return kmd_buf; +} + +static uint32_t *ope_create_stripes(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + uint32_t *kmd_buf, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req) +{ + int i, j; + struct cam_ope_request *ope_request; + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info; + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info; + struct ope_frame_process *frm_proc; + uint32_t stripe_idx = 0; + struct cam_cdm_utils_ops *cdm_ops; + + frm_proc = 
ope_dev_prepare_req->frame_process; + ope_request = ctx_data->req_list[req_idx]; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + /* Stripes */ + for (i = 0; i < frm_proc->batch_size; i++) { + wr_cdm_info = + &ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[i]; + rd_cdm_info = + &ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[i]; + for (j = 0; j < ope_request->num_stripes[i]; j++) { + /* cmd buffer stripes */ + kmd_buf = ope_create_stripe_cmd(hw_mgr, ctx_data, + kmd_buf, i, j, stripe_idx, frm_proc); + if (!kmd_buf) + goto end; + + /* WR stripes */ + kmd_buf = ope_create_stripe_wr(ctx_data, stripe_idx, + wr_cdm_info, ope_request, kmd_buf); + if (!kmd_buf) + goto end; + + /* RD stripes */ + kmd_buf = ope_create_stripe_rd(ctx_data, stripe_idx, + rd_cdm_info, ope_request, kmd_buf); + if (!kmd_buf) + goto end; + + /* add go command */ + kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf, + (uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr + + rd_cdm_info->go_cmd_offset, + rd_cdm_info->go_cmd_len); + + CAM_DBG(CAM_OPE, "Go cmd for stripe:%d kmd_buf:%x", + stripe_idx, kmd_buf); + CAM_DBG(CAM_OPE, "iova:%x %pK", + ope_request->ope_kmd_buf.iova_cdm_addr, + ope_request->ope_kmd_buf.iova_cdm_addr); + + /* wait for RUP done */ + kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf, + OPE_WAIT_COMP_RUP, 0x0); + CAM_DBG(CAM_OPE, "wait RUP cmd stripe:%d kmd_buf:%x", + stripe_idx, kmd_buf); + stripe_idx++; + } + } +end: + return kmd_buf; +} + +static uint32_t *ope_create_stripes_nrt(struct cam_ope_hw_mgr *hw_mgr, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + uint32_t *kmd_buf, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req, + uint32_t *kmd_buf_offset, uint32_t **cdm_kmd_start_addr) +{ + int i, j; + struct cam_ope_request *ope_request; + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info; + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info; + struct ope_frame_process *frm_proc; + uint32_t stripe_idx = 0; + struct cam_cdm_utils_ops *cdm_ops; + uint32_t len; + int num_nrt_stripes, 
num_arb; + + frm_proc = ope_dev_prepare_req->frame_process; + ope_request = ctx_data->req_list[req_idx]; + num_nrt_stripes = ctx_data->ope_acquire.nrt_stripes_for_arb; + num_arb = ope_request->num_stripes[0] / + ctx_data->ope_acquire.nrt_stripes_for_arb; + if (ope_request->num_stripes[0] % + ctx_data->ope_acquire.nrt_stripes_for_arb) + num_arb++; + CAM_DBG(CAM_OPE, "Number of ARB for snap: %d", num_arb); + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + /* Stripes */ + for (i = 0; i < frm_proc->batch_size; i++) { + wr_cdm_info = + &ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[i]; + rd_cdm_info = + &ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[i]; + for (j = 0; j < ope_request->num_stripes[i]; j++) { + CAM_DBG(CAM_OPE, "num_nrt_stripes = %d num_arb = %d", + num_nrt_stripes, num_arb); + if (!num_nrt_stripes) { + kmd_buf = cdm_ops->cdm_write_wait_comp_event( + kmd_buf, + OPE_WAIT_COMP_IDLE, 0x0); + len = (kmd_buf - *cdm_kmd_start_addr) * + sizeof(uint32_t); + cam_ope_dev_prepare_cdm_request( + ope_dev_prepare_req->hw_mgr, + ope_dev_prepare_req->prepare_args, + ope_dev_prepare_req->ctx_data, + ope_dev_prepare_req->req_idx, + *kmd_buf_offset, ope_dev_prepare_req, + len, true); + *cdm_kmd_start_addr = kmd_buf; + *kmd_buf_offset += len; + } + /* cmd buffer stripes */ + kmd_buf = ope_create_stripe_cmd(hw_mgr, ctx_data, + kmd_buf, i, j, stripe_idx, frm_proc); + if (!kmd_buf) + goto end; + + /* WR stripes */ + kmd_buf = ope_create_stripe_wr(ctx_data, stripe_idx, + wr_cdm_info, ope_request, kmd_buf); + if (!kmd_buf) + goto end; + + /* RD stripes */ + kmd_buf = ope_create_stripe_rd(ctx_data, stripe_idx, + rd_cdm_info, ope_request, kmd_buf); + if (!kmd_buf) + goto end; + + if (!num_nrt_stripes) { + /* For num_nrt_stripes create CDM BL with ARB */ + /* Add Frame level cmds in this condition */ + /* Frame 0 DB */ + kmd_buf = ope_create_frame_cmd(hw_mgr, + ctx_data, req_idx, + kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED, + ope_dev_prepare_req); + if (!kmd_buf) + goto end; + + /* Frame 0 SB 
*/ + kmd_buf = ope_create_frame_cmd(hw_mgr, + ctx_data, req_idx, + kmd_buf, OPE_CMD_BUF_SINGLE_BUFFERED, + ope_dev_prepare_req); + if (!kmd_buf) + goto end; + + /* Frame 0 WR */ + kmd_buf = ope_create_frame_wr(ctx_data, + wr_cdm_info, kmd_buf, ope_request); + if (!kmd_buf) + goto end; + + /* Frame 0 RD */ + kmd_buf = ope_create_frame_rd(ctx_data, + rd_cdm_info, kmd_buf, ope_request); + if (!kmd_buf) + goto end; + num_arb--; + num_nrt_stripes = + ctx_data->ope_acquire.nrt_stripes_for_arb; + } + // add go command + kmd_buf = cdm_ops->cdm_write_indirect(kmd_buf, + (uint32_t)ope_request->ope_kmd_buf.iova_cdm_addr + + rd_cdm_info->go_cmd_offset, + rd_cdm_info->go_cmd_len); + + CAM_DBG(CAM_OPE, "Go cmd for stripe:%d kmd_buf:%x", + stripe_idx, kmd_buf); + CAM_DBG(CAM_OPE, "iova:%x %pK", + ope_request->ope_kmd_buf.iova_cdm_addr, + ope_request->ope_kmd_buf.iova_cdm_addr); + + // wait for RUP done + kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf, + OPE_WAIT_COMP_RUP, 0x0); + CAM_DBG(CAM_OPE, "wait RUP cmd stripe:%d kmd_buf:%x", + stripe_idx, kmd_buf); + stripe_idx++; + num_nrt_stripes--; + } + } +end: + return kmd_buf; +} + +static int cam_ope_dev_create_kmd_buf_nrt(struct cam_ope_hw_mgr *hw_mgr, + struct cam_hw_prepare_update_args *prepare_args, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req) +{ + int rc = 0; + uint32_t len; + struct cam_ope_request *ope_request; + uint32_t *kmd_buf; + uint32_t *cdm_kmd_start_addr; + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info; + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info; + struct ope_frame_process *frm_proc; + struct cam_cdm_utils_ops *cdm_ops; + uint32_t kmd_buff_offset = ope_dev_prepare_req->kmd_buf_offset; + + frm_proc = ope_dev_prepare_req->frame_process; + ope_request = ctx_data->req_list[req_idx]; + kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr + + (ope_dev_prepare_req->kmd_buf_offset / sizeof(len)); + cdm_kmd_start_addr = kmd_buf; + wr_cdm_info = + 
&ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0]; + rd_cdm_info = + &ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0]; + + cdm_ops = ctx_data->ope_cdm.cdm_ops; + + kmd_buf = cdm_ops->cdm_write_clear_comp_event(kmd_buf, + OPE_WAIT_COMP_IDLE|OPE_WAIT_COMP_RUP, 0x0); + + /* Frame 0 DB */ + kmd_buf = ope_create_frame_cmd(hw_mgr, + ctx_data, req_idx, + kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED, + ope_dev_prepare_req); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame 0 SB */ + kmd_buf = ope_create_frame_cmd(hw_mgr, + ctx_data, req_idx, + kmd_buf, OPE_CMD_BUF_SINGLE_BUFFERED, + ope_dev_prepare_req); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame 0 WR */ + kmd_buf = ope_create_frame_wr(ctx_data, + wr_cdm_info, kmd_buf, ope_request); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame 0 RD */ + kmd_buf = ope_create_frame_rd(ctx_data, + rd_cdm_info, kmd_buf, ope_request); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Stripes */ + kmd_buf = ope_create_stripes_nrt(hw_mgr, ctx_data, req_idx, kmd_buf, + ope_dev_prepare_req, &kmd_buff_offset, &cdm_kmd_start_addr); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Last arbitration if there are odd number of stripes */ + /* wait_idle_irq */ + kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf, + OPE_WAIT_COMP_IDLE, 0x0); + + /* prepare CDM submit packet */ + len = (kmd_buf - cdm_kmd_start_addr) * sizeof(uint32_t); + cam_ope_dev_prepare_cdm_request(ope_dev_prepare_req->hw_mgr, + ope_dev_prepare_req->prepare_args, + ope_dev_prepare_req->ctx_data, ope_dev_prepare_req->req_idx, + ope_dev_prepare_req->kmd_buf_offset, ope_dev_prepare_req, + len, false); + + ope_dev_prepare_req->kmd_buf_offset += len; +end: + return rc; +} + +static int cam_ope_dev_create_kmd_buf_batch(struct cam_ope_hw_mgr *hw_mgr, + struct cam_hw_prepare_update_args *prepare_args, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req) +{ + int rc = 0, i; + 
uint32_t len; + struct cam_ope_request *ope_request; + uint32_t *kmd_buf; + uint32_t *cdm_kmd_start_addr; + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info; + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info; + struct ope_frame_process *frm_proc; + struct cam_cdm_utils_ops *cdm_ops; + + frm_proc = ope_dev_prepare_req->frame_process; + ope_request = ctx_data->req_list[req_idx]; + kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr + + (ope_dev_prepare_req->kmd_buf_offset / sizeof(len)); + cdm_kmd_start_addr = kmd_buf; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + kmd_buf = cdm_ops->cdm_write_clear_comp_event(kmd_buf, + OPE_WAIT_COMP_IDLE|OPE_WAIT_COMP_RUP, 0x0); + + for (i = 0; i < frm_proc->batch_size; i++) { + wr_cdm_info = + &ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[i]; + rd_cdm_info = + &ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[i]; + + /* After second batch DB programming add prefecth dis */ + if (i) { + kmd_buf = + cdm_ops->cdm_write_wait_prefetch_disable( + kmd_buf, 0x0, + OPE_WAIT_COMP_IDLE, 0x0); + } + + /* Frame i DB */ + kmd_buf = ope_create_frame_cmd_batch(hw_mgr, + ctx_data, req_idx, + kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED, i, + ope_dev_prepare_req); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame i SB */ + kmd_buf = ope_create_frame_cmd_batch(hw_mgr, + ctx_data, req_idx, + kmd_buf, OPE_CMD_BUF_SINGLE_BUFFERED, i, + ope_dev_prepare_req); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame i WR */ + kmd_buf = ope_create_frame_wr(ctx_data, + wr_cdm_info, kmd_buf, ope_request); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame i RD */ + kmd_buf = ope_create_frame_rd(ctx_data, + rd_cdm_info, kmd_buf, ope_request); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Stripe level programming for batch i */ + /* Stripes */ + kmd_buf = ope_create_stripes_batch(hw_mgr, ctx_data, req_idx, + kmd_buf, i, ope_dev_prepare_req); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + } + + /* wait_idle_irq */ + kmd_buf = 
cdm_ops->cdm_write_wait_comp_event(kmd_buf, + OPE_WAIT_COMP_IDLE, 0x0); + + /* prepare CDM submit packet */ + len = (kmd_buf - cdm_kmd_start_addr) * sizeof(uint32_t); + cam_ope_dev_prepare_cdm_request(ope_dev_prepare_req->hw_mgr, + ope_dev_prepare_req->prepare_args, + ope_dev_prepare_req->ctx_data, ope_dev_prepare_req->req_idx, + ope_dev_prepare_req->kmd_buf_offset, ope_dev_prepare_req, + len, false); + + ope_dev_prepare_req->kmd_buf_offset += len; + +end: + return rc; +} + +static int cam_ope_dev_create_kmd_buf(struct cam_ope_hw_mgr *hw_mgr, + struct cam_hw_prepare_update_args *prepare_args, + struct cam_ope_ctx *ctx_data, uint32_t req_idx, + struct cam_ope_dev_prepare_req *ope_dev_prepare_req) +{ + int rc = 0; + uint32_t len; + struct cam_ope_request *ope_request; + uint32_t *kmd_buf; + uint32_t *cdm_kmd_start_addr; + struct ope_bus_wr_io_port_cdm_info *wr_cdm_info; + struct ope_bus_rd_io_port_cdm_info *rd_cdm_info; + struct cam_cdm_utils_ops *cdm_ops; + + + if (ctx_data->ope_acquire.dev_type == OPE_DEV_TYPE_OPE_NRT) { + return cam_ope_dev_create_kmd_buf_nrt( + ope_dev_prepare_req->hw_mgr, + ope_dev_prepare_req->prepare_args, + ope_dev_prepare_req->ctx_data, + ope_dev_prepare_req->req_idx, + ope_dev_prepare_req); + } + + if (ctx_data->ope_acquire.batch_size > 1) { + return cam_ope_dev_create_kmd_buf_batch( + ope_dev_prepare_req->hw_mgr, + ope_dev_prepare_req->prepare_args, + ope_dev_prepare_req->ctx_data, + ope_dev_prepare_req->req_idx, + ope_dev_prepare_req); + } + + ope_request = ctx_data->req_list[req_idx]; + kmd_buf = (uint32_t *)ope_request->ope_kmd_buf.cpu_addr + + (ope_dev_prepare_req->kmd_buf_offset / sizeof(len)); + cdm_kmd_start_addr = kmd_buf; + cdm_ops = ctx_data->ope_cdm.cdm_ops; + wr_cdm_info = + &ope_dev_prepare_req->wr_cdm_batch->io_port_cdm[0]; + rd_cdm_info = + &ope_dev_prepare_req->rd_cdm_batch->io_port_cdm[0]; + + + CAM_DBG(CAM_OPE, "kmd_buf:%x req_idx:%d req_id:%lld offset:%d", + kmd_buf, req_idx, ope_request->request_id, 
ope_dev_prepare_req->kmd_buf_offset); + + kmd_buf = cdm_ops->cdm_write_clear_comp_event(kmd_buf, + OPE_WAIT_COMP_IDLE|OPE_WAIT_COMP_RUP, 0x0); + /* Frame 0 DB */ + kmd_buf = ope_create_frame_cmd(hw_mgr, + ctx_data, req_idx, + kmd_buf, OPE_CMD_BUF_DOUBLE_BUFFERED, + ope_dev_prepare_req); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame 0 SB */ + kmd_buf = ope_create_frame_cmd(hw_mgr, + ctx_data, req_idx, + kmd_buf, OPE_CMD_BUF_SINGLE_BUFFERED, + ope_dev_prepare_req); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame 0 WR */ + kmd_buf = ope_create_frame_wr(ctx_data, + wr_cdm_info, kmd_buf, ope_request); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Frame 0 RD */ + kmd_buf = ope_create_frame_rd(ctx_data, + rd_cdm_info, kmd_buf, ope_request); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* Stripes */ + kmd_buf = ope_create_stripes(hw_mgr, ctx_data, req_idx, kmd_buf, + ope_dev_prepare_req); + if (!kmd_buf) { + rc = -EINVAL; + goto end; + } + + /* wait_idle_irq */ + kmd_buf = cdm_ops->cdm_write_wait_comp_event(kmd_buf, + OPE_WAIT_COMP_IDLE, 0x0); + + CAM_DBG(CAM_OPE, "wait for idle IRQ: kmd_buf:%x", kmd_buf); + + /* prepare CDM submit packet */ + len = (kmd_buf - cdm_kmd_start_addr) * sizeof(uint32_t); + CAM_DBG(CAM_OPE, "kmd_start_addr:%x kmdbuf_addr:%x len:%d", + cdm_kmd_start_addr, kmd_buf, len); + cam_ope_dev_prepare_cdm_request( + ope_dev_prepare_req->hw_mgr, + ope_dev_prepare_req->prepare_args, + ope_dev_prepare_req->ctx_data, + ope_dev_prepare_req->req_idx, + ope_dev_prepare_req->kmd_buf_offset, + ope_dev_prepare_req, + len, false); + ope_dev_prepare_req->kmd_buf_offset += len; +end: + return rc; +} + +static int cam_ope_dev_process_prepare(struct ope_hw *ope_hw, void *cmd_args) +{ + int rc = 0; + struct cam_ope_dev_prepare_req *ope_dev_prepare_req; + + ope_dev_prepare_req = cmd_args; + + rc = cam_ope_top_process(ope_hw, ope_dev_prepare_req->ctx_data->ctx_id, + OPE_HW_PREPARE, ope_dev_prepare_req); + if (rc) + goto 
end; + + rc = cam_ope_bus_rd_process(ope_hw, + ope_dev_prepare_req->ctx_data->ctx_id, + OPE_HW_PREPARE, ope_dev_prepare_req); + if (rc) + goto end; + + rc = cam_ope_bus_wr_process(ope_hw, + ope_dev_prepare_req->ctx_data->ctx_id, + OPE_HW_PREPARE, ope_dev_prepare_req); + if (rc) + goto end; + + rc = cam_ope_dev_create_kmd_buf(ope_dev_prepare_req->hw_mgr, + ope_dev_prepare_req->prepare_args, + ope_dev_prepare_req->ctx_data, + ope_dev_prepare_req->req_idx, + ope_dev_prepare_req); + +end: + return rc; +} + +static int cam_ope_dev_process_probe(struct ope_hw *ope_hw, + void *cmd_args) +{ + cam_ope_top_process(ope_hw, -1, OPE_HW_PROBE, NULL); + cam_ope_bus_rd_process(ope_hw, -1, OPE_HW_PROBE, NULL); + cam_ope_bus_wr_process(ope_hw, -1, OPE_HW_PROBE, NULL); + + return 0; +} + +static int cam_ope_process_probe(struct ope_hw *ope_hw, + void *cmd_args, bool hfi_en) +{ + struct cam_ope_dev_probe *ope_probe = cmd_args; + + if (!ope_probe->hfi_en) + return cam_ope_dev_process_probe(ope_hw, cmd_args); + + return -EINVAL; +} + +static int cam_ope_process_dump_debug_reg(struct ope_hw *ope_hw, + bool hfi_en) +{ + if (!hfi_en) + return cam_ope_dev_process_dump_debug_reg(ope_hw); + + return -EINVAL; +} + +static int cam_ope_process_reset(struct ope_hw *ope_hw, + void *cmd_args, bool hfi_en) +{ + if (!hfi_en) + return cam_ope_dev_process_reset(ope_hw, cmd_args); + + return -EINVAL; +} + +static int cam_ope_process_release(struct ope_hw *ope_hw, + void *cmd_args, bool hfi_en) +{ + if (!hfi_en) + return cam_ope_dev_process_release(ope_hw, cmd_args); + + return -EINVAL; +} + +static int cam_ope_process_acquire(struct ope_hw *ope_hw, + void *cmd_args, bool hfi_en) +{ + if (!hfi_en) + return cam_ope_dev_process_acquire(ope_hw, cmd_args); + + return -EINVAL; +} + +static int cam_ope_process_prepare(struct ope_hw *ope_hw, + void *cmd_args, bool hfi_en) +{ + if (!hfi_en) + return cam_ope_dev_process_prepare(ope_hw, cmd_args); + + return -EINVAL; +} + +int cam_ope_process_cmd(void 
*device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) +{ + int rc = 0; + struct cam_hw_info *ope_dev = device_priv; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_ope_device_core_info *core_info = NULL; + struct cam_ope_match_pid_args *match_pid_mid = NULL; + struct ope_hw *ope_hw; + bool hfi_en; + unsigned long flags; + int i; + uint32_t device_idx; + + if (!device_priv) { + CAM_ERR(CAM_OPE, "Invalid args %x for cmd %u", + device_priv, cmd_type); + return -EINVAL; + } + + soc_info = &ope_dev->soc_info; + core_info = (struct cam_ope_device_core_info *)ope_dev->core_info; + if ((!soc_info) || (!core_info)) { + CAM_ERR(CAM_OPE, "soc_info = %x core_info = %x", + soc_info, core_info); + return -EINVAL; + } + + hfi_en = core_info->ope_hw_info->hfi_en; + ope_hw = core_info->ope_hw_info->ope_hw; + if (!ope_hw) { + CAM_ERR(CAM_OPE, "Invalid ope hw info"); + return -EINVAL; + } + + switch (cmd_type) { + case OPE_HW_PROBE: + rc = cam_ope_process_probe(ope_hw, cmd_args, hfi_en); + break; + case OPE_HW_ACQUIRE: + rc = cam_ope_process_acquire(ope_hw, cmd_args, hfi_en); + break; + case OPE_HW_RELEASE: + rc = cam_ope_process_release(ope_hw, cmd_args, hfi_en); + break; + case OPE_HW_PREPARE: + rc = cam_ope_process_prepare(ope_hw, cmd_args, hfi_en); + break; + case OPE_HW_START: + break; + case OPE_HW_STOP: + break; + case OPE_HW_FLUSH: + break; + case OPE_HW_RESET: + rc = cam_ope_process_reset(ope_hw, cmd_args, hfi_en); + break; + case OPE_HW_CLK_UPDATE: { + struct cam_ope_dev_clk_update *clk_upd_cmd = + (struct cam_ope_dev_clk_update *)cmd_args; + + if (core_info->clk_enable == false) { + rc = cam_soc_util_clk_enable_default(soc_info, CAM_CLK_SW_CLIENT_IDX, + CAM_SVS_VOTE); + if (rc) { + CAM_ERR(CAM_OPE, "Clock enable is failed"); + return rc; + } + core_info->clk_enable = true; + } + + rc = cam_ope_update_clk_rate(soc_info, clk_upd_cmd->clk_rate); + if (rc) + CAM_ERR(CAM_OPE, "Failed to update clk: %d", rc); + } + break; + case OPE_HW_CLK_DISABLE: { + if 
(core_info->clk_enable == true) + cam_soc_util_clk_disable_default(soc_info, CAM_CLK_SW_CLIENT_IDX); + + core_info->clk_enable = false; + } + break; + case OPE_HW_BW_UPDATE: { + struct cam_ope_dev_bw_update *cpas_vote = cmd_args; + + if (!cmd_args) + return -EINVAL; + + rc = cam_ope_caps_vote(core_info, cpas_vote); + if (rc) + CAM_ERR(CAM_OPE, "failed to update bw: %d", rc); + } + break; + case OPE_HW_SET_IRQ_CB: { + struct cam_ope_set_irq_cb *irq_cb = cmd_args; + + if (!cmd_args) { + CAM_ERR(CAM_OPE, "cmd args NULL"); + return -EINVAL; + } + + spin_lock_irqsave(&ope_dev->hw_lock, flags); + core_info->irq_cb.ope_hw_mgr_cb = irq_cb->ope_hw_mgr_cb; + core_info->irq_cb.data = irq_cb->data; + spin_unlock_irqrestore(&ope_dev->hw_lock, flags); + } + break; + case OPE_HW_DUMP_DEBUG: + rc = cam_ope_process_dump_debug_reg(ope_hw, hfi_en); + break; + case OPE_HW_MATCH_PID_MID: + if (!cmd_args) { + CAM_ERR(CAM_OPE, "cmd args NULL"); + return -EINVAL; + } + + match_pid_mid = (struct cam_ope_match_pid_args *)cmd_args; + match_pid_mid->mid_match_found = false; + + device_idx = match_pid_mid->device_idx; + + for (i = 0; i < MAX_RW_CLIENTS; i++) { + if ((match_pid_mid->fault_mid == + ope_hw->common->ope_mid_info[device_idx][i].mid) && + (match_pid_mid->fault_pid == + ope_hw->common->ope_mid_info[device_idx][i].pid)) { + match_pid_mid->match_res = + ope_hw->common->ope_mid_info[device_idx][i].cam_ope_res_type; + match_pid_mid->mid_match_found = true; + break; + } + } + if (!match_pid_mid->mid_match_found) { + rc = -1; + CAM_INFO(CAM_OPE, "mid match not found"); + } + break; + default: + break; + } + + return rc; +} + +irqreturn_t cam_ope_irq(int irq_num, void *data) +{ + struct cam_hw_info *ope_dev = data; + struct cam_ope_device_core_info *core_info = NULL; + struct ope_hw *ope_hw; + struct cam_ope_irq_data irq_data; + + if (!data) { + CAM_ERR(CAM_OPE, "Invalid cam_dev_info or query_cap args"); + return IRQ_HANDLED; + } + + core_info = (struct cam_ope_device_core_info 
*)ope_dev->core_info; + ope_hw = core_info->ope_hw_info->ope_hw; + + irq_data.error = 0; + cam_ope_top_process(ope_hw, 0, OPE_HW_ISR, &irq_data); + cam_ope_bus_rd_process(ope_hw, 0, OPE_HW_ISR, &irq_data); + cam_ope_bus_wr_process(ope_hw, 0, OPE_HW_ISR, &irq_data); + + + spin_lock(&ope_dev->hw_lock); + if (core_info->irq_cb.ope_hw_mgr_cb && core_info->irq_cb.data) + if (irq_data.error) + core_info->irq_cb.ope_hw_mgr_cb(irq_data.error, + core_info->irq_cb.data); + spin_unlock(&ope_dev->hw_lock); + + + return IRQ_HANDLED; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.h new file mode 100644 index 0000000000..22edd138fc --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef CAM_OPE_CORE_H +#define CAM_OPE_CORE_H + + +#include +#include +#include +#include +#include +#include +#include "cam_cpas_api.h" +#include "ope_hw.h" +#include "ope_dev_intf.h" + +#define CAM_OPE_HW_MAX_NUM_PID 2 + +/** + * struct cam_ope_cpas_vote + * @ahb_vote: AHB vote info + * @axi_vote: AXI vote info + * @ahb_vote_valid: Flag for ahb vote data + * @axi_vote_valid: flag for axi vote data + */ +struct cam_ope_cpas_vote { + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote; + uint32_t ahb_vote_valid; + uint32_t axi_vote_valid; +}; + +/** + * struct cam_ope_device_hw_info + * + * @ope_hw: OPE hardware + * @hw_idx: Hardware index + * @ope_cdm_base: Base address of CDM + * @ope_top_base: Base address of top + * @ope_qos_base: Base address of QOS + * @ope_pp_base: Base address of PP + * @ope_bus_rd_base: Base address of RD + * @ope_bus_wr_base: Base address of WM + * @hfi_en: HFI flag enable + * @reserved: Reserved + */ +struct cam_ope_device_hw_info { + struct ope_hw *ope_hw; + uint32_t hw_idx; + void *ope_cdm_base; + void *ope_top_base; + void *ope_qos_base; + void *ope_pp_base; + void *ope_bus_rd_base; + void *ope_bus_wr_base; + bool hfi_en; + uint32_t reserved; +}; + +/** + * struct cam_ope_device_core_info + * + * @ope_hw_info: OPE hardware info + * @hw_version: Hardware version + * @hw_idx: Hardware Index + * @hw_type: Hardware Type + * @cpas_handle: CPAS Handle + * @cpas_start: CPAS start flag + * @clk_enable: Clock enable flag + * @irq_cb: IRQ Callback + */ +struct cam_ope_device_core_info { + struct cam_ope_device_hw_info *ope_hw_info; + uint32_t hw_version; + uint32_t hw_idx; + uint32_t hw_type; + uint32_t cpas_handle; + bool cpas_start; + bool clk_enable; + struct cam_ope_set_irq_cb irq_cb; +}; + +int cam_ope_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_ope_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); +int cam_ope_start(void *device_priv, + void *start_args, uint32_t 
arg_size); +int cam_ope_stop(void *device_priv, + void *stop_args, uint32_t arg_size); +int cam_ope_flush(void *device_priv, + void *flush_args, uint32_t arg_size); +int cam_ope_get_hw_caps(void *device_priv, + void *get_hw_cap_args, uint32_t arg_size); +int cam_ope_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); +int ope_validate_buff_offset(size_t buf_len, + struct ope_cmd_buf_info *cmd_buf); +irqreturn_t cam_ope_irq(int irq_num, void *data); + +#endif /* CAM_OPE_CORE_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev.c b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev.c new file mode 100644 index 0000000000..02b5a3614b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "ope_core.h" +#include "ope_soc.h" +#include "cam_hw.h" +#include "ope_hw.h" +#include "cam_hw_intf.h" +#include "cam_io_util.h" +#include "cam_ope_hw_mgr_intf.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "ope_hw_100.h" +#include "ope_dev_intf.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +static struct cam_ope_hw_intf_data cam_ope_dev_list[OPE_DEV_MAX]; +static struct cam_ope_device_hw_info ope_hw_info; +struct cam_ope_soc_private ope_soc_info; +EXPORT_SYMBOL(ope_soc_info); + +static struct hw_version_reg ope_hw_version_reg = { + .hw_ver = 0x0, +}; + +static int cam_ope_init_hw_version(struct cam_hw_soc_info *soc_info, + struct cam_ope_device_core_info *core_info) +{ + int rc = 0; + + CAM_DBG(CAM_OPE, "soc_info = %x core_info = %x", + soc_info, core_info); + CAM_DBG(CAM_OPE, "CDM:%x TOP: %x QOS: %x PP: %x RD: %x WR: %x", + soc_info->reg_map[OPE_CDM_BASE].mem_base, + soc_info->reg_map[OPE_TOP_BASE].mem_base, + soc_info->reg_map[OPE_QOS_BASE].mem_base, + soc_info->reg_map[OPE_PP_BASE].mem_base, + soc_info->reg_map[OPE_BUS_RD].mem_base, + soc_info->reg_map[OPE_BUS_WR].mem_base); + CAM_DBG(CAM_OPE, "core: %x", + core_info->ope_hw_info->ope_cdm_base); + + core_info->ope_hw_info->ope_cdm_base = + soc_info->reg_map[OPE_CDM_BASE].mem_base; + core_info->ope_hw_info->ope_top_base = + soc_info->reg_map[OPE_TOP_BASE].mem_base; + core_info->ope_hw_info->ope_qos_base = + soc_info->reg_map[OPE_QOS_BASE].mem_base; + core_info->ope_hw_info->ope_pp_base = + soc_info->reg_map[OPE_PP_BASE].mem_base; + core_info->ope_hw_info->ope_bus_rd_base = + soc_info->reg_map[OPE_BUS_RD].mem_base; + core_info->ope_hw_info->ope_bus_wr_base = + soc_info->reg_map[OPE_BUS_WR].mem_base; + + core_info->hw_version = cam_io_r_mb( + core_info->ope_hw_info->ope_top_base + + ope_hw_version_reg.hw_ver); + + switch (core_info->hw_version) { + case OPE_HW_VER_1_0_0: + case 
OPE_HW_VER_1_1_0: + core_info->ope_hw_info->ope_hw = &ope_hw_100; + break; + default: + CAM_ERR(CAM_OPE, "Unsupported version : %u", + core_info->hw_version); + rc = -EINVAL; + break; + } + + ope_hw_100.top_reg->base = core_info->ope_hw_info->ope_top_base; + ope_hw_100.bus_rd_reg->base = core_info->ope_hw_info->ope_bus_rd_base; + ope_hw_100.bus_wr_reg->base = core_info->ope_hw_info->ope_bus_wr_base; + ope_hw_100.pp_reg->base = core_info->ope_hw_info->ope_pp_base; + + return rc; +} + +int cam_ope_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_ope_device_core_info *core_info, + uint32_t hw_idx) +{ + struct cam_cpas_register_params cpas_register_params; + int rc; + + cpas_register_params.dev = &soc_info->pdev->dev; + memcpy(cpas_register_params.identifier, "ope", sizeof("ope")); + cpas_register_params.cam_cpas_client_cb = NULL; + cpas_register_params.cell_index = hw_idx; + cpas_register_params.userdata = NULL; + + rc = cam_cpas_register_client(&cpas_register_params); + if (rc < 0) { + CAM_ERR(CAM_OPE, "failed: %d", rc); + return rc; + } + core_info->cpas_handle = cpas_register_params.client_handle; + + return rc; +} + +static int cam_ope_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_hw_intf *ope_dev_intf = NULL; + struct cam_hw_info *ope_dev = NULL; + const struct of_device_id *match_dev = NULL; + struct cam_ope_device_core_info *core_info = NULL; + struct cam_ope_dev_probe ope_probe; + struct cam_ope_cpas_vote cpas_vote; + struct cam_ope_soc_private *soc_private; + int i; + uint32_t hw_idx; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + of_property_read_u32(pdev->dev.of_node, + "cell-index", &hw_idx); + + ope_dev_intf = CAM_MEM_ZALLOC(sizeof(struct cam_hw_intf), GFP_KERNEL); + if (!ope_dev_intf) + return -ENOMEM; + + ope_dev_intf->hw_idx = hw_idx; + ope_dev_intf->hw_type = OPE_DEV_OPE; + 
ope_dev = CAM_MEM_ZALLOC(sizeof(struct cam_hw_info), GFP_KERNEL); + if (!ope_dev) { + rc = -ENOMEM; + goto ope_dev_alloc_failed; + } + + ope_dev->soc_info.pdev = pdev; + ope_dev->soc_info.dev = &pdev->dev; + ope_dev->soc_info.dev_name = pdev->name; + ope_dev_intf->hw_priv = ope_dev; + ope_dev_intf->hw_ops.init = cam_ope_init_hw; + ope_dev_intf->hw_ops.deinit = cam_ope_deinit_hw; + ope_dev_intf->hw_ops.get_hw_caps = cam_ope_get_hw_caps; + ope_dev_intf->hw_ops.start = cam_ope_start; + ope_dev_intf->hw_ops.stop = cam_ope_stop; + ope_dev_intf->hw_ops.flush = cam_ope_flush; + ope_dev_intf->hw_ops.process_cmd = cam_ope_process_cmd; + + CAM_DBG(CAM_OPE, "type %d index %d", + ope_dev_intf->hw_type, + ope_dev_intf->hw_idx); + + if (ope_dev_intf->hw_idx < OPE_DEV_MAX) + cam_ope_dev_list[ope_dev_intf->hw_idx].hw_intf = + ope_dev_intf; + + platform_set_drvdata(pdev, ope_dev_intf); + + + ope_dev->core_info = CAM_MEM_ZALLOC(sizeof(struct cam_ope_device_core_info), + GFP_KERNEL); + if (!ope_dev->core_info) { + rc = -ENOMEM; + goto ope_core_alloc_failed; + } + core_info = (struct cam_ope_device_core_info *)ope_dev->core_info; + core_info->ope_hw_info = &ope_hw_info; + ope_dev->soc_info.soc_private = &ope_soc_info; + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + rc = -EINVAL; + CAM_DBG(CAM_OPE, "No ope hardware info"); + goto ope_match_dev_failed; + } + + rc = cam_ope_init_soc_resources(&ope_dev->soc_info, cam_ope_irq, + ope_dev); + if (rc < 0) { + CAM_ERR(CAM_OPE, "failed to init_soc"); + goto init_soc_failed; + } + core_info->hw_type = OPE_DEV_OPE; + core_info->hw_idx = hw_idx; + rc = cam_ope_register_cpas(&ope_dev->soc_info, + core_info, ope_dev_intf->hw_idx); + if (rc < 0) + goto register_cpas_failed; + + rc = cam_ope_enable_soc_resources(&ope_dev->soc_info); + if (rc < 0) { + CAM_ERR(CAM_OPE, "enable soc resorce failed: %d", rc); + goto enable_soc_failed; + } + cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE; + 
cpas_vote.ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + cpas_vote.axi_vote.num_paths = 1; + cpas_vote.axi_vote.axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_OPE_WR_VID; + cpas_vote.axi_vote.axi_path[0].transac_type = + CAM_AXI_TRANSACTION_WRITE; + cpas_vote.axi_vote.axi_path[0].camnoc_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].mnoc_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].ddr_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + cpas_vote.axi_vote.axi_path[0].ddr_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(core_info->cpas_handle, + &cpas_vote.ahb_vote, &cpas_vote.axi_vote); + + rc = cam_ope_init_hw_version(&ope_dev->soc_info, ope_dev->core_info); + if (rc) + goto init_hw_failure; + + cam_ope_disable_soc_resources(&ope_dev->soc_info, true); + cam_cpas_stop(core_info->cpas_handle); + ope_dev->hw_state = CAM_HW_STATE_POWER_DOWN; + + ope_probe.hfi_en = ope_soc_info.hfi_en; + cam_ope_process_cmd(ope_dev, OPE_HW_PROBE, + &ope_probe, sizeof(ope_probe)); + mutex_init(&ope_dev->hw_mutex); + spin_lock_init(&ope_dev->hw_lock); + init_completion(&ope_dev->hw_complete); + + CAM_DBG(CAM_OPE, "OPE:%d component bound successfully", + ope_dev_intf->hw_idx); + soc_private = ope_dev->soc_info.soc_private; + cam_ope_dev_list[ope_dev_intf->hw_idx].num_hw_pid = + soc_private->num_pid; + + for (i = 0; i < soc_private->num_pid; i++) + cam_ope_dev_list[ope_dev_intf->hw_idx].hw_pid[i] = + soc_private->pid[i]; + + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return rc; + +init_hw_failure: +enable_soc_failed: +register_cpas_failed: +init_soc_failed: +ope_match_dev_failed: + CAM_MEM_FREE(ope_dev->core_info); +ope_core_alloc_failed: + CAM_MEM_FREE(ope_dev); +ope_dev_alloc_failed: + CAM_MEM_FREE(ope_dev_intf); + return rc; +} + +static void cam_ope_component_unbind(struct device 
*dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + CAM_DBG(CAM_OPE, "Unbinding component: %s", pdev->name); +} + +int cam_ope_hw_init(struct cam_ope_hw_intf_data **ope_hw_intf_data, + uint32_t hw_idx) +{ + int rc = 0; + + if (cam_ope_dev_list[hw_idx].hw_intf) { + *ope_hw_intf_data = &cam_ope_dev_list[hw_idx]; + rc = 0; + } else { + CAM_ERR(CAM_OPE, "inval param"); + *ope_hw_intf_data = NULL; + rc = -ENODEV; + } + return rc; +} + +const static struct component_ops cam_ope_component_ops = { + .bind = cam_ope_component_bind, + .unbind = cam_ope_component_unbind, +}; + +int cam_ope_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_OPE, "Adding OPE component"); + rc = component_add(&pdev->dev, &cam_ope_component_ops); + if (rc) + CAM_ERR(CAM_OPE, "failed to add component rc: %d", rc); + + return rc; +} + +static const struct of_device_id cam_ope_dt_match[] = { + { + .compatible = "qcom,ope", + .data = &ope_hw_version_reg, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cam_ope_dt_match); + +struct platform_driver cam_ope_driver = { + .probe = cam_ope_probe, + .driver = { + .name = "ope", + .of_match_table = cam_ope_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_ope_init_module(void) +{ + return platform_driver_register(&cam_ope_driver); +} + +void cam_ope_exit_module(void) +{ + platform_driver_unregister(&cam_ope_driver); +} + +MODULE_DESCRIPTION("CAM OPE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev_intf.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev_intf.h new file mode 100644 index 0000000000..304d30dd35 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_dev_intf.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef CAM_OPE_DEV_INTF_H +#define CAM_OPE_DEV_INTF_H + +#include +#include "cam_ope_hw_mgr.h" +#include "cam_cdm_intf_api.h" +#include "cam_cpas_api.h" + + +#define OPE_HW_INIT 0x1 +#define OPE_HW_DEINIT 0x2 +#define OPE_HW_ACQUIRE 0x3 +#define OPE_HW_RELEASE 0x4 +#define OPE_HW_START 0x5 +#define OPE_HW_STOP 0x6 +#define OPE_HW_FLUSH 0x7 +#define OPE_HW_PREPARE 0x8 +#define OPE_HW_ISR 0x9 +#define OPE_HW_PROBE 0xA +#define OPE_HW_CLK_UPDATE 0xB +#define OPE_HW_BW_UPDATE 0xC +#define OPE_HW_RESET 0xD +#define OPE_HW_SET_IRQ_CB 0xE +#define OPE_HW_CLK_DISABLE 0xF +#define OPE_HW_CLK_ENABLE 0x10 +#define OPE_HW_DUMP_DEBUG 0x11 +#define OPE_HW_MATCH_PID_MID 0x12 + +struct cam_ope_match_pid_args { + uint32_t fault_pid; + uint32_t fault_mid; + uint32_t match_res; + uint32_t device_idx; + bool mid_match_found; +}; + +/** + * struct cam_ope_dev_probe + * + * @hfi_en: HFI enable flag + */ +struct cam_ope_dev_probe { + bool hfi_en; +}; + +/** + * struct cam_ope_dev_init + * + * @hfi_en: HFI enable flag + * @core_info: OPE core info + */ +struct cam_ope_dev_init { + bool hfi_en; + struct cam_ope_device_core_info *core_info; +}; + +/** + * struct cam_ope_dev_clk_update + * + * @clk_rate: Clock rate + */ +struct cam_ope_dev_clk_update { + uint32_t clk_rate; +}; + +/** + * struct cam_ope_dev_bw_update + * + * @ahb_vote: AHB vote info + * @axi_vote: AXI vote info + * @ahb_vote_valid: Flag for ahb vote + * @axi_vote_valid: Flag for axi vote + */ +struct cam_ope_dev_bw_update { + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote; + uint32_t ahb_vote_valid; + uint32_t axi_vote_valid; +}; + +/** + * struct cam_ope_dev_caps + * + * @hw_idx: Hardware index + * @hw_ver: Hardware version info + */ +struct cam_ope_dev_caps { + uint32_t hw_idx; + struct ope_hw_ver hw_ver; +}; + +/** + * struct cam_ope_dev_acquire + * + * @ctx_id: Context id + * @ope_acquire: OPE acquire info + * @bus_wr_ctx: Bus Write context + * @bus_rd_ctx: Bus Read context + */ +struct 
cam_ope_dev_acquire { + uint32_t ctx_id; + struct ope_acquire_dev_info *ope_acquire; + struct ope_bus_wr_ctx *bus_wr_ctx; + struct ope_bus_rd_ctx *bus_rd_ctx; +}; + +/** + * struct cam_ope_dev_release + * + * @ctx_id: Context id + * @bus_wr_ctx: Bus Write context + * @bus_rd_ctx: Bus Read context + */ +struct cam_ope_dev_release { + uint32_t ctx_id; + struct ope_bus_wr_ctx *bus_wr_ctx; + struct ope_bus_rd_ctx *bus_rd_ctx; +}; + +/** + * struct cam_ope_set_irq_cb + * + * @ope_hw_mgr_cb: Callback to hardware manager + * @data: Private data + */ +struct cam_ope_set_irq_cb { + int32_t (*ope_hw_mgr_cb)(uint32_t irq_status, void *data); + void *data; +}; + +/** + * struct cam_ope_irq_data + * + * @error: IRQ error + */ +struct cam_ope_irq_data { + uint32_t error; +}; + +/** + * struct cam_ope_dev_prepare_req + * + * @hw_mgr: OPE hardware manager + * @packet: Packet + * @prepare_args: Prepare request args + * @ctx_data: Context data + * @wr_cdm_batch: WM request + * @rd_cdm_batch: RD master request + * @frame_process: Frame process command + * @req_idx: Request Index + * @kmd_buf_offset: KMD buffer offset + */ +struct cam_ope_dev_prepare_req { + struct cam_ope_hw_mgr *hw_mgr; + struct cam_packet *packet; + struct cam_hw_prepare_update_args *prepare_args; + struct cam_ope_ctx *ctx_data; + struct ope_bus_wr_io_port_cdm_batch *wr_cdm_batch; + struct ope_bus_rd_io_port_cdm_batch *rd_cdm_batch; + struct ope_frame_process *frame_process; + uint32_t req_idx; + uint32_t kmd_buf_offset; +}; + +int cam_ope_top_process(struct ope_hw *ope_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data); + +int cam_ope_bus_rd_process(struct ope_hw *ope_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data); + +int cam_ope_bus_wr_process(struct ope_hw *ope_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data); + +int cam_ope_init_module(void); +void cam_ope_exit_module(void); + +int cam_ope_subdev_init_module(void); +void cam_ope_subdev_exit_module(void); + +#endif /* CAM_OPE_DEV_INTF_H */ + 
diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_hw.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_hw.h new file mode 100644 index 0000000000..680c8e0b7d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_hw.h @@ -0,0 +1,435 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef CAM_OPE_HW_H +#define CAM_OPE_HW_H + +#define OPE_HW_VER_1_0_0 0x10000000 +#define OPE_HW_VER_1_1_0 0x10010000 + +#define OPE_DEV_OPE 0 +#define OPE_DEV_MAX 1 + +#define MAX_RD_CLIENTS 3 +#define MAX_WR_CLIENTS 8 +#define MAX_PP_CLIENTS 32 + +#define MAX_RW_CLIENTS (MAX_RD_CLIENTS + MAX_WR_CLIENTS) + +#define OPE_CDM_BASE 0x0 +#define OPE_TOP_BASE 0x1 +#define OPE_QOS_BASE 0x2 +#define OPE_PP_BASE 0x3 +#define OPE_BUS_RD 0x4 +#define OPE_BUS_WR 0x5 +#define OPE_BASE_MAX 0x6 + +#define BUS_RD_COMBO_BAYER_MASK 0x1 +#define BUS_RD_COMBO_YUV_MASK 0x2 +#define BUS_RD_COMBO_MAX 0x2 + +#define BUS_RD_BAYER 0x0 +#define BUS_RD_YUV 0x1 + +#define BUS_WR_COMBO_YUV_MASK 0x1 +#define BUS_WR_COMBO_MAX 0x1 + +#define BUS_WR_YUV 0x0 + +#define BUS_WR_VIDEO_Y 0x0 +#define BUS_WR_VIDEO_C 0x1 +#define BUS_WR_DISP_Y 0x2 +#define BUS_WR_DISP_C 0x3 +#define BUS_WR_ARGB 0x4 +#define BUS_WR_STATS_RS 0x5 +#define BUS_WR_STATS_IHIST 0x6 +#define BUS_WR_STATS_LTM 0x7 + +#define OPE_WAIT_COMP_RUP 0x1 +#define OPE_WAIT_COMP_WR_DONE 0x2 +#define OPE_WAIT_COMP_IDLE 0x4 +#define OPE_WAIT_COMP_GEN_IRQ 0x8 + +#define OPE_MAX_DEBUG_REGISTER 30 + +struct cam_ope_pid_mid_info { + int cam_ope_res_type; + uint32_t pid; + uint32_t mid; + bool read; +}; + +struct cam_ope_common { + uint32_t mode[CAM_FORMAT_MAX]; + struct cam_ope_pid_mid_info (*ope_mid_info)[MAX_RW_CLIENTS]; +}; + +struct cam_ope_top_reg { + void *base; + uint32_t offset; + uint32_t hw_version; + uint32_t reset_cmd; + uint32_t core_clk_cfg_ctrl_0; + uint32_t ahb_clk_cgc_ctrl; + uint32_t 
core_cfg; + uint32_t irq_status; + uint32_t irq_mask; + uint32_t irq_clear; + uint32_t irq_set; + uint32_t irq_cmd; + uint32_t violation_status; + uint32_t throttle_cnt_cfg; + uint32_t debug_cfg; + uint32_t scratch_reg; + uint32_t num_debug_registers; + struct cam_ope_debug_register *debug_regs; +}; + +struct cam_ope_top_reg_val { + uint32_t hw_version; + uint32_t major_mask; + uint32_t major_shift; + uint32_t minor_mask; + uint32_t minor_shift; + uint32_t incr_mask; + uint32_t incr_shift; + uint32_t irq_mask; + uint32_t irq_set_clear; + uint32_t sw_reset_cmd; + uint32_t hw_reset_cmd; + uint32_t core_clk_cfg_ctrl_0; + uint32_t ahb_clk_cgc_ctrl; + uint32_t input_format; + uint32_t input_format_mask; + uint32_t color_correct_src_sel; + uint32_t color_correct_src_sel_mask; + uint32_t stats_ihist_src_sel; + uint32_t stats_ihist_src_sel_mask; + uint32_t chroma_up_src_sel; + uint32_t chroma_up_src_sel_mask; + uint32_t argb_alpha; + uint32_t argb_alpha_mask; + uint32_t rs_throttle_cnt; + uint32_t rs_throttle_cnt_mask; + uint32_t ihist_throttle_cnt; + uint32_t ihist_throttle_cnt_mask; + uint32_t rst_done; + uint32_t we_done; + uint32_t fe_done; + uint32_t ope_violation; + uint32_t idle; + uint32_t debug_cfg_val; +}; + +struct cam_ope_qos_reg { + void *base; + uint32_t offset; + uint32_t hw_version; + uint32_t hw_status; + uint32_t module_cfg; + uint32_t curve_cfg_0; + uint32_t curve_cfg_1; + uint32_t window_cfg; + uint32_t eos_status_0; + uint32_t eos_status_1; + uint32_t eos_status_2; +}; + +struct cam_ope_qos_reg_val { + uint32_t hw_version; + uint32_t proc_interval; + uint32_t proc_interval_mask; + uint32_t static_health; + uint32_t static_health_mask; + uint32_t module_cfg_en; + uint32_t module_cfg_en_mask; + uint32_t yexp_ymin_dec; + uint32_t yexp_ymin_dec_mask; + uint32_t ymin_inc; + uint32_t ymin_inc_mask; + uint32_t initial_delta; + uint32_t initial_delta_mask; + uint32_t window_cfg; +}; + +struct cam_ope_bus_rd_client_reg { + uint32_t core_cfg; + uint32_t 
ccif_meta_data; + uint32_t img_addr; + uint32_t img_cfg; + uint32_t stride; + uint32_t unpack_cfg; + uint32_t latency_buf_allocation; + uint32_t misr_cfg_0; + uint32_t misr_cfg_1; + uint32_t misr_rd_val; +}; + +struct cam_ope_bus_rd_reg { + void *base; + uint32_t offset; + uint32_t hw_version; + uint32_t sw_reset; + uint32_t cgc_override; + uint32_t irq_mask; + uint32_t irq_clear; + uint32_t irq_cmd; + uint32_t irq_status; + uint32_t input_if_cmd; + uint32_t irq_set; + uint32_t misr_reset; + uint32_t security_cfg; + uint32_t iso_cfg; + uint32_t iso_seed; + + uint32_t num_clients; + struct cam_ope_bus_rd_client_reg rd_clients[MAX_RD_CLIENTS]; +}; + +struct cam_ope_bus_rd_client_reg_val { + uint32_t core_cfg; + uint32_t stripe_location; + uint32_t stripe_location_mask; + uint32_t stripe_location_shift; + uint32_t pix_pattern; + uint32_t pix_pattern_mask; + uint32_t pix_pattern_shift; + uint32_t img_addr; + uint32_t img_width; + uint32_t img_width_mask; + uint32_t img_width_shift; + uint32_t img_height; + uint32_t img_height_mask; + uint32_t img_height_shift; + uint32_t stride; + uint32_t mode; + uint32_t mode_mask; + uint32_t mode_shift; + uint32_t alignment; + uint32_t alignment_mask; + uint32_t alignment_shift; + uint32_t latency_buf_allocation; + uint32_t misr_cfg_samp_mode; + uint32_t misr_cfg_samp_mode_mask; + uint32_t misr_cfg_en; + uint32_t misr_cfg_en_mask; + uint32_t misr_cfg_1; + uint32_t misr_rd_val; + uint32_t input_port_id; + uint32_t rm_port_id; + uint32_t format_type; + uint32_t num_combos_supported; +}; + +struct cam_ope_bus_rd_reg_val { + uint32_t hw_version; + uint32_t sw_reset; + uint32_t cgc_override; + uint32_t irq_mask; + uint32_t go_cmd; + uint32_t go_cmd_mask; + uint32_t ica_en; + uint32_t ica_en_mask; + uint32_t static_prg; + uint32_t static_prg_mask; + uint32_t go_cmd_sel; + uint32_t go_cmd_sel_mask; + uint32_t fs_sync_en; + uint32_t fs_sync_en_mask; + uint32_t misr_reset; + uint32_t security_cfg; + uint32_t iso_bpp_select; + uint32_t 
iso_bpp_select_mask; + uint32_t iso_pattern_select; + uint32_t iso_pattern_select_mask; + uint32_t iso_en; + uint32_t iso_en_mask; + uint32_t iso_seed; + uint32_t irq_set_clear; + uint32_t rst_done; + uint32_t rup_done; + uint32_t rd_buf_done; + uint32_t violation; + uint32_t latency_buf_size; + + uint32_t num_clients; + struct cam_ope_bus_rd_client_reg_val rd_clients[MAX_RD_CLIENTS]; +}; + +struct cam_ope_bus_wr_client_reg { + uint32_t core_cfg; + uint32_t img_addr; + uint32_t img_cfg; + uint32_t x_init; + uint32_t stride; + uint32_t pack_cfg; + uint32_t bw_limit; + uint32_t frame_header_addr; + uint32_t subsample_period; + uint32_t subsample_pattern; +}; + +struct cam_ope_bus_wr_reg { + void *base; + uint32_t offset; + uint32_t hw_version; + uint32_t cgc_override; + uint32_t irq_mask_0; + uint32_t irq_mask_1; + uint32_t irq_clear_0; + uint32_t irq_clear_1; + uint32_t irq_status_0; + uint32_t irq_status_1; + uint32_t irq_cmd; + uint32_t frame_header_cfg_0; + uint32_t local_frame_header_cfg_0; + uint32_t irq_set_0; + uint32_t irq_set_1; + uint32_t iso_cfg; + uint32_t violation_status; + uint32_t image_size_violation_status; + uint32_t misr_cfg_0; + uint32_t misr_cfg_1; + uint32_t misr_rd_sel; + uint32_t misr_reset; + uint32_t misr_val; + uint32_t num_clients; + struct cam_ope_bus_wr_client_reg wr_clients[MAX_WR_CLIENTS]; +}; + +struct cam_ope_bus_wr_client_reg_val { + uint32_t core_cfg_en; + uint32_t core_cfg_en_mask; + uint32_t core_cfg_en_shift; + uint32_t virtual_frame_en; + uint32_t virtual_frame_en_mask; + uint32_t virtual_frame_en_shift; + uint32_t frame_header_en; + uint32_t frame_header_en_mask; + uint32_t frame_header_en_shift; + uint32_t auto_recovery_en; + uint32_t auto_recovery_en_mask; + uint32_t auto_recovery_en_shift; + uint32_t mode; + uint32_t mode_mask; + uint32_t mode_shift; + uint32_t img_addr; + uint32_t width; + uint32_t width_mask; + uint32_t width_shift; + uint32_t height; + uint32_t height_mask; + uint32_t height_shift; + uint32_t x_init; + 
uint32_t stride; + uint32_t format; + uint32_t format_mask; + uint32_t format_shift; + uint32_t alignment; + uint32_t alignment_mask; + uint32_t alignment_shift; + uint32_t bw_limit_en; + uint32_t bw_limit_en_mask; + uint32_t bw_limit_counter; + uint32_t bw_limit_counter_mask; + uint32_t frame_header_addr; + uint32_t subsample_period; + uint32_t subsample_pattern; + uint32_t output_port_id; + uint32_t wm_port_id; + uint32_t format_type; + uint32_t num_combos_supported; +}; + +struct cam_ope_bus_wr_reg_val { + uint32_t hw_version; + uint32_t cgc_override; + uint32_t irq_mask_0; + uint32_t irq_mask_1; + uint32_t irq_set_clear; + uint32_t comp_rup_done; + uint32_t comp_buf_done; + uint32_t cons_violation; + uint32_t violation; + uint32_t img_size_violation; + uint32_t frame_header_cfg_0; + uint32_t local_frame_header_cfg_0; + uint32_t iso_cfg; + uint32_t misr_0_en; + uint32_t misr_0_en_mask; + uint32_t misr_1_en; + uint32_t misr_1_en_mask; + uint32_t misr_2_en; + uint32_t misr_2_en_mask; + uint32_t misr_3_en; + uint32_t misr_3_en_mask; + uint32_t misr_0_samp_mode; + uint32_t misr_0_samp_mode_mask; + uint32_t misr_1_samp_mode; + uint32_t misr_1_samp_mode_mask; + uint32_t misr_2_samp_mode; + uint32_t misr_2_samp_mode_mask; + uint32_t misr_3_samp_mode; + uint32_t misr_3_samp_mode_mask; + uint32_t misr_0_id; + uint32_t misr_0_id_mask; + uint32_t misr_1_id; + uint32_t misr_1_id_mask; + uint32_t misr_2_id; + uint32_t misr_2_id_mask; + uint32_t misr_3_id; + uint32_t misr_3_id_mask; + uint32_t misr_rd_misr_sel; + uint32_t misr_rd_misr_sel_mask; + uint32_t misr_rd_word_sel; + uint32_t misr_rd_word_sel_mask; + uint32_t misr_reset; + uint32_t misr_val; + + + uint32_t num_clients; + struct cam_ope_bus_wr_client_reg_val wr_clients[MAX_WR_CLIENTS]; +}; + +struct cam_ope_debug_register { + uint32_t offset; +}; + +struct cam_ope_bus_pp_client_reg { + uint32_t hw_status; +}; + +struct cam_ope_pp_reg { + void *base; + uint32_t offset; + + uint32_t num_clients; + struct 
cam_ope_bus_pp_client_reg pp_clients[MAX_PP_CLIENTS]; +}; + +struct ope_hw { + struct cam_ope_top_reg *top_reg; + struct cam_ope_top_reg_val *top_reg_val; + + struct cam_ope_bus_rd_reg *bus_rd_reg; + struct cam_ope_bus_rd_reg_val *bus_rd_reg_val; + + struct cam_ope_bus_wr_reg *bus_wr_reg; + struct cam_ope_bus_wr_reg_val *bus_wr_reg_val; + + struct cam_ope_qos_reg *qos_reg; + struct cam_ope_qos_reg_val *qos_reg_val; + + struct cam_ope_common *common; + + struct cam_ope_pp_reg *pp_reg; +}; + +struct hw_version_reg { + uint32_t hw_ver; + uint32_t reserved; +}; + +#endif /* CAM_OPE_HW_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_hw_100.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_hw_100.h new file mode 100644 index 0000000000..3ccf9ee120 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_hw_100.h @@ -0,0 +1,752 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef CAM_OPE_HW_100_H +#define CAM_OPE_HW_100_H + +#define OPE_BUS_RD_TYPE_BAYER 0x0 +#define OPE_BUS_RD_TYPE_YUV_Y 0x0 +#define OPE_BUS_RD_TYPE_YUC_C 0x1 +#define OPE_BUS_RD_TYPE_CDM 0x2 + +#define OPE_BUS_WR_TYPE_VID_Y 0x0 +#define OPE_BUS_WR_TYPE_VID_C 0x1 +#define OPE_BUS_WR_TYPE_DISP_Y 0x2 +#define OPE_BUS_WR_TYPE_DISP_C 0x3 +#define OPE_BUS_WR_TYPE_ARGB 0x4 +#define OPE_BUS_WR_TYPE_RS 0x5 +#define OPE_BUS_WR_TYPE_IHIST 0x6 +#define OPE_BUS_WR_TYPE_LTM 0x7 + +static struct cam_ope_pid_mid_info g_ope_mid_info[OPE_DEV_MAX][MAX_RW_CLIENTS] = { + { + { + .cam_ope_res_type = OPE_IN_RES_FULL, + .pid = 0, + .mid = 0, + .read = true, + }, + { + .cam_ope_res_type = OPE_IN_RES_FULL, + .pid = 0, + .mid = 1, + .read = true, + }, + { + .cam_ope_res_type = OPE_IN_RES_FULL, + .pid = 0, + .mid = 2, + .read = true, + }, + { + .cam_ope_res_type = OPE_OUT_RES_VIDEO, + .pid = 1, + .mid = 16, + .read = false, + }, + { + .cam_ope_res_type = OPE_OUT_RES_VIDEO, + .pid = 1, + .mid = 17, + .read = false, + }, + { + .cam_ope_res_type = OPE_OUT_RES_DISP, + .pid = 1, + .mid = 18, + .read = false, + }, + { + .cam_ope_res_type = OPE_OUT_RES_DISP, + .pid = 1, + .mid = 19, + .read = false, + }, + { + .cam_ope_res_type = OPE_OUT_RES_ARGB, + .pid = 1, + .mid = 20, + .read = false, + }, + { + .cam_ope_res_type = OPE_OUT_RES_STATS_RS, + .pid = 1, + .mid = 21, + .read = false, + }, + { + .cam_ope_res_type = OPE_OUT_RES_STATS_IHIST, + .pid = 1, + .mid = 22, + .read = false, + }, + { + .cam_ope_res_type = OPE_OUT_RES_STATS_LTM, + .pid = 1, + .mid = 23, + .read = false, + }, + }, +}; + +static struct cam_ope_common common_data = { + .ope_mid_info = g_ope_mid_info, +}; + +enum cam_ope_bus_rd_unpacker_format { + BUS_RD_VER1_PACKER_FMT_PLAIN_128_BYPASS = 0x0, + BUS_RD_VER1_PACKER_FMT_PLAIN_8 = 0x1, + BUS_RD_VER1_PACKER_FMT_PLAIN_16_10BPP = 0x2, + BUS_RD_VER1_PACKER_FMT_PLAIN_16_12BPP = 0x3, + BUS_RD_VER1_PACKER_FMT_PLAIN_16_14BPP = 0x4, + BUS_RD_VER1_PACKER_FMT_PLAIN_32_20BPP = 0x5, + 
BUS_RD_VER1_PACKER_FMT_ARGB16_10 = 0x6, + BUS_RD_VER1_PACKER_FMT_ARGB16_12 = 0x7, + BUS_RD_VER1_PACKER_FMT_ARGB16_14 = 0x8, + BUS_RD_VER1_PACKER_FMT_PLAIN_32 = 0x9, + BUS_RD_VER1_PACKER_FMT_PLAIN_64 = 0xA, + BUS_RD_VER1_PACKER_FMT_TP_10 = 0xB, + BUS_RD_VER1_PACKER_FMT_MIPI_8 = 0xC, + BUS_RD_VER1_PACKER_FMT_MIPI_10 = 0xD, + BUS_RD_VER1_PACKER_FMT_MIPI_12 = 0xE, + BUS_RD_VER1_PACKER_FMT_MIPI_14 = 0xF, + BUS_RD_VER1_PACKER_FMT_PLAIN_16_16BPP = 0x10, + BUS_RD_VER1_PACKER_FMT_BYPASS_SWAP = 0x11, + BUS_RD_VER1_PACKER_FMT_PLAIN_8_SWAP = 0x12, + BUS_RD_VER1_PACKER_FMT_MAX = 0x13, +}; + +static struct cam_ope_debug_register ope_debug_regs[OPE_MAX_DEBUG_REGISTER] = { + { + .offset = 0xA0, + }, + { + .offset = 0xA4 + }, + { + .offset = 0xA8, + }, + { + .offset = 0xAC, + }, + { + .offset = 0xB0, + }, + { + .offset = 0xB4, + }, + { + .offset = 0xB8, + }, + { + .offset = 0xBC, + }, + { + .offset = 0xD0, + }, + { + .offset = 0xD4, + }, +}; + +static struct cam_ope_top_reg ope_top_reg = { + .offset = 0x400, + .hw_version = 0x0, + .reset_cmd = 0x4, + .core_clk_cfg_ctrl_0 = 0x8, + .ahb_clk_cgc_ctrl = 0xC, + .core_cfg = 0x10, + .irq_status = 0x14, + .irq_mask = 0x18, + .irq_clear = 0x1C, + .irq_set = 0x20, + .irq_cmd = 0x24, + .violation_status = 0x28, + .throttle_cnt_cfg = 0x2C, + .debug_cfg = 0xDC, + .scratch_reg = 0x1F0, + .num_debug_registers = 10, + .debug_regs = ope_debug_regs, +}; + +static struct cam_ope_top_reg_val ope_top_reg_val = { + .hw_version = 0x10000000, + .major_mask = 0xFFFF, + .major_shift = 0x0, + .minor_mask = 0x0FFF0000, + .minor_shift = 0xF, + .incr_mask = 0xF0000000, + .incr_shift = 0x1B, + .irq_mask = 0x0000000F, + .sw_reset_cmd = 0x2, + .hw_reset_cmd = 0x1, + .irq_set_clear = 0x1, + .rst_done = 0x1, + .we_done = 0x2, + .fe_done = 0x4, + .ope_violation = 0x8, + .idle = 0x10, + .debug_cfg_val = 0x1, +}; + + +static struct cam_ope_bus_rd_reg_val ope_bus_rd_reg_val = { + .hw_version = 0x00050000, + .sw_reset = 0x1, + .cgc_override = 0x0, + .irq_mask = 0x30001, 
+ .irq_set_clear = 0x1, + .rst_done = 0x1, + .rup_done = 0x2, + .rd_buf_done = 0xC, + .violation = 0x3000, + .go_cmd = 0x1, + .security_cfg = 0x0, + .latency_buf_size = 4096, + .num_clients = 0x2, + .rd_clients = { + { + .core_cfg = 0x1, + .stripe_location_mask = 0x3, + .stripe_location_shift = 0x0, + .pix_pattern_mask = 0x3F, + .pix_pattern_shift = 0x2, + .img_width_mask = 0xFFFF, + .img_width_shift = 0x10, + .img_height_mask = 0xFFFF, + .img_height_shift = 0x0, + .mode_mask = 0x1F, + .mode_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x5, + .latency_buf_allocation = 4096, + .input_port_id = OPE_IN_RES_FULL, + .rm_port_id = 0, + .format_type = BUS_RD_COMBO_BAYER_MASK | + BUS_RD_COMBO_YUV_MASK, + .num_combos_supported = 2, + }, + { + .core_cfg = 0x1, + .stripe_location_mask = 0x3, + .stripe_location_shift = 0x0, + .pix_pattern_mask = 0x3F, + .pix_pattern_shift = 0x2, + .img_width_mask = 0xFFFF, + .img_width_shift = 0x10, + .img_height_mask = 0xFFFF, + .img_height_shift = 0x0, + .mode_mask = 0x1F, + .mode_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x5, + .latency_buf_allocation = 4096, + .input_port_id = OPE_IN_RES_FULL, + .rm_port_id = 1, + .format_type = BUS_RD_COMBO_YUV_MASK, + .num_combos_supported = 1, + + }, + }, +}; + +static struct cam_ope_bus_rd_reg ope_bus_rd_reg = { + .offset = 0x4C00, + .hw_version = 0x0, + .sw_reset = 0x4, + .cgc_override = 0x8, + .irq_mask = 0xC, + .irq_clear = 0x10, + .irq_cmd = 0x14, + .irq_status = 0x18, + .input_if_cmd = 0x1C, + .irq_set = 0x20, + .misr_reset = 0x24, + .security_cfg = 0x28, + .iso_cfg = 0x2C, + .iso_seed = 0x30, + .num_clients = 0x2, + .rd_clients = { + { + .core_cfg = 0x50, + .ccif_meta_data = 0x54, + .img_addr = 0x58, + .img_cfg = 0x5C, + .stride = 0x60, + .unpack_cfg = 0x64, + .latency_buf_allocation = 0x78, + .misr_cfg_0 = 0x80, + .misr_cfg_1 = 0x84, + .misr_rd_val = 0x88, + }, + { + .core_cfg = 0xF0, + .ccif_meta_data = 0xF4, + .img_addr = 0xF8, + .img_cfg = 0xFC, + .stride = 
0x100, + .unpack_cfg = 0x104, + .latency_buf_allocation = 0x118, + .misr_cfg_0 = 0x120, + .misr_cfg_1 = 0x124, + .misr_rd_val = 0x128, + }, + }, +}; + +static struct cam_ope_bus_wr_reg ope_bus_wr_reg = { + .offset = 0x4D90, + .hw_version = 0x0, + .cgc_override = 0x8, + .irq_mask_0 = 0x18, + .irq_mask_1 = 0x1C, + .irq_clear_0 = 0x20, + .irq_clear_1 = 0x24, + .irq_status_0 = 0x28, + .irq_status_1 = 0x2C, + .irq_cmd = 0x30, + .frame_header_cfg_0 = 0x34, + .local_frame_header_cfg_0 = 0x4C, + .irq_set_0 = 0x50, + .irq_set_1 = 0x54, + .iso_cfg = 0x5C, + .violation_status = 0x64, + .image_size_violation_status = 0x70, + .misr_cfg_0 = 0xB8, + .misr_cfg_1 = 0xBC, + .misr_rd_sel = 0xC8, + .misr_reset = 0xCC, + .misr_val = 0xD0, + .num_clients = 0x8, + .wr_clients = { + { + .core_cfg = 0x200, + .img_addr = 0x204, + .img_cfg = 0x20C, + .x_init = 0x210, + .stride = 0x214, + .pack_cfg = 0x218, + .bw_limit = 0x21C, + .frame_header_addr = 0x220, + .subsample_period = 0x230, + .subsample_pattern = 0x234, + }, + { + .core_cfg = 0x300, + .img_addr = 0x304, + .img_cfg = 0x30C, + .x_init = 0x310, + .stride = 0x314, + .pack_cfg = 0x318, + .bw_limit = 0x31C, + .frame_header_addr = 0x320, + .subsample_period = 0x330, + .subsample_pattern = 0x334, + }, + { + .core_cfg = 0x400, + .img_addr = 0x404, + .img_cfg = 0x40C, + .x_init = 0x410, + .stride = 0x414, + .pack_cfg = 0x418, + .bw_limit = 0x41C, + .frame_header_addr = 0x420, + .subsample_period = 0x430, + .subsample_pattern = 0x434, + }, + { + .core_cfg = 0x500, + .img_addr = 0x504, + .img_cfg = 0x50C, + .x_init = 0x510, + .stride = 0x514, + .pack_cfg = 0x518, + .bw_limit = 0x51C, + .frame_header_addr = 0x520, + .subsample_period = 0x530, + .subsample_pattern = 0x534, + }, + { + .core_cfg = 0x600, + .img_addr = 0x604, + .img_cfg = 0x60C, + .x_init = 0x610, + .stride = 0x614, + .pack_cfg = 0x618, + .bw_limit = 0x61C, + .frame_header_addr = 0x620, + .subsample_period = 0x630, + .subsample_pattern = 0x634, + }, + { + .core_cfg = 0x700, + 
.img_addr = 0x704, + .img_cfg = 0x70C, + .x_init = 0x710, + .stride = 0x714, + .pack_cfg = 0x718, + .bw_limit = 0x71C, + .frame_header_addr = 0x720, + .subsample_period = 0x730, + .subsample_pattern = 0x734, + }, + { + .core_cfg = 0x800, + .img_addr = 0x804, + .img_cfg = 0x80C, + .x_init = 0x810, + .stride = 0x814, + .pack_cfg = 0x818, + .bw_limit = 0x81C, + .frame_header_addr = 0x820, + .subsample_period = 0x830, + .subsample_pattern = 0x834, + }, + { + .core_cfg = 0x900, + .img_addr = 0x904, + .img_cfg = 0x90C, + .x_init = 0x910, + .stride = 0x914, + .pack_cfg = 0x918, + .bw_limit = 0x91C, + .frame_header_addr = 0x920, + .subsample_period = 0x930, + .subsample_pattern = 0x934, + }, + }, +}; + +static struct cam_ope_bus_wr_reg_val ope_bus_wr_reg_val = { + .hw_version = 0x20010000, + .irq_mask_0 = 0xD0000000, + .irq_mask_1 = 0x0, + .irq_set_clear = 0x1, + .comp_rup_done = 0x1, + .comp_buf_done = 0x100, + .cons_violation = 0x10000000, + .violation = 0x40000000, + .img_size_violation = 0x80000000, + .num_clients = 0x8, + .wr_clients = { + { + .core_cfg_en = 0x1, + .core_cfg_en_mask = 0x1, + .core_cfg_en_shift = 0x0, + .virtual_frame_en_mask = 0x1, + .virtual_frame_en_shift = 0x1, + .frame_header_en_mask = 0x1, + .frame_header_en_shift = 0x2, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + .mode_shift = 0x10, + .width_mask = 0xFFFF, + .width_shift = 0x0, + .height_mask = 0xFFFF, + .height_shift = 0x10, + .format_mask = 0xF, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x4, + .output_port_id = OPE_OUT_RES_VIDEO, + .wm_port_id = BUS_WR_VIDEO_Y, + .format_type = BUS_WR_COMBO_YUV_MASK, + .num_combos_supported = 1, + }, + { + .core_cfg_en = 0x1, + .core_cfg_en_mask = 0x1, + .core_cfg_en_shift = 0x0, + .virtual_frame_en_mask = 0x1, + .virtual_frame_en_shift = 0x1, + .frame_header_en_mask = 0x1, + .frame_header_en_shift = 0x2, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + 
.mode_shift = 0x10, + .width_mask = 0xFFFF, + .width_shift = 0x0, + .height_mask = 0xFFFF, + .height_shift = 0x10, + .format_mask = 0xF, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x4, + .output_port_id = OPE_OUT_RES_VIDEO, + .wm_port_id = BUS_WR_VIDEO_C, + .format_type = BUS_WR_COMBO_YUV_MASK, + .num_combos_supported = 1, + }, + { + .core_cfg_en = 0x1, + .core_cfg_en_mask = 0x1, + .core_cfg_en_shift = 0x0, + .virtual_frame_en_mask = 0x1, + .virtual_frame_en_shift = 0x1, + .frame_header_en_mask = 0x1, + .frame_header_en_shift = 0x2, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + .mode_shift = 0x10, + .width_mask = 0xFFFF, + .width_shift = 0x0, + .height_mask = 0xFFFF, + .height_shift = 0x10, + .format_mask = 0xF, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x4, + .output_port_id = OPE_OUT_RES_DISP, + .wm_port_id = BUS_WR_DISP_Y, + .format_type = BUS_WR_COMBO_YUV_MASK, + .num_combos_supported = 1, + }, + { + .core_cfg_en = 0x1, + .core_cfg_en_mask = 0x1, + .core_cfg_en_shift = 0x0, + .virtual_frame_en_mask = 0x1, + .virtual_frame_en_shift = 0x1, + .frame_header_en_mask = 0x1, + .frame_header_en_shift = 0x2, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + .mode_shift = 0x10, + .width_mask = 0xFFFF, + .width_shift = 0x0, + .height_mask = 0xFFFF, + .height_shift = 0x10, + .format_mask = 0xF, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x4, + .output_port_id = OPE_OUT_RES_DISP, + .wm_port_id = BUS_WR_DISP_C, + .format_type = BUS_WR_COMBO_YUV_MASK, + .num_combos_supported = 1, + }, + { + .core_cfg_en = 0x1, + .core_cfg_en_mask = 0x1, + .core_cfg_en_shift = 0x0, + .virtual_frame_en_mask = 0x1, + .virtual_frame_en_shift = 0x1, + .frame_header_en_mask = 0x1, + .frame_header_en_shift = 0x2, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + .mode_shift = 0x10, + .width_mask = 0xFFFF, + 
.width_shift = 0x0, + .height_mask = 0xFFFF, + .height_shift = 0x10, + .format_mask = 0xF, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x4, + .output_port_id = OPE_OUT_RES_ARGB, + .wm_port_id = BUS_WR_ARGB, + .format_type = BUS_WR_COMBO_YUV_MASK, + .num_combos_supported = 1, + }, + { + .core_cfg_en = 0x1, + .core_cfg_en_mask = 0x1, + .core_cfg_en_shift = 0x0, + .virtual_frame_en_mask = 0x1, + .virtual_frame_en_shift = 0x1, + .frame_header_en_mask = 0x1, + .frame_header_en_shift = 0x2, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + .mode_shift = 0x10, + .width_mask = 0xFFFF, + .width_shift = 0x0, + .height_mask = 0xFFFF, + .height_shift = 0x10, + .format_mask = 0xF, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x4, + .output_port_id = OPE_OUT_RES_STATS_RS, + .wm_port_id = BUS_WR_STATS_RS, + .format_type = BUS_WR_COMBO_YUV_MASK, + .num_combos_supported = 1, + }, + { + .core_cfg_en = 0x1, + .core_cfg_en_mask = 0x1, + .core_cfg_en_shift = 0x0, + .virtual_frame_en_mask = 0x1, + .virtual_frame_en_shift = 0x1, + .frame_header_en_mask = 0x1, + .frame_header_en_shift = 0x2, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + .mode_shift = 0x10, + .width_mask = 0xFFFF, + .width_shift = 0x0, + .height_mask = 0xFFFF, + .height_shift = 0x10, + .format_mask = 0xF, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x4, + .output_port_id = OPE_OUT_RES_STATS_IHIST, + .wm_port_id = BUS_WR_STATS_IHIST, + .format_type = BUS_WR_COMBO_YUV_MASK, + .num_combos_supported = 1, + }, + { + .core_cfg_en = 0x1, + .core_cfg_en_mask = 0x1, + .core_cfg_en_shift = 0x0, + .virtual_frame_en_mask = 0x1, + .virtual_frame_en_shift = 0x1, + .frame_header_en_mask = 0x1, + .frame_header_en_shift = 0x2, + .auto_recovery_en_mask = 0x1, + .auto_recovery_en_shift = 0x4, + .mode_mask = 0x3, + .mode_shift = 0x10, + .width_mask = 0xFFFF, + .width_shift = 0x0, + 
.height_mask = 0xFFFF, + .height_shift = 0x10, + .format_mask = 0xF, + .format_shift = 0x0, + .alignment_mask = 0x1, + .alignment_shift = 0x4, + .output_port_id = OPE_OUT_RES_STATS_LTM, + .wm_port_id = BUS_WR_STATS_LTM, + .format_type = BUS_WR_COMBO_YUV_MASK, + .num_combos_supported = 1, + }, + }, +}; + +static struct cam_ope_pp_reg ope_pp_reg = { + .offset = 0x800, + .num_clients = MAX_PP_CLIENTS, + .pp_clients = { + { + .hw_status = 0x4, + }, + { + .hw_status = 0x204, + }, + { + .hw_status = 0x404, + }, + { + .hw_status = 0x604, + }, + { + .hw_status = 0x804, + }, + { + .hw_status = 0xA04, + }, + { + .hw_status = 0xC04, + }, + { + .hw_status = 0xE04, + }, + { + .hw_status = 0x1004, + }, + { + .hw_status = 0x1204, + }, + { + .hw_status = 0x1404, + }, + { + .hw_status = 0x1604, + }, + { + .hw_status = 0x1804, + }, + { + .hw_status = 0x1A04, + }, + { + .hw_status = 0x1C04, + }, + { + .hw_status = 0x1E04, + }, + { + .hw_status = 0x2204, + }, + { + .hw_status = 0x2604, + }, + { + .hw_status = 0x2804, + }, + { + .hw_status = 0x2A04, + }, + { + .hw_status = 0x2C04, + }, + { + .hw_status = 0x2E04, + }, + { + .hw_status = 0x3004, + }, + { + .hw_status = 0x3204, + }, + { + .hw_status = 0x3404, + }, + { + .hw_status = 0x3604, + }, + { + .hw_status = 0x3804, + }, + { + .hw_status = 0x3A04, + }, + { + .hw_status = 0x3C04, + }, + { + .hw_status = 0x3E04, + }, + { + .hw_status = 0x4004, + }, + { + .hw_status = 0x4204, + }, + }, +}; + +static struct ope_hw ope_hw_100 = { + .top_reg = &ope_top_reg, + .top_reg_val = &ope_top_reg_val, + .bus_rd_reg = &ope_bus_rd_reg, + .bus_rd_reg_val = &ope_bus_rd_reg_val, + .bus_wr_reg = &ope_bus_wr_reg, + .bus_wr_reg_val = &ope_bus_wr_reg_val, + .pp_reg = &ope_pp_reg, + .common = &common_data, +}; + +#endif /* CAM_OPE_HW_100_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.c b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.c new file mode 100644 index 0000000000..07a527dcd0 --- 
/dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include "ope_soc.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +static int cam_ope_get_dt_properties(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct platform_device *pdev = NULL; + struct device_node *of_node = NULL; + struct cam_ope_soc_private *ope_soc_info; + + if (!soc_info) { + CAM_ERR(CAM_OPE, "soc_info is NULL"); + return -EINVAL; + } + + pdev = soc_info->pdev; + of_node = pdev->dev.of_node; + ope_soc_info = soc_info->soc_private; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) + CAM_ERR(CAM_OPE, "get ope dt prop is failed: %d", rc); + + ope_soc_info->hfi_en = of_property_read_bool(of_node, "hfi_en"); + + return rc; +} + +static int cam_ope_request_platform_resource( + struct cam_hw_soc_info *soc_info, + irq_handler_t ope_irq_handler, void *data) +{ + int rc = 0, i; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = data; + + rc = cam_soc_util_request_platform_resource(soc_info, ope_irq_handler, &(irq_data[0])); + + return rc; +} + +int cam_ope_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t ope_irq_handler, void *irq_data) +{ + struct cam_ope_soc_private *soc_private; + struct platform_device *pdev = NULL; + int num_pid = 0, i = 0; + int rc = 0; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_ope_soc_private), GFP_KERNEL); + if (!soc_private) { + CAM_DBG(CAM_ISP, "Error! 
soc_private Alloc Failed"); + return -ENOMEM; + } + soc_info->soc_private = soc_private; + + rc = cam_ope_get_dt_properties(soc_info); + if (rc < 0) + return rc; + + rc = cam_ope_request_platform_resource(soc_info, ope_irq_handler, + irq_data); + if (rc < 0) + return rc; + + soc_private->num_pid = 0; + pdev = soc_info->pdev; + num_pid = of_property_count_u32_elems(pdev->dev.of_node, "cam_hw_pid"); + CAM_DBG(CAM_OPE, "ope: %d pid count %d", soc_info->index, num_pid); + + if (num_pid <= 0 || num_pid > CAM_OPE_HW_MAX_NUM_PID) + goto end; + + soc_private->num_pid = num_pid; + + for (i = 0; i < num_pid; i++) + of_property_read_u32_index(pdev->dev.of_node, "cam_hw_pid", i, + &soc_private->pid[i]); +end: + return rc; +} + +int cam_ope_enable_soc_resources(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + CAM_SVS_VOTE, true); + if (rc) { + CAM_ERR(CAM_OPE, "enable platform failed"); + return rc; + } + + return rc; +} + +int cam_ope_disable_soc_resources(struct cam_hw_soc_info *soc_info, + bool disable_clk) +{ + int rc = 0; + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, disable_clk, + true); + if (rc) + CAM_ERR(CAM_OPE, "enable platform failed"); + + return rc; +} + +int cam_ope_update_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t clk_rate) +{ + int32_t src_clk_idx; + + if (!soc_info) { + CAM_ERR(CAM_OPE, "Invalid soc info"); + return -EINVAL; + } + + src_clk_idx = soc_info->src_clk_idx; + + CAM_DBG(CAM_OPE, "clk_rate = %u src_clk_index = %d", + clk_rate, src_clk_idx); + if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) && + (soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) && + (clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) { + CAM_DBG(CAM_OPE, "clk_rate %d greater than max, reset to %d", + clk_rate, + soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]); + clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]; + } + 
+ CAM_DBG(CAM_OPE, "clk_rate = %u src_clk_index = %d", + clk_rate, src_clk_idx); + return cam_soc_util_set_src_clk_rate(soc_info, CAM_CLK_SW_CLIENT_IDX, clk_rate, 0); +} + +int cam_ope_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable) +{ + int rc = 0; + + if (clk_enable) + rc = cam_soc_util_clk_enable_default(soc_info, CAM_CLK_SW_CLIENT_IDX, CAM_SVS_VOTE); + else + cam_soc_util_clk_disable_default(soc_info, CAM_CLK_SW_CLIENT_IDX); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.h new file mode 100644 index 0000000000..3af5c8c345 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/ope_soc.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef CAM_OPE_SOC_H +#define CAM_OPE_SOC_H + +#include "cam_soc_util.h" + +#define CAM_OPE_HW_MAX_NUM_PID 2 + +/** + * struct cam_ope_soc_private + * + * @hfi_en: HFI enable flag + * @num_pid: OPE number of pids + * @pid: OPE pid value list + */ +struct cam_ope_soc_private { + uint32_t hfi_en; + uint32_t num_pid; + uint32_t pid[CAM_OPE_HW_MAX_NUM_PID]; +}; + + + +int cam_ope_init_soc_resources(struct cam_hw_soc_info *soc_info, + irq_handler_t ope_irq_handler, void *irq_data); + + +int cam_ope_enable_soc_resources(struct cam_hw_soc_info *soc_info); + +int cam_ope_disable_soc_resources(struct cam_hw_soc_info *soc_info, + bool disable_clk); + +int cam_ope_update_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t clk_rate); + +int cam_ope_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable); +#endif /* CAM_OPE_SOC_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.c b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.c new file mode 100644 index 0000000000..02008a5ad0 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.c @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_io_util.h" +#include "cam_hw.h" +#include "cam_hw_intf.h" +#include "ope_core.h" +#include "ope_soc.h" +#include "cam_soc_util.h" +#include "cam_io_util.h" +#include "cam_cpas_api.h" +#include "cam_debug_util.h" +#include "ope_hw.h" +#include "ope_dev_intf.h" +#include "ope_top.h" +#include "cam_common_util.h" + +static struct ope_top ope_top_info; + +static int cam_ope_top_dump_debug_reg(struct ope_hw *ope_hw_info) +{ + uint32_t i, val[3]; + struct cam_ope_top_reg *top_reg; + struct cam_ope_pp_reg *pp_reg; + uint32_t pp_hw_status = 0; + + top_reg = ope_hw_info->top_reg; + pp_reg = ope_hw_info->pp_reg; + + for (i = 0; i < top_reg->num_debug_registers; i = i+3) { + val[0] = cam_io_r_mb(top_reg->base + + top_reg->debug_regs[i].offset); + val[1] = ((i+1) < top_reg->num_debug_registers) ? + cam_io_r_mb(top_reg->base + + top_reg->debug_regs[i+1].offset) : 0; + val[2] = ((i+2) < top_reg->num_debug_registers) ? 
+ cam_io_r_mb(top_reg->base + + top_reg->debug_regs[i+2].offset) : 0; + + CAM_INFO(CAM_OPE, "status[%d-%d] : 0x%x 0x%x 0x%x", + i, i+2, val[0], val[1], val[2]); + } + + CAM_INFO(CAM_OPE, "scrath reg: 0x%x, stripe_idx: %d", + top_reg->offset + top_reg->scratch_reg, + cam_io_r_mb(top_reg->base + top_reg->scratch_reg)); + + for (i = 0; i < pp_reg->num_clients ; i++) { + pp_hw_status = 0; + pp_hw_status = + cam_io_r_mb(pp_reg->base + + pp_reg->pp_clients[i] + .hw_status); + + if (pp_hw_status) + CAM_ERR(CAM_OPE, + "ope pp hw_status offset 0x%x val 0x%x", + pp_reg->pp_clients[i].hw_status, + pp_hw_status); + } + return 0; +} + +static int cam_ope_top_reset(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + struct cam_ope_top_reg *top_reg; + struct cam_ope_top_reg_val *top_reg_val; + uint32_t irq_mask, irq_status; + unsigned long flags; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + + top_reg = ope_hw_info->top_reg; + top_reg_val = ope_hw_info->top_reg_val; + + mutex_lock(&ope_top_info.ope_hw_mutex); + reinit_completion(&ope_top_info.reset_complete); + + /* enable interrupt mask */ + cam_io_w_mb(top_reg_val->irq_mask, + ope_hw_info->top_reg->base + top_reg->irq_mask); + + /* OPE SW RESET */ + cam_io_w_mb(top_reg_val->sw_reset_cmd, + ope_hw_info->top_reg->base + top_reg->reset_cmd); + + rc = cam_common_wait_for_completion_timeout( + &ope_top_info.reset_complete, + msecs_to_jiffies(60)); + + cam_io_w_mb(top_reg_val->debug_cfg_val, + top_reg->base + top_reg->debug_cfg); + + if (!rc || rc < 0) { + spin_lock_irqsave(&ope_top_info.hw_lock, flags); + if (!completion_done(&ope_top_info.reset_complete)) { + CAM_DBG(CAM_OPE, + "IRQ delayed, checking the status registers"); + irq_mask = cam_io_r_mb(ope_hw_info->top_reg->base + + top_reg->irq_mask); + irq_status = cam_io_r_mb(ope_hw_info->top_reg->base + + top_reg->irq_status); + if (irq_status & top_reg_val->rst_done) { + CAM_DBG(CAM_OPE, "ope reset done"); + 
cam_io_w_mb(irq_status, + top_reg->base + top_reg->irq_clear); + cam_io_w_mb(top_reg_val->irq_set_clear, + top_reg->base + top_reg->irq_cmd); + } else { + CAM_ERR(CAM_OPE, + "irq mask 0x%x irq status 0x%x", + irq_mask, irq_status); + cam_ope_top_dump_debug_reg(ope_hw_info); + rc = -ETIMEDOUT; + } + } else { + rc = 0; + } + spin_unlock_irqrestore(&ope_top_info.hw_lock, flags); + } else { + rc = 0; + } + + /* enable interrupt mask */ + cam_io_w_mb(top_reg_val->irq_mask, + ope_hw_info->top_reg->base + top_reg->irq_mask); + + mutex_unlock(&ope_top_info.ope_hw_mutex); + return rc; +} + +static int cam_ope_top_release(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + + if (ctx_id < 0) { + CAM_ERR(CAM_OPE, "Invalid data: %d", ctx_id); + return -EINVAL; + } + + ope_top_info.top_ctx[ctx_id].ope_acquire = NULL; + + return rc; +} + +static int cam_ope_top_acquire(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + + if (ctx_id < 0 || !data) { + CAM_ERR(CAM_OPE, "Invalid data: %d %x", ctx_id, data); + return -EINVAL; + } + + ope_top_info.top_ctx[ctx_id].ope_acquire = data; + + return rc; +} + +static int cam_ope_top_init(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + struct cam_ope_top_reg *top_reg; + struct cam_ope_top_reg_val *top_reg_val; + struct cam_ope_dev_init *dev_init = data; + uint32_t irq_mask, irq_status; + unsigned long flags; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + + top_reg = ope_hw_info->top_reg; + top_reg_val = ope_hw_info->top_reg_val; + + top_reg->base = dev_init->core_info->ope_hw_info->ope_top_base; + + mutex_init(&ope_top_info.ope_hw_mutex); + /* OPE SW RESET */ + init_completion(&ope_top_info.reset_complete); + + /* enable interrupt mask */ + cam_io_w_mb(top_reg_val->irq_mask, + ope_hw_info->top_reg->base + top_reg->irq_mask); + cam_io_w_mb(top_reg_val->sw_reset_cmd, + ope_hw_info->top_reg->base + top_reg->reset_cmd); + + 
rc = cam_common_wait_for_completion_timeout( + &ope_top_info.reset_complete, + msecs_to_jiffies(60)); + + cam_io_w_mb(top_reg_val->debug_cfg_val, + top_reg->base + top_reg->debug_cfg); + + if (!rc || rc < 0) { + spin_lock_irqsave(&ope_top_info.hw_lock, flags); + if (!completion_done(&ope_top_info.reset_complete)) { + CAM_DBG(CAM_OPE, + "IRQ delayed, checking the status registers"); + irq_mask = cam_io_r_mb(ope_hw_info->top_reg->base + + top_reg->irq_mask); + irq_status = cam_io_r_mb(ope_hw_info->top_reg->base + + top_reg->irq_status); + if (irq_status & top_reg_val->rst_done) { + CAM_DBG(CAM_OPE, "ope reset done"); + cam_io_w_mb(irq_status, + top_reg->base + top_reg->irq_clear); + cam_io_w_mb(top_reg_val->irq_set_clear, + top_reg->base + top_reg->irq_cmd); + } else { + CAM_ERR(CAM_OPE, + "irq mask 0x%x irq status 0x%x", + irq_mask, irq_status); + cam_ope_top_dump_debug_reg(ope_hw_info); + rc = -ETIMEDOUT; + } + } else { + CAM_DBG(CAM_OPE, "reset done"); + rc = 0; + } + spin_unlock_irqrestore(&ope_top_info.hw_lock, flags); + } else { + rc = 0; + } + /* enable interrupt mask */ + cam_io_w_mb(top_reg_val->irq_mask, + ope_hw_info->top_reg->base + top_reg->irq_mask); + + return rc; +} + +static int cam_ope_top_probe(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + + ope_top_info.ope_hw_info = ope_hw_info; + spin_lock_init(&ope_top_info.hw_lock); + + return rc; +} + +static int cam_ope_top_isr(struct ope_hw *ope_hw_info, + int32_t ctx_id, void *data) +{ + int rc = 0; + uint32_t irq_status; + uint32_t violation_status; + uint32_t pp_hw_status = 0; + struct cam_ope_top_reg *top_reg; + struct cam_ope_top_reg_val *top_reg_val; + struct cam_ope_pp_reg *pp_reg; + struct cam_ope_irq_data *irq_data = data; + int i; + + if (!ope_hw_info) { + CAM_ERR(CAM_OPE, "Invalid ope_hw_info"); + return -EINVAL; + } + + top_reg = ope_hw_info->top_reg; + top_reg_val = 
ope_hw_info->top_reg_val; + pp_reg = ope_hw_info->pp_reg; + + spin_lock(&ope_top_info.hw_lock); + /* Read and Clear Top Interrupt status */ + irq_status = cam_io_r_mb(top_reg->base + top_reg->irq_status); + cam_io_w_mb(irq_status, + top_reg->base + top_reg->irq_clear); + + cam_io_w_mb(top_reg_val->irq_set_clear, + top_reg->base + top_reg->irq_cmd); + + if (irq_status & top_reg_val->rst_done) { + CAM_DBG(CAM_OPE, "ope reset done"); + complete(&ope_top_info.reset_complete); + } + + if (irq_status & top_reg_val->ope_violation) { + violation_status = cam_io_r_mb(top_reg->base + + top_reg->violation_status); + irq_data->error = 1; + CAM_ERR(CAM_OPE, "ope violation: %x", violation_status); + + for (i = 0; i < pp_reg->num_clients ; i++) { + pp_hw_status = 0; + pp_hw_status = + cam_io_r_mb(pp_reg->base + + pp_reg->pp_clients[i] + .hw_status); + + if (pp_hw_status) + CAM_ERR(CAM_OPE, + "ope pp hw_status offset 0x%x val 0x%x", + pp_reg->pp_clients[i].hw_status, + pp_hw_status); + } + } + spin_unlock(&ope_top_info.hw_lock); + + return rc; +} + +int cam_ope_top_process(struct ope_hw *ope_hw_info, + int32_t ctx_id, uint32_t cmd_id, void *data) +{ + int rc = 0; + + switch (cmd_id) { + case OPE_HW_PROBE: + CAM_DBG(CAM_OPE, "OPE_HW_PROBE: E"); + rc = cam_ope_top_probe(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_PROBE: X"); + break; + case OPE_HW_INIT: + CAM_DBG(CAM_OPE, "OPE_HW_INIT: E"); + rc = cam_ope_top_init(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_INIT: X"); + break; + case OPE_HW_DEINIT: + break; + case OPE_HW_ACQUIRE: + CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: E"); + rc = cam_ope_top_acquire(ope_hw_info, ctx_id, data); + CAM_DBG(CAM_OPE, "OPE_HW_ACQUIRE: X"); + break; + case OPE_HW_PREPARE: + break; + case OPE_HW_RELEASE: + rc = cam_ope_top_release(ope_hw_info, ctx_id, data); + break; + case OPE_HW_START: + break; + case OPE_HW_STOP: + break; + case OPE_HW_FLUSH: + break; + case OPE_HW_ISR: + rc = cam_ope_top_isr(ope_hw_info, 0, data); + break; + case 
OPE_HW_RESET: + rc = cam_ope_top_reset(ope_hw_info, 0, 0); + break; + case OPE_HW_DUMP_DEBUG: + rc = cam_ope_top_dump_debug_reg(ope_hw_info); + break; + default: + break; + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.h b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.h new file mode 100644 index 0000000000..57d90712bd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef OPE_TOP_H +#define OPE_TOP_H + +#include +#include +#include +#include "ope_hw.h" +#include "cam_hw_mgr_intf.h" +#include "cam_hw_intf.h" +#include "cam_soc_util.h" +#include "cam_context.h" +#include "cam_ope_context.h" +#include "cam_ope_hw_mgr.h" + +/** + * struct ope_top_ctx + * + * @ope_acquire: OPE acquire info + */ +struct ope_top_ctx { + struct ope_acquire_dev_info *ope_acquire; +}; + +/** + * struct ope_top + * + * @ope_hw_info: OPE hardware info + * @top_ctx: OPE top context + * @reset_complete: Reset complete flag + * @ope_mutex: OPE hardware mutex + * @hw_lock: OPE hardware spinlock + */ +struct ope_top { + struct ope_hw *ope_hw_info; + struct ope_top_ctx top_ctx[OPE_CTX_MAX]; + struct completion reset_complete; + struct mutex ope_hw_mutex; + spinlock_t hw_lock; +}; +#endif /* OPE_TOP_H */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_presil/inc/cam_presil_hw_access.h b/qcom/opensource/camera-kernel/drivers/cam_presil/inc/cam_presil_hw_access.h new file mode 100644 index 0000000000..701fdcd4c2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_presil/inc/cam_presil_hw_access.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. 
All rights reserved. + */ + +#ifndef _CAM_PRESIL_HW_ACCESS_H_ +#define _CAM_PRESIL_HW_ACCESS_H_ + +#include + +#define CAM_PRESIL_CLIENT_ID_CAMERA 0x1 +#define CAM_PRESIL_CLIENT_ID_EVA 0x2 + +#define CAM_SS_START_PRESIL 0x08000000 +#define CAM_SS_END_PRESIL 0x09000000 +#define CVP_SS_START_PRESIL 0x06c00000 +#define CAM_EVENT_START_PRESIL 0x20000000 + +/* presil events to carry shared values from HW-KMD to PC-HOST CSim Wrapper */ +#define CAM_PRESIL_EVENT_HFI_REG_BASE 0x600 +#define CAM_PRESIL_EVENT_HFI_REG(n) (CAM_PRESIL_EVENT_HFI_REG_BASE + (n * 4)) +#define CAM_PRESIL_EVENT_HFI_REG_CMD_Q_IOVA CAM_PRESIL_EVENT_HFI_REG(1) +#define CAM_PRESIL_EVENT_HFI_REG_MSG_Q_IOVA CAM_PRESIL_EVENT_HFI_REG(2) +#define CAM_PRESIL_EVENT_HFI_REG_DBG_Q_IOVA CAM_PRESIL_EVENT_HFI_REG(3) +#define CAM_PRESIL_EVENT_HFI_REG_SFR_LEN CAM_PRESIL_EVENT_HFI_REG(4) +#define CAM_PRESIL_EVENT_HFI_REG_ICP_V1_HW_VERSION_TO_START_HFI_INIT CAM_PRESIL_EVENT_HFI_REG(13) +#define CAM_PRESIL_EVENT_HFI_REG_ON_FIRST_REG_START_FW_DOWNLOAD 0x638 /* write FF to start */ +#define CAM_PRESIL_EVENT_IFE_FRAME_RUN 0x123 /* write FF to start */ + +typedef int (*CAM_PRESIL_IRQ_HANDLER_BOTTOM_HALF)(void *handler_priv, + void *evt_payload_priv); + +/* + * enum cam_presil_err - return code from presil apis + * + * @CAM_PRESIL_SUCCESS : Success + * @CAM_PRESIL_FAILED : Failed + * @CAM_PRESIL_BLOCKED : not presil hw + * @CAM_PRESIL_BLOCKED_BOOTUP : presil hw but at boot presil not connected + * + */ +enum cam_presil_err { + CAM_PRESIL_SUCCESS = 0x0, + CAM_PRESIL_FAILED, + CAM_PRESIL_BLOCKED, + CAM_PRESIL_BLOCKED_BOOTUP, +}; + +/** + * struct cam_presil_intr_reginfo - register received with irq + * callback + */ +struct cam_presil_intr_reginfo +{ + uint32_t intr_en_off; + uint32_t intr_status_off; + uint32_t intr_en_val; + uint32_t intr_status_val; +}; + +/** + * struct cam_presil_intr_regwrinfo - reg val pair from pchost + */ +struct cam_presil_intr_regwrinfo +{ + void * reg_off; + uint32_t reg_val; +}; + +#define 
CAM_MODE_MAX_REG_CNT 25 + +/** + * struct cam_presil_irq_data - data received along with irq cb + * from pchost + */ +struct cam_presil_irq_data +{ + uint32_t irq_num; + uint32_t irq_reg_count; + struct cam_presil_intr_reginfo intr_reg[CAM_MODE_MAX_REG_CNT]; + uint32_t irq_wr_count; + struct cam_presil_intr_regwrinfo intr_wr_reg[CAM_MODE_MAX_REG_CNT]; + uint32_t magic; +}; + +/* + * cam_presil_subscribe_device_irq() + * + * @brief : Register for irq from presil framework. + * + * @irq_num : Unique irq number + * @irq_handler : handler callback + * @irq_priv_data : Callback data + * @irq_name : Irq name + * + * @return true or false. + */ +bool cam_presil_subscribe_device_irq(int irq_num, irq_handler_t irq_handler, + void* irq_priv_data, const char *irq_name); + +/* + * cam_presil_subscribe_device_irq() + * + * @brief : Un-Register for irq from presil framework. + * + * @irq_num : Unique irq number + * + * @return true or false. + */ +bool cam_presil_unsubscribe_device_irq(int irq_num); + +/* + * cam_presil_register_read() + * + * @brief : register read from presil hw. + * + * @addr : Register offset + * @pValue : Value read from hw + * + * @return: Success or Failure + */ +int cam_presil_register_read(void *addr, uint32_t *pValue); + +/* + * cam_presil_register_write() + * + * @brief : register write to presil hw. + * + * @addr : Register offset + * @pValue : Value to write to hw + * @flags : Flags + * + * @return: Success or Failure + */ +int cam_presil_register_write(void *addr, uint32_t value, uint32_t flags); + +/* + * cam_presil_send_buffer() + * + * @brief : Copy buffer content to presil hw memory. 
+ * + * @dma_buf_uint : Not fd , it is dma_buf ptr to be sent to + * presil umd daemon + * @mmu_hdl : Iommu handle + * @offset : Offset to start copy + * @size : Size of copy + * @addr32 : Iova to start copy at + * @hfi_skip : Skip logs for HFI actions + * + * @return: Success or Failure + */ +int cam_presil_send_buffer(uint64_t dma_buf_uint, int mmu_hdl, uint32_t offset, + uint32_t size, uint32_t addr32, uintptr_t cpu_vaddr, bool hfi_skip); + +/* + * cam_presil_retrieve_buffer() + * + * @brief : Copy buffer content back from presil hw memory. + * + * @dma_buf_uint : Not fd , it is dma_buf ptr to be sent to + * presil umd daemon + * @mmu_hdl : Iommu handle + * @offset : Offset to start copy + * @size : Size of copy + * @addr32 : Iova to start copy at + * @hfi_skip : Skip logs for HFI actions + * + * @return: Success or Failure + */ +int cam_presil_retrieve_buffer(uint64_t dma_buf_uint, + int mmu_hdl, uint32_t offset, uint32_t size, uint32_t addr32, + uintptr_t cpu_vaddr, bool hfi_skip); + +/* + * cam_presil_readl_poll_timeout() + * + * @brief : Custom register poll function for presil hw. + * + * @mem_address : Reg offset to poll + * @val : Value to compare + * @max_try_count : Max number of tries + * @interval_msec : Interval between tries + * + * @return: Success or Failure + */ +int cam_presil_readl_poll_timeout(void __iomem *mem_address, uint32_t val, + int max_try_count, int interval_msec); + +/* + * cam_presil_hfi_write_cmd() + * + * @brief : Write HFI command to presil hw. + * + * @addr : Pointer to HFI command + * @cmdlen : Length + * @client_id : client Id of caller + * + * @return: Success or Failure + */ +int cam_presil_hfi_write_cmd(void *addr, uint32_t cmdlen, uint32_t client_id); + +/* + * cam_presil_hfi_read_message() + * + * @brief : Read HFI response message from presil hw. 
+ * + * @pmsg : Pointer to HFI message buffer + * @q_id : Length + * @words_read : Response message + * @client_id : client Id of caller + * + * @return: Success or Failure + */ +int cam_presil_hfi_read_message(uint32_t *pmsg, uint8_t q_id, + uint32_t *words_read, uint32_t client_id); + +/** + * @brief : API to check if camera driver running in presil + * enabled mode + * + * @return true or false. + */ +bool cam_presil_mode_enabled(void); + +/* + * cam_presil_send_event() + * + * @brief : send event to pchost. + * + * @event_id : Event Id + * @value : Value with additional information + * + * @return: Success or Failure + */ +int cam_presil_send_event(uint32_t event_id, uint32_t value); + +/* + * cam_presil_enqueue_presil_irq_tasklet() + * + * @brief : enqueue workqueue cb for bottom half of irq in presil mode. + * + * @bh_handler : Bottom half handler func + * @handler_priv : Handler private data + * @payload : Payload + * + * @return: Success or Failure + */ +int cam_presil_enqueue_presil_irq_tasklet(CAM_PRESIL_IRQ_HANDLER_BOTTOM_HALF bh_handler, + void *handler_priv, + void *payload); +#endif /* _CAM_PRESIL_HW_ACCESS_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_presil/stub/cam_presil_hw_access_stub.c b/qcom/opensource/camera-kernel/drivers/cam_presil/stub/cam_presil_hw_access_stub.c new file mode 100644 index 0000000000..67d1aae65a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_presil/stub/cam_presil_hw_access_stub.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_debug_util.h" +#include "cam_presil_hw_access.h" + +bool cam_presil_mode_enabled(void) +{ + return false; +} + +bool cam_presil_subscribe_device_irq(int irq_num, + irq_handler_t irq_handler, void *irq_priv_data, const char *irq_name) +{ + return true; +} + +bool cam_presil_unsubscribe_device_irq(int irq_num) +{ + return true; +} + +int cam_presil_register_read(void *addr, uint32_t *val) +{ + return CAM_PRESIL_SUCCESS; +} + +int cam_presil_register_write(void *addr, uint32_t value, uint32_t flags) +{ + return CAM_PRESIL_SUCCESS; +} + +int cam_presil_send_buffer(uint64_t dma_buf_uint, int mmu_hdl, uint32_t offset, + uint32_t size, uint32_t addr32, uintptr_t cpu_vaddr, bool hfi_skip) +{ + return CAM_PRESIL_SUCCESS; +} + +int cam_presil_retrieve_buffer(uint64_t dma_buf_uint, int mmu_hdl, + uint32_t offset, uint32_t size, uint32_t addr32, uintptr_t cpu_vaddr, bool hfi_skip) +{ + return CAM_PRESIL_SUCCESS; +} + +int cam_presil_readl_poll_timeout(void __iomem *mem_address, uint32_t val, + int max_try_count, int interval_msec) +{ + return CAM_PRESIL_SUCCESS; +} + +int cam_presil_hfi_write_cmd(void *hfi_cmd, uint32_t cmdlen, uint32_t client_id) +{ + return CAM_PRESIL_SUCCESS; +} + +int cam_presil_hfi_read_message(uint32_t *pmsg, uint8_t q_id, + uint32_t *words_read, uint32_t client_id) +{ + return CAM_PRESIL_SUCCESS; +} + +int cam_presil_send_event(uint32_t event_id, uint32_t value) +{ + return CAM_PRESIL_SUCCESS; +} + +int cam_presil_enqueue_presil_irq_tasklet(CAM_PRESIL_IRQ_HANDLER_BOTTOM_HALF bh_handler, + void *handler_priv, + void *payload) +{ + return CAM_PRESIL_SUCCESS; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr.c new file mode 100644 index 0000000000..f342d90222 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr.c @@ -0,0 +1,3302 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2021, The 
Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#include +#include +#endif + +#include "cam_compat.h" +#include "cam_req_mgr_util.h" +#include "cam_mem_mgr.h" +#include "cam_smmu_api.h" +#include "cam_debug_util.h" +#include "cam_trace.h" +#include "cam_common_util.h" +#include "cam_presil_hw_access.h" +#include "cam_mem_mgr_api.h" + +#define CAM_MEM_SHARED_BUFFER_PAD_4K (4 * 1024) + +static struct cam_mem_table tbl; +static atomic_t cam_mem_mgr_state = ATOMIC_INIT(CAM_MEM_MGR_UNINITIALIZED); + +/* Number of words for dumping req state info */ +#define CAM_MEM_MGR_DUMP_BUF_NUM_WORDS 29 + +/* cam_mem_mgr_debug - global struct to keep track of debug settings for mem mgr + * + * @dentry : Directory entry to the mem mgr root folder + * @alloc_profile_enable : Whether to enable alloc profiling + * @override_cpu_access_dir : Override cpu access direction to BIDIRECTIONAL + */ +static struct { + struct dentry *dentry; + bool alloc_profile_enable; + bool override_cpu_access_dir; +} g_cam_mem_mgr_debug; + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +static void cam_mem_mgr_put_dma_heaps(void); +static int cam_mem_mgr_get_dma_heaps(void); +#endif + +#ifdef CONFIG_CAM_PRESIL +static inline void cam_mem_mgr_reset_presil_params(int idx) +{ + tbl.bufq[idx].presil_params.fd_for_umd_daemon = -1; + tbl.bufq[idx].presil_params.refcount = 0; +} +#else +static inline void cam_mem_mgr_reset_presil_params(int idx) +{ + return; +} +#endif + +static unsigned long cam_mem_mgr_mini_dump_cb(void *dst, unsigned long len, + void *priv_data) +{ + struct cam_mem_table_mini_dump *md; + + if (!dst) { + CAM_ERR(CAM_MEM, "Invalid params"); + return 0; + } + + if (len < sizeof(*md)) { + CAM_ERR(CAM_MEM, "Insufficient length %u", len); + return 0; + } + + md = (struct cam_mem_table_mini_dump *)dst; + memcpy(md->bufq, 
tbl.bufq, CAM_MEM_BUFQ_MAX * sizeof(struct cam_mem_buf_queue)); + md->dbg_buf_idx = tbl.dbg_buf_idx; + md->alloc_profile_enable = g_cam_mem_mgr_debug.alloc_profile_enable; + md->force_cache_allocs = tbl.force_cache_allocs; + md->need_shared_buffer_padding = tbl.need_shared_buffer_padding; + return sizeof(*md); +} + +static void cam_mem_mgr_print_tbl(void) +{ + int i; + uint64_t ms, hrs, min, sec; + struct timespec64 current_ts; + + CAM_GET_TIMESTAMP(current_ts); + CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms); + CAM_INFO(CAM_MEM, "***%llu:%llu:%llu:%llu Mem mgr table dump***", + hrs, min, sec, ms); + for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) { + CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[i].timestamp), hrs, min, sec, ms); + CAM_INFO(CAM_MEM, + "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s", + hrs, min, sec, ms, i, tbl.bufq[i].fd, tbl.bufq[i].i_ino, + tbl.bufq[i].len, tbl.bufq[i].active, tbl.bufq[i].buf_handle, + kref_read(&tbl.bufq[i].krefcount), kref_read(&tbl.bufq[i].urefcount), + tbl.bufq[i].buf_name); + } + +} +/** + * For faster lookups, maintaining same indexing as SMMU + * for saving iova for a given buffer for a given context + * bank + * + * Buffer X : [iova_1, 0x0, iova_3, ...] 
+ * Here iova_1 is for device_1, no iova available for device_2, + * iova_3 for device_3 and so on + */ +static inline bool cam_mem_mgr_get_hwva_entry_idx( + int32_t mem_handle, int32_t *entry_idx) +{ + int entry; + + entry = GET_SMMU_TABLE_IDX(mem_handle); + if (unlikely((entry < 0) || (entry >= tbl.max_hdls_supported))) { + CAM_ERR(CAM_MEM, + "Invalid mem_hdl: 0x%x, failed to lookup", mem_handle); + return false; + } + + *entry_idx = entry; + return true; +} + +static int cam_mem_util_get_dma_dir(uint32_t flags) +{ + int rc = -EINVAL; + + if (flags & CAM_MEM_FLAG_HW_READ_ONLY) + rc = DMA_TO_DEVICE; + else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY) + rc = DMA_FROM_DEVICE; + else if (flags & CAM_MEM_FLAG_HW_READ_WRITE) + rc = DMA_BIDIRECTIONAL; + else if (flags & CAM_MEM_FLAG_PROTECTED_MODE) + rc = DMA_BIDIRECTIONAL; + + return rc; +} + +static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf, uintptr_t *vaddr, size_t *len) +{ + int rc = 0; + + /* + * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() + * need to be called in pair to avoid stability issue. + */ + rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL); + if (rc) { + CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc); + return rc; + } + + rc = cam_compat_util_get_dmabuf_va(dmabuf, vaddr); + if (rc) { + CAM_ERR(CAM_MEM, "kernel vmap failed: rc = %d", rc); + *len = 0; + dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL); + } + else { + *len = dmabuf->size; + CAM_DBG(CAM_MEM, "vaddr = %llu, len = %zu", *vaddr, *len); + } + + return rc; +} + +static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf, + uint64_t vaddr) +{ + int rc = 0; + + if (!dmabuf || !vaddr) { + CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr); + return -EINVAL; + } + + cam_compat_util_put_dmabuf_va(dmabuf, (void *)vaddr); + + /* + * dma_buf_begin_cpu_access() and + * dma_buf_end_cpu_access() need to be called in pair + * to avoid stability issue. 
+ */ + rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL); + if (rc) { + CAM_ERR(CAM_MEM, "Failed in end cpu access, dmabuf=%pK", + dmabuf); + return rc; + } + + return rc; +} + +static int cam_mem_mgr_create_debug_fs(void) +{ + int rc = 0; + struct dentry *dbgfileptr = NULL; + + if (!cam_debugfs_available() || g_cam_mem_mgr_debug.dentry) + return 0; + + rc = cam_debugfs_create_subdir("memmgr", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_MEM, "DebugFS could not create directory!"); + rc = -ENOENT; + goto end; + } + + g_cam_mem_mgr_debug.dentry = dbgfileptr; + + debugfs_create_bool("alloc_profile_enable", 0644, g_cam_mem_mgr_debug.dentry, + &g_cam_mem_mgr_debug.alloc_profile_enable); + + debugfs_create_bool("override_cpu_access_dir", 0644, g_cam_mem_mgr_debug.dentry, + &g_cam_mem_mgr_debug.override_cpu_access_dir); +end: + return rc; +} + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#ifdef CONFIG_ARCH_QTI_VM +void cam_mem_mgr_set_svm_heap_sizes(uint32_t device_heap_size, + uint32_t session_heap_size) +{ + tbl.device_heap_size = device_heap_size; + tbl.session_heap_size = session_heap_size; + + CAM_DBG(CAM_MEM, "Set device_heap_size: 0x%x, session_heap_size: 0x%x", + tbl.device_heap_size, tbl.session_heap_size); +} + +int cam_mem_mgr_transfer_from_pvm_heap(struct dma_heap *heap, + size_t size, enum cam_dma_heap_type heap_type) +{ + void *handle; + + if (size <= 0) { + CAM_ERR(CAM_MEM, "Invalid size: 0x%x to create SVM heap", size); + return -EINVAL; + } + + if (((heap_type == CAM_SVM_HEAP_DEVICE) && (tbl.cam_device_mem_pool_handle)) || + ((heap_type == CAM_SVM_HEAP_SESSION) && (tbl.cam_session_mem_pool_handle))) { + CAM_ERR(CAM_MEM, "Multi create happens, heap_type: %d unreleased handle: 0x%x", + heap_type, (heap_type == CAM_SVM_HEAP_DEVICE) ? 
+ tbl.cam_device_mem_pool_handle : tbl.cam_session_mem_pool_handle); + return -EINVAL; + } + + CAM_DBG(CAM_MEM, "heap: 0x%x size: %zu heap_type: %d", heap, size, heap_type); + + handle = cam_mem_heap_add_kernel_pool(heap, size); + if (IS_ERR_OR_NULL(handle)) { + CAM_ERR(CAM_MEM, "Failed to create the SVM heap with heap_type: %d", + heap_type); + return -EINVAL; + } + + if (heap_type == CAM_SVM_HEAP_DEVICE) + tbl.cam_device_mem_pool_handle = handle; + else + tbl.cam_session_mem_pool_handle = handle; + + CAM_DBG(CAM_MEM, "SVM heap: 0x%x size: 0x%x heap_type: %d is successfully created", + handle, size, heap_type); + + return 0; +} + +int cam_mem_mgr_release_to_pvm_heap(enum cam_dma_heap_type heap_type) +{ + CAM_DBG(CAM_MEM, "heap_type: %d", heap_type); + + if (heap_type == CAM_SVM_HEAP_DEVICE) { + cam_mem_heap_remove_kernel_pool(tbl.cam_device_mem_pool_handle); + tbl.cam_device_mem_pool_handle = NULL; + } else { + cam_mem_heap_remove_kernel_pool(tbl.cam_session_mem_pool_handle); + tbl.cam_session_mem_pool_handle = NULL; + } + + return 0; +} + +static bool cam_mem_util_heap_mem_available( + enum cam_dma_heap_type heap_type) +{ + bool rc = false; + + if (heap_type == CAM_SVM_HEAP_SESSION) { + if (tbl.cam_session_mem_pool_handle) + rc = true; + else + rc = false; + } else { + if (tbl.cam_device_mem_pool_handle) + rc = true; + else + rc = false; + } + + return rc; +} +#endif +#endif + +int cam_mem_mgr_init(void) +{ + int i; + int bitmap_size; + int rc = 0; + + if (atomic_read(&cam_mem_mgr_state)) + return 0; + + memset(tbl.bufq, 0, sizeof(tbl.bufq)); + + if (cam_smmu_need_force_alloc_cached(&tbl.force_cache_allocs)) { + CAM_ERR(CAM_MEM, "Error in getting force cache alloc flag"); + return -EINVAL; + } + + tbl.need_shared_buffer_padding = cam_smmu_need_shared_buffer_padding(); + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) + rc = cam_mem_mgr_get_dma_heaps(); + if (rc) { + CAM_ERR(CAM_MEM, "Failed in getting dma heaps rc=%d", rc); + return rc; + } + +#ifdef CONFIG_ARCH_QTI_VM + 
tbl.session_heap_alloc_cnt = 0; + tbl.device_heap_alloc_cnt = 0; + + rc = cam_mem_mgr_transfer_from_pvm_heap(tbl.cam_device_heap, + tbl.device_heap_size, CAM_SVM_HEAP_DEVICE); + if (rc) { + CAM_ERR(CAM_MEM, "Failed to create SVM device mem pool rc=%d", rc); + goto put_heaps; + } +#endif +#endif + + bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long); + tbl.bitmap = CAM_MEM_ZALLOC(bitmap_size, GFP_KERNEL); + if (!tbl.bitmap) { + rc = -ENOMEM; + goto put_heaps; + } + + tbl.bits = bitmap_size * BITS_PER_BYTE; + bitmap_zero(tbl.bitmap, tbl.bits); + /* We need to reserve slot 0 because 0 is invalid */ + set_bit(0, tbl.bitmap); + + for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) { + tbl.bufq[i].fd = -1; + tbl.bufq[i].buf_handle = -1; + cam_mem_mgr_reset_presil_params(i); + mutex_init(&tbl.bufq[i].q_lock); + spin_lock_init(&tbl.bufq[i].idx_lock); + } + mutex_init(&tbl.m_lock); + + atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_INITIALIZED); + + cam_mem_mgr_create_debug_fs(); + cam_common_register_mini_dump_cb(cam_mem_mgr_mini_dump_cb, + "cam_mem", NULL); + + rc = cam_smmu_driver_init(&tbl.csf_version, &tbl.max_hdls_supported); + if (rc) + goto clean_bitmap_and_mutex; + + if (!tbl.max_hdls_supported) { + CAM_ERR(CAM_MEM, "Invalid number of supported handles"); + rc = -EINVAL; + goto clean_bitmap_and_mutex; + } + + tbl.max_hdls_info_size = sizeof(struct cam_mem_buf_hw_hdl_info) * + tbl.max_hdls_supported; + + /* Index 0 is reserved as invalid slot */ + for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) { + tbl.bufq[i].hdls_info = CAM_MEM_ZALLOC(tbl.max_hdls_info_size, GFP_KERNEL); + + if (!tbl.bufq[i].hdls_info) { + CAM_ERR(CAM_MEM, "Failed to allocate hdls array queue idx: %d", i); + rc = -ENOMEM; + goto free_hdls_info; + } + } + + return 0; + +free_hdls_info: + for (--i; i > 0; i--) { + CAM_MEM_FREE(tbl.bufq[i].hdls_info); + tbl.bufq[i].hdls_info = NULL; + } + +clean_bitmap_and_mutex: + CAM_MEM_FREE(tbl.bitmap); + tbl.bitmap = NULL; + for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) + 
mutex_destroy(&tbl.bufq[i].q_lock); + mutex_destroy(&tbl.m_lock); + atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED); +put_heaps: +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#ifdef CONFIG_ARCH_QTI_VM + cam_mem_mgr_release_to_pvm_heap(CAM_SVM_HEAP_DEVICE); +#endif + cam_mem_mgr_put_dma_heaps(); +#endif + return rc; +} + +static int32_t cam_mem_get_slot(void) +{ + int32_t idx; + + mutex_lock(&tbl.m_lock); + idx = find_first_zero_bit(tbl.bitmap, tbl.bits); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + mutex_unlock(&tbl.m_lock); + return -ENOMEM; + } + set_bit(idx, tbl.bitmap); + mutex_unlock(&tbl.m_lock); + + mutex_lock(&tbl.bufq[idx].q_lock); + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].active = true; + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].release_deferred = false; + CAM_GET_TIMESTAMP((tbl.bufq[idx].timestamp)); + tbl.bufq[idx].dma_heap_type = CAM_HEAP_MAX; + mutex_unlock(&tbl.bufq[idx].q_lock); + + return idx; +} + +static void cam_mem_put_slot(int32_t idx) +{ + mutex_lock(&tbl.bufq[idx].q_lock); + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].active = false; + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].release_deferred = false; + tbl.bufq[idx].is_internal = false; + memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64)); + kref_init(&tbl.bufq[idx].krefcount); + kref_init(&tbl.bufq[idx].urefcount); + mutex_unlock(&tbl.bufq[idx].q_lock); + + mutex_lock(&tbl.m_lock); + clear_bit(idx, tbl.bitmap); + mutex_unlock(&tbl.m_lock); +} + +static bool cam_mem_mgr_is_iova_info_updated_locked( + struct cam_mem_buf_hw_hdl_info *hw_vaddr_info_arr, + int32_t mmu_hdl) +{ + int entry, iommu_hdl; + struct cam_mem_buf_hw_hdl_info *vaddr_entry; + + iommu_hdl = CAM_SMMU_GET_BASE_HDL(mmu_hdl); + + /* validate hdl for entry idx */ + if (!cam_mem_mgr_get_hwva_entry_idx(iommu_hdl, &entry)) + return false; + + vaddr_entry = &hw_vaddr_info_arr[entry]; + if 
(vaddr_entry->valid_mapping && + vaddr_entry->iommu_hdl == iommu_hdl) + return true; + + return false; +} + +static void cam_mem_mgr_update_iova_info_locked( + struct cam_mem_buf_hw_hdl_info *hw_vaddr_info_arr, + dma_addr_t vaddr, int32_t mmu_hdl, size_t len, + bool valid_mapping, struct kref *ref_count) +{ + int entry, iommu_hdl; + struct cam_mem_buf_hw_hdl_info *vaddr_entry; + + iommu_hdl = CAM_SMMU_GET_BASE_HDL(mmu_hdl); + + /* validate hdl for entry idx */ + if (!cam_mem_mgr_get_hwva_entry_idx(iommu_hdl, &entry)) + return; + + vaddr_entry = &hw_vaddr_info_arr[entry]; + + vaddr_entry->vaddr = vaddr; + vaddr_entry->iommu_hdl = iommu_hdl; + vaddr_entry->addr_updated = true; + vaddr_entry->valid_mapping = valid_mapping; + vaddr_entry->len = len; + vaddr_entry->ref_count = ref_count; +} + +/* Utility to be invoked with bufq entry lock held */ +static int cam_mem_mgr_try_retrieving_hwva_locked( + int idx, int32_t mmu_handle, dma_addr_t *iova_ptr, size_t *len_ptr, + struct list_head *buf_tracker) +{ + int32_t rc = -EINVAL, entry, iommu_hdl; + struct cam_mem_buf_hw_hdl_info *hdl_info = NULL; + + iommu_hdl = CAM_SMMU_GET_BASE_HDL(mmu_handle); + + /* Check for valid entry */ + if (cam_mem_mgr_get_hwva_entry_idx(iommu_hdl, &entry)) { + hdl_info = &tbl.bufq[idx].hdls_info[entry]; + + /* Ensure we are picking a valid entry */ + if ((hdl_info->iommu_hdl == iommu_hdl) && (hdl_info->addr_updated)) { + *iova_ptr = hdl_info->vaddr; + *len_ptr = hdl_info->len; + if (buf_tracker) + cam_smmu_add_buf_to_track_list(tbl.bufq[idx].fd, + tbl.bufq[idx].i_ino, &hdl_info->ref_count, buf_tracker, + GET_SMMU_TABLE_IDX(iommu_hdl)); + rc = 0; + } + } + + return rc; +} + +int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle, + dma_addr_t *iova_ptr, size_t *len_ptr, uint32_t *flags, + struct list_head *buf_tracker) +{ + int rc = 0, idx; + bool retrieved_iova = false; + struct kref *ref_count; + + *len_ptr = 0; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) + return -ENOENT; + + mutex_lock(&tbl.bufq[idx].q_lock); + if (!tbl.bufq[idx].active) { + CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped,", + idx); + rc = -EAGAIN; + goto err; + } + + if (buf_handle != tbl.bufq[idx].buf_handle) { + rc = -EINVAL; + goto err; + } + + if (flags) + *flags = tbl.bufq[idx].flags; + + /* Try retrieving iova if saved previously */ + rc = cam_mem_mgr_try_retrieving_hwva_locked(idx, mmu_handle, iova_ptr, len_ptr, + buf_tracker); + if (!rc) { + retrieved_iova = true; + goto end; + } + + if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle)) + rc = cam_smmu_get_stage2_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf, + iova_ptr, len_ptr, buf_tracker, &ref_count); + else + rc = cam_smmu_get_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf, + iova_ptr, len_ptr, buf_tracker, &ref_count); + + if (rc) { + CAM_ERR(CAM_MEM, + "failed to find buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d i_ino:%lu", + buf_handle, mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino); + goto err; + } + + /* Save iova in bufq for future use */ + cam_mem_mgr_update_iova_info_locked(tbl.bufq[idx].hdls_info, + *iova_ptr, mmu_handle, *len_ptr, false, ref_count); + +end: + CAM_DBG(CAM_MEM, + "handle:0x%x fd:%d i_ino:%lu iova_ptr:0x%lx len_ptr:%lu retrieved from bufq: %s", + mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino, *iova_ptr, *len_ptr, + CAM_BOOL_TO_YESNO(retrieved_iova)); +err: + mutex_unlock(&tbl.bufq[idx].q_lock); + return rc; +} +EXPORT_SYMBOL(cam_mem_get_io_buf); + +int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len) +{ + int idx, rc = 0; + + /* Check to avoid kernel panic - cannot call mutex in softirq/atomic context */ + if (!in_task()) { + CAM_ERR(CAM_MEM, "Calling from softirq/atomic context"); + dump_stack(); + return -EINVAL; + } + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, 
"failed. mem_mgr not initialized"); + return -EINVAL; + } + + if (!buf_handle || !vaddr_ptr || !len) + return -EINVAL; + + idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle); + + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) + return -EINVAL; + + mutex_lock(&tbl.bufq[idx].q_lock); + if (!tbl.bufq[idx].active) { + CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped,", + idx); + rc = -EPERM; + goto end; + } + + if (buf_handle != tbl.bufq[idx].buf_handle) { + CAM_ERR(CAM_MEM, "idx: %d Invalid buf handle %d", + idx, buf_handle); + rc = -EINVAL; + goto end; + } + + if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)) { + CAM_ERR(CAM_MEM, "idx: %d Invalid flag 0x%x", + idx, tbl.bufq[idx].flags); + rc = -EINVAL; + goto end; + } + + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + if (tbl.bufq[idx].kmdvaddr && kref_get_unless_zero(&tbl.bufq[idx].krefcount)) { + *vaddr_ptr = tbl.bufq[idx].kmdvaddr; + *len = tbl.bufq[idx].len; + } else { + CAM_ERR(CAM_MEM, "No KMD access requested, kmdvddr= %p, idx= %d, buf_handle= %d", + tbl.bufq[idx].kmdvaddr, idx, buf_handle); + rc = -EINVAL; + } + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); +end: + mutex_unlock(&tbl.bufq[idx].q_lock); + return rc; +} +EXPORT_SYMBOL(cam_mem_get_cpu_buf); + +int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd) +{ + int rc = 0, idx; + uint32_t cache_dir; + unsigned long dmabuf_flag = 0; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + if (!cmd) + return -EINVAL; + + idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) + return -EINVAL; + + mutex_lock(&tbl.m_lock); + if (!test_bit(idx, tbl.bitmap)) { + CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped,", + idx); + mutex_unlock(&tbl.m_lock); + return -EINVAL; + } + mutex_unlock(&tbl.m_lock); + + mutex_lock(&tbl.bufq[idx].q_lock); + if (cmd->buf_handle != tbl.bufq[idx].buf_handle) { + rc = -EINVAL; + goto end; + } + + rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag); + if (rc) { + CAM_ERR(CAM_MEM, "cache get flags failed %d", rc); + goto end; + } + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) + CAM_DBG(CAM_MEM, "Calling dmap buf APIs for cache operations"); + cache_dir = DMA_BIDIRECTIONAL; +#else + if (dmabuf_flag & ION_FLAG_CACHED) { + switch (cmd->mem_cache_ops) { + case CAM_MEM_CLEAN_CACHE: + cache_dir = DMA_TO_DEVICE; + break; + case CAM_MEM_INV_CACHE: + cache_dir = DMA_FROM_DEVICE; + break; + case CAM_MEM_CLEAN_INV_CACHE: + cache_dir = DMA_BIDIRECTIONAL; + break; + default: + CAM_ERR(CAM_MEM, + "invalid cache ops :%d", cmd->mem_cache_ops); + rc = -EINVAL; + goto end; + } + } else { + CAM_DBG(CAM_MEM, "BUF is not cached"); + goto end; + } +#endif + rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf, + (cmd->mem_cache_ops == CAM_MEM_CLEAN_INV_CACHE) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (rc) { + CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc); + goto end; + } + + rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf, + cache_dir); + if (rc) { + CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc); + goto end; + } + +end: + mutex_unlock(&tbl.bufq[idx].q_lock); + return rc; +} +EXPORT_SYMBOL(cam_mem_mgr_cache_ops); + +int cam_mem_mgr_cpu_access_op(struct cam_mem_cpu_access_op *cmd) +{ + int rc = 0, idx; + uint32_t direction; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_MEM, "Invalid cmd"); + return -EINVAL; + } + + idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "Invalid idx=%d, buf_handle 0x%x, access=0x%x", + idx, cmd->buf_handle, cmd->access); + return -EINVAL; + } + + mutex_lock(&tbl.m_lock); + if (!test_bit(idx, tbl.bitmap)) { + CAM_ERR(CAM_MEM, "Buffer at idx=%d is already freed/unmapped", idx); + mutex_unlock(&tbl.m_lock); + return -EINVAL; + } + mutex_unlock(&tbl.m_lock); + + mutex_lock(&tbl.bufq[idx].q_lock); + if (cmd->buf_handle != tbl.bufq[idx].buf_handle) { + CAM_ERR(CAM_MEM, + "Buffer at idx=%d is different incoming handle 0x%x, actual handle 0x%x", + idx, cmd->buf_handle, tbl.bufq[idx].buf_handle); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_MEM, "buf_handle=0x%x, access=0x%x, access_type=0x%x, override_access=%d", + cmd->buf_handle, cmd->access, cmd->access_type, + g_cam_mem_mgr_debug.override_cpu_access_dir); + + if (cmd->access_type & CAM_MEM_CPU_ACCESS_READ && + cmd->access_type & CAM_MEM_CPU_ACCESS_WRITE) { + direction = DMA_BIDIRECTIONAL; + } else if (cmd->access_type & CAM_MEM_CPU_ACCESS_READ) { + direction = DMA_FROM_DEVICE; + } else if (cmd->access_type & CAM_MEM_CPU_ACCESS_WRITE) { + direction = DMA_TO_DEVICE; + } else { + direction = DMA_BIDIRECTIONAL; + CAM_WARN(CAM_MEM, + "Invalid access type buf_handle=0x%x, access=0x%x, access_type=0x%x", + cmd->buf_handle, cmd->access, cmd->access_type); + } + + if (g_cam_mem_mgr_debug.override_cpu_access_dir) + direction = DMA_BIDIRECTIONAL; + + if (cmd->access & CAM_MEM_BEGIN_CPU_ACCESS) { + rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf, direction); + if (rc) { + CAM_ERR(CAM_MEM, + "dma begin cpu access failed rc=%d, buf_handle=0x%x, access=0x%x, access_type=0x%x", + rc, cmd->buf_handle, cmd->access, cmd->access_type); + goto end; + } + } + + if (cmd->access & CAM_MEM_END_CPU_ACCESS) { + rc = 
dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf, direction); + if (rc) { + CAM_ERR(CAM_MEM, + "dma end cpu access failed rc=%d, buf_handle=0x%x, access=0x%x, access_type=0x%x", + rc, cmd->buf_handle, cmd->access, cmd->access_type); + goto end; + } + } + +end: + mutex_unlock(&tbl.bufq[idx].q_lock); + return rc; +} +EXPORT_SYMBOL(cam_mem_mgr_cpu_access_op); + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) + +#define CAM_MAX_VMIDS 4 + +static void cam_mem_mgr_put_dma_heaps(void) +{ + CAM_DBG(CAM_MEM, "Releasing DMA Buf heaps usage"); +} + +static int cam_mem_mgr_get_dma_heaps(void) +{ + int rc = 0; + +#ifndef CONFIG_ARCH_QTI_VM + tbl.system_heap = NULL; + tbl.system_movable_heap = NULL; + tbl.system_uncached_heap = NULL; + tbl.camera_heap = NULL; + tbl.camera_uncached_heap = NULL; + tbl.secure_display_heap = NULL; + tbl.ubwc_p_heap = NULL; + tbl.ubwc_p_movable_heap = NULL; + + tbl.system_heap = dma_heap_find("qcom,system"); + if (IS_ERR_OR_NULL(tbl.system_heap)) { + rc = PTR_ERR(tbl.system_heap); + CAM_ERR(CAM_MEM, "qcom system heap not found, rc=%d", rc); + tbl.system_heap = NULL; + goto put_heaps; + } + + tbl.system_movable_heap = dma_heap_find("qcom,system-movable"); + if (IS_ERR_OR_NULL(tbl.system_movable_heap)) { + rc = PTR_ERR(tbl.system_movable_heap); + CAM_DBG(CAM_MEM, "qcom system heap not found, rc=%d", rc); + tbl.system_movable_heap = NULL; + /* not fatal error, we can fallback to system heap */ + } + + tbl.system_uncached_heap = dma_heap_find("qcom,system-uncached"); + if (IS_ERR_OR_NULL(tbl.system_uncached_heap)) { + if (tbl.force_cache_allocs) { + /* optional, we anyway do not use uncached */ + CAM_DBG(CAM_MEM, + "qcom system-uncached heap not found, err=%d", + PTR_ERR(tbl.system_uncached_heap)); + tbl.system_uncached_heap = NULL; + } else { + /* fatal, must need uncached heaps */ + rc = PTR_ERR(tbl.system_uncached_heap); + CAM_ERR(CAM_MEM, + "qcom system-uncached heap not found, rc=%d", + rc); + tbl.system_uncached_heap = NULL; + goto put_heaps; + } + } + + 
tbl.ubwc_p_heap = dma_heap_find("qcom,ubwcp"); + if (IS_ERR_OR_NULL(tbl.ubwc_p_heap)) { + CAM_DBG(CAM_MEM, "qcom ubwcp heap not found, err=%d", PTR_ERR(tbl.ubwc_p_heap)); + tbl.ubwc_p_heap = NULL; + } + + tbl.ubwc_p_movable_heap = dma_heap_find("qcom,ubwcp-movable"); + if (IS_ERR_OR_NULL(tbl.ubwc_p_movable_heap)) { + CAM_DBG(CAM_MEM, "qcom ubwcp movable heap not found, err=%d", + PTR_ERR(tbl.ubwc_p_movable_heap)); + tbl.ubwc_p_movable_heap = NULL; + } + + tbl.secure_display_heap = dma_heap_find("qcom,display"); + if (IS_ERR_OR_NULL(tbl.secure_display_heap)) { + rc = PTR_ERR(tbl.secure_display_heap); + CAM_ERR(CAM_MEM, "qcom,display heap not found, rc=%d", + rc); + tbl.secure_display_heap = NULL; + goto put_heaps; + } + + tbl.camera_heap = dma_heap_find("qcom,camera"); + if (IS_ERR_OR_NULL(tbl.camera_heap)) { + /* optional heap, not a fatal error */ + CAM_DBG(CAM_MEM, "qcom camera heap not found, err=%d", + PTR_ERR(tbl.camera_heap)); + tbl.camera_heap = NULL; + } + + tbl.camera_uncached_heap = dma_heap_find("qcom,camera-uncached"); + if (IS_ERR_OR_NULL(tbl.camera_uncached_heap)) { + /* optional heap, not a fatal error */ + CAM_DBG(CAM_MEM, "qcom camera heap not found, err=%d", + PTR_ERR(tbl.camera_uncached_heap)); + tbl.camera_uncached_heap = NULL; + } + + CAM_INFO(CAM_MEM, + "Heaps : system=%pK %pK, system_uncached=%pK, camera=%pK, camera-uncached=%pK, secure_display=%pK, ubwc_p=%pK %pK", + tbl.system_heap, tbl.system_movable_heap, tbl.system_uncached_heap, + tbl.camera_heap, tbl.camera_uncached_heap, + tbl.secure_display_heap, tbl.ubwc_p_heap, tbl.ubwc_p_movable_heap); +#else + tbl.cam_session_heap = NULL; + tbl.cam_device_heap = NULL; + + tbl.cam_device_heap = dma_heap_find("qcom,cam-svm-device"); + if (IS_ERR_OR_NULL(tbl.cam_device_heap)) { + rc = PTR_ERR(tbl.cam_device_heap); + CAM_ERR(CAM_MEM, "qcom,cam-svm-device heap not found, rc=%d", rc); + tbl.cam_device_heap = NULL; + goto put_heaps; + } + + tbl.cam_session_heap = dma_heap_find("qcom,cam-svm-session"); + 
if (IS_ERR_OR_NULL(tbl.cam_session_heap)) { + rc = PTR_ERR(tbl.cam_session_heap); + CAM_ERR(CAM_MEM, "qcom,cam-svm-session heap not found, rc=%d", rc); + tbl.cam_session_heap = NULL; + goto put_heaps; + } + + CAM_INFO(CAM_MEM, "Heaps : cam-svm-session=0x%x, cam-svm-device=0x%x", + tbl.cam_session_heap, tbl.cam_device_heap); +#endif + + return 0; +put_heaps: + cam_mem_mgr_put_dma_heaps(); + return rc; +} + +int cam_mem_mgr_check_for_supported_heaps(uint64_t *heap_mask) +{ + uint64_t heap_caps = 0; + + if (!heap_mask) + return -EINVAL; + + if (tbl.ubwc_p_heap) + heap_caps |= CAM_REQ_MGR_MEM_UBWC_P_HEAP_SUPPORTED; + + if ((tbl.camera_heap) || (tbl.camera_uncached_heap)) + heap_caps |= CAM_REQ_MGR_MEM_CAMERA_HEAP_SUPPORTED; + + *heap_mask = heap_caps; + return 0; +} + +static int cam_mem_util_get_dma_buf(size_t len, + unsigned int cam_flags, + enum cam_mem_mgr_allocator alloc_type, + struct dma_buf **buf, + unsigned long *i_ino, + enum cam_dma_heap_type *heap_type) +{ + int rc = 0; + struct dma_heap *heap = NULL, *try_heap = NULL; + struct timespec64 ts1, ts2; + long microsec = 0; +#ifndef CONFIG_ARCH_QTI_VM + bool use_cached_heap = false; +#endif + struct mem_buf_lend_kernel_arg arg; + int vmids[CAM_MAX_VMIDS]; + int perms[CAM_MAX_VMIDS]; + int num_vmids = 0; + + if (!buf) { + CAM_ERR(CAM_MEM, "Invalid params"); + return -EINVAL; + } + + if (g_cam_mem_mgr_debug.alloc_profile_enable) + CAM_GET_TIMESTAMP(ts1); + +#ifndef CONFIG_ARCH_QTI_VM + if ((cam_flags & CAM_MEM_FLAG_CACHE) || + (tbl.force_cache_allocs && + (!(cam_flags & CAM_MEM_FLAG_PROTECTED_MODE)))) { + CAM_DBG(CAM_MEM, + "Using CACHED heap, cam_flags=0x%x, force_cache_allocs=%d", + cam_flags, tbl.force_cache_allocs); + use_cached_heap = true; + } else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) { + use_cached_heap = true; + CAM_DBG(CAM_MEM, + "Using CACHED heap for secure, cam_flags=0x%x, force_cache_allocs=%d", + cam_flags, tbl.force_cache_allocs); + } else { + use_cached_heap = false; + if 
(!tbl.system_uncached_heap && !tbl.camera_uncached_heap) { + CAM_ERR(CAM_MEM, + "Using UNCACHED heap not supported, cam_flags=0x%x, force_cache_allocs=%d", + cam_flags, tbl.force_cache_allocs); + return -EINVAL; + } + } + + if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) { + if (IS_CSF25(tbl.csf_version.arch_ver, tbl.csf_version.max_ver)) { + heap = tbl.system_heap; + len = cam_align_dma_buf_size(len); + } else { + heap = tbl.secure_display_heap; + vmids[num_vmids] = VMID_CP_CAMERA; + perms[num_vmids] = PERM_READ | PERM_WRITE; + num_vmids++; + } + + if (cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT) { + CAM_DBG(CAM_MEM, "Secure mode CDSP flags"); + + vmids[num_vmids] = VMID_CP_CDSP; + perms[num_vmids] = PERM_READ | PERM_WRITE; + num_vmids++; + } + } else if (cam_flags & CAM_MEM_FLAG_EVA_NOPIXEL) { + heap = tbl.secure_display_heap; + vmids[num_vmids] = VMID_CP_NON_PIXEL; + perms[num_vmids] = PERM_READ | PERM_WRITE; + num_vmids++; + } else if (cam_flags & CAM_MEM_FLAG_UBWC_P_HEAP) { + if (!tbl.ubwc_p_heap) { + CAM_ERR(CAM_MEM, "ubwc-p heap is not available, can't allocate"); + return -EINVAL; + } + + if (tbl.ubwc_p_movable_heap && (alloc_type == CAM_MEMMGR_ALLOC_USER)) + heap = tbl.ubwc_p_movable_heap; + else + heap = tbl.ubwc_p_heap; + CAM_DBG(CAM_MEM, "Allocating from ubwc-p heap %pK, size=%d, flags=0x%x", + heap, len, cam_flags); + } else if (use_cached_heap) { + + /* + * The default scheme is to try allocating from the camera heap + * if available; if not, try for the system heap. Userland can also select + * to pick a specific heap for allocation; this will deviate from the + * default selection scheme. 
+ * + */ + if (!(cam_flags & CAM_MEM_FLAG_USE_SYS_HEAP_ONLY)) + try_heap = tbl.camera_heap; + + if (!(cam_flags & CAM_MEM_FLAG_USE_CAMERA_HEAP_ONLY)) { + if (tbl.system_movable_heap && (alloc_type == CAM_MEMMGR_ALLOC_USER)) + heap = tbl.system_movable_heap; + else + heap = tbl.system_heap; + } + } else { + if (!(cam_flags & CAM_MEM_FLAG_USE_SYS_HEAP_ONLY)) + try_heap = tbl.camera_uncached_heap; + + if (!(cam_flags & CAM_MEM_FLAG_USE_CAMERA_HEAP_ONLY)) + heap = tbl.system_uncached_heap; + } +#else + /* + * Device heap is created at boot up time, session heap is created + * and destroyed align with session start and stop boundary. + */ + mutex_lock(&tbl.m_lock); + CAM_DBG(CAM_MEM, "Allocating with alloc_type: %d size: %d flags: 0x%x", + alloc_type, len, cam_flags); + if (alloc_type == CAM_MEMMGR_ALLOC_USER) { + if (!cam_mem_util_heap_mem_available(CAM_SVM_HEAP_SESSION)) { + rc = cam_mem_mgr_transfer_from_pvm_heap(tbl.cam_session_heap, + tbl.session_heap_size, CAM_SVM_HEAP_SESSION); + if (rc) { + CAM_ERR(CAM_MEM, "Failed to create SVM session mem pool rc: %d", + rc); + return rc; + } + } + + heap = tbl.cam_session_heap; + tbl.session_heap_alloc_cnt++; + *heap_type = CAM_SVM_HEAP_SESSION; + } else { + if (!cam_mem_util_heap_mem_available(CAM_SVM_HEAP_DEVICE)) { + CAM_ERR(CAM_MEM, "Device heap unavailable, handle: 0x%x count: %d", + tbl.cam_device_mem_pool_handle, tbl.device_heap_alloc_cnt); + return -EINVAL; + } + + heap = tbl.cam_device_heap; + tbl.device_heap_alloc_cnt++; + *heap_type = CAM_SVM_HEAP_DEVICE; + } + + CAM_DBG(CAM_MEM, + "Get buffer from heap: %pK session_heap_alloc_cnt: %d device_heap_alloc_cnt: %d", + heap, tbl.session_heap_alloc_cnt, tbl.device_heap_alloc_cnt); + mutex_unlock(&tbl.m_lock); +#endif + CAM_DBG(CAM_MEM, "Using heaps : try=%pK, heap=%pK", try_heap, heap); + + *buf = NULL; + + if (!try_heap && !heap) { + CAM_ERR(CAM_MEM, + "No heap available for allocation, can't allocate flag: 0x%x", + cam_flags); + rc = -EINVAL; + goto release_heap; + } + 
+ if (try_heap) { + *buf = dma_heap_buffer_alloc(try_heap, len, O_RDWR, 0); + if (IS_ERR(*buf)) { + CAM_WARN(CAM_MEM, + "Failed in allocating from try heap, heap=%pK, len=%zu, err=%d", + try_heap, len, PTR_ERR(*buf)); + *buf = NULL; + } + } + + if (*buf == NULL) { + if (!heap) { + CAM_ERR(CAM_MEM, + "No default heap selected, flags = 0x%x", cam_flags); + rc = -ENOMEM; + goto release_heap; + } + + if (heap) + *buf = dma_heap_buffer_alloc(heap, len, O_RDWR, 0); + + if (IS_ERR(*buf)) { + rc = PTR_ERR(*buf); + CAM_ERR(CAM_MEM, + "Failed in allocating from heap, heap=%pK, len=%zu, err=%d", + heap, len, rc); + *buf = NULL; + goto release_heap; + } + } + + *i_ino = file_inode((*buf)->file)->i_ino; + + if (((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) && + !IS_CSF25(tbl.csf_version.arch_ver, tbl.csf_version.max_ver)) || + (cam_flags & CAM_MEM_FLAG_EVA_NOPIXEL)) { + if (num_vmids >= CAM_MAX_VMIDS) { + CAM_ERR(CAM_MEM, "Insufficient array size for vmids %d", num_vmids); + rc = -EINVAL; + goto end; + } + + arg.nr_acl_entries = num_vmids; + arg.vmids = vmids; + arg.perms = perms; + + rc = mem_buf_lend(*buf, &arg); + if (rc) { + CAM_ERR(CAM_MEM, + "Failed in buf lend rc=%d, buf=%pK, vmids [0]=0x%x, [1]=0x%x, [2]=0x%x", + rc, *buf, vmids[0], vmids[1], vmids[2]); + goto end; + } + } + + CAM_DBG(CAM_MEM, "Allocate success, len=%zu, *buf=%pK, i_ino=%lu", len, *buf, *i_ino); + + if (g_cam_mem_mgr_debug.alloc_profile_enable) { + CAM_GET_TIMESTAMP(ts2); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec); + trace_cam_log_event("IONAllocProfile", "size and time in micro", + len, microsec); + } + + return rc; +end: + dma_buf_put(*buf); +release_heap: +#ifdef CONFIG_ARCH_QTI_VM + mutex_lock(&tbl.m_lock); + if (alloc_type == CAM_MEMMGR_ALLOC_USER) { + tbl.session_heap_alloc_cnt--; + + if ((!tbl.session_heap_alloc_cnt) && tbl.cam_session_mem_pool_handle) { + if (cam_mem_mgr_release_to_pvm_heap(CAM_SVM_HEAP_SESSION)) + CAM_ERR(CAM_MEM, "Failed to destroy SVM session heap"); + } + } else { + 
tbl.device_heap_alloc_cnt--; + } + + mutex_unlock(&tbl.m_lock); +#endif + return rc; +} +#else + +bool cam_mem_mgr_ubwc_p_heap_supported(void) +{ + return false; +} + +static int cam_mem_util_get_dma_buf(size_t len, + unsigned int cam_flags, + enum cam_mem_mgr_allocator alloc_type, + struct dma_buf **buf, + unsigned long *i_ino, + enum cam_dma_heap_type *heap_type) +{ + int rc = 0; + unsigned int heap_id; + int32_t ion_flag = 0; + struct timespec64 ts1, ts2; + long microsec = 0; + + if (!buf) { + CAM_ERR(CAM_MEM, "Invalid params"); + return -EINVAL; + } + + if (cam_flags & CAM_MEM_FLAG_UBWC_P_HEAP) { + CAM_ERR(CAM_MEM, "ubwcp heap not supported"); + return -EINVAL; + } + + if (g_cam_mem_mgr_debug.alloc_profile_enable) + CAM_GET_TIMESTAMP(ts1); + + if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) && + (cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT)) { + heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID); + ion_flag |= + ION_FLAG_SECURE | ION_FLAG_CP_CAMERA | ION_FLAG_CP_CDSP; + } else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) { + heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID); + ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA; + } else { + heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) | + ION_HEAP(ION_CAMERA_HEAP_ID); + } + + if (cam_flags & CAM_MEM_FLAG_CACHE) + ion_flag |= ION_FLAG_CACHED; + else + ion_flag &= ~ION_FLAG_CACHED; + + if (tbl.force_cache_allocs && (!(ion_flag & ION_FLAG_SECURE))) + ion_flag |= ION_FLAG_CACHED; + + *buf = ion_alloc(len, heap_id, ion_flag); + if (IS_ERR_OR_NULL(*buf)) + return -ENOMEM; + + *i_ino = file_inode((*buf)->file)->i_ino; + + if (g_cam_mem_mgr_debug.alloc_profile_enable) { + CAM_GET_TIMESTAMP(ts2); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec); + trace_cam_log_event("IONAllocProfile", "size and time in micro", + len, microsec); + } + + return rc; +} +#endif + +static int cam_mem_util_buffer_alloc(size_t len, uint32_t flags, + struct dma_buf **dmabuf, + int *fd, + unsigned long *i_ino, + enum cam_dma_heap_type *heap_type) +{ + int rc; + + 
rc = cam_mem_util_get_dma_buf(len, flags, CAM_MEMMGR_ALLOC_USER, + dmabuf, i_ino, heap_type); + if (rc) { + CAM_ERR(CAM_MEM, + "Error allocating dma buf : len=%llu, flags=0x%x", + len, flags); + return rc; + } + + /* + * increment the ref count so that ref count becomes 2 here + * when we close fd, refcount becomes 1 and when we do + * dmap_put_buf, ref count becomes 0 and memory will be freed. + */ + get_dma_buf(*dmabuf); + + *fd = dma_buf_fd(*dmabuf, O_CLOEXEC); + if (*fd < 0) { + CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd); + rc = -EINVAL; + goto put_buf; + } + + CAM_DBG(CAM_MEM, "Alloc success : len=%zu, *dmabuf=%pK, fd=%d, i_ino=%lu", + len, *dmabuf, *fd, *i_ino); + + return rc; + +put_buf: + dma_buf_put(*dmabuf); + return rc; +} + +static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd_v2 *cmd) +{ + if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) { + CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)", + CAM_MEM_MMU_MAX_HANDLE); + return -EINVAL; + } + + if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE && + cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) { + CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed"); + return -EINVAL; + } + + if ((cmd->flags & CAM_MEM_FLAG_EVA_NOPIXEL) && + (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE || + cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)) { + CAM_ERR(CAM_MEM, + "Kernel mapping and secure mode not allowed in no pixel mode"); + return -EINVAL; + } + + if (cmd->flags & CAM_MEM_FLAG_UBWC_P_HEAP && + (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE || + cmd->flags & CAM_MEM_FLAG_EVA_NOPIXEL || + cmd->flags & CAM_MEM_FLAG_KMD_ACCESS || + cmd->flags & CAM_MEM_FLAG_CMD_BUF_TYPE || + cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS || + cmd->flags & CAM_MEM_FLAG_HW_AND_CDM_OR_SHARED)) { + CAM_ERR(CAM_MEM, + "UBWC-P buffer not supported with this combinatation of flags 0x%x", + cmd->flags); + return -EINVAL; + } + + return 0; +} + +static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd_v2 *cmd) +{ + if (!cmd->flags) { + 
CAM_ERR(CAM_MEM, "Invalid flags"); + return -EINVAL; + } + + if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) { + CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)", + cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE); + return -EINVAL; + } + + if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE && + cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) { + CAM_ERR(CAM_MEM, + "Kernel mapping in secure mode not allowed, flags=0x%x", + cmd->flags); + return -EINVAL; + } + + if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) { + CAM_ERR(CAM_MEM, + "Shared memory buffers are not allowed to be mapped"); + return -EINVAL; + } + + return 0; +} + +static int cam_mem_util_map_hw_va(uint32_t flags, + int32_t *mmu_hdls, + int32_t num_hdls, + int fd, + struct dma_buf *dmabuf, + struct cam_mem_buf_hw_hdl_info *hw_vaddr_info_arr, + size_t *len, + enum cam_smmu_region_id region, + bool is_internal) +{ + int i; + int rc = -1; + int dir = cam_mem_util_get_dma_dir(flags); + bool dis_delayed_unmap = false; + dma_addr_t hw_vaddr; + struct kref *ref_count; + struct cam_mem_buf_hw_hdl_info *hdl_info = NULL; + + if (dir < 0) { + CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir); + return dir; + } + + if (flags & CAM_MEM_FLAG_DISABLE_DELAYED_UNMAP) + dis_delayed_unmap = true; + + CAM_DBG(CAM_MEM, + "map_hw_va : fd = %d, flags = 0x%x, dir=%d, num_hdls=%d", + fd, flags, dir, num_hdls); + + for (i = 0; i < num_hdls; i++) { + if (cam_mem_mgr_is_iova_info_updated_locked(hw_vaddr_info_arr, mmu_hdls[i])) + continue; + + /* If 36-bit enabled, check for ICP cmd buffers and map them within the shared region */ + if (cam_smmu_is_expanded_memory() && + cam_smmu_supports_shared_region(mmu_hdls[i]) && + ((flags & CAM_MEM_FLAG_CMD_BUF_TYPE) || + (flags & CAM_MEM_FLAG_HW_AND_CDM_OR_SHARED))) + region = CAM_SMMU_REGION_SHARED; + + if (flags & CAM_MEM_FLAG_PROTECTED_MODE) + rc = cam_smmu_map_stage2_iova(mmu_hdls[i], fd, dmabuf, dir, &hw_vaddr, len, + &ref_count); + else + rc = cam_smmu_map_user_iova(mmu_hdls[i], fd, dmabuf, 
dis_delayed_unmap, dir, + &hw_vaddr, len, region, is_internal, &ref_count); + if (rc) { + CAM_ERR(CAM_MEM, + "Failed %s map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d", + (flags & CAM_MEM_FLAG_PROTECTED_MODE) ? "" : "secured", + i, fd, dir, mmu_hdls[i], rc); + goto multi_map_fail; + } + + /* cache hw va */ + cam_mem_mgr_update_iova_info_locked(hw_vaddr_info_arr, + hw_vaddr, mmu_hdls[i], *len, true, ref_count); + } + + return rc; +multi_map_fail: + for (i = 0; i < tbl.max_hdls_supported; i++) { + if (!hw_vaddr_info_arr[i].valid_mapping) + continue; + + hdl_info = &hw_vaddr_info_arr[i]; + + if (flags & CAM_MEM_FLAG_PROTECTED_MODE) + cam_smmu_unmap_stage2_iova(hdl_info->iommu_hdl, fd, dmabuf, + false); + else + cam_smmu_unmap_user_iova(hdl_info->iommu_hdl, fd, dmabuf, + CAM_SMMU_REGION_IO, false); + } + /* reset any updated entries */ + memset(hw_vaddr_info_arr, 0x0, tbl.max_hdls_info_size); + return rc; +} + +int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd_v2 *cmd) +{ + int rc, idx; + struct dma_buf *dmabuf = NULL; + int fd = -1; + size_t len; + uintptr_t kvaddr = 0; + size_t klen; + unsigned long i_ino = 0; + enum cam_dma_heap_type heap_type; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_MEM, " Invalid argument"); + return -EINVAL; + } + + if (cmd->num_hdl > tbl.max_hdls_supported) { + CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)", + cmd->num_hdl, tbl.max_hdls_supported); + return -EINVAL; + } + + len = cmd->len; + + if (tbl.need_shared_buffer_padding && + (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)) { + len += CAM_MEM_SHARED_BUFFER_PAD_4K; + CAM_DBG(CAM_MEM, "Pad 4k size, actual %llu, allocating %zu", + cmd->len, len); + } + + if (cam_presil_mode_enabled()) { + cmd->flags |= CAM_MEM_FLAG_KMD_ACCESS; + CAM_DBG(CAM_MEM, "PRESIL BUF add KMD_ACCESS, need for bufcopy 0x%08x", cmd->flags); + } + rc = cam_mem_util_check_alloc_flags(cmd); + if (rc) { + CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d", + cmd->flags, rc); + return rc; + } + + rc = cam_mem_util_buffer_alloc(len, cmd->flags, &dmabuf, &fd, + &i_ino, &heap_type); + if (rc) { + CAM_ERR(CAM_MEM, + "Ion Alloc failed, len=%llu, align=%llu, flags=0x%x, num_hdl=%d", + len, cmd->align, cmd->flags, cmd->num_hdl); + cam_mem_mgr_print_tbl(); + return rc; + } + if (!dmabuf) { + CAM_ERR(CAM_MEM, + "Ion Alloc return NULL dmabuf! 
fd=%d, i_ino=%lu, len=%d", fd, i_ino, len); + cam_mem_mgr_print_tbl(); + return rc; + } + + idx = cam_mem_get_slot(); + if (idx < 0) { + CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx); + rc = -ENOMEM; + cam_mem_mgr_print_tbl(); + goto slot_fail; + } + + if (cam_dma_buf_set_name(dmabuf, cmd->buf_name)) + CAM_ERR(CAM_MEM, "set dma buffer name(%s) failed", cmd->buf_name); + + if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) || + (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) || + (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) { + + enum cam_smmu_region_id region; + + if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) + region = CAM_SMMU_REGION_IO; + + + if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) + region = CAM_SMMU_REGION_SHARED; + + if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) + region = CAM_SMMU_REGION_IO; + + rc = cam_mem_util_map_hw_va(cmd->flags, + cmd->mmu_hdls, + cmd->num_hdl, + fd, + dmabuf, + tbl.bufq[idx].hdls_info, + &len, + region, + true); + + if (rc) { + CAM_ERR(CAM_MEM, + "Failed in map_hw_va len=%llu, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d", + len, cmd->flags, + fd, region, cmd->num_hdl, rc); + if (rc == -EALREADY) { + if ((size_t)dmabuf->size != len) + rc = -EBADR; + cam_mem_mgr_print_tbl(); + } + goto map_hw_fail; + } + } + + mutex_lock(&tbl.bufq[idx].q_lock); + tbl.bufq[idx].fd = fd; + tbl.bufq[idx].i_ino = i_ino; + tbl.bufq[idx].dma_buf = NULL; + tbl.bufq[idx].flags = cmd->flags; + tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd); + tbl.bufq[idx].is_internal = true; + if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) + CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true); + + if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) { + rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen); + if (rc) { + CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d", + dmabuf, rc); + goto map_kernel_fail; + } + } + + if (cmd->flags & CAM_MEM_FLAG_KMD_DEBUG_FLAG) + tbl.dbg_buf_idx = idx; + + tbl.bufq[idx].kmdvaddr = kvaddr; + tbl.bufq[idx].dma_buf = dmabuf; + 
tbl.bufq[idx].len = len; + tbl.bufq[idx].num_hdls = cmd->num_hdl; + cam_mem_mgr_reset_presil_params(idx); + tbl.bufq[idx].is_imported = false; + + if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) + kref_init(&tbl.bufq[idx].krefcount); + + kref_init(&tbl.bufq[idx].urefcount); + + tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER; + strscpy(tbl.bufq[idx].buf_name, cmd->buf_name, sizeof(tbl.bufq[idx].buf_name)); + tbl.bufq[idx].dma_heap_type = heap_type; + mutex_unlock(&tbl.bufq[idx].q_lock); + + cmd->out.buf_handle = tbl.bufq[idx].buf_handle; + cmd->out.fd = tbl.bufq[idx].fd; + cmd->out.vaddr = 0; + + CAM_DBG(CAM_MEM, + "fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu, i_ino=%lu, name:%s", + cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle, + tbl.bufq[idx].len, tbl.bufq[idx].i_ino, cmd->buf_name); + + return rc; + +map_kernel_fail: + mutex_unlock(&tbl.bufq[idx].q_lock); +map_hw_fail: + cam_mem_put_slot(idx); +slot_fail: + dma_buf_put(dmabuf); + return rc; +} + +int cam_mem_mgr_alloc_presil_copy_buf(uint32_t presil_copy_buffer_len, + struct dma_buf **p_retrieve_buffer_dma_buf, + uint32_t *p_buf_handle, + uintptr_t *p_kvaddr) +{ + int rc; + struct dma_buf *dmabuf = NULL; + int fd = -1; + uintptr_t kvaddr = 0; + size_t klen; + unsigned long i_ino = 0; + enum cam_dma_heap_type heap_type; + + if (!p_buf_handle || !p_retrieve_buffer_dma_buf || !p_kvaddr) { + CAM_ERR(CAM_MEM, "Invalid argument %pK %pK %pK", + p_buf_handle, p_retrieve_buffer_dma_buf, p_kvaddr); + return -EINVAL; + } + + rc = cam_mem_util_buffer_alloc(presil_copy_buffer_len, + CAM_MEM_FLAG_UMD_ACCESS|CAM_MEM_FLAG_KMD_ACCESS, + &dmabuf, + &fd, + &i_ino, + &heap_type); + if (rc) { + CAM_ERR(CAM_MEM, + "Ion Alloc failed len=%llu flags=0x%x dmabuf=0x%x fd=%d i_no=%d heap_type=0x%x", + presil_copy_buffer_len, CAM_MEM_FLAG_UMD_ACCESS|CAM_MEM_FLAG_KMD_ACCESS, + dmabuf, fd, i_ino, heap_type); + return rc; + } + + rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen); + if (rc) { + 
CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d", dmabuf, rc); + dma_buf_put(dmabuf); + return rc; + } + + CAM_DBG(CAM_MEM, + "Success len=%llu flags=0x%x dmabuf 0x%x fd=%d i_no=%d heap_type=0x%x kvaddr %pK klen %d", + presil_copy_buffer_len, CAM_MEM_FLAG_UMD_ACCESS|CAM_MEM_FLAG_KMD_ACCESS, + dmabuf, fd, i_ino, heap_type, kvaddr, klen); + + *p_retrieve_buffer_dma_buf = dmabuf; + *p_kvaddr = kvaddr; + *p_buf_handle = GET_MEM_HANDLE(-1, fd); + + return 0; +} + +static bool cam_mem_util_is_map_internal(int32_t fd, unsigned i_ino) +{ + uint32_t i; + bool is_internal = false; + + mutex_lock(&tbl.m_lock); + for_each_set_bit(i, tbl.bitmap, tbl.bits) { + if ((tbl.bufq[i].fd == fd) && (tbl.bufq[i].i_ino == i_ino)) { + is_internal = tbl.bufq[i].is_internal; + break; + } + } + mutex_unlock(&tbl.m_lock); + + return is_internal; +} + +int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd_v2 *cmd) +{ + int32_t idx; + int rc; + struct dma_buf *dmabuf; + size_t len = 0; + bool is_internal = false; + unsigned long i_ino; + uintptr_t kvaddr = 0; + size_t klen; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + if (!cmd || (cmd->fd < 0)) { + CAM_ERR(CAM_MEM, "Invalid argument"); + return -EINVAL; + } + + if (cmd->num_hdl > tbl.max_hdls_supported) { + CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)", + cmd->num_hdl, tbl.max_hdls_supported); + return -EINVAL; + } + + if (cam_presil_mode_enabled()) { + cmd->flags |= CAM_MEM_FLAG_KMD_ACCESS; + CAM_DBG(CAM_MEM, "presil kmd access 0x%08x", cmd->flags); + } + + rc = cam_mem_util_check_map_flags(cmd); + if (rc) { + CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags); + return rc; + } + + dmabuf = dma_buf_get(cmd->fd); + if (IS_ERR_OR_NULL((void *)(dmabuf))) { + CAM_ERR(CAM_MEM, "Failed to import dma_buf fd"); + return -EINVAL; + } + + i_ino = file_inode(dmabuf->file)->i_ino; + + is_internal = cam_mem_util_is_map_internal(cmd->fd, i_ino); + + idx = cam_mem_get_slot(); + if (idx < 0) { + CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d, fd=%d", + idx, cmd->fd); + rc = -ENOMEM; + cam_mem_mgr_print_tbl(); + goto slot_fail; + } + + if (cam_dma_buf_set_name(dmabuf, cmd->buf_name)) + CAM_DBG(CAM_MEM, "Dma buffer (%s) busy", cmd->buf_name); + + if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) || + (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) { + rc = cam_mem_util_map_hw_va(cmd->flags, + cmd->mmu_hdls, + cmd->num_hdl, + cmd->fd, + dmabuf, + tbl.bufq[idx].hdls_info, + &len, + CAM_SMMU_REGION_IO, + is_internal); + if (rc) { + CAM_ERR(CAM_MEM, + "Failed in map_hw_va, flags=0x%x, fd=%d, len=%llu, region=%d, num_hdl=%d, rc=%d", + cmd->flags, cmd->fd, len, + CAM_SMMU_REGION_IO, cmd->num_hdl, rc); + if (rc == -EALREADY) { + if ((size_t)dmabuf->size != len) { + rc = -EBADR; + cam_mem_mgr_print_tbl(); + } + } + goto map_fail; + } + } + + mutex_lock(&tbl.bufq[idx].q_lock); + tbl.bufq[idx].fd = cmd->fd; + tbl.bufq[idx].i_ino = i_ino; + tbl.bufq[idx].dma_buf = NULL; + tbl.bufq[idx].flags = cmd->flags; + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].buf_handle = 
GET_MEM_HANDLE(idx, cmd->fd); + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + + if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) + CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true); + + if (cam_presil_mode_enabled()) { + if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) { + rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen); + if (rc) { + CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d", + dmabuf, rc); + goto map_kernel_fail; + } + } + tbl.bufq[idx].kmdvaddr = kvaddr; + CAM_DBG(CAM_MEM, + "mapped presil fd=%dflags=0x%x idx=%d len=%zu klen=%zu i_ino=%lu name:%s", + cmd->fd, cmd->flags, idx, len, klen, tbl.bufq[idx].i_ino, cmd->buf_name); + } else { + tbl.bufq[idx].kmdvaddr = 0; + } + + tbl.bufq[idx].dma_buf = dmabuf; + tbl.bufq[idx].len = len; + tbl.bufq[idx].num_hdls = cmd->num_hdl; + tbl.bufq[idx].is_imported = true; + tbl.bufq[idx].is_internal = is_internal; + if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) + kref_init(&tbl.bufq[idx].krefcount); + kref_init(&tbl.bufq[idx].urefcount); + tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER; + strscpy(tbl.bufq[idx].buf_name, cmd->buf_name, sizeof(tbl.bufq[idx].buf_name)); + mutex_unlock(&tbl.bufq[idx].q_lock); + + cmd->out.buf_handle = tbl.bufq[idx].buf_handle; + cmd->out.vaddr = 0; + cmd->out.size = (uint32_t)len; + CAM_DBG(CAM_MEM, + "fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu, i_ino=%lu, name:%s", + cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle, + tbl.bufq[idx].len, tbl.bufq[idx].i_ino, cmd->buf_name); + + return rc; + +map_kernel_fail: + mutex_unlock(&tbl.bufq[idx].q_lock); +map_fail: + cam_mem_put_slot(idx); +slot_fail: + dma_buf_put(dmabuf); + return rc; +} + +static int cam_mem_util_unmap_hw_va(int32_t idx, + enum cam_smmu_region_id region, + enum cam_smmu_mapping_client client, bool force_unmap) +{ + int i, fd, num_hdls; + uint32_t flags; + struct cam_mem_buf_hw_hdl_info *hdl_info = NULL; + struct dma_buf *dma_buf; + unsigned long i_ino; + int rc = 0; + + if (idx >= 
CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "Incorrect index"); + return -EINVAL; + } + + flags = tbl.bufq[idx].flags; + num_hdls = tbl.bufq[idx].num_hdls; + fd = tbl.bufq[idx].fd; + dma_buf = tbl.bufq[idx].dma_buf; + i_ino = tbl.bufq[idx].i_ino; + + if (unlikely(!num_hdls)) { + CAM_DBG(CAM_MEM, "No valid handles to unmap"); + return 0; + } + + CAM_DBG(CAM_MEM, + "unmap_hw_va : idx=%d, fd=%x, i_ino=%lu flags=0x%x, num_hdls=%d, client=%d", + idx, fd, i_ino, flags, tbl.bufq[idx].num_hdls, client); + + for (i = 0; i < tbl.max_hdls_supported; i++) { + if (!tbl.bufq[idx].hdls_info[i].valid_mapping) + continue; + + hdl_info = &tbl.bufq[idx].hdls_info[i]; + + if (flags & CAM_MEM_FLAG_PROTECTED_MODE) + rc = cam_smmu_unmap_stage2_iova(hdl_info->iommu_hdl, fd, dma_buf, + force_unmap); + else if (client == CAM_SMMU_MAPPING_USER) + rc = cam_smmu_unmap_user_iova(hdl_info->iommu_hdl, fd, dma_buf, region, + force_unmap); + else if (client == CAM_SMMU_MAPPING_KERNEL) + rc = cam_smmu_unmap_kernel_iova(hdl_info->iommu_hdl, + tbl.bufq[idx].dma_buf, region); + else { + CAM_ERR(CAM_MEM, "invalid caller for unmapping : %d", client); + rc = -EINVAL; + goto end; + } + + if (rc < 0) { + CAM_ERR(CAM_MEM, + "Failed in %s unmap, i=%d, fd=%d, i_ino=%lu, mmu_hdl=%d, rc=%d", + ((flags & CAM_MEM_FLAG_PROTECTED_MODE) ? 
"secure" : "non-secure"), + i, fd, i_ino, hdl_info->iommu_hdl, rc); + goto end; + } + + CAM_DBG(CAM_MEM, + "i: %d unmap_hw_va : idx=%d, fd=%x, i_ino=%lu flags=0x%x, num_hdls=%d, client=%d hdl: %d", + i, idx, fd, i_ino, flags, tbl.bufq[idx].num_hdls, + client, hdl_info->iommu_hdl); + + /* exit loop if all handles for this buffer have been unmapped */ + if (!(--num_hdls)) + break; + } + +end: + return rc; +} + +static void cam_mem_mgr_unmap_active_buf(int idx) +{ + enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED; + + if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) + region = CAM_SMMU_REGION_SHARED; + else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) + region = CAM_SMMU_REGION_IO; + + cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER, true); + + if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) + cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf, + tbl.bufq[idx].kmdvaddr); +} + +static int cam_mem_mgr_cleanup_table(void) +{ + int i; + + if (cam_presil_mode_enabled()) { + CAM_INFO(CAM_MEM, "PRESIL-HACK not cleaning up table, HFI not free/alloc hack"); + return 0; + } + + for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) { + mutex_lock(&tbl.bufq[i].q_lock); + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[i].idx_lock); + if (!tbl.bufq[i].active) { + CAM_DBG(CAM_MEM, + "Buffer inactive at idx=%d, continuing", i); + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[i].idx_lock); + mutex_unlock(&tbl.bufq[i].q_lock); + mutex_destroy(&tbl.bufq[i].q_lock); + continue; + } else { + CAM_DBG(CAM_MEM, + "Buffer active at idx=%d fd=0x%x handle:0x%x kref:%d name: %s", + i, tbl.bufq[i].fd, tbl.bufq[i].buf_handle, tbl.bufq[i].krefcount, tbl.bufq[i].buf_name); + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[i].idx_lock); + cam_mem_mgr_unmap_active_buf(i); + } + + if (tbl.bufq[i].dma_buf) { + dma_buf_put(tbl.bufq[i].dma_buf); + tbl.bufq[i].dma_buf = NULL; + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#ifdef CONFIG_ARCH_QTI_VM + mutex_lock(&tbl.m_lock); + if (!tbl.bufq[i].is_imported) { + if 
(tbl.bufq[i].dma_heap_type == CAM_SVM_HEAP_DEVICE) + tbl.device_heap_alloc_cnt--; + else if (tbl.bufq[i].dma_heap_type == CAM_SVM_HEAP_SESSION) + tbl.session_heap_alloc_cnt--; + } + mutex_unlock(&tbl.m_lock); +#endif +#endif + } + tbl.bufq[i].fd = -1; + tbl.bufq[i].i_ino = 0; + tbl.bufq[i].flags = 0; + tbl.bufq[i].buf_handle = -1; + tbl.bufq[i].len = 0; + tbl.bufq[i].num_hdls = 0; + tbl.bufq[i].dma_buf = NULL; + tbl.bufq[i].active = false; + tbl.bufq[i].release_deferred = false; + tbl.bufq[i].is_internal = false; + memset(tbl.bufq[i].hdls_info, 0x0, tbl.max_hdls_info_size); + cam_mem_mgr_reset_presil_params(i); + kref_init(&tbl.bufq[i].krefcount); + kref_init(&tbl.bufq[i].urefcount); + mutex_unlock(&tbl.bufq[i].q_lock); + mutex_destroy(&tbl.bufq[i].q_lock); + } + mutex_lock(&tbl.m_lock); +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#ifdef CONFIG_ARCH_QTI_VM + { + int rc; + + if ((!tbl.device_heap_alloc_cnt) && tbl.cam_device_mem_pool_handle) { + rc = cam_mem_mgr_release_to_pvm_heap(CAM_SVM_HEAP_DEVICE); + if (rc) + CAM_ERR(CAM_MEM, "Failed to destroy SVM device heap rc: %d", rc); + } else { + CAM_ERR(CAM_MEM, "Invalid device buffer cnt: %d handle: 0x%x", + tbl.device_heap_alloc_cnt, tbl.cam_device_mem_pool_handle); + } + + if ((!tbl.session_heap_alloc_cnt) && tbl.cam_session_mem_pool_handle) { + CAM_DBG(CAM_MEM, "Session buffer cnt: %d handle: 0x%x", + tbl.session_heap_alloc_cnt, tbl.cam_session_mem_pool_handle); + + rc = cam_mem_mgr_release_to_pvm_heap(CAM_SVM_HEAP_SESSION); + if (rc) + CAM_ERR(CAM_MEM, "Failed to destroy SVM session heap rc=%d", rc); + } else { + CAM_DBG(CAM_MEM, "Cleanup with session buffer cnt: %d handle: 0x%x", + tbl.session_heap_alloc_cnt, tbl.cam_session_mem_pool_handle); + } + } +#endif +#endif + + bitmap_zero(tbl.bitmap, tbl.bits); + /* We need to reserve slot 0 because 0 is invalid */ + set_bit(0, tbl.bitmap); + mutex_unlock(&tbl.m_lock); + + return 0; +} + +void cam_mem_mgr_deinit(void) +{ + int i; + + if (!atomic_read(&cam_mem_mgr_state)) + 
return; + + atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED); + cam_mem_mgr_cleanup_table(); + cam_smmu_driver_deinit(); + mutex_lock(&tbl.m_lock); + bitmap_zero(tbl.bitmap, tbl.bits); + CAM_MEM_FREE(tbl.bitmap); + tbl.bitmap = NULL; + tbl.dbg_buf_idx = -1; + + /* index 0 is reserved */ + for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) { + CAM_MEM_FREE(tbl.bufq[i].hdls_info); + tbl.bufq[i].hdls_info = NULL; + } + + mutex_unlock(&tbl.m_lock); + mutex_destroy(&tbl.m_lock); +} + +static void cam_mem_util_unmap_dummy(struct kref *kref) +{ + CAM_DBG(CAM_MEM, "Cam mem util unmap dummy"); +} + +static void cam_mem_util_unmap(int32_t idx) +{ + int rc = 0; + enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED; + enum cam_smmu_mapping_client client; + + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "Incorrect index"); + return; + } + + client = tbl.bufq[idx].smmu_mapping_client; + + CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx); + + if (!tbl.bufq[idx].active) { + CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped", idx); + return; + } + + /* Deactivate the buffer queue to prevent multiple unmap */ + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].active = false; + tbl.bufq[idx].buf_handle = -1; + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].release_deferred = false; + + if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) { + if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) { + rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf, + tbl.bufq[idx].kmdvaddr); + if (rc) + CAM_ERR(CAM_MEM, + "Failed, dmabuf=%pK, kmdvaddr=%pK", + tbl.bufq[idx].dma_buf, + (void *) tbl.bufq[idx].kmdvaddr); + } + } + + /* SHARED flag gets precedence, all other flags after it */ + if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) { + region = CAM_SMMU_REGION_SHARED; + } else { + if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) + region = CAM_SMMU_REGION_IO; + } + + if ((tbl.bufq[idx].flags & 
CAM_MEM_FLAG_HW_READ_WRITE) || + (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) || + (tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) { + rc = cam_mem_util_unmap_hw_va(idx, region, client, false); + if (rc) + CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK", + tbl.bufq[idx].dma_buf); + } + + tbl.bufq[idx].flags = 0; + + CAM_DBG(CAM_MEM, + "Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK, i_ino %lu", + idx, tbl.bufq[idx].fd, tbl.bufq[idx].is_imported, tbl.bufq[idx].dma_buf, + tbl.bufq[idx].i_ino); + + if (tbl.bufq[idx].dma_buf) + dma_buf_put(tbl.bufq[idx].dma_buf); + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#ifdef CONFIG_ARCH_QTI_VM + mutex_lock(&tbl.m_lock); + if (!tbl.bufq[idx].is_imported) { + if (tbl.bufq[idx].dma_heap_type == CAM_SVM_HEAP_DEVICE) + tbl.device_heap_alloc_cnt--; + else if (tbl.bufq[idx].dma_heap_type == CAM_SVM_HEAP_SESSION) + tbl.session_heap_alloc_cnt--; + } + + CAM_DBG(CAM_MEM, "Unmap with session_heap_alloc_cnt: %d device_heap_alloc_cnt: %d", + tbl.session_heap_alloc_cnt, tbl.device_heap_alloc_cnt); + + if ((!tbl.session_heap_alloc_cnt) && tbl.cam_session_mem_pool_handle) { + rc = cam_mem_mgr_release_to_pvm_heap(CAM_SVM_HEAP_SESSION); + if (rc) + CAM_ERR(CAM_MEM, "Failed to destroy SVM session pool rc: %d", rc); + } + mutex_unlock(&tbl.m_lock); +#endif +#endif + + tbl.bufq[idx].fd = -1; + tbl.bufq[idx].i_ino = 0; + tbl.bufq[idx].dma_buf = NULL; + tbl.bufq[idx].is_imported = false; + tbl.bufq[idx].is_internal = false; + tbl.bufq[idx].len = 0; + tbl.bufq[idx].num_hdls = 0; + memset(tbl.bufq[idx].hdls_info, 0x0, tbl.max_hdls_info_size); + cam_mem_mgr_reset_presil_params(idx); + memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64)); + memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref)); + memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref)); + + mutex_lock(&tbl.m_lock); + clear_bit(idx, tbl.bitmap); + mutex_unlock(&tbl.m_lock); + +} + +static void cam_mem_util_unmap_wrapper(struct kref *kref) +{ + int32_t idx; + 
struct cam_mem_buf_queue *bufq = container_of(kref, typeof(*bufq), krefcount); + + idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "idx: %d not valid", idx); + return; + } + + cam_mem_util_unmap(idx); +} + +void cam_mem_put_cpu_buf(int32_t buf_handle) +{ + int idx; + uint64_t ms, hrs, min, sec; + struct timespec64 current_ts; + uint32_t krefcount = 0, urefcount = 0; + bool unmap = false; + + /* Check to avoid kernel panic - cannot call mutex in softirq/atomic context */ + if (!in_task()) { + CAM_ERR(CAM_MEM, "Calling from softirq/atomic context"); + dump_stack(); + return; + } + + if (!buf_handle) { + CAM_ERR(CAM_MEM, "Invalid buf_handle"); + return; + } + + idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "idx: %d not valid", idx); + return; + } + + mutex_lock(&tbl.bufq[idx].q_lock); + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + if (!tbl.bufq[idx].active) { + CAM_ERR(CAM_MEM, "idx: %d not active", idx); + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + goto end; + } + + if (buf_handle != tbl.bufq[idx].buf_handle) { + CAM_ERR(CAM_MEM, "idx: %d Invalid buf handle %d", + idx, buf_handle); + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + goto end; + } + + kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_dummy); + + krefcount = kref_read(&tbl.bufq[idx].krefcount); + urefcount = kref_read(&tbl.bufq[idx].urefcount); + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + + if ((krefcount == 1) && (urefcount == 0)) + unmap = true; + + if (unmap) { + cam_mem_util_unmap(idx); + CAM_GET_TIMESTAMP(current_ts); + CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms); + CAM_DBG(CAM_MEM, + "%llu:%llu:%llu:%llu Called unmap from here, buf_handle: %u, idx: %d", + hrs, min, sec, ms, buf_handle, idx); + } else if (tbl.bufq[idx].release_deferred) { + CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[idx].timestamp), hrs, min, sec, ms); + 
CAM_ERR(CAM_MEM, + "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s", + hrs, min, sec, ms, idx, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino, + tbl.bufq[idx].len, tbl.bufq[idx].active, tbl.bufq[idx].buf_handle, + krefcount, urefcount, tbl.bufq[idx].buf_name); + CAM_GET_TIMESTAMP(current_ts); + CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms); + CAM_ERR(CAM_MEM, + "%llu:%llu:%llu:%llu Not unmapping even after defer, buf_handle: %u, idx: %d", + hrs, min, sec, ms, buf_handle, idx); + } else if (krefcount == 0) { + CAM_ERR(CAM_MEM, + "Unbalanced release Called buf_handle: %u, idx: %d kref: %d", + tbl.bufq[idx].buf_handle, idx, krefcount); + } + +end: + mutex_unlock(&tbl.bufq[idx].q_lock); +} +EXPORT_SYMBOL(cam_mem_put_cpu_buf); + +void cam_mem_put_kref(int32_t buf_handle) +{ + int idx; + uint32_t krefcount = 0, urefcount = 0; + uint64_t ms, hrs, min, sec; + + if (!buf_handle) { + CAM_ERR(CAM_MEM, "Invalid buf_handle"); + return; + } + + idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "idx: %d not valid", idx); + return; + } + + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + if (tbl.bufq[idx].active && (buf_handle == tbl.bufq[idx].buf_handle)) { + urefcount = kref_read(&tbl.bufq[idx].urefcount); + krefcount = kref_read(&tbl.bufq[idx].krefcount); + + if (urefcount == 0) { + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + goto warn; + } else + kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_dummy); + } + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + return; +warn: + CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[idx].timestamp), hrs, min, sec, ms); + CAM_ERR(CAM_MEM, + "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s", + hrs, min, sec, ms, idx, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino, + tbl.bufq[idx].len, tbl.bufq[idx].active, tbl.bufq[idx].buf_handle, + 
krefcount, urefcount, tbl.bufq[idx].buf_name); + CAM_ERR(CAM_MEM, "Buffer unmap called from UMD before KMD , not unmapping!"); + +} +EXPORT_SYMBOL(cam_mem_put_kref); + +int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd) +{ + int idx; + int rc = 0; + uint64_t ms, hrs, min, sec; + struct timespec64 current_ts; + uint32_t krefcount = 0, urefcount = 0; + bool unmap = false; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized"); + return -EINVAL; + } + + if (!cmd) { + CAM_ERR(CAM_MEM, "Invalid argument"); + return -EINVAL; + } + + idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle", + idx); + return -EINVAL; + } + mutex_lock(&tbl.bufq[idx].q_lock); + if (!tbl.bufq[idx].active) { + CAM_ERR(CAM_MEM, "Released buffer state should be active"); + rc = -EINVAL; + goto end; + } + + if (tbl.bufq[idx].buf_handle != cmd->buf_handle) { + CAM_ERR(CAM_MEM, + "Released buf handle %d not matching within table %d, idx=%d", + cmd->buf_handle, tbl.bufq[idx].buf_handle, idx); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx); + + kref_put(&tbl.bufq[idx].urefcount, cam_mem_util_unmap_dummy); + + urefcount = kref_read(&tbl.bufq[idx].urefcount); + + if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) { + krefcount = kref_read(&tbl.bufq[idx].krefcount); + if ((krefcount == 1) && (urefcount == 0)) + unmap = true; + } else { + if (urefcount == 0) + unmap = true; + } + + if (unmap) { + cam_mem_util_unmap(idx); + CAM_DBG(CAM_MEM, + "Called unmap from here, buf_handle: %u, idx: %d", cmd->buf_handle, idx); + } else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) { + rc = -EINVAL; + CAM_GET_TIMESTAMP(current_ts); + CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms); + CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[idx].timestamp), hrs, min, sec, ms); + CAM_ERR(CAM_MEM, + 
"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s", + hrs, min, sec, ms, idx, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino, + tbl.bufq[idx].len, tbl.bufq[idx].active, tbl.bufq[idx].buf_handle, + krefcount, urefcount, tbl.bufq[idx].buf_name); + if (tbl.bufq[idx].release_deferred) + CAM_ERR(CAM_MEM, "Unbalanced release Called buf_handle: %u, idx: %d", + tbl.bufq[idx].buf_handle, idx); + tbl.bufq[idx].release_deferred = true; + } + +end: + mutex_unlock(&tbl.bufq[idx].q_lock); + return rc; +} + +int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp, + struct cam_mem_mgr_memory_desc *out) +{ + struct dma_buf *buf = NULL; + int ion_fd = -1, rc = 0; + uintptr_t kvaddr; + dma_addr_t iova = 0; + size_t request_len = 0; + uint32_t mem_handle; + int32_t idx; + int32_t smmu_hdl = 0; + unsigned long i_ino = 0; + enum cam_dma_heap_type heap_type; + + enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + if (!inp || !out) { + CAM_ERR(CAM_MEM, "Invalid params"); + return -EINVAL; + } + + if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE || + inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS || + inp->flags & CAM_MEM_FLAG_CACHE)) { + CAM_ERR(CAM_MEM, "Invalid flags for request mem"); + return -EINVAL; + } + + rc = cam_mem_util_get_dma_buf(inp->size, inp->flags, CAM_MEMMGR_ALLOC_KERNEL, + &buf, &i_ino, &heap_type); + + if (rc) { + CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer"); + goto ion_fail; + } else if (!buf) { + CAM_ERR(CAM_MEM, "ION alloc returned NULL buffer"); + goto ion_fail; + } else { + CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf); + } + + /* + * we are mapping kva always here, + * update flags so that we do unmap properly + */ + inp->flags |= CAM_MEM_FLAG_KMD_ACCESS; + rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len); + if (rc) { + CAM_ERR(CAM_MEM, "Failed to get kernel vaddr"); + goto map_fail; + } + + if (!inp->smmu_hdl) { + CAM_ERR(CAM_MEM, "Invalid SMMU handle"); + rc = -EINVAL; + goto smmu_fail; + } + + /* SHARED flag gets precedence, all other flags after it */ + if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) { + region = CAM_SMMU_REGION_SHARED; + } else { + if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE) + region = CAM_SMMU_REGION_IO; + } + + rc = cam_smmu_map_kernel_iova(inp->smmu_hdl, + buf, + CAM_SMMU_MAP_RW, + &iova, + &request_len, + region); + + if (rc < 0) { + CAM_ERR(CAM_MEM, "SMMU mapping failed"); + goto smmu_fail; + } + + smmu_hdl = inp->smmu_hdl; + + idx = cam_mem_get_slot(); + if (idx < 0) { + CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx); + rc = -ENOMEM; + cam_mem_mgr_print_tbl(); + goto slot_fail; + } + + mutex_lock(&tbl.bufq[idx].q_lock); + mem_handle = GET_MEM_HANDLE(idx, ion_fd); + tbl.bufq[idx].dma_buf = buf; + tbl.bufq[idx].fd = -1; + tbl.bufq[idx].i_ino = i_ino; + tbl.bufq[idx].flags = inp->flags; + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + 
tbl.bufq[idx].buf_handle = mem_handle; + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].kmdvaddr = kvaddr; + + cam_mem_mgr_update_iova_info_locked(tbl.bufq[idx].hdls_info, + iova, inp->smmu_hdl, inp->size, true, NULL); + + tbl.bufq[idx].len = inp->size; + tbl.bufq[idx].num_hdls = 1; + tbl.bufq[idx].is_imported = false; + kref_init(&tbl.bufq[idx].krefcount); + tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_KERNEL; + tbl.bufq[idx].dma_heap_type = heap_type; + mutex_unlock(&tbl.bufq[idx].q_lock); + + out->kva = kvaddr; + out->iova = (uint32_t)iova; + out->smmu_hdl = smmu_hdl; + out->mem_handle = mem_handle; + out->len = inp->size; + out->region = region; + + CAM_DBG(CAM_MEM, "idx=%d, dmabuf=%pK, i_ino=%lu, flags=0x%x, mem_handle=0x%x", + idx, buf, i_ino, inp->flags, mem_handle); + + return rc; +slot_fail: + cam_smmu_unmap_kernel_iova(inp->smmu_hdl, + buf, region); +smmu_fail: + cam_mem_util_unmap_cpu_va(buf, kvaddr); +map_fail: + dma_buf_put(buf); +ion_fail: + return rc; +} +EXPORT_SYMBOL(cam_mem_mgr_request_mem); + +int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp) +{ + int32_t idx; + int rc = 0; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + if (!inp) { + CAM_ERR(CAM_MEM, "Invalid argument"); + return -EINVAL; + } + + idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle"); + return -EINVAL; + } + mutex_lock(&tbl.bufq[idx].q_lock); + if (!tbl.bufq[idx].active) { + CAM_ERR(CAM_MEM, "Released buffer state should be active"); + rc = -EINVAL; + goto end; + } + + if (tbl.bufq[idx].buf_handle != inp->mem_handle) { + CAM_ERR(CAM_MEM, + "Released buf handle not matching within table"); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle); + if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper)) + CAM_DBG(CAM_MEM, + "Called unmap from here, buf_handle: %u, idx: %d", + tbl.bufq[idx].buf_handle, idx); + else { + CAM_ERR(CAM_MEM, + "Unbalanced release Called buf_handle: %u, idx: %d", + tbl.bufq[idx].buf_handle, idx); + rc = -EINVAL; + } +end: + mutex_unlock(&tbl.bufq[idx].q_lock); + return rc; +} +EXPORT_SYMBOL(cam_mem_mgr_release_mem); + +int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp, + enum cam_smmu_region_id region, + struct cam_mem_mgr_memory_desc *out) +{ + struct dma_buf *buf = NULL; + int rc = 0, ion_fd = -1; + dma_addr_t iova = 0; + size_t request_len = 0; + uint32_t mem_handle; + int32_t idx; + int32_t smmu_hdl = 0; + uintptr_t kvaddr = 0; + unsigned long i_ino = 0; + enum cam_dma_heap_type heap_type; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + if (!inp || !out) { + CAM_ERR(CAM_MEM, "Invalid param(s)"); + return -EINVAL; + } + + if (!inp->smmu_hdl) { + CAM_ERR(CAM_MEM, "Invalid SMMU handle"); + return -EINVAL; + } + + if ((region != CAM_SMMU_REGION_SECHEAP) && + (region != CAM_SMMU_REGION_FWUNCACHED)) { + CAM_ERR(CAM_MEM, "Only secondary heap supported"); + return -EINVAL; + } + + rc = cam_mem_util_get_dma_buf(inp->size, 0, CAM_MEMMGR_ALLOC_KERNEL, + &buf, &i_ino, &heap_type); + + if (rc) { + CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer"); + goto ion_fail; + } else if (!buf) { + CAM_ERR(CAM_MEM, "ION alloc returned NULL buffer"); + goto ion_fail; + } else { + CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf); + } + + if (inp->flags & CAM_MEM_FLAG_KMD_ACCESS) { + rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len); + if (rc) { + CAM_ERR(CAM_MEM, "Failed to get kernel vaddr"); + goto kmap_fail; + } + } + + rc = cam_smmu_reserve_buf_region(region, + inp->smmu_hdl, buf, &iova, &request_len); + + if (rc) { + CAM_ERR(CAM_MEM, "Reserving secondary heap failed"); + goto smmu_fail; + } + + smmu_hdl = inp->smmu_hdl; + + idx = cam_mem_get_slot(); + if (idx < 0) { + CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx); + rc = -ENOMEM; + cam_mem_mgr_print_tbl(); + goto slot_fail; + } + + mutex_lock(&tbl.bufq[idx].q_lock); + mem_handle = GET_MEM_HANDLE(idx, ion_fd); + tbl.bufq[idx].fd = -1; + tbl.bufq[idx].i_ino = i_ino; + tbl.bufq[idx].dma_buf = buf; + tbl.bufq[idx].flags = inp->flags; + _SPIN_LOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].buf_handle = mem_handle; + _SPIN_UNLOCK_PROCESS_TO_BH(&tbl.bufq[idx].idx_lock); + tbl.bufq[idx].kmdvaddr = kvaddr; + + cam_mem_mgr_update_iova_info_locked(tbl.bufq[idx].hdls_info, + iova, inp->smmu_hdl, request_len, true, NULL); + + tbl.bufq[idx].len = request_len; + tbl.bufq[idx].num_hdls = 1; + tbl.bufq[idx].is_imported = false; + kref_init(&tbl.bufq[idx].krefcount); + 
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_KERNEL; + tbl.bufq[idx].dma_heap_type = heap_type; + mutex_unlock(&tbl.bufq[idx].q_lock); + + out->kva = kvaddr; + out->iova = (uint32_t)iova; + out->smmu_hdl = smmu_hdl; + out->mem_handle = mem_handle; + out->len = request_len; + out->region = region; + + CAM_DBG(CAM_MEM, "Alloc success idx: %d len: %zu mem_handle: 0x%x", + idx, request_len, mem_handle); + + return rc; + +slot_fail: + cam_smmu_release_buf_region(region, smmu_hdl); +smmu_fail: + if (region == CAM_SMMU_REGION_FWUNCACHED) + cam_mem_util_unmap_cpu_va(buf, kvaddr); +kmap_fail: + dma_buf_put(buf); +ion_fail: + return rc; +} +EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region); + +static void *cam_mem_mgr_user_dump_buf( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_mem_buf_queue *buf = NULL; + uint64_t *addr; + int i = 0; + + buf = (struct cam_mem_buf_queue *)dump_struct; + + addr = (uint64_t *)addr_ptr; + + *addr++ = buf->timestamp.tv_sec; + *addr++ = buf->timestamp.tv_nsec / NSEC_PER_USEC; + *addr++ = buf->fd; + *addr++ = buf->i_ino; + *addr++ = buf->buf_handle; + *addr++ = buf->len; + *addr++ = buf->align; + *addr++ = buf->flags; + *addr++ = buf->kmdvaddr; + *addr++ = buf->is_imported; + *addr++ = buf->is_internal; + *addr++ = buf->num_hdls; + for (i = 0; i < tbl.max_hdls_supported; i++) { + if (!buf->hdls_info[i].addr_updated) + continue; + + *addr++ = buf->hdls_info[i].iommu_hdl; + *addr++ = buf->hdls_info[i].vaddr; + } + + return addr; +} + +int cam_mem_mgr_dump_user(struct cam_dump_req_cmd *dump_req) +{ + int rc = 0; + int i; + struct cam_common_hw_dump_args dump_args; + size_t buf_len; + size_t remain_len; + uint32_t min_len; + uintptr_t cpu_addr; + + rc = cam_mem_get_cpu_buf(dump_req->buf_handle, + &cpu_addr, &buf_len); + if (rc) { + CAM_ERR(CAM_MEM, "Invalid handle %u rc %d", + dump_req->buf_handle, rc); + return rc; + } + if (buf_len <= dump_req->offset) { + CAM_WARN(CAM_MEM, "Dump buffer overshoot len %zu offset %zu", + buf_len, 
dump_req->offset); + cam_mem_put_cpu_buf(dump_req->buf_handle); + return -ENOSPC; + } + + remain_len = buf_len - dump_req->offset; + min_len = + (CAM_MEM_BUFQ_MAX * + (CAM_MEM_MGR_DUMP_BUF_NUM_WORDS * sizeof(uint64_t) + + sizeof(struct cam_common_hw_dump_header))); + + if (remain_len < min_len) { + CAM_WARN(CAM_MEM, "Dump buffer exhaust remain %zu min %u", + remain_len, min_len); + cam_mem_put_cpu_buf(dump_req->buf_handle); + return -ENOSPC; + } + + dump_args.req_id = dump_req->issue_req_id; + dump_args.cpu_addr = cpu_addr; + dump_args.buf_len = buf_len; + dump_args.offset = dump_req->offset; + dump_args.ctxt_to_hw_map = NULL; + + for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) { + mutex_lock(&tbl.bufq[i].q_lock); + if (tbl.bufq[i].active) { + rc = cam_common_user_dump_helper(&dump_args, + cam_mem_mgr_user_dump_buf, + &tbl.bufq[i], + sizeof(uint64_t), "MEM_MGR_BUF.%d:", i); + if (rc) { + CAM_ERR(CAM_CRM, + "Dump state info failed, rc: %d", + rc); + mutex_unlock(&tbl.bufq[i].q_lock); + return rc; + } + } + mutex_unlock(&tbl.bufq[i].q_lock); + } + + dump_req->offset = dump_args.offset; + cam_mem_put_cpu_buf(dump_req->buf_handle); + + return rc; +} + + +int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp) +{ + int32_t rc = 0, idx, entry_idx; + + if (!atomic_read(&cam_mem_mgr_state)) { + CAM_ERR(CAM_MEM, "failed. 
mem_mgr not initialized"); + return -EINVAL; + } + + if (!inp) { + CAM_ERR(CAM_MEM, "Invalid argument"); + return -EINVAL; + } + + if ((inp->region != CAM_SMMU_REGION_SECHEAP) && + (inp->region != CAM_SMMU_REGION_FWUNCACHED)) { + CAM_ERR(CAM_MEM, "Only secondary heap supported"); + return -EINVAL; + } + + idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle); + if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { + CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle"); + return -EINVAL; + } + + mutex_lock(&tbl.bufq[idx].q_lock); + if (!tbl.bufq[idx].active) { + CAM_ERR(CAM_MEM, "Released buffer state should be active"); + rc = -EINVAL; + goto end; + } + + if (tbl.bufq[idx].buf_handle != inp->mem_handle) { + CAM_ERR(CAM_MEM, + "Released buf handle not matching within table"); + rc = -EINVAL; + goto end; + } + + if (tbl.bufq[idx].num_hdls != 1) { + CAM_ERR(CAM_MEM, + "Sec heap region should have only one smmu hdl"); + rc = -ENODEV; + goto end; + } + + if (!cam_mem_mgr_get_hwva_entry_idx(inp->smmu_hdl, &entry_idx)) { + CAM_ERR(CAM_MEM, + "Passed SMMU handle not a valid handle"); + rc = -ENODEV; + goto end; + } + + if (CAM_SMMU_GET_BASE_HDL(inp->smmu_hdl) != + tbl.bufq[idx].hdls_info[entry_idx].iommu_hdl) { + CAM_ERR(CAM_MEM, + "Passed SMMU handle doesn't match with internal hdl"); + rc = -ENODEV; + goto end; + } + + rc = cam_smmu_release_buf_region(inp->region, inp->smmu_hdl); + if (rc) { + CAM_ERR(CAM_MEM, + "Sec heap region release failed"); + rc = -ENODEV; + goto end; + } + + CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle); + if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper)) + CAM_DBG(CAM_MEM, + "Called unmap from here, buf_handle: %u, idx: %d", + inp->mem_handle, idx); + else { + CAM_ERR(CAM_MEM, + "Unbalanced release Called buf_handle: %u, idx: %d", + inp->mem_handle, idx); + rc = -EINVAL; + } +end: + mutex_unlock(&tbl.bufq[idx].q_lock); + return rc; +} +EXPORT_SYMBOL(cam_mem_mgr_free_memory_region); + +#ifdef CONFIG_CAM_PRESIL +struct dma_buf 
*cam_mem_mgr_get_dma_buf(int fd) +{ + struct dma_buf *dmabuf = NULL; + + dmabuf = dma_buf_get(fd); + if (IS_ERR_OR_NULL((void *)(dmabuf))) { + CAM_ERR(CAM_MEM, "Failed to import dma_buf for fd"); + return NULL; + } + + CAM_INFO(CAM_PRESIL, "Received DMA Buf* %pK", dmabuf); + + return dmabuf; +} + +int cam_mem_mgr_put_dmabuf_from_fd(uint64_t input_dmabuf) +{ + struct dma_buf *dmabuf = (struct dma_buf *)(uint64_t)input_dmabuf; + int idx = 0; + + CAM_INFO(CAM_PRESIL, "Received dma_buf :%pK", dmabuf); + + if (!dmabuf) { + CAM_ERR(CAM_PRESIL, "NULL to import dma_buf fd"); + return -EINVAL; + } + + for (idx = 0; idx < CAM_MEM_BUFQ_MAX; idx++) { + if ((tbl.bufq[idx].dma_buf != NULL) && (tbl.bufq[idx].dma_buf == dmabuf)) { + if (tbl.bufq[idx].presil_params.refcount) + tbl.bufq[idx].presil_params.refcount--; + else + CAM_ERR(CAM_PRESIL, "Unbalanced dmabuf put: %pK", dmabuf); + + if (!tbl.bufq[idx].presil_params.refcount) { + dma_buf_put(dmabuf); + cam_mem_mgr_reset_presil_params(idx); + CAM_DBG(CAM_PRESIL, "Done dma_buf_put for %pK", dmabuf); + } + } + } + + return 0; +} + +int cam_mem_mgr_get_fd_from_dmabuf(uint64_t input_dmabuf) +{ + int fd_for_dmabuf = -1; + struct dma_buf *dmabuf = (struct dma_buf *)(uint64_t)input_dmabuf; + int idx = 0; + + CAM_DBG(CAM_PRESIL, "Received dma_buf :%pK", dmabuf); + + if (!dmabuf) { + CAM_ERR(CAM_PRESIL, "NULL to import dma_buf fd"); + return -EINVAL; + } + + for (idx = 0; idx < CAM_MEM_BUFQ_MAX; idx++) { + if ((tbl.bufq[idx].dma_buf != NULL) && (tbl.bufq[idx].dma_buf == dmabuf)) { + CAM_DBG(CAM_PRESIL, + "Found entry for request from Presil UMD Daemon at %d, dmabuf %pK fd_for_umd_daemon %d refcount: %d", + idx, tbl.bufq[idx].dma_buf, + tbl.bufq[idx].presil_params.fd_for_umd_daemon, + tbl.bufq[idx].presil_params.refcount); + + if (tbl.bufq[idx].presil_params.fd_for_umd_daemon < 0) { + if (fd_for_dmabuf == -1) { + fd_for_dmabuf = dma_buf_fd(dmabuf, O_CLOEXEC); + if (fd_for_dmabuf < 0) { + CAM_ERR(CAM_PRESIL, "get fd fail, fd_for_dmabuf=%d", 
+ fd_for_dmabuf); + return -EINVAL; + } + } + + tbl.bufq[idx].presil_params.fd_for_umd_daemon = fd_for_dmabuf; + CAM_INFO(CAM_PRESIL, + "Received generated idx %d fd_for_dmabuf Buf* %lld", idx, + fd_for_dmabuf); + } else { + fd_for_dmabuf = tbl.bufq[idx].presil_params.fd_for_umd_daemon; + CAM_INFO(CAM_PRESIL, + "Received existing at idx %d fd_for_dmabuf Buf* %lld", idx, + fd_for_dmabuf); + } + + tbl.bufq[idx].presil_params.refcount++; + } else { + CAM_DBG(CAM_MEM, + "Not found dmabuf at idx=%d, dma_buf %pK handle 0x%0x active %d ", + idx, tbl.bufq[idx].dma_buf, tbl.bufq[idx].buf_handle, + tbl.bufq[idx].active); + } + } + + return (int)fd_for_dmabuf; +} + +int cam_mem_mgr_send_buffer_to_presil(int32_t mmu_hdl, int32_t buf_handle) +{ + int rc = 0; + + /* Sending Presil IO Buf to PC side ( as iova start address indicates) */ + uint64_t io_buf_addr; + size_t io_buf_size; + int i, j, fd = -1, idx = 0, entry = -1, iommu_hdl; + uint8_t *iova_ptr = NULL; + uint64_t dmabuf = 0; + bool is_mapped_in_cb = false; + uintptr_t cpu_vaddr = 0; + size_t cpu_len; + + idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle); + iommu_hdl = CAM_SMMU_GET_BASE_HDL(mmu_hdl); + + if (!cam_mem_mgr_get_hwva_entry_idx(iommu_hdl, &entry)) { + CAM_ERR(CAM_PRESIL, "no iommu entry, idx=%d, FD %d handle 0x%0x active %d", + idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].active); + return -EINVAL; + } + + for (i = 0; i < tbl.bufq[idx].num_hdls; i++) { + if ((tbl.bufq[idx].hdls_info[entry].iommu_hdl == iommu_hdl) && + (tbl.bufq[idx].hdls_info[entry].addr_updated)) { + CAM_DBG(CAM_PRESIL, "found idx %d iommu 0x%x hdl 0x%0x bufq-iommu 0x%x", + idx, iommu_hdl, buf_handle, + tbl.bufq[idx].hdls_info[entry].iommu_hdl); + is_mapped_in_cb = true; + } + } + + if (!is_mapped_in_cb) { + for (j = 0; j < CAM_MEM_BUFQ_MAX; j++) { + if (tbl.bufq[j].i_ino == tbl.bufq[idx].i_ino) { + for (i = 0; i < tbl.bufq[j].num_hdls; i++) { + if (tbl.bufq[j].hdls_info[entry].iommu_hdl == iommu_hdl) + is_mapped_in_cb = true; + } + 
} + } + + if (!is_mapped_in_cb) { + CAM_DBG(CAM_PRESIL, + "Still Could not find idx=%d, FD %d buf_handle 0x%0x entry %d", + idx, GET_FD_FROM_HANDLE(buf_handle), buf_handle, entry); + + /* + * Okay to return 0, since this function also gets called for buffers that + * are shared only between umd/kmd, these may not be mapped with smmu + */ + return 0; + } + } + + if ((tbl.bufq[idx].buf_handle != 0) && (tbl.bufq[idx].active) && + (tbl.bufq[idx].buf_handle == buf_handle)) { + CAM_DBG(CAM_PRESIL, + "Found dmabuf in bufq idx %d, FD %d handle 0x%0x dmabuf %pK", + idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].dma_buf); + dmabuf = (uint64_t)tbl.bufq[idx].dma_buf; + fd = tbl.bufq[idx].fd; + } else { + CAM_ERR(CAM_PRESIL, + "Could not find dmabuf Invalid Mem idx=%d, FD %d handle 0x%0x active %d", + idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].active); + return -EINVAL; + } + + rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &io_buf_addr, &io_buf_size, + NULL, NULL); + if (rc || NULL == (void *)io_buf_addr) { + CAM_DBG(CAM_PRESIL, "Invalid ioaddr : 0x%x, fd = %d, dmabuf = %pK", + io_buf_addr, fd, dmabuf); + return -EINVAL; + } + + iova_ptr = (uint8_t *)io_buf_addr; + CAM_INFO(CAM_PRESIL, "Sending buffer with ioaddr : 0x%x, fd = %d, dmabuf = %pK", + io_buf_addr, fd, dmabuf); + + rc = cam_mem_get_cpu_buf(buf_handle, &cpu_vaddr, &cpu_len); + if (rc || !cpu_vaddr) { + CAM_ERR(CAM_PRESIL, "Unable to get cpu ptr buf_hdl: 0x%0x iommu_hdl: 0x%0x", + buf_handle, iommu_hdl); + return -EINVAL; + } + + if (cpu_len != io_buf_size) { + CAM_WARN(CAM_PRESIL, + "Size mismatch ioaddr 0x%x offset %d size %d fd = %d dmabuf %pK cpu_len %d", + io_buf_addr, 0, io_buf_size, fd, dmabuf, cpu_len); + } + + rc = cam_presil_send_buffer(dmabuf, 0, 0, (uint32_t)io_buf_size, + (uint64_t)iova_ptr, cpu_vaddr, false); + if (rc) + CAM_ERR(CAM_PRESIL, "failed to send buffer: 0x%0x iommu_hdl: 0x%0x", + buf_handle, iommu_hdl); + + cam_mem_put_cpu_buf(buf_handle); + + return rc; +} 
+ +int cam_mem_mgr_send_all_buffers_to_presil(int32_t iommu_hdl) +{ + int idx = 0; + int rc = 0; + int32_t fd_already_sent[128]; + int fd_already_sent_count = 0; + int fd_already_index = 0; + int fd_already_sent_found = 0; + + + memset(&fd_already_sent, 0x0, sizeof(fd_already_sent)); + + for (idx = 0; idx < CAM_MEM_BUFQ_MAX; idx++) { + if ((tbl.bufq[idx].buf_handle != 0) && (tbl.bufq[idx].active)) { + CAM_DBG(CAM_PRESIL, "Sending %d, FD %d handle 0x%0x", idx, tbl.bufq[idx].fd, + tbl.bufq[idx].buf_handle); + fd_already_sent_found = 0; + + for (fd_already_index = 0; fd_already_index < fd_already_sent_count; + fd_already_index++) { + + if (fd_already_sent[fd_already_index] == tbl.bufq[idx].fd) { + fd_already_sent_found = 1; + CAM_DBG(CAM_PRESIL, + "fd_already_sent %d, FD %d handle 0x%0x flags=0x%0x", + idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, + tbl.bufq[idx].flags); + } + } + + if (fd_already_sent_found) + continue; + + CAM_DBG(CAM_PRESIL, "Sending %d, FD %d handle 0x%0x flags=0x%0x", idx, + tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].flags); + + rc = cam_mem_mgr_send_buffer_to_presil(iommu_hdl, tbl.bufq[idx].buf_handle); + fd_already_sent[fd_already_sent_count++] = tbl.bufq[idx].fd; + + } else { + CAM_DBG(CAM_PRESIL, "Invalid Mem idx=%d, FD %d handle 0x%0x active %d", + idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, + tbl.bufq[idx].active); + } + } + + return rc; +} +EXPORT_SYMBOL(cam_mem_mgr_send_all_buffers_to_presil); + +int cam_mem_mgr_retrieve_buffer_from_presil(int32_t buf_handle, uint32_t buf_size, + uint32_t offset, int32_t iommu_hdl) +{ + int rc = 0; + + /* Receive output buffer from Presil IO Buf to PC side (as iova start address indicates) */ + uint64_t io_buf_addr; + size_t io_buf_size; + uint64_t dmabuf = 0; + int fd = 0; + uint8_t *iova_ptr = NULL; + int idx = 0; + uintptr_t cpu_vaddr = 0; + size_t cpu_len; + + CAM_DBG(CAM_PRESIL, "buf handle 0x%0x ", buf_handle); + rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &io_buf_addr, 
&io_buf_size, + NULL, NULL); + if (rc) { + CAM_ERR(CAM_PRESIL, "Unable to get IOVA for buffer buf_hdl: 0x%0x iommu_hdl: 0x%0x", + buf_handle, iommu_hdl); + return -EINVAL; + } + + iova_ptr = (uint8_t *)io_buf_addr; + iova_ptr += offset; // correct target address to start writing buffer to. + + if (!buf_size) { + buf_size = io_buf_size; + CAM_DBG(CAM_PRESIL, "Updated buf_size from Zero to 0x%0x", buf_size); + } + + fd = GET_FD_FROM_HANDLE(buf_handle); + + idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle); + if ((tbl.bufq[idx].buf_handle != 0) && (tbl.bufq[idx].active) && + (tbl.bufq[idx].buf_handle == buf_handle)) { + CAM_DBG(CAM_PRESIL, "Found dmabuf in bufq idx %d, FD %d handle 0x%0x dmabuf %pK", + idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].dma_buf); + dmabuf = (uint64_t)tbl.bufq[idx].dma_buf; + } else { + CAM_ERR(CAM_PRESIL, + "Could not find dmabuf Invalid Mem idx=%d, FD %d handle 0x%0x active %d ", + idx, tbl.bufq[idx].fd, tbl.bufq[idx].buf_handle, tbl.bufq[idx].active); + } + + rc = cam_mem_get_cpu_buf(buf_handle, &cpu_vaddr, &cpu_len); + if (rc || !cpu_vaddr) { + CAM_ERR(CAM_PRESIL, "Unable to get cpu ptr buf_hdl: 0x%0x iommu_hdl: 0x%0x", + buf_handle, iommu_hdl); + return -EINVAL; + } + + if (cpu_len != buf_size) { + CAM_WARN(CAM_PRESIL, + "Size mismatch ioaddr 0x%x offset %d size %d fd = %d dmabuf %pK cpu_len %d", + io_buf_addr, 0, buf_size, fd, dmabuf, cpu_len); + } + + CAM_DBG(CAM_PRESIL, + "Retrieving buffer ioaddr 0x%x offset %d size = %d fd = %d dmabuf = %pK cpuaddr 0x%x", + io_buf_addr, offset, buf_size, fd, dmabuf, cpu_vaddr); + + rc = cam_presil_retrieve_buffer(dmabuf, 0, 0, (uint32_t)buf_size, (uint64_t)io_buf_addr, + cpu_vaddr, false); + if (rc) { + CAM_ERR(CAM_PRESIL, "failed to retrieve buffer: 0x%0x iommu_hdl: 0x%0x", + buf_handle, iommu_hdl); + goto putbuf; + } + + CAM_INFO(CAM_PRESIL, + "Retrieved buffer ioaddr 0x%x offset %d size = %d fd = %d dmabuf = %pK", + io_buf_addr, 0, buf_size, fd, dmabuf); + +putbuf: + 
cam_mem_put_cpu_buf(buf_handle); + + return rc; +} + +#else /* ifdef CONFIG_CAM_PRESIL */ +struct dma_buf * cam_mem_mgr_get_dma_buf(int fd) +{ + return NULL; +} + +int cam_mem_mgr_send_all_buffers_to_presil(int32_t iommu_hdl) +{ + return 0; +} + +int cam_mem_mgr_send_buffer_to_presil(int32_t iommu_hdl, int32_t buf_handle) +{ + return 0; +} + +int cam_mem_mgr_retrieve_buffer_from_presil(int32_t buf_handle, + uint32_t buf_size, + uint32_t offset, + int32_t iommu_hdl) +{ + return 0; +} +#endif /* ifdef CONFIG_CAM_PRESIL */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr.h new file mode 100644 index 0000000000..ecab78af72 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr.h @@ -0,0 +1,354 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_MEM_MGR_H_ +#define _CAM_MEM_MGR_H_ + +#include +#include +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#include +#ifdef CONFIG_ARCH_QTI_VM +#include "cam_compat.h" +#endif +#endif +#include +#include "cam_mem_mgr_api.h" + +/* Enum for possible mem mgr states */ +enum cam_mem_mgr_state { + CAM_MEM_MGR_UNINITIALIZED, + CAM_MEM_MGR_INITIALIZED, +}; + +/*Enum for memory allocation initiator */ +enum cam_mem_mgr_allocator { + CAM_MEMMGR_ALLOC_USER, + CAM_MEMMGR_ALLOC_KERNEL, +}; + +/*Enum for possible SMMU operations */ +enum cam_smmu_mapping_client { + CAM_SMMU_MAPPING_USER, + CAM_SMMU_MAPPING_KERNEL, +}; + +#ifdef CONFIG_CAM_PRESIL +struct cam_presil_dmabuf_params { + int32_t fd_for_umd_daemon; + uint32_t refcount; +}; +#endif + +/** + * struct cam_mem_buf_hw_vaddr_info + * + * @iommu_hdl: IOMMU handle for the given bank + * @vaddr: IOVA of the buffer + * @len: cached length for a given handle + * @ref_count: ref count for buffer + * @addr_updated: Indicates if entry is updated only for addr caching + * @valid_mapping: Indicates if entry is indeed a valid mapping for this buf + * + */ +struct cam_mem_buf_hw_hdl_info { + int32_t iommu_hdl; + dma_addr_t vaddr; + size_t len; + struct kref *ref_count; + + bool addr_updated; + bool valid_mapping; +}; + +/** + * enum cam_dma_heap_type + * + * @CAM_SVM_HEAP_DEVICE: The memory heap that is used for one time kernel + * space allocations. This is created during probe + * and lives forever. + * @CAM_SVM_HEAP_SESSION: The memory heap that is used for session based + * user space buffers. This is created at the start + * of a session and destroyed at the end of it. + */ +enum cam_dma_heap_type { + CAM_SVM_HEAP_DEVICE, + CAM_SVM_HEAP_SESSION, + CAM_HEAP_MAX, +}; + +/** + * struct cam_mem_buf_queue + * + * @dma_buf: pointer to the allocated dma_buf in the table + * @q_lock: mutex lock for buffer + * @fd: file descriptor of buffer + * @i_ino: inode number of this dmabuf. 
Uniquely identifies a buffer + * @buf_handle: unique handle for buffer + * @align: alignment for allocation + * @len: size of buffer + * @flags: attributes of buffer + * @num_hdls: number of valid handles + * @vaddr_info: Array of IOVA addresses mapped for different devices + * using the same indexing as SMMU + * @kmdvaddr: Kernel virtual address + * @active: state of the buffer + * @release_deferred: Buffer is deferred for release. + * @is_imported: Flag indicating if buffer is imported from an FD in user space + * @is_internal: Flag indicating kernel allocated buffer + * @timestamp: Timestamp at which this entry in tbl was made + * @krefcount: Reference counter to track whether the buffer is + * mapped and in use by kmd + * @smmu_mapping_client: Client buffer (User or kernel) + * @buf_name: Name associated with buffer. + * @presil_params: Parameters specific to presil environment + * @dma_heap_type: Indicating memory heap type + * @urefcount: Reference counter to track whether the buffer is + * mapped and in use by umd + * @idx_lock: spinlock for buffer + */ +struct cam_mem_buf_queue { + struct dma_buf *dma_buf; + struct mutex q_lock; + int32_t fd; + unsigned long i_ino; + int32_t buf_handle; + int32_t align; + size_t len; + uint32_t flags; + uintptr_t kmdvaddr; + int32_t num_hdls; + struct cam_mem_buf_hw_hdl_info *hdls_info; + bool active; + bool release_deferred; + bool is_imported; + bool is_internal; + struct timespec64 timestamp; + struct kref krefcount; + enum cam_smmu_mapping_client smmu_mapping_client; + char buf_name[CAM_DMA_BUF_NAME_LEN]; +#ifdef CONFIG_CAM_PRESIL + struct cam_presil_dmabuf_params presil_params; +#endif + enum cam_dma_heap_type dma_heap_type; + struct kref urefcount; + spinlock_t idx_lock; +}; + +/** + * struct cam_mem_table + * + * @m_lock: mutex lock for table + * @bitmap: bitmap of the mem mgr utility + * @bits: max bits of the utility + * @bufq: array of buffers + * @dbg_buf_idx: debug buffer index to get usecases info + * 
@max_hdls_supported: Maximum number of SMMU device handles supported + * A buffer can only be mapped for these number of + * device context banks + * @max_hdls_info_size: Size of the hdls array allocated per buffer, + * computed value to be used in driver + * @force_cache_allocs: Force all internal buffer allocations with cache + * @need_shared_buffer_padding: Whether padding is needed for shared buffer + * allocations. + * @csf_version: Camera security framework version + * @system_heap: Handle to system heap + * @system_movable_heap: Handle to system movable heap + * @system_uncached_heap: Handle to system uncached heap + * @camera_heap: Handle to camera heap + * @camera_uncached_heap: Handle to camera uncached heap + * @secure_display_heap: Handle to secure display heap + * @ubwc_p_heap: Handle to ubwc-p heap + * @ubwc_p_movable_heap: Handle to ubwc-p movable heap + * @cam_device_heap: Handle of svm device heap + * @cam_session_heap: Handle of svm session heap + * @cam_device_mem_pool_handle: Handle of device memory pool + * @cam_session_mem_pool_handle: Handle of session memory pool + * @device_heap_alloc_cnt: Buffer count alloc from device heap + * @session_heap_alloc_cnt: Buffer count alloc from session heap + * @device_heap_size: Device pool size + * @session_heap_size: Session pool size + * + */ +struct cam_mem_table { + struct mutex m_lock; + void *bitmap; + size_t bits; + struct cam_mem_buf_queue bufq[CAM_MEM_BUFQ_MAX]; + size_t dbg_buf_idx; + int32_t max_hdls_supported; + size_t max_hdls_info_size; + bool force_cache_allocs; + bool need_shared_buffer_padding; + struct cam_csf_version csf_version; +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) + struct dma_heap *system_heap; + struct dma_heap *system_movable_heap; + struct dma_heap *system_uncached_heap; + struct dma_heap *camera_heap; + struct dma_heap *camera_uncached_heap; + struct dma_heap *secure_display_heap; + struct dma_heap *ubwc_p_heap; + struct dma_heap *ubwc_p_movable_heap; +#ifdef CONFIG_ARCH_QTI_VM 
+ struct dma_heap *cam_device_heap; + struct dma_heap *cam_session_heap; + void *cam_device_mem_pool_handle; + void *cam_session_mem_pool_handle; + uint32_t device_heap_alloc_cnt; + uint32_t session_heap_alloc_cnt; + uint32_t device_heap_size; + uint32_t session_heap_size; +#endif +#endif + +}; + +/** + * struct cam_mem_table_mini_dump + * + * @bufq: array of buffers + * @dbg_buf_idx: debug buffer index to get usecases info + * @alloc_profile_enable: Whether to enable alloc profiling + * @dbg_buf_idx: debug buffer index to get usecases info + * @force_cache_allocs: Force all internal buffer allocations with cache + * @need_shared_buffer_padding: Whether padding is needed for shared buffer + * allocations. + */ +struct cam_mem_table_mini_dump { + struct cam_mem_buf_queue bufq[CAM_MEM_BUFQ_MAX]; + size_t dbg_buf_idx; + bool alloc_profile_enable; + bool force_cache_allocs; + bool need_shared_buffer_padding; +}; + +/** + * @brief: Allocates and maps buffer + * + * @cmd: Allocation information + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd_v2 *cmd); + +/** + * @brief: Releases a buffer reference + * + * @cmd: Buffer release information + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd); + +/** + * @brief Maps a buffer + * + * @cmd: Buffer mapping information + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd_v2 *cmd); + +/** + * @brief: Perform cache ops on the buffer + * + * @cmd: Cache ops information + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd); + +/** + * @brief: Perform cpu access ops on the buffer + * + * @cmd: CPU access ops information + * + * @return Status of operation. 
Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_cpu_access_op(struct cam_mem_cpu_access_op *cmd); + +/** + * @brief: Provide all supported heap capabilities + * + * @heap_mask: Update mask for all supported heaps + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_check_for_supported_heaps(uint64_t *heap_mask); + +/** + * @brief: Initializes the memory manager + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_init(void); + +/** + * @brief: Tears down the memory manager + * + * @return None + */ +void cam_mem_mgr_deinit(void); + +#ifdef CONFIG_CAM_PRESIL +/** + * @brief: Put dma-buf for input dmabuf + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_put_dmabuf_from_fd(uint64_t input_dmabuf); + +/** + * @brief: Create a fd for dma-buf + * + * @return Status of operation. Negative in case of error. Zero or + * Positive otherwise. + */ +int cam_mem_mgr_get_fd_from_dmabuf(uint64_t input_dmabuf); +#endif /* ifdef CONFIG_CAM_PRESIL */ + +/** + * @brief: Copy buffer content to presil mem for all buffers of + * iommu handle + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_send_all_buffers_to_presil(int32_t iommu_hdl); + +/** + * @brief: Copy buffer content of single buffer to presil + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_send_buffer_to_presil(int32_t iommu_hdl, int32_t buf_handle); + +/** + * @brief: Copy back buffer content of single buffer from + * presil + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_retrieve_buffer_from_presil(int32_t buf_handle, + uint32_t buf_size, uint32_t offset, int32_t iommu_hdl); + +/** + * @brief: Dump mem mgr info into user buffer + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_mem_mgr_dump_user(struct cam_dump_req_cmd *dump_req); + +#ifdef CONFIG_ARCH_QTI_VM +/** + * @brief: Set device and session heap size to mem mgr info + * + * @return None. + */ +void cam_mem_mgr_set_svm_heap_sizes(uint32_t device_heap_size, + uint32_t session_heap_size); +#endif +#endif /* _CAM_MEM_MGR_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr_api.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr_api.h new file mode 100644 index 0000000000..2dab8a0978 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_mem_mgr_api.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_MEM_MGR_API_H_ +#define _CAM_MEM_MGR_API_H_ + +#include +#include "cam_smmu_api.h" + +/** + * CAM_MEM_ALLOC : Allocates memory without initializing it to zero. + * + * @size : size of memory requested for allocation + * @flags : GFP flags (GFP_KERNEL, GFP_DMA, GFP_ATOMIC etc) + * + * NOTE: GFP_ATOMIC is not supported by vmalloc, so if allocation + * fails with kmalloc for GFP_ATOMIC, then fallback option won't + * be present. + */ +#define CAM_MEM_ALLOC(size, flags) \ + kvmalloc(size, flags) + +/** + * CAM_MEM_ZALLOC : Allocates memory and zero initialize it. + * + * @size : size of memory requested for allocation + * @flags : GFP flags (GPF_KERNEL, GFP_DMA, GPF_ATOMIC etc) + * + * NOTE: GFP_ATOMIC is not supported by vmalloc, so if allocation + * fails with kmalloc for GFP_ATOMIC, then fallback option won't + * be present. + */ +#define CAM_MEM_ZALLOC(size, flags) \ + kvzalloc(size, flags) + +/** + * CAM_MEM_ZALLOC_ARRAY : Allocates memory for array and zero initialises it. 
+ * + * @count : count of number of elements in array + * @size : size of each element + * @flags : GFP flags (GFP_KERNEL, GFP_DMA, GFP_ATOMIC etc) + * + * NOTE: GFP_ATOMIC is not supported by vmalloc, so if allocation + * fails with kmalloc for GFP_ATOMIC, then fallback option won't + * be present. + */ +#define CAM_MEM_ZALLOC_ARRAY(count, size, flags) \ + kvcalloc(count, size, flags) + +/** + * CAM_MEM_FREE : Frees memory without initializing it to zero. + * + * @addr : address of data object to be freed + */ +#define CAM_MEM_FREE(addr) \ + kvfree(addr) + +/** + * CAM_MEM_ZFREE : Frees memory and zero initialize it. + * + * @addr : address of data object to be freed + * @size : size of the data object + */ +#define CAM_MEM_ZFREE(addr, size) \ + if (likely(!ZERO_OR_NULL_PTR(addr))) { \ + memset((void *)addr, 0x0, size); \ + kvfree(addr); \ + } + +/** + * struct cam_mem_mgr_request_desc + * + * @size : Size of memory requested for allocation + * @align : Alignment of requested memory + * @smmu_hdl: SMMU handle to identify context bank where memory will be mapped + * @flags: Flags to indicate cached/uncached property + */ +struct cam_mem_mgr_request_desc { + uint64_t size; + uint64_t align; + int32_t smmu_hdl; + uint32_t flags; +}; + +/** + * struct cam_mem_mgr_memory_desc + * + * @kva : Kernel virtual address of allocated memory + * @iova : IOVA of allocated memory + * @smmu_hdl : SMMU handle of allocated memory + * @mem_handle : Mem handle identifying allocated memory + * @len : Length of allocated memory + * @region : Region to which allocated memory belongs + */ +struct cam_mem_mgr_memory_desc { + uintptr_t kva; + uint32_t iova; + int32_t smmu_hdl; + uint32_t mem_handle; + uint64_t len; + enum cam_smmu_region_id region; +}; + +/** + * @brief: Requests a memory buffer + * + * @inp: Information specifying requested buffer properties + * @out: Information about allocated buffer + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp, + struct cam_mem_mgr_memory_desc *out); + +/** + * @brief: Releases a memory buffer + * + * @inp: Information specifying buffer to be released + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp); + +/** + * @brief: Returns IOVA information about buffer + * + * @buf_handle: Handle of the buffer + * @mmu_handle: SMMU handle where buffer is mapped + * @iova_ptr : Pointer to mmu's iova + * @len_ptr : Length of the buffer + * @flags : Flags the buffer was allocated with + * @buf_tracker: List of buffers we want to keep ref counts on + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle, + dma_addr_t *iova_ptr, size_t *len_ptr, uint32_t *flags, + struct list_head *buf_tracker); + +/** + * @brief: This indicates begin of CPU access. + * Also returns CPU address information about DMA buffer + * + * @buf_handle: Handle for the buffer + * @vaddr_ptr : pointer to kernel virtual address + * @len : Length of the buffer + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, + size_t *len); + +/** + * @brief: This indicates end of CPU access + * + * @buf_handle: Handle for the buffer + * + */ +void cam_mem_put_cpu_buf(int32_t buf_handle); + +/** + * @brief: decrements kref reference for a buf handle + * + * @buf_handle: Handle for the buffer + * + */ +void cam_mem_put_kref(int32_t buf_handle); + +static inline bool cam_mem_is_secure_buf(int32_t buf_handle) +{ + return CAM_MEM_MGR_IS_SECURE_HDL(buf_handle); +} + +/** + * @brief: Reserves a memory region + * + * @inp: Information specifying requested region properties + * @region : Region which is to be reserved + * @out : Information about reserved region + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp, + enum cam_smmu_region_id region, + struct cam_mem_mgr_memory_desc *out); + +/** + * @brief: Frees a memory region + * + * @inp : Information about region which is to be freed + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp); + +/** + * @brief: Translate fd into dmabuf + * + * @inp : fd for buffer + * + * @return dmabuf . + */ +struct dma_buf * cam_mem_mgr_get_dma_buf(int fd); + +#endif /* _CAM_MEM_MGR_API_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core.c b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core.c new file mode 100644 index 0000000000..6db8700aa6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core.c @@ -0,0 +1,6308 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */
+
+/*
+ * NOTE(review): the <...> include targets below were missing in the patch as
+ * reviewed (likely stripped by tooling); reconstructed -- verify against
+ * original source. A duplicate include of "cam_req_mgr_debug.h" was removed.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_core.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_req_mgr_debug.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_common_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_cpas_api.h"
+
+static struct cam_req_mgr_core_device *g_crm_core_dev;
+static struct cam_req_mgr_core_link g_links[MAXIMUM_LINKS_CAPACITY];
+
+/* Invalidate per-pd apply bookkeeping (current and previous) on a link. */
+static void __cam_req_mgr_reset_apply_data(struct cam_req_mgr_core_link *link)
+{
+	int pd;
+
+	for (pd = 0; pd < CAM_PIPELINE_DELAY_MAX; pd++) {
+		link->req.apply_data[pd].req_id = -1;
+		link->req.prev_apply_data[pd].req_id = -1;
+	}
+}
+
+/* Mark every slot of the request-state monitor ring as invalid. */
+static void __cam_req_mgr_reset_state_monitor_array(
+	struct cam_req_mgr_core_link *link)
+{
+	int i = 0;
+	struct cam_req_mgr_state_monitor *state_monitor;
+
+	for (i = 0; i < MAX_REQ_STATE_MONITOR_NUM; i++) {
+		state_monitor = &link->req.state_monitor[i];
+		state_monitor->req_state = CAM_CRM_STATE_INVALID;
+		state_monitor->req_id = -1;
+		state_monitor->frame_id = -1;
+		memset(&state_monitor->time_stamp, 0, sizeof(struct timespec64));
+		memset(&state_monitor->name, 0, sizeof(state_monitor->name));
+	}
+}
+
+/* Return a link to its pristine AVAILABLE state; safe against concurrent
+ * readers via reset_link and link_state spinlocks.
+ */
+void cam_req_mgr_core_link_reset(struct cam_req_mgr_core_link *link)
+{
+	int i = 0;
+
+	spin_lock_bh(&link->req.reset_link_spin_lock);
+	link->link_hdl = 0;
+	link->num_devs = 0;
+	link->max_delay = CAM_PIPELINE_DELAY_0;
+	link->min_delay = CAM_PIPELINE_DELAY_2;
+	link->workq = NULL;
+	link->pd_mask = 0;
+	link->l_dev = NULL;
+	link->req.in_q = NULL;
+	link->req.l_tbl = NULL;
+	link->req.num_tbl = 0;
+	link->watchdog = NULL;
+	spin_lock_bh(&link->link_state_spin_lock);
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	spin_unlock_bh(&link->link_state_spin_lock);
+	link->parent = NULL;
+	link->sync_link_sof_skip = false;
+	link->open_req_cnt = 0;
+	link->last_flush_id = -1;
+	link->initial_sync_req = -1;
+	link->dual_trigger = false;
+	link->trigger_cnt[0][CAM_TRIGGER_POINT_SOF] = 0;
+	link->trigger_cnt[0][CAM_TRIGGER_POINT_EOF] = 0;
+	link->trigger_cnt[1][CAM_TRIGGER_POINT_SOF] = 0;
+	link->trigger_cnt[1][CAM_TRIGGER_POINT_EOF] = 0;
+	link->in_msync_mode = false;
+	link->retry_cnt = 0;
+	link->is_shutdown = false;
+	link->initial_skip = true;
+	link->sof_timestamp = 0;
+	link->prev_sof_timestamp = 0;
+	link->skip_init_frame = false;
+	link->num_sync_links = 0;
+	link->last_sof_trigger_jiffies = 0;
+	link->wq_congestion = false;
+	link->try_for_internal_recovery = false;
+	link->is_sending_req = false;
+	link->resume_sync_curr_mask = 0;
+	link->resume_sync_dev_mask = 0;
+	link->exp_time_for_resume = 0;
+	atomic_set(&link->eof_event_cnt, 0);
+	link->cont_empty_slots = 0;
+	__cam_req_mgr_reset_apply_data(link);
+	__cam_req_mgr_reset_state_monitor_array(link);
+
+	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION - 1; i++)
+		link->sync_link[i] = NULL;
+	spin_unlock_bh(&link->req.reset_link_spin_lock);
+
+	mutex_lock(&link->lock);
+	link->properties_mask = CAM_LINK_PROPERTY_NONE;
+	mutex_unlock(&link->lock);
+}
+
+/* Destroy every open session on driver shutdown. */
+void cam_req_mgr_handle_core_shutdown(void)
+{
+	struct cam_req_mgr_core_session *session;
+	struct cam_req_mgr_core_session *tsession;
+	struct cam_req_mgr_session_info ses_info;
+
+	if (!list_empty(&g_crm_core_dev->session_head)) {
+		list_for_each_entry_safe(session, tsession,
+			&g_crm_core_dev->session_head, entry) {
+			ses_info.session_hdl =
+				session->session_hdl;
+			cam_req_mgr_destroy_session(&ses_info, true);
+		}
+	}
+}
+
+/* Allocate one crm_task_payload per workq task and attach it. */
+static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
+{
+	int32_t i = 0;
+	int rc = 0;
+	struct crm_task_payload *task_data = NULL;
+
+	task_data = CAM_MEM_ZALLOC_ARRAY(
+		workq->task.num_task, sizeof(*task_data),
+		GFP_KERNEL);
+	if (!task_data) {
+		rc = -ENOMEM;
+	} else {
+		for (i = 0; i < workq->task.num_task; i++)
+			workq->task.pool[i].payload = &task_data[i];
+	}
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_find_pd_tbl()
+ *
+ * @brief : Find pipeline delay based table pointer which matches delay
+ * @tbl   : Pointer to list of request table
+ * @delay : Pipeline delay value to be searched for comparison
+ *
+ * @return : pointer to request table for matching pipeline delay table.
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_find_pd_tbl(
+	struct cam_req_mgr_req_tbl *tbl, int32_t delay)
+{
+	if (!tbl)
+		return NULL;
+
+	do {
+		if (delay != tbl->pd)
+			tbl = tbl->next;
+		else
+			return tbl;
+	} while (tbl != NULL);
+
+	return NULL;
+}
+
+/**
+ * __cam_req_mgr_inc_idx()
+ *
+ * @brief   : Increment val passed by step size and rollover after max_val
+ * @val     : value to be incremented
+ * @step    : amount/step by which val is incremented
+ * @max_val : max val after which idx will roll over
+ *
+ */
+static void __cam_req_mgr_inc_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = (*val + step) % max_val;
+}
+
+/**
+ * __cam_req_mgr_dec_idx()
+ *
+ * @brief   : Decrement val passed by step size and rollover after max_val
+ * @val     : value to be decremented
+ * @step    : amount/step by which val is decremented
+ * @max_val : after zero value will roll over to max val
+ *
+ */
+static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = *val - step;
+	if (*val < 0)
+		*val = max_val + (*val);
+}
+
+/**
+ * __cam_req_mgr_inject_delay()
+ *
+ * @brief    : Check if any pd device is injecting delay
+ * @tbl      : cam_req_mgr_req_tbl
+ * @curr_idx : slot idx
+ * @trigger  : Trigger point
+ *
+ * @return   : 0 for success, negative for failure
+ */
+static int __cam_req_mgr_inject_delay(
+	struct cam_req_mgr_req_tbl *tbl,
+	int32_t curr_idx, uint32_t trigger)
+{
+	struct cam_req_mgr_tbl_slot *slot = NULL;
+	int rc = 0;
+
+	while (tbl) {
+		slot = &tbl->slot[curr_idx];
+		if ((trigger == CAM_TRIGGER_POINT_SOF) &&
+			(slot->inject_delay_at_sof > 0)) {
+			slot->inject_delay_at_sof--;
+			CAM_DBG(CAM_CRM,
+				"Delay injected by pd %d device at SOF, inject_delay_at_sof:%d",
+				tbl->pd, slot->inject_delay_at_sof);
+			rc = -EAGAIN;
+		} else if ((trigger == CAM_TRIGGER_POINT_EOF) &&
+			(slot->inject_delay_at_eof > 0)) {
+			slot->inject_delay_at_eof--;
+			CAM_DBG(CAM_CRM,
+				"Delay injected by pd %d device at EOF, inject_delay_at_eof:%d",
+				tbl->pd, slot->inject_delay_at_eof);
+			rc = -EAGAIN;
+		}
+		__cam_req_mgr_dec_idx(&curr_idx, tbl->pd_delta,
+			tbl->num_slots);
+		tbl = tbl->next;
+	}
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_dev_handle_to_name()
+ *
+ * @brief   : Finds device name based on the device handle
+ * @dev_hdl : Device handle whose name is to be found
+ * @link    : Link on which the device is connected
+ * @return  : String containing the device name
+ *
+ */
+static char *__cam_req_mgr_dev_handle_to_name(
+	int32_t dev_hdl, struct cam_req_mgr_core_link *link)
+{
+	struct cam_req_mgr_connected_device *dev = NULL;
+	int i = 0;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+
+		if (dev_hdl == dev->dev_hdl)
+			return dev->dev_info.name;
+	}
+
+	return "Invalid dev_hdl";
+}
+
+/**
+ * __cam_req_mgr_name_to_dev_handle()
+ *
+ * @brief    : Finds device handle based on the device name
+ * @dev_name : Device name whose dev handle is to be found
+ * @link     : Link on which the device is connected
+ * @return   : device handle
+ *
+ */
+static int32_t __cam_req_mgr_name_to_dev_handle(
+	char *dev_name, struct cam_req_mgr_core_link *link)
+{
+	struct cam_req_mgr_connected_device *dev = NULL;
+	int i = 0;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		CAM_DBG(CAM_CRM, "dev name %s, iterate dev name %s",
+			dev_name, dev->dev_info.name);
+
+		if (strcmp(dev_name, dev->dev_info.name) == 0)
+			return dev->dev_hdl;
+	}
+	return -EINVAL;
+}
+
+/**
+ * __cam_req_mgr_find_dev_name()
+ *
+ * @brief      : Find the dev name whose req is not ready
+ * @link       : link info
+ * @req_id     : req_id which is not ready
+ * @pd         : pipeline delay
+ * @masked_val : masked value holds the bit for all devices
+ *               that don't have the req_id ready for a given
+ *               pipeline delay
+ *
+ */
+static void __cam_req_mgr_find_dev_name(
+	struct cam_req_mgr_core_link *link,
+	int64_t req_id, uint32_t pd, uint32_t masked_val)
+{
+	int i = 0;
+	struct cam_req_mgr_connected_device *dev = NULL;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev->dev_info.p_delay == pd) {
+			if (masked_val & BIT(dev->dev_bit))
+				continue;
+			if (link->wq_congestion)
+				CAM_INFO_RATE_LIMIT(CAM_CRM,
+					"WQ congestion, Skip Frame: req: %lld not ready on link: 0x%x for pd: %d dev: %s open_req count: %u",
+					req_id, link->link_hdl, pd,
+					dev->dev_info.name, link->open_req_cnt);
+			else
+				CAM_INFO(CAM_CRM,
+					"Skip Frame: req: %lld not ready on link: 0x%x for pd: %d dev: %s open_req count: %u",
+					req_id, link->link_hdl, pd,
+					dev->dev_info.name, link->open_req_cnt);
+		}
+	}
+}
+
+/* Human-readable name for a request-state enum value (for logs). */
+static const char *__cam_req_mgr_operation_type_to_str(
+	enum cam_req_mgr_req_state type)
+{
+	switch (type) {
+	case CAM_CRM_REQ_READY:
+		return "REQ_READY";
+	case CAM_CRM_NOTIFY_TRIGGER:
+		return "NOTIFY_TRIGGER";
+	case CAM_CRM_PROCESS_TRIGGER:
+		return "PROCESS_TRIGGER";
+	case CAM_CRM_SEND_REQ:
+		return "SEND_REQ";
+	case CAM_CRM_NOTIFY_ERR:
+		return "NOTIFY_ERR";
+	case CAM_CRM_PROCESS_ERR:
+		return "PROCESS_ERR";
+	default:
+		return "INVALID_TYPE";
+	}
+}
+
+/* Record one request-state transition into the link's monitor ring. */
+static void __cam_req_mgr_update_state_monitor_array(
+	struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_state_monitor *state)
+{
+	int idx = link->req.next_state_idx;
+	char *dev_name = NULL;
+	struct cam_req_mgr_state_monitor *state_monitor =
+		&link->req.state_monitor[idx];
+
+	dev_name = __cam_req_mgr_dev_handle_to_name(state->dev_hdl, link);
+
+	spin_lock_bh(&link->req.monitor_slock);
+	CAM_DBG(CAM_REQ,
+		"Update: link_hdl %x dev %x dev_name %s req_id %lld frame_id %lld set to State: %s",
+		link->link_hdl, state->dev_hdl,
+		dev_name,
+		state->req_id, state->frame_id,
+		__cam_req_mgr_operation_type_to_str(state->req_state));
+
+	state_monitor->req_state = state->req_state;
+	state_monitor->req_id = state->req_id;
+	state_monitor->dev_hdl = state->dev_hdl;
+	state_monitor->frame_id = state->frame_id;
+	scnprintf(state_monitor->name, sizeof(state_monitor->name), "%s", dev_name);
+	ktime_get_clocktai_ts64(&state_monitor->time_stamp);
+
+	__cam_req_mgr_inc_idx(&link->req.next_state_idx, 1, MAX_REQ_STATE_MONITOR_NUM);
+
+	spin_unlock_bh(&link->req.monitor_slock);
+}
+
+/**
+ * __cam_req_mgr_dump_state_monitor_array()
+ *
+ * @brief : dump the state monitor array, dump from index next_state_idx
+ *          to print state information in time order.
+ * @link  : link pointer
+ *
+ */
+static void __cam_req_mgr_dump_state_monitor_array(
+	struct cam_req_mgr_core_link *link)
+{
+	int i;
+	int idx = link->req.next_state_idx;
+	struct tm ts;
+	struct timespec64 timespec;
+
+	/*
+	 * Get current time and print it immediately, we need this log to calculate the
+	 * difference between log time and the time we get using ktime_get_clocktai_ts64,
+	 * we need do like this because the time we get always has a 20-30ms difference from
+	 * log time and do not have a good solution at present.
+	 */
+	ktime_get_clocktai_ts64(&timespec);
+	time64_to_tm(timespec.tv_sec, 0, &ts);
+	CAM_INFO(CAM_CRM, "Link %x start dump state monitor array, time: %d-%d %d:%d:%d.%lld",
+		link->link_hdl, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour,
+		ts.tm_min, ts.tm_sec, timespec.tv_nsec / 1000000);
+
+	CAM_INFO(CAM_CRM, "%16s %6s %10s %8s %10s",
+		"state", "req id", "dev hdl", "frame id", "time stamp");
+	spin_lock_bh(&link->req.monitor_slock);
+	for (i = 0; i < MAX_REQ_STATE_MONITOR_NUM; i++) {
+		if (link->req.state_monitor[idx].req_state != CAM_CRM_STATE_INVALID) {
+			time64_to_tm(link->req.state_monitor[idx].time_stamp.tv_sec, 0, &ts);
+			CAM_INFO(CAM_CRM, "%16s %6d %10x %8d %d-%d %d:%d:%d.%lld",
+				__cam_req_mgr_operation_type_to_str(
+					link->req.state_monitor[idx].req_state),
+				link->req.state_monitor[idx].req_id,
+				link->req.state_monitor[idx].dev_hdl,
+				link->req.state_monitor[idx].frame_id,
+				ts.tm_mon + 1, ts.tm_mday, ts.tm_hour,
+				ts.tm_min, ts.tm_sec,
+				link->req.state_monitor[idx].time_stamp.tv_nsec / 1000000);
+		}
+
+		__cam_req_mgr_inc_idx(&idx, 1, MAX_REQ_STATE_MONITOR_NUM);
+	}
+	spin_unlock_bh(&link->req.monitor_slock);
+}
+
+/**
+ * __cam_req_mgr_notify_frame_skip()
+ *
+ * @brief : Notify all devices of frame skipping
+ *
+ */
+static int __cam_req_mgr_notify_frame_skip(
+	struct cam_req_mgr_core_link *link,
+	uint32_t trigger)
+{
+	int rc = 0, i, j, pd, idx;
+	bool found = false;
+	struct cam_req_mgr_apply_request frame_skip;
+	struct cam_req_mgr_apply *apply_data = NULL;
+	struct cam_req_mgr_connected_device *dev = NULL;
+	struct cam_req_mgr_tbl_slot *slot = NULL;
+	bool frame_duration_changing = false;
+	struct cam_req_mgr_link_evt_data evt_data;
+
+	apply_data = link->req.prev_apply_data;
+
+	if (link->max_delay < 0 || link->max_delay >= CAM_PIPELINE_DELAY_MAX) {
+		CAM_ERR(CAM_CRM, "link->max_delay is out of bounds: %d",
+			link->max_delay);
+		return -EINVAL;
+	}
+
+	/* Query the sensor (if any) whether frame duration is changing. */
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if ((dev->dev_info.dev_id == CAM_REQ_MGR_DEVICE_SENSOR) &&
+			(dev->ops && dev->ops->process_evt)) {
+			evt_data.req_id = apply_data[dev->pd_tbl->pd].req_id;
+			evt_data.dev_hdl = dev->dev_hdl;
+			evt_data.link_hdl = link->link_hdl;
+			evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_FRAME_DURATION_CHANGING;
+			spin_lock_bh(&link->link_state_spin_lock);
+			evt_data.u.is_recovery = (link->state == CAM_CRM_LINK_STATE_ERR);
+			spin_unlock_bh(&link->link_state_spin_lock);
+			rc = dev->ops->process_evt(&evt_data);
+			if (rc) {
+				CAM_ERR(CAM_CRM,
+					"Failed to send FRAME_DURATION_CHANGING on link 0x%x dev 0x%x",
+					link->link_hdl, dev->dev_hdl);
+				return -EINVAL;
+			}
+			frame_duration_changing = evt_data.u.frame_duration_changing;
+			break;
+		}
+	}
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		pd = dev->dev_info.p_delay;
+		if (pd >= CAM_PIPELINE_DELAY_MAX) {
+			CAM_WARN(CAM_CRM, "pd %d greater than max",
+				pd);
+			continue;
+		}
+
+		found = false;
+		idx = apply_data[pd].idx;
+		slot = &dev->pd_tbl->slot[idx];
+
+		for (j = 0; j < slot->ops.num_dev; j++) {
+			if (dev->dev_hdl == slot->ops.dev_hdl[j]) {
+				found = true;
+				break;
+			}
+		}
+
+		/*
+		 * If apply_at_eof is enabled do not apply at SOF
+		 * e.g. Flash device
+		 */
+		if ((trigger == CAM_TRIGGER_POINT_SOF) && found &&
+			(slot->ops.apply_at_eof))
+			continue;
+
+		/*
+		 * If apply_at_eof is not enabled ignore EOF
+		 */
+		if ((trigger == CAM_TRIGGER_POINT_EOF) && found &&
+			(!slot->ops.apply_at_eof))
+			continue;
+
+		frame_skip.dev_hdl = dev->dev_hdl;
+		frame_skip.link_hdl = link->link_hdl;
+		frame_skip.request_id =
+			apply_data[pd].req_id;
+		frame_skip.trigger_point = trigger;
+		frame_skip.report_if_bubble = 0;
+		frame_skip.last_applied_max_pd_req =
+			link->req.prev_apply_data[link->max_delay].req_id;
+		frame_skip.frame_duration_changing = frame_duration_changing;
+
+		CAM_DBG(CAM_REQ,
+			"Notify_frame_skip: link: 0x%x pd %d req_id %lld last_applied %lld frame_duration_changing:%d",
+			link->link_hdl, pd, apply_data[pd].req_id,
+			link->req.prev_apply_data[link->max_delay].req_id,
+			frame_duration_changing);
+		if ((dev->ops) && (dev->ops->notify_frame_skip))
+			dev->ops->notify_frame_skip(&frame_skip);
+	}
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_send_evt()
+ *
+ * @brief  : Send event to all connected devices
+ * @req_id : Req ID
+ * @type   : Event type
+ * @error  : Error type
+ * @link   : Link info
+ *
+ */
+static int __cam_req_mgr_send_evt(
+	uint64_t req_id,
+	enum cam_req_mgr_link_evt_type type,
+	enum cam_req_mgr_device_error error,
+	struct cam_req_mgr_core_link *link)
+{
+	int i, rc = 0;
+	struct cam_req_mgr_link_evt_data evt_data = {0};
+	struct cam_req_mgr_connected_device *device;
+
+	CAM_DBG(CAM_CRM,
+		"Notify event type: %d to all connected devices on link: 0x%x",
+		type, link->link_hdl);
+
+	/* Try for internal recovery */
+	if (link->try_for_internal_recovery)
+		evt_data.try_for_recovery = true;
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+
+		evt_data.dev_hdl = device->dev_hdl;
+		evt_data.link_hdl = link->link_hdl;
+		evt_data.req_id = req_id;
+		evt_data.evt_type = type;
+		if (type == CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES)
+			evt_data.u.properties_mask = link->properties_mask;
+
		else
			evt_data.u.error = error;

		if (device->ops && device->ops->process_evt) {
			rc = device->ops->process_evt(&evt_data);
			if (rc) {
				CAM_ERR(CAM_CRM,
					"Failed to send evt %d on link 0x%x dev 0x%x req %lld",
					type, link->link_hdl, device->dev_hdl, req_id);
				break;
			}
		}
	}

	/* Updated if internal recovery succeeded */
	link->try_for_internal_recovery = evt_data.try_for_recovery;

	return rc;
}

/**
 * __cam_req_mgr_notify_error_on_link()
 *
 * @brief : Notify userspace on exceeding max retry
 *          attempts to apply same req
 * @link  : link on which the req could not be applied
 * @dev   : the connected device that failed to apply
 *
 * @return : 0 on success, negative on failure
 *
 */
static int __cam_req_mgr_notify_error_on_link(
	struct cam_req_mgr_core_link *link,
	struct cam_req_mgr_connected_device *dev)
{
	struct cam_req_mgr_core_session *session = NULL;
	struct cam_req_mgr_message msg = {0};
	int rc = 0, pd;

	session = (struct cam_req_mgr_core_session *)link->parent;
	if (!session) {
		CAM_WARN(CAM_CRM, "session ptr NULL %x", link->link_hdl);
		return -EINVAL;
	}

	pd = dev->dev_info.p_delay;
	if (pd >= CAM_PIPELINE_DELAY_MAX) {
		CAM_ERR(CAM_CRM, "pd : %d is more than expected", pd);
		return -EINVAL;
	}

	if (link->min_delay >= CAM_PIPELINE_DELAY_MAX) {
		CAM_ERR(CAM_CRM, "min pd : %d is more than expected",
			link->min_delay);
		return -EINVAL;
	}

	/* Notify all devices in the link about the error */
	__cam_req_mgr_send_evt(link->req.apply_data[link->min_delay].req_id,
		CAM_REQ_MGR_LINK_EVT_STALLED, CRM_KMD_ERR_FATAL, link);

	/*
	 * Internal recovery succeeded - skip userland notification
	 * If recovery had failed subdevice will reset this flag
	 */
	if (link->try_for_internal_recovery) {
		CAM_INFO(CAM_CRM, "Internal recovery succeeded on link: 0x%x",
			link->link_hdl);
		return 0;
	}

	CAM_ERR_RATE_LIMIT(CAM_CRM,
		"Notifying userspace to trigger recovery on link 0x%x for session %d",
		link->link_hdl, session->session_hdl);

	/* Dump recent request-state history to aid debugging the stall */
	__cam_req_mgr_dump_state_monitor_array(link);

	memset(&msg, 0, sizeof(msg));

	msg.session_hdl = session->session_hdl;
	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
	msg.u.err_msg.request_id =
		link->req.apply_data[pd].req_id;
	msg.u.err_msg.link_hdl = link->link_hdl;
	msg.u.err_msg.resource_size = 0;
	msg.u.err_msg.error_code = CAM_REQ_MGR_LINK_STALLED_ERROR;

	CAM_DBG(CAM_CRM, "Failed for device: %s while applying request: %lld",
		dev->dev_info.name, link->req.apply_data[pd].req_id);

	rc = cam_req_mgr_notify_message(&msg,
		V4L_EVENT_CAM_REQ_MGR_ERROR,
		V4L_EVENT_CAM_REQ_MGR_EVENT);

	if (rc)
		CAM_ERR_RATE_LIMIT(CAM_CRM,
			"Error in notifying recovery for session %d link 0x%x rc %d",
			session->session_hdl, link->link_hdl, rc);

	return rc;
}

/**
 * __cam_req_mgr_traverse()
 *
 * @brief : Traverse through pd tables, it will internally cover all linked
 *          pd tables. Each pd table visited will check if the idx passed to
 *          it is in ready state. If ready means all devices linked to the pd
 *          table have this request id packet ready. Then it calls subsequent
 *          pd tbl with new idx. New idx value takes into account the delta
 *          between current pd table and next one.
 * @traverse_data: contains all the info to traverse through pd tables
 *
 * @return: 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
{
	int rc = 0;
	int32_t next_idx = traverse_data->idx;
	int32_t curr_idx = traverse_data->idx;
	struct cam_req_mgr_req_tbl *tbl;
	struct cam_req_mgr_apply *apply_data;
	struct cam_req_mgr_tbl_slot *slot = NULL;

	if (!traverse_data->tbl || !traverse_data->apply_data) {
		CAM_ERR(CAM_CRM, "NULL pointer %pK %pK",
			traverse_data->tbl, traverse_data->apply_data);
		traverse_data->result = 0;
		return -EINVAL;
	}

	tbl = traverse_data->tbl;
	apply_data = traverse_data->apply_data;
	slot = &tbl->slot[curr_idx];
	CAM_DBG(CAM_CRM,
		"Enter pd %d idx %d state %d skip %d status %d skip_idx %d",
		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status,
		traverse_data->in_q->slot[curr_idx].skip_idx);

	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
	if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
		traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
		tbl->skip_traverse > 0) {
		if (tbl->next) {
			/* Recurse into the next (lower pd) table at idx - delta */
			__cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
				tbl->num_slots);
			traverse_data->idx = next_idx;
			traverse_data->tbl = tbl->next;
			rc = __cam_req_mgr_traverse(traverse_data);
		}
		if (rc >= 0) {
			SET_SUCCESS_BIT(traverse_data->result, tbl->pd);

			if (traverse_data->validate_only == false) {
				apply_data[tbl->pd].pd = tbl->pd;
				apply_data[tbl->pd].req_id =
					CRM_GET_REQ_ID(
						traverse_data->in_q, curr_idx);
				apply_data[tbl->pd].idx = curr_idx;

				CAM_DBG(CAM_CRM, "req_id: %lld with pd of %d",
					apply_data[tbl->pd].req_id,
					apply_data[tbl->pd].pd);
				/*
				 * If traverse is successful decrement
				 * traverse skip
				 */
				if (tbl->skip_traverse > 0) {
					apply_data[tbl->pd].req_id = -1;
					tbl->skip_traverse--;
				}
			}
		} else {
			/* linked pd table is not ready for this traverse yet */
			return rc;
		}
	} else {
		/* This pd table is not ready to proceed with asked idx */
		traverse_data->result_data.req_id =
			CRM_GET_REQ_ID(traverse_data->in_q, curr_idx);
		traverse_data->result_data.pd = tbl->pd;
		traverse_data->result_data.masked_value =
			(tbl->dev_mask & slot->req_ready_map);
		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
		return -EAGAIN;
	}

	return 0;
}

/**
 * __cam_req_mgr_in_q_skip_idx()
 *
 * @brief : Mark an input queue slot as skipped
 * @in_q  : input queue pointer
 * @idx   : Sets skip_idx bit of the particular slot to true so when traverse
 *          happens for this idx, no req will be submitted for devices
 *          handling this idx.
 *
 */
static void __cam_req_mgr_in_q_skip_idx(struct cam_req_mgr_req_queue *in_q,
	int32_t idx)
{
	in_q->slot[idx].req_id = -1;
	in_q->slot[idx].skip_idx = 1;
	CAM_DBG(CAM_CRM, "SET IDX SKIP on slot= %d", idx);
}

/**
 * __cam_req_mgr_tbl_set_id()
 *
 * @brief : Set unique id to table
 * @tbl   : pipeline based table which requires new id
 * @req   : pointer to request data which contains num_tables counter
 *
 */
static void __cam_req_mgr_tbl_set_id(struct cam_req_mgr_req_tbl *tbl,
	struct cam_req_mgr_req_data *req)
{
	if (!tbl)
		return;
	do {
		tbl->id = req->num_tbl++;
		CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
			tbl->id, tbl->pd, tbl->skip_traverse,
			tbl->pd_delta);
		tbl = tbl->next;
	} while (tbl != NULL);
}

/**
 * __cam_req_mgr_tbl_set_all_skip_cnt()
 *
 * @brief : Each pd table sets skip value based on delta between itself and
 *          max pd value. During initial streamon or bubble case this is
 *          used. That way each pd table skips required num of traverse and
 *          align themselves with req mgr connected devs.
 * @l_tbl : iterates through list of pd tables and sets skip traverse
 *
 */
static void __cam_req_mgr_tbl_set_all_skip_cnt(
	struct cam_req_mgr_req_tbl **l_tbl)
{
	struct cam_req_mgr_req_tbl *tbl = *l_tbl;
	int32_t max_pd;

	if (!tbl)
		return;

	/* Head of the list has the highest pd */
	max_pd = tbl->pd;
	do {
		tbl->skip_traverse = max_pd - tbl->pd;
		CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
			tbl->id, tbl->pd, tbl->skip_traverse,
			tbl->pd_delta);
		tbl = tbl->next;
	} while (tbl != NULL);
}

/**
 * __cam_req_mgr_find_slot_for_req()
 *
 * @brief  : Find idx from input queue at which req id is enqueued
 * @in_q   : input request queue pointer
 * @req_id : request id which needs to be searched in input queue
 *
 * @return : slot index where passed request id is stored, -1 for failure
 *
 */
static int32_t __cam_req_mgr_find_slot_for_req(
	struct cam_req_mgr_req_queue *in_q, int64_t req_id)
{
	int32_t idx, i;
	struct cam_req_mgr_slot *slot;

	/* Search backwards from the current read index */
	idx = in_q->rd_idx;
	for (i = 0; i < in_q->num_slots; i++) {
		slot = &in_q->slot[idx];
		if (slot->req_id == req_id) {
			CAM_DBG(CAM_CRM,
				"req: %lld found at idx: %d status: %d sync_mode: %d",
				req_id, idx, slot->status, slot->sync_mode);
			break;
		}
		__cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
	}
	if (i >= in_q->num_slots)
		idx = -1;

	return idx;
}

/**
 * __cam_req_mgr_disconnect_req_on_sync_link()
 *
 * @brief : Disconnect link and sync link
 * @link  : pointer to link
 * @slot  : pointer to slot
 *
 */
static void __cam_req_mgr_disconnect_req_on_sync_link(
	struct cam_req_mgr_core_link *link,
	struct cam_req_mgr_slot *slot)
{
	int i, j;
	int sync_idx;
	struct cam_req_mgr_slot *sync_slot;
	struct cam_req_mgr_req_queue *sync_in_q;
	struct cam_req_mgr_core_link *sync_link;

	for (i = 0; i < slot->num_sync_links; i++) {
		sync_link = cam_get_link_priv(slot->sync_link_hdls[i]);
		if (!sync_link)
			continue;

		sync_in_q = sync_link->req.in_q;
		sync_idx = __cam_req_mgr_find_slot_for_req(sync_in_q, slot->req_id);
		if (sync_idx < 0)
			continue;

		CAM_DBG(CAM_CRM,
			"Req: %llu on link: 0x%x flushed, update sync_link: 0x%x at slot: %d",
			slot->req_id, link->link_hdl, sync_link->link_hdl, sync_idx);

		/* Demote the peer's slot to no-sync and drop our handle from it */
		sync_slot = &sync_in_q->slot[sync_idx];
		if (sync_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) {
			sync_slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
			sync_slot->num_sync_links = 0;

			for (j = 0; j < MAXIMUM_LINKS_PER_SESSION - 1; j++) {
				if (sync_slot->sync_link_hdls[j] == link->link_hdl)
					sync_slot->sync_link_hdls[j] = 0;
			}
		}
	}
}

/**
 * __cam_req_mgr_flush_req_slot()
 *
 * @brief : reset all the slots/pd tables when flush is
 *          invoked
 * @link  : link pointer
 *
 */
static void __cam_req_mgr_flush_req_slot(
	struct cam_req_mgr_core_link *link)
{
	int i;
	int idx;
	struct cam_req_mgr_slot *slot;
	struct cam_req_mgr_req_tbl *tbl;
	struct cam_req_mgr_req_queue *in_q = link->req.in_q;

	for (idx = 0; idx < in_q->num_slots; idx++) {
		slot = &in_q->slot[idx];
		tbl = link->req.l_tbl;
		CAM_DBG(CAM_CRM,
			"RESET idx: %d req_id: %lld slot->status: %d",
			idx, slot->req_id, slot->status);

		if ((slot->req_id > 0) && slot->num_sync_links)
			__cam_req_mgr_disconnect_req_on_sync_link(link, slot);

		/* Reset input queue slot */
		slot->req_id = -1;
		slot->bubble_times = 0;
		slot->internal_recovered = false;
		slot->skip_idx = 1;
		slot->recover = 0;
		slot->additional_timeout = 0;
		slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
		slot->status = CRM_SLOT_STATUS_NO_REQ;
		slot->num_sync_links = 0;
		for (i = 0; i < MAXIMUM_LINKS_PER_SESSION - 1; i++)
			slot->sync_link_hdls[i] = 0;

		/* Reset all pd table slot */
		while (tbl != NULL) {
			CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
				tbl->pd, idx, tbl->slot[idx].state);
			tbl->slot[idx].req_ready_map = 0;
			tbl->slot[idx].req_apply_map = 0;
			tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
			tbl->slot[idx].ops.apply_at_eof = false;
for (i = 0; i < MAX_DEV_FOR_SPECIAL_OPS; i++) /* continues flush loop above */
				tbl->slot[idx].ops.dev_hdl[i] = -1;
			tbl->slot[idx].ops.num_dev = 0;
			tbl = tbl->next;
		}
	}

	atomic_set(&link->eof_event_cnt, 0);
	in_q->wr_idx = 0;
	in_q->rd_idx = 0;

	/* Clear per-camera SOF/EOF trigger counters */
	link->trigger_cnt[0][CAM_TRIGGER_POINT_SOF] = 0;
	link->trigger_cnt[0][CAM_TRIGGER_POINT_EOF] = 0;
	link->trigger_cnt[1][CAM_TRIGGER_POINT_SOF] = 0;
	link->trigger_cnt[1][CAM_TRIGGER_POINT_EOF] = 0;
	link->cont_empty_slots = 0;
}

/**
 * __cam_req_mgr_reset_req_slot()
 *
 * @brief : reset specified idx/slot in input queue as well as all pd tables
 * @link  : link pointer
 * @idx   : slot index which will be reset
 *
 */
static void __cam_req_mgr_reset_req_slot(struct cam_req_mgr_core_link *link,
	int32_t idx)
{
	int i;
	struct cam_req_mgr_slot *slot;
	struct cam_req_mgr_req_tbl *tbl = link->req.l_tbl;
	struct cam_req_mgr_req_queue *in_q = link->req.in_q;

	if (idx < 0) {
		CAM_ERR(CAM_CRM, "Wrong array idx is sent, idx: %d", idx);
		return;
	}

	slot = &in_q->slot[idx];
	CAM_DBG(CAM_CRM, "RESET: idx: %d: slot->status %d", idx, slot->status);

	/* Check if CSL has already pushed new request*/
	if ((slot->status == CRM_SLOT_STATUS_REQ_ADDED) ||
		(in_q->last_applied_idx == idx))
		return;

	if ((slot->req_id > 0) && slot->num_sync_links)
		__cam_req_mgr_disconnect_req_on_sync_link(link, slot);

	/* Reset input queue slot */
	slot->req_id = -1;
	slot->bubble_times = 0;
	slot->internal_recovered = false;
	slot->skip_idx = 0;
	slot->recover = 0;
	slot->additional_timeout = 0;
	slot->recovery_counter = 0;
	slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
	slot->status = CRM_SLOT_STATUS_NO_REQ;
	slot->num_sync_links = 0;
	slot->skip_set = false;
	slot->frame_sync_shift = 0;
	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION - 1; i++)
		slot->sync_link_hdls[i] = 0;

	/* Reset all pd table slot */
	while (tbl != NULL) {
		CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
			tbl->pd, idx, tbl->slot[idx].state);
tbl->slot[idx].req_ready_map = 0; + tbl->slot[idx].req_apply_map = 0; + tbl->slot[idx].state = CRM_REQ_STATE_EMPTY; + tbl->slot[idx].inject_delay_at_sof = 0; + tbl->slot[idx].inject_delay_at_eof = 0; + tbl->slot[idx].ops.apply_at_eof = false; + for (i = 0; i < MAX_DEV_FOR_SPECIAL_OPS; i++) + tbl->slot[idx].ops.dev_hdl[i] = -1; + tbl->slot[idx].ops.num_dev = 0; + tbl = tbl->next; + } +} + +/** + * __cam_req_mgr_validate_crm_wd_timer() + * + * @brief : Validate/modify the wd timer based on associated + * timeout with the request + * @link : link pointer + * + */ +static void __cam_req_mgr_validate_crm_wd_timer( + struct cam_req_mgr_core_link *link) +{ + int idx = 0; + int next_frame_timeout, current_frame_timeout, max_frame_timeout; + int64_t current_req_id, next_req_id; + struct cam_req_mgr_req_queue *in_q = link->req.in_q; + + if (link->skip_init_frame) { + CAM_DBG(CAM_CRM, + "skipping modifying wd timer for first frame after streamon"); + link->skip_init_frame = false; + return; + } + + idx = in_q->rd_idx; + __cam_req_mgr_dec_idx( + &idx, (link->max_delay - 1), + in_q->num_slots); + next_frame_timeout = in_q->slot[idx].additional_timeout; + next_req_id = in_q->slot[idx].req_id; + CAM_DBG(CAM_CRM, + "rd_idx: %d idx: %d next_req_id: %lld next_frame_timeout: %d ms", + in_q->rd_idx, idx, next_req_id, next_frame_timeout); + + idx = in_q->rd_idx; + __cam_req_mgr_dec_idx( + &idx, link->max_delay, + in_q->num_slots); + current_frame_timeout = in_q->slot[idx].additional_timeout; + current_req_id = in_q->slot[idx].req_id; + CAM_DBG(CAM_CRM, + "rd_idx: %d idx: %d curr_req_id: %lld current_frame_timeout: %d ms", + in_q->rd_idx, idx, current_req_id, current_frame_timeout); + + if ((current_req_id == -1) && (next_req_id == -1)) { + /* Check for resume scenario for long exposure */ + if (link->exp_time_for_resume) { + CAM_DBG(CAM_CRM, + "Modifying wd timer expiry to %d ms on link: 0x%x", + (link->exp_time_for_resume + CAM_REQ_MGR_WATCHDOG_TIMEOUT), + link->link_hdl); + 
spin_lock_bh(&link->link_state_spin_lock); + if (link->watchdog) { + crm_timer_modify(link->watchdog, + link->exp_time_for_resume + + CAM_REQ_MGR_WATCHDOG_TIMEOUT); + } + link->exp_time_for_resume = 0; + spin_unlock_bh(&link->link_state_spin_lock); + } else { + CAM_DBG(CAM_CRM, + "Skip modifying wd timer, continue with same timeout"); + } + return; + } + + max_frame_timeout = (current_frame_timeout > next_frame_timeout) ? + current_frame_timeout : next_frame_timeout; + + spin_lock_bh(&link->link_state_spin_lock); + if (link->watchdog) { + if ((max_frame_timeout + CAM_REQ_MGR_WATCHDOG_TIMEOUT) > + link->watchdog->expires) { + CAM_DBG(CAM_CRM, + "Modifying wd timer expiry from %d ms to %d ms", + link->watchdog->expires, + (next_frame_timeout + + CAM_REQ_MGR_WATCHDOG_TIMEOUT)); + crm_timer_modify(link->watchdog, + max_frame_timeout + + CAM_REQ_MGR_WATCHDOG_TIMEOUT); + } else if (max_frame_timeout) { + max_frame_timeout = CAM_REQ_MGR_WATCHDOG_TIMEOUT + + CAM_REQ_MGR_COMPUTE_TIMEOUT(max_frame_timeout); + CAM_DBG(CAM_CRM, + "Reset wd timer to frame from %d ms to %d ms", + link->watchdog->expires, + max_frame_timeout); + crm_timer_modify(link->watchdog, + max_frame_timeout); + } else if (!max_frame_timeout && (link->watchdog->expires > + CAM_REQ_MGR_WATCHDOG_TIMEOUT)) { + CAM_DBG(CAM_CRM, + "Reset wd timer to default from %d ms to %d ms", + link->watchdog->expires, + CAM_REQ_MGR_WATCHDOG_TIMEOUT); + crm_timer_modify(link->watchdog, + CAM_REQ_MGR_WATCHDOG_TIMEOUT); + } + } else { + CAM_WARN(CAM_CRM, "Watchdog timer exited already"); + } + spin_unlock_bh(&link->link_state_spin_lock); +} + +/** + * __cam_req_mgr_check_for_lower_pd_devices() + * + * @brief : Checks if there are any devices on the link having a lesser + * pd than the max pd of the link + * @link : Pointer to link which needs to be checked + * + * @return : 0 if a lower pd device is found negative otherwise + */ +static int __cam_req_mgr_check_for_lower_pd_devices( + struct cam_req_mgr_core_link *link) +{ + int 
i = 0; /* continues local declarations of __cam_req_mgr_check_for_lower_pd_devices */
	struct cam_req_mgr_connected_device *dev = NULL;

	for (i = 0; i < link->num_devs; i++) {
		dev = &link->l_dev[i];
		if (dev->dev_info.p_delay < link->max_delay)
			return 0;
	}

	return -EAGAIN;
}

/**
 * __cam_req_mgr_move_to_next_req_slot()
 *
 * @brief : While streaming if input queue does not contain any pending
 *          request, req mgr still needs to submit pending request ids to
 *          devices with lower pipeline delay value. But if there are
 *          continuous max_delay empty slots, we don't need to move to
 *          next slot since the last request is applied to all devices.
 * @link  : Link handle
 *
 * @return : 0 for success, negative for failure
 */
static int __cam_req_mgr_move_to_next_req_slot(
	struct cam_req_mgr_core_link *link)
{
	int rc = 0;
	struct cam_req_mgr_req_queue *in_q = link->req.in_q;
	int32_t idx = in_q->rd_idx;
	struct cam_req_mgr_slot *slot;

	__cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
	slot = &in_q->slot[idx];

	CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);

	/*
	 * Some slot can't be reset due to irq congestion and
	 * performance issue, we need to reset it when we
	 * want to move to this slot.
	 */
	if (slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
		CAM_WARN(CAM_CRM,
			"slot [idx: %d req: %lld last_applied_idx: %d] was not reset, reset it now",
			idx, in_q->slot[idx].req_id, in_q->last_applied_idx);
		if (in_q->last_applied_idx == idx) {
			CAM_WARN(CAM_CRM,
				"last_applied_idx: %d",
				in_q->last_applied_idx);
			in_q->last_applied_idx = -1;
		}
		__cam_req_mgr_reset_req_slot(link, idx);
	}

	/* Check if there is new req from CSL, if not complete req */
	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
		rc = __cam_req_mgr_check_for_lower_pd_devices(link);
		if (rc) {
			CAM_DBG(CAM_CRM, "No lower pd devices on link 0x%x",
				link->link_hdl);
			return rc;
		}

		/* Stop once max_delay consecutive slots were empty */
		if (link->cont_empty_slots++ >= link->max_delay) {
			CAM_DBG(CAM_CRM, "There are %d continuous empty slots on link 0x%x",
				link->cont_empty_slots, link->link_hdl);
			return -EAGAIN;
		}

		__cam_req_mgr_in_q_skip_idx(in_q, idx);
		slot->status = CRM_SLOT_STATUS_REQ_ADDED;
		if (in_q->wr_idx != idx)
			CAM_WARN(CAM_CRM,
				"CHECK here wr %d, rd %d", in_q->wr_idx, idx);
		else
			__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
	} else
		link->cont_empty_slots = 0;

	__cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);

	return rc;
}

/**
 * __cam_req_mgr_send_req()
 *
 * @brief : send request id to be applied to each device connected on link
 * @link  : pointer to link whose input queue and req tbl are
 *          traversed through
 * @in_q  : pointer to input request queue
 *
 * @return : 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
	struct cam_req_mgr_req_queue *in_q, uint32_t trigger,
	struct cam_req_mgr_connected_device **failed_dev)
{
	int rc = 0, pd, i, j, idx;
	bool found = false;
	int64_t req_applied_to_min_pd = -1;
	struct cam_req_mgr_connected_device *dev = NULL;
	struct cam_req_mgr_apply_request apply_req;
	struct cam_req_mgr_link_evt_data evt_data;
	struct cam_req_mgr_tbl_slot *slot =
NULL; /* continues declarations of __cam_req_mgr_send_req */
	struct cam_req_mgr_slot *req_slot = NULL;
	struct cam_req_mgr_apply *apply_data = NULL;
	struct cam_req_mgr_apply *prev_apply_data = NULL;
	struct cam_req_mgr_state_monitor state;
	bool frame_duration_changing = false;

	apply_req.link_hdl = link->link_hdl;
	apply_req.report_if_bubble = 0;
	apply_req.re_apply = false;
	if (link->retry_cnt > 0) {
		if (g_crm_core_dev->recovery_on_apply_fail)
			apply_req.re_apply = true;
	}

	spin_lock_bh(&link->link_state_spin_lock);
	if (link->state == CAM_CRM_LINK_STATE_ERR)
		apply_req.recovery = true;
	else
		apply_req.recovery = false;
	spin_unlock_bh(&link->link_state_spin_lock);

	if (link->max_delay < 0 || link->max_delay >= CAM_PIPELINE_DELAY_MAX) {
		CAM_ERR(CAM_CRM, "link->max_delay is out of bounds: %d",
			link->max_delay);
		return -EINVAL;
	}

	req_slot = &in_q->slot[in_q->rd_idx];
	apply_data = link->req.apply_data;
	prev_apply_data = link->req.prev_apply_data;

	/*
	 * Query the sensor (first match) whether frame duration is changing
	 * when its request on this frame is skipped; result is forwarded to
	 * all devices below.
	 */
	for (i = 0; i < link->num_devs; i++) {
		dev = &link->l_dev[i];
		pd = dev->pd_tbl->pd;
		if ((dev->dev_info.dev_id == CAM_REQ_MGR_DEVICE_SENSOR) &&
			(dev->ops && dev->ops->process_evt) &&
			(apply_data[pd].skip_idx || (apply_data[pd].req_id < 0))) {
			evt_data.req_id = prev_apply_data[pd].req_id;
			evt_data.dev_hdl = dev->dev_hdl;
			evt_data.link_hdl = link->link_hdl;
			evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_FRAME_DURATION_CHANGING;
			spin_lock_bh(&link->link_state_spin_lock);
			evt_data.u.is_recovery = (link->state == CAM_CRM_LINK_STATE_ERR);
			spin_unlock_bh(&link->link_state_spin_lock);
			rc = dev->ops->process_evt(&evt_data);
			if (rc) {
				CAM_ERR(CAM_CRM,
					"Failed to send FRAME_SKIP_AVALIABLE on link 0x%x dev 0x%x",
					link->link_hdl, dev->dev_hdl);
				return -EINVAL;
			}
			frame_duration_changing = evt_data.u.frame_duration_changing;
			break;
		}
	}

	/*
	 * This For loop is to address the special operation requested
	 * by device
	 */
	for (i = 0; i < link->num_devs; i++) {
		dev = &link->l_dev[i];
		pd = dev->dev_info.p_delay;
		if (pd >= CAM_PIPELINE_DELAY_MAX) {
			CAM_WARN(CAM_CRM, "pd %d greater than max",
				pd);
			continue;
		}

		found = false;
		idx = apply_data[pd].idx;
		slot = &dev->pd_tbl->slot[idx];

		if (!slot->ops.num_dev || (slot->ops.num_dev > MAX_DEV_FOR_SPECIAL_OPS)) {
			CAM_DBG(CAM_CRM,
				"No special ops detected for slot %d dev %s num_dev %d",
				idx, dev->dev_info.name, slot->ops.num_dev);
			continue;
		}

		for (j = 0; j < slot->ops.num_dev; j++) {
			if (dev->dev_hdl == slot->ops.dev_hdl[j]) {
				found = true;
				break;
			}
		}

		if (!found) {
			CAM_DBG(CAM_CRM,
				"Dev_hdl : %d Not matched", dev->dev_hdl);
			continue;
		}

		/* Already applied for this device on this slot */
		if (slot->req_apply_map & BIT(dev->dev_bit))
			continue;

		if (apply_data[pd].skip_idx ||
			(apply_data[pd].req_id < 0)) {
			CAM_DBG(CAM_CRM,
				"dev %s skip %d req_id %lld",
				dev->dev_info.name,
				apply_data[pd].skip_idx,
				apply_data[pd].req_id);
			apply_req.dev_hdl = dev->dev_hdl;
			apply_req.request_id =
				link->req.prev_apply_data[pd].req_id;
			apply_req.trigger_point = trigger;
			apply_req.report_if_bubble = 0;
			apply_req.last_applied_max_pd_req =
				link->req.prev_apply_data[link->max_delay].req_id;
			if ((dev->ops) && (dev->ops->notify_frame_skip))
				dev->ops->notify_frame_skip(&apply_req);
			continue;
		}

		/* This one is to prevent EOF request to apply on SOF*/
		if ((trigger == CAM_TRIGGER_POINT_SOF) &&
			(slot->ops.apply_at_eof)) {
			CAM_DBG(CAM_CRM, "EOF event cannot be applied at SOF");
			break;
		}

		if ((trigger == CAM_TRIGGER_POINT_EOF) &&
			(!slot->ops.apply_at_eof)) {
			CAM_DBG(CAM_CRM, "NO EOF DATA FOR REQ: %llu",
				apply_data[pd].req_id);
			break;
		}

		apply_req.dev_hdl = dev->dev_hdl;
		apply_req.request_id =
			apply_data[pd].req_id;
		apply_req.trigger_point = trigger;
		if ((dev->ops) && (dev->ops->apply_req)) {
			rc = dev->ops->apply_req(&apply_req);
			if (rc) {
				*failed_dev = dev;
				__cam_req_mgr_notify_frame_skip(link,
					trigger);
				return rc;
			} else
				slot->req_apply_map |= BIT(dev->dev_bit);
		} else {
			CAM_DBG(CAM_REQ,
				"link_hdl: %x pd: %d req_id %lld ops %p apply_req %p",
				link->link_hdl, pd, apply_req.request_id,
				dev->ops, (dev->ops ? dev->ops->apply_req : NULL));
			break;
		}

		state.req_state = CAM_CRM_SEND_REQ;
		state.req_id = apply_req.request_id;
		state.dev_hdl = apply_req.dev_hdl;
		state.frame_id = -1;
		__cam_req_mgr_update_state_monitor_array(link, &state);

		CAM_DBG(CAM_REQ,
			"SEND: link_hdl %x dev %s pd %d req_id %lld",
			link->link_hdl, dev->dev_info.name,
			pd, apply_req.request_id);

		if ((trigger == CAM_TRIGGER_POINT_EOF) &&
			(slot->ops.apply_at_eof)) {
			slot->ops.apply_at_eof = false;
			if (atomic_read(&link->eof_event_cnt) > 0)
				atomic_dec(&link->eof_event_cnt);
			return 0;
		}
	}

	/* For regular send requests */
	for (i = 0; i < link->num_devs; i++) {
		dev = &link->l_dev[i];
		if (dev) {
			pd = dev->dev_info.p_delay;
			if (pd >= CAM_PIPELINE_DELAY_MAX) {
				CAM_WARN(CAM_CRM, "pd %d greater than max",
					pd);
				continue;
			}

			if (!(dev->dev_info.trigger & trigger))
				continue;

			if (apply_data[pd].skip_idx ||
				(apply_data[pd].req_id < 0)) {
				CAM_DBG(CAM_CRM,
					"dev %s skip %d req_id %lld prev_apply_req:%lld frame_duration_changing:%d",
					dev->dev_info.name,
					apply_data[pd].skip_idx,
					apply_data[pd].req_id,
					link->req.prev_apply_data[pd].req_id,
					frame_duration_changing);
				apply_req.dev_hdl = dev->dev_hdl;
				apply_req.request_id =
					link->req.prev_apply_data[pd].req_id;
				apply_req.trigger_point = trigger;
				apply_req.report_if_bubble = 0;
				apply_req.last_applied_max_pd_req =
					link->req.prev_apply_data[link->max_delay].req_id;
				apply_req.no_further_requests =
					(req_slot->req_id == -1) ? true : false;
				apply_req.frame_duration_changing = frame_duration_changing;
				if ((dev->ops) && (dev->ops->notify_frame_skip))
					dev->ops->notify_frame_skip(&apply_req);
				continue;
			}

			found = false;

			apply_req.dev_hdl = dev->dev_hdl;
			apply_req.request_id =
				apply_data[pd].req_id;
			idx = apply_data[pd].idx;
			slot = &dev->pd_tbl->slot[idx];
			apply_req.report_if_bubble =
				in_q->slot[idx].recover;

			/*
			 * If it is dual trigger usecase, need to tell
			 * devices that the req is re-applied, then the
			 * devices need to skip applying if the req has
			 * been handled.
			 * e.g. ISP device
			 */
			if (link->retry_cnt > 0) {
				if (!apply_req.report_if_bubble &&
					link->dual_trigger)
					apply_req.re_apply = true;
			}

			if (slot->req_apply_map & BIT(dev->dev_bit))
				continue;

			for (j = 0; j < slot->ops.num_dev; j++) {
				if (dev->dev_hdl == slot->ops.dev_hdl[j]) {
					found = true;
					break;
				}
			}

			/*
			 * If apply_at_eof is enabled do not apply at SOF
			 * e.g. Flash device
			 */
			if ((trigger == CAM_TRIGGER_POINT_SOF) && found &&
				(slot->ops.apply_at_eof))
				continue;

			if ((trigger == CAM_TRIGGER_POINT_EOF) && found &&
				(!slot->ops.apply_at_eof))
				continue;

			apply_req.trigger_point = trigger;
			apply_req.last_applied_done_timestamp = 0;
			apply_req.frame_duration_changing = frame_duration_changing;
			if ((dev->dev_info.dev_id == CAM_REQ_MGR_DEVICE_IFE) &&
				(in_q->slot[idx].mismatched_frame_mode == CRM_NOTIFY_MISMATCHED_FRMAE))
				apply_req.last_applied_done_timestamp = link->last_applied_done_timestamp;

			CAM_DBG(CAM_REQ,
				"SEND: link_hdl %x dev %s pd %d req_id %lld frame_duration_changing %d",
				link->link_hdl, dev->dev_info.name,
				pd, apply_req.request_id,
				frame_duration_changing);
			if (dev->ops && dev->ops->apply_req) {
				rc = dev->ops->apply_req(&apply_req);
				if (rc < 0) {
					*failed_dev = dev;
					break;
				} else
					slot->req_apply_map |= BIT(dev->dev_bit);
			}

			if (dev->dev_info.dev_id == CAM_REQ_MGR_DEVICE_SENSOR) {
				link->last_applied_done_timestamp = apply_req.last_applied_done_timestamp;
				CAM_DBG(CAM_REQ,
					"Apply req:%lld done with last_applied_done_timestamp:0x%llx",
					apply_req.request_id, link->last_applied_done_timestamp);
			}

			state.req_state = CAM_CRM_SEND_REQ;
			state.req_id = apply_req.request_id;
			state.dev_hdl = apply_req.dev_hdl;
			state.frame_id = -1;
			__cam_req_mgr_update_state_monitor_array(link, &state);

			if (pd == link->min_delay)
				req_applied_to_min_pd = apply_req.request_id;

			trace_cam_req_mgr_apply_request(link, &apply_req, dev);
		}
	}

	if (rc < 0) {
		CAM_WARN_RATE_LIMIT(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
			dev->dev_info.p_delay, apply_req.request_id);
		/* Apply req failed notify already applied devs */
		for (; i >= 0; i--) {
			dev = &link->l_dev[i];
			evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_ERR;
			evt_data.dev_hdl = dev->dev_hdl;
			evt_data.link_hdl = link->link_hdl;
			evt_data.req_id = apply_req.request_id;
			evt_data.u.error = CRM_KMD_ERR_BUBBLE;
			if (dev->ops && dev->ops->process_evt)
				dev->ops->process_evt(&evt_data);
		}
		__cam_req_mgr_notify_frame_skip(link, trigger);
	} else {
		/* Remember what was applied for the next frame-skip round */
		memcpy(link->req.prev_apply_data, link->req.apply_data,
			CAM_PIPELINE_DELAY_MAX *
			sizeof(struct cam_req_mgr_apply));
		if (req_applied_to_min_pd > 0) {
			link->open_req_cnt--;
			CAM_DBG(CAM_REQ,
				"Open_reqs: %u after successfully applying req:%d",
				link->open_req_cnt, req_applied_to_min_pd);
		}
	}

	return rc;
}

/**
 * __cam_req_mgr_check_link_is_ready()
 *
 * @brief : traverse through all request tables and see if all devices are
 *          ready to apply request settings.
 * @link : pointer to link whose input queue and req tbl are
 *         traversed through
 * @idx  : index within input request queue
 * @validate_only : Whether to validate only and/or update settings
 *
 * @return : 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
	int32_t idx, bool validate_only)
{
	int rc;
	struct cam_req_mgr_traverse traverse_data;
	struct cam_req_mgr_req_queue *in_q;
	struct cam_req_mgr_apply *apply_data;
	struct cam_req_mgr_slot *link_slot = NULL;

	in_q = link->req.in_q;

	apply_data = link->req.apply_data;

	if (validate_only == false) {
		memset(apply_data, 0,
			sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);
	}

	traverse_data.apply_data = apply_data;
	traverse_data.idx = idx;
	traverse_data.tbl = link->req.l_tbl;
	traverse_data.in_q = in_q;
	traverse_data.result = 0;
	traverse_data.result_data.masked_value = 0;
	traverse_data.result_data.pd = 0;
	traverse_data.result_data.req_id = 0;
	traverse_data.validate_only = validate_only;
	traverse_data.open_req_cnt = link->open_req_cnt;

	/*
	 * Some no-sync mode requests are processed after link config,
	 * then process the sync mode requests after no-sync mode requests
	 * are handled, the initial_skip should be false when processing
	 * the sync mode requests.
	 */
	if (link->initial_skip) {
		CAM_DBG(CAM_CRM,
			"Set initial_skip to false for link %x",
			link->link_hdl);
		link->initial_skip = false;
	}

	link_slot = &link->req.in_q->slot[idx];
	/*
	 * Traverse through all pd tables, if result is success,
	 * apply the settings
	 */
	rc = __cam_req_mgr_traverse(&traverse_data);

	if (!rc && traverse_data.result == link->pd_mask) {
		CAM_DBG(CAM_CRM,
			"READY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
			link->link_hdl, idx,
			apply_data[2].req_id,
			apply_data[1].req_id,
			apply_data[0].req_id);
		/* Debug knob: force an artificial frame skip on this slot */
		if (unlikely((g_crm_core_dev->simulate_skip_frame) && (link_slot->skip_set))) {
			rc = -EAGAIN;
			CAM_WARN(CAM_CRM, "Simulate Skip on link: 0x%x req_id= %lld :%lld :%lld",
				link->link_hdl, apply_data[2].req_id,
				apply_data[1].req_id, apply_data[0].req_id);
			link_slot->skip_set = false;
		}
	} else {
		rc = -EAGAIN;
		__cam_req_mgr_find_dev_name(link,
			traverse_data.result_data.req_id,
			traverse_data.result_data.pd,
			traverse_data.result_data.masked_value);
	}

	return rc;
}

/**
 * __cam_req_mgr_check_sync_for_mslave()
 *
 * @brief : Processes requests during sync mode [master-slave]
 *          Here master corresponds to the link having a higher
 *          max_delay (pd) compared to the slave link.
+ * @link : Pointer to link whose input queue and req tbl are + * traversed through + * @slot : Pointer to the current slot being processed + * @return : 0 for success, negative for failure + * + */ +static int __cam_req_mgr_check_sync_for_mslave( + struct cam_req_mgr_core_link *link, + struct cam_req_mgr_core_link *sync_link, + struct cam_req_mgr_slot *slot) +{ + struct cam_req_mgr_slot *sync_slot = NULL; + int sync_slot_idx = 0, prev_idx, next_idx, rd_idx, sync_rd_idx, rc = 0; + int64_t req_id = 0, sync_req_id = 0; + int32_t sync_num_slots = 0; + + if (!sync_link || !link) { + CAM_ERR(CAM_CRM, "Sync link or link is null"); + return -EINVAL; + } + + req_id = slot->req_id; + + if (!sync_link->req.in_q) { + CAM_ERR(CAM_CRM, "Link hdl %x in_q is NULL", + sync_link->link_hdl); + return -EINVAL; + } + + sync_num_slots = sync_link->req.in_q->num_slots; + sync_rd_idx = sync_link->req.in_q->rd_idx; + + CAM_DBG(CAM_CRM, + "link_hdl %x req %lld frame_skip_flag %d open_req_cnt:%u initial_sync_req [%lld,%lld] is_master:%d", + link->link_hdl, req_id, link->sync_link_sof_skip, + link->open_req_cnt, link->initial_sync_req, + sync_link->initial_sync_req, link->is_master); + + if (sync_link->sync_link_sof_skip) { + CAM_DBG(CAM_CRM, + "No req applied on corresponding SOF on sync link: %x", + sync_link->link_hdl); + sync_link->sync_link_sof_skip = false; + return -EAGAIN; + } + + if (link->in_msync_mode && + sync_link->in_msync_mode && + (req_id - sync_link->req.in_q->slot[sync_rd_idx].req_id > + link->max_delay - sync_link->max_delay)) { + CAM_DBG(CAM_CRM, + "Req: %lld on link:%x need to hold for link: %x req:%d", + req_id, + link->link_hdl, + sync_link->link_hdl, + sync_link->req.in_q->slot[sync_rd_idx].req_id); + return -EINVAL; + } + + if (link->is_master) { + if (sync_link->initial_skip) { + CAM_DBG(CAM_CRM, "Link 0x%x [slave] not streamed on", + sync_link->link_hdl); + return -EAGAIN; + } + + rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true); + if (rc) { + 
CAM_DBG(CAM_CRM, + "Req: %lld [master] not ready on link: %x, rc=%d", + req_id, link->link_hdl, rc); + link->sync_link_sof_skip = true; + return rc; + } + + prev_idx = slot->idx; + __cam_req_mgr_dec_idx(&prev_idx, + (link->max_delay - sync_link->max_delay), + link->req.in_q->num_slots); + + rd_idx = sync_link->req.in_q->rd_idx; + sync_req_id = link->req.in_q->slot[prev_idx].req_id; + if ((sync_link->initial_sync_req != -1) && + (sync_link->initial_sync_req <= sync_req_id)) { + sync_slot_idx = __cam_req_mgr_find_slot_for_req( + sync_link->req.in_q, sync_req_id); + if (sync_slot_idx == -1) { + CAM_DBG(CAM_CRM, + "Prev Req: %lld [master] not found on link: %x [slave]", + sync_req_id, sync_link->link_hdl); + link->sync_link_sof_skip = true; + return -EINVAL; + } + + if ((sync_link->req.in_q->slot[sync_slot_idx].status != + CRM_SLOT_STATUS_REQ_APPLIED) && + (((sync_slot_idx - rd_idx + sync_num_slots) % + sync_num_slots) >= 1) && + (sync_link->req.in_q->slot[rd_idx].status != + CRM_SLOT_STATUS_REQ_APPLIED)) { + CAM_DBG(CAM_CRM, + "Prev Req: %lld [master] not next on link: %x [slave]", + sync_req_id, + sync_link->link_hdl); + return -EINVAL; + } + + rc = __cam_req_mgr_check_link_is_ready(sync_link, + sync_slot_idx, true); + if (rc && + (sync_link->req.in_q->slot[sync_slot_idx].status + != CRM_SLOT_STATUS_REQ_APPLIED)) { + CAM_DBG(CAM_CRM, + "Req: %lld not ready on [slave] link: %x, rc=%d", + sync_req_id, sync_link->link_hdl, rc); + link->sync_link_sof_skip = true; + return rc; + } + } + } else { + if (link->initial_skip) + link->initial_skip = false; + + rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true); + if (rc) { + CAM_DBG(CAM_CRM, + "Req: %lld [slave] not ready on link: %x, rc=%d", + req_id, link->link_hdl, rc); + link->sync_link_sof_skip = true; + return rc; + } + + next_idx = link->req.in_q->rd_idx; + rd_idx = sync_link->req.in_q->rd_idx; + __cam_req_mgr_inc_idx(&next_idx, + (sync_link->max_delay - link->max_delay), + link->req.in_q->num_slots); + + 
sync_req_id = link->req.in_q->slot[next_idx].req_id; + + if ((sync_link->initial_sync_req != -1) && + (sync_link->initial_sync_req <= sync_req_id)) { + sync_slot_idx = __cam_req_mgr_find_slot_for_req( + sync_link->req.in_q, sync_req_id); + if (sync_slot_idx == -1) { + CAM_DBG(CAM_CRM, + "Next Req: %lld [slave] not found on link: %x [master]", + sync_req_id, sync_link->link_hdl); + link->sync_link_sof_skip = true; + return -EINVAL; + } + + if ((sync_link->req.in_q->slot[sync_slot_idx].status != + CRM_SLOT_STATUS_REQ_APPLIED) && + (((sync_slot_idx - rd_idx + sync_num_slots) % + sync_num_slots) >= 1) && + (sync_link->req.in_q->slot[rd_idx].status != + CRM_SLOT_STATUS_REQ_APPLIED)) { + CAM_DBG(CAM_CRM, + "Next Req: %lld [slave] not next on link: %x [master]", + sync_req_id, sync_link->link_hdl); + return -EINVAL; + } + + sync_slot = &sync_link->req.in_q->slot[sync_slot_idx]; + rc = __cam_req_mgr_check_link_is_ready(sync_link, + sync_slot_idx, true); + if (rc && (sync_slot->status != + CRM_SLOT_STATUS_REQ_APPLIED)) { + CAM_DBG(CAM_CRM, + "Next Req: %lld [slave] not ready on [master] link: %x, rc=%d", + sync_req_id, sync_link->link_hdl, rc); + link->sync_link_sof_skip = true; + return rc; + } + } + } + + CAM_DBG(CAM_REQ, + "Req: %lld ready to apply on link: %x [validation successful]", + req_id, link->link_hdl); + + return 0; +} + + +/** + * __cam_req_mgr_check_sync_request_is_ready() + * + * @brief : processes requests during sync mode + * @link : pointer to link whose input queue and req tbl are + * traversed through + * @slot : pointer to the current slot being processed + * @return : 0 for success, negative for failure + * + */ +static int __cam_req_mgr_check_sync_req_is_ready( + struct cam_req_mgr_core_link *link, + struct cam_req_mgr_core_link *sync_link, + struct cam_req_mgr_slot *slot, + uint32_t trigger) +{ + struct cam_req_mgr_slot *sync_rd_slot = NULL; + int64_t req_id = 0, sync_req_id = 0; + int sync_slot_idx = 0, sync_rd_idx = 0, rc = 0; + int32_t 
sync_num_slots = 0; + uint64_t sync_frame_duration = 0; + uint64_t sof_timestamp_delta = 0; + uint64_t master_slave_diff = 0; + uint64_t ref_timestamp, sync_ref_timestamp; + bool ready = true, sync_ready = true; + int slot_idx_diff = 0; + + if (!sync_link || !link) { + CAM_ERR(CAM_CRM, "Sync link null"); + return -EINVAL; + } + + req_id = slot->req_id; + + if (!sync_link->req.in_q) { + CAM_ERR(CAM_CRM, "Link hdl %x in_q is NULL", + sync_link->link_hdl); + return -EINVAL; + } + + sync_num_slots = sync_link->req.in_q->num_slots; + sync_rd_idx = sync_link->req.in_q->rd_idx; + sync_rd_slot = &sync_link->req.in_q->slot[sync_rd_idx]; + sync_req_id = sync_rd_slot->req_id; + + CAM_DBG(CAM_REQ, + "link_hdl %x sync link_hdl %x req %lld", + link->link_hdl, sync_link->link_hdl, req_id); + + if ((sync_link->initial_skip) && + (req_id > sync_req_id)) { + link->initial_skip = false; + CAM_DBG(CAM_CRM, + "sync link %x not streamed on", + sync_link->link_hdl); + return -EAGAIN; + } + + if (sync_link->prev_sof_timestamp) + sync_frame_duration = sync_link->sof_timestamp - + sync_link->prev_sof_timestamp; + else + sync_frame_duration = DEFAULT_FRAME_DURATION; + + ref_timestamp = link->sof_timestamp + slot->frame_sync_shift; + sync_ref_timestamp = sync_link->sof_timestamp + sync_rd_slot->frame_sync_shift; + + sof_timestamp_delta = + (ref_timestamp >= sync_ref_timestamp) + ? (ref_timestamp - sync_ref_timestamp) + : (sync_ref_timestamp - ref_timestamp); + + CAM_DBG(CAM_CRM, + "sync link %x last frame_duration is %d ns sof_timestamp_delta:%llu ns", + sync_link->link_hdl, sync_frame_duration, sof_timestamp_delta); + + if (link->initial_skip) { + link->initial_skip = false; + + if ((ref_timestamp > sync_ref_timestamp) && + (sync_ref_timestamp > 0) && + (ref_timestamp - sync_ref_timestamp) < + (sync_frame_duration / 2)) { + /* + * If this frame sync with the previous frame of sync + * link, then we need to skip this frame, since the + * previous frame of sync link is also skipped. 
+ */ + CAM_DBG(CAM_CRM, + "This frame sync with previous sync_link %x frame", + sync_link->link_hdl); + return -EAGAIN; + } else if (ref_timestamp <= sync_ref_timestamp) { + /* + * Sometimes, link receives the SOF event is earlier + * than sync link in IFE CSID side, but link's SOF + * event is processed later than sync link's, then + * we need to skip this SOF event since the sync + * link's SOF event is also skipped. + */ + CAM_DBG(CAM_CRM, + "The previous frame of sync link is skipped"); + return -EAGAIN; + } + } + + if (sync_link->sync_link_sof_skip) { + CAM_DBG(CAM_REQ, + "No req applied on corresponding SOF on sync link: %x", + sync_link->link_hdl); + sync_link->sync_link_sof_skip = false; + return -EAGAIN; + } + + sync_slot_idx = __cam_req_mgr_find_slot_for_req( + sync_link->req.in_q, req_id); + if (sync_slot_idx == -1) { + CAM_DBG(CAM_CRM, "Req: %lld not found on link: %x [other link]", + req_id, sync_link->link_hdl); + return -EAGAIN; + } + + slot_idx_diff = (sync_slot_idx - sync_rd_idx + sync_num_slots) % + sync_num_slots; + if ((sync_link->req.in_q->slot[sync_slot_idx].status != + CRM_SLOT_STATUS_REQ_APPLIED) && + ((slot_idx_diff > 1) || + ((slot_idx_diff == 1) && + (sync_rd_slot->status != + CRM_SLOT_STATUS_REQ_APPLIED)))) { + CAM_DBG(CAM_CRM, + "Req: %lld [other link] not next req to be applied on link: %x", + req_id, sync_link->link_hdl); + return -EAGAIN; + } + + rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true); + if (rc) { + CAM_DBG(CAM_CRM, + "Req: %lld [My link] not ready on link: %x, rc=%d", + req_id, link->link_hdl, rc); + ready = false; + } + + if (sync_link->req.in_q) { + rc = __cam_req_mgr_check_link_is_ready(sync_link, + sync_slot_idx, true); + if (rc && + (sync_link->req.in_q->slot[sync_slot_idx].status != + CRM_SLOT_STATUS_REQ_APPLIED) && + (sync_link->req.in_q->slot[sync_slot_idx].status != + CRM_SLOT_STATUS_REQ_READY)) { + CAM_DBG(CAM_CRM, + "Req: %lld not ready on link: %x, rc=%d", + req_id, sync_link->link_hdl, rc); + 
sync_ready = false; + } + } else { + CAM_ERR(CAM_CRM, "Link hdl %x in_q is NULL", + sync_link->link_hdl); + return -EINVAL; + } + + /* + * If both of them are ready or not ready, then just + * skip this sof and don't skip sync link next SOF. + */ + if (sync_ready != ready) { + CAM_DBG(CAM_CRM, + "Req: %lld ready %d sync_ready %d, ignore sync link next SOF", + req_id, ready, sync_ready); + + /* + * Only skip the frames if current frame sync with + * next frame of sync link. + */ + if (sof_timestamp_delta > sync_frame_duration / 2) + link->sync_link_sof_skip = true; + return -EINVAL; + } else if (!ready) { + CAM_DBG(CAM_CRM, + "Req: %lld not ready on link: %x", + req_id, link->link_hdl); + return -EINVAL; + } + + /* + * Do the self-correction when the frames are sync, + * we consider that the frames are synced if the + * difference of two SOF timestamp less than + * (sync_frame_duration / 5). + */ + master_slave_diff = sync_frame_duration; + do_div(master_slave_diff, 5); + if ((trigger == CAM_TRIGGER_POINT_SOF) && + (sync_ref_timestamp > 0) && + (sof_timestamp_delta < master_slave_diff) && + (sync_rd_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) && + (req_id >= link->initial_sync_req) && + (req_id - link->initial_sync_req >= + (INITIAL_IN_SYNC_REQ + link->max_delay))) { + + /* + * This means current frame should sync with next + * frame of sync link, then the request id of in + * rd slot of two links should be same. 
+ */ + CAM_DBG(CAM_CRM, + "link %x req_id %lld, sync_link %x req_id %lld", + link->link_hdl, req_id, + sync_link->link_hdl, sync_req_id); + + if (req_id > sync_req_id) { + CAM_DBG(CAM_CRM, + "link %x too quickly, skip this frame", + link->link_hdl); + return -EAGAIN; + } else if (req_id < sync_req_id) { + CAM_DBG(CAM_CRM, + "sync link %x too quickly, skip next frame of sync link", + sync_link->link_hdl); + link->sync_link_sof_skip = true; + } else if ((sync_link->req.in_q->slot[sync_slot_idx].status != + CRM_SLOT_STATUS_REQ_APPLIED) && !sync_link->is_sending_req) { + CAM_DBG(CAM_CRM, + "link %x other not applied", link->link_hdl); + return -EAGAIN; + } + } + CAM_DBG(CAM_REQ, + "Req: %lld ready to apply on link: %x [validation successful]", + req_id, link->link_hdl); + + return 0; +} + +static int __cam_req_mgr_check_multi_sync_link_ready( + struct cam_req_mgr_core_link *link, + struct cam_req_mgr_core_link **sync_link, + struct cam_req_mgr_slot *slot, + int32_t num_sync_links, + uint32_t trigger) +{ + int i, rc = 0; + + spin_lock_bh(&link->link_state_spin_lock); + if (link->state == CAM_CRM_LINK_STATE_IDLE) { + spin_unlock_bh(&link->link_state_spin_lock); + CAM_ERR(CAM_CRM, "link hdl %x is in idle state", + link->link_hdl); + return -EINVAL; + } + spin_unlock_bh(&link->link_state_spin_lock); + + for (i = 0; i < num_sync_links; i++) { + if (sync_link[i]) { + if (slot->req_id <= sync_link[i]->last_flush_id) { + CAM_DBG(CAM_CRM, + "link:0x%x req:%lld has been flushed in sync link:0x%x, last_flush_id:%lld", + link->link_hdl, slot->req_id, + sync_link[i]->link_hdl, + sync_link[i]->last_flush_id); + + rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true); + if (!rc) + continue; + else { + CAM_DBG(CAM_CRM, "link %x not ready", link->link_hdl); + return -EINVAL; + } + } + + spin_lock_bh(&sync_link[i]->link_state_spin_lock); + if (sync_link[i]->state == CAM_CRM_LINK_STATE_IDLE) { + spin_unlock_bh(&sync_link[i]->link_state_spin_lock); + CAM_ERR(CAM_CRM, "sync link 
hdl %x is idle", + sync_link[i]->link_hdl); + return -EINVAL; + } + spin_unlock_bh(&sync_link[i]->link_state_spin_lock); + + if (link->max_delay == sync_link[i]->max_delay) { + rc = __cam_req_mgr_check_sync_req_is_ready( + link, sync_link[i], + slot, trigger); + if (rc < 0) { + CAM_DBG(CAM_CRM, "link %x not ready", + link->link_hdl); + return rc; + } + } else if (link->max_delay > + sync_link[i]->max_delay) { + link->is_master = true; + sync_link[i]->is_master = false; + rc = __cam_req_mgr_check_sync_for_mslave( + link, sync_link[i], slot); + if (rc < 0) { + CAM_DBG(CAM_CRM, "link%x not ready", + link->link_hdl); + return rc; + } + } else { + link->is_master = false; + sync_link[i]->is_master = true; + rc = __cam_req_mgr_check_sync_for_mslave( + link, sync_link[i], slot); + if (rc < 0) { + CAM_DBG(CAM_CRM, "link %x not ready", + link->link_hdl); + return rc; + } + } + } else { + CAM_ERR(CAM_REQ, "Sync link is null"); + return -EINVAL; + } + } + + rc = __cam_req_mgr_inject_delay(link->req.l_tbl, + slot->idx, trigger); + if (rc < 0) { + CAM_DBG(CAM_CRM, + "Req: %lld needs to inject delay at %s", + slot->req_id, + (trigger == CAM_TRIGGER_POINT_SOF) ? "SOF" : "EOF"); + return -EINVAL; + } + + /* + * At this point all validation is successfully done + * and we can proceed to apply the given request. + * Ideally the next call should return success. 
+ */ + rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, false); + if (rc) + CAM_WARN(CAM_CRM, "Unexpected return value rc: %d", rc); + + return 0; +} + +/** + * __cam_req_mgr_process_req() + * + * @brief : processes read index in request queue and traverse through table + * @link : pointer to link whose input queue and req tbl are + * traversed through + * + * @return : 0 for success, negative for failure + * + */ +static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link, + struct cam_req_mgr_trigger_notify *trigger_data) +{ + int rc = 0, idx, pd, i; + int reset_step = 0; + bool is_applied = true; + int32_t num_sync_links; + uint32_t trigger = trigger_data->trigger; + uint64_t wq_sched_timeout = 0; + struct cam_req_mgr_slot *slot = NULL; + struct cam_req_mgr_tbl_slot *tbl_slot = NULL; + struct cam_req_mgr_req_queue *in_q; + struct cam_req_mgr_core_session *session; + struct cam_req_mgr_connected_device *dev = NULL; + struct cam_req_mgr_core_link *tmp_link = NULL; + struct cam_req_mgr_apply *apply_data = NULL; + struct cam_req_mgr_core_link + *sync_link[MAXIMUM_LINKS_PER_SESSION - 1]; + + session = (struct cam_req_mgr_core_session *)link->parent; + if (!session) { + CAM_WARN(CAM_CRM, "session ptr NULL %x", link->link_hdl); + return -EINVAL; + } + + mutex_lock(&session->lock); + /* + * During session destroy/unlink the link state is updated and session + * mutex is released when flushing the workq. In case the wq is scheduled + * thereafter this API will then check the updated link state and exit + */ + spin_lock_bh(&link->link_state_spin_lock); + if (link->state == CAM_CRM_LINK_STATE_IDLE) { + spin_unlock_bh(&link->link_state_spin_lock); + mutex_unlock(&session->lock); + return -EPERM; + } + spin_unlock_bh(&link->link_state_spin_lock); + + in_q = link->req.in_q; + /* + * Check if new read index, + * - if in pending state, traverse again to complete + * transaction of this read index. + * - if in applied_state, something wrong. 
+ * - if in no_req state, no new req + */ + CAM_DBG(CAM_REQ, + "%s Req[%lld] idx %d req_status %d link_hdl %x wd_timeout %d ms trigger:%d", + ((trigger == CAM_TRIGGER_POINT_SOF) ? "SOF" : "EOF"), + in_q->slot[in_q->rd_idx].req_id, in_q->rd_idx, + in_q->slot[in_q->rd_idx].status, link->link_hdl, + in_q->slot[in_q->rd_idx].additional_timeout, trigger); + + slot = &in_q->slot[in_q->rd_idx]; + + if ((trigger != CAM_TRIGGER_POINT_SOF) && + (trigger != CAM_TRIGGER_POINT_EOF)) + goto end; + + if ((trigger == CAM_TRIGGER_POINT_EOF) && + (!atomic_read(&link->eof_event_cnt))) { + CAM_DBG(CAM_CRM, "Not any request to schedule at EOF"); + goto end; + } + + if (slot->status == CRM_SLOT_STATUS_REQ_APPLIED) + goto end; + + if (trigger == CAM_TRIGGER_POINT_SOF) { + + if (slot->status == CRM_SLOT_STATUS_NO_REQ) { + CAM_DBG(CAM_CRM, "No Pending req"); + rc = 0; + __cam_req_mgr_notify_frame_skip(link, + trigger); + goto end; + } + + /* + * Update the timestamp in session lock protection + * to avoid timing issue. + */ + link->prev_sof_timestamp = link->sof_timestamp; + link->sof_timestamp = trigger_data->sof_timestamp_val; + + /* Use half frame interval to detect the WQ congestion */ + wq_sched_timeout = CAM_REQ_MGR_HALF_FRAME_DURATION(link->sof_timestamp - + link->prev_sof_timestamp) / CAM_COMMON_NS_PER_MS; + + /* Check for WQ congestion */ + if (jiffies_to_msecs(jiffies - + link->last_sof_trigger_jiffies) < + wq_sched_timeout) + link->wq_congestion = true; + else + link->wq_congestion = false; + + /* + * Only update the jiffies for SOF trigger, + * since it is used to protect from + * applying fails in ISP which is triggered at SOF. + */ + if (trigger == CAM_TRIGGER_POINT_SOF) + link->last_sof_trigger_jiffies = jiffies; + + } + + if (slot->status != CRM_SLOT_STATUS_REQ_READY) { + if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + /* + * If slot->num_sync_links is 0, it is sched_req_v1, otherwise + * it is sched_req_v2. 
+ */ + if (slot->num_sync_links == 0) { + num_sync_links = link->num_sync_links; + for (i = 0; i < num_sync_links; i++) + sync_link[i] = link->sync_link[i]; + } else { + num_sync_links = slot->num_sync_links; + for (i = 0; i < num_sync_links; i++) + sync_link[i] = cam_get_link_priv(slot->sync_link_hdls[i]); + } + rc = __cam_req_mgr_check_multi_sync_link_ready( + link, sync_link, slot, num_sync_links, trigger); + } else { + if (link->in_msync_mode) { + CAM_DBG(CAM_CRM, + "Settings master-slave non sync mode for link 0x%x", + link->link_hdl); + link->in_msync_mode = false; + link->initial_sync_req = -1; + for (i = 0; i < link->num_sync_links; i++) { + if (link->sync_link[i]) { + tmp_link = link->sync_link[i]; + tmp_link->initial_sync_req = -1; + tmp_link->in_msync_mode = false; + } + } + } + + /* + * Validate that if the req is ready to apply before + * checking the inject delay. + */ + rc = __cam_req_mgr_check_link_is_ready(link, + slot->idx, true); + + if (!rc) { + rc = __cam_req_mgr_inject_delay(link->req.l_tbl, + slot->idx, trigger); + if (rc < 0) + CAM_DBG(CAM_CRM, + "Req: %lld needs to inject delay at %s", + slot->req_id, + (trigger == CAM_TRIGGER_POINT_SOF) ? "SOF" : "EOF"); + + if (!rc) + rc = __cam_req_mgr_check_link_is_ready(link, + slot->idx, false); + } + } + + if (rc < 0) { + /* + * If traverse result is not success, then some devices + * are not ready with packet for the asked request id, + * hence try again in next sof + */ + slot->status = CRM_SLOT_STATUS_REQ_PENDING; + spin_lock_bh(&link->link_state_spin_lock); + if (link->state == CAM_CRM_LINK_STATE_ERR) { + /* + * During error recovery all tables should be + * ready, don't expect to enter here. + * @TODO: gracefully handle if recovery fails. 
+ */ + CAM_ERR_RATE_LIMIT(CAM_CRM, + "FATAL recovery cant finish idx %d status %d", + in_q->rd_idx, + in_q->slot[in_q->rd_idx].status); + rc = -EPERM; + } + spin_unlock_bh(&link->link_state_spin_lock); + __cam_req_mgr_notify_frame_skip(link, trigger); + __cam_req_mgr_validate_crm_wd_timer(link); + goto end; + } else { + slot->status = CRM_SLOT_STATUS_REQ_READY; + link->is_sending_req = true; + CAM_DBG(CAM_REQ, + "linx_hdl %x Req[%lld] idx %d ready to apply", + link->link_hdl, in_q->slot[in_q->rd_idx].req_id, + in_q->rd_idx); + } + } else { + rc = __cam_req_mgr_inject_delay(link->req.l_tbl, + slot->idx, trigger); + if (rc < 0) { + CAM_DBG(CAM_CRM, + "linx_hdl %x Req: %lld needs to inject delay at %s", + link->link_hdl, slot->req_id, + (trigger == CAM_TRIGGER_POINT_SOF) ? "SOF" : "EOF"); + __cam_req_mgr_notify_frame_skip(link, trigger); + __cam_req_mgr_validate_crm_wd_timer(link); + goto end; + } + } + mutex_unlock(&session->lock); + + rc = __cam_req_mgr_send_req(link, link->req.in_q, trigger, &dev); + if (rc < 0) { + /* Apply req failed retry at next sof */ + slot->status = CRM_SLOT_STATUS_REQ_PENDING; + link->is_sending_req = false; + + if (!link->wq_congestion && dev) { + if (rc != -EAGAIN) + link->retry_cnt++; + + if (link->retry_cnt >= MAXIMUM_RETRY_ATTEMPTS) { + CAM_DBG(CAM_CRM, + "Max retry attempts (count %d) reached on link[0x%x] for req [%lld]", + MAXIMUM_RETRY_ATTEMPTS, link->link_hdl, + in_q->slot[in_q->rd_idx].req_id); + + cam_req_mgr_debug_delay_detect(); + trace_cam_delay_detect("CRM", + "Max retry attempts reached", + in_q->slot[in_q->rd_idx].req_id, + CAM_DEFAULT_VALUE, + link->link_hdl, + CAM_DEFAULT_VALUE, rc); + + /* + * Try for internal recovery - primarily for IFE subdev + * if it's the first instance of stall + */ + if (!slot->recovery_counter) + link->try_for_internal_recovery = true; + + __cam_req_mgr_notify_error_on_link(link, dev); + + /* Increment internal recovery counter */ + if (link->try_for_internal_recovery) { + 
slot->recovery_counter++; + link->try_for_internal_recovery = false; + } + + link->retry_cnt = 0; + } + } else + CAM_WARN_RATE_LIMIT(CAM_CRM, + "workqueue congestion, last applied idx:%d rd idx:%d", + in_q->last_applied_idx, + in_q->rd_idx); + } else { + if (link->retry_cnt) + link->retry_cnt = 0; + + /* Check for any long exposure settings */ + __cam_req_mgr_validate_crm_wd_timer(link); + + spin_lock_bh(&link->link_state_spin_lock); + if (link->state == CAM_CRM_LINK_STATE_ERR) { + CAM_WARN(CAM_CRM, "Err recovery done idx %d", + in_q->rd_idx); + link->state = CAM_CRM_LINK_STATE_READY; + } + spin_unlock_bh(&link->link_state_spin_lock); + + if (link->sync_link_sof_skip) + link->sync_link_sof_skip = false; + + apply_data = link->req.apply_data; + + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + pd = dev->dev_info.p_delay; + + if (pd >= CAM_PIPELINE_DELAY_MAX) { + rc = -EINVAL; + CAM_ERR(CAM_CRM, "link_hdl %x dev %s pd %d greater than max", + link->link_hdl, dev->dev_info.name, pd); + goto end; + } + + idx = apply_data[pd].idx; + tbl_slot = &dev->pd_tbl->slot[idx]; + + if ((apply_data[pd].req_id != -1) && + (tbl_slot->req_apply_map != dev->pd_tbl->dev_mask)) { + is_applied = false; + break; + } + } + + if (is_applied) { + slot->status = CRM_SLOT_STATUS_REQ_APPLIED; + + CAM_DBG(CAM_CRM, "req %d is applied on link %x success", + slot->req_id, + link->link_hdl); + idx = in_q->rd_idx; + reset_step = link->max_delay; + + for (i = 0; i < link->num_sync_links; i++) { + if (link->sync_link[i]) { + if ((link->in_msync_mode) && + (link->sync_link[i]->max_delay > + reset_step)) + reset_step = + link->sync_link[i]->max_delay; + } + } + + in_q->last_applied_idx = idx; + + __cam_req_mgr_dec_idx( + &idx, reset_step + 1, + in_q->num_slots); + __cam_req_mgr_reset_req_slot(link, idx); + } + link->is_sending_req = false; + } + + return rc; + +end: + mutex_unlock(&session->lock); + return rc; +} + +/** + * __cam_req_mgr_add_tbl_to_link() + * + * @brief : Add table to 
list under link sorted by pd decrementing order + * @l_tbl : list of pipeline delay tables. + * @new_tbl : new tbl which will be appended to above list as per its pd value + * + */ +static void __cam_req_mgr_add_tbl_to_link(struct cam_req_mgr_req_tbl **l_tbl, + struct cam_req_mgr_req_tbl *new_tbl) +{ + struct cam_req_mgr_req_tbl *tbl; + + if (!(*l_tbl) || (*l_tbl)->pd < new_tbl->pd) { + new_tbl->next = *l_tbl; + if (*l_tbl) { + new_tbl->pd_delta = + new_tbl->pd - (*l_tbl)->pd; + } + *l_tbl = new_tbl; + } else { + tbl = *l_tbl; + + /* Reach existing tbl which has less pd value */ + while (tbl->next != NULL && + new_tbl->pd < tbl->next->pd) { + tbl = tbl->next; + } + if (tbl->next != NULL) { + new_tbl->pd_delta = + new_tbl->pd - tbl->next->pd; + } else { + /* This is last table in linked list*/ + new_tbl->pd_delta = 0; + } + new_tbl->next = tbl->next; + tbl->next = new_tbl; + tbl->pd_delta = tbl->pd - new_tbl->pd; + } + CAM_DBG(CAM_CRM, "added pd %d tbl to link delta %d", new_tbl->pd, + new_tbl->pd_delta); +} + +/** + * __cam_req_mgr_create_pd_tbl() + * + * @brief : Creates new request table for new delay value + * @delay : New pd table allocated will have this delay value + * + * @return : pointer to newly allocated table, NULL for failure + * + */ +static struct cam_req_mgr_req_tbl *__cam_req_mgr_create_pd_tbl(int32_t delay) +{ + int i = 0; + + struct cam_req_mgr_req_tbl *tbl = + CAM_MEM_ZALLOC(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL); + if (tbl != NULL) { + tbl->num_slots = MAX_REQ_SLOTS; + CAM_DBG(CAM_CRM, "pd= %d slots= %d", delay, tbl->num_slots); + for (i = 0; i < MAX_REQ_SLOTS; i++) { + tbl->slot[i].ops.apply_at_eof = false; + } + } + + return tbl; +} + +/** + * __cam_req_mgr_destroy_all_tbl() + * + * @brief : This func will destroy all pipeline delay based req table structs + * @l_tbl : pointer to first table in list and it has max pd . 
+ * + */ +static void __cam_req_mgr_destroy_all_tbl(struct cam_req_mgr_req_tbl **l_tbl) +{ + struct cam_req_mgr_req_tbl *tbl = *l_tbl, *temp; + + CAM_DBG(CAM_CRM, "*l_tbl %pK", tbl); + while (tbl != NULL) { + temp = tbl->next; + CAM_MEM_FREE(tbl); + tbl = temp; + } + *l_tbl = NULL; +} + +/** + * __cam_req_mgr_setup_in_q() + * + * @brief : Initialize req table data + * @req : request data pointer + * + * @return: 0 for success, negative for failure + * + */ +static int __cam_req_mgr_setup_in_q(struct cam_req_mgr_req_data *req) +{ + int i; + struct cam_req_mgr_req_queue *in_q = req->in_q; + + if (!in_q) { + CAM_ERR(CAM_CRM, "NULL in_q"); + return -EINVAL; + } + + mutex_lock(&req->lock); + in_q->num_slots = MAX_REQ_SLOTS; + + for (i = 0; i < in_q->num_slots; i++) { + in_q->slot[i].idx = i; + in_q->slot[i].req_id = -1; + in_q->slot[i].skip_idx = 0; + in_q->slot[i].status = CRM_SLOT_STATUS_NO_REQ; + } + + in_q->wr_idx = 0; + in_q->rd_idx = 0; + mutex_unlock(&req->lock); + + return 0; +} + +/** + * __cam_req_mgr_reset_req_tbl() + * + * @brief : Initialize req table data + * @req : request queue pointer + * + * @return: 0 for success, negative for failure + * + */ +static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req) +{ + struct cam_req_mgr_req_queue *in_q = req->in_q; + + if (!in_q) { + CAM_ERR(CAM_CRM, "NULL in_q"); + return -EINVAL; + } + + mutex_lock(&req->lock); + memset(in_q->slot, 0, + sizeof(struct cam_req_mgr_slot) * in_q->num_slots); + in_q->num_slots = 0; + + in_q->wr_idx = 0; + in_q->rd_idx = 0; + mutex_unlock(&req->lock); + + return 0; +} + +/** + * __cam_req_mgr_process_sof_freeze() + * + * @brief : Apoptosis - Handles case when connected devices are not responding + * @priv : link information + * @data : task data + * + */ +static int __cam_req_mgr_process_sof_freeze(void *priv, void *data) +{ + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_req_queue *in_q = NULL; + struct cam_req_mgr_core_session *session = NULL; + struct 
cam_req_mgr_message msg = {0}; + int rc = 0; + int64_t last_applied_req_id = -EINVAL; + + if (!data || !priv) { + CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv); + return -EINVAL; + } + + link = (struct cam_req_mgr_core_link *)priv; + session = (struct cam_req_mgr_core_session *)link->parent; + if (!session) { + CAM_WARN(CAM_CRM, "session ptr NULL %x", link->link_hdl); + return -EINVAL; + } + + in_q = link->req.in_q; + if (in_q) { + mutex_lock(&link->req.lock); + if (in_q->last_applied_idx >= 0) + last_applied_req_id = + in_q->slot[in_q->last_applied_idx].req_id; + mutex_unlock(&link->req.lock); + } + + spin_lock_bh(&link->link_state_spin_lock); + if ((link->watchdog) && (link->watchdog->pause_timer)) { + CAM_INFO(CAM_CRM, + "link:%x watchdog paused, maybe stream on/off is delayed", + link->link_hdl); + spin_unlock_bh(&link->link_state_spin_lock); + return rc; + } + spin_unlock_bh(&link->link_state_spin_lock); + + CAM_ERR(CAM_CRM, + "SOF freeze for session: %d link: 0x%x max_pd: %d last_req_id:%d", + session->session_hdl, link->link_hdl, link->max_delay, + last_applied_req_id); + + __cam_req_mgr_send_evt(0, CAM_REQ_MGR_LINK_EVT_SOF_FREEZE, + CRM_KMD_ERR_FATAL, link); + memset(&msg, 0, sizeof(msg)); + + msg.session_hdl = session->session_hdl; + msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE; + msg.u.err_msg.request_id = 0; + msg.u.err_msg.link_hdl = link->link_hdl; + msg.u.err_msg.error_code = CAM_REQ_MGR_ISP_UNREPORTED_ERROR; + msg.u.err_msg.resource_size = 0; + + rc = cam_req_mgr_notify_message(&msg, + V4L_EVENT_CAM_REQ_MGR_ERROR, V4L_EVENT_CAM_REQ_MGR_EVENT); + + if (rc) + CAM_ERR(CAM_CRM, + "Error notifying SOF freeze for session %d link 0x%x rc %d", + session->session_hdl, link->link_hdl, rc); + + return rc; +} + +/** + * __cam_req_mgr_sof_freeze() + * + * @brief : Callback function for timer timeout indicating SOF freeze + * @data : timer pointer + * + */ +static void __cam_req_mgr_sof_freeze(struct timer_list *timer_data) +{ + struct 
cam_req_mgr_timer *timer = + container_of(timer_data, struct cam_req_mgr_timer, sys_timer); + struct crm_workq_task *task = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct crm_task_payload *task_data; + + if (!timer) { + CAM_ERR(CAM_CRM, "NULL timer"); + return; + } + + link = (struct cam_req_mgr_core_link *)timer->parent; + + task = cam_req_mgr_workq_get_task(link->workq); + if (!task) { + CAM_ERR(CAM_CRM, "No empty task"); + return; + } + + task_data = (struct crm_task_payload *)task->payload; + task_data->type = CRM_WORKQ_TASK_NOTIFY_FREEZE; + task->process_cb = &__cam_req_mgr_process_sof_freeze; + cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); +} + +/** + * __cam_req_mgr_create_subdevs() + * + * @brief : Create new crm subdev to link with realtime devices + * @l_dev : list of subdevs internal to crm + * @num_dev : num of subdevs to be created for link + * + * @return : pointer to allocated list of devices + */ +static int __cam_req_mgr_create_subdevs( + struct cam_req_mgr_connected_device **l_dev, int32_t num_dev) +{ + int rc = 0; + *l_dev = CAM_MEM_ZALLOC(sizeof(struct cam_req_mgr_connected_device) * + num_dev, GFP_KERNEL); + if (!*l_dev) + rc = -ENOMEM; + + return rc; +} + +/** + * __cam_req_mgr_destroy_subdev() + * + * @brief : Cleans up the subdevs allocated by crm for link + * @l_device : pointer to list of subdevs crm created + * + */ +static void __cam_req_mgr_destroy_subdev( + struct cam_req_mgr_connected_device **l_device) +{ + CAM_DBG(CAM_CRM, "*l_device %pK", *l_device); + if (*(l_device) != NULL) { + CAM_MEM_FREE(*(l_device)); + *l_device = NULL; + } +} + +/** + * __cam_req_mgr_disconnect_link() + * + * @brief : Unlinks all devices on the link + * @link : pointer to link + * + * @return : returns if unlink for any device was success or failure + */ +static int __cam_req_mgr_disconnect_link(struct cam_req_mgr_core_link *link) +{ + int32_t i = 0; + struct cam_req_mgr_connected_device *dev; + struct 
cam_req_mgr_core_dev_link_setup link_data; + int rc = 0; + + link_data.link_enable = 0; + link_data.link_hdl = link->link_hdl; + link_data.crm_cb = NULL; + + /* Using device ops unlink devices */ + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + link_data.dev_hdl = dev->dev_hdl; + if (dev->ops && dev->ops->link_setup) { + rc = dev->ops->link_setup(&link_data); + if (rc) + CAM_ERR(CAM_CRM, + "Unlink failed dev name %s hdl %x", + dev->dev_info.name, + dev->dev_hdl); + } + dev->dev_hdl = 0; + dev->parent = NULL; + dev->ops = NULL; + } + + return rc; +} + +/** + * __cam_req_mgr_destroy_link_info() + * + * @brief : Cleans up the mem allocated while linking + * @link : pointer to link, mem associated with this link is freed + */ +static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link) +{ + __cam_req_mgr_destroy_all_tbl(&link->req.l_tbl); + __cam_req_mgr_reset_in_q(&link->req); + link->req.num_tbl = 0; + mutex_destroy(&link->req.lock); + + link->pd_mask = 0; + link->num_devs = 0; + link->max_delay = CAM_PIPELINE_DELAY_0; + link->min_delay = CAM_PIPELINE_DELAY_2; +} + +/** + * __cam_req_mgr_reserve_link() + * + * @brief: Reserves one link data struct within session + * @session: session identifier + * + * @return: pointer to link reserved + * + */ +static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link( + struct cam_req_mgr_core_session *session) +{ + struct cam_req_mgr_core_link *link; + struct cam_req_mgr_req_queue *in_q; + int i; + + if (!session || !g_crm_core_dev) { + CAM_ERR(CAM_CRM, "NULL session/core_dev ptr"); + return NULL; + } + + if (session->num_links >= MAXIMUM_LINKS_PER_SESSION) { + CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d", + session->num_links, MAXIMUM_LINKS_PER_SESSION); + return NULL; + } + for (i = 0; i < MAXIMUM_LINKS_CAPACITY; i++) { + if (!atomic_cmpxchg(&g_links[i].is_used, 0, 1)) { + link = &g_links[i]; + CAM_DBG(CAM_CRM, "alloc link index %d", i); + 
cam_req_mgr_core_link_reset(link); + break; + } + } + if (i == MAXIMUM_LINKS_CAPACITY) + return NULL; + + in_q = CAM_MEM_ZALLOC(sizeof(struct cam_req_mgr_req_queue), + GFP_KERNEL); + if (!in_q) { + CAM_ERR(CAM_CRM, "failed to create input queue, no mem"); + return NULL; + } + + mutex_lock(&link->lock); + link->num_devs = 0; + link->max_delay = CAM_PIPELINE_DELAY_0; + link->min_delay = CAM_PIPELINE_DELAY_2; + memset(in_q->slot, 0, + sizeof(struct cam_req_mgr_slot) * MAX_REQ_SLOTS); + link->req.in_q = in_q; + in_q->num_slots = 0; + + spin_lock_bh(&link->link_state_spin_lock); + link->state = CAM_CRM_LINK_STATE_IDLE; + spin_unlock_bh(&link->link_state_spin_lock); + + link->parent = (void *)session; + link->is_sending_req = false; + + for (i = 0; i < MAXIMUM_LINKS_PER_SESSION - 1; i++) + link->sync_link[i] = NULL; + + mutex_unlock(&link->lock); + + mutex_lock(&session->lock); + /* Loop through and find a free index */ + for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) { + if (!session->links[i]) { + CAM_DBG(CAM_CRM, + "Free link index %d found, num_links=%d", + i, session->num_links); + session->links[i] = link; + break; + } + } + + if (i == MAXIMUM_LINKS_PER_SESSION) { + CAM_ERR(CAM_CRM, "Free link index not found"); + goto error; + } + + session->num_links++; + CAM_DBG(CAM_CRM, "Active session links (%d)", + session->num_links); + mutex_unlock(&session->lock); + + return link; +error: + mutex_unlock(&session->lock); + CAM_MEM_FREE(in_q); + return NULL; +} + +/* + * __cam_req_mgr_free_link() + * + * @brief: Frees the link and its request queue + * + * @link: link identifier + * + */ +static void __cam_req_mgr_free_link(struct cam_req_mgr_core_link *link) +{ + ptrdiff_t i; + CAM_MEM_FREE(link->req.in_q); + link->parent = NULL; + i = link - g_links; + CAM_DBG(CAM_CRM, "free link index %d", i); + cam_req_mgr_core_link_reset(link); + atomic_set(&g_links[i].is_used, 0); +} + +/** + * __cam_req_mgr_unreserve_link() + * + * @brief : Removes the link data struct from the 
session and frees it + * @session: session identifier + * @link : link identifier + * + */ +static void __cam_req_mgr_unreserve_link( + struct cam_req_mgr_core_session *session, + struct cam_req_mgr_core_link *link) +{ + int i, j; + + if (!session || !link) { + CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK", + session, link); + return; + } + + mutex_lock(&session->lock); + if (!session->num_links) { + CAM_WARN(CAM_CRM, "No active link or invalid state: hdl %x", + link->link_hdl); + mutex_unlock(&session->lock); + return; + } + + for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) { + if (session->links[i] == link) + session->links[i] = NULL; + for (j = 0; j < MAXIMUM_LINKS_PER_SESSION - 1; j++) { + if (link->sync_link[j]) { + if (link->sync_link[j] == session->links[i]) + session->links[i]->sync_link[j] = NULL; + } + } + } + + for (j = 0; j < MAXIMUM_LINKS_PER_SESSION - 1; j++) + link->sync_link[j] = NULL; + session->num_links--; + CAM_DBG(CAM_CRM, "Active session links (%d)", session->num_links); + mutex_unlock(&session->lock); + __cam_req_mgr_free_link(link); +} + +/* Workqueue context processing section */ + +static inline bool cam_req_mgr_check_for_functional_flush_caps( + uint32_t bit_location, + struct cam_req_mgr_flush_info *flush_info) +{ + if (!flush_info->reserved) + return false; + + if (flush_info->reserved & bit_location) + return true; + + return false; +} + +static int __cam_req_mgr_flush_dev_with_max_pd(struct cam_req_mgr_core_link *link, + struct cam_req_mgr_flush_info *flush_info, int max_pd) +{ + struct cam_req_mgr_connected_device *device; + struct cam_req_mgr_flush_request flush_req = {0}; + int i, rc = 0; + bool enable_standby = cam_req_mgr_check_for_functional_flush_caps( + CAM_REQ_MGR_ENABLE_SENSOR_STANDBY, flush_info); + + for (i = 0; i < link->num_devs; i++) { + device = &link->l_dev[i]; + if (device->dev_info.p_delay > max_pd) + continue; + + flush_req.link_hdl = flush_info->link_hdl; + flush_req.dev_hdl = device->dev_hdl; + 
flush_req.req_id = flush_info->req_id; + flush_req.type = flush_info->flush_type; + flush_req.enable_sensor_standby = + (!g_crm_core_dev->disable_sensor_standby && enable_standby); + + if (device->ops && device->ops->flush_req) + rc = device->ops->flush_req(&flush_req); + } + + return rc; +} + +static int __cam_req_mgr_get_dev_pd(struct cam_req_mgr_core_link *link, + enum cam_req_mgr_device_id dev_id) +{ + struct cam_req_mgr_connected_device *dev; + int i; + + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + if (dev->dev_info.dev_id == dev_id) + return dev->dev_info.p_delay; + } + + return -ENODEV; +} + +/** + * __cam_req_mgr_try_cancel_req() + * + * @brief : Attempt to cancel a request. Requests that have not been + * applied to IFE can be cancelled. + * @link : Link on which to cancel + * @flush_info : Contains info about the request to be flushed + * + * @return: 0 on success. + */ +static int __cam_req_mgr_try_cancel_req(struct cam_req_mgr_core_link *link, + struct cam_req_mgr_flush_info *flush_info) +{ + struct cam_req_mgr_slot *slot = NULL; + struct cam_req_mgr_req_queue *in_q = link->req.in_q; + int idx, pd = CAM_PIPELINE_DELAY_MAX; + + idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id); + if (idx < 0) { + CAM_ERR(CAM_CRM, "req_id %lld not found in input queue", flush_info->req_id); + return -ENOENT; + } + + slot = &in_q->slot[idx]; + if ((slot->req_id > 0) && slot->num_sync_links) + __cam_req_mgr_disconnect_req_on_sync_link(link, slot); + + switch (slot->status) { + case CRM_SLOT_STATUS_REQ_PENDING: + case CRM_SLOT_STATUS_REQ_APPLIED: + pd = __cam_req_mgr_get_dev_pd(link, CAM_REQ_MGR_DEVICE_IFE); + if ((pd < 0) || (pd >= CAM_PIPELINE_DELAY_MAX)) { + CAM_ERR(CAM_CRM, "pd: %d link_hdl: 0x%x red_id: %d", pd, + link->link_hdl, flush_info->req_id); + return pd; + } + + if (flush_info->req_id <= link->req.prev_apply_data[pd].req_id) { + CAM_WARN(CAM_CRM, "req %lld already applied to IFE on link 0x%x", + flush_info->req_id, 
flush_info->link_hdl); + return -EPERM; + } + + /* find highest pd device that can be flushed */ + while (pd + 1 <= link->max_delay) { + if (flush_info->req_id <= link->req.prev_apply_data[pd + 1].req_id) + break; + pd++; + } + fallthrough; + case CRM_SLOT_STATUS_REQ_READY: + case CRM_SLOT_STATUS_REQ_ADDED: + slot->additional_timeout = 0; + __cam_req_mgr_in_q_skip_idx(in_q, idx); + break; + default: + CAM_ERR(CAM_CRM, "invalid state for slot link:0x%x req:%lld", + flush_info->link_hdl, flush_info->req_id); + return -EINVAL; + } + + CAM_DBG(CAM_CRM, "cancelling request %lld on link 0x%x for devices with pd less than %d", + flush_info->req_id, flush_info->link_hdl, pd); + __cam_req_mgr_flush_dev_with_max_pd(link, flush_info, pd); + link->open_req_cnt--; + return 0; +} + +/** + * cam_req_mgr_process_flush_req() + * + * @brief: This runs in workque thread context. Call core funcs to check + * which requests need to be removed/cancelled. + * @priv : link information. + * @data : contains information about frame_id, link etc. + * + * @return: 0 on success. 
+ */ +int cam_req_mgr_process_flush_req(void *priv, void *data) +{ + int rc = 0; + struct cam_req_mgr_flush_info *flush_info = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct crm_task_payload *task_data = NULL; + + if (!data || !priv) { + CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv); + return -EINVAL; + } + + link = (struct cam_req_mgr_core_link *)priv; + task_data = (struct crm_task_payload *)data; + flush_info = (struct cam_req_mgr_flush_info *)&task_data->u; + CAM_DBG(CAM_REQ, "link_hdl %x req_id %lld type %d", + flush_info->link_hdl, + flush_info->req_id, + flush_info->flush_type); + + trace_cam_flush_req(link, flush_info); + + mutex_lock(&link->req.lock); + switch (flush_info->flush_type) { + case CAM_REQ_MGR_FLUSH_TYPE_ALL: + link->last_flush_id = flush_info->req_id; + CAM_INFO(CAM_CRM, "Last request id to flush is %lld on link 0x%x", + flush_info->req_id, link->link_hdl); + __cam_req_mgr_flush_req_slot(link); + __cam_req_mgr_reset_apply_data(link); + __cam_req_mgr_flush_dev_with_max_pd(link, flush_info, link->max_delay); + link->open_req_cnt = 0; + link->resume_sync_curr_mask = 0x0; + link->exp_time_for_resume = 0; + break; + case CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ: + link->last_flush_id = flush_info->req_id; + CAM_DBG(CAM_CRM, "Canceling req %lld on link 0x%x", + flush_info->req_id, link->link_hdl); + rc = __cam_req_mgr_try_cancel_req(link, flush_info); + if (rc) + CAM_WARN(CAM_CRM, "cannot cancel req_id %lld on link 0x%x", + flush_info->req_id, flush_info->link_hdl); + break; + default: + CAM_ERR(CAM_CRM, "Invalid flush type %u", flush_info->flush_type); + rc = -EINVAL; + break; + } + + complete(&link->workq_comp); + mutex_unlock(&link->req.lock); + + return rc; +} + +/** + * cam_req_mgr_process_sched_req() + * + * @brief: This runs in workque thread context. Call core funcs to check + * which peding requests can be processed. + * @priv : link information. + * @data : contains information about frame_id, link etc. 
+ * + * @return: 0 on success. + */ +int cam_req_mgr_process_sched_req(void *priv, void *data) +{ + int rc = 0, i, sync_idx = 0; + struct cam_req_mgr_core_sched_req *sched_req = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_req_queue *in_q = NULL; + struct cam_req_mgr_slot *slot = NULL; + + if (!data || !priv) { + CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv); + rc = -EINVAL; + goto end; + } + link = (struct cam_req_mgr_core_link *)priv; + sched_req = (struct cam_req_mgr_core_sched_req *)data; + in_q = link->req.in_q; + + CAM_DBG(CAM_CRM, + "link_hdl %x req_id %lld at slot %d sync_mode %d is_master %d exp_timeout_val %d ms", + sched_req->link_hdl, sched_req->req_id, + in_q->wr_idx, sched_req->sync_mode, + link->is_master, + sched_req->additional_timeout); + + mutex_lock(&link->req.lock); + slot = &in_q->slot[in_q->wr_idx]; + + if (slot->status != CRM_SLOT_STATUS_NO_REQ && + slot->status != CRM_SLOT_STATUS_REQ_APPLIED) + CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status); + + slot->status = CRM_SLOT_STATUS_REQ_ADDED; + slot->req_id = sched_req->req_id; + slot->sync_mode = sched_req->sync_mode; + slot->skip_idx = 0; + slot->recover = sched_req->bubble_enable; + + if ((sched_req->num_valid_params > 0) && + (sched_req->param_mask & CAM_CRM_MISMATCHED_FRAME_MODE_MASK)) + slot->mismatched_frame_mode = sched_req->params[0]; + + for (i = 0; i < sched_req->num_links; i++) { + if (link->link_hdl != sched_req->link_hdls[i]) { + slot->sync_link_hdls[sync_idx] = sched_req->link_hdls[i]; + CAM_DBG(CAM_REQ, "link:0x%x req:%lld sync_link[%d]:0x%x", + link->link_hdl, slot->req_id, sync_idx, + slot->sync_link_hdls[sync_idx]); + sync_idx++; + } + } + slot->num_sync_links = sync_idx; + + link->open_req_cnt++; + CAM_DBG(CAM_REQ, "Open_req_cnt:%u after scheduling req:%d mismatched_frame_mode:%d", + link->open_req_cnt, sched_req->req_id, slot->mismatched_frame_mode); + __cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots); + + if (slot->sync_mode 
== CAM_REQ_MGR_SYNC_MODE_SYNC) { + if (link->initial_sync_req == -1) + link->initial_sync_req = slot->req_id; + } else { + link->initial_sync_req = -1; + for (i = 0; i < link->num_sync_links; i++) { + if (link->sync_link[i]) + link->sync_link[i]->initial_sync_req = -1; + } + } + + mutex_unlock(&link->req.lock); + +end: + return rc; +} + +/** + * cam_req_mgr_sync_info_for_sync_mode() + * + * @brief : Keep links in sync by syncing the according req + * table slot info on the sync link during sync mode. + * @link : pointer to link whose input queue and req tbl are + * traversed through. + * @add_req : information about new request available at a device. + * @slot : pointer to the current slot being processed. + * @type : sync type + * @return : 0 for success, negative for failure. + */ +int cam_req_mgr_sync_info_for_sync_mode( + struct cam_req_mgr_core_link *link, + struct cam_req_mgr_add_request *add_req, + struct cam_req_mgr_tbl_slot *slot, + uint32_t type) +{ + int rc = 0, i = 0, j = 0; + int idx; + int32_t sync_dev_hdl = -1; + char *dev_name = NULL; + struct cam_req_mgr_connected_device *device = NULL; + struct cam_req_mgr_req_tbl *tbl = NULL; + struct cam_req_mgr_tbl_slot *sync_slot = NULL; + + for (i = 0; i < link->num_sync_links; i++) { + mutex_lock(&link->sync_link[i]->req.lock); + idx = __cam_req_mgr_find_slot_for_req( + link->sync_link[i]->req.in_q, add_req->req_id); + if (idx < 0) { + CAM_ERR(CAM_CRM, + "Req_id %lld not found in in_q for dev %s on link 0x%x", + add_req->req_id, link->l_dev[i].dev_info.name, + link->sync_link[i]->link_hdl); + rc = -EBADSLT; + mutex_unlock(&link->sync_link[i]->req.lock); + goto end; + } + + dev_name = __cam_req_mgr_dev_handle_to_name( + add_req->dev_hdl, link); + sync_dev_hdl = __cam_req_mgr_name_to_dev_handle( + dev_name, link->sync_link[i]); + + for (j = 0; j < link->sync_link[i]->num_devs; j++) { + device = &link->sync_link[i]->l_dev[j]; + CAM_DBG(CAM_CRM, + "Req %lld device hdl %x : sync link dev hdl %x", + 
add_req->req_id, device->dev_hdl, sync_dev_hdl); + if (device->dev_hdl == sync_dev_hdl) { + tbl = device->pd_tbl; + break; + } + } + + if (!tbl) { + CAM_ERR_RATE_LIMIT(CAM_CRM, + "Sync link dev hdl %x not found", sync_dev_hdl); + rc = -EINVAL; + mutex_unlock(&link->sync_link[i]->req.lock); + goto end; + } + + sync_slot = &tbl->slot[idx]; + + switch (type) { + case CAM_SYNC_TYPE_DELAY_AT_SOF: + sync_slot->inject_delay_at_sof = slot->inject_delay_at_sof; + break; + case CAM_SYNC_TYPE_DELAY_AT_EOF: + sync_slot->inject_delay_at_eof = slot->inject_delay_at_eof; + break; + case CAM_SYNC_TYPE_APPLY_AT_EOF: + sync_slot->ops.apply_at_eof = slot->ops.apply_at_eof; + if (sync_slot->ops.num_dev == MAX_DEV_FOR_SPECIAL_OPS) + CAM_WARN(CAM_CRM, + "Sync link 0x%x slot:%d already has %d devices, can't add more", + link->sync_link[i]->link_hdl, idx, sync_slot->ops.num_dev); + else + sync_slot->ops.dev_hdl[sync_slot->ops.num_dev++] = sync_dev_hdl; + break; + default: + CAM_ERR(CAM_CRM, "Invalid Sync type:%u", type); + rc = -EINVAL; + break; + } + mutex_unlock(&link->sync_link[i]->req.lock); + } +end: + return rc; +} + +/** + * cam_req_mgr_process_add_req() + * + * @brief: This runs in workque thread context. Call core funcs to check + * which peding requests can be processed. + * @priv : link information. + * @data : contains information about frame_id, link etc. + * + * @return: 0 on success. 
+ */ +int cam_req_mgr_process_add_req(void *priv, void *data) +{ + int rc = 0, i = 0; + int idx, next_idx = 0; + struct cam_req_mgr_add_request *add_req = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_connected_device *device = NULL; + struct cam_req_mgr_req_tbl *tbl = NULL; + struct cam_req_mgr_tbl_slot *slot = NULL; + struct crm_task_payload *task_data = NULL; + struct cam_req_mgr_slot *link_slot = NULL, *next_slot = NULL; + struct cam_req_mgr_state_monitor state; + + if (!data || !priv) { + CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv); + rc = -EINVAL; + goto end; + } + + link = (struct cam_req_mgr_core_link *)priv; + task_data = (struct crm_task_payload *)data; + add_req = (struct cam_req_mgr_add_request *)&task_data->u; + + for (i = 0; i < link->num_devs; i++) { + device = &link->l_dev[i]; + if (device->dev_hdl == add_req->dev_hdl) { + tbl = device->pd_tbl; + break; + } + } + if (!tbl) { + CAM_ERR_RATE_LIMIT(CAM_CRM, "dev_hdl not found %x, %x %x", + add_req->dev_hdl, + link->l_dev[0].dev_hdl, + link->l_dev[1].dev_hdl); + rc = -EINVAL; + goto end; + } + /* + * Go through request table and add + * request id to proper table + * 1. find req slot in in_q matching req_id.sent by dev + * 2. goto table of this device based on p_delay + * 3. mark req_ready_map with this dev_bit. 
+ */ + + mutex_lock(&link->req.lock); + idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id); + if (idx < 0) { + CAM_ERR(CAM_CRM, + "Req_id %lld not found in in_q for dev %s on link 0x%x", + add_req->req_id, device->dev_info.name, link->link_hdl); + rc = -EBADSLT; + mutex_unlock(&link->req.lock); + goto end; + } + + link_slot = &link->req.in_q->slot[idx]; + slot = &tbl->slot[idx]; + if (add_req->trigger_skip) { + next_idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, (add_req->req_id + 1)); + if (next_idx >= 0) { + next_slot = &link->req.in_q->slot[next_idx]; + CAM_WARN(CAM_CRM, "Skip requested on req: %u skipping for req: %u idx: %u", + add_req->req_id, (add_req->req_id + 1), next_idx); + next_slot->skip_set = true; + } + } + + if ((add_req->skip_at_sof & 0xFF) > slot->inject_delay_at_sof) { + slot->inject_delay_at_sof = (add_req->skip_at_sof & 0xFF); + CAM_DBG(CAM_CRM, + "Req_id %llu injecting delay %llu frame at SOF by %s on link 0x%x", + add_req->req_id, + slot->inject_delay_at_sof, + device->dev_info.name, + link->link_hdl); + + if (link_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + rc = cam_req_mgr_sync_info_for_sync_mode( + link, add_req, slot, CAM_SYNC_TYPE_DELAY_AT_SOF); + if (rc < 0) { + CAM_ERR(CAM_CRM, + "Req_id %llu injecting delay %llu failed frame at SOF by %s for sync link 0x%x", + add_req->req_id, + slot->inject_delay_at_sof, + device->dev_info.name, + link->link_hdl); + mutex_unlock(&link->req.lock); + goto end; + } else { + CAM_DBG(CAM_CRM, + "Req_id %llu injecting delay %llu frame at SOF by %s for sync link 0x%x", + add_req->req_id, + slot->inject_delay_at_sof, + device->dev_info.name, + link->link_hdl); + } + } + } + + if ((add_req->skip_at_eof & 0xFF) > slot->inject_delay_at_eof) { + slot->inject_delay_at_eof = (add_req->skip_at_eof & 0xFF); + CAM_DBG(CAM_CRM, + "Req_id %llu injecting delay %llu frame at EOF by %s on link 0x%x", + add_req->req_id, + slot->inject_delay_at_eof, + device->dev_info.name, + 
link->link_hdl); + + if (link_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + rc = cam_req_mgr_sync_info_for_sync_mode( + link, add_req, slot, CAM_SYNC_TYPE_DELAY_AT_EOF); + if (rc < 0) { + CAM_ERR(CAM_CRM, + "Req_id %llu injecting delay %llu failed frame at EOF by %s for sync link 0x%x", + add_req->req_id, + slot->inject_delay_at_eof, + device->dev_info.name, + link->link_hdl); + mutex_unlock(&link->req.lock); + goto end; + } else { + CAM_DBG(CAM_CRM, + "Req_id %llu injecting delay %llu frame at EOF by %s for sync link 0x%x", + add_req->req_id, + slot->inject_delay_at_eof, + device->dev_info.name, + link->link_hdl); + } + } + } + + if (add_req->trigger_eof) { + slot->ops.apply_at_eof = true; + if (slot->ops.num_dev == MAX_DEV_FOR_SPECIAL_OPS) { + CAM_ERR(CAM_REQ, + "Req_id %llu slot:%d already has %d devices, can't add more", + add_req->req_id, idx, slot->ops.num_dev); + mutex_unlock(&link->req.lock); + return -EINVAL; + } + + slot->ops.dev_hdl[slot->ops.num_dev++] = add_req->dev_hdl; + CAM_DBG(CAM_REQ, + "Req_id %llu slot:%d added for EOF tigger for Device: %s on link 0x%x", + add_req->req_id, idx, device->dev_info.name, link->link_hdl); + + if (link_slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + rc = cam_req_mgr_sync_info_for_sync_mode( + link, add_req, slot, CAM_SYNC_TYPE_APPLY_AT_EOF); + if (rc < 0) { + CAM_ERR(CAM_REQ, + "Req_id %llu slot:%d added failed for EOF tigger for Device: %s for sync link 0x%x", + add_req->req_id, + idx, + device->dev_info.name, + link->link_hdl); + mutex_unlock(&link->req.lock); + goto end; + } else { + CAM_DBG(CAM_REQ, + "Req_id %llu slot:%d added for EOF tigger for Device: %s for sync link 0x%x", + add_req->req_id, + idx, + device->dev_info.name, + link->link_hdl); + } + } + } + + if (slot->state != CRM_REQ_STATE_PENDING && + slot->state != CRM_REQ_STATE_EMPTY) { + CAM_WARN(CAM_CRM, + "Unexpected state %d for slot %d map %x for dev %s on link 0x%x", + slot->state, idx, slot->req_ready_map, + device->dev_info.name, 
link->link_hdl); + } + + slot->state = CRM_REQ_STATE_PENDING; + slot->req_ready_map |= BIT(device->dev_bit); + + CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x on link 0x%x", + idx, add_req->dev_hdl, add_req->req_id, tbl->pd, + slot->req_ready_map, link->link_hdl); + + trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device); + + if (slot->req_ready_map == tbl->dev_mask) { + CAM_DBG(CAM_REQ, + "link 0x%x idx %d req_id %lld pd %d SLOT READY", + link->link_hdl, idx, add_req->req_id, tbl->pd); + slot->state = CRM_REQ_STATE_READY; + + state.req_state = CAM_CRM_REQ_READY; + state.req_id = add_req->req_id; + state.dev_hdl = -1; + state.frame_id = -1; + __cam_req_mgr_update_state_monitor_array(link, &state); + } + mutex_unlock(&link->req.lock); + +end: + return rc; +} + +/** + * __cam_req_mgr_apply_on_bubble() + * + * @brief : This API tries to apply settings to the device + * with highest pd on the bubbled frame + * @link : link information. + * @err_info : contains information about frame_id, trigger etc. 
+ * + */ +void __cam_req_mgr_apply_on_bubble( + struct cam_req_mgr_core_link *link, + struct cam_req_mgr_error_notify *err_info) +{ + int rc = 0; + struct cam_req_mgr_trigger_notify trigger_data; + + trigger_data.dev_hdl = err_info->dev_hdl; + trigger_data.frame_id = err_info->frame_id; + trigger_data.link_hdl = err_info->link_hdl; + trigger_data.sof_timestamp_val = + err_info->sof_timestamp_val; + trigger_data.trigger = err_info->trigger; + + rc = __cam_req_mgr_process_req(link, &trigger_data); + if (rc) + CAM_ERR(CAM_CRM, + "Failed to apply request on bubbled frame"); +} + + +/** + * cam_req_mgr_issue_resume() + * + * @brief: This runs in workque thread context to issue + * resume cmd to relevant devices + * @priv : link information + * @data : contains information on the resume message + * + * @return: 0 on success + */ +int cam_req_mgr_issue_resume(void *priv, void *data) +{ + int rc = 0; + struct cam_req_mgr_core_link *link = NULL; + + if (!data || !priv) { + CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv); + rc = -EINVAL; + goto end; + } + + link = (struct cam_req_mgr_core_link *)priv; + rc = __cam_req_mgr_send_evt(0, CAM_REQ_MGR_LINK_EVT_RESUME_HW, + CRM_KMD_ERR_MAX, link); + +end: + return rc; +} + +/** + * cam_req_mgr_process_error() + * + * @brief: This runs in workque thread context. bubble /err recovery. + * @priv : link information. + * @data : contains information about frame_id, link etc. + * + * @return: 0 on success. 
+ */ +int cam_req_mgr_process_error(void *priv, void *data) +{ + int rc = 0, idx = -1; + int i, slot_diff; + struct cam_req_mgr_error_notify *err_info = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_req_queue *in_q = NULL; + struct cam_req_mgr_slot *slot = NULL; + struct crm_task_payload *task_data = NULL; + struct cam_req_mgr_state_monitor state; + struct cam_req_mgr_req_tbl *tbl; + + if (!data || !priv) { + CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv); + rc = -EINVAL; + goto end; + } + + link = (struct cam_req_mgr_core_link *)priv; + task_data = (struct crm_task_payload *)data; + err_info = (struct cam_req_mgr_error_notify *)&task_data->u; + CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld error %d", + err_info->link_hdl, + err_info->req_id, + err_info->error); + + in_q = link->req.in_q; + tbl = link->req.l_tbl; + + state.req_state = CAM_CRM_PROCESS_ERR; + state.req_id = err_info->req_id; + state.dev_hdl = -1; + state.frame_id = -1; + __cam_req_mgr_update_state_monitor_array(link, &state); + + mutex_lock(&link->req.lock); + switch (err_info->error) { + case CRM_KMD_ERR_BUBBLE: + case CRM_KMD_WARN_INTERNAL_RECOVERY: + idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id); + if (idx < 0) { + CAM_ERR_RATE_LIMIT(CAM_CRM, + "req_id %lld not found in input queue", + err_info->req_id); + } else { + CAM_DBG(CAM_CRM, "req_id %lld found at idx %d last_applied %d", + err_info->req_id, idx, in_q->last_applied_idx); + slot = &in_q->slot[idx]; + if (!slot->recover) { + CAM_WARN(CAM_CRM, + "err recovery disabled req_id %lld", + err_info->req_id); + mutex_unlock(&link->req.lock); + return 0; + } else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING + && slot->status != CRM_SLOT_STATUS_REQ_APPLIED) { + CAM_WARN(CAM_CRM, + "req_id %lld can not be recovered %d", + err_info->req_id, slot->status); + mutex_unlock(&link->req.lock); + return -EINVAL; + } + /* Bring processing pointer to bubbled req id */ + 
__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl); + in_q->rd_idx = idx; + /* Increment bubble counter only for bubble errors */ + if (err_info->error == CRM_KMD_ERR_BUBBLE) + in_q->slot[idx].bubble_times++; + in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED; + + /* Reset request apply map for all pd tables */ + while (tbl) { + tbl->slot[idx].req_apply_map = 0; + tbl = tbl->next; + } + + if (link->sync_link[0]) { + in_q->slot[idx].sync_mode = 0; + __cam_req_mgr_inc_idx(&idx, 1, + link->req.l_tbl->num_slots); + in_q->slot[idx].sync_mode = 0; + } + + /* + * We need do internal recovery for back to back bubble, but if + * the request had been internal recovered, we won't recover + * it again. + */ + if ((in_q->slot[idx].bubble_times >= REQ_MAXIMUM_BUBBLE_TIMES) && + !in_q->slot[idx].internal_recovered) { + link->try_for_internal_recovery = true; + /* + * Notify all devices in the link about the error, need to pause + * watch dog before internal recovery because the recover may + * take a long time. if don't pause watch dog here, SOF freeze + * may be triggered. After recovery, need to restart watch dog. 
+ */ + spin_lock_bh(&link->link_state_spin_lock); + crm_timer_reset(link->watchdog); + link->watchdog->pause_timer = true; + spin_unlock_bh(&link->link_state_spin_lock); + + __cam_req_mgr_send_evt(err_info->req_id, + CAM_REQ_MGR_LINK_EVT_STALLED, CRM_KMD_ERR_FATAL, link); + + spin_lock_bh(&link->link_state_spin_lock); + crm_timer_reset(link->watchdog); + link->watchdog->pause_timer = false; + spin_unlock_bh(&link->link_state_spin_lock); + + in_q->slot[idx].internal_recovered = true; + link->try_for_internal_recovery = false; + } + + /* + * Reset till last applied, even if there are scheduling delays + * we start fresh from the request on which bubble has + * been reported + */ + idx = in_q->rd_idx; + if (in_q->last_applied_idx >= 0) { + slot_diff = in_q->last_applied_idx - idx; + if (slot_diff < 0) + slot_diff += link->req.l_tbl->num_slots; + } else { + /* Next req at the minimum may be applied */ + slot_diff = 1; + } + + for (i = 0; i < slot_diff; i++) { + __cam_req_mgr_inc_idx(&idx, 1, + link->req.l_tbl->num_slots); + + CAM_DBG(CAM_CRM, + "Recovery on idx: %d reset slot [idx: %d status: %d]", + in_q->rd_idx, idx, in_q->slot[idx].status); + if (in_q->slot[idx].status == CRM_SLOT_STATUS_REQ_APPLIED) { + in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED; + + tbl = link->req.l_tbl; + /* Reset request apply map for all pd tables */ + while (tbl) { + tbl->slot[idx].req_apply_map = 0; + tbl = tbl->next; + } + } + } + + spin_lock_bh(&link->link_state_spin_lock); + link->state = CAM_CRM_LINK_STATE_ERR; + spin_unlock_bh(&link->link_state_spin_lock); + link->open_req_cnt++; + + /* Apply immediately to highest pd device on same frame */ + __cam_req_mgr_apply_on_bubble(link, err_info); + } + break; + case CRM_KMD_ERR_FATAL: + rc = __cam_req_mgr_send_evt(err_info->req_id, + CAM_REQ_MGR_LINK_EVT_ERR, err_info->error, link); + break; + default: + break; + } + mutex_unlock(&link->req.lock); + +end: + return rc; +} + +/** + * cam_req_mgr_process_stop() + * + * @brief: This runs 
in workque thread context. stop notification. + * @priv : link information. + * @data : contains information about frame_id, link etc. + * + * @return: 0 on success. + */ +int cam_req_mgr_process_stop(void *priv, void *data) +{ + int rc = 0; + struct cam_req_mgr_core_link *link = NULL; + + if (!data || !priv) { + CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv); + rc = -EINVAL; + goto end; + } + + link = (struct cam_req_mgr_core_link *)priv; + __cam_req_mgr_flush_req_slot(link); +end: + return rc; +} + +/** + * cam_req_mgr_process_trigger() + * + * @brief: This runs in workque thread context. Call core funcs to check + * which peding requests can be processed. + * @priv : link information. + * @data : contains information about frame_id, link etc. + * + * @return: 0 on success. + */ +static int cam_req_mgr_process_trigger(void *priv, void *data) +{ + int rc = 0; + int32_t idx = -1; + struct cam_req_mgr_trigger_notify *trigger_data = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_req_queue *in_q = NULL; + struct crm_task_payload *task_data = NULL; + int reset_step = 0; + int i = 0; + struct cam_req_mgr_state_monitor state; + + if (!data || !priv) { + CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv); + rc = -EINVAL; + goto end; + } + + link = (struct cam_req_mgr_core_link *)priv; + task_data = (struct crm_task_payload *)data; + trigger_data = (struct cam_req_mgr_trigger_notify *)&task_data->u; + + CAM_DBG(CAM_REQ, "link_hdl %x frame_id %lld, trigger %x\n", + trigger_data->link_hdl, + trigger_data->frame_id, + trigger_data->trigger); + + in_q = link->req.in_q; + + state.req_state = CAM_CRM_PROCESS_TRIGGER; + state.req_id = in_q->slot[in_q->rd_idx].req_id; + state.dev_hdl = -1; + state.frame_id = trigger_data->frame_id; + __cam_req_mgr_update_state_monitor_array(link, &state); + + mutex_lock(&link->req.lock); + + if (trigger_data->trigger == CAM_TRIGGER_POINT_SOF) { + idx = __cam_req_mgr_find_slot_for_req(in_q, + 
trigger_data->req_id); + if (idx >= 0) { + if (idx == in_q->last_applied_idx) + in_q->last_applied_idx = -1; + if (idx == in_q->rd_idx) + __cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots); + + reset_step = link->max_delay; + for (i = 0; i < link->num_sync_links; i++) { + if (link->sync_link[i]) { + if ((link->in_msync_mode) && + (link->sync_link[i]->max_delay > + reset_step)) + reset_step = + link->sync_link[i]->max_delay; + } + } + + __cam_req_mgr_dec_idx( + &idx, reset_step + 1, + in_q->num_slots); + + __cam_req_mgr_reset_req_slot(link, idx); + } + } else if (trigger_data->trigger == CAM_TRIGGER_POINT_EOF) { + if (link->properties_mask & CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF) { + rc = __cam_req_mgr_send_evt(0, CAM_REQ_MGR_LINK_EVT_EOF, + CRM_KMD_ERR_MAX, link); + if (!rc) + CAM_DBG(CAM_CRM, "Notify EOF event done on link:0x%x", + link->link_hdl); + } + } + + /* + * Check if current read index is in applied state, if yes make it free + * and increment read index to next slot. + */ + CAM_DBG(CAM_CRM, "link_hdl %x curent idx %d req_status %d", + link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status); + + spin_lock_bh(&link->link_state_spin_lock); + + if (link->state < CAM_CRM_LINK_STATE_READY) { + CAM_WARN(CAM_CRM, "invalid link state:%d for link 0x%x", + link->state, link->link_hdl); + spin_unlock_bh(&link->link_state_spin_lock); + rc = -EPERM; + goto release_lock; + } + + if (link->state == CAM_CRM_LINK_STATE_ERR) + CAM_WARN_RATE_LIMIT(CAM_CRM, "Error recovery idx %d status %d", + in_q->rd_idx, + in_q->slot[in_q->rd_idx].status); + + spin_unlock_bh(&link->link_state_spin_lock); + + /* + * Move to next req at SOF only in case + * the rd_idx is updated at EOF. + */ + if ((trigger_data->trigger == CAM_TRIGGER_POINT_SOF) && + (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED)) { + /* + * Do NOT reset req q slot data here, it can not be done + * here because we need to preserve the data to handle bubble. 
+ * + * Check if any new req is pending in slot, if not finish the + * lower pipeline delay device with available req ids. + */ + CAM_DBG(CAM_CRM, "link[%x] Req[%lld] invalidating slot", + link->link_hdl, in_q->slot[in_q->rd_idx].req_id); + rc = __cam_req_mgr_move_to_next_req_slot(link); + if (rc) { + CAM_DBG(CAM_REQ, + "No pending req to apply to lower pd devices"); + rc = 0; + __cam_req_mgr_notify_frame_skip(link, trigger_data->trigger); + goto release_lock; + } + } + + rc = __cam_req_mgr_process_req(link, trigger_data); + +release_lock: + mutex_unlock(&link->req.lock); +end: + return rc; +} + +/* Linked devices' Callback section */ + +/** + * cam_req_mgr_cb_add_req() + * + * @brief : Drivers call this function to notify new packet is available. + * @add_req : Information about new request available at a device. + * + * @return : 0 on success, negative in case of failure + * + */ +static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req) +{ + int rc = 0, i = 0, idx; + struct crm_workq_task *task = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_add_request *dev_req; + struct crm_task_payload *task_data; + + if (!add_req) { + CAM_ERR(CAM_CRM, "sof_data is NULL"); + return -EINVAL; + } + + link = cam_get_link_priv(add_req->link_hdl); + if (!link) { + CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl); + return -EINVAL; + } + + CAM_DBG(CAM_REQ, + "dev name %s dev_hdl %d dev req %lld, trigger_eof %d link_state %d on link 0x%x", + __cam_req_mgr_dev_handle_to_name(add_req->dev_hdl, link), + add_req->dev_hdl, add_req->req_id, add_req->trigger_eof, + link->state, link->link_hdl); + + mutex_lock(&link->lock); + /* Validate if req id is present in input queue */ + idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id); + if (idx < 0) { + if (((uint32_t)add_req->req_id) <= (link->last_flush_id)) { + CAM_INFO(CAM_CRM, + "req %lld not found in in_q; it has been flushed [last_flush_req %lld] link 0x%x", + 
add_req->req_id, link->last_flush_id, link->link_hdl); + rc = -EBADR; + } else { + CAM_ERR(CAM_CRM, + "req %lld not found in in_q on link 0x%x [last_flush_req %lld]", + add_req->req_id, link->link_hdl, link->last_flush_id); + rc = -ENOENT; + } + goto end; + } + + task = cam_req_mgr_workq_get_task(link->workq); + if (!task) { + CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task dev %x req %lld", + add_req->dev_hdl, add_req->req_id); + rc = -EBUSY; + goto end; + } + + task_data = (struct crm_task_payload *)task->payload; + task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ; + dev_req = (struct cam_req_mgr_add_request *)&task_data->u; + dev_req->req_id = add_req->req_id; + dev_req->link_hdl = add_req->link_hdl; + dev_req->dev_hdl = add_req->dev_hdl; + dev_req->trigger_eof = add_req->trigger_eof; + dev_req->skip_at_sof = add_req->skip_at_sof; + dev_req->skip_at_eof = add_req->skip_at_eof; + dev_req->trigger_skip = add_req->trigger_skip; + if (dev_req->trigger_eof) { + atomic_inc(&link->eof_event_cnt); + CAM_DBG(CAM_REQ, "Req_id: %llu, eof_event_cnt: %d, link 0x%x", + dev_req->req_id, link->eof_event_cnt, link->link_hdl); + for (i = 0; i < link->num_sync_links; i++) { + mutex_lock(&link->sync_link[i]->req.lock); + atomic_set(&link->sync_link[i]->eof_event_cnt, link->eof_event_cnt.counter); + CAM_DBG(CAM_REQ, + "Sync link idx: %d, sync link_hdl: 0x%x, eof_event_cnt: %d", + i, link->sync_link[i]->link_hdl, + link->sync_link[i]->eof_event_cnt); + mutex_unlock(&link->sync_link[i]->req.lock); + } + } + + task->process_cb = &cam_req_mgr_process_add_req; + rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); + CAM_DBG(CAM_CRM, "X: dev %x dev req %lld on link 0x%x", + add_req->dev_hdl, add_req->req_id, link->link_hdl); + +end: + mutex_unlock(&link->lock); + return rc; +} + +/** + * cam_req_mgr_cb_notify_err() + * + * @brief : Error received from device, sends bubble recovery + * @err_info : contains information about error occurred like bubble/overflow + * + * @return : 0 on 
success, negative in case of failure + * + */ +static int cam_req_mgr_cb_notify_err( + struct cam_req_mgr_error_notify *err_info) +{ + int rc = 0; + struct crm_workq_task *task = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_error_notify *notify_err; + struct crm_task_payload *task_data; + struct cam_req_mgr_state_monitor state; + + if (!err_info) { + CAM_ERR(CAM_CRM, "err_info is NULL"); + rc = -EINVAL; + goto end; + } + + link = cam_get_link_priv(err_info->link_hdl); + if (!link) { + CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl); + rc = -EINVAL; + goto end; + } + + state.req_state = CAM_CRM_NOTIFY_ERR; + state.req_id = err_info->req_id; + state.dev_hdl = -1; + state.frame_id = err_info->frame_id; + __cam_req_mgr_update_state_monitor_array(link, &state); + + spin_lock_bh(&link->link_state_spin_lock); + if (link->state != CAM_CRM_LINK_STATE_READY) { + CAM_WARN(CAM_CRM, "invalid link state:%d", link->state); + spin_unlock_bh(&link->link_state_spin_lock); + rc = -EPERM; + goto end; + } + crm_timer_reset(link->watchdog); + spin_unlock_bh(&link->link_state_spin_lock); + + task = cam_req_mgr_workq_get_task(link->workq); + if (!task) { + CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id); + rc = -EBUSY; + goto end; + } + + task_data = (struct crm_task_payload *)task->payload; + task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR; + notify_err = (struct cam_req_mgr_error_notify *)&task_data->u; + notify_err->req_id = err_info->req_id; + notify_err->link_hdl = err_info->link_hdl; + notify_err->dev_hdl = err_info->dev_hdl; + notify_err->error = err_info->error; + notify_err->trigger = err_info->trigger; + task->process_cb = &cam_req_mgr_process_error; + rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); + +end: + return rc; +} + +static int __cam_req_mgr_check_for_dual_trigger( + struct cam_req_mgr_core_link *link, + uint32_t trigger) +{ + int rc = -EAGAIN; + + CAM_DBG(CAM_CRM, "%s trigger_cnt [%u: %u]", + 
(trigger == CAM_TRIGGER_POINT_SOF) ? "SOF" : "EOF",
+ link->trigger_cnt[0][trigger], link->trigger_cnt[1][trigger]);
+
+ /* Both trigger devices reported this trigger point: reset the
+ * per-device counters and report success (rc = 0).
+ */
+ if (link->trigger_cnt[0][trigger] == link->trigger_cnt[1][trigger]) {
+ link->trigger_cnt[0][trigger] = 0;
+ link->trigger_cnt[1][trigger] = 0;
+ rc = 0;
+ return rc;
+ }
+
+ /* Counters diverged by more than one frame: one device is no longer
+ * generating triggers, so reset both counters but still return -EAGAIN.
+ */
+ if ((link->trigger_cnt[0][trigger] &&
+ (link->trigger_cnt[0][trigger] - link->trigger_cnt[1][trigger] > 1)) ||
+ (link->trigger_cnt[1][trigger] &&
+ (link->trigger_cnt[1][trigger] - link->trigger_cnt[0][trigger] > 1))) {
+
+ CAM_WARN(CAM_CRM,
+ "One of the devices could not generate trigger");
+
+ link->trigger_cnt[0][trigger] = 0;
+ link->trigger_cnt[1][trigger] = 0;
+ CAM_DBG(CAM_CRM, "Reset the trigger cnt");
+ return rc;
+ }
+
+ /* Only one device has triggered so far; caller should wait (-EAGAIN) */
+ CAM_DBG(CAM_CRM, "Only one device has generated trigger for %s",
+ (trigger == CAM_TRIGGER_POINT_SOF) ? "SOF" : "EOF");
+
+ return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_timer()
+ *
+ * @brief : Notify SOF timer to pause after flush
+ * @timer_data : contains information about frame_id, link etc. 
+ *
+ * @return : 0 on success
+ *
+ */
+static int cam_req_mgr_cb_notify_timer(
+ struct cam_req_mgr_timer_notify *timer_data)
+{
+ int rc = 0;
+ struct cam_req_mgr_core_link *link = NULL;
+
+ if (!timer_data) {
+ CAM_ERR(CAM_CRM, "timer data is NULL");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ link = cam_get_link_priv(timer_data->link_hdl);
+ if (!link) {
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", timer_data->link_hdl);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ spin_lock_bh(&link->link_state_spin_lock);
+ if (link->state < CAM_CRM_LINK_STATE_READY) {
+ CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+ spin_unlock_bh(&link->link_state_spin_lock);
+ rc = -EPERM;
+ goto end;
+ }
+ /* state == 0 requests a pause; any other value resumes the watchdog */
+ if (link->watchdog) {
+ if (!timer_data->state)
+ link->watchdog->pause_timer = true;
+ else
+ link->watchdog->pause_timer = false;
+ crm_timer_reset(link->watchdog);
+ CAM_DBG(CAM_CRM, "link %x pause_timer %d",
+ link->link_hdl, link->watchdog->pause_timer);
+ }
+
+ spin_unlock_bh(&link->link_state_spin_lock);
+
+end:
+ return rc;
+}
+
+/*
+ * cam_req_mgr_cb_notify_stop()
+ *
+ * @brief : Stop received from device, resets the marked slots
+ * @stop_info : contains the handle of the link whose slots are to be reset
+ *
+ * @return : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_notify_stop(
+ struct cam_req_mgr_notify_stop *stop_info)
+{
+ int rc = 0;
+ struct crm_workq_task *task = NULL;
+ struct cam_req_mgr_core_link *link = NULL;
+ struct cam_req_mgr_notify_stop *notify_stop;
+ struct crm_task_payload *task_data;
+
+ if (!stop_info) {
+ CAM_ERR(CAM_CRM, "stop_info is NULL");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ link = cam_get_link_priv(stop_info->link_hdl);
+ if (!link) {
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", stop_info->link_hdl);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ spin_lock_bh(&link->link_state_spin_lock);
+ if (link->state != CAM_CRM_LINK_STATE_READY) {
+ CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+ spin_unlock_bh(&link->link_state_spin_lock); 
+ rc = -EPERM; + goto end; + } + crm_timer_reset(link->watchdog); + link->watchdog->pause_timer = true; + spin_unlock_bh(&link->link_state_spin_lock); + + task = cam_req_mgr_workq_get_task(link->workq); + if (!task) { + CAM_ERR(CAM_CRM, "no empty task"); + rc = -EBUSY; + goto end; + } + + task_data = (struct crm_task_payload *)task->payload; + task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR; + notify_stop = (struct cam_req_mgr_notify_stop *)&task_data->u; + notify_stop->link_hdl = stop_info->link_hdl; + task->process_cb = &cam_req_mgr_process_stop; + rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); + +end: + return rc; +} + + + +/** + * cam_req_mgr_cb_notify_trigger() + * + * @brief : SOF received from device, sends trigger through workqueue + * @sof_data: contains information about frame_id, link etc. + * + * @return : 0 on success + * + */ +static int cam_req_mgr_cb_notify_trigger( + struct cam_req_mgr_trigger_notify *trigger_data) +{ + int32_t rc = 0, trigger_id = 0; + uint32_t trigger; + struct crm_workq_task *task = NULL; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_trigger_notify *notify_trigger; + struct crm_task_payload *task_data; + struct cam_req_mgr_state_monitor state; + struct cam_req_mgr_req_queue *in_q = NULL; + + if (!trigger_data) { + CAM_ERR(CAM_CRM, "trigger_data is NULL"); + rc = -EINVAL; + goto end; + } + + link = cam_get_link_priv(trigger_data->link_hdl); + if (!link) { + CAM_DBG(CAM_CRM, "link ptr NULL %x", trigger_data->link_hdl); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_REQ, "link_hdl %x frame_id %lld, trigger %x\n", + trigger_data->link_hdl, + trigger_data->frame_id, + trigger_data->trigger); + + trigger_id = trigger_data->trigger_id; + trigger = trigger_data->trigger; + + spin_lock_bh(&link->req.reset_link_spin_lock); + in_q = link->req.in_q; + if (!in_q) { + CAM_DBG(CAM_CRM, "in_q ptr NULL, link_hdl %x", trigger_data->link_hdl); + spin_unlock_bh(&link->req.reset_link_spin_lock); + rc = 
-EINVAL; + goto end; + } + + state.req_state = CAM_CRM_NOTIFY_TRIGGER; + state.req_id = in_q->slot[in_q->rd_idx].req_id; + state.dev_hdl = -1; + state.frame_id = trigger_data->frame_id; + __cam_req_mgr_update_state_monitor_array(link, &state); + spin_unlock_bh(&link->req.reset_link_spin_lock); + + /* + * Reduce the workq overhead when there is + * not any eof event found. + */ + if (trigger == CAM_TRIGGER_POINT_EOF) { + if (!atomic_read(&link->eof_event_cnt) && + !(link->properties_mask & CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF)) { + CAM_DBG(CAM_CRM, "No any request to schedule at EOF"); + goto end; + } + } + + spin_lock_bh(&link->link_state_spin_lock); + if (link->state < CAM_CRM_LINK_STATE_READY) { + CAM_WARN_RATE_LIMIT(CAM_CRM, "invalid link state:%d", link->state); + spin_unlock_bh(&link->link_state_spin_lock); + rc = -EPERM; + goto end; + } + + if ((link->watchdog) && (link->watchdog->pause_timer) && + (trigger == CAM_TRIGGER_POINT_SOF)) + link->watchdog->pause_timer = false; + + if (link->dual_trigger) { + if ((trigger_id >= 0) && (trigger_id < + CAM_REQ_MGR_MAX_TRIGGERS)) { + link->trigger_cnt[trigger_id][trigger]++; + rc = __cam_req_mgr_check_for_dual_trigger(link, trigger); + if (rc) { + spin_unlock_bh(&link->link_state_spin_lock); + goto end; + } + } else { + CAM_ERR(CAM_CRM, "trigger_id invalid %d", trigger_id); + rc = -EINVAL; + spin_unlock_bh(&link->link_state_spin_lock); + goto end; + } + } + + if (trigger_data->trigger == CAM_TRIGGER_POINT_SOF) + crm_timer_reset(link->watchdog); + + if ((in_q->slot[in_q->rd_idx].mismatched_frame_mode == CRM_DROP_MISMATCHED_FRMAE) && + link->last_applied_done_timestamp && + ((trigger_data->boot_timestamp - CAM_CRM_SENSOR_APPLIY_DELAY_THRESHOLD) < + link->last_applied_done_timestamp)) { + CAM_WARN(CAM_REQ, + "Apply delayed req:%lld, link:0x%x, timestamp boot:0x%llx applied:0x%llx", + in_q->slot[in_q->rd_idx].req_id, trigger_data->link_hdl, + trigger_data->boot_timestamp, link->last_applied_done_timestamp); + 
spin_unlock_bh(&link->link_state_spin_lock); + goto end; + } + spin_unlock_bh(&link->link_state_spin_lock); + + task = cam_req_mgr_workq_get_task(link->workq); + if (!task) { + CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task frame %lld", + trigger_data->frame_id); + rc = -EBUSY; + spin_lock_bh(&link->link_state_spin_lock); + if ((link->watchdog) && !(link->watchdog->pause_timer)) + link->watchdog->pause_timer = true; + spin_unlock_bh(&link->link_state_spin_lock); + goto end; + } + task_data = (struct crm_task_payload *)task->payload; + task_data->type = (trigger_data->trigger == CAM_TRIGGER_POINT_SOF) ? + CRM_WORKQ_TASK_NOTIFY_SOF : CRM_WORKQ_TASK_NOTIFY_EOF; + notify_trigger = (struct cam_req_mgr_trigger_notify *)&task_data->u; + notify_trigger->frame_id = trigger_data->frame_id; + notify_trigger->link_hdl = trigger_data->link_hdl; + notify_trigger->dev_hdl = trigger_data->dev_hdl; + notify_trigger->trigger = trigger_data->trigger; + notify_trigger->req_id = trigger_data->req_id; + notify_trigger->sof_timestamp_val = trigger_data->sof_timestamp_val; + task->process_cb = &cam_req_mgr_process_trigger; + rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); + +end: + return rc; +} + +static int cam_req_mgr_cb_notify_msg( + struct cam_req_mgr_notify_msg *msg) +{ + int i, slot_idx, rc = 0; + struct cam_req_mgr_slot *slot; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_req_queue *in_q = NULL; + struct cam_req_mgr_connected_device *dev = NULL; + struct cam_req_mgr_link_evt_data evt_data; + uint64_t frame_duration_ms; + + if (!msg) { + CAM_ERR(CAM_CRM, "msg is NULL"); + return -EINVAL; + } + + link = cam_get_link_priv(msg->link_hdl); + if (!link) { + CAM_DBG(CAM_CRM, "link ptr NULL %x", msg->link_hdl); + return -EINVAL; + } + + CAM_DBG(CAM_REQ, "link_hdl 0x%x request id:%llu msg type:%d", + link->link_hdl, msg->req_id, msg->msg_type); + + switch (msg->msg_type) { + case CAM_REQ_MGR_MSG_SENSOR_FRAME_INFO: + + frame_duration_ms = 
msg->u.frame_info.frame_duration / CAM_COMMON_NS_PER_MS; + + spin_lock_bh(&link->req.reset_link_spin_lock); + /* Long exposure case for resume from standby */ + if (msg->u.frame_info.use_for_wd) { + link->exp_time_for_resume = frame_duration_ms; + spin_unlock_bh(&link->req.reset_link_spin_lock); + return 0; + } + + in_q = link->req.in_q; + if (!in_q) { + CAM_ERR(CAM_CRM, "in_q ptr NULL, link_hdl %x", msg->link_hdl); + spin_unlock_bh(&link->req.reset_link_spin_lock); + return -EINVAL; + } + + slot_idx = __cam_req_mgr_find_slot_for_req( + in_q, msg->req_id); + if (slot_idx == -1) { + CAM_ERR(CAM_CRM, "Req: %lld not found on link: 0x%x", + msg->req_id, link->link_hdl); + spin_unlock_bh(&link->req.reset_link_spin_lock); + return -EINVAL; + } + + slot = &in_q->slot[slot_idx]; + spin_unlock_bh(&link->req.reset_link_spin_lock); + slot->frame_sync_shift = msg->u.frame_info.frame_sync_shift; + if (frame_duration_ms > CAM_REQ_MGR_WATCHDOG_TIMEOUT_MAX) { + CAM_WARN(CAM_CRM, + "Requested timeout [%dms] max supported timeout [%dms] resetting to max", + frame_duration_ms, CAM_REQ_MGR_WATCHDOG_TIMEOUT_MAX); + slot->additional_timeout = CAM_REQ_MGR_WATCHDOG_TIMEOUT_MAX; + } else { + slot->additional_timeout = frame_duration_ms; + } + + CAM_DBG(CAM_CRM, + "Requested timeout [%dms] req: %lld", frame_duration_ms, msg->req_id); + + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + if (msg->dev_hdl == dev->dev_hdl) + continue; + + evt_data.req_id = msg->req_id; + evt_data.dev_hdl = dev->dev_hdl; + evt_data.link_hdl = link->link_hdl; + evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_SENSOR_FRAME_INFO; + evt_data.u.frame_info = msg->u.frame_info; + + if (dev->ops && dev->ops->process_evt) { + rc = dev->ops->process_evt(&evt_data); + if (rc) + CAM_ERR(CAM_CRM, + "Failed to set properties on link 0x%x dev 0x%x", + link->link_hdl, dev->dev_hdl); + } + } + break; + case CAM_REQ_MGR_MSG_NOTIFY_FOR_SYNCED_RESUME: { + struct crm_task_payload *task_data = NULL; + struct crm_workq_task 
*task = NULL; + struct cam_req_mgr_notify_msg *notify_msg; + + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + if (msg->dev_hdl != dev->dev_hdl) + continue; + + link->resume_sync_curr_mask |= BIT(dev->dev_info.dev_id); + } + + if (link->resume_sync_curr_mask == link->resume_sync_dev_mask) { + task = cam_req_mgr_workq_get_task(link->workq); + if (!task) { + CAM_ERR(CAM_CRM, "no empty task"); + rc = -EBUSY; + break; + } + + link->resume_sync_curr_mask = 0; + task_data = (struct crm_task_payload *)task->payload; + task_data->type = CRM_WORKQ_TASK_TRIGGER_SYNCED_RESUME; + notify_msg = (struct cam_req_mgr_notify_msg *)&task_data->u; + notify_msg->req_id = msg->req_id; + notify_msg->link_hdl = msg->link_hdl; + notify_msg->dev_hdl = msg->dev_hdl; + notify_msg->msg_type = msg->msg_type; + task->process_cb = &cam_req_mgr_issue_resume; + rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); + } + } + break; + case CAM_REQ_MGR_MSG_UPDATE_DEVICE_INFO: { + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + if (dev->dev_hdl != msg->dev_hdl) + continue; + + snprintf(dev->dev_info.name, sizeof(dev->dev_info.name), "%s(%s)", + "cam-isp", msg->u.ife_hw_name); + break; + } + } + break; + default: + rc = -EINVAL; + CAM_ERR(CAM_CRM, + "link:0x%x gets an invalid msg:%d from dev:0x%x at req:%llu", + link->link_hdl, msg->msg_type, + msg->dev_hdl, msg->req_id); + break; + } + + return rc; +} + +static struct cam_req_mgr_crm_cb cam_req_mgr_ops = { + .notify_trigger = cam_req_mgr_cb_notify_trigger, + .notify_err = cam_req_mgr_cb_notify_err, + .add_req = cam_req_mgr_cb_add_req, + .notify_timer = cam_req_mgr_cb_notify_timer, + .notify_stop = cam_req_mgr_cb_notify_stop, + .notify_msg = cam_req_mgr_cb_notify_msg, +}; + +/** + * __cam_req_mgr_setup_link_info() + * + * @brief : Sets up input queue, create pd based tables, communicate with + * devs connected on this link and setup communication. 
+ * @link : pointer to link to setup + * @link_info : link_info coming from CSL to prepare link + * + * @return : 0 on success, negative in case of failure + * + */ +static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link, + struct cam_req_mgr_ver_info *link_info) +{ + int rc = 0, i = 0, num_devices = 0; + struct cam_req_mgr_core_dev_link_setup link_data; + struct cam_req_mgr_connected_device *dev; + struct cam_req_mgr_req_tbl *pd_tbl; + enum cam_pipeline_delay max_delay; + enum cam_modeswitch_delay max_modeswitch; + uint32_t num_trigger_devices = 0; + if (link_info->version == VERSION_1) { + if (link_info->u.link_info_v1.num_devices > + CAM_REQ_MGR_MAX_HANDLES) + return -EPERM; + } else if (link_info->version == VERSION_2) { + if (link_info->u.link_info_v2.num_devices > + CAM_REQ_MGR_MAX_HANDLES_V2) + return -EPERM; + } + + mutex_init(&link->req.lock); + CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock); + link->req.num_tbl = 0; + + rc = __cam_req_mgr_setup_in_q(&link->req); + if (rc < 0) + return rc; + + max_delay = CAM_PIPELINE_DELAY_0; + max_modeswitch = CAM_MODESWITCH_DELAY_0; + if (link_info->version == VERSION_1) + num_devices = link_info->u.link_info_v1.num_devices; + else if (link_info->version == VERSION_2) + num_devices = link_info->u.link_info_v2.num_devices; + for (i = 0; i < num_devices; i++) { + dev = &link->l_dev[i]; + /* Using dev hdl, get ops ptr to communicate with device */ + if (link_info->version == VERSION_1) + dev->ops = (struct cam_req_mgr_kmd_ops *) + cam_get_device_ops( + link_info->u.link_info_v1.dev_hdls[i]); + else if (link_info->version == VERSION_2) + dev->ops = (struct cam_req_mgr_kmd_ops *) + cam_get_device_ops( + link_info->u.link_info_v2.dev_hdls[i]); + if (!dev->ops || + !dev->ops->get_dev_info || + !dev->ops->link_setup) { + CAM_ERR(CAM_CRM, "FATAL: device ops NULL"); + rc = -ENXIO; + goto error; + } + if (link_info->version == VERSION_1) + dev->dev_hdl = link_info->u.link_info_v1.dev_hdls[i]; + 
else if (link_info->version == VERSION_2) + dev->dev_hdl = link_info->u.link_info_v2.dev_hdls[i]; + dev->parent = (void *)link; + dev->dev_info.dev_hdl = dev->dev_hdl; + rc = dev->ops->get_dev_info(&dev->dev_info); + + trace_cam_req_mgr_connect_device(link, &dev->dev_info); + if (link_info->version == VERSION_1) + CAM_DBG(CAM_CRM, + "%x: connected: %s, id %d, delay %d, trigger %x", + link_info->u.link_info_v1.session_hdl, + dev->dev_info.name, + dev->dev_info.dev_id, dev->dev_info.p_delay, + dev->dev_info.trigger); + else if (link_info->version == VERSION_2) + CAM_DBG(CAM_CRM, + "%x: connected: %s, id %d, delay %d, trigger %x", + link_info->u.link_info_v2.session_hdl, + dev->dev_info.name, + dev->dev_info.dev_id, dev->dev_info.p_delay, + dev->dev_info.trigger); + if (rc < 0 || + dev->dev_info.p_delay >= + CAM_PIPELINE_DELAY_MAX || + dev->dev_info.p_delay < + CAM_PIPELINE_DELAY_0) { + CAM_ERR(CAM_CRM, "get device info failed"); + goto error; + } else if (dev->dev_info.m_delay >= + CAM_MODESWITCH_DELAY_MAX || + dev->dev_info.m_delay < + CAM_MODESWITCH_DELAY_0) { + CAM_ERR(CAM_CRM, "get mode switch info failed"); + goto error; + } else { + if (link_info->version == VERSION_1) { + CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d", + link_info->u.link_info_v1.session_hdl, + dev->dev_info.name, + dev->dev_info.p_delay); + } + else if (link_info->version == VERSION_2) { + CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d", + link_info->u.link_info_v2.session_hdl, + dev->dev_info.name, + dev->dev_info.p_delay); + } + if (dev->dev_info.p_delay > max_delay) + max_delay = dev->dev_info.p_delay; + + if (dev->dev_info.m_delay > max_modeswitch) + max_modeswitch = dev->dev_info.m_delay; + } + + if (dev->dev_info.trigger_on) + num_trigger_devices++; + + if (dev->dev_info.resume_sync_on) + link->resume_sync_dev_mask |= BIT(dev->dev_info.dev_id); + } + + if (num_trigger_devices > CAM_REQ_MGR_MAX_TRIGGERS) { + CAM_ERR(CAM_CRM, + "Unsupported number of trigger devices %u", + 
num_trigger_devices); + rc = -EINVAL; + goto error; + } + + link_data.link_enable = 1; + link_data.link_hdl = link->link_hdl; + link_data.crm_cb = &cam_req_mgr_ops; + link_data.max_delay = max_delay; + link_data.mode_switch_max_delay = max_modeswitch; + if (num_trigger_devices == CAM_REQ_MGR_MAX_TRIGGERS) + link->dual_trigger = true; + + num_trigger_devices = 0; + for (i = 0; i < num_devices; i++) { + dev = &link->l_dev[i]; + + link_data.dev_hdl = dev->dev_hdl; + /* + * For unique pipeline delay table create request + * tracking table + */ + if (link->pd_mask & BIT(dev->dev_info.p_delay)) { + pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl, + dev->dev_info.p_delay); + if (!pd_tbl) { + CAM_ERR(CAM_CRM, "pd %d tbl not found", + dev->dev_info.p_delay); + rc = -ENXIO; + goto error; + } + } else { + pd_tbl = __cam_req_mgr_create_pd_tbl( + dev->dev_info.p_delay); + if (pd_tbl == NULL) { + CAM_ERR(CAM_CRM, "create new pd tbl failed"); + rc = -ENXIO; + goto error; + } + pd_tbl->pd = dev->dev_info.p_delay; + link->pd_mask |= BIT(pd_tbl->pd); + /* + * Add table to list and also sort list + * from max pd to lowest + */ + __cam_req_mgr_add_tbl_to_link(&link->req.l_tbl, pd_tbl); + } + dev->dev_bit = pd_tbl->dev_count++; + dev->pd_tbl = pd_tbl; + pd_tbl->dev_mask |= BIT(dev->dev_bit); + CAM_DBG(CAM_CRM, "dev_bit %u name %s pd %u mask %d", + dev->dev_bit, dev->dev_info.name, pd_tbl->pd, + pd_tbl->dev_mask); + link_data.trigger_id = -1; + if ((dev->dev_info.trigger_on) && (link->dual_trigger)) { + link_data.trigger_id = num_trigger_devices; + num_trigger_devices++; + } + + /* Communicate with dev to establish the link */ + dev->ops->link_setup(&link_data); + + /* Compute max/min pipeline delay */ + if (dev->dev_info.p_delay > link->max_delay) + link->max_delay = dev->dev_info.p_delay; + if (dev->dev_info.p_delay < link->min_delay) + link->min_delay = dev->dev_info.p_delay; + + /* Compute max/min modeswitch delay */ + if (dev->dev_info.m_delay > link->max_mswitch_delay) + 
link->max_mswitch_delay = dev->dev_info.m_delay; + if (dev->dev_info.m_delay < link->min_mswitch_delay) + link->min_mswitch_delay = dev->dev_info.m_delay; + } + + link->num_devs = num_devices; + + /* Assign id for pd tables */ + __cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req); + + /* At start, expect max pd devices, all are in skip state */ + __cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl); + + return 0; + +error: + __cam_req_mgr_destroy_link_info(link); + return rc; +} + +/* IOCTLs handling section */ +int cam_req_mgr_create_session( + struct cam_req_mgr_session_info *ses_info) +{ + int rc = 0; + int32_t session_hdl; + struct cam_req_mgr_core_session *cam_session = NULL; + + if (!ses_info) { + CAM_DBG(CAM_CRM, "NULL session info pointer"); + return -EINVAL; + } + mutex_lock(&g_crm_core_dev->crm_lock); + cam_session = CAM_MEM_ZALLOC(sizeof(*cam_session), + GFP_KERNEL); + if (!cam_session) { + rc = -ENOMEM; + goto end; + } + + session_hdl = cam_create_session_hdl((void *)cam_session); + if (session_hdl < 0) { + CAM_ERR(CAM_CRM, "unable to create session_hdl = %x", + session_hdl); + rc = session_hdl; + CAM_MEM_FREE(cam_session); + goto end; + } + ses_info->session_hdl = session_hdl; + + mutex_init(&cam_session->lock); + CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK hdl 0x%x", + &cam_session->lock, session_hdl); + + mutex_lock(&cam_session->lock); + cam_session->session_hdl = session_hdl; + cam_session->num_links = 0; + cam_session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC; + list_add(&cam_session->entry, &g_crm_core_dev->session_head); + mutex_unlock(&cam_session->lock); +end: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +/** + * __cam_req_mgr_unlink() + * + * @brief : Unlink devices on a link structure from the session + * This API is to be invoked with session mutex held + * @session: session of the link + * @link : Pointer to the link structure + * + * @return: 0 for success, negative for failure + * + */ +static int __cam_req_mgr_unlink( 
+ struct cam_req_mgr_core_session *session, + struct cam_req_mgr_core_link *link) +{ + int rc; + + spin_lock_bh(&link->link_state_spin_lock); + link->state = CAM_CRM_LINK_STATE_IDLE; + spin_unlock_bh(&link->link_state_spin_lock); + + if (!link->is_shutdown) { + /* Hold the request lock prior to disconnecting link */ + mutex_lock(&link->req.lock); + rc = __cam_req_mgr_disconnect_link(link); + if (rc) + CAM_ERR(CAM_CORE, + "Unlink for all devices was not successful"); + mutex_unlock(&link->req.lock); + } + + mutex_lock(&link->lock); + spin_lock_bh(&link->link_state_spin_lock); + /* Destroy timer of link */ + crm_timer_exit(&link->watchdog); + spin_unlock_bh(&link->link_state_spin_lock); + /* Release session mutex for workq processing */ + mutex_unlock(&session->lock); + /* Destroy workq of link */ + cam_req_mgr_workq_destroy(&link->workq); + /* Acquire session mutex after workq flush */ + mutex_lock(&session->lock); + /* Cleanup request tables and unlink devices */ + __cam_req_mgr_destroy_link_info(link); + /* Free memory holding data of linked devs */ + + __cam_req_mgr_destroy_subdev(&link->l_dev); + + /* Destroy the link handle */ + rc = cam_destroy_link_hdl(link->link_hdl); + if (rc < 0) { + CAM_ERR(CAM_CRM, "error destroying link hdl %x rc %d", + link->link_hdl, rc); + } else + link->link_hdl = -1; + + mutex_unlock(&link->lock); + return rc; +} + +int cam_req_mgr_destroy_session( + struct cam_req_mgr_session_info *ses_info, + bool is_shutdown) +{ + int rc; + int i; + struct cam_req_mgr_core_session *cam_session = NULL; + struct cam_req_mgr_core_link *link; + + if (!ses_info) { + CAM_DBG(CAM_CRM, "NULL session info pointer"); + return -EINVAL; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + cam_session = cam_get_session_priv(ses_info->session_hdl); + if (!cam_session || (cam_session->session_hdl != ses_info->session_hdl)) { + CAM_ERR(CAM_CRM, "session: %s, ses_info->ses_hdl:%x, session->ses_hdl:%x", + CAM_IS_NULL_TO_STR(cam_session), ses_info->session_hdl, + 
(!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : cam_session->session_hdl); + rc = -ENOENT; + goto end; + + } + + mutex_lock(&cam_session->lock); + if (cam_session->num_links) { + CAM_DBG(CAM_CRM, "destroy session %x num_active_links %d", + ses_info->session_hdl, + cam_session->num_links); + + for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) { + link = cam_session->links[i]; + if (!link) + continue; + + CAM_DBG(CAM_CRM, "Unlink link hdl 0x%x", + link->link_hdl); + /* Ignore return value since session is going away */ + link->is_shutdown = is_shutdown; + __cam_req_mgr_unlink(cam_session, link); + __cam_req_mgr_free_link(link); + } + } + list_del(&cam_session->entry); + mutex_unlock(&cam_session->lock); + mutex_destroy(&cam_session->lock); + + CAM_MEM_FREE(cam_session); + + rc = cam_destroy_session_hdl(ses_info->session_hdl); + if (rc < 0) + CAM_ERR(CAM_CRM, "unable to destroy session_hdl = %x rc %d", + ses_info->session_hdl, rc); + +end: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +static void cam_req_mgr_process_workq_link_worker(struct work_struct *w) +{ + cam_req_mgr_process_workq(w); +} + +int cam_req_mgr_link(struct cam_req_mgr_ver_info *link_info) +{ + int rc = 0; + int wq_flag = 0; + char buf[128]; + struct cam_create_dev_hdl root_dev = {0}; + struct cam_req_mgr_core_session *cam_session; + struct cam_req_mgr_core_link *link; + + if (!link_info) { + CAM_DBG(CAM_CRM, "NULL pointer"); + return -EINVAL; + } + if (link_info->u.link_info_v1.num_devices > CAM_REQ_MGR_MAX_HANDLES) { + CAM_ERR(CAM_CRM, "Invalid num devices %d", + link_info->u.link_info_v1.num_devices); + return -EINVAL; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + + /* session hdl's priv data is cam session struct */ + cam_session = cam_get_session_priv(link_info->u.link_info_v1.session_hdl); + if (!cam_session || (cam_session->session_hdl != link_info->u.link_info_v1.session_hdl)) { + CAM_ERR(CAM_CRM, "session: %s, link_info->ses_hdl:%x, session->ses_hdl:%x", + 
CAM_IS_NULL_TO_STR(cam_session), link_info->u.link_info_v1.session_hdl, + (!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : cam_session->session_hdl); + mutex_unlock(&g_crm_core_dev->crm_lock); + return -EINVAL; + } + + /* Allocate link struct and map it with session's request queue */ + link = __cam_req_mgr_reserve_link(cam_session); + if (!link) { + CAM_ERR(CAM_CRM, "failed to reserve new link"); + mutex_unlock(&g_crm_core_dev->crm_lock); + return -EINVAL; + } + CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl); + + root_dev.session_hdl = link_info->u.link_info_v1.session_hdl; + root_dev.priv = (void *)link; + root_dev.dev_id = CAM_CRM; + + mutex_lock(&link->lock); + /* Create unique handle for link */ + link->link_hdl = cam_create_link_hdl(&root_dev); + if (link->link_hdl < 0) { + CAM_ERR(CAM_CRM, + "Insufficient memory to create new device handle"); + rc = link->link_hdl; + goto link_hdl_fail; + } + link_info->u.link_info_v1.link_hdl = link->link_hdl; + link->last_flush_id = 0; + + /* Allocate memory to hold data of all linked devs */ + rc = __cam_req_mgr_create_subdevs(&link->l_dev, + link_info->u.link_info_v1.num_devices); + if (rc < 0) { + CAM_ERR(CAM_CRM, + "Insufficient memory to create new crm subdevs"); + goto create_subdev_failed; + } + + /* Using device ops query connected devs, prepare request tables */ + rc = __cam_req_mgr_setup_link_info(link, link_info); + if (rc < 0) + goto setup_failed; + + spin_lock_bh(&link->link_state_spin_lock); + link->state = CAM_CRM_LINK_STATE_READY; + spin_unlock_bh(&link->link_state_spin_lock); + + /* Create worker for current link */ + snprintf(buf, sizeof(buf), "%x-%x", + link_info->u.link_info_v1.session_hdl, link->link_hdl); + wq_flag = CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL; + rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS, + &link->workq, CRM_WORKQ_USAGE_NON_IRQ, wq_flag, + cam_req_mgr_process_workq_link_worker); + if (rc < 0) { + CAM_ERR(CAM_CRM, "FATAL: unable to create worker"); + 
__cam_req_mgr_destroy_link_info(link); + goto setup_failed; + } + + /* Assign payload to workqueue tasks */ + rc = __cam_req_mgr_setup_payload(link->workq); + if (rc < 0) { + __cam_req_mgr_destroy_link_info(link); + cam_req_mgr_workq_destroy(&link->workq); + goto setup_failed; + } + + mutex_unlock(&link->lock); + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +setup_failed: + __cam_req_mgr_destroy_subdev(&link->l_dev); +create_subdev_failed: + cam_destroy_link_hdl(link->link_hdl); + link_info->u.link_info_v1.link_hdl = -1; +link_hdl_fail: + mutex_unlock(&link->lock); + __cam_req_mgr_unreserve_link(cam_session, link); + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +int cam_req_mgr_link_v2(struct cam_req_mgr_ver_info *link_info) +{ + int rc = 0; + int wq_flag = 0; + char buf[128]; + struct cam_create_dev_hdl root_dev = {0}; + struct cam_req_mgr_core_session *cam_session; + struct cam_req_mgr_core_link *link; + + if (!link_info) { + CAM_DBG(CAM_CRM, "NULL pointer"); + return -EINVAL; + } + if (link_info->u.link_info_v2.num_devices > + CAM_REQ_MGR_MAX_HANDLES_V2) { + CAM_ERR(CAM_CRM, "Invalid num devices %d", + link_info->u.link_info_v2.num_devices); + return -EINVAL; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + + /* session hdl's priv data is cam session struct */ + cam_session = cam_get_session_priv(link_info->u.link_info_v2.session_hdl); + if (!cam_session || (cam_session->session_hdl != link_info->u.link_info_v2.session_hdl)) { + CAM_ERR(CAM_CRM, "session: %s, link_info->ses_hdl:%x, session->ses_hdl:%x", + CAM_IS_NULL_TO_STR(cam_session), link_info->u.link_info_v2.session_hdl, + (!cam_session) ? 
CAM_REQ_MGR_DEFAULT_HDL_VAL : cam_session->session_hdl); + mutex_unlock(&g_crm_core_dev->crm_lock); + return -EINVAL; + } + + /* Allocate link struct and map it with session's request queue */ + link = __cam_req_mgr_reserve_link(cam_session); + if (!link) { + CAM_ERR(CAM_CRM, "failed to reserve new link"); + mutex_unlock(&g_crm_core_dev->crm_lock); + return -EINVAL; + } + CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl); + + root_dev.session_hdl = link_info->u.link_info_v2.session_hdl; + root_dev.priv = (void *)link; + root_dev.dev_id = CAM_CRM; + + mutex_lock(&link->lock); + /* Create unique handle for link */ + link->link_hdl = cam_create_link_hdl(&root_dev); + if (link->link_hdl < 0) { + CAM_ERR(CAM_CRM, + "Insufficient memory to create new device handle"); + rc = link->link_hdl; + goto link_hdl_fail; + } + link_info->u.link_info_v2.link_hdl = link->link_hdl; + link->last_flush_id = 0; + + /* Allocate memory to hold data of all linked devs */ + rc = __cam_req_mgr_create_subdevs(&link->l_dev, + link_info->u.link_info_v2.num_devices); + if (rc < 0) { + CAM_ERR(CAM_CRM, + "Insufficient memory to create new crm subdevs"); + goto create_subdev_failed; + } + + /* Using device ops query connected devs, prepare request tables */ + rc = __cam_req_mgr_setup_link_info(link, link_info); + if (rc < 0) + goto setup_failed; + + spin_lock_bh(&link->link_state_spin_lock); + link->state = CAM_CRM_LINK_STATE_READY; + spin_unlock_bh(&link->link_state_spin_lock); + + /* Create worker for current link */ + snprintf(buf, sizeof(buf), "%x-%x", + link_info->u.link_info_v2.session_hdl, link->link_hdl); + wq_flag = CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL; + rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS, + &link->workq, CRM_WORKQ_USAGE_NON_IRQ, wq_flag, + cam_req_mgr_process_workq_link_worker); + if (rc < 0) { + CAM_ERR(CAM_CRM, "FATAL: unable to create worker"); + __cam_req_mgr_destroy_link_info(link); + goto setup_failed; + } + + /* Assign payload to 
workqueue tasks */ + rc = __cam_req_mgr_setup_payload(link->workq); + if (rc < 0) { + __cam_req_mgr_destroy_link_info(link); + cam_req_mgr_workq_destroy(&link->workq); + goto setup_failed; + } + + link->trigger_cnt[0][CAM_TRIGGER_POINT_SOF] = 0; + link->trigger_cnt[0][CAM_TRIGGER_POINT_EOF] = 0; + link->trigger_cnt[1][CAM_TRIGGER_POINT_SOF] = 0; + link->trigger_cnt[1][CAM_TRIGGER_POINT_EOF] = 0; + + mutex_unlock(&link->lock); + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +setup_failed: + __cam_req_mgr_destroy_subdev(&link->l_dev); +create_subdev_failed: + cam_destroy_link_hdl(link->link_hdl); + link_info->u.link_info_v2.link_hdl = -1; +link_hdl_fail: + mutex_unlock(&link->lock); + __cam_req_mgr_unreserve_link(cam_session, link); + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + + +int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info) +{ + int rc = 0; + struct cam_req_mgr_core_session *cam_session; + struct cam_req_mgr_core_link *link; + + if (!unlink_info) { + CAM_ERR(CAM_CRM, "NULL pointer"); + return -EINVAL; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl); + + /* session hdl's priv data is cam session struct */ + cam_session = cam_get_session_priv(unlink_info->session_hdl); + if (!cam_session || (cam_session->session_hdl != unlink_info->session_hdl)) { + CAM_ERR(CAM_CRM, "session: %s, unlink_info->ses_hdl:%x, cam_session->ses_hdl:%x", + CAM_IS_NULL_TO_STR(cam_session), unlink_info->session_hdl, + (!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : cam_session->session_hdl); + mutex_unlock(&g_crm_core_dev->crm_lock); + return -EINVAL; + } + + /* link hdl's priv data is core_link struct */ + link = cam_get_link_priv(unlink_info->link_hdl); + if (!link || (link->link_hdl != unlink_info->link_hdl)) { + CAM_ERR(CAM_CRM, "link: %s, unlink_info->link_hdl:%x, link->link_hdl:%x", + CAM_IS_NULL_TO_STR(link), unlink_info->link_hdl, + (!link) ? 
CAM_REQ_MGR_DEFAULT_HDL_VAL : link->link_hdl); + rc = -EINVAL; + goto done; + } + mutex_lock(&cam_session->lock); + rc = __cam_req_mgr_unlink(cam_session, link); + mutex_unlock(&cam_session->lock); + + /* Free curent link and put back into session's free pool of links */ + __cam_req_mgr_unreserve_link(cam_session, link); + +done: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +int cam_req_mgr_schedule_request( + struct cam_req_mgr_sched_request *sched_req) +{ + int rc = 0; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_core_session *session = NULL; + struct cam_req_mgr_core_sched_req sched; + + if (!sched_req) { + CAM_ERR(CAM_CRM, "csl_req is NULL"); + return -EINVAL; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + link = cam_get_link_priv(sched_req->link_hdl); + if (!link || (link->link_hdl != sched_req->link_hdl)) { + CAM_ERR(CAM_CRM, "link: %s, sched_req->link_hdl:%x, link->link_hdl:%x", + CAM_IS_NULL_TO_STR(link), sched_req->link_hdl, + (!link) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : link->link_hdl); + rc = -EINVAL; + goto end; + } + + session = (struct cam_req_mgr_core_session *)link->parent; + if (!session) { + CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl); + rc = -EINVAL; + goto end; + } + + if (sched_req->req_id <= link->last_flush_id) { + CAM_INFO(CAM_CRM, + "request %lld is flushed, last_flush_id to flush %d", + sched_req->req_id, link->last_flush_id); + rc = -EBADR; + goto end; + } + + if (sched_req->req_id > link->last_flush_id) + link->last_flush_id = 0; + + CAM_DBG(CAM_CRM, "link 0x%x req %lld, sync_mode %d", + sched_req->link_hdl, sched_req->req_id, sched_req->sync_mode); + + memset(&sched, 0, sizeof(sched)); + + sched.req_id = sched_req->req_id; + sched.sync_mode = sched_req->sync_mode; + sched.link_hdl = sched_req->link_hdl; + sched.additional_timeout = sched_req->additional_timeout; + if (session->force_err_recovery == AUTO_RECOVERY) { + sched.bubble_enable = sched_req->bubble_enable; + } else { + 
sched.bubble_enable = + (session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0; + } + + rc = cam_req_mgr_process_sched_req(link, &sched); + + CAM_DBG(CAM_REQ, "Open req %lld on link 0x%x with sync_mode %d", + sched_req->req_id, sched_req->link_hdl, sched_req->sync_mode); +end: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +int cam_req_mgr_schedule_request_v2( + struct cam_req_mgr_sched_request_v2 *sched_req) +{ + int i = 0; + int rc = 0; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_core_session *session = NULL; + struct cam_req_mgr_core_sched_req sched; + struct cam_req_mgr_core_link *sync_links[MAXIMUM_LINKS_PER_SESSION]; + + mutex_lock(&g_crm_core_dev->crm_lock); + link = cam_get_link_priv(sched_req->link_hdl); + if (!link || (link->link_hdl != sched_req->link_hdl)) { + CAM_ERR(CAM_CRM, "link: %s, sched_req->link_hdl:%x, link->link_hdl:%x", + CAM_IS_NULL_TO_STR(link), sched_req->link_hdl, + (!link) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : link->link_hdl); + rc = -EINVAL; + goto end; + } + + session = (struct cam_req_mgr_core_session *)link->parent; + if (!session) { + CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl); + rc = -EINVAL; + goto end; + } + + if (sched_req->req_id <= link->last_flush_id) { + CAM_INFO(CAM_CRM, + "request %lld is flushed, last_flush_id to flush %d", + sched_req->req_id, link->last_flush_id); + rc = -EBADR; + goto end; + } + + if (sched_req->req_id > link->last_flush_id) + link->last_flush_id = 0; + + CAM_DBG(CAM_CRM, "link 0x%x req %lld, sync_mode %d num_links %d", + sched_req->link_hdl, sched_req->req_id, sched_req->sync_mode, + sched_req->num_links); + + memset(&sched, 0, sizeof(sched)); + + sched.req_id = sched_req->req_id; + sched.sync_mode = sched_req->sync_mode; + sched.link_hdl = sched_req->link_hdl; + sched.additional_timeout = sched_req->additional_timeout; + sched.num_valid_params = sched_req->num_valid_params; + sched.param_mask = sched_req->param_mask; + sched.params = 
sched_req->params; + + if (session->force_err_recovery == AUTO_RECOVERY) { + sched.bubble_enable = sched_req->bubble_enable; + } else { + sched.bubble_enable = + (session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0; + } + + if (sched_req->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + if ((sched_req->num_links <= 0) || + (sched_req->num_links > + min(MAXIMUM_LINKS_PER_SESSION, MAX_LINKS_PER_SESSION))) { + CAM_ERR(CAM_CRM, "link:0x%x req:%lld invalid num_links:%d", + link->link_hdl, sched_req->req_id, sched_req->num_links); + rc = -EINVAL; + goto end; + } + + for (i = 0; i < sched_req->num_links; i++) { + if (!sched_req->link_hdls[i]) { + CAM_ERR(CAM_CRM, "link handle %d in sched_req is null", i); + rc = -EINVAL; + goto end; + } + + sync_links[i] = cam_get_link_priv(sched_req->link_hdls[i]); + if (!sync_links[i] || + (sync_links[i]->link_hdl != sched_req->link_hdls[i])) { + CAM_ERR(CAM_CRM, + "Invalid sync link, sync link[%d]: %s sched_req->link_hdl: %x sync_links->link_hdl: 0x%x", + i, CAM_IS_NULL_TO_STR(sync_links[i]), + sched_req->link_hdls[i], + ((!sync_links[i]) ? 
CAM_REQ_MGR_DEFAULT_HDL_VAL : + sync_links[i]->link_hdl)); + rc = -EINVAL; + goto end; + } + } + sched.num_links = sched_req->num_links; + sched.link_hdls = sched_req->link_hdls; + } else + sched.num_links = 0; + + rc = cam_req_mgr_process_sched_req(link, &sched); + + CAM_DBG(CAM_REQ, "Open req %lld on link 0x%x with sync_mode %d", + sched_req->req_id, sched_req->link_hdl, sched_req->sync_mode); +end: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +int cam_req_mgr_schedule_request_v3( + struct cam_req_mgr_sched_request_v3 *sched_req) +{ + int i = 0; + int rc = 0; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_core_session *session = NULL; + struct cam_req_mgr_core_sched_req sched; + struct cam_req_mgr_core_link *sync_links[MAXIMUM_LINKS_PER_SESSION]; + + mutex_lock(&g_crm_core_dev->crm_lock); + + link = cam_get_link_priv(sched_req->link_hdl); + if (!link || (link->link_hdl != sched_req->link_hdl)) { + CAM_ERR(CAM_CRM, "link: %s, sched_req->link_hdl:%x, link->link_hdl:%x", + CAM_IS_NULL_TO_STR(link), sched_req->link_hdl, + (!link) ? 
CAM_REQ_MGR_DEFAULT_HDL_VAL : link->link_hdl); + rc = -EINVAL; + goto end; + } + + session = (struct cam_req_mgr_core_session *)link->parent; + if (!session) { + CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl); + rc = -EINVAL; + goto end; + } + + if (sched_req->req_id <= link->last_flush_id) { + CAM_INFO(CAM_CRM, + "request %lld is flushed, last_flush_id to flush %d", + sched_req->req_id, link->last_flush_id); + rc = -EBADR; + goto end; + } + + if (sched_req->req_id > link->last_flush_id) + link->last_flush_id = 0; + + CAM_DBG(CAM_CRM, "link 0x%x req %lld, sync_mode %d num_links %d", + sched_req->link_hdl, sched_req->req_id, sched_req->sync_mode, + sched_req->num_links); + + memset(&sched, 0, sizeof(sched)); + + sched.req_id = sched_req->req_id; + sched.sync_mode = sched_req->sync_mode; + sched.link_hdl = sched_req->link_hdl; + sched.additional_timeout = sched_req->additional_timeout; + sched.num_valid_params = sched_req->num_valid_params; + sched.param_mask = sched_req->param_mask; + sched.params = sched_req->params; + + if (session->force_err_recovery == AUTO_RECOVERY) { + sched.bubble_enable = sched_req->bubble_enable; + } else { + sched.bubble_enable = + (session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 
1 : 0; + } + + if (sched_req->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + if ((sched_req->num_links <= 0) || + (sched_req->num_links > MAXIMUM_LINKS_PER_SESSION)) { + CAM_ERR(CAM_CRM, "link:0x%x req:%lld invalid num_links:%d", + link->link_hdl, sched_req->req_id, sched_req->num_links); + rc = -EINVAL; + goto end; + } + + for (i = 0; i < sched_req->num_links; i++) { + if (!sched_req->link_hdls[i]) { + CAM_ERR(CAM_CRM, "link handle %d in sched_req is null", i); + rc = -EINVAL; + goto end; + } + + sync_links[i] = cam_get_link_priv(sched_req->link_hdls[i]); + if (!sync_links[i] || + (sync_links[i]->link_hdl != sched_req->link_hdls[i])) { + CAM_ERR(CAM_CRM, + "Invalid sync link, sync link[%d]: %s sched_req->link_hdl: %x sync_links->link_hdl: 0x%x", + i, CAM_IS_NULL_TO_STR(sync_links[i]), + sched_req->link_hdls[i], + ((!sync_links[i]) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : + sync_links[i]->link_hdl)); + rc = -EINVAL; + goto end; + } + } + sched.num_links = sched_req->num_links; + sched.link_hdls = sched_req->link_hdls; + } else + sched.num_links = 0; + + rc = cam_req_mgr_process_sched_req(link, &sched); + + CAM_DBG(CAM_REQ, "Open req %lld on link 0x%x with sync_mode %d", + sched_req->req_id, sched_req->link_hdl, sched_req->sync_mode); +end: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +int cam_req_mgr_sync_config( + struct cam_req_mgr_sync_mode *sync_info) +{ + int i, j, rc = 0; + int sync_idx = 0; + struct cam_req_mgr_core_session *cam_session; + struct cam_req_mgr_core_link *link[MAX_LINKS_PER_SESSION]; + + if (!sync_info) { + CAM_ERR(CAM_CRM, "NULL pointer"); + return -EINVAL; + } + + if ((sync_info->num_links < 0) || + (sync_info->num_links > + MAX_LINKS_PER_SESSION)) { + CAM_ERR(CAM_CRM, "Invalid num links %d", sync_info->num_links); + return -EINVAL; + } + + if ((sync_info->sync_mode != CAM_REQ_MGR_SYNC_MODE_SYNC) && + (sync_info->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC)) { + CAM_ERR(CAM_CRM, "Invalid sync mode %d", sync_info->sync_mode); + return 
-EINVAL; + } + + if ((!sync_info->link_hdls[0]) || (!sync_info->link_hdls[1])) { + CAM_WARN(CAM_CRM, "Invalid link handles 0x%x 0x%x", + sync_info->link_hdls[0], sync_info->link_hdls[1]); + return -EINVAL; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + /* session hdl's priv data is cam session struct */ + cam_session = cam_get_session_priv(sync_info->session_hdl); + if (!cam_session || (cam_session->session_hdl != sync_info->session_hdl)) { + CAM_ERR(CAM_CRM, "session: %s, sync_info->session_hdl:%x, session->ses_hdl:%x", + CAM_IS_NULL_TO_STR(cam_session), sync_info->session_hdl, + (!cam_session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : cam_session->session_hdl); + mutex_unlock(&g_crm_core_dev->crm_lock); + return -EINVAL; + } + + mutex_lock(&cam_session->lock); + + for (i = 0; i < sync_info->num_links; i++) { + + if (!sync_info->link_hdls[i]) { + CAM_ERR(CAM_CRM, "link handle %d is null", i); + rc = -EINVAL; + goto done; + } + + link[i] = cam_get_link_priv(sync_info->link_hdls[i]); + if (!link[i] || (link[i]->link_hdl != sync_info->link_hdls[i])) { + CAM_ERR(CAM_CRM, "link: %s, sync_info->link_hdl:%x, link->link_hdl:%x", + CAM_IS_NULL_TO_STR(link), sync_info->link_hdls[i], + (!link[i]) ? 
CAM_REQ_MGR_DEFAULT_HDL_VAL : link[i]->link_hdl); + rc = -EINVAL; + goto done; + } + + link[i]->sync_link_sof_skip = false; + link[i]->is_master = false; + link[i]->in_msync_mode = false; + link[i]->initial_sync_req = -1; + link[i]->num_sync_links = 0; + + for (j = 0; j < sync_info->num_links-1; j++) + link[i]->sync_link[j] = NULL; + } + + if (sync_info->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) { + for (i = 0; i < sync_info->num_links; i++) { + j = 0; + sync_idx = 0; + CAM_DBG(CAM_REQ, "link %x adds sync link:", + link[i]->link_hdl); + while (j < sync_info->num_links) { + if (i != j) { + link[i]->sync_link[sync_idx++] = + link[j]; + link[i]->num_sync_links++; + CAM_DBG(CAM_REQ, "sync_link[%d] : %x", + sync_idx-1, link[j]->link_hdl); + } + j++; + } + link[i]->initial_skip = true; + link[i]->sof_timestamp = 0; + } + } else { + for (j = 0; j < sync_info->num_links; j++) { + link[j]->initial_skip = true; + link[j]->sof_timestamp = 0; + } + } + + cam_session->sync_mode = sync_info->sync_mode; + CAM_DBG(CAM_REQ, + "Sync config completed on %d links with sync_mode %d", + sync_info->num_links, sync_info->sync_mode); + +done: + mutex_unlock(&cam_session->lock); + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +int cam_req_mgr_flush_requests( + struct cam_req_mgr_flush_info *flush_info) +{ + int rc = 0; + struct crm_workq_task *task; + struct cam_req_mgr_core_link *link; + struct cam_req_mgr_flush_info *flush; + struct crm_task_payload *task_data; + struct cam_req_mgr_core_session *session; + unsigned long rem_jiffies; + + if (!flush_info) { + CAM_ERR(CAM_CRM, "flush req is NULL"); + return -EFAULT; + } + if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) { + CAM_ERR(CAM_CRM, "incorrect flush type %x", + flush_info->flush_type); + return -EINVAL; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + + /* session hdl's priv data is cam session struct */ + session = cam_get_session_priv(flush_info->session_hdl); + if (!session || (session->session_hdl != 
flush_info->session_hdl)) { + CAM_ERR(CAM_CRM, "session: %s, flush_info->ses_hdl:%x, session->ses_hdl:%x", + CAM_IS_NULL_TO_STR(session), flush_info->session_hdl, + (!session) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : session->session_hdl); + rc = -EINVAL; + goto end; + } + if (session->num_links <= 0) { + CAM_WARN(CAM_CRM, "No active links in session %x", + flush_info->session_hdl); + goto end; + } + + link = cam_get_link_priv(flush_info->link_hdl); + if (!link || (link->link_hdl != flush_info->link_hdl)) { + CAM_ERR(CAM_CRM, "link: %s, flush_info->link_hdl:%x, link->link_hdl:%x", + CAM_IS_NULL_TO_STR(link), flush_info->link_hdl, + (!link) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : link->link_hdl); + rc = -EINVAL; + goto end; + } + + task = cam_req_mgr_workq_get_task(link->workq); + if (!task) { + rc = -ENOMEM; + goto end; + } + + task_data = (struct crm_task_payload *)task->payload; + task_data->type = CRM_WORKQ_TASK_FLUSH_REQ; + flush = (struct cam_req_mgr_flush_info *)&task_data->u; + flush->req_id = flush_info->req_id; + flush->link_hdl = flush_info->link_hdl; + flush->flush_type = flush_info->flush_type; + flush->reserved = flush_info->reserved; + task->process_cb = &cam_req_mgr_process_flush_req; + init_completion(&link->workq_comp); + rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0); + if (rc) { + CAM_ERR(CAM_CRM, "Failed to queue flush task rc: %d", rc); + goto end; + } + + /* Blocking call */ + rem_jiffies = cam_common_wait_for_completion_timeout( + &link->workq_comp, + msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT)); + if (!rem_jiffies) { + CAM_WARN(CAM_CRM, "Flush call timeout for session_hdl %u link_hdl %u type: %d", + flush_info->link_hdl, flush_info->session_hdl, + flush_info->flush_type); + rc = -ETIMEDOUT; + } +end: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control) +{ + int rc = 0, i; + struct cam_req_mgr_core_link *link = NULL; + int init_timeout = 0; + + if 
(!control) { + CAM_ERR(CAM_CRM, "Control command is NULL"); + rc = -EINVAL; + goto end; + } + + if ((control->num_links <= 0) || + (control->num_links > MAX_LINKS_PER_SESSION)) { + CAM_ERR(CAM_CRM, "Invalid number of links %d", + control->num_links); + rc = -EINVAL; + goto end; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + for (i = 0; i < control->num_links; i++) { + link = cam_get_link_priv(control->link_hdls[i]); + if (!link || (link->link_hdl != control->link_hdls[i])) { + CAM_ERR(CAM_CRM, "link: %s, control->link_hdl:%x, link->link_hdl:%x", + CAM_IS_NULL_TO_STR(link), control->link_hdls[i], + (!link) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : link->link_hdl); + rc = -EINVAL; + break; + } + + mutex_lock(&link->lock); + if (control->ops == CAM_REQ_MGR_LINK_ACTIVATE) { + spin_lock_bh(&link->link_state_spin_lock); + link->state = CAM_CRM_LINK_STATE_READY; + spin_unlock_bh(&link->link_state_spin_lock); + if (control->init_timeout[i]) + link->skip_init_frame = true; + init_timeout = (2 * control->init_timeout[i]); + CAM_DBG(CAM_CRM, + "Activate link: 0x%x init_timeout: %d ms", + link->link_hdl, control->init_timeout[i]); + /* Start SOF watchdog timer */ + rc = crm_timer_init(&link->watchdog, + (init_timeout + CAM_REQ_MGR_WATCHDOG_TIMEOUT), + link, &__cam_req_mgr_sof_freeze); + if (rc < 0) { + CAM_ERR(CAM_CRM, + "SOF timer start fails: link=0x%x", + link->link_hdl); + rc = -EFAULT; + } + /* Wait for the streaming of sync link */ + link->initial_skip = true; + /* Pause the timer before sensor stream on */ + spin_lock_bh(&link->link_state_spin_lock); + link->watchdog->pause_timer = true; + spin_unlock_bh(&link->link_state_spin_lock); + /* notify nodes */ + __cam_req_mgr_send_evt(0, CAM_REQ_MGR_LINK_EVT_RESUME, + CRM_KMD_ERR_MAX, link); + } else if (control->ops == CAM_REQ_MGR_LINK_DEACTIVATE) { + /* notify nodes */ + __cam_req_mgr_send_evt(0, CAM_REQ_MGR_LINK_EVT_PAUSE, + CRM_KMD_ERR_MAX, link); + + /* Destroy SOF watchdog timer */ + spin_lock_bh(&link->link_state_spin_lock); + 
link->state = CAM_CRM_LINK_STATE_IDLE; + link->skip_init_frame = false; + crm_timer_exit(&link->watchdog); + spin_unlock_bh(&link->link_state_spin_lock); + CAM_DBG(CAM_CRM, + "De-activate link: 0x%x", link->link_hdl); + } else { + CAM_ERR(CAM_CRM, "Invalid link control command"); + rc = -EINVAL; + } + mutex_unlock(&link->lock); + } + mutex_unlock(&g_crm_core_dev->crm_lock); +end: + return rc; +} + +int cam_req_mgr_link_properties(struct cam_req_mgr_link_properties *properties) +{ + int rc = 0; + struct cam_req_mgr_core_link *link = NULL; + + mutex_lock(&g_crm_core_dev->crm_lock); + link = cam_get_link_priv(properties->link_hdl); + if (!link || (link->link_hdl != properties->link_hdl)) { + CAM_ERR(CAM_CRM, "link: %s, properties->link_hdl:0x%x, link->link_hdl:0x%x", + CAM_IS_NULL_TO_STR(link), properties->link_hdl, + (!link) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : link->link_hdl); + rc = -EINVAL; + goto end; + } + + spin_lock_bh(&link->link_state_spin_lock); + if (link->state != CAM_CRM_LINK_STATE_READY) { + spin_unlock_bh(&link->link_state_spin_lock); + CAM_ERR(CAM_CRM, + "Only can config link 0x%x properties in ready state", + link->link_hdl); + rc = -EAGAIN; + goto end; + } + spin_unlock_bh(&link->link_state_spin_lock); + + mutex_lock(&link->lock); + link->properties_mask = properties->properties_mask; + + rc = __cam_req_mgr_send_evt(0, CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES, + CRM_KMD_ERR_MAX, link); + if (rc) + CAM_ERR(CAM_CRM, + "Failed to set properties on link 0x%x", link->link_hdl); + else + CAM_DBG(CAM_CRM, "link 0x%x set properties mask:0x%x successfully", + link->link_hdl, link->properties_mask); + + mutex_unlock(&link->lock); +end: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +static void *cam_req_mgr_user_dump_state_monitor_array_info( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_req_mgr_state_monitor *evt = NULL; + uint64_t *addr; + struct cam_common_hw_dump_header *hdr; + uint8_t *dst; + + evt = (struct cam_req_mgr_state_monitor 
*)dump_struct; + + addr = (uint64_t *)addr_ptr; + + *addr++ = evt->req_id; + + dst = (uint8_t *)addr; + hdr = (struct cam_common_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_CRM_DUMP_TAG_MAX_LEN, + "%s", evt->name); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_common_hw_dump_header)); + + dst = (uint8_t *)addr; + hdr = (struct cam_common_hw_dump_header *)dst; + scnprintf(hdr->tag, CAM_CRM_DUMP_TAG_MAX_LEN, + "%s", __cam_req_mgr_operation_type_to_str(evt->req_state)); + hdr->word_size = sizeof(uint64_t); + addr = (uint64_t *)(dst + sizeof(struct cam_common_hw_dump_header)); + + *addr++ = evt->dev_hdl; + *addr++ = evt->frame_id; + *addr++ = evt->time_stamp.tv_sec; + *addr++ = evt->time_stamp.tv_nsec / NSEC_PER_USEC; + return addr; +} + +/** + * cam_req_mgr_dump_state_monitor_info() + * + * @brief : dump the state monitor array, dump from index next_wr_idx + * to save state information in time order. + * @link : link pointer + * @dump_info : dump payload + */ +static int cam_req_mgr_dump_state_monitor_info( + struct cam_req_mgr_core_link *link, + struct cam_req_mgr_dump_info *dump_info) +{ + int rc = 0; + int i; + int idx = link->req.next_state_idx; + struct cam_common_hw_dump_args dump_args; + size_t buf_len; + size_t remain_len; + uint32_t min_len; + uintptr_t cpu_addr; + + rc = cam_mem_get_cpu_buf(dump_info->buf_handle, + &cpu_addr, &buf_len); + if (rc) { + CAM_ERR(CAM_CRM, "Invalid handle %u rc %d", + dump_info->buf_handle, rc); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return rc; + } + if (buf_len <= dump_info->offset) { + CAM_WARN(CAM_CRM, "Dump buffer overshoot len %zu offset %zu", + buf_len, dump_info->offset); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return -ENOSPC; + } + + remain_len = buf_len - dump_info->offset; + min_len = sizeof(struct cam_common_hw_dump_header) + + (MAX_REQ_STATE_MONITOR_NUM * (CAM_CRM_DUMP_EVENT_NUM_WORDS * sizeof(uint64_t) + + sizeof(struct cam_common_hw_dump_header) * 
CAM_CRM_DUMP_EVENT_NUM_HEADERS)); + + if (remain_len < min_len) { + CAM_WARN(CAM_CRM, "Dump buffer exhaust remain %zu min %u", + remain_len, min_len); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return -ENOSPC; + } + + dump_args.req_id = dump_info->req_id; + dump_args.cpu_addr = cpu_addr; + dump_args.buf_len = buf_len; + dump_args.offset = dump_info->offset; + dump_args.ctxt_to_hw_map = NULL; + + for (i = 0; i < MAX_REQ_STATE_MONITOR_NUM; i++) { + if (link->req.state_monitor[idx].req_state != CAM_CRM_STATE_INVALID) { + rc = cam_common_user_dump_helper(&dump_args, + cam_req_mgr_user_dump_state_monitor_array_info, + &link->req.state_monitor[idx], + sizeof(uint64_t), "CRM_STATE_MONITOR.%x:", link->link_hdl); + if (rc) { + CAM_ERR(CAM_CRM, + "Dump state info failed, rc: %d", + rc); + cam_mem_put_cpu_buf(dump_info->buf_handle); + return rc; + } + } + __cam_req_mgr_inc_idx(&idx, 1, MAX_REQ_STATE_MONITOR_NUM); + } + + dump_info->offset = dump_args.offset; + cam_mem_put_cpu_buf(dump_info->buf_handle); + return rc; +} + +int cam_req_mgr_dump_request(struct cam_dump_req_cmd *dump_req) +{ + int rc = 0; + int i; + struct cam_req_mgr_dump_info info; + struct cam_req_mgr_core_link *link = NULL; + struct cam_req_mgr_core_session *session = NULL; + struct cam_req_mgr_connected_device *device = NULL; + + if (!dump_req) { + CAM_ERR(CAM_CRM, "dump req is NULL"); + return -EFAULT; + } + + mutex_lock(&g_crm_core_dev->crm_lock); + /* session hdl's priv data is cam session struct */ + session = cam_get_session_priv(dump_req->session_handle); + if (!session || (session->session_hdl != dump_req->session_handle)) { + CAM_ERR(CAM_CRM, "session: %s, dump_req->ses_hdl:%x, session->ses_hdl:%x", + CAM_IS_NULL_TO_STR(session), dump_req->session_handle, + (!session) ? 
CAM_REQ_MGR_DEFAULT_HDL_VAL : session->session_hdl); + rc = -EINVAL; + goto end; + } + if (session->num_links <= 0) { + CAM_WARN(CAM_CRM, "No active links in session %x", + dump_req->session_handle); + goto end; + } + + link = cam_get_link_priv(dump_req->link_hdl); + if (!link || (link->link_hdl != dump_req->link_hdl)) { + CAM_ERR(CAM_CRM, "link: %s, dump_req->link_hdl:%x, link->link_hdl:%x", + CAM_IS_NULL_TO_STR(link), dump_req->link_hdl, + (!link) ? CAM_REQ_MGR_DEFAULT_HDL_VAL : link->link_hdl); + rc = -EINVAL; + goto end; + } + + /* + * Dump Mem MGR + */ + rc = cam_mem_mgr_dump_user(dump_req); + if (rc) + CAM_ERR(CAM_REQ, + "Fail to dump mem mgr rc %d", rc); + + /* + * Dump CRM + */ + info.offset = dump_req->offset; + info.link_hdl = dump_req->link_hdl; + info.dev_hdl = 0; + info.req_id = dump_req->issue_req_id; + info.buf_handle = dump_req->buf_handle; + info.error_type = dump_req->error_type; + rc = cam_req_mgr_dump_state_monitor_info(link, &info); + if (rc) + CAM_ERR(CAM_REQ, + "Fail to dump state monitor req %llu rc %d", + info.req_id, rc); + + info.buf_handle = dump_req->buf_handle; + rc = cam_cpas_dump_state_monitor_info(&info); + if (rc) + CAM_ERR(CAM_CRM, "Fail to dump cpas vote info, rc %d", rc); + + for (i = 0; i < link->num_devs; i++) { + device = &link->l_dev[i]; + info.link_hdl = dump_req->link_hdl; + info.dev_hdl = device->dev_hdl; + info.req_id = dump_req->issue_req_id; + info.buf_handle = dump_req->buf_handle; + info.error_type = dump_req->error_type; + if (device->ops && device->ops->dump_req) { + rc = device->ops->dump_req(&info); + if (rc) + CAM_ERR(CAM_REQ, + "Fail dump req %llu dev %d rc %d", + info.req_id, device->dev_hdl, rc); + } + } + dump_req->offset = info.offset; + CAM_INFO(CAM_REQ, "req %llu, offset %zu", + dump_req->issue_req_id, dump_req->offset); +end: + mutex_unlock(&g_crm_core_dev->crm_lock); + return rc; +} + +static unsigned long cam_req_mgr_core_mini_dump_cb(void *dst, unsigned long len, + void *priv_data) +{ + struct 
cam_req_mgr_core_link *link; + struct cam_req_mgr_core_mini_dump *md; + struct cam_req_mgr_core_link_mini_dump *md_link; + struct cam_req_mgr_req_tbl *l_tbl = NULL; + uint8_t *waddr; + unsigned long dumped_len = 0; + unsigned long remain_len = len; + uint32_t i = 0, j = 0, k = 0; + + if (!dst || len < sizeof(*md)) { + CAM_ERR(CAM_CRM, "Insufficient received length %lu dumped len: %u", + len, dumped_len); + return 0; + } + + md = (struct cam_req_mgr_core_mini_dump *)dst; + md->num_link = 0; + waddr = (uint8_t *)dst + sizeof(*md); + dumped_len += sizeof(*md); + remain_len -= dumped_len; + + for (i = 0; i < MAXIMUM_LINKS_CAPACITY; i++) { + if (remain_len < sizeof(*md_link)) { + CAM_ERR(CAM_CRM, + "Insufficent received length: %lu, dumped_len %lu", + len, dumped_len); + goto end; + } + + if (atomic_read(&g_links[i].is_used) == 0) + continue; + + waddr += (k * sizeof(*md_link)); + md_link = (struct cam_req_mgr_core_link_mini_dump *)waddr; + md->link[k] = md_link; + link = &g_links[i]; + md_link->sync_link_sof_skip = link->sync_link_sof_skip; + md_link->num_sync_links = link->num_sync_links; + md_link->initial_sync_req = link->initial_sync_req; + md_link->last_flush_id = link->last_flush_id; + md_link->is_used = atomic_read(&link->is_used); + md_link->retry_cnt = link->retry_cnt; + md_link->eof_event_cnt = atomic_read(&link->eof_event_cnt); + md_link->pd_mask = link->pd_mask; + md_link->link_hdl = link->link_hdl; + md_link->num_devs = link->num_devs; + md_link->max_delay = link->max_delay; + md_link->dual_trigger = link->dual_trigger; + md_link->state = link->state; + md_link->is_shutdown = link->is_shutdown; + md_link->skip_init_frame = link->skip_init_frame; + md_link->is_master = link->is_master; + md_link->initial_skip = link->initial_skip; + md_link->in_msync_mode = link->in_msync_mode; + md_link->wq_congestion = link->wq_congestion; + memcpy(md_link->req.apply_data, link->req.apply_data, + sizeof(link->req.apply_data)); + memcpy(md_link->req.prev_apply_data, 
link->req.prev_apply_data, + sizeof(link->req.prev_apply_data)); + memcpy(&md_link->req.in_q, link->req.in_q, + sizeof(struct cam_req_mgr_req_queue)); + md_link->req.num_tbl = link->req.num_tbl; + + md_link->workq.workq_scheduled_ts = + link->workq->workq_scheduled_ts; + md_link->workq.task.pending_cnt = + atomic_read(&link->workq->task.pending_cnt); + md_link->workq.task.free_cnt = + atomic_read(&link->workq->task.free_cnt); + md_link->workq.task.num_task = link->workq->task.num_task; + + l_tbl = link->req.l_tbl; + j = 0; + while(l_tbl) { + md_link->req.l_tbl[j].id = l_tbl->id; + md_link->req.l_tbl[j].pd = l_tbl->id; + md_link->req.l_tbl[j].dev_count = l_tbl->dev_count; + md_link->req.l_tbl[j].dev_mask = l_tbl->dev_mask; + md_link->req.l_tbl[j].skip_traverse = + l_tbl->skip_traverse; + md_link->req.l_tbl[j].pd_delta = l_tbl->pd_delta; + md_link->req.l_tbl[j].num_slots = l_tbl->num_slots; + memcpy(md_link->req.l_tbl[j].slot, l_tbl->slot, + sizeof(l_tbl->slot)); + l_tbl = l_tbl->next; + j++; + } + md->num_link++; + dumped_len += sizeof(*md_link); + remain_len = len - dumped_len; + } +end: + return dumped_len; +} + +void cam_req_mgr_dump_linked_devices_on_err(int32_t link_hdl) +{ + int i, log_buf_size, buf_used = 0; + struct cam_req_mgr_connected_device *dev; + struct cam_req_mgr_core_link *link; + struct cam_req_mgr_core_session *session; + char log_buf[CAM_CRM_DUMP_LINKED_DEVICES_MAX_LEN]; + + link = cam_get_link_priv(link_hdl); + if (!link || link->link_hdl != link_hdl) { + CAM_DBG(CAM_CRM, "Invalid link hdl 0x%x", link_hdl); + return; + } + + session = (struct cam_req_mgr_core_session *)link->parent; + for (i = 0; i < link->num_devs; i++) { + dev = &link->l_dev[i]; + + log_buf_size = CAM_CRM_DUMP_LINKED_DEVICES_MAX_LEN - buf_used; + buf_used += snprintf(log_buf + buf_used, log_buf_size, " %s", + dev->dev_info.name); + } + + CAM_INFO(CAM_CRM, "Connected devices on the link 0x%x in session 0x%x:%s", + link->link_hdl, session->session_hdl, log_buf); +} + +int 
cam_req_mgr_core_device_init(void) +{ + int i; + CAM_DBG(CAM_CRM, "Enter g_crm_core_dev %pK", g_crm_core_dev); + + if (g_crm_core_dev) { + CAM_WARN(CAM_CRM, "core device is already initialized"); + return 0; + } + g_crm_core_dev = CAM_MEM_ZALLOC(sizeof(*g_crm_core_dev), + GFP_KERNEL); + if (!g_crm_core_dev) + return -ENOMEM; + + CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev); + INIT_LIST_HEAD(&g_crm_core_dev->session_head); + mutex_init(&g_crm_core_dev->crm_lock); + cam_req_mgr_debug_register(g_crm_core_dev); + + for (i = 0; i < MAXIMUM_LINKS_CAPACITY; i++) { + mutex_init(&g_links[i].lock); + spin_lock_init(&g_links[i].link_state_spin_lock); + spin_lock_init(&g_links[i].req.monitor_slock); + spin_lock_init(&g_links[i].req.reset_link_spin_lock); + atomic_set(&g_links[i].is_used, 0); + cam_req_mgr_core_link_reset(&g_links[i]); + } + cam_common_register_mini_dump_cb(cam_req_mgr_core_mini_dump_cb, + "CAM_CRM", NULL); + + return 0; +} + +int cam_req_mgr_core_device_deinit(void) +{ + if (!g_crm_core_dev) { + CAM_ERR(CAM_CRM, "NULL pointer"); + return -EINVAL; + } + + CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev); + cam_req_mgr_debug_unregister(); + mutex_destroy(&g_crm_core_dev->crm_lock); + CAM_MEM_FREE(g_crm_core_dev); + g_crm_core_dev = NULL; + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core.h new file mode 100644 index 0000000000..9bd31240ce --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core.h @@ -0,0 +1,867 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#ifndef _CAM_REQ_MGR_CORE_H_ +#define _CAM_REQ_MGR_CORE_H_ + +#include +#include "cam_req_mgr_interface.h" +#include "cam_req_mgr_core_defs.h" +#include "cam_req_mgr_workq.h" +#include "cam_req_mgr_timer.h" + +#define CAM_REQ_MGR_MAX_LINKED_DEV 16 +#define MAX_REQ_SLOTS 48 +#define MAX_REQ_STATE_MONITOR_NUM 108 +#define MAX_DEV_FOR_SPECIAL_OPS 4 + +#define CAM_REQ_MGR_WATCHDOG_TIMEOUT 1000 +#define CAM_REQ_MGR_WATCHDOG_TIMEOUT_DEFAULT 5000 +#define CAM_REQ_MGR_WATCHDOG_TIMEOUT_MAX 60000 +#define CAM_REQ_MGR_SCHED_REQ_TIMEOUT 1000 +#define CAM_REQ_MGR_SIMULATE_SCHED_REQ 30 +#define CAM_REQ_MGR_DEFAULT_HDL_VAL 0 + +#define FORCE_DISABLE_RECOVERY 2 +#define FORCE_ENABLE_RECOVERY 1 +#define AUTO_RECOVERY 0 + +#define CRM_WORKQ_NUM_TASKS 60 + +#define MAX_SYNC_COUNT 65535 + + +/* Default frame rate is 30 */ +#define DEFAULT_FRAME_DURATION 33333333 + +#define INITIAL_IN_SYNC_REQ 5 + +#define SYNC_LINK_SOF_CNT_MAX_LMT 1 + +#define MAXIMUM_LINKS_CAPACITY 8 + +#define MAXIMUM_LINKS_PER_SESSION 4 + +#define MAXIMUM_RETRY_ATTEMPTS 3 + +#define VERSION_1 1 +#define VERSION_2 2 +#define CAM_REQ_MGR_MAX_TRIGGERS 2 + +#define REQ_MAXIMUM_BUBBLE_TIMES 2 + +#define CAM_REQ_MGR_HALF_FRAME_DURATION(frame_duration) ((frame_duration) / 2) +#define CAM_REQ_MGR_COMPUTE_TIMEOUT(x) ((x) + (((x) * (x + 25)) / 100)) + +/* Number of words for dumping req state info */ +#define CAM_CRM_DUMP_EVENT_NUM_WORDS 6 + +/* Maximum length of tag while dumping */ +#define CAM_CRM_DUMP_TAG_MAX_LEN 128 + +/* Number of headers in dumping req state info function */ +#define CAM_CRM_DUMP_EVENT_NUM_HEADERS 2 + +/* Threshold of sensor setting apply delay is 0.1ms */ +#define CAM_CRM_SENSOR_APPLIY_DELAY_THRESHOLD 100000 + +/* Mismatched frame mode mask */ +#define CAM_CRM_MISMATCHED_FRAME_MODE_MASK BIT(0) + +/* Maximum length of log buffer when printing connected devices on error */ +#define CAM_CRM_DUMP_LINKED_DEVICES_MAX_LEN 200 + +/* Mismatched frame mode to identify notify or drop the mismatchd frame 
*/ +enum crm_mismatched_frame_mode { + CRM_NOTIFY_MISMATCHED_FRMAE, + CRM_DROP_MISMATCHED_FRMAE, +}; + +/** + * enum crm_workq_task_type + * @codes: to identify which type of task is present + */ +enum crm_workq_task_type { + CRM_WORKQ_TASK_GET_DEV_INFO, + CRM_WORKQ_TASK_SETUP_LINK, + CRM_WORKQ_TASK_DEV_ADD_REQ, + CRM_WORKQ_TASK_APPLY_REQ, + CRM_WORKQ_TASK_NOTIFY_SOF, + CRM_WORKQ_TASK_NOTIFY_EOF, + CRM_WORKQ_TASK_NOTIFY_ERR, + CRM_WORKQ_TASK_NOTIFY_FREEZE, + CRM_WORKQ_TASK_SCHED_REQ, + CRM_WORKQ_TASK_FLUSH_REQ, + CRM_WORKQ_TASK_TRIGGER_SYNCED_RESUME, + CRM_WORKQ_TASK_INVALID, +}; + +/** + * struct crm_task_payload + * @type : to identify which type of task is present + * @u : union of payload of all types of tasks supported + * @sched_req : contains info of incoming reqest from CSL to CRM + * @flush_info : contains info of cancelled reqest + * @dev_req : contains tracking info of available req id at device + * @send_req : contains info of apply settings to be sent to devs in link + * @notify_trigger : contains notification from IFE to CRM about trigger + * @notify_err : contains error info happened while processing request + * - + */ +struct crm_task_payload { + enum crm_workq_task_type type; + union { + struct cam_req_mgr_sched_request_v2 sched_req; + struct cam_req_mgr_flush_info flush_info; + struct cam_req_mgr_add_request dev_req; + struct cam_req_mgr_send_request send_req; + struct cam_req_mgr_trigger_notify notify_trigger; + struct cam_req_mgr_error_notify notify_err; + } u; +}; + +/** + * enum crm_req_state + * State machine for life cycle of request in pd table + * EMPTY : indicates req slot is empty + * PENDING : indicates req slot is waiting for reqs from all devs + * READY : indicates req slot is ready to be sent to devs + * INVALID : indicates req slot is not in valid state + */ +enum crm_req_state { + CRM_REQ_STATE_EMPTY, + CRM_REQ_STATE_PENDING, + CRM_REQ_STATE_READY, + CRM_REQ_STATE_INVALID, +}; + +/** + * enum crm_slot_status + * State machine for 
life cycle of request in input queue + * NO_REQ : empty slot + * REQ_ADDED : new entry in slot + * REQ_PENDING : waiting for next trigger to apply + * REQ_READY : req has ready + * REQ_APPLIED : req is sent to all devices + * INVALID : invalid state + */ +enum crm_slot_status { + CRM_SLOT_STATUS_NO_REQ, + CRM_SLOT_STATUS_REQ_ADDED, + CRM_SLOT_STATUS_REQ_PENDING, + CRM_SLOT_STATUS_REQ_READY, + CRM_SLOT_STATUS_REQ_APPLIED, + CRM_SLOT_STATUS_INVALID, +}; + +/** + * enum cam_req_mgr_link_state + * State machine for life cycle of link in crm + * AVAILABLE : link available + * IDLE : link initialized but not ready yet + * READY : link is ready for use + * ERR : link has encountered error + * MAX : invalid state + */ +enum cam_req_mgr_link_state { + CAM_CRM_LINK_STATE_AVAILABLE, + CAM_CRM_LINK_STATE_IDLE, + CAM_CRM_LINK_STATE_READY, + CAM_CRM_LINK_STATE_ERR, + CAM_CRM_LINK_STATE_MAX, +}; + +/** + * enum cam_req_mgr_sync_type + * Sync type for syncing info + * DELAY_AT_SOF : inject delay at SOF + * DELAY_AT_EOF : inject delay at EOF + * APPLY_AT_EOF : apply at EOF + */ +enum cam_req_mgr_sync_type { + CAM_SYNC_TYPE_DELAY_AT_SOF, + CAM_SYNC_TYPE_DELAY_AT_EOF, + CAM_SYNC_TYPE_APPLY_AT_EOF, +}; + +/** + * enum cam_req_mgr_req_state + * Request operation type for request state recording + * REQ_READY : all packets for this request is added + * NOTIFY_TRIGGER : request is triggered + * PROCESS_TRIGGER : request is in trigger processing + * SEND_REQ : CRM is sending request to each device + * NOTIFY_ERR : request is notified an error + * PROCESS_ERR : request is in error processing + * INVALID : invalid state + */ +enum cam_req_mgr_req_state { + CAM_CRM_REQ_READY, + CAM_CRM_NOTIFY_TRIGGER, + CAM_CRM_PROCESS_TRIGGER, + CAM_CRM_SEND_REQ, + CAM_CRM_NOTIFY_ERR, + CAM_CRM_PROCESS_ERR, + CAM_CRM_STATE_INVALID, +}; + +/** + * struct cam_req_mgr_traverse_result + * @req_id : Req id that is not ready + * @pd : pipeline delay + * @masked_value : Holds the dev bit for devices not ready + * 
for the given request + */ +struct cam_req_mgr_traverse_result { + int64_t req_id; + uint32_t pd; + uint32_t masked_value; +}; + +/** + * struct cam_req_mgr_traverse + * @idx : slot index + * @result : contains which all tables were able to apply successfully + * @result_data : holds the result of traverse in case it fails + * @tbl : pointer of pipeline delay based request table + * @apply_data : pointer which various tables will update during traverse + * @in_q : input request queue pointer + * @validate_only : Whether to validate only and/or update settings + * @open_req_cnt : Count of open requests yet to be serviced in the kernel. + */ +struct cam_req_mgr_traverse { + int32_t idx; + uint32_t result; + struct cam_req_mgr_traverse_result result_data; + struct cam_req_mgr_req_tbl *tbl; + struct cam_req_mgr_apply *apply_data; + struct cam_req_mgr_req_queue *in_q; + bool validate_only; + uint32_t open_req_cnt; +}; + +/** + * struct cam_req_mgr_apply + * @idx : corresponding input queue slot index + * @pd : pipeline delay of device + * @req_id : req id for dev with above pd to process + * @skip_idx: skip applying settings when this is set. 
+ */ +struct cam_req_mgr_apply { + int32_t idx; + int32_t pd; + int64_t req_id; + int32_t skip_idx; +}; + +/** + * struct crm_tbl_slot_special_ops + * @num_dev : Number of devices need to be applied at this trigger point + * @dev_hdl : Device handle who requested for special ops + * @apply_at_eof : Boolean Identifier for request to be applied at EOF + */ +struct crm_tbl_slot_special_ops { + uint32_t num_dev; + int32_t dev_hdl[MAX_DEV_FOR_SPECIAL_OPS]; + bool apply_at_eof; +}; + +/** + * struct cam_req_mgr_tbl_slot + * @idx : slot index + * @req_ready_map : mask tracking which all devices have request ready + * @req_apply_map : mask tracking which all devices have request applied + * @state : state machine for life cycle of a slot + * @inject_delay_at_sof : insert extra bubbling for flash type of use cases + * @inject_delay_at_eof : insert extra bubbling for flash type of use cases + * @ops : special operation for the table slot + * e.g. + * skip_next frame: in case of applying one device + * and skip others + * apply_at_eof: device that needs to apply at EOF + */ +struct cam_req_mgr_tbl_slot { + int32_t idx; + uint32_t req_ready_map; + uint32_t req_apply_map; + enum crm_req_state state; + uint32_t inject_delay_at_sof; + uint32_t inject_delay_at_eof; + struct crm_tbl_slot_special_ops ops; +}; + +/** + * struct cam_req_mgr_req_tbl + * @id : table indetifier + * @pd : pipeline delay of table + * @dev_count : num of devices having same pipeline delay + * @dev_mask : mask to track which devices are linked + * @skip_traverse : to indicate how many traverses need to be dropped + * by this table especially in the beginning or bubble recovery + * @next : pointer to next pipeline delay request table + * @pd_delta : differnce between this table's pipeline delay and next + * @num_slots : number of request slots present in the table + * @slot : array of slots tracking requests availability at devices + */ +struct cam_req_mgr_req_tbl { + int32_t id; + int32_t pd; + int32_t 
dev_count; + int32_t dev_mask; + int32_t skip_traverse; + struct cam_req_mgr_req_tbl *next; + int32_t pd_delta; + int32_t num_slots; + struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS]; +}; + +/** + * struct cam_req_mgr_slot + * - Internal Book keeping + * @idx : slot index + * @skip_idx : if req id in this slot needs to be skipped/not applied + * @status : state machine for life cycle of a slot + * - members updated due to external events + * @recover : if user enabled recovery for this request. + * @req_id : mask tracking which all devices have request ready + * @sync_mode : Sync mode in which req id in this slot has to applied + * @additional_timeout : Adjusted watchdog timeout value associated with + * this request + * @recovery_counter : Internal recovery counter + * @num_sync_links : Num of sync links + * @sync_link_hdls : Array of sync link handles + * @bubble_times : times of bubbles the req happended + * @frame_sync_shift : frame sync shift value, frame sync may not do SOF sync, + * so we need to know the shift value in KMD, the unit is ns. + * @mismatched_frame_mode : Mismatched frame mode to notify or drop setting mismatched frame + * of this slot + * @internal_recovered : indicate if internal recover is already done for request + * @skip_set : Simulate a frame skip on this slot + */ +struct cam_req_mgr_slot { + int32_t idx; + int32_t skip_idx; + enum crm_slot_status status; + int32_t recover; + int64_t req_id; + int32_t sync_mode; + int32_t additional_timeout; + int32_t recovery_counter; + int32_t num_sync_links; + int32_t sync_link_hdls[MAXIMUM_LINKS_PER_SESSION - 1]; + uint32_t bubble_times; + uint64_t frame_sync_shift; + int32_t mismatched_frame_mode; + bool internal_recovered; + bool skip_set; +}; + +/** + * struct cam_req_mgr_req_queue + * @num_slots : max num of input queue slots + * @slot : request slot holding incoming request id and bubble info. + * @rd_idx : indicates slot index currently in process. 
+ * @wr_idx : indicates slot index to hold new upcoming req. + * @last_applied_idx : indicates slot index last applied successfully. + */ +struct cam_req_mgr_req_queue { + int32_t num_slots; + struct cam_req_mgr_slot slot[MAX_REQ_SLOTS]; + int32_t rd_idx; + int32_t wr_idx; + int32_t last_applied_idx; +}; + +/** + * struct cam_req_mgr_req_state_monitor + * @req_state : operation type + * @req_id : request id + * @dev_hdl : to track which device is sending request + * @frame_id : frame id + * @time_stamp : the time stamp of the state + * @name : device_name + */ +struct cam_req_mgr_state_monitor { + enum cam_req_mgr_req_state req_state; + int64_t req_id; + int32_t dev_hdl; + int64_t frame_id; + struct timespec64 time_stamp; + char name[256]; +}; + +/** + * struct cam_req_mgr_req_data + * @in_q : Poiner to Input request queue + * @l_tbl : unique pd request tables. + * @num_tbl : how many unique pd value devices are present + * @apply_data : Holds information about request id for a request + * @prev_apply_data : Holds information about request id for a previous + * applied request + * @state_monitor : used to track the request state in CRM + * @next_state_idx : indicates index to hold new request state + * @lock : mutex lock protecting request data ops. 
+ * @monitor_slock : spin lock protecting state mointor variables + * @reset_link_spin_lock : spin lock protecting link data ops when reset link + */ +struct cam_req_mgr_req_data { + struct cam_req_mgr_req_queue *in_q; + struct cam_req_mgr_req_tbl *l_tbl; + int32_t num_tbl; + struct cam_req_mgr_apply apply_data[CAM_PIPELINE_DELAY_MAX]; + struct cam_req_mgr_apply prev_apply_data[CAM_PIPELINE_DELAY_MAX]; + struct cam_req_mgr_state_monitor state_monitor[ + MAX_REQ_STATE_MONITOR_NUM]; + int32_t next_state_idx; + struct mutex lock; + spinlock_t monitor_slock; + spinlock_t reset_link_spin_lock; +}; + +/** + * struct cam_req_mgr_connected_device + * - Device Properties + * @dev_hdl : device handle + * @dev_bit : unique bit assigned to device in link + * - Device characteristics + * @pd_tbl : tracks latest available req id at this device + * @dev_info : holds dev characteristics such as pipeline delay, dev name + * @ops : holds func pointer to call methods on this device + * @parent : pvt data - like link which this dev hdl belongs to + */ +struct cam_req_mgr_connected_device { + int32_t dev_hdl; + int64_t dev_bit; + struct cam_req_mgr_req_tbl *pd_tbl; + struct cam_req_mgr_device_info dev_info; + struct cam_req_mgr_kmd_ops *ops; + void *parent; +}; + +/** + * struct cam_req_mgr_core_link + * - Link Properties + * @link_hdl : Link identifier + * @num_devs : num of connected devices to this link + * @max_delay : Max of pipeline delay of all connected devs + * @min_delay : Min of pipeline delay of all connected devs + * @max_mswitch_delay : Max of modeswitch delay of all connected devs + * @min_mswitch_delay : Min of modeswitch delay of all connected devs + * @workq : Pointer to handle workq related jobs + * @pd_mask : each set bit indicates the device with pd equal to + * bit position is available. + * - List of connected devices + * @l_dev : List of connected devices to this link + * - Request handling data struct + * @req : req data holder. 
+ * - Timer + * @watchdog : watchdog timer to recover from sof freeze + * - Link private data + * @workq_comp : conditional variable to block user thread for workq + * to finish schedule request processing + * @state : link state machine + * @parent : pvt data - link's parent is session + * @lock : mutex lock to guard link data operations + * @link_state_spin_lock : spin lock to protect link state variable + * @sync_link : array of pointer to the sync link for synchronization + * @num_sync_links : num of links sync associated with this link + * @sync_link_sof_skip : flag determines if a pkt is not available for a given + * frame in a particular link skip corresponding + * frame in sync link as well. + * @open_req_cnt : Counter to keep track of open requests that are yet + * to be serviced in the kernel. + * @last_flush_id : Last request to flush + * @is_used : 1 if link is in use else 0 + * @is_master : Based on pd among links, the link with the highest pd + * is assigned as master + * @initial_skip : Flag to determine if slave has started streaming in + * master-slave sync + * @in_msync_mode : Flag to determine if a link is in master-slave mode + * @initial_sync_req : The initial req which is required to sync with the + * other link + * @retry_cnt : Counter that tracks number of attempts to apply + * the same req + * @is_shutdown : Flag to indicate if link needs to be disconnected + * as part of shutdown. 
@sof_timestamp : SOF timestamp value
uint32_t retry_cnt; + bool is_shutdown; + uint64_t sof_timestamp; + uint64_t prev_sof_timestamp; + bool dual_trigger; + uint32_t trigger_cnt[CAM_REQ_MGR_MAX_TRIGGERS] + [CAM_TRIGGER_MAX_POINTS + 1]; + atomic_t eof_event_cnt; + bool skip_init_frame; + uint64_t last_sof_trigger_jiffies; + bool wq_congestion; + bool try_for_internal_recovery; + bool is_sending_req; + uint32_t properties_mask; + uint32_t cont_empty_slots; + uint32_t resume_sync_dev_mask; + uint32_t resume_sync_curr_mask; + uint64_t last_applied_done_timestamp; + uint32_t exp_time_for_resume; +}; + +/** + * struct cam_req_mgr_core_session + * - Session Properties + * @session_hdl : session identifier + * @num_links : num of active links for current session + * - Links of this session + * @links : pointer to array of links within session + * - Session private data + * @entry : pvt data - entry in the list of sessions + * @lock : pvt data - spin lock to guard session data + * - Debug data + * @force_err_recovery : For debugging, we can force bubble recovery + * to be always ON or always OFF using debugfs. + * @sync_mode : Sync mode for this session links + */ +struct cam_req_mgr_core_session { + int32_t session_hdl; + uint32_t num_links; + struct cam_req_mgr_core_link *links[MAXIMUM_LINKS_PER_SESSION]; + struct list_head entry; + struct mutex lock; + int32_t force_err_recovery; + int32_t sync_mode; +}; + +/** + * struct cam_req_mgr_core_device + * - Core camera request manager data struct + * @session_head : list head holding sessions + * @crm_lock : mutex lock to protect session creation & destruction + * @recovery_on_apply_fail : Recovery on apply failure using debugfs. 
+ * @disable_sensor_standby : Disable synced resume feature including + * sensor standby8 + * @simulate_skip_frame : Simulate a skip if set + */ +struct cam_req_mgr_core_device { + struct list_head session_head; + struct mutex crm_lock; + bool recovery_on_apply_fail; + bool disable_sensor_standby; + bool simulate_skip_frame; +}; + +/** + * struct cam_req_mgr_req_tbl + * @id : table indetifier + * @pd : pipeline delay of table + * @dev_count : num of devices having same pipeline delay + * @dev_mask : mask to track which devices are linked + * @skip_traverse : to indicate how many traverses need to be dropped + * by this table especially in the beginning or bubble recovery + * @pd_delta : differnce between this table's pipeline delay and next + * @num_slots : number of request slots present in the table + * @slot : array of slots tracking requests availability at devices + */ +struct cam_req_mgr_req_tbl_mini_dump { + int32_t id; + int32_t pd; + int32_t dev_count; + int32_t dev_mask; + int32_t skip_traverse; + int32_t pd_delta; + int32_t num_slots; + struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS]; +}; + +/** + * struct cam_req_mgr_req_data_mini_dump + * @apply_data : Holds information about request id for a request + * @prev_apply_data : Holds information about request id for a previous + * applied request + * @in_q : Poiner to Input request queue + * @l_tbl : unique pd request tables. + * @num_tbl : how many unique pd value devices are present + */ +struct cam_req_mgr_req_data_mini_dump { + struct cam_req_mgr_apply apply_data[CAM_PIPELINE_DELAY_MAX]; + struct cam_req_mgr_apply prev_apply_data[CAM_PIPELINE_DELAY_MAX]; + struct cam_req_mgr_req_queue in_q; + struct cam_req_mgr_req_tbl_mini_dump l_tbl[4]; + int32_t num_tbl; +}; + +/** + * struct cam_req_mgr_core_link_mini_dump + * @workq : Work q information + * @req : req data holder. 
+ * @initial_sync_req : The initial req which is required to sync with the + * @prev_sof_timestamp : Previous SOF timestamp value + * @last_flush_id : Last request to flush + * @is_used : 1 if link is in use else 0 + * @eof_event_cnt : Atomic variable to track the number of EOF requests + * @pd_mask : each set bit indicates the device with pd equal to + * @num_sync_links : num of links sync associated with this link + * @open_req_cnt : Counter to keep track of open requests that are yet + * to be serviced in the kernel. + * @link_hdl : Link identifier + * @num_devs : num of connected devices to this link + * @max_delay : Max of pipeline delay of all connected devs + * bit position is available. + * @state : link state machine + * @dual_trigger : Dual tirgger flag + * @is_shutdown : Is shutting down + * @skip_init_frame : skip initial frames crm_wd_timer validation in the + * @is_master : Based on pd among links, the link with the highest pd + * is assigned as master + * @initial_skip : Flag to determine if slave has started streaming in + * master-slave sync + * @in_m_sync_mode : M-sync mode flag + * @sync_link_sof_skip : flag determines if a pkt is not available + * @wq_congestion : Indicates if WQ congestion is detected or not + */ +struct cam_req_mgr_core_link_mini_dump { + struct cam_req_mgr_core_workq_mini_dump workq; + struct cam_req_mgr_req_data_mini_dump req; + int64_t initial_sync_req; + uint32_t last_flush_id; + uint32_t is_used; + uint32_t retry_cnt; + uint32_t eof_event_cnt; + int32_t pd_mask; + int32_t num_sync_links; + int32_t open_req_cnt; + int32_t link_hdl; + int32_t num_devs; + enum cam_pipeline_delay max_delay; + enum cam_req_mgr_link_state state; + bool dual_trigger; + bool is_shutdown; + bool skip_init_frame; + bool is_master; + bool initial_skip; + bool in_msync_mode; + bool sync_link_sof_skip; + bool wq_congestion; +}; + +/** + * struct cam_req_mgr_core_sched_req + * + * - Common members + * @session_hdl : Input param - Identifier for CSL 
session + * @ink_hdl : Input Param -Identifier for link including itself. + * @bubble_enable : Input Param - Cam req mgr will do bubble recovery if this + * flag is set. + * @sync_mode : Type of Sync mode for this request + * @additional_timeout : Additional timeout value (in ms) associated with + * this request. This value needs to be 0 in cases where long exposure is + * not configured for the sensor.The max timeout that will be supported + * is 50000 ms + * @req_id : Input Param - Request Id from which all requests will be flushed + * + * - Support sched_request + * @reserved : Reserved + * + * - Support sched_request_v2/sched_request_v3 + * @version : Version number + * @num_links : Input Param - Num of links for sync + * @num_valid_params : Number of valid params + * @param_mask : mask to indicate what the parameters are + * @params : pointer, point to parameters passed from user space + * @link_hdls : pointer, point to Input Param - Array of link handles to be for sync + */ +struct cam_req_mgr_core_sched_req { + int32_t session_hdl; + int32_t link_hdl; + int32_t bubble_enable; + int32_t sync_mode; + int32_t additional_timeout; + int64_t req_id; + int32_t reserved; + int32_t version; + int32_t num_links; + int32_t num_valid_params; + int32_t param_mask; + int32_t *params; + int32_t *link_hdls; +}; + +/** + * struct cam_req_mgr_core_mini_dump + * @link : Array of dumped links + * @num_link : Number of links dumped + */ +struct cam_req_mgr_core_mini_dump { + struct cam_req_mgr_core_link_mini_dump *link[MAXIMUM_LINKS_PER_SESSION]; + uint32_t num_link; +}; + +/** + * cam_req_mgr_create_session() + * @brief : creates session + * @ses_info : output param for session handle + * + * called as part of session creation. 
+ */ +int cam_req_mgr_create_session(struct cam_req_mgr_session_info *ses_info); + +/** + * cam_req_mgr_destroy_session() + * @brief : destroy session + * @ses_info : session handle info, input param + * @is_shutdown: To indicate if devices on link need to be disconnected. + * + * Called as part of session destroy + * return success/failure + */ +int cam_req_mgr_destroy_session(struct cam_req_mgr_session_info *ses_info, + bool is_shutdown); + +/** + * cam_req_mgr_link() + * @brief : creates a link for a session + * @link_info : handle and session info to create a link + * + * link is formed in a session for multiple devices. it creates + * a unique link handle for the link and is specific to a + * session. Returns link handle + */ +int cam_req_mgr_link(struct cam_req_mgr_ver_info *link_info); +int cam_req_mgr_link_v2(struct cam_req_mgr_ver_info *link_info); + +/** + * cam_req_mgr_unlink() + * @brief : destroy a link in a session + * @unlink_info : session and link handle info + * + * link is destroyed in a session + */ +int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info); + +/** + * cam_req_mgr_schedule_request() + * @brief: Request is scheduled + * @sched_req: request id, session and link id info, bubble recovery info + */ +int cam_req_mgr_schedule_request(struct cam_req_mgr_sched_request *sched_req); + +/** + * cam_req_mgr_schedule_request_v2() + * @brief: Request is scheduled + * @sched_req: request id, session, link id info, bubble recovery info and sync info + */ +int cam_req_mgr_schedule_request_v2(struct cam_req_mgr_sched_request_v2 *sched_req); + +/** + * cam_req_mgr_schedule_request_v3() + * @brief: Request is scheduled + * @sched_req: request id, session, link id info, bubble recovery info and sync info + */ +int cam_req_mgr_schedule_request_v3(struct cam_req_mgr_sched_request_v3 *sched_req); + +/** + * cam_req_mgr_sync_config() + * @brief: sync for links in a session + * @sync_info: session, links info and master link info + */ +int 
cam_req_mgr_sync_config(struct cam_req_mgr_sync_mode *sync_info); + +/** + * cam_req_mgr_flush_requests() + * @brief: flush all requests + * @flush_info: requests related to link and session + */ +int cam_req_mgr_flush_requests(struct cam_req_mgr_flush_info *flush_info); + +/** + * cam_req_mgr_core_device_init() + * @brief: initialize crm core + */ +int cam_req_mgr_core_device_init(void); + +/** + * cam_req_mgr_core_device_deinit() + * @brief: cleanp crm core + */ +int cam_req_mgr_core_device_deinit(void); + +/** + * cam_req_mgr_handle_core_shutdown() + * @brief: Handles camera close + */ +void cam_req_mgr_handle_core_shutdown(void); + +/** + * cam_req_mgr_link_control() + * @brief: Handles link control operations + * @control: Link control command + */ +int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control); + +/** + * cam_req_mgr_dump_request() + * @brief: Dumps the request information + * @dump_req: Dump request + */ +int cam_req_mgr_dump_request(struct cam_dump_req_cmd *dump_req); + +/** + * cam_req_mgr_link_properties() + * @brief: Handles link properties + * @properties: Link properties + */ +int cam_req_mgr_link_properties(struct cam_req_mgr_link_properties *properties); + +/** + * cam_req_mgr_dump_connected_devices() + * @brief: Dump all connected devices on the link + * @link_hdl: Link Handle + */ +void cam_req_mgr_dump_linked_devices_on_err(int32_t link_hdl); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core_defs.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core_defs.h new file mode 100644 index 0000000000..fbaa7de494 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_core_defs.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ +#ifndef _CAM_REQ_MGR_CORE_DEFS_H_ +#define _CAM_REQ_MGR_CORE_DEFS_H_ + +#define CRM_TRACE_ENABLE 0 +#define CRM_DEBUG_MUTEX 0 + +#define SET_SUCCESS_BIT(ret, pd) (ret |= (1 << (pd))) + +#define SET_FAILURE_BIT(ret, pd) (ret &= (~(1 << (pd)))) + +#define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_debug.c b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_debug.c new file mode 100644 index 0000000000..50a671098c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_debug.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_req_mgr_debug.h" +#include "cam_mem_mgr_api.h" + +#define MAX_SESS_INFO_LINE_BUFF_LEN 256 +#define MAX_RAW_BUFF_LEN 8192 +#define MAX_LINE_BUFF_LEN 100 + +static char sess_info_buffer[MAX_SESS_INFO_LINE_BUFF_LEN]; +static int cam_debug_mgr_delay_detect; +static char boot_stats_info[MAX_RAW_BUFF_LEN]; +static unsigned long cam_driver_bind_latency_in_usec; +static LIST_HEAD(cam_bind_latency_list); + +static int cam_req_mgr_debug_set_bubble_recovery(void *data, u64 val) +{ + struct cam_req_mgr_core_device *core_dev = data; + struct cam_req_mgr_core_session *session; + int rc = 0; + + mutex_lock(&core_dev->crm_lock); + + if (!list_empty(&core_dev->session_head)) { + list_for_each_entry(session, + &core_dev->session_head, entry) { + session->force_err_recovery = val; + } + } + + mutex_unlock(&core_dev->crm_lock); + + return rc; +} + +static int cam_req_mgr_debug_get_bubble_recovery(void *data, u64 *val) +{ + struct cam_req_mgr_core_device *core_dev = data; + struct cam_req_mgr_core_session *session; + + mutex_lock(&core_dev->crm_lock); + + if (!list_empty(&core_dev->session_head)) { + session = list_first_entry(&core_dev->session_head, + 
			struct cam_req_mgr_core_session,
			entry);
		*val = session->force_err_recovery;
	}
	mutex_unlock(&core_dev->crm_lock);

	return 0;
}

/* debugfs attribute glue for "bubble_recovery" (u64, printed "%lld\n"). */
DEFINE_SIMPLE_ATTRIBUTE(bubble_recovery, cam_req_mgr_debug_get_bubble_recovery,
	cam_req_mgr_debug_set_bubble_recovery, "%lld\n");

/* Stash the inode's private pointer (the CRM core device) for read(). */
static int session_info_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

/*
 * Render one summary line per open session (handle, link count) plus one
 * line per link into the static sess_info_buffer, then hand the text to
 * userspace via simple_read_from_buffer().
 */
static ssize_t session_info_read(struct file *t_file, char *t_char,
	size_t t_size_t, loff_t *t_loff_t)
{
	int i;
	char *out_buffer = sess_info_buffer;
	char line_buffer[MAX_SESS_INFO_LINE_BUFF_LEN] = {0};
	struct cam_req_mgr_core_device *core_dev =
		(struct cam_req_mgr_core_device *) t_file->private_data;
	struct cam_req_mgr_core_session *session;

	memset(out_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);

	mutex_lock(&core_dev->crm_lock);

	if (!list_empty(&core_dev->session_head)) {
		list_for_each_entry(session,
			&core_dev->session_head, entry) {
			snprintf(line_buffer, sizeof(line_buffer),
				"session_hdl = %x \t"
				"num_links = %d\n",
				session->session_hdl, session->num_links);
			/* strlcat truncates silently once the shared buffer
			 * fills — acceptable for a debug node.
			 */
			strlcat(out_buffer, line_buffer,
				sizeof(sess_info_buffer));
			for (i = 0; i < session->num_links; i++) {
				snprintf(line_buffer, sizeof(line_buffer),
					"link_hdl[%d] = 0x%x, num_devs connected = %d\n",
					i, session->links[i]->link_hdl,
					session->links[i]->num_devs);
				strlcat(out_buffer, line_buffer,
					sizeof(sess_info_buffer));
			}
		}
	}

	mutex_unlock(&core_dev->crm_lock);

	return simple_read_from_buffer(t_char, t_size_t,
		t_loff_t, out_buffer, strlen(out_buffer));
}

/* Any write simply clears the cached session-info text. */
static ssize_t session_info_write(struct file *t_file,
	const char *t_char, size_t t_size_t, loff_t *t_loff_t)
{
	memset(sess_info_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);

	return 0;
}

static const struct file_operations session_info = {
	.open = session_info_open,
	.read = session_info_read,
	.write = session_info_write,
};

/* Report overall and per-driver bind latency collected at probe time. */
static ssize_t cam_boot_info_read(struct file
*t_file, char *t_char, + size_t t_size_t, loff_t *t_loff_t) +{ + char *out_buffer = boot_stats_info; + struct camera_submodule_bind_time_node *node; + int write_len = 0; + + memset(out_buffer, 0, MAX_RAW_BUFF_LEN); + + write_len = scnprintf(out_buffer, MAX_RAW_BUFF_LEN, + "\nOverall Camera Driver Bind Latency: %lu usec\n\n" + "---Individual Driver Bind Latency Break-up---\n", + cam_driver_bind_latency_in_usec); + if (!list_empty(&cam_bind_latency_list)) { + list_for_each_entry(node, + &cam_bind_latency_list, list) { + write_len += scnprintf((out_buffer + write_len), + MAX_RAW_BUFF_LEN - write_len, + "driver_instance = %s \t" + "bind latency = %lu usec\n", + node->name, node->bind_time_usec); + } + } + + return simple_read_from_buffer(t_char, t_size_t, + t_loff_t, out_buffer, strlen(out_buffer)); +} + +static const struct file_operations cam_boot_debug_info = { + .read = cam_boot_info_read, +}; + +static struct dentry *debugfs_root; +int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev) +{ + int rc = 0; + struct dentry *dbgfileptr = NULL; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("req_mgr", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_MEM,"DebugFS could not create directory!"); + rc = -ENOENT; + goto end; + } + /* Store parent inode for cleanup in caller */ + debugfs_root = dbgfileptr; + + debugfs_create_file("sessions_info", 0644, debugfs_root, + core_dev, &session_info); + debugfs_create_file("bubble_recovery", 0644, + debugfs_root, core_dev, &bubble_recovery); + debugfs_create_bool("recovery_on_apply_fail", 0644, + debugfs_root, &core_dev->recovery_on_apply_fail); + debugfs_create_bool("disable_sensor_standby", 0644, + debugfs_root, &core_dev->disable_sensor_standby); + debugfs_create_u32("delay_detect_count", 0644, debugfs_root, + &cam_debug_mgr_delay_detect); + debugfs_create_file("cam_print_boot_stats", 0644, debugfs_root, + NULL, &cam_boot_debug_info); + debugfs_create_bool("simulate_skip", 0644, + 
debugfs_root, &core_dev->simulate_skip_frame); +end: + return rc; +} + +int cam_req_mgr_debug_unregister(void) +{ + debugfs_root = NULL; + return 0; +} + +void cam_req_mgr_debug_delay_detect(void) +{ + cam_debug_mgr_delay_detect += 1; +} + +void cam_req_mgr_debug_record_bind_time(unsigned long time_in_usec) +{ + cam_driver_bind_latency_in_usec = time_in_usec; +} + +void cam_req_mgr_debug_bind_latency_cleanup(void) +{ + struct camera_submodule_bind_time_node *node, *tmp; + + list_for_each_entry_safe(node, tmp, &cam_bind_latency_list, list) { + list_del(&node->list); + kfree(node->name); + CAM_MEM_FREE(node); + } +} + +void cam_req_mgr_debug_record_bind_latency(const char *driver_name, unsigned long time_in_usec) +{ + struct camera_submodule_bind_time_node *new_node = + CAM_MEM_ZALLOC(sizeof(struct camera_submodule_bind_time_node), GFP_KERNEL); + + if (!new_node) { + CAM_WARN(CAM_REQ, "%s: %u usec: Failed to allocate Bind Time node", + driver_name, time_in_usec); + return; + } + CAM_DBG(CAM_REQ, "%s: bind latency: %u", driver_name, time_in_usec); + new_node->name = kstrdup(driver_name, GFP_KERNEL); + if (!new_node->name) { + CAM_WARN(CAM_REQ, "%s: %u usec: Failed to create driver_name", + driver_name, time_in_usec); + CAM_MEM_FREE(new_node); + return; + } + new_node->bind_time_usec = time_in_usec; + list_add_tail(&new_node->list, &cam_bind_latency_list); +} + diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_debug.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_debug.h new file mode 100644 index 0000000000..9e8c0f5df5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_debug.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_REQ_MGR_DEBUG_H_ +#define _CAM_REQ_MGR_DEBUG_H_ + +#include +#include "cam_req_mgr_core.h" +#include "cam_debug_util.h" + +struct camera_submodule_bind_time_node { + char *name; + long bind_time_usec; + struct list_head list; +}; + +int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev); +int cam_req_mgr_debug_unregister(void); + +/* cam_req_mgr_debug_delay_detect() + * @brief : increment debug_fs varaible by 1 whenever delay occurred. + */ +void cam_req_mgr_debug_delay_detect(void); +void cam_req_mgr_debug_record_bind_time(unsigned long time_in_usec); +void cam_req_mgr_debug_record_bind_latency(const char *driver_name, unsigned long time_in_usec); +void cam_req_mgr_debug_bind_latency_cleanup(void); +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_dev.c b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_dev.c new file mode 100644 index 0000000000..1e9bc7ca3d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_dev.c @@ -0,0 +1,1303 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
 */

/* NOTE(review): the angle-bracket names of the next twelve includes were
 * lost in transit (empty `#include` lines) — restore the original
 * <linux/...> / <media/...> headers here.
 */
#include
#include
#include
#include
#include
#include

#include
#include
#include
#include
#include
#include

#include "cam_req_mgr_dev.h"
#include "cam_req_mgr_util.h"
#include "cam_req_mgr_core.h"
#include "cam_subdev.h"
#include "cam_mem_mgr.h"
#include "cam_debug_util.h"
#include "cam_common_util.h"
#include "cam_compat.h"
#include "cam_cpas_hw.h"
#include "cam_compat.h"	/* NOTE(review): duplicate of the include two lines up */
#include "camera_main.h"
#include "cam_vmrm_interface.h"
#include "cam_mem_mgr_api.h"
#include "cam_req_mgr_debug.h"

/* Depth of the v4l2 event queue passed to v4l2_event_subscribe() */
#define CAM_REQ_MGR_EVENT_MAX 30
#define CAM_I3C_MASTER_COMPAT "qcom,geni-i3c"

/* Singleton state backing the CRM video node */
static struct cam_req_mgr_device g_dev;
struct kmem_cache *g_cam_req_mgr_timer_cachep;
/* Subdevs kept sorted by close priority; walked during shutdown */
static struct list_head cam_req_mgr_ordered_sd_list;

/* Serializes open/close against subdev read-side operations */
DECLARE_RWSEM(rwsem_lock);

static struct device_attribute camera_debug_sysfs_attr =
	__ATTR(debug_node, 0600, NULL, cam_debug_sysfs_node_store);

/* Sensor-module compatibles used to populate i3c children in probe() */
static const struct of_device_id cam_sensor_module_dt_match[] = {
	{.compatible = "qcom,cam-sensor"},
	{ .compatible = "qcom,eeprom" },
	{.compatible = "qcom,actuator"},
	{ .compatible = "qcom,ois" },
	{.compatible = "qcom,camera-flash", .data = NULL},
	{}
};

/* Allocate, init and register the media_device behind the v4l2 dev. */
static int cam_media_device_setup(struct device *dev)
{
	int rc;

	g_dev.v4l2_dev->mdev = CAM_MEM_ZALLOC(sizeof(*g_dev.v4l2_dev->mdev),
		GFP_KERNEL);
	if (!g_dev.v4l2_dev->mdev) {
		rc = -ENOMEM;
		goto mdev_fail;
	}

	media_device_init(g_dev.v4l2_dev->mdev);
	g_dev.v4l2_dev->mdev->dev = dev;
	strscpy(g_dev.v4l2_dev->mdev->model, CAM_REQ_MGR_VNODE_NAME,
		sizeof(g_dev.v4l2_dev->mdev->model));

	rc = media_device_register(g_dev.v4l2_dev->mdev);
	if (rc)
		goto media_fail;

	return rc;

media_fail:
	CAM_MEM_FREE(g_dev.v4l2_dev->mdev);
	g_dev.v4l2_dev->mdev = NULL;
mdev_fail:
	return rc;
}

/* Inverse of cam_media_device_setup(). */
static void cam_media_device_cleanup(void)
{
	media_device_unregister(g_dev.v4l2_dev->mdev);
	media_device_cleanup(g_dev.v4l2_dev->mdev);
	CAM_MEM_FREE(g_dev.v4l2_dev->mdev);
g_dev.v4l2_dev->mdev = NULL; +} + +static int cam_v4l2_device_setup(struct device *dev) +{ + int rc; + + g_dev.v4l2_dev = CAM_MEM_ZALLOC(sizeof(*g_dev.v4l2_dev), + GFP_KERNEL); + if (!g_dev.v4l2_dev) + return -ENOMEM; + + rc = v4l2_device_register(dev, g_dev.v4l2_dev); + if (rc) + goto reg_fail; + + return rc; + +reg_fail: + CAM_MEM_FREE(g_dev.v4l2_dev); + g_dev.v4l2_dev = NULL; + return rc; +} + +static void cam_v4l2_device_cleanup(void) +{ + v4l2_device_unregister(g_dev.v4l2_dev); + CAM_MEM_FREE(g_dev.v4l2_dev); + g_dev.v4l2_dev = NULL; +} + +void cam_record_bind_latency(const char *driver_name, unsigned long time_in_usec) +{ + cam_req_mgr_debug_record_bind_latency(driver_name, time_in_usec); +} +EXPORT_SYMBOL(cam_record_bind_latency); + +void cam_req_mgr_rwsem_read_op(enum cam_subdev_rwsem lock) +{ + if (lock == CAM_SUBDEV_LOCK) + down_read(&rwsem_lock); + else if (lock == CAM_SUBDEV_UNLOCK) + up_read(&rwsem_lock); +} + +static void cam_req_mgr_rwsem_write_op(enum cam_subdev_rwsem lock) +{ + if (lock == CAM_SUBDEV_LOCK) + down_write(&rwsem_lock); + else if (lock == CAM_SUBDEV_UNLOCK) + up_write(&rwsem_lock); +} + +static int cam_req_mgr_open(struct file *filep) +{ + int rc; + + cam_req_mgr_rwsem_write_op(CAM_SUBDEV_LOCK); + + mutex_lock(&g_dev.cam_lock); + if (g_dev.open_cnt >= 1) { + rc = -EALREADY; + goto end; + } + + rc = v4l2_fh_open(filep); + if (rc) { + CAM_ERR(CAM_CRM, "v4l2_fh_open failed: %d", rc); + goto end; + } + + spin_lock_bh(&g_dev.cam_eventq_lock); + g_dev.cam_eventq = filep->private_data; + spin_unlock_bh(&g_dev.cam_eventq_lock); + + g_dev.open_cnt++; + rc = cam_mem_mgr_init(); + if (rc) { + g_dev.open_cnt--; + CAM_ERR(CAM_CRM, "mem mgr init failed"); + goto mem_mgr_init_fail; + } + + mutex_unlock(&g_dev.cam_lock); + cam_req_mgr_rwsem_write_op(CAM_SUBDEV_UNLOCK); + return rc; + +mem_mgr_init_fail: + v4l2_fh_release(filep); +end: + mutex_unlock(&g_dev.cam_lock); + cam_req_mgr_rwsem_write_op(CAM_SUBDEV_UNLOCK); + return rc; +} + +static __poll_t 
	cam_req_mgr_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	__poll_t masks = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq) {
		CAM_ERR(CAM_CRM, "v4l2_fh_poll with unexpected input eventq");
		return masks;
	}

	poll_wait(f, &eventq->wait, pll_table);
	if (v4l2_event_pending(eventq))
		masks |= POLLPRI;

	return masks;
}

/*
 * Release handler: runs when the single userspace client closes the node
 * (or dies).  Shuts down the CRM core, invokes CAM_SD_SHUTDOWN on every
 * devnode-bearing subdev in priority order, then frees handles and
 * deinits the memory manager.
 */
static int cam_req_mgr_close(struct file *filep)
{
	struct v4l2_subdev *sd;
	struct cam_subdev *csd;
	struct v4l2_fh *vfh = filep->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	cam_req_mgr_rwsem_write_op(CAM_SUBDEV_LOCK);

	mutex_lock(&g_dev.cam_lock);

	CAM_WARN(CAM_CRM,
		"release invoked associated userspace process has died, open_cnt: %d",
		g_dev.open_cnt);

	if (g_dev.open_cnt <= 0) {
		mutex_unlock(&g_dev.cam_lock);
		cam_req_mgr_rwsem_write_op(CAM_SUBDEV_UNLOCK);
		return -EINVAL;
	}

	cam_req_mgr_handle_core_shutdown();
	/* shutdown_state is visible to subdevs during the loop below */
	g_dev.shutdown_state = true;

	list_for_each_entry(csd, &cam_req_mgr_ordered_sd_list, list) {
		sd = &csd->sd;
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
			continue;
		if (sd->internal_ops) {
			CAM_DBG(CAM_CRM, "Invoke subdev close for device %s",
				sd->name);
			v4l2_subdev_call(sd, core, ioctl,
				CAM_SD_SHUTDOWN, subdev_fh);
		}
	}

	g_dev.open_cnt--;
	g_dev.shutdown_state = false;
	v4l2_fh_release(filep);

	spin_lock_bh(&g_dev.cam_eventq_lock);
	g_dev.cam_eventq = NULL;
	spin_unlock_bh(&g_dev.cam_eventq_lock);

	cam_req_mgr_util_free_hdls();
	cam_mem_mgr_deinit();
	mutex_unlock(&g_dev.cam_lock);

	cam_req_mgr_rwsem_write_op(CAM_SUBDEV_UNLOCK);

	return 0;
}

static struct v4l2_file_operations g_cam_fops = {
	.owner = THIS_MODULE,
	.open = cam_req_mgr_open,
	.poll = cam_req_mgr_poll,
	.release = cam_req_mgr_close,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};

/*
 * v4l2 event-queue merge callback: invoked when an event is about to be
 * dropped because the queue overflowed; logs what is being lost.
 */
static void cam_v4l2_event_queue_notify_error(const struct v4l2_event *old,
	struct
	v4l2_event *new)
{
	struct cam_req_mgr_message *ev_header;

	ev_header = CAM_REQ_MGR_GET_PAYLOAD_PTR((*old),
		struct cam_req_mgr_message);

	switch (old->id) {
	case V4L_EVENT_CAM_REQ_MGR_SOF:
	case V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS:
	case V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS:
		/* SOFs carrying a real request log at error level; idle
		 * (request_id == 0) SOFs are rate-limited warnings.
		 */
		if (ev_header->u.frame_msg.request_id)
			CAM_ERR(CAM_CRM,
				"Failed to notify %s Sess %X FrameId %lld FrameMeta %d ReqId %lld link %X",
				((old->id == V4L_EVENT_CAM_REQ_MGR_SOF) ?
				"SOF_TS" : ((old->id == V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS) ?
				"BOOT_TS" : "UNIFIED_TS")),
				ev_header->session_hdl,
				ev_header->u.frame_msg.frame_id,
				ev_header->u.frame_msg.frame_id_meta,
				ev_header->u.frame_msg.request_id,
				ev_header->u.frame_msg.link_hdl);
		else
			CAM_WARN_RATE_LIMIT_CUSTOM(CAM_CRM, 5, 1,
				"Failed to notify %s Sess %X FrameId %lld FrameMeta %d ReqId %lld link %X",
				((old->id == V4L_EVENT_CAM_REQ_MGR_SOF) ?
				"SOF_TS" : ((old->id == V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS) ?
				"BOOT_TS" : "UNIFIED_TS")),
				ev_header->session_hdl,
				ev_header->u.frame_msg.frame_id,
				ev_header->u.frame_msg.frame_id_meta,
				ev_header->u.frame_msg.request_id,
				ev_header->u.frame_msg.link_hdl);
		break;
	case V4L_EVENT_CAM_REQ_MGR_ERROR:
		CAM_ERR_RATE_LIMIT(CAM_CRM,
			"Failed to notify ERROR Sess %X ReqId %d Link %X Type %d ERR_code: %u",
			ev_header->session_hdl,
			ev_header->u.err_msg.request_id,
			ev_header->u.err_msg.link_hdl,
			ev_header->u.err_msg.error_type,
			ev_header->u.err_msg.error_code);
		break;
	case V4L_EVENT_CAM_REQ_MGR_NODE_EVENT:
		CAM_ERR_RATE_LIMIT(CAM_CRM,
			"Failed to notify node event. 
Sess 0x%x ReqId %d Lnk 0x%x dev_hdl: %d evt_type: %u evt_cause: %u",
			ev_header->session_hdl,
			ev_header->u.node_msg.request_id,
			ev_header->u.node_msg.link_hdl,
			ev_header->u.node_msg.device_hdl,
			ev_header->u.node_msg.event_type,
			ev_header->u.node_msg.event_cause);
		break;
	default:
		CAM_ERR(CAM_CRM, "Failed to notify crm event id %d",
			old->id);
	}
}

static struct v4l2_subscribed_event_ops g_cam_v4l2_ops = {
	.merge = cam_v4l2_event_queue_notify_error,
};

/* Track subscribed ids in a bitmask, then defer to the v4l2 core. */
static int cam_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	g_dev.v4l2_sub_ids |= 1 << sub->id;
	return v4l2_event_subscribe(fh, sub, CAM_REQ_MGR_EVENT_MAX,
		&g_cam_v4l2_ops);
}

/* Expose the currently subscribed event-id bitmask to other modules. */
uint32_t cam_req_mgr_get_id_subscribed(void)
{
	return g_dev.v4l2_sub_ids;
}
EXPORT_SYMBOL(cam_req_mgr_get_id_subscribed);

static int cam_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	g_dev.v4l2_sub_ids &= ~(1 << sub->id);
	return v4l2_event_unsubscribe(fh, sub);
}

/*
 * VIDIOC_CAM_CONTROL dispatcher: validates k_ioctl->size against the
 * op-specific payload, copies it in from userspace, calls the CRM or
 * mem-mgr backend, and copies results back for ops that return data.
 */
static long cam_private_ioctl(struct file *file, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int rc = 0;
	struct cam_control *k_ioctl;

	if ((!arg) || (cmd != VIDIOC_CAM_CONTROL))
		return -EINVAL;

	k_ioctl = (struct cam_control *)arg;

	if (!k_ioctl->handle)
		return -EINVAL;

	switch (k_ioctl->op_code) {
	case CAM_REQ_MGR_CREATE_SESSION: {
		struct cam_req_mgr_session_info ses_info;

		if (k_ioctl->size != sizeof(ses_info))
			return -EINVAL;

		if (copy_from_user(&ses_info,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_session_info))) {
			return -EFAULT;
		}

		rc = cam_req_mgr_create_session(&ses_info);
		if (!rc)
			if (copy_to_user(
				u64_to_user_ptr(k_ioctl->handle),
				&ses_info,
				sizeof(struct cam_req_mgr_session_info)))
				rc = -EFAULT;
	}
	break;

	case CAM_REQ_MGR_DESTROY_SESSION: {
		struct cam_req_mgr_session_info ses_info;

		if (k_ioctl->size != sizeof(ses_info))
			return -EINVAL;

		if
		(copy_from_user(&ses_info,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_session_info))) {
			return -EFAULT;
		}

		rc = cam_req_mgr_destroy_session(&ses_info, false);
	}
	break;

	case CAM_REQ_MGR_LINK: {
		/* v1 link: wrapped into cam_req_mgr_ver_info with VERSION_1 */
		struct cam_req_mgr_ver_info ver_info;

		if (k_ioctl->size != sizeof(ver_info.u.link_info_v1))
			return -EINVAL;

		if (copy_from_user(&ver_info.u.link_info_v1,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_link_info))) {
			return -EFAULT;
		}
		ver_info.version = VERSION_1;
		rc = cam_req_mgr_link(&ver_info);
		if (!rc)
			if (copy_to_user(
				u64_to_user_ptr(k_ioctl->handle),
				&ver_info.u.link_info_v1,
				sizeof(struct cam_req_mgr_link_info)))
				rc = -EFAULT;
	}
	break;

	case CAM_REQ_MGR_LINK_V2: {
		struct cam_req_mgr_ver_info ver_info;

		if (k_ioctl->size != sizeof(ver_info.u.link_info_v2))
			return -EINVAL;

		if (copy_from_user(&ver_info.u.link_info_v2,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_link_info_v2))) {
			return -EFAULT;
		}

		ver_info.version = VERSION_2;
		rc = cam_req_mgr_link_v2(&ver_info);
		if (!rc)
			if (copy_to_user(
				u64_to_user_ptr(k_ioctl->handle),
				&ver_info.u.link_info_v2,
				sizeof(struct cam_req_mgr_link_info_v2)))
				rc = -EFAULT;
	}
	break;

	case CAM_REQ_MGR_UNLINK: {
		struct cam_req_mgr_unlink_info unlink_info;

		if (k_ioctl->size != sizeof(unlink_info))
			return -EINVAL;

		if (copy_from_user(&unlink_info,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_unlink_info))) {
			return -EFAULT;
		}

		rc = cam_req_mgr_unlink(&unlink_info);
	}
	break;

	case CAM_REQ_MGR_SCHED_REQ: {
		struct cam_req_mgr_sched_request sched_req;

		if (k_ioctl->size != sizeof(sched_req))
			return -EINVAL;

		if (copy_from_user(&sched_req,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_sched_request))) {
			return -EFAULT;
		}

		rc = cam_req_mgr_schedule_request(&sched_req);
	}
	break;

	case CAM_REQ_MGR_SCHED_REQ_V2: {
		struct
		cam_req_mgr_sched_request_v2 sched_req;

		if (k_ioctl->size != sizeof(sched_req))
			return -EINVAL;

		if (copy_from_user(&sched_req,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_sched_request_v2))) {
			return -EFAULT;
		}

		rc = cam_req_mgr_schedule_request_v2(&sched_req);
	}
	break;

	case CAM_REQ_MGR_SCHED_REQ_V3: {
		/* Variable-length payload: fixed header read first to learn
		 * num_links, then the full (header + per-link array) copy.
		 */
		struct cam_req_mgr_sched_request_v3 *sched_req;
		struct cam_req_mgr_sched_request_v3 crm_sched_req;
		int sched_req_size;
		int num_links = 0;

		if (copy_from_user(&crm_sched_req,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_sched_request_v3))) {
			return -EFAULT;
		}

		num_links = crm_sched_req.num_links;

		if ((num_links > MAXIMUM_LINKS_PER_SESSION) ||
			(num_links < 0))
			return -EINVAL;

		sched_req_size = sizeof(struct cam_req_mgr_sched_request_v3) +
			((num_links) * sizeof(__signed__ int));

		if (k_ioctl->size != sched_req_size)
			return -EINVAL;

		sched_req = CAM_MEM_ZALLOC(sched_req_size, GFP_KERNEL);
		if (!sched_req) {
			return -ENOMEM;
		}

		/* NOTE(review): userspace can change num_links between the
		 * two copies; the buffer is sized from the validated first
		 * read, so the second copy cannot overflow, but sched_req->
		 * num_links is not re-validated — confirm the backend
		 * re-checks it.
		 */
		if (copy_from_user(sched_req, u64_to_user_ptr(k_ioctl->handle), sched_req_size)) {
			CAM_MEM_FREE(sched_req);
			sched_req = NULL;
			return -EFAULT;
		}

		/* NOTE(review): dead store — crm_sched_req is not used past
		 * this point.
		 */
		crm_sched_req.num_links = num_links;

		rc = cam_req_mgr_schedule_request_v3(sched_req);
		CAM_MEM_FREE(sched_req);
	}
	break;

	case CAM_REQ_MGR_FLUSH_REQ: {
		struct cam_req_mgr_flush_info flush_info;

		if (k_ioctl->size != sizeof(flush_info))
			return -EINVAL;

		if (copy_from_user(&flush_info,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_flush_info))) {
			return -EFAULT;
		}

		rc = cam_req_mgr_flush_requests(&flush_info);
	}
	break;

	case CAM_REQ_MGR_SYNC_MODE: {
		struct cam_req_mgr_sync_mode sync_info;

		if (k_ioctl->size != sizeof(sync_info))
			return -EINVAL;

		if (copy_from_user(&sync_info,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_sync_mode))) {
			return -EFAULT;
		}

		rc = cam_req_mgr_sync_config(&sync_info);
	}
	break;

	case CAM_REQ_MGR_ALLOC_BUF: {
		/* Legacy v1 alloc: bridged into the v2 request layout with a
		 * placeholder buffer name.
		 */
		struct cam_mem_mgr_alloc_cmd cmd;
		struct cam_mem_mgr_alloc_cmd_v2 cmd_v2 = {0};

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_mem_mgr_alloc_cmd))) {
			rc = -EFAULT;
			break;
		}

		strscpy(cmd_v2.buf_name, "UNKNOWN", CAM_DMA_BUF_NAME_LEN);
		memcpy(cmd_v2.mmu_hdls, cmd.mmu_hdls,
			sizeof(__s32) * CAM_MEM_MMU_MAX_HANDLE);
		cmd_v2.num_hdl = cmd.num_hdl;
		cmd_v2.flags = cmd.flags;
		cmd_v2.len = cmd.len;
		cmd_v2.align = cmd.align;

		rc = cam_mem_mgr_alloc_and_map(&cmd_v2);
		memcpy(&cmd.out, &cmd_v2.out, sizeof(cmd.out));
		if (!rc)
			if (copy_to_user(
				u64_to_user_ptr(k_ioctl->handle),
				&cmd, sizeof(struct cam_mem_mgr_alloc_cmd))) {
				rc = -EFAULT;
				break;
			}
	}
	break;
	case CAM_REQ_MGR_ALLOC_BUF_V2: {
		struct cam_mem_mgr_alloc_cmd_v2 cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_mem_mgr_alloc_cmd_v2))) {
			rc = -EFAULT;
			break;
		}

		rc = cam_mem_mgr_alloc_and_map(&cmd);
		if (!rc)
			if (copy_to_user(
				u64_to_user_ptr(k_ioctl->handle),
				&cmd, sizeof(struct cam_mem_mgr_alloc_cmd_v2))) {
				rc = -EFAULT;
				break;
			}
	}
	break;
	case CAM_REQ_MGR_MAP_BUF: {
		/* Legacy v1 map: same v1 → v2 bridging as ALLOC_BUF above */
		struct cam_mem_mgr_map_cmd cmd;
		struct cam_mem_mgr_map_cmd_v2 cmd_v2 = {0};

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_mem_mgr_map_cmd))) {
			rc = -EFAULT;
			break;
		}

		strscpy(cmd_v2.buf_name, "UNKNOWN", CAM_DMA_BUF_NAME_LEN);
		memcpy(cmd_v2.mmu_hdls, cmd.mmu_hdls,
			sizeof(__s32) * CAM_MEM_MMU_MAX_HANDLE);
		cmd_v2.num_hdl = cmd.num_hdl;
		cmd_v2.flags = cmd.flags;
		cmd_v2.fd = cmd.fd;

		rc = cam_mem_mgr_map(&cmd_v2);
		memcpy(&cmd.out, &cmd_v2.out, sizeof(cmd.out));
		if (!rc)
			if (copy_to_user(
				u64_to_user_ptr(k_ioctl->handle),
				&cmd,
				sizeof(struct cam_mem_mgr_map_cmd))) {
				rc = -EFAULT;
				break;
			}
	}
	break;
	case CAM_REQ_MGR_MAP_BUF_V2: {
		struct cam_mem_mgr_map_cmd_v2 cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_mem_mgr_map_cmd_v2))) {
			rc = -EFAULT;
			break;
		}

		rc = cam_mem_mgr_map(&cmd);
		if (!rc)
			if (copy_to_user(
				u64_to_user_ptr(k_ioctl->handle),
				&cmd, sizeof(struct cam_mem_mgr_map_cmd_v2))) {
				rc = -EFAULT;
				break;
			}
	}
	break;
	case CAM_REQ_MGR_RELEASE_BUF: {
		struct cam_mem_mgr_release_cmd cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_mem_mgr_release_cmd))) {
			rc = -EFAULT;
			break;
		}

		rc = cam_mem_mgr_release(&cmd);
	}
	break;
	case CAM_REQ_MGR_CACHE_OPS: {
		struct cam_mem_cache_ops_cmd cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_mem_cache_ops_cmd))) {
			rc = -EFAULT;
			break;
		}

		rc = cam_mem_mgr_cache_ops(&cmd);
	}
	break;
	case CAM_REQ_MGR_MEM_CPU_ACCESS_OP: {
		struct cam_mem_cpu_access_op cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_mem_cpu_access_op))) {
			rc = -EFAULT;
			break;
		}

		/* backend error codes are collapsed to -EINVAL here */
		rc = cam_mem_mgr_cpu_access_op(&cmd);
		if (rc)
			rc = -EINVAL;
	}
	break;
	case CAM_REQ_MGR_LINK_CONTROL: {
		struct cam_req_mgr_link_control cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_link_control))) {
			rc = -EFAULT;
			break;
		}

		rc = cam_req_mgr_link_control(&cmd);
		if (rc)
			rc = -EINVAL;
	}
	break;
	case CAM_REQ_MGR_REQUEST_DUMP: {
		struct cam_dump_req_cmd cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if
		(copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_dump_req_cmd))) {
			rc = -EFAULT;
			break;
		}
		rc = cam_req_mgr_dump_request(&cmd);
		if (rc) {
			CAM_ERR(CAM_CORE, "dump fail for dev %d req %llu rc %d",
				cmd.dev_handle, cmd.issue_req_id, rc);
			break;
		}
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->handle),
			&cmd, sizeof(struct cam_dump_req_cmd)))
			rc = -EFAULT;
	}
	break;
	case CAM_REQ_MGR_LINK_PROPERTIES: {
		struct cam_req_mgr_link_properties cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_link_properties))) {
			rc = -EFAULT;
			break;
		}

		rc = cam_req_mgr_link_properties(&cmd);
	}
	break;
	case CAM_REQ_MGR_QUERY_CAP: {
		/* Reports supported memory heaps via feature_mask */
		struct cam_req_mgr_query_cap cmd;

		if (k_ioctl->size != sizeof(cmd))
			return -EINVAL;

		if (copy_from_user(&cmd,
			u64_to_user_ptr(k_ioctl->handle),
			sizeof(struct cam_req_mgr_query_cap))) {
			rc = -EFAULT;
			break;
		}

		cmd.feature_mask = 0;

		rc = cam_mem_mgr_check_for_supported_heaps(&cmd.feature_mask);
		if (rc) {
			CAM_ERR(CAM_CRM, "Failed to retrieve heap capability rc: %d", rc);
			break;
		}

		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->handle),
			&cmd, sizeof(struct cam_req_mgr_query_cap)))
			rc = -EFAULT;
	}
	break;
	default:
		CAM_ERR(CAM_CRM, "Invalid Opcode %d", k_ioctl->op_code);
		rc = -ENOIOCTLCMD;
	}

	return rc;
}

static const struct v4l2_ioctl_ops g_cam_ioctl_ops = {
	.vidioc_subscribe_event = cam_subscribe_event,
	.vidioc_unsubscribe_event = cam_unsubscribe_event,
	.vidioc_default = cam_private_ioctl,
};

/* Allocate and register the "cam-req-mgr" video node. */
static int cam_video_device_setup(void)
{
	int rc;

	g_dev.video = video_device_alloc();
	if (!g_dev.video) {
		rc = -ENOMEM;
		goto video_fail;
	}

	g_dev.video->v4l2_dev = g_dev.v4l2_dev;

	strscpy(g_dev.video->name, "cam-req-mgr",
		sizeof(g_dev.video->name));
	g_dev.video->release = video_device_release_empty;
	g_dev.video->fops
		= &g_cam_fops;
	g_dev.video->ioctl_ops = &g_cam_ioctl_ops;
	g_dev.video->minor = -1;
	g_dev.video->vfl_type = VFL_TYPE_VIDEO;
	g_dev.video->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	rc = video_register_device(g_dev.video, VFL_TYPE_VIDEO, -1);
	if (rc) {
		CAM_ERR(CAM_CRM,
			"video device registration failure rc = %d, name = %s, device_caps = %d",
			rc, g_dev.video->name, g_dev.video->device_caps);
		goto v4l2_fail;
	}

	rc = media_entity_pads_init(&g_dev.video->entity, 0, NULL);
	if (rc)
		goto entity_fail;

	g_dev.video->entity.function = CAM_VNODE_DEVICE_TYPE;
	g_dev.video->entity.name = video_device_node_name(g_dev.video);

	return rc;

entity_fail:
	video_unregister_device(g_dev.video);
v4l2_fail:
	video_device_release(g_dev.video);
	g_dev.video = NULL;
video_fail:
	return rc;
}

/*
 * Queue a CRM message as a v4l2 event toward the userspace client.  For
 * (pf-)error events, first dump the devices linked on the affected link.
 */
int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
	uint32_t id,
	uint32_t type)
{
	struct v4l2_event event;
	struct cam_req_mgr_message *ev_header;

	if (!msg)
		return -EINVAL;

	/* Print all connected devices if components meet error or specifically pf error */
	if (id == V4L_EVENT_CAM_REQ_MGR_ERROR)
		cam_req_mgr_dump_linked_devices_on_err(msg->u.err_msg.link_hdl);

	if (id == V4L_EVENT_CAM_REQ_MGR_PF_ERROR)
		cam_req_mgr_dump_linked_devices_on_err(msg->u.pf_err_msg.link_hdl);

	event.id = id;
	event.type = type;
	ev_header = CAM_REQ_MGR_GET_PAYLOAD_PTR(event,
		struct cam_req_mgr_message);
	memcpy(ev_header, msg, sizeof(struct cam_req_mgr_message));
	v4l2_event_queue(g_dev.video, &event);

	return 0;
}
EXPORT_SYMBOL(cam_req_mgr_notify_message);

/* Inverse of cam_video_device_setup(). */
void cam_video_device_cleanup(void)
{
	media_entity_cleanup(&g_dev.video->entity);
	video_unregister_device(g_dev.video);
	video_device_release(g_dev.video);
	g_dev.video = NULL;
}

/*
 * Fan a message out to every registered subdev of the given entity type
 * that installed a msg_cb.
 */
void cam_subdev_notify_message(u32 subdev_type,
	enum cam_subdev_message_type_t message_type,
	void *data)
{
	struct v4l2_subdev *sd = NULL;
	struct cam_subdev *csd = NULL;

list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) { + if (sd->entity.function == subdev_type) { + csd = container_of(sd, struct cam_subdev, sd); + if (csd->msg_cb != NULL) + csd->msg_cb(sd, message_type, data); + } + } +} +EXPORT_SYMBOL(cam_subdev_notify_message); + +bool cam_req_mgr_is_open(void) +{ + bool crm_status = false; + + mutex_lock(&g_dev.cam_lock); + crm_status = g_dev.open_cnt ? true : false; + mutex_unlock(&g_dev.cam_lock); + + return crm_status; +} +EXPORT_SYMBOL(cam_req_mgr_is_open); + +bool cam_req_mgr_is_shutdown(void) +{ + return g_dev.shutdown_state; +} +EXPORT_SYMBOL(cam_req_mgr_is_shutdown); + +int cam_register_subdev(struct cam_subdev *csd) +{ + struct v4l2_subdev *sd; + int rc; + + if (!g_dev.state) { + CAM_DBG(CAM_CRM, "camera root device not ready yet"); + return -EPROBE_DEFER; + } + + if (!csd || !csd->name) { + CAM_ERR(CAM_CRM, "invalid arguments"); + return -EINVAL; + } + + mutex_lock(&g_dev.dev_lock); + + sd = &csd->sd; + v4l2_subdev_init(sd, csd->ops); + sd->internal_ops = csd->internal_ops; + snprintf(sd->name, CAM_SUBDEV_NAME_SIZE, "%s", csd->name); + v4l2_set_subdevdata(sd, csd->token); + + sd->flags = csd->sd_flags; + sd->entity.num_pads = 0; + sd->entity.pads = NULL; + sd->entity.function = csd->ent_function; + + list_add(&csd->list, &cam_req_mgr_ordered_sd_list); + list_sort(NULL, &cam_req_mgr_ordered_sd_list, + cam_req_mgr_ordered_list_cmp); + + rc = v4l2_device_register_subdev(g_dev.v4l2_dev, sd); + if (rc) { + CAM_ERR(CAM_CRM, "register subdev failed"); + goto reg_fail; + } + + rc = v4l2_device_register_subdev_nodes(g_dev.v4l2_dev); + if (rc) { + CAM_ERR(CAM_CRM, "Failed to register subdev node: %s, rc: %d", + sd->name, rc); + goto reg_fail; + } + + if (sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE) { + sd->entity.name = video_device_node_name(sd->devnode); + CAM_DBG(CAM_CRM, "created node :%s", sd->entity.name); + } + + g_dev.count++; + +reg_fail: + mutex_unlock(&g_dev.dev_lock); + return rc; +} 
+EXPORT_SYMBOL(cam_register_subdev); + +int cam_unregister_subdev(struct cam_subdev *csd) +{ + if (!g_dev.state) { + CAM_ERR(CAM_CRM, "camera root device not ready yet"); + return -ENODEV; + } + + mutex_lock(&g_dev.dev_lock); + v4l2_device_unregister_subdev(&csd->sd); + g_dev.count--; + mutex_unlock(&g_dev.dev_lock); + + return 0; +} +EXPORT_SYMBOL(cam_unregister_subdev); + +static inline void cam_req_mgr_destroy_timer_slab(void) +{ + kmem_cache_destroy(g_cam_req_mgr_timer_cachep); + g_cam_req_mgr_timer_cachep = NULL; +} + +static int cam_req_mgr_component_master_bind(struct device *dev) +{ + int rc = 0; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + + CAM_DBG(CAM_CRM, "Master bind called"); + rc = cam_v4l2_device_setup(dev); + if (rc) + return rc; + + rc = cam_media_device_setup(dev); + if (rc) + goto media_setup_fail; + + rc = cam_video_device_setup(); + if (rc) + goto video_setup_fail; + + g_dev.open_cnt = 0; + g_dev.shutdown_state = false; + mutex_init(&g_dev.cam_lock); + spin_lock_init(&g_dev.cam_eventq_lock); + mutex_init(&g_dev.dev_lock); + + rc = cam_req_mgr_util_init(); + if (rc) { + CAM_ERR(CAM_CRM, "cam req mgr util init is failed"); + goto req_mgr_util_fail; + } + + rc = cam_req_mgr_core_device_init(); + if (rc) { + CAM_ERR(CAM_CRM, "core device setup failed"); + goto req_mgr_core_fail; + } + + INIT_LIST_HEAD(&cam_req_mgr_ordered_sd_list); + + if (g_cam_req_mgr_timer_cachep == NULL) { + g_cam_req_mgr_timer_cachep = KMEM_CACHE(cam_req_mgr_timer, 0x0); + if (!g_cam_req_mgr_timer_cachep) + CAM_ERR(CAM_CRM, + "Failed to create kmem_cache for crm_timer"); + else + CAM_DBG(CAM_CRM, "Name : cam_req_mgr_timer"); + } + + CAM_DBG(CAM_CRM, "All probes done, binding slave components"); + g_dev.state = true; + rc = component_bind_all(dev, NULL); + if (rc) { + CAM_ERR(CAM_CRM, + "Error in binding all components rc: %d, Camera initialization failed!", + rc); + goto req_mgr_device_deinit; + } + + CAM_GET_TIMESTAMP(ts_end); 
+ CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + /* Record time taken by Overall camera modules bind completion.*/ + cam_req_mgr_debug_record_bind_time(microsec); + + CAM_INFO(CAM_CRM, + "All components bound successfully, Spectra camera driver initialized, Time taken(usec): %lu", + microsec); + + rc = cam_vmrm_populate_io_resource_info(); + if (rc) { + CAM_ERR(CAM_CRM, + "Error in populate io resource in rm rc: %d, Camera initialization failed!", + rc); + goto req_mgr_device_deinit; + } + + rc = cam_vmrm_register_gh_callback(); + if (rc) { + CAM_ERR(CAM_CRM, + "Error in register gh callback in rm rc: %d, Camera initialization failed!", + rc); + goto req_mgr_device_deinit; + } + + rc = sysfs_create_file(&dev->kobj, &camera_debug_sysfs_attr.attr); + if (rc < 0) { + CAM_ERR(CAM_CPAS, + "Failed to create debug attribute, rc=%d\n", rc); + goto sysfs_fail; + } + + return rc; + +sysfs_fail: + sysfs_remove_file(&dev->kobj, &camera_debug_sysfs_attr.attr); +req_mgr_device_deinit: + cam_req_mgr_destroy_timer_slab(); + cam_req_mgr_core_device_deinit(); +req_mgr_core_fail: + cam_req_mgr_util_deinit(); +req_mgr_util_fail: + mutex_destroy(&g_dev.dev_lock); + mutex_destroy(&g_dev.cam_lock); + cam_video_device_cleanup(); +video_setup_fail: + cam_media_device_cleanup(); +media_setup_fail: + cam_v4l2_device_cleanup(); + g_dev.state = false; + return rc; +} + +static void cam_req_mgr_component_master_unbind(struct device *dev) +{ + int rc = 0; + + rc = cam_vmrm_unregister_gh_callback(); + if (rc) { + CAM_ERR(CAM_CRM, + "Error in unregister gh callback in rm rc: %d", rc); + return; + } + + /* Unbinding all slave components first */ + component_unbind_all(dev, NULL); + + /* Now proceed with unbinding master */ + sysfs_remove_file(&dev->kobj, &camera_debug_sysfs_attr.attr); + cam_req_mgr_core_device_deinit(); + cam_req_mgr_util_deinit(); + cam_media_device_cleanup(); + cam_video_device_cleanup(); + cam_v4l2_device_cleanup(); + cam_req_mgr_destroy_timer_slab(); + 
mutex_destroy(&g_dev.dev_lock); + cam_req_mgr_debug_bind_latency_cleanup(); + g_dev.state = false; +} + +static const struct component_master_ops cam_req_mgr_component_master_ops = { + .bind = cam_req_mgr_component_master_bind, + .unbind = cam_req_mgr_component_master_unbind, +}; + +static int cam_req_mgr_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &cam_req_mgr_component_master_ops); + return 0; +} + +static int cam_req_mgr_probe(struct platform_device *pdev) +{ + int rc = 0, i; + struct component_match *match_list = NULL; + struct device *dev = &pdev->dev; + struct device_node *np = NULL; + uint32_t cam_bypass_driver = 0; + struct device_node *of_node = NULL; +#ifdef CONFIG_ARCH_QTI_VM + uint32_t device_heap_size = 0; + uint32_t session_heap_size = 0; +#endif + + for (i = 0; i < ARRAY_SIZE(cam_component_i2c_drivers); i++) { + while ((np = of_find_compatible_node(np, NULL, + cam_component_i2c_drivers[i]->driver.of_match_table->compatible))) { + if (of_device_is_available(np) && !(of_find_i2c_device_by_node(np))) { + CAM_INFO_RATE_LIMIT(CAM_CRM, + "I2C device: %s not available, deferring probe", + np->full_name); + rc = -EPROBE_DEFER; + goto end; + } + } + } + + np = NULL; + while ((np = of_find_compatible_node(np, NULL, CAM_I3C_MASTER_COMPAT))) { + rc = of_platform_populate(np, cam_sensor_module_dt_match, NULL, NULL); + if (rc) { + CAM_ERR(CAM_CRM, + "Failed to populate child nodes as platform devices for parent: %s, rc=%d", + np->full_name, rc); + goto end; + } + } + + rc = camera_component_match_add_drivers(dev, &match_list); + if (rc) { + CAM_ERR(CAM_CRM, + "Unable to match components, probe failed rc: %d", + rc); + goto end; + } + + /* Supply match_list to master for handing over control */ + rc = component_master_add_with_match(dev, + &cam_req_mgr_component_master_ops, match_list); + if (rc) { + CAM_ERR(CAM_CRM, + "Unable to add master, probe failed rc: %d", + rc); + goto end; + } + + of_node = dev->of_node; + rc = 
of_property_read_u32(of_node, "cam-bypass-driver", + &cam_bypass_driver); + if (!rc) { + cam_soc_util_set_bypass_drivers(cam_bypass_driver); + } else { + CAM_INFO(CAM_CRM, "bypass driver parameter not found"); + rc = 0; + } + +#ifdef CONFIG_ARCH_QTI_VM + rc = of_property_read_u32(of_node, "device-heap-size", + &device_heap_size); + if (rc) { + CAM_WARN(CAM_CRM, "device heap size parameter not found"); + + /* Setup default device heap size to 4M */ + device_heap_size = SZ_4M; + rc = 0; + } + + rc = of_property_read_u32(of_node, "session-heap-size", + &session_heap_size); + if (rc) { + CAM_WARN(CAM_CRM, "session heap size parameter not found"); + + /* Setup default session heap size to 200M */ + session_heap_size = SZ_128M | SZ_64M | SZ_8M; + rc = 0; + } + + cam_mem_mgr_set_svm_heap_sizes(device_heap_size, session_heap_size); +#endif +end: + of_node_put(np); + return rc; +} + +static const struct of_device_id cam_req_mgr_dt_match[] = { + {.compatible = "qcom,cam-req-mgr"}, + {} +}; +MODULE_DEVICE_TABLE(of, cam_req_mgr_dt_match); + +struct platform_driver cam_req_mgr_driver = { + .probe = cam_req_mgr_probe, + .remove = cam_req_mgr_remove, + .driver = { + .name = "cam_req_mgr", + .owner = THIS_MODULE, + .of_match_table = cam_req_mgr_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_req_mgr_init(void) +{ + return platform_driver_register(&cam_req_mgr_driver); +} +EXPORT_SYMBOL(cam_req_mgr_init); + +void cam_req_mgr_exit(void) +{ + platform_driver_unregister(&cam_req_mgr_driver); +} + +MODULE_DESCRIPTION("Camera Request Manager"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_dev.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_dev.h new file mode 100644 index 0000000000..cd382c15f6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_dev.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. 
All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_REQ_MGR_DEV_H_ +#define _CAM_REQ_MGR_DEV_H_ + +#include "media/cam_req_mgr.h" +/** + * struct cam_req_mgr_device - a camera request manager device + * + * @video: pointer to struct video device. + * @v4l2_dev: pointer to struct v4l2 device. + * @count: number of subdevices registered. + * @dev_lock: lock for the subdevice count. + * @state: state of the root device. + * @open_cnt: open count of subdev + * @v4l2_sub_ids: bits representing v4l2 event ids subscribed or not + * @cam_lock: per file handle lock + * @cam_eventq: event queue + * @cam_eventq_lock: lock for event queue + * @shutdown_state: shutdown state + */ +struct cam_req_mgr_device { + struct video_device *video; + struct v4l2_device *v4l2_dev; + int count; + struct mutex dev_lock; + bool state; + int32_t open_cnt; + uint32_t v4l2_sub_ids; + struct mutex cam_lock; + struct v4l2_fh *cam_eventq; + spinlock_t cam_eventq_lock; + bool shutdown_state; +}; + +#define CAM_REQ_MGR_GET_PAYLOAD_PTR(ev, type) \ + (type *)((char *)ev.u.data) + +int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg, + uint32_t id, + uint32_t type); + +/** + * @brief : API to register REQ_MGR to platform framework. + * @return Zero on success, or a negative error code on failure. + */ +int cam_req_mgr_init(void); + +/** + * @brief : API to remove REQ_MGR from platform framework. + */ +void cam_req_mgr_exit(void); + +/** + * @brief : API to get V4L2 eventIDs subscribed by UMD. + */ +uint32_t cam_req_mgr_get_id_subscribed(void); + +/** + * @brief : API to record bind latency for each driver. 
+ */ +void cam_record_bind_latency(const char *driver_name, unsigned long time_in_usec); +#endif /* _CAM_REQ_MGR_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_interface.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_interface.h new file mode 100644 index 0000000000..d84177deb5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_interface.h @@ -0,0 +1,538 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_REQ_MGR_INTERFACE_H +#define _CAM_REQ_MGR_INTERFACE_H + +#include <linux/types.h> +#include <media/cam_req_mgr.h> +#include "cam_req_mgr_core_defs.h" +#include "cam_req_mgr_util.h" + +struct cam_req_mgr_trigger_notify; +struct cam_req_mgr_error_notify; +struct cam_req_mgr_add_request; +struct cam_req_mgr_timer_notify; +struct cam_req_mgr_notify_stop; +struct cam_req_mgr_device_info; +struct cam_req_mgr_core_dev_link_setup; +struct cam_req_mgr_apply_request; +struct cam_req_mgr_flush_request; +struct cam_req_mgr_link_evt_data; +struct cam_req_mgr_dump_info; +struct cam_req_mgr_notify_msg; + +/* Request Manager -- camera device driver interface */ +/** + * @brief: camera kernel drivers to cam req mgr communication + * + * @cam_req_mgr_notify_trigger: for device which generates trigger to inform CRM + * @cam_req_mgr_notify_err : device use this to inform about different errors + * @cam_req_mgr_add_req : to inform CRM about new request received from + * userspace + * @cam_req_mgr_notify_timer : start the timer + * @cam_req_mgr_notify_msg : notify the message + */ +typedef int (*cam_req_mgr_notify_trigger)(struct cam_req_mgr_trigger_notify *); +typedef int (*cam_req_mgr_notify_err)(struct cam_req_mgr_error_notify *); +typedef int (*cam_req_mgr_add_req)(struct cam_req_mgr_add_request *); +typedef int (*cam_req_mgr_notify_timer)(struct 
cam_req_mgr_timer_notify *); +typedef int (*cam_req_mgr_notify_stop)(struct cam_req_mgr_notify_stop *); +typedef int (*cam_req_mgr_notify_msg)(struct cam_req_mgr_notify_msg *); + +/** + * @brief: cam req mgr to camera device drivers + * + * @cam_req_mgr_get_dev_info : to fetch details about device linked + * @cam_req_mgr_link_setup : to establish link with device for a session + * @cam_req_mgr_apply_req : CRM asks device to apply certain request id + * @cam_req_mgr_notify_frame_skip: CRM asks device to apply setting for + * frame skip + * @cam_req_mgr_flush_req : Flush or cancel request + * cam_req_mgr_process_evt : generic events + * @cam_req_mgr_dump_req : dump request + */ +typedef int (*cam_req_mgr_get_dev_info) (struct cam_req_mgr_device_info *); +typedef int (*cam_req_mgr_link_setup)(struct cam_req_mgr_core_dev_link_setup *); +typedef int (*cam_req_mgr_apply_req)(struct cam_req_mgr_apply_request *); +typedef int (*cam_req_mgr_notify_frame_skip)( + struct cam_req_mgr_apply_request *); +typedef int (*cam_req_mgr_flush_req)(struct cam_req_mgr_flush_request *); +typedef int (*cam_req_mgr_process_evt)(struct cam_req_mgr_link_evt_data *); +typedef int (*cam_req_mgr_dump_req)(struct cam_req_mgr_dump_info *); + +/** + * @brief : cam_req_mgr_crm_cb - func table + * + * @notify_trigger : payload for trigger indication event + * @notify_err : payload for different error occurred at device + * @add_req : payload to inform which device and what request is received + * @notify_timer : payload for timer start event + * @notify_stop : payload to inform stop event + * @notify_msg : payload to inform a message + */ +struct cam_req_mgr_crm_cb { + cam_req_mgr_notify_trigger notify_trigger; + cam_req_mgr_notify_err notify_err; + cam_req_mgr_add_req add_req; + cam_req_mgr_notify_timer notify_timer; + cam_req_mgr_notify_stop notify_stop; + cam_req_mgr_notify_msg notify_msg; +}; + +/** + * @brief : cam_req_mgr_kmd_ops - func table + * + * @get_dev_info : payload to fetch device 
details + * @link_setup : payload to establish link with device + * @apply_req : payload to apply request id on a device linked + * @notify_frame_skip: payload to notify frame skip + * @flush_req : payload to flush request + * @process_evt : payload to generic event + * @dump_req : payload to dump request + */ +struct cam_req_mgr_kmd_ops { + cam_req_mgr_get_dev_info get_dev_info; + cam_req_mgr_link_setup link_setup; + cam_req_mgr_apply_req apply_req; + cam_req_mgr_notify_frame_skip notify_frame_skip; + cam_req_mgr_flush_req flush_req; + cam_req_mgr_process_evt process_evt; + cam_req_mgr_dump_req dump_req; +}; + +/** + * enum cam_pipeline_delay + * @brief : enumerator for different pipeline delays in camera + * + * @DELAY_0 : device processed settings on same frame + * @DELAY_1 : device processed settings after 1 frame + * @DELAY_2 : device processed settings after 2 frames + * @DELAY_MAX : maximum supported pipeline delay + */ +enum cam_pipeline_delay { + CAM_PIPELINE_DELAY_0, + CAM_PIPELINE_DELAY_1, + CAM_PIPELINE_DELAY_2, + CAM_PIPELINE_DELAY_MAX, +}; + +/** + * enum cam_modeswitch_delay + * @brief : enumerator for different modeswitch delays in camera + * + * @DELAY_0 : device processed mode switch settings after 0 frame + * @DELAY_1 : device processed mode switch settings after 1 frame + * @DELAY_2 : device processed mode switch settings after 2 frames + * @DELAY_MAX : maximum supported mode switch delay + */ +enum cam_modeswitch_delay { + CAM_MODESWITCH_DELAY_0, + CAM_MODESWITCH_DELAY_1, + CAM_MODESWITCH_DELAY_2, + CAM_MODESWITCH_DELAY_MAX, +}; + + +/** + * @CAM_TRIGGER_POINT_SOF : Trigger point for Start Of Frame + * @CAM_TRIGGER_POINT_EOF : Trigger point for End Of Frame + * @CAM_TRIGGER_MAX_POINTS : Maximum number of trigger point + */ +#define CAM_TRIGGER_POINT_SOF (1 << 0) +#define CAM_TRIGGER_POINT_EOF (1 << 1) +#define CAM_TRIGGER_MAX_POINTS 2 + +/** + * enum cam_req_status + * @brief : enumerator for request status + * + * @SUCCESS : device processed 
settings successfully + * @FAILED : device processed settings failed + * @MAX : invalid status value + */ +enum cam_req_status { + CAM_REQ_STATUS_SUCCESS, + CAM_REQ_STATUS_FAILED, + CAM_REQ_STATUS_MAX, +}; + +/** + * enum cam_req_mgr_device_error + * @brief : enumerator for different errors occurred at device + * + * @NOT_FOUND : settings asked by request manager is not found + * @BUBBLE : device hit timing issue and is able to recover + * @FATAL : device is in bad shape and can not recover from error + * @PAGE_FAULT : Page fault while accessing memory + * @OVERFLOW : Bus Overflow for IFE/VFE + * @TIMEOUT : Timeout from cci or bus. + * @RECOVERY : Internal recovery for bus overflow + * @MAX : Invalid error value + */ +enum cam_req_mgr_device_error { + CRM_KMD_ERR_NOT_FOUND, + CRM_KMD_ERR_BUBBLE, + CRM_KMD_ERR_FATAL, + CRM_KMD_ERR_PAGE_FAULT, + CRM_KMD_ERR_OVERFLOW, + CRM_KMD_ERR_TIMEOUT, + CRM_KMD_ERR_STOPPED, + CRM_KMD_WARN_INTERNAL_RECOVERY, + CRM_KMD_ERR_MAX, +}; + +/** + * enum cam_req_mgr_device_id + * @brief : enumerator for different devices in subsystem + * + * @CAM_REQ_MGR : request manager itself + * @SENSOR : sensor device + * @FLASH : LED flash or dual LED device + * @ACTUATOR : lens mover + * @IFE : Image processing device + * @CUSTOM : Custom HW block + * @EXTERNAL_1 : third party device + * @EXTERNAL_2 : third party device + * @EXTERNAL_3 : third party device + * @TPG : Test Pattern Generator + * @MAX : invalid device id + */ +enum cam_req_mgr_device_id { + CAM_REQ_MGR_DEVICE, + CAM_REQ_MGR_DEVICE_SENSOR, + CAM_REQ_MGR_DEVICE_FLASH, + CAM_REQ_MGR_DEVICE_ACTUATOR, + CAM_REQ_MGR_DEVICE_IFE, + CAM_REQ_MGR_DEVICE_CUSTOM_HW, + CAM_REQ_MGR_DEVICE_EXTERNAL_1, + CAM_REQ_MGR_DEVICE_EXTERNAL_2, + CAM_REQ_MGR_DEVICE_EXTERNAL_3, + CAM_REQ_MGR_DEVICE_TPG, + CAM_REQ_MGR_DEVICE_ID_MAX, +}; + +/* Camera device driver to Req Mgr device interface */ + +/** + * enum cam_req_mgr_link_evt_type + * @CAM_REQ_MGR_LINK_EVT_ERR : error on the link from any of the + * 
connected devices + * @CAM_REQ_MGR_LINK_EVT_PAUSE : to pause the link vote down + * @CAM_REQ_MGR_LINK_EVT_RESUME : resumes the link which was paused votes up + * @CAM_REQ_MGR_LINK_EVT_RESUME_HW : seeking synced resume to unpause HW + * @CAM_REQ_MGR_LINK_EVT_SOF_FREEZE : request manager has detected an + * sof freeze + * @CAM_REQ_MGR_LINK_EVT_STALLED : Indicate to all connected devices + * that the pipeline is stalled. + * Devices can handle accordingly + * @CAM_REQ_MGR_LINK_EVT_EOF : Indicate to all connected devices + * that we get an EOF + * @CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES : Notify sub devices of the properties + * updating + * @CAM_REQ_MGR_LINK_EVT_SENSOR_FRAME_INFO : Notify sub devices of the sensor frame info + * @CAM_REQ_MGR_LINK_EVT_FRAME_DURATION_CHANGING : Check if the frame skip packet is available + * @CAM_REQ_MGR_LINK_EVT_MAX : invalid event type + */ +enum cam_req_mgr_link_evt_type { + CAM_REQ_MGR_LINK_EVT_ERR, + CAM_REQ_MGR_LINK_EVT_PAUSE, + CAM_REQ_MGR_LINK_EVT_RESUME, + CAM_REQ_MGR_LINK_EVT_RESUME_HW, + CAM_REQ_MGR_LINK_EVT_SOF_FREEZE, + CAM_REQ_MGR_LINK_EVT_STALLED, + CAM_REQ_MGR_LINK_EVT_EOF, + CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES, + CAM_REQ_MGR_LINK_EVT_SENSOR_FRAME_INFO, + CAM_REQ_MGR_LINK_EVT_FRAME_DURATION_CHANGING, + CAM_REQ_MGR_LINK_EVT_MAX, +}; + +/** + * enum cam_req_mgr_msg_type + * @CAM_REQ_MGR_MSG_SENSOR_FRAME_INFO : sensor frame info message type + * @CAM_REQ_MGR_MSG_UPDATE_DEVICE_INFO : Update device specific info + * @CAM_REQ_MGR_MSG_NOTIFY_FOR_SYNCED_RESUME : Check for synced resume post flush + * @CAM_REQ_MGR_MSG_MAX : invalid msg type + */ +enum cam_req_mgr_msg_type { + CAM_REQ_MGR_MSG_SENSOR_FRAME_INFO, + CAM_REQ_MGR_MSG_UPDATE_DEVICE_INFO, + CAM_REQ_MGR_MSG_NOTIFY_FOR_SYNCED_RESUME, + CAM_REQ_MGR_MSG_MAX, +}; + +/** + * struct cam_req_mgr_trigger_notify + * @link_hdl : link identifier + * @dev_hdl : device handle which has sent this req id + * @frame_id : frame id for internal tracking + * @trigger : trigger point of this 
notification, CRM will send apply + * only to the devices which subscribe to this point. + * @sof_timestamp_val : Captured time stamp value at sof hw event + * @req_id : req id which returned buf_done + * @trigger_id : ID to differentiate between the trigger devices + * @boot_timestamp : Captured boot time stamp value at sof hw event + */ +struct cam_req_mgr_trigger_notify { + int32_t link_hdl; + int32_t dev_hdl; + int64_t frame_id; + uint32_t trigger; + uint64_t sof_timestamp_val; + uint64_t req_id; + int32_t trigger_id; + uint64_t boot_timestamp; +}; + +/** + * struct cam_req_mgr_timer_notify + * @link_hdl : link identifier + * @dev_hdl : device handle which has sent this req id + * @state : timer state i.e ON or OFF + */ +struct cam_req_mgr_timer_notify { + int32_t link_hdl; + int32_t dev_hdl; + bool state; +}; + +/** + * struct cam_req_mgr_error_notify + * @link_hdl : link identifier + * @dev_hdl : device handle which has sent this req id + * @req_id : req id which hit error + * @frame_id : frame id for internal tracking + * @trigger : trigger point of this notification, CRM will send apply + * @sof_timestamp_val : Captured time stamp value at sof hw event + * @error : what error device hit while processing this req + */ +struct cam_req_mgr_error_notify { + int32_t link_hdl; + int32_t dev_hdl; + uint64_t req_id; + int64_t frame_id; + uint32_t trigger; + uint64_t sof_timestamp_val; + enum cam_req_mgr_device_error error; +}; + +/** + * struct cam_req_mgr_add_request + * @link_hdl : link identifier + * @dev_hdl : device handle which has sent this req id + * @req_id : req id which device is ready to process + * @skip_at_sof : before applying req mgr introduce bubble + * by not sending request to devices. ex: IFE and Flash + * @skip_at_eof : before applying req mgr introduce bubble + * by not sending request to devices. 
ex: IFE and Flash + * @trigger_eof : to identify that one of the device at this slot needs + * to be apply at EOF + * @trigger_skip : Trigger skip frame if set + */ +struct cam_req_mgr_add_request { + int32_t link_hdl; + int32_t dev_hdl; + uint64_t req_id; + uint32_t skip_at_sof; + uint32_t skip_at_eof; + bool trigger_eof; + bool trigger_skip; +}; + +/** + * struct cam_req_mgr_notify_stop + * @link_hdl : link identifier + */ +struct cam_req_mgr_notify_stop { + int32_t link_hdl; +}; + +/** + * struct cam_req_mgr_sensor_frame_info + * @frame_sync_shift : Indicates how far the frame synchronization + * : reference point from SOF, this is used to + * : align with userland and kernel frame sync offset. + * @frame_duration : The sensor frame duration between previous SOF and current SOF + * @blanking_duration : The vertical blanking between previous EOF and current SOF + * @use_for_wd : If set frame duration is to be used for WD timer for ePCR + * scenarios + * + */ +struct cam_req_mgr_sensor_frame_info { + uint64_t frame_sync_shift; + uint64_t frame_duration; + uint64_t blanking_duration; + bool use_for_wd; +}; + +/** + * struct cam_req_mgr_notify_msg + * @link_hdl : link identifier + * @dev_hdl : Indicate which device sends this message + * @req_id : Indicate which req sends this message + * @msg_type : Message type + * @frame_info : Frame info structure includes frame duration and + * : vertical blanking + * @ife_hw_name : Update acquired IFE/SFE name + */ +struct cam_req_mgr_notify_msg { + int32_t link_hdl; + int32_t dev_hdl; + uint64_t req_id; + enum cam_req_mgr_msg_type msg_type; + union { + struct cam_req_mgr_sensor_frame_info frame_info; + char ife_hw_name[30]; + } u; +}; + +/* CRM to KMD devices */ +/** + * struct cam_req_mgr_device_info + * @dev_hdl : Input_param : device handle for reference + * @name : name of the device + * @dev_id : device id info + * @p_delay : delay between time settings applied and take effect + * @m_delay : delay between time 
modeswitch settings applied and take effect + * @trigger : Trigger point for the client + * @trigger_on : This device provides trigger + * @resume_sync_on: Device is seeking sync to resume post flush/halt + */ +struct cam_req_mgr_device_info { + int32_t dev_hdl; + char name[256]; + enum cam_req_mgr_device_id dev_id; + enum cam_pipeline_delay p_delay; + enum cam_modeswitch_delay m_delay; + uint32_t trigger; + bool trigger_on; + bool resume_sync_on; +}; + +/** + * struct cam_req_mgr_core_dev_link_setup + * @link_enable : link or unlink + * @link_hdl : link identifier + * @dev_hdl : device handle for reference + * @max_delay : max pipeline delay on this link + * @mode_switch_max_delay : max modeswitch delay on this link + * @crm_cb : callback funcs to communicate with req mgr + * @trigger_id : Unique ID provided to the triggering device + */ +struct cam_req_mgr_core_dev_link_setup { + int32_t link_enable; + int32_t link_hdl; + int32_t dev_hdl; + enum cam_pipeline_delay max_delay; + enum cam_modeswitch_delay mode_switch_max_delay; + struct cam_req_mgr_crm_cb *crm_cb; + int32_t trigger_id; +}; + +/** + * struct cam_req_mgr_apply_request + * @link_hdl : link identifier + * @dev_hdl : device handle for cross check + * @request_id : request id settings to apply + * @last_applied_max_pd_req : Last applied request on highest pd device + * -1 is considered invalid + * @report_if_bubble : report to crm if failure in applying + * @trigger_point : the trigger point of this apply + * @last_applied_done_timestamp : Last setting apply done timestamp value + * @re_apply : to skip re_apply for buf_done request + * @recovery : Indicate if it is recovery req + * @no_further_requests : No further requests on link notification + * @frame_duration_changing : Indicate if the frame duration is changing in this applying + */ +struct cam_req_mgr_apply_request { + int32_t link_hdl; + int32_t dev_hdl; + uint64_t request_id; + int64_t last_applied_max_pd_req; + int32_t report_if_bubble; + 
uint32_t trigger_point; + uint64_t last_applied_done_timestamp; + bool re_apply; + bool recovery; + bool no_further_requests; + bool frame_duration_changing; +}; + +/** + * struct cam_req_mgr_flush_request + * @link_hdl : link identifier + * @dev_hdl : device handle for cross check + * @type : cancel request type flush all or a request + * @req_id : request id to cancel + * @enable_sensor_standby : Enable sensor standby + * + */ +struct cam_req_mgr_flush_request { + int32_t link_hdl; + int32_t dev_hdl; + uint32_t type; + uint64_t req_id; + bool enable_sensor_standby; +}; + +/** + * struct cam_req_mgr_event_data + * @link_hdl : link handle + * @req_id : request id + * @try_for_recovery : Link is stalled allow subdevices to recover if + * possible + * @evt_type : link event + * @error : error code + * @properties_mask : properties mask + * @frame_info : Frame info structure includes frame duration and + * : vertical blanking + * @frame_duration_changing : Indicate if the sensor changes frame duration + */ +struct cam_req_mgr_link_evt_data { + int32_t link_hdl; + int32_t dev_hdl; + uint64_t req_id; + bool try_for_recovery; + enum cam_req_mgr_link_evt_type evt_type; + union { + enum cam_req_mgr_device_error error; + uint32_t properties_mask; + struct cam_req_mgr_sensor_frame_info frame_info; + bool frame_duration_changing; + bool is_recovery; + } u; +}; + +/** + * struct cam_req_mgr_send_request + * @link_hdl : link identifier + * @in_q : input request queue pointer + */ +struct cam_req_mgr_send_request { + int32_t link_hdl; + struct cam_req_mgr_req_queue *in_q; +}; + +/** + * struct cam_req_mgr_dump_info + * @req_id : request id to dump + * @offset : offset of buffer + * @error_type : error type + * @buf_handle : buf handle + * @link_hdl : link identifier + * @dev_hdl : device handle for cross check + * + */ +struct cam_req_mgr_dump_info { + uint64_t req_id; + size_t offset; + uint32_t error_type; + uint32_t buf_handle; + int32_t link_hdl; + int32_t dev_hdl; +}; 
+#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_timer.c b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_timer.c new file mode 100644 index 0000000000..a426074a33 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_timer.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2019, 2021, The Linux Foundation. All rights reserved. + */ + +#include "cam_req_mgr_timer.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" + +extern struct kmem_cache *g_cam_req_mgr_timer_cachep; + +void crm_timer_reset(struct cam_req_mgr_timer *crm_timer) +{ + if (!crm_timer) + return; + + cam_common_modify_timer(&crm_timer->sys_timer, crm_timer->expires); + +} + +void crm_timer_callback(struct timer_list *timer_data) +{ + struct cam_req_mgr_timer *timer = + container_of(timer_data, struct cam_req_mgr_timer, sys_timer); + if (!timer) { + CAM_ERR(CAM_CRM, "NULL timer"); + return; + } + CAM_DBG(CAM_CRM, "timer %pK parent %pK", timer, timer->parent); + crm_timer_reset(timer); +} + +void crm_timer_modify(struct cam_req_mgr_timer *crm_timer, + int32_t expires) +{ + CAM_DBG(CAM_CRM, "new time %d", expires); + if (crm_timer) { + crm_timer->expires = expires; + crm_timer_reset(crm_timer); + } +} + +int crm_timer_init(struct cam_req_mgr_timer **timer, + int32_t expires, void *parent, void (*timer_cb)(struct timer_list *)) +{ + int ret = 0; + struct cam_req_mgr_timer *crm_timer = NULL; + + CAM_DBG(CAM_CRM, "init timer %d %pK", expires, *timer); + if (*timer == NULL) { + if (g_cam_req_mgr_timer_cachep) { + crm_timer = kmem_cache_alloc(g_cam_req_mgr_timer_cachep, + __GFP_ZERO | GFP_KERNEL); + if (!crm_timer) { + ret = -ENOMEM; + goto end; + } + } + + else { + ret = -ENOMEM; + goto end; + } + + if (timer_cb != NULL) + crm_timer->timer_cb = timer_cb; + else + crm_timer->timer_cb = crm_timer_callback; + + crm_timer->expires = expires; + crm_timer->parent = parent; + 
timer_setup(&crm_timer->sys_timer, + crm_timer->timer_cb, 0); + crm_timer_reset(crm_timer); + *timer = crm_timer; + } else { + CAM_WARN(CAM_CRM, "Timer already exists!!"); + ret = -EINVAL; + } +end: + return ret; +} +void crm_timer_exit(struct cam_req_mgr_timer **crm_timer) +{ + CAM_DBG(CAM_CRM, "destroy timer %pK @ %pK", *crm_timer, crm_timer); + if (*crm_timer) { + del_timer_sync(&(*crm_timer)->sys_timer); + if (g_cam_req_mgr_timer_cachep) + kmem_cache_free(g_cam_req_mgr_timer_cachep, *crm_timer); + *crm_timer = NULL; + } +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_timer.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_timer.h new file mode 100644 index 0000000000..5517a57d44 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_timer.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_REQ_MGR_TIMER_H_ +#define _CAM_REQ_MGR_TIMER_H_ + +#include +#include + +#include "cam_req_mgr_core_defs.h" + +/** + * struct cam_req_mgr_timer + * @expires : timeout value for timer + * @sys_timer : system timer variable + * @parent : priv data - link pointer + * @timer_cb : callback func which will be called when timeout expires + * @pause_timer : flag to pause SOF timer + */ +struct cam_req_mgr_timer { + int32_t expires; + struct timer_list sys_timer; + void *parent; + void (*timer_cb)(struct timer_list *timer_data); + bool pause_timer; +}; + +/** + * crm_timer_modify() + * @brief : modify expiry time. + * @timer : timer which will be reset to expires values + */ +void crm_timer_modify(struct cam_req_mgr_timer *crm_timer, + int32_t expires); + +/** + * crm_timer_reset() + * @brief : destroys the timer allocated. 
+ * @timer : timer which will be reset to expires values + */ +void crm_timer_reset(struct cam_req_mgr_timer *timer); + +/** + * crm_timer_init() + * @brief : create a new general purpose timer. + * timer utility takes care of allocating memory and deleting + * @timer : double pointer to new timer allocated + * @expires : Timeout value to fire callback + * @parent : void pointer which caller can use for book keeping + * @timer_cb : caller can chose to use its own callback function when + * timer fires the timeout. If no value is set timer util + * will use default. + */ +int crm_timer_init(struct cam_req_mgr_timer **timer, + int32_t expires, void *parent, void (*timer_cb)(struct timer_list *)); + +/** + * crm_timer_exit() + * @brief : destroys the timer allocated. + * @timer : timer pointer which will be freed + */ +void crm_timer_exit(struct cam_req_mgr_timer **timer); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util.c b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util.c new file mode 100644 index 0000000000..9aa5c205b6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util.c @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "CAM-REQ-MGR_UTIL %s:%d " fmt, __func__, __LINE__ + +#include +#include +#include +#include +#include +#include +#include +#include "cam_req_mgr_util.h" +#include "cam_debug_util.h" +#include "cam_subdev.h" +#include "cam_mem_mgr_api.h" + +static struct cam_req_mgr_util_hdl_tbl *hdl_tbl; +static DEFINE_SPINLOCK(hdl_tbl_lock); + +int cam_req_mgr_util_init(void) +{ + int rc = 0; + int bitmap_size; + static struct cam_req_mgr_util_hdl_tbl *hdl_tbl_local; + + if (hdl_tbl) { + rc = -EINVAL; + CAM_ERR(CAM_CRM, "Hdl_tbl is already present"); + goto hdl_tbl_check_failed; + } + + hdl_tbl_local = CAM_MEM_ZALLOC(sizeof(*hdl_tbl), GFP_KERNEL); + if (!hdl_tbl_local) { + rc = -ENOMEM; + goto hdl_tbl_alloc_failed; + } + bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES_V2) * sizeof(long); + hdl_tbl_local->bitmap = CAM_MEM_ZALLOC(bitmap_size, GFP_KERNEL); + if (!hdl_tbl_local->bitmap) { + rc = -ENOMEM; + goto bitmap_alloc_fail; + } + hdl_tbl_local->bits = bitmap_size * BITS_PER_BYTE; + + spin_lock_bh(&hdl_tbl_lock); + if (hdl_tbl) { + spin_unlock_bh(&hdl_tbl_lock); + rc = -EEXIST; + CAM_MEM_FREE(hdl_tbl_local->bitmap); + CAM_MEM_FREE(hdl_tbl_local); + goto hdl_tbl_check_failed; + } + hdl_tbl = hdl_tbl_local; + spin_unlock_bh(&hdl_tbl_lock); + + return rc; + +bitmap_alloc_fail: + CAM_MEM_FREE(hdl_tbl_local); + spin_lock_bh(&hdl_tbl_lock); + hdl_tbl = NULL; + spin_unlock_bh(&hdl_tbl_lock); +hdl_tbl_alloc_failed: +hdl_tbl_check_failed: + return rc; +} + +int cam_req_mgr_util_deinit(void) +{ + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR(CAM_CRM, "Hdl tbl is NULL"); + spin_unlock_bh(&hdl_tbl_lock); + return -EINVAL; + } + + CAM_MEM_FREE(hdl_tbl->bitmap); + hdl_tbl->bitmap = NULL; + CAM_MEM_FREE(hdl_tbl); + hdl_tbl = NULL; + spin_unlock_bh(&hdl_tbl_lock); + + return 0; +} + +int cam_handle_validate(int32_t session_hdl, int32_t handle) +{ + int idx, rc = 0; + + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR(CAM_CRM, "Hdl tbl is NULL"); 
+ rc = -EINVAL; + goto out; + } + + idx = CAM_REQ_MGR_GET_HDL_IDX(handle); + if (idx < 0 || idx >= CAM_REQ_MGR_MAX_HANDLES_V2) { + CAM_ERR(CAM_CRM, "Invalid index:%d for handle: 0x%x", idx, handle); + rc = -EINVAL; + goto out; + } + + if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) { + CAM_ERR(CAM_CRM, "Invalid state:%d", hdl_tbl->hdl[idx].state); + rc = -EINVAL; + goto out; + } + + if (hdl_tbl->hdl[idx].session_hdl != session_hdl || + hdl_tbl->hdl[idx].hdl_value != handle) { + CAM_ERR(CAM_CRM, "Exp ses_hdl: 0x%x ses_hdl: 0x%x Exp hdl: 0x%x hdl: 0x%x", + hdl_tbl->hdl[idx].session_hdl, session_hdl, hdl_tbl->hdl[idx].hdl_value, + handle); + rc = -EINVAL; + goto out; + } + +out: + spin_unlock_bh(&hdl_tbl_lock); + return rc; +} + +int cam_req_mgr_util_free_hdls(void) +{ + int i = 0; + + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR(CAM_CRM, "Hdl tbl is NULL"); + spin_unlock_bh(&hdl_tbl_lock); + return -EINVAL; + } + + for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES_V2; i++) { + if (hdl_tbl->hdl[i].state == HDL_ACTIVE) { + CAM_WARN(CAM_CRM, "Dev handle = %x session_handle = %x", + hdl_tbl->hdl[i].hdl_value, + hdl_tbl->hdl[i].session_hdl); + hdl_tbl->hdl[i].state = HDL_FREE; + clear_bit(i, hdl_tbl->bitmap); + } + } + bitmap_zero(hdl_tbl->bitmap, CAM_REQ_MGR_MAX_HANDLES_V2); + spin_unlock_bh(&hdl_tbl_lock); + + return 0; +} + +static int32_t cam_get_free_handle_index(void) +{ + int idx; + + idx = find_first_zero_bit(hdl_tbl->bitmap, hdl_tbl->bits); + + if (idx >= CAM_REQ_MGR_MAX_HANDLES_V2 || idx < 0) { + CAM_ERR(CAM_CRM, "No free index found idx: %d", idx); + return -ENOSR; + } + + set_bit(idx, hdl_tbl->bitmap); + + return idx; +} + +static void cam_dump_tbl_info(void) +{ + int i; + + for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES_V2; i++) + CAM_INFO_RATE_LIMIT_CUSTOM(CAM_CRM, CAM_RATE_LIMIT_INTERVAL_5SEC, + CAM_REQ_MGR_MAX_HANDLES_V2, + "session_hdl=%x hdl_value=%x type=%d state=%d dev_id=%lld", + hdl_tbl->hdl[i].session_hdl, hdl_tbl->hdl[i].hdl_value, + hdl_tbl->hdl[i].type, 
hdl_tbl->hdl[i].state, hdl_tbl->hdl[i].dev_id); +} + +int32_t cam_create_session_hdl(void *priv) +{ + int idx; + int rand = 0; + int32_t handle = 0; + + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR(CAM_CRM, "Hdl tbl is NULL"); + spin_unlock_bh(&hdl_tbl_lock); + return -EINVAL; + } + + idx = cam_get_free_handle_index(); + if (idx < 0) { + CAM_ERR(CAM_CRM, "Unable to create session handle(idx = %d)", idx); + cam_dump_tbl_info(); + spin_unlock_bh(&hdl_tbl_lock); + return idx; + } + + get_random_bytes(&rand, CAM_REQ_MGR_RND1_BYTES); + handle = GET_DEV_HANDLE(rand, HDL_TYPE_SESSION, idx); + hdl_tbl->hdl[idx].session_hdl = handle; + hdl_tbl->hdl[idx].hdl_value = handle; + hdl_tbl->hdl[idx].type = HDL_TYPE_SESSION; + hdl_tbl->hdl[idx].state = HDL_ACTIVE; + hdl_tbl->hdl[idx].priv = priv; + hdl_tbl->hdl[idx].ops = NULL; + hdl_tbl->hdl[idx].dev_id = CAM_CRM; + spin_unlock_bh(&hdl_tbl_lock); + + return handle; +} + +int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data) +{ + int idx; + int rand = 0; + int32_t handle; + bool crm_active; + + crm_active = cam_req_mgr_is_open(); + if (!crm_active) { + CAM_ERR(CAM_ICP, "CRM is not ACTIVE"); + return -EINVAL; + } + + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR(CAM_CRM, "Hdl tbl is NULL"); + spin_unlock_bh(&hdl_tbl_lock); + return -EINVAL; + } + + idx = cam_get_free_handle_index(); + if (idx < 0) { + CAM_ERR(CAM_CRM, "Unable to create device handle(idx= %d)", idx); + cam_dump_tbl_info(); + spin_unlock_bh(&hdl_tbl_lock); + return idx; + } + + get_random_bytes(&rand, CAM_REQ_MGR_RND1_BYTES); + handle = GET_DEV_HANDLE(rand, HDL_TYPE_DEV, idx); + hdl_tbl->hdl[idx].session_hdl = hdl_data->session_hdl; + hdl_tbl->hdl[idx].hdl_value = handle; + hdl_tbl->hdl[idx].type = HDL_TYPE_DEV; + hdl_tbl->hdl[idx].state = HDL_ACTIVE; + hdl_tbl->hdl[idx].priv = hdl_data->priv; + hdl_tbl->hdl[idx].ops = hdl_data->ops; + hdl_tbl->hdl[idx].dev_id = hdl_data->dev_id; + spin_unlock_bh(&hdl_tbl_lock); + + pr_debug("%s: 
handle = 0x%x idx = %d\n", __func__, handle, idx); + return handle; +} + +int32_t cam_create_link_hdl(struct cam_create_dev_hdl *hdl_data) +{ + int idx; + int rand = 0; + int32_t handle; + + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR(CAM_CRM, "Hdl tbl is NULL"); + spin_unlock_bh(&hdl_tbl_lock); + return -EINVAL; + } + + idx = cam_get_free_handle_index(); + if (idx < 0) { + CAM_ERR(CAM_CRM, "Unable to create link handle(idx = %d)", idx); + cam_dump_tbl_info(); + spin_unlock_bh(&hdl_tbl_lock); + return idx; + } + + get_random_bytes(&rand, CAM_REQ_MGR_RND1_BYTES); + handle = GET_DEV_HANDLE(rand, HDL_TYPE_LINK, idx); + hdl_tbl->hdl[idx].session_hdl = hdl_data->session_hdl; + hdl_tbl->hdl[idx].hdl_value = handle; + hdl_tbl->hdl[idx].type = HDL_TYPE_LINK; + hdl_tbl->hdl[idx].state = HDL_ACTIVE; + hdl_tbl->hdl[idx].priv = hdl_data->priv; + hdl_tbl->hdl[idx].ops = NULL; + hdl_tbl->hdl[idx].dev_id = hdl_data->dev_id; + spin_unlock_bh(&hdl_tbl_lock); + + CAM_DBG(CAM_CRM, "handle = %x", handle); + return handle; +} + +void *cam_get_priv(int32_t dev_hdl, int handle_type) +{ + int idx; + int type; + void *priv; + + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR_RATE_LIMIT(CAM_CRM, "Hdl tbl is NULL"); + goto device_priv_fail; + } + + idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl); + if (idx >= CAM_REQ_MGR_MAX_HANDLES_V2) { + CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx:%d", idx); + goto device_priv_fail; + } + + if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) { + CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid hdl [%d] [%d]", + dev_hdl, hdl_tbl->hdl[idx].hdl_value); + goto device_priv_fail; + } + + if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) { + CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state:%d", + hdl_tbl->hdl[idx].state); + goto device_priv_fail; + } + + type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl); + if (type != handle_type) { + CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type:%d", type); + goto device_priv_fail; + } + + priv = hdl_tbl->hdl[idx].priv; + spin_unlock_bh(&hdl_tbl_lock); + + return 
priv; + +device_priv_fail: + spin_unlock_bh(&hdl_tbl_lock); + return NULL; +} + +void *cam_get_device_priv(int32_t dev_hdl) +{ + void *priv; + + priv = cam_get_priv(dev_hdl, HDL_TYPE_DEV); + return priv; +} + +struct cam_req_mgr_core_session *cam_get_session_priv(int32_t dev_hdl) +{ + struct cam_req_mgr_core_session *priv; + + priv = (struct cam_req_mgr_core_session *) + cam_get_priv(dev_hdl, HDL_TYPE_SESSION); + + return priv; +} + +struct cam_req_mgr_core_link *cam_get_link_priv(int32_t dev_hdl) +{ + struct cam_req_mgr_core_link *priv; + + priv = (struct cam_req_mgr_core_link *) + cam_get_priv(dev_hdl, HDL_TYPE_LINK); + + return priv; +} + +void *cam_get_device_ops(int32_t dev_hdl) +{ + int idx; + int type; + void *ops; + + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR(CAM_CRM, "Hdl tbl is NULL"); + goto device_ops_fail; + } + + idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl); + if (idx >= CAM_REQ_MGR_MAX_HANDLES_V2) { + CAM_ERR(CAM_CRM, "Invalid idx"); + goto device_ops_fail; + } + + if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) { + CAM_ERR(CAM_CRM, "Invalid state"); + goto device_ops_fail; + } + + type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl); + if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type && HDL_TYPE_LINK != type) { + CAM_ERR(CAM_CRM, "Invalid type"); + goto device_ops_fail; + } + + if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) { + CAM_ERR(CAM_CRM, "Invalid hdl"); + goto device_ops_fail; + } + + ops = hdl_tbl->hdl[idx].ops; + spin_unlock_bh(&hdl_tbl_lock); + + return ops; + +device_ops_fail: + spin_unlock_bh(&hdl_tbl_lock); + return NULL; +} + +static int cam_destroy_hdl(int32_t dev_hdl, int dev_hdl_type) +{ + int idx; + int type; + + spin_lock_bh(&hdl_tbl_lock); + if (!hdl_tbl) { + CAM_ERR(CAM_CRM, "Hdl tbl is NULL"); + goto destroy_hdl_fail; + } + + idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl); + if (idx >= CAM_REQ_MGR_MAX_HANDLES_V2) { + CAM_ERR(CAM_CRM, "Invalid idx %d", idx); + goto destroy_hdl_fail; + } + + if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) { + 
CAM_ERR(CAM_CRM, "Invalid state"); + goto destroy_hdl_fail; + } + + type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl); + if (type != dev_hdl_type) { + CAM_ERR(CAM_CRM, "Invalid type %d, %d", type, dev_hdl_type); + goto destroy_hdl_fail; + } + + if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) { + CAM_ERR(CAM_CRM, "Invalid hdl"); + goto destroy_hdl_fail; + } + + hdl_tbl->hdl[idx].state = HDL_FREE; + hdl_tbl->hdl[idx].ops = NULL; + hdl_tbl->hdl[idx].priv = NULL; + clear_bit(idx, hdl_tbl->bitmap); + spin_unlock_bh(&hdl_tbl_lock); + + return 0; + +destroy_hdl_fail: + spin_unlock_bh(&hdl_tbl_lock); + return -EINVAL; +} + +int cam_destroy_device_hdl(int32_t dev_hdl) +{ + return cam_destroy_hdl(dev_hdl, HDL_TYPE_DEV); +} + +int cam_destroy_link_hdl(int32_t dev_hdl) +{ + CAM_DBG(CAM_CRM, "handle = %x", dev_hdl); + return cam_destroy_hdl(dev_hdl, HDL_TYPE_LINK); +} + +int cam_destroy_session_hdl(int32_t dev_hdl) +{ + return cam_destroy_hdl(dev_hdl, HDL_TYPE_SESSION); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util.h new file mode 100644 index 0000000000..2cf1d2ffa2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
 */

#ifndef _CAM_REQ_MGR_UTIL_API_H_
#define _CAM_REQ_MGR_UTIL_API_H_

/* NOTE(review): the <...> include target was stripped in transcription. */
#include
#include "cam_req_mgr_util_priv.h"

/**
 * state of a handle(session/device)
 * @HDL_FREE: free handle
 * @HDL_ACTIVE: active handles
 */
enum hdl_state {
	HDL_FREE,
	HDL_ACTIVE
};

/**
 * handle type
 * @HDL_TYPE_DEV: for device
 * @HDL_TYPE_SESSION: for session
 * @HDL_TYPE_LINK: for link
 */
enum hdl_type {
	HDL_TYPE_DEV = 1,
	HDL_TYPE_SESSION,
	HDL_TYPE_LINK
};

/**
 * struct handle - one row of the global handle table
 * @session_hdl: session handle owning this entry
 * @hdl_value: allocated handle value
 * @type: session/device/link handle
 * @state: free/used
 * @dev_id: device id for handle
 * @ops: ops structure
 * @priv: private data of a handle
 */
struct handle {
	int32_t session_hdl;
	uint32_t hdl_value;
	enum hdl_type type;
	enum hdl_state state;
	uint64_t dev_id;
	void *ops;
	void *priv;
};

/**
 * struct cam_req_mgr_util_hdl_tbl
 * @hdl: row of handles
 * @bitmap: bit map to get free hdl row idx
 * @bits: size of bit map in bits
 */
struct cam_req_mgr_util_hdl_tbl {
	struct handle hdl[CAM_REQ_MGR_MAX_HANDLES_V2];
	void *bitmap;
	size_t bits;
};

/**
 * struct cam_create_dev_hdl - creation parameters passed by KMD drivers
 * and cam_req_mgr when requesting a handle
 * @session_hdl: session_hdl info
 * @v4l2_sub_dev_flag: flag to create v4l2 sub device
 * @media_entity_flag: flag for media entity
 * @reserved: reserved field
 * @dev_id: device id for handle
 * @ops: ops pointer for a device handle
 * @priv: private data for a device handle
 */
struct cam_create_dev_hdl {
	int32_t session_hdl;
	int32_t v4l2_sub_dev_flag;
	int32_t media_entity_flag;
	int32_t reserved;
	uint64_t dev_id;
	void *ops;
	void *priv;
};

/**
 * cam_handle_validate() - validate session/device handle
 * @session_hdl: handle for a session
 * @handle: handle for a session/device
 *
 * cam_req_mgr_core and KMD drivers use this function to
 * validate session/device handle. Returns 0 on success,
 * -EINVAL on failure.
 */
int cam_handle_validate(int32_t session_hdl, int32_t handle);

/**
 * cam_create_session_hdl() - create a session handle
 * @priv: private data for a session handle
 *
 * cam_req_mgr core calls this function to get
 * a unique session handle. Returns a unique session
 * handle
 */
int32_t cam_create_session_hdl(void *priv);

/**
 * cam_create_device_hdl() - create a device handle
 * @hdl_data: session hdl, flags, ops and priv data as input
 *
 * cam_req_mgr_core calls this function to get
 * session and link handles
 * KMD drivers calls this function to create
 * a device handle. Returns a unique device handle
 */
int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data);

/**
 * cam_create_link_hdl() - create a link handle
 * @hdl_data: session hdl, flags, ops and priv data as input
 *
 * cam_req_mgr_core calls this function to get
 * session and link handles
 * KMD drivers calls this function to create
 * a link handle. Returns a unique link handle
 */
int32_t cam_create_link_hdl(struct cam_create_dev_hdl *hdl_data);

/**
 * cam_get_device_priv() - get private data of a device handle
 * @dev_hdl: handle for a device
 *
 * cam_req_mgr_core and KMD drivers use this function to
 * get private data of a handle. Returns a private data
 * structure pointer.
 */
void *cam_get_device_priv(int32_t dev_hdl);

/**
 * cam_get_session_priv() - get private data of a session handle
 * @dev_hdl: handle for a session
 *
 * cam_req_mgr_core and KMD drivers use this function to
 * get private data of a handle. Returns a private data
 * structure pointer.
 */
struct cam_req_mgr_core_session *cam_get_session_priv(int32_t dev_hdl);

/**
 * cam_get_link_priv() - get private data of a link handle
 * @dev_hdl: handle for a link
 *
 * cam_req_mgr_core and KMD drivers use this function to
 * get private data of a handle. Returns a private data
 * structure pointer.
 */
struct cam_req_mgr_core_link *cam_get_link_priv(int32_t dev_hdl);

/**
 * cam_get_device_ops() - get ops of a handle
 * @dev_hdl: handle for a session/link/device
 *
 * cam_req_mgr_core and KMD drivers use this function to
 * get ops of a handle. Returns a pointer to ops.
 */
void *cam_get_device_ops(int32_t dev_hdl);

/**
 * cam_destroy_device_hdl() - destroy device handle
 * @dev_hdl: handle for a device.
 *
 * Returns success/failure
 */
int32_t cam_destroy_device_hdl(int32_t dev_hdl);

/**
 * cam_destroy_link_hdl() - destroy link handle
 * @dev_hdl: handle for a link.
 *
 * Returns success/failure
 */
int32_t cam_destroy_link_hdl(int32_t dev_hdl);

/**
 * cam_destroy_session_hdl() - destroy device handle
 * @dev_hdl: handle for a session
 *
 * Returns success/failure
 */
int32_t cam_destroy_session_hdl(int32_t dev_hdl);


/* Internal functions */
/**
 * cam_req_mgr_util_init() - init function of cam_req_mgr_util
 *
 * This is called as part of probe function to initialize
 * handle table, bitmap, locks
 */
int cam_req_mgr_util_init(void);

/**
 * cam_req_mgr_util_deinit() - deinit function of cam_req_mgr_util
 *
 * This function is called in case of probe failure
 * NOTE(review): declared int32_t here but defined as plain int in the
 * .c file — same ABI on Linux, but worth unifying upstream.
 */
int32_t cam_req_mgr_util_deinit(void);

/**
 * cam_req_mgr_util_free_hdls() - free handles in case of crash
 *
 * Called from cam_req_mgr_dev release function to make sure
 * all data structures are cleaned to avoid leaks
 *
 * cam_req_mgr core can call this function at the end of
 * camera to make sure all stale entries are printed and
 * cleaned
 */
int32_t cam_req_mgr_util_free_hdls(void);

#endif /* _CAM_REQ_MGR_UTIL_API_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util_priv.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util_priv.h
new file mode 100644
index 0000000000..075f5c3470
--- /dev/null
+++ 
/* b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_util_priv.h
 * @@ -0,0 +1,42 @@   (diff metadata preserved from the original patch text)
 */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

#ifndef _CAM_REQ_MGR_UTIL_PRIV_H_
#define _CAM_REQ_MGR_UTIL_PRIV_H_

/**
 * handle format:
 * @bits (0-7): handle index
 * @bits (8-11): handle type
 * @bits (12-15): reserved
 * @bits (16-23): random bits
 * @bits (24-31): zeros
 */

#define CAM_REQ_MGR_HDL_SIZE		32
#define CAM_REQ_MGR_RND1_SIZE		8
#define CAM_REQ_MGR_RVD_SIZE		4
#define CAM_REQ_MGR_HDL_TYPE_SIZE	4
#define CAM_REQ_MGR_HDL_IDX_SIZE	8

#define CAM_REQ_MGR_RND1_POS		24
#define CAM_REQ_MGR_RVD_POS		16
#define CAM_REQ_MGR_HDL_TYPE_POS	12
/*
 * FIX(review): CAM_REQ_MGR_HDL_IDX_POS and CAM_REQ_MGR_HDL_IDX_MASK are
 * used by the macros below but were defined nowhere in this header (most
 * likely dropped in transcription — verify against the pristine source).
 * Per the bit layout documented above, the index occupies bits 0-7, so
 * the field ends at bit position 8 and is masked by 0xFF.
 */
#define CAM_REQ_MGR_HDL_IDX_POS		8

#define CAM_REQ_MGR_RND1_BYTES		1

#define CAM_REQ_MGR_HDL_TYPE_MASK	((1 << CAM_REQ_MGR_HDL_TYPE_SIZE) - 1)
#define CAM_REQ_MGR_HDL_IDX_MASK	((1 << CAM_REQ_MGR_HDL_IDX_SIZE) - 1)

/*
 * Compose a handle: random bits at 16-23, reserved zeros at 12-15,
 * type at 8-11, index at 0-7.
 * FIX: removed a stray trailing '\' line-continuation after the closing
 * paren, which glued the following source line into the macro body.
 */
#define GET_DEV_HANDLE(rnd1, type, idx) \
	((rnd1 << (CAM_REQ_MGR_RND1_POS - CAM_REQ_MGR_RND1_SIZE)) | \
	(0x0 << (CAM_REQ_MGR_RVD_POS - CAM_REQ_MGR_RVD_SIZE)) | \
	(type << (CAM_REQ_MGR_HDL_TYPE_POS - CAM_REQ_MGR_HDL_TYPE_SIZE)) | \
	(idx << (CAM_REQ_MGR_HDL_IDX_POS - CAM_REQ_MGR_HDL_IDX_SIZE)))

#define CAM_REQ_MGR_GET_HDL_IDX(hdl) (hdl & CAM_REQ_MGR_HDL_IDX_MASK)
#define CAM_REQ_MGR_GET_HDL_TYPE(hdl) \
	((hdl >> CAM_REQ_MGR_HDL_IDX_POS) & CAM_REQ_MGR_HDL_TYPE_MASK)

#endif
/* diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_workq.c b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_workq.c
 * new file mode 100644
 * index 0000000000..0b21f2e839
 * --- /dev/null
 * +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_workq.c
 * @@ -0,0 +1,303 @@
 */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */
+ */ + +#include "cam_req_mgr_workq.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +#define WORKQ_ACQUIRE_LOCK(workq, flags) {\ + if ((workq)->in_irq) \ + spin_lock_irqsave(&(workq)->lock_bh, (flags)); \ + else \ + spin_lock_bh(&(workq)->lock_bh); \ +} + +#define WORKQ_RELEASE_LOCK(workq, flags) {\ + if ((workq)->in_irq) \ + spin_unlock_irqrestore(&(workq)->lock_bh, (flags)); \ + else \ + spin_unlock_bh(&(workq)->lock_bh); \ +} + +struct crm_workq_task *cam_req_mgr_workq_get_task( + struct cam_req_mgr_core_workq *workq) +{ + struct crm_workq_task *task = NULL; + unsigned long flags = 0; + + if (!workq) + return NULL; + + WORKQ_ACQUIRE_LOCK(workq, flags); + if (list_empty(&workq->task.empty_head)) + goto end; + + task = list_first_entry(&workq->task.empty_head, + struct crm_workq_task, entry); + if (task) { + atomic_sub(1, &workq->task.free_cnt); + list_del_init(&task->entry); + } + +end: + WORKQ_RELEASE_LOCK(workq, flags); + + return task; +} + +static void cam_req_mgr_workq_put_task(struct crm_workq_task *task) +{ + struct cam_req_mgr_core_workq *workq = + (struct cam_req_mgr_core_workq *)task->parent; + unsigned long flags = 0; + + list_del_init(&task->entry); + task->cancel = 0; + task->process_cb = NULL; + task->priv = NULL; + WORKQ_ACQUIRE_LOCK(workq, flags); + list_add_tail(&task->entry, + &workq->task.empty_head); + atomic_add(1, &workq->task.free_cnt); + WORKQ_RELEASE_LOCK(workq, flags); +} + +void cam_req_mgr_workq_flush(struct cam_req_mgr_core_workq *workq) +{ + if (!workq) { + CAM_ERR(CAM_CRM, "workq is null"); + return; + } + + atomic_set(&workq->flush, 1); + cancel_work_sync(&workq->work); + atomic_set(&workq->flush, 0); +} + +/** + * cam_req_mgr_process_task() - Process the enqueued task + * @task: pointer to task workq thread shall process + */ +static int cam_req_mgr_process_task(struct crm_workq_task *task) +{ + if (!task) + return -EINVAL; + + if (task->process_cb) + task->process_cb(task->priv, 
task->payload); + else + CAM_WARN(CAM_CRM, "FATAL:no task handler registered for workq"); + cam_req_mgr_workq_put_task(task); + + return 0; +} + +/** + * cam_req_mgr_process_workq() - main loop handling + * @w: workqueue task pointer + */ +void cam_req_mgr_process_workq(struct work_struct *w) +{ + struct cam_req_mgr_core_workq *workq = NULL; + struct crm_workq_task *task; + int32_t i = CRM_TASK_PRIORITY_0; + unsigned long flags = 0; + ktime_t sched_start_time; + void *cb = NULL; + + if (!w) { + CAM_ERR(CAM_CRM, "NULL task pointer can not schedule"); + return; + } + workq = (struct cam_req_mgr_core_workq *) + container_of(w, struct cam_req_mgr_core_workq, work); + + while (i < CRM_TASK_PRIORITY_MAX) { + WORKQ_ACQUIRE_LOCK(workq, flags); + while (!list_empty(&workq->task.process_head[i])) { + task = list_first_entry(&workq->task.process_head[i], + struct crm_workq_task, entry); + cb = (void *)task->process_cb; + cam_common_util_thread_switch_delay_detect( + workq->workq_name, "schedule", cb, + task->task_scheduled_ts, + CAM_WORKQ_SCHEDULE_TIME_THRESHOLD); + sched_start_time = ktime_get(); + atomic_sub(1, &workq->task.pending_cnt); + list_del_init(&task->entry); + WORKQ_RELEASE_LOCK(workq, flags); + if (!unlikely(atomic_read(&workq->flush))) + cam_req_mgr_process_task(task); + cam_common_util_thread_switch_delay_detect( + workq->workq_name, "execution", cb, + sched_start_time, + CAM_WORKQ_SCHEDULE_TIME_THRESHOLD); + CAM_DBG(CAM_CRM, "processed task %pK free_cnt %d", + task, atomic_read(&workq->task.free_cnt)); + WORKQ_ACQUIRE_LOCK(workq, flags); + } + WORKQ_RELEASE_LOCK(workq, flags); + i++; + } +} + +int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task, + void *priv, int32_t prio) +{ + int rc = 0; + struct cam_req_mgr_core_workq *workq = NULL; + unsigned long flags = 0; + + if (!task) { + CAM_WARN(CAM_CRM, "NULL task pointer can not schedule"); + return -EINVAL; + } + workq = (struct cam_req_mgr_core_workq *)task->parent; + if (!workq) { + CAM_DBG(CAM_CRM, 
"NULL workq pointer suspect mem corruption"); + return -EINVAL; + } + + if (task->cancel == 1 || atomic_read(&workq->flush)) { + rc = 0; + goto abort; + } + task->priv = priv; + task->priority = + (prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0) + ? prio : CRM_TASK_PRIORITY_0; + task->task_scheduled_ts = ktime_get(); + + WORKQ_ACQUIRE_LOCK(workq, flags); + if (!workq->job) { + rc = -EINVAL; + WORKQ_RELEASE_LOCK(workq, flags); + goto abort; + } + + list_add_tail(&task->entry, + &workq->task.process_head[task->priority]); + + atomic_add(1, &workq->task.pending_cnt); + CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d", + task, atomic_read(&workq->task.pending_cnt)); + + queue_work(workq->job, &workq->work); + WORKQ_RELEASE_LOCK(workq, flags); + + return rc; +abort: + cam_req_mgr_workq_put_task(task); + CAM_INFO(CAM_CRM, "task aborted and queued back to pool"); + return rc; +} + +int cam_req_mgr_workq_create(char *name, int32_t num_tasks, + struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq, + int flags, void (*func)(struct work_struct *w)) +{ + int32_t i, wq_flags = 0, max_active_tasks = 0; + struct crm_workq_task *task; + struct cam_req_mgr_core_workq *crm_workq = NULL; + char buf[128] = "crm_workq-"; + + if (!*workq) { + crm_workq = CAM_MEM_ZALLOC(sizeof(struct cam_req_mgr_core_workq), + GFP_KERNEL); + if (crm_workq == NULL) + return -ENOMEM; + + wq_flags |= WQ_UNBOUND; + if (flags & CAM_WORKQ_FLAG_HIGH_PRIORITY) + wq_flags |= WQ_HIGHPRI; + + if (flags & CAM_WORKQ_FLAG_SERIAL) + max_active_tasks = 1; + + strlcat(buf, name, sizeof(buf)); + CAM_DBG(CAM_CRM, "create workque crm_workq-%s", name); + crm_workq->job = alloc_workqueue(buf, + wq_flags, max_active_tasks, NULL); + if (!crm_workq->job) { + CAM_MEM_FREE(crm_workq); + return -ENOMEM; + } + + /* Workq attributes initialization */ + strscpy(crm_workq->workq_name, buf, sizeof(crm_workq->workq_name)); + INIT_WORK(&crm_workq->work, func); + spin_lock_init(&crm_workq->lock_bh); + 
CAM_DBG(CAM_CRM, "LOCK_DBG workq %s lock %pK", + name, &crm_workq->lock_bh); + + /* Task attributes initialization */ + atomic_set(&crm_workq->task.pending_cnt, 0); + atomic_set(&crm_workq->task.free_cnt, 0); + for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++) + INIT_LIST_HEAD(&crm_workq->task.process_head[i]); + INIT_LIST_HEAD(&crm_workq->task.empty_head); + atomic_set(&crm_workq->flush, 0); + crm_workq->in_irq = in_irq; + crm_workq->task.num_task = num_tasks; + crm_workq->task.pool = CAM_MEM_ZALLOC_ARRAY(crm_workq->task.num_task, + sizeof(struct crm_workq_task), GFP_KERNEL); + if (!crm_workq->task.pool) { + CAM_WARN(CAM_CRM, "Insufficient memory %zu", + sizeof(struct crm_workq_task) * + crm_workq->task.num_task); + CAM_MEM_FREE(crm_workq); + return -ENOMEM; + } + + for (i = 0; i < crm_workq->task.num_task; i++) { + task = &crm_workq->task.pool[i]; + task->parent = (void *)crm_workq; + /* Put all tasks in free pool */ + INIT_LIST_HEAD(&task->entry); + cam_req_mgr_workq_put_task(task); + } + *workq = crm_workq; + CAM_DBG(CAM_CRM, "free tasks %d", + atomic_read(&crm_workq->task.free_cnt)); + } + + return 0; +} + +void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq) +{ + unsigned long flags = 0; + struct workqueue_struct *job; + struct cam_req_mgr_core_workq *workq; + int i; + + if (crm_workq && *crm_workq) { + workq = *crm_workq; + CAM_DBG(CAM_CRM, "destroy workque %s", workq->workq_name); + WORKQ_ACQUIRE_LOCK(workq, flags); + /* prevent any processing of callbacks */ + atomic_set(&workq->flush, 1); + if (workq->job) { + job = workq->job; + workq->job = NULL; + WORKQ_RELEASE_LOCK(workq, flags); + destroy_workqueue(job); + WORKQ_ACQUIRE_LOCK(workq, flags); + } + /* Destroy workq payload data */ + CAM_MEM_FREE(workq->task.pool[0].payload); + CAM_MEM_FREE(workq->task.pool); + + /* Leave lists in stable state after freeing pool */ + INIT_LIST_HEAD(&workq->task.empty_head); + for (i = 0; i < CRM_TASK_PRIORITY_MAX; i++) + 
INIT_LIST_HEAD(&workq->task.process_head[i]); + *crm_workq = NULL; + WORKQ_RELEASE_LOCK(workq, flags); + CAM_MEM_FREE(workq); + } +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_workq.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_workq.h new file mode 100644 index 0000000000..6a2f95d564 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_req_mgr_workq.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_REQ_MGR_WORKQ_H_ +#define _CAM_REQ_MGR_WORKQ_H_ + +#include +#include +#include +#include +#include +#include +#include + +/* Threshold for scheduling delay in ms */ +#define CAM_WORKQ_SCHEDULE_TIME_THRESHOLD 5 + +/* Threshold for execution delay in ms */ +#define CAM_WORKQ_EXE_TIME_THRESHOLD 10 + +/* Flag to create a high priority workq */ +#define CAM_WORKQ_FLAG_HIGH_PRIORITY (1 << 0) + +/* + * This flag ensures only one task from a given + * workq will execute at any given point on any + * given CPU. + */ +#define CAM_WORKQ_FLAG_SERIAL (1 << 1) + +/* Task priorities, lower the number higher the priority*/ +enum crm_task_priority { + CRM_TASK_PRIORITY_0, + CRM_TASK_PRIORITY_1, + CRM_TASK_PRIORITY_MAX, +}; + +/* workqueue will be used from irq context or not */ +enum crm_workq_context { + CRM_WORKQ_USAGE_NON_IRQ, + CRM_WORKQ_USAGE_IRQ, + CRM_WORKQ_USAGE_INVALID, +}; + +/** struct crm_workq_task + * @priority : caller can assign priority to task based on type. 
 * @payload : depending on the user of the task, this payload type will change
 * @process_cb : registered callback called by workq when task enqueued is
 * ready for processing in workq thread context
 * @parent : workq's parent is the link which is enqueueing tasks to this workq
 * @entry : list head of this list entry is worker's empty_head
 * @cancel : if caller has got free task from pool but wants to abort
 * or put back without using it
 * @priv : when task is enqueued, caller can attach priv along which
 * it will get in process callback
 * @ret : return value in future to use for blocking calls
 * @task_scheduled_ts: enqueue time of task
 */
struct crm_workq_task {
	int32_t priority;
	int32_t ret;
	void *payload;
	int32_t (*process_cb)(void *priv, void *data);
	void *parent;
	struct list_head entry;
	uint8_t cancel;
	void *priv;
	ktime_t task_scheduled_ts;
};

/** struct cam_req_mgr_core_workq
 * @work : work token used by workqueue
 * @job : workqueue internal job struct
 * @lock_bh : lock for task structs
 * @in_irq : set true if workqueue can be used in irq context
 * @flush : used to track if flush has been called on workqueue
 * @workq_name : name of the workq (field was documented as @work_q_name)
 * @workq_scheduled_ts: enqueue time of workq
 * task -
 * @lock : Current task's lock handle
 * @pending_cnt : # of tasks left in queue
 * @free_cnt : # of free/available tasks
 * @process_head: per-priority lists of tasks awaiting processing
 * @empty_head : list head of available tasks which can be used
 * or acquired in order to enqueue a task to workq
 * @pool : pool of tasks used for handling events in workq context
 * @num_task : size of tasks pool
 */
struct cam_req_mgr_core_workq {
	struct work_struct work;
	struct workqueue_struct *job;
	spinlock_t lock_bh;
	uint32_t in_irq;
	ktime_t workq_scheduled_ts;
	atomic_t flush;
	char workq_name[128];

	/* tasks */
	struct {
		struct mutex lock;
		atomic_t pending_cnt;
		atomic_t free_cnt;

		struct list_head process_head[CRM_TASK_PRIORITY_MAX];
		struct list_head empty_head;
		struct crm_workq_task *pool;
		uint32_t num_task;
	} task;
};

/**
 * struct cam_req_mgr_core_workq_mini_dump
 * @workq_scheduled_ts: scheduled ts
 * task -
 * @pending_cnt : # of tasks left in queue
 * @free_cnt : # of free/available tasks
 * @num_task : size of tasks pool
 */
struct cam_req_mgr_core_workq_mini_dump {
	ktime_t workq_scheduled_ts;
	/* tasks */
	struct {
		uint32_t pending_cnt;
		uint32_t free_cnt;
		uint32_t num_task;
	} task;
};
/**
 * cam_req_mgr_process_workq() - main loop handling
 * @w: workqueue task pointer
 */
void cam_req_mgr_process_workq(struct work_struct *w);

/**
 * cam_req_mgr_workq_create()
 * @brief : create a workqueue
 * @name : Name of the workqueue to be allocated; it is a combination
 * of session handle and link handle
 * @num_task : Num_tasks to be allocated for workq
 * @workq : Double pointer worker
 * @in_irq : Set to one if workq might be used in irq context
 * @flags : Bitwise OR of Flags for workq behavior.
 * e.g. CAM_REQ_MGR_WORKQ_HIGH_PRIORITY | CAM_REQ_MGR_WORKQ_SERIAL
 * @func : function pointer for cam_req_mgr_process_workq wrapper function
 * This function will allocate and create workqueue and pass
 * the workq pointer to caller.
 */
int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq,
	int flags, void (*func)(struct work_struct *w));

/**
 * cam_req_mgr_workq_destroy()
 * @brief: destroy workqueue
 * @workq: pointer to worker data struct
 * this function will destroy workqueue and clean up resources
 * associated with worker such as tasks.
 */
void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **workq);

/**
 * cam_req_mgr_workq_enqueue_task()
 * @brief: Enqueue task in worker queue
 * @task : task to be processed by worker
 * @priv : client's private data
 * @prio : task priority
 * process callback func
 */
int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
	void *priv, int32_t prio);

/**
 * cam_req_mgr_workq_get_task()
 * @brief: Returns empty task pointer for use
 * @workq: workqueue used for processing
 */
struct crm_workq_task *cam_req_mgr_workq_get_task(
	struct cam_req_mgr_core_workq *workq);

/**
 * cam_req_mgr_workq_flush()
 * @brief: Flushes the work queue. Function will sleep until any active task is complete.
 * @workq: pointer to worker data struct
 */
void cam_req_mgr_workq_flush(struct cam_req_mgr_core_workq *workq);

#endif
diff --git a/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_subdev.h b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_subdev.h
new file mode 100644
index 0000000000..626976efd8
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_req_mgr/cam_subdev.h
@@ -0,0 +1,259 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#ifndef _CAM_SUBDEV_H_ +#define _CAM_SUBDEV_H_ + +#include +#include +#include +#include +#include +#include +#include + +#define CAM_SUBDEVICE_EVENT_MAX 30 + + +/* Enum for indicating CSID event */ +enum cam_subdev_phy_csid_state { + CAM_SUBDEV_PHY_CSID_HALT = 1, + CAM_SUBDEV_PHY_CSID_RESUME, +}; + +/** + * struct cam_subdev_msg_phy_conn_csid_info: Contains phy num and connected CSID info + * + * @phy_idx: Phy idx value indicating which phy is connected to csid core + * @lane_cfg: This value is similar to lane_assign in the PHY + * driver, and is used to identify the particular + * PHY instance with which this IFE session is + * connected to. + * @core_idx: Primary(in case of dual ife) core idx for the csid to which the phy + * is connected, -1 while disconnecting + */ +struct cam_subdev_msg_phy_conn_csid_info { + uint32_t phy_idx; + uint32_t lane_cfg; + int32_t core_idx; +}; + +/** + * struct cam_subdev_msg_phy_drv_info: Contains drv config info + * + * @phy_idx: Phy idx value indicating which phy is connected to csid core + * @lane_cfg: This value is similar to lane_assign in the PHY + * driver, and is used to identify the particular + * PHY instance with which this IFE session is + * connected to. + * @use_hw_client_voting: Whether to use hw client voting for csiphy clk + * @is_drv_config_en: If DRV config is enabled in CSID + */ +struct cam_subdev_msg_phy_drv_info { + uint32_t phy_idx; + uint32_t lane_cfg; + bool use_hw_client_voting; + bool is_drv_config_en; +}; + +/** + * struct cam_subdev_msg_phy_halt_resume_info: Contains csid halt resume info + * + * @phy_idx: Phy idx value indicating which phy is connected to csid core + * @lane_cfg: This value is similar to lane_assign in the PHY + * driver, and is used to identify the particular + * PHY instance with which this IFE session is + * connected to. 
+ * @csid_state: Notification of CSID state + * @do_drv_ops: Only if set perform the drv related ops + * @reset_phy: Reset PHY + */ +struct cam_subdev_msg_phy_halt_resume_info { + uint32_t phy_idx; + uint32_t lane_cfg; + enum cam_subdev_phy_csid_state csid_state; + bool do_drv_ops; + bool reset_resume_phy; +}; + +/** + * struct cam_subdev_msg_cdr_sweep_info: Contains all relevant info wrt + * CSID register values needed as + * part of CSIPHY CDR tuning + * + * @phy_idx: Phy idx value indicating which phy is connected to csid core + * @lane_cfg: This value is similar to lane_assign in the PHY + * driver, and is used to identify the particular + * PHY instance with which this IFE session is + * connected to + * @csi2_rx_status: Rx irq status register value + * @csi2_rx_total_crc_err: Rx total crc err register value + * @csi2_rx_total_pkts_rcvd: Rx total pkts rcvd register value + * @csi2_err_seen: If CSI RX has an error + * @epd_enabled: If sensor streaming on this CSI is + * EPD enabled + */ +struct cam_subdev_msg_cdr_sweep_info { + uint32_t phy_idx; + uint32_t lane_cfg; + uint32_t csi2_rx_status; + uint32_t csi2_rx_total_crc_err; + uint32_t csi2_rx_total_pkts_rcvd; + bool csi2_err_seen; + bool epd_enabled; +}; + +enum cam_subdev_message_type_t { + CAM_SUBDEV_MESSAGE_REG_DUMP = 0x1, + CAM_SUBDEV_MESSAGE_APPLY_CSIPHY_AUX, + CAM_SUBDEV_MESSAGE_DOMAIN_ID_SECURE_PARAMS, + CAM_SUBDEV_MESSAGE_CONN_CSID_INFO, + CAM_SUBDEV_MESSAGE_DRV_INFO, + CAM_SUBDEV_MESSAGE_NOTIFY_HALT_RESUME, + CAM_SUBDEV_MESSAGE_CLOCK_UPDATE, + CAM_SUBDEV_MESSAGE_CDR_SWEEP +}; + +/* Enum for close sequence priority */ +enum cam_subdev_close_seq_priority { + CAM_SD_CLOSE_HIGH_PRIORITY, + CAM_SD_CLOSE_MEDIUM_PRIORITY, + CAM_SD_CLOSE_MEDIUM_LOW_PRIORITY, + CAM_SD_CLOSE_LOW_PRIORITY +}; + +enum cam_subdev_rwsem { + CAM_SUBDEV_LOCK = 1, + CAM_SUBDEV_UNLOCK, +}; + +/** + * struct cam_subdev - describes a camera sub-device + * + * @pdev: Pointer to the platform device + * @sd: V4l2 subdevice + * @ops: V4l2 
subdecie operations + * @internal_ops: V4l2 subdevice internal operations + * @name: Name of the sub-device. Please notice that the name + * must be unique. + * @sd_flags: Subdev flags. Can be: + * %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if + * this subdev needs a device node. + * %V4L2_SUBDEV_FL_HAS_EVENTS - Set this flag if + * this subdev generates events. + * @token: Pointer to cookie of the client driver + * @ent_function: Media entity function type. Can be: + * %CAM_IFE_DEVICE_TYPE - identifies as IFE device. + * %CAM_ICP_DEVICE_TYPE - identifies as ICP device. + * @list: list pointer + * @close_seq_prior: cam_subdev_close_seq_priority type + * + * Each instance of a subdev driver should create this struct, either + * stand-alone or embedded in a larger struct. This structure should be + * initialized/registered by cam_register_subdev + * + */ +struct cam_subdev { + struct platform_device *pdev; + struct v4l2_subdev sd; + const struct v4l2_subdev_ops *ops; + const struct v4l2_subdev_internal_ops *internal_ops; + char *name; + u32 sd_flags; + void *token; + u32 ent_function; + void (*msg_cb)( + struct v4l2_subdev *sd, + enum cam_subdev_message_type_t msg_type, + void *data); + struct list_head list; + enum cam_subdev_close_seq_priority close_seq_prior; +}; + +/** + * cam_subdev_notify_message() + * + * @brief: Notify message to a subdevs of specific type + * + * @subdev_type: Subdev type + * @message_type: message type + * @data: data to be delivered. 
+ * + */ +void cam_subdev_notify_message(u32 subdev_type, + enum cam_subdev_message_type_t message_type, + void *data); + +/** + * cam_subdev_probe() + * + * @brief: Camera Subdevice node probe function for v4l2 setup + * + * @sd: Camera subdevice object + * @name: Name of the subdevice node + * @dev_type: Subdevice node type + * + */ +int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev, + char *name, uint32_t dev_type); + +/** + * cam_subdev_remove() + * + * @brief: Called when subdevice node is unloaded + * + * @sd: Camera subdevice node object + * + */ +int cam_subdev_remove(struct cam_subdev *sd); + +/** + * cam_register_subdev() + * + * @brief: This is the common utility function to be called by each camera + * subdevice node when it tries to register itself to the camera + * request manager + * + * @sd: Pointer to struct cam_subdev. + */ +int cam_register_subdev(struct cam_subdev *sd); + +/** + * cam_unregister_subdev() + * + * @brief: This is the common utility function to be called by each camera + * subdevice node when it tries to unregister itself from the + * camera request manger + * + * @sd: Pointer to struct cam_subdev. + */ +int cam_unregister_subdev(struct cam_subdev *sd); + +/** + * cam_req_mgr_rwsem_read_op() + * + * @brief : API to acquire read semaphore lock to platform framework. 
+ * + * @lock : value indicates to lock or unlock the read lock + */ +void cam_req_mgr_rwsem_read_op(enum cam_subdev_rwsem lock); + +/** + * cam_req_mgr_is_open() + * + * @brief: This common utility function returns the crm active status + * + */ +bool cam_req_mgr_is_open(void); + +/** + * cam_req_mgr_is_shutdown() + * + * @brief: This common utility function returns the shutdown state + */ +bool cam_req_mgr_is_shutdown(void); + +#endif /* _CAM_SUBDEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c new file mode 100644 index 0000000000..59b0c06141 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c @@ -0,0 +1,1151 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include "cam_actuator_core.h" +#include "cam_sensor_util.h" +#include "cam_trace.h" +#include "cam_common_util.h" +#include "cam_packet_util.h" +#include "cam_mem_mgr_api.h" + +int32_t cam_actuator_construct_default_power_setting( + struct cam_sensor_power_ctrl_t *power_info) +{ + int rc = 0; + + power_info->power_setting_size = 1; + power_info->power_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_power_setting), + GFP_KERNEL); + if (!power_info->power_setting) + return -ENOMEM; + + power_info->power_setting[0].seq_type = SENSOR_VAF; + power_info->power_setting[0].seq_val = CAM_VAF; + power_info->power_setting[0].config_val = 1; + power_info->power_setting[0].delay = 2; + + power_info->power_down_setting_size = 1; + power_info->power_down_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_power_setting), + GFP_KERNEL); + if (!power_info->power_down_setting) { + rc = -ENOMEM; + goto free_power_settings; + } + + power_info->power_down_setting[0].seq_type = SENSOR_VAF; + power_info->power_down_setting[0].seq_val = CAM_VAF; + power_info->power_down_setting[0].config_val = 0; + + return rc; + +free_power_settings: + CAM_MEM_FREE(power_info->power_setting); + power_info->power_setting = NULL; + power_info->power_setting_size = 0; + return rc; +} + +static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info; + struct cam_actuator_soc_private *soc_private; + struct cam_sensor_power_ctrl_t *power_info; + struct completion *i3c_probe_completion = NULL; + + soc_private = + (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private; + power_info = &soc_private->power_info; + + if ((power_info->power_setting == NULL) && + (power_info->power_down_setting == NULL)) { + CAM_INFO(CAM_ACTUATOR, + "Using default power settings"); + rc = cam_actuator_construct_default_power_setting(power_info); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Construct default 
actuator power setting failed."); + return rc; + } + } + + /* Parse and fill vreg params for power up settings */ + rc = msm_camera_fill_vreg_params( + &a_ctrl->soc_info, + power_info->power_setting, + power_info->power_setting_size); + if (rc) { + CAM_ERR(CAM_ACTUATOR, + "failed to fill vreg params for power up rc:%d", rc); + return rc; + } + + /* Parse and fill vreg params for power down settings*/ + rc = msm_camera_fill_vreg_params( + &a_ctrl->soc_info, + power_info->power_down_setting, + power_info->power_down_setting_size); + if (rc) { + CAM_ERR(CAM_ACTUATOR, + "failed to fill vreg params power down rc:%d", rc); + return rc; + } + + power_info->dev = soc_info->dev; + + if (a_ctrl->io_master_info.master_type == I3C_MASTER) + i3c_probe_completion = cam_actuator_get_i3c_completion(a_ctrl->soc_info.index); + + rc = cam_sensor_core_power_up(power_info, soc_info, i3c_probe_completion); + if (rc) { + CAM_ERR(CAM_ACTUATOR, + "failed in actuator power up rc %d", rc); + return rc; + } + + rc = camera_io_init(&a_ctrl->io_master_info); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "cci init failed: rc: %d", rc); + goto cci_failure; + } + + return rc; +cci_failure: + if (cam_sensor_util_power_down(power_info, soc_info)) + CAM_ERR(CAM_ACTUATOR, "Power down failure"); + + return rc; +} + +static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl) +{ + int32_t rc = 0; + struct cam_sensor_power_ctrl_t *power_info; + struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info; + struct cam_actuator_soc_private *soc_private; + + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, "failed: a_ctrl %pK", a_ctrl); + return -EINVAL; + } + + soc_private = + (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private; + power_info = &soc_private->power_info; + soc_info = &a_ctrl->soc_info; + + if (!power_info) { + CAM_ERR(CAM_ACTUATOR, "failed: power_info %pK", power_info); + return -EINVAL; + } + rc = cam_sensor_util_power_down(power_info, soc_info); + if (rc) { + CAM_ERR(CAM_ACTUATOR, 
"power down the core is failed:%d", rc); + return rc; + } + + camera_io_release(&a_ctrl->io_master_info); + + return rc; +} + +static int32_t cam_actuator_i2c_modes_util( + struct camera_io_master *io_master_info, + struct i2c_settings_list *i2c_list) +{ + int32_t rc = 0; + uint32_t i, size; + + if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_RANDOM) { + rc = camera_io_dev_write(io_master_info, + &(i2c_list->i2c_settings)); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed to random write I2C settings: %d", + rc); + return rc; + } + } else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_SEQ) { + rc = camera_io_dev_write_continuous( + io_master_info, + &(i2c_list->i2c_settings), + CAM_SENSOR_I2C_WRITE_SEQ); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed to seq write I2C settings: %d", + rc); + return rc; + } + } else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_BURST) { + rc = camera_io_dev_write_continuous( + io_master_info, + &(i2c_list->i2c_settings), + CAM_SENSOR_I2C_WRITE_BURST); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed to burst write I2C settings: %d", + rc); + return rc; + } + } else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) { + size = i2c_list->i2c_settings.size; + for (i = 0; i < size; i++) { + rc = camera_io_dev_poll( + io_master_info, + i2c_list->i2c_settings.reg_setting[i].reg_addr, + i2c_list->i2c_settings.reg_setting[i].reg_data, + i2c_list->i2c_settings.reg_setting[i].data_mask, + i2c_list->i2c_settings.addr_type, + i2c_list->i2c_settings.data_type, + i2c_list->i2c_settings.reg_setting[i].delay); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "i2c poll apply setting Fail: %d", rc); + return rc; + } + } + } + + return rc; +} + +int32_t cam_actuator_slaveInfo_pkt_parser(struct cam_actuator_ctrl_t *a_ctrl, + uint32_t *cmd_buf, size_t len) +{ + int32_t rc = 0; + struct cam_cmd_i2c_info *i2c_info; + + if (!a_ctrl || !cmd_buf || (len < sizeof(struct cam_cmd_i2c_info))) { + CAM_ERR(CAM_ACTUATOR, "Invalid Args"); + return -EINVAL; + } + + i2c_info = 
(struct cam_cmd_i2c_info *)cmd_buf; + if (a_ctrl->io_master_info.master_type == CCI_MASTER) { + a_ctrl->io_master_info.cci_client->cci_i2c_master = + a_ctrl->cci_i2c_master; + a_ctrl->io_master_info.cci_client->i2c_freq_mode = + i2c_info->i2c_freq_mode; + a_ctrl->io_master_info.cci_client->sid = + i2c_info->slave_addr >> 1; + CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x Freq Mode: %d", + i2c_info->slave_addr, i2c_info->i2c_freq_mode); + } else if ((a_ctrl->io_master_info.master_type == I2C_MASTER) && + (a_ctrl->io_master_info.qup_client != NULL)) { + a_ctrl->io_master_info.qup_client->i2c_client->addr = i2c_info->slave_addr; + CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x", i2c_info->slave_addr); + } else { + CAM_ERR(CAM_ACTUATOR, "Invalid Master type: %d", + a_ctrl->io_master_info.master_type); + rc = -EINVAL; + } + + return rc; +} + +int32_t cam_actuator_apply_settings(struct cam_actuator_ctrl_t *a_ctrl, + struct i2c_settings_array *i2c_set) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0; + + if (a_ctrl == NULL || i2c_set == NULL) { + CAM_ERR(CAM_ACTUATOR, "Invalid Args"); + return -EINVAL; + } + + if (i2c_set->is_settings_valid != 1) { + CAM_ERR(CAM_ACTUATOR, " Invalid settings"); + return -EINVAL; + } + + list_for_each_entry(i2c_list, + &(i2c_set->list_head), list) { + rc = cam_actuator_i2c_modes_util( + &(a_ctrl->io_master_info), + i2c_list); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed to apply settings: %d", + rc); + } else { + CAM_DBG(CAM_ACTUATOR, + "Success:request ID: %d", + i2c_set->request_id); + } + } + + return rc; +} + +int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply) +{ + int32_t rc = 0, request_id, del_req_id; + struct cam_actuator_ctrl_t *a_ctrl = NULL; + struct i2c_settings_array *i2c_set = NULL; + + if (!apply) { + CAM_ERR(CAM_ACTUATOR, "Invalid Input Args"); + return -EINVAL; + } + + a_ctrl = (struct cam_actuator_ctrl_t *) + cam_get_device_priv(apply->dev_hdl); + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, "Device 
data is NULL"); + return -EINVAL; + } + request_id = apply->request_id % MAX_PER_FRAME_ARRAY; + + trace_cam_apply_req("Actuator", a_ctrl->soc_info.index, apply->request_id, apply->link_hdl); + + CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id); + mutex_lock(&(a_ctrl->actuator_mutex)); + if ((apply->request_id == + a_ctrl->i2c_data.per_frame[request_id].request_id) && + (a_ctrl->i2c_data.per_frame[request_id].is_settings_valid) + == 1) { + rc = cam_actuator_apply_settings(a_ctrl, + &a_ctrl->i2c_data.per_frame[request_id]); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed in applying the request: %lld\n", + apply->request_id); + goto release_mutex; + } + } + + for (del_req_id = 0; del_req_id < MAX_PER_FRAME_ARRAY; del_req_id++) { + i2c_set = &(a_ctrl->i2c_data.per_frame[del_req_id]); + if ((i2c_set->is_settings_valid == 1) && + (apply->request_id > (i2c_set->request_id + MAX_SYSTEM_PIPELINE_DELAY))) { + CAM_DBG(CAM_ACTUATOR, "Clean up per frame[%d] = %lld", + del_req_id, i2c_set->request_id); + i2c_set->request_id = 0; + rc = delete_request(i2c_set); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Fail deleting the req: %d err: %d\n", + del_req_id, rc); + goto release_mutex; + } + } + } + CAM_DBG(CAM_ACTUATOR, "Req Per frame validation check Finished"); + +release_mutex: + mutex_unlock(&(a_ctrl->actuator_mutex)); + return rc; +} + +int32_t cam_actuator_establish_link( + struct cam_req_mgr_core_dev_link_setup *link) +{ + struct cam_actuator_ctrl_t *a_ctrl = NULL; + + if (!link) { + CAM_ERR(CAM_ACTUATOR, "Invalid Args"); + return -EINVAL; + } + + a_ctrl = (struct cam_actuator_ctrl_t *) + cam_get_device_priv(link->dev_hdl); + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, "Device data is NULL"); + return -EINVAL; + } + + mutex_lock(&(a_ctrl->actuator_mutex)); + if (link->link_enable) { + a_ctrl->bridge_intf.link_hdl = link->link_hdl; + a_ctrl->bridge_intf.crm_cb = link->crm_cb; + } else { + a_ctrl->bridge_intf.link_hdl = -1; + a_ctrl->bridge_intf.crm_cb = NULL; + } + 
mutex_unlock(&(a_ctrl->actuator_mutex)); + + return 0; +} + +static int cam_actuator_update_req_mgr( + struct cam_actuator_ctrl_t *a_ctrl, + struct cam_packet *csl_packet) +{ + int rc = 0; + struct cam_req_mgr_add_request add_req; + + memset(&add_req, 0, sizeof(add_req)); + add_req.link_hdl = a_ctrl->bridge_intf.link_hdl; + add_req.req_id = csl_packet->header.request_id; + add_req.dev_hdl = a_ctrl->bridge_intf.device_hdl; + + if (a_ctrl->bridge_intf.crm_cb && + a_ctrl->bridge_intf.crm_cb->add_req) { + rc = a_ctrl->bridge_intf.crm_cb->add_req(&add_req); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_ACTUATOR, + "Adding request: %llu failed: rc: %d, it has been flushed", + csl_packet->header.request_id, rc); + else + CAM_ERR(CAM_ACTUATOR, + "Adding request: %llu failed: rc: %d", + csl_packet->header.request_id, rc); + return rc; + } + CAM_DBG(CAM_ACTUATOR, "Request Id: %lld added to CRM", + add_req.req_id); + } else { + CAM_ERR(CAM_ACTUATOR, "Can't add Request ID: %lld to CRM", + csl_packet->header.request_id); + rc = -EINVAL; + } + + return rc; +} + +int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info) +{ + struct cam_actuator_ctrl_t *a_ctrl; + + if (!info) { + CAM_ERR(CAM_ACTUATOR, "Invalid Args"); + return -EINVAL; + } + + a_ctrl = (struct cam_actuator_ctrl_t *) + cam_get_device_priv(info->dev_hdl); + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, "Device data is NULL"); + return -EINVAL; + } + + info->dev_id = CAM_REQ_MGR_DEVICE_ACTUATOR; + snprintf(info->name, sizeof(info->name), "%s(camera-actuator%u)", + CAM_ACTUATOR_NAME, a_ctrl->soc_info.index); + info->p_delay = CAM_PIPELINE_DELAY_1; + info->m_delay = CAM_MODESWITCH_DELAY_1; + info->trigger = CAM_TRIGGER_POINT_SOF; + + return 0; +} + +int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl, + void *arg) +{ + int32_t rc = 0; + int32_t i = 0; + uint32_t total_cmd_buf_in_bytes = 0; + size_t len_of_buff = 0; + size_t remain_len = 0; + uint32_t *offset = NULL; + uint32_t *cmd_buf = 
NULL; + uintptr_t generic_ptr; + uintptr_t generic_pkt_ptr; + struct common_header *cmm_hdr = NULL; + struct cam_control *ioctl_ctrl = NULL; + struct cam_packet *csl_packet = NULL; + struct cam_packet *csl_packet_u = NULL; + struct cam_config_dev_cmd config; + struct i2c_data_settings *i2c_data = NULL; + struct i2c_settings_array *i2c_reg_settings = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_actuator_soc_private *soc_private = NULL; + struct cam_sensor_power_ctrl_t *power_info = NULL; + + if (!a_ctrl || !arg) { + CAM_ERR(CAM_ACTUATOR, "Invalid Args"); + return -EINVAL; + } + + soc_private = + (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private; + + power_info = &soc_private->power_info; + + ioctl_ctrl = (struct cam_control *)arg; + if (copy_from_user(&config, + u64_to_user_ptr(ioctl_ctrl->handle), + sizeof(config))) + return -EFAULT; + rc = cam_mem_get_cpu_buf(config.packet_handle, + &generic_pkt_ptr, &len_of_buff); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "Error in converting command Handle %d", + rc); + return rc; + } + + remain_len = len_of_buff; + if ((sizeof(struct cam_packet) > len_of_buff) || + ((size_t)config.offset >= len_of_buff - + sizeof(struct cam_packet))) { + CAM_ERR(CAM_ACTUATOR, + "Inval cam_packet strut size: %zu, len_of_buff: %zu", + sizeof(struct cam_packet), len_of_buff); + rc = -EINVAL; + goto put_buf; + } + + remain_len -= (size_t)config.offset; + csl_packet_u = (struct cam_packet *) + (generic_pkt_ptr + (uint32_t)config.offset); + rc = cam_packet_util_copy_pkt_to_kmd(csl_packet_u, &csl_packet, remain_len); + if (rc) { + CAM_ERR(CAM_ACTUATOR, "Copying packet to KMD failed"); + goto end; + } + + CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code); + + if ((csl_packet->header.op_code & 0xFFFFFF) != + CAM_ACTUATOR_PACKET_OPCODE_INIT && + csl_packet->header.request_id <= a_ctrl->last_flush_req + && a_ctrl->last_flush_req != 0) { + CAM_DBG(CAM_ACTUATOR, + "reject request %lld, last request to flush 
%lld", + csl_packet->header.request_id, a_ctrl->last_flush_req); + rc = -EBADR; + goto end; + } + + if (csl_packet->header.request_id > a_ctrl->last_flush_req) + a_ctrl->last_flush_req = 0; + + switch (csl_packet->header.op_code & 0xFFFFFF) { + case CAM_ACTUATOR_PACKET_OPCODE_INIT: + offset = (uint32_t *)&csl_packet->payload_flex; + offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t)); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + + /* Loop through multiple command buffers */ + for (i = 0; i < csl_packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + goto end; + + total_cmd_buf_in_bytes = cmd_desc[i].length; + if (!total_cmd_buf_in_bytes) + continue; + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + &generic_ptr, &len_of_buff); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf"); + goto end; + } + cmd_buf = (uint32_t *)generic_ptr; + if (!cmd_buf) { + CAM_ERR(CAM_ACTUATOR, "invalid cmd buf"); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + rc = -EINVAL; + goto end; + } + if ((len_of_buff < sizeof(struct common_header)) || + (cmd_desc[i].offset > (len_of_buff - + sizeof(struct common_header)))) { + CAM_ERR(CAM_ACTUATOR, + "Invalid length for sensor cmd"); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + rc = -EINVAL; + goto end; + } + remain_len = len_of_buff - cmd_desc[i].offset; + cmd_buf += cmd_desc[i].offset / sizeof(uint32_t); + cmm_hdr = (struct common_header *)cmd_buf; + + switch (cmm_hdr->cmd_type) { + case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: + CAM_DBG(CAM_ACTUATOR, + "Received slave info buffer"); + rc = cam_actuator_slaveInfo_pkt_parser( + a_ctrl, cmd_buf, remain_len); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed to parse slave info: %d", rc); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + goto end; + } + break; + case CAMERA_SENSOR_CMD_TYPE_PWR_UP: + case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN: + CAM_DBG(CAM_ACTUATOR, + "Received power settings buffer"); + rc = 
cam_sensor_update_power_settings( + cmd_buf, + total_cmd_buf_in_bytes, + power_info, remain_len); + if (rc) { + CAM_ERR(CAM_ACTUATOR, + "Failed:parse power settings: %d", + rc); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + goto end; + } + break; + default: + CAM_DBG(CAM_ACTUATOR, + "Received initSettings buffer"); + i2c_data = &(a_ctrl->i2c_data); + i2c_reg_settings = + &i2c_data->init_settings; + + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = 1; + rc = cam_sensor_i2c_command_parser( + &a_ctrl->io_master_info, + i2c_reg_settings, + &cmd_desc[i], 1, NULL); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed:parse init settings: %d", + rc); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + goto end; + } + break; + } + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + } + + if (a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE) { + rc = cam_actuator_power_up(a_ctrl); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + " Actuator Power up failed"); + goto end; + } + a_ctrl->cam_act_state = CAM_ACTUATOR_CONFIG; + } + + rc = cam_actuator_apply_settings(a_ctrl, + &a_ctrl->i2c_data.init_settings); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings"); + goto end; + } + + /* Delete the request even if the apply is failed */ + rc = delete_request(&a_ctrl->i2c_data.init_settings); + if (rc < 0) { + CAM_WARN(CAM_ACTUATOR, + "Fail in deleting the Init settings"); + rc = 0; + } + break; + case CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS: + if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) { + rc = -EINVAL; + CAM_WARN(CAM_ACTUATOR, + "Not in right state to move lens: %d", + a_ctrl->cam_act_state); + goto end; + } + a_ctrl->setting_apply_state = ACT_APPLY_SETTINGS_NOW; + + i2c_data = &(a_ctrl->i2c_data); + i2c_reg_settings = &i2c_data->init_settings; + + i2c_data->init_settings.request_id = + csl_packet->header.request_id; + i2c_reg_settings->is_settings_valid = 1; + offset = (uint32_t *)&csl_packet->payload_flex; + offset += csl_packet->cmd_buf_offset / 
sizeof(uint32_t); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_sensor_i2c_command_parser( + &a_ctrl->io_master_info, + i2c_reg_settings, + cmd_desc, 1, NULL); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Auto move lens parsing failed: %d", rc); + goto end; + } + rc = cam_actuator_update_req_mgr(a_ctrl, csl_packet); + if (rc) { + CAM_ERR(CAM_ACTUATOR, + "Failed in adding request to request manager"); + goto end; + } + break; + case CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS: + if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) { + rc = -EINVAL; + CAM_WARN(CAM_ACTUATOR, + "Not in right state to move lens: %d", + a_ctrl->cam_act_state); + goto end; + } + + a_ctrl->setting_apply_state = ACT_APPLY_SETTINGS_LATER; + i2c_data = &(a_ctrl->i2c_data); + i2c_reg_settings = &i2c_data->per_frame[ + csl_packet->header.request_id % MAX_PER_FRAME_ARRAY]; + + i2c_reg_settings->request_id = + csl_packet->header.request_id; + i2c_reg_settings->is_settings_valid = 1; + offset = (uint32_t *)&csl_packet->payload_flex; + offset += csl_packet->cmd_buf_offset / sizeof(uint32_t); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_sensor_i2c_command_parser( + &a_ctrl->io_master_info, + i2c_reg_settings, + cmd_desc, 1, NULL); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Manual move lens parsing failed: %d", rc); + goto end; + } + + rc = cam_actuator_update_req_mgr(a_ctrl, csl_packet); + if (rc) { + CAM_ERR(CAM_ACTUATOR, + "Failed in adding request to request manager"); + goto end; + } + break; + case CAM_ACTUATOR_PACKET_NOP_OPCODE: + if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) { + CAM_WARN(CAM_ACTUATOR, + "Received NOP packets in invalid state: %d", + a_ctrl->cam_act_state); + rc = -EINVAL; + goto end; + } + rc = cam_actuator_update_req_mgr(a_ctrl, csl_packet); + if (rc) { + CAM_ERR(CAM_ACTUATOR, + "Failed in adding request to request manager"); + goto end; + } + break; + case CAM_ACTUATOR_PACKET_OPCODE_READ: { + uint64_t qtime_ns; + struct cam_buf_io_cfg *io_cfg; + struct 
i2c_settings_array i2c_read_settings; + + if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) { + rc = -EINVAL; + CAM_WARN(CAM_ACTUATOR, + "Not in right state to read actuator: %d", + a_ctrl->cam_act_state); + goto end; + } + CAM_DBG(CAM_ACTUATOR, "number of I/O configs: %d:", + csl_packet->num_io_configs); + if (csl_packet->num_io_configs == 0) { + CAM_ERR(CAM_ACTUATOR, "No I/O configs to process"); + rc = -EINVAL; + goto end; + } + + INIT_LIST_HEAD(&(i2c_read_settings.list_head)); + + io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *) + &csl_packet->payload_flex + + csl_packet->io_configs_offset); + + if (io_cfg == NULL) { + CAM_ERR(CAM_ACTUATOR, "I/O config is invalid(NULL)"); + rc = -EINVAL; + goto end; + } + + offset = (uint32_t *)&csl_packet->payload_flex; + offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t)); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + i2c_read_settings.is_settings_valid = 1; + i2c_read_settings.request_id = 0; + rc = cam_sensor_i2c_command_parser(&a_ctrl->io_master_info, + &i2c_read_settings, + cmd_desc, 1, io_cfg); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "actuator read pkt parsing failed: %d", rc); + goto end; + } + + rc = cam_sensor_i2c_read_data( + &i2c_read_settings, + &a_ctrl->io_master_info); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "cannot read data, rc:%d", rc); + delete_request(&i2c_read_settings); + goto end; + } + + if (csl_packet->num_io_configs > 1) { + rc = cam_sensor_util_get_current_qtimer_ns(&qtime_ns); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "failed to get qtimer rc:%d"); + delete_request(&i2c_read_settings); + goto end; + } + + rc = cam_sensor_util_write_qtimer_to_io_buffer( + qtime_ns, &io_cfg[1]); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "write qtimer failed rc: %d", rc); + delete_request(&i2c_read_settings); + goto end; + } + } + + rc = delete_request(&i2c_read_settings); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed in deleting the read settings"); + goto end; + } + break; + } + default: + 
CAM_ERR(CAM_ACTUATOR, "Wrong Opcode: %d", + csl_packet->header.op_code & 0xFFFFFF); + rc = -EINVAL; + goto end; + } + +end: + cam_common_mem_free(csl_packet); +put_buf: + cam_mem_put_cpu_buf(config.packet_handle); + return rc; +} + +void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl) +{ + int rc = 0; + struct cam_actuator_soc_private *soc_private = + (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = + &soc_private->power_info; + + if (a_ctrl->cam_act_state == CAM_ACTUATOR_INIT) + return; + + if (a_ctrl->cam_act_state >= CAM_ACTUATOR_CONFIG) { + rc = cam_actuator_power_down(a_ctrl); + if (rc < 0) + CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed"); + a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE; + } + + if (a_ctrl->cam_act_state >= CAM_ACTUATOR_ACQUIRE) { + rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl); + if (rc < 0) + CAM_ERR(CAM_ACTUATOR, "destroying dhdl failed"); + a_ctrl->bridge_intf.device_hdl = -1; + a_ctrl->bridge_intf.link_hdl = -1; + a_ctrl->bridge_intf.session_hdl = -1; + } + + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_setting_size = 0; + power_info->power_down_setting_size = 0; + a_ctrl->last_flush_req = 0; + + a_ctrl->cam_act_state = CAM_ACTUATOR_INIT; +} + +int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl, + void *arg) +{ + int rc = 0; + struct cam_control *cmd = (struct cam_control *)arg; + struct cam_actuator_soc_private *soc_private = NULL; + struct cam_sensor_power_ctrl_t *power_info = NULL; + + if (!a_ctrl || !cmd) { + CAM_ERR(CAM_ACTUATOR, "Invalid Args"); + return -EINVAL; + } + + soc_private = + (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private; + + power_info = &soc_private->power_info; + + if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_ACTUATOR, "Invalid handle 
type: %d", + cmd->handle_type); + return -EINVAL; + } + + CAM_DBG(CAM_ACTUATOR, "Opcode to Actuator: %d", cmd->op_code); + + mutex_lock(&(a_ctrl->actuator_mutex)); + switch (cmd->op_code) { + case CAM_ACQUIRE_DEV: { + struct cam_sensor_acquire_dev actuator_acq_dev; + struct cam_create_dev_hdl bridge_params; + + if (a_ctrl->bridge_intf.device_hdl != -1) { + CAM_ERR(CAM_ACTUATOR, "Device is already acquired"); + rc = -EINVAL; + goto release_mutex; + } + rc = copy_from_user(&actuator_acq_dev, + u64_to_user_ptr(cmd->handle), + sizeof(actuator_acq_dev)); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "Failed Copying from user\n"); + goto release_mutex; + } + + bridge_params.session_hdl = actuator_acq_dev.session_handle; + bridge_params.ops = &a_ctrl->bridge_intf.ops; + bridge_params.v4l2_sub_dev_flag = 0; + bridge_params.media_entity_flag = 0; + bridge_params.priv = a_ctrl; + bridge_params.dev_id = CAM_ACTUATOR; + + actuator_acq_dev.device_handle = + cam_create_device_hdl(&bridge_params); + if (actuator_acq_dev.device_handle <= 0) { + rc = -EFAULT; + CAM_ERR(CAM_ACTUATOR, "Can not create device handle"); + goto release_mutex; + } + a_ctrl->bridge_intf.device_hdl = actuator_acq_dev.device_handle; + a_ctrl->bridge_intf.session_hdl = + actuator_acq_dev.session_handle; + + CAM_DBG(CAM_ACTUATOR, "Device Handle: %d", + actuator_acq_dev.device_handle); + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &actuator_acq_dev, + sizeof(struct cam_sensor_acquire_dev))) { + CAM_ERR(CAM_ACTUATOR, "Failed Copy to User"); + rc = -EFAULT; + goto release_mutex; + } + + a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE; + } + break; + case CAM_RELEASE_DEV: { + if (a_ctrl->cam_act_state == CAM_ACTUATOR_START) { + rc = -EINVAL; + CAM_WARN(CAM_ACTUATOR, + "Cant release actuator: in start state"); + goto release_mutex; + } + + if (a_ctrl->bridge_intf.device_hdl == -1) { + CAM_ERR(CAM_ACTUATOR, "link hdl: %d device hdl: %d", + a_ctrl->bridge_intf.device_hdl, + a_ctrl->bridge_intf.link_hdl); + rc = -EINVAL; 
+ goto release_mutex; + } + + if (a_ctrl->cam_act_state == CAM_ACTUATOR_CONFIG) { + rc = cam_actuator_power_down(a_ctrl); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Actuator Power Down Failed"); + goto release_mutex; + } + } + + if (a_ctrl->bridge_intf.link_hdl != -1) { + CAM_ERR(CAM_ACTUATOR, + "Device [%d] still active on link 0x%x", + a_ctrl->cam_act_state, + a_ctrl->bridge_intf.link_hdl); + rc = -EAGAIN; + goto release_mutex; + } + + rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl); + if (rc < 0) + CAM_ERR(CAM_ACTUATOR, "destroying the device hdl"); + a_ctrl->bridge_intf.device_hdl = -1; + a_ctrl->bridge_intf.link_hdl = -1; + a_ctrl->bridge_intf.session_hdl = -1; + a_ctrl->cam_act_state = CAM_ACTUATOR_INIT; + a_ctrl->last_flush_req = 0; + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_down_setting_size = 0; + power_info->power_setting_size = 0; + } + break; + case CAM_QUERY_CAP: { + struct cam_actuator_query_cap actuator_cap = {0}; + + actuator_cap.slot_info = a_ctrl->soc_info.index; + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &actuator_cap, + sizeof(struct cam_actuator_query_cap))) { + CAM_ERR(CAM_ACTUATOR, "Failed Copy to User"); + rc = -EFAULT; + goto release_mutex; + } + } + break; + case CAM_START_DEV: { + if (a_ctrl->cam_act_state != CAM_ACTUATOR_CONFIG) { + rc = -EINVAL; + CAM_WARN(CAM_ACTUATOR, + "Not in right state to start : %d", + a_ctrl->cam_act_state); + goto release_mutex; + } + a_ctrl->cam_act_state = CAM_ACTUATOR_START; + a_ctrl->last_flush_req = 0; + } + break; + case CAM_STOP_DEV: { + struct i2c_settings_array *i2c_set = NULL; + int i; + + if (a_ctrl->cam_act_state != CAM_ACTUATOR_START) { + rc = -EINVAL; + CAM_WARN(CAM_ACTUATOR, + "Not in right state to stop : %d", + a_ctrl->cam_act_state); + goto release_mutex; + } + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = 
&(a_ctrl->i2c_data.per_frame[i]); + + if (i2c_set->is_settings_valid == 1) { + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "delete request: %lld rc: %d", + i2c_set->request_id, rc); + } + } + a_ctrl->last_flush_req = 0; + a_ctrl->cam_act_state = CAM_ACTUATOR_CONFIG; + } + break; + case CAM_CONFIG_DEV: { + a_ctrl->setting_apply_state = + ACT_APPLY_SETTINGS_LATER; + rc = cam_actuator_i2c_pkt_parse(a_ctrl, arg); + if (rc < 0) { + if (rc == -EBADR) + CAM_INFO(CAM_ACTUATOR, + "Failed in actuator Parsing, it has been flushed"); + else + CAM_ERR(CAM_ACTUATOR, + "Failed in actuator Parsing"); + goto release_mutex; + } + + if (a_ctrl->setting_apply_state == + ACT_APPLY_SETTINGS_NOW) { + rc = cam_actuator_apply_settings(a_ctrl, + &a_ctrl->i2c_data.init_settings); + if ((rc == -EAGAIN) && + (a_ctrl->io_master_info.master_type == CCI_MASTER)) { + CAM_WARN(CAM_ACTUATOR, + "CCI HW is in resetting mode:: Reapplying Init settings"); + usleep_range(1000, 1010); + rc = cam_actuator_apply_settings(a_ctrl, + &a_ctrl->i2c_data.init_settings); + } + + if (rc < 0) + CAM_ERR(CAM_ACTUATOR, + "Failed to apply Init settings: rc = %d", + rc); + /* Delete the request even if the apply is failed */ + rc = delete_request(&a_ctrl->i2c_data.init_settings); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "Failed in Deleting the Init Pkt: %d", + rc); + goto release_mutex; + } + } + } + break; + default: + CAM_ERR(CAM_ACTUATOR, "Invalid Opcode %d", cmd->op_code); + } + +release_mutex: + mutex_unlock(&(a_ctrl->actuator_mutex)); + + return rc; +} + +int32_t cam_actuator_flush_request(struct cam_req_mgr_flush_request *flush_req) +{ + int32_t rc = 0, i; + uint32_t cancel_req_id_found = 0; + struct cam_actuator_ctrl_t *a_ctrl = NULL; + struct i2c_settings_array *i2c_set = NULL; + + if (!flush_req) + return -EINVAL; + + a_ctrl = (struct cam_actuator_ctrl_t *) + cam_get_device_priv(flush_req->dev_hdl); + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, "Device data is NULL"); + return -EINVAL; + } + 
+ if (a_ctrl->i2c_data.per_frame == NULL) { + CAM_ERR(CAM_ACTUATOR, "i2c frame data is NULL"); + return -EINVAL; + } + + mutex_lock(&(a_ctrl->actuator_mutex)); + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { + a_ctrl->last_flush_req = flush_req->req_id; + CAM_DBG(CAM_ACTUATOR, "last reqest to flush is %lld", + flush_req->req_id); + } + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(a_ctrl->i2c_data.per_frame[i]); + + if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) + && (i2c_set->request_id != flush_req->req_id)) + continue; + + if (i2c_set->is_settings_valid == 1) { + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_ACTUATOR, + "delete request: %lld rc: %d", + i2c_set->request_id, rc); + + if (flush_req->type == + CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + cancel_req_id_found = 1; + break; + } + } + } + + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ && + !cancel_req_id_found) + CAM_DBG(CAM_ACTUATOR, + "Flush request id:%lld not found in the pending list", + flush_req->req_id); + mutex_unlock(&(a_ctrl->actuator_mutex)); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.h new file mode 100644 index 0000000000..1f35f96325 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_core.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_ACTUATOR_CORE_H_ +#define _CAM_ACTUATOR_CORE_H_ + +#include "cam_actuator_dev.h" + +/** + * @power_info: power setting info to control the power + * + * This API construct the default actuator power setting. + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int32_t cam_actuator_construct_default_power_setting( + struct cam_sensor_power_ctrl_t *power_info); + +/** + * @apply: Req mgr structure for applying request + * + * This API applies the request that is mentioned + */ +int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply); + +/** + * @info: Sub device info to req mgr + * + * This API publish the subdevice info to req mgr + */ +int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info); + +/** + * @flush: Req mgr structure for flushing request + * + * This API flushes the request that is mentioned + */ +int cam_actuator_flush_request(struct cam_req_mgr_flush_request *flush); + + +/** + * @link: Link setup info + * + * This API establishes link actuator subdevice with req mgr + */ +int32_t cam_actuator_establish_link( + struct cam_req_mgr_core_dev_link_setup *link); + +/** + * @a_ctrl: Actuator ctrl structure + * @arg: Camera control command argument + * + * This API handles the camera control argument reached to actuator + */ +int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl, void *arg); + +/** + * @a_ctrl: Actuator ctrl structure + * + * This API handles the shutdown ioctl/close + */ +void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl); + +struct completion *cam_actuator_get_i3c_completion(uint32_t index); + +#endif /* _CAM_ACTUATOR_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_dev.c new file mode 100644 index 0000000000..10ccf4225a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_dev.c @@ -0,0 +1,827 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_actuator_dev.h" +#include "cam_req_mgr_dev.h" +#include "cam_actuator_soc.h" +#include "cam_actuator_core.h" +#include "cam_trace.h" +#include "camera_main.h" +#include "cam_compat.h" +#include "cam_mem_mgr_api.h" + +static struct cam_i3c_actuator_data { + struct cam_actuator_ctrl_t *a_ctrl; + struct completion probe_complete; +} g_i3c_actuator_data[MAX_CAMERAS]; + +struct completion *cam_actuator_get_i3c_completion(uint32_t index) +{ + return &g_i3c_actuator_data[index].probe_complete; +} + +static int cam_actuator_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_actuator_ctrl_t *a_ctrl = + v4l2_get_subdevdata(sd); + + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, "a_ctrl ptr is NULL"); + return -EINVAL; + } + + mutex_lock(&(a_ctrl->actuator_mutex)); + cam_actuator_shutdown(a_ctrl); + mutex_unlock(&(a_ctrl->actuator_mutex)); + + return 0; +} + +static int cam_actuator_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_ACTUATOR, + "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_actuator_subdev_close_internal(sd, fh); +} + +static long cam_actuator_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + int rc = 0; + struct cam_actuator_ctrl_t *a_ctrl = + v4l2_get_subdevdata(sd); + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_actuator_driver_cmd(a_ctrl, arg); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_ACTUATOR, + "Failed for driver_cmd: %d, it has been flushed", + rc); + else + CAM_ERR(CAM_ACTUATOR, + "Failed for driver_cmd: %d", rc); + } + break; + case CAM_SD_SHUTDOWN: + if (!cam_req_mgr_is_shutdown()) { + CAM_ERR(CAM_CORE, "SD shouldn't come from user space"); + return 0; + } + + rc = cam_actuator_subdev_close_internal(sd, NULL); + break; + default: + CAM_ERR(CAM_ACTUATOR, "Invalid ioctl cmd: %u", cmd); + rc = -ENOIOCTLCMD; + break; + } + return rc; +} + 
+#ifdef CONFIG_COMPAT +static long cam_actuator_init_subdev_do_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + struct cam_control cmd_data; + int32_t rc = 0; + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_ACTUATOR, + "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + cmd = VIDIOC_CAM_CONTROL; + rc = cam_actuator_subdev_ioctl(sd, cmd, &cmd_data); + if (rc) { + CAM_ERR(CAM_ACTUATOR, + "Failed in actuator subdev handling rc: %d", + rc); + return rc; + } + break; + default: + CAM_ERR(CAM_ACTUATOR, "Invalid compat ioctl: %d", cmd); + rc = -ENOIOCTLCMD; + break; + } + + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { + CAM_ERR(CAM_ACTUATOR, + "Failed to copy to user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + return rc; +} +#endif + +static struct v4l2_subdev_core_ops cam_actuator_subdev_core_ops = { + .ioctl = cam_actuator_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_actuator_init_subdev_do_ioctl, +#endif +}; + +static struct v4l2_subdev_ops cam_actuator_subdev_ops = { + .core = &cam_actuator_subdev_core_ops, +}; + +static const struct v4l2_subdev_internal_ops cam_actuator_internal_ops = { + .close = cam_actuator_subdev_close, +}; + +static int cam_actuator_init_subdev(struct cam_actuator_ctrl_t *a_ctrl) +{ + int rc = 0; + + a_ctrl->v4l2_dev_str.internal_ops = + &cam_actuator_internal_ops; + a_ctrl->v4l2_dev_str.ops = + &cam_actuator_subdev_ops; + strscpy(a_ctrl->device_name, CAMX_ACTUATOR_DEV_NAME, + sizeof(a_ctrl->device_name)); + a_ctrl->v4l2_dev_str.name = + a_ctrl->device_name; + a_ctrl->v4l2_dev_str.sd_flags = + (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS); + a_ctrl->v4l2_dev_str.ent_function = + CAM_ACTUATOR_DEVICE_TYPE; + a_ctrl->v4l2_dev_str.token = a_ctrl; + 
a_ctrl->v4l2_dev_str.close_seq_prior = + CAM_SD_CLOSE_MEDIUM_PRIORITY; + + rc = cam_register_subdev(&(a_ctrl->v4l2_dev_str)); + if (rc) + CAM_ERR(CAM_ACTUATOR, + "Fail with cam_register_subdev rc: %d", rc); + + return rc; +} + +static int cam_actuator_i2c_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int32_t rc = 0; + int32_t i = 0; + struct i2c_client *client; + struct cam_actuator_ctrl_t *a_ctrl; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_actuator_soc_private *soc_private = NULL; + struct timespec64 ts_start, ts_end; + long microsec = 0; + struct device_node *np = NULL; + const char *drv_name; + + CAM_GET_TIMESTAMP(ts_start); + + client = container_of(dev, struct i2c_client, dev); + if (!client) { + CAM_ERR(CAM_ACTUATOR, + "Failed to get i2c client"); + return -EFAULT; + } + + /* Create sensor control structure */ + a_ctrl = CAM_MEM_ZALLOC(sizeof(*a_ctrl), GFP_KERNEL); + if (!a_ctrl) + return -ENOMEM; + + a_ctrl->io_master_info.qup_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_qup_client), GFP_KERNEL); + if (!(a_ctrl->io_master_info.qup_client)) { + rc = -ENOMEM; + goto free_ctrl; + } + + i2c_set_clientdata(client, a_ctrl); + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_actuator_soc_private), + GFP_KERNEL); + if (!soc_private) { + rc = -ENOMEM; + goto free_qup; + } + a_ctrl->soc_info.soc_private = soc_private; + + a_ctrl->io_master_info.qup_client->i2c_client = client; + soc_info = &a_ctrl->soc_info; + soc_info->dev = &client->dev; + soc_info->dev_name = client->name; + a_ctrl->io_master_info.master_type = I2C_MASTER; + + np = of_node_get(client->dev.of_node); + drv_name = of_node_full_name(np); + + rc = cam_actuator_parse_dt(a_ctrl, &client->dev); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "failed: cam_sensor_parse_dt rc %d", rc); + goto free_soc; + } + + rc = cam_actuator_init_subdev(a_ctrl); + if (rc) + goto free_soc; + + if (soc_private->i2c_info.slave_addr != 0) + 
a_ctrl->io_master_info.qup_client->i2c_client->addr = + soc_private->i2c_info.slave_addr; + + a_ctrl->i2c_data.per_frame = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (a_ctrl->i2c_data.per_frame == NULL) { + rc = -ENOMEM; + goto unreg_subdev; + } + + cam_sensor_module_add_i2c_device((void *) a_ctrl, CAM_SENSOR_ACTUATOR); + + INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head)); + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) + INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head)); + + a_ctrl->bridge_intf.device_hdl = -1; + a_ctrl->bridge_intf.link_hdl = -1; + a_ctrl->bridge_intf.ops.get_dev_info = + cam_actuator_publish_dev_info; + a_ctrl->bridge_intf.ops.link_setup = + cam_actuator_establish_link; + a_ctrl->bridge_intf.ops.apply_req = + cam_actuator_apply_request; + a_ctrl->last_flush_req = 0; + a_ctrl->cam_act_state = CAM_ACTUATOR_INIT; + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(drv_name, microsec); + of_node_put(np); + + return rc; + +unreg_subdev: + cam_unregister_subdev(&(a_ctrl->v4l2_dev_str)); +free_soc: + CAM_MEM_FREE(soc_private); +free_qup: + CAM_MEM_FREE(a_ctrl->io_master_info.qup_client); +free_ctrl: + CAM_MEM_FREE(a_ctrl); + return rc; +} + +static void cam_actuator_i2c_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct i2c_client *client = NULL; + struct cam_actuator_ctrl_t *a_ctrl = NULL; + + client = container_of(dev, struct i2c_client, dev); + if (!client) { + CAM_ERR(CAM_ACTUATOR, + "Failed to get i2c client"); + return; + } + + a_ctrl = i2c_get_clientdata(client); + /* Handle I2C Devices */ + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL"); + return; + } + + CAM_INFO(CAM_ACTUATOR, "i2c remove invoked"); + mutex_lock(&(a_ctrl->actuator_mutex)); + cam_actuator_shutdown(a_ctrl); + mutex_unlock(&(a_ctrl->actuator_mutex)); + 
cam_unregister_subdev(&(a_ctrl->v4l2_dev_str)); + + /*Free Allocated Mem */ + CAM_MEM_FREE(a_ctrl->i2c_data.per_frame); + a_ctrl->i2c_data.per_frame = NULL; + a_ctrl->soc_info.soc_private = NULL; + v4l2_set_subdevdata(&a_ctrl->v4l2_dev_str.sd, NULL); + CAM_MEM_FREE(a_ctrl->io_master_info.qup_client); + CAM_MEM_FREE(a_ctrl); +} + +const static struct component_ops cam_actuator_i2c_component_ops = { + .bind = cam_actuator_i2c_component_bind, + .unbind = cam_actuator_i2c_component_unbind, +}; + +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE +static int cam_actuator_driver_i2c_probe(struct i2c_client *client) +{ + int rc = 0; + + if (client == NULL) { + CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_ACTUATOR, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_ACTUATOR, "Adding sensor actuator component"); + rc = component_add(&client->dev, &cam_actuator_i2c_component_ops); + if (rc) + CAM_ERR(CAM_ACTUATOR, "failed to add component rc: %d", rc); + + return rc; +} +#else +static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + + if (client == NULL || id == NULL) { + CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK id: %pK", + client, id); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_ACTUATOR, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_ACTUATOR, "Adding sensor actuator component"); + rc = component_add(&client->dev, &cam_actuator_i2c_component_ops); + if (rc) + CAM_ERR(CAM_ACTUATOR, "failed to add component rc: %d", rc); + + return rc; +} +#endif + +#if KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE +void cam_actuator_driver_i2c_remove( + struct i2c_client *client) +{ + component_del(&client->dev, &cam_actuator_i2c_component_ops); +} 
+#else +static int32_t cam_actuator_driver_i2c_remove( + struct i2c_client *client) +{ + component_del(&client->dev, &cam_actuator_i2c_component_ops); + return 0; +} +#endif + +static int cam_actuator_platform_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int32_t rc = 0; + int32_t i = 0; + bool i3c_i2c_target; + struct cam_actuator_ctrl_t *a_ctrl = NULL; + struct cam_actuator_soc_private *soc_private = NULL; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return 0; + + /* Create actuator control structure */ + a_ctrl = devm_kzalloc(&pdev->dev, + sizeof(struct cam_actuator_ctrl_t), GFP_KERNEL); + if (!a_ctrl) + return -ENOMEM; + + /*fill in platform device*/ + a_ctrl->v4l2_dev_str.pdev = pdev; + a_ctrl->soc_info.pdev = pdev; + a_ctrl->soc_info.dev = &pdev->dev; + a_ctrl->soc_info.dev_name = pdev->name; + a_ctrl->io_master_info.master_type = CCI_MASTER; + + a_ctrl->io_master_info.cci_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_cci_client), GFP_KERNEL); + if (!(a_ctrl->io_master_info.cci_client)) { + rc = -ENOMEM; + goto free_ctrl; + } + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_actuator_soc_private), + GFP_KERNEL); + if (!soc_private) { + rc = -ENOMEM; + goto free_cci_client; + } + a_ctrl->soc_info.soc_private = soc_private; + soc_private->power_info.dev = &pdev->dev; + + a_ctrl->i2c_data.per_frame = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (a_ctrl->i2c_data.per_frame == NULL) { + rc = -ENOMEM; + goto free_soc; + } + + cam_sensor_module_add_i2c_device((void *) a_ctrl, CAM_SENSOR_ACTUATOR); + + INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head)); + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) + INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head)); + + 
rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev)); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "Paring actuator dt failed rc %d", rc); + goto free_mem; + } + + /* Fill platform device id*/ + pdev->id = a_ctrl->soc_info.index; + + rc = cam_actuator_init_subdev(a_ctrl); + if (rc) + goto free_mem; + + a_ctrl->bridge_intf.device_hdl = -1; + a_ctrl->bridge_intf.link_hdl = -1; + a_ctrl->bridge_intf.ops.get_dev_info = + cam_actuator_publish_dev_info; + a_ctrl->bridge_intf.ops.link_setup = + cam_actuator_establish_link; + a_ctrl->bridge_intf.ops.apply_req = + cam_actuator_apply_request; + a_ctrl->bridge_intf.ops.flush_req = + cam_actuator_flush_request; + a_ctrl->last_flush_req = 0; + + platform_set_drvdata(pdev, a_ctrl); + a_ctrl->cam_act_state = CAM_ACTUATOR_INIT; + CAM_DBG(CAM_ACTUATOR, "Component bound successfully %d", + a_ctrl->soc_info.index); + + g_i3c_actuator_data[a_ctrl->soc_info.index].a_ctrl = a_ctrl; + init_completion(&g_i3c_actuator_data[a_ctrl->soc_info.index].probe_complete); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +free_mem: + CAM_MEM_FREE(a_ctrl->i2c_data.per_frame); +free_soc: + CAM_MEM_FREE(soc_private); +free_cci_client: + CAM_MEM_FREE(a_ctrl->io_master_info.cci_client); +free_ctrl: + devm_kfree(&pdev->dev, a_ctrl); + return rc; +} + +static void cam_actuator_platform_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_actuator_ctrl_t *a_ctrl; + bool i3c_i2c_target; + struct platform_device *pdev = to_platform_device(dev); + + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return; + + a_ctrl = platform_get_drvdata(pdev); + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL"); + return; + } + + mutex_lock(&(a_ctrl->actuator_mutex)); + cam_actuator_shutdown(a_ctrl); + mutex_unlock(&(a_ctrl->actuator_mutex)); + 
cam_unregister_subdev(&(a_ctrl->v4l2_dev_str)); + + CAM_MEM_FREE(a_ctrl->io_master_info.cci_client); + a_ctrl->io_master_info.cci_client = NULL; + CAM_MEM_FREE(a_ctrl->soc_info.soc_private); + a_ctrl->soc_info.soc_private = NULL; + CAM_MEM_FREE(a_ctrl->i2c_data.per_frame); + a_ctrl->i2c_data.per_frame = NULL; + v4l2_set_subdevdata(&a_ctrl->v4l2_dev_str.sd, NULL); + platform_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, a_ctrl); + CAM_INFO(CAM_ACTUATOR, "Actuator component unbinded"); +} + +const static struct component_ops cam_actuator_platform_component_ops = { + .bind = cam_actuator_platform_component_bind, + .unbind = cam_actuator_platform_component_unbind, +}; + +static int32_t cam_actuator_platform_remove( + struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_actuator_platform_component_ops); + return 0; +} + +static const struct of_device_id cam_actuator_driver_dt_match[] = { + {.compatible = "qcom,actuator"}, + {} +}; + +static int32_t cam_actuator_driver_platform_probe( + struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_ACTUATOR, "Adding sensor actuator component"); + rc = component_add(&pdev->dev, &cam_actuator_platform_component_ops); + if (rc) + CAM_ERR(CAM_ACTUATOR, "failed to add component rc: %d", rc); + + return rc; +} + +MODULE_DEVICE_TABLE(of, cam_actuator_driver_dt_match); + +struct platform_driver cam_actuator_platform_driver = { + .probe = cam_actuator_driver_platform_probe, + .driver = { + .name = "qcom,actuator", + .owner = THIS_MODULE, + .of_match_table = cam_actuator_driver_dt_match, + .suppress_bind_attrs = true, + }, + .remove = cam_actuator_platform_remove, +}; + +static const struct i2c_device_id i2c_id[] = { + {ACTUATOR_DRIVER_I2C, (kernel_ulong_t)NULL}, + { } +}; + +static const struct of_device_id cam_actuator_i2c_driver_dt_match[] = { + {.compatible = "qcom,cam-i2c-actuator"}, + {} +}; +MODULE_DEVICE_TABLE(of, cam_actuator_i2c_driver_dt_match); + +struct i2c_driver cam_actuator_i2c_driver = { + 
.id_table = i2c_id, + .probe = cam_actuator_driver_i2c_probe, + .remove = cam_actuator_driver_i2c_remove, + .driver = { + .of_match_table = cam_actuator_i2c_driver_dt_match, + .owner = THIS_MODULE, + .name = ACTUATOR_DRIVER_I2C, + .suppress_bind_attrs = true, + }, +}; + +static struct i3c_device_id actuator_i3c_id[MAX_I3C_DEVICE_ID_ENTRIES + 1]; + +static int cam_actuator_i3c_driver_probe(struct i3c_device *client) +{ + int32_t rc = 0; + struct cam_actuator_ctrl_t *a_ctrl = NULL; + uint32_t index; + struct device *dev; + + if (!client) { + CAM_INFO(CAM_ACTUATOR, "Null Client pointer"); + return -EINVAL; + } + + dev = &client->dev; + + CAM_DBG(CAM_ACTUATOR, "Probe for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_ACTUATOR, "device %s failed to read cell-index", dev_name(dev)); + return rc; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_ACTUATOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return -EINVAL; + } + + a_ctrl = g_i3c_actuator_data[index].a_ctrl; + if (!a_ctrl) { + CAM_ERR(CAM_ACTUATOR, + "a_ctrl is null. 
I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return -EINVAL; + } + cam_sensor_utils_parse_pm_ctrl_flag(dev->of_node, &(a_ctrl->io_master_info)); + + CAM_INFO(CAM_SENSOR, + "master: %d (1-CCI, 2-I2C, 3-SPI, 4-I3C) pm_ctrl_client_enable: %d", + a_ctrl->io_master_info.master_type, + a_ctrl->io_master_info.qup_client->pm_ctrl_client_enable); + + a_ctrl->io_master_info.qup_client->i3c_client = client; + a_ctrl->io_master_info.qup_client->i3c_wait_for_hotjoin = false; + + complete_all(&g_i3c_actuator_data[index].probe_complete); + + CAM_DBG(CAM_ACTUATOR, "I3C Probe Finished for %s", dev_name(dev)); + return rc; +} + +#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) +static void cam_i3c_driver_remove(struct i3c_device *client) +{ + int32_t rc = 0; + struct cam_actuator_ctrl_t *a_ctrl = NULL; + struct device *dev; + uint32_t index; + + if (!client) { + CAM_ERR(CAM_SENSOR, "I3C Driver Remove: Invalid input args"); + return; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "driver remove for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s failed to read cell-index", dev_name(dev)); + return; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return; + } + + a_ctrl = g_i3c_actuator_data[index].a_ctrl; + if (!a_ctrl) { + CAM_ERR(CAM_SENSOR, "a_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return; + } + + CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s", + (client ? 
dev_name(&client->dev) : "none")); + CAM_MEM_FREE(a_ctrl->io_master_info.qup_client); + a_ctrl->io_master_info.qup_client = NULL; +} + +#else +static int cam_i3c_driver_remove(struct i3c_device *client) +{ + struct cam_actuator_ctrl_t *a_ctrl = NULL; + struct device *dev; + uint32_t index; + + if (!client) { + CAM_ERR(CAM_SENSOR, "I3C Driver Remove: Invalid input args"); + return -EINVAL; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "driver remove for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s failed to read cell-index", dev_name(dev)); + return -EINVAL; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return -EINVAL; + } + + a_ctrl = g_i3c_actuator_data[index].a_ctrl; + if (!a_ctrl) { + CAM_ERR(CAM_SENSOR, "a_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s", + (client ? 
dev_name(&client->dev) : "none")); + CAM_MEM_FREE(a_ctrl->io_master_info.qup_client); + a_ctrl->io_master_info.qup_client = NULL; + return 0; +} +#endif + +static struct i3c_driver cam_actuator_i3c_driver = { + .id_table = actuator_i3c_id, + .probe = cam_actuator_i3c_driver_probe, + .remove = cam_i3c_driver_remove, + .driver = { + .owner = THIS_MODULE, + .name = ACTUATOR_DRIVER_I3C, + .of_match_table = cam_actuator_driver_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_actuator_driver_init(void) +{ + int32_t rc = 0; + struct device_node *dev; + int num_entries = 0; + + rc = platform_driver_register(&cam_actuator_platform_driver); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, + "platform_driver_register failed rc = %d", rc); + return rc; + } + + rc = i2c_add_driver(&cam_actuator_i2c_driver); + if (rc) { + CAM_ERR(CAM_ACTUATOR, "i2c_add_driver failed rc = %d", rc); + goto i2c_register_err; + } + + memset(actuator_i3c_id, 0, sizeof(struct i3c_device_id) * (MAX_I3C_DEVICE_ID_ENTRIES + 1)); + + dev = of_find_node_by_path(I3C_SENSOR_DEV_ID_DT_PATH); + if (!dev) { + CAM_DBG(CAM_ACTUATOR, "Couldnt Find the i3c-id-table dev node"); + return 0; + } + + rc = cam_sensor_count_elems_i3c_device_id(dev, &num_entries, + "i3c-actuator-id-table"); + if (rc) + return 0; + + rc = cam_sensor_fill_i3c_device_id(dev, num_entries, + "i3c-actuator-id-table", actuator_i3c_id); + if (rc) + goto i3c_register_err; + + rc = i3c_driver_register_with_owner(&cam_actuator_i3c_driver, THIS_MODULE); + if (rc) { + CAM_ERR(CAM_ACTUATOR, "i3c_driver registration failed, rc: %d", rc); + goto i3c_register_err; + } + + return 0; + +i3c_register_err: + i2c_del_driver(&cam_actuator_i2c_driver); +i2c_register_err: + platform_driver_unregister(&cam_actuator_platform_driver); + + return rc; +} + +void cam_actuator_driver_exit(void) +{ + struct device_node *dev; + + platform_driver_unregister(&cam_actuator_platform_driver); + i2c_del_driver(&cam_actuator_i2c_driver); + + dev = 
of_find_node_by_path(I3C_SENSOR_DEV_ID_DT_PATH); + if (!dev) { + CAM_DBG(CAM_ACTUATOR, "Couldnt Find the i3c-id-table dev node"); + return; + } + + i3c_driver_unregister(&cam_actuator_i3c_driver); +} + +MODULE_DESCRIPTION("cam_actuator_driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_dev.h new file mode 100644 index 0000000000..3e334eb514 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_dev.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + + +#ifndef _CAM_ACTUATOR_DEV_H_ +#define _CAM_ACTUATOR_DEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_sensor_util.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_context.h" + +#define NUM_MASTERS 2 +#define NUM_QUEUES 2 + +#define ACTUATOR_DRIVER_I2C "cam-i2c-actuator" +#define CAMX_ACTUATOR_DEV_NAME "cam-actuator-driver" +#define ACTUATOR_DRIVER_I3C "i3c_camera_actuator" + + +#define MSM_ACTUATOR_MAX_VREGS (10) +#define ACTUATOR_MAX_POLL_COUNT 10 + +enum cam_actuator_apply_state_t { + ACT_APPLY_SETTINGS_NOW, + ACT_APPLY_SETTINGS_LATER, +}; + +enum cam_actuator_state { + CAM_ACTUATOR_INIT, + CAM_ACTUATOR_ACQUIRE, + CAM_ACTUATOR_CONFIG, + CAM_ACTUATOR_START, +}; + +/** + * struct cam_actuator_i2c_info_t - I2C info + * @slave_addr : slave address + * @i2c_freq_mode : i2c frequency mode + */ +struct cam_actuator_i2c_info_t { + uint16_t slave_addr; + uint8_t i2c_freq_mode; +}; + +struct cam_actuator_soc_private { + struct cam_actuator_i2c_info_t i2c_info; + struct 
cam_sensor_power_ctrl_t power_info; +}; + +/** + * struct actuator_intf_params + * @device_hdl: Device Handle + * @session_hdl: Session Handle + * @ops: KMD operations + * @crm_cb: Callback API pointers + */ +struct actuator_intf_params { + int32_t device_hdl; + int32_t session_hdl; + int32_t link_hdl; + struct cam_req_mgr_kmd_ops ops; + struct cam_req_mgr_crm_cb *crm_cb; +}; + +/** + * struct cam_actuator_ctrl_t + * @device_name: Device name + * @i2c_driver: I2C device info + * @pdev: Platform device + * @cci_i2c_master: I2C structure + * @io_master_info: Information about the communication master + * @actuator_mutex: Actuator mutex + * @is_i3c_device : A Flag to indicate whether this actuator is I3C device + * @act_apply_state: Actuator settings aRegulator config + * @id: Cell Index + * @res_apply_state: Actuator settings apply state + * @cam_act_state: Actuator state + * @gconf: GPIO config + * @pinctrl_info: Pinctrl information + * @v4l2_dev_str: V4L2 device structure + * @i2c_data: I2C register settings structure + * @act_info: Sensor query cap structure + * @of_node: Node ptr + * @last_flush_req: Last request to flush + */ +struct cam_actuator_ctrl_t { + char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + struct i2c_driver *i2c_driver; + enum cci_i2c_master_t cci_i2c_master; + enum cci_device_num cci_num; + struct camera_io_master io_master_info; + struct cam_hw_soc_info soc_info; + struct mutex actuator_mutex; + bool is_i3c_device; + uint32_t id; + enum cam_actuator_apply_state_t setting_apply_state; + enum cam_actuator_state cam_act_state; + uint8_t cam_pinctrl_status; + struct cam_subdev v4l2_dev_str; + struct i2c_data_settings i2c_data; + struct cam_actuator_query_cap act_info; + struct actuator_intf_params bridge_intf; + uint32_t last_flush_req; +}; + +/** + * @brief : API to register Actuator hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. 
+ */ +int cam_actuator_driver_init(void); + +/** + * @brief : API to remove Actuator Hw from platform framework. + */ +void cam_actuator_driver_exit(void); +#endif /* _CAM_ACTUATOR_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_soc.c new file mode 100644 index 0000000000..8d2a93bffe --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_soc.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include "cam_actuator_soc.h" +#include "cam_soc_util.h" + +int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl, + struct device *dev) +{ + int32_t i, rc = 0; + struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info; + struct cam_actuator_soc_private *soc_private = + (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info; + struct device_node *of_node = NULL; + struct device_node *of_parent = NULL; + + /* Initialize mutex */ + mutex_init(&(a_ctrl->actuator_mutex)); + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_ACTUATOR, "parsing common soc dt(rc %d)", rc); + return rc; + } + + of_node = soc_info->dev->of_node; + + rc = of_property_read_bool(of_node, "i3c-target"); + if (rc) { + a_ctrl->is_i3c_device = true; + a_ctrl->io_master_info.master_type = I3C_MASTER; + } + + CAM_DBG(CAM_SENSOR, "I3C Target: %s", CAM_BOOL_TO_YESNO(a_ctrl->is_i3c_device)); + + if (a_ctrl->io_master_info.master_type == CCI_MASTER) { + rc = of_property_read_u32(of_node, "cci-master", + &(a_ctrl->cci_i2c_master)); + CAM_DBG(CAM_ACTUATOR, "cci-master %d, rc 
%d", + a_ctrl->cci_i2c_master, rc); + if ((rc < 0) || (a_ctrl->cci_i2c_master >= MASTER_MAX)) { + CAM_ERR(CAM_ACTUATOR, + "Wrong info: rc: %d, dt CCI master:%d", + rc, a_ctrl->cci_i2c_master); + rc = -EFAULT; + return rc; + } + + of_parent = of_get_parent(of_node); + if (of_property_read_u32(of_parent, "cell-index", + &a_ctrl->cci_num) < 0) + /* Set default master 0 */ + a_ctrl->cci_num = CCI_DEVICE_0; + a_ctrl->io_master_info.cci_client->cci_device = a_ctrl->cci_num; + CAM_DBG(CAM_ACTUATOR, "cci-device %d", a_ctrl->cci_num); + } + + /* Initialize regulators to default parameters */ + for (i = 0; i < soc_info->num_rgltr; i++) { + soc_info->rgltr[i] = devm_regulator_get(soc_info->dev, + soc_info->rgltr_name[i]); + if (IS_ERR_OR_NULL(soc_info->rgltr[i])) { + rc = PTR_ERR(soc_info->rgltr[i]); + rc = rc ? rc : -EINVAL; + CAM_ERR(CAM_ACTUATOR, "get failed for regulator %s %d", + soc_info->rgltr_name[i], rc); + return rc; + } + CAM_DBG(CAM_ACTUATOR, "get for regulator %s", + soc_info->rgltr_name[i]); + } + + cam_sensor_utils_parse_pm_ctrl_flag(of_node, &(a_ctrl->io_master_info)); + + if (!soc_info->gpio_data) { + CAM_DBG(CAM_ACTUATOR, "No GPIO found"); + rc = 0; + return rc; + } + + if (!soc_info->gpio_data->cam_gpio_common_tbl_size) { + CAM_DBG(CAM_ACTUATOR, "No GPIO found"); + return -EINVAL; + } + + rc = cam_sensor_util_init_gpio_pin_tbl(soc_info, + &power_info->gpio_num_info); + if ((rc < 0) || (!power_info->gpio_num_info)) { + CAM_ERR(CAM_ACTUATOR, "No/Error Actuator GPIOs"); + return -EINVAL; + } + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_soc.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_soc.h new file mode 100644 index 0000000000..65b4fb99fa --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_actuator/cam_actuator_soc.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux 
Foundation. All rights reserved. + */ + +#ifndef _CAM_ACTUATOR_SOC_H_ +#define _CAM_ACTUATOR_SOC_H_ + +#include "cam_actuator_dev.h" + +/** + * @a_ctrl: Actuator ctrl structure + * + * This API parses actuator device tree + */ +int cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl, + struct device *dev); + +#endif /* _CAM_ACTUATOR_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_core.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_core.c new file mode 100644 index 0000000000..72a3df5464 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_core.c @@ -0,0 +1,2548 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_cci_core.h" +#include "cam_cci_dev.h" +#include "cam_req_mgr_workq.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +static int32_t cam_cci_convert_type_to_num_bytes( + enum camera_sensor_i2c_type type) +{ + int32_t num_bytes; + + switch (type) { + case CAMERA_SENSOR_I2C_TYPE_BYTE: + num_bytes = 1; + break; + case CAMERA_SENSOR_I2C_TYPE_WORD: + num_bytes = 2; + break; + case CAMERA_SENSOR_I2C_TYPE_3B: + num_bytes = 3; + break; + case CAMERA_SENSOR_I2C_TYPE_DWORD: + num_bytes = 4; + break; + default: + CAM_ERR(CAM_CCI, "Wrong Sensor I2c Type: %d", type); + num_bytes = 0; + break; + } + return num_bytes; +} + +static void cam_cci_flush_queue(struct cci_device *cci_dev, + enum cci_i2c_master_t master) +{ + int32_t rc = 0; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = soc_info->reg_map[0].mem_base; + + cam_io_w_mb(1 << master, base + CCI_HALT_REQ_ADDR); + if (!cci_dev->cci_master_info[master].status) + reinit_completion(&cci_dev->cci_master_info[master] + .reset_complete); + if 
(!cam_common_wait_for_completion_timeout( + &cci_dev->cci_master_info[master].reset_complete, + CCI_TIMEOUT)) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d wait timeout for reset complete", + cci_dev->soc_info.index, master); + + /* Set reset pending flag to true */ + cci_dev->cci_master_info[master].reset_pending = true; + cci_dev->cci_master_info[master].status = 0; + + /* Set proper mask to RESET CMD address based on MASTER */ + if (master == MASTER_0) + cam_io_w_mb(CCI_M0_RESET_RMSK, + base + CCI_RESET_CMD_ADDR); + else + cam_io_w_mb(CCI_M1_RESET_RMSK, + base + CCI_RESET_CMD_ADDR); + + /* wait for reset done irq */ + if (!cam_common_wait_for_completion_timeout( + &cci_dev->cci_master_info[master].reset_complete, + CCI_TIMEOUT)) { + rc = -EINVAL; + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d Retry:: wait timeout for reset complete", + cci_dev->soc_info.index, master); + } + cci_dev->cci_master_info[master].status = 0; + } + + if (!rc) + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d Success: Reset complete", + cci_dev->soc_info.index, master); +} + +static int32_t cam_cci_validate_queue(struct cci_device *cci_dev, + uint32_t len, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + int32_t rc = 0; + uint32_t read_val = 0; + uint32_t reg_offset = master * 0x200 + queue * 0x100; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = soc_info->reg_map[0].mem_base; + unsigned long flags; + + read_val = cam_io_r_mb(base + + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset); + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d_CUR_WORD_CNT_ADDR %d len %d max %d", + cci_dev->soc_info.index, master, queue, read_val, len, + cci_dev->cci_i2c_queue_info[master][queue].max_queue_size); + if ((read_val + len + 1) > + cci_dev->cci_i2c_queue_info[master][queue].max_queue_size) { + uint32_t reg_val = 0; + uint32_t report_id = + cci_dev->cci_i2c_queue_info[master][queue].report_id; + uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8) | + (1 << 9) | (report_id << 4); + + 
CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_REPORT_CMD", + cci_dev->soc_info.index, master, queue); + cam_io_w_mb(report_val, + base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset); + read_val++; + cci_dev->cci_i2c_queue_info[master][queue].report_id++; + if (cci_dev->cci_i2c_queue_info[master][queue].report_id == REPORT_IDSIZE) + cci_dev->cci_i2c_queue_info[master][queue].report_id = 0; + + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d_EXEC_WORD_CNT_ADDR %d", + cci_dev->soc_info.index, master, queue, read_val); + cam_io_w_mb(read_val, base + + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset); + reg_val = 1 << ((master * 2) + queue); + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_START_ADDR", cci_dev->soc_info.index, master, queue); + spin_lock_irqsave( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + atomic_set( + &cci_dev->cci_master_info[master].done_pending[queue], + 1); + cam_io_w_mb(reg_val, base + CCI_QUEUE_START_ADDR); + atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1); + spin_unlock_irqrestore( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + if (!cam_common_wait_for_completion_timeout( + &cci_dev->cci_master_info[master].report_q[queue], + CCI_TIMEOUT)) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d wait timeout, rc:%d", + cci_dev->soc_info.index, master, queue, rc); + cam_cci_flush_queue(cci_dev, master); + return -EINVAL; + } + rc = cci_dev->cci_master_info[master].status; + if (rc < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d is in error state", + cci_dev->soc_info.index, master, queue); + cci_dev->cci_master_info[master].status = 0; + } + } + + return rc; +} + +static int32_t cam_cci_write_i2c_queue(struct cci_device *cci_dev, + uint32_t val, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + int32_t rc = 0; + uint32_t reg_offset = master * 0x200 + queue * 0x100; + struct cam_hw_soc_info *soc_info = NULL; + void __iomem *base = NULL; + + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return -EINVAL; + } + + soc_info = 
&cci_dev->soc_info; + base = soc_info->reg_map[0].mem_base; + + rc = cam_cci_validate_queue(cci_dev, 1, master, queue); + if (rc < 0) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed to validate:: rc: %d", + cci_dev->soc_info.index, master, queue, rc); + return rc; + } + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_LOAD_DATA_ADDR:val 0x%x:0x%x ", + cci_dev->soc_info.index, master, queue, + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset, val); + cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset); + + return rc; +} + +static void cam_cci_lock_queue(struct cci_device *cci_dev, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue, uint32_t en) +{ + uint32_t val = 0; + uint32_t read_val = 0; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = + soc_info->reg_map[0].mem_base; + uint32_t reg_offset = + master * 0x200 + queue * 0x100; + + if (queue != PRIORITY_QUEUE) + return; + + val = en ? CCI_I2C_LOCK_CMD : CCI_I2C_UNLOCK_CMD; + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_LOAD_DATA_ADDR:val 0x%x:0x%x ", + cci_dev->soc_info.index, master, queue, + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset, val); + cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset); + + if (cci_dev->cci_master_info[master].is_burst_enable[queue] == true) { + cci_dev->cci_master_info[master].num_words_exec[queue]++; + read_val = cci_dev->cci_master_info[master].num_words_exec[queue]; + } else { + read_val = cam_io_r_mb(base + + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset); + } + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_EXEC_WORD_CNT_ADDR %d", + cci_dev->soc_info.index, master, queue, read_val); + cam_io_w_mb(read_val, base + + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset); +} + +void cam_cci_dump_registers(struct cci_device *cci_dev, + enum cci_i2c_master_t master, enum cci_i2c_queue_t queue) +{ + uint32_t dump_en = 0; + uint32_t read_val = 0; + uint32_t i = 0; + uint32_t reg_offset = 0; + void __iomem *base = cci_dev->soc_info.reg_map[0].mem_base; + 
+ dump_en = cci_dev->dump_en; + if (!(dump_en & CAM_CCI_NACK_DUMP_EN) && + !(dump_en & CAM_CCI_TIMEOUT_DUMP_EN)) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Nack and Timeout dump is not enabled", + cci_dev->soc_info.index, master, queue); + return; + } + + CAM_INFO(CAM_CCI, "**** CCI%d_I2C_M%d_Q%d register dump ****", + cci_dev->soc_info.index, master, queue); + + /* CCI Top Registers */ + CAM_INFO(CAM_CCI, "**** CCI TOP Registers ****"); + for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) { + reg_offset = DEBUG_TOP_REG_START + i * 4; + read_val = cam_io_r_mb(base + reg_offset); + CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X", + reg_offset, read_val); + } + + /* CCI Master registers */ + CAM_INFO(CAM_CCI, "**** CCI%d_I2C_M%d Registers ****", + cci_dev->soc_info.index, master); + for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) { + if ((i * 4) == 0x18) + continue; + + reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4; + read_val = cam_io_r_mb(base + reg_offset); + CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X", + reg_offset, read_val); + } + + /* CCI Master Queue registers */ + CAM_INFO(CAM_CCI, " **** CCI%d_I2C_M%d_Q%d Registers ****", + cci_dev->soc_info.index, master, queue); + for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) { + reg_offset = DEBUG_MASTER_QUEUE_REG_START + master*0x200 + + queue*0x100 + i * 4; + read_val = cam_io_r_mb(base + reg_offset); + CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X", + reg_offset, read_val); + } + + /* CCI Interrupt registers */ + CAM_INFO(CAM_CCI, " ****CCI Interrupt Registers****"); + for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) { + reg_offset = DEBUG_INTR_REG_START + i * 4; + read_val = cam_io_r_mb(base + reg_offset); + CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X", + reg_offset, read_val); + } +} +EXPORT_SYMBOL(cam_cci_dump_registers); + +static int cam_cci_wait(struct cci_device *cci_dev, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + int32_t rc = 0; + + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev pointer 
is NULL"); + return -EINVAL; + } + + if (!cam_common_wait_for_completion_timeout( + &cci_dev->cci_master_info[master].report_q[queue], + CCI_TIMEOUT)) { + cam_cci_dump_registers(cci_dev, master, queue); + + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d wait timeout, rc: %d", + cci_dev->soc_info.index, master, queue, rc); + rc = -ETIMEDOUT; + cam_cci_flush_queue(cci_dev, master); + CAM_INFO(CAM_CCI, + "CCI%d_I2C_M%d_Q%d dump register after reset", + cci_dev->soc_info.index, master, queue); + cam_cci_dump_registers(cci_dev, master, queue); + return rc; + } + + rc = cci_dev->cci_master_info[master].status; + if (rc < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d is in error state", + cci_dev->soc_info.index, master, queue); + cci_dev->cci_master_info[master].status = 0; + return rc; + } + + return 0; +} + +static void cam_cci_load_report_cmd(struct cci_device *cci_dev, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = soc_info->reg_map[0].mem_base; + + uint32_t reg_offset = master * 0x200 + queue * 0x100; + uint32_t read_val = cam_io_r_mb(base + + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset); + uint32_t report_id = + cci_dev->cci_i2c_queue_info[master][queue].report_id; + uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8) | + (1 << 9) | (report_id << 4); + + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d_REPORT_CMD curr_w_cnt: %d report_id %d", + cci_dev->soc_info.index, master, queue, read_val, report_id); + cam_io_w_mb(report_val, + base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset); + if (cci_dev->cci_master_info[master].is_burst_enable[queue] == true) { + cci_dev->cci_master_info[master].num_words_exec[queue]++; + read_val = cci_dev->cci_master_info[master].num_words_exec[queue]; + } else { + read_val++; + } + cci_dev->cci_i2c_queue_info[master][queue].report_id++; + if (cci_dev->cci_i2c_queue_info[master][queue].report_id == REPORT_IDSIZE) + 
cci_dev->cci_i2c_queue_info[master][queue].report_id = 0; + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_EXEC_WORD_CNT_ADDR %d (ReadValue: %u)", + cci_dev->soc_info.index, master, queue, read_val, + cam_io_r_mb(base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset)); + cam_io_w_mb(read_val, base + + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset); +} + +static int32_t cam_cci_wait_report_cmd(struct cci_device *cci_dev, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + unsigned long flags; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = soc_info->reg_map[0].mem_base; + + uint32_t reg_val = 1 << ((master * 2) + queue); + + spin_lock_irqsave( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1); + atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1); + spin_unlock_irqrestore( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + cam_io_w_mb(reg_val, base + CCI_QUEUE_START_ADDR); + + return cam_cci_wait(cci_dev, master, queue); +} + +static int32_t cam_cci_transfer_end(struct cci_device *cci_dev, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + int32_t rc = 0; + unsigned long flags; + + spin_lock_irqsave( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) { + spin_unlock_irqrestore( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + cam_cci_load_report_cmd(cci_dev, master, queue); + cam_cci_lock_queue(cci_dev, master, queue, 0); + + rc = cam_cci_wait_report_cmd(cci_dev, master, queue); + if (rc < 0) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed for wait_report_cmd for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + return rc; + } + } else { + atomic_set( + &cci_dev->cci_master_info[master].done_pending[queue], + 1); + spin_unlock_irqrestore( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + rc = 
cam_cci_wait(cci_dev, master, queue); + if (rc < 0) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed with cci_wait for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + return rc; + } + cam_cci_load_report_cmd(cci_dev, master, queue); + cam_cci_lock_queue(cci_dev, master, queue, 0); + + rc = cam_cci_wait_report_cmd(cci_dev, master, queue); + if (rc < 0) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed in wait_report_cmd for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + return rc; + } + } + + return rc; +} + +static int32_t cam_cci_get_queue_free_size(struct cci_device *cci_dev, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + uint32_t read_val = 0; + uint32_t reg_offset = master * 0x200 + queue * 0x100; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = soc_info->reg_map[0].mem_base; + + read_val = cam_io_r_mb(base + + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset); + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_CUR_WORD_CNT_ADDR %d max %d", + cci_dev->soc_info.index, master, queue, read_val, + cci_dev->cci_i2c_queue_info[master][queue].max_queue_size); + return ((cci_dev->cci_i2c_queue_info[master][queue].max_queue_size) - + read_val); +} + +static void cam_cci_process_half_q(struct cci_device *cci_dev, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + unsigned long flags; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = soc_info->reg_map[0].mem_base; + uint32_t reg_val = 1 << ((master * 2) + queue); + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d ENTER", cci_dev->soc_info.index, master, queue); + + spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue], + flags); + if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) { + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d is free", cci_dev->soc_info.index, master, queue); + cam_cci_load_report_cmd(cci_dev, master, queue); + atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1); + 
cam_io_w_mb(reg_val, base + + CCI_QUEUE_START_ADDR); + } + spin_unlock_irqrestore(&cci_dev->cci_master_info[master].lock_q[queue], + flags); +} + +static int32_t cam_cci_process_full_q(struct cci_device *cci_dev, + enum cci_i2c_master_t master, + enum cci_i2c_queue_t queue) +{ + int32_t rc = 0; + unsigned long flags; + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d ENTER", cci_dev->soc_info.index, master, queue); + spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue], + flags); + if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 1) { + atomic_set( + &cci_dev->cci_master_info[master].done_pending[queue], + 1); + spin_unlock_irqrestore( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d is set to 1", cci_dev->soc_info.index, master, queue); + rc = cam_cci_wait(cci_dev, master, queue); + if (rc < 0) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d cci_wait failed for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + return rc; + } + } else { + spin_unlock_irqrestore( + &cci_dev->cci_master_info[master].lock_q[queue], flags); + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d is set to 0", cci_dev->soc_info.index, master, queue); + cam_cci_load_report_cmd(cci_dev, master, queue); + rc = cam_cci_wait_report_cmd(cci_dev, master, queue); + if (rc < 0) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed in wait_report for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + return rc; + } + } + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d EXIT", cci_dev->soc_info.index, master, queue); + + return rc; +} + +static int32_t cam_cci_calc_cmd_len(struct cci_device *cci_dev, + struct cam_cci_ctrl *c_ctrl, uint32_t cmd_size, + struct cam_sensor_i2c_reg_array *i2c_cmd, uint32_t *pack) +{ + uint8_t i; + uint32_t len = 0; + uint8_t data_len = 0, addr_len = 0; + uint8_t pack_max_len; + struct cam_sensor_i2c_reg_setting *msg; + struct cam_sensor_i2c_reg_array *cmd = i2c_cmd; + uint32_t size = cmd_size; + + if (!cci_dev || !c_ctrl) { + 
CAM_ERR(CAM_CCI, "Invalid arguments cci_dev:%p, c_ctrl:%p", + cci_dev, c_ctrl); + return -EINVAL; + } + + msg = &c_ctrl->cfg.cci_i2c_write_cfg; + *pack = 0; + + if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ || + c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) { + addr_len = cam_cci_convert_type_to_num_bytes(msg->addr_type); + len = (size + addr_len) <= (cci_dev->payload_size) ? + (size + addr_len):cci_dev->payload_size; + } else { + addr_len = cam_cci_convert_type_to_num_bytes(msg->addr_type); + data_len = cam_cci_convert_type_to_num_bytes(msg->data_type); + len = data_len + addr_len; + pack_max_len = size < (cci_dev->payload_size-len) ? + size : (cci_dev->payload_size-len); + for (i = 0; i < pack_max_len;) { + if (cmd->delay || ((cmd - i2c_cmd) >= (cmd_size - 1))) + break; + if (cmd->reg_addr + 1 == + (cmd+1)->reg_addr) { + len += data_len; + if (len > cci_dev->payload_size) { + len = len - data_len; + break; + } + (*pack)++; + } else { + break; + } + i += data_len; + cmd++; + } + } + + if (len > cci_dev->payload_size) { + CAM_ERR(CAM_CCI, "Len error: len: %u expected_len: %u", + len, cci_dev->payload_size); + return -EINVAL; + } + + len += 1; /*add i2c WR command*/ + len = len/4 + 1; + + return len; +} + +static uint32_t cam_cci_cycles_per_ms(unsigned long clk) +{ + uint32_t cycles_per_us; + + if (clk) { + cycles_per_us = ((clk/1000)*256)/1000; + } else { + CAM_ERR(CAM_CCI, "Failed: Can use default: %d", + CYCLES_PER_MICRO_SEC_DEFAULT); + cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT; + } + + return cycles_per_us; +} + +void cam_cci_get_clk_rates(struct cci_device *cci_dev, + struct cam_cci_ctrl *c_ctrl) + +{ + int32_t src_clk_idx, j; + uint32_t cci_clk_src; + unsigned long clk; + struct cam_cci_clk_params_t *clk_params = NULL; + + enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode; + struct cam_hw_soc_info *soc_info = &cci_dev->soc_info; + + if (i2c_freq_mode >= I2C_MAX_MODES || + i2c_freq_mode < I2C_STANDARD_MODE) { + CAM_ERR(CAM_CCI, "Invalid frequency mode: 
%d", + (int32_t)i2c_freq_mode); + cci_dev->clk_level_index = -1; + return; + } + + clk_params = &cci_dev->cci_clk_params[i2c_freq_mode]; + cci_clk_src = clk_params->cci_clk_src; + + src_clk_idx = soc_info->src_clk_idx; + + if (src_clk_idx < 0) { + cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT; + cci_dev->clk_level_index = 0; + return; + } + + if (cci_clk_src == 0) { + clk = soc_info->clk_rate[0][src_clk_idx]; + cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk); + cci_dev->clk_level_index = 0; + return; + } + + for (j = 0; j < CAM_MAX_VOTE; j++) { + clk = soc_info->clk_rate[j][src_clk_idx]; + if (clk == cci_clk_src) { + cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk); + cci_dev->clk_level_index = j; + return; + } + } +} + +static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev, + struct cam_cci_ctrl *c_ctrl) +{ + struct cam_cci_clk_params_t *clk_params = NULL; + enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master; + enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode; + void __iomem *base = cci_dev->soc_info.reg_map[0].mem_base; + struct cam_cci_master_info *cci_master = + &cci_dev->cci_master_info[master]; + + if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d invalid i2c_freq_mode = %d", + cci_dev->soc_info.index, master, i2c_freq_mode); + return -EINVAL; + } + /* + * If no change in i2c freq, then acquire semaphore only for the first + * i2c transaction to indicate I2C transaction is in progress, else + * always try to acquire semaphore, to make sure that no other I2C + * transaction is in progress. 
+ */ + mutex_lock(&cci_master->mutex); + if (i2c_freq_mode == cci_dev->i2c_freq_mode[master]) { + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d, curr_freq: %d", cci_dev->soc_info.index, master, + i2c_freq_mode); + mutex_lock(&cci_master->freq_cnt_lock); + if (cci_master->freq_ref_cnt == 0) + down(&cci_master->master_sem); + cci_master->freq_ref_cnt++; + mutex_unlock(&cci_master->freq_cnt_lock); + mutex_unlock(&cci_master->mutex); + return 0; + } + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d, curr_freq: %d, req_freq: %d", + cci_dev->soc_info.index, master, cci_dev->i2c_freq_mode[master], i2c_freq_mode); + down(&cci_master->master_sem); + + mutex_lock(&cci_master->freq_cnt_lock); + cci_master->freq_ref_cnt++; + mutex_unlock(&cci_master->freq_cnt_lock); + + clk_params = &cci_dev->cci_clk_params[i2c_freq_mode]; + + if (master == MASTER_0) { + cam_io_w_mb(clk_params->hw_thigh << 16 | + clk_params->hw_tlow, + base + CCI_I2C_M0_SCL_CTL_ADDR); + cam_io_w_mb(clk_params->hw_tsu_sto << 16 | + clk_params->hw_tsu_sta, + base + CCI_I2C_M0_SDA_CTL_0_ADDR); + cam_io_w_mb(clk_params->hw_thd_dat << 16 | + clk_params->hw_thd_sta, + base + CCI_I2C_M0_SDA_CTL_1_ADDR); + cam_io_w_mb(clk_params->hw_tbuf, + base + CCI_I2C_M0_SDA_CTL_2_ADDR); + cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 | + clk_params->hw_trdhld << 4 | clk_params->hw_tsp, + base + CCI_I2C_M0_MISC_CTL_ADDR); + } else if (master == MASTER_1) { + cam_io_w_mb(clk_params->hw_thigh << 16 | + clk_params->hw_tlow, + base + CCI_I2C_M1_SCL_CTL_ADDR); + cam_io_w_mb(clk_params->hw_tsu_sto << 16 | + clk_params->hw_tsu_sta, + base + CCI_I2C_M1_SDA_CTL_0_ADDR); + cam_io_w_mb(clk_params->hw_thd_dat << 16 | + clk_params->hw_thd_sta, + base + CCI_I2C_M1_SDA_CTL_1_ADDR); + cam_io_w_mb(clk_params->hw_tbuf, + base + CCI_I2C_M1_SDA_CTL_2_ADDR); + cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 | + clk_params->hw_trdhld << 4 | clk_params->hw_tsp, + base + CCI_I2C_M1_MISC_CTL_ADDR); + } + cci_dev->i2c_freq_mode[master] = i2c_freq_mode; + + 
mutex_unlock(&cci_master->mutex); + return 0; +} + +int32_t cam_cci_data_queue_burst_apply(struct cci_device *cci_dev, + enum cci_i2c_master_t master, enum cci_i2c_queue_t queue, + uint32_t triggerHalfQueue) +{ + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = soc_info->reg_map[0].mem_base; + uint32_t reg_val = 1 << ((master * 2) + queue); + uint32_t iterate = 0; + uint32_t numBytes = 0; + bool condition = false; + uint32_t num_word_written_to_queue = 0; + uint32_t *data_queue = NULL; + uint32_t index = 0; + uint32_t reg_offset; + uint32_t queue_size = cci_dev->cci_i2c_queue_info[master][queue].max_queue_size; + uint32_t numWordsInQueue = 0, queueStartThreshold = 0; + + reg_offset = master * 0x200 + queue * 0x100; + data_queue = cci_dev->cci_master_info[master].data_queue[queue]; + num_word_written_to_queue = cci_dev->cci_master_info[master].num_words_in_data_queue[queue]; + index = cci_dev->cci_master_info[master].data_queue_start_index[queue]; + queueStartThreshold = cci_dev->cci_master_info[master].half_queue_mark[queue]; + + if (data_queue == NULL) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d data_queue is NULL", + cci_dev->soc_info.index, master, queue); + return -EINVAL; + } + + /* At First this routine is called from process context with FULL QUEUE + * Execution. 
and next iteration will be called from IRQ Context to process + * only HALF QUEUE size decided by precomputed value "queueStartThreshold" + * */ + if (triggerHalfQueue == 1) { + // Apply HALF QUEUE + trace_cam_cci_burst(cci_dev->soc_info.index, master, queue, + "thirq raised Buflvl", + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]); + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Threshold IRQ Raised, BufferLevel: %d", + cci_dev->soc_info.index, master, queue, + cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset)); + } else { + // Apply FULL QUEUE + numWordsInQueue = cam_io_r_mb(base + + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset); + } + + while (index < num_word_written_to_queue) { + numBytes = (data_queue[index] & 0xF0) >> 4; + if ((numBytes == 0xF) || (numBytes == 0xE)) { + iterate = 3; + } else { + numBytes = (numBytes + 4) & ~0x03; + iterate = numBytes / 4; + } + if (numBytes == 0xE) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d THRESHOLD IRQ Enabled; data_queue[%d]: 0x%x refcnt: %d", + cci_dev->soc_info.index, master, queue, index, data_queue[index], + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]); + } + if (triggerHalfQueue == 0) { + condition = ((numWordsInQueue + iterate + 1) > queue_size); + } else { + condition = (cci_dev->cci_master_info[master].th_irq_ref_cnt[queue] > 0) ? 
+ (numWordsInQueue >= queueStartThreshold) : 0; + } + + if (condition == true) { + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d CUR_WORD_CNT_ADDR %d len %d max %d", + cci_dev->soc_info.index, master, queue, numWordsInQueue, iterate, queue_size); + if ((cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]) > 0) { + cam_io_w_mb(numWordsInQueue, base + + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset); + cam_io_w_mb(reg_val, base + CCI_QUEUE_START_ADDR); + triggerHalfQueue = 1; + numWordsInQueue = 0; + CAM_INFO(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Issued QUEUE_START, " + "wait for Threshold_IRQ, th_irq_ref_cnt[%d]:%d", + cci_dev->soc_info.index, master, queue, queue, + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]); + trace_cam_cci_burst(cci_dev->soc_info.index, master, queue, + "Q_START thirq_cnt", + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]); + + return 0; + } + } else { + while (iterate > 0) { + cam_io_w_mb(data_queue[index], base + + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + master * 0x200 + queue * 0x100); + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d LOAD_DATA_ADDR 0x%x, " + "index: %d trig: %d numWordsInQueue: %d", + cci_dev->soc_info.index, master, queue, + data_queue[index], (index + 1), + triggerHalfQueue, (numWordsInQueue + 1)); + numWordsInQueue++; + index++; + cci_dev->cci_master_info[master].data_queue_start_index[queue] = index; + iterate--; + } + } + } + + if ((numWordsInQueue > 0) && (cci_dev->cci_master_info[master].th_irq_ref_cnt[queue] > 0)) { + cam_io_w_mb(numWordsInQueue, base + + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset); + cam_io_w_mb(reg_val, base + CCI_QUEUE_START_ADDR); + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Issued ****** FINAL QUEUE_START********, " + "numWordsInQueue: %d, th_irq_ref_cnt[%d]:%d", + cci_dev->soc_info.index, master, queue, numWordsInQueue, queue, + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]); + numWordsInQueue = 0; + } + + return 0; +} + +static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev, + struct 
cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue, + enum cci_i2c_sync sync_en) +{ + uint16_t i = 0, j = 0, len = 0; + int32_t rc = 0, en_seq_write = 0; + struct cam_sensor_i2c_reg_setting *i2c_msg = + &c_ctrl->cfg.cci_i2c_write_cfg; + struct cam_sensor_i2c_reg_array *i2c_cmd = i2c_msg->reg_setting; + enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master; + uint16_t reg_addr = 0, cmd_size = i2c_msg->size; + uint32_t reg_offset, val, delay = 0; + uint32_t max_queue_size, queue_size = 0; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = soc_info->reg_map[0].mem_base; + unsigned long flags; + uint8_t next_position = i2c_msg->data_type; + uint32_t half_queue_mark = 0, full_queue_mark = 0, num_payload = 0; + uint32_t num_word_written_to_queue = 0; + uint32_t *data_queue = NULL; + uint8_t data_len = 0, addr_len = 0; + uint32_t index = 0; + uint8_t *buf = NULL; + uint32_t last_i2c_full_payload = 0; + uint32_t trigger_half_queue = 0, queue_start_threshold = 0; + uint32_t en_threshold_irq = 0, cci_enable_th_irq = 0; + + if (i2c_cmd == NULL) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Failed: i2c cmd is NULL", + cci_dev->soc_info.index, master, queue); + return -EINVAL; + } + + if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d failed: invalid cmd_size %d", + cci_dev->soc_info.index, master, queue, cmd_size); + return -EINVAL; + } + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d addr type %d data type %d cmd_size %d", + cci_dev->soc_info.index, master, queue, i2c_msg->addr_type, i2c_msg->data_type, cmd_size); + + if (i2c_msg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d failed: invalid addr_type 0x%X", + cci_dev->soc_info.index, master, queue, i2c_msg->addr_type); + return -EINVAL; + } + if (i2c_msg->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d failed: invalid data_type 0x%X", + cci_dev->soc_info.index, master, queue, 
i2c_msg->data_type); + return -EINVAL; + } + + trace_cam_cci_burst(cci_dev->soc_info.index, master, queue, + "cci burst write START for sid", + c_ctrl->cci_info->sid); + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d : START for sid: 0x%x size: %d", + cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->sid, i2c_msg->size); + + cci_dev->cci_master_info[master].is_burst_enable[queue] = false; + cci_dev->cci_master_info[master].num_words_exec[queue] = 0; + + addr_len = cam_cci_convert_type_to_num_bytes(i2c_msg->addr_type); + data_len = cam_cci_convert_type_to_num_bytes(i2c_msg->data_type); + len = (cmd_size * data_len + addr_len); + last_i2c_full_payload = len/MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11; + /* + * For every 11 Bytes of Data 1 Byte of data is Control cmd: 0xF9 or 0xE9 or {0x19 to 0xB9} + * Hence compute will account for "len/PAYLOAD_SIZE_11" + */ + len = len + len/MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11 + + (((len % MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) == 0) ? 0 : 1); + if (len % 4) { + len = len/4 + 1; + } else { + len = len/4; + } + /* + * Its possible that 8 number of CCI cmds, each 32-bit + * can co-exisist in QUEUE along with I2C Data + */ + len = len + 8; + + data_queue = CAM_MEM_ZALLOC((len * sizeof(uint32_t)), + GFP_KERNEL); + if (!data_queue) { + CAM_ERR(CAM_CCI, "Unable to allocate memory, BUF is NULL"); + return -ENOMEM; + } + + reg_offset = master * 0x200 + queue * 0x100; + + cam_io_w_mb(cci_dev->cci_wait_sync_cfg.cid, + base + CCI_SET_CID_SYNC_TIMER_ADDR + + cci_dev->cci_wait_sync_cfg.csid * + CCI_SET_CID_SYNC_TIMER_OFFSET); + + /* Retry count is not supported in BURST MODE */ + c_ctrl->cci_info->retries = 0; + + /* + * 1. Configure Slave ID through SET_PARAM_CMD + * For Burst Mode retries are not supported. 
+ * Record the number of words written to QUEUE + */ + val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 | + c_ctrl->cci_info->retries << 16 | + c_ctrl->cci_info->id_map << 18; + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_LOAD_DATA_ADDR:val 0x%x:0x%x", + cci_dev->soc_info.index, master, queue, CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset, val); + cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset); + index++; + + /* + * 2. Initialize the variables used for synchronizing between + * process context and CCI IRQ Context + */ + spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue], + flags); + atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0); + // atomic_set(&cci_dev->cci_master_info[master].th_irq_ref_cnt[queue], 0); + reinit_completion(&cci_dev->cci_master_info[master].th_burst_complete[queue]); + spin_unlock_irqrestore(&cci_dev->cci_master_info[master].lock_q[queue], + flags); + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue] = 0; + + max_queue_size = + cci_dev->cci_i2c_queue_info[master][queue].max_queue_size; + + if ((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) || + (c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST)) + queue_size = max_queue_size; + else + queue_size = max_queue_size / 2; + reg_addr = i2c_cmd->reg_addr; + + if (len < queue_size) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d: len: %d < QueueSize: %d " + "No need of threshold IRQ", + cci_dev->soc_info.index, master, queue, len, queue_size); + cci_enable_th_irq = 0; + } else { + cci_enable_th_irq = CCI_ENABLE_THRESHOLD_IRQ; + } + + if (sync_en == MSM_SYNC_ENABLE && cci_dev->valid_sync && + cmd_size < max_queue_size) { + val = CCI_I2C_WAIT_SYNC_CMD | + ((cci_dev->cci_wait_sync_cfg.line) << 4); + cam_io_w_mb(val, + base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + + reg_offset); + index++; + } + + /* 3. LOCK the QUEUE So that we can start BURST WRITE*/ + cam_cci_lock_queue(cci_dev, master, queue, 1); + index++; + + /* + * 4. 
Need to place 0xE0 marker in middle and end of the QUEUE to trigger + * Thresold Interrupt + */ + full_queue_mark = (queue_size - index - 1) / MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_WORDS; + half_queue_mark = full_queue_mark / 2; + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d queue_size: %d full_queue_mark: %d half_queue_mark: %d", + cci_dev->soc_info.index, master, queue, queue_size, full_queue_mark, half_queue_mark); + + /* + * 5. Iterate through entire size of settings ==> {reg_addr, reg_data} + * and formulate in QUEUE0 like below + * D2 A1 A2 F9 ==> 0xF9: Hold the BUS for I2C WRITE; {0xA2A1, 0xD2D1, + * D6 D3 D4 D1 ==> 0xD4D3, 0xD6D5, 0xD8D7, 0xD10D9.......} + * D10 D7 D8 D5 + */ + + index = 0; + buf = (uint8_t *) &data_queue[index]; + while (cmd_size) { + delay = i2c_cmd->delay; + i = 0; + buf[i++] = CCI_I2C_WRITE_CMD; + + if (en_seq_write == 0) { + for (j = 0; j < i2c_msg->addr_type; j++) { + buf[i2c_msg->addr_type - j] = (reg_addr >> (j * 8)) & 0xFF; + i++; + } + } + do { + if (i2c_msg->data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[i++] = i2c_cmd->reg_data; + if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ || + c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) + reg_addr++; + } else { + if (i <= cci_dev->payload_size) { + /* + * this logic fill reg data to buf[] array + * which has a max index value 11, + * and the sensor reg data type can be DWORD/3B/WORD, + * next_position records the split position or the + * position in the reg data where will be filled into + * next buf[] array slot. 
+ */ + if (next_position >= CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[i++] = (i2c_cmd->reg_data & + 0xFF000000) >> 24; + if ((i-1) == MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) { + next_position = CAMERA_SENSOR_I2C_TYPE_3B; + break; + } + } + /* fill highest byte of 3B type sensor reg data */ + if (next_position >= CAMERA_SENSOR_I2C_TYPE_3B) { + buf[i++] = (i2c_cmd->reg_data & + 0x00FF0000) >> 16; + if ((i-1) == MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) { + next_position = CAMERA_SENSOR_I2C_TYPE_WORD; + break; + } + } + /* fill high byte of WORD type sensor reg data */ + if (next_position >= CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[i++] = (i2c_cmd->reg_data & + 0x0000FF00) >> 8; + if ((i-1) == MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) { + next_position = CAMERA_SENSOR_I2C_TYPE_BYTE; + break; + } + } + /* fill lowest byte of sensor reg data */ + buf[i++] = i2c_cmd->reg_data & 0x000000FF; + next_position = i2c_msg->data_type; + + if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ || + c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) + reg_addr += i2c_msg->data_type; + } + } + /* move to next cmd while all reg data bytes are filled */ + if (next_position == i2c_msg->data_type) { + i2c_cmd++; + --cmd_size; + } + } while ((cmd_size > 0) && (i <= cci_dev->payload_size)); + + num_payload++; + en_threshold_irq = cci_enable_th_irq && + (((num_payload % half_queue_mark) == 0) || (num_payload == last_i2c_full_payload)); + if (cmd_size > 0) { + if (en_threshold_irq) { + buf[0] |= 0xE0; + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]++; + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Th IRQ enabled for index: %d " + "num_payld: %d th_irq_ref_cnt: %d", + cci_dev->soc_info.index, master, queue, num_word_written_to_queue, + num_payload, cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]); + } else { + buf[0] |= 0xF0; + } + } else { + buf[0] |= ((i-1) << 4); + CAM_DBG(CAM_CCI, "End of register Write............ 
"); + } + en_seq_write = 1; + len = ((i-1)/4) + 1; + /* increment pointer to next multiple of 4; which is a word in CCI QUEUE */ + buf = buf + ((i+3) & ~0x03); + num_word_written_to_queue += len; + } + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d num words to Queue: %d th_irq_ref_cnt: %d cci_dev: %p", + cci_dev->soc_info.index, master, queue, num_word_written_to_queue, + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue], cci_dev); + + trace_cam_cci_burst(cci_dev->soc_info.index, master, queue, + "thirq_cnt", + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]); + + index = 0; + queue_start_threshold = half_queue_mark * MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_WORDS; + + cci_dev->cci_master_info[master].data_queue[queue] = data_queue; + cci_dev->cci_master_info[master].num_words_in_data_queue[queue] = num_word_written_to_queue; + cci_dev->cci_master_info[master].data_queue_start_index[queue] = index; + cci_dev->cci_master_info[master].half_queue_mark[queue] = queue_start_threshold; + + cam_cci_data_queue_burst_apply(cci_dev, master, queue, trigger_half_queue); + + while ((cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]) > 0) { + if (!cam_common_wait_for_completion_timeout( + &cci_dev->cci_master_info[master].th_burst_complete[queue], + CCI_TIMEOUT)) { + cam_cci_dump_registers(cci_dev, master, queue); + + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d wait timeout, rc: %d", + cci_dev->soc_info.index, master, queue, rc); + rc = -ETIMEDOUT; + cam_cci_flush_queue(cci_dev, master); + CAM_INFO(CAM_CCI, + "CCI%d_I2C_M%d_Q%d dump register after reset", + cci_dev->soc_info.index, master, queue); + cam_cci_dump_registers(cci_dev, master, queue); + goto ERROR; + } + cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]--; + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Threshold IRQ Raised, BufferLevel: %d", + cci_dev->soc_info.index, master, queue, + cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset)); + } + + if (cci_dev->cci_master_info[master].th_irq_ref_cnt[queue] > 0) { + 
        cci_dev->cci_master_info[master].is_burst_enable[queue] = true;
        cci_dev->cci_master_info[master].num_words_exec[queue] = 0;
    }

    rc = cam_cci_transfer_end(cci_dev, master, queue);
    if (rc < 0) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Slave: 0x%x failed rc %d",
            cci_dev->soc_info.index, master, queue, (c_ctrl->cci_info->sid << 1), rc);
        goto ERROR;
    }
    trace_cam_cci_burst(cci_dev->soc_info.index, master, queue,
        "cci burst write Done for sid",
        c_ctrl->cci_info->sid);
    CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d : completed ....for sid: 0x%x size: %d",
        cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->sid, i2c_msg->size);

ERROR:
    /* Shared exit: release the staging buffer and clear the published
     * pointer so the IRQ handler cannot see a stale buffer. */
    CAM_MEM_FREE(data_queue);
    cci_dev->cci_master_info[master].data_queue[queue] = NULL;
    return rc;
}

/*
 * cam_cci_data_queue - program a (non-burst-staged) I2C write into the CCI
 * HW command queue word by word, handling half/full queue backpressure and
 * optional inter-command delays.
 *
 * @cci_dev: CCI device instance
 * @c_ctrl:  control block carrying slave info and the register-write payload
 * @queue:   HW command queue to use for this master
 * @sync_en: whether to prepend a WAIT_SYNC command
 *
 * Returns 0 on success or a negative errno.
 */
static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
    struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
    enum cci_i2c_sync sync_en)
{
    uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
    int32_t rc = 0, free_size = 0, en_seq_write = 0;
    uint8_t write_data[CAM_MAX_NUM_CCI_PAYLOAD_BYTES + 1] = {0};
    struct cam_sensor_i2c_reg_setting *i2c_msg =
        &c_ctrl->cfg.cci_i2c_write_cfg;
    struct cam_sensor_i2c_reg_array *i2c_cmd = i2c_msg->reg_setting;
    enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
    uint16_t reg_addr = 0, cmd_size = i2c_msg->size;
    uint32_t read_val = 0, reg_offset, val, delay = 0;
    uint32_t max_queue_size, queue_size = 0, cmd = 0;
    struct cam_hw_soc_info *soc_info =
        &cci_dev->soc_info;
    void __iomem *base = soc_info->reg_map[0].mem_base;
    unsigned long flags;
    uint8_t next_position = i2c_msg->data_type;

    if (i2c_cmd == NULL) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Failed: i2c cmd is NULL",
            cci_dev->soc_info.index, master, queue);
        return -EINVAL;
    }

    if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d failed: invalid cmd_size %d",
            cci_dev->soc_info.index, master, queue, cmd_size);
        return -EINVAL;
    }

    CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d addr type %d data type %d cmd_size %d",
        cci_dev->soc_info.index, master, queue, i2c_msg->addr_type, i2c_msg->data_type, cmd_size);

    if (i2c_msg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d failed: invalid addr_type 0x%X",
            cci_dev->soc_info.index, master, queue, i2c_msg->addr_type);
        return -EINVAL;
    }
    if (i2c_msg->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d failed: invalid data_type 0x%X",
            cci_dev->soc_info.index, master, queue, i2c_msg->data_type);
        return -EINVAL;
    }
    reg_offset = master * 0x200 + queue * 0x100;

    cci_dev->cci_master_info[master].is_burst_enable[queue] = false;
    cci_dev->cci_master_info[master].num_words_exec[queue] = 0;
    cam_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
        base + CCI_SET_CID_SYNC_TIMER_ADDR +
        cci_dev->cci_wait_sync_cfg.csid *
        CCI_SET_CID_SYNC_TIMER_OFFSET);

    cam_cci_lock_queue(cci_dev, master, queue, 1);

    val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
        c_ctrl->cci_info->retries << 16 |
        c_ctrl->cci_info->id_map << 18;

    CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_LOAD_DATA_ADDR:val 0x%x:0x%x",
        cci_dev->soc_info.index, master, queue, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
        reg_offset, val);
    cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
        reg_offset);

    spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue],
        flags);
    atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
    spin_unlock_irqrestore(&cci_dev->cci_master_info[master].lock_q[queue],
        flags);

    max_queue_size =
        cci_dev->cci_i2c_queue_info[master][queue].max_queue_size;

    if ((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) ||
        (c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST))
        queue_size = max_queue_size;
    else
        queue_size = max_queue_size / 2;
    reg_addr = i2c_cmd->reg_addr;

    if (sync_en == MSM_SYNC_ENABLE && cci_dev->valid_sync &&
        cmd_size < max_queue_size) {
        val = CCI_I2C_WAIT_SYNC_CMD |
            ((cci_dev->cci_wait_sync_cfg.line) << 4);
        cam_io_w_mb(val,
            base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
            reg_offset);
    }

    while (cmd_size) {
        uint32_t pack = 0;

        len = cam_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
            i2c_cmd, &pack);
        /* NOTE(review): len is uint16_t while cam_cci_calc_cmd_len returns
         * int32_t — a negative return wraps to a large positive value, so
         * this check only catches 0. Confirm against the helper's contract. */
        if (len <= 0) {
            CAM_ERR(CAM_CCI,
                "CCI%d_I2C_M%d_Q%d Calculate command len failed, len: %d",
                cci_dev->soc_info.index, master, queue, len);
            return -EINVAL;
        }

        read_val = cam_io_r_mb(base +
            CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
        CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d CUR_WORD_CNT_ADDR %d len %d max %d",
            cci_dev->soc_info.index, master, queue, read_val, len, max_queue_size);
        /* + 2 - space allocation for Report and Unlock CMD */
        if ((read_val + len + 2) > queue_size) {
            if ((read_val + len + 2) > max_queue_size) {
                rc = cam_cci_process_full_q(cci_dev,
                    master, queue);
                if (rc < 0) {
                    CAM_ERR(CAM_CCI,
                        "CCI%d_I2C_M%d_Q%d Failed to process full queue rc: %d",
                        cci_dev->soc_info.index, master, queue, rc);
                    return rc;
                }
                continue;
            }
            cam_cci_process_half_q(cci_dev, master, queue);
        }

        CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d cmd_size %d addr 0x%x data 0x%x",
            cci_dev->soc_info.index, master, queue, cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
        delay = i2c_cmd->delay;
        i = 0;
        write_data[i++] = CCI_I2C_WRITE_CMD;

        /*
         * in case of multiple command
         * MSM_CCI_I2C_WRITE : address is not continuous, so update
         * address for a new packet.
"CCI%d_I2C_M%d_Q%d Slave: 0x%x failed rc %d", + cci_dev->soc_info.index, master, queue, (c_ctrl->cci_info->sid << 1), rc); + return rc; + } + + return rc; +} + +static int32_t cam_cci_burst_read(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl) +{ + int32_t rc = 0; + uint32_t val = 0, i = 0, j = 0, irq_mask_update = 0; + unsigned long rem_jiffies, flags; + int32_t read_words = 0, exp_words = 0; + int32_t index = 0, first_byte = 0, total_read_words = 0; + enum cci_i2c_master_t master; + enum cci_i2c_queue_t queue = QUEUE_1; + struct cci_device *cci_dev = NULL; + struct cam_cci_read_cfg *read_cfg = NULL; + struct cam_hw_soc_info *soc_info = NULL; + void __iomem *base = NULL; + + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return -EINVAL; + } + master = c_ctrl->cci_info->cci_i2c_master; + read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg; + + if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX + || c_ctrl->cci_info->cci_i2c_master < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Invalid I2C master addr", + cci_dev->soc_info.index, master, queue); + return -EINVAL; + } + + /* Set the I2C Frequency */ + rc = cam_cci_set_clk_param(cci_dev, c_ctrl); + if (rc < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d cam_cci_set_clk_param failed rc: %d", + cci_dev->soc_info.index, master, queue, rc); + return rc; + } + + mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]); + cci_dev->is_burst_read[master] = true; + reinit_completion(&cci_dev->cci_master_info[master].report_q[queue]); + + soc_info = &cci_dev->soc_info; + base = soc_info->reg_map[0].mem_base; + + /* + * Call validate queue to make sure queue is empty before starting. + * If this call fails, don't proceed with i2c_read call. 
This is to + * avoid overflow / underflow of queue + */ + rc = cam_cci_validate_queue(cci_dev, + cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1, + master, queue); + if (rc < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Initial validataion failed rc:%d", + cci_dev->soc_info.index, master, queue, rc); + goto rel_mutex_q; + } + + if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Invalid read retries info retries from slave: %d, max retries: %d", + cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->retries, CCI_I2C_READ_MAX_RETRIES); + goto rel_mutex_q; + } + + if (read_cfg->data == NULL) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Data ptr is NULL", + cci_dev->soc_info.index, master, queue); + goto rel_mutex_q; + } + + if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d failed : Invalid addr type: %u", + cci_dev->soc_info.index, master, queue, read_cfg->addr_type); + rc = -EINVAL; + goto rel_mutex_q; + } + + val = CCI_I2C_LOCK_CMD; + rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue); + if (rc < 0) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d failed to write lock_cmd for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + goto rel_mutex_q; + } + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d set param sid 0x%x retries %d id_map %d", + cci_dev->soc_info.index, master, queue, + c_ctrl->cci_info->sid, c_ctrl->cci_info->retries, + c_ctrl->cci_info->id_map); + val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 | + c_ctrl->cci_info->retries << 16 | + c_ctrl->cci_info->id_map << 18; + rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue); + if (rc < 0) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed to write param_cmd for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + goto rel_mutex_q; + } + + val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4); + for (i = 0; i < read_cfg->addr_type; i++) { + val |= ((read_cfg->addr >> (i << 3)) & 
0xFF) << + ((read_cfg->addr_type - i) << 3); + } + + rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue); + if (rc < 0) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed to write disable cmd for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + goto rel_mutex_q; + } + + val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4); + rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue); + if (rc < 0) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed to write read_cmd for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + goto rel_mutex_q; + } + + val = CCI_I2C_UNLOCK_CMD; + rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue); + if (rc < 0) { + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d_Q%d failed to write unlock_cmd for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + goto rel_mutex_q; + } + + val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + + master * 0x200 + queue * 0x100); + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d cur word cnt 0x%x", + cci_dev->soc_info.index, master, queue, val); + cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + + master * 0x200 + queue * 0x100); + + val = 1 << ((master * 2) + queue); + cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR); + + exp_words = ((read_cfg->num_byte / 4) + 1); + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d waiting for threshold [exp_words %d]", + cci_dev->soc_info.index, master, queue, exp_words); + + while (total_read_words != exp_words) { + rem_jiffies = cam_common_wait_for_completion_timeout( + &cci_dev->cci_master_info[master].th_complete, + CCI_TIMEOUT); + if (!rem_jiffies) { + rc = -ETIMEDOUT; + val = cam_io_r_mb(base + + CCI_I2C_M0_READ_BUF_LEVEL_ADDR + + master * 0x100); + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d wait timeout for th_complete, FIFO buf_lvl:0x%x, rc: %d", + cci_dev->soc_info.index, master, queue, val, rc); + cam_cci_dump_registers(cci_dev, master, queue); + + cam_cci_flush_queue(cci_dev, master); + goto rel_mutex_q; + } + + if (cci_dev->cci_master_info[master].status) { + 
CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Error with Slave: 0x%x", + cci_dev->soc_info.index, master, queue, (c_ctrl->cci_info->sid << 1)); + rc = -EINVAL; + cci_dev->cci_master_info[master].status = 0; + goto rel_mutex_q; + } + + read_words = cam_io_r_mb(base + + CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100); + if (read_words <= 0) { + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d FIFO Buffer lvl is 0", + cci_dev->soc_info.index, master, queue); + goto enable_irq; + } + +read_again: + j++; + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d Iteration: %u read_words %d", + cci_dev->soc_info.index, master, queue, j, read_words); + + total_read_words += read_words; + while (read_words > 0) { + val = cam_io_r(base + + CCI_I2C_M0_READ_DATA_ADDR + master * 0x100); + for (i = 0; (i < 4) && + (index < read_cfg->num_byte); i++) { + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d i:%d index:%d", + cci_dev->soc_info.index, master, queue, i, index); + if (!first_byte) { + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d sid 0x%x", + cci_dev->soc_info.index, master, queue, val & 0xFF); + first_byte++; + } else { + read_cfg->data[index] = + (val >> (i * 8)) & 0xFF; + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d data[%d] 0x%x", + cci_dev->soc_info.index, master, queue, index, + read_cfg->data[index]); + index++; + } + } + read_words--; + } + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d Iteration:%u total_read_words %d", + cci_dev->soc_info.index, master, queue, j, total_read_words); + + read_words = cam_io_r_mb(base + + CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100); + if (read_words > 0) { + CAM_DBG(CAM_CCI, "FIFO Buffer lvl is %d", read_words); + goto read_again; + } + +enable_irq: + spin_lock_irqsave(&cci_dev->lock_status, flags); + if (cci_dev->irqs_disabled) { + irq_mask_update = + cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR); + if (master == MASTER_0 && cci_dev->irqs_disabled & + CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) + irq_mask_update |= + CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD; + else if (master == MASTER_1 && cci_dev->irqs_disabled & + 
                CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)
                irq_mask_update |=
                    CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
            cam_io_w_mb(irq_mask_update,
                base + CCI_IRQ_MASK_1_ADDR);
        }
        spin_unlock_irqrestore(&cci_dev->lock_status, flags);

        if (total_read_words == exp_words) {
            /*
             * This wait is for RD_DONE irq, if RD_DONE is
             * triggered we will call complete on both threshold
             * & read done waits. As part of the threshold wait
             * we will be draining the entire buffer out. This
             * wait is to compensate for the complete invoked for
             * RD_DONE exclusively.
             */
            rem_jiffies = cam_common_wait_for_completion_timeout(
                &cci_dev->cci_master_info[master].rd_done,
                CCI_TIMEOUT);
            if (!rem_jiffies) {
                rc = -ETIMEDOUT;
                val = cam_io_r_mb(base +
                    CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
                    master * 0x100);
                CAM_ERR(CAM_CCI,
                    "CCI%d_I2C_M%d_Q%d wait timeout for RD_DONE irq for rc = %d FIFO buf_lvl:0x%x, rc: %d",
                    cci_dev->soc_info.index, master, queue,
                    val, rc);
                cam_cci_dump_registers(cci_dev,
                    master, queue);

                cam_cci_flush_queue(cci_dev, master);
                goto rel_mutex_q;
            }

            if (cci_dev->cci_master_info[master].status) {
                CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Error with Slave 0x%x",
                    cci_dev->soc_info.index, master, queue, (c_ctrl->cci_info->sid << 1));
                rc = -EINVAL;
                cci_dev->cci_master_info[master].status = 0;
                goto rel_mutex_q;
            }
            break;
        }
    }

    CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d Burst read successful words_read %d",
        cci_dev->soc_info.index, master, queue, total_read_words);

rel_mutex_q:
    mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);

    /* Drop this transfer's clock-frequency reference; last user releases
     * the master semaphore taken when the frequency was configured. */
    mutex_lock(&cci_dev->cci_master_info[master].freq_cnt_lock);
    if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0)
        up(&cci_dev->cci_master_info[master].master_sem);
    mutex_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock);
    return rc;
}

/*
 * cam_cci_read - single (non-burst) I2C read through the CCI read FIFO:
 * queue LOCK/SET_PARAM/address/READ/UNLOCK commands, start the queue, wait
 * for RD_DONE, then drain the FIFO into read_cfg->data.
 *
 * @sd:     v4l2 subdev wrapping the CCI device
 * @c_ctrl: control block carrying slave info and the read configuration
 *
 * Returns 0 on success or a negative errno.
 */
static int32_t cam_cci_read(struct v4l2_subdev *sd,
    struct cam_cci_ctrl *c_ctrl)
{
    int32_t rc = 0;
    uint32_t val = 0;
    uint8_t read_data_byte[CAM_MAX_NUM_CCI_PAYLOAD_BYTES + 1] = {0};
    uint32_t *reg_addr;
    int32_t read_words = 0, exp_words = 0;
    int32_t index = 0, first_byte = 0;
    uint32_t i = 0;
    enum cci_i2c_master_t master;
    enum cci_i2c_queue_t queue = QUEUE_1;
    struct cci_device *cci_dev = NULL;
    struct cam_cci_read_cfg *read_cfg = NULL;
    struct cam_hw_soc_info *soc_info = NULL;
    void __iomem *base = NULL;

    cci_dev = v4l2_get_subdevdata(sd);
    if (!cci_dev) {
        CAM_ERR(CAM_CCI, "cci_dev NULL");
        return -EINVAL;
    }
    master = c_ctrl->cci_info->cci_i2c_master;
    read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;

    if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
        || c_ctrl->cci_info->cci_i2c_master < 0) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Invalid I2C master addr:%d",
            cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->cci_i2c_master);
        return -EINVAL;
    }

    /* Set the I2C Frequency */
    rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
    if (rc < 0) {
        CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
        return rc;
    }

    mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
    cci_dev->is_burst_read[master] = false;
    reinit_completion(&cci_dev->cci_master_info[master].report_q[queue]);

    soc_info = &cci_dev->soc_info;
    base = soc_info->reg_map[0].mem_base;

    /*
     * Call validate queue to make sure queue is empty before starting.
     * If this call fails, don't proceed with i2c_read call. This is to
     * avoid overflow / underflow of queue
     */
    rc = cam_cci_validate_queue(cci_dev,
        cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
        master, queue);
    if (rc < 0) {
        val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_CMD_ADDR +
            master * 0x200 + queue * 0x100);
        CAM_ERR(CAM_CCI,
            "CCI%d_I2C_M%d_Q%d Initial validataion failed rc: %d, CUR_CMD:0x%x",
            cci_dev->soc_info.index, master, queue, rc, val);
        goto rel_mutex_q;
    }

    /* NOTE(review): the next two error paths jump to rel_mutex_q without
     * assigning rc, so the function returns 0 (success) — confirm whether
     * rc = -EINVAL was intended here. */
    if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
        CAM_ERR(CAM_CCI,
            "CCI%d_I2C_M%d_Q%d Invalid read retries info retries from slave: %d, max retries: %d",
            cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->retries, CCI_I2C_READ_MAX_RETRIES);
        goto rel_mutex_q;
    }

    if (read_cfg->data == NULL) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Data ptr is NULL",
            cci_dev->soc_info.index, master, queue);
        goto rel_mutex_q;
    }

    val = CCI_I2C_LOCK_CMD;
    rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
    if (rc < 0) {
        CAM_DBG(CAM_CCI,
            "CCI%d_I2C_M%d_Q%d failed to write lock_cmd for rc: %d",
            cci_dev->soc_info.index, master, queue, rc);
        goto rel_mutex_q;
    }

    CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d set param sid 0x%x retries %d id_map %d",
        cci_dev->soc_info.index, master, queue,
        c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
        c_ctrl->cci_info->id_map);
    val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
        c_ctrl->cci_info->retries << 16 |
        c_ctrl->cci_info->id_map << 18;
    rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
    if (rc < 0) {
        CAM_DBG(CAM_CCI,
            "CCI%d_I2C_M%d_Q%d Failed to write param_cmd for rc: %d",
            cci_dev->soc_info.index, master, queue, rc);
        goto rel_mutex_q;
    }

    if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Failed : Invalid addr type: %u",
            cci_dev->soc_info.index, master, queue, read_cfg->addr_type);
        rc = -EINVAL;
        goto rel_mutex_q;
    }

    /* Build the address-phase command: command byte followed by the
     * register address bytes MSB first, then push it word-by-word. */
    read_data_byte[0] = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
    for (i = 0; i < read_cfg->addr_type; i++) {
        read_data_byte[read_cfg->addr_type - i] = (read_cfg->addr >> (i * 8)) & 0xFF;
    }

    reg_addr = (uint32_t *)&read_data_byte[0];
    read_words = DIV_ROUND_UP(read_cfg->addr_type + 1, 4);

    for (i = 0; i < read_words; i++) {
        rc = cam_cci_write_i2c_queue(cci_dev, *reg_addr, master, queue);
        if (rc < 0) {
            CAM_DBG(CAM_CCI,
                "CCI%d_I2C_M%d_Q%d Failed to write disable_cmd for rc: %d",
                cci_dev->soc_info.index, master, queue, rc);
            goto rel_mutex_q;
        }
        reg_addr++;
    }

    val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
    rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
    if (rc < 0) {
        CAM_DBG(CAM_CCI,
            "CCI%d_I2C_M%d_Q%d Failed to write read_cmd for rc: %d",
            cci_dev->soc_info.index, master, queue, rc);
        goto rel_mutex_q;
    }

    val = CCI_I2C_UNLOCK_CMD;
    rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
    if (rc < 0) {
        CAM_DBG(CAM_CCI,
            "CCI%d_I2C_M%d_Q%d failed to write unlock_cmd for rc: %d",
            cci_dev->soc_info.index, master, queue, rc);
        goto rel_mutex_q;
    }

    val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR +
        master * 0x200 + queue * 0x100);
    CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d_CUR_WORD_CNT 0x%x",
        cci_dev->soc_info.index, master, queue, val);
    cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR +
        master * 0x200 + queue * 0x100);

    val = 1 << ((master * 2) + queue);
    cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
    CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d exp_words to be read: %d",
        cci_dev->soc_info.index, master, queue, ((read_cfg->num_byte / 4) + 1));

    if (!cam_common_wait_for_completion_timeout(
        &cci_dev->cci_master_info[master].rd_done, CCI_TIMEOUT)) {
        cam_cci_dump_registers(cci_dev, master, queue);

        rc = -ETIMEDOUT;
        val = cam_io_r_mb(base +
            CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
        CAM_ERR(CAM_CCI,
            "CCI%d_I2C_M%d_Q%d rd_done wait timeout FIFO buf_lvl: 0x%x, rc: %d",
            cci_dev->soc_info.index, master, queue, val, rc);
        cam_cci_flush_queue(cci_dev, master);
        goto rel_mutex_q;
    }

    if (cci_dev->cci_master_info[master].status) {
        /* During probe a missing slave is expected; log softer */
        if (cci_dev->is_probing)
            CAM_INFO(CAM_CCI, "CCI%d_I2C_M%d_Q%d ERROR with Slave 0x%x",
                cci_dev->soc_info.index, master, queue,
                (c_ctrl->cci_info->sid << 1));
        else
            CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d ERROR with Slave 0x%x",
                cci_dev->soc_info.index, master, queue,
                (c_ctrl->cci_info->sid << 1));
        rc = -EINVAL;
        cci_dev->cci_master_info[master].status = 0;
        goto rel_mutex_q;
    }

    read_words = cam_io_r_mb(base +
        CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
    exp_words = ((read_cfg->num_byte / 4) + 1);
    if (read_words != exp_words) {
        CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d read_words: %d, exp words: %d",
            cci_dev->soc_info.index, master, queue, read_words, exp_words);
        memset(read_cfg->data, 0, read_cfg->num_byte);
        rc = -EINVAL;
        goto rel_mutex_q;
    }
    index = 0;
    CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d index: %d, num_bytes: %d",
        cci_dev->soc_info.index, master, queue, index, read_cfg->num_byte);
    first_byte = 0;
    /* Unpack FIFO words; the first byte is the echoed slave id, skipped */
    while (read_words > 0) {
        val = cam_io_r(base +
            CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
        CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d read val: 0x%x",
            cci_dev->soc_info.index, master, queue, val);
        for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
            CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d i: %d, index: %d",
                cci_dev->soc_info.index, master, queue, i, index);
            if (!first_byte) {
                CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d sid: 0x%x",
                    cci_dev->soc_info.index, master, queue, val & 0xFF);
                first_byte++;
            } else {
                read_cfg->data[index] =
                    (val >> (i * 8)) & 0xFF;
                CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d data[%d] 0x%x",
                    cci_dev->soc_info.index, master, queue, index,
                    read_cfg->data[index]);
                index++;
            }
        }
        read_words--;
    }
rel_mutex_q:
    mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);

mutex_lock(&cci_dev->cci_master_info[master].freq_cnt_lock); + if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0) + up(&cci_dev->cci_master_info[master].master_sem); + mutex_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock); + return rc; +} + +static int32_t cam_cci_i2c_write(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue, + enum cci_i2c_sync sync_en) +{ + int32_t rc = 0; + struct cci_device *cci_dev; + enum cci_i2c_master_t master; + + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return -EINVAL; + } + + if (cci_dev->cci_state != CCI_STATE_ENABLED) { + CAM_ERR(CAM_CCI, "invalid cci: %d state: %d", + cci_dev->soc_info.index, cci_dev->cci_state); + return -EINVAL; + } + master = c_ctrl->cci_info->cci_i2c_master; + if (master >= MASTER_MAX || master < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d Invalid I2C master addr", + cci_dev->soc_info.index, + master); + return -EINVAL; + } + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d set param sid 0x%x retries %d id_map %d", + cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->sid, c_ctrl->cci_info->retries, + c_ctrl->cci_info->id_map); + + /* Set the I2C Frequency */ + rc = cam_cci_set_clk_param(cci_dev, c_ctrl); + if (rc < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d cam_cci_set_clk_param failed rc %d", + cci_dev->soc_info.index, master, queue, rc); + return rc; + } + reinit_completion(&cci_dev->cci_master_info[master].report_q[queue]); + reinit_completion(&cci_dev->cci_master_info[master].th_burst_complete[queue]); + /* + * Call validate queue to make sure queue is empty before starting. + * If this call fails, don't proceed with i2c_write call. 
This is to + * avoid overflow / underflow of queue + */ + rc = cam_cci_validate_queue(cci_dev, + cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1, + master, queue); + if (rc < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Initial validataion failed rc %d", + cci_dev->soc_info.index, master, queue, rc); + goto ERROR; + } + if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Invalid read retries info retries from slave: %d, max retries: %d", + cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->retries, CCI_I2C_READ_MAX_RETRIES); + goto ERROR; + } + if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) { + rc = cam_cci_data_queue_burst(cci_dev, c_ctrl, queue, sync_en); + if (rc < 0) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed in queueing i2c Burst write data for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + goto ERROR; + } + } else { + rc = cam_cci_data_queue(cci_dev, c_ctrl, queue, sync_en); + if (rc < 0) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d_Q%d Failed in queueing the data for rc: %d", + cci_dev->soc_info.index, master, queue, rc); + goto ERROR; + } + } + +ERROR: + mutex_lock(&cci_dev->cci_master_info[master].freq_cnt_lock); + if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0) + up(&cci_dev->cci_master_info[master].master_sem); + mutex_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock); + return rc; +} + +static void cam_cci_write_async_helper(struct work_struct *work) +{ + int rc; + struct cci_device *cci_dev; + struct cci_write_async *write_async = + container_of(work, struct cci_write_async, work); + enum cci_i2c_master_t master; + struct cam_cci_master_info *cci_master_info; + + cam_common_util_thread_switch_delay_detect( + "cam_cci_workq", "schedule", cam_cci_write_async_helper, + write_async->workq_scheduled_ts, + CAM_WORKQ_SCHEDULE_TIME_THRESHOLD); + cci_dev = write_async->cci_dev; + master = write_async->c_ctrl.cci_info->cci_i2c_master; + cci_master_info = 
&cci_dev->cci_master_info[master]; + + mutex_lock(&cci_master_info->mutex_q[write_async->queue]); + rc = cam_cci_i2c_write(&(cci_dev->v4l2_dev_str.sd), + &write_async->c_ctrl, write_async->queue, write_async->sync_en); + mutex_unlock(&cci_master_info->mutex_q[write_async->queue]); + if (rc < 0) + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Failed rc: %d", + cci_dev->soc_info.index, master, write_async->queue, rc); + + CAM_MEM_FREE(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting); + CAM_MEM_FREE(write_async); +} + +static int32_t cam_cci_i2c_write_async(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue, + enum cci_i2c_sync sync_en) +{ + int32_t rc = 0; + struct cci_write_async *write_async; + struct cci_device *cci_dev; + struct cam_sensor_i2c_reg_setting *cci_i2c_write_cfg; + struct cam_sensor_i2c_reg_setting *cci_i2c_write_cfg_w; + + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return -EINVAL; + } + + write_async = CAM_MEM_ZALLOC(sizeof(*write_async), GFP_KERNEL); + if (!write_async) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Memory allocation failed for write_async", + cci_dev->soc_info.index, c_ctrl->cci_info->cci_i2c_master, queue); + return -ENOMEM; + } + + + INIT_WORK(&write_async->work, cam_cci_write_async_helper); + write_async->cci_dev = cci_dev; + write_async->c_ctrl = *c_ctrl; + write_async->queue = queue; + write_async->sync_en = sync_en; + + cci_i2c_write_cfg = &c_ctrl->cfg.cci_i2c_write_cfg; + cci_i2c_write_cfg_w = &write_async->c_ctrl.cfg.cci_i2c_write_cfg; + + if (cci_i2c_write_cfg->size == 0) { + CAM_MEM_FREE(write_async); + return -EINVAL; + } + + cci_i2c_write_cfg_w->reg_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_i2c_reg_array)* + cci_i2c_write_cfg->size, GFP_KERNEL); + if (!cci_i2c_write_cfg_w->reg_setting) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Couldn't allocate memory for reg_setting", + cci_dev->soc_info.index, c_ctrl->cci_info->cci_i2c_master, queue); + 
CAM_MEM_FREE(write_async); + return -ENOMEM; + } + memcpy(cci_i2c_write_cfg_w->reg_setting, + cci_i2c_write_cfg->reg_setting, + (sizeof(struct cam_sensor_i2c_reg_array)* + cci_i2c_write_cfg->size)); + + cci_i2c_write_cfg_w->addr_type = cci_i2c_write_cfg->addr_type; + cci_i2c_write_cfg_w->addr_type = cci_i2c_write_cfg->addr_type; + cci_i2c_write_cfg_w->data_type = cci_i2c_write_cfg->data_type; + cci_i2c_write_cfg_w->size = cci_i2c_write_cfg->size; + cci_i2c_write_cfg_w->delay = cci_i2c_write_cfg->delay; + + write_async->workq_scheduled_ts = ktime_get(); + queue_work(cci_dev->write_wq[write_async->queue], &write_async->work); + + return rc; +} + +static int32_t cam_cci_read_bytes_v_1_2(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl) +{ + int32_t rc = 0; + struct cci_device *cci_dev = NULL; + enum cci_i2c_master_t master; + struct cam_cci_read_cfg *read_cfg = NULL; + uint16_t read_bytes = 0; + + if (!sd || !c_ctrl) { + CAM_ERR(CAM_CCI, "sd %pK c_ctrl %pK", + sd, c_ctrl); + return -EINVAL; + } + if (!c_ctrl->cci_info) { + CAM_ERR(CAM_CCI, "cci_info NULL"); + return -EINVAL; + } + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return -EINVAL; + } + if (cci_dev->cci_state != CCI_STATE_ENABLED) { + CAM_ERR(CAM_CCI, "invalid CCI:%d state %d", + cci_dev->soc_info.index, cci_dev->cci_state); + return -EINVAL; + } + + if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX + || c_ctrl->cci_info->cci_i2c_master < 0) { + CAM_ERR(CAM_CCI, "Invalid I2C master addr"); + return -EINVAL; + } + + master = c_ctrl->cci_info->cci_i2c_master; + read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg; + if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d read num bytes 0", + cci_dev->soc_info.index, master); + rc = -EINVAL; + goto ERROR; + } + + reinit_completion(&cci_dev->cci_master_info[master].rd_done); + read_bytes = read_cfg->num_byte; + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d Bytes to read %u", + 
cci_dev->soc_info.index, master, read_bytes); + do { + if (read_bytes >= CCI_READ_MAX_V_1_2) + read_cfg->num_byte = CCI_READ_MAX_V_1_2; + else + read_cfg->num_byte = read_bytes; + + cci_dev->is_burst_read[master] = false; + rc = cam_cci_read(sd, c_ctrl); + if (rc) { + if (cci_dev->is_probing) + CAM_INFO(CAM_CCI, "CCI%d_I2C_M%d failed to read rc: %d", + cci_dev->soc_info.index, master, rc); + else + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d failed to read rc: %d", + cci_dev->soc_info.index, master, rc); + goto ERROR; + } + + if (read_bytes >= CCI_READ_MAX_V_1_2) { + read_cfg->addr += CCI_READ_MAX_V_1_2; + read_cfg->data += CCI_READ_MAX_V_1_2; + read_bytes -= CCI_READ_MAX_V_1_2; + } else { + read_bytes = 0; + } + } while (read_bytes); + +ERROR: + return rc; +} + +static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl) +{ + int32_t rc = 0; + struct cci_device *cci_dev = NULL; + enum cci_i2c_master_t master; + struct cam_cci_read_cfg *read_cfg = NULL; + uint16_t read_bytes = 0; + + if (!sd || !c_ctrl) { + CAM_ERR(CAM_CCI, "Invalid arg sd %pK c_ctrl %pK", + sd, c_ctrl); + return -EINVAL; + } + if (!c_ctrl->cci_info) { + CAM_ERR(CAM_CCI, "cci_info NULL"); + return -EINVAL; + } + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return -EINVAL; + } + if (cci_dev->cci_state != CCI_STATE_ENABLED) { + CAM_ERR(CAM_CCI, "invalid CCI:%d state %d", + cci_dev->soc_info.index, cci_dev->cci_state); + return -EINVAL; + } + + if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX + || c_ctrl->cci_info->cci_i2c_master < 0) { + CAM_ERR(CAM_CCI, "Invalid I2C master addr"); + return -EINVAL; + } + + cci_dev->is_probing = c_ctrl->is_probing; + + master = c_ctrl->cci_info->cci_i2c_master; + read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg; + if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d read num bytes 0", + cci_dev->soc_info.index, master); + rc = -EINVAL; + goto ERROR; + } + + 
read_bytes = read_cfg->num_byte; + + /* + * To avoid any conflicts due to back to back trigger of + * THRESHOLD irq's, we reinit the threshold wait before + * we load the burst read cmd. + */ + mutex_lock(&cci_dev->cci_master_info[master].mutex_q[QUEUE_1]); + reinit_completion(&cci_dev->cci_master_info[master].rd_done); + reinit_completion(&cci_dev->cci_master_info[master].th_complete); + mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[QUEUE_1]); + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d Bytes to read %u", + cci_dev->soc_info.index, master, read_bytes); + do { + if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT) + read_cfg->num_byte = CCI_I2C_MAX_BYTE_COUNT; + else + read_cfg->num_byte = read_bytes; + + if (read_cfg->num_byte >= CCI_READ_MAX) { + rc = cam_cci_burst_read(sd, c_ctrl); + } else { + rc = cam_cci_read(sd, c_ctrl); + } + if (rc) { + if (cci_dev->is_probing) + CAM_INFO(CAM_CCI, "CCI%d_I2C_M%d Failed to read rc:%d", + cci_dev->soc_info.index, master, rc); + else + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d Failed to read rc:%d", + cci_dev->soc_info.index, master, rc); + goto ERROR; + } + + if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT) { + read_cfg->addr += (CCI_I2C_MAX_BYTE_COUNT / + read_cfg->data_type); + read_cfg->data += CCI_I2C_MAX_BYTE_COUNT; + read_bytes -= CCI_I2C_MAX_BYTE_COUNT; + } else { + read_bytes = 0; + } + } while (read_bytes); + +ERROR: + return rc; +} + +static int32_t cam_cci_i2c_set_sync_prms(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl) +{ + int32_t rc = 0; + struct cci_device *cci_dev; + + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev || !c_ctrl) { + CAM_ERR(CAM_CCI, + "Failed: invalid params cci_dev:%pK, c_ctrl:%pK", + cci_dev, c_ctrl); + rc = -EINVAL; + return rc; + } + cci_dev->cci_wait_sync_cfg = c_ctrl->cfg.cci_wait_sync_cfg; + cci_dev->valid_sync = cci_dev->cci_wait_sync_cfg.csid < 0 ? 
0 : 1; + + return rc; +} + +static int32_t cam_cci_release(struct v4l2_subdev *sd, + enum cci_i2c_master_t master) +{ + int rc; + struct cci_device *cci_dev; + + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return -EINVAL; + } + + rc = cam_cci_soc_release(cci_dev, master); + if (rc < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d Failed in releasing the rc: %d", + cci_dev->soc_info.index, master, rc); + return rc; + } + + return rc; +} + +static int32_t cam_cci_write(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl) +{ + int32_t rc = 0; + struct cci_device *cci_dev; + enum cci_i2c_master_t master; + struct cam_cci_master_info *cci_master_info; + uint32_t i; + + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev || !c_ctrl) { + CAM_ERR(CAM_CCI, + "Failed: invalid params cci_dev:%pK, c_ctrl:%pK", + cci_dev, c_ctrl); + rc = -EINVAL; + return rc; + } + + master = c_ctrl->cci_info->cci_i2c_master; + + if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX + || c_ctrl->cci_info->cci_i2c_master < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d Invalid I2C master addr", cci_dev->soc_info.index, master); + return -EINVAL; + } + + cci_master_info = &cci_dev->cci_master_info[master]; + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d ctrl_cmd = %d", cci_dev->soc_info.index, master, c_ctrl->cmd); + + switch (c_ctrl->cmd) { + case MSM_CCI_I2C_WRITE_SYNC_BLOCK: + mutex_lock(&cci_master_info->mutex_q[SYNC_QUEUE]); + rc = cam_cci_i2c_write(sd, c_ctrl, + SYNC_QUEUE, MSM_SYNC_ENABLE); + mutex_unlock(&cci_master_info->mutex_q[SYNC_QUEUE]); + break; + case MSM_CCI_I2C_WRITE_SYNC: + rc = cam_cci_i2c_write_async(sd, c_ctrl, + SYNC_QUEUE, MSM_SYNC_ENABLE); + break; + case MSM_CCI_I2C_WRITE: + for (i = 0; i < NUM_QUEUES; i++) { + if (mutex_trylock(&cci_master_info->mutex_q[i])) { + rc = cam_cci_i2c_write(sd, c_ctrl, i, + MSM_SYNC_DISABLE); + mutex_unlock(&cci_master_info->mutex_q[i]); + return rc; + } + } + mutex_lock(&cci_master_info->mutex_q[PRIORITY_QUEUE]); + rc = 
cam_cci_i2c_write(sd, c_ctrl, + PRIORITY_QUEUE, MSM_SYNC_DISABLE); + mutex_unlock(&cci_master_info->mutex_q[PRIORITY_QUEUE]); + break; + case MSM_CCI_I2C_WRITE_SEQ: + case MSM_CCI_I2C_WRITE_BURST: + mutex_lock(&cci_master_info->mutex_q[PRIORITY_QUEUE]); + rc = cam_cci_i2c_write(sd, c_ctrl, + PRIORITY_QUEUE, MSM_SYNC_DISABLE); + mutex_unlock(&cci_master_info->mutex_q[PRIORITY_QUEUE]); + break; + case MSM_CCI_I2C_WRITE_ASYNC: + rc = cam_cci_i2c_write_async(sd, c_ctrl, + PRIORITY_QUEUE, MSM_SYNC_DISABLE); + break; + default: + rc = -ENOIOCTLCMD; + } + + return rc; +} + +int32_t cam_cci_core_cfg(struct v4l2_subdev *sd, + struct cam_cci_ctrl *cci_ctrl) +{ + int32_t rc = 0; + struct cci_device *cci_dev = v4l2_get_subdevdata(sd); + enum cci_i2c_master_t master = MASTER_MAX; + + if (!cci_dev) { + CAM_ERR(CAM_CCI, "CCI_DEV is null"); + return -EINVAL; + } + + if (!cci_ctrl) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d CCI_CTRL IS NULL", cci_dev->soc_info.index, master); + return -EINVAL; + } + + master = cci_ctrl->cci_info->cci_i2c_master; + if (master >= MASTER_MAX) { + CAM_ERR(CAM_CCI, "INVALID MASTER: %d", master); + return -EINVAL; + } + + if ((cci_dev->cci_master_info[master].status < 0) && (cci_ctrl->cmd != MSM_CCI_RELEASE)) { + CAM_WARN(CAM_CCI, "CCI hardware is resetting"); + return -EAGAIN; + } + cci_dev->is_probing = false; + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d cmd = %d", cci_dev->soc_info.index, master, cci_ctrl->cmd); + + switch (cci_ctrl->cmd) { + case MSM_CCI_INIT: + mutex_lock(&cci_dev->init_mutex); + rc = cam_cci_init(sd, cci_ctrl); + mutex_unlock(&cci_dev->init_mutex); + break; + case MSM_CCI_RELEASE: + mutex_lock(&cci_dev->init_mutex); + rc = cam_cci_release(sd, master); + mutex_unlock(&cci_dev->init_mutex); + break; + case MSM_CCI_I2C_READ: + /* + * CCI version 1.2 does not support burst read + * due to the absence of the read threshold register + */ + if (cci_dev->hw_version == CCI_VERSION_1_2_9) { + CAM_DBG(CAM_CCI, "cci-v1.2 no burst read"); + rc = 
cam_cci_read_bytes_v_1_2(sd, cci_ctrl); + } else { + rc = cam_cci_read_bytes(sd, cci_ctrl); + } + break; + case MSM_CCI_I2C_WRITE: + case MSM_CCI_I2C_WRITE_SEQ: + case MSM_CCI_I2C_WRITE_BURST: + case MSM_CCI_I2C_WRITE_SYNC: + case MSM_CCI_I2C_WRITE_ASYNC: + case MSM_CCI_I2C_WRITE_SYNC_BLOCK: + rc = cam_cci_write(sd, cci_ctrl); + break; + case MSM_CCI_GPIO_WRITE: + break; + case MSM_CCI_SET_SYNC_CID: + rc = cam_cci_i2c_set_sync_prms(sd, cci_ctrl); + break; + + default: + rc = -ENOIOCTLCMD; + } + + cci_ctrl->status = rc; + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_core.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_core.h new file mode 100644 index 0000000000..50298625de --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_core.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#ifndef _CAM_CCI_CORE_H_ +#define _CAM_CCI_CORE_H_ + +#include +#include +#include "cam_cci_dev.h" +#include "cam_cci_soc.h" + +/** + * @cci_dev: CCI device structure + * @c_ctrl: CCI control structure + * + * This API gets CCI clk rates + */ +void cam_cci_get_clk_rates(struct cci_device *cci_dev, + struct cam_cci_ctrl *c_ctrl); + +/** + * @sd: V4L2 sub device + * @c_ctrl: CCI control structure + * + * This API handles I2C operations for CCI + */ +int32_t cam_cci_core_cfg(struct v4l2_subdev *sd, + struct cam_cci_ctrl *cci_ctrl); + +/** + * @irq_num: IRQ number + * @data: CCI private structure + * + * This API handles CCI IRQs + */ +irqreturn_t cam_cci_irq(int irq_num, void *data); + +/** + * @irq_num: IRQ number + * @data: CCI private structure + * + * This API handles CCI Threaded IRQs + */ +irqreturn_t cam_cci_threaded_irq(int irq_num, void *data); + +/** + * @cci_dev: CCI device structure + * @master: CCI master index + * @queue: CCI master Queue index + * @triggerHalfQueue: Flag to execute FULL/HALF Queue + * + * This API handles I2C operations for CCI + */ +int32_t cam_cci_data_queue_burst_apply(struct cci_device *cci_dev, + enum cci_i2c_master_t master, enum cci_i2c_queue_t queue, + uint32_t triggerHalfQueue); + +#endif /* _CAM_CCI_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_dev.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_dev.c new file mode 100644 index 0000000000..0ac80e3ca8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_dev.c @@ -0,0 +1,877 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_cci_dev.h" +#include "cam_req_mgr_dev.h" +#include "cam_cci_soc.h" +#include "cam_cci_core.h" +#include "camera_main.h" +#include "uapi/linux/sched/types.h" +#include "linux/sched/types.h" +#include "linux/sched.h" + +#define CCI_MAX_DELAY 1000000 +#define QUEUE_SIZE 100 + +struct cci_irq_data { + int32_t is_valid; + uint32_t irq_status0; + uint32_t irq_status1; + enum cci_i2c_master_t master; + enum cci_i2c_queue_t queue; +}; + +static struct v4l2_subdev *g_cci_subdev[MAX_CCI] = { 0 }; +static struct dentry *debugfs_root; +static struct cci_irq_data cci_irq_queue[QUEUE_SIZE] = { 0 }; +static int32_t head; +static int32_t tail; + +static inline int32_t increment_index(int32_t index) +{ + return (index + 1) % QUEUE_SIZE; +} + +struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index) +{ + struct v4l2_subdev *sub_device = NULL; + + if ((cci_dev_index < MAX_CCI) && (g_cci_subdev[cci_dev_index] != NULL)) + sub_device = g_cci_subdev[cci_dev_index]; + else + CAM_WARN(CAM_CCI, "CCI subdev not available at Index: %u, MAX_CCI : %u", + cci_dev_index, MAX_CCI); + + return sub_device; +} + +static long cam_cci_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + int32_t rc = 0; + + if (arg == NULL) { + CAM_ERR(CAM_CCI, "Args is Null"); + return -EINVAL; + } + + switch (cmd) { + case VIDIOC_MSM_CCI_CFG: + rc = cam_cci_core_cfg(sd, arg); + break; + case VIDIOC_CAM_CONTROL: + break; + default: + CAM_ERR(CAM_CCI, "Invalid ioctl cmd: %d", cmd); + rc = -ENOIOCTLCMD; + } + + return rc; +} + +#ifdef CONFIG_COMPAT +static long cam_cci_subdev_compat_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + return cam_cci_subdev_ioctl(sd, cmd, NULL); +} +#endif + +irqreturn_t cam_cci_irq(int irq_num, void *data) +{ + uint32_t irq_status0, irq_status1, reg_bmsk; + uint32_t irq_update_rd_done = 0; + struct cci_device *cci_dev = data; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + void __iomem *base = 
soc_info->reg_map[0].mem_base; + unsigned long flags; + bool rd_done_th_assert = false; + struct cam_cci_master_info *cci_master_info; + irqreturn_t rc = IRQ_HANDLED; + int32_t next_head; + + irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR); + irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR); + CAM_DBG(CAM_CCI, + "BASE: %p, irq0:%x irq1:%x", + base, irq_status0, irq_status1); + + cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR); + cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR); + + reg_bmsk = CCI_IRQ_MASK_1_RMSK; + if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) && + !(irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK)) { + reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD; + spin_lock_irqsave(&cci_dev->lock_status, flags); + cci_dev->irqs_disabled |= + CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD; + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + + if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) && + !(irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK)) { + reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD; + spin_lock_irqsave(&cci_dev->lock_status, flags); + cci_dev->irqs_disabled |= + CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD; + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + + if (reg_bmsk != CCI_IRQ_MASK_1_RMSK) { + cam_io_w_mb(reg_bmsk, base + CCI_IRQ_MASK_1_ADDR); + CAM_DBG(CAM_CCI, "Updating the reg mask for irq1: 0x%x", + reg_bmsk); + } else if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK || + irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) { + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) { + spin_lock_irqsave(&cci_dev->lock_status, flags); + if (cci_dev->irqs_disabled & + CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) { + irq_update_rd_done |= + CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD; + cci_dev->irqs_disabled &= + ~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD; + } + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) { + 
spin_lock_irqsave(&cci_dev->lock_status, flags); + if (cci_dev->irqs_disabled & + CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) { + irq_update_rd_done |= + CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD; + cci_dev->irqs_disabled &= + ~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD; + } + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + } + + if (irq_update_rd_done != 0) { + irq_update_rd_done |= cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR); + cam_io_w_mb(irq_update_rd_done, base + CCI_IRQ_MASK_1_ADDR); + } + + cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR); + + if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) { + struct cam_cci_master_info *cci_master_info; + if (cci_dev->cci_master_info[MASTER_0].reset_pending == true) { + cci_master_info = &cci_dev->cci_master_info[MASTER_0]; + cci_dev->cci_master_info[MASTER_0].reset_pending = + false; + if (!cci_master_info->status) + complete(&cci_master_info->reset_complete); + + complete_all(&cci_master_info->rd_done); + complete_all(&cci_master_info->th_complete); + } + if (cci_dev->cci_master_info[MASTER_1].reset_pending == true) { + cci_master_info = &cci_dev->cci_master_info[MASTER_1]; + cci_dev->cci_master_info[MASTER_1].reset_pending = + false; + if (!cci_master_info->status) + complete(&cci_master_info->reset_complete); + + complete_all(&cci_master_info->rd_done); + complete_all(&cci_master_info->th_complete); + } + } + + if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) && + (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)) { + cci_dev->cci_master_info[MASTER_0].status = 0; + rd_done_th_assert = true; + complete(&cci_dev->cci_master_info[MASTER_0].th_complete); + complete(&cci_dev->cci_master_info[MASTER_0].rd_done); + } + if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) && + (!rd_done_th_assert)) { + cci_dev->cci_master_info[MASTER_0].status = 0; + rd_done_th_assert = true; + if (cci_dev->is_burst_read[MASTER_0]) + complete( + &cci_dev->cci_master_info[MASTER_0].th_complete); + 
complete(&cci_dev->cci_master_info[MASTER_0].rd_done); + } + if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) && + (!rd_done_th_assert)) { + cci_dev->cci_master_info[MASTER_0].status = 0; + complete(&cci_dev->cci_master_info[MASTER_0].th_complete); + } + if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_Q0_THRESHOLD) + { + cci_master_info = &cci_dev->cci_master_info[MASTER_1]; + spin_lock_irqsave(&cci_dev->lock_status, flags); + trace_cam_cci_burst(cci_dev->soc_info.index, 1, 0, + "th_irq honoured irq1", irq_status1); + CAM_DBG(CAM_CCI, "CCI%d_M1_Q0: th_irq honoured irq1: 0x%x th_irq_ref_cnt: %d", + cci_dev->soc_info.index, irq_status1, + cci_master_info->th_irq_ref_cnt[QUEUE_0]); + if (cci_master_info->th_irq_ref_cnt[QUEUE_0] == 1) { + complete(&cci_master_info->th_burst_complete[QUEUE_0]); + } else { + // Decrement Threshold irq ref count + cci_master_info->th_irq_ref_cnt[QUEUE_0]--; + next_head = increment_index(head); + if (next_head == tail) { + CAM_ERR(CAM_CCI, + "CCI%d_M1_Q0 CPU Scheduing Issue: " + "Unable to process BURST", + cci_dev->soc_info.index); + rc = IRQ_NONE; + } else { + cci_irq_queue[head].irq_status0 = irq_status0; + cci_irq_queue[head].irq_status1 = irq_status1; + cci_irq_queue[head].master = MASTER_1; + cci_irq_queue[head].queue = QUEUE_0; + cci_irq_queue[head].is_valid = 1; + head = next_head; + // wake up Threaded irq Handler + rc = IRQ_WAKE_THREAD; + } + } + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_Q1_THRESHOLD) + { + cci_master_info = &cci_dev->cci_master_info[MASTER_1]; + spin_lock_irqsave(&cci_dev->lock_status, flags); + trace_cam_cci_burst(cci_dev->soc_info.index, 1, 1, + "th_irq honoured irq1", irq_status1); + CAM_DBG(CAM_CCI, + "CCI%d_M1_Q1: th_irq honoured irq1: 0x%x th_irq_ref_cnt: %d", + cci_dev->soc_info.index, irq_status1, + cci_master_info->th_irq_ref_cnt[QUEUE_1]); + if (cci_master_info->th_irq_ref_cnt[QUEUE_1] == 1) { + 
complete(&cci_master_info->th_burst_complete[QUEUE_1]); + } else { + // Decrement Threshold irq ref count + cci_master_info->th_irq_ref_cnt[QUEUE_1]--; + next_head = increment_index(head); + if (next_head == tail) { + CAM_ERR(CAM_CCI, + "CCI%d_M1_Q0 CPU Scheduing Issue: " + "Unable to process BURST", + cci_dev->soc_info.index); + rc = IRQ_NONE; + } else { + cci_irq_queue[head].irq_status0 = irq_status0; + cci_irq_queue[head].irq_status1 = irq_status1; + cci_irq_queue[head].master = MASTER_1; + cci_irq_queue[head].queue = QUEUE_1; + cci_irq_queue[head].is_valid = 1; + head = next_head; + // wake up Threaded irq Handler + rc = IRQ_WAKE_THREAD; + } + } + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_Q0_THRESHOLD) + { + cci_master_info = &cci_dev->cci_master_info[MASTER_0]; + spin_lock_irqsave(&cci_dev->lock_status, flags); + trace_cam_cci_burst(cci_dev->soc_info.index, 0, 0, + "th_irq honoured irq1", irq_status1); + CAM_DBG(CAM_CCI, + "CCI%d_M0_Q0: th_irq honoured irq1: 0x%x th_irq_ref_cnt: %d", + cci_dev->soc_info.index, irq_status1, + cci_master_info->th_irq_ref_cnt[QUEUE_0]); + if (cci_master_info->th_irq_ref_cnt[QUEUE_0] == 1) { + complete(&cci_master_info->th_burst_complete[QUEUE_0]); + } else { + // Decrement Threshold irq ref count + cci_master_info->th_irq_ref_cnt[QUEUE_0]--; + next_head = increment_index(head); + if (next_head == tail) { + CAM_ERR(CAM_CCI, + "CCI%d_M1_Q0 CPU Scheduing Issue: " + "Unable to process BURST", + cci_dev->soc_info.index); + rc = IRQ_NONE; + } else { + cci_irq_queue[head].irq_status0 = irq_status0; + cci_irq_queue[head].irq_status1 = irq_status1; + cci_irq_queue[head].master = MASTER_0; + cci_irq_queue[head].queue = QUEUE_0; + cci_irq_queue[head].is_valid = 1; + head = next_head; + // wake up Threaded irq Handler + rc = IRQ_WAKE_THREAD; + } + } + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_Q1_THRESHOLD) + { + 
cci_master_info = &cci_dev->cci_master_info[MASTER_0]; + spin_lock_irqsave(&cci_dev->lock_status, flags); + trace_cam_cci_burst(cci_dev->soc_info.index, 0, 1, + "th_irq honoured irq1", irq_status1); + CAM_DBG(CAM_CCI, + "CCI%d_M0_Q1: th_irq honoured irq1: 0x%x th_irq_ref_cnt: %d", + cci_dev->soc_info.index, irq_status1, + cci_master_info->th_irq_ref_cnt[QUEUE_1]); + if (cci_master_info->th_irq_ref_cnt[QUEUE_1] == 1) { + complete(&cci_master_info->th_burst_complete[QUEUE_1]); + } else { + // Decrement Threshold irq ref count + cci_master_info->th_irq_ref_cnt[QUEUE_1]--; + next_head = increment_index(head); + if (next_head == tail) { + CAM_ERR(CAM_CCI, + "CCI%d_M1_Q0 CPU Scheduing Issue: " + "Unable to process BURST", + cci_dev->soc_info.index); + rc = IRQ_NONE; + } else { + cci_irq_queue[head].irq_status0 = irq_status0; + cci_irq_queue[head].irq_status1 = irq_status1; + cci_irq_queue[head].master = MASTER_0; + cci_irq_queue[head].queue = QUEUE_1; + cci_irq_queue[head].is_valid = 1; + head = next_head; + // wake up Threaded irq Handler + rc = IRQ_WAKE_THREAD; + } + } + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) { + struct cam_cci_master_info *cci_master_info; + + cci_master_info = &cci_dev->cci_master_info[MASTER_0]; + spin_lock_irqsave( + &cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0], + flags); + atomic_set(&cci_master_info->q_free[QUEUE_0], 0); + cci_master_info->status = 0; + if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) { + complete(&cci_master_info->report_q[QUEUE_0]); + atomic_set(&cci_master_info->done_pending[QUEUE_0], 0); + } + spin_unlock_irqrestore( + &cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0], + flags); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) { + struct cam_cci_master_info *cci_master_info; + + cci_master_info = &cci_dev->cci_master_info[MASTER_0]; + spin_lock_irqsave( + &cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1], + 
flags); + atomic_set(&cci_master_info->q_free[QUEUE_1], 0); + cci_master_info->status = 0; + if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) { + complete(&cci_master_info->report_q[QUEUE_1]); + atomic_set(&cci_master_info->done_pending[QUEUE_1], 0); + } + spin_unlock_irqrestore( + &cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1], + flags); + } + rd_done_th_assert = false; + if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) && + (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)) { + cci_dev->cci_master_info[MASTER_1].status = 0; + rd_done_th_assert = true; + complete(&cci_dev->cci_master_info[MASTER_1].th_complete); + complete(&cci_dev->cci_master_info[MASTER_1].rd_done); + } + if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) && + (!rd_done_th_assert)) { + cci_dev->cci_master_info[MASTER_1].status = 0; + rd_done_th_assert = true; + if (cci_dev->is_burst_read[MASTER_1]) + complete( + &cci_dev->cci_master_info[MASTER_1].th_complete); + complete(&cci_dev->cci_master_info[MASTER_1].rd_done); + } + if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) && + (!rd_done_th_assert)) { + cci_dev->cci_master_info[MASTER_1].status = 0; + complete(&cci_dev->cci_master_info[MASTER_1].th_complete); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) { + struct cam_cci_master_info *cci_master_info; + + cci_master_info = &cci_dev->cci_master_info[MASTER_1]; + spin_lock_irqsave( + &cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0], + flags); + atomic_set(&cci_master_info->q_free[QUEUE_0], 0); + cci_master_info->status = 0; + if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) { + complete(&cci_master_info->report_q[QUEUE_0]); + atomic_set(&cci_master_info->done_pending[QUEUE_0], 0); + } + spin_unlock_irqrestore( + &cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0], + flags); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) { + struct cam_cci_master_info *cci_master_info; + + cci_master_info = 
&cci_dev->cci_master_info[MASTER_1]; + spin_lock_irqsave( + &cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1], + flags); + atomic_set(&cci_master_info->q_free[QUEUE_1], 0); + cci_master_info->status = 0; + if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) { + complete(&cci_master_info->report_q[QUEUE_1]); + atomic_set(&cci_master_info->done_pending[QUEUE_1], 0); + } + spin_unlock_irqrestore( + &cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1], + flags); + } + if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE) + CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_0"); + + if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE) + CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_1"); + + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) { + cci_dev->cci_master_info[MASTER_0].reset_pending = true; + cam_io_w_mb(CCI_M0_RESET_RMSK, + base + CCI_RESET_CMD_ADDR); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) { + cci_dev->cci_master_info[MASTER_1].reset_pending = true; + cam_io_w_mb(CCI_M1_RESET_RMSK, + base + CCI_RESET_CMD_ADDR); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) { + cci_dev->cci_master_info[MASTER_0].status = -EINVAL; + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERROR_BMSK) { + if (cci_dev->is_probing) { + CAM_INFO(CAM_CCI, + "Base:%pK,cci: %d, M0_Q0 NACK ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + } else { + CAM_ERR(CAM_CCI, + "Base:%pK,cci: %d, M0_Q0 NACK ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + trace_cam_cci_burst(cci_dev->soc_info.index, 0, 0, + "NACK_ERROR irq0", irq_status0); + } + cam_cci_dump_registers(cci_dev, MASTER_0, + QUEUE_0); + if ((cci_dev->cci_master_info[MASTER_0].th_irq_ref_cnt[QUEUE_0]) > 0) { + complete_all(&cci_dev->cci_master_info[MASTER_0]. 
+ th_burst_complete[QUEUE_0]); + } + complete_all(&cci_dev->cci_master_info[MASTER_0] + .report_q[QUEUE_0]); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERROR_BMSK) { + if (cci_dev->is_probing) { + CAM_INFO(CAM_CCI, + "Base:%pK,cci: %d, M0_Q1 NACK ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + } else { + CAM_ERR(CAM_CCI, + "Base:%pK,cci: %d, M0_Q1 NACK ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + trace_cam_cci_burst(cci_dev->soc_info.index, 0, 1, + "NACK_ERROR irq0", irq_status0); + } + cam_cci_dump_registers(cci_dev, MASTER_0, + QUEUE_1); + if ((cci_dev->cci_master_info[MASTER_0].th_irq_ref_cnt[QUEUE_1]) > 0) { + complete_all(&cci_dev->cci_master_info[MASTER_0]. + th_burst_complete[QUEUE_1]); + } + complete_all(&cci_dev->cci_master_info[MASTER_0] + .report_q[QUEUE_1]); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_ERROR_BMSK) + CAM_ERR(CAM_CCI, + "Base:%pK, cci: %d, M0 QUEUE_OVER/UNDER_FLOW OR CMD ERR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_ERROR_BMSK) + CAM_ERR(CAM_CCI, + "Base: %pK, M0 RD_OVER/UNDER_FLOW ERROR: 0x%x", + base, irq_status0); + + cci_dev->cci_master_info[MASTER_0].reset_pending = true; + cam_io_w_mb(CCI_M0_RESET_RMSK, base + CCI_RESET_CMD_ADDR); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) { + cci_dev->cci_master_info[MASTER_1].status = -EINVAL; + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERROR_BMSK) { + if (cci_dev->is_probing) { + CAM_INFO(CAM_CCI, + "Base:%pK, cci: %d, M1_Q0 NACK ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + } else { + CAM_ERR(CAM_CCI, + "Base:%pK, cci: %d, M1_Q0 NACK ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + trace_cam_cci_burst(cci_dev->soc_info.index, 1, 0, + "NACK_ERROR irq0", irq_status0); + } + cam_cci_dump_registers(cci_dev, MASTER_1, + QUEUE_0); + if ((cci_dev->cci_master_info[MASTER_1].th_irq_ref_cnt[QUEUE_0]) > 0) { + 
complete_all(&cci_dev->cci_master_info[MASTER_1]. + th_burst_complete[QUEUE_0]); + } + complete_all(&cci_dev->cci_master_info[MASTER_1] + .report_q[QUEUE_0]); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERROR_BMSK) { + if (cci_dev->is_probing) { + CAM_INFO(CAM_CCI, + "Base:%pK, cci: %d, M1_Q1 NACK ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + } else { + CAM_ERR(CAM_CCI, + "Base:%pK, cci: %d, M1_Q1 NACK ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + trace_cam_cci_burst(cci_dev->soc_info.index, 1, 1, + "NACK_ERROR irq0", irq_status0); + } + cam_cci_dump_registers(cci_dev, MASTER_1, + QUEUE_1); + if ((cci_dev->cci_master_info[MASTER_1].th_irq_ref_cnt[QUEUE_1]) > 0) { + complete_all(&cci_dev->cci_master_info[MASTER_1]. + th_burst_complete[QUEUE_1]); + } + complete_all(&cci_dev->cci_master_info[MASTER_1] + .report_q[QUEUE_1]); + } + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_ERROR_BMSK) + CAM_ERR(CAM_CCI, + "Base:%pK, cci: %d, M1 QUEUE_OVER_UNDER_FLOW OR CMD ERROR:0x%x", + base, cci_dev->soc_info.index, irq_status0); + if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_ERROR_BMSK) + CAM_ERR(CAM_CCI, + "Base:%pK, cci: %d, M1 RD_OVER/UNDER_FLOW ERROR: 0x%x", + base, cci_dev->soc_info.index, irq_status0); + + cci_dev->cci_master_info[MASTER_1].reset_pending = true; + cam_io_w_mb(CCI_M1_RESET_RMSK, base + CCI_RESET_CMD_ADDR); + } + + return rc; +} + +irqreturn_t cam_cci_threaded_irq(int irq_num, void *data) +{ + struct cci_device *cci_dev = data; + struct cam_hw_soc_info *soc_info = + &cci_dev->soc_info; + struct cci_irq_data cci_data; + unsigned long flags; + uint32_t triggerHalfQueue = 1; + struct task_struct *task = current; + + CAM_INFO(CAM_CCI, "CCI%d: nice: %d rt-Priority: %d cci_dev: %p", + soc_info->index, task_nice(current), task->rt_priority, cci_dev); + spin_lock_irqsave(&cci_dev->lock_status, flags); + if (tail != head) { + cci_data = cci_irq_queue[tail]; + tail = increment_index(tail); + /* + * "head" and "tail" 
variables are shared across + * Top half and Bottom Half routines, Hence place a + * lock while accessing these variables. + */ + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + cam_cci_data_queue_burst_apply(cci_dev, + cci_data.master, cci_data.queue, triggerHalfQueue); + spin_lock_irqsave(&cci_dev->lock_status, flags); + } + spin_unlock_irqrestore(&cci_dev->lock_status, flags); + return IRQ_HANDLED; +} + +static int cam_cci_irq_routine(struct v4l2_subdev *sd, u32 status, + bool *handled) +{ + struct cci_device *cci_dev = NULL; + irqreturn_t ret; + struct cam_hw_soc_info *soc_info = NULL; + + if (!sd) { + CAM_ERR(CAM_CCI, "Error No data in subdev"); + return -EINVAL; + } + + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return -EINVAL; + } + + soc_info = &cci_dev->soc_info; + + ret = cam_cci_irq(soc_info->irq_num[0], cci_dev); + if (ret == IRQ_NONE) + CAM_ERR(CAM_CCI, "Interrupt was not handled properly, ret %d", ret); + + *handled = true; + return 0; +} + +static struct v4l2_subdev_core_ops cci_subdev_core_ops = { + .ioctl = cam_cci_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_cci_subdev_compat_ioctl, +#endif + .interrupt_service_routine = cam_cci_irq_routine, +}; + +static const struct v4l2_subdev_ops cci_subdev_ops = { + .core = &cci_subdev_core_ops, +}; + +static const struct v4l2_subdev_internal_ops cci_subdev_intern_ops; + +static int cam_cci_get_debug(void *data, u64 *val) +{ + struct cci_device *cci_dev = (struct cci_device *)data; + + *val = cci_dev->dump_en; + + return 0; +} + +static int cam_cci_set_debug(void *data, u64 val) +{ + struct cci_device *cci_dev = (struct cci_device *)data; + + cci_dev->dump_en = val; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_cci_debug, + cam_cci_get_debug, + cam_cci_set_debug, "%16llu\n"); + +static int cam_cci_create_debugfs_entry(struct cci_device *cci_dev) +{ + int rc = 0, idx; + struct dentry *dbgfileptr = NULL; + static char * const filename[] = 
{ "en_dump_cci0", "en_dump_cci1", "en_dump_cci2"}; + + if (!cam_debugfs_available()) + return 0; + + if (!debugfs_root) { + rc = cam_debugfs_create_subdir("cci", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_CCI, "debugfs directory creation fail"); + return rc; + } + debugfs_root = dbgfileptr; + } + + idx = cci_dev->soc_info.index; + if (idx >= ARRAY_SIZE(filename)) { + CAM_ERR(CAM_CCI, "cci-dev %d invalid", idx); + return -ENODEV; + } + + debugfs_create_file(filename[idx], 0644, debugfs_root, cci_dev, &cam_cci_debug); + + return 0; +} + +static int cam_cci_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_cpas_register_params cpas_parms; + struct cci_device *new_cci_dev; + struct cam_hw_soc_info *soc_info = NULL; + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + new_cci_dev = devm_kzalloc(&pdev->dev, sizeof(struct cci_device), + GFP_KERNEL); + if (!new_cci_dev) { + CAM_ERR(CAM_CCI, "Memory allocation failed for cci_dev"); + return -ENOMEM; + } + soc_info = &new_cci_dev->soc_info; + + new_cci_dev->v4l2_dev_str.pdev = pdev; + + soc_info->pdev = pdev; + soc_info->dev = &pdev->dev; + soc_info->dev_name = pdev->name; + + rc = cam_cci_parse_dt_info(pdev, new_cci_dev); + if (rc < 0) { + CAM_ERR(CAM_CCI, "Resource get Failed rc:%d", rc); + goto cci_no_resource; + } + + new_cci_dev->v4l2_dev_str.internal_ops = + &cci_subdev_intern_ops; + new_cci_dev->v4l2_dev_str.ops = + &cci_subdev_ops; + strscpy(new_cci_dev->device_name, CAMX_CCI_DEV_NAME, + sizeof(new_cci_dev->device_name)); + new_cci_dev->v4l2_dev_str.name = + new_cci_dev->device_name; + new_cci_dev->v4l2_dev_str.sd_flags = V4L2_SUBDEV_FL_HAS_EVENTS; + new_cci_dev->v4l2_dev_str.ent_function = + CAM_CCI_DEVICE_TYPE; + new_cci_dev->v4l2_dev_str.token = + new_cci_dev; + + rc = cam_register_subdev(&(new_cci_dev->v4l2_dev_str)); + if (rc < 0) { + CAM_ERR(CAM_CCI, "Fail with 
cam_register_subdev rc: %d", rc); + goto cci_no_resource; + } + + platform_set_drvdata(pdev, &(new_cci_dev->v4l2_dev_str.sd)); + v4l2_set_subdevdata(&new_cci_dev->v4l2_dev_str.sd, new_cci_dev); + if (soc_info->index >= MAX_CCI) { + CAM_ERR(CAM_CCI, "Invalid index: %d max supported:%d", + soc_info->index, MAX_CCI-1); + goto cci_no_resource; + } + + g_cci_subdev[soc_info->index] = &new_cci_dev->v4l2_dev_str.sd; + mutex_init(&(new_cci_dev->init_mutex)); + CAM_DBG(CAM_CCI, "Device Type :%d", soc_info->index); + + cpas_parms.cam_cpas_client_cb = NULL; + cpas_parms.cell_index = soc_info->index; + cpas_parms.dev = &pdev->dev; + cpas_parms.userdata = new_cci_dev; + strscpy(cpas_parms.identifier, "cci", CAM_HW_IDENTIFIER_LENGTH); + rc = cam_cpas_register_client(&cpas_parms); + if (rc) { + CAM_ERR(CAM_CCI, "CPAS registration failed rc:%d", rc); + goto cci_unregister_subdev; + } + + CAM_DBG(CAM_CCI, "CPAS registration successful handle=%d", + cpas_parms.client_handle); + new_cci_dev->cpas_handle = cpas_parms.client_handle; + + rc = cam_cci_create_debugfs_entry(new_cci_dev); + if (rc) { + CAM_WARN(CAM_CCI, "debugfs creation failed"); + rc = 0; + } + head = 0; + tail = 0; + CAM_DBG(CAM_CCI, "Component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return rc; + +cci_unregister_subdev: + cam_unregister_subdev(&(new_cci_dev->v4l2_dev_str)); +cci_no_resource: + devm_kfree(&pdev->dev, new_cci_dev); + return rc; +} + +static void cam_cci_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + + struct v4l2_subdev *subdev = platform_get_drvdata(pdev); + struct cci_device *cci_dev = NULL; + + if (!subdev) { + CAM_ERR(CAM_CCI, "Error No data in subdev"); + return; + } + + cci_dev = v4l2_get_subdevdata(subdev); + if (!cci_dev) { + CAM_ERR(CAM_CCI, "Error No data in 
cci_dev"); + return; + } + + if (!cci_dev) { + CAM_ERR(CAM_CCI, "cci_dev NULL"); + return; + } + + cam_cpas_unregister_client(cci_dev->cpas_handle); + debugfs_root = NULL; + cam_cci_soc_remove(pdev, cci_dev); + rc = cam_unregister_subdev(&(cci_dev->v4l2_dev_str)); + if (rc < 0) + CAM_ERR(CAM_CCI, "Fail with cam_unregister_subdev. rc:%d", rc); + + devm_kfree(&pdev->dev, cci_dev); +} + +const static struct component_ops cam_cci_component_ops = { + .bind = cam_cci_component_bind, + .unbind = cam_cci_component_unbind, +}; + +static int cam_cci_platform_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CCI, "Adding CCI component"); + rc = component_add(&pdev->dev, &cam_cci_component_ops); + if (rc) + CAM_ERR(CAM_CCI, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_cci_device_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_cci_component_ops); + return 0; +} + +static const struct of_device_id cam_cci_dt_match[] = { + {.compatible = "qcom,cci"}, + {} +}; + +MODULE_DEVICE_TABLE(of, cam_cci_dt_match); + +struct platform_driver cci_driver = { + .probe = cam_cci_platform_probe, + .remove = cam_cci_device_remove, + .driver = { + .name = CAMX_CCI_DEV_NAME, + .owner = THIS_MODULE, + .of_match_table = cam_cci_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_cci_init_module(void) +{ + return platform_driver_register(&cci_driver); +} + +void cam_cci_exit_module(void) +{ + platform_driver_unregister(&cci_driver); +} + +MODULE_DESCRIPTION("MSM CCI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_dev.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_dev.h new file mode 100644 index 0000000000..fed1e20b5f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_dev.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux 
Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CCI_DEV_H_ +#define _CAM_CCI_DEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_subdev.h" +#include +#include "cam_cci_hwreg.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_req_mgr_workq.h" +#include "cam_common_util.h" + +#define CCI_I2C_QUEUE_0_SIZE 128 +#define CCI_I2C_QUEUE_1_SIZE 32 +#define CCI_I2C_QUEUE_0_SIZE_V_1_2 64 +#define CCI_I2C_QUEUE_1_SIZE_V_1_2 16 +#define CYCLES_PER_MICRO_SEC_DEFAULT 4915 +#define CCI_MAX_DELAY 1000000 + +#define CCI_TIMEOUT msecs_to_jiffies(1500) +#define NUM_QUEUES 2 + +#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11 11 +#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_WORDS ((MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11 + 1) / 4) +#define BURST_MIN_FREE_SIZE 8 + +/* Max bytes that can be read per CCI read transaction */ +#define CCI_READ_MAX 256 +#define CCI_READ_MAX_V_1_2 0xE +#define CCI_I2C_READ_MAX_RETRIES 3 +#define CCI_I2C_MAX_READ 20480 +#define CCI_I2C_MAX_WRITE 20480 +#define CCI_ENABLE_THRESHOLD_IRQ 1 +#define CCI_I2C_MAX_BYTE_COUNT 65535 + +#define CAMX_CCI_DEV_NAME "cam-cci-driver" + +#define CAM_CCI_WORKQUEUE_NAME "cam_cci_wq" + +#define MAX_CCI 3 + +#define PRIORITY_QUEUE (QUEUE_0) +#define SYNC_QUEUE (QUEUE_1) + +#define CAM_CCI_NACK_DUMP_EN BIT(1) +#define CAM_CCI_TIMEOUT_DUMP_EN BIT(2) + +#define CCI_VERSION_1_2_9 0x10020009 +#define REPORT_IDSIZE 16 +enum cci_i2c_sync { + MSM_SYNC_DISABLE, + MSM_SYNC_ENABLE, +}; + +enum cam_cci_cmd_type { + MSM_CCI_INIT, + MSM_CCI_RELEASE, + MSM_CCI_SET_SID, + MSM_CCI_SET_FREQ, + MSM_CCI_SET_SYNC_CID, + MSM_CCI_I2C_READ, + MSM_CCI_I2C_WRITE, + MSM_CCI_I2C_WRITE_SEQ, + MSM_CCI_I2C_WRITE_BURST, + MSM_CCI_I2C_WRITE_ASYNC, + MSM_CCI_GPIO_WRITE, + MSM_CCI_I2C_WRITE_SYNC, + 
MSM_CCI_I2C_WRITE_SYNC_BLOCK, +}; + +enum cci_i2c_queue_t { + QUEUE_0, + QUEUE_1, + QUEUE_INVALID, +}; + +struct cam_cci_wait_sync_cfg { + uint16_t cid; + int16_t csid; + uint16_t line; + uint16_t delay; +}; + +struct cam_cci_gpio_cfg { + uint16_t gpio_queue; + uint16_t i2c_queue; +}; + +struct cam_cci_read_cfg { + uint32_t addr; + uint16_t addr_type; + uint8_t *data; + uint16_t num_byte; + uint16_t data_type; +}; + +struct cam_cci_i2c_queue_info { + uint32_t max_queue_size; + uint32_t report_id; + uint32_t irq_en; + uint32_t capture_rep_data; +}; + +struct cam_cci_master_info { + int32_t status; + atomic_t q_free[NUM_QUEUES]; + uint8_t q_lock[NUM_QUEUES]; + uint8_t reset_pending; + struct mutex mutex; + struct completion reset_complete; + struct completion rd_done; + struct completion th_complete; + struct mutex mutex_q[NUM_QUEUES]; + struct completion report_q[NUM_QUEUES]; + atomic_t done_pending[NUM_QUEUES]; + spinlock_t lock_q[NUM_QUEUES]; + struct semaphore master_sem; + struct mutex freq_cnt_lock; + uint16_t freq_ref_cnt; + bool is_initilized; + struct completion th_burst_complete[NUM_QUEUES]; + uint32_t th_irq_ref_cnt[NUM_QUEUES]; + bool is_burst_enable[NUM_QUEUES]; + uint32_t num_words_exec[NUM_QUEUES]; + uint32_t *data_queue[NUM_QUEUES]; + uint32_t num_words_in_data_queue[NUM_QUEUES]; + int32_t data_queue_start_index[NUM_QUEUES]; + int32_t half_queue_mark[NUM_QUEUES]; +}; + +struct cam_cci_clk_params_t { + uint16_t hw_thigh; + uint16_t hw_tlow; + uint16_t hw_tsu_sto; + uint16_t hw_tsu_sta; + uint16_t hw_thd_dat; + uint16_t hw_thd_sta; + uint16_t hw_tbuf; + uint8_t hw_scl_stretch_en; + uint8_t hw_trdhld; + uint8_t hw_tsp; + uint32_t cci_clk_src; +}; + +enum cam_cci_state_t { + CCI_STATE_ENABLED, + CCI_STATE_DISABLED, +}; + +/** + * struct cci_device + * @pdev: Platform device + * @subdev: V4L2 sub device + * @base: Base address of CCI device + * @hw_version: Hardware version + * @ref_count: Reference Count + * @cci_state: CCI state machine + * @num_clk: 
Number of CCI clock + * @cci_clk: CCI clock structure + * @cci_clk_info: CCI clock information + * @cam_cci_i2c_queue_info: CCI queue information + * @i2c_freq_mode: I2C frequency of operations + * @master_active_slave: Number of active/connected slaves for master + * @cci_clk_params: CCI hw clk params + * @cci_gpio_tbl: CCI GPIO table + * @cci_gpio_tbl_size: GPIO table size + * @cci_pinctrl: Pinctrl structure + * @cci_pinctrl_status: CCI pinctrl status + * @cci_clk_src: CCI clk src rate + * @cci_vreg: CCI regulator structure + * @cci_reg_ptr: CCI individual regulator structure + * @regulator_count: Regulator count + * @support_seq_write: Set this flag when sequential write is enabled + * @write_wq: Work queue structure + * @valid_sync: Is it a valid sync with CSID + * @v4l2_dev_str: V4L2 device structure + * @cci_wait_sync_cfg: CCI sync config + * @cycles_per_us: Cycles per micro sec + * @payload_size: CCI packet payload size + * @irq_status1: Store irq_status1 to be cleared after + * draining FIFO buffer for burst read + * @lock_status: to protect changes to irq_status1 + * @is_burst_read: Flag to determine if we are performing + * a burst read operation or not + * @irqs_disabled: Mask for IRQs that are disabled + * @init_mutex: Mutex for maintaining refcount for attached + * devices to cci during init/deinit. 
+ * @dump_en: To enable the selective dump + * @is_probing: Flag to determine if we are probing a sensor + */ +struct cci_device { + struct v4l2_subdev subdev; + struct cam_hw_soc_info soc_info; + uint32_t hw_version; + uint8_t ref_count; + enum cam_cci_state_t cci_state; + struct cam_cci_i2c_queue_info + cci_i2c_queue_info[MASTER_MAX][NUM_QUEUES]; + struct cam_cci_master_info cci_master_info[MASTER_MAX]; + enum i2c_freq_mode i2c_freq_mode[MASTER_MAX]; + uint8_t master_active_slave[MASTER_MAX]; + struct cam_cci_clk_params_t cci_clk_params[I2C_MAX_MODES]; + struct msm_pinctrl_info cci_pinctrl; + uint8_t cci_pinctrl_status; + uint8_t support_seq_write; + struct workqueue_struct *write_wq[MASTER_MAX]; + struct cam_cci_wait_sync_cfg cci_wait_sync_cfg; + uint8_t valid_sync; + struct cam_subdev v4l2_dev_str; + uint32_t cycles_per_us; + int32_t clk_level_index; + uint8_t payload_size; + char device_name[20]; + uint32_t cpas_handle; + uint32_t irq_status1; + spinlock_t lock_status; + bool is_burst_read[MASTER_MAX]; + uint32_t irqs_disabled; + struct mutex init_mutex; + uint64_t dump_en; + bool is_probing; +}; + +enum cam_cci_i2c_cmd_type { + CCI_I2C_SET_PARAM_CMD = 1, + CCI_I2C_WAIT_CMD, + CCI_I2C_WAIT_SYNC_CMD, + CCI_I2C_WAIT_GPIO_EVENT_CMD, + CCI_I2C_TRIG_I2C_EVENT_CMD, + CCI_I2C_LOCK_CMD, + CCI_I2C_UNLOCK_CMD, + CCI_I2C_REPORT_CMD, + CCI_I2C_WRITE_CMD, + CCI_I2C_READ_CMD, + CCI_I2C_WRITE_DISABLE_P_CMD, + CCI_I2C_READ_DISABLE_P_CMD, + CCI_I2C_WRITE_CMD2, + CCI_I2C_WRITE_CMD3, + CCI_I2C_REPEAT_CMD, + CCI_I2C_INVALID_CMD, +}; + +enum cam_cci_gpio_cmd_type { + CCI_GPIO_SET_PARAM_CMD = 1, + CCI_GPIO_WAIT_CMD, + CCI_GPIO_WAIT_SYNC_CMD, + CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD, + CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD, + CCI_GPIO_OUT_CMD, + CCI_GPIO_TRIG_EVENT_CMD, + CCI_GPIO_REPORT_CMD, + CCI_GPIO_REPEAT_CMD, + CCI_GPIO_CONTINUE_CMD, + CCI_GPIO_INVALID_CMD, +}; + +struct cam_sensor_cci_client { + struct v4l2_subdev *cci_subdev; + uint32_t freq; + enum i2c_freq_mode i2c_freq_mode; + enum 
cci_i2c_master_t cci_i2c_master; + uint16_t sid; + uint16_t cid; + uint32_t timeout; + uint16_t retries; + uint16_t id_map; + uint16_t cci_device; + bool is_probing; +}; + +struct cam_cci_ctrl { + int32_t status; + struct cam_sensor_cci_client *cci_info; + enum cam_cci_cmd_type cmd; + union { + struct cam_sensor_i2c_reg_setting cci_i2c_write_cfg; + struct cam_cci_read_cfg cci_i2c_read_cfg; + struct cam_cci_wait_sync_cfg cci_wait_sync_cfg; + struct cam_cci_gpio_cfg gpio_cfg; + } cfg; + bool is_probing; +}; + +struct cci_write_async { + struct cci_device *cci_dev; + struct cam_cci_ctrl c_ctrl; + enum cci_i2c_queue_t queue; + struct work_struct work; + ktime_t workq_scheduled_ts; + enum cci_i2c_sync sync_en; +}; + +irqreturn_t cam_cci_irq(int irq_num, void *data); +irqreturn_t cam_cci_threaded_irq(int irq_num, void *data); + +struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index); +void cam_cci_dump_registers(struct cci_device *cci_dev, + enum cci_i2c_master_t master, enum cci_i2c_queue_t queue); + +/** + * @brief : API to register CCI hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_cci_init_module(void); + +/** + * @brief : API to remove CCI Hw from platform framework. + */ +void cam_cci_exit_module(void); +#define VIDIOC_MSM_CCI_CFG \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct cam_cci_ctrl) + +#endif /* _CAM_CCI_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_hwreg.h new file mode 100644 index 0000000000..c9f8e8410a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_hwreg.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2015, 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CCI_HWREG_ +#define _CAM_CCI_HWREG_ + +#define CCI_HW_VERSION_ADDR 0x00000000 +#define CCI_RESET_CMD_ADDR 0x00000004 +#define CCI_RESET_CMD_RMSK 0x0f73f3f7 +#define CCI_M0_RESET_RMSK 0x3F1 +#define CCI_M1_RESET_RMSK 0x3F001 +#define CCI_QUEUE_START_ADDR 0x00000008 +#define CCI_SET_CID_SYNC_TIMER_ADDR 0x00000010 +#define CCI_SET_CID_SYNC_TIMER_OFFSET 0x00000004 +#define CCI_I2C_M0_SCL_CTL_ADDR 0x00000100 +#define CCI_I2C_M0_SDA_CTL_0_ADDR 0x00000104 +#define CCI_I2C_M0_SDA_CTL_1_ADDR 0x00000108 +#define CCI_I2C_M0_SDA_CTL_2_ADDR 0x0000010c +#define CCI_I2C_M0_READ_DATA_ADDR 0x00000118 +#define CCI_I2C_M0_MISC_CTL_ADDR 0x00000110 +#define CCI_I2C_M0_STATUS_ADDR 0x00000114 +#define CCI_I2C_M0_READ_BUF_LEVEL_ADDR 0x0000011C +#define CCI_HALT_REQ_ADDR 0x00000034 +#define CCI_M0_HALT_REQ_RMSK 0x1 +#define CCI_M1_HALT_REQ_RMSK 0x2 +#define CCI_I2C_M1_SCL_CTL_ADDR 0x00000200 +#define CCI_I2C_M1_SDA_CTL_0_ADDR 0x00000204 +#define CCI_I2C_M1_SDA_CTL_1_ADDR 0x00000208 +#define CCI_I2C_M1_SDA_CTL_2_ADDR 0x0000020c +#define CCI_I2C_M1_MISC_CTL_ADDR 0x00000210 +#define CCI_I2C_M1_STATUS_ADDR 0x00000214 +#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR 0x00000304 +#define CCI_I2C_M0_Q0_CUR_CMD_ADDR 0x00000308 +#define CCI_I2C_M0_Q0_REPORT_STATUS_ADDR 0x0000030c +#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR 0x00000300 +#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x00000310 +#define CCI_IRQ_MASK_0_ADDR 0x00000c04 +#define CCI_IRQ_MASK_0_RMSK 0x7fff7ff7 +#define CCI_IRQ_MASK_1_ADDR 0x00000c10 +#define CCI_IRQ_MASK_1_RMSK 0x00DD0000 +#define CCI_IRQ_CLEAR_0_ADDR 0x00000c08 +#define CCI_IRQ_CLEAR_1_ADDR 0x00000c14 +#define CCI_IRQ_STATUS_0_ADDR 0x00000c0c +#define CCI_IRQ_STATUS_1_ADDR 0x00000c18 +#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK 0x4000000 +#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK 0x2000000 +#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK 0x1000000 +#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK 0x100000 +#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK 0x10000 
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK 0x1000 +#define CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD 0x100000 +#define CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE 0x200000 +#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK 0x100 +#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK 0x10 +#define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK 0x18000EE6 +#define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK 0x60EE6000 +#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERROR_BMSK 0x8000000 +#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERROR_BMSK 0x10000000 +#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERROR_BMSK 0x20000000 +#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERROR_BMSK 0x40000000 +#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_ERROR_BMSK 0xEE0 +#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_ERROR_BMSK 0xEE0000 +#define CCI_IRQ_STATUS_0_I2C_M0_RD_ERROR_BMSK 0x6 +#define CCI_IRQ_STATUS_0_I2C_M1_RD_ERROR_BMSK 0x6000 +#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK 0x1 +#define CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD 0x10000 +#define CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE 0x20000 +#define CCI_IRQ_STATUS_1_I2C_M1_Q0_THRESHOLD 0x400000 +#define CCI_IRQ_STATUS_1_I2C_M1_Q1_THRESHOLD 0x800000 +#define CCI_IRQ_STATUS_1_I2C_M0_Q0_THRESHOLD 0x40000 +#define CCI_IRQ_STATUS_1_I2C_M0_Q1_THRESHOLD 0x80000 +#define CCI_I2C_M0_RD_THRESHOLD_ADDR 0x00000120 +#define CCI_I2C_M1_RD_THRESHOLD_ADDR 0x00000220 +#define CCI_I2C_RD_THRESHOLD_VALUE 0x30 +#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x00000c00 + +#define DEBUG_TOP_REG_START 0x0 +#define DEBUG_TOP_REG_COUNT 14 +#define DEBUG_MASTER_REG_START 0x100 +#define DEBUG_MASTER_REG_COUNT 9 +#define DEBUG_MASTER_QUEUE_REG_START 0x300 +#define DEBUG_MASTER_QUEUE_REG_COUNT 7 +#define DEBUG_INTR_REG_START 0xC00 +#define DEBUG_INTR_REG_COUNT 7 +#endif /* _CAM_CCI_HWREG_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_soc.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_soc.c new file mode 100644 index 0000000000..a8821813e7 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_soc.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_cci_dev.h" +#include "cam_cci_core.h" +#include "linux/interrupt.h" +#include "uapi/linux/sched/types.h" +#include "linux/sched/types.h" + +static int cam_cci_init_master(struct cci_device *cci_dev, + enum cci_i2c_master_t master) +{ + int i, rc; + void __iomem *base; + struct cam_hw_soc_info *soc_info; + uint32_t max_queue_0_size, max_queue_1_size; + + soc_info = &cci_dev->soc_info; + base = soc_info->reg_map[0].mem_base; + + if (cci_dev->hw_version == CCI_VERSION_1_2_9) { + max_queue_0_size = CCI_I2C_QUEUE_0_SIZE_V_1_2; + max_queue_1_size = CCI_I2C_QUEUE_1_SIZE_V_1_2; + } else { + max_queue_0_size = CCI_I2C_QUEUE_0_SIZE; + max_queue_1_size = CCI_I2C_QUEUE_1_SIZE; + } + + cci_dev->master_active_slave[master]++; + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d active slave: %d", + cci_dev->soc_info.index, master, cci_dev->master_active_slave[master]); + if (!cci_dev->cci_master_info[master].is_initilized) { + /* Re-initialize the completion */ + rc = cam_soc_util_select_pinctrl_state(soc_info, master, true); + if (rc) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d Pinctrl active state x'sition failed, rc: %d", + cci_dev->soc_info.index, master, rc); + goto MASTER_INIT_ERR; + } + + reinit_completion( + &cci_dev->cci_master_info[master].reset_complete); + reinit_completion(&cci_dev->cci_master_info[master].rd_done); + + /* reinit the reports for the queue */ + for (i = 0; i < NUM_QUEUES; i++) + reinit_completion( + &cci_dev->cci_master_info[master].report_q[i]); + + /* Set reset pending flag to true */ + cci_dev->cci_master_info[master].reset_pending = true; + cci_dev->cci_master_info[master].status = 0; + if (cci_dev->ref_count == 1) { + cam_io_w_mb(CCI_RESET_CMD_RMSK, + 
base + CCI_RESET_CMD_ADDR); + cam_io_w_mb(0x1, base + CCI_RESET_CMD_ADDR); + } else { + cam_io_w_mb((master == MASTER_0) ? + CCI_M0_RESET_RMSK : CCI_M1_RESET_RMSK, + base + CCI_RESET_CMD_ADDR); + } + if (!cam_common_wait_for_completion_timeout( + &cci_dev->cci_master_info[master].reset_complete, + CCI_TIMEOUT)) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d Failed: reset complete timeout", + cci_dev->soc_info.index, master); + rc = -ETIMEDOUT; + goto MASTER_INIT_ERR; + } + + flush_workqueue(cci_dev->write_wq[master]); + + /* Setting up the queue size for master */ + cci_dev->cci_i2c_queue_info[master][QUEUE_0].max_queue_size + = max_queue_0_size; + cci_dev->cci_i2c_queue_info[master][QUEUE_1].max_queue_size + = max_queue_1_size; + + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d:: Q0: %d Q1: %d", + cci_dev->soc_info.index, master, + cci_dev->cci_i2c_queue_info[master][QUEUE_0] + .max_queue_size, + cci_dev->cci_i2c_queue_info[master][QUEUE_1] + .max_queue_size); + + cci_dev->cci_master_info[master].status = 0; + cci_dev->cci_master_info[master].is_initilized = true; + cci_dev->is_burst_read[master] = false; + } + + return 0; + +MASTER_INIT_ERR: + cci_dev->master_active_slave[master]--; + + return rc; +} + +int cam_cci_init(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl) +{ + uint8_t i = 0; + int32_t rc = 0; + struct cci_device *cci_dev; + enum cci_i2c_master_t master = MASTER_MAX; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + struct cam_hw_soc_info *soc_info = NULL; + void __iomem *base = NULL; + + cci_dev = v4l2_get_subdevdata(sd); + if (!cci_dev || !c_ctrl) { + CAM_ERR(CAM_CCI, + "Invalid params cci_dev: %p, c_ctrl: %p", + cci_dev, c_ctrl); + rc = -EINVAL; + return rc; + } + + master = c_ctrl->cci_info->cci_i2c_master; + soc_info = &cci_dev->soc_info; + + if (!soc_info) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d failed: invalid params soc_info:%pK", + cci_dev->soc_info.index, master, soc_info); + rc = -EINVAL; + return rc; + } + + base = 
soc_info->reg_map[0].mem_base; + + if (!base) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d failed: invalid params base:%pK", + cci_dev->soc_info.index, master, base); + rc = -EINVAL; + return rc; + } + + if (master >= MASTER_MAX || master < 0) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d Incorrect Master", + cci_dev->soc_info.index, master); + return -EINVAL; + } + + if (!cci_dev->write_wq[master]) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d Null memory for write wq", + cci_dev->soc_info.index, master); + rc = -ENOMEM; + return rc; + } + + if (cci_dev->ref_count++) { + rc = cam_cci_init_master(cci_dev, master); + if (rc) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d Failed to init rc: %d", + cci_dev->soc_info.index, master, rc); + cci_dev->ref_count--; + } + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d ref_count %d", + cci_dev->soc_info.index, master, cci_dev->ref_count); + return rc; + } + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 1; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(cci_dev->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d CPAS start failed, rc: %d", + cci_dev->soc_info.index, master, rc); + return rc; + } + + cam_cci_get_clk_rates(cci_dev, c_ctrl); + + /* Enable Regulators and IRQ*/ + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, + soc_info->lowest_clk_level, true); + if (rc < 0) { + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d request platform resources failed, rc: %d", + cci_dev->soc_info.index, master, rc); + goto platform_enable_failed; + } + + cci_dev->hw_version = cam_io_r_mb(base + CCI_HW_VERSION_ADDR); + CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d hw_version = 0x%x", + cci_dev->soc_info.index, master, 
cci_dev->hw_version); + + cci_dev->payload_size = MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11; + cci_dev->support_seq_write = 1; + + rc = cam_cci_init_master(cci_dev, master); + if (rc) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d Failed to init, rc: %d", + cci_dev->soc_info.index, master, rc); + goto reset_complete_failed; + } + + for (i = 0; i < MASTER_MAX; i++) + cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES; + + cam_io_w_mb(CCI_IRQ_MASK_0_RMSK, base + CCI_IRQ_MASK_0_ADDR); + cam_io_w_mb(CCI_IRQ_MASK_0_RMSK, base + CCI_IRQ_CLEAR_0_ADDR); + cam_io_w_mb(CCI_IRQ_MASK_1_RMSK, base + CCI_IRQ_MASK_1_ADDR); + cam_io_w_mb(CCI_IRQ_MASK_1_RMSK, base + CCI_IRQ_CLEAR_1_ADDR); + cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR); + + /* Set RD FIFO threshold for M0 & M1 */ + if (cci_dev->hw_version != CCI_VERSION_1_2_9) { + cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE, + base + CCI_I2C_M0_RD_THRESHOLD_ADDR); + cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE, + base + CCI_I2C_M1_RD_THRESHOLD_ADDR); + } + + cci_dev->cci_state = CCI_STATE_ENABLED; + + return 0; + +reset_complete_failed: + cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); +platform_enable_failed: + cci_dev->ref_count--; + cam_cpas_stop(cci_dev->cpas_handle); + + return rc; +} + +void cam_cci_soc_remove(struct platform_device *pdev, + struct cci_device *cci_dev) +{ + struct cam_hw_soc_info *soc_info = &cci_dev->soc_info; + + cam_soc_util_release_platform_resource(soc_info); +} + +static void cam_cci_init_cci_params(struct cci_device *new_cci_dev) +{ + uint8_t i = 0, j = 0; + + for (i = 0; i < MASTER_MAX; i++) { + new_cci_dev->cci_master_info[i].status = 0; + new_cci_dev->cci_master_info[i].freq_ref_cnt = 0; + new_cci_dev->cci_master_info[i].is_initilized = false; + mutex_init(&new_cci_dev->cci_master_info[i].mutex); + sema_init(&new_cci_dev->cci_master_info[i].master_sem, 1); + mutex_init(&new_cci_dev->cci_master_info[i].freq_cnt_lock); + init_completion( + &new_cci_dev->cci_master_info[i].reset_complete); + 
init_completion( + &new_cci_dev->cci_master_info[i].th_complete); + init_completion( + &new_cci_dev->cci_master_info[i].rd_done); + + for (j = 0; j < NUM_QUEUES; j++) { + mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]); + init_completion( + &new_cci_dev->cci_master_info[i].report_q[j]); + init_completion( + &new_cci_dev->cci_master_info[i].th_burst_complete[j]); + spin_lock_init( + &new_cci_dev->cci_master_info[i].lock_q[j]); + } + } + spin_lock_init(&new_cci_dev->lock_status); +} + +static void cam_cci_init_default_clk_params(struct cci_device *cci_dev, + uint8_t index) +{ + /* default clock params are for 100Khz */ + cci_dev->cci_clk_params[index].hw_thigh = 201; + cci_dev->cci_clk_params[index].hw_tlow = 174; + cci_dev->cci_clk_params[index].hw_tsu_sto = 204; + cci_dev->cci_clk_params[index].hw_tsu_sta = 231; + cci_dev->cci_clk_params[index].hw_thd_dat = 22; + cci_dev->cci_clk_params[index].hw_thd_sta = 162; + cci_dev->cci_clk_params[index].hw_tbuf = 227; + cci_dev->cci_clk_params[index].hw_scl_stretch_en = 0; + cci_dev->cci_clk_params[index].hw_trdhld = 6; + cci_dev->cci_clk_params[index].hw_tsp = 3; + cci_dev->cci_clk_params[index].cci_clk_src = 37500000; +} + +static void cam_cci_init_clk_params(struct cci_device *cci_dev) +{ + int32_t rc = 0; + uint32_t val = 0; + uint8_t count = 0; + struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node; + struct device_node *src_node = NULL; + + for (count = 0; count < I2C_MAX_MODES; count++) { + + if (count == I2C_STANDARD_MODE) + src_node = of_find_node_by_name(of_node, + "qcom,i2c_standard_mode"); + else if (count == I2C_FAST_MODE) + src_node = of_find_node_by_name(of_node, + "qcom,i2c_fast_mode"); + else if (count == I2C_FAST_PLUS_MODE) + src_node = of_find_node_by_name(of_node, + "qcom,i2c_fast_plus_mode"); + else + src_node = of_find_node_by_name(of_node, + "qcom,i2c_custom_mode"); + + rc = of_property_read_u32(src_node, "hw-thigh", &val); + CAM_DBG(CAM_CCI, "hw-thigh %d, rc %d", val, rc); + 
if (!rc) { + cci_dev->cci_clk_params[count].hw_thigh = val; + rc = of_property_read_u32(src_node, "hw-tlow", + &val); + CAM_DBG(CAM_CCI, "hw-tlow %d, rc %d", + val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_tlow = val; + rc = of_property_read_u32(src_node, "hw-tsu-sto", + &val); + CAM_DBG(CAM_CCI, "hw-tsu-sto %d, rc %d", + val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_tsu_sto = val; + rc = of_property_read_u32(src_node, "hw-tsu-sta", + &val); + CAM_DBG(CAM_CCI, "hw-tsu-sta %d, rc %d", + val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_tsu_sta = val; + rc = of_property_read_u32(src_node, "hw-thd-dat", + &val); + CAM_DBG(CAM_CCI, "hw-thd-dat %d, rc %d", + val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_thd_dat = val; + rc = of_property_read_u32(src_node, "hw-thd-sta", + &val); + CAM_DBG(CAM_CCI, "hw-thd-sta %d, rc %d", + val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_thd_sta = val; + rc = of_property_read_u32(src_node, "hw-tbuf", + &val); + CAM_DBG(CAM_CCI, "hw-tbuf %d, rc %d", + val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_tbuf = val; + rc = of_property_read_u32(src_node, + "hw-scl-stretch-en", &val); + CAM_DBG(CAM_CCI, "hw-scl-stretch-en %d, rc %d", + val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_scl_stretch_en = val; + rc = of_property_read_u32(src_node, "hw-trdhld", + &val); + CAM_DBG(CAM_CCI, "hw-trdhld %d, rc %d", + val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_trdhld = val; + rc = of_property_read_u32(src_node, "hw-tsp", + &val); + CAM_DBG(CAM_CCI, "hw-tsp %d, rc %d", val, rc); + } + if (!rc) { + cci_dev->cci_clk_params[count].hw_tsp = val; + val = 0; + rc = of_property_read_u32(src_node, "cci-clk-src", + &val); + CAM_DBG(CAM_CCI, "cci-clk-src %d, rc %d", val, rc); + cci_dev->cci_clk_params[count].cci_clk_src = val; + } else + cam_cci_init_default_clk_params(cci_dev, count); + + of_node_put(src_node); + } +} + +int 
cam_cci_parse_dt_info(struct platform_device *pdev, + struct cci_device *new_cci_dev) +{ + int rc = 0, i = 0; + struct cam_hw_soc_info *soc_info = + &new_cci_dev->soc_info; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + int32_t num_irq = 0; + struct task_struct *task = NULL; + struct irq_desc *desc = NULL; + struct sched_param param = {0}; + + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_CCI, "Parsing DT data failed:%d", rc); + return -EINVAL; + } + + new_cci_dev->ref_count = 0; + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = new_cci_dev; + /* + * Bypass devm_request_irq() and induce + * devm_request_threaded_irq() externally. + */ + num_irq = soc_info->irq_count; + soc_info->irq_count = 0; + rc = cam_soc_util_request_platform_resource(soc_info, + cam_cci_irq, &(irq_data[0])); + if (rc < 0) { + CAM_ERR(CAM_CCI, "requesting platform resources failed:%d", rc); + return -EINVAL; + } + soc_info->irq_count = num_irq; + for (i = 0; i < soc_info->irq_count; i++) { + rc = devm_request_threaded_irq(&pdev->dev, + soc_info->irq_num[i], + cam_cci_irq, + cam_cci_threaded_irq, + IRQF_TRIGGER_RISING, + soc_info->irq_name[i], + (void *)(irq_data[i])); + if (rc < 0) { + CAM_ERR(CAM_CCI, "Failed to reserve IRQ: %d", rc); + return -EINVAL; + } + disable_irq(soc_info->irq_num[i]); + desc = irq_to_desc(soc_info->irq_num[i]); + if (!desc) { + CAM_WARN(CAM_CCI, + "Unable to locate Descriptor for irq_num: %d", + soc_info->irq_num[i]); + } else { + task = desc->action->thread; + param.sched_priority = MAX_RT_PRIO - 1; + if (task) { + rc = sched_setscheduler(task, SCHED_FIFO, &param); + if (rc) { + CAM_ERR(CAM_CCI, + "non-fatal: Failed to set Scheduler Priority: %d", + rc); + } + } + } + } + + new_cci_dev->v4l2_dev_str.pdev = pdev; + cam_cci_init_cci_params(new_cci_dev); + cam_cci_init_clk_params(new_cci_dev); + + for (i = 0; i < MASTER_MAX; i++) { + new_cci_dev->write_wq[i] = create_singlethread_workqueue( + CAM_CCI_WORKQUEUE_NAME); + 
if (!new_cci_dev->write_wq[i]) + CAM_ERR(CAM_CCI, "Failed to create write wq"); + } + CAM_DBG(CAM_CCI, "Exit"); + return 0; +} + +int cam_cci_soc_release(struct cci_device *cci_dev, + enum cci_i2c_master_t master) +{ + uint8_t i = 0, rc = 0; + struct cam_hw_soc_info *soc_info = &cci_dev->soc_info; + + if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED || + !cci_dev->master_active_slave[master]) { + CAM_ERR(CAM_CCI, + "CCI%d_I2C_M%d invalid cci_dev_ref count %u | cci state %d | master_ref_count %u", + cci_dev->soc_info.index, master, cci_dev->ref_count, cci_dev->cci_state, + cci_dev->master_active_slave[master]); + return -EINVAL; + } + + if (!(--cci_dev->master_active_slave[master])) { + if (cam_soc_util_select_pinctrl_state(soc_info, master, false)) + CAM_WARN(CAM_CCI, + "CCI%d_I2C_M%d Pinctrl suspend state x'sition failed", + cci_dev->soc_info.index, master); + + cci_dev->cci_master_info[master].is_initilized = false; + CAM_DBG(CAM_CCI, + "CCI%d_I2C_M%d All submodules are released", cci_dev->soc_info.index, master); + } + + if (--cci_dev->ref_count) { + CAM_DBG(CAM_CCI, "CCI%d_M%d Submodule release: Ref_count: %d", + cci_dev->soc_info.index, master, cci_dev->ref_count); + return 0; + } + + for (i = 0; i < MASTER_MAX; i++) { + if (cci_dev->write_wq[i]) + flush_workqueue(cci_dev->write_wq[i]); + cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES; + } + + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (rc) { + CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d platform resources disable failed, rc: %d", + cci_dev->soc_info.index, master, rc); + return rc; + } + + cci_dev->cci_state = CCI_STATE_DISABLED; + cci_dev->cycles_per_us = 0; + + cam_cpas_stop(cci_dev->cpas_handle); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_soc.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_soc.h new file mode 100644 index 0000000000..e78d9918ff --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_cci/cam_cci_soc.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CCI_SOC_H_ +#define _CAM_CCI_SOC_H_ + +#include "cam_cci_core.h" +#include "cam_soc_util.h" + +/** + * @sd: V4L2 sub device + * @c_ctrl: CCI control structure + * + * This API initializes the CCI and acquires SOC resources + */ +int cam_cci_init(struct v4l2_subdev *sd, + struct cam_cci_ctrl *c_ctrl); + +/** + * @cci_dev: CCI device structure + * + * This API releases the CCI and its SOC resources + */ +int cam_cci_soc_release(struct cci_device *cci_dev, + enum cci_i2c_master_t master); + +/** + * @pdev: Platform device + * @new_cci_dev: CCI device structure + * + * This API parses CCI device tree + */ +int cam_cci_parse_dt_info(struct platform_device *pdev, + struct cci_device *new_cci_dev); + +/** + * @pdev: Platform device + * @cci_dev: CCI device structure + * + * This API puts all SOC resources + */ +void cam_cci_soc_remove(struct platform_device *pdev, + struct cci_device *cci_dev); +#endif /* _CAM_CCI_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.c new file mode 100644 index 0000000000..caa28a2123 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.c @@ -0,0 +1,3297 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include + +#include + +#include "cam_compat.h" +#include "cam_csiphy_core.h" +#include "cam_csiphy_dev.h" +#include "cam_csiphy_soc.h" +#include "cam_common_util.h" +#include "cam_packet_util.h" +#include "cam_mem_mgr.h" +#include "cam_cpas_api.h" +#include "cam_compat.h" + +#define SCM_SVC_CAMERASS 0x18 +#define SECURE_SYSCALL_ID 0x6 +#define SECURE_SYSCALL_ID_2 0x7 + +#define LANE_MASK_2PH 0x1F +#define LANE_MASK_3PH 0x7 + +/* Size of CPAS_SEC_LANE_CP_CTRL register mask */ +#define SEC_LANE_CP_REG_LEN 32 +/* + * PHY index at which CPAS_SEC_LANE_CP_CTRL register mask + * changes depending on PHY HW version + */ +#define CAM_MAX_PHYS_PER_CP_CTRL_REG 4 + +#define CSIPHY_POLL_DELAY_US 500 +#define CSIPHY_POLL_TIMEOUT_US 10000 +#define CSPIHY_REFGEN_STATUS_BIT (1 << 7) +#define CSIPHY_ONTHEGO_BUFSIZE 90 +#define CSIPHY_ONTHEGO_BUFSIZE_READ (CSIPHY_ONTHEGO_BUFSIZE / 6) +#define CSIPHY_QMARGIN_STR_LEN 20 +#define CSIPHY_QMARGIN_DEFAULT_STR "default" +#define SETTLE_CNT_ADJUSTMENT_OFFSET 10 + +struct g_csiphy_data { + void __iomem *base_address; + uint8_t is_3phase; + uint32_t cpas_handle; + bool is_configured_for_main; + uint64_t data_rate_aux_mask; + uint32_t computed_cdr_value; + uint32_t aon_cam_id; + struct cam_csiphy_aon_sel_params_t *aon_sel_param; + struct csiphy_qmargin_sweep_data *qmargin_data; +}; + +static DEFINE_MUTEX(active_csiphy_cnt_mutex); +static DEFINE_MUTEX(main_aon_selection); +static struct g_csiphy_data g_phy_data[MAX_CSIPHY] = {0}; +static int active_csiphy_hw_cnt; +static char csiphy_onthego_regs[20]; +static int csiphy_onthego_reg_count[MAX_CSIPHY]; +static unsigned int csiphy_onthego_regvals[MAX_CSIPHY][CSIPHY_ONTHEGO_BUFSIZE]; +static char csiphy_qmargin[CSIPHY_QMARGIN_STR_LEN]; +static unsigned int csiphy_onthego_read_count[MAX_CSIPHY]; +static unsigned int csiphy_onthego_readregs[MAX_CSIPHY][CSIPHY_ONTHEGO_BUFSIZE_READ]; + +static void csiphy_set_onthego_write_regs(int *inp, int num_input, int phy_idx) +{ + int i; + + if (phy_idx >= 
MAX_CSIPHY) + return; + + for (i = 0; i < num_input && i < CSIPHY_ONTHEGO_BUFSIZE; i++) + csiphy_onthego_regvals[phy_idx][i] = (unsigned int) inp[i]; + + csiphy_onthego_reg_count[phy_idx] = i; +} + +static void csiphy_set_onthego_read_regs(int *inp, int num_input, int phy_idx) +{ + int i; + + if (phy_idx >= MAX_CSIPHY) + return; + + for (i = 0; i < num_input && i < CSIPHY_ONTHEGO_BUFSIZE_READ ; i++) + csiphy_onthego_readregs[phy_idx][i] = (unsigned int) inp[i]; + + csiphy_onthego_read_count[phy_idx] = i; +} + +static int csiphy_set_onthego_values(const char *val, const struct kernel_param *kp) +{ + /** + * Expected format string: ":n1,n2,..:val1,val2,..."" + * Values between the colons, specify the PHY(s) with + * which these settings apply to + * The actual onthego values should have comma-delimited + * entries with total a multiple of 3 (reg_addr, val, delay) + */ + int i, idx, onthego_val, onthego_idx = 0; + int onthego_values[CSIPHY_ONTHEGO_BUFSIZE] = {0}; + char *p1, *p2, *token; + bool phy_target[MAX_CSIPHY] = {false}, read = false; + + p1 = strnchr(val, 1, ':'); p2 = strrchr(val, ':'); + if (!p1 || !p2 || p2 - p1 < 2) { + CAM_ERR(CAM_CSIPHY, "Invalid csiphy onthego input string: %s", val); + return -EINVAL; + } + + strscpy(csiphy_onthego_regs, p1, 20); + while ((token = strsep(&p1, ":")) != NULL) { + if (!kstrtoint(token, 0, &idx) && idx >= 0 && idx < MAX_CSIPHY) + phy_target[idx] = true; + } + + p1 = p2 + 1; + + if (!strncasecmp(p1, "X", 1)) { + for (i = 0; i < MAX_CSIPHY; i++) { + if (phy_target[i]) { + csiphy_onthego_reg_count[i] = 0; + csiphy_onthego_read_count[i] = 0; + } + } + return 0; + } + + while (((token = strsep(&p1, ",")) != NULL) && onthego_idx < CSIPHY_ONTHEGO_BUFSIZE) { + while (token && token[0] == ' ') + token++; + if (token && !strncasecmp(token, "R", 1)) + read = true; + if (!kstrtoint(token, 0, &onthego_val)) + onthego_values[onthego_idx++] = onthego_val; + } + + if (!onthego_idx || (!read && (onthego_idx % 3))) { + CAM_ERR(CAM_CSIPHY, 
"Invalid multiple of onthego entries: %d,", onthego_idx); + return -EINVAL; + } + + for (i = 0; i < MAX_CSIPHY && onthego_idx; i++) { + if (phy_target[i]) { + if (read) + csiphy_set_onthego_read_regs(onthego_values, onthego_idx, i); + else + csiphy_set_onthego_write_regs(onthego_values, onthego_idx, i); + } + } + + return 0; +} + +static int csiphy_get_onthego_values(char *buffer, const struct kernel_param *kp) +{ + int rc = 0, i, j, reg_count; + char *p = buffer; + + for (i = 0; i < MAX_CSIPHY; i++) { + reg_count = csiphy_onthego_reg_count[i]; + rc += scnprintf(p + rc, PAGE_SIZE - rc, "PHY idx %d: ", i); + for (j = 0; j < reg_count; j++) + rc += scnprintf(p + rc, PAGE_SIZE - rc, "0x%x,", + csiphy_onthego_regvals[i][j]); + rc += scnprintf(p + rc, PAGE_SIZE - rc, "\n"); + + if (csiphy_onthego_read_count[i]) { + rc += scnprintf(p + rc, PAGE_SIZE - rc, "Reg read for PHY_%d:\n", i); + for (j = 0; j < csiphy_onthego_read_count[i]; j++) { + rc += scnprintf(p + rc, PAGE_SIZE - rc, + "\tOffset: 0x%x, Val: 0x%x\n", + csiphy_onthego_readregs[i][j], cam_io_r_mb( + g_phy_data[i].base_address + + csiphy_onthego_readregs[i][j])); + } + rc += scnprintf(p + rc, PAGE_SIZE - rc, "\n"); + } + } + return rc; +} + +static const struct kernel_param_ops csiphy_onthego_ops = { + .set = csiphy_set_onthego_values, + .get = csiphy_get_onthego_values, +}; + +static int csiphy_qmargin_get_defaults(char *buffer, int phy_idx) +{ + int i, bytes_written = 0; + char *p = buffer; + struct csiphy_reg_t *cdr_regs; + struct csiphy_qmargin_csid_output *qmargin_csid_output; + + if (!g_phy_data[phy_idx].qmargin_data) { + CAM_ERR(CAM_CSIPHY, "Invalid PHY idx: %u", phy_idx); + return 0; + } + + cdr_regs = &g_phy_data[phy_idx].qmargin_data->cdr_regs[0]; + qmargin_csid_output = &(g_phy_data[phy_idx].qmargin_data->qmargin_csid_output); + + if (g_phy_data[phy_idx].qmargin_data->bw != 0) { + bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - bytes_written, + "PHY%d_BW: 0x%llx\n", phy_idx, + 
g_phy_data[phy_idx].qmargin_data->bw); + bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - bytes_written, + "PHY%d_EPD_ENABLED: %s\n", phy_idx, + CAM_BOOL_TO_YESNO(qmargin_csid_output->epd_enabled)); + } + for (i = 0; i < CAM_CSIPHY_MAX_CPHY_LANES; i++) { + if (!cdr_regs[i].reg_addr) + return bytes_written; + bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - bytes_written, + "PHY%d_CDR_%d, ADDR: 0x%x, DEFAULT: 0x%x\n", + phy_idx, i, cdr_regs[i].reg_addr, cdr_regs[i].reg_data); + } + + bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - bytes_written, "\n"); + + return bytes_written; +} + +static int csiphy_qmargin_get(char *buffer, const struct kernel_param *kp) +{ + int bytes_written = 0, i, j; + char *p = buffer; + struct csiphy_qmargin_csid_output *qmargin_csid_output; + unsigned int *csiphy_qmargin_output_regs; + char num[10]; + + for (i = 0; i < MAX_CSIPHY; i++) { + if (strstr(csiphy_qmargin, CSIPHY_QMARGIN_DEFAULT_STR)) + bytes_written += csiphy_qmargin_get_defaults(p + bytes_written, i); + + snprintf(num, 10, "%d", i); + + if (strstr(csiphy_qmargin, num) && g_phy_data[i].qmargin_data) { + qmargin_csid_output = &(g_phy_data[i].qmargin_data->qmargin_csid_output); + csiphy_qmargin_output_regs = + g_phy_data[i].qmargin_data->csiphy_qmargin_output_regs; + bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - bytes_written, + "PHY%d_CSI2_RX: 0x%x\n", + i, qmargin_csid_output->csi2_rx_status); + bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - bytes_written, + "PHY%d_CSI2_CRC: 0x%x\n", + i, qmargin_csid_output->csi2_total_crc_err); + bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - bytes_written, + "PHY%d_CSI2_PKT: 0x%x\n", + i, qmargin_csid_output->csi2_total_pkts_rcvd); + for (j = 0; j < CSIPHY_QMARGIN_CMN_STATUS_REG_COUNT; j++) { + bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - + bytes_written, "PHY%d_COMMON_STATUS%u: 0x%x\n", + i, j, csiphy_qmargin_output_regs[j]); + csiphy_qmargin_output_regs[j] = 0; + } + 
bytes_written += scnprintf(p + bytes_written, PAGE_SIZE - + bytes_written, "\n"); + } + + if (g_phy_data[i].qmargin_data) { + memset(g_phy_data[i].qmargin_data, 0, + sizeof(struct csiphy_qmargin_sweep_data)); + } + } + + csiphy_qmargin[0] = '\0'; + + return bytes_written; +} + +static int csiphy_qmargin_set(const char *val, const struct kernel_param *kp) +{ + strscpy(csiphy_qmargin, val, CSIPHY_QMARGIN_STR_LEN); + + return 0; +} + +static const struct kernel_param_ops csiphy_qmargin_ops = { + .set = csiphy_qmargin_set, + .get = csiphy_qmargin_get, +}; + +module_param_cb(csiphy_onthego_regs, &csiphy_onthego_ops, NULL, 0644); +MODULE_PARM_DESC(csiphy_onthego_regs, "Functionality to let csiphy registers program on the fly"); +module_param_cb(csiphy_qmargin, &csiphy_qmargin_ops, NULL, 0644); +MODULE_PARM_DESC(csiphy_qmargin, "Functionality to print registers related to PHY tuning"); + +void cam_csiphy_update_auxiliary_mask(struct csiphy_device *csiphy_dev) +{ + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Invalid param"); + return; + } + + if (!g_phy_data[csiphy_dev->soc_info.index].is_3phase) { + CAM_INFO_RATE_LIMIT(CAM_CSIPHY, "2PH Sensor is connected to the PHY"); + return; + } + + g_phy_data[csiphy_dev->soc_info.index].data_rate_aux_mask |= + BIT_ULL(csiphy_dev->curr_data_rate_idx); + + CAM_DBG(CAM_CSIPHY, + "CSIPHY:%u configuring aux settings curr_data_rate_idx: %u curr_data_rate: %llu curr_aux_mask: 0x%lx", + csiphy_dev->soc_info.index, csiphy_dev->curr_data_rate_idx, + csiphy_dev->current_data_rate, + g_phy_data[csiphy_dev->soc_info.index].data_rate_aux_mask); +} + +void cam_csiphy_update_qmargin_csid_vals(void *data, int phy_idx) +{ + struct cam_subdev_msg_cdr_sweep_info *qmargin_csid_update = + (struct cam_subdev_msg_cdr_sweep_info *) data; + struct csiphy_qmargin_csid_output *qmargin_csid_output; + + if (phy_idx < 0 || phy_idx >= MAX_CSIPHY) + return; + + qmargin_csid_output = &(g_phy_data[phy_idx].qmargin_data->qmargin_csid_output); + + if 
(!qmargin_csid_update->csi2_err_seen && qmargin_csid_output->csi2_err_seen) + return; + + qmargin_csid_output->csi2_rx_status = qmargin_csid_update->csi2_rx_status; + qmargin_csid_output->csi2_total_crc_err = qmargin_csid_update->csi2_rx_total_crc_err; + qmargin_csid_output->csi2_total_pkts_rcvd = qmargin_csid_update->csi2_rx_total_pkts_rcvd; + qmargin_csid_output->csi2_err_seen = qmargin_csid_update->csi2_err_seen; + qmargin_csid_output->epd_enabled = qmargin_csid_update->epd_enabled; +} + +int32_t cam_csiphy_get_instance_offset(struct csiphy_device *csiphy_dev, int32_t dev_handle) +{ + int32_t i = 0; + + if (csiphy_dev->acquire_count > csiphy_dev->session_max_device_support) { + CAM_ERR(CAM_CSIPHY, + "Invalid acquire count: %d, Max supported device for session: %u", + csiphy_dev->acquire_count, + csiphy_dev->session_max_device_support); + return -EINVAL; + } + + for (i = 0; i < csiphy_dev->acquire_count; i++) { + if (dev_handle == + csiphy_dev->csiphy_info[i].hdl_data.device_hdl) + break; + } + + return i; +} + +static int cam_csiphy_get_settle_count( + struct csiphy_device *csiphy_dev, + int32_t index, + uint16_t *settle_cnt) +{ + struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info; + uint64_t intermediate_var = 0; + + uint32_t t3_prepare = csiphy_dev->csiphy_info[index].t3_prepare; + uint32_t t3_preamble = csiphy_dev->csiphy_info[index].t3_preamble; + + enum cam_vote_level vote_level = + csiphy_dev->ctrl_reg->getclockvoting(csiphy_dev, index); + int64_t timer_clk_rate = + soc_info->clk_rate[vote_level][csiphy_dev->timer_clk_src_idx]; + + intermediate_var = csiphy_dev->csiphy_info[index].settle_time; + do_div(intermediate_var, 200000000); + *settle_cnt = intermediate_var; + + if (*settle_cnt == 0) { + if (csiphy_dev->csiphy_info[index].csiphy_3phase) { + CAM_DBG(CAM_CSIPHY, + "PHY:%d cphy:%d, timer_clk_rate:%lld, t3_prepare:%u, t3_preamble:%u", + soc_info->index, + csiphy_dev->csiphy_info[index].csiphy_3phase, + timer_clk_rate, t3_prepare, t3_preamble); + + 
/* T-timer: 1 / Timer Clock */ + /* CPHY Ideal SC: (T-prepare + T-preamble / 3) / T-timer - 10 */ + intermediate_var = ((t3_prepare + t3_preamble / 3) * timer_clk_rate); + intermediate_var /= NSEC_PER_SEC; + } else { + CAM_DBG(CAM_CSIPHY, "PHY:%d cphy:%d, timer_clk_rate:%lld, data_rate:%llu", + soc_info->index, + csiphy_dev->csiphy_info[index].csiphy_3phase, + timer_clk_rate, + csiphy_dev->csiphy_info[index].data_rate); + + /* UI: 1 / Datarate */ + /* T-timer: 1 / Timer Clock */ + /* DPHY Min SC: (85 + 6UI) / T-timer – 10 */ + intermediate_var = (85 + (6 * (int64_t)NSEC_PER_SEC) / + csiphy_dev->csiphy_info[index].data_rate) * timer_clk_rate; + intermediate_var /= NSEC_PER_SEC; + } + + if (intermediate_var > SETTLE_CNT_ADJUSTMENT_OFFSET) { + intermediate_var -= SETTLE_CNT_ADJUSTMENT_OFFSET; + } else { + CAM_WARN(CAM_CSIPHY, + "PHY:%d Invalid calculated settle count:%d, setting to 0", + soc_info->index, (intermediate_var - SETTLE_CNT_ADJUSTMENT_OFFSET)); + intermediate_var = 0; + } + *settle_cnt = intermediate_var; + CAM_INFO(CAM_CSIPHY, "PHY:%d Calculated settle count:%u", + soc_info->index, *settle_cnt); + } + + return 0; +} + +static int cam_csiphy_cpas_ops( + uint32_t cpas_handle, bool start) +{ + int rc = 0; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + + if (start) { + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_LOWSVS_D1_VOTE; + axi_vote.num_paths = 1; + axi_vote.axi_path[0].path_data_type = + CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = + CAM_AXI_TRANSACTION_WRITE; + axi_vote.axi_path[0].camnoc_bw = + CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = + CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = + CAM_CPAS_DEFAULT_AXI_BW; + + rc = cam_cpas_start(cpas_handle, + &ahb_vote, &axi_vote); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "voting CPAS: %d", rc); + return rc; + } + CAM_DBG(CAM_CSIPHY, "CPAS START"); + } else { + rc = cam_cpas_stop(cpas_handle); + if (rc < 0) { + 
CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc); + return rc; + } + CAM_DBG(CAM_CSIPHY, "CPAS STOPPED"); + } + + return rc; +} + +static void cam_csiphy_reset_phyconfig_param(struct csiphy_device *csiphy_dev, + int32_t index) +{ + CAM_DBG(CAM_CSIPHY, "Resetting phyconfig param at index: %d", index); + csiphy_dev->csiphy_info[index].lane_cnt = 0; + csiphy_dev->csiphy_info[index].lane_assign = 0; + csiphy_dev->csiphy_info[index].lane_enable = 0; + csiphy_dev->csiphy_info[index].settle_time = 0; + csiphy_dev->csiphy_info[index].data_rate = 0; + csiphy_dev->csiphy_info[index].secure_mode = 0; + csiphy_dev->csiphy_info[index].mipi_flags = 0; + csiphy_dev->csiphy_info[index].hdl_data.device_hdl = -1; + csiphy_dev->csiphy_info[index].csiphy_3phase = -1; + csiphy_dev->csiphy_info[index].conn_csid_idx = -1; + csiphy_dev->csiphy_info[index].use_hw_client_voting = false; + csiphy_dev->csiphy_info[index].is_drv_config_en = false; + csiphy_dev->csiphy_info[index].t3_prepare = 0; + csiphy_dev->csiphy_info[index].t3_preamble = 0; +} + +static inline void cam_csiphy_apply_onthego_reg_values(void __iomem *csiphybase, uint8_t csiphy_idx) +{ + int i; + + CAM_DBG(CAM_CSIPHY, "csiphy: %d, onthego_reg_count: %d", + csiphy_idx, + csiphy_onthego_reg_count[csiphy_idx]); + + for (i = 0; i < csiphy_onthego_reg_count[csiphy_idx]; i += 3) { + cam_io_w_mb(csiphy_onthego_regvals[csiphy_idx][i+1], + csiphybase + csiphy_onthego_regvals[csiphy_idx][i]); + + if (csiphy_onthego_regvals[csiphy_idx][i+2]) + usleep_range(csiphy_onthego_regvals[csiphy_idx][i+2], + csiphy_onthego_regvals[csiphy_idx][i+2] + 5); + + CAM_INFO(CAM_CSIPHY, "Offset: 0x%x, Val: 0x%x Delay(us): %u", + csiphy_onthego_regvals[csiphy_idx][i], + cam_io_r_mb(csiphybase + csiphy_onthego_regvals[csiphy_idx][i]), + csiphy_onthego_regvals[csiphy_idx][i+2]); + } + +} + +int cam_csiphy_release_from_reset_state(struct csiphy_device *csiphy_dev, + void __iomem *csiphybase, int32_t instance) +{ + int i; + struct csiphy_reg_parms_t *csiphy_reg; + 
struct csiphy_reg_t *csiphy_reset_release_reg; + bool config_found = false; + + if (!csiphy_dev || !csiphybase) { + CAM_ERR(CAM_CSIPHY, "Invalid input params: csiphy_dev: %s, csiphybase: %s", + CAM_IS_NULL_TO_STR(csiphy_dev), + CAM_IS_NULL_TO_STR(csiphybase)); + return -EINVAL; + } + + CAM_DBG(CAM_CSIPHY, "Csiphy idx: %d", csiphy_dev->soc_info.index); + + csiphy_reg = csiphy_dev->ctrl_reg->csiphy_reg; + for (i = 0; i < csiphy_reg->csiphy_reset_exit_array_size; i++) { + csiphy_reset_release_reg = &csiphy_dev->ctrl_reg->csiphy_reset_exit_regs[i]; + + switch (csiphy_reset_release_reg->csiphy_param_type) { + case CSIPHY_2PH_REGS: + if (!g_phy_data[csiphy_dev->soc_info.index].is_3phase && + !csiphy_dev->combo_mode && + !csiphy_dev->cphy_dphy_combo_mode) { + cam_io_w_mb(csiphy_reset_release_reg->reg_data, + csiphybase + csiphy_reset_release_reg->reg_addr); + config_found = true; + } + break; + case CSIPHY_3PH_REGS: + if (g_phy_data[csiphy_dev->soc_info.index].is_3phase && + !csiphy_dev->combo_mode && + !csiphy_dev->cphy_dphy_combo_mode) { + cam_io_w_mb(csiphy_reset_release_reg->reg_data, + csiphybase + csiphy_reset_release_reg->reg_addr); + config_found = true; + } + break; + case CSIPHY_2PH_COMBO_REGS: + if (!csiphy_dev->csiphy_info[instance].csiphy_3phase && + csiphy_dev->combo_mode && + !csiphy_dev->cphy_dphy_combo_mode) { + cam_io_w_mb(csiphy_reset_release_reg->reg_data, + csiphybase + csiphy_reset_release_reg->reg_addr); + config_found = true; + } + break; + case CSIPHY_3PH_COMBO_REGS: + if (csiphy_dev->csiphy_info[instance].csiphy_3phase && + csiphy_dev->combo_mode && + !csiphy_dev->cphy_dphy_combo_mode) { + cam_io_w_mb(csiphy_reset_release_reg->reg_data, + csiphybase + csiphy_reset_release_reg->reg_addr); + config_found = true; + } + break; + case CSIPHY_2PH_3PH_COMBO_REGS: + if (!csiphy_dev->combo_mode && csiphy_dev->cphy_dphy_combo_mode) { + cam_io_w_mb(csiphy_reset_release_reg->reg_data, + csiphybase + csiphy_reset_release_reg->reg_addr); + config_found = true; 
+ } + break; + default: + CAM_ERR(CAM_CSIPHY, "Invalid combination"); + return -EINVAL; + break; + } + + if (config_found) { + if (csiphy_reset_release_reg->delay) { + usleep_range(csiphy_reset_release_reg->delay, + csiphy_reset_release_reg->delay + 5); + } + + break; + } + + } + return 0; +} + +static int __csiphy_prgm_common_data(uint32_t phy_idx, struct csiphy_reg_t *csiphy_common_reg, + uint32_t size, bool reset) +{ + int i = 0; + void __iomem *csiphybase = NULL; + uint8_t is_3phase; + + csiphybase = g_phy_data[phy_idx].base_address; + is_3phase = g_phy_data[phy_idx].is_3phase; + + if (!csiphybase) { + CAM_DBG(CAM_CSIPHY, "CSIPHY: %d is not available in platform", phy_idx); + return 0; + } + + CAM_DBG(CAM_CSIPHY, "common_data_array_size: %d, is_3phase: %d, reset: %s", + size, is_3phase, (reset ? "true" : "false")); + + for (i = 0; i < size; i++) { + switch (csiphy_common_reg[i].csiphy_param_type) { + case CSIPHY_DEFAULT_PARAMS: + cam_io_w_mb(reset ? 0x00 : csiphy_common_reg[i].reg_data, + csiphybase + csiphy_common_reg[i].reg_addr); + break; + case CSIPHY_2PH_REGS: + if (!is_3phase) { + cam_io_w_mb(reset ? 0x00 : csiphy_common_reg[i].reg_data, + csiphybase + csiphy_common_reg[i].reg_addr); + } + break; + case CSIPHY_3PH_REGS: + if (is_3phase) { + cam_io_w_mb(reset ? 
0x00 : csiphy_common_reg[i].reg_data, + csiphybase + csiphy_common_reg[i].reg_addr); + } + break; + default: + break; + } + + if (csiphy_common_reg[i].delay > 0) + usleep_range(csiphy_common_reg[i].delay, csiphy_common_reg[i].delay + 5); + } + + return 0; +} + +void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev, + struct cam_csiphy_query_cap *csiphy_cap) +{ + struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info; + + csiphy_cap->slot_info = soc_info->index; + csiphy_cap->version = csiphy_dev->hw_version; + csiphy_cap->clk_lane = csiphy_dev->clk_lane; +} + +int cam_csiphy_dump_status_reg(struct csiphy_device *csiphy_dev) +{ + struct cam_hw_soc_info *soc_info; + void __iomem *phybase = NULL; + void __iomem *lane0_offset = 0; + void __iomem *lane1_offset = 0; + void __iomem *lane2_offset = 0; + void __iomem *lane3_offset = 0; + void __iomem *clk_offset = 0; + struct csiphy_reg_parms_t *csiphy_reg; + struct cam_cphy_dphy_status_reg_params_t *status_regs; + int i = 0; + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Null csiphy_dev"); + return -EINVAL; + } + + soc_info = &csiphy_dev->soc_info; + if (!soc_info) { + CAM_ERR(CAM_CSIPHY, "Null soc_info"); + return -EINVAL; + } + + if (!g_phy_data[soc_info->index].base_address) { + CAM_ERR(CAM_CSIPHY, "Invalid cphy_idx: %d", soc_info->index); + return -EINVAL; + } + + csiphy_reg = csiphy_dev->ctrl_reg->csiphy_reg; + status_regs = csiphy_reg->status_reg_params; + phybase = g_phy_data[soc_info->index].base_address; + + if (!status_regs) { + CAM_ERR(CAM_CSIPHY, "2ph/3ph status offset not set"); + return -EINVAL; + } + + if (g_phy_data[soc_info->index].is_3phase) { + CAM_INFO(CAM_CSIPHY, "Dumping 3ph status regs"); + lane0_offset = phybase + status_regs->csiphy_3ph_status0_offset; + lane1_offset = + lane0_offset + csiphy_reg->size_offset_betn_lanes; + lane2_offset = + lane1_offset + csiphy_reg->size_offset_betn_lanes; + + for (i = 0; i < status_regs->csiphy_3ph_status_size; i++) { + CAM_INFO(CAM_CSIPHY, + "PHY: %d, 
Status%u. Ln0: 0x%x, Ln1: 0x%x, Ln2: 0x%x", + soc_info->index, i, + cam_io_r(lane0_offset + (i * 4)), + cam_io_r(lane1_offset + (i * 4)), + cam_io_r(lane2_offset + (i * 4))); + } + } else { + CAM_INFO(CAM_CSIPHY, "Dumping 2ph status regs"); + lane0_offset = phybase + status_regs->csiphy_2ph_status0_offset; + lane1_offset = + lane0_offset + csiphy_reg->size_offset_betn_lanes; + lane2_offset = + lane1_offset + csiphy_reg->size_offset_betn_lanes; + lane3_offset = + lane2_offset + csiphy_reg->size_offset_betn_lanes; + clk_offset = + lane3_offset + (csiphy_reg->size_offset_betn_lanes / 2); + + for (i = 0; i < status_regs->csiphy_2ph_status_size; i++) { + CAM_INFO(CAM_CSIPHY, + "PHY: %d, Status%u. " + "CLK_LN: 0x%x Ln0: 0x%x, Ln1: 0x%x, Ln2: 0x%x, Ln3: 0x%x", + soc_info->index, i, + cam_io_r(clk_offset + (i * 4)), + cam_io_r(lane0_offset + (i * 4)), + cam_io_r(lane1_offset + (i * 4)), + cam_io_r(lane2_offset + (i * 4)), + cam_io_r(lane3_offset + (i * 4))); + } + } + return 0; +} + +void cam_csiphy_reset(struct csiphy_device *csiphy_dev) +{ + int32_t i; + void __iomem *base = NULL; + uint32_t size = + csiphy_dev->ctrl_reg->csiphy_reg->csiphy_reset_enter_array_size; + struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info; + + base = soc_info->reg_map[0].mem_base; + + for (i = 0; i < size; i++) { + cam_io_w_mb( + csiphy_dev->ctrl_reg->csiphy_reset_enter_regs[i].reg_data, + base + + csiphy_dev->ctrl_reg->csiphy_reset_enter_regs[i].reg_addr); + if (csiphy_dev->ctrl_reg->csiphy_reset_enter_regs[i].delay > 0) + usleep_range( + csiphy_dev->ctrl_reg->csiphy_reset_enter_regs[i].delay, + csiphy_dev->ctrl_reg->csiphy_reset_enter_regs[i].delay + + 5); + } + + if (csiphy_dev->en_lane_status_reg_dump) { + CAM_INFO(CAM_CSIPHY, "Status Reg Dump after phy reset"); + cam_csiphy_dump_status_reg(csiphy_dev); + } +} + +static void cam_csiphy_program_common_registers( + struct csiphy_device *csiphy_dev, + bool reset, + enum cam_csiphy_common_reg_program selection) +{ + int csiphy_idx; + 
uint32_t size; + struct csiphy_reg_t *csiphy_common_reg; + + csiphy_common_reg = csiphy_dev->ctrl_reg->csiphy_common_reg; + size = csiphy_dev->ctrl_reg->csiphy_reg->csiphy_common_reg_array_size; + + if (selection == CAM_CSIPHY_PRGM_ALL) { + if (active_csiphy_hw_cnt < 0 || active_csiphy_hw_cnt >= MAX_CSIPHY) { + CAM_WARN(CAM_CSIPHY, + "MisMatched in active phy hw: %d and Max supported: %d", + active_csiphy_hw_cnt, MAX_CSIPHY); + return; + } + + if (active_csiphy_hw_cnt) { + CAM_DBG(CAM_CSIPHY, + "Common Settings already applied for all PHYs. active_phy_cnt: %d", + active_csiphy_hw_cnt); + return; + } + + CAM_DBG(CAM_CSIPHY, "Is reset required: %d", reset); + + for (csiphy_idx = 0; csiphy_idx < MAX_CSIPHY; csiphy_idx++) + __csiphy_prgm_common_data(csiphy_idx, csiphy_common_reg, size, reset); + } else { + __csiphy_prgm_common_data(csiphy_dev->soc_info.index, + csiphy_common_reg, size, reset); + } +} + +static int cam_csiphy_update_secure_info(struct csiphy_device *csiphy_dev, int32_t index) +{ + uint64_t lane_assign_bitmask = 0; + uint16_t lane_assign; + uint32_t bit_offset_bet_phys_in_cp_ctrl; + uint8_t lane_cnt; + uint32_t cpas_version; + int rc; + + if (csiphy_dev->domain_id_security) { + CAM_DBG(CAM_CSIPHY, "Target supports domain ID security, skipping legacy update"); + return 0; + } + + lane_assign = csiphy_dev->csiphy_info[index].lane_assign; + lane_cnt = csiphy_dev->csiphy_info[index].lane_cnt; + + while (lane_cnt--) { + lane_assign_bitmask |= (1 << (lane_assign & 0xF)); + lane_assign >>= 4; + } + + rc = cam_cpas_get_cpas_hw_version(&cpas_version); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed while getting CPAS Version"); + return rc; + } + + switch (cpas_version) { + case CAM_CPAS_TITAN_580_V100: + case CAM_CPAS_TITAN_680_V100: + case CAM_CPAS_TITAN_780_V100: + bit_offset_bet_phys_in_cp_ctrl = + (csiphy_dev->soc_info.index < CAM_MAX_PHYS_PER_CP_CTRL_REG) ? 
+ (CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES) : + (CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES + 1); + break; + default: + bit_offset_bet_phys_in_cp_ctrl = + CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES; + break; + } + + if (csiphy_dev->soc_info.index < CAM_MAX_PHYS_PER_CP_CTRL_REG) + csiphy_dev->csiphy_info[index].csiphy_cpas_cp_reg_mask = + lane_assign_bitmask << + ((csiphy_dev->soc_info.index * bit_offset_bet_phys_in_cp_ctrl) + + (!csiphy_dev->csiphy_info[index].csiphy_3phase) * + (CAM_CSIPHY_MAX_CPHY_LANES)); + else + csiphy_dev->csiphy_info[index].csiphy_cpas_cp_reg_mask = + lane_assign_bitmask << + ((csiphy_dev->soc_info.index - CAM_MAX_PHYS_PER_CP_CTRL_REG) * + bit_offset_bet_phys_in_cp_ctrl + + SEC_LANE_CP_REG_LEN + + (!csiphy_dev->csiphy_info[index].csiphy_3phase) * + CAM_CSIPHY_MAX_CPHY_LANES); + + CAM_DBG(CAM_CSIPHY, "CSIPHY_idx:%d, cp_reg_mask:0x%llx", + csiphy_dev->soc_info.index, + csiphy_dev->csiphy_info[index].csiphy_cpas_cp_reg_mask); + + return 0; +} + +static int cam_csiphy_get_lane_enable( + struct csiphy_device *csiphy, int index, + uint16_t lane_assign, uint32_t *lane_enable) +{ + uint32_t lane_select = 0; + + if (csiphy->csiphy_info[index].csiphy_3phase) { + CAM_DBG(CAM_CSIPHY, "LaneEnable for CPHY"); + switch (lane_assign & 0xF) { + case 0x0: + lane_select |= CPHY_LANE_0; + break; + case 0x1: + lane_select |= CPHY_LANE_1; + break; + case 0x2: + lane_select |= CPHY_LANE_2; + break; + default: + CAM_ERR(CAM_CSIPHY, + "Wrong lane configuration for CPHY : %d", + lane_assign); + *lane_enable = 0; + return -EINVAL; + } + } else { + CAM_DBG(CAM_CSIPHY, "LaneEnable for DPHY"); + switch (lane_assign & 0xF) { + case 0x0: + lane_select |= DPHY_LANE_0; + lane_select |= DPHY_CLK_LN; + break; + case 0x1: + lane_select |= DPHY_LANE_1; + lane_select |= DPHY_CLK_LN; + break; + case 0x2: + lane_select |= DPHY_LANE_2; + if (csiphy->combo_mode) + lane_select |= DPHY_LANE_3; + else + lane_select |= DPHY_CLK_LN; + break; + case 0x3: 
+ if (csiphy->combo_mode) { + CAM_ERR(CAM_CSIPHY, + "Wrong lane configuration for DPHYCombo: %d", + lane_assign); + *lane_enable = 0; + return -EINVAL; + } + lane_select |= DPHY_LANE_3; + lane_select |= DPHY_CLK_LN; + break; + default: + CAM_ERR(CAM_CSIPHY, + "Wrong lane configuration for DPHY: %d", + lane_assign); + *lane_enable = 0; + return -EINVAL; + } + } + + *lane_enable = lane_select; + CAM_DBG(CAM_CSIPHY, "Lane_select: 0x%x", lane_select); + + return 0; +} + +static int cam_csiphy_sanitize_lane_cnt( + struct csiphy_device *csiphy_dev, + int32_t index, uint8_t lane_cnt) +{ + uint8_t max_supported_lanes = 0; + + if (csiphy_dev->combo_mode) { + if (csiphy_dev->csiphy_info[index].csiphy_3phase) + max_supported_lanes = 1; + else + max_supported_lanes = 2; + } else if (csiphy_dev->cphy_dphy_combo_mode) { + /* 2DPHY + 1CPHY or 2CPHY + 1DPHY */ + if (csiphy_dev->csiphy_info[index].csiphy_3phase) + max_supported_lanes = 2; + else + max_supported_lanes = 2; + } else { + /* Mission Mode */ + if (csiphy_dev->csiphy_info[index].csiphy_3phase) + max_supported_lanes = 3; + else + max_supported_lanes = 4; + } + + if (lane_cnt <= 0 || lane_cnt > max_supported_lanes) { + CAM_ERR(CAM_CSIPHY, + "wrong lane_cnt configuration: expected max lane_cnt: %u received lane_cnt: %u", + max_supported_lanes, lane_cnt); + return -EINVAL; + } + + return 0; +} + +static int __cam_csiphy_parse_lane_info_cmd_buf( + int32_t dev_handle, + struct csiphy_device *csiphy_dev, + struct cam_cmd_buf_desc *cmd_desc) +{ + int index, rc = 0; + uint8_t lane_cnt = 0; + uint32_t lane_enable = 0; + uint16_t lane_assign = 0, preamble_en = 0; + uintptr_t generic_ptr; + uint32_t *cmd_buf = NULL; + size_t len; + rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle, + &generic_ptr, &len); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, + "Failed to get cmd buf mem address : %d", rc); + return rc; + } + + cmd_buf = (uint32_t *)generic_ptr; + cmd_buf += cmd_desc->offset / 4; + index = cam_csiphy_get_instance_offset(csiphy_dev, 
dev_handle); + if (index < 0 || index >= csiphy_dev->session_max_device_support) { + CAM_ERR(CAM_CSIPHY, "index in invalid: %d", index); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return -EINVAL; + } + + if (cmd_desc->meta_data == CAM_CSIPHY_PACKET_META_LANE_INFO_V2) { + struct cam_csiphy_info_v2 *cam_cmd_csiphy_info_v2 = NULL; + + if ((len < sizeof(struct cam_csiphy_info_v2)) || + (cmd_desc->offset > (len - sizeof(struct cam_csiphy_info_v2)))) { + CAM_ERR(CAM_CSIPHY, + "Not enough buffer provided for cam_csiphy_info"); + rc = -EINVAL; + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; + } + + cam_cmd_csiphy_info_v2 = (struct cam_csiphy_info_v2 *)cmd_buf; + rc = cam_csiphy_sanitize_lane_cnt(csiphy_dev, index, + cam_cmd_csiphy_info_v2->lane_cnt); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Wrong configuration lane_cnt: %u", + cam_cmd_csiphy_info_v2->lane_cnt); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; + } + + preamble_en = (cam_cmd_csiphy_info_v2->mipi_flags & + PREAMBLE_PATTEN_CAL_MASK); + + csiphy_dev->csiphy_info[index].lane_cnt = + cam_cmd_csiphy_info_v2->lane_cnt; + csiphy_dev->csiphy_info[index].lane_assign = + cam_cmd_csiphy_info_v2->lane_assign; + csiphy_dev->csiphy_info[index].settle_time = + cam_cmd_csiphy_info_v2->settle_time; + csiphy_dev->csiphy_info[index].data_rate = + cam_cmd_csiphy_info_v2->data_rate; + csiphy_dev->csiphy_info[index].secure_mode = + cam_cmd_csiphy_info_v2->secure_mode; + csiphy_dev->csiphy_info[index].mipi_flags = + (cam_cmd_csiphy_info_v2->mipi_flags & SKEW_CAL_MASK); + csiphy_dev->csiphy_info[index].channel_type = + cam_cmd_csiphy_info_v2->channel_type; + + csiphy_dev->csiphy_info[index].t3_prepare = 0; + csiphy_dev->csiphy_info[index].t3_preamble = 0; + + if (cam_cmd_csiphy_info_v2->num_valid_params > 0) { + if (cam_cmd_csiphy_info_v2->param_mask & CAM_CSIPHY_T3_PREPARE_NS_MASK) + csiphy_dev->csiphy_info[index].t3_prepare = + cam_cmd_csiphy_info_v2->params[0]; + + if (cam_cmd_csiphy_info_v2->param_mask & 
CAM_CSIPHY_T3_PREAMBLE_NS_MASK) + csiphy_dev->csiphy_info[index].t3_preamble = + cam_cmd_csiphy_info_v2->params[1]; + } + } else if (cmd_desc->meta_data == CAM_CSIPHY_PACKET_META_LANE_INFO) { + struct cam_csiphy_info *cam_cmd_csiphy_info = NULL; + + if ((len < sizeof(struct cam_csiphy_info)) || + (cmd_desc->offset > (len - sizeof(struct cam_csiphy_info)))) { + CAM_ERR(CAM_CSIPHY, + "Not enough buffer provided for cam_csiphy_info"); + rc = -EINVAL; + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; + } + + cam_cmd_csiphy_info = (struct cam_csiphy_info *)cmd_buf; + rc = cam_csiphy_sanitize_lane_cnt(csiphy_dev, index, + cam_cmd_csiphy_info->lane_cnt); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Wrong configuration lane_cnt: %u", + cam_cmd_csiphy_info->lane_cnt); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; + } + + preamble_en = (cam_cmd_csiphy_info->mipi_flags & + PREAMBLE_PATTEN_CAL_MASK); + + csiphy_dev->csiphy_info[index].lane_cnt = + cam_cmd_csiphy_info->lane_cnt; + csiphy_dev->csiphy_info[index].lane_assign = + cam_cmd_csiphy_info->lane_assign; + csiphy_dev->csiphy_info[index].settle_time = + cam_cmd_csiphy_info->settle_time; + csiphy_dev->csiphy_info[index].data_rate = + cam_cmd_csiphy_info->data_rate; + csiphy_dev->csiphy_info[index].secure_mode = + cam_cmd_csiphy_info->secure_mode; + csiphy_dev->csiphy_info[index].mipi_flags = + (cam_cmd_csiphy_info->mipi_flags & SKEW_CAL_MASK); + csiphy_dev->csiphy_info[index].channel_type = + CAM_CSIPHY_DATARATE_SHORT_CHANNEL; + } + + /* Cannot support CPHY combo mode with One sensor setting + * preamble enable and second/third sensor is without + * preamble enable. + */ + if (csiphy_dev->preamble_enable && !preamble_en && + csiphy_dev->combo_mode && + !csiphy_dev->cphy_dphy_combo_mode) { + CAM_ERR(CAM_CSIPHY, + "Cannot support %s combo mode with differnt preamble settings", + (csiphy_dev->csiphy_info[index].csiphy_3phase ? 
+ "CPHY" : "DPHY")); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return -EINVAL; + } + + csiphy_dev->preamble_enable = preamble_en; + lane_assign = csiphy_dev->csiphy_info[index].lane_assign; + lane_cnt = csiphy_dev->csiphy_info[index].lane_cnt; + + while (lane_cnt--) { + rc = cam_csiphy_get_lane_enable(csiphy_dev, index, + (lane_assign & 0xF), &lane_enable); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Wrong lane configuration: %d", + csiphy_dev->csiphy_info[index].lane_assign); + if ((csiphy_dev->combo_mode) || + (csiphy_dev->cphy_dphy_combo_mode)) { + CAM_DBG(CAM_CSIPHY, + "Resetting error to zero for other devices to configure"); + rc = 0; + } + lane_enable = 0; + csiphy_dev->csiphy_info[index].lane_enable = lane_enable; + goto reset_settings; + } + csiphy_dev->csiphy_info[index].lane_enable |= lane_enable; + lane_assign >>= 4; + } + + if (csiphy_dev->csiphy_info[index].secure_mode == 1) { + rc = cam_csiphy_update_secure_info(csiphy_dev, index); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Secure info configuration failed for index: %d", index); + goto reset_settings; + } + } + + CAM_DBG(CAM_CSIPHY, + "phy version:%d, phy_idx: %d, preamble_en: %u", + csiphy_dev->hw_version, + csiphy_dev->soc_info.index, + csiphy_dev->preamble_enable); + CAM_DBG(CAM_CSIPHY, + "3phase:%d, combo mode:%d, secure mode:%d", + csiphy_dev->csiphy_info[index].csiphy_3phase, + csiphy_dev->combo_mode, + csiphy_dev->csiphy_info[index].secure_mode); + CAM_DBG(CAM_CSIPHY, + "lane_cnt: 0x%x, lane_assign: 0x%x, lane_enable: 0x%x, settle time:%llu, datarate:%llu", + csiphy_dev->csiphy_info[index].lane_cnt, + csiphy_dev->csiphy_info[index].lane_assign, + csiphy_dev->csiphy_info[index].lane_enable, + csiphy_dev->csiphy_info[index].settle_time, + csiphy_dev->csiphy_info[index].data_rate); + + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; + +reset_settings: + cam_csiphy_reset_phyconfig_param(csiphy_dev, index); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; +} + +static int 
__cam_csiphy_handle_cdr_sweep_info( + struct csiphy_device *csiphy_dev, + struct cam_csiphy_cdr_sweep_params *cdr_sweep_params) +{ + if (cdr_sweep_params->tolerance_op_type != CAM_CSIPHY_CDR_ADD_TOLERANCE && + cdr_sweep_params->tolerance_op_type != CAM_CSIPHY_CDR_SUB_TOLERANCE) { + CAM_ERR(CAM_CSIPHY, "Invalid tolerance op type: %u", + cdr_sweep_params->tolerance_op_type); + return -EINVAL; + } + + csiphy_dev->cdr_params.cdr_tolerance = cdr_sweep_params->cdr_tolerance; + csiphy_dev->cdr_params.tolerance_op_type = cdr_sweep_params->tolerance_op_type; + csiphy_dev->cdr_params.cdr_sweep_enabled = true; + + CAM_DBG(CAM_CSIPHY, + "CSIPHY:%u cdr sweep with tolerance: %u op_type: %u", + csiphy_dev->soc_info.index, csiphy_dev->cdr_params.cdr_tolerance, + csiphy_dev->cdr_params.tolerance_op_type); + + return 0; +} + +static int __cam_csiphy_handle_aux_mem_buffer( + struct csiphy_device *csiphy_dev, + struct cam_csiphy_aux_settings_params *aux_setting_params) +{ + if (aux_setting_params->data_rate_aux_mask) + g_phy_data[csiphy_dev->soc_info.index].data_rate_aux_mask |= + aux_setting_params->data_rate_aux_mask; + + CAM_DBG(CAM_CSIPHY, + "CSIPHY:%u provided_mask: 0x%llx current_mask :0x%llx", + csiphy_dev->soc_info.index, + aux_setting_params->data_rate_aux_mask, + g_phy_data[csiphy_dev->soc_info.index].data_rate_aux_mask); + + return 0; +} + +static void cam_csiphy_aux_data_populate( + uint64_t *aux_config_ptr, + struct csiphy_device *csiphy_dev) +{ + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Invalid param"); + return; + } + + if (!g_phy_data[csiphy_dev->soc_info.index].is_3phase) { + CAM_INFO_RATE_LIMIT(CAM_CSIPHY, "2PH Sensor is connected to the PHY"); + return; + } + + *aux_config_ptr = + g_phy_data[csiphy_dev->soc_info.index].data_rate_aux_mask; + + CAM_DBG(CAM_CSIPHY, + "CSIPHY:%u configuring aux settings curr_data_rate_idx: %u curr_data_rate: %llu curr_aux_mask: 0x%lx", + csiphy_dev->soc_info.index, csiphy_dev->curr_data_rate_idx, + csiphy_dev->current_data_rate, + 
g_phy_data[csiphy_dev->soc_info.index].data_rate_aux_mask); +} + +static void cam_csiphy_cdr_data_populate( + uint32_t *computed_cdr, + struct csiphy_device *csiphy_dev) +{ + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Invalid param"); + return; + } + + if (!g_phy_data[csiphy_dev->soc_info.index].is_3phase) { + CAM_INFO_RATE_LIMIT(CAM_CSIPHY, "2PH Sensor is connected to the PHY"); + return; + } + + if (csiphy_dev->cdr_params.cdr_sweep_enabled) + *computed_cdr = + g_phy_data[csiphy_dev->soc_info.index].computed_cdr_value; + + CAM_DBG(CAM_CSIPHY, + "CSIPHY:%u configuring cdr settings curr_data_rate_idx: %u curr_data_rate: %llu curr_cdr_mask: 0x%x updated in memory: %s", + csiphy_dev->soc_info.index, csiphy_dev->curr_data_rate_idx, + csiphy_dev->current_data_rate, + g_phy_data[csiphy_dev->soc_info.index].computed_cdr_value, + CAM_BOOL_TO_YESNO(csiphy_dev->cdr_params.cdr_sweep_enabled)); +} + +static int32_t cam_csiphy_generic_data_update( + void *user_data, uint32_t blob_type, + uint32_t blob_size, uint8_t *blob_data) +{ + int rc = 0; + struct csiphy_device *csiphy_dev = (struct csiphy_device *)user_data; + + CAM_DBG(CAM_CSIPHY, "blob_type=%d, blob_size=%d", + blob_type, blob_size); + + switch (blob_type) { + case CAM_CSIPHY_GENERIC_BLOB_TYPE_CDR_CONFIG: { + struct cam_csiphy_cdr_sweep_params *cdr_config_update; + + if (blob_size < sizeof(struct cam_csiphy_cdr_sweep_params)) { + CAM_ERR(CAM_CSIPHY, "Invalid blob size %u, blob_type=%d for CDR update", + blob_size, blob_type); + return -EINVAL; + } + cdr_config_update = (struct cam_csiphy_cdr_sweep_params *)blob_data; + cam_csiphy_cdr_data_populate(&cdr_config_update->configured_cdr, + csiphy_dev); + break; + } + case CAM_CSIPHY_GENERIC_BLOB_TYPE_AUX_CONFIG: { + struct cam_csiphy_aux_settings_params *aux_config_update; + + if (blob_size < sizeof(struct cam_csiphy_aux_settings_params)) { + CAM_ERR(CAM_CSIPHY, "Invalid blob size %u, blob_type=%d for aux update", + blob_size, blob_type); + return -EINVAL; + } + 
aux_config_update = (struct cam_csiphy_aux_settings_params *)blob_data; + cam_csiphy_aux_data_populate(&aux_config_update->data_rate_aux_mask, + csiphy_dev); + break; + } + default: + CAM_WARN(CAM_CSIPHY, "Unknown op code %d for CSIPHY = %d", + blob_type, csiphy_dev->soc_info.index); + rc = -EINVAL; + break; + } + + return rc; +} + +static int32_t __cam_csiphy_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + int rc = 0; + struct csiphy_device *csiphy_dev = + (struct csiphy_device *)user_data; + + if (!blob_data || (blob_size == 0)) { + CAM_ERR(CAM_CSIPHY, "Invalid blob info %pK %u", blob_data, + blob_size); + return -EINVAL; + } + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Invalid user data"); + return -EINVAL; + } + + switch (blob_type) { + case CAM_CSIPHY_GENERIC_BLOB_TYPE_CDR_CONFIG: { + struct cam_csiphy_cdr_sweep_params *cdr_sweep_params = + (struct cam_csiphy_cdr_sweep_params *)blob_data; + + if (blob_size < sizeof(struct cam_csiphy_cdr_sweep_params)) { + CAM_ERR(CAM_CSIPHY, "Invalid blob size expected: 0x%x actual: 0x%x", + sizeof(struct cam_csiphy_cdr_sweep_params), blob_size); + return -EINVAL; + } + + rc = __cam_csiphy_handle_cdr_sweep_info(csiphy_dev, cdr_sweep_params); + break; + } + case CAM_CSIPHY_GENERIC_BLOB_TYPE_AUX_CONFIG: { + struct cam_csiphy_aux_settings_params *aux_setting_params = + (struct cam_csiphy_aux_settings_params *)blob_data; + + if (blob_size < sizeof(struct cam_csiphy_aux_settings_params)) { + CAM_ERR(CAM_CSIPHY, "Invalid blob size expected: 0x%x actual: 0x%x", + sizeof(struct cam_csiphy_aux_settings_params), blob_size); + return -EINVAL; + } + + rc = __cam_csiphy_handle_aux_mem_buffer(csiphy_dev, aux_setting_params); + break; + } + default: + CAM_WARN(CAM_CSIPHY, "Invalid blob type %d", blob_type); + break; + } + + return rc; +} + +int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev, + struct cam_config_dev_cmd *cfg_dev) +{ + int rc = 0, i; + uintptr_t 
generic_pkt_ptr; + struct cam_packet *csl_packet = NULL; + struct cam_packet *csl_packet_u = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + size_t len, remain_len; + uint32_t cmd_buf_type; + + if (!cfg_dev || !csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Invalid Args"); + return -EINVAL; + } + + rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle, + &generic_pkt_ptr, &len); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc); + return rc; + } + + remain_len = len; + if ((sizeof(struct cam_packet) > len) || + ((size_t)cfg_dev->offset >= len - sizeof(struct cam_packet))) { + CAM_ERR(CAM_CSIPHY, + "Inval cam_packet strut size: %zu, len_of_buff: %zu", + sizeof(struct cam_packet), len); + cam_mem_put_cpu_buf(cfg_dev->packet_handle); + rc = -EINVAL; + goto put_buf; + } + + remain_len -= (size_t)cfg_dev->offset; + csl_packet_u = (struct cam_packet *) + (generic_pkt_ptr + (uint32_t)cfg_dev->offset); + + rc = cam_packet_util_copy_pkt_to_kmd(csl_packet_u, &csl_packet, remain_len); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Copying packet to KMD failed"); + goto put_buf; + } + + if (csl_packet->num_cmd_buf) + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *)&csl_packet->payload_flex + + csl_packet->cmd_buf_offset / 4); + else { + CAM_ERR(CAM_CSIPHY, "num_cmd_buffer = %d", csl_packet->num_cmd_buf); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_CSIPHY, "CSIPHY:%u num cmd buffers received: %u", + csiphy_dev->soc_info.index, csl_packet->num_cmd_buf); + + for (i = 0; i < csl_packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + goto end; + + cmd_buf_type = cmd_desc[i].meta_data; + + CAM_DBG(CAM_CSIPHY, "CSIPHY:%u cmd_buffer_%d type: %u", + csiphy_dev->soc_info.index, i, cmd_buf_type); + + switch (cmd_buf_type) { + case CAM_CSIPHY_PACKET_META_LANE_INFO: + rc = __cam_csiphy_parse_lane_info_cmd_buf( + cfg_dev->dev_handle, csiphy_dev, &cmd_desc[i]); + break; + case CAM_CSIPHY_PACKET_META_LANE_INFO_V2: + rc = 
__cam_csiphy_parse_lane_info_cmd_buf( + cfg_dev->dev_handle, csiphy_dev, &cmd_desc[i]); + break; + case CAM_CSIPHY_PACKET_META_GENERIC_BLOB: + rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i], + __cam_csiphy_generic_blob_handler, csiphy_dev); + break; + default: + CAM_WARN(CAM_CSIPHY, + "Invalid meta type: %u", cmd_buf_type); + break; + } + + if (rc) + break; + } + +end: + cam_common_mem_free(csl_packet); +put_buf: + cam_mem_put_cpu_buf(cfg_dev->packet_handle); + return rc; +} + +void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev) +{ + int32_t i; + struct csiphy_reg_t *csiphy_irq_reg; + uint32_t num_of_irq_status_regs = 0; + + void __iomem *csiphybase = + csiphy_dev->soc_info.reg_map[0].mem_base; + + num_of_irq_status_regs = + csiphy_dev->ctrl_reg->csiphy_reg->csiphy_interrupt_status_size; + + for (i = 0; i < num_of_irq_status_regs; i++) { + csiphy_irq_reg = &csiphy_dev->ctrl_reg->csiphy_irq_reg[i]; + cam_io_w_mb(csiphy_irq_reg->reg_data, + csiphybase + csiphy_irq_reg->reg_addr); + + if (csiphy_irq_reg->delay) + usleep_range(csiphy_irq_reg->delay, + csiphy_irq_reg->delay + 5); + } +} + +void cam_csiphy_cphy_irq_disable(struct csiphy_device *csiphy_dev) +{ + int32_t i; + void __iomem *csiphybase = + csiphy_dev->soc_info.reg_map[0].mem_base; + uint32_t num_of_irq_status_regs = 0; + + num_of_irq_status_regs = + csiphy_dev->ctrl_reg->csiphy_reg->csiphy_interrupt_status_size; + + for (i = 0; i < num_of_irq_status_regs; i++) + cam_io_w_mb(0x0, csiphybase + + csiphy_dev->ctrl_reg->csiphy_irq_reg[i].reg_addr); +} + +irqreturn_t cam_csiphy_irq(int irq_num, void *data) +{ + struct csiphy_device *csiphy_dev = + (struct csiphy_device *)data; + struct csiphy_reg_parms_t *csiphy_reg = NULL; + void __iomem *base = NULL; + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Invalid Args"); + return IRQ_NONE; + } + + base = csiphy_dev->soc_info.reg_map[0].mem_base; + csiphy_reg = csiphy_dev->ctrl_reg->csiphy_reg; + + if (csiphy_dev->en_common_status_reg_dump) { + 
cam_csiphy_common_status_reg_dump(csiphy_dev, true); + cam_io_w_mb(0x1, base + csiphy_reg->mipi_csiphy_glbl_irq_cmd_addr); + cam_io_w_mb(0x0, base + csiphy_reg->mipi_csiphy_glbl_irq_cmd_addr); + } + + return IRQ_HANDLED; +} + +static inline void __cam_csiphy_compute_cdr_value( + int32_t *cdr_val, struct csiphy_device *csiphy_device) +{ + if (csiphy_device->cdr_params.tolerance_op_type == + CAM_CSIPHY_CDR_ADD_TOLERANCE) + *cdr_val += csiphy_device->cdr_params.cdr_tolerance; + else + *cdr_val -= csiphy_device->cdr_params.cdr_tolerance; +} + +static int cam_csiphy_cphy_data_rate_config(struct csiphy_device *csiphy_device, int32_t idx, + uint8_t datarate_variant_idx) +{ + int i, j = 0, rc = 0; + unsigned int data_rate_idx; + uint64_t required_phy_data_rate; + void __iomem *csiphybase; + ssize_t num_data_rates; + struct data_rate_settings_t *settings_table; + uint16_t settle_cnt = 0; + uint32_t reg_addr, reg_data, reg_param_type; + int32_t delay; + struct csiphy_reg_t *config_params, *cdr_regs; + uint8_t csiphy_index = 0; + uint32_t channel_type; + + if ((csiphy_device == NULL) || (csiphy_device->ctrl_reg == NULL)) { + CAM_ERR(CAM_CSIPHY, "Device is NULL"); + return -EINVAL; + } + + if (csiphy_device->ctrl_reg->data_rates_settings_table == NULL) { + CAM_DBG(CAM_CSIPHY, + "Data rate specific register table not available"); + return 0; + } + + if (csiphy_device->soc_info.index >= MAX_CSIPHY) { + CAM_ERR(CAM_CSIPHY, "soc_info.index:%d >= MAX_CSIPHY:%d ", + csiphy_device->soc_info.index, MAX_CSIPHY); + return -EINVAL; + } + + required_phy_data_rate = csiphy_device->csiphy_info[idx].data_rate; + csiphybase = csiphy_device->soc_info.reg_map[0].mem_base; + settings_table = csiphy_device->ctrl_reg->data_rates_settings_table; + num_data_rates = settings_table->num_data_rate_settings; + + rc = cam_csiphy_get_settle_count(csiphy_device, idx, &settle_cnt); + if (rc) + return rc; + + csiphy_index = csiphy_device->soc_info.index; + channel_type = 
csiphy_device->csiphy_info[idx].channel_type; + + CAM_DBG(CAM_CSIPHY, "required data rate : %llu", required_phy_data_rate); + for (data_rate_idx = 0; data_rate_idx < num_data_rates; data_rate_idx++) { + struct data_rate_reg_info_t *drate_settings = settings_table->data_rate_settings; + uint64_t supported_phy_bw = drate_settings[data_rate_idx].bandwidth; + ssize_t num_reg_entries = drate_settings[data_rate_idx].data_rate_reg_array_size; + struct csiphy_reg_t **drate_reg_array = + drate_settings[data_rate_idx].data_rate_reg_array[csiphy_index]; + + if ((required_phy_data_rate > supported_phy_bw) && + (data_rate_idx < (num_data_rates - 1))) { + CAM_DBG(CAM_CSIPHY, + "Skipping table [%d] with BW: %llu, Required data_rate: %llu", + data_rate_idx, supported_phy_bw, required_phy_data_rate); + continue; + } + + CAM_DBG(CAM_CSIPHY, "table[%d] BW : %llu Selected", + data_rate_idx, supported_phy_bw); + + if (strstr(csiphy_qmargin, CSIPHY_QMARGIN_DEFAULT_STR)) + csiphy_device->qmargin_data.bw = supported_phy_bw; + + if (datarate_variant_idx >= CAM_CSIPHY_MAX_DATARATE_VARIANTS) { + CAM_ERR(CAM_CSIPHY, "Datarate variant Idx: %u can not exceed %u", + datarate_variant_idx, CAM_CSIPHY_MAX_DATARATE_VARIANTS-1); + return -EINVAL; + } + + config_params = drate_reg_array[datarate_variant_idx]; + if (!config_params) { + CAM_ERR(CAM_CSIPHY, "Datarate settings are null. 
datarate variant idx: %u", + datarate_variant_idx); + return -EINVAL; + } + + for (i = 0; i < num_reg_entries; i++) { + reg_addr = config_params[i].reg_addr; + reg_data = config_params[i].reg_data; + reg_param_type = config_params[i].csiphy_param_type; + delay = config_params[i].delay; + CAM_DBG(CAM_CSIPHY, + "param_type: %d writing reg : %x val : %x delay: %dus", + reg_param_type, reg_addr, reg_data, + delay); + switch (reg_param_type) { + case CSIPHY_DEFAULT_PARAMS: + cam_io_w_mb(reg_data, + csiphybase + reg_addr); + break; + case CSIPHY_SHORT_CHANNEL_PARAMS: + if (channel_type == CAM_CSIPHY_DATARATE_SHORT_CHANNEL) + cam_io_w_mb(reg_data, + csiphybase + reg_addr); + break; + case CSIPHY_STANDARD_CHANNEL_PARAMS: + if (channel_type == CAM_CSIPHY_DATARATE_STANDARD_CHANNEL) + cam_io_w_mb(reg_data, + csiphybase + reg_addr); + break; + case CSIPHY_SETTLE_CNT_LOWER_BYTE: + cam_io_w_mb(settle_cnt & 0xFF, + csiphybase + reg_addr); + break; + case CSIPHY_SETTLE_CNT_HIGHER_BYTE: + cam_io_w_mb((settle_cnt >> 8) & 0xFF, + csiphybase + reg_addr); + break; + case CSIPHY_AUXILIARY_SETTING: { + uint32_t phy_idx = csiphy_device->soc_info.index; + + if (g_phy_data[phy_idx].data_rate_aux_mask & + BIT_ULL(data_rate_idx)) { + cam_io_w_mb(reg_data, csiphybase + reg_addr); + CAM_DBG(CAM_CSIPHY, + "CSIPHY: %u configuring new aux setting reg_addr: 0x%x reg_val: 0x%x", + csiphy_device->soc_info.index, reg_addr, reg_data); + } + } + break; + default: + CAM_DBG(CAM_CSIPHY, "Do Nothing"); + break; + } + + if ((reg_param_type == + (CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS) && + (channel_type == CAM_CSIPHY_DATARATE_SHORT_CHANNEL)) || + (reg_param_type == + (CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS) && + (channel_type == CAM_CSIPHY_DATARATE_STANDARD_CHANNEL)) || + reg_param_type == CSIPHY_CDR_LN_SETTINGS) { + int32_t cdr_val = reg_data; + struct cam_csiphy_dev_cdr_sweep_params *cdr_params = + &csiphy_device->cdr_params; + + if (cdr_params->cdr_sweep_enabled) { + 
__cam_csiphy_compute_cdr_value(&cdr_val, csiphy_device); + + if (cdr_val < 0) { + CAM_ERR(CAM_CSIPHY, + "CSIPHY: %u invalid CDR tolerance computation, default: 0x%x tolerance: 0x%x op_type: 0x%x", + csiphy_device->soc_info.index, + reg_data, cdr_params->cdr_tolerance, + cdr_params->tolerance_op_type); + return -EINVAL; + } + g_phy_data[csiphy_device->soc_info.index].computed_cdr_value = cdr_val; + } + + cam_io_w_mb(cdr_val, csiphybase + reg_addr); + CAM_DBG(CAM_CSIPHY, + "CSIPHY: %u CDR reg_addr: 0x%x reg_val: 0x%x sweep test: %s", + csiphy_device->soc_info.index, + reg_addr, cdr_val, + CAM_BOOL_TO_YESNO(cdr_params->cdr_sweep_enabled)); + } + + if (strstr(csiphy_qmargin, CSIPHY_QMARGIN_DEFAULT_STR) && + reg_param_type == CSIPHY_CDR_LN_SETTINGS) { + cdr_regs = &csiphy_device->qmargin_data.cdr_regs[0]; + cdr_regs[j].reg_addr = reg_addr; + cdr_regs[j].reg_data = reg_data; + cdr_regs[j].delay = delay; + j++; + } + + if (delay > 0) + usleep_range(delay, delay + 5); + } + + csiphy_device->curr_data_rate_idx = data_rate_idx; + break; + } + + return 0; +} + +static int __cam_csiphy_prgm_bist_reg(struct csiphy_device *csiphy_dev, bool is_3phase) +{ + int i; + int bist_arr_size; + struct csiphy_reg_t *csiphy_common_reg; + void __iomem *csiphybase; + + csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base; + + if (is_3phase) { + csiphy_common_reg = csiphy_dev->ctrl_reg->csiphy_bist_reg->bist_3ph_settings_arry; + bist_arr_size = csiphy_dev->ctrl_reg->csiphy_bist_reg->num_3ph_bist_settings; + } else { + csiphy_common_reg = csiphy_dev->ctrl_reg->csiphy_bist_reg->bist_2ph_settings_arry; + bist_arr_size = csiphy_dev->ctrl_reg->csiphy_bist_reg->num_2ph_bist_settings; + } + + if (!csiphy_common_reg) { + CAM_DBG(CAM_CSIPHY, "No Bist Settings available for %s", + (is_3phase) ? 
"CPHY" : "DPHY"); + return 0; + } + + for (i = 0; i < bist_arr_size; i++) { + cam_io_w_mb(csiphy_common_reg[i].reg_data, + csiphybase + csiphy_common_reg[i].reg_addr); + + if (csiphy_common_reg[i].delay) + usleep_range(csiphy_common_reg[i].delay, csiphy_common_reg[i].delay + 5); + } + + return 0; +} + +static int cam_csiphy_program_secure_mode(struct csiphy_device *csiphy_dev, + bool protect, int32_t offset, bool is_shutdown) +{ + int rc = 0; + + if (csiphy_dev->domain_id_security) { + if (!csiphy_dev->csiphy_info[offset].secure_info_updated) { + CAM_ERR(CAM_CSIPHY, + "PHY[%u] domain id info not updated, aborting secure call", + csiphy_dev->soc_info.index); + + return -EINVAL; + } + + rc = cam_cpas_enable_clks_for_domain_id(true); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed to enable the Domain ID clocks"); + return rc; + } + } + + rc = cam_csiphy_notify_secure_mode(csiphy_dev, protect, offset, is_shutdown); + + if (csiphy_dev->domain_id_security) { + if (cam_cpas_enable_clks_for_domain_id(false)) + CAM_ERR(CAM_CSIPHY, "Failed to disable the Domain ID clocks"); + + if (!protect) + csiphy_dev->csiphy_info[offset].secure_info_updated = false; + } + + return rc; +} + +int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev, + int32_t dev_handle, uint8_t datarate_variant_idx) +{ + int32_t rc = 0; + uint32_t lane_enable = 0; + uint16_t i = 0, cfg_size = 0; + uint16_t settle_cnt = 0; + uint8_t skew_cal_enable = 0; + int index; + void __iomem *csiphybase; + struct csiphy_reg_t *reg_array; + struct csiphy_reg_parms_t *csiphy_reg; + bool is_3phase = false; + + csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base; + + CAM_DBG(CAM_CSIPHY, "ENTER"); + if (!csiphybase) { + CAM_ERR(CAM_CSIPHY, "csiphybase NULL"); + return -EINVAL; + } + + index = cam_csiphy_get_instance_offset(csiphy_dev, dev_handle); + if (index < 0 || index >= csiphy_dev->session_max_device_support) { + CAM_ERR(CAM_CSIPHY, "index is invalid: %d", index); + return -EINVAL; + } + + CAM_DBG(CAM_CSIPHY, + 
"Index: %d: expected dev_hdl: 0x%x : derived dev_hdl: 0x%x", + index, dev_handle, + csiphy_dev->csiphy_info[index].hdl_data.device_hdl); + + if (csiphy_dev->csiphy_info[index].csiphy_3phase) + is_3phase = true; + + csiphy_reg = csiphy_dev->ctrl_reg->csiphy_reg; + if (csiphy_dev->combo_mode) { + /* for CPHY(3Phase) or DPHY(2Phase) combo mode selection */ + if (is_3phase) { + /* CPHY combo mode */ + if (csiphy_dev->ctrl_reg->csiphy_3ph_combo_reg) { + reg_array = csiphy_dev->ctrl_reg->csiphy_3ph_combo_reg; + cfg_size = csiphy_reg->csiphy_3ph_combo_config_array_size; + } else { + CAM_WARN(CAM_CSIPHY, "CPHY combo mode reg settings not found"); + reg_array = csiphy_dev->ctrl_reg->csiphy_3ph_reg; + cfg_size = csiphy_reg->csiphy_3ph_config_array_size; + } + } else { + /* DPHY combo mode*/ + if (csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg) { + reg_array = csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg; + cfg_size = csiphy_reg->csiphy_2ph_combo_config_array_size; + } else { + CAM_WARN(CAM_CSIPHY, "DPHY combo mode reg settings not found"); + reg_array = csiphy_dev->ctrl_reg->csiphy_2ph_reg; + cfg_size = csiphy_reg->csiphy_2ph_config_array_size; + } + } + } else if (csiphy_dev->cphy_dphy_combo_mode) { + /* for CPHY and DPHY combo mode selection */ + if (csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg) { + reg_array = csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg; + cfg_size = csiphy_reg->csiphy_2ph_3ph_config_array_size; + } else { + reg_array = csiphy_dev->ctrl_reg->csiphy_3ph_reg; + cfg_size = csiphy_reg->csiphy_3ph_config_array_size; + CAM_WARN(CAM_CSIPHY, + "Unsupported configuration, Falling back to CPHY mission mode"); + } + } else { + /* for CPHY(3Phase) or DPHY(2Phase) Non combe mode selection */ + if (is_3phase) { + CAM_DBG(CAM_CSIPHY, + "3phase Non combo mode reg array selected"); + reg_array = csiphy_dev->ctrl_reg->csiphy_3ph_reg; + cfg_size = csiphy_reg->csiphy_3ph_config_array_size; + } else { + CAM_DBG(CAM_CSIPHY, + "2PHASE Non combo mode reg array selected"); 
+ reg_array = csiphy_dev->ctrl_reg->csiphy_2ph_reg; + cfg_size = csiphy_reg->csiphy_2ph_config_array_size; + } + } + + lane_enable = csiphy_dev->csiphy_info[index].lane_enable; + + if (csiphy_dev->csiphy_info[index].csiphy_3phase) { + rc = cam_csiphy_cphy_data_rate_config(csiphy_dev, index, datarate_variant_idx); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Date rate specific configuration failed rc: %d", + rc); + return rc; + } + } + + rc = cam_csiphy_get_settle_count(csiphy_dev, index, &settle_cnt); + if (rc) + return rc; + skew_cal_enable = csiphy_dev->csiphy_info[index].mipi_flags; + + for (i = 0; i < cfg_size; i++) { + switch (reg_array[i].csiphy_param_type) { + case CSIPHY_LANE_ENABLE: + cam_io_w_mb(lane_enable, csiphybase + reg_array[i].reg_addr); + break; + case CSIPHY_DEFAULT_PARAMS: + cam_io_w_mb(reg_array[i].reg_data, csiphybase + reg_array[i].reg_addr); + break; + case CSIPHY_SETTLE_CNT_LOWER_BYTE: + cam_io_w_mb(settle_cnt & 0xFF, csiphybase + reg_array[i].reg_addr); + break; + case CSIPHY_SETTLE_CNT_HIGHER_BYTE: + cam_io_w_mb((settle_cnt >> 8) & 0xFF, csiphybase + reg_array[i].reg_addr); + break; + case CSIPHY_SKEW_CAL: + if (skew_cal_enable) + cam_io_w_mb(reg_array[i].reg_data, csiphybase + reg_array[i].reg_addr); + else + cam_io_w_mb(0x00, csiphybase + reg_array[i].reg_addr); + break; + default: + CAM_DBG(CAM_CSIPHY, "Do Nothing"); + break; + } + + if (reg_array[i].delay > 0) + usleep_range(reg_array[i].delay, reg_array[i].delay + 5); + } + + if (csiphy_dev->preamble_enable) + __cam_csiphy_prgm_bist_reg(csiphy_dev, is_3phase); + + cam_csiphy_cphy_irq_config(csiphy_dev); + + CAM_DBG(CAM_CSIPHY, "EXIT"); + return rc; +} + +void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev) +{ + struct cam_hw_soc_info *soc_info; + struct cam_csiphy_param *param; + int i, rc; + + if (csiphy_dev->csiphy_state == CAM_CSIPHY_INIT) + return; + + if (!csiphy_dev->acquire_count && !csiphy_dev->start_dev_count) + return; + + if (csiphy_dev->acquire_count >= 
CSIPHY_MAX_INSTANCES_PER_PHY) { + CAM_WARN(CAM_CSIPHY, "acquire count is invalid: %u", + csiphy_dev->acquire_count); + csiphy_dev->acquire_count = + CSIPHY_MAX_INSTANCES_PER_PHY; + } + + if ((csiphy_dev->csiphy_state == CAM_CSIPHY_START) || + csiphy_dev->start_dev_count) { + soc_info = &csiphy_dev->soc_info; + + for (i = 0; i < csiphy_dev->acquire_count; i++) { + param = &csiphy_dev->csiphy_info[i]; + + if (param->secure_mode) + cam_csiphy_program_secure_mode(csiphy_dev, + CAM_SECURE_MODE_NON_SECURE, i, true); + + param->secure_mode = CAM_SECURE_MODE_NON_SECURE; + + if (soc_info->is_clk_drv_en && param->use_hw_client_voting) { + rc = cam_soc_util_set_src_clk_rate(soc_info, param->conn_csid_idx, + 0, 0); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "[%d] Failed in setting clk rate for %d", + soc_info->index, param->conn_csid_idx); + } else { + rc = cam_soc_util_cesta_channel_switch(param->conn_csid_idx, + "csiphy_shutdown"); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Failed to apply power states for cesta client:%d rc:%d", + param->conn_csid_idx, rc); + } + } + } + + cam_csiphy_reset_phyconfig_param(csiphy_dev, i); + } + + mutex_lock(&active_csiphy_cnt_mutex); + if ((csiphy_dev->prgm_cmn_reg_across_csiphy) && + (active_csiphy_hw_cnt > 0)) { + active_csiphy_hw_cnt--; + cam_csiphy_program_common_registers(csiphy_dev, true, + CAM_CSIPHY_PRGM_ALL); + } + mutex_unlock(&active_csiphy_cnt_mutex); + + cam_csiphy_reset(csiphy_dev); + cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, true, true); + if (g_phy_data[soc_info->index].aon_cam_id == NOT_AON_CAM) + cam_cpas_stop(csiphy_dev->cpas_handle); + + csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE; + } + + if (csiphy_dev->csiphy_state == CAM_CSIPHY_ACQUIRE) { + for (i = 0; i < csiphy_dev->acquire_count; i++) { + if (csiphy_dev->csiphy_info[i].hdl_data.device_hdl != -1) { + rc = cam_destroy_device_hdl( + csiphy_dev->csiphy_info[i].hdl_data.device_hdl); + if (rc) + CAM_ERR(CAM_CSIPHY, + "Failed at destroying the device 
hdl: 0x%x", + csiphy_dev->csiphy_info[i].hdl_data.device_hdl); + } + csiphy_dev->csiphy_info[i].hdl_data.device_hdl = -1; + csiphy_dev->csiphy_info[i].hdl_data.session_hdl = -1; + } + } + + csiphy_dev->ref_count = 0; + csiphy_dev->acquire_count = 0; + csiphy_dev->start_dev_count = 0; + csiphy_dev->csiphy_state = CAM_CSIPHY_INIT; +} + +static int32_t cam_csiphy_external_cmd(struct csiphy_device *csiphy_dev, + struct cam_config_dev_cmd *p_submit_cmd) +{ + struct cam_csiphy_info cam_cmd_csiphy_info; + int32_t rc = 0; + int32_t index = -1; + + if (copy_from_user(&cam_cmd_csiphy_info, + u64_to_user_ptr(p_submit_cmd->packet_handle), + sizeof(struct cam_csiphy_info))) { + CAM_ERR(CAM_CSIPHY, "failed to copy cam_csiphy_info\n"); + rc = -EFAULT; + } else { + index = cam_csiphy_get_instance_offset(csiphy_dev, + p_submit_cmd->dev_handle); + if (index < 0 || + index >= csiphy_dev->session_max_device_support) { + CAM_ERR(CAM_CSIPHY, "index is invalid: %d", index); + return -EINVAL; + } + + csiphy_dev->csiphy_info[index].lane_cnt = + cam_cmd_csiphy_info.lane_cnt; + csiphy_dev->csiphy_info[index].lane_assign = + cam_cmd_csiphy_info.lane_assign; + csiphy_dev->csiphy_info[index].settle_time = + cam_cmd_csiphy_info.settle_time; + csiphy_dev->csiphy_info[index].data_rate = + cam_cmd_csiphy_info.data_rate; + CAM_DBG(CAM_CSIPHY, + "%s CONFIG_DEV_EXT settle_time= %lld lane_cnt=%d", + __func__, + csiphy_dev->csiphy_info[index].settle_time, + csiphy_dev->csiphy_info[index].lane_cnt); + } + + return rc; +} + +static int cam_csiphy_update_lane_selection(struct csiphy_device *csiphy, int index, bool enable) +{ + uint32_t lane_enable; + void __iomem *base_address; + int32_t lane_reg_addr; + int32_t delay; + + base_address = csiphy->soc_info.reg_map[0].mem_base; + lane_reg_addr = csiphy->ctrl_reg->csiphy_lane_config_reg->reg_addr; + delay = csiphy->ctrl_reg->csiphy_lane_config_reg->delay; + + lane_enable = cam_io_r(base_address + lane_reg_addr); + + if (enable) + lane_enable |= 
csiphy->csiphy_info[index].lane_enable; + else + lane_enable &= ~csiphy->csiphy_info[index].lane_enable; + + CAM_INFO(CAM_CSIPHY, "lane_reg_addr: 0x%x, lane_assign: 0x%x, lane_enable: 0x%x, delay: %d", + lane_reg_addr, csiphy->csiphy_info[index].lane_assign, lane_enable, delay); + + cam_io_w_mb(lane_enable, base_address + lane_reg_addr); + + if (delay) + usleep_range(delay, delay + 5); + + return 0; +} + +static int __csiphy_cpas_configure_for_main_or_aon( + bool get_access, uint32_t phy_idx, + struct cam_csiphy_aon_sel_params_t *aon_sel_params) +{ + int rc = 0; + uint32_t aon_config = 0; + uint32_t cpas_handle = g_phy_data[phy_idx].cpas_handle; + + if (g_phy_data[phy_idx].aon_cam_id == NOT_AON_CAM) { + CAM_ERR(CAM_CSIPHY, "Not an AON Camera"); + return -EINVAL; + } + + if (!aon_sel_params->aon_cam_sel_offset[g_phy_data[phy_idx].aon_cam_id]) { + CAM_ERR(CAM_CSIPHY, "Mux register offset can not be 0. AON_Cam_ID: %u", + g_phy_data[phy_idx].aon_cam_id); + return -EINVAL; + } + + if (get_access == g_phy_data[phy_idx].is_configured_for_main) { + CAM_DBG(CAM_CSIPHY, "Already Configured/Released for %s", + get_access ? 
"Main" : "AON"); + return 0; + } + + if (get_access) { + rc = cam_csiphy_cpas_ops(cpas_handle, true); + if (rc) { + CAM_ERR(CAM_CSIPHY, "voting CPAS: %d failed", rc); + return rc; + } + } + + rc = cam_cpas_reg_read(cpas_handle, CAM_CPAS_REGBASE_CPASTOP, + aon_sel_params->aon_cam_sel_offset[g_phy_data[phy_idx].aon_cam_id], + true, &aon_config); + if (rc) + CAM_WARN(CAM_CSIPHY, "CPAS AON sel register read failed"); + + if (get_access && !g_phy_data[phy_idx].is_configured_for_main) { + aon_config &= ~(aon_sel_params->cam_sel_mask | + aon_sel_params->mclk_sel_mask); + CAM_INFO(CAM_CSIPHY, + "Selecting MainCamera over AON Camera"); + g_phy_data[phy_idx].is_configured_for_main = true; + } else if (!get_access && g_phy_data[phy_idx].is_configured_for_main) { + aon_config |= (aon_sel_params->cam_sel_mask | + aon_sel_params->mclk_sel_mask); + CAM_INFO(CAM_CSIPHY, + "Releasing MainCamera to AON Camera"); + g_phy_data[phy_idx].is_configured_for_main = false; + } + + CAM_DBG(CAM_CSIPHY, "value of aon_config = %u", aon_config); + rc = cam_cpas_reg_write(cpas_handle, CAM_CPAS_REGBASE_CPASTOP, + aon_sel_params->aon_cam_sel_offset[g_phy_data[phy_idx].aon_cam_id], + true, aon_config); + if (rc) + CAM_ERR(CAM_CSIPHY, "CPAS AON sel register write failed"); + + if (!get_access) { + rc = cam_csiphy_cpas_ops(cpas_handle, false); + if (rc) { + CAM_ERR(CAM_CSIPHY, "voting CPAS: %d failed", rc); + return rc; + } + } + + return rc; +} + +int cam_csiphy_util_update_aon_registration(uint32_t phy_idx, uint32_t aon_cam_id) +{ + /* aon support enable for the sensor associated with phy idx*/ + if (phy_idx >= MAX_CSIPHY) { + CAM_ERR(CAM_CSIPHY, + "Invalid PHY index: %u", phy_idx); + return -EINVAL; + } + + if (!g_phy_data[phy_idx].base_address) { + CAM_ERR(CAM_CSIPHY, "Invalid PHY idx: %d from Sensor user", phy_idx); + return -EINVAL; + } + + g_phy_data[phy_idx].aon_cam_id = aon_cam_id; + + return 0; +} + +int cam_csiphy_util_update_aon_ops( + bool get_access, uint32_t phy_idx) +{ + struct 
cam_csiphy_aon_sel_params_t *aon_sel_params; + int rc = 0; + + if (phy_idx >= MAX_CSIPHY) { + CAM_ERR(CAM_CSIPHY, "Null device"); + return -ENODEV; + } + + if (!g_phy_data[phy_idx].base_address) { + CAM_ERR(CAM_CSIPHY, "phy_idx: %d is not supported", phy_idx); + return -EINVAL; + } + + if (!g_phy_data[phy_idx].aon_sel_param) { + CAM_ERR(CAM_CSIPHY, "AON select parameters are null"); + return -EINVAL; + } + + aon_sel_params = g_phy_data[phy_idx].aon_sel_param; + + mutex_lock(&main_aon_selection); + + CAM_DBG(CAM_CSIPHY, "PHY idx: %d, AON_support %s", phy_idx, + (get_access) ? "enable" : "disable"); + + rc = __csiphy_cpas_configure_for_main_or_aon( + get_access, phy_idx, aon_sel_params); + if (rc) + CAM_ERR(CAM_CSIPHY, "Configuration for AON ops failed: rc: %d", rc); + + mutex_unlock(&main_aon_selection); + return rc; +} + +static void __cam_csiphy_read_2phase_bist_counter_status( + struct csiphy_device *csiphy_dev, uint32_t *counter) +{ + int i = 0, lane_count; + int bist_status_arr_size = + csiphy_dev->ctrl_reg->csiphy_bist_reg->number_of_counters; + uint32_t base_offset = 0; + void __iomem *phy_base = NULL; + uint32_t val = 0; + uint32_t offset_betwn_lane = 0; + struct bist_reg_settings_t *bist_reg = NULL; + + phy_base = csiphy_dev->soc_info.reg_map[0].mem_base; + bist_reg = csiphy_dev->ctrl_reg->csiphy_bist_reg; + offset_betwn_lane = + csiphy_dev->ctrl_reg->csiphy_reg->size_offset_betn_lanes; + + for (i = 0; i < bist_status_arr_size; i++) { + base_offset = bist_reg->bist_counter_2ph_base_offset + (0x4 * i); + val = 0; + for (lane_count = 0; lane_count < CAM_CSIPHY_MAX_DPHY_LANES; lane_count++) { + CAM_DBG(CAM_CSIPHY, "value to be read from addr: 0x%x is 0x%x", + base_offset + (lane_count * offset_betwn_lane), + (cam_io_r(phy_base + base_offset + (lane_count * offset_betwn_lane)))); + val |= ((cam_io_r(phy_base + base_offset + (lane_count * offset_betwn_lane)))); + } + *counter |= (val << (i * 8)); + CAM_DBG(CAM_CSIPHY, "COUNTER VALUE is 0x%x", *counter); + } + + 
return; +} + +static void __cam_csiphy_get_2phase_pattern_status( + struct csiphy_device *csiphy_dev) +{ + int i; + int bist_status_arr_size = csiphy_dev->ctrl_reg->csiphy_bist_reg->num_status_reg; + struct csiphy_reg_t *csiphy_common_reg = NULL; + void __iomem *csiphybase; + uint32_t status = 0; + uint32_t counter = 0; + struct bist_reg_settings_t *bist_reg; + + CAM_DBG(CAM_CSIPHY, "ENTER"); + csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base; + bist_reg = csiphy_dev->ctrl_reg->csiphy_bist_reg; + + /* This loop is to read every lane status value + * in case if loop breaks with only last lane. + */ + for (i = 0; i < bist_status_arr_size; i++) { + csiphy_common_reg = &bist_reg->bist_status_arr[i]; + switch (csiphy_common_reg->csiphy_param_type) { + case CSIPHY_2PH_REGS: + status |= cam_io_r(csiphybase + csiphy_common_reg->reg_addr); + break; + } + } + + /* Read the Counter value for possible corrupted headers */ + __cam_csiphy_read_2phase_bist_counter_status(csiphy_dev, &counter); + + if ((status & PREAMBLE_PATTERN_BIST_DONE) && + (status & bist_reg->error_status_val_2ph)) { + /** + * This condition happen when CSIPHY try to read status after sensor + * streamoff. In this case error bit is set due to postamble detection + * which is bit(4). In this scenraio this is not consider as an error. + * We need to check for status2/3 counter value to determine if there are + * more header that is corrupted than 2. Counter always shows value of 2 + * with postamble packet. 
+ * + */ + if (counter <= PREAMBLE_MAX_ERR_COUNT_ALLOWED) { + CAM_INFO(CAM_CSIPHY, + "PN9 Pattern rxced succesfully:: counter value: 0x%x, Status0: 0x%x", + counter, status); + } else { + CAM_INFO(CAM_CSIPHY, + "PN9 Pattern is corrupted:: counter value: 0x%x, Status0: 0x%x", + counter, status); + } + } else if ((status & PREAMBLE_PATTERN_BIST_DONE) && + !(status & bist_reg->error_status_val_2ph)) { + /** + * This condition happen when CSIPHY try to read status with some counter + * value is set to check against. In this case error bit is not expected + * to be set. + */ + CAM_INFO(CAM_CSIPHY, + "PN9 Pattern rxced succesfully:: counter value: 0x%x, Status0: 0x%x", + counter, status); + } else { + CAM_INFO(CAM_CSIPHY, + "PN9 Pattern is corrupted:: counter value: 0x%x Status0: 0x%x", + counter, status); + } + + return; +} + +static void __cam_csiphy_2ph_status_checker_ops( + struct csiphy_device *csiphy_dev, bool set) +{ + int i = 0; + void __iomem *csiphybase = NULL; + uint32_t base_offset = 0; + uint32_t read_back_value; + uint32_t lane_offset = 0; + uint32_t offset_betwn_lane = 0; + struct bist_reg_settings_t *bist_reg = NULL; + + csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base; + bist_reg = csiphy_dev->ctrl_reg->csiphy_bist_reg; + base_offset = bist_reg->set_status_update_2ph_base_offset; + offset_betwn_lane = + csiphy_dev->ctrl_reg->csiphy_reg->size_offset_betn_lanes; + + /* Set checker bit to read the correct status1 value */ + for (i = 0; i < CAM_CSIPHY_MAX_DPHY_LANES; i++) { + lane_offset = base_offset + (i * offset_betwn_lane); + read_back_value = cam_io_r(csiphybase + lane_offset); + set ? 
(read_back_value |= PREAMBLE_PATTERN_SET_CHECKER) : + (read_back_value &= ~PREAMBLE_PATTERN_SET_CHECKER); + cam_io_w_mb(read_back_value, csiphybase + lane_offset); + } +} + +static void __cam_csiphy_read_3phase_bist_counter_status( + struct csiphy_device *csiphy_dev, uint32_t *counter) +{ + int i = 0, lane_count; + int bist_status_arr_size = + csiphy_dev->ctrl_reg->csiphy_bist_reg->number_of_counters; + uint32_t base_offset = 0; + void __iomem *phy_base = NULL; + uint32_t val = 0; + uint32_t offset_betwn_lane = 0; + struct bist_reg_settings_t *bist_reg = NULL; + + phy_base = csiphy_dev->soc_info.reg_map[0].mem_base; + bist_reg = csiphy_dev->ctrl_reg->csiphy_bist_reg; + offset_betwn_lane = + csiphy_dev->ctrl_reg->csiphy_reg->size_offset_betn_lanes; + + for (i = 0; i < bist_status_arr_size; i++) { + base_offset = bist_reg->bist_counter_3ph_base_offset + (0x4 * i); + val = 0; + for (lane_count = 0; lane_count < CAM_CSIPHY_MAX_CPHY_LANES; lane_count++) { + CAM_DBG(CAM_CSIPHY, "value to be read from addr: 0x%x is 0x%x", + base_offset + (lane_count * offset_betwn_lane), + (cam_io_r(phy_base + base_offset + (lane_count * offset_betwn_lane)))); + val |= ((cam_io_r(phy_base + base_offset + (lane_count * offset_betwn_lane)))); + } + + *counter |= (val << (i * 8)); + CAM_DBG(CAM_CSIPHY, "COUNTER VALUE is 0x%x", *counter); + } + + return; +} + +static void __cam_csiphy_get_3phase_pattern_status( + struct csiphy_device *csiphy_dev) +{ + int i = 0; + int bist_status_arr_size = + csiphy_dev->ctrl_reg->csiphy_bist_reg->num_status_reg; + struct csiphy_reg_t *csiphy_common_reg = NULL; + void __iomem *csiphybase = NULL; + uint32_t base_offset = 0; + uint32_t lane_offset = 0; + uint32_t status1 = 0, status0 = 0; + uint32_t counter = 0; + uint32_t offset_betwn_lane = 0; + struct bist_reg_settings_t *bist_reg = NULL; + + CAM_DBG(CAM_CSIPHY, "ENTER"); + csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base; + bist_reg = csiphy_dev->ctrl_reg->csiphy_bist_reg; + base_offset = 
bist_reg->bist_sensor_data_3ph_status_base_offset; + offset_betwn_lane = + csiphy_dev->ctrl_reg->csiphy_reg->size_offset_betn_lanes; + + /* This loop is to read every lane status value + * in case if loop breaks with only last lane. + */ + for (i = 0; i < bist_status_arr_size; i++) { + csiphy_common_reg = &bist_reg->bist_status_arr[i]; + switch (csiphy_common_reg->csiphy_param_type) { + case CSIPHY_3PH_REGS: + status1 |= cam_io_r(csiphybase + csiphy_common_reg->reg_addr); + break; + } + } + + /* Read Status0 value to detect sensor related communication */ + for (i = 0; i < CAM_CSIPHY_MAX_CPHY_LANES; i++) { + lane_offset = base_offset + (i * offset_betwn_lane); + status0 |= cam_io_r(csiphybase + lane_offset); + } + + + /* Read the Counter value for possible corrupted headers */ + __cam_csiphy_read_3phase_bist_counter_status(csiphy_dev, &counter); + + if ((status1 & PREAMBLE_PATTERN_BIST_DONE) && + (status1 & bist_reg->error_status_val_3ph)) { + /** + * This condition happen when CSIPHY try to read status after sensor + * streamoff. In this case error bit is set due to postamble detection + * which is bit(4). In this scenraio this is not consider as an error. + * We need to check for status2/3 counter value to determine if there are + * more header that is corrupted than 2. Counter always shows value of 2 + * with postamble packet. + * + */ + if (counter <= PREAMBLE_MAX_ERR_COUNT_ALLOWED) { + CAM_INFO(CAM_CSIPHY, + "PN9 Pattern rxced succesfully after sensor streamoff:: counter value: 0x%x Status1: 0x%x Status0: 0x%x", + counter, status1, status0); + } else { + CAM_INFO(CAM_CSIPHY, + "PN9 Pattern is corrupted:: counter value: 0x%x Status1: 0x%x Status0: 0x%x", + counter, status1, status0); + } + } else if ((status1 & PREAMBLE_PATTERN_BIST_DONE) && + !(status1 & bist_reg->error_status_val_3ph)) { + /** + * This condition happen when CSIPHY try to read status with some counter + * value is set to check against. In this case error bit is not expected + * to be set. 
+ */ + CAM_INFO(CAM_CSIPHY, + "PN9 Pattern rxced succesfully before sensor streamoff:: counter value: 0x%x Status1: 0x%x Status0: 0x%x", + counter, status1, status0); + } else { + CAM_INFO(CAM_CSIPHY, + "PN9 Pattern is corrupted:: counter value: 0x%x Status1: 0x%x Status0: 0x%x", + counter, status1, status0); + } + + return; +} + +static void __cam_csiphy_3ph_status_checker_ops( + struct csiphy_device *csiphy_dev, bool set) +{ + int i = 0; + void __iomem *csiphybase = NULL; + uint32_t base_offset = 0; + uint32_t read_back_value; + uint32_t lane_offset = 0; + uint32_t offset_betwn_lane = 0; + + csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base; + base_offset = + csiphy_dev->ctrl_reg->csiphy_bist_reg->set_status_update_3ph_base_offset; + offset_betwn_lane = + csiphy_dev->ctrl_reg->csiphy_reg->size_offset_betn_lanes; + /* Set checker bit to read the correct status1 value */ + for (i = 0; i < CAM_CSIPHY_MAX_CPHY_LANES; i++) { + lane_offset = base_offset + (i * offset_betwn_lane); + read_back_value = cam_io_r(csiphybase + lane_offset); + set ? 
(read_back_value |= PREAMBLE_PATTERN_SET_CHECKER) : + (read_back_value &= ~PREAMBLE_PATTERN_SET_CHECKER); + cam_io_w_mb(read_back_value, csiphybase + lane_offset); + } +} + +static void __cam_csiphy_get_preamble_status( + struct csiphy_device *csiphy_dev, int offset) +{ + bool is_3phase = false; + + is_3phase = csiphy_dev->csiphy_info[offset].csiphy_3phase; + + if (is_3phase) { + __cam_csiphy_3ph_status_checker_ops(csiphy_dev, true); + __cam_csiphy_get_3phase_pattern_status(csiphy_dev); + __cam_csiphy_3ph_status_checker_ops(csiphy_dev, false); + } else { + __cam_csiphy_2ph_status_checker_ops(csiphy_dev, true); + __cam_csiphy_get_2phase_pattern_status(csiphy_dev); + __cam_csiphy_2ph_status_checker_ops(csiphy_dev, false); + } + return; +} + +int32_t cam_csiphy_core_cfg(void *phy_dev, + void *arg) +{ + struct cam_control *cmd = (struct cam_control *)arg; + struct csiphy_device *csiphy_dev = (struct csiphy_device *)phy_dev; + struct cam_cphy_dphy_status_reg_params_t *status_reg_ptr; + struct csiphy_reg_parms_t *csiphy_reg; + struct cam_hw_soc_info *soc_info; + uint32_t cphy_trio_status; + uint32_t refgen_status = 0; + void __iomem *csiphybase; + int32_t rc = 0; + uint32_t i; + + if (!csiphy_dev || !cmd) { + CAM_ERR(CAM_CSIPHY, "Invalid input args"); + return -EINVAL; + } + + if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_CSIPHY, "Invalid handle type: %d", + cmd->handle_type); + return -EINVAL; + } + + soc_info = &csiphy_dev->soc_info; + if (!soc_info) { + CAM_ERR(CAM_CSIPHY, "Null Soc_info"); + return -EINVAL; + } + + if (!g_phy_data[soc_info->index].base_address) { + CAM_ERR(CAM_CSIPHY, "CSIPHY hw is not avaialble at index: %d", + soc_info->index); + return -EINVAL; + } + + csiphybase = soc_info->reg_map[0].mem_base; + csiphy_reg = csiphy_dev->ctrl_reg->csiphy_reg; + status_reg_ptr = csiphy_reg->status_reg_params; + + if (!status_reg_ptr) { + CAM_ERR(CAM_CSIPHY, "CSIPHY %d status reg is NULL: %s", + soc_info->index, 
CAM_IS_NULL_TO_STR(status_reg_ptr)); + return -EINVAL; + } + + CAM_DBG(CAM_CSIPHY, "Opcode received: %d", cmd->op_code); + mutex_lock(&csiphy_dev->mutex); + switch (cmd->op_code) { + case CAM_ACQUIRE_DEV: { + struct cam_sensor_acquire_dev csiphy_acq_dev; + struct cam_csiphy_acquire_dev_info csiphy_acq_params; + int index; + struct cam_create_dev_hdl bridge_params; + + CAM_DBG(CAM_CSIPHY, "ACQUIRE_CNT: %d COMBO_MODE: %d", + csiphy_dev->acquire_count, + csiphy_dev->combo_mode); + if ((csiphy_dev->csiphy_state == CAM_CSIPHY_START) && + (csiphy_dev->combo_mode == 0) && + (csiphy_dev->acquire_count > 0)) { + CAM_ERR(CAM_CSIPHY, + "NonComboMode does not support multiple acquire: Acquire_count: %d", + csiphy_dev->acquire_count); + rc = -EINVAL; + goto release_mutex; + } + + if ((csiphy_dev->acquire_count) && + (csiphy_dev->acquire_count >= + csiphy_dev->session_max_device_support)) { + CAM_ERR(CAM_CSIPHY, + "Max acquires are allowed in combo mode: %d", + csiphy_dev->session_max_device_support); + rc = -EINVAL; + goto release_mutex; + } + + rc = copy_from_user(&csiphy_acq_dev, + u64_to_user_ptr(cmd->handle), + sizeof(csiphy_acq_dev)); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "Failed copying from User"); + goto release_mutex; + } + + csiphy_acq_params.combo_mode = 0; + + if (copy_from_user(&csiphy_acq_params, + u64_to_user_ptr(csiphy_acq_dev.info_handle), + sizeof(csiphy_acq_params))) { + CAM_ERR(CAM_CSIPHY, + "Failed copying from User"); + goto release_mutex; + } + + if (csiphy_acq_params.combo_mode && + csiphy_acq_params.cphy_dphy_combo_mode) { + CAM_ERR(CAM_CSIPHY, + "Cannot support both Combo_mode and cphy_dphy_combo_mode"); + rc = -EINVAL; + goto release_mutex; + } + + if (csiphy_acq_params.combo_mode == 1) { + CAM_DBG(CAM_CSIPHY, "combo mode stream detected"); + csiphy_dev->combo_mode = 1; + if (csiphy_acq_params.csiphy_3phase) { + CAM_DBG(CAM_CSIPHY, "3Phase ComboMode"); + csiphy_dev->session_max_device_support = + CSIPHY_MAX_INSTANCES_PER_PHY; + } else { + 
csiphy_dev->session_max_device_support = + CSIPHY_MAX_INSTANCES_PER_PHY - 1; + CAM_DBG(CAM_CSIPHY, "2Phase ComboMode"); + } + } + if (csiphy_acq_params.cphy_dphy_combo_mode == 1) { + CAM_DBG(CAM_CSIPHY, + "cphy_dphy_combo_mode stream detected"); + csiphy_dev->cphy_dphy_combo_mode = 1; + csiphy_dev->session_max_device_support = + CSIPHY_MAX_INSTANCES_PER_PHY - 1; + } + + if (!csiphy_acq_params.combo_mode && + !csiphy_acq_params.cphy_dphy_combo_mode) { + CAM_DBG(CAM_CSIPHY, "Non Combo Mode stream"); + csiphy_dev->session_max_device_support = 1; + } + + bridge_params.ops = NULL; + bridge_params.session_hdl = csiphy_acq_dev.session_handle; + bridge_params.v4l2_sub_dev_flag = 0; + bridge_params.media_entity_flag = 0; + bridge_params.priv = csiphy_dev; + bridge_params.dev_id = CAM_CSIPHY; + index = csiphy_dev->acquire_count; + csiphy_acq_dev.device_handle = + cam_create_device_hdl(&bridge_params); + if (csiphy_acq_dev.device_handle <= 0) { + rc = -EFAULT; + CAM_ERR(CAM_CSIPHY, "Can not create device handle"); + goto release_mutex; + } + + csiphy_dev->csiphy_info[index].hdl_data.device_hdl = + csiphy_acq_dev.device_handle; + csiphy_dev->csiphy_info[index].hdl_data.session_hdl = + csiphy_acq_dev.session_handle; + csiphy_dev->csiphy_info[index].csiphy_3phase = + csiphy_acq_params.csiphy_3phase; + csiphy_dev->csiphy_info[index].conn_csid_idx = -1; + csiphy_dev->csiphy_info[index].use_hw_client_voting = false; + csiphy_dev->csiphy_info[index].is_drv_config_en = false; + + CAM_DBG(CAM_CSIPHY, "Add dev_handle:0x%x at index: %d ", + csiphy_dev->csiphy_info[index].hdl_data.device_hdl, + index); + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &csiphy_acq_dev, + sizeof(struct cam_sensor_acquire_dev))) { + CAM_ERR(CAM_CSIPHY, "Failed copying from User"); + rc = -EINVAL; + goto release_mutex; + } + + if (!csiphy_dev->acquire_count) { + g_phy_data[soc_info->index].is_3phase = csiphy_acq_params.csiphy_3phase; + CAM_DBG(CAM_CSIPHY, + "g_csiphy data is updated for index: %d is_3phase: 
%u", + soc_info->index, + g_phy_data[soc_info->index].is_3phase); + memset(&csiphy_dev->qmargin_data, 0, + sizeof(struct csiphy_qmargin_sweep_data)); + } + + if (g_phy_data[soc_info->index].aon_cam_id != NOT_AON_CAM) { + rc = cam_csiphy_util_update_aon_ops(true, soc_info->index); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Error in setting up AON operation for phy_idx: %d, rc: %d", + soc_info->index, rc); + goto release_mutex; + } + } + + csiphy_dev->acquire_count++; + + if (csiphy_dev->csiphy_state == CAM_CSIPHY_INIT) + csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE; + + CAM_INFO(CAM_CSIPHY, + "CAM_ACQUIRE_DEV: %u Type: %s acquire_count: %d combo: %u cphy+dphy combo: %u", + soc_info->index, + g_phy_data[soc_info->index].is_3phase ? "CPHY" : "DPHY", + csiphy_dev->acquire_count, + csiphy_dev->combo_mode, + csiphy_dev->cphy_dphy_combo_mode); + } + break; + case CAM_QUERY_CAP: { + struct cam_csiphy_query_cap csiphy_cap = {0}; + + cam_csiphy_query_cap(csiphy_dev, &csiphy_cap); + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &csiphy_cap, sizeof(struct cam_csiphy_query_cap))) { + CAM_ERR(CAM_CSIPHY, "Failed copying from User"); + rc = -EINVAL; + goto release_mutex; + } + } + break; + case CAM_STOP_DEV: { + int32_t offset, rc = 0; + struct cam_start_stop_dev_cmd config; + struct cam_csiphy_param *param; + uint16_t settle_cnt = 0; + + rc = copy_from_user(&config, (void __user *)cmd->handle, + sizeof(config)); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "Failed copying from User"); + goto release_mutex; + } + + if (csiphy_dev->csiphy_state != CAM_CSIPHY_START) { + CAM_ERR(CAM_CSIPHY, "Csiphy:%d Not in right state to stop : %d", + soc_info->index, csiphy_dev->csiphy_state); + goto release_mutex; + } + + offset = cam_csiphy_get_instance_offset(csiphy_dev, + config.dev_handle); + if (offset < 0 || + offset >= csiphy_dev->session_max_device_support) { + CAM_ERR(CAM_CSIPHY, "Index is invalid: %d", offset); + goto release_mutex; + } + + param = &csiphy_dev->csiphy_info[offset]; + + 
cam_csiphy_get_settle_count(csiphy_dev, offset, &settle_cnt); + + if (strlen(csiphy_qmargin)) + cam_csiphy_common_status_reg_dump(csiphy_dev, false); + + if (--csiphy_dev->start_dev_count) { + if (param->secure_mode) + cam_csiphy_program_secure_mode(csiphy_dev, + CAM_SECURE_MODE_NON_SECURE, offset, false); + + param->secure_mode = CAM_SECURE_MODE_NON_SECURE; + param->csiphy_cpas_cp_reg_mask = 0; + + cam_csiphy_update_lane_selection(csiphy_dev, offset, false); + + if (soc_info->is_clk_drv_en && param->use_hw_client_voting) { + rc = cam_soc_util_set_src_clk_rate(soc_info, param->conn_csid_idx, + 0, 0); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "[%d] Failed in setting clk rate for %d", + soc_info->index, param->conn_csid_idx); + } else { + rc = cam_soc_util_cesta_channel_switch(param->conn_csid_idx, + "csiphy_stop_dev"); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Failed to apply power states for cesta client:%d rc:%d", + param->conn_csid_idx, rc); + } + } + } + + CAM_INFO(CAM_CSIPHY, + "CAM_STOP_PHYDEV: %d, CSID:%d, Type: %s, dev_cnt: %u, slot: %d, Datarate: %llu, Settlecount: %u", + soc_info->index, param->conn_csid_idx, + g_phy_data[soc_info->index].is_3phase ? 
"CPHY" : "DPHY", + csiphy_dev->start_dev_count, offset, param->data_rate, + settle_cnt); + + goto release_mutex; + } + + if (param->secure_mode) + cam_csiphy_program_secure_mode(csiphy_dev, CAM_SECURE_MODE_NON_SECURE, + offset, false); + + param->secure_mode = CAM_SECURE_MODE_NON_SECURE; + param->csiphy_cpas_cp_reg_mask = 0x0; + + if (csiphy_dev->prgm_cmn_reg_across_csiphy) { + mutex_lock(&active_csiphy_cnt_mutex); + active_csiphy_hw_cnt--; + mutex_unlock(&active_csiphy_cnt_mutex); + + cam_csiphy_program_common_registers(csiphy_dev, true, + CAM_CSIPHY_PRGM_ALL); + } else { + cam_csiphy_program_common_registers(csiphy_dev, true, + CAM_CSIPHY_PRGM_INDVDL); + } + + if (csiphy_dev->preamble_enable) + __cam_csiphy_get_preamble_status(csiphy_dev, offset); + + cam_csiphy_update_lane_selection(csiphy_dev, offset, false); + + rc = cam_csiphy_disable_hw(csiphy_dev, offset); + if (rc < 0) + CAM_ERR(CAM_CSIPHY, "Failed in csiphy release"); + + if (!g_phy_data[soc_info->index].is_configured_for_main) { + if (cam_csiphy_cpas_ops(csiphy_dev->cpas_handle, false)) { + CAM_ERR(CAM_CSIPHY, "Failed in de-voting CPAS"); + rc = -EFAULT; + } + } + + csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE; + + CAM_INFO(CAM_CSIPHY, + "CAM_STOP_PHYDEV: %u, CSID:%d, Type: %s, slot: %d, Datarate: %llu, Settlecount: %u", + soc_info->index, param->conn_csid_idx, + g_phy_data[soc_info->index].is_3phase ? 
"CPHY" : "DPHY", + offset, param->data_rate, settle_cnt); + } + break; + case CAM_RELEASE_DEV: { + int32_t offset; + struct cam_release_dev_cmd release; + + if (!csiphy_dev->acquire_count) { + CAM_ERR(CAM_CSIPHY, "No valid devices to release"); + rc = -EINVAL; + goto release_mutex; + } + + if (copy_from_user(&release, + u64_to_user_ptr(cmd->handle), + sizeof(release))) { + rc = -EFAULT; + goto release_mutex; + } + + offset = cam_csiphy_get_instance_offset(csiphy_dev, + release.dev_handle); + if (offset < 0 || + offset >= csiphy_dev->session_max_device_support) { + CAM_ERR(CAM_CSIPHY, "index is invalid: %d", offset); + goto release_mutex; + } + + if (csiphy_dev->csiphy_info[offset].secure_mode) + cam_csiphy_program_secure_mode( + csiphy_dev, + CAM_SECURE_MODE_NON_SECURE, offset, false); + + csiphy_dev->csiphy_info[offset].secure_mode = + CAM_SECURE_MODE_NON_SECURE; + + csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0x0; + csiphy_dev->preamble_enable = 0; + + rc = cam_destroy_device_hdl(release.dev_handle); + if (rc < 0) + CAM_ERR(CAM_CSIPHY, "destroying the device hdl"); + csiphy_dev->csiphy_info[offset].hdl_data.device_hdl = -1; + csiphy_dev->csiphy_info[offset].hdl_data.session_hdl = -1; + + cam_csiphy_reset_phyconfig_param(csiphy_dev, offset); + + if (csiphy_dev->acquire_count) { + csiphy_dev->acquire_count--; + CAM_DBG(CAM_CSIPHY, "Acquire_cnt: %d", + csiphy_dev->acquire_count); + } + + if (csiphy_dev->acquire_count == 0) { + CAM_DBG(CAM_CSIPHY, "All PHY devices released"); + if (g_phy_data[soc_info->index].aon_cam_id != NOT_AON_CAM) { + rc = cam_csiphy_util_update_aon_ops(false, soc_info->index); + if (rc) { + CAM_WARN(CAM_CSIPHY, + "Error in releasing AON operation for phy_idx: %d, rc: %d", + csiphy_dev->soc_info.index, rc); + rc = 0; + } + } + csiphy_dev->combo_mode = 0; + csiphy_dev->csiphy_state = CAM_CSIPHY_INIT; + } + + if (csiphy_dev->cdr_params.cdr_sweep_enabled) + memset(&csiphy_dev->cdr_params, 0x0, + sizeof(struct cam_csiphy_dev_cdr_sweep_params)); + 
+ CAM_DBG(CAM_CSIPHY, "CAM_RELEASE_PHYDEV: %u Type: %s", + soc_info->index, + g_phy_data[soc_info->index].is_3phase ? "CPHY" : "DPHY"); + + if (csiphy_dev->acquire_count < csiphy_dev->start_dev_count) { + rc = -EINVAL; + CAM_ERR(CAM_CSIPHY, + "PHYDEV %u streamon count:%u bigger than acquire count:%u, missing stream offs", + soc_info->index, csiphy_dev->start_dev_count, + csiphy_dev->acquire_count); + } + + break; + } + case CAM_CONFIG_DEV: { + struct cam_config_dev_cmd config; + + CAM_DBG(CAM_CSIPHY, "CONFIG_DEV Called"); + + if (copy_from_user(&config, u64_to_user_ptr(cmd->handle), sizeof(config))) { + CAM_ERR(CAM_CSIPHY, "Couldn't copy the Entire Config From User"); + rc = -EFAULT; + } else { + rc = cam_cmd_buf_parser(csiphy_dev, &config); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "Fail in cmd buf parser"); + goto release_mutex; + } + } + break; + } + case CAM_START_DEV: { + struct cam_csiphy_param *param; + struct cam_start_stop_dev_cmd config; + int32_t offset; + int clk_vote_level_high = -1; + int clk_vote_level_low = -1; + uint8_t data_rate_variant_idx = 0; + uint16_t settle_cnt = 0; + + CAM_DBG(CAM_CSIPHY, "START_DEV Called"); + rc = copy_from_user(&config, (void __user *)cmd->handle, + sizeof(config)); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "Failed copying from User"); + goto release_mutex; + } + + if ((csiphy_dev->csiphy_state == CAM_CSIPHY_START) && + (csiphy_dev->start_dev_count > + csiphy_dev->session_max_device_support)) { + CAM_ERR(CAM_CSIPHY, + "Invalid start count: %d, Max supported devices: %u", + csiphy_dev->start_dev_count, + csiphy_dev->session_max_device_support); + rc = -EINVAL; + goto release_mutex; + } + + offset = cam_csiphy_get_instance_offset(csiphy_dev, + config.dev_handle); + if (offset < 0 || + offset >= csiphy_dev->session_max_device_support) { + CAM_ERR(CAM_CSIPHY, "index is invalid: %d", offset); + goto release_mutex; + } + + param = &csiphy_dev->csiphy_info[offset]; + + rc = cam_cpas_query_drv_enable(NULL, &soc_info->is_clk_drv_en); + 
if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed to query DRV enable rc: %d", rc); + goto release_mutex; + } + + cam_csiphy_get_settle_count(csiphy_dev, offset, &settle_cnt); + + if (csiphy_dev->start_dev_count) { + clk_vote_level_high = + csiphy_dev->ctrl_reg->getclockvoting(csiphy_dev, offset); + clk_vote_level_low = clk_vote_level_high; + + CAM_DBG(CAM_CSIPHY, + "CSIPHY[%d] is_clk_drv_en[%d] conn_csid_idx[%d] use_hw_client_voting[%d] is_drv_config_en[%d]", + csiphy_dev->soc_info.index, soc_info->is_clk_drv_en, + param->conn_csid_idx, param->use_hw_client_voting, + param->is_drv_config_en); + + if (soc_info->is_clk_drv_en && param->use_hw_client_voting) { + if (param->is_drv_config_en) + clk_vote_level_low = soc_info->lowest_clk_level; + + rc = cam_soc_util_set_clk_rate_level(&csiphy_dev->soc_info, + param->conn_csid_idx, clk_vote_level_high, + clk_vote_level_low, false); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Failed to set the req clk_rate level[high low]: [%s %s] cesta_client_idx: %d rc: %d", + cam_soc_util_get_string_from_level( + clk_vote_level_high), + cam_soc_util_get_string_from_level( + clk_vote_level_low), param->conn_csid_idx, rc); + rc = -EINVAL; + goto release_mutex; + } + + rc = cam_soc_util_cesta_channel_switch(param->conn_csid_idx, + "csiphy_combo"); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Failed to apply power states for crm client:%d rc:%d", + param->conn_csid_idx, rc); + rc = 0; + } + } else { + rc = cam_soc_util_set_clk_rate_level(&csiphy_dev->soc_info, + CAM_CLK_SW_CLIENT_IDX, clk_vote_level_high, 0, false); + if (rc) { + CAM_WARN(CAM_CSIPHY, + "Failed to set the req clk_rate level: %s", + cam_soc_util_get_string_from_level( + clk_vote_level_high)); + rc = -EINVAL; + goto release_mutex; + + } + } + + if (csiphy_dev->csiphy_info[offset].secure_mode == 1) { + if (!cam_cpas_is_feature_supported( + CAM_CPAS_SECURE_CAMERA_ENABLE, + CAM_CPAS_HW_IDX_ANY, NULL)) { + CAM_ERR(CAM_CSIPHY, + "sec_cam: camera fuse bit not set"); + goto release_mutex; + } + + rc = 
cam_csiphy_program_secure_mode(csiphy_dev, + CAM_SECURE_MODE_SECURE, offset, false); + if (rc) { + csiphy_dev->csiphy_info[offset] + .secure_mode = + CAM_SECURE_MODE_NON_SECURE; + CAM_ERR(CAM_CSIPHY, + "sec_cam: notify failed: rc: %d", + rc); + goto release_mutex; + } + } + + if (csiphy_dev->csiphy_info[offset].csiphy_3phase) { + rc = cam_csiphy_cphy_data_rate_config( + csiphy_dev, offset, data_rate_variant_idx); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Data rate specific configuration failed rc: %d", + rc); + goto release_mutex; + } + } + + rc = cam_csiphy_update_lane_selection(csiphy_dev, offset, true); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Update enable lane failed, rc: %d", rc); + goto release_mutex; + } + + if (csiphy_dev->en_full_phy_reg_dump) + cam_csiphy_reg_dump(&csiphy_dev->soc_info); + + csiphy_dev->start_dev_count++; + + CAM_INFO(CAM_CSIPHY, + "CAM_START_PHYDEV: %d, CSID:%d, Type: %s, dev_cnt: %u, slot: %d, combo: %u, cphy+dphy: %u, skew_en: %d, sec_mode: %d, Datarate: %llu, Settlecount: %u", + soc_info->index, + csiphy_dev->csiphy_info[offset].conn_csid_idx, + g_phy_data[soc_info->index].is_3phase ? 
"CPHY" : "DPHY", + csiphy_dev->start_dev_count, + offset, + csiphy_dev->combo_mode, + csiphy_dev->cphy_dphy_combo_mode, + csiphy_dev->csiphy_info[offset].mipi_flags, + csiphy_dev->csiphy_info[offset].secure_mode, + csiphy_dev->csiphy_info[offset].data_rate, + settle_cnt); + + goto release_mutex; + } + + if (!g_phy_data[soc_info->index].is_configured_for_main) { + if (cam_csiphy_cpas_ops(csiphy_dev->cpas_handle, true)) { + rc = -EFAULT; + CAM_ERR(CAM_CSIPHY, "voting CPAS: %d", rc); + goto release_mutex; + } + } + + if (csiphy_dev->csiphy_info[offset].secure_mode == 1) { + if (!cam_cpas_is_feature_supported( + CAM_CPAS_SECURE_CAMERA_ENABLE, + CAM_CPAS_HW_IDX_ANY, NULL)) { + CAM_ERR(CAM_CSIPHY, + "sec_cam: camera fuse bit not set"); + rc = -EINVAL; + goto cpas_stop; + } + + rc = cam_csiphy_program_secure_mode( + csiphy_dev, + CAM_SECURE_MODE_SECURE, offset, false); + if (rc) { + csiphy_dev->csiphy_info[offset].secure_mode = + CAM_SECURE_MODE_NON_SECURE; + goto cpas_stop; + } + } + + rc = cam_csiphy_enable_hw(csiphy_dev, offset); + if (rc != 0) { + CAM_ERR(CAM_CSIPHY, "cam_csiphy_enable_hw failed"); + goto cpas_stop; + } + + if (cam_common_read_poll_timeout(csiphybase + + status_reg_ptr->refgen_status_offset, + CSIPHY_POLL_DELAY_US, CSIPHY_POLL_TIMEOUT_US, + CSPIHY_REFGEN_STATUS_BIT, CSPIHY_REFGEN_STATUS_BIT, &refgen_status)) { + CAM_ERR(CAM_CSIPHY, "Response poll timed out: status=0x%08x", + refgen_status); + rc = -ETIMEDOUT; + goto cpas_stop; + } + + rc = cam_csiphy_update_lane_selection(csiphy_dev, offset, true); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Update enable lane failed, rc: %d", rc); + goto cpas_stop; + } + + if (csiphy_dev->prgm_cmn_reg_across_csiphy) { + cam_csiphy_program_common_registers(csiphy_dev, false, CAM_CSIPHY_PRGM_ALL); + mutex_lock(&active_csiphy_cnt_mutex); + active_csiphy_hw_cnt++; + mutex_unlock(&active_csiphy_cnt_mutex); + } else { + cam_csiphy_program_common_registers(csiphy_dev, false, + CAM_CSIPHY_PRGM_INDVDL); + } + + rc = 
cam_csiphy_config_dev(csiphy_dev, config.dev_handle, data_rate_variant_idx); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "cam_csiphy_config_dev failed"); + cam_csiphy_disable_hw(csiphy_dev, offset); + goto hw_cnt_decrement; + } + + if (csiphy_onthego_reg_count[soc_info->index]) + cam_csiphy_apply_onthego_reg_values(csiphybase, soc_info->index); + + cam_csiphy_release_from_reset_state(csiphy_dev, csiphybase, offset); + + if (g_phy_data[soc_info->index].is_3phase) { + for (i = 0; i < CAM_CSIPHY_MAX_CPHY_LANES; i++) { + if (status_reg_ptr->cphy_lane_status[i]) { + cphy_trio_status = cam_io_r_mb(csiphybase + + status_reg_ptr->cphy_lane_status[i]); + + cphy_trio_status &= 0x1F; + if (cphy_trio_status == 0 || cphy_trio_status == 8) { + CAM_DBG(CAM_CSIPHY, + "Reg_offset: 0x%x, cphy_trio%d_status = 0x%x", + status_reg_ptr->cphy_lane_status[i], + i, cphy_trio_status); + } else { + CAM_WARN(CAM_CSIPHY, + "Reg_offset: 0x%x, Cphy_trio%d_status = 0x%x", + status_reg_ptr->cphy_lane_status[i], + i, cphy_trio_status); + } + } + } + } + + if (csiphy_dev->en_full_phy_reg_dump) + cam_csiphy_reg_dump(&csiphy_dev->soc_info); + + if (csiphy_dev->en_lane_status_reg_dump) { + usleep_range(50000, 50005); + CAM_INFO(CAM_CSIPHY, "Status Reg Dump after config"); + cam_csiphy_dump_status_reg(csiphy_dev); + } + + csiphy_dev->start_dev_count++; + csiphy_dev->csiphy_state = CAM_CSIPHY_START; + + CAM_INFO(CAM_CSIPHY, + "CAM_START_PHYDEV: %d, CSID:%d, Type: %s, dev_cnt: %u, slot: %d, combo: %u, cphy+dphy: %u, skew_en: %d, sec_mode: %d, Datarate: %llu, Settlecount: %u", + soc_info->index, + csiphy_dev->csiphy_info[offset].conn_csid_idx, + g_phy_data[soc_info->index].is_3phase ? 
"CPHY" : "DPHY", + csiphy_dev->start_dev_count, + offset, + csiphy_dev->combo_mode, + csiphy_dev->cphy_dphy_combo_mode, + csiphy_dev->csiphy_info[offset].mipi_flags, + csiphy_dev->csiphy_info[offset].secure_mode, + csiphy_dev->csiphy_info[offset].data_rate, + settle_cnt); + } + break; + case CAM_CONFIG_DEV_EXTERNAL: { + struct cam_config_dev_cmd submit_cmd; + + if (copy_from_user(&submit_cmd, + u64_to_user_ptr(cmd->handle), + sizeof(struct cam_config_dev_cmd))) { + CAM_ERR(CAM_CSIPHY, "failed copy config ext\n"); + rc = -EFAULT; + goto release_mutex; + } else { + rc = cam_csiphy_external_cmd(csiphy_dev, &submit_cmd); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "exteranal command configuration failed rc: %d", + rc); + goto release_mutex; + } + } + break; + } + case CAM_QUERY_HW_DEV_INFO: { + void *blob_data = CAM_MEM_ZALLOC(cmd->size, GFP_KERNEL); + + if (blob_data) { + rc = copy_from_user(blob_data, u64_to_user_ptr(cmd->handle), + cmd->size); + if (rc) { + CAM_MEM_FREE(blob_data); + CAM_ERR(CAM_CSIPHY, "Failed in copy from user, rc=%d", + rc); + break; + } + + rc = cam_packet_util_process_generic_blob(cmd->size, blob_data, + cam_csiphy_generic_data_update, csiphy_dev); + if (rc) { + CAM_MEM_FREE(blob_data); + break; + } + + rc = copy_to_user(u64_to_user_ptr(cmd->handle), blob_data, + cmd->size); + if (rc) + CAM_ERR(CAM_CSIPHY, "Failed in copy to user, rc=%d", rc); + + CAM_MEM_FREE(blob_data); + } else { + rc = -ENOMEM; + CAM_ERR(CAM_CSIPHY, "memory allocation is failed rc = %d", rc); + } + break; + } + default: + CAM_ERR(CAM_CSIPHY, "Invalid Opcode: %d", cmd->op_code); + rc = -EINVAL; + goto release_mutex; + } + + mutex_unlock(&csiphy_dev->mutex); + return rc; + +hw_cnt_decrement: + if (csiphy_dev->prgm_cmn_reg_across_csiphy) { + mutex_lock(&active_csiphy_cnt_mutex); + active_csiphy_hw_cnt--; + mutex_unlock(&active_csiphy_cnt_mutex); + } + +cpas_stop: + if (cam_csiphy_cpas_ops(csiphy_dev->cpas_handle, false)) + CAM_ERR(CAM_CSIPHY, "cpas stop failed"); +release_mutex: + 
mutex_unlock(&csiphy_dev->mutex); + + return rc; +} + +int cam_csiphy_register_baseaddress(struct csiphy_device *csiphy_dev) +{ + int phy_idx; + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Data is NULL"); + return -EINVAL; + } + + if (csiphy_dev->soc_info.index >= MAX_CSIPHY) { + CAM_ERR(CAM_CSIPHY, "Invalid soc index: %u Max soc index: %u", + csiphy_dev->soc_info.index, MAX_CSIPHY); + return -EINVAL; + } + + phy_idx = csiphy_dev->soc_info.index; + g_phy_data[phy_idx].base_address = + csiphy_dev->soc_info.reg_map[0].mem_base; + g_phy_data[phy_idx].cpas_handle = + csiphy_dev->cpas_handle; + g_phy_data[phy_idx].aon_sel_param = + csiphy_dev->ctrl_reg->csiphy_reg->aon_sel_params; + g_phy_data[phy_idx].aon_cam_id = NOT_AON_CAM; + g_phy_data[phy_idx].is_configured_for_main = false; + g_phy_data[phy_idx].data_rate_aux_mask = 0; + g_phy_data[phy_idx].qmargin_data = &csiphy_dev->qmargin_data; + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.h new file mode 100644 index 0000000000..afa936d514 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */
+
+#ifndef _CAM_CSIPHY_CORE_H_
+#define _CAM_CSIPHY_CORE_H_
+
+#include
+#include "cam_csiphy_dev.h"
+#include
+#include
+#include
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API programs CSIPhy IRQ registers
+ */
+void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API resets CSIPhy hardware
+ */
+void cam_csiphy_reset(struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ * @csiphybase: CSIPhy base addr
+ * @instance : CSIPhy instance
+ *
+ * This API releases CSIPhy from reset
+ */
+int cam_csiphy_release_from_reset_state(struct csiphy_device *csiphy_dev,
+ void __iomem *csiphybase, int32_t instance);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ * @arg: Camera control command argument
+ *
+ * This API handles the camera control argument reached to CSIPhy
+ */
+int cam_csiphy_core_cfg(void *csiphy_dev, void *arg);
+
+/**
+ * @irq_num: IRQ number
+ * @data: CSIPhy device structure
+ *
+ * This API handles CSIPhy IRQs
+ */
+irqreturn_t cam_csiphy_irq(int irq_num, void *data);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API handles the CSIPhy close
+ */
+void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev : CSIPhy device structure (supplies the CSIPHY cell/soc index)
+ *
+ * This API registers the CSIPhy base address for the device's soc index
+ */
+int cam_csiphy_register_baseaddress(struct csiphy_device *csiphy_dev);
+
+/**
+ * @get_access : Get Access for the Main Camera over AON Camera
+ * @phy_idx : To acquire the correct PHY hw to do the operation with
+ *
+ * This API provides Utility/helper function to program the MUX for
+ * correct PHY hw.
+ *
+ */
+int cam_csiphy_util_update_aon_ops(bool get_access, uint32_t phy_idx);
+
+
+/**
+ * @csiphy_dev : CSIPhy device structure
+ *
+ * This API updates the auxiliary settings mask for the current data rate
+ *
+ */
+void cam_csiphy_update_auxiliary_mask(struct csiphy_device *csiphy_dev);
+
+/**
+ * @data: Qmargin CSID register tuning feedback
+ * @phy_idx: PHY idx
+ *
+ */
+void cam_csiphy_update_qmargin_csid_vals(void *data, int phy_idx);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API allows to print all the cphy/dphy specific status registers
+ */
+int cam_csiphy_dump_status_reg(struct csiphy_device *csiphy_dev);
+
+/**
+ * @phy_idx : To acquire the correct PHY hw to do the operation with
+ * @aon_camera_id : AON Camera ID
+ *
+ * This API provides Utility/helper function to register AON Hw support for
+ * correct PHY hw.
+ *
+ */
+int cam_csiphy_util_update_aon_registration(uint32_t phy_idx, uint32_t aon_camera_id);
+#endif /* _CAM_CSIPHY_CORE_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
new file mode 100644
index 0000000000..0d79b01534
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#include "cam_csiphy_dev.h" +#include "cam_req_mgr_dev.h" +#include "cam_csiphy_soc.h" +#include "cam_csiphy_core.h" +#include +#include "camera_main.h" +#include + +#define CSIPHY_DEBUGFS_NAME_MAX_SIZE 10 +static struct dentry *root_dentry; + +static inline void cam_csiphy_trigger_reg_dump(struct csiphy_device *csiphy_dev) +{ + cam_csiphy_common_status_reg_dump(csiphy_dev, true); + + if (csiphy_dev->en_full_phy_reg_dump) + cam_csiphy_reg_dump(&csiphy_dev->soc_info); + + if (csiphy_dev->en_lane_status_reg_dump) { + CAM_INFO(CAM_CSIPHY, "Status Reg Dump on failure"); + cam_csiphy_dump_status_reg(csiphy_dev); + } +} + +static void cam_csiphy_clk_dump(struct csiphy_device *csiphy_dev) +{ + int i = 0; + struct cam_hw_soc_info *soc_info; + + soc_info = &csiphy_dev->soc_info; + + if (csiphy_dev->curr_clk_vote_level >= CAM_MAX_VOTE) + return; + + for (i = 0; i < soc_info->num_clk; i++) { + CAM_INFO(CAM_CSIPHY, "PHY[%d] Clock Name=%s Clock Rate=%d", + soc_info->index, + soc_info->clk_name[i], + soc_info->clk_rate[csiphy_dev->curr_clk_vote_level][i]); + } +} + +static int cam_csiphy_format_secure_phy_lane_info( + struct csiphy_device *csiphy_dev, int offset, uint64_t *mask) +{ + struct cam_csiphy_param *param; + uint64_t phy_lane_sel_mask = 0; + + param = &csiphy_dev->csiphy_info[offset]; + + if (param->csiphy_3phase) { + if (param->lane_enable & CPHY_LANE_0) + phy_lane_sel_mask |= LANE_0_SEL; + if (param->lane_enable & CPHY_LANE_1) + phy_lane_sel_mask |= LANE_1_SEL; + if (param->lane_enable & CPHY_LANE_2) + phy_lane_sel_mask |= LANE_2_SEL; + phy_lane_sel_mask <<= CPHY_LANE_SELECTION_SHIFT; + } else { + if (param->lane_enable & DPHY_LANE_0) + phy_lane_sel_mask |= LANE_0_SEL; + if (param->lane_enable & DPHY_LANE_1) + phy_lane_sel_mask |= LANE_1_SEL; + if (param->lane_enable & DPHY_LANE_2) + phy_lane_sel_mask |= LANE_2_SEL; + if (param->lane_enable & DPHY_LANE_3) + phy_lane_sel_mask |= LANE_3_SEL; + phy_lane_sel_mask <<= DPHY_LANE_SELECTION_SHIFT; + } + if 
(csiphy_dev->soc_info.index > MAX_SUPPORTED_PHY_IDX) { + CAM_ERR(CAM_CSIPHY, "Invalid PHY index: %u", + csiphy_dev->soc_info.index); + return -EINVAL; + } + + phy_lane_sel_mask |= BIT(csiphy_dev->soc_info.index); + *mask = phy_lane_sel_mask; + + CAM_DBG(CAM_CSIPHY, "Formatted PHY[%u] phy_lane_sel_mask: 0x%llx", + csiphy_dev->soc_info.index, *mask); + + return 0; + +} + +static int cam_csiphy_get_session_index(struct csiphy_device *csiphy_dev, + uint32_t lane_assign) +{ + int i; + struct cam_csiphy_param *param; + + for (i = 0; i < CSIPHY_MAX_INSTANCES_PER_PHY; i++) { + param = &csiphy_dev->csiphy_info[i]; + + if (param->lane_assign == lane_assign) + break; + } + + return i; +} + +static void cam_csiphy_populate_secure_info( + struct csiphy_device *csiphy_dev, void *data) +{ + int i; + struct cam_csiphy_secure_info *secure_info = + (struct cam_csiphy_secure_info *)data; + struct cam_csiphy_param *param; + + for (i = 0; i < CSIPHY_MAX_INSTANCES_PER_PHY; i++) { + param = &csiphy_dev->csiphy_info[i]; + + if (param->secure_mode && + param->lane_assign == secure_info->lane_assign) { + + param->secure_info.cdm_hw_idx_mask = secure_info->cdm_hw_idx_mask; + param->secure_info.csid_hw_idx_mask = secure_info->csid_hw_idx_mask; + param->secure_info.vc_mask = secure_info->vc_mask; + param->secure_info.phy_lane_sel_mask = 0; + + if (!cam_csiphy_format_secure_phy_lane_info(csiphy_dev, i, + ¶m->csiphy_phy_lane_sel_mask)) { + param->secure_info_updated = true; + + CAM_DBG(CAM_CSIPHY, + "PHY[%d] secure info, phy_lane_mask: 0x%llx, ife: 0x%x, cdm: 0x%x, vc_mask: 0x%llx", + csiphy_dev->soc_info.index, + param->csiphy_phy_lane_sel_mask, + param->secure_info.csid_hw_idx_mask, + param->secure_info.cdm_hw_idx_mask, + param->secure_info.vc_mask); + } else + CAM_ERR(CAM_CSIPHY, + "Error in formatting PHY[%u] phy_lane_sel_mask: 0x%llx", + csiphy_dev->soc_info.index, + param->csiphy_phy_lane_sel_mask); + + break; + } + } + + if (i == CSIPHY_MAX_INSTANCES_PER_PHY) + CAM_ERR(CAM_CSIPHY, "No 
matching secure PHY for a session"); + +} + +static inline int cam_csiphy_drv_ops_util(bool halt, + struct csiphy_device *csiphy_dev, struct cam_csiphy_param *param, + struct cam_subdev_msg_phy_halt_resume_info *halt_resume_info) +{ + int rc = 0, drv_idx = param->conn_csid_idx; + unsigned long clk_rate; + + if (!csiphy_dev->soc_info.is_clk_drv_en || !param->use_hw_client_voting || + !param->is_drv_config_en) + return rc; + + CAM_DBG(CAM_CSIPHY, + "PHY: %d, CSID: %d DRV info : use hw client %d, enable drv config %d, op=%s", + csiphy_dev->soc_info.index, param->conn_csid_idx, + param->use_hw_client_voting, param->is_drv_config_en, + (halt_resume_info->csid_state == CAM_SUBDEV_PHY_CSID_HALT) ? "HALT" : "RESUME"); + + if (halt) { + clk_rate = csiphy_dev->soc_info.applied_src_clk_rates.hw_client[drv_idx].high; + rc = cam_soc_util_set_src_clk_rate(&csiphy_dev->soc_info, + CAM_CLK_SW_CLIENT_IDX, clk_rate, 0); + if (rc) + CAM_ERR(CAM_CSIPHY, + "PHY[%d] CSID HALT: csiphy set_rate %ld failed rc: %d", + csiphy_dev->soc_info.index, clk_rate, rc); + } else { + int32_t src_idx = csiphy_dev->soc_info.src_clk_idx; + uint32_t lowest_clk_level = csiphy_dev->soc_info.lowest_clk_level; + + clk_rate = csiphy_dev->soc_info.clk_rate[lowest_clk_level][src_idx]; + rc = cam_soc_util_set_src_clk_rate(&csiphy_dev->soc_info, + CAM_CLK_SW_CLIENT_IDX, clk_rate, 0); + if (rc) + CAM_ERR(CAM_CSIPHY, + "PHY[%d] CSID RESUME: csiphy _set_rate %ld failed rc: %d", + csiphy_dev->soc_info.index, clk_rate, rc); + } + + return rc; +} + +static void cam_csiphy_subdev_handle_message(struct v4l2_subdev *sd, + enum cam_subdev_message_type_t message_type, void *data) +{ + struct csiphy_device *csiphy_dev = v4l2_get_subdevdata(sd); + uint32_t phy_idx; + int rc = 0; + + if (!data) { + CAM_ERR(CAM_CSIPHY, "Empty Payload"); + return; + } + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "csiphy_dev ptr is NULL"); + return; + } + + phy_idx = *(uint32_t *)data; + if (phy_idx != csiphy_dev->soc_info.index) { + 
CAM_DBG(CAM_CSIPHY, "Current HW IDX: %u, Expected IDX: %u", + csiphy_dev->soc_info.index, phy_idx); + return; + } + + switch (message_type) { + case CAM_SUBDEV_MESSAGE_REG_DUMP: { + cam_csiphy_trigger_reg_dump(csiphy_dev); + cam_csiphy_clk_dump(csiphy_dev); + + break; + } + case CAM_SUBDEV_MESSAGE_APPLY_CSIPHY_AUX: { + cam_csiphy_trigger_reg_dump(csiphy_dev); + cam_csiphy_clk_dump(csiphy_dev); + + if (!csiphy_dev->skip_aux_settings) { + cam_csiphy_update_auxiliary_mask(csiphy_dev); + + CAM_INFO(CAM_CSIPHY, + "CSIPHY[%u] updating aux settings for data rate idx: %u", + csiphy_dev->soc_info.index, csiphy_dev->curr_data_rate_idx); + } + break; + } + case CAM_SUBDEV_MESSAGE_DOMAIN_ID_SECURE_PARAMS: { + cam_csiphy_populate_secure_info(csiphy_dev, data); + + break; + } + case CAM_SUBDEV_MESSAGE_CONN_CSID_INFO: { + struct cam_subdev_msg_phy_conn_csid_info *conn_csid_info = + (struct cam_subdev_msg_phy_conn_csid_info *) data; + int idx; + struct cam_csiphy_param *param; + + idx = cam_csiphy_get_session_index(csiphy_dev, conn_csid_info->lane_cfg); + if (idx >= CSIPHY_MAX_INSTANCES_PER_PHY) { + CAM_ERR(CAM_CSIPHY, "Phy session not found %d %d", + csiphy_dev->soc_info.index, conn_csid_info->lane_cfg); + break; + } + + param = &csiphy_dev->csiphy_info[idx]; + param->conn_csid_idx = conn_csid_info->core_idx; + + CAM_DBG(CAM_CSIPHY, "PHY: %d, CSID: %d connected", csiphy_dev->soc_info.index, + param->conn_csid_idx); + break; + } + case CAM_SUBDEV_MESSAGE_DRV_INFO: { + struct cam_subdev_msg_phy_drv_info *drv_info = + (struct cam_subdev_msg_phy_drv_info *) data; + int idx; + struct cam_csiphy_param *param; + + idx = cam_csiphy_get_session_index(csiphy_dev, drv_info->lane_cfg); + if (idx >= CSIPHY_MAX_INSTANCES_PER_PHY) { + CAM_ERR(CAM_CSIPHY, "Phy session not found %d %d", + csiphy_dev->soc_info.index, drv_info->lane_cfg); + break; + } + + param = &csiphy_dev->csiphy_info[idx]; + param->is_drv_config_en = drv_info->is_drv_config_en; + param->use_hw_client_voting = 
drv_info->use_hw_client_voting; + + CAM_DBG(CAM_CSIPHY, + "PHY: %d, CSID: %d DRV info : use hw client %d, enable drv config %d", + csiphy_dev->soc_info.index, param->conn_csid_idx, + param->use_hw_client_voting, param->is_drv_config_en); + + break; + } + case CAM_SUBDEV_MESSAGE_NOTIFY_HALT_RESUME: { + struct cam_subdev_msg_phy_halt_resume_info *halt_resume_info = + (struct cam_subdev_msg_phy_halt_resume_info *) data; + int idx; + struct cam_csiphy_param *param; + struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info; + + idx = cam_csiphy_get_session_index(csiphy_dev, halt_resume_info->lane_cfg); + if (idx >= CSIPHY_MAX_INSTANCES_PER_PHY) { + CAM_ERR(CAM_CSIPHY, "Phy session not found %d %d", + csiphy_dev->soc_info.index, halt_resume_info->lane_cfg); + break; + } + + param = &csiphy_dev->csiphy_info[idx]; + if (halt_resume_info->csid_state == CAM_SUBDEV_PHY_CSID_HALT) { + if (halt_resume_info->reset_resume_phy) + cam_csiphy_reset(csiphy_dev); + + if (halt_resume_info->do_drv_ops) + rc = cam_csiphy_drv_ops_util(true, csiphy_dev, param, + halt_resume_info); + } else if (halt_resume_info->csid_state == CAM_SUBDEV_PHY_CSID_RESUME) { + if (halt_resume_info->reset_resume_phy) + rc = cam_csiphy_release_from_reset_state(csiphy_dev, + soc_info->reg_map[0].mem_base, idx); + + if (halt_resume_info->do_drv_ops) + rc = cam_csiphy_drv_ops_util(false, csiphy_dev, param, + halt_resume_info); + } else { + CAM_ERR(CAM_CSIPHY, + "CSIPHY:%d Failed to handle CSID halt resume csid_state: %d", + phy_idx, halt_resume_info->csid_state); + } + break; + } + case CAM_SUBDEV_MESSAGE_CDR_SWEEP: { + cam_csiphy_update_qmargin_csid_vals(data, phy_idx); + + break; + } + default: + break; + } +} + +static int cam_csiphy_debug_register(struct csiphy_device *csiphy_dev) +{ + struct dentry *dbgfileptr = NULL; + char debugfs_name[CSIPHY_DEBUGFS_NAME_MAX_SIZE]; + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "null CSIPHY dev ptr"); + return -EINVAL; + } + + if (!cam_debugfs_available()) + return 0; + + if 
(!root_dentry) { + if (cam_debugfs_create_subdir("csiphy", &dbgfileptr)) { + CAM_ERR(CAM_CSIPHY, + "Debugfs could not create directory!"); + return -ENOENT; + } + root_dentry = dbgfileptr; + } + + /* Create the CSIPHY directory for this csiphy */ + snprintf(debugfs_name, CSIPHY_DEBUGFS_NAME_MAX_SIZE, "CSIPHY%d", + csiphy_dev->soc_info.index); + dbgfileptr = debugfs_create_dir(debugfs_name, root_dentry); + if (IS_ERR(dbgfileptr)) { + CAM_ERR(CAM_CSIPHY, "Could not create a debugfs PHY indx subdirectory. rc: %ld", + dbgfileptr); + return -ENOENT; + } + + debugfs_create_bool("en_common_status_reg_dump", 0644, + dbgfileptr, &csiphy_dev->en_common_status_reg_dump); + + debugfs_create_bool("en_lane_status_reg_dump", 0644, + dbgfileptr, &csiphy_dev->en_lane_status_reg_dump); + + debugfs_create_bool("en_full_phy_reg_dump", 0644, + dbgfileptr, &csiphy_dev->en_full_phy_reg_dump); + + debugfs_create_bool("skip_aux_settings", 0644, + dbgfileptr, &csiphy_dev->skip_aux_settings); + + return 0; +} + +static void cam_csiphy_debug_unregister(void) +{ + root_dentry = NULL; +} + +static int cam_csiphy_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct csiphy_device *csiphy_dev = + v4l2_get_subdevdata(sd); + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "csiphy_dev ptr is NULL"); + return -EINVAL; + } + + mutex_lock(&csiphy_dev->mutex); + cam_csiphy_shutdown(csiphy_dev); + mutex_unlock(&csiphy_dev->mutex); + + return 0; +} + +static int cam_csiphy_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_CSIPHY, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_csiphy_subdev_close_internal(sd, fh); +} + +static long cam_csiphy_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + struct csiphy_device *csiphy_dev = v4l2_get_subdevdata(sd); + int rc = 0; + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "csiphy_dev ptr is 
NULL"); + return -EINVAL; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_csiphy_core_cfg(csiphy_dev, arg); + if (rc) + CAM_ERR(CAM_CSIPHY, + "Failed in configuring the device: %d", rc); + break; + case CAM_SD_SHUTDOWN: + if (!cam_req_mgr_is_shutdown()) { + CAM_ERR(CAM_CORE, "SD shouldn't come from user space"); + return 0; + } + + rc = cam_csiphy_subdev_close_internal(sd, NULL); + break; + default: + CAM_ERR(CAM_CSIPHY, "Wrong ioctl : %d", cmd); + rc = -ENOIOCTLCMD; + break; + } + + return rc; +} + +#ifdef CONFIG_COMPAT +static long cam_csiphy_subdev_compat_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + int32_t rc = 0; + struct cam_control cmd_data; + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_CSIPHY, "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } + + /* All the arguments converted to 64 bit here + * Passed to the api in core.c + */ + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_csiphy_subdev_ioctl(sd, cmd, &cmd_data); + if (rc) + CAM_ERR(CAM_CSIPHY, + "Failed in subdev_ioctl: %d", rc); + break; + default: + CAM_ERR(CAM_CSIPHY, "Invalid compat ioctl cmd: %d", cmd); + rc = -ENOIOCTLCMD; + break; + } + + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { + CAM_ERR(CAM_CSIPHY, + "Failed to copy to user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + + return rc; +} +#endif + +static struct v4l2_subdev_core_ops csiphy_subdev_core_ops = { + .ioctl = cam_csiphy_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_csiphy_subdev_compat_ioctl, +#endif +}; + +static const struct v4l2_subdev_ops csiphy_subdev_ops = { + .core = &csiphy_subdev_core_ops, +}; + +static const struct v4l2_subdev_internal_ops csiphy_subdev_intern_ops = { + .close = cam_csiphy_subdev_close, +}; + +static int cam_csiphy_component_bind(struct device *dev, + struct device 
*master_dev, void *data) +{ + struct cam_cpas_register_params cpas_parms; + struct csiphy_device *new_csiphy_dev; + int32_t rc = 0; + struct platform_device *pdev = to_platform_device(dev); + char wq_name[32]; + int i; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + new_csiphy_dev = devm_kzalloc(&pdev->dev, + sizeof(struct csiphy_device), GFP_KERNEL); + if (!new_csiphy_dev) + return -ENOMEM; + + mutex_init(&new_csiphy_dev->mutex); + new_csiphy_dev->v4l2_dev_str.pdev = pdev; + + new_csiphy_dev->soc_info.pdev = pdev; + new_csiphy_dev->soc_info.dev = &pdev->dev; + new_csiphy_dev->soc_info.dev_name = pdev->name; + new_csiphy_dev->ref_count = 0; + new_csiphy_dev->current_data_rate = 0; + new_csiphy_dev->curr_clk_vote_level = 0; + + rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "DT parsing failed: %d", rc); + goto csiphy_no_resource; + } + + /* validate PHY fuse only for CSIPHY4 */ + if ((new_csiphy_dev->soc_info.index == 4) && + !cam_cpas_is_feature_supported( + CAM_CPAS_CSIPHY_FUSE, + (1 << new_csiphy_dev->soc_info.index), NULL)) { + CAM_ERR(CAM_CSIPHY, "PHY%d is not supported", + new_csiphy_dev->soc_info.index); + goto csiphy_no_resource; + } + + if (cam_cpas_query_domain_id_security_support()) + new_csiphy_dev->domain_id_security = true; + + new_csiphy_dev->v4l2_dev_str.internal_ops = + &csiphy_subdev_intern_ops; + new_csiphy_dev->v4l2_dev_str.ops = + &csiphy_subdev_ops; + snprintf(new_csiphy_dev->device_name, + CAM_CTX_DEV_NAME_MAX_LENGTH, + "%s%d", CAMX_CSIPHY_DEV_NAME, + new_csiphy_dev->soc_info.index); + new_csiphy_dev->v4l2_dev_str.name = + new_csiphy_dev->device_name; + new_csiphy_dev->v4l2_dev_str.sd_flags = + (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS); + new_csiphy_dev->v4l2_dev_str.ent_function = + CAM_CSIPHY_DEVICE_TYPE; + new_csiphy_dev->v4l2_dev_str.msg_cb = + cam_csiphy_subdev_handle_message; + new_csiphy_dev->v4l2_dev_str.token = + new_csiphy_dev; + 
new_csiphy_dev->v4l2_dev_str.close_seq_prior = + CAM_SD_CLOSE_MEDIUM_PRIORITY; + + rc = cam_register_subdev(&(new_csiphy_dev->v4l2_dev_str)); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "cam_register_subdev Failed rc: %d", rc); + goto csiphy_no_resource; + } + + platform_set_drvdata(pdev, &(new_csiphy_dev->v4l2_dev_str.sd)); + + for (i = 0; i < CSIPHY_MAX_INSTANCES_PER_PHY; i++) { + new_csiphy_dev->csiphy_info[i].hdl_data.device_hdl = -1; + new_csiphy_dev->csiphy_info[i].hdl_data.session_hdl = -1; + new_csiphy_dev->csiphy_info[i].csiphy_3phase = -1; + new_csiphy_dev->csiphy_info[i].data_rate = 0; + new_csiphy_dev->csiphy_info[i].settle_time = 0; + new_csiphy_dev->csiphy_info[i].lane_cnt = 0; + new_csiphy_dev->csiphy_info[i].lane_assign = 0; + new_csiphy_dev->csiphy_info[i].lane_enable = 0; + new_csiphy_dev->csiphy_info[i].mipi_flags = 0; + new_csiphy_dev->csiphy_info[i].conn_csid_idx = -1; + new_csiphy_dev->csiphy_info[i].use_hw_client_voting = false; + new_csiphy_dev->csiphy_info[i].is_drv_config_en = false; + } + + new_csiphy_dev->ops.get_dev_info = NULL; + new_csiphy_dev->ops.link_setup = NULL; + new_csiphy_dev->ops.apply_req = NULL; + + new_csiphy_dev->acquire_count = 0; + new_csiphy_dev->start_dev_count = 0; + new_csiphy_dev->preamble_enable = 0; + + cpas_parms.cam_cpas_client_cb = NULL; + cpas_parms.cell_index = new_csiphy_dev->soc_info.index; + cpas_parms.dev = &pdev->dev; + cpas_parms.userdata = new_csiphy_dev; + + strscpy(cpas_parms.identifier, "csiphy", CAM_HW_IDENTIFIER_LENGTH); + + rc = cam_cpas_register_client(&cpas_parms); + if (rc) { + CAM_ERR(CAM_CSIPHY, "CPAS registration failed rc: %d", rc); + goto csiphy_unregister_subdev; + } + + CAM_DBG(CAM_CSIPHY, "CPAS registration successful handle=%d", + cpas_parms.client_handle); + new_csiphy_dev->cpas_handle = cpas_parms.client_handle; + + snprintf(wq_name, 32, "%s%d%s", "csiphy", + new_csiphy_dev->soc_info.index, "_wq"); + + rc = cam_csiphy_register_baseaddress(new_csiphy_dev); + if (rc) { + 
CAM_ERR(CAM_CSIPHY, "Failed to register baseaddress, rc: %d", rc); + goto cpas_unregister; + } + + CAM_DBG(CAM_CSIPHY, "%s component bound successfully", + pdev->name); + + cam_csiphy_debug_register(new_csiphy_dev); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +cpas_unregister: + cam_cpas_unregister_client(new_csiphy_dev->cpas_handle); +csiphy_unregister_subdev: + cam_unregister_subdev(&(new_csiphy_dev->v4l2_dev_str)); +csiphy_no_resource: + mutex_destroy(&new_csiphy_dev->mutex); + devm_kfree(&pdev->dev, new_csiphy_dev); + return rc; +} + +static void cam_csiphy_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + struct v4l2_subdev *subdev = NULL; + struct csiphy_device *csiphy_dev = NULL; + + subdev = platform_get_drvdata(pdev); + + if (!subdev) { + CAM_ERR(CAM_CSIPHY, "Error No data in subdev"); + return; + } + + csiphy_dev = v4l2_get_subdevdata(subdev); + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "Error No data in csiphy_dev"); + return; + } + + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "csiphy_dev ptr is NULL"); + return; + } + + cam_csiphy_debug_unregister(); + CAM_INFO(CAM_CSIPHY, "Unbind CSIPHY component"); + cam_cpas_unregister_client(csiphy_dev->cpas_handle); + cam_csiphy_soc_release(csiphy_dev); + mutex_lock(&csiphy_dev->mutex); + cam_csiphy_shutdown(csiphy_dev); + mutex_unlock(&csiphy_dev->mutex); + cam_unregister_subdev(&(csiphy_dev->v4l2_dev_str)); + platform_set_drvdata(pdev, NULL); + v4l2_set_subdevdata(&(csiphy_dev->v4l2_dev_str.sd), NULL); + devm_kfree(&pdev->dev, csiphy_dev); +} + +const static struct component_ops cam_csiphy_component_ops = { + .bind = cam_csiphy_component_bind, + .unbind = cam_csiphy_component_unbind, +}; + +static int32_t cam_csiphy_platform_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_CSIPHY, "Adding 
CSIPHY component"); + rc = component_add(&pdev->dev, &cam_csiphy_component_ops); + if (rc) + CAM_ERR(CAM_CSIPHY, "failed to add component rc: %d", rc); + + return rc; +} + + +static int32_t cam_csiphy_device_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_csiphy_component_ops); + return 0; +} + +static const struct of_device_id cam_csiphy_dt_match[] = { + {.compatible = "qcom,csiphy"}, + {} +}; + +MODULE_DEVICE_TABLE(of, cam_csiphy_dt_match); + +struct platform_driver csiphy_driver = { + .probe = cam_csiphy_platform_probe, + .remove = cam_csiphy_device_remove, + .driver = { + .name = CAMX_CSIPHY_DEV_NAME, + .owner = THIS_MODULE, + .of_match_table = cam_csiphy_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int32_t cam_csiphy_init_module(void) +{ + return platform_driver_register(&csiphy_driver); +} + +void cam_csiphy_exit_module(void) +{ + platform_driver_unregister(&csiphy_driver); +} + +MODULE_DESCRIPTION("CAM CSIPHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h new file mode 100644 index 0000000000..4200289a7c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h @@ -0,0 +1,506 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CSIPHY_DEV_H_ +#define _CAM_CSIPHY_DEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_context.h" + +#define MAX_CSIPHY 8 + +#define CSIPHY_NUM_CLK_MAX 16 + +#define MAX_LANES 5 +#define MAX_SETTINGS_PER_LANE 50 +#define MAX_DATA_RATES 25 +#define MAX_DATA_RATE_REGS 30 + +#define CAMX_CSIPHY_DEV_NAME "cam-csiphy-driver" +#define CAM_CSIPHY_RX_CLK_SRC "cphy_rx_clk_src" +#define CAM_CSIPHY_TIMER_CLK_SRC "phytimer_clk_src" + +#define CSIPHY_DEFAULT_PARAMS BIT(0) +#define CSIPHY_LANE_ENABLE BIT(1) +#define CSIPHY_SETTLE_CNT_LOWER_BYTE BIT(2) +#define CSIPHY_SETTLE_CNT_HIGHER_BYTE BIT(3) +#define CSIPHY_2PH_REGS BIT(4) +#define CSIPHY_3PH_REGS BIT(5) +#define CSIPHY_SKEW_CAL BIT(6) +#define CSIPHY_2PH_COMBO_REGS BIT(7) +#define CSIPHY_3PH_COMBO_REGS BIT(8) +#define CSIPHY_2PH_3PH_COMBO_REGS BIT(9) +#define CSIPHY_AUXILIARY_SETTING BIT(10) +#define CSIPHY_CDR_LN_SETTINGS BIT(11) +#define CSIPHY_SHORT_CHANNEL_PARAMS BIT(12) +#define CSIPHY_STANDARD_CHANNEL_PARAMS BIT(13) +#define CSIPHY_DNP_PARAMS BIT(14) + +#define CSIPHY_MAX_INSTANCES_PER_PHY 3 + +#define CAM_CSIPHY_MAX_DPHY_LANES 4 +#define CAM_CSIPHY_MAX_CPHY_LANES 3 +#define CAM_CSIPHY_MAX_CPHY_DPHY_COMBO_LN 3 +#define CAM_CSIPHY_MAX_DATARATE_VARIANTS 3 +#define CSIPHY_QMARGIN_CMN_STATUS_REG_COUNT 11 + +#define DPHY_LANE_0 BIT(0) +#define CPHY_LANE_0 BIT(1) +#define DPHY_LANE_1 BIT(2) +#define CPHY_LANE_1 BIT(3) +#define DPHY_LANE_2 BIT(4) +#define CPHY_LANE_2 BIT(5) +#define DPHY_LANE_3 BIT(6) +#define DPHY_CLK_LN BIT(7) + +/* Lane info packing for scm call */ +#define LANE_0_SEL BIT(0) +#define LANE_1_SEL BIT(1) +#define LANE_2_SEL BIT(2) +#define LANE_3_SEL BIT(3) +#define CPHY_LANE_SELECTION_SHIFT 16 +#define DPHY_LANE_SELECTION_SHIFT 24 +#define MAX_SUPPORTED_PHY_IDX 7 + +/* PRBS Pattern 
Macros */ +#define PREAMBLE_PATTERN_SET_CHECKER BIT(4) +#define PREAMBLE_PATTERN_BIST_DONE BIT(0) +#define PREAMBLE_MAX_ERR_COUNT_ALLOWED 2 + +enum cam_csiphy_state { + CAM_CSIPHY_INIT, + CAM_CSIPHY_ACQUIRE, + CAM_CSIPHY_START, +}; + +enum cam_csiphy_datarate_channel { + CAM_CSIPHY_DATARATE_SHORT_CHANNEL, + CAM_CSIPHY_DATARATE_STANDARD_CHANNEL, +}; + +/** + * enum cam_csiphy_common_reg_program + * @CAM_CSIPHY_PRGM_ALL : Programs common registers for all CSIPHYs + * @CAM_CSIPHY_PRGM_INDVDL : Programs common registers only for the given CSIPHY + */ +enum cam_csiphy_common_reg_program { + CAM_CSIPHY_PRGM_ALL = 0, + CAM_CSIPHY_PRGM_INDVDL, +}; + +/** + * struct cam_csiphy_secure_info + * + * This is an internal struct that is a reflection of the one + * passed over from csid + * + * @phy_lane_sel_mask: This value to be filled completely by csiphy + * @lane_assign: Lane_cfg value sent over from csid is + * equivalent to lane_assign here + * @vc_mask: Virtual channel masks (Unused for mobile usecase) + * @csid_hw_idx_mask: Bit position denoting CSID(s) in use for secure + * session + * @cdm_hw_idx_mask: Bit position denoting CDM in use for secure + * session + */ +struct cam_csiphy_secure_info { + uint32_t phy_lane_sel_mask; + uint32_t lane_assign; + uint64_t vc_mask; + uint32_t csid_hw_idx_mask; + uint32_t cdm_hw_idx_mask; +}; + +/** + * struct cam_csiphy_aon_sel_params_t + * @aon_cam_sel_offset : AON Cam Select Register offset in cpas top + * @cam_sel_mask : Camera select mask. + * @mclk_sel_mask : MCLK select mask. 
+ */ +struct cam_csiphy_aon_sel_params_t { + uint32_t aon_cam_sel_offset[MAX_AON_CAM]; + uint32_t cam_sel_mask; + uint32_t mclk_sel_mask; +}; + +/** + * struct cam_cphy_dphy_status_reg_params_t + * @csiphy_3ph_status0_offset : CSIPhy 3ph status addr + * @csiphy_2ph_status0_offset : CSIPhy 2ph status addr + * @refgen_offset : Refgen offset + * @cphy_lane_status : CPHY Lane status6 register offsets for each lane + * @csiphy_3ph_status_size : CSIPhy 3ph status registers size + * @csiphy_2ph_status_size : CSIPhy 2ph status registers size + */ +struct cam_cphy_dphy_status_reg_params_t { + uint32_t csiphy_3ph_status0_offset; + uint32_t csiphy_2ph_status0_offset; + uint32_t refgen_status_offset; + uint32_t cphy_lane_status[CAM_CSIPHY_MAX_CPHY_LANES]; + uint16_t csiphy_3ph_status_size; + uint16_t csiphy_2ph_status_size; +}; + +/** + * struct csiphy_reg_parms_t + * @mipi_csiphy_glbl_irq_cmd_addr : CSIPhy irq addr + * @mipi_csiphy_interrupt_status0_addr: CSIPhy interrupt status addr + * @mipi_csiphy_interrupt_clear0_addr : CSIPhy interrupt clear addr + * @status_reg_params : Parameters to read cphy/dphy + * specific status registers + * @size_offset_betn_lanes : Size Offset between consecutive + * 2ph or 3ph lanes + * @csiphy_interrupt_status_size : Number of interrupt status registers + * @csiphy_num_common_status_regs : Number of common status registers + * @csiphy_common_reg_array_size : CSIPhy common array size + * @csiphy_reset_enter_array_size : CSIPhy reset array size + * @csiphy_reset_exit_array_size : CSIPhy reset release array size + * @csiphy_2ph_config_array_size : 2ph settings size + * @csiphy_3ph_config_array_size : 3ph settings size + * @csiphy_2ph_3ph_config_array_size : Size of the 2ph-3ph combo settings array + * @csiphy_2ph_combo_config_array_size: Size of the 2ph-2ph combo settings array + * @csiphy_3ph_combo_config_array_size: Size of the 3ph-3ph combo settings array + * @aon_sel_params : aon selection parameters + */ +struct csiphy_reg_parms_t { +/*MIPI 
CSI PHY registers*/ + uint32_t mipi_csiphy_glbl_irq_cmd_addr; + uint32_t mipi_csiphy_interrupt_status0_addr; + uint32_t mipi_csiphy_interrupt_clear0_addr; + struct cam_cphy_dphy_status_reg_params_t *status_reg_params; + uint32_t size_offset_betn_lanes; + uint32_t csiphy_interrupt_status_size; + uint32_t csiphy_num_common_status_regs; + uint32_t csiphy_common_reg_array_size; + uint32_t csiphy_reset_enter_array_size; + uint32_t csiphy_reset_exit_array_size; + uint32_t csiphy_2ph_config_array_size; + uint32_t csiphy_3ph_config_array_size; + uint32_t csiphy_2ph_3ph_config_array_size; + uint32_t csiphy_2ph_combo_config_array_size; + uint32_t csiphy_3ph_combo_config_array_size; + struct cam_csiphy_aon_sel_params_t *aon_sel_params; +}; + +/** + * struct csiphy_intf_params + * @device_hdl : Device Handle + * @session_hdl : Session Handle + */ +struct csiphy_hdl_tbl { + int32_t device_hdl; + int32_t session_hdl; +}; + +/** + * struct csiphy_reg_t + * @reg_addr : Register address + * @reg_data : Register data + * @delay : Delay in us + * @csiphy_param_type : CSIPhy parameter type + */ +struct csiphy_reg_t { + int32_t reg_addr; + int32_t reg_data; + int32_t delay; + uint32_t csiphy_param_type; +}; + +struct csiphy_device; + +/* + * struct data_rate_reg_info_t + * @bandwidth : max bandwidth supported by this reg settings + * @data_rate_reg_array_size : data rate settings size + * @data_rate_reg_array : array of data rate specific reg value pairs + */ +struct data_rate_reg_info_t { + uint64_t bandwidth; + ssize_t data_rate_reg_array_size; + struct csiphy_reg_t *data_rate_reg_array[MAX_CSIPHY][CAM_CSIPHY_MAX_DATARATE_VARIANTS]; +}; + +/** + * struct data_rate_settings_t + * @num_data_rate_settings: Number of valid settings + * present in the data rate settings array + * @data_rate_settings : Array of regsettings which are specific to + * data rate + */ +struct data_rate_settings_t { + ssize_t num_data_rate_settings; + struct data_rate_reg_info_t *data_rate_settings; +}; + 
+struct bist_reg_settings_t { + uint32_t error_status_val_3ph; + uint32_t error_status_val_2ph; + uint32_t set_status_update_3ph_base_offset; + uint32_t set_status_update_2ph_base_offset; + uint32_t bist_status_3ph_base_offset; + uint32_t bist_status_2ph_base_offset; + uint32_t bist_sensor_data_3ph_status_base_offset; + uint32_t bist_counter_3ph_base_offset; + uint32_t bist_counter_2ph_base_offset; + uint32_t number_of_counters; + ssize_t num_status_reg; + ssize_t num_3ph_bist_settings; + struct csiphy_reg_t *bist_3ph_settings_arry; + ssize_t num_2ph_bist_settings; + struct csiphy_reg_t *bist_2ph_settings_arry; + struct csiphy_reg_t *bist_status_arr; +}; + +/** + * struct csiphy_ctrl_t + * @csiphy_reg : Register address + * @csiphy_common_reg : Common register set + * @csiphy_irq_reg : Irq register set + * @csiphy_reset_enter_regs : Reset register set + * @csiphy_reset_exit_regs : Reset release registers + * @csiphy_lane_config_reg : Lane select register + * @csiphy_bist_reg : Bist register set + * @csiphy_2ph_reg : 2phase register set + * @csiphy_2ph_combo_mode_reg : 2ph-2ph combo register set + * @csiphy_3ph_reg : 3phase register set + * @csiphy_3ph_combo_reg : 3ph-3ph combo register set + * @csiphy_2ph_3ph_mode_reg : 2ph-3ph combo register set + * @getclockvoting : function pointer which is used to find the clock + * voting for the sensor output data rate + * @data_rate_settings_table : Table which maintains the resgister settings specific to data rate + */ +struct csiphy_ctrl_t { + struct csiphy_reg_parms_t *csiphy_reg; + struct csiphy_reg_t *csiphy_common_reg; + struct csiphy_reg_t *csiphy_irq_reg; + struct csiphy_reg_t *csiphy_reset_enter_regs; + struct csiphy_reg_t *csiphy_reset_exit_regs; + struct csiphy_reg_t *csiphy_lane_config_reg; + struct bist_reg_settings_t *csiphy_bist_reg; + struct csiphy_reg_t *csiphy_2ph_reg; + struct csiphy_reg_t *csiphy_2ph_combo_mode_reg; + struct csiphy_reg_t *csiphy_3ph_reg; + struct csiphy_reg_t *csiphy_3ph_combo_reg; + 
struct csiphy_reg_t *csiphy_2ph_3ph_mode_reg; + enum cam_vote_level (*getclockvoting)(struct csiphy_device *phy_dev, int32_t index); + struct data_rate_settings_t *data_rates_settings_table; +}; + +/** + * cam_csiphy_param : Provides cmdbuffer structure + * @lane_assign : Lanes the sensor will be using(One Lane idx in one Nibble) + * @lane_cnt : Total number of lanes to be enabled + * @secure_mode : To identify whether stream is secure/nonsecure + * @lane_enable : Data Lane selection + * @settle_time : Settling time in ms + * @data_rate : Data rate in mbps + * @csiphy_3phase : To identify DPHY or CPHY + * @mipi_flags : MIPI phy flags + * @csiphy_cpas_cp_reg_mask : CP reg mask for phy instance + * @csiphy_phy_lane_sel_mask : Generic format for CP information for PHY and lane + * @hdl_data : CSIPHY handle table + * @secure_info : All domain-id security related information packed in proper + * format for mink call + * @secure_info_updated : If all information in the secure_info struct above + * is passed and formatted properly from CSID driver + * @conn_csid_idx : Connected CSID core idx (Primary csid in case of dual ife) + * @use_hw_client_voting : Whether to use hw client voting for clk on chipsets with cesta + * @is_drv_config_en : If drv is configured in CSID + * @channel_type : Channel type for different channel settings + * @t3_prepare : T3-Prepare in ns + * @t3_preamble : T3-Preamble in ns + */ +struct cam_csiphy_param { + uint16_t lane_assign; + uint8_t lane_cnt; + uint8_t secure_mode; + uint32_t lane_enable; + uint64_t settle_time; + uint64_t data_rate; + int csiphy_3phase; + uint16_t mipi_flags; + uint64_t csiphy_cpas_cp_reg_mask; + uint64_t csiphy_phy_lane_sel_mask; + struct csiphy_hdl_tbl hdl_data; + struct cam_csiphy_secure_info secure_info; + bool secure_info_updated; + int32_t conn_csid_idx; + bool use_hw_client_voting; + bool is_drv_config_en; + uint32_t channel_type; + uint32_t t3_prepare; + uint32_t t3_preamble; +}; + +struct csiphy_work_queue { + 
struct csiphy_device *csiphy_dev; + int32_t acquire_idx; + struct work_struct work; +}; + +/** + * struct cam_csiphy_dev_cdr_sweep_params + * + * @cdr_tolerance : cdr tolerance + * @tolerance_op_type : if tolerance needs to be added/subtracted + * published + * @cdr_sweep_enabled : cdr sweep enabled + */ +struct cam_csiphy_dev_cdr_sweep_params { + uint32_t cdr_tolerance; + uint32_t tolerance_op_type; + bool cdr_sweep_enabled; +}; + +/** + * struct csiphy_qmargin_csid_output + * + * @csi2_rx_status : RX status register value of CSID PHY is + * connected to + * @csi2_total_crc_err : Total CRC errors seen by CSID while + * streaming + * @csi2_total_pkts_rcvd : Total packets received by CSID while + * streaming + * @csi2_err_seen : If CSID receiver encountered error(s) + * while streaming + * @epd_enabled : If EPD was enabled for stream + */ +struct csiphy_qmargin_csid_output { + uint32_t csi2_rx_status; + uint32_t csi2_total_crc_err; + uint32_t csi2_total_pkts_rcvd; + bool csi2_err_seen; + bool epd_enabled; +}; + +/** + * struct csiphy_qmargin_sweep_data + * + * @qmargin_csid_output : Set of CSID register values sent over + * by CSID through cam_subdev_notify_message + * @cdr_regs : Default CDR values needed by Qmargin + * @csiphy_qmargin_output_regs : Register values needed by Qmargin to + * determine sweep results + * @bw : BW at which PHY is streaming at + */ +struct csiphy_qmargin_sweep_data { + struct csiphy_qmargin_csid_output qmargin_csid_output; + struct csiphy_reg_t cdr_regs[CAM_CSIPHY_MAX_CPHY_LANES]; + unsigned int csiphy_qmargin_output_regs[ + CSIPHY_QMARGIN_CMN_STATUS_REG_COUNT]; + uint64_t bw; +}; + +/** + * struct csiphy_device + * @device_name : Device name + * @mutex : ioctl operation mutex + * @hw_version : Hardware Version + * @clk_lane : Clock lane + * @acquire_count : Acquire device count + * @start_dev_count : Start count + * @cpas_handle : CPAS handle + * @session_max_device_support : Max number of devices supported in a session + * @combo_mode 
: Info regarding combo_mode is enable / disable + * @cphy_dphy_combo_mode : Info regarding 2ph/3ph combo modes + * @rx_clk_src_idx : Phy rx clk index + * @timer_clk_src_idx : Phy timer clk index + * @is_divisor_32_comp : 32 bit hw compatibility + * @curr_data_rate_idx : Index of the datarate array which is being used currently by phy + * @csiphy_state : CSIPhy state + * @ctrl_reg : CSIPhy control registers + * @ref_count : Reference count + * @v4l2_dev_str : V4L2 related data + * @csiphy_info : Sensor specific csiphy info + * @soc_info : SOC information + * @current_data_rate : Data rate in mbps + * @curr_clk_vote_level : Currently applied clock vote level + * @csiphy_cpas_cp_reg_mask : Secure csiphy lane mask + * @ops : KMD operations + * @crm_cb : Callback API pointers + * @cdr_params : CDR sweep params + * @qmargin_data : Qmargin params + * @prgm_cmn_reg_across_csiphy : Flag to decide if com settings need to be programmed for all PHYs + * @en_common_status_reg_dump : Debugfs flag to enable common status register dump + * @en_lane_status_reg_dump : Debugfs flag to enable cphy/dphy lane status dump + * @en_full_phy_reg_dump : Debugfs flag to enable the dump for all the Phy registers + * @skip_aux_settings : Debugfs flag to ignore calls to update aux settings + * @domain_id_security : Flag to determine if target has domain-id based security + * @preamble_enable : To enable preamble pattern + */ +struct csiphy_device { + char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + struct mutex mutex; + uint32_t hw_version; + uint32_t clk_lane; + uint32_t acquire_count; + uint32_t start_dev_count; + uint32_t cpas_handle; + uint8_t session_max_device_support; + uint8_t combo_mode; + uint8_t cphy_dphy_combo_mode; + uint8_t rx_clk_src_idx; + uint8_t timer_clk_src_idx; + uint8_t is_divisor_32_comp; + uint8_t curr_data_rate_idx; + enum cam_csiphy_state csiphy_state; + struct csiphy_ctrl_t *ctrl_reg; + int32_t ref_count; + struct cam_subdev v4l2_dev_str; + struct cam_csiphy_param 
csiphy_info[ + CSIPHY_MAX_INSTANCES_PER_PHY]; + struct cam_hw_soc_info soc_info; + uint64_t current_data_rate; + enum cam_vote_level curr_clk_vote_level; + uint64_t csiphy_cpas_cp_reg_mask[ + CSIPHY_MAX_INSTANCES_PER_PHY]; + struct cam_req_mgr_kmd_ops ops; + struct cam_req_mgr_crm_cb *crm_cb; + struct cam_csiphy_dev_cdr_sweep_params cdr_params; + struct csiphy_qmargin_sweep_data qmargin_data; + bool prgm_cmn_reg_across_csiphy; + bool en_common_status_reg_dump; + bool en_lane_status_reg_dump; + bool en_full_phy_reg_dump; + bool skip_aux_settings; + bool domain_id_security; + uint16_t preamble_enable; +}; + +/** + * @brief : API to register CSIPHY hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int32_t cam_csiphy_init_module(void); + +/** + * @brief : API to remove CSIPHY Hw from platform framework. + */ +void cam_csiphy_exit_module(void); + +enum cam_vote_level get_clk_voting_dynamic( + struct csiphy_device *csiphy_dev, int32_t index); + +#endif /* _CAM_CSIPHY_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c new file mode 100644 index 0000000000..f561c98d97 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_csiphy_soc.h" +#include "cam_csiphy_core.h" +#include "include/cam_csiphy_2_1_0_hwreg.h" +#include "include/cam_csiphy_2_1_1_hwreg.h" +#include "include/cam_csiphy_2_1_2_hwreg.h" +#include "include/cam_csiphy_2_1_3_hwreg.h" +#include "include/cam_csiphy_2_2_0_hwreg.h" +#include "include/cam_csiphy_2_2_1_hwreg.h" +#include "include/cam_csiphy_2_3_0_hwreg.h" +#include "include/cam_csiphy_2_4_0_hwreg.h" +#include "include/cam_csiphy_2_4_1_hwreg.h" +#include "cam_mem_mgr_api.h" + +/* Clock divide factor for CPHY spec v1.0 */ +#define CSIPHY_DIVISOR_16 16 +/* Clock divide factor for CPHY spec v1.2 and up */ +#define CSIPHY_DIVISOR_32 32 +/* Clock divide factor for DPHY */ +#define CSIPHY_DIVISOR_8 8 +#define CSIPHY_LOG_BUFFER_SIZE_IN_BYTES 250 +#define ONE_LOG_LINE_MAX_SIZE 20 + +static int cam_csiphy_io_dump(void __iomem *base_addr, uint16_t num_regs, int csiphy_idx) +{ + char *buffer; + uint8_t buffer_offset = 0; + uint8_t rem_buffer_size = CSIPHY_LOG_BUFFER_SIZE_IN_BYTES; + uint16_t i; + uint32_t reg_offset; + + if (!base_addr || !num_regs) { + CAM_ERR(CAM_CSIPHY, "Invalid params. 
base_addr: 0x%p num_regs: %u", + base_addr, num_regs); + return -EINVAL; + } + + buffer = CAM_MEM_ZALLOC(CSIPHY_LOG_BUFFER_SIZE_IN_BYTES, GFP_KERNEL); + if (!buffer) { + CAM_ERR(CAM_CSIPHY, "Could not allocate the memory for buffer"); + return -ENOMEM; + } + + CAM_INFO(CAM_CSIPHY, "Base: 0x%pK num_regs: %u", base_addr, num_regs); + CAM_INFO(CAM_CSIPHY, "CSIPHY:%d Dump", csiphy_idx); + for (i = 0; i < num_regs; i++) { + reg_offset = i << 2; + buffer_offset += scnprintf(buffer + buffer_offset, rem_buffer_size, "0x%x=0x%x\n", + reg_offset, cam_io_r_mb(base_addr + reg_offset)); + + rem_buffer_size = CSIPHY_LOG_BUFFER_SIZE_IN_BYTES - buffer_offset; + + if (rem_buffer_size <= ONE_LOG_LINE_MAX_SIZE) { + buffer[buffer_offset - 1] = '\0'; + pr_info("%s\n", buffer); + buffer_offset = 0; + rem_buffer_size = CSIPHY_LOG_BUFFER_SIZE_IN_BYTES; + } + } + + if (buffer_offset) { + buffer[buffer_offset - 1] = '\0'; + pr_info("%s\n", buffer); + } + + CAM_MEM_FREE(buffer); + + return 0; +} + +int32_t cam_csiphy_common_status_reg_dump(struct csiphy_device *csiphy_dev, + bool dump_to_log) +{ + struct csiphy_reg_parms_t *csiphy_reg = NULL; + int32_t rc = 0; + resource_size_t size = 0; + void __iomem *phy_base = NULL; + int reg_id = 0; + uint32_t val, status_reg, clear_reg; + unsigned int *buffer; + + if (!csiphy_dev) { + rc = -EINVAL; + CAM_ERR(CAM_CSIPHY, "invalid input %d", rc); + return rc; + } + + csiphy_reg = csiphy_dev->ctrl_reg->csiphy_reg; + phy_base = csiphy_dev->soc_info.reg_map[0].mem_base; + status_reg = csiphy_reg->mipi_csiphy_interrupt_status0_addr; + clear_reg = csiphy_reg->mipi_csiphy_interrupt_clear0_addr; + buffer = csiphy_dev->qmargin_data.csiphy_qmargin_output_regs; + size = dump_to_log ? 
csiphy_reg->csiphy_num_common_status_regs : + CSIPHY_QMARGIN_CMN_STATUS_REG_COUNT; + + if (dump_to_log) + CAM_INFO(CAM_CSIPHY, "PHY base addr=%pK offset=0x%x size=%d", + phy_base, status_reg, size); + + if (unlikely(!phy_base)) { + CAM_ERR(CAM_CSIPHY, "phy base is NULL %s", CAM_BOOL_TO_YESNO(phy_base)); + return -EINVAL; + } + + if (unlikely(!size || !buffer)) { + CAM_ERR(CAM_CSIPHY, "Common status read buffer is NULL: %s, reg reads: %d", + CAM_BOOL_TO_YESNO(!buffer), size); + return -EINVAL; + } + + for (reg_id = 0; reg_id < size; reg_id++) { + val = cam_io_r(phy_base + status_reg + (0x4 * reg_id)); + + if (reg_id < csiphy_reg->csiphy_interrupt_status_size) + cam_io_w_mb(val, phy_base + clear_reg + (0x4 * reg_id)); + + if (dump_to_log) + CAM_INFO(CAM_CSIPHY, "CSIPHY%d_COMMON_STATUS%u = 0x%x", + csiphy_dev->soc_info.index, reg_id, val); + if (buffer && (reg_id < CSIPHY_QMARGIN_CMN_STATUS_REG_COUNT)) + buffer[reg_id] = val; + } + + return rc; +} + +int32_t cam_csiphy_reg_dump(struct cam_hw_soc_info *soc_info) +{ + int32_t rc = 0; + resource_size_t size = 0; + void __iomem *addr = NULL; + + if (!soc_info) { + rc = -EINVAL; + CAM_ERR(CAM_CSIPHY, "invalid input %d", rc); + return rc; + } + addr = soc_info->reg_map[0].mem_base; + size = resource_size(soc_info->mem_block[0]); + rc = cam_csiphy_io_dump(addr, (size >> 2), soc_info->index); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "generating dump failed %d", rc); + return rc; + } + return rc; +} + +enum cam_vote_level get_clk_voting_dynamic( + struct csiphy_device *csiphy_dev, int32_t index) +{ + uint32_t cam_vote_level = 0; + uint32_t last_valid_vote = 0; + struct cam_hw_soc_info *soc_info; + uint64_t phy_data_rate = csiphy_dev->csiphy_info[index].data_rate; + + soc_info = &csiphy_dev->soc_info; + phy_data_rate = max(phy_data_rate, csiphy_dev->current_data_rate); + + if (csiphy_dev->csiphy_info[index].csiphy_3phase) { + if (csiphy_dev->is_divisor_32_comp) + do_div(phy_data_rate, CSIPHY_DIVISOR_32); + else + 
do_div(phy_data_rate, CSIPHY_DIVISOR_16); + } else { + do_div(phy_data_rate, CSIPHY_DIVISOR_8); + } + + /* round off to next integer */ + phy_data_rate += 1; + csiphy_dev->current_data_rate = phy_data_rate; + + for (cam_vote_level = 0; cam_vote_level < CAM_MAX_VOTE; cam_vote_level++) { + if (soc_info->clk_level_valid[cam_vote_level] != true) + continue; + + if (soc_info->clk_rate[cam_vote_level] + [csiphy_dev->rx_clk_src_idx] > phy_data_rate) { + CAM_DBG(CAM_CSIPHY, + "Found match PHY:%d clk_name:%s data_rate:%llu clk_rate:%d level:%d", + soc_info->index, + soc_info->clk_name[csiphy_dev->rx_clk_src_idx], + phy_data_rate, + soc_info->clk_rate[cam_vote_level][csiphy_dev->rx_clk_src_idx], + cam_vote_level); + return cam_vote_level; + } + last_valid_vote = cam_vote_level; + } + return last_valid_vote; +} + +int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev, int32_t index) +{ + int32_t rc = 0; + struct cam_hw_soc_info *soc_info; + enum cam_vote_level vote_level; + struct cam_csiphy_param *param = &csiphy_dev->csiphy_info[index]; + int i; + + soc_info = &csiphy_dev->soc_info; + + if (csiphy_dev->ref_count++) { + CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d", + csiphy_dev->ref_count); + goto end; + } + + vote_level = csiphy_dev->ctrl_reg->getclockvoting(csiphy_dev, index); + + for (i = 0; i < soc_info->num_clk; i++) { + CAM_DBG(CAM_CSIPHY, "PHY:%d %s:%d", + soc_info->index, + soc_info->clk_name[i], + soc_info->clk_rate[vote_level][i]); + } + csiphy_dev->curr_clk_vote_level = vote_level; + + rc = cam_soc_util_enable_platform_resource(soc_info, + (soc_info->is_clk_drv_en && param->use_hw_client_voting) ? 
+ param->conn_csid_idx : CAM_CLK_SW_CLIENT_IDX, true, vote_level, true); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "[%s] failed to enable platform resources, clk_drv_en=%d, use_hw_client=%d conn_csid_idx=%d, rc=%d", + soc_info->dev_name, soc_info->is_clk_drv_en, param->use_hw_client_voting, + param->conn_csid_idx, rc); + goto end; + } + + if (soc_info->is_clk_drv_en && param->use_hw_client_voting && param->is_drv_config_en) { + int clk_vote_level_high = vote_level; + int clk_vote_level_low = soc_info->lowest_clk_level; + + rc = cam_soc_util_set_clk_rate_level(soc_info, param->conn_csid_idx, + clk_vote_level_high, clk_vote_level_low, false); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "Failed to set the req clk_rate level[high low]: [%s %s] cesta_client_idx: %d rc: %d", + cam_soc_util_get_string_from_level(clk_vote_level_high), + cam_soc_util_get_string_from_level(clk_vote_level_low), + param->conn_csid_idx, rc); + rc = -EINVAL; + goto disable_platform_resource; + } + + rc = cam_soc_util_cesta_channel_switch(param->conn_csid_idx, "csiphy_start"); + if (rc) { + CAM_ERR(CAM_CSIPHY, + "[%s] Failed to apply power states for crm client:%d rc:%d", + soc_info->dev_name, param->conn_csid_idx, rc); + goto disable_platform_resource; + } + + } + + cam_csiphy_reset(csiphy_dev); + + return rc; + +disable_platform_resource: + cam_soc_util_disable_platform_resource(soc_info, + (soc_info->is_clk_drv_en && param->use_hw_client_voting) ? 
+ param->conn_csid_idx : CAM_CLK_SW_CLIENT_IDX, + true, true); +end: + return rc; +} + +int32_t cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev, int32_t index) +{ + struct cam_hw_soc_info *soc_info; + struct cam_csiphy_param *param; + + if (!csiphy_dev || !csiphy_dev->ref_count) { + CAM_ERR(CAM_CSIPHY, "csiphy dev NULL / ref_count ZERO"); + return 0; + } + soc_info = &csiphy_dev->soc_info; + param = &csiphy_dev->csiphy_info[index]; + + if (--csiphy_dev->ref_count) { + CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d", + csiphy_dev->ref_count); + return 0; + } + + cam_csiphy_reset(csiphy_dev); + + cam_soc_util_disable_platform_resource(soc_info, + (soc_info->is_clk_drv_en && param->use_hw_client_voting) ? + param->conn_csid_idx : CAM_CLK_SW_CLIENT_IDX, + true, true); + + return 0; +} + +int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev, + struct csiphy_device *csiphy_dev) +{ + int32_t rc = 0, i = 0; + uint32_t is_regulator_enable_sync; + struct cam_hw_soc_info *soc_info; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + soc_info = &csiphy_dev->soc_info; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "parsing common soc dt(rc %d)", rc); + return rc; + } + + if (of_property_read_u32(soc_info->dev->of_node, "rgltr-enable-sync", + &is_regulator_enable_sync)) { + /* this is not fatal, overwrite to 0 */ + is_regulator_enable_sync = 0; + } + + csiphy_dev->prgm_cmn_reg_across_csiphy = (bool) is_regulator_enable_sync; + + if (of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.1.0")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_1_0; + csiphy_dev->hw_version = CSIPHY_VERSION_V210; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else if (of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.1.1")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_1_1; + csiphy_dev->hw_version = CSIPHY_VERSION_V211; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else if 
(of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.1.2")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_1_2; + csiphy_dev->hw_version = CSIPHY_VERSION_V212; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else if (of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.1.3")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_1_3; + csiphy_dev->hw_version = CSIPHY_VERSION_V213; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else if (of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.2.0")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_2_0; + csiphy_dev->hw_version = CSIPHY_VERSION_V220; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else if (of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.2.1")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_2_1; + csiphy_dev->hw_version = CSIPHY_VERSION_V221; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else if (of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.3.0")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_3_0; + csiphy_dev->hw_version = CSIPHY_VERSION_V230; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else if (of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.4.0")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_4_0; + csiphy_dev->hw_version = CSIPHY_VERSION_V240; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else if (of_device_is_compatible(soc_info->dev->of_node, "qcom,csiphy-v2.4.1")) { + csiphy_dev->ctrl_reg = &ctrl_reg_2_4_1; + csiphy_dev->hw_version = CSIPHY_VERSION_V241; + csiphy_dev->is_divisor_32_comp = true; + csiphy_dev->clk_lane = 0; + } else { + CAM_ERR(CAM_CSIPHY, "invalid hw version : 0x%x", + csiphy_dev->hw_version); + rc = -EINVAL; + return rc; + } + + if (soc_info->num_clk > CSIPHY_NUM_CLK_MAX) { + CAM_ERR(CAM_CSIPHY, "invalid clk count=%d, max is %d", + soc_info->num_clk, CSIPHY_NUM_CLK_MAX); + return -EINVAL; + } + + 
for (i = 0; i < soc_info->num_clk; i++) { + if (!strcmp(soc_info->clk_name[i], + CAM_CSIPHY_RX_CLK_SRC)) { + csiphy_dev->rx_clk_src_idx = i; + } else if (strnstr(soc_info->clk_name[i], CAM_CSIPHY_TIMER_CLK_SRC, + strlen(soc_info->clk_name[i]))) { + csiphy_dev->timer_clk_src_idx = i; + } + + CAM_DBG(CAM_CSIPHY, "PHY:%d clk_rate[0][%d] = %d", + soc_info->index, i, + soc_info->clk_rate[0][i]); + } + + for (i = 0; i < soc_info->irq_count; i++) + irq_data[i] = csiphy_dev; + + rc = cam_cpas_query_drv_enable(NULL, &soc_info->is_clk_drv_en); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed to query DRV enable rc:%d", rc); + return rc; + } + rc = cam_soc_util_request_platform_resource(&csiphy_dev->soc_info, + cam_csiphy_irq, &(irq_data[0])); + + return rc; +} + +int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev) +{ + if (!csiphy_dev) { + CAM_ERR(CAM_CSIPHY, "csiphy dev NULL"); + return 0; + } + + cam_soc_util_release_platform_resource(&csiphy_dev->soc_info); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h new file mode 100644 index 0000000000..740f5a175a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CSIPHY_SOC_H_ +#define _CAM_CSIPHY_SOC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_csiphy_dev.h" +#include "cam_csiphy_core.h" + +#define CSIPHY_VERSION_V10 0x10 +#define CSIPHY_VERSION_V11 0x11 +#define CSIPHY_VERSION_V12 0x12 +#define CSIPHY_VERSION_V121 0x121 +#define CSIPHY_VERSION_V123 0x123 +#define CSIPHY_VERSION_V124 0x124 +#define CSIPHY_VERSION_V125 0x125 +#define CSIPHY_VERSION_V20 0x20 +#define CSIPHY_VERSION_V201 0x201 +#define CSIPHY_VERSION_V210 0x210 +#define CSIPHY_VERSION_V211 0x211 +#define CSIPHY_VERSION_V212 0x212 +#define CSIPHY_VERSION_V213 0x213 +#define CSIPHY_VERSION_V220 0x220 +#define CSIPHY_VERSION_V221 0x221 +#define CSIPHY_VERSION_V230 0x230 +#define CSIPHY_VERSION_V240 0x240 +#define CSIPHY_VERSION_V241 0x241 + +/** + * @csiphy_dev: CSIPhy device structure + * + * This API release SOC related parameters + */ +int cam_csiphy_soc_release(struct csiphy_device *csiphy_dev); + +/** + * @pdev: Platform device + * @csiphy_dev: CSIPhy device structure + * + * This API parses csiphy device tree information + */ +int cam_csiphy_parse_dt_info(struct platform_device *pdev, + struct csiphy_device *csiphy_dev); + +/** + * @csiphy_dev: CSIPhy device structure + * + * This API enables SOC related parameters + */ +int cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev, int32_t index); + +/** + * @csiphy_dev: CSIPhy device structure + * + * This API disables SOC related parameters + */ +int cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev, int32_t index); + +/** + * @soc_info: Soc info of cam hw driver module + * + * This API dumps memory for the entire mapped region + * (needs to be macro enabled before use) + */ +int cam_csiphy_reg_dump(struct cam_hw_soc_info *soc_info); + +/** + * @csiphy_dev: CSIPhy device structure + * @dump_to_log: Write dump to kernel log + * + * This API dumps memory for the entire status region + */ 
+int32_t cam_csiphy_common_status_reg_dump(struct csiphy_device *csiphy_dev, + bool dump_to_log); +#endif /* _CAM_CSIPHY_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_0_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_0_hwreg.h new file mode 100644 index 0000000000..5dbf78984b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_0_hwreg.h @@ -0,0 +1,938 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CSIPHY_2_1_0_HWREG_H_ +#define _CAM_CSIPHY_2_1_0_HWREG_H_ + +#include "../cam_csiphy_dev.h" + +struct cam_csiphy_aon_sel_params_t aon_cam_select_params = { + .aon_cam_sel_offset[0] = 0x01E0, + .aon_cam_sel_offset[1] = 0, + .cam_sel_mask = BIT(0), + .mclk_sel_mask = BIT(8), +}; + +struct cam_cphy_dphy_status_reg_params_t status_regs_2_1_0 = { + .csiphy_3ph_status0_offset = 0x0340, + .csiphy_2ph_status0_offset = 0x00C0, + .cphy_lane_status = {0x0358, 0x0758, 0x0B58}, + .csiphy_3ph_status_size = 24, + .csiphy_2ph_status_size = 20, +}; + +struct csiphy_reg_t csiphy_lane_en_reg_2_1_0[] = { + {0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE}, +}; + +struct csiphy_reg_t csiphy_common_reg_2_1_0[] = { + {0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_enter_reg_2_1_0[] = { + {0x1000, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_exit_reg_2_1_0[] = { + {0x1000, 0x02, 0x00, CSIPHY_2PH_REGS}, + {0x1000, 0x00, 0x00, CSIPHY_2PH_COMBO_REGS}, + {0x1000, 0x0E, 0xBE8, CSIPHY_3PH_REGS}, +}; + +struct csiphy_reg_t csiphy_irq_reg_2_1_0[] = { + {0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_1_0_reg[] = { + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 
0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_1_0_combo_mode_reg[] = { + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, 
CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_3ph_v2_1_0_reg[] = { + {0x0268, 0xF1, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0x00, 0x02, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x020C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0230, 0x94, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0xA6, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0258, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0248, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x024C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 
0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0x00, 0x02, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0630, 0x94, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0xA6, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0658, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0648, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x064C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0x00, 0x02, 
CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A30, 0x94, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A34, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A38, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A3C, 0xA6, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A58, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A48, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A4C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; +struct csiphy_reg_t bist_3ph_arr_2_1_0[] = { + /* 3Phase BIST CONFIGURATION REG SET */ + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_1_0[] = { + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_1_0 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + .bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_1_0), + .bist_3ph_settings_arry = bist_3ph_arr_2_1_0, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_1_0), + .bist_status_arr = bist_status_arr_2_1_0, +}; + +struct csiphy_reg_t datarate_210_1p2Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x00, CSIPHY_AUXILIARY_SETTING}, +}; + +struct csiphy_reg_t datarate_210_1p5Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x00, CSIPHY_AUXILIARY_SETTING}, +}; + +struct csiphy_reg_t datarate_210_1p7Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0214, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x00, CSIPHY_AUXILIARY_SETTING}, +}; + +struct csiphy_reg_t datarate_210_2p1Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x00, CSIPHY_AUXILIARY_SETTING}, +}; + +struct csiphy_reg_t datarate_210_2p35Gsps[] = { + 
{0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x2E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x2E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x2E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x00, CSIPHY_AUXILIARY_SETTING}, +}; + +struct csiphy_reg_t datarate_210_2p6Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_210_2p8Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_210_3p3Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_210_3p5Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_210_4Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_210_4p5Gsps[] = { + {0x0274, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_210_5Gsps[] = { + {0x0274, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +static struct data_rate_reg_info_t data_rate_settings_2_1_0[] = { + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 2736000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_1p2Gsps), + .data_rate_reg_array[0][0] = datarate_210_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_210_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_210_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_210_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_210_1p2Gsps, + .data_rate_reg_array[5][0] = datarate_210_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_210_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_210_1p2Gsps, + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 3420000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_1p5Gsps), + .data_rate_reg_array[0][0] = datarate_210_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_210_1p5Gsps, + .data_rate_reg_array[2][0] = datarate_210_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_210_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_210_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_210_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_210_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_210_1p5Gsps, + }, + { + /* ((1.7 GSpS) 
* (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 3876000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_1p7Gsps), + .data_rate_reg_array[0][0] = datarate_210_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_210_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_210_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_210_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_210_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_210_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_210_1p7Gsps, + .data_rate_reg_array[7][0] = datarate_210_1p7Gsps, + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 4788000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_2p1Gsps), + .data_rate_reg_array[0][0] = datarate_210_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_210_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_210_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_210_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_210_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_210_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_210_2p1Gsps, + .data_rate_reg_array[7][0] = datarate_210_2p1Gsps, + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5358000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_2p35Gsps), + .data_rate_reg_array[0][0] = datarate_210_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_210_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_210_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_210_2p35Gsps, + .data_rate_reg_array[4][0] = datarate_210_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_210_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_210_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_210_2p35Gsps, + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5928000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_2p6Gsps), + .data_rate_reg_array[0][0] = datarate_210_2p6Gsps, + .data_rate_reg_array[1][0] = 
datarate_210_2p6Gsps, + .data_rate_reg_array[2][0] = datarate_210_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_210_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_210_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_210_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_210_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_210_2p6Gsps, + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_2p8Gsps), + .data_rate_reg_array[0][0] = datarate_210_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_210_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_210_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_210_2p8Gsps, + .data_rate_reg_array[4][0] = datarate_210_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_210_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_210_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_210_2p8Gsps, + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_3p3Gsps), + .data_rate_reg_array[0][0] = datarate_210_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_210_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_210_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_210_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_210_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_210_3p3Gsps, + .data_rate_reg_array[6][0] = datarate_210_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_210_3p3Gsps, + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_3p5Gsps), + .data_rate_reg_array[0][0] = datarate_210_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_210_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_210_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_210_3p5Gsps, + .data_rate_reg_array[4][0] = datarate_210_3p5Gsps, + .data_rate_reg_array[5][0] = 
datarate_210_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_210_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_210_3p5Gsps, + }, + { + /* ((4 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_4Gsps), + .data_rate_reg_array[0][0] = datarate_210_4Gsps, + .data_rate_reg_array[1][0] = datarate_210_4Gsps, + .data_rate_reg_array[2][0] = datarate_210_4Gsps, + .data_rate_reg_array[3][0] = datarate_210_4Gsps, + .data_rate_reg_array[4][0] = datarate_210_4Gsps, + .data_rate_reg_array[5][0] = datarate_210_4Gsps, + .data_rate_reg_array[6][0] = datarate_210_4Gsps, + .data_rate_reg_array[7][0] = datarate_210_4Gsps, + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 10260000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_4p5Gsps), + .data_rate_reg_array[0][0] = datarate_210_4p5Gsps, + .data_rate_reg_array[1][0] = datarate_210_4p5Gsps, + .data_rate_reg_array[2][0] = datarate_210_4p5Gsps, + .data_rate_reg_array[3][0] = datarate_210_4p5Gsps, + .data_rate_reg_array[4][0] = datarate_210_4p5Gsps, + .data_rate_reg_array[5][0] = datarate_210_4p5Gsps, + .data_rate_reg_array[6][0] = datarate_210_4p5Gsps, + .data_rate_reg_array[7][0] = datarate_210_4p5Gsps, + }, + { + /* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 11400000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_210_5Gsps), + .data_rate_reg_array[0][0] = datarate_210_5Gsps, + .data_rate_reg_array[1][0] = datarate_210_5Gsps, + .data_rate_reg_array[2][0] = datarate_210_5Gsps, + .data_rate_reg_array[3][0] = datarate_210_5Gsps, + .data_rate_reg_array[4][0] = datarate_210_5Gsps, + .data_rate_reg_array[5][0] = datarate_210_5Gsps, + .data_rate_reg_array[6][0] = datarate_210_5Gsps, + .data_rate_reg_array[7][0] = datarate_210_5Gsps, + }, +}; + +struct data_rate_settings_t data_rate_delta_table_2_1_0 = { + .num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_1_0), + 
.data_rate_settings = data_rate_settings_2_1_0, +}; + +struct csiphy_reg_parms_t csiphy_v2_1_0 = { + .mipi_csiphy_interrupt_status0_addr = 0x10B0, + .mipi_csiphy_interrupt_clear0_addr = 0x1058, + .mipi_csiphy_glbl_irq_cmd_addr = 0x1028, + .size_offset_betn_lanes = 0x400, + .status_reg_params = &status_regs_2_1_0, + .csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_1_0), + .csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_1_0), + .csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_1_0), + .csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_1_0_reg), + .csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_1_0_reg), + .csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_1_0_combo_mode_reg), + .csiphy_3ph_combo_config_array_size = 0, + .csiphy_2ph_3ph_config_array_size = 0, + .csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_1_0), + .csiphy_num_common_status_regs = 20, + .aon_sel_params = &aon_cam_select_params, +}; + +struct csiphy_ctrl_t ctrl_reg_2_1_0 = { + .csiphy_common_reg = csiphy_common_reg_2_1_0, + .csiphy_2ph_reg = csiphy_2ph_v2_1_0_reg, + .csiphy_3ph_reg = csiphy_3ph_v2_1_0_reg, + .csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_1_0_combo_mode_reg, + .csiphy_3ph_combo_reg = NULL, + .csiphy_2ph_3ph_mode_reg = NULL, + .csiphy_reg = &csiphy_v2_1_0, + .csiphy_irq_reg = csiphy_irq_reg_2_1_0, + .csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_1_0, + .csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_1_0, + .csiphy_lane_config_reg = csiphy_lane_en_reg_2_1_0, + .data_rates_settings_table = &data_rate_delta_table_2_1_0, + .csiphy_bist_reg = &bist_setting_2_1_0, + .getclockvoting = get_clk_voting_dynamic, +}; + +#endif /* _CAM_CSIPHY_2_1_0_HWREG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_1_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_1_hwreg.h new file mode 100644 index 
0000000000..cc40351557 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_1_hwreg.h @@ -0,0 +1,927 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CSIPHY_2_1_1_HWREG_H_ +#define _CAM_CSIPHY_2_1_1_HWREG_H_ + +#include "../cam_csiphy_dev.h" + +struct cam_cphy_dphy_status_reg_params_t status_regs_2_1_1 = { + .csiphy_3ph_status0_offset = 0x0340, + .csiphy_2ph_status0_offset = 0x00C0, + .cphy_lane_status = {0x0358, 0x0758, 0x0B58}, + .csiphy_3ph_status_size = 24, + .csiphy_2ph_status_size = 20, +}; + +struct csiphy_reg_t csiphy_lane_en_reg_2_1_1[] = { + {0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE}, +}; + +struct csiphy_reg_t csiphy_common_reg_2_1_1[] = { + {0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_enter_reg_2_1_1[] = { + {0x1000, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_exit_reg_2_1_1[] = { + {0x1000, 0x02, 0x00, CSIPHY_2PH_REGS}, + {0x1000, 0x00, 0x00, CSIPHY_2PH_COMBO_REGS}, + {0x1000, 0x0E, 0xBE8, CSIPHY_3PH_REGS}, +}; + +struct csiphy_reg_t csiphy_irq_reg_2_1_1[] = { + {0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_1_1_reg[] = { + {0x0094, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_1_1_combo_mode_reg[] = { + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_3ph_v2_1_1_reg[] = { + {0x0268, 0xF1, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0x00, 0x02, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + 
{0x0294, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x020C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0230, 0x94, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0xA6, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0258, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0248, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x024C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0x00, 0x02, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0630, 0x94, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0xA6, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0658, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0648, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x064C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0x00, 0x02, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A30, 0x94, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A34, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A38, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A3C, 0xA6, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A58, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A48, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A4C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_3ph_arr_2_1_1[] = { + /* 3Phase BIST CONFIGURATION REG SET */ + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_1_1[] = { + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 
0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_1_1 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + .bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_1_1), + .bist_3ph_settings_arry = bist_3ph_arr_2_1_1, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_1_1), + .bist_status_arr = bist_status_arr_2_1_1, +}; + +struct csiphy_reg_t datarate_211_1p2Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_1p5Gsps[] = { + {0x0274, 0x07, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_1p7Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_2p1Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_2p35Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x2E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x2E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x2E, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_2p6Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_2p8Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_3p3Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_3p5Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_4Gsps[] = { + {0x0274, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_4p5Gsps[] = { + {0x0274, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0xC1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_211_5Gsps[] = { + {0x0274, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0xC1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x18, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +static struct data_rate_reg_info_t 
data_rate_settings_2_1_1[] = { + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 2736000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_1p2Gsps), + .data_rate_reg_array[0][0] = datarate_211_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_211_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_211_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_211_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_211_1p2Gsps, + .data_rate_reg_array[5][0] = datarate_211_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_211_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_211_1p2Gsps, + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 3420000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_1p5Gsps), + .data_rate_reg_array[0][0] = datarate_211_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_211_1p5Gsps, + .data_rate_reg_array[2][0] = datarate_211_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_211_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_211_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_211_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_211_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_211_1p5Gsps, + }, + { + /* ((1.7 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 3876000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_1p7Gsps), + .data_rate_reg_array[0][0] = datarate_211_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_211_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_211_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_211_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_211_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_211_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_211_1p7Gsps, + .data_rate_reg_array[7][0] = datarate_211_1p7Gsps, + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 4788000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_2p1Gsps), + .data_rate_reg_array[0][0] = 
datarate_211_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_211_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_211_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_211_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_211_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_211_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_211_2p1Gsps, + .data_rate_reg_array[7][0] = datarate_211_2p1Gsps, + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5358000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_2p35Gsps), + .data_rate_reg_array[0][0] = datarate_211_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_211_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_211_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_211_2p35Gsps, + .data_rate_reg_array[4][0] = datarate_211_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_211_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_211_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_211_2p35Gsps, + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5928000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_2p6Gsps), + .data_rate_reg_array[0][0] = datarate_211_2p6Gsps, + .data_rate_reg_array[1][0] = datarate_211_2p6Gsps, + .data_rate_reg_array[2][0] = datarate_211_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_211_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_211_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_211_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_211_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_211_2p6Gsps, + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_2p8Gsps), + .data_rate_reg_array[0][0] = datarate_211_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_211_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_211_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_211_2p8Gsps, + .data_rate_reg_array[4][0] = 
datarate_211_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_211_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_211_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_211_2p8Gsps, + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_3p3Gsps), + .data_rate_reg_array[0][0] = datarate_211_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_211_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_211_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_211_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_211_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_211_3p3Gsps, + .data_rate_reg_array[6][0] = datarate_211_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_211_3p3Gsps, + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_3p5Gsps), + .data_rate_reg_array[0][0] = datarate_211_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_211_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_211_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_211_3p5Gsps, + .data_rate_reg_array[4][0] = datarate_211_3p5Gsps, + .data_rate_reg_array[5][0] = datarate_211_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_211_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_211_3p5Gsps, + }, + { + /* ((4 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_4Gsps), + .data_rate_reg_array[0][0] = datarate_211_4Gsps, + .data_rate_reg_array[1][0] = datarate_211_4Gsps, + .data_rate_reg_array[2][0] = datarate_211_4Gsps, + .data_rate_reg_array[3][0] = datarate_211_4Gsps, + .data_rate_reg_array[4][0] = datarate_211_4Gsps, + .data_rate_reg_array[5][0] = datarate_211_4Gsps, + .data_rate_reg_array[6][0] = datarate_211_4Gsps, + .data_rate_reg_array[7][0] = datarate_211_4Gsps, + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value 
*/ + .bandwidth = 10260000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_4p5Gsps), + .data_rate_reg_array[0][0] = datarate_211_4p5Gsps, + .data_rate_reg_array[1][0] = datarate_211_4p5Gsps, + .data_rate_reg_array[2][0] = datarate_211_4p5Gsps, + .data_rate_reg_array[3][0] = datarate_211_4p5Gsps, + .data_rate_reg_array[4][0] = datarate_211_4p5Gsps, + .data_rate_reg_array[5][0] = datarate_211_4p5Gsps, + .data_rate_reg_array[6][0] = datarate_211_4p5Gsps, + .data_rate_reg_array[7][0] = datarate_211_4p5Gsps, + }, + { + /* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 11400000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_211_5Gsps), + .data_rate_reg_array[0][0] = datarate_211_5Gsps, + .data_rate_reg_array[1][0] = datarate_211_5Gsps, + .data_rate_reg_array[2][0] = datarate_211_5Gsps, + .data_rate_reg_array[3][0] = datarate_211_5Gsps, + .data_rate_reg_array[4][0] = datarate_211_5Gsps, + .data_rate_reg_array[5][0] = datarate_211_5Gsps, + .data_rate_reg_array[6][0] = datarate_211_5Gsps, + .data_rate_reg_array[7][0] = datarate_211_5Gsps, + }, +}; + +struct data_rate_settings_t data_rate_delta_table_2_1_1 = { + .num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_1_1), + .data_rate_settings = data_rate_settings_2_1_1, +}; + +struct csiphy_reg_parms_t csiphy_v2_1_1 = { + .mipi_csiphy_interrupt_status0_addr = 0x10B0, + .mipi_csiphy_interrupt_clear0_addr = 0x1058, + .mipi_csiphy_glbl_irq_cmd_addr = 0x1028, + .size_offset_betn_lanes = 0x400, + .status_reg_params = &status_regs_2_1_1, + .csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_1_1), + .csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_1_1), + .csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_1_1), + .csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_1_1_reg), + .csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_1_1_reg), + .csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_1_1_combo_mode_reg), + 
.csiphy_3ph_combo_config_array_size = 0, + .csiphy_2ph_3ph_config_array_size = 0, + .csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_1_1), + .csiphy_num_common_status_regs = 20, + .aon_sel_params = &aon_cam_select_params, +}; + +struct csiphy_ctrl_t ctrl_reg_2_1_1 = { + .csiphy_common_reg = csiphy_common_reg_2_1_1, + .csiphy_2ph_reg = csiphy_2ph_v2_1_1_reg, + .csiphy_3ph_reg = csiphy_3ph_v2_1_1_reg, + .csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_1_1_combo_mode_reg, + .csiphy_3ph_combo_reg = NULL, + .csiphy_2ph_3ph_mode_reg = NULL, + .csiphy_reg = &csiphy_v2_1_1, + .csiphy_irq_reg = csiphy_irq_reg_2_1_1, + .csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_1_1, + .csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_1_1, + .csiphy_lane_config_reg = csiphy_lane_en_reg_2_1_1, + .data_rates_settings_table = &data_rate_delta_table_2_1_1, + .csiphy_bist_reg = &bist_setting_2_1_1, + .getclockvoting = get_clk_voting_dynamic, +}; + +#endif /* _CAM_CSIPHY_2_1_1_HWREG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_2_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_2_hwreg.h new file mode 100644 index 0000000000..9ba73bddaf --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_2_hwreg.h @@ -0,0 +1,1977 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CSIPHY_2_1_2_HWREG_H_ +#define _CAM_CSIPHY_2_1_2_HWREG_H_ + +#include "../cam_csiphy_dev.h" + +struct cam_csiphy_aon_sel_params_t aon_cam_select_params_2_1_2 = { + .aon_cam_sel_offset[0] = 0x01E0, + .aon_cam_sel_offset[1] = 0, + .cam_sel_mask = BIT(0), + .mclk_sel_mask = BIT(8), +}; + +struct cam_cphy_dphy_status_reg_params_t status_regs_2_1_2 = { + .csiphy_3ph_status0_offset = 0x0340, + .csiphy_2ph_status0_offset = 0x00C0, + .cphy_lane_status = {0x0358, 0x0758, 0x0B58}, + .csiphy_3ph_status_size = 24, + .csiphy_2ph_status_size = 20, +}; + +struct csiphy_reg_t csiphy_lane_en_reg_2_1_2[] = { + {0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE}, +}; + +struct csiphy_reg_t csiphy_common_reg_2_1_2[] = { + {0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_enter_reg_2_1_2[] = { + {0x1000, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_exit_reg_2_1_2[] = { + {0x1000, 0x02, 0x00, CSIPHY_2PH_REGS}, + {0x1000, 0x00, 0x00, CSIPHY_2PH_COMBO_REGS}, + {0x1000, 0x0E, 0xBE8, CSIPHY_3PH_REGS}, +}; + +struct csiphy_reg_t csiphy_irq_reg_2_1_2[] = { + {0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_1_2_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 
0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_1_2_combo_mode_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_3ph_v2_1_2_reg[] = { + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0x03, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0x03, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0x03, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0284, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0684, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_100Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate 
Sensitive*/ + {0x020C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_200Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_300Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, 
CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_350Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x9E, 0x00, 
CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_400Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, 
CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_500Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_600Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 
0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_700Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_800Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_900Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_1p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_1p2Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_1p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x45, 0x00, 
CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_1p7Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_2p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + 
{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_2p1Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 
0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_2p35Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 
0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_2p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 
0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_2p6Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_2p8Gsps[] = { + /* AFE Settings */ 
+ {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_3p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x17, 
0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_3p3Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_3p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_4p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_4p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_5p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x02, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x02, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x02, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_5p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x00, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x00, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x00, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_212_6p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x00, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x00, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x00, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive*/ + {0x020C, 
0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +static struct data_rate_reg_info_t data_rate_settings_2_1_2[] = { + { + /* ((100 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 228000000, + .data_rate_reg_array[0][0] = datarate_212_100Msps, + .data_rate_reg_array[1][0] = datarate_212_100Msps, + .data_rate_reg_array[2][0] = datarate_212_100Msps, + .data_rate_reg_array[3][0] = datarate_212_100Msps, + .data_rate_reg_array[4][0] = datarate_212_100Msps, + .data_rate_reg_array[5][0] = datarate_212_100Msps, + .data_rate_reg_array[6][0] = datarate_212_100Msps, + .data_rate_reg_array[7][0] = datarate_212_100Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_100Msps), + }, + { + /* ((200 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 456000000, + .data_rate_reg_array[0][0] = datarate_212_200Msps, + .data_rate_reg_array[1][0] = datarate_212_200Msps, + .data_rate_reg_array[2][0] = datarate_212_200Msps, + .data_rate_reg_array[3][0] = datarate_212_200Msps, + .data_rate_reg_array[4][0] = datarate_212_200Msps, + .data_rate_reg_array[5][0] = datarate_212_200Msps, + .data_rate_reg_array[6][0] = datarate_212_200Msps, + .data_rate_reg_array[7][0] = datarate_212_200Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_200Msps), + }, + { + /* ((300 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 684000000, + .data_rate_reg_array[0][0] = datarate_212_300Msps, + 
.data_rate_reg_array[1][0] = datarate_212_300Msps, + .data_rate_reg_array[2][0] = datarate_212_300Msps, + .data_rate_reg_array[3][0] = datarate_212_300Msps, + .data_rate_reg_array[4][0] = datarate_212_300Msps, + .data_rate_reg_array[5][0] = datarate_212_300Msps, + .data_rate_reg_array[6][0] = datarate_212_300Msps, + .data_rate_reg_array[7][0] = datarate_212_300Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_300Msps), + }, + { + /* ((350 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 798000000, + .data_rate_reg_array[0][0] = datarate_212_350Msps, + .data_rate_reg_array[1][0] = datarate_212_350Msps, + .data_rate_reg_array[2][0] = datarate_212_350Msps, + .data_rate_reg_array[3][0] = datarate_212_350Msps, + .data_rate_reg_array[4][0] = datarate_212_350Msps, + .data_rate_reg_array[5][0] = datarate_212_350Msps, + .data_rate_reg_array[6][0] = datarate_212_350Msps, + .data_rate_reg_array[7][0] = datarate_212_350Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_350Msps), + }, + { + /* ((400 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 912000000, + .data_rate_reg_array[0][0] = datarate_212_400Msps, + .data_rate_reg_array[1][0] = datarate_212_400Msps, + .data_rate_reg_array[2][0] = datarate_212_400Msps, + .data_rate_reg_array[3][0] = datarate_212_400Msps, + .data_rate_reg_array[4][0] = datarate_212_400Msps, + .data_rate_reg_array[5][0] = datarate_212_400Msps, + .data_rate_reg_array[6][0] = datarate_212_400Msps, + .data_rate_reg_array[7][0] = datarate_212_400Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_400Msps), + }, + { + /* ((500 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 1140000000, + .data_rate_reg_array[0][0] = datarate_212_500Msps, + .data_rate_reg_array[1][0] = datarate_212_500Msps, + .data_rate_reg_array[2][0] = datarate_212_500Msps, + .data_rate_reg_array[3][0] = datarate_212_500Msps, + .data_rate_reg_array[4][0] = datarate_212_500Msps, + 
.data_rate_reg_array[5][0] = datarate_212_500Msps, + .data_rate_reg_array[6][0] = datarate_212_500Msps, + .data_rate_reg_array[7][0] = datarate_212_500Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_500Msps), + }, + { + /* ((600 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 1368000000, + .data_rate_reg_array[0][0] = datarate_212_600Msps, + .data_rate_reg_array[1][0] = datarate_212_600Msps, + .data_rate_reg_array[2][0] = datarate_212_600Msps, + .data_rate_reg_array[3][0] = datarate_212_600Msps, + .data_rate_reg_array[4][0] = datarate_212_600Msps, + .data_rate_reg_array[5][0] = datarate_212_600Msps, + .data_rate_reg_array[6][0] = datarate_212_600Msps, + .data_rate_reg_array[7][0] = datarate_212_600Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_600Msps), + }, + { + /* ((700 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 1596000000, + .data_rate_reg_array[0][0] = datarate_212_700Msps, + .data_rate_reg_array[1][0] = datarate_212_700Msps, + .data_rate_reg_array[2][0] = datarate_212_700Msps, + .data_rate_reg_array[3][0] = datarate_212_700Msps, + .data_rate_reg_array[4][0] = datarate_212_700Msps, + .data_rate_reg_array[5][0] = datarate_212_700Msps, + .data_rate_reg_array[6][0] = datarate_212_700Msps, + .data_rate_reg_array[7][0] = datarate_212_700Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_700Msps), + }, + { + /* ((800 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 1824000000, + .data_rate_reg_array[0][0] = datarate_212_800Msps, + .data_rate_reg_array[1][0] = datarate_212_800Msps, + .data_rate_reg_array[2][0] = datarate_212_800Msps, + .data_rate_reg_array[3][0] = datarate_212_800Msps, + .data_rate_reg_array[4][0] = datarate_212_800Msps, + .data_rate_reg_array[5][0] = datarate_212_800Msps, + .data_rate_reg_array[6][0] = datarate_212_800Msps, + .data_rate_reg_array[7][0] = datarate_212_800Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_800Msps), + }, + { + /* 
((900 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 2052000000, + .data_rate_reg_array[0][0] = datarate_212_900Msps, + .data_rate_reg_array[1][0] = datarate_212_900Msps, + .data_rate_reg_array[2][0] = datarate_212_900Msps, + .data_rate_reg_array[3][0] = datarate_212_900Msps, + .data_rate_reg_array[4][0] = datarate_212_900Msps, + .data_rate_reg_array[5][0] = datarate_212_900Msps, + .data_rate_reg_array[6][0] = datarate_212_900Msps, + .data_rate_reg_array[7][0] = datarate_212_900Msps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_900Msps), + }, + { + /* ((1000 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 2280000000, + .data_rate_reg_array[0][0] = datarate_212_1p0Gsps, + .data_rate_reg_array[1][0] = datarate_212_1p0Gsps, + .data_rate_reg_array[2][0] = datarate_212_1p0Gsps, + .data_rate_reg_array[3][0] = datarate_212_1p0Gsps, + .data_rate_reg_array[4][0] = datarate_212_1p0Gsps, + .data_rate_reg_array[5][0] = datarate_212_1p0Gsps, + .data_rate_reg_array[6][0] = datarate_212_1p0Gsps, + .data_rate_reg_array[7][0] = datarate_212_1p0Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_1p0Gsps), + }, + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 2736000000, + .data_rate_reg_array[0][0] = datarate_212_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_212_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_212_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_212_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_212_1p2Gsps, + .data_rate_reg_array[5][0] = datarate_212_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_212_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_212_1p2Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_1p2Gsps), + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 3420000000, + .data_rate_reg_array[0][0] = datarate_212_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_212_1p5Gsps, + .data_rate_reg_array[2][0] = 
datarate_212_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_212_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_212_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_212_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_212_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_212_1p5Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_1p5Gsps), + }, + { + /* ((1.7 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 3876000000, + .data_rate_reg_array[0][0] = datarate_212_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_212_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_212_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_212_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_212_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_212_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_212_1p7Gsps, + .data_rate_reg_array[7][0] = datarate_212_1p7Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_1p7Gsps), + }, + { + /* ((2.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 4560000000, + .data_rate_reg_array[0][0] = datarate_212_2p0Gsps, + .data_rate_reg_array[1][0] = datarate_212_2p0Gsps, + .data_rate_reg_array[2][0] = datarate_212_2p0Gsps, + .data_rate_reg_array[3][0] = datarate_212_2p0Gsps, + .data_rate_reg_array[4][0] = datarate_212_2p0Gsps, + .data_rate_reg_array[5][0] = datarate_212_2p0Gsps, + .data_rate_reg_array[6][0] = datarate_212_2p0Gsps, + .data_rate_reg_array[7][0] = datarate_212_2p0Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_2p0Gsps), + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 4788000000, + .data_rate_reg_array[0][0] = datarate_212_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_212_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_212_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_212_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_212_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_212_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_212_2p1Gsps, + 
.data_rate_reg_array[7][0] = datarate_212_2p1Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_2p1Gsps), + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5358000000, + .data_rate_reg_array[0][0] = datarate_212_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_212_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_212_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_212_2p35Gsps, + .data_rate_reg_array[4][0] = datarate_212_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_212_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_212_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_212_2p35Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_2p35Gsps), + }, + { + /* ((2.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5700000000, + .data_rate_reg_array[0][0] = datarate_212_2p5Gsps, + .data_rate_reg_array[1][0] = datarate_212_2p5Gsps, + .data_rate_reg_array[2][0] = datarate_212_2p5Gsps, + .data_rate_reg_array[3][0] = datarate_212_2p5Gsps, + .data_rate_reg_array[4][0] = datarate_212_2p5Gsps, + .data_rate_reg_array[5][0] = datarate_212_2p5Gsps, + .data_rate_reg_array[6][0] = datarate_212_2p5Gsps, + .data_rate_reg_array[7][0] = datarate_212_2p5Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_2p5Gsps), + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5928000000, + .data_rate_reg_array[0][0] = datarate_212_2p6Gsps, + .data_rate_reg_array[1][0] = datarate_212_2p6Gsps, + .data_rate_reg_array[2][0] = datarate_212_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_212_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_212_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_212_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_212_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_212_2p6Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_2p6Gsps), + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + 
.data_rate_reg_array[0][0] = datarate_212_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_212_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_212_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_212_2p8Gsps, + .data_rate_reg_array[4][0] = datarate_212_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_212_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_212_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_212_2p8Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_2p8Gsps), + }, + { + /* ((3.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6840000000, + .data_rate_reg_array[0][0] = datarate_212_3p0Gsps, + .data_rate_reg_array[1][0] = datarate_212_3p0Gsps, + .data_rate_reg_array[2][0] = datarate_212_3p0Gsps, + .data_rate_reg_array[3][0] = datarate_212_3p0Gsps, + .data_rate_reg_array[4][0] = datarate_212_3p0Gsps, + .data_rate_reg_array[5][0] = datarate_212_3p0Gsps, + .data_rate_reg_array[6][0] = datarate_212_3p0Gsps, + .data_rate_reg_array[7][0] = datarate_212_3p0Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_3p0Gsps), + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array[0][0] = datarate_212_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_212_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_212_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_212_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_212_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_212_3p3Gsps, + .data_rate_reg_array[6][0] = datarate_212_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_212_3p3Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_3p3Gsps), + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array[0][0] = datarate_212_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_212_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_212_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_212_3p5Gsps, + 
.data_rate_reg_array[4][0] = datarate_212_3p5Gsps, + .data_rate_reg_array[5][0] = datarate_212_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_212_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_212_3p5Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_3p5Gsps), + }, + { + /* ((4 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array[0][0] = datarate_212_4p0Gsps, + .data_rate_reg_array[1][0] = datarate_212_4p0Gsps, + .data_rate_reg_array[2][0] = datarate_212_4p0Gsps, + .data_rate_reg_array[3][0] = datarate_212_4p0Gsps, + .data_rate_reg_array[4][0] = datarate_212_4p0Gsps, + .data_rate_reg_array[5][0] = datarate_212_4p0Gsps, + .data_rate_reg_array[6][0] = datarate_212_4p0Gsps, + .data_rate_reg_array[7][0] = datarate_212_4p0Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_4p0Gsps), + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 10260000000, + .data_rate_reg_array[0][0] = datarate_212_4p5Gsps, + .data_rate_reg_array[1][0] = datarate_212_4p5Gsps, + .data_rate_reg_array[2][0] = datarate_212_4p5Gsps, + .data_rate_reg_array[3][0] = datarate_212_4p5Gsps, + .data_rate_reg_array[4][0] = datarate_212_4p5Gsps, + .data_rate_reg_array[5][0] = datarate_212_4p5Gsps, + .data_rate_reg_array[6][0] = datarate_212_4p5Gsps, + .data_rate_reg_array[7][0] = datarate_212_4p5Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_4p5Gsps), + }, + { + /* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 11400000000, + .data_rate_reg_array[0][0] = datarate_212_5p0Gsps, + .data_rate_reg_array[1][0] = datarate_212_5p0Gsps, + .data_rate_reg_array[2][0] = datarate_212_5p0Gsps, + .data_rate_reg_array[3][0] = datarate_212_5p0Gsps, + .data_rate_reg_array[4][0] = datarate_212_5p0Gsps, + .data_rate_reg_array[5][0] = datarate_212_5p0Gsps, + .data_rate_reg_array[6][0] = datarate_212_5p0Gsps, + .data_rate_reg_array[7][0] = datarate_212_5p0Gsps, + 
.data_rate_reg_array_size = ARRAY_SIZE(datarate_212_5p0Gsps), + }, + { + /* ((5.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 12540000000, + .data_rate_reg_array[0][0] = datarate_212_5p5Gsps, + .data_rate_reg_array[1][0] = datarate_212_5p5Gsps, + .data_rate_reg_array[2][0] = datarate_212_5p5Gsps, + .data_rate_reg_array[3][0] = datarate_212_5p5Gsps, + .data_rate_reg_array[4][0] = datarate_212_5p5Gsps, + .data_rate_reg_array[5][0] = datarate_212_5p5Gsps, + .data_rate_reg_array[6][0] = datarate_212_5p5Gsps, + .data_rate_reg_array[7][0] = datarate_212_5p5Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_5p5Gsps), + }, + { + /* ((6.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 13680000000, + .data_rate_reg_array[0][0] = datarate_212_6p0Gsps, + .data_rate_reg_array[1][0] = datarate_212_6p0Gsps, + .data_rate_reg_array[2][0] = datarate_212_6p0Gsps, + .data_rate_reg_array[3][0] = datarate_212_6p0Gsps, + .data_rate_reg_array[4][0] = datarate_212_6p0Gsps, + .data_rate_reg_array[5][0] = datarate_212_6p0Gsps, + .data_rate_reg_array[6][0] = datarate_212_6p0Gsps, + .data_rate_reg_array[7][0] = datarate_212_6p0Gsps, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_212_6p0Gsps), + }, +}; + +struct csiphy_reg_t bist_3ph_arr_2_1_2[] = { + {0x0230, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0258, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0248, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x024C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0630, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0658, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0648, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x064C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A30, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A34, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A38, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A3C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A58, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AC8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A48, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A4C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_1_2[] = { + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_1_2 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + 
.bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_1_2), + .bist_3ph_settings_arry = bist_3ph_arr_2_1_2, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_1_2), + .bist_status_arr = bist_status_arr_2_1_2, +}; + +struct data_rate_settings_t data_rate_delta_table_2_1_2 = { + .num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_1_2), + .data_rate_settings = data_rate_settings_2_1_2, +}; + +struct csiphy_reg_parms_t csiphy_v2_1_2 = { + .mipi_csiphy_interrupt_status0_addr = 0x10B0, + .mipi_csiphy_interrupt_clear0_addr = 0x1058, + .mipi_csiphy_glbl_irq_cmd_addr = 0x1028, + .size_offset_betn_lanes = 0x400, + .status_reg_params = &status_regs_2_1_2, + .csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_1_2), + .csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_1_2), + .csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_1_2), + .csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_1_2_reg), + .csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_1_2_reg), + .csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_1_2_combo_mode_reg), + .csiphy_3ph_combo_config_array_size = 0, + .csiphy_2ph_3ph_config_array_size = 0, + .csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_1_2), + .csiphy_num_common_status_regs = 20, + .aon_sel_params = &aon_cam_select_params_2_1_2, +}; + +struct csiphy_ctrl_t ctrl_reg_2_1_2 = { + .csiphy_common_reg = csiphy_common_reg_2_1_2, + .csiphy_2ph_reg = csiphy_2ph_v2_1_2_reg, + .csiphy_3ph_reg = csiphy_3ph_v2_1_2_reg, + .csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_1_2_combo_mode_reg, + .csiphy_3ph_combo_reg = NULL, + .csiphy_2ph_3ph_mode_reg = NULL, + .csiphy_reg = &csiphy_v2_1_2, + 
.csiphy_irq_reg = csiphy_irq_reg_2_1_2, + .csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_1_2, + .csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_1_2, + .csiphy_lane_config_reg = csiphy_lane_en_reg_2_1_2, + .data_rates_settings_table = &data_rate_delta_table_2_1_2, + .csiphy_bist_reg = &bist_setting_2_1_2, + .getclockvoting = get_clk_voting_dynamic, +}; + +#endif /* _CAM_CSIPHY_2_1_2_HWREG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_3_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_3_hwreg.h new file mode 100644 index 0000000000..3e04e94bec --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_1_3_hwreg.h @@ -0,0 +1,1478 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CSIPHY_2_1_3_HWREG_H_ +#define _CAM_CSIPHY_2_1_3_HWREG_H_ + +#include "../cam_csiphy_dev.h" + +struct cam_csiphy_aon_sel_params_t aon_cam_select_params_2_1_3 = { + .aon_cam_sel_offset[0] = 0x01E0, + .aon_cam_sel_offset[1] = 0, + .cam_sel_mask = BIT(0), + .mclk_sel_mask = BIT(8), +}; + +struct cam_cphy_dphy_status_reg_params_t status_regs_2_1_3 = { + .csiphy_3ph_status0_offset = 0x0340, + .csiphy_2ph_status0_offset = 0x00C0, + .cphy_lane_status = {0x0358, 0x0758, 0x0B58}, + .csiphy_3ph_status_size = 24, + .csiphy_2ph_status_size = 20, +}; + +struct csiphy_reg_t csiphy_lane_en_reg_2_1_3[] = { + {0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE}, +}; + +struct csiphy_reg_t csiphy_common_reg_2_1_3[] = { + {0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_enter_reg_2_1_3[] = { + {0x1000, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_exit_reg_2_1_3[] = { + {0x1000, 0x02, 0x00, CSIPHY_2PH_REGS}, + {0x1000, 0x00, 0x00, CSIPHY_2PH_COMBO_REGS}, + {0x1000, 0x0E, 0xBE8, CSIPHY_3PH_REGS}, +}; + +struct csiphy_reg_t csiphy_irq_reg_2_1_3[] = { + {0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_1_3_reg[] = { + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, 
CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, 
CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_1_3_combo_mode_reg[] = { + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_3ph_v2_1_3_reg[] = { + {0x0268, 0xF1, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0x00, 0x02, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x020C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0284, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0x00, 0x02, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0684, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0x00, 0x02, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0x64, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_3ph_arr_2_1_3[] = { + /* 3Phase BIST CONFIGURATION REG SET */ + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_1_3[] = { + + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_1_3 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + .bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_1_3), + .bist_3ph_settings_arry = bist_3ph_arr_2_1_3, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_1_3), + .bist_status_arr = bist_status_arr_2_1_3, +}; + +struct csiphy_reg_t datarate_213_100Msps[] = { + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, 
+ {0x068C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0xA70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_213_200Msps[] = { + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_213_300Msps[] = { + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 
0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 400-MSpS step: values identical to the 300-MSpS table; kept as a
 * separate array so each bandwidth entry in data_rate_settings_2_1_3
 * owns its own table. */
struct csiphy_reg_t datarate_213_400Msps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 500-MSpS step: same values as the 300/400-MSpS tables. */
struct csiphy_reg_t datarate_213_500Msps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 600-MSpS step: first rate with non-zero 0xX8C (0x12) and 0xX6C
 * (0x11) values. */
struct csiphy_reg_t datarate_213_600Msps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 700-MSpS step: same values as the 600-MSpS table. */
struct csiphy_reg_t datarate_213_700Msps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00,
CSIPHY_DEFAULT_PARAMS},
};

/* 800-MSpS step: 0xX14 drops to 0x09. */
struct csiphy_reg_t datarate_213_800Msps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 900-MSpS step: same values as the 800-MSpS table. */
struct csiphy_reg_t datarate_213_900Msps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 1.0-GSpS step: same values as the 800/900-MSpS tables. */
struct csiphy_reg_t datarate_213_1p0Gsps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 1.2-GSpS step: same values again. */
struct csiphy_reg_t datarate_213_1p2Gsps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 1.5-GSpS step: 0xX78 drops to 0x4D. */
struct csiphy_reg_t datarate_213_1p5Gsps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	/* NOTE(review): lane 0 writes 0x03 here while the 0x06xx/0x0Axx
	 * banks below write 0x07 to the same offset (0xX74) — confirm the
	 * asymmetry is intentional for this data rate. */
	{0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x4D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 1.7-GSpS step: 0xX74 -> 0x00, 0xX78 -> 0x43 on all banks. */
struct csiphy_reg_t datarate_213_1p7Gsps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x43, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 2.0-GSpS step: 0xX78 -> 0x32, 0xX14 -> 0x00. */
struct csiphy_reg_t datarate_213_2p0Gsps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x32, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 2.1-GSpS step: same values as the 2.0-GSpS table. */
struct csiphy_reg_t datarate_213_2p1Gsps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 2.35-GSpS step: 0xX78 -> 0x2E. */
struct csiphy_reg_t datarate_213_2p35Gsps[] = {
	{0x0270, 0x00, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x2E, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x2E, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x2E, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 2.5-GSpS step: 0xX78 -> 0x28, 0xX90 rises to 0x03. */
struct csiphy_reg_t datarate_213_2p5Gsps[] = {
	{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x00, 0x00,
CSIPHY_DEFAULT_PARAMS},
	/* NOTE(review): the 0x0Axx bank writes 0x07 here while the 0x02xx
	 * and 0x06xx banks write 0x00 to the same offset (0xX74) for this
	 * data rate — confirm the asymmetry is intentional. */
	{0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x11, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 2.6-GSpS step: 0xX70 -> 0x01, 0xX8C -> 0x22, 0xX6C -> 0x1A. */
struct csiphy_reg_t datarate_213_2p6Gsps[] = {
	{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	/* NOTE(review): 0x07 on the 0x0Axx bank vs 0x00 on the other two
	 * banks — same asymmetry as the 2.5-GSpS table; confirm intent. */
	{0x0A74, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x28, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 2.8-GSpS step: 0xX78 -> 0x22, 0xX90 back to 0x02. */
struct csiphy_reg_t datarate_213_2p8Gsps[] = {
	{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 3.0-GSpS step: 0xX78 -> 0x1D, 0xX8C -> 0x34. */
struct csiphy_reg_t datarate_213_3p0Gsps[] = {
	{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};


/* 3.3-GSpS step: identical to 3.0 GSpS except 0xX6C -> 0x1B. */
struct csiphy_reg_t datarate_213_3p3Gsps[] = {
	{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 3.5-GSpS step: 0xX78 -> 0x15. */
struct csiphy_reg_t datarate_213_3p5Gsps[] = {
	{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x34, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x15, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 4.0-GSpS step: 0xX78 -> 0x12. */
struct csiphy_reg_t datarate_213_4Gsps[] = {
	{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 4.5-GSpS step: 0xX78 -> 0x0C, first non-zero 0xX88 (0x04). */
struct csiphy_reg_t datarate_213_4p5Gsps[] = {
	{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x0C, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x34, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/* 5.0-GSpS step (highest rate in this table set): 0xX88 -> 0x50,
 * 0xX8C -> 0x45, 0xX6C -> 0x24. */
struct csiphy_reg_t datarate_213_5Gsps[] = {
	{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0278, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0288, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x028C, 0x45, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x026C, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0678, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0688, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x068C, 0x45, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x066C, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A74, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A78, 0x0C, 0x00,
CSIPHY_DEFAULT_PARAMS},
	{0x0A88, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A8C, 0x45, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A6C, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x108C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};

/*
 * Bandwidth -> settings-table map.  Each entry carries the rounded
 * bandwidth in bits/s (symbol rate * 2.28 bits/symbol, per the
 * per-entry comments) and installs the matching datarate_213_* table
 * for all 8 indices of data_rate_reg_array.
 * NOTE(review): the array continues beyond this chunk; the consumer
 * that searches it is not visible here.
 */
static struct data_rate_reg_info_t data_rate_settings_2_1_3[] = {
	{
		/* ((100 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/
		.bandwidth = 228000000,
		.data_rate_reg_array_size = ARRAY_SIZE(datarate_213_100Msps),
		.data_rate_reg_array[0][0] = datarate_213_100Msps,
		.data_rate_reg_array[1][0] = datarate_213_100Msps,
		.data_rate_reg_array[2][0] = datarate_213_100Msps,
		.data_rate_reg_array[3][0] = datarate_213_100Msps,
		.data_rate_reg_array[4][0] = datarate_213_100Msps,
		.data_rate_reg_array[5][0] = datarate_213_100Msps,
		.data_rate_reg_array[6][0] = datarate_213_100Msps,
		.data_rate_reg_array[7][0] = datarate_213_100Msps
	},
	{
		/* ((200 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/
		.bandwidth = 456000000,
		.data_rate_reg_array_size = ARRAY_SIZE(datarate_213_200Msps),
		.data_rate_reg_array[0][0] = datarate_213_200Msps,
		.data_rate_reg_array[1][0] = datarate_213_200Msps,
		.data_rate_reg_array[2][0] = datarate_213_200Msps,
		.data_rate_reg_array[3][0] = datarate_213_200Msps,
		.data_rate_reg_array[4][0] = datarate_213_200Msps,
		.data_rate_reg_array[5][0] = datarate_213_200Msps,
		.data_rate_reg_array[6][0] = datarate_213_200Msps,
		.data_rate_reg_array[7][0] = datarate_213_200Msps
	},
	{
		/* ((300 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/
		.bandwidth = 684000000,
		.data_rate_reg_array_size = ARRAY_SIZE(datarate_213_300Msps),
		.data_rate_reg_array[0][0] = datarate_213_300Msps,
		.data_rate_reg_array[1][0] = datarate_213_300Msps,
		.data_rate_reg_array[2][0] = datarate_213_300Msps,
		.data_rate_reg_array[3][0] = datarate_213_300Msps,
		.data_rate_reg_array[4][0] = datarate_213_300Msps,
.data_rate_reg_array[5][0] = datarate_213_300Msps, + .data_rate_reg_array[6][0] = datarate_213_300Msps, + .data_rate_reg_array[7][0] = datarate_213_300Msps + }, + { + /* ((400 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 912000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_400Msps), + .data_rate_reg_array[0][0] = datarate_213_400Msps, + .data_rate_reg_array[1][0] = datarate_213_400Msps, + .data_rate_reg_array[2][0] = datarate_213_400Msps, + .data_rate_reg_array[3][0] = datarate_213_400Msps, + .data_rate_reg_array[4][0] = datarate_213_400Msps, + .data_rate_reg_array[5][0] = datarate_213_400Msps, + .data_rate_reg_array[6][0] = datarate_213_400Msps, + .data_rate_reg_array[7][0] = datarate_213_400Msps + }, + { + /* ((500 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 1140000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_500Msps), + .data_rate_reg_array[0][0] = datarate_213_500Msps, + .data_rate_reg_array[1][0] = datarate_213_500Msps, + .data_rate_reg_array[2][0] = datarate_213_500Msps, + .data_rate_reg_array[3][0] = datarate_213_500Msps, + .data_rate_reg_array[4][0] = datarate_213_500Msps, + .data_rate_reg_array[5][0] = datarate_213_500Msps, + .data_rate_reg_array[6][0] = datarate_213_500Msps, + .data_rate_reg_array[7][0] = datarate_213_500Msps + }, + { + /* ((600 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 1368000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_600Msps), + .data_rate_reg_array[0][0] = datarate_213_600Msps, + .data_rate_reg_array[1][0] = datarate_213_600Msps, + .data_rate_reg_array[2][0] = datarate_213_600Msps, + .data_rate_reg_array[3][0] = datarate_213_600Msps, + .data_rate_reg_array[4][0] = datarate_213_600Msps, + .data_rate_reg_array[5][0] = datarate_213_600Msps, + .data_rate_reg_array[6][0] = datarate_213_600Msps, + .data_rate_reg_array[7][0] = datarate_213_600Msps + }, + { + /* ((700 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + 
.bandwidth = 1596000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_700Msps), + .data_rate_reg_array[0][0] = datarate_213_700Msps, + .data_rate_reg_array[1][0] = datarate_213_700Msps, + .data_rate_reg_array[2][0] = datarate_213_700Msps, + .data_rate_reg_array[3][0] = datarate_213_700Msps, + .data_rate_reg_array[4][0] = datarate_213_700Msps, + .data_rate_reg_array[5][0] = datarate_213_700Msps, + .data_rate_reg_array[6][0] = datarate_213_700Msps, + .data_rate_reg_array[7][0] = datarate_213_700Msps + }, + { + /* ((800 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 1824000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_800Msps), + .data_rate_reg_array[0][0] = datarate_213_800Msps, + .data_rate_reg_array[1][0] = datarate_213_800Msps, + .data_rate_reg_array[2][0] = datarate_213_800Msps, + .data_rate_reg_array[3][0] = datarate_213_800Msps, + .data_rate_reg_array[4][0] = datarate_213_800Msps, + .data_rate_reg_array[5][0] = datarate_213_800Msps, + .data_rate_reg_array[6][0] = datarate_213_800Msps, + .data_rate_reg_array[7][0] = datarate_213_800Msps + }, + { + /* ((900 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 2052000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_900Msps), + .data_rate_reg_array[0][0] = datarate_213_900Msps, + .data_rate_reg_array[1][0] = datarate_213_900Msps, + .data_rate_reg_array[2][0] = datarate_213_900Msps, + .data_rate_reg_array[3][0] = datarate_213_900Msps, + .data_rate_reg_array[4][0] = datarate_213_900Msps, + .data_rate_reg_array[5][0] = datarate_213_900Msps, + .data_rate_reg_array[6][0] = datarate_213_900Msps, + .data_rate_reg_array[7][0] = datarate_213_900Msps + }, + { + /* ((1000 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 2280000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_1p0Gsps), + .data_rate_reg_array[0][0] = datarate_213_1p0Gsps, + .data_rate_reg_array[1][0] = datarate_213_1p0Gsps, + .data_rate_reg_array[2][0] = 
datarate_213_1p0Gsps, + .data_rate_reg_array[3][0] = datarate_213_1p0Gsps, + .data_rate_reg_array[4][0] = datarate_213_1p0Gsps, + .data_rate_reg_array[5][0] = datarate_213_1p0Gsps, + .data_rate_reg_array[6][0] = datarate_213_1p0Gsps, + .data_rate_reg_array[7][0] = datarate_213_1p0Gsps + }, + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 2736000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_1p2Gsps), + .data_rate_reg_array[0][0] = datarate_213_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_213_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_213_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_213_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_213_1p2Gsps, + .data_rate_reg_array[5][0] = datarate_213_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_213_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_213_1p2Gsps + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 3420000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_1p5Gsps), + .data_rate_reg_array[0][0] = datarate_213_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_213_1p5Gsps, + .data_rate_reg_array[2][0] = datarate_213_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_213_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_213_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_213_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_213_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_213_1p5Gsps + }, + { + /* ((1.7 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 3876000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_1p7Gsps), + .data_rate_reg_array[0][0] = datarate_213_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_213_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_213_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_213_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_213_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_213_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_213_1p7Gsps, + 
.data_rate_reg_array[7][0] = datarate_213_1p7Gsps + }, + { + /* ((2.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 4560000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_2p0Gsps), + .data_rate_reg_array[0][0] = datarate_213_2p0Gsps, + .data_rate_reg_array[1][0] = datarate_213_2p0Gsps, + .data_rate_reg_array[2][0] = datarate_213_2p0Gsps, + .data_rate_reg_array[3][0] = datarate_213_2p0Gsps, + .data_rate_reg_array[4][0] = datarate_213_2p0Gsps, + .data_rate_reg_array[5][0] = datarate_213_2p0Gsps, + .data_rate_reg_array[6][0] = datarate_213_2p0Gsps, + .data_rate_reg_array[7][0] = datarate_213_2p0Gsps + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 4788000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_2p1Gsps), + .data_rate_reg_array[0][0] = datarate_213_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_213_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_213_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_213_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_213_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_213_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_213_2p1Gsps, + .data_rate_reg_array[7][0] = datarate_213_2p1Gsps + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5358000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_2p35Gsps), + .data_rate_reg_array[0][0] = datarate_213_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_213_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_213_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_213_2p35Gsps, + .data_rate_reg_array[4][0] = datarate_213_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_213_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_213_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_213_2p35Gsps + }, + { + /* ((2.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5700000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_2p5Gsps), + 
.data_rate_reg_array[0][0] = datarate_213_2p5Gsps, + .data_rate_reg_array[1][0] = datarate_213_2p5Gsps, + .data_rate_reg_array[2][0] = datarate_213_2p5Gsps, + .data_rate_reg_array[3][0] = datarate_213_2p5Gsps, + .data_rate_reg_array[4][0] = datarate_213_2p5Gsps, + .data_rate_reg_array[5][0] = datarate_213_2p5Gsps, + .data_rate_reg_array[6][0] = datarate_213_2p5Gsps, + .data_rate_reg_array[7][0] = datarate_213_2p5Gsps + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value*/ + .bandwidth = 5928000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_2p6Gsps), + .data_rate_reg_array[0][0] = datarate_213_2p6Gsps, + .data_rate_reg_array[1][0] = datarate_213_2p6Gsps, + .data_rate_reg_array[2][0] = datarate_213_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_213_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_213_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_213_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_213_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_213_2p6Gsps, + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_2p8Gsps), + .data_rate_reg_array[0][0] = datarate_213_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_213_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_213_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_213_2p8Gsps, + .data_rate_reg_array[4][0] = datarate_213_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_213_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_213_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_213_2p8Gsps, + }, + { + /* ((3.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6840000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_3p0Gsps), + .data_rate_reg_array[0][0] = datarate_213_3p0Gsps, + .data_rate_reg_array[1][0] = datarate_213_3p0Gsps, + .data_rate_reg_array[2][0] = datarate_213_3p0Gsps, + .data_rate_reg_array[3][0] = datarate_213_3p0Gsps, + 
.data_rate_reg_array[4][0] = datarate_213_3p0Gsps, + .data_rate_reg_array[5][0] = datarate_213_3p0Gsps, + .data_rate_reg_array[6][0] = datarate_213_3p0Gsps, + .data_rate_reg_array[7][0] = datarate_213_3p0Gsps, + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_3p3Gsps), + .data_rate_reg_array[0][0] = datarate_213_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_213_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_213_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_213_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_213_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_213_3p3Gsps, + .data_rate_reg_array[6][0] = datarate_213_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_213_3p3Gsps, + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_3p5Gsps), + .data_rate_reg_array[0][0] = datarate_213_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_213_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_213_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_213_3p5Gsps, + .data_rate_reg_array[4][0] = datarate_213_3p5Gsps, + .data_rate_reg_array[5][0] = datarate_213_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_213_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_213_3p5Gsps, + }, + { + /* ((4 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_213_4Gsps), + .data_rate_reg_array[0][0] = datarate_213_4Gsps, + .data_rate_reg_array[1][0] = datarate_213_4Gsps, + .data_rate_reg_array[2][0] = datarate_213_4Gsps, + .data_rate_reg_array[3][0] = datarate_213_4Gsps, + .data_rate_reg_array[4][0] = datarate_213_4Gsps, + .data_rate_reg_array[5][0] = datarate_213_4Gsps, + .data_rate_reg_array[6][0] = datarate_213_4Gsps, + .data_rate_reg_array[7][0] = datarate_213_4Gsps, + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 
bits/symbol)) rounded value */
+	.bandwidth = 10260000000,
+	.data_rate_reg_array_size = ARRAY_SIZE(datarate_213_4p5Gsps),
+	/* Same per-rate sequence is mapped to all 8 reg-array slots */
+	.data_rate_reg_array[0][0] = datarate_213_4p5Gsps,
+	.data_rate_reg_array[1][0] = datarate_213_4p5Gsps,
+	.data_rate_reg_array[2][0] = datarate_213_4p5Gsps,
+	.data_rate_reg_array[3][0] = datarate_213_4p5Gsps,
+	.data_rate_reg_array[4][0] = datarate_213_4p5Gsps,
+	.data_rate_reg_array[5][0] = datarate_213_4p5Gsps,
+	.data_rate_reg_array[6][0] = datarate_213_4p5Gsps,
+	.data_rate_reg_array[7][0] = datarate_213_4p5Gsps,
+	},
+	{
+		/* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */
+		.bandwidth = 11400000000,
+		.data_rate_reg_array_size = ARRAY_SIZE(datarate_213_5Gsps),
+		/* Same per-rate sequence is mapped to all 8 reg-array slots */
+		.data_rate_reg_array[0][0] = datarate_213_5Gsps,
+		.data_rate_reg_array[1][0] = datarate_213_5Gsps,
+		.data_rate_reg_array[2][0] = datarate_213_5Gsps,
+		.data_rate_reg_array[3][0] = datarate_213_5Gsps,
+		.data_rate_reg_array[4][0] = datarate_213_5Gsps,
+		.data_rate_reg_array[5][0] = datarate_213_5Gsps,
+		.data_rate_reg_array[6][0] = datarate_213_5Gsps,
+		.data_rate_reg_array[7][0] = datarate_213_5Gsps,
+	},
+};
+
+/*
+ * Aggregate lookup wrapper: pairs the per-bandwidth settings array above
+ * with its element count, so the driver can select a register sequence
+ * by comparing a requested bandwidth against each entry's .bandwidth.
+ */
+struct data_rate_settings_t data_rate_delta_table_2_1_3 = {
+	.num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_1_3),
+	.data_rate_settings = data_rate_settings_2_1_3,
+};
+
+/*
+ * CSIPHY v2.1.3 register-map parameters: interrupt status/clear/cmd
+ * register offsets, per-lane address stride, and the sizes of the config
+ * tables defined earlier in this header. The offsets are HW-specific
+ * magic values — NOTE(review): do not alter without the v2.1.3 HW
+ * programming guide. 3ph-combo and mixed 2ph+3ph tables are unused here
+ * (sizes 0, matching the NULL pointers in ctrl_reg_2_1_3 below).
+ */
+struct csiphy_reg_parms_t csiphy_v2_1_3 = {
+	.mipi_csiphy_interrupt_status0_addr = 0x10B0,
+	.mipi_csiphy_interrupt_clear0_addr = 0x1058,
+	.mipi_csiphy_glbl_irq_cmd_addr = 0x1028,
+	/* Address stride between consecutive lane register blocks */
+	.size_offset_betn_lanes = 0x400,
+	.status_reg_params = &status_regs_2_1_3,
+	.csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_1_3),
+	.csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_1_3),
+	.csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_1_3),
+	.csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_1_3_reg),
+	.csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_1_3_reg),
+	.csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_1_3_combo_mode_reg),
+	.csiphy_3ph_combo_config_array_size = 0,
+	.csiphy_2ph_3ph_config_array_size = 0,
+	.csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_1_3),
+	.csiphy_num_common_status_regs = 20,
+	.aon_sel_params = &aon_cam_select_params,
+};
+
+/*
+ * Top-level v2.1.3 control descriptor: wires every register table in this
+ * header (common/2ph/3ph/combo/reset/IRQ/lane-enable/data-rate) into the
+ * single structure the CSIPHY core driver consumes. Combo-3ph and mixed
+ * 2ph+3ph mode tables are intentionally NULL for this PHY version.
+ */
+struct csiphy_ctrl_t ctrl_reg_2_1_3 = {
+	.csiphy_common_reg = csiphy_common_reg_2_1_3,
+	.csiphy_2ph_reg = csiphy_2ph_v2_1_3_reg,
+	.csiphy_3ph_reg = csiphy_3ph_v2_1_3_reg,
+	.csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_1_3_combo_mode_reg,
+	.csiphy_3ph_combo_reg = NULL,
+	.csiphy_2ph_3ph_mode_reg = NULL,
+	.csiphy_reg = &csiphy_v2_1_3,
+	.csiphy_irq_reg = csiphy_irq_reg_2_1_3,
+	.csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_1_3,
+	.csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_1_3,
+	.csiphy_lane_config_reg = csiphy_lane_en_reg_2_1_3,
+	.data_rates_settings_table = &data_rate_delta_table_2_1_3,
+	.csiphy_bist_reg = &bist_setting_2_1_3,
+	/* Clock-voting callback; defined outside this header */
+	.getclockvoting = get_clk_voting_dynamic,
+};
+
+#endif /* _CAM_CSIPHY_2_1_3_HWREG_H_ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_2_0_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_2_0_hwreg.h
new file mode 100644
index 0000000000..e35343492a
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_2_0_hwreg.h
@@ -0,0 +1,2212 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _CAM_CSIPHY_2_2_0_HWREG_H_
+#define _CAM_CSIPHY_2_2_0_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
+/*
+ * AON (always-on) camera mux selection parameters for CSIPHY v2.2.0:
+ * two select-register offsets, with camera select on bit 0 and MCLK
+ * select on bit 8 of those registers.
+ */
+struct cam_csiphy_aon_sel_params_t aon_cam_select_params_2_2_0 = {
+	.aon_cam_sel_offset[0] = 0x01E0,
+	.aon_cam_sel_offset[1] = 0x01E4,
+	.cam_sel_mask = BIT(0),
+	.mclk_sel_mask = BIT(8),
+};
+
+/*
+ * Status-register layout: base offsets and register counts for 3-phase
+ * (C-PHY) and 2-phase (D-PHY) lane status, the refgen status register,
+ * and the per-lane C-PHY status register offsets.
+ */
+struct cam_cphy_dphy_status_reg_params_t status_regs_2_2_0 = {
+	.csiphy_3ph_status0_offset = 0x0340,
+	.csiphy_2ph_status0_offset = 0x00C0,
+	.refgen_status_offset = 0x10FC,
+	.cphy_lane_status = {0x0358, 0x0758, 0x0B58},
+	.csiphy_3ph_status_size = 24,
+	.csiphy_2ph_status_size = 20,
+};
+
+/*
+ * Lane-enable register. Value 0x00 here is a placeholder; the
+ * CSIPHY_LANE_ENABLE tag marks it for runtime substitution by the core
+ * driver — NOTE(review): confirm against the table-applier in
+ * cam_csiphy_core.
+ */
+struct csiphy_reg_t csiphy_lane_en_reg_2_2_0[] = {
+	{0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE},
+};
+
+/*
+ * Common (mode-independent) init sequence. Entry layout follows
+ * csiphy_reg_t: {reg offset, value, delay, cfg type} — NOTE(review):
+ * confirm field order against the csiphy_reg_t definition.
+ */
+struct csiphy_reg_t csiphy_common_reg_2_2_0[] = {
+	{0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x108C, 0x00, 0x01, CSIPHY_DEFAULT_PARAMS},
+	{0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+/* Reset-assert sequence (single write to 0x1000) */
+struct csiphy_reg_t csiphy_reset_enter_reg_2_2_0[] = {
+	{0x1000, 0x01, 0x14, CSIPHY_DEFAULT_PARAMS},
+};
+
+/*
+ * Reset de-assert sequence; the value written to 0x1000 depends on the
+ * active PHY mode (2ph / 2ph-combo / 3ph), selected via the cfg-type tag.
+ */
+struct csiphy_reg_t csiphy_reset_exit_reg_2_2_0[] = {
+	{0x1000, 0x02, 0x00, CSIPHY_2PH_REGS},
+	{0x1000, 0x00, 0x00, CSIPHY_2PH_COMBO_REGS},
+	{0x1000, 0x0E, 0x0A, CSIPHY_3PH_REGS},
+};
+
+/* IRQ mask programming: consecutive registers 0x102C..0x1054 */
+struct csiphy_reg_t csiphy_irq_reg_2_2_0[] = {
+	{0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS},
+};
+
+/*
+ * 2-phase (D-PHY) mode init sequence for v2.2.0. Per-lane blocks are at
+ * 0x400-aligned bases (0x0000/0x0400/0x0800/0x0C00, clock lane at
+ * 0x0E00-range); settle-count and skew-cal entries are tagged so the
+ * driver can patch/skip them at runtime.
+ */
+struct csiphy_reg_t csiphy_2ph_v2_2_0_reg[] = {
+	{0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0E90, 0x0F, 0x00,
CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x005C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_2_0_combo_mode_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x005C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_3ph_v2_2_0_reg[] = { + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0x03, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0x03, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0x03, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 
0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0284, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0684, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_80Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_100Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 
0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_200Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, 
CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_300Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_350Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_400Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_500Msps[] = { + /* AFE Settings 
*/ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + 
{0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_600Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x58, 0x00, 
CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_700Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 
0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_800Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_900Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 
0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_1p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, 
CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_1p2Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_1p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_1p7Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_2p0Gsps[] = { + /* AFE Settings 
*/ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + 
{0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_2p1Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x26, 0x00, 
CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_2p35Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 
0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_2p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_2p6Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 
0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_2p8Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, 
CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_3p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_3p3Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_3p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_4p0Gsps[] = { + /* AFE Settings 
*/ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_4p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_5p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x82, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0278, 0x82, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x82, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0678, 0x82, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x82, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A78, 0x82, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_5p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x80, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0278, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0x41, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x80, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0678, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x80, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A78, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_220_6p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x80, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0278, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x80, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0678, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x80, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A78, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x33, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +static struct data_rate_reg_info_t data_rate_settings_2_2_0[] = { + { + /* ((80 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 182400000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_80Msps), + .data_rate_reg_array[0][0] = datarate_220_80Msps, + .data_rate_reg_array[1][0] = datarate_220_80Msps, + .data_rate_reg_array[2][0] = datarate_220_80Msps, + .data_rate_reg_array[3][0] = datarate_220_80Msps, + .data_rate_reg_array[4][0] = datarate_220_80Msps, + .data_rate_reg_array[5][0] = datarate_220_80Msps, + .data_rate_reg_array[6][0] = datarate_220_80Msps, + .data_rate_reg_array[7][0] = datarate_220_80Msps, + }, + { + /* ((100 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 228000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_100Msps), + .data_rate_reg_array[0][0] = datarate_220_100Msps, + .data_rate_reg_array[1][0] = datarate_220_100Msps, + .data_rate_reg_array[2][0] = datarate_220_100Msps, + .data_rate_reg_array[3][0] = datarate_220_100Msps, + .data_rate_reg_array[4][0] = datarate_220_100Msps, + .data_rate_reg_array[5][0] = datarate_220_100Msps, + .data_rate_reg_array[6][0] = datarate_220_100Msps, + .data_rate_reg_array[7][0] = datarate_220_100Msps, + }, + { + /* ((200 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 456000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_200Msps), + .data_rate_reg_array[0][0] = datarate_220_200Msps, + .data_rate_reg_array[1][0] = datarate_220_200Msps, + .data_rate_reg_array[2][0] = datarate_220_200Msps, + .data_rate_reg_array[3][0] = 
datarate_220_200Msps, + .data_rate_reg_array[4][0] = datarate_220_200Msps, + .data_rate_reg_array[5][0] = datarate_220_200Msps, + .data_rate_reg_array[6][0] = datarate_220_200Msps, + .data_rate_reg_array[7][0] = datarate_220_200Msps, + }, + { + /* ((300 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 684000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_300Msps), + .data_rate_reg_array[0][0] = datarate_220_300Msps, + .data_rate_reg_array[1][0] = datarate_220_300Msps, + .data_rate_reg_array[2][0] = datarate_220_300Msps, + .data_rate_reg_array[3][0] = datarate_220_300Msps, + .data_rate_reg_array[4][0] = datarate_220_300Msps, + .data_rate_reg_array[5][0] = datarate_220_300Msps, + .data_rate_reg_array[6][0] = datarate_220_300Msps, + .data_rate_reg_array[7][0] = datarate_220_300Msps, + }, + { + /* ((350 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 798000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_350Msps), + .data_rate_reg_array[0][0] = datarate_220_350Msps, + .data_rate_reg_array[1][0] = datarate_220_350Msps, + .data_rate_reg_array[2][0] = datarate_220_350Msps, + .data_rate_reg_array[3][0] = datarate_220_350Msps, + .data_rate_reg_array[4][0] = datarate_220_350Msps, + .data_rate_reg_array[5][0] = datarate_220_350Msps, + .data_rate_reg_array[6][0] = datarate_220_350Msps, + .data_rate_reg_array[7][0] = datarate_220_350Msps, + }, + { + /* ((400 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 912000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_400Msps), + .data_rate_reg_array[0][0] = datarate_220_400Msps, + .data_rate_reg_array[1][0] = datarate_220_400Msps, + .data_rate_reg_array[2][0] = datarate_220_400Msps, + .data_rate_reg_array[3][0] = datarate_220_400Msps, + .data_rate_reg_array[4][0] = datarate_220_400Msps, + .data_rate_reg_array[5][0] = datarate_220_400Msps, + .data_rate_reg_array[6][0] = datarate_220_400Msps, + .data_rate_reg_array[7][0] = datarate_220_400Msps, + 
}, + { + /* ((500 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1140000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_500Msps), + .data_rate_reg_array[0][0] = datarate_220_500Msps, + .data_rate_reg_array[1][0] = datarate_220_500Msps, + .data_rate_reg_array[2][0] = datarate_220_500Msps, + .data_rate_reg_array[3][0] = datarate_220_500Msps, + .data_rate_reg_array[4][0] = datarate_220_500Msps, + .data_rate_reg_array[5][0] = datarate_220_500Msps, + .data_rate_reg_array[6][0] = datarate_220_500Msps, + .data_rate_reg_array[7][0] = datarate_220_500Msps, + }, + { + /* ((600 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1368000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_600Msps), + .data_rate_reg_array[0][0] = datarate_220_600Msps, + .data_rate_reg_array[1][0] = datarate_220_600Msps, + .data_rate_reg_array[2][0] = datarate_220_600Msps, + .data_rate_reg_array[3][0] = datarate_220_600Msps, + .data_rate_reg_array[4][0] = datarate_220_600Msps, + .data_rate_reg_array[5][0] = datarate_220_600Msps, + .data_rate_reg_array[6][0] = datarate_220_600Msps, + .data_rate_reg_array[7][0] = datarate_220_600Msps, + }, + { + /* ((700 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1596000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_700Msps), + .data_rate_reg_array[0][0] = datarate_220_700Msps, + .data_rate_reg_array[1][0] = datarate_220_700Msps, + .data_rate_reg_array[2][0] = datarate_220_700Msps, + .data_rate_reg_array[3][0] = datarate_220_700Msps, + .data_rate_reg_array[4][0] = datarate_220_700Msps, + .data_rate_reg_array[5][0] = datarate_220_700Msps, + .data_rate_reg_array[6][0] = datarate_220_700Msps, + .data_rate_reg_array[7][0] = datarate_220_700Msps, + }, + { + /* ((800 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1824000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_800Msps), + .data_rate_reg_array[0][0] = datarate_220_800Msps, + 
.data_rate_reg_array[1][0] = datarate_220_800Msps, + .data_rate_reg_array[2][0] = datarate_220_800Msps, + .data_rate_reg_array[3][0] = datarate_220_800Msps, + .data_rate_reg_array[4][0] = datarate_220_800Msps, + .data_rate_reg_array[5][0] = datarate_220_800Msps, + .data_rate_reg_array[6][0] = datarate_220_800Msps, + .data_rate_reg_array[7][0] = datarate_220_800Msps, + }, + { + /* ((900 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2052000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_900Msps), + .data_rate_reg_array[0][0] = datarate_220_900Msps, + .data_rate_reg_array[1][0] = datarate_220_900Msps, + .data_rate_reg_array[2][0] = datarate_220_900Msps, + .data_rate_reg_array[3][0] = datarate_220_900Msps, + .data_rate_reg_array[4][0] = datarate_220_900Msps, + .data_rate_reg_array[5][0] = datarate_220_900Msps, + .data_rate_reg_array[6][0] = datarate_220_900Msps, + .data_rate_reg_array[7][0] = datarate_220_900Msps, + }, + { + /* ((1000 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2280000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_1p0Gsps), + .data_rate_reg_array[0][0] = datarate_220_1p0Gsps, + .data_rate_reg_array[1][0] = datarate_220_1p0Gsps, + .data_rate_reg_array[2][0] = datarate_220_1p0Gsps, + .data_rate_reg_array[3][0] = datarate_220_1p0Gsps, + .data_rate_reg_array[4][0] = datarate_220_1p0Gsps, + .data_rate_reg_array[5][0] = datarate_220_1p0Gsps, + .data_rate_reg_array[6][0] = datarate_220_1p0Gsps, + .data_rate_reg_array[7][0] = datarate_220_1p0Gsps, + }, + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2736000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_1p2Gsps), + .data_rate_reg_array[0][0] = datarate_220_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_220_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_220_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_220_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_220_1p2Gsps, + 
.data_rate_reg_array[5][0] = datarate_220_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_220_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_220_1p2Gsps, + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3420000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_1p5Gsps), + .data_rate_reg_array[0][0] = datarate_220_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_220_1p5Gsps, + .data_rate_reg_array[2][0] = datarate_220_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_220_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_220_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_220_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_220_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_220_1p5Gsps, + }, + { + /* ((1.7 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3876000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_1p7Gsps), + .data_rate_reg_array[0][0] = datarate_220_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_220_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_220_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_220_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_220_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_220_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_220_1p7Gsps, + .data_rate_reg_array[7][0] = datarate_220_1p7Gsps, + }, + { + /* ((2.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4560000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_2p0Gsps), + .data_rate_reg_array[0][0] = datarate_220_2p0Gsps, + .data_rate_reg_array[1][0] = datarate_220_2p0Gsps, + .data_rate_reg_array[2][0] = datarate_220_2p0Gsps, + .data_rate_reg_array[3][0] = datarate_220_2p0Gsps, + .data_rate_reg_array[4][0] = datarate_220_2p0Gsps, + .data_rate_reg_array[5][0] = datarate_220_2p0Gsps, + .data_rate_reg_array[6][0] = datarate_220_2p0Gsps, + .data_rate_reg_array[7][0] = datarate_220_2p0Gsps, + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + 
.bandwidth = 4788000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_2p1Gsps), + .data_rate_reg_array[0][0] = datarate_220_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_220_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_220_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_220_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_220_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_220_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_220_2p1Gsps, + .data_rate_reg_array[7][0] = datarate_220_2p1Gsps, + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5358000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_2p35Gsps), + .data_rate_reg_array[0][0] = datarate_220_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_220_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_220_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_220_2p35Gsps, + .data_rate_reg_array[4][0] = datarate_220_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_220_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_220_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_220_2p35Gsps, + }, + { + /* ((2.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5700000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_2p5Gsps), + .data_rate_reg_array[0][0] = datarate_220_2p5Gsps, + .data_rate_reg_array[1][0] = datarate_220_2p5Gsps, + .data_rate_reg_array[2][0] = datarate_220_2p5Gsps, + .data_rate_reg_array[3][0] = datarate_220_2p5Gsps, + .data_rate_reg_array[4][0] = datarate_220_2p5Gsps, + .data_rate_reg_array[5][0] = datarate_220_2p5Gsps, + .data_rate_reg_array[6][0] = datarate_220_2p5Gsps, + .data_rate_reg_array[7][0] = datarate_220_2p5Gsps, + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5928000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_2p6Gsps), + .data_rate_reg_array[0][0] = datarate_220_2p6Gsps, + .data_rate_reg_array[1][0] = datarate_220_2p6Gsps, + 
.data_rate_reg_array[2][0] = datarate_220_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_220_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_220_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_220_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_220_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_220_2p6Gsps, + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_2p8Gsps), + .data_rate_reg_array[0][0] = datarate_220_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_220_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_220_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_220_2p8Gsps, + .data_rate_reg_array[4][0] = datarate_220_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_220_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_220_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_220_2p8Gsps, + }, + { + /* ((3.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6840000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_3p0Gsps), + .data_rate_reg_array[0][0] = datarate_220_3p0Gsps, + .data_rate_reg_array[1][0] = datarate_220_3p0Gsps, + .data_rate_reg_array[2][0] = datarate_220_3p0Gsps, + .data_rate_reg_array[3][0] = datarate_220_3p0Gsps, + .data_rate_reg_array[4][0] = datarate_220_3p0Gsps, + .data_rate_reg_array[5][0] = datarate_220_3p0Gsps, + .data_rate_reg_array[6][0] = datarate_220_3p0Gsps, + .data_rate_reg_array[7][0] = datarate_220_3p0Gsps, + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_3p3Gsps), + .data_rate_reg_array[0][0] = datarate_220_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_220_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_220_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_220_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_220_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_220_3p3Gsps, + 
.data_rate_reg_array[6][0] = datarate_220_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_220_3p3Gsps, + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_3p5Gsps), + .data_rate_reg_array[0][0] = datarate_220_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_220_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_220_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_220_3p5Gsps, + .data_rate_reg_array[4][0] = datarate_220_3p5Gsps, + .data_rate_reg_array[5][0] = datarate_220_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_220_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_220_3p5Gsps, + }, + { + /* ((4.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_4p0Gsps), + .data_rate_reg_array[0][0] = datarate_220_4p0Gsps, + .data_rate_reg_array[1][0] = datarate_220_4p0Gsps, + .data_rate_reg_array[2][0] = datarate_220_4p0Gsps, + .data_rate_reg_array[3][0] = datarate_220_4p0Gsps, + .data_rate_reg_array[4][0] = datarate_220_4p0Gsps, + .data_rate_reg_array[5][0] = datarate_220_4p0Gsps, + .data_rate_reg_array[6][0] = datarate_220_4p0Gsps, + .data_rate_reg_array[7][0] = datarate_220_4p0Gsps, + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 10260000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_4p5Gsps), + .data_rate_reg_array[0][0] = datarate_220_4p5Gsps, + .data_rate_reg_array[1][0] = datarate_220_4p5Gsps, + .data_rate_reg_array[2][0] = datarate_220_4p5Gsps, + .data_rate_reg_array[3][0] = datarate_220_4p5Gsps, + .data_rate_reg_array[4][0] = datarate_220_4p5Gsps, + .data_rate_reg_array[5][0] = datarate_220_4p5Gsps, + .data_rate_reg_array[6][0] = datarate_220_4p5Gsps, + .data_rate_reg_array[7][0] = datarate_220_4p5Gsps, + }, + { + /* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 11400000000, + 
.data_rate_reg_array_size = ARRAY_SIZE(datarate_220_5p0Gsps), + .data_rate_reg_array[0][0] = datarate_220_5p0Gsps, + .data_rate_reg_array[1][0] = datarate_220_5p0Gsps, + .data_rate_reg_array[2][0] = datarate_220_5p0Gsps, + .data_rate_reg_array[3][0] = datarate_220_5p0Gsps, + .data_rate_reg_array[4][0] = datarate_220_5p0Gsps, + .data_rate_reg_array[5][0] = datarate_220_5p0Gsps, + .data_rate_reg_array[6][0] = datarate_220_5p0Gsps, + .data_rate_reg_array[7][0] = datarate_220_5p0Gsps, + }, + { + /* ((5.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 12540000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_5p5Gsps), + .data_rate_reg_array[0][0] = datarate_220_5p5Gsps, + .data_rate_reg_array[1][0] = datarate_220_5p5Gsps, + .data_rate_reg_array[2][0] = datarate_220_5p5Gsps, + .data_rate_reg_array[3][0] = datarate_220_5p5Gsps, + .data_rate_reg_array[4][0] = datarate_220_5p5Gsps, + .data_rate_reg_array[5][0] = datarate_220_5p5Gsps, + .data_rate_reg_array[6][0] = datarate_220_5p5Gsps, + .data_rate_reg_array[7][0] = datarate_220_5p5Gsps, + }, + { + /* ((6.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 13680000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_220_6p0Gsps), + .data_rate_reg_array[0][0] = datarate_220_6p0Gsps, + .data_rate_reg_array[1][0] = datarate_220_6p0Gsps, + .data_rate_reg_array[2][0] = datarate_220_6p0Gsps, + .data_rate_reg_array[3][0] = datarate_220_6p0Gsps, + .data_rate_reg_array[4][0] = datarate_220_6p0Gsps, + .data_rate_reg_array[5][0] = datarate_220_6p0Gsps, + .data_rate_reg_array[6][0] = datarate_220_6p0Gsps, + .data_rate_reg_array[7][0] = datarate_220_6p0Gsps, + }, +}; + +struct csiphy_reg_t bist_3ph_arr_2_2_0[] = { + {0x0230, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0258, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02C8, 0xAA, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x02D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0248, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x024C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0630, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0658, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0648, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x064C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A30, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A34, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A38, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A3C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A58, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AC8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A48, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A4C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_2_0[] = { + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + 
{0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_2_0 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + .bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_2_0), + .bist_3ph_settings_arry = bist_3ph_arr_2_2_0, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_2_0), + .bist_status_arr = bist_status_arr_2_2_0, +}; + +struct data_rate_settings_t data_rate_delta_table_2_2_0 = { + .num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_2_0), + .data_rate_settings = data_rate_settings_2_2_0, +}; + +struct csiphy_reg_parms_t csiphy_v2_2_0 = { + .mipi_csiphy_interrupt_status0_addr = 0x10B0, + .mipi_csiphy_interrupt_clear0_addr = 0x1058, + .mipi_csiphy_glbl_irq_cmd_addr = 0x1028, + .size_offset_betn_lanes = 0x400, + .status_reg_params = &status_regs_2_2_0, + .csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_2_0), + .csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_2_0), + .csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_2_0), + .csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_2_0_reg), + .csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_2_0_reg), + .csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_2_0_combo_mode_reg), + .csiphy_3ph_combo_config_array_size = 0, + .csiphy_2ph_3ph_config_array_size = 0, + .csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_2_0), + 
.csiphy_num_common_status_regs = 20, + .aon_sel_params = &aon_cam_select_params_2_2_0, +}; + +struct csiphy_ctrl_t ctrl_reg_2_2_0 = { + .csiphy_common_reg = csiphy_common_reg_2_2_0, + .csiphy_2ph_reg = csiphy_2ph_v2_2_0_reg, + .csiphy_3ph_reg = csiphy_3ph_v2_2_0_reg, + .csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_2_0_combo_mode_reg, + .csiphy_3ph_combo_reg = NULL, + .csiphy_2ph_3ph_mode_reg = NULL, + .csiphy_reg = &csiphy_v2_2_0, + .csiphy_irq_reg = csiphy_irq_reg_2_2_0, + .csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_2_0, + .csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_2_0, + .csiphy_lane_config_reg = csiphy_lane_en_reg_2_2_0, + .data_rates_settings_table = &data_rate_delta_table_2_2_0, + .csiphy_bist_reg = &bist_setting_2_2_0, + .getclockvoting = get_clk_voting_dynamic, +}; + +#endif /* _CAM_CSIPHY_2_2_0_HWREG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_2_1_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_2_1_hwreg.h new file mode 100644 index 0000000000..d3ef93c0fe --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_2_1_hwreg.h @@ -0,0 +1,2297 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CSIPHY_2_2_1_HWREG_H_ +#define _CAM_CSIPHY_2_2_1_HWREG_H_ + +#include "../cam_csiphy_dev.h" + +struct cam_csiphy_aon_sel_params_t aon_cam_select_params_2_2_1 = { + .aon_cam_sel_offset[0] = 0x01E0, + .aon_cam_sel_offset[1] = 0x01E4, + .cam_sel_mask = BIT(0), + .mclk_sel_mask = BIT(8), +}; + +struct cam_cphy_dphy_status_reg_params_t status_regs_2_2_1 = { + .csiphy_3ph_status0_offset = 0x0340, + .csiphy_2ph_status0_offset = 0x00C0, + .refgen_status_offset = 0x10FC, + .cphy_lane_status = {0x0358, 0x0758, 0x0B58}, + .csiphy_3ph_status_size = 24, + .csiphy_2ph_status_size = 20, +}; + +struct csiphy_reg_t csiphy_lane_en_reg_2_2_1[] = { + {0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE}, +}; + +struct csiphy_reg_t csiphy_common_reg_2_2_1[] = { + {0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x01, CSIPHY_DEFAULT_PARAMS}, + {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_enter_reg_2_2_1[] = { + {0x1000, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_exit_reg_2_2_1[] = { + {0x1000, 0x02, 0x00, CSIPHY_2PH_REGS}, + {0x1000, 0x00, 0x00, CSIPHY_2PH_COMBO_REGS}, + {0x1000, 0x0E, 0xBE8, CSIPHY_3PH_REGS}, +}; + +struct csiphy_reg_t csiphy_irq_reg_2_2_1[] = { + {0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_2_1_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_2_1_combo_mode_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0xD1, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x04, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_3ph_v2_2_1_reg[] = { + {0x0294, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0xD3, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0xD3, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0xD3, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0284, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0684, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_80Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, 
+ {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_100Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + 
{0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_200Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 
0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x33, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_300Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* 
Datarate Sensitive */ + {0x020C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_350Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_400Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_500Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_600Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_700Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 
0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_800Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, 
CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_900Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, 
CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_1p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x58, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x05, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_1p2Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, 
CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_1p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x45, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 
0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_1p7Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_2p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x2E, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x0D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_2p1Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, 
CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 
0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_2p35Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A74, 0x03, 
0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_2p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + 
{0x0674, 0x00, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x20, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_2p6Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 
0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_2p8Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x98, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, 
CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x98, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x98, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x03, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_3p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x17, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct 
csiphy_reg_t datarate_221_3p3Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_3p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x10, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_4p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A88, 0x20, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_4p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x06, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x2D, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x30, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x37, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_5p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0278, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x02, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0x41, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0678, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x02, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A78, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x02, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t 
datarate_221_5p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0278, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x02, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0678, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x02, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A78, 0x84, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, 
CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x02, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_221_6p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0294, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0278, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0288, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x026C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x028C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x028C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0274, 0x02, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0668, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0678, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0688, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x066C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x068C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x068C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0670, 
0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0674, 0x02, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A68, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A78, 0x81, 0x00, CSIPHY_CDR_LN_SETTINGS | CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A88, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x28, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A6C, 0x03, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A8C, 0x73, 0x00, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A8C, 0x35, 0x00, CSIPHY_STANDARD_CHANNEL_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_SHORT_CHANNEL_PARAMS}, + {0x0A74, 0x02, 0x0A, CSIPHY_STANDARD_CHANNEL_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +static struct data_rate_reg_info_t data_rate_settings_2_2_1[] = { + { + /* ((80 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 182400000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_80Msps), + .data_rate_reg_array[0][0] = datarate_221_80Msps, + .data_rate_reg_array[1][0] = datarate_221_80Msps, + .data_rate_reg_array[2][0] = datarate_221_80Msps, + .data_rate_reg_array[3][0] = datarate_221_80Msps, + .data_rate_reg_array[4][0] = datarate_221_80Msps, + .data_rate_reg_array[5][0] = datarate_221_80Msps, + 
.data_rate_reg_array[6][0] = datarate_221_80Msps, + .data_rate_reg_array[7][0] = datarate_221_80Msps, + }, + { + /* ((100 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 228000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_100Msps), + .data_rate_reg_array[0][0] = datarate_221_100Msps, + .data_rate_reg_array[1][0] = datarate_221_100Msps, + .data_rate_reg_array[2][0] = datarate_221_100Msps, + .data_rate_reg_array[3][0] = datarate_221_100Msps, + .data_rate_reg_array[4][0] = datarate_221_100Msps, + .data_rate_reg_array[5][0] = datarate_221_100Msps, + .data_rate_reg_array[6][0] = datarate_221_100Msps, + .data_rate_reg_array[7][0] = datarate_221_100Msps, + }, + { + /* ((200 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 456000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_200Msps), + .data_rate_reg_array[0][0] = datarate_221_200Msps, + .data_rate_reg_array[1][0] = datarate_221_200Msps, + .data_rate_reg_array[2][0] = datarate_221_200Msps, + .data_rate_reg_array[3][0] = datarate_221_200Msps, + .data_rate_reg_array[4][0] = datarate_221_200Msps, + .data_rate_reg_array[5][0] = datarate_221_200Msps, + .data_rate_reg_array[6][0] = datarate_221_200Msps, + .data_rate_reg_array[7][0] = datarate_221_200Msps, + }, + { + /* ((300 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 684000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_300Msps), + .data_rate_reg_array[0][0] = datarate_221_300Msps, + .data_rate_reg_array[1][0] = datarate_221_300Msps, + .data_rate_reg_array[2][0] = datarate_221_300Msps, + .data_rate_reg_array[3][0] = datarate_221_300Msps, + .data_rate_reg_array[4][0] = datarate_221_300Msps, + .data_rate_reg_array[5][0] = datarate_221_300Msps, + .data_rate_reg_array[6][0] = datarate_221_300Msps, + .data_rate_reg_array[7][0] = datarate_221_300Msps, + }, + { + /* ((350 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 798000000, + .data_rate_reg_array_size = 
ARRAY_SIZE(datarate_221_350Msps), + .data_rate_reg_array[0][0] = datarate_221_350Msps, + .data_rate_reg_array[1][0] = datarate_221_350Msps, + .data_rate_reg_array[2][0] = datarate_221_350Msps, + .data_rate_reg_array[3][0] = datarate_221_350Msps, + .data_rate_reg_array[4][0] = datarate_221_350Msps, + .data_rate_reg_array[5][0] = datarate_221_350Msps, + .data_rate_reg_array[6][0] = datarate_221_350Msps, + .data_rate_reg_array[7][0] = datarate_221_350Msps, + }, + { + /* ((400 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 912000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_400Msps), + .data_rate_reg_array[0][0] = datarate_221_400Msps, + .data_rate_reg_array[1][0] = datarate_221_400Msps, + .data_rate_reg_array[2][0] = datarate_221_400Msps, + .data_rate_reg_array[3][0] = datarate_221_400Msps, + .data_rate_reg_array[4][0] = datarate_221_400Msps, + .data_rate_reg_array[5][0] = datarate_221_400Msps, + .data_rate_reg_array[6][0] = datarate_221_400Msps, + .data_rate_reg_array[7][0] = datarate_221_400Msps, + }, + { + /* ((500 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1140000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_500Msps), + .data_rate_reg_array[0][0] = datarate_221_500Msps, + .data_rate_reg_array[1][0] = datarate_221_500Msps, + .data_rate_reg_array[2][0] = datarate_221_500Msps, + .data_rate_reg_array[3][0] = datarate_221_500Msps, + .data_rate_reg_array[4][0] = datarate_221_500Msps, + .data_rate_reg_array[5][0] = datarate_221_500Msps, + .data_rate_reg_array[6][0] = datarate_221_500Msps, + .data_rate_reg_array[7][0] = datarate_221_500Msps, + }, + { + /* ((600 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1368000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_600Msps), + .data_rate_reg_array[0][0] = datarate_221_600Msps, + .data_rate_reg_array[1][0] = datarate_221_600Msps, + .data_rate_reg_array[2][0] = datarate_221_600Msps, + .data_rate_reg_array[3][0] = 
datarate_221_600Msps, + .data_rate_reg_array[4][0] = datarate_221_600Msps, + .data_rate_reg_array[5][0] = datarate_221_600Msps, + .data_rate_reg_array[6][0] = datarate_221_600Msps, + .data_rate_reg_array[7][0] = datarate_221_600Msps, + }, + { + /* ((700 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1596000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_700Msps), + .data_rate_reg_array[0][0] = datarate_221_700Msps, + .data_rate_reg_array[1][0] = datarate_221_700Msps, + .data_rate_reg_array[2][0] = datarate_221_700Msps, + .data_rate_reg_array[3][0] = datarate_221_700Msps, + .data_rate_reg_array[4][0] = datarate_221_700Msps, + .data_rate_reg_array[5][0] = datarate_221_700Msps, + .data_rate_reg_array[6][0] = datarate_221_700Msps, + .data_rate_reg_array[7][0] = datarate_221_700Msps, + }, + { + /* ((800 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1824000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_800Msps), + .data_rate_reg_array[0][0] = datarate_221_800Msps, + .data_rate_reg_array[1][0] = datarate_221_800Msps, + .data_rate_reg_array[2][0] = datarate_221_800Msps, + .data_rate_reg_array[3][0] = datarate_221_800Msps, + .data_rate_reg_array[4][0] = datarate_221_800Msps, + .data_rate_reg_array[5][0] = datarate_221_800Msps, + .data_rate_reg_array[6][0] = datarate_221_800Msps, + .data_rate_reg_array[7][0] = datarate_221_800Msps, + }, + { + /* ((900 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2052000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_900Msps), + .data_rate_reg_array[0][0] = datarate_221_900Msps, + .data_rate_reg_array[1][0] = datarate_221_900Msps, + .data_rate_reg_array[2][0] = datarate_221_900Msps, + .data_rate_reg_array[3][0] = datarate_221_900Msps, + .data_rate_reg_array[4][0] = datarate_221_900Msps, + .data_rate_reg_array[5][0] = datarate_221_900Msps, + .data_rate_reg_array[6][0] = datarate_221_900Msps, + .data_rate_reg_array[7][0] = 
datarate_221_900Msps, + }, + { + /* ((1000 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2280000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_1p0Gsps), + .data_rate_reg_array[0][0] = datarate_221_1p0Gsps, + .data_rate_reg_array[1][0] = datarate_221_1p0Gsps, + .data_rate_reg_array[2][0] = datarate_221_1p0Gsps, + .data_rate_reg_array[3][0] = datarate_221_1p0Gsps, + .data_rate_reg_array[4][0] = datarate_221_1p0Gsps, + .data_rate_reg_array[5][0] = datarate_221_1p0Gsps, + .data_rate_reg_array[6][0] = datarate_221_1p0Gsps, + .data_rate_reg_array[7][0] = datarate_221_1p0Gsps, + }, + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2736000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_1p2Gsps), + .data_rate_reg_array[0][0] = datarate_221_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_221_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_221_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_221_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_221_1p2Gsps, + .data_rate_reg_array[5][0] = datarate_221_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_221_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_221_1p2Gsps, + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3420000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_1p5Gsps), + .data_rate_reg_array[0][0] = datarate_221_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_221_1p5Gsps, + .data_rate_reg_array[2][0] = datarate_221_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_221_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_221_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_221_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_221_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_221_1p5Gsps, + }, + { + /* ((1.7 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3876000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_1p7Gsps), + .data_rate_reg_array[0][0] = 
datarate_221_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_221_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_221_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_221_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_221_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_221_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_221_1p7Gsps, + .data_rate_reg_array[7][0] = datarate_221_1p7Gsps, + }, + { + /* ((2.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4560000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_2p0Gsps), + .data_rate_reg_array[0][0] = datarate_221_2p0Gsps, + .data_rate_reg_array[1][0] = datarate_221_2p0Gsps, + .data_rate_reg_array[2][0] = datarate_221_2p0Gsps, + .data_rate_reg_array[3][0] = datarate_221_2p0Gsps, + .data_rate_reg_array[4][0] = datarate_221_2p0Gsps, + .data_rate_reg_array[5][0] = datarate_221_2p0Gsps, + .data_rate_reg_array[6][0] = datarate_221_2p0Gsps, + .data_rate_reg_array[7][0] = datarate_221_2p0Gsps, + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4788000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_2p1Gsps), + .data_rate_reg_array[0][0] = datarate_221_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_221_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_221_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_221_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_221_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_221_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_221_2p1Gsps, + .data_rate_reg_array[7][0] = datarate_221_2p1Gsps, + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5358000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_2p35Gsps), + .data_rate_reg_array[0][0] = datarate_221_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_221_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_221_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_221_2p35Gsps, + .data_rate_reg_array[4][0] = 
datarate_221_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_221_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_221_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_221_2p35Gsps, + }, + { + /* ((2.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5700000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_2p5Gsps), + .data_rate_reg_array[0][0] = datarate_221_2p5Gsps, + .data_rate_reg_array[1][0] = datarate_221_2p5Gsps, + .data_rate_reg_array[2][0] = datarate_221_2p5Gsps, + .data_rate_reg_array[3][0] = datarate_221_2p5Gsps, + .data_rate_reg_array[4][0] = datarate_221_2p5Gsps, + .data_rate_reg_array[5][0] = datarate_221_2p5Gsps, + .data_rate_reg_array[6][0] = datarate_221_2p5Gsps, + .data_rate_reg_array[7][0] = datarate_221_2p5Gsps, + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5928000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_2p6Gsps), + .data_rate_reg_array[0][0] = datarate_221_2p6Gsps, + .data_rate_reg_array[1][0] = datarate_221_2p6Gsps, + .data_rate_reg_array[2][0] = datarate_221_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_221_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_221_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_221_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_221_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_221_2p6Gsps, + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_2p8Gsps), + .data_rate_reg_array[0][0] = datarate_221_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_221_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_221_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_221_2p8Gsps, + .data_rate_reg_array[4][0] = datarate_221_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_221_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_221_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_221_2p8Gsps, + }, + { + /* ((3.0 GSpS) * (10^9) * (2.28 
bits/symbol)) rounded value */ + .bandwidth = 6840000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_3p0Gsps), + .data_rate_reg_array[0][0] = datarate_221_3p0Gsps, + .data_rate_reg_array[1][0] = datarate_221_3p0Gsps, + .data_rate_reg_array[2][0] = datarate_221_3p0Gsps, + .data_rate_reg_array[3][0] = datarate_221_3p0Gsps, + .data_rate_reg_array[4][0] = datarate_221_3p0Gsps, + .data_rate_reg_array[5][0] = datarate_221_3p0Gsps, + .data_rate_reg_array[6][0] = datarate_221_3p0Gsps, + .data_rate_reg_array[7][0] = datarate_221_3p0Gsps, + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_3p3Gsps), + .data_rate_reg_array[0][0] = datarate_221_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_221_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_221_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_221_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_221_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_221_3p3Gsps, + .data_rate_reg_array[6][0] = datarate_221_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_221_3p3Gsps, + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_3p5Gsps), + .data_rate_reg_array[0][0] = datarate_221_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_221_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_221_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_221_3p5Gsps, + .data_rate_reg_array[4][0] = datarate_221_3p5Gsps, + .data_rate_reg_array[5][0] = datarate_221_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_221_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_221_3p5Gsps, + }, + { + /* ((4.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_4p0Gsps), + .data_rate_reg_array[0][0] = datarate_221_4p0Gsps, + .data_rate_reg_array[1][0] = datarate_221_4p0Gsps, + 
.data_rate_reg_array[2][0] = datarate_221_4p0Gsps, + .data_rate_reg_array[3][0] = datarate_221_4p0Gsps, + .data_rate_reg_array[4][0] = datarate_221_4p0Gsps, + .data_rate_reg_array[5][0] = datarate_221_4p0Gsps, + .data_rate_reg_array[6][0] = datarate_221_4p0Gsps, + .data_rate_reg_array[7][0] = datarate_221_4p0Gsps, + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 10260000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_4p5Gsps), + .data_rate_reg_array[0][0] = datarate_221_4p5Gsps, + .data_rate_reg_array[1][0] = datarate_221_4p5Gsps, + .data_rate_reg_array[2][0] = datarate_221_4p5Gsps, + .data_rate_reg_array[3][0] = datarate_221_4p5Gsps, + .data_rate_reg_array[4][0] = datarate_221_4p5Gsps, + .data_rate_reg_array[5][0] = datarate_221_4p5Gsps, + .data_rate_reg_array[6][0] = datarate_221_4p5Gsps, + .data_rate_reg_array[7][0] = datarate_221_4p5Gsps, + }, + { + /* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 11400000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_5p0Gsps), + .data_rate_reg_array[0][0] = datarate_221_5p0Gsps, + .data_rate_reg_array[1][0] = datarate_221_5p0Gsps, + .data_rate_reg_array[2][0] = datarate_221_5p0Gsps, + .data_rate_reg_array[3][0] = datarate_221_5p0Gsps, + .data_rate_reg_array[4][0] = datarate_221_5p0Gsps, + .data_rate_reg_array[5][0] = datarate_221_5p0Gsps, + .data_rate_reg_array[6][0] = datarate_221_5p0Gsps, + .data_rate_reg_array[7][0] = datarate_221_5p0Gsps, + }, + { + /* ((5.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 12540000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_5p5Gsps), + .data_rate_reg_array[0][0] = datarate_221_5p5Gsps, + .data_rate_reg_array[1][0] = datarate_221_5p5Gsps, + .data_rate_reg_array[2][0] = datarate_221_5p5Gsps, + .data_rate_reg_array[3][0] = datarate_221_5p5Gsps, + .data_rate_reg_array[4][0] = datarate_221_5p5Gsps, + .data_rate_reg_array[5][0] = datarate_221_5p5Gsps, + 
.data_rate_reg_array[6][0] = datarate_221_5p5Gsps, + .data_rate_reg_array[7][0] = datarate_221_5p5Gsps, + }, + { + /* ((6.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 13680000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_221_6p0Gsps), + .data_rate_reg_array[0][0] = datarate_221_6p0Gsps, + .data_rate_reg_array[1][0] = datarate_221_6p0Gsps, + .data_rate_reg_array[2][0] = datarate_221_6p0Gsps, + .data_rate_reg_array[3][0] = datarate_221_6p0Gsps, + .data_rate_reg_array[4][0] = datarate_221_6p0Gsps, + .data_rate_reg_array[5][0] = datarate_221_6p0Gsps, + .data_rate_reg_array[6][0] = datarate_221_6p0Gsps, + .data_rate_reg_array[7][0] = datarate_221_6p0Gsps, + }, +}; + +struct csiphy_reg_t bist_3ph_arr_2_2_1[] = { + {0x0230, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0258, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0248, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x024C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0630, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0658, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0648, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x064C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A30, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A34, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A38, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A3C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A58, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AC8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A48, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A4C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_2_1[] = { + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_2_1 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + .bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_2_1), + .bist_3ph_settings_arry = bist_3ph_arr_2_2_1, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_2_1), + .bist_status_arr = bist_status_arr_2_2_1, +}; + +struct data_rate_settings_t 
data_rate_delta_table_2_2_1 = { + .num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_2_1), + .data_rate_settings = data_rate_settings_2_2_1, +}; + +struct csiphy_reg_parms_t csiphy_v2_2_1 = { + .mipi_csiphy_interrupt_status0_addr = 0x10B0, + .mipi_csiphy_interrupt_clear0_addr = 0x1058, + .mipi_csiphy_glbl_irq_cmd_addr = 0x1028, + .size_offset_betn_lanes = 0x400, + .status_reg_params = &status_regs_2_2_1, + .csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_2_1), + .csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_2_1), + .csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_2_1), + .csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_2_1_reg), + .csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_2_1_reg), + .csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_2_1_combo_mode_reg), + .csiphy_3ph_combo_config_array_size = 0, + .csiphy_2ph_3ph_config_array_size = 0, + .csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_2_1), + .csiphy_num_common_status_regs = 20, + .aon_sel_params = &aon_cam_select_params_2_2_1, +}; + +struct csiphy_ctrl_t ctrl_reg_2_2_1 = { + .csiphy_common_reg = csiphy_common_reg_2_2_1, + .csiphy_2ph_reg = csiphy_2ph_v2_2_1_reg, + .csiphy_3ph_reg = csiphy_3ph_v2_2_1_reg, + .csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_2_1_combo_mode_reg, + .csiphy_3ph_combo_reg = NULL, + .csiphy_2ph_3ph_mode_reg = NULL, + .csiphy_reg = &csiphy_v2_2_1, + .csiphy_irq_reg = csiphy_irq_reg_2_2_1, + .csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_2_1, + .csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_2_1, + .csiphy_lane_config_reg = csiphy_lane_en_reg_2_2_1, + .data_rates_settings_table = &data_rate_delta_table_2_2_1, + .csiphy_bist_reg = &bist_setting_2_2_1, + .getclockvoting = get_clk_voting_dynamic, +}; + +#endif /* _CAM_CSIPHY_2_2_1_HWREG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_3_0_hwreg.h 
b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_3_0_hwreg.h new file mode 100644 index 0000000000..bbac37175e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_3_0_hwreg.h @@ -0,0 +1,2037 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_CSIPHY_2_3_0_HWREG_H_ +#define _CAM_CSIPHY_2_3_0_HWREG_H_ + +#include "../cam_csiphy_dev.h" + +struct cam_csiphy_aon_sel_params_t aon_cam_select_params_2_3_0 = { + .aon_cam_sel_offset[0] = 0x01E0, + .aon_cam_sel_offset[1] = 0x01E4, + .cam_sel_mask = BIT(0), + .mclk_sel_mask = BIT(8), +}; + +struct cam_cphy_dphy_status_reg_params_t status_regs_2_3_0 = { + .csiphy_3ph_status0_offset = 0x0340, + .csiphy_2ph_status0_offset = 0x00C0, + .refgen_status_offset = 0x1184, + .cphy_lane_status = {0x0358, 0x0758, 0x0B58}, + .csiphy_3ph_status_size = 24, + .csiphy_2ph_status_size = 20, +}; + +struct csiphy_reg_t csiphy_lane_en_reg_2_3_0[] = { + {0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE}, +}; + +struct csiphy_reg_t csiphy_common_reg_2_3_0[] = { + {0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x01, CSIPHY_2PH_REGS}, + {0x108C, 0x0E, 0x01, CSIPHY_3PH_REGS}, + {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_enter_reg_2_3_0[] = { + {0x1000, 0x01, 0x14, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_exit_reg_2_3_0[] = { + {0x1000, 0x02, 0x0A, CSIPHY_2PH_REGS}, + {0x1000, 0x00, 0x0A, CSIPHY_2PH_COMBO_REGS}, + {0x1000, 0x0E, 0x0A, CSIPHY_3PH_REGS}, +}; + +struct csiphy_reg_t csiphy_irq_reg_2_3_0[] = { + {0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_3_0_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 
0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_3_0_combo_mode_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 
0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + 
+struct csiphy_reg_t csiphy_3ph_v2_3_0_reg[] = { + {0x0294, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0284, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0684, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_80Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_100Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_200Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_300Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_350Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_400Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + 
{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_500Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 
0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_600Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 
0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_700Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 
0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_800Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_900Msps[] = { + /* AFE Settings */ 
+ {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_1p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_1p2Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_1p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_1p7Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x26, 0x00, 
CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_2p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_2p1Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_2p35Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_2p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 
0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_2p6Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 
0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_2p8Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 
0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_3p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 
0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_3p3Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct 
csiphy_reg_t datarate_230_3p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_4p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_4p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_5p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_5p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_230_6p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +static struct data_rate_reg_info_t data_rate_settings_2_3_0[] = { + { + /* ((80 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 182400000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_80Msps), + .data_rate_reg_array[0][0] = datarate_230_80Msps, + .data_rate_reg_array[1][0] = datarate_230_80Msps, + .data_rate_reg_array[2][0] = datarate_230_80Msps, + .data_rate_reg_array[3][0] = datarate_230_80Msps, + .data_rate_reg_array[4][0] = datarate_230_80Msps, + .data_rate_reg_array[5][0] = datarate_230_80Msps, + .data_rate_reg_array[6][0] = datarate_230_80Msps, + .data_rate_reg_array[7][0] = datarate_230_80Msps, + }, + { + /* ((100 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 228000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_100Msps), + .data_rate_reg_array[0][0] = datarate_230_100Msps, + .data_rate_reg_array[1][0] = datarate_230_100Msps, 
+ .data_rate_reg_array[2][0] = datarate_230_100Msps, + .data_rate_reg_array[3][0] = datarate_230_100Msps, + .data_rate_reg_array[4][0] = datarate_230_100Msps, + .data_rate_reg_array[5][0] = datarate_230_100Msps, + .data_rate_reg_array[6][0] = datarate_230_100Msps, + .data_rate_reg_array[7][0] = datarate_230_100Msps, + }, + { + /* ((200 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 456000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_200Msps), + .data_rate_reg_array[0][0] = datarate_230_200Msps, + .data_rate_reg_array[1][0] = datarate_230_200Msps, + .data_rate_reg_array[2][0] = datarate_230_200Msps, + .data_rate_reg_array[3][0] = datarate_230_200Msps, + .data_rate_reg_array[4][0] = datarate_230_200Msps, + .data_rate_reg_array[5][0] = datarate_230_200Msps, + .data_rate_reg_array[6][0] = datarate_230_200Msps, + .data_rate_reg_array[7][0] = datarate_230_200Msps, + }, + { + /* ((300 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 684000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_300Msps), + .data_rate_reg_array[0][0] = datarate_230_300Msps, + .data_rate_reg_array[1][0] = datarate_230_300Msps, + .data_rate_reg_array[2][0] = datarate_230_300Msps, + .data_rate_reg_array[3][0] = datarate_230_300Msps, + .data_rate_reg_array[4][0] = datarate_230_300Msps, + .data_rate_reg_array[5][0] = datarate_230_300Msps, + .data_rate_reg_array[6][0] = datarate_230_300Msps, + .data_rate_reg_array[7][0] = datarate_230_300Msps, + }, + { + /* ((350 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 798000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_350Msps), + .data_rate_reg_array[0][0] = datarate_230_350Msps, + .data_rate_reg_array[1][0] = datarate_230_350Msps, + .data_rate_reg_array[2][0] = datarate_230_350Msps, + .data_rate_reg_array[3][0] = datarate_230_350Msps, + .data_rate_reg_array[4][0] = datarate_230_350Msps, + .data_rate_reg_array[5][0] = datarate_230_350Msps, + 
.data_rate_reg_array[6][0] = datarate_230_350Msps, + .data_rate_reg_array[7][0] = datarate_230_350Msps, + }, + { + /* ((400 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 912000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_400Msps), + .data_rate_reg_array[0][0] = datarate_230_400Msps, + .data_rate_reg_array[1][0] = datarate_230_400Msps, + .data_rate_reg_array[2][0] = datarate_230_400Msps, + .data_rate_reg_array[3][0] = datarate_230_400Msps, + .data_rate_reg_array[4][0] = datarate_230_400Msps, + .data_rate_reg_array[5][0] = datarate_230_400Msps, + .data_rate_reg_array[6][0] = datarate_230_400Msps, + .data_rate_reg_array[7][0] = datarate_230_400Msps, + }, + { + /* ((500 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1140000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_500Msps), + .data_rate_reg_array[0][0] = datarate_230_500Msps, + .data_rate_reg_array[1][0] = datarate_230_500Msps, + .data_rate_reg_array[2][0] = datarate_230_500Msps, + .data_rate_reg_array[3][0] = datarate_230_500Msps, + .data_rate_reg_array[4][0] = datarate_230_500Msps, + .data_rate_reg_array[5][0] = datarate_230_500Msps, + .data_rate_reg_array[6][0] = datarate_230_500Msps, + .data_rate_reg_array[7][0] = datarate_230_500Msps, + }, + { + /* ((600 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1368000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_600Msps), + .data_rate_reg_array[0][0] = datarate_230_600Msps, + .data_rate_reg_array[1][0] = datarate_230_600Msps, + .data_rate_reg_array[2][0] = datarate_230_600Msps, + .data_rate_reg_array[3][0] = datarate_230_600Msps, + .data_rate_reg_array[4][0] = datarate_230_600Msps, + .data_rate_reg_array[5][0] = datarate_230_600Msps, + .data_rate_reg_array[6][0] = datarate_230_600Msps, + .data_rate_reg_array[7][0] = datarate_230_600Msps, + }, + { + /* ((700 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1596000000, + .data_rate_reg_array_size 
= ARRAY_SIZE(datarate_230_700Msps), + .data_rate_reg_array[0][0] = datarate_230_700Msps, + .data_rate_reg_array[1][0] = datarate_230_700Msps, + .data_rate_reg_array[2][0] = datarate_230_700Msps, + .data_rate_reg_array[3][0] = datarate_230_700Msps, + .data_rate_reg_array[4][0] = datarate_230_700Msps, + .data_rate_reg_array[5][0] = datarate_230_700Msps, + .data_rate_reg_array[6][0] = datarate_230_700Msps, + .data_rate_reg_array[7][0] = datarate_230_700Msps, + }, + { + /* ((800 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1824000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_800Msps), + .data_rate_reg_array[0][0] = datarate_230_800Msps, + .data_rate_reg_array[1][0] = datarate_230_800Msps, + .data_rate_reg_array[2][0] = datarate_230_800Msps, + .data_rate_reg_array[3][0] = datarate_230_800Msps, + .data_rate_reg_array[4][0] = datarate_230_800Msps, + .data_rate_reg_array[5][0] = datarate_230_800Msps, + .data_rate_reg_array[6][0] = datarate_230_800Msps, + .data_rate_reg_array[7][0] = datarate_230_800Msps, + }, + { + /* ((900 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2052000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_900Msps), + .data_rate_reg_array[0][0] = datarate_230_900Msps, + .data_rate_reg_array[1][0] = datarate_230_900Msps, + .data_rate_reg_array[2][0] = datarate_230_900Msps, + .data_rate_reg_array[3][0] = datarate_230_900Msps, + .data_rate_reg_array[4][0] = datarate_230_900Msps, + .data_rate_reg_array[5][0] = datarate_230_900Msps, + .data_rate_reg_array[6][0] = datarate_230_900Msps, + .data_rate_reg_array[7][0] = datarate_230_900Msps, + }, + { + /* ((1000 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2280000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_1p0Gsps), + .data_rate_reg_array[0][0] = datarate_230_1p0Gsps, + .data_rate_reg_array[1][0] = datarate_230_1p0Gsps, + .data_rate_reg_array[2][0] = datarate_230_1p0Gsps, + .data_rate_reg_array[3][0] = 
datarate_230_1p0Gsps, + .data_rate_reg_array[4][0] = datarate_230_1p0Gsps, + .data_rate_reg_array[5][0] = datarate_230_1p0Gsps, + .data_rate_reg_array[6][0] = datarate_230_1p0Gsps, + .data_rate_reg_array[7][0] = datarate_230_1p0Gsps, + }, + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2736000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_1p2Gsps), + .data_rate_reg_array[0][0] = datarate_230_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_230_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_230_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_230_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_230_1p2Gsps, + .data_rate_reg_array[5][0] = datarate_230_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_230_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_230_1p2Gsps, + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3420000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_1p5Gsps), + .data_rate_reg_array[0][0] = datarate_230_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_230_1p5Gsps, + .data_rate_reg_array[2][0] = datarate_230_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_230_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_230_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_230_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_230_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_230_1p5Gsps, + }, + { + /* ((1.7 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3876000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_1p7Gsps), + .data_rate_reg_array[0][0] = datarate_230_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_230_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_230_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_230_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_230_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_230_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_230_1p7Gsps, + .data_rate_reg_array[7][0] = 
datarate_230_1p7Gsps, + }, + { + /* ((2.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4560000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_2p0Gsps), + .data_rate_reg_array[0][0] = datarate_230_2p0Gsps, + .data_rate_reg_array[1][0] = datarate_230_2p0Gsps, + .data_rate_reg_array[2][0] = datarate_230_2p0Gsps, + .data_rate_reg_array[3][0] = datarate_230_2p0Gsps, + .data_rate_reg_array[4][0] = datarate_230_2p0Gsps, + .data_rate_reg_array[5][0] = datarate_230_2p0Gsps, + .data_rate_reg_array[6][0] = datarate_230_2p0Gsps, + .data_rate_reg_array[7][0] = datarate_230_2p0Gsps, + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4788000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_2p1Gsps), + .data_rate_reg_array[0][0] = datarate_230_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_230_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_230_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_230_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_230_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_230_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_230_2p1Gsps, + .data_rate_reg_array[7][0] = datarate_230_2p1Gsps, + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5358000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_2p35Gsps), + .data_rate_reg_array[0][0] = datarate_230_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_230_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_230_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_230_2p35Gsps, + .data_rate_reg_array[4][0] = datarate_230_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_230_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_230_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_230_2p35Gsps, + }, + { + /* ((2.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5700000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_2p5Gsps), + .data_rate_reg_array[0][0] = 
datarate_230_2p5Gsps, + .data_rate_reg_array[1][0] = datarate_230_2p5Gsps, + .data_rate_reg_array[2][0] = datarate_230_2p5Gsps, + .data_rate_reg_array[3][0] = datarate_230_2p5Gsps, + .data_rate_reg_array[4][0] = datarate_230_2p5Gsps, + .data_rate_reg_array[5][0] = datarate_230_2p5Gsps, + .data_rate_reg_array[6][0] = datarate_230_2p5Gsps, + .data_rate_reg_array[7][0] = datarate_230_2p5Gsps, + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5928000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_2p6Gsps), + .data_rate_reg_array[0][0] = datarate_230_2p6Gsps, + .data_rate_reg_array[1][0] = datarate_230_2p6Gsps, + .data_rate_reg_array[2][0] = datarate_230_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_230_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_230_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_230_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_230_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_230_2p6Gsps, + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_2p8Gsps), + .data_rate_reg_array[0][0] = datarate_230_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_230_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_230_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_230_2p8Gsps, + .data_rate_reg_array[4][0] = datarate_230_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_230_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_230_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_230_2p8Gsps, + }, + { + /* ((3.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6840000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_3p0Gsps), + .data_rate_reg_array[0][0] = datarate_230_3p0Gsps, + .data_rate_reg_array[1][0] = datarate_230_3p0Gsps, + .data_rate_reg_array[2][0] = datarate_230_3p0Gsps, + .data_rate_reg_array[3][0] = datarate_230_3p0Gsps, + .data_rate_reg_array[4][0] = 
datarate_230_3p0Gsps, + .data_rate_reg_array[5][0] = datarate_230_3p0Gsps, + .data_rate_reg_array[6][0] = datarate_230_3p0Gsps, + .data_rate_reg_array[7][0] = datarate_230_3p0Gsps, + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_3p3Gsps), + .data_rate_reg_array[0][0] = datarate_230_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_230_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_230_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_230_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_230_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_230_3p3Gsps, + .data_rate_reg_array[6][0] = datarate_230_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_230_3p3Gsps, + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_3p5Gsps), + .data_rate_reg_array[0][0] = datarate_230_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_230_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_230_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_230_3p5Gsps, + .data_rate_reg_array[4][0] = datarate_230_3p5Gsps, + .data_rate_reg_array[5][0] = datarate_230_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_230_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_230_3p5Gsps, + }, + { + /* ((4.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_4p0Gsps), + .data_rate_reg_array[0][0] = datarate_230_4p0Gsps, + .data_rate_reg_array[1][0] = datarate_230_4p0Gsps, + .data_rate_reg_array[2][0] = datarate_230_4p0Gsps, + .data_rate_reg_array[3][0] = datarate_230_4p0Gsps, + .data_rate_reg_array[4][0] = datarate_230_4p0Gsps, + .data_rate_reg_array[5][0] = datarate_230_4p0Gsps, + .data_rate_reg_array[6][0] = datarate_230_4p0Gsps, + .data_rate_reg_array[7][0] = datarate_230_4p0Gsps, + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 
bits/symbol)) rounded value */ + .bandwidth = 10260000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_4p5Gsps), + .data_rate_reg_array[0][0] = datarate_230_4p5Gsps, + .data_rate_reg_array[1][0] = datarate_230_4p5Gsps, + .data_rate_reg_array[2][0] = datarate_230_4p5Gsps, + .data_rate_reg_array[3][0] = datarate_230_4p5Gsps, + .data_rate_reg_array[4][0] = datarate_230_4p5Gsps, + .data_rate_reg_array[5][0] = datarate_230_4p5Gsps, + .data_rate_reg_array[6][0] = datarate_230_4p5Gsps, + .data_rate_reg_array[7][0] = datarate_230_4p5Gsps, + }, + { + /* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 11400000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_5p0Gsps), + .data_rate_reg_array[0][0] = datarate_230_5p0Gsps, + .data_rate_reg_array[1][0] = datarate_230_5p0Gsps, + .data_rate_reg_array[2][0] = datarate_230_5p0Gsps, + .data_rate_reg_array[3][0] = datarate_230_5p0Gsps, + .data_rate_reg_array[4][0] = datarate_230_5p0Gsps, + .data_rate_reg_array[5][0] = datarate_230_5p0Gsps, + .data_rate_reg_array[6][0] = datarate_230_5p0Gsps, + .data_rate_reg_array[7][0] = datarate_230_5p0Gsps, + }, + { + /* ((5.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 12540000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_5p5Gsps), + .data_rate_reg_array[0][0] = datarate_230_5p5Gsps, + .data_rate_reg_array[1][0] = datarate_230_5p5Gsps, + .data_rate_reg_array[2][0] = datarate_230_5p5Gsps, + .data_rate_reg_array[3][0] = datarate_230_5p5Gsps, + .data_rate_reg_array[4][0] = datarate_230_5p5Gsps, + .data_rate_reg_array[5][0] = datarate_230_5p5Gsps, + .data_rate_reg_array[6][0] = datarate_230_5p5Gsps, + .data_rate_reg_array[7][0] = datarate_230_5p5Gsps, + }, + { + /* ((6.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 13680000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_230_6p0Gsps), + .data_rate_reg_array[0][0] = datarate_230_6p0Gsps, + .data_rate_reg_array[1][0] = datarate_230_6p0Gsps, 
+ .data_rate_reg_array[2][0] = datarate_230_6p0Gsps, + .data_rate_reg_array[3][0] = datarate_230_6p0Gsps, + .data_rate_reg_array[4][0] = datarate_230_6p0Gsps, + .data_rate_reg_array[5][0] = datarate_230_6p0Gsps, + .data_rate_reg_array[6][0] = datarate_230_6p0Gsps, + .data_rate_reg_array[7][0] = datarate_230_6p0Gsps, + }, +}; + +struct csiphy_reg_t bist_3ph_arr_2_3_0[] = { + {0x0230, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0258, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0248, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x024C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0630, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0658, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0648, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x064C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A30, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A34, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A38, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A3C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A58, 0x10, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0AC8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A48, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A4C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_3_0[] = { + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_3_0 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + .bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_3_0), + .bist_3ph_settings_arry = bist_3ph_arr_2_3_0, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_3_0), + .bist_status_arr = bist_status_arr_2_3_0, +}; + +struct data_rate_settings_t data_rate_delta_table_2_3_0 = { + .num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_3_0), + .data_rate_settings = data_rate_settings_2_3_0, +}; + +struct csiphy_reg_parms_t csiphy_v2_3_0 = { + .mipi_csiphy_interrupt_status0_addr = 0x1138, + .mipi_csiphy_interrupt_clear0_addr = 0x1058, + .mipi_csiphy_glbl_irq_cmd_addr = 0x1028, + .size_offset_betn_lanes = 0x400, + .status_reg_params = 
&status_regs_2_3_0, + .csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_3_0), + .csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_3_0), + .csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_3_0), + .csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_3_0_reg), + .csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_3_0_reg), + .csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_3_0_combo_mode_reg), + .csiphy_3ph_combo_config_array_size = 0, + .csiphy_2ph_3ph_config_array_size = 0, + .csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_3_0), + .csiphy_num_common_status_regs = 20, + .aon_sel_params = &aon_cam_select_params_2_3_0, +}; + +struct csiphy_ctrl_t ctrl_reg_2_3_0 = { + .csiphy_common_reg = csiphy_common_reg_2_3_0, + .csiphy_2ph_reg = csiphy_2ph_v2_3_0_reg, + .csiphy_3ph_reg = csiphy_3ph_v2_3_0_reg, + .csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_3_0_combo_mode_reg, + .csiphy_3ph_combo_reg = NULL, + .csiphy_2ph_3ph_mode_reg = NULL, + .csiphy_reg = &csiphy_v2_3_0, + .csiphy_irq_reg = csiphy_irq_reg_2_3_0, + .csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_3_0, + .csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_3_0, + .csiphy_lane_config_reg = csiphy_lane_en_reg_2_3_0, + .data_rates_settings_table = &data_rate_delta_table_2_3_0, + .csiphy_bist_reg = &bist_setting_2_3_0, + .getclockvoting = get_clk_voting_dynamic, +}; + +#endif /* _CAM_CSIPHY_2_3_0_HWREG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_4_0_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_4_0_hwreg.h new file mode 100644 index 0000000000..7532da3736 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_4_0_hwreg.h @@ -0,0 +1,2037 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CSIPHY_2_4_0_HWREG_H_ +#define _CAM_CSIPHY_2_4_0_HWREG_H_ + +#include "../cam_csiphy_dev.h" + +struct cam_csiphy_aon_sel_params_t aon_cam_select_params_2_4_0 = { + .aon_cam_sel_offset[0] = 0x01E0, + .aon_cam_sel_offset[1] = 0x01E4, + .cam_sel_mask = BIT(0), + .mclk_sel_mask = BIT(8), +}; + +struct cam_cphy_dphy_status_reg_params_t status_regs_2_4_0 = { + .csiphy_3ph_status0_offset = 0x0340, + .csiphy_2ph_status0_offset = 0x00C0, + .refgen_status_offset = 0x1184, + .cphy_lane_status = {0x0358, 0x0758, 0x0B58}, + .csiphy_3ph_status_size = 24, + .csiphy_2ph_status_size = 20, +}; + +struct csiphy_reg_t csiphy_lane_en_reg_2_4_0[] = { + {0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE}, +}; + +struct csiphy_reg_t csiphy_common_reg_2_4_0[] = { + {0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x01, CSIPHY_2PH_REGS}, + {0x108C, 0x0E, 0x01, CSIPHY_3PH_REGS}, + {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_enter_reg_2_4_0[] = { + {0x1000, 0x01, 0x14, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_exit_reg_2_4_0[] = { + {0x1000, 0x02, 0x0A, CSIPHY_2PH_REGS}, + {0x1000, 0x00, 0x0A, CSIPHY_2PH_COMBO_REGS}, + {0x1000, 0x0E, 0x0A, CSIPHY_3PH_REGS}, +}; + +struct csiphy_reg_t csiphy_irq_reg_2_4_0[] = { + {0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_4_0_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, 
CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_4_0_combo_mode_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_3ph_v2_4_0_reg[] = { + {0x0294, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0284, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0684, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_80Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_100Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_200Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 
0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_300Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 
0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_350Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 
0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_400Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 
0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_500Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct 
csiphy_reg_t datarate_240_600Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_700Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_800Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_900Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_1p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_1p2Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_1p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_1p7Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x26, 0x00, 
CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_2p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_2p1Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, 
CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_2p35Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x23, 0x00, 
CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_2p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_2p6Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_2p8Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_3p0Gsps[] = { + /* AFE Settings */ + {0x0268, 
0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_3p3Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_3p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_4p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_4p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x09, 0x00, 
CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_5p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_5p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_240_6p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +static struct data_rate_reg_info_t data_rate_settings_2_4_0[] = { + { + /* ((80 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 182400000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_80Msps), + .data_rate_reg_array[0][0] = datarate_240_80Msps, + .data_rate_reg_array[1][0] = datarate_240_80Msps, + .data_rate_reg_array[2][0] = datarate_240_80Msps, + .data_rate_reg_array[3][0] = datarate_240_80Msps, + .data_rate_reg_array[4][0] = datarate_240_80Msps, + .data_rate_reg_array[5][0] = datarate_240_80Msps, + .data_rate_reg_array[6][0] = datarate_240_80Msps, + .data_rate_reg_array[7][0] = datarate_240_80Msps, + }, + { + /* ((100 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 228000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_100Msps), + .data_rate_reg_array[0][0] = datarate_240_100Msps, + .data_rate_reg_array[1][0] = datarate_240_100Msps, + .data_rate_reg_array[2][0] = datarate_240_100Msps, + .data_rate_reg_array[3][0] = datarate_240_100Msps, + .data_rate_reg_array[4][0] = datarate_240_100Msps, + .data_rate_reg_array[5][0] = datarate_240_100Msps, + .data_rate_reg_array[6][0] = datarate_240_100Msps, + .data_rate_reg_array[7][0] = datarate_240_100Msps, + }, + { + 
/* ((200 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 456000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_200Msps), + .data_rate_reg_array[0][0] = datarate_240_200Msps, + .data_rate_reg_array[1][0] = datarate_240_200Msps, + .data_rate_reg_array[2][0] = datarate_240_200Msps, + .data_rate_reg_array[3][0] = datarate_240_200Msps, + .data_rate_reg_array[4][0] = datarate_240_200Msps, + .data_rate_reg_array[5][0] = datarate_240_200Msps, + .data_rate_reg_array[6][0] = datarate_240_200Msps, + .data_rate_reg_array[7][0] = datarate_240_200Msps, + }, + { + /* ((300 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 684000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_300Msps), + .data_rate_reg_array[0][0] = datarate_240_300Msps, + .data_rate_reg_array[1][0] = datarate_240_300Msps, + .data_rate_reg_array[2][0] = datarate_240_300Msps, + .data_rate_reg_array[3][0] = datarate_240_300Msps, + .data_rate_reg_array[4][0] = datarate_240_300Msps, + .data_rate_reg_array[5][0] = datarate_240_300Msps, + .data_rate_reg_array[6][0] = datarate_240_300Msps, + .data_rate_reg_array[7][0] = datarate_240_300Msps, + }, + { + /* ((350 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 798000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_350Msps), + .data_rate_reg_array[0][0] = datarate_240_350Msps, + .data_rate_reg_array[1][0] = datarate_240_350Msps, + .data_rate_reg_array[2][0] = datarate_240_350Msps, + .data_rate_reg_array[3][0] = datarate_240_350Msps, + .data_rate_reg_array[4][0] = datarate_240_350Msps, + .data_rate_reg_array[5][0] = datarate_240_350Msps, + .data_rate_reg_array[6][0] = datarate_240_350Msps, + .data_rate_reg_array[7][0] = datarate_240_350Msps, + }, + { + /* ((400 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 912000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_400Msps), + .data_rate_reg_array[0][0] = datarate_240_400Msps, + .data_rate_reg_array[1][0] 
= datarate_240_400Msps, + .data_rate_reg_array[2][0] = datarate_240_400Msps, + .data_rate_reg_array[3][0] = datarate_240_400Msps, + .data_rate_reg_array[4][0] = datarate_240_400Msps, + .data_rate_reg_array[5][0] = datarate_240_400Msps, + .data_rate_reg_array[6][0] = datarate_240_400Msps, + .data_rate_reg_array[7][0] = datarate_240_400Msps, + }, + { + /* ((500 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1140000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_500Msps), + .data_rate_reg_array[0][0] = datarate_240_500Msps, + .data_rate_reg_array[1][0] = datarate_240_500Msps, + .data_rate_reg_array[2][0] = datarate_240_500Msps, + .data_rate_reg_array[3][0] = datarate_240_500Msps, + .data_rate_reg_array[4][0] = datarate_240_500Msps, + .data_rate_reg_array[5][0] = datarate_240_500Msps, + .data_rate_reg_array[6][0] = datarate_240_500Msps, + .data_rate_reg_array[7][0] = datarate_240_500Msps, + }, + { + /* ((600 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1368000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_600Msps), + .data_rate_reg_array[0][0] = datarate_240_600Msps, + .data_rate_reg_array[1][0] = datarate_240_600Msps, + .data_rate_reg_array[2][0] = datarate_240_600Msps, + .data_rate_reg_array[3][0] = datarate_240_600Msps, + .data_rate_reg_array[4][0] = datarate_240_600Msps, + .data_rate_reg_array[5][0] = datarate_240_600Msps, + .data_rate_reg_array[6][0] = datarate_240_600Msps, + .data_rate_reg_array[7][0] = datarate_240_600Msps, + }, + { + /* ((700 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1596000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_700Msps), + .data_rate_reg_array[0][0] = datarate_240_700Msps, + .data_rate_reg_array[1][0] = datarate_240_700Msps, + .data_rate_reg_array[2][0] = datarate_240_700Msps, + .data_rate_reg_array[3][0] = datarate_240_700Msps, + .data_rate_reg_array[4][0] = datarate_240_700Msps, + .data_rate_reg_array[5][0] = 
datarate_240_700Msps, + .data_rate_reg_array[6][0] = datarate_240_700Msps, + .data_rate_reg_array[7][0] = datarate_240_700Msps, + }, + { + /* ((800 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1824000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_800Msps), + .data_rate_reg_array[0][0] = datarate_240_800Msps, + .data_rate_reg_array[1][0] = datarate_240_800Msps, + .data_rate_reg_array[2][0] = datarate_240_800Msps, + .data_rate_reg_array[3][0] = datarate_240_800Msps, + .data_rate_reg_array[4][0] = datarate_240_800Msps, + .data_rate_reg_array[5][0] = datarate_240_800Msps, + .data_rate_reg_array[6][0] = datarate_240_800Msps, + .data_rate_reg_array[7][0] = datarate_240_800Msps, + }, + { + /* ((900 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2052000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_900Msps), + .data_rate_reg_array[0][0] = datarate_240_900Msps, + .data_rate_reg_array[1][0] = datarate_240_900Msps, + .data_rate_reg_array[2][0] = datarate_240_900Msps, + .data_rate_reg_array[3][0] = datarate_240_900Msps, + .data_rate_reg_array[4][0] = datarate_240_900Msps, + .data_rate_reg_array[5][0] = datarate_240_900Msps, + .data_rate_reg_array[6][0] = datarate_240_900Msps, + .data_rate_reg_array[7][0] = datarate_240_900Msps, + }, + { + /* ((1000 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2280000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_1p0Gsps), + .data_rate_reg_array[0][0] = datarate_240_1p0Gsps, + .data_rate_reg_array[1][0] = datarate_240_1p0Gsps, + .data_rate_reg_array[2][0] = datarate_240_1p0Gsps, + .data_rate_reg_array[3][0] = datarate_240_1p0Gsps, + .data_rate_reg_array[4][0] = datarate_240_1p0Gsps, + .data_rate_reg_array[5][0] = datarate_240_1p0Gsps, + .data_rate_reg_array[6][0] = datarate_240_1p0Gsps, + .data_rate_reg_array[7][0] = datarate_240_1p0Gsps, + }, + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2736000000, + 
.data_rate_reg_array_size = ARRAY_SIZE(datarate_240_1p2Gsps), + .data_rate_reg_array[0][0] = datarate_240_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_240_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_240_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_240_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_240_1p2Gsps, + .data_rate_reg_array[5][0] = datarate_240_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_240_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_240_1p2Gsps, + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3420000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_1p5Gsps), + .data_rate_reg_array[0][0] = datarate_240_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_240_1p5Gsps, + .data_rate_reg_array[2][0] = datarate_240_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_240_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_240_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_240_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_240_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_240_1p5Gsps, + }, + { + /* ((1.7 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3876000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_1p7Gsps), + .data_rate_reg_array[0][0] = datarate_240_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_240_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_240_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_240_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_240_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_240_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_240_1p7Gsps, + .data_rate_reg_array[7][0] = datarate_240_1p7Gsps, + }, + { + /* ((2.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4560000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_2p0Gsps), + .data_rate_reg_array[0][0] = datarate_240_2p0Gsps, + .data_rate_reg_array[1][0] = datarate_240_2p0Gsps, + .data_rate_reg_array[2][0] = datarate_240_2p0Gsps, + 
.data_rate_reg_array[3][0] = datarate_240_2p0Gsps, + .data_rate_reg_array[4][0] = datarate_240_2p0Gsps, + .data_rate_reg_array[5][0] = datarate_240_2p0Gsps, + .data_rate_reg_array[6][0] = datarate_240_2p0Gsps, + .data_rate_reg_array[7][0] = datarate_240_2p0Gsps, + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4788000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_2p1Gsps), + .data_rate_reg_array[0][0] = datarate_240_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_240_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_240_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_240_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_240_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_240_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_240_2p1Gsps, + .data_rate_reg_array[7][0] = datarate_240_2p1Gsps, + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5358000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_2p35Gsps), + .data_rate_reg_array[0][0] = datarate_240_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_240_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_240_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_240_2p35Gsps, + .data_rate_reg_array[4][0] = datarate_240_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_240_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_240_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_240_2p35Gsps, + }, + { + /* ((2.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5700000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_2p5Gsps), + .data_rate_reg_array[0][0] = datarate_240_2p5Gsps, + .data_rate_reg_array[1][0] = datarate_240_2p5Gsps, + .data_rate_reg_array[2][0] = datarate_240_2p5Gsps, + .data_rate_reg_array[3][0] = datarate_240_2p5Gsps, + .data_rate_reg_array[4][0] = datarate_240_2p5Gsps, + .data_rate_reg_array[5][0] = datarate_240_2p5Gsps, + .data_rate_reg_array[6][0] = datarate_240_2p5Gsps, + 
.data_rate_reg_array[7][0] = datarate_240_2p5Gsps, + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5928000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_2p6Gsps), + .data_rate_reg_array[0][0] = datarate_240_2p6Gsps, + .data_rate_reg_array[1][0] = datarate_240_2p6Gsps, + .data_rate_reg_array[2][0] = datarate_240_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_240_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_240_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_240_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_240_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_240_2p6Gsps, + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_2p8Gsps), + .data_rate_reg_array[0][0] = datarate_240_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_240_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_240_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_240_2p8Gsps, + .data_rate_reg_array[4][0] = datarate_240_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_240_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_240_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_240_2p8Gsps, + }, + { + /* ((3.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6840000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_3p0Gsps), + .data_rate_reg_array[0][0] = datarate_240_3p0Gsps, + .data_rate_reg_array[1][0] = datarate_240_3p0Gsps, + .data_rate_reg_array[2][0] = datarate_240_3p0Gsps, + .data_rate_reg_array[3][0] = datarate_240_3p0Gsps, + .data_rate_reg_array[4][0] = datarate_240_3p0Gsps, + .data_rate_reg_array[5][0] = datarate_240_3p0Gsps, + .data_rate_reg_array[6][0] = datarate_240_3p0Gsps, + .data_rate_reg_array[7][0] = datarate_240_3p0Gsps, + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_3p3Gsps), + 
.data_rate_reg_array[0][0] = datarate_240_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_240_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_240_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_240_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_240_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_240_3p3Gsps, + .data_rate_reg_array[6][0] = datarate_240_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_240_3p3Gsps, + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_3p5Gsps), + .data_rate_reg_array[0][0] = datarate_240_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_240_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_240_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_240_3p5Gsps, + .data_rate_reg_array[4][0] = datarate_240_3p5Gsps, + .data_rate_reg_array[5][0] = datarate_240_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_240_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_240_3p5Gsps, + }, + { + /* ((4.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_4p0Gsps), + .data_rate_reg_array[0][0] = datarate_240_4p0Gsps, + .data_rate_reg_array[1][0] = datarate_240_4p0Gsps, + .data_rate_reg_array[2][0] = datarate_240_4p0Gsps, + .data_rate_reg_array[3][0] = datarate_240_4p0Gsps, + .data_rate_reg_array[4][0] = datarate_240_4p0Gsps, + .data_rate_reg_array[5][0] = datarate_240_4p0Gsps, + .data_rate_reg_array[6][0] = datarate_240_4p0Gsps, + .data_rate_reg_array[7][0] = datarate_240_4p0Gsps, + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 10260000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_4p5Gsps), + .data_rate_reg_array[0][0] = datarate_240_4p5Gsps, + .data_rate_reg_array[1][0] = datarate_240_4p5Gsps, + .data_rate_reg_array[2][0] = datarate_240_4p5Gsps, + .data_rate_reg_array[3][0] = datarate_240_4p5Gsps, + 
.data_rate_reg_array[4][0] = datarate_240_4p5Gsps, + .data_rate_reg_array[5][0] = datarate_240_4p5Gsps, + .data_rate_reg_array[6][0] = datarate_240_4p5Gsps, + .data_rate_reg_array[7][0] = datarate_240_4p5Gsps, + }, + { + /* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 11400000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_5p0Gsps), + .data_rate_reg_array[0][0] = datarate_240_5p0Gsps, + .data_rate_reg_array[1][0] = datarate_240_5p0Gsps, + .data_rate_reg_array[2][0] = datarate_240_5p0Gsps, + .data_rate_reg_array[3][0] = datarate_240_5p0Gsps, + .data_rate_reg_array[4][0] = datarate_240_5p0Gsps, + .data_rate_reg_array[5][0] = datarate_240_5p0Gsps, + .data_rate_reg_array[6][0] = datarate_240_5p0Gsps, + .data_rate_reg_array[7][0] = datarate_240_5p0Gsps, + }, + { + /* ((5.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 12540000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_5p5Gsps), + .data_rate_reg_array[0][0] = datarate_240_5p5Gsps, + .data_rate_reg_array[1][0] = datarate_240_5p5Gsps, + .data_rate_reg_array[2][0] = datarate_240_5p5Gsps, + .data_rate_reg_array[3][0] = datarate_240_5p5Gsps, + .data_rate_reg_array[4][0] = datarate_240_5p5Gsps, + .data_rate_reg_array[5][0] = datarate_240_5p5Gsps, + .data_rate_reg_array[6][0] = datarate_240_5p5Gsps, + .data_rate_reg_array[7][0] = datarate_240_5p5Gsps, + }, + { + /* ((6.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 13680000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_240_6p0Gsps), + .data_rate_reg_array[0][0] = datarate_240_6p0Gsps, + .data_rate_reg_array[1][0] = datarate_240_6p0Gsps, + .data_rate_reg_array[2][0] = datarate_240_6p0Gsps, + .data_rate_reg_array[3][0] = datarate_240_6p0Gsps, + .data_rate_reg_array[4][0] = datarate_240_6p0Gsps, + .data_rate_reg_array[5][0] = datarate_240_6p0Gsps, + .data_rate_reg_array[6][0] = datarate_240_6p0Gsps, + .data_rate_reg_array[7][0] = datarate_240_6p0Gsps, + }, +}; + +struct 
csiphy_reg_t bist_3ph_arr_2_4_0[] = { + {0x0230, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0258, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0248, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x024C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0630, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0658, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0648, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x064C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A30, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A34, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A38, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A3C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A58, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AC8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A48, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A4C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_4_0[] = { + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_4_0 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + .bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_4_0), + .bist_3ph_settings_arry = bist_3ph_arr_2_4_0, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_4_0), + .bist_status_arr = bist_status_arr_2_4_0, +}; + +struct data_rate_settings_t data_rate_delta_table_2_4_0 = { + .num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_4_0), + .data_rate_settings = data_rate_settings_2_4_0, +}; + +struct csiphy_reg_parms_t csiphy_v2_4_0 = { + .mipi_csiphy_interrupt_status0_addr = 0x1138, + .mipi_csiphy_interrupt_clear0_addr = 0x1058, + .mipi_csiphy_glbl_irq_cmd_addr = 0x1028, + .size_offset_betn_lanes = 0x400, + .status_reg_params = &status_regs_2_4_0, + .csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_4_0), + .csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_4_0), + .csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_4_0), + .csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_4_0_reg), + 
.csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_4_0_reg), + .csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_4_0_combo_mode_reg), + .csiphy_3ph_combo_config_array_size = 0, + .csiphy_2ph_3ph_config_array_size = 0, + .csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_4_0), + .csiphy_num_common_status_regs = 20, + .aon_sel_params = &aon_cam_select_params_2_4_0, +}; + +struct csiphy_ctrl_t ctrl_reg_2_4_0 = { + .csiphy_common_reg = csiphy_common_reg_2_4_0, + .csiphy_2ph_reg = csiphy_2ph_v2_4_0_reg, + .csiphy_3ph_reg = csiphy_3ph_v2_4_0_reg, + .csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_4_0_combo_mode_reg, + .csiphy_3ph_combo_reg = NULL, + .csiphy_2ph_3ph_mode_reg = NULL, + .csiphy_reg = &csiphy_v2_4_0, + .csiphy_irq_reg = csiphy_irq_reg_2_4_0, + .csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_4_0, + .csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_4_0, + .csiphy_lane_config_reg = csiphy_lane_en_reg_2_4_0, + .data_rates_settings_table = &data_rate_delta_table_2_4_0, + .csiphy_bist_reg = &bist_setting_2_4_0, + .getclockvoting = get_clk_voting_dynamic, +}; + +#endif /* _CAM_CSIPHY_2_4_0_HWREG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_4_1_hwreg.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_4_1_hwreg.h new file mode 100644 index 0000000000..37cb4b20f7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_4_1_hwreg.h @@ -0,0 +1,2037 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_CSIPHY_2_4_1_HWREG_H_ +#define _CAM_CSIPHY_2_4_1_HWREG_H_ + +#include "../cam_csiphy_dev.h" + +struct cam_csiphy_aon_sel_params_t aon_cam_select_params_2_4_1 = { + .aon_cam_sel_offset[0] = 0x01E0, + .aon_cam_sel_offset[1] = 0x01E4, + .cam_sel_mask = BIT(0), + .mclk_sel_mask = BIT(8), +}; + +struct cam_cphy_dphy_status_reg_params_t status_regs_2_4_1 = { + .csiphy_3ph_status0_offset = 0x0340, + .csiphy_2ph_status0_offset = 0x00C0, + .refgen_status_offset = 0x1184, + .cphy_lane_status = {0x0358, 0x0758, 0x0B58}, + .csiphy_3ph_status_size = 24, + .csiphy_2ph_status_size = 20, +}; + +struct csiphy_reg_t csiphy_lane_en_reg_2_4_1[] = { + {0x1014, 0x00, 0x00, CSIPHY_LANE_ENABLE}, +}; + +struct csiphy_reg_t csiphy_common_reg_2_4_1[] = { + {0x1084, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x108C, 0x00, 0x01, CSIPHY_2PH_REGS}, + {0x108C, 0x0E, 0x01, CSIPHY_3PH_REGS}, + {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_enter_reg_2_4_1[] = { + {0x1000, 0x01, 0x14, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_reset_exit_reg_2_4_1[] = { + {0x1000, 0x02, 0x0A, CSIPHY_2PH_REGS}, + {0x1000, 0x00, 0x0A, CSIPHY_2PH_COMBO_REGS}, + {0x1000, 0x0E, 0x0A, CSIPHY_3PH_REGS}, +}; + +struct csiphy_reg_t csiphy_irq_reg_2_4_1[] = { + {0x102c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1030, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1034, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1038, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x103c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1040, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1044, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1048, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x104c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1050, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x1054, 0xff, 0x64, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_4_1_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, 
CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x0C5C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0C60, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_2ph_v2_4_1_combo_mode_reg[] = { + {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0494, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0894, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C94, 0x07, 0xd1, CSIPHY_DEFAULT_PARAMS}, + {0x0E30, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0E08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x8C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0828, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x082C, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0808, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C28, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0060, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0460, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL}, + {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL}, + {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL}, + {0x0860, 0xFD, 0x00, CSIPHY_SKEW_CAL}, + {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL}, +}; + +struct csiphy_reg_t csiphy_3ph_v2_4_1_reg[] = { + {0x0294, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02F0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0694, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x06F8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06FC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06F0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF8, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AFC, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AF0, 0xEF, 0xd3, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0218, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0220, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0224, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0310, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0254, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0284, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06E8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06EC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0618, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0620, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0624, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06BC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0654, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0684, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AE8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AEC, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A18, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A1C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A20, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A24, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A28, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A2C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A64, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB2, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B10, 0x35, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0ABC, 0xD0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A54, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A60, 0xA8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_80Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1F, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x02, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_100Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xB6, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x01, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x6B, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_200Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 
0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0xE4, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_300Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 
0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x9E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_350Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 
0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x8A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_400Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x7B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 
0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_500Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x66, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct 
csiphy_reg_t datarate_241_600Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x58, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_700Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x4E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_800Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x46, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_900Msps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x40, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_1p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x5F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x3C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_1p2Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x35, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_1p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x3F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_1p7Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x26, 0x00, 
CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x2A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x09, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_2p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x26, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x27, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_2p1Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, 
CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x26, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_2p35Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x23, 0x00, 
CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x23, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_2p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_2p6Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x22, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_2p8Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x21, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_3p0Gsps[] = { + /* AFE Settings */ + {0x0268, 
0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x12, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x20, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_3p3Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_3p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0F, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_4p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x0C, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1C, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_4p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x09, 0x00, 
CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1B, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_5p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x09, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x1A, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_5p5Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x01, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x08, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x75, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t datarate_241_6p0Gsps[] = { + /* AFE Settings */ + {0x0268, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x026C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0270, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0274, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0278, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0288, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x028C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0290, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0668, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x066C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0670, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0674, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0678, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0688, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x068C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0690, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A68, 0xF1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A6C, 0x1B, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A70, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A74, 0x00, 0x0A, CSIPHY_DEFAULT_PARAMS}, + {0x0A78, 0x07, 0x00, CSIPHY_CDR_LN_SETTINGS}, + {0x0A88, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0x77, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A90, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + /* Datarate Sensitive */ + {0x020C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0210, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0610, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A0C, 0x19, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0A08, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, + {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A14, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +static struct data_rate_reg_info_t data_rate_settings_2_4_1[] = { + { + /* ((80 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 182400000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_80Msps), + .data_rate_reg_array[0][0] = datarate_241_80Msps, + .data_rate_reg_array[1][0] = datarate_241_80Msps, + .data_rate_reg_array[2][0] = datarate_241_80Msps, + .data_rate_reg_array[3][0] = datarate_241_80Msps, + .data_rate_reg_array[4][0] = datarate_241_80Msps, + .data_rate_reg_array[5][0] = datarate_241_80Msps, + .data_rate_reg_array[6][0] = datarate_241_80Msps, + .data_rate_reg_array[7][0] = datarate_241_80Msps, + }, + { + /* ((100 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 228000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_100Msps), + .data_rate_reg_array[0][0] = datarate_241_100Msps, + .data_rate_reg_array[1][0] = datarate_241_100Msps, + .data_rate_reg_array[2][0] = datarate_241_100Msps, + .data_rate_reg_array[3][0] = datarate_241_100Msps, + .data_rate_reg_array[4][0] = datarate_241_100Msps, + .data_rate_reg_array[5][0] = datarate_241_100Msps, + .data_rate_reg_array[6][0] = datarate_241_100Msps, + .data_rate_reg_array[7][0] = datarate_241_100Msps, + }, + { + 
/* ((200 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 456000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_200Msps), + .data_rate_reg_array[0][0] = datarate_241_200Msps, + .data_rate_reg_array[1][0] = datarate_241_200Msps, + .data_rate_reg_array[2][0] = datarate_241_200Msps, + .data_rate_reg_array[3][0] = datarate_241_200Msps, + .data_rate_reg_array[4][0] = datarate_241_200Msps, + .data_rate_reg_array[5][0] = datarate_241_200Msps, + .data_rate_reg_array[6][0] = datarate_241_200Msps, + .data_rate_reg_array[7][0] = datarate_241_200Msps, + }, + { + /* ((300 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 684000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_300Msps), + .data_rate_reg_array[0][0] = datarate_241_300Msps, + .data_rate_reg_array[1][0] = datarate_241_300Msps, + .data_rate_reg_array[2][0] = datarate_241_300Msps, + .data_rate_reg_array[3][0] = datarate_241_300Msps, + .data_rate_reg_array[4][0] = datarate_241_300Msps, + .data_rate_reg_array[5][0] = datarate_241_300Msps, + .data_rate_reg_array[6][0] = datarate_241_300Msps, + .data_rate_reg_array[7][0] = datarate_241_300Msps, + }, + { + /* ((350 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 798000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_350Msps), + .data_rate_reg_array[0][0] = datarate_241_350Msps, + .data_rate_reg_array[1][0] = datarate_241_350Msps, + .data_rate_reg_array[2][0] = datarate_241_350Msps, + .data_rate_reg_array[3][0] = datarate_241_350Msps, + .data_rate_reg_array[4][0] = datarate_241_350Msps, + .data_rate_reg_array[5][0] = datarate_241_350Msps, + .data_rate_reg_array[6][0] = datarate_241_350Msps, + .data_rate_reg_array[7][0] = datarate_241_350Msps, + }, + { + /* ((400 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 912000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_400Msps), + .data_rate_reg_array[0][0] = datarate_241_400Msps, + .data_rate_reg_array[1][0] 
= datarate_241_400Msps, + .data_rate_reg_array[2][0] = datarate_241_400Msps, + .data_rate_reg_array[3][0] = datarate_241_400Msps, + .data_rate_reg_array[4][0] = datarate_241_400Msps, + .data_rate_reg_array[5][0] = datarate_241_400Msps, + .data_rate_reg_array[6][0] = datarate_241_400Msps, + .data_rate_reg_array[7][0] = datarate_241_400Msps, + }, + { + /* ((500 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1140000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_500Msps), + .data_rate_reg_array[0][0] = datarate_241_500Msps, + .data_rate_reg_array[1][0] = datarate_241_500Msps, + .data_rate_reg_array[2][0] = datarate_241_500Msps, + .data_rate_reg_array[3][0] = datarate_241_500Msps, + .data_rate_reg_array[4][0] = datarate_241_500Msps, + .data_rate_reg_array[5][0] = datarate_241_500Msps, + .data_rate_reg_array[6][0] = datarate_241_500Msps, + .data_rate_reg_array[7][0] = datarate_241_500Msps, + }, + { + /* ((600 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1368000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_600Msps), + .data_rate_reg_array[0][0] = datarate_241_600Msps, + .data_rate_reg_array[1][0] = datarate_241_600Msps, + .data_rate_reg_array[2][0] = datarate_241_600Msps, + .data_rate_reg_array[3][0] = datarate_241_600Msps, + .data_rate_reg_array[4][0] = datarate_241_600Msps, + .data_rate_reg_array[5][0] = datarate_241_600Msps, + .data_rate_reg_array[6][0] = datarate_241_600Msps, + .data_rate_reg_array[7][0] = datarate_241_600Msps, + }, + { + /* ((700 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1596000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_700Msps), + .data_rate_reg_array[0][0] = datarate_241_700Msps, + .data_rate_reg_array[1][0] = datarate_241_700Msps, + .data_rate_reg_array[2][0] = datarate_241_700Msps, + .data_rate_reg_array[3][0] = datarate_241_700Msps, + .data_rate_reg_array[4][0] = datarate_241_700Msps, + .data_rate_reg_array[5][0] = 
datarate_241_700Msps, + .data_rate_reg_array[6][0] = datarate_241_700Msps, + .data_rate_reg_array[7][0] = datarate_241_700Msps, + }, + { + /* ((800 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 1824000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_800Msps), + .data_rate_reg_array[0][0] = datarate_241_800Msps, + .data_rate_reg_array[1][0] = datarate_241_800Msps, + .data_rate_reg_array[2][0] = datarate_241_800Msps, + .data_rate_reg_array[3][0] = datarate_241_800Msps, + .data_rate_reg_array[4][0] = datarate_241_800Msps, + .data_rate_reg_array[5][0] = datarate_241_800Msps, + .data_rate_reg_array[6][0] = datarate_241_800Msps, + .data_rate_reg_array[7][0] = datarate_241_800Msps, + }, + { + /* ((900 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2052000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_900Msps), + .data_rate_reg_array[0][0] = datarate_241_900Msps, + .data_rate_reg_array[1][0] = datarate_241_900Msps, + .data_rate_reg_array[2][0] = datarate_241_900Msps, + .data_rate_reg_array[3][0] = datarate_241_900Msps, + .data_rate_reg_array[4][0] = datarate_241_900Msps, + .data_rate_reg_array[5][0] = datarate_241_900Msps, + .data_rate_reg_array[6][0] = datarate_241_900Msps, + .data_rate_reg_array[7][0] = datarate_241_900Msps, + }, + { + /* ((1000 MSpS) * (10^6) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2280000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_1p0Gsps), + .data_rate_reg_array[0][0] = datarate_241_1p0Gsps, + .data_rate_reg_array[1][0] = datarate_241_1p0Gsps, + .data_rate_reg_array[2][0] = datarate_241_1p0Gsps, + .data_rate_reg_array[3][0] = datarate_241_1p0Gsps, + .data_rate_reg_array[4][0] = datarate_241_1p0Gsps, + .data_rate_reg_array[5][0] = datarate_241_1p0Gsps, + .data_rate_reg_array[6][0] = datarate_241_1p0Gsps, + .data_rate_reg_array[7][0] = datarate_241_1p0Gsps, + }, + { + /* ((1.2 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 2736000000, + 
.data_rate_reg_array_size = ARRAY_SIZE(datarate_241_1p2Gsps), + .data_rate_reg_array[0][0] = datarate_241_1p2Gsps, + .data_rate_reg_array[1][0] = datarate_241_1p2Gsps, + .data_rate_reg_array[2][0] = datarate_241_1p2Gsps, + .data_rate_reg_array[3][0] = datarate_241_1p2Gsps, + .data_rate_reg_array[4][0] = datarate_241_1p2Gsps, + .data_rate_reg_array[5][0] = datarate_241_1p2Gsps, + .data_rate_reg_array[6][0] = datarate_241_1p2Gsps, + .data_rate_reg_array[7][0] = datarate_241_1p2Gsps, + }, + { + /* ((1.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3420000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_1p5Gsps), + .data_rate_reg_array[0][0] = datarate_241_1p5Gsps, + .data_rate_reg_array[1][0] = datarate_241_1p5Gsps, + .data_rate_reg_array[2][0] = datarate_241_1p5Gsps, + .data_rate_reg_array[3][0] = datarate_241_1p5Gsps, + .data_rate_reg_array[4][0] = datarate_241_1p5Gsps, + .data_rate_reg_array[5][0] = datarate_241_1p5Gsps, + .data_rate_reg_array[6][0] = datarate_241_1p5Gsps, + .data_rate_reg_array[7][0] = datarate_241_1p5Gsps, + }, + { + /* ((1.7 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 3876000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_1p7Gsps), + .data_rate_reg_array[0][0] = datarate_241_1p7Gsps, + .data_rate_reg_array[1][0] = datarate_241_1p7Gsps, + .data_rate_reg_array[2][0] = datarate_241_1p7Gsps, + .data_rate_reg_array[3][0] = datarate_241_1p7Gsps, + .data_rate_reg_array[4][0] = datarate_241_1p7Gsps, + .data_rate_reg_array[5][0] = datarate_241_1p7Gsps, + .data_rate_reg_array[6][0] = datarate_241_1p7Gsps, + .data_rate_reg_array[7][0] = datarate_241_1p7Gsps, + }, + { + /* ((2.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4560000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_2p0Gsps), + .data_rate_reg_array[0][0] = datarate_241_2p0Gsps, + .data_rate_reg_array[1][0] = datarate_241_2p0Gsps, + .data_rate_reg_array[2][0] = datarate_241_2p0Gsps, + 
.data_rate_reg_array[3][0] = datarate_241_2p0Gsps, + .data_rate_reg_array[4][0] = datarate_241_2p0Gsps, + .data_rate_reg_array[5][0] = datarate_241_2p0Gsps, + .data_rate_reg_array[6][0] = datarate_241_2p0Gsps, + .data_rate_reg_array[7][0] = datarate_241_2p0Gsps, + }, + { + /* ((2.1 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 4788000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_2p1Gsps), + .data_rate_reg_array[0][0] = datarate_241_2p1Gsps, + .data_rate_reg_array[1][0] = datarate_241_2p1Gsps, + .data_rate_reg_array[2][0] = datarate_241_2p1Gsps, + .data_rate_reg_array[3][0] = datarate_241_2p1Gsps, + .data_rate_reg_array[4][0] = datarate_241_2p1Gsps, + .data_rate_reg_array[5][0] = datarate_241_2p1Gsps, + .data_rate_reg_array[6][0] = datarate_241_2p1Gsps, + .data_rate_reg_array[7][0] = datarate_241_2p1Gsps, + }, + { + /* ((2.35 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5358000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_2p35Gsps), + .data_rate_reg_array[0][0] = datarate_241_2p35Gsps, + .data_rate_reg_array[1][0] = datarate_241_2p35Gsps, + .data_rate_reg_array[2][0] = datarate_241_2p35Gsps, + .data_rate_reg_array[3][0] = datarate_241_2p35Gsps, + .data_rate_reg_array[4][0] = datarate_241_2p35Gsps, + .data_rate_reg_array[5][0] = datarate_241_2p35Gsps, + .data_rate_reg_array[6][0] = datarate_241_2p35Gsps, + .data_rate_reg_array[7][0] = datarate_241_2p35Gsps, + }, + { + /* ((2.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5700000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_2p5Gsps), + .data_rate_reg_array[0][0] = datarate_241_2p5Gsps, + .data_rate_reg_array[1][0] = datarate_241_2p5Gsps, + .data_rate_reg_array[2][0] = datarate_241_2p5Gsps, + .data_rate_reg_array[3][0] = datarate_241_2p5Gsps, + .data_rate_reg_array[4][0] = datarate_241_2p5Gsps, + .data_rate_reg_array[5][0] = datarate_241_2p5Gsps, + .data_rate_reg_array[6][0] = datarate_241_2p5Gsps, + 
.data_rate_reg_array[7][0] = datarate_241_2p5Gsps, + }, + { + /* ((2.6 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 5928000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_2p6Gsps), + .data_rate_reg_array[0][0] = datarate_241_2p6Gsps, + .data_rate_reg_array[1][0] = datarate_241_2p6Gsps, + .data_rate_reg_array[2][0] = datarate_241_2p6Gsps, + .data_rate_reg_array[3][0] = datarate_241_2p6Gsps, + .data_rate_reg_array[4][0] = datarate_241_2p6Gsps, + .data_rate_reg_array[5][0] = datarate_241_2p6Gsps, + .data_rate_reg_array[6][0] = datarate_241_2p6Gsps, + .data_rate_reg_array[7][0] = datarate_241_2p6Gsps, + }, + { + /* ((2.8 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6384000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_2p8Gsps), + .data_rate_reg_array[0][0] = datarate_241_2p8Gsps, + .data_rate_reg_array[1][0] = datarate_241_2p8Gsps, + .data_rate_reg_array[2][0] = datarate_241_2p8Gsps, + .data_rate_reg_array[3][0] = datarate_241_2p8Gsps, + .data_rate_reg_array[4][0] = datarate_241_2p8Gsps, + .data_rate_reg_array[5][0] = datarate_241_2p8Gsps, + .data_rate_reg_array[6][0] = datarate_241_2p8Gsps, + .data_rate_reg_array[7][0] = datarate_241_2p8Gsps, + }, + { + /* ((3.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 6840000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_3p0Gsps), + .data_rate_reg_array[0][0] = datarate_241_3p0Gsps, + .data_rate_reg_array[1][0] = datarate_241_3p0Gsps, + .data_rate_reg_array[2][0] = datarate_241_3p0Gsps, + .data_rate_reg_array[3][0] = datarate_241_3p0Gsps, + .data_rate_reg_array[4][0] = datarate_241_3p0Gsps, + .data_rate_reg_array[5][0] = datarate_241_3p0Gsps, + .data_rate_reg_array[6][0] = datarate_241_3p0Gsps, + .data_rate_reg_array[7][0] = datarate_241_3p0Gsps, + }, + { + /* ((3.3 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7524000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_3p3Gsps), + 
.data_rate_reg_array[0][0] = datarate_241_3p3Gsps, + .data_rate_reg_array[1][0] = datarate_241_3p3Gsps, + .data_rate_reg_array[2][0] = datarate_241_3p3Gsps, + .data_rate_reg_array[3][0] = datarate_241_3p3Gsps, + .data_rate_reg_array[4][0] = datarate_241_3p3Gsps, + .data_rate_reg_array[5][0] = datarate_241_3p3Gsps, + .data_rate_reg_array[6][0] = datarate_241_3p3Gsps, + .data_rate_reg_array[7][0] = datarate_241_3p3Gsps, + }, + { + /* ((3.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 7980000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_3p5Gsps), + .data_rate_reg_array[0][0] = datarate_241_3p5Gsps, + .data_rate_reg_array[1][0] = datarate_241_3p5Gsps, + .data_rate_reg_array[2][0] = datarate_241_3p5Gsps, + .data_rate_reg_array[3][0] = datarate_241_3p5Gsps, + .data_rate_reg_array[4][0] = datarate_241_3p5Gsps, + .data_rate_reg_array[5][0] = datarate_241_3p5Gsps, + .data_rate_reg_array[6][0] = datarate_241_3p5Gsps, + .data_rate_reg_array[7][0] = datarate_241_3p5Gsps, + }, + { + /* ((4.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 9120000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_4p0Gsps), + .data_rate_reg_array[0][0] = datarate_241_4p0Gsps, + .data_rate_reg_array[1][0] = datarate_241_4p0Gsps, + .data_rate_reg_array[2][0] = datarate_241_4p0Gsps, + .data_rate_reg_array[3][0] = datarate_241_4p0Gsps, + .data_rate_reg_array[4][0] = datarate_241_4p0Gsps, + .data_rate_reg_array[5][0] = datarate_241_4p0Gsps, + .data_rate_reg_array[6][0] = datarate_241_4p0Gsps, + .data_rate_reg_array[7][0] = datarate_241_4p0Gsps, + }, + { + /* ((4.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 10260000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_4p5Gsps), + .data_rate_reg_array[0][0] = datarate_241_4p5Gsps, + .data_rate_reg_array[1][0] = datarate_241_4p5Gsps, + .data_rate_reg_array[2][0] = datarate_241_4p5Gsps, + .data_rate_reg_array[3][0] = datarate_241_4p5Gsps, + 
.data_rate_reg_array[4][0] = datarate_241_4p5Gsps, + .data_rate_reg_array[5][0] = datarate_241_4p5Gsps, + .data_rate_reg_array[6][0] = datarate_241_4p5Gsps, + .data_rate_reg_array[7][0] = datarate_241_4p5Gsps, + }, + { + /* ((5.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 11400000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_5p0Gsps), + .data_rate_reg_array[0][0] = datarate_241_5p0Gsps, + .data_rate_reg_array[1][0] = datarate_241_5p0Gsps, + .data_rate_reg_array[2][0] = datarate_241_5p0Gsps, + .data_rate_reg_array[3][0] = datarate_241_5p0Gsps, + .data_rate_reg_array[4][0] = datarate_241_5p0Gsps, + .data_rate_reg_array[5][0] = datarate_241_5p0Gsps, + .data_rate_reg_array[6][0] = datarate_241_5p0Gsps, + .data_rate_reg_array[7][0] = datarate_241_5p0Gsps, + }, + { + /* ((5.5 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 12540000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_5p5Gsps), + .data_rate_reg_array[0][0] = datarate_241_5p5Gsps, + .data_rate_reg_array[1][0] = datarate_241_5p5Gsps, + .data_rate_reg_array[2][0] = datarate_241_5p5Gsps, + .data_rate_reg_array[3][0] = datarate_241_5p5Gsps, + .data_rate_reg_array[4][0] = datarate_241_5p5Gsps, + .data_rate_reg_array[5][0] = datarate_241_5p5Gsps, + .data_rate_reg_array[6][0] = datarate_241_5p5Gsps, + .data_rate_reg_array[7][0] = datarate_241_5p5Gsps, + }, + { + /* ((6.0 GSpS) * (10^9) * (2.28 bits/symbol)) rounded value */ + .bandwidth = 13680000000, + .data_rate_reg_array_size = ARRAY_SIZE(datarate_241_6p0Gsps), + .data_rate_reg_array[0][0] = datarate_241_6p0Gsps, + .data_rate_reg_array[1][0] = datarate_241_6p0Gsps, + .data_rate_reg_array[2][0] = datarate_241_6p0Gsps, + .data_rate_reg_array[3][0] = datarate_241_6p0Gsps, + .data_rate_reg_array[4][0] = datarate_241_6p0Gsps, + .data_rate_reg_array[5][0] = datarate_241_6p0Gsps, + .data_rate_reg_array[6][0] = datarate_241_6p0Gsps, + .data_rate_reg_array[7][0] = datarate_241_6p0Gsps, + }, +}; + +struct 
csiphy_reg_t bist_3ph_arr_2_4_1[] = { + {0x0230, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0258, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x02D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0248, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x024C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0250, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0244, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0240, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0630, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0658, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06C8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x06D8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0648, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x064C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0650, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0644, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0640, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A30, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A34, 0xFA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A38, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A3C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A58, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AC8, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD0, 0xAA, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD4, 0x64, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AD8, 0x3E, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A48, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A4C, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A50, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0A44, 0xB1, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A40, 0x85, 0x00, CSIPHY_DEFAULT_PARAMS}, +}; + +struct csiphy_reg_t bist_status_arr_2_4_1[] = { + {0x0344, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0744, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x0B44, 0x00, 0x00, CSIPHY_3PH_REGS}, + {0x00C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x04C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x08C0, 0x00, 0x00, CSIPHY_2PH_REGS}, + {0x0CC0, 0x00, 0x00, CSIPHY_2PH_REGS}, +}; + +struct bist_reg_settings_t bist_setting_2_4_1 = { + .error_status_val_3ph = 0x10, + .error_status_val_2ph = 0x10, + .set_status_update_3ph_base_offset = 0x0240, + .set_status_update_2ph_base_offset = 0x0050, + .bist_status_3ph_base_offset = 0x0344, + .bist_status_2ph_base_offset = 0x00C0, + .bist_sensor_data_3ph_status_base_offset = 0x0340, + .bist_counter_3ph_base_offset = 0x0348, + .bist_counter_2ph_base_offset = 0x00C8, + .number_of_counters = 2, + .num_3ph_bist_settings = ARRAY_SIZE(bist_3ph_arr_2_4_1), + .bist_3ph_settings_arry = bist_3ph_arr_2_4_1, + .bist_2ph_settings_arry = NULL, + .num_2ph_bist_settings = 0, + .num_status_reg = ARRAY_SIZE(bist_status_arr_2_4_1), + .bist_status_arr = bist_status_arr_2_4_1, +}; + +struct data_rate_settings_t data_rate_delta_table_2_4_1 = { + .num_data_rate_settings = ARRAY_SIZE(data_rate_settings_2_4_1), + .data_rate_settings = data_rate_settings_2_4_1, +}; + +struct csiphy_reg_parms_t csiphy_v2_4_1 = { + .mipi_csiphy_interrupt_status0_addr = 0x1138, + .mipi_csiphy_interrupt_clear0_addr = 0x1058, + .mipi_csiphy_glbl_irq_cmd_addr = 0x1028, + .size_offset_betn_lanes = 0x400, + .status_reg_params = &status_regs_2_4_1, + .csiphy_common_reg_array_size = ARRAY_SIZE(csiphy_common_reg_2_4_1), + .csiphy_reset_enter_array_size = ARRAY_SIZE(csiphy_reset_enter_reg_2_4_1), + .csiphy_reset_exit_array_size = ARRAY_SIZE(csiphy_reset_exit_reg_2_4_1), + .csiphy_2ph_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_4_1_reg), + 
.csiphy_3ph_config_array_size = ARRAY_SIZE(csiphy_3ph_v2_4_1_reg), + .csiphy_2ph_combo_config_array_size = ARRAY_SIZE(csiphy_2ph_v2_4_1_combo_mode_reg), + .csiphy_3ph_combo_config_array_size = 0, + .csiphy_2ph_3ph_config_array_size = 0, + .csiphy_interrupt_status_size = ARRAY_SIZE(csiphy_irq_reg_2_4_1), + .csiphy_num_common_status_regs = 20, + .aon_sel_params = &aon_cam_select_params_2_4_1, +}; + +struct csiphy_ctrl_t ctrl_reg_2_4_1 = { + .csiphy_common_reg = csiphy_common_reg_2_4_1, + .csiphy_2ph_reg = csiphy_2ph_v2_4_1_reg, + .csiphy_3ph_reg = csiphy_3ph_v2_4_1_reg, + .csiphy_2ph_combo_mode_reg = csiphy_2ph_v2_4_1_combo_mode_reg, + .csiphy_3ph_combo_reg = NULL, + .csiphy_2ph_3ph_mode_reg = NULL, + .csiphy_reg = &csiphy_v2_4_1, + .csiphy_irq_reg = csiphy_irq_reg_2_4_1, + .csiphy_reset_enter_regs = csiphy_reset_enter_reg_2_4_1, + .csiphy_reset_exit_regs = csiphy_reset_exit_reg_2_4_1, + .csiphy_lane_config_reg = csiphy_lane_en_reg_2_4_1, + .data_rates_settings_table = &data_rate_delta_table_2_4_1, + .csiphy_bist_reg = &bist_setting_2_4_1, + .getclockvoting = get_clk_voting_dynamic, +}; + +#endif /* _CAM_CSIPHY_2_4_1_HWREG_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.c new file mode 100644 index 0000000000..07f9d258b7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.c @@ -0,0 +1,1574 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include + +#include "cam_eeprom_core.h" +#include "cam_eeprom_soc.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cam_packet_util.h" +#include "cam_mem_mgr_api.h" + +#define MAX_READ_SIZE 0x7FFFF + +/** + * cam_eeprom_read_memory() - read map data into buffer + * @e_ctrl: eeprom control struct + * @block: block to be read + * + * This function iterates through blocks stored in block->map, reads each + * region and concatenate them into the pre-allocated block->mapdata + */ +static int cam_eeprom_read_memory(struct cam_eeprom_ctrl_t *e_ctrl, + struct cam_eeprom_memory_block_t *block) +{ + int rc = 0; + int j; + struct cam_sensor_i2c_reg_setting i2c_reg_settings = {0}; + struct cam_sensor_i2c_reg_array i2c_reg_array = {0}; + struct cam_eeprom_memory_map_t *emap = block->map; + struct cam_eeprom_soc_private *eb_info = NULL; + uint8_t *memptr = block->mapdata; + + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "e_ctrl is NULL"); + return -EINVAL; + } + + eb_info = (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + + for (j = 0; j < block->num_map; j++) { + CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X", emap[j].saddr); + if (emap[j].saddr) { + eb_info->i2c_info.slave_addr = emap[j].saddr; + rc = cam_eeprom_update_i2c_info(e_ctrl, + &eb_info->i2c_info); + if (rc) { + CAM_ERR(CAM_EEPROM, + "failed: to update i2c info rc %d", + rc); + return rc; + } + } + + if (emap[j].page.valid_size) { + i2c_reg_settings.addr_type = emap[j].page.addr_type; + i2c_reg_settings.data_type = emap[j].page.data_type; + i2c_reg_settings.size = 1; + i2c_reg_array.reg_addr = emap[j].page.addr; + i2c_reg_array.reg_data = emap[j].page.data; + i2c_reg_array.delay = emap[j].page.delay; + i2c_reg_settings.reg_setting = &i2c_reg_array; + rc = camera_io_dev_write(&e_ctrl->io_master_info, + &i2c_reg_settings); + if (rc) { + CAM_ERR(CAM_EEPROM, "page write failed rc %d", + rc); + return rc; + } + } + + if (emap[j].pageen.valid_size) { + 
i2c_reg_settings.addr_type = emap[j].pageen.addr_type; + i2c_reg_settings.data_type = emap[j].pageen.data_type; + i2c_reg_settings.size = 1; + i2c_reg_array.reg_addr = emap[j].pageen.addr; + i2c_reg_array.reg_data = emap[j].pageen.data; + i2c_reg_array.delay = emap[j].pageen.delay; + i2c_reg_settings.reg_setting = &i2c_reg_array; + rc = camera_io_dev_write(&e_ctrl->io_master_info, + &i2c_reg_settings); + if (rc) { + CAM_ERR(CAM_EEPROM, "page enable failed rc %d", + rc); + return rc; + } + } + + if (emap[j].poll.valid_size) { + rc = camera_io_dev_poll(&e_ctrl->io_master_info, + emap[j].poll.addr, emap[j].poll.data, + 0, emap[j].poll.addr_type, + emap[j].poll.data_type, + emap[j].poll.delay); + if (rc) { + CAM_ERR(CAM_EEPROM, "poll failed rc %d", + rc); + return rc; + } + } + + if (emap[j].mem.valid_size) { + rc = camera_io_dev_read_seq(&e_ctrl->io_master_info, + emap[j].mem.addr, memptr, + emap[j].mem.addr_type, + emap[j].mem.data_type, + emap[j].mem.valid_size); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "read failed rc %d", + rc); + return rc; + } + memptr += emap[j].mem.valid_size; + } + + if (emap[j].pageen.valid_size) { + i2c_reg_settings.addr_type = emap[j].pageen.addr_type; + i2c_reg_settings.data_type = emap[j].pageen.data_type; + i2c_reg_settings.size = 1; + i2c_reg_array.reg_addr = emap[j].pageen.addr; + i2c_reg_array.reg_data = 0; + i2c_reg_array.delay = emap[j].pageen.delay; + i2c_reg_settings.reg_setting = &i2c_reg_array; + rc = camera_io_dev_write(&e_ctrl->io_master_info, + &i2c_reg_settings); + if (rc) { + CAM_ERR(CAM_EEPROM, + "page disable failed rc %d", + rc); + return rc; + } + } + } + return rc; +} + +/** + * cam_eeprom_power_up - Power up eeprom hardware + * @e_ctrl: ctrl structure + * @power_info: power up/down info for eeprom + * + * Returns success or failure + */ +static int cam_eeprom_power_up(struct cam_eeprom_ctrl_t *e_ctrl, + struct cam_sensor_power_ctrl_t *power_info) +{ + int32_t rc = 0; + struct cam_hw_soc_info *soc_info = 
&e_ctrl->soc_info; + struct completion *i3c_probe_completion = NULL; + + /* Parse and fill vreg params for power up settings */ + rc = msm_camera_fill_vreg_params( + &e_ctrl->soc_info, + power_info->power_setting, + power_info->power_setting_size); + if (rc) { + CAM_ERR(CAM_EEPROM, + "failed to fill power up vreg params rc:%d", rc); + return rc; + } + + /* Parse and fill vreg params for power down settings*/ + rc = msm_camera_fill_vreg_params( + &e_ctrl->soc_info, + power_info->power_down_setting, + power_info->power_down_setting_size); + if (rc) { + CAM_ERR(CAM_EEPROM, + "failed to fill power down vreg params rc:%d", rc); + return rc; + } + + power_info->dev = soc_info->dev; + + if (e_ctrl->io_master_info.master_type == I3C_MASTER) + i3c_probe_completion = cam_eeprom_get_i3c_completion(e_ctrl->soc_info.index); + + rc = cam_sensor_core_power_up(power_info, soc_info, i3c_probe_completion); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed in eeprom power up rc %d", rc); + return rc; + } + + rc = camera_io_init(&(e_ctrl->io_master_info)); + if (rc) { + CAM_ERR(CAM_EEPROM, "cci_init failed"); + goto cci_failure; + } + + return rc; +cci_failure: + if (cam_sensor_util_power_down(power_info, soc_info)) + CAM_ERR(CAM_EEPROM, "Power down failure"); + + return rc; +} + +/** + * cam_eeprom_power_down - Power down eeprom hardware + * @e_ctrl: ctrl structure + * + * Returns success or failure + */ +static int cam_eeprom_power_down(struct cam_eeprom_ctrl_t *e_ctrl) +{ + struct cam_sensor_power_ctrl_t *power_info; + struct cam_hw_soc_info *soc_info; + struct cam_eeprom_soc_private *soc_private; + int rc = 0; + + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "failed: e_ctrl %pK", e_ctrl); + return -EINVAL; + } + + soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + power_info = &soc_private->power_info; + soc_info = &e_ctrl->soc_info; + + if (!power_info) { + CAM_ERR(CAM_EEPROM, "failed: power_info %pK", power_info); + return -EINVAL; + } + rc = 
cam_sensor_util_power_down(power_info, soc_info); + if (rc) { + CAM_ERR(CAM_EEPROM, "power down the core is failed:%d", rc); + return rc; + } + + camera_io_release(&(e_ctrl->io_master_info)); + + return rc; +} + +/** + * cam_eeprom_match_id - match eeprom id + * @e_ctrl: ctrl structure + * + * Returns success or failure + */ +static int cam_eeprom_match_id(struct cam_eeprom_ctrl_t *e_ctrl) +{ + int rc; + struct camera_io_master *client = &e_ctrl->io_master_info; + uint8_t id[2]; + + rc = cam_spi_query_id(client, 0, CAMERA_SENSOR_I2C_TYPE_WORD, + &id[0], 2); + if (rc) + return rc; + CAM_DBG(CAM_EEPROM, "read 0x%x 0x%x, check 0x%x 0x%x", + id[0], id[1], client->spi_client->mfr_id0, + client->spi_client->device_id0); + if (id[0] != client->spi_client->mfr_id0 + || id[1] != client->spi_client->device_id0) + return -ENODEV; + return 0; +} + +/** + * cam_eeprom_parse_read_memory_map - Parse memory map + * @of_node: device node + * @e_ctrl: ctrl structure + * + * Returns success or failure + */ +int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node, + struct cam_eeprom_ctrl_t *e_ctrl) +{ + int32_t rc = 0; + struct cam_eeprom_soc_private *soc_private; + struct cam_sensor_power_ctrl_t *power_info; + + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "failed: e_ctrl is NULL"); + return -EINVAL; + } + + soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + power_info = &soc_private->power_info; + + rc = cam_eeprom_parse_dt_memory_map(of_node, &e_ctrl->cal_data); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed: eeprom dt parse rc %d", rc); + return rc; + } + rc = cam_eeprom_power_up(e_ctrl, power_info); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed: eeprom power up rc %d", rc); + goto data_mem_free; + } + + e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG; + if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) { + rc = cam_eeprom_match_id(e_ctrl); + if (rc) { + CAM_DBG(CAM_EEPROM, "eeprom not matching %d", rc); + goto power_down; + } + } + rc = 
cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data); + if (rc) { + CAM_ERR(CAM_EEPROM, "read_eeprom_memory failed"); + goto power_down; + } + + rc = cam_eeprom_power_down(e_ctrl); + if (rc) + CAM_ERR(CAM_EEPROM, "failed: eeprom power down rc %d", rc); + + e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE; + return rc; +power_down: + cam_eeprom_power_down(e_ctrl); +data_mem_free: + vfree(e_ctrl->cal_data.mapdata); + vfree(e_ctrl->cal_data.map); + e_ctrl->cal_data.num_data = 0; + e_ctrl->cal_data.num_map = 0; + e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE; + return rc; +} + +/** + * cam_eeprom_get_dev_handle - get device handle + * @e_ctrl: ctrl structure + * @arg: Camera control command argument + * + * Returns success or failure + */ +static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl, + void *arg) +{ + struct cam_sensor_acquire_dev eeprom_acq_dev; + struct cam_create_dev_hdl bridge_params; + struct cam_control *cmd = (struct cam_control *)arg; + + if (e_ctrl->bridge_intf.device_hdl != -1) { + CAM_ERR(CAM_EEPROM, "Device is already acquired"); + return -EFAULT; + } + if (copy_from_user(&eeprom_acq_dev, + u64_to_user_ptr(cmd->handle), + sizeof(eeprom_acq_dev))) { + CAM_ERR(CAM_EEPROM, + "EEPROM:ACQUIRE_DEV: copy from user failed"); + return -EFAULT; + } + + bridge_params.session_hdl = eeprom_acq_dev.session_handle; + bridge_params.ops = &e_ctrl->bridge_intf.ops; + bridge_params.v4l2_sub_dev_flag = 0; + bridge_params.media_entity_flag = 0; + bridge_params.priv = e_ctrl; + bridge_params.dev_id = CAM_EEPROM; + + eeprom_acq_dev.device_handle = + cam_create_device_hdl(&bridge_params); + if (eeprom_acq_dev.device_handle <= 0) { + CAM_ERR(CAM_EEPROM, "Can not create device handle"); + return -EFAULT; + } + e_ctrl->bridge_intf.device_hdl = eeprom_acq_dev.device_handle; + e_ctrl->bridge_intf.session_hdl = eeprom_acq_dev.session_handle; + + CAM_DBG(CAM_EEPROM, "Device Handle: %d", eeprom_acq_dev.device_handle); + if 
(copy_to_user(u64_to_user_ptr(cmd->handle), + &eeprom_acq_dev, sizeof(struct cam_sensor_acquire_dev))) { + CAM_ERR(CAM_EEPROM, "EEPROM:ACQUIRE_DEV: copy to user failed"); + return -EFAULT; + } + return 0; +} + +/** + * cam_eeprom_update_slaveInfo - Update slave info + * @e_ctrl: ctrl structure + * @cmd_buf: command buffer + * + * Returns success or failure + */ +static int32_t cam_eeprom_update_slaveInfo(struct cam_eeprom_ctrl_t *e_ctrl, + void *cmd_buf) +{ + int32_t rc = 0; + struct cam_eeprom_soc_private *soc_private; + struct cam_cmd_i2c_info *cmd_i2c_info = NULL; + + soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + cmd_i2c_info = (struct cam_cmd_i2c_info *)cmd_buf; + soc_private->i2c_info.slave_addr = cmd_i2c_info->slave_addr; + soc_private->i2c_info.i2c_freq_mode = cmd_i2c_info->i2c_freq_mode; + + rc = cam_eeprom_update_i2c_info(e_ctrl, + &soc_private->i2c_info); + CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x Freq Mode: %d", + soc_private->i2c_info.slave_addr, + soc_private->i2c_info.i2c_freq_mode); + + return rc; +} + +/** + * cam_eeprom_parse_memory_map - Parse memory map info + * @data: memory block data + * @cmd_buf: command buffer + * @cmd_length: command buffer length + * @num_map: memory map size + * @cmd_length_bytes: command length processed in this function + * + * Returns success or failure + */ +static int32_t cam_eeprom_parse_memory_map( + struct cam_eeprom_memory_block_t *data, + void *cmd_buf, int cmd_length, uint32_t *cmd_length_bytes, + int *num_map, size_t remain_buf_len) +{ + int32_t rc = 0; + int32_t cnt = 0; + int32_t processed_size = 0; + int32_t payload_count; + uint8_t generic_op_code; + struct cam_eeprom_memory_map_t *map = data->map; + struct common_header *cmm_hdr = + (struct common_header *)cmd_buf; + uint16_t cmd_length_in_bytes = 0; + struct cam_cmd_i2c_random_wr *i2c_random_wr = NULL; + struct cam_cmd_i2c_continuous_rd *i2c_cont_rd = NULL; + struct cam_cmd_conditional_wait *i2c_poll = NULL; + struct 
cam_cmd_unconditional_wait *i2c_uncond_wait = NULL; + size_t validate_size = 0; + + generic_op_code = cmm_hdr->fifth_byte; + + if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR) + validate_size = sizeof(struct cam_cmd_i2c_random_wr); + else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD) + validate_size = sizeof(struct cam_cmd_i2c_continuous_rd); + else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) + validate_size = sizeof(struct cam_cmd_unconditional_wait); + + if (remain_buf_len < validate_size || + *num_map >= (MSM_EEPROM_MAX_MEM_MAP_CNT * + MSM_EEPROM_MEMORY_MAP_MAX_SIZE)) { + CAM_ERR(CAM_EEPROM, "not enough buffer"); + return -EINVAL; + } + switch (cmm_hdr->cmd_type) { + case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR: + i2c_random_wr = (struct cam_cmd_i2c_random_wr *)cmd_buf; + payload_count = i2c_random_wr->header.count; + + if (payload_count == 0 || + payload_count >= MSM_EEPROM_MAX_MEM_MAP_CNT || + (size_t)*num_map >= ((MSM_EEPROM_MAX_MEM_MAP_CNT * + MSM_EEPROM_MEMORY_MAP_MAX_SIZE) - + payload_count)) { + CAM_ERR(CAM_EEPROM, "OOB Error"); + return -EINVAL; + } + cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_random_wr) + + ((payload_count - 1) * + sizeof(struct i2c_random_wr_payload)); + + if (cmd_length_in_bytes > remain_buf_len) { + CAM_ERR(CAM_EEPROM, "Not enough buffer remaining"); + return -EINVAL; + } + for (cnt = 0; cnt < (payload_count); + cnt++) { + map[*num_map + cnt].page.addr = + i2c_random_wr->random_wr_payload_flex[cnt].reg_addr; + map[*num_map + cnt].page.addr_type = + i2c_random_wr->header.addr_type; + map[*num_map + cnt].page.data = + i2c_random_wr->random_wr_payload_flex[cnt].reg_data; + map[*num_map + cnt].page.data_type = + i2c_random_wr->header.data_type; + map[*num_map + cnt].page.valid_size = 1; + } + + *num_map += (payload_count - 1); + processed_size += + cmd_length_in_bytes; + break; + case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD: + i2c_cont_rd = (struct cam_cmd_i2c_continuous_rd *)cmd_buf; + cmd_length_in_bytes 
= sizeof(struct cam_cmd_i2c_continuous_rd); + payload_count = i2c_cont_rd->header.count; + + if (payload_count >= U32_MAX - data->num_data) { + CAM_ERR(CAM_EEPROM, + "int overflow on eeprom memory block"); + return -EINVAL; + } + map[*num_map].mem.addr = i2c_cont_rd->reg_addr; + map[*num_map].mem.addr_type = i2c_cont_rd->header.addr_type; + map[*num_map].mem.data_type = i2c_cont_rd->header.data_type; + map[*num_map].mem.valid_size = + payload_count; + processed_size += + cmd_length_in_bytes; + data->num_data += map[*num_map].mem.valid_size; + break; + case CAMERA_SENSOR_CMD_TYPE_WAIT: + if (generic_op_code == + CAMERA_SENSOR_WAIT_OP_HW_UCND || + generic_op_code == + CAMERA_SENSOR_WAIT_OP_SW_UCND) { + i2c_uncond_wait = + (struct cam_cmd_unconditional_wait *)cmd_buf; + cmd_length_in_bytes = + sizeof(struct cam_cmd_unconditional_wait); + + if (*num_map < 1) { + CAM_ERR(CAM_EEPROM, + "invalid map number, num_map=%d", + *num_map); + return -EINVAL; + } + + /* + * Though delay is added all of them, but delay will + * be applicable to only one of them as only one of + * them will have valid_size set to >= 1. 
+ */ + map[*num_map - 1].mem.delay = i2c_uncond_wait->delay; + map[*num_map - 1].page.delay = i2c_uncond_wait->delay; + map[*num_map - 1].pageen.delay = i2c_uncond_wait->delay; + } else if (generic_op_code == + CAMERA_SENSOR_WAIT_OP_COND) { + i2c_poll = (struct cam_cmd_conditional_wait *)cmd_buf; + cmd_length_in_bytes = + sizeof(struct cam_cmd_conditional_wait); + + map[*num_map].poll.addr = i2c_poll->reg_addr; + map[*num_map].poll.addr_type = i2c_poll->addr_type; + map[*num_map].poll.data = i2c_poll->reg_data; + map[*num_map].poll.data_type = i2c_poll->data_type; + map[*num_map].poll.delay = i2c_poll->timeout; + map[*num_map].poll.valid_size = 1; + } + processed_size += + cmd_length_in_bytes; + break; + default: + break; + } + + *cmd_length_bytes = processed_size; + return rc; +} + +static struct i2c_settings_list* + cam_eeprom_get_i2c_ptr(struct i2c_settings_array *i2c_reg_settings, + uint32_t size) +{ + struct i2c_settings_list *tmp; + + tmp = CAM_MEM_ZALLOC(sizeof(struct i2c_settings_list), GFP_KERNEL); + + if (tmp != NULL) + list_add_tail(&(tmp->list), + &(i2c_reg_settings->list_head)); + else + return NULL; + + tmp->seq_settings.reg_data = + CAM_MEM_ZALLOC_ARRAY(size, sizeof(uint8_t), GFP_KERNEL); + if (tmp->seq_settings.reg_data == NULL) { + list_del(&(tmp->list)); + CAM_MEM_FREE(tmp); + tmp = NULL; + return NULL; + } + tmp->seq_settings.size = size; + + return tmp; +} + +static int32_t cam_eeprom_handle_continuous_write( + struct cam_eeprom_ctrl_t *e_ctrl, + struct cam_cmd_i2c_continuous_wr *cam_cmd_i2c_continuous_wr, + struct i2c_settings_array *i2c_reg_settings, + uint32_t *cmd_length_in_bytes, int32_t *offset, + struct list_head **list) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0, cnt = 0; + + + CAM_DBG(CAM_EEPROM, "Total Size: %d", + cam_cmd_i2c_continuous_wr->header.count); + + i2c_list = cam_eeprom_get_i2c_ptr(i2c_reg_settings, + cam_cmd_i2c_continuous_wr->header.count); + if (i2c_list == NULL || + i2c_list->seq_settings.reg_data == 
NULL) { + CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list"); + CAM_MEM_FREE(i2c_list); + return -ENOMEM; + } + + *cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) + + sizeof(cam_cmd_i2c_continuous_wr->reg_addr) + + sizeof(struct cam_cmd_read) * + (cam_cmd_i2c_continuous_wr->header.count)); + if (cam_cmd_i2c_continuous_wr->header.op_code == + CAMERA_SENSOR_I2C_OP_CONT_WR_BRST) + i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST; + else if (cam_cmd_i2c_continuous_wr->header.op_code == + CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN) + i2c_list->op_code = CAM_SENSOR_I2C_WRITE_SEQ; + else { + rc = -EINVAL; + goto deallocate_i2c_list; + } + + i2c_list->seq_settings.addr_type = + cam_cmd_i2c_continuous_wr->header.addr_type; + + CAM_ERR(CAM_EEPROM, "Write Address: 0x%x", + cam_cmd_i2c_continuous_wr->reg_addr); + if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_SEQ) { + i2c_list->op_code = CAM_SENSOR_I2C_WRITE_RANDOM; + e_ctrl->eebin_info.start_address = + cam_cmd_i2c_continuous_wr->reg_addr; + e_ctrl->eebin_info.size = + cam_cmd_i2c_continuous_wr->header.count; + CAM_DBG(CAM_EEPROM, "Header Count: %d", + cam_cmd_i2c_continuous_wr->header.count); + e_ctrl->eebin_info.is_valid = 1; + + i2c_list->seq_settings.reg_addr = + cam_cmd_i2c_continuous_wr->reg_addr; + } else + CAM_ERR(CAM_EEPROM, "Burst Mode Not Supported\n"); + + (*offset) += cnt; + *list = &(i2c_list->list); + return rc; +deallocate_i2c_list: + CAM_MEM_FREE(i2c_list->seq_settings.reg_data); + CAM_MEM_FREE(i2c_list); + return rc; +} + +static int32_t cam_eeprom_handle_delay( + uint32_t **cmd_buf, + uint16_t generic_op_code, + struct i2c_settings_array *i2c_reg_settings, + uint32_t offset, uint32_t *byte_cnt, + struct list_head *list_ptr, + size_t remain_buf_len) +{ + int32_t rc = 0; + struct i2c_settings_list *i2c_list = NULL; + struct cam_cmd_unconditional_wait *cmd_uncond_wait = + (struct cam_cmd_unconditional_wait *) *cmd_buf; + + if (remain_buf_len < (sizeof(struct cam_cmd_unconditional_wait))) { + CAM_ERR(CAM_EEPROM, 
"Not Enough buffer"); + return -EINVAL; + } + + if (list_ptr == NULL) { + CAM_ERR(CAM_SENSOR, "Invalid list ptr"); + return -EINVAL; + } + + if (offset > 0) { + i2c_list = + list_entry(list_ptr, struct i2c_settings_list, list); + if (generic_op_code == + CAMERA_SENSOR_WAIT_OP_HW_UCND) + i2c_list->i2c_settings.reg_setting[offset - 1].delay = + cmd_uncond_wait->delay; + else + i2c_list->i2c_settings.delay = + cmd_uncond_wait->delay; + (*cmd_buf) += + sizeof( + struct cam_cmd_unconditional_wait) / sizeof(uint32_t); + (*byte_cnt) += + sizeof( + struct cam_cmd_unconditional_wait); + } else { + CAM_ERR(CAM_SENSOR, "Delay Rxed before any buffer: %d", offset); + return -EINVAL; + } + + return rc; +} + +/** + * cam_eeprom_parse_write_memory_packet - Write eeprom packet + * @csl_packet: csl packet received + * @e_ctrl: ctrl structure + * + * Returns success or failure + */ +static int32_t cam_eeprom_parse_write_memory_packet( + struct cam_packet *csl_packet, + struct cam_eeprom_ctrl_t *e_ctrl) +{ + struct cam_cmd_buf_desc *cmd_desc = NULL; + uint32_t *offset = NULL; + int32_t i, rc = 0; + uint32_t *cmd_buf = NULL; + uintptr_t generic_pkt_addr; + size_t pkt_len = 0; + size_t remain_len = 0; + uint32_t total_cmd_buf_in_bytes = 0; + uint32_t processed_cmd_buf_in_bytes = 0; + struct common_header *cmm_hdr = NULL; + uint32_t cmd_length_in_bytes = 0; + struct cam_cmd_i2c_info *i2c_info = NULL; + + + offset = (uint32_t *)&csl_packet->payload_flex; + offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t)); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + + CAM_DBG(CAM_EEPROM, "Number of Command Buffers: %d", + csl_packet->num_cmd_buf); + for (i = 0; i < csl_packet->num_cmd_buf; i++) { + struct list_head *list = NULL; + uint16_t generic_op_code; + uint32_t off = 0; + int master; + struct cam_sensor_cci_client *cci; + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + total_cmd_buf_in_bytes = cmd_desc[i].length; + processed_cmd_buf_in_bytes = 0; + 
+ if (!total_cmd_buf_in_bytes) + continue; + + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + &generic_pkt_addr, &pkt_len); + if (rc) { + CAM_ERR(CAM_EEPROM, "Failed to get cpu buf"); + return rc; + } + + cmd_buf = (uint32_t *)generic_pkt_addr; + if (!cmd_buf) { + CAM_ERR(CAM_EEPROM, "invalid cmd buf"); + rc = -EINVAL; + goto end; + } + + if ((pkt_len < sizeof(struct common_header) || + (cmd_desc[i].offset) > (pkt_len - + sizeof(struct common_header)))) { + CAM_ERR(CAM_EEPROM, "Not enough buffer"); + rc = -EINVAL; + goto end; + } + + remain_len = pkt_len - cmd_desc[i].offset; + cmd_buf += cmd_desc[i].offset / sizeof(uint32_t); + + if (total_cmd_buf_in_bytes > remain_len) { + CAM_ERR(CAM_EEPROM, "Not enough buffer for command"); + rc = -EINVAL; + goto end; + } + + master = e_ctrl->io_master_info.master_type; + cci = e_ctrl->io_master_info.cci_client; + while (processed_cmd_buf_in_bytes < total_cmd_buf_in_bytes) { + if ((remain_len - processed_cmd_buf_in_bytes) < + sizeof(struct common_header)) { + CAM_ERR(CAM_EEPROM, "Not Enough buffer"); + rc = -EINVAL; + goto end; + } + cmm_hdr = (struct common_header *)cmd_buf; + generic_op_code = cmm_hdr->fifth_byte; + + switch (cmm_hdr->cmd_type) { + case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: + i2c_info = (struct cam_cmd_i2c_info *)cmd_buf; + if ((remain_len - processed_cmd_buf_in_bytes) < + sizeof(struct cam_cmd_i2c_info)) { + CAM_ERR(CAM_EEPROM, "Not enough buf"); + rc = -EINVAL; + goto end; + } + if (master == CCI_MASTER) { + cci->cci_i2c_master = + e_ctrl->cci_i2c_master; + cci->i2c_freq_mode = + i2c_info->i2c_freq_mode; + cci->sid = + i2c_info->slave_addr >> 1; + CAM_DBG(CAM_EEPROM, + "Slave addr: 0x%x Freq Mode: %d", + i2c_info->slave_addr, + i2c_info->i2c_freq_mode); + } else if (master == I2C_MASTER) { + e_ctrl->io_master_info.qup_client->i2c_client->addr = + i2c_info->slave_addr; + CAM_DBG(CAM_EEPROM, + "Slave addr: 0x%x", + i2c_info->slave_addr); + } else if (master == SPI_MASTER) { + CAM_ERR(CAM_EEPROM, + "No Need of 
Slave Info"); + } else { + CAM_ERR(CAM_EEPROM, + "Invalid Master type: %d", + master); + rc = -EINVAL; + goto end; + } + cmd_length_in_bytes = + sizeof(struct cam_cmd_i2c_info); + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes/4; + break; + case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR: { + struct cam_cmd_i2c_continuous_wr + *cam_cmd_i2c_continuous_wr = + (struct cam_cmd_i2c_continuous_wr *) + cmd_buf; + if ((remain_len - processed_cmd_buf_in_bytes) < + sizeof(struct cam_cmd_i2c_continuous_wr)) { + CAM_ERR(CAM_EEPROM, "Not enough buf"); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_EEPROM, + "CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR"); + rc = cam_eeprom_handle_continuous_write( + e_ctrl, + cam_cmd_i2c_continuous_wr, + &(e_ctrl->wr_settings), + &cmd_length_in_bytes, &off, &list); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed in continuous write %d", rc); + goto end; + } + + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes / + sizeof(uint32_t); + break; + } + case CAMERA_SENSOR_CMD_TYPE_WAIT: { + CAM_DBG(CAM_EEPROM, + "CAMERA_SENSOR_CMD_TYPE_WAIT"); + if (generic_op_code == + CAMERA_SENSOR_WAIT_OP_HW_UCND || + generic_op_code == + CAMERA_SENSOR_WAIT_OP_SW_UCND) { + + rc = cam_eeprom_handle_delay( + &cmd_buf, generic_op_code, + &(e_ctrl->wr_settings), off, + &cmd_length_in_bytes, + list, (remain_len - + processed_cmd_buf_in_bytes)); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, + "delay hdl failed: %d", + rc); + goto end; + } + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes / + sizeof(uint32_t); + } else { + CAM_ERR(CAM_EEPROM, + "Wrong Wait Command: %d", + generic_op_code); + rc = -EINVAL; + goto end; + } + break; + } + default: + CAM_ERR(CAM_EEPROM, + "Invalid Cmd_type rxed: %d\n", + cmm_hdr->cmd_type); + rc = -EINVAL; + break; + } + } + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + } + return rc; +end: + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + return rc; +} + 
+/** + * cam_eeprom_init_pkt_parser - Parse eeprom packet + * @e_ctrl: ctrl structure + * @csl_packet: csl packet received + * + * Returns success or failure + */ +static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl, + struct cam_packet *csl_packet) +{ + int32_t rc = 0; + int i = 0; + struct cam_cmd_buf_desc *cmd_desc = NULL; + uint32_t *offset = NULL; + uint32_t *cmd_buf = NULL; + uintptr_t generic_pkt_addr; + size_t pkt_len = 0; + size_t remain_len = 0; + uint32_t total_cmd_buf_in_bytes = 0; + uint32_t processed_cmd_buf_in_bytes = 0; + struct common_header *cmm_hdr = NULL; + uint32_t cmd_length_in_bytes = 0; + struct cam_cmd_i2c_info *i2c_info = NULL; + int num_map = -1; + struct cam_eeprom_memory_map_t *map = NULL; + struct cam_eeprom_soc_private *soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info; + + e_ctrl->cal_data.map = vzalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE * + MSM_EEPROM_MAX_MEM_MAP_CNT) * + (sizeof(struct cam_eeprom_memory_map_t))); + if (!e_ctrl->cal_data.map) { + rc = -ENOMEM; + CAM_ERR(CAM_EEPROM, "failed"); + return rc; + } + map = e_ctrl->cal_data.map; + + offset = (uint32_t *)&csl_packet->payload_flex; + offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t)); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + + /* Loop through multiple command buffers */ + for (i = 0; i < csl_packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + total_cmd_buf_in_bytes = cmd_desc[i].length; + processed_cmd_buf_in_bytes = 0; + if (!total_cmd_buf_in_bytes) + continue; + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + &generic_pkt_addr, &pkt_len); + if (rc) { + CAM_ERR(CAM_EEPROM, "Failed to get cpu buf"); + return rc; + } + cmd_buf = (uint32_t *)generic_pkt_addr; + if (!cmd_buf) { + CAM_ERR(CAM_EEPROM, "invalid cmd buf"); + rc = -EINVAL; + goto end; + } + + if ((pkt_len < 
sizeof(struct common_header)) || + (cmd_desc[i].offset > (pkt_len - + sizeof(struct common_header)))) { + CAM_ERR(CAM_EEPROM, "Not enough buffer"); + rc = -EINVAL; + goto end; + } + remain_len = pkt_len - cmd_desc[i].offset; + cmd_buf += cmd_desc[i].offset / sizeof(uint32_t); + + if (total_cmd_buf_in_bytes > remain_len) { + CAM_ERR(CAM_EEPROM, "Not enough buffer for command"); + rc = -EINVAL; + goto end; + } + /* Loop through multiple cmd formats in one cmd buffer */ + while (processed_cmd_buf_in_bytes < total_cmd_buf_in_bytes) { + if ((remain_len - processed_cmd_buf_in_bytes) < + sizeof(struct common_header)) { + CAM_ERR(CAM_EEPROM, "Not enough buf"); + rc = -EINVAL; + goto end; + } + cmm_hdr = (struct common_header *)cmd_buf; + switch (cmm_hdr->cmd_type) { + case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: + i2c_info = (struct cam_cmd_i2c_info *)cmd_buf; + if ((remain_len - processed_cmd_buf_in_bytes) < + sizeof(struct cam_cmd_i2c_info)) { + CAM_ERR(CAM_EEPROM, "Not enough buf"); + rc = -EINVAL; + goto end; + } + + if ((num_map + 1) >= (MSM_EEPROM_MAX_MEM_MAP_CNT * + MSM_EEPROM_MEMORY_MAP_MAX_SIZE)) { + CAM_ERR(CAM_EEPROM, "OOB map read error"); + rc = -EINVAL; + goto end; + } + /* Configure the following map slave address */ + + map[num_map + 1].saddr = i2c_info->slave_addr; + rc = cam_eeprom_update_slaveInfo(e_ctrl, + cmd_buf); + cmd_length_in_bytes = + sizeof(struct cam_cmd_i2c_info); + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes/ + sizeof(uint32_t); + break; + case CAMERA_SENSOR_CMD_TYPE_PWR_UP: + case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN: + cmd_length_in_bytes = total_cmd_buf_in_bytes; + rc = cam_sensor_update_power_settings(cmd_buf, + cmd_length_in_bytes, power_info, + (remain_len - + processed_cmd_buf_in_bytes)); + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes/ + sizeof(uint32_t); + if (rc) { + CAM_ERR(CAM_EEPROM, "Failed"); + goto end; + } + break; + case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR: 
+ case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD: + case CAMERA_SENSOR_CMD_TYPE_WAIT: + num_map++; + rc = cam_eeprom_parse_memory_map( + &e_ctrl->cal_data, cmd_buf, + total_cmd_buf_in_bytes, + &cmd_length_in_bytes, &num_map, + (remain_len - + processed_cmd_buf_in_bytes)); + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes/sizeof(uint32_t); + if (rc) { + CAM_ERR(CAM_EEPROM, "Parse memory map failed"); + goto end; + } + break; + default: + CAM_ERR(CAM_EEPROM, "Invalid cmd_type 0x%x", + cmm_hdr->cmd_type); + rc = -EINVAL; + goto end; + } + } + e_ctrl->cal_data.num_map = num_map + 1; + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + } + return rc; + +end: + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + return rc; +} + +/** + * cam_eeprom_get_cal_data - parse the userspace IO config and + * copy read data to share with userspace + * @e_ctrl: ctrl structure + * @csl_packet: csl packet received + * + * Returns success or failure + */ +static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl, + struct cam_packet *csl_packet) +{ + struct cam_buf_io_cfg *io_cfg; + uint32_t i = 0; + size_t plane_offset; + int32_t mem_handle; + int rc = 0; + uintptr_t buf_addr; + size_t buf_size; + uint8_t *read_buffer; + size_t remain_len = 0; + + io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *) + &csl_packet->payload_flex + + csl_packet->io_configs_offset); + plane_offset = io_cfg->offsets[0]; + mem_handle = io_cfg->mem_handle[0]; + + CAM_DBG(CAM_EEPROM, "number of IO configs: %d:", + csl_packet->num_io_configs); + + for (i = 0; i < csl_packet->num_io_configs; i++) { + CAM_DBG(CAM_EEPROM, "Direction: %d:", io_cfg->direction); + if (io_cfg->direction == CAM_BUF_OUTPUT) { + rc = cam_mem_get_cpu_buf(mem_handle, + &buf_addr, &buf_size); + if (rc) { + CAM_ERR(CAM_EEPROM, "Fail in get buffer: %d", + rc); + return rc; + } + if (buf_size <= plane_offset) { + CAM_ERR(CAM_EEPROM, "Not enough buffer"); + cam_mem_put_cpu_buf(mem_handle); + rc = -EINVAL; + 
return rc; + } + + remain_len = buf_size - plane_offset; + CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n", + (void *)buf_addr, buf_size); + + read_buffer = (uint8_t *)buf_addr; + if (!read_buffer) { + CAM_ERR(CAM_EEPROM, + "invalid buffer to copy data"); + cam_mem_put_cpu_buf(mem_handle); + rc = -EINVAL; + return rc; + } + read_buffer += plane_offset; + + if (remain_len < e_ctrl->cal_data.num_data) { + CAM_ERR(CAM_EEPROM, + "failed to copy, Invalid size"); + cam_mem_put_cpu_buf(mem_handle); + rc = -EINVAL; + return rc; + } + + CAM_DBG(CAM_EEPROM, "copy the data, len:%d", + e_ctrl->cal_data.num_data); + memcpy(read_buffer, e_ctrl->cal_data.mapdata, + e_ctrl->cal_data.num_data); + cam_mem_put_cpu_buf(mem_handle); + } else { + CAM_ERR(CAM_EEPROM, "Invalid direction"); + rc = -EINVAL; + } + } + + return rc; +} + +static int32_t delete_eeprom_request(struct i2c_settings_array *i2c_array) +{ + struct i2c_settings_list *i2c_list = NULL, *i2c_next = NULL; + int32_t rc = 0; + + if (i2c_array == NULL) { + CAM_ERR(CAM_SENSOR, "FATAL:: Invalid argument"); + return -EINVAL; + } + + list_for_each_entry_safe(i2c_list, i2c_next, + &(i2c_array->list_head), list) { + CAM_MEM_FREE(i2c_list->seq_settings.reg_data); + list_del(&(i2c_list->list)); + CAM_MEM_FREE(i2c_list); + } + INIT_LIST_HEAD(&(i2c_array->list_head)); + i2c_array->is_settings_valid = 0; + + return rc; +} + +/** + * cam_eeprom_write - Write Packet + * @e_ctrl: ctrl structure + * + * Returns success or failure + */ +static int32_t cam_eeprom_write(struct cam_eeprom_ctrl_t *e_ctrl) +{ + int32_t rc = 0; + struct i2c_settings_array *i2c_set = NULL; + struct i2c_settings_list *i2c_list = NULL; + + i2c_set = &e_ctrl->wr_settings; + + if (i2c_set->is_settings_valid == 1) { + list_for_each_entry(i2c_list, + &(i2c_set->list_head), list) { + rc = camera_io_dev_write_continuous( + &e_ctrl->io_master_info, + &i2c_list->i2c_settings, CAM_SENSOR_I2C_WRITE_BURST); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, + "Error in EEPROM 
write"); + goto del_req; + } + } + } + +del_req: + delete_eeprom_request(i2c_set); + return rc; +} + +/** + * cam_eeprom_pkt_parse - Parse csl packet + * @e_ctrl: ctrl structure + * @arg: Camera control command argument + * + * Returns success or failure + */ +static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg) +{ + int32_t rc = 0; + struct cam_control *ioctl_ctrl = NULL; + struct cam_config_dev_cmd dev_config; + uintptr_t generic_pkt_addr; + size_t pkt_len; + size_t remain_len = 0; + struct cam_packet *csl_packet = NULL; + struct cam_packet *csl_packet_u= NULL; + struct cam_eeprom_soc_private *soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info; + + ioctl_ctrl = (struct cam_control *)arg; + + if (copy_from_user(&dev_config, + u64_to_user_ptr(ioctl_ctrl->handle), + sizeof(dev_config))) + return -EFAULT; + rc = cam_mem_get_cpu_buf(dev_config.packet_handle, + &generic_pkt_addr, &pkt_len); + if (rc) { + CAM_ERR(CAM_EEPROM, + "error in converting command Handle Error: %d", rc); + return rc; + } + + remain_len = pkt_len; + if ((sizeof(struct cam_packet) > pkt_len) || + ((size_t)dev_config.offset >= pkt_len - + sizeof(struct cam_packet))) { + CAM_ERR(CAM_EEPROM, + "Inval cam_packet strut size: %zu, len_of_buff: %zu", + sizeof(struct cam_packet), pkt_len); + rc = -EINVAL; + goto put_ref; + } + + remain_len -= (size_t)dev_config.offset; + csl_packet_u = (struct cam_packet *) + (generic_pkt_addr + (uint32_t)dev_config.offset); + rc = cam_packet_util_copy_pkt_to_kmd(csl_packet_u, &csl_packet, remain_len); + if (rc) { + CAM_ERR(CAM_EEPROM, "Copying packet to KMD failed"); + goto put_ref; + } + + switch (csl_packet->header.op_code & 0xFFFFFF) { + case CAM_EEPROM_PACKET_OPCODE_INIT: + if (e_ctrl->userspace_probe == false) { + rc = cam_eeprom_parse_read_memory_map( + e_ctrl->soc_info.dev->of_node, e_ctrl); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "Failed: 
rc : %d", rc); + goto end; + } + + rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet); + if (rc) + CAM_WARN(CAM_EEPROM, "failed to get calibration data rc %d", rc); + + vfree(e_ctrl->cal_data.mapdata); + e_ctrl->cal_data.mapdata = NULL; + vfree(e_ctrl->cal_data.map); + e_ctrl->cal_data.map = NULL; + e_ctrl->cal_data.num_data = 0; + e_ctrl->cal_data.num_map = 0; + CAM_DBG(CAM_EEPROM, + "Returning the data using kernel probe"); + break; + } + rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet); + if (rc) { + CAM_ERR(CAM_EEPROM, + "Failed in parsing the pkt"); + goto end; + } + + e_ctrl->cal_data.mapdata = + vzalloc(e_ctrl->cal_data.num_data); + if (!e_ctrl->cal_data.mapdata) { + rc = -ENOMEM; + CAM_ERR(CAM_EEPROM, "failed"); + goto error; + } + + rc = cam_eeprom_power_up(e_ctrl, + &soc_private->power_info); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed rc %d", rc); + goto memdata_free; + } + + if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) { + rc = cam_eeprom_match_id(e_ctrl); + if (rc) { + CAM_DBG(CAM_EEPROM, + "eeprom not matching %d", rc); + goto memdata_free; + } + } + + e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG; + rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data); + if (rc) { + CAM_ERR(CAM_EEPROM, + "read_eeprom_memory failed"); + goto power_down; + } + + rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet); + if (rc) + CAM_WARN(CAM_EEPROM, "failed to get calibration data rc %d", rc); + + rc = cam_eeprom_power_down(e_ctrl); + e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE; + vfree(e_ctrl->cal_data.mapdata); + e_ctrl->cal_data.mapdata = NULL; + vfree(e_ctrl->cal_data.map); + e_ctrl->cal_data.map = NULL; + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_setting_size = 0; + power_info->power_down_setting_size = 0; + e_ctrl->cal_data.num_data = 0; + e_ctrl->cal_data.num_map = 0; + break; + case CAM_EEPROM_WRITE: { + struct 
i2c_settings_array *i2c_reg_settings = + &e_ctrl->wr_settings; + + i2c_reg_settings->is_settings_valid = 1; + rc = cam_eeprom_parse_write_memory_packet( + csl_packet, e_ctrl); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "Failed: rc : %d", rc); + goto end; + } + + rc = cam_eeprom_power_up(e_ctrl, + &soc_private->power_info); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed power up rc %d", rc); + goto memdata_free; + } + + usleep_range(10*1000, 11*1000); + CAM_DBG(CAM_EEPROM, + "Calling Erase : %d start Address: 0x%x size: %d", + rc, e_ctrl->eebin_info.start_address, + e_ctrl->eebin_info.size); + + rc = camera_io_dev_erase(&e_ctrl->io_master_info, + e_ctrl->eebin_info.start_address, + e_ctrl->eebin_info.size); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "Failed in erase : %d", rc); + goto memdata_free; + } + + /* Buffer time margin */ + usleep_range(10*1000, 11*1000); + + rc = cam_eeprom_write(e_ctrl); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "Failed: rc : %d", rc); + goto memdata_free; + } + + rc = cam_eeprom_power_down(e_ctrl); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed power down rc %d", rc); + goto memdata_free; + } + + break; + } + default: + CAM_ERR(CAM_EEPROM, "Invalid op-code 0x%x", + csl_packet->header.op_code & 0xFFFFFF); + rc = -EINVAL; + break; + } + +end: + cam_common_mem_free(csl_packet); +put_ref: + cam_mem_put_cpu_buf(dev_config.packet_handle); + return rc; +power_down: + cam_eeprom_power_down(e_ctrl); +memdata_free: + vfree(e_ctrl->cal_data.mapdata); + e_ctrl->cal_data.mapdata = NULL; +error: + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + vfree(e_ctrl->cal_data.map); + e_ctrl->cal_data.map = NULL; + e_ctrl->cal_data.num_data = 0; + e_ctrl->cal_data.num_map = 0; + e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE; + cam_common_mem_free(csl_packet); + cam_mem_put_cpu_buf(dev_config.packet_handle); + return rc; +} + +void cam_eeprom_shutdown(struct 
cam_eeprom_ctrl_t *e_ctrl) +{ + int rc; + struct cam_eeprom_soc_private *soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info; + + if (e_ctrl->cam_eeprom_state == CAM_EEPROM_INIT) + return; + + if (e_ctrl->cam_eeprom_state == CAM_EEPROM_CONFIG) { + rc = cam_eeprom_power_down(e_ctrl); + if (rc < 0) + CAM_ERR(CAM_EEPROM, "EEPROM Power down failed"); + e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE; + } + + if (e_ctrl->cam_eeprom_state == CAM_EEPROM_ACQUIRE) { + rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl); + if (rc < 0) + CAM_ERR(CAM_EEPROM, "destroying the device hdl"); + + e_ctrl->bridge_intf.device_hdl = -1; + e_ctrl->bridge_intf.link_hdl = -1; + e_ctrl->bridge_intf.session_hdl = -1; + + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_setting_size = 0; + power_info->power_down_setting_size = 0; + } + + e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT; +} + +/** + * cam_eeprom_driver_cmd - Handle eeprom cmds + * @e_ctrl: ctrl structure + * @arg: Camera control command argument + * + * Returns success or failure + */ +int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg) +{ + int rc = 0; + struct cam_eeprom_query_cap_t eeprom_cap = {0}; + struct cam_control *cmd = (struct cam_control *)arg; + + if (!e_ctrl || !cmd) { + CAM_ERR(CAM_EEPROM, "Invalid Arguments"); + return -EINVAL; + } + + if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_EEPROM, "Invalid handle type: %d", + cmd->handle_type); + return -EINVAL; + } + + mutex_lock(&(e_ctrl->eeprom_mutex)); + switch (cmd->op_code) { + case CAM_QUERY_CAP: + eeprom_cap.slot_info = e_ctrl->soc_info.index; + if (e_ctrl->userspace_probe == false) + eeprom_cap.eeprom_kernel_probe = true; + else + eeprom_cap.eeprom_kernel_probe = false; + + 
eeprom_cap.is_multimodule_mode = + e_ctrl->is_multimodule_mode; + + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &eeprom_cap, + sizeof(struct cam_eeprom_query_cap_t))) { + CAM_ERR(CAM_EEPROM, "Failed Copy to User"); + rc = -EFAULT; + goto release_mutex; + } + CAM_DBG(CAM_EEPROM, "eeprom_cap: ID: %d", eeprom_cap.slot_info); + break; + case CAM_ACQUIRE_DEV: + rc = cam_eeprom_get_dev_handle(e_ctrl, arg); + if (rc) { + CAM_ERR(CAM_EEPROM, "Failed to acquire dev"); + goto release_mutex; + } + e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE; + break; + case CAM_RELEASE_DEV: + if (e_ctrl->cam_eeprom_state != CAM_EEPROM_ACQUIRE) { + rc = -EINVAL; + CAM_WARN(CAM_EEPROM, + "Not in right state to release : %d", + e_ctrl->cam_eeprom_state); + goto release_mutex; + } + + if (e_ctrl->bridge_intf.device_hdl == -1) { + CAM_ERR(CAM_EEPROM, + "Invalid Handles: link hdl: %d device hdl: %d", + e_ctrl->bridge_intf.device_hdl, + e_ctrl->bridge_intf.link_hdl); + rc = -EINVAL; + goto release_mutex; + } + rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl); + if (rc < 0) + CAM_ERR(CAM_EEPROM, + "failed in destroying the device hdl"); + e_ctrl->bridge_intf.device_hdl = -1; + e_ctrl->bridge_intf.link_hdl = -1; + e_ctrl->bridge_intf.session_hdl = -1; + e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT; + break; + case CAM_CONFIG_DEV: + rc = cam_eeprom_pkt_parse(e_ctrl, arg); + if (rc) { + CAM_ERR(CAM_EEPROM, "Failed in eeprom pkt Parsing"); + goto release_mutex; + } + break; + default: + CAM_DBG(CAM_EEPROM, "invalid opcode"); + break; + } + +release_mutex: + mutex_unlock(&(e_ctrl->eeprom_mutex)); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.h new file mode 100644 index 0000000000..1a031f9a15 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.h @@ -0,0 +1,24 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef _CAM_EEPROM_CORE_H_ +#define _CAM_EEPROM_CORE_H_ + +#include "cam_eeprom_dev.h" + +int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg); +int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node, + struct cam_eeprom_ctrl_t *e_ctrl); +/** + * @e_ctrl: EEPROM ctrl structure + * + * This API handles the shutdown ioctl/close + */ +void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl); + +struct completion *cam_eeprom_get_i3c_completion(uint32_t index); + +#endif +/* _CAM_EEPROM_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c new file mode 100644 index 0000000000..3e5b62240f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c @@ -0,0 +1,951 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_eeprom_dev.h" +#include "cam_req_mgr_dev.h" +#include "cam_eeprom_soc.h" +#include "cam_eeprom_core.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_compat.h" +#include "cam_mem_mgr_api.h" + +static struct cam_i3c_eeprom_data { + struct cam_eeprom_ctrl_t *e_ctrl; + struct completion probe_complete; +} g_i3c_eeprom_data[MAX_CAMERAS]; + +struct completion *cam_eeprom_get_i3c_completion(uint32_t index) +{ + return &g_i3c_eeprom_data[index].probe_complete; +} + +static int cam_eeprom_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_eeprom_ctrl_t *e_ctrl = + v4l2_get_subdevdata(sd); + + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "e_ctrl ptr is NULL"); + return -EINVAL; + } + + mutex_lock(&(e_ctrl->eeprom_mutex)); + cam_eeprom_shutdown(e_ctrl); + mutex_unlock(&(e_ctrl->eeprom_mutex)); + + return 0; +} + +static int cam_eeprom_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_EEPROM, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_eeprom_subdev_close_internal(sd, fh); +} + +static long cam_eeprom_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + int rc = 0; + struct cam_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd); + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_eeprom_driver_cmd(e_ctrl, arg); + if (rc) + CAM_ERR(CAM_EEPROM, + "Failed in Driver cmd: %d", rc); + break; + case CAM_SD_SHUTDOWN: + if (!cam_req_mgr_is_shutdown()) { + CAM_ERR(CAM_CORE, "SD shouldn't come from user space"); + return 0; + } + + rc = cam_eeprom_subdev_close_internal(sd, NULL); + break; + default: + rc = -ENOIOCTLCMD; + break; + } + + return rc; +} + +int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl, + struct cam_eeprom_i2c_info_t *i2c_info) +{ + struct cam_sensor_cci_client *cci_client = NULL; + + if (e_ctrl->io_master_info.master_type == 
CCI_MASTER) { + cci_client = e_ctrl->io_master_info.cci_client; + if (!cci_client) { + CAM_ERR(CAM_EEPROM, "failed: cci_client %pK", + cci_client); + return -EINVAL; + } + cci_client->cci_i2c_master = e_ctrl->cci_i2c_master; + cci_client->sid = (i2c_info->slave_addr) >> 1; + cci_client->retries = 3; + cci_client->id_map = 0; + cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode; + } else if (e_ctrl->io_master_info.master_type == I2C_MASTER) { + if (!e_ctrl->io_master_info.qup_client) { + CAM_ERR(CAM_EEPROM, "failed: qup_client %pK", + e_ctrl->io_master_info.qup_client); + return -EINVAL; + } + e_ctrl->io_master_info.qup_client->i2c_client->addr = + i2c_info->slave_addr; + CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x", i2c_info->slave_addr); + } else if (e_ctrl->io_master_info.master_type == SPI_MASTER) { + CAM_ERR(CAM_EEPROM, "Slave addr: 0x%x Freq Mode: %d", + i2c_info->slave_addr, i2c_info->i2c_freq_mode); + } + return 0; +} + +#ifdef CONFIG_COMPAT +static long cam_eeprom_init_subdev_do_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + struct cam_control cmd_data; + int32_t rc = 0; + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_EEPROM, + "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_eeprom_subdev_ioctl(sd, cmd, &cmd_data); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, + "Failed in eeprom suddev handling rc %d", + rc); + return rc; + } + break; + default: + CAM_ERR(CAM_EEPROM, "Invalid compat ioctl: %d", cmd); + rc = -EINVAL; + } + + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { + CAM_ERR(CAM_EEPROM, + "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + return rc; +} +#endif + +static const struct v4l2_subdev_internal_ops cam_eeprom_internal_ops = { + .close = cam_eeprom_subdev_close, +}; + +static 
struct v4l2_subdev_core_ops cam_eeprom_subdev_core_ops = { + .ioctl = cam_eeprom_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_eeprom_init_subdev_do_ioctl, +#endif +}; + +static struct v4l2_subdev_ops cam_eeprom_subdev_ops = { + .core = &cam_eeprom_subdev_core_ops, +}; + +static int cam_eeprom_init_subdev(struct cam_eeprom_ctrl_t *e_ctrl) +{ + int rc = 0; + + e_ctrl->v4l2_dev_str.internal_ops = &cam_eeprom_internal_ops; + e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops; + strscpy(e_ctrl->device_name, CAM_EEPROM_NAME, + sizeof(e_ctrl->device_name)); + e_ctrl->v4l2_dev_str.name = e_ctrl->device_name; + e_ctrl->v4l2_dev_str.sd_flags = + (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS); + e_ctrl->v4l2_dev_str.ent_function = CAM_EEPROM_DEVICE_TYPE; + e_ctrl->v4l2_dev_str.token = e_ctrl; + e_ctrl->v4l2_dev_str.close_seq_prior = CAM_SD_CLOSE_MEDIUM_PRIORITY; + + rc = cam_register_subdev(&(e_ctrl->v4l2_dev_str)); + if (rc) + CAM_ERR(CAM_SENSOR, "Fail with cam_register_subdev"); + + return rc; +} + +static int cam_eeprom_i2c_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct i2c_client *client = NULL; + struct cam_eeprom_ctrl_t *e_ctrl = NULL; + struct cam_eeprom_soc_private *soc_private = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct timespec64 ts_start, ts_end; + long microsec = 0; + struct device_node *np = NULL; + const char *drv_name; + + CAM_GET_TIMESTAMP(ts_start); + client = container_of(dev, struct i2c_client, dev); + if (client == NULL) { + CAM_ERR(CAM_OIS, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + e_ctrl = CAM_MEM_ZALLOC(sizeof(*e_ctrl), GFP_KERNEL); + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "CAM_MEM_ZALLOC failed"); + rc = -ENOMEM; + goto probe_failure; + } + + e_ctrl->io_master_info.qup_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_qup_client), GFP_KERNEL); + if (!(e_ctrl->io_master_info.qup_client)) { + rc = -ENOMEM; + goto ectrl_free; + } + + 
soc_private = CAM_MEM_ZALLOC(sizeof(*soc_private), GFP_KERNEL); + if (!soc_private) + goto ectrl_qup_free; + + e_ctrl->soc_info.soc_private = soc_private; + + i2c_set_clientdata(client, e_ctrl); + + mutex_init(&(e_ctrl->eeprom_mutex)); + + INIT_LIST_HEAD(&(e_ctrl->wr_settings.list_head)); + soc_info = &e_ctrl->soc_info; + soc_info->dev = &client->dev; + soc_info->dev_name = client->name; + e_ctrl->io_master_info.master_type = I2C_MASTER; + e_ctrl->io_master_info.qup_client->i2c_client = client; + e_ctrl->eeprom_device_type = MSM_CAMERA_I2C_DEVICE; + e_ctrl->cal_data.mapdata = NULL; + e_ctrl->cal_data.map = NULL; + e_ctrl->userspace_probe = false; + + np = of_node_get(client->dev.of_node); + drv_name = of_node_full_name(np); + rc = cam_eeprom_parse_dt(e_ctrl); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc); + goto free_soc; + } + + rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc); + goto free_soc; + } + + rc = cam_eeprom_init_subdev(e_ctrl); + if (rc) + goto free_soc; + + cam_sensor_module_add_i2c_device((void *) e_ctrl, CAM_SENSOR_EEPROM); + + if (soc_private->i2c_info.slave_addr != 0) + e_ctrl->io_master_info.qup_client->i2c_client->addr = + soc_private->i2c_info.slave_addr; + + e_ctrl->bridge_intf.device_hdl = -1; + e_ctrl->bridge_intf.ops.get_dev_info = NULL; + e_ctrl->bridge_intf.ops.link_setup = NULL; + e_ctrl->bridge_intf.ops.apply_req = NULL; + e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT; + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(drv_name, microsec); + of_node_put(np); + + return rc; +free_soc: + CAM_MEM_FREE(soc_private); +ectrl_qup_free: + CAM_MEM_FREE(e_ctrl->io_master_info.qup_client); +ectrl_free: + CAM_MEM_FREE(e_ctrl); +probe_failure: + return rc; +} + +static void cam_eeprom_i2c_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + 
struct i2c_client *client = NULL; + struct cam_eeprom_ctrl_t *e_ctrl; + struct cam_eeprom_soc_private *soc_private; + struct cam_hw_soc_info *soc_info; + + client = container_of(dev, struct i2c_client, dev); + if (!client) { + CAM_ERR(CAM_EEPROM, + "Failed to get i2c client"); + return; + } + + e_ctrl = (struct cam_eeprom_ctrl_t *)i2c_get_clientdata(client); + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "eeprom device is NULL"); + return; + } + + soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + if (!soc_private) { + CAM_ERR(CAM_EEPROM, "soc_info.soc_private is NULL"); + return; + } + + CAM_INFO(CAM_EEPROM, "i2c driver remove invoked"); + soc_info = &e_ctrl->soc_info; + for (i = 0; i < soc_info->num_clk; i++) { + if (!soc_info->clk[i]) { + CAM_DBG(CAM_EEPROM, "%s handle is NULL skip put", + soc_info->clk_name[i]); + continue; + } + devm_clk_put(soc_info->dev, soc_info->clk[i]); + } + + mutex_lock(&(e_ctrl->eeprom_mutex)); + cam_eeprom_shutdown(e_ctrl); + mutex_unlock(&(e_ctrl->eeprom_mutex)); + mutex_destroy(&(e_ctrl->eeprom_mutex)); + cam_unregister_subdev(&(e_ctrl->v4l2_dev_str)); + CAM_MEM_FREE(soc_private); + v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL); + CAM_MEM_FREE(e_ctrl->io_master_info.qup_client); + e_ctrl->io_master_info.qup_client = NULL; + CAM_MEM_FREE(e_ctrl); +} + +const static struct component_ops cam_eeprom_i2c_component_ops = { + .bind = cam_eeprom_i2c_component_bind, + .unbind = cam_eeprom_i2c_component_unbind, +}; + +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE +static int cam_eeprom_i2c_driver_probe(struct i2c_client *client) +{ + int rc = 0; + + if (client == NULL) { + CAM_ERR(CAM_EEPROM, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_EEPROM, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_EEPROM, "Adding sensor eeprom component"); + rc = component_add(&client->dev, 
&cam_eeprom_i2c_component_ops); + if (rc) + CAM_ERR(CAM_EEPROM, "failed to add component rc: %d", rc); + + return rc; +} +#else +static int cam_eeprom_i2c_driver_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + + if (client == NULL || id == NULL) { + CAM_ERR(CAM_EEPROM, "Invalid Args client: %pK id: %pK", + client, id); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_EEPROM, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_EEPROM, "Adding sensor eeprom component"); + rc = component_add(&client->dev, &cam_eeprom_i2c_component_ops); + if (rc) + CAM_ERR(CAM_EEPROM, "failed to add component rc: %d", rc); + + return rc; +} +#endif + +#if KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE +void cam_eeprom_i2c_driver_remove(struct i2c_client *client) +{ + component_del(&client->dev, &cam_eeprom_i2c_component_ops); +} +#else +static int cam_eeprom_i2c_driver_remove(struct i2c_client *client) +{ + component_del(&client->dev, &cam_eeprom_i2c_component_ops); + + return 0; +} +#endif + +static int cam_eeprom_spi_setup(struct spi_device *spi) +{ + struct cam_eeprom_ctrl_t *e_ctrl = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_sensor_spi_client *spi_client; + struct cam_eeprom_soc_private *eb_info; + struct cam_sensor_power_ctrl_t *power_info = NULL; + int rc = 0; + + e_ctrl = CAM_MEM_ZALLOC(sizeof(*e_ctrl), GFP_KERNEL); + if (!e_ctrl) + return -ENOMEM; + + soc_info = &e_ctrl->soc_info; + soc_info->dev = &spi->dev; + soc_info->dev_name = spi->modalias; + + e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops; + e_ctrl->userspace_probe = false; + e_ctrl->cal_data.mapdata = NULL; + e_ctrl->cal_data.map = NULL; + + spi_client = CAM_MEM_ZALLOC(sizeof(*spi_client), GFP_KERNEL); + if (!spi_client) { + CAM_MEM_FREE(e_ctrl); + return -ENOMEM; + } + + eb_info = CAM_MEM_ZALLOC(sizeof(*eb_info), GFP_KERNEL); + if (!eb_info) + goto spi_free; + 
e_ctrl->soc_info.soc_private = eb_info; + + e_ctrl->eeprom_device_type = MSM_CAMERA_SPI_DEVICE; + e_ctrl->io_master_info.spi_client = spi_client; + e_ctrl->io_master_info.master_type = SPI_MASTER; + spi_client->spi_master = spi; + INIT_LIST_HEAD(&(e_ctrl->wr_settings.list_head)); + power_info = &eb_info->power_info; + power_info->dev = &spi->dev; + + /* set spi instruction info */ + spi_client->retry_delay = 1; + spi_client->retries = 0; + + /* Initialize mutex */ + mutex_init(&(e_ctrl->eeprom_mutex)); + + e_ctrl->bridge_intf.device_hdl = -1; + rc = cam_eeprom_parse_dt(e_ctrl); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed: spi soc init rc %d", rc); + goto board_free; + } + + rc = cam_eeprom_spi_parse_of(spi_client); + if (rc) { + CAM_ERR(CAM_EEPROM, "Device tree parsing error"); + goto board_free; + } + + rc = cam_eeprom_init_subdev(e_ctrl); + if (rc) + goto board_free; + + e_ctrl->bridge_intf.ops.get_dev_info = NULL; + e_ctrl->bridge_intf.ops.link_setup = NULL; + e_ctrl->bridge_intf.ops.apply_req = NULL; + + v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl); + return rc; + +board_free: + CAM_MEM_FREE(e_ctrl->soc_info.soc_private); +spi_free: + CAM_MEM_FREE(spi_client); + CAM_MEM_FREE(e_ctrl); + return rc; +} + +static int cam_eeprom_spi_driver_probe(struct spi_device *spi) +{ + spi->bits_per_word = 8; + spi->mode = SPI_MODE_0; + spi_setup(spi); + + CAM_DBG(CAM_EEPROM, "irq[%d] cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x]", + spi->irq, spi->chip_select, (spi->mode & SPI_CPHA) ? 1 : 0, + (spi->mode & SPI_CPOL) ? 1 : 0, + (spi->mode & SPI_CS_HIGH) ? 
1 : 0); + CAM_DBG(CAM_EEPROM, "max_speed[%u]", spi->max_speed_hz); + + return cam_eeprom_spi_setup(spi); +} + +static int cam_eeprom_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int32_t rc = 0; + bool i3c_i2c_target; + struct cam_eeprom_ctrl_t *e_ctrl = NULL; + struct cam_eeprom_soc_private *soc_private = NULL; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return 0; + + e_ctrl = CAM_MEM_ZALLOC(sizeof(struct cam_eeprom_ctrl_t), GFP_KERNEL); + if (!e_ctrl) + return -ENOMEM; + + e_ctrl->soc_info.pdev = pdev; + e_ctrl->soc_info.dev = &pdev->dev; + e_ctrl->soc_info.dev_name = pdev->name; + e_ctrl->eeprom_device_type = MSM_CAMERA_PLATFORM_DEVICE; + e_ctrl->cal_data.mapdata = NULL; + e_ctrl->cal_data.map = NULL; + e_ctrl->userspace_probe = false; + + e_ctrl->io_master_info.master_type = CCI_MASTER; + e_ctrl->io_master_info.cci_client = CAM_MEM_ZALLOC( + sizeof(struct cam_sensor_cci_client), GFP_KERNEL); + if (!e_ctrl->io_master_info.cci_client) { + rc = -ENOMEM; + goto free_e_ctrl; + } + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_eeprom_soc_private), + GFP_KERNEL); + if (!soc_private) { + rc = -ENOMEM; + goto free_cci_client; + } + e_ctrl->soc_info.soc_private = soc_private; + soc_private->power_info.dev = &pdev->dev; + + /* Initialize mutex */ + mutex_init(&(e_ctrl->eeprom_mutex)); + rc = cam_eeprom_parse_dt(e_ctrl); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc); + goto free_soc; + } + rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info); + if (rc) { + CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc); + goto free_soc; + } + + INIT_LIST_HEAD(&(e_ctrl->wr_settings.list_head)); + rc = cam_eeprom_init_subdev(e_ctrl); + if (rc) + goto free_soc; + + cam_sensor_module_add_i2c_device((void *) 
e_ctrl, CAM_SENSOR_EEPROM); + + e_ctrl->bridge_intf.device_hdl = -1; + e_ctrl->bridge_intf.ops.get_dev_info = NULL; + e_ctrl->bridge_intf.ops.link_setup = NULL; + e_ctrl->bridge_intf.ops.apply_req = NULL; + platform_set_drvdata(pdev, e_ctrl); + e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT; + CAM_DBG(CAM_EEPROM, "Component bound successfully"); + + g_i3c_eeprom_data[e_ctrl->soc_info.index].e_ctrl = e_ctrl; + init_completion(&g_i3c_eeprom_data[e_ctrl->soc_info.index].probe_complete); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; +free_soc: + CAM_MEM_FREE(soc_private); +free_cci_client: + CAM_MEM_FREE(e_ctrl->io_master_info.cci_client); +free_e_ctrl: + CAM_MEM_FREE(e_ctrl); + + return rc; +} + +static void cam_eeprom_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + bool i3c_i2c_target; + struct cam_eeprom_ctrl_t *e_ctrl; + struct cam_hw_soc_info *soc_info; + struct platform_device *pdev = to_platform_device(dev); + + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return; + + e_ctrl = platform_get_drvdata(pdev); + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "eeprom device is NULL"); + return; + } + + CAM_DBG(CAM_EEPROM, "Component unbind called for: %s", pdev->name); + soc_info = &e_ctrl->soc_info; + + for (i = 0; i < soc_info->num_clk; i++) { + if (!soc_info->clk[i]) { + CAM_DBG(CAM_EEPROM, "%s handle is NULL skip put", + soc_info->clk_name[i]); + continue; + } + devm_clk_put(soc_info->dev, soc_info->clk[i]); + } + + mutex_lock(&(e_ctrl->eeprom_mutex)); + cam_eeprom_shutdown(e_ctrl); + mutex_unlock(&(e_ctrl->eeprom_mutex)); + mutex_destroy(&(e_ctrl->eeprom_mutex)); + cam_unregister_subdev(&(e_ctrl->v4l2_dev_str)); + CAM_MEM_FREE(soc_info->soc_private); + CAM_MEM_FREE(e_ctrl->io_master_info.cci_client); + platform_set_drvdata(pdev, NULL); + 
v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL); + CAM_MEM_FREE(e_ctrl); +} + +const static struct component_ops cam_eeprom_component_ops = { + .bind = cam_eeprom_component_bind, + .unbind = cam_eeprom_component_unbind, +}; + +static int32_t cam_eeprom_platform_driver_probe( + struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_EEPROM, "Adding EEPROM Sensor component"); + rc = component_add(&pdev->dev, &cam_eeprom_component_ops); + if (rc) + CAM_ERR(CAM_EEPROM, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_eeprom_platform_driver_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_eeprom_component_ops); + return 0; +} + +static const struct of_device_id cam_eeprom_dt_match[] = { + { .compatible = "qcom,eeprom" }, + { } +}; + + +MODULE_DEVICE_TABLE(of, cam_eeprom_dt_match); + +struct platform_driver cam_eeprom_platform_driver = { + .driver = { + .name = "qcom,eeprom", + .owner = THIS_MODULE, + .of_match_table = cam_eeprom_dt_match, + .suppress_bind_attrs = true, + }, + .probe = cam_eeprom_platform_driver_probe, + .remove = cam_eeprom_platform_driver_remove, +}; + +static const struct i2c_device_id cam_eeprom_i2c_id[] = { + { EEPROM_DRIVER_I2C, (kernel_ulong_t)NULL}, + { } +}; + +static const struct of_device_id cam_eeprom_i2c_dt_match[] = { + { .compatible = "qcom,cam-i2c-eeprom" }, + { } +}; + +MODULE_DEVICE_TABLE(of, cam_eeprom_i2c_dt_match); + +struct i2c_driver cam_eeprom_i2c_driver = { + .id_table = cam_eeprom_i2c_id, + .probe = cam_eeprom_i2c_driver_probe, + .remove = cam_eeprom_i2c_driver_remove, + .driver = { + .name = EEPROM_DRIVER_I2C, + .owner = THIS_MODULE, + .of_match_table = cam_eeprom_i2c_dt_match, + .suppress_bind_attrs = true, + }, +}; + +static struct spi_driver cam_eeprom_spi_driver = { + .driver = { + .name = "qcom_eeprom", + .owner = THIS_MODULE, + .of_match_table = cam_eeprom_dt_match, + }, + .probe = cam_eeprom_spi_driver_probe, + .remove = cam_eeprom_spi_driver_remove, +}; + 
+static struct i3c_device_id eeprom_i3c_id[MAX_I3C_DEVICE_ID_ENTRIES + 1]; + +static int cam_eeprom_i3c_driver_probe(struct i3c_device *client) +{ + int32_t rc = 0; + struct cam_eeprom_ctrl_t *e_ctrl = NULL; + uint32_t index; + struct device *dev; + + if (!client) { + CAM_INFO(CAM_EEPROM, "Null Client pointer"); + return -EINVAL; + } + + dev = &client->dev; + + CAM_DBG(CAM_EEPROM, "Probe for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_EEPROM, "device %s failed to read cell-index", dev_name(dev)); + return rc; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_EEPROM, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return -EINVAL; + } + + e_ctrl = g_i3c_eeprom_data[index].e_ctrl; + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "e_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return -EINVAL; + } + cam_sensor_utils_parse_pm_ctrl_flag(dev->of_node, &(e_ctrl->io_master_info)); + + CAM_INFO(CAM_SENSOR, + "master: %d (1-CCI, 2-I2C, 3-SPI, 4-I3C) pm_ctrl_client_enable: %d", + e_ctrl->io_master_info.master_type, + e_ctrl->io_master_info.qup_client->pm_ctrl_client_enable); + + e_ctrl->io_master_info.qup_client->i3c_client = client; + e_ctrl->io_master_info.qup_client->i3c_wait_for_hotjoin = false; + + complete_all(&g_i3c_eeprom_data[index].probe_complete); + + CAM_DBG(CAM_EEPROM, "I3C Probe Finished for %s", dev_name(dev)); + return rc; +} + +#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) +static void cam_i3c_driver_remove(struct i3c_device *client) +{ + int32_t rc = 0; + uint32_t index; + struct cam_eeprom_ctrl_t *e_ctrl = NULL; + struct device *dev; + + if (!client) { + CAM_ERR(CAM_SENSOR, "I3C Driver Remove: Invalid input args"); + return; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "driver remove for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s 
failed to read cell-index", dev_name(dev)); + return; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return; + } + + e_ctrl = g_i3c_eeprom_data[index].e_ctrl; + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "e_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return; + } + + CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s", + (client ? dev_name(&client->dev) : "none")); + CAM_MEM_FREE(e_ctrl->io_master_info.qup_client); + e_ctrl->io_master_info.qup_client = NULL; +} +#else +static int cam_i3c_driver_remove(struct i3c_device *client) +{ + int32_t rc = 0; + uint32_t index; + struct cam_eeprom_ctrl_t *e_ctrl = NULL; + struct device *dev; + + if (!client) { + CAM_ERR(CAM_SENSOR, "I3C Driver Remove: Invalid input args"); + return -EINVAL; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "driver remove for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s failed to read cell-index", dev_name(dev)); + return -EINVAL; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return -EINVAL; + } + + e_ctrl = g_i3c_eeprom_data[index].e_ctrl; + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "e_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s", + (client ? 
dev_name(&client->dev) : "none")); + CAM_MEM_FREE(e_ctrl->io_master_info.qup_client); + e_ctrl->io_master_info.qup_client = NULL; + return 0; +} +#endif + +static struct i3c_driver cam_eeprom_i3c_driver = { + .id_table = eeprom_i3c_id, + .probe = cam_eeprom_i3c_driver_probe, + .remove = cam_i3c_driver_remove, + .driver = { + .owner = THIS_MODULE, + .name = EEPROM_DRIVER_I3C, + .of_match_table = cam_eeprom_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_eeprom_driver_init(void) +{ + int rc = 0; + struct device_node *dev; + int num_entries = 0; + + rc = platform_driver_register(&cam_eeprom_platform_driver); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "platform_driver_register failed rc = %d", + rc); + return rc; + } + + rc = spi_register_driver(&cam_eeprom_spi_driver); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "spi_register_driver failed rc = %d", rc); + goto spi_register_err; + } + + rc = i2c_add_driver(&cam_eeprom_i2c_driver); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "i2c_add_driver failed rc = %d", rc); + goto i2c_register_err; + } + + memset(eeprom_i3c_id, 0, sizeof(struct i3c_device_id) * (MAX_I3C_DEVICE_ID_ENTRIES + 1)); + + dev = of_find_node_by_path(I3C_SENSOR_DEV_ID_DT_PATH); + if (!dev) { + CAM_DBG(CAM_EEPROM, "Couldnt Find the i3c-id-table dev node"); + return 0; + } + + rc = cam_sensor_count_elems_i3c_device_id(dev, &num_entries, + "i3c-eeprom-id-table"); + if (rc) + return 0; + + rc = cam_sensor_fill_i3c_device_id(dev, num_entries, + "i3c-eeprom-id-table", eeprom_i3c_id); + if (rc) + goto i3c_register_err; + + rc = i3c_driver_register_with_owner(&cam_eeprom_i3c_driver, THIS_MODULE); + if (rc) { + CAM_ERR(CAM_EEPROM, "i3c_driver registration failed, rc: %d", rc); + goto i3c_register_err; + } + + return 0; +i3c_register_err: + i2c_del_driver(&cam_sensor_i2c_driver); +i2c_register_err: + spi_unregister_driver(&cam_eeprom_spi_driver); +spi_register_err: + platform_driver_unregister(&cam_sensor_platform_driver); + + return rc; +} + +void 
cam_eeprom_driver_exit(void) +{ + struct device_node *dev; + + platform_driver_unregister(&cam_eeprom_platform_driver); + spi_unregister_driver(&cam_eeprom_spi_driver); + i2c_del_driver(&cam_eeprom_i2c_driver); + + dev = of_find_node_by_path(I3C_SENSOR_DEV_ID_DT_PATH); + if (!dev) { + CAM_DBG(CAM_EEPROM, "Couldnt Find the i3c-id-table dev node"); + return; + } + + i3c_driver_unregister(&cam_eeprom_i3c_driver); +} + +MODULE_DESCRIPTION("CAM EEPROM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h new file mode 100644 index 0000000000..33e360d45e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#ifndef _CAM_EEPROM_DEV_H_ +#define _CAM_EEPROM_DEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_soc_util.h" +#include "cam_context.h" + +#define DEFINE_MSM_MUTEX(mutexname) \ + static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +#define PROPERTY_MAXSIZE 32 + +#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE 80 +#define MSM_EEPROM_MAX_MEM_MAP_CNT 100 +#define MSM_EEPROM_MEM_MAP_PROPERTIES_CNT 8 + +#define EEPROM_DRIVER_I2C "cam-i2c-eeprom" +#define EEPROM_DRIVER_I3C "i3c_camera_eeprom" + + +enum cam_eeprom_state { + CAM_EEPROM_INIT, + CAM_EEPROM_ACQUIRE, + CAM_EEPROM_CONFIG, +}; + +/** + * struct cam_eeprom_map_t - eeprom map + * @data_type : Data type + * @addr_type : Address type + * @addr : Address + * @data : data + * @delay : Delay + * + */ +struct cam_eeprom_map_t { + uint32_t valid_size; + uint32_t addr; + uint32_t addr_type; + uint32_t data; + uint32_t data_type; + uint32_t delay; +}; + +/** + * struct cam_eeprom_memory_map_t - eeprom memory map types + * @page : page memory + * @pageen : pageen memory + * @poll : poll memory + * @mem : mem + * @saddr : slave addr + * + */ +struct cam_eeprom_memory_map_t { + struct cam_eeprom_map_t page; + struct cam_eeprom_map_t pageen; + struct cam_eeprom_map_t poll; + struct cam_eeprom_map_t mem; + uint32_t saddr; +}; + +/** + * struct cam_eeprom_memory_block_t - eeprom mem block info + * @map : eeprom memory map + * @num_map : number of map blocks + * @mapdata : map data + * @cmd_type : size of total mapdata + * + */ +struct cam_eeprom_memory_block_t { + struct cam_eeprom_memory_map_t *map; + uint32_t num_map; + uint8_t *mapdata; + uint32_t num_data; +}; + +/** + * struct cam_eeprom_cmm_t - camera multimodule + * @cmm_support : cmm support flag + * @cmm_compression : cmm compression flag + * @cmm_offset : cmm data start offset + * @cmm_size : cmm data size + * + */ +struct 
cam_eeprom_cmm_t { + uint32_t cmm_support; + uint32_t cmm_compression; + uint32_t cmm_offset; + uint32_t cmm_size; +}; + +/** + * struct cam_eeprom_i2c_info_t - I2C info + * @slave_addr : slave address + * @i2c_freq_mode : i2c frequency mode + * + */ +struct cam_eeprom_i2c_info_t { + uint16_t slave_addr; + uint8_t i2c_freq_mode; +}; + +/** + * struct cam_eeprom_soc_private - eeprom soc private data structure + * @eeprom_name : eeprom name + * @i2c_info : i2c info structure + * @power_info : eeprom power info + * @cmm_data : cmm data + * + */ +struct cam_eeprom_soc_private { + const char *eeprom_name; + struct cam_eeprom_i2c_info_t i2c_info; + struct cam_sensor_power_ctrl_t power_info; + struct cam_eeprom_cmm_t cmm_data; +}; + +/** + * struct cam_eeprom_intf_params - bridge interface params + * @device_hdl : Device Handle + * @session_hdl : Session Handle + * @ops : KMD operations + * @crm_cb : Callback API pointers + */ +struct cam_eeprom_intf_params { + int32_t device_hdl; + int32_t session_hdl; + int32_t link_hdl; + struct cam_req_mgr_kmd_ops ops; + struct cam_req_mgr_crm_cb *crm_cb; +}; + +struct eebin_info { + uint32_t start_address; + uint32_t size; + uint32_t is_valid; +}; + +/** + * struct cam_eeprom_ctrl_t - EEPROM control structure + * @device_name : Device name + * @pdev : platform device + * @spi : spi device + * @eeprom_mutex : eeprom mutex + * @soc_info : eeprom soc related info + * @io_master_info : Information about the communication master + * @gpio_num_info : gpio info + * @cci_i2c_master : I2C structure + * @v4l2_dev_str : V4L2 device structure + * @is_i3c_device : A flag to indicate whether this eeprom is I3C device + * @bridge_intf : bridge interface params + * @cam_eeprom_state : eeprom_device_state + * @userspace_probe : flag indicates userspace or kernel probe + * @cal_data : Calibration data + * @device_name : Device name + * @is_multimodule_mode : To identify multimodule node + * @wr_settings : I2C write settings + * @eebin_info : EEBIN 
address, size info + */ +struct cam_eeprom_ctrl_t { + char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + struct platform_device *pdev; + struct spi_device *spi; + struct mutex eeprom_mutex; + struct cam_hw_soc_info soc_info; + struct camera_io_master io_master_info; + struct msm_camera_gpio_num_info *gpio_num_info; + enum cci_i2c_master_t cci_i2c_master; + enum cci_device_num cci_num; + struct cam_subdev v4l2_dev_str; + bool is_i3c_device; + struct cam_eeprom_intf_params bridge_intf; + enum msm_camera_device_type_t eeprom_device_type; + enum cam_eeprom_state cam_eeprom_state; + bool userspace_probe; + struct cam_eeprom_memory_block_t cal_data; + uint16_t is_multimodule_mode; + struct i2c_settings_array wr_settings; + struct eebin_info eebin_info; +}; + +int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl, + struct cam_eeprom_i2c_info_t *i2c_info); + +/** + * @brief : API to register EEPROM hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_eeprom_driver_init(void); + +/** + * @brief : API to remove EEPROM Hw from platform framework. + */ +void cam_eeprom_driver_exit(void); + +#endif /*_CAM_EEPROM_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c new file mode 100644 index 0000000000..34893dc4da --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "cam_eeprom_soc.h" +#include "cam_debug_util.h" + +#define cam_eeprom_spi_parse_cmd(spi_dev, name, out) \ + { \ + spi_dev->cmd_tbl.name.opcode = out[0]; \ + spi_dev->cmd_tbl.name.addr_len = out[1]; \ + spi_dev->cmd_tbl.name.dummy_len = out[2]; \ + spi_dev->cmd_tbl.name.delay_intv = out[3]; \ + spi_dev->cmd_tbl.name.delay_count = out[4]; \ + } + +int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *spi_dev) +{ + int rc = -EFAULT; + uint32_t tmp[5]; + + rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node, + "spiop-read", tmp, 5); + if (!rc) { + cam_eeprom_spi_parse_cmd(spi_dev, read, tmp); + } else { + CAM_ERR(CAM_EEPROM, "Failed to get read data"); + return -EFAULT; + } + + rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node, + "spiop-readseq", tmp, 5); + if (!rc) { + cam_eeprom_spi_parse_cmd(spi_dev, read_seq, tmp); + } else { + CAM_ERR(CAM_EEPROM, "Failed to get readseq data"); + return -EFAULT; + } + + rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node, + "spiop-queryid", tmp, 5); + if (!rc) { + cam_eeprom_spi_parse_cmd(spi_dev, query_id, tmp); + } else { + CAM_ERR(CAM_EEPROM, "Failed to get queryid data"); + return -EFAULT; + } + + rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node, + "spiop-pprog", tmp, 5); + if (!rc) { + cam_eeprom_spi_parse_cmd(spi_dev, page_program, tmp); + } else { + CAM_ERR(CAM_EEPROM, "Failed to get page program data"); + return -EFAULT; + } + + rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node, + "spiop-wenable", tmp, 5); + if (!rc) { + cam_eeprom_spi_parse_cmd(spi_dev, write_enable, tmp); + } else { + CAM_ERR(CAM_EEPROM, "Failed to get write enable data"); + return rc; + } + + rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node, + "spiop-readst", tmp, 5); + if (!rc) { + cam_eeprom_spi_parse_cmd(spi_dev, read_status, tmp); + } else { + CAM_ERR(CAM_EEPROM, "Failed to get 
readdst data"); + return rc; + } + + rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node, + "spiop-erase", tmp, 5); + if (!rc) { + cam_eeprom_spi_parse_cmd(spi_dev, erase, tmp); + } else { + CAM_ERR(CAM_EEPROM, "Failed to get erase data"); + return rc; + } + + rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node, + "eeprom-id", tmp, 2); + if (rc) { + CAM_ERR(CAM_EEPROM, "Failed to get eeprom id"); + return rc; + } + + spi_dev->mfr_id0 = tmp[0]; + spi_dev->device_id0 = tmp[1]; + + return 0; +} + +/* + * cam_eeprom_parse_memory_map() - parse memory map in device node + * @of: device node + * @data: memory block for output + * + * This functions parses @of to fill @data. It allocates map itself, parses + * the @of node, calculate total data length, and allocates required buffer. + * It only fills the map, but does not perform actual reading. + */ +int cam_eeprom_parse_dt_memory_map(struct device_node *node, + struct cam_eeprom_memory_block_t *data) +{ + int i, rc = 0; + char property[PROPERTY_MAXSIZE]; + uint32_t count = MSM_EEPROM_MEM_MAP_PROPERTIES_CNT; + struct cam_eeprom_memory_map_t *map; + + snprintf(property, PROPERTY_MAXSIZE, "num-blocks"); + rc = of_property_read_u32(node, property, &data->num_map); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "failed: num-blocks not available rc %d", + rc); + return rc; + } + + map = vzalloc((sizeof(*map) * data->num_map)); + if (!map) { + rc = -ENOMEM; + return rc; + } + data->map = map; + + for (i = 0; i < data->num_map; i++) { + snprintf(property, PROPERTY_MAXSIZE, "page%d", i); + rc = of_property_read_u32_array(node, property, + (uint32_t *) &map[i].page, count); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "failed: page not available rc %d", + rc); + goto ERROR; + } + + snprintf(property, PROPERTY_MAXSIZE, "pageen%d", i); + rc = of_property_read_u32_array(node, property, + (uint32_t *) &map[i].pageen, count); + if (rc < 0) + CAM_DBG(CAM_EEPROM, "pageen not needed"); + + snprintf(property, PROPERTY_MAXSIZE, 
"saddr%d", i); + rc = of_property_read_u32_array(node, property, + (uint32_t *) &map[i].saddr, 1); + if (rc < 0) + CAM_DBG(CAM_EEPROM, "saddr not needed - block %d", i); + + snprintf(property, PROPERTY_MAXSIZE, "poll%d", i); + rc = of_property_read_u32_array(node, property, + (uint32_t *) &map[i].poll, count); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "failed: poll not available rc %d", + rc); + goto ERROR; + } + + snprintf(property, PROPERTY_MAXSIZE, "mem%d", i); + rc = of_property_read_u32_array(node, property, + (uint32_t *) &map[i].mem, count); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "failed: mem not available rc %d", + rc); + goto ERROR; + } + data->num_data += map[i].mem.valid_size; + } + + data->mapdata = vzalloc(data->num_data); + if (!data->mapdata) { + rc = -ENOMEM; + goto ERROR; + } + return rc; + +ERROR: + vfree(data->map); + memset(data, 0, sizeof(*data)); + return rc; +} + +/** + * @e_ctrl: ctrl structure + * + * Parses eeprom dt + */ +static int cam_eeprom_get_dt_data(struct cam_eeprom_ctrl_t *e_ctrl) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = &e_ctrl->soc_info; + struct cam_eeprom_soc_private *soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info; + struct device_node *of_node = NULL; + + of_node = soc_info->dev->of_node; + + if (e_ctrl->userspace_probe == false) { + rc = cam_get_dt_power_setting_data(of_node, + soc_info, power_info); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "failed in getting power settings"); + return rc; + } + } + + if (!soc_info->gpio_data) { + CAM_INFO(CAM_EEPROM, "No GPIO found"); + return 0; + } + + if (!soc_info->gpio_data->cam_gpio_common_tbl_size) { + CAM_INFO(CAM_EEPROM, "No GPIO found"); + return -EINVAL; + } + + rc = cam_sensor_util_init_gpio_pin_tbl(soc_info, + &power_info->gpio_num_info); + if ((rc < 0) || (!power_info->gpio_num_info)) { + CAM_ERR(CAM_EEPROM, "No/Error EEPROM GPIOs"); + return -EINVAL; + } + + return rc; 
+} + +/** + * @eb_info: eeprom private data structure + * @of_node: eeprom device node + * + * This function parses the eeprom dt to get the MM data + */ +static int cam_eeprom_cmm_dts(struct cam_eeprom_soc_private *eb_info, + struct device_node *of_node) +{ + int rc = 0; + struct cam_eeprom_cmm_t *cmm_data = &eb_info->cmm_data; + + cmm_data->cmm_support = + of_property_read_bool(of_node, "cmm-data-support"); + if (!cmm_data->cmm_support) { + CAM_DBG(CAM_EEPROM, "No cmm support"); + return 0; + } + + cmm_data->cmm_compression = + of_property_read_bool(of_node, "cmm-data-compressed"); + + rc = of_property_read_u32(of_node, "cmm-data-offset", + &cmm_data->cmm_offset); + if (rc < 0) + CAM_DBG(CAM_EEPROM, "No MM offset data rc %d", rc); + + rc = of_property_read_u32(of_node, "cmm-data-size", + &cmm_data->cmm_size); + if (rc < 0) + CAM_DBG(CAM_EEPROM, "No MM size data rc %d", rc); + + CAM_DBG(CAM_EEPROM, "cmm_compr %d, cmm_offset %d, cmm_size %d", + cmm_data->cmm_compression, cmm_data->cmm_offset, + cmm_data->cmm_size); + return 0; +} + +/** + * @e_ctrl: ctrl structure + * + * This function is called from cam_eeprom_platform/i2c/spi_driver_probe + * it parses the eeprom dt node and decides for userspace or kernel probe. 
+ */ +int cam_eeprom_parse_dt(struct cam_eeprom_ctrl_t *e_ctrl) +{ + int i, rc = 0; + struct cam_hw_soc_info *soc_info = &e_ctrl->soc_info; + struct device_node *of_node = NULL; + struct device_node *of_parent = NULL; + struct cam_eeprom_soc_private *soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + uint32_t temp; + + if (!soc_info->dev) { + CAM_ERR(CAM_EEPROM, "Dev is NULL"); + return -EINVAL; + } + + e_ctrl->is_multimodule_mode = false; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_EEPROM, "Failed to read DT properties rc : %d", rc); + return rc; + } + + of_node = soc_info->dev->of_node; + + rc = of_property_read_bool(of_node, "i3c-target"); + if (rc) { + e_ctrl->is_i3c_device = true; + e_ctrl->io_master_info.master_type = I3C_MASTER; + } + + CAM_DBG(CAM_SENSOR, "I3C Target: %s", CAM_BOOL_TO_YESNO(e_ctrl->is_i3c_device)); + + if (of_property_read_bool(of_node, "multimodule-support")) { + CAM_DBG(CAM_UTIL, "Multi Module is Supported"); + e_ctrl->is_multimodule_mode = true; + } + + rc = of_property_read_string(of_node, "eeprom-name", + &soc_private->eeprom_name); + if (rc < 0) { + CAM_DBG(CAM_EEPROM, "kernel probe is not enabled"); + e_ctrl->userspace_probe = true; + } + + if (e_ctrl->io_master_info.master_type == CCI_MASTER) { + rc = of_property_read_u32(of_node, "cci-master", + &e_ctrl->cci_i2c_master); + if (rc < 0 || (e_ctrl->cci_i2c_master >= MASTER_MAX)) { + CAM_DBG(CAM_EEPROM, "failed rc %d", rc); + rc = -EFAULT; + return rc; + } + + of_parent = of_get_parent(of_node); + if (of_property_read_u32(of_parent, "cell-index", + &e_ctrl->cci_num) < 0) + /* Set default master 0 */ + e_ctrl->cci_num = CCI_DEVICE_0; + + e_ctrl->io_master_info.cci_client->cci_device = e_ctrl->cci_num; + CAM_DBG(CAM_EEPROM, "cci-index %d", e_ctrl->cci_num, rc); + } + + if (e_ctrl->io_master_info.master_type == SPI_MASTER) { + rc = cam_eeprom_cmm_dts(soc_private, soc_info->dev->of_node); + if (rc < 0) + CAM_DBG(CAM_EEPROM, 
"MM data not available rc %d", rc); + } + + rc = cam_eeprom_get_dt_data(e_ctrl); + if (rc < 0) + CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc); + + if ((e_ctrl->userspace_probe == false) && + (e_ctrl->io_master_info.master_type != SPI_MASTER)) { + rc = of_property_read_u32(of_node, "slave-addr", &temp); + if (rc < 0) + CAM_DBG(CAM_EEPROM, "failed: no slave-addr rc %d", rc); + + soc_private->i2c_info.slave_addr = temp; + + rc = of_property_read_u32(of_node, "i2c-freq-mode", &temp); + soc_private->i2c_info.i2c_freq_mode = temp; + if (rc < 0) { + CAM_ERR(CAM_EEPROM, + "i2c-freq-mode read fail %d", rc); + soc_private->i2c_info.i2c_freq_mode = 0; + } + if (soc_private->i2c_info.i2c_freq_mode >= I2C_MAX_MODES) { + CAM_ERR(CAM_EEPROM, "invalid i2c_freq_mode = %d", + soc_private->i2c_info.i2c_freq_mode); + soc_private->i2c_info.i2c_freq_mode = 0; + } + CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X", + soc_private->i2c_info.slave_addr); + } + + for (i = 0; i < soc_info->num_clk; i++) { + soc_info->clk[i] = devm_clk_get(soc_info->dev, + soc_info->clk_name[i]); + if (IS_ERR(soc_info->clk[i])) { + CAM_ERR(CAM_EEPROM, "get failed for %s", + soc_info->clk_name[i]); + rc = -ENOENT; + return rc; + } else if (!soc_info->clk[i]) { + CAM_DBG(CAM_EEPROM, "%s handle is NULL skip get", + soc_info->clk_name[i]); + continue; + } + } + + /* Initialize regulators to default parameters */ + for (i = 0; i < soc_info->num_rgltr; i++) { + soc_info->rgltr[i] = devm_regulator_get(soc_info->dev, + soc_info->rgltr_name[i]); + if (IS_ERR_OR_NULL(soc_info->rgltr[i])) { + rc = PTR_ERR(soc_info->rgltr[i]); + rc = rc ? 
rc : -EINVAL; + CAM_ERR(CAM_EEPROM, "get failed for regulator %s", + soc_info->rgltr_name[i]); + return rc; + } + CAM_DBG(CAM_EEPROM, "get for regulator %s", + soc_info->rgltr_name[i]); + } + cam_sensor_utils_parse_pm_ctrl_flag(of_node, &(e_ctrl->io_master_info)); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h new file mode 100644 index 0000000000..ed37989eb2 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ +#ifndef _CAM_EEPROM_SOC_H_ +#define _CAM_EEPROM_SOC_H_ + +#include "cam_eeprom_dev.h" + +int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *client); + +int cam_eeprom_parse_dt_memory_map(struct device_node *of, + struct cam_eeprom_memory_block_t *data); + +int cam_eeprom_parse_dt(struct cam_eeprom_ctrl_t *e_ctrl); +#endif/* _CAM_EEPROM_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_core.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_core.c new file mode 100644 index 0000000000..5b6c46cf93 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_core.c @@ -0,0 +1,1986 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include + +#include "cam_sensor_cmn_header.h" +#include "cam_flash_core.h" +#include "cam_res_mgr_api.h" +#include "cam_common_util.h" +#include "cam_packet_util.h" +#include "cam_mem_mgr_api.h" + +int cam_flash_led_prepare(struct led_trigger *trigger, int options, + int *max_current, bool is_wled) +{ + int rc = 0; + + if (is_wled) { +#if IS_REACHABLE(CONFIG_BACKLIGHT_QCOM_SPMI_WLED) + rc = wled_flash_led_prepare(trigger, options, max_current); + if (rc) { + CAM_ERR(CAM_FLASH, "enable reg failed: rc: %d", + rc); + return rc; + } +#else + return -EPERM; +#endif + } else { +#if IS_REACHABLE(CONFIG_LEDS_QPNP_FLASH_V2) + rc = qpnp_flash_led_prepare(trigger, options, max_current); +#elif IS_REACHABLE(CONFIG_LEDS_QTI_FLASH) + rc = qti_flash_led_prepare(trigger, options, max_current); +#endif + if (rc) { + CAM_ERR(CAM_FLASH, + "Regulator enable failed rc = %d", rc); + return rc; + } + } + + return rc; +} + +static int cam_flash_pmic_flush_nrt(struct cam_flash_ctrl *fctrl) +{ + int j = 0; + struct cam_flash_frame_setting *nrt_settings; + + if (!fctrl) + return -EINVAL; + + nrt_settings = &fctrl->nrt_info; + + if (nrt_settings->cmn_attr.cmd_type == + CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO) { + fctrl->flash_init_setting.cmn_attr.is_settings_valid = false; + } else if ((nrt_settings->cmn_attr.cmd_type == + CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) || + (nrt_settings->cmn_attr.cmd_type == + CAMERA_SENSOR_FLASH_CMD_TYPE_RER) || + (nrt_settings->cmn_attr.cmd_type == + CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE)) { + fctrl->nrt_info.cmn_attr.is_settings_valid = false; + fctrl->nrt_info.cmn_attr.count = 0; + fctrl->nrt_info.num_iterations = 0; + fctrl->nrt_info.led_on_delay_ms = 0; + fctrl->nrt_info.led_off_delay_ms = 0; + for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++) + fctrl->nrt_info.led_current_ma[j] = 0; + } + + return 0; +} + +static int cam_flash_i2c_flush_nrt(struct cam_flash_ctrl *fctrl) +{ + int rc = 0; + + if (fctrl->i2c_data.init_settings.is_settings_valid == true) { 
+ rc = delete_request(&fctrl->i2c_data.init_settings); + if (rc) { + CAM_WARN(CAM_FLASH, + "Failed to delete Init i2c_setting: %d", + rc); + return rc; + } + } + if (fctrl->i2c_data.config_settings.is_settings_valid == true) { + rc = delete_request(&fctrl->i2c_data.config_settings); + if (rc) { + CAM_WARN(CAM_FLASH, + "Failed to delete NRT i2c_setting: %d", + rc); + return rc; + } + } + + return rc; +} + +static int cam_flash_construct_default_power_setting( + struct cam_sensor_power_ctrl_t *power_info) +{ + int rc = 0; + + power_info->power_setting_size = 1; + power_info->power_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_power_setting), + GFP_KERNEL); + if (!power_info->power_setting) + return -ENOMEM; + + power_info->power_setting[0].seq_type = SENSOR_CUSTOM_REG1; + power_info->power_setting[0].seq_val = CAM_V_CUSTOM1; + power_info->power_setting[0].config_val = 0; + power_info->power_setting[0].delay = 2; + + power_info->power_down_setting_size = 1; + power_info->power_down_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_power_setting), + GFP_KERNEL); + if (!power_info->power_down_setting) { + rc = -ENOMEM; + goto free_power_settings; + } + + power_info->power_down_setting[0].seq_type = SENSOR_CUSTOM_REG1; + power_info->power_down_setting[0].seq_val = CAM_V_CUSTOM1; + power_info->power_down_setting[0].config_val = 0; + + return rc; + +free_power_settings: + CAM_MEM_FREE(power_info->power_setting); + power_info->power_setting = NULL; + power_info->power_setting_size = 0; + return rc; +} + +int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl, + bool regulator_enable) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = &fctrl->soc_info; + struct cam_sensor_power_ctrl_t *power_info = + &fctrl->power_info; + + if (!power_info || !soc_info) { + CAM_ERR(CAM_FLASH, "Power Info is NULL"); + return -EINVAL; + } + power_info->dev = soc_info->dev; + + if (regulator_enable && (fctrl->is_regulator_enabled == false)) { + if ((power_info->power_setting == 
NULL) && + (power_info->power_down_setting == NULL)) { + CAM_INFO(CAM_FLASH, + "Using default power settings"); + rc = cam_flash_construct_default_power_setting( + power_info); + if (rc < 0) { + CAM_ERR(CAM_FLASH, + "Construct default pwr setting failed rc: %d", + rc); + return rc; + } + } + + rc = cam_sensor_core_power_up(power_info, soc_info, NULL); + if (rc) { + CAM_ERR(CAM_FLASH, "power up the core is failed:%d", + rc); + goto free_pwr_settings; + } + + rc = camera_io_init(&(fctrl->io_master_info)); + if (rc) { + CAM_ERR(CAM_FLASH, "cci_init failed: rc: %d", rc); + cam_sensor_util_power_down(power_info, soc_info); + goto free_pwr_settings; + } + fctrl->is_regulator_enabled = true; + } else if ((!regulator_enable) && + (fctrl->is_regulator_enabled == true)) { + rc = cam_sensor_util_power_down(power_info, soc_info); + if (rc) { + CAM_ERR(CAM_FLASH, "power down the core is failed:%d", + rc); + return rc; + } + camera_io_release(&(fctrl->io_master_info)); + fctrl->is_regulator_enabled = false; + goto free_pwr_settings; + } + return rc; + +free_pwr_settings: + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_setting_size = 0; + power_info->power_down_setting_size = 0; + + return rc; +} + +int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type type, uint64_t req_id) +{ + int rc = 0; + int i = 0, j = 0; + int frame_offset = 0; + bool is_off_needed = false; + struct cam_flash_frame_setting *flash_data = NULL; + + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Device data is NULL"); + return -EINVAL; + } + + if (type == FLUSH_ALL) { + /* flush all requests*/ + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + flash_data = + &fctrl->per_frame[i]; + if ((flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_OFF) && + (flash_data->cmn_attr.request_id > 0) && + flash_data->cmn_attr.is_settings_valid) { + is_off_needed = true; 
+ CAM_DBG(CAM_FLASH, + "FLASH_ALL: Turn off the flash for req %llu", + flash_data->cmn_attr.request_id); + } + + flash_data->cmn_attr.request_id = 0; + flash_data->cmn_attr.is_settings_valid = false; + flash_data->cmn_attr.count = 0; + for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++) + flash_data->led_current_ma[j] = 0; + } + + cam_flash_pmic_flush_nrt(fctrl); + } else if ((type == FLUSH_REQ) && (req_id != 0)) { + /* flush request with req_id*/ + frame_offset = req_id % MAX_PER_FRAME_ARRAY; + flash_data = + &fctrl->per_frame[frame_offset]; + + if (flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_OFF) { + is_off_needed = true; + CAM_DBG(CAM_FLASH, + "FLASH_REQ: Turn off the flash for req %llu", + flash_data->cmn_attr.request_id); + } + + flash_data->cmn_attr.request_id = 0; + flash_data->cmn_attr.is_settings_valid = + false; + flash_data->cmn_attr.count = 0; + for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++) + flash_data->led_current_ma[i] = 0; + } else if ((type == FLUSH_REQ) && (req_id == 0)) { + /* Handels NonRealTime usecase */ + cam_flash_pmic_flush_nrt(fctrl); + } else { + CAM_ERR(CAM_FLASH, "Invalid arguments"); + return -EINVAL; + } + + if (is_off_needed) + cam_flash_off(fctrl); + + return rc; +} + +int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type type, uint64_t req_id) +{ + int rc = 0; + int i = 0; + uint32_t cancel_req_id_found = 0; + struct i2c_settings_array *i2c_set = NULL; + + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Device data is NULL"); + return -EINVAL; + } + if ((type == FLUSH_REQ) && (req_id == 0)) { + /* This setting will be called only when NonRealTime + * settings needs to clean. 
+ */ + cam_flash_i2c_flush_nrt(fctrl); + } else { + /* All other usecase will be handle here */ + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(fctrl->i2c_data.per_frame[i]); + + if ((type == FLUSH_REQ) && + (i2c_set->request_id != req_id)) + continue; + + if (i2c_set->is_settings_valid == 1) { + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_FLASH, + "delete request: %lld rc: %d", + i2c_set->request_id, rc); + + if (type == FLUSH_REQ) { + cancel_req_id_found = 1; + break; + } + } + } + } + + if ((type == FLUSH_REQ) && (req_id != 0) && + (!cancel_req_id_found)) + CAM_DBG(CAM_FLASH, + "Flush request id:%lld not found in the pending list", + req_id); + + return rc; +} + +int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush) +{ + int rc = 0; + struct cam_flash_ctrl *fctrl = NULL; + + fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl); + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Device data is NULL"); + return -EINVAL; + } + + mutex_lock(&fctrl->flash_mutex); + if (fctrl->flash_state == CAM_FLASH_STATE_INIT) + goto end; + + if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { + fctrl->last_flush_req = flush->req_id; + CAM_DBG(CAM_FLASH, "last reqest to flush is %lld", + flush->req_id); + rc = fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); + if (rc) { + CAM_ERR(CAM_FLASH, "FLUSH_TYPE_ALL failed rc: %d", rc); + goto end; + } + } else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + rc = fctrl->func_tbl.flush_req(fctrl, + FLUSH_REQ, flush->req_id); + if (rc) { + CAM_ERR(CAM_FLASH, "FLUSH_REQ failed rc: %d", rc); + goto end; + } + } +end: + mutex_unlock(&fctrl->flash_mutex); + return rc; +} + +static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl, + struct cam_flash_frame_setting *flash_data, enum camera_flash_opcode op) +{ + uint32_t curr = 0, max_current = 0; + struct cam_flash_private_soc *soc_private = NULL; + int i = 0; + + if (!flash_ctrl || !flash_data) { + CAM_ERR(CAM_FLASH, "Fctrl or Data NULL"); + 
return -EINVAL; + } + + soc_private = (struct cam_flash_private_soc *) + flash_ctrl->soc_info.soc_private; + + if (op == CAMERA_SENSOR_FLASH_OP_FIRELOW) { + for (i = 0; i < flash_ctrl->torch_num_sources; i++) { + if (flash_ctrl->torch_trigger[i]) { + max_current = soc_private->torch_max_current[i]; + if (flash_data->led_current_ma[i] <= + max_current) + curr = flash_data->led_current_ma[i]; + else + curr = max_current; + } + CAM_DBG(CAM_FLASH, "Led_Torch[%d]: Current: %d", + i, curr); + cam_res_mgr_led_trigger_event( + flash_ctrl->torch_trigger[i], curr); + } + } else if ((op == CAMERA_SENSOR_FLASH_OP_FIREHIGH) || + (op == CAMERA_SENSOR_FLASH_OP_FIREDURATION)) { + for (i = 0; i < flash_ctrl->flash_num_sources; i++) { + if (flash_ctrl->flash_trigger[i]) { + max_current = soc_private->flash_max_current[i]; + if (flash_data->led_current_ma[i] <= + max_current) + curr = flash_data->led_current_ma[i]; + else + curr = max_current; + } + CAM_DBG(CAM_FLASH, "LED_Flash[%d]: Current: %d", + i, curr); + cam_res_mgr_led_trigger_event( + flash_ctrl->flash_trigger[i], curr); + } + } else { + CAM_ERR(CAM_FLASH, "Wrong Operation: %d", op); + return -EINVAL; + } + + if (flash_ctrl->switch_trigger) { +#if IS_ENABLED(CONFIG_LEDS_QTI_FLASH) + int rc = 0; + + if (op == CAMERA_SENSOR_FLASH_OP_FIREDURATION) { + struct flash_led_param param; + + param.off_time_ms = + flash_data->flash_active_time_ms; + param.on_time_ms = flash_data->flash_on_wait_time_ms; + CAM_DBG(CAM_FLASH, + "Precise flash_on time: %u, Precise flash_off time: %u", + param.on_time_ms, param.off_time_ms); + rc = qti_flash_led_set_param( + flash_ctrl->switch_trigger, + param); + if (rc) { + CAM_ERR(CAM_FLASH, + "LED set param fail rc= %d", rc); + return rc; + } + } +#else + if (op == CAMERA_SENSOR_FLASH_OP_FIREDURATION) { + CAM_ERR(CAM_FLASH, "FIREDURATION op not supported"); + return -EINVAL; + } +#endif + cam_res_mgr_led_trigger_event( + flash_ctrl->switch_trigger, + (enum led_brightness)LED_SWITCH_ON); + } + + return 
0; +} + +int cam_flash_off(struct cam_flash_ctrl *flash_ctrl) +{ + if (!flash_ctrl) { + CAM_ERR(CAM_FLASH, "Flash control Null"); + return -EINVAL; + } + CAM_DBG(CAM_FLASH, "Flash OFF Triggered"); + if (flash_ctrl->switch_trigger) + cam_res_mgr_led_trigger_event(flash_ctrl->switch_trigger, + (enum led_brightness)LED_SWITCH_OFF); + return 0; +} + +static int cam_flash_low( + struct cam_flash_ctrl *flash_ctrl, + struct cam_flash_frame_setting *flash_data) +{ + int i = 0, rc = 0; + + if (!flash_data) { + CAM_ERR(CAM_FLASH, "Flash Data Null"); + return -EINVAL; + } + + for (i = 0; i < flash_ctrl->flash_num_sources; i++) + if (flash_ctrl->flash_trigger[i]) + cam_res_mgr_led_trigger_event( + flash_ctrl->flash_trigger[i], + LED_OFF); + + rc = cam_flash_ops(flash_ctrl, flash_data, + CAMERA_SENSOR_FLASH_OP_FIRELOW); + if (rc) + CAM_ERR(CAM_FLASH, "Fire Torch failed: %d", rc); + + return rc; +} + +static int cam_flash_high( + struct cam_flash_ctrl *flash_ctrl, + struct cam_flash_frame_setting *flash_data) +{ + int i = 0, rc = 0; + + if (!flash_data) { + CAM_ERR(CAM_FLASH, "Flash Data Null"); + return -EINVAL; + } + + for (i = 0; i < flash_ctrl->torch_num_sources; i++) + if (flash_ctrl->torch_trigger[i]) + cam_res_mgr_led_trigger_event( + flash_ctrl->torch_trigger[i], + LED_OFF); + + rc = cam_flash_ops(flash_ctrl, flash_data, + CAMERA_SENSOR_FLASH_OP_FIREHIGH); + if (rc) + CAM_ERR(CAM_FLASH, "Fire Flash Failed: %d", rc); + + return rc; +} + +static int cam_flash_duration(struct cam_flash_ctrl *fctrl, + struct cam_flash_frame_setting *flash_data) +{ + int i = 0, rc = 0; + + if (!flash_data) { + CAM_ERR(CAM_FLASH, "Flash Data NULL"); + return -EINVAL; + } + + for (i = 0; i < fctrl->torch_num_sources; i++) + if (fctrl->torch_trigger[i]) + cam_res_mgr_led_trigger_event( + fctrl->torch_trigger[i], + LED_OFF); + + rc = cam_flash_ops(fctrl, flash_data, + CAMERA_SENSOR_FLASH_OP_FIREDURATION); + if (rc) + CAM_ERR(CAM_FLASH, "Fire PreciseFlash Failed: %d", rc); + + return rc; +} + 
+static int cam_flash_i2c_delete_req(struct cam_flash_ctrl *fctrl, + uint64_t req_id) +{ + int i = 0, rc = 0; + uint64_t top = 0, del_req_id = 0; + + if (req_id != 0) { + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + if ((req_id >= + fctrl->i2c_data.per_frame[i].request_id) && + (top < + fctrl->i2c_data.per_frame[i].request_id) && + (fctrl->i2c_data.per_frame[i].is_settings_valid + == 1)) { + del_req_id = top; + top = fctrl->i2c_data.per_frame[i].request_id; + } + } + + if (top < req_id) { + if ((((top % MAX_PER_FRAME_ARRAY) - (req_id % + MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) || + (((top % MAX_PER_FRAME_ARRAY) - (req_id % + MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX)) + del_req_id = req_id; + } + + if (!del_req_id) + return rc; + + CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu", + top, del_req_id); + } + + cam_flash_i2c_flush_nrt(fctrl); + + return 0; +} + +static int cam_flash_pmic_delete_req(struct cam_flash_ctrl *fctrl, + uint64_t req_id) +{ + int i = 0; + struct cam_flash_frame_setting *flash_data = NULL; + uint64_t top = 0, del_req_id = 0; + int frame_offset = 0; + + if (req_id != 0) { + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + flash_data = &fctrl->per_frame[i]; + if (req_id >= flash_data->cmn_attr.request_id && + flash_data->cmn_attr.is_settings_valid + == 1) { + if (top < flash_data->cmn_attr.request_id) { + del_req_id = top; + top = flash_data->cmn_attr.request_id; + } else if (top > + flash_data->cmn_attr.request_id && + del_req_id < + flash_data->cmn_attr.request_id) { + del_req_id = + flash_data->cmn_attr.request_id; + } + } + } + + if (top < req_id) { + if ((((top % MAX_PER_FRAME_ARRAY) - (req_id % + MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) || + (((top % MAX_PER_FRAME_ARRAY) - (req_id % + MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX)) + del_req_id = req_id; + } + + if (!del_req_id) + return 0; + + CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu", + top, del_req_id); + } + + /* delete the request */ + frame_offset = del_req_id % MAX_PER_FRAME_ARRAY; + 
flash_data = &fctrl->per_frame[frame_offset]; + flash_data->cmn_attr.request_id = 0; + flash_data->cmn_attr.is_settings_valid = false; + flash_data->cmn_attr.count = 0; + + for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++) + flash_data->led_current_ma[i] = 0; + + return 0; +} + +static int32_t cam_flash_slaveInfo_pkt_parser(struct cam_flash_ctrl *fctrl, + uint32_t *cmd_buf, size_t len) +{ + int32_t rc = 0; + struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf; + + if (len < sizeof(struct cam_cmd_i2c_info)) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + return -EINVAL; + } + if (fctrl->io_master_info.master_type == CCI_MASTER) { + fctrl->io_master_info.cci_client->cci_i2c_master = + fctrl->cci_i2c_master; + fctrl->io_master_info.cci_client->i2c_freq_mode = + i2c_info->i2c_freq_mode; + fctrl->io_master_info.cci_client->sid = + i2c_info->slave_addr >> 1; + CAM_DBG(CAM_FLASH, "Slave addr: 0x%x Freq Mode: %d", + i2c_info->slave_addr, i2c_info->i2c_freq_mode); + } else if (fctrl->io_master_info.master_type == I2C_MASTER) { + if (!fctrl->io_master_info.qup_client) { + CAM_ERR(CAM_FLASH, "io_master_info.qup_client is NULL"); + return -EINVAL; + } + fctrl->io_master_info.qup_client->i2c_client->addr = + i2c_info->slave_addr; + CAM_DBG(CAM_FLASH, "Slave addr: 0x%x", i2c_info->slave_addr); + } else { + CAM_ERR(CAM_FLASH, "Invalid Master type: %d", + fctrl->io_master_info.master_type); + rc = -EINVAL; + } + + return rc; +} + +int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl, + uint64_t req_id) +{ + struct i2c_settings_list *i2c_list; + struct i2c_settings_array *i2c_set = NULL; + int frame_offset = 0, rc = 0; + + if (req_id == 0) { + /* NonRealTime Init settings*/ + if (fctrl->i2c_data.init_settings.is_settings_valid == true) { + list_for_each_entry(i2c_list, + &(fctrl->i2c_data.init_settings.list_head), + list) { + rc = cam_sensor_util_i2c_apply_setting + (&(fctrl->io_master_info), i2c_list); + if ((rc == -EAGAIN) && + 
(fctrl->io_master_info.master_type == + CCI_MASTER)) { + CAM_WARN(CAM_FLASH, + "CCI HW is in reset mode: Reapplying Init settings"); + usleep_range(1000, 1010); + rc = cam_sensor_util_i2c_apply_setting + (&(fctrl->io_master_info), i2c_list); + } + + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed to apply init settings: %d", + rc); + return rc; + } + } + } + /* NonRealTime (Widget/RER/INIT_FIRE settings) */ + if (fctrl->i2c_data.config_settings.is_settings_valid == true) { + list_for_each_entry(i2c_list, + &(fctrl->i2c_data.config_settings.list_head), + list) { + rc = cam_sensor_util_i2c_apply_setting + (&(fctrl->io_master_info), i2c_list); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed to apply NRT settings: %d", rc); + return rc; + } + } + } + } else { + /* RealTime */ + frame_offset = req_id % MAX_PER_FRAME_ARRAY; + i2c_set = &fctrl->i2c_data.per_frame[frame_offset]; + if ((i2c_set->is_settings_valid == true) && + (i2c_set->request_id == req_id)) { + list_for_each_entry(i2c_list, + &(i2c_set->list_head), list) { + rc = cam_sensor_util_i2c_apply_setting( + &(fctrl->io_master_info), i2c_list); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed to apply settings: %d", rc); + return rc; + } + } + } + } + + cam_flash_i2c_delete_req(fctrl, req_id); + return rc; +} + +int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl, + uint64_t req_id) +{ + int rc = 0, i = 0; + int frame_offset = 0; + uint16_t num_iterations; + struct cam_flash_frame_setting *flash_data = NULL; + + if (req_id == 0) { + if (fctrl->nrt_info.cmn_attr.cmd_type == + CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE) { + flash_data = &fctrl->nrt_info; + CAM_DBG(CAM_REQ, + "FLASH_INIT_FIRE req_id: %u flash_opcode: %d", + req_id, flash_data->opcode); + + if (flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_FIREHIGH) { + if (fctrl->flash_state == + CAM_FLASH_STATE_START) { + CAM_WARN(CAM_FLASH, + "Wrong state :Prev state: %d", + fctrl->flash_state); + } + + rc = cam_flash_high(fctrl, flash_data); + if (rc) + CAM_ERR(CAM_FLASH, + 
"FLASH ON failed : %d", rc); + } + if (flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_FIRELOW) { + if (fctrl->flash_state == + CAM_FLASH_STATE_START) { + CAM_WARN(CAM_FLASH, + "Wrong state :Prev state: %d", + fctrl->flash_state); + } + + rc = cam_flash_low(fctrl, flash_data); + if (rc) + CAM_ERR(CAM_FLASH, + "TORCH ON failed : %d", rc); + } + if (flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_OFF) { + rc = cam_flash_off(fctrl); + if (rc) { + CAM_ERR(CAM_FLASH, + "LED OFF FAILED: %d", + rc); + return rc; + } + } + } else if (fctrl->nrt_info.cmn_attr.cmd_type == + CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) { + flash_data = &fctrl->nrt_info; + CAM_DBG(CAM_REQ, + "FLASH_WIDGET req_id: %u flash_opcode: %d", + req_id, flash_data->opcode); + + if (flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_FIRELOW) { + rc = cam_flash_low(fctrl, flash_data); + if (rc) { + CAM_ERR(CAM_FLASH, + "Torch ON failed : %d", + rc); + goto nrt_del_req; + } + } else if (flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_OFF) { + rc = cam_flash_off(fctrl); + if (rc) + CAM_ERR(CAM_FLASH, + "LED off failed: %d", + rc); + } + } else if (fctrl->nrt_info.cmn_attr.cmd_type == + CAMERA_SENSOR_FLASH_CMD_TYPE_RER) { + flash_data = &fctrl->nrt_info; + if (fctrl->flash_state != CAM_FLASH_STATE_START) { + rc = cam_flash_off(fctrl); + if (rc) { + CAM_ERR(CAM_FLASH, + "Flash off failed: %d", + rc); + goto nrt_del_req; + } + } + CAM_DBG(CAM_REQ, "FLASH_RER req_id: %u", req_id); + + num_iterations = flash_data->num_iterations; + for (i = 0; i < num_iterations; i++) { + /* Turn On Torch */ + if (fctrl->flash_state == + CAM_FLASH_STATE_START) { + rc = cam_flash_low(fctrl, flash_data); + if (rc) { + CAM_ERR(CAM_FLASH, + "Fire Torch Failed"); + goto nrt_del_req; + } + + usleep_range( + flash_data->led_on_delay_ms * 1000, + flash_data->led_on_delay_ms * 1000 + + 100); + } + /* Turn Off Torch */ + rc = cam_flash_off(fctrl); + if (rc) { + CAM_ERR(CAM_FLASH, + "Flash off failed: %d", rc); + continue; + } + fctrl->flash_state = 
CAM_FLASH_STATE_START; + usleep_range( + flash_data->led_off_delay_ms * 1000, + flash_data->led_off_delay_ms * 1000 + 100); + } + } + } else { + frame_offset = req_id % MAX_PER_FRAME_ARRAY; + flash_data = &fctrl->per_frame[frame_offset]; + CAM_DBG(CAM_REQ, "FLASH_RT req_id: %u flash_opcode: %d", + req_id, flash_data->opcode); + + if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) && + (flash_data->cmn_attr.is_settings_valid) && + (flash_data->cmn_attr.request_id == req_id)) { + /* Turn On Flash */ + if (fctrl->flash_state == CAM_FLASH_STATE_START) { + rc = cam_flash_high(fctrl, flash_data); + if (rc) { + CAM_ERR(CAM_FLASH, + "Flash ON failed: rc= %d", + rc); + goto apply_setting_err; + } + } + } else if ((flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_FIRELOW) && + (flash_data->cmn_attr.is_settings_valid) && + (flash_data->cmn_attr.request_id == req_id)) { + /* Turn On Torch */ + if (fctrl->flash_state == CAM_FLASH_STATE_START) { + rc = cam_flash_low(fctrl, flash_data); + if (rc) { + CAM_ERR(CAM_FLASH, + "Torch ON failed: rc= %d", + rc); + goto apply_setting_err; + } + } + } else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) && + (flash_data->cmn_attr.is_settings_valid) && + (flash_data->cmn_attr.request_id == req_id)) { + rc = cam_flash_off(fctrl); + if (rc) { + CAM_ERR(CAM_FLASH, + "Flash off failed %d", rc); + goto apply_setting_err; + } + } else if ((flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_FIREDURATION) && + (flash_data->cmn_attr.is_settings_valid) && + (flash_data->cmn_attr.request_id == req_id)) { + if (fctrl->flash_state == CAM_FLASH_STATE_START) { + rc = cam_flash_duration(fctrl, flash_data); + if (rc) { + CAM_ERR(CAM_FLASH, + "PreciaseFlash op failed:%d", + rc); + goto apply_setting_err; + } + } + } else if (flash_data->opcode == CAM_PKT_NOP_OPCODE) { + CAM_DBG(CAM_FLASH, "NOP Packet"); + } else { + rc = -EINVAL; + CAM_ERR(CAM_FLASH, "Invalid opcode: %d req_id: %llu", + flash_data->opcode, req_id); + goto apply_setting_err; + } + } 
+ +nrt_del_req: + cam_flash_pmic_delete_req(fctrl, req_id); +apply_setting_err: + return rc; +} + +int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg) +{ + int rc = 0, i = 0; + uintptr_t generic_ptr; + uint32_t total_cmd_buf_in_bytes = 0; + uint32_t *cmd_buf = NULL; + uint32_t *offset = NULL; + uint32_t frm_offset = 0; + size_t len_of_buffer; + size_t remain_len; + struct cam_flash_init *flash_init = NULL; + struct common_header *cmn_hdr = NULL; + struct cam_control *ioctl_ctrl = NULL; + struct cam_packet *csl_packet = NULL; + struct cam_packet *csl_packet_u = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_config_dev_cmd config; + struct cam_req_mgr_add_request add_req; + struct i2c_settings_array *i2c_reg_settings = NULL; + struct cam_sensor_power_ctrl_t *power_info = NULL; + + if (!fctrl || !arg) { + CAM_ERR(CAM_FLASH, "fctrl/arg is NULL"); + return -EINVAL; + } + /* getting CSL Packet */ + ioctl_ctrl = (struct cam_control *)arg; + + if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle, + sizeof(config))) { + CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed"); + return -EFAULT; + } + + rc = cam_mem_get_cpu_buf(config.packet_handle, + &generic_ptr, &len_of_buffer); + if (rc) { + CAM_ERR(CAM_FLASH, "Failed in getting the packet : %d", rc); + return rc; + } + remain_len = len_of_buffer; + if ((sizeof(struct cam_packet) > len_of_buffer) || + ((size_t)config.offset >= len_of_buffer - + sizeof(struct cam_packet))) { + CAM_ERR(CAM_FLASH, + "Inval cam_packet strut size: %zu, len_of_buff: %zu", + sizeof(struct cam_packet), len_of_buffer); + rc = -EINVAL; + goto put_ref; + } + + remain_len -= (size_t)config.offset; + /* Add offset to the flash csl header */ + csl_packet_u = (struct cam_packet *)(generic_ptr + config.offset); + + rc = cam_packet_util_copy_pkt_to_kmd(csl_packet_u, &csl_packet, remain_len); + if (rc) { + CAM_ERR(CAM_FLASH, "Copying packet to KMD failed"); + goto put_ref; + } + + if ((csl_packet->header.op_code 
& 0xFFFFFF) != + CAM_FLASH_PACKET_OPCODE_INIT && + csl_packet->header.request_id <= fctrl->last_flush_req + && fctrl->last_flush_req != 0) { + CAM_DBG(CAM_FLASH, + "reject request %lld, last request to flush %lld", + csl_packet->header.request_id, fctrl->last_flush_req); + rc = -EBADR; + goto end; + } + + if (csl_packet->header.request_id > fctrl->last_flush_req) + fctrl->last_flush_req = 0; + + switch (csl_packet->header.op_code & 0xFFFFFF) { + case CAM_FLASH_PACKET_OPCODE_INIT: { + /* INIT packet*/ + offset = (uint32_t *)((uint8_t *)&csl_packet->payload_flex + + csl_packet->cmd_buf_offset); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + + /* Loop through multiple command buffers */ + for (i = 1; i < csl_packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) { + CAM_ERR(CAM_FLASH, "Invalid cmd desc"); + goto end; + } + + total_cmd_buf_in_bytes = cmd_desc[i].length; + if (!total_cmd_buf_in_bytes) + continue; + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + &generic_ptr, &len_of_buffer); + if (rc < 0) { + CAM_ERR(CAM_FLASH, "Failed to get cpu buf"); + goto end; + } + cmd_buf = (uint32_t *)generic_ptr; + if (!cmd_buf) { + CAM_ERR(CAM_FLASH, "invalid cmd buf"); + rc = -EINVAL; + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + goto end; + } + + if ((len_of_buffer < sizeof(struct common_header)) || + (cmd_desc[i].offset > + (len_of_buffer - + sizeof(struct common_header)))) { + CAM_ERR(CAM_FLASH, "invalid cmd buf length"); + rc = -EINVAL; + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + goto end; + } + remain_len = len_of_buffer - cmd_desc[i].offset; + cmd_buf += cmd_desc[i].offset / sizeof(uint32_t); + cmn_hdr = (struct common_header *)cmd_buf; + + /* Loop through cmd formats in one cmd buffer */ + CAM_DBG(CAM_FLASH, + "command Type: %d,Total: %d", + cmn_hdr->cmd_type, total_cmd_buf_in_bytes); + switch (cmn_hdr->cmd_type) { + case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: + if (len_of_buffer < + sizeof(struct 
cam_flash_init)) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + rc = -EINVAL; + break; + } + + flash_init = (struct cam_flash_init *)cmd_buf; + fctrl->flash_type = flash_init->flash_type; + break; + case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: + rc = cam_flash_slaveInfo_pkt_parser( + fctrl, cmd_buf, remain_len); + if (rc < 0) { + CAM_ERR(CAM_FLASH, + "Failed parsing slave info: rc: %d", + rc); + break; + } + break; + case CAMERA_SENSOR_CMD_TYPE_PWR_UP: + case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN: + CAM_DBG(CAM_FLASH, + "Received power settings"); + rc = cam_sensor_update_power_settings( + cmd_buf, + total_cmd_buf_in_bytes, + &fctrl->power_info, remain_len); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed update power settings"); + break; + } + break; + default: + CAM_DBG(CAM_FLASH, + "Received initSettings"); + i2c_reg_settings = + &fctrl->i2c_data.init_settings; + + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = 1; + rc = cam_sensor_i2c_command_parser( + &fctrl->io_master_info, + i2c_reg_settings, + &cmd_desc[i], 1, NULL); + if (rc < 0) { + CAM_ERR(CAM_FLASH, + "pkt parsing failed: %d", rc); + break; + } + break; + } + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + + if (rc) + goto end; + } + power_info = &fctrl->power_info; + if (!power_info) { + CAM_ERR(CAM_FLASH, "Power_info is NULL"); + rc = -EINVAL; + goto end; + } + + /* Parse and fill vreg params for power up settings */ + rc = msm_camera_fill_vreg_params(&fctrl->soc_info, + power_info->power_setting, + power_info->power_setting_size); + if (rc) { + CAM_ERR(CAM_FLASH, + "failed to fill vreg params for power up rc:%d", + rc); + goto end; + } + + /* Parse and fill vreg params for power down settings*/ + rc = msm_camera_fill_vreg_params( + &fctrl->soc_info, + power_info->power_down_setting, + power_info->power_down_setting_size); + if (rc) { + CAM_ERR(CAM_FLASH, + "failed to fill vreg params power down rc:%d", + rc); + goto end; + } + + rc = fctrl->func_tbl.power_ops(fctrl, true); + if (rc) { + 
CAM_ERR(CAM_FLASH, + "Enable Regulator Failed rc = %d", rc); + goto end; + } + + rc = fctrl->func_tbl.apply_setting(fctrl, 0); + if (rc) { + CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc); + goto end; + } + + fctrl->flash_state = CAM_FLASH_STATE_CONFIG; + break; + } + case CAM_FLASH_PACKET_OPCODE_SET_OPS: { + offset = (uint32_t *)((uint8_t *)&csl_packet->payload_flex + + csl_packet->cmd_buf_offset); + frm_offset = csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY; + /* add support for handling i2c_data*/ + i2c_reg_settings = + &fctrl->i2c_data.per_frame[frm_offset]; + if (i2c_reg_settings->is_settings_valid == true) { + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = false; + goto update_req_mgr; + } + i2c_reg_settings->is_settings_valid = true; + i2c_reg_settings->request_id = + csl_packet->header.request_id; + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_sensor_i2c_command_parser( + &fctrl->io_master_info, + i2c_reg_settings, cmd_desc, 1, NULL); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed in parsing i2c packets"); + goto end; + } + break; + } + case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: { + offset = (uint32_t *)((uint8_t *)&csl_packet->payload_flex + + csl_packet->cmd_buf_offset); + + /* add support for handling i2c_data*/ + i2c_reg_settings = &fctrl->i2c_data.config_settings; + if (i2c_reg_settings->is_settings_valid == true) { + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = false; + + rc = delete_request(i2c_reg_settings); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed in Deleting the err: %d", rc); + goto end; + } + } + i2c_reg_settings->is_settings_valid = true; + i2c_reg_settings->request_id = + csl_packet->header.request_id; + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_sensor_i2c_command_parser( + &fctrl->io_master_info, + i2c_reg_settings, cmd_desc, 1, NULL); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed in parsing i2c NRT packets"); + goto end; + } + rc = 
fctrl->func_tbl.apply_setting(fctrl, 0); + if (rc) + CAM_ERR(CAM_FLASH, + "Apply setting failed: %d", rc); + goto end; + } + case CAM_PKT_NOP_OPCODE: { + if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) || + (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) { + CAM_WARN(CAM_FLASH, + "Rxed NOP packets without linking"); + frm_offset = csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY; + fctrl->i2c_data.per_frame[frm_offset].is_settings_valid + = false; + goto end; + } + + CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u", + csl_packet->header.request_id); + goto update_req_mgr; + } + default: + CAM_ERR(CAM_FLASH, "Wrong Opcode : %d", + (csl_packet->header.op_code & 0xFFFFFF)); + rc = -EINVAL; + goto end; + } +update_req_mgr: + if (((csl_packet->header.op_code & 0xFFFFF) == + CAM_PKT_NOP_OPCODE) || + ((csl_packet->header.op_code & 0xFFFFF) == + CAM_FLASH_PACKET_OPCODE_SET_OPS)) { + memset(&add_req, 0, sizeof(add_req)); + add_req.link_hdl = fctrl->bridge_intf.link_hdl; + add_req.req_id = csl_packet->header.request_id; + add_req.dev_hdl = fctrl->bridge_intf.device_hdl; + + if ((csl_packet->header.op_code & 0xFFFFF) == + CAM_FLASH_PACKET_OPCODE_SET_OPS) { + add_req.trigger_eof = true; + add_req.skip_at_sof = 1; + } + + if (fctrl->bridge_intf.crm_cb && + fctrl->bridge_intf.crm_cb->add_req) { + rc = fctrl->bridge_intf.crm_cb->add_req(&add_req); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_FLASH, + "Failed in adding request: %llu to request manager, it has been flushed", + csl_packet->header.request_id); + else + CAM_ERR(CAM_FLASH, + "Failed in adding request: %llu to request manager", + csl_packet->header.request_id); + goto end; + } + CAM_DBG(CAM_FLASH, + "add req %lld to req_mgr, trigger_eof %d", + add_req.req_id, add_req.trigger_eof); + } + } + +end: + cam_common_mem_free(csl_packet); +put_ref: + cam_mem_put_cpu_buf(config.packet_handle); + return rc; +} + +int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg) +{ + int rc = 0, i = 0; + 
uintptr_t generic_ptr, cmd_buf_ptr; + uint32_t *cmd_buf = NULL; + uint32_t *offset = NULL; + uint32_t frm_offset = 0; + uint32_t count; + size_t len_of_buffer; + size_t remain_len; + struct cam_control *ioctl_ctrl = NULL; + struct cam_packet *csl_packet = NULL; + struct cam_packet *csl_packet_u = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct common_header *cmn_hdr; + struct cam_config_dev_cmd config; + struct cam_req_mgr_add_request add_req = {0}; + struct cam_flash_init *cam_flash_info = NULL; + struct cam_flash_set_rer *flash_rer_info = NULL; + struct cam_flash_set_on_off *flash_operation_info = NULL; + struct cam_flash_set_on_off *flash_operation_info_u = NULL; + struct cam_flash_query_curr *flash_query_info = NULL; + struct cam_flash_frame_setting *flash_data = NULL; + struct cam_flash_private_soc *soc_private = NULL; + + if (!fctrl || !arg) { + CAM_ERR(CAM_FLASH, "fctrl/arg is NULL"); + return -EINVAL; + } + + soc_private = (struct cam_flash_private_soc *) + fctrl->soc_info.soc_private; + + /* getting CSL Packet */ + ioctl_ctrl = (struct cam_control *)arg; + + if (copy_from_user((&config), + u64_to_user_ptr(ioctl_ctrl->handle), + sizeof(config))) { + CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed"); + rc = -EFAULT; + return rc; + } + + rc = cam_mem_get_cpu_buf(config.packet_handle, + &generic_ptr, &len_of_buffer); + if (rc) { + CAM_ERR(CAM_FLASH, "Failed in getting the packet: %d", rc); + return rc; + } + + remain_len = len_of_buffer; + if ((sizeof(struct cam_packet) > len_of_buffer) || + ((size_t)config.offset >= len_of_buffer - + sizeof(struct cam_packet))) { + CAM_ERR(CAM_FLASH, + "Inval cam_packet strut size: %zu, len_of_buff: %zu", + sizeof(struct cam_packet), len_of_buffer); + rc = -EINVAL; + goto put_ref; + } + + remain_len -= (size_t)config.offset; + /* Add offset to the flash csl header */ + csl_packet_u = (struct cam_packet *)(generic_ptr + config.offset); + rc = cam_packet_util_copy_pkt_to_kmd(csl_packet_u, &csl_packet, 
remain_len); + if (rc) { + CAM_ERR(CAM_FLASH, "Copying packet to KMD failed"); + goto put_ref; + } + + if ((csl_packet->header.op_code & 0xFFFFFF) != + CAM_FLASH_PACKET_OPCODE_INIT && + csl_packet->header.request_id <= fctrl->last_flush_req + && fctrl->last_flush_req != 0) { + CAM_WARN(CAM_FLASH, + "reject request %lld, last request to flush %d", + csl_packet->header.request_id, fctrl->last_flush_req); + rc = -EINVAL; + goto end; + } + + if (csl_packet->header.request_id > fctrl->last_flush_req) + fctrl->last_flush_req = 0; + + switch (csl_packet->header.op_code & 0xFFFFFF) { + case CAM_FLASH_PACKET_OPCODE_INIT: { + /* INIT packet*/ + offset = (uint32_t *)((uint8_t *)&csl_packet->payload_flex + + csl_packet->cmd_buf_offset); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle, + &cmd_buf_ptr, &len_of_buffer); + if (rc) { + CAM_ERR(CAM_FLASH, "Fail in get buffer: %d", rc); + goto end; + } + if ((len_of_buffer < sizeof(struct cam_flash_init)) || + (cmd_desc->offset > + (len_of_buffer - sizeof(struct cam_flash_init)))) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + rc = -EINVAL; + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + goto end; + } + remain_len = len_of_buffer - cmd_desc->offset; + cmd_buf = (uint32_t *)((uint8_t *)cmd_buf_ptr + + cmd_desc->offset); + cam_flash_info = (struct cam_flash_init *)cmd_buf; + + switch (cam_flash_info->cmd_type) { + case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: { + CAM_DBG(CAM_FLASH, "INIT_INFO CMD CALLED"); + fctrl->flash_init_setting.cmn_attr.request_id = 0; + fctrl->flash_init_setting.cmn_attr.is_settings_valid = + true; + fctrl->flash_type = cam_flash_info->flash_type; + fctrl->is_regulator_enabled = false; + fctrl->nrt_info.cmn_attr.cmd_type = + CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO; + + fctrl->flash_state = + CAM_FLASH_STATE_CONFIG; + break; + } + case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE: { + CAM_INFO(CAM_FLASH, "INIT_FIRE Operation for dev_hdl: 0x%x", + 
fctrl->bridge_intf.device_hdl); + + if (remain_len < sizeof(struct cam_flash_set_on_off)) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + rc = -EINVAL; + break; + } + + flash_operation_info_u = + (struct cam_flash_set_on_off *) cmd_buf; + if (!flash_operation_info_u) { + CAM_ERR(CAM_FLASH, + "flash_operation_info Null"); + rc = -EINVAL; + break; + } + + count = flash_operation_info_u->count; + rc = cam_common_mem_kdup((void**)&flash_operation_info, + flash_operation_info_u, + sizeof(struct cam_flash_set_on_off)); + if (rc) { + CAM_ERR(CAM_FLASH, "Alloc and copy flash operation info failed"); + break; + } + + if (count != flash_operation_info->count) { + CAM_ERR(CAM_FLASH, "Count changed: userspace: %d, kernel: %d", + count, flash_operation_info->count); + rc = -EINVAL; + cam_common_mem_free(flash_operation_info); + break; + } + + if (flash_operation_info->count > + CAM_FLASH_MAX_LED_TRIGGERS) { + CAM_ERR(CAM_FLASH, "led count out of limit"); + rc = -EINVAL; + cam_common_mem_free(flash_operation_info); + break; + } + fctrl->nrt_info.cmn_attr.count = + flash_operation_info->count; + fctrl->nrt_info.cmn_attr.request_id = 0; + fctrl->nrt_info.opcode = + flash_operation_info->opcode; + fctrl->nrt_info.cmn_attr.cmd_type = + CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE; + for (i = 0; + i < flash_operation_info->count; i++) + fctrl->nrt_info.led_current_ma[i] = + flash_operation_info->led_current_ma[i]; + + rc = fctrl->func_tbl.apply_setting(fctrl, 0); + if (rc) + CAM_ERR(CAM_FLASH, + "Apply setting failed: %d", + rc); + cam_common_mem_free(flash_operation_info); + break; + } + default: + CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d", + cam_flash_info->cmd_type); + rc = -EINVAL; + break; + } + + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + break; + } + case CAM_FLASH_PACKET_OPCODE_SET_OPS: { + offset = (uint32_t *)((uint8_t *)&csl_packet->payload_flex + + csl_packet->cmd_buf_offset); + frm_offset = csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY; + flash_data = 
&fctrl->per_frame[frm_offset]; + + if (flash_data->cmn_attr.is_settings_valid == true) { + flash_data->cmn_attr.request_id = 0; + flash_data->cmn_attr.is_settings_valid = false; + for (i = 0; i < flash_data->cmn_attr.count; i++) + flash_data->led_current_ma[i] = 0; + } + + flash_data->cmn_attr.request_id = csl_packet->header.request_id; + flash_data->cmn_attr.is_settings_valid = true; + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle, + &cmd_buf_ptr, &len_of_buffer); + if (rc) { + CAM_ERR(CAM_FLASH, "Fail in get buffer: 0x%x", + cmd_desc->mem_handle); + goto end; + } + + if ((len_of_buffer < sizeof(struct common_header)) || + (cmd_desc->offset > + (len_of_buffer - sizeof(struct common_header)))) { + CAM_ERR(CAM_FLASH, "not enough buffer"); + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + rc = -EINVAL; + goto end; + } + remain_len = len_of_buffer - cmd_desc->offset; + + cmd_buf = (uint32_t *)((uint8_t *)cmd_buf_ptr + + cmd_desc->offset); + cmn_hdr = (struct common_header *)cmd_buf; + + switch (cmn_hdr->cmd_type) { + case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: { + CAM_DBG(CAM_FLASH, + "CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE cmd called, req:%lld", + csl_packet->header.request_id); + if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) || + (fctrl->flash_state == + CAM_FLASH_STATE_ACQUIRE)) { + CAM_WARN(CAM_FLASH, + "Rxed Flash fire ops without linking"); + flash_data->cmn_attr.is_settings_valid = false; + rc = -EINVAL; + break; + } + if (remain_len < sizeof(struct cam_flash_set_on_off)) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + rc = -EINVAL; + break; + } + + flash_operation_info_u = + (struct cam_flash_set_on_off *) cmd_buf; + if (!flash_operation_info_u) { + CAM_ERR(CAM_FLASH, + "flash_operation_info Null"); + rc = -EINVAL; + break; + } + + count = flash_operation_info_u->count; + rc = cam_common_mem_kdup((void**)&flash_operation_info, + flash_operation_info_u, + sizeof(struct cam_flash_set_on_off)); + if (rc) { + 
CAM_ERR(CAM_FLASH, "Alloc and copy flash operation info failed"); + break; + } + + if (count != flash_operation_info->count) { + CAM_ERR(CAM_FLASH, "Count changed: userspace: %d, kernel: %d", + count, flash_operation_info->count); + rc = -EINVAL; + cam_common_mem_free(flash_operation_info); + break; + } + + if (flash_operation_info->count > + CAM_FLASH_MAX_LED_TRIGGERS) { + CAM_ERR(CAM_FLASH, "led count out of limit"); + rc = -EINVAL; + cam_common_mem_free(flash_operation_info); + break; + } + + flash_data->opcode = flash_operation_info->opcode; + flash_data->cmn_attr.count = + flash_operation_info->count; + for (i = 0; i < flash_operation_info->count; i++) + flash_data->led_current_ma[i] + = flash_operation_info->led_current_ma[i]; + + CAM_DBG(CAM_FLASH, + "FLASH_CMD_TYPE op:%d, req:%lld", + flash_data->opcode, csl_packet->header.request_id); + + if (flash_data->opcode == + CAMERA_SENSOR_FLASH_OP_FIREDURATION) { + /* Active time for the preflash */ + flash_data->flash_active_time_ms = + (flash_operation_info->time_on_duration_ns) + / 1000000; + flash_data->flash_on_wait_time_ms = + (flash_operation_info->led_on_wait_time_ns) + / 1000000; + CAM_DBG(CAM_FLASH, + "PRECISE FLASH: active wait tme:%llu duration: %llu", + flash_data->flash_on_wait_time_ms, + flash_data->flash_active_time_ms); + } + cam_common_mem_free(flash_operation_info); + break; + } + default: + CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d", + cmn_hdr->cmd_type); + rc = -EINVAL; + break; + } + + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + break; + } + case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: { + offset = (uint32_t *)((uint8_t *)&csl_packet->payload_flex + + csl_packet->cmd_buf_offset); + fctrl->nrt_info.cmn_attr.is_settings_valid = true; + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle, + &cmd_buf_ptr, &len_of_buffer); + if (rc) { + CAM_ERR(CAM_FLASH, "Fail in get buffer: %d", rc); + goto end; + } + + if ((len_of_buffer < sizeof(struct 
common_header)) || + (cmd_desc->offset > + (len_of_buffer - sizeof(struct common_header)))) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + rc = -EINVAL; + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + goto end; + } + remain_len = len_of_buffer - cmd_desc->offset; + cmd_buf = (uint32_t *)((uint8_t *)cmd_buf_ptr + + cmd_desc->offset); + cmn_hdr = (struct common_header *)cmd_buf; + + switch (cmn_hdr->cmd_type) { + case CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET: { + CAM_DBG(CAM_FLASH, "Widget Flash Operation"); + if (remain_len < sizeof(struct cam_flash_set_on_off)) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + rc = -EINVAL; + break; + } + + flash_operation_info_u = + (struct cam_flash_set_on_off *) cmd_buf; + if (!flash_operation_info_u) { + CAM_ERR(CAM_FLASH, + "flash_operation_info Null"); + rc = -EINVAL; + break; + } + + count = flash_operation_info_u->count; + rc = cam_common_mem_kdup((void**)&flash_operation_info, + flash_operation_info_u, + sizeof(struct cam_flash_set_on_off)); + if (rc) { + CAM_ERR(CAM_FLASH, "Alloc and copy flash operation info failed"); + break; + } + + if (count != flash_operation_info->count) { + CAM_ERR(CAM_FLASH, "Count changed: userspace: %d, kernel: %d", + count, flash_operation_info->count); + rc = -EINVAL; + cam_common_mem_free(flash_operation_info); + break; + } + + if (flash_operation_info->count > + CAM_FLASH_MAX_LED_TRIGGERS) { + CAM_ERR(CAM_FLASH, "led count out of limit"); + rc = -EINVAL; + cam_common_mem_free(flash_operation_info); + break; + } + + fctrl->nrt_info.cmn_attr.count = + flash_operation_info->count; + fctrl->nrt_info.cmn_attr.request_id = 0; + fctrl->nrt_info.opcode = + flash_operation_info->opcode; + fctrl->nrt_info.cmn_attr.cmd_type = + CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET; + + for (i = 0; i < flash_operation_info->count; i++) + fctrl->nrt_info.led_current_ma[i] = + flash_operation_info->led_current_ma[i]; + + rc = fctrl->func_tbl.apply_setting(fctrl, 0); + if (rc) + CAM_ERR(CAM_FLASH, "Apply setting failed: %d", + rc); 
+ + cam_common_mem_free(flash_operation_info); + break; + } + case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: { + int query_curr_ma = 0; + + if (remain_len < sizeof(struct cam_flash_query_curr)) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + rc = -EINVAL; + break; + } + flash_query_info = + (struct cam_flash_query_curr *)cmd_buf; +#if __or(IS_REACHABLE(CONFIG_LEDS_QPNP_FLASH_V2), \ + IS_REACHABLE(CONFIG_LEDS_QTI_FLASH)) + rc = cam_flash_led_prepare(fctrl->switch_trigger, + QUERY_MAX_AVAIL_CURRENT, &query_curr_ma, + soc_private->is_wled_flash); + + CAM_DBG(CAM_FLASH, "query_curr_ma = %d", + query_curr_ma); +#else + rc = -EOPNOTSUPP; +#endif + + if (rc) { + CAM_ERR(CAM_FLASH, + "Query current failed with rc=%d", rc); + break; + } + flash_query_info->query_current_ma = query_curr_ma; + break; + } + case CAMERA_SENSOR_FLASH_CMD_TYPE_RER: { + if (remain_len < sizeof(struct cam_flash_set_rer)) { + CAM_ERR(CAM_FLASH, "Not enough buffer"); + rc = -EINVAL; + break; + } + flash_rer_info = (struct cam_flash_set_rer *)cmd_buf; + if (!flash_rer_info) { + CAM_ERR(CAM_FLASH, + "flash_rer_info Null"); + rc = -EINVAL; + break; + } + if (flash_rer_info->count > + CAM_FLASH_MAX_LED_TRIGGERS) { + CAM_ERR(CAM_FLASH, "led count out of limit"); + rc = -EINVAL; + break; + } + + fctrl->nrt_info.cmn_attr.cmd_type = + CAMERA_SENSOR_FLASH_CMD_TYPE_RER; + fctrl->nrt_info.opcode = flash_rer_info->opcode; + fctrl->nrt_info.cmn_attr.count = flash_rer_info->count; + fctrl->nrt_info.cmn_attr.request_id = 0; + fctrl->nrt_info.num_iterations = + flash_rer_info->num_iteration; + fctrl->nrt_info.led_on_delay_ms = + flash_rer_info->led_on_delay_ms; + fctrl->nrt_info.led_off_delay_ms = + flash_rer_info->led_off_delay_ms; + + for (i = 0; i < flash_rer_info->count; i++) + fctrl->nrt_info.led_current_ma[i] = + flash_rer_info->led_current_ma[i]; + + rc = fctrl->func_tbl.apply_setting(fctrl, 0); + if (rc) + CAM_ERR(CAM_FLASH, "apply_setting failed: %d", + rc); + break; + } + default: + CAM_ERR(CAM_FLASH, "Wrong 
cmd_type : %d", + cmn_hdr->cmd_type); + rc = -EINVAL; + break; + } + + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + break; + } + case CAM_PKT_NOP_OPCODE: { + frm_offset = csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY; + if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) || + (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) { + CAM_WARN(CAM_FLASH, + "Rxed NOP packets without linking"); + fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid + = false; + rc = -EINVAL; + goto end; + } + + fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid = false; + fctrl->per_frame[frm_offset].cmn_attr.request_id = 0; + fctrl->per_frame[frm_offset].opcode = CAM_PKT_NOP_OPCODE; + CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %llu", + csl_packet->header.request_id); + break; + } + default: + CAM_ERR(CAM_FLASH, "Wrong Opcode : %d", + (csl_packet->header.op_code & 0xFFFFFF)); + rc = -EINVAL; + goto end; + } + + if (rc) + goto end; + + if (((csl_packet->header.op_code & 0xFFFFF) == + CAM_PKT_NOP_OPCODE) || + ((csl_packet->header.op_code & 0xFFFFF) == + CAM_FLASH_PACKET_OPCODE_SET_OPS)) { + memset(&add_req, 0, sizeof(add_req)); + add_req.link_hdl = fctrl->bridge_intf.link_hdl; + add_req.req_id = csl_packet->header.request_id; + add_req.dev_hdl = fctrl->bridge_intf.device_hdl; + + if ((csl_packet->header.op_code & 0xFFFFF) == + CAM_FLASH_PACKET_OPCODE_SET_OPS) { + add_req.trigger_eof = true; + if (flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) { + add_req.skip_at_sof = 1; + add_req.skip_at_eof = 0; + } else { + add_req.skip_at_sof = 1; + add_req.skip_at_eof = 0; + } + } + + if (fctrl->bridge_intf.crm_cb && + fctrl->bridge_intf.crm_cb->add_req) { + rc = fctrl->bridge_intf.crm_cb->add_req(&add_req); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_FLASH, + "Failed in adding request: %llu to request manager, it has been flushed", + csl_packet->header.request_id); + else + CAM_ERR(CAM_FLASH, + "Failed in adding request: %llu to request manager", + 
csl_packet->header.request_id); + goto end; + } + CAM_DBG(CAM_FLASH, + "add req %lld to req_mgr, trigger_eof %d", + add_req.req_id, add_req.trigger_eof); + } + } + +end: + cam_common_mem_free(csl_packet); +put_ref: + cam_mem_put_cpu_buf(config.packet_handle); + return rc; +} + +int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info) +{ + struct cam_flash_ctrl *fctrl; + + fctrl = (struct cam_flash_ctrl *)cam_get_device_priv(info->dev_hdl); + if (!fctrl) { + CAM_ERR(CAM_FLASH, " Device data is NULL"); + return -EINVAL; + } + + info->dev_id = CAM_REQ_MGR_DEVICE_FLASH; + snprintf(info->name, sizeof(info->name), "%s(camera-flash%u)", + CAM_FLASH_NAME, fctrl->soc_info.index); + info->p_delay = CAM_PIPELINE_DELAY_1; + info->m_delay = CAM_MODESWITCH_DELAY_1; + info->trigger = CAM_TRIGGER_POINT_SOF; + return 0; +} + +int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link) +{ + struct cam_flash_ctrl *fctrl = NULL; + + if (!link) + return -EINVAL; + + fctrl = (struct cam_flash_ctrl *)cam_get_device_priv(link->dev_hdl); + if (!fctrl) { + CAM_ERR(CAM_FLASH, " Device data is NULL"); + return -EINVAL; + } + mutex_lock(&fctrl->flash_mutex); + if (link->link_enable) { + fctrl->bridge_intf.link_hdl = link->link_hdl; + fctrl->bridge_intf.crm_cb = link->crm_cb; + } else { + fctrl->bridge_intf.link_hdl = -1; + fctrl->bridge_intf.crm_cb = NULL; + } + mutex_unlock(&fctrl->flash_mutex); + + return 0; +} + +int cam_flash_release_dev(struct cam_flash_ctrl *fctrl) +{ + int rc = 0; + + if (fctrl->bridge_intf.device_hdl != 1) { + rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl); + if (rc) + CAM_ERR(CAM_FLASH, + "Failed in destroying device handle rc = %d", + rc); + fctrl->bridge_intf.device_hdl = -1; + fctrl->bridge_intf.link_hdl = -1; + fctrl->bridge_intf.session_hdl = -1; + fctrl->last_flush_req = 0; + } + + return rc; +} + +void cam_flash_shutdown(struct cam_flash_ctrl *fctrl) +{ + int rc; + + if (fctrl->flash_state == CAM_FLASH_STATE_INIT) + 
return; + + if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) || + (fctrl->flash_state == CAM_FLASH_STATE_START)) { + fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); + rc = cam_flash_off(fctrl); + if (rc) { + CAM_ERR(CAM_FLASH, + "LED OFF FAILED: %d", rc); + } + if (fctrl->func_tbl.power_ops) { + rc = fctrl->func_tbl.power_ops(fctrl, false); + if (rc) + CAM_ERR(CAM_FLASH, "Power Down Failed rc: %d", + rc); + } + } + + rc = cam_flash_release_dev(fctrl); + if (rc) + CAM_ERR(CAM_FLASH, "Release failed rc: %d", rc); + + fctrl->flash_state = CAM_FLASH_STATE_INIT; +} + +int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply) +{ + int rc = 0; + struct cam_flash_ctrl *fctrl = NULL; + + if (!apply) + return -EINVAL; + + fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(apply->dev_hdl); + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Device data is NULL"); + return -EINVAL; + } + + mutex_lock(&fctrl->flash_mutex); + rc = fctrl->func_tbl.apply_setting(fctrl, apply->request_id); + if (rc) + CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d", + rc); + mutex_unlock(&fctrl->flash_mutex); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_core.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_core.h new file mode 100644 index 0000000000..f4e451c1ef --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_core.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _CAM_FLASH_CORE_H_ +#define _CAM_FLASH_CORE_H_ + +#include +#include "cam_flash_dev.h" + +int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info); +int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link); +int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply); +int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data); +int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush); +int cam_flash_led_prepare(struct led_trigger *trigger, int options, + int *max_current, bool is_wled); + + +#endif /*_CAM_FLASH_CORE_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_dev.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_dev.c new file mode 100644 index 0000000000..0f08bd8740 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_dev.c @@ -0,0 +1,904 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include "cam_flash_dev.h" +#include "cam_req_mgr_dev.h" +#include "cam_flash_soc.h" +#include "cam_flash_core.h" +#include "cam_common_util.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" + +static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl, + void *arg, struct cam_flash_private_soc *soc_private) +{ + int rc = 0; + int i = 0; + struct cam_control *cmd = (struct cam_control *)arg; + + if (!fctrl || !arg) { + CAM_ERR(CAM_FLASH, "fctrl/arg is NULL with arg:%pK fctrl%pK", + fctrl, arg); + return -EINVAL; + } + + if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_FLASH, "Invalid handle type: %d", + cmd->handle_type); + return -EINVAL; + } + + mutex_lock(&(fctrl->flash_mutex)); + switch (cmd->op_code) { + case CAM_ACQUIRE_DEV: { + struct cam_sensor_acquire_dev flash_acq_dev; + struct cam_create_dev_hdl bridge_params; + + if (fctrl->flash_state != CAM_FLASH_STATE_INIT) { + CAM_ERR(CAM_FLASH, + "Cannot apply Acquire dev: Prev state: %d", + fctrl->flash_state); + rc = -EINVAL; + goto release_mutex; + } + + if (fctrl->bridge_intf.device_hdl != -1) { + CAM_ERR(CAM_FLASH, "Device is already acquired"); + rc = -EINVAL; + goto release_mutex; + } + + rc = copy_from_user(&flash_acq_dev, + u64_to_user_ptr(cmd->handle), + sizeof(flash_acq_dev)); + if (rc) { + CAM_ERR(CAM_FLASH, "Failed Copying from User"); + goto release_mutex; + } + + bridge_params.session_hdl = flash_acq_dev.session_handle; + bridge_params.ops = &fctrl->bridge_intf.ops; + bridge_params.v4l2_sub_dev_flag = 0; + bridge_params.media_entity_flag = 0; + bridge_params.priv = fctrl; + bridge_params.dev_id = CAM_FLASH; + + flash_acq_dev.device_handle = + cam_create_device_hdl(&bridge_params); + if (flash_acq_dev.device_handle <= 0) { + rc = -EFAULT; + CAM_ERR(CAM_FLASH, "Can not create device handle"); + goto release_mutex; + } + fctrl->bridge_intf.device_hdl = + flash_acq_dev.device_handle; + fctrl->bridge_intf.session_hdl = + flash_acq_dev.session_handle; + + rc 
= copy_to_user(u64_to_user_ptr(cmd->handle), + &flash_acq_dev, + sizeof(struct cam_sensor_acquire_dev)); + if (rc) { + CAM_ERR(CAM_FLASH, "Failed Copy to User with rc = %d", + rc); + rc = -EFAULT; + goto release_mutex; + } + fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE; + + CAM_INFO(CAM_FLASH, "CAM_ACQUIRE_DEV for dev_hdl: 0x%x", + fctrl->bridge_intf.device_hdl); + break; + } + case CAM_RELEASE_DEV: { + CAM_INFO(CAM_FLASH, "CAM_RELEASE_DEV for dev_hdl: 0x%x", + fctrl->bridge_intf.device_hdl); + if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) || + (fctrl->flash_state == CAM_FLASH_STATE_START)) { + CAM_WARN(CAM_FLASH, + "Wrong state for Release dev: Prev state:%d", + fctrl->flash_state); + } + + if (fctrl->bridge_intf.device_hdl == -1 && + fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE) { + CAM_ERR(CAM_FLASH, + "Invalid Handle: Link Hdl: %d device hdl: %d", + fctrl->bridge_intf.device_hdl, + fctrl->bridge_intf.link_hdl); + rc = -EINVAL; + goto release_mutex; + } + + if (fctrl->bridge_intf.link_hdl != -1) { + CAM_ERR(CAM_FLASH, + "Device [%d] still active on link 0x%x", + fctrl->flash_state, + fctrl->bridge_intf.link_hdl); + rc = -EAGAIN; + goto release_mutex; + } + + if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) || + (fctrl->flash_state == CAM_FLASH_STATE_START)) + fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); + + if (cam_flash_release_dev(fctrl)) + CAM_WARN(CAM_FLASH, + "Failed in destroying the device Handle"); + + if (fctrl->func_tbl.power_ops) { + if (fctrl->func_tbl.power_ops(fctrl, false)) + CAM_WARN(CAM_FLASH, "Power Down Failed"); + } + + fctrl->flash_state = CAM_FLASH_STATE_INIT; + break; + } + case CAM_QUERY_CAP: { + struct cam_flash_query_cap_info flash_cap = {0}; + + CAM_DBG(CAM_FLASH, "CAM_QUERY_CAP"); + flash_cap.slot_info = fctrl->soc_info.index; + for (i = 0; i < fctrl->flash_num_sources; i++) { + flash_cap.max_current_flash[i] = + soc_private->flash_max_current[i]; + flash_cap.max_duration_flash[i] = + soc_private->flash_max_duration[i]; + } + 
+ for (i = 0; i < fctrl->torch_num_sources; i++) + flash_cap.max_current_torch[i] = + soc_private->torch_max_current[i]; + + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &flash_cap, sizeof(struct cam_flash_query_cap_info))) { + CAM_ERR(CAM_FLASH, "Failed Copy to User"); + rc = -EFAULT; + goto release_mutex; + } + break; + } + case CAM_START_DEV: { + CAM_INFO(CAM_FLASH, "CAM_START_DEV for dev_hdl: 0x%x", + fctrl->bridge_intf.device_hdl); + if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) || + (fctrl->flash_state == CAM_FLASH_STATE_START)) { + CAM_WARN(CAM_FLASH, + "Cannot apply Start Dev: Prev state: %d", + fctrl->flash_state); + rc = -EINVAL; + goto release_mutex; + } + + fctrl->flash_state = CAM_FLASH_STATE_START; + break; + } + case CAM_STOP_DEV: { + CAM_INFO(CAM_FLASH, "CAM_STOP_DEV ENTER for dev_hdl: 0x%x", + fctrl->bridge_intf.device_hdl); + if (fctrl->flash_state != CAM_FLASH_STATE_START) { + CAM_WARN(CAM_FLASH, + "Cannot apply Stop dev: Prev state is: %d", + fctrl->flash_state); + rc = -EINVAL; + goto release_mutex; + } + + fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); + fctrl->last_flush_req = 0; + cam_flash_off(fctrl); + fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE; + break; + } + case CAM_CONFIG_DEV: { + CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV"); + rc = fctrl->func_tbl.parser(fctrl, arg); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_FLASH, + "Failed Flash Config: rc=%d\n, it has been flushed", rc); + else + CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d\n", rc); + goto release_mutex; + } + break; + } + default: + CAM_ERR(CAM_FLASH, "Invalid Opcode: %d", cmd->op_code); + rc = -EINVAL; + } + +release_mutex: + mutex_unlock(&(fctrl->flash_mutex)); + return rc; +} + +static int32_t cam_flash_init_default_params(struct cam_flash_ctrl *fctrl) +{ + /* Validate input parameters */ + if (!fctrl) { + CAM_ERR(CAM_FLASH, "failed: invalid params fctrl %pK", + fctrl); + return -EINVAL; + } + + CAM_DBG(CAM_FLASH, + "master_type: %d", 
fctrl->io_master_info.master_type); + /* Initialize cci_client */ + if (fctrl->io_master_info.master_type == CCI_MASTER) { + fctrl->io_master_info.cci_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_cci_client), GFP_KERNEL); + if (!(fctrl->io_master_info.cci_client)) + return -ENOMEM; + } else if (fctrl->io_master_info.master_type == I2C_MASTER) { + if (!(fctrl->io_master_info.qup_client)) + return -EINVAL; + } else { + CAM_ERR(CAM_FLASH, + "Invalid master / Master type Not supported"); + return -EINVAL; + } + + return 0; +} + +static const struct of_device_id cam_flash_dt_match[] = { + {.compatible = "qcom,camera-flash", .data = NULL}, + {} +}; + +static int cam_flash_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_flash_ctrl *fctrl = + v4l2_get_subdevdata(sd); + + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Flash ctrl ptr is NULL"); + return -EINVAL; + } + + mutex_lock(&fctrl->flash_mutex); + cam_flash_shutdown(fctrl); + mutex_unlock(&fctrl->flash_mutex); + + return 0; +} + +static int cam_flash_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_FLASH, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_flash_subdev_close_internal(sd, fh); +} + +static long cam_flash_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + int rc = 0; + struct cam_flash_ctrl *fctrl = NULL; + struct cam_flash_private_soc *soc_private = NULL; + + CAM_DBG(CAM_FLASH, "Enter"); + + fctrl = v4l2_get_subdevdata(sd); + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Flash ctrl ptr is NULL"); + return -EINVAL; + } + soc_private = fctrl->soc_info.soc_private; + + switch (cmd) { + case VIDIOC_CAM_CONTROL: { + rc = cam_flash_driver_cmd(fctrl, arg, + soc_private); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_FLASH, + "Failed in driver cmd: %d, it has been flushed", rc); + else + CAM_ERR(CAM_FLASH, + "Failed in driver cmd: %d", 
rc); + } + break; + } + case CAM_SD_SHUTDOWN: + if (!cam_req_mgr_is_shutdown()) { + CAM_ERR(CAM_CORE, "SD shouldn't come from user space"); + return 0; + } + + rc = cam_flash_subdev_close_internal(sd, NULL); + break; + default: + CAM_ERR(CAM_FLASH, "Invalid ioctl cmd type"); + rc = -ENOIOCTLCMD; + break; + } + + CAM_DBG(CAM_FLASH, "Exit"); + return rc; +} + +#ifdef CONFIG_COMPAT +static long cam_flash_subdev_do_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + struct cam_control cmd_data; + int32_t rc = 0; + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_FLASH, + "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: { + rc = cam_flash_subdev_ioctl(sd, cmd, &cmd_data); + if (rc) + CAM_ERR(CAM_FLASH, "cam_flash_ioctl failed"); + break; + } + default: + CAM_ERR(CAM_FLASH, "Invalid compat ioctl cmd_type:%d", + cmd); + rc = -ENOIOCTLCMD; + break; + } + + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { + CAM_ERR(CAM_FLASH, + "Failed to copy to user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + + return rc; +} +#endif + +static struct v4l2_subdev_core_ops cam_flash_subdev_core_ops = { + .ioctl = cam_flash_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_flash_subdev_do_ioctl +#endif +}; + +static struct v4l2_subdev_ops cam_flash_subdev_ops = { + .core = &cam_flash_subdev_core_ops, +}; + +static const struct v4l2_subdev_internal_ops cam_flash_internal_ops = { + .close = cam_flash_subdev_close, +}; + +static int cam_flash_init_subdev(struct cam_flash_ctrl *fctrl) +{ + int rc = 0; + + strscpy(fctrl->device_name, CAM_FLASH_NAME, + sizeof(fctrl->device_name)); + fctrl->v4l2_dev_str.internal_ops = + &cam_flash_internal_ops; + fctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops; + fctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME; + 
fctrl->v4l2_dev_str.sd_flags = + V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; + fctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE; + fctrl->v4l2_dev_str.token = fctrl; + fctrl->v4l2_dev_str.close_seq_prior = CAM_SD_CLOSE_MEDIUM_PRIORITY; + + rc = cam_register_subdev(&(fctrl->v4l2_dev_str)); + if (rc) + CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc); + + return rc; +} + +static int cam_flash_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int32_t rc = 0, i = 0; + struct cam_flash_ctrl *fctrl = NULL; + struct device_node *of_parent = NULL; + bool i3c_i2c_target; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + CAM_DBG(CAM_FLASH, "Binding flash component"); + if (!pdev->dev.of_node) { + CAM_ERR(CAM_FLASH, "of_node NULL"); + return -EINVAL; + } + + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return 0; + + fctrl = CAM_MEM_ZALLOC(sizeof(struct cam_flash_ctrl), GFP_KERNEL); + if (!fctrl) + return -ENOMEM; + + fctrl->pdev = pdev; + fctrl->of_node = pdev->dev.of_node; + fctrl->soc_info.pdev = pdev; + fctrl->soc_info.dev = &pdev->dev; + fctrl->soc_info.dev_name = pdev->name; + + platform_set_drvdata(pdev, fctrl); + + rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info); + if (rc) { + CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc); + CAM_MEM_FREE(fctrl); + return -EINVAL; + } + + if (of_find_property(pdev->dev.of_node, "cci-master", NULL)) { + + /* Get CCI master */ + if (of_property_read_u32(pdev->dev.of_node, "cci-master", + &fctrl->cci_i2c_master)) { + /* Set default master 0 */ + fctrl->cci_i2c_master = MASTER_0; + } + CAM_DBG(CAM_FLASH, "cci-master %d", + fctrl->cci_i2c_master); + + fctrl->io_master_info.master_type = CCI_MASTER; + rc = cam_flash_init_default_params(fctrl); + if (rc) { + CAM_ERR(CAM_FLASH, + "failed: 
cam_flash_init_default_params rc %d", + rc); + return rc; + } + + of_parent = of_get_parent(pdev->dev.of_node); + if (of_property_read_u32(of_parent, "cell-index", + &fctrl->cci_num) < 0) + /* Set default master 0 */ + fctrl->cci_num = CCI_DEVICE_0; + + fctrl->io_master_info.cci_client->cci_device = fctrl->cci_num; + CAM_DBG(CAM_FLASH, "cci-index %d", fctrl->cci_num, rc); + + fctrl->i2c_data.per_frame = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (fctrl->i2c_data.per_frame == NULL) { + CAM_ERR(CAM_FLASH, "No Memory"); + rc = -ENOMEM; + goto free_cci_resource; + } + + INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head)); + INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head)); + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) + INIT_LIST_HEAD( + &(fctrl->i2c_data.per_frame[i].list_head)); + + fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser; + fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting; + fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops; + fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request; + } else { + /* PMIC Flash */ + fctrl->func_tbl.parser = cam_flash_pmic_pkt_parser; + fctrl->func_tbl.apply_setting = cam_flash_pmic_apply_setting; + fctrl->func_tbl.power_ops = NULL; + fctrl->func_tbl.flush_req = cam_flash_pmic_flush_request; + } + + rc = cam_flash_init_subdev(fctrl); + if (rc) { + if (fctrl->io_master_info.cci_client != NULL) + goto free_cci_resource; + else + goto free_resource; + } + + cam_sensor_module_add_i2c_device((void *) fctrl, CAM_SENSOR_FLASH); + + fctrl->bridge_intf.device_hdl = -1; + fctrl->bridge_intf.link_hdl = -1; + fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info; + fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link; + fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request; + fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request; + fctrl->last_flush_req = 0; + + mutex_init(&(fctrl->flash_mutex)); + + fctrl->flash_state = 
CAM_FLASH_STATE_INIT; + CAM_DBG(CAM_FLASH, "Component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return rc; + +free_cci_resource: + CAM_MEM_FREE(fctrl->io_master_info.cci_client); + fctrl->io_master_info.cci_client = NULL; +free_resource: + CAM_MEM_FREE(fctrl->i2c_data.per_frame); + CAM_MEM_FREE(fctrl->soc_info.soc_private); + cam_soc_util_release_platform_resource(&fctrl->soc_info); + fctrl->i2c_data.per_frame = NULL; + fctrl->soc_info.soc_private = NULL; + CAM_MEM_FREE(fctrl); + fctrl = NULL; + return rc; +} + +static void cam_flash_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct cam_flash_ctrl *fctrl; + bool i3c_i2c_target; + struct platform_device *pdev = to_platform_device(dev); + + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return; + + fctrl = platform_get_drvdata(pdev); + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Flash device is NULL"); + return; + } + + mutex_lock(&fctrl->flash_mutex); + cam_flash_shutdown(fctrl); + mutex_unlock(&fctrl->flash_mutex); + cam_unregister_subdev(&(fctrl->v4l2_dev_str)); + cam_flash_put_source_node_data(fctrl); + platform_set_drvdata(pdev, NULL); + v4l2_set_subdevdata(&fctrl->v4l2_dev_str.sd, NULL); + CAM_MEM_FREE(fctrl); + CAM_INFO(CAM_FLASH, "Flash Sensor component unbind"); +} + +const static struct component_ops cam_flash_component_ops = { + .bind = cam_flash_component_bind, + .unbind = cam_flash_component_unbind, +}; + +static int cam_flash_platform_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_flash_component_ops); + return 0; +} + +static int32_t cam_flash_platform_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_FLASH, "Adding Flash Sensor component"); + rc = component_add(&pdev->dev, &cam_flash_component_ops); + if (rc) + CAM_ERR(CAM_FLASH, "failed to add 
component rc: %d", rc); + + return rc; +} + +static int cam_flash_i2c_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int32_t rc = 0, i = 0; + struct i2c_client *client = NULL; + struct cam_flash_ctrl *fctrl = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct timespec64 ts_start, ts_end; + long microsec = 0; + struct device_node *np = NULL; + const char *drv_name; + + CAM_GET_TIMESTAMP(ts_start); + client = container_of(dev, struct i2c_client, dev); + if (client == NULL) { + CAM_ERR(CAM_FLASH, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + /* Create sensor control structure */ + fctrl = CAM_MEM_ZALLOC(sizeof(*fctrl), GFP_KERNEL); + if (!fctrl) + return -ENOMEM; + + fctrl->io_master_info.qup_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_qup_client), GFP_KERNEL); + if (!(fctrl->io_master_info.qup_client)) { + rc = -ENOMEM; + goto free_ctrl; + } + + client->dev.driver_data = fctrl; + fctrl->io_master_info.qup_client->i2c_client = client; + fctrl->of_node = client->dev.of_node; + fctrl->soc_info.dev = &client->dev; + fctrl->soc_info.dev_name = client->name; + fctrl->io_master_info.master_type = I2C_MASTER; + + np = of_node_get(client->dev.of_node); + drv_name = of_node_full_name(np); + rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info); + if (rc) { + CAM_ERR(CAM_FLASH, "failed: cam_sensor_parse_dt rc %d", rc); + goto free_qup; + } + + rc = cam_flash_init_default_params(fctrl); + if (rc) { + CAM_ERR(CAM_FLASH, + "failed: cam_flash_init_default_params rc %d", + rc); + goto free_qup; + } + + soc_info = &fctrl->soc_info; + + /* Initalize regulators to default parameters */ + for (i = 0; i < soc_info->num_rgltr; i++) { + soc_info->rgltr[i] = devm_regulator_get(soc_info->dev, + soc_info->rgltr_name[i]); + if (IS_ERR_OR_NULL(soc_info->rgltr[i])) { + rc = PTR_ERR(soc_info->rgltr[i]); + rc = rc ? 
rc : -EINVAL; + CAM_ERR(CAM_FLASH, "get failed for regulator %s %d", + soc_info->rgltr_name[i], rc); + goto free_qup; + } + CAM_DBG(CAM_FLASH, "get for regulator %s", + soc_info->rgltr_name[i]); + } + + if (!soc_info->gpio_data) { + CAM_DBG(CAM_FLASH, "No GPIO found"); + } else { + if (!soc_info->gpio_data->cam_gpio_common_tbl_size) { + CAM_DBG(CAM_FLASH, "No GPIO found"); + rc = -EINVAL; + goto free_qup; + } + + rc = cam_sensor_util_init_gpio_pin_tbl(soc_info, + &fctrl->power_info.gpio_num_info); + if ((rc < 0) || (!fctrl->power_info.gpio_num_info)) { + CAM_ERR(CAM_FLASH, "No/Error Flash GPIOs"); + rc = -EINVAL; + goto free_qup; + } + } + + rc = cam_flash_init_subdev(fctrl); + if (rc) + goto free_qup; + + fctrl->i2c_data.per_frame = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (fctrl->i2c_data.per_frame == NULL) { + rc = -ENOMEM; + goto unreg_subdev; + } + + cam_sensor_module_add_i2c_device((void *) fctrl, CAM_SENSOR_FLASH); + + INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head)); + INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head)); + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) + INIT_LIST_HEAD(&(fctrl->i2c_data.per_frame[i].list_head)); + + fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser; + fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting; + fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops; + fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request; + + fctrl->bridge_intf.device_hdl = -1; + fctrl->bridge_intf.link_hdl = -1; + fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info; + fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link; + fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request; + fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request; + fctrl->last_flush_req = 0; + + mutex_init(&(fctrl->flash_mutex)); + fctrl->flash_state = CAM_FLASH_STATE_INIT; + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + 
cam_record_bind_latency(drv_name, microsec); + of_node_put(np); + + return rc; + +unreg_subdev: + cam_unregister_subdev(&(fctrl->v4l2_dev_str)); +free_qup: + CAM_MEM_FREE(fctrl->io_master_info.qup_client); +free_ctrl: + CAM_MEM_FREE(fctrl); + fctrl = NULL; + return rc; +} + +static void cam_flash_i2c_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct i2c_client *client = NULL; + struct cam_flash_ctrl *fctrl = NULL; + + client = container_of(dev, struct i2c_client, dev); + if (!client) { + CAM_ERR(CAM_FLASH, + "Failed to get i2c client"); + return; + } + + fctrl = i2c_get_clientdata(client); + /* Handle I2C Devices */ + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Flash device is NULL"); + return; + } + + CAM_INFO(CAM_FLASH, "i2c driver remove invoked"); + /*Free Allocated Mem */ + CAM_MEM_FREE(fctrl->i2c_data.per_frame); + fctrl->i2c_data.per_frame = NULL; + CAM_MEM_FREE(fctrl->io_master_info.qup_client); + CAM_MEM_FREE(fctrl); +} + +const static struct component_ops cam_flash_i2c_component_ops = { + .bind = cam_flash_i2c_component_bind, + .unbind = cam_flash_i2c_component_unbind, +}; + +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE +static int cam_flash_i2c_driver_probe(struct i2c_client *client) +{ + int rc = 0; + + if (client == NULL) { + CAM_ERR(CAM_FLASH, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_FLASH, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_FLASH, "Adding sensor flash component"); + rc = component_add(&client->dev, &cam_flash_i2c_component_ops); + if (rc) + CAM_ERR(CAM_FLASH, "failed to add component rc: %d", rc); + + return rc; +} +#else +static int32_t cam_flash_i2c_driver_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + + if (client == NULL || id == NULL) { + CAM_ERR(CAM_FLASH, "Invalid Args client: %pK id: %pK", + client, id); + 
return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_FLASH, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_FLASH, "Adding sensor flash component"); + rc = component_add(&client->dev, &cam_flash_i2c_component_ops); + if (rc) + CAM_ERR(CAM_FLASH, "failed to add component rc: %d", rc); + + return rc; +} +#endif + +#if KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE +void cam_flash_i2c_driver_remove(struct i2c_client *client) +{ + component_del(&client->dev, &cam_flash_i2c_component_ops); +} + +#else +static int32_t cam_flash_i2c_driver_remove(struct i2c_client *client) +{ + component_del(&client->dev, &cam_flash_i2c_component_ops); + + return 0; +} +#endif + +MODULE_DEVICE_TABLE(of, cam_flash_dt_match); + +struct platform_driver cam_flash_platform_driver = { + .probe = cam_flash_platform_probe, + .remove = cam_flash_platform_remove, + .driver = { + .name = "CAM-FLASH-DRIVER", + .owner = THIS_MODULE, + .of_match_table = cam_flash_dt_match, + .suppress_bind_attrs = true, + }, +}; + +static const struct of_device_id cam_flash_i2c_dt_match[] = { + {.compatible = "qcom,cam-i2c-flash", .data = NULL}, + {} +}; +MODULE_DEVICE_TABLE(of, cam_flash_i2c_dt_match); + +static const struct i2c_device_id i2c_id[] = { + {FLASH_DRIVER_I2C, (kernel_ulong_t)NULL}, + { } +}; + +struct i2c_driver cam_flash_i2c_driver = { + .id_table = i2c_id, + .probe = cam_flash_i2c_driver_probe, + .remove = cam_flash_i2c_driver_remove, + .driver = { + .owner = THIS_MODULE, + .name = FLASH_DRIVER_I2C, + .of_match_table = cam_flash_i2c_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int32_t cam_flash_init_module(void) +{ + int32_t rc = 0; + + rc = platform_driver_register(&cam_flash_platform_driver); + if (rc < 0) { + CAM_ERR(CAM_FLASH, "platform probe failed rc: %d", rc); + return rc; + } + + rc = i2c_add_driver(&cam_flash_i2c_driver); + if (rc < 0) + CAM_ERR(CAM_FLASH, "i2c_add_driver failed rc: %d", rc); + 
+ return rc; +} + +void cam_flash_exit_module(void) +{ + platform_driver_unregister(&cam_flash_platform_driver); + i2c_del_driver(&cam_flash_i2c_driver); +} + +MODULE_DESCRIPTION("CAM FLASH"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_dev.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_dev.h new file mode 100644 index 0000000000..4c4d38b47f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_dev.h @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_FLASH_DEV_H_ +#define _CAM_FLASH_DEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if IS_REACHABLE(CONFIG_LEDS_QPNP_FLASH_V2) +#include +#elif IS_REACHABLE(CONFIG_LEDS_QTI_FLASH) +#include +#endif + +#include "cam_req_mgr_util.h" +#include "cam_req_mgr_interface.h" +#include "cam_subdev.h" +#include "cam_mem_mgr.h" +#include "cam_sensor_cmn_header.h" +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_sensor_io.h" +#include "cam_flash_core.h" +#include "cam_context.h" + +#define CAMX_FLASH_DEV_NAME "cam-flash-dev" + +#define CAM_FLASH_PIPELINE_DELAY 1 + +#define FLASH_DRIVER_I2C "cam-i2c-flash" + +#define CAM_FLASH_PACKET_OPCODE_INIT 0 +#define CAM_FLASH_PACKET_OPCODE_SET_OPS 1 +#define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2 + +struct cam_flash_ctrl; + +enum cam_flash_switch_trigger_ops { + LED_SWITCH_OFF = 0, + LED_SWITCH_ON, +}; + +enum cam_flash_state { + CAM_FLASH_STATE_INIT, + CAM_FLASH_STATE_ACQUIRE, + CAM_FLASH_STATE_CONFIG, + CAM_FLASH_STATE_START, +}; + +enum cam_flash_flush_type { + FLUSH_ALL = 0, + FLUSH_REQ, + FLUSH_MAX, +}; + +/** + * struct cam_flash_intf_params + * @device_hdl : Device Handle + * @session_hdl : Session Handle + * @link_hdl : 
Link Handle + * @ops : KMD operations + * @crm_cb : Callback API pointers + */ +struct cam_flash_intf_params { + int32_t device_hdl; + int32_t session_hdl; + int32_t link_hdl; + struct cam_req_mgr_kmd_ops ops; + struct cam_req_mgr_crm_cb *crm_cb; +}; + +/** + * struct cam_flash_common_attr + * @is_settings_valid : Notify the valid settings + * @request_id : Request id provided by umd + * @count : Number of led count + * @cmd_type : Command buffer type + */ +struct cam_flash_common_attr { + bool is_settings_valid; + uint64_t request_id; + uint32_t count; + uint8_t cmd_type; +}; + +/** + * struct flash_init_packet + * @cmn_attr : Provides common attributes + * @flash_type : Flash type(PMIC/I2C/GPIO) + */ +struct cam_flash_init_packet { + struct cam_flash_common_attr cmn_attr; + uint32_t flash_type; +}; + +/** + * struct flash_frame_setting + * @cmn_attr : Provides common attributes + * @num_iterations : Iterations used to perform RER + * @led_on_delay_ms : LED on time in milisec + * @led_off_delay_ms : LED off time in milisec + * @opcode : Command buffer opcode + * @led_current_ma[] : LED current array in miliamps + * @flash_active_time_ms : Flash_On time with precise flash + * @flash_on_wait_time_ms : Flash on wait time with precise flash + */ +struct cam_flash_frame_setting { + struct cam_flash_common_attr cmn_attr; + uint16_t num_iterations; + uint16_t led_on_delay_ms; + uint16_t led_off_delay_ms; + int8_t opcode; + uint32_t led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS]; + uint64_t flash_active_time_ms; + uint64_t flash_on_wait_time_ms; +}; + +/** + * struct cam_flash_private_soc + * @switch_trigger_name : Switch trigger name + * @flash_trigger_name : Flash trigger name array + * @flash_op_current : Flash operational current + * @flash_max_current : Max supported current for LED in flash mode + * @flash_max_duration : Max turn on duration for LED in Flash mode + * @torch_trigger_name : Torch trigger name array + * @torch_op_current : Torch operational current + * 
@torch_max_current : Max supported current for LED in torch mode + * @is_wled_flash : Detection between WLED/LED flash + */ + +struct cam_flash_private_soc { + const char *switch_trigger_name; + const char *flash_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS]; + uint32_t flash_op_current[CAM_FLASH_MAX_LED_TRIGGERS]; + uint32_t flash_max_current[CAM_FLASH_MAX_LED_TRIGGERS]; + uint32_t flash_max_duration[CAM_FLASH_MAX_LED_TRIGGERS]; + const char *torch_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS]; + uint32_t torch_op_current[CAM_FLASH_MAX_LED_TRIGGERS]; + uint32_t torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS]; + bool is_wled_flash; +}; + +struct cam_flash_func_tbl { + int (*parser)(struct cam_flash_ctrl *fctrl, void *arg); + int (*apply_setting)(struct cam_flash_ctrl *fctrl, uint64_t req_id); + int (*power_ops)(struct cam_flash_ctrl *fctrl, bool regulator_enable); + int (*flush_req)(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type type, uint64_t req_id); +}; + +/** + * struct cam_flash_ctrl + * @device_name : Device name + * @soc_info : Soc related information + * @pdev : Platform device + * @per_frame[] : Per_frame setting array + * @nrt_info : NonRealTime settings + * @of_node : Of Node ptr + * @v4l2_dev_str : V4L2 device structure + * @bridge_intf : CRM interface + * @flash_init_setting : Init command buffer structure + * @switch_trigger : Switch trigger ptr + * @flash_num_sources : Number of flash sources + * @torch_num_source : Number of torch sources + * @flash_mutex : Mutex for flash operations + * @flash_state : Current flash state (LOW/OFF/ON/INIT) + * @flash_type : Flash types (PMIC/I2C/GPIO) + * @is_regulator_enable : Regulator disable/enable notifier + * @func_tbl : Function table for different HW + * (e.g. 
i2c/pmic/gpio) + * @flash_trigger : Flash trigger ptr + * @torch_trigger : Torch trigger ptr + * @cci_i2c_master : I2C structure + * @cci_device_num : cci parent cell index + * @io_master_info : Information about the communication master + * @i2c_data : I2C register settings + * @last_flush_req : last request to flush + */ +struct cam_flash_ctrl { + char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + struct cam_hw_soc_info soc_info; + struct platform_device *pdev; + struct cam_sensor_power_ctrl_t power_info; + struct cam_flash_frame_setting per_frame[MAX_PER_FRAME_ARRAY]; + struct cam_flash_frame_setting nrt_info; + struct device_node *of_node; + struct cam_subdev v4l2_dev_str; + struct cam_flash_intf_params bridge_intf; + struct cam_flash_init_packet flash_init_setting; + struct led_trigger *switch_trigger; + uint32_t flash_num_sources; + uint32_t torch_num_sources; + struct mutex flash_mutex; + enum cam_flash_state flash_state; + uint8_t flash_type; + bool is_regulator_enabled; + struct cam_flash_func_tbl func_tbl; + struct led_trigger *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS]; + struct led_trigger *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS]; +/* I2C related setting */ + enum cci_i2c_master_t cci_i2c_master; + enum cci_device_num cci_num; + struct camera_io_master io_master_info; + struct i2c_data_settings i2c_data; + uint32_t last_flush_req; +}; + +int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg); +int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg); +int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id); +int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id); +int cam_flash_off(struct cam_flash_ctrl *fctrl); +int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl, + bool regulator_enable); +int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl, + bool regulator_enable); +int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type type, 
uint64_t req_id); +int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type, uint64_t req_id); +void cam_flash_shutdown(struct cam_flash_ctrl *fctrl); +int cam_flash_release_dev(struct cam_flash_ctrl *fctrl); + +/** + * @brief : API to register FLASH hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int32_t cam_flash_init_module(void); + +/** + * @brief : API to remove FLASH Hw from platform framework. + */ +void cam_flash_exit_module(void); +#endif /*_CAM_FLASH_DEV_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_soc.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_soc.c new file mode 100644 index 0000000000..401f1a24aa --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_soc.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include "cam_flash_soc.h" +#include "cam_res_mgr_api.h" +#include "cam_mem_mgr_api.h" + +void cam_flash_put_source_node_data(struct cam_flash_ctrl *fctrl) +{ + uint32_t count = 0, i = 0; + struct cam_flash_private_soc *soc_private = NULL; + + if (!fctrl) { + CAM_ERR(CAM_FLASH, "NULL flash control structure"); + return; + } + + soc_private = fctrl->soc_info.soc_private; + + if (fctrl->switch_trigger) { + CAM_DBG(CAM_FLASH, "switch trigger: %s", + soc_private->switch_trigger_name); + cam_res_mgr_led_trigger_unregister(fctrl->switch_trigger); + } + + if (fctrl->flash_num_sources) { + if (fctrl->flash_num_sources > CAM_FLASH_MAX_LED_TRIGGERS) { + CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count); + return; + } + + count = fctrl->flash_num_sources; + + for (i = 0; i < count; i++) { + CAM_DBG(CAM_FLASH, "Flash default trigger %s", + soc_private->flash_trigger_name[i]); + cam_res_mgr_led_trigger_unregister( + fctrl->flash_trigger[i]); + } + } + + if (fctrl->torch_num_sources) { + if (fctrl->torch_num_sources > CAM_FLASH_MAX_LED_TRIGGERS) { + CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count); + return; + } + + count = fctrl->torch_num_sources; + + for (i = 0; i < count; i++) { + CAM_DBG(CAM_FLASH, "Flash default trigger %s", + soc_private->flash_trigger_name[i]); + cam_res_mgr_led_trigger_unregister( + fctrl->torch_trigger[i]); + } + } +} + +#if __or(IS_REACHABLE(CONFIG_LEDS_QPNP_FLASH_V2), \ + IS_REACHABLE(CONFIG_LEDS_QTI_FLASH)) +static int32_t cam_get_source_node_info( + struct device_node *of_node, + struct cam_flash_ctrl *fctrl, + struct cam_flash_private_soc *soc_private) +{ + int32_t rc = 0; + uint32_t count = 0, i = 0; + struct device_node *flash_src_node = NULL; + struct device_node *torch_src_node = NULL; + struct device_node *switch_src_node = NULL; + + soc_private->is_wled_flash = + of_property_read_bool(of_node, "wled-flash-support"); + + switch_src_node = of_parse_phandle(of_node, "switch-source", 0); + if (!switch_src_node) { + 
CAM_WARN(CAM_FLASH, "switch_src_node NULL"); + } else { + rc = of_property_read_string(switch_src_node, + "qcom,default-led-trigger", + &soc_private->switch_trigger_name); + if (rc) { + CAM_ERR(CAM_FLASH, + "default-led-trigger read failed rc=%d", rc); + } else { + CAM_DBG(CAM_FLASH, "switch trigger %s", + soc_private->switch_trigger_name); + cam_res_mgr_led_trigger_register( + soc_private->switch_trigger_name, + &fctrl->switch_trigger); + } + + of_node_put(switch_src_node); + } + + if (of_get_property(of_node, "flash-source", &count)) { + count /= sizeof(uint32_t); + + if (count > CAM_FLASH_MAX_LED_TRIGGERS) { + CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count); + return -EINVAL; + } + + fctrl->flash_num_sources = count; + + for (i = 0; i < count; i++) { + flash_src_node = of_parse_phandle(of_node, + "flash-source", i); + if (!flash_src_node) { + CAM_WARN(CAM_FLASH, "flash_src_node NULL"); + continue; + } + + rc = of_property_read_string(flash_src_node, + "qcom,default-led-trigger", + &soc_private->flash_trigger_name[i]); + if (rc) { + CAM_WARN(CAM_FLASH, + "defalut-led-trigger read failed rc=%d", rc); + of_node_put(flash_src_node); + continue; + } + + CAM_DBG(CAM_FLASH, "Flash default trigger %s", + soc_private->flash_trigger_name[i]); + cam_res_mgr_led_trigger_register( + soc_private->flash_trigger_name[i], + &fctrl->flash_trigger[i]); + + if (soc_private->is_wled_flash) { + rc = cam_flash_led_prepare( + fctrl->flash_trigger[i], + QUERY_MAX_AVAIL_CURRENT, + &soc_private->flash_max_current[i], + true); + if (rc) { + CAM_ERR(CAM_FLASH, + "WLED FLASH max_current read fail: %d", + rc); + of_node_put(flash_src_node); + rc = 0; + continue; + } + } else { + rc = of_property_read_u32(flash_src_node, + "qcom,max-current", + &soc_private->flash_max_current[i]); + rc &= of_property_read_u32(flash_src_node, + "qcom,max-current-ma", + &soc_private->flash_max_current[i]); + if (rc < 0) { + CAM_WARN(CAM_FLASH, + "LED FLASH max-current read fail: %d", + rc); + 
of_node_put(flash_src_node); + continue; + } + } + + /* Read operational-current */ + if (of_property_read_u32(flash_src_node, + "qcom,current-ma", + &soc_private->flash_op_current[i])) { + CAM_DBG(CAM_FLASH, "op-current: read failed"); + } + + /* Read max-duration */ + rc = of_property_read_u32(flash_src_node, + "qcom,duration-ms", + &soc_private->flash_max_duration[i]); + if (rc) { + CAM_DBG(CAM_FLASH, + "max-duration prop unavailable: %d", + rc); + rc = 0; + } + of_node_put(flash_src_node); + + CAM_DBG(CAM_FLASH, "MainFlashMaxCurrent[%d]: %d", + i, soc_private->flash_max_current[i]); + } + } + + if (of_get_property(of_node, "torch-source", &count)) { + count /= sizeof(uint32_t); + if (count > CAM_FLASH_MAX_LED_TRIGGERS) { + CAM_ERR(CAM_FLASH, "Invalid LED count : %d", count); + return -EINVAL; + } + + fctrl->torch_num_sources = count; + + CAM_DBG(CAM_FLASH, "torch_num_sources = %d", + fctrl->torch_num_sources); + for (i = 0; i < count; i++) { + torch_src_node = of_parse_phandle(of_node, + "torch-source", i); + if (!torch_src_node) { + CAM_WARN(CAM_FLASH, "torch_src_node NULL"); + continue; + } + + rc = of_property_read_string(torch_src_node, + "qcom,default-led-trigger", + &soc_private->torch_trigger_name[i]); + if (rc < 0) { + CAM_WARN(CAM_FLASH, + "default-trigger read failed"); + of_node_put(torch_src_node); + continue; + } + + CAM_DBG(CAM_FLASH, "Torch default trigger %s", + soc_private->torch_trigger_name[i]); + cam_res_mgr_led_trigger_register( + soc_private->torch_trigger_name[i], + &fctrl->torch_trigger[i]); + + if (soc_private->is_wled_flash) { + rc = cam_flash_led_prepare( + fctrl->torch_trigger[i], + QUERY_MAX_AVAIL_CURRENT, + &soc_private->torch_max_current[i], + true); + if (rc) { + CAM_ERR(CAM_FLASH, + "WLED TORCH max_current read fail: %d", + rc); + of_node_put(torch_src_node); + continue; + } + } else { + rc = of_property_read_u32(torch_src_node, + "qcom,max-current", + &soc_private->torch_max_current[i]); + rc &= 
of_property_read_u32(torch_src_node, + "qcom,max-current-ma", + &soc_private->torch_max_current[i]); + if (rc < 0) { + CAM_WARN(CAM_FLASH, + "LED-TORCH max-current read failed: %d", + rc); + of_node_put(torch_src_node); + continue; + } + } + + /* Read operational-current */ + rc = of_property_read_u32(torch_src_node, + "qcom,current-ma", + &soc_private->torch_op_current[i]); + if (rc < 0) { + CAM_WARN(CAM_FLASH, + "op-current prop unavailable: %d", rc); + rc = 0; + } + + of_node_put(torch_src_node); + + CAM_DBG(CAM_FLASH, "TorchMaxCurrent[%d]: %d", + i, soc_private->torch_max_current[i]); + } + } + + return rc; +} +#endif + +int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl, + struct cam_hw_soc_info *soc_info) +{ + int32_t rc = 0; + struct device_node *of_node = NULL; + + if (!fctrl) { + CAM_ERR(CAM_FLASH, "NULL flash control structure"); + return -EINVAL; + } + + soc_info->soc_private = + CAM_MEM_ZALLOC(sizeof(struct cam_flash_private_soc), GFP_KERNEL); + if (!soc_info->soc_private) { + rc = -ENOMEM; + goto release_soc_res; + } + + if (fctrl->of_node == NULL) { + CAM_ERR(CAM_FLASH, "device node is NULL"); + rc = -EINVAL; + goto free_soc_private; + } + + of_node = fctrl->of_node; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc); + goto free_soc_private; + } + +#if __or(IS_ENABLED(CONFIG_LEDS_QPNP_FLASH_V2), \ + IS_ENABLED(CONFIG_LEDS_QTI_FLASH)) + rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private); + if (rc) { + CAM_ERR(CAM_FLASH, + "cam_flash_get_pmic_source_info failed rc %d", rc); + goto free_soc_private; + } +#endif + return rc; + +free_soc_private: + CAM_MEM_FREE(soc_info->soc_private); + soc_info->soc_private = NULL; +release_soc_res: + cam_soc_util_release_platform_resource(soc_info); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_soc.h 
b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_soc.h new file mode 100644 index 0000000000..466fbeef19 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_flash/cam_flash_soc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_FLASH_SOC_H_ +#define _CAM_FLASH_SOC_H_ + +#include "cam_flash_dev.h" + +int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl, + struct cam_hw_soc_info *soc_info); + +void cam_flash_put_source_node_data(struct cam_flash_ctrl *fctrl); +#endif /*_CAM_FLASH_SOC_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_core.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_core.c new file mode 100644 index 0000000000..d6c4d73ff8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_core.c @@ -0,0 +1,1826 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include + +#include "cam_sensor_cmn_header.h" +#include "cam_ois_core.h" +#include "cam_ois_soc.h" +#include "cam_sensor_util.h" +#include "cam_debug_util.h" +#include "cam_res_mgr_api.h" +#include "cam_common_util.h" +#include "cam_packet_util.h" +#include "cam_mem_mgr_api.h" + +#define CAM_OIS_FW_VERSION_CHECK_MASK 0x1 + +static inline uint64_t swap_high_byte_and_low_byte(uint8_t *src, + uint8_t size_bytes) +{ + uint64_t ret_value = 0x00; + uint8_t cycle = 0; + + for (cycle = 0; cycle < size_bytes; cycle++) + ret_value = ((ret_value<<8) | ((*(src+cycle))&0xff)); + + return ret_value; +} + +static inline uint64_t swap_high_word_and_low_word(uint16_t *src, + uint8_t size_words) +{ + uint64_t ret_value = 0x00; + uint8_t cycle = 0; + + for (cycle = 0; cycle < size_words; cycle++) + ret_value = ((ret_value<<16) | ((*(src+cycle))&0xffff)); + + return ret_value; +} + + +int32_t cam_ois_construct_default_power_setting( + struct cam_sensor_power_ctrl_t *power_info) +{ + int rc = 0; + + power_info->power_setting_size = 1; + power_info->power_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_power_setting), + GFP_KERNEL); + if (!power_info->power_setting) + return -ENOMEM; + + power_info->power_setting[0].seq_type = SENSOR_VAF; + power_info->power_setting[0].seq_val = CAM_VAF; + power_info->power_setting[0].config_val = 1; + power_info->power_setting[0].delay = 2; + + power_info->power_down_setting_size = 1; + power_info->power_down_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_power_setting), + GFP_KERNEL); + if (!power_info->power_down_setting) { + rc = -ENOMEM; + goto free_power_settings; + } + + power_info->power_down_setting[0].seq_type = SENSOR_VAF; + power_info->power_down_setting[0].seq_val = CAM_VAF; + power_info->power_down_setting[0].config_val = 0; + + return rc; + +free_power_settings: + CAM_MEM_FREE(power_info->power_setting); + power_info->power_setting = NULL; + power_info->power_setting_size = 0; + return rc; +} + + +/** + * 
cam_ois_get_dev_handle - get device handle + * @o_ctrl: ctrl structure + * @arg: Camera control command argument + * + * Returns success or failure + */ +static int cam_ois_get_dev_handle(struct cam_ois_ctrl_t *o_ctrl, + void *arg) +{ + struct cam_sensor_acquire_dev ois_acq_dev; + struct cam_create_dev_hdl bridge_params; + struct cam_control *cmd = (struct cam_control *)arg; + + if (o_ctrl->bridge_intf.device_hdl != -1) { + CAM_ERR(CAM_OIS, "Device is already acquired"); + return -EFAULT; + } + if (copy_from_user(&ois_acq_dev, u64_to_user_ptr(cmd->handle), + sizeof(ois_acq_dev))) + return -EFAULT; + + bridge_params.session_hdl = ois_acq_dev.session_handle; + bridge_params.ops = &o_ctrl->bridge_intf.ops; + bridge_params.v4l2_sub_dev_flag = 0; + bridge_params.media_entity_flag = 0; + bridge_params.priv = o_ctrl; + bridge_params.dev_id = CAM_OIS; + + ois_acq_dev.device_handle = + cam_create_device_hdl(&bridge_params); + if (ois_acq_dev.device_handle <= 0) { + CAM_ERR(CAM_OIS, "Can not create device handle"); + return -EFAULT; + } + o_ctrl->bridge_intf.device_hdl = ois_acq_dev.device_handle; + o_ctrl->bridge_intf.session_hdl = ois_acq_dev.session_handle; + + CAM_DBG(CAM_OIS, "Device Handle: %d", ois_acq_dev.device_handle); + if (copy_to_user(u64_to_user_ptr(cmd->handle), &ois_acq_dev, + sizeof(struct cam_sensor_acquire_dev))) { + CAM_ERR(CAM_OIS, "ACQUIRE_DEV: copy to user failed"); + return -EFAULT; + } + return 0; +} + +static int cam_ois_power_up(struct cam_ois_ctrl_t *o_ctrl) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = &o_ctrl->soc_info; + struct cam_ois_soc_private *soc_private; + struct cam_sensor_power_ctrl_t *power_info; + struct completion *i3c_probe_completion = NULL; + + soc_private = (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private; + power_info = &soc_private->power_info; + + if ((power_info->power_setting == NULL) && + (power_info->power_down_setting == NULL)) { + CAM_INFO(CAM_OIS, + "Using default power settings"); + rc = 
cam_ois_construct_default_power_setting(power_info); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Construct default ois power setting failed."); + return rc; + } + } + + /* Parse and fill vreg params for power up settings */ + rc = msm_camera_fill_vreg_params( + soc_info, + power_info->power_setting, + power_info->power_setting_size); + if (rc) { + CAM_ERR(CAM_OIS, + "failed to fill vreg params for power up rc:%d", rc); + return rc; + } + + /* Parse and fill vreg params for power down settings*/ + rc = msm_camera_fill_vreg_params( + soc_info, + power_info->power_down_setting, + power_info->power_down_setting_size); + if (rc) { + CAM_ERR(CAM_OIS, + "failed to fill vreg params for power down rc:%d", rc); + return rc; + } + + power_info->dev = soc_info->dev; + + if (o_ctrl->io_master_info.master_type == I3C_MASTER) + i3c_probe_completion = cam_ois_get_i3c_completion(o_ctrl->soc_info.index); + + rc = cam_sensor_core_power_up(power_info, soc_info, i3c_probe_completion); + if (rc) { + CAM_ERR(CAM_OIS, "failed in ois power up rc %d", rc); + return rc; + } + + rc = camera_io_init(&o_ctrl->io_master_info); + if (rc) { + CAM_ERR(CAM_OIS, "cci_init failed: rc: %d", rc); + goto cci_failure; + } + + return rc; +cci_failure: + if (cam_sensor_util_power_down(power_info, soc_info)) + CAM_ERR(CAM_OIS, "Power Down failed"); + + return rc; +} + +/** + * cam_ois_power_down - power down OIS device + * @o_ctrl: ctrl structure + * + * Returns success or failure + */ +static int cam_ois_power_down(struct cam_ois_ctrl_t *o_ctrl) +{ + int32_t rc = 0; + struct cam_sensor_power_ctrl_t *power_info; + struct cam_hw_soc_info *soc_info = + &o_ctrl->soc_info; + struct cam_ois_soc_private *soc_private; + + if (!o_ctrl) { + CAM_ERR(CAM_OIS, "failed: o_ctrl %pK", o_ctrl); + return -EINVAL; + } + + soc_private = + (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private; + power_info = &soc_private->power_info; + soc_info = &o_ctrl->soc_info; + + if (!power_info) { + CAM_ERR(CAM_OIS, "failed: power_info 
%pK", power_info); + return -EINVAL; + } + + rc = cam_sensor_util_power_down(power_info, soc_info); + if (rc) { + CAM_ERR(CAM_OIS, "power down the core is failed:%d", rc); + return rc; + } + + camera_io_release(&o_ctrl->io_master_info); + o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE; + + return rc; +} + +static int cam_ois_update_time(struct i2c_settings_array *i2c_set, + enum cam_endianness_type endianness) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0; + uint32_t size = 0; + uint32_t i = 0; + uint64_t qtime_ns = 0; + + if (i2c_set == NULL) { + CAM_ERR(CAM_OIS, "Invalid Args"); + return -EINVAL; + } + + rc = cam_sensor_util_get_current_qtimer_ns(&qtime_ns); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed to get current qtimer value: %d", + rc); + return rc; + } + + if (endianness == CAM_ENDIANNESS_BIG) + qtime_ns = swap_high_word_and_low_word((uint16_t *)(&qtime_ns), + sizeof(qtime_ns) / sizeof(uint16_t)); + + list_for_each_entry(i2c_list, + &(i2c_set->list_head), list) { + if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_SEQ) { + size = i2c_list->i2c_settings.size; + /* qtimer is 8 bytes so validate here*/ + if (size * (uint32_t)(i2c_list->i2c_settings.data_type) != 8) { + CAM_ERR(CAM_OIS, "Invalid write time settings"); + return -EINVAL; + } + switch (i2c_list->i2c_settings.data_type) { + case CAMERA_SENSOR_I2C_TYPE_BYTE: + for (i = 0; i < size; i++) { + CAM_DBG(CAM_OIS, "time: reg_data[%d]: 0x%x", + i, (qtime_ns & 0xFF)); + i2c_list->i2c_settings.reg_setting[i].reg_data = + (qtime_ns & 0xFF); + qtime_ns >>= 8; + } + + break; + case CAMERA_SENSOR_I2C_TYPE_WORD: + for (i = 0; i < size; i++) { + uint16_t data = (qtime_ns & 0xFFFF); + i2c_list->i2c_settings.reg_setting[i].reg_data = + data; + + qtime_ns >>= 16; + + CAM_DBG(CAM_OIS, "time: reg_data[%d]: 0x%x", + i, data); + } + + break; + default: + CAM_ERR(CAM_OIS, "Unsupported reg data type"); + return -EINVAL; + } + } + } + + return rc; +} + +static int cam_ois_apply_settings(struct cam_ois_ctrl_t *o_ctrl, + 
struct i2c_settings_array *i2c_set) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0; + uint32_t i, size; + + if (o_ctrl == NULL || i2c_set == NULL) { + CAM_ERR(CAM_OIS, "Invalid Args"); + return -EINVAL; + } + + if (i2c_set->is_settings_valid != 1) { + CAM_ERR(CAM_OIS, " Invalid settings"); + return -EINVAL; + } + + list_for_each_entry(i2c_list, + &(i2c_set->list_head), list) { + if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_RANDOM) { + rc = camera_io_dev_write(&(o_ctrl->io_master_info), + &(i2c_list->i2c_settings)); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed in Applying i2c wrt settings"); + return rc; + } + } else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_SEQ) { + rc = camera_io_dev_write_continuous( + &(o_ctrl->io_master_info), + &(i2c_list->i2c_settings), + CAM_SENSOR_I2C_WRITE_SEQ); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed to seq write I2C settings: %d", + rc); + return rc; + } + } else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) { + size = i2c_list->i2c_settings.size; + for (i = 0; i < size; i++) { + rc = camera_io_dev_poll( + &(o_ctrl->io_master_info), + i2c_list->i2c_settings.reg_setting[i].reg_addr, + i2c_list->i2c_settings.reg_setting[i].reg_data, + i2c_list->i2c_settings.reg_setting[i].data_mask, + i2c_list->i2c_settings.addr_type, + i2c_list->i2c_settings.data_type, + i2c_list->i2c_settings.reg_setting[i].delay); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "i2c poll apply setting Fail"); + return rc; + } else if (rc == I2C_COMPARE_MISMATCH) { + CAM_ERR(CAM_OIS, "i2c poll mismatch"); + return rc; + } + } + } + } + + return rc; +} + +static int cam_ois_slaveInfo_pkt_parser(struct cam_ois_ctrl_t *o_ctrl, + uint32_t *cmd_buf, size_t len) +{ + int32_t rc = 0; + struct cam_cmd_ois_info *ois_info; + + if (!o_ctrl || !cmd_buf || len < sizeof(struct cam_cmd_ois_info)) { + CAM_ERR(CAM_OIS, "Invalid Args"); + return -EINVAL; + } + + ois_info = (struct cam_cmd_ois_info *)cmd_buf; + if (o_ctrl->io_master_info.master_type == CCI_MASTER) { + 
o_ctrl->io_master_info.cci_client->i2c_freq_mode = + ois_info->i2c_freq_mode; + o_ctrl->io_master_info.cci_client->sid = + ois_info->slave_addr >> 1; + o_ctrl->ois_fw_flag = ois_info->ois_fw_flag; + o_ctrl->is_ois_calib = ois_info->is_ois_calib; + memcpy(o_ctrl->ois_name, ois_info->ois_name, OIS_NAME_LEN); + o_ctrl->ois_name[OIS_NAME_LEN - 1] = '\0'; + o_ctrl->io_master_info.cci_client->retries = 3; + o_ctrl->io_master_info.cci_client->id_map = 0; + memcpy(&(o_ctrl->opcode), &(ois_info->opcode), + sizeof(struct cam_ois_opcode)); + CAM_DBG(CAM_OIS, "Slave addr: 0x%x Freq Mode: %d", + ois_info->slave_addr, ois_info->i2c_freq_mode); + } else if (o_ctrl->io_master_info.master_type == I2C_MASTER) { + if (!o_ctrl->io_master_info.qup_client) { + CAM_ERR(CAM_OIS, "io_master_info.qup_client is NULL"); + return -EINVAL; + } + o_ctrl->io_master_info.qup_client->i2c_client->addr = + ois_info->slave_addr; + CAM_DBG(CAM_OIS, "Slave addr: 0x%x", ois_info->slave_addr); + } else { + CAM_ERR(CAM_OIS, "Invalid Master type : %d", + o_ctrl->io_master_info.master_type); + rc = -EINVAL; + } + + return rc; +} + +static int cam_ois_parse_fw_setting(uint8_t *cmd_buf, uint32_t size, + struct i2c_settings_array *reg_settings) +{ + int32_t rc = 0; + uint32_t byte_cnt = 0; + struct common_header *cmm_hdr; + uint16_t op_code; + uint32_t j = 0; + struct list_head *list = NULL; + uint32_t payload_count = 0; + size_t tot_size = 0; + + while (byte_cnt < size) { + if ((size - byte_cnt) < sizeof(struct common_header)) { + CAM_ERR(CAM_OIS, "Not enough buffer"); + rc = -EINVAL; + goto end; + } + cmm_hdr = (struct common_header *)cmd_buf; + op_code = cmm_hdr->fifth_byte; + CAM_DBG(CAM_OIS, "Command Type:%d, Op code:%d", + cmm_hdr->cmd_type, op_code); + + switch (cmm_hdr->cmd_type) { + case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR: { + uint32_t cmd_length_in_bytes = 0; + struct cam_cmd_i2c_random_wr + *cam_cmd_i2c_random_wr = + (struct cam_cmd_i2c_random_wr *)cmd_buf; + payload_count = 
cam_cmd_i2c_random_wr->header.count; + + if ((size - byte_cnt) < sizeof(struct cam_cmd_i2c_random_wr)) { + CAM_ERR(CAM_OIS, + "Not enough buffer provided,size %d,byte_cnt %d", + size, byte_cnt); + rc = -EINVAL; + goto end; + } + tot_size = sizeof(struct i2c_rdwr_header) + + (sizeof(struct i2c_random_wr_payload) * + payload_count); + + if (tot_size > (size - byte_cnt)) { + CAM_ERR(CAM_SENSOR_UTIL, + "Not enough buffer provided %d, %d, %d", + tot_size, size, byte_cnt); + rc = -EINVAL; + goto end; + } + + rc = cam_sensor_handle_random_write( + cam_cmd_i2c_random_wr, + reg_settings, + &cmd_length_in_bytes, &j, &list, payload_count); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed in random write %d", rc); + goto end; + } + + byte_cnt += sizeof(struct cam_cmd_i2c_random_wr); + cmd_buf += sizeof(struct cam_cmd_i2c_random_wr); + + break; + } + case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR: { + uint32_t cmd_length_in_bytes = 0; + struct cam_cmd_i2c_continuous_wr + *cam_cmd_i2c_continuous_wr = + (struct cam_cmd_i2c_continuous_wr *)cmd_buf; + payload_count = cam_cmd_i2c_continuous_wr->header.count; + if ((size - byte_cnt) < sizeof(struct cam_cmd_i2c_continuous_wr)) { + CAM_ERR(CAM_OIS, + "Not enough buffer provided,size %d,byte_cnt %d", + size, byte_cnt); + rc = -EINVAL; + goto end; + } + tot_size = sizeof(struct i2c_rdwr_header) + + sizeof(cam_cmd_i2c_continuous_wr->reg_addr) + + (sizeof(struct cam_cmd_read) * + payload_count); + + if (tot_size > (size - byte_cnt)) { + CAM_ERR(CAM_SENSOR_UTIL, + "Not enough buffer provided %d, %d, %d", + tot_size, size, byte_cnt); + rc = -EINVAL; + goto end; + } + rc = cam_sensor_handle_continuous_write( + cam_cmd_i2c_continuous_wr, + reg_settings, + &cmd_length_in_bytes, &j, &list, payload_count); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed in continuous write %d", rc); + goto end; + } + + byte_cnt += sizeof(struct cam_cmd_i2c_continuous_wr); + cmd_buf += sizeof(struct cam_cmd_i2c_continuous_wr); + + break; + } + case CAMERA_SENSOR_CMD_TYPE_WAIT: 
{ + if (op_code == CAMERA_SENSOR_WAIT_OP_HW_UCND || + op_code == CAMERA_SENSOR_WAIT_OP_SW_UCND) { + if ((size - byte_cnt) < + sizeof(struct cam_cmd_unconditional_wait)) { + CAM_ERR(CAM_OIS, + "Not enough buffer provided,size %d,byte_cnt %d", + size, byte_cnt); + rc = -EINVAL; + goto end; + } + + rc = cam_sensor_handle_delay( + (uint32_t **)(&cmd_buf), op_code, + reg_settings, j, &byte_cnt, + list); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "delay hdl failed: %d", + rc); + goto end; + } + } else if (op_code == CAMERA_SENSOR_WAIT_OP_COND) { + if ((size - byte_cnt) < + sizeof(struct cam_cmd_conditional_wait)) { + CAM_ERR(CAM_OIS, + "Not enough buffer provided,size %d,byte_cnt %d", + size, byte_cnt); + rc = -EINVAL; + goto end; + } + rc = cam_sensor_handle_poll( + (uint32_t **)(&cmd_buf), reg_settings, + &byte_cnt, &j, &list); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "parsing POLL fail: %d", + rc); + goto end; + } + } else { + CAM_ERR(CAM_OIS, + "Wrong Wait Command: %d", + op_code); + rc = -EINVAL; + goto end; + } + break; + } + case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_RD: { + uint16_t cmd_length_in_bytes = 0; + struct cam_cmd_i2c_random_rd *i2c_random_rd = + (struct cam_cmd_i2c_random_rd *)cmd_buf; + payload_count = i2c_random_rd->header.count; + if ((size - byte_cnt) < sizeof(struct cam_cmd_i2c_random_rd)) { + CAM_ERR(CAM_OIS, + "Not enough buffer provided,size %d,byte_cnt %d", + size, byte_cnt); + rc = -EINVAL; + goto end; + } + + tot_size = sizeof(struct i2c_rdwr_header) + + (sizeof(struct cam_cmd_read) * + payload_count); + if (tot_size > (size - byte_cnt)) { + CAM_ERR(CAM_SENSOR_UTIL, + "Not enough buffer provided %d, %d, %d", + tot_size, size, byte_cnt); + rc = -EINVAL; + goto end; + } + rc = cam_sensor_handle_random_read( + i2c_random_rd, + reg_settings, + &cmd_length_in_bytes, &j, &list, + NULL, payload_count); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed in random read %d", rc); + goto end; + } + + byte_cnt += sizeof(struct cam_cmd_i2c_random_rd); + cmd_buf += 
sizeof(struct cam_cmd_i2c_random_rd); + + break; + } + case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD: { + uint16_t cmd_length_in_bytes = 0; + struct cam_cmd_i2c_continuous_rd + *i2c_continuous_rd = + (struct cam_cmd_i2c_continuous_rd *)cmd_buf; + + if ((size - byte_cnt) < sizeof(struct cam_cmd_i2c_continuous_rd)) { + CAM_ERR(CAM_OIS, + "Not enough buffer provided,size %d,byte_cnt %d", + size, byte_cnt); + rc = -EINVAL; + goto end; + } + + rc = cam_sensor_handle_continuous_read( + i2c_continuous_rd, + reg_settings, + &cmd_length_in_bytes, &j, &list, + NULL); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed in continuous read %d", rc); + goto end; + } + + byte_cnt += sizeof(struct cam_cmd_i2c_continuous_rd); + cmd_buf += sizeof(struct cam_cmd_i2c_continuous_rd); + + break; + } + default: + CAM_ERR(CAM_OIS, "Invalid Command Type:%d", + cmm_hdr->cmd_type); + rc = -EINVAL; + goto end; + } + } + +end: + return rc; +} + +static int cam_ois_fw_info_pkt_parser(struct cam_ois_ctrl_t *o_ctrl, + uint32_t *cmd_buf, size_t len) +{ + int32_t rc = 0; + struct cam_cmd_ois_fw_info *ois_fw_info; + uint8_t *pSettingData = NULL; + uint32_t size = 0; + uint32_t version_size = 0; + struct i2c_settings_array *reg_settings = NULL; + uint8_t count = 0; + uint32_t idx; + uint8_t ois_fw_count = 0; + + if (!o_ctrl || !cmd_buf || len < sizeof(struct cam_cmd_ois_fw_info)) { + CAM_ERR(CAM_OIS, "Invalid Args,o_ctrl %p,cmd_buf %p,len %d", + o_ctrl, cmd_buf, len); + return -EINVAL; + } + + ois_fw_info = (struct cam_cmd_ois_fw_info *)cmd_buf; + ois_fw_count = ois_fw_info->fw_count; + CAM_DBG(CAM_OIS, "endianness %d, fw_count %d", + ois_fw_info->endianness, ois_fw_count); + + if (ois_fw_count <= MAX_OIS_FW_COUNT) { + memcpy(&o_ctrl->fw_info, ois_fw_info, sizeof(struct cam_cmd_ois_fw_info)); + o_ctrl->fw_info.fw_count = ois_fw_count; + pSettingData = (uint8_t *)cmd_buf + sizeof(struct cam_cmd_ois_fw_info); + if ((o_ctrl->fw_info.param_mask & CAM_OIS_FW_VERSION_CHECK_MASK) == 0x1) { + version_size = 
o_ctrl->fw_info.params[0]; + CAM_DBG(CAM_OIS, "versionSize: %d", version_size); + } + + if ((version_size != 0) && (o_ctrl->i2c_fw_version_data.is_settings_valid == 0)) { + reg_settings = &o_ctrl->i2c_fw_version_data; + reg_settings->is_settings_valid = 1; + rc = cam_ois_parse_fw_setting(pSettingData, version_size, reg_settings); + if (rc) { + CAM_ERR(CAM_OIS, "Failed to parse fw version settings"); + return rc; + } + + pSettingData += version_size; + } + + for (count = 0; count < o_ctrl->fw_info.fw_count*2; count++) { + idx = count / 2; + /* init settings */ + if ((count & 0x1) == 0) { + size = o_ctrl->fw_info.fw_param[idx].fw_init_size; + reg_settings = &o_ctrl->i2c_fw_init_data[idx]; + CAM_DBG(CAM_OIS, "init size %d", size); + /* finalize settings */ + } else if ((count & 0x1) == 1) { + size = o_ctrl->fw_info.fw_param[idx].fw_finalize_size; + reg_settings = &o_ctrl->i2c_fw_finalize_data[idx]; + CAM_DBG(CAM_OIS, "finalize size %d", size); + } else { + size = 0; + CAM_DBG(CAM_OIS, "Unsupported case"); + return -EINVAL; + } + + if (size != 0) { + reg_settings->is_settings_valid = 1; + rc = cam_ois_parse_fw_setting(pSettingData, size, reg_settings); + } + + if (rc) { + CAM_ERR(CAM_OIS, "Failed to parse fw setting"); + return rc; + } + + pSettingData += size; + } + } else { + CAM_ERR(CAM_OIS, "Exceed max fw count"); + return -EINVAL; + } + + return rc; +} + +static int cam_ois_fw_download(struct cam_ois_ctrl_t *o_ctrl) +{ + uint16_t total_bytes = 0; + uint8_t *ptr = NULL; + int32_t rc = 0, cnt; + uint32_t fw_size; + const struct firmware *fw = NULL; + const char *fw_name_prog = NULL; + const char *fw_name_coeff = NULL; + char name_prog[32] = {0}; + char name_coeff[32] = {0}; + struct device *dev; + struct cam_sensor_i2c_reg_setting i2c_reg_setting; + void *vaddr = NULL; + + if (!o_ctrl) { + CAM_ERR(CAM_OIS, "Invalid Args"); + return -EINVAL; + } + + dev = &(o_ctrl->pdev->dev); + + snprintf(name_coeff, 32, "%s.coeff", o_ctrl->ois_name); + + snprintf(name_prog, 32, 
"%s.prog", o_ctrl->ois_name); + + /* cast pointer as const pointer*/ + fw_name_prog = name_prog; + fw_name_coeff = name_coeff; + + /* Load FW */ + rc = request_firmware(&fw, fw_name_prog, dev); + if (rc) { + CAM_ERR(CAM_OIS, "Failed to locate %s", fw_name_prog); + return rc; + } + + total_bytes = fw->size; + i2c_reg_setting.addr_type = CAMERA_SENSOR_I2C_TYPE_BYTE; + i2c_reg_setting.data_type = CAMERA_SENSOR_I2C_TYPE_BYTE; + i2c_reg_setting.size = total_bytes; + i2c_reg_setting.delay = 0; + fw_size = (sizeof(struct cam_sensor_i2c_reg_array) * total_bytes); + vaddr = vmalloc(fw_size); + if (!vaddr) { + CAM_ERR(CAM_OIS, + "Failed in allocating i2c_array: fw_size: %u", fw_size); + release_firmware(fw); + return -ENOMEM; + } + + CAM_DBG(CAM_OIS, "FW prog size:%d.", total_bytes); + + i2c_reg_setting.reg_setting = (struct cam_sensor_i2c_reg_array *) ( + vaddr); + + for (cnt = 0, ptr = (uint8_t *)fw->data; cnt < total_bytes; + cnt++, ptr++) { + i2c_reg_setting.reg_setting[cnt].reg_addr = + o_ctrl->opcode.prog; + i2c_reg_setting.reg_setting[cnt].reg_data = *ptr; + i2c_reg_setting.reg_setting[cnt].delay = 0; + i2c_reg_setting.reg_setting[cnt].data_mask = 0; + } + + rc = camera_io_dev_write_continuous(&(o_ctrl->io_master_info), + &i2c_reg_setting, CAM_SENSOR_I2C_WRITE_BURST); + if (rc < 0) { + CAM_ERR(CAM_OIS, "OIS FW(prog) size(%d) download failed. 
%d", + total_bytes, rc); + goto release_firmware; + } + vfree(vaddr); + vaddr = NULL; + release_firmware(fw); + + rc = request_firmware(&fw, fw_name_coeff, dev); + if (rc) { + CAM_ERR(CAM_OIS, "Failed to locate %s", fw_name_coeff); + return rc; + } + + total_bytes = fw->size; + i2c_reg_setting.addr_type = CAMERA_SENSOR_I2C_TYPE_BYTE; + i2c_reg_setting.data_type = CAMERA_SENSOR_I2C_TYPE_BYTE; + i2c_reg_setting.size = total_bytes; + i2c_reg_setting.delay = 0; + fw_size = (sizeof(struct cam_sensor_i2c_reg_array) * total_bytes); + vaddr = vmalloc(fw_size); + if (!vaddr) { + CAM_ERR(CAM_OIS, + "Failed in allocating i2c_array: fw_size: %u", fw_size); + release_firmware(fw); + return -ENOMEM; + } + + CAM_DBG(CAM_OIS, "FW coeff size:%d", total_bytes); + + i2c_reg_setting.reg_setting = (struct cam_sensor_i2c_reg_array *) ( + vaddr); + + for (cnt = 0, ptr = (uint8_t *)fw->data; cnt < total_bytes; + cnt++, ptr++) { + i2c_reg_setting.reg_setting[cnt].reg_addr = + o_ctrl->opcode.coeff; + i2c_reg_setting.reg_setting[cnt].reg_data = *ptr; + i2c_reg_setting.reg_setting[cnt].delay = 0; + i2c_reg_setting.reg_setting[cnt].data_mask = 0; + } + + rc = camera_io_dev_write_continuous(&(o_ctrl->io_master_info), + &i2c_reg_setting, CAM_SENSOR_I2C_WRITE_BURST); + + if (rc < 0) + CAM_ERR(CAM_OIS, "OIS FW(coeff) size(%d) download failed rc: %d", + total_bytes, rc); + +release_firmware: + vfree(vaddr); + vaddr = NULL; + release_firmware(fw); + return rc; +} + +static int write_ois_fw(uint8_t *fw_data, enum cam_endianness_type endianness, + struct cam_cmd_ois_fw_param *fw_param, struct camera_io_master io_master_info, + uint8_t i2c_operation) +{ + int32_t rc = 0; + struct cam_sensor_i2c_reg_setting setting; + uint8_t *ptr = fw_data; + int32_t cnt = 0, wcnt = 0; + void *vaddr = NULL; + uint16_t data_type = fw_param->fw_data_type; + uint16_t len_per_write = fw_param->fw_len_per_write / + fw_param->fw_data_type; + + vaddr = vmalloc((sizeof(struct cam_sensor_i2c_reg_array) * len_per_write)); + if 
(!vaddr) { + CAM_ERR(CAM_OIS, + "Failed in allocating i2c_array: size: %u", + (sizeof(struct cam_sensor_i2c_reg_array) * len_per_write)); + return -ENOMEM; + } + + setting.reg_setting = (struct cam_sensor_i2c_reg_array *) (vaddr); + setting.addr_type = fw_param->fw_addr_type; + setting.data_type = fw_param->fw_data_type; + setting.size = len_per_write; + setting.delay = fw_param->fw_delayUs; + + for (wcnt = 0; wcnt < (fw_param->fw_size/data_type); wcnt += len_per_write) { + for (cnt = 0; cnt < len_per_write; cnt++, ptr += data_type) { + setting.reg_setting[cnt].reg_addr = + fw_param->fw_reg_addr + wcnt + cnt; + /* Big */ + if (endianness == CAM_ENDIANNESS_BIG) { + setting.reg_setting[cnt].reg_data = + (uint32_t)swap_high_byte_and_low_byte(ptr, data_type); + /* Little */ + } else if (endianness == CAM_ENDIANNESS_LITTLE) { + switch (data_type) { + case CAMERA_SENSOR_I2C_TYPE_BYTE: + setting.reg_setting[cnt].reg_data = *((uint8_t *)ptr); + break; + case CAMERA_SENSOR_I2C_TYPE_WORD: + setting.reg_setting[cnt].reg_data = *((uint16_t *)ptr); + break; + default: + CAM_ERR(CAM_OIS, + "Unsupported data type"); + rc = -EINVAL; + goto End; + } + } + + setting.reg_setting[cnt].delay = fw_param->fw_delayUs; + setting.reg_setting[cnt].data_mask = 0; + } + + if (i2c_operation == CAM_SENSOR_I2C_WRITE_RANDOM) { + rc = camera_io_dev_write(&(io_master_info), + &setting); + } else if (i2c_operation == CAM_SENSOR_I2C_WRITE_BURST || + i2c_operation == CAM_SENSOR_I2C_WRITE_SEQ) { + rc = camera_io_dev_write_continuous(&io_master_info, + &setting, i2c_operation); + } + + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed in Applying i2c wrt settings"); + break; + } + } + +End: + vfree(vaddr); + vaddr = NULL; + + return rc; +} + +static int cam_ois_fw_download_v2(struct cam_ois_ctrl_t *o_ctrl) +{ + int32_t rc = 0; + struct cam_cmd_ois_fw_param *fw_param = NULL; + uint32_t fw_size; + uint16_t len_per_write = 0; + uint8_t *ptr = NULL; + const struct firmware *fw = NULL; + struct device *dev; + 
uint8_t count = 0; + uint8_t cont_wr_flag = 0; + + if (!o_ctrl) { + CAM_ERR(CAM_OIS, "Invalid Args"); + return -EINVAL; + } + + if (o_ctrl->i2c_fw_version_data.is_settings_valid == 1) { + CAM_DBG(CAM_OIS, "check version to decide FW download"); + rc = cam_ois_apply_settings(o_ctrl, &o_ctrl->i2c_fw_version_data); + if ((rc == -EAGAIN) && + (o_ctrl->io_master_info.master_type == CCI_MASTER)) { + CAM_WARN(CAM_OIS, + "CCI HW is resetting: Reapplying FW init settings"); + usleep_range(1000, 1010); + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_fw_version_data); + } + + if (delete_request(&o_ctrl->i2c_fw_version_data) < 0) + CAM_WARN(CAM_OIS, + "Fail deleting i2c_fw_version_data: rc: %d", rc); + + if (rc == I2C_COMPARE_MATCH) { + CAM_INFO(CAM_OIS, + "OIS FW version matched, skipping FW download"); + return rc; + } else if (rc == I2C_COMPARE_MISMATCH) { + CAM_INFO(CAM_OIS, "OIS FW version not matched, load FW"); + } else { + CAM_WARN(CAM_OIS, "OIS FW version check failed,rc=%d", rc); + } + } + + for (count = 0; count < o_ctrl->fw_info.fw_count; count++) { + fw_param = &o_ctrl->fw_info.fw_param[count]; + fw_size = fw_param->fw_size; + len_per_write = fw_param->fw_len_per_write / fw_param->fw_data_type; + + CAM_DBG(CAM_OIS, "count: %d, fw_size: %d, data_type: %d, len_per_write: %d", + count, fw_size, fw_param->fw_data_type, len_per_write); + + /* Load FW */ + dev = &(o_ctrl->pdev->dev); + rc = request_firmware(&fw, fw_param->fw_name, dev); + if (rc) { + CAM_ERR(CAM_OIS, "Failed to locate %s", fw_param->fw_name); + return rc; + } + + if (0 == rc && NULL != fw && + (fw_size <= fw->size - fw_param->fw_start_pos)) { + + /* fw init */ + CAM_DBG(CAM_OIS, "fw init"); + if (o_ctrl->i2c_fw_init_data[count].is_settings_valid == 1) { + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_fw_init_data[count]); + if ((rc == -EAGAIN) && + (o_ctrl->io_master_info.master_type == CCI_MASTER)) { + CAM_WARN(CAM_OIS, + "CCI HW is resetting: Reapplying FW init settings"); + 
usleep_range(1000, 1010); + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_fw_init_data[count]); + } + if (rc) { + CAM_ERR(CAM_OIS, + "Cannot apply FW init settings %d", + rc); + goto release_firmware; + } else { + CAM_DBG(CAM_OIS, "OIS FW init settings success"); + } + } + + /* send fw */ + CAM_DBG(CAM_OIS, "send fw, operation %d", fw_param->fw_operation); + + ptr = (uint8_t *)(fw->data + fw_param->fw_start_pos); + if (fw_param->fw_operation == CAMERA_SENSOR_I2C_OP_RNDM_WR) + cont_wr_flag = CAM_SENSOR_I2C_WRITE_RANDOM; + else if (fw_param->fw_operation == CAMERA_SENSOR_I2C_OP_CONT_WR_BRST) + cont_wr_flag = CAM_SENSOR_I2C_WRITE_BURST; + else if (fw_param->fw_operation == CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN) + cont_wr_flag = CAM_SENSOR_I2C_WRITE_SEQ; + + write_ois_fw(ptr, (o_ctrl->fw_info.endianness & OIS_ENDIANNESS_MASK_FW), + fw_param, o_ctrl->io_master_info, cont_wr_flag); + + /* fw finalize */ + CAM_DBG(CAM_OIS, "fw finalize"); + if (o_ctrl->i2c_fw_finalize_data[count].is_settings_valid == 1) { + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_fw_finalize_data[count]); + if ((rc == -EAGAIN) && + (o_ctrl->io_master_info.master_type == CCI_MASTER)) { + CAM_WARN(CAM_OIS, + "CCI HW is resetting: Reapplying FW finalize settings"); + usleep_range(1000, 1010); + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_fw_finalize_data[count]); + } + if (rc) { + CAM_ERR(CAM_OIS, + "Cannot apply FW finalize settings %d", + rc); + goto release_firmware; + } else { + CAM_DBG(CAM_OIS, "OIS FW finalize settings success"); + } + } + } + + if (fw != NULL) { + release_firmware(fw); + fw = NULL; + } + } + +release_firmware: + if (fw != NULL) { + release_firmware(fw); + fw = NULL; + } + + return rc; +} + +/** + * cam_ois_pkt_parse - Parse csl packet + * @o_ctrl: ctrl structure + * @arg: Camera control command argument + * + * Returns success or failure + */ +static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg) +{ + int32_t rc = 0; + int32_t i = 0; + uint32_t 
total_cmd_buf_in_bytes = 0; + struct common_header *cmm_hdr = NULL; + uintptr_t generic_ptr; + struct cam_control *ioctl_ctrl = NULL; + struct cam_config_dev_cmd dev_config; + struct i2c_settings_array *i2c_reg_settings = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + uintptr_t generic_pkt_addr; + size_t pkt_len; + size_t remain_len = 0; + struct cam_packet *csl_packet = NULL; + struct cam_packet *csl_packet_u = NULL; + size_t len_of_buff = 0; + uint32_t *offset = NULL, *cmd_buf; + struct cam_ois_soc_private *soc_private = + (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info; + + ioctl_ctrl = (struct cam_control *)arg; + if (copy_from_user(&dev_config, + u64_to_user_ptr(ioctl_ctrl->handle), + sizeof(dev_config))) + return -EFAULT; + rc = cam_mem_get_cpu_buf(dev_config.packet_handle, + &generic_pkt_addr, &pkt_len); + if (rc) { + CAM_ERR(CAM_OIS, + "error in converting command Handle Error: %d", rc); + return rc; + } + + remain_len = pkt_len; + if ((sizeof(struct cam_packet) > pkt_len) || + ((size_t)dev_config.offset >= pkt_len - + sizeof(struct cam_packet))) { + CAM_ERR(CAM_OIS, + "Inval cam_packet strut size: %zu, len_of_buff: %zu", + sizeof(struct cam_packet), pkt_len); + rc = -EINVAL; + goto put_ref; + } + + remain_len -= (size_t)dev_config.offset; + csl_packet_u = (struct cam_packet *) + (generic_pkt_addr + (uint32_t)dev_config.offset); + rc = cam_packet_util_copy_pkt_to_kmd(csl_packet_u, &csl_packet, remain_len); + if (rc) { + CAM_ERR(CAM_OIS, "Copying packet to KMD failed"); + goto put_ref; + } + + switch (csl_packet->header.op_code & 0xFFFFFF) { + case CAM_OIS_PACKET_OPCODE_INIT: + CAM_DBG(CAM_OIS, "CAM_OIS_PACKET_OPCODE_INIT,num_cmd_buf %d", + csl_packet->num_cmd_buf); + + offset = (uint32_t *)&csl_packet->payload_flex; + offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t)); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + + /* Loop through multiple command buffers 
*/ + for (i = 0; i < csl_packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) { + CAM_ERR(CAM_OIS, "Invalid cmd desc"); + goto end; + } + + total_cmd_buf_in_bytes = cmd_desc[i].length; + if (!total_cmd_buf_in_bytes) + continue; + + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + &generic_ptr, &len_of_buff); + if (rc < 0) { + CAM_ERR(CAM_OIS, "Failed to get cpu buf : 0x%x", + cmd_desc[i].mem_handle); + goto end; + } + cmd_buf = (uint32_t *)generic_ptr; + if (!cmd_buf) { + CAM_ERR(CAM_OIS, "invalid cmd buf"); + rc = -EINVAL; + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + goto end; + } + + if ((len_of_buff < sizeof(struct common_header)) || + (cmd_desc[i].offset > (len_of_buff - + sizeof(struct common_header)))) { + CAM_ERR(CAM_OIS, + "Invalid length for sensor cmd"); + rc = -EINVAL; + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + goto end; + } + remain_len = len_of_buff - cmd_desc[i].offset; + cmd_buf += cmd_desc[i].offset / sizeof(uint32_t); + cmm_hdr = (struct common_header *)cmd_buf; + + CAM_DBG(CAM_OIS, + "cmm_hdr->cmd_type: %d", cmm_hdr->cmd_type); + switch (cmm_hdr->cmd_type) { + case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: + rc = cam_ois_slaveInfo_pkt_parser( + o_ctrl, cmd_buf, remain_len); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed in parsing slave info"); + break; + } + break; + case CAMERA_SENSOR_CMD_TYPE_PWR_UP: + case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN: + CAM_DBG(CAM_OIS, + "Received power settings buffer"); + rc = cam_sensor_update_power_settings( + cmd_buf, + total_cmd_buf_in_bytes, + power_info, remain_len); + if (rc) { + CAM_ERR(CAM_OIS, + "Failed: parse power settings"); + break; + } + break; + case CAMERA_SENSOR_OIS_CMD_TYPE_FW_INFO: + CAM_DBG(CAM_OIS, + "Received fwInfo buffer,total_cmd_buf_in_bytes: %d", + total_cmd_buf_in_bytes); + rc = cam_ois_fw_info_pkt_parser( + o_ctrl, cmd_buf, total_cmd_buf_in_bytes); + if (rc) { + CAM_ERR(CAM_OIS, + "Failed: parse fw info settings"); + break; + } + break; + default: + 
if (o_ctrl->i2c_init_data.is_settings_valid == 0) { + CAM_DBG(CAM_OIS, + "Received init/config settings"); + i2c_reg_settings = + &(o_ctrl->i2c_init_data); + i2c_reg_settings->is_settings_valid = 1; + i2c_reg_settings->request_id = 0; + rc = cam_sensor_i2c_command_parser( + &o_ctrl->io_master_info, + i2c_reg_settings, + &cmd_desc[i], 1, NULL); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "init parsing failed: %d", rc); + break; + } + } else if ((o_ctrl->is_ois_calib != 0) && + (o_ctrl->i2c_calib_data.is_settings_valid == + 0)) { + CAM_DBG(CAM_OIS, + "Received calib settings"); + i2c_reg_settings = &(o_ctrl->i2c_calib_data); + i2c_reg_settings->is_settings_valid = 1; + i2c_reg_settings->request_id = 0; + rc = cam_sensor_i2c_command_parser( + &o_ctrl->io_master_info, + i2c_reg_settings, + &cmd_desc[i], 1, NULL); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Calib parsing failed: %d", rc); + break; + } + } else if (o_ctrl->i2c_fwinit_data.is_settings_valid == 0) { + CAM_DBG(CAM_OIS, "received fwinit settings"); + i2c_reg_settings = + &(o_ctrl->i2c_fwinit_data); + i2c_reg_settings->is_settings_valid = 1; + i2c_reg_settings->request_id = 0; + rc = cam_sensor_i2c_command_parser( + &o_ctrl->io_master_info, + i2c_reg_settings, + &cmd_desc[i], 1, NULL); + if (rc < 0) { + CAM_DBG(CAM_OIS, + "fw init parsing failed: %d", rc); + break; + } + } + break; + } + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + + if (rc < 0) + goto end; + } + + if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) { + rc = cam_ois_power_up(o_ctrl); + if (rc) { + CAM_ERR(CAM_OIS, " OIS Power up failed"); + goto end; + } + } + + CAM_DBG(CAM_OIS, "ois_fw_flag: %d", o_ctrl->ois_fw_flag); + if (o_ctrl->ois_fw_flag) { + CAM_DBG(CAM_OIS, "fw_count: %d", o_ctrl->fw_info.fw_count); + if (o_ctrl->fw_info.fw_count != 0) { + rc = cam_ois_fw_download_v2(o_ctrl); + if (rc) { + CAM_ERR(CAM_OIS, "Failed OIS FW Download v2"); + goto pwr_dwn; + } + } else { + if (o_ctrl->i2c_fwinit_data.is_settings_valid == 1) { + rc = 
cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_fwinit_data); + if ((rc == -EAGAIN) && + (o_ctrl->io_master_info.master_type == + CCI_MASTER)) { + CAM_WARN(CAM_OIS, + "Reapplying fwinit settings"); + usleep_range(1000, 1010); + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_fwinit_data); + } + if (rc) { + CAM_ERR(CAM_OIS, + "Cannot apply fwinit data %d", + rc); + goto pwr_dwn; + } else { + CAM_DBG(CAM_OIS, "OIS fwinit settings success"); + } + + rc = cam_ois_fw_download(o_ctrl); + if (rc) { + CAM_ERR(CAM_OIS, "Failed OIS FW Download"); + goto pwr_dwn; + } + } + } + } + if (o_ctrl->i2c_init_data.is_settings_valid == 1) + { + rc = cam_ois_apply_settings(o_ctrl, &o_ctrl->i2c_init_data); + if ((rc == -EAGAIN) && + (o_ctrl->io_master_info.master_type == CCI_MASTER)) { + CAM_WARN(CAM_OIS, + "CCI HW is resetting: Reapplying INIT settings"); + usleep_range(1000, 1010); + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_init_data); + } + + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Cannot apply Init settings: rc = %d", + rc); + goto pwr_dwn; + } else { + CAM_DBG(CAM_OIS, "apply Init settings success"); + } + } + + if (o_ctrl->is_ois_calib) { + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_calib_data); + if ((rc == -EAGAIN) && + (o_ctrl->io_master_info.master_type == CCI_MASTER)) { + CAM_WARN(CAM_OIS, + "CCI HW is resetting: Reapplying calib settings"); + usleep_range(1000, 1010); + rc = cam_ois_apply_settings(o_ctrl, + &o_ctrl->i2c_calib_data); + } + if (rc) { + CAM_ERR(CAM_OIS, "Cannot apply calib data"); + goto pwr_dwn; + } else { + CAM_DBG(CAM_OIS, "apply calib data settings success"); + } + } + + o_ctrl->cam_ois_state = CAM_OIS_CONFIG; + + rc = delete_request(&o_ctrl->i2c_fwinit_data); + if (rc < 0) + CAM_WARN(CAM_OIS, + "Fail deleting fwinit data: rc: %d", rc); + + for (i = 0; i < MAX_OIS_FW_COUNT; i++) { + if (o_ctrl->i2c_fw_init_data[i].is_settings_valid == 1) { + rc = delete_request(&o_ctrl->i2c_fw_init_data[i]); + if (rc < 0) + CAM_WARN(CAM_OIS, + "Fail deleting 
i2c_fw_init_data: rc: %d", rc); + } + if (o_ctrl->i2c_fw_finalize_data[i].is_settings_valid == 1) { + rc = delete_request(&o_ctrl->i2c_fw_finalize_data[i]); + if (rc < 0) + CAM_WARN(CAM_OIS, + "Fail deleting i2c_fw_finalize_data: rc: %d", rc); + } + } + + rc = delete_request(&o_ctrl->i2c_init_data); + if (rc < 0) + CAM_WARN(CAM_OIS, + "Fail deleting Init data: rc: %d", rc); + + rc = delete_request(&o_ctrl->i2c_calib_data); + if (rc < 0) { + CAM_WARN(CAM_OIS, + "Fail deleting Calibration data: rc: %d", rc); + rc = 0; + } + break; + case CAM_OIS_PACKET_OPCODE_OIS_CONTROL: + CAM_DBG(CAM_OIS, "CAM_OIS_PACKET_OPCODE_OIS_CONTROL"); + if (o_ctrl->cam_ois_state < CAM_OIS_CONFIG) { + rc = -EINVAL; + CAM_WARN(CAM_OIS, + "Not in right state to control OIS: %d", + o_ctrl->cam_ois_state); + goto end; + } + offset = (uint32_t *)&csl_packet->payload_flex; + offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t)); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + i2c_reg_settings = &(o_ctrl->i2c_mode_data); + i2c_reg_settings->is_settings_valid = 1; + i2c_reg_settings->request_id = 0; + rc = cam_sensor_i2c_command_parser(&o_ctrl->io_master_info, + i2c_reg_settings, + cmd_desc, 1, NULL); + if (rc < 0) { + CAM_ERR(CAM_OIS, "OIS pkt parsing failed: %d", rc); + goto end; + } + + rc = cam_ois_apply_settings(o_ctrl, i2c_reg_settings); + if (rc < 0) { + CAM_ERR(CAM_OIS, "Cannot apply mode settings"); + goto end; + } + + rc = delete_request(i2c_reg_settings); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Fail deleting Mode data: rc: %d", rc); + goto end; + } + break; + case CAM_OIS_PACKET_OPCODE_READ: { + uint64_t qtime_ns; + struct cam_buf_io_cfg *io_cfg; + struct i2c_settings_array i2c_read_settings; + + CAM_DBG(CAM_OIS, "CAM_OIS_PACKET_OPCODE_READ"); + + if (o_ctrl->cam_ois_state < CAM_OIS_CONFIG) { + rc = -EINVAL; + CAM_WARN(CAM_OIS, + "Not in right state to read OIS: %d", + o_ctrl->cam_ois_state); + goto end; + } + CAM_DBG(CAM_OIS, "number of I/O configs: %d:", + 
csl_packet->num_io_configs); + if (csl_packet->num_io_configs == 0) { + CAM_ERR(CAM_OIS, "No I/O configs to process"); + rc = -EINVAL; + goto end; + } + + INIT_LIST_HEAD(&(i2c_read_settings.list_head)); + + io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *) + &csl_packet->payload_flex + + csl_packet->io_configs_offset); + + /* validate read data io config */ + if (io_cfg == NULL) { + CAM_ERR(CAM_OIS, "I/O config is invalid(NULL)"); + rc = -EINVAL; + goto end; + } + + offset = (uint32_t *)&csl_packet->payload_flex; + offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t)); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + i2c_read_settings.is_settings_valid = 1; + i2c_read_settings.request_id = 0; + rc = cam_sensor_i2c_command_parser(&o_ctrl->io_master_info, + &i2c_read_settings, + cmd_desc, 1, &io_cfg[0]); + if (rc < 0) { + CAM_ERR(CAM_OIS, "OIS read pkt parsing failed: %d", rc); + goto end; + } + + rc = cam_sensor_util_get_current_qtimer_ns(&qtime_ns); + if (rc < 0) { + CAM_ERR(CAM_OIS, "failed to get qtimer rc:%d", rc); + goto end; + } + + rc = cam_sensor_i2c_read_data( + &i2c_read_settings, + &o_ctrl->io_master_info); + if (rc < 0) { + CAM_ERR(CAM_OIS, "cannot read data rc: %d", rc); + delete_request(&i2c_read_settings); + goto end; + } + + if (csl_packet->num_io_configs > 1) { + rc = cam_sensor_util_write_qtimer_to_io_buffer( + qtime_ns, &io_cfg[1]); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "write qtimer failed rc: %d", rc); + delete_request(&i2c_read_settings); + goto end; + } + } + + rc = delete_request(&i2c_read_settings); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Failed in deleting the read settings"); + goto end; + } + break; + } + case CAM_OIS_PACKET_OPCODE_WRITE_TIME: { + CAM_DBG(CAM_OIS, + "CAM_OIS_PACKET_OPCODE_WRITE_TIME"); + if (o_ctrl->cam_ois_state < CAM_OIS_CONFIG) { + rc = -EINVAL; + CAM_ERR(CAM_OIS, + "Not in right state to write time to OIS: %d", + o_ctrl->cam_ois_state); + goto end; + } + offset = (uint32_t *)&csl_packet->payload_flex; + offset += 
(csl_packet->cmd_buf_offset / sizeof(uint32_t)); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + i2c_reg_settings = &(o_ctrl->i2c_time_data); + i2c_reg_settings->is_settings_valid = 1; + i2c_reg_settings->request_id = 0; + rc = cam_sensor_i2c_command_parser(&o_ctrl->io_master_info, + i2c_reg_settings, + cmd_desc, 1, NULL); + if (rc < 0) { + CAM_ERR(CAM_OIS, "OIS pkt parsing failed: %d", rc); + goto end; + } + + if (o_ctrl->fw_info.fw_count > 0) { + uint8_t ois_endianness = + (o_ctrl->fw_info.endianness & OIS_ENDIANNESS_MASK_INPUTPARAM) >> 4; + rc = cam_ois_update_time(i2c_reg_settings, ois_endianness); + } else + rc = cam_ois_update_time(i2c_reg_settings, CAM_ENDIANNESS_LITTLE); + if (rc < 0) { + CAM_ERR(CAM_OIS, "Cannot update time"); + goto end; + } + + rc = cam_ois_apply_settings(o_ctrl, i2c_reg_settings); + if (rc < 0) { + CAM_ERR(CAM_OIS, "Cannot apply mode settings"); + goto end; + } + + rc = delete_request(i2c_reg_settings); + if (rc < 0) { + CAM_ERR(CAM_OIS, + "Fail deleting Mode data: rc: %d", rc); + goto end; + } + break; + } + default: + CAM_ERR(CAM_OIS, "Invalid Opcode: %d", + (csl_packet->header.op_code & 0xFFFFFF)); + rc = -EINVAL; + goto end; + } + + if (!rc) + goto end; + +pwr_dwn: + cam_ois_power_down(o_ctrl); +end: + cam_common_mem_free(csl_packet); +put_ref: + cam_mem_put_cpu_buf(dev_config.packet_handle); + return rc; +} + +void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl) +{ + int rc = 0, i = 0; + struct cam_ois_soc_private *soc_private = + (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info; + + if (o_ctrl->cam_ois_state == CAM_OIS_INIT) + return; + + if (o_ctrl->cam_ois_state >= CAM_OIS_CONFIG) { + rc = cam_ois_power_down(o_ctrl); + if (rc < 0) + CAM_ERR(CAM_OIS, "OIS Power down failed"); + } + + if (o_ctrl->cam_ois_state >= CAM_OIS_ACQUIRE) { + rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl); + if (rc < 0) + CAM_ERR(CAM_OIS, "destroying the 
device hdl"); + o_ctrl->bridge_intf.device_hdl = -1; + o_ctrl->bridge_intf.link_hdl = -1; + o_ctrl->bridge_intf.session_hdl = -1; + } + + if (o_ctrl->i2c_fwinit_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_fwinit_data); + + for (i = 0; i < MAX_OIS_FW_COUNT; i++) { + if (o_ctrl->i2c_fw_init_data[i].is_settings_valid == 1) { + rc = delete_request(&o_ctrl->i2c_fw_init_data[i]); + if (rc < 0) + CAM_WARN(CAM_OIS, + "Fail deleting i2c_fw_init_data: rc: %d", rc); + } + if (o_ctrl->i2c_fw_finalize_data[i].is_settings_valid == 1) { + rc = delete_request(&o_ctrl->i2c_fw_finalize_data[i]); + if (rc < 0) + CAM_WARN(CAM_OIS, + "Fail deleting i2c_fw_finalize_data: rc: %d", rc); + } + } + + if (o_ctrl->i2c_fw_version_data.is_settings_valid == 1) { + rc = delete_request(&o_ctrl->i2c_fw_version_data); + if (rc < 0) + CAM_WARN(CAM_OIS, + "Fail deleting i2c_fw_version_data: rc: %d", rc); + } + + if (o_ctrl->i2c_mode_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_mode_data); + + if (o_ctrl->i2c_calib_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_calib_data); + + if (o_ctrl->i2c_init_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_init_data); + + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_down_setting_size = 0; + power_info->power_setting_size = 0; + + o_ctrl->cam_ois_state = CAM_OIS_INIT; +} + +/** + * cam_ois_driver_cmd - Handle ois cmds + * @e_ctrl: ctrl structure + * @arg: Camera control command argument + * + * Returns success or failure + */ +int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg) +{ + int rc = 0, i = 0; + struct cam_ois_query_cap_t ois_cap = {0}; + struct cam_control *cmd = (struct cam_control *)arg; + struct cam_ois_soc_private *soc_private = NULL; + struct cam_sensor_power_ctrl_t *power_info = NULL; + + if (!o_ctrl || !cmd) { + CAM_ERR(CAM_OIS, "Invalid 
arguments"); + return -EINVAL; + } + + if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_OIS, "Invalid handle type: %d", + cmd->handle_type); + return -EINVAL; + } + + soc_private = + (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private; + power_info = &soc_private->power_info; + + mutex_lock(&(o_ctrl->ois_mutex)); + switch (cmd->op_code) { + case CAM_QUERY_CAP: + ois_cap.slot_info = o_ctrl->soc_info.index; + + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &ois_cap, + sizeof(struct cam_ois_query_cap_t))) { + CAM_ERR(CAM_OIS, "Failed Copy to User"); + rc = -EFAULT; + goto release_mutex; + } + CAM_DBG(CAM_OIS, "ois_cap: ID: %d", ois_cap.slot_info); + break; + case CAM_ACQUIRE_DEV: + rc = cam_ois_get_dev_handle(o_ctrl, arg); + if (rc) { + CAM_ERR(CAM_OIS, "Failed to acquire dev"); + goto release_mutex; + } + + o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE; + break; + case CAM_START_DEV: + if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) { + rc = -EINVAL; + CAM_WARN(CAM_OIS, + "Not in right state for start : %d", + o_ctrl->cam_ois_state); + goto release_mutex; + } + o_ctrl->cam_ois_state = CAM_OIS_START; + break; + case CAM_CONFIG_DEV: + rc = cam_ois_pkt_parse(o_ctrl, arg); + if (rc) { + CAM_ERR(CAM_OIS, "Failed in ois pkt Parsing"); + goto release_mutex; + } + break; + case CAM_RELEASE_DEV: + if (o_ctrl->cam_ois_state == CAM_OIS_START) { + rc = -EINVAL; + CAM_WARN(CAM_OIS, + "Cant release ois: in start state"); + goto release_mutex; + } + + if (o_ctrl->cam_ois_state == CAM_OIS_CONFIG) { + rc = cam_ois_power_down(o_ctrl); + if (rc < 0) { + CAM_ERR(CAM_OIS, "OIS Power down failed"); + goto release_mutex; + } + } + + if (o_ctrl->bridge_intf.device_hdl == -1) { + CAM_ERR(CAM_OIS, "link hdl: %d device hdl: %d", + o_ctrl->bridge_intf.device_hdl, + o_ctrl->bridge_intf.link_hdl); + rc = -EINVAL; + goto release_mutex; + } + rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl); + if (rc < 0) + CAM_ERR(CAM_OIS, "destroying the device hdl"); + 
o_ctrl->bridge_intf.device_hdl = -1; + o_ctrl->bridge_intf.link_hdl = -1; + o_ctrl->bridge_intf.session_hdl = -1; + o_ctrl->cam_ois_state = CAM_OIS_INIT; + + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_down_setting_size = 0; + power_info->power_setting_size = 0; + + if (o_ctrl->i2c_mode_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_mode_data); + + if (o_ctrl->i2c_calib_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_calib_data); + + if (o_ctrl->i2c_init_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_init_data); + + if (o_ctrl->i2c_fwinit_data.is_settings_valid == 1) + delete_request(&o_ctrl->i2c_fwinit_data); + + for (i = 0; i < MAX_OIS_FW_COUNT; i++) { + if (o_ctrl->i2c_fw_init_data[i].is_settings_valid == 1) { + rc = delete_request(&o_ctrl->i2c_fw_init_data[i]); + if (rc < 0) { + CAM_WARN(CAM_OIS, + "Fail deleting i2c_fw_init_data: rc: %d", rc); + rc = 0; + } + } + if (o_ctrl->i2c_fw_finalize_data[i].is_settings_valid == 1) { + rc = delete_request(&o_ctrl->i2c_fw_finalize_data[i]); + if (rc < 0) { + CAM_WARN(CAM_OIS, + "Fail deleting i2c_fw_finalize_data: rc: %d", rc); + rc = 0; + } + } + } + + break; + case CAM_STOP_DEV: + if (o_ctrl->cam_ois_state != CAM_OIS_START) { + rc = -EINVAL; + CAM_WARN(CAM_OIS, + "Not in right state for stop : %d", + o_ctrl->cam_ois_state); + goto release_mutex; + } + o_ctrl->cam_ois_state = CAM_OIS_CONFIG; + break; + case CAM_FLUSH_REQ: + CAM_DBG(CAM_OIS, "Flush recveived"); + break; + default: + CAM_ERR(CAM_OIS, "invalid opcode: %d", cmd->op_code); + goto release_mutex; + } +release_mutex: + mutex_unlock(&(o_ctrl->ois_mutex)); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_core.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_core.h new file mode 100644 index 
0000000000..c4956695a3 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_core.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef _CAM_OIS_CORE_H_ +#define _CAM_OIS_CORE_H_ + +#include +#include "cam_ois_dev.h" + +#define OIS_NAME_LEN 32 +#define OIS_ENDIANNESS_MASK_FW 0x0F +#define OIS_ENDIANNESS_MASK_INPUTPARAM 0xF0 + + +/** + * @power_info: power setting info to control the power + * + * This API construct the default ois power setting. + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int32_t cam_ois_construct_default_power_setting( + struct cam_sensor_power_ctrl_t *power_info); + + +int cam_ois_driver_cmd(struct cam_ois_ctrl_t *e_ctrl, void *arg); + +/** + * @o_ctrl: OIS ctrl structure + * + * This API handles the shutdown ioctl/close + */ +void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl); + +struct completion *cam_ois_get_i3c_completion(uint32_t index); + +#endif +/* _CAM_OIS_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_dev.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_dev.c new file mode 100644 index 0000000000..cc229bf964 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_dev.c @@ -0,0 +1,790 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_ois_dev.h" +#include "cam_req_mgr_dev.h" +#include "cam_ois_soc.h" +#include "cam_ois_core.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_compat.h" +#include "cam_mem_mgr_api.h" + +static struct cam_i3c_ois_data { + struct cam_ois_ctrl_t *o_ctrl; + struct completion probe_complete; +} g_i3c_ois_data[MAX_CAMERAS]; + +struct completion *cam_ois_get_i3c_completion(uint32_t index) +{ + return &g_i3c_ois_data[index].probe_complete; +} + +static int cam_ois_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_ois_ctrl_t *o_ctrl = + v4l2_get_subdevdata(sd); + + if (!o_ctrl) { + CAM_ERR(CAM_OIS, "o_ctrl ptr is NULL"); + return -EINVAL; + } + + mutex_lock(&(o_ctrl->ois_mutex)); + cam_ois_shutdown(o_ctrl); + mutex_unlock(&(o_ctrl->ois_mutex)); + + return 0; +} + +static int cam_ois_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_OIS, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_ois_subdev_close_internal(sd, fh); +} + +static long cam_ois_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + int rc = 0; + struct cam_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd); + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_ois_driver_cmd(o_ctrl, arg); + if (rc) + CAM_ERR(CAM_OIS, + "Failed with driver cmd: %d", rc); + break; + case CAM_SD_SHUTDOWN: + if (!cam_req_mgr_is_shutdown()) { + CAM_ERR(CAM_CORE, "SD shouldn't come from user space"); + return 0; + } + rc = cam_ois_subdev_close_internal(sd, NULL); + break; + default: + CAM_ERR(CAM_OIS, "Wrong IOCTL cmd: %u", cmd); + rc = -ENOIOCTLCMD; + break; + } + + return rc; +} + +static int32_t cam_ois_update_i2c_info(struct cam_ois_ctrl_t *o_ctrl, + struct cam_ois_i2c_info_t *i2c_info) +{ + struct cam_sensor_cci_client *cci_client = NULL; + + if (o_ctrl->io_master_info.master_type == CCI_MASTER) { + 
cci_client = o_ctrl->io_master_info.cci_client; + if (!cci_client) { + CAM_ERR(CAM_OIS, "failed: cci_client %pK", + cci_client); + return -EINVAL; + } + cci_client->cci_i2c_master = o_ctrl->cci_i2c_master; + cci_client->sid = (i2c_info->slave_addr) >> 1; + cci_client->retries = 3; + cci_client->id_map = 0; + cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode; + } + + return 0; +} + +#ifdef CONFIG_COMPAT +static long cam_ois_init_subdev_do_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + struct cam_control cmd_data; + int32_t rc = 0; + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_OIS, + "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_ois_subdev_ioctl(sd, cmd, &cmd_data); + if (rc) { + CAM_ERR(CAM_OIS, + "Failed in ois suddev handling rc %d", + rc); + return rc; + } + break; + default: + CAM_ERR(CAM_OIS, "Invalid compat ioctl: %d", cmd); + rc = -ENOIOCTLCMD; + break; + } + + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { + CAM_ERR(CAM_OIS, + "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + return rc; +} +#endif + +static const struct v4l2_subdev_internal_ops cam_ois_internal_ops = { + .close = cam_ois_subdev_close, +}; + +static struct v4l2_subdev_core_ops cam_ois_subdev_core_ops = { + .ioctl = cam_ois_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_ois_init_subdev_do_ioctl, +#endif +}; + +static struct v4l2_subdev_ops cam_ois_subdev_ops = { + .core = &cam_ois_subdev_core_ops, +}; + +static int cam_ois_init_subdev_param(struct cam_ois_ctrl_t *o_ctrl) +{ + int rc = 0; + + o_ctrl->v4l2_dev_str.internal_ops = &cam_ois_internal_ops; + o_ctrl->v4l2_dev_str.ops = &cam_ois_subdev_ops; + strscpy(o_ctrl->device_name, CAM_OIS_NAME, + sizeof(o_ctrl->device_name)); + 
o_ctrl->v4l2_dev_str.name = o_ctrl->device_name; + o_ctrl->v4l2_dev_str.sd_flags = + (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS); + o_ctrl->v4l2_dev_str.ent_function = CAM_OIS_DEVICE_TYPE; + o_ctrl->v4l2_dev_str.token = o_ctrl; + o_ctrl->v4l2_dev_str.close_seq_prior = CAM_SD_CLOSE_MEDIUM_PRIORITY; + + rc = cam_register_subdev(&(o_ctrl->v4l2_dev_str)); + if (rc) + CAM_ERR(CAM_OIS, "fail to create subdev"); + + return rc; +} + +static int cam_ois_i2c_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct i2c_client *client = NULL; + struct cam_ois_ctrl_t *o_ctrl = NULL; + struct cam_ois_soc_private *soc_private = NULL; + struct timespec64 ts_start, ts_end; + long microsec = 0; + struct device_node *np = NULL; + const char *drv_name; + + CAM_GET_TIMESTAMP(ts_start); + client = container_of(dev, struct i2c_client, dev); + if (client == NULL) { + CAM_ERR(CAM_OIS, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + o_ctrl = CAM_MEM_ZALLOC(sizeof(*o_ctrl), GFP_KERNEL); + if (!o_ctrl) { + CAM_ERR(CAM_OIS, "CAM_MEM_ZALLOC failed"); + rc = -ENOMEM; + goto probe_failure; + } + + o_ctrl->io_master_info.qup_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_qup_client), GFP_KERNEL); + if (!(o_ctrl->io_master_info.qup_client)) { + rc = -ENOMEM; + goto octrl_free; + } + + i2c_set_clientdata(client, o_ctrl); + + o_ctrl->soc_info.dev = &client->dev; + o_ctrl->soc_info.dev_name = client->name; + o_ctrl->ois_device_type = MSM_CAMERA_I2C_DEVICE; + o_ctrl->io_master_info.master_type = I2C_MASTER; + o_ctrl->io_master_info.qup_client->i2c_client = client; + + np = of_node_get(client->dev.of_node); + drv_name = of_node_full_name(np); + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_ois_soc_private), + GFP_KERNEL); + if (!soc_private) { + rc = -ENOMEM; + goto free_qup; + } + + o_ctrl->soc_info.soc_private = soc_private; + rc = cam_ois_driver_soc_init(o_ctrl); + if (rc) { + CAM_ERR(CAM_OIS, "failed: 
cam_sensor_parse_dt rc %d", rc); + goto soc_free; + } + + rc = cam_ois_init_subdev_param(o_ctrl); + if (rc) + goto soc_free; + + cam_sensor_module_add_i2c_device((void *) o_ctrl, CAM_SENSOR_OIS); + + mutex_init(&(o_ctrl->ois_mutex)); + o_ctrl->cam_ois_state = CAM_OIS_INIT; + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(drv_name, microsec); + of_node_put(np); + + return rc; + +soc_free: + CAM_MEM_FREE(soc_private); +free_qup: + CAM_MEM_FREE(o_ctrl->io_master_info.qup_client); +octrl_free: + CAM_MEM_FREE(o_ctrl); +probe_failure: + return rc; +} + +static void cam_ois_i2c_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + struct i2c_client *client = NULL; + struct cam_ois_ctrl_t *o_ctrl = NULL; + struct cam_hw_soc_info *soc_info; + + client = container_of(dev, struct i2c_client, dev); + if (!client) { + CAM_ERR(CAM_OIS, + "Failed to get i2c client"); + return; + } + + o_ctrl = i2c_get_clientdata(client); + if (!o_ctrl) { + CAM_ERR(CAM_OIS, "ois device is NULL"); + return; + } + + CAM_INFO(CAM_OIS, "i2c driver remove invoked"); + soc_info = &o_ctrl->soc_info; + + for (i = 0; i < soc_info->num_clk; i++) { + if (!soc_info->clk[i]) { + CAM_DBG(CAM_OIS, "%s handle is NULL skip put", + soc_info->clk_name[i]); + continue; + } + devm_clk_put(soc_info->dev, soc_info->clk[i]); + } + + mutex_lock(&(o_ctrl->ois_mutex)); + cam_ois_shutdown(o_ctrl); + mutex_unlock(&(o_ctrl->ois_mutex)); + cam_unregister_subdev(&(o_ctrl->v4l2_dev_str)); + + CAM_MEM_FREE(o_ctrl->soc_info.soc_private); + v4l2_set_subdevdata(&o_ctrl->v4l2_dev_str.sd, NULL); + CAM_MEM_FREE(o_ctrl->io_master_info.qup_client); + CAM_MEM_FREE(o_ctrl); +} + +const static struct component_ops cam_ois_i2c_component_ops = { + .bind = cam_ois_i2c_component_bind, + .unbind = cam_ois_i2c_component_unbind, +}; + +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE +static int cam_ois_i2c_driver_probe(struct i2c_client 
*client) +{ + int rc = 0; + + if (client == NULL) { + CAM_ERR(CAM_OIS, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_OIS, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_OIS, "Adding sensor ois component"); + rc = component_add(&client->dev, &cam_ois_i2c_component_ops); + if (rc) + CAM_ERR(CAM_OIS, "failed to add component rc: %d", rc); + + return rc; +} +#else +static int cam_ois_i2c_driver_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + + if (client == NULL || id == NULL) { + CAM_ERR(CAM_OIS, "Invalid Args client: %pK id: %pK", + client, id); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_OIS, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_OIS, "Adding sensor ois component"); + rc = component_add(&client->dev, &cam_ois_i2c_component_ops); + if (rc) + CAM_ERR(CAM_OIS, "failed to add component rc: %d", rc); + + return rc; +} +#endif + +#if KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE +void cam_ois_i2c_driver_remove(struct i2c_client *client) +{ + component_del(&client->dev, &cam_ois_i2c_component_ops); +} +#else +static int cam_ois_i2c_driver_remove(struct i2c_client *client) +{ + component_del(&client->dev, &cam_ois_i2c_component_ops); + + return 0; +} +#endif + +static int cam_ois_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int32_t rc = 0; + struct cam_ois_ctrl_t *o_ctrl = NULL; + struct cam_ois_soc_private *soc_private = NULL; + bool i3c_i2c_target; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return 0; + + o_ctrl = 
CAM_MEM_ZALLOC(sizeof(struct cam_ois_ctrl_t), GFP_KERNEL); + if (!o_ctrl) + return -ENOMEM; + + o_ctrl->soc_info.pdev = pdev; + o_ctrl->pdev = pdev; + o_ctrl->soc_info.dev = &pdev->dev; + o_ctrl->soc_info.dev_name = pdev->name; + + o_ctrl->ois_device_type = MSM_CAMERA_PLATFORM_DEVICE; + + o_ctrl->io_master_info.master_type = CCI_MASTER; + o_ctrl->io_master_info.cci_client = CAM_MEM_ZALLOC( + sizeof(struct cam_sensor_cci_client), GFP_KERNEL); + if (!o_ctrl->io_master_info.cci_client) + goto free_o_ctrl; + + soc_private = CAM_MEM_ZALLOC(sizeof(struct cam_ois_soc_private), + GFP_KERNEL); + if (!soc_private) { + rc = -ENOMEM; + goto free_cci_client; + } + o_ctrl->soc_info.soc_private = soc_private; + soc_private->power_info.dev = &pdev->dev; + mutex_init(&(o_ctrl->ois_mutex)); + rc = cam_ois_driver_soc_init(o_ctrl); + if (rc) { + CAM_ERR(CAM_OIS, "failed: soc init rc %d", rc); + goto free_soc; + } + + rc = cam_ois_init_subdev_param(o_ctrl); + if (rc) + goto free_soc; + + rc = cam_ois_update_i2c_info(o_ctrl, &soc_private->i2c_info); + if (rc) { + CAM_ERR(CAM_OIS, "failed: to update i2c info rc %d", rc); + goto unreg_subdev; + } + o_ctrl->bridge_intf.device_hdl = -1; + + cam_sensor_module_add_i2c_device((void *) o_ctrl, CAM_SENSOR_OIS); + + platform_set_drvdata(pdev, o_ctrl); + o_ctrl->cam_ois_state = CAM_OIS_INIT; + + g_i3c_ois_data[o_ctrl->soc_info.index].o_ctrl = o_ctrl; + init_completion(&g_i3c_ois_data[o_ctrl->soc_info.index].probe_complete); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + CAM_DBG(CAM_OIS, "Component bound successfully"); + return rc; +unreg_subdev: + cam_unregister_subdev(&(o_ctrl->v4l2_dev_str)); +free_soc: + CAM_MEM_FREE(soc_private); +free_cci_client: + CAM_MEM_FREE(o_ctrl->io_master_info.cci_client); +free_o_ctrl: + CAM_MEM_FREE(o_ctrl); + return rc; +} + +static void cam_ois_component_unbind(struct device *dev, + struct device *master_dev, void 
*data) +{ + int i; + struct cam_ois_ctrl_t *o_ctrl; + struct cam_hw_soc_info *soc_info; + bool i3c_i2c_target; + struct platform_device *pdev = to_platform_device(dev); + + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return; + + o_ctrl = platform_get_drvdata(pdev); + if (!o_ctrl) { + CAM_ERR(CAM_OIS, "ois device is NULL"); + return; + } + + CAM_INFO(CAM_OIS, "platform driver remove invoked"); + soc_info = &o_ctrl->soc_info; + for (i = 0; i < soc_info->num_clk; i++) { + if (!soc_info->clk[i]) { + CAM_DBG(CAM_OIS, "%s handle is NULL skip put", + soc_info->clk_name[i]); + continue; + } + devm_clk_put(soc_info->dev, soc_info->clk[i]); + } + + mutex_lock(&(o_ctrl->ois_mutex)); + cam_ois_shutdown(o_ctrl); + mutex_unlock(&(o_ctrl->ois_mutex)); + cam_unregister_subdev(&(o_ctrl->v4l2_dev_str)); + + CAM_MEM_FREE(o_ctrl->soc_info.soc_private); + CAM_MEM_FREE(o_ctrl->io_master_info.cci_client); + platform_set_drvdata(pdev, NULL); + v4l2_set_subdevdata(&o_ctrl->v4l2_dev_str.sd, NULL); + CAM_MEM_FREE(o_ctrl); +} + +const static struct component_ops cam_ois_component_ops = { + .bind = cam_ois_component_bind, + .unbind = cam_ois_component_unbind, +}; + +static int32_t cam_ois_platform_driver_probe( + struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_OIS, "Adding OIS Sensor component"); + rc = component_add(&pdev->dev, &cam_ois_component_ops); + if (rc) + CAM_ERR(CAM_OIS, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_ois_platform_driver_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_ois_component_ops); + return 0; +} + +static const struct of_device_id cam_ois_dt_match[] = { + { .compatible = "qcom,ois" }, + { } +}; + +static const struct of_device_id cam_ois_i2c_dt_match[] = { + { .compatible = "qcom,cam-i2c-ois" }, + { } +}; + +MODULE_DEVICE_TABLE(of, cam_ois_dt_match); +MODULE_DEVICE_TABLE(of, cam_ois_i2c_dt_match); + +struct platform_driver 
cam_ois_platform_driver = { + .driver = { + .name = "qcom,ois", + .owner = THIS_MODULE, + .of_match_table = cam_ois_dt_match, + }, + .probe = cam_ois_platform_driver_probe, + .remove = cam_ois_platform_driver_remove, +}; +static const struct i2c_device_id cam_ois_i2c_id[] = { + { OIS_DRIVER_I2C, (kernel_ulong_t)NULL}, + { } +}; + +struct i2c_driver cam_ois_i2c_driver = { + .id_table = cam_ois_i2c_id, + .probe = cam_ois_i2c_driver_probe, + .remove = cam_ois_i2c_driver_remove, + .driver = { + .name = OIS_DRIVER_I2C, + .owner = THIS_MODULE, + .of_match_table = cam_ois_i2c_dt_match, + .suppress_bind_attrs = true, + }, +}; + +static struct i3c_device_id ois_i3c_id[MAX_I3C_DEVICE_ID_ENTRIES + 1]; + +static int cam_ois_i3c_driver_probe(struct i3c_device *client) +{ + int32_t rc = 0; + struct cam_ois_ctrl_t *o_ctrl = NULL; + uint32_t index; + struct device *dev; + + if (!client) { + CAM_INFO(CAM_OIS, "Null Client pointer"); + return -EINVAL; + } + + dev = &client->dev; + + CAM_DBG(CAM_OIS, "Probe for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_OIS, "device %s failed to read cell-index", dev_name(dev)); + return rc; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_OIS, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return -EINVAL; + } + + o_ctrl = g_i3c_ois_data[index].o_ctrl; + if (!o_ctrl) { + CAM_ERR(CAM_OIS, "o_ctrl is null. 
I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return -EINVAL; + } + cam_sensor_utils_parse_pm_ctrl_flag(dev->of_node, &(o_ctrl->io_master_info)); + + CAM_INFO(CAM_SENSOR, + "master: %d (1-CCI, 2-I2C, 3-SPI, 4-I3C) pm_ctrl_client_enable: %d", + o_ctrl->io_master_info.master_type, + o_ctrl->io_master_info.qup_client->pm_ctrl_client_enable); + + o_ctrl->io_master_info.qup_client->i3c_client = client; + o_ctrl->io_master_info.qup_client->i3c_wait_for_hotjoin = false; + + complete_all(&g_i3c_ois_data[index].probe_complete); + + CAM_DBG(CAM_OIS, "I3C Probe Finished for %s", dev_name(dev)); + return rc; +} + +#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) +static void cam_i3c_driver_remove(struct i3c_device *client) +{ + int32_t rc = 0; + uint32_t index; + struct cam_ois_ctrl_t *o_ctrl = NULL; + struct device *dev; + + if (!client) { + CAM_ERR(CAM_SENSOR, "I3C Driver Remove: Invalid input args"); + return; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "driver remove for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s failed to read cell-index", dev_name(dev)); + return; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return; + } + + o_ctrl = g_i3c_ois_data[index].o_ctrl; + if (!o_ctrl) { + CAM_ERR(CAM_SENSOR, "S_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return; + } + + CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s", + (client ? 
dev_name(&client->dev) : "none")); + CAM_MEM_FREE(o_ctrl->io_master_info.qup_client); + o_ctrl->io_master_info.qup_client = NULL; +} +#else +static int cam_i3c_driver_remove(struct i3c_device *client) +{ + int32_t rc = 0; + uint32_t index; + struct cam_ois_ctrl_t *o_ctrl = NULL; + struct device *dev; + + if (!client) { + CAM_ERR(CAM_SENSOR, "I3C Driver Remove: Invalid input args"); + return -EINVAL; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "driver remove for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s failed to read cell-index", dev_name(dev)); + return -EINVAL; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return -EINVAL; + } + + o_ctrl = g_i3c_ois_data[index].o_ctrl; + if (!o_ctrl) { + CAM_ERR(CAM_SENSOR, "S_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s", + (client ? 
dev_name(&client->dev) : "none")); + CAM_MEM_FREE(o_ctrl->io_master_info.qup_client); + o_ctrl->io_master_info.qup_client = NULL; + return 0; +} +#endif + +static struct i3c_driver cam_ois_i3c_driver = { + .id_table = ois_i3c_id, + .probe = cam_ois_i3c_driver_probe, + .remove = cam_i3c_driver_remove, + .driver = { + .owner = THIS_MODULE, + .name = OIS_DRIVER_I3C, + .of_match_table = cam_ois_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_ois_driver_init(void) +{ + int rc = 0; + struct device_node *dev; + int num_entries = 0; + + rc = platform_driver_register(&cam_ois_platform_driver); + if (rc) { + CAM_ERR(CAM_OIS, "platform_driver_register failed rc = %d", rc); + return rc; + } + + rc = i2c_add_driver(&cam_ois_i2c_driver); + if (rc) { + CAM_ERR(CAM_OIS, "i2c_add_driver failed rc = %d", rc); + goto i2c_register_err; + } + + memset(ois_i3c_id, 0, sizeof(struct i3c_device_id) * (MAX_I3C_DEVICE_ID_ENTRIES + 1)); + + dev = of_find_node_by_path(I3C_SENSOR_DEV_ID_DT_PATH); + if (!dev) { + CAM_DBG(CAM_OIS, "Couldnt Find the i3c-id-table dev node"); + return 0; + } + + rc = cam_sensor_count_elems_i3c_device_id(dev, &num_entries, + "i3c-ois-id-table"); + if (rc) + return 0; + + rc = cam_sensor_fill_i3c_device_id(dev, num_entries, + "i3c-ois-id-table", ois_i3c_id); + if (rc) + goto i3c_register_err; + + rc = i3c_driver_register_with_owner(&cam_ois_i3c_driver, THIS_MODULE); + if (rc) { + CAM_ERR(CAM_OIS, "i3c_driver registration failed, rc: %d", rc); + goto i3c_register_err; + } + + return 0; + +i3c_register_err: + i2c_del_driver(&cam_ois_i2c_driver); +i2c_register_err: + platform_driver_unregister(&cam_ois_platform_driver); + + return rc; +} + +void cam_ois_driver_exit(void) +{ + struct device_node *dev; + + platform_driver_unregister(&cam_ois_platform_driver); + i2c_del_driver(&cam_ois_i2c_driver); + + dev = of_find_node_by_path(I3C_SENSOR_DEV_ID_DT_PATH); + if (!dev) { + CAM_DBG(CAM_EEPROM, "Couldnt Find the i3c-id-table dev node"); + return; + } + + 
i3c_driver_unregister(&cam_ois_i3c_driver); +} + +MODULE_DESCRIPTION("CAM OIS driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_dev.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_dev.h new file mode 100644 index 0000000000..59712fa409 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_dev.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef _CAM_OIS_DEV_H_ +#define _CAM_OIS_DEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_soc_util.h" +#include "cam_context.h" + +#define DEFINE_MSM_MUTEX(mutexname) \ + static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +#define OIS_DRIVER_I2C "cam-i2c-ois" +#define OIS_DRIVER_I3C "i3c_camera_ois" + +enum cam_ois_state { + CAM_OIS_INIT, + CAM_OIS_ACQUIRE, + CAM_OIS_CONFIG, + CAM_OIS_START, +}; + +/** + * struct cam_ois_i2c_info_t - I2C info + * @slave_addr : slave address + * @i2c_freq_mode : i2c frequency mode + * + */ +struct cam_ois_i2c_info_t { + uint16_t slave_addr; + uint8_t i2c_freq_mode; +}; + +/** + * struct cam_ois_soc_private - ois soc private data structure + * @ois_name : ois name + * @i2c_info : i2c info structure + * @power_info : ois power info + * + */ +struct cam_ois_soc_private { + const char *ois_name; + struct cam_ois_i2c_info_t i2c_info; + struct cam_sensor_power_ctrl_t power_info; +}; + +/** + * struct cam_ois_intf_params - bridge interface params + * @device_hdl : Device Handle + * @session_hdl : Session Handle + * @ops : KMD operations + * @crm_cb : Callback API pointers + */ +struct cam_ois_intf_params { + int32_t device_hdl; + int32_t session_hdl; + int32_t 
link_hdl; + struct cam_req_mgr_kmd_ops ops; + struct cam_req_mgr_crm_cb *crm_cb; +}; + +/** + * struct cam_ois_ctrl_t - OIS ctrl private data + * @device_name : ois device_name + * @pdev : platform device + * @ois_mutex : ois mutex + * @soc_info : ois soc related info + * @io_master_info : Information about the communication master + * @cci_i2c_master : I2C structure + * @v4l2_dev_str : V4L2 device structure + * @is_i3c_device : A Flag to indicate whether this OIS is I3C Device or not. + * @bridge_intf : bridge interface params + * @i2c_fwinit_data : ois i2c firmware init settings + * @i2c_init_data : ois i2c init settings + * @i2c_mode_data : ois i2c mode settings + * @i2c_time_data : ois i2c time write settings + * @i2c_calib_data : ois i2c calib settings + * @ois_device_type : ois device type + * @cam_ois_state : ois_device_state + * @ois_fw_flag : flag for firmware download + * @is_ois_calib : flag for Calibration data + * @opcode : ois opcode + * @device_name : Device name + * + */ +struct cam_ois_ctrl_t { + char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + struct platform_device *pdev; + struct mutex ois_mutex; + struct cam_hw_soc_info soc_info; + struct camera_io_master io_master_info; + enum cci_i2c_master_t cci_i2c_master; + enum cci_device_num cci_num; + struct cam_subdev v4l2_dev_str; + bool is_i3c_device; + struct cam_ois_intf_params bridge_intf; + struct i2c_settings_array i2c_fwinit_data; + struct i2c_settings_array i2c_init_data; + struct i2c_settings_array i2c_calib_data; + struct i2c_settings_array i2c_mode_data; + struct i2c_settings_array i2c_time_data; + enum msm_camera_device_type_t ois_device_type; + enum cam_ois_state cam_ois_state; + char ois_name[32]; + uint8_t ois_fw_flag; + uint8_t is_ois_calib; + struct cam_ois_opcode opcode; + struct cam_cmd_ois_fw_info fw_info; + struct i2c_settings_array i2c_fw_init_data[MAX_OIS_FW_COUNT]; + struct i2c_settings_array i2c_fw_finalize_data[MAX_OIS_FW_COUNT]; + struct i2c_settings_array 
i2c_fw_version_data; +}; + +/** + * @brief : API to register OIS hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_ois_driver_init(void); + +/** + * @brief : API to remove OIS Hw from platform framework. + */ +void cam_ois_driver_exit(void); +#endif /*_CAM_OIS_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_soc.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_soc.c new file mode 100644 index 0000000000..d8ccd2e527 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_soc.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "cam_ois_soc.h" +#include "cam_debug_util.h" + +/** + * @e_ctrl: ctrl structure + * + * Parses ois dt + */ +static int cam_ois_get_dt_data(struct cam_ois_ctrl_t *o_ctrl) +{ + int i, rc = 0; + struct cam_hw_soc_info *soc_info = &o_ctrl->soc_info; + struct cam_ois_soc_private *soc_private = + (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private; + struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info; + struct device_node *of_node = NULL; + + of_node = soc_info->dev->of_node; + + if (!of_node) { + CAM_ERR(CAM_OIS, "of_node is NULL, device type %d", + o_ctrl->ois_device_type); + return -EINVAL; + } + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { + CAM_ERR(CAM_OIS, "cam_soc_util_get_dt_properties rc %d", + rc); + return rc; + } + + rc = of_property_read_bool(of_node, "i3c-target"); + if (rc) { + o_ctrl->is_i3c_device = true; + o_ctrl->io_master_info.master_type = I3C_MASTER; + } + + CAM_DBG(CAM_SENSOR, "I3C Target: %s", CAM_BOOL_TO_YESNO(o_ctrl->is_i3c_device)); + + /* Initialize regulators 
to default parameters */ + for (i = 0; i < soc_info->num_rgltr; i++) { + soc_info->rgltr[i] = devm_regulator_get(soc_info->dev, + soc_info->rgltr_name[i]); + if (IS_ERR_OR_NULL(soc_info->rgltr[i])) { + rc = PTR_ERR(soc_info->rgltr[i]); + rc = rc ? rc : -EINVAL; + CAM_ERR(CAM_OIS, "get failed for regulator %s", + soc_info->rgltr_name[i]); + return rc; + } + CAM_DBG(CAM_OIS, "get for regulator %s", + soc_info->rgltr_name[i]); + } + + cam_sensor_utils_parse_pm_ctrl_flag(of_node, &(o_ctrl->io_master_info)); + + if (!soc_info->gpio_data) { + CAM_INFO(CAM_OIS, "No GPIO found"); + return 0; + } + + if (!soc_info->gpio_data->cam_gpio_common_tbl_size) { + CAM_INFO(CAM_OIS, "No GPIO found"); + return -EINVAL; + } + + rc = cam_sensor_util_init_gpio_pin_tbl(soc_info, + &power_info->gpio_num_info); + if ((rc < 0) || (!power_info->gpio_num_info)) { + CAM_ERR(CAM_OIS, "No/Error OIS GPIOs"); + return -EINVAL; + } + + for (i = 0; i < soc_info->num_clk; i++) { + soc_info->clk[i] = devm_clk_get(soc_info->dev, + soc_info->clk_name[i]); + if (IS_ERR(soc_info->clk[i])) { + CAM_ERR(CAM_OIS, "get failed for %s", + soc_info->clk_name[i]); + rc = -ENOENT; + return rc; + } else if (!soc_info->clk[i]) { + CAM_DBG(CAM_OIS, "%s handle is NULL skip get", + soc_info->clk_name[i]); + continue; + } + } + + return rc; +} +/** + * @o_ctrl: ctrl structure + * + * This function is called from cam_ois_platform/i2c_driver_probe, it parses + * the ois dt node. 
+ */ +int cam_ois_driver_soc_init(struct cam_ois_ctrl_t *o_ctrl) +{ + int rc = 0, i = 0; + struct cam_hw_soc_info *soc_info = &o_ctrl->soc_info; + struct device_node *of_node = NULL; + struct device_node *of_parent = NULL; + + if (!soc_info->dev) { + CAM_ERR(CAM_OIS, "soc_info is not initialized"); + return -EINVAL; + } + + of_node = soc_info->dev->of_node; + if (!of_node) { + CAM_ERR(CAM_OIS, "dev.of_node NULL"); + return -EINVAL; + } + + if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) { + rc = of_property_read_u32(of_node, "cci-master", + &o_ctrl->cci_i2c_master); + if (rc < 0) { + CAM_DBG(CAM_OIS, "failed rc %d", rc); + return rc; + } + + of_parent = of_get_parent(of_node); + if (of_property_read_u32(of_parent, "cell-index", + &o_ctrl->cci_num) < 0) + /* Set default master 0 */ + o_ctrl->cci_num = CCI_DEVICE_0; + + o_ctrl->io_master_info.cci_client->cci_device = o_ctrl->cci_num; + CAM_DBG(CAM_OIS, "cci-device %d", o_ctrl->cci_num, rc); + } + + rc = cam_ois_get_dt_data(o_ctrl); + if (rc < 0) + CAM_DBG(CAM_OIS, "failed: ois get dt data rc %d", rc); + + memset(&o_ctrl->fw_info, 0, sizeof(struct cam_cmd_ois_fw_info)); + + INIT_LIST_HEAD(&(o_ctrl->i2c_init_data.list_head)); + INIT_LIST_HEAD(&(o_ctrl->i2c_calib_data.list_head)); + INIT_LIST_HEAD(&(o_ctrl->i2c_fwinit_data.list_head)); + for (i = 0; i < MAX_OIS_FW_COUNT; i++) { + INIT_LIST_HEAD(&(o_ctrl->i2c_fw_init_data[i].list_head)); + INIT_LIST_HEAD(&(o_ctrl->i2c_fw_finalize_data[i].list_head)); + } + INIT_LIST_HEAD(&(o_ctrl->i2c_fw_version_data.list_head)); + INIT_LIST_HEAD(&(o_ctrl->i2c_mode_data.list_head)); + INIT_LIST_HEAD(&(o_ctrl->i2c_time_data.list_head)); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_soc.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_soc.h new file mode 100644 index 0000000000..fd2c209c75 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_ois/cam_ois_soc.h @@ -0,0 
+1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ +#ifndef _CAM_OIS_SOC_H_ +#define _CAM_OIS_SOC_H_ + +#include "cam_ois_dev.h" + +int cam_ois_driver_soc_init(struct cam_ois_ctrl_t *o_ctrl); + +#endif/* _CAM_OIS_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr.c new file mode 100644 index 0000000000..628729d87c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr.c @@ -0,0 +1,1006 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include "cam_debug_util.h" +#include "cam_req_mgr_dev.h" +#include "cam_res_mgr_api.h" +#include "cam_res_mgr_private.h" +#include "camera_main.h" +#include "cam_mem_mgr_api.h" + +static struct cam_res_mgr *cam_res; + +static void cam_res_mgr_free_res(void) +{ + struct cam_dev_res *dev_res, *dev_temp; + struct cam_gpio_res *gpio_res, *gpio_temp; + struct cam_flash_res *flash_res, *flash_temp; + + if (!cam_res) + return; + + mutex_lock(&cam_res->gpio_res_lock); + list_for_each_entry_safe(gpio_res, gpio_temp, + &cam_res->gpio_res_list, list) { + list_for_each_entry_safe(dev_res, dev_temp, + &gpio_res->dev_list, list) { + list_del_init(&dev_res->list); + CAM_MEM_FREE(dev_res); + } + list_del_init(&gpio_res->list); + CAM_MEM_FREE(gpio_res); + } + mutex_unlock(&cam_res->gpio_res_lock); + + mutex_lock(&cam_res->flash_res_lock); + list_for_each_entry_safe(flash_res, flash_temp, + &cam_res->flash_res_list, list) { + list_del_init(&flash_res->list); + CAM_MEM_FREE(flash_res); + } + mutex_unlock(&cam_res->flash_res_lock); +} + +void 
cam_res_mgr_led_trigger_register(const char *name, struct led_trigger **tp) +{ + bool found = false; + struct cam_flash_res *flash_res; + + if (!cam_res) { + /* + * If this driver not probed, then just register the + * led trigger. + */ + led_trigger_register_simple(name, tp); + return; + } + + mutex_lock(&cam_res->flash_res_lock); + list_for_each_entry(flash_res, &cam_res->flash_res_list, list) { + if (!strcmp(flash_res->name, name)) { + found = true; + break; + } + } + mutex_unlock(&cam_res->flash_res_lock); + + if (found) { + *tp = flash_res->trigger; + } else { + flash_res = CAM_MEM_ZALLOC(sizeof(struct cam_flash_res), GFP_KERNEL); + if (!flash_res) { + CAM_ERR(CAM_RES, + "Failed to malloc memory for flash_res:%s", + name); + *tp = NULL; + return; + } + + led_trigger_register_simple(name, tp); + INIT_LIST_HEAD(&flash_res->list); + flash_res->trigger = *tp; + flash_res->name = name; + + mutex_lock(&cam_res->flash_res_lock); + list_add_tail(&flash_res->list, &cam_res->flash_res_list); + mutex_unlock(&cam_res->flash_res_lock); + } +} +EXPORT_SYMBOL(cam_res_mgr_led_trigger_register); + +void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp) +{ + bool found = false; + struct cam_flash_res *flash_res; + + if (!cam_res) { + /* + * If this driver not probed, then just unregister the + * led trigger. 
+ */ + led_trigger_unregister_simple(tp); + return; + } + + mutex_lock(&cam_res->flash_res_lock); + list_for_each_entry(flash_res, &cam_res->flash_res_list, list) { + if (flash_res->trigger == tp) { + found = true; + break; + } + } + + if (found) { + led_trigger_unregister_simple(tp); + list_del_init(&flash_res->list); + CAM_MEM_FREE(flash_res); + } + mutex_unlock(&cam_res->flash_res_lock); +} +EXPORT_SYMBOL(cam_res_mgr_led_trigger_unregister); + +void cam_res_mgr_led_trigger_event(struct led_trigger *trig, + enum led_brightness brightness) +{ + bool found = false; + struct cam_flash_res *flash_res; + + if (!cam_res) { + /* + * If this driver not probed, then just trigger + * the led event. + */ + led_trigger_event(trig, brightness); + return; + } + + mutex_lock(&cam_res->flash_res_lock); + list_for_each_entry(flash_res, &cam_res->flash_res_list, list) { + if (flash_res->trigger == trig) { + found = true; + break; + } + } + mutex_unlock(&cam_res->flash_res_lock); + + if (found) + led_trigger_event(trig, brightness); +} +EXPORT_SYMBOL(cam_res_mgr_led_trigger_event); + +int cam_res_mgr_util_get_idx_from_shared_pctrl_gpio( + uint gpio) +{ + int index = 0; + struct cam_res_mgr_dt *dt = &cam_res->dt; + + for (index = 0; index < dt->num_shared_pctrl_gpio; index++) { + if (gpio == dt->shared_pctrl_gpio[index]) + break; + } + + if (index == dt->num_shared_pctrl_gpio) + return -EINVAL; + + return index; +} +EXPORT_SYMBOL(cam_res_mgr_util_get_idx_from_shared_pctrl_gpio); + +int cam_res_mgr_util_get_idx_from_shared_gpio( + uint gpio) +{ + int index = 0; + struct cam_res_mgr_dt *dt = &cam_res->dt; + + for (index = 0; index < dt->num_shared_gpio; index++) { + if (gpio == dt->shared_gpio[index]) + break; + } + + if (index == dt->num_shared_gpio) + return -EINVAL; + + return index; +} +EXPORT_SYMBOL(cam_res_mgr_util_get_idx_from_shared_gpio); + +static bool cam_res_mgr_gpio_is_in_shared_pctrl_gpio( + uint gpio) +{ + int index = 0; + bool found = false; + struct cam_res_mgr_dt *dt 
= &cam_res->dt; + + for (index = 0; index < dt->num_shared_pctrl_gpio; index++) { + if (gpio == dt->shared_pctrl_gpio[index]) { + found = true; + break; + } + } + + return found; +} + +static bool cam_res_mgr_gpio_is_in_shared_gpio( + uint gpio) +{ + int index = 0; + bool found = false; + struct cam_res_mgr_dt *dt = &cam_res->dt; + + for (; index < dt->num_shared_gpio; index++) { + if (gpio == dt->shared_gpio[index]) { + found = true; + break; + } + } + + return found; +} + +int cam_res_mgr_util_shared_gpio_check_hold(uint gpio) +{ + int index = 0; + int dev_num = 0; + struct list_head *list; + struct cam_gpio_res *gpio_res = NULL; + struct cam_res_mgr_dt *dt = NULL; + bool is_shared_gpio = false; + bool is_shared_pctrl_gpio = false; + + if (!cam_res) { + CAM_DBG(CAM_RES, "res mgr is not initilized"); + return 0; + } + + if (!cam_res->shared_gpio_enabled) { + CAM_DBG(CAM_RES, "Res_mgr is not sharing any gpios"); + return 0; + } + + dt = &cam_res->dt; + + is_shared_gpio = + cam_res_mgr_gpio_is_in_shared_gpio(gpio); + is_shared_pctrl_gpio = + cam_res_mgr_gpio_is_in_shared_pctrl_gpio(gpio); + + if (is_shared_gpio && is_shared_pctrl_gpio) { + CAM_ERR(CAM_RES, + "gpio %u cannot be shared between pinctrl and gpio"); + return -EINVAL; + } + + if (is_shared_gpio) { + index = cam_res_mgr_util_get_idx_from_shared_gpio(gpio); + if (index < 0) { + CAM_ERR(CAM_RES, "Gpio%u not found in shared gpio list", + gpio); + return -EINVAL; + } + + list_for_each_entry(gpio_res, + &cam_res->gpio_res_list, list) { + + if (gpio_res->gpio == + dt->shared_gpio[index]) { + list_for_each(list, &gpio_res->dev_list) + dev_num++; + + if (dev_num >= 2) { + return RES_MGR_GPIO_NEED_HOLD; + } + } + } + } + + if (is_shared_pctrl_gpio) { + index = cam_res_mgr_util_get_idx_from_shared_pctrl_gpio(gpio); + if (index < 0) { + CAM_ERR(CAM_RES, + "gpio%u not found in shared pctrl gpio list", + gpio); + return -EINVAL; + } + + list_for_each_entry(gpio_res, + &cam_res->gpio_res_list, list) { + if 
(gpio_res->gpio == + dt->shared_pctrl_gpio[index]) { + list_for_each(list, &gpio_res->dev_list) + dev_num++; + + if (dev_num >= 2) { + CAM_DBG(CAM_RES, + "gpio: %u needs to hold", gpio); + return RES_MGR_GPIO_NEED_HOLD; + } + } + } + } + + for (index = 0; index < dt->num_shared_gpio; index++) { + list_for_each_entry(gpio_res, + &cam_res->gpio_res_list, list) { + + if (gpio_res->gpio == + dt->shared_gpio[index]) { + list_for_each(list, &gpio_res->dev_list) + dev_num++; + + if (dev_num >= 2) { + CAM_DBG(CAM_RES, + "gpio: %u needs to hold", gpio); + return RES_MGR_GPIO_NEED_HOLD; + } + } + } + } + + for (index = 0; index < dt->num_shared_pctrl_gpio; index++) { + list_for_each_entry(gpio_res, + &cam_res->gpio_res_list, list) { + + if (gpio_res->gpio == + dt->shared_pctrl_gpio[index]) { + list_for_each(list, &gpio_res->dev_list) + dev_num++; + + if (dev_num >= 2) { + CAM_DBG(CAM_RES, + "gpio: %u needs to hold", gpio); + return RES_MGR_GPIO_NEED_HOLD; + } + } + } + } + + CAM_DBG(CAM_RES, "gpio: %u can free the resource", gpio); + return RES_MGR_GPIO_CAN_FREE; +} +EXPORT_SYMBOL(cam_res_mgr_util_shared_gpio_check_hold); + +static int cam_res_mgr_shared_pinctrl_select_state( + int idx, bool active) +{ + int rc = 0; + + if (!cam_res || !cam_res->shared_gpio_enabled) { + CAM_DBG(CAM_RES, "Not support shared gpio."); + return 0; + } + + if (cam_res->pctrl_res[idx].pstatus == PINCTRL_STATUS_PUT) { + CAM_DBG(CAM_RES, "The shared pinctrl alerady been put.!"); + return 0; + } + + if (active && + (cam_res->pctrl_res[idx].pstatus != PINCTRL_STATUS_ACTIVE)) { + CAM_DBG(CAM_RES, + "pinctrl select state to active for the shared_pctrl_gpio idx: %d", + idx); + rc = pinctrl_select_state(cam_res->pinctrl, + cam_res->pctrl_res[idx].active); + cam_res->pctrl_res[idx].pstatus = PINCTRL_STATUS_ACTIVE; + } else if (!active && + (cam_res->pctrl_res[idx].pstatus == PINCTRL_STATUS_ACTIVE)) { + CAM_DBG(CAM_RES, + "pinctrl select state to suspend for the shared_pctrl_gpio idx: %d", + idx); + rc = 
pinctrl_select_state(cam_res->pinctrl, + cam_res->pctrl_res[idx].suspend); + cam_res->pctrl_res[idx].pstatus = PINCTRL_STATUS_SUSPEND; + } + + return rc; +} + +static int cam_res_mgr_add_device(struct device *dev, + struct cam_gpio_res *gpio_res) +{ + struct cam_dev_res *dev_res = NULL; + + dev_res = CAM_MEM_ZALLOC(sizeof(struct cam_dev_res), GFP_KERNEL); + if (!dev_res) + return -ENOMEM; + + dev_res->dev = dev; + INIT_LIST_HEAD(&dev_res->list); + list_add_tail(&dev_res->list, &gpio_res->dev_list); + + return 0; +} + +static struct cam_gpio_res *cam_res_mgr_find_if_gpio_in_list(uint gpio) +{ + struct cam_gpio_res *gpio_res = NULL; + + list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) { + if (gpio == gpio_res->gpio) + return gpio_res; + } + + return NULL; +} + +static bool __cam_res_mgr_find_if_gpio_is_shared(uint gpio) +{ + bool found_in_shared_gpio = false; + bool found_in_shared_pctrl_gpio = false; + + found_in_shared_gpio = cam_res_mgr_gpio_is_in_shared_gpio(gpio); + + found_in_shared_pctrl_gpio = + cam_res_mgr_gpio_is_in_shared_pctrl_gpio(gpio); + + if (found_in_shared_pctrl_gpio && found_in_shared_gpio) { + CAM_WARN(CAM_RES, "gpio: %u cannot be shared in both list", + gpio); + return false; + } + + if (found_in_shared_pctrl_gpio || found_in_shared_gpio) + return true; + + return false; +} + + +int cam_res_mgr_gpio_request(struct device *dev, uint gpio, + unsigned long flags, const char *label) +{ + int rc = 0; + bool dev_found = false; + bool gpio_found = false; + int pctrl_idx = -1; + struct cam_gpio_res *gpio_res = NULL; + + mutex_lock(&cam_res->gpio_res_lock); + if (cam_res && cam_res->shared_gpio_enabled) { + gpio_res = cam_res_mgr_find_if_gpio_in_list(gpio); + if (gpio_res == NULL) + gpio_found = false; + else + gpio_found = true; + } + + /* + * gpio_found equal to false has two situation: + * 1. shared gpio/pinctrl_gpio not enabled + * 2. 
shared gpio/pinctrl_gpio enabled, but not find this gpio + * from the gpio_res_list + * These two situations both need request gpio. + */ + if (!gpio_found) { + CAM_DBG(CAM_RES, "gpio: %u not found in gpio_res list", gpio); + rc = gpio_request_one(gpio, flags, label); + if (rc) { + CAM_ERR(CAM_RES, "gpio %d:%s request fails rc = %d", + gpio, label, rc); + goto end; + } + } + + /* + * If the gpio is in the shared list, and not find + * from gpio_res_list, then insert a cam_gpio_res + * to gpio_res_list. + */ + if ((!gpio_found && cam_res + && cam_res->shared_gpio_enabled) && + (cam_res_mgr_gpio_is_in_shared_gpio(gpio) || + (cam_res_mgr_gpio_is_in_shared_pctrl_gpio(gpio)))) { + CAM_DBG(CAM_RES, "gpio: %u is shared", gpio); + + gpio_res = CAM_MEM_ZALLOC(sizeof(struct cam_gpio_res), GFP_KERNEL); + if (!gpio_res) { + rc = -ENOMEM; + goto end; + } + gpio_res->gpio = gpio; + gpio_res->power_on_count = 0; + INIT_LIST_HEAD(&gpio_res->list); + INIT_LIST_HEAD(&gpio_res->dev_list); + + rc = cam_res_mgr_add_device(dev, gpio_res); + if (rc) { + CAM_MEM_FREE(gpio_res); + goto end; + } + + list_add_tail(&gpio_res->list, &cam_res->gpio_res_list); + } + + /* if shared gpio is in pinctrl gpio list */ + if (!gpio_found && cam_res + && cam_res->shared_gpio_enabled && + cam_res_mgr_gpio_is_in_shared_pctrl_gpio(gpio)) { + pctrl_idx = + cam_res_mgr_util_get_idx_from_shared_pctrl_gpio(gpio); + CAM_DBG(CAM_RES, + "shared_pctrl_gpio is at idx: %d", pctrl_idx); + if (pctrl_idx < 0) { + CAM_ERR(CAM_RES, + "pctrl_gpio: %u not found", gpio); + rc = -EINVAL; + goto end; + } + + /* Update Pinctrl state to active */ + cam_res_mgr_shared_pinctrl_select_state(pctrl_idx, true); + } + + if (gpio_found && cam_res + && cam_res->shared_gpio_enabled) { + struct cam_dev_res *dev_res = NULL; + + list_for_each_entry(dev_res, &gpio_res->dev_list, list) { + if (dev_res->dev == dev) { + dev_found = true; + break; + } + } + + if (!dev_found) { + rc = cam_res_mgr_add_device(dev, gpio_res); + if (rc) { + 
CAM_ERR(CAM_RES, + "add device to gpio res list failed rc: %d", + rc); + goto end; + } + } + } + +end: + mutex_unlock(&cam_res->gpio_res_lock); + return rc; +} +EXPORT_SYMBOL(cam_res_mgr_gpio_request); + +bool cam_res_mgr_util_check_if_gpio_is_shared( + struct gpio *gpio_tbl, uint8_t size) +{ + int i = 0; + bool found = false; + + if (!cam_res) { + CAM_DBG(CAM_RES, "cam_res data is not avaialbe"); + return false; + } + + if (!cam_res->shared_gpio_enabled) { + CAM_DBG(CAM_RES, "shared gpio support is not enabled"); + return false; + } + + for (i = 0; i < size; i++) { + found = __cam_res_mgr_find_if_gpio_is_shared(gpio_tbl[i].gpio); + if (found) + return found; + } + + return false; +} +EXPORT_SYMBOL(cam_res_mgr_util_check_if_gpio_is_shared); + +static void cam_res_mgr_gpio_free(struct device *dev, uint gpio) +{ + bool gpio_found = false; + bool need_free = true; + int dev_num = 0; + struct cam_gpio_res *gpio_res = NULL; + bool is_shared_pctrl_gpio = false; + int pctrl_idx = -1; + + is_shared_pctrl_gpio = + cam_res_mgr_gpio_is_in_shared_pctrl_gpio(gpio); + + mutex_lock(&cam_res->gpio_res_lock); + if (cam_res && cam_res->shared_gpio_enabled) { + list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) { + if (gpio == gpio_res->gpio) { + gpio_found = true; + break; + } + } + } + + if (gpio_found && cam_res + && cam_res->shared_gpio_enabled) { + struct list_head *list; + struct cam_dev_res *dev_res = NULL; + + /* Count the dev number in the dev_list */ + list_for_each(list, &gpio_res->dev_list) + dev_num++; + + /* + * Need free the gpio if only has last 1 device + * in the dev_list, otherwise, not free this + * gpio. 
+ */ + if (dev_num == 1) { + dev_res = list_first_entry(&gpio_res->dev_list, + struct cam_dev_res, list); + list_del_init(&dev_res->list); + CAM_MEM_FREE(dev_res); + list_del_init(&gpio_res->list); + CAM_MEM_FREE(gpio_res); + } else { + list_for_each_entry(dev_res, + &gpio_res->dev_list, list) { + if (dev_res->dev == dev) { + list_del_init(&dev_res->list); + CAM_MEM_FREE(dev_res); + need_free = false; + break; + } + } + } + } + + if (need_free) { + if (is_shared_pctrl_gpio) { + pctrl_idx = + cam_res_mgr_util_get_idx_from_shared_pctrl_gpio( + gpio); + if (pctrl_idx >= 0) { + cam_res_mgr_shared_pinctrl_select_state( + pctrl_idx, false); + } else { + CAM_ERR(CAM_RES, "invalid pinctrl idx: %d", pctrl_idx); + } + } + + CAM_DBG(CAM_RES, "freeing gpio: %u", gpio); + gpio_free(gpio); + } + + mutex_unlock(&cam_res->gpio_res_lock); +} + +void cam_res_mgr_gpio_free_arry(struct device *dev, + const struct gpio *array, size_t num) +{ + while (num--) + cam_res_mgr_gpio_free(dev, (array[num]).gpio); +} +EXPORT_SYMBOL(cam_res_mgr_gpio_free_arry); + +int cam_res_mgr_gpio_set_value(unsigned int gpio, int value) +{ + int rc = 0; + bool found = false; + struct cam_gpio_res *gpio_res = NULL; + + mutex_lock(&cam_res->gpio_res_lock); + if (cam_res && cam_res->shared_gpio_enabled) { + list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) { + if (gpio == gpio_res->gpio) { + found = true; + break; + } + } + } + + /* + * Set the value directly if can't find the gpio from + * gpio_res_list, otherwise, need add ref count support + **/ + if (!found) { + gpio_set_value_cansleep(gpio, value); + } else { + if (value) { + gpio_res->power_on_count++; + if (gpio_res->power_on_count < 2) { + gpio_set_value_cansleep(gpio, value); + CAM_DBG(CAM_RES, + "Shared GPIO(%d) : HIGH", gpio); + } + } else { + gpio_res->power_on_count--; + if (gpio_res->power_on_count < 1) { + gpio_set_value_cansleep(gpio, value); + CAM_DBG(CAM_RES, + "Shared GPIO(%d) : LOW", gpio); + } + } + } + + 
mutex_unlock(&cam_res->gpio_res_lock); + return rc; +} +EXPORT_SYMBOL(cam_res_mgr_gpio_set_value); + +static int cam_res_mgr_shared_pinctrl_init( + struct device *dev) +{ + int i = 0; + char pctrl_active[50]; + char pctrl_suspend[50]; + struct cam_res_mgr_dt *dt = &cam_res->dt; + + cam_res->pinctrl = devm_pinctrl_get(dev); + if (IS_ERR_OR_NULL(cam_res->pinctrl)) { + CAM_ERR(CAM_RES, "Pinctrl not available"); + return -EINVAL; + } + + for (i = 0; i < dt->num_shared_pctrl_gpio; i++) { + snprintf(pctrl_active, sizeof(pctrl_active), + "%s%s", + cam_res->dt.pctrl_name[i], + "_active"); + CAM_DBG(CAM_RES, "pctrl_active at index: %d name: %s", + i, pctrl_active); + snprintf(pctrl_suspend, sizeof(pctrl_suspend), + "%s%s", + cam_res->dt.pctrl_name[i], + "_suspend"); + CAM_DBG(CAM_RES, "pctrl_suspend at index: %d name: %s", + i, pctrl_suspend); + cam_res->pctrl_res[i].active = + pinctrl_lookup_state(cam_res->pinctrl, + pctrl_active); + if (IS_ERR_OR_NULL(cam_res->pctrl_res[i].active)) { + CAM_ERR(CAM_RES, + "Failed to get the active state pinctrl handle"); + return -EINVAL; + } + cam_res->pctrl_res[i].suspend = + pinctrl_lookup_state(cam_res->pinctrl, + pctrl_suspend); + if (IS_ERR_OR_NULL(cam_res->pctrl_res[i].active)) { + CAM_ERR(CAM_RES, + "Failed to get the active state pinctrl handle"); + return -EINVAL; + } + cam_res->pctrl_res[i].pstatus = PINCTRL_STATUS_GOT; + } + + cam_res->pstatus = PINCTRL_STATUS_GOT; + + return 0; +} + +static int cam_res_mgr_parse_dt_shared_gpio( + struct device *dev) +{ + int rc = 0; + struct device_node *of_node = NULL; + struct cam_res_mgr_dt *dt = &cam_res->dt; + + of_node = dev->of_node; + dt->num_shared_gpio = of_property_count_u32_elems(of_node, + "gpios-shared"); + + if (dt->num_shared_gpio <= 0) { + CAM_DBG(CAM_RES, + "Not found any shared gpio"); + return -ENODEV; + } + + if (dt->num_shared_gpio >= MAX_SHARED_GPIO_SIZE) { + CAM_ERR(CAM_RES, + "shared_gpio: %d max supported: %d", + MAX_SHARED_GPIO_SIZE, + dt->num_shared_gpio); + return 
-EINVAL; + } + + rc = of_property_read_u32_array(of_node, "gpios-shared", + dt->shared_gpio, dt->num_shared_gpio); + if (rc) { + CAM_ERR(CAM_RES, "Get shared gpio array failed."); + return -EINVAL; + } + + return rc; +} + +static int cam_res_mgr_parse_dt_shared_pinctrl_gpio( + struct device *dev) +{ + int rc = 0, i = 0; + int pinctrl_name_nodes = 0; + struct device_node *of_node = NULL; + struct cam_res_mgr_dt *dt = &cam_res->dt; + + of_node = dev->of_node; + dt->num_shared_pctrl_gpio = of_property_count_u32_elems(of_node, + "gpios-shared-pinctrl"); + + if (dt->num_shared_pctrl_gpio <= 0) { + CAM_DBG(CAM_RES, + "Not found any shared pinctrl res"); + return -ENODEV; + } + + if (dt->num_shared_pctrl_gpio >= MAX_SHARED_PCTRL_GPIO_SIZE) { + CAM_ERR(CAM_RES, + "Invalid Pinctrl GPIO number %d. No shared gpio.", + dt->num_shared_pctrl_gpio); + return -EINVAL; + } + + pinctrl_name_nodes = of_property_count_strings(of_node, + "shared-pctrl-gpio-names"); + + if (pinctrl_name_nodes != dt->num_shared_pctrl_gpio) { + CAM_ERR(CAM_RES, + "Mismatch between entries:: pctrl_gpio: %d and pctrl_name: %d", + dt->num_shared_pctrl_gpio, + pinctrl_name_nodes); + return -EINVAL; + } + + CAM_INFO(CAM_RES, + "number of pctrl_gpio: %d", dt->num_shared_pctrl_gpio); + + rc = of_property_read_u32_array(of_node, "gpios-shared-pinctrl", + dt->shared_pctrl_gpio, dt->num_shared_pctrl_gpio); + if (rc) { + CAM_ERR(CAM_RES, "Get shared pinctrl gpio array failed."); + return -EINVAL; + } + + for (i = 0; i < pinctrl_name_nodes; i++) { + rc = of_property_read_string_index(of_node, + "shared-pctrl-gpio-names", + i, &(dt->pctrl_name[i])); + CAM_INFO(CAM_RES, "shared-pctrl-gpio-names[%d] = %s", + i, dt->pctrl_name[i]); + if (rc) { + CAM_ERR(CAM_RES, + "i= %d pinctrl_name_nodes= %d reading clock-names failed", + i, pinctrl_name_nodes); + return rc; + } + } + + return rc; +} + +static int cam_res_mgr_parse_dt(struct device *dev) +{ + int rc = 0; + + rc = cam_res_mgr_parse_dt_shared_gpio(dev); + if (rc) { + if 
(rc == -ENODEV) { + CAM_DBG(CAM_RES, + "Shared GPIO resources not available"); + } else { + CAM_ERR(CAM_RES, + "Shared gpio parsing failed: rc: %d", rc); + return rc; + } + } + + rc = cam_res_mgr_parse_dt_shared_pinctrl_gpio(dev); + if (rc) { + if (rc == -ENODEV) { + CAM_DBG(CAM_RES, + "Pinctrl shared resources not available"); + } else { + CAM_ERR(CAM_RES, "Pinctrl parsing failed: rc: %d", + rc); + return rc; + } + } else { + /* When shared pinctrl is detected do the init */ + rc = cam_res_mgr_shared_pinctrl_init(dev); + if (rc) { + CAM_ERR(CAM_RES, "Pinctrl init failed rc: %d", rc); + return rc; + } + } + + return 0; +} + +static int cam_res_mgr_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + cam_res = CAM_MEM_ZALLOC(sizeof(*cam_res), GFP_KERNEL); + if (!cam_res) { + CAM_ERR(CAM_RES, "Not Enough Mem"); + return -ENOMEM; + } + cam_res->dev = &pdev->dev; + + CAM_DBG(CAM_RES, "ENTER"); + rc = cam_res_mgr_parse_dt(&pdev->dev); + if (rc) { + CAM_ERR(CAM_RES, + "Error in parsing device tree, rc: %d", rc); + CAM_MEM_FREE(cam_res); + return rc; + } + + if (cam_res->dt.num_shared_gpio || cam_res->dt.num_shared_pctrl_gpio) { + CAM_DBG(CAM_RES, "Enable shared gpio support."); + cam_res->shared_gpio_enabled = true; + } else { + CAM_DBG(CAM_RES, "Disable shared gpio support."); + cam_res->shared_gpio_enabled = false; + } + + mutex_init(&cam_res->flash_res_lock); + mutex_init(&cam_res->gpio_res_lock); + + INIT_LIST_HEAD(&cam_res->gpio_res_list); + INIT_LIST_HEAD(&cam_res->flash_res_list); + + CAM_DBG(CAM_RES, "Component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return 0; +} + +static void cam_res_mgr_component_unbind(struct device *dev, + struct device 
*master_dev, void *data) +{ + if (cam_res) { + cam_res_mgr_free_res(); + if (cam_res->pinctrl) + devm_pinctrl_put(cam_res->pinctrl); + cam_res->pinctrl = NULL; + cam_res->pstatus = PINCTRL_STATUS_PUT; + CAM_MEM_FREE(cam_res); + cam_res = NULL; + } + + CAM_DBG(CAM_RES, "Component unbound successfully"); +} + +const static struct component_ops cam_res_mgr_component_ops = { + .bind = cam_res_mgr_component_bind, + .unbind = cam_res_mgr_component_unbind, +}; + +static int cam_res_mgr_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_RES, "Adding Res mgr component"); + rc = component_add(&pdev->dev, &cam_res_mgr_component_ops); + if (rc) + CAM_ERR(CAM_RES, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_res_mgr_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_res_mgr_component_ops); + return 0; +} + +static const struct of_device_id cam_res_mgr_dt_match[] = { + {.compatible = "qcom,cam-res-mgr"}, + {} +}; +MODULE_DEVICE_TABLE(of, cam_res_mgr_dt_match); + +struct platform_driver cam_res_mgr_driver = { + .probe = cam_res_mgr_probe, + .remove = cam_res_mgr_remove, + .driver = { + .name = "cam_res_mgr", + .owner = THIS_MODULE, + .of_match_table = cam_res_mgr_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_res_mgr_init(void) +{ + return platform_driver_register(&cam_res_mgr_driver); +} + +void cam_res_mgr_exit(void) +{ + platform_driver_unregister(&cam_res_mgr_driver); +} + +MODULE_DESCRIPTION("Camera resource manager driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h new file mode 100644 index 0000000000..cb5022f482 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux 
Foundation. All rights reserved. + */ + +#ifndef __CAM_RES_MGR_API_H__ +#define __CAM_RES_MGR_API_H__ + +#include + +/** + * @brief: Register the led trigger + * + * The newly registered led trigger is assigned to flash_res_list. + * + * @name : Pointer to int led trigger name + * @tp : Save the returned led trigger + * + * @return None + */ +void cam_res_mgr_led_trigger_register(const char *name, + struct led_trigger **tp); + +/** + * @brief: Unregister the led trigger + * + * Free the flash_res if this led trigger isn't used by other device . + * + * @tp : Pointer to the led trigger + * + * @return None + */ +void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp); + +/** + * @brief: Trigger the event to led core + * + * @trig : Pointer to the led trigger + * @brightness : The brightness need to fire + * + * @return None + */ +void cam_res_mgr_led_trigger_event(struct led_trigger *trig, + enum led_brightness brightness); + +/** + * @brief: Check for shared gpio + * + * Will check whether requested device shares the gpio with other + * device. This function check against gpio table from device and + * shared gpio resources has been defined at res-mgr level + * + * @gpio_tbl : The GPIO table for respective device + * @size : GPIO table size + * @return Status of operation. False if not shared, true otherwise. + */ +bool cam_res_mgr_util_check_if_gpio_is_shared( + struct gpio *gpio_tbl, uint8_t size); + +/** + * @brief: Request a gpio + * + * Will alloc a gpio_res for the new gpio, other find the corresponding + * gpio_res. + * + * @dev : Pointer to the device + * @gpio : The GPIO number + * @flags : GPIO configuration as specified by GPIOF_* + * @label : A literal description string of this GPIO + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_res_mgr_gpio_request(struct device *dev, unsigned int gpio, + unsigned long flags, const char *label); + +/** + * @brief: Free an array of GPIOs + * + * Free the GPIOs and release corresponding gpio_res. + * + * @dev : Pointer to the device + * @gpio : Array of the GPIO number + * @num : The number of gpio + * + * @return None + */ +void cam_res_mgr_gpio_free_arry(struct device *dev, + const struct gpio *array, size_t num); + +/** + * @brief: Set GPIO power level + * + * Add ref count support for shared GPIOs. + * + * @gpio : The GPIO number + * @value : The power level need to setup + * + * @return Status of operation. Negative in case of error. Zero otherwise. + * -EINVAL will be returned if the gpio can't be found in gpio_res_list. + */ +int cam_res_mgr_gpio_set_value(unsigned int gpio, int value); + + +/** + * @brief : API to register RES_MGR to platform framework. + * @return Zero on success, or a negative error code on failure. + */ +int cam_res_mgr_init(void); + +/** + * @brief : API to remove RES_MGR from platform framework. + */ +void cam_res_mgr_exit(void); + +/** + * @brief : API to get gpio idx from shared gpio list. + * @return Index on success, or a negative error code if the gpio is not found or invalid. + */ +int cam_res_mgr_util_get_idx_from_shared_gpio(uint gpio); + +/** + * @brief : API to get gpio idx from shared pinctrl gpio list. + * @return Index on success, or a negative error code if the gpio is not found or invalid. + */ +int cam_res_mgr_util_get_idx_from_shared_pctrl_gpio(uint gpio); + +/** + * @brief : API to check whether gpio is in use or can be free. + * @return NEED_HOLD macro if gpio is in use, CAN_FREE if gpio can be free, + * error if operation is not valid, 0 in case of res_mgr is not + * available. 
+ */ +int cam_res_mgr_util_shared_gpio_check_hold(uint gpio); + +#endif /* __CAM_RES_MGR_API_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h new file mode 100644 index 0000000000..bee7085579 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef __CAM_RES_MGR_PRIVATE_H__ +#define __CAM_RES_MGR_PRIVATE_H__ + +#include +#include +#include "cam_soc_util.h" +#include "cam_sensor_util.h" + +#define MAX_SHARED_GPIO_SIZE 16 +#define MAX_SHARED_PCTRL_GPIO_SIZE 10 + +/** + * enum pinctrl_status - Enum for pinctrl status + */ +enum pinctrl_status { + PINCTRL_STATUS_GOT = 0, + PINCTRL_STATUS_ACTIVE, + PINCTRL_STATUS_SUSPEND, + PINCTRL_STATUS_PUT, +}; + +/** + * struct cam_dev_res + * + * @list : List member used to append this node to a dev list + * @dev : Device pointer associated with device + */ +struct cam_dev_res { + struct list_head list; + struct device *dev; +}; + +/** + * struct cam_gpio_res + * + * @list : List member used to append this node to a gpio list + * @dev_list : List the device which request this gpio + * @gpio : Gpio value + * @power_on_count : Record the power on times of this gpio + */ +struct cam_gpio_res { + struct list_head list; + struct list_head dev_list; + unsigned int gpio; + int power_on_count; +}; + +/** + * struct cam_flash_res + * + * @list : List member used to append this node to a linked list + * @name : Pointer to the flash trigger's name. 
+ * @trigger : Pointer to the flash trigger + */ +struct cam_flash_res { + struct list_head list; + const char *name; + struct led_trigger *trigger; +}; + +/** + * struct cam_pinctrl_res + * + * @active : Pinctrl state pointer for active state + * @suspend : Pinctrl state pointer for suspend state + * @pstatus : Pinctrl status holder + */ +struct cam_pinctrl_res { + struct pinctrl_state *active; + struct pinctrl_state *suspend; + enum pinctrl_status pstatus; +}; + +/** + * struct cam_res_mgr_dt + * + * @shared_gpio : Shared gpios list + * @shared_pctrl_gpio : Shared pinctrl gpio list + * @num_shared_gpio : Number of shared gpio + * @num_shared_pctrl_gpio : Number of shared pinctrl gpio + * @pctrl_name : Pinctrl name from shared pinctrl gpio list + */ +struct cam_res_mgr_dt { + uint shared_gpio[MAX_SHARED_GPIO_SIZE]; + uint shared_pctrl_gpio[MAX_SHARED_PCTRL_GPIO_SIZE]; + int num_shared_gpio; + int num_shared_pctrl_gpio; + const char *pctrl_name[MAX_SHARED_PCTRL_GPIO_SIZE]; +}; + +/** + * struct cam_res_mgr + * + * @dev : Pointer to the device + * @dt : Device tree resource + * @shared_gpio_enabled : The flag to indicate if support shared gpio + * @pstatus : Top level device pinctrl status + * @pinctrl : Device pinctrl pointer + * @pctrl_res : Pinctrl resource array + * @gpio_res_list : List head of the gpio resource + * @flash_res_list : List head of the flash resource + * @gpio_res_lock : GPIO resource lock + * @flash_res_lock : Flash resource lock + */ +struct cam_res_mgr { + struct device *dev; + struct cam_res_mgr_dt dt; + bool shared_gpio_enabled; + + enum pinctrl_status pstatus; + struct pinctrl *pinctrl; + struct cam_pinctrl_res pctrl_res[MAX_SHARED_PCTRL_GPIO_SIZE]; + + struct list_head gpio_res_list; + struct list_head flash_res_list; + struct mutex gpio_res_lock; + struct mutex flash_res_lock; +}; + +#endif /* __CAM_RES_MGR_PRIVATE_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c 
b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c new file mode 100644 index 0000000000..554b41e160 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c @@ -0,0 +1,2640 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include "cam_sensor_core.h" +#include "cam_sensor_util.h" +#include "cam_soc_util.h" +#include "cam_trace.h" +#include "cam_common_util.h" +#include "cam_packet_util.h" +#include "cam_req_mgr_dev.h" + +#define CAM_SENSOR_PIPELINE_DELAY_MASK 0xFF +#define CAM_SENSOR_MODESWITCH_DELAY_SHIFT 8 +#define CAM_SENSOR_MAX_PER_REQ_SETTINGS 4 + +extern struct completion *cam_sensor_get_i3c_completion(uint32_t index); + +static int cam_sensor_notify_v4l2_error_event( + struct cam_sensor_ctrl_t *s_ctrl, + uint32_t error_type, uint32_t error_code) +{ + int rc = 0; + struct cam_req_mgr_message req_msg = {0}; + + req_msg.session_hdl = s_ctrl->bridge_intf.session_hdl; + req_msg.u.err_msg.device_hdl = s_ctrl->bridge_intf.device_hdl; + req_msg.u.err_msg.link_hdl = s_ctrl->bridge_intf.link_hdl; + req_msg.u.err_msg.error_type = error_type; + req_msg.u.err_msg.request_id = s_ctrl->last_applied_req; + req_msg.u.err_msg.resource_size = 0x0; + req_msg.u.err_msg.error_code = error_code; + + CAM_DBG(CAM_SENSOR, + "v4l2 error event [type: %u code: %u] for req: %llu on %s", + error_type, error_code, s_ctrl->last_applied_req, + s_ctrl->sensor_name); + + rc = cam_req_mgr_notify_message(&req_msg, + V4L_EVENT_CAM_REQ_MGR_ERROR, + V4L_EVENT_CAM_REQ_MGR_EVENT); + if (rc) + CAM_ERR(CAM_SENSOR, + "Notifying v4l2 error [type: %u code: %u] failed for req id:%llu on %s", + error_type, error_code, s_ctrl->last_applied_req, + s_ctrl->sensor_name); + + return rc; +} + +static int cam_sensor_notify_msg_req_mgr( + enum 
cam_req_mgr_msg_type msg_type, + struct cam_sensor_ctrl_t *s_ctrl) +{ + int rc = 0; + struct cam_req_mgr_notify_msg msg = {0}; + + msg.link_hdl = s_ctrl->bridge_intf.link_hdl; + msg.dev_hdl = s_ctrl->bridge_intf.device_hdl; + msg.msg_type = msg_type; + + if (s_ctrl->bridge_intf.crm_cb && + s_ctrl->bridge_intf.crm_cb->add_req) { + rc = s_ctrl->bridge_intf.crm_cb->notify_msg(&msg); + if (rc) { + CAM_ERR(CAM_SENSOR, + "Notifying req mgr message: %u failed rc: %d", msg_type, rc); + } + } + + CAM_DBG(CAM_SENSOR, + "Successfully notified message : %u to req mgr", msg_type); + + return rc; +} + +static int cam_sensor_update_req_mgr( + struct cam_sensor_ctrl_t *s_ctrl, + struct cam_packet *csl_packet) +{ + int rc = 0; + struct cam_req_mgr_add_request add_req; + + memset(&add_req, 0, sizeof(add_req)); + add_req.link_hdl = s_ctrl->bridge_intf.link_hdl; + add_req.req_id = csl_packet->header.request_id; + CAM_DBG(CAM_SENSOR, " Rxed Req Id: %llu", + csl_packet->header.request_id); + add_req.dev_hdl = s_ctrl->bridge_intf.device_hdl; + if (s_ctrl->bridge_intf.crm_cb && + s_ctrl->bridge_intf.crm_cb->add_req) { + rc = s_ctrl->bridge_intf.crm_cb->add_req(&add_req); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_SENSOR, + "Adding request: %llu failed with request manager rc: %d, it has been flushed", + csl_packet->header.request_id, rc); + else + CAM_ERR(CAM_SENSOR, + "Adding request: %llu failed with request manager rc: %d", + csl_packet->header.request_id, rc); + return rc; + } + } + + CAM_DBG(CAM_SENSOR, "Successfully add req: %llu to req mgr", + add_req.req_id); + return rc; +} + +static void cam_sensor_release_stream_rsc( + struct cam_sensor_ctrl_t *s_ctrl) +{ + struct i2c_settings_array *i2c_set = NULL; + int rc; + + i2c_set = &(s_ctrl->i2c_data.streamoff_settings); + if (i2c_set->is_settings_valid == 1) { + i2c_set->is_settings_valid = -1; + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "failed while deleting Streamoff settings"); + } + + i2c_set = 
&(s_ctrl->i2c_data.streamon_settings); + if (i2c_set->is_settings_valid == 1) { + i2c_set->is_settings_valid = -1; + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "failed while deleting Streamon settings"); + } +} + +static void cam_sensor_release_per_frame_resource( + struct cam_sensor_ctrl_t *s_ctrl) +{ + struct i2c_settings_array *i2c_set = NULL; + int i, rc; + + if (s_ctrl->i2c_data.deferred_frame_update != NULL) { + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(s_ctrl->i2c_data.deferred_frame_update[i]); + if (i2c_set->is_settings_valid == 1) { + i2c_set->is_settings_valid = -1; + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "delete deferred frame_update setting for request: %lld rc: %d", + i2c_set->request_id, rc); + } + } + } + + if (s_ctrl->i2c_data.per_frame != NULL) { + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(s_ctrl->i2c_data.per_frame[i]); + if (i2c_set->is_settings_valid == 1) { + i2c_set->is_settings_valid = -1; + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "delete per frame setting for request: %lld rc: %d", + i2c_set->request_id, rc); + } + } + } + + if (s_ctrl->i2c_data.frame_skip != NULL) { + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(s_ctrl->i2c_data.frame_skip[i]); + if (i2c_set->is_settings_valid == 1) { + i2c_set->is_settings_valid = -1; + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "delete frame skip setting for request: %lld rc: %d", + i2c_set->request_id, rc); + } + } + } + + if (s_ctrl->i2c_data.bubble_update != NULL) { + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(s_ctrl->i2c_data.bubble_update[i]); + if (i2c_set->is_settings_valid == 1) { + i2c_set->is_settings_valid = -1; + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "delete bubble update setting for request: %lld rc: %d", + i2c_set->request_id, rc); + } + } + } +} + +static int cam_sensor_handle_res_info(struct 
cam_sensor_res_info *res_info, + struct cam_sensor_ctrl_t *s_ctrl) +{ + int rc = 0; + uint32_t idx = 0; + + if (!s_ctrl || !res_info) { + CAM_ERR(CAM_SENSOR, "Invalid params: res_info: %s, s_ctrl: %s", + CAM_IS_NULL_TO_STR(res_info), + CAM_IS_NULL_TO_STR(s_ctrl)); + return -EINVAL; + } + + idx = s_ctrl->last_updated_req % MAX_PER_FRAME_ARRAY; + + s_ctrl->sensor_res[idx].res_index = res_info->res_index; + strscpy(s_ctrl->sensor_res[idx].caps, res_info->caps, + sizeof(s_ctrl->sensor_res[idx].caps)); + s_ctrl->sensor_res[idx].width = res_info->width; + s_ctrl->sensor_res[idx].height = res_info->height; + s_ctrl->sensor_res[idx].fps = res_info->fps; + s_ctrl->sensor_res[idx].request_id = s_ctrl->last_updated_req; + + if (res_info->num_valid_params > 0) { + if (res_info->valid_param_mask & CAM_SENSOR_FEATURE_MASK) + s_ctrl->sensor_res[idx].feature_mask = + res_info->params[0]; + + if (res_info->valid_param_mask & CAM_SENSOR_NUM_BATCHED_FRAMES) + s_ctrl->num_batched_frames = res_info->params[1]; + } + + s_ctrl->is_res_info_updated = true; + + /* If request id is 0, it will be during an initial config/acquire */ + CAM_INFO(CAM_SENSOR, + "Sensor[%s-%d] Feature: 0x%x updated for request id: %lu, res index: %u, width: %d, height: %d, capability: %s, fps: %u", + s_ctrl->sensor_name, s_ctrl->soc_info.index, + s_ctrl->sensor_res[idx].feature_mask, + s_ctrl->sensor_res[idx].request_id, s_ctrl->sensor_res[idx].res_index, + s_ctrl->sensor_res[idx].width, s_ctrl->sensor_res[idx].height, + s_ctrl->sensor_res[idx].caps, s_ctrl->sensor_res[idx].fps); + + return rc; +} + +static int cam_sensor_handle_frame_info(struct cam_sensor_ctrl_t *s_ctrl, + struct cam_sensor_frame_info *frame_info) +{ + int rc = 0; + struct cam_req_mgr_notify_msg msg = {0}; + + CAM_DBG(CAM_SENSOR, + "sensor:%d req:%llu frame info: frame sync shift:%llu frame duration:%llu blanking duration:%llu", + s_ctrl->soc_info.index, s_ctrl->last_updated_req, + frame_info->frame_sync_shift, frame_info->frame_duration, + 
frame_info->blanking_duration); + + if (!s_ctrl->bridge_intf.crm_cb || + !s_ctrl->bridge_intf.crm_cb->notify_msg) { + CAM_ERR(CAM_SENSOR, "Invalid crm_cb:%p or notify_msg:%p", + s_ctrl->bridge_intf.crm_cb, + s_ctrl->bridge_intf.crm_cb ? + s_ctrl->bridge_intf.crm_cb->notify_msg : + NULL); + return -EINVAL; + } + + msg.link_hdl = s_ctrl->bridge_intf.link_hdl; + msg.req_id = s_ctrl->last_updated_req; + msg.dev_hdl = s_ctrl->bridge_intf.device_hdl; + msg.msg_type = CAM_REQ_MGR_MSG_SENSOR_FRAME_INFO; + msg.u.frame_info.frame_sync_shift = frame_info->frame_sync_shift; + msg.u.frame_info.frame_duration = frame_info->frame_duration; + msg.u.frame_info.blanking_duration = frame_info->blanking_duration; + if ((s_ctrl->stream_off_on_flush) && (s_ctrl->sensor_state == CAM_SENSOR_STANDBY)) + msg.u.frame_info.use_for_wd = true; + + rc = s_ctrl->bridge_intf.crm_cb->notify_msg(&msg); + if (rc) { + CAM_ERR(CAM_SENSOR, + "Failed to notify msg SENSOR_FRAME_INFO to CRM at req:%llu, rc:%d", + s_ctrl->last_updated_req, rc); + return rc; + } + + return rc; +} + +static int32_t cam_sensor_generic_blob_handler(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) +{ + int rc = 0; + struct cam_sensor_ctrl_t *s_ctrl = + (struct cam_sensor_ctrl_t *) user_data; + + if (!blob_data || !blob_size) { + CAM_ERR(CAM_SENSOR, "Invalid blob info %pK %u", blob_data, + blob_size); + return -EINVAL; + } + + switch (blob_type) { + case CAM_SENSOR_GENERIC_BLOB_RES_INFO: { + struct cam_sensor_res_info *res_info = + (struct cam_sensor_res_info *) blob_data; + + if (blob_size < sizeof(struct cam_sensor_res_info)) { + CAM_ERR(CAM_SENSOR, "RES_INFO: Invalid blob size expected: 0x%x actual: 0x%x", + sizeof(struct cam_sensor_res_info), blob_size); + return -EINVAL; + } + + rc = cam_sensor_handle_res_info(res_info, s_ctrl); + break; + } + case CAM_SENSOR_GENERIC_BLOB_FRAME_INFO: { + struct cam_sensor_frame_info *frame_info = + (struct cam_sensor_frame_info *) blob_data; + + if (blob_size < 
sizeof(struct cam_sensor_frame_info)) { + CAM_ERR(CAM_SENSOR, "FRAME_INFO: Invalid blob size expected: 0x%x actual: 0x%x", + sizeof(struct cam_sensor_frame_info), blob_size); + return -EINVAL; + } + + rc = cam_sensor_handle_frame_info(s_ctrl, frame_info); + break; + } + default: + CAM_WARN(CAM_SENSOR, "Invalid blob type %d", blob_type); + break; + } + + return rc; +} + +static int32_t cam_sensor_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl, + void *arg) +{ + int32_t i, rc = 0; + uintptr_t generic_ptr; + struct cam_control *ioctl_ctrl = NULL; + struct cam_packet *csl_packet = NULL; + struct cam_packet *csl_packet_u = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_buf_io_cfg *io_cfg = NULL; + struct i2c_settings_array *i2c_reg_settings = NULL; + struct i2c_settings_array *i2c_set_per_req[CAM_SENSOR_MAX_PER_REQ_SETTINGS]; + size_t len_of_buff = 0; + size_t remain_len = 0; + int64_t prev_updated_req; + uint32_t cmd_buf_type, idx; + struct cam_config_dev_cmd config; + struct i2c_data_settings *i2c_data = NULL; + + ioctl_ctrl = (struct cam_control *)arg; + + if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_SENSOR, "Invalid Handle Type"); + return -EINVAL; + } + + if (copy_from_user(&config, + u64_to_user_ptr(ioctl_ctrl->handle), + sizeof(config))) + return -EFAULT; + + rc = cam_mem_get_cpu_buf( + config.packet_handle, + &generic_ptr, + &len_of_buff); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Failed in getting the packet: %d", rc); + return rc; + } + + remain_len = len_of_buff; + if ((sizeof(struct cam_packet) > len_of_buff) || + ((size_t)config.offset >= len_of_buff - + sizeof(struct cam_packet))) { + CAM_ERR(CAM_SENSOR, + "Inval cam_packet strut size: %zu, len_of_buff: %zu", + sizeof(struct cam_packet), len_of_buff); + rc = -EINVAL; + goto put_ref; + } + + remain_len -= (size_t)config.offset; + csl_packet_u = (struct cam_packet *)(generic_ptr + + (uint32_t)config.offset); + rc = cam_packet_util_copy_pkt_to_kmd(csl_packet_u, &csl_packet, 
remain_len); + if (rc) { + CAM_ERR(CAM_SENSOR, "Copying packet to KMD failed"); + goto put_ref; + } + + if ((csl_packet->header.op_code & 0xFFFFFF) != + CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG && + (csl_packet->header.op_code & 0xFFFFFF) != + CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF && + csl_packet->header.request_id <= s_ctrl->last_flush_req + && s_ctrl->last_flush_req != 0) { + CAM_ERR(CAM_SENSOR, + "reject request %lld, last request to flush %u", + csl_packet->header.request_id, s_ctrl->last_flush_req); + rc = -EBADR; + goto end; + } + + if (csl_packet->header.request_id > s_ctrl->last_flush_req) + s_ctrl->last_flush_req = 0; + + prev_updated_req = s_ctrl->last_updated_req; + s_ctrl->is_res_info_updated = false; + + i2c_data = &(s_ctrl->i2c_data); + CAM_DBG(CAM_SENSOR, "Header OpCode: %d", csl_packet->header.op_code); + switch (csl_packet->header.op_code & 0xFFFFFF) { + case CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG: { + i2c_reg_settings = &i2c_data->init_settings; + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = 1; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG: { + i2c_reg_settings = &i2c_data->config_settings; + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = 1; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON: { + if (s_ctrl->streamon_count > 0) { + delete_request(&i2c_data->streamon_settings); + s_ctrl->streamon_count = 0; + } + + s_ctrl->streamon_count = s_ctrl->streamon_count + 1; + i2c_reg_settings = &i2c_data->streamon_settings; + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = 1; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF: { + if (s_ctrl->streamoff_count > 0) { + delete_request(&i2c_data->streamoff_settings); + s_ctrl->streamoff_count = 0; + } + + s_ctrl->streamoff_count = s_ctrl->streamoff_count + 1; + i2c_reg_settings = &i2c_data->streamoff_settings; + i2c_reg_settings->request_id = 0; + 
i2c_reg_settings->is_settings_valid = 1; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_READ: { + i2c_reg_settings = &(i2c_data->read_settings); + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = 1; + + CAM_DBG(CAM_SENSOR, "number of IO configs: %d:", + csl_packet->num_io_configs); + if (csl_packet->num_io_configs == 0) { + CAM_ERR(CAM_SENSOR, "No I/O configs to process"); + goto end; + } + + io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *) + &csl_packet->payload_flex + + csl_packet->io_configs_offset); + + if (io_cfg == NULL) { + CAM_ERR(CAM_SENSOR, "I/O config is invalid(NULL)"); + goto end; + } + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE: { + if (s_ctrl->sensor_state < CAM_SENSOR_STANDBY) { + CAM_WARN(CAM_SENSOR, + "Rxed Update packets without linking in state: %d", + s_ctrl->sensor_state); + goto end; + } + + i2c_reg_settings = + &i2c_data->per_frame[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + CAM_DBG(CAM_SENSOR, "Received Packet: %lld req: %lld", + csl_packet->header.request_id % MAX_PER_FRAME_ARRAY, + csl_packet->header.request_id); + if (i2c_reg_settings->is_settings_valid == 1) { + //Need to clean the previous i2c settings here + i2c_set_per_req[0] = &i2c_data->per_frame[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + i2c_set_per_req[1] = &i2c_data->deferred_frame_update[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + i2c_set_per_req[2] = &i2c_data->frame_skip[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + i2c_set_per_req[3] = &i2c_data->bubble_update[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + + for (i = 0; i < CAM_SENSOR_MAX_PER_REQ_SETTINGS; i++) { + CAM_DBG(CAM_SENSOR, "setting list idx:%d, request_id:%lld, is_settings_valid:%d", + i, i2c_set_per_req[i]->request_id, i2c_set_per_req[i]->is_settings_valid); + if ((i2c_set_per_req[i]->request_id) && + (i2c_set_per_req[i]->is_settings_valid == 1)) { + i2c_set_per_req[i]->request_id = 0; + rc = 
delete_request(i2c_set_per_req[i]); + if (rc < 0) + CAM_ERR(CAM_SENSOR, "Delete request Failed during parse pkt rc:%d", rc); + } + } + } + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_FRAME_SKIP_UPDATE: { + if (s_ctrl->sensor_state < CAM_SENSOR_STANDBY) { + CAM_WARN(CAM_SENSOR, + "Rxed Update packets without linking in state: %d", + s_ctrl->sensor_state); + goto end; + } + + i2c_reg_settings = + &i2c_data->frame_skip[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + CAM_DBG(CAM_SENSOR, "Received not ready packet: %lld req: %lld", + csl_packet->header.request_id % MAX_PER_FRAME_ARRAY, + csl_packet->header.request_id); + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_BUBBLE_UPDATE: { + if (s_ctrl->sensor_state < CAM_SENSOR_STANDBY) { + CAM_WARN(CAM_SENSOR, + "Rxed Update packets without linking in state: %d", + s_ctrl->sensor_state); + goto end; + } + + i2c_reg_settings = + &i2c_data->bubble_update[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + CAM_DBG(CAM_SENSOR, "Received bubble update packet: %lld req: %lld", + csl_packet->header.request_id % MAX_PER_FRAME_ARRAY, + csl_packet->header.request_id); + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: { + if (s_ctrl->sensor_state < CAM_SENSOR_STANDBY) { + CAM_WARN(CAM_SENSOR, + "Rxed Update packets without linking in state: %d", + s_ctrl->sensor_state); + goto end; + } + + i2c_reg_settings = + &i2c_data->per_frame[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + i2c_reg_settings->request_id = csl_packet->header.request_id; + i2c_reg_settings->is_settings_valid = 1; + + rc = cam_sensor_update_req_mgr(s_ctrl, csl_packet); + if (rc) + CAM_ERR(CAM_SENSOR, + "Failed in adding request to req_mgr"); + break; + } + default: + CAM_ERR(CAM_SENSOR, "Invalid Packet Header opcode: %d", + csl_packet->header.op_code & 0xFFFFFF); + rc = -EINVAL; + goto end; + } + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint8_t *)&csl_packet->payload + + csl_packet->cmd_buf_offset); + + CAM_DBG(CAM_SENSOR, 
"cam:%d req:%llu number of command buffers:%d", + s_ctrl->soc_info.index, csl_packet->header.request_id, + csl_packet->num_cmd_buf); + + for (i = 0; i < csl_packet->num_cmd_buf; i++) { + if (!cmd_desc[i].length) + continue; + + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) { + CAM_ERR(CAM_SENSOR, "invalud cmd[%d] buf", i); + rc = -EINVAL; + goto end; + } + + cmd_buf_type = cmd_desc[i].meta_data; + CAM_DBG(CAM_SENSOR, "cam:%d cmd_buf_type:%d", + s_ctrl->soc_info.index, cmd_buf_type); + + switch (cmd_buf_type) { + case CAM_SENSOR_PACKET_I2C_COMMANDS: { + rc = cam_sensor_i2c_command_parser(&s_ctrl->io_master_info, + i2c_reg_settings, &cmd_desc[i], 1, io_cfg); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc); + goto end; + } + break; + } + case CAM_SENSOR_PACKET_DEFERRED_I2C_COMMANDS_META: { + struct i2c_settings_array *i2c_reg_settings_deferred = NULL; + + i2c_reg_settings_deferred = + &i2c_data->deferred_frame_update[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + + if (i2c_reg_settings_deferred->is_settings_valid) + CAM_WARN(CAM_SENSOR, + "Sensor[%s] receiving duplicate deferred meta settings for req: %llu", + s_ctrl->sensor_name, csl_packet->header.request_id); + + i2c_reg_settings_deferred->request_id = csl_packet->header.request_id; + rc = cam_sensor_i2c_command_parser(&s_ctrl->io_master_info, + i2c_reg_settings_deferred, &cmd_desc[i], 1, io_cfg); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc); + goto end; + } + break; + } + case CAM_SENSOR_PACKET_GENERIC_BLOB: { + s_ctrl->last_updated_req = csl_packet->header.request_id; + idx = s_ctrl->last_updated_req % MAX_PER_FRAME_ARRAY; + + rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i], + cam_sensor_generic_blob_handler, s_ctrl); + if (rc) + s_ctrl->sensor_res[idx].request_id = 0; + + break; + } + case CAM_SENSOR_PACKET_BUBBLE_UPD_I2C_COMMANDS_META: { + struct i2c_settings_array *i2c_reg_settings_bubble = NULL; + + 
i2c_reg_settings_bubble = + &i2c_data->bubble_update[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + if (i2c_reg_settings_bubble->is_settings_valid) + CAM_WARN(CAM_SENSOR, + "Sensor[%s] receiving duplicate bubble meta settings for req: %llu", + s_ctrl->sensor_name, csl_packet->header.request_id); + i2c_reg_settings_bubble->request_id = csl_packet->header.request_id; + rc = cam_sensor_i2c_command_parser(&s_ctrl->io_master_info, + i2c_reg_settings_bubble, &cmd_desc[i], 1, io_cfg); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc); + goto end; + } + break; + } + case CAM_SENSOR_PACKET_FRAME_SKIP_I2C_COMMANDS_META: { + struct i2c_settings_array *i2c_reg_settings_frame_skip = NULL; + + i2c_reg_settings_frame_skip = + &i2c_data->frame_skip[csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY]; + if (i2c_reg_settings_frame_skip->is_settings_valid) + CAM_WARN(CAM_SENSOR, + "Sensor[%s] receiving duplicate frame skip meta settings for req: %llu", + s_ctrl->sensor_name, csl_packet->header.request_id); + i2c_reg_settings_frame_skip->request_id = csl_packet->header.request_id; + + rc = cam_sensor_i2c_command_parser(&s_ctrl->io_master_info, + i2c_reg_settings_frame_skip, &cmd_desc[i], 1, io_cfg); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc); + goto end; + } + break; + } + default: + CAM_ERR(CAM_SENSOR, "invalid cmd buf type %d", + cmd_buf_type); + rc = -EINVAL; + goto end; + } + } + + /* + * If no res info in current request, then we pick previous + * resolution info as current resolution info. + * Don't copy the sensor resolution info when the request id + * is invalid. + */ + if ((!s_ctrl->is_res_info_updated) && (csl_packet->header.request_id != 0)) { + /* + * Update the last updated req at two places. + * 1# Got generic blob: The req id can be zero for the initial res info updating + * 2# Copy previous res info: The req id can't be zero, in case some queue info + * are override by slot0. 
+ */ + s_ctrl->last_updated_req = csl_packet->header.request_id; + s_ctrl->sensor_res[s_ctrl->last_updated_req % MAX_PER_FRAME_ARRAY] = + s_ctrl->sensor_res[prev_updated_req % MAX_PER_FRAME_ARRAY]; + } + + if ((csl_packet->header.op_code & 0xFFFFFF) == + CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE) { + i2c_reg_settings->request_id = + csl_packet->header.request_id; + rc = cam_sensor_update_req_mgr(s_ctrl, csl_packet); + if (rc) { + CAM_ERR(CAM_SENSOR, + "Failed in adding request to req_mgr"); + goto end; + } + } + + if ((csl_packet->header.op_code & 0xFFFFFF) == + CAM_SENSOR_PACKET_OPCODE_SENSOR_FRAME_SKIP_UPDATE) { + i2c_reg_settings->request_id = + csl_packet->header.request_id; + } + + if ((csl_packet->header.op_code & 0xFFFFFF) == + CAM_SENSOR_PACKET_OPCODE_SENSOR_BUBBLE_UPDATE) { + i2c_reg_settings->request_id = + csl_packet->header.request_id; + } + +end: + cam_common_mem_free(csl_packet); +put_ref: + cam_mem_put_cpu_buf(config.packet_handle); + return rc; +} + +static int32_t cam_sensor_i2c_modes_util( + struct camera_io_master *io_master_info, + struct i2c_settings_list *i2c_list) +{ + int32_t rc = 0; + uint32_t i, size; + + if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_RANDOM) { + rc = camera_io_dev_write(io_master_info, + &(i2c_list->i2c_settings)); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to random write I2C settings: %d", + rc); + return rc; + } + } else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_SEQ) { + rc = camera_io_dev_write_continuous( + io_master_info, + &(i2c_list->i2c_settings), + CAM_SENSOR_I2C_WRITE_SEQ); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to seq write I2C settings: %d", + rc); + return rc; + } + } else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_BURST) { + rc = camera_io_dev_write_continuous( + io_master_info, + &(i2c_list->i2c_settings), + CAM_SENSOR_I2C_WRITE_BURST); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to burst write I2C settings: %d", + rc); + return rc; + } + } else if (i2c_list->op_code == 
CAM_SENSOR_I2C_POLL) { + size = i2c_list->i2c_settings.size; + for (i = 0; i < size; i++) { + rc = camera_io_dev_poll( + io_master_info, + i2c_list->i2c_settings.reg_setting[i].reg_addr, + i2c_list->i2c_settings.reg_setting[i].reg_data, + i2c_list->i2c_settings.reg_setting[i].data_mask, + i2c_list->i2c_settings.addr_type, + i2c_list->i2c_settings.data_type, + i2c_list->i2c_settings.reg_setting[i].delay); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "i2c poll apply setting Fail: %d", rc); + return rc; + } + } + } + + return rc; +} + +int32_t cam_sensor_update_i2c_info(struct cam_cmd_i2c_info *i2c_info, + struct cam_sensor_ctrl_t *s_ctrl) +{ + int32_t rc = 0; + struct cam_sensor_cci_client *cci_client = NULL; + struct cam_sensor_qup_client *qup_client = NULL; + + if (s_ctrl->io_master_info.master_type == CCI_MASTER) { + cci_client = s_ctrl->io_master_info.cci_client; + if (!cci_client) { + CAM_ERR(CAM_SENSOR, "failed: cci_client %pK", + cci_client); + return -EINVAL; + } + cci_client->cci_i2c_master = s_ctrl->cci_i2c_master; + cci_client->sid = i2c_info->slave_addr >> 1; + cci_client->retries = 3; + cci_client->id_map = 0; + cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode; + CAM_DBG(CAM_SENSOR, "CCI: %d Master: %d slave_addr: 0x%x freq_mode: %d", + cci_client->cci_device, cci_client->cci_i2c_master, i2c_info->slave_addr, + i2c_info->i2c_freq_mode); + } else if (s_ctrl->io_master_info.master_type == I2C_MASTER) { + qup_client = s_ctrl->io_master_info.qup_client; + if (!qup_client) { + CAM_ERR(CAM_SENSOR, "failed: qup_client %pK", + qup_client); + return -EINVAL; + } + qup_client->i2c_client->addr = i2c_info->slave_addr; + CAM_DBG(CAM_SENSOR, "Slave addr: 0x%x", i2c_info->slave_addr); + } + + s_ctrl->sensordata->slave_info.sensor_slave_addr = + i2c_info->slave_addr; + return rc; +} + +int32_t cam_sensor_update_slave_info(void *probe_info, + uint32_t cmd, struct cam_sensor_ctrl_t *s_ctrl, uint8_t probe_ver) +{ + int32_t rc = 0; + struct cam_cmd_probe *sensor_probe_info; 
+ struct cam_cmd_probe_v2 *sensor_probe_info_v2; + + memset(s_ctrl->sensor_name, 0, CAM_SENSOR_NAME_MAX_SIZE); + memset(s_ctrl->io_master_info.sensor_name, 0, CAM_SENSOR_NAME_MAX_SIZE); + + if (probe_ver == CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE) { + sensor_probe_info = (struct cam_cmd_probe *)probe_info; + s_ctrl->sensordata->slave_info.sensor_id_reg_addr = + sensor_probe_info->reg_addr; + s_ctrl->sensordata->slave_info.sensor_id = + sensor_probe_info->expected_data; + s_ctrl->sensordata->slave_info.sensor_id_mask = + sensor_probe_info->data_mask; + s_ctrl->pipeline_delay = + sensor_probe_info->reserved; + s_ctrl->modeswitch_delay = 0; + + s_ctrl->sensor_probe_addr_type = sensor_probe_info->addr_type; + s_ctrl->sensor_probe_data_type = sensor_probe_info->data_type; + + s_ctrl->probe_sensor_slave_addr = 0; + } else if (probe_ver == CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE_V2) { + sensor_probe_info_v2 = (struct cam_cmd_probe_v2 *)probe_info; + s_ctrl->sensordata->slave_info.sensor_id_reg_addr = + sensor_probe_info_v2->reg_addr; + s_ctrl->sensordata->slave_info.sensor_id = + sensor_probe_info_v2->expected_data; + s_ctrl->sensordata->slave_info.sensor_id_mask = + sensor_probe_info_v2->data_mask; + s_ctrl->pipeline_delay = + (sensor_probe_info_v2->pipeline_delay & + CAM_SENSOR_PIPELINE_DELAY_MASK); + s_ctrl->modeswitch_delay = (sensor_probe_info_v2->pipeline_delay >> + CAM_SENSOR_MODESWITCH_DELAY_SHIFT); + s_ctrl->sensor_probe_addr_type = + sensor_probe_info_v2->addr_type; + s_ctrl->sensor_probe_data_type = + sensor_probe_info_v2->data_type; + + memcpy(s_ctrl->sensor_name, sensor_probe_info_v2->sensor_name, + CAM_SENSOR_NAME_MAX_SIZE-1); + + s_ctrl->probe_sensor_slave_addr = + sensor_probe_info_v2->reserved[0]; + memcpy(s_ctrl->io_master_info.sensor_name, sensor_probe_info_v2->sensor_name, + CAM_SENSOR_NAME_MAX_SIZE-1); + } + + CAM_DBG(CAM_SENSOR, + "%s Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x sensor_pipeline_delay:0x%x", + s_ctrl->sensor_name, + 
s_ctrl->sensordata->slave_info.sensor_id_reg_addr, + s_ctrl->sensordata->slave_info.sensor_id, + s_ctrl->sensordata->slave_info.sensor_id_mask, + s_ctrl->pipeline_delay); + return rc; +} + +int32_t cam_handle_cmd_buffers_for_probe(void *cmd_buf, + struct cam_sensor_ctrl_t *s_ctrl, + int32_t cmd_buf_num, uint32_t cmd, + uint32_t cmd_buf_length, size_t remain_len, + uint32_t probe_ver, struct cam_cmd_buf_desc *cmd_desc) +{ + int32_t rc = 0; + size_t required_size = 0; + + switch (cmd_buf_num) { + case 0: { + struct cam_cmd_i2c_info *i2c_info = NULL; + void *probe_info; + + if (probe_ver == CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE) + required_size = sizeof(struct cam_cmd_i2c_info) + + sizeof(struct cam_cmd_probe); + else if(probe_ver == CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE_V2) + required_size = sizeof(struct cam_cmd_i2c_info) + + sizeof(struct cam_cmd_probe_v2); + + if (remain_len < required_size) { + CAM_ERR(CAM_SENSOR, + "not enough buffer for cam_cmd_i2c_info"); + return -EINVAL; + } + i2c_info = (struct cam_cmd_i2c_info *)cmd_buf; + rc = cam_sensor_update_i2c_info(i2c_info, s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Failed in Updating the i2c Info"); + return rc; + } + probe_info = cmd_buf + sizeof(struct cam_cmd_i2c_info); + rc = cam_sensor_update_slave_info(probe_info, cmd, s_ctrl, + probe_ver); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Updating the slave Info"); + return rc; + } + } + break; + case 1: { + rc = cam_sensor_update_power_settings(cmd_buf, + cmd_buf_length, &s_ctrl->sensordata->power_info, + remain_len); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed in updating power settings"); + return rc; + } + } + break; + + case 2: { + struct i2c_settings_array *i2c_reg_settings = NULL; + struct i2c_data_settings *i2c_data = NULL; + struct cam_buf_io_cfg *io_cfg = NULL; + + CAM_DBG(CAM_SENSOR, "reg_bank unlock settings"); + i2c_data = &(s_ctrl->i2c_data); + i2c_reg_settings = &i2c_data->reg_bank_unlock_settings; + i2c_reg_settings->request_id = 0; + rc = 
cam_sensor_i2c_command_parser(&s_ctrl->io_master_info, + i2c_reg_settings, cmd_desc, 1, io_cfg); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed in updating reg_bank unlock settings"); + return rc; + } + } + break; + case 3: { + struct i2c_settings_array *i2c_reg_settings = NULL; + struct i2c_data_settings *i2c_data = NULL; + struct cam_buf_io_cfg *io_cfg = NULL; + + CAM_DBG(CAM_SENSOR, "reg_bank lock settings"); + i2c_data = &(s_ctrl->i2c_data); + i2c_reg_settings = &i2c_data->reg_bank_lock_settings; + i2c_reg_settings->request_id = 0; + rc = cam_sensor_i2c_command_parser(&s_ctrl->io_master_info, + i2c_reg_settings, cmd_desc, 1, io_cfg); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed in updating reg_bank lock settings"); + return rc; + } + } + break; + + default: + CAM_ERR(CAM_SENSOR, "Invalid command buffer"); + break; + } + return rc; +} + +int32_t cam_handle_mem_ptr(uint64_t handle, uint32_t cmd, + struct cam_sensor_ctrl_t *s_ctrl) +{ + int rc = 0, i; + uint32_t *cmd_buf; + void *ptr; + size_t len; + struct cam_packet *pkt = NULL; + struct cam_packet *pkt_u = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + uintptr_t cmd_buf1 = 0; + uintptr_t packet = 0; + size_t remain_len = 0; + uint32_t probe_ver = 0; + + rc = cam_mem_get_cpu_buf(handle, + &packet, &len); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Failed to get the command Buffer"); + return -EINVAL; + } + + pkt_u = (struct cam_packet *)packet; + if (pkt_u == NULL) { + CAM_ERR(CAM_SENSOR, "packet pos is invalid"); + rc = -EINVAL; + goto put_ref; + } + + rc = cam_packet_util_copy_pkt_to_kmd(pkt_u, &pkt, len); + if (rc) { + CAM_ERR(CAM_SENSOR, "Copying packet to KMD failed"); + goto put_ref; + } + + if ((len < sizeof(struct cam_packet)) || + (pkt->cmd_buf_offset >= (len - sizeof(struct cam_packet)))) { + CAM_ERR(CAM_SENSOR, "Not enough buf provided"); + rc = -EINVAL; + goto end; + } + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *)&pkt->payload_flex + pkt->cmd_buf_offset/4); + if (cmd_desc == NULL) { 
+ CAM_ERR(CAM_SENSOR, "command descriptor pos is invalid"); + rc = -EINVAL; + goto end; + } + + probe_ver = pkt->header.op_code & 0xFFFFFF; + CAM_DBG(CAM_SENSOR, "Received Header opcode: %u", probe_ver); + + for (i = 0; i < pkt->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + goto end; + + if (!(cmd_desc[i].length)) + continue; + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + &cmd_buf1, &len); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to parse the command Buffer Header"); + goto end; + } + if (cmd_desc[i].offset >= len) { + CAM_ERR(CAM_SENSOR, + "offset past length of buffer"); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + rc = -EINVAL; + goto end; + } + remain_len = len - cmd_desc[i].offset; + if (cmd_desc[i].length > remain_len) { + CAM_ERR(CAM_SENSOR, + "Not enough buffer provided for cmd"); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + rc = -EINVAL; + goto end; + } + cmd_buf = (uint32_t *)cmd_buf1; + cmd_buf += cmd_desc[i].offset/4; + ptr = (void *) cmd_buf; + + rc = cam_handle_cmd_buffers_for_probe(ptr, s_ctrl, + i, cmd, cmd_desc[i].length, remain_len, probe_ver, &cmd_desc[i]); + + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to parse the command Buffer Header"); + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + goto end; + } + cam_mem_put_cpu_buf(cmd_desc[i].mem_handle); + } + +end: + cam_common_mem_free(pkt); +put_ref: + cam_mem_put_cpu_buf(handle); + return rc; +} + +void cam_sensor_query_cap(struct cam_sensor_ctrl_t *s_ctrl, + struct cam_sensor_query_cap *query_cap) +{ + query_cap->pos_roll = s_ctrl->sensordata->pos_roll; + query_cap->pos_pitch = s_ctrl->sensordata->pos_pitch; + query_cap->pos_yaw = s_ctrl->sensordata->pos_yaw; + query_cap->secure_camera = 0; + query_cap->actuator_slot_id = + s_ctrl->sensordata->subdev_id[SUB_MODULE_ACTUATOR]; + query_cap->csiphy_slot_id = + s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]; + query_cap->eeprom_slot_id = + 
s_ctrl->sensordata->subdev_id[SUB_MODULE_EEPROM]; + query_cap->flash_slot_id = + s_ctrl->sensordata->subdev_id[SUB_MODULE_LED_FLASH]; + query_cap->ois_slot_id = + s_ctrl->sensordata->subdev_id[SUB_MODULE_OIS]; + query_cap->slot_info = + s_ctrl->soc_info.index; +} + +static uint16_t cam_sensor_id_by_mask(struct cam_sensor_ctrl_t *s_ctrl, + uint32_t chipid) +{ + uint16_t sensor_id = (uint16_t)(chipid & 0xFFFF); + int16_t sensor_id_mask = s_ctrl->sensordata->slave_info.sensor_id_mask; + + if (!sensor_id_mask) + sensor_id_mask = ~sensor_id_mask; + + sensor_id &= sensor_id_mask; + sensor_id_mask &= -sensor_id_mask; + sensor_id_mask -= 1; + while (sensor_id_mask) { + sensor_id_mask >>= 1; + sensor_id >>= 1; + } + return sensor_id; +} + +void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl) +{ + struct cam_sensor_power_ctrl_t *power_info = + &s_ctrl->sensordata->power_info; + int rc = 0; + + if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) && + (s_ctrl->is_probe_succeed == 0)) + return; + + cam_sensor_release_stream_rsc(s_ctrl); + cam_sensor_release_per_frame_resource(s_ctrl); + + if (s_ctrl->sensor_state != CAM_SENSOR_INIT) + cam_sensor_power_down(s_ctrl); + + if (s_ctrl->bridge_intf.device_hdl != -1) { + rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "dhdl already destroyed: rc = %d", rc); + } + + s_ctrl->bridge_intf.device_hdl = -1; + s_ctrl->bridge_intf.link_hdl = -1; + s_ctrl->bridge_intf.session_hdl = -1; + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_setting_size = 0; + power_info->power_down_setting_size = 0; + s_ctrl->streamon_count = 0; + s_ctrl->streamoff_count = 0; + s_ctrl->is_probe_succeed = 0; + s_ctrl->last_flush_req = 0; + s_ctrl->sensor_state = CAM_SENSOR_INIT; +} + +int cam_sensor_match_id(struct cam_sensor_ctrl_t *s_ctrl) +{ + int rc = 0; + uint32_t chipid = 0; 
+ struct cam_camera_slave_info *slave_info; + + slave_info = &(s_ctrl->sensordata->slave_info); + + if (!slave_info) { + CAM_ERR(CAM_SENSOR, " failed: %pK", + slave_info); + return -EINVAL; + } + + if (s_ctrl->hw_no_ops) + return rc; + + if (s_ctrl->io_master_info.master_type == I2C_MASTER) { + if (s_ctrl->probe_sensor_slave_addr != 0) { + CAM_DBG(CAM_SENSOR, "%s read id: 0x%x -> 0x%x", s_ctrl->sensor_name, + s_ctrl->io_master_info.qup_client->i2c_client->addr, + s_ctrl->probe_sensor_slave_addr); + s_ctrl->io_master_info.qup_client->i2c_client->addr = + s_ctrl->probe_sensor_slave_addr; + } + } else if (s_ctrl->io_master_info.master_type == CCI_MASTER) { + if (s_ctrl->probe_sensor_slave_addr != 0) { + CAM_DBG(CAM_SENSOR, "%s read id: 0x%x -> 0x%x", s_ctrl->sensor_name, + s_ctrl->io_master_info.cci_client->sid << 1, + s_ctrl->probe_sensor_slave_addr); + s_ctrl->io_master_info.cci_client->sid = + s_ctrl->probe_sensor_slave_addr >> 1; + } + } + + rc = camera_io_dev_read( + &(s_ctrl->io_master_info), + slave_info->sensor_id_reg_addr, + &chipid, s_ctrl->sensor_probe_addr_type, + s_ctrl->sensor_probe_data_type, true); + + CAM_DBG(CAM_SENSOR, "%s read id: 0x%x expected id 0x%x:", + s_ctrl->sensor_name, chipid, slave_info->sensor_id); + + if (cam_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) { + CAM_WARN(CAM_SENSOR, "%s read id: 0x%x expected id 0x%x:", + s_ctrl->sensor_name, chipid, + slave_info->sensor_id); + return -ENODEV; + } + return rc; +} + +int cam_sensor_stream_off(struct cam_sensor_ctrl_t *s_ctrl) +{ + int rc = 0; + struct timespec64 ts; + uint64_t ms, sec, min, hrs; + + if (s_ctrl->sensor_state != CAM_SENSOR_START && + (s_ctrl->sensor_state != CAM_SENSOR_STANDBY)) { + rc = -EINVAL; + CAM_WARN(CAM_SENSOR, + "Not in right state to stop %s state: %d", + s_ctrl->sensor_name, s_ctrl->sensor_state); + goto end; + } + + if (!s_ctrl->stream_off_on_flush && + s_ctrl->i2c_data.streamoff_settings.is_settings_valid && + 
(s_ctrl->i2c_data.streamoff_settings.request_id == 0)) { + rc = cam_sensor_apply_settings(s_ctrl, 0, + CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "cannot apply streamoff settings for %s", + s_ctrl->sensor_name); + } + + cam_sensor_release_per_frame_resource(s_ctrl); + s_ctrl->last_flush_req = 0; + s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE; + s_ctrl->stream_off_on_flush = false; + memset(s_ctrl->sensor_res, 0, sizeof(s_ctrl->sensor_res)); + + CAM_GET_TIMESTAMP(ts); + CAM_CONVERT_TIMESTAMP_FORMAT(ts, hrs, min, sec, ms); + + CAM_INFO(CAM_SENSOR, + "%llu:%llu:%llu.%llu CAM_STOP_DEV Success for %s sensor_id:0x%x,sensor_slave_addr:0x%x", + hrs, min, sec, ms, + s_ctrl->sensor_name, + s_ctrl->sensordata->slave_info.sensor_id, + s_ctrl->sensordata->slave_info.sensor_slave_addr); + +end: + return rc; +} + +int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl, + void *arg) +{ + int rc = 0, pkt_opcode = 0; + struct cam_control *cmd = (struct cam_control *)arg; + struct cam_sensor_power_ctrl_t *power_info = NULL; + struct timespec64 ts; + uint64_t ms, sec, min, hrs; + + if (!s_ctrl || !arg) { + CAM_ERR(CAM_SENSOR, "s_ctrl is NULL"); + return -EINVAL; + } + + power_info = &s_ctrl->sensordata->power_info; + + if (cmd->op_code != CAM_SENSOR_PROBE_CMD) { + if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_SENSOR, "Invalid handle type: %d", + cmd->handle_type); + return -EINVAL; + } + } + + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + switch (cmd->op_code) { + case CAM_SENSOR_PROBE_CMD: { + if (s_ctrl->is_probe_succeed == 1) { + CAM_WARN(CAM_SENSOR, + "Sensor %s already Probed in the slot", + s_ctrl->sensor_name); + break; + } + + if (cmd->handle_type == + CAM_HANDLE_MEM_HANDLE) { + rc = cam_handle_mem_ptr(cmd->handle, cmd->op_code, + s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Get Buffer Handle Failed"); + goto release_mutex; + } + } else { + CAM_ERR(CAM_SENSOR, "Invalid Command Type: %d", + cmd->handle_type); + rc = 
-EINVAL; + goto release_mutex; + } + + /* Parse and fill vreg params for powerup settings */ + rc = msm_camera_fill_vreg_params( + &s_ctrl->soc_info, + s_ctrl->sensordata->power_info.power_setting, + s_ctrl->sensordata->power_info.power_setting_size); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Fail in filling vreg params for %s PUP rc %d", + s_ctrl->sensor_name, rc); + goto free_power_settings; + } + + /* Parse and fill vreg params for powerdown settings*/ + rc = msm_camera_fill_vreg_params( + &s_ctrl->soc_info, + s_ctrl->sensordata->power_info.power_down_setting, + s_ctrl->sensordata->power_info.power_down_setting_size); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Fail in filling vreg params for %s PDOWN rc %d", + s_ctrl->sensor_name, rc); + goto free_power_settings; + } + + /* Power up and probe sensor */ + rc = cam_sensor_power_up(s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Power up failed for %s sensor_id: 0x%x, slave_addr: 0x%x", + s_ctrl->sensor_name, + s_ctrl->sensordata->slave_info.sensor_id, + s_ctrl->sensordata->slave_info.sensor_slave_addr + ); + goto free_power_settings; + } + + if (s_ctrl->i2c_data.reg_bank_unlock_settings.is_settings_valid) { + rc = cam_sensor_apply_settings(s_ctrl, 0, + CAM_SENSOR_PACKET_OPCODE_SENSOR_REG_BANK_UNLOCK); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "REG_bank unlock failed"); + cam_sensor_power_down(s_ctrl); + goto free_power_settings; + } + rc = delete_request(&(s_ctrl->i2c_data.reg_bank_unlock_settings)); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "failed while deleting REG_bank unlock settings"); + cam_sensor_power_down(s_ctrl); + goto free_power_settings; + } + } + + /* Match sensor ID */ + rc = cam_sensor_match_id(s_ctrl); + if (rc < 0) { + CAM_INFO(CAM_SENSOR, + "Probe failed for %s slot:%d, slave_addr:0x%x, sensor_id:0x%x", + s_ctrl->sensor_name, + s_ctrl->soc_info.index, + s_ctrl->sensordata->slave_info.sensor_slave_addr, + s_ctrl->sensordata->slave_info.sensor_id); + cam_sensor_power_down(s_ctrl); + goto 
free_power_settings; + } + + if (s_ctrl->i2c_data.reg_bank_lock_settings.is_settings_valid) { + rc = cam_sensor_apply_settings(s_ctrl, 0, + CAM_SENSOR_PACKET_OPCODE_SENSOR_REG_BANK_LOCK); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "REG_bank lock failed"); + cam_sensor_power_down(s_ctrl); + goto free_power_settings; + } + rc = delete_request(&(s_ctrl->i2c_data.reg_bank_lock_settings)); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "failed while deleting REG_bank lock settings"); + cam_sensor_power_down(s_ctrl); + goto free_power_settings; + } + } + + rc = cam_sensor_power_down(s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Fail in %s sensor Power Down", + s_ctrl->sensor_name); + goto free_power_settings; + } + + /* + * Set probe succeeded flag to 1 so that no other camera shall + * probed on this slot + */ + s_ctrl->is_probe_succeed = 1; + s_ctrl->sensor_state = CAM_SENSOR_INIT; + + CAM_INFO(CAM_SENSOR, + "Probe success for %s slot:%d,slave_addr:0x%x,sensor_id:0x%x", + s_ctrl->sensor_name, + s_ctrl->soc_info.index, + s_ctrl->sensordata->slave_info.sensor_slave_addr, + s_ctrl->sensordata->slave_info.sensor_id); + + } + break; + case CAM_ACQUIRE_DEV: { + struct cam_sensor_acquire_dev sensor_acq_dev; + struct cam_create_dev_hdl bridge_params; + + if ((s_ctrl->is_probe_succeed == 0) || + (s_ctrl->sensor_state != CAM_SENSOR_INIT)) { + CAM_WARN(CAM_SENSOR, + "Not in right state to aquire %s state: %d", + s_ctrl->sensor_name, s_ctrl->sensor_state); + rc = -EINVAL; + goto release_mutex; + } + + if (s_ctrl->bridge_intf.device_hdl != -1) { + CAM_ERR(CAM_SENSOR, + "%s Device is already acquired", + s_ctrl->sensor_name); + rc = -EINVAL; + goto release_mutex; + } + rc = copy_from_user(&sensor_acq_dev, + u64_to_user_ptr(cmd->handle), + sizeof(sensor_acq_dev)); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Failed Copying from user"); + goto release_mutex; + } + + bridge_params.session_hdl = sensor_acq_dev.session_handle; + bridge_params.ops = &s_ctrl->bridge_intf.ops; + 
bridge_params.v4l2_sub_dev_flag = 0; + bridge_params.media_entity_flag = 0; + bridge_params.priv = s_ctrl; + bridge_params.dev_id = CAM_SENSOR; + + sensor_acq_dev.device_handle = + cam_create_device_hdl(&bridge_params); + if (sensor_acq_dev.device_handle <= 0) { + rc = -EFAULT; + CAM_ERR(CAM_SENSOR, "Can not create device handle"); + goto release_mutex; + } + s_ctrl->bridge_intf.device_hdl = sensor_acq_dev.device_handle; + s_ctrl->bridge_intf.session_hdl = sensor_acq_dev.session_handle; + + CAM_DBG(CAM_SENSOR, "%s Device Handle: %d", + s_ctrl->sensor_name, sensor_acq_dev.device_handle); + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &sensor_acq_dev, + sizeof(struct cam_sensor_acquire_dev))) { + CAM_ERR(CAM_SENSOR, "Failed Copy to User"); + rc = -EFAULT; + goto release_mutex; + } + + rc = cam_sensor_power_up(s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Sensor Power up failed for %s sensor_id:0x%x, slave_addr:0x%x", + s_ctrl->sensor_name, + s_ctrl->sensordata->slave_info.sensor_id, + s_ctrl->sensordata->slave_info.sensor_slave_addr + ); + goto release_mutex; + } + + s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE; + s_ctrl->last_flush_req = 0; + s_ctrl->is_stopped_by_user = false; + s_ctrl->last_updated_req = 0; + s_ctrl->last_applied_req = 0; + s_ctrl->num_batched_frames = 0; + s_ctrl->last_applied_done_timestamp = 0; + s_ctrl->stream_off_on_flush = false; + memset(s_ctrl->sensor_res, 0, sizeof(s_ctrl->sensor_res)); + CAM_INFO(CAM_SENSOR, + "CAM_ACQUIRE_DEV Success for %s sensor_id:0x%x,sensor_slave_addr:0x%x", + s_ctrl->sensor_name, + s_ctrl->sensordata->slave_info.sensor_id, + s_ctrl->sensordata->slave_info.sensor_slave_addr); + } + break; + case CAM_RELEASE_DEV: { + if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) || + (s_ctrl->sensor_state == CAM_SENSOR_START)) { + rc = -EINVAL; + CAM_WARN(CAM_SENSOR, + "Not in right state to release %s state: %d", + s_ctrl->sensor_name, s_ctrl->sensor_state); + goto release_mutex; + } + + if (s_ctrl->bridge_intf.link_hdl != -1) 
{ + CAM_ERR(CAM_SENSOR, + "%s Device [%d] still active on link 0x%x", + s_ctrl->sensor_name, + s_ctrl->sensor_state, + s_ctrl->bridge_intf.link_hdl); + rc = -EAGAIN; + goto release_mutex; + } + + rc = cam_sensor_power_down(s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Sensor Power Down failed for %s sensor_id: 0x%x, slave_addr:0x%x", + s_ctrl->sensor_name, + s_ctrl->sensordata->slave_info.sensor_id, + s_ctrl->sensordata->slave_info.sensor_slave_addr + ); + goto release_mutex; + } + + cam_sensor_release_per_frame_resource(s_ctrl); + cam_sensor_release_stream_rsc(s_ctrl); + if (s_ctrl->bridge_intf.device_hdl == -1) { + CAM_ERR(CAM_SENSOR, + "Invalid Handles: %s link hdl: %d device hdl: %d", + s_ctrl->sensor_name, + s_ctrl->bridge_intf.device_hdl, + s_ctrl->bridge_intf.link_hdl); + rc = -EINVAL; + goto release_mutex; + } + rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "Failed in destroying %s device hdl", + s_ctrl->sensor_name); + s_ctrl->bridge_intf.device_hdl = -1; + s_ctrl->bridge_intf.link_hdl = -1; + s_ctrl->bridge_intf.session_hdl = -1; + + s_ctrl->sensor_state = CAM_SENSOR_INIT; + CAM_INFO(CAM_SENSOR, + "CAM_RELEASE_DEV Success for %s sensor_id:0x%x, slave_addr:0x%x", + s_ctrl->sensor_name, + s_ctrl->sensordata->slave_info.sensor_id, + s_ctrl->sensordata->slave_info.sensor_slave_addr); + s_ctrl->streamon_count = 0; + s_ctrl->streamoff_count = 0; + s_ctrl->last_flush_req = 0; + s_ctrl->last_applied_done_timestamp = 0; + s_ctrl->stream_off_on_flush = false; + } + break; + case CAM_QUERY_CAP: { + struct cam_sensor_query_cap sensor_cap; + + CAM_DBG(CAM_SENSOR, "%s Sensor Queried", s_ctrl->sensor_name); + cam_sensor_query_cap(s_ctrl, &sensor_cap); + if (copy_to_user(u64_to_user_ptr(cmd->handle), + &sensor_cap, sizeof(struct cam_sensor_query_cap))) { + CAM_ERR(CAM_SENSOR, "Failed Copy to User"); + rc = -EFAULT; + goto release_mutex; + } + break; + } + case CAM_START_DEV: { + struct cam_req_mgr_timer_notify 
timer; + if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) || + (s_ctrl->sensor_state == CAM_SENSOR_START) || + s_ctrl->sensor_state == CAM_SENSOR_STANDBY) { + rc = -EINVAL; + CAM_WARN(CAM_SENSOR, + "Not in right state to start %s state: %d", + s_ctrl->sensor_name, + s_ctrl->sensor_state); + goto release_mutex; + } + + if (s_ctrl->i2c_data.streamon_settings.is_settings_valid && + (s_ctrl->i2c_data.streamon_settings.request_id == 0)) { + rc = cam_sensor_apply_settings(s_ctrl, 0, + CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "cannot apply streamon settings for %s", + s_ctrl->sensor_name); + goto release_mutex; + } + } + s_ctrl->sensor_state = CAM_SENSOR_START; + + if (s_ctrl->stream_off_after_eof) + s_ctrl->is_stopped_by_user = false; + + if (s_ctrl->bridge_intf.crm_cb && + s_ctrl->bridge_intf.crm_cb->notify_timer) { + timer.link_hdl = s_ctrl->bridge_intf.link_hdl; + timer.dev_hdl = s_ctrl->bridge_intf.device_hdl; + timer.state = true; + rc = s_ctrl->bridge_intf.crm_cb->notify_timer(&timer); + if (rc) { + CAM_ERR(CAM_SENSOR, + "%s Enable CRM SOF freeze timer failed rc: %d", + s_ctrl->sensor_name, rc); + goto release_mutex; + } + } + + CAM_GET_TIMESTAMP(ts); + CAM_CONVERT_TIMESTAMP_FORMAT(ts, hrs, min, sec, ms); + + CAM_INFO(CAM_SENSOR, + "%llu:%llu:%llu.%llu CAM_START_DEV Success for %s sensor_id:0x%x,sensor_slave_addr:0x%x num_batched_frames:%d", + hrs, min, sec, ms, + s_ctrl->sensor_name, + s_ctrl->sensordata->slave_info.sensor_id, + s_ctrl->sensordata->slave_info.sensor_slave_addr, + s_ctrl->num_batched_frames); + } + break; + case CAM_STOP_DEV: { + if (s_ctrl->stream_off_after_eof) { + s_ctrl->is_stopped_by_user = true; + CAM_DBG(CAM_SENSOR, + "Ignore stop dev cmd, sensor %s already in streamed off state", + s_ctrl->sensor_name); + goto release_mutex; + } + + rc = cam_sensor_stream_off(s_ctrl); + if (rc) + goto release_mutex; + } + break; + case CAM_CONFIG_DEV: { + rc = cam_sensor_pkt_parse(s_ctrl, arg); + if (rc < 0) { + if (rc 
== -EBADR) + CAM_INFO(CAM_SENSOR, + "%s:Failed pkt parse. rc: %d, it has been flushed", + s_ctrl->sensor_name, rc); + else + CAM_ERR(CAM_SENSOR, + "%s:Failed pkt parse. rc: %d", + s_ctrl->sensor_name, rc); + goto release_mutex; + } + if (s_ctrl->i2c_data.init_settings.is_settings_valid && + (s_ctrl->i2c_data.init_settings.request_id == 0)) { + + pkt_opcode = + CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG; + rc = cam_sensor_apply_settings(s_ctrl, 0, + pkt_opcode); + + if ((rc == -EAGAIN) && + (s_ctrl->io_master_info.master_type == CCI_MASTER)) { + /* If CCI hardware is resetting we need to wait + * for sometime before reapply + */ + CAM_WARN(CAM_SENSOR, + "%s: Reapplying the Init settings due to cci hw reset", + s_ctrl->sensor_name); + usleep_range(1000, 1010); + rc = cam_sensor_apply_settings(s_ctrl, 0, + pkt_opcode); + } + s_ctrl->i2c_data.init_settings.request_id = -1; + + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "%s: cannot apply init settings rc= %d", + s_ctrl->sensor_name, rc); + delete_request(&s_ctrl->i2c_data.init_settings); + goto release_mutex; + } + rc = delete_request(&s_ctrl->i2c_data.init_settings); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "%s: Fail in deleting the Init settings", + s_ctrl->sensor_name); + goto release_mutex; + } + } + + if (s_ctrl->i2c_data.config_settings.is_settings_valid && + (s_ctrl->i2c_data.config_settings.request_id == 0)) { + if (s_ctrl->sensor_state == CAM_SENSOR_START) { + delete_request(&s_ctrl->i2c_data.config_settings); + CAM_ERR(CAM_SENSOR, + "%s: get config setting in start state", + s_ctrl->sensor_name); + goto release_mutex; + } + + rc = cam_sensor_apply_settings(s_ctrl, 0, + CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG); + + s_ctrl->i2c_data.config_settings.request_id = -1; + + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "%s: cannot apply config settings", + s_ctrl->sensor_name); + delete_request( + &s_ctrl->i2c_data.config_settings); + goto release_mutex; + } + rc = delete_request(&s_ctrl->i2c_data.config_settings); + if (rc < 
0) { + CAM_ERR(CAM_SENSOR, + "%s: Fail in deleting the config settings", + s_ctrl->sensor_name); + goto release_mutex; + } + s_ctrl->sensor_state = CAM_SENSOR_CONFIG; + } + + if (s_ctrl->i2c_data.read_settings.is_settings_valid) { + if (!s_ctrl->hw_no_ops) + rc = cam_sensor_i2c_read_data( + &s_ctrl->i2c_data.read_settings, + &s_ctrl->io_master_info); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "%s: cannot read data: %d", + s_ctrl->sensor_name, rc); + delete_request(&s_ctrl->i2c_data.read_settings); + goto release_mutex; + } + rc = delete_request( + &s_ctrl->i2c_data.read_settings); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "%s: Fail in deleting the read settings", + s_ctrl->sensor_name); + goto release_mutex; + } + } + + if ((s_ctrl->stream_off_on_flush) && (s_ctrl->sensor_state == CAM_SENSOR_CONFIG)) { + rc = cam_sensor_notify_msg_req_mgr( + CAM_REQ_MGR_MSG_NOTIFY_FOR_SYNCED_RESUME, s_ctrl); + if (!rc) + s_ctrl->stream_off_on_flush = false; + } + } + break; + default: + CAM_ERR(CAM_SENSOR, "%s: Invalid Opcode: %d", + s_ctrl->sensor_name, cmd->op_code); + rc = -EINVAL; + goto release_mutex; + } + +release_mutex: + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return rc; + +free_power_settings: + CAM_MEM_FREE(power_info->power_setting); + CAM_MEM_FREE(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_down_setting_size = 0; + power_info->power_setting_size = 0; + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return rc; +} + +int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info) +{ + int rc = 0; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + + if (!info) + return -EINVAL; + + s_ctrl = (struct cam_sensor_ctrl_t *) + cam_get_device_priv(info->dev_hdl); + + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "Device data is NULL"); + return -EINVAL; + } + + info->dev_id = CAM_REQ_MGR_DEVICE_SENSOR; + snprintf(info->name, sizeof(info->name), "%s(%s)", + CAM_SENSOR_NAME, s_ctrl->sensor_name); + if 
(s_ctrl->num_batched_frames >= 2) { + info->p_delay = 1; + info->m_delay = s_ctrl->modeswitch_delay; + } else if (s_ctrl->pipeline_delay >= 1 && s_ctrl->pipeline_delay <= 3) { + info->p_delay = s_ctrl->pipeline_delay; + info->m_delay = s_ctrl->modeswitch_delay; + } else { + info->p_delay = CAM_PIPELINE_DELAY_2; + info->m_delay = CAM_MODESWITCH_DELAY_2; + } + info->trigger = CAM_TRIGGER_POINT_SOF; + info->resume_sync_on = true; + + CAM_DBG(CAM_REQ, "num batched frames %d p_delay is %d", + s_ctrl->num_batched_frames, info->p_delay); + + return rc; +} + +int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link) +{ + struct cam_sensor_ctrl_t *s_ctrl = NULL; + + if (!link) + return -EINVAL; + + s_ctrl = (struct cam_sensor_ctrl_t *) + cam_get_device_priv(link->dev_hdl); + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "Device data is NULL"); + return -EINVAL; + } + + mutex_lock(&s_ctrl->cam_sensor_mutex); + if (link->link_enable) { + s_ctrl->bridge_intf.link_hdl = link->link_hdl; + s_ctrl->bridge_intf.crm_cb = link->crm_cb; + } else { + s_ctrl->bridge_intf.link_hdl = -1; + s_ctrl->bridge_intf.crm_cb = NULL; + } + mutex_unlock(&s_ctrl->cam_sensor_mutex); + + return 0; +} + +int cam_sensor_power(struct v4l2_subdev *sd, int on) +{ + struct cam_sensor_ctrl_t *s_ctrl = v4l2_get_subdevdata(sd); + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "s_ctrl ptr is NULL"); + return -EINVAL; + } + + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + if (!on && s_ctrl->sensor_state == CAM_SENSOR_START) { + cam_sensor_power_down(s_ctrl); + s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE; + } + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + + return 0; +} + +int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl) +{ + int rc = 0; + struct cam_sensor_power_ctrl_t *power_info; + struct cam_camera_slave_info *slave_info; + struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info; + struct completion *i3c_probe_completion = NULL; + + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "failed: %pK", s_ctrl); + return -EINVAL; 
}

	/* Simulation/no-hardware mode: report success without touching HW. */
	if (s_ctrl->hw_no_ops)
		return rc;

	power_info = &s_ctrl->sensordata->power_info;
	slave_info = &(s_ctrl->sensordata->slave_info);

	/*
	 * NOTE(review): both pointers are addresses of embedded members and
	 * can never be NULL here once sensordata is valid — this check looks
	 * unreachable; confirm whether sensordata itself needs checking.
	 */
	if (!power_info || !slave_info) {
		CAM_ERR(CAM_SENSOR, "failed: %pK %pK", power_info, slave_info);
		return -EINVAL;
	}

	/* Optionally switch the BoB regulator to PWM mode for power-up. */
	if (s_ctrl->bob_pwm_switch) {
		if (cam_sensor_bob_pwm_mode_switch(soc_info,
			s_ctrl->bob_reg_index, true))
			CAM_WARN(CAM_SENSOR, "BoB PWM setup failed");
	}

	/* AON sensor: hand the CSIPHY over from AON FW to the main camera. */
	if (s_ctrl->aon_camera_id != NOT_AON_CAM) {
		CAM_INFO(CAM_SENSOR,
			"Setup for Main Camera with csiphy index: %d",
			s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]);
		rc = cam_sensor_util_aon_ops(true,
			s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]);
		if (rc) {
			CAM_ERR(CAM_SENSOR,
				"Main camera access operation is not successful rc: %d",
				rc);
			return rc;
		}
	}

	if (s_ctrl->io_master_info.master_type == I3C_MASTER)
		i3c_probe_completion = cam_sensor_get_i3c_completion(s_ctrl->soc_info.index);

	/* Check before power_up if i3c sensor has the master handle, Which means
	 * either Camera Daemon had restarted and probe cmd is issued again (or)
	 * acquire is called from UMD
	 */
	if ((s_ctrl->io_master_info.master_type == I3C_MASTER) &&
		(s_ctrl->io_master_info.qup_client != NULL)) {
		s_ctrl->io_master_info.qup_client->i3c_wait_for_hotjoin = true;
	}

	rc = cam_sensor_core_power_up(power_info, soc_info, i3c_probe_completion);
	if (rc < 0) {
		CAM_ERR(CAM_SENSOR, "core power up failed:%d", rc);
		goto powerup_failure;
	}

	rc = camera_io_init(&(s_ctrl->io_master_info));
	if (rc < 0) {
		CAM_ERR(CAM_SENSOR, "cci_init failed: rc: %d", rc);
		goto cci_failure;
	}

	return rc;

cci_failure:
	/* Roll back the regulator/clock power-up. */
	if (cam_sensor_util_power_down(power_info, soc_info))
		CAM_ERR(CAM_SENSOR, "power down failure");

powerup_failure:
	/* Give the CSIPHY back to AON FW if we took it above. */
	if (s_ctrl->aon_camera_id != NOT_AON_CAM) {
		if (cam_sensor_util_aon_ops(false,
			s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]))
			CAM_ERR(CAM_SENSOR,
				"Main camera disable CPAS operation is not successful");
	}
	return rc;

}

/*
 * cam_sensor_power_down - power-off sequence mirroring cam_sensor_power_up.
 * Each step runs even if a previous one failed, so resources are always
 * released; the first error is remembered in rc and returned.
 */
int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
{
	struct cam_sensor_power_ctrl_t *power_info;
	struct cam_hw_soc_info *soc_info;
	int ret, rc = 0;

	if (!s_ctrl) {
		CAM_ERR(CAM_SENSOR, "failed: s_ctrl %pK", s_ctrl);
		return -EINVAL;
	}

	if (s_ctrl->hw_no_ops)
		return rc;

	/*
	 * Even if any section of powerdown fails, we still need to move on
	 * to release resources, otherwise it might block camera from opening
	 * next time. Report the error as well.
	 */

	power_info = &s_ctrl->sensordata->power_info;
	soc_info = &s_ctrl->soc_info;

	if (!power_info) {
		CAM_ERR(CAM_SENSOR, "failed: %s power_info %pK",
			s_ctrl->sensor_name, power_info);
		rc = -EINVAL;
	} else {
		ret = cam_sensor_util_power_down(power_info, soc_info);
		if (ret < 0) {
			CAM_ERR(CAM_SENSOR, "%s core power down failed:%d",
				s_ctrl->sensor_name, ret);
			rc = ret;
		}
	}

	/* Return CSIPHY ownership to AON firmware. */
	if (s_ctrl->aon_camera_id != NOT_AON_CAM) {
		CAM_INFO(CAM_SENSOR,
			"Setup for AON FW with csiphy index: %d",
			s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]);
		ret = cam_sensor_util_aon_ops(false,
			s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]);
		if (ret) {
			CAM_ERR(CAM_SENSOR,
				"Main camera disable CPAS operation failed ret: %d", ret);
			rc = ret;
		}
	}

	/* Restore the BoB regulator mode switched in power-up. */
	if (s_ctrl->bob_pwm_switch) {
		ret = cam_sensor_bob_pwm_mode_switch(soc_info,
			s_ctrl->bob_reg_index, false);
		if (ret)
			CAM_WARN(CAM_SENSOR,
				"%s BoB PWM setup failed ret: %d",
				s_ctrl->sensor_name, ret);
	}

	camera_io_release(&(s_ctrl->io_master_info));

	return rc;
}

/*
 * cam_sensor_apply_settings - write a batch of i2c settings to the sensor.
 * req_id == 0 selects one of the non-per-frame setting arrays by opcode;
 * req_id > 0 indexes the per-frame ring buffers and garbage-collects
 * settings older than the pipeline delay.
 */
int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl,
	int64_t req_id, enum cam_sensor_packet_opcodes opcode)
{
	int rc = 0, offset, i, j;
	uint64_t top = 0, del_req_id = 0;
	struct i2c_settings_array *i2c_set = NULL;
	struct i2c_settings_array *i2c_set_per_req[CAM_SENSOR_MAX_PER_REQ_SETTINGS];
	struct i2c_settings_list *i2c_list;
	ktime_t current_time;
	struct timespec64
current_ts; + + if (req_id == 0) { + switch (opcode) { + case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON: { + i2c_set = &s_ctrl->i2c_data.streamon_settings; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG: { + i2c_set = &s_ctrl->i2c_data.init_settings; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG: { + i2c_set = &s_ctrl->i2c_data.config_settings; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF: { + i2c_set = &s_ctrl->i2c_data.streamoff_settings; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_REG_BANK_UNLOCK: { + i2c_set = &s_ctrl->i2c_data.reg_bank_unlock_settings; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_REG_BANK_LOCK: { + i2c_set = &s_ctrl->i2c_data.reg_bank_lock_settings; + break; + } + case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE: + case CAM_SENSOR_PACKET_OPCODE_SENSOR_FRAME_SKIP_UPDATE: + case CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE: + case CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE_V2: + default: + return 0; + } + if (i2c_set->is_settings_valid == 1) { + list_for_each_entry(i2c_list, + &(i2c_set->list_head), list) { + if (!s_ctrl->hw_no_ops) + rc = cam_sensor_i2c_modes_util( + &(s_ctrl->io_master_info), + i2c_list); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to apply settings: %d", + rc); + return rc; + } + } + } + ktime_get_clocktai_ts64(&i2c_set->applied_timestamp); + } else if (req_id > 0) { + offset = req_id % MAX_PER_FRAME_ARRAY; + + i2c_set_per_req[0] = s_ctrl->i2c_data.per_frame; + i2c_set_per_req[1] = s_ctrl->i2c_data.frame_skip; + i2c_set_per_req[2] = s_ctrl->i2c_data.bubble_update; + i2c_set_per_req[3] = s_ctrl->i2c_data.deferred_frame_update; + + if (opcode == CAM_SENSOR_PACKET_OPCODE_SENSOR_FRAME_SKIP_UPDATE) + i2c_set = s_ctrl->i2c_data.frame_skip; + else if (opcode == CAM_SENSOR_PACKET_OPCODE_SENSOR_BUBBLE_UPDATE) { + i2c_set = s_ctrl->i2c_data.bubble_update; + /* + * If bubble update isn't valid, then we just use + * per frame update. 
+ */ + if (!(i2c_set[offset].is_settings_valid == 1) && + (i2c_set[offset].request_id == req_id)) + i2c_set = s_ctrl->i2c_data.per_frame; + } else if (opcode == CAM_SENSOR_PACKET_OPCODE_SENSOR_DEFERRED_META) + i2c_set = s_ctrl->i2c_data.deferred_frame_update; + else + i2c_set = s_ctrl->i2c_data.per_frame; + + if (i2c_set[offset].is_settings_valid == 1 && + i2c_set[offset].request_id == req_id) { + list_for_each_entry(i2c_list, + &(i2c_set[offset].list_head), list) { + if (!s_ctrl->hw_no_ops) + rc = cam_sensor_i2c_modes_util( + &(s_ctrl->io_master_info), + i2c_list); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to apply settings: %d", + rc); + return rc; + } + } + CAM_DBG(CAM_SENSOR, "applied req_id: %llu", req_id); + } else { + CAM_DBG(CAM_SENSOR, + "Invalid/NOP request to apply: %lld", req_id); + } + + /* Record the sensor setting applied done timestamp */ + current_time = ktime_get(); + current_ts = ktime_to_timespec64(current_time); + s_ctrl->last_applied_done_timestamp = current_ts.tv_sec * NSEC_PER_SEC + + current_ts.tv_nsec; + CAM_DBG(CAM_REQ, + "Apply req:%lld done on %ld:%06ld last_applied_done_timestamp:0x%llx", + req_id, current_ts.tv_sec, + current_ts.tv_nsec/NSEC_PER_USEC, s_ctrl->last_applied_done_timestamp); + + s_ctrl->last_applied_req = req_id; + ktime_get_clocktai_ts64(&i2c_set[offset].applied_timestamp); + CAM_DBG(CAM_REQ, + "Sensor[%d] updating last_applied [req id: %lld last_applied: %lld] with opcode:%d", + s_ctrl->soc_info.index, req_id, s_ctrl->last_applied_req, opcode); + + /* Change the logic dynamically */ + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + if ((req_id >= + i2c_set[i].request_id + s_ctrl->pipeline_delay) && + (top < + i2c_set[i].request_id + s_ctrl->pipeline_delay) && + (i2c_set[i].is_settings_valid + == 1)) { + del_req_id = top; + top = i2c_set[i].request_id; + } + } + + if ((top < req_id) & (req_id > s_ctrl->pipeline_delay)) { + if ((((top % MAX_PER_FRAME_ARRAY) - (req_id % + MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) || + 
(((top % MAX_PER_FRAME_ARRAY) - (req_id % + MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX)) + del_req_id = req_id - s_ctrl->pipeline_delay - 1; + } + + if (!del_req_id) + return rc; + + CAM_DBG(CAM_SENSOR, "top: %llu, del_req_id:%llu", + top, del_req_id); + + /* Delete all the per request settings */ + for (i = 0; i < CAM_SENSOR_MAX_PER_REQ_SETTINGS; i++) { + i2c_set = i2c_set_per_req[i]; + for (j = 0; j < MAX_PER_FRAME_ARRAY; j++) { + if ((del_req_id > + i2c_set[j].request_id) && ( + i2c_set[j].is_settings_valid + == 1)) { + i2c_set[j].request_id = 0; + rc = delete_request( + &(i2c_set[j])); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "Delete request Fail:%lld rc:%d i:%d j:%d", + del_req_id, rc, i, j); + } + } + } + } + + return rc; +} + +int32_t cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply) +{ + int32_t rc = 0; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + int32_t curr_idx, last_applied_idx; + enum cam_sensor_packet_opcodes opcode = + CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE; + + if (!apply) + return -EINVAL; + + s_ctrl = (struct cam_sensor_ctrl_t *) + cam_get_device_priv(apply->dev_hdl); + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "Device data is NULL"); + return -EINVAL; + } + + if ((apply->recovery) && (apply->request_id > 0)) { + if (apply->request_id <= s_ctrl->last_applied_req) { + /* + * Use bubble update for request reapply + */ + curr_idx = apply->request_id % MAX_PER_FRAME_ARRAY; + last_applied_idx = s_ctrl->last_applied_req % MAX_PER_FRAME_ARRAY; + opcode = CAM_SENSOR_PACKET_OPCODE_SENSOR_BUBBLE_UPDATE; + CAM_INFO(CAM_REQ, + "Sensor[%d] update req id: %lld [last_applied: %lld] with opcode:%d recovery: %d last_applied_res_idx: %u current_res_idx: %u", + s_ctrl->soc_info.index, apply->request_id, + s_ctrl->last_applied_req, opcode, apply->recovery, + s_ctrl->sensor_res[last_applied_idx].res_index, + s_ctrl->sensor_res[curr_idx].res_index); + } + } + + CAM_DBG(CAM_REQ, + "Sensor[%d] update req id: %lld [last_applied: %lld] with opcode:%d recovery: %d", + 
s_ctrl->soc_info.index, apply->request_id, + s_ctrl->last_applied_req, opcode, apply->recovery); + trace_cam_apply_req("Sensor", s_ctrl->soc_info.index, apply->request_id, apply->link_hdl); + + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + rc = cam_sensor_apply_settings(s_ctrl, apply->request_id, + opcode); + apply->last_applied_done_timestamp = s_ctrl->last_applied_done_timestamp; + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return rc; +} + +int32_t cam_sensor_notify_frame_skip(struct cam_req_mgr_apply_request *apply) +{ + int32_t rc = 0; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + enum cam_sensor_packet_opcodes opcode = + CAM_SENSOR_PACKET_OPCODE_SENSOR_FRAME_SKIP_UPDATE; + + if (!apply) + return -EINVAL; + + s_ctrl = (struct cam_sensor_ctrl_t *) + cam_get_device_priv(apply->dev_hdl); + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "Device data is NULL"); + return -EINVAL; + } + + CAM_DBG(CAM_REQ, "Sensor[%d] handle frame skip for req id: %lld", + s_ctrl->soc_info.index, apply->request_id); + trace_cam_notify_frame_skip("Sensor", apply->request_id); + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + rc = cam_sensor_apply_settings(s_ctrl, apply->request_id, + opcode); + /* + * If mode switch delay is 1, and there are no further requests + */ + if ((s_ctrl->modeswitch_delay == CAM_MODESWITCH_DELAY_1) && (apply->no_further_requests)) { + cam_sensor_apply_settings(s_ctrl, apply->request_id, + CAM_SENSOR_PACKET_OPCODE_SENSOR_DEFERRED_META); + CAM_DBG(CAM_SENSOR, "Sensor[%d] applying deferred settings from req id: %lld", + s_ctrl->soc_info.index, apply->request_id); + } + + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return rc; +} + +int32_t cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush_req) +{ + int32_t rc = 0, i; + uint32_t cancel_req_id_found = 0; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + struct i2c_settings_array *i2c_set = NULL; + + if (!flush_req) + return -EINVAL; + + s_ctrl = (struct cam_sensor_ctrl_t *) + cam_get_device_priv(flush_req->dev_hdl); + if 
(!s_ctrl) { + CAM_ERR(CAM_SENSOR, "Device data is NULL"); + return -EINVAL; + } + + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + if ((s_ctrl->sensor_state != CAM_SENSOR_START) && + (s_ctrl->sensor_state != CAM_SENSOR_CONFIG)) { + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return rc; + } + + if (s_ctrl->i2c_data.deferred_frame_update == NULL) { + CAM_ERR(CAM_SENSOR, "i2c frame data is NULL"); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return -EINVAL; + } + + if (s_ctrl->i2c_data.per_frame == NULL) { + CAM_ERR(CAM_SENSOR, "i2c frame data is NULL"); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return -EINVAL; + } + + if (s_ctrl->i2c_data.frame_skip == NULL) { + CAM_ERR(CAM_SENSOR, "i2c not ready data is NULL"); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return -EINVAL; + } + + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { + s_ctrl->last_flush_req = flush_req->req_id; + CAM_DBG(CAM_SENSOR, "last reqest to flush is %lld", + flush_req->req_id); + + if (s_ctrl->stream_off_after_eof || flush_req->enable_sensor_standby) { + cam_sensor_stream_off(s_ctrl); + s_ctrl->is_stopped_by_user = false; + if (flush_req->enable_sensor_standby) { + s_ctrl->sensor_state = CAM_SENSOR_STANDBY; + s_ctrl->stream_off_on_flush = true; + } + } + + s_ctrl->stream_off_on_flush = flush_req->enable_sensor_standby; + } + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(s_ctrl->i2c_data.deferred_frame_update[i]); + + if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) + && (i2c_set->request_id != flush_req->req_id)) + continue; + + if (i2c_set->is_settings_valid == 1) { + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "delete request for no valid deferred req: %lld rc: %d", + i2c_set->request_id, rc); + + if (flush_req->type == + CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + cancel_req_id_found = 1; + break; + } + } + } + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(s_ctrl->i2c_data.per_frame[i]); + + if ((flush_req->type == 
CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) + && (i2c_set->request_id != flush_req->req_id)) + continue; + + if (i2c_set->is_settings_valid == 1) { + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "delete request: %lld rc: %d", + i2c_set->request_id, rc); + + if (flush_req->type == + CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + cancel_req_id_found = 1; + break; + } + } + } + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(s_ctrl->i2c_data.frame_skip[i]); + + if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) + && (i2c_set->request_id != flush_req->req_id)) + continue; + + if (i2c_set->is_settings_valid == 1) { + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_SENSOR, + "delete request for not ready packet: %lld rc: %d", + i2c_set->request_id, rc); + + if (flush_req->type == + CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + cancel_req_id_found = 1; + break; + } + } + } + + if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ && + !cancel_req_id_found) + CAM_DBG(CAM_SENSOR, + "Flush request id:%lld not found in the pending list", + flush_req->req_id); + + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + return rc; +} + +int cam_sensor_process_evt(struct cam_req_mgr_link_evt_data *evt_data) +{ + int rc = 0, offset; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + struct i2c_settings_array *i2c_set = NULL; + + if (!evt_data) + return -EINVAL; + + s_ctrl = (struct cam_sensor_ctrl_t *) + cam_get_device_priv(evt_data->dev_hdl); + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "Device data is NULL"); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR, "Received evt:%d", evt_data->evt_type); + + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + + switch (evt_data->evt_type) { + case CAM_REQ_MGR_LINK_EVT_EOF: + if (s_ctrl->stream_off_after_eof && !s_ctrl->stream_off_on_flush) { + rc = cam_sensor_stream_off(s_ctrl); + if (rc) { + CAM_ERR(CAM_SENSOR, "Failed to stream off %s", + s_ctrl->sensor_name); + + cam_sensor_notify_v4l2_error_event(s_ctrl, + 
CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY,
					CAM_REQ_MGR_SENSOR_STREAM_OFF_FAILED);
			}
		}
		break;
	case CAM_REQ_MGR_LINK_EVT_UPDATE_PROPERTIES:
		/* Toggle EOF-driven stream-off from the link property mask. */
		if (evt_data->u.properties_mask &
			CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF)
			s_ctrl->stream_off_after_eof = true;
		else
			s_ctrl->stream_off_after_eof = false;

		CAM_DBG(CAM_SENSOR, "sensor %s stream off after eof:%s",
			s_ctrl->sensor_name,
			CAM_BOOL_TO_YESNO(s_ctrl->stream_off_after_eof));
		break;
	case CAM_REQ_MGR_LINK_EVT_RESUME_HW: {
		struct timespec64 ts;
		uint64_t ms, sec, min, hrs;

		/* Re-apply stream-on after a standby flush and go to START. */
		if (s_ctrl->i2c_data.streamon_settings.is_settings_valid &&
			(s_ctrl->i2c_data.streamon_settings.request_id == 0)) {
			rc = cam_sensor_apply_settings(s_ctrl, 0,
				CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON);
			if (rc < 0) {
				CAM_ERR(CAM_SENSOR,
					"cannot apply streamon settings for %s",
					s_ctrl->sensor_name);
				break;
			}
			s_ctrl->sensor_state = CAM_SENSOR_START;
			s_ctrl->stream_off_on_flush = false;
			CAM_GET_TIMESTAMP(ts);
			CAM_CONVERT_TIMESTAMP_FORMAT(ts, hrs, min, sec, ms);

			CAM_INFO(CAM_SENSOR,
				"%llu:%llu:%llu.%llu CAM_START_DEV for %s sensor_id:0x%x,sensor_slave_addr:0x%x num_batched_frames:%d post flush",
				hrs, min, sec, ms,
				s_ctrl->sensor_name,
				s_ctrl->sensordata->slave_info.sensor_id,
				s_ctrl->sensordata->slave_info.sensor_slave_addr,
				s_ctrl->num_batched_frames);
		}
	}
		break;
	case CAM_REQ_MGR_LINK_EVT_FRAME_DURATION_CHANGING: {
		/* Report whether this request carries frame-skip or (during
		 * recovery) bubble-update settings.
		 */
		evt_data->u.frame_duration_changing = false;
		offset = evt_data->req_id % MAX_PER_FRAME_ARRAY;
		i2c_set = s_ctrl->i2c_data.frame_skip;

		if (i2c_set[offset].is_settings_valid &&
			(i2c_set[offset].request_id == evt_data->req_id))
			evt_data->u.frame_duration_changing = true;

		i2c_set = s_ctrl->i2c_data.bubble_update;
		if (i2c_set[offset].is_settings_valid && evt_data->u.is_recovery &&
			(evt_data->req_id <= s_ctrl->last_applied_req) &&
			(i2c_set[offset].request_id == evt_data->req_id))
			evt_data->u.frame_duration_changing = true;
	}
		break;
	default:
		/* No handling */
		break;
	}

	mutex_unlock(&(s_ctrl->cam_sensor_mutex));

	return rc;
}

/*
 * cam_sensor_dump_request_info - log every register write queued for
 * req_id in the per-frame and frame-skip ring buffers (debug aid after an
 * error).  Per-frame writes are batched into log_info to limit log lines.
 */
static void cam_sensor_dump_request_info(struct cam_sensor_ctrl_t *s_ctrl,
	uint64_t req_id)
{
	int i, j, offset;
	size_t len = 0;
	char log_info[512];
	struct i2c_settings_array *i2c_set;
	struct i2c_settings_list *i2c_list;

	CAM_INFO(CAM_SENSOR, "\tSensor:%s dump req info for req:%llu",
		s_ctrl->sensor_name, req_id);

	offset = req_id % MAX_PER_FRAME_ARRAY;

	i2c_set = s_ctrl->i2c_data.per_frame;
	if (i2c_set[offset].is_settings_valid &&
		(i2c_set[offset].request_id == req_id)) {
		i = 0;
		list_for_each_entry(i2c_list,
			&(i2c_set[offset].list_head), list) {
			i++;
			CAM_INFO(CAM_SENSOR,
				"\tSensor:%s per_frame_setting list:%d setting size:%d req:%llu at time[%llu: %09llu]",
				s_ctrl->sensor_name, i,
				i2c_list->i2c_settings.size, req_id,
				i2c_set[offset].applied_timestamp.tv_sec,
				i2c_set[offset].applied_timestamp.tv_nsec);
			for (j = 0; j < i2c_list->i2c_settings.size; j++) {
				int log_info_len = snprintf(NULL, 0, "%04d: 0x%04x=0x%04x",
					j, i2c_list->i2c_settings.reg_setting[j].reg_addr,
					i2c_list->i2c_settings.reg_setting[j].reg_data);

				/* Check if the log buf has remaining space for new log */
				/* NOTE(review): 512 here duplicates the size of
				 * log_info — keep in sync if the buffer changes.
				 */
				if (512 - len < (log_info_len + 50)) {
					len = 0;
					CAM_INFO(CAM_SENSOR, "\t Sensor:%s req:%llu %s",
						s_ctrl->sensor_name, req_id, log_info);
				}

				CAM_INFO_BUF(CAM_SENSOR, log_info, 512, &len,
					"%04d: 0x%04x=0x%04x",
					j, i2c_list->i2c_settings.reg_setting[j].reg_addr,
					i2c_list->i2c_settings.reg_setting[j].reg_data);
			}
			len = 0;
			CAM_INFO(CAM_SENSOR, "\tSensor:%s req:%llu %s",
				s_ctrl->sensor_name, req_id, log_info);
		}
	} else {
		CAM_INFO(CAM_SENSOR,
			"\tsensor:%s per_frame_setting is_settings_valid:%d request_id:%llu",
			s_ctrl->sensor_name, i2c_set[offset].is_settings_valid,
			i2c_set[offset].request_id);
	}

	i2c_set = s_ctrl->i2c_data.frame_skip;
	if (i2c_set[offset].is_settings_valid &&
		(i2c_set[offset].request_id
== req_id)) { + i = 0; + list_for_each_entry(i2c_list, + &(i2c_set[offset].list_head), list) { + i++; + CAM_INFO(CAM_SENSOR, + "\tsensor:%s frame_skip_setting list:%d setting size:%d req:%llu at time[%llu: %09llu]", + s_ctrl->sensor_name, i, + i2c_list->i2c_settings.size, req_id, + i2c_set[offset].applied_timestamp.tv_sec, + i2c_set[offset].applied_timestamp.tv_nsec); + for (j = 0; j < i2c_list->i2c_settings.size; j++) + CAM_INFO(CAM_SENSOR, + "\tsensor:%s [%04d/%04d] req:%lld reg addr:0x%04x reg data:0x%04x", + s_ctrl->sensor_name, j, + i2c_list->i2c_settings.size, req_id, + i2c_list->i2c_settings.reg_setting[j].reg_addr, + i2c_list->i2c_settings.reg_setting[j].reg_data); + } + } else { + CAM_INFO(CAM_SENSOR, + "\tsensor:%s frame_skip_setting is_settings_valid:%d request_id:%llu", + s_ctrl->sensor_name, i2c_set[offset].is_settings_valid, + i2c_set[offset].request_id); + } +} + +int cam_sensor_dump_request(struct cam_req_mgr_dump_info *dump) +{ + int i, idx; + uint64_t req_id; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + + s_ctrl = (struct cam_sensor_ctrl_t *) + cam_get_device_priv(dump->dev_hdl); + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "Device data is NULL"); + return -EINVAL; + } + + CAM_INFO(CAM_SENSOR, + "Sensor:[%s-%d] dump req_info, last applied sensor req:%llu error_req:%llu", + s_ctrl->sensor_name, s_ctrl->soc_info.index, + s_ctrl->last_applied_req, dump->req_id); + + idx = s_ctrl->last_updated_req % MAX_PER_FRAME_ARRAY; + CAM_INFO(CAM_SENSOR, + "Sensor[%s-%d] Feature: 0x%x updated for request id: %lu, res index: %u, width: %d, height: %d, capability: %s, fps: %u", + s_ctrl->sensor_name, s_ctrl->soc_info.index, + s_ctrl->sensor_res[idx].feature_mask, + s_ctrl->sensor_res[idx].request_id, s_ctrl->sensor_res[idx].res_index, + s_ctrl->sensor_res[idx].width, s_ctrl->sensor_res[idx].height, + s_ctrl->sensor_res[idx].caps, s_ctrl->sensor_res[idx].fps); + + req_id = s_ctrl->last_applied_req; + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + for (i = 0; (req_id > 0) && 
(i <= (s_ctrl->pipeline_delay + 1)); + req_id--, i++) + cam_sensor_dump_request_info(s_ctrl, req_id); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.h new file mode 100644 index 0000000000..26c9c25e7f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_core.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018,2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_SENSOR_CORE_H_ +#define _CAM_SENSOR_CORE_H_ + +#include "cam_sensor_dev.h" + +/** + * @s_ctrl: Sensor ctrl structure + * + * This API powers up the camera sensor module + */ +int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl); + +/** + * @s_ctrl: Sensor ctrl structure + * + * This API powers down the camera sensor module + */ +int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl); + +/** + * @sd: V4L2 subdevice + * @on: Turn off/on flag + * + * This API powers down the sensor module + */ +int cam_sensor_power(struct v4l2_subdev *sd, int on); + +/** + * @s_ctrl: Sensor ctrl structure + * @req_id: Request id + * @opcode: opcode for settings + * + * This API applies the req_id settings to sensor + */ +int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl, int64_t req_id, + enum cam_sensor_packet_opcodes opcode); + +/** + * @apply: Req mgr structure for applying request + * + * This API applies the request that is mentioned + */ +int cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply); + +/** + * @apply: Req mgr structure for notifying frame skip + * + * This API notifies a frame is skipped + */ +int cam_sensor_notify_frame_skip(struct cam_req_mgr_apply_request *apply); + +/** + * @flush: Req mgr 
structure for flushing request + * + * This API flushes the request that is mentioned + */ +int cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush); + +/** + * @info: Sub device info to req mgr + * + * Publish the subdevice info + */ +int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info); + +/** + * @link: Link setup info + * + * This API establishes link with sensor subdevice with req mgr + */ +int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link); + +/** + * @evt_data: Event data info + * + * This API processes the event which is published by request mgr + */ +int cam_sensor_process_evt(struct cam_req_mgr_link_evt_data *evt_data); + +/** + * @dump: Dump information + * + * This API processes the information dump + */ +int cam_sensor_dump_request(struct cam_req_mgr_dump_info *dump); + +/** + * @s_ctrl: Sensor ctrl structure + * @arg: Camera control command argument + * + * This API handles the camera control argument reached to sensor + */ +int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl, void *arg); + +/** + * @s_ctrl: Sensor ctrl structure + * + * This API handles the camera sensor close/shutdown + */ +void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl); + +#endif /* _CAM_SENSOR_CORE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_dev.c new file mode 100644 index 0000000000..c25bb215ca --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_dev.c @@ -0,0 +1,889 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_sensor_dev.h" +#include "cam_req_mgr_dev.h" +#include "cam_sensor_soc.h" +#include "cam_sensor_core.h" +#include "camera_main.h" +#include "cam_compat.h" +#include "cam_mem_mgr_api.h" + +static struct cam_sensor_i3c_sensor_data { + struct cam_sensor_ctrl_t *s_ctrl; + struct completion probe_complete; +} g_i3c_sensor_data[MAX_CAMERAS]; + +struct completion *cam_sensor_get_i3c_completion(uint32_t index) +{ + return &g_i3c_sensor_data[index].probe_complete; +} + +static int cam_sensor_subdev_close_internal(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_sensor_ctrl_t *s_ctrl = + v4l2_get_subdevdata(sd); + + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "s_ctrl ptr is NULL"); + return -EINVAL; + } + + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + cam_sensor_shutdown(s_ctrl); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + + return 0; +} + +static int cam_sensor_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + bool crm_active = cam_req_mgr_is_open(); + + if (crm_active) { + CAM_DBG(CAM_SENSOR, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + return cam_sensor_subdev_close_internal(sd, fh); +} + +static long cam_sensor_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + int rc = 0; + struct cam_sensor_ctrl_t *s_ctrl = + v4l2_get_subdevdata(sd); + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_sensor_driver_cmd(s_ctrl, arg); + if (rc) { + if (rc == -EBADR) + CAM_INFO(CAM_SENSOR, + "Failed in Driver cmd: %d, it has been flushed", rc); + else if (rc != -ENODEV) + CAM_ERR(CAM_SENSOR, + "Failed in Driver cmd: %d", rc); + } + break; + case CAM_SD_SHUTDOWN: + if (!cam_req_mgr_is_shutdown()) { + CAM_ERR(CAM_CORE, "SD shouldn't come from user space"); + return 0; + } + + rc = cam_sensor_subdev_close_internal(sd, NULL); + break; + default: + CAM_ERR(CAM_SENSOR, "Invalid ioctl cmd: %d", cmd); + rc = -ENOIOCTLCMD; + break; + } + return rc; +} + +#ifdef CONFIG_COMPAT +static long 
cam_sensor_init_subdev_do_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + struct cam_control cmd_data; + int32_t rc = 0; + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_SENSOR, "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_sensor_subdev_ioctl(sd, cmd, &cmd_data); + if (rc < 0) + CAM_ERR(CAM_SENSOR, "cam_sensor_subdev_ioctl failed"); + break; + default: + CAM_ERR(CAM_SENSOR, "Invalid compat ioctl cmd_type: %d", cmd); + rc = -ENOIOCTLCMD; + break; + } + + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { + CAM_ERR(CAM_SENSOR, + "Failed to copy to user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + + return rc; +} + +#endif +static struct v4l2_subdev_core_ops cam_sensor_subdev_core_ops = { + .ioctl = cam_sensor_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_sensor_init_subdev_do_ioctl, +#endif + .s_power = cam_sensor_power, +}; + +static struct v4l2_subdev_ops cam_sensor_subdev_ops = { + .core = &cam_sensor_subdev_core_ops, +}; + +static const struct v4l2_subdev_internal_ops cam_sensor_internal_ops = { + .close = cam_sensor_subdev_close, +}; + +static int cam_sensor_init_subdev_params(struct cam_sensor_ctrl_t *s_ctrl) +{ + int rc = 0; + + s_ctrl->v4l2_dev_str.internal_ops = &cam_sensor_internal_ops; + s_ctrl->v4l2_dev_str.ops = &cam_sensor_subdev_ops; + strscpy(s_ctrl->device_name, CAMX_SENSOR_DEV_NAME, CAM_CTX_DEV_NAME_MAX_LENGTH); + s_ctrl->v4l2_dev_str.name = s_ctrl->device_name; + s_ctrl->v4l2_dev_str.sd_flags = (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS); + s_ctrl->v4l2_dev_str.ent_function = CAM_SENSOR_DEVICE_TYPE; + s_ctrl->v4l2_dev_str.token = s_ctrl; + s_ctrl->v4l2_dev_str.close_seq_prior = CAM_SD_CLOSE_MEDIUM_LOW_PRIORITY; + + rc = cam_register_subdev(&(s_ctrl->v4l2_dev_str)); 
+ if (rc) + CAM_ERR(CAM_SENSOR, "Fail with cam_register_subdev rc: %d", rc); + + return rc; +} + +static int cam_sensor_i3c_driver_probe(struct i3c_device *client) +{ + int32_t rc = 0; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + uint32_t index; + struct device *dev; + + if (!client) { + CAM_ERR(CAM_CSIPHY, "Invalid input args"); + return -EINVAL; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "Probe for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s failed to read cell-index", dev_name(dev)); + return rc; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return -EINVAL; + } + + s_ctrl = g_i3c_sensor_data[index].s_ctrl; + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "S_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return -EINVAL; + } + + dev->driver_data = s_ctrl; + + s_ctrl->io_master_info.qup_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_qup_client), GFP_KERNEL); + if (!(s_ctrl->io_master_info.qup_client)) { + CAM_ERR(CAM_SENSOR, "Unable to allocate memory for QUP handle for %s", + dev_name(dev)); + return -ENOMEM; + } + cam_sensor_utils_parse_pm_ctrl_flag(s_ctrl->of_node, &(s_ctrl->io_master_info)); + + CAM_INFO(CAM_SENSOR, + "master: %d (1-CCI, 2-I2C, 3-SPI, 4-I3C) pm_ctrl_client_enable: %d", + s_ctrl->io_master_info.master_type, + s_ctrl->io_master_info.qup_client->pm_ctrl_client_enable); + + s_ctrl->io_master_info.qup_client->i3c_client = client; + s_ctrl->io_master_info.qup_client->i3c_wait_for_hotjoin = false; + + complete_all(&g_i3c_sensor_data[index].probe_complete); + + CAM_DBG(CAM_SENSOR, "I3C Probe Finished for %s", dev_name(dev)); + return rc; +} + + +#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) +static void cam_i3c_driver_remove(struct i3c_device *client) +{ + int32_t rc = 0; + uint32_t index; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + struct 
device *dev; + + if (!client) { + CAM_ERR(CAM_SENSOR, "I3C Driver Remove: Invalid input args"); + return; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "driver remove for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s failed to read cell-index", dev_name(dev)); + return; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return; + } + + s_ctrl = g_i3c_sensor_data[index].s_ctrl; + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "S_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return; + } + + CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s", + (client ? dev_name(&client->dev) : "none")); + CAM_MEM_FREE(s_ctrl->io_master_info.qup_client); + s_ctrl->io_master_info.qup_client = NULL; +} +#else +static int cam_i3c_driver_remove(struct i3c_device *client) +{ + int32_t rc = 0; + uint32_t index; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + struct device *dev; + + if (!client) { + CAM_ERR(CAM_SENSOR, "I3C Driver Remove: Invalid input args"); + return -EINVAL; + } + + dev = &client->dev; + + CAM_DBG(CAM_SENSOR, "driver remove for I3C Slave %s", dev_name(dev)); + + rc = of_property_read_u32(dev->of_node, "cell-index", &index); + if (rc) { + CAM_ERR(CAM_UTIL, "device %s failed to read cell-index", dev_name(dev)); + return -EINVAL; + } + + if (index >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Invalid Cell-Index: %u for %s", index, dev_name(dev)); + return -EINVAL; + } + + s_ctrl = g_i3c_sensor_data[index].s_ctrl; + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "S_ctrl is null. I3C Probe before platfom driver probe for %s", + dev_name(dev)); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s", + (client ? 
dev_name(&client->dev) : "none")); + CAM_MEM_FREE(s_ctrl->io_master_info.qup_client); + s_ctrl->io_master_info.qup_client = NULL; + return 0; +} +#endif + +static int cam_sensor_i2c_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int32_t rc = 0; + int i = 0; + struct i2c_client *client = NULL; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + struct cam_hw_soc_info *soc_info = NULL; + struct timespec64 ts_start, ts_end; + long microsec = 0; + struct device_node *np = NULL; + const char *drv_name; + + CAM_GET_TIMESTAMP(ts_start); + client = container_of(dev, struct i2c_client, dev); + if (client == NULL) { + CAM_ERR(CAM_SENSOR, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + /* Create sensor control structure */ + s_ctrl = CAM_MEM_ZALLOC(sizeof(*s_ctrl), GFP_KERNEL); + if (!s_ctrl) + return -ENOMEM; + + s_ctrl->io_master_info.qup_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_qup_client), GFP_KERNEL); + if (!(s_ctrl->io_master_info.qup_client)) { + rc = -ENOMEM; + goto free_s_ctrl; + } + + i2c_set_clientdata(client, s_ctrl); + + s_ctrl->io_master_info.qup_client->i2c_client = client; + soc_info = &s_ctrl->soc_info; + soc_info->dev = &client->dev; + soc_info->dev_name = client->name; + + /* Initialize sensor device type */ + s_ctrl->of_node = client->dev.of_node; + s_ctrl->io_master_info.master_type = I2C_MASTER; + s_ctrl->is_probe_succeed = 0; + s_ctrl->last_flush_req = 0; + + np = of_node_get(client->dev.of_node); + drv_name = of_node_full_name(np); + + rc = cam_sensor_parse_dt(s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "cam_sensor_parse_dt rc %d", rc); + goto free_qup; + } + + rc = cam_sensor_init_subdev_params(s_ctrl); + if (rc) + goto free_qup; + + s_ctrl->i2c_data.per_frame = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (s_ctrl->i2c_data.per_frame == NULL) { + rc = -ENOMEM; + goto unreg_subdev; + } + + s_ctrl->i2c_data.frame_skip = + 
CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (s_ctrl->i2c_data.frame_skip == NULL) { + rc = -ENOMEM; + goto free_perframe; + } + + s_ctrl->i2c_data.deferred_frame_update = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (s_ctrl->i2c_data.deferred_frame_update == NULL) { + rc = -ENOMEM; + goto free_frame_skip; + } + + s_ctrl->i2c_data.bubble_update = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (s_ctrl->i2c_data.bubble_update == NULL) { + rc = -ENOMEM; + goto free_deferred_frame_update; + } + + INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.config_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamon_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamoff_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.reg_bank_unlock_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.reg_bank_lock_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.read_settings.list_head)); + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + INIT_LIST_HEAD(&(s_ctrl->i2c_data.deferred_frame_update[i].list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.per_frame[i].list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.frame_skip[i].list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.bubble_update[i].list_head)); + } + + cam_sensor_module_add_i2c_device((void *) s_ctrl, CAM_SENSOR_DEVICE); + + s_ctrl->bridge_intf.device_hdl = -1; + s_ctrl->bridge_intf.link_hdl = -1; + s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info; + s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link; + s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request; + s_ctrl->bridge_intf.ops.notify_frame_skip = + cam_sensor_notify_frame_skip; + s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request; + s_ctrl->bridge_intf.ops.process_evt = cam_sensor_process_evt; + 
s_ctrl->bridge_intf.ops.dump_req = cam_sensor_dump_request; + + s_ctrl->sensordata->power_info.dev = soc_info->dev; + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(drv_name, microsec); + of_node_put(np); + + return rc; + +free_deferred_frame_update: + CAM_MEM_FREE(s_ctrl->i2c_data.deferred_frame_update); +free_frame_skip: + CAM_MEM_FREE(s_ctrl->i2c_data.frame_skip); +free_perframe: + CAM_MEM_FREE(s_ctrl->i2c_data.per_frame); +unreg_subdev: + cam_unregister_subdev(&(s_ctrl->v4l2_dev_str)); +free_qup: + CAM_MEM_FREE(s_ctrl->io_master_info.qup_client); +free_s_ctrl: + CAM_MEM_FREE(s_ctrl); + return rc; +} + +static void cam_sensor_i2c_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct i2c_client *client = NULL; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + + client = container_of(dev, struct i2c_client, dev); + if (!client) { + CAM_ERR(CAM_SENSOR, + "Failed to get i2c client"); + return; + } + + s_ctrl = i2c_get_clientdata(client); + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "sensor device is NULL"); + return; + } + + CAM_DBG(CAM_SENSOR, "i2c remove invoked"); + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + cam_sensor_shutdown(s_ctrl); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + cam_unregister_subdev(&(s_ctrl->v4l2_dev_str)); + + CAM_MEM_FREE(s_ctrl->i2c_data.deferred_frame_update); + CAM_MEM_FREE(s_ctrl->i2c_data.per_frame); + CAM_MEM_FREE(s_ctrl->i2c_data.frame_skip); + CAM_MEM_FREE(s_ctrl->i2c_data.bubble_update); + v4l2_set_subdevdata(&(s_ctrl->v4l2_dev_str.sd), NULL); + CAM_MEM_FREE(s_ctrl->io_master_info.qup_client); + CAM_MEM_FREE(s_ctrl); +} + +const static struct component_ops cam_sensor_i2c_component_ops = { + .bind = cam_sensor_i2c_component_bind, + .unbind = cam_sensor_i2c_component_unbind, +}; + +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE +static int cam_sensor_i2c_driver_probe(struct i2c_client *client) +{ + int rc = 0; + + if (client == NULL) { 
+ CAM_ERR(CAM_SENSOR, "Invalid Args client: %pK", + client); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_SENSOR, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_SENSOR, "Adding sensor component"); + rc = component_add(&client->dev, &cam_sensor_i2c_component_ops); + if (rc) + CAM_ERR(CAM_SENSOR, "failed to add component rc: %d", rc); + + return rc; +} +#else +static int cam_sensor_i2c_driver_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + + if (client == NULL || id == NULL) { + CAM_ERR(CAM_SENSOR, "Invalid Args client: %pK id: %pK", + client, id); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_SENSOR, "%s :: i2c_check_functionality failed", + client->name); + return -EFAULT; + } + + CAM_DBG(CAM_SENSOR, "Adding sensor component"); + rc = component_add(&client->dev, &cam_sensor_i2c_component_ops); + if (rc) + CAM_ERR(CAM_SENSOR, "failed to add component rc: %d", rc); + + return rc; +} +#endif + +#if KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE +void cam_sensor_i2c_driver_remove(struct i2c_client *client) +{ + component_del(&client->dev, &cam_sensor_i2c_component_ops); +} + +#else +static int cam_sensor_i2c_driver_remove(struct i2c_client *client) +{ + component_del(&client->dev, &cam_sensor_i2c_component_ops); + + return 0; +} +#endif + +static int cam_sensor_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int32_t rc = 0, i = 0; + struct cam_sensor_ctrl_t *s_ctrl = NULL; + struct cam_hw_soc_info *soc_info = NULL; + bool i3c_i2c_target; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return 0; + + /* Create sensor control structure */ + 
s_ctrl = devm_kzalloc(&pdev->dev, + sizeof(struct cam_sensor_ctrl_t), GFP_KERNEL); + if (!s_ctrl) + return -ENOMEM; + + soc_info = &s_ctrl->soc_info; + soc_info->pdev = pdev; + soc_info->dev = &pdev->dev; + soc_info->dev_name = pdev->name; + + /* Initialize sensor device type */ + s_ctrl->of_node = pdev->dev.of_node; + s_ctrl->is_probe_succeed = 0; + s_ctrl->last_flush_req = 0; + + /*fill in platform device*/ + s_ctrl->pdev = pdev; + + s_ctrl->io_master_info.master_type = CCI_MASTER; + + rc = cam_sensor_parse_dt(s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "failed: cam_sensor_parse_dt rc %d", rc); + goto free_s_ctrl; + } + + CAM_DBG(CAM_SENSOR, "Master Type: %u", s_ctrl->io_master_info.master_type); + + /* Fill platform device id*/ + pdev->id = soc_info->index; + + rc = cam_sensor_init_subdev_params(s_ctrl); + if (rc) + goto free_s_ctrl; + + s_ctrl->i2c_data.per_frame = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (s_ctrl->i2c_data.per_frame == NULL) { + rc = -ENOMEM; + goto unreg_subdev; + } + + s_ctrl->i2c_data.frame_skip = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (s_ctrl->i2c_data.frame_skip == NULL) { + rc = -ENOMEM; + goto free_perframe; + } + + s_ctrl->i2c_data.deferred_frame_update = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (s_ctrl->i2c_data.deferred_frame_update == NULL) { + rc = -ENOMEM; + goto free_frame_skip; + } + + s_ctrl->i2c_data.bubble_update = + CAM_MEM_ZALLOC(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (s_ctrl->i2c_data.bubble_update == NULL) { + rc = -ENOMEM; + goto free_deferred_frame_update; + } + + INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.config_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamon_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamoff_settings.list_head)); + 
INIT_LIST_HEAD(&(s_ctrl->i2c_data.reg_bank_unlock_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.reg_bank_lock_settings.list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.read_settings.list_head)); + + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + INIT_LIST_HEAD(&(s_ctrl->i2c_data.deferred_frame_update[i].list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.per_frame[i].list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.frame_skip[i].list_head)); + INIT_LIST_HEAD(&(s_ctrl->i2c_data.bubble_update[i].list_head)); + } + + cam_sensor_module_add_i2c_device((void *) s_ctrl, CAM_SENSOR_DEVICE); + + s_ctrl->bridge_intf.device_hdl = -1; + s_ctrl->bridge_intf.link_hdl = -1; + s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info; + s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link; + s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request; + s_ctrl->bridge_intf.ops.notify_frame_skip = + cam_sensor_notify_frame_skip; + s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request; + s_ctrl->bridge_intf.ops.process_evt = cam_sensor_process_evt; + s_ctrl->bridge_intf.ops.dump_req = cam_sensor_dump_request; + + s_ctrl->sensordata->power_info.dev = &pdev->dev; + platform_set_drvdata(pdev, s_ctrl); + s_ctrl->sensor_state = CAM_SENSOR_INIT; + CAM_DBG(CAM_SENSOR, "Component bound successfully for %s", pdev->name); + + g_i3c_sensor_data[soc_info->index].s_ctrl = s_ctrl; + init_completion(&g_i3c_sensor_data[soc_info->index].probe_complete); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +free_deferred_frame_update: + CAM_MEM_FREE(s_ctrl->i2c_data.deferred_frame_update); +free_frame_skip: + CAM_MEM_FREE(s_ctrl->i2c_data.frame_skip); +free_perframe: + CAM_MEM_FREE(s_ctrl->i2c_data.per_frame); +unreg_subdev: + cam_unregister_subdev(&(s_ctrl->v4l2_dev_str)); +free_s_ctrl: + devm_kfree(&pdev->dev, s_ctrl); + return rc; +} + +static void 
cam_sensor_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + int i; + struct cam_sensor_ctrl_t *s_ctrl; + struct cam_hw_soc_info *soc_info; + bool i3c_i2c_target; + struct platform_device *pdev = to_platform_device(dev); + + i3c_i2c_target = of_property_read_bool(pdev->dev.of_node, "i3c-i2c-target"); + if (i3c_i2c_target) + return; + + s_ctrl = platform_get_drvdata(pdev); + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "sensor device is NULL"); + return; + } + + CAM_DBG(CAM_SENSOR, "Component unbind called for: %s", pdev->name); + mutex_lock(&(s_ctrl->cam_sensor_mutex)); + cam_sensor_shutdown(s_ctrl); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); + cam_unregister_subdev(&(s_ctrl->v4l2_dev_str)); + soc_info = &s_ctrl->soc_info; + for (i = 0; i < soc_info->num_clk; i++) { + if (!soc_info->clk[i]) { + CAM_DBG(CAM_SENSOR, "%s handle is NULL skip put", + soc_info->clk_name[i]); + continue; + } + devm_clk_put(soc_info->dev, soc_info->clk[i]); + } + + CAM_MEM_FREE(s_ctrl->i2c_data.deferred_frame_update); + CAM_MEM_FREE(s_ctrl->i2c_data.per_frame); + CAM_MEM_FREE(s_ctrl->i2c_data.frame_skip); + CAM_MEM_FREE(s_ctrl->i2c_data.bubble_update); + platform_set_drvdata(pdev, NULL); + v4l2_set_subdevdata(&(s_ctrl->v4l2_dev_str.sd), NULL); + devm_kfree(&pdev->dev, s_ctrl); +} + +const static struct component_ops cam_sensor_component_ops = { + .bind = cam_sensor_component_bind, + .unbind = cam_sensor_component_unbind, +}; + +static int cam_sensor_platform_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_sensor_component_ops); + return 0; +} + +static const struct of_device_id cam_sensor_driver_dt_match[] = { + {.compatible = "qcom,cam-sensor"}, + {} +}; +MODULE_DEVICE_TABLE(of, cam_sensor_driver_dt_match); + +static int32_t cam_sensor_driver_platform_probe( + struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_SENSOR, "Adding Sensor component for %s", pdev->name); + rc = component_add(&pdev->dev, &cam_sensor_component_ops); + 
if (rc) + CAM_ERR(CAM_SENSOR, "failed to add component rc: %d", rc); + + return rc; +} + +struct platform_driver cam_sensor_platform_driver = { + .probe = cam_sensor_driver_platform_probe, + .driver = { + .name = "qcom,camera", + .owner = THIS_MODULE, + .of_match_table = cam_sensor_driver_dt_match, + .suppress_bind_attrs = true, + }, + .remove = cam_sensor_platform_remove, +}; + +static const struct of_device_id cam_sensor_i2c_driver_dt_match[] = { + {.compatible = "qcom,cam-i2c-sensor"}, + {} +}; +MODULE_DEVICE_TABLE(of, cam_sensor_i2c_driver_dt_match); + +static const struct i2c_device_id i2c_id[] = { + {SENSOR_DRIVER_I2C, (kernel_ulong_t)NULL}, + { } +}; + +struct i2c_driver cam_sensor_i2c_driver = { + .id_table = i2c_id, + .probe = cam_sensor_i2c_driver_probe, + .remove = cam_sensor_i2c_driver_remove, + .driver = { + .name = SENSOR_DRIVER_I2C, + .owner = THIS_MODULE, + .of_match_table = cam_sensor_i2c_driver_dt_match, + .suppress_bind_attrs = true, + }, +}; + +static struct i3c_device_id sensor_i3c_id[MAX_I3C_DEVICE_ID_ENTRIES + 1]; + +static struct i3c_driver cam_sensor_i3c_driver = { + .id_table = sensor_i3c_id, + .probe = cam_sensor_i3c_driver_probe, + .remove = cam_i3c_driver_remove, + .driver = { + .owner = THIS_MODULE, + .name = SENSOR_DRIVER_I3C, + .of_match_table = cam_sensor_driver_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_sensor_driver_init(void) +{ + int rc; + struct device_node *dev; + int num_entries = 0; + + rc = platform_driver_register(&cam_sensor_platform_driver); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "platform_driver_register Failed: rc = %d", rc); + return rc; + } + + rc = i2c_add_driver(&cam_sensor_i2c_driver); + if (rc) { + CAM_ERR(CAM_SENSOR, "i2c_add_driver failed rc = %d", rc); + goto i2c_register_err; + } + + memset(sensor_i3c_id, 0, sizeof(struct i3c_device_id) * (MAX_I3C_DEVICE_ID_ENTRIES + 1)); + + dev = of_find_node_by_path(I3C_SENSOR_DEV_ID_DT_PATH); + if (!dev) { + CAM_DBG(CAM_SENSOR, "Couldnt Find the 
i3c-id-table dev node"); + return 0; + } + + rc = cam_sensor_count_elems_i3c_device_id(dev, &num_entries, + "i3c-sensor-id-table"); + if (rc) + return 0; + + rc = cam_sensor_fill_i3c_device_id(dev, num_entries, + "i3c-sensor-id-table", sensor_i3c_id); + if (rc) + goto i3c_register_err; + + rc = i3c_driver_register_with_owner(&cam_sensor_i3c_driver, THIS_MODULE); + if (rc) { + CAM_ERR(CAM_SENSOR, "i3c_driver registration failed, rc: %d", rc); + goto i3c_register_err; + } + + cam_sensor_module_debug_register(); + + return 0; + +i3c_register_err: + i2c_del_driver(&cam_sensor_i2c_driver); +i2c_register_err: + platform_driver_unregister(&cam_sensor_platform_driver); + + return rc; +} + +void cam_sensor_driver_exit(void) +{ + struct device_node *dev; + + platform_driver_unregister(&cam_sensor_platform_driver); + i2c_del_driver(&cam_sensor_i2c_driver); + + dev = of_find_node_by_path(I3C_SENSOR_DEV_ID_DT_PATH); + if (!dev) { + CAM_DBG(CAM_ACTUATOR, "Couldnt Find the i3c-id-table dev node"); + return; + } + + i3c_driver_unregister(&cam_sensor_i3c_driver); + + cam_sensor_module_debug_deregister(); +} + +MODULE_DESCRIPTION("cam_sensor_driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_dev.h new file mode 100644 index 0000000000..18fe442da7 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_dev.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SENSOR_DEV_H_ +#define _CAM_SENSOR_DEV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_debug_util.h" +#include "cam_context.h" + +#define NUM_MASTERS 2 +#define NUM_QUEUES 2 +#define SENSOR_DRIVER_I2C "cam-i2c-sensor" +#define CAMX_SENSOR_DEV_NAME "cam-sensor-driver" +#define SENSOR_DRIVER_I3C "i3c_camera_sensor" + +enum cam_sensor_state_t { + CAM_SENSOR_INIT, + CAM_SENSOR_ACQUIRE, + CAM_SENSOR_STANDBY, + CAM_SENSOR_CONFIG, + CAM_SENSOR_START, +}; + +/** + * struct sensor_intf_params + * @device_hdl: Device Handle + * @session_hdl: Session Handle + * @link_hdl: Link Handle + * @ops: KMD operations + * @crm_cb: Callback API pointers + */ +struct sensor_intf_params { + int32_t device_hdl; + int32_t session_hdl; + int32_t link_hdl; + struct cam_req_mgr_kmd_ops ops; + struct cam_req_mgr_crm_cb *crm_cb; +}; + +/** + * struct cam_sensor_dev_res_info + * + * @res_index : The resolution index that gets updated + * during a mode switch + * @feature_mask : Feature mask + * @fps : Frame rate + * @width : Pixel width to output to csiphy + * @height : Pixel height to output to csiphy + * request_id : Request Id + * @caps : Specifies capability sensor is configured + * for, (eg, XCFA, HFR), num_exposures and + * PDAF type + */ +struct cam_sensor_dev_res_info { + uint16_t res_index; + uint16_t feature_mask; + uint32_t fps; + uint32_t width; + uint32_t height; + int64_t request_id; + char caps[64]; +}; + +/** + * struct cam_sensor_ctrl_t: Camera control structure + * @device_name: Sensor device name + * @pdev: Platform device + * @cam_sensor_mutex: Sensor mutex + * @sensordata: Sensor board Information + * @sensor_res: Sensor resolution index and other info + * accompanying a mode index switch + * @cci_i2c_master: I2C structure + * @io_master_info: Information about the communication master + * @sensor_state: Sensor states + * 
@is_probe_succeed: Probe succeeded or not + * @id: Cell Index + * @is_i3c_device: A Flag to indicate whether this sensor is an I3C Device. + * @of_node: Of node ptr + * @v4l2_dev_str: V4L2 device structure + * @sensor_probe_addr_type: Sensor probe address type + * @sensor_probe_data_type: Sensor probe data type + * @i2c_data: Sensor I2C register settings + * @sensor_info: Sensor query cap structure + * @bridge_intf: Bridge interface structure + * @streamon_count: Count to hold the number of times stream on called + * @streamoff_count: Count to hold the number of times stream off called + * @bob_reg_index: Hold to BoB regulator index + * @bob_pwm_switch: Boolean flag to switch into PWM mode for BoB regulator + * @last_flush_req: Last request to flush + * @pipeline_delay: Sensor pipeline delay + * @modeswitch_delay: Mode switch delay + * @probe_sensor_slave_addr: Slave address used for probe if not zero + * @sensor_name: Sensor name + * @aon_camera_id: AON Camera ID associated with this sensor + * @last_applied_req: Last updated request id + * @last_applied_req: Last applied request id + * @num_batched_frames: Number batched frames + * @is_stopped_by_user: Indicate if sensor has been stopped by userland + * @stream_off_after_eof: Indicates if sensor needs to stream off after eof + * @stream_off_on_flush: Streaming off sensor on flush all call + * @is_res_info_updated: Indicate if resolution info is updated + * @last_applied_done_timestamp : Last applied done timestamp value + * @hw_no_ops: To determine whether HW operations need to be disabled + */ +struct cam_sensor_ctrl_t { + char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + struct platform_device *pdev; + struct cam_hw_soc_info soc_info; + struct mutex cam_sensor_mutex; + struct cam_sensor_board_info *sensordata; + struct cam_sensor_dev_res_info sensor_res[MAX_PER_FRAME_ARRAY]; + enum cci_i2c_master_t cci_i2c_master; + enum cci_device_num cci_num; + struct camera_io_master io_master_info; + enum cam_sensor_state_t 
sensor_state; + uint8_t is_probe_succeed; + uint32_t id; + bool is_i3c_device; + struct device_node *of_node; + struct cam_subdev v4l2_dev_str; + uint8_t sensor_probe_addr_type; + uint8_t sensor_probe_data_type; + struct i2c_data_settings i2c_data; + struct cam_sensor_query_cap sensor_info; + struct sensor_intf_params bridge_intf; + uint32_t streamon_count; + uint32_t streamoff_count; + int bob_reg_index; + bool bob_pwm_switch; + uint32_t last_flush_req; + uint16_t pipeline_delay; + uint16_t modeswitch_delay; + char sensor_name[CAM_SENSOR_NAME_MAX_SIZE]; + uint32_t probe_sensor_slave_addr; + uint32_t aon_camera_id; + int64_t last_updated_req; + int64_t last_applied_req; + uint32_t num_batched_frames; + bool is_stopped_by_user; + bool stream_off_after_eof; + bool stream_off_on_flush; + bool is_res_info_updated; + uint64_t last_applied_done_timestamp; + bool hw_no_ops; +}; + +/** + * @brief : API to register SENSOR hw to platform framework. + * @return struct platform_device pointer on on success, or ERR_PTR() on error. + */ +int cam_sensor_driver_init(void); + +/** + * @brief : API to remove SENSOR Hw from platform framework. + */ +void cam_sensor_driver_exit(void); + +#endif /* _CAM_SENSOR_DEV_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_soc.c new file mode 100644 index 0000000000..67be9ffd5e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_soc.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "cam_sensor_soc.h" +#include "cam_soc_util.h" +#include "cam_mem_mgr_api.h" + +int32_t cam_sensor_get_sub_module_index(struct device_node *of_node, + struct cam_sensor_board_info *s_info) +{ + int rc = 0, i = 0; + uint32_t val = 0; + struct device_node *src_node = NULL; + struct cam_sensor_board_info *sensor_info; + + sensor_info = s_info; + + for (i = 0; i < SUB_MODULE_MAX; i++) + sensor_info->subdev_id[i] = -1; + + src_node = of_parse_phandle(of_node, "actuator-src", 0); + if (!src_node) { + CAM_DBG(CAM_SENSOR, "src_node NULL"); + } else { + rc = of_property_read_u32(src_node, "cell-index", &val); + CAM_DBG(CAM_SENSOR, "actuator cell index %d, rc %d", val, rc); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "failed %d", rc); + of_node_put(src_node); + return rc; + } + sensor_info->subdev_id[SUB_MODULE_ACTUATOR] = val; + of_node_put(src_node); + } + + src_node = of_parse_phandle(of_node, "ois-src", 0); + if (!src_node) { + CAM_DBG(CAM_SENSOR, "src_node NULL"); + } else { + rc = of_property_read_u32(src_node, "cell-index", &val); + CAM_DBG(CAM_SENSOR, " ois cell index %d, rc %d", val, rc); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "failed %d", rc); + of_node_put(src_node); + return rc; + } + sensor_info->subdev_id[SUB_MODULE_OIS] = val; + of_node_put(src_node); + } + + src_node = of_parse_phandle(of_node, "eeprom-src", 0); + if (!src_node) { + CAM_DBG(CAM_SENSOR, "eeprom src_node NULL"); + } else { + rc = of_property_read_u32(src_node, "cell-index", &val); + CAM_DBG(CAM_SENSOR, "eeprom cell index %d, rc %d", val, rc); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "failed %d", rc); + of_node_put(src_node); + return rc; + } + sensor_info->subdev_id[SUB_MODULE_EEPROM] = val; + of_node_put(src_node); + } + + src_node = of_parse_phandle(of_node, "led-flash-src", 0); + if (!src_node) { + CAM_DBG(CAM_SENSOR, " src_node NULL"); + } else { + rc = of_property_read_u32(src_node, "cell-index", &val); + CAM_DBG(CAM_SENSOR, 
"led flash cell index %d, rc %d", val, rc); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "failed %d", rc); + of_node_put(src_node); + return rc; + } + sensor_info->subdev_id[SUB_MODULE_LED_FLASH] = val; + of_node_put(src_node); + } + + rc = of_property_read_u32(of_node, "csiphy-sd-index", &val); + if (rc < 0) + CAM_ERR(CAM_SENSOR, "paring the dt node for csiphy rc %d", rc); + else + sensor_info->subdev_id[SUB_MODULE_CSIPHY] = val; + + return rc; +} + +static int32_t cam_sensor_init_bus_params(struct cam_sensor_ctrl_t *s_ctrl) +{ + /* Validate input parameters */ + if (!s_ctrl) { + CAM_ERR(CAM_SENSOR, "failed: invalid params s_ctrl %pK", + s_ctrl); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR, + "master_type: %d", s_ctrl->io_master_info.master_type); + /* Initialize cci_client */ + if (s_ctrl->io_master_info.master_type == CCI_MASTER) { + s_ctrl->io_master_info.cci_client = CAM_MEM_ZALLOC(sizeof( + struct cam_sensor_cci_client), GFP_KERNEL); + if (!(s_ctrl->io_master_info.cci_client)) { + CAM_ERR(CAM_SENSOR, "Memory allocation failed"); + return -ENOMEM; + } + } else if (s_ctrl->io_master_info.master_type == I2C_MASTER) { + if (!(s_ctrl->io_master_info.qup_client)) + return -EINVAL; + } else if (s_ctrl->io_master_info.master_type == I3C_MASTER) { + CAM_DBG(CAM_SENSOR, "I3C Master Type"); + } else { + CAM_ERR(CAM_SENSOR, + "Invalid master / Master type Not supported : %d", + s_ctrl->io_master_info.master_type); + return -EINVAL; + } + + return 0; +} + +static int32_t cam_sensor_driver_get_dt_data(struct cam_sensor_ctrl_t *s_ctrl) +{ + int32_t rc = 0; + int i = 0; + struct cam_sensor_board_info *sensordata = NULL; + struct device_node *of_node = s_ctrl->of_node; + struct device_node *of_parent = NULL; + struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info; + + s_ctrl->sensordata = CAM_MEM_ZALLOC(sizeof(*sensordata), GFP_KERNEL); + if (!s_ctrl->sensordata) + return -ENOMEM; + + sensordata = s_ctrl->sensordata; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc < 0) { 
+ CAM_ERR(CAM_SENSOR, "Failed to read DT properties rc %d", rc); + goto FREE_SENSOR_DATA; + } + + rc = cam_sensor_util_init_gpio_pin_tbl(soc_info, + &sensordata->power_info.gpio_num_info); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Failed to read gpios %d", rc); + goto FREE_SENSOR_DATA; + } + + s_ctrl->id = soc_info->index; + + /* Validate cell_id */ + if (s_ctrl->id >= MAX_CAMERAS) { + CAM_ERR(CAM_SENSOR, "Failed invalid cell_id %d", s_ctrl->id); + rc = -EINVAL; + goto FREE_SENSOR_DATA; + } + + rc = of_property_read_bool(of_node, "i3c-target"); + if (rc) { + CAM_INFO(CAM_SENSOR, "I3C Target"); + s_ctrl->is_i3c_device = true; + s_ctrl->io_master_info.master_type = I3C_MASTER; + } + + /* Store the index of BoB regulator if it is available */ + for (i = 0; i < soc_info->num_rgltr; i++) { + if (!strcmp(soc_info->rgltr_name[i], + "cam_bob")) { + CAM_DBG(CAM_SENSOR, + "i: %d cam_bob", i); + s_ctrl->bob_reg_index = i; + soc_info->rgltr[i] = devm_regulator_get(soc_info->dev, + soc_info->rgltr_name[i]); + if (IS_ERR_OR_NULL(soc_info->rgltr[i])) { + CAM_WARN(CAM_SENSOR, + "Regulator: %s get failed", + soc_info->rgltr_name[i]); + soc_info->rgltr[i] = NULL; + } else { + if (!of_property_read_bool(of_node, + "pwm-switch")) { + CAM_DBG(CAM_SENSOR, + "No BoB PWM switch param defined"); + s_ctrl->bob_pwm_switch = false; + } else { + s_ctrl->bob_pwm_switch = true; + } + } + } + } + + /* Read subdev info */ + rc = cam_sensor_get_sub_module_index(of_node, sensordata); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "failed to get sub module index, rc=%d", + rc); + goto FREE_SENSOR_DATA; + } + + rc = cam_sensor_init_bus_params(s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed in Initialize Bus params, rc %d", rc); + goto FREE_SENSOR_DATA; + } + + if (s_ctrl->io_master_info.master_type == CCI_MASTER) { + + /* Get CCI master */ + if (of_property_read_u32(of_node, "cci-master", + &s_ctrl->cci_i2c_master)) { + /* Set default master 0 */ + s_ctrl->cci_i2c_master = MASTER_0; + } + 
CAM_DBG(CAM_SENSOR, "cci-master %d", + s_ctrl->cci_i2c_master); + + of_parent = of_get_parent(of_node); + if (of_property_read_u32(of_parent, "cell-index", + &s_ctrl->cci_num) < 0) + /* Set default master 0 */ + s_ctrl->cci_num = CCI_DEVICE_0; + + s_ctrl->io_master_info.cci_client->cci_device + = s_ctrl->cci_num; + + CAM_DBG(CAM_SENSOR, "cci-index %d", s_ctrl->cci_num); + } + + if (of_property_read_u32(of_node, "sensor-position-pitch", + &sensordata->pos_pitch) < 0) { + CAM_DBG(CAM_SENSOR, "Invalid sensor position"); + sensordata->pos_pitch = 360; + } + if (of_property_read_u32(of_node, "sensor-position-roll", + &sensordata->pos_roll) < 0) { + CAM_DBG(CAM_SENSOR, "Invalid sensor position"); + sensordata->pos_roll = 360; + } + if (of_property_read_u32(of_node, "sensor-position-yaw", + &sensordata->pos_yaw) < 0) { + CAM_DBG(CAM_SENSOR, "Invalid sensor position"); + sensordata->pos_yaw = 360; + } + + if (of_property_read_u32(of_node, "aon-camera-id", &s_ctrl->aon_camera_id)) { + CAM_DBG(CAM_SENSOR, "cell_idx: %d is not used for AON usecase", soc_info->index); + s_ctrl->aon_camera_id = NOT_AON_CAM; + } else { + CAM_INFO(CAM_SENSOR, + "AON Sensor detected in cell_idx: %d aon_camera_id: %d phy_index: %d", + soc_info->index, s_ctrl->aon_camera_id, + s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]); + if ((s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY] == 2) && + (s_ctrl->aon_camera_id != AON_CAM2)) { + CAM_ERR(CAM_SENSOR, "Incorrect AON camera id for cphy_index %d", + s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]); + s_ctrl->aon_camera_id = NOT_AON_CAM; + } + if ((s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY] == 4) && + (s_ctrl->aon_camera_id != AON_CAM1)) { + CAM_ERR(CAM_SENSOR, "Incorrect AON camera id for cphy_index %d", + s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY]); + s_ctrl->aon_camera_id = NOT_AON_CAM; + } + } + + rc = cam_sensor_util_aon_registration( + s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY], + s_ctrl->aon_camera_id); + if (rc) { + 
CAM_ERR(CAM_SENSOR, "Aon registration failed, rc: %d", rc); + goto FREE_SENSOR_DATA; + } + + if (!of_property_read_bool(of_node, "hw-no-ops")) + s_ctrl->hw_no_ops = false; + else + s_ctrl->hw_no_ops = true; + + cam_sensor_utils_parse_pm_ctrl_flag(of_node, &(s_ctrl->io_master_info)); + CAM_INFO(CAM_SENSOR, + "master: %d (1-CCI, 2-I2C, 3-SPI, 4-I3C) pm_ctrl_client_enable: %d", + s_ctrl->io_master_info.master_type, + (!s_ctrl->io_master_info.qup_client) ? 0 : + s_ctrl->io_master_info.qup_client->pm_ctrl_client_enable); + + return rc; + +FREE_SENSOR_DATA: + CAM_MEM_FREE(sensordata); + s_ctrl->sensordata = NULL; + + return rc; +} + +int32_t cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl) +{ + int32_t i, rc = 0; + struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info; + + /* Parse dt information and store in sensor control structure */ + rc = cam_sensor_driver_get_dt_data(s_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, "Failed to get dt data rc %d", rc); + return rc; + } + + /* Initialize mutex */ + mutex_init(&(s_ctrl->cam_sensor_mutex)); + + /* Initialize default parameters */ + for (i = 0; i < soc_info->num_clk; i++) { + soc_info->clk[i] = devm_clk_get(soc_info->dev, + soc_info->clk_name[i]); + if (IS_ERR(soc_info->clk[i])) { + CAM_ERR(CAM_SENSOR, "get failed for %s", + soc_info->clk_name[i]); + rc = -ENOENT; + return rc; + } else if (!soc_info->clk[i]) { + CAM_DBG(CAM_SENSOR, "%s handle is NULL skip get", + soc_info->clk_name[i]); + continue; + } + } + /* Initialize regulators to default parameters */ + for (i = 0; i < soc_info->num_rgltr; i++) { + soc_info->rgltr[i] = devm_regulator_get(soc_info->dev, + soc_info->rgltr_name[i]); + if (IS_ERR_OR_NULL(soc_info->rgltr[i])) { + rc = PTR_ERR(soc_info->rgltr[i]); + rc = rc ? 
rc : -EINVAL; + CAM_ERR(CAM_SENSOR, "get failed for regulator %s", + soc_info->rgltr_name[i]); + return rc; + } + CAM_DBG(CAM_SENSOR, "get for regulator %s", + soc_info->rgltr_name[i]); + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_soc.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_soc.h new file mode 100644 index 0000000000..99862da046 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor/cam_sensor_soc.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_SENSOR_SOC_H_ +#define _CAM_SENSOR_SOC_H_ + +#include "cam_sensor_dev.h" + +/** + * @s_ctrl: Sensor ctrl structure + * + * Parses sensor dt + */ +int cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl); + +#endif /* _CAM_SENSOR_SOC_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c new file mode 100644 index 0000000000..8a84f45292 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2018, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_sensor_cmn_header.h" +#include "cam_sensor_i2c.h" +#include "cam_cci_dev.h" + +int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + bool is_probing) +{ + int32_t rc = -EINVAL; + unsigned char buf[CAMERA_SENSOR_I2C_TYPE_DWORD]; + struct cam_cci_ctrl cci_ctrl; + + if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) + return rc; + + cci_ctrl.is_probing = is_probing; + cci_ctrl.cmd = MSM_CCI_I2C_READ; + cci_ctrl.cci_info = cci_client; + cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr; + cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type; + cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type; + cci_ctrl.cfg.cci_i2c_read_cfg.data = buf; + cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type; + rc = v4l2_subdev_call(cci_client->cci_subdev, + core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); + if (rc < 0) { + if (is_probing) + CAM_INFO(CAM_SENSOR_IO, "rc = %d", rc); + else + CAM_ERR(CAM_SENSOR_IO, "rc = %d", rc); + return rc; + } + + rc = cci_ctrl.status; + if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) + *data = buf[0]; + else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) + *data = buf[0] << 8 | buf[1]; + else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) + *data = buf[0] << 16 | buf[1] << 8 | buf[2]; + else + *data = buf[0] << 24 | buf[1] << 16 | + buf[2] << 8 | buf[3]; + + return rc; +} + +int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + uint32_t num_byte) +{ + int32_t rc = -EFAULT; + unsigned char *buf = NULL; + int i = 0; + struct cam_cci_ctrl cci_ctrl; + + if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) + || (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) + || 
(num_byte > I2C_REG_DATA_MAX)) { + CAM_ERR(CAM_SENSOR_IO, "addr_type %d num_byte %d", addr_type, + num_byte); + return rc; + } + + buf = CAM_MEM_ZALLOC(num_byte, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + cci_ctrl.cmd = MSM_CCI_I2C_READ; + cci_ctrl.cci_info = cci_client; + cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr; + cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type; + cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type; + cci_ctrl.cfg.cci_i2c_read_cfg.data = buf; + cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte; + cci_ctrl.status = -EFAULT; + rc = v4l2_subdev_call(cci_client->cci_subdev, + core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "CCI config failed rc = %d", rc); + goto end; + } + rc = cci_ctrl.status; + CAM_DBG(CAM_SENSOR_IO, "addr = 0x%x, rc = %d", addr, rc); + for (i = 0; i < num_byte; i++) { + data[i] = buf[i]; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: Data: 0x%x\n", i, data[i]); + } + +end: + CAM_MEM_FREE(buf); + return rc; +} + +static int32_t cam_cci_i2c_write_table_cmd( + struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting, + enum cam_cci_cmd_type cmd) +{ + int32_t rc = -EINVAL; + struct cam_cci_ctrl cci_ctrl; + + if (!client || !write_setting) + return rc; + + if (write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) + return rc; + + cci_ctrl.cmd = cmd; + cci_ctrl.cci_info = client->cci_client; + cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting = + write_setting->reg_setting; + cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type; + cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = write_setting->addr_type; + cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size; + rc = v4l2_subdev_call(client->cci_client->cci_subdev, + core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); + if (rc < 0) { + 
CAM_ERR(CAM_SENSOR_IO, "Failed rc = %d", rc); + return rc; + } + rc = cci_ctrl.status; + if (write_setting->delay > 20) + msleep(write_setting->delay); + else if (write_setting->delay) + usleep_range(write_setting->delay * 1000, (write_setting->delay + * 1000) + 1000); + + return rc; +} + + +int32_t cam_cci_i2c_write_table( + struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting) +{ + return cam_cci_i2c_write_table_cmd(client, write_setting, + MSM_CCI_I2C_WRITE); +} + +int32_t cam_cci_i2c_write_continuous_table( + struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting, + uint8_t cam_sensor_i2c_write_flag) +{ + int32_t rc = 0; + + if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_BURST) + rc = cam_cci_i2c_write_table_cmd(client, write_setting, + MSM_CCI_I2C_WRITE_BURST); + else if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_SEQ) + rc = cam_cci_i2c_write_table_cmd(client, write_setting, + MSM_CCI_I2C_WRITE_SEQ); + + return rc; +} + +static int32_t cam_cci_i2c_compare(struct cam_sensor_cci_client *client, + uint32_t addr, uint16_t data, uint16_t data_mask, + enum camera_sensor_i2c_type data_type, + enum camera_sensor_i2c_type addr_type) +{ + int32_t rc; + uint32_t reg_data = 0; + + rc = cam_cci_i2c_read(client, addr, ®_data, + addr_type, data_type, false); + if (rc < 0) + return rc; + + reg_data = reg_data & 0xFFFF; + if (data == (reg_data & ~data_mask)) + return I2C_COMPARE_MATCH; + else { + CAM_WARN(CAM_SENSOR_IO, + "mismatch: reg_data 0x%x: data: 0x%x, data_mask: 0x%x", + reg_data, data, data_mask); + return I2C_COMPARE_MISMATCH; + } +} + +int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client, + uint32_t addr, uint16_t data, uint16_t data_mask, + enum camera_sensor_i2c_type data_type, + enum camera_sensor_i2c_type addr_type, + uint32_t delay_ms) +{ + int32_t rc = -EINVAL; + int32_t i = 0; + + CAM_DBG(CAM_SENSOR_IO, "addr: 0x%x data: 0x%x dt: %d", + addr, data, data_type); + + if (delay_ms 
> MAX_POLL_DELAY_MS) { + CAM_ERR(CAM_SENSOR_IO, "invalid delay = %d max_delay = %d", + delay_ms, MAX_POLL_DELAY_MS); + return -EINVAL; + } + for (i = 0; i < delay_ms; i++) { + rc = cam_cci_i2c_compare(client, + addr, data, data_mask, data_type, addr_type); + if (!rc) + return rc; + + usleep_range(1000, 1010); + } + + /* If rc is 1 then read is successful but poll is failure */ + if (rc == 1) + CAM_ERR(CAM_SENSOR_IO, "poll failed rc=%d(non-fatal)", rc); + + if (rc < 0) + CAM_ERR(CAM_SENSOR_IO, "poll failed rc=%d", rc); + + return rc; +} + +int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client, + uint16_t cci_cmd) +{ + int32_t rc = 0; + struct cam_cci_ctrl cci_ctrl; + + cci_ctrl.cmd = cci_cmd; + cci_ctrl.cci_info = cci_client; + rc = v4l2_subdev_call(cci_client->cci_subdev, + core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "Failed rc = %d", rc); + return rc; + } + return cci_ctrl.status; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h new file mode 100644 index 0000000000..1f6d9c73dc --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SENSOR_I2C_H_ +#define _CAM_SENSOR_I2C_H_ + +#include +#include +#include +#include "cam_cci_dev.h" +#include "cam_sensor_io.h" + +#define I2C_POLL_TIME_MS 5 +#define MAX_POLL_DELAY_MS 100 + +#define I2C_COMPARE_MATCH 0 +#define I2C_COMPARE_MISMATCH 1 + +#define I2C_REG_DATA_MAX (20*1024) + +/** + * @client: CCI client structure + * @data: I2C data + * @addr_type: I2c address type + * @data_type: I2C data type + * @is_probing: Is probing a sensor + * + * This API handles CCI read + */ +int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *client, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + bool is_probing); + +/** + * @client: CCI client structure + * @addr: I2c address + * @data: I2C data + * @addr_type: I2c address type + * @data_type: I2c data type + * @num_byte: number of bytes + * + * This API handles CCI sequential read + */ +int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + uint32_t num_byte); + +/** + * @client: CCI client structure + * @write_setting: I2C register setting + * + * This API handles CCI random write + */ +int32_t cam_cci_i2c_write_table( + struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting); + +/** + * @client: CCI client structure + * @write_setting: I2C register setting + * @cam_sensor_i2c_write_flag: burst or seq write + * + * This API handles CCI continuous write + */ +int32_t cam_cci_i2c_write_continuous_table( + struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting, + uint8_t cam_sensor_i2c_write_flag); + +/** + * @cci_client: CCI client structure + * @cci_cmd: CCI command type + * + * Does I2C call to I2C functionalities + */ +int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client, + uint16_t cci_cmd); + +/** + * @client: CCI 
client structure + * @addr: I2C address + * @data: I2C data + * @data_mask: I2C data mask + * @data_type: I2C data type + * @addr_type: I2C addr type + * @delay_ms: Delay in milli seconds + * + * This API implements CCI based I2C poll + */ +int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client, + uint32_t addr, uint16_t data, uint16_t data_mask, + enum camera_sensor_i2c_type data_type, + enum camera_sensor_i2c_type addr_type, + uint32_t delay_ms); + + +/** + * cam_qup_i2c_read : QUP based i2c read + * @client : QUP I2C client structure + * @data : I2C data + * @addr_type : I2c address type + * @data_type : I2C data type + * + * This API handles QUP I2C read + */ + +int32_t cam_qup_i2c_read(struct i2c_client *client, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type); + +/** + * cam_qup_i2c_read_seq : QUP based I2C sequential read + * @client : QUP I2C client structure + * @data : I2C data + * @addr_type : I2c address type + * @num_bytes : number of bytes to read + * This API handles QUP I2C Sequential read + */ + +int32_t cam_qup_i2c_read_seq(struct i2c_client *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + uint32_t num_byte); + +/** + * cam_qup_i2c_poll : QUP based I2C poll operation + * @client : QUP I2C client structure + * @addr : I2C address + * @data : I2C data + * @data_mask : I2C data mask + * @data_type : I2C data type + * @addr_type : I2C addr type + * @delay_ms : Delay in milli seconds + * + * This API implements QUP based I2C poll + */ + +int32_t cam_qup_i2c_poll(struct i2c_client *client, + uint32_t addr, uint16_t data, uint16_t data_mask, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + uint32_t delay_ms); + +/** + * cam_qup_i2c_write_table : QUP based I2C write random + * @client : QUP I2C client structure + * @write_setting : I2C register settings + * + * This API handles QUP I2C random write + */ + +int32_t 
cam_qup_i2c_write_table( + struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting); + +/** + * cam_qup_i2c_write_continuous_write: QUP based I2C write continuous(Burst/Seq) + * @client: QUP I2C client structure + * @write_setting: I2C register setting + * @cam_sensor_i2c_write_flag: burst or seq write + * + * This API handles QUP continuous write + */ +int32_t cam_qup_i2c_write_continuous_table( + struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting, + uint8_t cam_sensor_i2c_write_flag); + +#endif /*_CAM_SENSOR_I2C_H*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_i3c.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_i3c.h new file mode 100644 index 0000000000..16b063bc8a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_i3c.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SENSOR_I3C_H_ +#define _CAM_SENSOR_I3C_H_ + +#include +#include +#include +#include "cam_sensor_io.h" + +/** + * cam_qup_i3c_read : This API handles QUP I3C read operations + * @client : QUP IeC client structure + * @data : I3C data + * @addr_type : I3C address type + * @data_type : I3C data type + */ + +int cam_qup_i3c_read(struct i3c_device *client, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type); + +/** + * cam_qup_i3c_read_seq : This API handles QUP I3C Sequential read operations + * @client : QUP I3C client structure + * @data : I3C data + * @addr_type : I3C address type + * @num_bytes : Number of bytes to read + */ + +int cam_qup_i3c_read_seq(struct i3c_device *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + uint32_t num_byte); + +/** + * cam_qup_i3c_poll : This API handles QUP based I3C poll operation + * @client : QUP I3C client structure + * @addr : I3C address + * @data : I3C data + * @data_mask : I3C data mask + * @data_type : I3C data type + * @addr_type : I3C addr type + * @delay_ms : Delay in milli seconds + */ + +int cam_qup_i3c_poll(struct i3c_device *client, + uint32_t addr, uint16_t data, uint16_t data_mask, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + uint32_t delay_ms); + +/** + * cam_qup_i3c_write_table : This API Handles QUP based I3C write random operations + * @client : QUP I3C client structure + * @write_setting : I3C register settings + */ + +int cam_qup_i3c_write_table( + struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting); + +/** + * cam_qup_i3c_write_continuous_write: This API Handles QUP based I3C write continuous(Burst/Seq) + * @client: QUP I3C client structure + * @write_setting: I3C register setting + * @cam_sensor_i3c_write_flag: burst or seq write + */ +int cam_qup_i3c_write_continuous_table( + struct camera_io_master *client, + struct 
cam_sensor_i2c_reg_setting *write_setting, + uint8_t cam_sensor_i3c_write_flag); + +#endif /*_CAM_SENSOR_I3C_H_*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_io.c new file mode 100644 index 0000000000..3bc2637943 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_io.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_sensor_io.h" +#include "cam_sensor_i2c.h" +#include "cam_sensor_i3c.h" +#include + +int32_t camera_io_dev_poll(struct camera_io_master *io_master_info, + uint32_t addr, uint16_t data, uint32_t data_mask, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + uint32_t delay_ms) +{ + int16_t mask = data_mask & 0xFF; + + if (!io_master_info) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args"); + return -EINVAL; + } + + switch (io_master_info->master_type) { + case CCI_MASTER: + return cam_cci_i2c_poll(io_master_info->cci_client, + addr, data, mask, data_type, addr_type, delay_ms); + case I2C_MASTER: + if (!io_master_info->qup_client) { + CAM_ERR(CAM_SENSOR_IO, "qup_client is NULL"); + return -EINVAL; + } + return cam_qup_i2c_poll(io_master_info->qup_client->i2c_client, + addr, data, data_mask, addr_type, data_type, delay_ms); + case I3C_MASTER: + if (!io_master_info->qup_client) { + CAM_ERR(CAM_SENSOR_IO, "qup_client is NULL"); + return -EINVAL; + } + return cam_qup_i3c_poll(io_master_info->qup_client->i3c_client, + addr, data, data_mask, addr_type, data_type, delay_ms); + default: + CAM_ERR(CAM_SENSOR_IO, "Invalid Master Type: %d", io_master_info->master_type); + } + + return -EINVAL; +} + +int32_t camera_io_dev_erase(struct camera_io_master *io_master_info, + 
uint32_t addr, uint32_t size) +{ + if (!io_master_info) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args"); + return -EINVAL; + } + + if (size == 0) + return 0; + + switch (io_master_info->master_type) { + case SPI_MASTER: + CAM_DBG(CAM_SENSOR_IO, "Calling SPI Erase"); + return cam_spi_erase(io_master_info, addr, CAMERA_SENSOR_I2C_TYPE_WORD, size); + case I2C_MASTER: + case CCI_MASTER: + case I3C_MASTER: + CAM_ERR(CAM_SENSOR_IO, "Erase not supported on Master Type: %d", + io_master_info->master_type); + return -EINVAL; + default: + CAM_ERR(CAM_SENSOR_IO, "Invalid Master Type: %d", io_master_info->master_type); + } + + return -EINVAL; +} + +int32_t camera_io_dev_read(struct camera_io_master *io_master_info, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + bool is_probing) +{ + if (!io_master_info) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args"); + return -EINVAL; + } + + switch (io_master_info->master_type) { + case SPI_MASTER: + return cam_spi_read(io_master_info, addr, data, addr_type, data_type); + case I2C_MASTER: + if (!io_master_info->qup_client) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args: qup_client: NULL"); + return -EINVAL; + } + return cam_qup_i2c_read(io_master_info->qup_client->i2c_client, + addr, data, addr_type, data_type); + case CCI_MASTER: + return cam_cci_i2c_read(io_master_info->cci_client, + addr, data, addr_type, data_type, is_probing); + case I3C_MASTER: + if (!io_master_info->qup_client) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args: qup_client: NULL"); + return -EINVAL; + } + return cam_qup_i3c_read(io_master_info->qup_client->i3c_client, + addr, data, addr_type, data_type); + default: + CAM_ERR(CAM_SENSOR_IO, "Invalid Master Type: %d", io_master_info->master_type); + } + + return -EINVAL; +} + +int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, int32_t 
num_bytes) +{ + switch (io_master_info->master_type) { + case CCI_MASTER: + return cam_camera_cci_i2c_read_seq(io_master_info->cci_client, + addr, data, addr_type, data_type, num_bytes); + case I2C_MASTER: + if (!io_master_info->qup_client) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args: qup_client: NULL"); + return -EINVAL; + } + return cam_qup_i2c_read_seq(io_master_info->qup_client->i2c_client, + addr, data, addr_type, num_bytes); + case SPI_MASTER: + return cam_spi_read_seq(io_master_info, addr, data, addr_type, num_bytes); + case I3C_MASTER: + if (!io_master_info->qup_client) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args: qup_client: NULL"); + return -EINVAL; + } + return cam_qup_i3c_read_seq(io_master_info->qup_client->i3c_client, + addr, data, addr_type, num_bytes); + default: + CAM_ERR(CAM_SENSOR_IO, "Invalid Master Type: %d", io_master_info->master_type); + } + + return -EINVAL; +} + +int32_t camera_io_dev_write(struct camera_io_master *io_master_info, + struct cam_sensor_i2c_reg_setting *write_setting) +{ + if (!write_setting || !io_master_info) { + CAM_ERR(CAM_SENSOR_IO, + "Input parameters not valid ws: %pK ioinfo: %pK", + write_setting, io_master_info); + return -EINVAL; + } + + if (!write_setting->reg_setting) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Register Settings"); + return -EINVAL; + } + + switch (io_master_info->master_type) { + case CCI_MASTER: + return cam_cci_i2c_write_table(io_master_info, write_setting); + case I2C_MASTER: + return cam_qup_i2c_write_table(io_master_info, write_setting); + case SPI_MASTER: + return cam_spi_write_table(io_master_info, write_setting); + case I3C_MASTER: + return cam_qup_i3c_write_table(io_master_info, write_setting); + default: + CAM_ERR(CAM_SENSOR_IO, "Invalid Master Type:%d", io_master_info->master_type); + } + + return -EINVAL; +} + +int32_t camera_io_dev_write_continuous(struct camera_io_master *io_master_info, + struct cam_sensor_i2c_reg_setting *write_setting, + uint8_t cam_sensor_i2c_write_flag) +{ + if 
(!write_setting || !io_master_info) { + CAM_ERR(CAM_SENSOR_IO, + "Input parameters not valid ws: %pK ioinfo: %pK", + write_setting, io_master_info); + return -EINVAL; + } + + if (!write_setting->reg_setting) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Register Settings"); + return -EINVAL; + } + + switch (io_master_info->master_type) { + case CCI_MASTER: + return cam_cci_i2c_write_continuous_table(io_master_info, + write_setting, cam_sensor_i2c_write_flag); + case I2C_MASTER: + return cam_qup_i2c_write_continuous_table(io_master_info, + write_setting, cam_sensor_i2c_write_flag); + case SPI_MASTER: + return cam_spi_write_table(io_master_info, write_setting); + case I3C_MASTER: + return cam_qup_i3c_write_continuous_table(io_master_info, + write_setting, cam_sensor_i2c_write_flag); + default: + CAM_ERR(CAM_SENSOR_IO, "Invalid Master Type:%d", io_master_info->master_type); + } + + return -EINVAL; +} + +int32_t camera_io_init(struct camera_io_master *io_master_info) +{ + int rc = 0; + + if (!io_master_info) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args"); + return -EINVAL; + } + + switch (io_master_info->master_type) { + case CCI_MASTER: + io_master_info->cci_client->cci_subdev = cam_cci_get_subdev( + io_master_info->cci_client->cci_device); + return cam_sensor_cci_i2c_util(io_master_info->cci_client, MSM_CCI_INIT); + case I3C_MASTER: + if ((io_master_info->qup_client != NULL) && + (io_master_info->qup_client->i3c_client != NULL)) { + struct device *parent_dev = + io_master_info->qup_client->i3c_client->dev.parent; + + if (parent_dev != NULL) { + /* I3C master driver: Wait for HOT JOIN only during ACQUIRE*/ + if ((parent_dev->of_node != NULL) && + (parent_dev->of_node->data != NULL) && + (io_master_info->qup_client->i3c_wait_for_hotjoin) && + (io_master_info->qup_client->pm_ctrl_client_enable)) { + *(uint32_t *)(parent_dev->of_node->data) = 1; + CAM_DBG(CAM_SENSOR_IO, "%s:%d: %s: SET of_node->data: %d", + __func__, __LINE__, io_master_info->sensor_name, + *(uint32_t 
*)(parent_dev->of_node->data)); + } + if (io_master_info->qup_client->pm_ctrl_client_enable) { + CAM_DBG(CAM_SENSOR_IO, + "%s:%d: %s: wait_for_hotjoin: %d I3C_MASTER: Calling get_sync", + __func__, __LINE__, io_master_info->sensor_name, + io_master_info->qup_client->i3c_wait_for_hotjoin); + rc = pm_runtime_get_sync(parent_dev); + if (rc < 0) { + CAM_WARN(CAM_SENSOR_IO, + "Fail I3C getsync rc: %d for parent: %s", + rc, dev_name(parent_dev)); + } + } + /* I3C master driver: Dont Wait for HOT JOIN Further-on */ + if ((parent_dev->of_node != NULL) && + (parent_dev->of_node->data != NULL) && + (io_master_info->qup_client->i3c_wait_for_hotjoin) && + (io_master_info->qup_client->pm_ctrl_client_enable)) { + *(uint32_t *)(parent_dev->of_node->data) = 0; + CAM_DBG(CAM_SENSOR_IO, "%s:%d: %s: SET of_node->data: %d", + __func__, __LINE__, io_master_info->sensor_name, + *(uint32_t *)(parent_dev->of_node->data)); + } + } + } + return 0; + case I2C_MASTER: + if ((io_master_info->qup_client != NULL) && + (io_master_info->qup_client->i2c_client != NULL) && + (io_master_info->qup_client->i2c_client->adapter != NULL) && + (io_master_info->qup_client->pm_ctrl_client_enable)) { + CAM_DBG(CAM_SENSOR_IO, "%s:%d: %s: I2C_MASTER: Calling get_sync", + __func__, __LINE__, io_master_info->sensor_name); + rc = pm_runtime_get_sync( + io_master_info->qup_client->i2c_client->adapter->dev.parent); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "Failed to get sync rc: %d", rc); + return -EINVAL; + } + } + return 0; + case SPI_MASTER: return 0; + default: + CAM_ERR(CAM_SENSOR_IO, "Invalid Master Type:%d", io_master_info->master_type); + } + + return -EINVAL; +} + +int32_t camera_io_release(struct camera_io_master *io_master_info) +{ + int rc = 0; + + if (!io_master_info) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Args"); + return -EINVAL; + } + + switch (io_master_info->master_type) { + case CCI_MASTER: + return cam_sensor_cci_i2c_util(io_master_info->cci_client, MSM_CCI_RELEASE); + case I3C_MASTER: + if 
((io_master_info->qup_client != NULL) && + (io_master_info->qup_client->i3c_client != NULL) && + (io_master_info->qup_client->pm_ctrl_client_enable)) { + CAM_DBG(CAM_SENSOR_IO, "%s:%d: %s: I3C_MASTER: Calling put_sync", + __func__, __LINE__, io_master_info->sensor_name); + rc = pm_runtime_put_sync( + io_master_info->qup_client->i3c_client->dev.parent); + if (rc < 0) { + CAM_WARN(CAM_SENSOR_IO, + "Failed to I3C PUT_SYNC rc: %d parent: %s", + rc, dev_name( + io_master_info->qup_client->i3c_client->dev.parent)); + } + } + return 0; + case I2C_MASTER: + if ((io_master_info->qup_client != NULL) && + (io_master_info->qup_client->i2c_client != NULL) && + (io_master_info->qup_client->i2c_client->adapter != NULL) && + (io_master_info->qup_client->pm_ctrl_client_enable)) { + CAM_DBG(CAM_SENSOR_IO, "%s:%d: %s: I2C_MASTER: Calling put_sync", + __func__, __LINE__, io_master_info->sensor_name); + pm_runtime_put_sync( + io_master_info->qup_client->i2c_client->adapter->dev.parent); + } + return 0; + case SPI_MASTER: return 0; + default: + CAM_ERR(CAM_SENSOR_IO, "Invalid Master Type:%d", io_master_info->master_type); + } + + return -EINVAL; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_io.h new file mode 100644 index 0000000000..d9c7cbf767 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_io.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SENSOR_IO_H_ +#define _CAM_SENSOR_IO_H_ + +#include +#include "cam_sensor_cmn_header.h" + +/* Master Types */ +#define CCI_MASTER 1 +#define I2C_MASTER 2 +#define SPI_MASTER 3 +#define I3C_MASTER 4 + +/** + * @i2c_client: I2C client information structure + * @i3c_client: I3C client information structure + * @pm_enable: Power Management Enable flag + * @i3c_wait_for_hotjoin: Flag to notify I3C master about HOT_JOIN wait + */ +struct cam_sensor_qup_client { + struct i2c_client *i2c_client; + struct i3c_device *i3c_client; + bool pm_ctrl_client_enable; + bool i3c_wait_for_hotjoin; +}; + +/** + * @master_type: CCI master type + * @qup_client: QUP client information structure + * @cci_client: CCI client information structure + * @spi_client: SPI client information structure + * @pm_enable: Power Management Enable flag + */ +struct camera_io_master { + int master_type; + struct cam_sensor_qup_client *qup_client; + struct cam_sensor_cci_client *cci_client; + struct cam_sensor_spi_client *spi_client; + char sensor_name[CAM_SENSOR_NAME_MAX_SIZE]; +}; + +/** + * @io_master_info: I2C/SPI master information + * @addr: I2C address + * @data: I2C data + * @addr_type: I2C addr_type + * @data_type: I2C data type + * @is_probing: Is probing a sensor + * + * This API abstracts read functionality based on master type + */ +int32_t camera_io_dev_read(struct camera_io_master *io_master_info, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + bool is_probing); + +/** + * @io_master_info: I2C/SPI master information + * @addr: I2C address + * @data: I2C data + * @addr_type: I2C addr type + * @data_type: I2C data type + * @num_bytes: number of bytes + * + * This API abstracts read functionality based on master type + */ +int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, 
+ int32_t num_bytes); + +/** + * @io_master_info: I2C/SPI master information + * + * This API initializes the I2C/SPI master based on master type + */ +int32_t camera_io_init(struct camera_io_master *io_master_info); + +/** + * @io_master_info: I2C/SPI master information + * + * This API releases the I2C/SPI master based on master type + */ +int32_t camera_io_release(struct camera_io_master *io_master_info); + +/** + * @io_master_info: I2C/SPI master information + * @write_setting: write settings information + * + * This API abstracts write functionality based on master type + */ +int32_t camera_io_dev_write(struct camera_io_master *io_master_info, + struct cam_sensor_i2c_reg_setting *write_setting); + +/** + * @io_master_info: I2C/SPI master information + * @write_setting: write settings information + * @cam_sensor_i2c_write_flag: differentiate between burst & seq + * + * This API abstracts write functionality based on master type and + * write flag for continuous write + */ +int32_t camera_io_dev_write_continuous(struct camera_io_master *io_master_info, + struct cam_sensor_i2c_reg_setting *write_setting, + uint8_t cam_sensor_i2c_write_flag); + +int32_t camera_io_dev_erase(struct camera_io_master *io_master_info, + uint32_t addr, uint32_t size); +/** + * @io_master_info: I2C/SPI master information + * @addr: I2C address + * @data: I2C data + * @data_mask: I2C data mask + * @data_type: I2C data type + * @addr_type: I2C address type + * @delay_ms: delay in milli seconds + * + * This API abstracts poll functionality based on master type + */ +int32_t camera_io_dev_poll(struct camera_io_master *io_master_info, + uint32_t addr, uint16_t data, uint32_t data_mask, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + uint32_t delay_ms); + +#include "cam_sensor_i2c.h" +#include "cam_sensor_spi.h" +#include "cam_sensor_i3c.h" + +#endif /* _CAM_SENSOR_IO_H_ */ diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c new file mode 100644 index 0000000000..7fb17294e5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_sensor_cmn_header.h" +#include "cam_sensor_i2c.h" +#include "cam_sensor_io.h" +#include "cam_mem_mgr_api.h" + +#define I2C_REG_MAX_BUF_SIZE 8 + +static int i2c_lookahead_en = 1; +module_param(i2c_lookahead_en, uint, 0644); +MODULE_PARM_DESC(i2c_lookahead_en, + "Flag to enable/disable BURST/Lookahead register packing for I2C QUP"); + +static int32_t cam_qup_i2c_rxdata( + struct i2c_client *dev_client, unsigned char *rxdata, + enum camera_sensor_i2c_type addr_type, + int data_length) +{ + int32_t rc = 0; + uint16_t saddr = dev_client->addr >> 1; + int i2c_msg_size = 2; + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = addr_type, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = data_length, + .buf = rxdata, + }, + }; + rc = i2c_transfer(dev_client->adapter, msgs, i2c_msg_size); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "failed 0x%x", saddr); + return rc; + } + + if (rc == i2c_msg_size) + rc = 0; + else { + CAM_ERR(CAM_SENSOR_IO, "i2c transfer failed, i2c_msg_size:%d rc:%d", + i2c_msg_size, rc); + rc = -EIO; + } + + return rc; +} + +static inline void cam_qup_i2c_txdata_fill( + struct camera_io_master *dev_client, unsigned char *txdata, + uint16_t length, struct i2c_msg *msgs, int curr_mindx) +{ + msgs[curr_mindx].addr = dev_client->qup_client->i2c_client->addr >> 1; + msgs[curr_mindx].flags = 0; + msgs[curr_mindx].len = length; + 
msgs[curr_mindx].buf = txdata; +} + +static int32_t cam_qup_i2c_txdata( + struct camera_io_master *dev_client, unsigned char *txdata, + uint16_t length) +{ + int32_t rc = 0; + uint16_t saddr = dev_client->qup_client->i2c_client->addr >> 1; + int i2c_msg_size = 1; + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + + rc = i2c_transfer(dev_client->qup_client->i2c_client->adapter, msg, i2c_msg_size); + if (rc == i2c_msg_size) + rc = 0; + else { + CAM_ERR(CAM_SENSOR_IO, "i2c transfer failed, i2c_msg_size:%d rc:%d", + i2c_msg_size, rc); + rc = -EIO; + } + + return rc; +} + +int32_t cam_qup_i2c_read(struct i2c_client *client, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type) +{ + int32_t rc = -EINVAL; + unsigned char *buf = NULL; + + if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) { + CAM_ERR(CAM_SENSOR_IO, "Failed with addr/data_type verification"); + return rc; + } + + buf = kzalloc(addr_type + data_type, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[0] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[0] = addr >> 8; + buf[1] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[0] = addr >> 16; + buf[1] = addr >> 8; + buf[2] = addr; + } else { + buf[0] = addr >> 24; + buf[1] = addr >> 16; + buf[2] = addr >> 8; + buf[3] = addr; + } + + rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "failed rc: %d", rc); + goto read_fail; + } + + if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) + *data = buf[0]; + else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) + *data = buf[0] << 8 | buf[1]; + else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) + *data = buf[0] << 16 | buf[1] << 8 | 
buf[2]; + else + *data = buf[0] << 24 | buf[1] << 16 | + buf[2] << 8 | buf[3]; + + CAM_DBG(CAM_SENSOR_IO, "addr = 0x%x data: 0x%x", addr, *data); +read_fail: + kfree(buf); + buf = NULL; + return rc; +} + +int32_t cam_qup_i2c_read_seq(struct i2c_client *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + uint32_t num_byte) +{ + int32_t rc = -EFAULT; + unsigned char *buf = NULL; + int i; + + if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) { + CAM_ERR(CAM_SENSOR_IO, "Failed with addr_type verification"); + return rc; + } + + if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) { + CAM_ERR(CAM_SENSOR_IO, "num_byte:0x%x max supported:0x%x", + num_byte, I2C_REG_DATA_MAX); + return rc; + } + + buf = kzalloc(addr_type + num_byte, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[0] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[0] = addr >> BITS_PER_BYTE; + buf[1] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[0] = addr >> 16; + buf[1] = addr >> 8; + buf[2] = addr; + } else { + buf[0] = addr >> 24; + buf[1] = addr >> 16; + buf[2] = addr >> 8; + buf[3] = addr; + } + + rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "failed rc: %d", rc); + goto read_seq_fail; + } + + for (i = 0; i < num_byte; i++) + data[i] = buf[i]; + +read_seq_fail: + kfree(buf); + buf = NULL; + return rc; +} + +static int32_t cam_qup_i2c_compare(struct i2c_client *client, + uint32_t addr, uint32_t data, uint16_t data_mask, + enum camera_sensor_i2c_type data_type, + enum camera_sensor_i2c_type addr_type) +{ + int32_t rc; + uint32_t reg_data = 0; + + rc = cam_qup_i2c_read(client, addr, ®_data, + addr_type, data_type); + if (rc < 0) + return rc; + + reg_data = reg_data & 0xFFFF; + if (data != (reg_data & ~data_mask)) + return I2C_COMPARE_MISMATCH; + + return I2C_COMPARE_MATCH; +} + 
+int32_t cam_qup_i2c_poll(struct i2c_client *client, + uint32_t addr, uint16_t data, uint16_t data_mask, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + uint32_t delay_ms) +{ + int32_t rc = 0; + int i = 0; + + if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) { + CAM_ERR(CAM_SENSOR_IO, "invalid delay = %d max_delay = %d", + delay_ms, MAX_POLL_DELAY_MS); + return -EINVAL; + } + + if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) + return -EINVAL; + + for (i = 0; i < delay_ms; i++) { + rc = cam_qup_i2c_compare(client, + addr, data, data_mask, data_type, addr_type); + if (rc == I2C_COMPARE_MATCH) + return rc; + + usleep_range(1000, 1010); + } + /* If rc is MISMATCH then read is successful but poll is failure */ + if (rc == I2C_COMPARE_MISMATCH) + CAM_ERR(CAM_SENSOR_IO, "poll failed rc=%d(non-fatal)", rc); + if (rc < 0) + CAM_ERR(CAM_SENSOR_IO, "poll failed rc=%d", rc); + + return rc; +} + +static inline int32_t cam_qup_i2c_write_optimized(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting, + struct i2c_msg *msgs, + unsigned char *buf) +{ + int32_t rc = 0; + uint16_t len = 0; + struct cam_sensor_i2c_reg_array *reg_setting_previous = NULL; + uint16_t offset = 0; + struct cam_sensor_i2c_reg_array *reg_setting; + enum camera_sensor_i2c_type addr_type; + enum camera_sensor_i2c_type data_type; + bool isLookAhead = false; + int curr_mindx = 0; + int i = 0; + + if (!client || !write_setting || !client->qup_client) + return -EINVAL; + + reg_setting = write_setting->reg_setting; + addr_type = write_setting->addr_type; + data_type = write_setting->data_type; + + while (i < write_setting->size) { + CAM_DBG(CAM_SENSOR_IO, "reg addr = 0x%x data type: %d", + reg_setting->reg_addr, data_type); + + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[offset] = 
reg_setting->reg_addr; + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len, buf[offset]); + offset += 1; + len = 1; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[offset] = reg_setting->reg_addr >> 8; + buf[offset + 1] = reg_setting->reg_addr; + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len, buf[offset]); + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len+1, buf[offset+1]); + offset += 2; + len = 2; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[offset] = reg_setting->reg_addr >> 16; + buf[offset+1] = reg_setting->reg_addr >> 8; + buf[offset+2] = reg_setting->reg_addr; + offset += 3; + len = 3; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[offset] = reg_setting->reg_addr >> 24; + buf[offset+1] = reg_setting->reg_addr >> 16; + buf[offset+2] = reg_setting->reg_addr >> 8; + buf[offset+3] = reg_setting->reg_addr; + offset += 4; + len = 4; + } else { + CAM_ERR(CAM_SENSOR_IO, "Invalid I2C addr type"); + rc = -EINVAL; + return rc; + } + + do { + CAM_DBG(CAM_SENSOR_IO, "reg addr: 0x%x Data: 0x%x", + reg_setting->reg_addr, reg_setting->reg_data); + if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[offset] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[offset]); + offset += 1; + len += 1; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[offset] = reg_setting->reg_data >> 8; + buf[offset+1] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[offset]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+1, buf[offset+1]); + offset += 2; + len += 2; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[offset] = reg_setting->reg_data >> 16; + buf[offset + 1] = reg_setting->reg_data >> 8; + buf[offset + 2] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[offset]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+1, buf[offset+1]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+2, buf[offset+2]); + offset += 3; + len += 3; + } else if (data_type == 
CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[offset] = reg_setting->reg_data >> 24; + buf[offset + 1] = reg_setting->reg_data >> 16; + buf[offset + 2] = reg_setting->reg_data >> 8; + buf[offset + 3] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[offset]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+1, buf[offset+1]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+2, buf[offset+2]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+3, buf[offset+3]); + offset += 4; + len += 4; + } else { + CAM_ERR(CAM_SENSOR_IO, "Invalid Data Type"); + rc = -EINVAL; + return rc; + } + reg_setting_previous = reg_setting; + i++; + if (i < write_setting->size) { + reg_setting++; + isLookAhead = + ((reg_setting_previous->reg_addr + 1) == + reg_setting->reg_addr) ? true : false; + } else { + break; + } + + if (i2c_lookahead_en == 0) + isLookAhead = false; + + } while (isLookAhead); + + CAM_DBG(CAM_SENSOR_IO, "offset: %d len: %d curr_mindx: %d", + offset, len, curr_mindx); + cam_qup_i2c_txdata_fill(client, &buf[offset - len], len, msgs, curr_mindx); + + len = 0; + curr_mindx++; + } + + CAM_DBG(CAM_SENSOR_IO, "Original reg writes: %d optimized Writes: %d", + write_setting->size, curr_mindx); + return curr_mindx; +} + +int32_t cam_qup_i2c_write_table(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting) +{ + int32_t rc = -EINVAL; + struct i2c_msg *msgs = NULL; + unsigned char *buf = NULL; + int i2c_msg_size = 0; + + if (!client || !write_setting || !client->qup_client) + return rc; + + msgs = CAM_MEM_ZALLOC_ARRAY(write_setting->size, sizeof(struct i2c_msg), GFP_KERNEL); + if (!msgs) { + CAM_ERR(CAM_SENSOR_IO, "Message Buffer memory allocation failed"); + return -ENOMEM; + } + + buf = kzalloc(write_setting->size*I2C_REG_MAX_BUF_SIZE, GFP_KERNEL|GFP_DMA); + if (!buf) { + CAM_ERR(CAM_SENSOR_IO, "Buffer memory allocation failed"); + CAM_MEM_FREE(msgs); + return -ENOMEM; + } + + if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || 
write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))) { + rc = -EINVAL; + goto deallocate_buffer; + } + + i2c_msg_size = cam_qup_i2c_write_optimized(client, write_setting, msgs, buf); + if (i2c_msg_size < 0) { + rc = i2c_msg_size; + goto deallocate_buffer; + } + + rc = i2c_transfer(client->qup_client->i2c_client->adapter, msgs, i2c_msg_size); + if (write_setting->delay > 20) + msleep(write_setting->delay); + else if (write_setting->delay) + usleep_range(write_setting->delay * 1000, (write_setting->delay + * 1000) + 1000); + + if (rc == i2c_msg_size) + rc = 0; + else { + CAM_ERR(CAM_SENSOR_IO, "i2c transfer failed, i2c_msg_size:%d rc:%d", + i2c_msg_size, rc); + rc = -EIO; + } + +deallocate_buffer: + kfree(buf); + CAM_MEM_FREE(msgs); + + return rc; +} + +static int32_t cam_qup_i2c_write_burst(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting) +{ + int i; + int32_t rc = 0; + uint16_t len = 0; + unsigned char *buf = NULL; + struct cam_sensor_i2c_reg_array *reg_setting; + enum camera_sensor_i2c_type addr_type; + enum camera_sensor_i2c_type data_type; + + if (!client->qup_client) { + CAM_ERR(CAM_SENSOR_IO, "qup_client is NULL"); + return -EINVAL; + } + + buf = kzalloc((write_setting->addr_type + + (write_setting->size * write_setting->data_type)), + GFP_KERNEL); + + if (!buf) { + CAM_ERR(CAM_SENSOR_IO, "BUF is NULL"); + return -ENOMEM; + } + + reg_setting = write_setting->reg_setting; + addr_type = write_setting->addr_type; + data_type = write_setting->data_type; + + CAM_DBG(CAM_SENSOR_IO, "reg addr = 0x%x data type: %d", + reg_setting->reg_addr, data_type); + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[0] = reg_setting->reg_addr; + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len, buf[len]); + len = 1; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[0] = reg_setting->reg_addr >> 8; + buf[1] 
= reg_setting->reg_addr; + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len, buf[len]); + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len+1, buf[len+1]); + len = 2; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[0] = reg_setting->reg_addr >> 16; + buf[1] = reg_setting->reg_addr >> 8; + buf[2] = reg_setting->reg_addr; + len = 3; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[0] = reg_setting->reg_addr >> 24; + buf[1] = reg_setting->reg_addr >> 16; + buf[2] = reg_setting->reg_addr >> 8; + buf[3] = reg_setting->reg_addr; + len = 4; + } else { + CAM_ERR(CAM_SENSOR_IO, "Invalid I2C addr type"); + rc = -EINVAL; + goto free_res; + } + + for (i = 0; i < write_setting->size; i++) { + if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[len] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len, buf[len]); + len += 1; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[len] = reg_setting->reg_data >> 8; + buf[len+1] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len, buf[len]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+1, buf[len+1]); + len += 2; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[len] = reg_setting->reg_data >> 16; + buf[len + 1] = reg_setting->reg_data >> 8; + buf[len + 2] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len, buf[len]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+1, buf[len+1]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+2, buf[len+2]); + len += 3; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[len] = reg_setting->reg_data >> 24; + buf[len + 1] = reg_setting->reg_data >> 16; + buf[len + 2] = reg_setting->reg_data >> 8; + buf[len + 3] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len, buf[len]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+1, buf[len+1]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+2, buf[len+2]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+3, buf[len+3]); 
+ len += 4; + } else { + CAM_ERR(CAM_SENSOR_IO, "Invalid Data Type"); + rc = -EINVAL; + goto free_res; + } + reg_setting++; + } + + if (len > (write_setting->addr_type + + (write_setting->size * write_setting->data_type))) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Length: %u | Expected length: %u", + len, (write_setting->addr_type + + (write_setting->size * write_setting->data_type))); + rc = -EINVAL; + goto free_res; + } + + rc = cam_qup_i2c_txdata(client, buf, len); + if (rc < 0) + CAM_ERR(CAM_SENSOR_IO, "failed rc: %d", rc); + +free_res: + kfree(buf); + return rc; +} + +int32_t cam_qup_i2c_write_continuous_table(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_settings, + uint8_t cam_sensor_i2c_write_flag) +{ + int32_t rc = 0; + + if (!client || !write_settings) + return -EINVAL; + + if ((write_settings->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_settings->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || (write_settings->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_settings->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))) + return -EINVAL; + + if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_BURST) + rc = cam_qup_i2c_write_burst(client, write_settings); + else if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_SEQ) + rc = cam_qup_i2c_write_burst(client, write_settings); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i3c.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i3c.c new file mode 100644 index 0000000000..2d01ff255e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i3c.c @@ -0,0 +1,600 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_sensor_i3c.h" +#include "cam_sensor_io.h" +#include "cam_mem_mgr_api.h" + +#define I3C_REG_MAX_BUF_SIZE 8 + +static int i3c_lookahead_en = 1; +module_param(i3c_lookahead_en, uint, 0644); +MODULE_PARM_DESC(i3c_lookahead_en, + "Flag to enable/disable BURST/Lookahead register packing for I3C QUP"); + +static int cam_qup_i3c_rxdata(struct i3c_device *dev_client, unsigned char *rxdata, + enum camera_sensor_i2c_type addr_type, int data_length) +{ + int rc; + uint32_t us = 0; + struct i3c_priv_xfer read_buf[2] = { + { + .rnw = 0, + .len = addr_type, + .data.out = rxdata, + }, + { + .rnw = 1, + .len = data_length, + .data.in = rxdata, + }, + }; + + rc = i3c_device_do_priv_xfers(dev_client, read_buf, ARRAY_SIZE(read_buf)); + if (rc == -ENOTCONN) { + while (us < CAM_I3C_DEV_PROBE_TIMEOUT_US) { + usleep_range(1000, 1005); + rc = i3c_device_do_priv_xfers(dev_client, read_buf, ARRAY_SIZE(read_buf)); + if (rc != -ENOTCONN) + break; + us += 1000; + } + + if (rc) + CAM_ERR(CAM_SENSOR_IO, "Retry Failed i3c_read: rc = %d, us = %d", rc, us); + } else if (rc) + CAM_ERR(CAM_SENSOR_IO, "Failed with i3c_read: rc = %d", rc); + + return rc; +} + +static inline void cam_qup_i3c_txdata_fill(struct camera_io_master *dev_client, + unsigned char *txdata, uint16_t length, + struct i3c_priv_xfer *msgs, int curr_mindx) +{ + msgs[curr_mindx].rnw = 0; + msgs[curr_mindx].len = length; + msgs[curr_mindx].data.out = txdata; +} + +static int cam_qup_i3c_txdata(struct camera_io_master *dev_client, unsigned char *txdata, + uint16_t length) +{ + int rc; + uint32_t us = 0; + struct i3c_priv_xfer write_buf = { + .rnw = 0, + .len = length, + .data.out = txdata, + }; + + if (!dev_client->qup_client) { + CAM_ERR(CAM_SENSOR_IO, "qup_client is NULL"); + return -EINVAL; + } + + rc = i3c_device_do_priv_xfers(dev_client->qup_client->i3c_client, &write_buf, 1); + if (rc == -ENOTCONN) { + while (us < CAM_I3C_DEV_PROBE_TIMEOUT_US) { + usleep_range(1000, 1005); + rc = i3c_device_do_priv_xfers( + 
dev_client->qup_client->i3c_client, &write_buf, 1); + if (rc != -ENOTCONN) + break; + us += 1000; + } + + if (rc) + CAM_ERR(CAM_SENSOR_IO, "Retry Failed i3c_write: rc = %d, us = %d", rc, us); + } else if (rc) + CAM_ERR(CAM_SENSOR_IO, "Failed with i3c_write: rc = %d", rc); + + return rc; +} + +int cam_qup_i3c_read(struct i3c_device *client, uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type) +{ + int rc; + unsigned char *buf; + + if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID) + || (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) + || (data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID) + || (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) { + CAM_ERR(CAM_SENSOR_IO, "Failed with addr/data_type verification"); + return -EINVAL; + } + + buf = kzalloc(addr_type + data_type, GFP_DMA | GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[0] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[0] = addr >> 8; + buf[1] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[0] = addr >> 16; + buf[1] = addr >> 8; + buf[2] = addr; + } else { + buf[0] = addr >> 24; + buf[1] = addr >> 16; + buf[2] = addr >> 8; + buf[3] = addr; + } + + rc = cam_qup_i3c_rxdata(client, buf, addr_type, data_type); + if (rc) { + CAM_ERR(CAM_SENSOR_IO, "failed rc: %d", rc); + goto read_fail; + } + + if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) + *data = buf[0]; + else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) + *data = (buf[0] << 8) | buf[1]; + else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) + *data = (buf[0] << 16) | (buf[1] << 8) | buf[2]; + else + *data = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; + + CAM_DBG(CAM_SENSOR_IO, "addr = 0x%x data: 0x%x", addr, *data); +read_fail: + kfree(buf); + return rc; +} + +int cam_qup_i3c_read_seq(struct i3c_device *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + uint32_t num_byte) +{ + 
int rc; + unsigned char *buf; + int i; + + if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) { + CAM_ERR(CAM_SENSOR_IO, "Failed with addr_type verification"); + return -EFAULT; + } + + if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) { + CAM_ERR(CAM_SENSOR_IO, "num_byte:0x%x max supported:0x%x", + num_byte, I2C_REG_DATA_MAX); + return -EFAULT; + } + + buf = kzalloc(addr_type + num_byte, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[0] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[0] = addr >> BITS_PER_BYTE; + buf[1] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[0] = addr >> 16; + buf[1] = addr >> 8; + buf[2] = addr; + } else { + buf[0] = addr >> 24; + buf[1] = addr >> 16; + buf[2] = addr >> 8; + buf[3] = addr; + } + + rc = cam_qup_i3c_rxdata(client, buf, addr_type, num_byte); + if (rc) { + CAM_ERR(CAM_SENSOR_IO, "failed rc: %d", rc); + goto read_seq_fail; + } + + for (i = 0; i < num_byte; i++) + data[i] = buf[i]; + +read_seq_fail: + kfree(buf); + return rc; +} + +static int cam_qup_i3c_compare(struct i3c_device *client, + uint32_t addr, uint32_t data, uint16_t data_mask, + enum camera_sensor_i2c_type data_type, + enum camera_sensor_i2c_type addr_type) +{ + int rc; + uint32_t reg_data; + + rc = cam_qup_i3c_read(client, addr, ®_data, + addr_type, data_type); + if (rc < 0) + return rc; + + reg_data = reg_data & 0xFFFF; + if (data != (reg_data & ~data_mask)) + return I2C_COMPARE_MISMATCH; + + return I2C_COMPARE_MATCH; +} + +int cam_qup_i3c_poll(struct i3c_device *client, + uint32_t addr, uint16_t data, uint16_t data_mask, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type, + uint32_t delay_ms) +{ + int rc; + int i; + + if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) { + CAM_ERR(CAM_SENSOR_IO, "invalid delay = %d max_delay = %d", + delay_ms, MAX_POLL_DELAY_MS); + return -EINVAL; + } + 
+ if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) + return -EINVAL; + + for (i = 0; i < delay_ms; i++) { + rc = cam_qup_i3c_compare(client, + addr, data, data_mask, data_type, addr_type); + if (rc == I2C_COMPARE_MATCH) + return rc; + + usleep_range(1000, 1010); + } + /* If rc is MISMATCH then read is successful but poll is failure */ + if (rc == I2C_COMPARE_MISMATCH) + CAM_ERR(CAM_SENSOR_IO, "poll failed rc=%d(non-fatal)", rc); + if (rc < 0) + CAM_ERR(CAM_SENSOR_IO, "poll failed rc=%d", rc); + + return rc; +} + +static inline int32_t cam_qup_i3c_write_optimized(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting, + struct i3c_priv_xfer *msgs, + unsigned char *buf) +{ + int32_t rc = 0; + uint16_t len = 0; + struct cam_sensor_i2c_reg_array *reg_setting_previous = NULL; + uint16_t offset = 0; + struct cam_sensor_i2c_reg_array *reg_setting; + enum camera_sensor_i2c_type addr_type; + enum camera_sensor_i2c_type data_type; + bool isLookAhead = false; + int curr_mindx = 0; + int i = 0; + + if (!client || !write_setting) + return -EINVAL; + + reg_setting = write_setting->reg_setting; + addr_type = write_setting->addr_type; + data_type = write_setting->data_type; + + while (i < write_setting->size) { + CAM_DBG(CAM_SENSOR_IO, "reg addr = 0x%x data type: %d", + reg_setting->reg_addr, data_type); + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[offset] = reg_setting->reg_addr; + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len, buf[offset]); + offset += 1; + len = 1; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[offset] = reg_setting->reg_addr >> 8; + buf[offset + 1] = reg_setting->reg_addr; + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len, buf[offset]); + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len+1, buf[offset+1]); + offset += 2; + len = 2; + } else if (addr_type == 
CAMERA_SENSOR_I2C_TYPE_3B) { + buf[offset] = reg_setting->reg_addr >> 16; + buf[offset+1] = reg_setting->reg_addr >> 8; + buf[offset+2] = reg_setting->reg_addr; + offset += 3; + len = 3; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[offset] = reg_setting->reg_addr >> 24; + buf[offset+1] = reg_setting->reg_addr >> 16; + buf[offset+2] = reg_setting->reg_addr >> 8; + buf[offset+3] = reg_setting->reg_addr; + offset += 4; + len = 4; + } else { + CAM_ERR(CAM_SENSOR_IO, "Invalid I2C addr type"); + rc = -EINVAL; + return rc; + + } + + do { + CAM_DBG(CAM_SENSOR_IO, "reg addr: 0x%x Data: 0x%x", + reg_setting->reg_addr, reg_setting->reg_data); + + if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[offset] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[offset]); + offset += 1; + len += 1; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[offset] = reg_setting->reg_data >> 8; + buf[offset+1] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[offset]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+1, buf[offset+1]); + offset += 2; + len += 2; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[offset] = reg_setting->reg_data >> 16; + buf[offset + 1] = reg_setting->reg_data >> 8; + buf[offset + 2] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[offset]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+1, buf[offset+1]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+2, buf[offset+2]); + offset += 3; + len += 3; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[offset] = reg_setting->reg_data >> 24; + buf[offset + 1] = reg_setting->reg_data >> 16; + buf[offset + 2] = reg_setting->reg_data >> 8; + buf[offset + 3] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[offset]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+1, buf[offset+1]); + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+2, buf[offset+2]); + 
CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len+3, buf[offset+3]); + offset += 4; + len += 4; + } else { + CAM_ERR(CAM_SENSOR_IO, "Invalid Data Type"); + rc = -EINVAL; + return rc; + } + reg_setting_previous = reg_setting; + i++; + if (i < write_setting->size) { + reg_setting++; + isLookAhead = + ((reg_setting_previous->reg_addr + 1) == + reg_setting->reg_addr) ? true : false; + } else { + break; + } + if (i3c_lookahead_en == 0) + isLookAhead = false; + + } while (isLookAhead); + + CAM_DBG(CAM_SENSOR_IO, "offset: %d len: %d curr_mindx: %d", + offset, len, curr_mindx); + cam_qup_i3c_txdata_fill(client, &buf[offset - len], len, msgs, curr_mindx); + + len = 0; + curr_mindx++; + } + + CAM_DBG(CAM_SENSOR_IO, "Original reg writes: %d optimized Writes: %d", + write_setting->size, curr_mindx); + return curr_mindx; +} + +int cam_qup_i3c_write_table(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting) +{ + int rc = -EINVAL; + uint32_t us = 0; + struct i3c_priv_xfer *msgs = NULL; + unsigned char *buf = NULL; + int i3c_msg_size = 0; + + if (!client || !write_setting || !client->qup_client) + return -EINVAL; + + msgs = CAM_MEM_ZALLOC_ARRAY(write_setting->size, sizeof(struct i3c_priv_xfer), GFP_KERNEL); + if (!msgs) { + CAM_ERR(CAM_SENSOR_IO, "Message Buffer memory allocation failed"); + return -ENOMEM; + } + + buf = kzalloc(write_setting->size*I3C_REG_MAX_BUF_SIZE, GFP_KERNEL|GFP_DMA); + if (!buf) { + CAM_ERR(CAM_SENSOR_IO, "Buffer memory allocation failed"); + CAM_MEM_FREE(msgs); + return -ENOMEM; + } + + if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))) { + rc = -EINVAL; + goto deallocate_buffer; + } + + i3c_msg_size = cam_qup_i3c_write_optimized(client, write_setting, msgs, buf); + if (i3c_msg_size < 0) { + rc = i3c_msg_size; + goto 
deallocate_buffer; + } + + rc = i3c_device_do_priv_xfers(client->qup_client->i3c_client, msgs, i3c_msg_size); + if (rc == -ENOTCONN) { + while (us < CAM_I3C_DEV_PROBE_TIMEOUT_US) { + usleep_range(1000, 1005); + rc = i3c_device_do_priv_xfers(client->qup_client->i3c_client, + msgs, write_setting->size); + if (rc != -ENOTCONN) + break; + us += 1000; + } + + if (rc) + CAM_ERR(CAM_SENSOR_IO, "Retry Failed i3c_write: rc = %d, us = %d", rc, us); + } else if (rc) + CAM_ERR(CAM_SENSOR_IO, "Failed with i3c_write: rc = %d", rc); + + if (write_setting->delay > 20) + msleep(write_setting->delay); + else if (write_setting->delay) + usleep_range(write_setting->delay * 1000, (write_setting->delay + * 1000) + 1000); + +deallocate_buffer: + kfree(buf); + CAM_MEM_FREE(msgs); + + return rc; +} + +static int cam_qup_i3c_write_burst(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting) +{ + int i; + int rc; + uint16_t len = 0; + unsigned char *buf; + struct cam_sensor_i2c_reg_array *reg_setting; + enum camera_sensor_i2c_type addr_type; + enum camera_sensor_i2c_type data_type; + + buf = kzalloc((write_setting->addr_type + + (write_setting->size * write_setting->data_type)), + GFP_DMA | GFP_KERNEL); + + if (!buf) { + CAM_ERR(CAM_SENSOR_IO, "BUF is NULL"); + return -ENOMEM; + } + + reg_setting = write_setting->reg_setting; + addr_type = write_setting->addr_type; + data_type = write_setting->data_type; + + CAM_DBG(CAM_SENSOR_IO, "reg addr = 0x%x data type: %d", + reg_setting->reg_addr, data_type); + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[0] = reg_setting->reg_addr; + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len, buf[len]); + len = 1; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[0] = reg_setting->reg_addr >> 8; + buf[1] = reg_setting->reg_addr; + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len, buf[len]); + CAM_DBG(CAM_SENSOR_IO, "byte %d: 0x%x", len+1, buf[len+1]); + len = 2; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) { + 
buf[0] = reg_setting->reg_addr >> 16; + buf[1] = reg_setting->reg_addr >> 8; + buf[2] = reg_setting->reg_addr; + len = 3; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[0] = reg_setting->reg_addr >> 24; + buf[1] = reg_setting->reg_addr >> 16; + buf[2] = reg_setting->reg_addr >> 8; + buf[3] = reg_setting->reg_addr; + len = 4; + } else { + CAM_ERR(CAM_SENSOR_IO, "Invalid I2C addr type"); + rc = -EINVAL; + goto free_res; + } + + for (i = 0; i < write_setting->size; i++) { + if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[len] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len, buf[len]); + len += 1; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[len] = reg_setting->reg_data >> 8; + buf[len+1] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len, buf[len]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+1, buf[len+1]); + len += 2; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[len] = reg_setting->reg_data >> 16; + buf[len + 1] = reg_setting->reg_data >> 8; + buf[len + 2] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len, buf[len]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+1, buf[len+1]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+2, buf[len+2]); + len += 3; + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) { + buf[len] = reg_setting->reg_data >> 24; + buf[len + 1] = reg_setting->reg_data >> 16; + buf[len + 2] = reg_setting->reg_data >> 8; + buf[len + 3] = reg_setting->reg_data; + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len, buf[len]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+1, buf[len+1]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+2, buf[len+2]); + CAM_DBG(CAM_SENSOR_IO, + "Byte %d: 0x%x", len+3, buf[len+3]); + len += 4; + } else { + CAM_ERR(CAM_SENSOR_IO, "Invalid Data Type"); + rc = -EINVAL; + goto free_res; + } + reg_setting++; + } + + if (len > (write_setting->addr_type + + (write_setting->size * 
write_setting->data_type))) { + CAM_ERR(CAM_SENSOR_IO, "Invalid Length: %u | Expected length: %u", + len, (write_setting->addr_type + + (write_setting->size * write_setting->data_type))); + rc = -EINVAL; + goto free_res; + } + + rc = cam_qup_i3c_txdata(client, buf, len); + if (rc < 0) + CAM_ERR(CAM_SENSOR_IO, "failed rc: %d", rc); + +free_res: + kfree(buf); + return rc; +} + +int cam_qup_i3c_write_continuous_table(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_settings, + uint8_t cam_sensor_i2c_write_flag) +{ + int rc = 0; + + if (!client || !write_settings) + return -EINVAL; + + if ((write_settings->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_settings->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || (write_settings->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || write_settings->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))) + return -EINVAL; + + if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_BURST) + rc = cam_qup_i3c_write_burst(client, write_settings); + else if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_SEQ) + rc = cam_qup_i3c_write_burst(client, write_settings); + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c new file mode 100644 index 0000000000..2ccbe0eb13 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c @@ -0,0 +1,625 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_sensor_spi.h" +#include "cam_debug_util.h" + +static int cam_spi_txfr(struct spi_device *spi, char *txbuf, + char *rxbuf, int num_byte) +{ + struct spi_transfer txfr; + struct spi_message msg; + + memset(&txfr, 0, sizeof(txfr)); + txfr.tx_buf = txbuf; + txfr.rx_buf = rxbuf; + txfr.len = num_byte; + spi_message_init(&msg); + spi_message_add_tail(&txfr, &msg); + + return spi_sync(spi, &msg); +} + +static int cam_spi_txfr_read(struct spi_device *spi, char *txbuf, + char *rxbuf, int txlen, int rxlen) +{ + struct spi_transfer tx; + struct spi_transfer rx; + struct spi_message m; + + memset(&tx, 0, sizeof(tx)); + memset(&rx, 0, sizeof(rx)); + tx.tx_buf = txbuf; + rx.rx_buf = rxbuf; + tx.len = txlen; + rx.len = rxlen; + spi_message_init(&m); + spi_message_add_tail(&tx, &m); + spi_message_add_tail(&rx, &m); + return spi_sync(spi, &m); +} + +/** + * cam_set_addr() - helper function to set transfer address + * @addr: device address + * @addr_len: the addr field length of an instruction + * @type: type (i.e. byte-length) of @addr + * @str: shifted address output, must be zeroed when passed in + * + * This helper function sets @str based on the addr field length of an + * instruction and the data length. 
+ */ +static void cam_set_addr(uint32_t addr, uint8_t addr_len, + enum camera_sensor_i2c_type addr_type, + char *str) +{ + if (!addr_len) + return; + + if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) { + str[0] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + str[0] = addr >> 8; + str[1] = addr; + } else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) { + str[0] = addr >> 16; + str[1] = addr >> 8; + str[2] = addr; + } else { + str[0] = addr >> 24; + str[1] = addr >> 16; + str[2] = addr >> 8; + str[3] = addr; + } +} + +/** + * cam_spi_tx_helper() - wrapper for SPI transaction + * @client: io client + * @inst: inst of this transaction + * @addr: device addr following the inst + * @data: output byte array (could be NULL) + * @num_byte: size of @data + * @tx, rx: optional transfer buffer. It must be at least header + * + @num_byte long. + * + * This is the core function for SPI transaction, except for writes. It first + * checks address type, then allocates required memory for tx/rx buffers. + * It sends out , and optionally receives @num_byte of response, + * if @data is not NULL. This function does not check for wait conditions, + * and will return immediately once bus transaction finishes. + * + * This function will allocate buffers of header + @num_byte long. For + * large transfers, the allocation could fail. External buffer @tx, @rx + * should be passed in to bypass allocation. The size of buffer should be + * at least header + num_byte long. Since buffer is managed externally, + * @data will be ignored, and read results will be in @rx. + * @tx, @rx also can be used for repeated transfers to improve performance. 
+ */ +static int32_t cam_spi_tx_helper(struct camera_io_master *client, + struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + uint32_t num_byte, char *tx, char *rx) +{ + int32_t rc = -EINVAL; + struct spi_device *spi = client->spi_client->spi_master; + struct device *dev = NULL; + char *ctx = NULL, *crx = NULL; + uint32_t len, hlen; + uint8_t retries = client->spi_client->retries; + uint32_t txr = 0, rxr = 0; + void *vaddr_tx = NULL; + void *vaddr_rx = NULL; + + hlen = cam_camera_spi_get_hlen(inst); + len = hlen + num_byte; + dev = &(spi->dev); + + if (!dev) { + CAM_ERR(CAM_SENSOR_IO, "Invalid arguments"); + return -EINVAL; + } + + if (tx) { + ctx = tx; + } else { + txr = len; + vaddr_tx = kzalloc(txr, GFP_KERNEL | GFP_DMA); + if (!vaddr_tx) { + CAM_ERR(CAM_SENSOR_IO, + "Fail to allocate Memory: len: %u", txr); + return -ENOMEM; + } + + ctx = (char *)vaddr_tx; + } + + if (num_byte) { + if (rx) { + crx = rx; + } else { + rxr = len; + vaddr_rx = kzalloc(rxr, GFP_KERNEL | GFP_DMA); + if (!vaddr_rx) { + if (!tx) + kfree(vaddr_tx); + CAM_ERR(CAM_SENSOR_IO, + "Fail to allocate memory: len: %u", + rxr); + return -ENOMEM; + } + crx = (char *)vaddr_rx; + } + } else { + crx = NULL; + } + + ctx[0] = inst->opcode; + cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1); + while ((rc = cam_spi_txfr(spi, ctx, crx, len)) && retries) { + retries--; + msleep(client->spi_client->retry_delay); + } + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "failed: spi txfr rc %d", rc); + goto out; + } + if (data && num_byte && !rx) + memcpy(data, crx + hlen, num_byte); + +out: + if (!tx) { + kfree(vaddr_tx); + vaddr_tx = NULL; + } + if (!rx) { + kfree(vaddr_rx); + vaddr_rx = NULL; + } + return rc; +} + +static int32_t cam_spi_tx_read(struct camera_io_master *client, + struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + uint32_t num_byte, char *tx, char *rx) +{ + int32_t rc = -EINVAL; + struct 
spi_device *spi = client->spi_client->spi_master; + char *ctx = NULL, *crx = NULL; + uint32_t hlen; + uint8_t retries = client->spi_client->retries; + + hlen = cam_camera_spi_get_hlen(inst); + if (tx) { + ctx = tx; + } else { + ctx = kzalloc(hlen, GFP_KERNEL | GFP_DMA); + if (!ctx) + return -ENOMEM; + } + if (num_byte) { + if (rx) { + crx = rx; + } else { + crx = kzalloc(num_byte, GFP_KERNEL | GFP_DMA); + if (!crx) { + if (!tx) + kfree(ctx); + return -ENOMEM; + } + } + } else { + crx = NULL; + } + + ctx[0] = inst->opcode; + cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1); + + CAM_DBG(CAM_SENSOR_IO, "tx(%u): %02x %02x %02x %02x", hlen, ctx[0], + ctx[1], ctx[2], ctx[3]); + while ((rc = cam_spi_txfr_read(spi, ctx, crx, hlen, num_byte)) + && retries) { + retries--; + msleep(client->spi_client->retry_delay); + } + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "failed %d", rc); + goto out; + } + if (data && num_byte && !rx) + memcpy(data, crx, num_byte); +out: + if (!tx) + kfree(ctx); + if (!rx) + kfree(crx); + return rc; +} + +int cam_spi_read(struct camera_io_master *client, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type) +{ + int rc = -EINVAL; + uint8_t temp[CAMERA_SENSOR_I2C_TYPE_MAX]; + + if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX + || data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID + || data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) { + CAM_ERR(CAM_SENSOR_IO, "Failed with addr/data_type verification"); + return rc; + } + + rc = cam_spi_tx_read(client, + &client->spi_client->cmd_tbl.read, addr, &temp[0], + addr_type, data_type, NULL, NULL); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "failed %d", rc); + return rc; + } + + if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) + *data = temp[0]; + else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) + *data = (temp[0] << BITS_PER_BYTE) | temp[1]; + else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) + *data = (temp[0] << 16 | 
temp[1] << 8 | temp[2]); + else + *data = (temp[0] << 24 | temp[1] << 16 | temp[2] << 8 | + temp[3]); + + CAM_DBG(CAM_SENSOR_IO, "addr 0x%x, data %u", addr, *data); + return rc; +} + +int32_t cam_spi_read_seq(struct camera_io_master *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, int32_t num_bytes) +{ + if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID) + || (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) { + CAM_ERR(CAM_SENSOR_IO, "Failed with addr_type verification"); + return -EINVAL; + } + + if (num_bytes == 0) { + CAM_ERR(CAM_SENSOR_IO, "num_byte: 0x%x", num_bytes); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR_IO, "Read Seq addr: 0x%x NB:%d", + addr, num_bytes); + return cam_spi_tx_helper(client, + &client->spi_client->cmd_tbl.read_seq, addr, data, + addr_type, num_bytes, NULL, NULL); +} + +int cam_spi_query_id(struct camera_io_master *client, + uint32_t addr, enum camera_sensor_i2c_type addr_type, + uint8_t *data, uint32_t num_byte) +{ + CAM_DBG(CAM_SENSOR_IO, "SPI Queryid : 0x%x, addr: 0x%x", + client->spi_client->cmd_tbl.query_id, addr); + return cam_spi_tx_helper(client, + &client->spi_client->cmd_tbl.query_id, + addr, data, addr_type, num_byte, NULL, NULL); +} + +static int32_t cam_spi_read_status_reg( + struct camera_io_master *client, uint8_t *status, + enum camera_sensor_i2c_type addr_type) +{ + struct cam_camera_spi_inst *rs = + &client->spi_client->cmd_tbl.read_status; + + if (rs->addr_len != 0) { + CAM_ERR(CAM_SENSOR_IO, "not implemented yet"); + return -ENXIO; + } + return cam_spi_tx_helper(client, rs, 0, status, + addr_type, 1, NULL, NULL); +} + +static int32_t cam_spi_device_busy(struct camera_io_master *client, + uint8_t *busy, enum camera_sensor_i2c_type addr_type) +{ + int rc; + uint8_t st = 0; + + rc = cam_spi_read_status_reg(client, &st, addr_type); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "failed to read status reg"); + return rc; + } + *busy = st & client->spi_client->busy_mask; + return 0; +} + +static int32_t 
cam_spi_wait(struct camera_io_master *client, + struct cam_camera_spi_inst *inst, + enum camera_sensor_i2c_type addr_type) +{ + uint8_t busy; + int i, rc; + + CAM_DBG(CAM_SENSOR_IO, "op 0x%x wait start", inst->opcode); + for (i = 0; i < inst->delay_count; i++) { + rc = cam_spi_device_busy(client, &busy, addr_type); + if (rc < 0) + return rc; + if (!busy) + break; + msleep(inst->delay_intv); + CAM_DBG(CAM_SENSOR_IO, "op 0x%x wait", inst->opcode); + } + if (i > inst->delay_count) { + CAM_ERR(CAM_SENSOR_IO, "op %x timed out", inst->opcode); + return -ETIMEDOUT; + } + CAM_DBG(CAM_SENSOR_IO, "op %x finished", inst->opcode); + return 0; +} + +static int32_t cam_spi_write_enable(struct camera_io_master *client, + enum camera_sensor_i2c_type addr_type) +{ + struct cam_camera_spi_inst *we = + &client->spi_client->cmd_tbl.write_enable; + int rc; + + if (we->opcode == 0) + return 0; + if (we->addr_len != 0) { + CAM_ERR(CAM_SENSOR_IO, "not implemented yet"); + return -EINVAL; + } + rc = cam_spi_tx_helper(client, we, 0, NULL, addr_type, + 0, NULL, NULL); + if (rc < 0) + CAM_ERR(CAM_SENSOR_IO, "write enable failed"); + return rc; +} + +/** + * cam_spi_page_program() - core function to perform write + * @client: need for obtaining SPI device + * @addr: address to program on device + * @data: data to write + * @len: size of data + * @tx: tx buffer, size >= header + len + * + * This function performs SPI write, and has no boundary check. Writing range + * should not cross page boundary, or data will be corrupted. Transaction is + * guaranteed to be finished when it returns. This function should never be + * used outside cam_spi_write_seq(). 
+ */ +static int32_t cam_spi_page_program(struct camera_io_master *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + uint16_t len, uint8_t *tx) +{ + int rc; + struct cam_camera_spi_inst *pg = + &client->spi_client->cmd_tbl.page_program; + struct spi_device *spi = client->spi_client->spi_master; + uint8_t retries = client->spi_client->retries; + uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len; + + CAM_DBG(CAM_SENSOR_IO, "addr 0x%x, size 0x%x", addr, len); + rc = cam_spi_write_enable(client, addr_type); + if (rc < 0) + return rc; + memset(tx, 0, header_len); + tx[0] = pg->opcode; + cam_set_addr(addr, pg->addr_len, addr_type, tx + 1); + memcpy(tx + header_len, data, len); + CAM_DBG(CAM_SENSOR_IO, "tx(%u): %02x %02x %02x %02x", + len, tx[0], tx[1], tx[2], tx[3]); + do { + rc = spi_write(spi, tx, len + header_len); + if (rc) { + if (retries == 0) { + break; + } else { + retries--; + cam_spi_wait(client, pg, addr_type); + msleep(client->spi_client->retry_delay); + } + } + } while (rc); + + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "failed %d", rc); + return rc; + } + rc = cam_spi_wait(client, pg, addr_type); + return rc; +} + +int cam_spi_write(struct camera_io_master *client, + uint32_t addr, uint32_t data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type) +{ + struct cam_camera_spi_inst *pg = + &client->spi_client->cmd_tbl.page_program; + uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len; + uint16_t len = 0; + char buf[CAMERA_SENSOR_I2C_TYPE_MAX]; + char *tx; + int rc = -EINVAL; + + if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID) + || (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) + || (data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID) + || (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) + return rc; + + CAM_DBG(CAM_SENSOR_IO, "Data: 0x%x", data); + len = header_len + (uint8_t)data_type; + tx = kmalloc(len, GFP_KERNEL | GFP_DMA); + if (!tx) + goto NOMEM; + + if (data_type == 
CAMERA_SENSOR_I2C_TYPE_BYTE) { + buf[0] = data; + CAM_DBG(CAM_SENSOR_IO, "Byte %d: 0x%x", len, buf[0]); + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) { + buf[0] = (data >> BITS_PER_BYTE) & 0x00FF; + buf[1] = (data & 0x00FF); + } else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) { + buf[0] = (data >> 16) & 0x00FF; + buf[1] = (data >> 8) & 0x00FF; + buf[2] = (data & 0x00FF); + } else { + buf[0] = (data >> 24) & 0x00FF; + buf[1] = (data >> 16) & 0x00FF; + buf[2] = (data >> 8) & 0x00FF; + buf[3] = (data & 0x00FF); + } + + rc = cam_spi_page_program(client, addr, buf, + addr_type, data_type, tx); + if (rc < 0) + goto ERROR; + goto OUT; +NOMEM: + CAM_ERR(CAM_SENSOR_IO, "memory allocation failed"); + return -ENOMEM; +ERROR: + CAM_ERR(CAM_SENSOR_IO, "error write"); +OUT: + kfree(tx); + return rc; +} + +int32_t cam_spi_write_seq(struct camera_io_master *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, uint32_t num_byte) +{ + struct cam_camera_spi_inst *pg = + &client->spi_client->cmd_tbl.page_program; + const uint32_t page_size = client->spi_client->page_size; + uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len; + uint16_t len; + uint32_t cur_len, end; + char *tx, *pdata = data; + int rc = -EINVAL; + + if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) || + (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)) + return rc; + /* single page write */ + if ((addr % page_size) + num_byte <= page_size) { + len = header_len + num_byte; + tx = kmalloc(len, GFP_KERNEL | GFP_DMA); + if (!tx) + goto NOMEM; + rc = cam_spi_page_program(client, addr, data, addr_type, + num_byte, tx); + if (rc < 0) + goto ERROR; + goto OUT; + } + /* multi page write */ + len = header_len + page_size; + tx = kmalloc(len, GFP_KERNEL | GFP_DMA); + if (!tx) + goto NOMEM; + while (num_byte) { + end = min(page_size, (addr % page_size) + num_byte); + cur_len = end - (addr % page_size); + CAM_ERR(CAM_SENSOR_IO, "Addr: 0x%x curr_len: 0x%x pgSize: %d", + addr, 
cur_len, page_size); + rc = cam_spi_page_program(client, addr, pdata, addr_type, + cur_len, tx); + if (rc < 0) + goto ERROR; + addr += cur_len; + pdata += cur_len; + num_byte -= cur_len; + } + goto OUT; +NOMEM: + pr_err("%s: memory allocation failed\n", __func__); + return -ENOMEM; +ERROR: + pr_err("%s: error write\n", __func__); +OUT: + kfree(tx); + return rc; +} + +int cam_spi_write_table(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting) +{ + int i; + int rc = -EFAULT; + struct cam_sensor_i2c_reg_array *reg_setting; + + if (!client || !write_setting) + return rc; + + if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID) + || (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) + || (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID) + || (write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) + return rc; + + reg_setting = write_setting->reg_setting; + for (i = 0; i < write_setting->size; i++) { + CAM_DBG(CAM_SENSOR_IO, "addr %x data %x", + reg_setting->reg_addr, reg_setting->reg_data); + rc = cam_spi_write(client, + reg_setting->reg_addr, reg_setting->reg_data, + write_setting->addr_type, write_setting->data_type); + if (rc < 0) + break; + reg_setting++; + } + if (write_setting->delay > 20) + msleep(write_setting->delay); + else if (write_setting->delay) + usleep_range(write_setting->delay * 1000, + (write_setting->delay + * 1000) + 1000); + return rc; +} + +int cam_spi_erase(struct camera_io_master *client, uint32_t addr, + enum camera_sensor_i2c_type addr_type, uint32_t size) +{ + struct cam_camera_spi_inst *se = &client->spi_client->cmd_tbl.erase; + int rc = 0; + uint32_t cur; + uint32_t end = addr + size; + uint32_t erase_size = client->spi_client->erase_size; + + end = addr + size; + for (cur = rounddown(addr, erase_size); cur < end; cur += erase_size) { + CAM_ERR(CAM_SENSOR_IO, "%s: erasing 0x%x size: %d\n", + __func__, cur, erase_size); + rc = cam_spi_write_enable(client, addr_type); + if (rc < 
0) + return rc; + rc = cam_spi_tx_helper(client, se, cur, NULL, addr_type, 0, + NULL, NULL); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "%s: erase failed\n", __func__); + return rc; + } + rc = cam_spi_wait(client, se, addr_type); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_IO, "%s: erase timedout\n", __func__); + return rc; + } + } + + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h new file mode 100644 index 0000000000..73d7ea9456 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_SENSOR_SPI_H_ +#define _CAM_SENSOR_SPI_H_ + +#include +#include +#include +#include "cam_sensor_i2c.h" + +#define MAX_SPI_SIZE 110 +#define SPI_DYNAMIC_ALLOC + +struct cam_camera_spi_inst { + uint8_t opcode; + uint8_t addr_len; + uint8_t dummy_len; + uint8_t delay_intv; + uint8_t delay_count; +}; + +struct cam_spi_write_burst_data { + u8 data_msb; + u8 data_lsb; +}; + +struct cam_spi_write_burst_packet { + u8 cmd; + u8 addr_msb; + u8 addr_lsb; + struct cam_spi_write_burst_data data_arr[MAX_SPI_SIZE]; +}; + +struct cam_camera_burst_info { + uint32_t burst_addr; + uint32_t burst_start; + uint32_t burst_len; + uint32_t chunk_size; +}; + +struct cam_camera_spi_inst_tbl { + struct cam_camera_spi_inst read; + struct cam_camera_spi_inst read_seq; + struct cam_camera_spi_inst query_id; + struct cam_camera_spi_inst page_program; + struct cam_camera_spi_inst write_enable; + struct cam_camera_spi_inst read_status; + struct cam_camera_spi_inst erase; +}; + +struct cam_sensor_spi_client { + struct spi_device *spi_master; + struct cam_camera_spi_inst_tbl cmd_tbl; + uint8_t device_id0; + uint8_t device_id1; + uint8_t mfr_id0; + uint8_t mfr_id1; + 
uint8_t retry_delay; + uint8_t retries; + uint8_t busy_mask; + uint16_t page_size; + uint32_t erase_size; +}; +static __always_inline +uint16_t cam_camera_spi_get_hlen(struct cam_camera_spi_inst *inst) +{ + return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len; +} + +int cam_spi_read(struct camera_io_master *client, + uint32_t addr, uint32_t *data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type); + +int cam_spi_read_seq(struct camera_io_master *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, + int32_t num_bytes); + +int cam_spi_query_id(struct camera_io_master *client, + uint32_t addr, + enum camera_sensor_i2c_type addr_type, + uint8_t *data, uint32_t num_byte); + +int cam_spi_write(struct camera_io_master *client, + uint32_t addr, uint32_t data, + enum camera_sensor_i2c_type addr_type, + enum camera_sensor_i2c_type data_type); + +int cam_spi_write_table(struct camera_io_master *client, + struct cam_sensor_i2c_reg_setting *write_setting); + +int cam_spi_erase(struct camera_io_master *client, + uint32_t addr, enum camera_sensor_i2c_type addr_type, + uint32_t size); + +int cam_spi_write_seq(struct camera_io_master *client, + uint32_t addr, uint8_t *data, + enum camera_sensor_i2c_type addr_type, uint32_t num_byte); +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_module_debug.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_module_debug.c new file mode 100644 index 0000000000..b921f37d18 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_module_debug.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include "cam_debug_util.h" +#include "cam_sensor_cmn_header.h" +#include "cam_sensor_io.h" +#include "cam_sensor_i2c.h" +#include "cam_actuator_dev.h" +#include "cam_eeprom_dev.h" +#include "cam_flash_dev.h" +#include "cam_ois_dev.h" +#include "cam_sensor_dev.h" + +#define RW_BUFFER_SIZE 3000 +#define LINE_BUFFER_SIZE 150 +#define NUM_OF_READ_PARAMS 5 +#define NUM_OF_WRITE_PARAMS 7 +#define USAGE_STRING "Read format: r/R, reg_addr(hex), addr_type, "\ + "data_type, device_type, instance_id (ID pair)\n"\ + "Write format: w/W, reg_addr(hex), reg_value(hex), "\ + "delay, addr_type, data_type, device_type, "\ + "instance_id (ID pair)\n" + +struct cam_sensor_i2c_devices { + struct cam_actuator_ctrl_t *actuator[MAX_CAMERAS]; + struct cam_eeprom_ctrl_t *eeprom[MAX_CAMERAS]; + struct cam_flash_ctrl *flash[MAX_CAMERAS]; + struct cam_ois_ctrl_t *ois[MAX_CAMERAS]; + struct cam_sensor_ctrl_t *sensor[MAX_CAMERAS]; + int num_actuator; + int num_eeprom; + int num_flash; + int num_ois; + int num_sensor; +}; + +static struct dentry *debugfs_root; +static struct cam_sensor_i2c_devices devices = {0}; + +static char *display_buf; + +struct camera_io_master *cam_sensor_module_get_io_master( + int device_type, int instance_number, bool *is_on) +{ + struct camera_io_master *io_master = NULL; + *is_on = false; + + switch (device_type) { + case CAM_SENSOR_ACTUATOR: + io_master = instance_number >= devices.num_actuator ? NULL : + &devices.actuator[instance_number]->io_master_info; + if (io_master) + *is_on = devices.actuator[instance_number]->cam_act_state == + CAM_ACTUATOR_CONFIG ? true : false; + break; + case CAM_SENSOR_EEPROM: + io_master = instance_number >= devices.num_eeprom ? NULL : + &devices.eeprom[instance_number]->io_master_info; + if (io_master) + *is_on = devices.eeprom[instance_number]->cam_eeprom_state == + CAM_EEPROM_CONFIG ? true : false; + break; + case CAM_SENSOR_FLASH: + io_master = instance_number >= devices.num_flash ? 
NULL : + &devices.flash[instance_number]->io_master_info; + if (io_master) + *is_on = devices.flash[instance_number]->flash_state >= + CAM_FLASH_STATE_CONFIG ? true : false; + break; + case CAM_SENSOR_OIS: + io_master = instance_number >= devices.num_ois ? NULL : + &devices.ois[instance_number]->io_master_info; + if (io_master) + *is_on = devices.ois[instance_number]->cam_ois_state >= + CAM_OIS_CONFIG ? true : false; + break; + case CAM_SENSOR_DEVICE: + io_master = instance_number >= devices.num_sensor ? NULL : + &devices.sensor[instance_number]->io_master_info; + if (io_master) + *is_on = devices.sensor[instance_number]->sensor_state >= + CAM_SENSOR_ACQUIRE ? true : false; + break; + default: + CAM_WARN(CAM_SENSOR, "Unrecognized sensor device type: %d", device_type); + break; + } + + return io_master; +} + +static int cam_sensor_module_parse_line(const char *p_line, + struct cam_sensor_i2c_reg_setting *reg_list, + struct camera_io_master **io_master, bool *is_read, + bool *power_state) +{ + int device_type, instance_number, rc; + struct cam_sensor_i2c_reg_array *reg_array = reg_list->reg_setting; + + if (!strlen(p_line)) + return -EINVAL; + + CAM_DBG(CAM_SENSOR, "Sensor debugfs string: %s", p_line); + + if (p_line[0] == 'r' || p_line[0] == 'R') { + *is_read = true; + + rc = sscanf(p_line+2, "%x,%d,%d,%d,%d", + ®_array->reg_addr, ®_list->addr_type, ®_list->data_type, + &device_type, &instance_number); + if (rc == NUM_OF_READ_PARAMS) { + *io_master = cam_sensor_module_get_io_master( + device_type, instance_number, power_state); + return 0; + } + } else if (p_line[0] == 'w' || p_line[0] == 'W') { + *is_read = false; + + rc = sscanf(p_line+2, "%x,%x,%d,%d,%d,%d,%d", + ®_array->reg_addr, ®_array->reg_data, ®_array->delay, + ®_list->addr_type, ®_list->data_type, + &device_type, &instance_number); + if (rc == NUM_OF_WRITE_PARAMS) { + *io_master = cam_sensor_module_get_io_master( + device_type, instance_number, power_state); + return 0; + } + } + + return -EINVAL; +} + 
+static void cam_sensor_get_device_status(int offset) +{ + int i; + char line_buffer[LINE_BUFFER_SIZE], *start; + + if (display_buf) + start = display_buf + offset; + else + return; + + for (i = 0; i < devices.num_actuator; i++) { + snprintf(line_buffer, LINE_BUFFER_SIZE, + "Device: %s, \t %u, state: %d, ID(device_type, instance_number): %d, %d\n", + devices.actuator[i]->device_name, devices.actuator[i]->soc_info.index, + devices.actuator[i]->cam_act_state, CAM_SENSOR_ACTUATOR, i); + strlcat(start, line_buffer, RW_BUFFER_SIZE - offset); + } + memset(line_buffer, '\0', LINE_BUFFER_SIZE); + for (i = 0; i < devices.num_eeprom; i++) { + snprintf(line_buffer, LINE_BUFFER_SIZE, + "Device: %s, \t\t %u, state: %d, ID(device_type, instance_number): %d, %d\n", + devices.eeprom[i]->device_name, devices.eeprom[i]->soc_info.index, + devices.eeprom[i]->cam_eeprom_state, CAM_SENSOR_EEPROM, i); + strlcat(start, line_buffer, RW_BUFFER_SIZE - offset); + } + memset(line_buffer, '\0', LINE_BUFFER_SIZE); + for (i = 0; i < devices.num_flash; i++) { + snprintf(line_buffer, LINE_BUFFER_SIZE, + "Device: %s, \t\t %u, state: %d, ID(device_type, instance_number): %d, %d\n", + devices.flash[i]->device_name, devices.flash[i]->soc_info.index, + devices.flash[i]->flash_state, CAM_SENSOR_FLASH, i); + strlcat(start, line_buffer, RW_BUFFER_SIZE - offset); + } + memset(line_buffer, '\0', LINE_BUFFER_SIZE); + for (i = 0; i < devices.num_ois; i++) { + snprintf(line_buffer, LINE_BUFFER_SIZE, + "Device: %s, \t\t %u, state: %d, ID(device_type, instance_number): %d, %d\n", + devices.ois[i]->device_name, devices.ois[i]->soc_info.index, + devices.ois[i]->cam_ois_state, CAM_SENSOR_OIS, i); + strlcat(start, line_buffer, RW_BUFFER_SIZE - offset); + } + memset(line_buffer, '\0', LINE_BUFFER_SIZE); + for (i = 0; i < devices.num_sensor; i++) { + snprintf(line_buffer, LINE_BUFFER_SIZE, + "Device: %s, \t\t %u, state: %d, ID(device_type, instance_number): %d, %d\n", + devices.sensor[i]->sensor_name, 
devices.sensor[i]->soc_info.index, + devices.sensor[i]->sensor_state, CAM_SENSOR_DEVICE, i); + strlcat(start, line_buffer, RW_BUFFER_SIZE - offset); + } + memset(line_buffer, '\0', LINE_BUFFER_SIZE); +} + +static ssize_t i2c_read(struct file *t_file, char __user *t_char, + size_t t_size_t, loff_t *t_loff_t) +{ + ssize_t count = 0; + + if (display_buf) { + count = simple_read_from_buffer(t_char, t_size_t, + t_loff_t, display_buf, RW_BUFFER_SIZE); + memset(display_buf, '\0', RW_BUFFER_SIZE); + } + + return count; +} + +static ssize_t i2c_write(struct file *t_file, const char __user *t_char, + size_t t_size_t, loff_t *t_loff_t) +{ + ssize_t bytes_written = 0, rc = 0; + struct cam_sensor_i2c_reg_setting read_write; + struct cam_sensor_i2c_reg_array reg_array; + struct camera_io_master *io_master = NULL; + char *line_buffer = NULL; + char *input_buf = NULL; + bool is_read, power_state; + + if (!display_buf) + return -EINVAL; + + line_buffer = CAM_MEM_ZALLOC(sizeof(char) * LINE_BUFFER_SIZE, GFP_KERNEL); + if (!line_buffer) + return -ENOMEM; + input_buf = CAM_MEM_ZALLOC(sizeof(char) * RW_BUFFER_SIZE, GFP_KERNEL); + if (!input_buf) { + CAM_MEM_FREE(line_buffer); + return -ENOMEM; + } + + memset(display_buf, '\0', RW_BUFFER_SIZE); + read_write.reg_setting = ®_array; + + bytes_written = simple_write_to_buffer(input_buf, RW_BUFFER_SIZE - 1, + t_loff_t, t_char, t_size_t); + + /* Turn it into a C string */ + input_buf[bytes_written + 1] = '\0'; + + rc = cam_sensor_module_parse_line(input_buf, &read_write, + &io_master, &is_read, &power_state); + + if (!rc) { + if (!power_state) { + snprintf(line_buffer, LINE_BUFFER_SIZE, "Dev not on"); + goto end; + } + if (!is_read) { + read_write.size = 1; + rc = camera_io_dev_write(io_master, &read_write); + if (rc) { + snprintf(line_buffer, LINE_BUFFER_SIZE, + "Error: 0x%X, 0x%X, rc: %zu\n", + read_write.reg_setting->reg_addr, + read_write.reg_setting->reg_data, rc); + strlcat(display_buf, line_buffer, RW_BUFFER_SIZE); + } + } else { + rc = 
camera_io_dev_read(io_master, + read_write.reg_setting->reg_addr, + &read_write.reg_setting->reg_data, read_write.addr_type, + read_write.data_type, false); + if (!rc) + snprintf(line_buffer, LINE_BUFFER_SIZE, "Read data: 0x%X\n", + read_write.reg_setting->reg_data); + else + snprintf(line_buffer, LINE_BUFFER_SIZE, "Error, rc: %zu\n", rc); + strlcat(display_buf, line_buffer, RW_BUFFER_SIZE); + } + } else { + strscpy(display_buf, USAGE_STRING, RW_BUFFER_SIZE); + cam_sensor_get_device_status(strlen(USAGE_STRING)); + } + +end: + CAM_MEM_FREE(line_buffer); + CAM_MEM_FREE(input_buf); + return bytes_written; +} + +const struct file_operations i2c_operations = { + .open = simple_open, + .read = i2c_read, + .write = i2c_write, +}; + +void cam_sensor_module_add_i2c_device(void *ctrl_struct, int device_type) +{ + if (!cam_debugfs_available()) + return; + + CAM_INFO(CAM_SENSOR, "Adding device type: %d", device_type); + + switch (device_type) { + case CAM_SENSOR_ACTUATOR: + devices.actuator[devices.num_actuator++] = + (struct cam_actuator_ctrl_t *) ctrl_struct; + break; + case CAM_SENSOR_EEPROM: + devices.eeprom[devices.num_eeprom++] = + (struct cam_eeprom_ctrl_t *) ctrl_struct; + break; + case CAM_SENSOR_FLASH: + devices.flash[devices.num_flash++] = + (struct cam_flash_ctrl *) ctrl_struct; + break; + case CAM_SENSOR_OIS: + devices.ois[devices.num_ois++] = + (struct cam_ois_ctrl_t *) ctrl_struct; + break; + case CAM_SENSOR_DEVICE: + devices.sensor[devices.num_sensor++] = + (struct cam_sensor_ctrl_t *) ctrl_struct; + break; + default: + CAM_WARN(CAM_SENSOR, "Unrecognized sensor device type: %d", device_type); + break; + } +} + +int cam_sensor_module_debug_register(void) +{ + int rc = 0; + struct dentry *dbgfileptr = NULL; + + display_buf = NULL; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("i2c", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_MEM, "DebugFS could not create directory!"); + rc = -ENOENT; + goto end; + } + + debugfs_root = dbgfileptr; 
+ debugfs_create_file("i2c-rw", 0644, debugfs_root, + NULL, &i2c_operations); + + display_buf = CAM_MEM_ZALLOC(sizeof(char) * RW_BUFFER_SIZE, GFP_KERNEL); + +end: + return rc; +} + +void cam_sensor_module_debug_deregister(void) +{ + debugfs_root = NULL; + if (display_buf) + CAM_MEM_FREE(display_buf); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h new file mode 100644 index 0000000000..b351c6d90c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h @@ -0,0 +1,390 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_SENSOR_CMN_HEADER_ +#define _CAM_SENSOR_CMN_HEADER_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define MAX_POWER_CONFIG 12 +#define MAX_PER_FRAME_ARRAY 32 +#define BATCH_SIZE_MAX 16 +#define CAM_I3C_DEV_PROBE_TIMEOUT_MS 10 +#define CAM_I3C_DEV_PROBE_TIMEOUT_US (CAM_I3C_DEV_PROBE_TIMEOUT_MS * 1000) +#define I3C_SENSOR_DEV_ID_DT_PATH "/soc/qcom,cam-i3c-id-table" +#define MAX_I3C_DEVICE_ID_ENTRIES (MAX_CAMERAS * 2) + +#define CAM_SENSOR_NAME "cam-sensor" +#define CAM_ACTUATOR_NAME "cam-actuator" +#define CAM_CSIPHY_NAME "cam-csiphy" +#define CAM_FLASH_NAME "cam-flash" +#define CAM_EEPROM_NAME "cam-eeprom" +#define CAM_OIS_NAME "cam-ois" +#define CAM_TPG_NAME "cam-tpg" + +#define MAX_SYSTEM_PIPELINE_DELAY 2 + +#define CAM_PKT_NOP_OPCODE 127 + +enum camera_flash_opcode { + CAMERA_SENSOR_FLASH_OP_INVALID, + CAMERA_SENSOR_FLASH_OP_OFF, + CAMERA_SENSOR_FLASH_OP_FIRELOW, + CAMERA_SENSOR_FLASH_OP_FIREHIGH, + CAMERA_SENSOR_FLASH_OP_FIREDURATION, + CAMERA_SENSOR_FLASH_OP_MAX, +}; + +enum 
camera_sensor_i2c_type { + CAMERA_SENSOR_I2C_TYPE_INVALID, + CAMERA_SENSOR_I2C_TYPE_BYTE, + CAMERA_SENSOR_I2C_TYPE_WORD, + CAMERA_SENSOR_I2C_TYPE_3B, + CAMERA_SENSOR_I2C_TYPE_DWORD, + CAMERA_SENSOR_I2C_TYPE_MAX, +}; + +enum i2c_freq_mode { + I2C_STANDARD_MODE, + I2C_FAST_MODE, + I2C_CUSTOM_MODE, + I2C_FAST_PLUS_MODE, + I2C_MAX_MODES, +}; + +enum position_roll { + ROLL_0 = 0, + ROLL_90 = 90, + ROLL_180 = 180, + ROLL_270 = 270, + ROLL_INVALID = 360, +}; + +enum position_yaw { + FRONT_CAMERA_YAW = 0, + REAR_CAMERA_YAW = 180, + INVALID_YAW = 360, +}; + +enum position_pitch { + LEVEL_PITCH = 0, + INVALID_PITCH = 360, +}; + +enum sensor_sub_module { + SUB_MODULE_SENSOR, + SUB_MODULE_ACTUATOR, + SUB_MODULE_EEPROM, + SUB_MODULE_LED_FLASH, + SUB_MODULE_CSID, + SUB_MODULE_CSIPHY, + SUB_MODULE_OIS, + SUB_MODULE_EXT, + SUB_MODULE_MAX, +}; + +enum msm_camera_power_seq_type { + SENSOR_MCLK, + SENSOR_VANA, + SENSOR_VDIG, + SENSOR_VIO, + SENSOR_VAF, + SENSOR_VAF_PWDM, + SENSOR_CUSTOM_REG1, + SENSOR_CUSTOM_REG2, + SENSOR_RESET, + SENSOR_STANDBY, + SENSOR_CUSTOM_GPIO1, + SENSOR_CUSTOM_GPIO2, + SENSOR_VANA1, + SENSOR_SEQ_TYPE_MAX, +}; + +enum msm_bus_perf_setting { + S_INIT, + S_PREVIEW, + S_VIDEO, + S_CAPTURE, + S_ZSL, + S_STEREO_VIDEO, + S_STEREO_CAPTURE, + S_DEFAULT, + S_LIVESHOT, + S_DUAL, + S_EXIT +}; + +enum msm_camera_device_type_t { + MSM_CAMERA_I2C_DEVICE, + MSM_CAMERA_PLATFORM_DEVICE, + MSM_CAMERA_SPI_DEVICE, +}; + +enum cam_flash_device_type { + CAMERA_FLASH_DEVICE_TYPE_PMIC = 0, + CAMERA_FLASH_DEVICE_TYPE_I2C, + CAMERA_FLASH_DEVICE_TYPE_GPIO, +}; + +enum cci_i2c_master_t { + MASTER_0 = CCI_MASTER_0, + MASTER_1 = CCI_MASTER_1, + MASTER_MAX = CCI_MASTER_MAX, +}; + +enum cci_device_num { + CCI_DEVICE_0, + CCI_DEVICE_1, + CCI_DEVICE_MAX, +}; + +enum camera_vreg_type { + VREG_TYPE_DEFAULT, + VREG_TYPE_CUSTOM, +}; + +enum cam_sensor_i2c_cmd_type { + CAM_SENSOR_I2C_WRITE_RANDOM, + CAM_SENSOR_I2C_WRITE_BURST, + CAM_SENSOR_I2C_WRITE_SEQ, + CAM_SENSOR_I2C_READ_RANDOM, + 
CAM_SENSOR_I2C_READ_SEQ, + CAM_SENSOR_I2C_POLL +}; + +struct common_header { + uint32_t first_word; + uint8_t fifth_byte; + uint8_t cmd_type; + uint16_t reserved; +}; + +struct camera_vreg_t { + const char *reg_name; + int min_voltage; + int max_voltage; + int op_mode; + uint32_t delay; + const char *custom_vreg_name; + enum camera_vreg_type type; +}; + +struct msm_camera_gpio_num_info { + uint16_t gpio_num[SENSOR_SEQ_TYPE_MAX]; + uint8_t valid[SENSOR_SEQ_TYPE_MAX]; +}; + +struct msm_cam_clk_info { + const char *clk_name; + long clk_rate; + uint32_t delay; +}; + +struct msm_pinctrl_info { + struct pinctrl *pinctrl; + struct pinctrl_state *gpio_state_active; + struct pinctrl_state *gpio_state_suspend; + bool use_pinctrl; +}; + +struct cam_sensor_i2c_reg_array { + uint32_t reg_addr; + uint32_t reg_data; + uint32_t delay; + uint32_t data_mask; +}; + +enum cam_sensor_module_debugfs_device_type { + CAM_SENSOR_ACTUATOR, + CAM_SENSOR_EEPROM, + CAM_SENSOR_FLASH, + CAM_SENSOR_OIS, + CAM_SENSOR_DEVICE, + CAM_SENSOR_MAX, +}; + +struct cam_sensor_i2c_reg_setting { + struct cam_sensor_i2c_reg_array *reg_setting; + uint32_t size; + enum camera_sensor_i2c_type addr_type; + enum camera_sensor_i2c_type data_type; + unsigned short delay; + uint8_t *read_buff; + uint32_t read_buff_len; +}; + +struct cam_sensor_i2c_seq_reg { + uint32_t reg_addr; + uint8_t *reg_data; + uint32_t size; + enum camera_sensor_i2c_type addr_type; +}; + +struct i2c_settings_list { + struct cam_sensor_i2c_reg_setting i2c_settings; + struct cam_sensor_i2c_seq_reg seq_settings; + enum cam_sensor_i2c_cmd_type op_code; + struct list_head list; +}; + +struct i2c_settings_array { + struct list_head list_head; + int32_t is_settings_valid; + int64_t request_id; + struct timespec64 applied_timestamp; +}; + +struct i2c_data_settings { + struct i2c_settings_array init_settings; + struct i2c_settings_array config_settings; + struct i2c_settings_array streamon_settings; + struct i2c_settings_array streamoff_settings; + 
struct i2c_settings_array read_settings; + struct i2c_settings_array *per_frame; + struct i2c_settings_array *deferred_frame_update; + struct i2c_settings_array *frame_skip; + struct i2c_settings_array *bubble_update; + struct i2c_settings_array reg_bank_unlock_settings; + struct i2c_settings_array reg_bank_lock_settings; +}; + +struct cam_sensor_power_ctrl_t { + struct device *dev; + struct cam_sensor_power_setting *power_setting; + uint16_t power_setting_size; + struct cam_sensor_power_setting *power_down_setting; + uint16_t power_down_setting_size; + struct msm_camera_gpio_num_info *gpio_num_info; + struct msm_pinctrl_info pinctrl_info; + uint8_t cam_pinctrl_status; +}; + +struct cam_camera_slave_info { + uint32_t sensor_slave_addr; + uint32_t sensor_id_reg_addr; + uint32_t sensor_id; + uint32_t sensor_id_mask; +}; + +struct msm_sensor_init_params { + int modes_supported; + unsigned int sensor_mount_angle; +}; + +enum msm_sensor_camera_id_t { + CAMERA_0, + CAMERA_1, + CAMERA_2, + CAMERA_3, + CAMERA_4, + CAMERA_5, + CAMERA_6, + CAMERA_7, + CAMERA_8, + CAMERA_9, + CAMERA_10, + CAMERA_11, + CAMERA_12, + CAMERA_13, + CAMERA_14, + CAMERA_15, + MAX_CAMERAS, +}; + +struct msm_sensor_id_info_t { + unsigned short sensor_id_reg_addr; + unsigned short sensor_id; + unsigned short sensor_id_mask; +}; + +enum msm_sensor_output_format_t { + MSM_SENSOR_BAYER, + MSM_SENSOR_YCBCR, + MSM_SENSOR_META, +}; + +struct cam_sensor_power_setting { + enum msm_camera_power_seq_type seq_type; + unsigned short seq_val; + long config_val; + unsigned short delay; + void *data[10]; + bool valid_config; +}; + +struct cam_sensor_board_info { + struct cam_camera_slave_info slave_info; + int32_t sensor_mount_angle; + int32_t secure_mode; + int modes_supported; + int32_t pos_roll; + int32_t pos_yaw; + int32_t pos_pitch; + int32_t subdev_id[SUB_MODULE_MAX]; + int32_t subdev_intf[SUB_MODULE_MAX]; + const char *misc_regulator; + struct cam_sensor_power_ctrl_t power_info; +}; + +enum 
msm_camera_vreg_name_t { + CAM_VDIG, + CAM_VIO, + CAM_VANA, + CAM_VAF, + CAM_V_CUSTOM1, + CAM_V_CUSTOM2, + CAM_VREG_MAX, +}; + +struct msm_camera_gpio_conf { + void *cam_gpiomux_conf_tbl; + uint8_t cam_gpiomux_conf_tbl_size; + struct gpio *cam_gpio_common_tbl; + uint8_t cam_gpio_common_tbl_size; + struct gpio *cam_gpio_req_tbl; + uint8_t cam_gpio_req_tbl_size; + uint32_t gpio_no_mux; + uint32_t *camera_off_table; + uint8_t camera_off_table_size; + uint32_t *camera_on_table; + uint8_t camera_on_table_size; + struct msm_camera_gpio_num_info *gpio_num_info; +}; + +/** + * cam_sensor_module_add_i2c_device() + * @brief: Each sensor device passes its ctrl struct to a global + * structure of i2c devices in cam_sensor_module_debug.c + * @ctrl_struct: Passed as void pointer, and recast to actual struct + * based on device_type + * @device_type: Device type based on cam_sensor_module_debugfs_device_type + */ +void cam_sensor_module_add_i2c_device(void *ctrl_struct, int device_type); + +/** + * cam_sensor_module_debug_register() + * @brief Creates debugfs entry + */ +int cam_sensor_module_debug_register(void); + +/** + * cam_sensor_module_debug_deregister(void) + * @brief Take down debugfs entry + */ +void cam_sensor_module_debug_deregister(void); + +#endif /* _CAM_SENSOR_CMN_HEADER_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c new file mode 100644 index 0000000000..903b51913c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c @@ -0,0 +1,2697 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include "cam_sensor_util.h" +#include "cam_mem_mgr.h" +#include "cam_res_mgr_api.h" +#include "cam_mem_mgr_api.h" + +#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend" +#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default" + +#define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \ + (config_val >= min) && (config_val <= max)) + +int cam_sensor_count_elems_i3c_device_id(struct device_node *dev, + int *num_entries, char *sensor_id_table_str) +{ + if (!num_entries) { + CAM_ERR(CAM_SENSOR_UTIL, "Num_entries ptr is null"); + return -EINVAL; + } + + if (!dev) { + CAM_ERR(CAM_SENSOR_UTIL, "dev ptr is null"); + return -EINVAL; + } + + if (!sensor_id_table_str) { + CAM_ERR(CAM_SENSOR_UTIL, "sensor_id_table_str ptr is null"); + return -EINVAL; + } + + *num_entries = of_property_count_u32_elems(dev, sensor_id_table_str); + if (*num_entries <= 0) { + CAM_DBG(CAM_SENSOR_UTIL, + "Failed while reading the property for dev: %s num_entries:%d", + dev->full_name, *num_entries); + return -ENOENT; + } + + if (*num_entries >= MAX_I3C_DEVICE_ID_ENTRIES) { + CAM_ERR(CAM_SENSOR_UTIL, + "Num_entries are more than MAX_I3C_DEVICE_ID_ENTRIES for dev: %s", + dev->full_name); + return -ENOMEM; + } + + return 0; +} + +int cam_sensor_fill_i3c_device_id(struct device_node *dev, int num_entries, + char *sensor_id_table_str, struct i3c_device_id *sensor_i3c_device_id) +{ + int i = 0; + uint8_t ent_num = 0; + uint32_t mid; + uint32_t pid; + int rc; + + if (!dev) { + CAM_ERR(CAM_SENSOR_UTIL, "dev ptr is null"); + return -EINVAL; + } + + if (!sensor_id_table_str) { + CAM_ERR(CAM_SENSOR_UTIL, "sensor_id_table_str ptr is null"); + return -EINVAL; + } + + if (!sensor_i3c_device_id) { + CAM_ERR(CAM_SENSOR_UTIL, "sensor_i3c_device_id ptr is null"); + return -EINVAL; + } + + while (i < num_entries) { + if (ent_num >= MAX_I3C_DEVICE_ID_ENTRIES) { + CAM_WARN(CAM_SENSOR_UTIL, + "Num_entries are more than MAX_I3C_DEVICE_ID_ENTRIES for dev: %s", + dev->full_name); + 
return -ENOMEM; + } + + rc = of_property_read_u32_index(dev, sensor_id_table_str, i, &mid); + if (rc) { + CAM_ERR(CAM_SENSOR_UTIL, "Failed in reading the MID. dev: %s rc: %d", + dev->full_name, rc); + return rc; + } + i++; + + rc = of_property_read_u32_index(dev, sensor_id_table_str, i, &pid); + if (rc) { + CAM_ERR(CAM_SENSOR_UTIL, "Failed in reading the PID. dev: %s rc: %d", + dev->full_name, rc); + return rc; + } + i++; + + CAM_DBG(CAM_SENSOR_UTIL, "dev: %s PID: 0x%x, MID: 0x%x", dev->full_name, pid, mid); + + sensor_i3c_device_id[ent_num].manuf_id = mid; + sensor_i3c_device_id[ent_num].match_flags = I3C_MATCH_MANUF_AND_PART; + sensor_i3c_device_id[ent_num].part_id = pid; + sensor_i3c_device_id[ent_num].data = 0; + + ent_num++; + } + + return 0; +} + +static struct i2c_settings_list* + cam_sensor_get_i2c_ptr(struct i2c_settings_array *i2c_reg_settings, + uint32_t size) +{ + struct i2c_settings_list *tmp; + + tmp = CAM_MEM_ZALLOC(sizeof(struct i2c_settings_list), GFP_KERNEL); + + if (tmp != NULL) + list_add_tail(&(tmp->list), + &(i2c_reg_settings->list_head)); + else + return NULL; + + tmp->i2c_settings.reg_setting = (struct cam_sensor_i2c_reg_array *) + vzalloc(size * sizeof(struct cam_sensor_i2c_reg_array)); + if (tmp->i2c_settings.reg_setting == NULL) { + list_del(&(tmp->list)); + CAM_MEM_FREE(tmp); + return NULL; + } + tmp->i2c_settings.size = size; + + return tmp; +} + +int32_t cam_sensor_util_get_current_qtimer_ns(uint64_t *qtime_ns) +{ + uint64_t ticks = 0; + int32_t rc = 0; + + ticks = arch_timer_read_counter(); + if (ticks == 0) { + CAM_ERR(CAM_SENSOR_UTIL, "qtimer returned 0, rc:%d", rc); + return -EINVAL; + } + + if (qtime_ns != NULL) { + *qtime_ns = mul_u64_u32_div(ticks, + QTIMER_MUL_FACTOR, QTIMER_DIV_FACTOR); + CAM_DBG(CAM_SENSOR_UTIL, "Qtimer time: 0x%lx", *qtime_ns); + } else { + CAM_ERR(CAM_SENSOR_UTIL, "NULL pointer passed"); + return -EINVAL; + } + + return rc; +} + +int32_t delete_request(struct i2c_settings_array *i2c_array) +{ + struct 
i2c_settings_list *i2c_list = NULL, *i2c_next = NULL; + int32_t rc = 0; + + if (i2c_array == NULL) { + CAM_ERR(CAM_SENSOR_UTIL, "FATAL:: Invalid argument"); + return -EINVAL; + } + + list_for_each_entry_safe(i2c_list, i2c_next, + &(i2c_array->list_head), list) { + vfree(i2c_list->i2c_settings.reg_setting); + list_del(&(i2c_list->list)); + CAM_MEM_FREE(i2c_list); + } + INIT_LIST_HEAD(&(i2c_array->list_head)); + i2c_array->is_settings_valid = 0; + + return rc; +} + +int32_t cam_sensor_handle_delay( + uint32_t **cmd_buf, + uint16_t generic_op_code, + struct i2c_settings_array *i2c_reg_settings, + uint32_t offset, uint32_t *byte_cnt, + struct list_head *list_ptr) +{ + int32_t rc = 0; + struct cam_cmd_unconditional_wait *cmd_uncond_wait = + (struct cam_cmd_unconditional_wait *) *cmd_buf; + struct i2c_settings_list *i2c_list = NULL; + + if (list_ptr == NULL) { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid list ptr"); + return -EINVAL; + } + + if (offset > 0) { + i2c_list = + list_entry(list_ptr, struct i2c_settings_list, list); + if (generic_op_code == + CAMERA_SENSOR_WAIT_OP_HW_UCND) { + int32_t size = i2c_list->i2c_settings.size; + + if (offset >= size) + i2c_list->i2c_settings.reg_setting[offset - size].delay = + cmd_uncond_wait->delay; + else + CAM_WARN(CAM_SENSOR_UTIL, "Setting size is bigger than offset."); + } + else + i2c_list->i2c_settings.delay = cmd_uncond_wait->delay; + (*cmd_buf) += + sizeof( + struct cam_cmd_unconditional_wait) / sizeof(uint32_t); + (*byte_cnt) += + sizeof( + struct cam_cmd_unconditional_wait); + } else { + CAM_ERR(CAM_SENSOR_UTIL, "Delay Rxed Before any buffer: %d", offset); + return -EINVAL; + } + + return rc; +} + +int32_t cam_sensor_handle_poll( + uint32_t **cmd_buf, + struct i2c_settings_array *i2c_reg_settings, + uint32_t *byte_cnt, int32_t *offset, + struct list_head **list_ptr) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0; + struct cam_cmd_conditional_wait *cond_wait + = (struct cam_cmd_conditional_wait *) *cmd_buf; + + i2c_list 
= + cam_sensor_get_i2c_ptr(i2c_reg_settings, 1); + if (!i2c_list || !i2c_list->i2c_settings.reg_setting) { + CAM_ERR(CAM_SENSOR_UTIL, "Failed in allocating mem for list"); + return -ENOMEM; + } + + i2c_list->op_code = CAM_SENSOR_I2C_POLL; + i2c_list->i2c_settings.data_type = + cond_wait->data_type; + i2c_list->i2c_settings.addr_type = + cond_wait->addr_type; + i2c_list->i2c_settings.reg_setting->reg_addr = + cond_wait->reg_addr; + i2c_list->i2c_settings.reg_setting->reg_data = + cond_wait->reg_data; + i2c_list->i2c_settings.reg_setting->delay = + cond_wait->timeout; + + (*cmd_buf) += sizeof(struct cam_cmd_conditional_wait) / + sizeof(uint32_t); + (*byte_cnt) += sizeof(struct cam_cmd_conditional_wait); + + *offset = 1; + *list_ptr = &(i2c_list->list); + + return rc; +} + +int32_t cam_sensor_handle_random_write( + struct cam_cmd_i2c_random_wr *cam_cmd_i2c_random_wr, + struct i2c_settings_array *i2c_reg_settings, + uint32_t *cmd_length_in_bytes, int32_t *offset, + struct list_head **list, uint32_t payload_count) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0, cnt; + + i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings, + payload_count); + if (i2c_list == NULL || + i2c_list->i2c_settings.reg_setting == NULL) { + CAM_ERR(CAM_SENSOR_UTIL, "Failed in allocating i2c_list"); + return -ENOMEM; + } + + *cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) + + sizeof(struct i2c_random_wr_payload) * + payload_count); + i2c_list->op_code = CAM_SENSOR_I2C_WRITE_RANDOM; + i2c_list->i2c_settings.addr_type = + cam_cmd_i2c_random_wr->header.addr_type; + i2c_list->i2c_settings.data_type = + cam_cmd_i2c_random_wr->header.data_type; + + for (cnt = 0; cnt < payload_count; cnt++) { + i2c_list->i2c_settings.reg_setting[cnt].reg_addr = + cam_cmd_i2c_random_wr->random_wr_payload_flex[cnt].reg_addr; + i2c_list->i2c_settings.reg_setting[cnt].reg_data = + cam_cmd_i2c_random_wr->random_wr_payload_flex[cnt].reg_data; + i2c_list->i2c_settings.reg_setting[cnt].data_mask = 0; + } + 
*offset = cnt; + *list = &(i2c_list->list); + + return rc; +} + +int32_t cam_sensor_handle_continuous_write( + struct cam_cmd_i2c_continuous_wr *cam_cmd_i2c_continuous_wr, + struct i2c_settings_array *i2c_reg_settings, + uint32_t *cmd_length_in_bytes, int32_t *offset, + struct list_head **list, uint32_t payload_count) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0, cnt; + + i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings, + payload_count); + if (i2c_list == NULL || + i2c_list->i2c_settings.reg_setting == NULL) { + CAM_ERR(CAM_SENSOR_UTIL, "Failed in allocating i2c_list"); + return -ENOMEM; + } + + *cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) + + sizeof(cam_cmd_i2c_continuous_wr->reg_addr) + + sizeof(struct cam_cmd_read) * + (payload_count)); + if (cam_cmd_i2c_continuous_wr->header.op_code == + CAMERA_SENSOR_I2C_OP_CONT_WR_BRST) + i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST; + else if (cam_cmd_i2c_continuous_wr->header.op_code == + CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN) + i2c_list->op_code = CAM_SENSOR_I2C_WRITE_SEQ; + else + return -EINVAL; + + i2c_list->i2c_settings.addr_type = + cam_cmd_i2c_continuous_wr->header.addr_type; + i2c_list->i2c_settings.data_type = + cam_cmd_i2c_continuous_wr->header.data_type; + i2c_list->i2c_settings.size = + payload_count; + + for (cnt = 0; cnt < payload_count; cnt++) { + i2c_list->i2c_settings.reg_setting[cnt].reg_addr = + cam_cmd_i2c_continuous_wr->reg_addr; + i2c_list->i2c_settings.reg_setting[cnt].reg_data = + cam_cmd_i2c_continuous_wr->data_read_flex[cnt].reg_data; + i2c_list->i2c_settings.reg_setting[cnt].data_mask = 0; + } + *offset = cnt; + *list = &(i2c_list->list); + + return rc; +} + +static int32_t cam_sensor_get_io_buffer( + struct cam_buf_io_cfg *io_cfg, + struct cam_sensor_i2c_reg_setting *i2c_settings) +{ + uintptr_t buf_addr = 0x0; + size_t buf_size = 0; + int32_t rc = 0; + + if (io_cfg == NULL || i2c_settings == NULL) { + CAM_ERR(CAM_SENSOR_UTIL, + "Invalid args, io buf or i2c settings is 
NULL"); + return -EINVAL; + } + + if (io_cfg->direction == CAM_BUF_OUTPUT) { + rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0], + &buf_addr, &buf_size); + if ((rc < 0) || (!buf_addr)) { + CAM_ERR(CAM_SENSOR_UTIL, + "invalid buffer, rc: %d, buf_addr: %pK", + rc, buf_addr); + return -EINVAL; + } + CAM_DBG(CAM_SENSOR_UTIL, + "buf_addr: %pK, buf_size: %zu, offsetsize: %d", + (void *)buf_addr, buf_size, io_cfg->offsets[0]); + if (io_cfg->offsets[0] >= buf_size) { + CAM_ERR(CAM_SENSOR_UTIL, + "invalid size:io_cfg->offsets[0]: %d, buf_size: %d", + io_cfg->offsets[0], buf_size); + cam_mem_put_cpu_buf(io_cfg->mem_handle[0]); + return -EINVAL; + } + i2c_settings->read_buff = + (uint8_t *)buf_addr + io_cfg->offsets[0]; + i2c_settings->read_buff_len = + buf_size - io_cfg->offsets[0]; + } else { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid direction: %d", + io_cfg->direction); + rc = -EINVAL; + } + cam_mem_put_cpu_buf(io_cfg->mem_handle[0]); + return rc; +} + +int32_t cam_sensor_util_write_qtimer_to_io_buffer( + uint64_t qtime_ns, struct cam_buf_io_cfg *io_cfg) +{ + uintptr_t buf_addr = 0x0, target_buf = 0x0; + size_t buf_size = 0, target_size = 0; + int32_t rc = 0; + + if (io_cfg == NULL) { + CAM_ERR(CAM_SENSOR_UTIL, + "Invalid args, io buf is NULL"); + return -EINVAL; + } + + if (io_cfg->direction == CAM_BUF_OUTPUT) { + rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0], + &buf_addr, &buf_size); + if ((rc < 0) || (!buf_addr)) { + CAM_ERR(CAM_SENSOR_UTIL, + "invalid buffer, rc: %d, buf_addr: %pK", + rc, buf_addr); + return -EINVAL; + } + CAM_DBG(CAM_SENSOR_UTIL, + "buf_addr: %pK, buf_size: %zu, offsetsize: %d", + (void *)buf_addr, buf_size, io_cfg->offsets[0]); + if (io_cfg->offsets[0] >= buf_size) { + CAM_ERR(CAM_SENSOR_UTIL, + "invalid size:io_cfg->offsets[0]: %d, buf_size: %d", + io_cfg->offsets[0], buf_size); + cam_mem_put_cpu_buf(io_cfg->mem_handle[0]); + return -EINVAL; + } + + target_buf = buf_addr + io_cfg->offsets[0]; + target_size = buf_size - io_cfg->offsets[0]; + + if 
(target_size < sizeof(uint64_t)) { + CAM_ERR(CAM_SENSOR_UTIL, + "not enough size for qtimer, target_size:%d", + target_size); + cam_mem_put_cpu_buf(io_cfg->mem_handle[0]); + return -EINVAL; + } + + memcpy((void *)target_buf, &qtime_ns, sizeof(uint64_t)); + cam_mem_put_cpu_buf(io_cfg->mem_handle[0]); + } else { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid direction: %d", + io_cfg->direction); + rc = -EINVAL; + } + + return rc; +} + +int32_t cam_sensor_handle_random_read( + struct cam_cmd_i2c_random_rd *cmd_i2c_random_rd, + struct i2c_settings_array *i2c_reg_settings, + uint16_t *cmd_length_in_bytes, + int32_t *offset, + struct list_head **list, + struct cam_buf_io_cfg *io_cfg, uint32_t payload_count) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0, cnt = 0; + + i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings, + payload_count); + if ((i2c_list == NULL) || + (i2c_list->i2c_settings.reg_setting == NULL)) { + CAM_ERR(CAM_SENSOR_UTIL, + "Failed in allocating i2c_list: %pK", + i2c_list); + return -ENOMEM; + } + + rc = cam_sensor_get_io_buffer(io_cfg, &(i2c_list->i2c_settings)); + if (rc) { + CAM_ERR(CAM_SENSOR_UTIL, "Failed to get read buffer: %d", rc); + } else { + *cmd_length_in_bytes = sizeof(struct i2c_rdwr_header) + + (sizeof(struct cam_cmd_read) * + payload_count); + i2c_list->op_code = CAM_SENSOR_I2C_READ_RANDOM; + i2c_list->i2c_settings.addr_type = + cmd_i2c_random_rd->header.addr_type; + i2c_list->i2c_settings.data_type = + cmd_i2c_random_rd->header.data_type; + i2c_list->i2c_settings.size = + payload_count; + + for (cnt = 0; cnt < payload_count; cnt++) { + i2c_list->i2c_settings.reg_setting[cnt].reg_addr = + cmd_i2c_random_rd->data_read_flex[cnt].reg_data; + } + *offset = cnt; + *list = &(i2c_list->list); + } + + return rc; +} + +int32_t cam_sensor_handle_continuous_read( + struct cam_cmd_i2c_continuous_rd *cmd_i2c_continuous_rd, + struct i2c_settings_array *i2c_reg_settings, + uint16_t *cmd_length_in_bytes, int32_t *offset, + struct list_head **list, + 
struct cam_buf_io_cfg *io_cfg) +{ + struct i2c_settings_list *i2c_list; + int32_t rc = 0, cnt = 0; + + i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings, 1); + if ((i2c_list == NULL) || + (i2c_list->i2c_settings.reg_setting == NULL)) { + CAM_ERR(CAM_SENSOR_UTIL, + "Failed in allocating i2c_list: %pK", + i2c_list); + return -ENOMEM; + } + + rc = cam_sensor_get_io_buffer(io_cfg, &(i2c_list->i2c_settings)); + if (rc) { + CAM_ERR(CAM_SENSOR_UTIL, "Failed to get read buffer: %d", rc); + } else { + *cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_continuous_rd); + i2c_list->op_code = CAM_SENSOR_I2C_READ_SEQ; + + i2c_list->i2c_settings.addr_type = + cmd_i2c_continuous_rd->header.addr_type; + i2c_list->i2c_settings.data_type = + cmd_i2c_continuous_rd->header.data_type; + i2c_list->i2c_settings.size = + cmd_i2c_continuous_rd->header.count; + i2c_list->i2c_settings.reg_setting[0].reg_addr = + cmd_i2c_continuous_rd->reg_addr; + + *offset = cnt; + *list = &(i2c_list->list); + } + + return rc; +} + +static int cam_sensor_handle_slave_info( + struct camera_io_master *io_master, + uint32_t *cmd_buf) +{ + int rc = 0; + struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf; + + if (io_master == NULL || cmd_buf == NULL) { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid args"); + return -EINVAL; + } + + switch (io_master->master_type) { + case CCI_MASTER: + io_master->cci_client->sid = (i2c_info->slave_addr >> 1); + io_master->cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode; + break; + + case I2C_MASTER: + io_master->qup_client->i2c_client->addr = i2c_info->slave_addr; + break; + + case SPI_MASTER: + break; + + default: + CAM_ERR(CAM_SENSOR_UTIL, "Invalid master type: %d", + io_master->master_type); + rc = -EINVAL; + break; + } + + return rc; +} + +/** + * Name : cam_sensor_i2c_command_parser + * Description : Parse CSL CCI packet and apply register settings + * Parameters : io_master input master information + * i2c_reg_settings output register settings to fill + * 
 *               cmd_desc         input  command description
 *               num_cmd_buffers  input  number of command buffers to process
 *               io_cfg           input  buffer details for read operation only
 * Description :
 *   Handle multiple I2C RD/WR and WAIT cmd formats in one command
 *   buffer, for example, a command buffer of m x RND_WR + 1 x HW_
 *   WAIT + n x RND_WR with num_cmd_buf = 1. Do not expect RD/WR
 *   with different cmd_type and op_code in one command buffer.
 */
int cam_sensor_i2c_command_parser(
    struct camera_io_master *io_master,
    struct i2c_settings_array *i2c_reg_settings,
    struct cam_cmd_buf_desc *cmd_desc,
    int32_t num_cmd_buffers,
    struct cam_buf_io_cfg *io_cfg)
{
    int16_t rc = 0, i = 0;
    size_t len_of_buff = 0;
    uintptr_t generic_ptr;
    uint16_t cmd_length_in_bytes = 0;
    size_t remain_len = 0;
    size_t tot_size = 0;

    for (i = 0; i < num_cmd_buffers; i++) {
        uint32_t *cmd_buf = NULL;
        struct common_header *cmm_hdr;
        uint16_t generic_op_code;
        uint32_t byte_cnt = 0;           /* bytes consumed in this cmd buf */
        uint32_t j = 0;                  /* running reg-setting offset */
        struct list_head *list = NULL;
        uint32_t payload_count = 0;
        /*
         * It is not expected the same settings to
         * be spread across multiple cmd buffers
         */
        CAM_DBG(CAM_SENSOR_UTIL, "Total cmd Buf in Bytes: %d",
            cmd_desc[i].length);

        if (!cmd_desc[i].length)
            continue;

        rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
            &generic_ptr, &len_of_buff);
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL,
                "cmd hdl failed:%d, Err: %d, Buffer_len: %zd",
                cmd_desc[i].mem_handle, rc, len_of_buff);
            return rc;
        }

        /* Validate offset/length against the mapped buffer size. */
        remain_len = len_of_buff;
        if ((len_of_buff < sizeof(struct common_header)) ||
            (cmd_desc[i].offset >
            (len_of_buff - sizeof(struct common_header)))) {
            CAM_ERR(CAM_SENSOR_UTIL, "buffer provided too small");
            cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
            return -EINVAL;
        }
        cmd_buf = (uint32_t *)generic_ptr;
        cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);

        remain_len -= cmd_desc[i].offset;
        if (remain_len < cmd_desc[i].length) {
            CAM_ERR(CAM_SENSOR_UTIL, "buffer provided too small");
            cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
            return -EINVAL;
        }

        /* Walk the packed commands until the declared length is consumed. */
        while (byte_cnt < cmd_desc[i].length) {
            if ((remain_len - byte_cnt) <
                sizeof(struct common_header)) {
                CAM_ERR(CAM_SENSOR_UTIL, "Not enough buffer");
                rc = -EINVAL;
                goto end;
            }
            cmm_hdr = (struct common_header *)cmd_buf;
            generic_op_code = cmm_hdr->fifth_byte;
            switch (cmm_hdr->cmd_type) {
            case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR: {
                /* shadows outer cmd_length_in_bytes on purpose */
                uint32_t cmd_length_in_bytes = 0;
                struct cam_cmd_i2c_random_wr
                    *cam_cmd_i2c_random_wr =
                    (struct cam_cmd_i2c_random_wr *)cmd_buf;
                payload_count = cam_cmd_i2c_random_wr->header.count;

                if ((remain_len - byte_cnt) <
                    sizeof(struct cam_cmd_i2c_random_wr)) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer provided");
                    rc = -EINVAL;
                    goto end;
                }
                tot_size = sizeof(struct i2c_rdwr_header) +
                    (sizeof(struct i2c_random_wr_payload) *
                    payload_count);

                if (tot_size > (remain_len - byte_cnt)) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer provided");
                    rc = -EINVAL;
                    goto end;
                }

                rc = cam_sensor_handle_random_write(
                    cam_cmd_i2c_random_wr,
                    i2c_reg_settings,
                    &cmd_length_in_bytes, &j, &list, payload_count);
                if (rc < 0) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Failed in random write %d", rc);
                    rc = -EINVAL;
                    goto end;
                }

                cmd_buf += cmd_length_in_bytes /
                    sizeof(uint32_t);
                byte_cnt += cmd_length_in_bytes;
                break;
            }
            case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR: {
                uint32_t cmd_length_in_bytes = 0;
                struct cam_cmd_i2c_continuous_wr
                    *cam_cmd_i2c_continuous_wr =
                    (struct cam_cmd_i2c_continuous_wr *)
                    cmd_buf;
                payload_count = cam_cmd_i2c_continuous_wr->header.count;

                if ((remain_len - byte_cnt) <
                    sizeof(struct cam_cmd_i2c_continuous_wr)) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer provided");
                    rc = -EINVAL;
                    goto end;
                }

                tot_size = sizeof(struct i2c_rdwr_header) +
                    sizeof(cam_cmd_i2c_continuous_wr->reg_addr) +
                    (sizeof(struct cam_cmd_read) *
                    payload_count);

                if (tot_size > (remain_len - byte_cnt)) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer provided");
                    rc = -EINVAL;
                    goto end;
                }

                rc = cam_sensor_handle_continuous_write(
                    cam_cmd_i2c_continuous_wr,
                    i2c_reg_settings,
                    &cmd_length_in_bytes, &j, &list, payload_count);
                if (rc < 0) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Failed in continuous write %d", rc);
                    goto end;
                }

                cmd_buf += cmd_length_in_bytes /
                    sizeof(uint32_t);
                byte_cnt += cmd_length_in_bytes;
                break;
            }
            case CAMERA_SENSOR_CMD_TYPE_WAIT: {
                /* Size check depends on which wait flavour this is. */
                if ((((generic_op_code == CAMERA_SENSOR_WAIT_OP_HW_UCND) ||
                    (generic_op_code == CAMERA_SENSOR_WAIT_OP_SW_UCND)) &&
                    ((remain_len - byte_cnt) <
                    sizeof(struct cam_cmd_unconditional_wait))) ||
                    ((generic_op_code == CAMERA_SENSOR_WAIT_OP_COND) &&
                    ((remain_len - byte_cnt) <
                    sizeof(struct cam_cmd_conditional_wait)))) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer space");
                    rc = -EINVAL;
                    goto end;
                }

                if (generic_op_code ==
                    CAMERA_SENSOR_WAIT_OP_HW_UCND ||
                    generic_op_code ==
                    CAMERA_SENSOR_WAIT_OP_SW_UCND) {
                    rc = cam_sensor_handle_delay(
                        &cmd_buf, generic_op_code,
                        i2c_reg_settings, j, &byte_cnt,
                        list);
                    if (rc < 0) {
                        CAM_ERR(CAM_SENSOR_UTIL,
                            "delay hdl failed: %d",
                            rc);
                        goto end;
                    }

                } else if (generic_op_code ==
                    CAMERA_SENSOR_WAIT_OP_COND) {
                    rc = cam_sensor_handle_poll(
                        &cmd_buf, i2c_reg_settings,
                        &byte_cnt, &j, &list);
                    if (rc < 0) {
                        CAM_ERR(CAM_SENSOR_UTIL,
                            "Random read fail: %d",
                            rc);
                        goto end;
                    }
                } else {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Wrong Wait Command: %d",
                        generic_op_code);
                    rc = -EINVAL;
                    goto end;
                }
                break;
            }
            case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: {
                if (remain_len - byte_cnt <
                    sizeof(struct cam_cmd_i2c_info)) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer space");
                    rc = -EINVAL;
                    goto end;
                }
                rc = cam_sensor_handle_slave_info(
                    io_master, cmd_buf);
                if (rc) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Handle slave info failed with rc: %d",
                        rc);
                    goto end;
                }
                cmd_length_in_bytes =
                    sizeof(struct cam_cmd_i2c_info);
                cmd_buf +=
                    cmd_length_in_bytes / sizeof(uint32_t);
                byte_cnt += cmd_length_in_bytes;
                break;
            }
            case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_RD: {
                uint16_t cmd_length_in_bytes = 0;
                struct cam_cmd_i2c_random_rd *i2c_random_rd =
                    (struct cam_cmd_i2c_random_rd *)cmd_buf;
                payload_count = i2c_random_rd->header.count;

                if (remain_len - byte_cnt <
                    sizeof(struct cam_cmd_i2c_random_rd)) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer space");
                    rc = -EINVAL;
                    goto end;
                }

                tot_size = sizeof(struct i2c_rdwr_header) +
                    (sizeof(struct cam_cmd_read) *
                    payload_count);

                if (tot_size > (remain_len - byte_cnt)) {
                    /*
                     * NOTE(review): tot_size/remain_len are size_t
                     * printed with %d — should be %zu.
                     */
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer provided %d, %d, %d",
                        tot_size, remain_len, byte_cnt);
                    rc = -EINVAL;
                    goto end;
                }

                rc = cam_sensor_handle_random_read(
                    i2c_random_rd,
                    i2c_reg_settings,
                    &cmd_length_in_bytes, &j, &list,
                    io_cfg, payload_count);
                if (rc < 0) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Failed in random read %d", rc);
                    goto end;
                }

                cmd_buf += cmd_length_in_bytes /
                    sizeof(uint32_t);
                byte_cnt += cmd_length_in_bytes;
                break;
            }
            case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD: {
                uint16_t cmd_length_in_bytes = 0;
                struct cam_cmd_i2c_continuous_rd
                    *i2c_continuous_rd =
                    (struct cam_cmd_i2c_continuous_rd *)cmd_buf;

                if (remain_len - byte_cnt <
                    sizeof(struct cam_cmd_i2c_continuous_rd)) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer space");
                    rc = -EINVAL;
                    goto end;
                }

                tot_size =
                    sizeof(struct cam_cmd_i2c_continuous_rd);

                if (tot_size > (remain_len - byte_cnt)) {
                    /* NOTE(review): %zu would be correct for size_t here. */
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Not enough buffer provided %d, %d, %d",
                        tot_size, remain_len, byte_cnt);
                    rc = -EINVAL;
                    goto end;
                }

                rc = cam_sensor_handle_continuous_read(
                    i2c_continuous_rd,
                    i2c_reg_settings,
                    &cmd_length_in_bytes, &j, &list,
                    io_cfg);
                if (rc < 0) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Failed in continuous read %d", rc);
                    goto end;
                }

                cmd_buf += cmd_length_in_bytes /
                    sizeof(uint32_t);
                byte_cnt += cmd_length_in_bytes;
                break;
            }
            default:
                CAM_ERR(CAM_SENSOR_UTIL, "Invalid Command Type:%d",
                    cmm_hdr->cmd_type);
                rc = -EINVAL;
                goto end;
            }
        }
        i2c_reg_settings->is_settings_valid = 1;
        cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
    }

    return rc;

end:
    /* Single exit for in-loop failures: release the current cmd buffer. */
    cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
    return rc;
}

/*
 * cam_sensor_util_i2c_apply_setting - dispatch one parsed i2c_settings_list
 * node to the matching camera_io write/poll primitive.
 *
 * Returns 0 on success, the underlying I/O error, or -EINVAL for an
 * unknown op_code.
 */
int cam_sensor_util_i2c_apply_setting(
    struct camera_io_master *io_master_info,
    struct i2c_settings_list *i2c_list)
{
    int32_t rc = 0;
    uint32_t i, size;

    switch (i2c_list->op_code) {
    case CAM_SENSOR_I2C_WRITE_RANDOM: {
        rc = camera_io_dev_write(io_master_info,
            &(i2c_list->i2c_settings));
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL,
                "Failed to random write I2C settings: %d",
                rc);
            return rc;
        }
        break;
    }
    case CAM_SENSOR_I2C_WRITE_SEQ: {
        rc = camera_io_dev_write_continuous(
            io_master_info, &(i2c_list->i2c_settings), CAM_SENSOR_I2C_WRITE_SEQ);
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL,
                "Failed to seq write I2C settings: %d",
                rc);
            return rc;
        }
        break;
    }
    case CAM_SENSOR_I2C_WRITE_BURST: {
        rc = camera_io_dev_write_continuous(
            io_master_info, &(i2c_list->i2c_settings), CAM_SENSOR_I2C_WRITE_BURST);
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL,
                "Failed to burst write I2C settings: %d",
                rc);
            return rc;
        }
        break;
    }
    case CAM_SENSOR_I2C_POLL: {
        /* Poll each register entry until it matches or times out. */
        size = i2c_list->i2c_settings.size;
        for (i = 0; i < size; i++) {
            rc = camera_io_dev_poll(
                io_master_info,
                i2c_list->i2c_settings.reg_setting[i].reg_addr,
                i2c_list->i2c_settings.reg_setting[i].reg_data,
                i2c_list->i2c_settings.reg_setting[i].data_mask,
                i2c_list->i2c_settings.addr_type,
                i2c_list->i2c_settings.data_type,
                i2c_list->i2c_settings.reg_setting[i].delay);
            if (rc < 0) {
                CAM_ERR(CAM_SENSOR_UTIL,
                    "i2c poll apply setting Fail: %d", rc);
                return rc;
            }
        }
        break;
    }
    default:
        CAM_ERR(CAM_SENSOR_UTIL, "Wrong Opcode: %d", i2c_list->op_code);
        rc = -EINVAL;
        break;
    }

    return rc;
}

/*
 * cam_sensor_i2c_read_data - execute every parsed read entry in the
 * settings list and copy the results into the bound read buffer.
 *
 * Returns 0 on success, -EINVAL for missing/undersized buffers, or the
 * underlying camera_io read error.
 */
int32_t cam_sensor_i2c_read_data(
    struct i2c_settings_array *i2c_settings,
    struct camera_io_master *io_master_info)
{
    int32_t rc = 0;
    struct i2c_settings_list *i2c_list;
    uint32_t cnt = 0;
    uint8_t *read_buff = NULL;
    uint32_t buff_length = 0;
    uint32_t read_length = 0;

    list_for_each_entry(i2c_list,
        &(i2c_settings->list_head), list) {
        read_buff = i2c_list->i2c_settings.read_buff;
        buff_length = i2c_list->i2c_settings.read_buff_len;
        if ((read_buff == NULL) || (buff_length == 0)) {
            CAM_ERR(CAM_SENSOR_UTIL,
                "Invalid input buffer, buffer: %pK, length: %d",
                read_buff, buff_length);
            return -EINVAL;
        }

        if (i2c_list->op_code == CAM_SENSOR_I2C_READ_RANDOM) {
            /* Each of `size` entries yields `data_type` bytes. */
            read_length = i2c_list->i2c_settings.data_type *
                i2c_list->i2c_settings.size;
            if ((read_length > buff_length) ||
                (read_length < i2c_list->i2c_settings.size)) {
                CAM_ERR(CAM_SENSOR_UTIL,
                    "Invalid size, readLen:%d, bufLen:%d, size: %d",
                    read_length, buff_length,
                    i2c_list->i2c_settings.size);
                return -EINVAL;
            }
            for (cnt = 0; cnt < (i2c_list->i2c_settings.size);
                cnt++) {
                struct cam_sensor_i2c_reg_array *reg_setting =
                    &(i2c_list->i2c_settings.reg_setting[cnt]);
                rc = camera_io_dev_read(io_master_info,
                    reg_setting->reg_addr,
                    &reg_setting->reg_data,
                    i2c_list->i2c_settings.addr_type,
                    i2c_list->i2c_settings.data_type,
                    false);
                if (rc < 0) {
                    CAM_ERR(CAM_SENSOR_UTIL,
                        "Failed: random read I2C settings: %d",
                        rc);
                    return rc;
                }
                /* Guard against an out-of-range data_type width. */
                if (i2c_list->i2c_settings.data_type <
                    CAMERA_SENSOR_I2C_TYPE_MAX) {
                    memcpy(read_buff,
                        &reg_setting->reg_data,
                        i2c_list->i2c_settings.data_type);
                    read_buff +=
                        i2c_list->i2c_settings.data_type;
                }
            }
        } else if (i2c_list->op_code == CAM_SENSOR_I2C_READ_SEQ) {
            /* Sequential read: `size` is already a byte count. */
            read_length = i2c_list->i2c_settings.size;
            if (read_length > buff_length) {
                CAM_ERR(CAM_SENSOR_UTIL,
                    "Invalid buffer size, readLen: %d, bufLen: %d",
                    read_length, buff_length);
                return -EINVAL;
            }
            rc = camera_io_dev_read_seq(
                io_master_info,
                i2c_list->i2c_settings.reg_setting[0].reg_addr,
                read_buff,
                i2c_list->i2c_settings.addr_type,
                i2c_list->i2c_settings.data_type,
                i2c_list->i2c_settings.size);
            if (rc < 0) {
                CAM_ERR(CAM_SENSOR_UTIL,
                    "failed: seq read I2C settings: %d",
                    rc);
                return rc;
            }
        }
    }

    return rc;
}

/*
 * msm_camera_fill_vreg_params - resolve each power-sequence entry to its
 * regulator index (by name) in soc_info, and mark whether the requested
 * config_val is a valid voltage for that regulator.
 *
 * Entries whose regulator name is absent get seq_val = INVALID_VREG.
 * Returns 0 on success, -EINVAL on bad args, regulator count (unless
 * bypassed via debug_bypass_drivers), or an out-of-range seq_type.
 */
int32_t msm_camera_fill_vreg_params(
    struct cam_hw_soc_info *soc_info,
    struct cam_sensor_power_setting *power_setting,
    uint16_t power_setting_size)
{
    int32_t rc = 0, j = 0, i = 0;
    int num_vreg;

    /* Validate input parameters */
    if (!soc_info || !power_setting) {
        CAM_ERR(CAM_SENSOR_UTIL, "failed: soc_info %pK power_setting %pK",
            soc_info, power_setting);
        return -EINVAL;
    }

    num_vreg = soc_info->num_rgltr;

    if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
        /* Debug override lets bring-up proceed without regulators. */
        if (debug_bypass_drivers & CAM_BYPASS_RGLTR)
            CAM_DBG(CAM_SENSOR_UTIL, "Bypass parameter check num_vreg %d",
                num_vreg);
        else {
            CAM_ERR(CAM_SENSOR_UTIL, "failed: num_vreg %d", num_vreg);
            return -EINVAL;
        }
    }

    for (i = 0; i < power_setting_size; i++) {

        if (power_setting[i].seq_type < SENSOR_MCLK ||
            power_setting[i].seq_type >= SENSOR_SEQ_TYPE_MAX) {
            CAM_ERR(CAM_SENSOR_UTIL, "failed: Invalid Seq type: %d",
                power_setting[i].seq_type);
            return -EINVAL;
        }

        power_setting[i].valid_config = false;

        /* Match seq_type to a named regulator in soc_info->rgltr_name. */
        switch (power_setting[i].seq_type) {
        case SENSOR_VDIG:
            for (j = 0; j < num_vreg; j++) {
                if (!strcmp(soc_info->rgltr_name[j],
                    "cam_vdig")) {

                    CAM_DBG(CAM_SENSOR_UTIL,
                        "i: %d j: %d cam_vdig", i, j);
                    power_setting[i].seq_val = j;

                    if (VALIDATE_VOLTAGE(
                        soc_info->rgltr_min_volt[j],
                        soc_info->rgltr_max_volt[j],
                        power_setting[i].config_val))
                        power_setting[i].valid_config = true;

                    break;
                }
            }
            if (j == num_vreg)
                power_setting[i].seq_val = INVALID_VREG;
            break;

        case SENSOR_VIO:
            for (j = 0; j < num_vreg; j++) {

                if (!strcmp(soc_info->rgltr_name[j],
                    "cam_vio")) {
                    CAM_DBG(CAM_SENSOR_UTIL,
                        "i: %d j: %d cam_vio", i, j);
                    power_setting[i].seq_val = j;

                    if (VALIDATE_VOLTAGE(
                        soc_info->rgltr_min_volt[j],
                        soc_info->rgltr_max_volt[j],
                        power_setting[i].config_val))
                        power_setting[i].valid_config = true;

                    break;
                }

            }
            if (j == num_vreg)
                power_setting[i].seq_val = INVALID_VREG;
            break;

        case SENSOR_VANA:
            for (j = 0; j < num_vreg; j++) {

                if (!strcmp(soc_info->rgltr_name[j],
                    "cam_vana")) {
                    CAM_DBG(CAM_SENSOR_UTIL,
                        "i: %d j: %d cam_vana", i, j);
                    power_setting[i].seq_val = j;

                    if (VALIDATE_VOLTAGE(
                        soc_info->rgltr_min_volt[j],
                        soc_info->rgltr_max_volt[j],
                        power_setting[i].config_val))
                        power_setting[i].valid_config = true;

                    break;
                }

            }
            if (j == num_vreg)
                power_setting[i].seq_val = INVALID_VREG;
            break;

        case SENSOR_VANA1:
            for (j = 0; j < num_vreg; j++) {
                if (!strcmp(soc_info->rgltr_name[j],
                    "cam_vana1")) {
                    CAM_DBG(CAM_SENSOR_UTIL,
                        "i: %d j: %d cam_vana1", i, j);
                    power_setting[i].seq_val = j;

                    if (VALIDATE_VOLTAGE(
                        soc_info->rgltr_min_volt[j],
                        soc_info->rgltr_max_volt[j],
                        power_setting[i].config_val))
                        power_setting[i].valid_config = true;

                    break;
                }
            }
            if (j == num_vreg)
                power_setting[i].seq_val = INVALID_VREG;
            break;

        case SENSOR_VAF:
            for (j = 0; j < num_vreg; j++) {

                if (!strcmp(soc_info->rgltr_name[j],
                    "cam_vaf")) {
                    CAM_DBG(CAM_SENSOR_UTIL,
                        "i: %d j: %d cam_vaf", i, j);
                    power_setting[i].seq_val = j;

                    if (VALIDATE_VOLTAGE(
                        soc_info->rgltr_min_volt[j],
                        soc_info->rgltr_max_volt[j],
                        power_setting[i].config_val))
                        power_setting[i].valid_config = true;

                    break;
                }

            }
            if (j == num_vreg)
                power_setting[i].seq_val = INVALID_VREG;
            break;

        case SENSOR_CUSTOM_REG1:
            for (j = 0; j < num_vreg; j++) {

                if (!strcmp(soc_info->rgltr_name[j],
                    "cam_v_custom1")) {
                    CAM_DBG(CAM_SENSOR_UTIL,
                        "i:%d j:%d cam_vcustom1", i, j);
                    power_setting[i].seq_val = j;

                    if (VALIDATE_VOLTAGE(
                        soc_info->rgltr_min_volt[j],
                        soc_info->rgltr_max_volt[j],
                        power_setting[i].config_val))
                        power_setting[i].valid_config = true;

                    break;
                }

            }
            if (j == num_vreg)
                power_setting[i].seq_val = INVALID_VREG;
            break;
        case SENSOR_CUSTOM_REG2:
            for (j = 0; j < num_vreg; j++) {

                if (!strcmp(soc_info->rgltr_name[j],
                    "cam_v_custom2")) {
                    CAM_DBG(CAM_SENSOR_UTIL,
                        "i:%d j:%d cam_vcustom2", i, j);
                    power_setting[i].seq_val = j;

                    if (VALIDATE_VOLTAGE(
                        soc_info->rgltr_min_volt[j],
                        soc_info->rgltr_max_volt[j],
                        power_setting[i].config_val))
                        power_setting[i].valid_config = true;

                    break;
                }
            }
            if (j == num_vreg)
                power_setting[i].seq_val = INVALID_VREG;
            break;
        default:
            /* Non-regulator seq types (MCLK, GPIOs, ...) need no mapping. */
            break;
        }
    }

    return rc;
}

/*
 * cam_sensor_util_request_gpio_table - request (gpio_en != 0) or free
 * (gpio_en == 0) every GPIO in the soc_info request table.
 *
 * Returns 0 on success; on a failed request, frees the GPIOs acquired so
 * far and returns the error.
 */
int cam_sensor_util_request_gpio_table(
    struct cam_hw_soc_info *soc_info, int gpio_en)
{
    int rc = 0, i = 0;
    uint8_t size = 0;
    struct cam_soc_gpio_data *gpio_conf =
        soc_info->gpio_data;
    struct gpio *gpio_tbl = NULL;

    if (!gpio_conf) {
        CAM_DBG(CAM_SENSOR_UTIL, "No GPIO data");
        return 0;
    }

    if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
        CAM_ERR(CAM_SENSOR_UTIL, "No GPIO entry");
        return -EINVAL;
    }

    gpio_tbl = gpio_conf->cam_gpio_req_tbl;
    size = gpio_conf->cam_gpio_req_tbl_size;

    if (!gpio_tbl || !size) {
        CAM_ERR(CAM_SENSOR_UTIL, "invalid gpio_tbl %pK / size %d",
            gpio_tbl, size);
        return -EINVAL;
    }

    for (i = 0; i < size; i++) {
        CAM_DBG(CAM_SENSOR_UTIL, "%s%d, i: %d, gpio %d dir %lld",
            soc_info->dev_name, soc_info->index, i,
            gpio_tbl[i].gpio, gpio_tbl[i].flags);
    }

    if (gpio_en) {
        for (i = 0; i < size; i++) {
            rc = cam_res_mgr_gpio_request(soc_info->dev,
                gpio_tbl[i].gpio,
                gpio_tbl[i].flags, gpio_tbl[i].label);
            if (rc) {
                /*
                 * After GPIO request fails, continue to
                 * apply new gpios, output an error message
                 * for driver bringup debug
                 */
                CAM_ERR(CAM_SENSOR_UTIL, "gpio %d:%s request fails",
                    gpio_tbl[i].gpio, gpio_tbl[i].label);
                goto request_fail;
            }
        }
    } else {
        cam_res_mgr_gpio_free_arry(soc_info->dev, gpio_tbl,
size); + } + + return rc; + +request_fail: + if (gpio_en && (i > 0)) + cam_res_mgr_gpio_free_arry(soc_info->dev, gpio_tbl, i); + return rc; +} + +bool cam_sensor_util_check_gpio_is_shared( + struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + uint8_t size = 0; + struct cam_soc_gpio_data *gpio_conf = + soc_info->gpio_data; + struct gpio *gpio_tbl = NULL; + + if (!gpio_conf) { + CAM_DBG(CAM_SENSOR_UTIL, "No GPIO data"); + return false; + } + + if (gpio_conf->cam_gpio_common_tbl_size <= 0) { + CAM_DBG(CAM_SENSOR_UTIL, "No GPIO entry"); + return false; + } + + gpio_tbl = gpio_conf->cam_gpio_req_tbl; + size = gpio_conf->cam_gpio_req_tbl_size; + + if (!gpio_tbl || !size) { + CAM_ERR(CAM_SENSOR_UTIL, "invalid gpio_tbl %pK / size %d", + gpio_tbl, size); + return false; + } + + rc = cam_res_mgr_util_check_if_gpio_is_shared( + gpio_tbl, size); + if (!rc) { + CAM_DBG(CAM_SENSOR_UTIL, + "dev: %s don't have shared gpio resources", + soc_info->dev_name); + return false; + } + + return true; +} + +static int32_t cam_sensor_validate(void *ptr, size_t remain_buf) +{ + struct common_header *cmm_hdr = (struct common_header *)ptr; + size_t validate_size = 0; + + if (remain_buf < sizeof(struct common_header)) + return -EINVAL; + + if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_PWR_UP || + cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) + validate_size = sizeof(struct cam_cmd_power); + else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) + validate_size = sizeof(struct cam_cmd_unconditional_wait); + + if (remain_buf < validate_size) { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid cmd_buf len %zu min %zu", + remain_buf, validate_size); + return -EINVAL; + } + return 0; +} + +int32_t cam_sensor_update_power_settings(void *cmd_buf, + uint32_t cmd_length, struct cam_sensor_power_ctrl_t *power_info, + size_t cmd_buf_len) +{ + int32_t rc = 0, tot_size = 0, last_cmd_type = 0; + int32_t i = 0, pwr_up = 0, pwr_down = 0; + struct cam_sensor_power_setting *pwr_settings; + void *ptr = NULL, 
*scr; + struct common_header *cmm_hdr = NULL; + struct cam_cmd_power *pwr_cmd = + CAM_MEM_ZALLOC(cmd_buf_len, GFP_KERNEL); + + if (!pwr_cmd) { + CAM_DBG(CAM_SENSOR_UTIL, "pwr_cmd memory allocation failed!"); + return -ENOMEM; + } + memcpy(pwr_cmd, cmd_buf, cmd_buf_len); + ptr = pwr_cmd; + cmm_hdr = (struct common_header *)pwr_cmd; + + if (!pwr_cmd || !cmd_length || cmd_buf_len < (size_t)cmd_length || + cam_sensor_validate(pwr_cmd, cmd_buf_len)) { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid Args: pwr_cmd %pK, cmd_length: %d", + pwr_cmd, cmd_length); + rc = -EINVAL; + goto free_power_command; + } + + memcpy(pwr_cmd, cmd_buf, sizeof(struct cam_cmd_power)); + + power_info->power_setting_size = 0; + power_info->power_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_power_setting) * + MAX_POWER_CONFIG, GFP_KERNEL); + + if (!power_info->power_setting) { + rc = -ENOMEM; + goto free_power_command; + } + + power_info->power_down_setting_size = 0; + power_info->power_down_setting = + CAM_MEM_ZALLOC(sizeof(struct cam_sensor_power_setting) * + MAX_POWER_CONFIG, GFP_KERNEL); + + if (!power_info->power_down_setting) { + CAM_MEM_FREE(power_info->power_setting); + power_info->power_setting = NULL; + power_info->power_setting_size = 0; + rc = -ENOMEM; + goto free_power_command; + } + + while (tot_size < cmd_length) { + if (cam_sensor_validate(ptr, (cmd_length - tot_size))) { + rc = -EINVAL; + goto free_power_settings; + } + + if (cmm_hdr->cmd_type == + CAMERA_SENSOR_CMD_TYPE_PWR_UP) { + struct cam_cmd_power *pwr_up_cmd = + (struct cam_cmd_power *)ptr; + + if ((U16_MAX - power_info->power_setting_size) < + pwr_up_cmd->count) { + CAM_ERR(CAM_SENSOR_UTIL, "ERR: Overflow occurs"); + rc = -EINVAL; + goto free_power_settings; + } + + power_info->power_setting_size += pwr_up_cmd->count; + + if ((power_info->power_setting_size > MAX_POWER_CONFIG) + || (pwr_up_cmd->count >= SENSOR_SEQ_TYPE_MAX)) { + CAM_ERR(CAM_SENSOR_UTIL, + "pwr_up setting size %d, pwr_cmd->count: %d", + 
power_info->power_setting_size, + pwr_up_cmd->count); + rc = -EINVAL; + goto free_power_settings; + } + + scr = ptr + sizeof(struct cam_cmd_power); + tot_size = tot_size + sizeof(struct cam_cmd_power); + + if (pwr_up_cmd->count == 0) + CAM_WARN(CAM_SENSOR_UTIL, "pwr_up_size is zero"); + + for (i = 0; i < pwr_up_cmd->count; i++, pwr_up++) { + power_info->power_setting[pwr_up].seq_type = + pwr_up_cmd->power_settings_flex[i].power_seq_type; + power_info->power_setting[pwr_up].config_val = + pwr_up_cmd->power_settings_flex[i].config_val_low; + power_info->power_setting[pwr_up].delay = 0; + + if (i) { + scr = scr + + sizeof( + struct cam_power_settings); + tot_size = tot_size + + sizeof( + struct cam_power_settings); + } + + if (tot_size > cmd_length) { + CAM_ERR(CAM_SENSOR_UTIL, + "Error: Cmd Buffer is wrong"); + rc = -EINVAL; + goto free_power_settings; + } + + CAM_DBG(CAM_SENSOR_UTIL, + "Seq Type[%d]: %d Config_val: %ld", pwr_up, + power_info->power_setting[pwr_up].seq_type, + power_info->power_setting[pwr_up].config_val); + } + last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP; + ptr = (void *) scr; + cmm_hdr = (struct common_header *)ptr; + } else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) { + struct cam_cmd_unconditional_wait *wait_cmd = + (struct cam_cmd_unconditional_wait *)ptr; + if ((wait_cmd->op_code == + CAMERA_SENSOR_WAIT_OP_SW_UCND) && + (last_cmd_type == + CAMERA_SENSOR_CMD_TYPE_PWR_UP)) { + + if (pwr_up > 0) { + pwr_settings = + &power_info->power_setting[pwr_up - 1]; + pwr_settings->delay += + wait_cmd->delay; + } else { + CAM_ERR(CAM_SENSOR_UTIL, + "Delay is expected only after valid power up setting"); + } + } else if ((wait_cmd->op_code == + CAMERA_SENSOR_WAIT_OP_SW_UCND) && + (last_cmd_type == + CAMERA_SENSOR_CMD_TYPE_PWR_DOWN)) { + + if (pwr_down > 0) { + pwr_settings = + &power_info->power_down_setting[ + pwr_down - 1]; + pwr_settings->delay += + wait_cmd->delay; + } else { + CAM_ERR(CAM_SENSOR_UTIL, + "Delay is expected only after valid 
power up setting"); + } + } else { + CAM_DBG(CAM_SENSOR_UTIL, "Invalid op code: %d", + wait_cmd->op_code); + } + + tot_size = tot_size + + sizeof(struct cam_cmd_unconditional_wait); + + if (tot_size > cmd_length) { + CAM_ERR(CAM_SENSOR_UTIL, "Command Buffer is wrong"); + return -EINVAL; + } + + scr = (void *) (wait_cmd); + ptr = (void *) + (scr + + sizeof(struct cam_cmd_unconditional_wait)); + CAM_DBG(CAM_SENSOR_UTIL, "ptr: %pK sizeof: %d Next: %pK", + scr, (int32_t)sizeof( + struct cam_cmd_unconditional_wait), ptr); + + cmm_hdr = (struct common_header *)ptr; + } else if (cmm_hdr->cmd_type == + CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) { + struct cam_cmd_power *pwr_dwn_cmd = + (struct cam_cmd_power *)ptr; + + scr = ptr + sizeof(struct cam_cmd_power); + tot_size = tot_size + sizeof(struct cam_cmd_power); + + if ((U16_MAX - power_info->power_down_setting_size) < + pwr_dwn_cmd->count) { + CAM_ERR(CAM_SENSOR_UTIL, "ERR: Overflow"); + rc = -EINVAL; + goto free_power_settings; + } + + power_info->power_down_setting_size += pwr_dwn_cmd->count; + + if ((power_info->power_down_setting_size > + MAX_POWER_CONFIG) || (pwr_dwn_cmd->count >= + SENSOR_SEQ_TYPE_MAX)) { + CAM_ERR(CAM_SENSOR_UTIL, + "pwr_down_setting_size %d, pwr_cmd->count: %d", + power_info->power_down_setting_size, + pwr_dwn_cmd->count); + rc = -EINVAL; + goto free_power_settings; + } + + if (pwr_dwn_cmd->count == 0) + CAM_ERR(CAM_SENSOR_UTIL, "pwr_down size is zero"); + + for (i = 0; i < pwr_dwn_cmd->count; i++, pwr_down++) { + pwr_settings = + &power_info->power_down_setting[pwr_down]; + pwr_settings->seq_type = + pwr_dwn_cmd->power_settings_flex[i].power_seq_type; + pwr_settings->config_val = + pwr_dwn_cmd->power_settings_flex[i].config_val_low; + power_info->power_down_setting[pwr_down].delay = 0; + + if (i) { + scr = scr + + sizeof( + struct cam_power_settings); + tot_size = + tot_size + + sizeof( + struct cam_power_settings); + } + + if (tot_size > cmd_length) { + CAM_ERR(CAM_SENSOR_UTIL, + "Command Buffer is 
wrong"); + rc = -EINVAL; + goto free_power_settings; + } + CAM_DBG(CAM_SENSOR_UTIL, + "Seq Type[%d]: %d Config_val: %ld", + pwr_down, pwr_settings->seq_type, + pwr_settings->config_val); + } + last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN; + ptr = (void *) scr; + cmm_hdr = (struct common_header *)ptr; + } else { + CAM_ERR(CAM_SENSOR_UTIL, + "Error: Un expected Header Type: %d", + cmm_hdr->cmd_type); + rc = -EINVAL; + goto free_power_settings; + } + } + + goto free_power_command; +free_power_settings: + CAM_MEM_FREE(power_info->power_down_setting); + CAM_MEM_FREE(power_info->power_setting); + power_info->power_down_setting = NULL; + power_info->power_setting = NULL; + power_info->power_down_setting_size = 0; + power_info->power_setting_size = 0; +free_power_command: + CAM_MEM_FREE(pwr_cmd); + pwr_cmd = NULL; + return rc; +} + +int cam_get_dt_power_setting_data(struct device_node *of_node, + struct cam_hw_soc_info *soc_info, + struct cam_sensor_power_ctrl_t *power_info) +{ + int rc = 0, i; + int count = 0; + const char *seq_name = NULL; + uint32_t *array = NULL; + struct cam_sensor_power_setting *ps; + int c, end; + + if (!power_info) + return -EINVAL; + + count = of_property_count_strings(of_node, "qcom,cam-power-seq-type"); + if (count <= 0) + return 0; + + CAM_DBG(CAM_SENSOR_UTIL, "qcom,cam-power-seq-type count %d", count); + power_info->power_setting_size = count; + + ps = CAM_MEM_ZALLOC_ARRAY(count, sizeof(*ps), GFP_KERNEL); + if (!ps) + return -ENOMEM; + power_info->power_setting = ps; + + for (i = 0; i < count; i++) { + rc = of_property_read_string_index(of_node, + "qcom,cam-power-seq-type", i, &seq_name); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_UTIL, "failed"); + goto ERROR1; + } + CAM_DBG(CAM_SENSOR_UTIL, "seq_name[%d] = %s", i, seq_name); + if (!strcmp(seq_name, "cam_vio")) { + ps[i].seq_type = SENSOR_VIO; + } else if (!strcmp(seq_name, "cam_vana")) { + ps[i].seq_type = SENSOR_VANA; + } else if (!strcmp(seq_name, "cam_vana1")) { + ps[i].seq_type = 
SENSOR_VANA1;
        } else if (!strcmp(seq_name, "cam_clk")) {
            ps[i].seq_type = SENSOR_MCLK;
        } else {
            CAM_ERR(CAM_SENSOR_UTIL, "unrecognized seq-type %s",
                seq_name);
            rc = -EILSEQ;
            goto ERROR1;
        }
        CAM_DBG(CAM_SENSOR_UTIL, "seq_type[%d] %d", i, ps[i].seq_type);
    }

    /* One scratch array reused for both cfg-val and delay reads. */
    array = CAM_MEM_ZALLOC_ARRAY(count, sizeof(uint32_t), GFP_KERNEL);
    if (!array) {
        rc = -ENOMEM;
        goto ERROR1;
    }

    rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-cfg-val",
        array, count);
    if (rc < 0) {
        CAM_ERR(CAM_SENSOR_UTIL, "failed ");
        goto ERROR2;
    }

    for (i = 0; i < count; i++) {
        ps[i].config_val = array[i];
        CAM_DBG(CAM_SENSOR_UTIL, "power_setting[%d].config_val = %ld", i,
            ps[i].config_val);
    }

    rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-delay",
        array, count);
    if (rc < 0) {
        CAM_ERR(CAM_SENSOR_UTIL, "failed");
        goto ERROR2;
    }
    for (i = 0; i < count; i++) {
        ps[i].delay = array[i];
        CAM_DBG(CAM_SENSOR_UTIL, "power_setting[%d].delay = %d", i,
            ps[i].delay);
    }
    CAM_MEM_FREE(array);

    power_info->power_down_setting =
        CAM_MEM_ZALLOC_ARRAY(count, sizeof(*ps), GFP_KERNEL);

    if (!power_info->power_down_setting) {
        CAM_ERR(CAM_SENSOR_UTIL, "failed");
        rc = -ENOMEM;
        goto ERROR1;
    }

    power_info->power_down_setting_size = count;

    /* Power-down sequence is the power-up sequence reversed. */
    end = count - 1;

    for (c = 0; c < count; c++) {
        power_info->power_down_setting[c] = ps[end];
        end--;
    }
    return rc;
ERROR2:
    CAM_MEM_FREE(array);
ERROR1:
    CAM_MEM_FREE(ps);
    return rc;
}

/*
 * cam_sensor_util_init_gpio_pin_tbl - allocate *pgpio_num_info and fill it
 * from the per-function gpio-* devicetree properties, each an index into
 * the soc_info common GPIO table.
 *
 * (Function continues beyond this view; tail not shown here.)
 */
int cam_sensor_util_init_gpio_pin_tbl(
    struct cam_hw_soc_info *soc_info,
    struct msm_camera_gpio_num_info **pgpio_num_info)
{
    int rc = 0, val = 0;
    uint32_t gpio_array_size;
    struct device_node *of_node = NULL;
    struct cam_soc_gpio_data *gconf = NULL;
    struct msm_camera_gpio_num_info *gpio_num_info = NULL;

    if (!soc_info->dev) {
        CAM_ERR(CAM_SENSOR_UTIL, "device node NULL");
        return -EINVAL;
    }

    of_node = soc_info->dev->of_node;

    gconf = soc_info->gpio_data;
    if (!gconf) {
        CAM_ERR(CAM_SENSOR_UTIL, "No gpio_common_table is found");
        return -EINVAL;
    }

    if (!gconf->cam_gpio_common_tbl) {
        CAM_ERR(CAM_SENSOR_UTIL, "gpio_common_table is not initialized");
        return -EINVAL;
    }

    gpio_array_size = gconf->cam_gpio_common_tbl_size;

    if (!gpio_array_size) {
        CAM_ERR(CAM_SENSOR_UTIL, "invalid size of gpio table");
        return -EINVAL;
    }

    *pgpio_num_info = CAM_MEM_ZALLOC(sizeof(struct msm_camera_gpio_num_info),
        GFP_KERNEL);
    if (!*pgpio_num_info)
        return -ENOMEM;
    gpio_num_info = *pgpio_num_info;

    /* -EINVAL from the read means "property absent" → optional, skip. */
    rc = of_property_read_u32(of_node, "gpio-vana", &val);
    if (rc != -EINVAL) {
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL, "read gpio-vana failed rc %d", rc);
            goto free_gpio_info;
        } else if (val >= gpio_array_size) {
            CAM_ERR(CAM_SENSOR_UTIL, "gpio-vana invalid %d", val);
            rc = -EINVAL;
            goto free_gpio_info;
        }
        gpio_num_info->gpio_num[SENSOR_VANA] =
            gconf->cam_gpio_common_tbl[val].gpio;
        gpio_num_info->valid[SENSOR_VANA] = 1;

        CAM_DBG(CAM_SENSOR_UTIL, "gpio-vana %d",
            gpio_num_info->gpio_num[SENSOR_VANA]);
    }

    rc = of_property_read_u32(of_node, "gpio-vana1", &val);
    if (rc != -EINVAL) {
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL, "read gpio-vana1 failed rc %d", rc);
            goto free_gpio_info;
        } else if (val >= gpio_array_size) {
            CAM_ERR(CAM_SENSOR_UTIL, "gpio-vana1 invalid %d", val);
            rc = -EINVAL;
            goto free_gpio_info;
        }
        gpio_num_info->gpio_num[SENSOR_VANA1] =
            gconf->cam_gpio_common_tbl[val].gpio;
        gpio_num_info->valid[SENSOR_VANA1] = 1;

        CAM_DBG(CAM_SENSOR_UTIL, "gpio-vana1 %d",
            gpio_num_info->gpio_num[SENSOR_VANA1]);
    }

    rc = of_property_read_u32(of_node, "gpio-vio", &val);
    if (rc != -EINVAL) {
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL, "read gpio-vio failed rc %d", rc);
            goto free_gpio_info;
        } else if (val >= gpio_array_size) {
            CAM_ERR(CAM_SENSOR_UTIL, "gpio-vio invalid %d", val);
            /*
             * NOTE(review): unlike every other branch, rc is NOT set
             * to -EINVAL here, so this failure path returns the
             * (successful) read's rc — confirm and align.
             */
            goto free_gpio_info;
        }
        gpio_num_info->gpio_num[SENSOR_VIO] =
            gconf->cam_gpio_common_tbl[val].gpio;
        gpio_num_info->valid[SENSOR_VIO] = 1;

        CAM_DBG(CAM_SENSOR_UTIL, "gpio-vio %d",
            gpio_num_info->gpio_num[SENSOR_VIO]);
    }

    rc = of_property_read_u32(of_node, "gpio-vaf", &val);
    if (rc != -EINVAL) {
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL, "read gpio-vaf failed rc %d", rc);
            goto free_gpio_info;
        } else if (val >= gpio_array_size) {
            CAM_ERR(CAM_SENSOR_UTIL, "gpio-vaf invalid %d", val);
            rc = -EINVAL;
            goto free_gpio_info;
        }
        gpio_num_info->gpio_num[SENSOR_VAF] =
            gconf->cam_gpio_common_tbl[val].gpio;
        gpio_num_info->valid[SENSOR_VAF] = 1;

        CAM_DBG(CAM_SENSOR_UTIL, "gpio-vaf %d",
            gpio_num_info->gpio_num[SENSOR_VAF]);
    }

    rc = of_property_read_u32(of_node, "gpio-vdig", &val);
    if (rc != -EINVAL) {
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL, "read gpio-vdig failed rc %d", rc);
            goto free_gpio_info;
        } else if (val >= gpio_array_size) {
            CAM_ERR(CAM_SENSOR_UTIL, "gpio-vdig invalid %d", val);
            rc = -EINVAL;
            goto free_gpio_info;
        }
        gpio_num_info->gpio_num[SENSOR_VDIG] =
            gconf->cam_gpio_common_tbl[val].gpio;
        gpio_num_info->valid[SENSOR_VDIG] = 1;

        CAM_DBG(CAM_SENSOR_UTIL, "gpio-vdig %d",
            gpio_num_info->gpio_num[SENSOR_VDIG]);
    }

    rc = of_property_read_u32(of_node, "gpio-reset", &val);
    if (rc != -EINVAL) {
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL, "read gpio-reset failed rc %d", rc);
            goto free_gpio_info;
        } else if (val >= gpio_array_size) {
            CAM_ERR(CAM_SENSOR_UTIL, "gpio-reset invalid %d", val);
            rc = -EINVAL;
            goto free_gpio_info;
        }
        gpio_num_info->gpio_num[SENSOR_RESET] =
            gconf->cam_gpio_common_tbl[val].gpio;
        gpio_num_info->valid[SENSOR_RESET] = 1;

        CAM_DBG(CAM_SENSOR_UTIL, "gpio-reset %d",
            gpio_num_info->gpio_num[SENSOR_RESET]);
    }

    rc = of_property_read_u32(of_node, "gpio-standby", &val);
    if (rc != -EINVAL) {
        if (rc < 0) {
            CAM_ERR(CAM_SENSOR_UTIL,
                "read gpio-standby failed rc %d", rc);
            goto free_gpio_info;
        } else if (val >= gpio_array_size)
{ + CAM_ERR(CAM_SENSOR_UTIL, "gpio-standby invalid %d", val); + rc = -EINVAL; + goto free_gpio_info; + } + gpio_num_info->gpio_num[SENSOR_STANDBY] = + gconf->cam_gpio_common_tbl[val].gpio; + gpio_num_info->valid[SENSOR_STANDBY] = 1; + + CAM_DBG(CAM_SENSOR_UTIL, "gpio-standby %d", + gpio_num_info->gpio_num[SENSOR_STANDBY]); + } + + rc = of_property_read_u32(of_node, "gpio-af-pwdm", &val); + if (rc != -EINVAL) { + if (rc < 0) { + CAM_ERR(CAM_SENSOR_UTIL, + "read gpio-af-pwdm failed rc %d", rc); + goto free_gpio_info; + } else if (val >= gpio_array_size) { + CAM_ERR(CAM_SENSOR_UTIL, "gpio-af-pwdm invalid %d", val); + rc = -EINVAL; + goto free_gpio_info; + } + gpio_num_info->gpio_num[SENSOR_VAF_PWDM] = + gconf->cam_gpio_common_tbl[val].gpio; + gpio_num_info->valid[SENSOR_VAF_PWDM] = 1; + + CAM_DBG(CAM_SENSOR_UTIL, "gpio-af-pwdm %d", + gpio_num_info->gpio_num[SENSOR_VAF_PWDM]); + } + + rc = of_property_read_u32(of_node, "gpio-custom1", &val); + if (rc != -EINVAL) { + if (rc < 0) { + CAM_ERR(CAM_SENSOR_UTIL, + "read gpio-custom1 failed rc %d", rc); + goto free_gpio_info; + } else if (val >= gpio_array_size) { + CAM_ERR(CAM_SENSOR_UTIL, "gpio-custom1 invalid %d", val); + rc = -EINVAL; + goto free_gpio_info; + } + gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] = + gconf->cam_gpio_common_tbl[val].gpio; + gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1; + + CAM_DBG(CAM_SENSOR_UTIL, "gpio-custom1 %d", + gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]); + } + + rc = of_property_read_u32(of_node, "gpio-custom2", &val); + if (rc != -EINVAL) { + if (rc < 0) { + CAM_ERR(CAM_SENSOR_UTIL, + "read gpio-custom2 failed rc %d", rc); + goto free_gpio_info; + } else if (val >= gpio_array_size) { + CAM_ERR(CAM_SENSOR_UTIL, "gpio-custom2 invalid %d", val); + rc = -EINVAL; + goto free_gpio_info; + } + gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] = + gconf->cam_gpio_common_tbl[val].gpio; + gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1; + + CAM_DBG(CAM_SENSOR_UTIL, "gpio-custom2 %d", + 
gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]); + } else { + rc = 0; + } + + return rc; + +free_gpio_info: + CAM_MEM_FREE(gpio_num_info); + gpio_num_info = NULL; + return rc; +} + +int msm_camera_pinctrl_init( + struct msm_pinctrl_info *sensor_pctrl, struct device *dev) +{ + + sensor_pctrl->pinctrl = devm_pinctrl_get(dev); + if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) { + CAM_DBG(CAM_SENSOR_UTIL, "Getting pinctrl handle failed"); + return -EINVAL; + } + sensor_pctrl->gpio_state_active = + pinctrl_lookup_state(sensor_pctrl->pinctrl, + CAM_SENSOR_PINCTRL_STATE_DEFAULT); + if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) { + CAM_ERR(CAM_SENSOR_UTIL, + "Failed to get the active state pinctrl handle"); + return -EINVAL; + } + sensor_pctrl->gpio_state_suspend + = pinctrl_lookup_state(sensor_pctrl->pinctrl, + CAM_SENSOR_PINCTRL_STATE_SLEEP); + if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) { + CAM_ERR(CAM_SENSOR_UTIL, + "Failed to get the suspend state pinctrl handle"); + return -EINVAL; + } + + return 0; +} + +int cam_sensor_bob_pwm_mode_switch(struct cam_hw_soc_info *soc_info, + int bob_reg_idx, bool flag) +{ + int rc = 0; + uint32_t op_current = + (flag == true) ? 
soc_info->rgltr_op_mode[bob_reg_idx] : 0; + + if (soc_info->rgltr[bob_reg_idx] != NULL) { + rc = cam_wrapper_regulator_set_load(soc_info->rgltr[bob_reg_idx], + op_current, soc_info->rgltr_name[bob_reg_idx]); + if (rc) + CAM_WARN(CAM_SENSOR_UTIL, + "BoB PWM SetLoad failed rc: %d", rc); + } + + return rc; +} + +int msm_cam_sensor_handle_reg_gpio(int seq_type, + struct msm_camera_gpio_num_info *gpio_num_info, int val) +{ + int gpio_offset = -1; + + if (!gpio_num_info) { + CAM_INFO(CAM_SENSOR_UTIL, "Input Parameters are not proper"); + return 0; + } + + CAM_DBG(CAM_SENSOR_UTIL, "Seq type: %d, config: %d", seq_type, val); + + gpio_offset = seq_type; + + if (gpio_num_info->valid[gpio_offset] == 1) { + CAM_DBG(CAM_SENSOR_UTIL, "VALID GPIO offset: %d, seqtype: %d", + gpio_offset, seq_type); + cam_res_mgr_gpio_set_value( + gpio_num_info->gpio_num + [gpio_offset], val); + } + + return 0; +} + +static int cam_config_mclk_reg(struct cam_sensor_power_ctrl_t *ctrl, + struct cam_hw_soc_info *soc_info, int32_t index) +{ + int32_t num_vreg = 0, j = 0, rc = 0, idx = 0; + struct cam_sensor_power_setting *ps = NULL; + struct cam_sensor_power_setting *pd = NULL; + + num_vreg = soc_info->num_rgltr; + + pd = &ctrl->power_down_setting[index]; + + for (j = 0; j < num_vreg; j++) { + if (!strcmp(soc_info->rgltr_name[j], "cam_clk")) { + ps = NULL; + for (idx = 0; idx < ctrl->power_setting_size; idx++) { + if (ctrl->power_setting[idx].seq_type == + pd->seq_type) { + ps = &ctrl->power_setting[idx]; + break; + } + } + + if (ps != NULL) { + CAM_DBG(CAM_SENSOR_UTIL, "Disable MCLK Regulator"); + rc = cam_soc_util_regulator_disable( + soc_info->rgltr[j], + soc_info->rgltr_name[j], + soc_info->rgltr_min_volt[j], + soc_info->rgltr_max_volt[j], + soc_info->rgltr_op_mode[j], + soc_info->rgltr_delay[j]); + + if (rc) { + CAM_ERR(CAM_SENSOR_UTIL, + "MCLK REG DISALBE FAILED: %d", + rc); + return rc; + } + + ps->data[0] = + soc_info->rgltr[j]; + } + } + } + + return rc; +} + +int 
cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl, + struct cam_hw_soc_info *soc_info, struct completion *i3c_probe_status) +{ + int rc = 0, index = 0, ret = 0, num_vreg, j = 0, i = 0; + int32_t vreg_idx = -1; + struct cam_sensor_power_setting *power_setting = NULL; + struct msm_camera_gpio_num_info *gpio_num_info = NULL; + long time_left; + uint32_t seq_min_volt = 0; + uint32_t seq_max_volt = 0; + + CAM_DBG(CAM_SENSOR_UTIL, "Enter"); + if (!ctrl) { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid ctrl handle"); + return -EINVAL; + } + + gpio_num_info = ctrl->gpio_num_info; + num_vreg = soc_info->num_rgltr; + + if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) { + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) + CAM_DBG(CAM_SENSOR_UTIL, "Bypass parameter check num_vreg %d", + num_vreg); + else { + CAM_ERR(CAM_SENSOR_UTIL, "failed: num_vreg %d", num_vreg); + return -EINVAL; + } + } + + ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev); + if (ret < 0) { + /* Some sensor subdev no pinctrl. 
*/ + CAM_DBG(CAM_SENSOR_UTIL, "Initialization of pinctrl failed"); + ctrl->cam_pinctrl_status = 0; + } else { + ctrl->cam_pinctrl_status = 1; + } + + rc = cam_sensor_util_request_gpio_table(soc_info, 1); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_UTIL, "request gpio table failed"); + return -EINVAL; + } + + if (ctrl->cam_pinctrl_status) { + ret = pinctrl_select_state( + ctrl->pinctrl_info.pinctrl, + ctrl->pinctrl_info.gpio_state_active); + if (ret) + CAM_ERR(CAM_SENSOR_UTIL, "cannot set pin to active state"); + } + + CAM_DBG(CAM_SENSOR_UTIL, "power setting size: %d", ctrl->power_setting_size); + + for (index = 0; index < ctrl->power_setting_size; index++) { + CAM_DBG(CAM_SENSOR_UTIL, "index: %d", index); + power_setting = &ctrl->power_setting[index]; + if (!power_setting) { + CAM_ERR(CAM_SENSOR_UTIL, + "Invalid power up settings for index %d", + index); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR_UTIL, "seq_type %d", power_setting->seq_type); + + switch (power_setting->seq_type) { + case SENSOR_MCLK: + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_DBG(CAM_SENSOR_UTIL, "Bypass mclk enable"); + continue; + } + + if (power_setting->seq_val >= soc_info->num_clk) { + CAM_ERR(CAM_SENSOR_UTIL, "clk index %d >= max %u", + power_setting->seq_val, + soc_info->num_clk); + goto power_up_failed; + } + for (j = 0; j < num_vreg; j++) { + if (!strcmp(soc_info->rgltr_name[j], + "cam_clk")) { + CAM_DBG(CAM_SENSOR_UTIL, + "Enable cam_clk: %d", j); + + if (IS_ERR_OR_NULL( + soc_info->rgltr[j])) { + rc = PTR_ERR( + soc_info->rgltr[j]); + rc = rc ? 
rc : -EINVAL; + CAM_ERR(CAM_SENSOR_UTIL, + "vreg %s %d", + soc_info->rgltr_name[j], + rc); + goto power_up_failed; + } + + rc = cam_soc_util_regulator_enable( + soc_info->rgltr[j], + soc_info->rgltr_name[j], + soc_info->rgltr_min_volt[j], + soc_info->rgltr_max_volt[j], + soc_info->rgltr_op_mode[j], + soc_info->rgltr_delay[j]); + if (rc) { + CAM_ERR(CAM_SENSOR_UTIL, + "Reg enable failed"); + goto power_up_failed; + } + power_setting->data[0] = + soc_info->rgltr[j]; + } + } + if (power_setting->config_val) + soc_info->clk_rate[0][power_setting->seq_val] = + power_setting->config_val; + + for (j = 0; j < soc_info->num_clk; j++) { + rc = cam_soc_util_clk_enable(soc_info, CAM_CLK_SW_CLIENT_IDX, + false, j, 0); + if (rc) { + CAM_ERR(CAM_UTIL, + "Failed in clk enable %d", i); + break; + } + } + + if (rc < 0) { + CAM_ERR(CAM_SENSOR_UTIL, "clk enable failed"); + goto power_up_failed; + } + break; + case SENSOR_RESET: + case SENSOR_STANDBY: + case SENSOR_CUSTOM_GPIO1: + case SENSOR_CUSTOM_GPIO2: + if (!gpio_num_info) { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid gpio_num_info"); + goto power_up_failed; + } + CAM_DBG(CAM_SENSOR_UTIL, "gpio set val %d", + gpio_num_info->gpio_num + [power_setting->seq_type]); + + rc = msm_cam_sensor_handle_reg_gpio( + power_setting->seq_type, + gpio_num_info, + (int) power_setting->config_val); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_UTIL, + "Error in handling VREG GPIO"); + goto power_up_failed; + } + break; + case SENSOR_VANA: + case SENSOR_VANA1: + case SENSOR_VDIG: + case SENSOR_VIO: + case SENSOR_VAF: + case SENSOR_VAF_PWDM: + case SENSOR_CUSTOM_REG1: + case SENSOR_CUSTOM_REG2: + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_DBG(CAM_SENSOR_UTIL, "Bypass regulator enable seq_type %d", + power_setting->seq_type); + continue; + } + + if (power_setting->seq_val == INVALID_VREG) + break; + + if (power_setting->seq_val >= CAM_VREG_MAX) { + CAM_ERR(CAM_SENSOR_UTIL, "vreg index %d >= max %d", + power_setting->seq_val, + CAM_VREG_MAX); + goto 
power_up_failed; + } + if (power_setting->seq_val < num_vreg) { + CAM_DBG(CAM_SENSOR_UTIL, "Enable Regulator"); + vreg_idx = power_setting->seq_val; + + if (IS_ERR_OR_NULL( + soc_info->rgltr[vreg_idx])) { + rc = PTR_ERR(soc_info->rgltr[vreg_idx]); + rc = rc ? rc : -EINVAL; + + CAM_ERR(CAM_SENSOR_UTIL, "%s get failed %d", + soc_info->rgltr_name[vreg_idx], + rc); + + goto power_up_failed; + } + + if (power_setting->valid_config) { + seq_min_volt = power_setting->config_val; + seq_max_volt = power_setting->config_val; + } else { + seq_min_volt = soc_info->rgltr_min_volt[vreg_idx]; + seq_max_volt = soc_info->rgltr_max_volt[vreg_idx]; + } + + rc = cam_soc_util_regulator_enable( + soc_info->rgltr[vreg_idx], + soc_info->rgltr_name[vreg_idx], + seq_min_volt, + seq_max_volt, + soc_info->rgltr_op_mode[vreg_idx], + soc_info->rgltr_delay[vreg_idx]); + if (rc) { + CAM_ERR(CAM_SENSOR_UTIL, + "Reg Enable failed for %s", + soc_info->rgltr_name[vreg_idx]); + goto power_up_failed; + } + power_setting->data[0] = + soc_info->rgltr[vreg_idx]; + } else { + CAM_ERR(CAM_SENSOR_UTIL, "usr_idx:%d dts_idx:%d", + power_setting->seq_val, num_vreg); + } + + rc = msm_cam_sensor_handle_reg_gpio( + power_setting->seq_type, + gpio_num_info, 1); + if (rc < 0) { + CAM_ERR(CAM_SENSOR_UTIL, + "Error in handling VREG GPIO"); + goto power_up_failed; + } + break; + default: + CAM_ERR(CAM_SENSOR_UTIL, "error power seq type %d", + power_setting->seq_type); + break; + } + if (power_setting->delay > 20) + msleep(power_setting->delay); + else if (power_setting->delay) + usleep_range(power_setting->delay * 1000, + (power_setting->delay * 1000) + 5); + } + + if (i3c_probe_status) { + time_left = cam_common_wait_for_completion_timeout( + i3c_probe_status, + msecs_to_jiffies(CAM_I3C_DEV_PROBE_TIMEOUT_MS)); + if (!time_left) { + CAM_ERR(CAM_SENSOR_UTIL, "Wait for I3C probe timedout"); + rc = -ETIMEDOUT; + goto power_up_failed; + } + } + + return 0; +power_up_failed: + CAM_ERR(CAM_SENSOR_UTIL, "failed. 
rc:%d", rc); + for (index--; index >= 0; index--) { + CAM_DBG(CAM_SENSOR_UTIL, "index %d", index); + power_setting = &ctrl->power_setting[index]; + CAM_DBG(CAM_SENSOR_UTIL, "type %d", + power_setting->seq_type); + switch (power_setting->seq_type) { + case SENSOR_MCLK: + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_DBG(CAM_SENSOR_UTIL, "Bypass mclk disable"); + continue; + } + + for (i = soc_info->num_clk - 1; i >= 0; i--) { + cam_soc_util_clk_disable(soc_info, CAM_CLK_SW_CLIENT_IDX, false, i); + } + ret = cam_config_mclk_reg(ctrl, soc_info, index); + if (ret < 0) { + CAM_ERR(CAM_SENSOR_UTIL, + "config clk reg failed rc: %d", ret); + continue; + } + break; + case SENSOR_RESET: + case SENSOR_STANDBY: + case SENSOR_CUSTOM_GPIO1: + case SENSOR_CUSTOM_GPIO2: + if (!gpio_num_info) + continue; + if (!gpio_num_info->valid + [power_setting->seq_type]) + continue; + cam_res_mgr_gpio_set_value( + gpio_num_info->gpio_num + [power_setting->seq_type], GPIOF_OUT_INIT_LOW); + break; + case SENSOR_VANA: + case SENSOR_VANA1: + case SENSOR_VDIG: + case SENSOR_VIO: + case SENSOR_VAF: + case SENSOR_VAF_PWDM: + case SENSOR_CUSTOM_REG1: + case SENSOR_CUSTOM_REG2: + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_DBG(CAM_SENSOR_UTIL, "Bypass regulator disable seq_type %d", + power_setting->seq_type); + continue; + } + + if (power_setting->seq_val < num_vreg) { + CAM_DBG(CAM_SENSOR_UTIL, "Disable Regulator"); + vreg_idx = power_setting->seq_val; + + rc = cam_soc_util_regulator_disable( + soc_info->rgltr[vreg_idx], + soc_info->rgltr_name[vreg_idx], + soc_info->rgltr_min_volt[vreg_idx], + soc_info->rgltr_max_volt[vreg_idx], + soc_info->rgltr_op_mode[vreg_idx], + soc_info->rgltr_delay[vreg_idx]); + + if (rc) { + CAM_ERR(CAM_SENSOR_UTIL, + "Fail to disalbe reg: %s", + soc_info->rgltr_name[vreg_idx]); + soc_info->rgltr[vreg_idx] = NULL; + msm_cam_sensor_handle_reg_gpio( + power_setting->seq_type, + gpio_num_info, + GPIOF_OUT_INIT_LOW); + continue; + } + power_setting->data[0] = + 
soc_info->rgltr[vreg_idx]; + + } else { + CAM_ERR(CAM_SENSOR_UTIL, "seq_val:%d > num_vreg: %d", + power_setting->seq_val, num_vreg); + } + + msm_cam_sensor_handle_reg_gpio(power_setting->seq_type, + gpio_num_info, GPIOF_OUT_INIT_LOW); + + break; + default: + CAM_ERR(CAM_SENSOR_UTIL, "error power seq type %d", + power_setting->seq_type); + break; + } + if (power_setting->delay > 20) { + msleep(power_setting->delay); + } else if (power_setting->delay) { + usleep_range(power_setting->delay * 1000, + (power_setting->delay * 1000) + 5); + } + } + + if (ctrl->cam_pinctrl_status) { + ret = pinctrl_select_state( + ctrl->pinctrl_info.pinctrl, + ctrl->pinctrl_info.gpio_state_suspend); + if (ret) + CAM_ERR(CAM_SENSOR_UTIL, "cannot set pin to suspend state"); + devm_pinctrl_put(ctrl->pinctrl_info.pinctrl); + } + + ctrl->cam_pinctrl_status = 0; + cam_sensor_util_request_gpio_table(soc_info, 0); + + return -EINVAL; +} + +static struct cam_sensor_power_setting* +msm_camera_get_power_settings(struct cam_sensor_power_ctrl_t *ctrl, + enum msm_camera_power_seq_type seq_type, + uint16_t seq_val) +{ + struct cam_sensor_power_setting *power_setting, *ps = NULL; + int idx; + + for (idx = 0; idx < ctrl->power_setting_size; idx++) { + power_setting = &ctrl->power_setting[idx]; + if (power_setting->seq_type == seq_type && + power_setting->seq_val == seq_val) { + ps = power_setting; + return ps; + } + + } + + return ps; +} + +int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl, + struct cam_hw_soc_info *soc_info) +{ + int index = 0, ret = 0, num_vreg = 0, i; + struct cam_sensor_power_setting *pd = NULL; + struct cam_sensor_power_setting *ps = NULL; + struct msm_camera_gpio_num_info *gpio_num_info = NULL; + + CAM_DBG(CAM_SENSOR_UTIL, "Enter"); + if (!ctrl || !soc_info) { + CAM_ERR(CAM_SENSOR_UTIL, "failed ctrl %pK", ctrl); + return -EINVAL; + } + + gpio_num_info = ctrl->gpio_num_info; + num_vreg = soc_info->num_rgltr; + + if ((num_vreg <= 0) || (num_vreg > 
CAM_SOC_MAX_REGULATOR)) { + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) + CAM_DBG(CAM_SENSOR_UTIL, "Bypass parameter check num_vreg %d", + num_vreg); + else { + CAM_ERR(CAM_SENSOR_UTIL, "failed: num_vreg %d", num_vreg); + return -EINVAL; + } + } + + if (ctrl->power_down_setting_size > MAX_POWER_CONFIG) { + CAM_ERR(CAM_SENSOR_UTIL, "Invalid: power setting size %d", + ctrl->power_setting_size); + return -EINVAL; + } + + for (index = 0; index < ctrl->power_down_setting_size; index++) { + CAM_DBG(CAM_SENSOR_UTIL, "power_down_index %d", index); + pd = &ctrl->power_down_setting[index]; + if (!pd) { + CAM_ERR(CAM_SENSOR_UTIL, + "Invalid power down settings for index %d", + index); + return -EINVAL; + } + + CAM_DBG(CAM_SENSOR_UTIL, "seq_type %d", pd->seq_type); + switch (pd->seq_type) { + case SENSOR_MCLK: + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_DBG(CAM_SENSOR_UTIL, "Bypass mclk disable"); + continue; + } + + for (i = soc_info->num_clk - 1; i >= 0; i--) { + cam_soc_util_clk_disable(soc_info, CAM_CLK_SW_CLIENT_IDX, false, i); + } + + ret = cam_config_mclk_reg(ctrl, soc_info, index); + if (ret < 0) { + CAM_ERR(CAM_SENSOR_UTIL, + "config clk reg failed rc: %d", ret); + continue; + } + break; + case SENSOR_RESET: + case SENSOR_STANDBY: + case SENSOR_CUSTOM_GPIO1: + case SENSOR_CUSTOM_GPIO2: + + if (!gpio_num_info->valid[pd->seq_type]) + continue; + + cam_res_mgr_gpio_set_value( + gpio_num_info->gpio_num + [pd->seq_type], + (int) pd->config_val); + + break; + case SENSOR_VANA: + case SENSOR_VANA1: + case SENSOR_VDIG: + case SENSOR_VIO: + case SENSOR_VAF: + case SENSOR_VAF_PWDM: + case SENSOR_CUSTOM_REG1: + case SENSOR_CUSTOM_REG2: + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_DBG(CAM_SENSOR_UTIL, "Bypass regulator disable seq_type %d", + pd->seq_type); + continue; + } + + if (pd->seq_val == INVALID_VREG) + break; + + ps = msm_camera_get_power_settings( + ctrl, pd->seq_type, + pd->seq_val); + if (ps) { + if (pd->seq_val < num_vreg) { + 
CAM_DBG(CAM_SENSOR_UTIL, + "Disable Regulator"); + ret = cam_soc_util_regulator_disable( + soc_info->rgltr[ps->seq_val], + soc_info->rgltr_name[ps->seq_val], + soc_info->rgltr_min_volt[ps->seq_val], + soc_info->rgltr_max_volt[ps->seq_val], + soc_info->rgltr_op_mode[ps->seq_val], + soc_info->rgltr_delay[ps->seq_val]); + if (ret) { + CAM_ERR(CAM_SENSOR_UTIL, + "Reg: %s disable failed", + soc_info->rgltr_name[ + ps->seq_val]); + msm_cam_sensor_handle_reg_gpio( + pd->seq_type, + gpio_num_info, + GPIOF_OUT_INIT_LOW); + continue; + } + ps->data[0] = + soc_info->rgltr[ps->seq_val]; + } else { + CAM_ERR(CAM_SENSOR_UTIL, + "seq_val:%d > num_vreg: %d", + pd->seq_val, + num_vreg); + } + } else + CAM_ERR(CAM_SENSOR_UTIL, + "error in power up/down seq"); + + ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type, + gpio_num_info, GPIOF_OUT_INIT_LOW); + + if (ret < 0) + CAM_ERR(CAM_SENSOR_UTIL, + "Error disabling VREG GPIO"); + break; + default: + CAM_ERR(CAM_SENSOR_UTIL, "error power seq type %d", + pd->seq_type); + break; + } + if (pd->delay > 20) + msleep(pd->delay); + else if (pd->delay) + usleep_range(pd->delay * 1000, + (pd->delay * 1000) + 5); + } + + if (ctrl->cam_pinctrl_status) { + ret = pinctrl_select_state( + ctrl->pinctrl_info.pinctrl, + ctrl->pinctrl_info.gpio_state_suspend); + if (ret) + CAM_ERR(CAM_SENSOR_UTIL, "cannot set pin to suspend state"); + + devm_pinctrl_put(ctrl->pinctrl_info.pinctrl); + } + + cam_sensor_util_request_gpio_table(soc_info, 0); + ctrl->cam_pinctrl_status = 0; + + return 0; +} + +void cam_sensor_utils_parse_pm_ctrl_flag(struct device_node *of_node, + struct camera_io_master *io_master_info) +{ + struct device_node *of_parent = of_get_next_parent(of_node); + + if ((of_parent != NULL) && + (io_master_info != NULL) && + (io_master_info->qup_client != NULL)) { + io_master_info->qup_client->pm_ctrl_client_enable = + of_property_read_bool(of_parent, "qcom,pm-ctrl-client"); + } +} diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h new file mode 100644 index 0000000000..52330fa719 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_SENSOR_UTIL_H_ +#define _CAM_SENSOR_UTIL_H_ + +#include +#include +#include +#include +#include +#include "cam_sensor_cmn_header.h" +#include "cam_req_mgr_util.h" +#include "cam_req_mgr_interface.h" +#include +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_sensor_io.h" +#include "cam_csiphy_core.h" + +#define INVALID_VREG 100 +#define RES_MGR_GPIO_NEED_HOLD 1 +#define RES_MGR_GPIO_CAN_FREE 2 + +/* + * Constant Factors needed to change QTimer ticks to nanoseconds + * QTimer Freq = 19.2 MHz + * Time(us) = ticks/19.2 + * Time(ns) = ticks/19.2 * 1000 + */ +#define QTIMER_MUL_FACTOR 10000 +#define QTIMER_DIV_FACTOR 192 + +int cam_sensor_count_elems_i3c_device_id(struct device_node *dev, + int *num_entries, char *sensor_id_table_str); + +int cam_sensor_fill_i3c_device_id(struct device_node *dev, int num_entries, + char *sensor_id_table_str, struct i3c_device_id *sensor_i3c_device_id); + +int cam_get_dt_power_setting_data(struct device_node *of_node, + struct cam_hw_soc_info *soc_info, + struct cam_sensor_power_ctrl_t *power_info); + +int msm_camera_pinctrl_init + (struct msm_pinctrl_info *sensor_pctrl, struct device *dev); + +int32_t cam_sensor_util_get_current_qtimer_ns(uint64_t *qtime_ns); + +int32_t cam_sensor_util_write_qtimer_to_io_buffer( + uint64_t qtime_ns, struct cam_buf_io_cfg *io_cfg); + +int32_t cam_sensor_handle_random_write( + struct cam_cmd_i2c_random_wr 
*cam_cmd_i2c_random_wr, + struct i2c_settings_array *i2c_reg_settings, + uint32_t *cmd_length_in_bytes, int32_t *offset, + struct list_head **list, uint32_t payload_count); + +int32_t cam_sensor_handle_continuous_write( + struct cam_cmd_i2c_continuous_wr *cam_cmd_i2c_continuous_wr, + struct i2c_settings_array *i2c_reg_settings, + uint32_t *cmd_length_in_bytes, int32_t *offset, + struct list_head **list, uint32_t payload_count); + +int32_t cam_sensor_handle_delay( + uint32_t **cmd_buf, + uint16_t generic_op_code, + struct i2c_settings_array *i2c_reg_settings, + uint32_t offset, uint32_t *byte_cnt, + struct list_head *list_ptr); + +int32_t cam_sensor_handle_poll( + uint32_t **cmd_buf, + struct i2c_settings_array *i2c_reg_settings, + uint32_t *byte_cnt, int32_t *offset, + struct list_head **list_ptr); + +int32_t cam_sensor_handle_random_read( + struct cam_cmd_i2c_random_rd *cmd_i2c_random_rd, + struct i2c_settings_array *i2c_reg_settings, + uint16_t *cmd_length_in_bytes, + int32_t *offset, + struct list_head **list, + struct cam_buf_io_cfg *io_cfg, uint32_t payload_count); + +int32_t cam_sensor_handle_continuous_read( + struct cam_cmd_i2c_continuous_rd *cmd_i2c_continuous_rd, + struct i2c_settings_array *i2c_reg_settings, + uint16_t *cmd_length_in_bytes, int32_t *offset, + struct list_head **list, + struct cam_buf_io_cfg *io_cfg); + +int cam_sensor_i2c_command_parser(struct camera_io_master *io_master, + struct i2c_settings_array *i2c_reg_settings, + struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers, + struct cam_buf_io_cfg *io_cfg); + +int cam_sensor_util_i2c_apply_setting(struct camera_io_master *io_master_info, + struct i2c_settings_list *i2c_list); + +int32_t cam_sensor_i2c_read_data( + struct i2c_settings_array *i2c_settings, + struct camera_io_master *io_master_info); + +int32_t delete_request(struct i2c_settings_array *i2c_array); +int cam_sensor_util_request_gpio_table( + struct cam_hw_soc_info *soc_info, int gpio_en); + +int 
cam_sensor_util_init_gpio_pin_tbl( + struct cam_hw_soc_info *soc_info, + struct msm_camera_gpio_num_info **pgpio_num_info); +int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl, + struct cam_hw_soc_info *soc_info, struct completion *i3c_probe_status); + +int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl, + struct cam_hw_soc_info *soc_info); + +int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info, + struct cam_sensor_power_setting *power_setting, + uint16_t power_setting_size); + +int32_t cam_sensor_update_power_settings(void *cmd_buf, + uint32_t cmd_length, struct cam_sensor_power_ctrl_t *power_info, + size_t cmd_buf_len); + +int cam_sensor_bob_pwm_mode_switch(struct cam_hw_soc_info *soc_info, + int bob_reg_idx, bool flag); + +bool cam_sensor_util_check_gpio_is_shared(struct cam_hw_soc_info *soc_info); + +static inline int cam_sensor_util_aon_ops(bool get_access, uint32_t phy_idx) +{ + CAM_DBG(CAM_SENSOR, "Updating Main/Aon operation"); + return cam_csiphy_util_update_aon_ops(get_access, phy_idx); +} + +static inline int cam_sensor_util_aon_registration(uint32_t phy_idx, uint32_t aon_camera_id) +{ + CAM_DBG(CAM_SENSOR, "Register phy_idx: %u for AON_Camera_ID: %d", phy_idx, aon_camera_id); + return cam_csiphy_util_update_aon_registration(phy_idx, aon_camera_id); +} + +void cam_sensor_utils_parse_pm_ctrl_flag(struct device_node *of_node, + struct camera_io_master *io_master_info); + +#endif /* _CAM_SENSOR_UTIL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_core.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_core.c new file mode 100644 index 0000000000..e8a3278ffc --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_core.c @@ -0,0 +1,995 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. 
and/or its subsidiaries. + */ + +#include "cam_tpg_core.h" +#include "cam_packet_util.h" +#include +#include "tpg_hw/tpg_hw.h" + +int cam_tpg_shutdown(struct cam_tpg_device *tpg_dev) +{ + if (tpg_dev != NULL) { + CAM_INFO(CAM_TPG, "TPG[%d] shutdown cleanup.", + tpg_dev->soc_info.index); + if (!tpg_dev->hw_no_ops) + tpg_hw_reset(&tpg_dev->tpg_hw); + tpg_dev->state = CAM_TPG_STATE_INIT; + } + return 0; +} + +int cam_tpg_publish_dev_info( + struct cam_req_mgr_device_info *info) +{ + int rc = 0; + struct cam_tpg_device *tpg_dev = NULL; + + if (!info) + return -EINVAL; + + tpg_dev = (struct cam_tpg_device *) + cam_get_device_priv(info->dev_hdl); + + if (!tpg_dev) { + CAM_ERR(CAM_TPG, "Device data is NULL"); + return -EINVAL; + } + + info->dev_id = CAM_REQ_MGR_DEVICE_TPG; + strscpy(info->name, CAM_TPG_NAME, sizeof(info->name)); + /* Hard code for now */ + info->p_delay = 1; + info->trigger = CAM_TRIGGER_POINT_SOF; + + return rc; +} + +int cam_tpg_setup_link( + struct cam_req_mgr_core_dev_link_setup *link) +{ + struct cam_tpg_device *tpg_dev = NULL; + + if (!link) + return -EINVAL; + + tpg_dev = (struct cam_tpg_device *) + cam_get_device_priv(link->dev_hdl); + if (!tpg_dev) { + CAM_ERR(CAM_TPG, "Device data is NULL"); + return -EINVAL; + } + + mutex_lock(&tpg_dev->mutex); + if (link->link_enable) { + tpg_dev->crm_intf.link_hdl = link->link_hdl; + tpg_dev->crm_intf.crm_cb = link->crm_cb; + CAM_DBG(CAM_TPG, "TPG[%d] CRM enable link done", tpg_dev->soc_info.index); + } else { + tpg_dev->crm_intf.link_hdl = -1; + tpg_dev->crm_intf.crm_cb = NULL; + CAM_DBG(CAM_TPG, "TPG[%d] CRM disable link done", tpg_dev->soc_info.index); + } + mutex_unlock(&tpg_dev->mutex); + return 0; +} + +static int cam_tpg_notify_frame_skip( + struct cam_req_mgr_apply_request *apply) +{ + struct cam_tpg_device *tpg_dev = NULL; + + if (apply == NULL) { + CAM_ERR(CAM_TPG, "invalid parameters"); + return -EINVAL; + } + tpg_dev = (struct cam_tpg_device *)cam_get_device_priv(apply->dev_hdl); + if (tpg_dev == 
NULL) { + CAM_ERR(CAM_TPG, "Device data is NULL"); + return -EINVAL; + } + + CAM_DBG(CAM_TPG, "TPG[%d] Got Skip frame from crm for request %lld", + tpg_dev->soc_info.index, + apply->request_id); + return 0; +} + +static int cam_tpg_apply_req( + struct cam_req_mgr_apply_request *apply) +{ + struct cam_tpg_device *tpg_dev = NULL; + + if (!apply) { + CAM_ERR(CAM_TPG, "invalid parameters"); + return -EINVAL; + } + tpg_dev = (struct cam_tpg_device *) + cam_get_device_priv(apply->dev_hdl); + + if (tpg_dev == NULL) { + CAM_ERR(CAM_TPG, "Device data is NULL"); + return -EINVAL; + } + + CAM_DBG(CAM_TPG, "TPG[%d] Got Apply request from crm %lld", + tpg_dev->soc_info.index, + apply->request_id); + mutex_lock(&tpg_dev->mutex); + tpg_hw_apply(&tpg_dev->tpg_hw, + apply->request_id); + mutex_unlock(&tpg_dev->mutex); + + return 0; +} + +static int cam_tpg_flush_req( + struct cam_req_mgr_flush_request *flush_req) +{ + int rc = 0; + struct cam_tpg_device *tpg_dev = NULL; + + if (!flush_req) { + CAM_ERR(CAM_TPG, "Invalid flush request handle encountered"); + return -EINVAL; + } + + tpg_dev = (struct cam_tpg_device *) + cam_get_device_priv(flush_req->dev_hdl); + if (!tpg_dev) { + CAM_ERR(CAM_TPG, "Invalid TPG handle encountered during flush req"); + return -EINVAL; + } + CAM_DBG(CAM_TPG, "Got flush request from crm. 
Flush Type: %d Req: %lld", + flush_req->type, flush_req->req_id); + + mutex_lock(&tpg_dev->mutex); + switch (flush_req->type) { + case CAM_REQ_MGR_FLUSH_TYPE_ALL: + rc = tpg_hw_flush_requests(&tpg_dev->tpg_hw, flush_req->req_id, true); + break; + + case CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ: + rc = tpg_hw_flush_requests(&tpg_dev->tpg_hw, flush_req->req_id, false); + break; + + default: + CAM_ERR(CAM_TPG, "Invalid TPG flush type [%d] rcvd", flush_req->type); + rc = -EINVAL; + } + if (rc != 0) + CAM_ERR(CAM_TPG, "Flushing active/waiting queue failed"); + + mutex_unlock(&tpg_dev->mutex); + return rc; +} + +static int cam_tpg_process_crm_evt( + struct cam_req_mgr_link_evt_data *event) +{ + + struct cam_tpg_device *tpg_dev = NULL; + if (!event) { + CAM_ERR(CAM_TPG, "Invalid argument"); + return -EINVAL; + } + + tpg_dev = cam_get_device_priv(event->dev_hdl); + if (!tpg_dev) { + CAM_ERR(CAM_TPG, "Invalid dev_hdl"); + return -EINVAL; + } + + switch(event->evt_type) { + case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE: + if (tpg_dev->hw_no_ops) { + CAM_DBG(CAM_TPG, "TPG[%d] SOF Freeze hw_no_ops: %d", + tpg_dev->soc_info.index, tpg_dev->hw_no_ops); + } else + tpg_hw_dump_status(&tpg_dev->tpg_hw); + break; + default: + CAM_DBG(CAM_TPG, "Got crm event notification: %d", event->evt_type); + break; + } + return 0; +} + +static int cam_tpg_dump_req( + struct cam_req_mgr_dump_info *dump_info) +{ + CAM_DBG(CAM_TPG, "Got dump request from CRM"); + return 0; +} + +int tpg_crm_intf_init( + struct cam_tpg_device *tpg_dev) +{ + if (tpg_dev == NULL) + return -EINVAL; + + tpg_dev->crm_intf.device_hdl = -1; + tpg_dev->crm_intf.link_hdl = -1; + tpg_dev->crm_intf.ops.get_dev_info = cam_tpg_publish_dev_info; + tpg_dev->crm_intf.ops.link_setup = cam_tpg_setup_link; + tpg_dev->crm_intf.ops.apply_req = cam_tpg_apply_req; + tpg_dev->crm_intf.ops.notify_frame_skip = + cam_tpg_notify_frame_skip; + tpg_dev->crm_intf.ops.flush_req = cam_tpg_flush_req; + tpg_dev->crm_intf.ops.process_evt = cam_tpg_process_crm_evt; + 
tpg_dev->crm_intf.ops.dump_req = cam_tpg_dump_req; + + return 0; +} + +static int __cam_tpg_handle_query_cap( + struct cam_tpg_device *tpg_dev, + struct cam_tpg_query_cap *query) +{ + struct cam_hw_soc_info *soc_info = NULL; + + if (!tpg_dev || !query) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + + soc_info = &tpg_dev->soc_info; + CAM_DBG(CAM_TPG, "Handling tpg query capability for %d slot: %d phy:%d", + soc_info->index, tpg_dev->slot_id, tpg_dev->phy_id); + query->slot_info = soc_info->index; + query->csiphy_slot_id = tpg_dev->phy_id; + query->version = 0x0; + if (tpg_dev->tpg_hw.hw_info) { + query->version = tpg_dev->tpg_hw.hw_info->version; + } else { + CAM_ERR(CAM_TPG, "Invalid hw info"); + return -EINVAL; + } + + return 0; +} + +static int __cam_tpg_handle_acquire_dev( + struct cam_tpg_device *tpg_dev, + struct cam_tpg_acquire_dev *acquire) +{ + int rc = 0; + struct cam_create_dev_hdl crm_intf_params; + + if (!tpg_dev || !acquire) { + CAM_ERR(CAM_TPG, "invalid input "); + rc = -EINVAL; + goto exit; + } + + if (tpg_dev->state != CAM_TPG_STATE_INIT) { + CAM_ERR(CAM_TPG, "TPG[%d] not in right state[%d] to acquire", + tpg_dev->soc_info.index, tpg_dev->state); + rc = -EINVAL; + goto exit; + } + + crm_intf_params.session_hdl = acquire->session_handle; + crm_intf_params.ops = &tpg_dev->crm_intf.ops; + crm_intf_params.v4l2_sub_dev_flag = 0; + crm_intf_params.media_entity_flag = 0; + crm_intf_params.priv = tpg_dev; + crm_intf_params.dev_id = CAM_TPG; + + acquire->device_handle = + cam_create_device_hdl(&crm_intf_params); + tpg_dev->crm_intf.device_hdl = acquire->device_handle; + tpg_dev->crm_intf.session_hdl = acquire->session_handle; + rc = tpg_hw_acquire(&tpg_dev->tpg_hw, (struct tpg_hw_acquire_args *)NULL); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] hw acquire failed", tpg_dev->soc_info.index); + cam_destroy_device_hdl(tpg_dev->crm_intf.device_hdl); + tpg_dev->crm_intf.device_hdl = -1; + tpg_dev->crm_intf.session_hdl = -1; + } else { + 
tpg_dev->state = CAM_TPG_STATE_ACQUIRE; + CAM_INFO(CAM_TPG, "TPG[%d] Acquire Device done", tpg_dev->soc_info.index); + } +exit: + return rc; +} + +static int __cam_tpg_handle_release_dev( + struct cam_tpg_device *tpg_dev, + struct cam_release_dev_cmd *release) +{ + int rc = 0; + + if (!release || !tpg_dev) { + CAM_ERR(CAM_TPG, "Invalid params"); + return -EINVAL; + } + + if (release->dev_handle <= 0) { + CAM_ERR(CAM_TPG, "Invalid device handle for context"); + return -EINVAL; + } + + if (release->session_handle <= 0) { + CAM_ERR(CAM_TPG, "Invalid session handle for context"); + return -EINVAL; + } + if (tpg_dev->state == CAM_TPG_STATE_INIT) { + CAM_WARN(CAM_TPG, "TPG[%d] not in right state[%d] to release", + tpg_dev->soc_info.index, tpg_dev->state); + return 0; + } + + if (tpg_dev->state == CAM_TPG_STATE_START) { + CAM_DBG(CAM_TPG, "TPG[%d] release from start state", + tpg_dev->soc_info.index); + if (!tpg_dev->hw_no_ops) { + rc = tpg_hw_stop(&tpg_dev->tpg_hw); + if (rc < 0) { + CAM_ERR(CAM_TPG, "TPG[%d] unable to stop tpg", + tpg_dev->soc_info.index); + return rc; + } + } + } + rc = tpg_hw_release(&tpg_dev->tpg_hw); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] hw release failed", + tpg_dev->soc_info.index); + } else { + cam_destroy_device_hdl(tpg_dev->crm_intf.device_hdl); + tpg_dev->crm_intf.device_hdl = -1; + tpg_dev->crm_intf.session_hdl = -1; + CAM_INFO(CAM_TPG, "TPG[%d] Release Done.", tpg_dev->soc_info.index); + tpg_dev->state = CAM_TPG_STATE_INIT; + } + + return rc; +} + +static int __cam_tpg_handle_start_dev( + struct cam_tpg_device *tpg_dev, + struct cam_start_stop_dev_cmd *start) +{ + int rc = 0; + if (!start || !tpg_dev) + return -EINVAL; + + if (start->dev_handle <= 0) { + CAM_ERR(CAM_TPG, "Invalid device handle for context"); + return -EINVAL; + } + + if (start->session_handle <= 0) { + CAM_ERR(CAM_TPG, "Invalid session handle for context"); + return -EINVAL; + } + if (tpg_dev->state != CAM_TPG_STATE_ACQUIRE) { + CAM_ERR(CAM_TPG, "TPG[%d] not in right 
state[%d] to start", + tpg_dev->soc_info.index, tpg_dev->state); + return -EINVAL; + } + if (!tpg_dev->hw_no_ops) + rc = tpg_hw_start(&tpg_dev->tpg_hw); + + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] START_DEV failed hw_no_ops: %d", + tpg_dev->soc_info.index, tpg_dev->hw_no_ops); + } else { + tpg_dev->state = CAM_TPG_STATE_START; + CAM_INFO(CAM_TPG, "TPG[%d] START_DEV done hw_no_ops: %d.", + tpg_dev->soc_info.index, tpg_dev->hw_no_ops); + } + + return rc; +} + +static int __cam_tpg_handle_stop_dev( + struct cam_tpg_device *tpg_dev, + struct cam_start_stop_dev_cmd *stop) +{ + int rc = 0; + if (!stop || !tpg_dev) + return -EINVAL; + + if (stop->dev_handle <= 0) { + CAM_ERR(CAM_TPG, "Invalid device handle for context"); + return -EINVAL; + } + + if (stop->session_handle <= 0) { + CAM_ERR(CAM_TPG, "Invalid session handle for context"); + return -EINVAL; + } + if (tpg_dev->state != CAM_TPG_STATE_START) { + CAM_ERR(CAM_TPG, "TPG[%d] not in right state[%d] to stop", + tpg_dev->soc_info.index, tpg_dev->state); + rc = -EINVAL; + } + + if ((!rc) && (!tpg_dev->hw_no_ops)) + rc = tpg_hw_stop(&tpg_dev->tpg_hw); + + + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] STOP_DEV failed hw_no_ops: %d", + tpg_dev->soc_info.index, tpg_dev->hw_no_ops); + } else { + tpg_dev->state = CAM_TPG_STATE_ACQUIRE; + CAM_INFO(CAM_TPG, "TPG[%d] STOP_DEV done hw_no_ops: %d.", + tpg_dev->soc_info.index, tpg_dev->hw_no_ops); + } + return rc; +} + +static int cam_tpg_validate_cmd_descriptor( + struct cam_cmd_buf_desc *cmd_desc, + uint32_t *cmd_type, uintptr_t *cmd_addr) +{ + int rc = 0; + uintptr_t generic_ptr; + size_t len_of_buff = 0; + size_t remain_len = 0; + ssize_t cmd_header_size = 0; + uint32_t *cmd_buf = NULL; + struct tpg_command_header_t *cmd_header_user = NULL; + struct tpg_command_header_t *cmd_header = NULL; + + if (!cmd_desc || !cmd_type || !cmd_addr) + return -EINVAL; + + rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle, + &generic_ptr, &len_of_buff); + if (rc < 0) { + CAM_ERR(CAM_TPG, + "Failed to get 
cmd buf Mem address : %d", rc); + return rc; + } + + if (cmd_desc->offset >= len_of_buff) { + CAM_ERR(CAM_TPG, "Buffer Offset: %d past length of buffer %zu", + cmd_desc->offset, + len_of_buff); + rc = -EINVAL; + goto end; + } + remain_len = len_of_buff - cmd_desc->offset; + + if ((cmd_desc->size > remain_len) || + (cmd_desc->length > cmd_desc->size)) { + CAM_ERR(CAM_TPG, "Invalid cmd desc, size: %zu remainlen: %zu length: %zu", + cmd_desc->size, + remain_len, + cmd_desc->length); + rc = -EINVAL; + goto end; + } + + if (remain_len < sizeof(struct tpg_command_header_t)) { + CAM_ERR(CAM_TPG, "Invalid buf size, remain len: %zu cmd header size: %zu", + remain_len, + sizeof(struct tpg_command_header_t)); + rc = -EINVAL; + goto end; + } + + cmd_buf = (uint32_t *)generic_ptr; + cmd_buf += cmd_desc->offset / sizeof(uint32_t); + cmd_header_user = (struct tpg_command_header_t *)cmd_buf; + + cmd_header_size = cmd_header_user->size; + + /* Check for cmd_header_size overflow or underflow condition */ + if ((cmd_header_size < 0) || + ((SIZE_MAX - cmd_header_size) < cmd_desc->offset)) { + CAM_ERR(CAM_TPG, "Invalid cmd header size: %zu, offset: %d", + cmd_header_size, + cmd_desc->offset); + rc = -EINVAL; + goto end; + } + + if ((cmd_desc->offset + (size_t)cmd_header_size) > len_of_buff) { + CAM_ERR(CAM_TPG, "Cmd header offset mismatch, offset: %d size: %zu len: %zu", + cmd_desc->offset, + cmd_header_size, + len_of_buff); + rc = -EINVAL; + goto end; + } + + cmd_header = kmemdup(cmd_header_user, cmd_header_size, GFP_KERNEL); + if (!cmd_header) { + CAM_ERR(CAM_TPG, "Local cmd_header mem allocation failed"); + rc = -ENOMEM; + goto end; + } + cmd_header->size = cmd_header_size; + + switch (cmd_header->cmd_type) { + case TPG_CMD_TYPE_GLOBAL_CONFIG: { + if (cmd_header->size != sizeof(struct tpg_global_config_t)) { + CAM_ERR(CAM_TPG, "Got invalid global config command recv: %d exp: %d", + cmd_header->size, + sizeof(struct tpg_global_config_t)); + rc = -EINVAL; + goto end; + } + 
CAM_INFO(CAM_TPG, "Got global config cmd"); + *cmd_type = TPG_CMD_TYPE_GLOBAL_CONFIG; + break; + } + case TPG_CMD_TYPE_STREAM_CONFIG: { + if (cmd_header->cmd_version == 3 && + cmd_header->size != sizeof(struct tpg_stream_config_v3_t)) { + CAM_ERR(CAM_TPG, "Got invalid stream config command recv: %d exp: %d", + cmd_header->size, + sizeof(struct tpg_stream_config_v3_t)); + + rc = -EINVAL; + goto end; + } else if (cmd_header->cmd_version == 2 && + cmd_header->size != sizeof(struct tpg_stream_config_t)) { + CAM_ERR(CAM_TPG, "Got invalid stream config cmd recv: %d exp: %d", + cmd_header->size, + sizeof(struct tpg_stream_config_t)); + + rc = -EINVAL; + goto end; + } else if (cmd_header->cmd_version == 1 && + cmd_header->size != sizeof(struct tpg_old_stream_config_t)) { + CAM_ERR(CAM_TPG, "Got invalid stream config cmd recv: %d exp: %d", + cmd_header->size, + sizeof(struct tpg_old_stream_config_t)); + + rc = -EINVAL; + goto end; + } + *cmd_type = TPG_CMD_TYPE_STREAM_CONFIG; + break; + } + case TPG_CMD_TYPE_ILLUMINATION_CONFIG: { + if (cmd_header->size != sizeof(struct tpg_illumination_control)) { + CAM_ERR(CAM_TPG, "Got invalid illumination config command"); + rc = -EINVAL; + goto end; + } + *cmd_type = TPG_CMD_TYPE_ILLUMINATION_CONFIG; + break; + } + case TPG_CMD_TYPE_SETTINGS_CONFIG: { + struct tpg_settings_config_t *settings; + + if (cmd_header->size != sizeof(struct tpg_settings_config_t)) { + CAM_ERR(CAM_TPG, "Got invalid settings config command recv: %d exp: %d", + cmd_header->size, + sizeof(struct tpg_settings_config_t)); + rc = -EINVAL; + goto end; + } + + settings = (struct tpg_settings_config_t *)cmd_header; + if ((cmd_desc->offset + settings->settings_array_offset) > + (len_of_buff - + settings->settings_array_size * sizeof(struct tpg_reg_settings))) { + CAM_ERR(CAM_TPG, + "Got invalid setting config, cmd offset: %u, setting array offset: %u, num reg settings: %u, size of reg setting: %zu, len of buf: %zu", + cmd_desc->offset, settings->settings_array_offset, + 
settings->settings_array_size, sizeof(struct tpg_reg_settings), + len_of_buff); + rc = -EINVAL; + goto end; + } + + CAM_INFO(CAM_TPG, "Got settings config command"); + *cmd_type = TPG_CMD_TYPE_SETTINGS_CONFIG; + break; + } + + default: + rc = -EINVAL; + CAM_ERR(CAM_TPG, "invalid config command"); + goto end; + } + + *cmd_addr = (uintptr_t)cmd_header; +end: + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; +} + +static int cam_tpg_cmd_buf_parse( + struct cam_tpg_device *tpg_dev, + struct cam_packet *packet) +{ + int rc = 0, i = 0; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct tpg_hw_request *req = NULL; + uintptr_t cmd_addr = 0; + + if (!tpg_dev || !packet) + return -EINVAL; + + /* Allocate space for TPG requests */ + req = tpg_hw_create_request(&tpg_dev->tpg_hw, + packet->header.request_id); + if (req == NULL) { + CAM_ERR(CAM_TPG, "TPG[%d] Unable to create hw request ", + tpg_dev->soc_info.index); + return -EINVAL; + } + + rc = tpg_hw_request_set_opcode(req, + packet->header.op_code & 0xFF); + if (rc < 0) + goto free_request; + + CAM_DBG(CAM_TPG, "Queue TPG requests for request id %lld", req->request_id); + + for (i = 0; i < packet->num_cmd_buf; i++) { + uint32_t cmd_type = TPG_CMD_TYPE_INVALID; + struct tpg_command_header_t *cmd_header = NULL; + + cmd_desc = (struct cam_cmd_buf_desc *) + ((uint32_t *)&packet->payload_flex + + (packet->cmd_buf_offset / 4) + + (i * (sizeof(struct cam_cmd_buf_desc)/4))); + + rc = cam_tpg_validate_cmd_descriptor(cmd_desc, + &cmd_type, &cmd_addr); + if (rc < 0) + goto free_request; + + cmd_header = (struct tpg_command_header_t *)cmd_addr; + + switch (cmd_type) { + case TPG_CMD_TYPE_GLOBAL_CONFIG: + /* Copy the global config to request */ + memcpy(&req->global_config, + (struct tpg_global_config_t *)cmd_addr, + sizeof(struct tpg_global_config_t)); + tpg_dev->tpg_hw.tpg_clock = req->global_config.tpg_clock; + break; + case TPG_CMD_TYPE_STREAM_CONFIG: { + if ((cmd_header->cmd_version == 3) && + (!tpg_dev->hw_no_ops)) { + rc 
= tpg_hw_add_stream_v3(&tpg_dev->tpg_hw, + req, + (struct tpg_stream_config_v3_t *)cmd_addr); + CAM_DBG(CAM_TPG, "Stream config v3"); + } else if ((cmd_header->cmd_version == 1 || + cmd_header->cmd_version == 2) && + (!tpg_dev->hw_no_ops)) { + rc = tpg_hw_add_stream(&tpg_dev->tpg_hw, + req, + (struct tpg_stream_config_t *)cmd_addr); + CAM_DBG(CAM_TPG, "Stream config"); + } + if (rc < 0) + goto free_request; + break; + } + case TPG_CMD_TYPE_SETTINGS_CONFIG: { + CAM_DBG(CAM_TPG, "TPG[%d] Got TPG Settings Config", + tpg_dev->soc_info.index); + if (!tpg_dev->hw_no_ops) + rc = tpg_hw_copy_settings_config( + &tpg_dev->tpg_hw, + (struct tpg_settings_config_t *)cmd_addr); + break; + } + case TPG_CMD_TYPE_ILLUMINATION_CONFIG: + CAM_ERR(CAM_TPG, "TPG[%d] ILLUMINATION CONFIG not supported now ", + tpg_dev->soc_info.index); + break; + default: + CAM_ERR(CAM_TPG, "TPG[%d] invalid command %d", + tpg_dev->soc_info.index, + cmd_type); + rc = -EINVAL; + goto free_request; + break; + } + CAM_MEM_FREE((void *)cmd_addr); + cmd_addr = 0; + } + if (!tpg_dev->hw_no_ops) + tpg_hw_add_request(&tpg_dev->tpg_hw, req); + return rc; +free_request: + if (cmd_addr != 0) { + CAM_MEM_FREE((void *)cmd_addr); + cmd_addr = 0; + } + /* free the request and return the failure */ + tpg_hw_free_request(&tpg_dev->tpg_hw, req); + CAM_MEM_FREE(req); + return rc; +} + +static int cam_tpg_packet_parse( + struct cam_tpg_device *tpg_dev, + struct cam_config_dev_cmd *config) +{ + int rc = 0; + uintptr_t generic_ptr; + size_t len_of_buff = 0, remain_len = 0; + struct cam_packet *csl_packet_u = NULL; + struct cam_packet *csl_packet = NULL; + + rc = cam_mem_get_cpu_buf(config->packet_handle, + &generic_ptr, &len_of_buff); + if (rc < 0) { + CAM_ERR(CAM_TPG, "Failed in getting the packet: %d", rc); + return rc; + } + + if ((sizeof(struct cam_packet) > len_of_buff) || + ((size_t)config->offset >= len_of_buff - + sizeof(struct cam_packet))) { + CAM_ERR(CAM_TPG, + "Inval cam_packet struct size: %zu, len_of_buff: %zu", + 
sizeof(struct cam_packet), len_of_buff); + rc = -EINVAL; + goto end; + } + remain_len = len_of_buff; + remain_len -= (size_t)config->offset; + csl_packet_u = (struct cam_packet *)(generic_ptr + + (uint32_t)config->offset); + rc = cam_packet_util_copy_pkt_to_kmd(csl_packet_u, &csl_packet, remain_len); + if (rc) { + CAM_ERR(CAM_TPG, "Copying packet to KMD failed"); + goto end; + } + + CAM_DBG(CAM_TPG, "TPG[%d] " + "CONFIG_DEV, Packet opcode = %d num_cmds: %d num_ios: %d num_patch: %d", + tpg_dev->soc_info.index, + (csl_packet->header.op_code & 0xFF), + csl_packet->num_cmd_buf, + csl_packet->num_io_configs, + csl_packet->num_patches); + switch ((csl_packet->header.op_code & 0xFF)) { + case CAM_TPG_PACKET_OPCODE_INITIAL_CONFIG: { + if (csl_packet->num_cmd_buf <= 0) { + CAM_ERR(CAM_TPG, "Expecting atleast one command in Init packet"); + rc = -EINVAL; + goto free_kdup; + } + rc = cam_tpg_cmd_buf_parse(tpg_dev, csl_packet); + if (rc < 0) { + CAM_ERR(CAM_TPG, "CMD buffer parse failed"); + goto free_kdup; + } + if (!tpg_dev->hw_no_ops) + tpg_hw_config(&tpg_dev->tpg_hw, + TPG_HW_CMD_INIT_CONFIG, NULL); + break; + } + case CAM_TPG_PACKET_OPCODE_NOP: + case CAM_TPG_PACKET_OPCODE_UPDATE: + { + struct cam_req_mgr_add_request add_req = {0}; + rc = cam_tpg_cmd_buf_parse(tpg_dev, csl_packet); + if (rc < 0) { + CAM_ERR(CAM_TPG, "CMD buffer parse failed"); + goto free_kdup; + } + + CAM_DBG(CAM_TPG, "TPG[%d] packet request id : %llu", + tpg_dev->soc_info.index, + csl_packet->header.request_id); + if ((tpg_dev->crm_intf.link_hdl != -1) && + (tpg_dev->crm_intf.device_hdl != -1) && + (tpg_dev->crm_intf.crm_cb != NULL)) { + add_req.link_hdl = tpg_dev->crm_intf.link_hdl; + add_req.dev_hdl = tpg_dev->crm_intf.device_hdl; + add_req.req_id = csl_packet->header.request_id; + tpg_dev->crm_intf.crm_cb->add_req(&add_req); + } else { + CAM_ERR(CAM_TPG, "TPG[%d] add request %d failed", + tpg_dev->soc_info.index, + csl_packet->header.request_id); + } + break; + } + default: + CAM_ERR(CAM_TPG, 
"TPG[%d] Invalid packet %x", + tpg_dev->soc_info.index, + (csl_packet->header.op_code & 0xFF)); + rc = -EINVAL; + break; + } + +free_kdup: + cam_common_mem_free(csl_packet); +end: + cam_mem_put_cpu_buf(config->packet_handle); + return rc; +} + +static int __cam_tpg_handle_config_dev( + struct cam_tpg_device *tpg_dev, + struct cam_config_dev_cmd *config) +{ + int rc = 0; + + if (!config || !tpg_dev) + return -EINVAL; + + if (config->dev_handle <= 0) { + CAM_ERR(CAM_TPG, "TPG[%d] Invalid device handle", + tpg_dev->soc_info.index); + return -EINVAL; + } + + if (config->session_handle <= 0) { + CAM_ERR(CAM_TPG, "TPG[%d] Invalid session handle", + tpg_dev->soc_info.index); + return -EINVAL; + } + + if (tpg_dev->state < CAM_TPG_STATE_ACQUIRE) { + CAM_ERR(CAM_TPG, "TPG[%d] not in right state[%d] to configure", + tpg_dev->soc_info.index, tpg_dev->state); + } + // Handle Config Dev + rc = cam_tpg_packet_parse(tpg_dev, config); + return rc; +} + +static int validate_ioctl_params( + struct cam_tpg_device *tpg_dev, + struct cam_control *cmd) +{ + int rc = 0; + + if (!tpg_dev || !cmd) { + CAM_ERR(CAM_TPG, "Invalid input args"); + rc = -EINVAL; + goto exit; + } + + if (cmd->handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_TPG, "TPG[%d] Invalid handle type: %d", + tpg_dev->soc_info.index, + cmd->handle_type); + rc = -EINVAL; + } + CAM_DBG(CAM_TPG, "TPG[%d] Opcode: %d", tpg_dev->soc_info.index, cmd->op_code); +exit: + return rc; +} + +int cam_tpg_core_cfg( + struct cam_tpg_device *tpg_dev, + void *arg) +{ + int rc = 0; + struct cam_control *cmd = (struct cam_control *)arg; + + rc = validate_ioctl_params(tpg_dev, cmd); + if (rc < 0) + return rc; + + mutex_lock(&tpg_dev->mutex); + switch (cmd->op_code) { + case CAM_QUERY_CAP: { + struct cam_tpg_query_cap query = {0}; + + if (copy_from_user(&query, u64_to_user_ptr(cmd->handle), + sizeof(query))) { + rc = -EFAULT; + break; + } + + rc = __cam_tpg_handle_query_cap(tpg_dev, &query); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] querycap 
is failed(rc = %d)", + tpg_dev->soc_info.index, + rc); + break; + } + + if (copy_to_user(u64_to_user_ptr(cmd->handle), &query, + sizeof(query))) + rc = -EFAULT; + + break; + } + case CAM_ACQUIRE_DEV: { + struct cam_tpg_acquire_dev acquire = {0}; + + if (copy_from_user(&acquire, u64_to_user_ptr(cmd->handle), + sizeof(acquire))) { + rc = -EFAULT; + break; + } + rc = __cam_tpg_handle_acquire_dev(tpg_dev, &acquire); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] acquire device failed(rc = %d)", + tpg_dev->soc_info.index, + rc); + break; + } + if (copy_to_user(u64_to_user_ptr(cmd->handle), &acquire, + sizeof(acquire))) + rc = -EFAULT; + break; + } + case CAM_RELEASE_DEV: { + struct cam_release_dev_cmd release; + + if (copy_from_user(&release, u64_to_user_ptr(cmd->handle), + sizeof(release))) + rc = -EFAULT; + else { + rc = __cam_tpg_handle_release_dev(tpg_dev, &release); + if (rc) + CAM_ERR(CAM_TPG, + "TPG[%d] release device failed(rc = %d)", + tpg_dev->soc_info.index, + rc); + } + break; + } + case CAM_START_DEV: { + struct cam_start_stop_dev_cmd start; + + if (copy_from_user(&start, u64_to_user_ptr(cmd->handle), + sizeof(start))) + rc = -EFAULT; + else { + rc = __cam_tpg_handle_start_dev(tpg_dev, &start); + if (rc) + CAM_ERR(CAM_TPG, + "TPG[%d] start device failed(rc = %d)", + tpg_dev->soc_info.index, + rc); + } + break; + } + case CAM_STOP_DEV: { + struct cam_start_stop_dev_cmd stop; + + if (copy_from_user(&stop, u64_to_user_ptr(cmd->handle), + sizeof(stop))) + rc = -EFAULT; + else { + rc = __cam_tpg_handle_stop_dev(tpg_dev, &stop); + if (rc) + CAM_ERR(CAM_TPG, + "TPG[%d] stop device failed(rc = %d)", + tpg_dev->soc_info.index, + rc); + } + break; + + } + case CAM_CONFIG_DEV: { + struct cam_config_dev_cmd config; + + if (copy_from_user(&config, u64_to_user_ptr(cmd->handle), + sizeof(config))) + rc = -EFAULT; + else { + rc = __cam_tpg_handle_config_dev(tpg_dev, &config); + if (rc) + CAM_ERR(CAM_TPG, + "TPG[%d] config device failed(rc = %d)", + tpg_dev->soc_info.index, + rc); 
+ } + break; + } + default: + CAM_ERR(CAM_TPG, "Invalid ioctl : %d", cmd->op_code); + rc = -EINVAL; + break; + } + mutex_unlock(&tpg_dev->mutex); + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_core.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_core.h new file mode 100644 index 0000000000..2cb0737054 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_core.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __CAM_TPG_CORE_H__ +#define __CAM_TPG_CORE_H__ + +#include "cam_tpg_dev.h" + +/** + * @brief : do clean up of driver on user space process restart + * + * @param tpg_dev: tpg device + * + * @return : 0 on success + */ +int cam_tpg_shutdown(struct cam_tpg_device *tpg_dev); + +/** + * @brief initialize crm interface + * + * @param tpg_dev: tpg device instance + * + * @return : 0 on success + */ +int tpg_crm_intf_init(struct cam_tpg_device *tpg_dev); + +/** + * @brief : handle tpg device configuration + * + * @param tpg_dev : tpg device instance + * @param arg : configuration argument + * + * @return : 0 on success + */ +int32_t cam_tpg_core_cfg(struct cam_tpg_device *tpg_dev, void *arg); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_dev.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_dev.c new file mode 100644 index 0000000000..fe9a6504d8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_dev.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_tpg_dev.h" +#include "cam_tpg_core.h" +#include "camera_main.h" +#include "tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0_data.h" +#include "tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2_data.h" +#include "tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_data.h" +#include "tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4_data.h" +#include "cam_req_mgr_dev.h" +#include + +static int cam_tpg_subdev_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct cam_tpg_device *tpg_dev = + v4l2_get_subdevdata(sd); + + bool crm_active = cam_req_mgr_is_open(); + + if (!tpg_dev) { + CAM_ERR(CAM_TPG, "tpg_dev ptr is NULL"); + return -EINVAL; + } + + if (crm_active) { + CAM_DBG(CAM_TPG, "CRM is ACTIVE, close should be from CRM"); + return 0; + } + + mutex_lock(&tpg_dev->mutex); + if (tpg_dev->state == CAM_TPG_STATE_INIT) { + CAM_DBG(CAM_TPG, "TPG node %d is succesfully closed", tpg_dev->soc_info.index); + mutex_unlock(&tpg_dev->mutex); + return 0; + } + cam_tpg_shutdown(tpg_dev); + mutex_unlock(&tpg_dev->mutex); + + return 0; +} + +static long cam_tpg_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + struct cam_tpg_device *tpg_dev = v4l2_get_subdevdata(sd); + int rc = 0; + + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_tpg_core_cfg(tpg_dev, arg); + break; + case CAM_SD_SHUTDOWN: + if (!cam_req_mgr_is_shutdown()) { + CAM_ERR(CAM_CORE, "SD shouldn't come from user space"); + return 0; + } + mutex_lock(&tpg_dev->mutex); + cam_tpg_shutdown(tpg_dev); + mutex_unlock(&tpg_dev->mutex); + break; + default: + CAM_ERR(CAM_TPG, "Wrong ioctl : %d", cmd); + rc = -ENOIOCTLCMD; + break; + } + + return rc; +} + + +#ifdef CONFIG_COMPAT +static long cam_tpg_subdev_compat_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, unsigned long arg) +{ + int32_t rc = 0; + struct cam_control cmd_data; + + if (copy_from_user(&cmd_data, (void __user *)arg, + sizeof(cmd_data))) { + CAM_ERR(CAM_TPG, "Failed to copy from user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + return -EFAULT; + } 
+ + /* All the arguments converted to 64 bit here + * Passed to the api in core.c + */ + switch (cmd) { + case VIDIOC_CAM_CONTROL: + rc = cam_tpg_subdev_ioctl(sd, cmd, &cmd_data); + if (rc) + CAM_ERR(CAM_TPG, + "Failed in subdev_ioctl: %d", rc); + break; + default: + CAM_ERR(CAM_TPG, "Invalid compat ioctl cmd: %d", cmd); + rc = -ENOIOCTLCMD; + break; + } + + if (!rc) { + if (copy_to_user((void __user *)arg, &cmd_data, + sizeof(cmd_data))) { + CAM_ERR(CAM_TPG, + "Failed to copy to user_ptr=%pK size=%zu", + (void __user *)arg, sizeof(cmd_data)); + rc = -EFAULT; + } + } + + return rc; +} +#endif + + +static struct v4l2_subdev_core_ops tpg_subdev_core_ops = { + .ioctl = cam_tpg_subdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = cam_tpg_subdev_compat_ioctl, +#endif +}; + + +static const struct v4l2_subdev_ops tpg_subdev_ops = { + .core = &tpg_subdev_core_ops, +}; + +static const struct v4l2_subdev_internal_ops tpg_subdev_intern_ops = { + .close = cam_tpg_subdev_close, +}; + +static irqreturn_t cam_tpg_irq_handler(int irq_num, void *data) +{ + struct cam_tpg_device *tpg_dev = + (struct cam_tpg_device *)data; + struct tpg_hw *hw = &tpg_dev->tpg_hw; + irqreturn_t rc = IRQ_NONE; + + /* handle the registered irq handler */ + if (hw && + hw->hw_info && + hw->hw_info->ops && + hw->hw_info->ops->handle_irq) { + rc = hw->hw_info->ops->handle_irq(hw); + } + + return rc; +} + +static int tpg_subdev_init(struct cam_tpg_device *tpg_dev, + struct device *dev) +{ + int32_t rc = 0; + struct platform_device *pdev = to_platform_device(dev); + + tpg_dev->tpg_subdev.pdev = pdev; + tpg_dev->tpg_subdev.internal_ops = &tpg_subdev_intern_ops; + tpg_dev->tpg_subdev.ops = &tpg_subdev_ops; + tpg_dev->tpg_subdev.name = tpg_dev->device_name; + tpg_dev->tpg_subdev.sd_flags = + (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS); + tpg_dev->tpg_subdev.ent_function = CAM_TPG_DEVICE_TYPE; + tpg_dev->tpg_subdev.msg_cb = NULL; + tpg_dev->tpg_subdev.token = tpg_dev; + + rc = 
cam_register_subdev(&(tpg_dev->tpg_subdev)); + if (rc) + CAM_ERR(CAM_TPG, "cam_register_subdev Failed rc: %d", rc); + else + CAM_DBG(CAM_TPG, "TPG subdev init done"); + return rc; + +} + +static int tpg_soc_info_init(struct cam_tpg_device *tpg_dev, + struct device *dev) +{ + int32_t rc = 0, i; + struct platform_device *pdev = to_platform_device(dev); + struct device_node *of_node = NULL; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0}; + + if (!dev || !tpg_dev) + return -EINVAL; + + tpg_dev->soc_info.pdev = pdev; + tpg_dev->soc_info.dev = &pdev->dev; + tpg_dev->soc_info.dev_name = pdev->name; + + of_node = dev->of_node; + + rc = cam_soc_util_get_dt_properties(&tpg_dev->soc_info); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "parsing common soc dt(rc %d)", rc); + return rc; + } + + rc = of_property_read_u32(of_node, "phy-id", &(tpg_dev->phy_id)); + if (rc) { + CAM_ERR(CAM_TPG, "device %s failed to read phy-id", + tpg_dev->soc_info.dev_name); + return rc; + } + + for (i = 0; i < tpg_dev->soc_info.irq_count; i++) + irq_data[i] = tpg_dev; + + if (!of_property_read_bool(of_node, "hw-no-ops")) + tpg_dev->hw_no_ops = false; + else + tpg_dev->hw_no_ops = true; + + if (!tpg_dev->hw_no_ops) + rc = cam_soc_util_request_platform_resource( + &tpg_dev->soc_info, cam_tpg_irq_handler, + &(irq_data[0])); + if (rc) + CAM_ERR(CAM_TPG, "unable to request platfrom resources"); + else + CAM_DBG(CAM_TPG, "TPG dt parse done"); + + return rc; +} + +static int tpg_register_cpas_client(struct cam_tpg_device *tpg_dev, + struct device *dev) +{ + int32_t rc = 0; + struct cam_cpas_register_params cpas_parms; + struct platform_device *pdev = to_platform_device(dev); + + cpas_parms.cam_cpas_client_cb = NULL; + cpas_parms.cell_index = tpg_dev->soc_info.index; + cpas_parms.dev = &pdev->dev; + cpas_parms.userdata = tpg_dev; + + strscpy(cpas_parms.identifier, "tpg", CAM_HW_IDENTIFIER_LENGTH); + + rc = cam_cpas_register_client(&cpas_parms); + if (rc) { + CAM_ERR(CAM_TPG, "CPAS registration failed rc: 
%d", rc); + return rc; + } + + tpg_dev->cpas_handle = cpas_parms.client_handle; + CAM_DBG(CAM_TPG, "CPAS registration successful handle=%d", + cpas_parms.client_handle); + + return rc; +} + +static int cam_tpg_hw_layer_init(struct cam_tpg_device *tpg_dev, + struct device *dev) +{ + /* get top tpg hw information */ + const struct of_device_id *match_dev = NULL; + struct platform_device *pdev = to_platform_device(dev); + + match_dev = of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev); + if (!match_dev) { + CAM_ERR(CAM_TPG, "No matching table for the top tpg hw"); + return -EINVAL; + } + + tpg_dev->tpg_hw.hw_idx = tpg_dev->soc_info.index; + tpg_dev->tpg_hw.hw_info = (struct tpg_hw_info *)match_dev->data; + tpg_dev->tpg_hw.soc_info = &tpg_dev->soc_info; + tpg_dev->tpg_hw.cpas_handle = tpg_dev->cpas_handle; + spin_lock_init(&tpg_dev->tpg_hw.hw_state_lock); + tpg_dev->tpg_hw.state = TPG_HW_STATE_HW_DISABLED; + mutex_init(&tpg_dev->tpg_hw.mutex); + init_completion(&tpg_dev->tpg_hw.complete_rup); + /*Initialize the waiting queue and active queues*/ + INIT_LIST_HEAD(&(tpg_dev->tpg_hw.waiting_request_q)); + INIT_LIST_HEAD(&(tpg_dev->tpg_hw.active_request_q)); + tpg_dev->tpg_hw.waiting_request_q_depth = 0; + tpg_dev->tpg_hw.active_request_q_depth = 0; + tpg_dev->tpg_hw.settings_update = 0; + tpg_dev->tpg_hw.tpg_clock = 0; + tpg_dev->tpg_hw.hw_info->layer_init(&tpg_dev->tpg_hw); + return 0; +} + +static int cam_tpg_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct cam_tpg_device *tpg_dev = NULL; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + tpg_dev = devm_kzalloc(&pdev->dev, + sizeof(struct cam_tpg_device), GFP_KERNEL); + if (!tpg_dev) { + CAM_ERR(CAM_TPG, "TPG dev allocation failed"); + return -ENOMEM; + } + + strscpy(tpg_dev->device_name, CAMX_TPG_DEV_NAME, + sizeof(tpg_dev->device_name)); + 
mutex_init(&tpg_dev->mutex); + tpg_dev->tpg_subdev.pdev = pdev; + tpg_dev->state = CAM_TPG_STATE_INIT; + rc = tpg_subdev_init(tpg_dev, dev); + if (rc < 0) { + CAM_ERR(CAM_TPG, "subdev init failed"); + goto bind_error_exit; + } + + rc = tpg_soc_info_init(tpg_dev, dev); + if (rc < 0) { + CAM_ERR(CAM_TPG, "soc init failed"); + goto release_subdev; + } + + rc = tpg_register_cpas_client(tpg_dev, dev); + if (rc < 0) { + CAM_ERR(CAM_TPG, "cpas register failed"); + goto release_subdev; + } + tpg_crm_intf_init(tpg_dev); + rc = cam_tpg_hw_layer_init(tpg_dev, dev); + if (rc < 0) { + CAM_ERR(CAM_TPG, "Hw layer init failed"); + goto release_subdev; + } + + platform_set_drvdata(pdev, tpg_dev); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; + +release_subdev: + cam_unregister_subdev(&(tpg_dev->tpg_subdev)); +bind_error_exit: + mutex_destroy(&tpg_dev->mutex); + return rc; +} + +static void cam_tpg_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct cam_tpg_device *tpg_dev = NULL; + + tpg_dev = platform_get_drvdata(pdev); + + if (!tpg_dev) { + CAM_ERR(CAM_TPG, "Error No data in tpg_dev"); + return; + } + + CAM_INFO(CAM_TPG, "Unbind TPG component"); + cam_cpas_unregister_client(tpg_dev->cpas_handle); + cam_soc_util_release_platform_resource(&tpg_dev->soc_info); + mutex_destroy(&tpg_dev->mutex); + mutex_destroy(&tpg_dev->tpg_hw.mutex); + platform_set_drvdata(pdev, NULL); + v4l2_set_subdevdata(&(tpg_dev->tpg_subdev.sd), NULL); + cam_unregister_subdev(&(tpg_dev->tpg_subdev)); +} + +const static struct component_ops cam_tpg_component_ops = { + .bind = cam_tpg_component_bind, + .unbind = cam_tpg_component_unbind, +}; + +static int32_t cam_tpg_platform_probe(struct platform_device *pdev) +{ + int rc = 0; + + CAM_DBG(CAM_TPG, "Adding TPG component"); + rc = component_add(&pdev->dev, 
&cam_tpg_component_ops); + if (rc) + CAM_ERR(CAM_TPG, "failed to add component rc: %d", rc); + + return rc; +} + + +static int32_t cam_tpg_device_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &cam_tpg_component_ops); + return 0; +} + +static const struct of_device_id cam_tpg_dt_match[] = { + { + .compatible = "qcom,cam-tpg101", + .data = &tpg_v_1_0_hw_info, + }, + { + .compatible = "qcom,cam-tpg102", + .data = &tpg_v_1_2_hw_info, + }, + { + .compatible = "qcom,cam-tpg103", + .data = &tpg_v_1_3_hw_info, + }, + { + .compatible = "qcom,cam-tpg1031", + .data = &tpg_v_1_3_1_hw_info, + }, + { + .compatible = "qcom,cam-tpg104", + .data = &tpg_v_1_4_hw_info, + }, + {} +}; + +MODULE_DEVICE_TABLE(of, cam_tpg_dt_match); + +struct platform_driver cam_tpg_driver = { + .probe = cam_tpg_platform_probe, + .remove = cam_tpg_device_remove, + .driver = { + .name = CAMX_TPG_DEV_NAME, + .owner = THIS_MODULE, + .of_match_table = cam_tpg_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int32_t cam_tpg_init_module(void) +{ + CAM_DBG(CAM_TPG, "tpg module init"); + return platform_driver_register(&cam_tpg_driver); +} + +void cam_tpg_exit_module(void) +{ + CAM_DBG(CAM_TPG, "tpg exit module"); + platform_driver_unregister(&cam_tpg_driver); +} + +MODULE_DESCRIPTION("CAM TPG driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_dev.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_dev.h new file mode 100644 index 0000000000..2b1a8cb939 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/cam_tpg_dev.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __CAM_TPG_DEV_H__ +#define __CAM_TPG_DEV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_debug_util.h" +#include "cam_context.h" +#include "cam_soc_util.h" +#include +#include "tpg_hw/tpg_hw.h" + +#define CAMX_TPG_DEV_NAME "cam-tpg-driver" + +struct cam_tpg_device; +struct tpg_hw; + +struct cam_tpg_ioctl_ops { + int (*acquire_dev)(struct cam_tpg_device *tpg_dev, + struct cam_acquire_dev_cmd *cmd); + int (*release_dev)(struct cam_tpg_device *tpg_dev, + struct cam_release_dev_cmd *cmd); + int (*config_dev)(struct cam_tpg_device *tpg_dev, + struct cam_config_dev_cmd *cmd); + int (*start_dev)(struct cam_tpg_device *tpg_dev, + struct cam_start_stop_dev_cmd *cmd); + int (*stop_dev)(struct cam_tpg_device *tpg_dev, + struct cam_start_stop_dev_cmd *cmd); + int (*flush_dev)(struct cam_tpg_device *tpg_dev, + struct cam_flush_dev_cmd *cmd); + int (*acquire_hw)(struct cam_tpg_device *tpg_dev, void *args); + int (*release_hw)(struct cam_tpg_device *tpg_dev, void *args); +}; + +struct cam_tpg_crm_ops { + int (*get_dev_info)(struct cam_tpg_device *tpg_dev, + struct cam_req_mgr_device_info *device_info); + int (*link)(struct cam_tpg_device *tpg_dev, + struct cam_req_mgr_core_dev_link_setup *link); + int (*unlink)(struct cam_tpg_device *tpg_dev, + struct cam_req_mgr_core_dev_link_setup *unlink); + int (*apply_req)(struct cam_tpg_device *tpg_dev, + struct cam_req_mgr_apply_request *apply); + int (*notify_frame_skip)(struct cam_tpg_device *tpg_dev, + struct cam_req_mgr_apply_request *apply); + int (*flush_req)(struct cam_tpg_device *tpg_dev, + struct cam_req_mgr_flush_request *flush); + int (*process_evt)(struct cam_tpg_device *tpg_dev, + struct cam_req_mgr_link_evt_data *evt_data); +}; + +struct tpg_device_ops { + struct cam_tpg_ioctl_ops ioctl_ops; + struct cam_tpg_crm_ops crm_ops; +}; + +enum cam_tpg_state { + 
CAM_TPG_STATE_INIT, + CAM_TPG_STATE_ACQUIRE, + CAM_TPG_STATE_START, + CAM_TPG_STATE_STATE_MAX +}; + +/** + * struct tpg_crm_intf_params + * @device_hdl: Device Handle + * @session_hdl: Session Handle + * @link_hdl: Link Handle + * @ops: KMD operations + * @crm_cb: Callback API pointers + */ +struct tpg_crm_intf_params { + int32_t device_hdl; + int32_t session_hdl; + int32_t link_hdl; + struct cam_req_mgr_kmd_ops ops; + struct cam_req_mgr_crm_cb *crm_cb; +}; + +/** + * cam_tpg_device + * + * @device_name: tpg device name + * @mutex: mutex object for private use + * @tpg_subdev: tpg subdevice instance + * @soc_info: tpg soc infrastructure + * @cpas_handle: cpas handle + * @state_machine: tpg state machine + * @crm_intf: crm interface + * @tpg_hw : tpg hw instance + * @state : state machine states + * @slot_id : slot index of this tpg + * @phy_id : phy index mapped to this tpg + * @hw_no_ops: To determine whether HW operations need to be disabled + * @waiting_request_q : waiting request queue + * @active_request_q : active request queue + * @tpg_irq_state : tpg irq state + */ +struct cam_tpg_device { + char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH]; + struct mutex mutex; + struct cam_subdev tpg_subdev; + struct cam_hw_soc_info soc_info; + uint32_t cpas_handle; + struct tpg_device_ops state_machine[CAM_TPG_STATE_STATE_MAX]; + struct tpg_crm_intf_params crm_intf; + struct tpg_hw tpg_hw; + int state; + int slot_id; + int phy_id; + bool hw_no_ops; + atomic_t tpg_irq_state; + struct list_head waiting_request_q; + struct list_head active_request_q; +}; + + +/** + * @brief : tpg module init + * + * @return : 0 on success + */ +int32_t cam_tpg_init_module(void); + +/** + * @brief : tpg module exit + */ +void cam_tpg_exit_module(void); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw.c new file mode 100644 index 0000000000..691d4ae57a --- 
/dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw.c @@ -0,0 +1,1484 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "tpg_hw.h" +#include "cam_mem_mgr_api.h" +#include "cam_io_util.h" + +#define CAM_TPG_HW_WAIT_TIMEOUT msecs_to_jiffies(100) +#define MAX_ACTIVE_QUEUE_DEPTH 2 +#define MAX_WAITING_QUEUE_DEPTH 32 +#define TIMEOUT_MULTIPLIER_PRESIL 5 +#define TIMEOUT_MULTIPLIER 1 +#define REQUEST_ID_UNSET -1 + +int32_t cam_tpg_mem_dmp(struct cam_hw_soc_info *soc_info) +{ + int32_t rc = 0; + resource_size_t size = 0; + void __iomem *addr = NULL; + + if (!soc_info) { + rc = -EINVAL; + CAM_ERR(CAM_TPG, "invalid input %d", rc); + return rc; + } + addr = soc_info->reg_map[0].mem_base; + CAM_DBG(CAM_TPG, "TPG[%d] register dump", soc_info->index); + size = resource_size(soc_info->mem_block[0]); + rc = cam_io_dump(addr, 0, (size >> 2)); + if (rc < 0) { + CAM_ERR(CAM_TPG, "generating dump failed %d", rc); + } + return rc; +} + + +#define __TPG_DEBUG_DUMP__ +#ifdef __TPG_DEBUG_DUMP__ +static const char * const tpg_phy_type_strings[] = { + "TPG_PHY_TYPE_INVALID", + "TPG_PHY_TYPE_DPHY", + "TPG_PHY_TYPE_CPHY", + "TPG_PHY_TYPE_MAX" +}; + +static const char * const tpg_interleaving_format_string[] = { + "TPG_INTERLEAVING_FORMAT_INVALID", + "TPG_INTERLEAVING_FORMAT_FRAME", + "TPG_INTERLEAVING_FORMAT_LINE", + "TPG_INTERLEAVING_FORMAT_SHDR", + "TPG_INTERLEAVING_FORMAT_SparsePD", + "TPG_INTERLEAVING_FORMAT_SHDR_SparsePD", + "TPG_INTERLEAVING_FORMAT_MAX" +}; + +static const char * const tpg_shutter_type_strings[] = { + "TPG_SHUTTER_TYPE_INVALID", + "TPG_SHUTTER_TYPE_ROLLING", + "TPG_SHUTTER_TYPE_GLOBAL", + "TPG_SHUTTER_TYPE_MAX" +}; + +static const char *const tpg_pattern_type_strings[] = { + "TPG_PATTERN_INVALID", + "TPG_PATTERN_REAL_IMAGE", + "TPG_PATTERN_RANDOM_PIXL", + 
"TPG_PATTERN_RANDOM_INCREMENTING_PIXEL", + "TPG_PATTERN_COLOR_BAR", + "TPG_PATTERN_ALTERNATING_55_AA", + "TPG_PATTERN_ALTERNATING_USER_DEFINED", + "TPG_PATTERN_MAX" +}; + +static const char *const tpg_color_bar_mode_strings[] = { + "TPG_COLOR_BAR_MODE_INVALID", + "TPG_COLOR_BAR_MODE_NORMAL", + "TPG_COLOR_BAR_MODE_SPLIT", + "TPG_COLOR_BAR_MODE_ROTATING", + "TPG_COLOR_BAR_MODE_MAX" +}; + +static const char *const tpg_stream_type_strings[] = { + "TPG_STREAM_TYPE_INVALID", + "TPG_STREAM_TYPE_IMAGE", + "TPG_STREAM_TYPE_PDAF", + "TPG_STREAM_TYPE_META", + "TPG_STREAM_TYPE_MAX" +}; + +static const char *const tpg_image_format_type_strings[] = { + "TPG_IMAGE_FORMAT_INVALID", + "TPG_IMAGE_FORMAT_BAYER", + "TPG_IMAGE_FORMAT_QCFA", + "TPG_IMAGE_FORMAT_YUV", + "TPG_IMAGE_FORMAT_JPEG", + "TPG_IMAGE_FORMAT_MAX" +}; +#endif + +int dump_global_configs(int idx, + struct tpg_global_config_t *global) +{ +#ifdef __TPG_DEBUG_DUMP__ + CAM_DBG(CAM_TPG, "TPG[%d] phy_type : %s", + idx, + tpg_phy_type_strings[global->phy_type]); + CAM_DBG(CAM_TPG, "TPG[%d] lane_count : %d", + idx, + global->lane_count); + CAM_DBG(CAM_TPG, "TPG[%d] interleaving_format : %s", + idx, + tpg_interleaving_format_string[global->interleaving_format]); + CAM_DBG(CAM_TPG, "TPG[%d] phy_mode : %d", + idx, + global->phy_mode); + CAM_DBG(CAM_TPG, "TPG[%d] shutter_type : %s", + idx, + tpg_shutter_type_strings[global->shutter_type]); + CAM_DBG(CAM_TPG, "TPG[%d] skip pattern : 0x%x", + idx, + global->skip_pattern); + CAM_DBG(CAM_TPG, "TPG[%d] tpg clock : %d", + idx, + global->tpg_clock); +#endif + return 0; +} + +int dump_stream_configs(int hw_idx, + int stream_idx, + struct tpg_stream_config_t *stream) +{ +#ifdef __TPG_DEBUG_DUMP__ + CAM_DBG(CAM_TPG, "TPG[%d][%d] pattern_type : %s", + hw_idx, + stream_idx, + tpg_pattern_type_strings[stream->pattern_type]); + CAM_DBG(CAM_TPG, "TPG[%d][%d] cb_mode : %s", + hw_idx, + stream_idx, + tpg_color_bar_mode_strings[stream->cb_mode]); + CAM_DBG(CAM_TPG, "TPG[%d][%d] frame_count : %d", 
+ hw_idx, + stream_idx, + stream->frame_count); + CAM_DBG(CAM_TPG, "TPG[%d][%d] stream_type : %s", + hw_idx, + stream_idx, + tpg_stream_type_strings[stream->stream_type]); + CAM_DBG(CAM_TPG, "TPG[%d][%d] left : %d", + hw_idx, + stream_idx, + stream->stream_dimension.left); + CAM_DBG(CAM_TPG, "TPG[%d][%d] top : %d", + hw_idx, + stream_idx, + stream->stream_dimension.top); + CAM_DBG(CAM_TPG, "TPG[%d][%d] width : %d", + hw_idx, + stream_idx, + stream->stream_dimension.width); + CAM_DBG(CAM_TPG, "TPG[%d][%d] height : %d", + hw_idx, + stream_idx, + stream->stream_dimension.height); + CAM_DBG(CAM_TPG, "TPG[%d][%d] pixel_depth : %d", + hw_idx, + stream_idx, + stream->pixel_depth); + CAM_DBG(CAM_TPG, "TPG[%d][%d] cfa_arrangement : %d", + hw_idx, + stream_idx, + stream->cfa_arrangement); + CAM_DBG(CAM_TPG, "TPG[%d][%d] output_format : %s", + hw_idx, + stream_idx, + tpg_image_format_type_strings[stream->output_format]); + CAM_DBG(CAM_TPG, "TPG[%d][%d] vc : 0x%x", + hw_idx, + stream_idx, + stream->vc); + CAM_DBG(CAM_TPG, "TPG[%d][%d] dt : 0x%x", + hw_idx, + stream_idx, + stream->dt); + CAM_DBG(CAM_TPG, "TPG[%d][%d] hbi : %d", + hw_idx, + stream_idx, + stream->hbi); + CAM_DBG(CAM_TPG, "TPG[%d][%d] vbi : %d", + hw_idx, + stream_idx, + stream->vbi); +#endif + return 0; +} + +int dump_stream_configs_v3(int hw_idx, + int stream_idx, + struct tpg_stream_config_v3_t *stream) +{ +#ifdef __TPG_DEBUG_DUMP__ + CAM_DBG(CAM_TPG, "TPG[%d][%d] pattern_type : %s", + hw_idx, + stream_idx, + tpg_pattern_type_strings[stream->pattern_type]); + CAM_DBG(CAM_TPG, "TPG[%d][%d] cb_mode : %s", + hw_idx, + stream_idx, + tpg_color_bar_mode_strings[stream->cb_mode]); + CAM_DBG(CAM_TPG, "TPG[%d][%d] frame_count : %d", + hw_idx, + stream_idx, + stream->frame_count); + CAM_DBG(CAM_TPG, "TPG[%d][%d] stream_type : %s", + hw_idx, + stream_idx, + tpg_stream_type_strings[stream->stream_type]); + CAM_DBG(CAM_TPG, "TPG[%d][%d] left : %d", + hw_idx, + stream_idx, + stream->stream_dimension.left); + CAM_DBG(CAM_TPG, 
"TPG[%d][%d] top : %d", + hw_idx, + stream_idx, + stream->stream_dimension.top); + CAM_DBG(CAM_TPG, "TPG[%d][%d] width : %d", + hw_idx, + stream_idx, + stream->stream_dimension.width); + CAM_DBG(CAM_TPG, "TPG[%d][%d] height : %d", + hw_idx, + stream_idx, + stream->stream_dimension.height); + CAM_DBG(CAM_TPG, "TPG[%d][%d] pixel_depth : %d", + hw_idx, + stream_idx, + stream->pixel_depth); + CAM_DBG(CAM_TPG, "TPG[%d][%d] cfa_arrangement : %d", + hw_idx, + stream_idx, + stream->cfa_arrangement); + CAM_DBG(CAM_TPG, "TPG[%d][%d] output_format : %s", + hw_idx, + stream_idx, + tpg_image_format_type_strings[stream->output_format]); + CAM_DBG(CAM_TPG, "TPG[%d][%d] vc : 0x%x", + hw_idx, + stream_idx, + stream->vc); + CAM_DBG(CAM_TPG, "TPG[%d][%d] dt : 0x%x", + hw_idx, + stream_idx, + stream->dt); + CAM_DBG(CAM_TPG, "TPG[%d][%d] hbi : %d", + hw_idx, + stream_idx, + stream->hbi); + CAM_DBG(CAM_TPG, "TPG[%d][%d] vbi : %d", + hw_idx, + stream_idx, + stream->vbi); +#endif + return 0; +} +static int tpg_hw_release_vc_slots_locked(struct tpg_hw *hw, + struct tpg_hw_request *req) +{ + struct list_head *pos = NULL, *pos_next = NULL; + struct tpg_hw_stream *entry; + struct tpg_hw_stream_v3 *entry_v3; + int i = 0; + + if (!hw || !hw->hw_info || !req) { + CAM_ERR(CAM_TPG, "Invalid Params"); + return -EINVAL; + } + + CAM_DBG(CAM_TPG, "TPG[%d] req[%lld] Freeing all the streams", + hw->hw_idx, req->request_id); + + for (i = 0; i < hw->hw_info->max_vc_channels; i++) { + req->vc_slots[i].slot_id = i; + req->vc_slots[i].vc = -1; + req->vc_slots[i].stream_count = 0; + if (hw->stream_version == 1) { + list_for_each_safe(pos, pos_next, &req->vc_slots[i].head) { + entry = list_entry(pos, struct tpg_hw_stream, list); + list_del(pos); + CAM_MEM_FREE(entry); + } + } else if (hw->stream_version == 3) { + list_for_each_safe(pos, pos_next, &req->vc_slots[i].head) { + entry_v3 = list_entry(pos, struct tpg_hw_stream_v3, list); + list_del(pos); + CAM_MEM_FREE(entry_v3); + } + } + 
INIT_LIST_HEAD(&(req->vc_slots[i].head)); + } + + hw->vc_count = 0; + CAM_MEM_FREE(req->vc_slots); + CAM_MEM_FREE(req); + + return 0; +} + +static int tpg_hw_free_waiting_requests_locked(struct tpg_hw *hw, int64_t req_id) +{ + struct list_head *pos = NULL, *pos_next = NULL; + struct tpg_hw_request *req = NULL; + + if (!hw) { + CAM_ERR(CAM_TPG, "Invalid Params"); + return -EINVAL; + } + + /* free up the pending requests*/ + list_for_each_safe(pos, pos_next, &hw->waiting_request_q) { + req = list_entry(pos, struct tpg_hw_request, list); + + if ((req_id != REQUEST_ID_UNSET) && (req_id < req->request_id)) + continue; + + CAM_DBG(CAM_TPG, "TPG[%d] freeing waiting_request[%lld] ", + hw->hw_idx, req->request_id); + list_del(pos); + hw->waiting_request_q_depth--; + tpg_hw_release_vc_slots_locked(hw, req); + } + return 0; +} + +static int tpg_hw_free_active_requests_locked(struct tpg_hw *hw) +{ + struct list_head *pos = NULL, *pos_next = NULL; + struct tpg_hw_request *req = NULL; + + if (!hw) { + CAM_ERR(CAM_TPG, "Invalid Params"); + return -EINVAL; + } + + /* free up the active requests*/ + list_for_each_safe(pos, pos_next, &hw->active_request_q) { + req = list_entry(pos, struct tpg_hw_request, list); + + CAM_DBG(CAM_TPG, "TPG[%d] freeing active_request[%lld] ", + hw->hw_idx, req->request_id); + list_del(pos); + hw->active_request_q_depth--; + tpg_hw_release_vc_slots_locked(hw, req); + } + return 0; +} + +static int tpg_hw_soc_disable(struct tpg_hw *hw) +{ + int rc = 0; + unsigned long flags; + + if (!hw || !hw->soc_info) { + CAM_ERR(CAM_TPG, "Error Invalid params"); + return -EINVAL; + } + + rc = cam_soc_util_disable_platform_resource(hw->soc_info, CAM_CLK_SW_CLIENT_IDX, true, + true); + + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] Disable platform failed %d", + hw->hw_idx, rc); + return rc; + } + if ((rc = cam_cpas_stop(hw->cpas_handle))) { + CAM_ERR(CAM_TPG, "TPG[%d] CPAS stop failed", + hw->hw_idx); + return rc; + } else { + spin_lock_irqsave(&hw->hw_state_lock, flags); + 
hw->state = TPG_HW_STATE_HW_DISABLED; + spin_unlock_irqrestore(&hw->hw_state_lock, flags); + } + + return rc; +} + +static int tpg_hw_soc_enable( + struct tpg_hw *hw, + enum cam_vote_level clk_level, + uint32_t enable_irq) +{ + int rc = 0; + struct cam_ahb_vote ahb_vote; + struct cam_axi_vote axi_vote = {0}; + unsigned long flags; + + ahb_vote.type = CAM_VOTE_ABSOLUTE; + ahb_vote.vote.level = CAM_SVS_VOTE; + axi_vote.num_paths = 1; + axi_vote.axi_path[0].path_data_type = CAM_AXI_PATH_DATA_ALL; + axi_vote.axi_path[0].transac_type = CAM_AXI_TRANSACTION_WRITE; + + axi_vote.axi_path[0].camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ab_bw = CAM_CPAS_DEFAULT_AXI_BW; + axi_vote.axi_path[0].mnoc_ib_bw = CAM_CPAS_DEFAULT_AXI_BW; + + CAM_DBG(CAM_TPG, "TPG[%d] camnoc_bw:%lld mnoc_ab_bw:%lld mnoc_ib_bw:%lld ", + hw->hw_idx, + axi_vote.axi_path[0].camnoc_bw, + axi_vote.axi_path[0].mnoc_ab_bw, + axi_vote.axi_path[0].mnoc_ib_bw); + + rc = cam_cpas_start(hw->cpas_handle, &ahb_vote, &axi_vote); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] CPAS start failed", + hw->hw_idx); + rc = -EFAULT; + goto end; + } + + rc = cam_soc_util_enable_platform_resource(hw->soc_info, CAM_CLK_SW_CLIENT_IDX, true, + clk_level, enable_irq); + + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] enable platform failed", + hw->hw_idx); + goto stop_cpas; + } + + spin_lock_irqsave(&hw->hw_state_lock, flags); + /* might need to wait if in busy state */ + hw->state = TPG_HW_STATE_READY; + spin_unlock_irqrestore(&hw->hw_state_lock, flags); + + return rc; +stop_cpas: + cam_cpas_stop(hw->cpas_handle); +end: + return rc; +} + +static int tpg_hw_apply_settings_to_hw_locked(struct tpg_hw *hw, + struct tpg_hw_request *req) +{ + int i = 0; + uint32_t stream_idx = 0; + int num_vcs = 0; + struct global_config_args globalargs = {0}; + if (!hw || + !hw->hw_info || + !hw->hw_info->ops || + !hw->hw_info->ops->process_cmd || + !hw->soc_info || + !req) { + CAM_ERR(CAM_TPG, "Invalid argument"); + return -EINVAL; + } + /* If 
nop then skip applying and return success*/ + if (req->request_type == TPG_HW_REQ_TYPE_NOP) + goto end; + + dump_global_configs(hw->hw_idx, &req->global_config); + if (hw->stream_version == 1) { + for (i = 0; i < hw->hw_info->max_vc_channels; i++) { + int dt_slot = 0; + struct vc_config_args vc_config = {0}; + struct list_head *pos = NULL, *pos_next = NULL; + struct tpg_hw_stream *entry = NULL, *vc_stream_entry = NULL; + + if (req->vc_slots[i].vc == -1) + break; + num_vcs++; + vc_config.vc_slot = i; + vc_config.num_dts = req->vc_slots[i].stream_count; + vc_stream_entry = list_first_entry(&req->vc_slots[i].head, + struct tpg_hw_stream, list); + vc_config.stream = &vc_stream_entry->stream; + hw->hw_info->ops->process_cmd(hw, + TPG_CONFIG_VC, &vc_config); + + list_for_each_safe(pos, pos_next, &req->vc_slots[i].head) { + struct dt_config_args dt_config = {0}; + + entry = list_entry(pos, struct tpg_hw_stream, list); + dump_stream_configs(hw->hw_idx, + stream_idx++, + &entry->stream); + dt_config.vc_slot = i; + dt_config.dt_slot = dt_slot++; + dt_config.stream = &entry->stream; + hw->hw_info->ops->process_cmd(hw, TPG_CONFIG_DT, &dt_config); + } + } + } else if (hw->stream_version == 3) { + for (i = 0; i < hw->hw_info->max_vc_channels; i++) { + int dt_slot = 0; + struct vc_config_args_v3 vc_config = {0}; + struct list_head *pos = NULL, *pos_next = NULL; + struct tpg_hw_stream_v3 *entry = NULL, *vc_stream_entry = NULL; + + if (req->vc_slots[i].vc == -1) + break; + num_vcs++; + vc_config.vc_slot = i; + vc_config.num_dts = req->vc_slots[i].stream_count; + vc_stream_entry = list_first_entry(&req->vc_slots[i].head, + struct tpg_hw_stream_v3, list); + vc_config.stream = &vc_stream_entry->stream; + hw->hw_info->ops->process_cmd(hw, + TPG_CONFIG_VC, &vc_config); + + list_for_each_safe(pos, pos_next, &req->vc_slots[i].head) { + struct dt_config_args_v3 dt_config = {0}; + + entry = list_entry(pos, struct tpg_hw_stream_v3, list); + dump_stream_configs_v3(hw->hw_idx, + stream_idx++, 
+ &entry->stream); + dt_config.vc_slot = i; + dt_config.dt_slot = dt_slot++; + dt_config.stream = &entry->stream; + hw->hw_info->ops->process_cmd(hw, TPG_CONFIG_DT, &dt_config); + } + } + } + globalargs.num_vcs = num_vcs; + globalargs.globalconfig = &req->global_config; + hw->hw_info->ops->process_cmd(hw, + TPG_CONFIG_CTRL, &globalargs); + if (!cam_presil_mode_enabled()) + cam_tpg_mem_dmp(hw->soc_info); +end: + return 0; +} + +int tpg_hw_dump_status(struct tpg_hw *hw) +{ + if (!hw || !hw->hw_info || !hw->hw_info->ops) + return -EINVAL; + switch (hw->hw_info->version) { + case TPG_HW_VERSION_1_3: + case TPG_HW_VERSION_1_3_1: + case TPG_HW_VERSION_1_4: + if (hw->hw_info->ops->dump_status) + hw->hw_info->ops->dump_status(hw, NULL); + break; + default: + CAM_WARN(CAM_TPG, "Hw version doesn't support status dump"); + break; + } + return 0; +} + +static int tpg_hw_check_hw_state_and_apply_settings_locked( + struct tpg_hw *hw, + struct tpg_hw_request *req) +{ + int rc = 0; + unsigned long wait_jiffies = 0; + unsigned long rem_jiffies = 0; + unsigned long flags; + int32_t timeout_multiplier = 0; + + if (!hw || !req) { + CAM_ERR(CAM_TPG, "Invalid param"); + rc = -EINVAL; + goto end; + } + if (req->request_type != TPG_HW_REQ_TYPE_NOP) { + if (cam_presil_mode_enabled()) + timeout_multiplier = TIMEOUT_MULTIPLIER_PRESIL; + else + timeout_multiplier = TIMEOUT_MULTIPLIER; + + /* if hw state is not ready, wait for timeout. + * if hw state becomes ready handle in follow up if. 
+	 */
+	if (hw->state == TPG_HW_STATE_BUSY) {
+		wait_jiffies = CAM_TPG_HW_WAIT_TIMEOUT * timeout_multiplier;
+		reinit_completion(&hw->complete_rup);
+		rem_jiffies =
+			cam_common_wait_for_completion_timeout(&hw->complete_rup,
+				wait_jiffies);
+		if (!rem_jiffies) {
+			CAM_ERR(CAM_TPG, "TPG[%d] hw timeout %lu",
+				hw->hw_idx, rem_jiffies);
+			rc = -EBUSY;
+			goto end;
+		}
+	}
+	if (hw->state == TPG_HW_STATE_READY) {
+		hw->settings_update = 1;
+		spin_lock_irqsave(&hw->hw_state_lock, flags);
+		hw->state = TPG_HW_STATE_BUSY;
+		spin_unlock_irqrestore(&hw->hw_state_lock, flags);
+		CAM_DBG(CAM_TPG, "HW State ready to busy");
+	}
+	}
+	rc = tpg_hw_apply_settings_to_hw_locked(hw, req);
+end:
+	return rc;
+}
+
+static int tpg_hw_reapply_from_active_queue_locked(
+	struct tpg_hw *hw,
+	uint64_t request_id)
+{
+	int rc = 0;
+	struct tpg_hw_request *reapply_req = NULL;
+	struct tpg_hw_request *entry = NULL;
+	struct list_head *pos = NULL, *pos_next = NULL;
+
+	if (!hw) {
+		CAM_ERR(CAM_TPG, "Invalid argument");
+		return -EINVAL;
+	}
+
+	list_for_each_safe(pos, pos_next, &hw->active_request_q) {
+		entry = list_entry(pos, struct tpg_hw_request, list);
+		if (entry->request_id == request_id) {
+			reapply_req = entry;
+			break;
+		}
+	}
+
+	if (reapply_req) {
+		CAM_DBG(CAM_TPG, "TPG[%d] got reapply request %llu", hw->hw_idx, request_id);
+		rc = tpg_hw_check_hw_state_and_apply_settings_locked(hw, reapply_req);
+	} else {
+		CAM_ERR(CAM_TPG, "Could not find reapply request in active queue %llu", request_id);
+		return -EINVAL;
+	}
+	return rc;
+}
+
+static int tpg_hw_lookup_queues_and_apply_req_locked(
+	struct tpg_hw *hw,
+	uint64_t request_id)
+{
+	int rc = 0;
+	struct tpg_hw_request *req = NULL, *active_req = NULL;
+
+	if (!hw) {
+		CAM_ERR(CAM_TPG, "Invalid argument");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Post FLUSH_TYPE_ALL, we should not service any request lesser than last_flush_req*/
+	if (request_id <= hw->last_flush_req && hw->last_flush_req != REQUEST_ID_UNSET) {
+		CAM_ERR(CAM_TPG,
"TPG[%d] Reject Request: %lld, last request flushed: %lld", + hw->hw_idx, request_id, hw->last_flush_req); + return -EBADR; + } + + if (!list_empty(&hw->waiting_request_q)) { + /* Resetting the last_flush_req on reception of first valid request post flush*/ + hw->last_flush_req = REQUEST_ID_UNSET; + req = list_first_entry(&hw->waiting_request_q, + struct tpg_hw_request, list); + if (req->request_id == request_id) { + CAM_DBG(CAM_TPG, "TPG[%d] request %d got matched", hw->hw_idx, request_id); + rc = tpg_hw_check_hw_state_and_apply_settings_locked(hw, req); + + if (rc == 0) { + /* delete the request from waiting_q*/ + list_del(&req->list); + hw->waiting_request_q_depth--; + /* delete active request from active q*/ + if (!list_empty(&hw->active_request_q) && + (hw->active_request_q_depth >= MAX_ACTIVE_QUEUE_DEPTH)) { + active_req = list_first_entry(&hw->active_request_q, + struct tpg_hw_request, list); + list_del(&active_req->list); + hw->active_request_q_depth--; + + /* free the previously active request */ + tpg_hw_release_vc_slots_locked(hw, active_req); + } + + /* Add the currently applied request to active queue*/ + list_add_tail(&req->list, &hw->active_request_q); + hw->active_request_q_depth++; + } + } else { + rc = tpg_hw_reapply_from_active_queue_locked(hw, request_id); + } + } else { + CAM_DBG(CAM_TPG, "TPG[%d] got apply for request %d when waiting queue is empty", + hw->hw_idx, + request_id); + rc = tpg_hw_reapply_from_active_queue_locked(hw, request_id); + } + + if (rc == 0) { + CAM_DBG(CAM_TPG, "TPG[%d] Hw Apply done for req %lld", hw->hw_idx, request_id); + } else { + CAM_ERR(CAM_TPG, "TPG[%d] Hw Apply Failed for req %lld", hw->hw_idx, request_id); + rc = -EAGAIN; + } + +end: + return rc; +} + +/* apply all pending requests sequencially */ +static int tpg_hw_apply_req_from_waiting_queue_locked( + struct tpg_hw *hw) +{ + int rc = 0; + struct list_head *pos = NULL, *pos_next = NULL; + struct tpg_hw_request *req = NULL; + struct tpg_hw_request *prev_req = 
NULL; + + if (!hw) { + CAM_ERR(CAM_TPG, "Invalid Params"); + return -EINVAL; + } + + list_for_each_safe(pos, pos_next, &hw->waiting_request_q) { + req = list_entry(pos, struct tpg_hw_request, list); + tpg_hw_apply_settings_to_hw_locked(hw, req); + list_del(pos); + hw->waiting_request_q_depth--; + /* free previous req if any*/ + if (prev_req) + tpg_hw_release_vc_slots_locked(hw, prev_req); + prev_req = req; + } + + /* last applied request in active */ + if (prev_req != NULL) { + list_add_tail(&prev_req->list, &hw->active_request_q); + hw->active_request_q_depth++; + } + return rc; +} + +int tpg_hw_start(struct tpg_hw *hw) +{ + int rc = 0; + struct tpg_reg_settings *reg_settings = NULL; + struct tpg_settings_config_t *config = NULL; + uint32_t settings_count = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->ops) + return -EINVAL; + + mutex_lock(&hw->mutex); + reg_settings = hw->register_settings; + config = &hw->settings_config; + settings_count = config->active_count; + + switch (hw->hw_info->version) { + case TPG_HW_VERSION_1_0: + case TPG_HW_VERSION_1_1: + if (hw->hw_info->ops->start) + hw->hw_info->ops->start(hw, NULL); + break; + case TPG_HW_VERSION_1_2: + case TPG_HW_VERSION_1_3: + case TPG_HW_VERSION_1_3_1: + case TPG_HW_VERSION_1_4: + /* apply all initial requests */ + if (hw->hw_info->ops->start) + hw->hw_info->ops->start(hw, NULL); + + if (settings_count != 0) { + hw->hw_info->ops->write_settings(hw, config, + reg_settings); + } else { + tpg_hw_apply_req_from_waiting_queue_locked(hw); + } + + break; + default: + CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version", + hw->hw_idx); + rc = -EINVAL; + break; + } + mutex_unlock(&hw->mutex); + return rc; +} + +int tpg_hw_stop(struct tpg_hw *hw) +{ + int rc = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->ops) + return -EINVAL; + mutex_lock(&hw->mutex); + switch (hw->hw_info->version) { + case TPG_HW_VERSION_1_0: + case TPG_HW_VERSION_1_1: + case TPG_HW_VERSION_1_2: + case TPG_HW_VERSION_1_3: + case 
TPG_HW_VERSION_1_3_1: + case TPG_HW_VERSION_1_4: + if (hw->hw_info->ops->stop) { + rc = hw->hw_info->ops->stop(hw, NULL); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] hw stop failed %d", + hw->hw_idx, rc); + break; + } + } + rc = tpg_hw_soc_disable(hw); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] hw soc disable failed %d", + hw->hw_idx, rc); + break; + } + tpg_hw_free_waiting_requests_locked(hw, REQUEST_ID_UNSET); + tpg_hw_free_active_requests_locked(hw); + hw->last_flush_req = REQUEST_ID_UNSET; + break; + default: + CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version", + hw->hw_idx); + rc = -EINVAL; + break; + } + mutex_unlock(&hw->mutex); + + return rc; +} + +int tpg_hw_acquire(struct tpg_hw *hw, + struct tpg_hw_acquire_args *acquire) +{ + int rc = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->ops) + return -EINVAL; + + mutex_lock(&hw->mutex); + switch (hw->hw_info->version) { + case TPG_HW_VERSION_1_0: + case TPG_HW_VERSION_1_1: + case TPG_HW_VERSION_1_2: + case TPG_HW_VERSION_1_3: + case TPG_HW_VERSION_1_3_1: + case TPG_HW_VERSION_1_4: + // Start Cpas and enable required clocks + break; + default: + CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version", + hw->hw_idx); + rc = -EINVAL; + break; + } + hw->last_flush_req = REQUEST_ID_UNSET; + mutex_unlock(&hw->mutex); + return rc; +} + +int tpg_hw_release(struct tpg_hw *hw) +{ + int rc = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->ops) + return -EINVAL; + mutex_lock(&hw->mutex); + switch (hw->hw_info->version) { + case TPG_HW_VERSION_1_0: + case TPG_HW_VERSION_1_1: + case TPG_HW_VERSION_1_2: + case TPG_HW_VERSION_1_3: + case TPG_HW_VERSION_1_3_1: + case TPG_HW_VERSION_1_4: + break; + default: + CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version", + hw->hw_idx); + rc = -EINVAL; + break; + } + hw->last_flush_req = REQUEST_ID_UNSET; + mutex_unlock(&hw->mutex); + return rc; +} + +enum cam_vote_level get_tpg_clk_level( + struct tpg_hw *hw) +{ + uint32_t cam_vote_level = 0; + uint32_t last_valid_vote = 0; + uint64_t clk = 0; + 
struct cam_hw_soc_info *soc_info; + + if (!hw || !hw->soc_info) + return -EINVAL; + + soc_info = hw->soc_info; + clk = hw->tpg_clock; + + for (cam_vote_level = 0; + cam_vote_level < CAM_MAX_VOTE; cam_vote_level++) { + if (soc_info->clk_level_valid[cam_vote_level] != true) + continue; + + if (soc_info->clk_rate[cam_vote_level] + [soc_info->src_clk_idx] >= clk) { + CAM_INFO(CAM_TPG, + "match detected %s : %llu:%d level : %d", + soc_info->clk_name[soc_info->src_clk_idx], + clk, + soc_info->clk_rate[cam_vote_level] + [soc_info->src_clk_idx], + cam_vote_level); + return cam_vote_level; + } + last_valid_vote = cam_vote_level; + } + return last_valid_vote; +} + +static int tpg_hw_configure_init_settings( + struct tpg_hw *hw, + struct tpg_hw_initsettings *settings) +{ + int rc = 0; + int clk_level = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->ops) + return -EINVAL; + mutex_lock(&hw->mutex); + switch (hw->hw_info->version) { + case TPG_HW_VERSION_1_0: + case TPG_HW_VERSION_1_1: + case TPG_HW_VERSION_1_2: + case TPG_HW_VERSION_1_3: + case TPG_HW_VERSION_1_3_1: + case TPG_HW_VERSION_1_4: + /* Need to handle the clock from the init config */ + /* Need to handle this latter */ + clk_level = get_tpg_clk_level(hw); + rc = tpg_hw_soc_enable(hw, clk_level, true); + + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] hw soc enable failed %d", + hw->hw_idx, rc); + break; + } + + if (hw->hw_info->ops->init) + rc = hw->hw_info->ops->init(hw, settings); + + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] hw init failed %d", + hw->hw_idx, rc); + } + break; + default: + CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version", + hw->hw_idx); + rc = -EINVAL; + break; + } + mutex_unlock(&hw->mutex); + return rc; +} + +static int tpg_hw_configure_init_settings_v3( + struct tpg_hw *hw, + struct tpg_hw_initsettings_v3 *settings) +{ + int rc = 0; + int clk_level = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->ops) + return -EINVAL; + mutex_lock(&hw->mutex); + switch (hw->hw_info->version) { + case TPG_HW_VERSION_1_0: 
+ case TPG_HW_VERSION_1_1: + case TPG_HW_VERSION_1_2: + case TPG_HW_VERSION_1_3: + case TPG_HW_VERSION_1_3_1: + case TPG_HW_VERSION_1_4: + clk_level = get_tpg_clk_level(hw); + rc = tpg_hw_soc_enable(hw, clk_level, true); + + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] hw soc enable failed %d", + hw->hw_idx, rc); + break; + } + + if (hw->hw_info->ops->init) + rc = hw->hw_info->ops->init(hw, settings); + + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] hw init failed %d", + hw->hw_idx, rc); + } + break; + default: + CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version", + hw->hw_idx); + rc = -EINVAL; + break; + } + mutex_unlock(&hw->mutex); + return rc; +} + + +int tpg_hw_config( + struct tpg_hw *hw, + enum tpg_hw_cmd_t config_cmd, + void *config_args) +{ + int rc = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->ops) + return -EINVAL; + switch (config_cmd) { + case TPG_HW_CMD_INIT_CONFIG: + //validate_stream_list(hw); + if (hw->settings_config.active_count != 0) { + tpg_hw_configure_init_settings_v3(hw, + (struct tpg_hw_initsettings_v3 *)config_args); + } else { + if (hw->stream_version == 1) { + tpg_hw_configure_init_settings(hw, + (struct tpg_hw_initsettings *)config_args); + } else if (hw->stream_version == 3) { + tpg_hw_configure_init_settings_v3(hw, + (struct tpg_hw_initsettings_v3 *)config_args); + } + } + break; + default: + CAM_ERR(CAM_TPG, "TPG[%d] Unsupported hw config command", + hw->hw_idx); + rc = -EINVAL; + break; + } + return rc; +} + +int tpg_hw_copy_settings_config( + struct tpg_hw *hw, + struct tpg_settings_config_t *settings) +{ + struct tpg_reg_settings *reg_settings; + uint32_t num_settings_array; + + if (!hw || !settings) { + CAM_ERR(CAM_TPG, "invalid parameter"); + return -EINVAL; + } + + num_settings_array = settings->settings_array_size; + hw->register_settings = + CAM_MEM_ZALLOC(sizeof(struct tpg_reg_settings) * + num_settings_array, GFP_KERNEL); + + if (hw->register_settings == NULL) { + CAM_ERR(CAM_TPG, "unable to allocate memory"); + return -EINVAL; + } + 
+ reg_settings = (struct tpg_reg_settings *) + ((uint8_t *)settings + settings->settings_array_offset); + + mutex_lock(&hw->mutex); + memcpy(&hw->settings_config, + settings, + sizeof(struct tpg_settings_config_t)); + memcpy(hw->register_settings, + reg_settings, + sizeof(struct tpg_reg_settings) * num_settings_array); + mutex_unlock(&hw->mutex); + + return 0; +} + +static int assign_vc_slot( + struct tpg_hw *hw, + int vc, + struct tpg_hw_request *req, + struct tpg_hw_stream *stream + ) +{ + int rc = -EINVAL, i = 0, slot_matched = 0; + + if (!hw || !stream || !req) { + return -EINVAL; + } + + for (i = 0; i < hw->hw_info->max_vc_channels; i++) { + /* Found a matching slot */ + if (req->vc_slots[i].vc == vc) { + slot_matched = 1; + if (req->vc_slots[i].stream_count + < hw->hw_info->max_dt_channels_per_vc) { + list_add_tail(&stream->list, &req->vc_slots[i].head); + req->vc_slots[i].stream_count++; + req->vc_slots[i].vc = vc; + rc = 0; + CAM_DBG(CAM_TPG, "vc[%d]dt[%d]=>slot[%d]", vc, stream->stream.dt, i); + break; + } else { + + /** + * already slot was assigned for this vc + * however this slot have been filled with + * full streams + */ + rc = -EINVAL; + CAM_ERR(CAM_TPG, "vc[%d]dt[%d]=>slot[%d] is overlfown", + vc, stream->stream.dt, i); + break; + } + } + + /** + * none of the above slots matched, and now found an empty slot + * so assigning stream to that slot + */ + if (req->vc_slots[i].vc == -1) { + list_add_tail(&stream->list, &req->vc_slots[i].head); + req->vc_slots[i].stream_count++; + req->vc_slots[i].vc = vc; + req->vc_count++; + rc = 0; + CAM_DBG(CAM_TPG, "vc[%d]dt[%d]=>slot[%d]", vc, stream->stream.dt, i); + break; + } + } + if ((slot_matched == 0) && (rc != 0)) { + CAM_ERR(CAM_TPG, "No slot matched"); + } + return rc; +} + +static int assign_vc_slot_v3( + struct tpg_hw *hw, + int vc, + struct tpg_hw_request *req, + struct tpg_hw_stream_v3 *stream + ) +{ + int rc = -EINVAL, i = 0, slot_matched = 0; + + if (!hw || !stream || !req) + return -EINVAL; + + for 
(i = 0; i < hw->hw_info->max_vc_channels; i++) { + /* Found a matching slot */ + if (req->vc_slots[i].vc == vc) { + slot_matched = 1; + if (req->vc_slots[i].stream_count + < hw->hw_info->max_dt_channels_per_vc) { + list_add_tail(&stream->list, &req->vc_slots[i].head); + req->vc_slots[i].stream_count++; + req->vc_slots[i].vc = vc; + rc = 0; + CAM_DBG(CAM_TPG, "vc[%d]dt[%d]=>slot[%d]", + vc, + stream->stream.dt, + i); + } else { + + /** + * already slot was assigned for this vc + * however this slot have been filled with + * full streams + */ + rc = -EINVAL; + CAM_ERR(CAM_TPG, "vc[%d]dt[%d]=>slot[%d] is overlfown", + vc, stream->stream.dt, i); + } + break; + } + + /** + * none of the above slots matched, and now found an empty slot + * so assigning stream to that slot + */ + if (req->vc_slots[i].vc == -1) { + list_add_tail(&stream->list, &req->vc_slots[i].head); + req->vc_slots[i].stream_count++; + req->vc_slots[i].vc = vc; + req->vc_count++; + rc = 0; + CAM_DBG(CAM_TPG, "vc[%d]dt[%d]=>slot[%d]", vc, stream->stream.dt, i); + break; + } + } + if ((slot_matched == 0) && (rc != 0)) + CAM_ERR(CAM_TPG, "No slot matched"); + return rc; +} + +int tpg_hw_free_request( + struct tpg_hw *hw, + struct tpg_hw_request *req) +{ + int rc = 0; + + if (!hw || !req) + return -EINVAL; + + mutex_lock(&hw->mutex); + rc = tpg_hw_release_vc_slots_locked(hw, req); + mutex_unlock(&hw->mutex); + return rc; +} + +int tpg_hw_reset(struct tpg_hw *hw) +{ + int rc = 0; + unsigned long flags; + if (!hw) + return -EINVAL; + + CAM_DBG(CAM_TPG, "TPG HW reset"); + /* disable the hw */ + mutex_lock(&hw->mutex); + + rc = tpg_hw_free_waiting_requests_locked(hw, REQUEST_ID_UNSET); + if (rc) + CAM_ERR(CAM_TPG, "TPG[%d] unable to free up the pending requests", + hw->hw_idx); + + if (hw->state != TPG_HW_STATE_HW_DISABLED) { + rc = cam_soc_util_disable_platform_resource(hw->soc_info, CAM_CLK_SW_CLIENT_IDX, + true, false); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] Disable platform failed %d", hw->hw_idx, rc); + } 
+ rc = cam_cpas_stop(hw->cpas_handle); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] CPAS stop failed", hw->hw_idx); + } + spin_lock_irqsave(&hw->hw_state_lock, flags); + hw->state = TPG_HW_STATE_HW_DISABLED; + hw->last_flush_req = REQUEST_ID_UNSET; + spin_unlock_irqrestore(&hw->hw_state_lock, flags); + } + + rc = tpg_hw_free_active_requests_locked(hw); + if (rc) { + CAM_ERR(CAM_TPG, "TPG[%d] unable to free up the active requests", + hw->hw_idx); + } + mutex_unlock(&hw->mutex); + + return rc; +} + +int tpg_hw_add_stream( + struct tpg_hw *hw, + struct tpg_hw_request *req, + struct tpg_stream_config_t *cmd) +{ + int rc = 0; + struct tpg_hw_stream *stream = NULL; + if (!hw || !req || !cmd) { + CAM_ERR(CAM_TPG, "Invalid params"); + return -EINVAL; + } + + hw->stream_version = 1; + mutex_lock(&hw->mutex); + stream = CAM_MEM_ZALLOC(sizeof(struct tpg_hw_stream), GFP_KERNEL); + if (!stream) { + CAM_ERR(CAM_TPG, "TPG[%d] stream allocation failed", + hw->hw_idx); + mutex_unlock(&hw->mutex); + return -ENOMEM; + } + memcpy(&stream->stream, + cmd, + sizeof(struct tpg_stream_config_t)); + + rc = assign_vc_slot(hw, stream->stream.vc, req, stream); + mutex_unlock(&hw->mutex); + return rc; +} + +int tpg_hw_add_request(struct tpg_hw *hw, + struct tpg_hw_request *req) +{ + int rc = 0; + + if (!hw || !req) { + CAM_ERR(CAM_TPG, "Invalid params"); + return -EINVAL; + } + mutex_lock(&hw->mutex); + if (hw->waiting_request_q_depth <= MAX_WAITING_QUEUE_DEPTH) { + list_add_tail(&req->list, &hw->waiting_request_q); + hw->waiting_request_q_depth++; + } else { + CAM_ERR(CAM_TPG, "waiting queue size is exceed"); + } + mutex_unlock(&hw->mutex); + return rc; +} + +struct tpg_hw_request *tpg_hw_create_request( + struct tpg_hw *hw, + uint64_t request_id) +{ + struct tpg_hw_request *req = NULL; + uint32_t num_vc_channels, i; + + if (!hw) { + CAM_ERR(CAM_TPG, "Invalid params"); + return NULL; + } + + /* Allocate request */ + req = CAM_MEM_ZALLOC(sizeof(struct tpg_hw_request), GFP_KERNEL); + if (!req) { + 
CAM_ERR(CAM_TPG, "TPG[%d] request allocation failed", hw->hw_idx); + return NULL; + } + + req->request_id = request_id; + /* Allocate Vc slots in request */ + num_vc_channels = hw->hw_info->max_vc_channels; + req->vc_slots = CAM_MEM_ZALLOC_ARRAY(num_vc_channels, sizeof(struct tpg_vc_slot_info), + GFP_KERNEL); + + req->vc_count = 0; + if (!req->vc_slots) { + CAM_ERR(CAM_TPG, "TPG[%d] vc slot allocation failed", + hw->hw_idx); + goto err_exit_1; + } + + INIT_LIST_HEAD(&req->list); + /* Initialize the slot variables */ + for (i = 0; i < hw->hw_info->max_vc_channels; i++) { + req->vc_slots[i].slot_id = i; + req->vc_slots[i].vc = -1; + req->vc_slots[i].stream_count = 0; + INIT_LIST_HEAD(&(req->vc_slots[i].head)); + } + CAM_DBG(CAM_TPG, "TPG[%d] request(%lld) allocated success", + hw->hw_idx, request_id); + return req; +err_exit_1: + CAM_MEM_FREE(req); + return NULL; +} + +int tpg_hw_request_set_opcode( + struct tpg_hw_request *req, + uint32_t opcode) +{ + int rc = 0; + + if (!req) { + CAM_ERR(CAM_TPG, "Invalid params"); + return -EINVAL; + } + switch (opcode) { + case CAM_TPG_PACKET_OPCODE_INITIAL_CONFIG: + req->request_type = TPG_HW_REQ_TYPE_INIT; + break; + case CAM_TPG_PACKET_OPCODE_NOP: + req->request_type = TPG_HW_REQ_TYPE_NOP; + break; + case CAM_TPG_PACKET_OPCODE_UPDATE: + req->request_type = TPG_HW_REQ_TYPE_UPDATE; + break; + default: + req->request_type = TPG_HW_REQ_TYPE_NOP; + break; + } + CAM_INFO(CAM_TPG, "req[%d] type = %d", + req->request_id, + req->request_type); + return rc; +} + +int tpg_hw_apply( + struct tpg_hw *hw, + uint64_t request_id) +{ + int rc = 0; + + if (!hw) { + CAM_ERR(CAM_TPG, "Invalid params"); + return -EINVAL; + } + mutex_lock(&hw->mutex); + rc = tpg_hw_lookup_queues_and_apply_req_locked(hw, request_id); + mutex_unlock(&hw->mutex); + return rc; +} +int tpg_hw_add_stream_v3( + struct tpg_hw *hw, + struct tpg_hw_request *req, + struct tpg_stream_config_v3_t *cmd) +{ + int rc = 0; + struct tpg_hw_stream_v3 *stream = NULL; + + if (!hw || 
!req || !cmd) { + CAM_ERR(CAM_TPG, "Invalid params"); + return -EINVAL; + } + + hw->stream_version = 3; + mutex_lock(&hw->mutex); + stream = CAM_MEM_ZALLOC(sizeof(struct tpg_hw_stream_v3), GFP_KERNEL); + if (!stream) { + CAM_ERR(CAM_TPG, "TPG[%d] stream allocation failed", + hw->hw_idx); + mutex_unlock(&hw->mutex); + return -ENOMEM; + } + memcpy(&stream->stream, + cmd, + sizeof(struct tpg_stream_config_v3_t)); + + rc = assign_vc_slot_v3(hw, stream->stream.vc, req, stream); + mutex_unlock(&hw->mutex); + return rc; +} + +int tpg_hw_flush_requests( + struct tpg_hw *hw, + uint32_t last_flushed_req, + bool is_flush_all) +{ + int rc = 0; + struct list_head *pos = NULL, *pos_next = NULL; + struct tpg_hw_request *req = NULL; + bool is_request_flushed = false; + + if (!hw) { + CAM_ERR(CAM_TPG, "Invalid tpg_hw param"); + return -EINVAL; + } + mutex_lock(&hw->mutex); + + if (is_flush_all) { + rc = tpg_hw_free_waiting_requests_locked(hw, last_flushed_req); + if (rc) + CAM_ERR(CAM_TPG, "TPG[%d] unable to free the pending requests", + hw->hw_idx); + + /* Setting the last_flushed_req only in case of FLUSH_TYPE_ALL */ + hw->last_flush_req = last_flushed_req; + CAM_INFO(CAM_TPG, "TPG[%d] Last Req pending to be flushed: %lld", + hw->hw_idx, hw->last_flush_req); + } else { + /* Search for request from waiting request list and delete it*/ + list_for_each_safe(pos, pos_next, &hw->waiting_request_q) { + req = list_entry(pos, struct tpg_hw_request, list); + if (req->request_id == last_flushed_req) { + list_del(pos); + hw->waiting_request_q_depth--; + is_request_flushed = true; + tpg_hw_release_vc_slots_locked(hw, req); + CAM_INFO(CAM_TPG, "TPG[%d] Req[%lld] deleted from wait_queue", + hw->hw_idx, req->request_id); + break; + } + } + if (!is_request_flushed) { + CAM_ERR(CAM_TPG, "TPG[%d] Flush req_id[%lld] not found in waiting queue", + hw->hw_idx, last_flushed_req); + rc = -EINVAL; + } + } + + mutex_unlock(&hw->mutex); + return rc; +} diff --git 
a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw.h new file mode 100644 index 0000000000..a5754ce999 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw.h @@ -0,0 +1,549 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __TPG_HW_H__ +#define __TPG_HW_H__ + +#include +#include "cam_debug_util.h" +#include "cam_soc_util.h" +#include +#include +#include +#include + +#define TPG_HW_VERSION_1_0 0x10000000 +#define TPG_HW_VERSION_1_1 0x10000001 +#define TPG_HW_VERSION_1_2 0x10000002 +#define TPG_HW_VERSION_1_3 0x10000003 +#define TPG_HW_VERSION_1_3_1 0x10000004 +#define TPG_HW_VERSION_1_4 0x10000005 + +struct tpg_hw; + +/** + * tpg_hw_ops : tpg hw operations + * + * @init : tpg hw layer init + * @start : tpg hw start stream + * @stop : tpg hw stop stream + * @deinit : tpg hw deinit + * @read : tpg hw read register + * @write : tpg hw write register + * @process_cmd : tpg hw process command + * @dump_status : dump any status registers + * @write_settings : write register settings + */ +struct tpg_hw_ops { + int (*init)(struct tpg_hw *hw, void *data); + int (*start)(struct tpg_hw *hw, void *data); + int (*stop)(struct tpg_hw *hw, void *data); + int (*deinit)(struct tpg_hw *hw, void *data); + int (*read)(struct tpg_hw *hw, uint32_t *addr, uint32_t *val); + int (*write)(struct tpg_hw *hw, uint32_t *addr, uint32_t *val); + int (*process_cmd)(struct tpg_hw *hw, uint32_t cmd, void *arg); + int (*dump_status)(struct tpg_hw *hw, void *data); + int (*write_settings)(struct tpg_hw *hw, + struct tpg_settings_config_t *config, struct tpg_reg_settings *settings); + irqreturn_t (*handle_irq)(struct tpg_hw *hw); +}; + +/** + * tpg_hw_state : tpg hw states + * + * 
TPG_HW_STATE_HW_DISABLED: tpg hw is not enabled yet + * TPG_HW_STATE_HW_ENABLED : tpg hw is enabled + * TPG_HW_STATE_READY : tpg hw is ready + * TPG_HW_STATE_BUSY : tpg hw is busy + * TPG_HW_STATE_MAX : tp hw is max + */ +enum tpg_hw_state { + TPG_HW_STATE_HW_DISABLED, + TPG_HW_STATE_HW_ENABLED, + TPG_HW_STATE_READY, + TPG_HW_STATE_BUSY, + TPG_HW_STATE_MAX +}; + +#define TPG_HW_REQ_TYPE_NOP 0x00 +#define TPG_HW_REQ_TYPE_INIT 0x01 +#define TPG_HW_REQ_TYPE_UPDATE 0x02 +#define TPG_HW_REQ_TYPE_BYPASS 0x03 + +/** + * tpg_hw_request : tpg hw request info + * + * @request_id : request id + * @request_type : request type + * @global_config : global configuration + * @vc_slots : vc slots + * @vc_count : vc count + * @list : list + */ +struct tpg_hw_request { + uint64_t request_id; + int request_type; + struct tpg_global_config_t global_config; + struct tpg_vc_slot_info *vc_slots; + uint32_t vc_count; + struct list_head list; +}; + +/** + * @brief tpg_vc_slot_info + * @slot_id : slot id of this vc slot + * @vc : virtual channel configured + * @stream_count : number of streams in this slot + * @head : head pointing all data types in with this vc + */ +struct tpg_vc_slot_info { + int slot_id; + int vc; + int stream_count; + struct list_head head; +}; + +/** + * tpg_hw_info : tpg hw layer info + * + * @version: version of tpg hw + * @max_vc_channels: max number of virtual channels supported by tpg + * @max_dt_channels_per_vc: max dts supported in each vc + * @ops: tpg hw operations + * @hw_data: tpg hw data + * @hw: hw layer initialization + * @xcfa_debug: for xcfa debug + * @shdr_overlap: for shdr overlap + * @shdr_offset_num_batch: for shdr offset num batch + * @shdr_line_offset0: for shdr line offset0 + * @shdr_line_offset1: for shdr line offset1 + */ +struct tpg_hw_info { + uint32_t version; + uint32_t max_vc_channels; + uint32_t max_dt_channels_per_vc; + struct tpg_hw_ops *ops; + void *hw_data; + int (*layer_init)(struct tpg_hw *hw); + int xcfa_debug; + int shdr_overlap; 
+ int shdr_offset_num_batch; + int shdr_line_offset0; + int shdr_line_offset1; +}; + + +/** + * tpg_hw_stream : tpg hw stream + * + * @stream : tpg stream; + * @list : entry to tpg stream list + */ +struct tpg_hw_stream { + struct tpg_stream_config_t stream; + struct list_head list; +}; + +/** + * tpg_hw_stream_v3 : tpg hw stream version 2 + * + * @stream : tpg stream version 2 + * @list : entry to tpg stream list + */ +struct tpg_hw_stream_v3 { + struct tpg_stream_config_v3_t stream; + struct list_head list; +}; + + +/** + * tpg_hw : tpg hw + * + * @hw_idx : hw id + * @stream_version : stream version + * @state : tpg hw state + * @cpas_handle : handle to cpas + * @vc_count : vc count + * @tpg_clock : tpg clock + * @hw_info : tp hw info + * @soc_info : soc info + * @mutex : lock + * @complete_rup : rup done completion + * @vc_slots : vc slots info + * @settings_config : settings configuration + * @register_settings : register settings info + * @tpg_hw_irq_handler : handle hw irq + * @waiting_request_q : queue of waiting requests to be applied + * @acitive_request_q : queue of active requests being applied, + * @active_request_q_depth : active request queue depth + * @waiting_request_q_depth : waiting request queue depth + * @settings update : settings update flag + * @last_flush_req : last request id with flush trigger + */ +struct tpg_hw { + uint32_t hw_idx; + uint32_t stream_version; + uint32_t state; + uint32_t cpas_handle; + uint32_t vc_count; + uint64_t tpg_clock; + struct tpg_hw_info *hw_info; + struct cam_hw_soc_info *soc_info; + struct mutex mutex; + spinlock_t hw_state_lock; + struct completion complete_rup; + struct tpg_vc_slot_info *vc_slots; + struct tpg_settings_config_t settings_config; + struct tpg_reg_settings *register_settings; + int (*tpg_hw_irq_handler)(struct tpg_hw *hw); + /* Waiting and active request Queues */ + struct list_head waiting_request_q; + struct list_head active_request_q; + uint32_t active_request_q_depth; + uint32_t 
waiting_request_q_depth; + uint32_t settings_update; + int64_t last_flush_req; +}; + +/** + * tpg_hw_acquire_args : tpg hw acquire arguments + * + * @resource_list : list of resources to acquire + * @count : number of resources to acquire + */ +struct tpg_hw_acquire_args { + /* Integer id of resources */ + uint32_t *resource_list; + ssize_t count; +}; + +enum tpg_hw_cmd_t { + TPG_HW_CMD_INVALID = 0, + TPG_HW_CMD_INIT_CONFIG, + TPG_HW_CMD_MAX, +}; + +#define TPG_HW_CONFIG_BASE 0x4000 +#define TPG_CONFIG_CTRL (TPG_HW_CONFIG_BASE + 0) +#define TPG_CONFIG_VC (TPG_HW_CONFIG_BASE + 1) +#define TPG_CONFIG_DT (TPG_HW_CONFIG_BASE + 2) + +/* pixel bit width */ +#define PACK_8_BIT 8 +#define PACK_10_BIT 10 +#define PACK_12_BIT 12 +#define PACK_14_BIT 14 +#define PACK_16_BIT 16 + +/** + * @vc_config_args : arguments for vc config process cmd + * + * @vc_slot : slot to configure this vc + * @num_dts : number of dts in this vc + * @stream : output stream + */ +struct vc_config_args { + uint32_t vc_slot; + uint32_t num_dts; + struct tpg_stream_config_t *stream; +}; + +/** + * dt_config_args : arguments for dt config process cmd + * + * @vc_slot : vc slot to configure this dt + * @dt_slot : dt slot to configure this dt + * @stream : stream packet to configure for this dt + */ +struct dt_config_args { + uint32_t vc_slot; + uint32_t dt_slot; + struct tpg_stream_config_t *stream; +}; + +/** + * @vc_config_args_v3 : arguments for vc config process cmd version 2 + * + * @vc_slot : slot to configure this vc + * @num_dts : number of dts in this vc + * @stream : output stream version 2 + */ +struct vc_config_args_v3 { + uint32_t vc_slot; + uint32_t num_dts; + struct tpg_stream_config_v3_t *stream; +}; + +/** + * dt_config_args_v3 : arguments for dt config process cmd version 2 + * + * @vc_slot : vc slot to configure this dt + * @dt_slot : dt slot to configure this dt + * @stream : stream packet to configure for this dt version 2 + */ +struct dt_config_args_v3 { + uint32_t vc_slot; + 
uint32_t dt_slot; + struct tpg_stream_config_v3_t *stream; +}; + + +/** + * global_config_args : tpg global config args + * + * @num_vcs : number of vcs to be configured + * @globalconfig: global config cmd + */ +struct global_config_args { + uint32_t num_vcs; + struct tpg_global_config_t *globalconfig; +}; + +/** + * tpg_hw_initsettings : initial configurations + * + * @global_config : global configuration + * @streamconfigs : array of stream configurations + * @num_streams : number of streams in strea config array + */ +struct tpg_hw_initsettings { + struct tpg_global_config_t globalconfig; + struct tpg_stream_config_t *streamconfigs; + uint32_t num_streams; +}; + +/** + * tpg_hw_initsettings_v3 : initial configurations version 2 + * + * @global_config : global configuration + * @streamconfigs : array of stream configurations version 2 + * @num_streams : number of streams in strea config array + */ +struct tpg_hw_initsettings_v3 { + struct tpg_global_config_t globalconfig; + struct tpg_stream_config_v3_t *streamconfigs; + uint32_t num_streams; +}; + + +/** + * @brief dump the tpg memory info + * + * @param soc_info: soc info for soc related info + * + * @return : 0 on succuss + */ +int32_t cam_tpg_mem_dmp(struct cam_hw_soc_info *soc_info); + +/** + * @brief dump global config command + * + * @param idx : hw index + * @param global : global config command + * + * @return : 0 on success + */ +int dump_global_configs(int idx, + struct tpg_global_config_t *global); + +/** + * @brief : dump stream config command + * + * @param hw_idx: hw index + * @param stream_idx: stream index + * @param stream: stream config command + * + * @return : 0 on success + */ +int dump_stream_configs(int hw_idx, + int stream_idx, + struct tpg_stream_config_t *stream); + +/** + * @brief : dump stream config command version 2 + * + * @param hw_idx: hw index + * @param stream_idx: stream index + * @param stream: stream config command version 2 + * + * @return : 0 on success + */ +int 
dump_stream_configs_v3(int hw_idx, int stream_idx, struct tpg_stream_config_v3_t *stream); + + +/** + * @brief : dump any hw status registers + * + * @param hw: tpg hw instance + * + * @return : 0 on success + */ +int tpg_hw_dump_status(struct tpg_hw *hw); +/** + * @brief : start tpg hw stream + * + * @param hw: tpg hw instance + * + * @return : 0 on success + */ +int tpg_hw_start(struct tpg_hw *hw); + +/** + * @brief : stop tpg hw stream + * + * @param hw: tpg hw instance + * + * @return : 0 on success + */ +int tpg_hw_stop(struct tpg_hw *hw); + +/** + * @brief : tpg hw acquire + * + * @param hw: tpg hw instance + * @param acquire: list of resources to acquire + * + * @return : 0 on success + */ +int tpg_hw_acquire(struct tpg_hw *hw, + struct tpg_hw_acquire_args *acquire); +/** + * @brief release tpg hw + * + * @param hw: tpg hw instance + * + * @return : 0 on success + */ +int tpg_hw_release(struct tpg_hw *hw); + +/** + * @brief : configure tpg hw + * + * @param hw: tpg hw instance + * @param cmd: configuration command + * @param arg: configuration command argument + * + * @return : 0 on success + */ +int tpg_hw_config(struct tpg_hw *hw, enum tpg_hw_cmd_t cmd, void *arg); + +/** + * @brief : reset the tpg hw instance + * + * @param hw: tpg hw instance + * + * @return : 0 on success + */ +int tpg_hw_reset(struct tpg_hw *hw); + +/** + * @brief : tp hw add stream + * + * @param hw: tpg hw instance + * @param req: req instance + * @param cmd: tpg hw command + * + * @return : 0 on success + */ +int tpg_hw_add_stream( + struct tpg_hw *hw, + struct tpg_hw_request *req, + struct tpg_stream_config_t *cmd); + +/** + * @brief : tp hw add stream version 2 + * + * @param hw: tpg hw instance + * @param req: req instance + * @param cmd: tpg hw command version 2 + * + * @return : 0 on success + */ +int tpg_hw_add_stream_v3( + struct tpg_hw *hw, + struct tpg_hw_request *req, + struct tpg_stream_config_v3_t *cmd); + +/** + * @brief : hw apply + * + * @param hw: tpg hw instance + * 
@param request_id: request id + * + * @return : 0 on success + */ +int tpg_hw_apply( + struct tpg_hw *hw, + uint64_t request_id); + +/** + * @brief : create hw request + * + * @param hw: tpg hw instance + * @param request_id: request id + * + * @return : req on success + */ +struct tpg_hw_request *tpg_hw_create_request( + struct tpg_hw *hw, + uint64_t request_id); + +/** + * @brief : free hw request + * + * @param hw: tpg hw instance + * @param req: request instance + * + * @return : 0 on success + */ +int tpg_hw_free_request(struct tpg_hw *hw, + struct tpg_hw_request *req); + +/** + * @brief : add hw request + * + * @param hw: tpg hw instance + * @param req: request instance + * + * @return : 0 on success + */ +int tpg_hw_add_request(struct tpg_hw *hw, + struct tpg_hw_request *req); + +/** + * @brief : set opcode of request + * + * @param hw: tpg hw instance + * @param opcode: op code + * + * @return : 0 on success + */ +int tpg_hw_request_set_opcode( + struct tpg_hw_request *req, + uint32_t opcode); + +/** + * @brief : copy settings config command + * + * @param hw: tpg hw instance + * @param settings: settings config command + * + * @return : 0 on success + */ +int tpg_hw_copy_settings_config(struct tpg_hw *hw, struct tpg_settings_config_t *settings); + +/** + * @brief : Flush tpg request queues + * + * @param hw: tpg hw instance + * @param last_flush_req: Request number to be flushed + * @param is_flush_all: flag indicating flush type + * + * @return : 0 on success + */ +int tpg_hw_flush_requests(struct tpg_hw *hw, uint32_t last_flushed_req, bool is_flush_all); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_common.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_common.c new file mode 100644 index 0000000000..0988e36959 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_common.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: 
GPL-2.0-only +/* + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "tpg_hw_common.h" + +int tpg_hw_write_settings(struct tpg_hw *hw, + struct tpg_settings_config_t *config, struct tpg_reg_settings *settings) +{ + struct cam_hw_soc_info *soc_info = NULL; + int setting_index; + phys_addr_t size; + uint32_t tpg_reg_offset; + + if (!hw || !config || !settings) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + soc_info = hw->soc_info; + size = resource_size(soc_info->mem_block[0]); + + for (setting_index = 0; setting_index < config->active_count; setting_index++) { + tpg_reg_offset = settings->reg_offset; + if (tpg_reg_offset >= size) { + CAM_ERR(CAM_TPG, "settings reg_offset error"); + return -EINVAL; + } + cam_io_w_mb(settings->reg_value, soc_info->reg_map[0].mem_base + + tpg_reg_offset); + settings++; + } + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_common.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_common.h new file mode 100644 index 0000000000..52313d79b4 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_common.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __TPG_HW_COMMON_H__ +#define __TPG_HW_COMMON_H__ + +#include +#include "cam_debug_util.h" +#include "cam_soc_util.h" +#include +#include +#include "tpg_hw.h" + +/** + * @brief write register settings + * + * @param hw : tpg hw instance + * @param config : argument for settings config + * @param settings : argument for register settings + * + * @return : 0 on sucdess + */ +int tpg_hw_write_settings(struct tpg_hw *hw, + struct tpg_settings_config_t *config, struct tpg_reg_settings *settings); + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0.c new file mode 100644 index 0000000000..7bb53a3b0b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "tpg_hw_v_1_0.h" + +enum tpg_hw_encode_format_t { + RAW_8_BIT = 1, + RAW_10_BIT, + RAW_12_BIT, + RAW_14_BIT, + RAW_16_BIT +}; + +static struct cam_tpg_ver1_reg_offset cam_tpg101_reg = { + .tpg_hw_version = 0x0, + .tpg_hw_status = 0x4, + .tpg_ctrl = 0x60, + .tpg_vc_cfg0 = 0x64, + .tpg_vc_cfg1 = 0x68, + .tpg_lfsr_seed = 0x6c, + .tpg_dt_0_cfg_0 = 0x70, + .tpg_dt_1_cfg_0 = 0x74, + .tpg_dt_2_cfg_0 = 0x78, + .tpg_dt_3_cfg_0 = 0x7C, + .tpg_dt_0_cfg_1 = 0x80, + .tpg_dt_1_cfg_1 = 0x84, + .tpg_dt_2_cfg_1 = 0x88, + .tpg_dt_3_cfg_1 = 0x8C, + .tpg_dt_0_cfg_2 = 0x90, + .tpg_dt_1_cfg_2 = 0x94, + .tpg_dt_2_cfg_2 = 0x98, + .tpg_dt_3_cfg_2 = 0x9C, + .tpg_color_bar_cfg = 0xA0, + .tpg_common_gen_cfg = 0xA4, + .tpg_vbi_cfg = 0xA8, + .tpg_test_bus_crtl = 0xF8, + .tpg_spare = 0xFC, + + /* configurations */ + .major_version = 1, + .minor_version = 0, + .version_incr = 0, + .tpg_en_shift_val = 0, + .tpg_phy_sel_shift_val = 3, + .tpg_num_active_lines_shift = 4, + .tpg_fe_pkt_en_shift = 2, + .tpg_fs_pkt_en_shift = 1, + .tpg_line_interleaving_mode_shift = 10, + .tpg_num_dts_shift_val = 8, + .tpg_v_blank_cnt_shift = 12, + .tpg_dt_encode_format_shift = 16, + .tpg_payload_mode_color = 0x8, + .tpg_split_en_shift = 5, + .top_mux_reg_offset = 0x1C, +}; + +static int configure_global_configs(struct tpg_hw *hw, + struct tpg_global_config_t *configs) +{ + uint32_t val; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver1_reg_offset *tpg_reg = &cam_tpg101_reg; + + if (hw == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + soc_info = hw->soc_info; + + val = cam_io_r_mb(soc_info->reg_map[1].mem_base + + tpg_reg->top_mux_reg_offset); + val |= (1 << hw->hw_idx); + + cam_io_w_mb(val, + soc_info->reg_map[1].mem_base + tpg_reg->top_mux_reg_offset); + CAM_INFO(CAM_ISP, "TPG:%d Set top Mux: 0x%x", + hw->hw_idx, val); + + val = ((4 - 1) << + tpg_reg->tpg_num_active_lines_shift) | + (1 << tpg_reg->tpg_fe_pkt_en_shift) | + (1 << tpg_reg->tpg_fs_pkt_en_shift) | + 
(0 << tpg_reg->tpg_phy_sel_shift_val) | + (1 << tpg_reg->tpg_en_shift_val); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl); + + return 0; +} + +static int get_tpg_encode_format(int sw_encode_format) +{ + switch (sw_encode_format) { + case PACK_8_BIT: + return RAW_8_BIT; + case PACK_10_BIT: + return RAW_10_BIT; + case PACK_12_BIT: + return RAW_12_BIT; + case PACK_14_BIT: + return RAW_14_BIT; + case PACK_16_BIT: + return RAW_16_BIT; + } + return RAW_8_BIT; +} + +static int configure_dt( + struct tpg_hw *hw, + uint32_t vc_slot, + uint32_t dt_slot, + struct tpg_stream_config_t *stream) +{ + uint32_t val; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver1_reg_offset *tpg_reg = &cam_tpg101_reg; + + if (hw == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + soc_info = hw->soc_info; + + CAM_DBG(CAM_TPG, "TPG[%d] slot(%d,%d) <= dt:%d", + hw->hw_idx, + vc_slot, + dt_slot, + stream->dt); + /* configure width and height */ + val = (((stream->stream_dimension.width & 0xFFFF) << 16) | + (stream->stream_dimension.height & 0x3FFF)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_dt_0_cfg_0 + 0x10 * dt_slot); + + /* configure data type */ + cam_io_w_mb(stream->dt, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_dt_0_cfg_1 + 0x10 * dt_slot); + + /* configure bpp */ + val = ((get_tpg_encode_format(stream->pixel_depth) & 0xF) << + tpg_reg->tpg_dt_encode_format_shift) | + tpg_reg->tpg_payload_mode_color; + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_dt_0_cfg_2 + 0x10 * dt_slot); + + return 0; +} + +static int configure_vc( + struct tpg_hw *hw, + uint32_t vc_slot, + int num_dts, + struct tpg_stream_config_t *stream) +{ + uint32_t val = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver1_reg_offset *tpg_reg = &cam_tpg101_reg; + + if (hw == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + soc_info = hw->soc_info; + + CAM_DBG(CAM_TPG, "Configureing vc 
: %d at the slot : %d num_dts=%d", + stream->vc, vc_slot, num_dts); + val = ((num_dts - 1) << + tpg_reg->tpg_num_dts_shift_val) | stream->vc; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc_cfg0); + + cam_io_w_mb(stream->hbi, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc_cfg1); + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_common_gen_cfg); + + cam_io_w_mb(stream->vbi, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vbi_cfg); + + return 0; +} + +static int tpg_hw_v_1_0_reset(struct tpg_hw *hw, void *data) +{ + struct cam_hw_soc_info *soc_info = NULL; + uint32_t val; + struct cam_tpg_ver1_reg_offset *tpg_reg = &cam_tpg101_reg; + if (hw == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + soc_info = hw->soc_info; + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_ctrl); + + /* Reset the TOP tpg mux sel*/ + val = cam_io_r_mb(soc_info->reg_map[1].mem_base + + tpg_reg->top_mux_reg_offset); + val &= ~(1 << hw->hw_idx); + + cam_io_w_mb(val, + soc_info->reg_map[1].mem_base + tpg_reg->top_mux_reg_offset); + CAM_INFO(CAM_TPG, "TPG:%d Reset Top Mux: 0x%x", + hw->hw_idx, val); + + return 0; +} + +int tpg_hw_v_1_0_process_cmd( + struct tpg_hw *hw, + uint32_t cmd, + void *arg) +{ + + if (hw == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + switch(cmd) { + case TPG_CONFIG_VC: + { + struct vc_config_args *vc_config = + (struct vc_config_args *)arg; + + if (vc_config == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + configure_vc(hw, + vc_config->vc_slot, + vc_config->num_dts, + vc_config->stream); + } + break; + case TPG_CONFIG_DT: + { + struct dt_config_args *dt_config = + (struct dt_config_args *)arg; + + if (dt_config == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + configure_dt(hw, + dt_config->vc_slot, + dt_config->dt_slot, + dt_config->stream); + } + break; + case TPG_CONFIG_CTRL: + configure_global_configs(hw, arg); + 
break; + default: + CAM_ERR(CAM_TPG, "invalid argument"); + break; + } + return 0; +} + +int tpg_hw_v_1_0_stop(struct tpg_hw *hw, void *data) +{ + CAM_INFO(CAM_TPG, "TPG V1.0 HWL stop"); + tpg_hw_v_1_0_reset(hw, data); + return 0; +} + +int tpg_hw_v_1_0_start(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.3 HWL start"); + return 0; +} + +int tpg_hw_v_1_0_init(struct tpg_hw *hw, void *data) +{ + CAM_INFO(CAM_TPG, "TPG V1.0 HWL init"); + tpg_hw_v_1_0_reset(hw, data); + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0.h new file mode 100644 index 0000000000..1f6d2cec9a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __TPG_HW_V_1_0_H__ +#define __TPG_HW_V_1_0_H__ + +#include "../tpg_hw.h" + +struct cam_tpg_ver1_reg_offset { + uint32_t tpg_hw_version; + uint32_t tpg_hw_status; + uint32_t tpg_ctrl; + uint32_t tpg_vc_cfg0; + uint32_t tpg_vc_cfg1; + uint32_t tpg_lfsr_seed; + uint32_t tpg_dt_0_cfg_0; + uint32_t tpg_dt_1_cfg_0; + uint32_t tpg_dt_2_cfg_0; + uint32_t tpg_dt_3_cfg_0; + uint32_t tpg_dt_0_cfg_1; + uint32_t tpg_dt_1_cfg_1; + uint32_t tpg_dt_2_cfg_1; + uint32_t tpg_dt_3_cfg_1; + uint32_t tpg_dt_0_cfg_2; + uint32_t tpg_dt_1_cfg_2; + uint32_t tpg_dt_2_cfg_2; + uint32_t tpg_dt_3_cfg_2; + uint32_t tpg_color_bar_cfg; + uint32_t tpg_common_gen_cfg; + uint32_t tpg_vbi_cfg; + uint32_t tpg_test_bus_crtl; + uint32_t tpg_spare; + + /* configurations */ + uint32_t major_version; + uint32_t minor_version; + uint32_t version_incr; + uint32_t tpg_en_shift_val; + uint32_t tpg_phy_sel_shift_val; + uint32_t tpg_num_active_lines_shift; + uint32_t tpg_fe_pkt_en_shift; + uint32_t tpg_fs_pkt_en_shift; + uint32_t tpg_line_interleaving_mode_shift; + uint32_t tpg_num_dts_shift_val; + uint32_t tpg_v_blank_cnt_shift; + uint32_t tpg_dt_encode_format_shift; + uint32_t tpg_payload_mode_color; + uint32_t tpg_split_en_shift; + uint32_t top_mux_reg_offset; +}; + +/** + * @brief : initialize the tpg hw v 1.0 + * + * @param hw: tpg hw instance + * @param data: initialize data + * + * @return : return 0 on success + */ +int tpg_hw_v_1_0_init(struct tpg_hw *hw, void *data); + +/** + * @brief : start tpg hw v 1.0 + * + * @param hw: tpg hw instance + * @param data: start argument + * + * @return : 0 on success + */ +int tpg_hw_v_1_0_start(struct tpg_hw *hw, void *data); + +/** + * @brief : stop tpg hw + * + * @param hw: tpg hw instance + * @param data: arguments to stop tpg hw 1.0 + * + * @return : 0 on success + */ +int tpg_hw_v_1_0_stop(struct tpg_hw *hw, void *data); + + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0_data.h 
b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0_data.h new file mode 100644 index 0000000000..e1a374f8af --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_0/tpg_hw_v_1_0_data.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __TPG_HW_V_1_0_DATA_H__ +#define __TPG_HW_V_1_0_DATA_H__ + +#include "../tpg_hw.h" +#include "tpg_hw_v_1_0.h" + +struct tpg_hw_ops tpg_hw_v_1_0_ops = { + .start = tpg_hw_v_1_0_start, + .stop = tpg_hw_v_1_0_stop, + .init = tpg_hw_v_1_0_init, +}; + +struct tpg_hw_info tpg_v_1_0_hw_info = { + .version = TPG_HW_VERSION_1_0, + .max_vc_channels = 2, + .max_dt_channels_per_vc = 4, + .ops = &tpg_hw_v_1_0_ops, +}; + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2.c new file mode 100644 index 0000000000..d72f6d5c81 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "tpg_hw_v_1_2.h" + +/* TPG HW IDs */ +enum cam_tpg_hw_id { + CAM_TPG_0 = 13, + CAM_TPG_1, + CAM_TPG_MAX, +}; + +static struct cam_tpg_ver_1_2_reg_offset cam_tpg102_reg = { + .tpg_hw_version = 0x0, + .tpg_hw_status = 0x4, + .tpg_module_cfg = 0x60, + .tpg_cfg0 = 0x68, + .tpg_cfg1 = 0x6C, + .tpg_cfg2 = 0x70, + .tpg_cfg3 = 0x74, + .tpg_spare = 0x1FC, + + /* configurations */ + .major_version = 1, + .minor_version = 0, + .version_incr = 2, + .tpg_en_shift = 0, + .tpg_hbi_shift = 20, + .tpg_dt_shift = 11, + .tpg_rotate_period_shift = 5, + .tpg_split_en_shift = 4, + .top_mux_reg_offset = 0x90, + .tpg_mux_sel_tpg_0_shift = 0, + .tpg_mux_sel_tpg_1_shift = 8, +}; + +static int configure_global_configs( + struct tpg_hw *hw, + int num_vcs, + struct tpg_global_config_t *configs) +{ + uint32_t val; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_2_reg_offset *tpg_reg = &cam_tpg102_reg; + + if (!hw) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + soc_info = hw->soc_info; + + if (num_vcs <= 0) { + CAM_ERR(CAM_TPG, "Invalid vc count"); + return -EINVAL; + } + + /* Program number of frames */ + val = 0xFFFFF; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_cfg3); + CAM_DBG(CAM_TPG, "TPG[%d] cfg3=0x%x", + hw->hw_idx, val); + + val = cam_io_r_mb(soc_info->reg_map[1].mem_base + + tpg_reg->top_mux_reg_offset); + + if (hw->hw_idx == CAM_TPG_0) + val |= 1 << tpg_reg->tpg_mux_sel_tpg_0_shift; + else if (hw->hw_idx == CAM_TPG_1) + val |= 1 << tpg_reg->tpg_mux_sel_tpg_1_shift; + + cam_io_w_mb(val, + soc_info->reg_map[1].mem_base + tpg_reg->top_mux_reg_offset); + CAM_INFO(CAM_TPG, "TPG[%d] Set CPAS top mux: 0x%x", + hw->hw_idx, val); + + val = (1 << tpg_reg->tpg_en_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_module_cfg); + CAM_DBG(CAM_TPG, "TPG[%d] tpg_module_cfg=0x%x", hw->hw_idx, val); + + return 0; +} + +static int configure_dt( + struct tpg_hw *hw, + uint32_t vc_slot, + uint32_t dt_slot, + 
struct tpg_stream_config_t *stream) +{ + uint32_t val; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_2_reg_offset *tpg_reg = &cam_tpg102_reg; + if (!hw) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + soc_info = hw->soc_info; + CAM_DBG(CAM_TPG, "TPG[%d] slot(%d,%d) <= dt:%d", + hw->hw_idx, + vc_slot, + dt_slot, + stream->dt); + + val = (((stream->stream_dimension.width & 0xFFFF) << 16) | + (stream->stream_dimension.height & 0xFFFF)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_cfg0); + CAM_DBG(CAM_TPG, "TPG[%d] cfg0=0x%x", + hw->hw_idx, val); + + val = stream->dt << tpg_reg->tpg_dt_shift; + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_cfg2); + CAM_DBG(CAM_TPG, "TPG[%d] cfg2=0x%x", + hw->hw_idx, val); + + return 0; +} + +static int configure_vc( + struct tpg_hw *hw, + uint32_t vc_slot, + int num_dts, + struct tpg_stream_config_t *stream) +{ + uint32_t val = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_2_reg_offset *tpg_reg = &cam_tpg102_reg; + if (!hw) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + soc_info = hw->soc_info; + if (stream->cb_mode == TPG_COLOR_BAR_MODE_SPLIT) + val |= (1 << tpg_reg->tpg_split_en_shift); + + CAM_DBG(CAM_TPG, "TPG[%d] period: %d", hw->hw_idx, stream->rotate_period); + val |= ((stream->rotate_period & 0x3F) << + tpg_reg->tpg_rotate_period_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_cfg2); + CAM_DBG(CAM_TPG, "TPG[%d] cfg2=0x%x", + hw->hw_idx, val); + + val = stream->hbi << tpg_reg->tpg_hbi_shift | stream->vbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_cfg1); + CAM_DBG(CAM_TPG, "TPG[%d] cfg1=0x%x", + hw->hw_idx, val); + + return 0; +} + +static int tpg_hw_v_1_2_reset( + struct tpg_hw *hw, void *data) +{ + struct cam_hw_soc_info *soc_info = NULL; + uint32_t val; + struct cam_tpg_ver_1_2_reg_offset *tpg_reg = &cam_tpg102_reg; + if (!hw) { + CAM_ERR(CAM_TPG, "invalid 
params"); + return -EINVAL; + } + + soc_info = hw->soc_info; + + /* Clear out tpg_module_cfg before reset */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + tpg_reg->tpg_module_cfg); + + /* Read the version */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + tpg_reg->tpg_hw_version); + CAM_INFO(CAM_TPG, "TPG[%d] TPG HW version: 0x%x started", + hw->hw_idx, val); + return 0; +} + +int tpg_hw_v_1_2_process_cmd( + struct tpg_hw *hw, + uint32_t cmd, + void *arg) +{ + int rc = 0; + if (hw == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + + CAM_DBG(CAM_TPG, "TPG[%d] Cmd opcode:0x%x", hw->hw_idx, cmd); + switch(cmd) { + case TPG_CONFIG_VC: + { + struct vc_config_args *vc_config = + (struct vc_config_args *)arg; + + if (vc_config == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_vc(hw, + vc_config->vc_slot, + vc_config->num_dts, + vc_config->stream); + } + break; + case TPG_CONFIG_DT: + { + struct dt_config_args *dt_config = + (struct dt_config_args *)arg; + + if (dt_config == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_dt(hw, + dt_config->vc_slot, + dt_config->dt_slot, + dt_config->stream); + } + break; + case TPG_CONFIG_CTRL: + { + struct global_config_args *global_args = + (struct global_config_args *)arg; + rc = configure_global_configs(hw, + global_args->num_vcs, + global_args->globalconfig); + } + break; + default: + CAM_ERR(CAM_TPG, "invalid argument"); + break; + } + return rc; +} + +int tpg_hw_v_1_2_start(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.2 HWL start"); + return 0; +} + +int tpg_hw_v_1_2_stop(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.2 HWL stop"); + tpg_hw_v_1_2_reset(hw, data); + return 0; +} + +int tpg_hw_v_1_2_dump_status(struct tpg_hw *hw, void *data) +{ + struct cam_hw_soc_info *soc_info = NULL; + uint32_t val; + struct cam_tpg_ver_1_2_reg_offset *tpg_reg = &cam_tpg102_reg; + + if (!hw) { + 
CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + soc_info = hw->soc_info; + CAM_DBG(CAM_TPG, "TPG V1.2 HWL status dump"); + /* Read the version */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + tpg_reg->tpg_hw_status); + CAM_INFO(CAM_TPG, "TPG[%d] TPG HW status: 0x%x started", + hw->hw_idx, val); + + return 0; +} + +int tpg_hw_v_1_2_init(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.2 HWL init"); + tpg_hw_v_1_2_reset(hw, data); + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2.h new file mode 100644 index 0000000000..54a8ff9d33 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */
+
+#ifndef __TPG_HW_V_1_2_H__
+#define __TPG_HW_V_1_2_H__
+
+#include "../tpg_hw.h"
+
+struct cam_tpg_ver_1_2_reg_offset {
+	uint32_t tpg_hw_version;
+	uint32_t tpg_hw_status;
+	uint32_t tpg_module_cfg;
+	uint32_t tpg_cfg0;
+	uint32_t tpg_cfg1;
+	uint32_t tpg_cfg2;
+	uint32_t tpg_cfg3;
+	uint32_t tpg_spare;
+
+	/* configurations */
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint32_t version_incr;
+	uint32_t tpg_en_shift;
+	uint32_t tpg_hbi_shift;
+	uint32_t tpg_dt_shift;
+	uint32_t tpg_rotate_period_shift;
+	uint32_t tpg_split_en_shift;
+	uint32_t top_mux_reg_offset;
+	uint32_t tpg_mux_sel_tpg_0_shift;
+	uint32_t tpg_mux_sel_tpg_1_shift;
+};
+
+
+/**
+ * @brief initialize the tpg hw instance
+ *
+ * @param hw : tpg hw instance
+ * @param data : argument for initialize
+ *
+ * @return : 0 on success
+ */
+int tpg_hw_v_1_2_init(struct tpg_hw *hw, void *data);
+
+/**
+ * @brief start tpg hw
+ *
+ * @param hw : tpg hw instance
+ * @param data : tpg hw instance data
+ *
+ * @return : 0 on success
+ */
+int tpg_hw_v_1_2_start(struct tpg_hw *hw, void *data);
+
+/**
+ * @brief stop tpg hw
+ *
+ * @param hw : tpg hw instance
+ * @param data : argument for tpg hw stop
+ *
+ * @return : 0 on success
+ */
+int tpg_hw_v_1_2_stop(struct tpg_hw *hw, void *data);
+
+/**
+ * @brief process a command sent from hw layer
+ *
+ * @param hw : tpg hw instance
+ * @param cmd : command to process
+ * @param arg : argument corresponding to command
+ *
+ * @return : 0 on success
+ */
+int tpg_hw_v_1_2_process_cmd(struct tpg_hw *hw,
+	uint32_t cmd, void *arg);
+
+/**
+ * @brief dump hw status registers
+ *
+ * @param hw : tpg hw instance
+ * @param data : argument for status dump
+ *
+ * @return : 0 on success
+ */
+int tpg_hw_v_1_2_dump_status(struct tpg_hw *hw, void *data);
+
+#endif
diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2_data.h
b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2_data.h new file mode 100644 index 0000000000..490192fd2d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_2/tpg_hw_v_1_2_data.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __TPG_HW_V_1_2_DATA_H__ +#define __TPG_HW_V_1_2_DATA_H__ + +#include "../tpg_hw.h" +#include "tpg_hw_v_1_2.h" + +struct tpg_hw_ops tpg_hw_v_1_2_ops = { + .start = tpg_hw_v_1_2_start, + .stop = tpg_hw_v_1_2_stop, + .init = tpg_hw_v_1_2_init, + .process_cmd = tpg_hw_v_1_2_process_cmd, + .dump_status = tpg_hw_v_1_2_dump_status, +}; + +struct tpg_hw_info tpg_v_1_2_hw_info = { + .version = TPG_HW_VERSION_1_2, + .max_vc_channels = 1, + .max_dt_channels_per_vc = 1, + .ops = &tpg_hw_v_1_2_ops, +}; + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3.c new file mode 100644 index 0000000000..a4e4480d2f --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3.c @@ -0,0 +1,894 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "tpg_hw_v_1_3.h" + +enum tpg_hw_v_1_3_encode_fomat_t { + RAW_8_BIT = 1, + RAW_10_BIT, + RAW_12_BIT, + RAW_14_BIT, + RAW_16_BIT +}; + + +#define FRAME_INTERLEAVE 0x0 +#define LINE_INTERLEAVE 0x1 +#define SHDR_INTERLEAVE 0x2 +#define SPARSE_PD_INTERLEAVE 0x3 +#define SHDR_SPARSE_PD_INTERLEAVE 0x4 +#define CFA_PATTERN_ROW_WIDTH 8 +#define CFA_PATTERN_BITS_PER_INDEX 2 +#define Invalid 0x0 +#define Red 0x0 +#define Green 0x1 +#define Blue 0x2 +#define IR 0x3 +#define Mono 0x3 + +static int get_tpg_vc_dt_pattern_id( + enum tpg_interleaving_format_t vc_dt_pattern) +{ + switch (vc_dt_pattern) { + case TPG_INTERLEAVING_FORMAT_INVALID: + case TPG_INTERLEAVING_FORMAT_MAX: + case TPG_INTERLEAVING_FORMAT_FRAME: + return FRAME_INTERLEAVE; + case TPG_INTERLEAVING_FORMAT_LINE: + return LINE_INTERLEAVE; + case TPG_INTERLEAVING_FORMAT_SHDR: + return SHDR_INTERLEAVE; + case TPG_INTERLEAVING_FORMAT_SPARSE_PD: + return SPARSE_PD_INTERLEAVE; + case TPG_INTERLEAVING_FORMAT_SHDR_SPARSE_PD: + return SHDR_SPARSE_PD_INTERLEAVE; + + } + return FRAME_INTERLEAVE; +} + +static int configure_global_configs( + struct tpg_hw *hw, + int num_vcs, + struct tpg_global_config_t *configs) +{ + uint32_t val, phy_type = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + tpg_reg = hw->hw_info->hw_data; + soc_info = hw->soc_info; + + if (configs->phy_type == TPG_PHY_TYPE_CPHY) + phy_type = 1; + + if (num_vcs <= 0) { + CAM_ERR(CAM_TPG, "Invalid vc count"); + return -EINVAL; + } + + val = configs->skip_pattern; + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_throttle); + CAM_DBG(CAM_TPG, "tpg[%d] throttle=0x%x", hw->hw_idx, val); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_top_irq_mask); + + + val = ((num_vcs - 1) << + (tpg_reg->tpg_num_active_vcs_shift) | + (configs->lane_count - 1) << + 
tpg_reg->tpg_num_active_lanes_shift) | + get_tpg_vc_dt_pattern_id(configs->interleaving_format) << + (tpg_reg->tpg_vc_dt_pattern_id_shift) | + (phy_type << tpg_reg->tpg_cphy_dphy_sel_shift_val) | + (1 << tpg_reg->tpg_en_shift_val); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl); + CAM_DBG(CAM_TPG, "tpg[%d] tpg_ctrl=0x%x", hw->hw_idx, val); + return 0; +} + +static int get_tpg_encode_format(int sw_encode_format) +{ + switch (sw_encode_format) { + case PACK_8_BIT: + return RAW_8_BIT; + case PACK_10_BIT: + return RAW_10_BIT; + case PACK_12_BIT: + return RAW_12_BIT; + case PACK_14_BIT: + return RAW_14_BIT; + case PACK_16_BIT: + return RAW_16_BIT; + } + return RAW_8_BIT; +} + +#define INCREMENTING 0x0 +#define ALTERNATING_55_AA 0x1 +#define RANDOM 0x4 +#define USER_SPECIFIED 0x5 +#define COLOR_BARS 0x8 + +static int get_tpg_payload_mode(enum tpg_pattern_t pattern) +{ + switch (pattern) { + case TPG_PATTERN_INVALID: + case TPG_PATTERN_REAL_IMAGE: + case TPG_PATTERN_COLOR_BAR: + return COLOR_BARS; + case TPG_PATTERN_RANDOM_PIXL: + case TPG_PATTERN_RANDOM_INCREMENTING_PIXEL: + return RANDOM; + case TPG_PATTERN_ALTERNATING_55_AA: + return ALTERNATING_55_AA; + case TPG_PATTERN_ALTERNATING_USER_DEFINED: + return USER_SPECIFIED; + default: + return COLOR_BARS; + } + return COLOR_BARS; +} + +static int get_pixel_coordinate( + int cfa_pattern_start_index, + int cfa_pattern_end_index, + uint32_t *val, + struct tpg_stream_config_v3_t *configs) +{ + uint32_t shift = 0; + int idx = 0; + int i = 0; + int j = 0; + *val = 0; + for (i = cfa_pattern_start_index; i < cfa_pattern_end_index; i++) { + for (j = 0; j < configs->cfa_info.pattern_width; j++) { + shift = ((i * CFA_PATTERN_ROW_WIDTH) + j) * + CFA_PATTERN_BITS_PER_INDEX; + idx = i * configs->cfa_info.pattern_height + j; + *val |= (configs->cfa_info.pixel_coordinate[idx].pixel_type) << shift; + } + } + return 0; +} + +static int configure_xcfa_array_v3( + struct tpg_hw *hw, + struct tpg_stream_config_v3_t 
*configs) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + uint32_t val = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + switch (configs->xcfa_type) { + case XCFA_TYPE_RGBIR: + get_pixel_coordinate(0, 2, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + get_pixel_coordinate(2, configs->cfa_info.pattern_height, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + break; + case XCFA_TYPE_QUADCFA: + get_pixel_coordinate(0, 2, &val, configs); + CAM_DBG(CAM_TPG, "val = 0x%x", val); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + get_pixel_coordinate(2, configs->cfa_info.pattern_height, &val, configs); + CAM_DBG(CAM_TPG, "val = 0x%x", val); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + break; + case XCFA_TYPE_THREEXTHREECFA: + get_pixel_coordinate(0, 2, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + get_pixel_coordinate(2, 4, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + get_pixel_coordinate(4, configs->cfa_info.pattern_height, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color2); + break; + case XCFA_TYPE_FOURXFOURCFA: + get_pixel_coordinate(0, 2, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + get_pixel_coordinate(2, 4, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + get_pixel_coordinate(4, 6, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + 
tpg_reg->tpg_vc0_color_bar_cfa_color2); + get_pixel_coordinate(6, configs->cfa_info.pattern_height, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color3); + break; + default: + break; + } + return 0; +} + +static int configure_dt_v3( + struct tpg_hw *hw, + uint32_t vc_slot, + uint32_t dt_slot, + struct tpg_stream_config_v3_t *stream) +{ + uint32_t val; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + CAM_DBG(CAM_TPG, "TPG[%d] slot(%d,%d) <= dt:%d", + hw->hw_idx, + vc_slot, + dt_slot, + stream->dt); + + val = (((stream->stream_dimension.width & 0xFFFF) << 16) | + (stream->stream_dimension.height & 0xFFFF)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_0 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_0=0x%x", + hw->hw_idx, + vc_slot, dt_slot, val); + + cam_io_w_mb(stream->dt, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_1 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_1=0x%x", + hw->hw_idx, + vc_slot, dt_slot, stream->dt); + + val = ((get_tpg_encode_format(stream->pixel_depth) & 0xF) << + tpg_reg->tpg_dt_encode_format_shift) | + get_tpg_payload_mode(stream->pattern_type); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_2 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_2=0x%x", + hw->hw_idx, + vc_slot, dt_slot, val); + + return 0; +} + +static int configure_dt( + struct tpg_hw *hw, + uint32_t vc_slot, + uint32_t dt_slot, + struct tpg_stream_config_t *stream) +{ + uint32_t val; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info 
|| !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + CAM_DBG(CAM_TPG, "TPG[%d] slot(%d,%d) <= dt:%d", + hw->hw_idx, + vc_slot, + dt_slot, + stream->dt); + + val = (((stream->stream_dimension.width & 0xFFFF) << 16) | + (stream->stream_dimension.height & 0xFFFF)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_0 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_0=0x%x", + hw->hw_idx, + vc_slot, dt_slot, val); + + cam_io_w_mb(stream->dt, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_1 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_1=0x%x", + hw->hw_idx, + vc_slot, dt_slot, stream->dt); + + val = ((get_tpg_encode_format(stream->pixel_depth) & 0xF) << + tpg_reg->tpg_dt_encode_format_shift) | + get_tpg_payload_mode(stream->pattern_type); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_2 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_2=0x%x", + hw->hw_idx, + vc_slot, dt_slot, val); + + return 0; +} + + + +#define VC1_GAIN 0x100 + +static int configure_vc_v3( + struct tpg_hw *hw, + uint32_t vc_slot, + int num_dts, + struct tpg_stream_config_v3_t *stream) +{ + uint32_t val = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + /* Use CFA pattern here */ + if (stream->output_format == TPG_IMAGE_FORMAT_QCFA) + val |= (1 << tpg_reg->tpg_color_bar_qcfa_en_shift); + + if (stream->cb_mode == TPG_COLOR_BAR_MODE_SPLIT) + val |= (1 << tpg_reg->tpg_split_en_shift); + + if (stream->cfa_info_exist != 0) { + val |= ((stream->cfa_info.pattern_height - 1) << 
tpg_reg->tpg_size_y_shift); + val |= ((stream->cfa_info.pattern_width - 1) << tpg_reg->tpg_size_x_shift); + val |= (1 << tpg_reg->tpg_xcfa_en_shift); + configure_xcfa_array_v3(hw, stream); + } + + CAM_DBG(CAM_TPG, "TPG[%d] period: %d", hw->hw_idx, stream->rotate_period); + val |= ((stream->rotate_period & 0x3F) << + tpg_reg->tpg_color_bar_qcfa_rotate_period_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_color_bar_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_color_bar_cfg=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->hbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_hbi_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_hbi_cfg=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->vbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_vbi_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_vbi_cgf=0x%x", + hw->hw_idx, + vc_slot, val); + + cam_io_w_mb(0x12345678, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_lfsr_seed + (0x60 * vc_slot)); + + val = ((0 << tpg_reg->tpg_num_frames_shift_val) | + ((num_dts-1) << tpg_reg->tpg_num_dts_shift_val) | + stream->vc); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_cfg0 + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_cfg0=0x%x", + hw->hw_idx, + vc_slot, val); + if (hw->hw_info->shdr_overlap == 1) { + cam_io_w_mb(hw->hw_info->shdr_overlap << tpg_reg->tpg_overlap_shdr_en_shift, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl); + } + if (hw->hw_info->shdr_offset_num_batch >= 0 && vc_slot > 0) { + val = (VC1_GAIN << tpg_reg->tpg_gain_shift); + val |= (hw->hw_info->shdr_offset_num_batch << + tpg_reg->tpg_shdr_offset_num_batch_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc1_gain_cfg + (0x60 * (vc_slot-1))); + val = ((stream->shdr_line_offset0 * vc_slot) + << tpg_reg->tpg_shdr_line_offset0_shift); + val |= ((stream->shdr_line_offset1 * vc_slot) + << 
tpg_reg->tpg_shdr_line_offset1_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc1_shdr_cfg + (0x60 * (vc_slot-1))); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_cfg0=0x%x shdr", + hw->hw_idx, + vc_slot, val); + } + + return 0; +} + +#define RGGB_IR_0 0x00770091 +#define RGGB_IR_1 0x00770019 +#define RGGB_2x2 0x05055A5A +#define RGGB_3x3_0 0x05400540 +#define RGGB_3x3_1 0x0a950540 +#define RGGB_3x3_2 0x0a950a95 +#define RGGB_4x4_0 0x55005500 +#define RGGB_4x4_1 0x55005500 +#define RGGB_4x4_2 0xaa55aa55 +#define RGGB_4x4_3 0xaa55aa55 +#define VC1_GAIN 0x100 + +static int configure_xcfa_array(struct tpg_hw *hw, int config) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + switch (config) { + case 1: + cam_io_w_mb(RGGB_IR_0, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + cam_io_w_mb(RGGB_IR_1, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + break; + case 2: + cam_io_w_mb(RGGB_2x2, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + break; + case 3: + cam_io_w_mb(RGGB_3x3_0, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + cam_io_w_mb(RGGB_3x3_1, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + cam_io_w_mb(RGGB_3x3_2, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color2); + break; + case 4: + cam_io_w_mb(RGGB_4x4_0, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + cam_io_w_mb(RGGB_4x4_1, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + cam_io_w_mb(RGGB_4x4_2, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color2); + cam_io_w_mb(RGGB_4x4_3, + soc_info->reg_map[0].mem_base + 
tpg_reg->tpg_vc0_color_bar_cfa_color3); + break; + break; + + } + return 0; +} + +static int configure_vc( + struct tpg_hw *hw, + uint32_t vc_slot, + int num_dts, + struct tpg_stream_config_t *stream) +{ + uint32_t val = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + /* Use CFA pattern here */ + if (stream->output_format == TPG_IMAGE_FORMAT_QCFA) + val |= (1 << tpg_reg->tpg_color_bar_qcfa_en_shift); + + if (stream->cb_mode == TPG_COLOR_BAR_MODE_SPLIT) + val |= (1 << tpg_reg->tpg_split_en_shift); + + if (stream->xcfa_debug > 0) { + if (stream->xcfa_debug == 1) { + val |= (3 << tpg_reg->tpg_size_y_shift); + val |= (3 << tpg_reg->tpg_size_x_shift); + } else { + val |= ((stream->xcfa_debug * 2 - 1) << tpg_reg->tpg_size_y_shift); + val |= ((stream->xcfa_debug * 2 - 1) << tpg_reg->tpg_size_x_shift); + } + val |= (1 << tpg_reg->tpg_xcfa_en_shift); + configure_xcfa_array(hw, stream->xcfa_debug); + CAM_DBG(CAM_TPG, "xcfa_debug = %d", stream->xcfa_debug); + } + + CAM_DBG(CAM_TPG, "TPG[%d] period: %d", hw->hw_idx, stream->rotate_period); + val |= ((stream->rotate_period & 0x3F) << + tpg_reg->tpg_color_bar_qcfa_rotate_period_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_color_bar_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_color_bar_cfg=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->hbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_hbi_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_hbi_cfg=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->vbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_vbi_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_vbi_cgf=0x%x", + hw->hw_idx, + vc_slot, val); + + cam_io_w_mb(0x12345678, + 
soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_lfsr_seed + (0x60 * vc_slot)); + + val = ((0 << tpg_reg->tpg_num_frames_shift_val) | + ((num_dts-1) << tpg_reg->tpg_num_dts_shift_val) | + stream->vc); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_cfg0 + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_cfg0=0x%x", + hw->hw_idx, + vc_slot, val); + if (hw->hw_info->shdr_overlap == 1) { + cam_io_w_mb(hw->hw_info->shdr_overlap << tpg_reg->tpg_overlap_shdr_en_shift, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl); + } + if (hw->hw_info->shdr_offset_num_batch >= 0 && vc_slot > 0) { + val = (VC1_GAIN << tpg_reg->tpg_gain_shift); + val |= (hw->hw_info->shdr_offset_num_batch << + tpg_reg->tpg_shdr_offset_num_batch_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc1_gain_cfg + (0x60 * (vc_slot-1))); + val = ((stream->shdr_line_offset0 * vc_slot) + << tpg_reg->tpg_shdr_line_offset0_shift); + val |= ((stream->shdr_line_offset1 * vc_slot) + << tpg_reg->tpg_shdr_line_offset1_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc1_shdr_cfg + (0x60 * (vc_slot-1))); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_cfg0=0x%x shdr", + hw->hw_idx, + vc_slot, val); + } + return 0; +} + +static int tpg_hw_v_1_3_reset( + struct tpg_hw *hw, void *data) +{ + struct cam_hw_soc_info *soc_info = NULL; + uint32_t val; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + /* Clear out tpg_ctrl and irqs before reset */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl); + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_top_irq_mask); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_top_irq_clear); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_top_irq_cmd); + + cam_io_w_mb(1, 
soc_info->reg_map[0].mem_base + + tpg_reg->tpg_top_clear); + + /* Read the version */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + tpg_reg->tpg_hw_version); + CAM_INFO(CAM_TPG, "TPG[%d] TPG HW version: 0x%x started", + hw->hw_idx, val); + return 0; +} + +int tpg_hw_v_1_3_process_cmd( + struct tpg_hw *hw, + uint32_t cmd, + void *arg) +{ + int rc = 0; + if (hw == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + switch(cmd) { + case TPG_CONFIG_VC: + { + if (hw->stream_version == 1) { + struct vc_config_args *vc_config = + (struct vc_config_args *)arg; + + if (vc_config == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_vc(hw, + vc_config->vc_slot, + vc_config->num_dts, + vc_config->stream); + } else if (hw->stream_version == 3) { + struct vc_config_args_v3 *vc_config_v3 = + (struct vc_config_args_v3 *)arg; + + if (vc_config_v3 == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_vc_v3(hw, + vc_config_v3->vc_slot, + vc_config_v3->num_dts, + vc_config_v3->stream); + } + } + break; + case TPG_CONFIG_DT: + { + if (hw->stream_version == 1) { + struct dt_config_args *dt_config = + (struct dt_config_args *)arg; + + if (dt_config == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_dt(hw, + dt_config->vc_slot, + dt_config->dt_slot, + dt_config->stream); + } else if (hw->stream_version == 3) { + struct dt_config_args_v3 *dt_config_v3 = + (struct dt_config_args_v3 *)arg; + + if (dt_config_v3 == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_dt_v3(hw, + dt_config_v3->vc_slot, + dt_config_v3->dt_slot, + dt_config_v3->stream); + } + } + break; + case TPG_CONFIG_CTRL: + { + struct global_config_args *global_args = + (struct global_config_args *)arg; + rc = configure_global_configs(hw, + global_args->num_vcs, + global_args->globalconfig); + } + break; + default: + CAM_ERR(CAM_TPG, "invalid argument"); 
+ break; + } + return rc; +} + +int tpg_hw_v_1_3_start(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.3 HWL start"); + return 0; +} + +int tpg_hw_v_1_3_stop(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.3 HWL stop"); + tpg_hw_v_1_3_reset(hw, data); + return 0; +} + +int tpg_hw_v_1_3_dump_status(struct tpg_hw *hw, void *data) +{ + struct cam_hw_soc_info *soc_info = NULL; + uint32_t val; + struct cam_tpg_ver_1_3_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + CAM_DBG(CAM_TPG, "TPG V1.3 HWL status dump"); + /* Read the version */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + tpg_reg->tpg_hw_status); + CAM_INFO(CAM_TPG, "TPG[%d] TPG HW status: 0x%x started", + hw->hw_idx, val); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + tpg_reg->tpg_top_irq_status); + CAM_INFO(CAM_TPG, "TPG[%d] TPG HW irq status: 0x%x started", + hw->hw_idx, val); + + return 0; +} + +int tpg_hw_v_1_3_init(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.3 HWL init"); + tpg_hw_v_1_3_reset(hw, data); + return 0; +} + +static int tpg_1_3_get_xcfa_test(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get xcfa test %d", hw->hw_info->xcfa_debug); + *val = hw->hw_info->xcfa_debug; + return 0; +} +static int tpg_1_3_get_shdr_overlap_test(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get shdr test : %d", hw->hw_info->shdr_overlap); + *val = hw->hw_info->shdr_overlap; + return 0; +} +static int tpg_1_3_get_shdr_offset_num_batch(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get shdr_num_batch : %d", hw->hw_info->shdr_offset_num_batch); + *val = hw->hw_info->shdr_offset_num_batch; + return 0; +} +static int tpg_1_3_get_shdr_line_offset0(void *data, u64 *val) 
+{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get shdr_offset0 : %d", hw->hw_info->shdr_line_offset0); + *val = hw->hw_info->shdr_line_offset0; + return 0; +} +static int tpg_1_3_get_shdr_line_offset1(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get shdr_offset1 : %d", hw->hw_info->shdr_line_offset1); + *val = hw->hw_info->shdr_line_offset1; + return 0; +} + + +static int tpg_1_3_set_xcfa_test(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set xcfa test prev : %d", hw->hw_info->xcfa_debug); + hw->hw_info->xcfa_debug = val; + return 0; +} +static int tpg_1_3_set_shdr_overlap_test(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set shdr test prev : %d", hw->hw_info->shdr_overlap); + hw->hw_info->shdr_overlap = val; + return 0; +} +static int tpg_1_3_set_shdr_offset_num_batch(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set shdr_num_batch : %d", hw->hw_info->shdr_offset_num_batch); + hw->hw_info->shdr_offset_num_batch = val; + return 0; +} +static int tpg_1_3_set_shdr_line_offset0(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set shdr_offset0 : %d", hw->hw_info->shdr_line_offset0); + hw->hw_info->shdr_line_offset0 = val; + return 0; +} +static int tpg_1_3_set_shdr_line_offset1(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set shdr_offset1 : %d", hw->hw_info->shdr_line_offset1); + hw->hw_info->shdr_line_offset1 = val; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_3_xcfa_test, + &tpg_1_3_get_xcfa_test, + &tpg_1_3_set_xcfa_test, + "%16lld"); + +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_3_shdr_overlap_test, + &tpg_1_3_get_shdr_overlap_test, + &tpg_1_3_set_shdr_overlap_test, + "%16lld"); +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_3_shdr_offset_num_batch, + &tpg_1_3_get_shdr_offset_num_batch, + 
&tpg_1_3_set_shdr_offset_num_batch, + "%16lld"); +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_3_shdr_line_offset0, + &tpg_1_3_get_shdr_line_offset0, + &tpg_1_3_set_shdr_line_offset0, + "%16lld"); +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_3_shdr_line_offset1, + &tpg_1_3_get_shdr_line_offset1, + &tpg_1_3_set_shdr_line_offset1, + "%16lld"); + + +int tpg_1_3_layer_init(struct tpg_hw *hw) +{ + struct dentry *dbgfileptr_parent = NULL; + char dir_name[160]; + + snprintf(dir_name, sizeof(dir_name), "tpg%d", + hw->hw_idx); + + dbgfileptr_parent = debugfs_create_dir(dir_name, NULL); + if (!dbgfileptr_parent) { + CAM_ERR(CAM_TPG, "Debug fs could not create directory"); + return -ENOENT; + } + + debugfs_create_file("tpg_xcfa_test", 0644, + dbgfileptr_parent, hw, &tpg_1_3_xcfa_test); + debugfs_create_file("tpg_shdr_overlap_test", 0644, + dbgfileptr_parent, hw, &tpg_1_3_shdr_overlap_test); + debugfs_create_file("tpg_shdr_offset_num_batch", 0644, + dbgfileptr_parent, hw, &tpg_1_3_shdr_offset_num_batch); + debugfs_create_file("tpg_shdr_line_offset0", 0644, + dbgfileptr_parent, hw, &tpg_1_3_shdr_line_offset0); + debugfs_create_file("tpg_shdr_line_offset1", 0644, + dbgfileptr_parent, hw, &tpg_1_3_shdr_line_offset1); + CAM_INFO(CAM_TPG, "Layer init called"); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3.h new file mode 100644 index 0000000000..717098bafb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __TPG_HW_V_1_3_H__ +#define __TPG_HW_V_1_3_H__ + +#include "../tpg_hw.h" + +struct cam_tpg_ver_1_3_reg_offset { + uint32_t tpg_hw_version; + uint32_t tpg_hw_status; + uint32_t tpg_ctrl; + uint32_t tpg_vc0_gain_cfg; + uint32_t tpg_vc0_cfg0; + uint32_t tpg_vc0_lfsr_seed; + uint32_t tpg_vc0_hbi_cfg; + uint32_t tpg_vc0_vbi_cfg; + uint32_t tpg_vc0_color_bar_cfg; + uint32_t tpg_vc0_dt_0_cfg_0; + uint32_t tpg_vc0_dt_0_cfg_1; + uint32_t tpg_vc0_dt_0_cfg_2; + uint32_t tpg_vc0_dt_1_cfg_0; + uint32_t tpg_vc0_dt_1_cfg_1; + uint32_t tpg_vc0_dt_1_cfg_2; + uint32_t tpg_vc0_dt_2_cfg_0; + uint32_t tpg_vc0_dt_2_cfg_1; + uint32_t tpg_vc0_dt_2_cfg_2; + uint32_t tpg_vc0_dt_3_cfg_0; + uint32_t tpg_vc0_dt_3_cfg_1; + uint32_t tpg_vc0_dt_3_cfg_2; + uint32_t tpg_vc0_color_bar_cfa_color0; + uint32_t tpg_vc0_color_bar_cfa_color1; + uint32_t tpg_vc0_color_bar_cfa_color2; + uint32_t tpg_vc0_color_bar_cfa_color3; + + uint32_t tpg_vc1_gain_cfg; + uint32_t tpg_vc1_shdr_cfg; + uint32_t tpg_vc1_cfg0; + uint32_t tpg_vc1_lfsr_seed; + uint32_t tpg_vc1_hbi_cfg; + uint32_t tpg_vc1_vbi_cfg; + uint32_t tpg_vc1_color_bar_cfg; + uint32_t tpg_vc1_dt_0_cfg_0; + uint32_t tpg_vc1_dt_0_cfg_1; + uint32_t tpg_vc1_dt_0_cfg_2; + uint32_t tpg_vc1_dt_1_cfg_0; + uint32_t tpg_vc1_dt_1_cfg_1; + uint32_t tpg_vc1_dt_1_cfg_2; + uint32_t tpg_vc1_dt_2_cfg_0; + uint32_t tpg_vc1_dt_2_cfg_1; + uint32_t tpg_vc1_dt_2_cfg_2; + uint32_t tpg_vc1_dt_3_cfg_0; + uint32_t tpg_vc1_dt_3_cfg_1; + uint32_t tpg_vc1_dt_3_cfg_2; + uint32_t tpg_vc1_color_bar_cfa_color0; + uint32_t tpg_vc1_color_bar_cfa_color1; + uint32_t tpg_vc1_color_bar_cfa_color2; + uint32_t tpg_vc1_color_bar_cfa_color3; + + uint32_t tpg_vc2_gain_cfg; + uint32_t tpg_vc2_shdr_cfg; + uint32_t tpg_vc2_cfg0; + uint32_t tpg_vc2_lfsr_seed; + uint32_t tpg_vc2_hbi_cfg; + uint32_t tpg_vc2_vbi_cfg; + uint32_t tpg_vc2_color_bar_cfg; + uint32_t tpg_vc2_dt_0_cfg_0; + uint32_t tpg_vc2_dt_0_cfg_1; + uint32_t tpg_vc2_dt_0_cfg_2; + uint32_t tpg_vc2_dt_1_cfg_0; + uint32_t 
tpg_vc2_dt_1_cfg_1; + uint32_t tpg_vc2_dt_1_cfg_2; + uint32_t tpg_vc2_dt_2_cfg_0; + uint32_t tpg_vc2_dt_2_cfg_1; + uint32_t tpg_vc2_dt_2_cfg_2; + uint32_t tpg_vc2_dt_3_cfg_0; + uint32_t tpg_vc2_dt_3_cfg_1; + uint32_t tpg_vc2_dt_3_cfg_2; + uint32_t tpg_vc2_color_bar_cfa_color0; + uint32_t tpg_vc2_color_bar_cfa_color1; + uint32_t tpg_vc2_color_bar_cfa_color2; + uint32_t tpg_vc2_color_bar_cfa_color3; + + uint32_t tpg_vc3_gain_cfg; + uint32_t tpg_vc3_shdr_cfg; + uint32_t tpg_vc3_cfg0; + uint32_t tpg_vc3_lfsr_seed; + uint32_t tpg_vc3_hbi_cfg; + uint32_t tpg_vc3_vbi_cfg; + uint32_t tpg_vc3_color_bar_cfg; + uint32_t tpg_vc3_dt_0_cfg_0; + uint32_t tpg_vc3_dt_0_cfg_1; + uint32_t tpg_vc3_dt_0_cfg_2; + uint32_t tpg_vc3_dt_1_cfg_0; + uint32_t tpg_vc3_dt_1_cfg_1; + uint32_t tpg_vc3_dt_1_cfg_2; + uint32_t tpg_vc3_dt_2_cfg_0; + uint32_t tpg_vc3_dt_2_cfg_1; + uint32_t tpg_vc3_dt_2_cfg_2; + uint32_t tpg_vc3_dt_3_cfg_0; + uint32_t tpg_vc3_dt_3_cfg_1; + uint32_t tpg_vc3_dt_3_cfg_2; + uint32_t tpg_vc3_color_bar_cfa_color0; + uint32_t tpg_vc3_color_bar_cfa_color1; + uint32_t tpg_vc3_color_bar_cfa_color2; + uint32_t tpg_vc3_color_bar_cfa_color3; + + uint32_t tpg_throttle; + uint32_t tpg_top_irq_status; + uint32_t tpg_top_irq_mask; + uint32_t tpg_top_irq_clear; + uint32_t tpg_top_irq_set; + uint32_t tpg_top_irq_cmd; + uint32_t tpg_top_clear; + uint32_t tpg_test_bus_crtl; + uint32_t tpg_spare; + + /* configurations */ + uint32_t major_version; + uint32_t minor_version; + uint32_t version_incr; + uint32_t tpg_en_shift_val; + uint32_t tpg_cphy_dphy_sel_shift_val; + uint32_t tpg_num_active_lanes_shift; + uint32_t tpg_fe_pkt_en_shift; + uint32_t tpg_fs_pkt_en_shift; + uint32_t tpg_line_interleaving_mode_shift; + uint32_t tpg_num_frames_shift_val; + uint32_t tpg_num_dts_shift_val; + uint32_t tpg_v_blank_cnt_shift; + uint32_t tpg_dt_encode_format_shift; + uint32_t tpg_payload_mode_color; + uint32_t tpg_split_en_shift; + uint32_t top_mux_reg_offset; + uint32_t tpg_vc_dt_pattern_id_shift; + 
uint32_t tpg_num_active_vcs_shift; + uint32_t tpg_color_bar_qcfa_en_shift; + uint32_t tpg_color_bar_qcfa_rotate_period_shift; + uint32_t tpg_overlap_shdr_en_shift; + uint32_t tpg_num_batch_shift; + uint32_t tpg_xcfa_en_shift; + uint32_t tpg_noise_en_shift; + uint32_t tpg_size_x_shift; + uint32_t tpg_size_y_shift; + uint32_t tpg_gain_shift; + uint32_t tpg_shdr_offset_num_batch_shift; + uint32_t tpg_shdr_line_offset0_shift; + uint32_t tpg_shdr_line_offset1_shift; +}; + + +/** + * @brief initialize the tpg hw instance + * + * @param hw : tpg hw instance + * @param data : argument for initialize + * + * @return : 0 on success + */ +int tpg_hw_v_1_3_init(struct tpg_hw *hw, void *data); + +/** + * @brief start tpg hw + * + * @param hw : tpg hw instance + * @param data : tpg hw instance data + * + * @return : 0 on success + */ +int tpg_hw_v_1_3_start(struct tpg_hw *hw, void *data); + +/** + * @brief stop tpg hw + * + * @param hw : tpg hw instance + * @param data : argument for tpg hw stop + * + * @return : 0 on success + */ +int tpg_hw_v_1_3_stop(struct tpg_hw *hw, void *data); + +/** + * @brief process a command send from hw layer + * + * @param hw : tpg hw instance + * @param cmd : command to process + * @param arg : argument corresponding to command + * + * @return : 0 on success + */ +int tpg_hw_v_1_3_process_cmd(struct tpg_hw *hw, + uint32_t cmd, void *arg); + +/** + * @brief dump hw status registers + * + * @param hw : tpg hw instance + * @param data : argument for status dump + * + * @return : 0 on sucdess + */ +int tpg_hw_v_1_3_dump_status(struct tpg_hw *hw, void *data); + +int tpg_1_3_layer_init(struct tpg_hw *hw); +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_0.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_0.h new file mode 100644 index 0000000000..689b4fc536 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_0.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __CAM_TPG_HW_V_1_3_0_H__ +#define __CAM_TPG_HW_V_1_3_0_H__ + +static struct cam_tpg_ver_1_3_reg_offset cam_tpg103_reg = { + .tpg_hw_version = 0x0, + .tpg_hw_status = 0x4, + .tpg_ctrl = 0x64, + .tpg_vc0_cfg0 = 0x68, + .tpg_vc0_lfsr_seed = 0x6C, + .tpg_vc0_hbi_cfg = 0x70, + .tpg_vc0_vbi_cfg = 0x74, + .tpg_vc0_color_bar_cfg = 0x78, + .tpg_vc0_dt_0_cfg_0 = 0x7C, + .tpg_vc0_dt_0_cfg_1 = 0x80, + .tpg_vc0_dt_0_cfg_2 = 0x84, + .tpg_vc0_dt_1_cfg_0 = 0x88, + .tpg_vc0_dt_1_cfg_1 = 0x8C, + .tpg_vc0_dt_1_cfg_2 = 0x90, + .tpg_vc0_dt_2_cfg_0 = 0x94, + .tpg_vc0_dt_2_cfg_1 = 0x98, + .tpg_vc0_dt_2_cfg_2 = 0x9C, + .tpg_vc0_dt_3_cfg_0 = 0xA0, + .tpg_vc0_dt_3_cfg_1 = 0xA4, + .tpg_vc0_dt_3_cfg_2 = 0xA8, + + .tpg_vc1_cfg0 = 0xC8, + .tpg_vc1_lfsr_seed = 0xCC, + .tpg_vc1_hbi_cfg = 0xD0, + .tpg_vc1_vbi_cfg = 0xD4, + .tpg_vc1_color_bar_cfg = 0xD8, + .tpg_vc1_dt_0_cfg_0 = 0xDC, + .tpg_vc1_dt_0_cfg_1 = 0xE0, + .tpg_vc1_dt_0_cfg_2 = 0xE4, + .tpg_vc1_dt_1_cfg_0 = 0xE8, + .tpg_vc1_dt_1_cfg_1 = 0xEC, + .tpg_vc1_dt_1_cfg_2 = 0xF0, + .tpg_vc1_dt_2_cfg_0 = 0xF4, + .tpg_vc1_dt_2_cfg_1 = 0xF8, + .tpg_vc1_dt_2_cfg_2 = 0xFC, + .tpg_vc1_dt_3_cfg_0 = 0x100, + .tpg_vc1_dt_3_cfg_1 = 0x104, + .tpg_vc1_dt_3_cfg_2 = 0x108, + + .tpg_vc2_cfg0 = 0x128, + .tpg_vc2_lfsr_seed = 0x12C, + .tpg_vc2_hbi_cfg = 0x130, + .tpg_vc2_vbi_cfg = 0x134, + .tpg_vc2_color_bar_cfg = 0x138, + .tpg_vc2_dt_0_cfg_0 = 0x13C, + .tpg_vc2_dt_0_cfg_1 = 0x140, + .tpg_vc2_dt_0_cfg_2 = 0x144, + .tpg_vc2_dt_1_cfg_0 = 0x148, + .tpg_vc2_dt_1_cfg_1 = 0x14C, + .tpg_vc2_dt_1_cfg_2 = 0x150, + .tpg_vc2_dt_2_cfg_0 = 0x154, + .tpg_vc2_dt_2_cfg_1 = 0x158, + .tpg_vc2_dt_2_cfg_2 = 0x15C, + .tpg_vc2_dt_3_cfg_0 = 0x160, + .tpg_vc2_dt_3_cfg_1 = 0x164, + .tpg_vc2_dt_3_cfg_2 = 0x168, + + 
.tpg_vc3_cfg0 = 0x188, + .tpg_vc3_lfsr_seed = 0x18C, + .tpg_vc3_hbi_cfg = 0x190, + .tpg_vc3_vbi_cfg = 0x194, + .tpg_vc3_color_bar_cfg = 0x198, + .tpg_vc3_dt_0_cfg_0 = 0x19C, + .tpg_vc3_dt_0_cfg_1 = 0x1A0, + .tpg_vc3_dt_0_cfg_2 = 0x1A4, + .tpg_vc3_dt_1_cfg_0 = 0x1A8, + .tpg_vc3_dt_1_cfg_1 = 0x1AC, + .tpg_vc3_dt_1_cfg_2 = 0x1B0, + .tpg_vc3_dt_2_cfg_0 = 0x1B4, + .tpg_vc3_dt_2_cfg_1 = 0x1B8, + .tpg_vc3_dt_2_cfg_2 = 0x1BC, + .tpg_vc3_dt_3_cfg_0 = 0x1C0, + .tpg_vc3_dt_3_cfg_1 = 0x1C4, + .tpg_vc3_dt_3_cfg_2 = 0x1C8, + .tpg_throttle = 0x1CC, + .tpg_top_irq_status = 0x1E0, + .tpg_top_irq_mask = 0x1E4, + .tpg_top_irq_clear = 0x1E8, + .tpg_top_irq_set = 0x1EC, + .tpg_top_irq_cmd = 0x1F0, + .tpg_top_clear = 0x1F4, + .tpg_test_bus_crtl = 0x1F8, + .tpg_spare = 0x1FC, + + /* configurations */ + .major_version = 2, + .minor_version = 0, + .version_incr = 0, + .tpg_en_shift_val = 0, + .tpg_cphy_dphy_sel_shift_val = 3, + .tpg_num_active_lanes_shift = 4, + .tpg_fe_pkt_en_shift = 2, + .tpg_fs_pkt_en_shift = 1, + .tpg_line_interleaving_mode_shift = 10, + .tpg_num_frames_shift_val = 16, + .tpg_num_dts_shift_val = 8, + .tpg_v_blank_cnt_shift = 12, + .tpg_dt_encode_format_shift = 20, + .tpg_payload_mode_color = 0x8, + .tpg_split_en_shift = 4, + .top_mux_reg_offset = 0x1C, + .tpg_vc_dt_pattern_id_shift = 6, + .tpg_num_active_vcs_shift = 30, + .tpg_color_bar_qcfa_en_shift = 3, + .tpg_color_bar_qcfa_rotate_period_shift = 8, +}; + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_1.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_1.h new file mode 100644 index 0000000000..d3f7618574 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_1.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __CAM_TPG_HW_V_1_3_1_H__ +#define __CAM_TPG_HW_V_1_3_1_H__ + +static struct cam_tpg_ver_1_3_reg_offset cam_tpg103_1_reg = { + .tpg_hw_version = 0x0, + .tpg_hw_status = 0x4, + .tpg_vc0_gain_cfg = 0x60, + .tpg_ctrl = 0x64, + .tpg_vc0_cfg0 = 0x68, + .tpg_vc0_lfsr_seed = 0x6C, + .tpg_vc0_hbi_cfg = 0x70, + .tpg_vc0_vbi_cfg = 0x74, + .tpg_vc0_color_bar_cfg = 0x78, + .tpg_vc0_dt_0_cfg_0 = 0x7C, + .tpg_vc0_dt_0_cfg_1 = 0x80, + .tpg_vc0_dt_0_cfg_2 = 0x84, + .tpg_vc0_dt_1_cfg_0 = 0x88, + .tpg_vc0_dt_1_cfg_1 = 0x8C, + .tpg_vc0_dt_1_cfg_2 = 0x90, + .tpg_vc0_dt_2_cfg_0 = 0x94, + .tpg_vc0_dt_2_cfg_1 = 0x98, + .tpg_vc0_dt_2_cfg_2 = 0x9C, + .tpg_vc0_dt_3_cfg_0 = 0xA0, + .tpg_vc0_dt_3_cfg_1 = 0xA4, + .tpg_vc0_dt_3_cfg_2 = 0xA8, + .tpg_vc0_color_bar_cfa_color0 = 0xB0, + .tpg_vc0_color_bar_cfa_color1 = 0xB4, + .tpg_vc0_color_bar_cfa_color2 = 0xB8, + .tpg_vc0_color_bar_cfa_color3 = 0xBC, + + .tpg_vc1_gain_cfg = 0xC0, + .tpg_vc1_shdr_cfg = 0xC4, + .tpg_vc1_cfg0 = 0xC8, + .tpg_vc1_lfsr_seed = 0xCC, + .tpg_vc1_hbi_cfg = 0xD0, + .tpg_vc1_vbi_cfg = 0xD4, + .tpg_vc1_color_bar_cfg = 0xD8, + .tpg_vc1_dt_0_cfg_0 = 0xDC, + .tpg_vc1_dt_0_cfg_1 = 0xE0, + .tpg_vc1_dt_0_cfg_2 = 0xE4, + .tpg_vc1_dt_1_cfg_0 = 0xE8, + .tpg_vc1_dt_1_cfg_1 = 0xEC, + .tpg_vc1_dt_1_cfg_2 = 0xF0, + .tpg_vc1_dt_2_cfg_0 = 0xF4, + .tpg_vc1_dt_2_cfg_1 = 0xF8, + .tpg_vc1_dt_2_cfg_2 = 0xFC, + .tpg_vc1_dt_3_cfg_0 = 0x100, + .tpg_vc1_dt_3_cfg_1 = 0x104, + .tpg_vc1_dt_3_cfg_2 = 0x108, + .tpg_vc1_color_bar_cfa_color0 = 0x110, + .tpg_vc1_color_bar_cfa_color1 = 0x114, + .tpg_vc1_color_bar_cfa_color2 = 0x118, + .tpg_vc1_color_bar_cfa_color3 = 0x11C, + + .tpg_vc2_gain_cfg = 0x120, + .tpg_vc2_shdr_cfg = 0x124, + .tpg_vc2_cfg0 = 0x128, + .tpg_vc2_lfsr_seed = 0x12C, + .tpg_vc2_hbi_cfg = 0x130, + .tpg_vc2_vbi_cfg = 0x134, + .tpg_vc2_color_bar_cfg = 0x138, + .tpg_vc2_dt_0_cfg_0 = 0x13C, + .tpg_vc2_dt_0_cfg_1 = 0x140, + .tpg_vc2_dt_0_cfg_2 = 0x144, + .tpg_vc2_dt_1_cfg_0 = 0x148, + .tpg_vc2_dt_1_cfg_1 = 0x14C, + 
.tpg_vc2_dt_1_cfg_2 = 0x150, + .tpg_vc2_dt_2_cfg_0 = 0x154, + .tpg_vc2_dt_2_cfg_1 = 0x158, + .tpg_vc2_dt_2_cfg_2 = 0x15C, + .tpg_vc2_dt_3_cfg_0 = 0x160, + .tpg_vc2_dt_3_cfg_1 = 0x164, + .tpg_vc2_dt_3_cfg_2 = 0x168, + .tpg_vc2_color_bar_cfa_color0 = 0x170, + .tpg_vc2_color_bar_cfa_color1 = 0x174, + .tpg_vc2_color_bar_cfa_color2 = 0x178, + .tpg_vc2_color_bar_cfa_color3 = 0x17C, + + .tpg_vc3_gain_cfg = 0x180, + .tpg_vc3_shdr_cfg = 0x184, + .tpg_vc3_cfg0 = 0x188, + .tpg_vc3_lfsr_seed = 0x18C, + .tpg_vc3_hbi_cfg = 0x190, + .tpg_vc3_vbi_cfg = 0x194, + .tpg_vc3_color_bar_cfg = 0x198, + .tpg_vc3_dt_0_cfg_0 = 0x19C, + .tpg_vc3_dt_0_cfg_1 = 0x1A0, + .tpg_vc3_dt_0_cfg_2 = 0x1A4, + .tpg_vc3_dt_1_cfg_0 = 0x1A8, + .tpg_vc3_dt_1_cfg_1 = 0x1AC, + .tpg_vc3_dt_1_cfg_2 = 0x1B0, + .tpg_vc3_dt_2_cfg_0 = 0x1B4, + .tpg_vc3_dt_2_cfg_1 = 0x1B8, + .tpg_vc3_dt_2_cfg_2 = 0x1BC, + .tpg_vc3_dt_3_cfg_0 = 0x1C0, + .tpg_vc3_dt_3_cfg_1 = 0x1C4, + .tpg_vc3_dt_3_cfg_2 = 0x1C8, + .tpg_throttle = 0x1CC, + .tpg_vc3_color_bar_cfa_color0 = 0x1D0, + .tpg_vc3_color_bar_cfa_color1 = 0x1D4, + .tpg_vc3_color_bar_cfa_color2 = 0x1D8, + .tpg_vc3_color_bar_cfa_color3 = 0x1DC, + + .tpg_top_irq_status = 0x1E0, + .tpg_top_irq_mask = 0x1E4, + .tpg_top_irq_clear = 0x1E8, + .tpg_top_irq_set = 0x1EC, + .tpg_top_irq_cmd = 0x1F0, + .tpg_top_clear = 0x1F4, + .tpg_test_bus_crtl = 0x1F8, + .tpg_spare = 0x1FC, + + /* configurations */ + .major_version = 2, + .minor_version = 1, + .version_incr = 0, + .tpg_en_shift_val = 0, + .tpg_cphy_dphy_sel_shift_val = 3, + .tpg_num_active_lanes_shift = 4, + .tpg_fe_pkt_en_shift = 2, + .tpg_fs_pkt_en_shift = 1, + .tpg_line_interleaving_mode_shift = 10, + .tpg_num_frames_shift_val = 16, + .tpg_num_dts_shift_val = 8, + .tpg_v_blank_cnt_shift = 12, + .tpg_dt_encode_format_shift = 28, + .tpg_payload_mode_color = 0x8, + .tpg_split_en_shift = 4, + .top_mux_reg_offset = 0x1C, + .tpg_vc_dt_pattern_id_shift = 6, + .tpg_num_active_vcs_shift = 30, + .tpg_color_bar_qcfa_en_shift = 3, + + 
.tpg_color_bar_qcfa_rotate_period_shift = 8, + .tpg_overlap_shdr_en_shift = 10, + .tpg_num_batch_shift = 12, + .tpg_noise_en_shift = 5, + .tpg_xcfa_en_shift = 16, + .tpg_size_x_shift = 24, + .tpg_size_y_shift = 28, + .tpg_gain_shift = 0, + .tpg_shdr_offset_num_batch_shift = 16, + .tpg_shdr_line_offset0_shift = 0, + .tpg_shdr_line_offset1_shift = 16, +}; + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_data.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_data.h new file mode 100644 index 0000000000..19cf7dd83a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_3/tpg_hw_v_1_3_data.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __TPG_HW_V_1_3_DATA_H__ +#define __TPG_HW_V_1_3_DATA_H__ + +#include "../tpg_hw.h" +#include "../tpg_hw_common.h" +#include "tpg_hw_v_1_3.h" +#include "tpg_hw_v_1_3_0.h" +#include "tpg_hw_v_1_3_1.h" + +struct tpg_hw_ops tpg_hw_v_1_3_ops = { + .start = tpg_hw_v_1_3_start, + .stop = tpg_hw_v_1_3_stop, + .init = tpg_hw_v_1_3_init, + .process_cmd = tpg_hw_v_1_3_process_cmd, + .dump_status = tpg_hw_v_1_3_dump_status, + .write_settings = tpg_hw_write_settings, +}; + +struct tpg_hw_info tpg_v_1_3_hw_info = { + .version = TPG_HW_VERSION_1_3, + .max_vc_channels = 4, + .max_dt_channels_per_vc = 4, + .ops = &tpg_hw_v_1_3_ops, + .hw_data = &cam_tpg103_reg, + .layer_init = &tpg_1_3_layer_init, + .xcfa_debug = -1, + .shdr_overlap = -1, + .shdr_offset_num_batch = 0, + .shdr_line_offset0 = 0x6c, + .shdr_line_offset1 = 0x6c, +}; + +struct tpg_hw_info tpg_v_1_3_1_hw_info = { + .version = TPG_HW_VERSION_1_3_1, + .max_vc_channels = 4, + .max_dt_channels_per_vc = 4, + .ops = &tpg_hw_v_1_3_ops, + .hw_data = 
&cam_tpg103_1_reg, + .layer_init = &tpg_1_3_layer_init, + .xcfa_debug = 0, + .shdr_overlap = 0, + .shdr_offset_num_batch = 0, + .shdr_line_offset0 = 0x6c, + .shdr_line_offset1 = 0x6c, +}; + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4.c b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4.c new file mode 100644 index 0000000000..d043c49c2e --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4.c @@ -0,0 +1,963 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "tpg_hw_v_1_4.h" + +enum tpg_hw_v_1_4_encode_fomat_t { + RAW_8_BIT = 1, + RAW_10_BIT, + RAW_12_BIT, + RAW_14_BIT, + RAW_16_BIT +}; + +#define CAM_TPG_HW_WAIT_TIMEOUT msecs_to_jiffies(100) +#define FRAME_INTERLEAVE 0x0 +#define LINE_INTERLEAVE 0x1 +#define SHDR_INTERLEAVE 0x2 +#define SPARSE_PD_INTERLEAVE 0x3 +#define SHDR_SPARSE_PD_INTERLEAVE 0x4 +#define CFA_PATTERN_ROW_WIDTH 8 +#define CFA_PATTERN_BITS_PER_INDEX 2 +#define TIMEOUT_MULTIPLIER 1 +#define TIMEOUT_MULTIPLIER_PRESIL 5 +#define TPG_DISABLE 0 + +static int get_tpg_vc_dt_pattern_id( + enum tpg_interleaving_format_t vc_dt_pattern) +{ + switch (vc_dt_pattern) { + case TPG_INTERLEAVING_FORMAT_INVALID: + case TPG_INTERLEAVING_FORMAT_MAX: + case TPG_INTERLEAVING_FORMAT_FRAME: + return FRAME_INTERLEAVE; + case TPG_INTERLEAVING_FORMAT_LINE: + return LINE_INTERLEAVE; + case TPG_INTERLEAVING_FORMAT_SHDR: + return SHDR_INTERLEAVE; + case TPG_INTERLEAVING_FORMAT_SPARSE_PD: + return SPARSE_PD_INTERLEAVE; + case TPG_INTERLEAVING_FORMAT_SHDR_SPARSE_PD: + return SHDR_SPARSE_PD_INTERLEAVE; + } + return FRAME_INTERLEAVE; +} + +static int get_tpg_encode_format(int sw_encode_format) +{ + switch (sw_encode_format) { + case 
PACK_8_BIT: + return RAW_8_BIT; + case PACK_10_BIT: + return RAW_10_BIT; + case PACK_12_BIT: + return RAW_12_BIT; + case PACK_14_BIT: + return RAW_14_BIT; + case PACK_16_BIT: + return RAW_16_BIT; + } + return RAW_8_BIT; +} + +#define INCREMENTING 0x0 +#define ALTERNATING_55_AA 0x1 +#define RANDOM 0x4 +#define USER_SPECIFIED 0x5 +#define COLOR_BARS 0x8 + +static int get_tpg_payload_mode(enum tpg_pattern_t pattern) +{ + switch (pattern) { + case TPG_PATTERN_INVALID: + case TPG_PATTERN_REAL_IMAGE: + case TPG_PATTERN_COLOR_BAR: + return COLOR_BARS; + case TPG_PATTERN_RANDOM_PIXL: + case TPG_PATTERN_RANDOM_INCREMENTING_PIXEL: + return RANDOM; + case TPG_PATTERN_ALTERNATING_55_AA: + return ALTERNATING_55_AA; + case TPG_PATTERN_ALTERNATING_USER_DEFINED: + return USER_SPECIFIED; + default: + return COLOR_BARS; + } + return COLOR_BARS; +} + +#define RGGB_IR_0 0x00770091 +#define RGGB_IR_1 0x00770019 +#define RGGB_2x2 0x05055A5A +#define RGGB_3x3_0 0x05400540 +#define RGGB_3x3_1 0x0a950540 +#define RGGB_3x3_2 0x0a950a95 +#define RGGB_4x4_0 0x55005500 +#define RGGB_4x4_1 0x55005500 +#define RGGB_4x4_2 0xaa55aa55 +#define RGGB_4x4_3 0xaa55aa55 +#define VC1_GAIN 0x100 + +static int configure_xcfa_array(struct tpg_hw *hw, int config) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + switch (config) { + case 1: + cam_io_w_mb(RGGB_IR_0, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + cam_io_w_mb(RGGB_IR_1, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + break; + case 2: + cam_io_w_mb(RGGB_2x2, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + break; + case 3: + cam_io_w_mb(RGGB_3x3_0, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + 
cam_io_w_mb(RGGB_3x3_1, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + cam_io_w_mb(RGGB_3x3_2, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color2); + break; + case 4: + cam_io_w_mb(RGGB_4x4_0, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + cam_io_w_mb(RGGB_4x4_1, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + cam_io_w_mb(RGGB_4x4_2, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color2); + cam_io_w_mb(RGGB_4x4_3, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color3); + break; + break; + + } + return 0; +} + +static int configure_global_configs( + struct tpg_hw *hw, + int num_vcs, + struct tpg_global_config_t *configs) +{ + uint32_t val, phy_type = 0; + uint32_t timeout_multiplier = -1; + int rc = 0; + unsigned long wait_jiffies = 0; + unsigned long rem_jiffies = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + tpg_reg = hw->hw_info->hw_data; + soc_info = hw->soc_info; + + if (configs->phy_type == TPG_PHY_TYPE_CPHY) + phy_type = 1; + + if (num_vcs <= 0) { + CAM_ERR(CAM_TPG, "invalid vc count"); + return -EINVAL; + } + + val = (1 << tpg_reg->rup_done_mask_vec_shift) | (1 << tpg_reg->tpg_done_mask_vec_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->top_irq_mask); + + val = (num_vcs - 1) << + (tpg_reg->num_active_vc_shift) | + (configs->lane_count - 1) << (tpg_reg->num_active_lanes_shift) | + (get_tpg_vc_dt_pattern_id(configs->interleaving_format) + << (tpg_reg->vc_dt_pattern_id_shift)) | + (phy_type << tpg_reg->phy_sel_shift) | + ((tpg_reg->async_mode_min_hbi)<< + (tpg_reg->async_mode_min_hbi_shift)); + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl); + + CAM_DBG(CAM_TPG, "tpg[%d] tpg_ctrl=0x%x", hw->hw_idx, val); + 
+ /* Check with hw poc if this needs to be done perframe */ + val = (1 << tpg_reg->test_en_cmd_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl_cmd); + CAM_DBG(CAM_TPG, "tpg[%d] tpg_ctrl_cmd=0x%x", hw->hw_idx, val); + + if (hw->settings_update) { + cam_io_w_mb(TPG_DISABLE, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl_cmd); + if (cam_presil_mode_enabled()) + timeout_multiplier = TIMEOUT_MULTIPLIER_PRESIL; + else + timeout_multiplier = TIMEOUT_MULTIPLIER; + CAM_DBG(CAM_TPG, "wait for TPG done and RUP done Interrupt"); + wait_jiffies = CAM_TPG_HW_WAIT_TIMEOUT * timeout_multiplier; + reinit_completion(&hw->complete_rup); + rem_jiffies = + cam_common_wait_for_completion_timeout(&hw->complete_rup, wait_jiffies); + if (rem_jiffies) { + val = (1 << tpg_reg->test_en_cmd_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl_cmd); + hw->settings_update = 0; + } else { + CAM_ERR(CAM_TPG, "TPG[%d] hw timeout %llu", + hw->hw_idx, rem_jiffies); + rc = -EBUSY; + } + } + + return rc; +} + +static int get_pixel_coordinate( + int cfa_pattern_start_index, + int cfa_pattern_end_index, + uint32_t *val, + struct tpg_stream_config_v3_t *configs) +{ + uint32_t shift = 0; + int idx = 0; + int i = 0; + int j = 0; + *val = 0; + for (i = cfa_pattern_start_index; i < cfa_pattern_end_index; i++) { + for (j = 0; j < configs->cfa_info.pattern_width; j++) { + shift = ((i * CFA_PATTERN_ROW_WIDTH) + j) * + CFA_PATTERN_BITS_PER_INDEX; + idx = i * configs->cfa_info.pattern_height + j; + *val |= (configs->cfa_info.pixel_coordinate[idx].pixel_type) << shift; + } + } + return 0; +} + +static int configure_xcfa_array_v3( + struct tpg_hw *hw, + struct tpg_stream_config_v3_t *configs) +{ + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + uint32_t val = 0; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + 
soc_info = hw->soc_info; + + switch (configs->xcfa_type) { + case XCFA_TYPE_RGBIR: + get_pixel_coordinate(0, 2, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + get_pixel_coordinate(2, configs->cfa_info.pattern_height, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + break; + case XCFA_TYPE_QUADCFA: + get_pixel_coordinate(0, 2, &val, configs); + CAM_DBG(CAM_TPG, "val = 0x%x", val); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + get_pixel_coordinate(2, configs->cfa_info.pattern_height, &val, configs); + CAM_DBG(CAM_TPG, "val = 0x%x", val); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + break; + case XCFA_TYPE_THREEXTHREECFA: + get_pixel_coordinate(0, 2, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + get_pixel_coordinate(2, 4, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + get_pixel_coordinate(4, configs->cfa_info.pattern_height, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color2); + break; + case XCFA_TYPE_FOURXFOURCFA: + get_pixel_coordinate(0, 2, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color0); + get_pixel_coordinate(2, 4, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color1); + get_pixel_coordinate(4, 6, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color2); + get_pixel_coordinate(6, configs->cfa_info.pattern_height, &val, configs); + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_vc0_color_bar_cfa_color3); + break; + default: + break; + } + return 0; +} + +static int configure_dt_v3( 
+ struct tpg_hw *hw, + uint32_t vc_slot, + uint32_t dt_slot, + struct tpg_stream_config_v3_t *stream) +{ + uint32_t val = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + CAM_DBG(CAM_TPG, "TPG[%d] slot(%d,%d) <= dt:%d", + hw->hw_idx, + vc_slot, + dt_slot, + stream->dt); + + val = (((stream->stream_dimension.width & 0xFFFF) << tpg_reg->frame_width_shift) | + (stream->stream_dimension.height & 0xFFFF << tpg_reg->frame_height_shift)); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_0 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_0=0x%x", + hw->hw_idx, + vc_slot, dt_slot, val); + + cam_io_w_mb(stream->dt, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_1 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_1=0x%x", + hw->hw_idx, + vc_slot, dt_slot, stream->dt); + + val = ((get_tpg_encode_format(stream->pixel_depth) & 0xF) << + tpg_reg->encode_format_shift) | + (get_tpg_payload_mode(stream->pattern_type) << tpg_reg->pattern_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_2 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_2=0x%x", + hw->hw_idx, + vc_slot, dt_slot, val); + + return 0; +} + +static int configure_dt( + struct tpg_hw *hw, + uint32_t vc_slot, + uint32_t dt_slot, + struct tpg_stream_config_t *stream) +{ + + uint32_t val = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + CAM_DBG(CAM_TPG, "TPG[%d] slot(%d,%d) <= 
dt:%d", + hw->hw_idx, + vc_slot, + dt_slot, + stream->dt); + + val = (((stream->stream_dimension.width & 0xFFFF) << tpg_reg->frame_width_shift) | + (stream->stream_dimension.height & 0xFFFF) << tpg_reg->frame_height_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_0 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_0=0x%x", + hw->hw_idx, + vc_slot, dt_slot, val); + + cam_io_w_mb(stream->dt, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_1 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_1=0x%x", + hw->hw_idx, + vc_slot, dt_slot, stream->dt); + + val = ((get_tpg_encode_format(stream->pixel_depth) & 0xF) << + tpg_reg->encode_format_shift) | + (get_tpg_payload_mode(stream->pattern_type) << tpg_reg->pattern_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_dt_0_cfg_2 + + (0x60 * vc_slot) + (dt_slot * 0x0c)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_dt%d_cfg_2=0x%x", + hw->hw_idx, + vc_slot, dt_slot, val); + + return 0; +} + +static int configure_vc_v3( + struct tpg_hw *hw, + uint32_t vc_slot, + int num_dts, + struct tpg_stream_config_v3_t *stream) +{ + uint32_t val = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + /* Use CFA pattern here */ + if (stream->output_format == TPG_IMAGE_FORMAT_QCFA) + val |= (1 << tpg_reg->qcfa_en_shift); + + if (stream->cb_mode == TPG_COLOR_BAR_MODE_SPLIT) + val |= (1 << tpg_reg->split_en_shift); + + if (stream->cfa_info_exist != 0) { + val |= ((stream->cfa_info.pattern_height - 1) << tpg_reg->size_y_shift); + val |= ((stream->cfa_info.pattern_width - 1) << tpg_reg->size_x_shift); + val |= (1 << tpg_reg->xcfa_en_shift); + configure_xcfa_array_v3(hw, stream); + } + + 
CAM_DBG(CAM_TPG, "TPG[%d] period: %d", hw->hw_idx, stream->rotate_period); + val |= ((stream->rotate_period & 0x3F) << + tpg_reg->rotate_period_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_color_bars_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_color_bar_cfg=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->hbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_hbi_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_hbi_cfg=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->vbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_vbi_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_vbi_cgf=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->skip_pattern; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_throttle + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_vbi_cgf=0x%x", + hw->hw_idx, + vc_slot, val); + + cam_io_w_mb(0x12345678, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_lfsr_seed + (0x60 * vc_slot)); + + val = ((stream->frame_count << tpg_reg->num_frames_shift) | + ((num_dts-1) << tpg_reg->num_active_dt_shift) | + stream->vc); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_cfg0 + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_cfg0=0x%x", + hw->hw_idx, + vc_slot, val); + if (hw->hw_info->shdr_overlap == 1) { + cam_io_w_mb(hw->hw_info->shdr_overlap << tpg_reg->overlap_shdr_en_shift, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl); + } + if (hw->hw_info->shdr_offset_num_batch >= 0 && vc_slot > 0) { + val = (VC1_GAIN << tpg_reg->gain_shift); + val |= (hw->hw_info->shdr_offset_num_batch << + tpg_reg->shdr_offset_num_batch_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc1_gain_cfg + (0x60 * (vc_slot-1))); + val = ((stream->shdr_line_offset0 * vc_slot) + << tpg_reg->shdr_line_offset0_shift); + val |= ((stream->shdr_line_offset1 * vc_slot) + << 
tpg_reg->shdr_line_offset1_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc1_shdr_cfg + (0x60 * (vc_slot-1))); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_cfg0=0x%x shdr", + hw->hw_idx, + vc_slot, val); + } + + return 0; +} + +static int configure_vc( + struct tpg_hw *hw, + uint32_t vc_slot, + int num_dts, + struct tpg_stream_config_t *stream) +{ + uint32_t val = 0; + struct cam_hw_soc_info *soc_info = NULL; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + + + if (stream->output_format == TPG_IMAGE_FORMAT_QCFA) + val |= (1 << tpg_reg->qcfa_en_shift); + + if (stream->cb_mode == TPG_COLOR_BAR_MODE_SPLIT) + val |= (1 << tpg_reg->split_en_shift); + + if (stream->xcfa_debug > 0) { + if (stream->xcfa_debug == 1) { + val |= (3 << tpg_reg->size_y_shift); + val |= (3 << tpg_reg->size_x_shift); + } else { + val |= ((stream->xcfa_debug * 2 - 1) << tpg_reg->size_y_shift); + val |= ((stream->xcfa_debug * 2 - 1) << tpg_reg->size_x_shift); + } + val |= (1 << tpg_reg->xcfa_en_shift); + configure_xcfa_array(hw, stream->xcfa_debug); + CAM_DBG(CAM_TPG, "xcfa_debug = %d", stream->xcfa_debug); + } + + CAM_DBG(CAM_TPG, "TPG[%d] period: %d", hw->hw_idx, stream->rotate_period); + val |= ((stream->rotate_period & 0x3F) << + tpg_reg->rotate_period_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_color_bars_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_color_bar_cfg=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->hbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_hbi_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_hbi_cfg=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->vbi; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_vbi_cfg + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] 
vc%d_vbi_cgf=0x%x", + hw->hw_idx, + vc_slot, val); + + val = stream->skip_pattern; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_throttle + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_vbi_cgf=0x%x", + hw->hw_idx, + vc_slot, val); + + cam_io_w_mb(0x12345678, + soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_lfsr_seed + (0x60 * vc_slot)); + + val = ((stream->frame_count << tpg_reg->num_frames_shift) | + ((num_dts-1) << tpg_reg->num_active_dt_shift) | + stream->vc); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc0_cfg0 + (0x60 * vc_slot)); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_cfg0=0x%x", + hw->hw_idx, + vc_slot, val); + if (hw->hw_info->shdr_overlap == 1) { + cam_io_w_mb(hw->hw_info->shdr_overlap << tpg_reg->overlap_shdr_en_shift, + soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl); + } + if (hw->hw_info->shdr_offset_num_batch >= 0 && vc_slot > 0) { + val = (VC1_GAIN << tpg_reg->gain_shift); + val |= (hw->hw_info->shdr_offset_num_batch << + tpg_reg->shdr_offset_num_batch_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc1_gain_cfg + (0x60 * (vc_slot-1))); + val = ((stream->shdr_line_offset0 * vc_slot) + << tpg_reg->shdr_line_offset0_shift); + val |= ((stream->shdr_line_offset1 * vc_slot) + << tpg_reg->shdr_line_offset1_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_vc1_shdr_cfg + (0x60 * (vc_slot-1))); + CAM_DBG(CAM_TPG, "TPG[%d] vc%d_cfg0=0x%x shdr", + hw->hw_idx, + vc_slot, val); + } + + + return 0; +} + +static int tpg_hw_v_1_4_reset( + struct tpg_hw *hw, void *data) +{ + struct cam_hw_soc_info *soc_info = NULL; + uint32_t val; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + tpg_reg = hw->hw_info->hw_data; + + soc_info = hw->soc_info; + /* Clear out tpg_ctrl and irqs before reset */ + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + 
tpg_reg->tpg_ctrl); + + cam_io_w_mb(0, soc_info->reg_map[0].mem_base + + tpg_reg->top_irq_mask); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + tpg_reg->top_irq_clear); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + tpg_reg->irq_cmd); + + cam_io_w_mb(1, soc_info->reg_map[0].mem_base + + tpg_reg->tpg_ctrl_cmd); + + /* Read the version */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + tpg_reg->hw_version); + CAM_INFO(CAM_TPG, "TPG[%d] TPG HW version: 0x%x started", + hw->hw_idx, val); + + return 0; +} + +int tpg_hw_v_1_4_process_cmd( + struct tpg_hw *hw, + uint32_t cmd, + void *arg) +{ + int rc = 0; + + if (hw == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + switch (cmd) { + case TPG_CONFIG_VC: + { + if (hw->stream_version == 1) { + struct vc_config_args *vc_config = + (struct vc_config_args *)arg; + + if (vc_config == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_vc(hw, + vc_config->vc_slot, + vc_config->num_dts, + vc_config->stream); + } else if (hw->stream_version == 3) { + struct vc_config_args_v3 *vc_config_v3 = + (struct vc_config_args_v3 *)arg; + + if (vc_config_v3 == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_vc_v3(hw, + vc_config_v3->vc_slot, + vc_config_v3->num_dts, + vc_config_v3->stream); + } + } + break; + case TPG_CONFIG_DT: + { + if (hw->stream_version == 1) { + struct dt_config_args *dt_config = + (struct dt_config_args *)arg; + + if (dt_config == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_dt(hw, + dt_config->vc_slot, + dt_config->dt_slot, + dt_config->stream); + } else if (hw->stream_version == 3) { + struct dt_config_args_v3 *dt_config_v3 = + (struct dt_config_args_v3 *)arg; + + if (dt_config_v3 == NULL) { + CAM_ERR(CAM_TPG, "invalid argument"); + return -EINVAL; + } + rc = configure_dt_v3(hw, + dt_config_v3->vc_slot, + dt_config_v3->dt_slot, + dt_config_v3->stream); + } + } + 
break; + case TPG_CONFIG_CTRL: + { + struct global_config_args *global_args = + (struct global_config_args *)arg; + rc = configure_global_configs(hw, + global_args->num_vcs, + global_args->globalconfig); + } + break; + default: + CAM_ERR(CAM_TPG, "invalid argument"); + break; + } + return rc; +} + +int tpg_hw_v_1_4_start(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.4 HWL start"); + return 0; +} + +int tpg_hw_v_1_4_stop(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.4 HWL stop"); + tpg_hw_v_1_4_reset(hw, data); + return 0; +} + +irqreturn_t tpg_hw_v_1_4_handle_irq(struct tpg_hw *hw) +{ + struct cam_hw_soc_info *soc_info = NULL; + uint32_t val; + unsigned long flags; + struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL; + bool rup_done = false; + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data || !hw->soc_info) { + CAM_ERR(CAM_TPG, "Invalid Params"); + return IRQ_NONE; + } + + tpg_reg = hw->hw_info->hw_data; + soc_info = hw->soc_info; + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + tpg_reg->top_irq_status); + + if (val & ((1 << tpg_reg->rup_done_status_shift) | + (1 << tpg_reg->tpg_done_status_shift))) { + CAM_DBG(CAM_TPG, "Got TPG Interrupt val = 0x%x", val); + + if (val & (1 << tpg_reg->rup_done_status_shift)) { + rup_done = true; + spin_lock_irqsave(&hw->hw_state_lock, flags); + hw->state = TPG_HW_STATE_READY; + spin_unlock_irqrestore(&hw->hw_state_lock, flags); + } + + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->top_irq_clear); + val = (1 << tpg_reg->clear_shift); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->irq_cmd); + + if (rup_done) + complete(&hw->complete_rup); + } else { + CAM_ERR(CAM_TPG, "Not a valid event 0x%x", val); + } + + return IRQ_HANDLED; +} +int tpg_hw_v_1_4_dump_status(struct tpg_hw *hw, void *data) +{ + + if (!hw || !hw->hw_info || !hw->hw_info->hw_data) { + CAM_ERR(CAM_TPG, "invalid params"); + return -EINVAL; + } + + CAM_DBG(CAM_TPG, "TPG V1.4 HWL status dump"); + + return 
0; +} + +int tpg_hw_v_1_4_init(struct tpg_hw *hw, void *data) +{ + CAM_DBG(CAM_TPG, "TPG V1.4 HWL init"); + tpg_hw_v_1_4_reset(hw, data); + return 0; +} + + + +static int tpg_1_4_get_xcfa_test(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get xcfa test %d", hw->hw_info->xcfa_debug); + *val = hw->hw_info->xcfa_debug; + return 0; +} +static int tpg_1_4_get_shdr_overlap_test(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get shdr test : %d", hw->hw_info->shdr_overlap); + *val = hw->hw_info->shdr_overlap; + return 0; +} +static int tpg_1_4_get_shdr_offset_num_batch(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get shdr_num_batch : %d", hw->hw_info->shdr_offset_num_batch); + *val = hw->hw_info->shdr_offset_num_batch; + return 0; +} +static int tpg_1_4_get_shdr_line_offset0(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get shdr_offset0 : %d", hw->hw_info->shdr_line_offset0); + *val = hw->hw_info->shdr_line_offset0; + return 0; +} +static int tpg_1_4_get_shdr_line_offset1(void *data, u64 *val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "get shdr_offset1 : %d", hw->hw_info->shdr_line_offset1); + *val = hw->hw_info->shdr_line_offset1; + return 0; +} + + +static int tpg_1_4_set_xcfa_test(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set xcfa test prev : %d", hw->hw_info->xcfa_debug); + hw->hw_info->xcfa_debug = val; + return 0; +} +static int tpg_1_4_set_shdr_overlap_test(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set shdr test prev : %d", hw->hw_info->shdr_overlap); + hw->hw_info->shdr_overlap = val; + return 0; +} +static int tpg_1_4_set_shdr_offset_num_batch(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set shdr_num_batch : 
%d", hw->hw_info->shdr_offset_num_batch); + hw->hw_info->shdr_offset_num_batch = val; + return 0; +} +static int tpg_1_4_set_shdr_line_offset0(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set shdr_offset0 : %d", hw->hw_info->shdr_line_offset0); + hw->hw_info->shdr_line_offset0 = val; + return 0; +} +static int tpg_1_4_set_shdr_line_offset1(void *data, u64 val) +{ + struct tpg_hw *hw = (struct tpg_hw *)data; + + CAM_INFO(CAM_TPG, "set shdr_offset1 : %d", hw->hw_info->shdr_line_offset1); + hw->hw_info->shdr_line_offset1 = val; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_4_xcfa_test, + &tpg_1_4_get_xcfa_test, + &tpg_1_4_set_xcfa_test, + "%16lld"); + +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_4_shdr_overlap_test, + &tpg_1_4_get_shdr_overlap_test, + &tpg_1_4_set_shdr_overlap_test, + "%16lld"); +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_4_shdr_offset_num_batch, + &tpg_1_4_get_shdr_offset_num_batch, + &tpg_1_4_set_shdr_offset_num_batch, + "%16lld"); +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_4_shdr_line_offset0, + &tpg_1_4_get_shdr_line_offset0, + &tpg_1_4_set_shdr_line_offset0, + "%16lld"); +DEFINE_SIMPLE_ATTRIBUTE(tpg_1_4_shdr_line_offset1, + &tpg_1_4_get_shdr_line_offset1, + &tpg_1_4_set_shdr_line_offset1, + "%16lld"); + + +int tpg_1_4_layer_init(struct tpg_hw *hw) +{ + struct dentry *dbgfileptr_parent = NULL; + char dir_name[160]; + + snprintf(dir_name, sizeof(dir_name), "tpg%d", + hw->hw_idx); + + dbgfileptr_parent = debugfs_create_dir(dir_name, NULL); + if (!dbgfileptr_parent) { + CAM_ERR(CAM_TPG, "Debug fs could not create directory"); + return -ENOENT; + } + + debugfs_create_file("tpg_xcfa_test", 0644, + dbgfileptr_parent, hw, &tpg_1_4_xcfa_test); + debugfs_create_file("tpg_shdr_overlap_test", 0644, + dbgfileptr_parent, hw, &tpg_1_4_shdr_overlap_test); + debugfs_create_file("tpg_shdr_offset_num_batch", 0644, + dbgfileptr_parent, hw, &tpg_1_4_shdr_offset_num_batch); + debugfs_create_file("tpg_shdr_line_offset0", 0644, + dbgfileptr_parent, hw, 
&tpg_1_4_shdr_line_offset0); + debugfs_create_file("tpg_shdr_line_offset1", 0644, + dbgfileptr_parent, hw, &tpg_1_4_shdr_line_offset1); + CAM_INFO(CAM_TPG, "Layer init called"); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4.h new file mode 100644 index 0000000000..e55539c7af --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4.h @@ -0,0 +1,348 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __TPG_HW_V_1_4_H__ +#define __TPG_HW_V_1_4_H__ + +#include "../tpg_hw.h" + +struct cam_tpg_ver_1_4_reg_offset { + /* Register offsets below */ + int hw_version; + int hw_status; + int dmi_cfg; + int dmi_lut_cfg; + int dmi_data; + int dmi_data_1; + int dmi_data_2; + int dmi_data_3; + int dmi_data_4; + int dmi_data_5; + int dmi_data_6; + int dmi_data_7; + int dmi_data_8; + int dmi_data_9; + int dmi_data_10; + int dmi_data_11; + int dmi_data_12; + int dmi_data_13; + int dmi_data_14; + int dmi_data_15; + int dmi_cmd; + int dmi_status; + int dmi_lut_bank_cfg; + int module_lut_bank_cfg; + int tpg_vc0_gain_cfg; + int tpg_ctrl; + int tpg_vc0_cfg0; + int tpg_vc0_lfsr_seed; + int tpg_vc0_hbi_cfg; + int tpg_vc0_vbi_cfg; + int tpg_vc0_color_bars_cfg; + int tpg_vc0_dt_0_cfg_0; + int tpg_vc0_dt_0_cfg_1; + int tpg_vc0_dt_0_cfg_2; + int tpg_vc0_dt_1_cfg_0; + int tpg_vc0_dt_1_cfg_1; + int tpg_vc0_dt_1_cfg_2; + int tpg_vc0_dt_2_cfg_0; + int tpg_vc0_dt_2_cfg_1; + int tpg_vc0_dt_2_cfg_2; + int tpg_vc0_dt_3_cfg_0; + int tpg_vc0_dt_3_cfg_1; + int tpg_vc0_dt_3_cfg_2; + int tpg_vc0_throttle; + int tpg_vc0_color_bar_cfa_color0; + int tpg_vc0_color_bar_cfa_color1; + int 
tpg_vc0_color_bar_cfa_color2; + int tpg_vc0_color_bar_cfa_color3; + int tpg_vc1_gain_cfg; + int tpg_vc1_shdr_cfg; + int tpg_vc1_cfg0; + int tpg_vc1_lfsr_seed; + int tpg_vc1_hbi_cfg; + int tpg_vc1_vbi_cfg; + int tpg_vc1_color_bars_cfg; + int tpg_vc1_dt_0_cfg_0; + int tpg_vc1_dt_0_cfg_1; + int tpg_vc1_dt_0_cfg_2; + int tpg_vc1_dt_1_cfg_0; + int tpg_vc1_dt_1_cfg_1; + int tpg_vc1_dt_1_cfg_2; + int tpg_vc1_dt_2_cfg_0; + int tpg_vc1_dt_2_cfg_1; + int tpg_vc1_dt_2_cfg_2; + int tpg_vc1_dt_3_cfg_0; + int tpg_vc1_dt_3_cfg_1; + int tpg_vc1_dt_3_cfg_2; + int tpg_vc1_throttle; + int tpg_vc1_color_bar_cfa_color0; + int tpg_vc1_color_bar_cfa_color1; + int tpg_vc1_color_bar_cfa_color2; + int tpg_vc1_color_bar_cfa_color3; + int tpg_vc2_gain_cfg; + int tpg_vc2_shdr_cfg; + int tpg_vc2_cfg0; + int tpg_vc2_lfsr_seed; + int tpg_vc2_hbi_cfg; + int tpg_vc2_vbi_cfg; + int tpg_vc2_color_bars_cfg; + int tpg_vc2_dt_0_cfg_0; + int tpg_vc2_dt_0_cfg_1; + int tpg_vc2_dt_0_cfg_2; + int tpg_vc2_dt_1_cfg_0; + int tpg_vc2_dt_1_cfg_1; + int tpg_vc2_dt_1_cfg_2; + int tpg_vc2_dt_2_cfg_0; + int tpg_vc2_dt_2_cfg_1; + int tpg_vc2_dt_2_cfg_2; + int tpg_vc2_dt_3_cfg_0; + int tpg_vc2_dt_3_cfg_1; + int tpg_vc2_dt_3_cfg_2; + int tpg_vc2_throttle; + int tpg_vc2_color_bar_cfa_color0; + int tpg_vc2_color_bar_cfa_color1; + int tpg_vc2_color_bar_cfa_color2; + int tpg_vc2_color_bar_cfa_color3; + int tpg_vc3_gain_cfg; + int tpg_vc3_shdr_cfg; + int tpg_vc3_cfg0; + int tpg_vc3_lfsr_seed; + int tpg_vc3_hbi_cfg; + int tpg_vc3_vbi_cfg; + int tpg_vc3_color_bars_cfg; + int tpg_vc3_dt_0_cfg_0; + int tpg_vc3_dt_0_cfg_1; + int tpg_vc3_dt_0_cfg_2; + int tpg_vc3_dt_1_cfg_0; + int tpg_vc3_dt_1_cfg_1; + int tpg_vc3_dt_1_cfg_2; + int tpg_vc3_dt_2_cfg_0; + int tpg_vc3_dt_2_cfg_1; + int tpg_vc3_dt_2_cfg_2; + int tpg_vc3_dt_3_cfg_0; + int tpg_vc3_dt_3_cfg_1; + int tpg_vc3_dt_3_cfg_2; + int tpg_vc3_throttle; + int tpg_vc3_color_bar_cfa_color0; + int tpg_vc3_color_bar_cfa_color1; + int tpg_vc3_color_bar_cfa_color2; + int 
tpg_vc3_color_bar_cfa_color3; + int top_irq_status; + int top_irq_mask; + int top_irq_clear; + int top_irq_set; + int irq_cmd; + int tpg_ctrl_cmd; + int test_bus_ctrl; + int spare; + /* Register fields below */ + + int gen_shift; + int rev_shift; + int step_shift; + int violation_shift; + int auto_load_pattern_shift; + int auto_load_en_shift; + int addr_shift; + int lut_sel_shift; + int data_shift; + int auto_load_status_clr_shift; + int auto_load_cmd_shift; + int auto_load_done_shift; + int bank_sel_shift; + int gain_shift; + int num_active_vc_shift; + int overlap_shdr_en_shift; + int vc_dt_pattern_id_shift; + int num_active_lanes_shift; + int phy_sel_shift; + int num_frames_shift; + int num_batch_shift; + int num_active_dt_shift; + int fe_dis_shift; + int fs_dis_shift; + int vc_num_shift; + int seed_shift; + int hbi_clk_cnt_shift; + int vbi_line_cnt_shift; + int size_y_shift; + int size_x_shift; + int xcfa_en_shift; + int rotate_period_shift; + int pix_intl_hdr_mode_shift; + int noise_en_shift; + int split_en_shift; + int qcfa_en_shift; + int pix_pattern_shift; + int frame_width_shift; + int frame_height_shift; + int crc_xor_mask_shift; + int ecc_xor_mask_shift; + int nfi_ssm_mode_en_shift; + int data_type_shift; + int encode_format_shift; + int user_specified_payload_shift; + int payload_mode_shift; + int pattern_shift; + int array15_shift; + int array14_shift; + int array13_shift; + int array12_shift; + int array11_shift; + int array10_shift; + int array9_shift; + int array8_shift; + int array7_shift; + int array6_shift; + int array5_shift; + int array4_shift; + int array3_shift; + int array2_shift; + int array1_shift; + int array0_shift; + int array31_shift; + int array30_shift; + int array29_shift; + int array28_shift; + int array27_shift; + int array26_shift; + int array25_shift; + int array24_shift; + int array23_shift; + int array22_shift; + int array21_shift; + int array20_shift; + int array19_shift; + int array18_shift; + int array17_shift; + int 
array16_shift; + int array47_shift; + int array46_shift; + int array45_shift; + int array44_shift; + int array43_shift; + int array42_shift; + int array41_shift; + int array40_shift; + int array39_shift; + int array38_shift; + int array37_shift; + int array36_shift; + int array35_shift; + int array34_shift; + int array33_shift; + int array32_shift; + int array63_shift; + int array62_shift; + int array61_shift; + int array60_shift; + int array59_shift; + int array58_shift; + int array57_shift; + int array56_shift; + int array55_shift; + int array54_shift; + int array53_shift; + int array52_shift; + int array51_shift; + int array50_shift; + int array49_shift; + int array48_shift; + int shdr_offset_num_batch_shift; + int shdr_line_offset1_shift; + int shdr_line_offset0_shift; + int tpg_done_status_shift; + int rup_done_status_shift; + int status_vec_shift; + int rup_done_mask_vec_shift; + int tpg_done_mask_vec_shift; + int rup_done_clear_vec_shift; + int tpg_done_clear_vec_shift; + int set_vec_shift; + int set_shift; + int clear_shift; + int test_en_cmd_shift; + int hw_reset_shift; + int test_bus_en_shift; + int test_bus_sel_shift; + int spare_shift; + int async_mode_min_hbi; + int async_mode_min_hbi_shift; + /* Custome Variables below */ +}; + +/** + * @brief initialize the tpg hw instance + * + * @param hw : tpg hw instance + * @param data : argument for initialize + * + * @return : 0 on success + */ +int tpg_hw_v_1_4_init(struct tpg_hw *hw, void *data); + +/** + * @brief start tpg hw + * + * @param hw : tpg hw instance + * @param data : tpg hw instance data + * + * @return : 0 on success + */ +int tpg_hw_v_1_4_start(struct tpg_hw *hw, void *data); + +/** + * @brief stop tpg hw + * + * @param hw : tpg hw instance + * @param data : argument for tpg hw stop + * + * @return : 0 on success + */ +int tpg_hw_v_1_4_stop(struct tpg_hw *hw, void *data); + +/** + * @brief process a command send from hw layer + * + * @param hw : tpg hw instance + * @param cmd : command to 
process + * @param arg : argument corresponding to command + * + * @return : 0 on success + */ +int tpg_hw_v_1_4_process_cmd(struct tpg_hw *hw, + uint32_t cmd, void *arg); + +/** + * @brief dump hw status registers + * + * @param hw : tpg hw instance + * @param data : argument for status dump + * + * @return : 0 on success + */ +int tpg_hw_v_1_4_dump_status(struct tpg_hw *hw, void *data); + +/** + * @brief hw layer initialization + * + * @param hw : tpg hw instance + * + * @return : 0 on success + */ +int tpg_1_4_layer_init(struct tpg_hw *hw); + +/** + * @brief handle tpg irq + * + * @param hw : tpg hw instance + * + * @return : IRQ_HANDLED on success + */ +irqreturn_t tpg_hw_v_1_4_handle_irq(struct tpg_hw *hw); + +#endif /* __TPG_HW_V_1_4_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4_0.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4_0.h new file mode 100644 index 0000000000..07f8766ca8 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4_0.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __TPG_HW_V_1_4_0_H__ +#define __TPG_HW_V_1_4_0_H__ + + +struct cam_tpg_ver_1_4_reg_offset cam_tpg104_reg = { + /* Register offsets below */ + .hw_version = 0x0, + .hw_status = 0x4, + .dmi_cfg = 0x8, + .dmi_lut_cfg = 0xc, + .dmi_data = 0x10, + .dmi_data_1 = 0x14, + .dmi_data_2 = 0x18, + .dmi_data_3 = 0x1c, + .dmi_data_4 = 0x20, + .dmi_data_5 = 0x24, + .dmi_data_6 = 0x28, + .dmi_data_7 = 0x2c, + .dmi_data_8 = 0x30, + .dmi_data_9 = 0x34, + .dmi_data_10 = 0x38, + .dmi_data_11 = 0x3c, + .dmi_data_12 = 0x40, + .dmi_data_13 = 0x44, + .dmi_data_14 = 0x48, + .dmi_data_15 = 0x4c, + .dmi_cmd = 0x50, + .dmi_status = 0x54, + .dmi_lut_bank_cfg = 0x58, + .module_lut_bank_cfg = 0x5c, + .tpg_vc0_gain_cfg = 0x60, + .tpg_ctrl = 0x64, + .tpg_vc0_cfg0 = 0x68, + .tpg_vc0_lfsr_seed = 0x6c, + .tpg_vc0_hbi_cfg = 0x70, + .tpg_vc0_vbi_cfg = 0x74, + .tpg_vc0_color_bars_cfg = 0x78, + .tpg_vc0_dt_0_cfg_0 = 0x7c, + .tpg_vc0_dt_0_cfg_1 = 0x80, + .tpg_vc0_dt_0_cfg_2 = 0x84, + .tpg_vc0_dt_1_cfg_0 = 0x88, + .tpg_vc0_dt_1_cfg_1 = 0x8c, + .tpg_vc0_dt_1_cfg_2 = 0x90, + .tpg_vc0_dt_2_cfg_0 = 0x94, + .tpg_vc0_dt_2_cfg_1 = 0x98, + .tpg_vc0_dt_2_cfg_2 = 0x9c, + .tpg_vc0_dt_3_cfg_0 = 0xa0, + .tpg_vc0_dt_3_cfg_1 = 0xa4, + .tpg_vc0_dt_3_cfg_2 = 0xa8, + .tpg_vc0_throttle = 0xac, + .tpg_vc0_color_bar_cfa_color0 = 0xb0, + .tpg_vc0_color_bar_cfa_color1 = 0xb4, + .tpg_vc0_color_bar_cfa_color2 = 0xb8, + .tpg_vc0_color_bar_cfa_color3 = 0xbc, + .tpg_vc1_gain_cfg = 0xc0, + .tpg_vc1_shdr_cfg = 0xc4, + .tpg_vc1_cfg0 = 0xc8, + .tpg_vc1_lfsr_seed = 0xcc, + .tpg_vc1_hbi_cfg = 0xd0, + .tpg_vc1_vbi_cfg = 0xd4, + .tpg_vc1_color_bars_cfg = 0xd8, + .tpg_vc1_dt_0_cfg_0 = 0xdc, + .tpg_vc1_dt_0_cfg_1 = 0xe0, + .tpg_vc1_dt_0_cfg_2 = 0xe4, + .tpg_vc1_dt_1_cfg_0 = 0xe8, + .tpg_vc1_dt_1_cfg_1 = 0xec, + .tpg_vc1_dt_1_cfg_2 = 0xf0, + .tpg_vc1_dt_2_cfg_0 = 0xf4, + .tpg_vc1_dt_2_cfg_1 = 0xf8, + .tpg_vc1_dt_2_cfg_2 = 0xfc, + .tpg_vc1_dt_3_cfg_0 = 0x100, + .tpg_vc1_dt_3_cfg_1 = 0x104, + .tpg_vc1_dt_3_cfg_2 = 0x108, + 
.tpg_vc1_throttle = 0x10c, + .tpg_vc1_color_bar_cfa_color0 = 0x110, + .tpg_vc1_color_bar_cfa_color1 = 0x114, + .tpg_vc1_color_bar_cfa_color2 = 0x118, + .tpg_vc1_color_bar_cfa_color3 = 0x11c, + .tpg_vc2_gain_cfg = 0x120, + .tpg_vc2_shdr_cfg = 0x124, + .tpg_vc2_cfg0 = 0x128, + .tpg_vc2_lfsr_seed = 0x12c, + .tpg_vc2_hbi_cfg = 0x130, + .tpg_vc2_vbi_cfg = 0x134, + .tpg_vc2_color_bars_cfg = 0x138, + .tpg_vc2_dt_0_cfg_0 = 0x13c, + .tpg_vc2_dt_0_cfg_1 = 0x140, + .tpg_vc2_dt_0_cfg_2 = 0x144, + .tpg_vc2_dt_1_cfg_0 = 0x148, + .tpg_vc2_dt_1_cfg_1 = 0x14c, + .tpg_vc2_dt_1_cfg_2 = 0x150, + .tpg_vc2_dt_2_cfg_0 = 0x154, + .tpg_vc2_dt_2_cfg_1 = 0x158, + .tpg_vc2_dt_2_cfg_2 = 0x15c, + .tpg_vc2_dt_3_cfg_0 = 0x160, + .tpg_vc2_dt_3_cfg_1 = 0x164, + .tpg_vc2_dt_3_cfg_2 = 0x168, + .tpg_vc2_throttle = 0x16c, + .tpg_vc2_color_bar_cfa_color0 = 0x170, + .tpg_vc2_color_bar_cfa_color1 = 0x174, + .tpg_vc2_color_bar_cfa_color2 = 0x178, + .tpg_vc2_color_bar_cfa_color3 = 0x17c, + .tpg_vc3_gain_cfg = 0x180, + .tpg_vc3_shdr_cfg = 0x184, + .tpg_vc3_cfg0 = 0x188, + .tpg_vc3_lfsr_seed = 0x18c, + .tpg_vc3_hbi_cfg = 0x190, + .tpg_vc3_vbi_cfg = 0x194, + .tpg_vc3_color_bars_cfg = 0x198, + .tpg_vc3_dt_0_cfg_0 = 0x19c, + .tpg_vc3_dt_0_cfg_1 = 0x1a0, + .tpg_vc3_dt_0_cfg_2 = 0x1a4, + .tpg_vc3_dt_1_cfg_0 = 0x1a8, + .tpg_vc3_dt_1_cfg_1 = 0x1ac, + .tpg_vc3_dt_1_cfg_2 = 0x1b0, + .tpg_vc3_dt_2_cfg_0 = 0x1b4, + .tpg_vc3_dt_2_cfg_1 = 0x1b8, + .tpg_vc3_dt_2_cfg_2 = 0x1bc, + .tpg_vc3_dt_3_cfg_0 = 0x1c0, + .tpg_vc3_dt_3_cfg_1 = 0x1c4, + .tpg_vc3_dt_3_cfg_2 = 0x1c8, + .tpg_vc3_throttle = 0x1cc, + .tpg_vc3_color_bar_cfa_color0 = 0x1d0, + .tpg_vc3_color_bar_cfa_color1 = 0x1d4, + .tpg_vc3_color_bar_cfa_color2 = 0x1d8, + .tpg_vc3_color_bar_cfa_color3 = 0x1dc, + .top_irq_status = 0x1e0, + .top_irq_mask = 0x1e4, + .top_irq_clear = 0x1e8, + .top_irq_set = 0x1ec, + .irq_cmd = 0x1f0, + .tpg_ctrl_cmd = 0x1f4, + .test_bus_ctrl = 0x1f8, + .spare = 0x1fc, + + /* Register fields below */ + .gen_shift = 0x1c, + .rev_shift = 0x10, + 
.step_shift = 0x0, + .violation_shift = 0x0, + .auto_load_pattern_shift = 0x15, + .auto_load_en_shift = 0x14, + .addr_shift = 0x0, + .lut_sel_shift = 0x0, + .data_shift = 0x0, + .auto_load_status_clr_shift = 0x1, + .auto_load_cmd_shift = 0x0, + .auto_load_done_shift = 0x0, + .bank_sel_shift = 0x0, + .gain_shift = 0x0, + .num_active_vc_shift = 0x1e, + .overlap_shdr_en_shift = 0xa, + .vc_dt_pattern_id_shift = 0x6, + .num_active_lanes_shift = 0x4, + .phy_sel_shift = 0x3, + .num_frames_shift = 0x10, + .num_batch_shift = 0xc, + .num_active_dt_shift = 0x8, + .fe_dis_shift = 0x7, + .fs_dis_shift = 0x6, + .vc_num_shift = 0x0, + .seed_shift = 0x0, + .hbi_clk_cnt_shift = 0x0, + .vbi_line_cnt_shift = 0x0, + .size_y_shift = 0x1c, + .size_x_shift = 0x18, + .xcfa_en_shift = 0x10, + .rotate_period_shift = 0x8, + .pix_intl_hdr_mode_shift = 0x6, + .noise_en_shift = 0x5, + .split_en_shift = 0x4, + .qcfa_en_shift = 0x3, + .pix_pattern_shift = 0x0, + .frame_width_shift = 0x10, + .frame_height_shift = 0x0, + .crc_xor_mask_shift = 0x10, + .ecc_xor_mask_shift = 0x8, + .nfi_ssm_mode_en_shift = 0x7, + .data_type_shift = 0x0, + .encode_format_shift = 0x1c, + .user_specified_payload_shift = 0x4, + .payload_mode_shift = 0x0, + .pattern_shift = 0x0, + .array15_shift = 0x1e, + .array14_shift = 0x1c, + .array13_shift = 0x1a, + .array12_shift = 0x18, + .array11_shift = 0x16, + .array10_shift = 0x14, + .array9_shift = 0x12, + .array8_shift = 0x10, + .array7_shift = 0xe, + .array6_shift = 0xc, + .array5_shift = 0xa, + .array4_shift = 0x8, + .array3_shift = 0x6, + .array2_shift = 0x4, + .array1_shift = 0x2, + .array0_shift = 0x0, + .array31_shift = 0x1e, + .array30_shift = 0x1c, + .array29_shift = 0x1a, + .array28_shift = 0x18, + .array27_shift = 0x16, + .array26_shift = 0x14, + .array25_shift = 0x12, + .array24_shift = 0x10, + .array23_shift = 0xe, + .array22_shift = 0xc, + .array21_shift = 0xa, + .array20_shift = 0x8, + .array19_shift = 0x6, + .array18_shift = 0x4, + .array17_shift = 0x2, + 
.array16_shift = 0x0, + .array47_shift = 0x1e, + .array46_shift = 0x1c, + .array45_shift = 0x1a, + .array44_shift = 0x18, + .array43_shift = 0x16, + .array42_shift = 0x14, + .array41_shift = 0x12, + .array40_shift = 0x10, + .array39_shift = 0xe, + .array38_shift = 0xc, + .array37_shift = 0xa, + .array36_shift = 0x8, + .array35_shift = 0x6, + .array34_shift = 0x4, + .array33_shift = 0x2, + .array32_shift = 0x0, + .array63_shift = 0x1e, + .array62_shift = 0x1c, + .array61_shift = 0x1a, + .array60_shift = 0x18, + .array59_shift = 0x16, + .array58_shift = 0x14, + .array57_shift = 0x12, + .array56_shift = 0x10, + .array55_shift = 0xe, + .array54_shift = 0xc, + .array53_shift = 0xa, + .array52_shift = 0x8, + .array51_shift = 0x6, + .array50_shift = 0x4, + .array49_shift = 0x2, + .array48_shift = 0x0, + .shdr_offset_num_batch_shift = 0x10, + .shdr_line_offset1_shift = 0x10, + .shdr_line_offset0_shift = 0x0, + .tpg_done_status_shift = 0x0, + .rup_done_status_shift = 0x1, + .status_vec_shift = 0x0, + .rup_done_mask_vec_shift = 0x1, + .tpg_done_mask_vec_shift = 0x0, + .rup_done_clear_vec_shift = 0x1, + .tpg_done_clear_vec_shift = 0x0, + .set_vec_shift = 0x0, + .set_shift = 0x4, + .clear_shift = 0x0, + .test_en_cmd_shift = 0x4, + .hw_reset_shift = 0x0, + .test_bus_en_shift = 0x0, + .test_bus_sel_shift = 0x4, + .spare_shift = 0x0, + .async_mode_min_hbi = 0x0A, + .async_mode_min_hbi_shift = 0x10, + /* Custom Variables below */ +}; + +#endif /* __TPG_HW_V_1_4_0_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4_data.h b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4_data.h new file mode 100644 index 0000000000..942dcde865 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sensor_module/cam_tpg/tpg_hw/tpg_hw_v_1_4/tpg_hw_v_1_4_data.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. 
All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __TPG_HW_V_1_4_DATA_H__ +#define __TPG_HW_V_1_4_DATA_H__ + +#include "../tpg_hw.h" +#include "../tpg_hw_common.h" +#include "tpg_hw_v_1_4.h" +#include "tpg_hw_v_1_4_0.h" + +struct tpg_hw_ops tpg_hw_v_1_4_ops = { + .start = tpg_hw_v_1_4_start, + .stop = tpg_hw_v_1_4_stop, + .init = tpg_hw_v_1_4_init, + .process_cmd = tpg_hw_v_1_4_process_cmd, + .dump_status = tpg_hw_v_1_4_dump_status, + .write_settings = tpg_hw_write_settings, + .handle_irq = tpg_hw_v_1_4_handle_irq, +}; + +struct tpg_hw_info tpg_v_1_4_hw_info = { + .version = TPG_HW_VERSION_1_4, + .max_vc_channels = 4, + .max_dt_channels_per_vc = 4, + .ops = &tpg_hw_v_1_4_ops, + .hw_data = &cam_tpg104_reg, + .layer_init = &tpg_1_4_layer_init, + .xcfa_debug = 0, + .shdr_overlap = 0, + .shdr_offset_num_batch = 0, + .shdr_line_offset0 = 0x6c, + .shdr_line_offset1 = 0x6c, +}; + +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_smmu/cam_smmu_api.c b/qcom/opensource/camera-kernel/drivers/cam_smmu/cam_smmu_api.c new file mode 100644 index 0000000000..0517853254 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_smmu/cam_smmu_api.c @@ -0,0 +1,5850 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "cam_compat.h" +#include "cam_smmu_api.h" +#include "cam_debug_util.h" +#include "camera_main.h" +#include "cam_trace.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +#define SHARED_MEM_POOL_GRANULARITY 16 + +#define IOMMU_INVALID_DIR -1 +#define HANDLE_INIT (-1) +#define CAM_SMMU_CB_MAX 6 +#define CAM_SMMU_SHARED_HDL_MAX 6 +#define CAM_SMMU_MULTI_REGION_MAX 2 + +#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK)) +#define GET_SMMU_MULTI_CLIENT_IDX(x) (((x) >> MULTI_CLIENT_REGION_SHIFT)) +#define CAM_SMMU_HDL_VALIDATE(x, y) ((x) != ((y) & CAM_SMMU_HDL_MASK)) + +#define CAM_SMMU_MONITOR_MAX_ENTRIES 100 +#define CAM_SMMU_BUF_TRACKING_POOL 600 +#define CAM_SMMU_INC_MONITOR_HEAD(head, ret) \ + div_u64_rem(atomic64_add_return(1, head),\ + CAM_SMMU_MONITOR_MAX_ENTRIES, (ret)) + +static int g_num_pf_handled = 1; +module_param(g_num_pf_handled, int, 0644); + +struct cam_fw_alloc_info *icp_fw; +struct cam_smmu_buffer_tracker *buf_tracking_pool; + +struct cam_smmu_work_payload { + int idx; + struct iommu_domain *domain; + struct device *dev; + unsigned long iova; + int flags; + void *token; + struct list_head list; +}; + +enum cam_io_coherency_mode { + CAM_SMMU_NO_COHERENCY, + CAM_SMMU_DMA_COHERENT, + CAM_SMMU_DMA_COHERENT_HINT_CACHED, +}; + +enum cam_protection_type { + CAM_PROT_INVALID, + CAM_NON_SECURE, + CAM_SECURE, + CAM_PROT_MAX, +}; + +enum cam_iommu_type { + CAM_SMMU_INVALID, + CAM_QSMMU, + CAM_ARM_SMMU, + CAM_SMMU_MAX, +}; + +enum cam_smmu_buf_state { + CAM_SMMU_BUFF_EXIST, + CAM_SMMU_BUFF_NOT_EXIST, +}; + +enum cam_smmu_init_dir { + CAM_SMMU_TABLE_INIT, + CAM_SMMU_TABLE_DEINIT, +}; + +struct scratch_mapping { + void *bitmap; + size_t bits; + unsigned int order; + dma_addr_t base; +}; + +struct cam_smmu_monitor { + struct timespec64 
timestamp; + bool is_map; + + /* map-unmap info */ + int ion_fd; + unsigned long i_ino; + dma_addr_t paddr; + size_t len; + enum cam_smmu_region_id region_id; +}; + +struct cam_smmu_debug { + struct dentry *dentry; + uint32_t fatal_pf_mask; + bool cb_dump_enable; + bool map_profile_enable; + bool disable_buf_tracking; +}; + +struct cam_smmu_subregion_info { + enum cam_smmu_subregion_id subregion_id; + struct cam_smmu_region_info subregion_info; + + int32_t mapped_refcnt; + unsigned long mapped_client_mask; + bool is_allocated; +}; + +struct cam_smmu_nested_region_info { + uint32_t num_subregions; + struct cam_smmu_subregion_info subregions[ + CAM_SMMU_SUBREGION_MAX]; + struct cam_smmu_region_info region_info; + + int32_t mapped_refcnt; + unsigned long mapped_client_mask; + bool is_allocated; + bool subregion_support; +}; + +/* + * This struct holds info on multiple regions of the + * same type, and each such region can have multiple + * unique subregions + * + * A0 --> a : b : c + * A1 --> a : c + * + * Here A0 & A1 are 2 umbrella regions of the same type, + * and a, b & c are the subregions within them + */ +struct cam_smmu_multi_region_info { + int32_t num_regions; + struct cam_smmu_nested_region_info nested_regions[CAM_SMMU_MULTI_REGION_MAX]; +}; + +struct cam_context_bank_info { + struct device *dev; + struct iommu_domain *domain; + dma_addr_t va_start; + size_t va_len; + const char *name[CAM_SMMU_SHARED_HDL_MAX]; + bool is_secure; + uint8_t scratch_buf_support; + uint8_t firmware_support; + uint8_t shared_support; + uint8_t io_support; + uint8_t secheap_support; + uint8_t fwuncached_region_support; + uint8_t qdss_support; + uint8_t device_region_support; + dma_addr_t qdss_phy_addr; + bool is_secheap_allocated; + bool is_qdss_allocated; + bool non_fatal_faults_en; + bool stall_disable_en; + + struct scratch_mapping scratch_map; + struct gen_pool *shared_mem_pool[CAM_SMMU_MULTI_REGION_MAX]; + + /* Regular singleton regions */ + struct cam_smmu_region_info 
scratch_info; + struct cam_smmu_region_info secheap_info; + + /* Regions capable of having multiple of them */ + struct cam_smmu_multi_region_info firmware_info; + struct cam_smmu_multi_region_info shared_info; + struct cam_smmu_multi_region_info io_info; + struct cam_smmu_multi_region_info fwuncached_region; + struct cam_smmu_multi_region_info device_region; + struct cam_smmu_multi_region_info qdss_info; + + struct list_head smmu_buf_list; + struct list_head smmu_buf_kernel_list; + struct mutex lock; + int handle; + enum cam_smmu_ops_param state; + + void (*handler[CAM_SMMU_CB_MAX]) (struct cam_smmu_pf_info *pf_info); + void *token[CAM_SMMU_CB_MAX]; + int cb_count; + int secure_count; + int pf_count; + size_t io_mapping_size; + size_t shared_mapping_size; + bool is_mul_client; + int device_count; + int num_shared_hdl; + int num_multi_regions; + const char *multi_region_clients[CAM_SMMU_MULTI_REGION_MAX]; + enum cam_io_coherency_mode coherency_mode; + + /* discard iova - non-zero values are valid */ + dma_addr_t discard_iova_start; + size_t discard_iova_len; + + atomic64_t monitor_head; + struct cam_smmu_monitor monitor_entries[CAM_SMMU_MONITOR_MAX_ENTRIES]; +}; + +struct cam_iommu_cb_set { + struct cam_context_bank_info *cb_info; + u32 cb_num; + u32 cb_init_count; + struct work_struct smmu_work; + struct mutex payload_list_lock; + struct list_head payload_list; + struct cam_smmu_debug debug_cfg; + struct list_head buf_tracker_free_list; + struct cam_csf_version csf_version; + spinlock_t s_lock; + bool force_cache_allocs; + bool need_shared_buffer_padding; + bool is_expanded_memory; + bool is_track_buf_disabled; +}; + +static const struct of_device_id msm_cam_smmu_dt_match[] = { + { .compatible = "qcom,msm-cam-smmu", }, + { .compatible = "qcom,msm-cam-smmu-cb", }, + { .compatible = "qcom,msm-cam-smmu-fw-dev", }, + {} +}; + +struct cam_dma_buff_info { + struct dma_buf *buf; + struct dma_buf_attachment *attach; + struct sg_table *table; + enum dma_data_direction dir; 
+ enum cam_smmu_region_id region_id; + int iommu_dir; + int map_count; + struct kref ref_count; + dma_addr_t paddr; + struct list_head list; + int ion_fd; + unsigned long i_ino; + size_t len; + size_t phys_len; + bool is_internal; + struct timespec64 ts; + int multi_client_device_idx; +}; + +struct cam_sec_buff_info { + struct dma_buf *buf; + struct dma_buf_attachment *attach; + struct sg_table *table; + enum dma_data_direction dir; + int map_count; + struct kref ref_count; + dma_addr_t paddr; + struct list_head list; + int ion_fd; + unsigned long i_ino; + size_t len; +}; + +struct cam_smmu_mini_dump_cb_info { + struct cam_smmu_monitor mapping[CAM_SMMU_MONITOR_MAX_ENTRIES]; + struct cam_smmu_region_info scratch_info; + struct cam_smmu_region_info firmware_info; + struct cam_smmu_region_info shared_info; + struct cam_smmu_region_info io_info; + struct cam_smmu_region_info secheap_info; + struct cam_smmu_region_info fwuncached_region; + struct cam_smmu_region_info device_mem_region; + struct cam_smmu_region_info qdss_info; + struct region_buf_info secheap_buf; + struct region_buf_info fwuncached_reg_buf; + char name[CAM_SMMU_SHARED_HDL_MAX][16]; + size_t va_len; + size_t io_mapping_size; + size_t shared_mapping_size; + size_t discard_iova_len; + int handle; + int device_count; + int num_shared_hdl; + int cb_count; + int secure_count; + int pf_count; + dma_addr_t va_start; + dma_addr_t discard_iova_start; + dma_addr_t qdss_phy_addr; + enum cam_io_coherency_mode coherency_mode; + enum cam_smmu_ops_param state; + uint8_t scratch_buf_support; + uint8_t firmware_support; + uint8_t shared_support; + uint8_t io_support; + uint8_t secheap_support; + uint8_t fwuncached_region_support; + uint8_t qdss_support; + bool is_mul_client; + bool is_secure; + bool is_secheap_allocated; + bool is_fwuncached_buf_allocated; + bool is_qdss_allocated; +}; + +struct cam_smmu_mini_dump_info { + uint32_t cb_num; + struct cam_smmu_mini_dump_cb_info *cb; +}; + +static struct cam_iommu_cb_set 
iommu_cb_set; + +static enum dma_data_direction cam_smmu_translate_dir( + enum cam_smmu_map_dir dir); + +static bool cam_smmu_is_hdl_nonunique_or_null(int hdl); + +static int cam_smmu_create_iommu_handle(int idx); + +static int cam_smmu_create_add_handle_in_table(char *name, + int *hdl); + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx, + int ion_fd, struct dma_buf *dma_buf); + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx, + struct dma_buf *buf); + +static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx, + int ion_fd, struct dma_buf *dma_buf); + +static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map, + dma_addr_t base, size_t size, + int order); + +static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping, + size_t size, + dma_addr_t *iova); + +static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping, + dma_addr_t addr, size_t size); + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx, + dma_addr_t virt_addr); + +static int cam_smmu_map_buffer_and_add_to_list(int handle, int ion_fd, + bool dis_delayed_unmap, enum dma_data_direction dma_dir, + dma_addr_t *paddr_ptr, size_t *len_ptr, + enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf, + struct kref **ref_count); + +static int cam_smmu_map_kernel_buffer_and_add_to_list(int handle, + struct dma_buf *buf, enum dma_data_direction dma_dir, + dma_addr_t *paddr_ptr, size_t *len_ptr, + enum cam_smmu_region_id region_id); + +static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx, + size_t virt_len, + size_t phys_len, + unsigned int iommu_dir, + dma_addr_t *virt_addr); + +static int cam_smmu_unmap_buf_and_remove_from_list( + struct cam_dma_buff_info *mapping_info, int idx); + +static int cam_smmu_free_scratch_buffer_remove_from_list( + struct cam_dma_buff_info *mapping_info, + int idx); + +static void cam_smmu_clean_user_buffer_list(int idx); 
+ +static void cam_smmu_clean_kernel_buffer_list(int idx); + +static void cam_smmu_dump_cb_info(int idx); + +static void cam_smmu_print_user_list(int idx); + +static void cam_smmu_print_kernel_list(int idx); + +static void cam_smmu_print_table(void); + +static int cam_smmu_probe(struct platform_device *pdev); + +static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr, bool *in_map_region); + +static void cam_smmu_update_monitor_array( + struct cam_context_bank_info *cb_info, + bool is_map, + struct cam_dma_buff_info *mapping_info) +{ + int iterator; + + CAM_SMMU_INC_MONITOR_HEAD(&cb_info->monitor_head, &iterator); + + CAM_GET_TIMESTAMP(cb_info->monitor_entries[iterator].timestamp); + + cb_info->monitor_entries[iterator].is_map = is_map; + cb_info->monitor_entries[iterator].ion_fd = mapping_info->ion_fd; + cb_info->monitor_entries[iterator].i_ino = mapping_info->i_ino; + cb_info->monitor_entries[iterator].paddr = mapping_info->paddr; + cb_info->monitor_entries[iterator].len = mapping_info->len; + cb_info->monitor_entries[iterator].region_id = mapping_info->region_id; +} + +static void cam_smmu_dump_monitor_array( + struct cam_context_bank_info *cb_info) +{ + int i = 0; + int64_t state_head = 0; + uint32_t index, num_entries, oldest_entry; + uint64_t ms, hrs, min, sec; + + state_head = atomic64_read(&cb_info->monitor_head); + + if (state_head == -1) { + return; + } else if (state_head < CAM_SMMU_MONITOR_MAX_ENTRIES) { + num_entries = state_head; + oldest_entry = 0; + } else { + num_entries = CAM_SMMU_MONITOR_MAX_ENTRIES; + div_u64_rem(state_head + 1, + CAM_SMMU_MONITOR_MAX_ENTRIES, &oldest_entry); + } + + CAM_INFO(CAM_SMMU, + "========Dumping monitor information for cb %s===========", + cb_info->name[0]); + + index = oldest_entry; + + for (i = 0; i < num_entries; i++) { + CAM_CONVERT_TIMESTAMP_FORMAT(cb_info->monitor_entries[index].timestamp, + hrs, min, sec, ms); + + CAM_INFO(CAM_SMMU, + "**** %llu:%llu:%llu.%llu : Index[%d] [%s] : ion_fd=%d i_ino=%lu 
start=0x%llx end=0x%llx len=%zu region=%d", + hrs, min, sec, ms, + index, + cb_info->monitor_entries[index].is_map ? "MAP" : "UNMAP", + cb_info->monitor_entries[index].ion_fd, + cb_info->monitor_entries[index].i_ino, + cb_info->monitor_entries[index].paddr, + cb_info->monitor_entries[index].paddr + + cb_info->monitor_entries[index].len, + cb_info->monitor_entries[index].len, + cb_info->monitor_entries[index].region_id); + + index = (index + 1) % CAM_SMMU_MONITOR_MAX_ENTRIES; + } +} + +bool cam_smmu_need_shared_buffer_padding(void) +{ + return iommu_cb_set.need_shared_buffer_padding; +} + +bool cam_smmu_is_expanded_memory(void) +{ + return iommu_cb_set.is_expanded_memory; +} + +int cam_smmu_need_force_alloc_cached(bool *force_alloc_cached) +{ + int idx; + uint32_t curr_mode = 0, final_mode = 0; + bool validate = false; + + if (!force_alloc_cached) { + CAM_ERR(CAM_SMMU, "Invalid arg"); + return -EINVAL; + } + + CAM_INFO(CAM_SMMU, "force_cache_allocs=%d", + iommu_cb_set.force_cache_allocs); + + /* + * First validate whether all SMMU CBs are properly setup to comply with + * iommu_cb_set.force_alloc_cached flag. + * This helps as a validation check to make sure a valid DT combination + * is set for a given chipset. + */ + for (idx = 0; idx < iommu_cb_set.cb_num; idx++) { + /* ignore secure cb for now. need to revisit */ + if (iommu_cb_set.cb_info[idx].is_secure) + continue; + + curr_mode = iommu_cb_set.cb_info[idx].coherency_mode; + + /* + * 1. No coherency: + * We can map both CACHED and UNCACHED buffers into same CB. + * We need to allocate UNCACHED buffers for Cmdbuffers + * and Shared Buffers. UNCAHE support must exists with memory + * allocators (ion or dma-buf-heaps) for CmdBuffers, + * SharedBuffers to work - as it is difficult to do + * cache operations on these buffers in camera design. + * ImageBuffers can be CACHED or UNCACHED. If CACHED, clients + * need to make required CACHE operations. + * Cannot force all allocations to CACHE. + * 2. 
dma-coherent: + * We cannot map CACHED and UNCACHED buffers into the same CB + * This means, we must force allocate all buffers to be + * CACHED. + * 3. dma-coherent-hint-cached + * We can map both CACHED and UNCACHED buffers into the same + * CB. So any option is fine force_cache_allocs. + * Forcing to cache is preferable though. + * + * Other rule we are enforcing is - all camera CBs (except + * secure CB) must have same coherency mode set. Assume one CB + * is having no_coherency mode and other CB is having + * dma_coherent. For no_coherency CB to work - we must not force + * buffers to be CACHE (exa cmd buffers), for dma_coherent mode + * we must force all buffers to be CACHED. But at the time of + * allocation, we dont know to which CB we will be mapping this + * buffer. So it becomes difficult to generalize cache + * allocations and io coherency mode that we want to support. + * So, to simplify, all camera CBs will have same mode. + */ + + CAM_DBG(CAM_SMMU, "[%s] : curr_mode=%d", + iommu_cb_set.cb_info[idx].name[0], curr_mode); + + if (curr_mode == CAM_SMMU_NO_COHERENCY) { + if (iommu_cb_set.force_cache_allocs) { + CAM_ERR(CAM_SMMU, + "[%s] Can't force alloc cache with no coherency", + iommu_cb_set.cb_info[idx].name[0]); + return -EINVAL; + } + } else if (curr_mode == CAM_SMMU_DMA_COHERENT) { + if (!iommu_cb_set.force_cache_allocs) { + CAM_ERR(CAM_SMMU, + "[%s] Must force cache allocs for dma coherent device", + iommu_cb_set.cb_info[idx].name[0]); + return -EINVAL; + } + } + + if (validate) { + if (curr_mode != final_mode) { + CAM_ERR(CAM_SMMU, + "[%s] CBs having different coherency modes final=%d, curr=%d", + iommu_cb_set.cb_info[idx].name[0], + final_mode, curr_mode); + return -EINVAL; + } + } else { + validate = true; + final_mode = curr_mode; + } + } + + /* + * To be more accurate - if this flag is TRUE and if this buffer will + * be mapped to external devices like CVP - we need to ensure we do + * one of below : + * 1. 
CVP CB having dma-coherent or dma-coherent-hint-cached + * 2. camera/cvp sw layers properly doing required cache operations. We + * cannot anymore assume these buffers (camera <--> cvp) are uncached + */ + *force_alloc_cached = iommu_cb_set.force_cache_allocs; + + return 0; +} + +static struct cam_smmu_subregion_info *cam_smmu_find_subregion( + enum cam_smmu_subregion_id subregion_id, + struct cam_smmu_subregion_info *subregions) +{ + int i; + + for (i = 0; i < CAM_SMMU_SUBREGION_MAX; i++) { + if (subregions[i].subregion_id == subregion_id) + return &subregions[i]; + } + + return NULL; +} + +static int cam_smmu_validate_nested_region_idx( + struct cam_context_bank_info *cb_info, + int32_t *nested_reg_idx, int32_t region_id) +{ + /* Array indexing starts from 0, subtracting number of regions by 1 */ + switch (region_id) { + case CAM_SMMU_REGION_FIRMWARE: + if ((*nested_reg_idx) > (cb_info->firmware_info.num_regions - 1)) + goto err; + + break; + case CAM_SMMU_REGION_SHARED: + if ((*nested_reg_idx) > (cb_info->shared_info.num_regions - 1)) + goto err; + + break; + case CAM_SMMU_REGION_FWUNCACHED: + if ((*nested_reg_idx) > (cb_info->fwuncached_region.num_regions - 1)) + goto err; + + break; + /* For device and IO, if there is no additional region, default to index 0 */ + case CAM_SMMU_REGION_DEVICE: + if ((*nested_reg_idx) > (cb_info->device_region.num_regions - 1)) + *nested_reg_idx = 0; + + break; + case CAM_SMMU_REGION_IO: + if ((*nested_reg_idx) > (cb_info->io_info.num_regions - 1)) + *nested_reg_idx = 0; + + break; + case CAM_SMMU_REGION_QDSS: + fallthrough; + case CAM_SMMU_REGION_SECHEAP: + fallthrough; + case CAM_SMMU_REGION_SCRATCH: + *nested_reg_idx = 0; + break; + default: + CAM_DBG(CAM_SMMU, + "Invalid region id=%u on cb=%s to get nested region index", + region_id, cb_info->name[0]); + break; + } + + return 0; + +err: + CAM_ERR(CAM_SMMU, + "Nested region idx=%d exceeds max regions=%d for region_id=%d in cb=%s", + nested_reg_idx, 
cb_info->shared_info.num_regions, region_id, cb_info->name[0]); + return -EINVAL; +} + +static inline int cam_smmu_get_multiregion_client_dev_idx( + struct cam_context_bank_info *cb_info, int32_t multi_client_device_idx, + int32_t region_id, int32_t *region_idx) +{ + if (cb_info->num_multi_regions && multi_client_device_idx) { + *region_idx = multi_client_device_idx; + return cam_smmu_validate_nested_region_idx(cb_info, region_idx, region_id); + } + + return 0; +} + +static void cam_smmu_page_fault_work(struct work_struct *work) +{ + int j; + int idx; + struct cam_smmu_work_payload *payload; + uint32_t buf_info = 0; + struct cam_smmu_pf_info pf_info; + bool in_map = false; + + mutex_lock(&iommu_cb_set.payload_list_lock); + if (list_empty(&iommu_cb_set.payload_list)) { + CAM_ERR(CAM_SMMU, "Payload list empty"); + mutex_unlock(&iommu_cb_set.payload_list_lock); + return; + } + + payload = list_first_entry(&iommu_cb_set.payload_list, + struct cam_smmu_work_payload, + list); + list_del(&payload->list); + mutex_unlock(&iommu_cb_set.payload_list_lock); + + cam_check_iommu_faults(payload->domain, &pf_info); + + /* Dereference the payload to call the handler */ + idx = payload->idx; + /* If fault address is null, found closest buffer is inaccurate */ + if (payload->iova) + buf_info = cam_smmu_find_closest_mapping(idx, (void *)payload->iova, &in_map); + + if (buf_info != 0) + CAM_INFO(CAM_SMMU, "closest buf 0x%x idx %d", buf_info, idx); + + pf_info.domain = payload->domain; + pf_info.dev = payload->dev; + pf_info.iova = payload->iova; + pf_info.flags = payload->flags; + pf_info.buf_info = buf_info; + pf_info.is_secure = iommu_cb_set.cb_info[idx].is_secure; + pf_info.in_map_region = in_map; + + for (j = 0; j < CAM_SMMU_CB_MAX; j++) { + if ((iommu_cb_set.cb_info[idx].handler[j])) { + pf_info.token = iommu_cb_set.cb_info[idx].token[j]; + iommu_cb_set.cb_info[idx].handler[j](&pf_info); + } + } + cam_smmu_dump_cb_info(idx); + CAM_MEM_FREE(payload); +} + +static void 
cam_smmu_dump_cb_info(int idx) +{ + struct cam_dma_buff_info *mapping, *mapping_temp; + struct cam_smmu_nested_region_info *nested_reg_info; + size_t shared_reg_len = 0, io_reg_len = 0; + size_t shared_free_len = 0, io_free_len = 0; + int32_t i = 0, j; + uint64_t ms, hrs, min, sec; + struct timespec64 current_ts; + struct cam_context_bank_info *cb_info = + &iommu_cb_set.cb_info[idx]; + + if (cb_info->shared_support) { + for (j = 0; j < cb_info->shared_info.num_regions; j++) { + nested_reg_info = &cb_info->shared_info.nested_regions[j]; + shared_reg_len += nested_reg_info->region_info.iova_len; + } + shared_free_len = shared_reg_len - cb_info->shared_mapping_size; + } + + if (cb_info->io_support) { + for (j = 0; j < cb_info->io_info.num_regions; j++) { + nested_reg_info = &cb_info->io_info.nested_regions[j]; + io_reg_len += nested_reg_info->region_info.iova_len; + } + io_free_len = io_reg_len - cb_info->io_mapping_size; + } + + CAM_GET_TIMESTAMP(current_ts); + CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms); + + CAM_ERR(CAM_SMMU, + "********** %llu:%llu:%llu:%llu Context bank dump for %s **********", + hrs, min, sec, ms, cb_info->name[0]); + CAM_ERR(CAM_SMMU, + "Usage: shared_usage=%lu io_usage=%lu shared_free=%lu io_free=%lu", + cb_info->shared_mapping_size, cb_info->io_mapping_size, + shared_free_len, io_free_len); + + if (iommu_cb_set.debug_cfg.cb_dump_enable) { + list_for_each_entry_safe(mapping, mapping_temp, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + i++; + CAM_CONVERT_TIMESTAMP_FORMAT(mapping->ts, hrs, min, sec, ms); + CAM_ERR(CAM_SMMU, + "%llu:%llu:%llu:%llu: %u ion_fd=%d i_ino=%lu start=0x%lx end=0x%lx len=%lu region=%d", + hrs, min, sec, ms, i, mapping->ion_fd, mapping->i_ino, + mapping->paddr, + ((uint64_t)mapping->paddr + (uint64_t)mapping->len), + mapping->len, mapping->region_id); + } + + cam_smmu_dump_monitor_array(&iommu_cb_set.cb_info[idx]); + } +} + +static void cam_smmu_print_user_list(int idx) +{ + struct cam_dma_buff_info 
*mapping; + + CAM_ERR(CAM_SMMU, "index = %d", idx); + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + CAM_ERR(CAM_SMMU, + "ion_fd = %d, i_ino=%lu, paddr= 0x%lx, len = %lu, region = %d", + mapping->ion_fd, mapping->i_ino, mapping->paddr, mapping->len, + mapping->region_id); + } +} + +static void cam_smmu_print_kernel_list(int idx) +{ + struct cam_dma_buff_info *mapping; + + CAM_ERR(CAM_SMMU, "index = %d", idx); + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) { + CAM_ERR(CAM_SMMU, + "dma_buf = %pK, i_ino = %lu, paddr= 0x%lx, len = %lu, region = %d", + mapping->buf, mapping->i_ino, mapping->paddr, + mapping->len, mapping->region_id); + } +} + +static void cam_smmu_print_table(void) +{ + int i, j; + + for (i = 0; i < iommu_cb_set.cb_num; i++) { + for (j = 0; j < iommu_cb_set.cb_info[i].num_shared_hdl; j++) { + CAM_ERR(CAM_SMMU, + "i= %d, handle= %d, name_addr=%pK name %s", + i, (int)iommu_cb_set.cb_info[i].handle, + (void *)iommu_cb_set.cb_info[i].name[j], + iommu_cb_set.cb_info[i].name[j]); + } + CAM_ERR(CAM_SMMU, "dev = %pK", iommu_cb_set.cb_info[i].dev); + } +} + +static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr, bool *in_map_region) +{ + struct cam_dma_buff_info *mapping, *closest_mapping = NULL; + unsigned long start_addr, end_addr, current_addr; + uint32_t buf_info = 0; + + long delta = 0, lowest_delta = 0; + + current_addr = (unsigned long)vaddr; + *in_map_region = false; + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + start_addr = (unsigned long)mapping->paddr; + end_addr = (unsigned long)mapping->paddr + mapping->len; + + if (start_addr <= current_addr && current_addr <= end_addr) { + closest_mapping = mapping; + CAM_INFO(CAM_SMMU, + "Found va 0x%lx in:0x%lx-0x%lx, fd %d i_ino %lu cb:%s", + current_addr, start_addr, + end_addr, mapping->ion_fd, mapping->i_ino, + iommu_cb_set.cb_info[idx].name[0]); + goto end; + } else { + if 
(start_addr > current_addr) + delta = start_addr - current_addr; + else + delta = current_addr - end_addr - 1; + + if (delta < lowest_delta || lowest_delta == 0) { + lowest_delta = delta; + closest_mapping = mapping; + } + CAM_DBG(CAM_SMMU, + "approx va %lx not in range: %lx-%lx fd = %0x i_ino %lu", + current_addr, start_addr, + end_addr, mapping->ion_fd, mapping->i_ino); + } + } + +end: + if (closest_mapping) { + buf_info = closest_mapping->ion_fd; + start_addr = (unsigned long)closest_mapping->paddr; + end_addr = (unsigned long)closest_mapping->paddr + closest_mapping->len; + if (start_addr <= current_addr && current_addr < end_addr) + *in_map_region = true; + CAM_INFO(CAM_SMMU, + "Faulting addr 0x%lx closest map fd %d i_ino %lu %llu-%llu 0x%lx-0x%lx buf=%pK", + current_addr, closest_mapping->ion_fd, closest_mapping->i_ino, + mapping->len, closest_mapping->len, + (unsigned long)closest_mapping->paddr, + (unsigned long)closest_mapping->paddr + closest_mapping->len, + closest_mapping->buf); + } else + CAM_ERR(CAM_SMMU, + "Cannot find vaddr:%lx in SMMU %s virt address", + current_addr, iommu_cb_set.cb_info[idx].name[0]); + + return buf_info; +} + +void cam_smmu_set_client_page_fault_handler(int handle, + void (*handler_cb)(struct cam_smmu_pf_info *pf_info), void *token) +{ + int idx, i = 0; + + if (!token || (handle == HANDLE_INIT)) { + CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle"); + return; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, handle); + return; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return; + } + + if (handler_cb) { + if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) { + CAM_ERR(CAM_SMMU, + "%s Should not regiester more handlers", + iommu_cb_set.cb_info[idx].name[0]); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return; + } + + iommu_cb_set.cb_info[idx].cb_count++; + + for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) { + if (iommu_cb_set.cb_info[idx].token[i] == NULL) { + iommu_cb_set.cb_info[idx].token[i] = token; + iommu_cb_set.cb_info[idx].handler[i] = + handler_cb; + break; + } + } + } else { + for (i = 0; i < CAM_SMMU_CB_MAX; i++) { + if (iommu_cb_set.cb_info[idx].token[i] == token) { + iommu_cb_set.cb_info[idx].token[i] = NULL; + iommu_cb_set.cb_info[idx].handler[i] = + NULL; + iommu_cb_set.cb_info[idx].cb_count--; + break; + } + } + if (i == CAM_SMMU_CB_MAX) + CAM_ERR(CAM_SMMU, + "Error: hdl %x no matching tokens: %s", + handle, iommu_cb_set.cb_info[idx].name[0]); + } + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); +} + +void cam_smmu_unset_client_page_fault_handler(int handle, void *token) +{ + int idx, i = 0; + + if (!token || (handle == HANDLE_INIT)) { + CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle"); + return; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, handle); + return; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return; + } + + for (i = 0; i < CAM_SMMU_CB_MAX; i++) { + if (iommu_cb_set.cb_info[idx].token[i] == token) { + iommu_cb_set.cb_info[idx].token[i] = NULL; + iommu_cb_set.cb_info[idx].handler[i] = + NULL; + iommu_cb_set.cb_info[idx].cb_count--; + break; + } + } + if (i == CAM_SMMU_CB_MAX) + CAM_ERR(CAM_SMMU, "Error: hdl %x no matching tokens: %s", + handle, iommu_cb_set.cb_info[idx].name[0]); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); +} + +static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain, + struct device *dev, unsigned long iova, + int flags, void *token) +{ + char *cb_name; + int idx; + struct cam_smmu_work_payload *payload; + + if (!token) { + CAM_ERR(CAM_SMMU, + "token is NULL, domain = %pK, device = %pK,iova = 0x%lx, flags = 0x%x", + domain, dev, iova, flags); + return 0; + } + + cb_name = (char *)token; + /* Check whether it is in the table */ + for (idx = 0; idx < iommu_cb_set.cb_num; idx++) { + if (!strcmp(iommu_cb_set.cb_info[idx].name[0], cb_name)) + break; + } + + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "index is invalid, index = %d, token = %s, cb_num = %s", + idx, cb_name, iommu_cb_set.cb_num); + return 0; + } + + if (++iommu_cb_set.cb_info[idx].pf_count > g_num_pf_handled) { + CAM_INFO_RATE_LIMIT(CAM_SMMU, "PF already handled %d %d %d", + g_num_pf_handled, idx, + iommu_cb_set.cb_info[idx].pf_count); + return 0; + } + + payload = CAM_MEM_ZALLOC(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC); + if (!payload) + return 0; + + payload->domain = domain; + payload->dev = dev; + payload->iova = iova; + payload->flags = flags; + payload->token = token; + payload->idx = 
idx; + + mutex_lock(&iommu_cb_set.payload_list_lock); + list_add_tail(&payload->list, &iommu_cb_set.payload_list); + mutex_unlock(&iommu_cb_set.payload_list_lock); + + cam_smmu_page_fault_work(&iommu_cb_set.smmu_work); + + /* + * If cb has faults marked as non-fatal, but if the debugfs is set for panic + * trigger a panic on fault + */ + if (iommu_cb_set.cb_info[idx].non_fatal_faults_en) { + if (iommu_cb_set.debug_cfg.fatal_pf_mask & BIT(idx)) + CAM_TRIGGER_PANIC("SMMU context fault from soc: %s[cb_idx: %u]", + iommu_cb_set.cb_info[idx].name[0], idx); + } + + return -ENOSYS; +} + +int cam_smmu_is_cb_non_fatal_fault_en(int smmu_hdl, bool *non_fatal_en) +{ + int idx; + + if (smmu_hdl == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Invalid iommu handle %d", smmu_hdl); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Invalid handle or idx. idx: %d, hdl: 0x%x", idx, smmu_hdl); + return -EINVAL; + } + + *non_fatal_en = iommu_cb_set.cb_info[idx].non_fatal_faults_en; + + return 0; +} + +void cam_smmu_reset_cb_page_fault_cnt(void) +{ + int idx; + + for (idx = 0; idx < iommu_cb_set.cb_num; idx++) + iommu_cb_set.cb_info[idx].pf_count = 0; + +} + +static int cam_smmu_translate_dir_to_iommu_dir( + enum cam_smmu_map_dir dir) +{ + switch (dir) { + case CAM_SMMU_MAP_READ: + return IOMMU_READ; + case CAM_SMMU_MAP_WRITE: + return IOMMU_WRITE; + case CAM_SMMU_MAP_RW: + return IOMMU_READ|IOMMU_WRITE; + case CAM_SMMU_MAP_INVALID: + default: + CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d", dir); + break; + } + return IOMMU_INVALID_DIR; +} + +static enum dma_data_direction cam_smmu_translate_dir( + enum cam_smmu_map_dir dir) +{ + switch (dir) { + case CAM_SMMU_MAP_READ: + return DMA_FROM_DEVICE; + case CAM_SMMU_MAP_WRITE: + return DMA_TO_DEVICE; + case CAM_SMMU_MAP_RW: + return DMA_BIDIRECTIONAL; + case CAM_SMMU_MAP_INVALID: + default: + CAM_ERR(CAM_SMMU, "Error: Direction is invalid. 
dir = %d", + (int)dir); + break; + } + return DMA_NONE; +} + +void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops) +{ + unsigned int i; + int j = 0; + + for (i = 0; i < iommu_cb_set.cb_num; i++) { + if (ops == CAM_SMMU_TABLE_INIT) + mutex_init(&iommu_cb_set.cb_info[i].lock); + + mutex_lock(&iommu_cb_set.cb_info[i].lock); + + iommu_cb_set.cb_info[i].handle = HANDLE_INIT; + INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list); + INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_kernel_list); + iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH; + iommu_cb_set.cb_info[i].dev = NULL; + iommu_cb_set.cb_info[i].cb_count = 0; + iommu_cb_set.cb_info[i].pf_count = 0; + for (j = 0; j < CAM_SMMU_CB_MAX; j++) { + iommu_cb_set.cb_info[i].token[j] = NULL; + iommu_cb_set.cb_info[i].handler[j] = NULL; + } + + mutex_unlock(&iommu_cb_set.cb_info[i].lock); + + if (ops != CAM_SMMU_TABLE_INIT) + mutex_destroy(&iommu_cb_set.cb_info[i].lock); + } +} + +static bool cam_smmu_is_hdl_nonunique_or_null(int hdl) +{ + int i; + + if ((hdl == HANDLE_INIT) || (!hdl)) { + CAM_DBG(CAM_SMMU, "iommu handle: %d is not valid", hdl); + return 1; + } + + for (i = 0; i < iommu_cb_set.cb_num; i++) { + if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT) + continue; + + if (iommu_cb_set.cb_info[i].handle == hdl) { + CAM_DBG(CAM_SMMU, "iommu handle %d conflicts", + (int)hdl); + return 1; + } + } + return 0; +} + +/** + * use low 2 bytes for handle cookie + */ +static int cam_smmu_create_iommu_handle(int idx) +{ + int rand, hdl = 0; + + get_random_bytes(&rand, COOKIE_NUM_BYTE); + hdl = GET_SMMU_HDL(idx, rand); + CAM_DBG(CAM_SMMU, "create handle value = %x", (int)hdl); + return hdl; +} + +static int cam_smmu_attach_device(int idx) +{ + int rc; + struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx]; + + /* attach the mapping to device */ + rc = iommu_attach_device(cb->domain, cb->dev); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Error: ARM IOMMU attach failed. 
ret = %d", + rc); + rc = -ENODEV; + } + + return rc; +} + +static inline int cam_smmu_update_multiregion_dev_id( + struct cam_context_bank_info *cb_info, char *name, + int dev_cnt, int *hdl) +{ + int k; + + for (k = 0; k < cb_info->num_multi_regions; k++) { + if (!strcmp(cb_info->multi_region_clients[k], name)) { + *hdl |= (dev_cnt << MULTI_CLIENT_REGION_SHIFT); + CAM_DBG(CAM_SMMU, "%s got shared multi region handle 0x%x", + name, *hdl); + return 0; + } + } + + CAM_ERR(CAM_SMMU, "%s not found as client in bank: %s", + name, cb_info->name[0]); + + return -EINVAL; +} +static int cam_smmu_create_add_handle_in_table(char *name, + int *hdl) +{ + int i, j, rc = -EINVAL; + int handle; + + /* create handle and add in the iommu hardware table */ + for (i = 0; i < iommu_cb_set.cb_num; i++) { + for (j = 0; j < iommu_cb_set.cb_info[i].num_shared_hdl; j++) { + if (strcmp(iommu_cb_set.cb_info[i].name[j], name)) + continue; + + if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT) { + mutex_lock(&iommu_cb_set.cb_info[i].lock); + /* make sure handle is unique and non-zero*/ + do { + handle = + cam_smmu_create_iommu_handle(i); + } while (cam_smmu_is_hdl_nonunique_or_null( + handle)); + + /* put handle in the table */ + iommu_cb_set.cb_info[i].handle = handle; + iommu_cb_set.cb_info[i].cb_count = 0; + if (iommu_cb_set.cb_info[i].is_secure) + iommu_cb_set.cb_info[i].secure_count++; + + if (iommu_cb_set.cb_info[i].is_mul_client) + iommu_cb_set.cb_info[i].device_count++; + + *hdl = handle; + CAM_DBG(CAM_SMMU, "%s creates handle 0x%x", name, handle); + mutex_unlock(&iommu_cb_set.cb_info[i].lock); + rc = 0; + goto end; + } else { + mutex_lock(&iommu_cb_set.cb_info[i].lock); + if (iommu_cb_set.cb_info[i].is_secure) { + iommu_cb_set.cb_info[i].secure_count++; + *hdl = iommu_cb_set.cb_info[i].handle; + mutex_unlock( + &iommu_cb_set.cb_info[i].lock); + return 0; + } + + if (iommu_cb_set.cb_info[i].is_mul_client) { + *hdl = iommu_cb_set.cb_info[i].handle; + rc = 0; + if 
(iommu_cb_set.cb_info[i].num_multi_regions) + rc = cam_smmu_update_multiregion_dev_id( + &iommu_cb_set.cb_info[i], name, + iommu_cb_set.cb_info[i].device_count, hdl); + + iommu_cb_set.cb_info[i].device_count++; + mutex_unlock(&iommu_cb_set.cb_info[i].lock); + goto end; + } + + CAM_ERR(CAM_SMMU, + "Error: %s already got handle 0x%x", + name, iommu_cb_set.cb_info[i].handle); + mutex_unlock(&iommu_cb_set.cb_info[i].lock); + rc = -EALREADY; + goto end; + } + } + } + + CAM_ERR(CAM_SMMU, "Error: Cannot find name %s or all handle exist", + name); + cam_smmu_print_table(); +end: + return rc; +} + +static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map, + dma_addr_t base, size_t size, + int order) +{ + unsigned int count = size >> (PAGE_SHIFT + order); + unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); + int err = 0; + + if (!count) { + err = -EINVAL; + CAM_ERR(CAM_SMMU, "Page count is zero, size passed = %zu", + size); + goto bail; + } + + scratch_map->bitmap = CAM_MEM_ZALLOC(bitmap_size, GFP_KERNEL); + if (!scratch_map->bitmap) { + err = -ENOMEM; + goto bail; + } + + scratch_map->base = base; + scratch_map->bits = BITS_PER_BYTE * bitmap_size; + scratch_map->order = order; + +bail: + return err; +} + +static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping, + size_t size, + dma_addr_t *iova) +{ + unsigned int order = get_order(size); + unsigned int align = 0; + unsigned int count, start; + + count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) + + (1 << mapping->order) - 1) >> mapping->order; + + /* + * Transparently, add a guard page to the total count of pages + * to be allocated + */ + count++; + + if (order > mapping->order) + align = (1 << (order - mapping->order)) - 1; + + start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0, + count, align); + + if (start > mapping->bits) + return -ENOMEM; + + bitmap_set(mapping->bitmap, start, count); + *iova = mapping->base + (start << (mapping->order + PAGE_SHIFT)); + + return 0; 
+} + +static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping, + dma_addr_t addr, size_t size) +{ + unsigned int start = (addr - mapping->base) >> + (mapping->order + PAGE_SHIFT); + unsigned int count = ((size >> PAGE_SHIFT) + + (1 << mapping->order) - 1) >> mapping->order; + + if (!addr) { + CAM_ERR(CAM_SMMU, "Error: Invalid address"); + return -EINVAL; + } + + if (start + count > mapping->bits) { + CAM_ERR(CAM_SMMU, "Error: Invalid page bits in scratch map"); + return -EINVAL; + } + + /* + * Transparently, add a guard page to the total count of pages + * to be freed + */ + count++; + bitmap_clear(mapping->bitmap, start, count); + + return 0; +} + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx, + dma_addr_t virt_addr) +{ + struct cam_dma_buff_info *mapping; + + list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list, + list) { + if (mapping->paddr == virt_addr) { + CAM_DBG(CAM_SMMU, "Found virtual address %lx", + (unsigned long)virt_addr); + return mapping; + } + } + + CAM_ERR(CAM_SMMU, "Error: Cannot find virtual address %lx by index %d", + (unsigned long)virt_addr, idx); + return NULL; +} + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx, + int ion_fd, struct dma_buf *dmabuf) +{ + struct cam_dma_buff_info *mapping; + unsigned long i_ino; + + if (ion_fd < 0) { + CAM_ERR(CAM_SMMU, "Invalid fd %d", ion_fd); + return NULL; + } + + i_ino = file_inode(dmabuf->file)->i_ino; + + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, + list) { + if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) { + CAM_DBG(CAM_SMMU, "find ion_fd %d i_ino %lu", ion_fd, i_ino); + return mapping; + } + } + + CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d, fd %d i_ino %lu", + idx, ion_fd, i_ino); + + return NULL; +} + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx, + struct dma_buf *buf) +{ + struct cam_dma_buff_info *mapping; + + if 
(!buf) { + CAM_ERR(CAM_SMMU, "Invalid dma_buf"); + return NULL; + } + + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, + list) { + if (mapping->buf == buf) { + CAM_DBG(CAM_SMMU, "find dma_buf %pK", buf); + return mapping; + } + } + + CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx); + + return NULL; +} + +static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx, + int ion_fd, struct dma_buf *dmabuf) +{ + struct cam_sec_buff_info *mapping; + unsigned long i_ino; + + i_ino = file_inode(dmabuf->file)->i_ino; + + list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list, + list) { + if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) { + CAM_DBG(CAM_SMMU, "find ion_fd %d, i_ino %lu", ion_fd, i_ino); + return mapping; + } + } + CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d i_ino %lu by index %d", + ion_fd, i_ino, idx); + return NULL; +} + +static void cam_smmu_clean_user_buffer_list(int idx) +{ + int ret; + struct cam_dma_buff_info *mapping_info, *temp; + + list_for_each_entry_safe(mapping_info, temp, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + CAM_DBG(CAM_SMMU, "Free mapping address %pK, i = %d, fd = %d, i_ino = %lu", + (void *)mapping_info->paddr, idx, + mapping_info->ion_fd, mapping_info->i_ino); + + if (mapping_info->ion_fd == 0xDEADBEEF) + /* Clean up scratch buffers */ + ret = cam_smmu_free_scratch_buffer_remove_from_list( + mapping_info, idx); + else + /* Clean up regular mapped buffers */ + ret = cam_smmu_unmap_buf_and_remove_from_list( + mapping_info, + idx); + + if (ret < 0) { + CAM_ERR(CAM_SMMU, "Buffer delete failed: idx = %d", + idx); + CAM_ERR(CAM_SMMU, + "Buffer delete failed: addr = 0x%lx, fd = %d, i_ino = %lu", + mapping_info->paddr, + mapping_info->ion_fd, mapping_info->i_ino); + /* + * Ignore this error and continue to delete other + * buffers in the list + */ + continue; + } + } +} + +static void cam_smmu_clean_kernel_buffer_list(int idx) +{ + int ret; + 
struct cam_dma_buff_info *mapping_info, *temp; + + list_for_each_entry_safe(mapping_info, temp, + &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) { + CAM_DBG(CAM_SMMU, + "Free mapping address %pK, i = %d, dma_buf = %pK", + (void *)mapping_info->paddr, idx, + mapping_info->buf); + + /* Clean up regular mapped buffers */ + ret = cam_smmu_unmap_buf_and_remove_from_list( + mapping_info, + idx); + + if (ret < 0) { + CAM_ERR(CAM_SMMU, + "Buffer delete in kernel list failed: idx = %d", + idx); + CAM_ERR(CAM_SMMU, + "Buffer delete failed: addr = 0x%lx, dma_buf = %pK", + mapping_info->paddr, mapping_info->buf); + /* + * Ignore this error and continue to delete other + * buffers in the list + */ + continue; + } + } +} + +static int cam_smmu_attach(int idx) +{ + int ret; + + if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) { + ret = -EALREADY; + } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) { + ret = cam_smmu_attach_device(idx); + if (ret < 0) { + CAM_ERR(CAM_SMMU, "Error: ATTACH fail"); + return -ENODEV; + } + iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH; + ret = 0; + } else { + CAM_ERR(CAM_SMMU, "Error: Not detach/attach: %d", + iommu_cb_set.cb_info[idx].state); + ret = -EINVAL; + } + + return ret; +} + +static int cam_smmu_detach_device(int idx) +{ + int rc = 0; + struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx]; + + /* detach the mapping to device if not already detached */ + if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) { + rc = -EALREADY; + } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) { + iommu_detach_device(cb->domain, cb->dev); + iommu_cb_set.cb_info[idx].state = CAM_SMMU_DETACH; + } + + return rc; +} + +static int cam_smmu_alloc_iova(size_t size, int multi_client_device_idx, + int32_t smmu_hdl, unsigned long *iova) +{ + int rc = 0, shared_mem_pool_idx = 0; + int idx; + unsigned long vaddr = 0; + + if (!iova || !size || (smmu_hdl == HANDLE_INIT)) { + CAM_ERR(CAM_SMMU, "Error: Input args are 
invalid"); + return -EINVAL; + } + + CAM_DBG(CAM_SMMU, "Allocating iova size = %zu for smmu hdl=%X", + size, smmu_hdl); + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, smmu_hdl); + return -EINVAL; + } + + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, smmu_hdl)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, smmu_hdl); + rc = -EINVAL; + goto get_addr_end; + } + + if (!iommu_cb_set.cb_info[idx].shared_support) { + CAM_ERR(CAM_SMMU, + "Error: Shared memory not supported for hdl = %X", + smmu_hdl); + rc = -EINVAL; + goto get_addr_end; + } + + rc = cam_smmu_get_multiregion_client_dev_idx(&iommu_cb_set.cb_info[idx], + multi_client_device_idx, CAM_SMMU_REGION_SHARED, &shared_mem_pool_idx); + if (rc) + goto get_addr_end; + + vaddr = gen_pool_alloc( + iommu_cb_set.cb_info[idx].shared_mem_pool[shared_mem_pool_idx], size); + + if (!vaddr) + return -ENOMEM; + + *iova = vaddr; + +get_addr_end: + return rc; +} + +static int cam_smmu_free_iova(unsigned long iova, size_t size, + int multi_client_device_idx, int32_t smmu_hdl) +{ + int rc = 0, idx; + int shared_mem_pool_idx = 0; + + if (!size || (smmu_hdl == HANDLE_INIT)) { + CAM_ERR(CAM_SMMU, "Error: Input args are invalid"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, smmu_hdl); + return -EINVAL; + } + + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, smmu_hdl)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, smmu_hdl); + rc = -EINVAL; + goto get_addr_end; + } + + rc = cam_smmu_get_multiregion_client_dev_idx(&iommu_cb_set.cb_info[idx], + multi_client_device_idx, CAM_SMMU_REGION_SHARED, &shared_mem_pool_idx); + if (rc) + goto get_addr_end; + + gen_pool_free(iommu_cb_set.cb_info[idx].shared_mem_pool[shared_mem_pool_idx], + iova, size); + +get_addr_end: + return rc; +} + +int cam_smmu_alloc_firmware(int32_t smmu_hdl, + dma_addr_t *iova, + uintptr_t *cpuva, + size_t *len) +{ + int rc; + int32_t idx, multi_client_device_idx, nested_reg_idx = 0; + size_t firmware_len = 0, firmware_start = 0; + struct iommu_domain *domain; + struct cam_smmu_nested_region_info *nested_reg_info; + struct cam_context_bank_info *cb; + + if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) { + CAM_ERR(CAM_SMMU, "Error: Input args are invalid"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, smmu_hdl); + rc = -EINVAL; + goto end; + } + + cb = &iommu_cb_set.cb_info[idx]; + if (cam_smmu_get_multiregion_client_dev_idx(cb, + multi_client_device_idx, CAM_SMMU_REGION_FIRMWARE, &nested_reg_idx)) + return -EINVAL; + + if (!cb->firmware_support) { + CAM_ERR(CAM_SMMU, + "Firmware memory not supported for this SMMU handle"); + rc = -EINVAL; + goto end; + } + + mutex_lock(&cb->lock); + nested_reg_info = &cb->firmware_info.nested_regions[nested_reg_idx]; + if (nested_reg_info->is_allocated) { + CAM_ERR(CAM_SMMU, "Trying to allocate twice"); + rc = -ENOMEM; + goto unlock_and_end; + } + + firmware_len = nested_reg_info->region_info.iova_len; + firmware_start = nested_reg_info->region_info.iova_start; + CAM_DBG(CAM_SMMU, "Firmware area from DT, firmware_start: %zu, firmware_len: %zu", + firmware_start, firmware_len); + + rc = cam_reserve_icp_fw(&icp_fw[multi_client_device_idx], firmware_len); + if (rc) + goto unlock_and_end; + else + CAM_DBG(CAM_SMMU, "DMA alloc returned fw = %pK, hdl = %pK", + icp_fw[multi_client_device_idx].fw_kva, + (void *)icp_fw[multi_client_device_idx].fw_hdl); + + domain = cb->domain; + + /* + * Revisit this - what should we map this with - CACHED or UNCACHED? + * chipsets using dma-coherent-hint-cached - leaving it like this is + * fine as we can map both CACHED and UNCACHED on same CB. 
+ * But on chipsets which use dma-coherent - all the buffers that are + * being mapped to this CB must be CACHED + */ + rc = cam_iommu_map(domain, + firmware_start, + (phys_addr_t) icp_fw[multi_client_device_idx].fw_hdl, + firmware_len, + IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV); + + if (rc) { + CAM_ERR(CAM_SMMU, "Failed to map FW into IOMMU"); + rc = -ENOMEM; + goto alloc_fail; + } + nested_reg_info->is_allocated = true; + + *iova = firmware_start; + *cpuva = (uintptr_t)icp_fw[multi_client_device_idx].fw_kva; + *len = firmware_len; + mutex_unlock(&cb->lock); + + return rc; + +alloc_fail: + cam_unreserve_icp_fw(&icp_fw[multi_client_device_idx], firmware_len); +unlock_and_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); +end: + return rc; +} +EXPORT_SYMBOL(cam_smmu_alloc_firmware); + +int cam_smmu_dealloc_firmware(int32_t smmu_hdl) +{ + int rc = 0; + int32_t idx, multi_client_device_idx, nested_reg_idx = 0; + size_t firmware_len = 0; + size_t firmware_start = 0; + struct iommu_domain *domain; + size_t unmapped = 0; + struct cam_context_bank_info *cb; + struct cam_smmu_nested_region_info *nested_reg_info; + + if (smmu_hdl == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, smmu_hdl); + rc = -EINVAL; + goto end; + } + + cb = &iommu_cb_set.cb_info[idx]; + if (cam_smmu_get_multiregion_client_dev_idx(cb, + multi_client_device_idx, CAM_SMMU_REGION_FIRMWARE, &nested_reg_idx)) { + rc = -EINVAL; + goto end; + } + + if (!cb->firmware_support) { + CAM_ERR(CAM_SMMU, + "Firmware memory not supported for this SMMU handle"); + rc = -EINVAL; + goto end; + } + + mutex_lock(&cb->lock); + nested_reg_info = &cb->firmware_info.nested_regions[nested_reg_idx]; + if (!nested_reg_info->is_allocated) { + CAM_ERR(CAM_SMMU, + "Trying to deallocate firmware that is not allocated"); + rc = -ENOMEM; + goto unlock_and_end; + } + + firmware_len = nested_reg_info->region_info.iova_len; + firmware_start = nested_reg_info->region_info.iova_start; + domain = cb->domain; + unmapped = iommu_unmap(domain, + firmware_start, + firmware_len); + + if (unmapped != firmware_len) { + CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu", + unmapped, + firmware_len); + rc = -EINVAL; + } + + cam_unreserve_icp_fw(&icp_fw[multi_client_device_idx], firmware_len); + + icp_fw[multi_client_device_idx].fw_kva = NULL; + icp_fw[multi_client_device_idx].fw_hdl = 0; + + nested_reg_info->is_allocated = false; + +unlock_and_end: + mutex_unlock(&cb->lock); +end: + return rc; +} +EXPORT_SYMBOL(cam_smmu_dealloc_firmware); + +static int cam_smmu_retrieve_region_info( + struct cam_context_bank_info *cb_info, + uint32_t region_id, uint32_t subregion_id, + int32_t nested_reg_idx, + bool *is_supported, int32_t **ref_cnt, + unsigned long **mapping_mask, bool **is_allocated, + struct cam_smmu_region_info **region_info) +{ + int rc = 0; + struct cam_smmu_subregion_info *subregion = NULL; + + switch (region_id) { + case CAM_SMMU_REGION_QDSS: { + struct cam_smmu_multi_region_info *qdss_region; + + qdss_region = &cb_info->qdss_info; + + /* No subregion for QDSS */ + *is_supported = cb_info->qdss_support; + *is_allocated = &qdss_region->nested_regions[nested_reg_idx].is_allocated; 
+ *region_info = &qdss_region->nested_regions[nested_reg_idx].region_info; + *ref_cnt = &qdss_region->nested_regions[nested_reg_idx].mapped_refcnt; + *mapping_mask = &qdss_region->nested_regions[nested_reg_idx].mapped_client_mask; + } + break; + case CAM_SMMU_REGION_DEVICE: { + struct cam_smmu_multi_region_info *device_region; + + *is_supported = cb_info->device_region_support; + device_region = &cb_info->device_region; + + if (device_region->nested_regions[nested_reg_idx].subregion_support) { + subregion = cam_smmu_find_subregion(subregion_id, + device_region->nested_regions[nested_reg_idx].subregions); + if (IS_ERR_OR_NULL(subregion)) { + CAM_ERR(CAM_SMMU, + "Failed to find subregion: %d in region: %d cb: %s", + subregion, region_id, cb_info->name[0]); + rc = PTR_ERR(subregion); + goto end; + } + + *is_allocated = &subregion->is_allocated; + *region_info = &subregion->subregion_info; + *ref_cnt = &subregion->mapped_refcnt; + *mapping_mask = &subregion->mapped_client_mask; + } else { + *is_allocated = &device_region->nested_regions[nested_reg_idx].is_allocated; + *region_info = &device_region->nested_regions[nested_reg_idx].region_info; + *ref_cnt = &device_region->nested_regions[nested_reg_idx].mapped_refcnt; + *mapping_mask = + &device_region->nested_regions[nested_reg_idx].mapped_client_mask; + } + } + break; + case CAM_SMMU_REGION_FWUNCACHED: { + struct cam_smmu_multi_region_info *fw_uncached; + + fw_uncached = &cb_info->fwuncached_region; + if (fw_uncached->nested_regions[nested_reg_idx].subregion_support) { + subregion = cam_smmu_find_subregion(subregion_id, + fw_uncached->nested_regions[nested_reg_idx].subregions); + if (IS_ERR_OR_NULL(subregion)) { + CAM_ERR(CAM_SMMU, + "Failed to find subregion: %d in region: %d cb: %s", + subregion, region_id, cb_info->name[0]); + rc = PTR_ERR(subregion); + goto end; + } + + *is_supported = true; + *is_allocated = &subregion->is_allocated; + *region_info = &subregion->subregion_info; + *ref_cnt = 
&subregion->mapped_refcnt; + *mapping_mask = &subregion->mapped_client_mask; + } else { + *is_allocated = &fw_uncached->nested_regions[nested_reg_idx].is_allocated; + *region_info = &fw_uncached->nested_regions[nested_reg_idx].region_info; + *ref_cnt = &fw_uncached->nested_regions[nested_reg_idx].mapped_refcnt; + *mapping_mask = + &fw_uncached->nested_regions[nested_reg_idx].mapped_client_mask; + } + } + break; + default: + CAM_ERR(CAM_SMMU, + "Unsupported region: %u in cb: %s for mapping known phy addr", + region_id, cb_info->name[0]); + rc = -EINVAL; + break; + } + +end: + return rc; +} + +int cam_smmu_map_phy_mem_region(int32_t smmu_hdl, + uint32_t region_id, uint32_t subregion_id, + dma_addr_t *iova, size_t *len) +{ + int rc, idx, prot = IOMMU_READ | IOMMU_WRITE; + int multi_client_device_idx = 0, nested_reg_idx = 0; + int32_t *ref_cnt; + unsigned long *map_client_mask; + size_t region_len = 0; + dma_addr_t region_start; + bool is_supported = false; + bool *is_allocated; + struct iommu_domain *domain; + struct cam_context_bank_info *cb_info; + struct cam_smmu_region_info *region_info = NULL; + + if (!iova || !len || (smmu_hdl == HANDLE_INIT)) { + CAM_ERR(CAM_SMMU, "Error: Input args are invalid"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, smmu_hdl); + rc = -EINVAL; + goto end; + } + + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, smmu_hdl)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, smmu_hdl); + rc = -EINVAL; + goto end; + } + + cb_info = &iommu_cb_set.cb_info[idx]; + rc = cam_smmu_get_multiregion_client_dev_idx(&iommu_cb_set.cb_info[idx], + multi_client_device_idx, region_id, &nested_reg_idx); + if (rc) + goto end; + + rc = cam_smmu_retrieve_region_info(cb_info, region_id, subregion_id, + nested_reg_idx, &is_supported, &ref_cnt, &map_client_mask, + &is_allocated, ®ion_info); + if (rc) + goto end; + + if (!is_supported) { + CAM_ERR(CAM_SMMU, + "region: %u not supported for phy addr mapping in cb: %s", + region_id, cb_info->name[0]); + rc = -EINVAL; + goto end; + } + + mutex_lock(&cb_info->lock); + if ((*is_allocated)) { + if (test_bit(multi_client_device_idx, map_client_mask)) { + CAM_ERR(CAM_SMMU, + "Trying to allocate region: %u [subregion: %u] twice on cb: %s", + region_id, subregion_id, cb_info->name[0]); + rc = -ENOMEM; + goto unlock_and_end; + } + + set_bit(multi_client_device_idx, map_client_mask); + (*ref_cnt)++; + CAM_DBG(CAM_SMMU, + "Trying to allocate region: %u [subregion: %u] on cb: %s for different clients: 0x%x ref_cnt: %u", + region_id, subregion_id, cb_info->name[0], *map_client_mask, *ref_cnt); + + *iova = region_info->iova_start; + *len = region_info->iova_len; + rc = 0; + goto unlock_and_end; + } + + if (!region_info->phy_addr) { + CAM_ERR(CAM_SMMU, + "Invalid phy addr for region: %u [subregion: %u] on cb: %s", + region_id, subregion_id, cb_info->name[0]); + rc = -ENOMEM; + goto unlock_and_end; + } + + region_len = region_info->iova_len; + region_start = region_info->iova_start; + + CAM_DBG(CAM_SMMU, + "mapping region= %u [subregion = %u] for va = 0x%x len = %zu phy = 0x%x prot=0x%x", + region_id, subregion_id, region_start, region_len, region_info->phy_addr, prot); + + 
domain = cb_info->domain; + if (region_id == CAM_SMMU_REGION_DEVICE) + prot |= IOMMU_MMIO; + + /* + * Revisit this - what should we map this with - CACHED or UNCACHED? + * chipsets using dma-coherent-hint-cached - leaving it like this is + * fine as we can map both CACHED and UNCACHED on same CB. + * But on chipsets which use dma-coherent - all the buffers that are + * being mapped to this CB must be CACHED + */ + rc = cam_iommu_map(domain, region_start, region_info->phy_addr, region_len, prot); + if (rc) { + CAM_ERR(CAM_SMMU, "Failed to map region = %u into IOMMU cb = %s", + region_id, cb_info->name[0]); + goto unlock_and_end; + } + + *is_allocated = true; + (*ref_cnt)++; + *iova = region_info->iova_start; + *len = region_info->iova_len; + set_bit(multi_client_device_idx, map_client_mask); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + + return rc; + +unlock_and_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); +end: + return rc; +} +EXPORT_SYMBOL(cam_smmu_map_phy_mem_region); + +int cam_smmu_unmap_phy_mem_region(int32_t smmu_hdl, + uint32_t region_id, uint32_t subregion_id) +{ + int rc = 0, idx; + int multi_client_device_idx = 0, nested_reg_idx = 0; + int32_t *ref_cnt; + unsigned long *map_client_mask; + size_t len = 0; + dma_addr_t start = 0; + struct iommu_domain *domain; + size_t unmapped = 0; + bool is_supported = false; + bool *is_allocated = NULL; + struct cam_context_bank_info *cb_info; + struct cam_smmu_region_info *region_info = NULL; + + if (smmu_hdl == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, smmu_hdl); + rc = -EINVAL; + goto end; + } + + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, smmu_hdl)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, smmu_hdl); + rc = -EINVAL; + goto end; + } + + cb_info = &iommu_cb_set.cb_info[idx]; + rc = cam_smmu_get_multiregion_client_dev_idx(cb_info, + multi_client_device_idx, region_id, &nested_reg_idx); + if (rc) + goto end; + + rc = cam_smmu_retrieve_region_info(cb_info, region_id, subregion_id, + nested_reg_idx, &is_supported, &ref_cnt, &map_client_mask, + &is_allocated, ®ion_info); + if (rc) + goto end; + + if (!is_supported) { + CAM_ERR(CAM_SMMU, + "region: %u not supported for this SMMU handle cb: %s", + region_id, cb_info->name[0]); + rc = -EINVAL; + goto end; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (!(*is_allocated)) { + CAM_ERR(CAM_SMMU, + "Trying to free region: %u that is not allocated on cb: %s", + region_id, cb_info->name[0]); + rc = -ENOMEM; + goto unlock_and_end; + } + + (*ref_cnt)--; + clear_bit(multi_client_device_idx, map_client_mask); + if ((*ref_cnt) > 0) { + CAM_DBG(CAM_SMMU, + "Mapping for region: %u on cb: %s still in use refcnt: %d mapping_mask: 0x%x", + region_id, cb_info->name[0], *ref_cnt, *map_client_mask); + goto unlock_and_end; + } + + len = region_info->iova_len; + start = region_info->iova_start; + domain = cb_info->domain; + unmapped = iommu_unmap(domain, start, len); + + if (unmapped != len) { + CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu", + unmapped, + len); + rc = -EINVAL; + } + + *is_allocated = false; + *ref_cnt = 0; + *map_client_mask = 0; + +unlock_and_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); +end: + return rc; +} +EXPORT_SYMBOL(cam_smmu_unmap_phy_mem_region); + +int cam_smmu_get_io_region_info(int32_t smmu_hdl, + dma_addr_t *iova, size_t *len, + dma_addr_t *discard_iova_start, size_t *discard_iova_len) +{ + int32_t idx, rc; + int 
multi_client_device_idx = 0, nested_reg_idx = 0; + struct cam_smmu_nested_region_info *io_region; + + if (!iova || !len || !discard_iova_start || !discard_iova_len || + (smmu_hdl == HANDLE_INIT)) { + CAM_ERR(CAM_SMMU, "Error: Input args are invalid"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, smmu_hdl); + return -EINVAL; + } + + if (!iommu_cb_set.cb_info[idx].io_support) { + CAM_ERR(CAM_SMMU, + "I/O memory not supported for this SMMU handle"); + return -EINVAL; + } + + rc = cam_smmu_get_multiregion_client_dev_idx(&iommu_cb_set.cb_info[idx], + multi_client_device_idx, CAM_SMMU_REGION_IO, &nested_reg_idx); + if (rc) + return rc; + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + io_region = &iommu_cb_set.cb_info[idx].io_info.nested_regions[nested_reg_idx]; + + *iova = io_region->region_info.iova_start; + *len = io_region->region_info.iova_len; + *discard_iova_start = + io_region->region_info.discard_iova_start; + *discard_iova_len = + io_region->region_info.discard_iova_len; + + CAM_DBG(CAM_SMMU, + "I/O area for hdl = %x Region:[%pK %zu] Discard:[%pK %zu]", + smmu_hdl, *iova, *len, + *discard_iova_start, *discard_iova_len); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + + return 0; +} + +int cam_smmu_get_region_info(int32_t smmu_hdl, + enum cam_smmu_region_id region_id, + struct cam_smmu_region_info *region_info) +{ + int32_t idx, multi_client_device_idx = 0, nested_reg_idx = 0; + struct cam_context_bank_info *cb = NULL; + + if (!region_info) { + CAM_ERR(CAM_SMMU, "Invalid params"); + return -EINVAL; + } + + if (smmu_hdl == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Invalid handle"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + 
CAM_ERR(CAM_SMMU, "Handle or index invalid. idx = %d hdl = %x", + idx, smmu_hdl); + return -EINVAL; + } + + if (cam_smmu_get_multiregion_client_dev_idx(&iommu_cb_set.cb_info[idx], + multi_client_device_idx, region_id, &nested_reg_idx)) + return -EINVAL; + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + cb = &iommu_cb_set.cb_info[idx]; + if (!cb) { + CAM_ERR(CAM_SMMU, "SMMU context bank pointer invalid"); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -EINVAL; + } + + switch (region_id) { + case CAM_SMMU_REGION_FIRMWARE: { + struct cam_smmu_nested_region_info *nested_reg_info; + + if (!cb->firmware_support) { + CAM_ERR(CAM_SMMU, "Firmware not supported"); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -ENODEV; + } + + nested_reg_info = &cb->firmware_info.nested_regions[nested_reg_idx]; + + region_info->iova_start = nested_reg_info->region_info.iova_start; + region_info->iova_len = nested_reg_info->region_info.iova_len; + } + break; + case CAM_SMMU_REGION_SHARED: { + struct cam_smmu_nested_region_info *nested_reg_info; + + if (!cb->shared_support) { + CAM_ERR(CAM_SMMU, "Shared mem not supported"); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -ENODEV; + } + + nested_reg_info = &cb->shared_info.nested_regions[nested_reg_idx]; + + region_info->iova_start = nested_reg_info->region_info.iova_start; + region_info->iova_len = nested_reg_info->region_info.iova_len; + } + break; + case CAM_SMMU_REGION_SCRATCH: + if (!cb->scratch_buf_support) { + CAM_ERR(CAM_SMMU, "Scratch memory not supported"); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -ENODEV; + } + + region_info->iova_start = cb->scratch_info.iova_start; + region_info->iova_len = cb->scratch_info.iova_len; + break; + case CAM_SMMU_REGION_IO: { + struct cam_smmu_nested_region_info *nested_reg_info; + + if (!cb->io_support) { + CAM_ERR(CAM_SMMU, "IO memory not supported"); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -ENODEV; + } + + nested_reg_info = 
&cb->io_info.nested_regions[nested_reg_idx]; + + region_info->iova_start = nested_reg_info->region_info.iova_start; + region_info->iova_len = nested_reg_info->region_info.iova_len; + } + break; + case CAM_SMMU_REGION_SECHEAP: + if (!cb->secheap_support) { + CAM_ERR(CAM_SMMU, "Secondary heap not supported"); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -ENODEV; + } + + region_info->iova_start = cb->secheap_info.iova_start; + region_info->iova_len = cb->secheap_info.iova_len; + break; + case CAM_SMMU_REGION_FWUNCACHED: { + struct cam_smmu_nested_region_info *nested_reg_info; + + if (!cb->fwuncached_region_support) { + CAM_WARN(CAM_SMMU, "FW uncached region not supported"); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -ENODEV; + } + + nested_reg_info = &cb->fwuncached_region.nested_regions[nested_reg_idx]; + + region_info->iova_start = nested_reg_info->region_info.iova_start; + region_info->iova_len = nested_reg_info->region_info.iova_len; + } + break; + case CAM_SMMU_REGION_DEVICE: { + struct cam_smmu_nested_region_info *nested_reg_info; + + if (!cb->device_region_support) { + CAM_WARN(CAM_SMMU, "device memory region not supported"); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -ENODEV; + } + + nested_reg_info = &cb->device_region.nested_regions[nested_reg_idx]; + + region_info->iova_start = nested_reg_info->region_info.iova_start; + region_info->iova_len = nested_reg_info->region_info.iova_len; + } + break; + default: + CAM_ERR(CAM_SMMU, "Invalid region id: %d for smmu hdl: %X", + smmu_hdl, region_id); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -EINVAL; + } + + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return 0; +} +EXPORT_SYMBOL(cam_smmu_get_region_info); + +int cam_smmu_reserve_buf_region(enum cam_smmu_region_id region, + int32_t smmu_hdl, struct dma_buf *buf, dma_addr_t *iova, + size_t *request_len) +{ + struct cam_context_bank_info *cb_info; + struct region_buf_info *buf_info = NULL; + struct 
cam_smmu_region_info *region_info = NULL; + struct cam_smmu_subregion_info *subregion_info = NULL; + bool *is_buf_allocated; + bool region_supported; + ssize_t size = 0; + int idx, rc = 0, multi_client_device_idx, prot, nested_reg_idx; + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, smmu_hdl); + return -EINVAL; + } + + cb_info = &iommu_cb_set.cb_info[idx]; + rc = cam_smmu_get_multiregion_client_dev_idx(cb_info, + multi_client_device_idx, region, &nested_reg_idx); + if (rc) + return rc; + + if (region == CAM_SMMU_REGION_SECHEAP) { + region_supported = cb_info->secheap_support; + } else if (region == CAM_SMMU_REGION_FWUNCACHED) { + region_supported = cb_info->fwuncached_region_support; + } else { + CAM_ERR(CAM_SMMU, "Region not supported for reserving %d", + region); + return -EINVAL; + } + + if (!region_supported) { + CAM_ERR(CAM_SMMU, "Reserve for region %d not supported", + region); + return -EINVAL; + } + + mutex_lock(&cb_info->lock); + + if (region == CAM_SMMU_REGION_SECHEAP) { + is_buf_allocated = &cb_info->is_secheap_allocated; + buf_info = &cb_info->secheap_info.buf_info; + region_info = &cb_info->secheap_info; + } else if (region == CAM_SMMU_REGION_FWUNCACHED) { + struct cam_smmu_nested_region_info *nested_reg_info; + + nested_reg_info = + &cb_info->fwuncached_region.nested_regions[nested_reg_idx]; + + if (nested_reg_info->subregion_support) { + /* For FW uncached, subregion generic is to be used */ + subregion_info = cam_smmu_find_subregion(CAM_SMMU_SUBREGION_GENERIC, + nested_reg_info->subregions); + if (IS_ERR_OR_NULL(subregion_info)) { + CAM_ERR(CAM_SMMU, + "Failed to find free uncached subregion on cb: %s", + cb_info->name[0]); + mutex_unlock(&cb_info->lock); + return -EINVAL; + } + region_info = &subregion_info->subregion_info; + buf_info = 
&subregion_info->subregion_info.buf_info; + is_buf_allocated = &subregion_info->is_allocated; + } else { + region_info = &nested_reg_info->region_info; + buf_info = &nested_reg_info->region_info.buf_info; + is_buf_allocated = &nested_reg_info->is_allocated; + } + } else { + CAM_ERR(CAM_SMMU, "Region not supported for reserving %d", + region); + mutex_unlock(&cb_info->lock); + return -EINVAL; + } + + if ((*is_buf_allocated)) { + CAM_ERR(CAM_SMMU, "Trying to allocate twice for region %d", + region); + rc = -ENOMEM; + mutex_unlock(&cb_info->lock); + return rc; + } + + if (IS_ERR_OR_NULL(buf)) { + rc = PTR_ERR(buf); + CAM_ERR(CAM_SMMU, + "Error: dma get buf failed. rc = %d", rc); + goto err_out; + } + + CAM_DBG(CAM_SMMU, "Map region=%d iova=0x%x len=0x%x", + region, region_info->iova_start, region_info->iova_len); + + buf_info->buf = buf; + buf_info->attach = dma_buf_attach(buf_info->buf, + cb_info->dev); + if (IS_ERR_OR_NULL(buf_info->attach)) { + rc = PTR_ERR(buf_info->attach); + CAM_ERR(CAM_SMMU, "Error: dma buf attach failed"); + goto err_put; + } + + buf_info->table = cam_compat_dmabuf_map_attach(buf_info->attach, + DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(buf_info->table)) { + rc = PTR_ERR(buf_info->table); + CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed"); + goto err_detach; + } + + prot = IOMMU_READ | IOMMU_WRITE; + if (iommu_cb_set.force_cache_allocs) + prot |= IOMMU_CACHE; + + size = cam_iommu_map_sg(cb_info->domain, + region_info->iova_start, + buf_info->table->sgl, + buf_info->table->orig_nents, + prot); + if ((size < 0) || (region_info->iova_len < size)) { + CAM_ERR(CAM_SMMU, + "IOMMU mapping failed for size=%zu available iova_len=%zu", + size, region_info->iova_len); + rc = -EINVAL; + goto err_unmap_sg; + } + + *iova = (uint32_t)region_info->iova_start; + /* Assign size mapped */ + *request_len = size; + *is_buf_allocated = true; + mutex_unlock(&cb_info->lock); + + return rc; + +err_unmap_sg: + cam_compat_dmabuf_unmap_attach(buf_info->attach, + 
buf_info->table, + DMA_BIDIRECTIONAL); +err_detach: + dma_buf_detach(buf_info->buf, + buf_info->attach); +err_put: + dma_buf_put(buf_info->buf); +err_out: + mutex_unlock(&cb_info->lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_reserve_buf_region); + +int cam_smmu_release_buf_region(enum cam_smmu_region_id region, + int32_t smmu_hdl) +{ + int idx, multi_client_device_idx, nested_reg_idx; + size_t size = 0; + struct region_buf_info *buf_info = NULL; + struct cam_context_bank_info *cb_info; + bool *is_buf_allocated; + bool region_supported; + struct cam_smmu_region_info *region_info = NULL; + struct cam_smmu_subregion_info *subregion_info = NULL; + + idx = GET_SMMU_TABLE_IDX(smmu_hdl); + multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(smmu_hdl); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, smmu_hdl); + return -EINVAL; + } + + cb_info = &iommu_cb_set.cb_info[idx]; + if (cam_smmu_get_multiregion_client_dev_idx(cb_info, + multi_client_device_idx, region, &nested_reg_idx)) + return -EINVAL; + + if (region == CAM_SMMU_REGION_SECHEAP) { + region_supported = cb_info->secheap_support; + } else if (region == CAM_SMMU_REGION_FWUNCACHED) { + region_supported = cb_info->fwuncached_region_support; + } else { + CAM_ERR(CAM_SMMU, "Region not supported for release %d", + region); + return -EINVAL; + } + + if (!region_supported) { + CAM_ERR(CAM_SMMU, "region: %d not supported", region); + return -EINVAL; + } + mutex_lock(&cb_info->lock); + + if (region == CAM_SMMU_REGION_SECHEAP) { + is_buf_allocated = &cb_info->is_secheap_allocated; + buf_info = &cb_info->secheap_info.buf_info; + region_info = &cb_info->secheap_info; + } else if (region == CAM_SMMU_REGION_FWUNCACHED) { + struct cam_smmu_nested_region_info *nested_reg_info; + + nested_reg_info = + &cb_info->fwuncached_region.nested_regions[nested_reg_idx]; + if (nested_reg_info->subregion_support) { + subregion_info = 
cam_smmu_find_subregion(CAM_SMMU_SUBREGION_GENERIC, + nested_reg_info->subregions); + if (IS_ERR_OR_NULL(subregion_info)) { + CAM_ERR(CAM_SMMU, + "Failed to find uncached subregion on cb: %s", + cb_info->name[0]); + mutex_unlock(&cb_info->lock); + return -EINVAL; + } + + is_buf_allocated = &subregion_info->is_allocated; + buf_info = &subregion_info->subregion_info.buf_info; + region_info = &subregion_info->subregion_info; + } else { + is_buf_allocated = &nested_reg_info->is_allocated; + buf_info = &nested_reg_info->region_info.buf_info; + region_info = &nested_reg_info->region_info; + } + } else { + CAM_ERR(CAM_SMMU, "Region not supported for release %d", + region); + mutex_unlock(&cb_info->lock); + return -EINVAL; + } + + if (!(*is_buf_allocated)) { + CAM_ERR(CAM_SMMU, + "Trying to release freed region cb: %s region: %d", + cb_info->name[0], region); + mutex_unlock(&cb_info->lock); + return -ENOMEM; + } + + size = iommu_unmap(cb_info->domain, + region_info->iova_start, + region_info->iova_len); + if (size != region_info->iova_len) { + CAM_ERR(CAM_SMMU, "Failed: Unmapped = %zu, requested = %zu", + size, + region_info->iova_len); + } + + cam_compat_dmabuf_unmap_attach(buf_info->attach, + buf_info->table, DMA_BIDIRECTIONAL); + dma_buf_detach(buf_info->buf, buf_info->attach); + *is_buf_allocated = false; + mutex_unlock(&cb_info->lock); + + return 0; +} +EXPORT_SYMBOL(cam_smmu_release_buf_region); + +static int cam_smmu_util_return_map_entry(struct cam_smmu_buffer_tracker *entry) +{ + spin_lock_bh(&iommu_cb_set.s_lock); + list_add_tail(&entry->list, &iommu_cb_set.buf_tracker_free_list); + spin_unlock_bh(&iommu_cb_set.s_lock); + + return 0; +} + +void cam_smmu_buffer_tracker_putref(struct list_head *track_list) +{ + struct cam_smmu_buffer_tracker *buffer_tracker, *temp; + + if (iommu_cb_set.is_track_buf_disabled) + return; + + if (!track_list || list_empty(track_list)) + return; + + list_for_each_entry_safe(buffer_tracker, temp, track_list, list) { + if (!buffer_tracker 
|| !buffer_tracker->ref_count) + continue; + if (refcount_dec_and_test(&buffer_tracker->ref_count->refcount)) + CAM_ERR(CAM_SMMU, + "[SMMU_BT] Unexpected - buffer reference [fd: 0x%x ino: 0x%x cb: %s] zeroed prior to unmap invocation", + buffer_tracker->ion_fd, buffer_tracker->i_ino, + buffer_tracker->cb_name); + else + CAM_DBG(CAM_SMMU, + "[SMMU_BT] kref_count after put, [fd: 0x%x ino: 0x%x cb: %s], count: %d", + buffer_tracker->ion_fd, buffer_tracker->i_ino, + buffer_tracker->cb_name, + kref_read(buffer_tracker->ref_count)); + + list_del_init(&buffer_tracker->list); + + cam_smmu_util_return_map_entry(buffer_tracker); + + } +} +EXPORT_SYMBOL(cam_smmu_buffer_tracker_putref); + +static int cam_smmu_map_buffer_validate(struct dma_buf *buf, + int idx, int multi_client_device_idx, enum dma_data_direction dma_dir, + dma_addr_t *paddr_ptr, size_t *len_ptr, enum cam_smmu_region_id region_id, + bool dis_delayed_unmap, struct cam_dma_buff_info **mapping_info) +{ + struct dma_buf_attachment *attach = NULL; + struct sg_table *table = NULL; + struct iommu_domain *domain; + ssize_t size; + unsigned long iova = 0; + int rc = 0, prot = 0; + struct timespec64 ts1, ts2; + long microsec = 0; + + if (IS_ERR_OR_NULL(buf)) { + rc = PTR_ERR(buf); + CAM_ERR(CAM_SMMU, + "Error: dma get buf failed. 
rc = %d", rc); + goto err_out; + } + + if (!mapping_info) { + rc = -EINVAL; + CAM_ERR(CAM_SMMU, "Error: mapping_info is invalid"); + goto err_out; + } + + if (iommu_cb_set.debug_cfg.map_profile_enable) + CAM_GET_TIMESTAMP(ts1); + + attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev); + if (IS_ERR_OR_NULL(attach)) { + rc = PTR_ERR(attach); + CAM_ERR(CAM_SMMU, "Error: dma buf attach failed"); + goto err_out; + } + + if (region_id == CAM_SMMU_REGION_SHARED) { + table = cam_compat_dmabuf_map_attach(attach, dma_dir); + if (IS_ERR_OR_NULL(table)) { + rc = PTR_ERR(table); + CAM_ERR(CAM_SMMU, "Error: dma map attachment failed"); + goto err_detach; + } + + if (!table->sgl) { + rc = -EINVAL; + CAM_ERR(CAM_SMMU, "Error: table sgl is null"); + goto err_unmap_sg; + } + + domain = iommu_cb_set.cb_info[idx].domain; + if (!domain) { + CAM_ERR(CAM_SMMU, "CB has no domain set"); + goto err_unmap_sg; + } + + rc = cam_smmu_alloc_iova(*len_ptr, multi_client_device_idx, + iommu_cb_set.cb_info[idx].handle, &iova); + if (rc < 0) { + CAM_ERR(CAM_SMMU, + "IOVA alloc failed for shared memory, size=%zu, idx=%d, handle=%d", + *len_ptr, idx, + iommu_cb_set.cb_info[idx].handle); + goto err_unmap_sg; + } + + prot = IOMMU_READ | IOMMU_WRITE; + if (iommu_cb_set.force_cache_allocs) + prot |= IOMMU_CACHE; + + size = cam_iommu_map_sg(domain, iova, table->sgl, table->orig_nents, prot); + if ((size < 0) || (size < *len_ptr)) { + CAM_ERR(CAM_SMMU, "IOMMU mapping failed"); + rc = cam_smmu_free_iova(iova, + size, multi_client_device_idx, + iommu_cb_set.cb_info[idx].handle); + if (rc) + CAM_ERR(CAM_SMMU, "IOVA free failed"); + rc = -ENOMEM; + goto err_unmap_sg; + } else { + CAM_DBG(CAM_SMMU, + "iommu_map_sg returned iova=%pK, size=%zu", + iova, size); + *paddr_ptr = iova; + *len_ptr = size; + } + iommu_cb_set.cb_info[idx].shared_mapping_size += *len_ptr; + } else if (region_id == CAM_SMMU_REGION_IO) { + if (!dis_delayed_unmap) + attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP; + table = 
cam_compat_dmabuf_map_attach(attach, dma_dir); + if (IS_ERR_OR_NULL(table)) { + rc = PTR_ERR(table); + CAM_ERR(CAM_SMMU, + "Error: dma map attachment failed, size=%zu, rc %d dma_dir %d", + buf->size, rc, dma_dir); + goto err_detach; + } + + *paddr_ptr = sg_dma_address(table->sgl); + *len_ptr = (size_t)buf->size; + iommu_cb_set.cb_info[idx].io_mapping_size += *len_ptr; + } else { + CAM_ERR(CAM_SMMU, "Error: Wrong region id passed"); + rc = -EINVAL; + goto err_detach; + } + + CAM_DBG(CAM_SMMU, + "DMA buf: %pK, device: %pK, attach: %pK, table: %pK", + (void *)buf, + (void *)iommu_cb_set.cb_info[idx].dev, + (void *)attach, (void *)table); + CAM_DBG(CAM_SMMU, "table sgl: %pK, rc: %d, dma_address: 0x%x", + (void *)table->sgl, rc, + (unsigned int)table->sgl->dma_address); + CAM_DBG(CAM_SMMU, + "iova=%pK, region_id=%d, paddr=0x%llx, len=%zu, dma_map_attrs=%d", + iova, region_id, *paddr_ptr, *len_ptr, attach->dma_map_attrs); + + if (iommu_cb_set.debug_cfg.map_profile_enable) { + CAM_GET_TIMESTAMP(ts2); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec); + trace_cam_log_event("SMMUMapProfile", "size and time in micro", + *len_ptr, microsec); + } + + /* fill up mapping_info */ + *mapping_info = CAM_MEM_ZALLOC(sizeof(struct cam_dma_buff_info), GFP_KERNEL); + if (!(*mapping_info)) { + rc = -ENOSPC; + goto err_alloc; + } + + (*mapping_info)->buf = buf; + (*mapping_info)->attach = attach; + (*mapping_info)->table = table; + (*mapping_info)->paddr = *paddr_ptr; + (*mapping_info)->len = *len_ptr; + (*mapping_info)->dir = dma_dir; + (*mapping_info)->map_count = 1; + (*mapping_info)->region_id = region_id; + (*mapping_info)->multi_client_device_idx = multi_client_device_idx; + + if (!*paddr_ptr || !*len_ptr) { + CAM_ERR(CAM_SMMU, "Error: Space Allocation failed"); + CAM_MEM_FREE(*mapping_info); + *mapping_info = NULL; + rc = -ENOSPC; + goto err_alloc; + } + + CAM_DBG(CAM_SMMU, "idx=%d, dma_buf=%pK, dev=%pOFfp, paddr=0x%llx, len=%zu", + idx, buf, + 
iommu_cb_set.cb_info[idx].dev->of_node, + *paddr_ptr, *len_ptr); + + /* Unmap the mapping in dma region as this is not used anyway */ + if (region_id == CAM_SMMU_REGION_SHARED) + cam_compat_dmabuf_unmap_attach(attach, table, dma_dir); + + return 0; + +err_alloc: + if (region_id == CAM_SMMU_REGION_SHARED) { + cam_smmu_free_iova(iova, + size, + multi_client_device_idx, + iommu_cb_set.cb_info[idx].handle); + + iommu_unmap(iommu_cb_set.cb_info[idx].domain, + *paddr_ptr, + *len_ptr); + } +err_unmap_sg: + cam_compat_dmabuf_unmap_attach(attach, table, dma_dir); +err_detach: + dma_buf_detach(buf, attach); +err_out: + return rc; +} + + +static int cam_smmu_map_buffer_and_add_to_list(int handle, int ion_fd, + bool dis_delayed_unmap, enum dma_data_direction dma_dir, + dma_addr_t *paddr_ptr, size_t *len_ptr, + enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *buf, + struct kref **ref_count) +{ + int rc = -1, idx = GET_SMMU_TABLE_IDX(handle); + int multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(handle); + struct cam_dma_buff_info *mapping_info = NULL; + + rc = cam_smmu_map_buffer_validate(buf, idx, multi_client_device_idx, + dma_dir, paddr_ptr, len_ptr, + region_id, dis_delayed_unmap, &mapping_info); + if (rc) { + CAM_ERR(CAM_SMMU, "buffer validation failure"); + return rc; + } + + mapping_info->ion_fd = ion_fd; + mapping_info->i_ino = file_inode(buf->file)->i_ino; + mapping_info->is_internal = is_internal; + kref_init(&mapping_info->ref_count); + *ref_count = &mapping_info->ref_count; + CAM_GET_TIMESTAMP(mapping_info->ts); + /* add to the list */ + list_add(&mapping_info->list, + &iommu_cb_set.cb_info[idx].smmu_buf_list); + + CAM_DBG(CAM_SMMU, "fd %d i_ino %lu dmabuf %pK", ion_fd, mapping_info->i_ino, buf); + + cam_smmu_update_monitor_array(&iommu_cb_set.cb_info[idx], true, + mapping_info); + + return 0; +} + +static int cam_smmu_map_kernel_buffer_and_add_to_list(int handle, + struct dma_buf *buf, enum dma_data_direction dma_dir, + dma_addr_t 
*paddr_ptr, size_t *len_ptr, + enum cam_smmu_region_id region_id) +{ + int rc = -1, idx = GET_SMMU_TABLE_IDX(handle); + int multi_client_device_idx = GET_SMMU_MULTI_CLIENT_IDX(handle); + struct cam_dma_buff_info *mapping_info = NULL; + + rc = cam_smmu_map_buffer_validate(buf, idx, multi_client_device_idx, + dma_dir, paddr_ptr, len_ptr, + region_id, false, &mapping_info); + + if (rc) { + CAM_ERR(CAM_SMMU, "buffer validation failure"); + return rc; + } + + mapping_info->ion_fd = -1; + mapping_info->i_ino = file_inode(buf->file)->i_ino; + CAM_GET_TIMESTAMP(mapping_info->ts); + + /* add to the list */ + list_add(&mapping_info->list, + &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list); + + CAM_DBG(CAM_SMMU, "fd %d i_ino %lu dmabuf %pK", + mapping_info->ion_fd, mapping_info->i_ino, buf); + + cam_smmu_update_monitor_array(&iommu_cb_set.cb_info[idx], true, + mapping_info); + + return 0; +} + + +static int cam_smmu_unmap_buf_and_remove_from_list( + struct cam_dma_buff_info *mapping_info, + int idx) +{ + int rc; + size_t size; + struct iommu_domain *domain; + struct timespec64 ts1, ts2; + long microsec = 0; + + if ((!mapping_info->buf) || (!mapping_info->table) || + (!mapping_info->attach)) { + CAM_ERR(CAM_SMMU, + "Error: Invalid params dev = %pK, table = %pK", + (void *)iommu_cb_set.cb_info[idx].dev, + (void *)mapping_info->table); + CAM_ERR(CAM_SMMU, "Error:dma_buf = %pK, attach = %pK", + (void *)mapping_info->buf, + (void *)mapping_info->attach); + return -EINVAL; + } + + cam_smmu_update_monitor_array(&iommu_cb_set.cb_info[idx], false, + mapping_info); + + CAM_DBG(CAM_SMMU, + "region_id=%d, paddr=0x%llx, len=%d, dma_map_attrs=%d", + mapping_info->region_id, mapping_info->paddr, mapping_info->len, + mapping_info->attach->dma_map_attrs); + + if (iommu_cb_set.debug_cfg.map_profile_enable) + CAM_GET_TIMESTAMP(ts1); + + if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) { + CAM_DBG(CAM_SMMU, + "Removing SHARED buffer paddr = 0x%llx, len = %zu", + mapping_info->paddr, 
mapping_info->len); + + domain = iommu_cb_set.cb_info[idx].domain; + + size = iommu_unmap(domain, + mapping_info->paddr, + mapping_info->len); + + if (size != mapping_info->len) { + CAM_ERR(CAM_SMMU, "IOMMU unmap failed"); + CAM_ERR(CAM_SMMU, "Unmapped = %zu, requested = %zu", + size, + mapping_info->len); + } + + rc = cam_smmu_free_iova(mapping_info->paddr, + mapping_info->len, + mapping_info->multi_client_device_idx, + iommu_cb_set.cb_info[idx].handle); + if (rc) + CAM_ERR(CAM_SMMU, "IOVA free failed"); + + iommu_cb_set.cb_info[idx].shared_mapping_size -= + mapping_info->len; + } else if (mapping_info->region_id == CAM_SMMU_REGION_IO) { + if (mapping_info->is_internal) + mapping_info->attach->dma_map_attrs |= + DMA_ATTR_SKIP_CPU_SYNC; + + cam_compat_dmabuf_unmap_attach(mapping_info->attach, + mapping_info->table, mapping_info->dir); + iommu_cb_set.cb_info[idx].io_mapping_size -= mapping_info->len; + } + + + dma_buf_detach(mapping_info->buf, mapping_info->attach); + + if (iommu_cb_set.debug_cfg.map_profile_enable) { + CAM_GET_TIMESTAMP(ts2); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec); + trace_cam_log_event("SMMUUnmapProfile", + "size and time in micro", mapping_info->len, microsec); + } + + mapping_info->buf = NULL; + + list_del_init(&mapping_info->list); + + /* free one buffer */ + CAM_MEM_FREE(mapping_info); + return 0; +} + +static int cam_smmu_util_get_free_map_entry(struct cam_smmu_buffer_tracker **entry) +{ + spin_lock_bh(&iommu_cb_set.s_lock); + if (list_empty(&iommu_cb_set.buf_tracker_free_list)) { + CAM_WARN(CAM_SMMU, "[SMMU_BT] Not enough mem to track buffer"); + spin_unlock_bh(&iommu_cb_set.s_lock); + return -ENOMEM; + } + + *entry = list_first_entry(&iommu_cb_set.buf_tracker_free_list, + struct cam_smmu_buffer_tracker, list); + + list_del_init(&(*entry)->list); + spin_unlock_bh(&iommu_cb_set.s_lock); + + return 0; +} + +int cam_smmu_add_buf_to_track_list(int ion_fd, unsigned long inode, + struct kref **ref_count, struct list_head 
*buf_tracker, int idx) +{ + int rc = 0; + struct cam_smmu_buffer_tracker *buf; + + if (iommu_cb_set.is_track_buf_disabled) + return rc; + + rc = cam_smmu_util_get_free_map_entry(&buf); + if (rc == -ENOMEM) { + rc = 0; + return rc; + } + + kref_get(*ref_count); + + buf->ion_fd = ion_fd; + buf->i_ino = inode; + buf->ref_count = *ref_count; + buf->cb_name = iommu_cb_set.cb_info[idx].name[0]; + + CAM_DBG(CAM_SMMU, + "[SMMU_BT] ref_cnt increased for fd 0x%x, ino 0x%x: %d, cb: %s", + buf->ion_fd, buf->i_ino, kref_read(buf->ref_count), buf->cb_name); + + list_add(&buf->list, buf_tracker); + + return rc; +} + +static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx, + int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr, + struct timespec64 **ts_mapping, unsigned long *inode, struct kref **ref_count) +{ + struct cam_dma_buff_info *mapping; + unsigned long i_ino; + + i_ino = file_inode(dmabuf->file)->i_ino; + + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) { + *paddr_ptr = mapping->paddr; + *len_ptr = mapping->len; + *ts_mapping = &mapping->ts; + *inode = i_ino; + *ref_count = &mapping->ref_count; + return CAM_SMMU_BUFF_EXIST; + } + } + + return CAM_SMMU_BUFF_NOT_EXIST; +} + +static enum cam_smmu_buf_state cam_smmu_user_reuse_fd_in_list(int idx, + int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr, + struct timespec64 **ts_mapping, struct kref **ref_count) +{ + struct cam_dma_buff_info *mapping; + unsigned long i_ino; + + i_ino = file_inode(dmabuf->file)->i_ino; + + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) { + *paddr_ptr = mapping->paddr; + *len_ptr = mapping->len; + *ts_mapping = &mapping->ts; + mapping->map_count++; + *ref_count = &mapping->ref_count; + return CAM_SMMU_BUFF_EXIST; + } + } + + return 
CAM_SMMU_BUFF_NOT_EXIST; +} + +static enum cam_smmu_buf_state cam_smmu_check_dma_buf_in_list(int idx, + struct dma_buf *buf, dma_addr_t *paddr_ptr, size_t *len_ptr) +{ + struct cam_dma_buff_info *mapping; + + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) { + if (mapping->buf == buf) { + *paddr_ptr = mapping->paddr; + *len_ptr = mapping->len; + return CAM_SMMU_BUFF_EXIST; + } + } + + return CAM_SMMU_BUFF_NOT_EXIST; +} + +static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx, + int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr, + struct kref **ref_count) +{ + struct cam_sec_buff_info *mapping; + unsigned long i_ino; + + i_ino = file_inode(dmabuf->file)->i_ino; + + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, + list) { + if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) { + *paddr_ptr = mapping->paddr; + *len_ptr = mapping->len; + mapping->map_count++; + *ref_count = &mapping->ref_count; + return CAM_SMMU_BUFF_EXIST; + } + } + + return CAM_SMMU_BUFF_NOT_EXIST; +} + +static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx, + int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr, + unsigned long *inode, struct kref **ref_count) +{ + struct cam_sec_buff_info *mapping; + unsigned long i_ino; + + i_ino = file_inode(dmabuf->file)->i_ino; + + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, + list) { + if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) { + *paddr_ptr = mapping->paddr; + *len_ptr = mapping->len; + *inode = i_ino; + *ref_count = &mapping->ref_count; + return CAM_SMMU_BUFF_EXIST; + } + } + + return CAM_SMMU_BUFF_NOT_EXIST; +} + +int cam_smmu_get_handle(char *identifier, int *handle_ptr) +{ + int rc = 0; + + if (!identifier) { + CAM_ERR(CAM_SMMU, "Error: iommu hardware name is NULL"); + return -EINVAL; + } + + if (!handle_ptr) { + CAM_ERR(CAM_SMMU, "Error: handle 
pointer is NULL"); + return -EINVAL; + } + + /* create and put handle in the table */ + rc = cam_smmu_create_add_handle_in_table(identifier, handle_ptr); + if (rc < 0) + CAM_ERR(CAM_SMMU, "Error: %s get handle fail, rc %d", + identifier, rc); + + return rc; +} +EXPORT_SYMBOL(cam_smmu_get_handle); + +int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops) +{ + int ret = 0, idx; + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, "Error: Index invalid. idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -EINVAL; + } + + switch (ops) { + case CAM_SMMU_ATTACH: { + ret = cam_smmu_attach(idx); + break; + } + case CAM_SMMU_DETACH: { + ret = cam_smmu_detach_device(idx); + break; + } + case CAM_SMMU_VOTE: + case CAM_SMMU_DEVOTE: + default: + CAM_ERR(CAM_SMMU, "Error: idx = %d, ops = %d", idx, ops); + ret = -EINVAL; + } + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return ret; +} +EXPORT_SYMBOL(cam_smmu_ops); + +static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx, + size_t virt_len, + size_t phys_len, + unsigned int iommu_dir, + dma_addr_t *virt_addr) +{ + unsigned long nents = virt_len / phys_len; + struct cam_dma_buff_info *mapping_info = NULL; + size_t unmapped; + dma_addr_t iova = 0; + struct scatterlist *sg; + int i = 0; + int rc; + struct iommu_domain *domain = NULL; + struct page *page; + struct sg_table *table = NULL; + + CAM_DBG(CAM_SMMU, "nents = %lu, idx = %d, virt_len = %zx", + nents, idx, virt_len); + CAM_DBG(CAM_SMMU, "phys_len = %zx, iommu_dir = %d, virt_addr = %pK", + phys_len, iommu_dir, virt_addr); + + /* + * 
This table will go inside the 'mapping' structure + * where it will be held until put_scratch_buffer is called + */ + table = CAM_MEM_ZALLOC(sizeof(struct sg_table), GFP_KERNEL); + if (!table) { + rc = -ENOMEM; + goto err_table_alloc; + } + + rc = sg_alloc_table(table, nents, GFP_KERNEL); + if (rc < 0) { + rc = -EINVAL; + goto err_sg_alloc; + } + + page = alloc_pages(GFP_KERNEL, get_order(phys_len)); + if (!page) { + rc = -ENOMEM; + goto err_page_alloc; + } + + /* Now we create the sg list */ + for_each_sg(table->sgl, sg, table->nents, i) + sg_set_page(sg, page, phys_len, 0); + + + /* Get the domain from within our cb_set struct and map it*/ + domain = iommu_cb_set.cb_info[idx].domain; + + rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map, + virt_len, &iova); + + if (rc < 0) { + CAM_ERR(CAM_SMMU, + "Could not find valid iova for scratch buffer"); + goto err_iommu_map; + } + + if (iommu_cb_set.force_cache_allocs) + iommu_dir |= IOMMU_CACHE; + + if (cam_iommu_map_sg(domain, + iova, + table->sgl, + table->nents, + iommu_dir) != virt_len) { + CAM_ERR(CAM_SMMU, "iommu_map_sg() failed"); + goto err_iommu_map; + } + + /* Now update our mapping information within the cb_set struct */ + mapping_info = CAM_MEM_ZALLOC(sizeof(struct cam_dma_buff_info), GFP_KERNEL); + if (!mapping_info) { + rc = -ENOMEM; + goto err_mapping_info; + } + + mapping_info->ion_fd = 0xDEADBEEF; + mapping_info->i_ino = 0; + mapping_info->buf = NULL; + mapping_info->attach = NULL; + mapping_info->table = table; + mapping_info->paddr = iova; + mapping_info->len = virt_len; + mapping_info->iommu_dir = iommu_dir; + mapping_info->map_count = 1; + mapping_info->phys_len = phys_len; + mapping_info->region_id = CAM_SMMU_REGION_SCRATCH; + + CAM_DBG(CAM_SMMU, "paddr = %pK, len = %zx, phys_len = %zx", + (void *)mapping_info->paddr, + mapping_info->len, mapping_info->phys_len); + + list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list); + + *virt_addr = (dma_addr_t)iova; + + 
CAM_DBG(CAM_SMMU, "mapped virtual address = %lx", + (unsigned long)*virt_addr); + return 0; + +err_mapping_info: + unmapped = iommu_unmap(domain, iova, virt_len); + if (unmapped != virt_len) + CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx", + unmapped, virt_len); +err_iommu_map: + __free_pages(page, get_order(phys_len)); +err_page_alloc: + sg_free_table(table); +err_sg_alloc: + CAM_MEM_FREE(table); +err_table_alloc: + return rc; +} + +static int cam_smmu_free_scratch_buffer_remove_from_list( + struct cam_dma_buff_info *mapping_info, + int idx) +{ + int rc = 0; + size_t unmapped; + struct iommu_domain *domain = + iommu_cb_set.cb_info[idx].domain; + struct scratch_mapping *scratch_map = + &iommu_cb_set.cb_info[idx].scratch_map; + + if (!mapping_info->table) { + CAM_ERR(CAM_SMMU, + "Error: Invalid params: dev = %pK, table = %pK", + (void *)iommu_cb_set.cb_info[idx].dev, + (void *)mapping_info->table); + return -EINVAL; + } + + /* Clean up the mapping_info struct from the list */ + unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len); + if (unmapped != mapping_info->len) + CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx", + unmapped, mapping_info->len); + + rc = cam_smmu_free_scratch_va(scratch_map, + mapping_info->paddr, + mapping_info->len); + if (rc < 0) { + CAM_ERR(CAM_SMMU, + "Error: Invalid iova while freeing scratch buffer"); + rc = -EINVAL; + } + + __free_pages(sg_page(mapping_info->table->sgl), + get_order(mapping_info->phys_len)); + sg_free_table(mapping_info->table); + CAM_MEM_FREE(mapping_info->table); + list_del_init(&mapping_info->list); + + CAM_MEM_FREE(mapping_info); + mapping_info = NULL; + + return rc; +} + +int cam_smmu_get_scratch_iova(int handle, + enum cam_smmu_map_dir dir, + dma_addr_t *paddr_ptr, + size_t virt_len, + size_t phys_len) +{ + int idx, rc; + unsigned int iommu_dir; + + if (!paddr_ptr || !virt_len || !phys_len) { + CAM_ERR(CAM_SMMU, "Error: Input pointer or lengths invalid"); + return -EINVAL; + } + + if 
(virt_len < phys_len) { + CAM_ERR(CAM_SMMU, "Error: virt_len > phys_len"); + return -EINVAL; + } + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir); + if (iommu_dir == IOMMU_INVALID_DIR) { + CAM_ERR(CAM_SMMU, + "Error: translate direction failed. dir = %d", dir); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto error; + } + + if (!iommu_cb_set.cb_info[idx].scratch_buf_support) { + CAM_ERR(CAM_SMMU, + "Error: Context bank does not support scratch bufs"); + rc = -EINVAL; + goto error; + } + + CAM_DBG(CAM_SMMU, "smmu handle = %x, idx = %d, dir = %d", + handle, idx, dir); + CAM_DBG(CAM_SMMU, "virt_len = %zx, phys_len = %zx", + phys_len, virt_len); + + if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) { + CAM_ERR(CAM_SMMU, + "Err:Dev %s should call SMMU attach before map buffer", + iommu_cb_set.cb_info[idx].name[0]); + rc = -EINVAL; + goto error; + } + + if (!IS_ALIGNED(virt_len, PAGE_SIZE)) { + CAM_ERR(CAM_SMMU, + "Requested scratch buffer length not page aligned"); + rc = -EINVAL; + goto error; + } + + if (!IS_ALIGNED(virt_len, phys_len)) { + CAM_ERR(CAM_SMMU, + "Requested virt length not aligned with phys length"); + rc = -EINVAL; + goto error; + } + + rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx, + virt_len, + phys_len, + iommu_dir, + paddr_ptr); + if (rc < 0) + CAM_ERR(CAM_SMMU, "Error: mapping or add list fail"); + +error: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} + +int cam_smmu_put_scratch_iova(int handle, + 
dma_addr_t paddr) +{ + int idx; + int rc = -1; + struct cam_dma_buff_info *mapping_info; + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + /* find index in the iommu_cb_set.cb_info */ + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto handle_err; + } + + if (!iommu_cb_set.cb_info[idx].scratch_buf_support) { + CAM_ERR(CAM_SMMU, + "Error: Context bank does not support scratch buffers"); + rc = -EINVAL; + goto handle_err; + } + + /* Based on virtual address and index, we can find mapping info + * of the scratch buffer + */ + mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr); + if (!mapping_info) { + CAM_ERR(CAM_SMMU, "Error: Invalid params"); + rc = -ENODEV; + goto handle_err; + } + + /* unmapping one buffer from device */ + rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail"); + goto handle_err; + } + +handle_err: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} + +static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd, + enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr, + size_t *len_ptr, struct dma_buf *dmabuf, struct kref **ref_count) +{ + int rc = 0; + struct dma_buf_attachment *attach = NULL; + struct sg_table *table = NULL; + struct cam_sec_buff_info *mapping_info; + + /* clean the content from clients */ + *paddr_ptr = (dma_addr_t)NULL; + *len_ptr = (size_t)0; + + /* + * ion_phys() is deprecated. 
call dma_buf_attach() and + * dma_buf_map_attachment() to get the buffer's physical + * address. + */ + attach = dma_buf_attach(dmabuf, iommu_cb_set.cb_info[idx].dev); + if (IS_ERR_OR_NULL(attach)) { + CAM_ERR(CAM_SMMU, + "Error: dma buf attach failed, idx=%d, ion_fd=%d", + idx, ion_fd); + rc = PTR_ERR(attach); + goto err_out; + } + + attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; + + if (IS_CSF25(iommu_cb_set.csf_version.arch_ver, + iommu_cb_set.csf_version.max_ver)) + attach->dma_map_attrs = + cam_update_dma_map_attributes(attach->dma_map_attrs); + table = cam_compat_dmabuf_map_attach(attach, dma_dir); + if (IS_ERR_OR_NULL(table)) { + CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed"); + rc = PTR_ERR(table); + goto err_detach; + } + + /* return addr and len to client */ + if (IS_CSF25(iommu_cb_set.csf_version.arch_ver, + iommu_cb_set.csf_version.max_ver)) + *paddr_ptr = sg_dma_address(table->sgl); + else + *paddr_ptr = sg_phys(table->sgl); + + *len_ptr = (size_t)sg_dma_len(table->sgl); + + /* fill up mapping_info */ + mapping_info = CAM_MEM_ZALLOC(sizeof(struct cam_sec_buff_info), GFP_KERNEL); + if (!mapping_info) { + rc = -ENOMEM; + goto err_unmap_sg; + } + + mapping_info->ion_fd = ion_fd; + mapping_info->i_ino = file_inode(dmabuf->file)->i_ino; + mapping_info->paddr = *paddr_ptr; + mapping_info->len = *len_ptr; + mapping_info->dir = dma_dir; + mapping_info->map_count = 1; + mapping_info->buf = dmabuf; + mapping_info->attach = attach; + mapping_info->table = table; + + kref_init(&mapping_info->ref_count); + *ref_count = &mapping_info->ref_count; + + CAM_DBG(CAM_SMMU, "idx=%d, ion_fd=%d, i_ino=%lu, dev=%pOFfp, paddr=0x%llx, len=%zu", + idx, ion_fd, mapping_info->i_ino, + iommu_cb_set.cb_info[idx].dev->of_node, + *paddr_ptr, *len_ptr); + + /* add to the list */ + list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list); + + return 0; + +err_unmap_sg: + cam_compat_dmabuf_unmap_attach(attach, table, dma_dir); +err_detach: + 
dma_buf_detach(dmabuf, attach); +err_out: + return rc; +} + +int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf, + enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, size_t *len_ptr, + struct kref **ref_count) +{ + int idx, rc; + enum dma_data_direction dma_dir; + enum cam_smmu_buf_state buf_state; + + if (!paddr_ptr || !len_ptr) { + CAM_ERR(CAM_SMMU, + "Error: Invalid inputs, paddr_ptr:%pK, len_ptr: %pK", + paddr_ptr, len_ptr); + return -EINVAL; + } + /* clean the content from clients */ + *paddr_ptr = (dma_addr_t)NULL; + *len_ptr = (size_t)0; + + dma_dir = cam_smmu_translate_dir(dir); + if (dma_dir == DMA_NONE) { + CAM_ERR(CAM_SMMU, + "Error: translate direction failed. dir = %d", dir); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if ((handle == HANDLE_INIT) || + (idx < 0) || + (idx >= iommu_cb_set.cb_num)) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + if (!iommu_cb_set.cb_info[idx].is_secure) { + CAM_ERR(CAM_SMMU, + "Error: can't map secure mem to non secure cb, idx=%d", + idx); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, idx=%d, table_hdl=%x, hdl=%x", + idx, iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto get_addr_end; + } + + buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, dmabuf, paddr_ptr, + len_ptr, ref_count); + if (buf_state == CAM_SMMU_BUFF_EXIST) { + CAM_DBG(CAM_SMMU, + "fd:%d already in list idx:%d, handle=%d give same addr back", + ion_fd, idx, handle); + rc = 0; + goto get_addr_end; + } + rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir, + paddr_ptr, len_ptr, dmabuf, ref_count); + if (rc < 0) { + CAM_ERR(CAM_SMMU, + "Error: mapping or add list fail, idx=%d, handle=%d, fd=%d, rc=%d", + idx, handle, ion_fd, rc); + goto get_addr_end; + } + 
+get_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_map_stage2_iova); + +static int cam_smmu_secure_unmap_buf_and_remove_from_list( + struct cam_sec_buff_info *mapping_info, + int idx) +{ + if ((!mapping_info->buf) || (!mapping_info->table) || + (!mapping_info->attach)) { + CAM_ERR(CAM_SMMU, "Error: Invalid params dev = %pK, table = %pK", + (void *)iommu_cb_set.cb_info[idx].dev, + (void *)mapping_info->table); + CAM_ERR(CAM_SMMU, "Error:dma_buf = %pK, attach = %pK\n", + (void *)mapping_info->buf, + (void *)mapping_info->attach); + return -EINVAL; + } + + /* skip cache operations */ + mapping_info->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; + + /* iommu buffer clean up */ + cam_compat_dmabuf_unmap_attach(mapping_info->attach, + mapping_info->table, mapping_info->dir); + dma_buf_detach(mapping_info->buf, mapping_info->attach); + mapping_info->buf = NULL; + + list_del_init(&mapping_info->list); + + CAM_DBG(CAM_SMMU, "unmap fd: %d, i_ino : %lu, idx : %d", + mapping_info->ion_fd, mapping_info->i_ino, idx); + + /* free one buffer */ + CAM_MEM_FREE(mapping_info); + return 0; +} + +int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf, + bool force_unmap) +{ + int idx, rc; + struct cam_sec_buff_info *mapping_info; + + /* find index in the iommu_cb_set.cb_info */ + idx = GET_SMMU_TABLE_IDX(handle); + if ((handle == HANDLE_INIT) || + (idx < 0) || + (idx >= iommu_cb_set.cb_num)) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + if (!iommu_cb_set.cb_info[idx].is_secure) { + CAM_ERR(CAM_SMMU, + "Error: can't unmap secure mem from non secure cb"); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto put_addr_end; + } + + /* based on ion fd and index, we can find mapping info of buffer */ + mapping_info = cam_smmu_find_mapping_by_sec_buf_idx(idx, ion_fd, dma_buf); + if (!mapping_info) { + CAM_ERR(CAM_SMMU, + "Error: Invalid params! idx = %d, fd = %d", + idx, ion_fd); + rc = -EINVAL; + goto put_addr_end; + } + + mapping_info->map_count--; + if (mapping_info->map_count > 0) { + CAM_DBG(CAM_SMMU, + "idx: %d fd = %d map_count: %d", + idx, ion_fd, mapping_info->map_count); + rc = 0; + goto put_addr_end; + } + mapping_info->map_count = 0; + if (!force_unmap && kref_read(&mapping_info->ref_count) > 1) { + CAM_ERR(CAM_SMMU, + "[SMMU_BT] Error: can't unmap buffer as it's still active, idx: %d, cb: %s, fd: 0x%x, ino: 0x%x, ref_count: %d", + idx, iommu_cb_set.cb_info[idx].name[0], ion_fd, mapping_info->i_ino, + kref_read(&mapping_info->ref_count)); + rc = -EPERM; + goto put_addr_end; + } + + /* unmapping one buffer from device */ + rc = cam_smmu_secure_unmap_buf_and_remove_from_list(mapping_info, idx); + if (rc) { + CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail"); + goto put_addr_end; + } + +put_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_unmap_stage2_iova); + +static int cam_smmu_map_iova_validate_params(int handle, + enum cam_smmu_map_dir dir, + dma_addr_t *paddr_ptr, size_t *len_ptr, + enum cam_smmu_region_id region_id) +{ + int idx, rc = 0; + enum dma_data_direction dma_dir; + + if (!paddr_ptr || !len_ptr) { + CAM_ERR(CAM_SMMU, "Input pointers are 
invalid"); + return -EINVAL; + } + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Invalid handle"); + return -EINVAL; + } + + /* clean the content from clients */ + *paddr_ptr = (dma_addr_t)NULL; + if (region_id != CAM_SMMU_REGION_SHARED) + *len_ptr = (size_t)0; + + dma_dir = cam_smmu_translate_dir(dir); + if (dma_dir == DMA_NONE) { + CAM_ERR(CAM_SMMU, "translate direction failed. dir = %d", dir); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, "handle or index invalid. idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + return rc; +} + +bool cam_smmu_supports_shared_region(int handle) +{ + int idx = GET_SMMU_TABLE_IDX(handle); + bool is_shared; + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + is_shared = (iommu_cb_set.cb_info[idx].shared_support) ? true : false; + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + + return is_shared; +} + +void cam_smmu_buffer_tracker_buffer_putref(struct cam_smmu_buffer_tracker *entry) +{ + + if (!entry) { + CAM_WARN(CAM_ISP, "[SMMU_BT] track buffer entry is NULL"); + return; + } + + if (refcount_dec_and_test(&entry->ref_count->refcount)) + CAM_ERR(CAM_SMMU, + "[SMMU_BT] Unexpected - buffer reference [fd: 0x%x ino: 0x%x cb: %s] zeroed prior to unmap invocation", + entry->ion_fd, entry->i_ino, entry->cb_name); + else + CAM_DBG(CAM_SMMU, + "[SMMU_BT] kref_count after put, [fd: 0x%x ino: 0x%x cb: %s], count: %d", + entry->ion_fd, entry->i_ino, entry->cb_name, kref_read(entry->ref_count)); + + + list_del_init(&entry->list); + + cam_smmu_util_return_map_entry(entry); + +} + +int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf, + bool dis_delayed_unmap, enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, + size_t *len_ptr, enum cam_smmu_region_id region_id, + bool is_internal, struct kref **ref_count) +{ + int idx, rc = 0; + struct timespec64 *ts = NULL; + enum cam_smmu_buf_state buf_state; + enum dma_data_direction 
dma_dir; + + rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr, + len_ptr, region_id); + if (rc) { + CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed"); + return rc; + } + + dma_dir = (enum dma_data_direction)dir; + idx = GET_SMMU_TABLE_IDX(handle); + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].is_secure) { + CAM_ERR(CAM_SMMU, + "Error: can't map non-secure mem to secure cb idx=%d", + idx); + rc = -EINVAL; + goto get_addr_end; + } + + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "hdl is not valid, idx=%d, table_hdl = %x, hdl = %x", + idx, iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto get_addr_end; + } + + if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) { + CAM_ERR(CAM_SMMU, + "Err:Dev %s should call SMMU attach before map buffer", + iommu_cb_set.cb_info[idx].name[0]); + rc = -EINVAL; + goto get_addr_end; + } + + buf_state = cam_smmu_user_reuse_fd_in_list(idx, ion_fd, dmabuf, paddr_ptr, + len_ptr, &ts, ref_count); + if (buf_state == CAM_SMMU_BUFF_EXIST) { + uint64_t ms = 0, hrs = 0, min = 0, sec = 0; + + if (ts) + CAM_CONVERT_TIMESTAMP_FORMAT((*ts), hrs, min, sec, ms); + CAM_DBG(CAM_SMMU, + "fd=%d already in list [%llu:%llu:%lu:%llu] cb=%s idx=%d handle=%d len=%llu,give same addr back", + ion_fd, hrs, min, sec, ms, + iommu_cb_set.cb_info[idx].name[0], + idx, handle, *len_ptr); + rc = 0; + goto get_addr_end; + } + + rc = cam_smmu_map_buffer_and_add_to_list(handle, ion_fd, + dis_delayed_unmap, dma_dir, paddr_ptr, len_ptr, + region_id, is_internal, dmabuf, ref_count); + if (rc < 0) { + CAM_ERR(CAM_SMMU, + "mapping or add list fail cb:%s idx=%d, fd=%d, region=%d, rc=%d", + iommu_cb_set.cb_info[idx].name[0], idx, + ion_fd, region_id, rc); + cam_smmu_dump_cb_info(idx); + } + +get_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_map_user_iova); + +int cam_smmu_map_kernel_iova(int handle, struct 
dma_buf *buf, + enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, + size_t *len_ptr, enum cam_smmu_region_id region_id) +{ + int idx, rc = 0; + enum cam_smmu_buf_state buf_state; + enum dma_data_direction dma_dir; + + rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr, + len_ptr, region_id); + if (rc) { + CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed"); + return rc; + } + + dma_dir = cam_smmu_translate_dir(dir); + idx = GET_SMMU_TABLE_IDX(handle); + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].is_secure) { + CAM_ERR(CAM_SMMU, + "Error: can't map non-secure mem to secure cb"); + rc = -EINVAL; + goto get_addr_end; + } + + if (iommu_cb_set.cb_info[idx].handle != handle) { + CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto get_addr_end; + } + + if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) { + CAM_ERR(CAM_SMMU, + "Err:Dev %s should call SMMU attach before map buffer", + iommu_cb_set.cb_info[idx].name[0]); + rc = -EINVAL; + goto get_addr_end; + } + + buf_state = cam_smmu_check_dma_buf_in_list(idx, buf, + paddr_ptr, len_ptr); + if (buf_state == CAM_SMMU_BUFF_EXIST) { + CAM_ERR(CAM_SMMU, + "dma_buf :%pK already in the list", buf); + rc = -EALREADY; + goto get_addr_end; + } + + rc = cam_smmu_map_kernel_buffer_and_add_to_list(handle, buf, dma_dir, + paddr_ptr, len_ptr, region_id); + if (rc < 0) + CAM_ERR(CAM_SMMU, "mapping or add list fail"); + +get_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_map_kernel_iova); + +int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf, + dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker, + struct kref **ref_count) +{ + int idx, rc = 0; + struct timespec64 *ts = NULL; + enum cam_smmu_buf_state buf_state; + unsigned long i_ino; + + if (!paddr_ptr || !len_ptr) { + CAM_ERR(CAM_SMMU, "Error: Input pointers are 
invalid"); + return -EINVAL; + } + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + /* clean the content from clients */ + *paddr_ptr = (dma_addr_t)NULL; + *len_ptr = (size_t)0; + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + if (iommu_cb_set.cb_info[idx].is_secure) { + CAM_ERR(CAM_SMMU, + "Error: can't get non-secure mem from secure cb"); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto get_addr_end; + } + + buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr, + len_ptr, &ts, &i_ino, ref_count); + if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) { + CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd); + rc = -EINVAL; + cam_smmu_dump_cb_info(idx); + goto get_addr_end; + } + + if (buf_tracker) + rc = cam_smmu_add_buf_to_track_list(ion_fd, i_ino, ref_count, buf_tracker, idx); + +get_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_get_iova); + +int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf, + dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker, + struct kref **ref_count) +{ + int idx, rc = 0; + enum cam_smmu_buf_state buf_state; + unsigned long i_ino; + + if (!paddr_ptr || !len_ptr) { + CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid"); + return -EINVAL; + } + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + /* clean the content from clients */ + *paddr_ptr = (dma_addr_t)NULL; + *len_ptr = (size_t)0; + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || 
idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + if (!iommu_cb_set.cb_info[idx].is_secure) { + CAM_ERR(CAM_SMMU, + "Error: can't get secure mem from non secure cb"); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto get_addr_end; + } + + buf_state = cam_smmu_validate_secure_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr, len_ptr, + &i_ino, ref_count); + + if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) { + CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd); + rc = -EINVAL; + goto get_addr_end; + } + + if (buf_tracker) + rc = cam_smmu_add_buf_to_track_list(ion_fd, i_ino, ref_count, buf_tracker, idx); + +get_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_get_stage2_iova); + +static int cam_smmu_unmap_validate_params(int handle) +{ + int idx; + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + /* find index in the iommu_cb_set.cb_info */ + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + return 0; +} + +int cam_smmu_unmap_user_iova(int handle, + int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id, + bool force_unmap) +{ + int idx, rc; + struct cam_dma_buff_info *mapping_info; + + rc = cam_smmu_unmap_validate_params(handle); + if (rc) { + CAM_ERR(CAM_SMMU, "unmap util validation failure"); + return rc; + } + + idx = GET_SMMU_TABLE_IDX(handle); + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].is_secure) { + CAM_ERR(CAM_SMMU, + "Error: can't unmap non-secure mem from secure cb"); + rc = -EINVAL; + goto unmap_end; + } + + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto unmap_end; + } + + /* Based on ion_fd & index, we can find mapping info of buffer */ + mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd, dma_buf); + + if (!mapping_info) { + CAM_ERR(CAM_SMMU, + "Error: Invalid params idx = %d, fd = %d", + idx, ion_fd); + rc = -EINVAL; + goto unmap_end; + } + + mapping_info->map_count--; + if (mapping_info->map_count > 0) { + CAM_DBG(CAM_SMMU, + "idx: %d, cb: %s fd = %d , ino: 0x%x, map_count: %d, ref_count: %d", + idx, iommu_cb_set.cb_info[idx].name[0], ion_fd, + mapping_info->i_ino, mapping_info->map_count, + kref_read(&mapping_info->ref_count)); + rc = 0; + goto unmap_end; + } + mapping_info->map_count = 0; + if (!force_unmap && kref_read(&mapping_info->ref_count) > 1) { + CAM_ERR(CAM_SMMU, + "[SMMU_BT] Error: can't unmap buffer as it's still active, idx: %d, cb: %s, fd: 0x%x, ino: 0x%x, ref_count: %d", + idx, iommu_cb_set.cb_info[idx].name[0], ion_fd, mapping_info->i_ino, + kref_read(&mapping_info->ref_count)); + rc = -EPERM; + goto unmap_end; + } + + /* Unmapping one buffer from device */ + CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx); + rc = 
cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx); + if (rc < 0) + CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail"); + +unmap_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_unmap_user_iova); + +int cam_smmu_unmap_kernel_iova(int handle, + struct dma_buf *buf, enum cam_smmu_region_id region_id) +{ + int idx, rc; + struct cam_dma_buff_info *mapping_info; + + rc = cam_smmu_unmap_validate_params(handle); + if (rc) { + CAM_ERR(CAM_SMMU, "unmap util validation failure"); + return rc; + } + + idx = GET_SMMU_TABLE_IDX(handle); + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].is_secure) { + CAM_ERR(CAM_SMMU, + "Error: can't unmap non-secure mem from secure cb"); + rc = -EINVAL; + goto unmap_end; + } + + if (iommu_cb_set.cb_info[idx].handle != handle) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto unmap_end; + } + + /* Based on dma_buf & index, we can find mapping info of buffer */ + mapping_info = cam_smmu_find_mapping_by_dma_buf(idx, buf); + + if (!mapping_info) { + CAM_ERR(CAM_SMMU, + "Error: Invalid params idx = %d, dma_buf = %pK", + idx, buf); + rc = -EINVAL; + goto unmap_end; + } + + /* Unmapping one buffer from device */ + CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx); + rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx); + if (rc < 0) + CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail"); + +unmap_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_unmap_kernel_iova); + + +int cam_smmu_put_iova(int handle, int ion_fd, struct dma_buf *dma_buf) +{ + int idx; + int rc = 0; + struct cam_dma_buff_info *mapping_info; + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + /* find index in the iommu_cb_set.cb_info */ + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx 
>= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto put_addr_end; + } + + /* based on ion fd and index, we can find mapping info of buffer */ + mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd, dma_buf); + if (!mapping_info) { + CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d", + idx, ion_fd); + rc = -EINVAL; + goto put_addr_end; + } + +put_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_put_iova); + +int cam_smmu_destroy_handle(int handle) +{ + int idx; + + if (handle == HANDLE_INIT) { + CAM_ERR(CAM_SMMU, "Error: Invalid handle"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. 
idx = %d hdl = %x", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (CAM_SMMU_HDL_VALIDATE(iommu_cb_set.cb_info[idx].handle, handle)) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -EINVAL; + } + + if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) { + CAM_ERR(CAM_SMMU, "UMD %s buffer list is not clean", + iommu_cb_set.cb_info[idx].name[0]); + cam_smmu_print_user_list(idx); + cam_smmu_clean_user_buffer_list(idx); + } + + if (!list_empty_careful( + &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list)) { + CAM_ERR(CAM_SMMU, "KMD %s buffer list is not clean", + iommu_cb_set.cb_info[idx].name[0]); + cam_smmu_print_kernel_list(idx); + cam_smmu_clean_kernel_buffer_list(idx); + } + + if (iommu_cb_set.cb_info[idx].is_secure) { + if (iommu_cb_set.cb_info[idx].secure_count == 0) { + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -EPERM; + } + + iommu_cb_set.cb_info[idx].secure_count--; + if (iommu_cb_set.cb_info[idx].secure_count == 0) { + iommu_cb_set.cb_info[idx].cb_count = 0; + iommu_cb_set.cb_info[idx].handle = HANDLE_INIT; + } + + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return 0; + } + + if (iommu_cb_set.cb_info[idx].is_mul_client && + iommu_cb_set.cb_info[idx].device_count) { + iommu_cb_set.cb_info[idx].device_count--; + + if (!iommu_cb_set.cb_info[idx].device_count) { + iommu_cb_set.cb_info[idx].cb_count = 0; + iommu_cb_set.cb_info[idx].handle = HANDLE_INIT; + } + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return 0; + } + + iommu_cb_set.cb_info[idx].device_count = 0; + iommu_cb_set.cb_info[idx].cb_count = 0; + iommu_cb_set.cb_info[idx].handle = HANDLE_INIT; + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return 0; +} +EXPORT_SYMBOL(cam_smmu_destroy_handle); + +static void cam_smmu_deinit_cb(struct cam_context_bank_info *cb) +{ + int i; + + if 
(cb->io_support && cb->domain) + cb->domain = NULL; + + if (cb->shared_support) { + for (i = 0; i < cb->shared_info.num_regions; i++) { + if (cb->shared_mem_pool[i]) { + gen_pool_destroy(cb->shared_mem_pool[i]); + cb->shared_mem_pool[i] = NULL; + } + } + } + + if (cb->scratch_buf_support) { + CAM_MEM_FREE(cb->scratch_map.bitmap); + cb->scratch_map.bitmap = NULL; + } +} + +static void cam_smmu_release_cb(struct platform_device *pdev) +{ + int i = 0; + + for (i = 0; i < iommu_cb_set.cb_num; i++) + cam_smmu_deinit_cb(&iommu_cb_set.cb_info[i]); + + devm_kfree(&pdev->dev, iommu_cb_set.cb_info); + iommu_cb_set.cb_num = 0; +} + +static int cam_smmu_setup_cb(struct cam_context_bank_info *cb, + struct device *dev) +{ + int rc = 0, i; + + if (!cb || !dev) { + CAM_ERR(CAM_SMMU, "Error: invalid input params"); + return -EINVAL; + } + + cb->dev = dev; + cb->is_secheap_allocated = false; + + atomic64_set(&cb->monitor_head, -1); + + /* Create a pool with 64K granularity for supporting shared memory */ + if (cb->shared_support) { + for (i = 0; i < cb->shared_info.num_regions; i++) { + cb->shared_mem_pool[i] = gen_pool_create( + SHARED_MEM_POOL_GRANULARITY, -1); + + if (!cb->shared_mem_pool[i]) + goto end; + + rc = gen_pool_add(cb->shared_mem_pool[i], + cb->shared_info.nested_regions[i].region_info.iova_start, + cb->shared_info.nested_regions[i].region_info.iova_len, + -1); + if (rc) { + CAM_ERR(CAM_SMMU, "Genpool chunk creation failed"); + gen_pool_destroy(cb->shared_mem_pool[i]); + cb->shared_mem_pool[i] = NULL; + goto end; + } + + CAM_DBG(CAM_SMMU, "cb: %s Shared mem start->%lX len->%zu", + cb->name[0], (unsigned long) + cb->shared_info.nested_regions[i].region_info.iova_start, + cb->shared_info.nested_regions[i].region_info.iova_len); + } + } + + if (cb->scratch_buf_support) { + rc = cam_smmu_init_scratch_map(&cb->scratch_map, + cb->scratch_info.iova_start, + cb->scratch_info.iova_len, + 0); + if (rc < 0) { + CAM_ERR(CAM_SMMU, + "Error: failed to create scratch map"); + rc = 
-ENODEV; + goto end; + } + } + + /* create a virtual mapping */ + if (cb->io_support) { + cb->domain = iommu_get_domain_for_dev(dev); + if (IS_ERR_OR_NULL(cb->domain)) { + CAM_ERR(CAM_SMMU, "Error: create domain Failed"); + rc = -ENODEV; + goto end; + } + + /* Enable custom iommu features, if applicable */ + cam_smmu_util_iommu_custom(dev, cb->discard_iova_start, + cb->discard_iova_len); + + cb->state = CAM_SMMU_ATTACH; + } else { + CAM_ERR(CAM_SMMU, "Context bank does not have IO region"); + rc = -ENODEV; + goto end; + } + + return rc; + +end: + if (cb->shared_support) { + for (--i; i >= 0; i--) { + if (cb->shared_mem_pool[i]) { + gen_pool_destroy(cb->shared_mem_pool[i]); + cb->shared_mem_pool[i] = NULL; + } + } + } + + if (cb->scratch_buf_support) { + CAM_MEM_FREE(cb->scratch_map.bitmap); + cb->scratch_map.bitmap = NULL; + } + + return rc; +} + +static int cam_alloc_smmu_context_banks(struct device *dev) +{ + struct device_node *domains_child_node = NULL; + + if (!dev) { + CAM_ERR(CAM_SMMU, "Error: Invalid device"); + return -ENODEV; + } + + iommu_cb_set.cb_num = 0; + + /* traverse thru all the child nodes and increment the cb count */ + for_each_available_child_of_node(dev->of_node, domains_child_node) { + if (of_device_is_compatible(domains_child_node, + "qcom,msm-cam-smmu-cb")) + iommu_cb_set.cb_num++; + + if (of_device_is_compatible(domains_child_node, + "qcom,qsmmu-cam-cb")) + iommu_cb_set.cb_num++; + } + + if (iommu_cb_set.cb_num == 0) { + CAM_ERR(CAM_SMMU, "Error: no context banks present"); + return -ENOENT; + } + + /* allocate memory for the context banks */ + iommu_cb_set.cb_info = devm_kzalloc(dev, + iommu_cb_set.cb_num * sizeof(struct cam_context_bank_info), + GFP_KERNEL); + + if (!iommu_cb_set.cb_info) { + CAM_ERR(CAM_SMMU, "Error: cannot allocate context banks"); + return -ENOMEM; + } + + cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT); + iommu_cb_set.cb_init_count = 0; + + CAM_DBG(CAM_SMMU, "no of context banks :%d", iommu_cb_set.cb_num); + return 
0; +} + +static int cam_smmu_get_discard_memory_regions(struct device_node *of_node, + dma_addr_t *discard_iova_start, size_t *discard_iova_len) +{ + uint32_t discard_iova[2] = { 0 }; + int num_values = 0; + int rc = 0; + + if (!discard_iova_start || !discard_iova_len) + return -EINVAL; + + *discard_iova_start = 0; + *discard_iova_len = 0; + + num_values = of_property_count_u32_elems(of_node, + "iova-region-discard"); + if (num_values <= 0) { + CAM_DBG(CAM_UTIL, "No discard region specified"); + return 0; + } else if (num_values != 2) { + CAM_ERR(CAM_UTIL, "Invalid discard region specified %d", + num_values); + return -EINVAL; + } + + rc = of_property_read_u32_array(of_node, + "iova-region-discard", + discard_iova, num_values); + if (rc) { + CAM_ERR(CAM_UTIL, "Can not read discard region %d", num_values); + return rc; + } else if (!discard_iova[0] || !discard_iova[1]) { + CAM_ERR(CAM_UTIL, + "Incorrect Discard region specified [0x%x 0x%x]", + discard_iova[0], discard_iova[1]); + return -EINVAL; + } + + CAM_DBG(CAM_UTIL, "Discard region [0x%x 0x%x]", + discard_iova[0], discard_iova[0] + discard_iova[1]); + + *discard_iova_start = discard_iova[0]; + *discard_iova_len = discard_iova[1]; + + return 0; +} + +static int cam_smmu_get_iova_info_util( + struct device_node **child_node, dma_addr_t *region_start, + size_t *region_len, uint32_t *region_id) +{ + int rc; + uint32_t id; + dma_addr_t start = 0; + size_t len = 0; + + if (iommu_cb_set.is_expanded_memory) { + rc = of_property_read_u64(*child_node, "iova-region-start", &start); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Failed to read iova-region-start"); + return -EINVAL; + } + + rc = of_property_read_u64(*child_node, "iova-region-len", + (uint64_t *)&len); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Failed to read iova-region-len"); + return -EINVAL; + } + } else { + rc = of_property_read_u32(*child_node, "iova-region-start", + (uint32_t *)&start); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Failed to read iova-region-start"); + return 
-EINVAL; + } + + rc = of_property_read_u32(*child_node, "iova-region-len", + (uint32_t *)&len); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Failed to read iova-region-len"); + return -EINVAL; + } + } + + rc = of_property_read_u32(*child_node, "iova-region-id", &id); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Failed to read iova-region-id"); + return -EINVAL; + } + + *region_start = start; + *region_len = len; + *region_id = id; + + return 0; +} + +static int cam_smmu_get_subregions_memory_info( + struct device_node **of_node, + struct cam_smmu_nested_region_info *nested_regions, + struct cam_context_bank_info *cb) +{ + int rc; + uint32_t subregion_count = 0, subregion_mask = 0; + enum cam_smmu_subregion_id subregion_id; + struct device_node *sub_node = NULL; + const char *subregion_name; + dma_addr_t subregion_start = 0; + size_t subregion_len = 0; + struct cam_smmu_subregion_info *subregions = NULL; + struct device_node *child_node = *of_node; + + /* Count number of child nodes */ + for_each_available_child_of_node(child_node, sub_node) + subregion_count++; + + if (subregion_count > CAM_SMMU_SUBREGION_MAX) { + CAM_ERR(CAM_SMMU, + "Invalid number of subregions max=%u count=%u cb=%s", + CAM_SMMU_SUBREGION_MAX, subregion_count, cb->name[0]); + return -EINVAL; + } + + for_each_available_child_of_node(child_node, sub_node) { + subregions = + &nested_regions->subregions[nested_regions->num_subregions]; + + rc = of_property_read_string(sub_node, + "iova-region-name", &subregion_name); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "IOVA subregion not found"); + goto err; + } + + rc = cam_smmu_get_iova_info_util(&sub_node, &subregion_start, + &subregion_len, &subregion_id); + if (rc) + goto err; + + switch (subregion_id) { + case CAM_SMMU_SUBREGION_GENERIC: + if (subregion_mask & BIT(subregion_id)) + goto repeated_subregion; + + subregions->subregion_id = subregion_id; + subregions->subregion_info.iova_len = subregion_len; + subregions->subregion_info.iova_start = subregion_start; + break; + 
case CAM_SMMU_SUBREGION_SYNX_HWMUTEX: + case CAM_SMMU_SUBREGION_IPC_HWMUTEX: + case CAM_SMMU_SUBREGION_GLOBAL_SYNC_MEM: + case CAM_SMMU_SUBREGION_GLOBAL_CNTR: + case CAM_SMMU_SUBREGION_LLCC_REGISTER: + if (subregion_mask & BIT(subregion_id)) + goto repeated_subregion; + + subregions->subregion_id = subregion_id; + subregions->subregion_info.iova_len = subregion_len; + subregions->subregion_info.iova_start = subregion_start; + rc = of_property_read_u32(sub_node, + "phy-addr", (uint32_t *)&subregions->subregion_info.phy_addr); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Failed to read phy addr"); + goto err; + } + break; + default: + CAM_ERR(CAM_SMMU, "Unsupported subregion_id: %d", subregion_id); + rc = -EINVAL; + goto err; + } + + subregion_mask |= BIT(subregion_id); + nested_regions->num_subregions++; + CAM_DBG(CAM_SMMU, + "cb=%s region=%d iova=0x%x len=0x%x phy=0x%pK num_subregions=%u", + cb->name[0], subregion_id, subregions->subregion_info.iova_start, + subregions->subregion_info.iova_len, + subregions->subregion_info.phy_addr, nested_regions->num_subregions); + } + + return 0; + +repeated_subregion: + CAM_ERR(CAM_SMMU, + "Subregion=%u was already populated in cb=%s subregion_mask=0x%x", + subregion_id, cb->name[0], subregion_mask); + rc = -EINVAL; +err: + of_node_put(child_node); + return rc; +} + +static int cam_smmu_validate_discard_iova_region( + struct cam_context_bank_info *cb, + struct cam_smmu_region_info *region_info) +{ + /* Make sure Discard region is properly specified */ + if ((cb->discard_iova_start != + region_info->discard_iova_start) || + (cb->discard_iova_len != + region_info->discard_iova_len)) { + CAM_ERR(CAM_SMMU, + "Mismatch Discard region specified, [0x%x 0x%x] [0x%x 0x%x]", + cb->discard_iova_start, + cb->discard_iova_len, + region_info->discard_iova_start, + region_info->discard_iova_len); + return -EINVAL; + } else if (cb->discard_iova_start && cb->discard_iova_len) { + if ((cb->discard_iova_start <= + region_info->iova_start) || + 
(cb->discard_iova_start >= + region_info->iova_start + region_info->iova_len) || + (cb->discard_iova_start + cb->discard_iova_len >= + region_info->iova_start + region_info->iova_len)) { + CAM_ERR(CAM_SMMU, + "[%s] : Incorrect Discard region specified [0x%x 0x%x] in [0x%x 0x%x]", + cb->name[0], + cb->discard_iova_start, + cb->discard_iova_start + cb->discard_iova_len, + region_info->iova_start, + region_info->iova_start + region_info->iova_len); + return -EINVAL; + } + + CAM_INFO(CAM_SMMU, + "[%s] : Discard region specified [0x%x 0x%x] in [0x%x 0x%x]", + cb->name[0], + cb->discard_iova_start, + cb->discard_iova_start + cb->discard_iova_len, + region_info->iova_start, + region_info->iova_start + region_info->iova_len); + } + + return 0; +} + +static int cam_smmu_get_memory_regions_info(struct device_node *of_node, + struct cam_context_bank_info *cb) +{ + int rc = 0, i; + struct device_node *mem_map_node = NULL; + struct device_node *child_node = NULL; + dma_addr_t region_start = 0; + size_t region_len = 0; + uint32_t region_id; + const char *region_name; + int num_regions = 0; + + if (!of_node || !cb) { + CAM_ERR(CAM_SMMU, "Invalid argument(s)"); + return -EINVAL; + } + + mem_map_node = of_get_child_by_name(of_node, "iova-mem-map"); + cb->is_secure = of_property_read_bool(of_node, "qcom,secure-cb"); + + /* + * We always expect a memory map node, except when it is a secure + * context bank. 
+ */ + if (!mem_map_node) { + if (cb->is_secure) + return 0; + CAM_ERR(CAM_SMMU, "iova-mem-map not present"); + return -EINVAL; + } + + for_each_available_child_of_node(mem_map_node, child_node) { + num_regions++; + + rc = of_property_read_string(child_node, + "iova-region-name", ®ion_name); + if (rc < 0) { + of_node_put(mem_map_node); + CAM_ERR(CAM_SMMU, "IOVA region not found"); + return -EINVAL; + } + + rc = cam_smmu_get_iova_info_util(&child_node, + ®ion_start, ®ion_len, ®ion_id); + if (rc) { + of_node_put(mem_map_node); + return rc; + } + + switch (region_id) { + case CAM_SMMU_REGION_FIRMWARE: { + int32_t num_firmware_regions = cb->firmware_info.num_regions; + struct cam_smmu_nested_region_info *nested_reg_info; + + if (num_firmware_regions >= CAM_SMMU_MULTI_REGION_MAX) { + CAM_ERR(CAM_SMMU, + "Exceeding max supported number of regions max: %u current: %u in cb: %s for region: %d", + CAM_SMMU_MULTI_REGION_MAX, num_firmware_regions, + cb->name[0], region_id); + rc = -EINVAL; + goto end; + } + + nested_reg_info = &cb->firmware_info.nested_regions[num_firmware_regions]; + nested_reg_info->subregion_support = + of_property_read_bool(child_node, "subregion_support"); + + if (nested_reg_info->subregion_support) { + rc = cam_smmu_get_subregions_memory_info(&child_node, + nested_reg_info, cb); + if (rc) + goto end; + } + + nested_reg_info->region_info.iova_start = region_start; + nested_reg_info->region_info.iova_len = region_len; + + cb->firmware_info.num_regions++; + cb->firmware_support = 1; + } + break; + case CAM_SMMU_REGION_SHARED: { + int32_t num_shared_regions = cb->shared_info.num_regions; + struct cam_smmu_nested_region_info *nested_reg_info; + + if (num_shared_regions >= CAM_SMMU_MULTI_REGION_MAX) { + CAM_ERR(CAM_SMMU, + "Exceeding max supported number of regions max: %u current: %u in cb: %s for region: %d", + CAM_SMMU_MULTI_REGION_MAX, num_shared_regions, + cb->name[0], region_id); + rc = -EINVAL; + goto end; + } + + nested_reg_info = 
&cb->shared_info.nested_regions[num_shared_regions]; + nested_reg_info->subregion_support = + of_property_read_bool(child_node, "subregion_support"); + + if (nested_reg_info->subregion_support) { + rc = cam_smmu_get_subregions_memory_info(&child_node, + nested_reg_info, cb); + if (rc) + goto end; + } + + nested_reg_info->region_info.iova_start = region_start; + nested_reg_info->region_info.iova_len = region_len; + + cb->shared_info.num_regions++; + cb->shared_support = 1; + } + break; + case CAM_SMMU_REGION_SCRATCH: + cb->scratch_buf_support = 1; + cb->scratch_info.iova_start = region_start; + cb->scratch_info.iova_len = region_len; + break; + case CAM_SMMU_REGION_IO: { + int32_t num_io_regions = cb->io_info.num_regions; + struct cam_smmu_nested_region_info *nested_reg_info; + + if (num_io_regions >= CAM_SMMU_MULTI_REGION_MAX) { + CAM_ERR(CAM_SMMU, + "Exceeding max supported number of regions max: %u current: %u in cb: %s for region: %d", + CAM_SMMU_MULTI_REGION_MAX, num_io_regions, + cb->name[0], region_id); + rc = -EINVAL; + goto end; + } + + nested_reg_info = &cb->io_info.nested_regions[num_io_regions]; + nested_reg_info->subregion_support = + of_property_read_bool(child_node, "subregion_support"); + + if (nested_reg_info->subregion_support) { + rc = cam_smmu_get_subregions_memory_info(&child_node, + nested_reg_info, cb); + if (rc) + goto end; + } + + nested_reg_info->region_info.iova_start = region_start; + nested_reg_info->region_info.iova_len = region_len; + rc = cam_smmu_get_discard_memory_regions(child_node, + &nested_reg_info->region_info.discard_iova_start, + &nested_reg_info->region_info.discard_iova_len); + if (rc) { + CAM_ERR(CAM_SMMU, + "Invalid Discard region specified in IO region, rc: %d cb: %s", + rc, cb->name[0]); + goto end; + } + cb->io_info.num_regions++; + cb->io_support = 1; + } + break; + case CAM_SMMU_REGION_SECHEAP: + cb->secheap_support = 1; + cb->secheap_info.iova_start = region_start; + cb->secheap_info.iova_len = region_len; + break; 
+ case CAM_SMMU_REGION_FWUNCACHED:{ + int32_t num_fwuncached_regions = cb->fwuncached_region.num_regions; + struct cam_smmu_nested_region_info *nested_reg_info; + + if (num_fwuncached_regions >= CAM_SMMU_MULTI_REGION_MAX) { + CAM_ERR(CAM_SMMU, + "Exceeding max supported number of regions max: %u current: %u in cb: %s for region: %d", + CAM_SMMU_MULTI_REGION_MAX, num_fwuncached_regions, + cb->name[0], region_id); + rc = -EINVAL; + goto end; + } + + nested_reg_info = + &cb->fwuncached_region.nested_regions[num_fwuncached_regions]; + nested_reg_info->subregion_support = + of_property_read_bool(child_node, "subregion_support"); + + if (nested_reg_info->subregion_support) { + rc = cam_smmu_get_subregions_memory_info(&child_node, + nested_reg_info, cb); + if (rc) + goto end; + } + + nested_reg_info->region_info.iova_start = region_start; + nested_reg_info->region_info.iova_len = region_len; + + cb->fwuncached_region.num_regions++; + cb->fwuncached_region_support = 1; + } + break; + case CAM_SMMU_REGION_QDSS: { + int32_t num_qdss_regions = cb->qdss_info.num_regions; + struct cam_smmu_nested_region_info *nested_reg_info; + + if (num_qdss_regions >= CAM_SMMU_MULTI_REGION_MAX) { + CAM_ERR(CAM_SMMU, + "Exceeding max supported number of regions max: %u current: %u in cb: %s for region: %d", + CAM_SMMU_MULTI_REGION_MAX, num_qdss_regions, + cb->name[0], region_id); + rc = -EINVAL; + goto end; + } + + nested_reg_info = + &cb->qdss_info.nested_regions[num_qdss_regions]; + nested_reg_info->subregion_support = + of_property_read_bool(child_node, "subregion_support"); + + if (nested_reg_info->subregion_support) { + CAM_ERR(CAM_SMMU, + "Subregion for QDSS not supported, failing cb: %s initialization", + cb->name[0]); + goto end; + } + + nested_reg_info->region_info.iova_start = region_start; + nested_reg_info->region_info.iova_len = region_len; + /* phy-addr field is mandatory for QDSS */ + rc = of_property_read_u32(child_node, "qdss-phy-addr", + (uint32_t 
*)&nested_reg_info->region_info.phy_addr); + if (rc) { + CAM_ERR(CAM_SMMU, "No phy-addr field for qdss in cb: %s", + cb->name[0]); + goto end; + } + + cb->qdss_info.num_regions++; + cb->qdss_support = 1; + } + break; + case CAM_SMMU_REGION_DEVICE:{ + int32_t num_device_regions = cb->device_region.num_regions; + struct cam_smmu_nested_region_info *nested_reg_info; + + if (num_device_regions >= CAM_SMMU_MULTI_REGION_MAX) { + CAM_ERR(CAM_SMMU, + "Exceeding max supported number of regions max: %u current: %u in cb: %s for region: %d", + CAM_SMMU_MULTI_REGION_MAX, num_device_regions, + cb->name[0], region_id); + rc = -EINVAL; + goto end; + } + + nested_reg_info = &cb->device_region.nested_regions[num_device_regions]; + nested_reg_info->subregion_support = + of_property_read_bool(child_node, "subregion_support"); + + if (nested_reg_info->subregion_support) { + rc = cam_smmu_get_subregions_memory_info(&child_node, + nested_reg_info, cb); + if (rc) + goto end; + } + + nested_reg_info->region_info.iova_start = region_start; + nested_reg_info->region_info.iova_len = region_len; + + rc = of_property_read_u32(child_node, + "phy-addr", (uint32_t *)&nested_reg_info->region_info.phy_addr); + if (rc) { + CAM_DBG(CAM_SMMU, "No phy-addr field in device in cb: %s", + cb->name[0]); + rc = 0; + } + + cb->device_region.num_regions++; + cb->device_region_support = 1; + } + break; + default: + CAM_ERR(CAM_SMMU, + "Incorrect region id present in DT file: %d", + region_id); + } + + CAM_DBG(CAM_SMMU, "Found label -> %s", cb->name[0]); + CAM_DBG(CAM_SMMU, "Found region -> %s", region_name); + CAM_DBG(CAM_SMMU, "region_start -> 0x%lx", region_start); + CAM_DBG(CAM_SMMU, "region_len -> 0x%lx", region_len); + CAM_DBG(CAM_SMMU, "region_id -> 0x%x", region_id); + } + + if (cb->io_support) { + rc = cam_smmu_get_discard_memory_regions(of_node, + &cb->discard_iova_start, + &cb->discard_iova_len); + if (rc) { + CAM_ERR(CAM_SMMU, + "Invalid Discard region specified in CB, rc=%d", + rc); + return 
-EINVAL; + } + + for (i = 0; i < cb->io_info.num_regions; i++) { + rc = cam_smmu_validate_discard_iova_region(cb, + &cb->io_info.nested_regions[i].region_info); + if (rc) + goto end; + } + } + + if (!num_regions) { + CAM_ERR(CAM_SMMU, + "No memory regions found, at least one needed"); + rc = -ENODEV; + } + + return rc; + +end: + of_node_put(mem_map_node); + return rc; +} + +static void cam_smmu_check_for_fault_properties( + const char *fault_property, struct cam_context_bank_info *cb) +{ + if (!strcmp(fault_property, "non-fatal")) + cb->non_fatal_faults_en = true; + else if (!strcmp(fault_property, "stall-disable")) + cb->stall_disable_en = true; + + CAM_DBG(CAM_SMMU, "iommu fault property: %s found for cb: %s", + fault_property, cb->name[0]); +} + +static int cam_populate_smmu_context_banks(struct device *dev, + enum cam_iommu_type type) +{ + int rc = 0, i, j, num_fault_props = 0; + struct cam_context_bank_info *cb; + struct device *ctx = NULL; + bool dma_coherent, dma_coherent_hint, is_found; + + if (!dev) { + CAM_ERR(CAM_SMMU, "Error: Invalid device"); + return -ENODEV; + } + + /* check the bounds */ + if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, "Error: populate more than allocated cb"); + rc = -EBADHANDLE; + goto cb_init_fail; + } + + /* read the context bank from cb set */ + cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count]; + + cb->is_mul_client = + of_property_read_bool(dev->of_node, "multiple-client-devices"); + cb->num_shared_hdl = of_property_count_strings(dev->of_node, + "cam-smmu-label"); + + if (cb->num_shared_hdl > + CAM_SMMU_SHARED_HDL_MAX) { + CAM_ERR(CAM_CDM, "Invalid count of client names count=%d", + cb->num_shared_hdl); + rc = -EINVAL; + return rc; + } + + /* set the name of the context bank */ + for (i = 0; i < cb->num_shared_hdl; i++) { + rc = of_property_read_string_index(dev->of_node, + "cam-smmu-label", i, &cb->name[i]); + if (rc < 0) { + CAM_ERR(CAM_SMMU, + "Error: failed to read label from sub 
device"); + goto cb_init_fail; + } + } + + cb->num_multi_regions = of_property_count_strings(dev->of_node, + "multiple-same-region-clients"); + + /* Optional property is not set for this bank */ + if (cb->num_multi_regions < 0) + cb->num_multi_regions = 0; + + if (cb->num_multi_regions > CAM_SMMU_MULTI_REGION_MAX) { + CAM_ERR(CAM_CDM, "Invalid count of multi region clients = %d", + cb->num_multi_regions); + rc = -EINVAL; + goto cb_init_fail; + } + + for (j = 0; j < cb->num_multi_regions; j++) { + is_found = false; + + rc = of_property_read_string_index(dev->of_node, + "multiple-same-region-clients", j, &cb->multi_region_clients[j]); + if (rc < 0) { + CAM_ERR(CAM_SMMU, + "Error: failed to read label from sub device"); + goto cb_init_fail; + } + + /* Needs to match shared hdl client list */ + for (i = 0; i < cb->num_shared_hdl; i++) { + if (strcmp(cb->name[i], cb->multi_region_clients[j])) { + is_found = true; + break; + } + } + + if (!is_found) { + CAM_ERR(CAM_SMMU, + "%s multi region client not found in shared client list cb = %s", + cb->multi_region_clients[j], cb->name[0]); + rc = -EINVAL; + goto cb_init_fail; + } + } + + rc = cam_smmu_get_memory_regions_info(dev->of_node, cb); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Error: Getting region info"); + return rc; + } + + if (cb->is_secure) { + /* increment count to next bank */ + cb->dev = dev; + iommu_cb_set.cb_init_count++; + return 0; + } + + /* set up the iommu mapping for the context bank */ + if (type == CAM_QSMMU) { + CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for : %s", + cb->name[0]); + return -ENODEV; + } + + ctx = dev; + CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name[0]); + + cb->coherency_mode = CAM_SMMU_NO_COHERENCY; + + dma_coherent = of_property_read_bool(dev->of_node, "dma-coherent"); + dma_coherent_hint = of_property_read_bool(dev->of_node, + "dma-coherent-hint-cached"); + + if (dma_coherent && dma_coherent_hint) { + CAM_ERR(CAM_SMMU, + "[%s] : Cannot enable both dma-coherent and 
dma-coherent-hint-cached", + cb->name[0]); + return -EBADR; + } + + if (dma_coherent) + cb->coherency_mode = CAM_SMMU_DMA_COHERENT; + else if (dma_coherent_hint) + cb->coherency_mode = CAM_SMMU_DMA_COHERENT_HINT_CACHED; + + CAM_DBG(CAM_SMMU, "[%s] : io cohereny mode %d", cb->name[0], + cb->coherency_mode); + + rc = cam_smmu_setup_cb(cb, ctx); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", + cb->name[0]); + goto cb_init_fail; + } + if (cb->io_support && cb->domain) { + iommu_set_fault_handler(cb->domain, + cam_smmu_iommu_fault_handler, + (void *)cb->name[0]); + + num_fault_props = of_property_count_strings(dev->of_node, "qcom,iommu-faults"); + if (num_fault_props > 0) { + const char *fault_property = NULL; + + for (i = 0; i < num_fault_props; i++) { + rc = of_property_read_string_index(dev->of_node, + "qcom,iommu-faults", i, &fault_property); + if (!rc) + cam_smmu_check_for_fault_properties(fault_property, cb); + } + /* Missing fault property reads is not an error */ + rc = 0; + } + } + + if (!dev->dma_parms) + dev->dma_parms = devm_kzalloc(dev, + sizeof(*dev->dma_parms), GFP_KERNEL); + + if (!dev->dma_parms) { + CAM_WARN(CAM_SMMU, + "Failed to allocate dma_params"); + dev->dma_parms = NULL; + goto end; + } + + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64)); + + if (iommu_cb_set.is_expanded_memory) { + CAM_DBG(CAM_SMMU, "[%s] setting max address mask", cb->name[0]); + /* the largest address is the min(dma_mask, value_from_iommu-dma_addr_pool) */ + rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (rc) + CAM_ERR(CAM_SMMU, "[%s] Failed in setting max address mask, rc %d", + cb->name[0], rc); + } + +end: + /* increment count to next bank */ + iommu_cb_set.cb_init_count++; + CAM_DBG(CAM_SMMU, "X: cb init count :%d", iommu_cb_set.cb_init_count); + +cb_init_fail: + return rc; +} + +static void cam_smmu_mini_dump_entries( + struct cam_smmu_mini_dump_cb_info *target, + struct 
cam_context_bank_info *src) +{ + int i = 0; + int64_t state_head = 0; + uint32_t index, num_entries, oldest_entry; + + state_head = atomic64_read(&src->monitor_head); + + if (state_head == -1) { + return; + } else if (state_head < CAM_SMMU_MONITOR_MAX_ENTRIES) { + num_entries = state_head; + oldest_entry = 0; + } else { + num_entries = CAM_SMMU_MONITOR_MAX_ENTRIES; + div_u64_rem(state_head + 1, + CAM_SMMU_MONITOR_MAX_ENTRIES, &oldest_entry); + } + + index = oldest_entry; + + for (i = 0; i < num_entries; i++) { + memcpy(&target->mapping[index], + &src->monitor_entries[index], + sizeof(struct cam_smmu_monitor)); + index = (index + 1) % CAM_SMMU_MONITOR_MAX_ENTRIES; + } +} + +static unsigned long cam_smmu_mini_dump_cb(void *dst, unsigned long len, + void *priv_data) +{ + struct cam_smmu_mini_dump_cb_info *cb_md; + struct cam_smmu_mini_dump_info *md; + struct cam_context_bank_info *cb; + unsigned long dumped_len = 0; + unsigned long remain_len = len; + uint32_t i = 0, j = 0; + + if (!dst || len < sizeof(*md)) { + CAM_ERR(CAM_SMMU, "Invalid params dst: %pk len:%lu", + dst, len); + return 0; + } + + md = (struct cam_smmu_mini_dump_info *)dst; + md->cb_num = 0; + md->cb = (struct cam_smmu_mini_dump_cb_info *) + ((uint8_t *)dst + sizeof(*md)); + dumped_len += sizeof(*md); + remain_len = len - dumped_len; + + for (i = 0; i < iommu_cb_set.cb_num; i++) { + if (remain_len < sizeof(*cb_md)) + goto end; + + cb = &iommu_cb_set.cb_info[i]; + cb_md = &md->cb[i]; + cb_md->is_mul_client = cb->is_mul_client; + cb_md->is_secure = cb->is_secure; + cb_md->is_secheap_allocated = cb->is_secheap_allocated; + cb_md->is_qdss_allocated = cb->is_qdss_allocated; + cb_md->scratch_buf_support = cb->scratch_buf_support; + cb_md->firmware_support = cb->firmware_support; + cb_md->shared_support = cb->shared_support; + cb_md->io_support = cb->io_support; + cb_md->fwuncached_region_support = cb->fwuncached_region_support; + cb_md->qdss_support = cb->qdss_support; + cb_md->coherency_mode = 
cb->coherency_mode; + cb_md->state = cb->state; + cb_md->va_start = cb->va_start; + cb_md->discard_iova_start = cb->discard_iova_start; + cb_md->qdss_phy_addr = cb->qdss_phy_addr; + cb_md->va_len = cb->va_len; + cb_md->io_mapping_size = cb->io_mapping_size; + cb_md->shared_mapping_size = cb->shared_mapping_size; + cb_md->discard_iova_len = cb->discard_iova_len; + cb_md->handle = cb->handle; + cb_md->device_count = cb->device_count; + cb_md->num_shared_hdl = cb->num_shared_hdl; + cb_md->secure_count = cb->secure_count; + cb_md->cb_count = cb->cb_count; + cb_md->pf_count = cb->pf_count; + memcpy(&cb_md->scratch_info, &cb->scratch_info, + sizeof(struct cam_smmu_region_info)); + memcpy(&cb_md->firmware_info, &cb->firmware_info, + sizeof(struct cam_smmu_region_info)); + memcpy(&cb_md->shared_info, &cb->shared_info, + sizeof(struct cam_smmu_region_info)); + memcpy(&cb_md->io_info, &cb->io_info, + sizeof(struct cam_smmu_region_info)); + memcpy(&cb_md->secheap_info, &cb->secheap_info, + sizeof(struct cam_smmu_region_info)); + memcpy(&cb_md->fwuncached_region, &cb->fwuncached_region, + sizeof(struct cam_smmu_region_info)); + memcpy(&cb_md->qdss_info, &cb->qdss_info, + sizeof(struct cam_smmu_region_info)); + memcpy(&cb_md->device_mem_region, &cb->device_region, + sizeof(struct cam_smmu_region_info)); + + for (j = 0; j < iommu_cb_set.cb_info[i].num_shared_hdl; j++) + scnprintf(cb_md->name[j], 16, cb->name[j]); + + cam_smmu_mini_dump_entries(cb_md, cb); + dumped_len += sizeof(*cb_md); + remain_len = len - dumped_len; + md->cb_num++; + } +end: + return dumped_len; +} + +static int cam_smmu_set_fatal_pf_mask(void *data, u64 val) +{ + iommu_cb_set.debug_cfg.fatal_pf_mask = val; + CAM_DBG(CAM_SMMU, "Set fatal page fault value: 0x%llx", + iommu_cb_set.debug_cfg.fatal_pf_mask); + return 0; +} + +static int cam_smmu_get_fatal_pf_mask(void *data, u64 *val) +{ + *val = iommu_cb_set.debug_cfg.fatal_pf_mask; + CAM_DBG(CAM_SMMU, "Get fatal page fault value: 0x%llx", + *val); + return 0; 
+} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_smmu_fatal_pf_mask, + cam_smmu_get_fatal_pf_mask, cam_smmu_set_fatal_pf_mask, "%16llu"); + +static int cam_smmu_create_debug_fs(void) +{ + int rc = 0; + struct dentry *dbgfileptr = NULL; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("smmu", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_SMMU,"DebugFS could not create directory!"); + rc = -ENOENT; + goto end; + } + /* Store parent inode for cleanup in caller */ + iommu_cb_set.debug_cfg.dentry = dbgfileptr; + + debugfs_create_bool("cb_dump_enable", 0644, + iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.cb_dump_enable); + debugfs_create_bool("map_profile_enable", 0644, + iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.map_profile_enable); + debugfs_create_file("fatal_pf_mask", 0644, + iommu_cb_set.debug_cfg.dentry, NULL, &cam_smmu_fatal_pf_mask); + debugfs_create_bool("disable_buf_tracking", 0644, + iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.disable_buf_tracking); + +end: + return rc; +} + +int cam_smmu_driver_init(struct cam_csf_version *csf_ver, int32_t *num_cbs) +{ + int i; + /* expect inputs to be valid */ + if (!csf_ver || !num_cbs) { + CAM_ERR(CAM_SMMU, "Invalid params csf: %p num_cbs: %p", + csf_ver, num_cbs); + return -EINVAL; + } + + *num_cbs = iommu_cb_set.cb_num; + memcpy(csf_ver, &iommu_cb_set.csf_version, sizeof(*csf_ver)); + + iommu_cb_set.is_track_buf_disabled = iommu_cb_set.debug_cfg.disable_buf_tracking; + + if (!iommu_cb_set.is_track_buf_disabled) { + buf_tracking_pool = CAM_MEM_ZALLOC_ARRAY(CAM_SMMU_BUF_TRACKING_POOL, + sizeof(struct cam_smmu_buffer_tracker), GFP_KERNEL); + + if (!buf_tracking_pool) { + CAM_WARN(CAM_SMMU, "[SMMU_BT] Not enough mem for buffer tracker pool"); + goto end; + } + + INIT_LIST_HEAD(&iommu_cb_set.buf_tracker_free_list); + for (i = 0; i < CAM_SMMU_BUF_TRACKING_POOL; i++) { + INIT_LIST_HEAD(&buf_tracking_pool[i].list); + list_add_tail(&buf_tracking_pool[i].list, + 
&iommu_cb_set.buf_tracker_free_list); + } + } + +end: + return 0; +} + +void cam_smmu_driver_deinit(void) +{ + INIT_LIST_HEAD(&iommu_cb_set.buf_tracker_free_list); + CAM_MEM_FREE(buf_tracking_pool); +} + +static int cam_smmu_fw_dev_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + int i, num_mem_region; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + num_mem_region = of_count_phandle_with_args(dev->of_node, "memory-region", NULL); + if ((num_mem_region < 0) || (num_mem_region > CAM_SMMU_MULTI_REGION_MAX)) { + CAM_ERR(CAM_SMMU, + "Failed at parsing number of memory region: %d, expected maximum memory region: %d", + num_mem_region, CAM_SMMU_MULTI_REGION_MAX); + return -EINVAL; + } + + CAM_DBG(CAM_SMMU, "Number of ICP fw memory region: %d", num_mem_region); + icp_fw = CAM_MEM_ZALLOC_ARRAY(num_mem_region, sizeof(struct cam_fw_alloc_info), GFP_KERNEL); + if (!icp_fw) { + CAM_ERR(CAM_SMMU, "Failed at allocating space for icp fw"); + return -ENOMEM; + } + + for (i = 0; i < num_mem_region; i++) { + icp_fw[i].fw_dev = dev; + icp_fw[i].fw_id = i; + } + + CAM_DBG(CAM_SMMU, "Binding component: %s", pdev->name); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return 0; +} + +static void cam_smmu_fw_dev_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + CAM_MEM_FREE(icp_fw); + icp_fw = NULL; + + CAM_DBG(CAM_SMMU, "Unbinding component: %s", pdev->name); +} + +const static struct component_ops cam_smmu_fw_dev_component_ops = { + .bind = cam_smmu_fw_dev_component_bind, + .unbind = cam_smmu_fw_dev_component_unbind, +}; + +static int cam_smmu_cb_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct platform_device *pdev 
= to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Error: populating context banks"); + cam_smmu_release_cb(pdev); + return -ENOMEM; + } + + CAM_DBG(CAM_SMMU, "CB component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return 0; +} + +static void cam_smmu_cb_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + CAM_DBG(CAM_SMMU, "Unbinding component: %s", pdev->name); +} + +const static struct component_ops cam_smmu_cb_component_ops = { + .bind = cam_smmu_cb_component_bind, + .unbind = cam_smmu_cb_component_unbind, +}; + +static int cam_smmu_cb_qsmmu_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc = 0; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Failed in populating context banks"); + return -ENOMEM; + } + + CAM_DBG(CAM_SMMU, "QSMMU CB component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return 0; +} + +static void cam_smmu_cb_qsmmu_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + CAM_DBG(CAM_SMMU, "Unbinding component: %s", pdev->name); +} + +const static struct component_ops cam_smmu_cb_qsmmu_component_ops = { + .bind = cam_smmu_cb_qsmmu_component_bind, + .unbind = cam_smmu_cb_qsmmu_component_unbind, +}; + +static int cam_smmu_component_bind(struct 
device *dev, + struct device *master_dev, void *data) +{ + int rc; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work); + mutex_init(&iommu_cb_set.payload_list_lock); + spin_lock_init(&iommu_cb_set.s_lock); + INIT_LIST_HEAD(&iommu_cb_set.payload_list); + cam_smmu_create_debug_fs(); + + iommu_cb_set.force_cache_allocs = + of_property_read_bool(dev->of_node, "force_cache_allocs"); + iommu_cb_set.need_shared_buffer_padding = + of_property_read_bool(dev->of_node, "need_shared_buffer_padding"); + iommu_cb_set.is_expanded_memory = + of_property_read_bool(dev->of_node, "expanded_memory"); + + cam_common_register_mini_dump_cb(cam_smmu_mini_dump_cb, + "cam_smmu", NULL); + + rc = cam_smmu_fetch_csf_version(&iommu_cb_set.csf_version); + if (rc) { + CAM_ERR(CAM_SMMU, "Failed to fetch CSF version: %d", rc); + return rc; + } + + CAM_DBG(CAM_SMMU, "Main component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + return 0; +} + +static void cam_smmu_component_unbind(struct device *dev, + struct device *master_dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + /* release all the context banks and memory allocated */ + cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT); + if (dev && dev->dma_parms) { + devm_kfree(dev, dev->dma_parms); + dev->dma_parms = NULL; + } + + cam_smmu_release_cb(pdev); + iommu_cb_set.debug_cfg.dentry = NULL; +} + +const static struct component_ops cam_smmu_component_ops = { + .bind = cam_smmu_component_bind, + .unbind = cam_smmu_component_unbind, +}; + +static int cam_smmu_probe(struct platform_device *pdev) +{ + int rc = 0; + struct device *dev = &pdev->dev; + + dev->dma_parms = NULL; + CAM_DBG(CAM_SMMU, "Adding SMMU component: %s", pdev->name); + if 
(of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) { + rc = cam_alloc_smmu_context_banks(dev); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Failed in allocating context banks"); + return -ENOMEM; + } + + rc = component_add(&pdev->dev, &cam_smmu_component_ops); + } else if (of_device_is_compatible(dev->of_node, + "qcom,msm-cam-smmu-cb")) { + rc = component_add(&pdev->dev, &cam_smmu_cb_component_ops); + } else if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) { + rc = component_add(&pdev->dev, + &cam_smmu_cb_qsmmu_component_ops); + } else if (of_device_is_compatible(dev->of_node, + "qcom,msm-cam-smmu-fw-dev")) { + rc = component_add(&pdev->dev, &cam_smmu_fw_dev_component_ops); + } else { + CAM_ERR(CAM_SMMU, "Unrecognized child device: %s", pdev->name); + rc = -ENODEV; + } + + if (rc < 0) + CAM_ERR(CAM_SMMU, "failed to add component rc: %d", rc); + + return rc; +} + +static int cam_smmu_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + CAM_DBG(CAM_SMMU, "Removing SMMU component: %s", pdev->name); + if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) { + component_del(&pdev->dev, &cam_smmu_component_ops); + } else if (of_device_is_compatible(dev->of_node, + "qcom,msm-cam-smmu-cb")) { + component_del(&pdev->dev, &cam_smmu_cb_component_ops); + } else if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) { + component_del(&pdev->dev, &cam_smmu_cb_qsmmu_component_ops); + } else if (of_device_is_compatible(dev->of_node, + "qcom,msm-cam-smmu-fw-dev")) { + component_del(&pdev->dev, &cam_smmu_fw_dev_component_ops); + } else { + CAM_ERR(CAM_SMMU, "Unrecognized child device: %s", pdev->name); + return -ENODEV; + } + + return 0; +} + +struct platform_driver cam_smmu_driver = { + .probe = cam_smmu_probe, + .remove = cam_smmu_remove, + .driver = { + .name = "msm_cam_smmu", + .owner = THIS_MODULE, + .of_match_table = msm_cam_smmu_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_smmu_init_module(void) +{ + 
return platform_driver_register(&cam_smmu_driver); +} + +void cam_smmu_exit_module(void) +{ + platform_driver_unregister(&cam_smmu_driver); +} + +MODULE_DESCRIPTION("MSM Camera SMMU driver"); +MODULE_LICENSE("GPL v2"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_smmu/cam_smmu_api.h b/qcom/opensource/camera-kernel/drivers/cam_smmu/cam_smmu_api.h new file mode 100644 index 0000000000..98554b659c --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_smmu/cam_smmu_api.h @@ -0,0 +1,593 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_SMMU_API_H_ +#define _CAM_SMMU_API_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BYTE_SIZE 8 +#define COOKIE_NUM_BYTE 2 +#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE) +#define COOKIE_MASK ((1<> COOKIE_SIZE) & COOKIE_MASK) +#define CAM_SMMU_GET_BASE_HDL(x) ((x) & CAM_SMMU_HDL_MASK) + +#define CAM_SMMU_GET_IOVA_DELTA(val1, val2) \ +({ \ + (val1) > (val2) ? 
(val1) - (val2) : (val2) - (val1); \ +}) + +/*Enum for possible CAM SMMU operations */ +enum cam_smmu_ops_param { + CAM_SMMU_ATTACH, + CAM_SMMU_DETACH, + CAM_SMMU_VOTE, + CAM_SMMU_DEVOTE, + CAM_SMMU_OPS_INVALID +}; + +enum cam_smmu_map_dir { + CAM_SMMU_MAP_READ, + CAM_SMMU_MAP_WRITE, + CAM_SMMU_MAP_RW, + CAM_SMMU_MAP_INVALID +}; + +enum cam_smmu_region_id { + CAM_SMMU_REGION_FIRMWARE, + CAM_SMMU_REGION_SHARED, + CAM_SMMU_REGION_SCRATCH, + CAM_SMMU_REGION_IO, + CAM_SMMU_REGION_SECHEAP, + CAM_SMMU_REGION_QDSS, + CAM_SMMU_REGION_FWUNCACHED, + CAM_SMMU_REGION_DEVICE, +}; + +enum cam_smmu_subregion_id { + CAM_SMMU_SUBREGION_GENERIC, + CAM_SMMU_SUBREGION_SYNX_HWMUTEX, + CAM_SMMU_SUBREGION_IPC_HWMUTEX, + CAM_SMMU_SUBREGION_GLOBAL_SYNC_MEM, + CAM_SMMU_SUBREGION_GLOBAL_CNTR, + CAM_SMMU_SUBREGION_LLCC_REGISTER, + CAM_SMMU_SUBREGION_MAX, +}; + +/** + * @brief : Represents camera security framework version + * + * @param arch_ver : Captures the version of the high level secure + * camera architecture. + * @param max_ver : Captures the version of the solution with in the + * high level architecture. + * @param min_ver : Captures the version of the memory assignment + * mechanism with in the solution. 
+ */ +struct cam_csf_version { + uint32_t arch_ver; + uint32_t max_ver; + uint32_t min_ver; +}; + +/** + * @brief : cam_smmu_buffer_tracker + * + * @param: list : list to be inserted into list of tracked buffers + * @param: ref_count : Ptr to kref object of a physical buffer allocated per CB + * @param: ion_fd : fd of buffer + * @param: i_ino : inode of buffer + * @param: cb_name : CB which this buffer belongs to + */ +struct cam_smmu_buffer_tracker { + struct list_head list; + struct kref *ref_count; + int ion_fd; + unsigned long i_ino; + const char *cb_name; +}; + +/** + * @brief : cam_smmu_pf_info + * + * @param domain : Iommu domain received in iommu page fault handler + * @param dev : Device received in iommu page fault handler + * @param iova : IOVA where page fault occurred + * @param flags : Flags received in iommu page fault handler + * @param token : Userdata given during callback registration + * @param buf_info : Closest mapped buffer info + * @param bid : bus id + * @param pid : unique id for hw group of ports + * @param mid : port id of hw + * @param is_secure : Faulted memory in secure or non-secure region + * @param in_map_region : Faulted memory fall in mapped region or not + */ + +struct cam_smmu_pf_info { + struct iommu_domain *domain; + struct device *dev; + unsigned long iova; + int flags; + void *token; + uint32_t buf_info; + uint32_t bid; + uint32_t pid; + uint32_t mid; + bool is_secure; + bool in_map_region; +}; + +/** + * @brief : Structure to store dma buf information + * + * @param buf : dma buffer + * @param attach : attachment info between dma buf and device + * @param table : scattered list + */ +struct region_buf_info { + struct dma_buf *buf; + struct dma_buf_attachment *attach; + struct sg_table *table; +}; + +/** + * @brief : Structure to store region information + * + * @param iova_start : Start address of region + * @param iova_len : length of region + * @param discard_iova_start : iova addr start from where should not be used + * 
@param discard_iova_len : length of discard iova region + * @param phy_addr : pa to which this va is mapped to + */ +struct cam_smmu_region_info { + dma_addr_t iova_start; + size_t iova_len; + dma_addr_t discard_iova_start; + size_t discard_iova_len; + dma_addr_t phy_addr; + struct region_buf_info buf_info; +}; + +/** + * @brief : Gets an smmu handle + * + * @param identifier: Unique identifier to be used by clients which they + * should get from device tree. CAM SMMU driver will + * not enforce how this string is obtained and will + * only validate this against the list of permitted + * identifiers + * @param handle_ptr: Based on the identifier, CAM SMMU driver will + * fill the handle pointed by handle_ptr + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_get_handle(char *identifier, int *handle_ptr); + +/** + * @brief : Performs IOMMU operations + * + * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) + * @param op : Operation to be performed. Can be either CAM_SMMU_ATTACH + * or CAM_SMMU_DETACH + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_ops(int handle, enum cam_smmu_ops_param op); + +/** + * @brief : Maps user space IOVA for calling driver + * + * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) + * @param ion_fd: ION handle identifying the memory buffer. + * @param dmabuf: DMA buf handle identifying the memory buffer. + * @param dis_delayed_unmap: Whether to disable Delayed Unmap feature + * for this mapping + * @dir : Mapping direction: which will translate to DMA_BIDIRECTIONAL, + * DMA_TO_DEVICE or DMA_FROM_DEVICE + * @dma_addr : Pointer to physical address where mapped address will be + * returned if region_id is CAM_SMMU_REGION_IO. If region_id is + * CAM_SMMU_REGION_SHARED, dma_addr is used as an input parameter + * which specifies the cpu virtual address to map. 
+ * @len_ptr : Length of buffer mapped returned by CAM SMMU driver. + * @region_id : Memory region identifier + * @is_internal: Specifies if this buffer is kernel allocated. + * @ref_count: Double ptr to store ref_cnt object in memmgr. + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf, + bool dis_delayed_unmap, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr, + enum cam_smmu_region_id region_id, bool is_internal, struct kref **ref_count); + +/** + * @brief : Maps kernel space IOVA for calling driver + * + * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) + * @param buf : dma_buf allocated for kernel usage in mem_mgr + * @dir : Mapping direction: which will translate to DMA_BIDIRECTIONAL, + * DMA_TO_DEVICE or DMA_FROM_DEVICE + * @dma_addr : Pointer to physical address where mapped address will be + * returned if region_id is CAM_SMMU_REGION_IO. If region_id is + * CAM_SMMU_REGION_SHARED, dma_addr is used as an input + * parameter which specifies the cpu virtual address to map. + * @len_ptr : Length of buffer mapped returned by CAM SMMU driver. + * @region_id : Memory region identifier + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_map_kernel_iova(int handle, + struct dma_buf *buf, enum cam_smmu_map_dir dir, + dma_addr_t *dma_addr, size_t *len_ptr, + enum cam_smmu_region_id region_id); + +/** + * @brief : Unmaps user space IOVA for calling driver + * + * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) + * @param ion_fd: ION handle identifying the memory buffer. + * @param dma_buf: DMA Buf handle identifying the memory buffer. + * @param region_id: Region id from which to unmap buffer. + * @param force_unmap: If this unmap operation is part of memmgr cleanup + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_smmu_unmap_user_iova(int handle, + int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id, + bool force_unmap); + +/** + * @brief : Unmaps kernel IOVA for calling driver + * + * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.) + * @param buf : dma_buf allocated for the kernel + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_unmap_kernel_iova(int handle, + struct dma_buf *buf, enum cam_smmu_region_id region_id); + +/** + * @brief : Allocates a scratch buffer + * + * This function allocates a scratch virtual buffer of length virt_len in the + * device virtual address space mapped to phys_len physically contiguous bytes + * in that device's SMMU. + * + * virt_len and phys_len are expected to be aligned to PAGE_SIZE and with each + * other, otherwise -EINVAL is returned. + * + * -EINVAL will be returned if virt_len is less than phys_len. + * + * Passing a too large phys_len might also cause failure if that much size is + * not available for allocation in a physically contiguous way. + * + * @param handle : Handle to identify the CAMSMMU client (VFE, CPP, FD etc.) + * @param dir : Direction of mapping which will translate to IOMMU_READ + * IOMMU_WRITE or a bit mask of both. + * @param paddr_ptr: Device virtual address that the client device will be + * able to read from/write to + * @param virt_len : Virtual length of the scratch buffer + * @param phys_len : Physical length of the scratch buffer + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +int cam_smmu_get_scratch_iova(int handle, + enum cam_smmu_map_dir dir, + dma_addr_t *paddr_ptr, + size_t virt_len, + size_t phys_len); + +/** + * @brief : Frees a scratch buffer + * + * This function frees a scratch buffer and releases the corresponding SMMU + * mappings. + * + * @param handle : Handle to identify the CAMSMMU client (IFE, ICP, etc.) 
+ * @param paddr : Device virtual address of client's scratch buffer that + * will be freed. + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +int cam_smmu_put_scratch_iova(int handle, + dma_addr_t paddr); + +/** + *@brief : Destroys an smmu handle + * + * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_destroy_handle(int handle); + +/** + * @brief : Returns if context bank identified by handle has a shared region + * + * @param handle: Handle to identify the context bank + * @return : True if context banks supports shared region, false otherwise + * @note : Currently, only ICP context banks support shared regions. + */ +bool cam_smmu_supports_shared_region(int handle); + +/** + * @brief : Registers smmu fault handler for client + * + * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) + * @param handler_cb: It is triggered in IOMMU page fault + * @param token: It is input param when trigger page fault handler + */ +void cam_smmu_set_client_page_fault_handler(int handle, + void (*handler_cb)(struct cam_smmu_pf_info *pf_info), void *token); + +/** + * @brief : Unregisters smmu fault handler for client + * + * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) 
+ * @param token: It is input param when trigger page fault handler + */ +void cam_smmu_unset_client_page_fault_handler(int handle, void *token); + +/** + * @brief Maps memory from an ION fd into IOVA space + * + * @param handle: SMMU handle identifying the context bank to map to + * @param ion_fd: ION fd of memory to map to + * @param dma_buf: DMA buf of memory to map to + * @param paddr_ptr: Pointer IOVA address that will be returned + * @param len_ptr: Length of memory mapped + * @param buf_tracker: List to add tracked buffers to + * @param ref_count: Double ptr to ref_count object for memmgr table + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf, + dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker, + struct kref **ref_count); + +/** + * @brief Maps memory from an ION fd into IOVA space + * + * @param handle: SMMU handle identifying the secure context bank to map to + * @param ion_fd: ION fd of memory to map to + * @param dma_buf: DMA Buf of memory to map to + * @param paddr_ptr: Pointer IOVA address that will be returned + * @param len_ptr: Length of memory mapped + * @param buf_tracker: List to add tracked buffers to + * @param ref_count: Double ptr to ref_count object for memmgr table + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf, + dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker, + struct kref **ref_count); + +/** + * @brief Unmaps memory from context bank + * + * @param handle: SMMU handle identifying the context bank + * @param ion_fd: ION fd of memory to unmap + * @param dma_buf: DMA Buf of memory to unmap + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_smmu_put_iova(int handle, int ion_fd, struct dma_buf *dma_buf); + +/** + * @brief Maps secure memory for SMMU handle + * + * @param handle: SMMU handle identifying secure context bank + * @param ion_fd: ION fd to map securely + * @param dmabuf: DMA buf to map securely + * @param dir: DMA Direction for the mapping + * @param dma_addr: Returned IOVA address after mapping + * @param len_ptr: Length of memory mapped + * @param ref_count: Double ptr to store ref_cnt object in memmgr + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf, + enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr, + struct kref **ref_count); + +/** + * @brief Unmaps secure memory for SMMU handle + * + * @param handle: SMMU handle identifying secure context bank + * @param ion_fd: ION fd to unmap + * @param dma_buf: DMA Buf to unmap + * @param force_unmap: If this unmap operation is part of memmgr cleanup + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf, + bool force_unmap); + +/** + * @brief Allocates firmware for context bank + * + * @param smmu_hdl: SMMU handle identifying context bank + * @param iova: IOVA address of allocated firmware + * @param kvaddr: CPU mapped address of allocated firmware + * @param len: Length of allocated firmware memory + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_alloc_firmware(int32_t smmu_hdl, + dma_addr_t *iova, + uintptr_t *kvaddr, + size_t *len); + +/** + * @brief Deallocates firmware memory for context bank + * + * @param smmu_hdl: SMMU handle identifying the context bank + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_smmu_dealloc_firmware(int32_t smmu_hdl); + +/** + * @brief Gets region information specified by smmu handle and region id + * + * @param smmu_hdl: SMMU handle identifying the context bank + * @param region_id: Region id for which information is desired + * @param region_info: Struct populated with region information + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_get_region_info(int32_t smmu_hdl, + enum cam_smmu_region_id region_id, + struct cam_smmu_region_info *region_info); + +/** + * @brief Reserves a region with buffer + * + * @param region: Region id + * @param smmu_hdl: SMMU handle identifying the context bank + * @param iova: IOVA of secondary heap after reservation has completed + * @param buf: Allocated dma_buf for secondary heap + * @param request_len: Length of secondary heap after reservation has completed + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_reserve_buf_region(enum cam_smmu_region_id region, + int32_t smmu_hdl, struct dma_buf *buf, + dma_addr_t *iova, size_t *request_len); + +/** + * @brief Releases buffer in reserved region + * + * @param region: Region id + * @param smmu_hdl: SMMU handle identifying the context bank + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_release_buf_region(enum cam_smmu_region_id region, + int32_t smmu_hdl); + +/** + * @brief Map va for phy addr range for a given context bank + * + * @param smmu_hdl: SMMU handle identifying context bank + * @param region_id: Region ID + * @optional param subregion_id: Subregion ID + * @param iova: IOVA address of allocated qdss + * @param len: Length of allocated qdss memory + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_smmu_map_phy_mem_region(int32_t smmu_hdl, + uint32_t region_id, uint32_t subregion_id, + dma_addr_t *iova, size_t *len); + +/** + * @brief Unmap call for map_phy_mem for given context bank + * + * @param smmu_hdl: SMMU handle identifying the context bank + * @param region_id: Region ID + * @optional param subregion_id: Subregion ID + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_unmap_phy_mem_region(int32_t smmu_hdl, + uint32_t region_id, uint32_t subregion_id); + +/** + * @brief Get start addr & len of I/O region for a given cb + * + * @param smmu_hdl: SMMU handle identifying the context bank + * @param iova: IOVA address of allocated I/O region + * @param len: Length of allocated I/O memory + * @param discard_iova_start: Start address of io space to discard + * @param discard_iova_len: Length of io space to discard + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_get_io_region_info(int32_t smmu_hdl, + dma_addr_t *iova, size_t *len, + dma_addr_t *discard_iova_start, size_t *discard_iova_len); + +/** + * @brief : API to reset the call context bank page fault count + * This should be done on the starting of new camera open + * @return void. + */ +void cam_smmu_reset_cb_page_fault_cnt(void); + +/** + * @brief : API to register SMMU hw to platform framework. + * @return Zero on success, or a negative error code on failure. + */ +int cam_smmu_init_module(void); + +/** + * @brief : API to remove SMMU Hw from platform framework. 
+ */ +void cam_smmu_exit_module(void); + +/** + * @brief : API to determine whether to force all allocations to CACHED + */ +int cam_smmu_need_force_alloc_cached(bool *force_alloc_cached); + +/** + * @brief : API to determine whether padding is needed for shared buffers + */ +bool cam_smmu_need_shared_buffer_padding(void); + +/** + * @brief : API to determine whether certain HW is 36-bit memory addressable + */ +bool cam_smmu_is_expanded_memory(void); + +/** + * @brief : API to query whether page fault non fatal is enabled for a device's context bank + */ +int cam_smmu_is_cb_non_fatal_fault_en(int smmu_hdl, bool *non_fatal_en); + +/** + * @brief : API to initialize any SMMU config, also get any capabilities + * such as num banks and the CSF version in use that's received from SMMU proxy driver + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_smmu_driver_init(struct cam_csf_version *csf_ver, int32_t *num_cbs); + +/** + * @brief : API to deinitialize any initialized SMMU config + */ +void cam_smmu_driver_deinit(void); + +/** + * @brief : API to putref on tracked buffers whose ref counts + * are incremented + */ +void cam_smmu_buffer_tracker_putref(struct list_head *mapped_io_list); + +/** + * @brief : API to putref on a specific tracked buffer + */ +void cam_smmu_buffer_tracker_buffer_putref(struct cam_smmu_buffer_tracker *entry); + +/** + * @brief : Add tracked buffers to list that belongs to a context + */ +int cam_smmu_add_buf_to_track_list(int ion_fd, unsigned long inode, + struct kref **ref_count, struct list_head *buf_tracker, int idx); + +#endif /* _CAM_SMMU_API_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync.c b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync.c new file mode 100644 index 0000000000..440c67dadb --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync.c @@ -0,0 +1,2980 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, 
The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) || IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) +#include +#endif +#include "cam_sync_util.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cam_compat.h" +#include "camera_main.h" +#include "cam_req_mgr_workq.h" +#include "cam_mem_mgr_api.h" +#include "cam_req_mgr_dev.h" + +struct sync_device *sync_dev; + +/* + * Flag to determine whether to enqueue cb of a + * signaled fence onto the workq or invoke it + * directly in the same context + */ +static bool trigger_cb_without_switch; +unsigned long cam_sync_monitor_mask; + +static void cam_sync_print_fence_table(void) +{ + int idx; + + for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) { + spin_lock_bh(&sync_dev->row_spinlocks[idx]); + CAM_INFO(CAM_SYNC, + "index[%u]: sync_id=%d, name=%s, type=%d, state=%d, ref_cnt=%d", + idx, + sync_dev->sync_table[idx].sync_id, + sync_dev->sync_table[idx].name, + sync_dev->sync_table[idx].type, + sync_dev->sync_table[idx].state, + atomic_read(&sync_dev->sync_table[idx].ref_cnt)); + spin_unlock_bh(&sync_dev->row_spinlocks[idx]); + } +} + +static int cam_sync_create_util( + int32_t *sync_obj, const char *name, + struct cam_dma_fence_create_sync_obj_payload *dma_sync_create_info, + struct sync_synx_obj_info *synx_obj_sync_create_info) +{ + int rc; + long idx; + bool bit; + struct sync_table_row *row = NULL; + + do { + idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS); + if (idx >= CAM_SYNC_MAX_OBJS) { + CAM_ERR(CAM_SYNC, + "Error: Unable to create sync idx = %d sync name = %s reached max!", + idx, name); + cam_sync_print_fence_table(); + return -ENOMEM; + } + + CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx); + bit = test_and_set_bit(idx, sync_dev->bitmap); + } while (bit); + + 
spin_lock_bh(&sync_dev->row_spinlocks[idx]); + rc = cam_sync_init_row(sync_dev->sync_table, idx, name, + CAM_SYNC_TYPE_INDV); + + if (rc) { + CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld", + idx); + clear_bit(idx, sync_dev->bitmap); + spin_unlock_bh(&sync_dev->row_spinlocks[idx]); + return -EINVAL; + } + + *sync_obj = idx; + + /* Associate sync obj with synx if any holding sync lock */ + if (synx_obj_sync_create_info) { + row = sync_dev->sync_table + idx; + row->synx_obj_info.synx_obj_row_idx = + synx_obj_sync_create_info->synx_obj_row_idx; + row->synx_obj_info.sync_created_with_synx = + synx_obj_sync_create_info->sync_created_with_synx; + row->synx_obj_info.synx_obj = synx_obj_sync_create_info->synx_obj; + + set_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask); + + CAM_DBG(CAM_SYNC, "sync_obj: %s[%d] associated with synx_obj: %d", + name, *sync_obj, row->synx_obj_info.synx_obj); + } + + /* Associate sync obj with dma fence if any holding sync lock */ + if (dma_sync_create_info) { + row = sync_dev->sync_table + idx; + row->dma_fence_info.dma_fence_fd = dma_sync_create_info->fd; + row->dma_fence_info.dma_fence_row_idx = dma_sync_create_info->dma_fence_row_idx; + row->dma_fence_info.sync_created_with_dma = + dma_sync_create_info->sync_created_with_dma; + set_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask); + + /* Association refcnt for non-import cases */ + if (dma_sync_create_info->sync_created_with_dma) { + rc = cam_dma_fence_get_put_ref(true, row->dma_fence_info.dma_fence_row_idx); + if (rc) + CAM_ERR(CAM_SYNC, + "Failed to getref on dma fence idx: %u fd: %d sync_obj: %d rc: %d", + row->dma_fence_info.dma_fence_row_idx, + row->dma_fence_info.dma_fence_fd, + *sync_obj, rc); + goto end; + } + + CAM_DBG(CAM_SYNC, "sync_obj: %s[%d] associated with dma fence fd: %d", + name, *sync_obj, dma_sync_create_info->fd); + goto end; + } + + CAM_DBG(CAM_SYNC, "sync_obj: %s[%i]", name, *sync_obj); + +end: + 
spin_unlock_bh(&sync_dev->row_spinlocks[idx]); + return rc; +} + +int cam_sync_create(int32_t *sync_obj, const char *name) +{ + return cam_sync_create_util(sync_obj, name, NULL, NULL); +} + +int cam_sync_register_callback(sync_callback cb_func, + void *userdata, int32_t sync_obj) +{ + struct sync_callback_info *sync_cb; + struct sync_table_row *row = NULL; + int status = 0, rc = 0; + + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0) || (!cb_func)) + return -EINVAL; + + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + if (row->state == CAM_SYNC_STATE_INVALID) { + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj %s[%d]", + row->name, sync_obj); + rc = -EINVAL; + goto monitor_dump; + } + + sync_cb = CAM_MEM_ZALLOC(sizeof(*sync_cb), GFP_ATOMIC); + if (!sync_cb) { + rc = -ENOMEM; + goto monitor_dump; + } + + /* Trigger callback if sync object is already in SIGNALED state */ + if (((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) || + (row->state == CAM_SYNC_STATE_SIGNALED_ERROR) || + (row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) && + (!row->remaining)) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_SKIP_REGISTER_CB); + if (trigger_cb_without_switch) { + CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%s[%d]", + row->name, + sync_obj); + status = row->state; + CAM_MEM_FREE(sync_cb); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + cb_func(sync_obj, status, userdata); + } else { + sync_cb->callback_func = cb_func; + sync_cb->cb_data = userdata; + sync_cb->sync_obj = sync_obj; + INIT_WORK(&sync_cb->cb_dispatch_work, + cam_sync_util_cb_dispatch); + sync_cb->status = row->state; + CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%s[%d]", + row->name, + sync_cb->sync_obj); + sync_cb->workq_scheduled_ts = ktime_get(); + queue_work(sync_dev->work_queue, + 
&sync_cb->cb_dispatch_work); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + } + + return 0; + } + + sync_cb->callback_func = cb_func; + sync_cb->cb_data = userdata; + sync_cb->sync_obj = sync_obj; + INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch); + list_add_tail(&sync_cb->list, &row->callback_list); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_REGISTER_CB); + + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + + return 0; + +monitor_dump: + cam_sync_dump_monitor_array(row); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return rc; +} + +int cam_sync_deregister_callback(sync_callback cb_func, + void *userdata, int32_t sync_obj) +{ + struct sync_table_row *row = NULL; + struct sync_callback_info *sync_cb, *temp; + bool found = false; + int rc = 0; + + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) + return -EINVAL; + + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + if (row->state == CAM_SYNC_STATE_INVALID) { + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj = %s[%d]", + row->name, sync_obj); + rc = -EINVAL; + goto monitor_dump; + } + + CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%s[%d]", + row->name, sync_obj); + list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) { + if ((sync_cb->callback_func == cb_func) && + (sync_cb->cb_data == userdata)) { + list_del_init(&sync_cb->list); + CAM_MEM_FREE(sync_cb); + found = true; + } + } + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) { + if (found) { + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_UNREGISTER_CB); + } else { + CAM_ERR(CAM_SYNC, + "Error: Callback not found sync obj = %s[%d] : sync_id %d, state %d", + row->name, sync_obj, row->sync_id, row->state); 
+ cam_sync_dump_monitor_array(row); + } + } + + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + + return found ? 0 : -ENOENT; + +monitor_dump: + cam_sync_dump_monitor_array(row); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return rc; +} + +static inline int cam_sync_signal_dma_fence_util( + struct sync_table_row *row, uint32_t status) +{ + struct cam_dma_fence_signal signal_dma_fence; + + signal_dma_fence.dma_fence_fd = row->dma_fence_info.dma_fence_fd; + + switch (status) { + case CAM_SYNC_STATE_SIGNALED_SUCCESS: + signal_dma_fence.status = 0; + break; + case CAM_SYNC_STATE_SIGNALED_ERROR: + /* Advertise error */ + signal_dma_fence.status = -EADV; + break; + case CAM_SYNC_STATE_SIGNALED_CANCEL: + signal_dma_fence.status = -ECANCELED; + break; + default: + CAM_ERR(CAM_SYNC, + "Signaling undefined status: %d for sync obj: %d", + status, row->sync_id); + return -EINVAL; + } + + return cam_dma_fence_internal_signal(row->dma_fence_info.dma_fence_row_idx, + &signal_dma_fence); +} + +static void cam_sync_signal_parent_util(int32_t status, + uint32_t event_cause, struct list_head *parents_list) +{ + int rc; + struct sync_table_row *parent_row = NULL; + struct sync_parent_info *parent_info, *temp_parent_info; + + /* + * Now iterate over all parents of this object and if they too need to + * be signaled dispatch cb's + */ + list_for_each_entry_safe(parent_info, temp_parent_info, + parents_list, list) { + parent_row = sync_dev->sync_table + parent_info->sync_id; + spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]); + parent_row->remaining--; + + rc = cam_sync_util_update_parent_state( + parent_row, + status); + if (rc) { + CAM_ERR(CAM_SYNC, "Invalid parent state %d", + parent_row->state); + spin_unlock_bh( + &sync_dev->row_spinlocks[parent_info->sync_id]); + CAM_MEM_FREE(parent_info); + continue; + } + + if (!parent_row->remaining) + cam_sync_util_dispatch_signaled_cb( + parent_info->sync_id, parent_row->state, + event_cause); + + if 
(test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(parent_info->sync_id, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_SIGNAL); + + spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]); + list_del_init(&parent_info->list); + CAM_MEM_FREE(parent_info); + } +} + +static int cam_sync_signal_validate_util( + int32_t sync_obj, int32_t status) +{ + struct sync_table_row *row = sync_dev->sync_table + sync_obj; + + if (row->state == CAM_SYNC_STATE_INVALID) { + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj = %s[%d]", + row->name, sync_obj); + return -EINVAL; + } + + if (row->type == CAM_SYNC_TYPE_GROUP) { + CAM_ERR(CAM_SYNC, + "Error: Signaling a GROUP sync object = %s[%d]", + row->name, sync_obj); + return -EINVAL; + } + + if (row->state != CAM_SYNC_STATE_ACTIVE) { + CAM_ERR(CAM_SYNC, + "Error: Sync object already signaled sync_obj = %s[%d]", + row->name, sync_obj); + return -EALREADY; + } + + if ((status != CAM_SYNC_STATE_SIGNALED_SUCCESS) && + (status != CAM_SYNC_STATE_SIGNALED_ERROR) && + (status != CAM_SYNC_STATE_SIGNALED_CANCEL)) { + CAM_ERR(CAM_SYNC, + "Error: signaling with undefined status = %d", status); + return -EINVAL; + } + + return 0; +} + +int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause) +{ + struct sync_table_row *row = NULL; + struct list_head parents_list; + int rc = 0; +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + uint32_t synx_row_idx; + struct cam_synx_obj_signal signal_synx_obj; +#endif + + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) { + CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)", + sync_obj, CAM_SYNC_MAX_OBJS); + return -EINVAL; + } + + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + rc = cam_sync_signal_validate_util(sync_obj, status); + if (rc) { + CAM_ERR(CAM_SYNC, + "Error: Failed to validate signal info for sync_obj = %s[%d] with status = 
%d rc = %d", + row->name, sync_obj, status, rc); + goto monitor_dump; + } + + if (!atomic_dec_and_test(&row->ref_cnt)) { + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return 0; + } + + row->state = status; + /* + * Signal associated dma fence first - external entities + * waiting on this fence can start processing + */ + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) { + rc = cam_sync_signal_dma_fence_util(row, status); + if (rc) { + CAM_ERR(CAM_SYNC, + "Error: Failed to signal associated dma fencefd = %d for sync_obj = %s[%d]", + row->dma_fence_info.dma_fence_fd, row->name, sync_obj); + cam_sync_dump_monitor_array(row); + } + } + +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + /* + * Signal associated synx obj prior to sync + */ + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) { + signal_synx_obj.status = status; + signal_synx_obj.synx_obj = row->synx_obj_info.synx_obj; + synx_row_idx = row->synx_obj_info.synx_obj_row_idx; + + /* Release & obtain the row lock after synx signal */ + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + rc = cam_synx_obj_internal_signal(synx_row_idx, &signal_synx_obj); + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + if (rc) { + CAM_ERR(CAM_SYNC, + "Error: Failed to signal associated synx obj = %d for sync_obj = %d", + signal_synx_obj.synx_obj, sync_obj); + cam_sync_dump_monitor_array(row); + } + } +#endif + + cam_sync_util_dispatch_signaled_cb(sync_obj, status, event_cause); + + /* copy parent list to local and release child lock */ + INIT_LIST_HEAD(&parents_list); + list_splice_init(&row->parents_list, &parents_list); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_SIGNAL); + + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + if (list_empty(&parents_list)) + return 0; + + cam_sync_signal_parent_util(status, event_cause, 
&parents_list); + + return 0; + +monitor_dump: + cam_sync_dump_monitor_array(row); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return rc; +} + +int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj) +{ + int rc, i; + long idx = 0; + bool bit; + + if ((!sync_obj) || (!merged_obj)) { + CAM_ERR(CAM_SYNC, "Invalid pointer(s)"); + return -EINVAL; + } + + if (num_objs <= 1) { + CAM_ERR(CAM_SYNC, "Single object merge is not allowed"); + return -EINVAL; + } + + if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs) + != num_objs) { + CAM_ERR(CAM_SYNC, "The obj list has duplicate fence"); + return -EINVAL; + } + + for (i = 0; i < num_objs; i++) { + rc = cam_sync_check_valid(sync_obj[i]); + if (rc) { + CAM_ERR(CAM_SYNC, "Sync_obj[%d] %d valid check fail", + i, sync_obj[i]); + return rc; + } + } + do { + idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS); + if (idx >= CAM_SYNC_MAX_OBJS) + return -ENOMEM; + bit = test_and_set_bit(idx, sync_dev->bitmap); + } while (bit); + + spin_lock_bh(&sync_dev->row_spinlocks[idx]); + rc = cam_sync_init_group_object(sync_dev->sync_table, + idx, sync_obj, num_objs); + if (rc < 0) { + CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld", + idx); + clear_bit(idx, sync_dev->bitmap); + return -EINVAL; + } + CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx); + *merged_obj = idx; + spin_unlock_bh(&sync_dev->row_spinlocks[idx]); + + return 0; +} + +int cam_sync_get_obj_ref(int32_t sync_obj) +{ + struct sync_table_row *row = NULL; + int rc; + + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) + return -EINVAL; + + spin_lock(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + if (row->state != CAM_SYNC_STATE_ACTIVE) { + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj = %s[%d]", + row->name, sync_obj); + rc = -EINVAL; + goto monitor_dump; + } + + atomic_inc(&row->ref_cnt); + spin_unlock(&sync_dev->row_spinlocks[sync_obj]); + 
CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj); + + return 0; + +monitor_dump: + cam_sync_dump_monitor_array(row); + spin_unlock(&sync_dev->row_spinlocks[sync_obj]); + return rc; +} + +int cam_sync_put_obj_ref(int32_t sync_obj) +{ + struct sync_table_row *row = NULL; + + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) + return -EINVAL; + + row = sync_dev->sync_table + sync_obj; + atomic_dec(&row->ref_cnt); + CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj); + + return 0; +} + +int cam_sync_destroy(int32_t sync_obj) +{ + return cam_sync_deinit_object(sync_dev->sync_table, sync_obj, NULL, NULL); +} + +int cam_sync_check_valid(int32_t sync_obj) +{ + struct sync_table_row *row = NULL; + int rc; + + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) + return -EINVAL; + + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + if (!test_bit(sync_obj, sync_dev->bitmap)) { + CAM_ERR(CAM_SYNC, "Error: Released sync obj received %s[%d]", + row->name, sync_obj); + rc = -EINVAL; + goto monitor_dump; + } + + if (row->state == CAM_SYNC_STATE_INVALID) { + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj = %s[%d]", + row->name, sync_obj); + rc = -EINVAL; + goto monitor_dump; + } + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return 0; + +monitor_dump: + cam_sync_dump_monitor_array(row); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return rc; +} + +int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms) +{ + unsigned long timeleft; + int rc; + struct sync_table_row *row = NULL; + + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) + return -EINVAL; + + row = sync_dev->sync_table + sync_obj; + + if (row->state == CAM_SYNC_STATE_INVALID) { + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj = %s[%d]", + row->name, sync_obj); + rc = -EINVAL; + goto monitor_dump; + } + + timeleft = cam_common_wait_for_completion_timeout(&row->signaled, + msecs_to_jiffies(timeout_ms)); + + if 
(!timeleft) { + CAM_ERR(CAM_SYNC, + "Error: timed out for sync obj = %s[%d]", row->name, sync_obj); + rc = -ETIMEDOUT; + goto monitor_dump; + } else { + switch (row->state) { + case CAM_SYNC_STATE_INVALID: + case CAM_SYNC_STATE_ACTIVE: + case CAM_SYNC_STATE_SIGNALED_ERROR: + case CAM_SYNC_STATE_SIGNALED_CANCEL: + CAM_ERR(CAM_SYNC, + "Error: Wait on invalid state = %d, obj = %d, name = %s", + row->state, sync_obj, row->name); + rc = -EINVAL; + goto monitor_dump; + case CAM_SYNC_STATE_SIGNALED_SUCCESS: + rc = 0; + break; + default: + rc = -EINVAL; + goto monitor_dump; + } + } + + return rc; + +monitor_dump: + cam_sync_dump_monitor_array(row); + return rc; +} + +static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl) +{ + struct cam_sync_info sync_create; + int result; + + if (k_ioctl->size != sizeof(struct cam_sync_info)) + return -EINVAL; + + if (!k_ioctl->ioctl_ptr) + return -EINVAL; + + if (copy_from_user(&sync_create, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + sync_create.name[SYNC_DEBUG_NAME_LEN] = '\0'; + + result = cam_sync_create(&sync_create.sync_obj, + sync_create.name); + + if (!result) + if (copy_to_user( + u64_to_user_ptr(k_ioctl->ioctl_ptr), + &sync_create, k_ioctl->size)) + return -EFAULT; + + return result; +} + +static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl) +{ + int rc; + struct cam_sync_signal sync_signal; + + if (k_ioctl->size != sizeof(struct cam_sync_signal)) + return -EINVAL; + + if (!k_ioctl->ioctl_ptr) + return -EINVAL; + + if (copy_from_user(&sync_signal, + u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size)) + return -EFAULT; + + /* need to get ref for UMD signaled fences */ + rc = cam_sync_get_obj_ref(sync_signal.sync_obj); + if (rc) { + CAM_DBG(CAM_SYNC, + "Error: cannot signal an uninitialized sync obj = %d", + sync_signal.sync_obj); + return rc; + } + + return cam_sync_signal(sync_signal.sync_obj, + sync_signal.sync_state, + CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT); 
+} + +static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl) +{ + struct cam_sync_merge sync_merge; + uint32_t *sync_objs; + uint32_t num_objs; + uint32_t size; + int result; + + if (k_ioctl->size != sizeof(struct cam_sync_merge)) + return -EINVAL; + + if (!k_ioctl->ioctl_ptr) + return -EINVAL; + + if (copy_from_user(&sync_merge, + u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size)) + return -EFAULT; + + if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS) + return -EINVAL; + + size = sizeof(uint32_t) * sync_merge.num_objs; + sync_objs = CAM_MEM_ZALLOC(size, GFP_ATOMIC); + + if (!sync_objs) + return -ENOMEM; + + if (copy_from_user(sync_objs, + u64_to_user_ptr(sync_merge.sync_objs), + sizeof(uint32_t) * sync_merge.num_objs)) { + CAM_MEM_FREE(sync_objs); + return -EFAULT; + } + + num_objs = sync_merge.num_objs; + + result = cam_sync_merge(sync_objs, + num_objs, &sync_merge.merged); + + if (!result) + if (copy_to_user( + u64_to_user_ptr(k_ioctl->ioctl_ptr), + &sync_merge, k_ioctl->size)) { + CAM_MEM_FREE(sync_objs); + return -EFAULT; + } + + CAM_MEM_FREE(sync_objs); + + return result; +} + +static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl) +{ + struct cam_sync_wait sync_wait; + + if (k_ioctl->size != sizeof(struct cam_sync_wait)) + return -EINVAL; + + if (!k_ioctl->ioctl_ptr) + return -EINVAL; + + if (copy_from_user(&sync_wait, + u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size)) + return -EFAULT; + + k_ioctl->result = cam_sync_wait(sync_wait.sync_obj, + sync_wait.timeout_ms); + + return 0; +} + +static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl) +{ + struct cam_sync_info sync_create; + + if (k_ioctl->size != sizeof(struct cam_sync_info)) + return -EINVAL; + + if (!k_ioctl->ioctl_ptr) + return -EINVAL; + + if (copy_from_user(&sync_create, + u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size)) + return -EFAULT; + + return cam_sync_destroy(sync_create.sync_obj); +} + +static int 
cam_sync_handle_register_user_payload( + struct cam_private_ioctl_arg *k_ioctl) +{ + struct cam_sync_userpayload_info userpayload_info; + struct sync_user_payload *user_payload_kernel; + struct sync_user_payload *user_payload_iter; + struct sync_user_payload *temp_upayload_kernel; + uint32_t sync_obj; + struct sync_table_row *row = NULL; + + if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) + return -EINVAL; + + if (!k_ioctl->ioctl_ptr) + return -EINVAL; + + if (copy_from_user(&userpayload_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + sync_obj = userpayload_info.sync_obj; + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) + return -EINVAL; + + user_payload_kernel = CAM_MEM_ZALLOC(sizeof(*user_payload_kernel), GFP_KERNEL); + if (!user_payload_kernel) + return -ENOMEM; + + memcpy(user_payload_kernel->payload_data, + userpayload_info.payload, + CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64)); + + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + if (row->state == CAM_SYNC_STATE_INVALID) { + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj = %s[%d]", + row->name, sync_obj); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + CAM_MEM_FREE(user_payload_kernel); + return -EINVAL; + } + + if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) || + (row->state == CAM_SYNC_STATE_SIGNALED_ERROR) || + (row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_SKIP_REGISTER_CB); + + cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG, + sync_obj, row->state, + user_payload_kernel->payload_data, + CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64), + CAM_SYNC_COMMON_REG_PAYLOAD_EVENT); + + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + CAM_MEM_FREE(user_payload_kernel); + return 0; + } + 
+ list_for_each_entry_safe(user_payload_iter, + temp_upayload_kernel, &row->user_payload_list, list) { + if (user_payload_iter->payload_data[0] == + user_payload_kernel->payload_data[0] && + user_payload_iter->payload_data[1] == + user_payload_kernel->payload_data[1]) { + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_ALREADY_REGISTERED_CB); + + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + CAM_MEM_FREE(user_payload_kernel); + return -EALREADY; + } + } + + list_add_tail(&user_payload_kernel->list, &row->user_payload_list); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_REGISTER_CB); + + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return 0; +} + +static int cam_sync_handle_deregister_user_payload( + struct cam_private_ioctl_arg *k_ioctl) +{ + struct cam_sync_userpayload_info userpayload_info; + struct sync_user_payload *user_payload_kernel, *temp; + uint32_t sync_obj; + struct sync_table_row *row = NULL; + + if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) { + CAM_ERR(CAM_SYNC, "Incorrect ioctl size"); + return -EINVAL; + } + + if (!k_ioctl->ioctl_ptr) { + CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr"); + return -EINVAL; + } + + if (copy_from_user(&userpayload_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), k_ioctl->size)) + return -EFAULT; + + sync_obj = userpayload_info.sync_obj; + if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) + return -EINVAL; + + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + if (row->state == CAM_SYNC_STATE_INVALID) { + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj = %s[%d]", + row->name, + sync_obj); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return 
-EINVAL; + } + + list_for_each_entry_safe(user_payload_kernel, temp, + &row->user_payload_list, list) { + if (user_payload_kernel->payload_data[0] == + userpayload_info.payload[0] && + user_payload_kernel->payload_data[1] == + userpayload_info.payload[1]) { + list_del_init(&user_payload_kernel->list); + CAM_MEM_FREE(user_payload_kernel); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_UNREGISTER_CB); + } + } + + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return 0; +} + +static int cam_sync_dma_fence_cb( + int32_t sync_obj, + struct cam_dma_fence_signal_sync_obj *signal_sync_obj) +{ + int32_t rc, status = CAM_SYNC_STATE_SIGNALED_SUCCESS; + struct sync_table_row *row; + struct list_head parents_list; + + if (!signal_sync_obj) { + CAM_ERR(CAM_SYNC, "Invalid signal info args"); + return -EINVAL; + } + + /* Validate sync object range */ + if (!((sync_obj > 0) && (sync_obj < CAM_SYNC_MAX_OBJS))) { + CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj); + return -EINVAL; + } + + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + /* Validate if sync obj has a dma fence association */ + if (!test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) { + CAM_ERR(CAM_SYNC, + "sync obj = %d[%s] has no associated dma fence ext_fence_mask = 0x%x", + sync_obj, row->name, row->ext_fence_mask); + rc = -EINVAL; + goto end; + } + + /* Validate if we are signaling the right sync obj based on dma fence fd */ + if (row->dma_fence_info.dma_fence_fd != signal_sync_obj->fd) { + CAM_ERR(CAM_SYNC, + "sync obj: %d[%s] is associated with a different fd: %d, signaling for fd: %d", + sync_obj, row->name, row->dma_fence_info.dma_fence_fd, signal_sync_obj->fd); + rc = -EINVAL; + goto end; + } + + /* Check for error status */ + if (signal_sync_obj->status < 0) { + if (signal_sync_obj->status == 
-ECANCELED) + status = CAM_SYNC_STATE_SIGNALED_CANCEL; + else + status = CAM_SYNC_STATE_SIGNALED_ERROR; + } + + rc = cam_sync_signal_validate_util(sync_obj, status); + if (rc) { + CAM_ERR(CAM_SYNC, + "Error: Failed to validate signal info for sync_obj = %d[%s] with status = %d rc = %d", + sync_obj, row->name, status, rc); + goto end; + } + + /* Adding dma fence reference on sync */ + atomic_inc(&row->ref_cnt); + + if (!atomic_dec_and_test(&row->ref_cnt)) + goto end; + + row->state = status; + + cam_sync_util_dispatch_signaled_cb(sync_obj, status, 0); + + INIT_LIST_HEAD(&parents_list); + list_splice_init(&row->parents_list, &parents_list); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + + if (list_empty(&parents_list)) + return 0; + + cam_sync_signal_parent_util(status, 0x0, &parents_list); + return 0; + +end: + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return rc; +} + +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) +static int cam_sync_synx_obj_cb(int32_t sync_obj, + struct cam_synx_obj_signal_sync_obj *signal_sync_obj) +{ + int32_t rc; + struct sync_table_row *row; + struct list_head parents_list; + + if (!signal_sync_obj) { + CAM_ERR(CAM_SYNC, "Invalid signal info args"); + return -EINVAL; + } + + /* Validate sync object range */ + if (!((sync_obj > 0) && (sync_obj < CAM_SYNC_MAX_OBJS))) { + CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj); + return -EINVAL; + } + + spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]); + row = sync_dev->sync_table + sync_obj; + + /* Validate if sync obj has a synx obj association */ + if (!test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) { + CAM_ERR(CAM_SYNC, + "sync obj = %d[%s] has no associated synx obj ext_fence_mask = 0x%x", + sync_obj, row->name, row->ext_fence_mask); + rc = -EINVAL; + goto end; + } + + /* Validate if we are signaling the right sync obj based on synx handle */ + if (row->synx_obj_info.synx_obj != signal_sync_obj->synx_obj) { + CAM_ERR(CAM_SYNC, + "sync obj: %d[%s] is associated 
with a different synx obj: %d, signaling for synx obj: %d", + sync_obj, row->name, row->synx_obj_info.synx_obj, + signal_sync_obj->synx_obj); + rc = -EINVAL; + goto end; + } + + rc = cam_sync_signal_validate_util(sync_obj, signal_sync_obj->status); + if (rc) { + CAM_ERR(CAM_SYNC, + "Error: Failed to validate signal info for sync_obj = %d[%s] with status = %d rc = %d", + sync_obj, row->name, signal_sync_obj->status, rc); + goto end; + } + + /* Adding synx reference on sync */ + atomic_inc(&row->ref_cnt); + if (!atomic_dec_and_test(&row->ref_cnt)) { + CAM_DBG(CAM_SYNC, "Sync = %d[%s] fence still has references, synx_hdl = %d", + sync_obj, row->name, signal_sync_obj->synx_obj); + goto end; + } + + row->state = signal_sync_obj->status; + + cam_sync_util_dispatch_signaled_cb(sync_obj, signal_sync_obj->status, 0); + + INIT_LIST_HEAD(&parents_list); + list_splice_init(&row->parents_list, &parents_list); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + + if (list_empty(&parents_list)) + return 0; + + cam_sync_signal_parent_util(signal_sync_obj->status, 0x0, &parents_list); + CAM_DBG(CAM_SYNC, + "Successfully signaled sync obj = %d with status = %d via synx obj = %d signal callback", + sync_obj, signal_sync_obj->status, signal_sync_obj->synx_obj); + + return 0; + +end: + spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); + return rc; +} +#endif + +static int cam_generic_fence_alloc_validate_input_info_util( + struct cam_generic_fence_cmd_args *fence_cmd_args, + struct cam_generic_fence_input_info **fence_input_info) +{ + int rc = 0; + struct cam_generic_fence_input_info *fence_input = NULL; + uint32_t num_fences; + size_t expected_size; + + *fence_input_info = NULL; + + if (fence_cmd_args->input_data_size < + sizeof(struct cam_generic_fence_input_info)) { + CAM_ERR(CAM_SYNC, "Size is invalid expected: 0x%llx actual: 0x%llx", + sizeof(struct cam_generic_fence_input_info), + fence_cmd_args->input_data_size); + return -EINVAL; + } + + fence_input = 
memdup_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_cmd_args->input_data_size); + if (IS_ERR_OR_NULL(fence_input)) { + CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + return -ENOMEM; + } + + /* Validate num fences */ + num_fences = fence_input->num_fences_requested; + if ((num_fences == 0) || (num_fences > CAM_GENERIC_FENCE_BATCH_MAX)) { + CAM_ERR(CAM_SYNC, "Invalid number of fences: %u for batching", + num_fences); + rc = -EINVAL; + goto free_mem; + } + + /* Validate sizes */ + expected_size = sizeof(struct cam_generic_fence_input_info) + + ((num_fences - 1) * sizeof(struct cam_generic_fence_config)); + if ((uint32_t)expected_size != fence_cmd_args->input_data_size) { + CAM_ERR(CAM_SYNC, "Invalid input size expected: 0x%x actual: 0x%x for fences: %u", + expected_size, fence_cmd_args->input_data_size, num_fences); + rc = -EINVAL; + goto free_mem; + } + + *fence_input_info = fence_input; + return rc; + +free_mem: + CAM_MEM_FREE(fence_input); + return rc; +} + +static void cam_generic_fence_free_input_info_util( + struct cam_generic_fence_input_info **fence_input_info) +{ + struct cam_generic_fence_input_info *fence_input = *fence_input_info; + + CAM_MEM_FREE(fence_input); + *fence_input_info = NULL; +} + +static int cam_generic_fence_handle_dma_create( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int rc, i, dma_fence_row_idx; + struct cam_generic_fence_input_info *fence_input_info = NULL; + struct cam_generic_fence_config *fence_cfg = NULL; + + rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info); + if (rc || !fence_input_info) { + CAM_ERR(CAM_DMA_FENCE, + "Fence input info validation failed rc: %d fence_input_info: %pK", + rc, fence_input_info); + return -EINVAL; + } + + for (i = 0; i < fence_input_info->num_fences_requested; i++) { + fence_cfg = &fence_input_info->fence_cfg_flex[i]; + 
fence_input_info->num_fences_processed++; + fence_cfg->reason_code = 0; + + rc = cam_dma_fence_create_fd(&fence_cfg->dma_fence_fd, + &dma_fence_row_idx, fence_cfg->name); + if (rc) { + CAM_ERR(CAM_DMA_FENCE, + "Failed to create dma fence at index: %d rc: %d num fences [requested: %u processed: %u]", + i, rc, fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + fence_cfg->reason_code = rc; + goto out_copy; + } + + CAM_DBG(CAM_DMA_FENCE, + "Created dma_fence @ i: %d fence fd: %d[%s] num fences [requested: %u processed: %u] ", + i, fence_cfg->dma_fence_fd, fence_cfg->name, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + +out_copy: + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_input_info, fence_cmd_args->input_data_size)) { + CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + rc = -EFAULT; + } + + cam_generic_fence_free_input_info_util(&fence_input_info); + return rc; +} + +static int cam_generic_fence_handle_dma_release( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int rc, i; + bool failed = false; + struct cam_dma_fence_release_params release_params; + struct cam_generic_fence_input_info *fence_input_info = NULL; + struct cam_generic_fence_config *fence_cfg = NULL; + + rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info); + if (rc || !fence_input_info) { + CAM_ERR(CAM_DMA_FENCE, + "Fence input info validation failed rc: %d fence_input_info: %pK", + rc, fence_input_info); + return -EINVAL; + } + + for (i = 0; i < fence_input_info->num_fences_requested; i++) { + fence_cfg = &fence_input_info->fence_cfg_flex[i]; + fence_input_info->num_fences_processed++; + fence_cfg->reason_code = 0; + + release_params.use_row_idx = false; + release_params.u.dma_fence_fd = fence_cfg->dma_fence_fd; + rc = cam_dma_fence_release(&release_params); + if (rc) { 
+ CAM_ERR(CAM_DMA_FENCE, + "Failed to destroy dma fence at index: %d fd: %d rc: %d num fences [requested: %u processed: %u]", + i, fence_cfg->dma_fence_fd, rc, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + fence_cfg->reason_code = rc; + /* Continue to release other fences, but mark the call as failed */ + failed = true; + continue; + } + + CAM_DBG(CAM_DMA_FENCE, + "Released dma_fence @ i: %d fd: %d num fences [requested: %u processed: %u]", + i, fence_cfg->dma_fence_fd, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + + if (failed) + rc = -ENOMSG; + + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_input_info, fence_cmd_args->input_data_size)) { + CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + rc = -EFAULT; + } + + cam_generic_fence_free_input_info_util(&fence_input_info); + return rc; +} + +static int cam_generic_fence_handle_dma_import( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int32_t rc, i, dma_fence_row_idx; + struct dma_fence *fence = NULL; + struct cam_dma_fence_create_sync_obj_payload dma_sync_create; + struct cam_generic_fence_input_info *fence_input_info = NULL; + struct cam_generic_fence_config *fence_cfg = NULL; + + rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info); + if (rc || !fence_input_info) { + CAM_ERR(CAM_DMA_FENCE, + "Fence input info validation failed rc: %d fence_input_info: %pK", + rc, fence_input_info); + return -EINVAL; + } + + for (i = 0; i < fence_input_info->num_fences_requested; i++) { + fence_cfg = &fence_input_info->fence_cfg_flex[i]; + fence_input_info->num_fences_processed++; + fence_cfg->reason_code = 0; + + /* Check if fd is for a valid dma fence */ + fence = cam_dma_fence_get_fence_from_fd(fence_cfg->dma_fence_fd, + &dma_fence_row_idx); + if (IS_ERR_OR_NULL(fence)) { + 
CAM_ERR(CAM_DMA_FENCE, + "Invalid dma fence for fd: %d", fence_cfg->dma_fence_fd); + fence_cfg->reason_code = -EINVAL; + goto out_copy; + } + + dma_sync_create.dma_fence_row_idx = dma_fence_row_idx; + dma_sync_create.fd = fence_cfg->dma_fence_fd; + dma_sync_create.sync_created_with_dma = true; + + /* Create new sync object and associate dma fence */ + rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name, + &dma_sync_create, NULL); + if (rc) { + fence_cfg->reason_code = rc; + + /* put on the import refcnt */ + cam_dma_fence_get_put_ref(false, dma_fence_row_idx); + goto out_copy; + } + + /* Register a cb for dma fence */ + rc = cam_dma_fence_register_cb(&fence_cfg->sync_obj, + &dma_fence_row_idx, cam_sync_dma_fence_cb); + if (rc) { + CAM_ERR(CAM_DMA_FENCE, + "Failed to register cb for dma fence fd: %d sync_obj: %d rc: %d", + fence_cfg->dma_fence_fd, fence_cfg->sync_obj, rc); + cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj, + NULL, NULL); + fence_cfg->reason_code = rc; + goto out_copy; + } + + CAM_DBG(CAM_DMA_FENCE, + "dma fence fd = %d imported for sync_obj = %d[%s] num fences [requested: %u processed: %u]", + fence_cfg->dma_fence_fd, fence_cfg->sync_obj, fence_cfg->name, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + +out_copy: + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_input_info, fence_cmd_args->input_data_size)) { + rc = -EFAULT; + CAM_ERR(CAM_DMA_FENCE, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + } + + cam_generic_fence_free_input_info_util(&fence_input_info); + return rc; +} + +static int cam_generic_fence_handle_dma_signal( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + struct cam_dma_fence_signal signal_dma_fence; + + if (fence_cmd_args->input_data_size != sizeof(struct cam_dma_fence_signal)) { + CAM_ERR(CAM_DMA_FENCE, "Size is invalid expected: 0x%llx actual: 0x%llx", + 
sizeof(struct cam_dma_fence_signal), + fence_cmd_args->input_data_size); + return -EINVAL; + } + + if (copy_from_user(&signal_dma_fence, (void __user *)fence_cmd_args->input_handle, + fence_cmd_args->input_data_size)) + return -EFAULT; + + return cam_dma_fence_signal_fd(&signal_dma_fence); +} + +static int cam_generic_fence_process_dma_fence_cmd( + uint32_t id, + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int rc = -EINVAL; + + switch (id) { + case CAM_GENERIC_FENCE_CREATE: + rc = cam_generic_fence_handle_dma_create(fence_cmd_args); + break; + case CAM_GENERIC_FENCE_RELEASE: + rc = cam_generic_fence_handle_dma_release(fence_cmd_args); + break; + case CAM_GENERIC_FENCE_IMPORT: + rc = cam_generic_fence_handle_dma_import(fence_cmd_args); + break; + case CAM_GENERIC_FENCE_SIGNAL: + rc = cam_generic_fence_handle_dma_signal(fence_cmd_args); + break; + default: + CAM_ERR(CAM_DMA_FENCE, "IOCTL cmd: %u not supported for dma fence", id); + break; + } + + return rc; +} + +int cam_sync_synx_core_recovery( + enum cam_sync_synx_supported_cores core_id) +{ + int rc = -EOPNOTSUPP; + +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + rc = cam_synx_core_recovery(core_id); +#endif + + return rc; +} + +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) +static int cam_generic_fence_validate_signal_input_info_util( + int32_t fence_type, + struct cam_generic_fence_cmd_args *fence_cmd_args, + struct cam_generic_fence_signal_info **fence_signal_info, + void **fence_signal_data) +{ + int rc = 0; + struct cam_generic_fence_signal_info *signal_info = NULL; + void *signal_data; + uint32_t num_fences; + size_t expected_size; + + *fence_signal_info = NULL; + *fence_signal_data = NULL; + + if (fence_cmd_args->input_data_size != + sizeof(struct cam_generic_fence_signal_info)) { + CAM_ERR(CAM_SYNC, "Size is invalid expected: 0x%llx actual: 0x%llx", + sizeof(struct cam_generic_fence_signal_info), + fence_cmd_args->input_data_size); + return -EINVAL; + } + + signal_info = 
memdup_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_cmd_args->input_data_size); + if (IS_ERR_OR_NULL(signal_info)) { + CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + return -ENOMEM; + } + + /* Validate num fences */ + num_fences = signal_info->num_fences_requested; + if ((num_fences == 0) || (num_fences > CAM_GENERIC_FENCE_BATCH_MAX)) { + CAM_ERR(CAM_SYNC, "Invalid number of fences: %u for batching", + num_fences); + rc = -EINVAL; + goto free_mem; + } + + if (signal_info->fence_handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_SYNC, "Invalid signal handle type: %d", + signal_info->fence_handle_type); + rc = -EINVAL; + goto free_mem; + } + + /* Validate sizes */ + switch (fence_type) { + case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ: + expected_size = sizeof(struct cam_sync_signal); + break; + case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ: + expected_size = sizeof(struct cam_synx_obj_signal); + break; + case CAM_GENERIC_FENCE_TYPE_DMA_FENCE: + expected_size = sizeof(struct cam_dma_fence_signal); + break; + default: + CAM_ERR(CAM_SYNC, "Unsupported fence type: %u", fence_type); + rc = -EINVAL; + goto free_mem; + } + + if ((signal_info->fence_data_size) != (expected_size * num_fences)) { + CAM_ERR(CAM_SYNC, "Invalid input size expected: 0x%x actual: 0x%x for fences: %u", + (expected_size * num_fences), signal_info->fence_data_size, num_fences); + rc = -EINVAL; + goto free_mem; + } + + signal_data = memdup_user(u64_to_user_ptr(signal_info->fence_info_hdl), + signal_info->fence_data_size); + if (IS_ERR_OR_NULL(signal_data)) { + CAM_ERR(CAM_SYNC, "memdup failed for hdl: %d size: 0x%x", + signal_info->fence_info_hdl, signal_info->fence_data_size); + rc = -ENOMEM; + goto free_mem; + } + + *fence_signal_info = signal_info; + *fence_signal_data = signal_data; + return rc; + +free_mem: + CAM_MEM_FREE(signal_info); + return rc; +} + +static void cam_generic_fence_free_signal_input_info_util( + 
struct cam_generic_fence_signal_info **fence_signal_info, + void **fence_signal_data) +{ + void *signal_data = *fence_signal_data; + struct cam_generic_fence_signal_info *fence_input = *fence_signal_info; + + CAM_MEM_FREE(signal_data); + CAM_MEM_FREE(fence_input); + + *fence_signal_info = NULL; + *fence_signal_data = NULL; +} + +static int cam_generic_fence_config_parse_params( + struct cam_generic_fence_config *fence_cfg, + int32_t requested_param_mask, int32_t *result) +{ + uint32_t index = 0, num_entries; + + if (!result) { + CAM_ERR(CAM_SYNC, "Invalid result hdl : %p", result); + return -EINVAL; + } + + /* Assign to 0 by default */ + *result = 0; + + if (!fence_cfg->num_valid_params || !requested_param_mask) { + CAM_DBG(CAM_SYNC, + "No params configured num_valid = %d requested_mask = 0x%x", + fence_cfg->num_valid_params, requested_param_mask); + return 0; + } + + if (!(fence_cfg->valid_param_mask & requested_param_mask)) { + CAM_DBG(CAM_SYNC, + "Requested parameter not set in additional param mask expecting: 0x%x actual: 0x%x", + requested_param_mask, fence_cfg->valid_param_mask); + return 0; + } + + index = ffs(requested_param_mask) - 1; + num_entries = ARRAY_SIZE(fence_cfg->params); + if (index >= num_entries) { + CAM_DBG(CAM_SYNC, + "Obtained index %u from mask: 0x%x num_param_entries: %u, index exceeding max", + index, requested_param_mask, num_entries); + return 0; + } + + *result = fence_cfg->params[index]; + return 0; +} + +static int cam_generic_fence_handle_synx_create( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int rc, i; + int32_t row_idx, fence_flag; + struct cam_generic_fence_input_info *fence_input_info = NULL; + struct cam_generic_fence_config *fence_cfg = NULL; + + rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info); + if (rc || !fence_input_info) { + CAM_ERR(CAM_SYNX, + "Fence input info validation failed rc: %d fence_input_info: %pK", + rc, fence_input_info); + return -EINVAL; + } + + for (i 
= 0; i < fence_input_info->num_fences_requested; i++) { + fence_cfg = &fence_input_info->fence_cfg_flex[i]; + fence_input_info->num_fences_processed++; + fence_cfg->reason_code = 0; + fence_flag = 0; + + cam_generic_fence_config_parse_params(fence_cfg, + CAM_GENERIC_FENCE_CONFIG_FLAG_PARAM_INDEX, &fence_flag); + + rc = cam_synx_obj_create(fence_cfg->name, + fence_flag, &fence_cfg->synx_obj, &row_idx); + if (rc) { + CAM_ERR(CAM_SYNX, + "Failed to create synx fence at index: %d rc: %d num fences [requested: %u processed: %u]", + i, rc, fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + fence_cfg->reason_code = rc; + goto out_copy; + } + + CAM_DBG(CAM_SYNX, + "Created synx fence @ i: %d synx_obj: %d[%s] num fences [requested: %u processed: %u] ", + i, fence_cfg->synx_obj, fence_cfg->name, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + +out_copy: + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_input_info, fence_cmd_args->input_data_size)) { + CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + rc = -EFAULT; + } + + cam_generic_fence_free_input_info_util(&fence_input_info); + return rc; +} + +static int cam_generic_fence_handle_synx_release( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int rc, i; + bool failed = false; + struct cam_generic_fence_input_info *fence_input_info = NULL; + struct cam_generic_fence_config *fence_cfg = NULL; + struct cam_synx_obj_release_params synx_release_params; + + rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info); + if (rc || !fence_input_info) { + CAM_ERR(CAM_SYNX, + "Fence input info validation failed rc: %d fence_input_info: %pK", + rc, fence_input_info); + return -EINVAL; + } + + for (i = 0; i < fence_input_info->num_fences_requested; i++) { + fence_cfg = &fence_input_info->fence_cfg_flex[i]; + 
fence_input_info->num_fences_processed++; + fence_cfg->reason_code = 0; + + synx_release_params.use_row_idx = false; + synx_release_params.u.synx_obj = fence_cfg->synx_obj; + + rc = cam_synx_obj_release(&synx_release_params); + if (rc) { + CAM_ERR(CAM_SYNX, + "Failed to release synx object at index: %d rc: %d num fences [requested: %u processed: %u]", + i, rc, fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + fence_cfg->reason_code = rc; + /* Continue to release other fences, but mark the call as failed */ + failed = true; + continue; + } + + CAM_DBG(CAM_SYNX, + "Released synx object @ i: %d handle: %d num fences [requested: %u processed: %u]", + i, fence_cfg->synx_obj, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + + if (failed) + rc = -ENOMSG; + + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_input_info, fence_cmd_args->input_data_size)) { + CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + rc = -EFAULT; + } + + cam_generic_fence_free_input_info_util(&fence_input_info); + return rc; +} + +static int cam_sync_synx_associate_obj(int32_t sync_obj, uint32_t synx_obj, + int32_t synx_obj_row_idx, bool *is_sync_obj_signaled) +{ + int rc; + struct sync_table_row *row; + struct cam_synx_obj_signal signal_synx_obj; + + rc = cam_sync_check_valid(sync_obj); + if (rc) + return rc; + + row = sync_dev->sync_table + sync_obj; + spin_lock(&sync_dev->row_spinlocks[sync_obj]); + + /* Validate if sync object already has an association */ + if (unlikely(test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask))) { + CAM_ERR(CAM_SYNC, + "sync_obj: %s[%d] has already been associated with a synx_hdl: 0x%x", + row->name, sync_obj, row->synx_obj_info.synx_obj); + rc = -EINVAL; + } else if (row->state != CAM_SYNC_STATE_ACTIVE) { + signal_synx_obj.status = row->state; + signal_synx_obj.synx_obj = 
synx_obj; + *is_sync_obj_signaled = true; + goto signal_synx; + } else { + row->synx_obj_info.synx_obj_row_idx = synx_obj_row_idx; + row->synx_obj_info.sync_created_with_synx = true; + row->synx_obj_info.synx_obj = synx_obj; + set_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask); + CAM_DBG(CAM_SYNX, "sync_obj: %s[%d] associated with synx_obj: %d", + row->name, sync_obj, row->synx_obj_info.synx_obj); + } + + spin_unlock(&sync_dev->row_spinlocks[sync_obj]); + return rc; + +signal_synx: + spin_unlock(&sync_dev->row_spinlocks[sync_obj]); + return cam_synx_obj_signal_obj(&signal_synx_obj); +} + +static int cam_generic_fence_handle_synx_import( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int32_t rc, i, synx_obj_row_idx; + struct sync_synx_obj_info synx_sync_create; + struct cam_generic_fence_input_info *fence_input_info = NULL; + struct cam_generic_fence_config *fence_cfg = NULL; + bool is_sync_obj_signaled = false; + bool is_sync_obj_created = false; + + rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info); + if (rc || !fence_input_info) { + CAM_ERR(CAM_SYNX, + "Fence input info validation failed rc: %d fence_input_info: %pK", + rc, fence_input_info); + return -EINVAL; + } + + for (i = 0; i < fence_input_info->num_fences_requested; i++) { + fence_cfg = &fence_input_info->fence_cfg_flex[i]; + fence_input_info->num_fences_processed++; + fence_cfg->reason_code = 0; + is_sync_obj_signaled = false; + is_sync_obj_created = false; + + /* Check if synx handle is for a valid synx obj */ + rc = cam_synx_obj_find_obj_in_table(fence_cfg->synx_obj, + &synx_obj_row_idx); + if (rc) { + CAM_ERR(CAM_SYNX, + "Invalid synx obj for handle: %d", fence_cfg->synx_obj); + fence_cfg->reason_code = -EINVAL; + goto out_copy; + } + + if ((fence_cfg->sync_obj > 0) && (fence_cfg->sync_obj < CAM_SYNC_MAX_OBJS)) { + /* Associate synx object with existing sync object */ + rc = cam_sync_synx_associate_obj(fence_cfg->sync_obj, + 
fence_cfg->synx_obj, synx_obj_row_idx, + &is_sync_obj_signaled); + } else { + /* Create new sync object and associate synx object */ + synx_sync_create.sync_created_with_synx = true; + synx_sync_create.synx_obj = fence_cfg->synx_obj; + synx_sync_create.synx_obj_row_idx = synx_obj_row_idx; + + rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name, + NULL, &synx_sync_create); + is_sync_obj_created = true; + } + + if (rc) { + fence_cfg->reason_code = rc; + goto out_copy; + } + + if (!is_sync_obj_signaled) { + /* Register a cb for synx_obj */ + rc = cam_synx_obj_register_cb(&fence_cfg->sync_obj, + synx_obj_row_idx, cam_sync_synx_obj_cb); + if (rc) { + CAM_ERR(CAM_SYNX, + "Failed to register cb for synx_obj: %d sync_obj: %d rc: %d", + fence_cfg->synx_obj, fence_cfg->sync_obj, rc); + if (is_sync_obj_created) + cam_sync_deinit_object(sync_dev->sync_table, + fence_cfg->sync_obj, NULL, NULL); + fence_cfg->reason_code = rc; + goto out_copy; + } + } + + CAM_DBG(CAM_SYNX, + "synx_obj handle = %d imported for dma fence fd: %d sync_obj = %d[%s] num fences [requested: %u processed: %u]", + fence_cfg->synx_obj, fence_cfg->dma_fence_fd, + fence_cfg->sync_obj, fence_cfg->name, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + +out_copy: + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_input_info, fence_cmd_args->input_data_size)) { + rc = -EFAULT; + CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + } + + cam_generic_fence_free_input_info_util(&fence_input_info); + return rc; +} + +static int cam_generic_fence_handle_synx_signal( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int32_t rc, i; + struct cam_generic_fence_signal_info *fence_signal_info; + struct cam_synx_obj_signal *synx_signal_info; + + rc = cam_generic_fence_validate_signal_input_info_util( + CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, fence_cmd_args, + 
&fence_signal_info, (void **)&synx_signal_info); + if (rc || !fence_signal_info || !synx_signal_info) { + CAM_ERR(CAM_SYNX, + "Fence input signal info validation failed rc: %d fence_input_info: %pK synx_signal_info: %pK", + rc, fence_signal_info, synx_signal_info); + return -EINVAL; + } + + for (i = 0; i < fence_signal_info->num_fences_requested; i++) { + fence_signal_info->num_fences_processed++; + + rc = cam_synx_obj_signal_obj(&synx_signal_info[i]); + if (rc) { + CAM_ERR(CAM_SYNX, + "Failed to signal for synx_obj: %d, rc: %d, status : %d", + synx_signal_info[i].synx_obj, rc, + synx_signal_info[i].status); + } + + synx_signal_info[i].reason_code = rc; + } + + if (copy_to_user(u64_to_user_ptr(fence_signal_info->fence_info_hdl), synx_signal_info, + fence_signal_info->fence_data_size)) { + rc = -EFAULT; + CAM_ERR(CAM_SYNX, "copy to user for signal data failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, + (sizeof(struct cam_synx_obj_signal) * + fence_signal_info->num_fences_requested)); + goto end; + } + + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_signal_info, sizeof(struct cam_generic_fence_signal_info))) { + rc = -EFAULT; + CAM_ERR(CAM_SYNX, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, + sizeof(struct cam_generic_fence_signal_info)); +} + +end: + cam_generic_fence_free_signal_input_info_util(&fence_signal_info, + (void **)&synx_signal_info); + return rc; +} + +static int cam_generic_fence_process_synx_obj_cmd( + uint32_t id, + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int rc = -EINVAL; + + switch (id) { + case CAM_GENERIC_FENCE_CREATE: + rc = cam_generic_fence_handle_synx_create(fence_cmd_args); + break; + case CAM_GENERIC_FENCE_RELEASE: + rc = cam_generic_fence_handle_synx_release(fence_cmd_args); + break; + case CAM_GENERIC_FENCE_IMPORT: + rc = cam_generic_fence_handle_synx_import(fence_cmd_args); + break; + case CAM_GENERIC_FENCE_SIGNAL: + rc = 
cam_generic_fence_handle_synx_signal(fence_cmd_args); + break; + default: + CAM_ERR(CAM_SYNX, "IOCTL cmd: %u not supported for synx object", id); + break; + } + + return rc; +} +#endif + +static int cam_generic_fence_handle_sync_create( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int rc, i, dma_fence_row_idx; + bool dma_fence_created; + unsigned long fence_sel_mask; + struct cam_dma_fence_release_params release_params; + struct cam_dma_fence_create_sync_obj_payload dma_sync_create; + struct cam_generic_fence_input_info *fence_input_info = NULL; + struct cam_generic_fence_config *fence_cfg = NULL; + bool synx_obj_created = false; + struct sync_synx_obj_info synx_obj_create; +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + int32_t fence_flag; + int32_t synx_obj_row_idx = 0; + struct cam_synx_obj_release_params synx_release_params; + struct dma_fence *dma_fence_ptr; +#endif + + rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info); + if (rc || !fence_input_info) { + CAM_ERR(CAM_SYNC, + "Fence input info validation failed rc: %d fence_input_info: %pK", + rc, fence_input_info); + return -EINVAL; + } + + for (i = 0; i < fence_input_info->num_fences_requested; i++) { + fence_cfg = &fence_input_info->fence_cfg_flex[i]; + fence_input_info->num_fences_processed++; + fence_cfg->reason_code = 0; + + /* Reset flag */ + dma_fence_created = false; + synx_obj_created = false; + + fence_sel_mask = fence_cfg->fence_sel_mask; + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) { + rc = cam_dma_fence_create_fd(&fence_cfg->dma_fence_fd, + &dma_fence_row_idx, fence_cfg->name); + if (rc) { + CAM_ERR(CAM_SYNC, + "Failed to create dma fence at index: %d rc: %d num_fences: %u", + i, rc, fence_input_info->num_fences_requested); + fence_cfg->reason_code = rc; + goto out_copy; + } + + dma_sync_create.dma_fence_row_idx = dma_fence_row_idx; + dma_sync_create.fd = fence_cfg->dma_fence_fd; + dma_sync_create.sync_created_with_dma = true; + 
dma_fence_created = true; + } + +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + /* Create a synx object */ + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) { + if (dma_fence_created) { + dma_fence_ptr = cam_dma_fence_get_fence_from_fd( + dma_sync_create.fd, &dma_fence_row_idx); + rc = cam_synx_obj_import_dma_fence(fence_cfg->name, + fence_cfg->params[0], dma_fence_ptr, + &fence_cfg->synx_obj, &synx_obj_row_idx); + } else { + cam_generic_fence_config_parse_params(fence_cfg, + CAM_GENERIC_FENCE_CONFIG_FLAG_PARAM_INDEX, &fence_flag); + rc = cam_synx_obj_create(fence_cfg->name, + fence_flag, &fence_cfg->synx_obj, + &synx_obj_row_idx); + } + + if (rc) { + CAM_ERR(CAM_SYNC, + "Failed to create/import synx obj at index: %d rc: %d num_fences: %u", + i, rc, fence_input_info->num_fences_requested); + + /* Release dma fence */ + if (dma_fence_created) { + release_params.use_row_idx = true; + release_params.u.dma_row_idx = dma_fence_row_idx; + + cam_dma_fence_release(&release_params); + } + goto out_copy; + } + + synx_obj_create.sync_created_with_synx = true; + synx_obj_create.synx_obj = fence_cfg->synx_obj; + synx_obj_create.synx_obj_row_idx = synx_obj_row_idx; + synx_obj_created = true; + } +#endif + rc = cam_sync_create_util(&fence_cfg->sync_obj, fence_cfg->name, + (dma_fence_created ? &dma_sync_create : NULL), + (synx_obj_created ? 
&synx_obj_create : NULL)); + if (rc) { + fence_cfg->reason_code = rc; + + CAM_ERR(CAM_SYNC, + "Failed to create sync obj at index: %d rc: %d num_fences: %u", + i, rc, fence_input_info->num_fences_requested); + /* Release dma fence */ + if (dma_fence_created) { + release_params.use_row_idx = true; + release_params.u.dma_row_idx = dma_fence_row_idx; + + cam_dma_fence_release(&release_params); + } +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + /* Release synx obj */ + if (synx_obj_created) { + synx_release_params.use_row_idx = true; + synx_release_params.u.synx_row_idx = synx_obj_row_idx; + + cam_synx_obj_release(&synx_release_params); + } +#endif + goto out_copy; + } + + /* Register dma fence cb */ + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) { + rc = cam_dma_fence_register_cb(&fence_cfg->sync_obj, + &dma_fence_row_idx, cam_sync_dma_fence_cb); + if (rc) { + CAM_ERR(CAM_SYNC, + "Failed to register cb for dma fence fd: %d sync_obj: %d rc: %d", + fence_cfg->dma_fence_fd, fence_cfg->sync_obj, rc); + + fence_cfg->reason_code = rc; + /* Destroy sync obj */ + cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj, + NULL, NULL); + /* Release dma fence */ + if (dma_fence_created) { + release_params.use_row_idx = true; + release_params.u.dma_row_idx = dma_fence_row_idx; + + cam_dma_fence_release(&release_params); + } +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + /* Release synx obj */ + if (synx_obj_created) { + synx_release_params.use_row_idx = true; + synx_release_params.u.synx_row_idx = synx_obj_row_idx; + + cam_synx_obj_release(&synx_release_params); + } +#endif + goto out_copy; + } + } +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + /* Register synx object callback */ + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) { + rc = cam_synx_obj_register_cb(&fence_cfg->sync_obj, + synx_obj_row_idx, cam_sync_synx_obj_cb); + if (rc) { + CAM_ERR(CAM_SYNC, + "Failed to register cb for synx_obj: %d sync_obj: %d rc: %d", + 
fence_cfg->synx_obj, fence_cfg->sync_obj, rc); + + fence_cfg->reason_code = rc; + /* Destroy sync obj */ + cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj, + NULL, NULL); + /* Release dma fence */ + if (dma_fence_created) { + release_params.use_row_idx = true; + release_params.u.dma_row_idx = dma_fence_row_idx; + + cam_dma_fence_release(&release_params); + } + /* Release synx obj */ + if (synx_obj_created) { + synx_release_params.use_row_idx = true; + synx_release_params.u.synx_row_idx = synx_obj_row_idx; + + cam_synx_obj_release(&synx_release_params); + } + goto out_copy; + } + } +#endif + + CAM_DBG(CAM_SYNC, + "Created sync_obj = %d[%s] with fence_sel_mask: 0x%x dma_fence_fd: %d num fences [requested: %u processed: %u]", + fence_cfg->sync_obj, fence_cfg->name, + fence_cfg->fence_sel_mask, fence_cfg->dma_fence_fd, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + +out_copy: + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_input_info, fence_cmd_args->input_data_size)) { + rc = -EFAULT; + CAM_ERR(CAM_SYNC, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + } + + cam_generic_fence_free_input_info_util(&fence_input_info); + return rc; +} + +static int cam_generic_fence_handle_sync_release( + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + bool failed = false; + int rc, i; + unsigned long fence_sel_mask; + struct cam_sync_check_for_dma_release check_for_dma_release; + struct cam_dma_fence_release_params release_params; + struct cam_generic_fence_input_info *fence_input_info = NULL; + struct cam_generic_fence_config *fence_cfg = NULL; + struct cam_sync_check_for_synx_release check_for_synx_release; +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + struct cam_synx_obj_release_params synx_release_params; +#endif + + rc = cam_generic_fence_alloc_validate_input_info_util(fence_cmd_args, &fence_input_info); + if (rc || 
!fence_input_info) { + CAM_ERR(CAM_SYNC, + "Fence input info validation failed rc: %d fence_input_info: %pK", + rc, fence_input_info); + return -EINVAL; + } + + for (i = 0; i < fence_input_info->num_fences_requested; i++) { + fence_cfg = &fence_input_info->fence_cfg_flex[i]; + fence_input_info->num_fences_processed++; + /* Reset fields */ + fence_cfg->reason_code = 0; + check_for_dma_release.sync_created_with_dma = false; + check_for_dma_release.dma_fence_fd = fence_cfg->dma_fence_fd; + check_for_synx_release.sync_created_with_synx = false; + check_for_synx_release.synx_obj = fence_cfg->synx_obj; + + rc = cam_sync_deinit_object(sync_dev->sync_table, fence_cfg->sync_obj, + &check_for_dma_release, &check_for_synx_release); + if (rc) { + fence_cfg->reason_code = rc; + failed = true; + CAM_ERR(CAM_SYNC, + "Failed to release sync obj at index: %d rc: %d num_fences [requested: %u processed: %u]", + i, rc, fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + + fence_sel_mask = fence_cfg->fence_sel_mask; + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &fence_sel_mask)) { + if (!check_for_dma_release.sync_created_with_dma) { + CAM_ERR(CAM_SYNC, + "Failed to release dma fence fd: %d with sync_obj: %d, not created together", + fence_cfg->dma_fence_fd, fence_cfg->sync_obj); + failed = true; + fence_cfg->reason_code = -EPERM; + continue; + } + + release_params.use_row_idx = true; + release_params.u.dma_row_idx = check_for_dma_release.dma_fence_row_idx; + rc = cam_dma_fence_release(&release_params); + if (rc) { + CAM_ERR(CAM_SYNC, + "Failed to destroy dma fence at index: %d rc: %d num fences [requested: %u processed: %u]", + i, rc, fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + fence_cfg->reason_code = rc; + failed = true; + continue; + } + } + +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + /* Release associated synx obj */ + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &fence_sel_mask)) { + if 
(!check_for_synx_release.sync_created_with_synx) { + CAM_ERR(CAM_SYNC, + "Failed to release synx_obj: %d with sync_obj: %d, not created together", + fence_cfg->synx_obj, fence_cfg->sync_obj); + failed = true; + fence_cfg->reason_code = -EPERM; + continue; + } + + synx_release_params.use_row_idx = true; + synx_release_params.u.synx_row_idx = + check_for_synx_release.synx_obj_row_idx; + rc = cam_synx_obj_release(&synx_release_params); + if (rc) { + CAM_ERR(CAM_SYNC, + "Failed to destroy synx_obj at index: %d rc: %d num fences [requested: %u processed: %u]", + i, rc, fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + fence_cfg->reason_code = rc; + failed = true; + continue; + } + } +#endif + + CAM_DBG(CAM_SYNC, + "Released sync_obj = %d[%s] with fence_sel_mask: 0x%x dma_fence_fd: %d synx_obj: %d num fences [requested: %u processed: %u]", + fence_cfg->sync_obj, fence_cfg->name, + fence_cfg->fence_sel_mask, fence_cfg->dma_fence_fd, fence_cfg->synx_obj, + fence_input_info->num_fences_requested, + fence_input_info->num_fences_processed); + } + + if (failed) + rc = -ENOMSG; + + if (copy_to_user(u64_to_user_ptr(fence_cmd_args->input_handle), + fence_input_info, fence_cmd_args->input_data_size)) { + rc = -EFAULT; + CAM_ERR(CAM_SYNC, "copy to user failed hdl: %d size: 0x%x", + fence_cmd_args->input_handle, fence_cmd_args->input_data_size); + } + + cam_generic_fence_free_input_info_util(&fence_input_info); + return rc; +} + +static int cam_generic_fence_process_sync_obj_cmd( + uint32_t id, + struct cam_generic_fence_cmd_args *fence_cmd_args) +{ + int rc = -EINVAL; + + switch (id) { + case CAM_GENERIC_FENCE_CREATE: + rc = cam_generic_fence_handle_sync_create(fence_cmd_args); + break; + case CAM_GENERIC_FENCE_RELEASE: + rc = cam_generic_fence_handle_sync_release(fence_cmd_args); + break; + default: + CAM_ERR(CAM_SYNC, "IOCTL cmd: %u not supported for sync object", id); + break; + } + + return rc; +} + +static int cam_generic_fence_parser( + 
struct cam_private_ioctl_arg *k_ioctl) +{ + int rc; + struct cam_generic_fence_cmd_args fence_cmd_args; + + if (!k_ioctl->ioctl_ptr) { + CAM_ERR(CAM_SYNC, "Invalid args input ptr: %p", + k_ioctl->ioctl_ptr); + return -EINVAL; + } + + if (k_ioctl->size != sizeof(struct cam_generic_fence_cmd_args)) { + CAM_ERR(CAM_SYNC, "Size mismatch expected: 0x%llx actual: 0x%llx", + sizeof(struct cam_generic_fence_cmd_args), k_ioctl->size); + return -EINVAL; + } + + if (copy_from_user(&fence_cmd_args, u64_to_user_ptr(k_ioctl->ioctl_ptr), + sizeof(fence_cmd_args))) { + CAM_ERR(CAM_SYNC, "copy from user failed for input ptr: %pK", + k_ioctl->ioctl_ptr); + return -EFAULT; + } + + if (fence_cmd_args.input_handle_type != CAM_HANDLE_USER_POINTER) { + CAM_ERR(CAM_SYNC, "Invalid handle type: %u", + fence_cmd_args.input_handle_type); + return -EINVAL; + } + + switch (fence_cmd_args.fence_type) { + case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ: + rc = cam_generic_fence_process_sync_obj_cmd(k_ioctl->id, &fence_cmd_args); + break; + case CAM_GENERIC_FENCE_TYPE_DMA_FENCE: + rc = cam_generic_fence_process_dma_fence_cmd(k_ioctl->id, &fence_cmd_args); + break; +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ: + rc = cam_generic_fence_process_synx_obj_cmd(k_ioctl->id, &fence_cmd_args); + break; +#endif + default: + rc = -EINVAL; + CAM_ERR(CAM_SYNC, "fence type: 0x%x handling not supported", + fence_cmd_args.fence_type); + break; + } + + return rc; +} + +static long cam_sync_dev_ioctl(struct file *filep, void *fh, + bool valid_prio, unsigned int cmd, void *arg) +{ + int32_t rc; + struct sync_device *sync_dev = video_drvdata(filep); + struct cam_private_ioctl_arg k_ioctl; + + if (!sync_dev) { + CAM_ERR(CAM_SYNC, "sync_dev NULL"); + return -EINVAL; + } + + if (!arg) + return -EINVAL; + + if (cmd != CAM_PRIVATE_IOCTL_CMD) + return -ENOIOCTLCMD; + + k_ioctl = *(struct cam_private_ioctl_arg *)arg; + + switch (k_ioctl.id) { + case CAM_SYNC_CREATE: + rc = 
cam_sync_handle_create(&k_ioctl); + break; + case CAM_SYNC_DESTROY: + rc = cam_sync_handle_destroy(&k_ioctl); + break; + case CAM_SYNC_REGISTER_PAYLOAD: + rc = cam_sync_handle_register_user_payload( + &k_ioctl); + break; + case CAM_SYNC_DEREGISTER_PAYLOAD: + rc = cam_sync_handle_deregister_user_payload( + &k_ioctl); + break; + case CAM_SYNC_SIGNAL: + rc = cam_sync_handle_signal(&k_ioctl); + break; + case CAM_SYNC_MERGE: + rc = cam_sync_handle_merge(&k_ioctl); + break; + case CAM_SYNC_WAIT: + rc = cam_sync_handle_wait(&k_ioctl); + ((struct cam_private_ioctl_arg *)arg)->result = + k_ioctl.result; + break; + case CAM_GENERIC_FENCE_CREATE: + case CAM_GENERIC_FENCE_RELEASE: + case CAM_GENERIC_FENCE_IMPORT: + case CAM_GENERIC_FENCE_SIGNAL: + rc = cam_generic_fence_parser(&k_ioctl); + break; + default: + rc = -ENOIOCTLCMD; + } + + return rc; +} + +static __poll_t cam_sync_poll(struct file *f, + struct poll_table_struct *pll_table) +{ + __poll_t masks = 0; + struct v4l2_fh *eventq = f->private_data; + + if (!eventq) { + CAM_ERR(CAM_SYNC, "v4l2_fh_poll with unexpected input eventq"); + return masks; + } + + poll_wait(f, &eventq->wait, pll_table); + if (v4l2_event_pending(eventq)) + masks |= POLLPRI; + + return masks; +} + +static int cam_sync_open(struct file *filep) +{ + int rc; + struct sync_device *sync_dev = video_drvdata(filep); + + if (!sync_dev) { + CAM_ERR(CAM_SYNC, "Sync device NULL"); + return -ENODEV; + } + + mutex_lock(&sync_dev->table_lock); + if (sync_dev->open_cnt >= 1) { + mutex_unlock(&sync_dev->table_lock); + return -EALREADY; + } + + rc = v4l2_fh_open(filep); + if (!rc) { + sync_dev->open_cnt++; +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + cam_synx_obj_open(); +#endif + cam_dma_fence_open(); + spin_lock_bh(&sync_dev->cam_sync_eventq_lock); + sync_dev->cam_sync_eventq = filep->private_data; + spin_unlock_bh(&sync_dev->cam_sync_eventq_lock); + } else { + CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc); + } + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, 
&cam_sync_monitor_mask)) { + sync_dev->mon_data = CAM_MEM_ZALLOC( + sizeof(struct cam_generic_fence_monitor_data *) * + CAM_SYNC_MONITOR_TABLE_SIZE, GFP_KERNEL); + if (!sync_dev->mon_data) { + CAM_WARN(CAM_SYNC, "Failed to allocate memory %zu", + sizeof(struct cam_generic_fence_monitor_data *) * + CAM_SYNC_MONITOR_TABLE_SIZE); + } + } + + mutex_unlock(&sync_dev->table_lock); + + return rc; +} + +static int cam_sync_close(struct file *filep) +{ + int rc = 0, i; + struct sync_device *sync_dev = video_drvdata(filep); + + if (!sync_dev) { + CAM_ERR(CAM_SYNC, "Sync device NULL"); + rc = -ENODEV; + return rc; + } + + mutex_lock(&sync_dev->table_lock); + sync_dev->open_cnt--; + if (!sync_dev->open_cnt) { + for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) { + struct sync_table_row *row = + sync_dev->sync_table + i; + + /* + * Signal all ACTIVE objects as ERR, but we don't + * care about the return status here apart from logging + * it. + */ + if (row->state == CAM_SYNC_STATE_ACTIVE) { + rc = cam_sync_signal(i, + CAM_SYNC_STATE_SIGNALED_ERROR, + CAM_SYNC_COMMON_RELEASE_EVENT); + if (rc < 0) + CAM_ERR(CAM_SYNC, + "Cleanup signal fail idx:%d", i); + } + } + + /* + * Flush the work queue to wait for pending signal callbacks to + * finish + */ + flush_workqueue(sync_dev->work_queue); + + /* + * Now that all callbacks worker threads have finished, + * destroy the sync objects + */ + for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) { + struct sync_table_row *row = + sync_dev->sync_table + i; + + if (row->state != CAM_SYNC_STATE_INVALID) { + rc = cam_sync_destroy(i); + if (rc < 0) + CAM_ERR(CAM_SYNC, + "Cleanup destroy fail:idx:%d\n", i); + } + } + + if (sync_dev->mon_data) { + for (i = 0; i < CAM_SYNC_MONITOR_TABLE_SIZE; i++) { + CAM_MEM_FREE(sync_dev->mon_data[i]); + sync_dev->mon_data[i] = NULL; + } + } + CAM_MEM_FREE(sync_dev->mon_data); + sync_dev->mon_data = NULL; + } + + /* Clean dma fence table */ + cam_dma_fence_close(); +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + /* Clean synx obj table 
*/ + cam_synx_obj_close(); +#endif + mutex_unlock(&sync_dev->table_lock); + + spin_lock_bh(&sync_dev->cam_sync_eventq_lock); + sync_dev->cam_sync_eventq = NULL; + spin_unlock_bh(&sync_dev->cam_sync_eventq_lock); + v4l2_fh_release(filep); + + return rc; +} + +static void cam_sync_event_queue_notify_error(const struct v4l2_event *old, + struct v4l2_event *new) +{ + if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) { + struct cam_sync_ev_header_v2 *ev_header; + + ev_header = CAM_SYNC_GET_HEADER_PTR_V2((*old)); + CAM_ERR(CAM_CRM, + "Failed to notify event id %d fence %d status %d reason %u %u %u %u", + old->id, ev_header->sync_obj, ev_header->status, + ev_header->evt_param[0], ev_header->evt_param[1], + ev_header->evt_param[2], ev_header->evt_param[3]); + + } else { + struct cam_sync_ev_header *ev_header; + + ev_header = CAM_SYNC_GET_HEADER_PTR((*old)); + CAM_ERR(CAM_CRM, + "Failed to notify event id %d fence %d status %d", + old->id, ev_header->sync_obj, ev_header->status); + } +} + +static struct v4l2_subscribed_event_ops cam_sync_v4l2_ops = { + .merge = cam_sync_event_queue_notify_error, +}; + +int cam_sync_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (!((sub->type == CAM_SYNC_V4L_EVENT) || + (sub->type == CAM_SYNC_V4L_EVENT_V2))) { + CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type); + return -EINVAL; + } + + sync_dev->version = sub->type; + CAM_DBG(CAM_SYNC, "Sync event version type 0x%x", sync_dev->version); + return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS, + &cam_sync_v4l2_ops); +} + +int cam_sync_unsubscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (!((sub->type == CAM_SYNC_V4L_EVENT) || + (sub->type == CAM_SYNC_V4L_EVENT_V2))) { + CAM_ERR(CAM_SYNC, "Non supported event type 0x%x", sub->type); + return -EINVAL; + } + + return v4l2_event_unsubscribe(fh, sub); +} + +static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = { + .vidioc_subscribe_event = 
cam_sync_subscribe_event, + .vidioc_unsubscribe_event = cam_sync_unsubscribe_event, + .vidioc_default = cam_sync_dev_ioctl, +}; + +static struct v4l2_file_operations cam_sync_v4l2_fops = { + .owner = THIS_MODULE, + .open = cam_sync_open, + .release = cam_sync_close, + .poll = cam_sync_poll, + .unlocked_ioctl = video_ioctl2, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = video_ioctl2, +#endif +}; + +#if IS_REACHABLE(CONFIG_MEDIA_CONTROLLER) +static int cam_sync_media_controller_init(struct sync_device *sync_dev, + struct platform_device *pdev) +{ + int rc; + + sync_dev->v4l2_dev.mdev = CAM_MEM_ZALLOC(sizeof(struct media_device), + GFP_KERNEL); + if (!sync_dev->v4l2_dev.mdev) + return -ENOMEM; + + media_device_init(sync_dev->v4l2_dev.mdev); + strscpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME, + sizeof(sync_dev->v4l2_dev.mdev->model)); + sync_dev->v4l2_dev.mdev->dev = &(pdev->dev); + + rc = media_device_register(sync_dev->v4l2_dev.mdev); + if (rc < 0) + goto register_fail; + + rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL); + if (rc < 0) + goto entity_fail; + + return 0; + +entity_fail: + media_device_unregister(sync_dev->v4l2_dev.mdev); +register_fail: + media_device_cleanup(sync_dev->v4l2_dev.mdev); + return rc; +} + +static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev) +{ + media_entity_cleanup(&sync_dev->vdev->entity); + media_device_unregister(sync_dev->v4l2_dev.mdev); + media_device_cleanup(sync_dev->v4l2_dev.mdev); + CAM_MEM_FREE(sync_dev->v4l2_dev.mdev); +} + +static void cam_sync_init_entity(struct sync_device *sync_dev) +{ + sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE; + sync_dev->vdev->entity.name = + video_device_node_name(sync_dev->vdev); +} +#else +static int cam_sync_media_controller_init(struct sync_device *sync_dev, + struct platform_device *pdev) +{ + return 0; +} + +static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev) +{ +} + +static void cam_sync_init_entity(struct 
sync_device *sync_dev) +{ +} +#endif + +static int cam_sync_create_debugfs(void) +{ + int rc; + struct dentry *dbgfileptr = NULL; + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("sync", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_SYNC,"DebugFS could not create directory!"); + rc = -ENOENT; + goto end; + } + /* Store parent inode for cleanup in caller */ + sync_dev->dentry = dbgfileptr; + + debugfs_create_bool("trigger_cb_without_switch", 0644, + sync_dev->dentry, &trigger_cb_without_switch); + + debugfs_create_ulong("cam_sync_monitor_mask", 0644, + sync_dev->dentry, &cam_sync_monitor_mask); +end: + return rc; +} + +#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) +int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status) +{ + int rc; + uint32_t sync_status = synx_status; + + switch (synx_status) { + case SYNX_STATE_ACTIVE: + sync_status = CAM_SYNC_STATE_ACTIVE; + break; + case SYNX_STATE_SIGNALED_SUCCESS: + sync_status = CAM_SYNC_STATE_SIGNALED_SUCCESS; + break; + case SYNX_STATE_SIGNALED_ERROR: + sync_status = CAM_SYNC_STATE_SIGNALED_ERROR; + break; + case 4: /* SYNX_STATE_SIGNALED_CANCEL: */ + sync_status = CAM_SYNC_STATE_SIGNALED_CANCEL; + break; + default: + CAM_ERR(CAM_SYNC, "Invalid synx status %d for obj %d", + synx_status, sync_obj); + sync_status = CAM_SYNC_STATE_SIGNALED_ERROR; + break; + } + + rc = cam_sync_signal(sync_obj, sync_status, CAM_SYNC_COMMON_EVENT_SYNX); + if (rc) { + CAM_ERR(CAM_SYNC, + "synx signal failed with %d, sync_obj=%d, synx_status=%d, sync_status=%d", + sync_obj, synx_status, sync_status, rc); + } + + return rc; +} + +int cam_synx_sync_register_callback(sync_callback cb_func, + void *userdata, int32_t sync_obj) +{ + return cam_sync_register_callback(cb_func, userdata, sync_obj); +} + +int cam_synx_sync_deregister_callback(sync_callback cb_func, + void *userdata, int32_t sync_obj) +{ + return cam_sync_deregister_callback(cb_func, userdata, sync_obj); +} + +static int cam_sync_register_synx_bind_ops( + struct 
synx_register_params *object) +{ + int rc; + + rc = synx_register_ops(object); + if (rc) + CAM_ERR(CAM_SYNC, "synx registration fail with rc=%d", rc); + + return rc; +} + +static void cam_sync_unregister_synx_bind_ops( + struct synx_register_params *object) +{ + int rc; + + rc = synx_deregister_ops(object); + if (rc) + CAM_ERR(CAM_SYNC, "sync unregistration fail with %d", rc); +} + +static void cam_sync_configure_synx_obj(struct synx_register_params *object) +{ + struct synx_register_params *params = object; + + params->name = CAM_SYNC_NAME; + params->type = SYNX_TYPE_CSL; + params->ops.register_callback = cam_synx_sync_register_callback; + params->ops.deregister_callback = cam_synx_sync_deregister_callback; + params->ops.enable_signaling = cam_sync_get_obj_ref; + params->ops.signal = cam_synx_sync_signal; +} +#endif + +static int cam_sync_component_bind(struct device *dev, + struct device *master_dev, void *data) +{ + int rc, idx; + struct platform_device *pdev = to_platform_device(dev); + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + sync_dev = CAM_MEM_ZALLOC(sizeof(*sync_dev), GFP_KERNEL); + if (!sync_dev) + return -ENOMEM; + + sync_dev->sync_table = vzalloc(sizeof(struct sync_table_row) * CAM_SYNC_MAX_OBJS); + if (!sync_dev->sync_table) { + CAM_ERR(CAM_SYNC, "Mem Allocation failed for sync table"); + CAM_MEM_FREE(sync_dev); + return -ENOMEM; + } + + mutex_init(&sync_dev->table_lock); + spin_lock_init(&sync_dev->cam_sync_eventq_lock); + + for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++) + spin_lock_init(&sync_dev->row_spinlocks[idx]); + + sync_dev->vdev = video_device_alloc(); + if (!sync_dev->vdev) { + rc = -ENOMEM; + goto vdev_fail; + } + + rc = cam_sync_media_controller_init(sync_dev, pdev); + if (rc < 0) + goto mcinit_fail; + + sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev; + + rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev); + if (rc < 0) + goto register_fail; + + strscpy(sync_dev->vdev->name, 
CAM_SYNC_NAME, + sizeof(sync_dev->vdev->name)); + sync_dev->vdev->release = video_device_release_empty; + sync_dev->vdev->fops = &cam_sync_v4l2_fops; + sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops; + sync_dev->vdev->minor = -1; + sync_dev->vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE; + sync_dev->vdev->vfl_type = VFL_TYPE_VIDEO; + rc = video_register_device(sync_dev->vdev, VFL_TYPE_VIDEO, -1); + if (rc < 0) { + CAM_ERR(CAM_SYNC, + "video device registration failure rc = %d, name = %s, device_caps = %d", + rc, sync_dev->vdev->name, sync_dev->vdev->device_caps); + goto v4l2_fail; + } + + cam_sync_init_entity(sync_dev); + video_set_drvdata(sync_dev->vdev, sync_dev); + bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS); + + /* + * We treat zero as invalid handle, so we will keep the 0th bit set + * always + */ + set_bit(0, sync_dev->bitmap); + + sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME, + WQ_HIGHPRI | WQ_UNBOUND, 1); + + if (!sync_dev->work_queue) { + CAM_ERR(CAM_SYNC, + "Error: high priority work queue creation failed"); + rc = -ENOMEM; + goto v4l2_fail; + } + + /* Initialize dma fence driver */ + rc = cam_dma_fence_driver_init(); + if (rc) { + CAM_ERR(CAM_SYNC, + "DMA fence driver initialization failed rc: %d", rc); + goto workq_destroy; + } + + trigger_cb_without_switch = false; + cam_sync_monitor_mask = 0; + cam_sync_create_debugfs(); +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) + /* Initialize synx obj driver */ + rc = cam_synx_obj_driver_init(); + if (rc) { + CAM_ERR(CAM_SYNC, + "Synx obj driver initialization failed rc: %d", rc); + goto dma_driver_deinit; + } +#elif IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) + CAM_DBG(CAM_SYNC, "Registering with synx driver"); + cam_sync_configure_synx_obj(&sync_dev->params); + rc = cam_sync_register_synx_bind_ops(&sync_dev->params); + if (rc) + goto dma_driver_deinit; +#endif + CAM_DBG(CAM_SYNC, "Component bound successfully"); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, 
		microsec);
	cam_record_bind_latency(pdev->name, microsec);

	return rc;

/*
 * Error unwind for cam_sync_component_bind(); labels undo init steps in
 * reverse order of setup above.
 */
#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) || IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
dma_driver_deinit:
	cam_dma_fence_driver_deinit();
#endif
workq_destroy:
	destroy_workqueue(sync_dev->work_queue);
v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	/*
	 * NOTE(review): this label is also reached when media-controller init
	 * fails, i.e. before video_register_device() ran — confirm
	 * video_unregister_device() is safe on a never-registered vdev.
	 */
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
vdev_fail:
	vfree(sync_dev->sync_table);
	mutex_destroy(&sync_dev->table_lock);
	CAM_MEM_FREE(sync_dev);
	return rc;
}

/*
 * Component unbind: tears down everything cam_sync_component_bind() set up
 * (v4l2 device, media controller, synx bind ops, video node, dma-fence
 * driver, sync table) and drops the global sync_dev.
 */
static void cam_sync_component_unbind(struct device *dev,
	struct device *master_dev, void *data)
{
	int i;

	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
	cam_synx_obj_driver_deinit();
#elif IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
	cam_sync_unregister_synx_bind_ops(&sync_dev->params);
#endif
	video_unregister_device(sync_dev->vdev);
	video_device_release(sync_dev->vdev);
	/* debugfs entries themselves are removed elsewhere; only drop the ref */
	sync_dev->dentry = NULL;

	cam_dma_fence_driver_deinit();
	/*
	 * NOTE(review): re-running spin_lock_init() on the row locks during
	 * teardown resets them to the unlocked state; appears intentional
	 * (sync_dev memory is freed right after) — confirm no waiter can
	 * still hold one of these locks here.
	 */
	for (i = 0; i < CAM_SYNC_MAX_OBJS; i++)
		spin_lock_init(&sync_dev->row_spinlocks[i]);

	vfree(sync_dev->sync_table);
	CAM_MEM_FREE(sync_dev);
	sync_dev = NULL;
}

const static struct component_ops cam_sync_component_ops = {
	.bind = cam_sync_component_bind,
	.unbind = cam_sync_component_unbind,
};

/* Probe only registers the component; real init runs in component_bind */
static int cam_sync_probe(struct platform_device *pdev)
{
	int rc;

	CAM_DBG(CAM_SYNC, "Adding Sync component");
	rc = component_add(&pdev->dev, &cam_sync_component_ops);
	if (rc)
		CAM_ERR(CAM_SYNC, "failed to add component rc: %d", rc);

	return rc;
}

static int cam_sync_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &cam_sync_component_ops);
	return 0;
}

/* Device-tree match table: binds against "qcom,cam-sync" nodes */
static const struct of_device_id cam_sync_dt_match[] = {
	{.compatible = "qcom,cam-sync"},
	{}
};
MODULE_DEVICE_TABLE(of, cam_sync_dt_match);

/* Platform driver glue; bind/unbind logic lives in the component ops */
struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.of_match_table = cam_sync_dt_match,
		.suppress_bind_attrs = true,
	},
};

/* Register the cam_sync platform driver; returns 0 or negative errno */
int cam_sync_init(void)
{
	return platform_driver_register(&cam_sync_driver);
}

/* Unregister the cam_sync platform driver */
void cam_sync_exit(void)
{
	platform_driver_unregister(&cam_sync_driver);
}

MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");
diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_api.h b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_api.h
new file mode 100644
index 0000000000..0c77ae427a
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_api.h
@@ -0,0 +1,184 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef __CAM_SYNC_API_H__
#define __CAM_SYNC_API_H__

#include
#include
#include
#include
#include
/*
 * NOTE(review): the five #include directives above carry no header names in
 * this patch text — the targets (presumably <linux/...> headers) appear to
 * have been stripped by tooling; verify against the original file.
 */

#define SYNC_DEBUG_NAME_LEN 63
typedef void (*sync_callback)(int32_t sync_obj, int status, void *data);

/* Synx cores for which cam_sync_synx_core_recovery() can be requested */
enum cam_sync_synx_supported_cores {
	CAM_ICP_0_SYNX_CORE = 1,
	CAM_ICP_1_SYNX_CORE,
	CAM_INVALID_SYNX_CORE,
};

/* Kernel APIs */

/**
 * @brief: Creates a sync object
 *
 * The newly created sync obj is assigned to sync_obj.
 * sync object.
 */
int cam_sync_create(int32_t *sync_obj, const char *name);

/**
 * @brief: Registers a callback with a sync object
 *
 * @param cb_func : Pointer to callback to be registered
 * @param userdata : Opaque pointer which will be passed back with callback.
 * @param sync_obj : int referencing the sync object.
 *
 * @return Status of operation. Zero in case of success.
 * -EINVAL will be returned if userdata is invalid.
 * -ENOMEM will be returned if cb_func is invalid.
 */
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj);

/**
 * @brief: De-registers a callback with a sync object
 *
 * @param cb_func : Pointer to callback to be de-registered
 * @param userdata : Opaque pointer which will be passed back with callback.
 * @param sync_obj : int referencing the sync object.
 *
 * @return Status of operation. Zero in case of success.
 * -EINVAL will be returned if userdata is invalid.
 * -ENOMEM will be returned if cb_func is invalid.
 * -ENOENT will be returned if call back not found
 */
int cam_sync_deregister_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj);

/**
 * @brief: Signals a sync object with the status argument.
 *
 * This function will signal the sync object referenced by the sync_obj
 * parameter and when doing so, will trigger callbacks in both user space and
 * kernel. Callbacks will be triggered asynchronously and their order of
 * execution is not guaranteed. The status parameter will indicate whether the
 * entity performing the signaling wants to convey an error case or a success
 * case.
 *
 * @param sync_obj : int referencing the sync object.
 * @param status : Status of the signaling. Can be either SYNC_SIGNAL_ERROR or
 * SYNC_SIGNAL_SUCCESS.
 * @param evt_param : Event parameter
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
+ */ +int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t evt_param); + +/** + * @brief: Merges multiple sync objects + * + * This function will merge multiple sync objects into a sync group. + * + * @param sync_obj : pointer to a block of ints to be merged + * @param num_objs : Number of ints in the block + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj); + +/** + * @brief: get ref count of sync obj + * + * This function will increment ref count for the sync object, and the ref + * count will be decremented when this sync object is signaled. + * + * @param sync_obj: sync object + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_get_obj_ref(int32_t sync_obj); + +/** + * @brief: put ref count of sync obj + * + * This function will decrement ref count for the sync object. + * + * @param sync_obj: sync object + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_put_obj_ref(int32_t sync_obj); + +/** + * @brief: Destroys a sync object + * + * @param sync_obj: int referencing the sync object to be destroyed + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_destroy(int32_t sync_obj); + +/** + * @brief: Waits for a sync object synchronously + * + * Does a wait on the sync object identified by sync_obj for a maximum + * of timeout_ms milliseconds. Must not be called from interrupt context as + * this API can sleep. Should be called from process context only. + * + * @param sync_obj : int referencing the sync object to be waited upon + * @param timeout_ms : Timeout in ms. + * + * @return Status of operation. 
Zero in case of success
 * -EINVAL if sync object is in bad state or arguments are invalid
 * -ETIMEDOUT if wait times out
 */
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms);

/**
 * @brief: Check if sync object is valid
 *
 * @param sync_obj: int referencing the sync object to be checked
 *
 * @return Status of operation. Zero in case of success
 * -EINVAL if sync object is in bad state or arguments are invalid
 */
int cam_sync_check_valid(int32_t sync_obj);

/**
 * @brief: Synx recovery for a given core
 *
 * @param core_id: Core ID we want to recover for
 *
 * @return Status of operation. Zero in case of success
 * -EINVAL if core_id is invalid
 */
int cam_sync_synx_core_recovery(
	enum cam_sync_synx_supported_cores core_id);

/**
 * @brief : API to register SYNC to platform framework.
 *
 * @return Zero on success, or a negative error code on failure.
 */
int cam_sync_init(void);

/**
 * @brief : API to remove SYNC from platform framework.
 */
void cam_sync_exit(void);
#endif /* __CAM_SYNC_API_H__ */
diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_dma_fence.c b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_dma_fence.c
new file mode 100644
index 0000000000..36df3fe95d
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_dma_fence.c
@@ -0,0 +1,891 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "cam_sync_dma_fence.h"
#include "cam_sync_util.h"
#include "cam_mem_mgr_api.h"

/* Debug monitoring bitmask owned by the cam_sync core */
extern unsigned long cam_sync_monitor_mask;

/**
 * struct cam_dma_fence_row - DMA fence row
 * @name:                   debug name for this fence slot
 * @fence:                  the tracked dma_fence
 * @fd:                     fd associated with @fence
 * @state:                  slot state (INVALID/ACTIVE/SIGNALED)
 * @fence_cb:               callback record handed to dma_fence_add_callback()
 * @sync_obj:               sync object to notify when @fence signals
 * @sync_cb:                notification routine invoked from the fence cb
 * @cb_registered_for_sync: true once @fence_cb was registered for @sync_obj
 * @ext_dma_fence:          true if @fence was imported via sync_file (not
 *                          created by this driver)
 * @sync_signal_dma:        set when the sync obj itself signals the fence,
 *                          so the fence callback must not loop back
 */
struct cam_dma_fence_row {
	char name[CAM_DMA_FENCE_NAME_LEN];
	struct dma_fence *fence;
	int32_t fd;
	enum cam_dma_fence_state state;
	struct dma_fence_cb fence_cb;
	int32_t sync_obj;
	cam_sync_callback_for_dma_fence sync_cb;
	bool cb_registered_for_sync;
	bool ext_dma_fence;
	bool sync_signal_dma;
};

/**
 * struct cam_dma_fence_device - DMA fence device
 * @dma_fence_context: fence context id used for fences created here
 * @rows:              fixed-size fence table; slot 0 is reserved/invalid
 * @row_spinlocks:     one BH spinlock per table row
 * @dev_lock:          lock handed to the generic fence monitor helpers
 * @bitmap:            allocation bitmap over @rows
 * @monitor_data:      optional debug-monitor records, gated by
 *                     cam_sync_monitor_mask
 */
struct cam_dma_fence_device {
	uint64_t dma_fence_context;
	struct cam_dma_fence_row rows[CAM_DMA_FENCE_MAX_FENCES];
	spinlock_t row_spinlocks[CAM_DMA_FENCE_MAX_FENCES];
	struct mutex dev_lock;
	DECLARE_BITMAP(bitmap, CAM_DMA_FENCE_MAX_FENCES);
	struct cam_generic_fence_monitor_data **monitor_data;
};

/* Monotonic seqno source for fences created by this driver */
static atomic64_t g_cam_dma_fence_seq_no;
static struct cam_dma_fence_device *g_cam_dma_fence_dev;

/* dma_fence_ops: signaling is always enabled for camera fences */
bool __cam_dma_fence_enable_signaling(
	struct dma_fence *fence)
{
	return true;
}

/* dma_fence_ops: driver and timeline share the same name string */
const char *__cam_dma_fence_get_driver_name(
	struct dma_fence *fence)
{
	return "Camera DMA fence driver";
}

/*
 * dma_fence_ops release hook: the fence and its spinlock are allocated
 * separately at creation time, so both must be freed here.
 */
void __cam_dma_fence_free(struct dma_fence *fence)
{
	CAM_DBG(CAM_DMA_FENCE,
		"Free memory for dma fence context: %llu seqno: %llu",
		fence->context, fence->seqno);
	CAM_MEM_FREE(fence->lock);
	CAM_MEM_FREE(fence);
}

static struct dma_fence_ops cam_sync_dma_fence_ops = {
	.enable_signaling = __cam_dma_fence_enable_signaling,
	.get_driver_name = __cam_dma_fence_get_driver_name,
	.get_timeline_name = __cam_dma_fence_get_driver_name,
	.release = __cam_dma_fence_free,
};

/*
 * Return the active monitor-entry buffer for row @idx; the swap flag selects
 * between the two ping-pong buffers.
 */
static inline struct cam_generic_fence_monitor_entry *
	__cam_dma_fence_get_monitor_entries(int idx)
{
	struct cam_generic_fence_monitor_data *monitor_data;

	monitor_data = CAM_GENERIC_MONITOR_GET_DATA(g_cam_dma_fence_dev->monitor_data, idx);
	if (monitor_data->swap_monitor_entries)
		return
monitor_data->prev_monitor_entries; + else + return monitor_data->monitor_entries; +} + +static inline struct cam_generic_fence_monitor_entry * + __cam_dma_fence_get_prev_monitor_entries(int idx) +{ + struct cam_generic_fence_monitor_data *monitor_data; + + monitor_data = CAM_GENERIC_MONITOR_GET_DATA(g_cam_dma_fence_dev->monitor_data, idx); + if (monitor_data->swap_monitor_entries) + return monitor_data->monitor_entries; + else + return monitor_data->prev_monitor_entries; +} + +static void __cam_dma_fence_print_table(void) +{ + int i; + struct cam_dma_fence_row *row; + struct dma_fence *fence; + + /* Index zero is marked as an invalid slot */ + for (i = 1; i < CAM_DMA_FENCE_MAX_FENCES; i++) { + spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]); + row = &g_cam_dma_fence_dev->rows[i]; + + /* free slots starting this index */ + if (row->state == CAM_DMA_FENCE_STATE_INVALID) { + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]); + return; + } + + fence = row->fence; + CAM_INFO(CAM_DMA_FENCE, + "Idx: %d seqno: %llu name: %s state: %d", + i, fence->seqno, row->name, row->state); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]); + } +} + +static int __cam_dma_fence_find_free_idx(uint32_t *idx) +{ + int rc = 0; + bool bit; + + do { + *idx = find_first_zero_bit(g_cam_dma_fence_dev->bitmap, CAM_DMA_FENCE_MAX_FENCES); + if (*idx >= CAM_DMA_FENCE_MAX_FENCES) { + rc = -ENOMEM; + break; + } + + bit = test_and_set_bit(*idx, g_cam_dma_fence_dev->bitmap); + } while (bit); + + if (rc) { + CAM_ERR(CAM_DMA_FENCE, "No free idx, printing dma fence table......"); + __cam_dma_fence_print_table(); + } + + return rc; +} + +static struct dma_fence *__cam_dma_fence_find_fence_in_table( + int32_t fd, int32_t *idx) +{ + int i; + struct dma_fence *fence = NULL; + struct cam_dma_fence_row *row = NULL; + + for (i = 0; i < CAM_DMA_FENCE_MAX_FENCES; i++) { + spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]); + + row = &g_cam_dma_fence_dev->rows[i]; + if ((row->state != 
CAM_DMA_FENCE_STATE_INVALID) && (row->fd == fd)) { + *idx = i; + fence = row->fence; + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]); + break; + } + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]); + } + + return fence; +} + +static void __cam_dma_fence_init_row(const char *name, + struct dma_fence *dma_fence, int32_t fd, uint32_t idx, + bool ext_dma_fence) +{ + struct cam_dma_fence_row *row; + + spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]); + row = &g_cam_dma_fence_dev->rows[idx]; + row->fence = dma_fence; + row->fd = fd; + row->state = CAM_DMA_FENCE_STATE_ACTIVE; + row->ext_dma_fence = ext_dma_fence; + strscpy(row->name, name, CAM_DMA_FENCE_NAME_LEN); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) { + cam_generic_fence_update_monitor_array(idx, + &g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_CREATE); + } + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]); +} + +void __cam_dma_fence_signal_cb( + struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct cam_dma_fence_signal_sync_obj signal_sync_obj; + struct cam_dma_fence_row *dma_fence_row = + container_of(cb, struct cam_dma_fence_row, fence_cb); + uint32_t idx; + + if (dma_fence_row->state == CAM_DMA_FENCE_STATE_INVALID) { + CAM_ERR(CAM_DMA_FENCE, "dma fence seqno: %llu is in invalid state: %d", + fence->seqno, dma_fence_row->state); + return; + } + + /* If this dma fence is signaled by sync obj, skip cb */ + if (dma_fence_row->sync_signal_dma) + return; + + CAM_DBG(CAM_DMA_FENCE, "dma fence seqno: %llu fd: %d signaled, signal sync obj: %d", + fence->seqno, dma_fence_row->fd, dma_fence_row->sync_obj); + if ((dma_fence_row->cb_registered_for_sync) && (dma_fence_row->sync_cb)) { + signal_sync_obj.fd = dma_fence_row->fd; + + /* + * Signal is invoked with the fence lock held, + * lock not needed to query status + */ + signal_sync_obj.status = dma_fence_get_status_locked(fence); + dma_fence_row->state = 
CAM_DMA_FENCE_STATE_SIGNALED; + dma_fence_row->sync_cb(dma_fence_row->sync_obj, &signal_sync_obj); + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, + &cam_sync_monitor_mask)) { + __cam_dma_fence_find_fence_in_table(dma_fence_row->fd, &idx); + cam_generic_fence_update_monitor_array(idx, + &g_cam_dma_fence_dev->dev_lock, + g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_UNREGISTER_ON_SIGNAL); + } + } +} + +static void __cam_dma_fence_dump_monitor_array(int dma_row_idx) +{ + struct dma_fence *fence; + struct cam_generic_fence_monitor_obj_info obj_info; + struct cam_dma_fence_row *row; + + if (!g_cam_dma_fence_dev->monitor_data || + !test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) + return; + + if (!CAM_GENERIC_MONITOR_GET_DATA(g_cam_dma_fence_dev->monitor_data, + dma_row_idx)->prev_obj_id) + return; + + row = &g_cam_dma_fence_dev->rows[dma_row_idx]; + fence = row->fence; + + obj_info.name = row->name; + obj_info.obj_id = row->fd; + obj_info.state = row->state; + obj_info.ref_cnt = kref_read(&fence->refcount); + obj_info.monitor_data = CAM_GENERIC_MONITOR_GET_DATA( + g_cam_dma_fence_dev->monitor_data, dma_row_idx); + obj_info.fence_type = CAM_GENERIC_FENCE_TYPE_DMA_FENCE; + obj_info.sync_id = row->sync_obj; + obj_info.monitor_entries = + __cam_dma_fence_get_monitor_entries(dma_row_idx); + obj_info.prev_monitor_entries = + __cam_dma_fence_get_prev_monitor_entries(dma_row_idx); + cam_generic_fence_dump_monitor_array(&obj_info); +} + +int cam_dma_fence_get_put_ref( + bool get_or_put, int32_t dma_fence_row_idx) +{ + struct dma_fence *dma_fence; + struct cam_dma_fence_row *row; + int rc = 0; + + if ((dma_fence_row_idx < 0) || + (dma_fence_row_idx >= CAM_DMA_FENCE_MAX_FENCES)) { + CAM_ERR(CAM_DMA_FENCE, "dma fence idx: %d is invalid", + dma_fence_row_idx); + return -EINVAL; + } + + spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + row = &g_cam_dma_fence_dev->rows[dma_fence_row_idx]; + + if (row->state == 
CAM_DMA_FENCE_STATE_INVALID) { + CAM_ERR(CAM_DMA_FENCE, + "dma fence at idx: %d is in invalid state: %d", + dma_fence_row_idx, row->state); + rc = -EINVAL; + goto monitor_dump; + } + + dma_fence = row->fence; + + if (get_or_put) + dma_fence_get(dma_fence); + else + dma_fence_put(dma_fence); + + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + + CAM_DBG(CAM_DMA_FENCE, "Refcnt: %u after %s for dma fence with seqno: %llu", + kref_read(&dma_fence->refcount), (get_or_put ? "getref" : "putref"), + dma_fence->seqno); + + return rc; + +monitor_dump: + __cam_dma_fence_dump_monitor_array(dma_fence_row_idx); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + return rc; +} + +static struct dma_fence *cam_dma_fence_get_fence_from_sync_file( + int32_t fd, int32_t *dma_fence_row_idx) +{ + uint32_t idx; + struct dma_fence *dma_fence = NULL; + + dma_fence = sync_file_get_fence(fd); + if (IS_ERR_OR_NULL(dma_fence)) { + CAM_ERR(CAM_DMA_FENCE, "Invalid fd: %d no dma fence found", fd); + return ERR_PTR(-EINVAL); + } + + if (__cam_dma_fence_find_free_idx(&idx)) { + CAM_ERR(CAM_DMA_FENCE, "No free idx"); + goto end; + } + + __cam_dma_fence_init_row(dma_fence->ops->get_driver_name(dma_fence), + dma_fence, fd, idx, true); + *dma_fence_row_idx = idx; + CAM_DBG(CAM_DMA_FENCE, + "External dma fence with fd: %d seqno: %llu ref_cnt: %u updated in tbl", + fd, dma_fence->seqno, kref_read(&dma_fence->refcount)); + + return dma_fence; + +end: + dma_fence_put(dma_fence); + return NULL; +} + +struct dma_fence *cam_dma_fence_get_fence_from_fd( + int32_t fd, int32_t *dma_fence_row_idx) +{ + struct dma_fence *dma_fence = NULL; + struct cam_dma_fence_row *row; + + dma_fence = __cam_dma_fence_find_fence_in_table(fd, dma_fence_row_idx); + if (IS_ERR_OR_NULL(dma_fence)) { + CAM_WARN(CAM_DMA_FENCE, + "dma fence with fd: %d is an external fence, querying sync file", + fd); + return cam_dma_fence_get_fence_from_sync_file(fd, dma_fence_row_idx); + } + + 
spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[*dma_fence_row_idx]); + row = &g_cam_dma_fence_dev->rows[*dma_fence_row_idx]; + + if ((row->state == CAM_DMA_FENCE_STATE_INVALID) || + (row->fd != fd) || + (row->fence != dma_fence)) { + CAM_ERR(CAM_DMA_FENCE, + "dma fence at idx: %d is in invalid state: %d", + dma_fence_row_idx, row->state); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[*dma_fence_row_idx]); + return ERR_PTR(-EINVAL); + } + + dma_fence_get(dma_fence); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[*dma_fence_row_idx]); + + CAM_DBG(CAM_DMA_FENCE, "dma fence found for fd: %d with seqno: %llu ref_cnt: %u", + fd, dma_fence->seqno, kref_read(&dma_fence->refcount)); + + return dma_fence; +} + +int cam_dma_fence_register_cb(int32_t *sync_obj, int32_t *dma_fence_idx, + cam_sync_callback_for_dma_fence sync_cb) +{ + int rc = 0, dma_fence_row_idx; + struct cam_dma_fence_row *row; + struct dma_fence *dma_fence; + + if (!sync_obj || !dma_fence_idx || !sync_cb) { + CAM_ERR(CAM_DMA_FENCE, + "Invalid args sync_obj: %p dma_fence_idx: %p sync_cb: %p", + sync_obj, dma_fence_idx, sync_cb); + return -EINVAL; + } + + dma_fence_row_idx = *dma_fence_idx; + if ((dma_fence_row_idx < 0) || + (dma_fence_row_idx >= CAM_DMA_FENCE_MAX_FENCES)) { + CAM_ERR(CAM_DMA_FENCE, "dma fence idx: %d is invalid", + dma_fence_row_idx); + return -EINVAL; + } + + spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + row = &g_cam_dma_fence_dev->rows[dma_fence_row_idx]; + dma_fence = row->fence; + + if (row->state != CAM_DMA_FENCE_STATE_ACTIVE) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(dma_fence_row_idx, + &g_cam_dma_fence_dev->dev_lock, + g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_SKIP_REGISTER_CB); + CAM_ERR(CAM_DMA_FENCE, + "dma fence at idx: %d fd: %d seqno: %llu is not active, current state: %d", + dma_fence_row_idx, row->fd, dma_fence->seqno, row->state); + rc = -EINVAL; + goto 
monitor_dump; + } + + /** + * If the cb is already registered, return + * If a fd is closed by userspace without releasing the dma fence, it is + * possible that same fd is returned to a new fence. + */ + if (row->cb_registered_for_sync) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(dma_fence_row_idx, + &g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_ALREADY_REGISTERED_CB); + + CAM_WARN(CAM_DMA_FENCE, + "dma fence at idx: %d fd: %d seqno: %llu has already registered a cb for sync: %d - same fd for 2 fences?", + dma_fence_row_idx, row->fd, dma_fence->seqno, row->sync_obj); + goto monitor_dump; + } + + rc = dma_fence_add_callback(row->fence, &row->fence_cb, + __cam_dma_fence_signal_cb); + if (rc) { + CAM_ERR(CAM_DMA_FENCE, + "Failed to register cb for dma fence fd: %d seqno: %llu rc: %d", + row->fd, dma_fence->seqno, rc); + goto monitor_dump; + } + + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(dma_fence_row_idx, + &g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_REGISTER_CB); + + row->cb_registered_for_sync = true; + row->sync_obj = *sync_obj; + row->sync_cb = sync_cb; + + CAM_DBG(CAM_DMA_FENCE, + "CB successfully registered for dma fence fd: %d seqno: %llu for sync_obj: %d", + row->fd, dma_fence->seqno, *sync_obj); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + return rc; + +monitor_dump: + __cam_dma_fence_dump_monitor_array(dma_fence_row_idx); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + return rc; +} + +static int __cam_dma_fence_signal_fence( + struct dma_fence *dma_fence, + int32_t status) +{ + int rc; + bool fence_signaled = false; + + spin_lock_bh(dma_fence->lock); + fence_signaled = dma_fence_is_signaled_locked(dma_fence); + if (fence_signaled) { + rc = -EPERM; + CAM_DBG(CAM_DMA_FENCE, + "dma 
fence seqno: %llu is already signaled", + dma_fence->seqno); + goto end; + } + + if (status) + dma_fence_set_error(dma_fence, status); + + rc = dma_fence_signal_locked(dma_fence); + +end: + spin_unlock_bh(dma_fence->lock); + return rc; +} + +int cam_dma_fence_internal_signal( + int32_t dma_fence_row_idx, + struct cam_dma_fence_signal *signal_dma_fence) +{ + int rc; + struct dma_fence *dma_fence = NULL; + struct cam_dma_fence_row *row = NULL; + + if ((dma_fence_row_idx < 0) || + (dma_fence_row_idx >= CAM_DMA_FENCE_MAX_FENCES)) { + CAM_ERR(CAM_DMA_FENCE, "dma fence idx: %d is invalid", + dma_fence_row_idx); + return -EINVAL; + } + + spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + row = &g_cam_dma_fence_dev->rows[dma_fence_row_idx]; + + /* Ensures sync obj cb is not invoked */ + row->sync_signal_dma = true; + dma_fence = row->fence; + + if (IS_ERR_OR_NULL(dma_fence)) { + CAM_ERR(CAM_DMA_FENCE, "DMA fence in row: %d is invalid", + dma_fence_row_idx); + rc = -EINVAL; + goto monitor_dump; + } + + if (row->state == CAM_DMA_FENCE_STATE_SIGNALED) { + CAM_WARN(CAM_DMA_FENCE, + "dma fence fd: %d[seqno: %llu] already in signaled state", + signal_dma_fence->dma_fence_fd, dma_fence->seqno); + rc = 0; + goto monitor_dump; + } + + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(dma_fence_row_idx, + &g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_SIGNAL); + + if (row->cb_registered_for_sync) { + if (!dma_fence_remove_callback(row->fence, &row->fence_cb)) { + CAM_ERR(CAM_DMA_FENCE, + "Failed to remove cb for dma fence seqno: %llu fd: %d", + dma_fence->seqno, row->fd); + rc = -EINVAL; + goto monitor_dump; + } + } + + rc = __cam_dma_fence_signal_fence(dma_fence, signal_dma_fence->status); + if (rc) + CAM_WARN(CAM_DMA_FENCE, + "dma fence seqno: %llu fd: %d already signaled rc: %d", + dma_fence->seqno, row->fd, rc); + + row->state = CAM_DMA_FENCE_STATE_SIGNALED; + 
spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + + CAM_DBG(CAM_DMA_FENCE, + "dma fence fd: %d[seqno: %llu] signaled with status: %d rc: %d", + signal_dma_fence->dma_fence_fd, dma_fence->seqno, + signal_dma_fence->status, rc); + + return 0; + +monitor_dump: + __cam_dma_fence_dump_monitor_array(dma_fence_row_idx); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]); + return rc; +} + +int cam_dma_fence_signal_fd(struct cam_dma_fence_signal *signal_dma_fence) +{ + uint32_t idx; + struct dma_fence *dma_fence = NULL; + + dma_fence = __cam_dma_fence_find_fence_in_table( + signal_dma_fence->dma_fence_fd, &idx); + + if (IS_ERR_OR_NULL(dma_fence)) { + CAM_ERR(CAM_DMA_FENCE, "Failed to find dma fence for fd: %d", + signal_dma_fence->dma_fence_fd); + return -EINVAL; + } + + return cam_dma_fence_internal_signal(idx, signal_dma_fence); +} + +static int __cam_dma_fence_get_fd(int32_t *row_idx, + const char *name) +{ + int fd = -1; + uint32_t idx; + struct dma_fence *dma_fence = NULL; + spinlock_t *dma_fence_lock = NULL; + struct sync_file *sync_file = NULL; + + if (__cam_dma_fence_find_free_idx(&idx)) + goto end; + + dma_fence_lock = CAM_MEM_ZALLOC(sizeof(spinlock_t), GFP_KERNEL); + if (!dma_fence_lock) + goto free_idx; + + dma_fence = CAM_MEM_ZALLOC(sizeof(struct dma_fence), GFP_KERNEL); + if (!dma_fence) { + CAM_MEM_FREE(dma_fence_lock); + goto free_idx; + } + + spin_lock_init(dma_fence_lock); + dma_fence_init(dma_fence, &cam_sync_dma_fence_ops, dma_fence_lock, + g_cam_dma_fence_dev->dma_fence_context, + atomic64_inc_return(&g_cam_dma_fence_seq_no)); + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + CAM_ERR(CAM_DMA_FENCE, "failed to get a unused fd: %d", fd); + dma_fence_put(dma_fence); + goto free_idx; + } + + sync_file = sync_file_create(dma_fence); + if (!sync_file) { + put_unused_fd(fd); + fd = -1; + dma_fence_put(dma_fence); + goto free_idx; + } + + fd_install(fd, sync_file->file); + + *row_idx = idx; + 
__cam_dma_fence_init_row(name, dma_fence, fd, idx, false); + + CAM_DBG(CAM_DMA_FENCE, "Created dma fence fd: %d[%s] seqno: %llu row_idx: %u ref_cnt: %u", + fd, name, dma_fence->seqno, idx, kref_read(&dma_fence->refcount)); + + return fd; + +free_idx: + clear_bit(idx, g_cam_dma_fence_dev->bitmap); +end: + return fd; +} + +int cam_dma_fence_create_fd( + int32_t *dma_fence_fd, int32_t *dma_fence_row_idx, const char *name) +{ + int fd = -1, rc = 0; + + if (!dma_fence_fd || !dma_fence_row_idx) { + CAM_ERR(CAM_DMA_FENCE, "Invalid args fd: %pK dma_fence_row_idx: %pK", + dma_fence_fd, dma_fence_row_idx); + return -EINVAL; + } + + fd = __cam_dma_fence_get_fd(dma_fence_row_idx, name); + if (fd < 0) { + rc = -EBADFD; + goto end; + } + + *dma_fence_fd = fd; + +end: + return rc; +} + +void __cam_dma_fence_save_previous_monitor_data(int dma_row_idx) +{ + struct cam_generic_fence_monitor_data *row_mon_data; + struct cam_dma_fence_row *row; + + if (!g_cam_dma_fence_dev->monitor_data) + return; + + row = &g_cam_dma_fence_dev->rows[dma_row_idx]; + row_mon_data = CAM_GENERIC_MONITOR_GET_DATA( + g_cam_dma_fence_dev->monitor_data, dma_row_idx); + + /* save current usage details into prev variables */ + strscpy(row_mon_data->prev_name, row->name, CAM_DMA_FENCE_NAME_LEN); + row_mon_data->prev_obj_id = row->fd; + row_mon_data->prev_sync_id = row->sync_obj; + row_mon_data->prev_state = row->state; + row_mon_data->swap_monitor_entries = !row_mon_data->swap_monitor_entries; + row_mon_data->prev_monitor_head = atomic64_read(&row_mon_data->monitor_head); +} + +static int __cam_dma_fence_release(int32_t dma_row_idx) +{ + struct dma_fence *dma_fence; + struct cam_dma_fence_row *row; + int rc; + + spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]); + row = &g_cam_dma_fence_dev->rows[dma_row_idx]; + dma_fence = row->fence; + + if (row->state == CAM_DMA_FENCE_STATE_INVALID) { + CAM_ERR(CAM_DMA_FENCE, "Invalid row index: %u, state: %u", + dma_row_idx, row->state); + rc = -EINVAL; + goto 
monitor_dump; + } + + if (row->state == CAM_DMA_FENCE_STATE_ACTIVE) { + CAM_WARN(CAM_DMA_FENCE, + "Unsignaled fence being released name: %s seqno: %llu fd: %d", + row->name, dma_fence->seqno, row->fd); + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(dma_row_idx, + &g_cam_dma_fence_dev->dev_lock, + g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_SIGNAL); + } + + /* Ensure dma fence is signaled prior to release */ + if (!row->ext_dma_fence) { + rc = __cam_dma_fence_signal_fence(dma_fence, -ECANCELED); + if ((!rc) && (row->state == CAM_DMA_FENCE_STATE_SIGNALED)) + CAM_WARN(CAM_DMA_FENCE, + "Unsignaled fence being released name: %s seqno: %llu fd: %d, row was marked as signaled", + row->name, dma_fence->seqno, row->fd); + } + + CAM_DBG(CAM_DMA_FENCE, + "Releasing dma fence with fd: %d[%s] row_idx: %u current ref_cnt: %u", + row->fd, row->name, dma_row_idx, kref_read(&dma_fence->refcount)); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) { + /* Update monitor entries & save data before row memset to 0 */ + cam_generic_fence_update_monitor_array(dma_row_idx, + &g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_DESTROY); + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE_DUMP, &cam_sync_monitor_mask)) + __cam_dma_fence_dump_monitor_array(dma_row_idx); + __cam_dma_fence_save_previous_monitor_data(dma_row_idx); + } + + /* putref on dma fence */ + dma_fence_put(dma_fence); + + /* deinit row */ + memset(row, 0, sizeof(struct cam_dma_fence_row)); + clear_bit(dma_row_idx, g_cam_dma_fence_dev->bitmap); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]); + + return 0; + +monitor_dump: + __cam_dma_fence_dump_monitor_array(dma_row_idx); + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]); + return rc; +} + +static int __cam_dma_fence_release_fd(int fd) +{ + int32_t idx; + struct dma_fence *dma_fence = NULL; + + dma_fence = 
__cam_dma_fence_find_fence_in_table(fd, &idx); + if (IS_ERR_OR_NULL(dma_fence)) { + CAM_ERR(CAM_DMA_FENCE, "Failed to find dma fence for fd: %d", fd); + return -EINVAL; + } + + return __cam_dma_fence_release(idx); +} + +static int __cam_dma_fence_release_row( + int32_t dma_fence_row_idx) +{ + if ((dma_fence_row_idx < 0) || + (dma_fence_row_idx >= CAM_DMA_FENCE_MAX_FENCES)) { + CAM_ERR(CAM_DMA_FENCE, "dma fence idx: %d is invalid", + dma_fence_row_idx); + return -EINVAL; + } + + return __cam_dma_fence_release(dma_fence_row_idx); +} + +int cam_dma_fence_release( + struct cam_dma_fence_release_params *release_params) +{ + if (release_params->use_row_idx) + return __cam_dma_fence_release_row(release_params->u.dma_row_idx); + else + return __cam_dma_fence_release_fd(release_params->u.dma_fence_fd); +} + +void cam_dma_fence_close(void) +{ + int i; + struct cam_dma_fence_row *row = NULL; + + mutex_lock(&g_cam_dma_fence_dev->dev_lock); + for (i = 0; i < CAM_DMA_FENCE_MAX_FENCES; i++) { + spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]); + + row = &g_cam_dma_fence_dev->rows[i]; + if (row->state != CAM_DMA_FENCE_STATE_INVALID) { + CAM_DBG(CAM_DMA_FENCE, + "Releasing dma fence seqno: %llu associated with fd: %d[%s] ref_cnt: %u", + row->fence->seqno, row->fd, row->name, + kref_read(&row->fence->refcount)); + + /* If registered for cb, remove cb */ + if (row->cb_registered_for_sync) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(i, + NULL, + g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_UNREGISTER_CB); + dma_fence_remove_callback(row->fence, &row->fence_cb); + } + + /* Signal and put if the dma fence is created from camera */ + if (!row->ext_dma_fence) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(i, + NULL, + g_cam_dma_fence_dev->monitor_data, + CAM_FENCE_OP_SIGNAL); + __cam_dma_fence_signal_fence(row->fence, -EADV); + } + 
dma_fence_put(row->fence); + memset(row, 0, sizeof(struct cam_dma_fence_row)); + clear_bit(i, g_cam_dma_fence_dev->bitmap); + } + spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]); + } + + if (g_cam_dma_fence_dev->monitor_data) { + for (i = 0; i < CAM_DMA_FENCE_TABLE_SZ; i++) { + CAM_MEM_FREE(g_cam_dma_fence_dev->monitor_data[i]); + g_cam_dma_fence_dev->monitor_data[i] = NULL; + } + } + CAM_MEM_FREE(g_cam_dma_fence_dev->monitor_data); + g_cam_dma_fence_dev->monitor_data = NULL; + + mutex_unlock(&g_cam_dma_fence_dev->dev_lock); + CAM_DBG(CAM_DMA_FENCE, "Close on Camera DMA fence driver"); +} + +void cam_dma_fence_open(void) +{ + mutex_lock(&g_cam_dma_fence_dev->dev_lock); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) { + g_cam_dma_fence_dev->monitor_data = CAM_MEM_ZALLOC( + sizeof(struct cam_generic_fence_monitor_data *) * + CAM_DMA_FENCE_TABLE_SZ, GFP_KERNEL); + if (!g_cam_dma_fence_dev->monitor_data) { + CAM_WARN(CAM_DMA_FENCE, "Failed to allocate memory %d", + sizeof(struct cam_generic_fence_monitor_data *) * + CAM_DMA_FENCE_TABLE_SZ); + } + } + + /* DMA fence seqno reset */ + atomic64_set(&g_cam_dma_fence_seq_no, 0); + mutex_unlock(&g_cam_dma_fence_dev->dev_lock); + CAM_DBG(CAM_DMA_FENCE, "Camera DMA fence driver opened"); +} + +int cam_dma_fence_driver_init(void) +{ + int i; + + g_cam_dma_fence_dev = CAM_MEM_ZALLOC(sizeof(struct cam_dma_fence_device), GFP_KERNEL); + if (!g_cam_dma_fence_dev) + return -ENOMEM; + + mutex_init(&g_cam_dma_fence_dev->dev_lock); + for (i = 0; i < CAM_DMA_FENCE_MAX_FENCES; i++) + spin_lock_init(&g_cam_dma_fence_dev->row_spinlocks[i]); + + bitmap_zero(g_cam_dma_fence_dev->bitmap, CAM_DMA_FENCE_MAX_FENCES); + + /* zero will be considered an invalid slot */ + set_bit(0, g_cam_dma_fence_dev->bitmap); + + g_cam_dma_fence_dev->dma_fence_context = dma_fence_context_alloc(1); + + CAM_DBG(CAM_DMA_FENCE, "Camera DMA fence driver initialized"); + return 0; +} + +void cam_dma_fence_driver_deinit(void) +{ + 
CAM_MEM_FREE(g_cam_dma_fence_dev); + g_cam_dma_fence_dev = NULL; + CAM_DBG(CAM_DMA_FENCE, "Camera DMA fence driver deinitialized"); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_dma_fence.h b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_dma_fence.h new file mode 100644 index 0000000000..71928e0383 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_dma_fence.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef __CAM_SYNC_DMA_FENCE_H__ +#define __CAM_SYNC_DMA_FENCE_H__ + +#include +#include +#include +#include +#include + +#include "cam_sync.h" +#include "cam_debug_util.h" + +#define CAM_DMA_FENCE_MAX_FENCES 128 +#define CAM_DMA_FENCE_NAME_LEN 128 +#define CAM_DMA_FENCE_TABLE_SZ (CAM_DMA_FENCE_MAX_FENCES / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ) + +/* DMA fence state */ +enum cam_dma_fence_state { + CAM_DMA_FENCE_STATE_INVALID, + CAM_DMA_FENCE_STATE_ACTIVE, + CAM_DMA_FENCE_STATE_SIGNALED, +}; + +/** + * struct cam_dma_fence_release_params - DMA release payload + * Based on the flag row_idx or fd is consumed + * + * @dma_row_idx : DMA fence row idx + * @dma_fence_fd : DMA fence fd + * @use_row_idx : Use row idx + */ +struct cam_dma_fence_release_params { + union { + int32_t dma_row_idx; + int32_t dma_fence_fd; + } u; + bool use_row_idx; +}; + +/** + * struct cam_dma_fence_signal_sync_obj - DMA -> sync signal info + * Payload to signal sync on a dma fence + * being signaled + * + * @status : DMA fence status + * @fd : DMA fence fd if any + */ +struct cam_dma_fence_signal_sync_obj { + int32_t status; + int32_t fd; +}; + +/* DMA fence callback function type */ +typedef int (*cam_sync_callback_for_dma_fence)(int32_t sync_obj, + struct cam_dma_fence_signal_sync_obj *signal_sync_obj); + +/** + * struct cam_dma_fence_create_sync_obj_payload - + * Payload to create sync for a dma fence + * + * @dma_fence_row_idx 
: DMA fence row idx + * @fd : DMA fence fd + * @sync_created_with_dma : Set if dma fence and sync obj are being + * created in a single IOCTL call + */ +struct cam_dma_fence_create_sync_obj_payload { + int32_t dma_fence_row_idx; + int32_t fd; + bool sync_created_with_dma; +}; + +/** + * @brief: Signal a dma fence fd [userspace API] + * + * @param signal_dma_fence: Info on DMA fence to be signaled + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_dma_fence_signal_fd( + struct cam_dma_fence_signal *signal_dma_fence); + +/** + * @brief: Signal a dma fence when sync obj is signaled + * + * @param dma_fence_row_idx : DMA fence row idx + * @param signal_dma_fence : Info on DMA fence to be signaled + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_dma_fence_internal_signal(int32_t dma_fence_row_idx, + struct cam_dma_fence_signal *signal_dma_fence); + +/** + * @brief: Create a dma fence fd + * + * @param name : DMA fence name, optional param + * will accommodate names of length + * CAM_DMA_FENCE_NAME_LEN + * @output dma_fence_fd : DMA fence fd + * @output dma_fence_row_idx : Row Idx corresponding to DMA fence in the table + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_dma_fence_create_fd( + int32_t *dma_fence_fd, int32_t *dma_fence_row_idx, const char *name); + +/** + * @brief: Release a dma fence + * + * @param release_params : dma fence info to be released + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_dma_fence_release( + struct cam_dma_fence_release_params *release_params); + +/** + * @brief: Gets the dma fence from a fd, increments refcnt + * + * @param fd : File descriptor + * @output dma_fence_row_idx : Row idx pertaining to this dma fence + * + * @return Status of operation. Error or valid fence. 
+ */ +struct dma_fence *cam_dma_fence_get_fence_from_fd(int32_t fd, + int32_t *dma_fence_row_idx); + +/** + * @brief: DMA fence register cb + * + * @param sync_obj : Sync object + * @param dma_fence_idx : DMA fence row idx + * @param sync_cb : Sync object callback + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_dma_fence_register_cb(int32_t *sync_obj, + int32_t *dma_fence_row_idx, cam_sync_callback_for_dma_fence sync_cb); + +/** + * @brief: get/put on dma fence + * + * @get_or_put : True for get, false for put + * @param dma_fence_row_idx : Idx in the dma fence table pertaining to + * the dma fence on which get/put ref is invoked + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_dma_fence_get_put_ref(bool get_or_put, int32_t dma_fence_row_idx); + +/** + * @brief: dma fence driver open + * + */ +void cam_dma_fence_open(void); + +/** + * @brief: dma fence driver close + * + */ +void cam_dma_fence_close(void); + +/** + * @brief: dma fence driver initialize + * + */ +int cam_dma_fence_driver_init(void); + +/** + * @brief: dma fence driver deinit + * + */ +void cam_dma_fence_driver_deinit(void); + +#endif /* __CAM_SYNC_DMA_FENCE_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_private.h b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_private.h new file mode 100644 index 0000000000..0ee54641f5 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_private.h @@ -0,0 +1,380 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __CAM_SYNC_PRIVATE_H__ +#define __CAM_SYNC_PRIVATE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_sync_api.h" +#include "cam_sync_dma_fence.h" + +#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) +#include "cam_sync_synx.h" +#endif + +#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) || IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE) +#include +#endif + +#ifdef CONFIG_CAM_SYNC_DBG +#define CDBG(fmt, args...) pr_err(fmt, ##args) +#else +#define CDBG(fmt, args...) pr_debug(fmt, ##args) +#endif + +#define CAM_SYNC_OBJ_NAME_LEN 64 +#define CAM_SYNC_MAX_OBJS 2048 +#define CAM_GENERIC_FENCE_BATCH_MAX 10 +#define CAM_SYNC_MAX_V4L2_EVENTS 250 +#define CAM_SYNC_DEBUG_FILENAME "cam_debug" +#define CAM_SYNC_DEBUG_BASEDIR "cam" +#define CAM_SYNC_DEBUG_BUF_SIZE 32 +#define CAM_SYNC_PAYLOAD_WORDS 2 +#define CAM_SYNC_NAME "cam_sync" +#define CAM_SYNC_WORKQUEUE_NAME "HIPRIO_SYNC_WORK_QUEUE" + +#define CAM_SYNC_TYPE_INDV 0 +#define CAM_SYNC_TYPE_GROUP 1 + +/* Number of monitor table elements */ +#define CAM_SYNC_MONITOR_TABLE_SIZE 16 +/* Number of monitored objects per table entry */ +#define CAM_SYNC_MONITOR_TABLE_ENTRY_SZ (CAM_SYNC_MAX_OBJS / CAM_SYNC_MONITOR_TABLE_SIZE) +#define CAM_SYNC_MONITOR_MAX_ENTRIES 30 +#define CAM_SYNC_INC_MONITOR_HEAD(head, ret) \ + div_u64_rem(atomic64_add_return(1, head),\ + CAM_SYNC_MONITOR_MAX_ENTRIES, (ret)) +#define CAM_SYNC_MONITOR_GET_DATA(idx) \ + (sync_dev->mon_data[idx / CAM_SYNC_MONITOR_TABLE_ENTRY_SZ] + \ + (idx % CAM_SYNC_MONITOR_TABLE_ENTRY_SZ)) +#define CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ CAM_SYNC_MONITOR_TABLE_ENTRY_SZ +#define CAM_GENERIC_MONITOR_GET_DATA(mon_data, idx) \ + ((mon_data)[idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ] + \ + (idx % CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)) + +/** + * Feature is enabled by setting BIT(fence_type), this will trigger the fence + * dumps on any error, to explicitly trigger a dump on every fence release + * below BIT(fence_type_dump) needs to 
be used at the same time + */ +#define CAM_GENERIC_FENCE_DUMP_ALWAYS 0x10 +#define CAM_GENERIC_FENCE_TYPE_SYNC_OBJ_DUMP \ + (CAM_GENERIC_FENCE_TYPE_SYNC_OBJ + (CAM_GENERIC_FENCE_DUMP_ALWAYS)) +#define CAM_GENERIC_FENCE_TYPE_DMA_FENCE_DUMP \ + (CAM_GENERIC_FENCE_TYPE_DMA_FENCE + (CAM_GENERIC_FENCE_DUMP_ALWAYS)) +#define CAM_GENERIC_FENCE_TYPE_SYNX_OBJ_DUMP \ + (CAM_GENERIC_FENCE_TYPE_SYNX_OBJ + (CAM_GENERIC_FENCE_DUMP_ALWAYS)) + +/** + * enum sync_type - Enum to indicate the type of sync object, + * i.e. individual or group. + * + * @SYNC_TYPE_INDV : Object is an individual sync object + * @SYNC_TYPE_GROUP : Object is a group sync object + */ +enum sync_type { + SYNC_TYPE_INDV, + SYNC_TYPE_GROUP +}; + +/** + * enum sync_list_clean_type - Enum to indicate the type of list clean action + * to be performed, i.e. specific sync ID or all list sync ids. + * + * @SYNC_CLEAN_ONE : Specific object to be cleaned in the list + * @SYNC_CLEAN_ALL : Clean all objects in the list + */ +enum sync_list_clean_type { + SYNC_LIST_CLEAN_ONE, + SYNC_LIST_CLEAN_ALL +}; + +/** + * struct sync_parent_info - Single node of information about a parent + * of a sync object, usually part of the parents linked list + * + * @sync_id : Sync object id of parent + * @list : List member used to append this node to a linked list + */ +struct sync_parent_info { + int32_t sync_id; + struct list_head list; +}; + +/** + * struct sync_child_info - Single node of information about a child + * of a sync object, usually part of the children linked list + * + * @sync_id : Sync object id of child + * @list : List member used to append this node to a linked list + */ +struct sync_child_info { + int32_t sync_id; + struct list_head list; +}; + + +/** + * struct sync_callback_info - Single node of information about a kernel + * callback registered on a sync object + * + * @callback_func : Callback function, registered by client driver + * @cb_data : Callback data, registered by client driver + * @status : Status 
with which callback will be invoked in client + * @sync_obj : Sync id of the object for which callback is registered + * @workq_scheduled_ts : workqueue scheduled timestamp + * @cb_dispatch_work : Work representing the call dispatch + * @list : List member used to append this node to a linked list + */ +struct sync_callback_info { + sync_callback callback_func; + void *cb_data; + int status; + int32_t sync_obj; + ktime_t workq_scheduled_ts; + struct work_struct cb_dispatch_work; + struct list_head list; +}; + +/** + * struct sync_user_payload - Single node of information about a user space + * payload registered from user space + * + * @payload_data : Payload data, opaque to kernel + * @list : List member used to append this node to a linked list + */ +struct sync_user_payload { + uint64_t payload_data[CAM_SYNC_PAYLOAD_WORDS]; + struct list_head list; +}; + +/** + * struct sync_dma_fence_info - DMA fence info associated with this sync obj + * + * @dma_fence_fd : DMA fence fd + * @dma_fence_row_idx : Index of the row corresponding to this dma fence + * in the dma fence table + * @sync_created_with_dma : If sync obj and dma fence are created together + */ +struct sync_dma_fence_info { + int32_t dma_fence_fd; + int32_t dma_fence_row_idx; + bool sync_created_with_dma; +}; + +/** + * enum cam_fence_op - Enum to indicate the type of operation performed + * + * @CAM_FENCE_OP_CREATE : Created obj + * @CAM_FENCE_OP_REGISTER_CB : Successful callback registration + * @CAM_FENCE_OP_SKIP_REGISTER_CB : Callback registration skipped + * @CAM_FENCE_OP_ALREADY_REGISTERED_CB : Callback already registered + * @CAM_FENCE_OP_SIGNAL : Signaled obj + * @CAM_FENCE_OP_UNREGISTER_ON_SIGNAL : Callback unregistered after signaling + * @CAM_FENCE_OP_UNREGISTER_CB : Callback unregistered + * @CAM_FENCE_OP_DESTROY : Destroyed obj + */ +enum cam_fence_op { + CAM_FENCE_OP_CREATE, + CAM_FENCE_OP_REGISTER_CB, + CAM_FENCE_OP_SKIP_REGISTER_CB, + CAM_FENCE_OP_ALREADY_REGISTERED_CB, + 
CAM_FENCE_OP_SIGNAL, + CAM_FENCE_OP_UNREGISTER_ON_SIGNAL, + CAM_FENCE_OP_UNREGISTER_CB, + CAM_FENCE_OP_DESTROY, +}; + +/** + * struct cam_generic_fence_monitor_entry - Single operation sync data + * + * @timestamp : Timestamp of op + * @op : Operation id + */ +struct cam_generic_fence_monitor_entry { + struct timespec64 timestamp; + enum cam_fence_op op; +}; + +/** + * struct cam_generic_fence_monitor_data - All operations data from current & + * previous use of a fence object + * + * @monitor_head : Executed operations count + * @prev_name : Previous name of this fence obj + * @prev_type : Previous type of this fence obj + * @prev_obj_id : Previous handle of this fence obj + * @prev_sync_id : Previous handle of this fence's associated sync obj + * @prev_remaining : Previous count of remaining children that not been + * signaled + * @prev_state : Previous state (INVALID, ACTIVE, SIGNALED_SUCCESS or + * SIGNALED_ERROR) + * @prev_monitor_head : Previous executed ops count + * @swap_monitor_entries : Flag indicating which entry table should be used + * as current/previous. Used to avoid copying. 
+ * @monitor_entries : Op info entry table + * @prev_monitor_entries : Previous op info entry table + */ +struct cam_generic_fence_monitor_data { + atomic64_t monitor_head; + char prev_name[CAM_DMA_FENCE_NAME_LEN]; + enum sync_type prev_type; + int32_t prev_obj_id; + int32_t prev_sync_id; + uint32_t prev_remaining; + uint32_t prev_state; + uint64_t prev_monitor_head; + bool swap_monitor_entries; + struct cam_generic_fence_monitor_entry monitor_entries[ + CAM_SYNC_MONITOR_MAX_ENTRIES]; + struct cam_generic_fence_monitor_entry prev_monitor_entries[ + CAM_SYNC_MONITOR_MAX_ENTRIES]; +}; + +/** + * struct cam_generic_fence_monitor_obj_info - Single object monitor info + * + * @name : Name of this fence obj + * @sync_type : Type of this fence obj + * @obj_id : Handle of this fence obj + * @sync_id : Handle of this fence's associated sync obj + * @state : Previous state (INVALID, ACTIVE, SIGNALED_SUCCESS or + * SIGNALED_ERROR) + * @remaining : Count of remaining children that not been signaled + * @ref_cnt : Ref count of the number of usage of the fence. 
+ * @fence_type : Fence type - DMA/Sync/Synx + * @monitor_data : Fence operations data + * @monitor_entries : Op info entry table + * @prev_monitor_entries : Previous op info entry table + */ +struct cam_generic_fence_monitor_obj_info { + char *name; + enum sync_type sync_type; + int32_t obj_id; + int32_t sync_id; + uint32_t state; + uint32_t remaining; + uint32_t ref_cnt; + uint32_t fence_type; + struct cam_generic_fence_monitor_data *monitor_data; + struct cam_generic_fence_monitor_entry *monitor_entries; + struct cam_generic_fence_monitor_entry *prev_monitor_entries; +}; + +/** + * struct sync_synx_obj_info - Synx object info associated with this sync obj + * + * @synx_obj : Synx object handle + * @synx_obj_row_idx : Index of the row corresponding to this synx obj + * in the synx obj table + * @sync_created_with_synx : If sync obj and synx obj are created together + */ +struct sync_synx_obj_info { + uint32_t synx_obj; + int32_t synx_obj_row_idx; + bool sync_created_with_synx; +}; + +/** + * struct sync_table_row - Single row of information about a sync object, used + * for internal book keeping in the sync driver + * + * @name : Optional string representation of the sync object + * @type : Type of the sync object (individual or group) + * @sync_id : Integer id representing this sync object + * @parents_list : Linked list of parents of this sync object + * @children_list : Linked list of children of this sync object + * @state : State (INVALID, ACTIVE, SIGNALED_SUCCESS or + * SIGNALED_ERROR) + * @remaining : Count of remaining children that have not been signaled + * @signaled : Completion variable on which block calls will wait + * @callback_list : Linked list of kernel callbacks registered + * @user_payload_list : Linked list of user space payloads registered + * @ref_cnt : ref count of the number of usage of the fence. 
+ * @ext_fence_mask : Mask to indicate associated external fence types + * @dma_fence_info : dma fence info if associated + * @synx_obj_info : synx obj info if associated + */ +struct sync_table_row { + char name[CAM_SYNC_OBJ_NAME_LEN]; + enum sync_type type; + int32_t sync_id; + /* List of parents, which are merged objects */ + struct list_head parents_list; + /* List of children, which constitute the merged object */ + struct list_head children_list; + uint32_t state; + uint32_t remaining; + struct completion signaled; + struct list_head callback_list; + struct list_head user_payload_list; + atomic_t ref_cnt; + unsigned long ext_fence_mask; + struct sync_dma_fence_info dma_fence_info; + struct sync_synx_obj_info synx_obj_info; +}; + +/** + * struct cam_signalable_info - Information for a single sync object that is + * ready to be signaled + * + * @sync_obj : Sync object id of signalable object + * @status : Status with which to signal + * @list : List member used to append this node to a linked list + */ +struct cam_signalable_info { + int32_t sync_obj; + uint32_t status; + struct list_head list; +}; + +/** + * struct sync_device - Internal struct to book keep sync driver details + * + * @vdev : Video device + * @v4l2_dev : V4L2 device + * @sync_table : Table of all sync objects allocated when driver initializes + * @row_spinlocks : Spinlock array, one for each row in the table + * @table_lock : Mutex used to lock the table + * @open_cnt : Count of file open calls made on the sync driver + * @dentry : Debugfs entry + * @work_queue : Work queue used for dispatching kernel callbacks + * @cam_sync_eventq : Event queue used to dispatch user payloads to user space + * @bitmap : Bitmap representation of all sync objects + * @mon_data : Objects monitor data + * @params : Parameters for synx call back registration + * @version : version support + */ +struct sync_device { + struct video_device *vdev; + struct v4l2_device v4l2_dev; + struct sync_table_row *sync_table; + 
spinlock_t row_spinlocks[CAM_SYNC_MAX_OBJS]; + struct mutex table_lock; + int open_cnt; + struct dentry *dentry; + struct workqueue_struct *work_queue; + struct v4l2_fh *cam_sync_eventq; + spinlock_t cam_sync_eventq_lock; + DECLARE_BITMAP(bitmap, CAM_SYNC_MAX_OBJS); + struct cam_generic_fence_monitor_data **mon_data; +#if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX) + struct synx_register_params params; +#endif + uint32_t version; +}; + + +#endif /* __CAM_SYNC_PRIVATE_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_synx.c b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_synx.c new file mode 100644 index 0000000000..f6983f8752 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_synx.c @@ -0,0 +1,906 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_sync_synx.h" +#include "cam_sync_util.h" +#include "cam_mem_mgr_api.h" + +extern unsigned long cam_sync_monitor_mask; + +/** + * struct cam_synx_obj_row - Synx obj row + */ +struct cam_synx_obj_row { + char name[CAM_SYNX_OBJ_NAME_LEN]; + uint32_t synx_obj; + enum cam_synx_obj_state state; + cam_sync_callback_for_synx_obj sync_cb; + bool cb_registered_for_sync; + bool sync_signal_synx; + int32_t sync_obj; +}; + +/** + * struct cam_synx_obj_device - Synx obj device + */ +struct cam_synx_obj_device { + struct cam_synx_obj_row rows[CAM_SYNX_MAX_OBJS]; + spinlock_t row_spinlocks[CAM_SYNX_MAX_OBJS]; + struct synx_session *session_handle; + struct mutex dev_lock; + DECLARE_BITMAP(bitmap, CAM_SYNX_MAX_OBJS); + struct cam_generic_fence_monitor_data **monitor_data; +}; + +static struct cam_synx_obj_device *g_cam_synx_obj_dev; +static char cam_synx_session_name[64] = "Camera_Generic_Synx_Session"; + + +static inline struct cam_generic_fence_monitor_entry * + __cam_synx_obj_get_monitor_entries(int idx) +{ + struct cam_generic_fence_monitor_data *monitor_data; + + monitor_data = 
CAM_GENERIC_MONITOR_GET_DATA( + g_cam_synx_obj_dev->monitor_data, idx); + if (monitor_data->swap_monitor_entries) + return monitor_data->prev_monitor_entries; + else + return monitor_data->monitor_entries; +} + +static inline struct cam_generic_fence_monitor_entry * + __cam_synx_obj_get_prev_monitor_entries(int idx) +{ + struct cam_generic_fence_monitor_data *monitor_data; + + monitor_data = CAM_GENERIC_MONITOR_GET_DATA( + g_cam_synx_obj_dev->monitor_data, idx); + if (monitor_data->swap_monitor_entries) + return monitor_data->monitor_entries; + else + return monitor_data->prev_monitor_entries; +} + +static int __cam_synx_obj_map_sync_status_util(uint32_t sync_status, + uint32_t *out_synx_status) +{ + if (!out_synx_status) + return -EINVAL; + + switch (sync_status) { + case CAM_SYNC_STATE_SIGNALED_SUCCESS: + *out_synx_status = SYNX_STATE_SIGNALED_SUCCESS; + break; + case CAM_SYNC_STATE_SIGNALED_CANCEL: + default: + *out_synx_status = SYNX_STATE_SIGNALED_CANCEL; + break; + } + + return 0; +} + +static void __cam_synx_obj_save_previous_monitor_data(int32_t row_idx) +{ + struct cam_generic_fence_monitor_data *row_mon_data; + struct cam_synx_obj_row *row; + + if (!g_cam_synx_obj_dev->monitor_data) + return; + + row = &g_cam_synx_obj_dev->rows[row_idx]; + row_mon_data = CAM_GENERIC_MONITOR_GET_DATA( + g_cam_synx_obj_dev->monitor_data, row_idx); + + /* save current usage details into prev variables */ + strscpy(row_mon_data->prev_name, row->name, CAM_SYNX_OBJ_NAME_LEN); + row_mon_data->prev_obj_id = row->synx_obj; + row_mon_data->prev_sync_id = row->sync_obj; + row_mon_data->prev_state = row->state; + row_mon_data->prev_monitor_head = atomic64_read(&row_mon_data->monitor_head); + row_mon_data->swap_monitor_entries = !row_mon_data->swap_monitor_entries; +} + +static void __cam_synx_obj_dump_monitor_array(int32_t row_idx) +{ + struct cam_generic_fence_monitor_obj_info obj_info; + struct cam_synx_obj_row *row; + + if (!g_cam_synx_obj_dev->monitor_data || + 
!test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) + return; + + if (!CAM_GENERIC_MONITOR_GET_DATA(g_cam_synx_obj_dev->monitor_data, + row_idx)->prev_obj_id) + return; + + row = &g_cam_synx_obj_dev->rows[row_idx]; + + obj_info.name = row->name; + obj_info.obj_id = row->synx_obj; + obj_info.state = row->state; + obj_info.monitor_data = CAM_GENERIC_MONITOR_GET_DATA( + g_cam_synx_obj_dev->monitor_data, row_idx); + obj_info.fence_type = CAM_GENERIC_FENCE_TYPE_SYNX_OBJ; + obj_info.sync_id = row->sync_obj; + obj_info.monitor_entries = + __cam_synx_obj_get_monitor_entries(row_idx); + obj_info.prev_monitor_entries = + __cam_synx_obj_get_prev_monitor_entries(row_idx); + cam_generic_fence_dump_monitor_array(&obj_info); +} + +static void __cam_synx_obj_signal_cb(u32 h_synx, int status, void *data) +{ + struct cam_synx_obj_signal_sync_obj signal_sync_obj; + struct cam_synx_obj_row *synx_obj_row = NULL; + int32_t idx; + + if (!data) { + CAM_ERR(CAM_SYNX, + "Invalid data passed to synx obj : No callback function set."); + return; + } + + synx_obj_row = (struct cam_synx_obj_row *)data; + + /* If this synx obj is signaled by sync obj, skip cb */ + if (synx_obj_row->sync_signal_synx) + return; + + if (synx_obj_row->synx_obj != h_synx) { + CAM_ERR(CAM_SYNX, + "Synx obj: %d callback does not match synx obj: %d in sync table.", + h_synx, synx_obj_row->synx_obj); + return; + } + + if (synx_obj_row->state == CAM_SYNX_OBJ_STATE_INVALID) { + CAM_ERR(CAM_SYNX, + "Synx obj :%d is in invalid state: %d", + synx_obj_row->synx_obj, synx_obj_row->state); + return; + } + + CAM_DBG(CAM_SYNX, "Synx obj: %d signaled, signal sync obj: %d", + synx_obj_row->synx_obj, synx_obj_row->sync_obj); + + if ((synx_obj_row->cb_registered_for_sync) && (synx_obj_row->sync_cb)) { + signal_sync_obj.synx_obj = synx_obj_row->synx_obj; + switch (status) { + case SYNX_STATE_SIGNALED_SUCCESS: + signal_sync_obj.status = CAM_SYNC_STATE_SIGNALED_SUCCESS; + break; + case SYNX_STATE_SIGNALED_CANCEL: + 
signal_sync_obj.status = CAM_SYNC_STATE_SIGNALED_CANCEL; + break; + default: + CAM_WARN(CAM_SYNX, + "Synx signal status %d is neither SUCCESS nor CANCEL, custom code?", + status); + signal_sync_obj.status = CAM_SYNC_STATE_SIGNALED_ERROR; + break; + } + synx_obj_row->state = CAM_SYNX_OBJ_STATE_SIGNALED; + synx_obj_row->sync_cb(synx_obj_row->sync_obj, &signal_sync_obj); + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, + &cam_sync_monitor_mask)) { + cam_synx_obj_find_obj_in_table(synx_obj_row->synx_obj, &idx); + cam_generic_fence_update_monitor_array(idx, + &g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_UNREGISTER_ON_SIGNAL); + } + } + +} + +/* + * Synx APIs need to be invoked in non atomic context, + * all these utils invoke synx driver + */ +static inline int __cam_synx_signal_util( + uint32_t synx_hdl, uint32_t signal_status) +{ + return synx_signal(g_cam_synx_obj_dev->session_handle, synx_hdl, signal_status); +} + +static inline int __cam_synx_deregister_cb_util( + uint32_t synx_hdl, void *data) +{ + struct synx_callback_params cb_params; + + cb_params.userdata = data; + cb_params.cancel_cb_func = NULL; + cb_params.h_synx = synx_hdl; + cb_params.cb_func = __cam_synx_obj_signal_cb; + + return synx_cancel_async_wait(g_cam_synx_obj_dev->session_handle, &cb_params); +} + +static inline int __cam_synx_create_hdl_util( + struct synx_create_params *params) +{ + return synx_create(g_cam_synx_obj_dev->session_handle, params); +} + +static inline int __cam_synx_release_hdl_util(uint32_t synx_hdl) +{ + return synx_release(g_cam_synx_obj_dev->session_handle, synx_hdl); +} + +static inline int __cam_synx_import_hdl_util( + struct synx_import_params *params) +{ + return synx_import(g_cam_synx_obj_dev->session_handle, params); +} + +static inline int __cam_synx_register_cb_util( + struct synx_callback_params *cb_params) +{ + return synx_async_wait(g_cam_synx_obj_dev->session_handle, cb_params); +} + +static int __cam_synx_obj_release(int32_t 
row_idx) +{ + int rc; + bool deregister_cb = false; + uint32_t synx_hdl; + struct cam_synx_obj_row *row; + + spin_lock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + row = &g_cam_synx_obj_dev->rows[row_idx]; + synx_hdl = row->synx_obj; + + if (row->state == CAM_SYNX_OBJ_STATE_ACTIVE) { + CAM_DBG(CAM_SYNX, + "Unsignaled synx obj being released name: %s synx_obj:%d", + row->name, row->synx_obj); + + if (row->cb_registered_for_sync) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(row_idx, + &g_cam_synx_obj_dev->dev_lock, + g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_UNREGISTER_ON_SIGNAL); + deregister_cb = true; + } + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(row_idx, + &g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_SIGNAL); + + if (deregister_cb) { + spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + rc = __cam_synx_deregister_cb_util(synx_hdl, row); + spin_lock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + if (rc) { + CAM_DBG(CAM_SYNX, + "Failed to deregister cb for synx hdl: %u rc: %d", + synx_hdl, rc); + __cam_synx_obj_dump_monitor_array(row_idx); + } + } + } + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) { + /* Update monitor entries & save data before row memset to 0 */ + cam_generic_fence_update_monitor_array(row_idx, + &g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_DESTROY); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ_DUMP, &cam_sync_monitor_mask)) + __cam_synx_obj_dump_monitor_array(row_idx); + __cam_synx_obj_save_previous_monitor_data(row_idx); + } + + CAM_DBG(CAM_SYNX, + "Releasing synx_obj: %d[%s] row_idx: %u", row->synx_obj, row->name, row_idx); + + /* deinit row */ + memset(row, 0, sizeof(struct cam_synx_obj_row)); + clear_bit(row_idx, g_cam_synx_obj_dev->bitmap); + 
spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + + return __cam_synx_release_hdl_util(synx_hdl); +} + +static int __cam_synx_obj_find_free_idx(uint32_t *idx) +{ + int rc = 0; + bool bit; + + do { + *idx = find_first_zero_bit(g_cam_synx_obj_dev->bitmap, CAM_SYNX_MAX_OBJS); + if (*idx >= CAM_SYNX_MAX_OBJS) { + CAM_ERR(CAM_SYNC, + "Error: Unable to create synx, no free index"); + rc = -ENOMEM; + break; + } + + bit = test_and_set_bit(*idx, g_cam_synx_obj_dev->bitmap); + } while (bit); + + return rc; +} + +static void __cam_synx_obj_init_row(uint32_t idx, const char *name, + uint32_t synx_obj) +{ + struct cam_synx_obj_row *row; + + spin_lock_bh(&g_cam_synx_obj_dev->row_spinlocks[idx]); + row = &g_cam_synx_obj_dev->rows[idx]; + row->synx_obj = synx_obj; + row->state = CAM_SYNX_OBJ_STATE_ACTIVE; + strscpy(row->name, name, CAM_SYNX_OBJ_NAME_LEN); + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) { + cam_generic_fence_update_monitor_array(idx, + &g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_CREATE); + } + spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[idx]); +} + +static int __cam_synx_obj_release_row(int32_t row_idx) +{ + if ((row_idx < 0) || (row_idx >= CAM_SYNX_MAX_OBJS)) { + CAM_ERR(CAM_SYNX, "synx row idx: %d is invalid", row_idx); + return -EINVAL; + } + + return __cam_synx_obj_release(row_idx); +} + +int cam_synx_obj_find_obj_in_table(uint32_t synx_obj, int32_t *idx) +{ + int i, rc = -EINVAL; + struct cam_synx_obj_row *row = NULL; + + for (i = 0; i < CAM_SYNX_MAX_OBJS; i++) { + spin_lock_bh(&g_cam_synx_obj_dev->row_spinlocks[i]); + row = &g_cam_synx_obj_dev->rows[i]; + if ((row->state != CAM_SYNX_OBJ_STATE_INVALID) && + (row->synx_obj == synx_obj)) { + *idx = i; + spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[i]); + rc = 0; + break; + } + spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[i]); + } + + return rc; +} + +static int __cam_synx_obj_release_obj(uint32_t synx_obj, int32_t *idx) +{ 
+ if (cam_synx_obj_find_obj_in_table(synx_obj, idx)) { + CAM_ERR(CAM_SYNX, "Failed to find synx obj: %d", synx_obj); + return -EINVAL; + } + + return __cam_synx_obj_release(*idx); +} + +static int __cam_synx_obj_import(const char *name, + struct synx_import_params *params, int32_t *row_idx) +{ + int rc = -1; + uint32_t idx; + + if (__cam_synx_obj_find_free_idx(&idx)) + goto end; + + rc = __cam_synx_import_hdl_util(params); + if (rc) { + CAM_ERR(CAM_SYNX, "Synx import failed for fence : %p", + params->indv.fence); + goto free_idx; + } + + *row_idx = idx; + __cam_synx_obj_init_row(idx, name, *params->indv.new_h_synx); + + CAM_DBG(CAM_SYNX, "Imported synx obj handle: %d[%s] row_idx: %u", + *params->indv.new_h_synx, name, idx); + + return rc; + +free_idx: + clear_bit(idx, g_cam_synx_obj_dev->bitmap); +end: + return rc; +} + +static int __cam_synx_map_generic_flags_to_create(uint32_t generic_flags, + struct synx_create_params *params) +{ + if (!params) { + CAM_ERR(CAM_SYNX, "Create parameters missing"); + return -EINVAL; + } + + /* + * Create Global Always - remove after userspace optimizes and + * determines when global Vs local is needed + */ + params->flags |= SYNX_CREATE_GLOBAL_FENCE; + + return 0; +} + +static int __cam_synx_map_generic_flags_to_import(uint32_t generic_flags, + struct synx_import_indv_params *params) +{ + if (!params) { + CAM_ERR(CAM_SYNX, "Import parameters missing"); + return -EINVAL; + } + + /* + * Create Global Always - remove after userspace optimizes and + * determines when global Vs local is needed + */ + params->flags |= SYNX_IMPORT_GLOBAL_FENCE; + + return 0; +} + +int cam_synx_obj_create(const char *name, uint32_t flags, uint32_t *synx_obj, + int32_t *row_idx) +{ + int rc = -1; + uint32_t idx; + struct synx_create_params params; + + if (__cam_synx_obj_find_free_idx(&idx)) + goto end; + + params.fence = NULL; + params.name = name; + params.flags = 0; + params.h_synx = synx_obj; + + rc = __cam_synx_map_generic_flags_to_create(flags, ¶ms); + 
if (rc) { + CAM_ERR(CAM_SYNX, "Failed to generate create flags"); + goto free_idx; + } + + rc = __cam_synx_create_hdl_util(¶ms); + if (rc) { + CAM_ERR(CAM_SYNX, "Failed to create new synx handle rc: %d", rc); + goto free_idx; + } + + *row_idx = idx; + __cam_synx_obj_init_row(idx, name, *synx_obj); + + CAM_DBG(CAM_SYNX, "Created synx obj handle: %d[%s] row_idx: %u", + *synx_obj, name, idx); + + return rc; + +free_idx: + clear_bit(idx, g_cam_synx_obj_dev->bitmap); +end: + return rc; +} + +int cam_synx_obj_import_dma_fence(const char *name, uint32_t flags, void *fence, + uint32_t *synx_obj, int32_t *row_idx) +{ + struct synx_import_params params; + + if (!fence) { + CAM_ERR(CAM_SYNX, + "Importing DMA fence failed - fence pointer is NULL"); + return -EINVAL; + } + + params.indv.flags = 0; + params.indv.fence = fence; + params.indv.new_h_synx = synx_obj; + params.type = SYNX_IMPORT_INDV_PARAMS; + params.indv.flags |= SYNX_IMPORT_DMA_FENCE; + + if (__cam_synx_map_generic_flags_to_import(flags, ¶ms.indv)) { + CAM_ERR(CAM_SYNX, + "Importing DMA fence failed - invalid synx import flags"); + return -EINVAL; + } + + return __cam_synx_obj_import(name, ¶ms, row_idx); +} + +int cam_synx_obj_internal_signal(int32_t row_idx, + struct cam_synx_obj_signal *signal_synx_obj) +{ + int rc; + uint32_t signal_status; + bool deregister_cb = false; + struct cam_synx_obj_row *row = NULL; + + if ((row_idx < 0) || (row_idx >= CAM_SYNX_MAX_OBJS)) { + CAM_ERR(CAM_SYNX, "synx obj row idx: %d is invalid", + row_idx); + return -EINVAL; + } + + spin_lock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + row = &g_cam_synx_obj_dev->rows[row_idx]; + + /* Ensures sync obj cb is not invoked */ + row->sync_signal_synx = true; + + if (row->state != CAM_SYNX_OBJ_STATE_ACTIVE) { + CAM_ERR(CAM_SYNX, "synx obj: %u not in right state: %d to signal", + signal_synx_obj->synx_obj, row->state); + rc = -EINVAL; + goto monitor_dump; + } + + if (row->synx_obj != signal_synx_obj->synx_obj) { + CAM_WARN(CAM_SYNX, + 
"Trying to signal synx obj: %u in row: %u having a different synx obj: %u", + signal_synx_obj->synx_obj, row_idx, row->synx_obj); + rc = 0; + goto monitor_dump; + } + + rc = __cam_synx_obj_map_sync_status_util(signal_synx_obj->status, &signal_status); + if (rc) { + CAM_WARN(CAM_SYNX, + "Signaling undefined status: %d for synx obj: %d", + signal_synx_obj->status, signal_synx_obj->synx_obj); + } + + if (row->cb_registered_for_sync) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(row_idx, + &g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_UNREGISTER_ON_SIGNAL); + deregister_cb = true; + } + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(row_idx, + &g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_SIGNAL); + + row->state = CAM_SYNX_OBJ_STATE_SIGNALED; + spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + + if (deregister_cb) { + rc = __cam_synx_deregister_cb_util(signal_synx_obj->synx_obj, row); + if (rc) { + spin_lock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + CAM_ERR(CAM_SYNX, "Failed to deregister cb for synx: %u rc: %d", + signal_synx_obj->synx_obj, rc); + goto monitor_dump; + } + } + + rc = __cam_synx_signal_util(signal_synx_obj->synx_obj, signal_status); + if (rc) { + CAM_ERR(CAM_SYNX, "Failed to signal synx hdl: %u with status: %u rc: %d", + signal_synx_obj->synx_obj, signal_status, rc); + goto end; + } + + CAM_DBG(CAM_SYNX, "synx obj: %d signaled with status: %d rc: %d", + signal_synx_obj->synx_obj, signal_status, rc); + + return rc; + +monitor_dump: + __cam_synx_obj_dump_monitor_array(row_idx); + spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); +end: + return rc; +} + +int cam_synx_obj_release(struct cam_synx_obj_release_params *release_params) +{ + int rc; + int32_t idx = -1; + + if (release_params->use_row_idx) { + rc = 
__cam_synx_obj_release_row(release_params->u.synx_row_idx); + if (rc < 0) + __cam_synx_obj_dump_monitor_array(release_params->u.synx_row_idx); + } else { + rc = __cam_synx_obj_release_obj(release_params->u.synx_obj, &idx); + if ((rc < 0) && (idx >= 0)) + __cam_synx_obj_dump_monitor_array(idx); + } + return rc; +} + +int cam_synx_obj_signal_obj(struct cam_synx_obj_signal *signal_synx_obj) +{ + int rc; + uint32_t idx; + + rc = cam_synx_obj_find_obj_in_table(signal_synx_obj->synx_obj, &idx); + if (rc) { + CAM_ERR(CAM_SYNX, "Failed to find synx obj: %u", signal_synx_obj->synx_obj); + return -EINVAL; + } + + return cam_synx_obj_internal_signal(idx, signal_synx_obj); +} + +int cam_synx_obj_register_cb(int32_t *sync_obj, int32_t row_idx, + cam_sync_callback_for_synx_obj sync_cb) +{ + int rc = 0; + uint32_t synx_obj; + struct cam_synx_obj_row *row; + struct synx_callback_params cb_params; + + if (!sync_obj || !sync_cb) { + CAM_ERR(CAM_SYNX, "Invalid args sync_obj: %p sync_cb: %p", + sync_obj, sync_cb); + return -EINVAL; + } + + if ((row_idx < 0) || (row_idx >= CAM_SYNX_MAX_OBJS)) { + CAM_ERR(CAM_SYNX, "synx obj idx: %d is invalid", + row_idx); + return -EINVAL; + } + + spin_lock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + row = &g_cam_synx_obj_dev->rows[row_idx]; + synx_obj = row->synx_obj; + + if (row->state != CAM_SYNX_OBJ_STATE_ACTIVE) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(row_idx, + &g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_SKIP_REGISTER_CB); + CAM_ERR(CAM_SYNX, + "synx obj at idx: %d handle: %d is not active, current state: %d", + row_idx, row->synx_obj, row->state); + rc = -EINVAL; + goto monitor_dump; + } + + /** + * If the cb is already registered, return + */ + if (row->cb_registered_for_sync) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(row_idx, + 
&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_ALREADY_REGISTERED_CB); + CAM_WARN(CAM_SYNX, + "synx obj at idx: %d handle: %d has already registered a cb for sync: %d", + row_idx, row->synx_obj, row->sync_obj); + goto monitor_dump; + } + + row->sync_cb = sync_cb; + row->sync_obj = *sync_obj; + row->cb_registered_for_sync = true; + + cb_params.userdata = row; + cb_params.cancel_cb_func = NULL; + cb_params.h_synx = synx_obj; + cb_params.timeout_ms = SYNX_NO_TIMEOUT; + cb_params.cb_func = __cam_synx_obj_signal_cb; + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(row_idx, + &g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_REGISTER_CB); + + spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + + rc = __cam_synx_register_cb_util(&cb_params); + if (rc) { + CAM_ERR(CAM_SYNX, + "Failed to register cb for synx obj: %d rc: %d", synx_obj, rc); + return rc; + } + + CAM_DBG(CAM_SYNX, + "CB successfully registered for synx obj: %d for sync_obj: %d", + synx_obj, *sync_obj); + + return rc; + +monitor_dump: + __cam_synx_obj_dump_monitor_array(row_idx); + spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]); + return rc; +} + +int cam_synx_core_recovery( + enum cam_sync_synx_supported_cores cam_core_id) +{ + int rc; + enum synx_client_id client_id = SYNX_CLIENT_MAX; + + switch (cam_core_id) { + case CAM_ICP_0_SYNX_CORE: + client_id = SYNX_CLIENT_ICP_CTX0; + break; + default: + rc = -EINVAL; + goto err; + } + + rc = synx_recover(client_id); + if (rc) + goto err; + + CAM_DBG(CAM_SYNX, "Synx recovery for synx_client: %d[%d] success", + client_id, cam_core_id); + + return rc; + +err: + CAM_ERR(CAM_SYNX, "Failed to recover for synx_client: %d rc: %d", + client_id, rc); + return rc; +} + +int __cam_synx_init_session(void) +{ + struct synx_queue_desc queue_desc; + struct synx_initialization_params params; + + params.name = cam_synx_session_name; 
+ params.ptr = &queue_desc; + params.flags = SYNX_INIT_MAX; + params.id = SYNX_CLIENT_NATIVE; + g_cam_synx_obj_dev->session_handle = synx_initialize(¶ms); + + if (!g_cam_synx_obj_dev->session_handle) { + CAM_ERR(CAM_SYNX, "Synx session initialization failed"); + return -EINVAL; + } + + CAM_DBG(CAM_SYNX, "Synx session initialized: %p", + g_cam_synx_obj_dev->session_handle); + + return 0; +} + +void cam_synx_obj_open(void) +{ + mutex_lock(&g_cam_synx_obj_dev->dev_lock); + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) { + g_cam_synx_obj_dev->monitor_data = CAM_MEM_ZALLOC( + sizeof(struct cam_generic_fence_monitor_data *) * + CAM_SYNX_TABLE_SZ, GFP_KERNEL); + if (!g_cam_synx_obj_dev->monitor_data) { + CAM_WARN(CAM_DMA_FENCE, "Failed to allocate memory %d", + sizeof(struct cam_generic_fence_monitor_data *) * + CAM_SYNX_TABLE_SZ); + } + } + mutex_unlock(&g_cam_synx_obj_dev->dev_lock); +} + +void cam_synx_obj_close(void) +{ + int i; + struct cam_synx_obj_row *row = NULL; + + mutex_lock(&g_cam_synx_obj_dev->dev_lock); + for (i = 0; i < CAM_SYNX_MAX_OBJS; i++) { + row = &g_cam_synx_obj_dev->rows[i]; + if (row->state == CAM_SYNX_OBJ_STATE_INVALID) + continue; + + CAM_DBG(CAM_SYNX, "Releasing synx_obj: %d[%s]", + row->synx_obj, row->name); + + /* If registered for cb, remove cb */ + if (row->cb_registered_for_sync) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(i, + NULL, + g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_UNREGISTER_CB); + + __cam_synx_deregister_cb_util(row->synx_obj, row); + } + + /* Signal and release the synx obj */ + if (row->state != CAM_SYNX_OBJ_STATE_SIGNALED) { + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(i, + NULL, + g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_SIGNAL); + + __cam_synx_signal_util(row->synx_obj, SYNX_STATE_SIGNALED_CANCEL); + } + + if 
(test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(i, + NULL, + g_cam_synx_obj_dev->monitor_data, + CAM_FENCE_OP_DESTROY); + + __cam_synx_release_hdl_util(row->synx_obj); + memset(row, 0, sizeof(struct cam_synx_obj_row)); + clear_bit(i, g_cam_synx_obj_dev->bitmap); + } + + if (g_cam_synx_obj_dev->monitor_data) { + for (i = 0; i < CAM_SYNX_TABLE_SZ; i++) { + CAM_MEM_FREE(g_cam_synx_obj_dev->monitor_data[i]); + g_cam_synx_obj_dev->monitor_data[i] = NULL; + } + } + CAM_MEM_FREE(g_cam_synx_obj_dev->monitor_data); + g_cam_synx_obj_dev->monitor_data = NULL; + + mutex_unlock(&g_cam_synx_obj_dev->dev_lock); + CAM_DBG(CAM_SYNX, "Close on Camera SYNX driver"); +} + +int cam_synx_obj_driver_init(void) +{ + int i; + + g_cam_synx_obj_dev = CAM_MEM_ZALLOC(sizeof(struct cam_synx_obj_device), GFP_KERNEL); + if (!g_cam_synx_obj_dev) + return -ENOMEM; + + if (__cam_synx_init_session()) + goto deinit_driver; + + mutex_init(&g_cam_synx_obj_dev->dev_lock); + for (i = 0; i < CAM_SYNX_MAX_OBJS; i++) + spin_lock_init(&g_cam_synx_obj_dev->row_spinlocks[i]); + + memset(&g_cam_synx_obj_dev->rows, 0, sizeof(g_cam_synx_obj_dev->rows)); + memset(&g_cam_synx_obj_dev->bitmap, 0, sizeof(g_cam_synx_obj_dev->bitmap)); + bitmap_zero(g_cam_synx_obj_dev->bitmap, CAM_SYNX_MAX_OBJS); + + /* zero will be considered an invalid slot */ + set_bit(0, g_cam_synx_obj_dev->bitmap); + + CAM_DBG(CAM_SYNX, "Camera synx obj driver initialized"); + return 0; + +deinit_driver: + CAM_ERR(CAM_SYNX, "Camera synx obj driver initialization failed"); + CAM_MEM_FREE(g_cam_synx_obj_dev); + g_cam_synx_obj_dev = NULL; + return -EINVAL; +} + +void cam_synx_obj_driver_deinit(void) +{ + int rc; + + if (g_cam_synx_obj_dev->session_handle) { + rc = synx_uninitialize(g_cam_synx_obj_dev->session_handle); + if (rc) { + CAM_ERR(CAM_SYNX, + "Synx failed to uninitialize session: %p, rc: %d", + g_cam_synx_obj_dev->session_handle, rc); + } + } + + CAM_MEM_FREE(g_cam_synx_obj_dev); 
+ g_cam_synx_obj_dev = NULL; + CAM_DBG(CAM_SYNX, "Camera synx obj driver deinitialized"); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_synx.h b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_synx.h new file mode 100644 index 0000000000..31d1d491d0 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_synx.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef __CAM_SYNC_SYNX_H__ +#define __CAM_SYNC_SYNX_H__ + +#include +#include +#include +#include + +#include "cam_sync_api.h" +#include "cam_sync.h" +#include "cam_debug_util.h" + +#define CAM_SYNX_MAX_OBJS 256 +#define CAM_SYNX_OBJ_NAME_LEN 64 +#define CAM_SYNX_TABLE_SZ (CAM_SYNX_MAX_OBJS / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ) + +/* Synx obj state */ +enum cam_synx_obj_state { + CAM_SYNX_OBJ_STATE_INVALID, + CAM_SYNX_OBJ_STATE_ACTIVE, + CAM_SYNX_OBJ_STATE_SIGNALED, +}; + +/** + * struct cam_synx_obj_release_params - Synx release payload + * Based on the flag row_idx or synx_obj is consumed + * + * @synx_row_idx : Synx obj row idx + * @synx_obj : Synx object handle + * @use_row_idx : Use row idx + */ +struct cam_synx_obj_release_params { + union { + int32_t synx_row_idx; + uint32_t synx_obj; + } u; + bool use_row_idx; +}; + +/** + * struct cam_synx_obj_fence_signal_sync_obj - SYNX -> sync signal info + * Payload to signal sync on a synx fence being signaled + * + * @synx_obj : Synx object handle + * @status : Sync signal status + */ +struct cam_synx_obj_signal_sync_obj { + int32_t synx_obj; + int32_t status; +}; + +/* Synx obj callback function type */ +typedef int (*cam_sync_callback_for_synx_obj)(int32_t sync_obj, + struct cam_synx_obj_signal_sync_obj *signal_sync_obj); + +/** + * @brief Find the synx obj in the device's table + * + * @param synx_obj : Synx obj + * @param idx : Synx object table index + * + * @return Status of operation. 
Zero in case of success. + */ +int cam_synx_obj_find_obj_in_table(uint32_t synx_obj, int32_t *idx); + +/** + * @brief Create a synx object + * + * @param name : Synx obj name + * @param flags : Creation flags + * @param synx_obj : Created synx obj handle + * @param row_idx : Created synx obj table row idx + * + * @return Status of operation. Zero in case of success. + * -EINVAL will be returned if params were invalid. + * -ENOMEM will be returned if the kernel can't allocate space for + * synx object. + */ +int cam_synx_obj_create(const char *name, uint32_t flags, uint32_t *synx_obj, + int32_t *row_idx); + +/** + * @brief Signal a synx obj when sync obj is signaled + * + * @param row_idx : Synx obj table row index + * @param signal_synx_obj : Info on synx obj to be signaled + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_synx_obj_internal_signal(int32_t row_idx, + struct cam_synx_obj_signal *signal_synx_obj); + +/** + * @brief Import a synx obj for synchronization + * + * @param name : Synx obj name + * @param flags : Import flags + * @param fence : DMA fence ptr + * @param synx_obj : New synx obj handle + * @param row_idx : Imported obj table row idx + * + * @return Status of operation. Zero in case of success + * -EINVAL if synx object is bad state + */ +int cam_synx_obj_import_dma_fence(const char *name, uint32_t flags, void *fence, + uint32_t *synx_obj, int32_t *row_idx); + +/** + * @brief Release a synx object + * + * @param release_params : Synx obj release info + * + * @return Status of operation. Zero upon success. Negative value otherwise + */ +int cam_synx_obj_release(struct cam_synx_obj_release_params *release_params); + +/** + * @brief Signal a synx obj [userspace API] + * + * @param signal_synx_obj : Signal info + * + * @return Status of operation. Zero upon success. 
Negative value otherwise + */ +int cam_synx_obj_signal_obj(struct cam_synx_obj_signal *signal_synx_obj); + +/** + * @brief Synx obj register callback + * + * @param sync_obj : Sync object + * @param row_idx : Synx obj table row idx + * @param sync_cb : Sync object callback + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_synx_obj_register_cb(int32_t *sync_obj, int32_t row_idx, + cam_sync_callback_for_synx_obj sync_cb); + +/** + * @brief: Synx recovery for a given core + * + * @param core_id: Core ID we want to recover for + * + * @return Status of operation. Zero in case of success + * -EINVAL if core_id is invalid + */ +int cam_synx_core_recovery( + enum cam_sync_synx_supported_cores core_id); + +/** + * @brief: cam synx driver open + * + */ +void cam_synx_obj_open(void); + +/** + * @brief: cam synx driver close + * + */ +void cam_synx_obj_close(void); + +/** + * @brief: cam synx driver initialize + * + */ +int cam_synx_obj_driver_init(void); + +/** + * @brief: cam synx driver deinit + * + */ +void cam_synx_obj_driver_deinit(void); + +#endif /* __CAM_SYNC_SYNX_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_util.c b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_util.c new file mode 100644 index 0000000000..67bd7eca52 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_util.c @@ -0,0 +1,829 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_sync_util.h" +#include "cam_req_mgr_workq.h" +#include "cam_common_util.h" +#include "cam_mem_mgr_api.h" + +extern unsigned long cam_sync_monitor_mask; + +static int cam_generic_expand_monitor_table(int idx, struct mutex *lock, + struct cam_generic_fence_monitor_data **mon_data) +{ + struct cam_generic_fence_monitor_data *row_mon_data; + + if (lock) + mutex_lock(lock); + row_mon_data = mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)]; + if (!row_mon_data) { + row_mon_data = CAM_MEM_ZALLOC( + sizeof(struct cam_generic_fence_monitor_data) * + CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ, GFP_KERNEL); + mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)] = row_mon_data; + } + if (!row_mon_data) { + CAM_ERR(CAM_SYNC, "Error allocating memory %d, idx %d", + sizeof(struct cam_generic_fence_monitor_data) * + CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ, idx); + + if (lock) + mutex_unlock(lock); + + return -ENOMEM; + } + + if (lock) + mutex_unlock(lock); + + return 0; +} + +static inline struct cam_generic_fence_monitor_entry *__cam_sync_get_monitor_entries(int idx) +{ + struct cam_generic_fence_monitor_data *mon_data; + + mon_data = CAM_SYNC_MONITOR_GET_DATA(idx); + if (mon_data->swap_monitor_entries) + return mon_data->prev_monitor_entries; + else + return mon_data->monitor_entries; +} + +static inline struct cam_generic_fence_monitor_entry *__cam_sync_get_prev_monitor_entries(int idx) +{ + struct cam_generic_fence_monitor_data *mon_data; + + mon_data = CAM_SYNC_MONITOR_GET_DATA(idx); + if (mon_data->swap_monitor_entries) + return mon_data->monitor_entries; + else + return mon_data->prev_monitor_entries; +} + +const char *cam_fence_op_to_string( + enum cam_fence_op op) +{ + switch (op) { + case CAM_FENCE_OP_CREATE: + return "CREATE"; + case CAM_FENCE_OP_REGISTER_CB: + return "REGISTER_CB"; + case CAM_FENCE_OP_SIGNAL: + return "SIGNAL"; + case CAM_FENCE_OP_UNREGISTER_ON_SIGNAL: + return "UNREGISTER_ON_SIGNAL"; + case CAM_FENCE_OP_UNREGISTER_CB: + return 
"UNREGISTER_CB"; + case CAM_FENCE_OP_SKIP_REGISTER_CB: + return "SKIP_REGISTER_CB"; + case CAM_FENCE_OP_ALREADY_REGISTERED_CB: + return "ALREADY_REGISTERED_CB"; + case CAM_FENCE_OP_DESTROY: + return "DESTROY"; + default: + return "INVALID"; + } +} + +static void __cam_sync_save_previous_monitor_data( + struct sync_table_row *row) +{ + struct cam_generic_fence_monitor_data *row_mon_data; + + if (!sync_dev->mon_data) + return; + + row_mon_data = CAM_SYNC_MONITOR_GET_DATA(row->sync_id); + + /* save current usage details into prev variables */ + strscpy(row_mon_data->prev_name, row->name, SYNC_DEBUG_NAME_LEN); + row_mon_data->prev_type = row->type; + row_mon_data->prev_obj_id = row->sync_id; + row_mon_data->prev_state = row->state; + row_mon_data->prev_remaining = row->remaining; + row_mon_data->prev_monitor_head = atomic64_read(&row_mon_data->monitor_head); + + /* Toggle swap flag. Avoid copying and just read/write using correct table idx */ + row_mon_data->swap_monitor_entries = !row_mon_data->swap_monitor_entries; +} + +void cam_generic_fence_update_monitor_array(int idx, + struct mutex *lock, + struct cam_generic_fence_monitor_data **mon_data, + enum cam_fence_op op) +{ + int iterator, rc; + struct cam_generic_fence_monitor_data *row_mon_data; + struct cam_generic_fence_monitor_entry *row_mon_entries; + + /* Validate inputs */ + if (!mon_data) + return; + + row_mon_data = mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)]; + if (!row_mon_data) { + rc = cam_generic_expand_monitor_table(idx, lock, mon_data); + if (rc) { + CAM_ERR(CAM_SYNC, "Failed to expand monitor table"); + return; + } + } + + row_mon_data = CAM_GENERIC_MONITOR_GET_DATA(mon_data, idx); + if (op == CAM_FENCE_OP_CREATE) + atomic64_set(&row_mon_data->monitor_head, -1); + if (row_mon_data->swap_monitor_entries) + row_mon_entries = row_mon_data->monitor_entries; + else + row_mon_entries = row_mon_data->prev_monitor_entries; + + CAM_SYNC_INC_MONITOR_HEAD(&row_mon_data->monitor_head, &iterator); + 
CAM_GET_TIMESTAMP(row_mon_entries[iterator].timestamp); + row_mon_entries[iterator].op = op; +} + +static void __cam_generic_fence_dump_monitor_entries( + struct cam_generic_fence_monitor_entry *monitor_entries, + uint32_t index, uint32_t num_entries) +{ + int i = 0; + uint64_t ms, hrs, min, sec; + + for (i = 0; i < num_entries; i++) { + CAM_CONVERT_TIMESTAMP_FORMAT(monitor_entries[index].timestamp, + hrs, min, sec, ms); + + CAM_INFO(CAM_SYNC, + "**** %llu:%llu:%llu.%llu : Index[%d] Op[%s]", + hrs, min, sec, ms, + index, + cam_fence_op_to_string(monitor_entries[index].op)); + + index = (index + 1) % CAM_SYNC_MONITOR_MAX_ENTRIES; + } +} + +static int __cam_generic_fence_get_monitor_entries_info(uint64_t state_head, + uint32_t *oldest_entry, uint32_t *num_entries) +{ + *oldest_entry = 0; + *num_entries = 0; + + if (state_head == -1) { + return -EINVAL; + } else if (state_head < CAM_SYNC_MONITOR_MAX_ENTRIES) { + /* head starts from -1 */ + *num_entries = state_head + 1; + *oldest_entry = 0; + } else { + *num_entries = CAM_SYNC_MONITOR_MAX_ENTRIES; + div_u64_rem(state_head + 1, + CAM_SYNC_MONITOR_MAX_ENTRIES, oldest_entry); + } + + return 0; +} + +void cam_generic_fence_dump_monitor_array( + struct cam_generic_fence_monitor_obj_info *obj_info) +{ + int rc; + uint32_t num_entries, oldest_entry; + uint64_t ms, hrs, min, sec; + struct timespec64 current_ts; + struct cam_generic_fence_monitor_data *mon_data = obj_info->monitor_data; + + /* Check if there are any current entries in the monitor data */ + rc = __cam_generic_fence_get_monitor_entries_info( + atomic64_read(&mon_data->monitor_head), + &oldest_entry, &num_entries); + + if (rc) + return; + + /* Print current monitor entries */ + CAM_GET_TIMESTAMP(current_ts); + CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms); + switch (obj_info->fence_type) { + case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ: + CAM_INFO(CAM_SYNC, + "======== %llu:%llu:%llu:%llu Dumping monitor information for sync obj %s, type %d, sync_id %d 
state %d remaining %d ref_cnt %d num_entries %u ===========", + hrs, min, sec, ms, obj_info->name, obj_info->sync_type, + obj_info->obj_id, obj_info->state, obj_info->remaining, + obj_info->ref_cnt, num_entries); + break; + case CAM_GENERIC_FENCE_TYPE_DMA_FENCE: + CAM_INFO(CAM_DMA_FENCE, + "======== %llu:%llu:%llu:%llu Dumping monitor information for dma obj %s, fd %d sync_id %d state %d ref_cnt %d num_entries %u ===========", + hrs, min, sec, ms, obj_info->name, obj_info->obj_id, + obj_info->sync_id, obj_info->state, obj_info->ref_cnt, + num_entries); + break; + case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ: + CAM_INFO(CAM_SYNX, + "======== %llu:%llu:%llu:%llu Dumping monitor information for synx obj %s, synx_id %d sync_id %d state %d ref_cnt %d num_entries %u ===========", + hrs, min, sec, ms, obj_info->name, obj_info->obj_id, + obj_info->sync_id, obj_info->state, obj_info->ref_cnt, + num_entries); + break; + default: + break; + } + + __cam_generic_fence_dump_monitor_entries(obj_info->monitor_entries, + oldest_entry, num_entries); + + + /* Check if there are any previous entries in the monitor data */ + rc = __cam_generic_fence_get_monitor_entries_info( + mon_data->prev_monitor_head, + &oldest_entry, &num_entries); + + if (rc) + return; + + /* Print previous monitor entries */ + CAM_GET_TIMESTAMP(current_ts); + CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms); + switch (obj_info->fence_type) { + case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ: + CAM_INFO(CAM_SYNC, + "======== %llu:%llu:%llu:%llu Dumping previous monitor information for sync obj %s, type %d, sync_id %d state %d remaining %d num_entries %u ===========", + hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_type, + mon_data->prev_obj_id, mon_data->prev_state, mon_data->prev_remaining, + num_entries); + break; + case CAM_GENERIC_FENCE_TYPE_DMA_FENCE: + CAM_INFO(CAM_DMA_FENCE, + "======== %llu:%llu:%llu:%llu Dumping previous monitor information for dma obj %s, fd %d sync_id %d state %d num_entries %u 
===========", + hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_obj_id, + mon_data->prev_sync_id, mon_data->prev_state, + num_entries); + break; + case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ: + CAM_INFO(CAM_SYNX, + "======== %llu:%llu:%llu:%llu Dumping previous monitor information for synx obj %s, synx_id %d sync_id %d state %d num_entries %u ===========", + hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_obj_id, + mon_data->prev_sync_id, mon_data->prev_state, + num_entries); + break; + default: + break; + } + + __cam_generic_fence_dump_monitor_entries(obj_info->prev_monitor_entries, + oldest_entry, num_entries); + +} + +void cam_sync_dump_monitor_array(struct sync_table_row *row) +{ + struct cam_generic_fence_monitor_obj_info obj_info; + + if (!sync_dev->mon_data || + !test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask) || + !(CAM_GENERIC_MONITOR_GET_DATA(sync_dev->mon_data, row->sync_id)->prev_obj_id)) + return; + + obj_info.name = row->name; + obj_info.sync_type = row->type; + obj_info.obj_id = row->sync_id; + obj_info.state = row->state; + obj_info.remaining = row->remaining; + obj_info.ref_cnt = atomic_read(&row->ref_cnt); + obj_info.monitor_data = CAM_SYNC_MONITOR_GET_DATA(row->sync_id); + obj_info.fence_type = CAM_GENERIC_FENCE_TYPE_SYNC_OBJ; + obj_info.monitor_entries = + __cam_sync_get_monitor_entries(row->sync_id); + obj_info.prev_monitor_entries = + __cam_sync_get_prev_monitor_entries(row->sync_id); + cam_generic_fence_dump_monitor_array(&obj_info); +} + +int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev, + long *idx) +{ + int rc = 0; + + mutex_lock(&sync_dev->table_lock); + + *idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS); + + if (*idx < CAM_SYNC_MAX_OBJS) + set_bit(*idx, sync_dev->bitmap); + else + rc = -1; + + mutex_unlock(&sync_dev->table_lock); + + return rc; +} + +int cam_sync_init_row(struct sync_table_row *table, + uint32_t idx, const char *name, uint32_t type) +{ + struct sync_table_row 
*row = table + idx; + + if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS) + return -EINVAL; + + strscpy(row->name, name, SYNC_DEBUG_NAME_LEN); + INIT_LIST_HEAD(&row->parents_list); + INIT_LIST_HEAD(&row->children_list); + row->type = type; + row->sync_id = idx; + row->state = CAM_SYNC_STATE_ACTIVE; + row->remaining = 0; + atomic_set(&row->ref_cnt, 0); + init_completion(&row->signaled); + INIT_LIST_HEAD(&row->callback_list); + INIT_LIST_HEAD(&row->user_payload_list); + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) { + cam_generic_fence_update_monitor_array(idx, &sync_dev->table_lock, + sync_dev->mon_data, + CAM_FENCE_OP_CREATE); + } + CAM_DBG(CAM_SYNC, + "row name:%s sync_id:%i [idx:%u] row_state:%u ", + row->name, row->sync_id, idx, row->state); + + return 0; +} + +int cam_sync_init_group_object(struct sync_table_row *table, + uint32_t idx, + uint32_t *sync_objs, + uint32_t num_objs) +{ + int i, rc; + struct sync_child_info *child_info; + struct sync_parent_info *parent_info; + struct sync_table_row *row = table + idx; + struct sync_table_row *child_row = NULL; + + cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP); + + /* + * While traversing for children, parent's row list is updated with + * child info and each child's row is updated with parent info. + * If any child state is ERROR or SUCCESS, it will not be added to list. 
+ */ + for (i = 0; i < num_objs; i++) { + child_row = table + sync_objs[i]; + spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + + /* validate child */ + if ((child_row->type == CAM_SYNC_TYPE_GROUP) || + (child_row->state == CAM_SYNC_STATE_INVALID)) { + spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + CAM_ERR(CAM_SYNC, + "Invalid child fence:%i state:%u type:%u", + child_row->sync_id, child_row->state, + child_row->type); + rc = -EINVAL; + goto clean_children_info; + } + + /* check for child's state */ + if ((child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) || + (child_row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) { + row->state = child_row->state; + spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + continue; + } + if (child_row->state != CAM_SYNC_STATE_ACTIVE) { + spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + continue; + } + + row->remaining++; + + /* Add child info */ + child_info = CAM_MEM_ZALLOC(sizeof(*child_info), GFP_ATOMIC); + if (!child_info) { + spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + rc = -ENOMEM; + goto clean_children_info; + } + child_info->sync_id = sync_objs[i]; + list_add_tail(&child_info->list, &row->children_list); + + /* Add parent info */ + parent_info = CAM_MEM_ZALLOC(sizeof(*parent_info), GFP_ATOMIC); + if (!parent_info) { + spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + rc = -ENOMEM; + goto clean_children_info; + } + parent_info->sync_id = idx; + list_add_tail(&parent_info->list, &child_row->parents_list); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + } + + if (!row->remaining) { + if ((row->state != CAM_SYNC_STATE_SIGNALED_ERROR) && + (row->state != CAM_SYNC_STATE_SIGNALED_CANCEL)) + row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS; + complete_all(&row->signaled); + } + + return 0; + +clean_children_info: + row->state = CAM_SYNC_STATE_INVALID; + for (i = i-1; i >= 0; i--) { + spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + child_row = table + sync_objs[i]; + 
cam_sync_util_cleanup_parents_list(child_row, + SYNC_LIST_CLEAN_ONE, idx); + spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]); + } + + cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0); + return rc; +} + +int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx, + struct cam_sync_check_for_dma_release *check_for_dma_release, + struct cam_sync_check_for_synx_release *check_for_synx_release) +{ + struct sync_table_row *row = table + idx; + struct sync_child_info *child_info, *temp_child; + struct sync_callback_info *sync_cb, *temp_cb; + struct sync_parent_info *parent_info, *temp_parent; + struct sync_user_payload *upayload_info, *temp_upayload; + struct sync_table_row *child_row = NULL, *parent_row = NULL; + struct list_head temp_child_list, temp_parent_list; + + if (!table || (idx <= 0) || (idx >= CAM_SYNC_MAX_OBJS)) + return -EINVAL; + + CAM_DBG(CAM_SYNC, + "row name:%s sync_id:%i [idx:%u] row_state:%u", + row->name, row->sync_id, idx, row->state); + + spin_lock_bh(&sync_dev->row_spinlocks[idx]); + if (row->state == CAM_SYNC_STATE_INVALID) { + spin_unlock_bh(&sync_dev->row_spinlocks[idx]); + CAM_ERR(CAM_SYNC, + "Error: accessing an uninitialized sync obj: idx = %d name = %s", + idx, + row->name); + return -EINVAL; + } + + if (row->state == CAM_SYNC_STATE_ACTIVE) + CAM_DBG(CAM_SYNC, + "Destroying an active sync object name:%s id:%i", + row->name, row->sync_id); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) { + cam_generic_fence_update_monitor_array(idx, &sync_dev->table_lock, + sync_dev->mon_data, + CAM_FENCE_OP_DESTROY); + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ_DUMP, &cam_sync_monitor_mask)) + cam_sync_dump_monitor_array(row); + __cam_sync_save_previous_monitor_data(row); + } + + row->state = CAM_SYNC_STATE_INVALID; + + /* Object's child and parent objects will be added into this list */ + INIT_LIST_HEAD(&temp_child_list); + INIT_LIST_HEAD(&temp_parent_list); + + 
list_for_each_entry_safe(child_info, temp_child, &row->children_list, + list) { + if (child_info->sync_id <= 0) + continue; + + list_del_init(&child_info->list); + list_add_tail(&child_info->list, &temp_child_list); + } + + list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list, + list) { + if (parent_info->sync_id <= 0) + continue; + + list_del_init(&parent_info->list); + list_add_tail(&parent_info->list, &temp_parent_list); + } + + spin_unlock_bh(&sync_dev->row_spinlocks[idx]); + + /* Cleanup the child to parent link from child list */ + while (!list_empty(&temp_child_list)) { + child_info = list_first_entry(&temp_child_list, + struct sync_child_info, list); + child_row = sync_dev->sync_table + child_info->sync_id; + + spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]); + + if (child_row->state == CAM_SYNC_STATE_INVALID) { + list_del_init(&child_info->list); + spin_unlock_bh(&sync_dev->row_spinlocks[ + child_info->sync_id]); + CAM_MEM_FREE(child_info); + continue; + } + + if (child_row->state == CAM_SYNC_STATE_ACTIVE) + CAM_DBG(CAM_SYNC, + "Warning: destroying active child sync obj = %s[%d]", + child_row->name, + child_info->sync_id); + + cam_sync_util_cleanup_parents_list(child_row, + SYNC_LIST_CLEAN_ONE, idx); + + list_del_init(&child_info->list); + spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]); + CAM_MEM_FREE(child_info); + } + + /* Cleanup the parent to child link */ + while (!list_empty(&temp_parent_list)) { + parent_info = list_first_entry(&temp_parent_list, + struct sync_parent_info, list); + parent_row = sync_dev->sync_table + parent_info->sync_id; + + spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]); + + if (parent_row->state == CAM_SYNC_STATE_INVALID) { + list_del_init(&parent_info->list); + spin_unlock_bh(&sync_dev->row_spinlocks[ + parent_info->sync_id]); + CAM_MEM_FREE(parent_info); + continue; + } + + if (parent_row->state == CAM_SYNC_STATE_ACTIVE) + CAM_DBG(CAM_SYNC, + "Warning: destroying active 
parent sync obj = %s[%d]", + parent_row->name, + parent_info->sync_id); + + cam_sync_util_cleanup_children_list(parent_row, + SYNC_LIST_CLEAN_ONE, idx); + + list_del_init(&parent_info->list); + spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]); + CAM_MEM_FREE(parent_info); + } + + spin_lock_bh(&sync_dev->row_spinlocks[idx]); + list_for_each_entry_safe(upayload_info, temp_upayload, + &row->user_payload_list, list) { + list_del_init(&upayload_info->list); + CAM_MEM_FREE(upayload_info); + } + + list_for_each_entry_safe(sync_cb, temp_cb, + &row->callback_list, list) { + list_del_init(&sync_cb->list); + CAM_MEM_FREE(sync_cb); + } + + /* Decrement ref cnt for imported dma fence */ + if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) { + cam_dma_fence_get_put_ref(false, row->dma_fence_info.dma_fence_row_idx); + + /* Check if same dma fence is being released with the sync obj */ + if (check_for_dma_release) { + if (row->dma_fence_info.dma_fence_fd == + check_for_dma_release->dma_fence_fd) { + check_for_dma_release->sync_created_with_dma = + row->dma_fence_info.sync_created_with_dma; + check_for_dma_release->dma_fence_row_idx = + row->dma_fence_info.dma_fence_row_idx; + } + } + } + + /* Check if same synx obj is being released with the sync obj */ + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &row->ext_fence_mask)) { + if (check_for_synx_release) { + if (row->synx_obj_info.synx_obj == + check_for_synx_release->synx_obj) { + check_for_synx_release->synx_obj_row_idx = + row->synx_obj_info.synx_obj_row_idx; + check_for_synx_release->sync_created_with_synx = + row->synx_obj_info.sync_created_with_synx; + } + } + } + + memset(row, 0, sizeof(*row)); + clear_bit(idx, sync_dev->bitmap); + INIT_LIST_HEAD(&row->callback_list); + INIT_LIST_HEAD(&row->parents_list); + INIT_LIST_HEAD(&row->children_list); + INIT_LIST_HEAD(&row->user_payload_list); + spin_unlock_bh(&sync_dev->row_spinlocks[idx]); + + return 0; +} + +void cam_sync_util_cb_dispatch(struct 
work_struct *cb_dispatch_work) +{ + struct sync_callback_info *cb_info = container_of(cb_dispatch_work, + struct sync_callback_info, + cb_dispatch_work); + sync_callback sync_data = cb_info->callback_func; + void *cb = cb_info->callback_func; + + cam_common_util_thread_switch_delay_detect( + "cam_sync_workq", "schedule", cb, + cb_info->workq_scheduled_ts, + CAM_WORKQ_SCHEDULE_TIME_THRESHOLD); + sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data); + + CAM_MEM_FREE(cb_info); +} + +void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj, + uint32_t status, uint32_t event_cause) +{ + struct sync_callback_info *sync_cb; + struct sync_user_payload *payload_info; + struct sync_callback_info *temp_sync_cb; + struct sync_table_row *signalable_row; + struct sync_user_payload *temp_payload_info; + + signalable_row = sync_dev->sync_table + sync_obj; + if (signalable_row->state == CAM_SYNC_STATE_INVALID) { + CAM_DBG(CAM_SYNC, + "Accessing invalid sync object:%s[%i]", signalable_row->name, + sync_obj); + return; + } + + /* Dispatch kernel callbacks if any were registered earlier */ + list_for_each_entry_safe(sync_cb, + temp_sync_cb, &signalable_row->callback_list, list) { + sync_cb->status = status; + list_del_init(&sync_cb->list); + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_UNREGISTER_ON_SIGNAL); + queue_work(sync_dev->work_queue, + &sync_cb->cb_dispatch_work); + } + + /* Dispatch user payloads if any were registered earlier */ + list_for_each_entry_safe(payload_info, temp_payload_info, + &signalable_row->user_payload_list, list) { + spin_lock_bh(&sync_dev->cam_sync_eventq_lock); + if (!sync_dev->cam_sync_eventq) { + spin_unlock_bh( + &sync_dev->cam_sync_eventq_lock); + break; + } + spin_unlock_bh(&sync_dev->cam_sync_eventq_lock); + cam_sync_util_send_v4l2_event( + CAM_SYNC_V4L_EVENT_ID_CB_TRIG, + sync_obj, + status, + 
payload_info->payload_data, + CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64), + event_cause); + + list_del_init(&payload_info->list); + + if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, + &cam_sync_monitor_mask)) + cam_generic_fence_update_monitor_array(sync_obj, + &sync_dev->table_lock, sync_dev->mon_data, + CAM_FENCE_OP_UNREGISTER_ON_SIGNAL); + /* + * We can free the list node here because + * sending V4L event will make a deep copy + * anyway + */ + CAM_MEM_FREE(payload_info); + } + + /* + * This needs to be done because we want to unblock anyone + * who might be blocked and waiting on this sync object + */ + complete_all(&signalable_row->signaled); +} + +void cam_sync_util_send_v4l2_event(uint32_t id, + uint32_t sync_obj, + int status, + void *payload, + int len, uint32_t event_cause) +{ + struct v4l2_event event; + __u64 *payload_data = NULL; + + if (sync_dev->version == CAM_SYNC_V4L_EVENT_V2) { + struct cam_sync_ev_header_v2 *ev_header = NULL; + + event.id = id; + event.type = CAM_SYNC_V4L_EVENT_V2; + + ev_header = CAM_SYNC_GET_HEADER_PTR_V2(event); + ev_header->sync_obj = sync_obj; + ev_header->status = status; + ev_header->version = sync_dev->version; + ev_header->evt_param[CAM_SYNC_EVENT_REASON_CODE_INDEX] = + event_cause; + payload_data = CAM_SYNC_GET_PAYLOAD_PTR_V2(event, __u64); + } else { + struct cam_sync_ev_header *ev_header = NULL; + + event.id = id; + event.type = CAM_SYNC_V4L_EVENT; + + ev_header = CAM_SYNC_GET_HEADER_PTR(event); + ev_header->sync_obj = sync_obj; + ev_header->status = status; + payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64); + } + + memcpy(payload_data, payload, len); + v4l2_event_queue(sync_dev->vdev, &event); + CAM_DBG(CAM_SYNC, "send v4l2 event version %d for sync_obj :%d", + sync_dev->version, + sync_obj); +} + +int cam_sync_util_update_parent_state(struct sync_table_row *parent_row, + int new_state) +{ + int rc = 0; + + switch (parent_row->state) { + case CAM_SYNC_STATE_ACTIVE: + case CAM_SYNC_STATE_SIGNALED_SUCCESS: + 
parent_row->state = new_state; + break; + + case CAM_SYNC_STATE_SIGNALED_ERROR: + case CAM_SYNC_STATE_SIGNALED_CANCEL: + break; + + case CAM_SYNC_STATE_INVALID: + default: + rc = -EINVAL; + break; + } + + return rc; +} + +void cam_sync_util_cleanup_children_list(struct sync_table_row *row, + uint32_t list_clean_type, uint32_t sync_obj) +{ + struct sync_child_info *child_info = NULL; + struct sync_child_info *temp_child_info = NULL; + uint32_t curr_sync_obj; + + list_for_each_entry_safe(child_info, + temp_child_info, &row->children_list, list) { + if ((list_clean_type == SYNC_LIST_CLEAN_ONE) && + (child_info->sync_id != sync_obj)) + continue; + + curr_sync_obj = child_info->sync_id; + list_del_init(&child_info->list); + CAM_MEM_FREE(child_info); + + if ((list_clean_type == SYNC_LIST_CLEAN_ONE) && + (curr_sync_obj == sync_obj)) + break; + } +} + +void cam_sync_util_cleanup_parents_list(struct sync_table_row *row, + uint32_t list_clean_type, uint32_t sync_obj) +{ + struct sync_parent_info *parent_info = NULL; + struct sync_parent_info *temp_parent_info = NULL; + uint32_t curr_sync_obj; + + list_for_each_entry_safe(parent_info, + temp_parent_info, &row->parents_list, list) { + if ((list_clean_type == SYNC_LIST_CLEAN_ONE) && + (parent_info->sync_id != sync_obj)) + continue; + + curr_sync_obj = parent_info->sync_id; + list_del_init(&parent_info->list); + CAM_MEM_FREE(parent_info); + + if ((list_clean_type == SYNC_LIST_CLEAN_ONE) && + (curr_sync_obj == sync_obj)) + break; + } +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_util.h b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_util.h new file mode 100644 index 0000000000..966abd1c10 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_sync/cam_sync_util.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __CAM_SYNC_UTIL_H__ +#define __CAM_SYNC_UTIL_H__ + + +#include "cam_sync_private.h" +#include "cam_debug_util.h" + +extern struct sync_device *sync_dev; + +/** + * struct cam_sync_check_for_dma_release - + * Checks if the dma fence being released + * was created with the sync obj + * + * @dma_fence_row_idx : Get DMA fence row idx that is associated with + * the sync obj + * @dma_fence_fd : Check if DMA fence fd is associated with + * sync obj + * @sync_created_with_dma : Set if the dma fence fd was created + * with sync obj + */ +struct cam_sync_check_for_dma_release { + int32_t dma_fence_row_idx; + int32_t dma_fence_fd; + bool sync_created_with_dma; +}; + +/** + * struct cam_sync_check_for_synx_release - + * Checks if the synx obj being released + * was created with the sync obj + * + * @synx_obj : Check if synx obj is associated with + * sync obj + * @synx_obj_row_idx : Get synx obj row idx that is associated with + * the sync obj + * @sync_created_with_synx : Set if the dma fence fd was created + * with sync obj + */ +struct cam_sync_check_for_synx_release { + int32_t synx_obj; + int32_t synx_obj_row_idx; + bool sync_created_with_synx; +}; + +/** + * @brief: Finds an empty row in the sync table and sets its corresponding bit + * in the bit array + * + * @param sync_dev : Pointer to the sync device instance + * @param idx : Pointer to an long containing the index found in the bit + * array + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev, + long *idx); + +/** + * @brief: Function to initialize an empty row in the sync table. This should be + * called only for individual sync objects. + * + * @param table : Pointer to the sync objects table + * @param idx : Index of row to initialize + * @param name : Optional string representation of the sync object. 
Should be + * 63 characters or less + * @param type : type of row to be initialized + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_init_row(struct sync_table_row *table, + uint32_t idx, const char *name, uint32_t type); + +/** + * @brief: Function to uninitialize a row in the sync table + * + * @param table : Pointer to the sync objects table + * @param idx : Index of row to initialize + * @optional param check_for_dma_release : checks for dma fence release + * @optional param check_for_synx_release : checks for synx obj release + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx, + struct cam_sync_check_for_dma_release *check_for_dma_release, + struct cam_sync_check_for_synx_release *check_for_synx_release); + +/** + * @brief: Function to initialize a row in the sync table when the object is a + * group object, also known as a merged sync object + * + * @param table : Pointer to the sync objects table + * @param idx : Index of row to initialize + * @param sync_objs : Array of sync objects which will merged + * or grouped together + * @param num_objs : Number of sync objects in the array + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int cam_sync_init_group_object(struct sync_table_row *table, + uint32_t idx, + uint32_t *sync_objs, + uint32_t num_objs); + +/** + * @brief: Function to dispatch a kernel callback for a sync callback + * + * @param cb_dispatch_work : Pointer to the work_struct that needs to be + * dispatched + * + * @return None + */ +void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work); + +/** + * @brief: Function to dispatch callbacks for a signaled sync object + * + * @sync_obj : Sync object that is signaled + * @status : Status of the signaled object + * @evt_param : Event paramaeter + * + * @return None + */ +void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj, + uint32_t status, uint32_t evt_param); + +/** + * @brief: Function to send V4L event to user space + * @param id : V4L event id to send + * @param sync_obj : Sync obj for which event needs to be sent + * @param status : Status of the event + * @payload : Payload that needs to be sent to user space + * @len : Length of the payload + * @evt_param : Event Paramenter + * + * @return None + */ +void cam_sync_util_send_v4l2_event(uint32_t id, + uint32_t sync_obj, + int status, + void *payload, + int len, + uint32_t evt_param); + +/** + * @brief: Function which gets the next state of the sync object based on the + * current state and the new state + * + * @param current_state : Current state of the sync object + * @param new_state : New state of the sync object + * + * @return Next state of the sync object + */ +int cam_sync_util_update_parent_state(struct sync_table_row *parent_row, + int new_state); + +/** + * @brief: Function to clean up the children of a sync object + * @row : Row whose child list to clean + * @list_clean_type : Clean specific object or clean all objects + * @sync_obj : Sync object to be clean if list clean type is + * SYNC_LIST_CLEAN_ONE + * + * @return None + */ +void cam_sync_util_cleanup_children_list(struct sync_table_row *row, + uint32_t list_clean_type, uint32_t 
sync_obj); + +/** + * @brief: Function to clean up the parents of a sync object + * @row : Row whose parent list to clean + * @list_clean_type : Clean specific object or clean all objects + * @sync_obj : Sync object to be clean if list clean type is + * SYNC_LIST_CLEAN_ONE + * + * @return None + */ +void cam_sync_util_cleanup_parents_list(struct sync_table_row *row, + uint32_t list_clean_type, uint32_t sync_obj); + +/** + * @brief: Function to dump sync obj & monitor data + * @row : Row whose data to dump + * + * @return None + */ +void cam_sync_dump_monitor_array(struct sync_table_row *row); + +/** + * @brief: Function to add a new entry to the monitor table + * @idx : Index of row to update + * @mutex : Mutex lock when expand monitor table + * @mon_data : Pointer to the monitor data array + * @op : Operation id + * + * @return None + */ +void cam_generic_fence_update_monitor_array(int idx, + struct mutex *lock, + struct cam_generic_fence_monitor_data **mon_data, + enum cam_fence_op op); + +/** + * @brief: Function to dump monitor array for sync/dma/synx + * @obj_info : Monitor object that needs to be dumped + * + * @return None + */ +void cam_generic_fence_dump_monitor_array( + struct cam_generic_fence_monitor_obj_info *obj_info); + +#endif /* __CAM_SYNC_UTIL_H__ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_common_util.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_common_util.c new file mode 100644 index 0000000000..4fd5d215ac --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_common_util.c @@ -0,0 +1,887 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "cam_common_util.h" +#include "cam_debug_util.h" +#include "cam_presil_hw_access.h" +#include "cam_hw.h" +#include "cam_mem_mgr_api.h" +#if IS_REACHABLE(CONFIG_QCOM_VA_MINIDUMP) +#include +static struct cam_common_mini_dump_dev_info g_minidump_dev_info; +#endif + +#define CAM_PRESIL_POLL_DELAY 20 + +static struct cam_common_inject_evt_info g_inject_evt_info; + +static uint timeout_multiplier = 1; +module_param(timeout_multiplier, uint, 0644); +typedef int (*cam_common_evt_inject_cmd_parse_handler)( + struct cam_common_inject_evt_param *inject_params, + uint32_t param_counter, char *token); + +int cam_common_util_get_string_index(const char **strings, + uint32_t num_strings, const char *matching_string, uint32_t *index) +{ + int i; + + for (i = 0; i < num_strings; i++) { + if (strnstr(strings[i], matching_string, strlen(strings[i]))) { + CAM_DBG(CAM_UTIL, "matched %s : %d\n", + matching_string, i); + *index = i; + return 0; + } + } + + return -EINVAL; +} + +uint32_t cam_common_util_remove_duplicate_arr(int32_t *arr, uint32_t num) +{ + int i, j; + uint32_t wr_idx = 1; + + if (!arr) { + CAM_ERR(CAM_UTIL, "Null input array"); + return 0; + } + + for (i = 1; i < num; i++) { + for (j = 0; j < wr_idx ; j++) { + if (arr[i] == arr[j]) + break; + } + if (j == wr_idx) + arr[wr_idx++] = arr[i]; + } + + return wr_idx; +} + +unsigned long cam_common_wait_for_completion_timeout( + struct completion *complete, + unsigned long timeout_jiffies) +{ + unsigned long wait_jiffies; + unsigned long rem_jiffies; + + if (!complete) { + CAM_ERR(CAM_UTIL, "Null complete pointer"); + return 0; + } + + if (timeout_multiplier < 1) + timeout_multiplier = 1; + + wait_jiffies = timeout_jiffies * timeout_multiplier; + rem_jiffies = wait_for_completion_timeout(complete, wait_jiffies); + + return rem_jiffies; +} + +int cam_common_read_poll_timeout( + void __iomem *addr, + unsigned long delay, + unsigned 
long timeout, + uint32_t mask, + uint32_t check_val, + uint32_t *status) +{ + unsigned long wait_time_us; + int rc = -EINVAL; + + if (!addr || !status) { + CAM_ERR(CAM_UTIL, "Invalid param addr: %pK status: %pK", + addr, status); + return rc; + } + + if (timeout_multiplier < 1) + timeout_multiplier = 1; + + wait_time_us = timeout * timeout_multiplier; + + if (false == cam_presil_mode_enabled()) { + rc = readl_poll_timeout(addr, *status, (*status & mask) == check_val, delay, + wait_time_us); + } else { + rc = cam_presil_readl_poll_timeout(addr, mask, + wait_time_us/(CAM_PRESIL_POLL_DELAY * 600), CAM_PRESIL_POLL_DELAY); + } + + return rc; +} + +int cam_common_modify_timer(struct timer_list *timer, int32_t timeout_val) +{ + if (!timer) { + CAM_ERR(CAM_UTIL, "Invalid reference to system timer"); + return -EINVAL; + } + + if (timeout_multiplier < 1) + timeout_multiplier = 1; + + CAM_DBG(CAM_UTIL, "Starting timer to fire in %d ms. (jiffies=%lu)\n", + (timeout_val * timeout_multiplier), jiffies); + mod_timer(timer, + (jiffies + msecs_to_jiffies(timeout_val * timeout_multiplier))); + + return 0; +} + +void cam_common_util_thread_switch_delay_detect(char *wq_name, const char *state, + void *cb, ktime_t scheduled_time, uint32_t threshold) +{ + uint64_t diff; + ktime_t cur_time; + struct timespec64 cur_ts; + struct timespec64 scheduled_ts; + + cur_time = ktime_get(); + diff = ktime_ms_delta(cur_time, scheduled_time); + + if (diff > threshold) { + scheduled_ts = ktime_to_timespec64(scheduled_time); + cur_ts = ktime_to_timespec64(cur_time); + CAM_WARN_RATE_LIMIT_CUSTOM(CAM_UTIL, 5, 1, + "%s cb: %ps delay in %s detected %ld:%06ld cur %ld:%06ld\n" + "diff %ld: threshold %d", + wq_name, cb, state, scheduled_ts.tv_sec, + scheduled_ts.tv_nsec/NSEC_PER_USEC, + cur_ts.tv_sec, cur_ts.tv_nsec/NSEC_PER_USEC, + diff, threshold); + } +} + +#if IS_REACHABLE(CONFIG_QCOM_VA_MINIDUMP) +static void cam_common_mini_dump_handler(void *dst, unsigned long len) +{ + int i = 0; + uint8_t *waddr; + 
unsigned long bytes_written = 0; + unsigned long remain_len = len; + struct cam_common_mini_dump_data *md; + + if (len < sizeof(*md)) { + CAM_WARN(CAM_UTIL, "Insufficient len %lu", len); + return; + } + + md = (struct cam_common_mini_dump_data *)dst; + waddr = (uint8_t *)md + sizeof(*md); + remain_len -= sizeof(*md); + + for (i = 0; i < CAM_COMMON_MINI_DUMP_DEV_NUM; i++) { + if (!g_minidump_dev_info.dump_cb[i]) + continue; + + memcpy(md->name[i], g_minidump_dev_info.name[i], + strlen(g_minidump_dev_info.name[i])); + md->waddr[i] = (void *)waddr; + bytes_written = g_minidump_dev_info.dump_cb[i]( + (void *)waddr, remain_len, g_minidump_dev_info.priv_data[i]); + md->size[i] = bytes_written; + if (bytes_written >= len) { + CAM_WARN(CAM_UTIL, "No more space to dump"); + goto nomem; + } + + remain_len -= bytes_written; + waddr += bytes_written; + } + + return; +nomem: + for (; i >=0; i--) + CAM_WARN(CAM_UTIL, "%s: Dumped len: %lu", md->name[i], md->size[i]); +} + +static int cam_common_md_notify_handler(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct va_md_entry cbentry; + int rc = 0; + + cbentry.vaddr = 0x0; + strscpy(cbentry.owner, "Camera", sizeof(cbentry.owner)); + cbentry.size = CAM_COMMON_MINI_DUMP_SIZE; + cbentry.cb = cam_common_mini_dump_handler; + rc = qcom_va_md_add_region(&cbentry); + if (rc) { + CAM_ERR(CAM_UTIL, "Va Region add falied %d", rc); + return NOTIFY_STOP_MASK; + } + + return NOTIFY_OK; +} + +static struct notifier_block cam_common_md_notify_blk = { + .notifier_call = cam_common_md_notify_handler, + .priority = INT_MAX, +}; + +int cam_common_register_mini_dump_cb( + cam_common_mini_dump_cb mini_dump_cb, + uint8_t *dev_name, void *priv_data) +{ + int rc = 0; + uint32_t idx; + + if (g_minidump_dev_info.num_devs >= CAM_COMMON_MINI_DUMP_DEV_NUM) { + CAM_ERR(CAM_UTIL, "No free index available"); + return -EINVAL; + } + + if (!mini_dump_cb || !dev_name) { + CAM_ERR(CAM_UTIL, "Invalid params"); + return -EINVAL; + } + + idx = 
g_minidump_dev_info.num_devs; + g_minidump_dev_info.dump_cb[idx] = + mini_dump_cb; + scnprintf(g_minidump_dev_info.name[idx], + CAM_COMMON_MINI_DUMP_DEV_NAME_LEN, dev_name); + g_minidump_dev_info.priv_data[idx] = priv_data; + g_minidump_dev_info.num_devs++; + if (!g_minidump_dev_info.is_registered) { + rc = qcom_va_md_register("Camera", &cam_common_md_notify_blk); + if (rc) { + CAM_ERR(CAM_UTIL, "Camera VA minidump register failed"); + goto end; + } + g_minidump_dev_info.is_registered = true; + } +end: + return rc; +} +#endif + +void *cam_common_user_dump_clock( + void *dump_struct, uint8_t *addr_ptr) +{ + struct cam_hw_info *hw_info = NULL; + uint64_t *addr = NULL; + + hw_info = (struct cam_hw_info *)dump_struct; + + if (!hw_info || !addr_ptr) { + CAM_ERR(CAM_ISP, "HW info or address pointer NULL"); + return addr; + } + + addr = (uint64_t *)addr_ptr; + *addr++ = cam_soc_util_get_applied_src_clk(&hw_info->soc_info, true); + + return addr; +} + +int cam_common_user_dump_helper( + void *cmd_args, + void *(*func)(void *dump_struct, uint8_t *addr_ptr), + void *dump_struct, + size_t size, + const char *tag, ...) 
+{ + + uint8_t *dst; + uint8_t *addr, *start; + void *returned_ptr; + struct cam_common_hw_dump_args *dump_args; + struct cam_common_hw_dump_header *hdr; + va_list args; + void*(*func_ptr)(void *dump_struct, uint8_t *addr_ptr); + + dump_args = (struct cam_common_hw_dump_args *)cmd_args; + + if (!dump_args) { + CAM_ERR(CAM_UTIL, "dump_args is NULL!"); + return -EINVAL; + } + if (!dump_args->cpu_addr || !dump_args->buf_len) { + CAM_ERR(CAM_UTIL, + "Invalid params: cpu_addr=%pk, buf_len=%zu", + (void *)dump_args->cpu_addr, + dump_args->buf_len); + return -EINVAL; + } + if (dump_args->buf_len <= dump_args->offset) { + CAM_WARN(CAM_UTIL, + "Dump offset overshoot offset %zu buf_len %zu", + dump_args->offset, dump_args->buf_len); + return -ENOSPC; + } + if (dump_args->offset + size + sizeof(struct cam_common_hw_dump_header) + > dump_args->buf_len) { + CAM_ERR(CAM_UTIL, + "Insufficient buffer space: offset %zu, required %zu, buf_len %zu", + dump_args->offset, + size + sizeof(struct cam_common_hw_dump_header), + dump_args->buf_len); + return -EINVAL; + } + + dst = (uint8_t *)dump_args->cpu_addr + dump_args->offset; + hdr = (struct cam_common_hw_dump_header *)dst; + + va_start(args, tag); + vscnprintf(hdr->tag, CAM_COMMON_HW_DUMP_TAG_MAX_LEN, tag, args); + va_end(args); + + hdr->word_size = size; + + addr = (uint8_t *)(dst + sizeof(struct cam_common_hw_dump_header)); + start = addr; + + if (!func || !dump_struct) { + CAM_ERR(CAM_UTIL, "function ptr / dump struct is NULL"); + return -EINVAL; + } + func_ptr = func; + returned_ptr = func_ptr(dump_struct, addr); + + if (IS_ERR(returned_ptr) || !returned_ptr) { + CAM_ERR(CAM_UTIL, "function call failed!"); + return PTR_ERR(returned_ptr); + } + + addr = (uint8_t *)returned_ptr; + hdr->size = addr - start; + CAM_DBG(CAM_UTIL, "hdr size: %d, word size: %d, addr: %x, start: %x", + hdr->size, hdr->word_size, addr, start); + dump_args->offset += hdr->size + + sizeof(struct cam_common_hw_dump_header); + + return 0; +} + +int 
cam_common_register_evt_inject_cb(cam_common_evt_inject_cb evt_inject_cb, + enum cam_common_evt_inject_hw_id hw_id) +{ + int rc = 0; + + if (g_inject_evt_info.num_hw_registered >= CAM_COMMON_EVT_INJECT_HW_MAX) { + CAM_ERR(CAM_UTIL, "No free index available"); + return -EINVAL; + } + + if (!evt_inject_cb || hw_id >= CAM_COMMON_EVT_INJECT_HW_MAX) { + CAM_ERR(CAM_UTIL, "Invalid params evt_inject_cb %s hw_id: %d", + CAM_IS_NULL_TO_STR(evt_inject_cb), hw_id); + return -EINVAL; + } + + g_inject_evt_info.evt_inject_cb[hw_id] = evt_inject_cb; + g_inject_evt_info.num_hw_registered++; + CAM_DBG(CAM_UTIL, "Evt inject cb registered for HW_id: %d, total registered: %d", hw_id, + g_inject_evt_info.num_hw_registered); + return rc; +} + +void cam_common_release_evt_params(int32_t dev_hdl) +{ + struct list_head *pos = NULL, *pos_next = NULL; + struct cam_common_inject_evt_param *inject_params; + + if (!g_inject_evt_info.is_list_initialised) + return; + + if (list_empty(&g_inject_evt_info.active_evt_ctx_list)) { + CAM_DBG(CAM_UTIL, "Event injection list is initialized but empty"); + return; + } + + list_for_each_safe(pos, pos_next, &g_inject_evt_info.active_evt_ctx_list) { + inject_params = list_entry(pos, struct cam_common_inject_evt_param, list); + if (inject_params->dev_hdl == dev_hdl) { + CAM_INFO(CAM_UTIL, "entry deleted for %d dev hdl", dev_hdl); + list_del(pos); + CAM_MEM_FREE(inject_params); + } + } +} + +static inline int cam_common_evt_inject_get_hw_id(uint8_t *hw_id, char *token) +{ + if (strcmp(token, CAM_COMMON_IFE_NODE) == 0) + *hw_id = CAM_COMMON_EVT_INJECT_HW_ISP; + else if (strcmp(token, CAM_COMMON_ICP_NODE) == 0) + *hw_id = CAM_COMMON_EVT_INJECT_HW_ICP; + else if (strcmp(token, CAM_COMMON_JPEG_NODE) == 0) + *hw_id = CAM_COMMON_EVT_INJECT_HW_JPEG; + else { + CAM_ERR(CAM_UTIL, "Invalid camera hardware [ %s ]", token); + return -EINVAL; + } + + return 0; +} + +static inline int cam_common_evt_inject_get_str_id_type(uint8_t *id_type, char *token) +{ + if 
(!strcmp(token, CAM_COMMON_EVT_INJECT_BUFFER_ERROR))
		*id_type = CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE;
	else if (!strcmp(token, CAM_COMMON_EVT_INJECT_NOTIFY_EVENT))
		*id_type = CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE;
	else {
		CAM_ERR(CAM_UTIL, "Invalid string id: %s", token);
		return -EINVAL;
	}

	return 0;
}

/* Token parser for buffer-error injections: only the sync error cause. */
static int cam_common_evt_inject_parse_buffer_error_evt_params(
	struct cam_common_inject_evt_param *inject_params,
	uint32_t param_counter, char *token)
{
	struct cam_hw_inject_buffer_error_param *buf_err_params =
		&inject_params->evt_params.u.buf_err_evt;
	int rc = 0;

	switch (param_counter) {
	case SYNC_ERROR_CAUSE:
		/* Base 0: accepts decimal, octal (0...) and hex (0x...) input. */
		if (kstrtou32(token, 0, &buf_err_params->sync_error)) {
			CAM_ERR(CAM_UTIL, "Invalid event type %s", token);
			rc = -EINVAL;
		}
		break;
	default:
		CAM_ERR(CAM_UTIL, "Invalid extra parameters: %s", token);
		rc = -EINVAL;
	}

	return rc;
}

/* Token parser for node events: event type, then event cause. */
static int cam_common_evt_inject_parse_node_evt_params(
	struct cam_common_inject_evt_param *inject_params,
	uint32_t param_counter, char *token)
{
	struct cam_hw_inject_node_evt_param *node_params =
		&inject_params->evt_params.u.evt_notify.u.node_evt_params;
	int rc = 0;

	switch (param_counter) {
	case EVENT_TYPE:
		if (kstrtou32(token, 0, &node_params->event_type)) {
			CAM_ERR(CAM_UTIL, "Invalid event type %s", token);
			rc = -EINVAL;
		}
		break;
	case EVENT_CAUSE:
		if (kstrtou32(token, 0, &node_params->event_cause)) {
			CAM_ERR(CAM_UTIL, "Invalid event cause %s", token);
			rc = -EINVAL;
		}
		break;
	default:
		CAM_ERR(CAM_UTIL, "Invalid extra parameters: %s", token);
		rc = -EINVAL;
	}

	return rc;
}

/* Token parser for page-fault injections: only the ctx_found flag. */
static int cam_common_evt_inject_parse_pf_params(
	struct cam_common_inject_evt_param *inject_params,
	uint32_t param_counter, char *token)
{
	struct cam_hw_inject_pf_evt_param *pf_params =
		&inject_params->evt_params.u.evt_notify.u.pf_evt_params;
	int rc = 0;

	switch (param_counter) {
	case PF_PARAM_CTX_FOUND:
		if (kstrtobool(token, 
&pf_params->ctx_found)) {
			CAM_ERR(CAM_UTIL, "Invalid context found value %s", token);
			rc = -EINVAL;
		}
		break;
	default:
		CAM_ERR(CAM_UTIL, "Invalid extra parameters %s", token);
		rc = -EINVAL;
	}

	return rc;
}

/* Token parser for error events: error type, then error code. */
static int cam_common_evt_inject_parse_err_evt_params(
	struct cam_common_inject_evt_param *inject_params,
	uint32_t param_counter, char *token)
{
	struct cam_hw_inject_err_evt_param *err_params =
		&inject_params->evt_params.u.evt_notify.u.err_evt_params;
	int rc = 0;

	switch (param_counter) {
	case ERR_PARAM_ERR_TYPE:
		if (kstrtou32(token, 0, &err_params->err_type)) {
			CAM_ERR(CAM_UTIL, "Invalid error type %s", token);
			rc = -EINVAL;
		}
		break;
	case ERR_PARAM_ERR_CODE:
		if (kstrtou32(token, 0, &err_params->err_code)) {
			CAM_ERR(CAM_UTIL, "Invalid error code %s", token);
			rc = -EINVAL;
		}
		break;
	default:
		CAM_ERR(CAM_UTIL, "Invalid extra parameters: %s", token);
		rc = -EINVAL;
	}

	return rc;
}

/* Token parser for the V4L notify sub-type that selects a follow-up parser. */
static int cam_common_evt_inject_parse_event_notify(
	struct cam_common_inject_evt_param *inject_params,
	uint32_t param_counter, char *token)
{
	int rc = 0;

	switch (param_counter) {
	case EVT_NOTIFY_TYPE:
		if (kstrtou32(token, 0,
			&inject_params->evt_params.u.evt_notify.evt_notify_type)) {
			CAM_ERR(CAM_UTIL, "Invalid Event notify type %s", token);
			rc = -EINVAL;
		}
		break;
	default:
		CAM_ERR(CAM_UTIL, "Invalid extra parameters: %s", token);
		rc = -EINVAL;
	}

	return rc;
}

/* Token parser for the leading fields: string id, HW name, dev_hdl, req_id. */
static int cam_common_evt_inject_parse_common_params(
	struct cam_common_inject_evt_param *inject_params,
	uint32_t param_counter, char *token)
{
	int rc = 0;
	struct cam_hw_inject_evt_param *evt_param = &inject_params->evt_params;

	switch (param_counter) {
	case STRING_ID:
		rc = cam_common_evt_inject_get_str_id_type(&evt_param->inject_id, token);
		break;
	case HW_NAME:
		rc = cam_common_evt_inject_get_hw_id(&inject_params->hw_id, token);
		break;
	case DEV_HDL:
		if (kstrtos32(token, 0, 
&inject_params->dev_hdl)) {
			CAM_ERR(CAM_UTIL, "Invalid device handle %s", token);
			rc = -EINVAL;
		}
		break;
	case REQ_ID:
		if (kstrtou64(token, 0, &evt_param->req_id)) {
			CAM_ERR(CAM_UTIL, "Invalid request id %s", token);
			rc = -EINVAL;
		}
		break;
	default:
		CAM_ERR(CAM_UTIL, "Invalid extra parameter: %s", token);
		rc = -EINVAL;
	}

	return rc;
}

/*
 * Walk ':'-separated tokens in *msg, feeding each one to cmd_parse_cb until
 * max_params tokens were consumed or input runs out.  Fails when the callback
 * rejects a token or fewer than max_params tokens were present.
 */
static int cam_common_evt_inject_generic_command_parser(
	struct cam_common_inject_evt_param *inject_params,
	char **msg, uint32_t max_params, cam_common_evt_inject_cmd_parse_handler cmd_parse_cb)
{
	char *token = NULL;
	int rc = 0, param_counter = 0;

	token = strsep(msg, ":");
	while (token != NULL) {
		rc = cmd_parse_cb(inject_params, param_counter, token);
		if (rc) {
			CAM_ERR(CAM_UTIL, "Parsed Command failed rc: %d", rc);
			return rc;
		}

		param_counter++;
		if (param_counter == max_params)
			break;
		token = strsep(msg, ":");
	}

	if (param_counter < max_params) {
		CAM_ERR(CAM_UTIL,
			"Insufficient parameters passed for total parameters: %u",
			param_counter);
		return -EINVAL;
	}

	return rc;
}

/*
 * module_param 'set' hook for cam_event_inject: parses the user command
 * string and forwards the resulting request to the registered HW callback.
 */
static int cam_common_evt_inject_set(const char *kmessage,
	const struct kernel_param *kp)
{
	struct cam_common_inject_evt_param *inject_params = NULL;
	struct cam_hw_inject_evt_param *hw_evt_params = NULL;
	cam_common_evt_inject_cmd_parse_handler parse_handler = NULL;
	int rc = 0;
	char tmp_buff[CAM_COMMON_EVT_INJECT_BUFFER_LEN];
	char *msg = NULL;
	uint32_t param_output = 0;

	inject_params = CAM_MEM_ZALLOC(sizeof(struct cam_common_inject_evt_param), GFP_KERNEL);
	if (!inject_params) {
		CAM_ERR(CAM_UTIL, "no free memory");
		return -ENOMEM;
	}

	/* Parse a bounded local copy; strsep() mutates the buffer. */
	rc = strscpy(tmp_buff, kmessage, CAM_COMMON_EVT_INJECT_BUFFER_LEN);
	if (rc == -E2BIG)
		goto free;

	CAM_INFO(CAM_UTIL, "parsing input param for cam event injection: %s", tmp_buff);

	msg = tmp_buff;
	hw_evt_params = &inject_params->evt_params;

	rc = cam_common_evt_inject_generic_command_parser(inject_params, 
&msg, + COMMON_PARAM_MAX, cam_common_evt_inject_parse_common_params); + if (rc) { + CAM_ERR(CAM_UTIL, "Fail to parse common params %d", rc); + goto free; + } + + switch (hw_evt_params->inject_id) { + case CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE: + rc = cam_common_evt_inject_generic_command_parser(inject_params, &msg, + EVT_NOTIFY_PARAM_MAX, cam_common_evt_inject_parse_event_notify); + if (rc) { + CAM_ERR(CAM_UTIL, "Fail to parse event notify type param %d", rc); + goto free; + } + + switch (hw_evt_params->u.evt_notify.evt_notify_type) { + case V4L_EVENT_CAM_REQ_MGR_ERROR: + parse_handler = cam_common_evt_inject_parse_err_evt_params; + param_output = ERR_PARAM_MAX; + break; + case V4L_EVENT_CAM_REQ_MGR_NODE_EVENT: + parse_handler = cam_common_evt_inject_parse_node_evt_params; + param_output = NODE_PARAM_MAX; + break; + case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: + parse_handler = cam_common_evt_inject_parse_pf_params; + param_output = PF_PARAM_MAX; + break; + default: + CAM_ERR(CAM_UTIL, "Invalid event notification type: %u", + hw_evt_params->u.evt_notify.evt_notify_type); + goto free; + } + break; + case CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE: + parse_handler = cam_common_evt_inject_parse_buffer_error_evt_params; + param_output = BUFFER_ERROR_PARAM_MAX; + break; + default: + CAM_ERR(CAM_UTIL, "Invalid Injection id: %u", hw_evt_params->inject_id); + } + + if (!parse_handler) + goto free; + + rc = cam_common_evt_inject_generic_command_parser(inject_params, &msg, + param_output, parse_handler); + if (rc) { + CAM_ERR(CAM_UTIL, "Command Parsed failed with Inject id: %u rc: %d", + hw_evt_params->inject_id, rc); + goto free; + } + + if (g_inject_evt_info.evt_inject_cb[inject_params->hw_id]) { + rc = g_inject_evt_info.evt_inject_cb[inject_params->hw_id](inject_params); + if (rc) + goto free; + } else { + CAM_ERR(CAM_UTIL, "Handler for HW_id [%hhu] not registered", inject_params->hw_id); + goto free; + } + + if (!g_inject_evt_info.is_list_initialised) { + 
INIT_LIST_HEAD(&g_inject_evt_info.active_evt_ctx_list);
		g_inject_evt_info.is_list_initialised = true;
	}

	/* Ownership of inject_params moves to the active list on success. */
	list_add(&inject_params->list, &g_inject_evt_info.active_evt_ctx_list);

	return rc;

free:
	CAM_MEM_FREE(inject_params);
	return rc;
}

/*
 * module_param 'get' hook for cam_event_inject: formats every queued
 * injection entry into the user-visible buffer.
 */
static int cam_common_evt_inject_get(char *buffer,
	const struct kernel_param *kp)
{
	uint8_t hw_name[16], string_id[16];
	uint16_t buff_max_size = CAM_COMMON_EVT_INJECT_MODULE_PARAM_MAX_LENGTH;
	struct cam_common_inject_evt_param *inject_params = NULL;
	struct cam_hw_inject_evt_param *evt_params = NULL;
	uint32_t ret = 0;

	if (!g_inject_evt_info.is_list_initialised)
		return scnprintf(buffer, buff_max_size, "uninitialised");

	if (list_empty(&g_inject_evt_info.active_evt_ctx_list))
		return scnprintf(buffer, buff_max_size, "Active err inject list is empty");

	list_for_each_entry(inject_params, &g_inject_evt_info.active_evt_ctx_list, list) {
		evt_params = &inject_params->evt_params;

		/* Translate hw_id back to the user-facing HW-name token. */
		switch (inject_params->hw_id) {
		case CAM_COMMON_EVT_INJECT_HW_ISP:
			strscpy(hw_name, CAM_COMMON_IFE_NODE, sizeof(hw_name));
			break;
		case CAM_COMMON_EVT_INJECT_HW_ICP:
			strscpy(hw_name, CAM_COMMON_ICP_NODE, sizeof(hw_name));
			break;
		case CAM_COMMON_EVT_INJECT_HW_JPEG:
			strscpy(hw_name, CAM_COMMON_JPEG_NODE, sizeof(hw_name));
			break;
		default:
			ret += scnprintf(buffer+ret, buff_max_size, "Undefined HW id\n");
			goto undefined_param;
		}

		/* Translate inject_id back to the user-facing string-id token. */
		switch (evt_params->inject_id) {
		case CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE:
			strscpy(string_id, CAM_COMMON_EVT_INJECT_BUFFER_ERROR, sizeof(string_id));
			break;
		case CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE:
			strscpy(string_id, CAM_COMMON_EVT_INJECT_NOTIFY_EVENT, sizeof(string_id));
			break;
		default:
			ret += scnprintf(buffer+ret, buff_max_size, "Undefined string id\n");
			goto undefined_param;
		}

		/*
		 * NOTE(review): buff_max_size is only reduced after this print,
		 * so the size passed to scnprintf can exceed the space left in
		 * 'buffer' - confirm against the 4K module-param limit.
		 */
		ret += scnprintf(buffer+ret, buff_max_size,
			"string_id: %s hw_name: %s dev_hdl: %d req_id: %llu ",
			string_id, hw_name,
			inject_params->dev_hdl, 
evt_params->req_id);

		/* Track remaining space; stop once the output buffer is full. */
		if (buff_max_size > ret) {
			buff_max_size -= ret;
		} else {
			CAM_WARN(CAM_UTIL, "out buff max limit reached");
			break;
		}

		if (evt_params->inject_id ==
			CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE) {
			ret += scnprintf(buffer+ret, buff_max_size,
				"sync_error: %u\n", evt_params->u.buf_err_evt.sync_error);
		} else {
			switch (evt_params->u.evt_notify.evt_notify_type) {
			case V4L_EVENT_CAM_REQ_MGR_ERROR: {
				struct cam_hw_inject_err_evt_param *err_evt_params =
					&evt_params->u.evt_notify.u.err_evt_params;
				ret += scnprintf(buffer+ret, buff_max_size,
					"Error event: error type: %u error code: %u\n",
					err_evt_params->err_type, err_evt_params->err_code);
				break;
			}
			case V4L_EVENT_CAM_REQ_MGR_NODE_EVENT: {
				struct cam_hw_inject_node_evt_param *node_evt_params =
					&evt_params->u.evt_notify.u.node_evt_params;
				ret += scnprintf(buffer+ret, buff_max_size,
					"Node event: event type: %u event cause: %u\n",
					node_evt_params->event_type, node_evt_params->event_cause);
				break;
			}
			case V4L_EVENT_CAM_REQ_MGR_PF_ERROR: {
				struct cam_hw_inject_pf_evt_param *pf_evt_params =
					&evt_params->u.evt_notify.u.pf_evt_params;
				ret += scnprintf(buffer+ret, buff_max_size,
					"PF event: ctx found %hhu\n",
					pf_evt_params->ctx_found);
				break;
			}
			default:
				ret += scnprintf(buffer+ret, buff_max_size,
					"Undefined notification event\n");
			}
		}

undefined_param:
		CAM_DBG(CAM_UTIL, "output buffer: %s", buffer);

		if (buff_max_size > ret) {
			buff_max_size -= ret;
		} else {
			CAM_WARN(CAM_UTIL, "out buff max limit reached");
			break;
		}
	}

	return ret;
}

/* Hooks behind /sys/module/.../parameters/cam_event_inject */
static const struct kernel_param_ops cam_common_evt_inject = {
	.set = cam_common_evt_inject_set,
	.get = cam_common_evt_inject_get
};

module_param_cb(cam_event_inject, &cam_common_evt_inject, NULL, 0644);

/*
 * Allocate a fresh buffer of 'size' bytes and copy 'src' into it.
 * Falls back to GFP_ATOMIC when called from non-task (atomic) context.
 */
int cam_common_mem_kdup(void **dst,
	void *src, size_t size)
{
	gfp_t flag = GFP_KERNEL;

	if (!src || !dst || !size) {
		CAM_ERR(CAM_UTIL, "Invalid params src: %pK 
dst: %pK size: %u", + src, dst, size); + return -EINVAL; + } + + if (!in_task()) + flag = GFP_ATOMIC; + + *dst = kvzalloc(size, flag); + if (!*dst) { + CAM_ERR(CAM_UTIL, "Failed to allocate memory with size: %u", size); + return -ENOMEM; + } + + memcpy(*dst, src, size); + CAM_DBG(CAM_UTIL, "Allocate and copy memory with size: %u", size); + + return 0; +} +EXPORT_SYMBOL(cam_common_mem_kdup); + +void cam_common_mem_free(void *memory) +{ + kvfree(memory); +} +EXPORT_SYMBOL(cam_common_mem_free); diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_common_util.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_common_util.h new file mode 100644 index 0000000000..2be65da534 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_common_util.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_COMMON_UTIL_H_ +#define _CAM_COMMON_UTIL_H_ + +#include +#include + +#include "cam_hw_mgr_intf.h" + +#define CAM_BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift) +#define CAM_36BIT_INTF_GET_IOVA_BASE(iova) ((iova) >> 8) +#define CAM_36BIT_INTF_GET_IOVA_OFFSET(iova) ((iova) & 0xff) + +#define CAM_COMMON_MINI_DUMP_DEV_NUM 6 +#define CAM_COMMON_MINI_DUMP_DEV_NAME_LEN 16 +#define CAM_COMMON_MINI_DUMP_SIZE 10 * 1024 * 1024 + +#define CAM_COMMON_HW_DUMP_TAG_MAX_LEN 128 +#define CAM_MAX_NUM_CCI_PAYLOAD_BYTES 11 + +#define CAM_COMMON_EVT_INJECT_MODULE_PARAM_MAX_LENGTH 4096 +#define CAM_COMMON_EVT_INJECT_BUFFER_LEN 200 +#define CAM_COMMON_EVT_INJECT_BUFFER_ERROR "Buffer_Error" +#define CAM_COMMON_EVT_INJECT_NOTIFY_EVENT "Notify_Event" +#define CAM_COMMON_IFE_NODE "IFE" +#define CAM_COMMON_ICP_NODE "IPE" +#define CAM_COMMON_JPEG_NODE "JPEG" + +#define CAM_COMMON_NS_PER_MS 1000000ULL + +#define PTR_TO_U64(ptr) ((uint64_t)(uintptr_t)ptr) +#define U64_TO_PTR(ptr) ((void *)(uintptr_t)ptr) + +#define CAM_TRIGGER_PANIC(format, args...) 
panic("CAMERA - " format "\n", ##args) + +#define CAM_GET_BOOT_TIMESTAMP(timestamp) ktime_get_boottime_ts64(&(timestamp)) +#define CAM_GET_TIMESTAMP(timestamp) ktime_get_real_ts64(&(timestamp)) +#define CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, diff_microsec) \ +({ \ + diff_microsec = 0; \ + if (ts_end.tv_nsec >= ts_start.tv_nsec) { \ + diff_microsec = \ + (ts_end.tv_nsec - ts_start.tv_nsec) / 1000; \ + diff_microsec += \ + (ts_end.tv_sec - ts_start.tv_sec) * 1000 * 1000; \ + } else { \ + diff_microsec = \ + (ts_end.tv_nsec + \ + (1000*1000*1000 - ts_start.tv_nsec)) / 1000; \ + diff_microsec += \ + (ts_end.tv_sec - ts_start.tv_sec - 1) * 1000 * 1000; \ + } \ +}) + +#define CAM_CONVERT_TIMESTAMP_FORMAT(ts, hrs, min, sec, ms) \ +({ \ + uint64_t tmp = ((ts).tv_sec); \ + (ms) = ((ts).tv_nsec) / 1000000; \ + (sec) = do_div(tmp, 60); \ + (min) = do_div(tmp, 60); \ + (hrs) = do_div(tmp, 24); \ +}) + +#define CAM_COMMON_WAIT_FOR_COMPLETION_TIMEOUT_ERRMSG(complete, timeout_jiffies, module_id, \ + fmt, args...) \ +({ \ + struct timespec64 start_time, end_time; \ + unsigned long rem_jiffies; \ + start_time = ktime_to_timespec64(ktime_get()); \ + rem_jiffies = cam_common_wait_for_completion_timeout((complete), (timeout_jiffies)); \ + if (!rem_jiffies) { \ + end_time = ktime_to_timespec64(ktime_get()); \ + CAM_ERR(module_id, \ + fmt " (timeout: %ums start: timestamp:[%lld:%06lld] end: timestamp:[%lld:%06lld])",\ + ##args, jiffies_to_msecs(timeout_jiffies), \ + start_time.tv_sec, (start_time.tv_nsec/NSEC_PER_USEC), \ + end_time.tv_sec, (end_time.tv_nsec/NSEC_PER_USEC)); \ + } \ + rem_jiffies; \ +}) + +/* + * Sanitizes the linked list entry for event payload + */ +#define CAM_COMMON_SANITIZE_LIST_ENTRY(evt_payload, payload_struct_type) \ +({ \ + memset(evt_payload, 0, sizeof(payload_struct_type)); \ +}) + +/* + * manage locking between process context and tasklets. + * use appropriate api based on current context. 
+ */ +#define _SPIN_LOCK_PROCESS_TO_BH(lock) \ +({ \ + if (in_task()) \ + spin_lock_bh(lock); \ + else \ + spin_lock(lock); \ +}) \ + +#define _SPIN_UNLOCK_PROCESS_TO_BH(lock) \ +({ \ + if (in_task()) \ + spin_unlock_bh(lock); \ + else \ + spin_unlock(lock); \ +}) \ + +typedef unsigned long (*cam_common_mini_dump_cb) (void *dst, + unsigned long len, void *priv_data); + +/** + * struct cam_common_mini_dump_dev_info + * @dump_cb : address of data dumped + * @name : Name of driver + * @num_devs : Number of device registerd + * @is_registered : Bool to indicate if registered + */ +struct cam_common_mini_dump_dev_info { + cam_common_mini_dump_cb dump_cb[CAM_COMMON_MINI_DUMP_DEV_NUM]; + uint8_t name[CAM_COMMON_MINI_DUMP_DEV_NUM] + [CAM_COMMON_MINI_DUMP_DEV_NAME_LEN]; + void *priv_data[CAM_COMMON_MINI_DUMP_DEV_NUM]; + uint8_t num_devs; + bool is_registered; +}; + +/** + * struct cam_common_mini_dump_data + * @link : address of data dumped + * @name : Name of driver + * @size : Size dumped + */ +struct cam_common_mini_dump_data { + void *waddr[CAM_COMMON_MINI_DUMP_DEV_NUM]; + uint8_t name[CAM_COMMON_MINI_DUMP_DEV_NUM][CAM_COMMON_MINI_DUMP_DEV_NAME_LEN]; + unsigned long size[CAM_COMMON_MINI_DUMP_DEV_NUM]; +}; + + +typedef int (*cam_common_evt_inject_cb) (void *inject_args); + +enum cam_common_evt_inject_str_id_type { + CAM_COMMON_EVT_INJECT_BUFFER_ERROR_TYPE, + CAM_COMMON_EVT_INJECT_NOTIFY_EVENT_TYPE +}; + +enum cam_common_evt_inject_hw_id { + CAM_COMMON_EVT_INJECT_HW_ISP, + CAM_COMMON_EVT_INJECT_HW_ICP, + CAM_COMMON_EVT_INJECT_HW_JPEG, + CAM_COMMON_EVT_INJECT_HW_MAX +}; + +enum cam_common_evt_inject_common_param_pos { + STRING_ID, + HW_NAME, + DEV_HDL, + REQ_ID, + COMMON_PARAM_MAX +}; + +enum cam_common_evt_inject_notify_event_pos { + EVT_NOTIFY_TYPE, + EVT_NOTIFY_PARAM_MAX +}; + +enum cam_evt_inject_buffer_error_event { + SYNC_ERROR_CAUSE, + BUFFER_ERROR_PARAM_MAX +}; + +enum cam_evt_inject_error_param_pos { + ERR_PARAM_ERR_TYPE, + ERR_PARAM_ERR_CODE, + ERR_PARAM_MAX +}; 
+ +enum cam_evt_inject_node_param_pos { + EVENT_TYPE, + EVENT_CAUSE, + NODE_PARAM_MAX, +}; + +enum cam_evt_inject_pf_params_pos { + PF_PARAM_CTX_FOUND, + PF_PARAM_MAX +}; + +/** + * struct cam_common_evt_inject_data + * @buf_done_data: buf done data + * @evt_params : event params for the injected event + */ +struct cam_common_evt_inject_data { + void *buf_done_data; + struct cam_hw_inject_evt_param *evt_params; +}; + +typedef int (*cam_common_evt_inject_ops) (void *cam_ctx, + struct cam_common_evt_inject_data *inject_evt); + +/** + * struct cam_common_inject_evt_param + * @evt_params : injection event params + * @dev_hdl : device handle to match with ctx's dev_hdl + * @hw_id : hw to be injected with the event + */ +struct cam_common_inject_evt_param { + struct list_head list; + struct cam_hw_inject_evt_param evt_params; + int32_t dev_hdl; + uint8_t hw_id; +}; + +/** + * struct cam_common_inject_evt_info + * @evt_inject_cb : address of callback + * @active_err_ctx_list: list containing active evt inject requests + * @num_hw_registered : number of callbacks registered + * @is_list_initialised: bool to check init for evt_inject list + */ +struct cam_common_inject_evt_info { + cam_common_evt_inject_cb evt_inject_cb[CAM_COMMON_EVT_INJECT_HW_MAX]; + struct list_head active_evt_ctx_list; + uint8_t num_hw_registered; + bool is_list_initialised; +}; + +/** + * struct cam_common_hw_dump_args + * @req_id : request id + * @cpu_addr : address where dumping will start from + * @buf_len : length of buffer where data is being dumped to + * @offset : buffer offset from cpu_addr after each item dump + * @ctxt_to_hw_map : context to hw map + * @is_dump_all : flag to indicate if all information or just bw/clk rate + * @ + */ +struct cam_common_hw_dump_args { + uint64_t req_id; + uintptr_t cpu_addr; + size_t buf_len; + size_t offset; + void *ctxt_to_hw_map; + bool is_dump_all; +}; + +/** + * struct cam_common_hw_dump_header + * @tag : string used by the parser to call parse functions + 
* @size : size of the header in the buffer + * @word_size : word size of the header + * @ + */ +struct cam_common_hw_dump_header { + uint8_t tag[CAM_COMMON_HW_DUMP_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +/** + * @brief release all event inject params in the g_inject_evt_info + * for a specific dev_hdl + * @dev_hdl: device handle to which the evt inject params belong to + */ +void cam_common_release_evt_params(int32_t dev_hdl); + +/** + * cam_common_util_get_string_index() + * + * @brief Match the string from list of strings to return + * matching index + * + * @strings: Pointer to list of strings + * @num_strings: Number of strings in 'strings' + * @matching_string: String to match + * @index: Pointer to index to return matching index + * + * @return: 0 for success + * -EINVAL for Fail + */ +int cam_common_util_get_string_index(const char **strings, + uint32_t num_strings, const char *matching_string, uint32_t *index); + +/** + * cam_common_util_remove_duplicate_arr() + * + * @brief Move all the unique integers to the start of + * the array and return the number of unique integers + * + * @array: Pointer to the first integer of array + * @num: Number of elements in array + * + * @return: Number of unique integers in array + */ +uint32_t cam_common_util_remove_duplicate_arr(int32_t *array, + uint32_t num); + +/** + * cam_common_wait_for_completion_timeout() + * + * @brief common interface to implement wait for completion + * for slow environment like presil, single debug + * timeout variable can take care + * + * @complete: Pointer to the first integer of array + * @timeout_jiffies: Timeout value in jiffie + * + * @return: Remaining jiffies, non-zero for success, zero + * in case of failure + */ +unsigned long cam_common_wait_for_completion_timeout( + struct completion *complete, + unsigned long timeout_jiffies); +/** + * cam_common_read_poll_timeout() + * + * @brief common interface to read poll timeout + * + * @addr: Address of IO register + * 
@delay: Delay interval of poll + * @timeout: Timeout for poll + * @mask: Mask to be checked + * @check_val: Value to be compared to break poll + * @status: Status of register of IO + * + * @return: 0 if success and negative if fail + * */ +int cam_common_read_poll_timeout( + void __iomem *addr, + unsigned long delay, + unsigned long timeout, + uint32_t mask, + uint32_t check_val, + uint32_t *status); + +/** + * cam_common_modify_timer() + * + * @brief common interface to modify timer, + * + * @timer: reference to system timer + * @timeout_val: timeout value for timer + * + * @return: 0 if success and negative if fail + */ +int cam_common_modify_timer(struct timer_list *timer, int32_t timeout_val); + +/** + * cam_common_util_thread_switch_delay_detect() + * + * @brief Detect if there is any scheduling delay + * + * @wq_name: workq name + * @state: either schedule or execution + * @cb: callback scheduled or executed + * @scheduled_time: Time when workq or tasklet was scheduled + * @threshold: Threshold time + * + */ +void cam_common_util_thread_switch_delay_detect(char *wq_name, const char *state, + void *cb, ktime_t scheduled_time, uint32_t threshold); + +/** + * cam_common_register_mini_dump_cb() + * + * @brief common interface to register mini dump cb + * + * @mini_dump_cb: Pointer to the mini_dump_cb + * @name: name of device registering + * + * @return: 0 if success in register non-zero if failes + */ +#if IS_REACHABLE(CONFIG_QCOM_VA_MINIDUMP) +int cam_common_register_mini_dump_cb( + cam_common_mini_dump_cb mini_dump_cb, uint8_t *dev_name, void *priv_data); +#else +static inline int cam_common_register_mini_dump_cb( + cam_common_mini_dump_cb mini_dump_cb, + uint8_t *dev_name, void *priv_data) +{ + return 0; +} +#endif + +/** + * cam_common_user_dump_clock() + * + * @brief Handles clock rate dump + * + * @dump_struct: Struct holding dump info + * @addr_ptr: Pointer to buffer address pointer + */ +void *cam_common_user_dump_clock( + void *dump_struct, + uint8_t 
*addr_ptr); + +/** + * cam_common_user_dump_helper() + * + * @brief Handles buffer addressing and dumping for user dump + * + * @cmd_args: Holds cam_common_hw_dump_args pointer + * @func: Function pointer for dump function + * @dump_struct: Struct holding dump info + * @size: Size_t value used for header word size + * @tag: Tag for header, used by parser + * @...: Variadic arguments, appended to tag if given + */ +int cam_common_user_dump_helper( + void *cmd_args, + void *(*func)(void *, uint8_t *), + void *dump_struct, + size_t size, + const char *tag, + ...); + +/** + * cam_common_register_evt_inject_cb() + * + * @brief common interface to register evt inject cb + * + * @evt_inject_cb: Pointer to evt_inject_cb + * @hw_id: HW id of the HW driver registering + * + * @return: 0 if success in register non-zero if failes + */ +int cam_common_register_evt_inject_cb( + cam_common_evt_inject_cb evt_inject_cb, + enum cam_common_evt_inject_hw_id hw_id); + +/** + * @brief: Memory alloc and copy + * + * @dst: Address of destination address of memory + * @src: Source address of memory + * @size: Length of memory + * + * @return 0 if success in register non-zero if failes + */ +int cam_common_mem_kdup(void **dst, void *src, size_t size); + +/** + * @brief: Free the memory + * + * @memory: Address of memory + */ +void cam_common_mem_free(void *memory); +#endif /* _CAM_COMMON_UTIL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_compat.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_compat.c new file mode 100644 index 0000000000..3be943205a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_compat.c @@ -0,0 +1,1053 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include +#include "cam_compat.h" +#include "cam_debug_util.h" +#include "cam_cpas_api.h" +#include "camera_main.h" +#include "cam_eeprom_dev.h" +#include "cam_eeprom_core.h" + +#if IS_ENABLED(CONFIG_SPECTRA_USE_RPMH_DRV_API) +#include + +#define CAM_RSC_DRV_IDENTIFIER "cam_rsc" + +const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index) +{ + const struct device *rsc_dev; + + rsc_dev = rpmh_get_device(CAM_RSC_DRV_IDENTIFIER, index); + if (!rsc_dev) { + CAM_ERR(CAM_CPAS, "Invalid dev for index: %u", index); + return NULL; + } + + return rsc_dev; +} + +int cam_cpas_start_drv_for_dev(const struct device *dev) +{ + int rc = 0; + + if (!dev) { + CAM_ERR(CAM_CPAS, "Invalid dev for DRV enable"); + return -EINVAL; + } + + rc = rpmh_drv_start(dev); + if (rc) { + CAM_ERR(CAM_CPAS, "[%s] Failed in DRV start", dev_name(dev)); + return rc; + } + + return rc; +} + +int cam_cpas_stop_drv_for_dev(const struct device *dev) +{ + int rc = 0; + + if (!dev) { + CAM_ERR(CAM_CPAS, "Invalid dev for DRV disable"); + return -EINVAL; + } + + rc = rpmh_drv_stop(dev); + if (rc) { + CAM_ERR(CAM_CPAS, "[%s] Failed in DRV stop", dev_name(dev)); + return rc; + } + + return rc; +} + +int cam_cpas_drv_channel_switch_for_dev(const struct device *dev) +{ + int rc = 0; + + if (!dev) { + CAM_ERR(CAM_CPAS, "Invalid dev for DRV channel switch"); + return -EINVAL; + } + + rc = rpmh_write_sleep_and_wake_no_child(dev); + if (rc) { + CAM_ERR(CAM_CPAS, "[%s] Failed in DRV channel switch", dev_name(dev)); + return rc; + } + + return rc; +} + +#else +const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index) +{ + return NULL; +} + +int cam_cpas_start_drv_for_dev(const struct device *dev) + +{ + return 0; +} + +int cam_cpas_stop_drv_for_dev(const struct device *dev) +{ + return 0; +} + +int cam_cpas_drv_channel_switch_for_dev(const struct device *dev) +{ + return 0; +} +#endif + +int cam_smmu_fetch_csf_version(struct cam_csf_version *csf_version) 
+{ +#ifdef CONFIG_ARCH_QTI_VM + csf_version->arch_ver = 3; + csf_version->max_ver = 0; + csf_version->min_ver = 0; +#elif defined CONFIG_SECURE_CAMERA_25 + struct csf_version csf_ver; + int rc; + + /* Fetch CSF version from SMMU proxy driver */ + rc = smmu_proxy_get_csf_version(&csf_ver); + if (rc) { + CAM_ERR(CAM_SMMU, + "Failed to get CSF version from SMMU proxy: %d", rc); + return rc; + } + + csf_version->arch_ver = csf_ver.arch_ver; + csf_version->max_ver = csf_ver.max_ver; + csf_version->min_ver = csf_ver.min_ver; +#else + /* This defaults to the legacy version */ + csf_version->arch_ver = 2; + csf_version->max_ver = 0; + csf_version->min_ver = 0; +#endif + return 0; +} + +unsigned long cam_update_dma_map_attributes(unsigned long attrs) +{ +#ifdef CONFIG_SECURE_CAMERA_25 + attrs |= DMA_ATTR_QTI_SMMU_PROXY_MAP; +#endif + return attrs; +} + +size_t cam_align_dma_buf_size(size_t len) +{ +#ifdef CONFIG_SECURE_CAMERA_25 + len = ALIGN(len, SMMU_PROXY_MEM_ALIGNMENT); +#endif + return len; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) +int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length) +{ + int rc = 0; + struct device_node *of_node; + struct device_node *mem_node; + struct resource res; + + of_node = (icp_fw->fw_dev)->of_node; + mem_node = of_parse_phandle(of_node, "memory-region", icp_fw->fw_id); + if (!mem_node) { + rc = -ENOMEM; + CAM_ERR(CAM_SMMU, "FW memory carveout of ICP%d not found", icp_fw->fw_id); + goto end; + } + + rc = of_address_to_resource(mem_node, 0, &res); + of_node_put(mem_node); + if (rc < 0) { + CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout of ICP%u", icp_fw->fw_id); + goto end; + } + + icp_fw->fw_hdl = res.start; + icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length); + if (!icp_fw->fw_kva) { + CAM_ERR(CAM_SMMU, "Failed to map the FW of ICP%d", icp_fw->fw_id); + rc = -ENOMEM; + goto end; + } + + memset_io(icp_fw->fw_kva, 0, fw_length); +end: + return rc; +} + +void cam_unreserve_icp_fw(struct 
cam_fw_alloc_info *icp_fw, size_t fw_length) +{ + iounmap(icp_fw->fw_kva); +} + +int cam_ife_notify_safe_lut_scm(bool safe_trigger) +{ + const uint32_t smmu_se_ife = 0; + uint32_t camera_hw_version, rc = 0; + + rc = cam_cpas_get_cpas_hw_version(&camera_hw_version); + if (!rc) { + switch (camera_hw_version) { + case CAM_CPAS_TITAN_170_V100: + case CAM_CPAS_TITAN_170_V110: + case CAM_CPAS_TITAN_175_V100: + if (qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) { + CAM_ERR(CAM_ISP, "scm call to enable safe failed"); + rc = -EINVAL; + } + break; + default: + break; + } + } + + return rc; +} + +void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa) +{ + int reg_val; + + qcom_scm_io_readl(errata_wa->data.reg_info.offset, ®_val); + reg_val |= errata_wa->data.reg_info.value; + qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val); +} + +static int camera_platform_compare_dev(struct device *dev, const void *data) +{ + return platform_bus_type.match(dev, (struct device_driver *) data); +} + +static int camera_i2c_compare_dev(struct device *dev, const void *data) +{ + return i2c_bus_type.match(dev, (struct device_driver *) data); +} +#else +int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length) +{ + int rc = 0; + + icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length, + &icp_fw->fw_hdl, GFP_KERNEL); + + if (!icp_fw->fw_kva) { + CAM_ERR(CAM_SMMU, "FW memory of ICP%u alloc failed", icp_fw->fw_id); + rc = -ENOMEM; + } + + return rc; +} + +void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length) +{ + dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva, + icp_fw->fw_hdl); +} + +int cam_ife_notify_safe_lut_scm(bool safe_trigger) +{ + const uint32_t smmu_se_ife = 0; + uint32_t camera_hw_version, rc = 0; + struct scm_desc description = { + .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL), + .args[0] = smmu_se_ife, + .args[1] = safe_trigger, + }; + + rc = cam_cpas_get_cpas_hw_version(&camera_hw_version); + 
if (!rc) { + switch (camera_hw_version) { + case CAM_CPAS_TITAN_170_V100: + case CAM_CPAS_TITAN_170_V110: + case CAM_CPAS_TITAN_175_V100: + if (scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) { + CAM_ERR(CAM_ISP, "scm call to enable safe failed"); + rc = -EINVAL; + } + break; + default: + break; + } + } + + return rc; +} + +void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa) +{ + int reg_val; + + reg_val = scm_io_read(errata_wa->data.reg_info.offset); + reg_val |= errata_wa->data.reg_info.value; + scm_io_write(errata_wa->data.reg_info.offset, reg_val); +} + +static int camera_platform_compare_dev(struct device *dev, void *data) +{ + return platform_bus_type.match(dev, (struct device_driver *) data); +} + +static int camera_i2c_compare_dev(struct device *dev, void *data) +{ + return i2c_bus_type.match(dev, (struct device_driver *) data); +} +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +void cam_free_clear(const void * ptr) +{ + kfree_sensitive(ptr); +} +#else +void cam_free_clear(const void * ptr) +{ + kzfree(ptr); +} +#endif + +#ifdef CONFIG_DOMAIN_ID_SECURE_CAMERA +int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev, + bool protect, int32_t offset, bool __maybe_unused is_shutdown) +{ + int rc = 0; + struct Object client_env, sc_object; + ITCDriverSensorInfo params = {0}; + struct cam_csiphy_secure_info *secure_info; + + if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) { + CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset"); + return -EINVAL; + } + +#if !IS_ENABLED(CONFIG_QCOM_SI_CORE) + if (!is_shutdown) { +#endif + rc = get_client_env_object(&client_env); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed getting mink env object, rc: %d", rc); + return rc; + } + + rc = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed getting mink sc_object, rc: %d", rc); + return rc; + } + + secure_info = &csiphy_dev->csiphy_info[offset].secure_info; + params.csid_hw_idx_mask = 
secure_info->csid_hw_idx_mask; + params.cdm_hw_idx_mask = secure_info->cdm_hw_idx_mask; + params.vc_mask = secure_info->vc_mask; + params.phy_lane_sel_mask = + csiphy_dev->csiphy_info[offset].csiphy_phy_lane_sel_mask; + params.protect = protect ? 1 : 0; + + rc = ITrustedCameraDriver_dynamicProtectSensor(sc_object, &params); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Mink secure call failed, rc: %d", rc); + return rc; + } + + rc = Object_release(sc_object); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed releasing secure camera object, rc: %d", rc); + return rc; + } + + rc = Object_release(client_env); + if (rc) { + CAM_ERR(CAM_CSIPHY, "Failed releasing mink env object, rc: %d", rc); + return rc; + } +#if !IS_ENABLED(CONFIG_QCOM_SI_CORE) + } else { + /* This is a temporary work around until the SMC Invoke driver is + * refactored to avoid the dependency on FDs, which was causing issues + * during process shutdown. + */ + rc = qcom_scm_camera_protect_phy_lanes(protect, 0); + if (rc) { + CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed"); + return rc; + } + } +#endif + + return 0; +} +#elif KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE +int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev, + bool protect, int32_t offset, bool __always_unused is_shutdown) +{ + int rc = 0; + + /** + * A check here is made if the target is using + * an older version of the kernel driver (< 6.0) + * with domain id feature present. In this case, + * we are to fail this call, as the new mink call + * is only supported on kernel driver versions 6.0 + * and above, and the new domain id scheme is not + * backwards compatible with the older scheme. 
+ */ + if (csiphy_dev->domain_id_security) { + CAM_ERR(CAM_CSIPHY, + "Domain id support not present on current kernel driver: %d", + LINUX_VERSION_CODE); + return -EINVAL; + } + + if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) { + CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset"); + rc = -EINVAL; + } else if (qcom_scm_camera_protect_phy_lanes(protect, + csiphy_dev->csiphy_info[offset].csiphy_cpas_cp_reg_mask)) { + CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed"); + rc = -EINVAL; + } + + return rc; +} +#else +int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev, + bool protect, int32_t offset, bool __always_unused is_shutdown) +{ + int rc = 0; + struct scm_desc description = { + .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL), + .args[0] = protect, + .args[1] = csiphy_dev->csiphy_info[offset] + .csiphy_cpas_cp_reg_mask, + }; + + if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) { + CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset"); + rc = -EINVAL; + } else if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) { + CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed"); + rc = -EINVAL; + } + + return rc; +} +#endif + +#ifdef CONFIG_SPECTRA_SECURE_CAMNOC_REG_UPDATE +int cam_update_camnoc_qos_settings(uint32_t use_case_id, + uint32_t qos_cnt, struct qcom_scm_camera_qos *scm_buf) +{ + int rc = 0; + + rc = qcom_scm_camera_update_camnoc_qos(use_case_id, qos_cnt, scm_buf); + if (rc) + CAM_ERR(CAM_CPAS, "scm call to update QoS failed: %d, use_case_id: %d", + rc, use_case_id); + + return rc; +} +#else +int cam_update_camnoc_qos_settings(uint32_t use_case_id, + uint32_t qos_cnt, struct qcom_scm_camera_qos *scm_buf) +{ + CAM_ERR(CAM_CPAS, "scm call to update QoS is not supported under this kernel"); + return -EOPNOTSUPP; +} +#endif + +/* Callback to compare device from match list before adding as component */ +static inline int camera_component_compare_dev(struct device *dev, void *data) +{ + return dev == data; +} + +/* Add component matches to list for master of aggregate driver */ 
+int camera_component_match_add_drivers(struct device *master_dev, + struct component_match **match_list) +{ + int i, rc = 0; + struct platform_device *pdev = NULL; + struct i2c_client *client = NULL; + struct device *start_dev = NULL, *match_dev = NULL; + + if (!master_dev || !match_list) { + CAM_ERR(CAM_UTIL, "Invalid parameters for component match add"); + rc = -EINVAL; + goto end; + } + + for (i = 0; i < ARRAY_SIZE(cam_component_platform_drivers); i++) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) + struct device_driver const *drv = + &cam_component_platform_drivers[i]->driver; + const void *drv_ptr = (const void *)drv; +#else + struct device_driver *drv = &cam_component_platform_drivers[i]->driver; + void *drv_ptr = (void *)drv; +#endif + start_dev = NULL; + while ((match_dev = bus_find_device(&platform_bus_type, + start_dev, drv_ptr, &camera_platform_compare_dev))) { + put_device(start_dev); + pdev = to_platform_device(match_dev); + CAM_DBG(CAM_UTIL, "Adding matched component:%s", pdev->name); + component_match_add(master_dev, match_list, + camera_component_compare_dev, match_dev); + start_dev = match_dev; + } + put_device(start_dev); + } + + for (i = 0; i < ARRAY_SIZE(cam_component_i2c_drivers); i++) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) + struct device_driver const *drv = + &cam_component_i2c_drivers[i]->driver; + const void *drv_ptr = (const void *)drv; +#else + struct device_driver *drv = &cam_component_i2c_drivers[i]->driver; + void *drv_ptr = (void *)drv; +#endif + start_dev = NULL; + while ((match_dev = bus_find_device(&i2c_bus_type, + start_dev, drv_ptr, &camera_i2c_compare_dev))) { + put_device(start_dev); + client = to_i2c_client(match_dev); + CAM_DBG(CAM_UTIL, "Adding matched component:%s", client->name); + component_match_add(master_dev, match_list, + camera_component_compare_dev, match_dev); + start_dev = match_dev; + } + put_device(start_dev); + } + +end: + return rc; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) 
+#include +void cam_check_iommu_faults(struct iommu_domain *domain, + struct cam_smmu_pf_info *pf_info) +{ + struct qcom_iommu_fault_ids fault_ids = {0, 0, 0}; + + if (qcom_iommu_get_fault_ids(domain, &fault_ids)) + CAM_ERR(CAM_SMMU, "Cannot get smmu fault ids"); + else + CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d", + fault_ids.bid, fault_ids.pid, fault_ids.mid); + + pf_info->bid = fault_ids.bid; + pf_info->pid = fault_ids.pid; + pf_info->mid = fault_ids.mid; +} +#else +void cam_check_iommu_faults(struct iommu_domain *domain, + struct cam_smmu_pf_info *pf_info) +{ + struct iommu_fault_ids fault_ids = {0, 0, 0}; + + if (iommu_get_fault_ids(domain, &fault_ids)) + CAM_ERR(CAM_SMMU, "Error: Can not get smmu fault ids"); + + CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d", + fault_ids.bid, fault_ids.pid, fault_ids.mid); + + pf_info->bid = fault_ids.bid; + pf_info->pid = fault_ids.pid; + pf_info->mid = fault_ids.mid; +} +#endif + +static int inline cam_subdev_list_cmp(struct cam_subdev *entry_1, struct cam_subdev *entry_2) +{ + if (entry_1->close_seq_prior > entry_2->close_seq_prior) + return 1; + else if (entry_1->close_seq_prior < entry_2->close_seq_prior) + return -1; + else + return 0; +} + +#if (KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE) +int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr) +{ + struct iosys_map mapping = {0}; +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + int error_code = dma_buf_vmap_unlocked(dmabuf, &mapping); +#else + int error_code = dma_buf_vmap(dmabuf, &mapping); +#endif + + if (error_code) { + *vaddr = 0; + } else { + *vaddr = (mapping.is_iomem) ? 
+ (uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr; + CAM_DBG(CAM_MEM, + "dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p", + dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr); + } + + return error_code; +} + +void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr) +{ + struct iosys_map mapping = IOSYS_MAP_INIT_VADDR(vaddr); + +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + dma_buf_vunmap_unlocked(dmabuf, &mapping); +#else + dma_buf_vunmap(dmabuf, &mapping); +#endif +} + +#elif (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) +int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr) +{ + struct dma_buf_map mapping; + int error_code = dma_buf_vmap(dmabuf, &mapping); + + if (error_code) { + *vaddr = 0; + } else { + *vaddr = (mapping.is_iomem) ? + (uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr; + CAM_DBG(CAM_MEM, + "dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p", + dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr); + } + + return error_code; +} + +void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr) +{ + struct dma_buf_map mapping = DMA_BUF_MAP_INIT_VADDR(vaddr); + + dma_buf_vunmap(dmabuf, &mapping); +} + +#else +int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr) +{ + int error_code = 0; + void *addr = dma_buf_vmap(dmabuf); + + if (!addr) { + *vaddr = 0; + error_code = -ENOSPC; + } else { + *vaddr = (uintptr_t)addr; + } + + return error_code; +} + +void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr) +{ + dma_buf_vunmap(dmabuf, vaddr); +} +#endif + +struct sg_table *cam_compat_dmabuf_map_attach(struct dma_buf_attachment *attach, + enum dma_data_direction dma_dir) +{ +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + return dma_buf_map_attachment_unlocked(attach, dma_dir); +#else + return dma_buf_map_attachment(attach, dma_dir); +#endif +} + +void 
cam_compat_dmabuf_unmap_attach(struct dma_buf_attachment *attach, + struct sg_table *table, enum dma_data_direction dma_dir) +{ +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + dma_buf_unmap_attachment_unlocked(attach, table, dma_dir); +#else + dma_buf_unmap_attachment(attach, table, dma_dir); +#endif +} + +#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) +void cam_smmu_util_iommu_custom(struct device *dev, + dma_addr_t discard_start, size_t discard_length) +{ + +} + +int cam_req_mgr_ordered_list_cmp(void *priv, + const struct list_head *head_1, const struct list_head *head_2) +{ + return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list), + list_entry(head_2, struct cam_subdev, list)); +} +#else +void cam_smmu_util_iommu_custom(struct device *dev, + dma_addr_t discard_start, size_t discard_length) +{ + iommu_dma_enable_best_fit_algo(dev); + + if (discard_start) + iommu_dma_reserve_iova(dev, discard_start, discard_length); + + return; +} + +int cam_req_mgr_ordered_list_cmp(void *priv, + struct list_head *head_1, struct list_head *head_2) +{ + return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list), + list_entry(head_2, struct cam_subdev, list)); +} +#endif + +#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) +long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name) +{ + long ret = 0; + + ret = dma_buf_set_name(dmabuf, name); + + return ret; +} +#else +long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name) +{ + return 0; +} +#endif + +#if KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE +void cam_eeprom_spi_driver_remove(struct spi_device *sdev) +{ + struct v4l2_subdev *sd = spi_get_drvdata(sdev); + struct cam_eeprom_ctrl_t *e_ctrl; + struct cam_eeprom_soc_private *soc_private; + + if (!sd) { + CAM_ERR(CAM_EEPROM, "Subdevice is NULL"); + return; + } + + e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd); + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "eeprom device is NULL"); + return; + } + + 
mutex_lock(&(e_ctrl->eeprom_mutex)); + cam_eeprom_shutdown(e_ctrl); + mutex_unlock(&(e_ctrl->eeprom_mutex)); + mutex_destroy(&(e_ctrl->eeprom_mutex)); + cam_unregister_subdev(&(e_ctrl->v4l2_dev_str)); + CAM_MEM_FREE(e_ctrl->io_master_info.spi_client); + e_ctrl->io_master_info.spi_client = NULL; + soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + if (soc_private) { + CAM_MEM_FREE(soc_private->power_info.gpio_num_info); + soc_private->power_info.gpio_num_info = NULL; + CAM_MEM_FREE(soc_private); + soc_private = NULL; + } + v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL); + CAM_MEM_FREE(e_ctrl); +} + +int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info) +{ + int rc = 0, i; + struct device_node *of_node = NULL; + + of_node = soc_info->dev->of_node; + + for (i = 0; i < soc_info->irq_count; i++) { + rc = of_property_read_string_index(of_node, "interrupt-names", + i, &soc_info->irq_name[i]); + if (rc) { + CAM_ERR(CAM_UTIL, "Failed to get irq names at i = %d rc = %d", + i, rc); + return -EINVAL; + } + + soc_info->irq_num[i] = platform_get_irq(soc_info->pdev, i); + if (soc_info->irq_num[i] < 0) { + rc = soc_info->irq_num[i]; + CAM_ERR(CAM_UTIL, "Failed to get irq resource at i = %d rc = %d", + i, rc); + return rc; + } + } + + return rc; +} +#else +int cam_eeprom_spi_driver_remove(struct spi_device *sdev) +{ + struct v4l2_subdev *sd = spi_get_drvdata(sdev); + struct cam_eeprom_ctrl_t *e_ctrl; + struct cam_eeprom_soc_private *soc_private; + struct cam_hw_soc_info *soc_info; + + if (!sd) { + CAM_ERR(CAM_EEPROM, "Subdevice is NULL"); + return -EINVAL; + } + + e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd); + if (!e_ctrl) { + CAM_ERR(CAM_EEPROM, "eeprom device is NULL"); + return -EINVAL; + } + + soc_info = &e_ctrl->soc_info; + mutex_lock(&(e_ctrl->eeprom_mutex)); + cam_eeprom_shutdown(e_ctrl); + mutex_unlock(&(e_ctrl->eeprom_mutex)); + mutex_destroy(&(e_ctrl->eeprom_mutex)); + 
cam_unregister_subdev(&(e_ctrl->v4l2_dev_str)); + CAM_MEM_FREE(e_ctrl->io_master_info.spi_client); + e_ctrl->io_master_info.spi_client = NULL; + soc_private = + (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private; + if (soc_private) { + CAM_MEM_FREE(soc_private->power_info.gpio_num_info); + soc_private->power_info.gpio_num_info = NULL; + CAM_MEM_FREE(soc_private); + soc_private = NULL; + } + v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL); + CAM_MEM_FREE(e_ctrl); + + return 0; +} + +int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info) +{ + int rc = 0, i; + struct device_node *of_node = NULL; + + of_node = soc_info->dev->of_node; + + for (i = 0; i < soc_info->irq_count; i++) { + rc = of_property_read_string_index(of_node, "interrupt-names", + i, &soc_info->irq_name[i]); + if (rc) { + CAM_ERR(CAM_UTIL, "Failed to get irq names at i = %d rc = %d", + i, rc); + return -EINVAL; + } + + soc_info->irq_line[i] = platform_get_resource_byname(soc_info->pdev, + IORESOURCE_IRQ, soc_info->irq_name[i]); + if (!soc_info->irq_line[i]) { + CAM_ERR(CAM_UTIL, "Failed to get IRQ line for irq: %s of %s", + soc_info->irq_name[i], soc_info->dev_name); + rc = -ENODEV; + return rc; + } + soc_info->irq_num[i] = soc_info->irq_line[i]->start; + } + + return rc; +} +#endif + +bool cam_secure_get_vfe_fd_port_config(void) +{ +#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE + return false; +#else + return true; +#endif +} + +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE +int cam_get_subpart_info(uint32_t *part_info, uint32_t max_num_cam) +{ + int rc = 0; + int num_cam; + + num_cam = socinfo_get_part_count(PART_CAMERA); + if (num_cam != max_num_cam) { + CAM_ERR(CAM_CPAS, "Unsupported number of parts: %d", num_cam); + return -EINVAL; + } + + /* + * If bit value in part_info is "0" then HW is available. + * If bit value in part_info is "1" then HW is unavailable. 
+ */ + rc = socinfo_get_subpart_info(PART_CAMERA, part_info, num_cam); + if (rc) { + CAM_ERR(CAM_CPAS, "Failed while getting subpart_info, rc = %d.", rc); + return rc; + } + + return 0; +} +#else +int cam_get_subpart_info(uint32_t *part_info, uint32_t max_num_cam) +{ + return 0; +} +#endif + +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE +int cam_iommu_map(struct iommu_domain *domain, + size_t firmware_start, phys_addr_t fw_hdl, + size_t firmware_len, int prot) +{ + int rc = 0; + + rc = iommu_map(domain, firmware_start, + fw_hdl, firmware_len, + prot, GFP_ATOMIC); + return rc; +} +#else +int cam_iommu_map(struct iommu_domain *domain, + size_t firmware_start, phys_addr_t fw_hdl, + size_t firmware_len, int prot) +{ + int rc = 0; + + rc = iommu_map(domain, firmware_start, + fw_hdl, firmware_len, + prot); + return rc; +} +#endif + +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE +ssize_t cam_iommu_map_sg(struct iommu_domain *domain, + dma_addr_t iova_start, struct scatterlist *sgl, + uint64_t orig_nents, int prot) +{ + ssize_t size = 0; + + size = iommu_map_sg(domain, + iova_start, + sgl, orig_nents, + prot, GFP_ATOMIC); + return size; +} +#else +ssize_t cam_iommu_map_sg(struct iommu_domain *domain, + dma_addr_t iova_start, struct scatterlist *sgl, + uint64_t orig_nents, int prot) +{ + ssize_t size = 0; + + size = iommu_map_sg(domain, iova_start, + sgl, orig_nents, + prot); + return size; +} +#endif + +int16_t cam_get_gpio_counts(struct cam_hw_soc_info *soc_info) +{ + struct device_node *of_node = NULL; + int16_t gpio_array_size = 0; + + of_node = soc_info->dev->of_node; +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE + gpio_array_size = of_count_phandle_with_args( + of_node, "gpios", "#gpio-cells"); +#else + gpio_array_size = of_gpio_count(of_node); +#endif + + return gpio_array_size; +} + +uint16_t cam_get_named_gpio(struct cam_hw_soc_info *soc_info, + int index) +{ + struct device_node *of_node = NULL; + uint16_t gpio_pin = 0; + + of_node = 
soc_info->dev->of_node; +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE + gpio_pin = of_get_named_gpio(of_node, "gpios", index); +#else + gpio_pin = of_get_gpio(of_node, index); +#endif + + return gpio_pin; +} + +inline struct icc_path *cam_icc_get_path(struct device *dev, + const int src_id, const int dst_id, const char *path_name, bool use_path_name) +{ + CAM_DBG(CAM_UTIL, "Get icc path name: %s src_id:%d dst_id:%d use_path_name:%s", path_name, + src_id, dst_id, CAM_BOOL_TO_YESNO(use_path_name)); + +#if KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE + if (!use_path_name) { + CAM_ERR(CAM_UTIL, "Must use path names to get icc path handle"); + return NULL; + } + + return of_icc_get(dev, path_name); +#else + if (use_path_name) + return of_icc_get(dev, path_name); + else + return icc_get(dev, src_id, dst_id); +#endif +} + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#ifdef CONFIG_ARCH_QTI_VM +#if KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE +void *cam_mem_heap_add_kernel_pool(struct dma_heap *heap, size_t size) +{ + /* Comment out logic that depends on memory team's change temporary */ + // return qcom_tvm_heap_add_kernel_pool(heap, size); + return NULL; +} + +void cam_mem_heap_remove_kernel_pool(void *handle) +{ + /* Comment out logic that depends on memory team's change temporary */ + // qcom_tvm_heap_remove_kernel_pool(handle); +} +#else +void *cam_mem_heap_add_kernel_pool(struct dma_heap *heap, size_t size) +{ + /* Comment out logic that depends on memory team's change temporary */ + // return qcom_tui_heap_add_kernel_pool(heap, size); + return NULL; +} + +void cam_mem_heap_remove_kernel_pool(void *handle) +{ + /* Comment out logic that depends on memory team's change temporary */ + // qcom_tui_heap_remove_kernel_pool(handle); +} +#endif +#endif +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_compat.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_compat.h new file mode 100644 index 0000000000..6e7989be4e --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_compat.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_COMPAT_H_ +#define _CAM_COMPAT_H_ + +#include +#include +#include +#include +#if KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE +#include +#else +#include +#endif +#include +#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE +#include +#endif +#include +#include + +#include "cam_csiphy_dev.h" +#include "cam_cpastop_hw.h" +#include "cam_smmu_api.h" + +#ifdef CONFIG_SECURE_CAMERA_25 +#include +#endif + +#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE +#include +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) +#include +#include +#define VFL_TYPE_VIDEO VFL_TYPE_GRABBER +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) +#include +#include +#endif + +#if KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE +MODULE_IMPORT_NS(DMA_BUF); +#endif + +#ifdef CONFIG_DOMAIN_ID_SECURE_CAMERA +#include +#include +#include +#endif + +#if IS_REACHABLE(CONFIG_INTERCONNECT_QCOM) +#include +#endif + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#ifdef CONFIG_ARCH_QTI_VM +#include +#if KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE +/* Comment out logic that depends on memory team's change temporary */ +// #include +#else +#include +#endif +#endif +#endif + +#if (KERNEL_VERSION(6, 7, 0) <= LINUX_VERSION_CODE) +#define CAM_SUBDEV_NAME_SIZE 32 +#else +#define CAM_SUBDEV_NAME_SIZE V4L2_SUBDEV_NAME_SIZE +#endif + +#define IS_CSF25(x, y) ((((x) == 2) && ((y) == 5)) ? 
1 : 0) + +/* Unblock compilation if target does not support camnoc reg update through HYP */ +#ifndef CONFIG_SPECTRA_SECURE_CAMNOC_REG_UPDATE +#define QCOM_SCM_CAMERA_MAX_QOS_CNT 20 +struct qcom_scm_camera_qos { + u32 offset; + u32 val; +}; +#endif + +struct cam_fw_alloc_info { + struct device *fw_dev; + void *fw_kva; + uint32_t fw_id; + uint64_t fw_hdl; +}; + +int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length); +void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length); +void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa); +int cam_update_camnoc_qos_settings(uint32_t use_case_id, + uint32_t num_arg, struct qcom_scm_camera_qos *scm_buf); +int cam_ife_notify_safe_lut_scm(bool safe_trigger); +int camera_component_match_add_drivers(struct device *master_dev, + struct component_match **match_list); +int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev, + bool protect, int32_t offset, bool is_shutdown); +bool cam_is_mink_api_available(void); +void cam_free_clear(const void *); +void cam_check_iommu_faults(struct iommu_domain *domain, + struct cam_smmu_pf_info *pf_info); +static inline int cam_get_ddr_type(void) { return of_fdt_get_ddrtype(); } +int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr); +void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr); +struct sg_table *cam_compat_dmabuf_map_attach( + struct dma_buf_attachment *attach, enum dma_data_direction dma_dir); +void cam_compat_dmabuf_unmap_attach(struct dma_buf_attachment *attach, + struct sg_table *table, enum dma_data_direction dma_dir); +void cam_smmu_util_iommu_custom(struct device *dev, + dma_addr_t discard_start, size_t discard_length); + +const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index); + +int cam_cpas_start_drv_for_dev(const struct device *dev); + +int cam_cpas_stop_drv_for_dev(const struct device *dev); + +int cam_cpas_drv_channel_switch_for_dev(const struct device *dev); 
+ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) +int cam_req_mgr_ordered_list_cmp(void *priv, + const struct list_head *head_1, const struct list_head *head_2); +#else +int cam_req_mgr_ordered_list_cmp(void *priv, + struct list_head *head_1, struct list_head *head_2); +#endif + +long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name); + +#if KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE +void cam_eeprom_spi_driver_remove(struct spi_device *sdev); +#else +int cam_eeprom_spi_driver_remove(struct spi_device *sdev); +#endif + +int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info); + +bool cam_secure_get_vfe_fd_port_config(void); + +int cam_smmu_fetch_csf_version(struct cam_csf_version *csf_version); + +unsigned long cam_update_dma_map_attributes(unsigned long attr); + +size_t cam_align_dma_buf_size(size_t len); + +int cam_get_subpart_info(uint32_t *part_info, uint32_t max_num_cam); + +int cam_iommu_map(struct iommu_domain *domain, + size_t firmware_start, phys_addr_t fw_hdl, size_t firmware_len, + int prot); + +ssize_t cam_iommu_map_sg(struct iommu_domain *domain, + dma_addr_t iova_start, struct scatterlist *sgl, uint64_t orig_nents, + int prot); + +int16_t cam_get_gpio_counts(struct cam_hw_soc_info *soc_info); + +uint16_t cam_get_named_gpio(struct cam_hw_soc_info *soc_info, + int index); + +#if IS_REACHABLE(CONFIG_INTERCONNECT_QCOM) +inline struct icc_path *cam_icc_get_path(struct device *dev, + const int src_id, const int dst_id, const char *path_name, bool use_path_name); +#endif + +#if IS_REACHABLE(CONFIG_DMABUF_HEAPS) +#ifdef CONFIG_ARCH_QTI_VM +void *cam_mem_heap_add_kernel_pool(struct dma_heap *heap, size_t size); + +void cam_mem_heap_remove_kernel_pool(void *handle); +#endif +#endif +#endif /* _CAM_COMPAT_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_cx_ipeak.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_cx_ipeak.c new file mode 100644 index 0000000000..b0f93ba045 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_cx_ipeak.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "cam_soc_util.h" +#include "cam_debug_util.h" + +static struct cx_ipeak_client *cam_cx_ipeak; +static int cx_ipeak_level = CAM_NOMINAL_VOTE; +static int cx_default_ipeak_mask; +static int cx_current_ipeak_mask; +static int cam_cx_client_cnt; + +int cam_cx_ipeak_register_cx_ipeak(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + + soc_info->cam_cx_ipeak_enable = true; + soc_info->cam_cx_ipeak_bit = 1 << cam_cx_client_cnt++; + cx_default_ipeak_mask |= soc_info->cam_cx_ipeak_bit; + + if (cam_cx_ipeak) + goto exit; + + cam_cx_ipeak = cx_ipeak_register(soc_info->dev->of_node, + "qcom,cam-cx-ipeak"); + + if (cam_cx_ipeak) { + goto exit; + } else { + rc = -EINVAL; + goto exit; + } + +exit: + CAM_DBG(CAM_UTIL, "cam_cx_ipeak is enabled for %s\n" + "mask = %x cx_default_ipeak_mask = %x", + soc_info->dev_name, soc_info->cam_cx_ipeak_bit, + cx_default_ipeak_mask); + return rc; +} + +int cam_cx_ipeak_update_vote_cx_ipeak(struct cam_hw_soc_info *soc_info, + int32_t apply_level) +{ + int32_t soc_cx_ipeak_bit = soc_info->cam_cx_ipeak_bit; + int ret = 0; + + CAM_DBG(CAM_UTIL, "E: apply_level = %d cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x", + apply_level, cx_current_ipeak_mask, soc_cx_ipeak_bit); + + if (apply_level < cx_ipeak_level && + (cx_current_ipeak_mask & soc_cx_ipeak_bit)) { + if (cx_current_ipeak_mask == cx_default_ipeak_mask) { + ret = cx_ipeak_update(cam_cx_ipeak, false); + if (ret) + goto exit; + CAM_DBG(CAM_UTIL, + "X: apply_level = %d cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x %s UNVOTE", + apply_level, cx_current_ipeak_mask, + soc_cx_ipeak_bit, soc_info->dev_name); + } + cx_current_ipeak_mask &= (~soc_cx_ipeak_bit); + CAM_DBG(CAM_UTIL, + "X: apply_level = %d 
cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x %s DISABLE_BIT", + apply_level, cx_current_ipeak_mask, + soc_cx_ipeak_bit, soc_info->dev_name); + goto exit; + } else if (apply_level < cx_ipeak_level) { + CAM_DBG(CAM_UTIL, + "X: apply_level = %d cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x NO AI", + apply_level, cx_current_ipeak_mask, soc_cx_ipeak_bit); + goto exit; + } + + cx_current_ipeak_mask |= soc_cx_ipeak_bit; + CAM_DBG(CAM_UTIL, + "X: apply_level = %d cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x %s ENABLE_BIT", + apply_level, cx_current_ipeak_mask, + soc_cx_ipeak_bit, soc_info->dev_name); + if (cx_current_ipeak_mask == cx_default_ipeak_mask) { + ret = cx_ipeak_update(cam_cx_ipeak, true); + if (ret) + goto exit; + CAM_DBG(CAM_UTIL, + "X: apply_level = %d cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x %s VOTE", + apply_level, cx_current_ipeak_mask, + soc_cx_ipeak_bit, soc_info->dev_name); + } + +exit: + return ret; +} + +int cam_cx_ipeak_unvote_cx_ipeak(struct cam_hw_soc_info *soc_info) +{ + int32_t soc_cx_ipeak_bit = soc_info->cam_cx_ipeak_bit; + + CAM_DBG(CAM_UTIL, "E:cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x", + cx_current_ipeak_mask, soc_cx_ipeak_bit); + if (cx_current_ipeak_mask == cx_default_ipeak_mask) { + if (cam_cx_ipeak) + cx_ipeak_update(cam_cx_ipeak, false); + CAM_DBG(CAM_UTIL, "X:cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x UNVOTE", + cx_current_ipeak_mask, soc_cx_ipeak_bit); + } + cx_current_ipeak_mask &= (~soc_cx_ipeak_bit); + CAM_DBG(CAM_UTIL, "X:cx_current_ipeak_mask = %x\n" + "soc_cx_ipeak_bit = %x", + cx_current_ipeak_mask, soc_cx_ipeak_bit); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_cx_ipeak.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_cx_ipeak.h new file mode 100644 index 0000000000..4c88fa3450 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_cx_ipeak.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only 
*/ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_CX_IPEAK_H_ +#define _CAM_CX_IPEAK_H_ + +#include "cam_soc_util.h" + +#ifndef CONFIG_QCOM_CX_IPEAK +static inline int cam_cx_ipeak_register_cx_ipeak + (struct cam_hw_soc_info *soc_info) +{ + return 0; +} + +static inline int cam_cx_ipeak_update_vote_cx_ipeak + (struct cam_hw_soc_info *soc_info, int32_t apply_level) +{ + return 0; +} + +static inline int cam_cx_ipeak_unvote_cx_ipeak + (struct cam_hw_soc_info *soc_info) +{ + return 0; +} +#else +int cam_cx_ipeak_register_cx_ipeak(struct cam_hw_soc_info *soc_info); + +int cam_cx_ipeak_update_vote_cx_ipeak(struct cam_hw_soc_info *soc_info, + int32_t apply_level); +int cam_cx_ipeak_unvote_cx_ipeak(struct cam_hw_soc_info *soc_info); +#endif + +#endif /* _CAM_CX_IPEAK_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_debug_util.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_debug_util.c new file mode 100644 index 0000000000..ecd3807d1b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_debug_util.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "cam_trace.h" + +#include "cam_debug_util.h" +#include "cam_mem_mgr_api.h" + +unsigned long long debug_mdl; +module_param(debug_mdl, ullong, 0644); + +/* 0x0 - only logs, 0x1 - only trace, 0x2 - logs + trace */ +uint debug_type; +module_param(debug_type, uint, 0644); + +uint debug_priority; +module_param(debug_priority, uint, 0644); + +uint debug_drv; +module_param(debug_drv, uint, 0644); + +uint debug_disable_rt_clk_bw_limit; +module_param(debug_disable_rt_clk_bw_limit, uint, 0644); + +uint debug_bypass_drivers; +module_param(debug_bypass_drivers, uint, 0644); + +struct camera_debug_settings cam_debug; + +struct dentry *cam_debugfs_root; + +void cam_debugfs_init(void) +{ + struct dentry *tmp; + + if (!cam_debugfs_available()) { + cam_debugfs_root = NULL; + CAM_DBG(CAM_UTIL, "debugfs not available"); + return; + } + + if (cam_debugfs_root) { + CAM_WARN(CAM_UTIL, "already created debugfs root"); + return; + } + + tmp = debugfs_create_dir("camera", NULL); + if (IS_ERR_VALUE(tmp)) { + CAM_ERR(CAM_UTIL, "failed to create debugfs root folder (rc=%d)", PTR_ERR(tmp)); + return; + } + + cam_debugfs_root = tmp; + CAM_DBG(CAM_UTIL, "successfully created debugfs root"); +} + +void cam_debugfs_deinit(void) +{ + if (!cam_debugfs_available()) + return; + + debugfs_remove_recursive(cam_debugfs_root); + cam_debugfs_root = NULL; +} + +int cam_debugfs_create_subdir(const char *name, struct dentry **subdir) +{ + struct dentry *tmp; + + if (!cam_debugfs_root) { + CAM_WARN(CAM_UTIL, "debugfs root not created"); + *subdir = NULL; + return -ENODEV; + } + + if (!subdir) { + CAM_ERR(CAM_UTIL, "invalid subdir pointer %pK", subdir); + return -EINVAL; + } + + tmp = debugfs_create_dir(name, cam_debugfs_root); + if (IS_ERR_VALUE(tmp)) { + CAM_ERR(CAM_UTIL, "failed to create debugfs subdir (name=%s, rc=%d)", name, + PTR_ERR(tmp)); + return PTR_ERR(tmp); + } + + *subdir = tmp; + return 0; +} + +int cam_debugfs_lookup_subdir(const char 
*name, struct dentry **subdir) +{ + if (!cam_debugfs_root) { + CAM_WARN(CAM_UTIL, "debugfs root not created"); + *subdir = NULL; + return -ENODEV; + } + + if (!subdir) { + CAM_ERR(CAM_UTIL, "invalid subdir pointer %pK", subdir); + return -EINVAL; + } + + *subdir = debugfs_lookup(name, cam_debugfs_root); + return (*subdir) ? 0 : -ENOENT; +} + +const struct camera_debug_settings *cam_debug_get_settings(void) +{ + return &cam_debug; +} + +static int cam_debug_parse_cpas_settings(const char *setting, u64 value) +{ + if (!strcmp(setting, "camnoc_bw")) { + cam_debug.cpas_settings.camnoc_bw = value; + } else if (!strcmp(setting, "mnoc_hf_0_ab_bw")) { + cam_debug.cpas_settings.mnoc_hf_0_ab_bw = value; + } else if (!strcmp(setting, "mnoc_hf_0_ib_bw")) { + cam_debug.cpas_settings.mnoc_hf_0_ib_bw = value; + } else if (!strcmp(setting, "mnoc_hf_1_ab_bw")) { + cam_debug.cpas_settings.mnoc_hf_1_ab_bw = value; + } else if (!strcmp(setting, "mnoc_hf_1_ib_bw")) { + cam_debug.cpas_settings.mnoc_hf_1_ib_bw = value; + } else if (!strcmp(setting, "mnoc_sf_0_ab_bw")) { + cam_debug.cpas_settings.mnoc_sf_0_ab_bw = value; + } else if (!strcmp(setting, "mnoc_sf_0_ib_bw")) { + cam_debug.cpas_settings.mnoc_sf_0_ib_bw = value; + } else if (!strcmp(setting, "mnoc_sf_1_ab_bw")) { + cam_debug.cpas_settings.mnoc_sf_1_ab_bw = value; + } else if (!strcmp(setting, "mnoc_sf_1_ib_bw")) { + cam_debug.cpas_settings.mnoc_sf_1_ib_bw = value; + } else if (!strcmp(setting, "mnoc_sf_icp_ab_bw")) { + cam_debug.cpas_settings.mnoc_sf_icp_ab_bw = value; + } else if (!strcmp(setting, "mnoc_sf_icp_ib_bw")) { + cam_debug.cpas_settings.mnoc_sf_icp_ib_bw = value; + } else if (!strcmp(setting, "cam_ife_0_drv_ab_high_bw")) { + cam_debug.cpas_settings.cam_ife_0_drv_ab_high_bw = value; + } else if (!strcmp(setting, "cam_ife_0_drv_ib_high_bw")) { + cam_debug.cpas_settings.cam_ife_0_drv_ib_high_bw = value; + } else if (!strcmp(setting, "cam_ife_1_drv_ab_high_bw")) { + cam_debug.cpas_settings.cam_ife_1_drv_ab_high_bw = 
value; + } else if (!strcmp(setting, "cam_ife_1_drv_ib_high_bw")) { + cam_debug.cpas_settings.cam_ife_1_drv_ib_high_bw = value; + } else if (!strcmp(setting, "cam_ife_2_drv_ab_high_bw")) { + cam_debug.cpas_settings.cam_ife_2_drv_ab_high_bw = value; + } else if (!strcmp(setting, "cam_ife_2_drv_ib_high_bw")) { + cam_debug.cpas_settings.cam_ife_2_drv_ib_high_bw = value; + } else if (!strcmp(setting, "cam_ife_0_drv_ab_low_bw")) { + cam_debug.cpas_settings.cam_ife_0_drv_ab_low_bw = value; + } else if (!strcmp(setting, "cam_ife_0_drv_ib_low_bw")) { + cam_debug.cpas_settings.cam_ife_0_drv_ib_low_bw = value; + } else if (!strcmp(setting, "cam_ife_1_drv_ab_low_bw")) { + cam_debug.cpas_settings.cam_ife_1_drv_ab_low_bw = value; + } else if (!strcmp(setting, "cam_ife_1_drv_ib_low_bw")) { + cam_debug.cpas_settings.cam_ife_1_drv_ib_low_bw = value; + } else if (!strcmp(setting, "cam_ife_2_drv_ab_low_bw")) { + cam_debug.cpas_settings.cam_ife_2_drv_ab_low_bw = value; + } else if (!strcmp(setting, "cam_ife_2_drv_ib_low_bw")) { + cam_debug.cpas_settings.cam_ife_2_drv_ib_low_bw = value; + } else if (!strcmp(setting, "cam_ife_0_drv_low_set_zero")) { + cam_debug.cpas_settings.cam_ife_0_drv_low_set_zero = value; + } else if (!strcmp(setting, "cam_ife_1_drv_low_set_zero")) { + cam_debug.cpas_settings.cam_ife_1_drv_low_set_zero = value; + } else if (!strcmp(setting, "cam_ife_2_drv_low_set_zero")) { + cam_debug.cpas_settings.cam_ife_2_drv_low_set_zero = value; + } else { + CAM_ERR(CAM_UTIL, "Unsupported cpas sysfs entry"); + return -EINVAL; + } + + cam_debug.cpas_settings.is_updated = true; + return 0; +} + +ssize_t cam_debug_sysfs_node_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int rc = 0; + char *local_buf = NULL, *local_buf_temp = NULL; + char *driver; + char *setting = NULL; + char *value_str = NULL; + u64 value; + + CAM_INFO(CAM_UTIL, "Sysfs debug attr name:[%s] buf:[%s] bytes:[%d]", + attr->attr.name, buf, count); + local_buf = 
kmemdup(buf, (count + sizeof(char)), GFP_KERNEL); + local_buf_temp = local_buf; + driver = strsep(&local_buf, "#"); + if (!driver) { + CAM_ERR(CAM_UTIL, + "Invalid input driver name buf:[%s], count:%d", + buf, count); + goto error; + } + + setting = strsep(&local_buf, "="); + if (!setting) { + CAM_ERR(CAM_UTIL, "Invalid input setting buf:[%s], count:%d", + buf, count); + goto error; + } + + value_str = strsep(&local_buf, "="); + if (!value_str) { + CAM_ERR(CAM_UTIL, "Invalid input value buf:[%s], count:%d", + buf, count); + goto error; + } + + rc = kstrtou64(value_str, 0, &value); + if (rc < 0) { + CAM_ERR(CAM_UTIL, "Error converting value:[%s], buf:[%s]", + value_str, buf); + goto error; + } + + CAM_INFO(CAM_UTIL, + "Processing sysfs store for driver:[%s], setting:[%s], value:[%llu]", + driver, setting, value); + + if (!strcmp(driver, "cpas")) { + rc = cam_debug_parse_cpas_settings(setting, value); + if (rc) + goto error; + } else { + CAM_ERR(CAM_UTIL, "Unsupported driver in camera debug node"); + goto error; + } + + kfree(local_buf_temp); + return count; + +error: + kfree(local_buf_temp); + return -EPERM; +} + +static inline void __cam_print_to_buffer(char *buf, const size_t buf_size, size_t *len, + unsigned int tag, enum cam_debug_module_id module_id, const char *fmt, va_list args) +{ + size_t buf_len = *len; + + buf_len += scnprintf(buf + buf_len, ((buf_size <= buf_len) ? 0 : (buf_size - buf_len)), + "\n%-8s: %s:\t", CAM_LOG_TAG_NAME(tag), CAM_DBG_MOD_NAME(module_id)); + buf_len += vscnprintf(buf + buf_len, ((buf_size <= buf_len) ? 0 : (buf_size - buf_len)), + fmt, args); + *len = buf_len; +} + +void cam_print_to_buffer(char *buf, const size_t buf_size, size_t *len, unsigned int tag, + unsigned long long module_id, const char *fmt, ...) 
+{ + va_list args; + + if ((*len) >= buf_size) { + CAM_ERR(CAM_UTIL, "Inadequate buffer size: %lu for length: %lu", buf_size, len); + return; + } + + va_start(args, fmt); + __cam_print_to_buffer(buf, buf_size, len, tag, module_id, fmt, args); + va_end(args); +} + +static void __cam_print_log(int type, const char *fmt, ...) +{ + va_list args1, args2, args; + + va_start(args, fmt); + va_copy(args1, args); + va_copy(args2, args1); + if ((type & CAM_PRINT_LOG) && (debug_type != 1)) + vprintk(fmt, args1); + if ((type & CAM_PRINT_TRACE) && (debug_type != 0)) { + /* skip the first character which is used by printk to identify the log level */ + trace_cam_log_debug(fmt + sizeof(KERN_INFO) - 1, &args2); + } + va_end(args2); + va_end(args1); + va_end(args); +} + +void cam_print_log(int type, int module, int tag, const char *func, + int line, const char *fmt, ...) +{ + char buf[CAM_LOG_BUF_LEN] = {0,}; + va_list args; + + if (!type) + return; + + va_start(args, fmt); + vscnprintf(buf, CAM_LOG_BUF_LEN, fmt, args); + __cam_print_log(type, __CAM_LOG_FMT, + CAM_LOG_TAG_NAME(tag), CAM_DBG_MOD_NAME(module), func, + line, buf); + va_end(args); +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_debug_util.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_debug_util.h new file mode 100644 index 0000000000..0303a61173 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_debug_util.h @@ -0,0 +1,465 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_DEBUG_UTIL_H_ +#define _CAM_DEBUG_UTIL_H_ + +#include +#include +#include "cam_presil_hw_access.h" +#include "cam_trace.h" + +extern unsigned long long debug_mdl; +extern unsigned int debug_type; +extern unsigned int debug_priority; +extern unsigned int debug_drv; +extern unsigned int debug_disable_rt_clk_bw_limit; +extern unsigned int debug_bypass_drivers; + +#define CAM_IS_NULL_TO_STR(ptr) ((ptr) ? "Non-NULL" : "NULL") + +#define CAM_LOG_BUF_LEN 512 +#define BYPASS_VALUE 0xDEADBEEF +#define DEFAULT_CLK_VALUE 19200000 + +/* Module IDs used for debug logging */ +enum cam_debug_module_id { + CAM_CDM, /* bit 0 */ + CAM_CORE, /* bit 1 */ + CAM_CPAS, /* bit 2 */ + CAM_ISP, /* bit 3 */ + CAM_CRM, /* bit 4 */ + CAM_SENSOR, /* bit 5 */ + CAM_SMMU, /* bit 6 */ + CAM_SYNC, /* bit 7 */ + CAM_ICP, /* bit 8 */ + CAM_JPEG, /* bit 9 */ + CAM_FD, /* bit 10 */ + CAM_LRME, /* bit 11 */ + CAM_FLASH, /* bit 12 */ + CAM_ACTUATOR, /* bit 13 */ + CAM_CCI, /* bit 14 */ + CAM_CSIPHY, /* bit 15 */ + CAM_EEPROM, /* bit 16 */ + CAM_UTIL, /* bit 17 */ + CAM_HFI, /* bit 18 */ + CAM_CTXT, /* bit 19 */ + CAM_OIS, /* bit 20 */ + CAM_RES, /* bit 21 */ + CAM_MEM, /* bit 22 */ + CAM_IRQ_CTRL, /* bit 23 */ + CAM_REQ, /* bit 24 */ + CAM_PERF, /* bit 25 */ + CAM_CUSTOM, /* bit 26 */ + CAM_PRESIL, /* bit 27 */ + CAM_OPE, /* bit 28 */ + CAM_IO_ACCESS, /* bit 29 */ + CAM_SFE, /* bit 30 */ + CAM_CRE, /* bit 31 */ + CAM_PRESIL_CORE, /* bit 32 */ + CAM_TPG, /* bit 33 */ + CAM_DMA_FENCE, /* bit 34 */ + CAM_SENSOR_UTIL, /* bit 35 */ + CAM_SYNX, /* bit 36 */ + CAM_VMRM, /* bit 37 */ + CAM_IO_DUMP, /* bit 38 */ + CAM_SENSOR_IO, /* bit 39 */ + CAM_DBG_MOD_MAX +}; + +/* Log level types */ +enum cam_debug_log_level { + CAM_TYPE_TRACE, + CAM_TYPE_ERR, + CAM_TYPE_WARN, + CAM_TYPE_INFO, + CAM_TYPE_DBG, + CAM_TYPE_MAX, +}; + +/* + * enum cam_debug_priority - Priority of debug log (0 = Lowest) + */ +enum cam_debug_priority { + CAM_DBG_PRIORITY_0, + CAM_DBG_PRIORITY_1, + CAM_DBG_PRIORITY_2, +}; + 
+static const char *cam_debug_mod_name[CAM_DBG_MOD_MAX] = { + [CAM_CDM] = "CAM-CDM", + [CAM_CORE] = "CAM-CORE", + [CAM_CRM] = "CAM-CRM", + [CAM_CPAS] = "CAM-CPAS", + [CAM_ISP] = "CAM-ISP", + [CAM_SENSOR] = "CAM-SENSOR", + [CAM_SMMU] = "CAM-SMMU", + [CAM_SYNC] = "CAM-SYNC", + [CAM_ICP] = "CAM-ICP", + [CAM_JPEG] = "CAM-JPEG", + [CAM_FD] = "CAM-FD", + [CAM_LRME] = "CAM-LRME", + [CAM_FLASH] = "CAM-FLASH", + [CAM_ACTUATOR] = "CAM-ACTUATOR", + [CAM_CCI] = "CAM-CCI", + [CAM_CSIPHY] = "CAM-CSIPHY", + [CAM_EEPROM] = "CAM-EEPROM", + [CAM_UTIL] = "CAM-UTIL", + [CAM_CTXT] = "CAM-CTXT", + [CAM_HFI] = "CAM-HFI", + [CAM_OIS] = "CAM-OIS", + [CAM_IRQ_CTRL] = "CAM-IRQ-CTRL", + [CAM_MEM] = "CAM-MEM", + [CAM_PERF] = "CAM-PERF", + [CAM_REQ] = "CAM-REQ", + [CAM_CUSTOM] = "CAM-CUSTOM", + [CAM_OPE] = "CAM-OPE", + [CAM_PRESIL] = "CAM-PRESIL", + [CAM_RES] = "CAM-RES", + [CAM_IO_ACCESS] = "CAM-IO-ACCESS", + [CAM_SFE] = "CAM-SFE", + [CAM_CRE] = "CAM-CRE", + [CAM_PRESIL_CORE] = "CAM-CORE-PRESIL", + [CAM_TPG] = "CAM-TPG", + [CAM_DMA_FENCE] = "CAM-DMA-FENCE", + [CAM_SENSOR_UTIL] = "CAM-SENSOR-UTIL", + [CAM_SYNX] = "CAM_SYNX", + [CAM_VMRM] = "CAM-VMRM", + [CAM_IO_DUMP] = "CAM-IO-DUMP", + [CAM_SENSOR_IO] = "CAM-SENSOR-IO", +}; + +#define ___CAM_DBG_MOD_NAME(module_id) \ +__builtin_choose_expr(((module_id) == CAM_CDM), "CAM-CDM", \ +__builtin_choose_expr(((module_id) == CAM_CORE), "CAM-CORE", \ +__builtin_choose_expr(((module_id) == CAM_CRM), "CAM-CRM", \ +__builtin_choose_expr(((module_id) == CAM_CPAS), "CAM-CPAS", \ +__builtin_choose_expr(((module_id) == CAM_ISP), "CAM-ISP", \ +__builtin_choose_expr(((module_id) == CAM_SENSOR), "CAM-SENSOR", \ +__builtin_choose_expr(((module_id) == CAM_SMMU), "CAM-SMMU", \ +__builtin_choose_expr(((module_id) == CAM_SYNC), "CAM-SYNC", \ +__builtin_choose_expr(((module_id) == CAM_ICP), "CAM-ICP", \ +__builtin_choose_expr(((module_id) == CAM_JPEG), "CAM-JPEG", \ +__builtin_choose_expr(((module_id) == CAM_FD), "CAM-FD", \ +__builtin_choose_expr(((module_id) == 
CAM_LRME), "CAM-LRME", \ +__builtin_choose_expr(((module_id) == CAM_FLASH), "CAM-FLASH", \ +__builtin_choose_expr(((module_id) == CAM_ACTUATOR), "CAM-ACTUATOR", \ +__builtin_choose_expr(((module_id) == CAM_CCI), "CAM-CCI", \ +__builtin_choose_expr(((module_id) == CAM_CSIPHY), "CAM-CSIPHY", \ +__builtin_choose_expr(((module_id) == CAM_EEPROM), "CAM-EEPROM", \ +__builtin_choose_expr(((module_id) == CAM_UTIL), "CAM-UTIL", \ +__builtin_choose_expr(((module_id) == CAM_CTXT), "CAM-CTXT", \ +__builtin_choose_expr(((module_id) == CAM_HFI), "CAM-HFI", \ +__builtin_choose_expr(((module_id) == CAM_OIS), "CAM-OIS", \ +__builtin_choose_expr(((module_id) == CAM_IRQ_CTRL), "CAM-IRQ-CTRL", \ +__builtin_choose_expr(((module_id) == CAM_MEM), "CAM-MEM", \ +__builtin_choose_expr(((module_id) == CAM_PERF), "CAM-PERF", \ +__builtin_choose_expr(((module_id) == CAM_REQ), "CAM-REQ", \ +__builtin_choose_expr(((module_id) == CAM_CUSTOM), "CAM-CUSTOM", \ +__builtin_choose_expr(((module_id) == CAM_OPE), "CAM-OPE", \ +__builtin_choose_expr(((module_id) == CAM_PRESIL), "CAM-PRESIL", \ +__builtin_choose_expr(((module_id) == CAM_RES), "CAM-RES", \ +__builtin_choose_expr(((module_id) == CAM_IO_ACCESS), "CAM-IO-ACCESS", \ +__builtin_choose_expr(((module_id) == CAM_SFE), "CAM-SFE", \ +__builtin_choose_expr(((module_id) == CAM_CRE), "CAM-CRE", \ +__builtin_choose_expr(((module_id) == CAM_PRESIL_CORE), "CAM-CORE-PRESIL", \ +__builtin_choose_expr(((module_id) == CAM_TPG), "CAM-TPG", \ +__builtin_choose_expr(((module_id) == CAM_DMA_FENCE), "CAM-DMA-FENCE", \ +__builtin_choose_expr(((module_id) == CAM_SENSOR_UTIL), "CAM-SENSOR-UTIL", \ +__builtin_choose_expr(((module_id) == CAM_SYNX), "CAM-SYNX", \ +__builtin_choose_expr(((module_id) == CAM_VMRM), "CAM-VMRM", \ +__builtin_choose_expr(((module_id) == CAM_IO_DUMP), "CAM-IO-DUMP", \ +__builtin_choose_expr(((module_id) == CAM_SENSOR_IO), "CAM-SENSOR-IO", \ +"CAMERA")))))))))))))))))))))))))))))))))))))) + +#define CAM_DBG_MOD_NAME(module_id) \ +((module_id < 
CAM_DBG_MOD_MAX) ? cam_debug_mod_name[module_id] : "CAMERA") + +#define __CAM_DBG_MOD_NAME(module_id) \ +__builtin_choose_expr(__builtin_constant_p((module_id)), ___CAM_DBG_MOD_NAME(module_id), \ + CAM_DBG_MOD_NAME(module_id)) + +static const char *cam_debug_tag_name[CAM_TYPE_MAX] = { + [CAM_TYPE_TRACE] = "CAM_TRACE", + [CAM_TYPE_ERR] = "CAM_ERR", + [CAM_TYPE_WARN] = "CAM_WARN", + [CAM_TYPE_INFO] = "CAM_INFO", + [CAM_TYPE_DBG] = "CAM_DBG", +}; + +#define ___CAM_LOG_TAG_NAME(tag) \ +({ \ + static_assert(tag < CAM_TYPE_MAX); \ + cam_debug_tag_name[tag]; \ +}) + +#define CAM_LOG_TAG_NAME(tag) ((tag < CAM_TYPE_MAX) ? cam_debug_tag_name[tag] : "CAM_LOG") + +#define __CAM_LOG_TAG_NAME(tag) \ +__builtin_choose_expr(__builtin_constant_p((tag)), ___CAM_LOG_TAG_NAME(tag), \ + CAM_LOG_TAG_NAME(tag)) + +enum cam_log_print_type { + CAM_PRINT_LOG = 0x1, + CAM_PRINT_TRACE = 0x2, + CAM_PRINT_BOTH = 0x3, +}; + +#define __CAM_LOG_FMT KERN_INFO "%s: %s: %s: %d: %s " + +/** + * cam_print_log() - function to print logs (internal use only, use macros instead) + * + * @type: Corresponds to enum cam_log_print_type, selects if logs are printed in log buffer, + * trace buffers or both + * @module_id: Module calling the log macro + * @tag: Tag for log level + * @func: Function string + * @line: Line number + * @fmt: Formatting string + */ + +void cam_print_log(int type, int module, int tag, const char *func, + int line, const char *fmt, ...); + +#define __CAM_LOG(type, tag, module_id, fmt, args...) \ +({ \ + cam_print_log(type, \ + module_id, tag, __func__, \ + __LINE__, fmt, ##args); \ +}) + +#define CAM_LOG(tag, module_id, fmt, args...) \ +__CAM_LOG(CAM_PRINT_BOTH, tag, module_id, fmt, ##args) + +#define CAM_LOG_RL_CUSTOM(type, module_id, interval, burst, fmt, args...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, (interval * HZ), burst); \ + __CAM_LOG(__ratelimit(&_rs) ? 
CAM_PRINT_BOTH : CAM_PRINT_TRACE, \ + type, module_id, fmt, ##args); \ +}) + +#define CAM_LOG_RL(type, module_id, fmt, args...) \ +CAM_LOG_RL_CUSTOM(type, module_id, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST, \ +fmt, ##args) + +#define __CAM_DBG(module_id, priority, fmt, args...) \ +({ \ + if (unlikely((debug_mdl & BIT_ULL(module_id)) && (priority >= debug_priority))) { \ + CAM_LOG(CAM_TYPE_DBG, module_id, fmt, ##args); \ + } \ +}) + +/** + * CAM_ERR / CAM_WARN / CAM_INFO / CAM_TRACE + * + * @brief: Macros to print logs at respective level error/warn/info/trace. All + * logs except CAM_TRACE are printed in both log and trace buffers. + * + * @__module: Respective enum cam_debug_module_id + * @fmt: Format string + * @args: Arguments to match with format + */ +#define CAM_ERR(__module, fmt, args...) CAM_LOG(CAM_TYPE_ERR, __module, fmt, ##args) +#define CAM_WARN(__module, fmt, args...) CAM_LOG(CAM_TYPE_WARN, __module, fmt, ##args) +#define CAM_INFO(__module, fmt, args...) CAM_LOG(CAM_TYPE_INFO, __module, fmt, ##args) +#define CAM_TRACE(__module, fmt, args...) \ +__CAM_LOG(CAM_PRINT_TRACE, CAM_TYPE_TRACE, __module, fmt, ##args) + +/** + * CAM_ERR_RATE_LIMIT / CAM_WARN_RATE_LIMIT / CAM_INFO_RATE_LIMIT + * + * @brief: Rate limited version of logs used to reduce log spew. + * + * @__module: Respective enum cam_debug_module_id + * @fmt: Format string + * @args: Arguments to match with format + */ +#define CAM_ERR_RATE_LIMIT(__module, fmt, args...) CAM_LOG_RL(CAM_TYPE_ERR, __module, fmt, ##args) +#define CAM_WARN_RATE_LIMIT(__module, fmt, args...) CAM_LOG_RL(CAM_TYPE_WARN, __module, fmt, ##args) +#define CAM_INFO_RATE_LIMIT(__module, fmt, args...) 
CAM_LOG_RL(CAM_TYPE_INFO, __module, fmt, ##args) + +/** + * CAM_ERR_RATE_LIMIT_CUSTOM / CAM_WARN_RATE_LIMITT_CUSTOM/ CAM_INFO_RATE_LIMITT_CUSTOM + * + * @brief: Rate limited version of logs used to reduce log spew that can have + * customized burst rate + * + * @__module: Respective enum cam_debug_module_id + * @interval: Sliding window interval in which to count logs + * @burst: Maximum number of logs in the specified interval + * @fmt: Format string + * @args: Arguments to match with format + */ +#define CAM_ERR_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) \ + CAM_LOG_RL_CUSTOM(CAM_TYPE_ERR, __module, interval, burst, fmt, ##args) + +#define CAM_WARN_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) \ + CAM_LOG_RL_CUSTOM(CAM_TYPE_WARN, __module, interval, burst, fmt, ##args) + +#define CAM_INFO_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) \ + CAM_LOG_RL_CUSTOM(CAM_TYPE_INFO, __module, interval, burst, fmt, ##args) + +/* + * CAM_DBG + * @brief : This Macro will print debug logs when enabled using GROUP and + * if its priority is greater than the priority parameter + * + * @__module : Respective module id which is been calling this Macro + * @fmt : Formatted string which needs to be print in log + * @args : Arguments which needs to be print in log + */ +#define CAM_DBG(__module, fmt, args...) __CAM_DBG(__module, CAM_DBG_PRIORITY_0, fmt, ##args) +#define CAM_DBG_PR1(__module, fmt, args...) __CAM_DBG(__module, CAM_DBG_PRIORITY_1, fmt, ##args) +#define CAM_DBG_PR2(__module, fmt, args...) __CAM_DBG(__module, CAM_DBG_PRIORITY_2, fmt, ##args) + +/** + * cam_print_to_buffer + * @brief: Function to print to camera logs to a buffer. Don't use directly. Use macros + * provided below. 
+ * + * @buf: Buffer to print into + * @buf_size: Total size of the buffer + * @len: Pointer to variable used to keep track of the length + * @tag: Log level tag to be prefixed + * @module_id: Module id tag to be prefixed + * @fmt: Formatted string which needs to be print in log + * @args: Arguments which needs to be print in log + */ +void cam_print_to_buffer(char *buf, const size_t buf_size, size_t *len, unsigned int tag, + unsigned long long module_id, const char *fmt, ...); + +/** + * CAM_[ERR/WARN/INFO]_BUF + * @brief: Macro to print a new line into log buffer. + * + * @module_id: Module id tag to be prefixed + * @buf: Buffer to print into + * @buf_size: Total size of the buffer + * @len: Pointer to the variable used to keep track of the length + * @fmt: Formatted string which needs to be print in log + * @args: Arguments which needs to be print in log + */ +#define CAM_ERR_BUF(module_id, buf, buf_size, len, fmt, args...) \ + cam_print_to_buffer(buf, buf_size, len, CAM_TYPE_ERR, module_id, fmt, ##args) +#define CAM_WARN_BUF(module_id, buf, buf_size, len, fmt, args...) \ + cam_print_to_buffer(buf, buf_size, len, CAM_TYPE_WARN, module_id, fmt, ##args) +#define CAM_INFO_BUF(module_id, buf, buf_size, len, fmt, args...) \ + cam_print_to_buffer(buf, buf_size, len, CAM_TYPE_INFO, module_id, fmt, ##args) + +#define CAM_BOOL_TO_YESNO(val) ((val) ? 
"Y" : "N") + +/** + * struct cam_cpas_debug_settings - Sysfs debug settings for cpas driver + */ +struct cam_cpas_debug_settings { + uint64_t mnoc_hf_0_ab_bw; + uint64_t mnoc_hf_0_ib_bw; + uint64_t mnoc_hf_1_ab_bw; + uint64_t mnoc_hf_1_ib_bw; + uint64_t mnoc_sf_0_ab_bw; + uint64_t mnoc_sf_0_ib_bw; + uint64_t mnoc_sf_1_ab_bw; + uint64_t mnoc_sf_1_ib_bw; + uint64_t mnoc_sf_icp_ab_bw; + uint64_t mnoc_sf_icp_ib_bw; + uint64_t camnoc_bw; + uint64_t cam_ife_0_drv_ab_high_bw; + uint64_t cam_ife_0_drv_ib_high_bw; + uint64_t cam_ife_1_drv_ab_high_bw; + uint64_t cam_ife_1_drv_ib_high_bw; + uint64_t cam_ife_2_drv_ab_high_bw; + uint64_t cam_ife_2_drv_ib_high_bw; + uint64_t cam_ife_0_drv_ab_low_bw; + uint64_t cam_ife_0_drv_ib_low_bw; + uint64_t cam_ife_1_drv_ab_low_bw; + uint64_t cam_ife_1_drv_ib_low_bw; + uint64_t cam_ife_2_drv_ab_low_bw; + uint64_t cam_ife_2_drv_ib_low_bw; + uint64_t cam_ife_0_drv_low_set_zero; + uint64_t cam_ife_1_drv_low_set_zero; + uint64_t cam_ife_2_drv_low_set_zero; + bool is_updated; +}; + +/** + * struct camera_debug_settings - Sysfs debug settings for camera + * + * @cpas_settings: Debug settings for cpas driver. + */ +struct camera_debug_settings { + struct cam_cpas_debug_settings cpas_settings; +}; + +/** + * @brief : API to get camera debug settings + * @return const struct camera_debug_settings pointer. + */ +const struct camera_debug_settings *cam_debug_get_settings(void); + +/** + * @brief : API to parse and store input from sysfs debug node + * @return Number of bytes read from buffer on success, or -EPERM on error. 
+ */ +ssize_t cam_debug_sysfs_node_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); + +/** + * cam_debugfs_init() + * + * @brief: create camera debugfs root folder + */ +void cam_debugfs_init(void); + +/** + * cam_debugfs_deinit() + * + * @brief: remove camera debugfs root folder + */ +void cam_debugfs_deinit(void); + +/** + * cam_debugfs_create_subdir() + * + * @brief: create a directory within the camera debugfs root folder + * + * @name: name of the directory + * @subdir: pointer to the newly created directory entry + * + * @return: 0 on success, negative on failure + */ +int cam_debugfs_create_subdir(const char *name, struct dentry **subdir); + +/** + * cam_debugfs_lookup_subdir() + * + * @brief: lookup a directory within the camera debugfs root folder + * + * @name: name of the directory + * @subdir: pointer to the successfully found directory entry + * + * @return: 0 on success, negative on failure + */ +int cam_debugfs_lookup_subdir(const char *name, struct dentry **subdir); + +/** + * cam_debugfs_available() + * + * @brief: Check if debugfs is enabled for camera. Use this function before creating any + * debugfs entries. + * + * @return: true if enabled, false otherwise + */ +static inline bool cam_debugfs_available(void) +{ + #if defined(CONFIG_DEBUG_FS) + return true; + #else + return false; + #endif +} + +#endif /* _CAM_DEBUG_UTIL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_io_util.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_io_util.c new file mode 100644 index 0000000000..ae87c56f92 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_io_util.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2011-2014, 2017-2018, 2020, The Linux Foundation. + * All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include "cam_io_util.h" +#include "cam_debug_util.h" + +int cam_io_w(uint32_t data, void __iomem *addr) +{ + if (!addr) + return -EINVAL; + + CAM_DBG(CAM_IO_ACCESS, "0x%pK %08x", addr, data); + writel_relaxed(data, addr); + + return 0; +} + +int cam_io_w_mb(uint32_t data, void __iomem *addr) +{ + if (!addr) + return -EINVAL; + + CAM_DBG(CAM_IO_ACCESS, "0x%pK %08x", addr, data); + writel(data, addr); + + return 0; +} + +uint32_t cam_io_r(void __iomem *addr) +{ + uint32_t data; + + if (!addr) { + CAM_ERR(CAM_IO_ACCESS, "Invalid args"); + return 0; + } + + data = readl_relaxed(addr); + CAM_DBG(CAM_IO_ACCESS, "0x%pK %08x", addr, data); + + return data; +} + +uint32_t cam_io_r_mb(void __iomem *addr) +{ + uint32_t data; + + if (!addr) { + CAM_ERR(CAM_IO_ACCESS, "Invalid args"); + return 0; + } + + data = readl(addr); + CAM_DBG(CAM_IO_ACCESS, "0x%pK %08x", addr, data); + + return data; +} + +int cam_io_memcpy(void __iomem *dest_addr, + void __iomem *src_addr, uint32_t len) +{ + int i; + uint32_t *d = (uint32_t *) dest_addr; + uint32_t *s = (uint32_t *) src_addr; + + if (!dest_addr || !src_addr) + return -EINVAL; + + CAM_DBG(CAM_IO_ACCESS, "%pK %pK %d", dest_addr, src_addr, len); + + for (i = 0; i < len/4; i++) { + CAM_DBG(CAM_IO_ACCESS, "0x%pK %08x", d, *s); + writel_relaxed(*s++, d++); + } + + return 0; +} + +int cam_io_memcpy_mb(void __iomem *dest_addr, + void __iomem *src_addr, uint32_t len) +{ + int i; + uint32_t *d = (uint32_t *) dest_addr; + uint32_t *s = (uint32_t *) src_addr; + + if (!dest_addr || !src_addr) + return -EINVAL; + + CAM_DBG(CAM_IO_ACCESS, "%pK %pK %d", dest_addr, src_addr, len); + + /* + * Do not use cam_io_w_mb to avoid double wmb() after a write + * and before the next write. 
+ */ + wmb(); + for (i = 0; i < (len / 4); i++) { + CAM_DBG(CAM_IO_ACCESS, "0x%pK %08x", d, *s); + writel_relaxed(*s++, d++); + } + /* Ensure previous writes are done */ + wmb(); + + return 0; +} + +int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry, + unsigned long min_usecs, unsigned long max_usecs) +{ + uint32_t tmp, cnt = 0; + int rc = 0; + + if (!addr) + return -EINVAL; + + tmp = readl_relaxed(addr); + while ((tmp != wait_data) && (cnt++ < retry)) { + if (min_usecs > 0 && max_usecs > 0) + usleep_range(min_usecs, max_usecs); + tmp = readl_relaxed(addr); + } + + if (cnt > retry) { + CAM_DBG(CAM_IO_ACCESS, "Poll failed by value"); + rc = -EINVAL; + } + + return rc; +} + +int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data, + uint32_t bmask, uint32_t retry, unsigned long min_usecs, + unsigned long max_usecs) +{ + uint32_t tmp, cnt = 0; + int rc = 0; + + if (!addr) + return -EINVAL; + + tmp = readl_relaxed(addr); + while (((tmp & bmask) != wait_data) && (cnt++ < retry)) { + if (min_usecs > 0 && max_usecs > 0) + usleep_range(min_usecs, max_usecs); + tmp = readl_relaxed(addr); + } + + if (cnt > retry) { + CAM_DBG(CAM_IO_ACCESS, "Poll failed with mask"); + rc = -EINVAL; + } + + return rc; +} + +int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr, + uint32_t len) +{ + int i; + + if (!data || !len || !addr) + return -EINVAL; + + for (i = 0; i < len; i++) { + CAM_DBG(CAM_IO_ACCESS, "i= %d len =%d val=%x addr =%pK", + i, len, data[i], addr); + writel_relaxed(data[i], addr); + } + + return 0; +} + +int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr, + uint32_t len) +{ + int i; + + if (!data || !len || !addr) + return -EINVAL; + + for (i = 0; i < len; i++) { + CAM_DBG(CAM_IO_ACCESS, "i= %d len =%d val=%x addr =%pK", + i, len, data[i], addr); + /* Ensure previous writes are done */ + wmb(); + writel_relaxed(data[i], addr); + } + + return 0; +} + +#define __OFFSET(__i) (data[__i][0]) +#define 
__VAL(__i) (data[__i][1]) +int cam_io_w_offset_val_block(const uint32_t data[][2], + void __iomem *addr_base, uint32_t len) +{ + int i; + + if (!data || !len || !addr_base) + return -EINVAL; + + for (i = 0; i < len; i++) { + CAM_DBG(CAM_IO_ACCESS, + "i= %d len =%d val=%x addr_base =%pK reg=%x", + i, len, __VAL(i), addr_base, __OFFSET(i)); + writel_relaxed(__VAL(i), addr_base + __OFFSET(i)); + } + + return 0; +} + +int cam_io_w_mb_offset_val_block(const uint32_t data[][2], + void __iomem *addr_base, uint32_t len) +{ + int i; + + if (!data || !len || !addr_base) + return -EINVAL; + + /* Ensure write is done */ + wmb(); + for (i = 0; i < len; i++) { + CAM_DBG(CAM_IO_ACCESS, + "i= %d len =%d val=%x addr_base =%pK reg=%x", + i, len, __VAL(i), addr_base, __OFFSET(i)); + writel_relaxed(__VAL(i), addr_base + __OFFSET(i)); + } + + return 0; +} + +#define BYTES_PER_REGISTER 4 +#define NUM_REGISTER_PER_LINE 4 +#define REG_OFFSET(__start, __i) (__start + (__i * BYTES_PER_REGISTER)) +int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size) +{ + char line_str[128]; + char *p_str; + int i; + int bytes_written, used_size; + uint32_t data; + + CAM_DBG(CAM_IO_DUMP, "addr=%pK offset=0x%x size=%d", + base_addr, start_offset, size); + + if (!base_addr || (size <= 0)) + return -EINVAL; + + line_str[0] = '\0'; + p_str = line_str; + used_size = 0; + for (i = 0; i < size; i++) { + if (i % NUM_REGISTER_PER_LINE == 0) { + bytes_written = scnprintf(p_str, + sizeof(line_str) - used_size, "0x%08x: ", + REG_OFFSET(start_offset, i)); + p_str += bytes_written; + used_size += bytes_written; + } + data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i)); + bytes_written = scnprintf(p_str, sizeof(line_str) - used_size, + "%08x ", data); + p_str += bytes_written; + used_size += bytes_written; + if ((i + 1) % NUM_REGISTER_PER_LINE == 0) { + CAM_DBG(CAM_IO_DUMP, "%s", line_str); + line_str[0] = '\0'; + p_str = line_str; + used_size = 0; + } + } + if (line_str[0] != '\0') + 
CAM_DBG(CAM_IO_DUMP, "%s", line_str); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_io_util.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_io_util.h new file mode 100644 index 0000000000..66aaeaa45d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_io_util.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2011-2014, 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _CAM_IO_UTIL_H_ +#define _CAM_IO_UTIL_H_ + +#include + +/** + * cam_io_w() + * + * @brief: Camera IO util for register write + * + * @data: Value to be written + * @addr: Address used to write the value + * + * @return: Success or Failure + */ +int cam_io_w(uint32_t data, void __iomem *addr); + +/** + * cam_io_w_mb() + * + * @brief: Camera IO util for register write with memory barrier. + * Memory Barrier is only before the write to ensure the + * order. If need to ensure this write is also flushed + * call wmb() independently in the caller. + * + * @data: Value to be written + * @addr: Address used to write the value + * + * @return: Success or Failure + */ +int cam_io_w_mb(uint32_t data, void __iomem *addr); + +/** + * cam_io_r() + * + * @brief: Camera IO util for register read + * + * @addr: Address of register to be read + * + * @return: Value read from the register address + */ +uint32_t cam_io_r(void __iomem *addr); + +/** + * cam_io_r_mb() + * + * @brief: Camera IO util for register read with memory barrier. + * Memory Barrier is only before the write to ensure the + * order. If need to ensure this write is also flushed + * call rmb() independently in the caller. 
+ * + * @addr: Address of register to be read + * + * @return: Value read from the register address + */ +uint32_t cam_io_r_mb(void __iomem *addr); + +/** + * cam_io_memcpy() + * + * @brief: Camera IO util for memory to register copy + * + * @dest_addr: Destination register address + * @src_addr: Source regiser address + * @len: Range to be copied + * + * @return: Success or Failure + */ +int cam_io_memcpy(void __iomem *dest_addr, + void __iomem *src_addr, uint32_t len); + +/** + * cam_io_memcpy_mb() + * + * @brief: Camera IO util for memory to register copy + * with barrier. + * Memory Barrier is only before the write to ensure the + * order. If need to ensure this write is also flushed + * call wmb() independently in the caller. + * + * @dest_addr: Destination register address + * @src_addr: Source regiser address + * @len: Range to be copied + * + * @return: Success or Failure + */ +int cam_io_memcpy_mb(void __iomem *dest_addr, + void __iomem *src_addr, uint32_t len); + +/** + * cam_io_poll_value_wmask() + * + * @brief: Poll register value with bitmask. + * + * @addr: Register address to be polled + * @wait_data: Wait until @bmask read from @addr matches this data + * @bmask: Bit mask + * @retry: Number of retry + * @min_usecs: Minimum time to wait for retry + * @max_usecs: Maximum time to wait for retry + * + * @return: Success or Failure + * + * This function can sleep so it should not be called from interrupt + * handler, spin_lock etc. 
+ */ +int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data, + uint32_t bmask, uint32_t retry, unsigned long min_usecs, + unsigned long max_usecs); + +/** + * cam_io_poll_value() + * + * @brief: Poll register value + * + * @addr: Register address to be polled + * @wait_data: Wait until value read from @addr matches this data + * @retry: Number of retry + * @min_usecs: Minimum time to wait for retry + * @max_usecs: Maximum time to wait for retry + * + * @return: Success or Failure + * + * This function can sleep so it should not be called from interrupt + * handler, spin_lock etc. + */ +int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry, + unsigned long min_usecs, unsigned long max_usecs); + +/** + * cam_io_w_same_offset_block() + * + * @brief: Write a block of data to same address + * + * @data: Block data to be written + * @addr: Register offset to be written. + * @len: Number of the data to be written + * + * @return: Success or Failure + */ +int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr, + uint32_t len); + +/** + * cam_io_w_mb_same_offset_block() + * + * @brief: Write a block of data to same address with barrier. + * Memory Barrier is only before the write to ensure the + * order. If need to ensure this write is also flushed + * call wmb() independently in the caller. + * + * @data: Block data to be written + * @addr: Register offset to be written. + * @len: Number of the data to be written + * + * @return: Success or Failure + */ +int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr, + uint32_t len); + +/** + * cam_io_w_offset_val_block() + * + * @brief: This API is to write a block of registers + * represented by a 2 dimensional array table with + * register offset and value pair + * + * offset0, value0, + * offset1, value1, + * offset2, value2, + * and so on... 
+ * + * @data: Pointer to 2-dimensional offset-value array + * @addr_base: Base address to which offset will be added to + * get the register address + * @len: Length of offset-value pair array to be written in + * number of uin32_t + * + * @return: Success or Failure + * + */ +int32_t cam_io_w_offset_val_block(const uint32_t data[][2], + void __iomem *addr_base, uint32_t len); + +/** + * cam_io_w_mb_offset_val_block() + * + * @brief: This API is to write a block of registers + * represented by a 2 dimensional array table with + * register offset and value pair with memory barrier. + * Memory Barrier is only before the write to ensure the + * order. If need to ensure this write is also flushed + * call wmb() independently in the caller. + * The OFFSETS NEED to be different because of the way + * barrier is used here. + * + * offset0, value0, + * offset1, value1, + * offset2, value2, + * and so on... + * + * @data: Pointer to 2-dimensional offset-value array + * @addr_base: Base address to which offset will be added to + * get the register address + * @len: Length of offset-value pair array to be written in + * number of uin32_t + * + * @return: Success or Failure + * + */ +int32_t cam_io_w_mb_offset_val_block(const uint32_t data[][2], + void __iomem *addr_base, uint32_t len); + +/** + * cam_io_dump() + * + * @brief: Camera IO util for dumping a range of register + * + * @base_addr: Start register address for the dumping + * @start_offset: Start register offset for the dump + * @size: Size specifying the range for dumping + * + * @return: Success or Failure + */ +int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size); + +#endif /* _CAM_IO_UTIL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_packet_util.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_packet_util.c new file mode 100644 index 0000000000..51862577d6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_packet_util.c @@ -0,0 +1,855 @@ 
+// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include + +#include "cam_mem_mgr.h" +#include "cam_packet_util.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" + +#define CAM_UNIQUE_SRC_HDL_MAX 50 +#define CAM_PRESIL_UNIQUE_HDL_MAX 50 + +struct cam_patch_unique_src_buf_tbl { + int32_t hdl; + dma_addr_t iova; + size_t buf_size; + uint32_t flags; +}; + +int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr, + size_t *len) +{ + int rc = 0; + uintptr_t kmd_buf_addr = 0; + + rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len); + if (rc) { + CAM_ERR(CAM_UTIL, "Unable to get the virtual address %d", rc); + rc = -EINVAL; + } else { + if (kmd_buf_addr && *len) { + *buf_addr = (uint32_t *)kmd_buf_addr; + } else { + cam_mem_put_cpu_buf(handle); + CAM_ERR(CAM_UTIL, "Invalid addr and length :%zd", *len); + rc = -ENOMEM; + } + } + return rc; +} + +int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc) +{ + + if (!cmd_desc) { + CAM_ERR(CAM_UTIL, "Invalid cmd desc"); + return -EINVAL; + } + + if ((cmd_desc->length > cmd_desc->size) || + (cmd_desc->mem_handle <= 0)) { + CAM_ERR(CAM_UTIL, "invalid cmd arg %d %d %d %d", + cmd_desc->offset, cmd_desc->length, + cmd_desc->mem_handle, cmd_desc->size); + return -EINVAL; + } + + return 0; +} + +int cam_packet_util_validate_packet(struct cam_packet *packet, + size_t remain_len) +{ + size_t sum_cmd_desc = 0; + size_t sum_io_cfgs = 0; + size_t sum_patch_desc = 0; + size_t pkt_wo_payload = offsetof(struct cam_packet, payload); + + if (!packet) + return -EINVAL; + + if (!(packet->header.size) || + (size_t)packet->header.size > remain_len || + (size_t)packet->header.size <= pkt_wo_payload) { + CAM_ERR(CAM_UTIL, + "Invalid packet size: %zu, CPU buf length: %zu", + (size_t)packet->header.size, remain_len); + return -EINVAL; + } + + 
CAM_DBG(CAM_UTIL, "num cmd buf:%d num of io config:%d kmd buf index:%d", + packet->num_cmd_buf, packet->num_io_configs, + packet->kmd_cmd_buf_index); + + sum_cmd_desc = packet->num_cmd_buf * sizeof(struct cam_cmd_buf_desc); + sum_io_cfgs = packet->num_io_configs * sizeof(struct cam_buf_io_cfg); + sum_patch_desc = packet->num_patches * sizeof(struct cam_patch_desc); + + if (((pkt_wo_payload + (size_t)packet->cmd_buf_offset + + sum_cmd_desc) > (size_t)packet->header.size) || + ((pkt_wo_payload + (size_t)packet->io_configs_offset + + sum_io_cfgs) > (size_t)packet->header.size) || + ((pkt_wo_payload + (size_t)packet->patch_offset + + sum_patch_desc) > (size_t)packet->header.size)) { + CAM_ERR(CAM_UTIL, "params not within mem len:%zu %zu %zu %zu", + (size_t)packet->header.size, sum_cmd_desc, + sum_io_cfgs, sum_patch_desc); + return -EINVAL; + } + + return 0; +} + +int cam_packet_util_copy_pkt_to_kmd(struct cam_packet *packet_u, struct cam_packet **packet, + size_t remain_len) +{ + int rc = 0; + uint32_t packet_size; + + packet_size = packet_u->header.size; + if (packet_size >= sizeof(struct cam_packet) && packet_size <= remain_len) { + rc = cam_common_mem_kdup((void **) packet, + packet_u, packet_size); + if (rc) { + CAM_ERR(CAM_UTIL, "Alloc and copy request %lld packet fail", + packet_u->header.request_id); + return -EINVAL; + } + (*packet)->header.size = packet_size; + } else { + CAM_ERR(CAM_UTIL, "Invalid packet header size %u", + packet_size); + return -EINVAL; + } + + if (cam_packet_util_validate_packet(*packet, remain_len)) { + CAM_ERR(CAM_UTIL, "Invalid packet params"); + rc = -EINVAL; + goto free_packet; + } + return rc; + +free_packet: + cam_common_mem_free(*packet); + *packet = NULL; + + return rc; +} + +int cam_packet_util_get_kmd_buffer(struct cam_packet *packet, + struct cam_kmd_buf_info *kmd_buf) +{ + int rc = 0; + size_t len = 0; + size_t remain_len = 0; + struct cam_cmd_buf_desc *cmd_desc; + uint32_t *cpu_addr; + + if (!packet || !kmd_buf) { + 
CAM_ERR(CAM_UTIL, "Invalid arg %pK %pK", packet, kmd_buf); + return -EINVAL; + } + + if (!packet->num_cmd_buf) { + CAM_ERR(CAM_UTIL, "Invalid num_cmd_buf = %d", packet->num_cmd_buf); + return -EINVAL; + } + + if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf) { + CAM_ERR(CAM_UTIL, "Invalid kmd buf index: %d", + packet->kmd_cmd_buf_index); + return -EINVAL; + } + + /* Take first command descriptor and add offset to it for kmd*/ + cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *) + &packet->payload_flex + packet->cmd_buf_offset); + cmd_desc += packet->kmd_cmd_buf_index; + + rc = cam_packet_util_validate_cmd_desc(cmd_desc); + if (rc) + return rc; + + rc = cam_packet_util_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr, + &len); + if (rc) + return rc; + + remain_len = len; + if (((size_t)cmd_desc->offset >= len) || + ((size_t)cmd_desc->size > (len - (size_t)cmd_desc->offset))) { + CAM_ERR(CAM_UTIL, "invalid memory len:%zd and cmd desc size:%d", + len, cmd_desc->size); + rc = -EINVAL; + goto rel_kmd_buf; + } + + remain_len -= (size_t)cmd_desc->offset; + if ((size_t)packet->kmd_cmd_buf_offset >= remain_len) { + CAM_ERR(CAM_UTIL, "Invalid kmd cmd buf offset: %zu", + (size_t)packet->kmd_cmd_buf_offset); + rc = -EINVAL; + goto rel_kmd_buf; + } + + cpu_addr += (cmd_desc->offset / 4) + (packet->kmd_cmd_buf_offset / 4); + CAM_DBG(CAM_UTIL, "total size %d, cmd size: %d, KMD buffer size: %d", + cmd_desc->size, cmd_desc->length, + cmd_desc->size - cmd_desc->length); + CAM_DBG(CAM_UTIL, "hdl 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK", + cmd_desc->mem_handle, cmd_desc->offset, + packet->kmd_cmd_buf_offset, cpu_addr); + + kmd_buf->cpu_addr = cpu_addr; + kmd_buf->handle = cmd_desc->mem_handle; + kmd_buf->offset = cmd_desc->offset + packet->kmd_cmd_buf_offset; + kmd_buf->size = cmd_desc->size - cmd_desc->length; + kmd_buf->used_bytes = 0; + return rc; + +rel_kmd_buf: + cam_mem_put_cpu_buf(cmd_desc->mem_handle); + return rc; +} + +void cam_packet_util_dump_patch_info(struct 
cam_packet *packet, + int32_t iommu_hdl, int32_t sec_iommu_hdl, struct cam_hw_dump_pf_args *pf_args) +{ + struct cam_patch_desc *patch_desc = NULL; + struct cam_context_pf_info *pf_context_info = NULL; + dma_addr_t iova_addr; + size_t dst_buf_len; + size_t src_buf_size; + int i, rc = 0; + int32_t hdl; + uintptr_t cpu_addr = 0; + uint32_t *dst_cpu_addr; + uint32_t flags, buf_fd; + uint32_t value = 0; + + if (!packet) { + CAM_ERR(CAM_UTIL, "Invalid packet"); + return; + } + + patch_desc = (struct cam_patch_desc *) + ((uint32_t *) &packet->payload_flex + + packet->patch_offset/4); + + if (pf_args) { + pf_context_info = &(pf_args->pf_context_info); + buf_fd = pf_args->pf_smmu_info->buf_info; + } + + CAM_INFO(CAM_UTIL, "Total num of patches : %d", + packet->num_patches); + + for (i = 0; i < packet->num_patches; i++) { + hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ? + sec_iommu_hdl : iommu_hdl; + rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl, + hdl, &iova_addr, &src_buf_size, &flags, NULL); + if (rc < 0) { + CAM_ERR(CAM_UTIL, + "unable to get src buf address for hdl 0x%x", + hdl); + return; + } + + if (pf_args && + GET_FD_FROM_HANDLE(patch_desc[i].src_buf_hdl) == buf_fd && + pf_context_info->mem_type == CAM_FAULT_BUF_NOT_FOUND) { + /* found PF at this hdl */ + pf_context_info->mem_type = CAM_FAULT_PATCH_BUF; + pf_context_info->patch_idx = i; + pf_context_info->buf_hdl = patch_desc[i].src_buf_hdl; + pf_context_info->offset = patch_desc[i].src_offset; + pf_context_info->mem_flag = flags; + pf_context_info->delta = + CAM_SMMU_GET_IOVA_DELTA(pf_args->pf_smmu_info->iova, iova_addr); + pf_context_info->req_id = packet->header.request_id; + pf_context_info->ctx_found = true; + + CAM_ERR(CAM_UTIL, "Found PF at patch: %d src buf hdl: 0x%llx", + i, patch_desc[i].src_buf_hdl); + } + + rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl, + &cpu_addr, &dst_buf_len); + if (rc < 0 || !cpu_addr || (dst_buf_len == 0)) { + CAM_ERR(CAM_UTIL, "unable to get dst buf address"); 
+ return; + } + + dst_cpu_addr = (uint32_t *)cpu_addr; + dst_cpu_addr = (uint32_t *)((uint8_t *)dst_cpu_addr + + patch_desc[i].dst_offset); + value = *dst_cpu_addr; + CAM_INFO(CAM_UTIL, + "i = %d src_buf 0x%llx src_hdl 0x%x src_buf_with_offset 0x%llx src_size 0x%llx src_flags: %x dst %p dst_offset %u dst_hdl 0x%x value 0x%x", + i, iova_addr, patch_desc[i].src_buf_hdl, + (iova_addr + patch_desc[i].src_offset), + src_buf_size, flags, dst_cpu_addr, + patch_desc[i].dst_offset, + patch_desc[i].dst_buf_hdl, value); + + if (!(*dst_cpu_addr)) + CAM_ERR(CAM_ICP, "Null at dst addr %p", dst_cpu_addr); + + cam_mem_put_cpu_buf(patch_desc[i].dst_buf_hdl); + } +} + +static int cam_packet_util_get_patch_iova( + struct cam_patch_unique_src_buf_tbl *tbl, + int32_t hdl, uint32_t buf_hdl, dma_addr_t *iova, + size_t *buf_size, uint32_t *flags, struct list_head *mapped_io_list) +{ + int idx = 0; + int rc = 0; + size_t src_buf_size; + dma_addr_t iova_addr; + bool is_found = false; + + for (idx = 0; idx < CAM_UNIQUE_SRC_HDL_MAX; idx++) { + if (buf_hdl == tbl[idx].hdl) { + CAM_DBG(CAM_UTIL, + "Matched entry for src_buf_hdl: 0x%x with src_hdl[%d]: 0x%x", + buf_hdl, idx, tbl[idx].hdl); + *iova = tbl[idx].iova; + *buf_size = tbl[idx].buf_size; + *flags = tbl[idx].flags; + is_found = true; + break; + } else if ((tbl[idx].hdl == 0) || (tbl[idx].iova == 0)) { + CAM_DBG(CAM_UTIL, "New src handle detected 0x%x", buf_hdl); + is_found = false; + break; + } + CAM_DBG(CAM_UTIL, + "Index: %d is filled with differnt src_hdl: 0x%x", + idx, buf_hdl); + } + + if (!is_found) { + CAM_DBG(CAM_UTIL, "src_hdl 0x%x not found in table entries", + buf_hdl); + rc = cam_mem_get_io_buf(buf_hdl, hdl, &iova_addr, &src_buf_size, flags, + mapped_io_list); + if (rc < 0) { + CAM_ERR(CAM_UTIL, + "unable to get iova for src_hdl: 0x%x", + buf_hdl); + return rc; + } + /* Update the table entry with unique src buf handle */ + if (idx < CAM_UNIQUE_SRC_HDL_MAX && tbl[idx].hdl == 0) { + tbl[idx].buf_size = src_buf_size; + 
tbl[idx].iova = iova_addr; + tbl[idx].hdl = buf_hdl; + tbl[idx].flags = *flags; + CAM_DBG(CAM_UTIL, + "Updated table index: %d with src_buf_hdl: 0x%x flags: %x", + idx, tbl[idx].hdl, *flags); + } + *iova = iova_addr; + *buf_size = src_buf_size; + } + + return rc; +} + +int cam_packet_util_process_patches(struct cam_packet *packet, + struct list_head *mapped_io_list, int32_t iommu_hdl, int32_t sec_mmu_hdl, + bool exp_mem) +{ + struct cam_patch_desc *patch_desc = NULL; + dma_addr_t iova_addr; + uintptr_t cpu_addr = 0; + dma_addr_t temp; + uint32_t *dst_cpu_addr; + size_t dst_buf_len; + size_t src_buf_size; + int i = 0; + int rc = 0; + uint32_t flags = 0; + int32_t hdl; + struct cam_patch_unique_src_buf_tbl + tbl[CAM_UNIQUE_SRC_HDL_MAX]; + + memset(tbl, 0, CAM_UNIQUE_SRC_HDL_MAX * + sizeof(struct cam_patch_unique_src_buf_tbl)); + + /* process patch descriptor */ + patch_desc = (struct cam_patch_desc *) + ((uint32_t *) &packet->payload_flex + + packet->patch_offset/4); + CAM_DBG(CAM_UTIL, "packet = %pK patch_desc = %pK size = %lu", + (void *)packet, (void *)patch_desc, + sizeof(struct cam_patch_desc)); + + for (i = 0; i < packet->num_patches; i++) { + hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ? 
+ sec_mmu_hdl : iommu_hdl; + + rc = cam_packet_util_get_patch_iova(&tbl[0], hdl, patch_desc[i].src_buf_hdl, + &iova_addr, &src_buf_size, &flags, mapped_io_list); + + if (rc) { + CAM_ERR(CAM_UTIL, + "get_iova failed for patch[%d], src_buf_hdl: 0x%x: rc: %d", + i, patch_desc[i].src_buf_hdl, rc); + return rc; + } + + if ((size_t)patch_desc[i].src_offset >= src_buf_size) { + CAM_ERR(CAM_UTIL, + "Invalid src buf patch offset: patch:src_offset: 0x%x, src_buf_size: %zu", + patch_desc[i].src_offset, src_buf_size); + return -EINVAL; + } + + temp = iova_addr; + + rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl, + &cpu_addr, &dst_buf_len); + if (rc < 0 || !cpu_addr || (dst_buf_len == 0)) { + CAM_ERR(CAM_UTIL, "unable to get dst buf address"); + return rc; + } + dst_cpu_addr = (uint32_t *)cpu_addr; + + CAM_DBG(CAM_UTIL, "i = %d patch info = %x %x %x %x", i, + patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset, + patch_desc[i].src_buf_hdl, patch_desc[i].src_offset); + + if ((dst_buf_len < sizeof(void *)) || + ((dst_buf_len - sizeof(void *)) < + (size_t)patch_desc[i].dst_offset)) { + CAM_ERR(CAM_UTIL, + "Invalid dst buf patch offset"); + cam_mem_put_cpu_buf((int32_t)patch_desc[i].dst_buf_hdl); + return -EINVAL; + } + + dst_cpu_addr = (uint32_t *)((uint8_t *)dst_cpu_addr + + patch_desc[i].dst_offset); + temp += patch_desc[i].src_offset; + + if (exp_mem && cam_smmu_is_expanded_memory()) { + if ((flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) || + (flags & CAM_MEM_FLAG_CMD_BUF_TYPE)) { + *dst_cpu_addr = temp; + } else { + if (CAM_36BIT_INTF_GET_IOVA_OFFSET(temp)) + CAM_ERR(CAM_UTIL, + "Buffer address 0x%lx not aligned to 256bytes", + temp); + + *dst_cpu_addr = CAM_36BIT_INTF_GET_IOVA_BASE(temp); + } + } else { + *dst_cpu_addr = temp; + } + + CAM_DBG(CAM_UTIL, + "patch is done for dst %pK with base iova 0x%lx final iova 0x%lx patched value 0x%x, shared=%s, cmd=%s, HwAndCDM %s", + dst_cpu_addr, iova_addr, temp, *dst_cpu_addr, + CAM_BOOL_TO_YESNO(flags & CAM_MEM_FLAG_HW_SHARED_ACCESS), + 
CAM_BOOL_TO_YESNO(flags & CAM_MEM_FLAG_CMD_BUF_TYPE), + CAM_BOOL_TO_YESNO(flags & CAM_MEM_FLAG_HW_AND_CDM_OR_SHARED)); + cam_mem_put_cpu_buf((int32_t)patch_desc[i].dst_buf_hdl); + } + + return rc; +} + +void cam_packet_util_dump_io_bufs(struct cam_packet *packet, + int32_t iommu_hdl, int32_t sec_mmu_hdl, + struct cam_hw_dump_pf_args *pf_args, bool res_id_support) +{ + struct cam_buf_io_cfg *io_cfg; + struct cam_context_pf_info *pf_context_info; + int32_t mmu_hdl, buf_fd; + dma_addr_t iova_addr; + size_t src_buf_size; + int i, j, rc = 0; + uint32_t resource_type; + + if (!packet) { + CAM_ERR(CAM_UTIL, "Invalid packet"); + return; + } + + io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload_flex + + packet->io_configs_offset / 4); + + buf_fd = pf_args->pf_smmu_info->buf_info; + pf_context_info = &(pf_args->pf_context_info); + resource_type = pf_context_info->resource_type; + + for (i = 0; i < packet->num_io_configs; i++) { + if (res_id_support && io_cfg[i].resource_type != + pf_context_info->resource_type) + continue; + + for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) { + if (!io_cfg[i].mem_handle[j]) + break; + + CAM_INFO(CAM_UTIL, "port: 0x%x f: %u format: %d dir %d", + io_cfg[i].resource_type, + io_cfg[i].fence, + io_cfg[i].format, + io_cfg[i].direction); + + mmu_hdl = cam_mem_is_secure_buf( + io_cfg[i].mem_handle[j]) ? 
sec_mmu_hdl : + iommu_hdl; + rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j], + mmu_hdl, &iova_addr, &src_buf_size, NULL, NULL); + if (rc < 0) { + CAM_ERR(CAM_UTIL, + "get src buf address fail mem_handle 0x%x", + io_cfg[i].mem_handle[j]); + continue; + } + if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) == buf_fd) { + pf_context_info->mem_type = CAM_FAULT_IO_CFG_BUF; + pf_context_info->buf_hdl = io_cfg[i].mem_handle[j]; + pf_context_info->offset = io_cfg[i].offsets[j]; + pf_context_info->resource_type = io_cfg[i].resource_type; + pf_context_info->delta = + CAM_SMMU_GET_IOVA_DELTA(pf_args->pf_smmu_info->iova, + iova_addr); + pf_context_info->req_id = packet->header.request_id; + pf_context_info->ctx_found = true; + + resource_type = pf_context_info->resource_type; + CAM_INFO(CAM_UTIL, + "Found PF at port: 0x%x mem 0x%x fd: %d plane id: %d delta: %llu", + io_cfg[i].resource_type, + io_cfg[i].mem_handle[j], + buf_fd, + j, pf_context_info->delta); + } + CAM_INFO(CAM_UTIL, + "pln %d w %d h %d s %u size %zu addr 0x%llx end_addr 0x%llx offset %u memh 0x%x", + j, io_cfg[i].planes[j].width, + io_cfg[i].planes[j].height, + io_cfg[i].planes[j].plane_stride, + src_buf_size, iova_addr, + iova_addr + src_buf_size, + io_cfg[i].offsets[j], + io_cfg[i].mem_handle[j]); + } + + if (res_id_support) + return; + } + + if (res_id_support) + CAM_ERR(CAM_UTIL, + "getting io port for mid resource id failed req id: %llu res id: 0x%x", + packet->header.request_id, resource_type); + +} + +int cam_packet_util_process_generic_blob(uint32_t length, uint32_t *blob_ptr, + cam_packet_generic_blob_handler blob_handler_cb, void *user_data) +{ + int rc = 0; + uint32_t blob_type, blob_size, blob_block_size, len_read; + + len_read = 0; + while (len_read + sizeof(uint32_t) < length) { + blob_type = + ((*blob_ptr) & CAM_GENERIC_BLOB_CMDBUFFER_TYPE_MASK) >> + CAM_GENERIC_BLOB_CMDBUFFER_TYPE_SHIFT; + blob_size = + ((*blob_ptr) & CAM_GENERIC_BLOB_CMDBUFFER_SIZE_MASK) >> + 
CAM_GENERIC_BLOB_CMDBUFFER_SIZE_SHIFT; + + if (!blob_size) + goto end; + + blob_block_size = sizeof(uint32_t) + + (((blob_size + sizeof(uint32_t) - 1) / + sizeof(uint32_t)) * sizeof(uint32_t)); + + CAM_DBG(CAM_UTIL, + "Blob type=%d size=%d block_size=%d len_read=%d total=%d", + blob_type, blob_size, blob_block_size, len_read, + length); + + if (len_read + blob_block_size > length) { + CAM_ERR(CAM_UTIL, "Invalid Blob %d %d %d %d", + blob_type, blob_size, len_read, + length); + rc = -EINVAL; + goto end; + } + + len_read += blob_block_size; + + rc = blob_handler_cb(user_data, blob_type, blob_size, + (uint8_t *)(blob_ptr + 1)); + if (rc) { + CAM_ERR(CAM_UTIL, "Error in handling blob type %d %d", + blob_type, blob_size); + goto end; + } + + blob_ptr += (blob_block_size / sizeof(uint32_t)); + } +end: + return rc; +} + +int cam_packet_util_process_generic_cmd_buffer( + struct cam_cmd_buf_desc *cmd_buf, + cam_packet_generic_blob_handler blob_handler_cb, void *user_data) +{ + int rc = 0; + uintptr_t cpu_addr = 0; + size_t buf_size, remain_len = 0, blob_size; + uint32_t *blob_ptr, *blob_ptr_u; + + if (!cmd_buf || !blob_handler_cb) { + CAM_ERR(CAM_UTIL, "Invalid args %pK %pK", + cmd_buf, blob_handler_cb); + return -EINVAL; + } + + if (!cmd_buf->length || !cmd_buf->size) { + CAM_ERR(CAM_UTIL, "Invalid cmd buf size %d %d", + cmd_buf->length, cmd_buf->size); + return -EINVAL; + } + + rc = cam_mem_get_cpu_buf(cmd_buf->mem_handle, &cpu_addr, &buf_size); + if (rc || !cpu_addr || (buf_size == 0)) { + CAM_ERR(CAM_UTIL, "Failed in Get cpu addr, rc=%d, cpu_addr=%pK", + rc, (void *)cpu_addr); + return rc; + } + + remain_len = buf_size; + if ((buf_size < sizeof(uint32_t)) || + ((size_t)cmd_buf->offset > (buf_size - sizeof(uint32_t)))) { + CAM_ERR(CAM_UTIL, "Invalid offset for cmd buf: %zu", + (size_t)cmd_buf->offset); + rc = -EINVAL; + goto put_cpu_buf; + } + remain_len -= (size_t)cmd_buf->offset; + blob_ptr_u = (uint32_t *)(((uint8_t *)cpu_addr) + + cmd_buf->offset); + blob_size = 
cmd_buf->length; + + if (blob_size <= remain_len) { + rc = cam_common_mem_kdup((void **)&blob_ptr, + blob_ptr_u, blob_size); + if (rc) { + CAM_ERR(CAM_UTIL, "Alloc and copy blob buffer failed"); + goto put_cpu_buf; + } + } else { + CAM_ERR(CAM_UTIL, "Invalid blob size %u", blob_size); + rc = -EINVAL; + goto put_cpu_buf; + } + + CAM_DBG(CAM_UTIL, + "GenericCmdBuffer cpuaddr=%pK, blobptr=%pK, len=%d", + (void *)cpu_addr, (void *)blob_ptr, blob_size); + + rc = cam_packet_util_process_generic_blob(blob_size, blob_ptr, + blob_handler_cb, user_data); + if (rc) + CAM_ERR(CAM_UTIL, "Error in parse of blob type blob data %d", + rc); + + cam_common_mem_free(blob_ptr); +put_cpu_buf: + cam_mem_put_cpu_buf(cmd_buf->mem_handle); + return rc; +} + +int cam_presil_retrieve_buffers_from_packet(struct cam_packet *packet, int iommu_hdl, + int out_res_id) +{ + int rc = 0, i, j; + struct cam_buf_io_cfg *io_cfg = NULL; + dma_addr_t io_addr[CAM_PACKET_MAX_PLANES]; + size_t size; + + if (!packet || (iommu_hdl < 0)) { + CAM_ERR(CAM_PRESIL, "Invalid params packet %pK iommu_hdl: %d", packet, iommu_hdl); + return -EINVAL; + } + + CAM_DBG(CAM_PRESIL, "Retrieving output buffer corresponding to res: 0x%x", out_res_id); + io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)&packet->payload_flex + + packet->io_configs_offset); + for (i = 0; i < packet->num_io_configs; i++) { + if ((io_cfg[i].direction != CAM_BUF_OUTPUT) || + (io_cfg[i].resource_type != out_res_id)) + continue; + + memset(io_addr, 0, sizeof(io_addr)); + for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) { + if (!io_cfg[i].mem_handle[j]) + break; + + rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j], iommu_hdl, &io_addr[j], + &size, NULL, NULL); + if (rc) { + CAM_ERR(CAM_PRESIL, "no io addr for plane%d", j); + rc = -ENOMEM; + return rc; + } + + /* For presil, address should be within 32 bit */ + if (io_addr[j] >> 32) { + CAM_ERR(CAM_PRESIL, + "Invalid address, presil mapped address should be 32 bit"); + rc = -EINVAL; + return rc; + } + + 
CAM_INFO(CAM_PRESIL, + "Retrieving IO CFG buffer:%d addr: 0x%x offset 0x%x res_id: 0x%x", + io_cfg[i].mem_handle[j], io_addr[j], io_cfg[i].offsets[j], + io_cfg[i].resource_type); + cam_mem_mgr_retrieve_buffer_from_presil(io_cfg[i].mem_handle[j], size, + io_cfg[i].offsets[j], iommu_hdl); + } + } + + return rc; +} + +static void cam_presil_add_unique_buf_hdl_to_list(int32_t buf_hdl, + int32_t *hdl_list, int *num_hdls, int max_handles) +{ + int k; + bool hdl_found = false; + + if (!buf_hdl) + return; + + if (*num_hdls >= max_handles) { + CAM_ERR(CAM_PRESIL, "Failed to add entry num_hdls: %d max_handles:%d", *num_hdls, + max_handles); + return; + } + + for (k = 0; k < *num_hdls; k++) { + if (hdl_list[k] == buf_hdl) { + hdl_found = true; + break; + } + } + + if (!hdl_found) + hdl_list[(*num_hdls)++] = buf_hdl; +} + +int cam_presil_send_buffers_from_packet(struct cam_packet *packet, int img_iommu_hdl, + int cdm_iommu_hdl) +{ + struct cam_buf_io_cfg *io_cfg = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_patch_desc *patch_desc = NULL; + int i, j, rc = 0; + int32_t unique_img_buffers[CAM_PRESIL_UNIQUE_HDL_MAX] = {0}; + int32_t unique_cmd_buffers[CAM_PRESIL_UNIQUE_HDL_MAX] = {0}; + int num_img_handles = 0, num_cmd_handles = 0; + + if(!packet) { + CAM_ERR(CAM_PRESIL, "Packet is NULL"); + return -EINVAL; + } + + if (img_iommu_hdl == -1) { + goto send_cmd_buffers; + } + + /* Adding IO config buffer handles to list*/ + io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)&packet->payload_flex + + packet->io_configs_offset); + for (i = 0; i < packet->num_io_configs; i++) { + if (io_cfg[i].direction == CAM_BUF_OUTPUT) + continue; + + for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) { + if (!io_cfg[i].mem_handle[j]) + break; + + CAM_DBG(CAM_PRESIL, "Adding IO CFG buffer:%d", io_cfg[i].mem_handle[j]); + cam_presil_add_unique_buf_hdl_to_list(io_cfg[i].mem_handle[j], + unique_img_buffers, &num_img_handles, CAM_PRESIL_UNIQUE_HDL_MAX); + } + } + + for (i = 0; i < num_img_handles; 
i++) { + CAM_DBG(CAM_PRESIL, "Sending Image buffer i:%d mem_handle:%d", i, + unique_img_buffers[i]); + rc = cam_mem_mgr_send_buffer_to_presil(img_iommu_hdl, + unique_img_buffers[i]); + if (rc) { + CAM_ERR(CAM_PRESIL, "Failed to send buffer i:%d mem_handle:%d rc:%d", + i, unique_img_buffers[i], rc); + return rc; + } + } + +send_cmd_buffers: + if (cdm_iommu_hdl == -1) { + goto end; + } + + /* Adding CMD buffer handles to list*/ + cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload_flex + + packet->cmd_buf_offset); + for (i = 0; i < packet->num_cmd_buf; i++) { + rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]); + if (rc) + return rc; + + CAM_DBG(CAM_PRESIL, "Adding CMD buffer:%d", cmd_desc[i].mem_handle); + cam_presil_add_unique_buf_hdl_to_list(cmd_desc[i].mem_handle, + unique_cmd_buffers, &num_cmd_handles, CAM_PRESIL_UNIQUE_HDL_MAX); + } + + /* Adding Patch src buffer handles to list */ + patch_desc = (struct cam_patch_desc *) ((uint8_t *)&packet->payload_flex + + packet->patch_offset); + for (i = 0; i < packet->num_patches; i++) { + CAM_DBG(CAM_PRESIL, "Adding Patch src buffer:%d", patch_desc[i].src_buf_hdl); + cam_presil_add_unique_buf_hdl_to_list(patch_desc[i].src_buf_hdl, + unique_cmd_buffers, &num_cmd_handles, CAM_PRESIL_UNIQUE_HDL_MAX); + } + + for (i = 0; i < num_cmd_handles; i++) { + CAM_DBG(CAM_PRESIL, "Sending Command buffer i:%d mem_handle:%d", i, + unique_cmd_buffers[i]); + rc = cam_mem_mgr_send_buffer_to_presil(cdm_iommu_hdl, + unique_cmd_buffers[i]); + if (rc) { + CAM_ERR(CAM_PRESIL, "Failed to send buffer i:%d mem_handle:%d rc:%d", + i, unique_cmd_buffers[i], rc); + return rc; + } + } + +end: + return rc; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_packet_util.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_packet_util.h new file mode 100644 index 0000000000..fa2b4a64e0 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_packet_util.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: 
GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_PACKET_UTIL_H_ +#define _CAM_PACKET_UTIL_H_ + +#include +#include "cam_hw_mgr_intf.h" + +/** + * @brief KMD scratch buffer information + * + * @handle: Memory handle + * @cpu_addr: Cpu address + * @offset: Offset from the start of the buffer + * @size: Size of the buffer in bytes + * @used_bytes: Used memory in bytes + * + */ +struct cam_kmd_buf_info { + int handle; + uint32_t *cpu_addr; + uint32_t offset; + uint32_t size; + uint32_t used_bytes; +}; + +/* Generic Cmd Buffer blob callback function type */ +typedef int (*cam_packet_generic_blob_handler)(void *user_data, + uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data); + +/** + * cam_packet_util_get_cmd_mem_addr() + * + * @brief Get command buffer address + * + * @handle: Command buffer memory handle + * @buf_addr: Command buffer cpu mapped address + * @len: Command buffer length + * + * @return: 0 for success + * -EINVAL for Fail + */ +int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr, + size_t *len); + +/** + * cam_packet_util_validate_packet() + * + * @brief Validate the packet + * + * @packet: Packet to be validated + * + * @remain_len: CPU buff length after config offset + * + * @return: 0 for success + * -EINVAL for Fail + */ +int cam_packet_util_validate_packet(struct cam_packet *packet, + size_t remain_len); + +int cam_packet_util_copy_pkt_to_kmd(struct cam_packet *packet_u, + struct cam_packet **packet, size_t remain_len); +/** + * cam_packet_util_validate_cmd_desc() + * + * @brief Validate the packet + * + * @cmd_desc: Command descriptor to be validated + * + * @return: 0 for success + * -EINVAL for Fail + */ +int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc); + +/** + * cam_packet_util_get_kmd_buffer() + * + * @brief Get the kmd buffer from the packet command 
descriptor + * + * @packet: Packet data + * @kmd_buf: Extracted the KMD buffer information + * + * @return: 0 for success + * -EINVAL for Fail + */ +int cam_packet_util_get_kmd_buffer(struct cam_packet *packet, + struct cam_kmd_buf_info *kmd_buf_info); + +/** + * cam_packet_util_dump_patch_info() + * + * @brief: Dump patch info in case of page fault + * + * @packet: Input packet containing Command Buffers and Patches + * @iommu_hdl: IOMMU handle of the HW Device that received the packet + * @sec_iommu_hdl: Secure IOMMU handle of the HW Device that + * received the packet + * @pf_args: Page fault arguments + * + */ +void cam_packet_util_dump_patch_info(struct cam_packet *packet, + int32_t iommu_hdl, int32_t sec_iommu_hdl, struct cam_hw_dump_pf_args *pf_args); + +/** + * cam_packet_util_process_patches() + * + * @brief: Replace the handle in Packet to Address using the + * information from patches. + * + * @packet: Input packet containing Command Buffers and Patches + * @mapped_io_list: List in to add patches/buffers to for reference counting + * @iommu_hdl: IOMMU handle of the HW Device that received the packet + * @sec_iommu_hdl: Secure IOMMU handle of the HW Device that + * received the packet + * @exp_mem: Boolean to know if patched address is in expanded memory range + * or within default 32-bit address space. 
+ * + * @return: 0: Success + * Negative: Failure + */ +int cam_packet_util_process_patches(struct cam_packet *packet, + struct list_head *mapped_io_list, int32_t iommu_hdl, int32_t sec_mmu_hdl, + bool exp_mem); + +/** + * cam_packet_util_dump_io_bufs() + * + * @brief: Search for faulted io buffer in packet and print io buffers info + * + * @packet: Input packet containing io buffers + * @iommu_hdl: IOMMU handle of the HW Device that received the packet + * @sec_iommu_hdl: Secure IOMMU handle of the HW Device that + * received the packet + * @pf_args: Pault Fault related info + * @res_id_support: if the specific device has knowledge of the resource id for hw + */ + +void cam_packet_util_dump_io_bufs(struct cam_packet *packet, + int32_t iommu_hdl, int32_t sec_mmu_hdl, + struct cam_hw_dump_pf_args *pf_args, bool res_id_support); + +/** + * cam_packet_util_process_generic_cmd_buffer() + * + * @brief: Process Generic Blob command buffer. This utility + * function process the command buffer and calls the + * blob_handle_cb callback for each blob that exists + * in the command buffer. 
+ * + * @cmd_buf: Generic Blob Cmd Buffer handle + * @blob_handler_cb: Callback pointer to call for each blob exists in the + * command buffer + * @user_data: User data to be passed while callback + * + * @return: 0: Success + * Negative: Failure + */ +int cam_packet_util_process_generic_cmd_buffer( + struct cam_cmd_buf_desc *cmd_buf, + cam_packet_generic_blob_handler blob_handler_cb, void *user_data); + +/** + * @brief : API to retrieve image buffers from presil after processing is + * done,using packet from request + * + * @packet: Packet pointer for current request + * @iommu_hdl: IOMMU hdl for Image buffers + * @out_res_id: Resource ID corresponding to the output buffer + * + * @return: Success or Failure + */ +int cam_presil_retrieve_buffers_from_packet(struct cam_packet *packet, int iommu_hdl, + int out_res_id); + +/** + * @brief : API to send relevant buffers to presil + * + * @packet : Packet pointer for current request + * @img_iommu_hdl: IOMMU hdl for Image buffers + * @cdm_iommu_hdl: IOMMU hdl for cdm buffers + * + */ +int cam_presil_send_buffers_from_packet(struct cam_packet *packet, int img_iommu_hdl, + int cdm_iommu_hdl); + +/** + * @brief : API to handle the blob data to get blob type and size + * + * @length : length of the blob + * @blob_ptr: blob base address + * @blob_handler_cb: blob handler call back + * @user_data: user data information + */ +int cam_packet_util_process_generic_blob(uint32_t length, uint32_t *blob_ptr, + cam_packet_generic_blob_handler blob_handler_cb, void *user_data); + + +#endif /* _CAM_PACKET_UTIL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_bus.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_bus.c new file mode 100644 index 0000000000..10e3089bdd --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_bus.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include "cam_soc_bus.h" +#include "cam_mem_mgr_api.h" + +/** + * struct cam_soc_bus_client_data : Bus client data + * + * @pdata: Bus pdata information + * @client_id: Bus client id + * @num_paths: Number of paths for this client + * @curr_vote_level: current voted index + * @dyn_vote: whether dynamic voting enabled + */ +struct cam_soc_bus_client_data { + struct msm_bus_scale_pdata *pdata; + uint32_t client_id; + int num_paths; + unsigned int curr_vote_level; + bool dyn_vote; +}; + +int cam_soc_bus_client_update_request(void *client, unsigned int idx) +{ + int rc = 0; + struct cam_soc_bus_client *bus_client = + (struct cam_soc_bus_client *) client; + struct cam_soc_bus_client_data *bus_client_data = + (struct cam_soc_bus_client_data *) bus_client->client_data; + + if (bus_client_data->dyn_vote) { + CAM_ERR(CAM_UTIL, + "Dyn update not allowed client[%d][%s], dyn_vote: %d", + bus_client_data->client_id, + bus_client->common_data->name, + bus_client_data->dyn_vote); + rc = -EINVAL; + goto end; + } + + if (idx >= bus_client->common_data->num_usecases) { + CAM_ERR(CAM_UTIL, "Invalid vote level=%d, usecases=%d", idx, + bus_client->common_data->num_usecases); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_PERF, "Bus client=[%d][%s] index[%d]", + bus_client_data->client_id, bus_client->common_data->name, idx); + + rc = msm_bus_scale_client_update_request(bus_client_data->client_id, + idx); + if (rc) { + CAM_ERR(CAM_UTIL, + "Update request failed, client[%d][%s], idx: %d", + bus_client_data->client_id, + bus_client->common_data->name, idx); + goto end; + } + +end: + return rc; +} + +int cam_soc_bus_client_update_bw(void *client, uint64_t ab, uint64_t ib, + enum cam_soc_bus_path_data bus_path_data) +{ + int idx = 0; + struct msm_bus_paths *path; + struct msm_bus_scale_pdata *pdata; + struct cam_soc_bus_client *bus_client = + (struct cam_soc_bus_client *) client; + struct 
cam_soc_bus_client_data *bus_client_data =
+		(struct cam_soc_bus_client_data *) bus_client->client_data;
+	int rc = 0;
+
+	if ((bus_client->common_data->num_usecases != 2) ||
+		(bus_client_data->num_paths != 1) ||
+		(!bus_client_data->dyn_vote)) {
+		CAM_ERR(CAM_UTIL,
+			"dynamic update not allowed Bus client=[%d][%s], %d %d %d",
+			bus_client_data->client_id,
+			bus_client->common_data->name,
+			bus_client->common_data->num_usecases,
+			bus_client_data->num_paths,
+			bus_client_data->dyn_vote);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Dynamic voting ping-pongs between usecase 0 and 1 (num_usecases == 2) */
+	idx = bus_client_data->curr_vote_level;
+	idx = 1 - idx;
+	bus_client_data->curr_vote_level = idx;
+
+	pdata = bus_client_data->pdata;
+	path = &(pdata->usecase[idx]);
+	path->vectors[0].ab = ab;
+	path->vectors[0].ib = ib;
+
+	CAM_DBG(CAM_PERF, "Bus client=[%d][%s] :ab[%llu] ib[%llu], index[%d]",
+		bus_client_data->client_id, bus_client->common_data->name, ab,
+		ib, idx);
+	rc = msm_bus_scale_client_update_request(bus_client_data->client_id,
+		idx);
+	if (rc) {
+		CAM_ERR(CAM_UTIL,
+			"Update request failed, client[%d][%s], idx: %d",
+			bus_client_data->client_id,
+			bus_client->common_data->name, idx);
+		return rc;
+	}
+
+end:
+	return rc;
+}
+
+int cam_soc_bus_client_register(struct platform_device *pdev,
+	struct device_node *dev_node, void **client,
+	struct cam_soc_bus_client_common_data *common_data, bool use_path_name)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	struct cam_soc_bus_client *bus_client = NULL;
+	struct cam_soc_bus_client_data *bus_client_data = NULL;
+	uint32_t client_id;
+	int rc;
+
+	bus_client = CAM_MEM_ZALLOC(sizeof(struct cam_soc_bus_client), GFP_KERNEL);
+	if (!bus_client) {
+		CAM_ERR(CAM_UTIL, "Not enough memory");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	*client = bus_client;
+
+	bus_client_data = CAM_MEM_ZALLOC(sizeof(struct cam_soc_bus_client_data),
+		GFP_KERNEL);
+	if (!bus_client_data) {
+		CAM_MEM_FREE(bus_client);
+		*client = NULL;
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	bus_client->client_data = bus_client_data;
+	
pdata = msm_bus_pdata_from_node(pdev, + dev_node); + if (!pdata) { + CAM_ERR(CAM_UTIL, "failed get_pdata"); + rc = -EINVAL; + goto error; + } + + if ((pdata->num_usecases == 0) || + (pdata->usecase[0].num_paths == 0)) { + CAM_ERR(CAM_UTIL, "usecase=%d", pdata->num_usecases); + rc = -EINVAL; + goto error; + } + + client_id = msm_bus_scale_register_client(pdata); + if (!client_id) { + CAM_ERR(CAM_UTIL, "failed in register bus client_data"); + rc = -EINVAL; + goto error; + } + + bus_client->common_data = common_data; + + bus_client_data->dyn_vote = of_property_read_bool(dev_node, + "qcom,msm-bus-vector-dyn-vote"); + + if (bus_client_data->dyn_vote && (pdata->num_usecases != 2)) { + CAM_ERR(CAM_UTIL, "Excess or less vectors %d", + pdata->num_usecases); + rc = -EINVAL; + goto fail_unregister_client; + } + + rc = msm_bus_scale_client_update_request(client_id, 0); + if (rc) { + CAM_ERR(CAM_UTIL, "Bus client update request failed, rc = %d", + rc); + goto fail_unregister_client; + } + + bus_client->common_data->src_id = pdata->usecase[0].vectors[0].src; + bus_client->common_data->dst_id = pdata->usecase[0].vectors[0].dst; + bus_client_data->pdata = pdata; + bus_client_data->client_id = client_id; + bus_client->common_data->num_usecases = pdata->num_usecases; + bus_client_data->num_paths = pdata->usecase[0].num_paths; + bus_client->common_data->name = pdata->name; + + CAM_DBG(CAM_PERF, "Register Bus Client=[%d][%s] : src=%d, dst=%d", + bus_client_data->client_id, bus_client->common_data->name, + bus_client->common_data->src_id, + bus_client->common_data->dst_id); + + return 0; +fail_unregister_client: + msm_bus_scale_unregister_client(bus_client_data->client_id); +error: + CAM_MEM_FREE(bus_client_data); + bus_client->client_data = NULL; + CAM_MEM_FREE(bus_client); + *client = NULL; +end: + return rc; + +} + +void cam_soc_bus_client_unregister(void **client) +{ + struct cam_soc_bus_client *bus_client = + (struct cam_soc_bus_client *) (*client); + struct 
cam_soc_bus_client_data *bus_client_data = + (struct cam_soc_bus_client_data *) bus_client->client_data; + + if (bus_client_data->dyn_vote) + cam_soc_bus_client_update_bw(bus_client, 0, 0); + else + cam_soc_bus_client_update_request(bus_client, 0); + + msm_bus_scale_unregister_client(bus_client_data->client_id); + CAM_MEM_FREE(bus_client_data); + bus_client->client_data = NULL; + CAM_MEM_FREE(bus_client); + *client = NULL; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_bus.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_bus.h new file mode 100644 index 0000000000..cd8b38e03a --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_bus.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CAM_SOC_BUS_H_ +#define _CAM_SOC_BUS_H_ + +#include +#include +#include +#include "cam_debug_util.h" +#include "cam_cpas.h" + +#define CAM_SOC_BUS_MAX_NUM_USECASES 9 + +enum cam_soc_bus_path_data { + CAM_SOC_BUS_PATH_DATA_HLOS, + CAM_SOC_BUS_PATH_DATA_DRV_HIGH, + CAM_SOC_BUS_PATH_DATA_DRV_LOW, + CAM_SOC_BUS_PATH_DATA_MAX, +}; + +/** + * struct cam_soc_bus_client_ab_ib : Bandwidth values for selected usecase + * + * @ab: Arbitrated Bandwidth + * @ib: Instantaneous Bandwidth + */ +struct cam_soc_bus_client_ab_ib { + uint64_t ab; + uint64_t ib; +}; + +/** + * struct cam_soc_bus_client_common_data : Common data fields for bus client + * + * @name: Name of bus client + * @src_id: Bus master/src id + * @dst_id: Bus slave/dst id + * @is_drv_port: If DRV bus client + * @num_usecases: Number of use cases for this client + * @bw_pair: Bandwidth values for applicable usecases + */ +struct cam_soc_bus_client_common_data { + const char *name; + uint32_t src_id; + uint32_t dst_id; + bool is_drv_port; + int num_usecases; + struct cam_soc_bus_client_ab_ib 
bw_pair[CAM_SOC_BUS_MAX_NUM_USECASES]; +}; + +/** + * struct cam_soc_bus_client : Bus client information + * + * @client_data: Bus client data + * @common_data: Common data fields for bus client + */ +struct cam_soc_bus_client { + void *client_data; + struct cam_soc_bus_client_common_data *common_data; +}; + + +#if IS_REACHABLE(CONFIG_QCOM_BUS_SCALING) || \ + IS_REACHABLE(CONFIG_INTERCONNECT_QCOM) + +const char *cam_soc_bus_path_data_to_str(enum cam_soc_bus_path_data bus_path_data); + +int cam_soc_bus_client_update_request(void *client, unsigned int idx); + +int cam_soc_bus_client_update_bw(void *client, uint64_t ab, uint64_t ib, + enum cam_soc_bus_path_data bus_path_data); + +int cam_soc_bus_client_register(struct platform_device *pdev, + struct device_node *dev_node, void **client, + struct cam_soc_bus_client_common_data *common_data, bool use_path_name); + +void cam_soc_bus_client_unregister(void **client); + +#else + +static const char *cam_soc_bus_path_data_to_str(enum cam_soc_bus_path_data bus_path_data) +{ + return NULL; +} + +static inline int cam_soc_bus_client_update_request(void *client, + unsigned int idx) +{ + return 0; +} + +int cam_soc_bus_client_update_bw(void *client, uint64_t ab, uint64_t ib, + enum cam_soc_bus_path_data bus_path_data) +{ + return 0; +} + +static inline int cam_soc_bus_client_register( + struct platform_device *pdev, struct device_node *dev_node, + void **client, struct cam_soc_bus_client_common_data *common_data, bool use_path_name) +{ + return 0; +} + +static inline void cam_soc_bus_client_unregister(void **client) +{ +} + +#endif + +#endif /* _CAM_SOC_BUS_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_icc.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_icc.c new file mode 100644 index 0000000000..1a0595bc99 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_icc.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux 
Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include "cam_soc_bus.h" +#include "cam_compat.h" +#include "cam_soc_util.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" + +extern bool clk_rgltr_bus_ops_profiling; + +static inline struct icc_path *cam_wrapper_icc_get(struct device *dev, + const int src_id, const int dst_id, const char *name, bool use_path_name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + struct icc_path *temp; + + if (cam_vmrm_proxy_icc_voting_enable() || (debug_bypass_drivers & CAM_BYPASS_ICC)) { + CAM_WARN(CAM_UTIL, "Bypass icc get for %d %d", src_id, dst_id); + return (struct icc_path *)BYPASS_VALUE; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = cam_icc_get_path(dev, src_id, dst_id, name, use_path_name); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile", "icc_get (time taken in usec)"); + + return temp; +} + +static inline void cam_wrapper_icc_put(struct icc_path *path) +{ + struct timespec64 ts1, ts2; + long usec = 0; + + if (cam_vmrm_proxy_icc_voting_enable() || (debug_bypass_drivers & CAM_BYPASS_ICC)) { + CAM_WARN(CAM_UTIL, "Bypass icc put"); + return; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + icc_put(path); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile", "icc_put (time taken in usec)"); +} + +static inline int cam_wrapper_icc_set_bw(struct icc_path *path, + u32 avg_bw, u32 peak_bw) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (cam_vmrm_proxy_icc_voting_enable() || (debug_bypass_drivers & CAM_BYPASS_ICC)) { + CAM_WARN(CAM_UTIL, "Bypass icc set bw"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = icc_set_bw(path, avg_bw, peak_bw); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile", "icc_set_bw (time taken in usec)"); + + return temp; +} + +static inline void cam_wrapper_icc_set_tag(struct icc_path *path, + u32 
tag) +{ + struct timespec64 ts1, ts2; + long usec = 0; + + if (cam_vmrm_proxy_icc_voting_enable() || (debug_bypass_drivers & CAM_BYPASS_ICC)) { + CAM_WARN(CAM_UTIL, "Bypass icc set tag"); + return; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + icc_set_tag(path, tag); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile", "icc_set_tag (time taken in usec)"); +} + +/** + * struct cam_soc_bus_client_data : Bus client data + * + * @icc_data: Bus icc path information + */ +struct cam_soc_bus_client_data { + struct icc_path *icc_data[CAM_SOC_BUS_PATH_DATA_MAX]; +}; + +const char *cam_soc_bus_path_data_to_str(enum cam_soc_bus_path_data bus_path_data) +{ + switch (bus_path_data) { + case CAM_SOC_BUS_PATH_DATA_HLOS: + return "BUS_PATH_HLOS"; + case CAM_SOC_BUS_PATH_DATA_DRV_HIGH: + return "BUS_PATH_DRV_HIGH"; + case CAM_SOC_BUS_PATH_DATA_DRV_LOW: + return "BUS_PATH_DRV_LOW"; + default: + return "BUS_PATH_INVALID"; + } +} +int cam_soc_bus_client_update_request(void *client, unsigned int idx) +{ + int rc = 0; + uint64_t ab = 0, ib = 0; + struct cam_soc_bus_client *bus_client = + (struct cam_soc_bus_client *) client; + struct cam_soc_bus_client_data *bus_client_data = + (struct cam_soc_bus_client_data *) bus_client->client_data; + + if (debug_bypass_drivers & CAM_BYPASS_ICC) { + CAM_WARN(CAM_UTIL, "Bypass icc set bw"); + return rc; + } + + if (idx >= bus_client->common_data->num_usecases) { + CAM_ERR(CAM_UTIL, "Invalid vote level=%d, usecases=%d", idx, + bus_client->common_data->num_usecases); + rc = -EINVAL; + goto end; + } + + ab = bus_client->common_data->bw_pair[idx].ab; + ib = bus_client->common_data->bw_pair[idx].ib; + + CAM_DBG(CAM_PERF, "Bus client=[%s] index[%d] ab[%llu] ib[%llu]", + bus_client->common_data->name, idx, ab, ib); + + if (cam_vmrm_proxy_icc_voting_enable()) { + rc = cam_vmrm_icc_vote(bus_client->common_data->name, ab, ib); + if (rc) + CAM_ERR(CAM_PERF, + "Bus client=[%s] index[%d] ab[%llu] ib[%llu] vmrm icc vote failed", + 
bus_client->common_data->name, idx, ab, ib); + goto end; + } + + rc = cam_wrapper_icc_set_bw( + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_HLOS], + Bps_to_icc(ab), + Bps_to_icc(ib)); + if (rc) { + CAM_ERR(CAM_UTIL, + "Update request failed, client[%s], idx: %d", + bus_client->common_data->name, idx); + goto end; + } + +end: + return rc; +} + +int cam_soc_bus_client_update_bw(void *client, uint64_t ab, uint64_t ib, + enum cam_soc_bus_path_data bus_path_data) +{ + struct cam_soc_bus_client *bus_client = + (struct cam_soc_bus_client *) client; + struct cam_soc_bus_client_data *bus_client_data = + (struct cam_soc_bus_client_data *) bus_client->client_data; + int rc = 0; + + CAM_DBG(CAM_PERF, "Bus client=[%s] [%s] :ab[%llu] ib[%llu]", + bus_client->common_data->name, cam_soc_bus_path_data_to_str(bus_path_data), + ab, ib); + + if (cam_vmrm_proxy_icc_voting_enable()) { + rc = cam_vmrm_icc_vote(bus_client->common_data->name, ab, ib); + if (rc) + CAM_ERR(CAM_PERF, + "Bus client=[%s] [%s] :ab[%llu] ib[%llu] vmrm icc vote failed", + bus_client->common_data->name, + cam_soc_bus_path_data_to_str(bus_path_data), ab, ib); + goto end; + } + + rc = cam_wrapper_icc_set_bw( + bus_client_data->icc_data[bus_path_data], Bps_to_icc(ab), + Bps_to_icc(ib)); + if (rc) { + CAM_ERR(CAM_UTIL, "Update request failed, client[%s]", + bus_client->common_data->name); + goto end; + } + +end: + return rc; + +} + +int cam_soc_bus_client_register(struct platform_device *pdev, + struct device_node *dev_node, void **client, + struct cam_soc_bus_client_common_data *common_data, bool use_path_name) +{ + struct cam_soc_bus_client *bus_client = NULL; + struct cam_soc_bus_client_data *bus_client_data = NULL; + int rc = 0; + + bus_client = CAM_MEM_ZALLOC(sizeof(struct cam_soc_bus_client), GFP_KERNEL); + if (!bus_client) { + CAM_ERR(CAM_UTIL, "soc bus client is NULL"); + rc = -ENOMEM; + goto end; + } + + *client = bus_client; + + bus_client_data = CAM_MEM_ZALLOC(sizeof(struct cam_soc_bus_client_data), 
GFP_KERNEL);
+	if (!bus_client_data) {
+		CAM_MEM_FREE(bus_client);
+		*client = NULL;
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	bus_client->client_data = bus_client_data;
+	bus_client->common_data = common_data;
+	if (bus_client->common_data->is_drv_port) {
+		bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_HIGH] =
+			cam_wrapper_icc_get(&pdev->dev,
+			bus_client->common_data->src_id, bus_client->common_data->dst_id,
+			bus_client->common_data->name, use_path_name);
+		if (IS_ERR_OR_NULL(bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_HIGH])) {
+			/* Format has 4 specifiers; original passed only 3 args (name
+			 * for %s was missing), shifting every argument.
+			 */
+			CAM_ERR(CAM_UTIL,
+				"Failed to register DRV bus client Bus Client=[%s] : src=%d, dst=%d bus_path:%d",
+				bus_client->common_data->name, bus_client->common_data->src_id,
+				bus_client->common_data->dst_id, CAM_SOC_BUS_PATH_DATA_DRV_HIGH);
+			rc = -EINVAL;
+			goto error;
+		}
+
+		bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_LOW] =
+			cam_wrapper_icc_get(&pdev->dev,
+			bus_client->common_data->src_id, bus_client->common_data->dst_id,
+			bus_client->common_data->name, use_path_name);
+		if (IS_ERR_OR_NULL(bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_LOW])) {
+			/* NOTE(review): on this failure path the DRV_HIGH icc path
+			 * obtained above is not released (goto error skips icc_put)
+			 * — possible leak, confirm against fail_unregister_client.
+			 */
+			CAM_ERR(CAM_UTIL,
+				"Failed to register DRV bus client Bus Client=[%s] : src=%d, dst=%d bus_path:%d",
+				bus_client->common_data->name, bus_client->common_data->src_id,
+				bus_client->common_data->dst_id, CAM_SOC_BUS_PATH_DATA_DRV_LOW);
+			rc = -EINVAL;
+			goto error;
+		}
+
+		/* Set appropriate tags for HIGH and LOW vote paths */
+		cam_wrapper_icc_set_tag(
+			bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_HIGH],
+			QCOM_ICC_TAG_ACTIVE_ONLY);
+		cam_wrapper_icc_set_tag(
+			bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_LOW],
+			QCOM_ICC_TAG_SLEEP);
+
+		rc = cam_wrapper_icc_set_bw(
+			bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_HIGH], 0, 0);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "Bus client[%s] update request failed, rc = %d",
+				bus_client->common_data->name, rc);
+			goto fail_unregister_client;
+		}
+
+		rc = cam_wrapper_icc_set_bw(
+			bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_LOW], 0, 0);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, 
"Bus client[%s] update request failed, rc = %d", + bus_client->common_data->name, rc); + goto fail_unregister_client; + } + } else { + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_HLOS] = + cam_wrapper_icc_get(&pdev->dev, + bus_client->common_data->src_id, bus_client->common_data->dst_id, + bus_client->common_data->name, use_path_name); + if (IS_ERR_OR_NULL(bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_HLOS])) { + CAM_ERR(CAM_UTIL, "failed to register HLOS bus client"); + rc = -EINVAL; + goto error; + } + + rc = cam_wrapper_icc_set_bw( + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_HLOS], 0, 0); + if (rc) { + CAM_ERR(CAM_UTIL, "Bus client[%s] update request failed, rc = %d", + bus_client->common_data->name, rc); + goto fail_unregister_client; + } + } + + CAM_DBG(CAM_PERF, "Register Bus Client=[%s] : src=%d, dst=%d is_drv_port:%s", + bus_client->common_data->name, bus_client->common_data->src_id, + bus_client->common_data->dst_id, + CAM_BOOL_TO_YESNO(bus_client->common_data->is_drv_port)); + + return 0; + +fail_unregister_client: + if (bus_client->common_data->is_drv_port) { + cam_wrapper_icc_put( + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_HIGH]); + cam_wrapper_icc_put( + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_LOW]); + } else { + cam_wrapper_icc_put( + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_HLOS]); + } + +error: + CAM_MEM_FREE(bus_client_data); + bus_client->client_data = NULL; + CAM_MEM_FREE(bus_client); + *client = NULL; +end: + return rc; + +} + +void cam_soc_bus_client_unregister(void **client) +{ + struct cam_soc_bus_client *bus_client = + (struct cam_soc_bus_client *) (*client); + struct cam_soc_bus_client_data *bus_client_data = + (struct cam_soc_bus_client_data *) bus_client->client_data; + + if (bus_client->common_data->is_drv_port) { + cam_wrapper_icc_put( + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_HIGH]); + cam_wrapper_icc_put( + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_DRV_LOW]); + } else { + 
cam_wrapper_icc_put( + bus_client_data->icc_data[CAM_SOC_BUS_PATH_DATA_HLOS]); + } + + CAM_MEM_FREE(bus_client_data); + bus_client->client_data = NULL; + CAM_MEM_FREE(bus_client); + *client = NULL; + +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_util.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_util.c new file mode 100644 index 0000000000..19b2512870 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_util.c @@ -0,0 +1,5138 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "cam_soc_util.h" +#include "cam_debug_util.h" +#include "cam_cx_ipeak.h" +#include "cam_mem_mgr.h" +#include "cam_presil_hw_access.h" +#include "cam_compat.h" +#include "cam_vmrm_interface.h" +#include "cam_mem_mgr_api.h" + +#if (IS_ENABLED(CONFIG_QCOM_CRM) || IS_ENABLED(CONFIG_QCOM_CRM_V2)) +#include +#include +#endif + +#define CAM_TO_MASK(bitn) (1 << (int)(bitn)) +#define CAM_IS_BIT_SET(mask, bit) ((mask) & CAM_TO_MASK(bit)) +#define CAM_SET_BIT(mask, bit) ((mask) |= CAM_TO_MASK(bit)) +#define CAM_CLEAR_BIT(mask, bit) ((mask) &= ~CAM_TO_MASK(bit)) + +#define CAM_CLK_DIRNAME "clk" + +#define CAM_MAX_CLK_NAME_LEN 128 +bool clk_rgltr_bus_ops_profiling; +bool clk_ops_profiling_hw_and_sw_voting; + +static uint skip_mmrm_set_rate; +module_param(skip_mmrm_set_rate, uint, 0644); + +/** + * struct cam_clk_wrapper_clk: This represents an entry corresponding to a + * shared clock in Clk wrapper. Clients that share + * the same clock are registered to this clk entry + * and set rate from them is consolidated before + * setting it to clk driver. 
+ * + * @list: List pointer to point to next shared clk entry + * @clk_id: Clk Id of this clock + * @curr_clk_rate: Current clock rate set for this clock + * @client_list: List of clients registered to this shared clock entry + * @num_clients: Number of registered clients + * @active_clients: Number of active clients + * @mmrm_client: MMRM Client handle for src clock + * @soc_info: soc_info of client with which mmrm handle is created. + * This is used as unique identifier for a client and mmrm + * callback data. When client corresponds to this soc_info is + * unregistered, need to unregister mmrm handle as well. + * @is_nrt_dev: Whether this clock corresponds to NRT device + * @min_clk_rate: Minimum clk rate that this clock supports + * @name: Name of the clock + **/ +struct cam_clk_wrapper_clk { + struct list_head list; + uint32_t clk_id; + int64_t curr_clk_rate; + struct list_head client_list; + uint32_t num_clients; + uint32_t active_clients; + void *mmrm_handle; + struct cam_hw_soc_info *soc_info; + bool is_nrt_dev; + int64_t min_clk_rate; + char name[CAM_MAX_CLK_NAME_LEN]; +}; + +/** + * struct cam_clk_wrapper_client: This represents a client (device) that wants + * to share the clock with some other client. + * + * @list: List pointer to point to next client that share the + * same clock + * @soc_info: soc_info of client. 
This is used as unique identifier + * for a client + * @clk: Clk handle + * @curr_clk_rate: Current clock rate set for this client + **/ +struct cam_clk_wrapper_client { + struct list_head list; + struct cam_hw_soc_info *soc_info; + struct clk *clk; + int64_t curr_clk_rate; +}; + +static char supported_clk_info[256]; + +static DEFINE_MUTEX(wrapper_lock); +static LIST_HEAD(wrapper_clk_list); + +#define CAM_IS_VALID_CESTA_IDX(idx) ((idx >= 0) && (idx < CAM_CESTA_MAX_CLIENTS)) + +#define CAM_CRM_DEV_IDENTIFIER "cam_crm" + +const struct device *cam_cesta_crm_dev; + +#if (IS_ENABLED(CONFIG_QCOM_CRM) || IS_ENABLED(CONFIG_QCOM_CRM_V2)) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API) +static int cam_soc_util_set_hw_client_rate_through_mmrm( + void *mmrm_handle, long low_val, long high_val, + uint32_t num_hw_blocks, int cesta_client_idx); +#endif + +#if (IS_ENABLED(CONFIG_QCOM_CRM) || IS_ENABLED(CONFIG_QCOM_CRM_V2)) +static inline const struct device *cam_wrapper_crm_get_device( + const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + const struct device *temp; + + if (debug_bypass_drivers & CAM_BYPASS_CESTA) { + CAM_WARN(CAM_UTIL, "Bypass crm get device"); + return (const struct device *)BYPASS_VALUE; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = crm_get_device(name); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: crm_get_device (name, time taken in usec)", name); + + return temp; +} + +static inline int cam_wrapper_crm_write_pwr_states(const struct device *dev, + u32 drv_id, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_CESTA) { + CAM_WARN(CAM_UTIL, "Bypass crm write pwr states"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = crm_write_pwr_states(cam_cesta_crm_dev, drv_id); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: crm_write_pwr_states (name, time taken in usec)", name); + + return temp; +} + +#endif + +#if 
(IS_ENABLED(CONFIG_QCOM_CRM) || IS_ENABLED(CONFIG_QCOM_CRM_V2)) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API) +static inline int cam_wrapper_qcom_clk_crm_set_rate(struct clk *clk, + enum crm_drv_type client_type, u32 client_idx, + u32 pwr_st, unsigned long rate, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_CESTA) { + CAM_WARN(CAM_UTIL, "Bypass qcom clk crm set rate"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = qcom_clk_crm_set_rate(clk, client_type, client_idx, pwr_st, rate); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: qcom_clk_crm_set_rate (name, time taken in usec)", name); + + return temp; +} +#endif + +#if IS_ENABLED(CONFIG_QCOM_CRM_V2) +static inline int cam_wrapper_qcom_clk_crmb_set_rate(struct clk *clk, + enum crm_drv_type client_type, u32 client_idx, + u32 nd_idx, u32 pwr_st, unsigned long ab_rate, + unsigned long ib_rate, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_CESTA) { + CAM_WARN(CAM_UTIL, "Bypass qcom clk crmb set rate"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = qcom_clk_crmb_set_rate(clk, client_type, client_idx, nd_idx, pwr_st, + ab_rate, ib_rate); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: qcom_clk_crmb_set_rate (name, time taken in usec)", name); + + return temp; +} + +bool cam_is_crmb_supported(struct cam_hw_soc_info *soc_info) +{ + const char *src_clk_name; + int32_t src_clk_idx = soc_info->src_clk_idx; + + src_clk_name = soc_info->clk_name[src_clk_idx]; + + if (soc_info->is_crmb_clk) { + CAM_DBG(CAM_UTIL, "CRMB is supported for clk %s", src_clk_name); + return true; + } + CAM_DBG(CAM_UTIL, "CRMB is NOT supported for clk %s", src_clk_name); + return false; +} + +#elif IS_ENABLED(CONFIG_QCOM_CRM) +static inline int cam_wrapper_qcom_clk_crmb_set_rate(struct clk *clk, + enum crm_drv_type 
client_type, u32 client_idx, + u32 nd_idx, u32 pwr_st, unsigned long ab_rate, + unsigned long ib_rate, const char *name) +{ + CAM_ERR(CAM_UTIL, "CRMB API is NOT Supported"); + return -EOPNOTSUPP; +} + +bool cam_is_crmb_supported(struct cam_hw_soc_info *soc_info) +{ + return false; +} +#endif + +static inline int cam_wrapper_clk_set_rate(struct clk *clk, unsigned long rate, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass clk set rate"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = clk_set_rate(clk, rate); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: clk_set_rate (name, time taken in usec)", name); + + return temp; +} + +static inline long cam_wrapper_clk_round_rate(struct clk *clk, unsigned long rate, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + long temp; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass clk round rate"); + return rate; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = clk_round_rate(clk, rate); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: clk_round_rate (name, time taken in usec)", name); + + return temp; +} + +inline unsigned long cam_wrapper_clk_get_rate(struct clk *clk, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + unsigned long temp; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass clk get rate"); + return DEFAULT_CLK_VALUE; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = clk_get_rate(clk); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: clk_get_rate (name, time taken in usec)", name); + + return temp; +} + +static inline struct clk *cam_wrapper_clk_get(struct device *dev, const char *id) +{ + struct timespec64 ts1, ts2; + long usec = 0; + struct clk *temp; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, 
"Bypass clk get"); + return (struct clk *)BYPASS_VALUE; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = clk_get(dev, id); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: clk_get (name, time taken in usec)", id); + + return temp; +} + +static inline void cam_wrapper_clk_put(struct clk *clk, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass clk put"); + return; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + clk_put(clk); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: clk_put (name, time taken in usec)", name); +} + +static inline struct clk *cam_wrapper_of_clk_get_from_provider( + struct of_phandle_args *clkspec, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + struct clk *temp; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass of clk get from provider"); + return (struct clk *)BYPASS_VALUE; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = of_clk_get_from_provider(clkspec); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: of_clk_get_from_provider (name, time taken in usec)", name); + + return temp; +} + +static inline int cam_wrapper_clk_prepare_enable(struct clk *clk, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass clk prepare enable"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = clk_prepare_enable(clk); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: clk_prepare_enable (name, time taken in usec)", name); + + return temp; +} + +static inline void cam_wrapper_clk_disable_unprepare(struct clk *clk, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass clk disable unprepare"); + return; + } + + 
CAM_SAVE_START_TIMESTAMP_IF(ts1); + + clk_disable_unprepare(clk); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: clk_disable_unprepare (name, time taken in usec)", name); +} + +static inline struct regulator *cam_wrapper_regulator_get(struct device *dev, + const char *id) +{ + struct timespec64 ts1, ts2; + long usec = 0; + struct regulator *temp; + + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_WARN(CAM_UTIL, "Bypass regulator get"); + return (struct regulator *)BYPASS_VALUE; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = regulator_get(dev, id); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_get (name, time taken in usec)", id); + + return temp; +} + +static inline void cam_wrapper_regulator_put(struct regulator *regulator, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_WARN(CAM_UTIL, "Bypass regulator put"); + return; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + regulator_put(regulator); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_put (name, time taken in usec)", name); +} + +static inline int cam_wrapper_regulator_disable(struct regulator *regulator, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_WARN(CAM_UTIL, "Bypass regulator disable"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = regulator_disable(regulator); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_disable (name, time taken in usec)", name); + + return temp; +} + +static inline int cam_wrapper_regulator_enable(struct regulator *regulator, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_WARN(CAM_UTIL, "Bypass regulator enable"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = 
regulator_enable(regulator); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_enable (name, time taken in usec)", name); + + return temp; +} + +static inline int cam_wrapper_regulator_set_voltage( + struct regulator *regulator, int min_uV, int max_uV, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_WARN(CAM_UTIL, "Bypass regulator set voltage"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = regulator_set_voltage(regulator, min_uV, max_uV); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_set_voltage (name, time taken in usec)", name); + + return temp; +} + +static inline int cam_wrapper_regulator_count_voltages( + struct regulator *regulator, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_WARN(CAM_UTIL, "Bypass regulator count voltages"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = regulator_count_voltages(regulator); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_count_voltages (name, time taken in usec)", name); + + return temp; +} + +inline int cam_wrapper_regulator_set_load( + struct regulator *regulator, int uA_load, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_WARN(CAM_UTIL, "Bypass regulator set load"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = regulator_set_load(regulator, uA_load); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_set_load (name, time taken in usec)", name); + + return temp; +} + +inline int cam_wrapper_regulator_set_mode( + struct regulator *regulator, unsigned int mode, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & 
CAM_BYPASS_RGLTR_MODE) { + CAM_WARN(CAM_UTIL, "Bypass regulator set mode"); + return 0; +} + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = regulator_set_mode(regulator, mode); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_set_mode (name, time taken in usec)", name); + + return temp; +} + +static inline int cam_wrapper_regulator_is_enabled( + struct regulator *regulator, const char *name) +{ + struct timespec64 ts1, ts2; + long usec = 0; + int temp; + + if (debug_bypass_drivers & CAM_BYPASS_RGLTR) { + CAM_WARN(CAM_UTIL, "Bypass regulator is enabled"); + return 0; + } + + CAM_SAVE_START_TIMESTAMP_IF(ts1); + + temp = regulator_is_enabled(regulator); + + CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, + "ClkRegBusOpsProfile: regulator_is_enabled (name, time taken in usec)", name); + + return temp; +} + +inline void cam_soc_util_set_bypass_drivers( + uint32_t bypass_drivers) +{ + debug_bypass_drivers = bypass_drivers; + CAM_INFO(CAM_UTIL, "bypass drivers %d", debug_bypass_drivers); +} + +#if (IS_ENABLED(CONFIG_QCOM_CRM) || IS_ENABLED(CONFIG_QCOM_CRM_V2)) +inline int cam_soc_util_cesta_populate_crm_device(void) +{ + cam_cesta_crm_dev = cam_wrapper_crm_get_device(CAM_CRM_DEV_IDENTIFIER); + if (!cam_cesta_crm_dev) { + CAM_ERR(CAM_UTIL, "Failed to get cesta crm dev for %s", CAM_CRM_DEV_IDENTIFIER); + return -ENODEV; + } + + return 0; +} + +int cam_soc_util_cesta_channel_switch(uint32_t cesta_client_idx, const char *identifier) +{ + int rc = 0; + + if (!cam_cesta_crm_dev) { + CAM_ERR(CAM_UTIL, "camera cesta crm device is null"); + return -EINVAL; + } + + if (!CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) { + CAM_ERR(CAM_UTIL, "Invalid client index for camera cesta idx: %d max: %d", + cesta_client_idx, CAM_CESTA_MAX_CLIENTS); + return -EINVAL; + } + + CAM_DBG(CAM_PERF, "CESTA Channel switch : hw client idx %d identifier=%s", + cesta_client_idx, identifier); + + rc = cam_wrapper_crm_write_pwr_states(cam_cesta_crm_dev, cesta_client_idx, + identifier); + 
if (rc) { + CAM_ERR(CAM_UTIL, + "Failed to trigger cesta channel switch cesta_client_idx: %u rc: %d", + cesta_client_idx, rc); + return rc; + } + + return rc; +} +#else +inline int cam_soc_util_cesta_populate_crm_device(void) +{ + CAM_ERR(CAM_UTIL, "Not supported"); + + return -EOPNOTSUPP; +} + +inline int cam_soc_util_cesta_channel_switch(uint32_t cesta_client_idx, const char *identifier) +{ + CAM_ERR(CAM_UTIL, "Not supported, cesta_client_idx=%d, identifier=%s", + cesta_client_idx, identifier); + + return -EOPNOTSUPP; +} +#endif + +#if (IS_ENABLED(CONFIG_QCOM_CRM) || IS_ENABLED(CONFIG_QCOM_CRM_V2)) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API) +static int cam_soc_util_set_cesta_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t cesta_client_idx, unsigned long high_val, unsigned long low_val, + unsigned long *applied_high_val, unsigned long *applied_low_val) +{ + int32_t src_clk_idx; + struct clk *clk = NULL; + int rc = 0; + bool is_crmb_sup = false; + + if (!soc_info || (soc_info->src_clk_idx < 0) || + (soc_info->src_clk_idx >= CAM_SOC_MAX_CLK)) { + CAM_ERR(CAM_UTIL, "Invalid src_clk_idx: %d", + soc_info ? 
soc_info->src_clk_idx : -1); + return -EINVAL; + } + + if (!CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) { + CAM_ERR(CAM_UTIL, "Invalid client index for camera cesta idx: %d max: %d", + cesta_client_idx, CAM_CESTA_MAX_CLIENTS); + return -EINVAL; + } + + /* Only source clocks are supported by this API to set HW client clock votes */ + src_clk_idx = soc_info->src_clk_idx; + clk = soc_info->clk[src_clk_idx]; + is_crmb_sup = cam_is_crmb_supported(soc_info); + + CAM_DBG(CAM_UTIL, "%s Requested clk [hi lo]:[%llu %llu] cesta_client_idx:%d is_crmb_sup:%d", + soc_info->clk_name[src_clk_idx], high_val, low_val, cesta_client_idx, is_crmb_sup); + + if (!skip_mmrm_set_rate && soc_info->mmrm_handle) { + if (!is_crmb_sup) { + CAM_DBG(CAM_UTIL, + "cesta mmrm hw client: set %s, high-rate %lld low-rate %lld", + soc_info->clk_name[src_clk_idx], high_val, low_val); + + rc = cam_soc_util_set_hw_client_rate_through_mmrm( + soc_info->mmrm_handle, + low_val, high_val, 1, + cesta_client_idx); + + if (rc) { + CAM_ERR(CAM_UTIL, + "set_hw_client_rate through mmrm failed on %s clk_id %d low_val %llu high_val %llu client idx=%d", + soc_info->clk_name[src_clk_idx], + soc_info->clk_id[src_clk_idx], + low_val, high_val, cesta_client_idx); + return rc; + } + goto end; + } + } + + if (is_crmb_sup) { + + /* For CRMB, we pass 0 for nd_idx as we only need a single node for CESTA */ + + rc = cam_wrapper_qcom_clk_crmb_set_rate( + clk, CRM_HW_DRV, cesta_client_idx, 0, CRM_PWR_STATE1, + high_val, 0, soc_info->clk_name[src_clk_idx]); + if (rc) { + CAM_ERR(CAM_UTIL, + "Failed in setting cesta crm_B high clk rate, client idx: %u pwr state: %u clk_val: %llu rc: %d", + cesta_client_idx, CRM_PWR_STATE1, high_val, rc); + return rc; + } + + rc = cam_wrapper_qcom_clk_crmb_set_rate( + clk, CRM_HW_DRV, cesta_client_idx, 0, CRM_PWR_STATE0, + low_val, 0, soc_info->clk_name[src_clk_idx]); + if (rc) { + CAM_ERR(CAM_UTIL, + "Failed in setting cesta crm_B low clk rate, client idx: %u pwr state: %u clk_val: %llu rc: %d", + 
cesta_client_idx, CRM_PWR_STATE0, low_val, rc); + return rc; + } + + } else { + + rc = cam_wrapper_qcom_clk_crm_set_rate( + clk, CRM_HW_DRV, cesta_client_idx, CRM_PWR_STATE1, + high_val, soc_info->clk_name[src_clk_idx]); + if (rc) { + CAM_ERR(CAM_UTIL, + "Failed in setting cesta high clk rate, client idx: %u pwr state: %u clk_val: %llu rc: %d", + cesta_client_idx, CRM_PWR_STATE1, high_val, rc); + return rc; + } + + rc = cam_wrapper_qcom_clk_crm_set_rate( + clk, CRM_HW_DRV, cesta_client_idx, CRM_PWR_STATE0, + low_val, soc_info->clk_name[src_clk_idx]); + if (rc) { + CAM_ERR(CAM_UTIL, + "Failed in setting cesta low clk rate, client idx: %u pwr state: %u clk_val: %llu rc: %d", + cesta_client_idx, CRM_PWR_STATE0, low_val, rc); + return rc; + } + } + +end: + if (applied_high_val) + *applied_high_val = high_val; + + if (applied_low_val) + *applied_low_val = low_val; + + return rc; +} + +#if IS_REACHABLE(CONFIG_MSM_MMRM) +int cam_soc_util_set_hw_client_rate_through_mmrm( + void *mmrm_handle, long low_val, long high_val, + uint32_t num_hw_blocks, int cesta_client_idx) +{ + int rc = 0; + struct mmrm_client_data client_data; + + client_data.num_hw_blocks = num_hw_blocks; + client_data.crm_drv_idx = cesta_client_idx; + client_data.drv_type = MMRM_CRM_HW_DRV; + client_data.pwr_st = CRM_PWR_STATE1; + client_data.flags = 0; + + CAM_DBG(CAM_UTIL, + "hw client mmrm=%pK, high_val %ld, low_val %ld, num_blocks=%d, pwr_state: %u, client_idx: %d", + mmrm_handle, high_val, low_val, num_hw_blocks, CRM_PWR_STATE1, cesta_client_idx); + + rc = mmrm_client_set_value((struct mmrm_client *)mmrm_handle, + &client_data, high_val); + if (rc) { + CAM_ERR(CAM_UTIL, "Set high rate failed rate %ld rc %d", + high_val, rc); + return rc; + } + + /* We vote a second time for pwr_st = low */ + client_data.pwr_st = CRM_PWR_STATE0; + + rc = mmrm_client_set_value((struct mmrm_client *)mmrm_handle, + &client_data, low_val); + if (rc) + CAM_ERR(CAM_UTIL, "Set low rate failed rate %ld rc %d", low_val, rc); + + 
return rc; +} + +#else +int cam_soc_util_set_hw_client_rate_through_mmrm( + void *mmrm_handle, long low_val, long high_val, + uint32_t num_hw_blocks, int cesta_client_idx) +{ + return 0; +} +#endif + +#else +static inline int cam_soc_util_set_cesta_clk_rate(struct cam_hw_soc_info *soc_info, + uint32_t cesta_client_idx, unsigned long high_val, unsigned long low_val, + unsigned long *applied_high_val, unsigned long *applied_low_val) +{ + CAM_ERR(CAM_UTIL, "Not supported, dev=%s, cesta_client_idx=%d, high_val=%ld, low_val=%ld", + soc_info->dev_name, cesta_client_idx, high_val, low_val); + + return -EOPNOTSUPP; +} +#endif + +#if IS_REACHABLE(CONFIG_MSM_MMRM) +bool cam_is_mmrm_supported_on_current_chip(void) +{ + bool is_supported; + + is_supported = mmrm_client_check_scaling_supported(MMRM_CLIENT_CLOCK, + MMRM_CLIENT_DOMAIN_CAMERA); + CAM_DBG(CAM_UTIL, "is mmrm supported: %s", + CAM_BOOL_TO_YESNO(is_supported)); + + return is_supported; +} + +int cam_mmrm_notifier_callback( + struct mmrm_client_notifier_data *notifier_data) +{ + if (!notifier_data) { + CAM_ERR(CAM_UTIL, "Invalid notifier data"); + return -EBADR; + } + + if (notifier_data->cb_type == MMRM_CLIENT_RESOURCE_VALUE_CHANGE) { + struct cam_hw_soc_info *soc_info = notifier_data->pvt_data; + + CAM_WARN(CAM_UTIL, "Dev %s Clk %s value change from %ld to %ld", + soc_info->dev_name, + (soc_info->src_clk_idx == -1) ? 
"No src clk" : + soc_info->clk_name[soc_info->src_clk_idx], + notifier_data->cb_data.val_chng.old_val, + notifier_data->cb_data.val_chng.new_val); + } + + return 0; +} + +int cam_soc_util_register_mmrm_client( + uint32_t clk_id, struct clk *clk, bool is_nrt_dev, + struct cam_hw_soc_info *soc_info, const char *clk_name, + void **mmrm_handle) +{ + struct mmrm_client *mmrm_client; + struct mmrm_client_desc desc = { }; + + if (!mmrm_handle) { + CAM_ERR(CAM_UTIL, "Invalid mmrm input"); + return -EINVAL; + } + + *mmrm_handle = (void *)NULL; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass register mmrm client"); + return 0; + } + + if (!cam_is_mmrm_supported_on_current_chip()) + return 0; + + desc.client_type = MMRM_CLIENT_CLOCK; + desc.client_info.desc.client_domain = MMRM_CLIENT_DOMAIN_CAMERA; + desc.client_info.desc.client_id = clk_id; + desc.client_info.desc.clk = clk; + +#if (IS_ENABLED(CONFIG_QCOM_CRM) || IS_ENABLED(CONFIG_QCOM_CRM_V2)) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API) + if (soc_info->is_clk_drv_en) { + desc.client_info.desc.hw_drv_instances = CAM_CESTA_MAX_CLIENTS; + desc.client_info.desc.num_pwr_states = CAM_NUM_PWR_STATES; + } else { + desc.client_info.desc.hw_drv_instances = 0; + desc.client_info.desc.num_pwr_states = 0; + } +#endif + + snprintf((char *)desc.client_info.desc.name, + sizeof(desc.client_info.desc.name), "%s_%s", + soc_info->dev_name, clk_name); + + desc.priority = is_nrt_dev ? 
+ MMRM_CLIENT_PRIOR_LOW : MMRM_CLIENT_PRIOR_HIGH; + desc.pvt_data = soc_info; + desc.notifier_callback_fn = cam_mmrm_notifier_callback; + + mmrm_client = mmrm_client_register(&desc); + if (!mmrm_client) { + CAM_ERR(CAM_UTIL, "MMRM Register failed Dev %s clk %s id %d", + soc_info->dev_name, clk_name, clk_id); + return -EINVAL; + } + + CAM_DBG(CAM_UTIL, + "MMRM Register success Dev %s is_nrt_dev %d clk %s id %d handle=%pK", + soc_info->dev_name, is_nrt_dev, clk_name, clk_id, mmrm_client); + + *mmrm_handle = (void *)mmrm_client; + + return 0; +} + +int cam_soc_util_unregister_mmrm_client( + void *mmrm_handle) +{ + int rc = 0; + + CAM_DBG(CAM_UTIL, "MMRM UnRegister handle=%pK", mmrm_handle); + + if (mmrm_handle) { + rc = mmrm_client_deregister((struct mmrm_client *)mmrm_handle); + if (rc) + CAM_ERR(CAM_UTIL, + "Failed in deregister handle=%pK, rc %d", + mmrm_handle, rc); + } + + return rc; +} + +static int cam_soc_util_set_sw_client_rate_through_mmrm( + void *mmrm_handle, bool is_nrt_dev, long min_rate, + long req_rate, uint32_t num_hw_blocks) +{ + int rc = 0; + struct mmrm_client_data client_data; + struct mmrm_client_res_value val; + + client_data.num_hw_blocks = num_hw_blocks; + client_data.flags = 0; + +#if (IS_ENABLED(CONFIG_QCOM_CRM) || IS_ENABLED(CONFIG_QCOM_CRM_V2)) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API) + client_data.drv_type = MMRM_CRM_SW_DRV; +#endif + + CAM_DBG(CAM_UTIL, + "sw client mmrm=%pK, nrt=%d, min_rate=%ld req_rate %ld, num_blocks=%d", + mmrm_handle, is_nrt_dev, min_rate, req_rate, num_hw_blocks); + + if (is_nrt_dev) { + val.min = min_rate; + val.cur = req_rate; + + rc = mmrm_client_set_value_in_range( + (struct mmrm_client *)mmrm_handle, &client_data, &val); + } else { + rc = mmrm_client_set_value( + (struct mmrm_client *)mmrm_handle, + &client_data, req_rate); + } + + if (rc) + CAM_ERR(CAM_UTIL, "Set rate failed rate %ld rc %d", + req_rate, rc); + + return rc; +} +#else +int cam_soc_util_register_mmrm_client( + uint32_t clk_id, struct clk 
*clk, bool is_nrt_dev, + struct cam_hw_soc_info *soc_info, const char *clk_name, + void **mmrm_handle) +{ + if (!mmrm_handle) { + CAM_ERR(CAM_UTIL, "Invalid mmrm input"); + return -EINVAL; + } + + *mmrm_handle = NULL; + + return 0; +} + +int cam_soc_util_unregister_mmrm_client( + void *mmrm_handle) +{ + return 0; +} + +static int cam_soc_util_set_sw_client_rate_through_mmrm( + void *mmrm_handle, bool is_nrt_dev, long min_rate, + long req_rate, uint32_t num_hw_blocks) +{ + return 0; +} +#endif + +static int cam_soc_util_clk_wrapper_register_entry( + uint32_t clk_id, struct clk *clk, bool is_src_clk, + struct cam_hw_soc_info *soc_info, int64_t min_clk_rate, + const char *clk_name) +{ + struct cam_clk_wrapper_clk *wrapper_clk; + struct cam_clk_wrapper_client *wrapper_client; + bool clock_found = false; + int rc = 0; + + mutex_lock(&wrapper_lock); + + list_for_each_entry(wrapper_clk, &wrapper_clk_list, list) { + CAM_DBG(CAM_UTIL, "Clk list id %d num clients %d", + wrapper_clk->clk_id, wrapper_clk->num_clients); + + if (wrapper_clk->clk_id == clk_id) { + clock_found = true; + list_for_each_entry(wrapper_client, + &wrapper_clk->client_list, list) { + CAM_DBG(CAM_UTIL, + "Clk id %d entry client %s", + wrapper_clk->clk_id, + wrapper_client->soc_info->dev_name); + if (wrapper_client->soc_info == soc_info) { + CAM_ERR(CAM_UTIL, + "Register with same soc info, clk id %d, client %s", + clk_id, soc_info->dev_name); + rc = -EINVAL; + goto end; + } + } + break; + } + } + + if (!clock_found) { + CAM_DBG(CAM_UTIL, "Adding new entry for clk id %d", clk_id); + wrapper_clk = CAM_MEM_ZALLOC(sizeof(struct cam_clk_wrapper_clk), + GFP_KERNEL); + if (!wrapper_clk) { + CAM_ERR(CAM_UTIL, + "Failed in allocating new clk entry %d", + clk_id); + rc = -ENOMEM; + goto end; + } + + strscpy(wrapper_clk->name, clk_name, CAM_MAX_CLK_NAME_LEN); + wrapper_clk->clk_id = clk_id; + INIT_LIST_HEAD(&wrapper_clk->list); + INIT_LIST_HEAD(&wrapper_clk->client_list); + list_add_tail(&wrapper_clk->list, 
&wrapper_clk_list); + } + wrapper_client = CAM_MEM_ZALLOC(sizeof(struct cam_clk_wrapper_client), + GFP_KERNEL); + if (!wrapper_client) { + CAM_ERR(CAM_UTIL, "Failed in allocating new client entry %d", + clk_id); + rc = -ENOMEM; + goto end; + } + + wrapper_client->soc_info = soc_info; + wrapper_client->clk = clk; + + if (is_src_clk && !wrapper_clk->mmrm_handle) { + wrapper_clk->is_nrt_dev = soc_info->is_nrt_dev; + wrapper_clk->min_clk_rate = min_clk_rate; + wrapper_clk->soc_info = soc_info; + + rc = cam_soc_util_register_mmrm_client(clk_id, clk, + wrapper_clk->is_nrt_dev, soc_info, clk_name, + &wrapper_clk->mmrm_handle); + if (rc) { + CAM_ERR(CAM_UTIL, + "Failed in register mmrm client Dev %s clk id %d", + soc_info->dev_name, clk_id); + CAM_MEM_FREE(wrapper_client); + goto end; + } + } + + INIT_LIST_HEAD(&wrapper_client->list); + list_add_tail(&wrapper_client->list, &wrapper_clk->client_list); + wrapper_clk->num_clients++; + + CAM_DBG(CAM_UTIL, + "Adding new client %s for clk[%s] id %d, num clients %d", + soc_info->dev_name, clk_name, clk_id, wrapper_clk->num_clients); + +end: + mutex_unlock(&wrapper_lock); + return rc; +} + +static int cam_soc_util_clk_wrapper_unregister_entry( + uint32_t clk_id, struct cam_hw_soc_info *soc_info) +{ + struct cam_clk_wrapper_clk *wrapper_clk; + struct cam_clk_wrapper_client *wrapper_client; + bool clock_found = false; + bool client_found = false; + int rc = 0; + + mutex_lock(&wrapper_lock); + + list_for_each_entry(wrapper_clk, &wrapper_clk_list, list) { + CAM_DBG(CAM_UTIL, "Clk list id %d num clients %d", + wrapper_clk->clk_id, wrapper_clk->num_clients); + + if (wrapper_clk->clk_id == clk_id) { + clock_found = true; + list_for_each_entry(wrapper_client, + &wrapper_clk->client_list, list) { + CAM_DBG(CAM_UTIL, "Clk id %d entry client %s", + wrapper_clk->clk_id, + wrapper_client->soc_info->dev_name); + if (wrapper_client->soc_info == soc_info) { + client_found = true; + break; + } + } + break; + } + } + + if (!clock_found) { + 
CAM_ERR(CAM_UTIL, "Shared clk id %d entry not found", clk_id); + rc = -EINVAL; + goto end; + } + + if (!client_found) { + CAM_ERR(CAM_UTIL, + "Client %pK for Shared clk id %d entry not found", + soc_info, clk_id); + rc = -EINVAL; + goto end; + } + + wrapper_clk->num_clients--; + if (wrapper_clk->mmrm_handle && (wrapper_clk->soc_info == soc_info)) { + cam_soc_util_unregister_mmrm_client(wrapper_clk->mmrm_handle); + wrapper_clk->mmrm_handle = NULL; + wrapper_clk->soc_info = NULL; + } + + list_del_init(&wrapper_client->list); + CAM_MEM_FREE(wrapper_client); + + CAM_DBG(CAM_UTIL, "Unregister client %s for clk id %d, num clients %d", + soc_info->dev_name, clk_id, wrapper_clk->num_clients); + + if (!wrapper_clk->num_clients) { + list_del_init(&wrapper_clk->list); + CAM_MEM_FREE(wrapper_clk); + } +end: + mutex_unlock(&wrapper_lock); + return rc; +} + +static int cam_soc_util_clk_wrapper_set_clk_rate( + uint32_t clk_id, struct cam_hw_soc_info *soc_info, + struct clk *clk, int64_t clk_rate) +{ + struct cam_clk_wrapper_clk *wrapper_clk; + struct cam_clk_wrapper_client *wrapper_client; + bool clk_found = false; + bool client_found = false; + int rc = 0; + int64_t final_clk_rate = 0; + uint32_t active_clients = 0; + + if (!soc_info || !clk) { + CAM_ERR(CAM_UTIL, "Invalid param soc_info %pK clk %pK", + soc_info, clk); + return -EINVAL; + } + + mutex_lock(&wrapper_lock); + + list_for_each_entry(wrapper_clk, &wrapper_clk_list, list) { + CAM_DBG(CAM_UTIL, "Clk list id %d num clients %d", + wrapper_clk->clk_id, wrapper_clk->num_clients); + if (wrapper_clk->clk_id == clk_id) { + clk_found = true; + break; + } + } + + if (!clk_found) { + CAM_ERR(CAM_UTIL, "Clk entry not found id %d client %s", + clk_id, soc_info->dev_name); + rc = -EINVAL; + goto end; + } + + list_for_each_entry(wrapper_client, &wrapper_clk->client_list, list) { + CAM_DBG(CAM_UTIL, "Clk id %d client %s, clk rate %lld", + wrapper_clk->clk_id, wrapper_client->soc_info->dev_name, + wrapper_client->curr_clk_rate); + if 
(wrapper_client->soc_info == soc_info) { + client_found = true; + CAM_DBG(CAM_UTIL, + "Clk enable clk id %d, client %s curr %ld new %ld", + clk_id, wrapper_client->soc_info->dev_name, + wrapper_client->curr_clk_rate, clk_rate); + + wrapper_client->curr_clk_rate = clk_rate; + } + + if (wrapper_client->curr_clk_rate > 0) + active_clients++; + + if (final_clk_rate < wrapper_client->curr_clk_rate) + final_clk_rate = wrapper_client->curr_clk_rate; + } + + if (!client_found) { + CAM_ERR(CAM_UTIL, + "Wrapper clk enable without client entry clk id %d client %s", + clk_id, soc_info->dev_name); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_UTIL, + "Clk id %d, client %s, clients rate %ld, curr %ld final %ld", + wrapper_clk->clk_id, soc_info->dev_name, clk_rate, + wrapper_clk->curr_clk_rate, final_clk_rate); + + if ((final_clk_rate != wrapper_clk->curr_clk_rate) || + (active_clients != wrapper_clk->active_clients)) { + bool set_rate_finish = false; + + if (!skip_mmrm_set_rate && wrapper_clk->mmrm_handle) { + rc = cam_soc_util_set_sw_client_rate_through_mmrm( + wrapper_clk->mmrm_handle, + wrapper_clk->is_nrt_dev, + wrapper_clk->min_clk_rate, + final_clk_rate, active_clients); + if (rc) { + CAM_ERR(CAM_UTIL, + "set_sw_client_rate through mmrm failed clk_id %d, rate=%ld", + wrapper_clk->clk_id, final_clk_rate); + goto end; + } + + set_rate_finish = true; + } + + if (!set_rate_finish && final_clk_rate && + (final_clk_rate != wrapper_clk->curr_clk_rate)) { + rc = cam_wrapper_clk_set_rate(clk, final_clk_rate, wrapper_clk->name); + if (rc) { + CAM_ERR(CAM_UTIL, "set_rate failed on clk %d", + wrapper_clk->clk_id); + goto end; + } + } + + wrapper_clk->curr_clk_rate = final_clk_rate; + wrapper_clk->active_clients = active_clients; + } + +end: + mutex_unlock(&wrapper_lock); + return rc; +} + +int cam_soc_util_get_clk_level(struct cam_hw_soc_info *soc_info, + int64_t clk_rate, int clk_idx, int32_t *clk_lvl) +{ + int i; + long clk_rate_round; + + if 
(cam_vmrm_proxy_clk_rgl_voting_enable()) { + *clk_lvl = CAM_MINSVS_VOTE; + return 0; + } + + if (!soc_info || (clk_idx < 0) || (clk_idx >= CAM_SOC_MAX_CLK)) { + CAM_ERR(CAM_UTIL, "Invalid src_clk_idx: %d", clk_idx); + *clk_lvl = -1; + return -EINVAL; + } + + clk_rate_round = cam_wrapper_clk_round_rate( + soc_info->clk[clk_idx], clk_rate, soc_info->clk_name[clk_idx]); + if (clk_rate_round < 0) { + CAM_ERR(CAM_UTIL, "round failed rc = %ld", + clk_rate_round); + *clk_lvl = -1; + return -EINVAL; + } + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass get clk level"); + *clk_lvl = CAM_NOMINAL_VOTE; + return 0; + } + + for (i = 0; i < CAM_MAX_VOTE; i++) { + if ((soc_info->clk_level_valid[i]) && + (soc_info->clk_rate[i][clk_idx] >= + clk_rate_round)) { + CAM_DBG(CAM_UTIL, + "soc = %d round rate = %ld actual = %lld", + soc_info->clk_rate[i][clk_idx], + clk_rate_round, clk_rate); + *clk_lvl = i; + return 0; + } + } + + CAM_WARN(CAM_UTIL, "Invalid clock rate %ld", clk_rate_round); + *clk_lvl = -1; + return -EINVAL; +} + +const char *cam_soc_util_get_string_from_level(enum cam_vote_level level) +{ + switch (level) { + case CAM_SUSPEND_VOTE: + return ""; + case CAM_MINSVS_VOTE: + return "MINSVS[1]"; + case CAM_LOWSVS_D1_VOTE: + return "LOWSVSD1[2]"; + case CAM_LOWSVS_VOTE: + return "LOWSVS[3]"; + case CAM_SVS_VOTE: + return "SVS[4]"; + case CAM_SVSL1_VOTE: + return "SVSL1[5]"; + case CAM_NOMINAL_VOTE: + return "NOM[6]"; + case CAM_NOMINALL1_VOTE: + return "NOML1[7]"; + case CAM_TURBO_VOTE: + return "TURBO[8]"; + default: + return ""; + } +} + +/** + * cam_soc_util_get_supported_clk_levels() + * + * @brief: Returns the string of all the supported clk levels for + * the given device + * + * @soc_info: Device soc information + * + * @return: String containing all supported clk levels + */ +static const char *cam_soc_util_get_supported_clk_levels( + struct cam_hw_soc_info *soc_info) +{ + int i = 0; + + scnprintf(supported_clk_info, 
sizeof(supported_clk_info), "Supported levels: "); + + for (i = 0; i < CAM_MAX_VOTE; i++) { + if (soc_info->clk_level_valid[i] == true) { + strlcat(supported_clk_info, + cam_soc_util_get_string_from_level(i), + sizeof(supported_clk_info)); + strlcat(supported_clk_info, " ", + sizeof(supported_clk_info)); + } + } + + strlcat(supported_clk_info, "\n", sizeof(supported_clk_info)); + return supported_clk_info; +} + +static int cam_soc_util_clk_lvl_options_open(struct inode *inode, + struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t cam_soc_util_clk_lvl_options_read(struct file *file, + char __user *clk_info, size_t size_t, loff_t *loff_t) +{ + struct cam_hw_soc_info *soc_info = + (struct cam_hw_soc_info *)file->private_data; + const char *display_string = + cam_soc_util_get_supported_clk_levels(soc_info); + + return simple_read_from_buffer(clk_info, size_t, loff_t, display_string, + strlen(display_string)); +} + +static const struct file_operations cam_soc_util_clk_lvl_options = { + .open = cam_soc_util_clk_lvl_options_open, + .read = cam_soc_util_clk_lvl_options_read, +}; + +static int cam_soc_util_set_clk_lvl_override(void *data, u64 val) +{ + struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data; + + if ((val <= CAM_SUSPEND_VOTE) || (val >= CAM_MAX_VOTE)) { + CAM_WARN(CAM_UTIL, "Invalid clk lvl override %d", val); + return 0; + } + + if (soc_info->clk_level_valid[val]) + soc_info->clk_level_override_high = val; + else + soc_info->clk_level_override_high = 0; + + return 0; +} + +static int cam_soc_util_get_clk_lvl_override(void *data, u64 *val) +{ + struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data; + + *val = soc_info->clk_level_override_high; + + return 0; +} + +static int cam_soc_util_set_clk_lvl_override_low(void *data, u64 val) +{ + struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data; + + if ((val <= CAM_SUSPEND_VOTE) || (val >= CAM_MAX_VOTE)) { + CAM_WARN(CAM_UTIL, "Invalid 
clk lvl override %d", val); + return 0; + } + + if (soc_info->clk_level_valid[val]) + soc_info->clk_level_override_low = val; + else + soc_info->clk_level_override_low = 0; + + return 0; +} + +static int cam_soc_util_get_clk_lvl_override_low(void *data, u64 *val) +{ + struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data; + + *val = soc_info->clk_level_override_low; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(cam_soc_util_clk_lvl_control, + cam_soc_util_get_clk_lvl_override, cam_soc_util_set_clk_lvl_override, "%08llu"); + +DEFINE_SIMPLE_ATTRIBUTE(cam_soc_util_clk_lvl_control_low, + cam_soc_util_get_clk_lvl_override_low, cam_soc_util_set_clk_lvl_override_low, "%08llu"); + + +/** + * cam_soc_util_create_clk_lvl_debugfs() + * + * @brief: Creates debugfs files to view/control device clk rates + * + * @soc_info: Device soc information + * + * @return: Success or failure + */ +static int cam_soc_util_create_clk_lvl_debugfs(struct cam_hw_soc_info *soc_info) +{ + int rc = 0; + struct dentry *clkdirptr = NULL; + + if (!cam_debugfs_available()) + return 0; + + if (soc_info->dentry) { + CAM_DBG(CAM_UTIL, "Debugfs entry for %s already exists", + soc_info->dev_name); + goto end; + } + + rc = cam_debugfs_lookup_subdir(CAM_CLK_DIRNAME, &clkdirptr); + if (rc) { + rc = cam_debugfs_create_subdir(CAM_CLK_DIRNAME, &clkdirptr); + if (rc) { + CAM_ERR(CAM_UTIL, "DebugFS could not create clk directory!"); + rc = -ENOENT; + goto end; + } + } + + soc_info->dentry = debugfs_create_dir(soc_info->dev_name, clkdirptr); + if (IS_ERR_OR_NULL(soc_info->dentry)) { + CAM_ERR(CAM_UTIL, "DebugFS could not create directory for dev:%s!", + soc_info->dev_name); + rc = -ENOENT; + goto end; + } + /* Store parent inode for cleanup in caller */ + debugfs_create_file("clk_lvl_options", 0444, + soc_info->dentry, soc_info, &cam_soc_util_clk_lvl_options); + debugfs_create_file("clk_lvl_control", 0644, + soc_info->dentry, soc_info, &cam_soc_util_clk_lvl_control); + 
debugfs_create_file("clk_lvl_control_low", 0644, + soc_info->dentry, soc_info, &cam_soc_util_clk_lvl_control_low); +end: + return rc; +} + +int cam_soc_util_get_level_from_string(const char *string, + enum cam_vote_level *level) +{ + if (!level) + return -EINVAL; + + if (!strcmp(string, "suspend")) { + *level = CAM_SUSPEND_VOTE; + } else if (!strcmp(string, "minsvs")) { + *level = CAM_MINSVS_VOTE; + } else if (!strcmp(string, "lowsvsd1")) { + *level = CAM_LOWSVS_D1_VOTE; + } else if (!strcmp(string, "lowsvs")) { + *level = CAM_LOWSVS_VOTE; + } else if (!strcmp(string, "svs")) { + *level = CAM_SVS_VOTE; + } else if (!strcmp(string, "svs_l1")) { + *level = CAM_SVSL1_VOTE; + } else if (!strcmp(string, "nominal")) { + *level = CAM_NOMINAL_VOTE; + } else if (!strcmp(string, "nominal_l1")) { + *level = CAM_NOMINALL1_VOTE; + } else if (!strcmp(string, "turbo")) { + *level = CAM_TURBO_VOTE; + } else { + CAM_ERR(CAM_UTIL, "Invalid string %s", string); + return -EINVAL; + } + + return 0; +} + +/** + * cam_soc_util_get_clk_level_to_apply() + * + * @brief: Get the clock level to apply. If the requested level + * is not valid, bump the level to next available valid + * level. If no higher level found, return failure. 
+ * + * @soc_info: Device soc struct to be populated + * @req_level: Requested level + * @apply_level Level to apply + * + * @return: success or failure + */ +static int cam_soc_util_get_clk_level_to_apply( + struct cam_hw_soc_info *soc_info, enum cam_vote_level req_level, + enum cam_vote_level *apply_level) +{ + if (req_level >= CAM_MAX_VOTE) { + CAM_ERR(CAM_UTIL, "Invalid clock level parameter %d", + req_level); + return -EINVAL; + } + + if (soc_info->clk_level_valid[req_level] == true) { + *apply_level = req_level; + } else { + int i; + + for (i = (req_level + 1); i < CAM_MAX_VOTE; i++) + if (soc_info->clk_level_valid[i] == true) { + *apply_level = i; + break; + } + + if (i == CAM_MAX_VOTE) { + CAM_ERR(CAM_UTIL, + "No valid clock level found to apply, req=%d", + req_level); + return -EINVAL; + } + } + + CAM_DBG(CAM_UTIL, "Req level %s, Applying %s", + cam_soc_util_get_string_from_level(req_level), + cam_soc_util_get_string_from_level(*apply_level)); + + return 0; +} + +int cam_soc_util_get_valid_clk_rate( + struct cam_hw_soc_info *soc_info, enum cam_vote_level req_level, + int64_t *apply_rate) +{ + int rc = 0; + enum cam_vote_level valid_clk_lvl; + + rc = cam_soc_util_get_clk_level_to_apply(soc_info, req_level, + &valid_clk_lvl); + if (rc) { + CAM_ERR(CAM_UTIL, "Failed to get clk level to apply rc: %d", rc); + return rc; + } + + *apply_rate = soc_info->clk_rate[valid_clk_lvl][soc_info->src_clk_idx]; + + return rc; +} + +int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info) +{ + int i, rc = 0; + + if (!soc_info) { + CAM_ERR(CAM_UTIL, "Invalid arguments"); + return -EINVAL; + } + + for (i = 0; i < soc_info->irq_count; i++) { + if (soc_info->irq_num[i] < 0) { + CAM_ERR(CAM_UTIL, "No IRQ line available for irq: %s dev: %s", + soc_info->irq_name[i], soc_info->dev_name); + rc = -ENODEV; + goto disable_irq; + } + + enable_irq(soc_info->irq_num[i]); + } + + return rc; + +disable_irq: + for (i = i - 1; i >= 0; i--) + disable_irq(soc_info->irq_num[i]); + + return 
rc; +} + +int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info) +{ + int i, rc = 0; + + if (!soc_info) { + CAM_ERR(CAM_UTIL, "Invalid arguments"); + return -EINVAL; + } + + for (i = 0; i < soc_info->irq_count; i++) { + if (soc_info->irq_num[i] < 0) { + CAM_ERR(CAM_UTIL, "No IRQ line available irq: %s dev:", + soc_info->irq_name[i], soc_info->dev_name); + rc = -ENODEV; + continue; + } + + disable_irq(soc_info->irq_num[i]); + } + + return rc; +} + +long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info, + uint32_t clk_index, unsigned long clk_rate) +{ + if (!soc_info || (clk_index >= soc_info->num_clk) || (clk_rate == 0)) { + CAM_ERR(CAM_UTIL, "Invalid input params %pK, %d %lu", + soc_info, clk_index, clk_rate); + return clk_rate; + } + + return cam_wrapper_clk_round_rate(soc_info->clk[clk_index], clk_rate, + soc_info->clk_name[clk_index]); +} + +/** + * cam_soc_util_set_clk_rate() + * + * @brief: Sets the given rate for the clk requested for + * + * @clk: Clock structure information for which rate is to be set + * @clk_name: Name of the clock for which rate is being set + * @clk_rate: Clock rate to be set + * @shared_clk: Whether this is a shared clk + * @is_src_clk: Whether this is source clk + * @clk_id: Clock ID + * @applied_clk_rate: Final clock rate set to the clk + * + * @return: Success or failure + */ +static int cam_soc_util_set_clk_rate(struct cam_hw_soc_info *soc_info, + struct clk *clk, const char *clk_name, + int64_t clk_rate, bool shared_clk, bool is_src_clk, uint32_t clk_id, + unsigned long *applied_clk_rate) +{ + int rc = 0; + long clk_rate_round = -1; + bool set_rate = false; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass set clk rate"); + return rc; + } + + if (!clk_name) { + CAM_ERR(CAM_UTIL, "Invalid input clk %pK clk_name %pK", + clk, clk_name); + return -EINVAL; + } + + CAM_DBG(CAM_UTIL, "set %s, rate %lld", clk_name, clk_rate); + if (!clk) + return 0; + if (clk_rate > 0) { + 
clk_rate_round = cam_wrapper_clk_round_rate(clk, clk_rate, clk_name); + CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round); + if (clk_rate_round < 0) { + CAM_ERR(CAM_UTIL, "round failed for clock %s rc = %ld", + clk_name, clk_rate_round); + return clk_rate_round; + } + set_rate = true; + } else if (clk_rate == INIT_RATE) { + clk_rate_round = cam_wrapper_clk_get_rate(clk, clk_name); + CAM_DBG(CAM_UTIL, "init new_rate %ld", clk_rate_round); + if (clk_rate_round == 0) { + clk_rate_round = cam_wrapper_clk_round_rate(clk, 0, clk_name); + if (clk_rate_round <= 0) { + CAM_ERR(CAM_UTIL, "round rate failed on %s", + clk_name); + return clk_rate_round; + } + } + set_rate = true; + } + + if (set_rate) { + if (shared_clk) { + CAM_DBG(CAM_UTIL, + "Dev %s clk %s id %d Set Shared clk %ld", + soc_info->dev_name, clk_name, clk_id, + clk_rate_round); + cam_soc_util_clk_wrapper_set_clk_rate( + clk_id, soc_info, clk, clk_rate_round); + } else { + bool set_rate_finish = false; + + CAM_DBG(CAM_UTIL, + "Dev %s clk %s clk_id %d src_idx %d src_clk_id %d", + soc_info->dev_name, clk_name, clk_id, + soc_info->src_clk_idx, + (soc_info->src_clk_idx == -1) ? 
-1 : + soc_info->clk_id[soc_info->src_clk_idx]); + + if (is_src_clk && soc_info->mmrm_handle && + !skip_mmrm_set_rate) { + int32_t idx = soc_info->src_clk_idx; + uint32_t min_level = soc_info->lowest_clk_level; + + if (idx < 0) { + CAM_ERR(CAM_UTIL, "Invalid src clk index"); + return -EINVAL; + } + + rc = cam_soc_util_set_sw_client_rate_through_mmrm( + soc_info->mmrm_handle, + soc_info->is_nrt_dev, + soc_info->clk_rate[min_level][idx], + clk_rate_round, 1); + + if (rc) { + CAM_ERR(CAM_UTIL, + "set_sw_client_rate through mmrm failed on %s clk_id %d, rate=%ld", + clk_name, clk_id, clk_rate_round); + return rc; + } + set_rate_finish = true; + } + + if (!set_rate_finish) { + rc = cam_wrapper_clk_set_rate(clk, clk_rate_round, clk_name); + if (rc) { + CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name); + return rc; + } + } + } + } + + if (applied_clk_rate && (clk_rate_round >= 0)) + *applied_clk_rate = clk_rate_round; + + return rc; +} + +int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info, int cesta_client_idx, + unsigned long clk_rate_high, unsigned long clk_rate_low) +{ + int rc = 0; + int i = 0; + int32_t src_clk_idx; + int32_t scl_clk_idx; + struct clk *clk = NULL; + int32_t apply_level; + uint32_t clk_level_override_high = 0, clk_level_override_low = 0; + + if (!soc_info || (soc_info->src_clk_idx < 0) || + (soc_info->src_clk_idx >= CAM_SOC_MAX_CLK)) { + CAM_ERR(CAM_UTIL, "Invalid src_clk_idx: %d", + soc_info ? 
soc_info->src_clk_idx : -1); + return -EINVAL; + } + + src_clk_idx = soc_info->src_clk_idx; + clk_level_override_high = soc_info->clk_level_override_high; + clk_level_override_low = soc_info->clk_level_override_low; + + if (clk_level_override_high && clk_rate_high) + clk_rate_high = soc_info->clk_rate[clk_level_override_high][src_clk_idx]; + + if (clk_level_override_low && clk_rate_low) + clk_rate_low = soc_info->clk_rate[clk_level_override_low][src_clk_idx]; + + if ((!debug_disable_rt_clk_bw_limit) && + (!strnstr(soc_info->dev_name, "cpas", strlen(soc_info->dev_name))) && + (!soc_info->is_nrt_dev) && (!clk_level_override_high) && (clk_rate_high > + soc_info->clk_rate[soc_info->highest_clk_level][src_clk_idx])) + CAM_WARN(CAM_UTIL, + "Requested clk rate: %llu greater than max supported rate: %llu for clk: %s", + clk_rate_high, + soc_info->clk_rate[soc_info->highest_clk_level][src_clk_idx], + soc_info->clk_name[src_clk_idx]); + + clk = soc_info->clk[src_clk_idx]; + rc = cam_soc_util_get_clk_level(soc_info, clk_rate_high, src_clk_idx, + &apply_level); + if (rc || (apply_level < 0) || (apply_level >= CAM_MAX_VOTE)) { + CAM_ERR(CAM_UTIL, + "set %s, rate %lld dev_name = %s apply level = %d", + soc_info->clk_name[src_clk_idx], clk_rate_high, + soc_info->dev_name, apply_level); + return -EINVAL; + } + + CAM_DBG(CAM_UTIL, + "set %s, cesta_client_idx: %d rate [%ld %ld] dev_name = %s apply level = %d", + soc_info->clk_name[src_clk_idx], cesta_client_idx, clk_rate_high, clk_rate_low, + soc_info->dev_name, apply_level); + + if ((soc_info->cam_cx_ipeak_enable) && (clk_rate_high > 0)) { + cam_cx_ipeak_update_vote_cx_ipeak(soc_info, + apply_level); + } + + if (soc_info->is_clk_drv_en && CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) { + rc = cam_soc_util_set_cesta_clk_rate(soc_info, cesta_client_idx, clk_rate_high, + clk_rate_low, + &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].high, + &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].low); + if (rc) { + 
CAM_ERR(CAM_UTIL, + "Failed in setting cesta clk rates[high low]:[%ld %ld] client_idx:%d rc:%d", + clk_rate_high, clk_rate_low, cesta_client_idx, rc); + return rc; + } + if (!clk_ops_profiling_hw_and_sw_voting) + goto end; + } + + if (cam_vmrm_proxy_clk_rgl_voting_enable()) { + rc = cam_vmrm_set_src_clk_rate(soc_info->hw_id, + CAM_CLK_SW_CLIENT_IDX, clk_rate_high, clk_rate_low); + if (rc) { + CAM_ERR(CAM_UTIL, "vmrm set src clk rate failed %d", rc); + return rc; + } + goto end; + } + + rc = cam_soc_util_set_clk_rate(soc_info, clk, + soc_info->clk_name[src_clk_idx], clk_rate_high, + CAM_IS_BIT_SET(soc_info->shared_clk_mask, src_clk_idx), + true, soc_info->clk_id[src_clk_idx], + &soc_info->applied_src_clk_rates.sw_client); + if (rc) { + CAM_ERR(CAM_UTIL, + "SET_RATE Failed: src clk: %s, rate %lld, dev_name = %s rc: %d", + soc_info->clk_name[src_clk_idx], clk_rate_high, + soc_info->dev_name, rc); + return rc; + } + + /* set clk rate for scalable clk if available */ + + for (i = 0; i < soc_info->scl_clk_count; i++) { + scl_clk_idx = soc_info->scl_clk_idx[i]; + if (scl_clk_idx < 0) { + CAM_DBG(CAM_UTIL, "Scl clk index invalid"); + continue; + } + clk = soc_info->clk[scl_clk_idx]; + rc = cam_soc_util_set_clk_rate(soc_info, clk, + soc_info->clk_name[scl_clk_idx], + soc_info->clk_rate[apply_level][scl_clk_idx], + CAM_IS_BIT_SET(soc_info->shared_clk_mask, scl_clk_idx), + false, soc_info->clk_id[scl_clk_idx], + NULL); + if (rc) { + CAM_WARN(CAM_UTIL, + "SET_RATE Failed: scl clk: %s, rate %d dev_name = %s, rc: %d", + soc_info->clk_name[scl_clk_idx], + soc_info->clk_rate[apply_level][scl_clk_idx], + soc_info->dev_name, rc); + } + } + +end: + return 0; +} + +int cam_soc_util_put_optional_clk(struct cam_hw_soc_info *soc_info, + int32_t clk_indx) +{ + if (clk_indx < 0) { + CAM_ERR(CAM_UTIL, "Invalid params clk %d", clk_indx); + return -EINVAL; + } + + if (CAM_IS_BIT_SET(soc_info->optional_shared_clk_mask, clk_indx)) + cam_soc_util_clk_wrapper_unregister_entry( + 
soc_info->optional_clk_id[clk_indx], soc_info); + + cam_wrapper_clk_put(soc_info->optional_clk[clk_indx], + soc_info->optional_clk_name[clk_indx]); + soc_info->optional_clk[clk_indx] = NULL; + + return 0; +} + +static struct clk *cam_soc_util_option_clk_get(struct device_node *np, + int index, uint32_t *clk_id, const char *clk_name) +{ + struct of_phandle_args clkspec; + struct clk *clk; + int rc; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass option clk get"); + return (struct clk *)BYPASS_VALUE; + } + + if (index < 0) + return ERR_PTR(-EINVAL); + + rc = of_parse_phandle_with_args(np, "clocks-option", "#clock-cells", + index, &clkspec); + if (rc) + return ERR_PTR(rc); + + clk = cam_wrapper_of_clk_get_from_provider(&clkspec, clk_name); + + *clk_id = clkspec.args[0]; + of_node_put(clkspec.np); + + return clk; +} + +int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info, + const char *clk_name, int32_t *clk_index) +{ + int index = 0; + int rc = 0; + struct device_node *of_node = NULL; + uint32_t shared_clk_val; + + if (cam_vmrm_proxy_clk_rgl_voting_enable()) + return 0; + + if (!soc_info || !clk_name || !clk_index) { + CAM_ERR(CAM_UTIL, + "Invalid params soc_info %pK clk_name %s clk_index %pK", + soc_info, clk_name, clk_index); + return -EINVAL; + } + + of_node = soc_info->dev->of_node; + + index = of_property_match_string(of_node, "clock-names-option", + clk_name); + if (index < 0) { + CAM_DBG(CAM_UTIL, "No clk data for %s", clk_name); + *clk_index = -1; + return -EINVAL; + } + + if (index >= CAM_SOC_MAX_OPT_CLK) { + CAM_ERR(CAM_UTIL, "Insufficient optional clk entries %d %d", + index, CAM_SOC_MAX_OPT_CLK); + return -EINVAL; + } + + of_property_read_string_index(of_node, "clock-names-option", + index, &(soc_info->optional_clk_name[index])); + + soc_info->optional_clk[index] = cam_soc_util_option_clk_get(of_node, + index, &soc_info->optional_clk_id[index], + soc_info->optional_clk_name[index]); + if 
(IS_ERR(soc_info->optional_clk[index])) { + CAM_ERR(CAM_UTIL, "No clk named %s found. Dev %s", clk_name, + soc_info->dev_name); + *clk_index = -1; + return -EFAULT; + } + *clk_index = index; + + rc = of_property_read_u32_index(of_node, "clock-rates-option", + index, &soc_info->optional_clk_rate[index]); + if (rc) { + CAM_ERR(CAM_UTIL, + "Error reading clock-rates clk_name %s index %d", + clk_name, index); + goto error; + } + + /* + * Option clocks are assumed to be available to single Device here. + * Hence use INIT_RATE instead of NO_SET_RATE. + */ + soc_info->optional_clk_rate[index] = + (soc_info->optional_clk_rate[index] == 0) ? + (int32_t)INIT_RATE : soc_info->optional_clk_rate[index]; + + CAM_DBG(CAM_UTIL, "clk_name %s index %d clk_rate %d", + clk_name, *clk_index, soc_info->optional_clk_rate[index]); + + rc = of_property_read_u32_index(of_node, "shared-clks-option", + index, &shared_clk_val); + if (rc) { + CAM_DBG(CAM_UTIL, "Not shared clk %s index %d", + clk_name, index); + } else if (shared_clk_val > 1) { + CAM_WARN(CAM_UTIL, "Invalid shared clk val %d", shared_clk_val); + } else { + CAM_DBG(CAM_UTIL, + "Dev %s shared clk %s index %d, clk id %d, shared_clk_val %d", + soc_info->dev_name, clk_name, index, + soc_info->optional_clk_id[index], shared_clk_val); + + if (shared_clk_val) { + CAM_SET_BIT(soc_info->optional_shared_clk_mask, index); + + /* Create a wrapper entry if this is a shared clock */ + CAM_DBG(CAM_UTIL, + "Dev %s, clk %s, id %d register wrapper entry for shared clk", + soc_info->dev_name, + soc_info->optional_clk_name[index], + soc_info->optional_clk_id[index]); + + rc = cam_soc_util_clk_wrapper_register_entry( + soc_info->optional_clk_id[index], + soc_info->optional_clk[index], false, + soc_info, + soc_info->optional_clk_rate[index], + soc_info->optional_clk_name[index]); + if (rc) { + CAM_ERR(CAM_UTIL, + "Failed in registering shared clk Dev %s id %d", + soc_info->dev_name, + soc_info->optional_clk_id[index]); + goto error; + } + } + } + + 
return 0; +error: + cam_wrapper_clk_put(soc_info->optional_clk[index], + soc_info->optional_clk_name[index]); + soc_info->optional_clk_rate[index] = 0; + soc_info->optional_clk[index] = NULL; + *clk_index = -1; + + return rc; +} + +int cam_soc_util_get_reset_resource(struct cam_hw_soc_info *soc_info) +{ + struct device_node *of_node = NULL; + int count; + int i, j, rc = 0; + + if (!soc_info || !soc_info->dev) { + CAM_ERR(CAM_UTIL, "Invalid params"); + return -EINVAL; + } + + of_node = soc_info->dev->of_node; + + count = of_property_count_strings(of_node, "reset-names"); + + CAM_DBG(CAM_UTIL, "E: dev_name = %s count = %d", + soc_info->dev_name, count); + if (count > CAM_SOC_MAX_RESET) { + CAM_ERR(CAM_UTIL, "invalid count of reset, count=%d", count); + rc = -EINVAL; + return rc; + } + if (count <= 0) { + CAM_DBG(CAM_UTIL, "No reset found"); + count = 0; + soc_info->num_reset = count; + return 0; + } + soc_info->num_reset = count; + + for (i = 0; i < count; i++) { + rc = of_property_read_string_index(of_node, "reset-names", + i, &(soc_info->reset_name[i])); + CAM_DBG(CAM_UTIL, "reset-names[%d] = %s", + i, soc_info->reset_name[i]); + if (rc) { + CAM_ERR(CAM_UTIL, + "i= %d count= %d reading reset-names failed", + i, count); + return rc; + } + + soc_info->resets[i] = devm_reset_control_get(soc_info->dev, + soc_info->reset_name[i]); + if (IS_ERR(soc_info->resets[i])) { + rc = PTR_ERR(soc_info->resets[i]); + CAM_ERR(CAM_UTIL, + "i= %d count= %d reset= %s control get failed: %d", + i, count, soc_info->reset_name[i], rc); + goto err; + } + } + + return 0; + +err: + for (j = i-1; j >= 0; j--) { + reset_control_put(soc_info->resets[i]); + soc_info->resets[i] = NULL; + } + soc_info->num_reset = 0; + + return rc; +} + +int cam_soc_util_put_reset_resource(struct cam_hw_soc_info *soc_info) +{ + int i; + + if (soc_info->num_reset <= 0) { + CAM_ERR(CAM_UTIL, "Invalid params reset %d"); + return -EINVAL; + } + + for (i = 0; i < soc_info->num_reset; i++) { + 
reset_control_put(soc_info->resets[i]); + soc_info->resets[i] = NULL; + } + + return 0; +} + +int cam_soc_util_clk_enable(struct cam_hw_soc_info *soc_info, int cesta_client_idx, + bool optional_clk, int32_t clk_idx, int32_t apply_level) +{ + int rc = 0; + struct clk *clk; + const char *clk_name; + unsigned long clk_rate; + uint32_t shared_clk_mask; + uint32_t clk_id; + bool is_src_clk = false; + + if (!soc_info || (clk_idx < 0) || (apply_level >= CAM_MAX_VOTE)) { + CAM_ERR(CAM_UTIL, "Invalid param %d %d", clk_idx, apply_level); + return -EINVAL; + } + + if (optional_clk) { + clk = soc_info->optional_clk[clk_idx]; + clk_name = soc_info->optional_clk_name[clk_idx]; + clk_rate = (apply_level == -1) ? + 0 : soc_info->optional_clk_rate[clk_idx]; + shared_clk_mask = soc_info->optional_shared_clk_mask; + clk_id = soc_info->optional_clk_id[clk_idx]; + } else { + clk = soc_info->clk[clk_idx]; + clk_name = soc_info->clk_name[clk_idx]; + clk_rate = (apply_level == -1) ? + 0 : soc_info->clk_rate[apply_level][clk_idx]; + shared_clk_mask = soc_info->shared_clk_mask; + clk_id = soc_info->clk_id[clk_idx]; + if (clk_idx == soc_info->src_clk_idx) + is_src_clk = true; + } + if (!clk) + return 0; + + if (is_src_clk && soc_info->is_clk_drv_en && CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) { + rc = cam_soc_util_set_cesta_clk_rate(soc_info, cesta_client_idx, clk_rate, clk_rate, + &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].high, + &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].low); + if (rc) { + CAM_ERR(CAM_UTIL, + "[%s] Failed in setting cesta clk rates[high low]:[%ld %ld] client_idx:%d rc:%d", + soc_info->dev_name, clk_rate, clk_rate, cesta_client_idx, rc); + return rc; + } + + rc = cam_soc_util_cesta_channel_switch(cesta_client_idx, soc_info->dev_name); + if (rc) { + CAM_ERR(CAM_UTIL, + "[%s] Failed to apply power states for cesta client:%d rc:%d", + soc_info->dev_name, cesta_client_idx, rc); + return rc; + } + + if (!clk_ops_profiling_hw_and_sw_voting) + 
goto end; + } + rc = cam_soc_util_set_clk_rate(soc_info, clk, clk_name, clk_rate, + CAM_IS_BIT_SET(shared_clk_mask, clk_idx), is_src_clk, clk_id, + &soc_info->applied_src_clk_rates.sw_client); + if (rc) { + CAM_ERR(CAM_UTIL, "[%s] Failed in setting clk rate %ld rc:%d", + soc_info->dev_name, clk_rate, rc); + return rc; + } + + +end: + CAM_DBG(CAM_UTIL, "[%s] : clk enable %s", soc_info->dev_name, clk_name); + rc = cam_wrapper_clk_prepare_enable(clk, clk_name); + if (rc) { + CAM_ERR(CAM_UTIL, "enable failed for %s: rc(%d)", clk_name, rc); + return rc; + } + + return rc; +} + +int cam_soc_util_clk_disable(struct cam_hw_soc_info *soc_info, int cesta_client_idx, + bool optional_clk, int32_t clk_idx) +{ + int rc = 0; + struct clk *clk; + const char *clk_name; + uint32_t shared_clk_mask; + uint32_t clk_id; + + if (!soc_info || (clk_idx < 0)) { + CAM_ERR(CAM_UTIL, "Invalid param %d", clk_idx); + return -EINVAL; + } + + if (optional_clk) { + clk = soc_info->optional_clk[clk_idx]; + clk_name = soc_info->optional_clk_name[clk_idx]; + shared_clk_mask = soc_info->optional_shared_clk_mask; + clk_id = soc_info->optional_clk_id[clk_idx]; + } else { + clk = soc_info->clk[clk_idx]; + clk_name = soc_info->clk_name[clk_idx]; + shared_clk_mask = soc_info->shared_clk_mask; + clk_id = soc_info->clk_id[clk_idx]; + } + + CAM_DBG(CAM_UTIL, "disable %s", clk_name); + if (!clk) + return 0; + + cam_wrapper_clk_disable_unprepare(clk, clk_name); + + if ((clk_idx == soc_info->src_clk_idx) && soc_info->is_clk_drv_en && + CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) { + rc = cam_soc_util_set_cesta_clk_rate(soc_info, cesta_client_idx, 0, 0, + &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].high, + &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].low); + if (rc) { + CAM_ERR(CAM_UTIL, + "Failed in setting cesta clk rates[high low]:[0 0] client_idx:%d rc:%d", + cesta_client_idx, rc); + return rc; + } + + rc = cam_soc_util_cesta_channel_switch(cesta_client_idx, soc_info->dev_name); 
+ if (rc) { + CAM_ERR(CAM_CSIPHY, + "Failed to apply power states for cesta_client_idx:%d rc:%d", + cesta_client_idx, rc); + return rc; + } + if (!clk_ops_profiling_hw_and_sw_voting) + goto end; + + } + if (CAM_IS_BIT_SET(shared_clk_mask, clk_idx)) { + CAM_DBG(CAM_UTIL, + "Dev %s clk %s Disabling Shared clk, set 0 rate", + soc_info->dev_name, clk_name); + cam_soc_util_clk_wrapper_set_clk_rate(clk_id, soc_info, clk, 0); + } else if (soc_info->mmrm_handle && (!skip_mmrm_set_rate) && + (soc_info->src_clk_idx == clk_idx)) { + CAM_DBG(CAM_UTIL, "Dev %s Disabling %s clk, set 0 rate", + soc_info->dev_name, clk_name); + cam_soc_util_set_sw_client_rate_through_mmrm( + soc_info->mmrm_handle, + soc_info->is_nrt_dev, + 0, 0, 1); + } + + +end: + return 0; +} + +/** + * cam_soc_util_clk_enable_default() + * + * @brief: This function enables the default clocks present + * in soc_info + * + * @soc_info: Device soc struct to be populated + * @cesta_client_idx: CESTA Client idx for hw client based src clocks + * @clk_level: Clk level to apply while enabling + * + * @return: success or failure + */ +int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info, + int cesta_client_idx, enum cam_vote_level clk_level) +{ + int i, rc = 0; + enum cam_vote_level apply_level; + uint32_t clk_level_override_high = 0; + unsigned long clk_rate_high = 0; + int32_t src_clk_idx; + int32_t temp_apply_level; + + if (debug_bypass_drivers & CAM_BYPASS_CLKS) { + CAM_WARN(CAM_UTIL, "Bypass clk enable default"); + return rc; + } + + if ((soc_info->num_clk == 0) || + (soc_info->num_clk >= CAM_SOC_MAX_CLK)) { + CAM_ERR(CAM_UTIL, "Invalid number of clock %d", + soc_info->num_clk); + return -EINVAL; + } + + src_clk_idx = soc_info->src_clk_idx; + clk_level_override_high = soc_info->clk_level_override_high; + + if ((soc_info->src_clk_idx >= 0) && (soc_info->src_clk_idx < CAM_SOC_MAX_CLK) && + clk_level_override_high) { + clk_rate_high = soc_info->clk_rate[clk_level_override_high][src_clk_idx]; + + 
CAM_DBG(CAM_UTIL, "src_clk_idx: %d, override_high: %d, clk_rate_high: %lld", + src_clk_idx, clk_level_override_high, clk_rate_high); + + rc = cam_soc_util_get_clk_level(soc_info, clk_rate_high, src_clk_idx, + &temp_apply_level); + if (rc || (temp_apply_level < 0) || (temp_apply_level >= CAM_MAX_VOTE)) { + CAM_ERR(CAM_UTIL, + "set %s, rate %lld dev_name = %s apply level = %d", + soc_info->clk_name[src_clk_idx], clk_rate_high, + soc_info->dev_name, temp_apply_level); + } + + if (temp_apply_level > clk_level) + clk_level = temp_apply_level; + } + + rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level, + &apply_level); + if (rc) { + CAM_ERR(CAM_UTIL, "[%s] : failed to get level clk_level=%d, rc=%d", + soc_info->dev_name, clk_level, rc); + return rc; + } + + if (soc_info->cam_cx_ipeak_enable) + cam_cx_ipeak_update_vote_cx_ipeak(soc_info, apply_level); + + CAM_DBG(CAM_UTIL, "Dev[%s] : cesta client %d, request level %s, apply level %s", + soc_info->dev_name, cesta_client_idx, + cam_soc_util_get_string_from_level(clk_level), + cam_soc_util_get_string_from_level(apply_level)); + + memset(&soc_info->applied_src_clk_rates, 0, sizeof(struct cam_soc_util_clk_rates)); + + for (i = 0; i < soc_info->num_clk; i++) { + rc = cam_soc_util_clk_enable(soc_info, cesta_client_idx, false, i, apply_level); + if (rc) { + CAM_ERR(CAM_UTIL, + "[%s] : failed to enable clk apply_level=%d, rc=%d, cesta_client_idx=%d", + soc_info->dev_name, apply_level, rc, cesta_client_idx); + goto clk_disable; + } + + if (soc_info->cam_cx_ipeak_enable) + CAM_DBG(CAM_UTIL, + "dev name = %s clk name = %s idx = %d apply_level = %d clc idx = %d", + soc_info->dev_name, soc_info->clk_name[i], i, apply_level, i); + } + + return rc; + +clk_disable: + if (soc_info->cam_cx_ipeak_enable) + cam_cx_ipeak_update_vote_cx_ipeak(soc_info, 0); + for (i--; i >= 0; i--) { + cam_soc_util_clk_disable(soc_info, cesta_client_idx, false, i); + } + + return rc; +} + +/** + * cam_soc_util_clk_disable_default() + * + * @brief: This 
function disables the default clocks present + * in soc_info + * + * @soc_info: device soc struct to be populated + * @cesta_client_idx: CESTA Client idx for hw client based src clocks + * + * @return: success or failure + */ +void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info, + int cesta_client_idx) +{ + int i; + + if (soc_info->num_clk == 0) + return; + + if (soc_info->cam_cx_ipeak_enable) + cam_cx_ipeak_unvote_cx_ipeak(soc_info); + for (i = soc_info->num_clk - 1; i >= 0; i--) + cam_soc_util_clk_disable(soc_info, cesta_client_idx, false, i); +} + +/** + * cam_soc_util_get_dt_clk_info() + * + * @brief: Parse the DT and populate the Clock properties + * + * @soc_info: device soc struct to be populated + * @src_clk_str name of src clock that has rate control + * + * @return: success or failure + */ +static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info) +{ + struct device_node *of_node = NULL; + int count; + int num_clk_rates, num_clk_levels; + int i, j, rc; + int32_t num_clk_level_strings; + const char *src_clk_str = NULL; + const char *scl_clk_str = NULL; + const char *clk_control_debugfs = NULL; + const char *clk_cntl_lvl_string = NULL; + enum cam_vote_level level; + int shared_clk_cnt; + struct of_phandle_args clk_args = {0}; + + if (!soc_info || !soc_info->dev) + return -EINVAL; + + of_node = soc_info->dev->of_node; + + if (!of_property_read_bool(of_node, "use-shared-clk")) { + CAM_DBG(CAM_UTIL, "No shared clk parameter defined"); + soc_info->use_shared_clk = false; + } else { + soc_info->use_shared_clk = true; + } + + count = of_property_count_strings(of_node, "clock-names"); + + CAM_DBG(CAM_UTIL, "E: dev_name = %s count = %d", + soc_info->dev_name, count); + if (count > CAM_SOC_MAX_CLK) { + CAM_ERR(CAM_UTIL, "invalid count of clocks, count=%d", count); + rc = -EINVAL; + return rc; + } + if (count <= 0) { + CAM_DBG(CAM_UTIL, "No clock-names found"); + count = 0; + soc_info->num_clk = count; + return 0; + } + 
soc_info->num_clk = count; + + for (i = 0; i < count; i++) { + rc = of_property_read_string_index(of_node, "clock-names", + i, &(soc_info->clk_name[i])); + CAM_DBG(CAM_UTIL, "clock-names[%d] = %s", + i, soc_info->clk_name[i]); + if (rc) { + CAM_ERR(CAM_UTIL, + "i= %d count= %d reading clock-names failed", + i, count); + return rc; + } + } + + num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates"); + if (num_clk_rates <= 0) { + CAM_ERR(CAM_UTIL, "reading clock-rates count failed"); + return -EINVAL; + } + + if ((num_clk_rates % soc_info->num_clk) != 0) { + CAM_ERR(CAM_UTIL, + "mismatch clk/rates, No of clocks=%d, No of rates=%d", + soc_info->num_clk, num_clk_rates); + return -EINVAL; + } + + num_clk_levels = (num_clk_rates / soc_info->num_clk); + + num_clk_level_strings = of_property_count_strings(of_node, + "clock-cntl-level"); + if (num_clk_level_strings != num_clk_levels) { + CAM_ERR(CAM_UTIL, + "Mismatch No of levels=%d, No of level string=%d", + num_clk_levels, num_clk_level_strings); + return -EINVAL; + } + + soc_info->lowest_clk_level = CAM_TURBO_VOTE; + soc_info->highest_clk_level = CAM_SUSPEND_VOTE; + + for (i = 0; i < num_clk_levels; i++) { + rc = of_property_read_string_index(of_node, + "clock-cntl-level", i, &clk_cntl_lvl_string); + if (rc) { + CAM_ERR(CAM_UTIL, + "Error reading clock-cntl-level, rc=%d", rc); + return rc; + } + + rc = cam_soc_util_get_level_from_string(clk_cntl_lvl_string, + &level); + if (rc) + return rc; + + CAM_DBG(CAM_UTIL, + "[%d] : %s %d", i, clk_cntl_lvl_string, level); + soc_info->clk_level_valid[level] = true; + for (j = 0; j < soc_info->num_clk; j++) { + rc = of_property_read_u32_index(of_node, "clock-rates", + ((i * soc_info->num_clk) + j), + &soc_info->clk_rate[level][j]); + if (rc) { + CAM_ERR(CAM_UTIL, + "Error reading clock-rates, rc=%d", + rc); + return rc; + } + + soc_info->clk_rate[level][j] = + (soc_info->clk_rate[level][j] == 0) ? 
+ (int32_t)NO_SET_RATE : + soc_info->clk_rate[level][j]; + + CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d", + level, j, + soc_info->clk_rate[level][j]); + } + + if ((level > CAM_MINSVS_VOTE) && + (level < soc_info->lowest_clk_level)) + soc_info->lowest_clk_level = level; + + if ((level < CAM_MAX_VOTE) && + (level > soc_info->highest_clk_level)) + soc_info->highest_clk_level = level; + } + + soc_info->src_clk_idx = -1; + rc = of_property_read_string_index(of_node, "src-clock-name", 0, + &src_clk_str); + if (rc || !src_clk_str) { + CAM_DBG(CAM_UTIL, "No src_clk_str found"); + rc = 0; + goto end; + } + + for (i = 0; i < soc_info->num_clk; i++) { + if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) { + soc_info->src_clk_idx = i; + CAM_DBG(CAM_UTIL, "src clock = %s, index = %d", + src_clk_str, i); + } + + rc = of_parse_phandle_with_args(of_node, "clocks", + "#clock-cells", i, &clk_args); + if (rc) { + CAM_ERR(CAM_CPAS, + "failed to clock info rc=%d", rc); + rc = -EINVAL; + goto end; + } + + soc_info->clk_id[i] = clk_args.args[0]; + of_node_put(clk_args.np); + + CAM_DBG(CAM_UTIL, "Dev %s clk %s id %d", + soc_info->dev_name, soc_info->clk_name[i], + soc_info->clk_id[i]); + } + + CAM_DBG(CAM_UTIL, "Dev %s src_clk_idx %d, lowest_clk_level %d highest_clk_level: %d", + soc_info->dev_name, soc_info->src_clk_idx, + soc_info->lowest_clk_level, soc_info->highest_clk_level); + + soc_info->shared_clk_mask = 0; + shared_clk_cnt = of_property_count_u32_elems(of_node, "shared-clks"); + if (shared_clk_cnt <= 0) { + CAM_DBG(CAM_UTIL, "Dev %s, no shared clks", soc_info->dev_name); + } else if (shared_clk_cnt != count) { + CAM_ERR(CAM_UTIL, "Dev %s, incorrect shared clock count %d %d", + soc_info->dev_name, shared_clk_cnt, count); + rc = -EINVAL; + goto end; + } else { + uint32_t shared_clk_val; + + for (i = 0; i < shared_clk_cnt; i++) { + rc = of_property_read_u32_index(of_node, + "shared-clks", i, &shared_clk_val); + if (rc || (shared_clk_val > 1)) { + CAM_ERR(CAM_UTIL, + 
"Incorrect shared clk info at %d, val=%d, count=%d", + i, shared_clk_val, shared_clk_cnt); + rc = -EINVAL; + goto end; + } + + if (shared_clk_val) + CAM_SET_BIT(soc_info->shared_clk_mask, i); + } + + CAM_DBG(CAM_UTIL, "Dev %s shared clk mask 0x%x", + soc_info->dev_name, soc_info->shared_clk_mask); + } + + /* scalable clk info parsing */ + soc_info->scl_clk_count = 0; + soc_info->scl_clk_count = of_property_count_strings(of_node, + "scl-clk-names"); + if ((soc_info->scl_clk_count <= 0) || + (soc_info->scl_clk_count > CAM_SOC_MAX_CLK)) { + if (soc_info->scl_clk_count == -EINVAL) { + CAM_DBG(CAM_UTIL, "scl_clk_name prop not avialable"); + } else if ((soc_info->scl_clk_count == -ENODATA) || + (soc_info->scl_clk_count > CAM_SOC_MAX_CLK)) { + CAM_ERR(CAM_UTIL, "Invalid scl_clk_count: %d", + soc_info->scl_clk_count); + return -EINVAL; + } + CAM_DBG(CAM_UTIL, "Invalid scl_clk count: %d", + soc_info->scl_clk_count); + soc_info->scl_clk_count = -1; + } else { + CAM_DBG(CAM_UTIL, "No of scalable clocks: %d", + soc_info->scl_clk_count); + for (i = 0; i < soc_info->scl_clk_count; i++) { + rc = of_property_read_string_index(of_node, + "scl-clk-names", i, + (const char **)&scl_clk_str); + if (rc || !scl_clk_str) { + CAM_WARN(CAM_UTIL, "scl_clk_str is NULL"); + soc_info->scl_clk_idx[i] = -1; + continue; + } + for (j = 0; j < soc_info->num_clk; j++) { + if (strnstr(scl_clk_str, soc_info->clk_name[j], + strlen(scl_clk_str))) { + soc_info->scl_clk_idx[i] = j; + CAM_DBG(CAM_UTIL, + "scl clock = %s, index = %d", + scl_clk_str, j); + break; + } + } + } + } + + rc = of_property_read_string_index(of_node, + "clock-control-debugfs", 0, &clk_control_debugfs); + if (rc || !clk_control_debugfs) { + CAM_DBG(CAM_UTIL, "No clock_control_debugfs property found"); + rc = 0; + goto end; + } + + if (strcmp("true", clk_control_debugfs) == 0) + soc_info->clk_control_enable = true; + + CAM_DBG(CAM_UTIL, "X: dev_name = %s count = %d", + soc_info->dev_name, count); +end: + return rc; +} + +int 
cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
	int cesta_client_idx, enum cam_vote_level clk_level_high,
	enum cam_vote_level clk_level_low, bool do_not_set_src_clk)
{
	/*
	 * Apply the requested vote level(s) to every clock of this device.
	 *
	 * Resolution order: debugfs bypass, VMRM proxy voting, then per-clock
	 * set-rate. When clock-DRV is enabled and cesta_client_idx is a valid
	 * HW client, the source clock is programmed with a high/low rate pair
	 * through the CESTA path instead of the plain SW set-rate.
	 *
	 * Returns 0 on success or a negative errno.
	 */
	int i, rc = 0;
	enum cam_vote_level apply_level_high;
	/* Low vote defaults to the lowest supported level; only overridden
	 * on the clk-drv + valid-CESTA-client path below. */
	enum cam_vote_level apply_level_low = soc_info->lowest_clk_level;
	unsigned long applied_clk_rate;

	/* Debug knob: skip all clock programming entirely. */
	if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
		CAM_WARN(CAM_UTIL, "Bypass clk set rate");
		return 0;
	}

	/* Proxy (VM) voting: delegate to VMRM and skip local programming. */
	if (cam_vmrm_proxy_clk_rgl_voting_enable()) {
		rc = cam_vmrm_set_clk_rate_level(soc_info->hw_id, CAM_CLK_SW_CLIENT_IDX,
			clk_level_high, clk_level_low, do_not_set_src_clk, 0);
		if (rc) {
			CAM_ERR(CAM_UTIL, "vmrm set clk rate level failed %d", rc);
			return rc;
		}
		goto end;
	}

	if ((soc_info->num_clk == 0) ||
		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
		CAM_ERR(CAM_UTIL, "Invalid number of clock %d", soc_info->num_clk);
		return -EINVAL;
	}

	/* Map the requested levels onto levels this device supports. */
	rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level_high,
		&apply_level_high);
	if (rc) {
		CAM_ERR(CAM_UTIL, "[%s] : failed to get level clk_level_high=%d, rc=%d",
			soc_info->dev_name, clk_level_high, rc);
		return rc;
	}

	if (soc_info->is_clk_drv_en && CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) {
		rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level_low,
			&apply_level_low);
		if (rc) {
			CAM_ERR(CAM_UTIL, "[%s] : failed to get level clk_level_low=%d, rc=%d",
				soc_info->dev_name, clk_level_low, rc);
			return rc;
		}
	}

	if (soc_info->cam_cx_ipeak_enable)
		cam_cx_ipeak_update_vote_cx_ipeak(soc_info, apply_level_high);

	for (i = 0; i < soc_info->num_clk; i++) {
		if (do_not_set_src_clk && (i == soc_info->src_clk_idx)) {
			CAM_DBG(CAM_UTIL, "Skipping set rate for src clk %s",
				soc_info->clk_name[i]);
			continue;
		}

		/* Source clock with clock-DRV: program the high/low pair
		 * through CESTA for this HW client. */
		if (soc_info->is_clk_drv_en && CAM_IS_VALID_CESTA_IDX(cesta_client_idx) &&
			(i == soc_info->src_clk_idx)) {
			rc = cam_soc_util_set_cesta_clk_rate(soc_info, cesta_client_idx,
				soc_info->clk_rate[apply_level_high][i],
				soc_info->clk_rate[apply_level_low][i],
				&soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].high,
				&soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].low);
			if (rc) {
				CAM_ERR(CAM_UTIL,
					"Failed to set the req clk level[high low]: [%s %s] cesta_client_idx: %d",
					cam_soc_util_get_string_from_level(apply_level_high),
					cam_soc_util_get_string_from_level(apply_level_low),
					cesta_client_idx);
				break;
			}
			/* Src clk is done unless we are profiling both the HW
			 * and SW voting paths; then also run the SW path. */
			if (!clk_ops_profiling_hw_and_sw_voting)
				continue;
		}

		CAM_DBG(CAM_UTIL, "Set rate for clk %s rate %d", soc_info->clk_name[i],
			soc_info->clk_rate[apply_level_high][i]);

		rc = cam_soc_util_set_clk_rate(soc_info, soc_info->clk[i],
			soc_info->clk_name[i],
			soc_info->clk_rate[apply_level_high][i],
			CAM_IS_BIT_SET(soc_info->shared_clk_mask, i),
			(i == soc_info->src_clk_idx) ? true : false,
			soc_info->clk_id[i],
			&applied_clk_rate);
		if (rc < 0) {
			CAM_DBG(CAM_UTIL,
				"dev name = %s clk_name = %s idx = %d apply_level = %s",
				soc_info->dev_name, soc_info->clk_name[i],
				i, cam_soc_util_get_string_from_level(apply_level_high));
			/* Drop the cx-ipeak vote taken above before bailing. */
			if (soc_info->cam_cx_ipeak_enable)
				cam_cx_ipeak_update_vote_cx_ipeak(soc_info, 0);
			break;
		}

		/* Record the rate actually applied for the source clock. */
		if (i == soc_info->src_clk_idx)
			soc_info->applied_src_clk_rates.sw_client = applied_clk_rate;
	}

end:
	return rc;
};

/*
 * cam_soc_util_get_dt_gpio_req_tbl()
 *
 * @brief: Parse the "gpio-req-tbl-num" / "gpio-req-tbl-flags" /
 *         "gpio-req-tbl-label" DT properties into gconf's request table.
 *         Entries in "gpio-req-tbl-num" are indices into gpio_array.
 *
 * @of_node:         DT node to parse
 * @gconf:           Output gpio data (req table allocated here)
 * @gpio_array:      Previously resolved GPIO numbers
 * @gpio_array_size: Number of entries in gpio_array
 *
 * @return: 0 on success (or when the property is absent), negative errno
 *          on failure.
 */
static int cam_soc_util_get_dt_gpio_req_tbl(struct device_node *of_node,
	struct cam_soc_gpio_data *gconf, uint16_t *gpio_array,
	uint16_t gpio_array_size)
{
	int32_t rc = 0, i = 0;
	uint32_t count = 0;
	uint32_t *val_array = NULL;

	/* Property absent: not an error, device has no request table. */
	if (!of_get_property(of_node, "gpio-req-tbl-num", &count))
		return 0;

	/* of_get_property() returned the property length in bytes. */
	count /= sizeof(uint32_t);
	if (!count) {
		CAM_ERR(CAM_UTIL, "gpio-req-tbl-num 0");
		return 0;
	}

	val_array = CAM_MEM_ZALLOC_ARRAY(count, sizeof(uint32_t), GFP_KERNEL);
	if (!val_array)
		return -ENOMEM;

	gconf->cam_gpio_req_tbl = CAM_MEM_ZALLOC_ARRAY(count, sizeof(struct gpio),
		GFP_KERNEL);
	if
(!gconf->cam_gpio_req_tbl) { + rc = -ENOMEM; + goto free_val_array; + } + gconf->cam_gpio_req_tbl_size = count; + + rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num", + val_array, count); + if (rc) { + CAM_ERR(CAM_UTIL, "failed in reading gpio-req-tbl-num, rc = %d", + rc); + goto free_gpio_req_tbl; + } + + for (i = 0; i < count; i++) { + if (val_array[i] >= gpio_array_size) { + CAM_ERR(CAM_UTIL, "gpio req tbl index %d invalid", + val_array[i]); + goto free_gpio_req_tbl; + } + gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]]; + CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].gpio = %d", i, + gconf->cam_gpio_req_tbl[i].gpio); + } + + rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags", + val_array, count); + if (rc) { + CAM_ERR(CAM_UTIL, "Failed in gpio-req-tbl-flags, rc %d", rc); + goto free_gpio_req_tbl; + } + + for (i = 0; i < count; i++) { + gconf->cam_gpio_req_tbl[i].flags = val_array[i]; + CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].flags = %ld", i, + gconf->cam_gpio_req_tbl[i].flags); + } + + for (i = 0; i < count; i++) { + rc = of_property_read_string_index(of_node, + "gpio-req-tbl-label", i, + &gconf->cam_gpio_req_tbl[i].label); + if (rc) { + CAM_ERR(CAM_UTIL, "Failed rc %d", rc); + goto free_gpio_req_tbl; + } + CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].label = %s", i, + gconf->cam_gpio_req_tbl[i].label); + } + + CAM_MEM_FREE(val_array); + + return rc; + +free_gpio_req_tbl: + CAM_MEM_FREE(gconf->cam_gpio_req_tbl); +free_val_array: + CAM_MEM_FREE(val_array); + gconf->cam_gpio_req_tbl_size = 0; + + return rc; +} + +static int cam_soc_util_get_gpio_info(struct cam_hw_soc_info *soc_info) +{ + int32_t rc = 0, i = 0; + uint16_t *gpio_array = NULL; + int16_t gpio_array_size = 0; + struct cam_soc_gpio_data *gconf = NULL; + struct device_node *of_node = NULL; + + if (!soc_info || !soc_info->dev) + return -EINVAL; + + of_node = soc_info->dev->of_node; + + /* Validate input parameters */ + if (!of_node) { + CAM_ERR(CAM_UTIL, "Invalid param of_node"); + 
return -EINVAL; + } + + gpio_array_size = cam_get_gpio_counts(soc_info); + + if (gpio_array_size <= 0) + return 0; + + CAM_DBG(CAM_UTIL, "gpio count %d", gpio_array_size); + + gpio_array = CAM_MEM_ZALLOC_ARRAY(gpio_array_size, sizeof(uint16_t), GFP_KERNEL); + if (!gpio_array) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < gpio_array_size; i++) { + gpio_array[i] = cam_get_named_gpio(soc_info, i); + CAM_DBG(CAM_UTIL, "gpio_array[%d] = %d", i, gpio_array[i]); + } + + gconf = CAM_MEM_ZALLOC(sizeof(*gconf), GFP_KERNEL); + if (!gconf) { + rc = -ENOMEM; + goto free_gpio_array; + } + + rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array, + gpio_array_size); + if (rc) { + CAM_ERR(CAM_UTIL, "failed in msm_camera_get_dt_gpio_req_tbl"); + goto free_gpio_conf; + } + + gconf->cam_gpio_common_tbl = CAM_MEM_ZALLOC_ARRAY(gpio_array_size, + sizeof(struct gpio), GFP_KERNEL); + if (!gconf->cam_gpio_common_tbl) { + rc = -ENOMEM; + goto free_gpio_conf; + } + + for (i = 0; i < gpio_array_size; i++) + gconf->cam_gpio_common_tbl[i].gpio = gpio_array[i]; + + gconf->cam_gpio_common_tbl_size = gpio_array_size; + soc_info->gpio_data = gconf; + + soc_info->gpio_data->gpio_for_vmrm_purpose = + of_property_read_bool(of_node, "gpio_for_vmrm_purpose"); + CAM_DBG(CAM_UTIL, "dev name %s gpio_for_vmrm_purpose %d", soc_info->dev_name, + soc_info->gpio_data->gpio_for_vmrm_purpose); + + CAM_MEM_FREE(gpio_array); + + return rc; + +free_gpio_conf: + CAM_MEM_FREE(gconf); +free_gpio_array: + CAM_MEM_FREE(gpio_array); +err: + soc_info->gpio_data = NULL; + + return rc; +} + +static int cam_soc_util_request_gpio_table( + struct cam_hw_soc_info *soc_info, bool gpio_en) +{ + int rc = 0, i = 0; + uint8_t size = 0; + struct cam_soc_gpio_data *gpio_conf = + soc_info->gpio_data; + struct gpio *gpio_tbl = NULL; + + + if (!gpio_conf) { + CAM_DBG(CAM_UTIL, "No GPIO entry"); + return 0; + } + if (gpio_conf->cam_gpio_common_tbl_size <= 0) { + CAM_ERR(CAM_UTIL, "GPIO table size is invalid"); + return 
-EINVAL;
	}
	size = gpio_conf->cam_gpio_req_tbl_size;
	gpio_tbl = gpio_conf->cam_gpio_req_tbl;

	/* GPIOs managed by VMRM are not requested/freed locally. */
	if (soc_info->gpio_data->gpio_for_vmrm_purpose) {
		CAM_DBG(CAM_UTIL, "dev name %s does not have valid request gpio table",
			soc_info->dev_name);
		return 0;
	}

	if (!gpio_tbl || !size) {
		CAM_ERR(CAM_UTIL, "Invalid gpio_tbl %pK / size %d",
			gpio_tbl, size);
		return -EINVAL;
	}
	for (i = 0; i < size; i++) {
		CAM_DBG(CAM_UTIL, "i=%d, gpio=%d dir=%ld", i,
			gpio_tbl[i].gpio, gpio_tbl[i].flags);
	}
	if (gpio_en) {
		for (i = 0; i < size; i++) {
			rc = gpio_request_one(gpio_tbl[i].gpio,
				gpio_tbl[i].flags, gpio_tbl[i].label);
			if (rc) {
				/*
				 * After a GPIO request fails, continue to
				 * apply the remaining GPIOs; output an error
				 * message for driver bringup debug.
				 */
				CAM_ERR(CAM_UTIL, "gpio %d:%s request fails",
					gpio_tbl[i].gpio, gpio_tbl[i].label);
			}
		}
	} else {
		gpio_free_array(gpio_tbl, size);
	}

	return rc;
}

/*
 * cam_soc_util_get_dt_regulator_info()
 *
 * @brief: Parse "regulator-names" and, when "rgltr-cntrl-support" is set,
 *         the per-regulator min/max voltage and load-current arrays into
 *         soc_info.
 *
 * @soc_info: Device soc info (dev/of_node must be valid)
 *
 * @return: 0 on success (also when the node has no regulators), negative
 *          errno on malformed properties.
 */
static int cam_soc_util_get_dt_regulator_info
	(struct cam_hw_soc_info *soc_info)
{
	int rc = 0, count = 0, i = 0;
	struct device_node *of_node = NULL;

	if (!soc_info || !soc_info->dev) {
		CAM_ERR(CAM_UTIL, "Invalid parameters");
		return -EINVAL;
	}

	of_node = soc_info->dev->of_node;

	soc_info->num_rgltr = 0;
	count = of_property_count_strings(of_node, "regulator-names");
	/* -EINVAL from of_property_count_strings() means the property does
	 * not exist: treated as "no regulators", not an error.
	 * NOTE(review): other negative errors (e.g. malformed property) fall
	 * into the "no regulators found" error branch below — confirm that
	 * is intended. */
	if (count != -EINVAL) {
		if (count <= 0) {
			CAM_ERR(CAM_UTIL, "no regulators found");
			return -EINVAL;
		}

		soc_info->num_rgltr = count;

	} else {
		CAM_DBG(CAM_UTIL, "No regulators node found");
		return 0;
	}

	if (soc_info->num_rgltr > CAM_SOC_MAX_REGULATOR) {
		CAM_ERR(CAM_UTIL, "Invalid regulator count:%d",
			soc_info->num_rgltr);
		return -EINVAL;
	}

	for (i = 0; i < soc_info->num_rgltr; i++) {
		rc = of_property_read_string_index(of_node,
			"regulator-names", i, &soc_info->rgltr_name[i]);
		CAM_DBG(CAM_UTIL, "rgltr_name[%d] = %s",
			i, soc_info->rgltr_name[i]);
		if (rc) {
			CAM_ERR(CAM_UTIL, "no regulator resource at cnt=%d",
				i);
			return -ENODEV;
		}
	}

	/* Without rgltr-cntrl-support, regulators are enabled/disabled only,
	 * with no voltage/load programming. */
	if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
		CAM_DBG(CAM_UTIL, "No regulator control parameter defined");
		soc_info->rgltr_ctrl_support = false;
		return 0;
	}

	soc_info->rgltr_ctrl_support = true;

	rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
		soc_info->rgltr_min_volt, soc_info->num_rgltr);
	if (rc) {
		CAM_ERR(CAM_UTIL, "No minimum volatage value found, rc=%d", rc);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
		soc_info->rgltr_max_volt, soc_info->num_rgltr);
	if (rc) {
		CAM_ERR(CAM_UTIL, "No maximum volatage value found, rc=%d", rc);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(of_node, "rgltr-load-current",
		soc_info->rgltr_op_mode, soc_info->num_rgltr);
	if (rc) {
		CAM_ERR(CAM_UTIL, "No Load curent found rc=%d", rc);
		return -EINVAL;
	}

	return rc;
}

#ifdef CONFIG_CAM_PRESIL
/* Pre-silicon only: fabricated IRQ resources for devices that have no
 * real interrupt line on the emulation platform. */
static uint32_t next_dummy_irq_line_num = 0x000f;
struct resource dummy_irq_line[512];
#endif

/*
 * cam_soc_util_get_dt_properties()
 *
 * @brief: Top-level DT parser for a camera device node: cell-index,
 *         register blocks, interrupts, regulators, clocks, GPIOs,
 *         cx-ipeak and (optionally) VMRM resource ids.
 *
 * @soc_info: Device soc info to populate (dev/of_node must be valid)
 *
 * @return: 0 on success, negative errno on failure.
 */
int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
	struct device_node *of_node = NULL;
	int count = 0, i = 0, rc = 0;
#ifdef CONFIG_SPECTRA_VMRM
	int num_vmrm_resource_ids = 0;
#endif

	if (!soc_info || !soc_info->dev)
		return -EINVAL;

	of_node = soc_info->dev->of_node;

	rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
	if (rc) {
		CAM_ERR(CAM_UTIL, "device %s failed to read cell-index",
			soc_info->dev_name);
		return rc;
	}

	/* Register blocks are optional; absent reg-names means zero blocks. */
	count = of_property_count_strings(of_node, "reg-names");
	if (count <= 0) {
		CAM_DBG(CAM_UTIL, "no reg-names found for: %s",
			soc_info->dev_name);
		count = 0;
	}
	soc_info->num_mem_block = count;

	for (i = 0; i < soc_info->num_mem_block; i++) {
		rc = of_property_read_string_index(of_node, "reg-names", i,
			&soc_info->mem_block_name[i]);
		if (rc) {
			CAM_ERR(CAM_UTIL, "failed to read reg-names at %d", i);
			return rc;
		}
		soc_info->mem_block[i] =
			platform_get_resource_byname(soc_info->pdev,
			IORESOURCE_MEM, soc_info->mem_block_name[i]);

		if (!soc_info->mem_block[i]) {
			CAM_ERR(CAM_UTIL, "no mem resource by name %s",
				soc_info->mem_block_name[i]);
			rc = -ENODEV;
			return rc;
		}
	}

	/* Optional human-readable label for the device node. */
	rc = of_property_read_string(of_node, "label", &soc_info->label_name);
	if (rc)
		CAM_DBG(CAM_UTIL, "Label is not available in the node: %d", rc);

	if (soc_info->num_mem_block > 0) {
		rc = of_property_read_u32_array(of_node, "reg-cam-base",
			soc_info->mem_block_cam_base, soc_info->num_mem_block);
		if (rc) {
			CAM_ERR(CAM_UTIL, "Error reading register offsets");
			return rc;
		}
	}

	count = of_property_count_strings(of_node, "interrupt-names");
	if (count <= 0) {
		CAM_DBG(CAM_UTIL, "No interrupt line present for: %s", soc_info->dev_name);
		soc_info->irq_count = 0;
	} else {
		if (count > CAM_SOC_MAX_IRQ_LINES_PER_DEV) {
			CAM_ERR(CAM_UTIL,
				"Number of interrupt: %d exceeds maximum allowable interrupts: %d",
				count, CAM_SOC_MAX_IRQ_LINES_PER_DEV);
			return -EINVAL;
		}

		soc_info->irq_count = count;

		rc = cam_compat_util_get_irq(soc_info);
		/* NOTE(review): this compares against POSITIVE EINVAL while
		 * negative errors are handled just below — confirm
		 * cam_compat_util_get_irq() really returns +EINVAL for a
		 * failed name lookup; otherwise this branch is dead. */
		if (rc == EINVAL) {
			CAM_ERR(CAM_UTIL, "failed to read interrupt name at %d", i);
			return rc;
		}
		if (rc < 0) {
			CAM_ERR(CAM_UTIL, "get irq resource failed: %d for: %s",
				rc, soc_info->dev_name);
		#ifndef CONFIG_CAM_PRESIL
			return rc;
		#else
			/* Pre-sil for new devices not present on old */
			for (i = 0; i < soc_info->irq_count; i++) {
				soc_info->irq_line[i] =
					&dummy_irq_line[next_dummy_irq_line_num++];
				CAM_DBG(CAM_PRESIL,
					"interrupt line for dev %s irq name %s number %d",
					soc_info->dev_name, soc_info->irq_name[i],
					soc_info->irq_line[i]->start);
			}
		#endif
		}
	}

	rc = of_property_read_string_index(of_node, "compatible", 0,
		(const char **)&soc_info->compatible);
	if (rc)
		CAM_DBG(CAM_UTIL, "No compatible string present for: %s",
			soc_info->dev_name);

	soc_info->is_nrt_dev = false;
	if (of_property_read_bool(of_node, "nrt-device"))
		soc_info->is_nrt_dev = true;

	soc_info->is_crmb_clk = false;
	if (of_property_read_bool(of_node, "cam-crmb-clk"))
		soc_info->is_crmb_clk = true;

	CAM_DBG(CAM_UTIL, "Dev %s, nrt_dev: %d is_crmb_clk: %d", soc_info->dev_name,
		soc_info->is_nrt_dev, soc_info->is_crmb_clk);

	rc = cam_soc_util_get_dt_regulator_info(soc_info);
	if (rc)
		return rc;

	rc = cam_soc_util_get_dt_clk_info(soc_info);
	if (rc)
		return rc;

	rc = cam_soc_util_get_gpio_info(soc_info);
	if (rc)
		return rc;

	if (of_find_property(of_node, "qcom,cam-cx-ipeak", NULL))
		rc = cam_cx_ipeak_register_cx_ipeak(soc_info);

#ifdef CONFIG_SPECTRA_VMRM
	num_vmrm_resource_ids = of_property_count_u32_elems(of_node, "vmrm-resource-ids");

	/* A bad/absent vmrm-resource-ids list is logged but not fatal. */
	if ((num_vmrm_resource_ids <= 0) ||
		(num_vmrm_resource_ids > CAM_VMRM_MAX_RESOURCE_IDS)) {
		CAM_ERR(CAM_UTIL, "Invalid vmrm resource ids counter %d max %d",
			num_vmrm_resource_ids, CAM_VMRM_MAX_RESOURCE_IDS);
	} else {
		soc_info->num_vmrm_resource_ids = num_vmrm_resource_ids;
		for (i = 0; i < num_vmrm_resource_ids; i++) {
			of_property_read_u32_index(of_node, "vmrm-resource-ids", i,
				&soc_info->vmrm_resource_ids[i]);
			CAM_DBG(CAM_UTIL, "dev name %s vmrm resource id num %d index %d id %d",
				soc_info->dev_name, num_vmrm_resource_ids, i,
				soc_info->vmrm_resource_ids[i]);
		}
	}
#endif
	return rc;
}

/**
 * cam_soc_util_get_regulator()
 *
 * @brief: Get the regulator resource named rgltr_name for this device
 *
 * @dev: Device associated with regulator
 * @reg: Return pointer to be filled with regulator on success
 * @rgltr_name: Name of regulator to get
 *
 * @return: 0 for Success, negative value for failure
 */
static int cam_soc_util_get_regulator(struct device *dev,
	struct regulator **reg, const char *rgltr_name)
{
	int rc = 0;
	*reg = cam_wrapper_regulator_get(dev, rgltr_name);
	if (IS_ERR_OR_NULL(*reg)) {
		/* PTR_ERR() of a NULL pointer is 0; map that to -EINVAL. */
		rc = PTR_ERR(*reg);
		rc = rc ?
rc : -EINVAL;
		CAM_ERR(CAM_UTIL, "Regulator %s get failed %d", rgltr_name, rc);
		*reg = NULL;
	}
	return rc;
}

/*
 * cam_soc_util_regulator_disable()
 *
 * @brief: Disable a regulator if it is currently enabled, wait the
 *         configured settle delay, then drop the load and voltage votes.
 *
 * @rgltr:          Regulator handle (must not be NULL)
 * @rgltr_name:     Name used for logging/wrapper calls
 * @rgltr_min_volt: Minimum voltage (unused on disable)
 * @rgltr_max_volt: Maximum voltage, kept as the set_voltage upper bound
 * @rgltr_op_mode:  Load current (unused on disable)
 * @rgltr_delay_ms: Post-disable settle delay in milliseconds
 *
 * @return: 0 on success or if already disabled, negative errno otherwise.
 */
int cam_soc_util_regulator_disable(struct regulator *rgltr,
	const char *rgltr_name, uint32_t rgltr_min_volt,
	uint32_t rgltr_max_volt, uint32_t rgltr_op_mode,
	uint32_t rgltr_delay_ms)
{
	int32_t rc = 0;

	if (!rgltr) {
		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
		return -EINVAL;
	}

	rc = cam_wrapper_regulator_is_enabled(rgltr, rgltr_name);
	if (rc < 0) {
		CAM_ERR(CAM_UTIL, "%s regulator_is_enabled failed", rgltr_name);
		return rc;
	} else if (rc == 0) {
		/* Already off: nothing to do. */
		CAM_DBG(CAM_UTIL, "%s regulator already disabled", rgltr_name);
		return rc;
	}

	rc = cam_wrapper_regulator_disable(rgltr, rgltr_name);
	if (rc) {
		CAM_ERR(CAM_UTIL, "%s regulator disable failed", rgltr_name);
		return rc;
	}

	/* msleep for long delays, usleep_range for short ones. */
	if (rgltr_delay_ms > 20)
		msleep(rgltr_delay_ms);
	else if (rgltr_delay_ms)
		usleep_range(rgltr_delay_ms * 1000,
			(rgltr_delay_ms * 1000) + 1000);

	/* Drop the load and voltage votes on controllable regulators. */
	if (cam_wrapper_regulator_count_voltages(rgltr, rgltr_name) > 0) {
		cam_wrapper_regulator_set_load(rgltr, 0, rgltr_name);
		cam_wrapper_regulator_set_voltage(rgltr, 0, rgltr_max_volt, rgltr_name);
	}

	return rc;
}


/*
 * cam_soc_util_regulator_enable()
 *
 * @brief: Program voltage and load (when the regulator is controllable),
 *         enable it, then wait the configured settle delay.
 *
 * @rgltr:          Regulator handle (must not be NULL)
 * @rgltr_name:     Name used for logging/wrapper calls
 * @rgltr_min_volt: Minimum voltage to request
 * @rgltr_max_volt: Maximum voltage to request
 * @rgltr_op_mode:  Load current to request
 * @rgltr_delay:    Post-enable settle delay in milliseconds
 *
 * @return: 0 on success, negative errno otherwise.
 */
int cam_soc_util_regulator_enable(struct regulator *rgltr,
	const char *rgltr_name,
	uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
	uint32_t rgltr_op_mode, uint32_t rgltr_delay)
{
	int32_t rc = 0;

	if (!rgltr) {
		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
		return -EINVAL;
	}

	if (cam_wrapper_regulator_count_voltages(rgltr, rgltr_name) > 0) {
		CAM_DBG(CAM_UTIL, "[%s] voltage min=%d, max=%d",
			rgltr_name, rgltr_min_volt, rgltr_max_volt);

		rc = cam_wrapper_regulator_set_voltage(
			rgltr, rgltr_min_volt, rgltr_max_volt, rgltr_name);
		if (rc) {
			CAM_ERR(CAM_UTIL, "%s set voltage failed", rgltr_name);
			return rc;
		}

		rc = cam_wrapper_regulator_set_load(rgltr, rgltr_op_mode, rgltr_name);
		if (rc) {
			CAM_ERR(CAM_UTIL, "%s set optimum mode failed",
				rgltr_name);
			return rc;
		}
	}

	rc = cam_wrapper_regulator_enable(rgltr, rgltr_name);
	if (rc) {
		CAM_ERR(CAM_UTIL, "%s regulator_enable failed", rgltr_name);
		return rc;
	}

	/* msleep for long delays, usleep_range for short ones. */
	if (rgltr_delay > 20)
		msleep(rgltr_delay);
	else if (rgltr_delay)
		usleep_range(rgltr_delay * 1000,
			(rgltr_delay * 1000) + 1000);

	return rc;
}

/*
 * cam_soc_util_select_pinctrl_state()
 *
 * @brief: Move one pinctrl mapping to its active or suspend state.
 *         Transitions are skipped when the target state handle is missing
 *         or the mapping is already in the requested state.
 *
 * @soc_info:  Device soc info holding pinctrl_info
 * @pctrl_idx: Index into the pinctrl state map
 * @active:    true -> active state, false -> suspend state
 *
 * @return: 0 on success/no-op, negative errno on failure.
 */
int cam_soc_util_select_pinctrl_state(struct cam_hw_soc_info *soc_info,
	int pctrl_idx, bool active)
{
	int rc = 0;

	struct cam_soc_pinctrl_info *pctrl_info = &soc_info->pinctrl_info;

	if (pctrl_idx >= CAM_SOC_MAX_PINCTRL_MAP) {
		CAM_ERR(CAM_UTIL, "Invalid Map idx: %d max supported: %d",
			pctrl_idx, CAM_SOC_MAX_PINCTRL_MAP);
		return -EINVAL;
	}

	if (pctrl_info->pctrl_state[pctrl_idx].gpio_state_active &&
		active &&
		!pctrl_info->pctrl_state[pctrl_idx].is_active) {
		rc = pinctrl_select_state(pctrl_info->pinctrl,
			pctrl_info->pctrl_state[pctrl_idx].gpio_state_active);
		if (rc)
			CAM_ERR(CAM_UTIL,
				"Pinctrl active state transition failed: rc: %d",
				rc);
		else {
			pctrl_info->pctrl_state[pctrl_idx].is_active = true;
			CAM_DBG(CAM_UTIL, "Pctrl_idx: %d is in active state",
				pctrl_idx);
		}
	}

	if (pctrl_info->pctrl_state[pctrl_idx].gpio_state_suspend &&
		!active &&
		pctrl_info->pctrl_state[pctrl_idx].is_active) {
		rc = pinctrl_select_state(pctrl_info->pinctrl,
			pctrl_info->pctrl_state[pctrl_idx].gpio_state_suspend);
		if (rc)
			CAM_ERR(CAM_UTIL,
				"Pinctrl suspend state transition failed: rc: %d",
				rc);
		else {
			pctrl_info->pctrl_state[pctrl_idx].is_active = false;
			CAM_DBG(CAM_UTIL, "Pctrl_idx: %d is in suspend state",
				pctrl_idx);
		}
	}

	return rc;
}

/*
 * cam_soc_util_request_pinctrl()
 *
 * @brief: Acquire the device pinctrl handle and look up the
 *         "<name>_active" / "<name>_suspend" state pair for every entry of
 *         the "pctrl-idx-mapping" / "pctrl-map-names" DT properties.
 *
 * @soc_info: Device soc info
 *
 * @return: 0 on success or when pinctrl is absent, negative errno on
 *          malformed DT data or missing states.
 */
static int cam_soc_util_request_pinctrl(
	struct cam_hw_soc_info *soc_info)
{
	struct cam_soc_pinctrl_info *device_pctrl = &soc_info->pinctrl_info;
	struct device *dev = soc_info->dev;
	struct device_node *of_node = dev->of_node;
	uint32_t i = 0;
	int rc = 0;
	const char *name;
	uint32_t idx;
	char pctrl_active[50];
	char pctrl_suspend[50];
	int32_t num_of_map_idx = 0;
	int32_t num_of_string = 0;

	device_pctrl->pinctrl = devm_pinctrl_get(dev);
	if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
		/* No pinctrl for this device is a valid configuration. */
		CAM_DBG(CAM_UTIL, "Pinctrl not available");
		device_pctrl->pinctrl = NULL;
		return 0;
	}

	num_of_map_idx = of_property_count_u32_elems(
		of_node, "pctrl-idx-mapping");
	if (num_of_map_idx <= 0) {
		CAM_ERR(CAM_UTIL,
			"Reading pctrl-idx-mapping failed");
		return -EINVAL;
	}

	num_of_string = of_property_count_strings(
		of_node, "pctrl-map-names");
	if (num_of_string <= 0) {
		CAM_ERR(CAM_UTIL, "no pinctrl-mapping found for: %s",
			soc_info->dev_name);
		device_pctrl->pinctrl = NULL;
		return -EINVAL;
	}

	/* The index list and the name list must pair up one-to-one. */
	if (num_of_map_idx != num_of_string) {
		CAM_ERR(CAM_UTIL,
			"Incorrect inputs mapping-idx count: %d mapping-names: %d",
			num_of_map_idx, num_of_string);
		device_pctrl->pinctrl = NULL;
		return -EINVAL;
	}

	if (num_of_map_idx > CAM_SOC_MAX_PINCTRL_MAP) {
		CAM_ERR(CAM_UTIL, "Invalid mapping %u max supported: %d",
			num_of_map_idx, CAM_SOC_MAX_PINCTRL_MAP);
		return -EINVAL;
	}

	for (i = 0; i < num_of_map_idx; i++) {
		of_property_read_u32_index(of_node,
			"pctrl-idx-mapping", i, &idx);

		if (idx >= CAM_SOC_MAX_PINCTRL_MAP) {
			CAM_ERR(CAM_UTIL, "Invalid Index: %d max supported: %d",
				idx, CAM_SOC_MAX_PINCTRL_MAP);
			return -EINVAL;
		}

		rc = of_property_read_string_index(
			of_node, "pctrl-map-names", i, &name);
		if (rc) {
			CAM_ERR(CAM_UTIL,
				"failed to read pinctrl-mapping at %d", i);
			return rc;
		}

		/* DT state names are "<name>_active" / "<name>_suspend". */
		snprintf(pctrl_active, sizeof(pctrl_active),
			"%s%s", name, "_active");
		CAM_DBG(CAM_UTIL, "pctrl_active at index: %d name: %s",
			i, pctrl_active);
		snprintf(pctrl_suspend, sizeof(pctrl_suspend),
			"%s%s", name, "_suspend");
		CAM_DBG(CAM_UTIL, "pctrl_suspend at index: %d name: %s",
			i, pctrl_suspend);

		device_pctrl->pctrl_state[idx].gpio_state_active =
			pinctrl_lookup_state(device_pctrl->pinctrl,
				pctrl_active);
		if (IS_ERR_OR_NULL(
			device_pctrl->pctrl_state[idx].gpio_state_active)) {
			CAM_ERR(CAM_UTIL,
				"Failed to get the active state pinctrl handle");
			device_pctrl->pctrl_state[idx].gpio_state_active =
				NULL;
			return -EINVAL;
		}
		device_pctrl->pctrl_state[idx].gpio_state_suspend =
			pinctrl_lookup_state(device_pctrl->pinctrl,
			pctrl_suspend);
		if (IS_ERR_OR_NULL(
			device_pctrl->pctrl_state[idx].gpio_state_suspend)) {
			CAM_ERR(CAM_UTIL,
				"Failed to get the active state pinctrl handle");
			device_pctrl->pctrl_state[idx].gpio_state_suspend = NULL;
			return -EINVAL;
		}
	}

	return 0;
}

/* Release the pinctrl handle acquired by cam_soc_util_request_pinctrl(). */
static void cam_soc_util_release_pinctrl(struct cam_hw_soc_info *soc_info)
{
	if (soc_info->pinctrl_info.pinctrl)
		devm_pinctrl_put(soc_info->pinctrl_info.pinctrl);
}

/*
 * cam_soc_util_regulator_disable_default()
 *
 * @brief: Disable all regulators of the device in reverse order of
 *         enabling; full voltage/load teardown only when rgltr-cntrl is
 *         supported.
 */
static void cam_soc_util_regulator_disable_default(
	struct cam_hw_soc_info *soc_info)
{
	int j = 0;
	uint32_t num_rgltr = soc_info->num_rgltr;

	for (j = num_rgltr-1; j >= 0; j--) {
		if (soc_info->rgltr_ctrl_support == true) {
			cam_soc_util_regulator_disable(soc_info->rgltr[j],
				soc_info->rgltr_name[j],
				soc_info->rgltr_min_volt[j],
				soc_info->rgltr_max_volt[j],
				soc_info->rgltr_op_mode[j],
				soc_info->rgltr_delay[j]);
		} else {
			if (soc_info->rgltr[j])
				cam_wrapper_regulator_disable(soc_info->rgltr[j],
					soc_info->rgltr_name[j]);
		}
	}
}

/*
 * cam_soc_util_reset_control()
 *
 * @brief: Run the fixed hardware reset (assert/deassert) sequence for the
 *         device's reset controls.
 *
 * NOTE(review): the sequence below touches resets[0..7] unconditionally,
 * but num_reset is only validated against CAM_SOC_MAX_RESET and > 0 — a
 * device with fewer than 8 resets would index past its array. Confirm all
 * users of this API declare at least 8 resets.
 *
 * @return: 0 on success or when the device has no resets.
 */
int cam_soc_util_reset_control(
	struct cam_hw_soc_info *soc_info)
{
	int rc = 0;
	uint32_t num_reset = soc_info->num_reset;

	if (num_reset <= 0)
		return 0;

	if (num_reset > CAM_SOC_MAX_RESET) {
		CAM_ERR(CAM_UTIL,
			"%s has invalid reset resource number %d",
			soc_info->dev_name, num_reset);
		return -EINVAL;
	}

	/* pscbc reset */
	reset_control_assert(soc_info->resets[0]);
	reset_control_assert(soc_info->resets[1]);
	reset_control_assert(soc_info->resets[2]);
	reset_control_assert(soc_info->resets[3]);
	usleep_range(1, 2);
	reset_control_deassert(soc_info->resets[0]);
	reset_control_deassert(soc_info->resets[1]);

	/* sw mode sleep stg mode. 
STG done for SF */ + reset_control_assert(soc_info->resets[4]); + reset_control_assert(soc_info->resets[5]); + usleep_range(1, 2); + reset_control_deassert(soc_info->resets[1]); + reset_control_deassert(soc_info->resets[5]); + reset_control_deassert(soc_info->resets[4]); + + /* sw mode sleep stg mode. STG done for HF */ + reset_control_assert(soc_info->resets[6]); + reset_control_assert(soc_info->resets[7]); + usleep_range(1, 2); + reset_control_deassert(soc_info->resets[0]); + reset_control_deassert(soc_info->resets[7]); + reset_control_deassert(soc_info->resets[6]); + return rc; +} + +static int cam_soc_util_regulator_enable_default( + struct cam_hw_soc_info *soc_info) +{ + int j = 0, rc = 0; + uint32_t num_rgltr = soc_info->num_rgltr; + + if (num_rgltr > CAM_SOC_MAX_REGULATOR) { + CAM_ERR(CAM_UTIL, + "%s has invalid regulator number %d", + soc_info->dev_name, num_rgltr); + return -EINVAL; + } + + for (j = 0; j < num_rgltr; j++) { + CAM_DBG(CAM_UTIL, "[%s] : start regulator %s enable, rgltr_ctrl_support %d", + soc_info->dev_name, soc_info->rgltr_name[j], soc_info->rgltr_ctrl_support); + if (soc_info->rgltr_ctrl_support == true) { + rc = cam_soc_util_regulator_enable(soc_info->rgltr[j], + soc_info->rgltr_name[j], + soc_info->rgltr_min_volt[j], + soc_info->rgltr_max_volt[j], + soc_info->rgltr_op_mode[j], + soc_info->rgltr_delay[j]); + } else { + if (soc_info->rgltr[j]) + rc = cam_wrapper_regulator_enable(soc_info->rgltr[j], + soc_info->rgltr_name[j]); + } + + if (rc) { + CAM_ERR(CAM_UTIL, "%s enable failed", + soc_info->rgltr_name[j]); + goto disable_rgltr; + } + } + + return rc; +disable_rgltr: + + for (j--; j >= 0; j--) { + if (soc_info->rgltr_ctrl_support == true) { + cam_soc_util_regulator_disable(soc_info->rgltr[j], + soc_info->rgltr_name[j], + soc_info->rgltr_min_volt[j], + soc_info->rgltr_max_volt[j], + soc_info->rgltr_op_mode[j], + soc_info->rgltr_delay[j]); + } else { + if (soc_info->rgltr[j]) + cam_wrapper_regulator_disable(soc_info->rgltr[j], + 
soc_info->rgltr_name[j]); + } + } + + return rc; +} + +static bool cam_soc_util_is_presil_address_space(unsigned long mem_block_start) +{ + if (mem_block_start >= CAM_SS_START_PRESIL && mem_block_start < CAM_SS_END_PRESIL) + return true; + + return false; +} + +#ifndef CONFIG_CAM_PRESIL +void __iomem * cam_soc_util_get_mem_base( + unsigned long mem_block_start, + unsigned long mem_block_size, + const char *mem_block_name, + uint32_t reserve_mem) +{ + void __iomem * mem_base; + + if (reserve_mem) { + if (!request_mem_region(mem_block_start, + mem_block_size, + mem_block_name)) { + CAM_ERR(CAM_UTIL, + "Error Mem region request Failed:%s", + mem_block_name); + return NULL; + } + } + + mem_base = ioremap(mem_block_start, mem_block_size); + + if (!mem_base) { + CAM_ERR(CAM_UTIL, "get mem base failed"); + } + + return mem_base; +} + +int cam_soc_util_request_irq(struct device *dev, + unsigned int irq_line_start, + irq_handler_t handler, + unsigned long irqflags, + const char *irq_name, + void *irq_data, + unsigned long mem_block_start) +{ + int rc; + + rc = devm_request_irq(dev, + irq_line_start, + handler, + IRQF_TRIGGER_RISING, + irq_name, + irq_data); + if (rc) { + CAM_ERR(CAM_UTIL, "irq request fail rc %d", rc); + return -EBUSY; + } + + disable_irq(irq_line_start); + + return rc; +} + +#else +void __iomem * cam_soc_util_get_mem_base( + unsigned long mem_block_start, + unsigned long mem_block_size, + const char *mem_block_name, + uint32_t reserve_mem) +{ + void __iomem * mem_base; + + if(cam_soc_util_is_presil_address_space(mem_block_start)) + mem_base = (void __iomem *)mem_block_start; + else { + if (reserve_mem) { + if (!request_mem_region(mem_block_start, + mem_block_size, + mem_block_name)) { + CAM_ERR(CAM_UTIL, + "Error Mem region request Failed:%s", + mem_block_name); + return NULL; + } + } + + mem_base = ioremap(mem_block_start, mem_block_size); + } + + if (!mem_base) { + CAM_ERR(CAM_UTIL, "get mem base failed"); + } + + return mem_base; +} + +int 
cam_soc_util_request_irq(struct device *dev,
	unsigned int irq_line_start,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *irq_name,
	void *irq_data,
	unsigned long mem_block_start)
{
	/*
	 * Pre-silicon variant: in addition to the normal devm_request_irq(),
	 * subscribe the handler with the pre-sil framework when the device's
	 * register block lies in the pre-sil address window.
	 */
	int rc;

	if(cam_soc_util_is_presil_address_space(mem_block_start)) {
		rc = devm_request_irq(dev,
			irq_line_start,
			handler,
			irqflags,
			irq_name,
			irq_data);
		if (rc) {
			CAM_ERR(CAM_UTIL, "presil irq request fail");
			return -EBUSY;
		}

		disable_irq(irq_line_start);

		/* Subscribe returns non-zero on success; invert so rc keeps
		 * the 0 == success convention. */
		rc = !(cam_presil_subscribe_device_irq(irq_line_start,
			handler, irq_data, irq_name));
		CAM_DBG(CAM_PRESIL, "Subscribe presil IRQ: rc=%d NUM=%d Name=%s handler=0x%x",
			rc, irq_line_start, irq_name, handler);
		if (rc) {
			CAM_ERR(CAM_UTIL, "presil irq request fail");
			return -EBUSY;
		}
	} else {
		rc = devm_request_irq(dev,
			irq_line_start,
			handler,
			irqflags,
			irq_name,
			irq_data);
		if (rc) {
			CAM_ERR(CAM_UTIL, "irq request fail");
			return -EBUSY;
		}
		/* Requested disabled; enabled during resource enable. */
		disable_irq(irq_line_start);
		CAM_INFO(CAM_UTIL, "Subscribe for non-presil IRQ success");
	}

	CAM_INFO(CAM_UTIL, "returning IRQ for mem_block_start 0x%0x rc %d",
		mem_block_start, rc);

	return rc;
}
#endif

/*
 * cam_soc_util_request_platform_resource()
 *
 * @brief: Acquire all platform resources described by soc_info: map
 *         register blocks, get regulators, request IRQs (left disabled),
 *         get clocks (plus shared-clk wrapper / MMRM registration),
 *         pinctrl, and GPIO table. Regulator/clock acquisition is skipped
 *         when VMRM proxy voting is active. On failure, everything
 *         acquired so far is released via the unwind labels.
 *
 * @soc_info: Device soc info (populated by cam_soc_util_get_dt_properties)
 * @handler:  IRQ handler shared by all of this device's IRQ lines
 * @irq_data: Per-IRQ-line cookies, one per soc_info->irq_count
 *
 * @return: 0 on success, negative errno on failure.
 */
int cam_soc_util_request_platform_resource(
	struct cam_hw_soc_info *soc_info,
	irq_handler_t handler, void **irq_data)
{
	int i = 0, rc = 0;
	bool clk_regulator_needed = true;

	if (!soc_info || !soc_info->dev) {
		CAM_ERR(CAM_UTIL, "Invalid parameters");
		return -EINVAL;
	}

	if (unlikely(soc_info->irq_count > CAM_SOC_MAX_IRQ_LINES_PER_DEV)) {
		CAM_ERR(CAM_UTIL, "Invalid irq count: %u Max IRQ per device: %d",
			soc_info->irq_count, CAM_SOC_MAX_IRQ_LINES_PER_DEV);
		return -EINVAL;
	}

	/* Under VMRM proxy voting, clocks/regulators are owned remotely. */
	if (cam_vmrm_proxy_clk_rgl_voting_enable())
		clk_regulator_needed = false;

	for (i = 0; i < soc_info->num_mem_block; i++) {

		soc_info->reg_map[i].mem_base = cam_soc_util_get_mem_base(
			soc_info->mem_block[i]->start,
			resource_size(soc_info->mem_block[i]),
			soc_info->mem_block_name[i],
			soc_info->reserve_mem);

		if (!soc_info->reg_map[i].mem_base) {
			CAM_ERR(CAM_UTIL, "i= %d base NULL", i);
			rc = -ENOMEM;
			goto unmap_base;
		}

		soc_info->reg_map[i].mem_cam_base =
			soc_info->mem_block_cam_base[i];
		soc_info->reg_map[i].size =
			resource_size(soc_info->mem_block[i]);

		soc_info->num_reg_map++;
	}

	if (clk_regulator_needed) {
		for (i = 0; i < soc_info->num_rgltr; i++) {
			if (soc_info->rgltr_name[i] == NULL) {
				CAM_ERR(CAM_UTIL, "can't find regulator name");
				goto put_regulator;
			}

			rc = cam_soc_util_get_regulator(soc_info->dev,
				&soc_info->rgltr[i],
				soc_info->rgltr_name[i]);
			if (rc)
				goto put_regulator;
		}
	}

	for (i = 0; i < soc_info->irq_count; i++) {
		rc = cam_soc_util_request_irq(soc_info->dev, soc_info->irq_num[i],
			handler, IRQF_TRIGGER_RISING, soc_info->irq_name[i],
			irq_data[i], soc_info->mem_block[0]->start);
		if (rc) {
			CAM_ERR(CAM_UTIL, "irq request fail for irq name: %s dev: %s",
				soc_info->irq_name[i], soc_info->dev_name);
			rc = -EBUSY;
			goto put_irq;
		}

		soc_info->irq_data[i] = irq_data[i];
	}

	if (clk_regulator_needed) {
		/* Get Clock */
		for (i = 0; i < soc_info->num_clk; i++) {
			soc_info->clk[i] = cam_wrapper_clk_get(soc_info->dev,
				soc_info->clk_name[i]);
			if (IS_ERR(soc_info->clk[i])) {
				CAM_ERR(CAM_UTIL, "get failed for %s",
					soc_info->clk_name[i]);
				rc = -ENOENT;
				goto put_clk;
			} else if (!soc_info->clk[i]) {
				CAM_DBG(CAM_UTIL, "%s handle is NULL skip get",
					soc_info->clk_name[i]);
				continue;
			}

			/* Create a wrapper entry if this is a shared clock */
			if (CAM_IS_BIT_SET(soc_info->shared_clk_mask, i)) {
				uint32_t min_level = soc_info->lowest_clk_level;

				CAM_DBG(CAM_UTIL,
					"Dev %s, clk %s, id %d register wrapper entry for shared clk",
					soc_info->dev_name, soc_info->clk_name[i],
					soc_info->clk_id[i]);

				rc = cam_soc_util_clk_wrapper_register_entry(
					soc_info->clk_id[i], soc_info->clk[i],
					(i == soc_info->src_clk_idx) ?
true : false,
					soc_info, soc_info->clk_rate[min_level][i],
					soc_info->clk_name[i]);
				if (rc) {
					CAM_ERR(CAM_UTIL,
						"Failed in registering shared clk Dev %s id %d",
						soc_info->dev_name,
						soc_info->clk_id[i]);
					cam_wrapper_clk_put(soc_info->clk[i],
						soc_info->clk_name[i]);
					soc_info->clk[i] = NULL;
					goto put_clk;
				}
			} else if (i == soc_info->src_clk_idx) {
				/* Non-shared source clock: register with MMRM
				 * for managed rate voting. */
				rc = cam_soc_util_register_mmrm_client(
					soc_info->clk_id[i], soc_info->clk[i],
					soc_info->is_nrt_dev,
					soc_info, soc_info->clk_name[i],
					&soc_info->mmrm_handle);
				if (rc) {
					CAM_ERR(CAM_UTIL,
						"Failed in register mmrm client Dev %s clk id %d",
						soc_info->dev_name,
						soc_info->clk_id[i]);
					cam_wrapper_clk_put(soc_info->clk[i],
						soc_info->clk_name[i]);
					soc_info->clk[i] = NULL;
					goto put_clk;
				}
			}
		}
	}

	rc = cam_soc_util_request_pinctrl(soc_info);
	if (rc) {
		CAM_ERR(CAM_UTIL, "Failed in requesting Pinctrl, rc: %d", rc);
		goto put_clk;
	}

	rc = cam_soc_util_request_gpio_table(soc_info, true);
	if (rc) {
		CAM_ERR(CAM_UTIL, "Failed in request gpio table, rc=%d", rc);
		goto put_clk;
	}

	if (clk_regulator_needed) {
		if (soc_info->clk_control_enable)
			cam_soc_util_create_clk_lvl_debugfs(soc_info);
	}

	return rc;

	/*
	 * Unwind labels: each stage falls through into the next. `i` carries
	 * the index of the failing resource into its own label's loop; the
	 * `i == -1` checks reset it to the full count for the stages that
	 * completed before the failure.
	 */
put_clk:

	if (clk_regulator_needed) {
		if (soc_info->mmrm_handle) {
			cam_soc_util_unregister_mmrm_client(soc_info->mmrm_handle);
			soc_info->mmrm_handle = NULL;
		}

		for (i = i - 1; i >= 0; i--) {
			if (soc_info->clk[i]) {
				if (CAM_IS_BIT_SET(soc_info->shared_clk_mask, i))
					cam_soc_util_clk_wrapper_unregister_entry(
						soc_info->clk_id[i], soc_info);

				cam_wrapper_clk_put(soc_info->clk[i], soc_info->clk_name[i]);
				soc_info->clk[i] = NULL;
			}
		}
	}

put_irq:
	if (i == -1)
		i = soc_info->irq_count;
	for (i = i - 1; i >= 0; i--) {
		if (soc_info->irq_num[i] > 0)
			disable_irq(soc_info->irq_num[i]);
	}

put_regulator:
	if (clk_regulator_needed) {
		if (i == -1)
			i = soc_info->num_rgltr;
		for (i = i - 1; i >= 0; i--) {
			if (soc_info->rgltr[i]) {
				cam_wrapper_regulator_disable(soc_info->rgltr[i],
					soc_info->rgltr_name[i]);
				cam_wrapper_regulator_put(soc_info->rgltr[i],
					soc_info->rgltr_name[i]);
				soc_info->rgltr[i] = NULL;
			}
		}
	}

unmap_base:
	if (i == -1)
		i = soc_info->num_reg_map;
	for (i = i - 1; i >= 0; i--) {
		if (soc_info->reserve_mem)
			release_mem_region(soc_info->mem_block[i]->start,
				resource_size(soc_info->mem_block[i]));
		iounmap(soc_info->reg_map[i].mem_base);
		soc_info->reg_map[i].mem_base = NULL;
		soc_info->reg_map[i].size = 0;
	}

	return rc;
}

/*
 * cam_soc_util_release_platform_resource()
 *
 * @brief: Release everything acquired by
 *         cam_soc_util_request_platform_resource(): MMRM client, clocks
 *         (and shared-clk wrapper entries), regulators, register mappings,
 *         IRQs (plus pre-sil unsubscription), pinctrl and GPIO table.
 *         Clock/regulator release is skipped under VMRM proxy voting.
 *
 * @soc_info: Device soc info
 *
 * @return: 0 on success, -EINVAL on invalid input.
 */
int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
{
	int i;
	bool b_ret = false;
	bool clk_regulator_needed = true;

	if (!soc_info || !soc_info->dev) {
		CAM_ERR(CAM_UTIL, "Invalid parameter");
		return -EINVAL;
	}

	if (cam_vmrm_proxy_clk_rgl_voting_enable())
		clk_regulator_needed = false;

	if (clk_regulator_needed) {
		if (soc_info->mmrm_handle) {
			cam_soc_util_unregister_mmrm_client(soc_info->mmrm_handle);
			soc_info->mmrm_handle = NULL;
		}

		for (i = soc_info->num_clk - 1; i >= 0; i--) {
			if (CAM_IS_BIT_SET(soc_info->shared_clk_mask, i))
				cam_soc_util_clk_wrapper_unregister_entry(
					soc_info->clk_id[i], soc_info);
			if (!soc_info->clk[i]) {
				CAM_DBG(CAM_UTIL, "%s handle is NULL skip put",
					soc_info->clk_name[i]);
				continue;
			}
			cam_wrapper_clk_put(soc_info->clk[i], soc_info->clk_name[i]);
			soc_info->clk[i] = NULL;
		}

		for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
			if (soc_info->rgltr[i]) {
				cam_wrapper_regulator_put(soc_info->rgltr[i],
					soc_info->rgltr_name[i]);
				soc_info->rgltr[i] = NULL;
			}
		}

		for (i = soc_info->num_reg_map - 1; i >= 0; i--) {
			iounmap(soc_info->reg_map[i].mem_base);
			soc_info->reg_map[i].mem_base = NULL;
			soc_info->reg_map[i].size = 0;
		}
	}

	for (i = soc_info->irq_count -1; i >= 0; i--) {
		if (soc_info->irq_num[i] > 0) {
			if (cam_presil_mode_enabled()) {
				if (cam_soc_util_is_presil_address_space( 
+ soc_info->mem_block[0]->start)) { + b_ret = cam_presil_unsubscribe_device_irq( + soc_info->irq_line[i]->start); + CAM_DBG(CAM_PRESIL, + "UnSubscribe IRQ: Ret=%d NUM=%d Name=%s", + b_ret, soc_info->irq_line[i]->start, + soc_info->irq_name[i]); + } + } + + disable_irq(soc_info->irq_num[i]); + } + } + + cam_soc_util_release_pinctrl(soc_info); + + /* release for gpio */ + cam_soc_util_request_gpio_table(soc_info, false); + + soc_info->dentry = NULL; + + return 0; +} + +int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info, + int cesta_client_idx, bool enable_clocks, enum cam_vote_level clk_level, + bool irq_enable) +{ + int rc = 0, i; + + if (!soc_info) + return -EINVAL; + + if (cam_vmrm_proxy_clk_rgl_voting_enable()) { + rc = cam_vmrm_soc_enable_disable_resources(soc_info->hw_id, true); + if (rc) { + CAM_ERR(CAM_UTIL, "vmrm soc enable soc resources failed %d", rc); + return rc; + } + } else { + rc = cam_soc_util_regulator_enable_default(soc_info); + if (rc) { + CAM_ERR(CAM_UTIL, "Regulators enable failed"); + return rc; + } + + rc = cam_soc_util_reset_control(soc_info); + if (rc) { + CAM_ERR(CAM_UTIL, "resource control assert/de-assert failed"); + goto disable_regulator; + } + + if (enable_clocks) { + rc = cam_soc_util_clk_enable_default(soc_info, cesta_client_idx, clk_level); + if (rc) + goto disable_regulator; + } + } + + if (irq_enable) { + for (i = 0; i < soc_info->irq_count; i++) { + if (soc_info->irq_num[i] < 0) { + CAM_ERR(CAM_UTIL, "No IRQ line available for irq: %s dev: %s", + soc_info->irq_name[i], soc_info->dev_name); + rc = -ENODEV; + goto disable_irq; + } + + enable_irq(soc_info->irq_num[i]); + } + } + + return rc; + +disable_irq: + if (irq_enable) { + for (i = i - 1; i >= 0; i--) + disable_irq(soc_info->irq_num[i]); + } + + if (cam_vmrm_proxy_clk_rgl_voting_enable()) { + rc = cam_vmrm_soc_enable_disable_resources(soc_info->hw_id, false); + if (rc) { + CAM_ERR(CAM_UTIL, "vmrm soc disable soc resources failed %d", rc); + return rc; + 
} + } else { + if (enable_clocks) + cam_soc_util_clk_disable_default(soc_info, cesta_client_idx); + +disable_regulator: + cam_soc_util_regulator_disable_default(soc_info); + } + + return rc; +} + +int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info, + int cesta_client_idx, bool disable_clocks, bool disable_irq) +{ + int rc = 0; + + if (!soc_info) + return -EINVAL; + + if (disable_irq) + rc |= cam_soc_util_irq_disable(soc_info); + + if (cam_vmrm_proxy_clk_rgl_voting_enable()) { + rc = cam_vmrm_soc_enable_disable_resources(soc_info->hw_id, false); + if (rc) { + CAM_ERR(CAM_UTIL, "vmrm soc disable soc resources failed %d", rc); + return rc; + } + } else { + if (disable_clocks) + cam_soc_util_clk_disable_default(soc_info, cesta_client_idx); + + cam_soc_util_regulator_disable_default(soc_info); + } + + return rc; +} + +int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info, + uint32_t base_index, uint32_t offset, int size) +{ + void __iomem *base_addr = NULL; + + CAM_DBG(CAM_UTIL, "base_idx %u size=%d", base_index, size); + + if (!soc_info || base_index >= soc_info->num_reg_map || + size <= 0 || (offset + size) >= + CAM_SOC_GET_REG_MAP_SIZE(soc_info, base_index)) + return -EINVAL; + + base_addr = CAM_SOC_GET_REG_MAP_START(soc_info, base_index); + + /* + * All error checking already done above, + * hence ignoring the return value below. 
+ */ + cam_io_dump(base_addr, offset, size); + + return 0; +} + +static inline int cam_soc_util_reg_addr_validation( + uint32_t reg_map_size, uint32_t offset, char *reg_unit) +{ + if (!IS_ALIGNED(offset, 4)) { + CAM_ERR(CAM_UTIL, "Offset: 0x%X of %s is not memory aligned", + offset, reg_unit); + return -EINVAL; + } else if (offset > reg_map_size) { + CAM_ERR(CAM_UTIL, + "Reg offset: 0x%X of %s out of range, reg_map size: 0x%X", + offset, reg_unit, reg_map_size); + return -EINVAL; + } + + return 0; +} + +static int cam_soc_util_dump_cont_reg_range( + struct cam_hw_soc_info *soc_info, + struct cam_reg_range_read_desc *reg_read, uint32_t base_idx, + struct cam_reg_dump_out_buffer *dump_out_buf, + uintptr_t cmd_buf_end) +{ + int i = 0, rc = 0; + uint32_t write_idx = 0, reg_map_size; + + if (!soc_info || !dump_out_buf || !reg_read || !cmd_buf_end) { + CAM_ERR(CAM_UTIL, + "Invalid input args soc_info: %pK, dump_out_buffer: %pK reg_read: %pK cmd_buf_end: %pK", + soc_info, dump_out_buf, reg_read, cmd_buf_end); + rc = -EINVAL; + goto end; + } + + if ((reg_read->num_values) && ((reg_read->num_values > U32_MAX / 2) || + (sizeof(uint32_t) > ((U32_MAX - + sizeof(struct cam_reg_dump_out_buffer) - + dump_out_buf->bytes_written) / + (reg_read->num_values * 2))))) { + CAM_ERR(CAM_UTIL, + "Integer Overflow bytes_written: [%u] num_values: [%u]", + dump_out_buf->bytes_written, reg_read->num_values); + rc = -EOVERFLOW; + goto end; + } + + if ((cmd_buf_end - (uintptr_t)dump_out_buf) <= + (uintptr_t)(sizeof(struct cam_reg_dump_out_buffer) + - sizeof(uint32_t) + dump_out_buf->bytes_written + + (reg_read->num_values * 2 * sizeof(uint32_t)))) { + CAM_ERR(CAM_UTIL, + "Insufficient space in out buffer num_values: [%d] cmd_buf_end: %pK dump_out_buf: %pK", + reg_read->num_values, cmd_buf_end, + (uintptr_t)dump_out_buf); + rc = -EINVAL; + goto end; + } + + write_idx = dump_out_buf->bytes_written / sizeof(uint32_t); + reg_map_size = (uint32_t)soc_info->reg_map[base_idx].size; + for (i = 0; i < 
reg_read->num_values; i++) { + rc = cam_soc_util_reg_addr_validation(reg_map_size, + reg_read->offset + (i * sizeof(uint32_t)), + "cont_reg_range"); + if (rc) + continue; + + dump_out_buf->dump_data_flex[write_idx++] = reg_read->offset + + (i * sizeof(uint32_t)); + dump_out_buf->dump_data_flex[write_idx++] = + cam_soc_util_r(soc_info, base_idx, + (reg_read->offset + (i * sizeof(uint32_t)))); + dump_out_buf->bytes_written += (2 * sizeof(uint32_t)); + } + +end: + return rc; +} + +static int cam_soc_util_dump_dmi_ctxt_reg_range( + struct cam_hw_soc_info *soc_info, + struct cam_dmi_read_desc *reg_read, uint32_t base_idx, + struct cam_reg_dump_out_buffer *dump_out_buf, + struct cam_hw_soc_skip_dump_args *soc_skip_dump_args, + uintptr_t cmd_buf_end, uint64_t type) +{ + int i = 0, rc = 0; + uint32_t write_idx = 0, reg_map_size; + uint32_t offset, start_offset, end_offset, reg_base_type; + bool skip_register_read = false; + + if (!soc_info || !dump_out_buf || !reg_read || !cmd_buf_end) { + CAM_ERR(CAM_UTIL, + "Invalid input args soc_info: %pK, dump_out_buffer: %pK", + soc_info, dump_out_buf); + rc = -EINVAL; + goto end; + } + + if (reg_read->num_pre_writes > CAM_REG_DUMP_DMI_CONFIG_MAX || + reg_read->num_post_writes > CAM_REG_DUMP_DMI_CONFIG_MAX) { + CAM_ERR(CAM_UTIL, + "Invalid number of requested writes, pre: %d post: %d", + reg_read->num_pre_writes, reg_read->num_post_writes); + rc = -EINVAL; + goto end; + } + + if ((reg_read->num_pre_writes + reg_read->dmi_data_read.num_values) + && ((reg_read->num_pre_writes > U32_MAX / 2) || + (reg_read->dmi_data_read.num_values > U32_MAX / 2) || + ((reg_read->num_pre_writes * 2) > U32_MAX - + (reg_read->dmi_data_read.num_values * 2)) || + (sizeof(uint32_t) > ((U32_MAX - + sizeof(struct cam_reg_dump_out_buffer) - + dump_out_buf->bytes_written) / ((reg_read->num_pre_writes + + reg_read->dmi_data_read.num_values) * 2))))) { + CAM_ERR(CAM_UTIL, + "Integer Overflow bytes_written: [%u] num_pre_writes: [%u] num_values: [%u]", + 
dump_out_buf->bytes_written, reg_read->num_pre_writes, + reg_read->dmi_data_read.num_values); + rc = -EOVERFLOW; + goto end; + } + + if ((cmd_buf_end - (uintptr_t)dump_out_buf) <= + (uintptr_t)( + sizeof(struct cam_reg_dump_out_buffer) - sizeof(uint32_t) + + (dump_out_buf->bytes_written + + (reg_read->num_pre_writes * 2 * sizeof(uint32_t)) + + (reg_read->dmi_data_read.num_values * 2 * + sizeof(uint32_t))))) { + CAM_ERR(CAM_UTIL, + "Insufficient space in out buffer num_read_val: [%d] num_write_val: [%d] cmd_buf_end: %pK dump_out_buf: %pK", + reg_read->dmi_data_read.num_values, + reg_read->num_pre_writes, cmd_buf_end, + (uintptr_t)dump_out_buf); + rc = -EINVAL; + goto end; + } + + write_idx = dump_out_buf->bytes_written / sizeof(uint32_t); + reg_map_size = (uint32_t)soc_info->reg_map[base_idx].size; + for (i = 0; i < reg_read->num_pre_writes; i++) { + rc = cam_soc_util_reg_addr_validation(reg_map_size, + reg_read->pre_read_config[i].offset, + "pre_read_config"); + if (rc) + continue; + + cam_soc_util_w_mb(soc_info, base_idx, + reg_read->pre_read_config[i].offset, + reg_read->pre_read_config[i].value); + dump_out_buf->dump_data_flex[write_idx++] = + reg_read->pre_read_config[i].offset; + dump_out_buf->dump_data_flex[write_idx++] = + reg_read->pre_read_config[i].value; + dump_out_buf->bytes_written += (2 * sizeof(uint32_t)); + } + + rc = cam_soc_util_reg_addr_validation(reg_map_size, + reg_read->dmi_data_read.offset, + "dmi_data_read"); + if (!rc) { + for (i = 0; i < reg_read->dmi_data_read.num_values; i++) { + if (type == CAM_REG_DUMP_READ_TYPE_DMI) { + offset = reg_read->dmi_data_read.offset; + } else { + offset = reg_read->dmi_data_read.offset + (i * sizeof(uint32_t)); + rc = cam_soc_util_reg_addr_validation(reg_map_size, + offset, "mc_reg_read"); + if (rc) + continue; + } + + if (soc_skip_dump_args->skip_regdump) { + reg_base_type = soc_skip_dump_args->reg_base_type; + if (reg_base_type == CAM_REG_DUMP_BASE_TYPE_ISP_LEFT || + reg_base_type == 
CAM_REG_DUMP_BASE_TYPE_ISP_RIGHT) { + start_offset = soc_skip_dump_args->start_offset; + end_offset = soc_skip_dump_args->stop_offset; + if (offset >= start_offset && offset <= end_offset) + skip_register_read = true; + } + } + + dump_out_buf->dump_data_flex[write_idx++] = offset; + + if (skip_register_read) + dump_out_buf->dump_data_flex[write_idx++] = 0X1234; + else + dump_out_buf->dump_data_flex[write_idx++] = + cam_soc_util_r_mb(soc_info, base_idx, offset); + + dump_out_buf->bytes_written += (2 * sizeof(uint32_t)); + skip_register_read = false; + } + } + + for (i = 0; i < reg_read->num_post_writes; i++) { + rc = cam_soc_util_reg_addr_validation(reg_map_size, + reg_read->post_read_config[i].offset, + "post_read_config"); + if (rc) + continue; + cam_soc_util_w_mb(soc_info, base_idx, + reg_read->post_read_config[i].offset, + reg_read->post_read_config[i].value); + } +end: + return rc; +} + +static int cam_soc_util_dump_dmi_ctxt_reg_range_user_buf( + struct cam_hw_soc_info *soc_info, + struct cam_dmi_read_desc *reg_read, uint32_t base_idx, + struct cam_hw_soc_skip_dump_args *soc_skip_dump_args, + struct cam_hw_soc_dump_args *dump_args, uint32_t type) +{ + int i; + int rc; + size_t buf_len = 0; + uint8_t *dst; + size_t remain_len; + uint32_t min_len, reg_map_size = 0; + uint32_t *waddr, *start; + uintptr_t cpu_addr; + struct cam_hw_soc_dump_header *hdr; + uint32_t offset, start_offset, end_offset, reg_base_type; + bool skip_register_read = false; + + if (!soc_info || !dump_args || !reg_read) { + CAM_ERR(CAM_UTIL, + "Invalid input args soc_info: %pK, dump_args: %pK", + soc_info, dump_args); + return -EINVAL; + } + + if (reg_read->num_pre_writes > CAM_REG_DUMP_DMI_CONFIG_MAX || + reg_read->num_post_writes > CAM_REG_DUMP_DMI_CONFIG_MAX) { + CAM_ERR(CAM_UTIL, + "Invalid number of requested writes, pre: %d post: %d", + reg_read->num_pre_writes, reg_read->num_post_writes); + return -EINVAL; + } + + rc = cam_mem_get_cpu_buf(dump_args->buf_handle, &cpu_addr, &buf_len); + if 
(rc) { + CAM_ERR(CAM_UTIL, "Invalid handle %u rc %d", + dump_args->buf_handle, rc); + return rc; + } + + if (buf_len <= dump_args->offset) { + CAM_WARN(CAM_UTIL, "Dump offset overshoot offset %zu len %zu", + dump_args->offset, buf_len); + rc = -ENOSPC; + goto end; + } + remain_len = buf_len - dump_args->offset; + min_len = (reg_read->num_pre_writes * 2 * sizeof(uint32_t)) + + (reg_read->dmi_data_read.num_values * 2 * sizeof(uint32_t)) + + sizeof(uint32_t); + if (remain_len < min_len) { + CAM_WARN(CAM_UTIL, + "Dump Buffer exhaust read %d write %d remain %zu min %u", + reg_read->dmi_data_read.num_values, + reg_read->num_pre_writes, remain_len, + min_len); + rc = -ENOSPC; + goto end; + } + + dst = (uint8_t *)cpu_addr + dump_args->offset; + hdr = (struct cam_hw_soc_dump_header *)dst; + memset(hdr, 0, sizeof(struct cam_hw_soc_dump_header)); + scnprintf(hdr->tag, CAM_SOC_HW_DUMP_TAG_MAX_LEN, + "DMI_DUMP:"); + waddr = (uint32_t *)(dst + sizeof(struct cam_hw_soc_dump_header)); + start = waddr; + hdr->word_size = sizeof(uint32_t); + *waddr = soc_info->index; + waddr++; + + reg_map_size = (uint32_t)soc_info->reg_map[base_idx].size; + for (i = 0; i < reg_read->num_pre_writes; i++) { + rc = cam_soc_util_reg_addr_validation(reg_map_size, + reg_read->pre_read_config[i].offset, + "pre_read_config"); + if(rc) + continue; + + cam_soc_util_w_mb(soc_info, base_idx, + reg_read->pre_read_config[i].offset, + reg_read->pre_read_config[i].value); + *waddr++ = reg_read->pre_read_config[i].offset; + *waddr++ = reg_read->pre_read_config[i].value; + } + + rc = cam_soc_util_reg_addr_validation(reg_map_size, + reg_read->dmi_data_read.offset, + "dmi_data_read"); + if (!rc) { + for (i = 0; i < reg_read->dmi_data_read.num_values; i++) { + if (type == CAM_REG_DUMP_READ_TYPE_DMI) { + offset = reg_read->dmi_data_read.offset; + } else { + offset = reg_read->dmi_data_read.offset + (i * sizeof(uint32_t)); + rc = cam_soc_util_reg_addr_validation(reg_map_size, + offset, "mc_reg_read"); + if (rc) + 
continue; + } + + if (soc_skip_dump_args->skip_regdump) { + reg_base_type = soc_skip_dump_args->reg_base_type; + if (reg_base_type == CAM_REG_DUMP_BASE_TYPE_ISP_LEFT || + reg_base_type == CAM_REG_DUMP_BASE_TYPE_ISP_RIGHT) { + offset = reg_read->dmi_data_read.offset; + start_offset = soc_skip_dump_args->start_offset; + end_offset = soc_skip_dump_args->stop_offset; + if (offset >= start_offset && offset <= end_offset) + skip_register_read = true; + } + } + + *waddr++ = offset; + + if (skip_register_read) + *waddr++ = 0X1234; + else + *waddr++ = cam_soc_util_r_mb(soc_info, base_idx, offset); + + skip_register_read = false; + } + } + + for (i = 0; i < reg_read->num_post_writes; i++) { + rc = cam_soc_util_reg_addr_validation(reg_map_size, + reg_read->post_read_config[i].offset, + "post_read_config"); + if (rc) + continue; + + cam_soc_util_w_mb(soc_info, base_idx, + reg_read->post_read_config[i].offset, + reg_read->post_read_config[i].value); + } + + hdr->size = (waddr - start) * hdr->word_size; + dump_args->offset += hdr->size + + sizeof(struct cam_hw_soc_dump_header); + +end: + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; +} + +static int cam_soc_util_dump_cont_reg_range_user_buf( + struct cam_hw_soc_info *soc_info, + struct cam_reg_range_read_desc *reg_read, + uint32_t base_idx, + struct cam_hw_soc_dump_args *dump_args) +{ + int i; + int rc = 0; + size_t buf_len; + uint8_t *dst; + size_t remain_len; + uint32_t min_len, reg_map_size = 0; + uint32_t *waddr, *start; + uintptr_t cpu_addr; + struct cam_hw_soc_dump_header *hdr; + + if (!soc_info || !dump_args || !reg_read) { + CAM_ERR(CAM_UTIL, + "Invalid input args soc_info: %pK, dump_out_buffer: %pK reg_read: %pK", + soc_info, dump_args, reg_read); + return -EINVAL; + } + + rc = cam_mem_get_cpu_buf(dump_args->buf_handle, &cpu_addr, &buf_len); + if (rc) { + CAM_ERR(CAM_UTIL, "Invalid handle %u rc %d", + dump_args->buf_handle, rc); + return rc; + } + if (buf_len <= dump_args->offset) { + CAM_WARN(CAM_UTIL, "Dump 
offset overshoot %zu %zu", + dump_args->offset, buf_len); + rc = -ENOSPC; + goto end; + } + remain_len = buf_len - dump_args->offset; + min_len = (reg_read->num_values * 2 * sizeof(uint32_t)) + + sizeof(struct cam_hw_soc_dump_header) + sizeof(uint32_t); + if (remain_len < min_len) { + CAM_WARN(CAM_UTIL, + "Dump Buffer exhaust read_values %d remain %zu min %u", + reg_read->num_values, + remain_len, + min_len); + rc = -ENOSPC; + goto end; + } + dst = (uint8_t *)cpu_addr + dump_args->offset; + hdr = (struct cam_hw_soc_dump_header *)dst; + memset(hdr, 0, sizeof(struct cam_hw_soc_dump_header)); + scnprintf(hdr->tag, CAM_SOC_HW_DUMP_TAG_MAX_LEN, "%s_REG:", + soc_info->dev_name); + waddr = (uint32_t *)(dst + sizeof(struct cam_hw_soc_dump_header)); + start = waddr; + hdr->word_size = sizeof(uint32_t); + *waddr = soc_info->index; + waddr++; + + reg_map_size = (uint32_t)soc_info->reg_map[base_idx].size; + for (i = 0; i < reg_read->num_values; i++) { + rc = cam_soc_util_reg_addr_validation(reg_map_size, + reg_read->offset + (i * sizeof(uint32_t)), + "cont_reg_range_user_buf"); + if (rc) + continue; + + waddr[0] = reg_read->offset + (i * sizeof(uint32_t)); + waddr[1] = cam_soc_util_r(soc_info, base_idx, + (reg_read->offset + (i * sizeof(uint32_t)))); + waddr += 2; + } + hdr->size = (waddr - start) * hdr->word_size; + dump_args->offset += hdr->size + + sizeof(struct cam_hw_soc_dump_header); +end: + cam_mem_put_cpu_buf(dump_args->buf_handle); + return rc; +} + +static int cam_soc_util_user_reg_dump( + struct cam_reg_dump_desc *reg_dump_desc, + struct cam_hw_soc_dump_args *dump_args, + struct cam_hw_soc_info *soc_info, + struct cam_hw_soc_skip_dump_args *soc_skip_dump_args, + uint32_t reg_base_idx) +{ + int rc = 0; + int i; + struct cam_reg_read_info *reg_read_info = NULL; + + if (!dump_args || !reg_dump_desc || !soc_info) { + CAM_ERR(CAM_UTIL, + "Invalid input parameters %pK %pK %pK", + dump_args, reg_dump_desc, soc_info); + return -EINVAL; + } + for (i = 0; i < 
reg_dump_desc->num_read_range; i++) { + + reg_read_info = ®_dump_desc->read_range_flex[i]; + if (reg_read_info->type == + CAM_REG_DUMP_READ_TYPE_CONT_RANGE) { + rc = cam_soc_util_dump_cont_reg_range_user_buf( + soc_info, + ®_read_info->reg_read, + reg_base_idx, + dump_args); + } else if ((reg_read_info->type == CAM_REG_DUMP_READ_TYPE_DMI) || + (reg_read_info->type == CAM_REG_DUMP_READ_TYPE_CTXT)) { + rc = cam_soc_util_dump_dmi_ctxt_reg_range_user_buf( + soc_info, + ®_read_info->dmi_read, + reg_base_idx, + soc_skip_dump_args, + dump_args, + reg_read_info->type); + } else { + CAM_ERR(CAM_UTIL, + "Invalid Reg dump read type: %d", + reg_read_info->type); + rc = -EINVAL; + goto end; + } + + if (rc) { + CAM_ERR(CAM_UTIL, + "Reg range read failed rc: %d reg_base_idx: %d", + rc, reg_base_idx); + goto end; + } + } +end: + return rc; +} + +int cam_soc_util_reg_dump_to_cmd_buf(void *ctx, + struct cam_cmd_buf_desc *cmd_desc, uint64_t req_id, + cam_soc_util_regspace_data_cb reg_data_cb, + struct cam_hw_soc_dump_args *soc_dump_args, + struct cam_hw_soc_skip_dump_args *soc_skip_dump_args, + bool user_triggered_dump, uintptr_t cpu_addr, size_t buf_size) +{ + int rc = 0, i, j; + uintptr_t cmd_buf_start = 0; + uintptr_t cmd_in_data_end = 0; + uintptr_t cmd_buf_end = 0; + uint32_t reg_base_type = 0; + size_t remain_len = 0; + struct cam_reg_dump_input_info *reg_input_info = NULL; + struct cam_reg_dump_input_info *reg_input_info_u = NULL; + struct cam_reg_dump_desc *reg_dump_desc = NULL; + struct cam_reg_dump_desc *reg_dump_desc_u = NULL; + struct cam_reg_dump_out_buffer *dump_out_buf = NULL; + struct cam_reg_read_info *reg_read_info = NULL; + struct cam_hw_soc_info *soc_info; + uint32_t reg_base_idx = 0; + uint32_t local_num_dump = 0; + uint32_t local_num_read_range = 0; + + if (!ctx || !cmd_desc || !reg_data_cb) { + CAM_ERR(CAM_UTIL, "Invalid args to reg dump [%pK] [%pK]", + cmd_desc, reg_data_cb); + return -EINVAL; + } + + if (!cmd_desc->length || !cmd_desc->size) { + 
CAM_ERR(CAM_UTIL, "Invalid cmd buf size %d %d", + cmd_desc->length, cmd_desc->size); + return -EINVAL; + } + + CAM_DBG(CAM_UTIL, " Get cpu buf success req_id: %llu buf_size: %zu", + req_id, buf_size); + + if ((buf_size < sizeof(uint32_t)) || + ((size_t)cmd_desc->offset > (buf_size - sizeof(uint32_t)))) { + CAM_ERR(CAM_UTIL, "Invalid offset for cmd buf: %zu", + (size_t)cmd_desc->offset); + return -EINVAL; + } + + remain_len = buf_size - (size_t)cmd_desc->offset; + if ((remain_len < (size_t)cmd_desc->size) || (cmd_desc->size < + cmd_desc->length)) { + CAM_ERR(CAM_UTIL, + "Invalid params for cmd buf len: %zu size: %zu remain_len: %zu", + (size_t)cmd_desc->length, (size_t)cmd_desc->length, + remain_len); + return -EINVAL; + } + + cmd_buf_start = cpu_addr + (uintptr_t)cmd_desc->offset; + cmd_in_data_end = cmd_buf_start + (uintptr_t)cmd_desc->length; + cmd_buf_end = cmd_buf_start + (uintptr_t)cmd_desc->size; + if ((cmd_buf_end <= cmd_buf_start) || + (cmd_in_data_end <= cmd_buf_start)) { + CAM_ERR(CAM_UTIL, + "Invalid length or size for cmd buf: [%zu] [%zu]", + (size_t)cmd_desc->length, (size_t)cmd_desc->size); + return -EINVAL; + } + + CAM_DBG(CAM_UTIL, + "Buffer params start [%pK] input_end [%pK] buf_end [%pK]", + cmd_buf_start, cmd_in_data_end, cmd_buf_end); + reg_input_info_u = (struct cam_reg_dump_input_info *) cmd_buf_start; + local_num_dump = reg_input_info_u->num_dump_sets; + if (!local_num_dump) { + CAM_ERR(CAM_UTIL, + "Invalid number of dump sets 0, req_id: [%llu]", req_id); + return -EINVAL; + } + + rc = cam_common_mem_kdup((void **)®_input_info, + reg_input_info_u, + sizeof(struct cam_reg_dump_input_info) + + ((local_num_dump - 1) * sizeof(uint32_t))); + + if (rc) { + CAM_ERR(CAM_UTIL, "Alloc and copy req: %llu input info fail", req_id); + return rc; + } + + if (local_num_dump != reg_input_info->num_dump_sets) { + CAM_ERR(CAM_UTIL, "TOCTOU race with userland, error out"); + goto end; + } + + if ((reg_input_info->num_dump_sets > 1) && (sizeof(uint32_t) > + 
((U32_MAX - sizeof(struct cam_reg_dump_input_info)) / + (reg_input_info->num_dump_sets - 1)))) { + CAM_ERR(CAM_UTIL, + "Integer Overflow req_id: [%llu] num_dump_sets: [%u]", + req_id, reg_input_info->num_dump_sets); + rc = -EOVERFLOW; + goto end; + } + + if ((!reg_input_info->num_dump_sets) || + ((cmd_in_data_end - cmd_buf_start) <= (uintptr_t) + (sizeof(struct cam_reg_dump_input_info) + + ((reg_input_info->num_dump_sets - 1) * sizeof(uint32_t))))) { + CAM_ERR(CAM_UTIL, + "Invalid number of dump sets, req_id: [%llu] num_dump_sets: [%u]", + req_id, reg_input_info->num_dump_sets); + rc = -EINVAL; + goto end; + } + + CAM_DBG(CAM_UTIL, + "reg_input_info req_id: %llu ctx %pK num_dump_sets: %d", + req_id, ctx, reg_input_info->num_dump_sets); + for (i = 0; i < reg_input_info->num_dump_sets; i++) { + if ((cmd_in_data_end - cmd_buf_start) <= (uintptr_t) + reg_input_info->dump_set_offsets_flex[i]) { + CAM_ERR(CAM_UTIL, + "Invalid dump set offset: [%pK], cmd_buf_start: [%pK] cmd_in_data_end: [%pK]", + (uintptr_t)reg_input_info->dump_set_offsets_flex[i], + cmd_buf_start, cmd_in_data_end); + rc = -EINVAL; + goto end; + } + + reg_dump_desc_u = (struct cam_reg_dump_desc *) + (cmd_buf_start + + (uintptr_t)reg_input_info->dump_set_offsets_flex[i]); + local_num_read_range = reg_dump_desc_u->num_read_range; + if (!local_num_read_range) { + CAM_ERR(CAM_UTIL, + "Invalid number of read ranges 0, req_id: [%llu]", req_id); + rc = -EINVAL; + goto end; + } + + rc = cam_common_mem_kdup((void **)®_dump_desc, + reg_dump_desc_u, sizeof(struct cam_reg_dump_desc) + + ((local_num_read_range - 1) * sizeof(struct cam_reg_read_info))); + if (rc) { + CAM_ERR(CAM_UTIL, "Alloc and copy req: [%llu] desc fail", req_id); + goto end; + } + + if (local_num_read_range != reg_dump_desc->num_read_range) { + CAM_ERR(CAM_UTIL, "TOCTOU race with userland, error out"); + goto free_desc; + } + + if ((reg_dump_desc->num_read_range > 1) && + (sizeof(struct cam_reg_read_info) > ((U32_MAX - + sizeof(struct 
cam_reg_dump_desc)) / + (reg_dump_desc->num_read_range - 1)))) { + CAM_ERR(CAM_UTIL, + "Integer Overflow req_id: [%llu] num_read_range: [%u]", + req_id, reg_dump_desc->num_read_range); + rc = -EOVERFLOW; + goto free_desc; + } + + if ((!reg_dump_desc->num_read_range) || + ((cmd_in_data_end - (uintptr_t)reg_dump_desc) <= + (uintptr_t)(sizeof(struct cam_reg_dump_desc) + + ((reg_dump_desc->num_read_range - 1) * + sizeof(struct cam_reg_read_info))))) { + CAM_ERR(CAM_UTIL, + "Invalid number of read ranges, req_id: [%llu] num_read_range: [%d]", + req_id, reg_dump_desc->num_read_range); + rc = -EINVAL; + goto free_desc; + } + + if ((cmd_buf_end - cmd_buf_start) <= (uintptr_t) + (reg_dump_desc->dump_buffer_offset + + sizeof(struct cam_reg_dump_out_buffer))) { + CAM_ERR(CAM_UTIL, + "Invalid out buffer offset: [%pK], cmd_buf_start: [%pK] cmd_buf_end: [%pK]", + (uintptr_t)reg_dump_desc->dump_buffer_offset, + cmd_buf_start, cmd_buf_end); + rc = -EINVAL; + goto free_desc; + } + + reg_base_type = reg_dump_desc->reg_base_type; + if (reg_base_type == 0 || reg_base_type > + CAM_REG_DUMP_BASE_TYPE_SFE_RIGHT) { + CAM_ERR(CAM_UTIL, + "Invalid Reg dump base type: %d", + reg_base_type); + rc = -EINVAL; + goto free_desc; + } + + rc = reg_data_cb(reg_base_type, ctx, &soc_info, ®_base_idx); + if (rc || !soc_info) { + CAM_ERR(CAM_UTIL, + "Reg space data callback failed rc: %d soc_info: [%pK]", + rc, soc_info); + rc = -EINVAL; + goto free_desc; + } + + if (reg_base_idx > soc_info->num_reg_map) { + CAM_ERR(CAM_UTIL, + "Invalid reg base idx: %d num reg map: %d", + reg_base_idx, soc_info->num_reg_map); + rc = -EINVAL; + goto free_desc; + } + + CAM_DBG(CAM_UTIL, + "Reg data callback success req_id: %llu base_type: %d base_idx: %d num_read_range: %d", + req_id, reg_base_type, reg_base_idx, + reg_dump_desc->num_read_range); + + /* If the dump request is triggered by user space + * buffer will be different from the buffer which is received + * in init packet. 
In this case, dump the data to the + * user provided buffer and exit. + */ + if (user_triggered_dump) { + soc_skip_dump_args->reg_base_type = reg_base_type; + rc = cam_soc_util_user_reg_dump(reg_dump_desc, + soc_dump_args, soc_info, soc_skip_dump_args, reg_base_idx); + CAM_INFO(CAM_UTIL, + "%s reg_base_idx %d dumped offset %u", + soc_info->dev_name, reg_base_idx, + soc_dump_args->offset); + goto free_desc; + } + + /* Below code is executed when data is dumped to the + * out buffer received in init packet + */ + dump_out_buf = (struct cam_reg_dump_out_buffer *) + (cmd_buf_start + + (uintptr_t)reg_dump_desc->dump_buffer_offset); + dump_out_buf->req_id = req_id; + dump_out_buf->bytes_written = 0; + soc_skip_dump_args->reg_base_type = reg_base_type; + + for (j = 0; j < reg_dump_desc->num_read_range; j++) { + CAM_DBG(CAM_UTIL, + "Number of bytes written to cmd buffer: %u req_id: %llu", + dump_out_buf->bytes_written, req_id); + reg_read_info = ®_dump_desc->read_range_flex[j]; + if (reg_read_info->type == + CAM_REG_DUMP_READ_TYPE_CONT_RANGE) { + rc = cam_soc_util_dump_cont_reg_range(soc_info, + ®_read_info->reg_read, reg_base_idx, + dump_out_buf, cmd_buf_end); + } else if ((reg_read_info->type == CAM_REG_DUMP_READ_TYPE_DMI) || + (reg_read_info->type == CAM_REG_DUMP_READ_TYPE_CTXT)) { + rc = cam_soc_util_dump_dmi_ctxt_reg_range(soc_info, + ®_read_info->dmi_read, reg_base_idx, + dump_out_buf, soc_skip_dump_args, + cmd_buf_end, reg_read_info->type); + } else { + CAM_ERR(CAM_UTIL, + "Invalid Reg dump read type: %d", + reg_read_info->type); + rc = -EINVAL; + goto free_desc; + } + + if (rc) { + CAM_ERR(CAM_UTIL, + "Reg range read failed rc: %d reg_base_idx: %d dump_out_buf: %pK", + rc, reg_base_idx, dump_out_buf); + goto free_desc; + } + } + + cam_common_mem_free(reg_dump_desc); + } + + if (!rc) + goto end; + +free_desc: + cam_common_mem_free(reg_dump_desc); +end: + cam_common_mem_free(reg_input_info); + + return rc; +} + +/** + * cam_soc_util_print_clk_freq() + * + * @brief: 
This function gets the clk rates for each clk from clk + * driver and prints in log + * + * @soc_info: Device soc struct to be populated + * + * @return: success or failure + */ +int cam_soc_util_print_clk_freq(struct cam_hw_soc_info *soc_info) +{ + int i; + unsigned long clk_rate = 0; + + if (!soc_info) { + CAM_ERR(CAM_UTIL, "Invalid soc info"); + return -EINVAL; + } + + if ((soc_info->num_clk == 0) || + (soc_info->num_clk >= CAM_SOC_MAX_CLK)) { + CAM_ERR(CAM_UTIL, "[%s] Invalid number of clock %d", + soc_info->dev_name, soc_info->num_clk); + return -EINVAL; + } + + for (i = 0; i < soc_info->num_clk; i++) { + clk_rate = cam_wrapper_clk_get_rate(soc_info->clk[i], soc_info->clk_name[i]); + + CAM_INFO(CAM_UTIL, + "[%s] idx = %d clk name = %s clk_rate=%lld", + soc_info->dev_name, i, soc_info->clk_name[i], + clk_rate); + } + + return 0; +} + +inline unsigned long cam_soc_util_get_applied_src_clk( + struct cam_hw_soc_info *soc_info, bool is_max) +{ + unsigned long clk_rate; + + /* + * For CRMC type, exa - ife, csid, cphy + * final clk = max(hw_client_0, hw_client_1, hw_client_2, sw_client) + * For CRMB type, exa - camnoc axi + * final clk = max(hw_client_0 + hw_client_1 + hw_client_2, sw_client) + */ + + if (is_max) { + clk_rate = max(soc_info->applied_src_clk_rates.hw_client[0].high, + soc_info->applied_src_clk_rates.hw_client[1].high); + clk_rate = max(clk_rate, soc_info->applied_src_clk_rates.hw_client[2].high); + clk_rate = max(clk_rate, soc_info->applied_src_clk_rates.sw_client); + } else { + clk_rate = max((soc_info->applied_src_clk_rates.hw_client[0].high + + soc_info->applied_src_clk_rates.hw_client[1].high + + soc_info->applied_src_clk_rates.hw_client[2].high), + soc_info->applied_src_clk_rates.sw_client); + } + + return clk_rate; +} + +int cam_soc_util_regulators_enabled(struct cam_hw_soc_info *soc_info) +{ + int j = 0, rc = 0; + int enabled_cnt = 0; + + for (j = 0; j < soc_info->num_rgltr; j++) { + if (soc_info->rgltr[j]) { + rc = 
cam_wrapper_regulator_is_enabled(soc_info->rgltr[j], + soc_info->rgltr_name[j]); + if (rc < 0) { + CAM_ERR(CAM_UTIL, "%s regulator_is_enabled failed", + soc_info->rgltr_name[j]); + } else if (rc > 0) { + CAM_DBG(CAM_UTIL, "%s regulator enabled", + soc_info->rgltr_name[j]); + enabled_cnt++; + } else { + CAM_DBG(CAM_UTIL, "%s regulator is disabled", + soc_info->rgltr_name[j]); + } + } + } + + return enabled_cnt; +} + +int cam_soc_util_create_debugfs(void) +{ + struct dentry *dbgfileptr = NULL; + int rc; + + rc = cam_debugfs_create_subdir("soc_util", &dbgfileptr); + if (rc) { + CAM_ERR(CAM_CPAS, "DebugFS could not create directory!"); + return -ENOENT; + } + + debugfs_create_bool("clk_rgltr_bus_ops_profiling", 0644, + dbgfileptr, &clk_rgltr_bus_ops_profiling); + debugfs_create_bool("clk_ops_profiling_hw_and_sw_voting", 0644, + dbgfileptr, &clk_ops_profiling_hw_and_sw_voting); + + return 0; +} diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_util.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_util.h new file mode 100644 index 0000000000..90f44285d6 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_soc_util.h @@ -0,0 +1,1076 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_SOC_UTIL_H_ +#define _CAM_SOC_UTIL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cam_io_util.h" +#include "cam_debug_util.h" +#include + +#if IS_REACHABLE(CONFIG_MSM_MMRM) +#include +#endif + +#define NO_SET_RATE -1 +#define INIT_RATE -2 + +/* maximum number of device block */ +#define CAM_SOC_MAX_BLOCK 8 + +/* maximum number of device base */ +#define CAM_SOC_MAX_BASE CAM_SOC_MAX_BLOCK + +/* maximum number of device regulator */ +#define CAM_SOC_MAX_REGULATOR 10 + +/* maximum number of device clock */ +#define CAM_SOC_MAX_CLK 32 + +/* maximum number of optional device clock */ +#define CAM_SOC_MAX_OPT_CLK 7 + +/* maximum number of pinctrl mapping */ +#define CAM_SOC_MAX_PINCTRL_MAP 2 + +/* maximum number of irq per device */ +#define CAM_SOC_MAX_IRQ_LINES_PER_DEV 2 + +/* maximum length of soc name */ +#define CAM_SOC_MAX_LENGTH_NAME 64 + +/* maximum number of soc device use gpio*/ +#define CAM_SOC_MAX_GPIO 4 + +/* vmrm resource id list max */ +#define CAM_VMRM_MAX_RESOURCE_IDS 4 + +/* vmrm resource irq id offset */ +#define CAM_VMRM_RESOURCE_IRQ_OFFSET 2 + +/* vmrm resource irq bit map offset */ +#define CAM_VMRM_RESOURCE_IRQ_BIT_MAP_OFFSET 1 + +/* maximum number of device clock */ +#define CAM_SOC_MAX_RESET 8 + +/* DDR device types */ +#define DDR_TYPE_LPDDR4 6 +#define DDR_TYPE_LPDDR4X 7 +#define DDR_TYPE_LPDDR5 8 +#define DDR_TYPE_LPDDR5X 9 + +/* Maximum length of tag while dumping */ +#define CAM_SOC_HW_DUMP_TAG_MAX_LEN 128 + +/* Client index to be used to vote clk frequency through sw client */ +#define CAM_CLK_SW_CLIENT_IDX -1 + +#define CAM_SAVE_START_TIMESTAMP_IF(ts1) \ +({ \ + if (clk_rgltr_bus_ops_profiling) \ + CAM_GET_TIMESTAMP(ts1); \ +}) + +#define CAM_COMPUTE_TIME_TAKEN_IF(ts1, ts2, usec, op, name) \ +({ \ + if (clk_rgltr_bus_ops_profiling) { \ + CAM_GET_TIMESTAMP(ts2); \ + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, 
usec); \ + trace_cam_log_event(op, name, usec, 0); \ + } \ +}) + +/** + * enum cam_vote_level - Enum for voting level + * + * @CAM_SUSPEND_VOTE : Suspend vote + * @CAM_MINSVS_VOTE : Min SVS vote + * @CAM_LOWSVS_D1_VOTE : Low SVS D1 vote + * @CAM_LOWSVS_VOTE : Low SVS vote + * @CAM_SVS_VOTE : SVS vote + * @CAM_SVSL1_VOTE : SVS Plus vote + * @CAM_NOMINAL_VOTE : Nominal vote + * @CAM_NOMINALL1_VOTE : Nominal plus vote + * @CAM_TURBO_VOTE : Turbo vote + * @CAM_MAX_VOTE : Max voting level, This is invalid level. + */ +enum cam_vote_level { + CAM_SUSPEND_VOTE, + CAM_MINSVS_VOTE, + CAM_LOWSVS_D1_VOTE, + CAM_LOWSVS_VOTE, + CAM_SVS_VOTE, + CAM_SVSL1_VOTE, + CAM_NOMINAL_VOTE, + CAM_NOMINALL1_VOTE, + CAM_TURBO_VOTE, + CAM_MAX_VOTE, +}; + +/* pinctrl states */ +#define CAM_SOC_PINCTRL_STATE_SLEEP "cam_suspend" +#define CAM_SOC_PINCTRL_STATE_DEFAULT "cam_default" + +#define CAM_CESTA_MAX_CLIENTS 3 +#define CAM_NUM_PWR_STATES 2 + +/** + * struct cam_soc_util_hw_client_clk_rates: Information about HW client clock vote + * + * @high: HW client clock vote high value + * @low: HW client clock vote low value + **/ +struct cam_soc_util_hw_client_clk_rates { + unsigned long high; + unsigned long low; +}; + +/** + * struct cam_soc_util_clk_rates: Information about clock vote for SW and HW clients + * + * @sw_client: SW client clock vote + * @hw_client: HW client clock vote + **/ +struct cam_soc_util_clk_rates { + unsigned long sw_client; + struct cam_soc_util_hw_client_clk_rates hw_client[CAM_CESTA_MAX_CLIENTS]; +}; + +/** + * struct cam_soc_reg_map: Information about the mapped register space + * + * @mem_base: Starting location of MAPPED register space + * @mem_cam_base: Starting offset of this register space compared + * to ENTIRE Camera register space + * @size: Size of register space + **/ +struct cam_soc_reg_map { + void __iomem *mem_base; + uint32_t mem_cam_base; + resource_size_t size; +}; + +/** + * struct cam_soc_pinctrl_state: Information about pinctrl state + * + * 
@gpio_state_active: default pinctrl state + * @gpio_state_suspend: suspend state of pinctrl + * @is_active: to identify if pinctrl is in use. + **/ +struct cam_soc_pinctrl_state { + struct pinctrl_state *gpio_state_active; + struct pinctrl_state *gpio_state_suspend; + bool is_active; +}; + +/** + * struct cam_soc_pinctrl_info: Information about pinctrl data + * + * @pinctrl: pintrl object + * @pctrl_state: pinctrl state montior map + **/ +struct cam_soc_pinctrl_info { + struct pinctrl *pinctrl; + struct cam_soc_pinctrl_state pctrl_state[ + CAM_SOC_MAX_PINCTRL_MAP]; +}; + +/** + * struct cam_soc_gpio_data: Information about the gpio pins + * + * @cam_gpio_common_tbl: It is list of al the gpios present in gpios node + * @cam_gpio_common_tbl_size: It is equal to number of gpios prsent in + * gpios node in DTSI + * @cam_gpio_req_tbl It is list of al the requesetd gpios + * @cam_gpio_req_tbl_size: It is size of requested gpios + * @gpio_for_vmrm_purpose: It is just for vmrm purpose, does not has valid gpio request table + **/ +struct cam_soc_gpio_data { + struct gpio *cam_gpio_common_tbl; + uint8_t cam_gpio_common_tbl_size; + struct gpio *cam_gpio_req_tbl; + uint8_t cam_gpio_req_tbl_size; + bool gpio_for_vmrm_purpose; +}; + +/** + * struct cam_hw_soc_info: Soc information pertaining to specific instance of + * Camera hardware driver module + * + * @pdev: Platform device pointer + * @device: Device pointer + * @hw_version: Camera device version + * @index: Instance id for the camera device + * @dev_name: Device Name + * @is_nrt_dev: Whether this is a non-real time device + * @irq_name: Array of irq name associated with the device + * @label_name: label name + * @irq_line: Array of Irq resources + * @irq_num: Array of Irq numbers + * @irq_data: Array of Irq Private data that are passed when IRQs are requested + * @irq_count: The number of IRQ lines associated with the device + * @compatible: Compatible string associated with the device + * @num_mem_block: Number of entry 
 *                          in the "reg-names"
 * @mem_block_name:         Array of the reg block name
 * @mem_block_cam_base:     Array of offset of this register space compared
 *                          to ENTIRE Camera register space
 * @mem_block:              Associated resource structs
 * @reg_map:                Array of Mapped register info for the "reg-names"
 * @num_reg_map:            Number of mapped register space associated
 *                          with mem_block. num_reg_map = num_mem_block in
 *                          most cases
 * @reserve_mem:            Whether to reserve memory for Mem blocks
 * @num_rgltr:              Number of regulators
 * @rgltr_name:             Array of regulator names
 * @rgltr_ctrl_support:     Whether regulator control is supported
 * @rgltr_min_volt:         Array of minimum regulator voltage
 * @rgltr_max_volt:         Array of maximum regulator voltage
 * @rgltr_op_mode:          Array of regulator operation mode
 * @rgltr_type:             Array of regulator types
 * @rgltr:                  Array of associated regulator resources
 * @rgltr_delay:            Array of regulator delay values
 * @use_shared_clk:         Whether this device uses shared clocks
 * @num_clk:                Number of clocks
 * @clk_name:               Array of clock names
 * @clk:                    Array of associated clock resources
 * @clk_rate:               2D array of clock rates representing clock rate
 *                          values at different vote levels
 * @clk_id:                 Clock IDs
 * @shared_clk_mask:        Mask indicating which of the clocks are shared with
 *                          other devices. Set rate on these clocks needs to go
 *                          through camera clk wrapper for aggregation.
 * @prev_clk_level:         Last vote level
 * @src_clk_idx:            Source clock index that is rate-controllable
 * @applied_src_clk_rates:  Applied src clock rates for SW and HW client
 * @clk_level_valid:        Indicates whether corresponding level is valid
 * @lowest_clk_level:       Lowest clock level that has valid freq info
 * @highest_clk_level:      Highest clock level that has valid freq info
 * @scl_clk_count:          Number of scalable clocks present
 * @scl_clk_idx:            Index of scalable clocks
 * @optional_clk_name:      Array of optional clock names
 * @optional_clk:           Array of associated optional clock resources
 * @optional_clk_rate:      Optional clock's clk rate
 * @optional_clk_id:        Optional clock IDs
 * @optional_shared_clk_mask: Mask indicating which of the optional clocks are
 *                          shared with other devices. Set rate on these clocks
 *                          needs to go through camera clk wrapper for
 *                          aggregation.
 * @gpio_data:              Pointer to gpio info
 * @mmrm_handle:            MMRM Client handle for src clock
 * @is_clk_drv_en:          If clock drv is enabled in hw
 * @is_crmb_clk:            If clock supports CRMB passthrough when voting
 * @pinctrl_info:           Pointer to pinctrl info
 * @dentry:                 Debugfs entry
 * @clk_level_override_high: Clk level set from debugfs. When cesta is enabled,
 *                          used to override high clk value
 * @clk_level_override_low: Low clk level set from debugfs when cesta is
 *                          enabled, used to override low clk value
 * @clk_control_enable:     Enable/disable clk rate control through debugfs
 * @cam_cx_ipeak_enable:    cx-ipeak enable/disable flag
 * @cam_cx_ipeak_bit:       cx-ipeak mask for driver
 * @soc_private:            Soc private data
 * @hw_id:                  Vm resource manager to identify hw
 * @num_vmrm_resource_ids:  Vm resource manager resource ids count
 * @vmrm_resource_ids:      Vm resource manager resource ids array, parameter
 *                          order is mem label, mem tag, irq label. For example
 *                          one device has two irqs and two mem space,
 *                          parameter is like this, mem label, mem tag,
 *                          irq1 label, irq2 label.
 * @num_reset:              Number of reset controls
 * @reset_name:             Array of reset control names
 * @resets:                 Array of associated reset control resources
+ */ +struct cam_hw_soc_info { + struct platform_device *pdev; + struct device *dev; + uint32_t hw_version; + uint32_t index; + const char *dev_name; + bool is_nrt_dev; + const char *irq_name[CAM_SOC_MAX_IRQ_LINES_PER_DEV]; + const char *label_name; + struct resource *irq_line[CAM_SOC_MAX_IRQ_LINES_PER_DEV]; + int irq_num[CAM_SOC_MAX_IRQ_LINES_PER_DEV]; + void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV]; + uint32_t irq_count; + const char *compatible; + + uint32_t num_mem_block; + const char *mem_block_name[CAM_SOC_MAX_BLOCK]; + uint32_t mem_block_cam_base[CAM_SOC_MAX_BLOCK]; + struct resource *mem_block[CAM_SOC_MAX_BLOCK]; + struct cam_soc_reg_map reg_map[CAM_SOC_MAX_BASE]; + uint32_t num_reg_map; + uint32_t reserve_mem; + + uint32_t num_rgltr; + const char *rgltr_name[CAM_SOC_MAX_REGULATOR]; + uint32_t rgltr_ctrl_support; + uint32_t rgltr_min_volt[CAM_SOC_MAX_REGULATOR]; + uint32_t rgltr_max_volt[CAM_SOC_MAX_REGULATOR]; + uint32_t rgltr_op_mode[CAM_SOC_MAX_REGULATOR]; + uint32_t rgltr_type[CAM_SOC_MAX_REGULATOR]; + struct regulator *rgltr[CAM_SOC_MAX_REGULATOR]; + uint32_t rgltr_delay[CAM_SOC_MAX_REGULATOR]; + + uint32_t use_shared_clk; + uint32_t num_clk; + const char *clk_name[CAM_SOC_MAX_CLK]; + struct clk *clk[CAM_SOC_MAX_CLK]; + int32_t clk_rate[CAM_MAX_VOTE][CAM_SOC_MAX_CLK]; + uint32_t clk_id[CAM_SOC_MAX_CLK]; + uint32_t shared_clk_mask; + int32_t prev_clk_level; + int32_t src_clk_idx; + struct cam_soc_util_clk_rates applied_src_clk_rates; + bool clk_level_valid[CAM_MAX_VOTE]; + uint32_t lowest_clk_level; + uint32_t highest_clk_level; + int32_t scl_clk_count; + int32_t scl_clk_idx[CAM_SOC_MAX_CLK]; + const char *optional_clk_name[CAM_SOC_MAX_OPT_CLK]; + struct clk *optional_clk[CAM_SOC_MAX_OPT_CLK]; + int32_t optional_clk_rate[CAM_SOC_MAX_OPT_CLK]; + uint32_t optional_clk_id[CAM_SOC_MAX_OPT_CLK]; + uint32_t optional_shared_clk_mask; + + void *mmrm_handle; + + bool is_clk_drv_en; + bool is_crmb_clk; + + struct cam_soc_gpio_data *gpio_data; + struct 
cam_soc_pinctrl_info pinctrl_info; + + struct dentry *dentry; + uint32_t clk_level_override_high; + uint32_t clk_level_override_low; + bool clk_control_enable; + bool cam_cx_ipeak_enable; + int32_t cam_cx_ipeak_bit; + + void *soc_private; + uint32_t hw_id; +#ifdef CONFIG_SPECTRA_VMRM + uint32_t num_vmrm_resource_ids; + uint32_t vmrm_resource_ids[CAM_VMRM_MAX_RESOURCE_IDS]; +#endif + + uint32_t num_reset; + const char *reset_name[CAM_SOC_MAX_RESET]; + struct reset_control *resets[CAM_SOC_MAX_RESET]; + +}; + +/** + * struct cam_hw_soc_dump_header - SOC dump header + * + * @Brief: soc hw dump header + * + * @tag: Tag name for the header + * @word_size: Size of each word + * @size: Total size of dumped data + */ +struct cam_hw_soc_dump_header { + uint8_t tag[CAM_SOC_HW_DUMP_TAG_MAX_LEN]; + uint64_t size; + uint32_t word_size; +}; + +/** + * struct cam_hw_soc_dump_args: SOC Dump args + * + * @request_id: Issue request id + * @offset: Buffer offset, updated as the informaton is dumped + * @buf_handle: Buffer handle of the out buffer + */ +struct cam_hw_soc_dump_args { + uint64_t request_id; + size_t offset; + uint32_t buf_handle; +}; + +/** + * struct cam_hw_soc_skip_dump : SOC Dump args for skiping offset + * + * @skip_regdump skip offset is required for dump or not + * @start_offset: offset for skipping reg dump + * @stop_offset: offset for stoping skip reg dump + * @reg_base_type: register base type + */ +struct cam_hw_soc_skip_dump_args { + bool skip_regdump; + uint32_t start_offset; + uint32_t stop_offset; + uint32_t reg_base_type; +}; + +/* + * CAM_SOC_GET_REG_MAP_START + * + * @brief: This MACRO will get the mapped starting address + * where the register space can be accessed + * + * @__soc_info: Device soc information + * @__base_index: Index of register space in the HW block + * + * @return: Returns a pointer to the mapped register memory + */ +#define CAM_SOC_GET_REG_MAP_START(__soc_info, __base_index) \ + ((!__soc_info || __base_index >= 
__soc_info->num_reg_map) ? \ + NULL : __soc_info->reg_map[__base_index].mem_base) + +/* + * CAM_SOC_GET_REG_MAP_CAM_BASE + * + * @brief: This MACRO will get the cam_base of the + * register space + * + * @__soc_info: Device soc information + * @__base_index: Index of register space in the HW block + * + * @return: Returns an int32_t value. + * Failure: -1 + * Success: Starting offset of register space compared + * to entire Camera Register Map + */ +#define CAM_SOC_GET_REG_MAP_CAM_BASE(__soc_info, __base_index) \ + ((!__soc_info || __base_index >= __soc_info->num_reg_map) ? \ + -1 : __soc_info->reg_map[__base_index].mem_cam_base) + +/* + * CAM_SOC_GET_REG_MAP_SIZE + * + * @brief: This MACRO will get the size of the mapped + * register space + * + * @__soc_info: Device soc information + * @__base_index: Index of register space in the HW block + * + * @return: Returns a uint32_t value. + * Failure: 0 + * Success: Non-zero size of mapped register space + */ +#define CAM_SOC_GET_REG_MAP_SIZE(__soc_info, __base_index) \ + ((!__soc_info || __base_index >= __soc_info->num_reg_map) ? \ + 0 : __soc_info->reg_map[__base_index].size) + +/** + * cam_soc_util_get_level_from_string() + * + * @brief: Get the associated vote level for the input string + * + * @string: Input string to compare with. + * @level: Vote level corresponds to input string. + * + * @return: Success or failure + */ +int cam_soc_util_get_level_from_string(const char *string, + enum cam_vote_level *level); + +/** + * cam_soc_util_get_dt_properties() + * + * @brief: Parse the DT and populate the common properties that + * are part of the soc_info structure - register map, + * clocks, regulators, irq, etc. 
+ * + * @soc_info: Device soc struct to be populated + * + * @return: Success or failure + */ +int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info); + +/** + * cam_soc_util_request_platform_resource() + * + * @brief: Request regulator, irq, and clock resources + * + * @soc_info: Device soc information + * @handler: Irq handler function pointer + * @irq_data: Irq handler function CB data + * + * @return: Success or failure + */ +int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info, + irq_handler_t handler, void **irq_data); + +/** + * cam_soc_util_release_platform_resource() + * + * @brief: Release regulator, irq, and clock resources + * + * @soc_info: Device soc information + * + * @return: Success or failure + */ +int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info); + +/** + * cam_soc_util_enable_platform_resource() + * + * @brief: Enable regulator, irq resources + * + * @soc_info: Device soc information + * @cesta_client_idx: CESTA Client idx for hw client based src clocks + * @enable_clocks: Boolean flag: + * TRUE: Enable all clocks in soc_info Now. + * False: Don't enable clocks Now. Driver will + * enable independently. + * @clk_level: Clock level to be applied. + * Applicable only if enable_clocks is true + * Valid range : 0 to (CAM_MAX_VOTE - 1) + * @irq_enable: Boolean flag: + * TRUE: Enable IRQ in soc_info Now. + * False: Don't enable IRQ Now. Driver will + * enable independently. + * + * @return: Success or failure + */ +int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info, + int cesta_client_idx, bool enable_clocks, enum cam_vote_level clk_level, + bool irq_enable); + +/** + * cam_soc_util_disable_platform_resource() + * + * @brief: Disable regulator, irq resources + * + * @soc_info: Device soc information + * @cesta_client_idx: CESTA Client idx for hw client based src clocks + * @disable_irq: Boolean flag: + * TRUE: Disable IRQ in soc_info Now. 
+ * False: Don't disable IRQ Now. Driver will + * disable independently. + * + * @return: Success or failure + */ +int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info, + int cesta_client_idx, bool disable_clocks, bool disable_irq); + +/** + * cam_soc_util_get_clk_round_rate() + * + * @brief: Get the rounded clock rate for the given clock's + * clock rate value + * + * @soc_info: Device soc information + * @clk_index: Clock index in soc_info for which round rate is needed + * @clk_rate: Input clock rate for which rounded rate is needed + * + * @return: Rounded clock rate + */ +long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info, + uint32_t clk_index, unsigned long clk_rate); + +/** + * cam_soc_util_set_src_clk_rate() + * + * @brief: Set the rate on the source clock for sw or hw clients. Requires a valid + * CESTA client idx for hw client voting. + * + * @soc_info: Device soc information + * @cesta_client_idx: CESTA client idx if src clock belongs to cesta client, otherwise -1 + * @clk_rate_high: High clock rate associated with the src clk, applies to sw client vote + * if not cesta client + * @clk_rate_low: Low clock rate associated with the src clk, only applies to cesta based + * hw client vote + * + * @return: success or failure + */ +int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info, int cesta_client_idx, + unsigned long clk_rate_high, unsigned long clk_rate_low); + + +/** + * cam_soc_util_get_option_clk_by_name() + * + * @brief: Get reference to optional clk using name + * + * @soc_info: Device soc information + * @clk_name: Name of clock to find reference for + * @clk_index: Clk index in the option clk array to be returned + * + * @return: 0: Success + * Negative: Failure + */ +int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info, + const char *clk_name, int32_t *clk_index); + +/** + * cam_soc_util_put_optional_clk() + * + * @brief: Put clock corresponding to index specified in 
params + * + * @soc_info: Device soc information + * @clk_idx: Clock index in optional clocks to put + * + * @return: Success or failure + */ +int cam_soc_util_put_optional_clk(struct cam_hw_soc_info *soc_info, + int32_t clk_idx); + +/** + * cam_soc_util_get_reset_resource() + * + * @brief: Get reference to get reset resource + * + * @soc_info: Device soc information + * + * @return: 0: Success + * Negative: Failure + */ +int cam_soc_util_get_reset_resource(struct cam_hw_soc_info *soc_info); + +/** + * cam_soc_util_put_reset_resource() + * + * @brief: Put reset resource + * + * @soc_info: Device soc information + * + * @return: Success or failure + */ +int cam_soc_util_put_reset_resource(struct cam_hw_soc_info *soc_info); + +/** + * cam_soc_util_clk_enable() + * + * @brief: Enable clock specified in params + * + * @soc_info: Device soc information + * @cesta_client_idx: CESTA Client idx for hw client based src clocks + * @optional_clk: Whether to set optional clk or normal clk with + * the idx given + * @clk_idx: Clock index to set + * @apply_level: Apply level. + * -1 for 0 rate + * any other value indicate level for normal clocks + * For optional clocks any other value means the rate saved + * in soc_info + * + * @return: Success or failure + */ +int cam_soc_util_clk_enable(struct cam_hw_soc_info *soc_info, int cesta_client_idx, + bool optional_clk, int32_t clk_idx, int32_t apply_level); + +/** + * cam_soc_util_set_clk_rate_level() + * + * @brief: Apply clock rates for the requested level. + * This applies the new requested level for all + * the clocks listed in DT based on their values. 
+ * + * @soc_info: Device soc information + * @cesta_client_idx: CESTA client idx for HW client based src clocks + * @clk_level_high: Clock level number to set, high value if crm based src clock + * @clk_level_low: Low clock level value if crm based src clock + * @do_not_set_src_clk: If true, set clock rates except the src clk + * + * @return: Success or failure + */ +int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info, + int cesta_client_idx, enum cam_vote_level clk_level_high, + enum cam_vote_level clk_level_low, bool do_not_set_src_clk); + +/** + * cam_soc_util_clk_disable() + * + * @brief: Disable clock specified in params + * + * @soc_info: Device soc information + * @cesta_client_idx: CESTA Client idx for hw client based src clocks + * @optional_clk: Whether to set optional clk or normal clk with + * the idx given + * @clk_idx: Clock index to disable + * + * @return: Success or failure + */ +int cam_soc_util_clk_disable(struct cam_hw_soc_info *soc_info, int cesta_client_idx, + bool optional_clk, int32_t clk_idx); + +/** + * cam_soc_util_irq_enable() + * + * @brief: Enable IRQ in SOC + * + * @soc_info: Device soc information + * + * @return: Success or failure + */ +int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info); + +/** + * cam_soc_util_irq_disable() + * + * @brief: Disable IRQ in SOC + * + * @soc_info: Device soc information + * + * @return: Success or failure + */ +int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info); + +/** + * cam_soc_util_regulator_enable() + * + * @brief: Enable single regulator + * + * @rgltr Regulator that needs to be turned ON + * @rgltr_name Associated Regulator name + * @rgltr_min_volt: Requested minimum volatage + * @rgltr_max_volt: Requested maximum volatage + * @rgltr_op_mode: Requested Load + * @rgltr_delay: Requested delay needed aaftre enabling regulator + * + * @return: Success or failure + */ +int cam_soc_util_regulator_enable(struct regulator *rgltr, + const char *rgltr_name, + 
uint32_t rgltr_min_volt, uint32_t rgltr_max_volt, + uint32_t rgltr_op_mode, uint32_t rgltr_delay); + +/** + * cam_soc_util_regulator_enable() + * + * @brief: Disable single regulator + * + * @rgltr Regulator that needs to be turned ON + * @rgltr_name Associated Regulator name + * @rgltr_min_volt: Requested minimum volatage + * @rgltr_max_volt: Requested maximum volatage + * @rgltr_op_mode: Requested Load + * @rgltr_delay: Requested delay needed aaftre enabling regulator + * + * @return: Success or failure + */ +int cam_soc_util_regulator_disable(struct regulator *rgltr, + const char *rgltr_name, + uint32_t rgltr_min_volt, uint32_t rgltr_max_volt, + uint32_t rgltr_op_mode, uint32_t rgltr_delay); + +/** + * cam_soc_util_w() + * + * @brief: Camera SOC util for register write + * + * @soc_info: Device soc information + * @base_index: Index of register space in the HW block + * @offset: Offset of register to be writen + * @data: Value to be written + * + * @return: Success or Failure + */ +static inline int cam_soc_util_w(struct cam_hw_soc_info *soc_info, + uint32_t base_index, uint32_t offset, uint32_t data) +{ + if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index)) { + CAM_ERR(CAM_UTIL, "No valid mapped starting address found"); + return -EINVAL; + } + return cam_io_w(data, + CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset); +} + +/** + * cam_soc_util_w_mb() + * + * @brief: Camera SOC util for register write with memory barrier. + * Memory Barrier is only before the write to ensure the + * order. If need to ensure this write is also flushed + * call wmb() independently in the caller. 
+ * + * @soc_info: Device soc information + * @base_index: Index of register space in the HW block + * @offset: Offset of register to be writen + * @data: Value to be written + * + * @return: Success or Failure + */ +static inline int cam_soc_util_w_mb(struct cam_hw_soc_info *soc_info, + uint32_t base_index, uint32_t offset, uint32_t data) +{ + if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index)) { + CAM_ERR(CAM_UTIL, "No valid mapped starting address found"); + return -EINVAL; + } + return cam_io_w_mb(data, + CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset); +} + +/** + * cam_soc_util_r() + * + * @brief: Camera SOC util for register read + * + * @soc_info: Device soc information + * @base_index: Index of register space in the HW block + * @offset: Offset of register to be read + * + * @return: Value read from the register address + */ +static inline uint32_t cam_soc_util_r(struct cam_hw_soc_info *soc_info, + uint32_t base_index, uint32_t offset) +{ + if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index)) { + CAM_ERR(CAM_UTIL, "No valid mapped starting address found"); + return 0; + } + return cam_io_r( + CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset); +} + +/** + * cam_soc_util_r_mb() + * + * @brief: Camera SOC util for register read with memory barrier. + * Memory Barrier is only before the write to ensure the + * order. If need to ensure this write is also flushed + * call rmb() independently in the caller. 
+ * + * @soc_info: Device soc information + * @base_index: Index of register space in the HW block + * @offset: Offset of register to be read + * + * @return: Value read from the register address + */ +static inline uint32_t cam_soc_util_r_mb(struct cam_hw_soc_info *soc_info, + uint32_t base_index, uint32_t offset) +{ + if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index)) { + CAM_ERR(CAM_UTIL, "No valid mapped starting address found"); + return 0; + } + return cam_io_r_mb( + CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset); +} + +/** + * cam_soc_util_reg_dump() + * + * @brief: Camera SOC util for dumping a range of register + * + * @soc_info: Device soc information + * @base_index: Index of register space in the HW block + * @offset: Start register offset for the dump + * @size: Size specifying the range for dump + * + * @return: Success or Failure + */ +int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info, + uint32_t base_index, uint32_t offset, int size); + +void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info, + int cesta_client_idx); + +int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info, int cesta_client_idx, + enum cam_vote_level clk_level); + +int cam_soc_util_get_clk_level(struct cam_hw_soc_info *soc_info, + int64_t clk_rate, int clk_idx, int32_t *clk_lvl); + +int cam_soc_util_get_valid_clk_rate(struct cam_hw_soc_info *soc_info, + enum cam_vote_level req_level, int64_t *apply_rate); + +/* Callback to get reg space data for specific HW */ +typedef int (*cam_soc_util_regspace_data_cb)(uint32_t reg_base_type, + void *ctx, struct cam_hw_soc_info **soc_info_ptr, + uint32_t *reg_base_idx); + +/** + * cam_soc_util_reg_dump_to_cmd_buf() + * + * @brief: Camera SOC util for dumping sets of register ranges + * command buffer + * + * @ctx: Context info from specific hardware manager + * @cmd_desc: Command buffer descriptor + * @req_id: Last applied req id for which reg dump is required + * @reg_data_cb: Callback 
function to get reg space info based on type + * in command buffer + * @soc_dump_args: Dump buffer args to dump the soc information. + * @user_triggered_dump: Flag to indicate if the dump request is issued by + * user. + * @cpu_addr: cpu address of buffer + * @size: size of buffer + * @return: Success or Failure + */ +int cam_soc_util_reg_dump_to_cmd_buf(void *ctx, + struct cam_cmd_buf_desc *cmd_desc, uint64_t req_id, + cam_soc_util_regspace_data_cb reg_data_cb, + struct cam_hw_soc_dump_args *soc_dump_args, + struct cam_hw_soc_skip_dump_args *soc_skip_dump_args, + bool user_triggered_dump, uintptr_t cpu_addr, size_t buf_size); + +/** + * cam_soc_util_print_clk_freq() + * + * @brief: This function gets the clk rates for each clk from clk + * driver and prints in log + * + * @soc_info: Device soc struct to be populated + * + * @return: success or failure + */ +int cam_soc_util_print_clk_freq(struct cam_hw_soc_info *soc_info); + +/** + * cam_soc_util_select_pinctrl_state() + * + * @brief: This function gets the pinctrl handle + * + * @soc_info: Device soc struct to be populated + * @active: True for active and false for suspend state + * + * @return: success or failure + */ +int cam_soc_util_select_pinctrl_state( + struct cam_hw_soc_info *soc_info, int idx, bool active); + +/** + * cam_soc_util_print_clk_freq() + * + * @brief: This function checks whether regulators of this device are enabled at this + * time. 
+ * + * @soc_info: Device soc struct to be populated + * + * @return: Number of regulators enabled + */ +int cam_soc_util_regulators_enabled(struct cam_hw_soc_info *soc_info); + +/** + * cam_soc_util_cesta_populate_crm_device() + * + * @brief: This function populates the camera cesta crm device in soc util + * + * @return: success or failure + */ +inline int cam_soc_util_cesta_populate_crm_device(void); + +/** + * cam_soc_util_cesta_channel_switch() + * + * @brief: This function triggers the application of power states to crm + * and channel switch operation in hw. Also, for camera it applies + * the high vote of the active channel + * @cesta_client_idx: CESTA client index through which power states need to be applied + * @identifier: Identifying the caller triggerring channel switch + * + * @return: success or failure + */ +int cam_soc_util_cesta_channel_switch(uint32_t cesta_client_idx, const char *identifier); + +/** + * cam_soc_util_get_applied_src_clk() + * + * @brief: Inline function to get applied src clk rate. 
+ + * @soc_info: Device soc struct to be populated + * @is_max: Is max of all hw clients if cesta is enabled + * + * @return: success or failure + */ +inline unsigned long cam_soc_util_get_applied_src_clk( + struct cam_hw_soc_info *soc_info, bool is_max); + +/** + * cam_soc_util_get_string_from_level() + * + * @brief: Returns the string for a given clk level + * + * @level: Clock level + * + * @return: String corresponding to the clk level + */ +const char *cam_soc_util_get_string_from_level(enum cam_vote_level level); + +/** + * cam_wrapper_clk_get_rate() + * + * @brief: Wrapper for clk get rate + * + * @clk: Clock + * + * @name: Name of the clock + * + * @return: Clock rate + */ +inline unsigned long cam_wrapper_clk_get_rate(struct clk *clk, const char *name); + +/** + * cam_wrapper_regulator_set_load() + * + * @brief: Wrapper for regulator set load + * + * @regulator: Regulator + * + * @uA_load: Load current + * + * @name: Name of the regulator + * + * @return: Success or failure + */ +inline int cam_wrapper_regulator_set_load( + struct regulator *regulator, int uA_load, const char *name); + +/** + * cam_wrapper_regulator_set_mode() + * + * @brief: Wrapper for regulator set mode + * + * @regulator: Regulator + * + * @mode: Mode + * + * @name: Name of the regulator + * + * @return: Success or failure + */ +inline int cam_wrapper_regulator_set_mode( + struct regulator *regulator, unsigned int mode, const char *name); + +/** + * cam_soc_util_set_bypass_drivers() + * + * @brief: Set bypass drivers + * + * @bypass_drivers: Bypass drivers + * + * @return: Void + */ +inline void cam_soc_util_set_bypass_drivers( + uint32_t bypass_drivers); + +/** + * cam_soc_util_create_debugfs() + * + * @return: Success or failure + */ +int cam_soc_util_create_debugfs(void); + +/** + * cam_soc_util_reset_control() + * + * @brief: Assert/Deassert reset resource + * + * @soc_info: Device soc struct to be populated + * + * @return: Success or failure + */ +int 
cam_soc_util_reset_control(struct cam_hw_soc_info *soc_info); + +#endif /* _CAM_SOC_UTIL_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_trace.c b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_trace.c new file mode 100644 index 0000000000..9b45091b4b --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_trace.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +/* Instantiate tracepoints */ +#define CREATE_TRACE_POINTS +#include "cam_trace.h" diff --git a/qcom/opensource/camera-kernel/drivers/cam_utils/cam_trace.h b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_trace.h new file mode 100644 index 0000000000..34f046ce91 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_utils/cam_trace.h @@ -0,0 +1,455 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#if !defined(_CAM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _CAM_TRACE_H + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM camera +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE ./cam_trace + +#include +#include +#include "cam_req_mgr_core.h" +#include "cam_req_mgr_interface.h" +#include "cam_context.h" + +#define CAM_DEFAULT_VALUE 0xFF +#define CAM_TRACE_PRINT_MAX_LEN 512 + +TRACE_EVENT(cam_context_state, + TP_PROTO(const char *name, struct cam_context *ctx), + TP_ARGS(name, ctx), + TP_STRUCT__entry( + __field(void*, ctx) + __field(uint32_t, state) + __string(name, name) + ), + TP_fast_assign( + __entry->ctx = ctx; + __entry->state = ctx->state; + __assign_str(name, name); + ), + TP_printk( + "%s: State ctx=%p ctx_state=%u", + __get_str(name), __entry->ctx, __entry->state + ) +); + +TRACE_EVENT(cam_isp_activated_irq, + TP_PROTO(struct cam_context *ctx, unsigned int substate, + unsigned int event, uint64_t timestamp), + TP_ARGS(ctx, substate, event, timestamp), + TP_STRUCT__entry( + __field(void*, ctx) + __field(uint32_t, state) + __field(uint32_t, substate) + __field(uint32_t, event) + __field(uint64_t, ts) + __field(int32_t, link_hdl) + ), + TP_fast_assign( + __entry->ctx = ctx; + __entry->state = ctx->state; + __entry->substate = substate; + __entry->event = event; + __entry->ts = timestamp; + __entry->link_hdl = ctx->link_hdl; + + ), + TP_printk( + "ISP: IRQ ctx=%p ctx_state=%u substate=%u event=%u ts=%llu link_hdl=0x%x", + __entry->ctx, __entry->state, __entry->substate, + __entry->event, __entry->ts, __entry->link_hdl + ) +); + +TRACE_EVENT(cam_log_event, + TP_PROTO(const char *string1, const char *string2, + uint64_t val1, uint64_t val2), + TP_ARGS(string1, string2, val1, val2), + TP_STRUCT__entry( + __string(string1, string1) + __string(string2, string2) + __field(uint64_t, val1) + __field(uint64_t, val2) + ), + TP_fast_assign( + __assign_str(string1, string1); + __assign_str(string2, string2); + __entry->val1 = val1; + __entry->val2 = val2; + ), + TP_printk( + "%s: %s val1=%llu val2=%llu", + __get_str(string1), __get_str(string2), + __entry->val1, __entry->val2 + ) +); + 
+/*
+ * Formats a printf-style message into a per-event dynamic array so arbitrary
+ * debug text can be emitted through the trace buffer.
+ */
+TRACE_EVENT(cam_log_debug,
+	TP_PROTO(const char *fmt, va_list *args),
+	TP_ARGS(fmt, args),
+	TP_STRUCT__entry(
+		__dynamic_array(char, msg, CAM_TRACE_PRINT_MAX_LEN)
+	),
+	TP_fast_assign(
+		va_list ap;
+
+		/* Copy the caller's va_list so the caller can keep using it. */
+		va_copy(ap, *args);
+		vsnprintf(__get_dynamic_array(msg), CAM_TRACE_PRINT_MAX_LEN, fmt, ap);
+		va_end(ap);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+
+/*
+ * ICP firmware debug line with firmware timestamp.
+ * NOTE(review): TP_printk format "[%s] %llu %s: " ends with a trailing
+ * ": " after the message text — looks accidental; confirm intended output.
+ */
+TRACE_EVENT(cam_icp_fw_dbg,
+	TP_PROTO(char *dbg_message, uint64_t timestamp, char *dev_name),
+	TP_ARGS(dbg_message, timestamp, dev_name),
+	TP_STRUCT__entry(
+		__string(dbg_message, dbg_message)
+		__field(uint64_t, timestamp)
+		__string(dev_name, dev_name)
+	),
+	TP_fast_assign(
+		__assign_str(dbg_message, dbg_message);
+		__entry->timestamp = timestamp;
+		__assign_str(dev_name, dev_name);
+	),
+	TP_printk(
+		"[%s] %llu %s: ",
+		__get_str(dev_name), __entry->timestamp, __get_str(dbg_message)
+	)
+);
+
+/* Buffer-done for a request on a context (ctx type string, link handle). */
+TRACE_EVENT(cam_buf_done,
+	TP_PROTO(const char *ctx_type, struct cam_context *ctx,
+		struct cam_ctx_request *req),
+	TP_ARGS(ctx_type, ctx, req),
+	TP_STRUCT__entry(
+		__string(ctx_type, ctx_type)
+		__field(void*, ctx)
+		__field(int32_t, link_hdl)
+		__field(uint64_t, request)
+	),
+	TP_fast_assign(
+		__assign_str(ctx_type, ctx_type);
+		__entry->ctx = ctx;
+		__entry->link_hdl = ctx->link_hdl;
+		__entry->request = req->request_id;
+	),
+	TP_printk(
+		"%5s: BufDone ctx=%p request=%llu link_hdl=0x%x",
+		__get_str(ctx_type), __entry->ctx, __entry->request, __entry->link_hdl
+	)
+);
+
+/* Request applied by an entity (device id, request id, link handle). */
+TRACE_EVENT(cam_apply_req,
+	TP_PROTO(const char *entity, uint32_t id, uint64_t req_id, int32_t link_hdl),
+	TP_ARGS(entity, id, req_id, link_hdl),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, id)
+		__field(uint64_t, req_id)
+		__field(int32_t, link_hdl)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->id = id;
+		__entry->req_id = req_id;
+		__entry->link_hdl = link_hdl;
+	),
+	TP_printk(
+		"%8s: ApplyRequest id=%u request=%llu link_hdl=0x%x",
+		__get_str(entity), __entry->id, __entry->req_id,
+		__entry->link_hdl
+	)
+);
+
+/* Entity skipped applying a frame for the given request id. */
+TRACE_EVENT(cam_notify_frame_skip,
+	TP_PROTO(const char *entity, uint64_t req_id),
+	TP_ARGS(entity, req_id),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint64_t, req_id)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->req_id = req_id;
+	),
+	TP_printk(
+		"%8s: NotifyFrameSkip request=%llu",
+		__get_str(entity), __entry->req_id
+	)
+);
+
+/*
+ * Flush of a request (or all requests) on a link.
+ * NOTE(review): req_id is declared int64_t but printed with %llu — confirm
+ * whether negative "flush all" sentinels should print signed (%lld).
+ */
+TRACE_EVENT(cam_flush_req,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_flush_info *info),
+	TP_ARGS(link, info),
+	TP_STRUCT__entry(
+		__field(uint32_t, type)
+		__field(int64_t, req_id)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__entry->type = info->flush_type;
+		__entry->req_id = info->req_id;
+		__entry->link = link;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"FlushRequest type=%u request=%llu link=%pK session=%pK",
+		__entry->type, __entry->req_id, __entry->link,
+		__entry->session
+	)
+);
+
+/* Device connected to a req-mgr link (name, dev id, pipeline delay). */
+TRACE_EVENT(cam_req_mgr_connect_device,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_device_info *info),
+	TP_ARGS(link, info),
+	TP_STRUCT__entry(
+		__string(name, info->name)
+		__field(uint32_t, id)
+		__field(uint32_t, delay)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, info->name);
+		__entry->id = info->dev_id;
+		__entry->delay = info->p_delay;
+		__entry->link = link;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"ReqMgr Connect name=%s id=%u pd=%d link=%pK session=%pK",
+		__get_str(name), __entry->id, __entry->delay,
+		__entry->link, __entry->session
+	)
+);
+
+/* Req-mgr applying a request to one connected device. */
+TRACE_EVENT(cam_req_mgr_apply_request,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_apply_request *req,
+		struct cam_req_mgr_connected_device *dev),
+	TP_ARGS(link, req, dev),
+	TP_STRUCT__entry(
+		__string(name, dev->dev_info.name)
+		__field(uint32_t, dev_id)
+		__field(uint64_t, req_id)
+		__field(int32_t, link_hdl)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev->dev_info.name);
+		__entry->dev_id = dev->dev_info.dev_id;
+		__entry->req_id = req->request_id;
+		__entry->link = link;
+		__entry->session = link->parent;
+		__entry->link_hdl = req->link_hdl;
+	),
+	TP_printk(
+		"ReqMgr ApplyRequest devname=%s devid=%u request=%lld link=%pK session=%pK link_hdl=0x%x",
+		__get_str(name), __entry->dev_id, __entry->req_id,
+		__entry->link, __entry->session, __entry->link_hdl
+	)
+);
+
+/* Device added a request into a slot of a per-pd request table. */
+TRACE_EVENT(cam_req_mgr_add_req,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		int idx, struct cam_req_mgr_add_request *add_req,
+		struct cam_req_mgr_req_tbl *tbl,
+		struct cam_req_mgr_connected_device *dev),
+	TP_ARGS(link, idx, add_req, tbl, dev),
+	TP_STRUCT__entry(
+		__string(name, dev->dev_info.name)
+		__field(uint32_t, dev_id)
+		__field(uint64_t, req_id)
+		__field(uint32_t, slot_id)
+		__field(uint32_t, delay)
+		__field(uint32_t, readymap)
+		__field(uint32_t, devicemap)
+		__field(void*, link)
+		__field(void*, session)
+		__field(int32_t, link_hdl)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev->dev_info.name);
+		__entry->dev_id = dev->dev_info.dev_id;
+		__entry->req_id = add_req->req_id;
+		__entry->slot_id = idx;
+		__entry->delay = tbl->pd;
+		__entry->readymap = tbl->slot[idx].req_ready_map;
+		__entry->devicemap = tbl->dev_mask;
+		__entry->link = link;
+		__entry->link_hdl = link->link_hdl;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"ReqMgr AddRequest devname=%s devid=%d request=%lld slot=%d pd=%d readymap=%x devicemap=%d link=%pK session=%pK link_hdl=0x%x",
+		__get_str(name), __entry->dev_id, __entry->req_id,
+		__entry->slot_id, __entry->delay, __entry->readymap,
+		__entry->devicemap, __entry->link, __entry->session,
+		__entry->link_hdl
+	)
+);
+
+/*
+ * Delay detected while processing a request.
+ * NOTE(review): ctx_id arrives as uint32_t but is stored/printed as 64-bit.
+ */
+TRACE_EVENT(cam_delay_detect,
+	TP_PROTO(const char *entity,
+		const char *text, uint64_t req_id,
+		uint32_t ctx_id, int32_t link_hdl,
+		int32_t session_hdl, int rc),
+	TP_ARGS(entity, text, req_id, ctx_id,
+		link_hdl, session_hdl, rc),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__string(text, text)
+		__field(uint64_t, req_id)
+		__field(uint64_t, ctx_id)
+		__field(int32_t, link_hdl)
+		__field(int32_t, session_hdl)
+		__field(int32_t, rc)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__assign_str(text, text);
+		__entry->req_id = req_id;
+		__entry->ctx_id = ctx_id;
+		__entry->link_hdl = link_hdl;
+		__entry->session_hdl = session_hdl;
+		__entry->rc = rc;
+	),
+	TP_printk(
+		"%s: %s request=%lld ctx_id=%lld link_hdl=0x%x session_hdl=0x%x rc=%d",
+		__get_str(entity), __get_str(text), __entry->req_id,
+		__entry->ctx_id, __entry->link_hdl,
+		__entry->session_hdl, __entry->rc
+	)
+);
+
+/* Request submitted to hardware by an entity. */
+TRACE_EVENT(cam_submit_to_hw,
+	TP_PROTO(const char *entity, uint64_t req_id),
+	TP_ARGS(entity, req_id),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint64_t, req_id)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->req_id = req_id;
+	),
+	TP_printk(
+		"%8s: submit request=%llu",
+		__get_str(entity), __entry->req_id
+	)
+);
+
+/* IRQ of the given type raised for an entity. */
+TRACE_EVENT(cam_irq_activated,
+	TP_PROTO(const char *entity, uint32_t irq_type),
+	TP_ARGS(entity, irq_type),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, irq_type)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->irq_type = irq_type;
+	),
+	TP_printk(
+		"%8s: got irq type=%d",
+		__get_str(entity), __entry->irq_type
+	)
+);
+
+/* IRQ of the given type fully handled for an entity. */
+TRACE_EVENT(cam_irq_handled,
+	TP_PROTO(const char *entity, uint32_t irq_type),
+	TP_ARGS(entity, irq_type),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, irq_type)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->irq_type = irq_type;
+	),
+	TP_printk(
+		"%8s: handled irq type=%d",
+		__get_str(entity), __entry->irq_type
+	)
+);
+
+/* CDM callback delivered with the given status. */
+TRACE_EVENT(cam_cdm_cb,
+	TP_PROTO(const char *entity, uint32_t status),
+	TP_ARGS(entity, status),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, status)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->status = status;
+	),
+	TP_printk(
+		"%8s: cdm cb status=%d",
+		__get_str(entity), __entry->status
+	)
+);
+
+/* CCI burst transfer trace (controller index, master, queue, value). */
+TRACE_EVENT(cam_cci_burst,
+	TP_PROTO(const int32_t idx, int32_t m,
+		int32_t q, const char *msg,
+		uint32_t val),
+	TP_ARGS(idx, m, q, msg, val),
+	TP_STRUCT__entry(
+		__field(int32_t, index)
+		__field(int32_t, master)
+		__field(int32_t, queue)
+		__string(msg, msg)
+		__field(uint32_t, value)
+	),
+	TP_fast_assign(
+		__entry->index = idx;
+		__entry->master = m;
+		__entry->queue = q;
+		__assign_str(msg, msg);
+		__entry->value = val;
+	),
+	TP_printk(
+		"CCI%d_M%d_Q%d %s : 0x%x",
+		__entry->index, __entry->master, __entry->queue, __get_str(msg), __entry->value
+	)
+);
+
+/* Performance measurement in microseconds for an entity/operation pair. */
+TRACE_EVENT(cam_perf,
+	TP_PROTO(const char *entity,
+		const char *msg, unsigned long val),
+	TP_ARGS(entity, msg, val),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__string(msg, msg)
+		__field(unsigned long, value)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__assign_str(msg, msg);
+		__entry->value = val;
+	),
+	TP_printk(
+		"%s: %s: %lu (usec)",
+		__get_str(entity), __get_str(msg), __entry->value
+	)
+);
+#endif /* _CAM_TRACE_H */
+
+/* This part must be outside protection */
+/* NOTE(review): include target lost in extraction — likely <trace/define_trace.h>. */
+#include
diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_inter_vm_comms.h b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_inter_vm_comms.h
new file mode 100644
index 0000000000..8b8922025a
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_inter_vm_comms.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __CAM_INTER_VM_COMMS_H__
+#define __CAM_INTER_VM_COMMS_H__
+
+/* NOTE(review): include targets lost in extraction — restore before applying. */
+#include
+#include
+#include
+#include
+
+/**
+ * @brief cam_intervm_response_data
+ *
+ * @param result : The result from the message handler.
+ * @param msg_id : The ID of the message that this response is for.
+ */
+struct cam_intervm_response_data {
+	int result;
+	uint32_t msg_id;
+};
+
+/**
+ * @brief cam_intervm_response
+ *
+ * @param send_response : Whether or not to send a response.
+ * @param res_msg : Response message.
+ * @param msg_size : Response message size.
+ */
+struct cam_intervm_response {
+	bool send_response;
+	void *res_msg;
+	size_t msg_size;
+};
+
+/**
+ * @brief Signature of the callback function to handle incoming messages.
+ *        It is advised to create a work to process this message
+ *        asynchronously on the client side.
+ *
+ * @param data : Incoming message data.
+ * @param msg_size : Size of the incoming message.
+ * @param res : Response to the message.
+ *
+ * @return void.
+ */
+typedef void (*handle_message_cb) (void *data, size_t msg_size, struct cam_intervm_response *res);
+
+/**
+ * cam_inter_vm_comms_ops : Operations allowed for
+ * inter VM communication.
+ */
+struct cam_inter_vm_comms_ops {
+	/**
+	 * @brief Initializes the comms handle with the given
+	 *        attributes and initiates the connection.
+	 *
+	 * @param handle : The comms handle for this VM.
+	 * @param msg_size : The highest size of the messages exchanged.
+	 * @param msg_cb : The callback function to handle the incoming
+	 *                 messages.
+	 * @param is_server_vm : Indicates whether this is the primary VM.
+	 *
+	 * @return Status of operation. Negative in case of error. Zero otherwise.
+	 */
+	int (*init)(void **handle, size_t msg_size,
+		handle_message_cb msg_cb, bool is_server_vm);
+
+	/**
+	 * @brief Sends the message to the secondary VM if the
+	 *        communication has been established.
+	 *
+	 * @param handle : The comms handle for this VM.
+	 * @param data : The data to be sent over.
+	 * @param size : The size of the data to be sent.
+	 *
+	 * @return Status of operation. Negative in case of error. Zero otherwise.
+	 */
+	int (*send_message)(void *handle, void *data, size_t size);
+
+	/**
+	 * @brief Closes the communication and un-initializes the handle.
+	 *
+	 * @param handle : The comms handle for this VM.
+	 *
+	 * @return Status of operation. Negative in case of error. Zero otherwise.
+	 */
+	int (*deinit)(void *handle);
+};
+
+#endif /*__CAM_INTER_VM_COMMS_H__*/
diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_inter_vm_comms_data.h b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_inter_vm_comms_data.h
new file mode 100644
index 0000000000..920e8f92d9
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_inter_vm_comms_data.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __CAM_INTER_VM_COMMS_DATA_H__
+#define __CAM_INTER_VM_COMMS_DATA_H__
+
+/* NOTE(review): include targets lost in extraction — restore before applying. */
+#include
+#include
+#include
+#include
+#include
+
+#define CAM_INTER_VM_COMMS_WQ_NAME "cam_inter_vm_comms_wq"
+#define CAM_INTER_VM_COMMS_MAX_PENDING_WORKS 5
+
+/**
+ * @brief cam_inter_vm_comms_handle
+ *
+ * @param comms_lock : Mutex to synchronize the operations on the handle.
+ * @param msg_buffer : Buffer to hold the incoming messages.
+ * @param msg_size : The highest size of the messages exchanged.
+ * @param message_cb : Callback function to handle incoming messages.
+ * @param is_comms_established : Indicates whether the two way comms has been established.
+ * @param is_comms_terminated : Indicates whether the communication has been terminated.
+ * @param comms_protocol_data : Data specific to the underlying communication protocol.
+ * @param is_server_vm : Indicates whether this handle corresponds to PVM.
+ * @param msg_recv_wq : Work queue to process incoming messages.
+ * @param msg_recv_work : Work struct to process incoming messages.
+ */ +struct cam_inter_vm_comms_handle { + struct mutex comms_lock; + void *msg_buffer; + size_t msg_size; + handle_message_cb message_cb; + bool is_comms_established; + bool is_comms_terminated; + void *comms_protocol_data; + bool is_server_vm; + struct workqueue_struct *msg_recv_wq; + struct work_struct msg_recv_work; +}; + +#endif /*__CAM_INTER_VM_COMMS_DATA_H__*/ diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm.c b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm.c new file mode 100644 index 0000000000..000ef3eb03 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm.c @@ -0,0 +1,2815 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include "camera_main.h" +#include "cam_debug_util.h" +#include "cam_vmrm.h" +#include "cam_common_util.h" +#include "cam_vmrm_interface.h" +#include "cam_soc_util.h" +#include "cam_req_mgr_dev.h" +#include "cam_mem_mgr_api.h" + +struct cam_vmrm_intf_dev *g_vmrm_intf_dev; + +static const struct of_device_id msm_cam_vmrm_intf_dt_match[] = { + { + .compatible = "qcom,cam-vmrm-intf", + }, + {}, +}; + +struct cam_vmrm_intf_dev *cam_vmrm_get_intf_dev(void) +{ + return g_vmrm_intf_dev; +} + +static enum gh_vm_names cam_vmid_to_gh_name(uint32_t cam_vmid) +{ + enum gh_vm_names gh_name = GH_VM_MAX; + + switch (cam_vmid) { + case CAM_PVM: + gh_name = GH_PRIMARY_VM; + break; + case CAM_SVM1: + gh_name = GH_TRUSTED_VM; + break; + default: + CAM_ERR(CAM_VMRM, "Invalid camera vmid: %d", cam_vmid); + break; + } + + return gh_name; +} + +#ifdef CONFIG_SPECTRA_RESOURCE_REQUEST_ENABLE +static uint32_t cam_gh_name_to_vmid(enum gh_vm_names vm_name) +{ + uint32_t vmid = CAM_VM_MAX; + + switch (vm_name) { + case GH_PRIMARY_VM: + vmid = CAM_PVM; + break; + case GH_TRUSTED_VM: + vmid = CAM_SVM1; + break; + default: + CAM_ERR(CAM_VMRM, "Invalid gh name: %d", vm_name); + break; + } + + return 
vmid; +} +#endif + +static enum gh_irq_label cam_irq_label_convert_gh_label(uint32_t label) +{ + int diff; + enum gh_irq_label gh_label = GH_IRQ_LABEL_MAX; + + if ((label >= CAM_IRQ_LABEL_BASE) && (label <= CAM_IRQ_LABEL_MAX)) { + diff = label - CAM_IRQ_LABEL_BASE; + gh_label = GH_IRQ_LABEL_CAM_BASE + diff; + } else { + CAM_ERR(CAM_VMRM, "Invalid cam irq label %d", label); + } + + return gh_label; +} + +static uint32_t cam_gh_irq_label_convert_cam_irq_label(enum gh_irq_label label) +{ + int diff; + uint32_t cam_irq_label = CAM_IRQ_LABEL_MAX + 1; + + if ((label >= GH_IRQ_LABEL_CAM_BASE) && + (label < GH_IRQ_LABEL_CAM_BASE + CAM_IRQ_LABEL_MAX)) { + diff = label - GH_IRQ_LABEL_CAM_BASE; + cam_irq_label = CAM_IRQ_LABEL_BASE + diff; + } else { + CAM_ERR(CAM_VMRM, "Invalid gh irq label %d", label); + } + + return cam_irq_label; +} + +static enum gh_mem_notifier_tag cam_mem_tag_convert_gh_tag(uint32_t tag) +{ + int diff; + enum gh_mem_notifier_tag gh_tag = GH_MEM_NOTIFIER_TAG_MAX; + + if ((tag >= CAM_MEM_TAG_BASE) && (tag <= CAM_MEM_TAG_MAX)) { + diff = tag - CAM_MEM_TAG_BASE; + gh_tag = GH_MEM_NOTIFIER_TAG_CAM_BASE + diff; + } else { + CAM_ERR(CAM_VMRM, "Invalid cam mem tag %d", tag); + } + + return gh_tag; +} + +static uint32_t cam_gh_tag_convert_cam_mem_tag(enum gh_mem_notifier_tag tag) +{ + int diff; + uint32_t cam_tag = CAM_MEM_TAG_MAX + 1; + + if ((tag >= GH_MEM_NOTIFIER_TAG_CAM_BASE) && + (tag < GH_MEM_NOTIFIER_TAG_CAM_BASE + CAM_MEM_TAG_MAX)) { + diff = tag - GH_MEM_NOTIFIER_TAG_CAM_BASE; + cam_tag = CAM_MEM_TAG_BASE + diff; + } else { + CAM_ERR(CAM_VMRM, "Invalid gh mem tag %d", tag); + } + + return cam_tag; +} + +static bool cam_validate_gh_mem_notifier_tag(enum gh_mem_notifier_tag tag) +{ + bool valid = false; + + if ((tag >= GH_MEM_NOTIFIER_TAG_CAM_BASE) && + (tag < GH_MEM_NOTIFIER_TAG_CAM_BASE + CAM_MEM_TAG_MAX)) + valid = true; + else { + CAM_ERR(CAM_VMRM, "Invalid gh mem tag %d", tag); + } + + return valid; +} + +static bool 
cam_validate_cam_mem_label(uint32_t label) +{ + bool valid = false; + + if ((label >= CAM_MEM_LABEL_BASE) && (label <= CAM_MEM_TAG_MAX)) + valid = true; + else + CAM_ERR(CAM_VMRM, "Invalid cam mem label %d", label); + + return valid; +} + +static bool cam_validate_gh_irq_label(enum gh_irq_label label) +{ + bool valid = false; + + if ((label >= GH_IRQ_LABEL_CAM_BASE) && + (label < GH_IRQ_LABEL_CAM_BASE + CAM_IRQ_LABEL_MAX)) + valid = true; + else { + CAM_ERR(CAM_VMRM, "Invalid gh irq label %d", label); + } + + return valid; +} + +static struct gh_acl_desc *cam_populate_acl(enum gh_vm_names vm_name) +{ + struct gh_acl_desc *acl_desc; + gh_vmid_t vmid; + + cam_vmrm_ghd_rm_get_vmid(vm_name, &vmid); + + acl_desc = CAM_MEM_ZALLOC(offsetof(struct gh_acl_desc, acl_entries[1]), GFP_KERNEL); + if (!acl_desc) { + CAM_ERR(CAM_VMRM, "Could not reserve memory for ACL entries"); + return ERR_PTR(-ENOMEM); + } + + acl_desc->n_acl_entries = 1; + acl_desc->acl_entries[0].vmid = vmid; + acl_desc->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W; + + return acl_desc; +} + +static struct gh_notify_vmid_desc *cam_populate_vmid_desc(enum gh_vm_names vm_name) +{ + struct gh_notify_vmid_desc *vmid_desc; + gh_vmid_t vmid; + + cam_vmrm_ghd_rm_get_vmid(vm_name, &vmid); + + vmid_desc = CAM_MEM_ZALLOC(offsetof(struct gh_notify_vmid_desc, + vmid_entries[1]), GFP_KERNEL); + if (!vmid_desc) { + CAM_ERR(CAM_VMRM, "Could not reserve memory for vmid entries"); + return ERR_PTR(-ENOMEM); + } + + vmid_desc->n_vmid_entries = 1; + vmid_desc->vmid_entries[0].vmid = vmid; + + return vmid_desc; +} + +static int cam_mem_entry_is_duplicate(phys_addr_t mem_base, uint32_t mem_block_size) +{ + struct cam_io_mem_entry *mem, *mem_temp; + int rc = 0; + + if (!list_empty(&g_vmrm_intf_dev->io_res.mem)) { + list_for_each_entry_safe(mem, mem_temp, &g_vmrm_intf_dev->io_res.mem, list) { + if (mem->base == mem_base) { + if (mem->size != mem_block_size) { + CAM_DBG(CAM_VMRM, + "Duplicate mem size mismatch previous mem base 
0x%x size 0x%x current size 0x%x", + mem_base, mem->size, mem_block_size); + if (mem->size < mem_block_size) { + CAM_DBG(CAM_VMRM, + "update mem size from 0x%x to 0x%x", + mem->size, mem_block_size); + mem->size = mem_block_size; + } + rc = 2; + break; + } + rc = 1; + break; + } + } + } + + return rc; +} + +static struct cam_io_irq_entry *cam_irq_lookup(enum gh_irq_label label, uint32_t irq_num) +{ + struct cam_io_irq_entry *irq = NULL, *irq_temp; + bool found = false; + + if (!list_empty(&g_vmrm_intf_dev->io_res.irq)) { + list_for_each_entry_safe(irq, irq_temp, &g_vmrm_intf_dev->io_res.irq, list) { + if (label) { + if (cam_irq_label_convert_gh_label(irq->label) == label) { + found = true; + break; + } + } + if (irq_num) { + if (irq->irq_num == irq_num) { + found = true; + break; + } + } + } + } + if (!found) { + CAM_ERR(CAM_VMRM, "Not found the irq num %d gh label %d in list", irq_num, label); + irq = NULL; + } + + return irq; +} + +struct cam_hw_instance *cam_hw_instance_lookup(uint32_t hw_id, uint32_t mem_tag, + uint32_t irq_label, uint32_t gpio_num) +{ + struct cam_hw_instance *hw_pos = NULL, *hw_temp; + int i; + + if (!list_empty(&g_vmrm_intf_dev->hw_instance)) { + list_for_each_entry_safe(hw_pos, hw_temp, &g_vmrm_intf_dev->hw_instance, list) { + if (hw_id && (hw_pos->hw_id == hw_id)) { + CAM_DBG(CAM_VMRM, "found hw id 0x%x", hw_id); + goto end; + } + + if (mem_tag && (hw_pos->resources.mem_tag == mem_tag)) { + CAM_DBG(CAM_VMRM, "found hw id 0x%x mem tag %d", hw_pos->hw_id, + hw_pos->resources.mem_tag); + goto end; + } + + if (irq_label && ((hw_pos->resources.irq_label[0] == irq_label) || + (hw_pos->resources.irq_label[1] == irq_label))) { + CAM_DBG(CAM_VMRM, "found hw id 0x%x irq label %d", + hw_pos->hw_id, irq_label); + goto end; + } + + if (gpio_num) { + for (i = 0; i < hw_pos->resources.gpio_count; i++) { + if (gpio_num == hw_pos->resources.gpio_num[i]) { + CAM_DBG(CAM_VMRM, "found hw id 0x%x gpio num %d", + hw_pos->hw_id, gpio_num); + goto end; + } + } + 
} + } + hw_pos = NULL; + CAM_ERR(CAM_VMRM, "Not found hw id 0x%x mem tag %d irq label %d gpio num %d", + hw_id, mem_tag, irq_label, gpio_num); + } else { + CAM_WARN(CAM_VMRM, "hw instance empty"); + } + +end: + return hw_pos; +} + +struct cam_driver_node *cam_driver_node_lookup(uint32_t driver_id) +{ + struct cam_driver_node *driver_pos = NULL, *driver_temp; + + if (!list_empty(&g_vmrm_intf_dev->driver_node)) { + list_for_each_entry_safe(driver_pos, driver_temp, + &g_vmrm_intf_dev->driver_node, list) { + if (driver_pos->driver_id == driver_id) { + CAM_DBG(CAM_VMRM, "found driver id 0x%x", driver_id); + goto end; + } + } + driver_pos = NULL; + CAM_ERR(CAM_VMRM, "Not found driver id 0x%x", driver_id); + } else { + CAM_WARN(CAM_VMRM, "driver node empty"); + } + +end: + return driver_pos; +} + +static void cam_set_hw_mem_owner(struct cam_hw_instance *hw_pos, uint32_t vm_owner) +{ + int i; + struct cam_io_mem_entry *mem, *mem_temp; + + if (hw_pos->resources.num_mem_block) { + for (i = 0; i < hw_pos->resources.num_mem_block; i++) { + if (!list_empty(&g_vmrm_intf_dev->io_res.mem)) { + list_for_each_entry_safe(mem, mem_temp, + &g_vmrm_intf_dev->io_res.mem, list) { + if (mem->base == hw_pos->resources.mem_block_addr[i]) { + mem->vm_owner = vm_owner; + mem->lend_in_progress = false; + break; + } + } + } + } + } + + if (hw_pos->resources.gpio_count) { + for (i = 0; i < hw_pos->resources.gpio_count; i++) { + if (!list_empty(&g_vmrm_intf_dev->io_res.mem)) { + list_for_each_entry_safe(mem, mem_temp, + &g_vmrm_intf_dev->io_res.mem, list) { + if (mem->base == hw_pos->resources.gpio_mem_addr[i]) { + mem->vm_owner = vm_owner; + mem->lend_in_progress = false; + break; + } + } + } + } + } +} + +static void cam_set_hw_irq_owner(uint32_t irq_num, uint32_t vm_owner) +{ + struct cam_io_irq_entry *irq, *irq_temp; + + if (!list_empty(&g_vmrm_intf_dev->io_res.irq)) { + list_for_each_entry_safe(irq, irq_temp, &g_vmrm_intf_dev->io_res.irq, list) { + if (irq->irq_num == irq_num) { + 
irq->vm_owner = vm_owner; + break; + } + } + } + +} + +static void cam_mem_notification_pvm_handler( + enum gh_mem_notifier_tag tag, unsigned long notify_type, + void *entry_data, void *notif_msg) +{ + int rc = 0; + uint32_t mem_hdl_gh; + uint32_t cam_mem_tag; + struct gh_rm_notif_mem_accepted_payload *accepted_payload; + struct gh_rm_notif_mem_released_payload *released_payload; + struct cam_hw_instance *hw; + + if (!cam_validate_gh_mem_notifier_tag(tag)) { + CAM_ERR(CAM_VMRM, "Invalid tag: %d", tag); + return; + } + + if (!notif_msg) { + CAM_ERR(CAM_VMRM, "Invalid msg"); + return; + } + + if (notify_type == GH_RM_NOTIF_MEM_ACCEPTED) { + accepted_payload = (struct gh_rm_notif_mem_accepted_payload *)notif_msg; + mem_hdl_gh = accepted_payload->mem_handle; + cam_mem_tag = cam_gh_tag_convert_cam_mem_tag(tag); + + mutex_lock(&g_vmrm_intf_dev->lock); + hw = cam_hw_instance_lookup(0, cam_mem_tag, 0, 0); + if (!hw) { + CAM_ERR(CAM_VMRM, "Look up hw mem tag failed %d", cam_mem_tag); + mutex_unlock(&g_vmrm_intf_dev->lock); + return; + } + mutex_unlock(&g_vmrm_intf_dev->lock); + + CAM_DBG(CAM_VMRM, "Receive hw id 0x%x mem accept notification tag: %d", + hw->hw_id, cam_mem_tag); + + spin_lock(&hw->spin_lock); + hw->resources.ready_mask |= BIT(0); + cam_set_hw_mem_owner(hw, CAM_SVM1); + if (hw->resources.ready_mask == hw->resources.valid_mask) { + hw->vm_owner = CAM_SVM1; + hw->is_using = true; + hw->resources.ready_mask = 0; + atomic_set(&hw->lend_in_progress, 0); + CAM_DBG(CAM_VMRM, "hw 0x%x is acquired by vm %d", hw->hw_id, CAM_SVM1); + } + spin_unlock(&hw->spin_lock); + } else if (notify_type == GH_RM_NOTIF_MEM_RELEASED) { + released_payload = (struct gh_rm_notif_mem_released_payload *)notif_msg; + mem_hdl_gh = released_payload->mem_handle; + cam_mem_tag = cam_gh_tag_convert_cam_mem_tag(tag); + + mutex_lock(&g_vmrm_intf_dev->lock); + hw = cam_hw_instance_lookup(0, cam_mem_tag, 0, 0); + if (!hw) { + CAM_ERR(CAM_VMRM, "Look up hw mem tag failed %d", cam_mem_tag); + 
mutex_unlock(&g_vmrm_intf_dev->lock); + return; + } + + rc = cam_vmrm_gh_mem_reclaim(mem_hdl_gh, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "Reclaim mem failed for tag: %d, rc: %d", + cam_mem_tag, rc); + mutex_unlock(&g_vmrm_intf_dev->lock); + return; + } + + CAM_DBG(CAM_VMRM, "Reclaim hw id 0x%x mem succeed for tag: %d", + hw->hw_id, cam_mem_tag); + + hw->resources.ready_mask |= BIT(0); + cam_set_hw_mem_owner(hw, CAM_PVM); + if (hw->resources.ready_mask == hw->resources.valid_mask) { + hw->vm_owner = CAM_PVM; + hw->is_using = false; + hw->resources.ready_mask = 0; + CAM_DBG(CAM_VMRM, "Reclaim hw succeed 0x%x", hw->hw_id); + } + mutex_unlock(&g_vmrm_intf_dev->lock); + } else { + CAM_ERR(CAM_VMRM, "Invalid notification type: %lld", notify_type); + return; + } +} + +static void cam_mem_notification_svm_handler( + enum gh_mem_notifier_tag tag, unsigned long notify_type, + void *entry_data, void *notif_msg) +{ + int rc = 0; + enum gh_vm_names gh_vm_name; + uint32_t mem_hdl = 0, mem_hdl_gh, label, flags_accept; + uint32_t cam_mem_tag, trans_type, vm_name; + struct cam_hw_instance *hw; + struct gh_acl_desc *acl_desc = NULL; + struct gh_rm_notif_mem_shared_payload *payload; + uint8_t flags = GH_RM_MEM_NOTIFY_OWNER_ACCEPTED; + + CAM_DBG(CAM_VMRM, "notify type: %lld tag: %d", notify_type, tag); + + if (notify_type != GH_RM_NOTIF_MEM_SHARED) { + CAM_ERR(CAM_VMRM, "Invalid notification type: %lld", notify_type); + return; + } + + if (!cam_validate_gh_mem_notifier_tag(tag)) { + CAM_ERR(CAM_VMRM, "Invalid tag: %d", tag); + return; + } + + if (!notif_msg) { + CAM_ERR(CAM_VMRM, "Invalid msg"); + return; + } + + payload = (struct gh_rm_notif_mem_shared_payload *)notif_msg; + label = payload->label; + trans_type = payload->trans_type; + mem_hdl_gh = payload->mem_handle; + if (!cam_validate_cam_mem_label(label)) { + CAM_ERR(CAM_VMRM, "Invalid label: %d", label); + return; + } + + if (trans_type != GH_RM_TRANS_TYPE_LEND) { + CAM_ERR(CAM_VMRM, "Invalid transfer type: %d", trans_type); + 
return; + } + + CAM_DBG(CAM_VMRM, "Trans type: %d label: %d handle: %d", + trans_type, label, mem_hdl); + + vm_name = cam_vmrm_intf_get_vmid(); + cam_mem_tag = cam_gh_tag_convert_cam_mem_tag(tag); + gh_vm_name = cam_vmid_to_gh_name(vm_name); + acl_desc = cam_populate_acl(gh_vm_name); + if (IS_ERR(acl_desc)) { + CAM_ERR(CAM_VMRM, "Populate acl failed %d", PTR_ERR(acl_desc)); + return; + } + + flags_accept = GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS| + GH_RM_MEM_ACCEPT_VALIDATE_LABEL| + GH_RM_MEM_ACCEPT_DONE; + + hw = cam_hw_instance_lookup(0, cam_mem_tag, 0, 0); + if (!hw) { + CAM_ERR(CAM_VMRM, "Look up hw mem tag failed %d", cam_mem_tag); + CAM_MEM_FREE(acl_desc); + return; + } + rc = cam_vmrm_gh_mem_accept(mem_hdl_gh, GH_RM_MEM_TYPE_IO, + trans_type, flags_accept, label, acl_desc, NULL, NULL, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem accept failed for tag: %d, rc: %d", cam_mem_tag, rc); + CAM_MEM_FREE(acl_desc); + return; + } + + CAM_DBG(CAM_VMRM, "Mem accept succeed for tag: %d handle: %d hw id 0x%x", + cam_mem_tag, mem_hdl_gh, hw->hw_id); + CAM_MEM_FREE(acl_desc); + + spin_lock(&hw->spin_lock); + hw->resources.ready_mask |= BIT(0); + hw->handle = mem_hdl_gh; + cam_set_hw_mem_owner(hw, cam_vmrm_intf_get_vmid()); + if (hw->resources.ready_mask == hw->resources.valid_mask) { + hw->vm_owner = cam_vmrm_intf_get_vmid(); + hw->is_using = true; + hw->resources.ready_mask = 0; + complete_all(&hw->wait_response); + CAM_DBG(CAM_VMRM, "Acquire hw succeed 0x%x", hw->hw_id); + } + spin_unlock(&hw->spin_lock); + + rc = cam_vmrm_gh_mem_notify(mem_hdl_gh, flags, tag, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem notify failed for tag: %d, rc: %d", tag, rc); + return; + } + + CAM_DBG(CAM_VMRM, "Mem accept notify succeed for tag: %d hw id 0x%x", tag, hw->hw_id); +} + +static void cam_irq_notification_pvm_handler(void *data, + unsigned long notify_type, enum gh_irq_label label) +{ + int rc = 0, i; + uint32_t irq_num; + uint32_t cam_irq_label; + struct cam_io_irq_entry *irq; + struct 
cam_hw_instance *hw; + + CAM_DBG(CAM_VMRM, "notify type: %lld gh irq label: %d", notify_type, label); + + if ((notify_type != GH_RM_NOTIF_VM_IRQ_ACCEPTED) && + (notify_type != GH_RM_NOTIF_VM_IRQ_RELEASED)) { + CAM_ERR(CAM_VMRM, "Invalid notification type %lld", notify_type); + return; + } + + if (!cam_validate_gh_irq_label(label)) { + CAM_ERR(CAM_VMRM, "Invalid label %d", label); + return; + } + + if (notify_type == GH_RM_NOTIF_VM_IRQ_ACCEPTED) { + mutex_lock(&g_vmrm_intf_dev->lock); + irq = cam_irq_lookup(label, 0); + if (!irq) { + CAM_ERR(CAM_VMRM, "Lookup irq label failed %d", label); + mutex_unlock(&g_vmrm_intf_dev->lock); + return; + } + + cam_irq_label = cam_gh_irq_label_convert_cam_irq_label(label); + hw = cam_hw_instance_lookup(0, 0, cam_irq_label, 0); + if (!hw) { + CAM_ERR(CAM_VMRM, "Look up hw cam irq label failed %d", cam_irq_label); + mutex_unlock(&g_vmrm_intf_dev->lock); + return; + } + mutex_unlock(&g_vmrm_intf_dev->lock); + + CAM_DBG(CAM_VMRM, "Receive irq accept notification camera label: %d", + cam_irq_label); + + spin_lock(&hw->spin_lock); + for (i = 0; i < hw->resources.irq_count; i++) { + if (cam_irq_label == hw->resources.irq_label[i]) + break; + } + hw->resources.ready_mask |= BIT(i + CAM_VMRM_RESOURCE_IRQ_BIT_MAP_OFFSET); + cam_set_hw_irq_owner(hw->resources.irq_num[i], CAM_SVM1); + if (hw->resources.ready_mask == hw->resources.valid_mask) { + hw->vm_owner = CAM_SVM1; + hw->is_using = true; + hw->resources.ready_mask = 0; + atomic_set(&hw->lend_in_progress, 0); + CAM_DBG(CAM_VMRM, "hw 0x%x is acquired by vm %d", hw->hw_id, CAM_SVM1); + } + spin_unlock(&hw->spin_lock); + } else if (notify_type == GH_RM_NOTIF_VM_IRQ_RELEASED) { + CAM_DBG(CAM_VMRM, "Receive irq release notification for gh label: %d", label); + mutex_lock(&g_vmrm_intf_dev->lock); + irq = cam_irq_lookup(label, 0); + if (!irq) { + CAM_ERR(CAM_VMRM, "Lookup irq label failed %d", label); + mutex_unlock(&g_vmrm_intf_dev->lock); + return; + } + + irq_num = irq->irq_num; + rc = 
cam_vmrm_gh_irq_reclaim(label); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq reclaim %d failed for irq num: %d camera label: %d", + irq_num, irq->label); + mutex_unlock(&g_vmrm_intf_dev->lock); + return; + } + + cam_irq_label = cam_gh_irq_label_convert_cam_irq_label(label); + hw = cam_hw_instance_lookup(0, 0, cam_irq_label, 0); + if (!hw) { + CAM_ERR(CAM_VMRM, "Look up hw cam irq label failed %d", cam_irq_label); + mutex_unlock(&g_vmrm_intf_dev->lock); + return; + } + + CAM_DBG(CAM_VMRM, "Reclaim irq succeed for label: %d", cam_irq_label); + + for (i = 0; i < hw->resources.irq_count; i++) { + if (cam_irq_label == hw->resources.irq_label[i]) + break; + } + hw->resources.ready_mask |= BIT(i + CAM_VMRM_RESOURCE_IRQ_BIT_MAP_OFFSET); + cam_set_hw_irq_owner(hw->resources.irq_num[i], CAM_PVM); + if (hw->resources.ready_mask == hw->resources.valid_mask) { + hw->vm_owner = CAM_PVM; + hw->is_using = false; + hw->resources.ready_mask = 0; + CAM_DBG(CAM_VMRM, "Reclaim hw succeed 0x%x", hw->hw_id); + } + mutex_unlock(&g_vmrm_intf_dev->lock); + + return; + } +} + +static void cam_irq_notification_svm_handler(void *data, + unsigned long notify_type, enum gh_irq_label label) +{ + int rc = 0, accepted_irq, i; + struct cam_io_irq_entry *irq; + struct cam_hw_instance *hw; + uint32_t cam_irq_label; + + if (notify_type != GH_RM_NOTIF_VM_IRQ_LENT) { + CAM_ERR(CAM_VMRM, "Invalid notification type %lld", notify_type); + return; + } + + if (!cam_validate_gh_irq_label(label)) { + CAM_ERR(CAM_VMRM, "Invalid gh label %d", label); + return; + } + + irq = cam_irq_lookup(label, 0); + if (!irq) { + CAM_ERR(CAM_VMRM, "Lookup irq label failed %d", label); + return; + } + + accepted_irq = cam_vmrm_gh_irq_accept(label, -1, IRQ_TYPE_EDGE_RISING); + if (accepted_irq < 0) { + CAM_ERR(CAM_VMRM, "Accept irq failed for gh label: %d error: %d", + label, accepted_irq); + return; + } + CAM_DBG(CAM_VMRM, "Accept irq succeed for camera label: %d", irq->label); + + cam_irq_label = 
cam_gh_irq_label_convert_cam_irq_label(label); + hw = cam_hw_instance_lookup(0, 0, cam_irq_label, 0); + if (!hw) { + CAM_ERR(CAM_VMRM, "Look up hw cam irq label failed %d", cam_irq_label); + return; + } + + spin_lock(&hw->spin_lock); + for (i = 0; i < hw->resources.irq_count; i++) { + if (cam_irq_label == hw->resources.irq_label[i]) + break; + } + hw->resources.ready_mask |= BIT(i + CAM_VMRM_RESOURCE_IRQ_BIT_MAP_OFFSET); + cam_set_hw_irq_owner(hw->resources.irq_num[i], cam_vmrm_intf_get_vmid()); + if (hw->resources.ready_mask == hw->resources.valid_mask) { + hw->vm_owner = cam_vmrm_intf_get_vmid(); + hw->is_using = true; + hw->resources.ready_mask = 0; + complete_all(&hw->wait_response); + CAM_DBG(CAM_VMRM, "Acquire hw succeed 0x%x", hw->hw_id); + } + spin_unlock(&hw->spin_lock); + + rc = cam_vmrm_gh_irq_accept_notify(label); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq accept notify failed for label: %d error: %d", label, rc); + return; + } + CAM_DBG(CAM_VMRM, "Irq accept notify succeed for gh label: %d", label); +} + +static int cam_vmrm_dt_parse(struct platform_device *pdev, + struct cam_vmrm_intf_dev *vmrm_intf_dev) +{ + int rc = 0; + + if (!pdev || !vmrm_intf_dev) { + CAM_ERR(CAM_VMRM, "Invalid Input Params. pdev: %s, vmrm_intf_dev: %s", + CAM_IS_NULL_TO_STR(pdev), CAM_IS_NULL_TO_STR(vmrm_intf_dev)); + return -EINVAL; + } + + rc = of_property_read_u32(pdev->dev.of_node, "vmid", &g_vmrm_intf_dev->cam_vmid); + if (rc) { + CAM_ERR(CAM_VMRM, "Cannot read the VM id. 
rc: %d", rc); + return rc; + } + + g_vmrm_intf_dev->gh_rr_enable = of_property_read_bool(pdev->dev.of_node, "gh_rr_enable"); + g_vmrm_intf_dev->no_register_read_on_bind = of_property_read_bool(pdev->dev.of_node, + "no_register_read_on_bind"); + + rc = of_property_read_u32(pdev->dev.of_node, "proxy_voting_enable", + &g_vmrm_intf_dev->proxy_voting_enable); + + if (rc) { + CAM_DBG(CAM_VMRM, "No found proxy_voting_enable"); + rc = 0; + } + + CAM_DBG(CAM_VMRM, + "VM-name: %d proxy voting enable 0x%x gh request resource enable %d no register read on bind %d", + g_vmrm_intf_dev->cam_vmid, g_vmrm_intf_dev->proxy_voting_enable, + g_vmrm_intf_dev->gh_rr_enable, g_vmrm_intf_dev->no_register_read_on_bind); + + return rc; +} + +static int cam_vmrm_set_proxy_voting_enable(void *data, u64 val) +{ + g_vmrm_intf_dev->proxy_voting_enable = val; + CAM_INFO(CAM_VMRM, "Set proxy_voting_enable value :%lld", val); + + return 0; +} + +static int cam_vmrm_get_proxy_voting_enable(void *data, u64 *val) +{ + *val = g_vmrm_intf_dev->proxy_voting_enable; + CAM_INFO(CAM_VMRM, "Get proxy_voting_enable value :%lld", + g_vmrm_intf_dev->proxy_voting_enable); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_vmrm_proxy_voting_enable, + cam_vmrm_get_proxy_voting_enable, + cam_vmrm_set_proxy_voting_enable, "%16llu"); + +static int cam_vmrm_irq_lend_and_notify(uint32_t label, uint32_t vm_name, + int irq, gh_irq_handle_fn_v2 handler, void *data) +{ + int rc = 0; + enum gh_irq_label gh_label; + enum gh_vm_names gh_vm; + + gh_label = cam_irq_label_convert_gh_label(label); + gh_vm = cam_vmid_to_gh_name(vm_name); + + rc = cam_vmrm_gh_irq_lend_v2(gh_label, gh_vm, irq, handler, data); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq lend failed for label: %d, rc: %d", label, rc); + return rc; + } + + rc = cam_vmrm_gh_irq_lend_notify(gh_label); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq lend notify failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Irq lend and notify label %d succeed", label); + + 
return rc; +} + +static int cam_vmrm_irq_release_and_notify(uint32_t label) +{ + int rc = 0; + enum gh_irq_label gh_label; + + gh_label = cam_irq_label_convert_gh_label(label); + + rc = cam_vmrm_gh_irq_release(gh_label); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq release failed for label: %d, rc: %d", label, rc); + return rc; + } + + rc = cam_vmrm_gh_irq_release_notify(gh_label); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq release notify failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Irq release succeed for label: %d", label); + + return rc; +} + +static int cam_vmrm_mem_lend_and_notify(u8 mem_type, u8 flags, uint32_t label, + struct gh_sgl_desc *sgl_desc, struct gh_mem_attr_desc *mem_attr_desc, + gh_memparcel_handle_t *handle, uint32_t vm_name, uint32_t tag) + +{ + int rc = 0; + enum gh_vm_names gh_vm; + enum gh_mem_notifier_tag gh_tag; + struct gh_notify_vmid_desc *vmid_desc = NULL; + struct gh_acl_desc *acl_desc = NULL; + + gh_vm = cam_vmid_to_gh_name(vm_name); + gh_tag = cam_mem_tag_convert_gh_tag(tag); + + acl_desc = cam_populate_acl(gh_vm); + if (IS_ERR(acl_desc)) { + CAM_ERR(CAM_VMRM, "populate acl failed %d", PTR_ERR(acl_desc)); + return -EINVAL; + } + + rc = cam_vmrm_gh_mem_lend(mem_type, flags, label, + acl_desc, sgl_desc, mem_attr_desc, handle); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem lend failed label: %d, rc: %d", label, rc); + goto free_acl_desc; + } + + vmid_desc = cam_populate_vmid_desc(gh_vm); + if (IS_ERR(vmid_desc)) { + CAM_ERR(CAM_VMRM, "populate vmid desc failed %d", PTR_ERR(vmid_desc)); + rc = -EINVAL; + goto free_acl_desc; + } + + rc = cam_vmrm_gh_mem_notify(*handle, GH_RM_MEM_NOTIFY_RECIPIENT_SHARED, gh_tag, vmid_desc); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem lend notify failed tag: %d, rc: %d", tag, rc); + goto free_vmid_desc; + } + + CAM_DBG(CAM_VMRM, "Mem lend and notify label: %d succeed", label); + +free_vmid_desc: + CAM_MEM_FREE(vmid_desc); +free_acl_desc: + CAM_MEM_FREE(acl_desc); + return rc; +} + +static int 
cam_vmrm_mem_release_and_notify( + gh_memparcel_handle_t *handle, int32_t tag) + +{ + int rc = 0; + enum gh_mem_notifier_tag gh_tag; + + gh_tag = cam_mem_tag_convert_gh_tag(tag); + + rc = cam_vmrm_gh_mem_release(*handle, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem release failed tag: %d, rc: %d", tag, rc); + return rc; + } + + rc = cam_vmrm_gh_mem_notify(*handle, GH_RM_MEM_NOTIFY_OWNER_RELEASED, gh_tag, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem release notify tag: %d failed rc: %d", tag, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Mem release and notify tag: %d succeed", tag); + + return rc; +} + +struct cam_hw_instance *cam_check_hw_instance_available( + uint32_t hw_id) +{ + struct cam_hw_instance *hw_pos; + + hw_pos = cam_hw_instance_lookup(hw_id, 0, 0, 0); + if (!hw_pos) { + CAM_ERR(CAM_VMRM, "do not find the hw instance 0x%x", hw_id); + return hw_pos; + } + + if (hw_pos->is_using && (hw_pos->vm_owner != cam_vmrm_intf_get_vmid())) { + CAM_WARN(CAM_VMRM, "hw instance 0x%x is using by other vm %d current vm %d", + hw_pos->hw_id, hw_pos->vm_owner, cam_vmrm_intf_get_vmid()); + return NULL; + } else { + return hw_pos; + } +} + +static void cam_clean_io_mem(struct list_head *mem_list) +{ + struct cam_io_mem_entry *pos, *tmp; + + if (!list_empty(mem_list)) { + list_for_each_entry_safe(pos, tmp, mem_list, list) { + list_del(&pos->list); + CAM_MEM_FREE(pos); + } + } +} + +static void cam_clean_io_irq(struct list_head *irq_list) +{ + struct cam_io_irq_entry *pos, *tmp; + + if (!list_empty(irq_list)) { + list_for_each_entry_safe(pos, tmp, irq_list, list) { + list_del(&pos->list); + CAM_MEM_FREE(pos); + } + } +} + +static void cam_clean_hw_instance(struct list_head *hw_list) +{ + struct cam_hw_instance *pos, *tmp; + + if (!list_empty(hw_list)) { + list_for_each_entry_safe(pos, tmp, hw_list, list) { + list_del(&pos->list); + CAM_MEM_FREE(pos); + } + } +} + +static void cam_clean_driver_node(struct list_head *driver_list) +{ + struct cam_driver_node *pos, *tmp; + + if 
(!list_empty(driver_list)) { + list_for_each_entry_safe(pos, tmp, driver_list, list) { + list_del(&pos->list); + kfree(pos); + } + } +} + +int cam_populate_irq_resource_info(struct cam_hw_instance *hw_pos) +{ + int i; + struct cam_io_irq_entry *irq_entry; + uint32_t irq_label; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + for (i = 0; i < hw_pos->resources.irq_count; i++) { + irq_entry = CAM_MEM_ZALLOC(sizeof(*irq_entry), GFP_KERNEL); + if (!irq_entry) { + CAM_ERR(CAM_VMRM, "irq entry allocate memory failed"); + return -ENOMEM; + } + irq_entry->irq_num = hw_pos->resources.irq_num[i]; + irq_label = hw_pos->resources.irq_label[i]; + irq_entry->label = irq_label; + irq_entry->vm_owner = CAM_PVM; + + INIT_LIST_HEAD(&irq_entry->list); + mutex_lock(&vmrm_intf_dev->lock); + list_add_tail(&irq_entry->list, &vmrm_intf_dev->io_res.irq); + mutex_unlock(&vmrm_intf_dev->lock); + CAM_DBG(CAM_VMRM, "hw name %s, hw id 0x%x irq label %d irq num %d", + hw_pos->hw_name, hw_pos->hw_id, irq_entry->label, irq_entry->irq_num); + } + + return 0; +} + +int cam_populate_mem_resource_info(struct cam_hw_instance *hw_pos) +{ + int i, rc = 0; + struct cam_io_mem_entry *mem_entry; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + mutex_lock(&vmrm_intf_dev->lock); + for (i = 0; i < hw_pos->resources.num_mem_block; i++) { + if (hw_pos->hw_id == CAM_HW_ID_CPAS) { + if ((!strcmp(hw_pos->resources.mem_block_name[i], "cam_rpmh")) || + (!strcmp(hw_pos->resources.mem_block_name[i], "cam_cesta"))) { + CAM_DBG(CAM_VMRM, "%s is other hw access range can not lend", + hw_pos->resources.mem_block_name[i]); + continue; + } + } + rc = cam_mem_entry_is_duplicate(hw_pos->resources.mem_block_addr[i], + hw_pos->resources.mem_block_size[i]); + if (rc == 1) { + CAM_DBG(CAM_VMRM, "mem entry 0x%x has been added", + hw_pos->resources.mem_block_addr[i]); + rc = 0; + continue; + } else if (rc == 2) { + CAM_DBG(CAM_VMRM, + "mem entry 
0x%x size mismatch with previous and update mem size to max", + hw_pos->resources.mem_block_addr[i]); + rc = 0; + continue; + } else { + CAM_DBG(CAM_VMRM, "hw id 0x%x new mem entry 0x%x", hw_pos->hw_id, + hw_pos->resources.mem_block_addr[i]); + } + + mem_entry = CAM_MEM_ZALLOC(sizeof(*mem_entry), GFP_KERNEL); + if (!mem_entry) { + CAM_ERR(CAM_VMRM, "io mem entry allocate memory failed"); + mutex_unlock(&vmrm_intf_dev->lock); + return -ENOMEM; + } + mem_entry->base = hw_pos->resources.mem_block_addr[i]; + mem_entry->size = hw_pos->resources.mem_block_size[i]; + mem_entry->vm_owner = CAM_PVM; + mem_entry->lend_in_progress = false; + + INIT_LIST_HEAD(&mem_entry->list); + list_add_tail(&mem_entry->list, &vmrm_intf_dev->io_res.mem); + CAM_DBG(CAM_VMRM, "hw name %s, mem address 0x%x size 0x%x", + hw_pos->hw_name, mem_entry->base, mem_entry->size); + } + mutex_unlock(&vmrm_intf_dev->lock); + + return 0; +} + +int cam_populate_gpio_resource_info(struct cam_hw_instance *hw_pos) +{ + int rc = 0, i; + struct cam_io_mem_entry *mem_entry; + struct resource res; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + mutex_lock(&vmrm_intf_dev->lock); + if (hw_pos->resources.gpio_count) { + for (i = 0; i < hw_pos->resources.gpio_count; i++) { + rc = msm_gpio_get_pin_address(hw_pos->resources.gpio_num[i], &res); + if (!rc) { + CAM_ERR(CAM_VMRM, "invalid gpio %d", + hw_pos->resources.gpio_num[i]); + goto end; + } + rc = 0; + + rc = cam_mem_entry_is_duplicate(res.start, resource_size(&res)); + if (rc == 1) { + CAM_DBG(CAM_VMRM, "gpio num %x mem entry 0x%x has been added", + hw_pos->resources.gpio_num[i], res.start); + rc = 0; + continue; + } else if (rc == 2) { + CAM_DBG(CAM_VMRM, + "mem entry 0x%x size mismatch with previous and update mem size to max", + hw_pos->resources.mem_block_addr[i]); + rc = 0; + continue; + } else { + CAM_DBG(CAM_VMRM, "hw id 0x%x new gpio mem entry 0x%x", + hw_pos->hw_id, hw_pos->resources.gpio_num[i]); + } + + 
mem_entry = CAM_MEM_ZALLOC(sizeof(*mem_entry), GFP_KERNEL); + if (!mem_entry) { + rc = -ENOMEM; + CAM_ERR(CAM_VMRM, "gpio mem allocate memory failed"); + goto end; + } + + mem_entry->base = res.start; + mem_entry->size = resource_size(&res); + mem_entry->vm_owner = CAM_PVM; + mem_entry->lend_in_progress = false; + hw_pos->resources.gpio_mem_addr[i] = mem_entry->base; + hw_pos->resources.gpio_mem_size[i] = mem_entry->size; + + INIT_LIST_HEAD(&mem_entry->list); + list_add_tail(&mem_entry->list, &vmrm_intf_dev->io_res.mem); + CAM_DBG(CAM_VMRM, "hw name %s, gpio num %d mem address 0x%x size 0x%x", + hw_pos->hw_name, hw_pos->resources.gpio_num[i], mem_entry->base, + mem_entry->size); + } + } + +end: + mutex_unlock(&vmrm_intf_dev->lock); + return rc; +} + +void cam_free_io_resource_info(void) +{ + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + mutex_lock(&vmrm_intf_dev->lock); + if (!list_empty(&vmrm_intf_dev->io_res.mem)) + cam_clean_io_mem(&vmrm_intf_dev->io_res.mem); + + if (!list_empty(&vmrm_intf_dev->io_res.irq)) + cam_clean_io_irq(&vmrm_intf_dev->io_res.irq); + mutex_unlock(&vmrm_intf_dev->lock); +} + +static int cam_vmrm_lend_irq_resources(struct cam_hw_instance *hw_pos, uint32_t vmid) +{ + int rc = 0, i; + struct cam_io_irq_entry *irq, *irq_temp; + + for (i = 0; i < hw_pos->resources.irq_count; i++) { + if (!list_empty(&g_vmrm_intf_dev->io_res.irq)) { + list_for_each_entry_safe(irq, irq_temp, + &g_vmrm_intf_dev->io_res.irq, list) { + if (irq->irq_num == hw_pos->resources.irq_num[i] && + irq->vm_owner == CAM_PVM) { + rc = cam_vmrm_irq_lend_and_notify(irq->label, vmid, + irq->irq_num, cam_irq_notification_pvm_handler, + NULL); + if (rc) { + CAM_ERR(CAM_VMRM, + "irq lend and notify irq failed label %d rc: %d", + irq->label, rc); + goto end; + } + break; + } + } + } + } + +end: + return rc; +} + +static int cam_vmrm_lend_mem_resources(struct cam_hw_instance *hw_pos, uint32_t vmid) +{ + int rc = 0, i = 0, j = 0; + struct 
gh_sgl_desc *sgl_desc = NULL; + struct cam_io_mem_entry *mem, *mem_temp; + + sgl_desc = CAM_MEM_ZALLOC(offsetof(struct gh_sgl_desc, + sgl_entries[hw_pos->resources.num_mem_block + hw_pos->resources.gpio_count]), + GFP_KERNEL); + if (!sgl_desc) { + CAM_ERR(CAM_VMRM, "Could not reserve memory for SGL entries"); + rc = -ENOMEM; + goto end; + } + + mutex_lock(&g_vmrm_intf_dev->lock); + if (hw_pos->resources.num_mem_block) { + for (i = 0; i < hw_pos->resources.num_mem_block; i++) { + if (!list_empty(&g_vmrm_intf_dev->io_res.mem)) { + list_for_each_entry_safe(mem, mem_temp, + &g_vmrm_intf_dev->io_res.mem, list) { + if (mem->base == hw_pos->resources.mem_block_addr[i] && + mem->vm_owner == CAM_PVM && + (!mem->lend_in_progress)) { + sgl_desc->n_sgl_entries = j + 1; + sgl_desc->sgl_entries[j].ipa_base = + ALIGN_DOWN(mem->base, PAGE_SIZE); + sgl_desc->sgl_entries[j].size = + ALIGN(mem->size, PAGE_SIZE); + mem->lend_in_progress = true; + CAM_DBG(CAM_VMRM, "base 0x%x size 0x%x index %d", + sgl_desc->sgl_entries[j].ipa_base, + sgl_desc->sgl_entries[j].size, j); + j = j + 1; + break; + } + } + } + } + } + + if (hw_pos->resources.gpio_count) { + for (i = 0; i < hw_pos->resources.gpio_count; i++) { + if (!list_empty(&g_vmrm_intf_dev->io_res.mem)) { + list_for_each_entry_safe(mem, mem_temp, + &g_vmrm_intf_dev->io_res.mem, list) { + if ((mem->base == hw_pos->resources.gpio_mem_addr[i]) && + mem->vm_owner == CAM_PVM && + (!mem->lend_in_progress)) { + sgl_desc->n_sgl_entries = j + 1; + sgl_desc->sgl_entries[j].ipa_base = mem->base; + sgl_desc->sgl_entries[j].size = mem->size; + mem->lend_in_progress = true; + CAM_DBG(CAM_VMRM, "base 0x%x size 0x%x index %d", + sgl_desc->sgl_entries[j].ipa_base, + sgl_desc->sgl_entries[j].size, j); + j = j + 1; + break; + } + } + } + } + } + mutex_unlock(&g_vmrm_intf_dev->lock); + + if (!sgl_desc->n_sgl_entries) { + CAM_DBG(CAM_VMRM, "Not mem need to lend hw id 0x%x", hw_pos->hw_id); + CAM_MEM_FREE(sgl_desc); + return 0; + } + + rc = 
cam_vmrm_mem_lend_and_notify(CAM_MEM_TYPE_IO, 0, hw_pos->resources.mem_label, + sgl_desc, NULL, &hw_pos->handle, vmid, hw_pos->resources.mem_tag); + if (rc) + CAM_ERR(CAM_VMRM, + "hw_id 0x%x mem lend and notify mem label %d rc: %d", + hw_pos->hw_id, hw_pos->resources.mem_label, rc); + + CAM_MEM_FREE(sgl_desc); +end: + return rc; +} + +static int cam_vmrm_lend_resources(uint32_t hw_id, uint32_t vmid) +{ + int rc = 0; + struct cam_hw_instance *hw_pos; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + hw_pos = cam_hw_instance_lookup(hw_id, 0, 0, 0); + if (!hw_pos) { + CAM_ERR(CAM_VMRM, "Do not find hw instance %x", hw_id); + return -EINVAL; + } + + spin_lock(&hw_pos->spin_lock); + if (atomic_read(&hw_pos->lend_in_progress)) { + CAM_ERR(CAM_VMRM, "Hw lend is in progress 0x%x", hw_pos->hw_id); + spin_unlock(&hw_pos->spin_lock); + return -EINVAL; + } + + atomic_set(&hw_pos->lend_in_progress, 1); + spin_unlock(&hw_pos->spin_lock); + + rc = cam_vmrm_lend_irq_resources(hw_pos, vmid); + if (rc) { + CAM_ERR(CAM_VMRM, "Lend irq resources failed hw instance %x vmid %d rc %d", + hw_id, vmid, rc); + goto end; + } + + rc = cam_vmrm_lend_mem_resources(hw_pos, vmid); + if (rc) { + CAM_ERR(CAM_VMRM, "Lend mem resources failed hw instance %x vmid %d rc %d", + hw_id, vmid, rc); + goto end; + } + + return rc; +end: + spin_lock(&hw_pos->spin_lock); + atomic_set(&hw_pos->lend_in_progress, 0); + spin_unlock(&hw_pos->spin_lock); + return rc; +} + +static int cam_vmrm_set_resource_state(uint32_t msg_dst_id, bool state) +{ + struct cam_hw_instance *hw_pos; + + mutex_lock(&g_vmrm_intf_dev->lock); + hw_pos = cam_hw_instance_lookup(msg_dst_id, 0, 0, 0); + if (!hw_pos) { + CAM_ERR(CAM_VMRM, "do not find the hw instance 0x%x", msg_dst_id); + mutex_unlock(&g_vmrm_intf_dev->lock); + return -EINVAL; + } + hw_pos->is_using = state; + mutex_unlock(&g_vmrm_intf_dev->lock); + + return 0; +} + +static int cam_vmrm_release_irq_resources(struct cam_hw_instance 
*hw_pos)
+{
+	int rc = 0, i;
+	struct cam_io_irq_entry *irq, *irq_temp;
+
+	/* Release back to PVM every lent irq belonging to this hw instance */
+	for (i = 0; i < hw_pos->resources.irq_count; i++) {
+		if (!list_empty(&g_vmrm_intf_dev->io_res.irq)) {
+			list_for_each_entry_safe(irq, irq_temp,
+				&g_vmrm_intf_dev->io_res.irq, list) {
+				if ((irq->irq_num == hw_pos->resources.irq_num[i]) &&
+					(irq->vm_owner != CAM_PVM)) {
+					rc = cam_vmrm_irq_release_and_notify(irq->label);
+					if (rc) {
+						CAM_ERR(CAM_VMRM,
+							"irq release and notify irq failed label %d rc: %d",
+							irq->label, rc);
+						goto end;
+					}
+					cam_set_hw_irq_owner(irq->irq_num, CAM_PVM);
+					break;
+				}
+			}
+		}
+	}
+
+end:
+	return rc;
+}
+
+/* Releases the memparcel lent for this hw instance and notifies the owner */
+static int cam_vmrm_release_mem_resources(struct cam_hw_instance *hw_pos)
+{
+	int rc = 0;
+	uint32_t handle, tag, hw_id;
+
+	handle = hw_pos->handle;
+	tag = hw_pos->resources.mem_tag;
+	hw_id = hw_pos->hw_id;
+
+	rc = cam_vmrm_mem_release_and_notify(&handle, tag);
+	if (rc)
+		/* BUGFIX: format string was missing the specifier for hw_id
+		 * (two specifiers, three arguments).
+		 */
+		CAM_ERR(CAM_VMRM, "hw_id 0x%x mem release and notify mem tag %d rc: %d",
+			hw_id, tag, rc);
+
+	return rc;
+}
+
+/* Gives every irq and mem resource of hw_id back to the PVM and resets
+ * the instance ownership/usage state.
+ */
+int cam_vmrm_release_resources(uint32_t hw_id)
+{
+	int rc = 0;
+	struct cam_hw_instance *hw_pos;
+	struct cam_vmrm_intf_dev *vmrm_intf_dev;
+
+	vmrm_intf_dev = cam_vmrm_get_intf_dev();
+
+	hw_pos = cam_hw_instance_lookup(hw_id, 0, 0, 0);
+	if (!hw_pos) {
+		CAM_ERR(CAM_VMRM, "Do not find hw instance %x", hw_id);
+		return -EINVAL;
+	}
+
+	rc = cam_vmrm_release_irq_resources(hw_pos);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Release irq resources failed hw instance %x vmid %d rc %d",
+			hw_id, cam_vmrm_intf_get_vmid(), rc);
+		goto end;
+	}
+
+	rc = cam_vmrm_release_mem_resources(hw_pos);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Release mem resources failed hw instance %x vmid %d rc %d",
+			hw_id, cam_vmrm_intf_get_vmid(), rc);
+		goto end;
+	}
+
+	spin_lock(&hw_pos->spin_lock);
+	cam_set_hw_mem_owner(hw_pos, CAM_PVM);
+	hw_pos->vm_owner = CAM_PVM;
+	hw_pos->is_using = false;
+	spin_unlock(&hw_pos->spin_lock);
+
+	CAM_DBG(CAM_VMRM, "Release resources succeed hw instance 0x%x vmid %d", hw_id,
+		
cam_vmrm_intf_get_vmid()); +end: + return rc; +} + +static int cam_vmrm_hw_instance_generic_callback(void *cb_data, void *msg, + uint32_t size) +{ + int rc = 0; + uint32_t msg_type, hw_id; + struct cam_hw_soc_info *soc_info = NULL; + unsigned long cesta_clk_rate_high = 0, cesta_clk_rate_low = 0; + int cesta_client_idx = -1; + struct cam_msg_set_clk_rate *msg_set_clk_rate = NULL; + struct cam_vmrm_msg *hw_msg = NULL; + + hw_msg = msg; + hw_id = hw_msg->msg_dst_id; + + soc_info = (struct cam_hw_soc_info *)cb_data; + + msg_type = hw_msg->msg_type; + + CAM_DBG(CAM_VMRM, "hw id:0x%x, msg type %d", hw_id, msg_type); + + switch (msg_type) { + case CAM_SOC_ENABLE_RESOURCE: + rc = cam_soc_util_enable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, + true, soc_info->lowest_clk_level, false); + if (rc) + CAM_ERR(CAM_VMRM, "hw id:0x%x soc enable resource failed", hw_id); + break; + + case CAM_SOC_DISABLE_RESOURCE: + rc = cam_soc_util_disable_platform_resource(soc_info, CAM_CLK_SW_CLIENT_IDX, + true, false); + if (rc) + CAM_ERR(CAM_VMRM, "hw id:0x%x soc disable resource failed", hw_id); + break; + + case CAM_CLK_SET_RATE: + msg_set_clk_rate = (struct cam_msg_set_clk_rate *)&hw_msg->data[0]; + cesta_client_idx = msg_set_clk_rate->cesta_client_idx; + cesta_clk_rate_high = msg_set_clk_rate->clk_rate_high; + cesta_clk_rate_low = msg_set_clk_rate->clk_rate_low; + CAM_DBG(CAM_VMRM, "hw id:0x%x, client idx %d, clk[high low]=[%lu %lu]", hw_id, + cesta_client_idx, cesta_clk_rate_high, cesta_clk_rate_low); + rc = cam_soc_util_set_src_clk_rate(soc_info, cesta_client_idx, + cesta_clk_rate_high, cesta_clk_rate_low); + if (rc) + CAM_ERR(CAM_VMRM, "hw id:0x%x clk set rate failed", hw_id); + break; + + default: + rc = -EINVAL; + CAM_ERR(CAM_VMRM, "hw id:0x%x Error, Invalid msg type:%d", hw_id, msg_type); + break; + } + + return rc; +} + +void cam_vmrm_msg_handle(void *msg, size_t size, struct cam_intervm_response *res) +{ + int rc = 0; + uint32_t data_size, msg_type, msg_dst_type, msg_size; 
+ struct cam_vmrm_msg *vm_msg, *res_msg_local; + struct cam_hw_instance *hw_pos; + struct cam_driver_node *driver_pos; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + bool response_msg; + bool need_response; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + vm_msg = msg; + data_size = vm_msg->data_size; + res->send_response = false; + CAM_DBG(CAM_VMRM, + "msg_size %d data_size %d source_vmid %d des_vmid %d dst_type %d dst_id %x msg_type %d response msg %d need response %d", + size, vm_msg->data_size, vm_msg->source_vmid, vm_msg->des_vmid, + vm_msg->msg_dst_type, vm_msg->msg_dst_id, vm_msg->msg_type, vm_msg->response_msg, + vm_msg->need_response); + + if (offsetof(struct cam_vmrm_msg, data[data_size]) != size) { + CAM_ERR(CAM_VMRM, "received msg size not match"); + return; + } + + vm_msg = kmemdup(msg, size, GFP_ATOMIC); + if (!vm_msg) { + CAM_ERR(CAM_VMRM, "msg mem allocate failed"); + return; + } + + msg_type = vm_msg->msg_type; + msg_dst_type = vm_msg->msg_dst_type; + response_msg = vm_msg->response_msg; + need_response = vm_msg->need_response; + + /* actual message */ + if (!response_msg) { + if (msg_dst_type == CAM_MSG_DST_TYPE_HW_INSTANCE) { + mutex_lock(&vmrm_intf_dev->lock); + hw_pos = cam_hw_instance_lookup(vm_msg->msg_dst_id, 0, 0, 0); + if (hw_pos) { + if (hw_pos->hw_msg_callback) + rc = hw_pos->hw_msg_callback(hw_pos->hw_msg_callback_data, + vm_msg, size); + else + rc = cam_vmrm_hw_instance_generic_callback( + hw_pos->hw_msg_callback_data, vm_msg, size); + if (!rc) + CAM_DBG(CAM_VMRM, "hw id 0x%x msg type %d handle succeed", + vm_msg->msg_dst_id, vm_msg->msg_type); + else + CAM_ERR(CAM_VMRM, + "hw id 0x%x msg type %d handle failed %d", + vm_msg->msg_dst_id, vm_msg->msg_type, rc); + } else { + CAM_ERR(CAM_VMRM, "not found the hw %x", vm_msg->msg_dst_id); + mutex_unlock(&vmrm_intf_dev->lock); + goto free_recv_msg; + } + mutex_unlock(&vmrm_intf_dev->lock); + } else if (msg_dst_type == + CAM_MSG_DST_TYPE_DRIVER_NODE) { + mutex_lock(&vmrm_intf_dev->lock); + 
driver_pos = cam_driver_node_lookup(vm_msg->msg_dst_id); + if (driver_pos) { + rc = driver_pos->driver_msg_callback( + driver_pos->driver_msg_callback_data, vm_msg, size); + if (!rc) + CAM_DBG(CAM_VMRM, + "driver id 0x%x msg type %d handle succeed", + vm_msg->msg_dst_id, vm_msg->msg_type); + else + CAM_ERR(CAM_VMRM, + "hw id 0x%x msg type %d handle failed %d", + vm_msg->msg_dst_id, vm_msg->msg_type, rc); + } else { + CAM_ERR(CAM_VMRM, "not found the driver %x", vm_msg->msg_dst_id); + mutex_unlock(&vmrm_intf_dev->lock); + goto free_recv_msg; + } + mutex_unlock(&vmrm_intf_dev->lock); + } else if (msg_dst_type == + CAM_MSG_DST_TYPE_VMRM) { + if (msg_type == CAM_HW_RESOURCE_ACQUIRE) { + rc = cam_vmrm_lend_resources(vm_msg->msg_dst_id, + vm_msg->source_vmid); + if (!rc) + CAM_DBG(CAM_VMRM, + "lend resources succeed hw id 0x%x vmid %d", + vm_msg->msg_dst_id, vm_msg->source_vmid); + else + CAM_ERR(CAM_VMRM, + "lend resources failed %d hw id 0x%x vmid %d", + rc, vm_msg->msg_dst_id, vm_msg->source_vmid); + } else if (msg_type == CAM_HW_RESOURCE_SET_ACQUIRE) { + rc = cam_vmrm_set_resource_state(vm_msg->msg_dst_id, true); + if (!rc) + CAM_DBG(CAM_VMRM, + "vmrm set acquire state succeed hw id 0x%x vmid %d", + vm_msg->msg_dst_id, cam_vmrm_intf_get_vmid()); + else + CAM_ERR(CAM_VMRM, + "mrm set acquire state failed %d hw id 0x%x vmid %d", + rc, vm_msg->msg_dst_id, cam_vmrm_intf_get_vmid()); + goto free_recv_msg; + } else if (msg_type == CAM_HW_RESOURCE_SET_RELEASE) { + rc = cam_vmrm_set_resource_state(vm_msg->msg_dst_id, false); + if (!rc) + CAM_DBG(CAM_VMRM, + "vmrm set release state succeed hw id 0x%x vmid %d", + vm_msg->msg_dst_id, cam_vmrm_intf_get_vmid()); + else + CAM_ERR(CAM_VMRM, + "mrm set release state failed %d hw id 0x%x vmid %d", + rc, vm_msg->msg_dst_id, cam_vmrm_intf_get_vmid()); + goto free_recv_msg; + } else { + CAM_ERR(CAM_VMRM, "Not supported msg type %d", msg_type); + goto free_recv_msg; + } + } else { + CAM_ERR(CAM_VMRM, "Not supported msg dst type", 
msg_dst_type); + goto free_recv_msg; + } + /* this memory is freed inside qrtr layer */ + if (need_response) { + msg_size = offsetof(struct cam_vmrm_msg, data[1]); + res_msg_local = CAM_MEM_ZALLOC((msg_size), GFP_KERNEL); + if (!res_msg_local) { + CAM_ERR(CAM_VMRM, "res msg mem allocate failed"); + goto free_recv_msg; + } + res_msg_local->response_result = rc; + res_msg_local->msg_dst_type = msg_dst_type; + res_msg_local->msg_dst_id = vm_msg->msg_dst_id; + res_msg_local->msg_type = msg_type; + res_msg_local->data_size = 1; + res_msg_local->response_msg = true; + res_msg_local->need_response = false; + res->send_response = true; + res->res_msg = res_msg_local; + res->msg_size = msg_size; + CAM_DBG(CAM_VMRM, + "send response msg dst type %d dst id 0x%x msg type %d result %d", + res_msg_local->msg_dst_type, res_msg_local->msg_dst_id, + res_msg_local->msg_type, res_msg_local->response_result); + } + } else { + if (msg_dst_type == CAM_MSG_DST_TYPE_HW_INSTANCE) { + mutex_lock(&vmrm_intf_dev->lock); + hw_pos = cam_hw_instance_lookup(vm_msg->msg_dst_id, 0, 0, 0); + if (!hw_pos) { + CAM_ERR(CAM_VMRM, "not found the hw %x", vm_msg->msg_dst_id); + mutex_unlock(&vmrm_intf_dev->lock); + goto free_recv_msg; + } + mutex_unlock(&vmrm_intf_dev->lock); + spin_lock(&hw_pos->spin_lock); + hw_pos->response_result = vm_msg->response_result; + complete_all(&hw_pos->wait_response); + spin_unlock(&hw_pos->spin_lock); + } else if (msg_dst_type == CAM_MSG_DST_TYPE_DRIVER_NODE) { + mutex_lock(&vmrm_intf_dev->lock); + driver_pos = cam_driver_node_lookup(vm_msg->msg_dst_id); + if (driver_pos) { + complete_all(&driver_pos->wait_response); + driver_pos->response_result = vm_msg->response_result; + } else { + CAM_ERR(CAM_VMRM, "not found the driver id %x", vm_msg->msg_dst_id); + mutex_unlock(&vmrm_intf_dev->lock); + goto free_recv_msg; + } + mutex_unlock(&vmrm_intf_dev->lock); + } else if (msg_dst_type == CAM_MSG_DST_TYPE_VMRM) { + hw_pos = cam_hw_instance_lookup(vm_msg->msg_dst_id, 0, 0, 0); + if 
(!hw_pos) { + CAM_ERR(CAM_VMRM, "not found the hw %x", vm_msg->msg_dst_id); + goto free_recv_msg; + } + /* + * when result failure signal completion or result succeed tvm callback + * to signal + */ + spin_lock(&hw_pos->spin_lock); + hw_pos->response_result = vm_msg->response_result; + if (hw_pos->response_result) + complete_all(&hw_pos->wait_response); + spin_unlock(&hw_pos->spin_lock); + } else { + CAM_ERR(CAM_VMRM, "Not supported msg dst type", msg_dst_type); + } + } + +free_recv_msg: + kfree(vm_msg); +} + +#ifdef CONFIG_SPECTRA_RESOURCE_REQUEST_ENABLE +int cam_vmrm_resource_cb(gh_vmid_t source_vmid, bool is_req, + struct gh_res_request *resource, int resource_cnt) +{ + int rc = 0, i; + struct cam_io_irq_entry *irq; + enum gh_vm_names vm_name; + uint32_t cam_vmid; + struct cam_hw_instance *hw; + bool resource_include_irq = false; + int resource_irq_index = 0; + int resource_gpio_index = 0; + + rc = cam_vmrm_gh_rm_get_vm_name(source_vmid, &vm_name); + if (rc) { + CAM_ERR(CAM_VMRM, "get vm name failed %d", source_vmid); + return rc; + } + + cam_vmid = cam_gh_name_to_vmid(vm_name); + if (cam_vmid == CAM_VM_MAX) { + CAM_ERR(CAM_VMRM, "get cam vmid failed %d", vm_name); + return -EINVAL; + } + + for (i = 0; i < resource_cnt; i++) { + if (resource[i].resource_type == GH_RESOURCE_IRQ) { + resource_include_irq = true; + resource_irq_index = i; + CAM_DBG(CAM_VMRM, "resource include irq index %d", resource_irq_index); + break; + } + } + + if (!resource_include_irq) { + for (i = 0; i < resource_cnt; i++) { + if (resource[i].resource_type == GH_RESOURCE_GPIO) { + resource_gpio_index = i; + CAM_DBG(CAM_VMRM, "resource include gpio index %d", + resource_gpio_index); + break; + } + } + } + + if (resource_include_irq) { + irq = cam_irq_lookup(0, resource[resource_irq_index].resource.irq_num); + if (!irq) { + CAM_ERR(CAM_VMRM, "lookup irq %d failed", + resource[resource_irq_index].resource.irq_num); + return -EINVAL; + } + hw = cam_hw_instance_lookup(0, 0, irq->label, 0); + if 
(!hw) { + CAM_ERR(CAM_VMRM, "Do not find hw instance irq label %d", irq->label); + return -EINVAL; + } + } else { + hw = cam_hw_instance_lookup(0, 0, 0, + resource[resource_gpio_index].resource.gpio_num); + if (!hw) { + CAM_ERR(CAM_VMRM, "Do not find hw instance gpio num %d", + resource[resource_gpio_index].resource.gpio_num); + return -EINVAL; + } + } + + /* request from svm or release from pvm*/ + if (is_req) { + rc = cam_vmrm_lend_resources(hw->hw_id, cam_vmid); + if (!rc) + CAM_DBG(CAM_VMRM, + "lend resources succeed hw id 0x%x vmid %d", + hw->hw_id, cam_vmid); + else + CAM_ERR(CAM_VMRM, + "lend resources failed %d hw id 0x%x vmid %d", + rc, hw->hw_id, cam_vmid); + } else { + rc = cam_vmrm_release_resources(hw->hw_id); + if (!rc) + CAM_DBG(CAM_VMRM, + "release resources succeed hw id 0x%x vmid %d", + hw->hw_id, cam_vmid); + else + CAM_ERR(CAM_VMRM, + "release resources failed %d hw id 0x%x vmid %d", + rc, hw->hw_id, cam_vmid); + } + + return rc; +} + +int cam_vmrm_resource_req(struct cam_hw_instance *hw_pos, bool is_req) +{ + int rc = 0; + int i = 0, j; + int resource_cnt; + struct gh_res_request *resource; + gh_vmid_t vmid; + enum gh_vm_names vmname; + + resource_cnt = hw_pos->resources.resource_cnt; + resource = CAM_ZALLOC_ARRAY(resource_cnt, sizeof(*resource), GFP_KERNEL); + if (!resource) { + CAM_ERR(CAM_VMRM, "no memory for hw id 0x%x", hw_pos->hw_id); + return -ENOMEM; + } + + for (j = 0; j < hw_pos->resources.gpio_count; j++, i++) { + resource[i].resource_type = GH_RESOURCE_GPIO; + resource[i].resource.gpio_num = hw_pos->resources.gpio_num[j]; + CAM_DBG(CAM_VMRM, "hw id 0x%x index %d gpio num %d", hw_pos->hw_id, i, + resource[i].resource.gpio_num); + } + + for (j = 0; j < hw_pos->resources.num_mem_block ; j++, i++) { + resource[i].resource_type = GH_RESOURCE_IOMEM; + resource[i].resource.sgl_entry.ipa_base = hw_pos->resources.mem_block_addr[j]; + resource[i].resource.sgl_entry.size = hw_pos->resources.mem_block_size[j]; + CAM_DBG(CAM_VMRM, "hw id 0x%x 
index %d mem addr 0x%x size 0x%x", hw_pos->hw_id, i, + resource[i].resource.sgl_entry.ipa_base, + resource[i].resource.sgl_entry.size); + } + + for (j = 0; j < hw_pos->resources.irq_count; j++, i++) { + resource[i].resource_type = GH_RESOURCE_IRQ; + resource[i].resource.irq_num = hw_pos->resources.irq_num[j]; + CAM_DBG(CAM_VMRM, "hw id 0x%x index %d irq num %d", hw_pos->hw_id, i, + resource[i].resource.irq_num); + } + + vmname = cam_vmid_to_gh_name(CAM_PVM); + if (vmname == GH_VM_MAX) { + CAM_ERR(CAM_VMRM, "get gh vm name failed"); + rc = -EINVAL; + goto free_res; + } + + rc = cam_vmrm_ghd_rm_get_vmid(vmname, &vmid); + if (rc) { + CAM_ERR(CAM_VMRM, "get vmid failed %d", rc); + goto free_res; + } + + if (is_req) { + rc = cam_vmrm_gh_resource_request(vmid, CAM_RESOURCE_REQ_CLIENT_NAME, + resource, resource_cnt); + if (rc) + CAM_ERR(CAM_VMRM, "svm call resource request failed %d", rc); + } else { + rc = cam_vmrm_gh_resource_release(vmid, CAM_RESOURCE_REQ_CLIENT_NAME, + resource, resource_cnt); + if (rc) + CAM_ERR(CAM_VMRM, "pvm call resource request failed %d", rc); + } + +free_res: + CAM_MEM_FREE(resource); + + return rc; +} + +int cam_register_gh_res_callback(void) +{ + int rc = 0; + struct gh_resource_client *client; + + client = CAM_MEM_ZALLOC(sizeof(*client), GFP_KERNEL); + if (!client) { + CAM_ERR(CAM_VMRM, " gh resource client allocate memory failed"); + return -ENOMEM; + } + + client->cb = cam_vmrm_resource_cb; + scnprintf(client->subsys_name, sizeof(client->subsys_name), "%s", + CAM_RESOURCE_REQ_CLIENT_NAME); + + if (CAM_IS_PRIMARY_VM()) { + rc = cam_vmrm_gh_resource_register_req_client(client); + if (rc) { + CAM_ERR(CAM_VMRM, "PVM resource register req cb failed rc: %d", rc); + CAM_MEM_FREE(client); + goto end; + } + CAM_DBG(CAM_VMRM, "PVM resource register req cb succeed"); + } else { + rc = cam_vmrm_gh_resource_register_release_client(client); + if (rc) { + CAM_ERR(CAM_VMRM, "SVM resource register release cb failed rc: %d", rc); + CAM_MEM_FREE(client); + 
goto end; + } + CAM_DBG(CAM_VMRM, "SVM resource register release cb succeed"); + } + + g_vmrm_intf_dev->gh_res_client = client; + +end: + return rc; +} + +int cam_unregister_gh_res_callback(void) +{ + int rc = 0; + struct gh_resource_client *client; + + client = g_vmrm_intf_dev->gh_res_client; + if (!client) { + CAM_ERR(CAM_VMRM, "client is invalid"); + return -EINVAL; + } + + if (CAM_IS_PRIMARY_VM()) { + rc = cam_vmrm_gh_resource_unregister_req_client(client); + if (rc) { + CAM_ERR(CAM_VMRM, "PVM resource unregister req failed rc: %d", rc); + goto end; + } + CAM_DBG(CAM_VMRM, "PVM resource unregister req succeed"); + } else { + rc = cam_vmrm_gh_resource_unregister_release_client(client); + if (rc) { + CAM_ERR(CAM_VMRM, "SVM resource unregister release failed rc: %d", rc); + goto end; + } + CAM_DBG(CAM_VMRM, "SVM resource unregister release succeed"); + } + +end: + CAM_MEM_FREE(client); + return rc; +} +#else +int cam_vmrm_resource_cb(void) +{ + return -EOPNOTSUPP; +} + +int cam_vmrm_resource_req(struct cam_hw_instance *hw_pos, bool is_req) +{ + return -EOPNOTSUPP; +} + +int cam_register_gh_res_callback(void) +{ + return -EOPNOTSUPP; +} + +int cam_unregister_gh_res_callback(void) +{ + return -EOPNOTSUPP; +} +#endif + +void cam_unregister_gh_mem_callback(void) +{ + struct cam_hw_instance *hw, *hw_temp; + + if (!list_empty(&g_vmrm_intf_dev->hw_instance)) { + list_for_each_entry_safe(hw, hw_temp, + &g_vmrm_intf_dev->hw_instance, list) { + if (hw->mem_notify_handle) { + cam_vmrm_gh_mem_unregister_notifier(hw->mem_notify_handle); + CAM_DBG(CAM_VMRM, "RM unregister mem tag %d notifier succeed", + hw->resources.mem_tag); + } + } + } +} + +int cam_register_gh_mem_callback(void) +{ + int rc = 0; + struct cam_hw_instance *hw, *hw_temp; + uint32_t cam_mem_tag; + enum gh_mem_notifier_tag gh_mem_tag; + gh_mem_notifier_handler handler; + + if (CAM_IS_PRIMARY_VM()) + handler = cam_mem_notification_pvm_handler; + else + handler = cam_mem_notification_svm_handler; + + if 
(!list_empty(&g_vmrm_intf_dev->hw_instance)) {
		list_for_each_entry_safe(hw, hw_temp,
			&g_vmrm_intf_dev->hw_instance, list) {
			cam_mem_tag = hw->resources.mem_tag;
			gh_mem_tag = cam_mem_tag_convert_gh_tag(cam_mem_tag);
			/* Only tags mapped into the camera notifier range are registrable */
			if ((gh_mem_tag >= GH_MEM_NOTIFIER_TAG_CAM_BASE) &&
				(gh_mem_tag < GH_MEM_NOTIFIER_TAG_MAX)) {
				rc = cam_vmrm_gh_mem_register_notifier(gh_mem_tag, handler, NULL,
					&hw->mem_notify_handle);
				if (rc) {
					/* NOTE(review): earlier successful registrations are
					 * not rolled back here; caller is expected to invoke
					 * cam_unregister_gh_mem_callback() — confirm.
					 */
					CAM_ERR(CAM_VMRM,
						"VM %d register mem %d notifier failed rc: %d",
						cam_vmrm_intf_get_vmid(), cam_mem_tag, rc);
					goto end;
				}
				CAM_DBG(CAM_VMRM,
					"VM %d register hw_id 0x%x mem tag %d notifier succeed",
					cam_vmrm_intf_get_vmid(), hw->hw_id, cam_mem_tag);
			} else {
				CAM_ERR(CAM_VMRM, "invalid cam mem tag %d", cam_mem_tag);
				rc = -EINVAL;
				goto end;
			}
		}
	}

end:
	return rc;
}

/*
 * On an SVM, arm a wait-for-lend on every io irq so that the PVM's lend of
 * the interrupt can be accepted via cam_irq_notification_svm_handler.
 * No-op (returns 0) on the PVM.
 */
int cam_register_gh_irq_callback(void)
{
	int rc = 0;
	enum gh_vm_names vmname;
	enum gh_irq_label gh_irq_label_local;
	struct cam_io_irq_entry *irq, *irq_temp;

	if (CAM_IS_SECONDARY_VM()) {
		/* IRQs are lent by the primary VM */
		vmname = cam_vmid_to_gh_name(CAM_PVM);
		if (vmname == GH_VM_MAX) {
			CAM_ERR(CAM_VMRM, "get vmid failed");
			rc = -EINVAL;
			goto end;
		}

		if (!list_empty(&g_vmrm_intf_dev->io_res.irq)) {
			list_for_each_entry_safe(irq, irq_temp,
				&g_vmrm_intf_dev->io_res.irq, list) {
				/* Camera labels must map into gh's camera label range */
				gh_irq_label_local = cam_irq_label_convert_gh_label(irq->label);
				if ((gh_irq_label_local >= GH_IRQ_LABEL_CAM_BASE) &&
					(gh_irq_label_local < GH_IRQ_LABEL_MAX)) {
					rc = cam_vmrm_gh_irq_wait_for_lend_v2(gh_irq_label_local,
						vmname, cam_irq_notification_svm_handler, NULL);
					if (rc) {
						CAM_ERR(CAM_VMRM,
							"SVM RM register irq %d wait lend failed rc: %d",
							irq->label, rc);
						goto end;
					}
					CAM_DBG(CAM_VMRM,
						"SVM RM register irq number %d irq %d wait lend succeed",
						irq->irq_num, irq->label);
				} else {
					CAM_ERR(CAM_VMRM, "invalid cam irq label %d", irq->label);
					rc = -EINVAL;
					goto end;
				}
			}
		}
	}

end:
	return rc;
}

/* Dump one hw instance's soc resources (irq/mem/gpio) to the debug log */
void cam_vmrm_print_soc_resources(struct
cam_hw_instance *hw) +{ + int i; + + CAM_DBG(CAM_VMRM, + "hw id %d, hw name %s vm owner %d is using %d response result %d irq count %d mem count %d gpio count %d resource count %d valid mask %d ready mask %d", + hw->hw_id, hw->hw_name, hw->vm_owner, hw->is_using, hw->response_result, + hw->resources.irq_count, hw->resources.num_mem_block, hw->resources.gpio_count, + hw->resources.resource_cnt, hw->resources.valid_mask, hw->resources.ready_mask); + + for (i = 0; i < hw->resources.irq_count; i++) + CAM_DBG(CAM_VMRM, "index %d, irq name %s irq num %d irq label %d", i, + hw->resources.irq_name[i], hw->resources.irq_num[i], + hw->resources.irq_label[i]); + for (i = 0; i < hw->resources.num_mem_block; i++) + CAM_DBG(CAM_VMRM, + "index %d, mem name %s mem base 0x%x mem size 0x%x mem label mem tag", i, + hw->resources.mem_block_name[i], hw->resources.mem_block_addr[i], + hw->resources.mem_block_size[i], hw->resources.mem_label, + hw->resources.mem_tag); + for (i = 0; i < hw->resources.gpio_count; i++) + CAM_DBG(CAM_VMRM, + "index %d, gpio num %d gpio base 0x%x gpio size 0x%x mem label mem tag", i, + hw->resources.gpio_num[i], hw->resources.gpio_mem_addr[i], + hw->resources.gpio_mem_size[i], hw->resources.mem_label, + hw->resources.mem_tag); +} + +void cam_vmrm_print_hw_instances(struct cam_vmrm_intf_dev *vmrm_intf_dev) +{ + struct cam_hw_instance *hw_pos, *hw_temp; + + if (!list_empty(&vmrm_intf_dev->hw_instance)) { + list_for_each_entry_safe(hw_pos, + hw_temp, &vmrm_intf_dev->hw_instance, list) { + cam_vmrm_print_soc_resources(hw_pos); + } + } +} + +void cam_vmrm_print_driver_nodes(struct cam_vmrm_intf_dev *vmrm_intf_dev) +{ + struct cam_driver_node *driver_pos, *driver_temp; + + if (!list_empty(&vmrm_intf_dev->driver_node)) { + list_for_each_entry_safe(driver_pos, + driver_temp, &vmrm_intf_dev->driver_node, list) { + CAM_DBG(CAM_VMRM, "driver id %d, driver name %s response result %d", + driver_pos->driver_id, driver_pos->driver_name, + driver_pos->response_result); + } + 
} +} + +void cam_vmrm_print_io_res(struct cam_vmrm_intf_dev *vmrm_intf_dev) +{ + int i; + struct cam_io_irq_entry *irq, *irq_temp; + struct cam_io_mem_entry *mem, *mem_temp; + + if (!list_empty(&vmrm_intf_dev->io_res.irq)) { + list_for_each_entry_safe(irq, irq_temp, + &vmrm_intf_dev->io_res.irq, list) { + i = 0; + CAM_DBG(CAM_VMRM, "index %d irq label %d, num %d vm_owner %d", + i, irq->label, irq->irq_num, irq->vm_owner); + i++; + } + } + + if (!list_empty(&vmrm_intf_dev->io_res.mem)) { + list_for_each_entry_safe(mem, mem_temp, + &vmrm_intf_dev->io_res.mem, list) { + i = 0; + CAM_DBG(CAM_VMRM, + "index %d mem base 0x%x, size 0x%x vm_owner %d lend_in_progress %d", + i, mem->base, mem->size, mem->vm_owner, mem->lend_in_progress); + i++; + } + } +} + +void cam_vmrm_print_state(void) +{ + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + CAM_DBG(CAM_VMRM, + "VM resource manager vmid %d is_initialized %d gh_rr_enable %d proxy_voting_enable 0x%x", + vmrm_intf_dev->cam_vmid, vmrm_intf_dev->is_initialized, + vmrm_intf_dev->gh_rr_enable, vmrm_intf_dev->proxy_voting_enable); + + cam_vmrm_print_hw_instances(vmrm_intf_dev); + cam_vmrm_print_driver_nodes(vmrm_intf_dev); + cam_vmrm_print_io_res(vmrm_intf_dev); +} + +static int cam_vmrm_set_lend_all_resources_test(void *data, u64 val) +{ + int rc = 0; + struct cam_hw_instance *hw_pos, *hw_temp; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + CAM_INFO(CAM_VMRM, "lend all resources test starting"); + + if (cam_vmrm_intf_get_vmid() != CAM_PVM) { + CAM_WARN(CAM_VMRM, "lend all resources test only for pvm"); + return 0; + } + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + if (!list_empty(&vmrm_intf_dev->hw_instance)) { + list_for_each_entry_safe(hw_pos, + hw_temp, &vmrm_intf_dev->hw_instance, list) { + rc = cam_vmrm_lend_resources(hw_pos->hw_id, CAM_SVM1); + if (rc) { + CAM_ERR(CAM_VMRM, + "lend resources test for hw id failed: 0x%x, rc: %d", + hw_pos->hw_id, rc); + return 0; + } + } + } + + 
CAM_INFO(CAM_VMRM, "lend all resources test succeed"); + + return 0; +} + +static int cam_vmrm_get_lend_all_resources_test(void *data, u64 *val) +{ + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(cam_vmrm_lend_all_resources_test, + cam_vmrm_get_lend_all_resources_test, + cam_vmrm_set_lend_all_resources_test, "%16llu"); + +static ssize_t cam_vmrm_get_acquire_resources_test(struct file *file, + char __user *ubuf, size_t size, loff_t *loff_t) +{ + return 0; +} + +static ssize_t cam_vmrm_set_acquire_resources_test(struct file *file, + const char __user *ubuf, size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[CAM_BUF_SIZE_MAX] = {'\0'}; + uint32_t hw_id; + + if (size >= CAM_BUF_SIZE_MAX) { + CAM_ERR(CAM_VMRM, "%d size more than max size %d", size, CAM_BUF_SIZE_MAX); + return -EINVAL; + } + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) { + CAM_ERR(CAM_VMRM, "copy_from_user failed"); + return -EFAULT; + } + + if (kstrtou32(input_buf, CAM_HEXADECIMAL, &hw_id)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + + CAM_INFO(CAM_VMRM, "acquire resources test for hw id 0x%x", hw_id); + + rc = cam_vmrm_soc_acquire_resources(hw_id); + if (rc) { + CAM_ERR(CAM_VMRM, "acquire resources test for hw id failed: 0x%x, rc: %d", + hw_id, rc); + return rc; + } + + CAM_INFO(CAM_VMRM, "hw id 0x%x acquire resources test succeed", hw_id); + + return size; +} + +static const struct file_operations cam_vmrm_acquire_resources_test = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_vmrm_get_acquire_resources_test, + .write = cam_vmrm_set_acquire_resources_test, +}; + +static ssize_t cam_vmrm_get_release_resources_test(struct file *file, + char __user *ubuf, size_t size, loff_t *loff_t) +{ + return 0; +} + +static ssize_t cam_vmrm_set_release_resources_test(struct file *file, + const char __user *ubuf, size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[CAM_BUF_SIZE_MAX] = {'\0'}; + uint32_t hw_id; + + if (size >= CAM_BUF_SIZE_MAX) { + 
CAM_ERR(CAM_VMRM, "%d size more than max size %d", size, CAM_BUF_SIZE_MAX); + return -EINVAL; + } + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) { + CAM_ERR(CAM_VMRM, "copy_from_user failed"); + return -EFAULT; + } + + if (kstrtou32(input_buf, CAM_HEXADECIMAL, &hw_id)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + + CAM_INFO(CAM_VMRM, "release resources test for hw id 0x%x", hw_id); + + rc = cam_vmrm_soc_release_resources(hw_id); + if (rc) { + CAM_ERR(CAM_VMRM, "release resources test for hw id failed: 0x%x, rc: %d", + hw_id, rc); + return rc; + } + + CAM_INFO(CAM_VMRM, "hw id 0x%x release resources test succeed", hw_id); + + return size; +} + +static const struct file_operations cam_vmrm_release_resources_test = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_vmrm_get_release_resources_test, + .write = cam_vmrm_set_release_resources_test, +}; + +static ssize_t cam_vmrm_get_gh_lend_resources_test( + struct file *file, char __user *ubuf, + size_t size, loff_t *loff_t) +{ + return 0; +} + +static ssize_t cam_vmrm_set_gh_lend_resources_test(struct file *file, + const char __user *ubuf, size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[CAM_BUF_SIZE_MAX] = {'\0'}; + uint32_t hw_id; + + if (size >= CAM_BUF_SIZE_MAX) { + CAM_ERR(CAM_VMRM, "%d size more than max size %d", size, CAM_BUF_SIZE_MAX); + return -EINVAL; + } + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) { + CAM_ERR(CAM_VMRM, "copy_from_user failed"); + return -EFAULT; + } + + if (kstrtou32(input_buf, CAM_HEXADECIMAL, &hw_id)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + + CAM_INFO(CAM_VMRM, "lend resources test for hw id 0x%x", hw_id); + + if (cam_vmrm_intf_get_vmid() != CAM_PVM) { + CAM_WARN(CAM_VMRM, "gh lend resources test only for pvm"); + return size; + } + + rc = cam_vmrm_lend_resources(hw_id, CAM_SVM1); + if (rc) { + CAM_ERR(CAM_VMRM, "lend resources test for hw id failed: 0x%x, rc: %d", + hw_id, rc); + return 
rc; + } + + CAM_INFO(CAM_VMRM, "hw id 0x%x lend resources test succeed", hw_id); + + return size; +} + +static const struct file_operations cam_vmrm_gh_lend_resources_test = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_vmrm_get_gh_lend_resources_test, + .write = cam_vmrm_set_gh_lend_resources_test, +}; + +static ssize_t cam_vmrm_get_gh_release_resources_test(struct file *file, + char __user *ubuf, size_t size, loff_t *loff_t) +{ + return 0; +} + +static ssize_t cam_vmrm_set_gh_release_resources_test(struct file *file, + const char __user *ubuf, size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[CAM_BUF_SIZE_MAX] = {'\0'}; + uint32_t hw_id; + + if (size >= CAM_BUF_SIZE_MAX) { + CAM_ERR(CAM_VMRM, "%d size more than max size %d", size, CAM_BUF_SIZE_MAX); + return -EINVAL; + } + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) { + CAM_ERR(CAM_VMRM, "copy_from_user failed"); + return -EFAULT; + } + + if (kstrtou32(input_buf, CAM_HEXADECIMAL, &hw_id)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + + CAM_INFO(CAM_VMRM, "release resources test for hw id 0x%x", hw_id); + + if (cam_vmrm_intf_get_vmid() == CAM_PVM) { + CAM_WARN(CAM_VMRM, "gh release resources test only for svm"); + return size; + } + + rc = cam_vmrm_release_resources(hw_id); + if (rc) { + CAM_ERR(CAM_VMRM, "release resources test for hw id failed: 0x%x, rc: %d", + hw_id, rc); + return rc; + } + + CAM_INFO(CAM_VMRM, "hw id 0x%x release resources test succeed", hw_id); + + return size; +} + +static const struct file_operations cam_vmrm_gh_release_resources_test = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_vmrm_get_gh_release_resources_test, + .write = cam_vmrm_set_gh_release_resources_test, +}; + +static ssize_t cam_vmrm_get_soc_resources_enable_test( + struct file *file, char __user *ubuf, + size_t size, loff_t *loff_t) +{ + return 0; +} + +static ssize_t cam_vmrm_set_soc_resources_enable_test(struct file *file, + const char __user 
*ubuf, size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[CAM_BUF_SIZE_MAX] = {'\0'}; + uint32_t hw_id; + + if (size >= CAM_BUF_SIZE_MAX) { + CAM_ERR(CAM_VMRM, "%d size more than max size %d", size, CAM_BUF_SIZE_MAX); + return -EINVAL; + } + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) { + CAM_ERR(CAM_VMRM, "copy_from_user failed"); + return -EFAULT; + } + + if (kstrtou32(input_buf, CAM_HEXADECIMAL, &hw_id)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + + CAM_INFO(CAM_VMRM, "soc resources enable request test for hw id 0x%x", hw_id); + + if (cam_vmrm_intf_get_vmid() == CAM_PVM) { + CAM_WARN(CAM_VMRM, "soc resources enable test only for svm"); + return size; + } + + rc = cam_vmrm_soc_enable_disable_resources(hw_id, true); + if (rc) { + CAM_ERR(CAM_VMRM, "soc enable for hw id failed: 0x%x, rc: %d", hw_id, rc); + return rc; + } + + CAM_INFO(CAM_VMRM, "hw id 0x%x soc resources enable request test succeed", hw_id); + + return size; +} + +static const struct file_operations cam_vmrm_soc_resources_enable_test = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_vmrm_get_soc_resources_enable_test, + .write = cam_vmrm_set_soc_resources_enable_test, +}; + +static ssize_t cam_vmrm_get_soc_resources_disable_test(struct file *file, + char __user *ubuf, size_t size, loff_t *loff_t) +{ + return 0; +} + +static ssize_t cam_vmrm_set_soc_resources_disable_test(struct file *file, + const char __user *ubuf, size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[CAM_BUF_SIZE_MAX] = {'\0'}; + uint32_t hw_id; + + if (size >= CAM_BUF_SIZE_MAX) { + CAM_ERR(CAM_VMRM, "%d size more than max size %d", size, CAM_BUF_SIZE_MAX); + return -EINVAL; + } + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) { + CAM_ERR(CAM_VMRM, "copy_from_user failed"); + return -EFAULT; + } + + if (kstrtou32(input_buf, CAM_HEXADECIMAL, &hw_id)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + + CAM_INFO(CAM_VMRM, "soc 
resources disable request test for hw id 0x%x", hw_id); + + if (cam_vmrm_intf_get_vmid() == CAM_PVM) { + CAM_WARN(CAM_VMRM, "soc resources disable test only for svm"); + return size; + } + + rc = cam_vmrm_soc_enable_disable_resources(hw_id, false); + if (rc) { + CAM_ERR(CAM_VMRM, "soc disable for hw id failed: 0x%x, rc: %d", hw_id, rc); + return rc; + } + + CAM_INFO(CAM_VMRM, "hw id 0x%x soc resources disable request test succeed", hw_id); + + return size; +} + +static const struct file_operations cam_vmrm_soc_resources_disable_test = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_vmrm_get_soc_resources_disable_test, + .write = cam_vmrm_set_soc_resources_disable_test, +}; + +static ssize_t cam_vmrm_get_driver_node_msg_test( + struct file *file, char __user *ubuf, + size_t size, loff_t *loff_t) +{ + return 0; +} + +static ssize_t cam_vmrm_set_driver_node_msg_test(struct file *file, + const char __user *ubuf, size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[CAM_BUF_SIZE_MAX] = {'\0'}; + uint32_t driver_id; + + if (size >= CAM_BUF_SIZE_MAX) { + CAM_ERR(CAM_VMRM, "%d size more than max size %d", size, CAM_BUF_SIZE_MAX); + return -EINVAL; + } + + if (copy_from_user(input_buf, ubuf, sizeof(input_buf))) { + CAM_ERR(CAM_VMRM, "copy_from_user failed"); + return -EFAULT; + } + + if (kstrtou32(input_buf, CAM_HEXADECIMAL, &driver_id)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + + CAM_INFO(CAM_VMRM, "test for driver id 0x%x", driver_id); + + if (cam_vmrm_intf_get_vmid() == CAM_PVM) { + CAM_WARN(CAM_VMRM, "driver node msg test only for svm"); + return size; + } + + rc = cam_vmrm_send_driver_msg_wrapper(CAM_PVM, driver_id, CAM_MSG_TYPE_MAX, false, true, + NULL, 1, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "send msg failed for driver node: 0x%x, rc: %d", driver_id, rc); + return rc; + } + + return size; +} + +static const struct file_operations cam_vmrm_driver_node_msg_test = { + .owner = THIS_MODULE, + .open = simple_open, + .read = 
cam_vmrm_get_driver_node_msg_test, + .write = cam_vmrm_set_driver_node_msg_test, +}; + +static ssize_t cam_vmrm_get_clk_rate_set_test(struct file *file, + char __user *ubuf, size_t size, loff_t *loff_t) +{ + return 0; +} + +static ssize_t cam_vmrm_set_clk_rate_set_test(struct file *file, + const char __user *ubuf, size_t size, loff_t *loff_t) +{ + int rc = 0; + char input_buf[CAM_BUF_SIZE_MAX] = {'\0'}; + uint32_t hw_id; + uint32_t clk_rate = 0; + char *msg = NULL; + char *token = NULL; + + if (size >= CAM_BUF_SIZE_MAX) { + CAM_ERR(CAM_VMRM, "%d size more than max size %d", size, CAM_BUF_SIZE_MAX); + return -EINVAL; + } + + if (copy_from_user(input_buf, ubuf, size)) { + CAM_ERR(CAM_VMRM, "copy_from_user failed"); + return -EFAULT; + } + + msg = input_buf; + token = strsep(&msg, ":"); + if (token != NULL) { + if (kstrtou32(token, CAM_HEXADECIMAL, &hw_id)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + } + + if (msg != NULL) { + if (kstrtou32(msg, 0, &clk_rate)) { + CAM_ERR(CAM_VMRM, "kstrtou32 failed"); + return -EINVAL; + } + } + + clk_rate = clk_rate * CAM_MHZ; + + CAM_INFO(CAM_VMRM, "clk rate set test hw id %x clk_rate %d", hw_id, clk_rate); + + if (cam_vmrm_intf_get_vmid() == CAM_PVM) { + CAM_WARN(CAM_VMRM, "clk rate set test only for svm"); + return size; + } + + rc = cam_vmrm_set_src_clk_rate(hw_id, -1, clk_rate, clk_rate); + if (rc) { + CAM_ERR(CAM_VMRM, "set src clk rate for hw id failed: 0x%x, rc: %d", hw_id, rc); + return rc; + } + + CAM_INFO(CAM_VMRM, "hw id 0x%x clk rate set test succeed", hw_id); + + return size; +} + +static const struct file_operations cam_vmrm_clk_rate_set_test = { + .owner = THIS_MODULE, + .open = simple_open, + .read = cam_vmrm_get_clk_rate_set_test, + .write = cam_vmrm_set_clk_rate_set_test, +}; + +static int cam_vmrm_debugfs_init(struct cam_vmrm_intf_dev *vmrm_dev) +{ + struct dentry *dbgfileptr = NULL; + int rc = 0; + + if (!cam_debugfs_available()) + return 0; + + rc = cam_debugfs_create_subdir("vmrm", 
&dbgfileptr);
	if (rc) {
		CAM_ERR(CAM_VMRM, "DebugFS could not create directory!");
		return rc;
	}

	vmrm_dev->dentry = dbgfileptr;

	/* Each file below is a manual test/control hook; writing to it triggers
	 * the corresponding vmrm operation (see the matching *_test fops).
	 */
	debugfs_create_file("proxy_voting_enable", 0644, vmrm_dev->dentry, NULL,
		&cam_vmrm_proxy_voting_enable);

	debugfs_create_file("cam_vmrm_lend_all_resources_test", 0644, vmrm_dev->dentry, NULL,
		&cam_vmrm_lend_all_resources_test);

	debugfs_create_file("cam_vmrm_acquire_resources_test", 0644,
		vmrm_dev->dentry, NULL, &cam_vmrm_acquire_resources_test);

	debugfs_create_file("cam_vmrm_release_resources_test", 0644,
		vmrm_dev->dentry, NULL, &cam_vmrm_release_resources_test);

	debugfs_create_file("cam_vmrm_gh_lend_resources_test", 0644,
		vmrm_dev->dentry, NULL, &cam_vmrm_gh_lend_resources_test);

	debugfs_create_file("cam_vmrm_gh_release_resources_test", 0644,
		vmrm_dev->dentry, NULL, &cam_vmrm_gh_release_resources_test);

	debugfs_create_file("cam_vmrm_soc_resources_enable_test", 0644,
		vmrm_dev->dentry, NULL, &cam_vmrm_soc_resources_enable_test);

	debugfs_create_file("cam_vmrm_soc_resources_disable_test", 0644,
		vmrm_dev->dentry, NULL, &cam_vmrm_soc_resources_disable_test);

	debugfs_create_file("cam_vmrm_driver_node_msg_test", 0644,
		vmrm_dev->dentry, NULL, &cam_vmrm_driver_node_msg_test);

	debugfs_create_file("cam_vmrm_clk_rate_set_test", 0644,
		vmrm_dev->dentry, NULL, &cam_vmrm_clk_rate_set_test);

	return 0;
}

/*
 * Component bind: allocate the global vmrm device, parse DT, set up the
 * inter-VM comms channel (qrtr) and debugfs, then record bind latency.
 */
static int cam_vmrm_intf_bind(struct device *dev,
	struct device *parent_dev, void *data)
{
	int rc = 0;
	struct platform_device *pdev;
	bool is_server_vm = false;
	struct timespec64 ts_start, ts_end;
	long microsec = 0;

	CAM_GET_TIMESTAMP(ts_start);
	pdev = to_platform_device(dev);

	g_vmrm_intf_dev = CAM_MEM_ZALLOC(sizeof(struct cam_vmrm_intf_dev), GFP_KERNEL);
	if (!g_vmrm_intf_dev) {
		CAM_ERR(CAM_VMRM, "VM resource manager device allocate failed");
		rc = -ENOMEM;
		return rc;
	}

	rc = cam_vmrm_dt_parse(pdev, g_vmrm_intf_dev);
	if (rc) {
		CAM_ERR(CAM_VMRM,
"Device tree parsing failed rc: %d", rc); + return rc; + } + + mutex_init(&g_vmrm_intf_dev->lock); + INIT_LIST_HEAD(&g_vmrm_intf_dev->hw_instance); + INIT_LIST_HEAD(&g_vmrm_intf_dev->driver_node); + INIT_LIST_HEAD(&g_vmrm_intf_dev->io_res.irq); + INIT_LIST_HEAD(&g_vmrm_intf_dev->io_res.mem); + + g_vmrm_intf_dev->ops_table = CAM_MEM_ZALLOC(sizeof(struct cam_inter_vm_comms_ops), + GFP_KERNEL); + if (!g_vmrm_intf_dev->ops_table) { + CAM_ERR(CAM_VMRM, "allocate qrtr function table mem failed"); + rc = -ENOMEM; + goto free_vmrm_intf_dev; + } + rc = cam_get_inter_vm_comms_function_table(g_vmrm_intf_dev->ops_table); + if (rc) { + CAM_ERR(CAM_VMRM, "get qrtr function table failed %d", rc); + goto free_ops_table; + } + + if (CAM_IS_PRIMARY_VM()) + is_server_vm = true; + rc = g_vmrm_intf_dev->ops_table->init(&g_vmrm_intf_dev->vm_handle, + CAM_VMRM_RECV_MSG_BUF_SIZE, cam_vmrm_msg_handle, is_server_vm); + if (rc) { + CAM_ERR(CAM_VMRM, "qrtr initialize failed %d", rc); + goto free_ops_table; + } + + cam_vmrm_debugfs_init(g_vmrm_intf_dev); + g_vmrm_intf_dev->is_initialized = true; + + CAM_DBG(CAM_VMRM, "VMRM %d name %s driver bind succeed", g_vmrm_intf_dev->cam_vmid, + CAM_GET_VM_NAME()); + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + cam_record_bind_latency(pdev->name, microsec); + + return rc; +free_ops_table: + CAM_MEM_FREE(g_vmrm_intf_dev->ops_table); +free_vmrm_intf_dev: + CAM_MEM_FREE(g_vmrm_intf_dev); + return rc; +} + +static void cam_vmrm_intf_unbind(struct device *dev, + struct device *parent_dev, void *data) +{ + int rc = 0; + + CAM_DBG(CAM_VMRM, "Unbinding cam_vmrm_intf driver vmid %d", g_vmrm_intf_dev->cam_vmid); + + if (!list_empty(&g_vmrm_intf_dev->io_res.mem)) + cam_clean_io_mem(&g_vmrm_intf_dev->io_res.mem); + if (!list_empty(&g_vmrm_intf_dev->io_res.irq)) + cam_clean_io_irq(&g_vmrm_intf_dev->io_res.irq); + if (!list_empty(&g_vmrm_intf_dev->driver_node)) + cam_clean_driver_node(&g_vmrm_intf_dev->driver_node); + if 
(!list_empty(&g_vmrm_intf_dev->hw_instance)) + cam_clean_hw_instance(&g_vmrm_intf_dev->hw_instance); + + rc = g_vmrm_intf_dev->ops_table->deinit(g_vmrm_intf_dev->vm_handle); + if (rc) + CAM_ERR(CAM_VMRM, "deinit internal vm communication failed"); + + mutex_destroy(&g_vmrm_intf_dev->lock); + CAM_MEM_FREE(g_vmrm_intf_dev->ops_table); + CAM_MEM_FREE(g_vmrm_intf_dev); +} + +static const struct component_ops cam_vmrm_intf_ops = { + .bind = cam_vmrm_intf_bind, + .unbind = cam_vmrm_intf_unbind, +}; + +static int cam_vmrm_intf_probe(struct platform_device *pdev) +{ + int rc; + + CAM_DBG(CAM_VMRM, "Probe for vmrm_intf driver"); + + rc = component_add(&pdev->dev, &cam_vmrm_intf_ops); + if (rc) { + CAM_ERR(CAM_VMRM, "Adding vmrm_intf Driver to the component list failed. rc: %d", + rc); + return rc; + } + + return rc; +} + +static int cam_vmrm_intf_remove(struct platform_device *pdev) +{ + CAM_DBG(CAM_VMRM, "Removing the vmrm_intf driver"); + component_del(&pdev->dev, &cam_vmrm_intf_ops); + return 0; +} + +struct platform_driver cam_vmrm_intf_driver = { + .probe = cam_vmrm_intf_probe, + .remove = cam_vmrm_intf_remove, + .driver = { + .name = "msm_cam_vmrm_intf", + .owner = THIS_MODULE, + .of_match_table = msm_cam_vmrm_intf_dt_match, + .suppress_bind_attrs = true, + }, +}; + +int cam_vmrm_module_init(void) +{ + return platform_driver_register(&cam_vmrm_intf_driver); +} + +void cam_vmrm_module_exit(void) +{ + platform_driver_unregister(&cam_vmrm_intf_driver); +} + +MODULE_DESCRIPTION("MSM Camera VM Resource Manager Driver"); +MODULE_LICENSE("GPL"); diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm.h b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm.h new file mode 100644 index 0000000000..b16717b4e0 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm.h @@ -0,0 +1,434 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "cam_soc_util.h" +#include "cam_vmrm_gh_wrapper.h" +#include "cam_vmrm_msg_handler.h" +#include "cam_inter_vm_comms.h" +#include "cam_qrtr_comms.h" +#include + +#ifndef _CAM_VMRM_H_ +#define _CAM_VMRM_H_ + +#define CAM_GET_VM_NAME() ((cam_vmrm_intf_get_vmid() == CAM_PVM) ? "PVM" : "TVM") + +#define CAM_BUF_SIZE_MAX 32 +#define CAM_HEXADECIMAL 16 +#define CAM_MHZ 1000000 + +/* message destination id for hw instance */ +#define CAM_HW_ID_BASE 0x3000 +#define CAM_HW_ID_CSIPHY0 (CAM_HW_ID_BASE + 0) +#define CAM_HW_ID_CCI0 (CAM_HW_ID_BASE + 0x100) +#define CAM_HW_ID_CPAS (CAM_HW_ID_BASE + 0x200) +#define CAM_HW_ID_CDM0 (CAM_HW_ID_BASE + 0x300) +#define CAM_HW_ID_SFE0 (CAM_HW_ID_BASE + 0x400) +#define CAM_HW_ID_CSID0 (CAM_HW_ID_BASE + 0x500) +#define CAM_HW_ID_IFE0 (CAM_HW_ID_BASE + 0x600) +#define CAM_HW_ID_ICP (CAM_HW_ID_BASE + 0x700) +#define CAM_HW_ID_IPE (CAM_HW_ID_BASE + 0x800) +#define CAM_HW_ID_BPS (CAM_HW_ID_BASE + 0x900) +#define CAM_HW_ID_OFE (CAM_HW_ID_BASE + 0xa00) +#define CAM_HW_ID_SENSOR0 (CAM_HW_ID_BASE + 0xb00) +#define CAM_HW_ID_EEPROM0 (CAM_HW_ID_BASE + 0xc00) + +/* message destination id for driver node*/ +#define CAM_DRIVER_ID_BASE 0xa000 +#define CAM_DRIVER_ID_ISP (CAM_DRIVER_ID_BASE + 0) +#define CAM_DRIVER_ID_ICP (CAM_DRIVER_ID_BASE + 0x100) +#define CAM_DRIVER_ID_CPAS (CAM_DRIVER_ID_BASE + 0x200) + +/* + * camera irq label, there is a similar irq label defined in + * gh side need to map the camera irq label with gh irq label + * gh use irq label to manager the irq lend and release. Each + * irq need to define label in dtsi. + */ +#define CAM_IRQ_LABEL_BASE 1 +#define CAM_IRQ_LABEL_MAX 50 + +/* + * camera io mem notifier tag, there is a similar mem tag defined + * in gh side need to map the camera mem tag with gh mem tag + * gh use mem tag to notify lend/release/accept mem. Currently + * define one mem tag for each hw instance in dtsi. 
+ */ +#define CAM_MEM_TAG_BASE 1 +#define CAM_MEM_TAG_MAX 50 + +/* + * camera io mem label, gh use camera mem label to lend mem. + * Currently define one mem label for each hw instance in dtsi. + */ +#define CAM_MEM_LABEL_BASE 1 +#define CAM_MEM_LABEL_MAX 50 + +/* camera memory type */ +#define CAM_MEM_TYPE_NORMAL 0 +#define CAM_MEM_TYPE_IO 1 + +/* message receive buffer size */ +#define CAM_VMRM_RECV_MSG_BUF_SIZE 256 + +/* default inter vm messaging timeout*/ +#define CAM_VMRM_INTER_VM_MSG_TIMEOUT 500 + +/* resource request client name*/ +#define CAM_RESOURCE_REQ_CLIENT_NAME "cam_vmrm" + +/* message callback */ +typedef int (*msg_cb_func) (void *cb_data, void *msg, uint32_t size); + +/** + * struct cam_soc_resources - camera soc resources object for the hw + * + * @irq_count: Interrupt count + * @irq_name: Interrupt name + * @irq_num: Interrupt number + * @num_mem_block: Io memory block count + * @mem_block_name: Io memory block name + * @mem_block_addr: Io memory block base address + * @mem_block_size: Io memory block size + * @gpio_count: Gpio count + * @gpio_num: Gpio number + * @gpio_mem_addr: Gpio mem address array + * @gpio_mem_size: Gpio mem size array + * @resource_cnt: Resource counter + * @mem_label: Mem label + * @mem_tag: Mem tag + * @irq_label: Irq label array + * @valid_mask: Valid resource mask, bit map 0/1/2 is mem/irq1/irq2 + * @ready_mask: Resource ready mask, when the reource lent or reclaim + */ +struct cam_soc_resources { + uint32_t irq_count; + char irq_name[CAM_SOC_MAX_IRQ_LINES_PER_DEV][CAM_SOC_MAX_LENGTH_NAME]; + uint32_t irq_num[CAM_SOC_MAX_IRQ_LINES_PER_DEV]; + uint32_t num_mem_block; + char mem_block_name[CAM_SOC_MAX_BLOCK][CAM_SOC_MAX_LENGTH_NAME]; + uint32_t mem_block_addr[CAM_SOC_MAX_BLOCK]; + uint32_t mem_block_size[CAM_SOC_MAX_BLOCK]; + uint32_t gpio_count; + uint32_t gpio_num[CAM_SOC_MAX_GPIO]; + uint32_t gpio_mem_addr[CAM_SOC_MAX_GPIO]; + uint32_t gpio_mem_size[CAM_SOC_MAX_GPIO]; + uint32_t resource_cnt; + uint32_t mem_label; + 
uint32_t mem_tag;
	uint32_t irq_label[CAM_SOC_MAX_IRQ_LINES_PER_DEV];
	uint32_t valid_mask;
	uint32_t ready_mask;
};

/**
 * struct cam_hw_instance - camera hw instance
 *
 * @hw_id:                   Hw id
 * @hw_name:                 Hw name
 * @resources:               Camera soc resources
 * @handle:                  Memory handle
 * @mem_notify_handle:       Memory notify handle
 * @hw_msg_callback:         Hw message callback
 * @hw_msg_callback_data:    Data pass to callback
 * @registered_rr_callback:  Flag for registered request or release resource callback or not
 * @vm_owner:                VM owner
 * @is_using:                Hw is using or not
 * @wait_response:           Completion info
 * @response_result:         Response result
 * @lend_in_progress:        Whether lend is in progress
 * @spin_lock:               Spin lock
 * @msg_comm_lock:           Message communication lock
 * @ref_count:               Hw acquired ref count
 * @list:                    Link list entry
 */
struct cam_hw_instance {
	uint32_t hw_id;
	char hw_name[CAM_SOC_MAX_LENGTH_NAME];
	struct cam_soc_resources resources;
	uint32_t handle;
	void *mem_notify_handle;
	msg_cb_func hw_msg_callback;
	void *hw_msg_callback_data;
	bool registered_rr_callback;
	uint32_t vm_owner;
	bool is_using;
	struct completion wait_response;
	int response_result;
	atomic_t lend_in_progress;
	spinlock_t spin_lock;
	struct mutex msg_comm_lock;
	int32_t ref_count;
	struct list_head list;
};

/**
 * struct cam_driver_node - camera driver node
 *
 * @driver_id:                Driver id
 * @driver_name:              Driver name
 * @driver_msg_callback:      Driver message callback
 * @driver_msg_callback_data: Data pass to callback
 * @wait_response:            Completion info
 * @response_result:          Response result
 * @msg_comm_lock:            Message communication lock
 * @list:                     Link list entry
 */
struct cam_driver_node {
	uint32_t driver_id;
	char driver_name[CAM_SOC_MAX_LENGTH_NAME];
	msg_cb_func driver_msg_callback;
	void *driver_msg_callback_data;
	struct completion wait_response;
	int response_result;
	struct mutex msg_comm_lock;
	struct list_head list;
};

/**
 * struct
cam_io_irq_entry - camera io interrupt entry + * + * @label: Interrupt label for gh using + * @irq_num: Interrupt number + * @vm_owner VM owner + * @list: Link list entry + */ +struct cam_io_irq_entry { + uint32_t label; + uint32_t irq_num; + uint32_t vm_owner; + struct list_head list; +}; + +/** + * struct cam_io_mem_entry - camera io memory entry + * + * @base: Memory base address + * @size: Memory size + * @vm_owner VM owner + * @lend_in_progress Lend in progress + * @list: Link list entry + */ +struct cam_io_mem_entry { + phys_addr_t base; + phys_addr_t size; + uint32_t vm_owner; + bool lend_in_progress; + struct list_head list; +}; + +/** + * struct cam_io_res - camera io resource entry + * + * @irq: Interrupt resource list + * @mem: Io memory resource list + */ +struct cam_io_res { + struct list_head irq; + struct list_head mem; +}; + +/** + * struct cam_vmrm_intf_dev - camera vm resource manager + * + * @hw_instance: Hw instance registered list + * @driver_node: Driver node registered list + * @io_res: Io resource list + * @is_initialized: Flag for vm resource manager initialized or not + * @gh_rr_enable: Gh request resource enable or not + * @gh_res_client: Gh resource client + * @proxy_voting_enable: Proxy voting enable mask, support clk/icc proxy voting. + * @no_register_read_on_bind: Workaround no register read on bind for tvm. 
+ * @lock: Mutex lock + * @cam_vmid: Camera vm id, such as PVM, SVM1 etc + * @dentry: Debugfs entry + * @vm_handle: Vm internal communication handle + * @lend_cnt: Lend count + * @ops_table: Operations table + */ +struct cam_vmrm_intf_dev { + struct list_head hw_instance; + struct list_head driver_node; + struct cam_io_res io_res; + bool is_initialized; + bool gh_rr_enable; + struct gh_resource_client *gh_res_client; + uint32_t proxy_voting_enable; + bool no_register_read_on_bind; + struct mutex lock; + uint32_t cam_vmid; + struct dentry *dentry; + void *vm_handle; + int lend_cnt; + struct cam_inter_vm_comms_ops *ops_table; +}; + +/** + * cam_vmrm_msg_handle() + * + * @brief: When receive message, handle message in vm resource manager + * + * @msg: Received message address + * @size: Receive message size + * @res: Inter vm response address + */ +void cam_vmrm_msg_handle(void *msg, size_t size, struct cam_intervm_response *res); + +/** + * cam_vmrm_module_init() + * + * @brief: Vmrm module initialize + * + */ +int cam_vmrm_module_init(void); + +/** + * cam_vmrm_module_exit() + * + * @brief: Vmrm module exit + * + */ +void cam_vmrm_module_exit(void); + + +/** + * cam_vmrm_get_intf_dev() + * + * @brief: Get vmrm interface device + * + */ +struct cam_vmrm_intf_dev *cam_vmrm_get_intf_dev(void); + +/** + * cam_register_gh_res_callback() + * + * @brief: Register gh resource callback + * + */ +int cam_register_gh_res_callback(void); + +/** + * cam_register_gh_mem_callback() + * + * @brief: Register gh mem callback + * + */ +int cam_register_gh_mem_callback(void); + +/** + * cam_register_gh_irq_callback() + * + * @brief: Register gh irq callback + * + */ +int cam_register_gh_irq_callback(void); + +/** + * cam_unregister_gh_mem_callback() + * + * @brief: Unregister gh mem callback + * + */ +void cam_unregister_gh_mem_callback(void); + +/** + * cam_unregister_gh_res_callback() + * + * @brief: Unregister gh res callback + * + */ +int cam_unregister_gh_res_callback(void); + 
+/** + * cam_check_hw_instance_available() + * + * @brief: Check hw instance if available + * + * @hw_id: Hw id + */ +struct cam_hw_instance *cam_check_hw_instance_available(uint32_t hw_id); + +/** + * cam_vmrm_intf_get_vmid() + * + * @brief: Get camera vm id + * + */ +uint32_t cam_vmrm_intf_get_vmid(void); + +/** + * cam_vmrm_resource_req() + * + * @brief: Resource request + * + * @hw_pos: Hw instance + */ +int cam_vmrm_resource_req(struct cam_hw_instance *hw_pos, bool is_req); + +/** + * cam_vmrm_release_resources() + * + * @brief: Release resources + * + * @hw_id: Hw id + */ +int cam_vmrm_release_resources(uint32_t hw_id); + +/** + * cam_hw_instance_lookup() + * + * @brief: Lookup hw instance + * + * @hw_id: Hw id + * @mem_tag: Mem tag + */ +struct cam_hw_instance *cam_hw_instance_lookup(uint32_t hw_id, uint32_t mem_tag, + uint32_t irq_label, uint32_t gpio_num); + +/** + * cam_driver_node_lookup() + * + * @brief: Lookup driver node + * + * @driver_id: Driver node id + */ +struct cam_driver_node *cam_driver_node_lookup(uint32_t driver_id); + +/** + * cam_populate_irq_resource_info() + * + * @brief: Populate irq resource info + * + * @hw_pos: Hw instance + */ +int cam_populate_irq_resource_info(struct cam_hw_instance *hw_pos); + +/** + * cam_populate_mem_resource_info() + * + * @brief: Populate mem resource info + * + * @hw_pos: Hw instance + */ +int cam_populate_mem_resource_info(struct cam_hw_instance *hw_pos); + +/** + * cam_populate_gpio_resource_info() + * + * @brief: Populate gpio resource info + * + * @hw_pos: Hw instance + */ +int cam_populate_gpio_resource_info(struct cam_hw_instance *hw_pos); + +/** + * cam_free_io_resource_info() + * + * @brief: Free io resources + * + */ +void cam_free_io_resource_info(void); + +#endif /* _CAM_VMRM_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_gh_wrapper.c b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_gh_wrapper.c new file mode 100644 index 0000000000..7a4fdaaddc --- /dev/null 
+++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_gh_wrapper.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_vmrm_gh_wrapper.h" + +int cam_vmrm_gh_irq_lend_v2(enum gh_irq_label label, enum gh_vm_names name, + int irq, gh_irq_handle_fn_v2 cb_handle, void *data) +{ + int rc = 0; + + rc = gh_irq_lend_v2(label, name, irq, cb_handle, data); + if (rc) { + CAM_ERR(CAM_VMRM, "IRQ lend failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "IRQ lent success for label: %d", label); + + return rc; +} + +int cam_vmrm_gh_irq_lend_notify(enum gh_irq_label label) +{ + int rc = 0; + + rc = gh_irq_lend_notify(label); + if (rc) { + CAM_ERR(CAM_VMRM, "IRQ lend notify failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "IRQ lent notify success for label: %d", label); + + return rc; +} + +int cam_vmrm_gh_irq_wait_for_lend_v2(enum gh_irq_label label, + enum gh_vm_names name, gh_irq_handle_fn_v2 on_lend, void *data) +{ + int rc = 0; + + rc = gh_irq_wait_for_lend_v2(label, name, on_lend, data); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq wait for lend failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Irq wait for lend succeed for label: %d", label); + + return rc; +} + +int cam_vmrm_gh_irq_accept(enum gh_irq_label label, int irq, int type) +{ + int rc = 0; + + rc = gh_irq_accept(label, irq, type); + if (rc < 0) { + CAM_ERR(CAM_VMRM, "Irq accept failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Irq accept succeed for label: %d", label); + + return 0; +} + +int cam_vmrm_gh_irq_accept_notify(enum gh_irq_label label) +{ + int rc = 0; + + rc = gh_irq_accept_notify(label); + if (rc < 0) { + CAM_ERR(CAM_VMRM, "Irq accept notify failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Irq accept notify succeed for label: %d", 
label); + + return 0; +} + +int cam_vmrm_gh_irq_release(enum gh_irq_label label) +{ + int rc = 0; + + rc = gh_irq_release(label); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq release failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Irq release succeed for label: %d", label); + + return rc; +} + +int cam_vmrm_gh_irq_release_notify(enum gh_irq_label label) +{ + int rc = 0; + + rc = gh_irq_release_notify(label); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq release notify failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Irq release notify succeed for label: %d", label); + + return rc; +} + +int cam_vmrm_gh_irq_reclaim(enum gh_irq_label label) +{ + int rc = 0; + + rc = gh_irq_reclaim(label); + if (rc) { + CAM_ERR(CAM_VMRM, "Irq reclaim failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Irq reclaim succeed for label: %d", label); + + return 0; +} + +int cam_vmrm_gh_mem_register_notifier(enum gh_mem_notifier_tag tag, + gh_mem_notifier_handler handler, void *data, + void **cookie) +{ + void *cookie_temp; + int rc = 0; + + cookie_temp = gh_mem_notifier_register(tag, handler, data); + if (IS_ERR(cookie_temp)) { + rc = PTR_ERR(cookie_temp); + CAM_ERR(CAM_VMRM, "Mem notifier register failed for tag: %d, rc: %d", tag, rc); + return rc; + } + *cookie = cookie_temp; + + CAM_DBG(CAM_VMRM, "Mem notifier register success for tag: %d", tag); + + return rc; +} + +void cam_vmrm_gh_mem_unregister_notifier(void *cookie) + +{ + gh_mem_notifier_unregister(cookie); + + CAM_DBG(CAM_VMRM, "Mem notifier unregister success"); +} + +int cam_vmrm_gh_mem_lend(u8 mem_type, u8 flags, gh_label_t label, + struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, + gh_memparcel_handle_t *handle) +{ + int rc = 0, i; + int n_sgl_entries = 0; + + n_sgl_entries = sgl_desc->n_sgl_entries; + for (i = 0; i < n_sgl_entries; i++) { + CAM_DBG(CAM_VMRM, "base 0x%x size 0x%x index %d", + 
sgl_desc->sgl_entries[i].ipa_base, sgl_desc->sgl_entries[i].size, i); + } + + rc = ghd_rm_mem_lend(mem_type, flags, label, acl_desc, sgl_desc, mem_attr_desc, handle); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem lend failed for label %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "mem lend succeed for label: %d", label); + + return rc; +} + +int cam_vmrm_gh_mem_notify(gh_memparcel_handle_t handle, u8 flags, + gh_label_t mem_info_tag, struct gh_notify_vmid_desc *vmid_desc) +{ + int rc = 0; + + rc = gh_rm_mem_notify(handle, flags, mem_info_tag, vmid_desc); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem notify failed for tag: %d, rc: %d", mem_info_tag, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Memory notify succeeded for tag : %d", mem_info_tag); + + return rc; +} + +int cam_vmrm_gh_mem_accept(gh_memparcel_handle_t handle, u8 mem_type, u8 trans_type, + u8 flags, gh_label_t label, struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, u16 map_vmid) +{ + int rc = 0; + + sgl_desc = gh_rm_mem_accept(handle, mem_type, trans_type, flags, label, acl_desc, sgl_desc, + mem_attr_desc, map_vmid); + if (IS_ERR_OR_NULL(sgl_desc)) { + rc = PTR_ERR(sgl_desc); + CAM_ERR(CAM_VMRM, "Mem accept failed for label: %d, rc: %d", label, rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Mem accept succeeded for label: %d", label); + + return rc; +} + +int cam_vmrm_gh_mem_release(gh_memparcel_handle_t handle, u8 flags) +{ + int rc = 0; + + rc = gh_rm_mem_release(handle, flags); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem release failed, rc: %d", rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Mem release succeed"); + + return rc; +} + +int cam_vmrm_gh_mem_reclaim(gh_memparcel_handle_t handle, u8 flags) +{ + int rc = 0; + + rc = ghd_rm_mem_reclaim(handle, flags); + if (rc) { + CAM_ERR(CAM_VMRM, "Mem reclaim failed, rc: %d", rc); + return rc; + } + + CAM_DBG(CAM_VMRM, "Mem reclaim succeed"); + + return 0; +} + +#ifdef CONFIG_SPECTRA_RESOURCE_REQUEST_ENABLE +int 
cam_vmrm_gh_resource_register_req_client(struct gh_resource_client *client)
+{
+	int rc = 0;
+
+	rc = gh_resource_register_req_client(client);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Failed in resource register req client, rc: %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_VMRM, "Gh resource register req client succeed");
+
+	return rc;
+}
+
+int cam_vmrm_gh_resource_unregister_req_client(struct gh_resource_client *client)
+{
+	int rc = 0;
+
+	rc = gh_resource_unregister_req_client(client);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Failed in resource unregister req client, rc: %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_VMRM, "Gh resource unregister req client succeed");
+
+	return rc;
+}
+
+int cam_vmrm_gh_resource_register_release_client(
+	struct gh_resource_client *client)
+{
+	int rc = 0;
+
+	rc = gh_resource_register_release_client(client);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Failed in resource register release cb, rc: %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_VMRM, "Gh resource register release cb succeed");
+
+	return rc;
+}
+
+int cam_vmrm_gh_resource_unregister_release_client(
+	struct gh_resource_client *client)
+{
+	int rc = 0;
+
+	rc = gh_resource_unregister_release_client(client);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Failed in resource unregister release cb, rc: %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_VMRM, "Gh resource unregister release client succeed");
+
+	return rc;
+}
+
+int cam_vmrm_gh_resource_request(gh_vmid_t target_vmid, const char *subsys_name,
+	struct gh_res_request *req_resource, int res_cnt)
+{
+	int rc = 0;
+
+	rc = gh_resource_request(target_vmid, subsys_name, req_resource, res_cnt);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Failed in gh resource request, rc: %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_VMRM, "Gh resource request succeed");
+
+	return rc;
+}
+
+int cam_vmrm_gh_resource_release(gh_vmid_t target_vmid, const char *subsys_name,
+	struct gh_res_request *release_resource, int res_cnt)
+{
+	int rc = 0;
+
+	rc = gh_resource_release(target_vmid, subsys_name, release_resource, res_cnt);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Failed in gh resource release, rc: %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_VMRM, "Gh resource release succeed");
+
+	return rc;
+}
+#else
+int cam_vmrm_gh_resource_register_req_cb(const char *subsys_name, gh_res_cb cb)
+{
+	return -EOPNOTSUPP;
+}
+
+int cam_vmrm_gh_resource_unregister_req_cb(const char *subsys_name, gh_res_cb cb)
+{
+	return -EOPNOTSUPP;
+}
+
+int cam_vmrm_gh_resource_register_release_cb(const char *subsys_name, gh_res_cb cb)
+{
+	return -EOPNOTSUPP;
+}
+
+int cam_vmrm_gh_resource_unregister_release_cb(
+	const char *subsys_name, gh_res_cb cb)
+{
+	return -EOPNOTSUPP;
+}
+
+int cam_vmrm_gh_resource_request(void)
+{
+	return -EOPNOTSUPP;
+}
+
+int cam_vmrm_gh_resource_release(void)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+int cam_vmrm_ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
+{
+	int rc = 0;
+
+	rc = ghd_rm_get_vmid(vm_name, vmid);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Failed in gh get vmid for vm name: %d, rc: %d", vm_name, rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_VMRM, "Gh get vmid succeed for vm name: %d", vm_name);
+
+	return rc;
+}
+
+int cam_vmrm_gh_rm_get_vm_name(gh_vmid_t vmid, enum gh_vm_names *vm_name)
+{
+	int rc = 0;
+
+	rc = gh_rm_get_vm_name(vmid, vm_name);
+	if (rc) {
+		CAM_ERR(CAM_VMRM, "Failed in gh get vm name for vmid: %d, rc: %d", vmid, rc);
+		return rc;
+	}
+
+	/* Fix: log previously said "get vmid" (copy-paste from cam_vmrm_ghd_rm_get_vmid) */
+	CAM_DBG(CAM_VMRM, "Gh get vm name succeed for vmid: %d", vmid);
+
+	return rc;
+}
diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_gh_wrapper.h b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_gh_wrapper.h
new file mode 100644
index 0000000000..f390680073
--- /dev/null
+++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_gh_wrapper.h
@@ -0,0 +1,384 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#ifndef _CAM_VMRM_GH_WRAPPER_H_ +#define _CAM_VMRM_GH_WRAPPER_H_ + +#include +#include +#include "cam_debug_util.h" + +typedef int (*gh_res_cb) (void); + +/** + * cam_vmrm_gh_irq_lend_v2() + * + * @brief: Gh interrupt lend api wrapper + * + * @label: vIRQ high-level label + * @name: VM name to send interrupt to + * @irq: Linux IRQ number to lend + * @cb_handle: Callback to invoke when other VM release or accept the interrupt + * @data: Argument to pass to cb_handle + * + */ +int cam_vmrm_gh_irq_lend_v2(enum gh_irq_label label, enum gh_vm_names name, + int irq, gh_irq_handle_fn_v2 cb_handle, void *data); + +/** + * cam_vmrm_gh_irq_lend_notify() + * + * @brief: Gh interrupt lend notify api wrapper + * + * @label: vIRQ high-level label + * + */ +int cam_vmrm_gh_irq_lend_notify(enum gh_irq_label label); + +/** + * cam_vmrm_gh_irq_wait_for_lend_v2() + * + * @brief: Gh interrupt wait for lend api wrapper + * + * @label: vIRQ high-level label + * @name: VM name to send interrupt to + * @on_lend: Callback to invoke when other VM release or accept the interrupt + * @data: Argument to pass to cb_handle + * + */ +int cam_vmrm_gh_irq_wait_for_lend_v2(enum gh_irq_label label, + enum gh_vm_names name, gh_irq_handle_fn_v2 on_lend, void *data); + +/** + * cam_vmrm_gh_irq_accept() + * + * @brief: Gh interrupt accept api wrapper + * + * @label: vIRQ high-level label + * @irq: Linux IRQ# to associate vIRQ with. If don't care, use -1 + * @type IRQ flags to use when allowing RM to choose the IRQ. 
+ * If irq parameter is specified, then type is unused + * + */ +int cam_vmrm_gh_irq_accept(enum gh_irq_label label, int irq, int type); + +/** + * cam_vmrm_gh_irq_accept_notify() + * + * @brief: Gh interrupt accept notify api wrapper + * + * @label: vIRQ high-level label + * + */ +int cam_vmrm_gh_irq_accept_notify(enum gh_irq_label label); + +/** + * cam_vmrm_gh_irq_release() + * + * @brief: Gh interrupt release api wrapper + * + * @label: vIRQ high-level label + * + */ +int cam_vmrm_gh_irq_release(enum gh_irq_label label); + +/** + * cam_vmrm_gh_irq_release_notify() + * + * @brief: Gh interrupt release notify api wrapper + * + * @label: vIRQ high-level label + * + */ +int cam_vmrm_gh_irq_release_notify(enum gh_irq_label label); + +/** + * cam_vmrm_gh_irq_reclaim() + * + * @brief: Gh interrupt reclaim api wrapper + * + * @label: vIRQ high-level label + * + */ +int cam_vmrm_gh_irq_reclaim(enum gh_irq_label label); + +/** + * cam_vmrm_gh_mem_register_notifier() + * + * @brief: Gh memory register notifier api wrapper + * + * @tag: The tag for which the caller would like to receive MEM_SHARED + * and MEM_RELEASED notifications for + * @handler: The handler that will be invoked when a MEM_SHARED or MEM_RELEASED + * notification pertaining to @tag arrives at the VM. The handler will + * also be invoked with a pointer to caller specific data, as well as + * the original MEM_SHARED/MEM_RELEASED payload from the resource manager. + * @data: The data that should be passed to @handler when it is invoked. + * @cookie: Save the original api return value. 
+ * + */ +int cam_vmrm_gh_mem_register_notifier(enum gh_mem_notifier_tag tag, + gh_mem_notifier_handler handler, void *data, void **cookie); + +/** + * cam_vmrm_gh_mem_unregister_notifier() + * + * @brief: Gh memory unregister notifier api wrapper + * + * @cookie: The cookie returned by gh_mem_notifier_register + * + */ +void cam_vmrm_gh_mem_unregister_notifier(void *cookie); + +/** + * cam_vmrm_gh_mem_lend() + * + * @brief: Gh memory lend api wrapper + * + * @mem_type: The type of memory being lent (i.e. normal or I/O) + * @flags: Bitmask of values to influence the behavior of the RM + * when it lends the memory + * @label: The label to assign to the memparcel that the RM will create + * @acl_desc: Describes the number of ACL entries and VMID and permission + * pairs that the resource manager should consider when lending the memory + * @sgl_desc: Describes the number of SG-List entries as well as + * the location of the memory in the IPA space of the owner + * @mem_attr_desc: Describes the number of memory attribute entries and the + * memory attribute and VMID pairs that the RM should consider + * when lending the memory + * @handle: Pointer to where the memparcel handle should be stored + * + */ +int cam_vmrm_gh_mem_lend(u8 mem_type, u8 flags, gh_label_t label, + struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, + gh_memparcel_handle_t *handle); + +/** + * cam_vmrm_gh_mem_notify() + * + * @brief: Gh memory notify api wrapper + * + * @handle: The handle of the memparcel for which a notification should be sent out + * @flags: Flags to determine if the notification is for notifying that memory + * has been shared to another VM, or that a VM has released memory + * @mem_info_tag: A 32-bit value that is attached to the + * MEM_SHARED/MEM_RELEASED/MEM_ACCEPTED notifications to aid in + * distinguishing different resources from one another. 
+ * @vmid_desc: A list of VMIDs to notify that memory has been shared with them.
+ *             This parameter should only be non-NULL if other VMs are being
+ *             notified (i.e. it is invalid to specify this parameter when the
+ *             operation is a release notification)
+ *
+ */
+int cam_vmrm_gh_mem_notify(gh_memparcel_handle_t handle, u8 flags,
+	gh_label_t mem_info_tag, struct gh_notify_vmid_desc *vmid_desc);
+
+/**
+ * cam_vmrm_gh_mem_accept()
+ *
+ * @brief: Gh memory accept api wrapper
+ *
+ * @handle: The memparcel handle associated with the memory
+ * @mem_type: The type of memory being lent (i.e. normal or I/O)
+ * @trans_type: The type of memory transfer
+ * @flags: Bitmask of values to influence the behavior of the RM
+ *         when it lends the memory
+ * @label: The label to validate against the label maintained by the RM
+ * @acl_desc: Describes the number of ACL entries and VMID and permission
+ *            pairs that the resource manager should validate against for AC
+ *            regarding the memparcel
+ * @sgl_desc: Describes the number of SG-List entries as well as
+ *            where the memory should be mapped in the IPA space of the VM
+ *            denoted by @map_vmid. If this parameter is left NULL, then the
+ *            RM will map the memory at an arbitrary location
+ * @mem_attr_desc: Describes the number of memory attribute entries and the
+ *                 memory attribute and VMID pairs that the RM should validate
+ *                 against regarding the memparcel.
+ * @map_vmid: The VMID which RM will map the memory for. 
VMID 0 corresponds + * to mapping the memory for the current VM + * + */ +int cam_vmrm_gh_mem_accept(gh_memparcel_handle_t handle, u8 mem_type, u8 trans_type, + u8 flags, gh_label_t label, struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, u16 map_vmid); + +/** + * cam_vmrm_gh_mem_release() + * + * @brief: Gh memory release api wrapper + * + * @handle: The memparcel handle associated with the memory + * @flags: Bitmask of values to influence the behavior of the RM when it unmap + * the memory. + * + */ +int cam_vmrm_gh_mem_release(gh_memparcel_handle_t handle, u8 flags); + +/** + * cam_vmrm_gh_mem_reclaim() + * + * @brief: Gh memory reclaim api wrapper + * + * @handle: The memparcel handle associated with the memory + * @flags: Bitmask of values to influence the behavior of the RM when it unmap + * the memory. + * + */ +int cam_vmrm_gh_mem_reclaim(gh_memparcel_handle_t handle, u8 flags); + +#ifdef CONFIG_SPECTRA_RESOURCE_REQUEST_ENABLE +/** + * cam_vmrm_gh_resource_register_req_client() + * + * @brief: Gh resource register request client api wrapper + * + * @client: Client + * + */ +int cam_vmrm_gh_resource_register_req_client(struct gh_resource_client *client); + +/** + * cam_vmrm_gh_resource_unregister_req_client() + * + * @brief: Gh resource unregister request client api wrapper + * + * @client: Client + * + */ +int cam_vmrm_gh_resource_unregister_req_client(struct gh_resource_client *client); + + +/** + * cam_vmrm_gh_resource_register_release_client() + * + * @brief: Gh resource register release client api wrapper + * + * @client: Client + * + */ +int cam_vmrm_gh_resource_register_release_client( + struct gh_resource_client *client); + + +/** + * cam_vmrm_gh_resource_unregister_release_client() + * + * @brief: Gh resource register release client api wrapper + * + * @client: Client + * + */ +int cam_vmrm_gh_resource_unregister_release_client( + struct gh_resource_client *client); + +/** + * 
cam_vmrm_gh_resource_request() + * + * @brief: Gh resource request api wrapper + * + * @target_vmid: Target vm id + * @subsys_name: Subsystem name + * @req_resource: Request resource + * @res_cnt: Request resource count + * + */ +int cam_vmrm_gh_resource_request(gh_vmid_t target_vmid, const char *subsys_name, + struct gh_res_request *req_resource, int res_cnt); + +/** + * cam_vmrm_gh_resource_release() + * + * @brief: Gh resource release api wrapper + * + * @target_vmid: Target vm id + * @subsys_name: Subsystem name + * @release_resource: Release resource + * @res_cnt: Release resource count + * + */ +int cam_vmrm_gh_resource_release(gh_vmid_t target_vmid, const char *subsys_name, + struct gh_res_request *release_resource, int res_cnt); +#else +/** + * cam_vmrm_gh_resource_register_req_cb() + * + * @brief: Gh resource register request api wrapper + * @subsys_name: Subsystem name + * @cb: Callback + */ +int cam_vmrm_gh_resource_register_req_cb( + const char *subsys_name, gh_res_cb cb); + +/** + * cam_vmrm_gh_resource_unregister_req_cb() + * + * @brief: Gh resource unregister request api wrapper + * @subsys_name: Subsystem name + * @cb: Callback + */ +int cam_vmrm_gh_resource_unregister_req_cb( + const char *subsys_name, gh_res_cb cb); + +/** + * cam_vmrm_gh_resource_register_release_cb() + * + * @brief: Gh resource register release api wrapper + * @subsys_name: Subsystem name + * @cb: Callback + */ +int cam_vmrm_gh_resource_register_release_cb( + const char *subsys_name, gh_res_cb cb); + +/** + * cam_vmrm_gh_resource_unregister_release_cb() + * + * @brief: Gh resource register release api wrapper + * @subsys_name: Subsystem name + * @cb: Callback + */ +int cam_vmrm_gh_resource_unregister_release_cb( + const char *subsys_name, gh_res_cb cb); + +/** + * cam_vmrm_gh_resource_request() + * + * @brief: Gh resource request api wrapper + * + */ +int cam_vmrm_gh_resource_request(void); + +/** + * cam_vmrm_gh_resource_release() + * + * @brief: Gh resource release api wrapper 
+ * + */ +int cam_vmrm_gh_resource_release(void); +#endif + +/** + * cam_vmrm_ghd_rm_get_vmid() + * + * @brief: Gh translate VM name to vmid api wrapper + * + * @vm_name: VM name to lookup + * @vmid: Out pointer to store found vmid if VM is found + * + */ +int cam_vmrm_ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid); + +/** + * cam_vmrm_gh_rm_get_vm_name() + * + * @brief: Gh translate vmid to vm name api wrapper + * + * @vmid: Vmid to lookup + * @vm_name: Out pointer to store found VM name if vmid is found + * + */ +int cam_vmrm_gh_rm_get_vm_name(gh_vmid_t vmid, enum gh_vm_names *vm_name); + +#endif /* _CAM_VMRM_GH_WRAPPER_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_interface.c b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_interface.c new file mode 100644 index 0000000000..6635c38882 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_interface.c @@ -0,0 +1,975 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#ifdef CONFIG_SPECTRA_VMRM +#include +#include +#include +#include +#include "camera_main.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#endif +#include "cam_vmrm.h" +#include "cam_vmrm_interface.h" +#include "cam_cpas_api.h" +#include "cam_req_mgr_dev.h" +#include "cam_mem_mgr_api.h" + +#ifdef CONFIG_SPECTRA_VMRM +extern struct cam_vmrm_intf_dev *g_vmrm_intf_dev; + +bool cam_vmrm_is_supported(void) +{ + return true; +} + +uint32_t cam_vmrm_intf_get_vmid(void) +{ + return g_vmrm_intf_dev->cam_vmid; +} + +bool cam_vmrm_proxy_clk_rgl_voting_enable(void) +{ + struct cam_vmrm_intf_dev *vmrm_intf_dev; + bool enable = false; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + if (!vmrm_intf_dev) { + CAM_ERR(CAM_VMRM, "vmrm dev is not ready"); + return false; + } + + if (!vmrm_intf_dev->is_initialized) { + CAM_ERR(CAM_VMRM, "vmrm is not initialized"); + return false; + } + + if (vmrm_intf_dev->proxy_voting_enable & CAM_PROXY_CLK_RGL_VOTING) + enable = true; + + return enable; +} + +bool cam_vmrm_proxy_icc_voting_enable(void) +{ + struct cam_vmrm_intf_dev *vmrm_intf_dev; + bool enable = false; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + if (!vmrm_intf_dev) { + CAM_ERR(CAM_VMRM, "vmrm dev is not ready"); + return false; + } + + if (!vmrm_intf_dev->is_initialized) { + CAM_ERR(CAM_VMRM, "vmrm is not initialized"); + return false; + } + + if (vmrm_intf_dev->proxy_voting_enable & CAM_PROXY_ICC_VOTING) + enable = true; + + return enable; +} + +bool cam_vmrm_no_register_read_on_bind(void) +{ + struct cam_vmrm_intf_dev *vmrm_intf_dev; + struct timespec64 ts_start, ts_end; + long microsec = 0; + + CAM_GET_TIMESTAMP(ts_start); + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + if (!vmrm_intf_dev) { + CAM_ERR(CAM_VMRM, "vmrm dev is not ready"); + return false; + } + + if (!vmrm_intf_dev->is_initialized) { + CAM_ERR(CAM_VMRM, "vmrm is not initialized"); + return false; + } + + CAM_GET_TIMESTAMP(ts_end); + CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts_start, ts_end, microsec); + 
cam_record_bind_latency(__func__, microsec);
+	return vmrm_intf_dev->no_register_read_on_bind;
+}
+
+int cam_vmvm_populate_hw_instance_info(struct cam_hw_soc_info *soc_info,
+	msg_cb_func hw_msg_callback, void *hw_msg_callback_data)
+{
+	int i;
+	struct cam_hw_instance *hw_instance_temp;
+	struct cam_vmrm_intf_dev *vmrm_intf_dev;
+	struct cam_soc_gpio_data *gpio_data;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_VMRM, "Invalid hw info");
+		return -EINVAL;
+	}
+
+	vmrm_intf_dev = cam_vmrm_get_intf_dev();
+	if (!vmrm_intf_dev) {
+		CAM_ERR(CAM_VMRM, "vmrm dev is not ready");
+		return -EINVAL;
+	}
+
+	if (!vmrm_intf_dev->is_initialized) {
+		CAM_ERR(CAM_VMRM, "vmrm is not initialized");
+		return -EINVAL;
+	}
+
+	hw_instance_temp = CAM_MEM_ZALLOC(sizeof(*hw_instance_temp), GFP_KERNEL);
+	if (!hw_instance_temp) {
+		CAM_ERR(CAM_VMRM, "hw instance allocate memory failed");
+		/* Fix: allocation failure is -ENOMEM (matches cam_vmrm_populate_driver_node_info) */
+		return -ENOMEM;
+	}
+	hw_instance_temp->hw_id = soc_info->hw_id;
+	scnprintf(hw_instance_temp->hw_name,
+		sizeof(hw_instance_temp->hw_name), "%s", soc_info->dev_name);
+
+	for (i = 0; i < soc_info->irq_count; i++) {
+		scnprintf(hw_instance_temp->resources.irq_name[i],
+			sizeof(hw_instance_temp->resources.irq_name[i]), "%s",
+			soc_info->irq_name[i]);
+		hw_instance_temp->resources.irq_num[i] = soc_info->irq_num[i];
+		hw_instance_temp->resources.irq_label[i] =
+			soc_info->vmrm_resource_ids[i + CAM_VMRM_RESOURCE_IRQ_OFFSET];
+		hw_instance_temp->resources.valid_mask |=
+			BIT(i + CAM_VMRM_RESOURCE_IRQ_BIT_MAP_OFFSET);
+		CAM_DBG(CAM_VMRM,
+			"hw name %s hw id %x irq name %s irq count %d irq num %d irq label %d valid mask %d",
+			hw_instance_temp->hw_name, hw_instance_temp->hw_id,
+			hw_instance_temp->resources.irq_name[i], i,
+			hw_instance_temp->resources.irq_num[i],
+			hw_instance_temp->resources.irq_label[i],
+			hw_instance_temp->resources.valid_mask);
+	}
+	hw_instance_temp->resources.irq_count = soc_info->irq_count;
+	hw_instance_temp->resources.resource_cnt = hw_instance_temp->resources.irq_count;
+
+	for (i = 0; i < 
soc_info->num_mem_block; i++) { + scnprintf(hw_instance_temp->resources.mem_block_name[i], + sizeof(hw_instance_temp->resources.mem_block_name[i]), + "%s", soc_info->mem_block_name[i]); + hw_instance_temp->resources.mem_block_addr[i] = soc_info->mem_block[i]->start; + hw_instance_temp->resources.mem_block_size[i] = + resource_size(soc_info->mem_block[i]); + + CAM_DBG(CAM_VMRM, + "hw name %s hw id %x mem name %s mem count %d mem address %lx mem size %x", + hw_instance_temp->hw_name, hw_instance_temp->hw_id, + hw_instance_temp->resources.mem_block_name[i], i, + hw_instance_temp->resources.mem_block_addr[i], + hw_instance_temp->resources.mem_block_size[i]); + } + hw_instance_temp->resources.num_mem_block = soc_info->num_mem_block; + hw_instance_temp->resources.resource_cnt += hw_instance_temp->resources.num_mem_block; + + gpio_data = soc_info->gpio_data; + if (gpio_data) { + for (i = 0; i < gpio_data->cam_gpio_common_tbl_size; i++) { + hw_instance_temp->resources.gpio_num[i] = + gpio_data->cam_gpio_common_tbl[i].gpio; + + CAM_DBG(CAM_VMRM, "hw name %s hw id %x gpio count %d gpio num %d", + hw_instance_temp->hw_name, hw_instance_temp->hw_id, i, + hw_instance_temp->resources.gpio_num[i]); + } + hw_instance_temp->resources.gpio_count = gpio_data->cam_gpio_common_tbl_size; + hw_instance_temp->resources.resource_cnt += hw_instance_temp->resources.gpio_count; + } + + hw_instance_temp->resources.mem_label = soc_info->vmrm_resource_ids[0]; + hw_instance_temp->resources.mem_tag = soc_info->vmrm_resource_ids[1]; + hw_instance_temp->hw_msg_callback = hw_msg_callback; + + if (hw_instance_temp->hw_msg_callback) + hw_instance_temp->hw_msg_callback_data = hw_msg_callback_data; + else + hw_instance_temp->hw_msg_callback_data = soc_info; + + /*Mem resource is allways there*/ + hw_instance_temp->resources.valid_mask |= BIT(0); + + /* Default owner is PVM*/ + hw_instance_temp->vm_owner = CAM_PVM; + hw_instance_temp->is_using = false; + hw_instance_temp->ref_count = 0; + + 
INIT_LIST_HEAD(&hw_instance_temp->list); + init_completion(&hw_instance_temp->wait_response); + spin_lock_init(&hw_instance_temp->spin_lock); + mutex_init(&hw_instance_temp->msg_comm_lock); + mutex_lock(&vmrm_intf_dev->lock); + list_add_tail(&hw_instance_temp->list, &vmrm_intf_dev->hw_instance); + mutex_unlock(&vmrm_intf_dev->lock); + + CAM_DBG(CAM_VMRM, + "hw name %s hw id %x mem count %d irq count %d gpio count %d mem label %d mem tag %d valid mask %d", + hw_instance_temp->hw_name, hw_instance_temp->hw_id, + hw_instance_temp->resources.num_mem_block, hw_instance_temp->resources.irq_count, + hw_instance_temp->resources.gpio_count, hw_instance_temp->resources.mem_label, + hw_instance_temp->resources.mem_tag, hw_instance_temp->resources.valid_mask); + + return 0; +} + +int cam_vmrm_populate_driver_node_info(struct cam_driver_node *driver_node) +{ + struct cam_driver_node *driver_node_temp; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + if (!vmrm_intf_dev) { + CAM_ERR(CAM_VMRM, "vmrm dev is not ready"); + return -EINVAL; + } + + if (!vmrm_intf_dev->is_initialized) { + CAM_ERR(CAM_VMRM, "vmrm is not initialized"); + return -EINVAL; + } + + driver_node_temp = kmemdup(driver_node, sizeof(*driver_node_temp), GFP_ATOMIC); + if (!driver_node_temp) { + CAM_ERR(CAM_VMRM, "driver allocate memory failed"); + return -ENOMEM; + } + + CAM_DBG(CAM_VMRM, "driver name %s, driver id 0x%x", + driver_node_temp->driver_name, driver_node_temp->driver_id); + + INIT_LIST_HEAD(&driver_node_temp->list); + init_completion(&driver_node_temp->wait_response); + mutex_init(&driver_node_temp->msg_comm_lock); + mutex_lock(&vmrm_intf_dev->lock); + list_add_tail(&driver_node_temp->list, &vmrm_intf_dev->driver_node); + mutex_unlock(&vmrm_intf_dev->lock); + + return 0; +} + +int cam_vmrm_populate_io_resource_info(void) +{ + int rc = 0; + struct cam_hw_instance *hw_pos, *hw_temp; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = 
cam_vmrm_get_intf_dev(); + if (!list_empty(&vmrm_intf_dev->hw_instance)) { + list_for_each_entry_safe(hw_pos, + hw_temp, &vmrm_intf_dev->hw_instance, list) { + rc = cam_populate_irq_resource_info(hw_pos); + if (rc) { + CAM_ERR(CAM_VMRM, "Populate irq resource info failed %d", + hw_pos->hw_id); + goto free_resources; + } + rc = cam_populate_mem_resource_info(hw_pos); + if (rc) { + CAM_ERR(CAM_VMRM, "Populate mem resource info failed %d", + hw_pos->hw_id); + goto free_resources; + } + rc = cam_populate_gpio_resource_info(hw_pos); + if (rc) { + CAM_ERR(CAM_VMRM, "Populate gpio resource info failed %d", + hw_pos->hw_id); + goto free_resources; + } + } + } + + CAM_DBG(CAM_VMRM, "populate io resource succeed"); + + return rc; + +free_resources: + cam_free_io_resource_info(); + return rc; +} + +int cam_vmrm_register_gh_callback(void) +{ + int rc = 0; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + mutex_lock(&vmrm_intf_dev->lock); + if (vmrm_intf_dev->gh_rr_enable) { + rc = cam_register_gh_res_callback(); + if (rc) { + CAM_ERR(CAM_VMRM, "Register gh resource callback failed %d", rc); + mutex_unlock(&vmrm_intf_dev->lock); + return rc; + } + } + + rc = cam_register_gh_mem_callback(); + if (rc) { + CAM_ERR(CAM_VMRM, "Register gh mem callback failed %d", rc); + goto unregister_gh_mem_callback; + } + + rc = cam_register_gh_irq_callback(); + if (rc) { + CAM_ERR(CAM_VMRM, "Register gh irq callback failed %d", rc); + goto unregister_gh_mem_callback; + } + mutex_unlock(&vmrm_intf_dev->lock); + + CAM_DBG(CAM_VMRM, "register gh callback succeed"); + + return rc; + +unregister_gh_mem_callback: + cam_unregister_gh_mem_callback(); + cam_unregister_gh_res_callback(); + mutex_unlock(&vmrm_intf_dev->lock); + return rc; +} + +int cam_vmrm_unregister_gh_callback(void) +{ + int rc = 0; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + + mutex_lock(&vmrm_intf_dev->lock); + cam_unregister_gh_mem_callback(); + + 
if (vmrm_intf_dev->gh_rr_enable) {
+		rc = cam_unregister_gh_res_callback();
+		if (rc)
+			CAM_ERR(CAM_VMRM, "Unregister gh resource callback failed %d", rc);
+	}
+	mutex_unlock(&vmrm_intf_dev->lock);
+
+	return rc;
+}
+
+/*
+ * Build an inter-VM message (header + data_size payload bytes), send it via
+ * the comms ops table, and optionally wait on @complete for the response.
+ * A zero @timeout falls back to CAM_VMRM_INTER_VM_MSG_TIMEOUT.
+ */
+int cam_vmrm_send_msg(uint32_t source_vmid, uint32_t des_vmid, uint32_t msg_dst_type,
+	uint32_t msg_dst_id, uint32_t msg_type, bool response_msg, bool need_response,
+	void *msg_data, uint32_t data_size, struct completion *complete, uint32_t timeout)
+{
+	int rc = 0;
+	struct cam_vmrm_msg *vm_msg;
+	uint32_t msg_size;
+	struct cam_vmrm_intf_dev *vmrm_intf_dev;
+	unsigned long rem_jiffies = 0;
+
+	/* header plus flexible-array payload */
+	msg_size = offsetof(struct cam_vmrm_msg, data[data_size]);
+	vm_msg = CAM_MEM_ZALLOC((msg_size), GFP_KERNEL);
+	if (!vm_msg) {
+		CAM_ERR(CAM_VMRM, "msg mem allocate failed");
+		return -ENOMEM;
+	}
+
+	if (!timeout)
+		timeout = CAM_VMRM_INTER_VM_MSG_TIMEOUT;
+
+	vm_msg->source_vmid = source_vmid;
+	vm_msg->des_vmid = des_vmid;
+	vm_msg->msg_dst_type = msg_dst_type;
+	vm_msg->msg_dst_id = msg_dst_id;
+	vm_msg->msg_type = msg_type;
+	vm_msg->response_msg = response_msg;
+	vm_msg->need_response = need_response;
+	vm_msg->data_size = data_size;
+
+	CAM_DBG(CAM_VMRM,
+		"msg_size %d data_size %d source_vmid %d des_vmid %d dst_type %d dst_id %x msg_type %d response msg %d need response %d",
+		msg_size, vm_msg->data_size, vm_msg->source_vmid, vm_msg->des_vmid,
+		vm_msg->msg_dst_type, vm_msg->msg_dst_id, vm_msg->msg_type, vm_msg->response_msg,
+		vm_msg->need_response);
+
+	if (msg_size > CAM_VMRM_RECV_MSG_BUF_SIZE) {
+		CAM_ERR(CAM_VMRM, "send msg size %d more than allocated receive buffer size %d",
+			msg_size, CAM_VMRM_RECV_MSG_BUF_SIZE);
+		/* fix: free vm_msg on the oversize error path (was leaked) */
+		CAM_MEM_FREE(vm_msg);
+		return -EINVAL;
+	}
+
+	if (msg_data)
+		memcpy(vm_msg->data, msg_data, data_size);
+
+	vmrm_intf_dev = cam_vmrm_get_intf_dev();
+	rc = vmrm_intf_dev->ops_table->send_message(vmrm_intf_dev->vm_handle,
+		(void *)vm_msg, msg_size);
+	if (rc) {
+		if (rc == -EAGAIN) {
+			rc = 0;
+			CAM_WARN(CAM_VMRM, "connection is not ready skip send 
msg"); + } else { + CAM_ERR(CAM_VMRM, "msg send failed"); + } + CAM_MEM_FREE(vm_msg); + return rc; + } + CAM_MEM_FREE(vm_msg); + + CAM_DBG(CAM_VMRM, "msg send succeed"); + + if (need_response) { + rem_jiffies = cam_common_wait_for_completion_timeout(complete, + msecs_to_jiffies(timeout)); + if (rem_jiffies == 0) { + CAM_ERR(CAM_VMRM, "hw 0x%x wait for response timeout", msg_dst_id); + rc = -EINVAL; + } + } + + return rc; +} + +int cam_vmrm_soc_acquire_resources(uint32_t hw_id) +{ + int rc = 0; + struct cam_hw_instance *hw_pos, *cpas_hw_pos; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + mutex_lock(&vmrm_intf_dev->lock); + /* + * Check if the hw resource is using by other vm firstly + * to handle concurrency acquire from different vm + */ + hw_pos = cam_check_hw_instance_available(hw_id); + if (!hw_pos) { + CAM_WARN(CAM_VMRM, "hw instance 0x%x is not available", hw_id); + rc = -EBUSY; + mutex_unlock(&vmrm_intf_dev->lock); + goto end; + } + + CAM_DBG(CAM_VMRM, "hw 0x%x ownership %d is_using %d ref count %d", hw_id, hw_pos->vm_owner, + hw_pos->is_using, hw_pos->ref_count); + + if ((hw_pos->vm_owner == cam_vmrm_intf_get_vmid()) && hw_pos->ref_count) { + hw_pos->ref_count++; + CAM_DBG(CAM_VMRM, "hw 0x%x ownership has been acquired %d ref count %d", + hw_id, cam_vmrm_intf_get_vmid(), hw_pos->ref_count); + mutex_unlock(&vmrm_intf_dev->lock); + goto end; + } + + /* + * When the hw resource is idle, check the ownership then. + * If ownership is own just return and set using flag and + * send the msg to other vm to ensure other vm know the info. + * So when other vm try to acquire the same resource will + * get busy failing. + * For PVM, need to mask the using flag and send msg to other VMs. + * For SVM, if gh new api enable, svm call new gh request api. + * And if gh not enable, svm send acquire msg to pvm. 
+	 */
+	if (CAM_IS_PRIMARY_VM()) {
+		hw_pos->is_using = true;
+		hw_pos->ref_count++;
+		mutex_unlock(&vmrm_intf_dev->lock);
+		rc = cam_vmrm_send_msg(cam_vmrm_intf_get_vmid(), CAM_SVM1, CAM_MSG_DST_TYPE_VMRM,
+			hw_id, CAM_HW_RESOURCE_SET_ACQUIRE, false, false, NULL, 1, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_VMRM, "vm rm send msg failed %d", rc);
+			goto end;
+		}
+		CAM_DBG(CAM_VMRM, "hw 0x%x is pvm own and send msg to SVM ref count %d",
+			hw_id, hw_pos->ref_count);
+	} else {
+		mutex_unlock(&vmrm_intf_dev->lock);
+		if (vmrm_intf_dev->gh_rr_enable) {
+			rc = cam_vmrm_resource_req(hw_pos, true);
+			if (rc) {
+				CAM_ERR(CAM_VMRM, "svm %d resource request call failed %d",
+					cam_vmrm_intf_get_vmid(), rc);
+				goto end;
+			}
+		} else {
+			/*
+			 * Due to cpas is basic resource, when acquire any hw, need to
+			 * acquire cpas firstly.
+			 */
+			mutex_lock(&vmrm_intf_dev->lock);
+			if (!vmrm_intf_dev->lend_cnt) {
+				cpas_hw_pos = cam_hw_instance_lookup(CAM_HW_ID_CPAS, 0, 0, 0);
+				if (!cpas_hw_pos) {
+					/* fix: %x previously had no matching argument */
+					CAM_ERR(CAM_VMRM, "Do not find cpas hw instance %x",
+						CAM_HW_ID_CPAS);
+					rc = -EINVAL;
+					mutex_unlock(&vmrm_intf_dev->lock);
+					goto end;
+				}
+				mutex_lock(&cpas_hw_pos->msg_comm_lock);
+				reinit_completion(&cpas_hw_pos->wait_response);
+				rc = cam_vmrm_send_msg(cam_vmrm_intf_get_vmid(), CAM_PVM,
+					CAM_MSG_DST_TYPE_VMRM, CAM_HW_ID_CPAS,
+					CAM_HW_RESOURCE_ACQUIRE, false, true, NULL, 1,
+					&cpas_hw_pos->wait_response, 0);
+				if (rc) {
+					CAM_ERR(CAM_VMRM, "vm rm send msg failed %d", rc);
+					mutex_unlock(&cpas_hw_pos->msg_comm_lock);
+					mutex_unlock(&vmrm_intf_dev->lock);
+					goto end;
+				}
+
+				if (!rc && !cpas_hw_pos->response_result) {
+					vmrm_intf_dev->lend_cnt++;
+					CAM_DBG(CAM_VMRM,
+						"acquire succeed hw id 0x%x, lend_cnt %d",
+						cpas_hw_pos->hw_id, vmrm_intf_dev->lend_cnt);
+				} else if (!rc && cpas_hw_pos->response_result) {
+					CAM_ERR(CAM_VMRM, "failure happen in pvm lend hw id 0x%x",
+						cpas_hw_pos->hw_id);
+					rc = -EINVAL;
+				} else {
+					CAM_ERR(CAM_VMRM,
+						"failure happen in tvm accept hw id 0x%x",
+						cpas_hw_pos->hw_id);
+				}
+				
mutex_unlock(&cpas_hw_pos->msg_comm_lock); + } + mutex_unlock(&vmrm_intf_dev->lock); + + mutex_lock(&hw_pos->msg_comm_lock); + reinit_completion(&hw_pos->wait_response); + rc = cam_vmrm_send_msg(cam_vmrm_intf_get_vmid(), CAM_PVM, + CAM_MSG_DST_TYPE_VMRM, hw_id, CAM_HW_RESOURCE_ACQUIRE, false, true, + NULL, 1, &hw_pos->wait_response, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "vm rm send msg failed %d", rc); + mutex_unlock(&hw_pos->msg_comm_lock); + goto end; + } + /* + * Three results here, one is pvm lend failure, pvm send failure response + * message to tvm, when tvm receive the message and signal. And then check + * the response result to get the error happen in pvm. + * Another is pvm lend succeed, but tvm accept failure, pvm send succeed + * response message to tvm, when tvm receive the message and save the info, + * and do not signal. So completion will timeout. + * Last one is pvm lend and tvm accept succeed, pvm send succeed response + * message to tvm, when tvm receive the message and save the info. After + * tvm accept succeed to signal. 
+ */ + if (!rc && !hw_pos->response_result) { + mutex_lock(&vmrm_intf_dev->lock); + vmrm_intf_dev->lend_cnt++; + hw_pos->ref_count++; + CAM_DBG(CAM_VMRM, + "acquire succeed hw id 0x%x lend_cnt %d ref count %d", + hw_pos->hw_id, vmrm_intf_dev->lend_cnt, hw_pos->ref_count); + mutex_unlock(&vmrm_intf_dev->lock); + } else if (!rc && hw_pos->response_result) { + CAM_ERR(CAM_VMRM, "failure happen in pvm lend hw id 0x%x", + hw_pos->hw_id); + rc = -EINVAL; + } else { + CAM_ERR(CAM_VMRM, "failure happen in tvm accept hw id 0x%x", + hw_pos->hw_id); + } + mutex_unlock(&hw_pos->msg_comm_lock); + } + } + +end: + return rc; + +} + +int cam_vmrm_soc_release_resources(uint32_t hw_id) +{ + int rc = 0; + struct cam_hw_instance *hw_pos; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + mutex_lock(&vmrm_intf_dev->lock); + hw_pos = cam_hw_instance_lookup(hw_id, 0, 0, 0); + if (!hw_pos) { + CAM_ERR(CAM_VMRM, "Do not find hw instance %x", hw_id); + rc = -EINVAL; + mutex_unlock(&vmrm_intf_dev->lock); + goto end; + } + + /* + * For svm need to release resource to pvm. + * For pvm reset the using flag and send msg to svm. 
+ */ + if (!hw_pos->is_using) { + CAM_DBG(CAM_VMRM, "hw %x using flag has been reset", hw_id); + mutex_unlock(&vmrm_intf_dev->lock); + goto end; + } + + hw_pos->ref_count--; + + if (hw_pos->ref_count) { + CAM_DBG(CAM_VMRM, "hw %x ref count %d", hw_id, hw_pos->ref_count); + mutex_unlock(&vmrm_intf_dev->lock); + goto end; + } + + if (CAM_IS_PRIMARY_VM()) + hw_pos->is_using = false; + mutex_unlock(&vmrm_intf_dev->lock); + + if (CAM_IS_PRIMARY_VM()) { + rc = cam_vmrm_send_msg(cam_vmrm_intf_get_vmid(), CAM_SVM1, CAM_MSG_DST_TYPE_VMRM, + hw_id, CAM_HW_RESOURCE_SET_RELEASE, false, false, NULL, 1, NULL, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "vmrm send msg failed %d", rc); + goto end; + } + } else { + rc = cam_vmrm_release_resources(hw_id); + if (rc) { + CAM_ERR(CAM_VMRM, "vmrm release resources failed %d hw id 0x%x vmid %d", + rc, hw_id, cam_vmrm_intf_get_vmid()); + goto end; + } + /* + * When release resource, need to decrease lend count and check if count is 1 + * if it is 1, means all resource have been released, so need to release cpas. 
+ */ + mutex_lock(&vmrm_intf_dev->lock); + vmrm_intf_dev->lend_cnt--; + CAM_DBG(CAM_VMRM, "lend_cnt %d", vmrm_intf_dev->lend_cnt); + if (vmrm_intf_dev->lend_cnt == 1) { + rc = cam_vmrm_release_resources(CAM_HW_ID_CPAS); + if (rc) { + CAM_ERR(CAM_VMRM, + "vmrm release resources failed %d hw id 0x%x vmid %d", + rc, CAM_HW_ID_CPAS, cam_vmrm_intf_get_vmid()); + mutex_unlock(&vmrm_intf_dev->lock); + goto end; + } + vmrm_intf_dev->lend_cnt = 0; + } + mutex_unlock(&vmrm_intf_dev->lock); + } + +end: + return rc; +} + +int cam_vmrm_send_hw_msg_wrapper(uint32_t dest_vm, uint32_t hw_id, uint32_t msg_type, + bool response_msg, bool need_response, void *msg, uint32_t msg_size, uint32_t timeout) +{ + int rc = 0; + struct cam_hw_instance *hw_pos; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + mutex_lock(&vmrm_intf_dev->lock); + hw_pos = cam_hw_instance_lookup(hw_id, 0, 0, 0); + if (!hw_pos) { + CAM_ERR(CAM_VMRM, "hw instance 0x%x look up failed", hw_id); + mutex_unlock(&vmrm_intf_dev->lock); + return -EINVAL; + } + mutex_unlock(&vmrm_intf_dev->lock); + + mutex_lock(&hw_pos->msg_comm_lock); + reinit_completion(&hw_pos->wait_response); + + rc = cam_vmrm_send_msg(cam_vmrm_intf_get_vmid(), dest_vm, + CAM_MSG_DST_TYPE_HW_INSTANCE, hw_id, msg_type, response_msg, need_response, msg, + msg_size, &hw_pos->wait_response, timeout); + if (rc) { + CAM_ERR(CAM_VMRM, "send msg for hw id failed: 0x%x, rc: %d", hw_id, rc); + mutex_unlock(&hw_pos->msg_comm_lock); + return rc; + } + + rc = hw_pos->response_result; + if (!rc) + CAM_DBG(CAM_VMRM, "send hw message succeed for hw_id:0x%x", hw_id); + else + CAM_ERR(CAM_VMRM, "send hw message failed for hw_id:0x%x", hw_id); + + mutex_unlock(&hw_pos->msg_comm_lock); + + return rc; + +} + +int cam_vmrm_send_driver_msg_wrapper(uint32_t dest_vm, uint32_t driver_id, uint32_t msg_type, + bool response_msg, bool need_response, void *msg, uint32_t msg_size, uint32_t timeout) +{ + int rc = 0; + struct cam_driver_node 
*driver_pos; + struct cam_vmrm_intf_dev *vmrm_intf_dev; + + vmrm_intf_dev = cam_vmrm_get_intf_dev(); + mutex_lock(&vmrm_intf_dev->lock); + driver_pos = cam_driver_node_lookup(driver_id); + if (!driver_pos) { + CAM_ERR(CAM_VMRM, "driver instance 0x%x look up failed", driver_id); + mutex_unlock(&vmrm_intf_dev->lock); + return -EINVAL; + } + mutex_unlock(&vmrm_intf_dev->lock); + + mutex_lock(&driver_pos->msg_comm_lock); + reinit_completion(&driver_pos->wait_response); + + rc = cam_vmrm_send_msg(cam_vmrm_intf_get_vmid(), dest_vm, + CAM_MSG_DST_TYPE_DRIVER_NODE, driver_id, msg_type, response_msg, need_response, + msg, msg_size, &driver_pos->wait_response, timeout); + if (rc) { + CAM_ERR(CAM_VMRM, "send msg for driver id failed: 0x%x, rc: %d", driver_id, rc); + mutex_unlock(&driver_pos->msg_comm_lock); + return rc; + } + + rc = driver_pos->response_result; + if (!rc) + CAM_DBG(CAM_VMRM, "send driver message succeed for hw_id:0x%x", driver_id); + else + CAM_ERR(CAM_VMRM, "send driver message failed for driver_id:0x%x", driver_id); + + mutex_unlock(&driver_pos->msg_comm_lock); + + return rc; + +} + +int cam_vmrm_set_clk_rate_level(uint32_t hw_id, int cesta_client_idx, + enum cam_vote_level clk_level_high, enum cam_vote_level clk_level_low, + bool do_not_set_src_clk, unsigned long clk_rate) +{ + int rc = 0; + struct cam_msg_set_clk_rate_level msg_set_clk_rate_level; + + + msg_set_clk_rate_level.cesta_client_idx = cesta_client_idx; + msg_set_clk_rate_level.clk_level_high = clk_level_high; + msg_set_clk_rate_level.clk_level_low = clk_level_low; + msg_set_clk_rate_level.do_not_set_src_clk = do_not_set_src_clk; + msg_set_clk_rate_level.clk_rate = clk_rate; + + + rc = cam_vmrm_send_hw_msg_wrapper(CAM_PVM, hw_id, CAM_CLK_SET_RATE_LEVEL, false, true, + &msg_set_clk_rate_level, sizeof(msg_set_clk_rate_level), 0); + if (rc) { + CAM_ERR(CAM_VMRM, "send msg for hw id failed: 0x%x, rc: %d", hw_id, rc); + } + + return rc; +} + +int cam_vmrm_soc_enable_disable_resources(uint32_t hw_id, 
bool flag) +{ + int rc = 0; + uint32_t msg_type; + + if (flag) + msg_type = CAM_SOC_ENABLE_RESOURCE; + else + msg_type = CAM_SOC_DISABLE_RESOURCE; + + rc = cam_vmrm_send_hw_msg_wrapper(CAM_PVM, hw_id, msg_type, + false, true, NULL, 1, 0); + if (rc) { + CAM_ERR(CAM_VMRM, "send msg for hw id:0x%x msg_type:0x%x failed rc: %d", + hw_id, msg_type, rc); + return rc; + } + + return rc; +} + +int cam_vmrm_set_src_clk_rate(uint32_t hw_id, int cesta_client_idx, + unsigned long clk_rate_high, unsigned long clk_rate_low) +{ + int rc = 0; + struct cam_msg_set_clk_rate msg_set_clk_rate; + + msg_set_clk_rate.cesta_client_idx = cesta_client_idx; + msg_set_clk_rate.clk_rate_high = clk_rate_high; + msg_set_clk_rate.clk_rate_low = clk_rate_low; + + + rc = cam_vmrm_send_hw_msg_wrapper(CAM_PVM, hw_id, CAM_CLK_SET_RATE, false, true, + &msg_set_clk_rate, sizeof(msg_set_clk_rate), 0); + if (rc) { + CAM_ERR(CAM_VMRM, "send msg for hw id failed: 0x%x, rc: %d", hw_id, rc); + return rc; + } + + return rc; +} + +int cam_vmrm_icc_vote(const char *name, uint64_t ab, uint64_t ib) +{ + int rc = 0; + struct cam_msg_icc_vote msg_icc_vote; + + CAM_DBG(CAM_VMRM, "name: %s icc vote [%llu] ib[%llu]", name, ab, ib); + + scnprintf(msg_icc_vote.name, sizeof(msg_icc_vote.name), "%s", name); + msg_icc_vote.ab = ab; + msg_icc_vote.ib = ib; + + rc = cam_vmrm_send_hw_msg_wrapper(CAM_PVM, CAM_HW_ID_CPAS, CAM_ICC_VOTE, false, true, + &msg_icc_vote, sizeof(msg_icc_vote), 0); + if (rc) { + CAM_ERR(CAM_VMRM, "send msg for name %s failed rc: %d", name, rc); + return rc; + } + + return rc; +} + +int cam_vmrm_sensor_power_up(uint32_t hw_id) +{ + int rc = 0; + + rc = cam_vmrm_send_hw_msg_wrapper(CAM_PVM, hw_id, CAM_HW_POWER_UP, false, true, NULL, 1, + 0); + if (rc) { + CAM_ERR(CAM_VMRM, "hw id power up failed: 0x%x, rc: %d", hw_id, rc); + return rc; + } + + return rc; +} + +int cam_vmrm_sensor_power_down(uint32_t hw_id) +{ + int rc = 0; + + rc = cam_vmrm_send_hw_msg_wrapper(CAM_PVM, hw_id, CAM_HW_POWER_DOWN, false, 
true, NULL, 1, + 0); + if (rc) { + CAM_ERR(CAM_VMRM, "hw id power down failed: 0x%x, rc: %d", hw_id, rc); + return rc; + } + + return rc; +} + +int cam_vmrm_icp_send_msg(uint32_t dest_vm, uint32_t hw_mgr_id, uint32_t msg_type, bool need_ack, + void *msg, uint32_t msg_size, uint32_t timeout) +{ + int rc = 0; + + rc = cam_vmrm_send_driver_msg_wrapper(dest_vm, CAM_DRIVER_ID_ICP + hw_mgr_id, + msg_type, false, need_ack, msg, msg_size, timeout); + if (rc) { + CAM_ERR(CAM_VMRM, "ICP%d Failed in sending msg dest_driver:%d rc %d", + hw_mgr_id, CAM_DRIVER_ID_ICP + hw_mgr_id, rc); + return rc; + } + + + return rc; +} + +#else +bool cam_vmrm_is_supported(void) +{ + return false; +} + +uint32_t cam_vmrm_intf_get_vmid(void) +{ + return CAM_PVM; +} + +bool cam_vmrm_proxy_clk_rgl_voting_enable(void) +{ + return false; +} + +bool cam_vmrm_proxy_icc_voting_enable(void) +{ + return false; +} + +bool cam_vmrm_no_register_read_on_bind(void) +{ + return false; +} + +int cam_vmvm_populate_hw_instance_info(struct cam_hw_soc_info *soc_info, + msg_cb_func hw_msg_callback, void *hw_msg_callback_data) +{ + return 0; +} + +int cam_vmrm_populate_driver_node_info(struct cam_driver_node *driver_node) +{ + return 0; +} + +int cam_vmrm_populate_io_resource_info(void) +{ + return 0; +} + +int cam_vmrm_register_gh_callback(void) +{ + return 0; +} + +int cam_vmrm_unregister_gh_callback(void) +{ + return 0; +} + +int cam_vmrm_send_msg(uint32_t source_vmid, uint32_t des_vmid, uint32_t msg_dst_type, + uint32_t msg_dst_id, uint32_t msg_type, bool response_msg, bool need_response, + void *msg_data, uint32_t data_size, struct completion *complete, uint32_t timeout) +{ + return 0; +} + +int cam_vmrm_soc_acquire_resources(uint32_t hw_id) +{ + return 0; +} + +int cam_vmrm_soc_release_resources(uint32_t hw_id) +{ + return 0; +} + +int cam_vmrm_soc_enable_disable_resources(uint32_t hw_id, bool flag) +{ + return 0; +} + +int cam_vmrm_set_src_clk_rate(uint32_t hw_id, int cesta_client_idx, + unsigned long 
clk_rate_high, unsigned long clk_rate_low) +{ + return 0; +} + +int cam_vmrm_set_clk_rate_level(uint32_t hw_id, int cesta_client_idx, + enum cam_vote_level clk_level_high, enum cam_vote_level clk_level_low, + bool do_not_set_src_clk, unsigned long clk_rate) +{ + return 0; +} + +int cam_vmrm_icc_vote(const char *name, uint64_t ab, uint64_t ib) +{ + return 0; +} + +int cam_vmrm_sensor_power_up(uint32_t hw_id) +{ + return 0; +} + +int cam_vmrm_sensor_power_down(uint32_t hw_id) +{ + return 0; +} +#endif diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_interface.h b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_interface.h new file mode 100644 index 0000000000..dc7a623b9d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_interface.h @@ -0,0 +1,283 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_vmrm.h" +#include "cam_cpas_api.h" + +#ifndef _CAM_VMRM_INTERFACE_H_ +#define _CAM_VMRM_INTERFACE_H_ + +#ifdef CONFIG_SPECTRA_VMRM +#define CAM_IS_PRIMARY_VM() ((cam_vmrm_intf_get_vmid() == CAM_PVM) \ +? true : false) +#define CAM_IS_SECONDARY_VM() ((cam_vmrm_intf_get_vmid() != CAM_PVM) \ +? 
true : false) +#else +#ifdef CONFIG_ARCH_QTI_VM +#define CAM_IS_PRIMARY_VM() false +#define CAM_IS_SECONDARY_VM() true +#else +#define CAM_IS_PRIMARY_VM() true +#define CAM_IS_SECONDARY_VM() false +#endif +#endif + +/** + * cam_vmrm_is_supported() + * + * @brief: VMRM is supported or not + */ +bool cam_vmrm_is_supported(void); + +/** + * cam_vmrm_intf_get_vmid() + * + * @brief: Get camera vm id + */ +uint32_t cam_vmrm_intf_get_vmid(void); + +/** + * cam_vmrm_proxy_clk_rgl_voting_enable() + * + * @brief: Proxy voting clock regulator enable or not + */ +bool cam_vmrm_proxy_clk_rgl_voting_enable(void); + +/** + * cam_vmrm_proxy_icc_voting_enable() + * + * @brief: Proxy voting icc enable or not + */ +bool cam_vmrm_proxy_icc_voting_enable(void); + +/** + * cam_vmrm_no_register_read_on_bind() + * + * @brief: No register read on bind in tvm enable or not + */ +bool cam_vmrm_no_register_read_on_bind(void); + +/** + * cam_vmvm_populate_hw_instance_info() + * + * @brief: Populate hw instance information to resource manager + * + * @soc_info: Hw soc info + * @hw_msg_callback: HW msg callback + * @hw_msg_callback_data: Hw msg callback data + */ +int cam_vmvm_populate_hw_instance_info(struct cam_hw_soc_info *soc_info, + msg_cb_func hw_msg_callback, void *hw_msg_callback_data); + +/** + * cam_vmrm_populate_driver_node_info() + * + * @brief: Populate driver node information to resource manager + * + * @driver_node: Driver node + */ +int cam_vmrm_populate_driver_node_info(struct cam_driver_node *driver_node); + +/** + * cam_vmrm_populate_io_resource_info() + * + * @brief: Populate io resource information to resource manager + */ +int cam_vmrm_populate_io_resource_info(void); + +/** + * cam_vmrm_register_gh_callback() + * + * @brief: Register gh callback + */ +int cam_vmrm_register_gh_callback(void); + +/** + * cam_vmrm_unregister_gh_callback() + * + * @brief: Unregister gh callback + */ +int cam_vmrm_unregister_gh_callback(void); + +/** + * cam_vmrm_soc_acquire_resources() + * + * 
@brief: Acquire resources, such as interrupt, io memory ownership + * + * @hw_id: Hw id + */ +int cam_vmrm_soc_acquire_resources(uint32_t hw_id); + +/** + * cam_vmrm_soc_release_resources() + * + * @brief: Release the resource to resource manager + * + * @hw_id: Hw id + */ +int cam_vmrm_soc_release_resources(uint32_t hw_id); + +/** + * cam_vmrm_send_msg() + * + * @brief: VMRM send message + * + * @source_vmid: Source vm id + * @des_vmid: Destination vm id + * @msg_dst_type: Message destination type + * @msg_dst_id: Message destination id + * @msg_type: Message type + * @msg_data: Message data payload + * @response_msg Whether response message or actual message + * @need_response Whether need response + * @data_size: Message data payload size + * @complete: Response completion + * @timeout: Timeout for this message, if it is 0 then CAM_VMRM_INTER_VM_MSG_TIMEOUT will + * be used as timeout value + */ +int cam_vmrm_send_msg(uint32_t source_vmid, uint32_t des_vmid, uint32_t msg_dst_type, + uint32_t msg_dst_id, uint32_t msg_type, bool response_msg, bool need_response, + void *msg_data, uint32_t data_size, struct completion *complete, uint32_t timeout); + +/** + * cam_vmrm_send_hw_msg_wrapper() + * + * @brief: VMRM send HW message wrapper + * + * @dest_vm: Destination vm id + * @hw_id: Message destination hw id + * @msg_type: Message type + * @response_msg Whether response message or actual message + * @need_response Whether need response + * @msg: Message data payload + * @msg_size: Message data payload size + * @timeout: Timeout for this message, if it is 0 then CAM_VMRM_INTER_VM_MSG_TIMEOUT will + * be used as timeout value + */ +int cam_vmrm_send_hw_msg_wrapper(uint32_t dest_vm, uint32_t hw_id, uint32_t msg_type, + bool response_msg, bool need_response, void *msg, uint32_t msg_size, uint32_t timeout); + +/** + * cam_vmrm_send_driver_msg_wrapper() + * + * @brief: VMRM send driver message wrapper + * + * @dest_vm: Destination vm id + * @hw_id: Message destination hw id 
+ * @msg_type: Message type + * @response_msg Whether response message or actual message + * @need_response Whether need response + * @msg: Message data payload + * @msg_size: Message data payload size + * @timeout: Timeout for this message, if it is 0 then CAM_VMRM_INTER_VM_MSG_TIMEOUT will + * be used as timeout value + * + */ +int cam_vmrm_send_driver_msg_wrapper(uint32_t dest_vm, uint32_t driver_id, uint32_t msg_type, + bool response_msg, bool need_response, void *msg, uint32_t msg_size, uint32_t timeout); + +/** + * cam_vmrm_soc_enable_disable_resources() + * + * @brief: Vmrm soc enable or disable resources + * + * @hw_id: Hw id + * + * @flag: Enable or disable + */ +int cam_vmrm_soc_enable_disable_resources(uint32_t hw_id, bool flag); + +/** + * cam_vmrm_set_src_clk_rate() + * + * @brief: Vmrm set src clk rate + * + * @hw_id: Hw id + * + * @cesta_client_idx: Cesta client index + * + * @clk_rate_high: Clk rate high + * + * @clk_rate_low: Clk rate low + */ +int cam_vmrm_set_src_clk_rate(uint32_t hw_id, int cesta_client_idx, + unsigned long clk_rate_high, unsigned long clk_rate_low); + +/** + * cam_vmrm_set_clk_rate_level() + * + * @brief: Vmrm set clk rate level + * + * @hw_id: Hw id + * + * @cesta_client_idx: Cesta client index + * + * @clk_level_high: Clk level high + * + * @clk_level_low: Clk level low + * + * @do_not_set_src_clk: Set src clk flag + * + * @clk_rate: Clk rate + */ +int cam_vmrm_set_clk_rate_level(uint32_t hw_id, int cesta_client_idx, + enum cam_vote_level clk_level_high, enum cam_vote_level clk_level_low, + bool do_not_set_src_clk, unsigned long clk_rate); + +/** + * cam_vmrm_icc_vote() + * + * @brief: Vmrm icc vote + * + * @name: Bus client name + * + * @ab: Arbitrated Bandwidth + * + * @ib: Instantaneous Bandwidth + */ +int cam_vmrm_icc_vote(const char *name, uint64_t ab, uint64_t ib); + +/** + * cam_vmrm_sensor_power_up() + * + * @brief: Vmrm sensor power up + * + * @hw_id: Hw id + * + */ +int cam_vmrm_sensor_power_up(uint32_t hw_id); + 
+/** + * cam_vmrm_sensor_power_down() + * + * @brief: Vmrm sensor power down + * + * @hw_id: Hw id + * + */ +int cam_vmrm_sensor_power_down(uint32_t hw_id); + +/** + * cam_vmrm_icp_send_msg() + * + * @brief: VMRM ICP commands + * + * dest_vm: cam vmid of the destination vm + * + * hw_mgr_id: hw_mgr_id of the icp + * + * need_ack: If we need_ack for this message + * + * msg: icp intervm message + * + * msg_size: size of the intervm message + * + * timeout: timeout value for this message + * + */ +int cam_vmrm_icp_send_msg(uint32_t dest_vm, uint32_t hw_mgr_id, uint32_t vmrm_msg_type, + bool need_ack, void *msg, uint32_t msg_size, uint32_t timeout); + +#endif /* _CAM_VMRM_INTERFACE_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_msg_handler.h b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_msg_handler.h new file mode 100644 index 0000000000..7439455b7d --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/cam_vmrm_msg_handler.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _CAM_VMRM_MSG_HANDLER_H_ +#define _CAM_VMRM_MSG_HANDLER_H_ + +/* message destination type */ +#define CAM_MSG_DST_TYPE_HW_INSTANCE 0 +#define CAM_MSG_DST_TYPE_DRIVER_NODE 1 +#define CAM_MSG_DST_TYPE_VMRM 2 + +/* max length of bus client name */ +#define CAM_ICC_CLIENT_NAME_MAX 32 + +/* message type */ +#define CAM_HW_RESOURCE_ACQUIRE 0 +#define CAM_HW_RESOURCE_RELEASE 1 +#define CAM_HW_RESOURCE_SET_ACQUIRE 2 +#define CAM_HW_RESOURCE_SET_RELEASE 3 +#define CAM_SOC_ENABLE_RESOURCE 4 +#define CAM_SOC_DISABLE_RESOURCE 5 +#define CAM_CLK_SET_RATE 6 +#define CAM_CLK_SET_RATE_LEVEL 7 +#define CAM_ICC_VOTE 8 +#define CAM_HW_POWER_UP 9 +#define CAM_HW_POWER_DOWN 10 +#define CAM_ICP_POWER_COLLAPSE 11 +#define CAM_ICP_POWER_RESUME 12 +#define CAM_ICP_SHUTDOWN 13 +#define CAM_MSG_TYPE_MAX 14 + +/* vm communication message payload data_size only includes the data size*/ +struct cam_vmrm_msg { + uint32_t msg_dst_type; + uint32_t msg_dst_id; + uint32_t msg_type; + uint32_t source_vmid; + uint32_t des_vmid; + uint32_t data_size; + bool response_msg; + bool need_response; + int response_result; + uint8_t data[]; +}; + +/** + * struct cam_msg_set_clk_rate - camera set clock rate message data payload + * + * @cesta_client_idx: Cesta client index + * @clk_rate_high: Clock high rate + * @clk_rate_low: Clock low rate + * + */ +struct cam_msg_set_clk_rate { + int cesta_client_idx; + unsigned long clk_rate_high; + unsigned long clk_rate_low; +}; + +/** + * struct cam_msg_set_clk_rate_level - camera set clock rate level message data payload + * + * @cesta_client_idx: Cesta client index + * @clk_level_high: Clock high level + * @clk_level_low: Clock low level + * @do_not_set_src_clk: Set src clk flag + * @clk_rate: Clock rate + * + */ +struct cam_msg_set_clk_rate_level { + int cesta_client_idx; + int32_t clk_level_high; + int32_t clk_level_low; + bool do_not_set_src_clk; + unsigned long clk_rate; +}; + +/** + * struct cam_msg_icc_vote - camera icc vote message data payload + 
* + * @name: Bus client name + * @ab: Arbitrated Bandwidth + * @ib: Instantaneous Bandwidth + * + */ +struct cam_msg_icc_vote { + char name[CAM_ICC_CLIENT_NAME_MAX]; + uint64_t ab; + uint64_t ib; +}; + +#endif /* _CAM_VMRM_MSG_HANDLER_H_ */ diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/qrtr/cam_qrtr_comms.c b/qcom/opensource/camera-kernel/drivers/cam_vmrm/qrtr/cam_qrtr_comms.c new file mode 100644 index 0000000000..23af2e5164 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/cam_vmrm/qrtr/cam_qrtr_comms.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "cam_qrtr_comms.h" +#include "cam_mem_mgr_api.h" + +static int cam_qrtr_handle_ctrl_message_locked(struct cam_inter_vm_comms_handle *handle, + struct qrtr_ctrl_pkt *pkt) +{ + struct cam_qrtr_comms_data *qrtr_data; + + qrtr_data = (struct cam_qrtr_comms_data *)handle->comms_protocol_data; + + /* + * For the current requirement for TVM secure camera, no other control + * messages need to be processed. When we support graceful TVM shutdown + * or PVM camera server shutdown, we need to handle QRTR_TYPE_BYE, + * QRTR_TYPE_DEL_SERVER, QRTR_TYPE_DEL_CLIENT. + */ + if (!handle->is_server_vm) { + /* + * The only control message expected on the client side is + * the'NEW_SERVER' message, before the communication has been + * established. 
+		 */
+		if (le32_to_cpu(pkt->cmd) == QRTR_TYPE_NEW_SERVER &&
+			!handle->is_comms_established) {
+			qrtr_data->remote_sock_addr.sq_node = pkt->server.node;
+			qrtr_data->remote_sock_addr.sq_port = pkt->server.port;
+			qrtr_data->remote_sock_addr.sq_family = AF_QIPCRTR;
+			handle->is_comms_established = true;
+			CAM_DBG(CAM_VMRM, "Connection is established");
+		}
+	} else {
+		CAM_DBG(CAM_VMRM, "Unexpected control message on the server side");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Send @size bytes to the remote VM. Must be called with handle->comms_lock
+ * held by the caller (hence the _locked suffix); this function never drops
+ * that lock itself.
+ */
+static int cam_qrtr_send_message_locked(struct cam_inter_vm_comms_handle *handle,
+	void *data, size_t size)
+{
+	struct kvec iv;
+	/* zero-init so msg_control/msg_flags are not garbage for kernel_sendmsg */
+	struct msghdr msg = { };
+	int ret;
+	struct cam_qrtr_comms_data *qrtr_data;
+
+	if (size <= 0) {
+		CAM_ERR(CAM_VMRM, "Invalid message length");
+		return -EINVAL;
+	}
+
+	qrtr_data = (struct cam_qrtr_comms_data *)handle->comms_protocol_data;
+	msg.msg_name = &qrtr_data->remote_sock_addr;
+	msg.msg_namelen = sizeof(qrtr_data->remote_sock_addr);
+	iv.iov_base = data;
+	iv.iov_len = size;
+
+	ret = kernel_sendmsg(qrtr_data->sock, &msg, &iv, 1, size);
+	if (ret < 0) {
+		CAM_ERR(CAM_VMRM, "Failed to send message: %d\n", ret);
+		/*
+		 * fix: do NOT unlock comms_lock here; the caller
+		 * (cam_qrtr_handle_incoming_message) owns the lock and
+		 * unlocks it itself, so unlocking here double-unlocked.
+		 */
+		return ret;
+	}
+
+	return 0;
+}
+
+static int cam_qrtr_send_response_locked(struct cam_inter_vm_comms_handle *handle,
+	void *data, size_t size)
+{
+	return cam_qrtr_send_message_locked(handle, data, size);
+}
+
+/*
+ * Work handler: drain all pending qrtr messages for this handle, dispatching
+ * control packets and data messages, and send responses when requested.
+ */
+static void cam_qrtr_handle_incoming_message(struct work_struct *work)
+{
+	struct cam_inter_vm_comms_handle *handle;
+	ssize_t msg_len;
+	struct sockaddr_qrtr sq;
+	struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };
+	struct kvec iv;
+	struct qrtr_ctrl_pkt *pkt;
+	struct cam_qrtr_comms_data *qrtr_data;
+	struct cam_intervm_response response;
+
+	handle = container_of(work, struct cam_inter_vm_comms_handle, msg_recv_work);
+	if (unlikely(!handle)) {
+		CAM_ERR(CAM_VMRM, "Invalid work data");
+		return;
+	}
+
+	mutex_lock(&handle->comms_lock);
+	if (handle->is_comms_terminated) {
+		CAM_ERR(CAM_VMRM, "Connection has been terminated");
+		mutex_unlock(&handle->comms_lock);
+		return;
+	}
+	/* Read the messages until the queue is exhausted */
+	for (;;) {
+		qrtr_data = (struct cam_qrtr_comms_data *)handle->comms_protocol_data;
+		iv.iov_base = handle->msg_buffer;
+		iv.iov_len = handle->msg_size;
+
+		msg_len = kernel_recvmsg(qrtr_data->sock, &msg, &iv, 1,
+			iv.iov_len, MSG_DONTWAIT);
+		if (msg_len == -EAGAIN)
+			break;
+		if (msg_len <= 0) {
+			CAM_ERR(CAM_VMRM, "Invalid message received");
+			mutex_unlock(&handle->comms_lock);
+			return;
+		}
+
+		CAM_DBG(CAM_VMRM, "New message received. is_server_vm = %d", handle->is_server_vm);
+
+		if (sq.sq_node == qrtr_data->local_sock_addr.sq_node &&
+			sq.sq_port == QRTR_PORT_CTRL) {
+			if (msg_len == sizeof(struct qrtr_ctrl_pkt)) {
+				pkt = handle->msg_buffer;
+				cam_qrtr_handle_ctrl_message_locked(handle, pkt);
+			}
+		} else {
+			/*
+			 * Upon the first data message from the client, save the
+			 * client address for future communication and set the
+			 * status as communication established.
+			 */
+			CAM_DBG(CAM_VMRM, "New data message received. 
is_server_vm = %d", + handle->is_server_vm); + if (handle->is_server_vm) { + if (!handle->is_comms_established) { + qrtr_data->remote_sock_addr.sq_node = sq.sq_node; + qrtr_data->remote_sock_addr.sq_port = sq.sq_port; + qrtr_data->remote_sock_addr.sq_family = AF_QIPCRTR; + handle->is_comms_established = true; + CAM_DBG(CAM_VMRM, "Connection is established"); + } + } + + handle->message_cb((void *)handle->msg_buffer, msg_len, &response); + if (response.send_response) { + if (cam_qrtr_send_response_locked(handle, response.res_msg, + response.msg_size)) + CAM_ERR(CAM_VMRM, "Sending the response failed"); + CAM_MEM_FREE(response.res_msg); + } + } + } + mutex_unlock(&handle->comms_lock); +} + +static void cam_qrtr_sock_data_ready(struct sock *sk) +{ + struct cam_inter_vm_comms_handle *handle = sk->sk_user_data; + + /* The 'handle' becomes NULL when the connection is terminated */ + if (unlikely(!handle)) { + CAM_ERR(CAM_VMRM, "Invalid comms handle"); + return; + } + + CAM_DBG(CAM_VMRM, "Submit an offline work to process the message"); + queue_work(handle->msg_recv_wq, &handle->msg_recv_work); +} + +int cam_qrtr_initialize_connection(void **hdl, size_t msg_size, + handle_message_cb msg_cb, bool is_server_vm) +{ + struct cam_qrtr_comms_data *qrtr_data; + int ret; + struct qrtr_ctrl_pkt pkt; + struct msghdr msg = { }; + struct kvec iv = { &pkt, sizeof(pkt) }; + struct cam_inter_vm_comms_handle *handle; + + if (!hdl) { + CAM_ERR(CAM_VMRM, "Invalid handle holder"); + return -EINVAL; + } + + handle = CAM_MEM_ZALLOC(sizeof(struct cam_inter_vm_comms_handle), GFP_KERNEL); + if (unlikely(!handle)) { + CAM_ERR(CAM_VMRM, "Failed allocating comms handle"); + return -ENOMEM; + } + + /* Receive buffer should be able to hold control packet as well */ + if (msg_size < sizeof(struct qrtr_ctrl_pkt)) + msg_size = sizeof(struct qrtr_ctrl_pkt); + + handle->msg_size = msg_size; + handle->msg_buffer = CAM_MEM_ZALLOC(msg_size, GFP_KERNEL); + if (!handle->msg_buffer) { + CAM_ERR(CAM_VMRM, 
"Failed to allocate the memory for comms data"); + CAM_MEM_FREE(handle); + return -ENOMEM; + } + + qrtr_data = CAM_MEM_ZALLOC(sizeof(struct cam_qrtr_comms_data), GFP_KERNEL); + if (!qrtr_data) { + CAM_ERR(CAM_VMRM, "Failed to allocate the memory for comms data"); + CAM_MEM_FREE(handle->msg_buffer); + CAM_MEM_FREE(handle); + return -ENOMEM; + } + handle->comms_protocol_data = (void *) qrtr_data; + + INIT_WORK(&handle->msg_recv_work, cam_qrtr_handle_incoming_message); + handle->msg_recv_wq = alloc_workqueue(CAM_INTER_VM_COMMS_WQ_NAME, + WQ_HIGHPRI | WQ_UNBOUND, CAM_INTER_VM_COMMS_MAX_PENDING_WORKS); + if (!handle->msg_recv_wq) { + CAM_ERR(CAM_VMRM, "Failed to allocate the work queue"); + ret = -ENOMEM; + goto wq_alloc_failed; + } + + mutex_init(&handle->comms_lock); + handle->message_cb = msg_cb; + handle->is_server_vm = is_server_vm; + handle->is_comms_established = false; + handle->is_comms_terminated = false; + + /* Create the socket */ + ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM, + PF_QIPCRTR, &qrtr_data->sock); + if (ret < 0) { + CAM_ERR(CAM_VMRM, "Failed to create the socket"); + goto early_init_failed; + } + + ret = kernel_getsockname(qrtr_data->sock, (struct sockaddr *)&qrtr_data->local_sock_addr); + if (ret < 0) { + CAM_ERR(CAM_VMRM, "Failed to get socket name"); + goto late_init_failed; + } + + qrtr_data->sock->sk->sk_user_data = handle; + qrtr_data->sock->sk->sk_data_ready = cam_qrtr_sock_data_ready; + qrtr_data->sock->sk->sk_error_report = cam_qrtr_sock_data_ready; + + if (!handle->is_server_vm) { + /* Send the lookup */ + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP); + pkt.server.service = cpu_to_le32(CAM_HOST_SERVICE_SVC_ID); + pkt.server.instance = cpu_to_le32(CAM_HOST_SVC_VERS | CAM_HOST_SERVICE_INS_ID << 8); + + qrtr_data->local_sock_addr.sq_port = QRTR_PORT_CTRL; + + msg.msg_name = &qrtr_data->local_sock_addr; + msg.msg_namelen = sizeof(qrtr_data->local_sock_addr); + + ret = 
kernel_sendmsg(qrtr_data->sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0) { + CAM_ERR(CAM_VMRM, + "Failed to send lookup registration. Init failed: %d", ret); + goto late_init_failed; + } + CAM_DBG(CAM_VMRM, "Server look up has been sent"); + } else { + /* Start the server */ + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER); + pkt.server.service = cpu_to_le32(CAM_HOST_SERVICE_SVC_ID); + pkt.server.instance = cpu_to_le32(CAM_HOST_SVC_VERS | CAM_HOST_SERVICE_INS_ID << 8); + pkt.server.node = cpu_to_le32(qrtr_data->local_sock_addr.sq_node); + pkt.server.port = cpu_to_le32(qrtr_data->local_sock_addr.sq_port); + + qrtr_data->local_sock_addr.sq_port = QRTR_PORT_CTRL; + + msg.msg_name = &qrtr_data->local_sock_addr; + msg.msg_namelen = sizeof(qrtr_data->local_sock_addr); + + ret = kernel_sendmsg(qrtr_data->sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0) { + CAM_ERR(CAM_VMRM, "Failed to register the service. Init failed: %d", ret); + goto late_init_failed; + } + CAM_DBG(CAM_VMRM, "New server information has been broadcasted"); + } + + *hdl = (void *)handle; + return 0; + +late_init_failed: + sock_release(qrtr_data->sock); +early_init_failed: + destroy_workqueue(handle->msg_recv_wq); +wq_alloc_failed: + CAM_MEM_FREE(handle->msg_buffer); + CAM_MEM_FREE(handle->comms_protocol_data); + CAM_MEM_FREE(handle); + return ret; +} + +int cam_qrtr_send_message(void *hdl, void *data, size_t size) +{ + struct cam_inter_vm_comms_handle *handle = (struct cam_inter_vm_comms_handle *)hdl; + int ret; + + if (unlikely(!handle)) { + CAM_ERR(CAM_VMRM, "Invalid comms handle"); + return -EINVAL; + } + + mutex_lock(&handle->comms_lock); + + if (handle->is_comms_terminated) { + CAM_ERR(CAM_VMRM, "Connection nas been terminated"); + mutex_unlock(&handle->comms_lock); + return -EINVAL; + } else if (!handle->is_comms_established) { + /* Server port (PVM) is expected to start before the Client port (SVM) */ + if (handle->is_server_vm) { + CAM_WARN(CAM_VMRM, "Client VM has 
not started the comms yet"); + mutex_unlock(&handle->comms_lock); + return -EAGAIN; + } + CAM_ERR(CAM_VMRM, "Server on the PVM has not started yet"); + mutex_unlock(&handle->comms_lock); + return -EINVAL; + } + + + ret = cam_qrtr_send_message_locked(handle, data, size); + if (ret < 0) { + CAM_ERR(CAM_VMRM, "Failed to send message: %d\n", ret); + mutex_unlock(&handle->comms_lock); + return ret; + } + CAM_DBG(CAM_VMRM, "The message is successfully sent"); + + mutex_unlock(&handle->comms_lock); + return 0; +} + +int cam_qrtr_terminate_connection(void *hdl) +{ + struct cam_qrtr_comms_data *qrtr_data; + struct cam_inter_vm_comms_handle *handle = (struct cam_inter_vm_comms_handle *)hdl; + + if (unlikely(!handle)) { + CAM_ERR(CAM_VMRM, "Invalid comms handle"); + return -EINVAL; + } + + /* + * This needs to be refactored when we support graceful + * TVM shutdown or the PVM camera server shutdown. + */ + mutex_lock(&handle->comms_lock); + handle->is_comms_terminated = true; + + qrtr_data = (struct cam_qrtr_comms_data *)handle->comms_protocol_data; + sock_release(qrtr_data->sock); + qrtr_data->sock = NULL; + + flush_workqueue(handle->msg_recv_wq); + destroy_workqueue(handle->msg_recv_wq); + CAM_MEM_FREE(handle->msg_buffer); + CAM_MEM_FREE(handle->comms_protocol_data); + mutex_unlock(&handle->comms_lock); + CAM_MEM_FREE(handle); + + return 0; +} + +int cam_get_inter_vm_comms_function_table(struct cam_inter_vm_comms_ops *ops_table) +{ + if (unlikely(!ops_table)) { + CAM_ERR(CAM_VMRM, "Invalid ops table"); + return -EINVAL; + } + + ops_table->init = cam_qrtr_initialize_connection; + ops_table->send_message = cam_qrtr_send_message; + ops_table->deinit = cam_qrtr_terminate_connection; + + return 0; +} +EXPORT_SYMBOL(cam_get_inter_vm_comms_function_table); diff --git a/qcom/opensource/camera-kernel/drivers/cam_vmrm/qrtr/cam_qrtr_comms.h b/qcom/opensource/camera-kernel/drivers/cam_vmrm/qrtr/cam_qrtr_comms.h new file mode 100644 index 0000000000..e468412444 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/drivers/cam_vmrm/qrtr/cam_qrtr_comms.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __CAM_QRTR_COMMS_H__ +#define __CAM_QRTR_COMMS_H__ + +#include +#include +#include +#include + +#include "cam_trace.h" +#include "cam_debug_util.h" +#include "cam_common_util.h" +#include "cam_inter_vm_comms.h" +#include "cam_inter_vm_comms_data.h" +#include "cam_common_util.h" + + +#define CAM_HOST_SERVICE_SVC_ID 5022 +#define CAM_HOST_SVC_VERS 1 +#define CAM_HOST_SERVICE_INS_ID 1 + +struct cam_qrtr_comms_data { + struct socket *sock; + struct sockaddr_qrtr local_sock_addr; + struct sockaddr_qrtr remote_sock_addr; +}; + +/** + * @brief Returns the ops table for QRTR socket protocol. + * + * @param ops_table : The inter VM comms function table pointer. + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int cam_get_inter_vm_comms_function_table(struct cam_inter_vm_comms_ops *ops_table); + +#endif /*__CAM_QRTR_COMMS_H__*/ diff --git a/qcom/opensource/camera-kernel/drivers/camera_main.c b/qcom/opensource/camera-kernel/drivers/camera_main.c new file mode 100644 index 0000000000..2e88b108fe --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/camera_main.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#include +#include + +#include "cam_req_mgr_dev.h" +#ifdef CONFIG_SPECTRA_VMRM +#include "cam_vmrm.h" +#endif +#include "cam_sync_api.h" +#include "cam_smmu_api.h" +#include "cam_cpas_hw_intf.h" +#include "cam_cdm_intf_api.h" + +#include "cam_ife_csid_dev.h" +#include "cam_vfe.h" +#include "cam_sfe_dev.h" +#include "cam_isp_dev.h" + +#include "cam_res_mgr_api.h" +#include "cam_cci_dev.h" +#include "cam_sensor_dev.h" +#include "cam_actuator_dev.h" +#include "cam_csiphy_dev.h" +#include "cam_eeprom_dev.h" +#include "cam_ois_dev.h" +#include "cam_tpg_dev.h" +#include "cam_flash_dev.h" + +#include "cam_icp_v1_dev.h" +#include "cam_icp_v2_dev.h" +#include "ipe_core.h" +#include "bps_core.h" +#include "ofe_core.h" +#include "cam_icp_subdev.h" + +#include "jpeg_dma_core.h" +#include "jpeg_enc_core.h" +#include "cam_jpeg_dev.h" + +#include "cre_core.h" +#include "cam_cre_dev.h" + +#include "cam_fd_hw_intf.h" +#include "cam_fd_dev.h" + +#include "cam_lrme_hw_intf.h" +#include "cam_lrme_dev.h" + +#include "cam_custom_dev.h" +#include "cam_custom_csid_dev.h" +#include "cam_custom_sub_mod_dev.h" + +#include "cam_debug_util.h" + +#include "ope_dev_intf.h" +#include "cre_dev_intf.h" + +#include "cam_tfe_dev.h" +#include "cam_tfe_csid.h" +#include "cam_csid_ppi100.h" +#include "camera_main.h" + +#ifndef CAMERA_COMPILE_BY +#include "cam_generated_h" +#endif + +char camera_banner[] = "Camera-Banner: (" CAMERA_COMPILE_BY "@" + CAMERA_COMPILE_HOST ") (" CAMERA_COMPILE_TIME ")"; + +#ifdef CONFIG_CAM_PRESIL +extern int cam_presil_framework_dev_init_from_main(void); +extern void cam_presil_framework_dev_exit_from_main(void); +#endif + +struct camera_submodule_component { + int (*init)(void); + void (*exit)(void); +}; + +struct camera_submodule { + char *name; + uint num_component; + const struct camera_submodule_component *component; +}; + +static const struct camera_submodule_component camera_base[] = { + {&cam_req_mgr_init, &cam_req_mgr_exit}, +#ifdef CONFIG_SPECTRA_VMRM + 
{&cam_vmrm_module_init, &cam_vmrm_module_exit}, +#endif + {&cam_sync_init, &cam_sync_exit}, + {&cam_smmu_init_module, &cam_smmu_exit_module}, + {&cam_cpas_dev_init_module, &cam_cpas_dev_exit_module}, + {&cam_cdm_intf_init_module, &cam_cdm_intf_exit_module}, + {&cam_hw_cdm_init_module, &cam_hw_cdm_exit_module}, +}; + +static const struct camera_submodule_component camera_tfe[] = { +#ifdef CONFIG_SPECTRA_TFE + {&cam_csid_ppi100_init_module, &cam_csid_ppi100_exit_module}, + {&cam_tfe_init_module, &cam_tfe_exit_module}, + {&cam_tfe_csid_init_module, &cam_tfe_csid_exit_module}, +#endif +}; + +static const struct camera_submodule_component camera_isp[] = { +#ifdef CONFIG_SPECTRA_ISP + {&cam_ife_csid_init_module, &cam_ife_csid_exit_module}, + {&cam_ife_csid_lite_init_module, &cam_ife_csid_lite_exit_module}, + {&cam_vfe_init_module, &cam_vfe_exit_module}, + {&cam_sfe_init_module, &cam_sfe_exit_module}, + {&cam_isp_dev_init_module, &cam_isp_dev_exit_module}, +#endif +}; + +static const struct camera_submodule_component camera_sensor[] = { +#ifdef CONFIG_SPECTRA_SENSOR + {&cam_res_mgr_init, &cam_res_mgr_exit}, + {&cam_cci_init_module, &cam_cci_exit_module}, + {&cam_csiphy_init_module, &cam_csiphy_exit_module}, + {&cam_tpg_init_module, &cam_tpg_exit_module}, + {&cam_actuator_driver_init, &cam_actuator_driver_exit}, + {&cam_sensor_driver_init, &cam_sensor_driver_exit}, + {&cam_eeprom_driver_init, &cam_eeprom_driver_exit}, + {&cam_ois_driver_init, &cam_ois_driver_exit}, + {&cam_flash_init_module, &cam_flash_exit_module}, +#endif +}; + +static const struct camera_submodule_component camera_icp[] = { +#ifdef CONFIG_SPECTRA_ICP + {&cam_icp_v1_init_module, &cam_icp_v1_exit_module}, + {&cam_icp_v2_init_module, &cam_icp_v2_exit_module}, + {&cam_ipe_init_module, &cam_ipe_exit_module}, + {&cam_bps_init_module, &cam_bps_exit_module}, + {&cam_ofe_init_module, &cam_ofe_exit_module}, + {&cam_icp_init_module, &cam_icp_exit_module}, +#endif +}; + +static const struct 
camera_submodule_component camera_ope[] = { +#ifdef CONFIG_SPECTRA_OPE + {&cam_ope_init_module, &cam_ope_exit_module}, + {&cam_ope_subdev_init_module, &cam_ope_subdev_exit_module}, +#endif +}; + +static const struct camera_submodule_component camera_cre[] = { +#ifdef CONFIG_SPECTRA_CRE + {&cam_cre_init_module, &cam_cre_exit_module}, + {&cam_cre_subdev_init_module, &cam_cre_subdev_exit_module}, +#endif +}; +static const struct camera_submodule_component camera_jpeg[] = { +#ifdef CONFIG_SPECTRA_JPEG + {&cam_jpeg_enc_init_module, &cam_jpeg_enc_exit_module}, + {&cam_jpeg_dma_init_module, &cam_jpeg_dma_exit_module}, + {&cam_jpeg_dev_init_module, &cam_jpeg_dev_exit_module}, +#endif +}; + +static const struct camera_submodule_component camera_fd[] = { +#ifdef CONFIG_SPECTRA_FD + {&cam_fd_hw_init_module, &cam_fd_hw_exit_module}, + {&cam_fd_dev_init_module, &cam_fd_dev_exit_module}, +#endif +}; + +static const struct camera_submodule_component camera_lrme[] = { +#ifdef CONFIG_SPECTRA_LRME + {&cam_lrme_hw_init_module, &cam_lrme_hw_exit_module}, + {&cam_lrme_dev_init_module, &cam_lrme_dev_exit_module}, +#endif +}; + +static const struct camera_submodule_component camera_custom[] = { +#ifdef CONFIG_SPECTRA_CUSTOM + {&cam_custom_hw_sub_module_init, &cam_custom_hw_sub_module_exit}, + {&cam_custom_csid_driver_init, &cam_custom_csid_driver_exit}, + {&cam_custom_dev_init_module, &cam_custom_dev_exit_module}, +#endif +}; + +static const struct camera_submodule_component camera_presil[] = { +#ifdef CONFIG_CAM_PRESIL + {&cam_presil_framework_dev_init_from_main, &cam_presil_framework_dev_exit_from_main}, +#endif +}; + +static const struct camera_submodule submodule_table[] = { + { + .name = "Camera BASE", + .num_component = ARRAY_SIZE(camera_base), + .component = camera_base, + }, + { + .name = "Camera TFE", + .num_component = ARRAY_SIZE(camera_tfe), + .component = camera_tfe, + }, + { + .name = "Camera ISP", + .num_component = ARRAY_SIZE(camera_isp), + .component = camera_isp, + }, + 
	{
		.name = "Camera SENSOR",
		.num_component = ARRAY_SIZE(camera_sensor),
		.component = camera_sensor
	},
	{
		.name = "Camera ICP",
		.num_component = ARRAY_SIZE(camera_icp),
		.component = camera_icp,
	},
	{
		.name = "Camera OPE",
		.num_component = ARRAY_SIZE(camera_ope),
		.component = camera_ope,
	},
	{
		.name = "Camera JPEG",
		.num_component = ARRAY_SIZE(camera_jpeg),
		.component = camera_jpeg,
	},
	{
		.name = "Camera FD",
		.num_component = ARRAY_SIZE(camera_fd),
		.component = camera_fd,
	},
	{
		.name = "Camera LRME",
		.num_component = ARRAY_SIZE(camera_lrme),
		.component = camera_lrme,
	},
	{
		.name = "Camera CRE",
		.num_component = ARRAY_SIZE(camera_cre),
		.component = camera_cre,
	},
	{
		.name = "Camera CUSTOM",
		.num_component = ARRAY_SIZE(camera_custom),
		.component = camera_custom,
	},
	{
		.name = "Camera Presil",
		.num_component = ARRAY_SIZE(camera_presil),
		.component = camera_presil,
	}
};

/*
 * Sanity-check the submodule table before running any init: every listed
 * component must have both an init and an exit callback, so that
 * __camera_exit() can always unwind symmetrically. Returns 0 if the whole
 * table is well-formed, -EINVAL on the first incomplete entry.
 */
static int camera_verify_submodules(void)
{
	int rc = 0;
	int i, j, num_components;

	for (i = 0; i < ARRAY_SIZE(submodule_table); i++) {
		num_components = submodule_table[i].num_component;
		for (j = 0; j < num_components; j++) {
			if (!submodule_table[i].component[j].init ||
				!submodule_table[i].component[j].exit) {
				CAM_ERR(CAM_UTIL,
					"%s module has init = %ps, exit = %ps",
					submodule_table[i].name,
					submodule_table[i].component[j].init,
					submodule_table[i].component[j].exit);
				rc = -EINVAL;
				goto end;
			}
		}
	}

end:
	return rc;
}

/*
 * Unwind initialization in strict reverse order.
 * @i: index of the submodule where init stopped.
 * @j: number of components of submodule @i that were successfully
 *     initialized (their exits run first, j-1 down to 0); then every earlier
 *     submodule is exited completely. Called with i = ARRAY_SIZE, j = 0 for
 *     a full teardown.
 */
static void __camera_exit(int i, int j)
{
	uint num_exits;

	/* Exit from current submodule */
	for (j -= 1; j >= 0; j--)
		submodule_table[i].component[j].exit();

	/* Exit remaining submodules */
	for (i -= 1; i >= 0; i--) {
		num_exits = submodule_table[i].num_component;
		for (j = num_exits - 1; j >= 0; j--)
			submodule_table[i].component[j].exit();
	}
}

/*
 * Module entry point: verify the table, set up debugfs, then run every
 * component init in table order. On the first failure, everything already
 * initialized is rolled back via __camera_exit() and the error is returned.
 */
static int camera_init(void)
{
	int rc;
	uint i, j, num_inits;

	CAM_INFO(CAM_UTIL, "%s", camera_banner);
	rc = camera_verify_submodules();
	if (rc)
		goto end_init;

	cam_debugfs_init();

	/* debugfs failure is non-fatal: log and continue probing */
	rc = cam_soc_util_create_debugfs();
	if (rc)
		CAM_ERR(CAM_UTIL, "Failed to create soc_util debugfs directory");

	/* For Probing all available submodules */
	for (i = 0; i < ARRAY_SIZE(submodule_table); i++) {
		num_inits = submodule_table[i].num_component;
		CAM_DBG(CAM_UTIL, "Number of %s components: %u",
			submodule_table[i].name, num_inits);
		for (j = 0; j < num_inits; j++) {
			rc = submodule_table[i].component[j].init();
			if (rc) {
				CAM_ERR(CAM_UTIL,
					"%s module failure at %ps rc = %d",
					submodule_table[i].name,
					submodule_table[i].component[j].init,
					rc);
				__camera_exit(i, j);
				goto end_init;
			}
		}
	}

	CAM_DBG(CAM_UTIL, "Camera initcalls done");

end_init:
	return rc;
}

/* Module exit: full reverse-order teardown of every submodule, then debugfs. */
static void camera_exit(void)
{
	__camera_exit(ARRAY_SIZE(submodule_table), 0);
	cam_debugfs_deinit();

	CAM_INFO(CAM_UTIL, "Spectra camera driver exited!");
}

module_init(camera_init);
module_exit(camera_exit);

MODULE_DESCRIPTION("Spectra camera driver");
MODULE_LICENSE("GPL v2");
diff --git a/qcom/opensource/camera-kernel/drivers/camera_main.h b/qcom/opensource/camera-kernel/drivers/camera_main.h new file mode 100644 index 0000000000..a969855381 --- /dev/null +++ b/qcom/opensource/camera-kernel/drivers/camera_main.h @@ -0,0 +1,175 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#ifndef CAMERA_MAIN_H +#define CAMERA_MAIN_H + +#include +#include +#include + +#ifdef CONFIG_SPECTRA_VMRM +extern struct platform_driver cam_vmrm_intf_driver; +#endif +extern struct platform_driver cam_sync_driver; +extern struct platform_driver cam_smmu_driver; +extern struct platform_driver cam_cpas_driver; +extern struct platform_driver cam_cdm_intf_driver; +extern struct platform_driver cam_hw_cdm_driver; +#ifdef CONFIG_SPECTRA_ISP +extern struct platform_driver cam_ife_csid_driver; +extern struct platform_driver cam_ife_csid_lite_driver; +extern struct platform_driver cam_vfe_driver; +extern struct platform_driver cam_sfe_driver; +extern struct platform_driver isp_driver; +#endif +#ifdef CONFIG_SPECTRA_TFE +extern struct platform_driver cam_csid_ppi100_driver; +extern struct platform_driver cam_tfe_driver; +extern struct platform_driver cam_tfe_csid_driver; +#endif +#ifdef CONFIG_SPECTRA_SENSOR +extern struct platform_driver cam_res_mgr_driver; +extern struct platform_driver cci_driver; +extern struct platform_driver csiphy_driver; +extern struct platform_driver cam_actuator_platform_driver; +extern struct platform_driver cam_sensor_platform_driver; +extern struct platform_driver cam_eeprom_platform_driver; +extern struct platform_driver cam_ois_platform_driver; +extern struct platform_driver cam_tpg_driver; +extern struct i2c_driver cam_actuator_i2c_driver; +extern struct i2c_driver cam_flash_i2c_driver; +extern struct i2c_driver cam_ois_i2c_driver; +extern struct i2c_driver cam_eeprom_i2c_driver; +extern struct i2c_driver cam_sensor_i2c_driver; +#if IS_REACHABLE(CONFIG_LEDS_QPNP_FLASH_V2) || \ + IS_REACHABLE(CONFIG_LEDS_QTI_FLASH) +extern struct platform_driver cam_flash_platform_driver; +#endif +#endif +#ifdef CONFIG_SPECTRA_ICP +extern struct platform_driver cam_icp_v1_driver; +extern struct platform_driver cam_icp_v2_driver; +extern struct platform_driver cam_ipe_driver; +extern struct platform_driver cam_bps_driver; +extern struct 
platform_driver cam_ofe_driver; +extern struct platform_driver cam_icp_driver; +#endif +#ifdef CONFIG_SPECTRA_OPE +extern struct platform_driver cam_ope_driver; +extern struct platform_driver cam_ope_subdev_driver; +#endif +#ifdef CONFIG_SPECTRA_CRE +extern struct platform_driver cam_cre_driver; +extern struct platform_driver cam_cre_subdev_driver; +#endif +#ifdef CONFIG_SPECTRA_JPEG +extern struct platform_driver cam_jpeg_enc_driver; +extern struct platform_driver cam_jpeg_dma_driver; +extern struct platform_driver jpeg_driver; +#endif +#ifdef CONFIG_SPECTRA_FD +extern struct platform_driver cam_fd_hw_driver; +extern struct platform_driver cam_fd_driver; +#endif +#ifdef CONFIG_SPECTRA_LRME +extern struct platform_driver cam_lrme_hw_driver; +extern struct platform_driver cam_lrme_driver; +#endif +#ifdef CONFIG_SPECTRA_CUSTOM +extern struct platform_driver cam_custom_hw_sub_mod_driver; +extern struct platform_driver cam_custom_csid_driver; +extern struct platform_driver custom_driver; +#endif + +/* + * Drivers to be bound by component framework in this order with + * CRM as master + */ +static struct platform_driver *const cam_component_platform_drivers[] = { +/* BASE */ +#ifdef CONFIG_SPECTRA_VMRM + &cam_vmrm_intf_driver, +#endif + &cam_sync_driver, + &cam_smmu_driver, + &cam_cpas_driver, + &cam_cdm_intf_driver, + &cam_hw_cdm_driver, +#ifdef CONFIG_SPECTRA_TFE + &cam_csid_ppi100_driver, + &cam_tfe_driver, + &cam_tfe_csid_driver, +#endif +#ifdef CONFIG_SPECTRA_ISP + &cam_ife_csid_driver, + &cam_ife_csid_lite_driver, + &cam_vfe_driver, + &cam_sfe_driver, + &isp_driver, +#endif +#ifdef CONFIG_SPECTRA_SENSOR + &cam_res_mgr_driver, + &cci_driver, + &csiphy_driver, + &cam_actuator_platform_driver, + &cam_sensor_platform_driver, + &cam_eeprom_platform_driver, + &cam_ois_platform_driver, + &cam_tpg_driver, +#if IS_REACHABLE(CONFIG_LEDS_QPNP_FLASH_V2) || \ + IS_REACHABLE(CONFIG_LEDS_QTI_FLASH) + &cam_flash_platform_driver, +#endif +#endif +#ifdef CONFIG_SPECTRA_ICP + 
&cam_icp_v1_driver, + &cam_icp_v2_driver, + &cam_ipe_driver, + &cam_bps_driver, + &cam_ofe_driver, + &cam_icp_driver, +#endif +#ifdef CONFIG_SPECTRA_OPE + &cam_ope_driver, + &cam_ope_subdev_driver, +#endif +#ifdef CONFIG_SPECTRA_JPEG + &cam_jpeg_enc_driver, + &cam_jpeg_dma_driver, + &jpeg_driver, +#endif +#ifdef CONFIG_SPECTRA_CRE + &cam_cre_driver, + &cam_cre_subdev_driver, +#endif +#ifdef CONFIG_SPECTRA_FD + &cam_fd_hw_driver, + &cam_fd_driver, +#endif +#ifdef CONFIG_SPECTRA_LRME + &cam_lrme_hw_driver, + &cam_lrme_driver, +#endif +#ifdef CONFIG_SPECTRA_CUSTOM + &cam_custom_hw_sub_mod_driver, + &cam_custom_csid_driver, + &custom_driver, +#endif +}; + + +static struct i2c_driver *const cam_component_i2c_drivers[] = { +#ifdef CONFIG_SPECTRA_SENSOR + &cam_actuator_i2c_driver, + &cam_flash_i2c_driver, + &cam_ois_i2c_driver, + &cam_eeprom_i2c_driver, + &cam_sensor_i2c_driver, +#endif +}; + +#endif /* CAMERA_MAIN_H */ diff --git a/qcom/opensource/camera-kernel/dt-bindings/msm-camera.h b/qcom/opensource/camera-kernel/dt-bindings/msm-camera.h new file mode 100644 index 0000000000..1149d4a44a --- /dev/null +++ b/qcom/opensource/camera-kernel/dt-bindings/msm-camera.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __MSM_CAMERA_H +#define __MSM_CAMERA_H + +/* CPAS path data types */ +#define CAM_CPAS_PATH_DATA_IFE_START_OFFSET 0 +#define CAM_CPAS_PATH_DATA_IFE_LINEAR (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 0) +#define CAM_CPAS_PATH_DATA_IFE_VID (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_IFE_DISP (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 2) +#define CAM_CPAS_PATH_DATA_IFE_STATS (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 3) +#define CAM_CPAS_PATH_DATA_IFE_RDI0 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 4) +#define CAM_CPAS_PATH_DATA_IFE_RDI1 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 5) +#define CAM_CPAS_PATH_DATA_IFE_RDI2 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 6) +#define CAM_CPAS_PATH_DATA_IFE_RDI3 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 7) +#define CAM_CPAS_PATH_DATA_IFE_PDAF (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 8) +#define CAM_CPAS_PATH_DATA_IFE_PIXEL_RAW (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 9) +#define CAM_CPAS_PATH_DATA_IFE_FULL (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 10) +#define CAM_CPAS_PATH_DATA_IFE_DS2 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 11) +#define CAM_CPAS_PATH_DATA_IFE_DS4 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 12) +#define CAM_CPAS_PATH_DATA_IFE_DS16 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 13) +#define CAM_CPAS_PATH_DATA_IFE_RDI4 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 14) +#define CAM_CPAS_PATH_DATA_IFE_PDAF_1 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 15) +#define CAM_CPAS_PATH_DATA_IFE_PDAF_2 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 16) +#define CAM_CPAS_PATH_DATA_IFE_PDAF_3 (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 17) +#define CAM_CPAS_PATH_DATA_IFE_IR (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 18) +#define CAM_CPAS_PATH_DATA_IFE_FD (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 19) +#define CAM_CPAS_PATH_DATA_IFE_MAX_OFFSET \ + (CAM_CPAS_PATH_DATA_IFE_START_OFFSET + 31) + +#define CAM_CPAS_PATH_DATA_IPE_START_OFFSET 32 +#define CAM_CPAS_PATH_DATA_IPE_RD_IN (CAM_CPAS_PATH_DATA_IPE_START_OFFSET + 0) +#define CAM_CPAS_PATH_DATA_IPE_RD_REF 
(CAM_CPAS_PATH_DATA_IPE_START_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_IPE_WR_VID (CAM_CPAS_PATH_DATA_IPE_START_OFFSET + 2) +#define CAM_CPAS_PATH_DATA_IPE_WR_DISP (CAM_CPAS_PATH_DATA_IPE_START_OFFSET + 3) +#define CAM_CPAS_PATH_DATA_IPE_WR_REF (CAM_CPAS_PATH_DATA_IPE_START_OFFSET + 4) +#define CAM_CPAS_PATH_DATA_IPE_WR_APP (CAM_CPAS_PATH_DATA_IPE_START_OFFSET + 5) +#define CAM_CPAS_PATH_DATA_IPE_MAX_OFFSET \ + (CAM_CPAS_PATH_DATA_IPE_START_OFFSET + 31) + +#define CAM_CPAS_PATH_DATA_OPE_START_OFFSET 64 +#define CAM_CPAS_PATH_DATA_OPE_RD_IN (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 0) +#define CAM_CPAS_PATH_DATA_OPE_RD_REF (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_OPE_WR_VID (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 2) +#define CAM_CPAS_PATH_DATA_OPE_WR_DISP (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 3) +#define CAM_CPAS_PATH_DATA_OPE_WR_REF (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 4) +#define CAM_CPAS_PATH_DATA_OPE_MAX_OFFSET \ + (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 31) + +#define CAM_CPAS_PATH_DATA_SFE_START_OFFSET 96 +#define CAM_CPAS_PATH_DATA_SFE_NRDI (CAM_CPAS_PATH_DATA_SFE_START_OFFSET + 0) +#define CAM_CPAS_PATH_DATA_SFE_RDI0 (CAM_CPAS_PATH_DATA_SFE_START_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_SFE_RDI1 (CAM_CPAS_PATH_DATA_SFE_START_OFFSET + 2) +#define CAM_CPAS_PATH_DATA_SFE_RDI2 (CAM_CPAS_PATH_DATA_SFE_START_OFFSET + 3) +#define CAM_CPAS_PATH_DATA_SFE_RDI3 (CAM_CPAS_PATH_DATA_SFE_START_OFFSET + 4) +#define CAM_CPAS_PATH_DATA_SFE_RDI4 (CAM_CPAS_PATH_DATA_SFE_START_OFFSET + 5) +#define CAM_CPAS_PATH_DATA_SFE_STATS (CAM_CPAS_PATH_DATA_SFE_START_OFFSET + 6) +#define CAM_CPAS_PATH_DATA_SFE_MAX_OFFSET \ + (CAM_CPAS_PATH_DATA_SFE_START_OFFSET + 31) + +#define CAM_CPAS_PATH_DATA_CRE_START_OFFSET (CAM_CPAS_PATH_DATA_SFE_MAX_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_CRE_RD_IN (CAM_CPAS_PATH_DATA_CRE_START_OFFSET + 0) +#define CAM_CPAS_PATH_DATA_CRE_WR_OUT (CAM_CPAS_PATH_DATA_CRE_START_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_CRE_MAX_OFFSET \ + 
(CAM_CPAS_PATH_DATA_CRE_START_OFFSET + 31) + +#define CAM_CPAS_PATH_DATA_OFE_START_OFFSET (CAM_CPAS_PATH_DATA_CRE_MAX_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_OFE_RD_EXT (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 0) +#define CAM_CPAS_PATH_DATA_OFE_RD_INT_PDI (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_OFE_RD_INT_HDR (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 2) +#define CAM_CPAS_PATH_DATA_OFE_WR_VID (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 3) +#define CAM_CPAS_PATH_DATA_OFE_WR_DISP (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 4) +#define CAM_CPAS_PATH_DATA_OFE_WR_IR (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 5) +#define CAM_CPAS_PATH_DATA_OFE_WR_HDR_LTM (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 6) +#define CAM_CPAS_PATH_DATA_OFE_WR_DC4 (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 7) +#define CAM_CPAS_PATH_DATA_OFE_WR_AI (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 8) +#define CAM_CPAS_PATH_DATA_OFE_WR_PDI (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 9) +#define CAM_CPAS_PATH_DATA_OFE_WR_IDEALRAW (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 10) +#define CAM_CPAS_PATH_DATA_OFE_WR_STATS (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 11) +#define CAM_CPAS_PATH_DATA_OFE_MAX_OFFSET \ + (CAM_CPAS_PATH_DATA_OFE_START_OFFSET + 31) + +#define CAM_CPAS_PATH_DATA_CONSO_OFFSET 256 +#define CAM_CPAS_PATH_DATA_ALL (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 0) + +/* IFE consolidated paths */ +#define CAM_CPAS_PATH_DATA_IFE_LINEAR_PDAF (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 1) +#define CAM_CPAS_PATH_DATA_IFE_UBWC_STATS (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 2) +#define CAM_CPAS_PATH_DATA_IFE_PIXEL_ALL (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 3) +#define CAM_CPAS_PATH_DATA_IFE_RDI_PIXEL_RAW (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 4) +#define CAM_CPAS_PATH_DATA_IFE_RDI_ALL (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 5) +#define CAM_CPAS_PATH_DATA_IFE_UBWC (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 6) +#define CAM_CPAS_PATH_DATA_IFE_LINEAR_STATS (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 7) +#define CAM_CPAS_PATH_DATA_IFE_UBWC_LINEAR 
(CAM_CPAS_PATH_DATA_CONSO_OFFSET + 8) +#define CAM_CPAS_PATH_DATA_IFE_PDAF_LINEAR (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 9) +#define CAM_CPAS_PATH_DATA_IFE_PATH1 (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 10) +#define CAM_CPAS_PATH_DATA_IFE_PATH2 (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 11) +#define CAM_CPAS_PATH_DATA_IFE_PATH3 (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 12) +#define CAM_CPAS_PATH_DATA_IFE_PATH4 (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 13) +#define CAM_CPAS_PATH_DATA_IFE_PATH5 (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 14) + +/* IPE Consolidated paths */ +#define CAM_CPAS_PATH_DATA_IPE_WR_VID_DISP (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 1) + +/* CPAS transaction types */ +#define CAM_CPAS_TRANSACTION_READ 0 +#define CAM_CPAS_TRANSACTION_WRITE 1 + +/* CPAS traffic merge types */ +#define CAM_CPAS_TRAFFIC_MERGE_SUM 0 +#define CAM_CPAS_TRAFFIC_MERGE_SUM_INTERLEAVE 1 + +/* Feature bit type */ +#define CAM_CPAS_FEATURE_TYPE_DISABLE 0 +#define CAM_CPAS_FEATURE_TYPE_ENABLE 1 +#define CAM_CPAS_FEATURE_TYPE_VALUE 2 + +/* Feature support bit positions in feature fuse register*/ +#define CAM_CPAS_QCFA_BINNING_ENABLE 0 +#define CAM_CPAS_SECURE_CAMERA_ENABLE 1 +#define CAM_CPAS_MF_HDR_ENABLE 2 +#define CAM_CPAS_MP_LIMIT_FUSE 3 +#define CAM_CPAS_ISP_FUSE 4 +#define CAM_CPAS_ISP_PIX_FUSE 5 +#define CAM_CPAS_ISP_LITE_FUSE 6 +#define CAM_CPAS_CSIPHY_FUSE 7 +#define CAM_CPAS_IPE_VID_OUT_8BPP_LIMIT_ENABLE 8 +#define CAM_CPAS_SFE_FUSE 9 +#define CAM_CPAS_CUSTOM_FUSE 10 +#define CAM_CPAS_CAM_FUSE 11 +#define CAM_CPAS_FUSE_FEATURE_MAX 12 + +#define CCI_MASTER_0 0 +#define CCI_MASTER_1 1 +#define CCI_MASTER_MAX 2 + +/* AON Camera IDs*/ +#define AON_CAM1 0 +#define AON_CAM2 1 +#define MAX_AON_CAM 2 +#define NOT_AON_CAM 255 + +/* Camera DRV enable masks */ +#define CAM_DDR_DRV 0x1 +#define CAM_CLK_DRV 0x2 + +/* Port index for BW voting */ +#define CAM_CPAS_PORT_HLOS_DRV 0 +#define CAM_CPAS_PORT_DRV_0 1 +#define CAM_CPAS_PORT_DRV_1 2 +#define CAM_CPAS_PORT_DRV_2 3 +#define CAM_CPAS_PORT_DRV_DYN 32 + +/* Domain ID types 
*/ +#define CAM_CPAS_NON_SECURE_DOMAIN 0 +#define CAM_CPAS_SECURE_DOMAIN 1 + +/* Debug bypass driver */ +#define CAM_BYPASS_RGLTR 0x1 +#define CAM_BYPASS_RGLTR_MODE 0x2 +#define CAM_BYPASS_CLKS 0x4 +#define CAM_BYPASS_CESTA 0x8 +#define CAM_BYPASS_ICC 0x10 + +/* Proxy voting*/ +#define CAM_PROXY_CLK_RGL_VOTING 0x1 +#define CAM_PROXY_ICC_VOTING 0x2 + +/* VM name */ +#define CAM_PVM 1 +#define CAM_SVM1 2 +#define CAM_VM_MAX 3 +#endif diff --git a/qcom/opensource/camera-kernel/include/uapi/Android.mk b/qcom/opensource/camera-kernel/include/uapi/Android.mk new file mode 100644 index 0000000000..111968c5c8 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/Android.mk @@ -0,0 +1,10 @@ +# Standalone camera UAPI header android target +LOCAL_PATH := $(call my-dir) +# Path variable for other modules to include for compilation +LOCAL_EXPORT_CAMERA_UAPI_INCLUDE := $(LOCAL_PATH)/camera/ + +CAMERA_HEADERS := $(call all-subdir-named-files,*.h) +KERNEL_SCRIPTS := $(LOCAL_PATH)/kernel/msm-$(TARGET_KERNEL_VERSION)/scripts + +include $(CLEAR_VARS) +LOCAL_MODULE := camera-uapi diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_cpas.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_cpas.h new file mode 100644 index 0000000000..eea1bb9122 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_cpas.h @@ -0,0 +1,412 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_CPAS_H__ +#define __UAPI_CAM_CPAS_H__ + +#include + +#define CAM_FAMILY_CAMERA_SS 1 +#define CAM_FAMILY_CPAS_SS 2 + +/* AXI BW Voting Version */ +#define CAM_AXI_BW_VOTING_V2 2 + +/* AXI BW Voting Transaction Type */ +#define CAM_AXI_TRANSACTION_READ 0 +#define CAM_AXI_TRANSACTION_WRITE 1 + +/* AXI BW Voting Path Data Type */ +#define CAM_AXI_PATH_DATA_IFE_START_OFFSET 0 +#define CAM_AXI_PATH_DATA_IFE_LINEAR (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 0) +#define CAM_AXI_PATH_DATA_IFE_VID (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 1) +#define CAM_AXI_PATH_DATA_IFE_DISP (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 2) +#define CAM_AXI_PATH_DATA_IFE_STATS (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 3) +#define CAM_AXI_PATH_DATA_IFE_RDI0 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 4) +#define CAM_AXI_PATH_DATA_IFE_RDI1 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 5) +#define CAM_AXI_PATH_DATA_IFE_RDI2 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 6) +#define CAM_AXI_PATH_DATA_IFE_RDI3 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 7) +#define CAM_AXI_PATH_DATA_IFE_PDAF (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 8) +#define CAM_AXI_PATH_DATA_IFE_PIXEL_RAW \ + (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 9) +#define CAM_AXI_PATH_DATA_IFE_FULL (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 10) +#define CAM_AXI_PATH_DATA_IFE_DS2 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 11) +#define CAM_AXI_PATH_DATA_IFE_DS4 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 12) +#define CAM_AXI_PATH_DATA_IFE_DS16 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 13) +#define CAM_AXI_PATH_DATA_IFE_RDI4 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 14) +#define CAM_AXI_PATH_DATA_IFE_PDAF_1 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 15) +#define CAM_AXI_PATH_DATA_IFE_PDAF_2 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 16) +#define CAM_AXI_PATH_DATA_IFE_PDAF_3 (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 17) +#define CAM_AXI_PATH_DATA_IFE_IR (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 18) +#define CAM_AXI_PATH_DATA_IFE_FD (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 19) +#define 
CAM_AXI_PATH_DATA_IFE_MAX_OFFSET \ + (CAM_AXI_PATH_DATA_IFE_START_OFFSET + 31) + +#define CAM_AXI_PATH_DATA_IPE_START_OFFSET 32 +#define CAM_AXI_PATH_DATA_IPE_RD_IN (CAM_AXI_PATH_DATA_IPE_START_OFFSET + 0) +#define CAM_AXI_PATH_DATA_IPE_RD_REF (CAM_AXI_PATH_DATA_IPE_START_OFFSET + 1) +#define CAM_AXI_PATH_DATA_IPE_WR_VID (CAM_AXI_PATH_DATA_IPE_START_OFFSET + 2) +#define CAM_AXI_PATH_DATA_IPE_WR_DISP (CAM_AXI_PATH_DATA_IPE_START_OFFSET + 3) +#define CAM_AXI_PATH_DATA_IPE_WR_REF (CAM_AXI_PATH_DATA_IPE_START_OFFSET + 4) +#define CAM_AXI_PATH_DATA_IPE_WR_APP (CAM_AXI_PATH_DATA_IPE_START_OFFSET + 5) +#define CAM_AXI_PATH_DATA_IPE_MAX_OFFSET \ + (CAM_AXI_PATH_DATA_IPE_START_OFFSET + 31) + +#define CAM_AXI_PATH_DATA_OPE_START_OFFSET 64 +#define CAM_AXI_PATH_DATA_OPE_RD_IN (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 0) +#define CAM_AXI_PATH_DATA_OPE_RD_REF (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 1) +#define CAM_AXI_PATH_DATA_OPE_WR_VID (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 2) +#define CAM_AXI_PATH_DATA_OPE_WR_DISP (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 3) +#define CAM_AXI_PATH_DATA_OPE_WR_REF (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 4) +#define CAM_AXI_PATH_DATA_OPE_MAX_OFFSET \ + (CAM_AXI_PATH_DATA_OPE_START_OFFSET + 31) + +#define CAM_AXI_PATH_DATA_SFE_START_OFFSET 96 +#define CAM_AXI_PATH_DATA_SFE_NRDI (CAM_AXI_PATH_DATA_SFE_START_OFFSET + 0) +#define CAM_AXI_PATH_DATA_SFE_RDI0 (CAM_AXI_PATH_DATA_SFE_START_OFFSET + 1) +#define CAM_AXI_PATH_DATA_SFE_RDI1 (CAM_AXI_PATH_DATA_SFE_START_OFFSET + 2) +#define CAM_AXI_PATH_DATA_SFE_RDI2 (CAM_AXI_PATH_DATA_SFE_START_OFFSET + 3) +#define CAM_AXI_PATH_DATA_SFE_RDI3 (CAM_AXI_PATH_DATA_SFE_START_OFFSET + 4) +#define CAM_AXI_PATH_DATA_SFE_RDI4 (CAM_AXI_PATH_DATA_SFE_START_OFFSET + 5) +#define CAM_AXI_PATH_DATA_SFE_STATS (CAM_AXI_PATH_DATA_SFE_START_OFFSET + 6) +#define CAM_AXI_PATH_DATA_SFE_MAX_OFFSET \ + (CAM_AXI_PATH_DATA_SFE_START_OFFSET + 31) + +#define CAM_AXI_PATH_DATA_CRE_START_OFFSET (CAM_AXI_PATH_DATA_SFE_MAX_OFFSET + 1) +#define 
CAM_AXI_PATH_DATA_CRE_RD_IN (CAM_AXI_PATH_DATA_CRE_START_OFFSET + 0) +#define CAM_AXI_PATH_DATA_CRE_WR_OUT (CAM_AXI_PATH_DATA_CRE_START_OFFSET + 1) +#define CAM_AXI_PATH_DATA_CRE_MAX_OFFSET \ + (CAM_AXI_PATH_DATA_CRE_START_OFFSET + 31) + +#define CAM_AXI_PATH_DATA_OFE_START_OFFSET (CAM_AXI_PATH_DATA_CRE_MAX_OFFSET + 1) +#define CAM_AXI_PATH_DATA_OFE_RD_EXT (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 0) +#define CAM_AXI_PATH_DATA_OFE_RD_INT_PDI (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 1) +#define CAM_AXI_PATH_DATA_OFE_RD_INT_HDR (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 2) +#define CAM_AXI_PATH_DATA_OFE_WR_VID (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 3) +#define CAM_AXI_PATH_DATA_OFE_WR_DISP (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 4) +#define CAM_AXI_PATH_DATA_OFE_WR_IR (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 5) +#define CAM_AXI_PATH_DATA_OFE_WR_HDR_LTM (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 6) +#define CAM_AXI_PATH_DATA_OFE_WR_DC4 (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 7) +#define CAM_AXI_PATH_DATA_OFE_WR_AI (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 8) +#define CAM_AXI_PATH_DATA_OFE_WR_PDI (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 9) +#define CAM_AXI_PATH_DATA_OFE_WR_IDEALRAW (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 10) +#define CAM_AXI_PATH_DATA_OFE_WR_STATS (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 11) +#define CAM_AXI_PATH_DATA_OFE_MAX_OFFSET \ + (CAM_AXI_PATH_DATA_OFE_START_OFFSET + 31) + +#define CAM_AXI_PATH_DATA_ALL 256 +#define CAM_CPAS_FUSES_MAX 32 +#define CAM_CPAS_DOMAIN_ID_MAX 5 + +/* DRV Vote level */ +#define CAM_CPAS_VOTE_LEVEL_HIGH 1 +#define CAM_CPAS_VOTE_LEVEL_LOW 2 + +/* Domain id types */ +#define CAM_CPAS_NON_SECURE_DOMAIN 0 +#define CAM_CPAS_SECURE_DOMAIN 1 + +/* sysfs entry of camera subparts info */ +#define CAM_SYSFS_SUBPARTS_INFO_FILENAME "subparts_info" + +/* Total number of sys cache */ +#define CAM_NUM_SYS_CACHE 20 + +/* sys cache type */ +#define CAM_LLCC_SMALL_1 0 +#define CAM_LLCC_SMALL_2 1 +#define CAM_LLCC_LARGE_1 2 +#define CAM_LLCC_LARGE_2 3 +#define 
CAM_LLCC_LARGE_3 4 +#define CAM_LLCC_LARGE_4 5 +#define CAM_LLCC_OFE_IP 6 +#define CAM_LLCC_IPE_RT_IP 7 +#define CAM_LLCC_IPE_SRT_IP 8 +#define CAM_LLCC_IPE_RT_RF 9 +#define CAM_LLCC_IPE_SRT_RF 10 + + +/* cam sys cache llcc staling mode */ +#define CAM_LLCC_STALING_MODE_CAPACITY 1 +#define CAM_LLCC_STALING_MODE_NOTIFY 2 + + +/* cam sys cache operating type */ +#define CAM_LLCC_NOTIFY_STALING_EVICT 1 +#define CAM_LLCC_NOTIFY_STALING_FORGET 2 + +/* cam cpas query type */ +#define CAM_CPAS_QUERY_BLOB_BASE CAM_COMMON_QUERY_BLOB_END +#define CAM_CPAS_QUERY_BLOB_V3 (CAM_CPAS_QUERY_BLOB_BASE + 1) +#define CAM_CPAS_QUERY_BLOB_SYSCACHE (CAM_CPAS_QUERY_BLOB_BASE + 2) + +/** + * struct cam_cpas_fuse_value - CPAS fuse value + * + * @fuse_id : Camera fuse identification + * @fuse_val : Camera Fuse Value + */ +struct cam_cpas_fuse_value { + __u32 fuse_id; + __u32 fuse_val; +}; + +/** + * struct cam_cpas_fuse_info - CPAS fuse info + * + * @num_fuses : Number of fuses + * @fuse_val : Array of different fuse info. + */ +struct cam_cpas_fuse_info { + __u32 num_fuses; + struct cam_cpas_fuse_value fuse_val[CAM_CPAS_FUSES_MAX]; +}; + +/** + * struct cam_cpas_domain_id_pairing - CPAS domain id mapping + * + * @domain_type : Domain type + * @mapping_id : ID of domain type + */ +struct cam_cpas_domain_id_pairing { + __u32 domain_type; + __u32 mapping_id; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[4]; +}; + +/** + * struct cam_cpas_domain_id_caps - CPAS domain id info + * + * @is_supported : If domain id is supported on target + * @num_mapping : Number of domain id types supported, if any + * @entries : Stores mapping between domain type and its ID + * @num_valid_params : Number of valid params + * @valid_param_mask : Valid param mask + * @params : These fields are reserved for future extensions + * to this structure. 
+ */ +struct cam_cpas_domain_id_caps { + __u32 is_supported; + __u32 num_mapping; + struct cam_cpas_domain_id_pairing entries[CAM_CPAS_DOMAIN_ID_MAX]; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[6]; +}; + + +/** + * struct cam_cpas_query_cap - CPAS query device capability payload + * + * @camera_family : Camera family type + * @reserved : Reserved field for alignment + * @camera_version : Camera platform version + * @cpas_version : Camera CPAS version within camera platform + * + */ +struct cam_cpas_query_cap { + __u32 camera_family; + __u32 reserved; + struct cam_hw_version camera_version; + struct cam_hw_version cpas_version; +}; + +/** + * struct cam_cpas_query_cap - CPAS query device capability payload + * + * @camera_family : Camera family type + * @reserved : Reserved field for alignment + * @camera_version : Camera platform version + * @cpas_version : Camera CPAS version within camera platform + * @fuse_info : Camera fuse info + * + */ +struct cam_cpas_query_cap_v2 { + __u32 camera_family; + __u32 reserved; + struct cam_hw_version camera_version; + struct cam_hw_version cpas_version; + struct cam_cpas_fuse_info fuse_info; +}; + +/** + * struct cam_cpas_sys_cache_cap - sys cache payload information + * + * @version : struct version + * @scid_id : sys cache id + * @scid_num : sys cache number + * @concur_usage : concurrent usage + * + */ +struct cam_cpas_sys_cache_cap { + __u32 version; + __u32 scid_id; + __u32 scid_num; + __u32 concur_usage; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[10]; +}; + +/** + * struct cam_cpas_sys_cache_query - cache query capability payload + * + * @num_cache : number of cache + * @reserved : reserved paramas + * @sys_cache_cap : information of sys cache + */ +struct cam_cpas_sys_cache_query { + __u32 num_cache; + __u32 reserved; + struct cam_cpas_sys_cache_cap sys_cache_cap[CAM_NUM_SYS_CACHE]; +}; + +/** + * struct cam_cpas_query_cap - CPAS query device capability payload + * + * 
@version : Struct version + * @camera_family : Camera family type + * @camera_caps : Camera capability + * @camera_version : Camera platform version + * @cpas_version : Camera CPAS version within camera platform + * @fuse_info : Camera fuse info + * @domain_id_info : Domain id info + * @num_valid_params : Number of valid params + * @valid_param_mask : Valid param mask + * @params : Reserved fields to make this query cap + * extendable in the future + */ +struct cam_cpas_query_cap_v3 { + __u32 version; + __u32 camera_family; + __u32 camera_caps; + struct cam_hw_version camera_version; + struct cam_hw_version cpas_version; + struct cam_cpas_fuse_info fuse_info; + struct cam_cpas_domain_id_caps domain_id_info; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[10]; +}; + +/** + * struct cam_axi_per_path_bw_vote_v2 - Per path bandwidth vote information + * + * @usage_data: client usage data (left/right/rdi) + * @transac_type: Transaction type on the path (read/write) + * @path_data_type: Path for which vote is given (video, display, rdi) + * @vote_level: Vote level for this path + * @camnoc_bw: CAMNOC bw for this path + * @mnoc_ab_bw: MNOC AB bw for this path + * @mnoc_ib_bw: MNOC IB bw for this path + * @ddr_ab_bw: DDR AB bw for this path + * @ddr_ib_bw: DDR IB bw for this path + * @num_valid_params: Number of valid params + * @valid_param_mask: Valid param mask + * @params: params + */ +struct cam_axi_per_path_bw_vote_v2 { + __u32 usage_data; + __u32 transac_type; + __u32 path_data_type; + __u32 vote_level; + __u64 camnoc_bw; + __u64 mnoc_ab_bw; + __u64 mnoc_ib_bw; + __u64 ddr_ab_bw; + __u64 ddr_ib_bw; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[4]; +}; + +/** + * struct cam_axi_per_path_bw_vote - Per path bandwidth vote information + * + * @usage_data client usage data (left/right/rdi) + * @transac_type Transaction type on the path (read/write) + * @path_data_type Path for which vote is given (video, display, rdi) + * @reserved 
Reserved for alignment + * @camnoc_bw CAMNOC bw for this path + * @mnoc_ab_bw MNOC AB bw for this path + * @mnoc_ib_bw MNOC IB bw for this path + * @ddr_ab_bw DDR AB bw for this path + * @ddr_ib_bw DDR IB bw for this path + */ +struct cam_axi_per_path_bw_vote { + __u32 usage_data; + __u32 transac_type; + __u32 path_data_type; + __u32 reserved; + __u64 camnoc_bw; + __u64 mnoc_ab_bw; + __u64 mnoc_ib_bw; + __u64 ddr_ab_bw; + __u64 ddr_ib_bw; +}; + +#define CAM_CPAS_CUSTOM_CMD_FD_PORT_CFG 0 + +/** + * struct cam_cpas_fd_port_config : CPAS custom cmd struct for updating FD + * port config + * + * @is_secure Security mode of the FD port + * @reserved Reserved for alignment + */ +struct cam_cpas_fd_port_config { + __u32 is_secure; + __u32 reserved; +}; + +/** + * struct cam_sys_cache_config - sys cache config information + * + * @version struct version + * @scid_id cache scid id + * @activate sys cache need to activate or not + * @deactivate to deactivate any specific sys cache + * @staling_distance staling distance used for notification + * @llcc_staling_mode staling mode evict/forget + * @llcc_staling_op_type operation type capacity/notify + * @change_params this parameter to tell param change needed or not + * @num_valid_params: Number of valid params + * @valid_param_mask: Valid param mask + * @params: params + */ +struct cam_sys_cache_config { + __u32 version; + __u32 scid_id; + __u32 activate; + __u32 deactivate; + __u32 staling_distance; + __s32 llcc_staling_mode; + __s32 llcc_staling_op_type; + __u32 change_params; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[6]; +}; + +/** + * struct cam_sys_cache_config_request - sys cache config request + * + * @num: num of cache + * @reserved: reserved params + * @sys_cache_config: sys cache config data + */ +struct cam_sys_cache_config_request { + __u32 num; + __u32 reserved; + union { + struct cam_sys_cache_config sys_cache_config[1]; + __DECLARE_FLEX_ARRAY(struct cam_sys_cache_config, 
sys_cache_config_flex); + }; +}; + +#endif /* __UAPI_CAM_CPAS_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_cre.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_cre.h new file mode 100644 index 0000000000..2ca62da457 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_cre.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __UAPI_CAM_CRE_H__ +#define __UAPI_CAM_CRE_H__ + +#include +#include + +#define CAM_CRE_DEV_NAME_SIZE 128 + +/* CRE HW TYPE */ +#define CAM_CRE_HW_TYPE_CRE 0x1 +#define CAM_CRE_HW_TYPE_MAX 0x1 + +/* packet opcode types */ +#define CAM_CRE_OPCODE_CONFIG 0x1 + +/* input port resource type */ +#define CAM_CRE_INPUT_IMAGE 0x1 +#define CAM_CRE_INPUT_IMAGES_MAX (CAM_CRE_INPUT_IMAGE + 1) + +/* output port resource type */ +#define CAM_CRE_OUTPUT_IMAGE 0x1 +#define CAM_CRE_OUTPUT_IMAGES_MAX (CAM_CRE_OUTPUT_IMAGE + 1) + +#define CAM_CRE_MAX_PLANES 0x2 +#define CRE_MAX_BATCH_SIZE 0x10 + +/* definitions needed for cre aquire device */ +#define CAM_CRE_DEV_TYPE_NRT 0x1 +#define CAM_CRE_DEV_TYPE_RT 0x2 +#define CAM_CRE_DEV_TYPE_MAX 0x3 + +#define CAM_CRE_CMD_META_GENERIC_BLOB 0x1 +/* Clock blob */ +#define CAM_CRE_CMD_GENERIC_BLOB_CLK_V2 0x1 + +/** + * struct cam_cre_hw_ver - Device information for particular hw type + * + * This is used to get device version info of CRE + * from hardware and use this info in CAM_QUERY_CAP IOCTL + * + * @hw_ver: Major, minor and incr values of a device version + */ +struct cam_cre_hw_ver { + struct cam_hw_version hw_ver; +}; + +/** + * struct cam_cre_query_cap_cmd - CRE query device capability payload + * + * @dev_iommu_handle: CRE iommu handles for secure/non secure modes + * @num_dev: Number of cre + * @reserved: Reserved field + * @dev_ver: Returned 
device capability array + */ +struct cam_cre_query_cap_cmd { + struct cam_iommu_handle dev_iommu_handle; + __u32 num_dev; + __u32 reserved; + struct cam_cre_hw_ver dev_ver[CAM_CRE_HW_TYPE_MAX]; +}; + +/** + * struct cam_cre_io_buf_info - CRE IO buffers meta + * + * @direction: Direction of a buffer of a port(Input/Output) + * @res_id: Resource ID + * @num_planes: Number of planes + * @width: Height of a plane buffer + * @height: Height of a plane buffer + * @stride: Plane stride + * @format: unpacker format for FE, packer format for WE + * @alignment: Alignment + * @reserved: Reserved field 0 + */ +struct cam_cre_io_buf_info { + __u32 direction; + __u32 res_id; + __u32 num_planes; + __u32 width; + __u32 height; + __u32 stride; + __u32 fence; + __u32 format; + __u32 alignment; + __u32 reserved; +}; + +/** + * struct cam_cre_acquire_dev_info - An CRE device info + * + * @dev_type: NRT/RT Acquire + * @dev_name: Device name (CRE) + * @secure_mode: Tells if CRE will process the secure buff or not. 
+ * @batch_size: Batch size + * @num_in_res: Number of In resources + * @num_out_res: Number of Out resources + * @reserved: Reserved field 0 + * @in_res: In resource info + * @in_res: Out resource info + */ +struct cam_cre_acquire_dev_info { + char dev_name[CAM_CRE_DEV_NAME_SIZE]; + __u32 dev_type; + __u32 secure_mode; + __u32 batch_size; + __u32 num_in_res; + __u32 num_out_res; + __u32 reserved; + struct cam_cre_io_buf_info in_res[CAM_CRE_INPUT_IMAGES_MAX]; + struct cam_cre_io_buf_info out_res[CAM_CRE_OUTPUT_IMAGES_MAX]; +}__attribute__((__packed__)); + +/** + * struct cre_clk_bw_request_v2 + * + * @budget_ns: Time required to process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @reserved: Reserved field 0 + * @num_path: Number of AXI Paths + */ +struct cre_clk_bw_request_v2 { + __u64 budget_ns; + __u32 frame_cycles; + __u32 rt_flag; + __u32 reserved; + __u32 num_paths; + union { + struct cam_axi_per_path_bw_vote axi_path[1]; + __DECLARE_FLEX_ARRAY(struct cam_axi_per_path_bw_vote, axi_path_flex); + }; +}; +#endif /* __UAPI_CAM_CRE_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_custom.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_custom.h new file mode 100644 index 0000000000..306ce5a9dd --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_custom.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_CUSTOM_H__ +#define __UAPI_CAM_CUSTOM_H__ + +#include + +/* Custom driver name */ +#define CAM_CUSTOM_DEV_NAME "cam-custom" + +#define CAM_CUSTOM_NUM_SUB_DEVICES 2 + +/* HW type */ +#define CAM_CUSTOM_HW1 0 +#define CAM_CUSTOM_HW2 1 + +/* output path resource id's */ +#define CAM_CUSTOM_OUT_RES_UDI_0 1 +#define CAM_CUSTOM_OUT_RES_UDI_1 2 +#define CAM_CUSTOM_OUT_RES_UDI_2 3 + +/* input resource for custom hw */ +#define CAM_CUSTOM_IN_RES_UDI_0 1 + +/* Resource ID */ +#define CAM_CUSTOM_RES_ID_PORT 0 + +/* Packet opcode for Custom */ +#define CAM_CUSTOM_PACKET_OP_BASE 0 +#define CAM_CUSTOM_PACKET_INIT_DEV 1 +#define CAM_CUSTOM_PACKET_UPDATE_DEV 2 +#define CAM_CUSTOM_PACKET_OP_MAX 3 + +/* max number of vc-dt cfg for a given input */ +#define CAM_CUSTOM_VC_DT_CFG_MAX 4 + +/* phy input resource types */ +#define CAM_CUSTOM_IN_RES_BASE 0x5000 +#define CAM_CUSTOM_IN_RES_PHY_0 (CAM_CUSTOM_IN_RES_BASE + 1) +#define CAM_CUSTOM_IN_RES_PHY_1 (CAM_CUSTOM_IN_RES_BASE + 2) +#define CAM_CUSTOM_IN_RES_PHY_2 (CAM_CUSTOM_IN_RES_BASE + 3) +#define CAM_CUSTOM_IN_RES_PHY_3 (CAM_CUSTOM_IN_RES_BASE + 4) + +/* Query devices */ +/** + * struct cam_custom_dev_cap_info - A cap info for particular hw type + * + * @hw_type: Custom HW type + * @hw_version: Hardware version + * + */ +struct cam_custom_dev_cap_info { + __u32 hw_type; + __u32 hw_version; +}; + +/** + * struct cam_custom_query_cap_cmd - Custom HW query device capability payload + * + * @device_iommu: returned iommu handles for device + * @cdm_iommu: returned iommu handles for cdm + * @num_dev: returned number of device capabilities + * @reserved: reserved field for alignment + * @dev_caps: returned device capability array + * + */ +struct cam_custom_query_cap_cmd { + struct cam_iommu_handle device_iommu; + struct cam_iommu_handle cdm_iommu; + __s32 num_dev; + __u32 reserved; + struct cam_custom_dev_cap_info dev_caps[CAM_CUSTOM_NUM_SUB_DEVICES]; +}; + +/* Acquire Device */ +/** + * struct 
cam_custom_out_port_info - An output port resource info + * + * @res_type: output resource type + * @format: output format of the resource + * @custom_info 1-3: custom params + * @reserved: reserved field for alignment + * + */ +struct cam_custom_out_port_info { + __u32 res_type; + __u32 format; + __u32 custom_info1; + __u32 custom_info2; + __u32 custom_info3; + __u32 reserved; +}; + +/** + * struct cam_custom_in_port_info - An input port resource info + * + * @res_type: input resource type + * @lane_type: lane type: c-phy or d-phy. + * @lane_num: active lane number + * @lane_cfg: lane configurations: 4 bits per lane + * @vc: input virtual channel number + * @dt: input data type number + * @num_valid_vc_dt: number of valid vc-dt + * @format: input format + * @test_pattern: test pattern for the testgen + * @usage_type: whether dual vfe is required + * @left_start: left input start offset in pixels + * @left_stop: left input stop offset in pixels + * @left_width: left input width in pixels + * @right_start: right input start offset in pixels. + * @right_stop: right input stop offset in pixels. + * @right_width: right input width in pixels. 
+ * @line_start: top of the line number + * @line_stop: bottome of the line number + * @height: input height in lines + * @pixel_clk; sensor output clock + * @num_out_byte: number of valid output bytes per cycle + * @custom_info1: custom_info1 + * @custom_info2: custom info 2 + * @num_out_res: number of the output resource associated + * @data: payload that contains the output resources + * + */ +struct cam_custom_in_port_info { + __u32 res_type; + __u32 lane_type; + __u32 lane_num; + __u32 lane_cfg; + __u32 vc[CAM_CUSTOM_VC_DT_CFG_MAX]; + __u32 dt[CAM_CUSTOM_VC_DT_CFG_MAX]; + __u32 num_valid_vc_dt; + __u32 format; + __u32 test_pattern; + __u32 usage_type; + __u32 left_start; + __u32 left_stop; + __u32 left_width; + __u32 right_start; + __u32 right_stop; + __u32 right_width; + __u32 line_start; + __u32 line_stop; + __u32 height; + __u32 pixel_clk; + __u32 num_bytes_out; + __u32 custom_info1; + __u32 custom_info2; + __u32 num_out_res; + union { + struct cam_custom_out_port_info data[1]; + __DECLARE_FLEX_ARRAY(struct cam_custom_out_port_info, data_flex); + }; +}; + +/** + * struct cam_custom_resource - A resource bundle + * + * @resoruce_id: resource id for the resource bundle + * @length: length of the while resource blob + * @handle_type: type of the resource handle + * @reserved: reserved field for alignment + * @res_hdl: resource handle that points to the + * resource array; + */ +struct cam_custom_resource { + __u32 resource_id; + __u32 length; + __u32 handle_type; + __u32 reserved; + __u64 res_hdl; +}; + +/** + * struct cam_custom_acquire_hw_info - Custom acquire HW params + * + * @num_inputs : Number of inputs + * @input_info_size : Size of input info struct used + * @input_info_offset : Offset of input info from start of data + * @reserved : reserved + * @data : Start of data region + */ +struct cam_custom_acquire_hw_info { + __u32 num_inputs; + __u32 input_info_size; + __u32 input_info_offset; + __u32 reserved; + __u64 data; +}; + +/** + * struct 
cam_custom_cmd_buf_type_1 - cmd buf type 1 + * + * @custom_info: custom info + * @reserved: reserved + */ +struct cam_custom_cmd_buf_type_1 { + __u32 custom_info; + __u32 reserved; +}; + +/** + * struct cam_custom_cmd_buf_type_2 - cmd buf type 2 + * + * @custom_info1: Custom info 1 + * @custom_info2: Custom info 2 + * @custom_info3: Custom info 3 + * @reserved: reserved + */ +struct cam_custom_cmd_buf_type_2 { + __u32 custom_info1; + __u32 custom_info2; + __u32 custom_info3; + __u32 reserved; +}; +#endif /* __UAPI_CAM_CUSTOM_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_defs.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_defs.h new file mode 100644 index 0000000000..cdae033f00 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_defs.h @@ -0,0 +1,1161 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_DEFS_H__ +#define __UAPI_CAM_DEFS_H__ + +#include +#include +#include + + +/* camera op codes */ +#define CAM_COMMON_OPCODE_BASE 0x100 +#define CAM_QUERY_CAP (CAM_COMMON_OPCODE_BASE + 0x1) +#define CAM_ACQUIRE_DEV (CAM_COMMON_OPCODE_BASE + 0x2) +#define CAM_START_DEV (CAM_COMMON_OPCODE_BASE + 0x3) +#define CAM_STOP_DEV (CAM_COMMON_OPCODE_BASE + 0x4) +#define CAM_CONFIG_DEV (CAM_COMMON_OPCODE_BASE + 0x5) +#define CAM_RELEASE_DEV (CAM_COMMON_OPCODE_BASE + 0x6) +#define CAM_SD_SHUTDOWN (CAM_COMMON_OPCODE_BASE + 0x7) +#define CAM_FLUSH_REQ (CAM_COMMON_OPCODE_BASE + 0x8) +#define CAM_QUERY_CAP_V2 (CAM_COMMON_OPCODE_BASE + 0x9) +#define CAM_COMMON_OPCODE_MAX (CAM_COMMON_OPCODE_BASE + 0xa) + +#define CAM_COMMON_OPCODE_BASE_v2 0x150 +#define CAM_ACQUIRE_HW (CAM_COMMON_OPCODE_BASE_v2 + 0x1) +#define CAM_RELEASE_HW (CAM_COMMON_OPCODE_BASE_v2 + 0x2) +#define CAM_DUMP_REQ (CAM_COMMON_OPCODE_BASE_v2 + 0x3) +#define CAM_QUERY_CAP_V3 (CAM_COMMON_OPCODE_BASE_v2 + 0x4) +#define CAM_SYNX_TEST_TRIGGER (CAM_COMMON_OPCODE_BASE_v2 + 0x5) +#define CAM_CUSTOM_DEV_CONFIG (CAM_COMMON_OPCODE_BASE_v2 + 0x6) +#define CAM_QUERY_CAP_GENERIC_BLOB (CAM_COMMON_OPCODE_BASE_v2 + 0x7) +#define CAM_ACQUIRE_DEV_V2 (CAM_COMMON_OPCODE_BASE_v2 + 0x8) +#define CAM_QUERY_HW_DEV_INFO (CAM_COMMON_OPCODE_BASE_v2 + 0x9) + +#define CAM_EXT_OPCODE_BASE 0x200 +#define CAM_CONFIG_DEV_EXTERNAL (CAM_EXT_OPCODE_BASE + 0x1) + +/* camera handle type */ +#define CAM_HANDLE_USER_POINTER 1 +#define CAM_HANDLE_MEM_HANDLE 2 + +/* Generic Blob CmdBuffer header properties */ +#define CAM_GENERIC_BLOB_CMDBUFFER_SIZE_MASK 0xFFFFFF00 +#define CAM_GENERIC_BLOB_CMDBUFFER_SIZE_SHIFT 8 +#define CAM_GENERIC_BLOB_CMDBUFFER_TYPE_MASK 0xFF +#define CAM_GENERIC_BLOB_CMDBUFFER_TYPE_SHIFT 0 + +/* Command Buffer Types */ +#define CAM_CMD_BUF_DMI 0x1 +#define CAM_CMD_BUF_DMI16 0x2 +#define CAM_CMD_BUF_DMI32 0x3 +#define CAM_CMD_BUF_DMI64 0x4 +#define CAM_CMD_BUF_DIRECT 0x5 +#define CAM_CMD_BUF_INDIRECT 0x6 +#define 
CAM_CMD_BUF_I2C 0x7 +#define CAM_CMD_BUF_FW 0x8 +#define CAM_CMD_BUF_GENERIC 0x9 +#define CAM_CMD_BUF_LEGACY 0xA + +/* UBWC API Version */ +#define CAM_UBWC_CFG_VERSION_1 1 +#define CAM_UBWC_CFG_VERSION_2 2 + +#define CAM_MAX_ACQ_RES 5 +#define CAM_MAX_HW_SPLIT 3 + +/* camera blob handle op codes */ +#define CAM_COMMON_QUERY_BLOB_BASE 0x0 +#define CAM_COMMON_QUERY_BLOB_END 0x32 + +/** + * enum flush_type_t - Identifies the various flush types + * + * @CAM_FLUSH_TYPE_REQ: Flush specific request + * @CAM_FLUSH_TYPE_ALL: Flush all requests belonging to a context + * @CAM_FLUSH_TYPE_MAX: Max enum to validate flush type + * + */ +enum flush_type_t { + CAM_FLUSH_TYPE_REQ, + CAM_FLUSH_TYPE_ALL, + CAM_FLUSH_TYPE_MAX +}; + +/** + * struct cam_control - Structure used by ioctl control for camera + * + * @op_code: This is the op code for camera control + * @size: Control command size + * @handle_type: User pointer or shared memory handle + * @reserved: Reserved field for 64 bit alignment + * @handle: Control command payload + */ +struct cam_control { + __u32 op_code; + __u32 size; + __u32 handle_type; + __u32 reserved; + __u64 handle; +}; + +/** + * struct cam_custom_cmd - Structure used by ioctl control for camera + * devices to send custom commands. 
+ * + * @cmd_type: Command type + * @size: Size of the data + * @handle: Pointer to the command data + */ +struct cam_custom_cmd { + __u32 cmd_type; + __u32 size; + __u64 handle; +}; + +/* camera IOCTL */ +#define VIDIOC_CAM_CONTROL \ + _IOWR('V', BASE_VIDIOC_PRIVATE, struct cam_control) + +/** + * struct cam_hw_version - Structure for HW version of camera devices + * + * @major : Hardware version major + * @minor : Hardware version minor + * @incr : Hardware version increment + * @reserved : Reserved for 64 bit aligngment + */ +struct cam_hw_version { + __u32 major; + __u32 minor; + __u32 incr; + __u32 reserved; +}; + +/** + * struct cam_iommu_handle - Structure for IOMMU handles of camera hw devices + * + * @non_secure: Device Non Secure IOMMU handle + * @secure: Device Secure IOMMU handle + * + */ +struct cam_iommu_handle { + __s32 non_secure; + __s32 secure; +}; + +/* camera secure mode */ +#define CAM_SECURE_MODE_NON_SECURE 0 +#define CAM_SECURE_MODE_SECURE 1 + +/* Camera Format Type */ +#define CAM_FORMAT_BASE 0 +#define CAM_FORMAT_MIPI_RAW_6 1 +#define CAM_FORMAT_MIPI_RAW_8 2 +#define CAM_FORMAT_MIPI_RAW_10 3 +#define CAM_FORMAT_MIPI_RAW_12 4 +#define CAM_FORMAT_MIPI_RAW_14 5 +#define CAM_FORMAT_MIPI_RAW_16 6 +#define CAM_FORMAT_MIPI_RAW_20 7 +#define CAM_FORMAT_QTI_RAW_8 8 +#define CAM_FORMAT_QTI_RAW_10 9 +#define CAM_FORMAT_QTI_RAW_12 10 +#define CAM_FORMAT_QTI_RAW_14 11 +#define CAM_FORMAT_PLAIN8 12 +#define CAM_FORMAT_PLAIN16_8 13 +#define CAM_FORMAT_PLAIN16_10 14 +#define CAM_FORMAT_PLAIN16_12 15 +#define CAM_FORMAT_PLAIN16_14 16 +#define CAM_FORMAT_PLAIN16_16 17 +#define CAM_FORMAT_PLAIN32_20 18 +#define CAM_FORMAT_PLAIN64 19 +#define CAM_FORMAT_PLAIN128 20 +#define CAM_FORMAT_ARGB 21 +#define CAM_FORMAT_ARGB_10 22 +#define CAM_FORMAT_ARGB_12 23 +#define CAM_FORMAT_ARGB_14 24 +#define CAM_FORMAT_DPCM_10_6_10 25 +#define CAM_FORMAT_DPCM_10_8_10 26 +#define CAM_FORMAT_DPCM_12_6_12 27 +#define CAM_FORMAT_DPCM_12_8_12 28 +#define CAM_FORMAT_DPCM_14_8_14 29 
+#define CAM_FORMAT_DPCM_14_10_14 30 +#define CAM_FORMAT_NV21 31 +#define CAM_FORMAT_NV12 32 +#define CAM_FORMAT_TP10 33 +#define CAM_FORMAT_YUV422 34 +#define CAM_FORMAT_PD8 35 +#define CAM_FORMAT_PD10 36 +#define CAM_FORMAT_UBWC_NV12 37 +#define CAM_FORMAT_UBWC_NV12_4R 38 +#define CAM_FORMAT_UBWC_TP10 39 +#define CAM_FORMAT_UBWC_P010 40 +#define CAM_FORMAT_PLAIN8_SWAP 41 +#define CAM_FORMAT_PLAIN8_10 42 +#define CAM_FORMAT_PLAIN8_10_SWAP 43 +#define CAM_FORMAT_YV12 44 +#define CAM_FORMAT_Y_ONLY 45 +#define CAM_FORMAT_DPCM_12_10_12 46 +#define CAM_FORMAT_PLAIN32 47 +#define CAM_FORMAT_ARGB_16 48 +#define CAM_FORMAT_PLAIN16_10_LSB 49 +#define CAM_FORMAT_YUV422_10 50 +#define CAM_FORMAT_GBR_UBWC_TP10 51 +#define CAM_FORMAT_GBR_TP10 52 +#define CAM_FORMAT_UBWC_P016 53 +#define CAM_FORMAT_BAYER_UBWC_TP10 54 + +/* This macro is deprecated and no longer needed */ +#define CAM_FORMAT_MAX 51 + +/* Pixel Patterns */ +#define PIXEL_PATTERN_RGRGRG 0x0 +#define PIXEL_PATTERN_GRGRGR 0x1 +#define PIXEL_PATTERN_BGBGBG 0x2 +#define PIXEL_PATTERN_GBGBGB 0x3 +#define PIXEL_PATTERN_YCBYCR 0x4 +#define PIXEL_PATTERN_YCRYCB 0x5 +#define PIXEL_PATTERN_CBYCRY 0x6 +#define PIXEL_PATTERN_CRYCBY 0x7 + +/* camera rotaion */ +#define CAM_ROTATE_CW_0_DEGREE 0 +#define CAM_ROTATE_CW_90_DEGREE 1 +#define CAM_RORATE_CW_180_DEGREE 2 +#define CAM_ROTATE_CW_270_DEGREE 3 + +/* camera Color Space */ +#define CAM_COLOR_SPACE_BASE 0 +#define CAM_COLOR_SPACE_BT601_FULL 1 +#define CAM_COLOR_SPACE_BT601625 2 +#define CAM_COLOR_SPACE_BT601525 3 +#define CAM_COLOR_SPACE_BT709 4 +#define CAM_COLOR_SPACE_DEPTH 5 +#define CAM_COLOR_SPACE_MAX 6 + +/* camera buffer direction */ +#define CAM_BUF_INPUT 1 +#define CAM_BUF_OUTPUT 2 +#define CAM_BUF_IN_OUT 3 + +/* camera packet device Type */ +#define CAM_PACKET_DEV_BASE 0 +#define CAM_PACKET_DEV_IMG_SENSOR 1 +#define CAM_PACKET_DEV_ACTUATOR 2 +#define CAM_PACKET_DEV_COMPANION 3 +#define CAM_PACKET_DEV_EEPOM 4 +#define CAM_PACKET_DEV_CSIPHY 5 +#define 
CAM_PACKET_DEV_OIS 6 +#define CAM_PACKET_DEV_FLASH 7 +#define CAM_PACKET_DEV_FD 8 +#define CAM_PACKET_DEV_JPEG_ENC 9 +#define CAM_PACKET_DEV_JPEG_DEC 10 +#define CAM_PACKET_DEV_VFE 11 +#define CAM_PACKET_DEV_CPP 12 +#define CAM_PACKET_DEV_CSID 13 +#define CAM_PACKET_DEV_ISPIF 14 +#define CAM_PACKET_DEV_IFE 15 +#define CAM_PACKET_DEV_ICP 16 +#define CAM_PACKET_DEV_LRME 17 +#define CAM_PACKET_DEV_TFE 18 +#define CAM_PACKET_DEV_OPE 19 +#define CAM_PACKET_DEV_MAX 20 + +/* Register base type */ +#define CAM_REG_DUMP_BASE_TYPE_ISP_LEFT 1 +#define CAM_REG_DUMP_BASE_TYPE_ISP_RIGHT 2 +#define CAM_REG_DUMP_BASE_TYPE_CAMNOC 3 +#define CAM_REG_DUMP_BASE_TYPE_CSID_WRAPPER 4 +#define CAM_REG_DUMP_BASE_TYPE_CSID_LEFT 5 +#define CAM_REG_DUMP_BASE_TYPE_CSID_RIGHT 6 +#define CAM_REG_DUMP_BASE_TYPE_SFE_LEFT 7 +#define CAM_REG_DUMP_BASE_TYPE_SFE_RIGHT 8 + +/* Register dump read type */ +#define CAM_REG_DUMP_READ_TYPE_CONT_RANGE 1 +#define CAM_REG_DUMP_READ_TYPE_DMI 2 +#define CAM_REG_DUMP_READ_TYPE_CTXT 3 + +/* Max number of config writes to read from DMI */ +#define CAM_REG_DUMP_DMI_CONFIG_MAX 5 + + +/* constants */ +#define CAM_PACKET_MAX_PLANES 3 + +/* synx test cmd types */ +#define CAM_SYNX_TEST_CMD_TYPE_CORE_CTRL 1 +#define CAM_SYNX_TEST_CMD_TYPE_SYNX_CMD 2 + +/** + * struct cam_plane_cfg - Plane configuration info + * + * @width: Plane width in pixels + * @height: Plane height in lines + * @plane_stride: Plane stride in pixel + * @slice_height: Slice height in line (not used by ISP) + * @meta_stride: UBWC metadata stride + * @meta_size: UBWC metadata plane size + * @meta_offset: UBWC metadata offset + * @packer_config: UBWC packer config + * @mode_config: UBWC mode config + * @tile_config: UBWC tile config + * @h_init: UBWC horizontal initial coordinate in pixels + * @v_init: UBWC vertical initial coordinate in lines + * + */ +struct cam_plane_cfg { + __u32 width; + __u32 height; + __u32 plane_stride; + __u32 slice_height; + __u32 meta_stride; + __u32 meta_size; + __u32 
meta_offset; + __u32 packer_config; + __u32 mode_config; + __u32 tile_config; + __u32 h_init; + __u32 v_init; +}; + +/** + * struct cam_ubwc_plane_cfg_v1 - UBWC Plane configuration info + * + * @port_type: Port Type + * @meta_stride: UBWC metadata stride + * @meta_size: UBWC metadata plane size + * @meta_offset: UBWC metadata offset + * @packer_config: UBWC packer config + * @mode_config_0: UBWC mode config 0 + * @mode_config_1: UBWC 3 mode config 1 + * @tile_config: UBWC tile config + * @h_init: UBWC horizontal initial coordinate in pixels + * @v_init: UBWC vertical initial coordinate in lines + * + */ +struct cam_ubwc_plane_cfg_v1 { + __u32 port_type; + __u32 meta_stride; + __u32 meta_size; + __u32 meta_offset; + __u32 packer_config; + __u32 mode_config_0; + __u32 mode_config_1; + __u32 tile_config; + __u32 h_init; + __u32 v_init; +}; + +/** + * struct cam_ubwc_plane_cfg_v2 - UBWC Plane configuration info + * + * @port_type: Port Type + * @meta_stride: UBWC metadata stride + * @meta_size: UBWC metadata plane size + * @meta_offset: UBWC metadata offset + * @packer_config: UBWC packer config + * @mode_config: UBWC mode config + * @static_ctrl: UBWC static ctrl + * @ctrl_2: UBWC ctrl 2 + * @tile_config: UBWC tile config + * @h_init: UBWC horizontal initial coordinate in pixels + * @v_init: UBWC vertical initial coordinate in lines + * @stats_ctrl_2: UBWC stats control + * @lossy_threshold_0: UBWC lossy threshold 0 + * @lossy_threshold_1: UBWC lossy threshold 1 + * @lossy_var_offset: UBWC offset variance threshold + * @bandwidth_limit: BW counter limit + * BW limiter config skipped if value is 0xFFFF or more + * If skipped here, use generic BW limiter blob to + * configure the appropriate value. 
+ * + */ +struct cam_ubwc_plane_cfg_v2 { + __u32 port_type; + __u32 meta_stride; + __u32 meta_size; + __u32 meta_offset; + __u32 packer_config; + __u32 mode_config_0; + __u32 mode_config_1; + __u32 tile_config; + __u32 h_init; + __u32 v_init; + __u32 static_ctrl; + __u32 ctrl_2; + __u32 stats_ctrl_2; + __u32 lossy_threshold_0; + __u32 lossy_threshold_1; + __u32 lossy_var_offset; + __u32 bandwidth_limit; + __u32 reserved[3]; +}; + +/** + * struct cam_ubwc_plane_cfg_v3 - UBWC Plane configuration info + * + * @port_type: Port Type + * @meta_stride: UBWC metadata stride + * @meta_size: UBWC metadata plane size + * @meta_offset: UBWC metadata offset + * @packer_config: UBWC packer config + * @mode_config: UBWC mode config + * @static_ctrl: UBWC static ctrl + * @ctrl_2: UBWC ctrl 2 + * @tile_config: UBWC tile config + * @h_init: UBWC horizontal initial coordinate in pixels + * @v_init: UBWC vertical initial coordinate in lines + * @stats_ctrl_2: UBWC stats control + * @lossy_threshold_0: UBWC lossy threshold 0 + * @lossy_threshold_1: UBWC lossy threshold 1 + * @lossy_var_offset: UBWC offset variance threshold + * @bandwidth_limit: BW counter limit + * BW limiter config skipped if value is 0xFFFF or more + * If skipped here, use generic BW limiter blob to + * configure the appropriate value. 
+ * @hw_ctx_id_mask: hw context id mask in case of multi context + * definitions for valid values defined in cam_isp.h + * @num_valid_params: Number of valid params, to accommodate future changes + * @param_mask: Indicate params supported, to accommodate future changes + * @params: Indicate params supported, to accommodate future changes + */ +struct cam_ubwc_plane_cfg_v3 { + __u32 port_type; + __u32 meta_stride; + __u32 meta_size; + __u32 meta_offset; + __u32 packer_config; + __u32 mode_config_0; + __u32 mode_config_1; + __u32 tile_config; + __u32 h_init; + __u32 v_init; + __u32 static_ctrl; + __u32 ctrl_2; + __u32 stats_ctrl_2; + __u32 lossy_threshold_0; + __u32 lossy_threshold_1; + __u32 lossy_var_offset; + __u32 bandwidth_limit; + __u32 hw_ctx_id_mask; + __u32 num_valid_params; + __u32 param_mask; + __u32 params[6]; + +}; + +/** + * struct cam_cmd_buf_desc - Command buffer descriptor + * + * @mem_handle: Command buffer handle + * @offset: Command start offset + * @size: Size of the command buffer in bytes + * @length: Used memory in command buffer in bytes + * @type: Type of the command buffer + * @meta_data: Data type for private command buffer + * Between UMD and KMD + * + */ +struct cam_cmd_buf_desc { + __s32 mem_handle; + __u32 offset; + __u32 size; + __u32 length; + __u32 type; + __u32 meta_data; +}; + +/** + * struct cam_buf_io_cfg - Buffer io configuration for buffers + * + * @mem_handle: Mem_handle array for the buffers. 
+ * @offsets: Offsets for each planes in the buffer + * @planes: Per plane information + * @width: Main plane width in pixel + * @height: Main plane height in lines + * @format: Format of the buffer + * @color_space: Color space for the buffer + * @color_pattern: Color pattern in the buffer + * @bpp: Bit per pixel + * @rotation: Rotation information for the buffer + * @resource_type: Resource type associated with the buffer + * @fence: Fence handle + * @early_fence: Fence handle for early signal + * @aux_cmd_buf: An auxiliary command buffer that may be + * used for programming the IO + * @direction: Direction of the config + * @batch_size: Batch size in HFR mode + * @subsample_pattern: Subsample pattern. Used in HFR mode. It + * should be consistent with batchSize and + * CAMIF programming. + * @subsample_period: Subsample period. Used in HFR mode. It + * should be consistent with batchSize and + * CAMIF programming. + * @framedrop_pattern: Framedrop pattern + * @framedrop_period: Framedrop period + * @flag: Flags for extra information + * for acquired version 3--> corresponds to context_id + * @direction: Buffer direction: input or output + * @padding: Padding for the structure + * + */ +struct cam_buf_io_cfg { + __s32 mem_handle[CAM_PACKET_MAX_PLANES]; + __u32 offsets[CAM_PACKET_MAX_PLANES]; + struct cam_plane_cfg planes[CAM_PACKET_MAX_PLANES]; + __u32 format; + __u32 color_space; + __u32 color_pattern; + __u32 bpp; + __u32 rotation; + __u32 resource_type; + __s32 fence; + __s32 early_fence; + struct cam_cmd_buf_desc aux_cmd_buf; + __u32 direction; + __u32 batch_size; + __u32 subsample_pattern; + __u32 subsample_period; + __u32 framedrop_pattern; + __u32 framedrop_period; + __u32 flag; + __u32 padding; +}; + +/** + * struct cam_packet_header - Camera packet header + * + * @op_code: Camera packet opcode + * @size: Size of the camera packet in bytes + * @request_id: Request id for this camera packet + * @flags: Flags for the camera packet + * @padding: Padding + * 
+ */ +struct cam_packet_header { + __u32 op_code; + __u32 size; + __u64 request_id; + __u32 flags; + __u32 padding; +}; + +/** + * struct cam_patch_desc - Patch structure + * + * @dst_buf_hdl: Memory handle for the dest buffer + * @dst_offset: Offset byte in the dest buffer + * @src_buf_hdl: Memory handle for the source buffer + * @src_offset: Offset byte in the source buffer + * + */ +struct cam_patch_desc { + __s32 dst_buf_hdl; + __u32 dst_offset; + __s32 src_buf_hdl; + __u32 src_offset; +}; + +/** + * struct cam_packet - Camera packet structure + * + * @header: Camera packet header + * @cmd_buf_offset: Command buffer start offset + * @num_cmd_buf: Number of the command buffer in the packet + * @io_config_offset: Buffer io configuration start offset + * @num_io_configs: Number of the buffer io configurations + * @patch_offset: Patch offset for the patch structure + * @num_patches: Number of the patch structure + * @kmd_cmd_buf_index: Command buffer index which contains extra + * space for the KMD buffer + * @kmd_cmd_buf_offset: Offset from the beginning of the command + * buffer for KMD usage. 
+ * @payload: Camera packet payload + * + */ +struct cam_packet { + struct cam_packet_header header; + __u32 cmd_buf_offset; + __u32 num_cmd_buf; + __u32 io_configs_offset; + __u32 num_io_configs; + __u32 patch_offset; + __u32 num_patches; + __u32 kmd_cmd_buf_index; + __u32 kmd_cmd_buf_offset; + union { + __u64 payload[1]; + __DECLARE_FLEX_ARRAY(__u64, payload_flex); + }; + +}; + +/** + * struct cam_release_dev_cmd - Control payload for release devices + * + * @session_handle: Session handle for the release + * @dev_handle: Device handle for the release + */ +struct cam_release_dev_cmd { + __s32 session_handle; + __s32 dev_handle; +}; + +/** + * struct cam_start_stop_dev_cmd - Control payload for start/stop device + * + * @session_handle: Session handle for the start/stop command + * @dev_handle: Device handle for the start/stop command + * + */ +struct cam_start_stop_dev_cmd { + __s32 session_handle; + __s32 dev_handle; +}; + +/** + * struct cam_config_dev_cmd - Command payload for configure device + * + * @session_handle: Session handle for the command + * @dev_handle: Device handle for the command + * @offset: Offset byte in the packet handle. + * @packet_handle: Packet memory handle for the actual packet: + * struct cam_packet. 
+ * + */ +struct cam_config_dev_cmd { + __s32 session_handle; + __s32 dev_handle; + __u64 offset; + __u64 packet_handle; +}; + +/** + * struct cam_query_cap_cmd - Payload for query device capability + * + * @size: Handle size + * @handle_type: User pointer or shared memory handle + * @caps_handle: Device specific query command payload + * + */ +struct cam_query_cap_cmd { + __u32 size; + __u32 handle_type; + __u64 caps_handle; +}; + +#define CAM_ACQUIRE_DEV_STRUCT_VERSION_1 1 +#define CAM_ACQUIRE_DEV_STRUCT_VERSION_2 2 + +/** + * struct cam_acquire_dev_cmd - Control payload for acquire devices + * + * @session_handle: Session handle for the acquire command + * @dev_handle: Device handle to be returned + * @handle_type: Resource handle type: + * 1 = user pointer, 2 = mem handle + * @num_resources: Number of the resources to be acquired + * @resources_hdl: Resource handle that refers to the actual + * resource array. Each item in this + * array is device specific resource structure + * + */ +struct cam_acquire_dev_cmd { + __s32 session_handle; + __s32 dev_handle; + __u32 handle_type; + __u32 num_resources; + __u64 resource_hdl; +}; + +/** + * struct cam_acquire_dev_cmd_v2 - Control payload for acquire devices + * + * @struct_version: = CAM_ACQUIRE_DEV_STRUCT_VERSION_2 for this struct + * This value should be the first 32-bits in any structure + * related to this IOCTL. So that if the struct needs to + * change, we can first read the starting 32-bits, get the + * version number and then typecast the data to struct + * accordingly. + * @session_handle: Session handle for the acquire command + * @dev_handle: Device handle to be returned + * @handle_type: Resource handle type: + * 1 = user pointer, 2 = mem handle + * @num_resources: Number of the resources to be acquired + * @resources_hdl: Resource handle that refers to the actual + * resource array. 
Each item in this + * array is device specific resource structure + * @num_valid_params: number of valid params + * @valid_param_mask: valid param mask + * @params: additional parameters for future usage + * + */ +struct cam_acquire_dev_cmd_v2 { + __u32 struct_version; + __s32 session_handle; + __s32 dev_handle; + __u32 handle_type; + __u32 num_resources; + __u64 resource_hdl; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[5]; +}; + +/* + * In old version, while acquiring device the num_resources in + * struct cam_acquire_dev_cmd will be a valid value. During ACQUIRE_DEV + * KMD driver will return dev_handle as well as associate HW to handle. + * If num_resources is set to the constant below, we are using + * the new version and we do not acquire HW in ACQUIRE_DEV IOCTL. + * ACQUIRE_DEV will only return handle and we should receive + * ACQUIRE_HW IOCTL after ACQUIRE_DEV and that is when the HW + * is associated with the dev_handle. + * + * (Data type): __u32 + */ +#define CAM_API_COMPAT_CONSTANT 0xFEFEFEFE + +#define CAM_ACQUIRE_HW_STRUCT_VERSION_1 1 +#define CAM_ACQUIRE_HW_STRUCT_VERSION_2 2 + +/** + * struct cam_acquire_hw_cmd_v1 - Control payload for acquire HW IOCTL (Ver 1) + * + * @struct_version: = CAM_ACQUIRE_HW_STRUCT_VERSION_1 for this struct + * This value should be the first 32-bits in any structure + * related to this IOCTL. So that if the struct needs to + * change, we can first read the starting 32-bits, get the + * version number and then typecast the data to struct + * accordingly. + * @reserved: Reserved field for 64-bit alignment + * @session_handle: Session handle for the acquire command + * @dev_handle: Device handle to be returned + * @handle_type: Tells you how to interpret the variable resource_hdl- + * 1 = user pointer, 2 = mem handle + * @data_size: Total size of data contained in memory pointed + * to by resource_hdl + * @resource_hdl: Resource handle that refers to the actual + * resource data. 
+ */ +struct cam_acquire_hw_cmd_v1 { + __u32 struct_version; + __u32 reserved; + __s32 session_handle; + __s32 dev_handle; + __u32 handle_type; + __u32 data_size; + __u64 resource_hdl; +}; + +/** + * struct cam_acquired_hw_info - Update the acquired hardware info + * + * @acquired_hw_id: Acquired hardware mask + * @acquired_hw_path: Acquired path mask for an input + * if input splits into multiple paths, + * its updated per hardware + * valid_acquired_hw: Valid num of acquired hardware + */ +struct cam_acquired_hw_info { + __u32 acquired_hw_id[CAM_MAX_ACQ_RES]; + __u32 acquired_hw_path[CAM_MAX_ACQ_RES][CAM_MAX_HW_SPLIT]; + __u32 valid_acquired_hw; +}; + +/** + * struct cam_acquire_hw_cmd_v2 - Control payload for acquire HW IOCTL (Ver 2) + * + * @struct_version: = CAM_ACQUIRE_HW_STRUCT_VERSION_2 for this struct + * This value should be the first 32-bits in any structure + * related to this IOCTL. So that if the struct needs to + * change, we can first read the starting 32-bits, get the + * version number and then typecast the data to struct + * accordingly. + * @reserved: Reserved field for 64-bit alignment + * @session_handle: Session handle for the acquire command + * @dev_handle: Device handle to be returned + * @handle_type: Tells you how to interpret the variable resource_hdl- + * 1 = user pointer, 2 = mem handle + * @data_size: Total size of data contained in memory pointed + * to by resource_hdl + * @resource_hdl: Resource handle that refers to the actual + * resource data. 
+ */ +struct cam_acquire_hw_cmd_v2 { + __u32 struct_version; + __u32 reserved; + __s32 session_handle; + __s32 dev_handle; + __u32 handle_type; + __u32 data_size; + __u64 resource_hdl; + struct cam_acquired_hw_info hw_info; +}; + +#define CAM_RELEASE_HW_STRUCT_VERSION_1 1 + +/** + * struct cam_release_hw_cmd_v1 - Control payload for release HW IOCTL (Ver 1) + * + * @struct_version: = CAM_RELEASE_HW_STRUCT_VERSION_1 for this struct + * This value should be the first 32-bits in any structure + * related to this IOCTL. So that if the struct needs to + * change, we can first read the starting 32-bits, get the + * version number and then typecast the data to struct + * accordingly. + * @reserved: Reserved field for 64-bit alignment + * @session_handle: Session handle for the release + * @dev_handle: Device handle for the release + */ +struct cam_release_hw_cmd_v1 { + __u32 struct_version; + __u32 reserved; + __s32 session_handle; + __s32 dev_handle; +}; + +/** + * struct cam_flush_dev_cmd - Control payload for flush devices + * + * @version: Version + * @session_handle: Session handle for the acquire command + * @dev_handle: Device handle to be returned + * @flush_type: Flush type: + * 0 = flush specific request + * 1 = flush all + * @reserved: Reserved for 64 bit alignment + * @req_id: Request id that needs to cancel + * + */ +struct cam_flush_dev_cmd { + __u64 version; + __s32 session_handle; + __s32 dev_handle; + __u32 flush_type; + __u32 reserved; + __s64 req_id; +}; + +/** + * struct cam_ubwc_config - UBWC Configuration Payload + * + * @api_version: UBWC config api version + * @num_ports: Number of ports to be configured + * @ubwc_plane_config: Array of UBWC configurations per port + * Size [CAM_PACKET_MAX_PLANES - 1] per port + * as UBWC is supported on Y & C planes + * and therefore a max size of 2 planes + * + */ +struct cam_ubwc_config { + __u32 api_version; + __u32 num_ports; + union { + struct cam_ubwc_plane_cfg_v1 + ubwc_plane_cfg[1][CAM_PACKET_MAX_PLANES - 
1]; + struct { + struct { } __empty_ubwc_plane_cfg_array_flex; + struct cam_ubwc_plane_cfg_v1 + ubwc_plane_cfg_array_flex[][CAM_PACKET_MAX_PLANES - 1]; + }; + }; +}; + +/** + * struct cam_ubwc_config_v2 - UBWC Configuration Payload + * + * @api_version: UBWC config api version + * @num_ports: Number of ports to be configured + * @ubwc_plane_config: Array of UBWC configurations per port + * Size [CAM_PACKET_MAX_PLANES - 1] per port + * as UBWC is supported on Y & C planes + * and therefore a max size of 2 planes + * + */ +struct cam_ubwc_config_v2 { + __u32 api_version; + __u32 num_ports; + union { + struct cam_ubwc_plane_cfg_v2 + ubwc_plane_cfg[1][CAM_PACKET_MAX_PLANES - 1]; + struct { + struct { } __empty_ubwc_plane_cfg_array_flex; + struct cam_ubwc_plane_cfg_v2 + ubwc_plane_cfg_array_flex[][CAM_PACKET_MAX_PLANES - 1]; + }; + }; +}; + +/** + * struct cam_ubwc_config_v3 - UBWC Configuration Payload + * + * @api_version: UBWC config api version + * @num_ports: Number of ports to be configured, in case of MC, + * can have multiple entries for same out port + * @ubwc_plane_config: Array of UBWC configurations per port + * Size [CAM_PACKET_MAX_PLANES - 1] per port + * as UBWC is supported on Y & C planes + * and therefore a max size of 2 planes + * + */ +struct cam_ubwc_config_v3 { + __u32 api_version; + __u32 num_ports; + union { + struct cam_ubwc_plane_cfg_v3 ubwc_plane_cfg[1][CAM_PACKET_MAX_PLANES - 1]; + struct { + struct { } __empty_ubwc_plane_cfg_array_flex; + struct cam_ubwc_plane_cfg_v3 + ubwc_plane_cfg_array_flex[][CAM_PACKET_MAX_PLANES - 1]; + }; + }; +}; + +/** + * struct cam_cmd_mem_region_info - + * Cmd buffer region info + * + * @mem_handle : Memory handle of the region + * @offset : Offset if any + * @size : Size of the region + * @flags : Flags if any + */ +struct cam_cmd_mem_region_info { + __s32 mem_handle; + __u32 offset; + __u32 size; + __u32 flags; +}; + +/** + * struct cam_cmd_mem_regions - + * List of multiple memory descriptors of + * of 
different regions + * + * @version : Version number + * @num_regions : Number of regions + * @map_info_array : Array of all the regions + */ +struct cam_cmd_mem_regions { + __u32 version; + __u32 num_regions; + union { + struct cam_cmd_mem_region_info map_info_array[1]; + __DECLARE_FLEX_ARRAY(struct cam_cmd_mem_region_info, map_info_array_flex); + }; +}; + +/** + * struct cam_reg_write_desc - Register write descriptor + * + * @offset : Register offset at which 'value' needs to written + * @value : Register value to write + */ +struct cam_reg_write_desc { + __u32 offset; + __u32 value; +}; + +/** + * struct cam_reg_range_read_desc - Descriptor to provide read info + * + * @offset : Register offset address to start with + * @num_values : Number of values to read + */ +struct cam_reg_range_read_desc { + __u32 offset; + __u32 num_values; +}; + +/** + * struct cam_dmi_read_desc - Descriptor to provide DMI read info + * + * @num_pre_writes : Number of registers to write before reading DMI data + * @num_post_writes : Number of registers to write after reading DMI data + * @pre_read_config : Registers to write before reading DMI data + * @dmi_data_read : DMI Register, number of values to read to dump + * DMI data + * @post_read_config : Registers to write after reading DMI data + */ +struct cam_dmi_read_desc { + __u32 num_pre_writes; + __u32 num_post_writes; + struct cam_reg_write_desc pre_read_config[ + CAM_REG_DUMP_DMI_CONFIG_MAX]; + struct cam_reg_range_read_desc dmi_data_read; + struct cam_reg_write_desc post_read_config[ + CAM_REG_DUMP_DMI_CONFIG_MAX]; +}; + +/** + * struct cam_reg_read_info - Register read info for continuous read + * or DMI read or Context based reg read + * + * @type : Whether Register range read or DMI read or Context based reg read + * For context-based reading, we have to select context ID for reading + * that particular context Registers. + * if the type will be TYPE_CTX, we will write the context id and + * read the context registers. 
it will be similar to dmi read. + * @reserved : For acquired version 3, this corresponds to context_id + * @reg_read : Range of registers to read + * @dmi_read : DMI data to read + */ +struct cam_reg_read_info { + __u32 type; + __u32 reserved; + union { + struct cam_reg_range_read_desc reg_read; + struct cam_dmi_read_desc dmi_read; + }; +}; + +/** + * struct cam_reg_dump_out_buffer -Buffer info for dump data to be provided + * + * @req_id : Request ID corresponding to reg dump data + * @bytes_written : Number of bytes written + * @dump_data : Register dump data + */ +struct cam_reg_dump_out_buffer { + __u64 req_id; + __u32 bytes_written; + union { + __u32 dump_data[1]; + __DECLARE_FLEX_ARRAY(__u32, dump_data_flex); + }; +}; + +/** + * struct cam_reg_dump_desc - Descriptor to provide dump info + * + * @reg_base_type : Register base type, e.g. ISP_LEFT, ISP_RIGHT, CAMNOC + * @dump_buffer_offset : Offset from base of mem_handle at which Register dump + * will be written for this set + * @dump_buffer_size : Available size in bytes for writing dump values + * @num_read_range : Number register range reads (Continuous + DMI) + * @read_range : Read range info + */ +struct cam_reg_dump_desc { + __u32 reg_base_type; + __u32 dump_buffer_offset; + __u32 dump_buffer_size; + __u32 num_read_range; + union { + struct cam_reg_read_info read_range[1]; + __DECLARE_FLEX_ARRAY(struct cam_reg_read_info, read_range_flex); + }; +}; + +/** + * struct cam_reg_dump_input_info - Info about required dump sets + * + * @num_dump_sets : Number of different dump sets (base types) given + * @dump_set_offsets : Points to the given dump description structures + * (cam_reg_dump_desc) + */ +struct cam_reg_dump_input_info { + __u32 num_dump_sets; + union { + __u32 dump_set_offsets[1]; + __DECLARE_FLEX_ARRAY(__u32, dump_set_offsets_flex); + }; +}; + +/** + * struct cam_dump_req_cmd - + * Dump the information of issue req id + * + * @issue_req_id : Issue Request Id + * @offset : Offset for the buffer + * 
@buf_handle : Buffer Handle + * @error_type : Error type, using it, dumping information can be extended + * @session_handle : Session Handle + * @link_hdl : link handle + * @dev_handle : Device Handle + */ +struct cam_dump_req_cmd { + __u64 issue_req_id; + __kernel_size_t offset; + __u32 buf_handle; + __u32 error_type; + __s32 session_handle; + __s32 link_hdl; + __s32 dev_handle; +}; + +/** + * struct cam_synx_test_cmd - Synx test cmds + * + * @version: Struct version + * @ip_mem_hdl: Input buf mem handle + * corresponds to synx test inputs to the + * fencing core + * @op_mem_hdl: Output buf mem handle + * corresponds to synx output generated by + * the fencing core + * @num_valid_params: Num valid params + * @valid_param_mask: Valid param mask + * @params: additional params + */ +struct cam_synx_test_cmd { + __u32 version; + __s32 ip_mem_hdl; + __s32 op_mem_hdl; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[5]; +}; + +/** + * struct cam_synx_core_control - Synx core ctrl + * + * @version: Struct version + * @core_control: Set for resume, unset for collapse + * @num_valid_params: Num valid params + * @valid_param_mask: Valid param mask + * @params: additional params + */ +struct cam_synx_core_control { + __u32 version; + __u32 core_control; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[4]; +}; + + +/** + * struct cam_synx_test_params - Synx test params + * + * A test sequence could include creating and signaling + * synx handle between ICP <-> APPs. These test params + * would be cmds such as session initialize, synx create, + * synx async wait, synx signal and so on + * + * @version: Struct version + * @cmd_type: Type of test cmd - core control/synx cmd/... 
+ * @num_valid_params: Num valid params + * @valid_param_mask: Valid param mask + * @params: additional params + * @test_cmd: Synx test cmd forwarded to the core + * @core_ctrl: Synx test cmd to control fencing core + */ +struct cam_synx_test_params { + __u32 version; + __u32 cmd_type; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[4]; + union { + struct cam_synx_test_cmd test_cmd; + struct cam_synx_core_control core_ctrl; + } u; +} __attribute__((__packed__)); + +#endif /* __UAPI_CAM_DEFS_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_fd.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_fd.h new file mode 100644 index 0000000000..9140b17366 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_fd.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __UAPI_CAM_FD_H__ +#define __UAPI_CAM_FD_H__ + +#include + +#define CAM_FD_MAX_FACES 35 +#define CAM_FD_RAW_RESULT_ENTRIES 512 + +/* FD Op Codes */ +#define CAM_PACKET_OPCODES_FD_FRAME_UPDATE 0x0 + +/* FD Command Buffer identifiers */ +#define CAM_FD_CMD_BUFFER_ID_GENERIC 0x0 +#define CAM_FD_CMD_BUFFER_ID_CDM 0x1 +#define CAM_FD_CMD_BUFFER_ID_MAX 0x2 + +/* FD Blob types */ +#define CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST 0x0 +#define CAM_FD_BLOB_TYPE_RAW_RESULTS_REQUIRED 0x1 + +/* FD Resource IDs */ +#define CAM_FD_INPUT_PORT_ID_IMAGE 0x0 +#define CAM_FD_INPUT_PORT_ID_MAX 0x1 + +#define CAM_FD_OUTPUT_PORT_ID_RESULTS 0x0 +#define CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS 0x1 +#define CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER 0x2 +#define CAM_FD_OUTPUT_PORT_ID_MAX 0x3 + +/** + * struct cam_fd_soc_clock_bw_request - SOC clock, bandwidth request info + * + * @clock_rate : Clock rate required while processing frame + * @bandwidth : Bandwidth required while processing frame + * @reserved : Reserved for future use + */ +struct 
cam_fd_soc_clock_bw_request { + __u64 clock_rate; + __u64 bandwidth; + __u64 reserved[4]; +}; + +/** + * struct cam_fd_face - Face properties + * + * @prop1 : Property 1 of face + * @prop2 : Property 2 of face + * @prop3 : Property 3 of face + * @prop4 : Property 4 of face + * + * Do not change this layout, this is inline with how HW writes + * these values directly when the buffer is programmed to HW + */ +struct cam_fd_face { + __u32 prop1; + __u32 prop2; + __u32 prop3; + __u32 prop4; +}; + +/** + * struct cam_fd_results - FD results layout + * + * @faces : Array of faces with face properties + * @face_count : Number of faces detected + * @reserved : Reserved for alignment + * + * Do not change this layout, this is inline with how HW writes + * these values directly when the buffer is programmed to HW + */ +struct cam_fd_results { + struct cam_fd_face faces[CAM_FD_MAX_FACES]; + __u32 face_count; + __u32 reserved[3]; +}; + +/** + * struct cam_fd_hw_caps - Face properties + * + * @core_version : FD core version + * @wrapper_version : FD wrapper version + * @raw_results_available : Whether raw results are available on this HW + * @supported_modes : Modes supported by this HW. 
+ * @reserved : Reserved for future use + */ +struct cam_fd_hw_caps { + struct cam_hw_version core_version; + struct cam_hw_version wrapper_version; + __u32 raw_results_available; + __u32 supported_modes; + __u64 reserved; +}; + +/** + * struct cam_fd_query_cap_cmd - FD Query capabilities information + * + * @device_iommu : FD IOMMU handles + * @cdm_iommu : CDM iommu handles + * @hw_caps : FD HW capabilities + * @reserved : Reserved for alignment + */ +struct cam_fd_query_cap_cmd { + struct cam_iommu_handle device_iommu; + struct cam_iommu_handle cdm_iommu; + struct cam_fd_hw_caps hw_caps; + __u64 reserved; +}; + +/** + * struct cam_fd_acquire_dev_info - FD acquire device information + * + * @clk_bw_request : SOC clock, bandwidth request + * @priority : Priority for this acquire + * @mode : Mode in which to run FD HW. + * @get_raw_results : Whether this acquire needs face raw results + * while frame processing + * @reserved : Reserved field for 64 bit alignment + */ +struct cam_fd_acquire_dev_info { + struct cam_fd_soc_clock_bw_request clk_bw_request; + __u32 priority; + __u32 mode; + __u32 get_raw_results; + __u32 reserved[13]; +}; + +#endif /* __UAPI_CAM_FD_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_icp.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_icp.h new file mode 100644 index 0000000000..e1b36e7fc9 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_icp.h @@ -0,0 +1,358 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_ICP_H__ +#define __UAPI_CAM_ICP_H__ + +#include +#include + +/* icp, ipe, bps, cdm(ipe/bps) are used in querycap */ +#define CAM_ICP_DEV_TYPE_A5 1 +#define CAM_ICP_DEV_TYPE_IPE 2 +#define CAM_ICP_DEV_TYPE_BPS 3 +#define CAM_ICP_DEV_TYPE_IPE_CDM 4 +#define CAM_ICP_DEV_TYPE_BPS_CDM 5 +/** + * This macro is deprecated; a device type max is not necessary. + * The new macro CAM_ICP_MAX_NUM_OF_DEV_TYPES will dictate + * max number of different types of devices supported by a single + * instance of the ICP device. + */ +#define CAM_ICP_DEV_TYPE_MAX 5 + +/* ICP and OFE types added to indicate capability to userspace */ +#define CAM_ICP_DEV_TYPE_ICP 6 +#define CAM_ICP_DEV_TYPE_OFE 7 + +/* definitions needed for icp acquire device */ +#define CAM_ICP_RES_TYPE_BPS 1 +#define CAM_ICP_RES_TYPE_IPE_RT 2 +#define CAM_ICP_RES_TYPE_IPE 3 +#define CAM_ICP_RES_TYPE_IPE_SEMI_RT 4 +#define CAM_ICP_RES_TYPE_BPS_RT 5 +#define CAM_ICP_RES_TYPE_BPS_SEMI_RT 6 + +/* This macro is deprecated and no longer needed */ +#define CAM_ICP_RES_TYPE_MAX 7 + +#define CAM_ICP_RES_TYPE_OFE_RT 7 +#define CAM_ICP_RES_TYPE_OFE 8 +#define CAM_ICP_RES_TYPE_OFE_SEMI_RT 9 + +/* packet opcode types */ +#define CAM_ICP_OPCODE_IPE_UPDATE 0 +#define CAM_ICP_OPCODE_BPS_UPDATE 1 +#define CAM_ICP_OPCODE_IPE_SETTINGS 2 +#define CAM_ICP_OPCODE_BPS_SETTINGS 3 +#define CAM_ICP_OPCODE_OFE_UPDATE 4 +#define CAM_ICP_OPCODE_OFE_SETTINGS 5 + + +/* IPE input port resource type */ +#define CAM_ICP_IPE_INPUT_IMAGE_FULL 0x0 +#define CAM_ICP_IPE_INPUT_IMAGE_DS4 0x1 +#define CAM_ICP_IPE_INPUT_IMAGE_DS16 0x2 +#define CAM_ICP_IPE_INPUT_IMAGE_DS64 0x3 +#define CAM_ICP_IPE_INPUT_IMAGE_FULL_REF 0x4 +#define CAM_ICP_IPE_INPUT_IMAGE_DS4_REF 0x5 +#define CAM_ICP_IPE_INPUT_IMAGE_DS16_REF 0x6 +#define CAM_ICP_IPE_INPUT_IMAGE_DS64_REF 0x7 + +/* IPE output port resource type */ +#define CAM_ICP_IPE_OUTPUT_IMAGE_DISPLAY 0x8 +#define CAM_ICP_IPE_OUTPUT_IMAGE_VIDEO 0x9 +#define CAM_ICP_IPE_OUTPUT_IMAGE_FULL_REF 0xA 
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DS4_REF 0xB +#define CAM_ICP_IPE_OUTPUT_IMAGE_DS16_REF 0xC +#define CAM_ICP_IPE_OUTPUT_IMAGE_DS64_REF 0xD +#define CAM_ICP_IPE_OUTPUT_IMAGE_FD 0x33 +#define CAM_ICP_IPE_OUTPUT_IMAGE_STATS_IHIST 0x3D + +#define CAM_ICP_IPE_IMAGE_MAX (CAM_ICP_IPE_OUTPUT_IMAGE_STATS_IHIST + 1) + +/* BPS input port resource type */ +#define CAM_ICP_BPS_INPUT_IMAGE 0x0 + +/* BPS output port resource type */ +#define CAM_ICP_BPS_OUTPUT_IMAGE_FULL 0x1 +#define CAM_ICP_BPS_OUTPUT_IMAGE_DS4 0x2 +#define CAM_ICP_BPS_OUTPUT_IMAGE_DS16 0x3 +#define CAM_ICP_BPS_OUTPUT_IMAGE_DS64 0x4 +#define CAM_ICP_BPS_OUTPUT_IMAGE_STATS_BG 0x5 +#define CAM_ICP_BPS_OUTPUT_IMAGE_STATS_BHIST 0x6 +#define CAM_ICP_BPS_OUTPUT_IMAGE_REG1 0x7 +#define CAM_ICP_BPS_OUTPUT_IMAGE_REG2 0x8 + +#define CAM_ICP_BPS_IO_IMAGES_MAX 0x9 + +/* Command meta types */ +#define CAM_ICP_CMD_META_GENERIC_BLOB 0x1 + +/* Generic blob types */ +#define CAM_ICP_CMD_GENERIC_BLOB_CLK 0x1 +#define CAM_ICP_CMD_GENERIC_BLOB_CFG_IO 0x2 +#define CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_MAP 0x3 +#define CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_UNMAP 0x4 +#define CAM_ICP_CMD_GENERIC_BLOB_CLK_V2 0x5 +#define CAM_ICP_CMD_GENERIC_BLOB_PRESIL_HANGDUMP 0x6 +#define CAM_ICP_CMD_GENERIC_BLOB_SYSCACHE_CONFIG 0x7 + +/* Max number of device types supported per ICP instance */ +#define CAM_ICP_MAX_NUM_OF_DEV_TYPES 0x5 + +/* + * The index to the max version of the ACQUIRE_DEV API + * supported in ICP in the query cap params + */ +#define CAM_ICP_QUERY_MAX_ACQUIRE_DEV_VER_SUPPORTED_INDEX 0 + +/** + * struct cam_icp_clk_bw_request_v2 + * + * @budget_ns: Time required to process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @reserved: For memory alignment + * @num_paths: Number of axi paths in bw request + * @axi_path: Per path vote info for IPE/BPS + */ +struct cam_icp_clk_bw_request_v2 { + __u64 budget_ns; + __u32 frame_cycles; + __u32 rt_flag; + __u32 reserved; + __u32 
num_paths; + union { + struct cam_axi_per_path_bw_vote axi_path[1]; + __DECLARE_FLEX_ARRAY(struct cam_axi_per_path_bw_vote, axi_path_flex); + }; +}; + +/** + * struct cam_icp_clk_bw_request + * + * @budget_ns: Time required to process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @uncompressed_bw: Bandwidth required to process frame + * @compressed_bw: Compressed bandwidth to process frame + */ +struct cam_icp_clk_bw_request { + __u64 budget_ns; + __u32 frame_cycles; + __u32 rt_flag; + __u64 uncompressed_bw; + __u64 compressed_bw; +}; + +/** + * struct cam_icp_dev_ver - Device information for particular hw type + * + * This is used to get device version info of + * ICP, IPE, BPS and CDM related IPE and BPS from firmware + * and use this info in CAM_QUERY_CAP IOCTL + * + * @dev_type: hardware type for the cap info(icp, ipe, bps, cdm(ipe/bps)) + * @reserved: reserved field + * @hw_ver: major, minor and incr values of a device version + */ +struct cam_icp_dev_ver { + __u32 dev_type; + __u32 reserved; + struct cam_hw_version hw_ver; +}; + +/** + * struct cam_icp_ver - ICP version info + * + * This structure is used for fw and api version + * this is used to get firmware version and api version from firmware + * and use this info in CAM_QUERY_CAP IOCTL + * + * @major: FW version major + * @minor: FW version minor + * @revision: FW version increment + */ +struct cam_icp_ver { + __u32 major; + __u32 minor; + __u32 revision; + __u32 reserved; +}; + +/** + * struct cam_icp_query_cap_cmd - ICP query device capability payload + * + * @dev_iommu_handle: icp iommu handles for secure/non secure modes + * @cdm_iommu_handle: iommu handles for secure/non secure modes + * @fw_version: firmware version info + * @api_version: api version info + * @num_ipe: number of ipes + * @num_bps: number of bps + * @dev_ver: returned device capability array + */ +struct cam_icp_query_cap_cmd { + struct cam_iommu_handle 
dev_iommu_handle; + struct cam_iommu_handle cdm_iommu_handle; + struct cam_icp_ver fw_version; + struct cam_icp_ver api_version; + __u32 num_ipe; + __u32 num_bps; + struct cam_icp_dev_ver dev_ver[CAM_ICP_DEV_TYPE_MAX]; +}; + +/** + * struct cam_icp_device_info - ICP device info + * + * @dev_type: type of the device (IPE/BPS/..) + * @num_devices: number of such devices + * @dev_version: device HW version + * @num_valid_params: number of valid params + * @valid_param_mask: valid param mask + * @params: additional parameters for future usage + */ +struct cam_icp_device_info { + __u32 dev_type; + __u32 num_devices; + struct cam_hw_version dev_version; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[6]; +}; + +/** + * struct cam_icp_query_cap_cmd_v2 - ICP query device capability payload + * + * @version: struct version + * @dev_iommu_handle: icp iommu handles for secure/non secure modes + * @cdm_iommu_handle: iommu handles for secure/non secure modes + * @fw_version: firmware version info + * @num_dev_info: number of devices present in this queried subdevice + * @dev_info: returned icp devices capability array + * @hw_fence_info_size: size of the data pointed to by hw_fence_info_hdl + * if the size is 0, hw fence info will not be populated + * treated as non-fatal + * @hw_fence_info_hdl: Handle to the hw fence session data if applicable + * @num_valid_params: number of valid params + * @valid_param_mask: valid param mask + * @params: additional parameters for future usage + */ + +struct cam_icp_query_cap_cmd_v2 { + __u32 version; + struct cam_iommu_handle dev_iommu_handle; + struct cam_iommu_handle cdm_iommu_handle; + struct cam_icp_ver fw_version; + __u32 num_dev_info; + struct cam_icp_device_info dev_info[CAM_ICP_MAX_NUM_OF_DEV_TYPES]; + __u32 hw_fence_info_size; + __u64 hw_fence_info_hdl; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[5]; +} __attribute__((__packed__)); + +/** + * struct cam_icp_res_info - ICP output resource 
info + * + * @format: format of the resource + * @width: width in pixels + * @height: height in lines + * @fps: fps + */ +struct cam_icp_res_info { + __u32 format; + __u32 width; + __u32 height; + __u32 fps; +}; + +/** + * struct cam_icp_acquire_dev_info - An ICP device info + * + * @scratch_mem_size: Output param - size of scratch memory + * @dev_type: device type (IPE_RT/IPE_NON_RT/BPS) + * @io_config_cmd_size: size of IO config command + * @io_config_cmd_handle: IO config command for each acquire + * @secure_mode: camera mode (secure/non secure) + * @chain_info: chaining info of FW device handles + * @in_res: resource info used for clock and bandwidth calculation + * @num_out_res: number of output resources + * @out_res: output resource + */ +struct cam_icp_acquire_dev_info { + __u32 scratch_mem_size; + __u32 dev_type; + __u32 io_config_cmd_size; + __s32 io_config_cmd_handle; + __u32 secure_mode; + __s32 chain_info; + struct cam_icp_res_info in_res; + __u32 num_out_res; + union { + struct cam_icp_res_info out_res[1]; + __DECLARE_FLEX_ARRAY(struct cam_icp_res_info, out_res_flex); + }; +} __attribute__((__packed__)); + +/** + * struct cam_icp_res_info_v2 - ICP output resource info + * + * @format: format of the resource + * @width: width in pixels + * @height: height in lines + * @fps: fps + * @port_id: ID of the out resource + * @is_secure: whether the port is secure + * @num_valid_params: number of valid params + * @valid_param_mask: valid param mask + * @params: additional parameters for future usage + */ +struct cam_icp_res_info_v2 { + __u32 format; + __u32 width; + __u32 height; + __u32 fps; + __u32 port_id; + __u32 is_secure; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[6]; +} __attribute__((__packed__)); + +/** + * struct cam_icp_acquire_dev_info_v2 - An ICP device info + * + * @scratch_mem_size: Output param - size of scratch memory + * @dev_type: device type (IPE_RT/IPE_NON_RT/BPS) + * @io_config_cmd_size: size of IO config command + 
* @io_config_cmd_handle: IO config command for each acquire + * @secure_mode: camera mode (secure/non secure) + * @chain_info: chaining info of FW device handles + * @in_res: resource info used for clock and bandwidth calculation + * @num_out_res: number of output resources + * @out_res_flex: output resource + * @num_valid_params: number of valid params + * @valid_param_mask: valid param mask + * @params: additional parameters for future usage + */ +struct cam_icp_acquire_dev_info_v2 { + __u32 scratch_mem_size; + __u32 dev_type; + __u32 io_config_cmd_size; + __s32 io_config_cmd_handle; + __u32 secure_mode; + __s32 chain_info; + struct cam_icp_res_info_v2 in_res; + __u32 num_out_res; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[5]; + __DECLARE_FLEX_ARRAY(struct cam_icp_res_info_v2, out_res_flex); +} __attribute__((__packed__)); + +#endif /* __UAPI_CAM_ICP_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp.h new file mode 100644 index 0000000000..f78e9b64c4 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp.h @@ -0,0 +1,1570 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_ISP_H__ +#define __UAPI_CAM_ISP_H__ + +#include +#include +#include +#include +#include + +/* ISP driver name */ +#define CAM_ISP_DEV_NAME "cam-isp" + +/* HW type */ +#define CAM_ISP_HW_BASE 0 +#define CAM_ISP_HW_CSID 1 +#define CAM_ISP_HW_VFE 2 +#define CAM_ISP_HW_IFE 3 +#define CAM_ISP_HW_ISPIF 4 +#define CAM_ISP_HW_IFE_LITE 5 +#define CAM_ISP_HW_CSID_LITE 6 +#define CAM_ISP_HW_SFE 7 +#define CAM_ISP_HW_MC_TFE 8 +#define CAM_ISP_HW_MAX 9 + +/* Color Pattern */ +#define CAM_ISP_PATTERN_BAYER_RGRGRG 0 +#define CAM_ISP_PATTERN_BAYER_GRGRGR 1 +#define CAM_ISP_PATTERN_BAYER_BGBGBG 2 +#define CAM_ISP_PATTERN_BAYER_GBGBGB 3 +#define CAM_ISP_PATTERN_YUV_YCBYCR 4 +#define CAM_ISP_PATTERN_YUV_YCRYCB 5 +#define CAM_ISP_PATTERN_YUV_CBYCRY 6 +#define CAM_ISP_PATTERN_YUV_CRYCBY 7 +#define CAM_ISP_PATTERN_MAX 8 + +/* Usage Type */ +#define CAM_ISP_RES_USAGE_SINGLE 0 +#define CAM_ISP_RES_USAGE_DUAL 1 +#define CAM_ISP_RES_USAGE_MAX 2 + +/* Resource ID */ +#define CAM_ISP_RES_ID_PORT 0 +#define CAM_ISP_RES_ID_CLK 1 +#define CAM_ISP_RES_ID_MAX 2 + +/* Resource Type - Type of resource for the resource id + * defined in cam_isp_vfe.h, cam_isp_ife.h + */ + +/* Lane Type in input resource for Port */ +#define CAM_ISP_LANE_TYPE_DPHY 0 +#define CAM_ISP_LANE_TYPE_CPHY 1 +#define CAM_ISP_LANE_TYPE_MAX 2 + +/* ISP Resurce Composite Group ID */ +#define CAM_ISP_RES_COMP_GROUP_NONE 0 +#define CAM_ISP_RES_COMP_GROUP_ID_0 1 +#define CAM_ISP_RES_COMP_GROUP_ID_1 2 +#define CAM_ISP_RES_COMP_GROUP_ID_2 3 +#define CAM_ISP_RES_COMP_GROUP_ID_3 4 +#define CAM_ISP_RES_COMP_GROUP_ID_4 5 +#define CAM_ISP_RES_COMP_GROUP_ID_5 6 +#define CAM_ISP_RES_COMP_GROUP_ID_MAX 6 + +/* ISP packet opcode for ISP */ +#define CAM_ISP_PACKET_OP_BASE 0 +#define CAM_ISP_PACKET_INIT_DEV 1 +#define CAM_ISP_PACKET_UPDATE_DEV 2 +#define CAM_ISP_PACKET_OP_MAX 3 + +/* ISP packet meta_data type for command buffer */ +#define CAM_ISP_PACKET_META_BASE 0 +#define CAM_ISP_PACKET_META_LEFT 1 +#define 
CAM_ISP_PACKET_META_RIGHT 2 +#define CAM_ISP_PACKET_META_COMMON 3 +#define CAM_ISP_PACKET_META_DMI_LEFT 4 +#define CAM_ISP_PACKET_META_DMI_RIGHT 5 +#define CAM_ISP_PACKET_META_DMI_COMMON 6 +#define CAM_ISP_PACKET_META_CLOCK 7 +#define CAM_ISP_PACKET_META_CSID 8 +#define CAM_ISP_PACKET_META_DUAL_CONFIG 9 +#define CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT 10 +#define CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT 11 +#define CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON 12 +#define CAM_ISP_PACKET_META_REG_DUMP_PER_REQUEST 13 +#define CAM_ISP_PACKET_META_REG_DUMP_ON_FLUSH 14 +#define CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR 15 +#define CAM_ISP_PACKET_META_CSID_LEFT 16 +#define CAM_ISP_PACKET_META_CSID_RIGHT 17 +#define CAM_ISP_PACKET_META_CSID_COMMON 18 + +/* SFE packet meta_data type for command buffer */ +#define CAM_ISP_SFE_PACKET_META_LEFT 0x15 +#define CAM_ISP_SFE_PACKET_META_RIGHT 0x16 +#define CAM_ISP_SFE_PACKET_META_COMMON 0x17 +#define CAM_ISP_SFE_PACKET_META_DUAL_CONFIG 0x18 + +/* DSP mode */ +#define CAM_ISP_DSP_MODE_NONE 0 +#define CAM_ISP_DSP_MODE_ONE_WAY 1 +#define CAM_ISP_DSP_MODE_ROUND 2 + +/* ISP Generic Cmd Buffer Blob types */ +#define CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG 0 +#define CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG 1 +#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG 2 +#define CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG 3 +#define CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG 4 +#define CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG 5 +#define CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2 6 +#define CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG 7 +#define CAM_ISP_GENERIC_BLOB_TYPE_VFE_OUT_CONFIG 8 +#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V2 9 +#define CAM_ISP_GENERIC_BLOB_TYPE_DISCARD_INITIAL_FRAMES 10 +#define CAM_ISP_GENERIC_BLOB_TYPE_SENSOR_DIMENSION_CONFIG 11 +#define CAM_ISP_GENERIC_BLOB_TYPE_CSID_QCFA_CONFIG 12 +#define CAM_ISP_GENERIC_BLOB_TYPE_SENSOR_BLANKING_CONFIG 13 +#define CAM_ISP_GENERIC_BLOB_TYPE_TPG_CORE_CONFIG 14 +#define CAM_ISP_GENERIC_BLOB_TYPE_DYNAMIC_MODE_SWITCH 15 
+#define CAM_ISP_GENERIC_BLOB_TYPE_BW_LIMITER_CFG 16 +#define CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG 17 +#define CAM_ISP_GENERIC_BLOB_TYPE_INIT_CONFIG 18 +#define CAM_ISP_GENERIC_BLOB_TYPE_RDI_LCR_CONFIG 19 +#define CAM_ISP_GENERIC_BLOB_TYPE_SFE_FCG_CFG 20 +#define CAM_ISP_GENERIC_BLOB_TYPE_SFE_CLOCK_CONFIG 21 +#define CAM_ISP_GENERIC_BLOB_TYPE_SFE_CORE_CONFIG 22 +#define CAM_ISP_GENERIC_BLOB_TYPE_SFE_OUT_CONFIG 23 +#define CAM_ISP_GENERIC_BLOB_TYPE_SFE_HFR_CONFIG 24 +#define CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG 25 +#define CAM_ISP_GENERIC_BLOB_TYPE_SFE_SCRATCH_BUF_CFG 26 +#define CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG 27 +#define CAM_ISP_GENERIC_BLOB_TYPE_DRV_CONFIG 28 +#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V3 29 +#define CAM_ISP_GENERIC_BLOB_TYPE_NFI_MODE_SWITCH 30 +#define CAM_ISP_GENERIC_BLOB_TYPE_IRQ_COMP_CFG 31 +#define CAM_ISP_GENERIC_BLOB_TYPE_VFE_OUT_CONFIG_V2 32 +#define CAM_ISP_GENERIC_BLOB_TYPE_IFE_FCG_CFG 33 +#define CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V3 34 +#define CAM_ISP_GENERIC_BLOB_TYPE_EXP_ORDER_UPDATE 35 + +#define CAM_ISP_VC_DT_CFG 4 +#define CAM_ISP_INVALID_VC_VALUE 0xFFFF + +#define CAM_ISP_IFE0_HW 0x1 +#define CAM_ISP_IFE1_HW 0x2 +#define CAM_ISP_IFE0_LITE_HW 0x4 +#define CAM_ISP_IFE1_LITE_HW 0x8 +#define CAM_ISP_IFE2_LITE_HW 0x10 +#define CAM_ISP_IFE3_LITE_HW 0x20 +#define CAM_ISP_IFE4_LITE_HW 0x40 +#define CAM_ISP_IFE2_HW 0x100 +#define CAM_ISP_SFE0_HW 0x1000 +#define CAM_ISP_SFE1_HW 0x2000 +#define CAM_ISP_SFE2_HW 0x4000 + +#define CAM_ISP_PXL_PATH 0x1 +#define CAM_ISP_PPP_PATH 0x2 +#define CAM_ISP_LCR_PATH 0x4 +#define CAM_ISP_RDI0_PATH 0x8 +#define CAM_ISP_RDI1_PATH 0x10 +#define CAM_ISP_RDI2_PATH 0x20 +#define CAM_ISP_RDI3_PATH 0x40 +#define CAM_ISP_RDI4_PATH 0x80 +#define CAM_ISP_PXL1_PATH 0x100 +#define CAM_ISP_PXL2_PATH 0x200 + +/* + * Multi Context Mask + */ +#define CAM_ISP_MULTI_CTXT0_MASK 0x1 +#define CAM_ISP_MULTI_CTXT1_MASK 0x2 +#define CAM_ISP_MULTI_CTXT2_MASK 0x4 + +/* Per Path Usage Data */ +#define 
CAM_ISP_USAGE_INVALID 0 +#define CAM_ISP_USAGE_LEFT_PX 1 +#define CAM_ISP_USAGE_RIGHT_PX 2 +#define CAM_ISP_USAGE_RDI 3 +#define CAM_ISP_USAGE_SFE_LEFT 4 +#define CAM_ISP_USAGE_SFE_RIGHT 5 +#define CAM_ISP_USAGE_SFE_RDI 6 + +/* Acquire with custom hw */ +#define CAM_ISP_ACQ_CUSTOM_NONE 0 +#define CAM_ISP_ACQ_CUSTOM_PRIMARY 1 +#define CAM_ISP_ACQ_CUSTOM_SECONDARY 2 + +#define CAM_IFE_CSID_RDI_MAX 5 + +/* Feature Flag indicators */ +#define CAM_ISP_PARAM_FETCH_SECURITY_MODE BIT(0) +#define CAM_ISP_CAN_USE_LITE_MODE BIT(1) +#define CAM_ISP_DYNAMIC_SENOR_SWITCH_EN BIT(2) +#define CAM_ISP_SFE_BINNED_EPOCH_CFG_ENABLE BIT(3) +#define CAM_ISP_EPD_SUPPORT BIT(4) +#define CAM_ISP_SFE_FS_MODE_EN BIT(5) +#define CAM_ISP_SFE_SHDR_MODE_EN BIT(6) +#define CAM_ISP_AEB_MODE_EN BIT(7) +#define CAM_ISP_HDR_MODE_DYNAMIC_SWITCH_EN BIT(8) +#define CAM_ISP_NFI_BASED_MODE_SWITCH_EN BIT(9) + +/* ISP core cfg flag params */ +#define CAM_ISP_PARAM_CORE_CFG_HDR_MUX_SEL BIT(0) +#define CAM_ISP_PARAM_CORE_CFG_PP_FORMAT BIT(16) + +/* Indicate which module is configured for FCG */ +#define CAM_ISP_FCG_ENABLE_PHASE BIT(0) +#define CAM_ISP_FCG_ENABLE_STATS BIT(1) + +/* + * Indicate which channel is configured for FCG + * For SFE, channel 1/2 are used on demand + * For IFE, treat it as channel 0 + * For TFE, use Multi Context Mask to indicate the path + */ +#define CAM_ISP_FCG_MASK_CH0 0x1 +#define CAM_ISP_FCG_MASK_CH1 0x2 +#define CAM_ISP_FCG_MASK_CH2 0x4 + +/** + * Decode format1 Support for multi VCDT use case. + * Format type is packed in 8 bits. BIT(0-7) is + * format and BIT(8-15) is format1 type in the format + * variable + */ +#define CAM_IFE_DECODE_FORMAT_MASK 0xFF +#define CAM_IFE_DECODE_FORMAT_SHIFT_VAL 8 + +/* + * to indicate if EPD mode is to be enabled for CSI2 RX + * in CSID. This reserves the input info param_mask for + * cam_isp_in_port_csid_info for acquire v3 + * + */ +#define CAM_IFE_CSID_EPD_MODE_EN BIT(0) + +/* + * to indicate if packing is to be done at Bus side. 
+ * CSID gives the plain data and packed at bus. + * This mask reserves the param_mask for cam_isp_out_port_info_v3. + * + */ +#define CAM_IFE_USE_WM_PACK BIT(0) + +/* + * to indicate if RCS to be enabled. + */ +#define CAM_IFE_WM_RCS_EN BIT(1) + +/* + * to indicate if per port buffer alignment is enabled. + */ +#define CAM_IFE_WM_BUF_ALLIGN_EN BIT(0) + + +/* + * Indicates exposure orders within a frame time + */ +#define CAM_ISP_EXP_ORDER_LAST 1 +#define CAM_ISP_EXP_ORDER_LAST_1 2 +#define CAM_ISP_EXP_ORDER_LAST_2 3 + +/** + * struct cam_isp_irq_comp_cfg - CSID composite config for MC-based TFE + * Contains information regarding active contexts in CSID CAMIF + * and active contexts in write engine TFE + * + * @ipp_src_ctxt_mask : Active paths in CSID CAMIF + * @ipp_dst_comp_mask : Active paths in TFE + * @num_valid_params : Number of valid params + * @valid_param_mask : Valid param mask + * @params : params + */ +struct cam_isp_irq_comp_cfg { + __u32 ipp_src_ctxt_mask; + __u32 ipp_dst_comp_mask; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[4]; +} __attribute__((packed)); + +/** + * Below macro definition is the param mask for + * cam_isp_drv_config. + * + * CAM_IFE_DRV_BLANKING_THRESHOLD : DRV decision logic uses this value to check + * if current request can enable DRV, if the + * vertical blanking is greater than this value, + * then DRV can be enabled, otherwises, DRV will + * be disabled. The unit of this value is millisecond. + */ +#define CAM_IFE_DRV_BLANKING_THRESHOLD BIT(0) + +/** + * struct cam_isp_drv_config - CSID config for DRV + * Enables DRV and provides worst case timeout value in INIT packet, + * provides path_idle_en and timeout updates (if any) in UPDATE packet + * + * @drv_en : Enables DRV block + * @timeout_val : Timeout value from SOF to trigger vote up, + * given in number of Global Counter cycles. + * @path_idle_en : Mask for paths to be considered for consolidated IDLE signal. 
+ * When paths matching the mask go idle, BW is voted down. + * @num_valid_params : Number of valid params + * @valid_param_mask : Valid param mask + * @params : params + */ +struct cam_isp_drv_config { + __u32 drv_en; + __u32 timeout_val; + __u32 path_idle_en; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[5]; +} __attribute__((packed)); + + +/* Query devices */ +/** + * struct cam_isp_dev_cap_info - A cap info for particular hw type + * + * @hw_type: Hardware type for the cap info + * @num_hw: Number of HW of type @hw_type + * @hw_version: Hardware version + * + */ +struct cam_isp_dev_cap_info { + __u32 hw_type; + __u32 num_hw; + struct cam_hw_version hw_version; +}; + +/** + * struct cam_isp_query_cap_cmd - ISP query device capability payload + * + * @device_iommu: returned iommu handles for device + * @cdm_iommu: returned iommu handles for cdm + * @num_dev: returned number of device capabilities + * @reserved: reserved field for alignment + * @dev_caps: returned device capability array + * + */ +struct cam_isp_query_cap_cmd { + struct cam_iommu_handle device_iommu; + struct cam_iommu_handle cdm_iommu; + __s32 num_dev; + __u32 reserved; + struct cam_isp_dev_cap_info dev_caps[CAM_ISP_HW_MAX]; +}; + +/* Acquire Device */ +/** + * struct cam_isp_out_port_info - An output port resource info + * + * @res_type: output resource type defined in file + * cam_isp_vfe.h or cam_isp_ife.h + * @format: output format of the resource + * @wdith: output width in pixels + * @height: output height in lines + * @comp_grp_id: composite group id for the resource. + * @split_point: split point in pixels for the dual VFE. + * @secure_mode: flag to tell if output should be run in secure + * mode or not. 
See cam_defs.h for definition + * @reserved: reserved field for alignment + * + */ +struct cam_isp_out_port_info { + __u32 res_type; + __u32 format; + __u32 width; + __u32 height; + __u32 comp_grp_id; + __u32 split_point; + __u32 secure_mode; + __u32 reserved; +}; + +/** + * struct cam_isp_out_port_info_v2 - An output port resource info + * + * @res_type: output resource type defined in file + * cam_isp_vfe.h or cam_isp_ife.h + * @format: output format of the resource + * @wdith: output width in pixels + * @height: output height in lines + * @comp_grp_id: composite group id for the resource. + * @split_point: split point in pixels for the dual VFE. + * @secure_mode: flag to tell if output should be run in secure + * mode or not. See cam_defs.h for definition + * @wm_mode: WM mode + * @out_port_res1: Output reserved field + * @out_port_res2: Output reserved field + * + */ +struct cam_isp_out_port_info_v2 { + __u32 res_type; + __u32 format; + __u32 width; + __u32 height; + __u32 comp_grp_id; + __u32 split_point; + __u32 secure_mode; + __u32 wm_mode; + __u32 out_port_res1; + __u32 out_port_res2; +}; + +/** + * struct cam_isp_in_port_info - An input port resource info + * + * @res_type: input resource type define in file + * cam_isp_vfe.h or cam_isp_ife.h + * @lane_type: lane type: c-phy or d-phy. + * @lane_num: active lane number + * @lane_cfg: lane configurations: 4 bits per lane + * @vc: input virtual channel number + * @dt: input data type number + * @format: input format + * @test_pattern: test pattern for the testgen + * @usage_type: whether dual vfe is required + * @left_start: left input start offset in pixels + * @left_stop: left input stop offset in pixels + * @left_width: left input width in pixels + * @right_start: right input start offset in pixels. + * Only for Dual VFE + * @right_stop: right input stop offset in pixels. + * Only for Dual VFE + * @right_width: right input width in pixels. 
+ * Only for dual VFE + * @line_start: top of the line number + * @line_stop: bottome of the line number + * @height: input height in lines + * @pixel_clk; sensor output clock + * @batch_size: batch size for HFR mode + * @dsp_mode: DSP stream mode (Defines as CAM_ISP_DSP_MODE_*) + * @hbi_cnt: HBI count for the camif input + * @reserved: Reserved field for alignment + * @num_out_res: number of the output resource associated + * @data: payload that contains the output resources + * + */ +struct cam_isp_in_port_info { + __u32 res_type; + __u32 lane_type; + __u32 lane_num; + __u32 lane_cfg; + __u32 vc; + __u32 dt; + __u32 format; + __u32 test_pattern; + __u32 usage_type; + __u32 left_start; + __u32 left_stop; + __u32 left_width; + __u32 right_start; + __u32 right_stop; + __u32 right_width; + __u32 line_start; + __u32 line_stop; + __u32 height; + __u32 pixel_clk; + __u32 batch_size; + __u32 dsp_mode; + __u32 hbi_cnt; + __u32 reserved; + __u32 num_out_res; + union { + struct cam_isp_out_port_info data[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_out_port_info, data_flex); + }; +}; + +/** + * struct cam_isp_in_port_info_v2 - An input port resource info + * + * @res_type: input resource type define in file + * cam_isp_vfe.h or cam_isp_ife.h + * @lane_type: lane type: c-phy or d-phy. + * @lane_num: active lane number + * @lane_cfg: lane configurations: 4 bits per lane + * @vc: input virtual channel number + * @dt: input data type number + * @num_valid_vc_dt: valid vc and dt in array + * @format: input format + * @test_pattern: test pattern for the testgen + * @usage_type: whether dual vfe is required + * @left_start: left input start offset in pixels + * @left_stop: left input stop offset in pixels + * @left_width: left input width in pixels + * @right_start: right input start offset in pixels. + * Only for Dual VFE + * @right_stop: right input stop offset in pixels. + * only for Dual VFE + * @right_width: right input width in pixels. 
+ * only for dual VFE + * @line_start: top of the line number + * @line_stop: bottome of the line number + * @height: input height in lines + * @pixel_clk; sensor output clock + * @batch_size: batch size for HFR mode + * @dsp_mode: DSP stream mode (Defines as CAM_ISP_DSP_MODE_*) + * @hbi_cnt: HBI count for the camif input + * @cust_node: if any custom HW block is present before IFE + * @num_out_res: number of the output resource associated + * @bidirectional_bin: [0 : 15] - Set 1 for Horizontal binning + * [16 : 31] - Set 1 for Vertical binning + * @qcfa_bin: Quadra Binning info + * @sfe_in_path_type: SFE input path type + * 0:15 - refer to cam_isp_sfe.h for SFE paths + * 16:31 - Corresponding IFE i/p path type + * Example:((CAM_ISP_PXL_PATH << 16) | + * CAM_ISP_SFE_INLINE_PIX) + * This will acquire SFE inline IPP and IFE IPP + * PPP is an exception CSID PPP -> IFE PPP + * @feature_flag: See the macros defined under feature flag above + * @ife_res_1: payload for future use. + * @ife_res_2: payload for future use. 
+ * @data: payload that contains the output resources + * + */ +struct cam_isp_in_port_info_v2 { + __u32 res_type; + __u32 lane_type; + __u32 lane_num; + __u32 lane_cfg; + __u32 vc[CAM_ISP_VC_DT_CFG]; + __u32 dt[CAM_ISP_VC_DT_CFG]; + __u32 num_valid_vc_dt; + __u32 format; + __u32 test_pattern; + __u32 usage_type; + __u32 left_start; + __u32 left_stop; + __u32 left_width; + __u32 right_start; + __u32 right_stop; + __u32 right_width; + __u32 line_start; + __u32 line_stop; + __u32 height; + __u32 pixel_clk; + __u32 batch_size; + __u32 dsp_mode; + __u32 hbi_cnt; + __u32 cust_node; + __u32 num_out_res; + __u32 offline_mode; + __u32 bidirectional_bin; + __u32 qcfa_bin; + __u32 sfe_in_path_type; + __u32 feature_flag; + __u32 ife_res_1; + __u32 ife_res_2; + union { + struct cam_isp_out_port_info_v2 data[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_out_port_info_v2, data_flex); + }; +}; + +/** + * struct cam_isp_in_port_phy_info - CSID in port PHY info + * + * @res_type: input resource for the stream + * @lane_type: Lane type--> C-Phy/ D-Phy + * @lane_num: Number of lanes + * @lane_cfg: Lane Configuraion + */ +struct cam_isp_in_port_phy_info { + __u32 res_type; + __u32 lane_type; + __u32 lane_num; + __u32 lane_cfg; +}; + +/** + * struct cam_isp_in_port_csid_info - CSID in port info + * + * @vc: Virtual Channel for the incoming stream + * @dt: Data type for the incoming stream + * @num_valid_vc_dt Number of valid vc dt in case of multi vc dt on a single path + * @format: Incoming format for this input + * @width: Width of incoming stream + * @height: Height of incoming stream + * @path_id: CSID IPP Path to be acquired + * @param_mask: Reserved field to add new features + * @params: Reserved fields + */ +struct cam_isp_in_port_csid_info { + __u32 vc[CAM_ISP_VC_DT_CFG]; + __u32 dt[CAM_ISP_VC_DT_CFG]; + __u32 num_valid_vc_dt; + __u32 format; + __u32 width; + __u32 height; + __u32 path_id; + __u32 param_mask; + __u32 params[7]; +}; + +/** + * struct cam_isp_out_port_info_v3 - An 
output port resource info + * + * @res_type: output resource type defined in file + * cam_isp_vfe.h or cam_isp_ife.h + * @format: output format of the resource + * @width: output width in pixels + * @height: output height in lines + * @comp_grp_id: composite group id for the resource. + * @split_point: split point in pixels for the dual VFE. + * @secure_mode: flag to tell if output should be run in secure + * mode or not. See cam_defs.h for definition + * @wm_mode: WM mode + * @context_id: Context ID in case of multi context + * @param_mask: Reserved field to add new features + * @params: Reserved fields + */ +struct cam_isp_out_port_info_v3 { + __u32 res_type; + __u32 format; + __u32 width; + __u32 height; + __u32 comp_grp_id; + __u32 split_point; + __u32 secure_mode; + __u32 wm_mode; + __u32 context_id; + __u32 param_mask; + __u32 params[6]; +}; +/** + * struct cam_isp_in_port_info_v3 - A resource bundle + * + * @csid_info: resource id for the resource bundle + * @phy_info: length of the while resource blob + * @num_contexts Num of contexts in case of multi context + * @feature_mask: Feature mask to store bit fields for any specific use case + * @data: Pointer to out resource data + */ +struct cam_isp_in_port_info_v3 { + struct cam_isp_in_port_csid_info csid_info; + struct cam_isp_in_port_phy_info phy_info; + __u32 num_contexts; + __u32 feature_mask; + __u32 num_out_res; + union { + struct cam_isp_out_port_info_v3 data[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_out_port_info_v3, data_flex); + }; +}; + +/** + * struct cam_isp_resource - A resource bundle + * + * @resoruce_id: resource id for the resource bundle + * @length: length of the while resource blob + * @handle_type: type of the resource handle + * @reserved: reserved field for alignment + * @res_hdl: resource handle that points to the + * resource array; + * + */ +struct cam_isp_resource { + __u32 resource_id; + __u32 length; + __u32 handle_type; + __u32 reserved; + __u64 res_hdl; +}; + +/** + * struct 
cam_isp_port_hfr_config - HFR configuration for this port + * + * @resource_type: Resource type + * @subsample_pattern: Subsample pattern. Used in HFR mode. It + * should be consistent with batchSize and + * CAMIF programming. + * @subsample_period: Subsample period. Used in HFR mode. It + * should be consistent with batchSize and + * CAMIF programming. + * @framedrop_pattern: Framedrop pattern + * @framedrop_period: Framedrop period + * @reserved: Reserved for alignment + */ +struct cam_isp_port_hfr_config { + __u32 resource_type; + __u32 subsample_pattern; + __u32 subsample_period; + __u32 framedrop_pattern; + __u32 framedrop_period; + __u32 reserved; +} __attribute__((packed)); + +/** + * struct cam_isp_resource_hfr_config - Resource HFR configuration + * + * @num_ports: Number of ports + * @reserved: Reserved for alignment + * @port_hfr_config: HFR configuration for each IO port + */ +struct cam_isp_resource_hfr_config { + __u32 num_ports; + __u32 reserved; + union { + struct cam_isp_port_hfr_config port_hfr_config[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_port_hfr_config, port_hfr_config_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_isp_dual_split_params - dual isp split parameters + * + * @split_point: Split point information x, where (0 < x < width) + * left ISP's input ends at x + right padding and + * Right ISP's input starts at x - left padding + * @right_padding: Padding added past the split point for left + * ISP's input + * @left_padding: Padding added before split point for right + * ISP's input + * @reserved: Reserved field for alignment + * + */ +struct cam_isp_dual_split_params { + __u32 split_point; + __u32 right_padding; + __u32 left_padding; + __u32 reserved; +}; + +/** + * struct cam_isp_dual_stripe_config - stripe config per bus client + * + * @offset: Start horizontal offset relative to + * output buffer + * In UBWC mode, this value indicates the H_INIT + * value in pixel + * @width: Width of the stripe in bytes + * @tileconfig 
Ubwc meta tile config. Contain the partial + * tile info + * @port_id: port id of ISP output + * + */ +struct cam_isp_dual_stripe_config { + __u32 offset; + __u32 width; + __u32 tileconfig; + __u32 port_id; +}; + +/** + * struct cam_isp_dual_config - dual isp configuration + * + * @num_ports Number of isp output ports + * @reserved Reserved field for alignment + * @split_params: Inpput split parameters + * @stripes: Stripe information + * + */ +struct cam_isp_dual_config { + __u32 num_ports; + __u32 reserved; + struct cam_isp_dual_split_params split_params; + union { + struct cam_isp_dual_stripe_config stripes[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_dual_stripe_config, stripes_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_isp_clock_config - Clock configuration + * + * @usage_type: Usage type (Single/Dual) + * @num_rdi: Number of RDI votes + * @left_pix_hz: Pixel Clock for Left ISP + * @right_pix_hz: Pixel Clock for Right ISP, valid only if Dual + * @rdi_hz: RDI Clock. ISP clock will be max of RDI and + * PIX clocks. For a particular context which ISP + * HW the RDI is allocated to is not known to UMD. + * Hence pass the clock and let KMD decide. 
+ */ +struct cam_isp_clock_config { + __u32 usage_type; + __u32 num_rdi; + __u64 left_pix_hz; + __u64 right_pix_hz; + union { + __u64 rdi_hz[1]; + __DECLARE_FLEX_ARRAY(__u64, rdi_hz_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_isp_csid_clock_config - CSID clock configuration + * + * @csid_clock CSID clock + */ +struct cam_isp_csid_clock_config { + __u64 csid_clock; +} __attribute__((packed)); + +/** + * struct cam_isp_csid_qcfa_config - CSID qcfa binning support configuration + * + * @csid_binning CSID binning + */ +struct cam_isp_csid_qcfa_config { + __u32 csid_binning; +} __attribute__((packed)); + +/** + * struct cam_isp_bw_vote - Bandwidth vote information + * + * @resource_id: Resource ID + * @reserved: Reserved field for alignment + * @cam_bw_bps: Bandwidth vote for CAMNOC + * @ext_bw_bps: Bandwidth vote for path-to-DDR after CAMNOC + */ +struct cam_isp_bw_vote { + __u32 resource_id; + __u32 reserved; + __u64 cam_bw_bps; + __u64 ext_bw_bps; +} __attribute__((packed)); + +/** + * struct cam_isp_bw_config - Bandwidth configuration + * + * @usage_type: Usage type (Single/Dual) + * @num_rdi: Number of RDI votes + * @left_pix_vote: Bandwidth vote for left ISP + * @right_pix_vote: Bandwidth vote for right ISP + * @rdi_vote: RDI bandwidth requirements + */ +struct cam_isp_bw_config { + __u32 usage_type; + __u32 num_rdi; + struct cam_isp_bw_vote left_pix_vote; + struct cam_isp_bw_vote right_pix_vote; + union { + struct cam_isp_bw_vote rdi_vote[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_bw_vote, rdi_vote_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_isp_bw_config_v2 - Bandwidth configuration + * + * @usage_type: Usage type (Single/Dual) + * @num_paths: Number of axi data paths + * @axi_path Per path vote info + */ +struct cam_isp_bw_config_v2 { + __u32 usage_type; + __u32 num_paths; + union { + struct cam_axi_per_path_bw_vote axi_path[1]; + __DECLARE_FLEX_ARRAY(struct cam_axi_per_path_bw_vote, axi_path_flex); + }; +} 
__attribute__((packed)); + +/** + * struct cam_isp_bw_config_v3 - Bandwidth configuration + * + * @usage_type: Usage type (Single/Dual) + * @num_paths: Number of axi data paths + * @num_valid_params: Number of valid params + * @valid_param_mask: Valid param mask + * @params: params + * @axi_path: Per path vote info v2 + */ +struct cam_isp_bw_config_v3 { + __u32 usage_type; + __u32 num_paths; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[4]; + union { + struct cam_axi_per_path_bw_vote_v2 axi_path[1]; + __DECLARE_FLEX_ARRAY(struct cam_axi_per_path_bw_vote_v2, axi_path_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_fe_config - Fetch Engine configuration + * + * @version: fetch engine veriosn + * @min_vbi: require min vbi + * @fs_mode: indicates if fs mode enabled + * @fs_line_sync_en: frame level sync or line level + * sync for fetch engine + * @hbi_count: hbi count + * @fs_sync_enable: indicates if fetch engine working + * wokring in sync with write engine + * @go_cmd_sel: softwrae go_cmd or hw go_cmd + * @client_enable: enable read engine + * @source_addr: adrress of buffer to read from + * @width: buffer width + * @height: buffer height + * @stride: buffer stride (here equal to width) + * @format: format of image in buffer + * @unpacker_cfg: unpacker config type + * @latency_buf_size: latency buffer for read engine + */ +struct cam_fe_config { + __u64 version; + __u32 min_vbi; + __u32 fs_mode; + __u32 fs_line_sync_en; + __u32 hbi_count; + __u32 fs_sync_enable; + __u32 go_cmd_sel; + __u32 client_enable; + __u32 source_addr; + __u32 width; + __u32 height; + __u32 stride; + __u32 format; + __u32 unpacker_cfg; + __u32 latency_buf_size; +} __attribute__((packed)); + + +/** + * struct cam_isp_sensor_path_dimension + * + * @width expected width + * @height expected height + * @measure_enabled flag to indicate if pixel measurement is to be enabled + */ +struct cam_isp_sensor_dimension { + __u32 width; + __u32 height; + __u32 
measure_enabled; +} __attribute__((packed)); + +/** + * struct cam_isp_sensor_blanking_config + * + * @hbi HBI value + * @vbi VBI value + */ +struct cam_isp_sensor_blanking_config { + __u32 hbi; + __u32 vbi; +} __attribute__((packed)); + +/** + * struct cam_isp_sensor_config - Sensor Dimension configuration + * + * @ppp_path: expected ppp path configuration + * @ipp_path: expected ipp path configuration + * @rdi_path: expected rdi path configuration + * @hbi: HBI value + * @vbi: VBI value + */ +struct cam_isp_sensor_config { + struct cam_isp_sensor_dimension ppp_path; + struct cam_isp_sensor_dimension ipp_path; + struct cam_isp_sensor_dimension rdi_path[CAM_IFE_CSID_RDI_MAX]; + __u32 hbi; + __u32 vbi; +} __attribute__((packed)); + +/** + * struct cam_isp_core_config - ISP core registers configuration + * + * @version: Version info + * @vid_ds16_r2pd: Enables Y and C merging PD output for video DS16 + * @vid_ds4_r2pd: Enables Y and C merging PD output for video DS4 + * @disp_ds16_r2pd: Enables Y and C merging PD output for disp DS16 + * @disp_ds4_r2pd: Enables Y and C merging PD output for disp DS4 + * @dsp_streaming_tap_point: This selects source for DSP streaming interface + * @ihist_src_sel: Selects input for IHIST module + * @hdr_be_src_sel: Selects input for HDR BE module + * @hdr_bhist_src_sel: Selects input for HDR BHIST module + * @input_mux_sel_pdaf: Selects input for PDAF + * @input_mux_sel_pp: Selects input for Pixel Pipe + * @core_cfg_flag: Core config flag to set HDR mux/PP + * input format type + */ +struct cam_isp_core_config { + __u32 version; + __u32 vid_ds16_r2pd; + __u32 vid_ds4_r2pd; + __u32 disp_ds16_r2pd; + __u32 disp_ds4_r2pd; + __u32 dsp_streaming_tap_point; + __u32 ihist_src_sel; + __u32 hdr_be_src_sel; + __u32 hdr_bhist_src_sel; + __u32 input_mux_sel_pdaf; + __u32 input_mux_sel_pp; + __u32 core_cfg_flag; +} __attribute__((packed)); + +/** + * struct cam_isp_sfe_core_config - SFE core registers configuration + * + * @version : Version info + 
* @mode_sel : Selects core for sHDR/non-sHDR mode + * @ops_mode_cfg : Selects core if is inline/offline mode + * @fs_mode_cfg : Selects output in fast shutter mode + * @sfe_params : SFE params for future use + */ +struct cam_isp_sfe_core_config { + __u32 version; + __u32 mode_sel; + __u32 ops_mode_cfg; + __u32 fs_mode_cfg; + __u32 sfe_params[6]; +} __attribute__((packed)); + +/** + * struct cam_isp_sfe_scratch_buf_info - Scratch buf info + * + * @mem_handle : Scratch buffer handle + * @offset : Offset to the buffer + * @width : Width in pixels + * @height : Height in pixels + * @stride : Stride in pixels + * @slice_height : Slice height in lines + * @resource_type : rsrc type + * @scratch_buf_params : for future use + */ +struct cam_isp_sfe_scratch_buf_info { + __s32 mem_handle; + __u32 offset; + __u32 width; + __u32 height; + __u32 stride; + __u32 slice_height; + __u32 resource_type; + __u32 scratch_buf_params[5]; +}; + +/** + * struct cam_isp_sfe_init_scratch_buf_config - SFE init buffer cfg + * Provides scratch buffer info for SFE ports + * as part of INIT packet + * + * @num_ports : Number of ports + * @reserved : reserved + * @port_scratch_cfg : scratch buffer info + */ +struct cam_isp_sfe_init_scratch_buf_config { + __u32 num_ports; + __u32 reserved; + union { + struct cam_isp_sfe_scratch_buf_info port_scratch_cfg[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_sfe_scratch_buf_info, port_scratch_cfg_flex); + }; +}; + +/** + * struct cam_isp_predict_fcg_config - FCG config in a single prediction + * + * @version: Version Info + * @phase_index_g: Starting index of LUT for G channel in phase + * @phase_index_r: Starting index of LUT for R channel in phase + * @phase_index_b: Starting index of LUT for B channel in phase + * @stats_index_g: Starting index of LUT for G channel in stats + * @stats_index_r: Starting index of LUT for R channel in stats + * @stats_index_b: Starting index of LUT for B channel in stats + * @num_valid_params: Number of valid params being used + 
* @valid_param_mask: Indicate the exact params being used + * @params: Params for future change + */ +struct cam_isp_predict_fcg_config { + __u32 version; + __u32 phase_index_g; + __u32 phase_index_r; + __u32 phase_index_b; + __u32 stats_index_g; + __u32 stats_index_r; + __u32 stats_index_b; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[5]; +}; + +/** + * struct cam_isp_ch_ctx_fcg_config - FCG config in a single channel for SFE/IFE + * or in a single context in TFE + * + * @version: Version Info + * @fcg_ch_ctx_id: Index of the channel to be configured that FCG + * blocks reside on. If one wants to config FCG + * block for IFE/SFE, CAM_ISP_FCG_MASK_CH0/1/2 is + * used. If one wants to config FCG block for TFE, + * multi context mask is used. + * @fcg_enable_mask: Indicate which module will be enabled for + * FCG. For example, if one wants to config + * SFE FCG STATS module, CAM_ISP_FCG_ENABLE_STATS + * will be set in mask + * @num_valid_params: Number of valid params being used + * @valid_param_mask: Indicate the exact params being used + * @params: Params for future change + * @predicted_fcg_configs: Pointer to fcg config for each prediction of + * the channel in serial order + */ +struct cam_isp_ch_ctx_fcg_config { + __u32 version; + __u32 fcg_ch_ctx_id; + __u32 fcg_enable_mask; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[5]; + union { + struct cam_isp_predict_fcg_config predicted_fcg_configs[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_predict_fcg_config, predicted_fcg_configs_flex); + }; +}; + +/** + * struct cam_isp_generic_fcg_config - FCG config for a frame + * + * @version: Version info + * @size: Size for the whole FCG configurations + * @num_ch_ctx: Number of channels for fcg config in SFE/IFE + * or number of contexts in TFE + * @num_predictions: Number of predictions for each channel + * in SFE/IFE or for each context in TFE + * @num_valid_params: Number of valid params being used + * @valid_param_mask: Indicate 
the exact params being used + * @params: Params for future change + * @ch_ctx_fcg_configs: Pointer to fcg config for each channel in + * SFE/IFE or for each context in TFE + */ +struct cam_isp_generic_fcg_config { + __u32 version; + __u32 size; + __u32 num_ch_ctx; + __u32 num_predictions; + __u32 num_valid_params; + __u32 valid_params_mask; + __u32 params[4]; + union { + struct cam_isp_ch_ctx_fcg_config ch_ctx_fcg_configs[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_ch_ctx_fcg_config, ch_ctx_fcg_configs_flex); + }; +}; + +/** + * struct cam_isp_tpg_core_config - TPG core registers configuration + * + * @version : Version info + * @vc_dt_pattern_id : TPG pattern - SparsePD, sHDR etc. + * @qcfa_en : Selects qcfa in color bar + * @pix_pattern : Pix pattern color bar cfg + * @hbi_clk_cnt : Number of HBI # of cycles + * @vbi_clk_cnt : Number of VBI # of cycles + * @throttle_pattern : Defines bubble pattern in throttler + * @tpg_params : TPG params for future use + */ +struct cam_isp_tpg_core_config { + __u32 version; + __u32 vc_dt_pattern_id; + __u32 qcfa_en; + __u32 pix_pattern; + __u32 hbi_clk_cnt; + __u32 vbi_clk_cnt; + __u32 throttle_pattern; + __u32 tpg_params[3]; +} __attribute__((packed)); + +/** + * struct cam_isp_acquire_hw_info - ISP acquire HW params + * + * @common_info_version : Version of common info struct used + * @common_info_size : Size of common info struct used + * @common_info_offset : Offset of common info from start of data + * @num_inputs : Number of inputs + * @input_info_version : Version of input info struct used + * @input_info_size : Size of input info struct used + * @input_info_offset : Offset of input info from start of data + * @data : Start of data region + */ +struct cam_isp_acquire_hw_info { + __u16 common_info_version; + __u16 common_info_size; + __u32 common_info_offset; + __u32 num_inputs; + __u32 input_info_version; + __u32 input_info_size; + __u32 input_info_offset; + __u64 data; +}; + +/** + * struct cam_isp_vfe_wm_config - VFE 
write master config per port + * + * @port_type : Unique ID of output port + * @wm_mode : Write master mode + * 0x0 - Line based mode + * 0x1 - Frame based mode + * 0x2 - Index based mode, valid for BAF only + * @h_init : Horizontal starting coordinate in pixels. Must be a + * multiple of 3 for TP10 format + * @height : Height in pixels + * @width : Width in pixels + * @virtual_frame_en : Enabling virtual frame will prevent actual request from + * being sent to NOC + * @stride : Write master stride + * @offset : Write master offset + * @addr_reuse_en : Enabling addr-reuse will write output to the same addr + * after the last addr that was read from FIFO. + * @packer_format : Update packer format for Write master config + * @reserved_3 : Reserved field for Write master config + * For acquired version 3-->corresponds to context_id_mask + * @reserved_4 : Reserved field for Write master config + */ +struct cam_isp_vfe_wm_config { + __u32 port_type; + __u32 wm_mode; + __u32 h_init; + __u32 height; + __u32 width; + __u32 virtual_frame_en; + __u32 stride; + __u32 offset; + __u32 addr_reuse_en; + __u32 packer_format; + __u32 reserved_3; + __u32 reserved_4; +}; + +/** + * struct cam_isp_vfe_wm_config_v2 - VFE write master config per port + * + * @version : Version for this structure + * @port_type : Unique ID of output port + * @wm_mode : Write master mode + * 0x0 - Line based mode + * 0x1 - Frame based mode + * 0x2 - Index based mode, valid for BAF only + * @h_init : Horizontal starting coordinate in pixels. Must be a + * multiple of 3 for TP10 format + * @height : Height in pixels + * @width : Width in pixels + * @virtual_frame_en : Enabling virtual frame will prevent actual request from + * being sent to NOC + * @stride : Write master stride + * @offset : Write master offset + * @addr_reuse_en : Enabling addr-reuse will write output to the same addr + * after the last addr that was read from FIFO. 
+ * @packer_format : Update packer format for Write master config + * @offset_in_bytes : Offest in bytes + * @context_id_mask : context id mask in case of multi context + * @use_pack : Hint to use WM pack in case of per frame changes + * @enable : Enable/Disable WM at run time + * @params : Indicate params supported, to accommodate future changes + * @param_mask : Indicate params supported, to accommodate future changes + */ +struct cam_isp_vfe_wm_config_v2 { + __u32 version; + __u32 port_type; + __u32 wm_mode; + __u32 h_init; + __u32 height; + __u32 width; + __u32 virtual_frame_en; + __u32 stride; + __u32 offset; + __u32 addr_reuse_en; + __u32 packer_format; + __u32 offset_in_bytes; + __u32 context_id_mask; + __u32 use_pack; + __u32 enable; + __u32 param_mask; + __u32 params[5]; +}; + +/** + * struct cam_isp_vfe_out_config_v2 - VFE write master config + * + * @version : Version for this structure + * @num_ports : Number of ports + * @reserved : Reserved field + * @params : Indicate params supported, to accommodate future changes + * @param_mask : Indicate params supported, to accommodate future changes + * @wm_config : VFE out config + */ +struct cam_isp_vfe_out_config_v2 { + __u32 version; + __u32 num_ports; + __u32 reserved; + __u32 param_mask; + __u32 params[5]; + union { + struct cam_isp_vfe_wm_config_v2 wm_config[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_vfe_wm_config_v2, wm_config_flex); + }; +}; + +/** + * struct cam_isp_vfe_out_config - VFE write master config + * + * @num_ports : Number of ports + * @reserved : Reserved field + * @wm_config : VFE out config + */ +struct cam_isp_vfe_out_config { + __u32 num_ports; + __u32 reserved; + union { + struct cam_isp_vfe_wm_config wm_config[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_vfe_wm_config, wm_config_flex); + }; +}; + +/** + * struct cam_isp_mode_switch_info - Dynamic mode switch info + * + * @mup : MUP for incoming VC of next frame + * @num_expoures : Number of exposures + * @reserved : Reserved + */ +struct 
cam_isp_mode_switch_info{ + __u32 mup; + __u32 num_expoures; + __u32 reserved; +} __attribute__((packed)); + +/** + * struct cam_isp_nfi_mode_switch_info - New Frame ID (NFI) Based Switching Scheme info + * + * @version : Version info + * @mode_id : Mode ID value for the next frame + * @modeid_vc : The VC with which the embedded packet with MODE ID comes with. + * @x_offset : X offset of MODE ID location in horizontal + * direction within single EBD line packet, unit is byte. + * @y_offset : Y offset of MODE ID location in vertical direction + * within EBD Lines, unit is line packet. + * @reg_length : Number of bytes for each MODE ID + * @num_valid_params : Number of valid params + * @param_mask : Mask to indicate fields in params + * @params : Additional Params + */ +struct cam_isp_nfi_mode_switch_info { + __u32 version; + __u32 mode_id; + __u32 modeid_vc; + __u32 x_offset; + __u32 y_offset; + __u32 reg_length; + __u32 num_valid_params; + __u32 param_mask; + __u32 params[4]; +} __attribute__((packed)); + +/** + * struct cam_isp_sfe_wm_exp_order_config - SFE write master + * exposure order config + * + * This config will reflect for corresponding RM as well + * + * @res_type : output resource type defined in file + * cam_isp_sfe.h or cam_isp_ife.h + * @additional_params : Params for future use + */ +struct cam_isp_sfe_wm_exp_order_config { + __u32 res_type; + __u32 additional_params[5]; +}; + +/** + * struct cam_isp_sfe_exp_config - SFE out exposure config + * + * Exp order is determined by it's index in wm_config[] + * The last resource in the array will be considered as + * last [shortest] exposure. 
+ * + * @num_ports : Number of ports + * @reserved : Reserved field + * @wm_config : WM exp config + */ +struct cam_isp_sfe_exp_config { + __u32 num_ports; + __u32 reserved; + union { + struct cam_isp_sfe_wm_exp_order_config wm_config[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_sfe_wm_exp_order_config, wm_config_flex); + }; +}; + +/** + * struct cam_isp_discard_initial_frames - Discard init frames + * + * Some sensors require discarding the initial frames + * after the sensor is streamed on. The discard would be + * applied on all paths [IPP/PPP/RDIx] for the given + * pipeline. + * + * @num_frames : Number of frames to be discarded + * @discard_params : Params for future use + */ +struct cam_isp_discard_initial_frames { + __u32 num_frames; + __u32 discard_params[5]; +} __attribute__((packed)); + +/** + * struct cam_isp_wm_bw_limiter_config - ISP write master + * BW limter config + * + * + * @res_type : output resource type defined in file + * cam_isp_sfe.h or cam_isp_ife.h + * @enable_limiter : 0 for disable else enabled + * @counter_limit : Max counter value + * @additional_params : Params for future use + */ +struct cam_isp_wm_bw_limiter_config { + __u32 res_type; + __u32 enable_limiter; + __u32 counter_limit[CAM_PACKET_MAX_PLANES]; + __u32 additional_params[5]; +}; + +/** + * struct cam_isp_out_rsrc_bw_limiter_config - ISP out rsrc BW limiter config + * + * Configure BW limiter for ISP WMs + * + * @num_ports : Number of ports + * @reserved : Reserved field + * @bw_limit_config : WM BW limiter config + */ +struct cam_isp_out_rsrc_bw_limiter_config { + __u32 num_ports; + __u32 reserved; + union { + struct cam_isp_wm_bw_limiter_config bw_limiter_config[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_wm_bw_limiter_config, bw_limiter_config_flex); + }; +}; + +/** + * struct cam_isp_init_config - Init config for IFE/CSID/SFE + * + * Any configurations to be consumed by KMD + * prior to stream on - one time configuration per stream. 
+ * This blob is expected only in INIT packet. Per frame + * dynamic settings will not be part of this blob. + * + * @epoch_factor : % factor for epoch config with respect to frame height + * If factor is 60, epoch will be configured to 3/5th of + * the frame height. If this field is 0, + * KMD will configure default 50% of the height + * @additional_params : Reserved fields for future use + */ +struct cam_isp_init_config { + struct cam_isp_epoch_height_config { + __u32 epoch_factor; + } epoch_cfg; + + __u32 additional_params[19]; +}; + +/** + * struct cam_isp_lcr_rdi_config - RDI res id to be muxed to LCR + * + * Configure RDI Res id for LCR + * + * @res_id : Out port Res id, it is same as the out port + * configured during acquire. It would vary + * as per SFE or IFE. Based on this res id, + * Mux register in IFE will be programmed. + * Examples: + * IFE: + * CAM_ISP_IFE_OUT_RES_RDI_0 + * SFE: + * CAM_ISP_SFE_OUT_RES_RDI_0 + * This blob is expected as a part of init packet for + * all LCR cases. For SHDR-LCR cases, this can be used + * per request. For non-shdr cases, this blob is not + * expected as the input to LCR will remain same throughout + * the session + * @reserved : Reserved field + */ +struct cam_isp_lcr_rdi_config { + __u32 res_id; + __u32 reserved[5]; +}; + +/** + * struct cam_isp_per_path_exp_info - ISP per path exp order info + * + * This config will contain a map of input/output resource id + * to the exposure order. Additionally, hw context id is mapped + * for multi-context paths. + * + * @hw_type : hw type for exp info + * Valid values are: + * CAM_ISP_HW_CSID, + * CAM_ISP_HW_VFE, + * CAM_ISP_HW_IFE, + * CAM_ISP_HW_ISPIF, + * CAM_ISP_HW_IFE_LITE, + * CAM_ISP_HW_CSID_LITE, + * CAM_ISP_HW_SFE, + * CAM_ISP_HW_MC_TFE + * @res_id : Path or resource id + * @hw_ctxt_id : HW context id if applicable + * @exp_order : Exposure order for this path and hw context + * combination. 
+ * Valid values are: + * CAM_ISP_EXP_ORDER_LAST, + * CAM_ISP_EXP_ORDER_LAST_1, + * or CAM_ISP_EXP_ORDER_LAST_2 + * @num_valid_params : Number of valid params being used + * @valid_param_mask : Indicate the exact params being used + * @params : Params for future change + */ +struct cam_isp_per_path_exp_info { + __u32 hw_type; + __u32 res_id; + __u32 hw_ctxt_id; + __u32 exp_order; + __u32 num_valid_params; + __u32 valid_params_mask; + __u32 params[6]; +}; + +/** + * struct cam_isp_path_exp_order_update - ISP exp order update + * + * This config will contain number of processed and sensor out exposures + * applicable until updated. Also, this config contains an array + * of path to exposure order info map. + * + * @version : Version info + * @num_process_exp : Number of processed exposures + * @num_out_exp : Number of sensor output exposures + * @num_path_exp_info : Number of per path exp info + * @num_valid_params : Number of valid params being used + * @valid_param_mask : Indicate the exact params being used + * @params : Params for future change + * @exp_info : Exposure info for each path + */ +struct cam_isp_path_exp_order_update { + __u32 version; + __u32 num_process_exp; + __u32 num_sensor_out_exp; + __u32 num_path_exp_info; + __u32 num_valid_params; + __u32 valid_params_mask; + __u32 params[4]; + struct cam_isp_per_path_exp_info exp_info[]; +}; + +#define CAM_ISP_ACQUIRE_COMMON_VER0 0x1000 + +#define CAM_ISP_ACQUIRE_COMMON_SIZE_VER0 0x0 + +#define CAM_ISP_ACQUIRE_INPUT_VER0 0x2000 + +#define CAM_ISP_ACQUIRE_INPUT_SIZE_VER0 sizeof(struct cam_isp_in_port_info) + +#define CAM_ISP_ACQUIRE_OUT_VER0 0x3000 + +#define CAM_ISP_ACQUIRE_OUT_SIZE_VER0 sizeof(struct cam_isp_out_port_info) + +#endif /* __UAPI_CAM_ISP_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_ife.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_ife.h new file mode 100644 index 0000000000..6fbf4d5b33 --- /dev/null +++ 
b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_ife.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __UAPI_CAM_ISP_IFE_H__ +#define __UAPI_CAM_ISP_IFE_H__ + +/* IFE output port resource type (global unique)*/ +#define CAM_ISP_IFE_OUT_RES_BASE 0x3000 + +#define CAM_ISP_IFE_OUT_RES_FULL (CAM_ISP_IFE_OUT_RES_BASE + 0) +#define CAM_ISP_IFE_OUT_RES_DS4 (CAM_ISP_IFE_OUT_RES_BASE + 1) +#define CAM_ISP_IFE_OUT_RES_DS16 (CAM_ISP_IFE_OUT_RES_BASE + 2) +#define CAM_ISP_IFE_OUT_RES_RAW_DUMP (CAM_ISP_IFE_OUT_RES_BASE + 3) +#define CAM_ISP_IFE_OUT_RES_FD (CAM_ISP_IFE_OUT_RES_BASE + 4) +#define CAM_ISP_IFE_OUT_RES_PDAF (CAM_ISP_IFE_OUT_RES_BASE + 5) +#define CAM_ISP_IFE_OUT_RES_RDI_0 (CAM_ISP_IFE_OUT_RES_BASE + 6) +#define CAM_ISP_IFE_OUT_RES_RDI_1 (CAM_ISP_IFE_OUT_RES_BASE + 7) +#define CAM_ISP_IFE_OUT_RES_RDI_2 (CAM_ISP_IFE_OUT_RES_BASE + 8) +#define CAM_ISP_IFE_OUT_RES_RDI_3 (CAM_ISP_IFE_OUT_RES_BASE + 9) +#define CAM_ISP_IFE_OUT_RES_STATS_HDR_BE (CAM_ISP_IFE_OUT_RES_BASE + 10) +#define CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST (CAM_ISP_IFE_OUT_RES_BASE + 11) +#define CAM_ISP_IFE_OUT_RES_STATS_TL_BG (CAM_ISP_IFE_OUT_RES_BASE + 12) +#define CAM_ISP_IFE_OUT_RES_STATS_BF (CAM_ISP_IFE_OUT_RES_BASE + 13) +#define CAM_ISP_IFE_OUT_RES_STATS_AWB_BG (CAM_ISP_IFE_OUT_RES_BASE + 14) +#define CAM_ISP_IFE_OUT_RES_STATS_BHIST (CAM_ISP_IFE_OUT_RES_BASE + 15) +#define CAM_ISP_IFE_OUT_RES_STATS_RS (CAM_ISP_IFE_OUT_RES_BASE + 16) +#define CAM_ISP_IFE_OUT_RES_STATS_CS (CAM_ISP_IFE_OUT_RES_BASE + 17) +#define CAM_ISP_IFE_OUT_RES_STATS_IHIST (CAM_ISP_IFE_OUT_RES_BASE + 18) +#define CAM_ISP_IFE_OUT_RES_FULL_DISP (CAM_ISP_IFE_OUT_RES_BASE + 19) +#define CAM_ISP_IFE_OUT_RES_DS4_DISP (CAM_ISP_IFE_OUT_RES_BASE + 20) +#define CAM_ISP_IFE_OUT_RES_DS16_DISP (CAM_ISP_IFE_OUT_RES_BASE + 21) 
+#define CAM_ISP_IFE_OUT_RES_2PD (CAM_ISP_IFE_OUT_RES_BASE + 22) +#define CAM_ISP_IFE_OUT_RES_RDI_RD (CAM_ISP_IFE_OUT_RES_BASE + 23) +#define CAM_ISP_IFE_OUT_RES_LCR (CAM_ISP_IFE_OUT_RES_BASE + 24) +#define CAM_ISP_IFE_OUT_RES_SPARSE_PD (CAM_ISP_IFE_OUT_RES_BASE + 25) +#define CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD (CAM_ISP_IFE_OUT_RES_BASE + 26) +#define CAM_ISP_IFE_OUT_RES_AWB_BFW (CAM_ISP_IFE_OUT_RES_BASE + 27) +#define CAM_ISP_IFE_OUT_RES_STATS_AEC_BE (CAM_ISP_IFE_OUT_RES_BASE + 28) +#define CAM_ISP_IFE_OUT_RES_LTM_STATS (CAM_ISP_IFE_OUT_RES_BASE + 29) +#define CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST (CAM_ISP_IFE_OUT_RES_BASE + 30) +#define CAM_ISP_IFE_LITE_OUT_RES_STATS_BG (CAM_ISP_IFE_OUT_RES_BASE + 31) +#define CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW (CAM_ISP_IFE_OUT_RES_BASE + 32) +#define CAM_ISP_IFE_OUT_RES_STATS_CAF (CAM_ISP_IFE_OUT_RES_BASE + 33) +#define CAM_ISP_IFE_OUT_RES_STATS_BAYER_RS (CAM_ISP_IFE_OUT_RES_BASE + 34) +#define CAM_ISP_IFE_OUT_RES_PDAF_PARSED_DATA (CAM_ISP_IFE_OUT_RES_BASE + 35) +#define CAM_ISP_IFE_OUT_RES_STATS_ALSC (CAM_ISP_IFE_OUT_RES_BASE + 36) +#define CAM_ISP_IFE_OUT_RES_DS2 (CAM_ISP_IFE_OUT_RES_BASE + 37) +#define CAM_ISP_IFE_OUT_RES_IR (CAM_ISP_IFE_OUT_RES_BASE + 38) +#define CAM_ISP_IFE_OUT_RES_RDI_4 (CAM_ISP_IFE_OUT_RES_BASE + 39) +#define CAM_ISP_IFE_OUT_RES_STATS_TMC_BHIST (CAM_ISP_IFE_OUT_RES_BASE + 40) +#define CAM_ISP_IFE_OUT_RES_STATS_AF_BHIST (CAM_ISP_IFE_OUT_RES_BASE + 41) +#define CAM_ISP_IFE_OUT_RES_STATS_AEC_BHIST (CAM_ISP_IFE_OUT_RES_BASE + 42) + +/* IFE input port resource type (global unique) */ +#define CAM_ISP_IFE_IN_RES_BASE 0x4000 + +#define CAM_ISP_IFE_IN_RES_TPG (CAM_ISP_IFE_IN_RES_BASE + 0) +#define CAM_ISP_IFE_IN_RES_PHY_0 (CAM_ISP_IFE_IN_RES_BASE + 1) +#define CAM_ISP_IFE_IN_RES_PHY_1 (CAM_ISP_IFE_IN_RES_BASE + 2) +#define CAM_ISP_IFE_IN_RES_PHY_2 (CAM_ISP_IFE_IN_RES_BASE + 3) +#define CAM_ISP_IFE_IN_RES_PHY_3 (CAM_ISP_IFE_IN_RES_BASE + 4) +#define CAM_ISP_IFE_IN_RES_PHY_4 (CAM_ISP_IFE_IN_RES_BASE + 5) 
+#define CAM_ISP_IFE_IN_RES_PHY_5 (CAM_ISP_IFE_IN_RES_BASE + 6) +#define CAM_ISP_IFE_IN_RES_RD (CAM_ISP_IFE_IN_RES_BASE + 7) +#define CAM_ISP_IFE_IN_RES_CPHY_TPG_0 (CAM_ISP_IFE_IN_RES_BASE + 8) +#define CAM_ISP_IFE_IN_RES_CPHY_TPG_1 (CAM_ISP_IFE_IN_RES_BASE + 9) +#define CAM_ISP_IFE_IN_RES_CPHY_TPG_2 (CAM_ISP_IFE_IN_RES_BASE + 10) +#define CAM_ISP_IFE_IN_RES_MAX (CAM_ISP_IFE_IN_RES_BASE + 11) + +#endif /* __UAPI_CAM_ISP_IFE_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_sfe.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_sfe.h new file mode 100644 index 0000000000..f06bd0aabc --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_sfe.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __UAPI_CAM_ISP_SFE_H__ +#define __UAPI_CAM_ISP_SFE_H__ + +/* SFE output port resource type */ +#define CAM_ISP_SFE_OUT_RES_BASE 0x6000 + +#define CAM_ISP_SFE_OUT_RES_RDI_0 (CAM_ISP_SFE_OUT_RES_BASE + 0) +#define CAM_ISP_SFE_OUT_RES_RDI_1 (CAM_ISP_SFE_OUT_RES_BASE + 1) +#define CAM_ISP_SFE_OUT_RES_RDI_2 (CAM_ISP_SFE_OUT_RES_BASE + 2) +#define CAM_ISP_SFE_OUT_RES_RDI_3 (CAM_ISP_SFE_OUT_RES_BASE + 3) +#define CAM_ISP_SFE_OUT_RES_RDI_4 (CAM_ISP_SFE_OUT_RES_BASE + 4) +#define CAM_ISP_SFE_OUT_BE_STATS_0 (CAM_ISP_SFE_OUT_RES_BASE + 5) +#define CAM_ISP_SFE_OUT_BHIST_STATS_0 (CAM_ISP_SFE_OUT_RES_BASE + 6) +#define CAM_ISP_SFE_OUT_BE_STATS_1 (CAM_ISP_SFE_OUT_RES_BASE + 7) +#define CAM_ISP_SFE_OUT_BHIST_STATS_1 (CAM_ISP_SFE_OUT_RES_BASE + 8) +#define CAM_ISP_SFE_OUT_BE_STATS_2 (CAM_ISP_SFE_OUT_RES_BASE + 9) +#define CAM_ISP_SFE_OUT_BHIST_STATS_2 (CAM_ISP_SFE_OUT_RES_BASE + 10) +#define CAM_ISP_SFE_OUT_RES_LCR (CAM_ISP_SFE_OUT_RES_BASE + 11) +#define CAM_ISP_SFE_OUT_RES_RAW_DUMP (CAM_ISP_SFE_OUT_RES_BASE + 12) 
+#define CAM_ISP_SFE_OUT_RES_IR (CAM_ISP_SFE_OUT_RES_BASE + 13) +#define CAM_ISP_SFE_OUT_BAYER_RS_STATS_0 (CAM_ISP_SFE_OUT_RES_BASE + 14) +#define CAM_ISP_SFE_OUT_BAYER_RS_STATS_1 (CAM_ISP_SFE_OUT_RES_BASE + 15) +#define CAM_ISP_SFE_OUT_BAYER_RS_STATS_2 (CAM_ISP_SFE_OUT_RES_BASE + 16) +#define CAM_ISP_SFE_OUT_HDR_STATS (CAM_ISP_SFE_OUT_RES_BASE + 17) + +/* This Macro is deprecated and shall not be used */ +#define CAM_ISP_SFE_OUT_RES_MAX (CAM_ISP_SFE_OUT_RES_BASE + 17) + +/* SFE input port resource type */ +#define CAM_ISP_SFE_IN_RES_BASE 0x5000 + +#define CAM_ISP_SFE_INLINE_PIX (CAM_ISP_SFE_IN_RES_BASE + 0) +#define CAM_ISP_SFE_IN_RD_0 (CAM_ISP_SFE_IN_RES_BASE + 1) +#define CAM_ISP_SFE_IN_RD_1 (CAM_ISP_SFE_IN_RES_BASE + 2) +#define CAM_ISP_SFE_IN_RD_2 (CAM_ISP_SFE_IN_RES_BASE + 3) +#define CAM_ISP_SFE_IN_RES_MAX (CAM_ISP_SFE_IN_RES_BASE + 4) + +#endif /* __UAPI_CAM_ISP_SFE_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_tfe.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_tfe.h new file mode 100644 index 0000000000..4a307d6581 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_tfe.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_ISP_TFE_H__ +#define __UAPI_CAM_ISP_TFE_H__ + +/* TFE output port resource id number */ +#define CAM_ISP_TFE_OUT_RES_BASE 0x1 + +#define CAM_ISP_TFE_OUT_RES_FULL (CAM_ISP_TFE_OUT_RES_BASE + 0) +#define CAM_ISP_TFE_OUT_RES_RAW_DUMP (CAM_ISP_TFE_OUT_RES_BASE + 1) +#define CAM_ISP_TFE_OUT_RES_PDAF (CAM_ISP_TFE_OUT_RES_BASE + 2) +#define CAM_ISP_TFE_OUT_RES_RDI_0 (CAM_ISP_TFE_OUT_RES_BASE + 3) +#define CAM_ISP_TFE_OUT_RES_RDI_1 (CAM_ISP_TFE_OUT_RES_BASE + 4) +#define CAM_ISP_TFE_OUT_RES_RDI_2 (CAM_ISP_TFE_OUT_RES_BASE + 5) +#define CAM_ISP_TFE_OUT_RES_STATS_HDR_BE (CAM_ISP_TFE_OUT_RES_BASE + 6) +#define CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST (CAM_ISP_TFE_OUT_RES_BASE + 7) +#define CAM_ISP_TFE_OUT_RES_STATS_TL_BG (CAM_ISP_TFE_OUT_RES_BASE + 8) +#define CAM_ISP_TFE_OUT_RES_STATS_BF (CAM_ISP_TFE_OUT_RES_BASE + 9) +#define CAM_ISP_TFE_OUT_RES_STATS_AWB_BG (CAM_ISP_TFE_OUT_RES_BASE + 10) +#define CAM_ISP_TFE_OUT_RES_STATS_RS (CAM_ISP_TFE_OUT_RES_BASE + 11) +#define CAM_ISP_TFE_OUT_RES_DS4 (CAM_ISP_TFE_OUT_RES_BASE + 12) +#define CAM_ISP_TFE_OUT_RES_DS16 (CAM_ISP_TFE_OUT_RES_BASE + 13) +#define CAM_ISP_TFE_OUT_RES_AI (CAM_ISP_TFE_OUT_RES_BASE + 14) +#define CAM_ISP_TFE_OUT_RES_PD_LCR_STATS (CAM_ISP_TFE_OUT_RES_BASE + 15) +#define CAM_ISP_TFE_OUT_RES_PD_PREPROCESSED (CAM_ISP_TFE_OUT_RES_BASE + 16) +#define CAM_ISP_TFE_OUT_RES_PD_PARSED (CAM_ISP_TFE_OUT_RES_BASE + 17) +#define CAM_ISP_TFE_OUT_RES_MAX (CAM_ISP_TFE_OUT_RES_BASE + 18) + + +/* TFE input port resource type */ +#define CAM_ISP_TFE_IN_RES_BASE 0x1 + +#define CAM_ISP_TFE_IN_RES_TPG (CAM_ISP_TFE_IN_RES_BASE + 0) +#define CAM_ISP_TFE_IN_RES_PHY_0 (CAM_ISP_TFE_IN_RES_BASE + 1) +#define CAM_ISP_TFE_IN_RES_PHY_1 (CAM_ISP_TFE_IN_RES_BASE + 2) +#define CAM_ISP_TFE_IN_RES_PHY_2 (CAM_ISP_TFE_IN_RES_BASE + 3) +#define CAM_ISP_TFE_IN_RES_PHY_3 (CAM_ISP_TFE_IN_RES_BASE + 4) +#define CAM_ISP_TFE_IN_RES_CPHY_TPG_0 (CAM_ISP_TFE_IN_RES_BASE + 8) +#define CAM_ISP_TFE_IN_RES_CPHY_TPG_1 (CAM_ISP_TFE_IN_RES_BASE + 
9) +#define CAM_ISP_TFE_IN_RES_CPHY_TPG_2 (CAM_ISP_TFE_IN_RES_BASE + 10) +#define CAM_ISP_TFE_IN_RES_MAX (CAM_ISP_TFE_IN_RES_BASE + 11) + +#endif /* __UAPI_CAM_ISP_TFE_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_vfe.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_vfe.h new file mode 100644 index 0000000000..497093902b --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_isp_vfe.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef __UAPI_CAM_ISP_VFE_H__ +#define __UAPI_CAM_ISP_VFE_H__ + +/* VFE output port resource type (global unique) */ +#define CAM_ISP_VFE_OUT_RES_BASE 0x1000 + +#define CAM_ISP_VFE_OUT_RES_ENC (CAM_ISP_VFE_OUT_RES_BASE + 0) +#define CAM_ISP_VFE_OUT_RES_VIEW (CAM_ISP_VFE_OUT_RES_BASE + 1) +#define CAM_ISP_VFE_OUT_RES_VID (CAM_ISP_VFE_OUT_RES_BASE + 2) +#define CAM_ISP_VFE_OUT_RES_RDI_0 (CAM_ISP_VFE_OUT_RES_BASE + 3) +#define CAM_ISP_VFE_OUT_RES_RDI_1 (CAM_ISP_VFE_OUT_RES_BASE + 4) +#define CAM_ISP_VFE_OUT_RES_RDI_2 (CAM_ISP_VFE_OUT_RES_BASE + 5) +#define CAM_ISP_VFE_OUT_RES_RDI_3 (CAM_ISP_VFE_OUT_RES_BASE + 6) +#define CAM_ISP_VFE_OUT_RES_STATS_AEC (CAM_ISP_VFE_OUT_RES_BASE + 7) +#define CAM_ISP_VFE_OUT_RES_STATS_AF (CAM_ISP_VFE_OUT_RES_BASE + 8) +#define CAM_ISP_VFE_OUT_RES_STATS_AWB (CAM_ISP_VFE_OUT_RES_BASE + 9) +#define CAM_ISP_VFE_OUT_RES_STATS_RS (CAM_ISP_VFE_OUT_RES_BASE + 10) +#define CAM_ISP_VFE_OUT_RES_STATS_CS (CAM_ISP_VFE_OUT_RES_BASE + 11) +#define CAM_ISP_VFE_OUT_RES_STATS_IHIST (CAM_ISP_VFE_OUT_RES_BASE + 12) +#define CAM_ISP_VFE_OUT_RES_STATS_SKIN (CAM_ISP_VFE_OUT_RES_BASE + 13) +#define CAM_ISP_VFE_OUT_RES_STATS_BG (CAM_ISP_VFE_OUT_RES_BASE + 14) +#define CAM_ISP_VFE_OUT_RES_STATS_BF (CAM_ISP_VFE_OUT_RES_BASE + 15) +#define CAM_ISP_VFE_OUT_RES_STATS_BE (CAM_ISP_VFE_OUT_RES_BASE + 16) +#define 
CAM_ISP_VFE_OUT_RES_STATS_BHIST (CAM_ISP_VFE_OUT_RES_BASE + 17) +#define CAM_ISP_VFE_OUT_RES_STATS_BF_SCALE (CAM_ISP_VFE_OUT_RES_BASE + 18) +#define CAM_ISP_VFE_OUT_RES_STATS_HDR_BE (CAM_ISP_VFE_OUT_RES_BASE + 19) +#define CAM_ISP_VFE_OUT_RES_STATS_HDR_BHIST (CAM_ISP_VFE_OUT_RES_BASE + 20) +#define CAM_ISP_VFE_OUT_RES_STATS_AEC_BG (CAM_ISP_VFE_OUT_RES_BASE + 21) +#define CAM_ISP_VFE_OUT_RES_CAMIF_RAW (CAM_ISP_VFE_OUT_RES_BASE + 22) +#define CAM_ISP_VFE_OUT_RES_IDEAL_RAW (CAM_ISP_VFE_OUT_RES_BASE + 23) +#define CAM_ISP_VFE_OUT_RES_MAX (CAM_ISP_VFE_OUT_RES_BASE + 24) + +/* VFE input port_ resource type (global unique) */ +#define CAM_ISP_VFE_IN_RES_BASE 0x2000 + +#define CAM_ISP_VFE_IN_RES_TPG (CAM_ISP_VFE_IN_RES_BASE + 0) +#define CAM_ISP_VFE_IN_RES_PHY_0 (CAM_ISP_VFE_IN_RES_BASE + 1) +#define CAM_ISP_VFE_IN_RES_PHY_1 (CAM_ISP_VFE_IN_RES_BASE + 2) +#define CAM_ISP_VFE_IN_RES_PHY_2 (CAM_ISP_VFE_IN_RES_BASE + 3) +#define CAM_ISP_VFE_IN_RES_PHY_3 (CAM_ISP_VFE_IN_RES_BASE + 4) +#define CAM_ISP_VFE_IN_RES_PHY_4 (CAM_ISP_VFE_IN_RES_BASE + 5) +#define CAM_ISP_VFE_IN_RES_PHY_5 (CAM_ISP_VFE_IN_RES_BASE + 6) +#define CAM_ISP_VFE_IN_RES_FE (CAM_ISP_VFE_IN_RES_BASE + 7) +#define CAM_ISP_VFE_IN_RES_MAX (CAM_ISP_VFE_IN_RES_BASE + 8) + +#endif /* __UAPI_CAM_ISP_VFE_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_jpeg.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_jpeg.h new file mode 100644 index 0000000000..44a2b1199b --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_jpeg.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_JPEG_H__ +#define __UAPI_CAM_JPEG_H__ + +#include + +/* enc, dma, cdm(enc/dma) are used in querycap */ +#define CAM_JPEG_DEV_TYPE_ENC 0 +#define CAM_JPEG_DEV_TYPE_DMA 1 +#define CAM_JPEG_DEV_TYPE_MAX 2 + +#define CAM_JPEG_NUM_DEV_PER_RES_MAX 1 + +/* definitions needed for jpeg aquire device */ +#define CAM_JPEG_RES_TYPE_ENC 0 +#define CAM_JPEG_RES_TYPE_DMA 1 +#define CAM_JPEG_RES_TYPE_MAX 2 + +/* packet opcode types */ +#define CAM_JPEG_OPCODE_ENC_UPDATE 0 +#define CAM_JPEG_OPCODE_DMA_UPDATE 1 + +/* ENC input port resource type */ +#define CAM_JPEG_ENC_INPUT_IMAGE 0x0 +/* ENC output port resource type */ +#define CAM_JPEG_ENC_OUTPUT_IMAGE 0x1 +#define CAM_JPEG_ENC_IO_IMAGES_MAX 0x2 + +/* DMA input port resource type */ +#define CAM_JPEG_DMA_INPUT_IMAGE 0x0 +/* DMA output port resource type */ +#define CAM_JPEG_DMA_OUTPUT_IMAGE 0x1 +#define CAM_JPEG_DMA_IO_IMAGES_MAX 0x2 + +#define CAM_JPEG_IMAGE_MAX 0x2 + +/*JPEG Command Buffer Metadata types*/ +#define CAM_JPEG_ENC_PACKET_CONFIG_DATA 0x00 +#define CAM_JPEG_DMA_PACKET_PLANE0_CONFIG_DATA 0x01 +#define CAM_JPEG_DMA_PACKET_PLANE1_CONFIG_DATA 0x02 +#define CAM_JPEG_PACKET_INOUT_PARAM 0x03 +#define CAM_JPEG_PACKET_GENERIC_BLOB 0x04 + +/*JPEG Generic Blob Command buffer type*/ +#define CAM_JPEG_THUMBNAIL_SIZE_BLOB 0x01 + +/** + * struct cam_jpeg_thumbnail_size_blob - Blob containing information about maximum size of the + * thumbnail image + * + * This blob contains information about the maximum size of the encoded thumbnail image that is + * considered valid. 
+ * + * @threshold_size : Size threshold for the encoded Thumbnail image + * @reserved : Reserved field + */ +struct cam_jpeg_thumbnail_size_blob { + __u32 threshold_size; + __u32 reserved[3]; +}; + +/** + * struct cam_jpeg_dev_ver - Device information for particular hw type + * + * This is used to get device version info of JPEG ENC, JPEG DMA + * from hardware and use this info in CAM_QUERY_CAP IOCTL + * + * @size : Size of struct passed + * @dev_type : Hardware type for the cap info(jpeg enc, jpeg dma) + * @hw_ver : Major, minor and incr values of a device version + */ +struct cam_jpeg_dev_ver { + __u32 size; + __u32 dev_type; + struct cam_hw_version hw_ver; +}; + +/** + * struct cam_jpeg_query_cap_cmd - JPEG query device capability payload + * + * @dev_iommu_handle : Jpeg iommu handles for secure/non secure + * modes + * @cdm_iommu_handle : Iommu handles for secure/non secure modes + * @num_enc : Number of encoder + * @num_dma : Number of dma + * @dev_ver : Returned device capability array + */ +struct cam_jpeg_query_cap_cmd { + struct cam_iommu_handle dev_iommu_handle; + struct cam_iommu_handle cdm_iommu_handle; + __u32 num_enc; + __u32 num_dma; + struct cam_jpeg_dev_ver dev_ver[CAM_JPEG_DEV_TYPE_MAX]; +}; + +/** + * struct cam_jpeg_res_info - JPEG output resource info + * + * @format : Format of the resource + * @width : Width in pixels + * @height : Height in lines + * @fps : Fps + */ +struct cam_jpeg_res_info { + __u32 format; + __u32 width; + __u32 height; + __u32 fps; +}; + +/** + * struct cam_jpeg_acquire_dev_info - An JPEG device info + * + * @dev_type : Device type (ENC/DMA) + * @reserved : Reserved Bytes + * @in_res : In resource info + * @in_res : Iut resource info + */ +struct cam_jpeg_acquire_dev_info { + __u32 dev_type; + __u32 reserved; + struct cam_jpeg_res_info in_res; + struct cam_jpeg_res_info out_res; +}; + +/** + * struct cam_jpeg_config_inout_param_info - JPEG Config time + * input output params + * + * @clk_index : Input Param- clock 
selection index.(-1 default) + * @output_size: Output Param - jpeg encode/dma output size in + * bytes + */ +struct cam_jpeg_config_inout_param_info { + __s32 clk_index; + __s32 output_size; +}; + +#endif /* __UAPI_CAM_JPEG_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_lrme.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_lrme.h new file mode 100644 index 0000000000..0cd430218f --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_lrme.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __UAPI_CAM_LRME_H__ +#define __UAPI_CAM_LRME_H__ + +#include + +/* LRME Resource Types */ + +enum CAM_LRME_IO_TYPE { + CAM_LRME_IO_TYPE_TAR, + CAM_LRME_IO_TYPE_REF, + CAM_LRME_IO_TYPE_RES, + CAM_LRME_IO_TYPE_DS2, +}; + +#define CAM_LRME_INPUT_PORT_TYPE_TAR (1 << 0) +#define CAM_LRME_INPUT_PORT_TYPE_REF (1 << 1) + +#define CAM_LRME_OUTPUT_PORT_TYPE_DS2 (1 << 0) +#define CAM_LRME_OUTPUT_PORT_TYPE_RES (1 << 1) + +#define CAM_LRME_DEV_MAX 1 + + +struct cam_lrme_hw_version { + __u32 gen; + __u32 rev; + __u32 step; +}; + +struct cam_lrme_dev_cap { + struct cam_lrme_hw_version clc_hw_version; + struct cam_lrme_hw_version bus_rd_hw_version; + struct cam_lrme_hw_version bus_wr_hw_version; + struct cam_lrme_hw_version top_hw_version; + struct cam_lrme_hw_version top_titan_version; +}; + +/** + * struct cam_lrme_query_cap_cmd - LRME query device capability payload + * + * @dev_iommu_handle: LRME iommu handles for secure/non secure + * modes + * @cdm_iommu_handle: Iommu handles for secure/non secure modes + * @num_devices: number of hardware devices + * @dev_caps: Returned device capability array + */ +struct cam_lrme_query_cap_cmd { + struct cam_iommu_handle device_iommu; + struct cam_iommu_handle cdm_iommu; + __u32 num_devices; + struct cam_lrme_dev_cap dev_caps[CAM_LRME_DEV_MAX]; +}; + 
+struct cam_lrme_soc_info { + __u64 clock_rate; + __u64 bandwidth; + __u64 reserved[4]; +}; + +struct cam_lrme_acquire_args { + struct cam_lrme_soc_info lrme_soc_info; +}; + +#endif /* __UAPI_CAM_LRME_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_ope.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_ope.h new file mode 100644 index 0000000000..9a20067933 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_ope.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __UAPI_OPE_H__ +#define __UAPI_OPE_H__ + +#include +#include + +#define OPE_DEV_NAME_SIZE 128 + +/* OPE HW TYPE */ +#define OPE_HW_TYPE_OPE 0x1 +#define OPE_HW_TYPE_OPE_CDM 0x2 +#define OPE_HW_TYPE_MAX 0x3 + +/* OPE Device type */ +#define OPE_DEV_TYPE_OPE_RT 0x1 +#define OPE_DEV_TYPE_OPE_NRT 0x2 +#define OPE_DEV_TYPE_OPE_SEMI_RT 0x3 +#define OPE_DEV_TYPE_MAX 0x4 + +/* OPE Input Res Ports */ +#define OPE_IN_RES_FULL 0x1 +#define OPE_IN_RES_MAX OPE_IN_RES_FULL + +/* OPE Output Res Ports */ +#define OPE_OUT_RES_VIDEO 0x1 +#define OPE_OUT_RES_DISP 0x2 +#define OPE_OUT_RES_ARGB 0x3 +#define OPE_OUT_RES_STATS_RS 0x4 +#define OPE_OUT_RES_STATS_IHIST 0x5 +#define OPE_OUT_RES_STATS_LTM 0x6 +#define OPE_OUT_RES_MAX OPE_OUT_RES_STATS_LTM + +/* OPE packet opcodes */ +#define OPE_OPCODE_CONFIG 0x1 + +/* OPE Command Buffer Scope */ +#define OPE_CMD_BUF_SCOPE_BATCH 0x1 +#define OPE_CMD_BUF_SCOPE_FRAME 0x2 +#define OPE_CMD_BUF_SCOPE_PASS 0x3 +#define OPE_CMD_BUF_SCOPE_STRIPE 0x4 + +/* OPE Command Buffer Types */ +#define OPE_CMD_BUF_TYPE_DIRECT 0x1 +#define OPE_CMD_BUF_TYPE_INDIRECT 0x2 + +/* OPE Command Buffer Usage */ +#define OPE_CMD_BUF_UMD 0x1 +#define OPE_CMD_BUF_KMD 0x2 +#define OPE_CMD_BUF_DEBUG 0x3 + +/* OPE Single/Double Buffered */ 
+#define OPE_CMD_BUF_SINGLE_BUFFERED 0x1 +#define OPE_CMD_BUF_DOUBLE_BUFFERED 0x2 + +/* Command meta types */ +#define OPE_CMD_META_GENERIC_BLOB 0x1 + +/* Generic blob types */ +#define OPE_CMD_GENERIC_BLOB_CLK_V2 0x1 + +/* Stripe location */ +#define OPE_STRIPE_FULL 0x0 +#define OPE_STRIPE_LEFT 0x1 +#define OPE_STRIPE_RIGHT 0x2 +#define OPE_STRIPE_MIDDLE 0x3 + +#define OPE_MAX_CMD_BUFS 64 +#define OPE_MAX_IO_BUFS (OPE_OUT_RES_MAX + OPE_IN_RES_MAX) +#define OPE_MAX_PASS 1 +#define OPE_MAX_PLANES 2 +#define OPE_MAX_STRIPES 64 +#define OPE_MAX_BATCH_SIZE 16 + +/** + * struct ope_stripe_info - OPE stripe Info + * + * @offset: Offset in Bytes + * @x_init: X_init + * @stripe_location: Stripe location (OPE_STRIPE_XXX) + * @width: Width of a stripe + * @height: Height of a stripe + * @disable_bus: Flag to disable BUS master + * @reserved: Reserved + * + */ +struct ope_stripe_info { + __u32 offset; + __u32 x_init; + __u32 stripe_location; + __u32 width; + __u32 height; + __u32 disable_bus; + __u32 reserved; +}; + +/** + * struct ope_io_buf_info - OPE IO buffers meta + * + * @direction: Direction of a buffer of a port(Input/Output) + * @resource_type: Port type + * @num_planes: Number of planes for a port + * @pix_pattern: Pixel pattern for raw input + * @num_stripes: Stripes per plane + * @mem_handle: Memhandles of each Input/Output Port + * @plane_offset: Offsets of planes + * @length: Length of a plane buffer + * @plane_stride: Plane stride + * @height: Height of a plane buffer + * @format: Format + * @fence: Fence of a Port + * @stripe_info: Stripe Info + * + */ +struct ope_io_buf_info { + __u32 direction; + __u32 resource_type; + __u32 num_planes; + __u32 pix_pattern; + __u32 num_stripes[OPE_MAX_PLANES]; + __u32 mem_handle[OPE_MAX_PLANES]; + __u32 plane_offset[OPE_MAX_PLANES]; + __u32 length[OPE_MAX_PLANES]; + __u32 plane_stride[OPE_MAX_PLANES]; + __u32 height[OPE_MAX_PLANES]; + __u32 format; + __u32 fence; + struct ope_stripe_info 
stripe_info[OPE_MAX_PLANES][OPE_MAX_STRIPES]; +}; + +/** + * struct ope_frame_set - OPE frameset + * + * @num_io_bufs: Number of I/O buffers + * @reserved: Reserved + * @io_buf: IO buffer info for all Input and Output ports + * + */ +struct ope_frame_set { + __u32 num_io_bufs; + __u32 reserved; + struct ope_io_buf_info io_buf[OPE_MAX_IO_BUFS]; +}; + +/** + * struct ope_cmd_buf_info - OPE command buffer meta + * + * @mem_handle: Memory handle for command buffer + * @offset: Offset of a command buffer + * @size: Size of command buffer + * @length: Length of a command buffer + * @cmd_buf_scope : Scope of a command buffer (OPE_CMD_BUF_SCOPE_XXX) + * @type: Command buffer type (OPE_CMD_BUF_TYPE_XXX) + * @cmd_buf_usage: Usage of command buffer ( OPE_CMD_BUF_USAGE_XXX) + * @stripe_idx: Stripe index in a req, It is valid for SCOPE_STRIPE + * @cmd_buf_pass_idx: Pass index + * @prefetch_disable: Prefecth disable flag + * + */ + +struct ope_cmd_buf_info { + __u32 mem_handle; + __u32 offset; + __u32 size; + __u32 length; + __u32 cmd_buf_scope; + __u32 type; + __u32 cmd_buf_usage; + __u32 cmd_buf_buffered; + __u32 stripe_idx; + __u32 cmd_buf_pass_idx; + __u32 prefetch_disable; +}; + +/** + * struct ope_packet_payload - payload for a request + * + * @num_cmd_bufs: Number of command buffers + * @batch_size: Batch size in HFR mode + * @ope_cmd_buf_info: Command buffer meta data + * @ope_io_buf_info: Io buffer Info + * + */ +struct ope_frame_process { + __u32 num_cmd_bufs[OPE_MAX_BATCH_SIZE]; + __u32 batch_size; + struct ope_cmd_buf_info cmd_buf[OPE_MAX_BATCH_SIZE][OPE_MAX_CMD_BUFS]; + struct ope_frame_set frame_set[OPE_MAX_BATCH_SIZE]; +}; + +/** + * struct ope_clk_bw_request_v2 - clock and bandwidth for a request + * + * @budget_ns: Time required to process frame + * @frame_cycles: Frame cycles needed to process the frame + * @rt_flag: Flag to indicate real time stream + * @reserved: For memory alignment + * @axi_path: Per path vote info for OPE + * + */ +struct 
ope_clk_bw_request_v2 { + __u64 budget_ns; + __u32 frame_cycles; + __u32 rt_flag; + __u32 reserved; + __u32 num_paths; + union { + struct cam_axi_per_path_bw_vote axi_path[1]; + __DECLARE_FLEX_ARRAY(struct cam_axi_per_path_bw_vote, axi_path_flex); + }; +}; + +/** + * struct ope_hw_ver - Device information for OPE + * + * This is used to get device version info of + * OPE, CDM and use this info in CAM_QUERY_CAP IOCTL + * + * @hw_type: Hardware type for the cap info(OPE_HW_TYPE_XXX) + * @reserved: Reserved field + * @hw_ver: Major, minor and incr values of a hardware version + * + */ +struct ope_hw_ver { + __u32 hw_type; + __u32 reserved; + struct cam_hw_version hw_ver; +}; + +/** + * struct ope_query_cap_cmd - OPE query device capability payload + * + * @dev_iommu_handle: OPE iommu handles for secure/non secure modes + * @cdm_iommu_handle: CDM iommu handles for secure/non secure modes + * @num_ope: Number of OPEs + * @reserved: Reserved Parameter + * @hw_ver: Hardware capability array + */ +struct ope_query_cap_cmd { + struct cam_iommu_handle dev_iommu_handle; + struct cam_iommu_handle cdm_iommu_handle; + __u32 num_ope; + __u32 reserved; + struct ope_hw_ver hw_ver[OPE_DEV_TYPE_MAX]; +}; + +/** + * struct ope_out_res_info - OPE Output resource info + * + * @res_id: Resource ID + * @format: Output resource format + * @width: Output width + * @height: Output Height + * @alignment: Alignment + * @packer_format: Packer format + * @subsample_period: Subsample period in HFR + * @subsample_pattern: Subsample pattern in HFR + * @pixel_pattern: Pixel pattern + * @reserved: Reserved Parameter + * + */ +struct ope_out_res_info { + __u32 res_id; + __u32 format; + __u32 width; + __u32 height; + __u32 alignment; + __u32 packer_format; + __u32 subsample_period; + __u32 subsample_pattern; + __u32 pixel_pattern; + __u32 reserved; +}; + +/** + * struct ope_in_res_info - OPE Input resource info + * + * @res_id: Resource ID + * @format: Input resource format + * @width: Input width + * 
@height: Input Height + * @pixel_pattern: Pixel pattern + * @alignment: Alignment + * @unpacker_format: Unpacker format + * @max_stripe_size: Max stripe size supported for this instance configuration + * @fps: Frames per second + * @reserved: Reserved Parameter + * + */ +struct ope_in_res_info { + __u32 res_id; + __u32 format; + __u32 width; + __u32 height; + __u32 pixel_pattern; + __u32 alignment; + __u32 unpacker_format; + __u32 max_stripe_size; + __u32 fps; + __u32 reserved; +}; + +/** + * struct ope_acquire_dev_info - OPE Acquire Info + * + * @hw_type: OPE HW Types (OPE) + * @dev_type: Nature of Device Instance (RT/NRT) + * @dev_name: Name of Device Instance + * @nrt_stripes_for_arb: Program num stripes in OPE CDM for NRT device + * before setting ARB bit + * @secure_mode: Mode of Device operation (Secure or Non Secure) + * @batch_size: Batch size + * @num_in_res: Number of input resources (OPE_IN_RES_XXX) + * @in_res: Input resource info + * @num_out_res: Number of output resources (OPE_OUT_RES_XXX) + * @reserved: Reserved Parameter + * @out_res: Output resource info + * + */ +struct ope_acquire_dev_info { + __u32 hw_type; + __u32 dev_type; + char dev_name[OPE_DEV_NAME_SIZE]; + __u32 nrt_stripes_for_arb; + __u32 secure_mode; + __u32 batch_size; + __u32 num_in_res; + struct ope_in_res_info in_res[OPE_IN_RES_MAX]; + __u32 num_out_res; + __u32 reserved; + struct ope_out_res_info out_res[OPE_OUT_RES_MAX]; +} __attribute__((__packed__)); + +#endif /* __UAPI_OPE_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_req_mgr.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_req_mgr.h new file mode 100644 index 0000000000..db7e2a2d80 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_req_mgr.h @@ -0,0 +1,958 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __UAPI_LINUX_CAM_REQ_MGR_H +#define __UAPI_LINUX_CAM_REQ_MGR_H + +#include +#include +#include +#include +#include + +#define CAM_REQ_MGR_VNODE_NAME "cam-req-mgr-devnode" + +#define CAM_DEVICE_TYPE_BASE (MEDIA_ENT_F_OLD_BASE) +#define CAM_VNODE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE) +#define CAM_SENSOR_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 1) +#define CAM_IFE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 2) +#define CAM_ICP_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 3) +#define CAM_LRME_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 4) +#define CAM_JPEG_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 5) +#define CAM_FD_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 6) +#define CAM_CPAS_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 7) +#define CAM_CSIPHY_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 8) +#define CAM_ACTUATOR_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 9) +#define CAM_CCI_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 10) +#define CAM_FLASH_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 11) +#define CAM_EEPROM_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 12) +#define CAM_OIS_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 13) +#define CAM_CUSTOM_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 14) +#define CAM_OPE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 15) +#define CAM_TFE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 16) +#define CAM_CRE_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 17) +#define CAM_TPG_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 18) +#define CAM_TFE_MC_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 19) + +/* cam_req_mgr hdl info */ +#define CAM_REQ_MGR_HDL_IDX_POS 8 +#define CAM_REQ_MGR_HDL_IDX_MASK ((1 << CAM_REQ_MGR_HDL_IDX_POS) - 1) +#define CAM_REQ_MGR_GET_HDL_IDX(hdl) (hdl & CAM_REQ_MGR_HDL_IDX_MASK) + +/** + * Max handles supported by cam_req_mgr + * It includes both session and device handles + */ +#define CAM_REQ_MGR_MAX_HANDLES 64 +#define CAM_REQ_MGR_MAX_HANDLES_V2 256 +#define MAX_LINKS_PER_SESSION 2 + +/* Interval for cam_info_rate_limit_custom() */ +#define CAM_RATE_LIMIT_INTERVAL_5SEC 5 + +/* V4L 
event type which user space will subscribe to */ +#define V4L_EVENT_CAM_REQ_MGR_EVENT (V4L2_EVENT_PRIVATE_START + 0) + +/* Specific event ids to get notified in user space */ +#define V4L_EVENT_CAM_REQ_MGR_SOF 0 +#define V4L_EVENT_CAM_REQ_MGR_ERROR 1 +#define V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS 2 +#define V4L_EVENT_CAM_REQ_MGR_CUSTOM_EVT 3 +#define V4L_EVENT_CAM_REQ_MGR_NODE_EVENT 4 +#define V4L_EVENT_CAM_REQ_MGR_SOF_UNIFIED_TS 5 +#define V4L_EVENT_CAM_REQ_MGR_PF_ERROR 6 + +/* SOF Event status */ +#define CAM_REQ_MGR_SOF_EVENT_SUCCESS 0 +#define CAM_REQ_MGR_SOF_EVENT_ERROR 1 + +/* Link control operations */ +#define CAM_REQ_MGR_LINK_ACTIVATE 0 +#define CAM_REQ_MGR_LINK_DEACTIVATE 1 + +/* DMA buffer name length */ +#define CAM_DMA_BUF_NAME_LEN 128 +#define CAM_REQ_MGR_ALLOC_BUF_WITH_NAME 1 + +/** + * Request Manager : flush_type + * @CAM_REQ_MGR_FLUSH_TYPE_ALL: Req mgr will remove all the pending + * requests from input/processing queue. + * @CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ: Req mgr will remove only particular + * request id from input/processing queue. + * @CAM_REQ_MGR_FLUSH_TYPE_MAX: Max number of the flush type + * @opcode: CAM_REQ_MGR_FLUSH_REQ + */ +#define CAM_REQ_MGR_FLUSH_TYPE_ALL 0 +#define CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ 1 +#define CAM_REQ_MGR_FLUSH_TYPE_MAX 2 + +/** + * Request Manager : Sync Mode type + * @CAM_REQ_MGR_SYNC_MODE_NO_SYNC: Req mgr will apply non-sync mode for this + * request. + * @CAM_REQ_MGR_SYNC_MODE_SYNC: Req mgr will apply sync mode for this request. 
+ */ +#define CAM_REQ_MGR_SYNC_MODE_NO_SYNC 0 +#define CAM_REQ_MGR_SYNC_MODE_SYNC 1 + +/** + * Functional capabilities for flush + * "reserved" field in struct cam_req_mgr_flush_info to be set + * to enable/disable these functionalities + */ +#define CAM_REQ_MGR_ENABLE_SENSOR_STANDBY BIT(0) + +/** + * struct cam_req_mgr_event_data + * @session_hdl: session handle + * @link_hdl: link handle + * @frame_id: frame id + * @reserved: reserved for 64 bit aligngment + * @req_id: request id + * @tv_sec: timestamp in seconds + * @tv_usec: timestamp in micro seconds + */ +struct cam_req_mgr_event_data { + __s32 session_hdl; + __s32 link_hdl; + __s32 frame_id; + __s32 reserved; + __s64 req_id; + __u64 tv_sec; + __u64 tv_usec; +}; + +/** + * struct cam_req_mgr_session_info + * @session_hdl: In/Output param - session_handle + * @opcode1: CAM_REQ_MGR_CREATE_SESSION + * @opcode2: CAM_REQ_MGR_DESTROY_SESSION + */ +struct cam_req_mgr_session_info { + __s32 session_hdl; + __s32 reserved; +}; + +/** + * struct cam_req_mgr_link_info + * @session_hdl: Input param - Identifier for CSL session + * @num_devices: Input Param - Num of devices to be linked + * @dev_hdls: Input param - List of device handles to be linked + * @link_hdl: Output Param -Identifier for link + * @opcode: CAM_REQ_MGR_LINK + */ +struct cam_req_mgr_link_info { + __s32 session_hdl; + __u32 num_devices; + __s32 dev_hdls[CAM_REQ_MGR_MAX_HANDLES]; + __s32 link_hdl; +}; + +struct cam_req_mgr_link_info_v2 { + __s32 session_hdl; + __u32 num_devices; + __s32 dev_hdls[CAM_REQ_MGR_MAX_HANDLES_V2]; + __s32 link_hdl; +}; + +struct cam_req_mgr_ver_info { + __u32 version; + union { + struct cam_req_mgr_link_info link_info_v1; + struct cam_req_mgr_link_info_v2 link_info_v2; + } u; +}; +/** + * struct cam_req_mgr_unlink_info + * @session_hdl: input param - session handle + * @link_hdl: input param - link handle + * @opcode: CAM_REQ_MGR_UNLINK + */ +struct cam_req_mgr_unlink_info { + __s32 session_hdl; + __s32 link_hdl; +}; + +/** + * 
struct cam_req_mgr_flush_info + * @brief: User can tell drivers to flush a particular request id or + * flush all requests from its pending processing queue. Flush is a + * blocking call and driver shall ensure all requests are flushed + * before returning. + * @session_hdl: Input param - Identifier for CSL session + * @link_hdl: Input Param -Identifier for link + * @flush_type: User can cancel a particular req id or can flush + * all requests in queue + * @reserved: reserved for 64 bit aligngment + * @req_id: field is valid only if flush type is cancel request + * for flush all this field value is not considered. + * @opcode: CAM_REQ_MGR_FLUSH_REQ + */ +struct cam_req_mgr_flush_info { + __s32 session_hdl; + __s32 link_hdl; + __u32 flush_type; + __u32 reserved; + __s64 req_id; +}; + +/** struct cam_req_mgr_sched_request + * @session_hdl: Input param - Identifier for CSL session + * @link_hdl: Input Param -Identifier for link + * inluding itself. + * @bubble_enable: Input Param - Cam req mgr will do bubble recovery if this + * flag is set. + * @sync_mode: Type of Sync mode for this request + * @additional_timeout: Additional timeout value (in ms) associated with + * this request. This value needs to be 0 in cases where long exposure is + * not configured for the sensor.The max timeout that will be supported + * is 50000 ms + * @reserved: Reserved + * @req_id: Input Param - Request Id from which all requests will be flushed + */ +struct cam_req_mgr_sched_request { + __s32 session_hdl; + __s32 link_hdl; + __s32 bubble_enable; + __s32 sync_mode; + __s32 additional_timeout; + __s32 reserved; + __s64 req_id; +}; + +/** struct cam_req_mgr_sched_request_v2 + * @version: Version number + * @session_hdl: Input param - Identifier for CSL session + * @link_hdl: Input Param -Identifier for link including itself. + * @bubble_enable: Input Param - Cam req mgr will do bubble recovery if this + * flag is set. 
+ * @sync_mode: Type of Sync mode for this request + * @additional_timeout: Additional timeout value (in ms) associated with + * this request. This value needs to be 0 in cases where long exposure is + * not configured for the sensor.The max timeout that will be supported + * is 50000 ms + * @num_links: Input Param - Num of links for sync + * @num_valid_params: Number of valid params + * @req_id: Input Param - Request Id from which all requests will be flushed + * @link_hdls: Input Param - Array of link handles to be for sync + * @param_mask: mask to indicate what the parameters are + * @params: parameters passed from user space + */ +struct cam_req_mgr_sched_request_v2 { + __s32 version; + __s32 session_hdl; + __s32 link_hdl; + __s32 bubble_enable; + __s32 sync_mode; + __s32 additional_timeout; + __s32 num_links; + __s32 num_valid_params; + __s64 req_id; + __s32 link_hdls[MAX_LINKS_PER_SESSION]; + __s32 param_mask; + __s32 params[5]; +}; + +/** struct cam_req_mgr_sched_request_v3 + * @version: Version number + * @session_hdl: Input param - Identifier for CSL session + * @link_hdl: Input Param -Identifier for link including itself. + * @bubble_enable: Input Param - Cam req mgr will do bubble recovery if this + * flag is set. + * @sync_mode: Type of Sync mode for this request + * @additional_timeout: Additional timeout value (in ms) associated with + * this request. 
This value needs to be 0 in cases where long exposure is + * not configured for the sensor.The max timeout that will be supported + * is 50000 ms + * @num_links: Input Param - Num of links for sync + * @num_valid_params: Number of valid params + * @req_id: Input Param - Request Id from which all requests will be flushed + * @param_mask: mask to indicate what the parameters are + * @params: parameters passed from user space + * @link_hdls: Input Param - Array of link handles to be for sync + */ +struct cam_req_mgr_sched_request_v3 { + __s32 version; + __s32 session_hdl; + __s32 link_hdl; + __s32 bubble_enable; + __s32 sync_mode; + __s32 additional_timeout; + __s32 num_links; + __s32 num_valid_params; + __s64 req_id; + __s32 param_mask; + __s32 params[5]; + __s32 link_hdls[]; +}; + +/** + * struct cam_req_mgr_sync_mode + * @session_hdl: Input param - Identifier for CSL session + * @sync_mode: Input Param - Type of sync mode + * @num_links: Input Param - Num of links in sync mode (Valid only + * when sync_mode is one of SYNC enabled modes) + * @link_hdls: Input Param - Array of link handles to be in sync mode + * (Valid only when sync_mode is one of SYNC + * enabled modes) + * @master_link_hdl: Input Param - To dictate which link's SOF drives system + * (Valid only when sync_mode is one of SYNC + * enabled modes) + * + * @opcode: CAM_REQ_MGR_SYNC_MODE + */ +struct cam_req_mgr_sync_mode { + __s32 session_hdl; + __s32 sync_mode; + __s32 num_links; + __s32 link_hdls[MAX_LINKS_PER_SESSION]; + __s32 master_link_hdl; + __s32 reserved; +}; + +/** + * struct cam_req_mgr_link_control + * @ops: Link operations: activate/deactive + * @session_hdl: Input param - Identifier for CSL session + * @num_links: Input Param - Num of links + * @reserved: reserved field + * @init_timeout: To account for INIT exposure settings (ms) + * If there is no change in exp settings + * field needs to assigned to 0ms. 
+ * @link_hdls: Input Param - Links to be activated/deactivated + * + * @opcode: CAM_REQ_MGR_LINK_CONTROL + */ +struct cam_req_mgr_link_control { + __s32 ops; + __s32 session_hdl; + __s32 num_links; + __s32 reserved; + __s32 init_timeout[MAX_LINKS_PER_SESSION]; + __s32 link_hdls[MAX_LINKS_PER_SESSION]; +}; + +/** + * struct cam_req_mgr_link_properties + * @version: Input param - Version number + * @session_hdl: Input param - Identifier for CSL session + * @link_hdl: Input Param - Identifier for link + * @properties_mask: Input Param - Properties mask to indicate if current + * link enables some special properties + * @num_valid_params: Input Param - Number of valid params + * @param_mask: Input Param - Mask to indicate what are the parameters + * @params: Input Param - Parameters passed from user space + */ +/* CAM_REQ_MGR_LINK_PROPERTIES */ +struct cam_req_mgr_link_properties { + __s32 version; + __s32 session_hdl; + __s32 link_hdl; + __u32 properties_mask; + __s32 num_valid_params; + __u32 param_mask; + __s32 params[6]; +}; + +/** + * Request Manager : Link properties codes + * @CAM_LINK_PROPERTY_NONE : No special property + * @CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF : Standby the sensor after EOF + */ +#define CAM_LINK_PROPERTY_NONE 0 +#define CAM_LINK_PROPERTY_SENSOR_STANDBY_AFTER_EOF BIT(0) + +/** + * cam_req_mgr specific opcode ids + */ +#define CAM_REQ_MGR_CREATE_DEV_NODES (CAM_COMMON_OPCODE_MAX + 1) +#define CAM_REQ_MGR_CREATE_SESSION (CAM_COMMON_OPCODE_MAX + 2) +#define CAM_REQ_MGR_DESTROY_SESSION (CAM_COMMON_OPCODE_MAX + 3) +#define CAM_REQ_MGR_LINK (CAM_COMMON_OPCODE_MAX + 4) +#define CAM_REQ_MGR_UNLINK (CAM_COMMON_OPCODE_MAX + 5) +#define CAM_REQ_MGR_SCHED_REQ (CAM_COMMON_OPCODE_MAX + 6) +#define CAM_REQ_MGR_FLUSH_REQ (CAM_COMMON_OPCODE_MAX + 7) +#define CAM_REQ_MGR_SYNC_MODE (CAM_COMMON_OPCODE_MAX + 8) +#define CAM_REQ_MGR_ALLOC_BUF (CAM_COMMON_OPCODE_MAX + 9) +#define CAM_REQ_MGR_MAP_BUF (CAM_COMMON_OPCODE_MAX + 10) +#define CAM_REQ_MGR_RELEASE_BUF 
(CAM_COMMON_OPCODE_MAX + 11) +#define CAM_REQ_MGR_CACHE_OPS (CAM_COMMON_OPCODE_MAX + 12) +#define CAM_REQ_MGR_LINK_CONTROL (CAM_COMMON_OPCODE_MAX + 13) +#define CAM_REQ_MGR_LINK_V2 (CAM_COMMON_OPCODE_MAX + 14) +#define CAM_REQ_MGR_REQUEST_DUMP (CAM_COMMON_OPCODE_MAX + 15) +#define CAM_REQ_MGR_SCHED_REQ_V2 (CAM_COMMON_OPCODE_MAX + 16) +#define CAM_REQ_MGR_LINK_PROPERTIES (CAM_COMMON_OPCODE_MAX + 17) +#define CAM_REQ_MGR_ALLOC_BUF_V2 (CAM_COMMON_OPCODE_MAX + 18) +#define CAM_REQ_MGR_MAP_BUF_V2 (CAM_COMMON_OPCODE_MAX + 19) +#define CAM_REQ_MGR_MEM_CPU_ACCESS_OP (CAM_COMMON_OPCODE_MAX + 20) +#define CAM_REQ_MGR_QUERY_CAP (CAM_COMMON_OPCODE_MAX + 21) +#define CAM_REQ_MGR_SCHED_REQ_V3 (CAM_COMMON_OPCODE_MAX + 22) + +/* end of cam_req_mgr opcodes */ + +#define CAM_MEM_FLAG_HW_READ_WRITE (1<<0) +#define CAM_MEM_FLAG_HW_READ_ONLY (1<<1) +#define CAM_MEM_FLAG_HW_WRITE_ONLY (1<<2) +#define CAM_MEM_FLAG_KMD_ACCESS (1<<3) +#define CAM_MEM_FLAG_UMD_ACCESS (1<<4) +#define CAM_MEM_FLAG_PROTECTED_MODE (1<<5) +#define CAM_MEM_FLAG_CMD_BUF_TYPE (1<<6) +#define CAM_MEM_FLAG_PIXEL_BUF_TYPE (1<<7) +#define CAM_MEM_FLAG_STATS_BUF_TYPE (1<<8) +#define CAM_MEM_FLAG_PACKET_BUF_TYPE (1<<9) +#define CAM_MEM_FLAG_CACHE (1<<10) +#define CAM_MEM_FLAG_HW_SHARED_ACCESS (1<<11) +#define CAM_MEM_FLAG_CDSP_OUTPUT (1<<12) +#define CAM_MEM_FLAG_DISABLE_DELAYED_UNMAP (1<<13) +#define CAM_MEM_FLAG_KMD_DEBUG_FLAG (1<<14) +#define CAM_MEM_FLAG_EVA_NOPIXEL (1<<15) +#define CAM_MEM_FLAG_HW_AND_CDM_OR_SHARED (1<<16) +#define CAM_MEM_FLAG_UBWC_P_HEAP (1<<17) +/* Allocation forced to camera heap */ +#define CAM_MEM_FLAG_USE_CAMERA_HEAP_ONLY (1<<18) + +/* Allocation forced to system heap */ +#define CAM_MEM_FLAG_USE_SYS_HEAP_ONLY (1<<19) + +#define CAM_MEM_MMU_MAX_HANDLE 16 + +/* Maximum allowed buffers in existence */ +#define CAM_MEM_BUFQ_MAX 2048 + +#define CAM_MEM_MGR_SECURE_BIT_POS 15 +#define CAM_MEM_MGR_HDL_IDX_SIZE 15 +#define CAM_MEM_MGR_HDL_FD_SIZE 16 +#define CAM_MEM_MGR_HDL_IDX_END_POS 16 +#define 
CAM_MEM_MGR_HDL_FD_END_POS 32 + +#define CAM_MEM_MGR_HDL_IDX_MASK ((1 << CAM_MEM_MGR_HDL_IDX_SIZE) - 1) + +#define GET_MEM_HANDLE(idx, fd) \ + ((idx & CAM_MEM_MGR_HDL_IDX_MASK) | \ + (fd << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE))) \ + +#define GET_FD_FROM_HANDLE(hdl) \ + (hdl >> (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE)) \ + +#define CAM_MEM_MGR_GET_HDL_IDX(hdl) (hdl & CAM_MEM_MGR_HDL_IDX_MASK) + +#define CAM_MEM_MGR_SET_SECURE_HDL(hdl, flag) \ + ((flag) ? (hdl |= (1 << CAM_MEM_MGR_SECURE_BIT_POS)) : \ + ((hdl) &= ~(1 << CAM_MEM_MGR_SECURE_BIT_POS))) + +#define CAM_MEM_MGR_IS_SECURE_HDL(hdl) \ + (((hdl) & \ + (1<> CAM_MEM_MGR_SECURE_BIT_POS) + +/** + * memory allocation type + */ +#define CAM_MEM_DMA_NONE 0 +#define CAM_MEM_DMA_BIDIRECTIONAL 1 +#define CAM_MEM_DMA_TO_DEVICE 2 +#define CAM_MEM_DMA_FROM_DEVICE 3 + +/** + * memory cache operation + */ +#define CAM_MEM_CLEAN_CACHE 1 +#define CAM_MEM_INV_CACHE 2 +#define CAM_MEM_CLEAN_INV_CACHE 3 + +/** + * memory CPU access operation + */ +#define CAM_MEM_BEGIN_CPU_ACCESS BIT(0) +#define CAM_MEM_END_CPU_ACCESS BIT(1) + +/** + * memory CPU access type + */ +#define CAM_MEM_CPU_ACCESS_READ BIT(0) +#define CAM_MEM_CPU_ACCESS_WRITE BIT(1) + +/** + * Feature mask returned in query_cap + */ +#define CAM_REQ_MGR_MEM_UBWC_P_HEAP_SUPPORTED BIT(0) +#define CAM_REQ_MGR_MEM_CAMERA_HEAP_SUPPORTED BIT(1) + +/** + * struct cam_req_mgr_query_cap + * @version: Struct version + * @feature_mask Supported features + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + */ +struct cam_req_mgr_query_cap { + __u32 version; + __u64 feature_mask; + __u32 num_valid_params; + __u32 valid_param_mask; + __s32 params[5]; +}; + +/** + * struct cam_mem_alloc_out_params + * @buf_handle: buffer handle + * @fd: output buffer file descriptor + * @vaddr: virtual address pointer + */ +struct cam_mem_alloc_out_params { + __u32 
buf_handle; + __s32 fd; + __u64 vaddr; +}; + +/** + * struct cam_mem_map_out_params + * @buf_handle: buffer handle + * @size: size of the buffer being mapped + * @vaddr: virtual address pointer + */ +struct cam_mem_map_out_params { + __u32 buf_handle; + __u32 size; + __u64 vaddr; +}; + +/** + * struct cam_mem_mgr_alloc_cmd + * @len: size of buffer to allocate + * @align: alignment of the buffer + * @mmu_hdls: array of mmu handles + * @num_hdl: number of handles + * @flags: flags of the buffer + * @out: out params + */ +/* CAM_REQ_MGR_ALLOC_BUF */ +struct cam_mem_mgr_alloc_cmd { + __u64 len; + __u64 align; + __s32 mmu_hdls[CAM_MEM_MMU_MAX_HANDLE]; + __u32 num_hdl; + __u32 flags; + struct cam_mem_alloc_out_params out; +}; + +/** + * struct cam_mem_mgr_alloc_cmd_v2 + * @version: Struct version + * @num_hdl: number of handles + * @mmu_hdls: array of mmu handles + * @len: size of buffer to allocate + * @align: alignment of the buffer + * @vmids: reserved + * @buf_name: DMA buffer name + * @flags: flags of the buffer + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + * @out: out params + */ +/* CAM_REQ_MGR_ALLOC_BUF_V2 */ +struct cam_mem_mgr_alloc_cmd_v2 { + __u32 version; + __u32 num_hdl; + __s32 mmu_hdls[CAM_MEM_MMU_MAX_HANDLE]; + __u64 len; + __u64 align; + __u64 vmids; + char buf_name[CAM_DMA_BUF_NAME_LEN]; + __u32 flags; + __u32 num_valid_params; + __u32 valid_param_mask; + __s32 params[5]; + struct cam_mem_alloc_out_params out; +}; + +/** + * struct cam_mem_mgr_map_cmd + * @mmu_hdls: array of mmu handles + * @num_hdl: number of handles + * @flags: flags of the buffer + * @fd: output buffer file descriptor + * @reserved: reserved field + * @out: out params + */ + +/* CAM_REQ_MGR_MAP_BUF */ +struct cam_mem_mgr_map_cmd { + __s32 mmu_hdls[CAM_MEM_MMU_MAX_HANDLE]; + __u32 num_hdl; + __u32 flags; + __s32 fd; + __u32 reserved; + struct cam_mem_map_out_params out; +}; + 
+/** + * struct cam_mem_mgr_map_cmd_v2 + * @version: Struct version + * @fd: output buffer file descriptor + * @mmu_hdls: array of mmu handles + * @num_hdl: number of handles + * @flags: flags of the buffer + * @vmids: reserved + * @buf_name: DMA buffer name + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + * @out: out params + */ + +/* CAM_REQ_MGR_MAP_BUF_V2 */ +struct cam_mem_mgr_map_cmd_v2 { + __u32 version; + __s32 fd; + __s32 mmu_hdls[CAM_MEM_MMU_MAX_HANDLE]; + __u32 num_hdl; + __u32 flags; + __u64 vmids; + char buf_name[CAM_DMA_BUF_NAME_LEN]; + __u32 num_valid_params; + __u32 valid_param_mask; + __s32 params[4]; + struct cam_mem_map_out_params out; +}; + + +/** + * struct cam_mem_mgr_map_cmd + * @buf_handle: buffer handle + * @reserved: reserved field + */ +/* CAM_REQ_MGR_RELEASE_BUF */ +struct cam_mem_mgr_release_cmd { + __s32 buf_handle; + __u32 reserved; +}; + +/** + * struct cam_mem_mgr_map_cmd + * @buf_handle: buffer handle + * @ops: cache operations + */ +/* CAM_REQ_MGR_CACHE_OPS */ +struct cam_mem_cache_ops_cmd { + __s32 buf_handle; + __u32 mem_cache_ops; +}; + +/** + * struct cam_mem_cpu_access_op + * @version: Struct version + * @buf_handle: buffer handle + * @access: CPU access operation. Allowed params : + * CAM_MEM_BEGIN_CPU_ACCESS + * CAM_MEM_END_CPU_ACCESS + * both + * @access_type: CPU access type. 
Allowed params : + * CAM_MEM_CPU_ACCESS_READ + * CAM_MEM_CPU_ACCESS_WRITE + * both + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + */ +/* CAM_REQ_MGR_MEM_CPU_ACCESS_OP */ +struct cam_mem_cpu_access_op { + __u32 version; + __s32 buf_handle; + __u32 access; + __u32 access_type; + __u32 num_valid_params; + __u32 valid_param_mask; + __s32 params[4]; +}; + +/** + * Request Manager : error message type + * @CAM_REQ_MGR_ERROR_TYPE_DEVICE: Device error message, fatal to session + * @CAM_REQ_MGR_ERROR_TYPE_REQUEST: Error on a single request, not fatal + * @CAM_REQ_MGR_ERROR_TYPE_BUFFER: Buffer was not filled, not fatal + * @CAM_REQ_MGR_ERROR_TYPE_RECOVERY: Fatal error, can be recovered + * @CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE: SOF freeze, can be recovered + * @CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY: Full recovery, can be recovered + * @CAM_REQ_MGR_ERROR_TYPE_PAGE_FAULT: page fault, can be recovered + * @CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY: Do internal overflow recovery, notify UMD + */ +#define CAM_REQ_MGR_ERROR_TYPE_DEVICE 0 +#define CAM_REQ_MGR_ERROR_TYPE_REQUEST 1 +#define CAM_REQ_MGR_ERROR_TYPE_BUFFER 2 +#define CAM_REQ_MGR_ERROR_TYPE_RECOVERY 3 +#define CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE 4 +#define CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY 5 +#define CAM_REQ_MGR_ERROR_TYPE_PAGE_FAULT 6 +#define CAM_REQ_MGR_WARN_TYPE_KMD_RECOVERY 7 + +/** + * Request Manager : Error codes + * @CAM_REQ_MGR_ISP_UNREPORTED_ERROR : No Error Code reported + * @CAM_REQ_MGR_LINK_STALLED_ERROR : Unable to apply requests on link + * @CAM_REQ_MGR_CSID_FATAL_ERROR : CSID FATAL Error + * @CAM_REQ_MGR_CSID_FIFO_OVERFLOW_ERROR : CSID OutputFIFO Overflow + * @CAM_REQ_MGR_CSID_RECOVERY_OVERFLOW_ERROR : CSID Recovery Overflow + * @CAM_REQ_MGR_CSID_LANE_FIFO_OVERFLOW_ERROR : CSID Lane fifo overflow + * @CAM_REQ_MGR_CSID_PIXEL_COUNT_MISMATCH : CSID Pixel Count Mismatch + * 
@CAM_REQ_MGR_CSID_RX_PKT_HDR_CORRUPTION : Packet header received by the csid rx is corrupted + * @CAM_REQ_MGR_CSID_MISSING_PKT_HDR_DATA : Lesser data received in packet header than expected + * @CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING : Fatal Error encountered while switching the sensors + * @CAM_REQ_MGR_CSID_UNBOUNDED_FRAME : No EOF in the frame or the frame started with eof + * @CAM_REQ_MGR_ICP_NO_MEMORY : ICP No Memory + * @CAM_REQ_MGR_ICP_ERROR_SYSTEM_FAILURE : ICP system failure + * @CAM_REQ_MGR_CSID_MISSING_EOT : CSID is missing EOT on one or more lanes + * @CAM_REQ_MGR_CSID_RX_PKT_PAYLOAD_CORRUPTION : CSID long packet payload CRC mismatch + * @CAM_REQ_MGR_SENSOR_STREAM_OFF_FAILED : Failed to stream off sensor + * @CAM_REQ_MGR_VALID_SHUTTER_DROPPED : Valid shutter dropped + * @CAM_REQ_MGR_ISP_ERR_HWPD_VIOLATION : HWPD image size violation + * @CAM_REQ_MGR_ISP_ERR_SETTING_MISMATCHED : Setting mismatched between sensor and isp + */ +#define CAM_REQ_MGR_ISP_UNREPORTED_ERROR 0 +#define CAM_REQ_MGR_LINK_STALLED_ERROR BIT(0) +#define CAM_REQ_MGR_CSID_FATAL_ERROR BIT(1) +#define CAM_REQ_MGR_CSID_FIFO_OVERFLOW_ERROR BIT(2) +#define CAM_REQ_MGR_CSID_RECOVERY_OVERFLOW_ERROR BIT(3) +#define CAM_REQ_MGR_CSID_LANE_FIFO_OVERFLOW_ERROR BIT(4) +#define CAM_REQ_MGR_CSID_PIXEL_COUNT_MISMATCH BIT(5) +#define CAM_REQ_MGR_CSID_RX_PKT_HDR_CORRUPTION BIT(6) +#define CAM_REQ_MGR_CSID_MISSING_PKT_HDR_DATA BIT(7) +#define CAM_REQ_MGR_CSID_ERR_ON_SENSOR_SWITCHING BIT(8) +#define CAM_REQ_MGR_CSID_UNBOUNDED_FRAME BIT(9) +#define CAM_REQ_MGR_ICP_NO_MEMORY BIT(10) +#define CAM_REQ_MGR_ICP_SYSTEM_FAILURE BIT(11) +#define CAM_REQ_MGR_CSID_MISSING_EOT BIT(12) +#define CAM_REQ_MGR_CSID_RX_PKT_PAYLOAD_CORRUPTION BIT(13) +#define CAM_REQ_MGR_SENSOR_STREAM_OFF_FAILED BIT(14) +#define CAM_REQ_MGR_VALID_SHUTTER_DROPPED BIT(15) +#define CAM_REQ_MGR_ISP_ERR_HWPD_VIOLATION BIT(16) +#define CAM_REQ_MGR_ISP_ERR_OVERFLOW BIT(17) +#define CAM_REQ_MGR_ISP_ERR_P2I BIT(18) +#define 
 CAM_REQ_MGR_ISP_ERR_VIOLATION BIT(19)
+#define CAM_REQ_MGR_ISP_ERR_BUSIF_OVERFLOW BIT(20)
+#define CAM_REQ_MGR_ISP_ERR_SETTING_MISMATCHED BIT(21)
+#define CAM_REQ_MGR_ISP_ERR_ILLEGAL_DT_SWITCH BIT(22)
+
+/**
+ * struct cam_req_mgr_error_msg
+ * @error_type: type of error
+ * @request_id: request id of frame
+ * @device_hdl: device handle
+ * @link_hdl: link handle
+ * @resource_size: size of the resource
+ * @error_code: Error code reported by the event.
+ * Note: This field is a bit field.
+ */
+struct cam_req_mgr_error_msg {
+	__u32 error_type;
+	__u32 request_id;
+	__s32 device_hdl;
+	__s32 link_hdl;
+	__u32 resource_size;
+	__u32 error_code;
+};
+
+/**
+ * struct cam_req_mgr_frame_msg
+ * @request_id: request id of the frame
+ * @frame_id: frame id of the frame
+ * @timestamp: timestamp of the frame
+ * @link_hdl: link handle associated with this message
+ * @sof_status: sof status success or fail
+ * @frame_id_meta: refers to the meta for
+ * that frame in specific usecases
+ * @reserved: reserved
+ */
+struct cam_req_mgr_frame_msg {
+	__u64 request_id;
+	__u64 frame_id;
+	__u64 timestamp;
+	__s32 link_hdl;
+	__u32 sof_status;
+	__u32 frame_id_meta;
+	__u32 reserved;
+};
+
+/**
+ * enum cam_req_msg_timestamp_type - Identifies index of timestamps
+ *
+ * @CAM_REQ_SOF_QTIMER_TIMESTAMP: SOF qtimer timestamp
+ * @CAM_REQ_BOOT_TIMESTAMP: SOF boot timestamp
+ * @CAM_REQ_TIMESTAMP_MAX: Max enum index for timestamp type
+ *
+ */
+enum cam_req_msg_timestamp_type {
+	CAM_REQ_SOF_QTIMER_TIMESTAMP = 0,
+	CAM_REQ_BOOT_TIMESTAMP,
+	CAM_REQ_TIMESTAMP_MAX
+};
+
+/**
+ * struct cam_req_mgr_frame_msg_v2
+ * @request_id: request id of the frame
+ * @frame_id: frame id of the frame
+ * @timestamps: array for all the supported timestamps
+ * @link_hdl: link handle associated with this message
+ * @frame_id_meta: refers to the meta for
+ * that frame in specific usecases
+ * @reserved: reserved for future additions and max size for structure can be 64 bytes
+ */
+struct
cam_req_mgr_frame_msg_v2 { + __u64 request_id; + __u64 frame_id; + __u64 timestamps[CAM_REQ_TIMESTAMP_MAX]; + __s32 link_hdl; + __u32 frame_id_meta; + __u32 reserved[4]; +}; + +/** + * struct cam_req_mgr_custom_msg + * @custom_type: custom type + * @request_id: request id of the frame + * @frame_id: frame id of the frame + * @timestamp: timestamp of the frame + * @link_hdl: link handle associated with this message + * @custom_data: custom data + */ +struct cam_req_mgr_custom_msg { + __u32 custom_type; + __u64 request_id; + __u64 frame_id; + __u64 timestamp; + __s32 link_hdl; + __u64 custom_data; +}; + +/** + * Request Manager Node Msg Event Types + * @CAM_REQ_MGR_NO_EVENT : Event type not reported by the hardware + * @CAM_REQ_MGR_RETRY_EVENT : Retry request reported from the hardware + */ +#define CAM_REQ_MGR_NO_EVENT 0 +#define CAM_REQ_MGR_RETRY_EVENT 1 + +/** + * Request Manager Node Msg Event Cause + * @CAM_REQ_MGR_CAUSE_UNREPORTED : Event cause not reported by the hardware + * @CAM_REQ_MGR_JPEG_THUBNAIL_SIZE_ERROR : JPEG Thumbnail encode size exceeds the threshold size + */ +#define CAM_REQ_MGR_CAUSE_UNREPORTED 0 +#define CAM_REQ_MGR_JPEG_THUBNAIL_SIZE_ERROR 1 + +/** +* struct cam_req_mgr_node_msg +* @device_hdl : Device handle of the device reporting the error +* @link_hdl : link hdl for real time devices +* @event_type : Type of the event +* @event_cause : Cause of the event +* @request_id : Request id +* @custom_data : custom data +* @reserved : Reserved field +*/ +struct cam_req_mgr_node_msg { + __s32 device_hdl; + __s32 link_hdl; + __u32 event_type; + __u32 event_cause; + __u64 request_id; + __u64 custom_data; + __u32 reserved[2]; +}; + +/** + * Request Manager Msg Page Fault event + * @CAM_REQ_MGR_PF_EVT_BUF_NOT_FOUND : Faulted buffer not found + * @CAM_REQ_MGR_PF_EVT_BUF_FOUND_IO_CFG : Faulted buffer from io_cfg found + * @CAM_REQ_MGR_PF_EVT_BUF_FOUND_REF_BUF : Faulted io region buffer in Patch found + * @CAM_REQ_MGR_PF_EVT_BUF_FOUND_CDM : Fault in cmd 
buffer + * @CAM_REQ_MGR_PF_EVT_BUF_FOUND_SHARED : Fault in shared region buffer + */ +#define CAM_REQ_MGR_PF_EVT_BUF_NOT_FOUND 0 +#define CAM_REQ_MGR_PF_EVT_BUF_FOUND_IO_CFG 1 +#define CAM_REQ_MGR_PF_EVT_BUF_FOUND_REF_BUF 2 +#define CAM_REQ_MGR_PF_EVT_BUF_FOUND_CDM 3 +#define CAM_REQ_MGR_PF_EVT_BUF_FOUND_SHARED 4 + +/** + * Faulted Memory Type + * @CAM_REQ_MGR_PF_TYPE_NULL : Fault on NULL + * @CAM_REQ_MGR_PF_TYPE_OUT_OF_BOUND : Fault on address outside of any mapped buffer + * @CAM_REQ_MGR_PF_TYPE_MAPPED_REGION : Fault on address within a mapped buffer + */ +#define CAM_REQ_MGR_PF_TYPE_NULL 0 +#define CAM_REQ_MGR_PF_TYPE_OUT_OF_BOUND 1 +#define CAM_REQ_MGR_PF_TYPE_MAPPED_REGION 2 + +/** + * Faulted Memory stage + * @CAM_REQ_MGR_STAGE1_FAULT : Faulted memory in non-secure stage + * @CAM_REQ_MGR_STAGE2_FAULT : Faulted memory in secure stage + */ +#define CAM_REQ_MGR_STAGE1_FAULT 0 +#define CAM_REQ_MGR_STAGE2_FAULT 1 + +/** + * struct cam_req_mgr_pf_err_msg + * @device_hdl : device handle of the device reporting the error + * @link_hdl : link hdl for real time devices + * @pf_evt : indicates if no faulted buffer found or found + * io cfg faulted buffer or found ref faulted buffer + * or found cdm shared fautled buffer + * @pf_type : indicates if page fault type is fault on NULL or + * fault out of bound or fault within mapped region + * @pf_stage : indicates if faulted memory is from secure or non-secure region + * @patch_id : index to which patch in the packet is faulted + * @buf_hdl : faulted buffer memory handle + * @offset : offset provided in the packet + * @port_id : resource type of the io cfg in packet + * @far_delta : memory gab between faulted addr and closest + * buffer's starting address + * @req_id : request id for the faulted request + * @bid : bus id + * @pid : unique id for hw group of ports + * @mid : port id of hw + * @reserved : reserved fields + */ +struct cam_req_mgr_pf_err_msg { + __s32 device_hdl; + __s32 link_hdl; + __u8 pf_evt; + __u8 pf_type; 
+ __u8 pf_stage; + __u8 patch_id; + __s32 buf_hdl; + __u32 offset; + __u32 port_id; + __u64 far_delta; + __u64 req_id; + __u8 bid; + __u8 pid; + __u16 mid; + __u32 reserved[3]; +}; + +/** + * struct cam_req_mgr_message - 64 bytes is the max size that can be sent as v4l2 evt + * @session_hdl: session to which the frame belongs to + * @reserved: reserved field + * @u: union which can either be error/frame/custom/node message/page fault message + */ +struct cam_req_mgr_message { + __s32 session_hdl; + __s32 reserved; + union { + struct cam_req_mgr_error_msg err_msg; + struct cam_req_mgr_frame_msg frame_msg; + struct cam_req_mgr_frame_msg_v2 frame_msg_v2; + struct cam_req_mgr_custom_msg custom_msg; + struct cam_req_mgr_node_msg node_msg; + struct cam_req_mgr_pf_err_msg pf_err_msg; + } u; +}; +#endif /* __UAPI_LINUX_CAM_REQ_MGR_H */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_sensor.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_sensor.h new file mode 100644 index 0000000000..2a2d565897 --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_sensor.h @@ -0,0 +1,1287 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */
+
+#ifndef __UAPI_CAM_SENSOR_H__
+#define __UAPI_CAM_SENSOR_H__
+
+/*
+ * NOTE(review): the original header names were stripped by extraction
+ * (angle-bracket text eaten). Restored from the upstream cam_sensor.h
+ * include set — confirm against the source tree.
+ */
+#include <linux/types.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/cam_defs.h>
+
+#define CAM_SENSOR_PROBE_CMD (CAM_COMMON_OPCODE_MAX + 1)
+#define CAM_FLASH_MAX_LED_TRIGGERS 2
+#define MAX_OIS_NAME_SIZE 32
+#define MAX_OIS_FW_COUNT 2
+#define CAM_CSIPHY_SECURE_MODE_ENABLED 1
+#define CAM_SENSOR_NAME_MAX_SIZE 32
+
+#define SKEW_CAL_MASK BIT(1)
+#define PREAMBLE_PATTEN_CAL_MASK BIT(2)
+
+/* CSIPHY driver cmd buffer meta types */
+#define CAM_CSIPHY_PACKET_META_LANE_INFO 0
+#define CAM_CSIPHY_PACKET_META_GENERIC_BLOB 1
+#define CAM_CSIPHY_PACKET_META_LANE_INFO_V2 2
+
+/* CSIPHY blob types */
+#define CAM_CSIPHY_GENERIC_BLOB_TYPE_CDR_CONFIG 0
+#define CAM_CSIPHY_GENERIC_BLOB_TYPE_AUX_CONFIG 1
+
+/* CSIPHY CDR tolerance operations */
+#define CAM_CSIPHY_CDR_ADD_TOLERANCE 1
+#define CAM_CSIPHY_CDR_SUB_TOLERANCE 2
+
+/* SENSOR driver cmd buffer meta types */
+#define CAM_SENSOR_PACKET_I2C_COMMANDS 0
+#define CAM_SENSOR_PACKET_GENERIC_BLOB 1
+/* Contains I2C config to be applied on the frame post the regular update */
+#define CAM_SENSOR_PACKET_DEFERRED_I2C_COMMANDS_META 2
+/* Contains I2C config to be applied on frame skips */
+#define CAM_SENSOR_PACKET_FRAME_SKIP_I2C_COMMANDS_META 3
+/* Contains I2C config to be applied on bubble */
+#define CAM_SENSOR_PACKET_BUBBLE_UPD_I2C_COMMANDS_META 4
+
+/* SENSOR blob types */
+#define CAM_SENSOR_GENERIC_BLOB_RES_INFO 0
+#define CAM_SENSOR_GENERIC_BLOB_FRAME_INFO 1
+
+enum camera_sensor_cmd_type {
+	CAMERA_SENSOR_CMD_TYPE_INVALID,
+	CAMERA_SENSOR_CMD_TYPE_PROBE,
+	CAMERA_SENSOR_CMD_TYPE_PWR_UP,
+	CAMERA_SENSOR_CMD_TYPE_PWR_DOWN,
+	CAMERA_SENSOR_CMD_TYPE_I2C_INFO,
+	CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR,
+	CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_RD,
+	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR,
+	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD,
+	CAMERA_SENSOR_CMD_TYPE_WAIT,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_RER,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR,
+
CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET, + CAMERA_SENSOR_CMD_TYPE_RD_DATA, + CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE, + CAMERA_SENSOR_OIS_CMD_TYPE_FW_INFO, + CAMERA_SENSOR_CMD_TYPE_MAX, +}; + +enum cam_actuator_packet_opcodes { + CAM_ACTUATOR_PACKET_OPCODE_INIT, + CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS, + CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS, + CAM_ACTUATOR_PACKET_OPCODE_READ, + CAM_ACTUATOR_PACKET_NOP_OPCODE = 127 +}; + +enum cam_eeprom_packet_opcodes { + CAM_EEPROM_PACKET_OPCODE_INIT, + CAM_EEPROM_WRITE +}; + +enum cam_ois_packet_opcodes { + CAM_OIS_PACKET_OPCODE_INIT, + CAM_OIS_PACKET_OPCODE_OIS_CONTROL, + CAM_OIS_PACKET_OPCODE_READ, + CAM_OIS_PACKET_OPCODE_WRITE_TIME +}; + +enum camera_sensor_i2c_op_code { + CAMERA_SENSOR_I2C_OP_INVALID, + CAMERA_SENSOR_I2C_OP_RNDM_WR, + CAMERA_SENSOR_I2C_OP_RNDM_WR_VERF, + CAMERA_SENSOR_I2C_OP_CONT_WR_BRST, + CAMERA_SENSOR_I2C_OP_CONT_WR_BRST_VERF, + CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN, + CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN_VERF, + CAMERA_SENSOR_I2C_OP_RNDM_RD, + CAMERA_SENSOR_I2C_OP_CONT_RD, + CAMERA_SENSOR_I2C_OP_MAX, +}; + +enum camera_sensor_wait_op_code { + CAMERA_SENSOR_WAIT_OP_INVALID, + CAMERA_SENSOR_WAIT_OP_COND, + CAMERA_SENSOR_WAIT_OP_HW_UCND, + CAMERA_SENSOR_WAIT_OP_SW_UCND, + CAMERA_SENSOR_WAIT_OP_MAX, +}; + +enum cam_tpg_packet_opcodes { + CAM_TPG_PACKET_OPCODE_INVALID = 0, + CAM_TPG_PACKET_OPCODE_INITIAL_CONFIG, + CAM_TPG_PACKET_OPCODE_NOP, + CAM_TPG_PACKET_OPCODE_UPDATE, + CAM_TPG_PACKET_OPCODE_MAX, +}; + +enum cam_sensor_packet_opcodes { + CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON, + CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE, + CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG, + CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE, + CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG, + CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF, + CAM_SENSOR_PACKET_OPCODE_SENSOR_READ, + CAM_SENSOR_PACKET_OPCODE_SENSOR_FRAME_SKIP_UPDATE, + CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE_V2, + CAM_SENSOR_PACKET_OPCODE_SENSOR_REG_BANK_UNLOCK, + 
CAM_SENSOR_PACKET_OPCODE_SENSOR_REG_BANK_LOCK, + CAM_SENSOR_PACKET_OPCODE_SENSOR_BUBBLE_UPDATE, + CAM_SENSOR_PACKET_OPCODE_SENSOR_DEFERRED_META, + CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP = 127, +}; + +enum cam_endianness_type { + CAM_ENDIANNESS_BIG, + CAM_ENDIANNESS_LITTLE, +}; + +enum tpg_command_type_t { + TPG_CMD_TYPE_INVALID = 0, + TPG_CMD_TYPE_GLOBAL_CONFIG, + TPG_CMD_TYPE_STREAM_CONFIG, + TPG_CMD_TYPE_ILLUMINATION_CONFIG, + TPG_CMD_TYPE_SETTINGS_CONFIG, + TPG_CMD_TYPE_MAX, +}; + +enum tpg_pattern_t { + TPG_PATTERN_INVALID = 0, + TPG_PATTERN_REAL_IMAGE, + TPG_PATTERN_RANDOM_PIXL, + TPG_PATTERN_RANDOM_INCREMENTING_PIXEL, + TPG_PATTERN_COLOR_BAR, + TPG_PATTERN_ALTERNATING_55_AA, + TPG_PATTERN_ALTERNATING_USER_DEFINED, + TPG_PATTERN_MAX, +}; + +enum tpg_color_bar_mode_t { + TPG_COLOR_BAR_MODE_INVALID = 0, + TPG_COLOR_BAR_MODE_NORMAL, + TPG_COLOR_BAR_MODE_SPLIT, + TPG_COLOR_BAR_MODE_ROTATING, + TPG_COLOR_BAR_MODE_MAX, +}; + +enum tpg_image_format_t { + TPG_IMAGE_FORMAT_INVALID = 0, + TPG_IMAGE_FORMAT_BAYER, + TPG_IMAGE_FORMAT_QCFA, + TPG_IMAGE_FORMAT_YUV, + TPG_IMAGE_FORMAT_JPEG, + TPG_IMAGE_FORMAT_MAX, +}; + +enum tpg_phy_type_t { + TPG_PHY_TYPE_INVALID = 0, + TPG_PHY_TYPE_DPHY, + TPG_PHY_TYPE_CPHY, + TPG_PHY_TYPE_MAX, +}; + +enum tpg_pixel_type_t { + TPG_PIXEL_TYPE_INVALID = 0, + TPG_PIXEL_TYPE_RED, + TPG_PIXEL_TYPE_GREEN, + TPG_PIXEL_TYPE_BLUE, + TPG_PIXEL_TYPE_IR, + TPG_PIXEL_TYPE_MONO, +}; + +enum tpg_exposure_type_t { + TPG_EXPOSUREL_TYPE_INVALID = 0, + TPG_EXPOSURE_TYPE_LONG, + TPG_EXPOSURE_TYPE_MIDDLE, + TPG_EXPOSURE_TYPE_SHORT, +}; + +enum xcfa_type_t { + XCFA_TYPE_BAYER = 0, + XCFA_TYPE_QUADCFA, + XCFA_TYPE_THREEXTHREECFA, + XCFA_TYPE_FOURXFOURCFA, + XCFA_TYPE_RGBIR, + XCFA_TYPE_RGBWC, + XCFA_TYPE_RGBWK, + XCFA_TYPE_UNCONVENTIONAL_BAYER, +}; + +enum tpg_interleaving_format_t { + TPG_INTERLEAVING_FORMAT_INVALID = 0, + TPG_INTERLEAVING_FORMAT_FRAME, + TPG_INTERLEAVING_FORMAT_LINE, + TPG_INTERLEAVING_FORMAT_SHDR, + TPG_INTERLEAVING_FORMAT_SPARSE_PD, + 
TPG_INTERLEAVING_FORMAT_SHDR_SPARSE_PD, + TPG_INTERLEAVING_FORMAT_MAX, +}; + +enum tpg_shutter_t { + TPG_SHUTTER_TYPE_INVALID = 0, + TPG_SHUTTER_TYPE_ROLLING, + TPG_SHUTTER_TYPE_GLOBAL, + TPG_SHUTTER_TYPE_MAX, +}; + +enum tpg_stream_t { + TPG_STREAM_TYPE_INVALID = 0, + TPG_STREAM_TYPE_IMAGE, + TPG_STREAM_TYPE_PDAF, + TPG_STREAM_TYPE_META, + TPG_STREAM_TYPE_MAX, +}; + +enum tpg_cfa_arrangement_t { + TPG_CFA_ARRANGEMENT_TYPE_INVALID = 0, + TPG_CFA_ARRANGEMENT_TYPE_MAX, +}; + +/** + * struct cam_sensor_query_cap - capabilities info for sensor + * + * @slot_info : Indicates about the slotId or cell Index + * @secure_camera : Camera is in secure/Non-secure mode + * @pos_pitch : Sensor position pitch + * @pos_roll : Sensor position roll + * @pos_yaw : Sensor position yaw + * @actuator_slot_id : Actuator slot id which connected to sensor + * @eeprom_slot_id : EEPROM slot id which connected to sensor + * @ois_slot_id : OIS slot id which connected to sensor + * @flash_slot_id : Flash slot id which connected to sensor + * @csiphy_slot_id : CSIphy slot id which connected to sensor + * + */ +struct cam_sensor_query_cap { + __u32 slot_info; + __u32 secure_camera; + __u32 pos_pitch; + __u32 pos_roll; + __u32 pos_yaw; + __u32 actuator_slot_id; + __u32 eeprom_slot_id; + __u32 ois_slot_id; + __u32 flash_slot_id; + __u32 csiphy_slot_id; +} __attribute__((packed)); + +/** + * struct cam_csiphy_query_cap - capabilities info for csiphy + * + * @slot_info : Indicates about the slotId or cell Index + * @version : CSIphy version + * @clk lane : Of the 5 lanes, informs lane configured + * as clock lane + * @reserved + */ +struct cam_csiphy_query_cap { + __u32 slot_info; + __u32 version; + __u32 clk_lane; + __u32 reserved; +} __attribute__((packed)); + +/** + * struct cam_actuator_query_cap - capabilities info for actuator + * + * @slot_info : Indicates about the slotId or cell Index + * @reserved + */ +struct cam_actuator_query_cap { + __u32 slot_info; + __u32 reserved; +} 
__attribute__((packed)); + +/** + * struct cam_eeprom_query_cap_t - capabilities info for eeprom + * + * @slot_info : Indicates about the slotId or cell Index + * @eeprom_kernel_probe : Indicates about the kernel or userspace probe + */ +struct cam_eeprom_query_cap_t { + __u32 slot_info; + __u16 eeprom_kernel_probe; + __u16 is_multimodule_mode; +} __attribute__((packed)); + +/** + * struct cam_ois_query_cap_t - capabilities info for ois + * + * @slot_info : Indicates about the slotId or cell Index + */ +struct cam_ois_query_cap_t { + __u32 slot_info; + __u16 reserved; +} __attribute__((packed)); + +/** + * struct cam_tpg_query_cap - capabilities info for tpg + * + * @slot_info : Indicates about the slotId or cell Index + * @version : TPG version , in msb + * @reserved : Reserved for future Use + * @secure_camera : Camera is in secure/Non-secure mode + * @csiphy_slot_id : CSIphy slot id which connected to sensor + */ +struct cam_tpg_query_cap { + __u32 slot_info; + __u32 version; + __u32 secure_camera; + __u32 csiphy_slot_id; + __u32 reserved[2]; +} __attribute__((packed)); + + +/** + * struct cam_cmd_i2c_info - Contains slave I2C related info + * + * @slave_addr : Slave address + * @i2c_freq_mode : 4 bits are used for I2c freq mode + * @cmd_type : Explains type of command + */ +struct cam_cmd_i2c_info { + __u32 slave_addr; + __u8 i2c_freq_mode; + __u8 cmd_type; + __u16 reserved; +} __attribute__((packed)); + +/** + * Below macro definition is the param mask for + * cam_cmd_sensor_res_info. 
+ */ +#define CAM_SENSOR_FEATURE_MASK BIT(0) +#define CAM_SENSOR_NUM_BATCHED_FRAMES BIT(1) + +/* Below macro definition is the sub definition for CAM_SENSOR_FEATURE_MASK */ +#define CAM_SENSOR_FEATURE_NONE 0 +#define CAM_SENSOR_FEATURE_AEB_ON BIT(0) +#define CAM_SENSOR_FEATURE_AEB_UPDATE BIT(1) +#define CAM_SENSOR_FEATURE_AEB_OFF BIT(2) +#define CAM_SENSOR_FEATURE_INSENSOR_HDR_3EXP_ON BIT(3) +#define CAM_SENSOR_FEATURE_INSENSOR_HDR_3EXP_OFF BIT(4) + +#define CAM_CSIPHY_T3_PREPARE_NS_MASK BIT(0) +#define CAM_CSIPHY_T3_PREAMBLE_NS_MASK BIT(1) + +/** + * struct cam_cmd_sensor_res_info - Contains sensor res info + * + * res_index is the key property, it specifies the + * combinations of other properties enclosed in this + * structure. + * + * @res_index : The resolution index that gets updated + * during a mode switch + * @fps : Frame rate + * @width : Pixel width to output to csiphy + * @height : Pixel height to output to csiphy + * @caps : Specifies capability sensor is configured + * for, (eg, XCFA, HFR), num_exposures and + * PDAF type + * @num_valid_params : Number of valid params + * @valid_param_mask : Valid param mask + * @params : params + */ +struct cam_sensor_res_info { + __u16 res_index; + __u32 fps; + __u32 width; + __u32 height; + char caps[64]; + __u32 num_valid_params; + __u32 valid_param_mask; + __u16 params[3]; +} __attribute__((packed)); + +/** + * struct cam_sensor_frame_info - Contains sensor frame related info + * + * @frame_sync_shift : Indicates how far the frame synchronization + * reference point from SOF, this is used to + * align with userland and kernel frame sync offset. + * @frame_duration : Frame duration + * @blanking_duration : Vertical blanking duration for a request, and it + * is representing the blanking durations before the + * frame for this request. 
+ * @num_valid_params : Number of valid params + * @valid_param_mask : Valid param mask + * @params : params + */ +struct cam_sensor_frame_info { + __u64 frame_sync_shift; + __u64 frame_duration; + __u64 blanking_duration; + __u32 num_valid_params; + __u32 valid_param_mask; + __u64 params[4]; +} __attribute__((packed)); + +/** + * struct cam_ois_opcode - Contains OIS opcode + * + * @prog : OIS FW prog register address + * @coeff : OIS FW coeff register address + * @pheripheral : OIS pheripheral + * @memory : OIS memory + */ +struct cam_ois_opcode { + __u32 prog; + __u32 coeff; + __u32 pheripheral; + __u32 memory; +} __attribute__((packed)); + +/** + * struct cam_cmd_ois_info - Contains OIS slave info + * + * @slave_addr : OIS i2c slave address + * @i2c_freq_mode : i2c frequency mode + * @cmd_type : Explains type of command + * @ois_fw_flag : indicates if fw is present or not + * @is_ois_calib : indicates the calibration data is available + * @ois_name : OIS name + * @opcode : opcode + */ +struct cam_cmd_ois_info { + __u32 slave_addr; + __u8 i2c_freq_mode; + __u8 cmd_type; + __u8 ois_fw_flag; + __u8 is_ois_calib; + char ois_name[MAX_OIS_NAME_SIZE]; + struct cam_ois_opcode opcode; +} __attribute__((packed)); + + +/** + * struct cam_cmd_ois_fw_param - Contains OIS firmware param + * + * NOTE: if this struct is updated, + * please also update version in struct cam_cmd_ois_fw_info + * + * @fw_name : firmware file name + * @fw_start_pos : data start position in file + * @fw_size : firmware size + * @fw_len_per_write: data length per write in bytes + * @fw_addr_type : addr type + * @fw_data_type : data type + * @fw_operation : type of operation + * @reserved : reserved for 32-bit alignment + * @fw_delayUs : delay in cci write + * @fw_reg_addr : start register addr to write + * @fw_init_size : size of fw download init settings + * @fw_finalize_size: size of fw download finalize settings + */ +struct cam_cmd_ois_fw_param { + char fw_name[MAX_OIS_NAME_SIZE]; + __u32 
fw_start_pos; + __u32 fw_size; + __u32 fw_len_per_write; + __u8 fw_addr_type; + __u8 fw_data_type; + __u8 fw_operation; + __u8 reserved; + __u32 fw_delayUs; + __u32 fw_reg_addr; + __u32 fw_init_size; + __u32 fw_finalize_size; +} __attribute__((packed)); + +/** + * struct cam_cmd_ois_fw_info - Contains OIS firmware info + * + * @version : version info + * NOTE: if struct cam_cmd_ois_fw_param is updated, + * version here needs to be updated too. + * @reserved : reserved + * @cmd_type : Explains type of command + * @fw_count : firmware count + * @endianness : endianness combo: + * bit[3:0] firmware data's endianness + * bit[7:4] endian type of input parameter to ois driver, say QTime + * @fw_param : includes firmware parameters + * @num_valid_params: Number of valid params + * @param_mask : Mask to indicate fields in params + * @params : Additional Params + */ +struct cam_cmd_ois_fw_info { + __u32 version; + __u8 reserved; + __u8 cmd_type; + __u8 fw_count; + __u8 endianness; + struct cam_cmd_ois_fw_param fw_param[MAX_OIS_FW_COUNT]; + __u32 num_valid_params; + __u32 param_mask; + __u32 params[4]; +} __attribute__((packed)); + +/** + * struct cam_cmd_probe - Contains sensor slave info + * + * @data_type : Slave register data type + * @addr_type : Slave register address type + * @op_code : Don't Care + * @cmd_type : Explains type of command + * @reg_addr : Slave register address + * @expected_data : Data expected at slave register address + * @data_mask : Data mask if only few bits are valid + * @camera_id : Indicates the slot to which camera + * needs to be probed + * @reserved + */ +struct cam_cmd_probe { + __u8 data_type; + __u8 addr_type; + __u8 op_code; + __u8 cmd_type; + __u32 reg_addr; + __u32 expected_data; + __u32 data_mask; + __u16 camera_id; + __u16 reserved; +} __attribute__((packed)); + +/** + * struct cam_cmd_probe_v2 - Contains sensor slave info version 2 + * + * @data_type : Slave register data type + * @addr_type : Slave register address type + * 
@op_code : Don't Care + * @cmd_type : Explains type of command + * @reg_addr : Slave register address + * @expected_data : Data expected at slave register address + * @data_mask : Data mask if only few bits are valid + * @camera_id : Indicates the slot to which camera + * needs to be probed + * @pipeline_delay : Pipeline delay + * @logical_camera_id : Logical Camera ID + * @sensor_name : Sensor's name + * @reserved + */ +struct cam_cmd_probe_v2 { + __u8 data_type; + __u8 addr_type; + __u8 op_code; + __u8 cmd_type; + __u32 reg_addr; + __u32 expected_data; + __u32 data_mask; + __u16 camera_id; + __u16 pipeline_delay; + __u32 logical_camera_id; + char sensor_name[CAM_SENSOR_NAME_MAX_SIZE]; + __u32 reserved[4]; +} __attribute__((packed)); + +/** + * struct cam_power_settings - Contains sensor power setting info + * + * @power_seq_type : Type of power sequence + * @reserved + * @config_val_low : Lower 32 bit value configuration value + * @config_val_high : Higher 32 bit value configuration value + * + */ +struct cam_power_settings { + __u16 power_seq_type; + __u16 reserved; + __u32 config_val_low; + __u32 config_val_high; +} __attribute__((packed)); + +/** + * struct cam_cmd_power - Explains about the power settings + * + * @count : Number of power settings follows + * @reserved + * @cmd_type : Explains type of command + * @power_settings : Contains power setting info + */ +struct cam_cmd_power { + __u32 count; + __u8 reserved; + __u8 cmd_type; + __u16 more_reserved; + union { + struct cam_power_settings power_settings[1]; + __DECLARE_FLEX_ARRAY(struct cam_power_settings, power_settings_flex); + }; +} __attribute__((packed)); + +/** + * struct i2c_rdwr_header - header of READ/WRITE I2C command + * + * @ count : Number of registers / data / reg-data pairs + * @ op_code : Operation code + * @ cmd_type : Command buffer type + * @ data_type : I2C data type + * @ addr_type : I2C address type + * @ reserved + */ +struct i2c_rdwr_header { + __u32 count; + __u8 op_code; + __u8 
cmd_type; + __u8 data_type; + __u8 addr_type; +} __attribute__((packed)); + +/** + * struct i2c_random_wr_payload - payload for I2C random write + * + * @ reg_addr : Register address + * @ reg_data : Register data + * + */ +struct i2c_random_wr_payload { + __u32 reg_addr; + __u32 reg_data; +} __attribute__((packed)); + +/** + * struct cam_cmd_i2c_random_wr - I2C random write command + * @ header : header of READ/WRITE I2C command + * @ random_wr_payload : payload for I2C random write + */ +struct cam_cmd_i2c_random_wr { + struct i2c_rdwr_header header; + union { + struct i2c_random_wr_payload random_wr_payload[1]; + __DECLARE_FLEX_ARRAY(struct i2c_random_wr_payload, random_wr_payload_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_cmd_read - I2C read command + * @ reg_data : Register data + * @ reserved + */ +struct cam_cmd_read { + __u32 reg_data; + __u32 reserved; +} __attribute__((packed)); + +/** + * struct cam_cmd_i2c_continuous_wr - I2C continuous write command + * @ header : header of READ/WRITE I2C command + * @ reg_addr : Register address + * @ data_read : I2C read command + */ +struct cam_cmd_i2c_continuous_wr { + struct i2c_rdwr_header header; + __u32 reg_addr; + union { + struct cam_cmd_read data_read[1]; + __DECLARE_FLEX_ARRAY(struct cam_cmd_read, data_read_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_cmd_i2c_random_rd - I2C random read command + * @ header : header of READ/WRITE I2C command + * @ data_read : I2C read command + */ +struct cam_cmd_i2c_random_rd { + struct i2c_rdwr_header header; + union { + struct cam_cmd_read data_read[1]; + __DECLARE_FLEX_ARRAY(struct cam_cmd_read, data_read_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_cmd_i2c_continuous_rd - I2C continuous continuous read command + * @ header : header of READ/WRITE I2C command + * @ reg_addr : Register address + * + */ +struct cam_cmd_i2c_continuous_rd { + struct i2c_rdwr_header header; + __u32 reg_addr; +} __attribute__((packed)); + 
+/** + * struct cam_cmd_conditional_wait - Conditional wait command + * @data_type : Data type + * @addr_type : Address type + * @op_code : Opcode + * @cmd_type : Explains type of command + * @timeout : Timeout for retries + * @reserved + * @reg_addr : Register Address + * @reg_data : Register data + * @data_mask : Data mask if only few bits are valid + * @camera_id : Indicates the slot to which camera + * needs to be probed + * + */ +struct cam_cmd_conditional_wait { + __u8 data_type; + __u8 addr_type; + __u16 reserved; + __u8 op_code; + __u8 cmd_type; + __u16 timeout; + __u32 reg_addr; + __u32 reg_data; + __u32 data_mask; +} __attribute__((packed)); + +/** + * struct cam_cmd_unconditional_wait - Un-conditional wait command + * @delay : Delay + * @op_code : Opcode + * @cmd_type : Explains type of command + */ +struct cam_cmd_unconditional_wait { + __s16 delay; + __s16 reserved; + __u8 op_code; + __u8 cmd_type; + __u16 reserved1; +} __attribute__((packed)); + +/** + * cam_csiphy_cdr_sweep_params : Provides cdr blob structre + * + * @cdr_tolerance : CDR tolerance param + * @tolerance_op_type : Determines if the tolerance needs to be added/subtracted + * from default CDR value + * @configured_cdr : Configured CDR value for all the lanes for the + * selected data rate, default +/- tolerance, + * this is the output + * @num_valid_params : Number of valid params + * @valid_param_mask : Valid param mask + * @params : params + * + */ +struct cam_csiphy_cdr_sweep_params { + __u32 cdr_tolerance; + __u32 tolerance_op_type; + __u32 configured_cdr; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[3]; +}; + +/** + * cam_csiphy_aux_settings_params : Provides aux blob structre + * + * @data_rate_aux_mask : Auxiliary settings update for different data rates, + * this is the output + * @num_valid_params : Number of valid params + * @valid_param_mask : Valid param mask + * @params : params + * + */ +struct cam_csiphy_aux_settings_params { + __u64 
data_rate_aux_mask; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[2]; +}; + +/** + * cam_csiphy_info : Provides cmdbuffer structre + * @lane_assign : Lane sensor will be using + * @mipi_flags : Phy flags for different calibration operations + * @lane_cnt : Total number of lanes + * @secure_mode : Secure mode flag to enable / disable + * @settle_time : Settling time in ms + * @data_rate : Data rate + * + */ +struct cam_csiphy_info { + __u16 reserved; + __u16 lane_assign; + __u16 mipi_flags; + __u8 lane_cnt; + __u8 secure_mode; + __u64 settle_time; + __u64 data_rate; +} __attribute__((packed)); + +/** + * cam_csiphy_info_v2 : Provides cmdbuffer structre + * @version : Version number + * @lane_assign : Lane sensor will be using + * @mipi_flags : Phy flags for different calibration operations + * @lane_cnt : Total number of lanes + * @secure_mode : Secure mode flag to enable / disable + * @settle_time : Settling time in ms + * @data_rate : Data rate + * @channel_type : Channel type indicates apply which channel settings + * @num_valid_params : Number of valid params + * @param_mask : Mask to indicate what the parameters are + * @params : Additional params + */ +struct cam_csiphy_info_v2 { + __u16 version; + __u16 lane_assign; + __u16 mipi_flags; + __u8 lane_cnt; + __u8 secure_mode; + __u64 settle_time; + __u64 data_rate; + __u32 channel_type; + __u32 num_valid_params; + __u32 param_mask; + __u32 params[5]; +} __attribute__((packed)); + +/** + * cam_csiphy_acquire_dev_info : Information needed for + * csiphy at the time of acquire + * @combo_mode : Indicates the device mode of operation + * @cphy_dphy_combo_mode : Info regarding cphy_dphy_combo mode + * @csiphy_3phase : Details whether 3Phase / 2Phase operation + * @reserve + * + */ +struct cam_csiphy_acquire_dev_info { + __u32 combo_mode; + __u16 cphy_dphy_combo_mode; + __u8 csiphy_3phase; + __u8 reserve; +} __attribute__((packed)); + +/** + * cam_sensor_acquire_dev : Updates sensor acuire cmd + * 
@device_handle : Updates device handle + * @session_handle : Session handle for acquiring device + * @handle_type : Resource handle type + * @reserved + * @info_handle : Handle to additional info + * needed for sensor sub modules + * + */ +struct cam_sensor_acquire_dev { + __u32 session_handle; + __u32 device_handle; + __u32 handle_type; + __u32 reserved; + __u64 info_handle; +} __attribute__((packed)); + +/** + * cam_tpg_acquire_dev : Updates tpg acuire cmd + * @device_handle : Updates device handle + * @session_handle : Session handle for acquiring device + * @handle_type : Resource handle type + * @reserved + * @info_handle : Handle to additional info + * needed for sensor sub modules + */ +struct cam_tpg_acquire_dev { + __u32 session_handle; + __u32 device_handle; + __u32 handle_type; + __u32 reserved; + __u64 info_handle; +} __attribute__((packed)); + +/** + * cam_sensor_streamon_dev : StreamOn command for the sensor + * @session_handle : Session handle for acquiring device + * @device_handle : Updates device handle + * @handle_type : Resource handle type + * @reserved + * @info_handle : Information Needed at the time of streamOn + * + */ +struct cam_sensor_streamon_dev { + __u32 session_handle; + __u32 device_handle; + __u32 handle_type; + __u32 reserved; + __u64 info_handle; +} __attribute__((packed)); + + +/** + * stream_dimension : Stream dimension + * + * @left : left pixel locaiton of stream + * @top : top pixel location of stream + * @width : width of the image stream + * @height : Height of the image stream + */ +struct stream_dimension { + uint32_t left; + uint32_t top; + uint32_t width; + uint32_t height; +}; + +/** + * tpg_command_header_t : tpg command common header + * + * @cmd_type : command type + * @size : size of the command including header + * @cmd_version : version of the command associated + */ +struct tpg_command_header_t { + __u32 cmd_type; + ssize_t size; + uint32_t cmd_version; +} __attribute__((packed)); + +/** + * 
tpg_pixel_coordinate_t : pixel coordinate structure + * + * @xcoordinate : X coordinate + * @ycoordinate : Y coordiante + * @pixel_type : red green blue ir mono + */ +struct tpg_pixel_coordinate_t { + uint32_t xcoordinate; + uint32_t ycoordinate; + uint32_t exposure_type; + uint32_t pixel_type; +} __attribute__((packed)); + +/** + * tpg_cfa_information_t : tpg cfa information structure + * + * @number_of_pixel_per_color : number of pixel per color + * @pattern_width : pattern width + * @pattern_height : pattern height + * @pixel_coordinate_count : pixel coordinate count + * @pixel_coordinate : pixel coordinate array + */ +struct tpg_cfa_information_t { + uint32_t number_of_pixel_per_color; + uint32_t pattern_width; + uint32_t pattern_height; + uint32_t pixel_coordinate_count; + struct tpg_pixel_coordinate_t pixel_coordinate[64]; +} __attribute__((packed)); + +/** + * tpg_reg_settings : TPG register settings + * + * @reg_offset : register offset + * @reg_value : register value + * @operation : operation + * @delay_us : delay in micro second + */ +struct tpg_reg_settings { + uint32_t reg_offset; + uint32_t reg_value; + uint32_t operation; + uint32_t delay_us; + uint32_t reserved[4]; +} __attribute__((packed)); + +/** + * tpg_settings_config_t : settings configuration command structure + * + * @header : common header + * @settings_array_offset : settings array offset + * @settings_array_size : settings array size + * @active_count : active count + * @param_mask : Mask to indicate fields in params + * @params : Additional Params + */ +struct tpg_settings_config_t { + struct tpg_command_header_t header; + uint32_t settings_array_offset; + uint32_t settings_array_size; + uint32_t active_count; + uint32_t param_mask; + uint32_t params[4]; +} __attribute__((packed)); + +/** + * tpg_global_config_t : global configuration command structure + * + * @header : common header + * @phy_type : phy type , cpy , dphy + * @lane_count : number of lanes used + * @interleaving_format : 
interleaving format used + * @phy_mode : phy mode of operation + * @shutter_type : shutter type + * @mode : if any specific mode needs to configured + * @hbi : horizontal blanking intervel + * @vbi : vertical blanking intervel + * @skip_pattern : frame skip pattern + * @tpg_clock : tpg clock + * @reserved : reserved for future use + */ +struct tpg_global_config_t { + struct tpg_command_header_t header; + enum tpg_phy_type_t phy_type; + uint8_t lane_count; + enum tpg_interleaving_format_t interleaving_format; + uint8_t phy_mode; + enum tpg_shutter_t shutter_type; + uint32_t mode; + uint32_t hbi; + uint32_t vbi; + uint32_t skip_pattern; + uint64_t tpg_clock; + uint32_t reserved[4]; +} __attribute__((packed)); + +/** + * tpg_old_stream_config_t : stream configuration command + * + * @header: common tpg command header + * @pattern_type : tpg pattern type used in this stream + * @cb_mode : tpg color bar mode used in this stream + * @frame_count : frame count in case of trigger burst mode + * @stream_type : type of stream like image pdaf etc + * @stream_dimension : Dimension of the stream + * @pixel_depth : bits per each pixel + * @cfa_arrangement : color filter arragement + * @output_format : output image format + * @hbi : horizontal blanking intervel + * @vbi : vertical blanking intervel + * @vc : virtual channel of this stream + * @dt : data type of this stream + * @skip_pattern : skip pattern for this stream + * @rotate_period : rotate period for this stream + * @reserved : reserved for future use + */ +struct tpg_old_stream_config_t { + struct tpg_command_header_t header; + enum tpg_pattern_t pattern_type; + enum tpg_color_bar_mode_t cb_mode; + uint32_t frame_count; + enum tpg_stream_t stream_type; + struct stream_dimension stream_dimension; + uint8_t pixel_depth; + enum tpg_cfa_arrangement_t cfa_arrangement; + enum tpg_image_format_t output_format; + uint32_t hbi; + uint32_t vbi; + uint16_t vc; + uint16_t dt; + uint32_t skip_pattern; + uint32_t rotate_period; + 
uint32_t reserved[4]; +} __attribute__((packed)); + +/** + * tpg_stream_config_t : stream configuration command + * + * @header: common tpg command header + * @pattern_type : tpg pattern type used in this stream + * @cb_mode : tpg color bar mode used in this stream + * @frame_count : frame count in case of trigger burst mode + * @stream_type : type of stream like image pdaf etc + * @stream_dimension : Dimension of the stream + * @pixel_depth : bits per each pixel + * @cfa_arrangement : color filter arragement + * @output_format : output image format + * @hbi : horizontal blanking intervel + * @vbi : vertical blanking intervel + * @vc : virtual channel of this stream + * @dt : data type of this stream + * @skip_pattern : skip pattern for this stream + * @rotate_period : rotate period for this stream + * @xcfa_debug : for xcfa debug; + * @shdr_line_offset0 : for shdr line offset0 + * @shdr_line_offset1 : for shdr line offset1 + * @reserved : reserved for future use + */ +struct tpg_stream_config_t { + struct tpg_command_header_t header; + enum tpg_pattern_t pattern_type; + enum tpg_color_bar_mode_t cb_mode; + uint32_t frame_count; + enum tpg_stream_t stream_type; + struct stream_dimension stream_dimension; + uint8_t pixel_depth; + enum tpg_cfa_arrangement_t cfa_arrangement; + enum tpg_image_format_t output_format; + uint32_t hbi; + uint32_t vbi; + uint16_t vc; + uint16_t dt; + uint32_t skip_pattern; + uint32_t rotate_period; + uint32_t xcfa_debug; + uint32_t shdr_line_offset0; + uint32_t shdr_line_offset1; + uint32_t reserved[4]; +} __attribute__((packed)); + +/** + * tpg_stream_config_t : stream configuration command + * + * @header: common tpg command header + * @pattern_type : tpg pattern type used in this stream + * @cb_mode : tpg color bar mode used in this stream + * @frame_count : frame count in case of trigger burst mode + * @stream_type : type of stream like image pdaf etc + * @stream_dimension : Dimension of the stream + * @pixel_depth : bits per each pixel 
+ * @cfa_arrangement : color filter arragement + * @output_format : output image format + * @hbi : horizontal blanking intervel + * @vbi : vertical blanking intervel + * @vc : virtual channel of this stream + * @dt : data type of this stream + * @skip_pattern : skip pattern for this stream + * @rotate_period : rotate period for this stream + * @shdr_line_offset0 : for shdr line offset0 + * @shdr_line_offset1 : for shdr line offset1 + * @cfa_info_exist : cfa info exists + * @cfa_info : cfa information + * @xcfa_type : xcfa type + * @reserved : reserved for future use + */ +struct tpg_stream_config_v3_t { + struct tpg_command_header_t header; + uint32_t pattern_type; + uint32_t cb_mode; + uint32_t frame_count; + uint32_t stream_type; + struct stream_dimension stream_dimension; + uint32_t pixel_depth; + uint32_t cfa_arrangement; + uint32_t output_format; + uint32_t hbi; + uint32_t vbi; + uint16_t vc; + uint16_t dt; + uint32_t skip_pattern; + uint32_t rotate_period; + uint32_t xcfa_debug; + uint32_t shdr_line_offset0; + uint32_t shdr_line_offset1; + uint32_t cfa_info_exist; + struct tpg_cfa_information_t cfa_info; + uint32_t xcfa_type; + uint32_t reserved[5]; +} __attribute__((packed)); + +/** + * tpg_illumination_control : illumianation control command + * + * @header : common header for tpg command + * @vc : virtual channel to identify the stream + * @dt : dt to identify the stream + * @exposure_short : short exposure time + * @exposure_mid : mid exposure time + * @exposure_long : long exposure time + * @r_gain : r channel gain + * @g_gain : g channel gain + * @b_gain : b channel gain + * @reserved : reserved for future use + */ +struct tpg_illumination_control { + struct tpg_command_header_t header; + uint16_t vc; + uint16_t dt; + uint32_t exposure_short; + uint32_t exposure_mid; + uint32_t exposure_long; + uint16_t r_gain; + uint16_t g_gain; + uint16_t b_gain; + uint32_t reserved[4]; +} __attribute__((packed)); + +/** + * struct cam_flash_init : Init command for 
the flash + * @flash_type : flash hw type + * @reserved + * @cmd_type : command buffer type + */ +struct cam_flash_init { + __u32 flash_type; + __u8 reserved; + __u8 cmd_type; + __u16 reserved1; +} __attribute__((packed)); + +/** + * struct cam_flash_set_rer : RedEyeReduction command buffer + * + * @count : Number of flash leds + * @opcode : Command buffer opcode + * CAM_FLASH_FIRE_RER + * @cmd_type : command buffer operation type + * @num_iteration : Number of led turn on/off sequence + * @reserved + * @led_on_delay_ms : flash led turn on time in ms + * @led_off_delay_ms : flash led turn off time in ms + * @led_current_ma : flash led current in ma + * + */ +struct cam_flash_set_rer { + __u32 count; + __u8 opcode; + __u8 cmd_type; + __u16 num_iteration; + __u32 led_on_delay_ms; + __u32 led_off_delay_ms; + __u32 led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS]; +} __attribute__((packed)); + +/** + * struct cam_flash_set_on_off : led turn on/off command buffer + * + * @count : Number of Flash leds + * @opcode : Command buffer opcodes + * CAM_FLASH_FIRE_LOW + * CAM_FLASH_FIRE_HIGH + * CAM_FLASH_OFF + * @cmd_type : Command buffer operation type + * @led_current_ma : Flash led current in ma + * @time_on_duration_ns : Flash time on duration in ns + * @led_on_wait_time_ns : Flash led turn on wait time in ns + * + */ +struct cam_flash_set_on_off { + __u32 count; + __u8 opcode; + __u8 cmd_type; + __u16 reserved; + __u32 led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS]; + __u64 time_on_duration_ns; + __u64 led_on_wait_time_ns; +} __attribute__((packed)); + +/** + * struct cam_flash_query_curr : query current command buffer + * + * @reserved + * @opcode : command buffer opcode + * @cmd_type : command buffer operation type + * @query_current_ma : battery current in ma + * + */ +struct cam_flash_query_curr { + __u16 reserved; + __u8 opcode; + __u8 cmd_type; + __u32 query_current_ma; +} __attribute__ ((packed)); + +/** + * struct cam_flash_query_cap : capabilities info for flash + * + * 
@slot_info : Indicates about the slotId or cell Index + * @max_current_flash : max supported current for flash + * @max_duration_flash : max flash turn on duration + * @max_current_torch : max supported current for torch + * + */ +struct cam_flash_query_cap_info { + __u32 slot_info; + __u32 max_current_flash[CAM_FLASH_MAX_LED_TRIGGERS]; + __u32 max_duration_flash[CAM_FLASH_MAX_LED_TRIGGERS]; + __u32 max_current_torch[CAM_FLASH_MAX_LED_TRIGGERS]; +} __attribute__ ((packed)); + +#endif diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_sync.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_sync.h new file mode 100644 index 0000000000..2c69dc314d --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_sync.h @@ -0,0 +1,448 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_SYNC_H__ +#define __UAPI_CAM_SYNC_H__ + +#include <linux/videodev2.h> +#include <linux/types.h> +#include <linux/ioctl.h> +#include <linux/media.h> + +#define CAM_SYNC_DEVICE_NAME "cam_sync_device" + +/* V4L event which user space will subscribe to */ +#define CAM_SYNC_V4L_EVENT (V4L2_EVENT_PRIVATE_START + 0) +#define CAM_SYNC_V4L_EVENT_V2 (V4L2_EVENT_PRIVATE_START + 1) + +/* Specific event ids to get notified in user space */ +#define CAM_SYNC_V4L_EVENT_ID_CB_TRIG 0 + +/* Size of opaque payload sent to kernel for safekeeping until signal time */ +#define CAM_SYNC_USER_PAYLOAD_SIZE 2 + +/* Device type for sync device needed for device discovery */ +#define CAM_SYNC_DEVICE_TYPE (MEDIA_ENT_F_OLD_BASE) + +#define CAM_SYNC_GET_PAYLOAD_PTR(ev, type) \ + (type *)((char *)ev.u.data + sizeof(struct cam_sync_ev_header)) + +#define CAM_SYNC_GET_HEADER_PTR(ev) \ + ((struct cam_sync_ev_header *)ev.u.data) + +#define CAM_SYNC_GET_PAYLOAD_PTR_V2(ev, type) \ + (type *)((char *)ev.u.data + sizeof(struct cam_sync_ev_header_v2)) + +#define CAM_SYNC_GET_HEADER_PTR_V2(ev) \ + ((struct cam_sync_ev_header_v2 *)ev.u.data) + +#define CAM_SYNC_STATE_INVALID 0 +#define CAM_SYNC_STATE_ACTIVE 1 +#define CAM_SYNC_STATE_SIGNALED_SUCCESS 2 +#define CAM_SYNC_STATE_SIGNALED_ERROR 3 +#define CAM_SYNC_STATE_SIGNALED_CANCEL 4 + +/* Top level common sync event reason types */ +#define CAM_SYNC_COMMON_EVENT_START 0 +#define CAM_SYNC_COMMON_EVENT_UNUSED (CAM_SYNC_COMMON_EVENT_START + 0) +#define CAM_SYNC_COMMON_EVENT_SUCCESS (CAM_SYNC_COMMON_EVENT_START + 1) +#define CAM_SYNC_COMMON_EVENT_FLUSH (CAM_SYNC_COMMON_EVENT_START + 2) +#define CAM_SYNC_COMMON_EVENT_STOP (CAM_SYNC_COMMON_EVENT_START + 3) +#define CAM_SYNC_COMMON_EVENT_SYNX (CAM_SYNC_COMMON_EVENT_START + 4) +#define CAM_SYNC_COMMON_REG_PAYLOAD_EVENT (CAM_SYNC_COMMON_EVENT_START + 5) +#define CAM_SYNC_COMMON_SYNC_SIGNAL_EVENT (CAM_SYNC_COMMON_EVENT_START + 6) +#define CAM_SYNC_COMMON_RELEASE_EVENT (CAM_SYNC_COMMON_EVENT_START + 7) +#define CAM_SYNC_COMMON_EVENT_END
(CAM_SYNC_COMMON_EVENT_START + 50) + +/* ISP Sync event reason types */ +#define CAM_SYNC_ISP_EVENT_START (CAM_SYNC_COMMON_EVENT_END + 1) +#define CAM_SYNC_ISP_EVENT_UNKNOWN (CAM_SYNC_ISP_EVENT_START + 0) +#define CAM_SYNC_ISP_EVENT_BUBBLE (CAM_SYNC_ISP_EVENT_START + 1) +#define CAM_SYNC_ISP_EVENT_OVERFLOW (CAM_SYNC_ISP_EVENT_START + 2) +#define CAM_SYNC_ISP_EVENT_P2I_ERROR (CAM_SYNC_ISP_EVENT_START + 3) +#define CAM_SYNC_ISP_EVENT_VIOLATION (CAM_SYNC_ISP_EVENT_START + 4) +#define CAM_SYNC_ISP_EVENT_BUSIF_OVERFLOW (CAM_SYNC_ISP_EVENT_START + 5) +#define CAM_SYNC_ISP_EVENT_FLUSH (CAM_SYNC_ISP_EVENT_START + 6) +#define CAM_SYNC_ISP_EVENT_HW_STOP (CAM_SYNC_ISP_EVENT_START + 7) +#define CAM_SYNC_ISP_EVENT_RECOVERY_OVERFLOW (CAM_SYNC_ISP_EVENT_START + 8) +#define CAM_SYNC_ISP_EVENT_CSID_OUTPUT_FIFO_OVERFLOW (CAM_SYNC_ISP_EVENT_START + 9) +#define CAM_SYNC_ISP_EVENT_CSID_RX_ERROR (CAM_SYNC_ISP_EVENT_START + 10) +#define CAM_SYNC_ISP_EVENT_CSID_SENSOR_SWITCH_ERROR (CAM_SYNC_ISP_EVENT_START + 11) +#define CAM_SYNC_ISP_EVENT_END (CAM_SYNC_ISP_EVENT_START + 50) + +/* ICP Sync event reason types */ +#define CAM_SYNC_ICP_EVENT_START (CAM_SYNC_ISP_EVENT_END + 1) +#define CAM_SYNC_ICP_EVENT_UNKNOWN (CAM_SYNC_ICP_EVENT_START + 0) +#define CAM_SYNC_ICP_EVENT_FRAME_PROCESS_FAILURE (CAM_SYNC_ICP_EVENT_START + 1) +#define CAM_SYNC_ICP_EVENT_CONFIG_ERR (CAM_SYNC_ICP_EVENT_START + 2) +#define CAM_SYNC_ICP_EVENT_NO_MEMORY (CAM_SYNC_ICP_EVENT_START + 3) +#define CAM_SYNC_ICP_EVENT_BAD_STATE (CAM_SYNC_ICP_EVENT_START + 4) +#define CAM_SYNC_ICP_EVENT_BAD_PARAM (CAM_SYNC_ICP_EVENT_START + 5) +#define CAM_SYNC_ICP_EVENT_BAD_ITEM (CAM_SYNC_ICP_EVENT_START + 6) +#define CAM_SYNC_ICP_EVENT_INVALID_FORMAT (CAM_SYNC_ICP_EVENT_START + 7) +#define CAM_SYNC_ICP_EVENT_UNSUPPORTED (CAM_SYNC_ICP_EVENT_START + 8) +#define CAM_SYNC_ICP_EVENT_OUT_OF_BOUND (CAM_SYNC_ICP_EVENT_START + 9) +#define CAM_SYNC_ICP_EVENT_TIME_OUT (CAM_SYNC_ICP_EVENT_START + 10) +#define CAM_SYNC_ICP_EVENT_ABORTED 
(CAM_SYNC_ICP_EVENT_START + 11) +#define CAM_SYNC_ICP_EVENT_HW_VIOLATION (CAM_SYNC_ICP_EVENT_START + 12) +#define CAM_SYNC_ICP_EVENT_CMD_ERROR (CAM_SYNC_ICP_EVENT_START + 13) +#define CAM_SYNC_ICP_EVENT_HFI_ERR_COMMAND_SIZE (CAM_SYNC_ICP_EVENT_START + 14) +#define CAM_SYNC_ICP_EVENT_HFI_ERR_MESSAGE_SIZE (CAM_SYNC_ICP_EVENT_START + 15) +#define CAM_SYNC_ICP_EVENT_HFI_ERR_QUEUE_EMPTY (CAM_SYNC_ICP_EVENT_START + 16) +#define CAM_SYNC_ICP_EVENT_HFI_ERR_QUEUE_FULL (CAM_SYNC_ICP_EVENT_START + 17) +#define CAM_SYNC_ICP_EVENT_END (CAM_SYNC_ICP_EVENT_START + 50) + +/* JPEG Sync event reason types */ +#define CAM_SYNC_JPEG_EVENT_START (CAM_SYNC_ICP_EVENT_END + 1) +#define CAM_SYNC_JPEG_EVENT_UNKNOWN (CAM_SYNC_JPEG_EVENT_START + 0) +#define CAM_SYNC_JPEG_EVENT_INVLD_CMD (CAM_SYNC_JPEG_EVENT_START + 1) +#define CAM_SYNC_JPEG_EVENT_SET_IRQ_CB (CAM_SYNC_JPEG_EVENT_START + 2) +#define CAM_SYNC_JPEG_EVENT_HW_RESET_FAILED (CAM_SYNC_JPEG_EVENT_START + 3) +#define CAM_SYNC_JPEG_EVENT_CDM_CHANGE_BASE_ERR (CAM_SYNC_JPEG_EVENT_START + 4) +#define CAM_SYNC_JPEG_EVENT_CDM_CONFIG_ERR (CAM_SYNC_JPEG_EVENT_START + 5) +#define CAM_SYNC_JPEG_EVENT_START_HW_ERR (CAM_SYNC_JPEG_EVENT_START + 6) +#define CAM_SYNC_JPEG_EVENT_START_HW_HANG (CAM_SYNC_JPEG_EVENT_START + 7) +#define CAM_SYNC_JPEG_EVENT_MISR_CONFIG_ERR (CAM_SYNC_JPEG_EVENT_START + 8) +#define CAM_SYNC_JPEG_EVENT_END (CAM_SYNC_JPEG_EVENT_START + 50) + +/* FD Sync event reason types */ +#define CAM_SYNC_FD_EVENT_START (CAM_SYNC_JPEG_EVENT_END + 1) +#define CAM_SYNC_FD_EVENT_UNKNOWN (CAM_SYNC_FD_EVENT_START + 0) +#define CAM_SYNC_FD_EVENT_IRQ_FRAME_DONE (CAM_SYNC_FD_EVENT_START + 1) +#define CAM_SYNC_FD_EVENT_IRQ_RESET_DONE (CAM_SYNC_FD_EVENT_START + 2) +#define CAM_SYNC_FD_EVENT_HALT (CAM_SYNC_FD_EVENT_START + 3) +#define CAM_SYNC_FD_EVENT_END (CAM_SYNC_FD_EVENT_START + 50) + +/* LRME Sync event reason types */ +#define CAM_SYNC_LRME_EVENT_START (CAM_SYNC_FD_EVENT_END + 1) +#define CAM_SYNC_LRME_EVENT_UNKNOWN (CAM_SYNC_LRME_EVENT_START + 
0) +#define CAM_SYNC_LRME_EVENT_CB_ERROR (CAM_SYNC_LRME_EVENT_START + 1) +#define CAM_SYNC_LRME_EVENT_END (CAM_SYNC_LRME_EVENT_START + 50) + +/* OPE Sync event reason types */ +#define CAM_SYNC_OPE_EVENT_START (CAM_SYNC_LRME_EVENT_END + 1) +#define CAM_SYNC_OPE_EVENT_UNKNOWN (CAM_SYNC_OPE_EVENT_START + 0) +#define CAM_SYNC_OPE_EVENT_PAGE_FAULT (CAM_SYNC_OPE_EVENT_START + 1) +#define CAM_SYNC_OPE_EVENT_HW_HANG (CAM_SYNC_OPE_EVENT_START + 2) +#define CAM_SYNC_OPE_EVENT_HALT (CAM_SYNC_OPE_EVENT_START + 3) +#define CAM_SYNC_OPE_EVENT_CONFIG_ERR (CAM_SYNC_OPE_EVENT_START + 4) +#define CAM_SYNC_OPE_EVENT_HW_FLUSH (CAM_SYNC_OPE_EVENT_START + 5) +#define CAM_SYNC_OPE_EVENT_HW_RESUBMIT (CAM_SYNC_OPE_EVENT_START + 6) +#define CAM_SYNC_OPE_EVENT_HW_RESET_DONE (CAM_SYNC_OPE_EVENT_START + 7) +#define CAM_SYNC_OPE_EVENT_HW_ERROR (CAM_SYNC_OPE_EVENT_START + 8) +#define CAM_SYNC_OPE_EVENT_INVLD_CMD (CAM_SYNC_OPE_EVENT_START + 9) +#define CAM_SYNC_OPE_EVENT_HW_RESET_FAILED (CAM_SYNC_OPE_EVENT_START + 10) +#define CAM_SYNC_OPE_EVENT_END (CAM_SYNC_OPE_EVENT_START + 50) + +/* CRE Sync event reason types */ +#define CAM_SYNC_CRE_EVENT_START (CAM_SYNC_OPE_EVENT_END + 1) +#define CAM_SYNC_CRE_EVENT_UNKNOWN (CAM_SYNC_CRE_EVENT_START + 0) +#define CAM_SYNC_CRE_EVENT_CONFIG_ERR (CAM_SYNC_CRE_EVENT_START + 1) +#define CAM_SYNC_CRE_EVENT_INVLD_CMD (CAM_SYNC_CRE_EVENT_START + 2) +#define CAM_SYNC_CRE_EVENT_SET_IRQ_CB (CAM_SYNC_CRE_EVENT_START + 3) +#define CAM_SYNC_CRE_EVENT_HW_RESET_FAILED (CAM_SYNC_CRE_EVENT_START + 4) +#define CAM_SYNC_CRE_EVENT_HW_ERR (CAM_SYNC_CRE_EVENT_START + 5) +#define CAM_SYNC_CRE_EVENT_END (CAM_SYNC_CRE_EVENT_START + 50) + +#define CAM_SYNC_EVENT_MAX 8 +#define CAM_SYNC_EVENT_REASON_CODE_INDEX 0 + +/* Fence types supported by the driver, when used in the fence mask it + * represents bit position, for masks it needs to be set as + * BIT(fence_type) on other occasions use as is. 
+ */ +#define CAM_GENERIC_FENCE_TYPE_SYNC_OBJ 0x1 +#define CAM_GENERIC_FENCE_TYPE_DMA_FENCE 0x2 +#define CAM_GENERIC_FENCE_TYPE_SYNX_OBJ 0x3 + +/* Additional param index for cam_generic_fence_config */ +#define CAM_GENERIC_FENCE_CONFIG_FLAG_PARAM_INDEX BIT(0) + +/* Flag fields for cam_generic_fence_config */ +#define CAM_GENERIC_FENCE_FLAG_IS_GLOBAL_SYNX_OBJ BIT(0) + +/** + * struct cam_sync_ev_header - Event header for sync event notification + * + * @sync_obj: Sync object + * @status: Status of the object + */ +struct cam_sync_ev_header { + __s32 sync_obj; + __s32 status; +}; + +/** + * struct cam_sync_ev_header_v2 - Event header for sync event notification + * + * @sync_obj: Sync object + * @status: Status of the object + * @version: sync driver version + * @evt_param: event parameter + */ +struct cam_sync_ev_header_v2 { + __s32 sync_obj; + __s32 status; + uint32_t version; + uint32_t evt_param[CAM_SYNC_EVENT_MAX]; +}; + +/** + * struct cam_sync_info - Sync object creation information + * + * @name: Optional string representation of the sync object + * @sync_obj: Sync object returned after creation in kernel + */ +struct cam_sync_info { + char name[64]; + __s32 sync_obj; +}; + +/** + * struct cam_sync_signal - Sync object signaling struct + * + * @sync_obj: Sync object to be signaled + * @sync_state: State of the sync object to which it should be signaled + */ +struct cam_sync_signal { + __s32 sync_obj; + __u32 sync_state; +}; + +/** + * struct cam_sync_merge - Merge information for sync objects + * + * @sync_objs: Pointer to sync objects + * @num_objs: Number of objects in the array + * @merged: Merged sync object + */ +struct cam_sync_merge { + __u64 sync_objs; + __u32 num_objs; + __s32 merged; +}; + +/** + * struct cam_sync_userpayload_info - Payload info from user space + * + * @sync_obj: Sync object for which payload has to be registered for + * @reserved: Reserved + * @payload: Pointer to user payload + */ +struct cam_sync_userpayload_info { + __s32 
sync_obj; + __u32 reserved; + __u64 payload[CAM_SYNC_USER_PAYLOAD_SIZE]; +}; + +/** + * struct cam_sync_wait - Sync object wait information + * + * @sync_obj: Sync object to wait on + * @reserved: Reserved + * @timeout_ms: Timeout in milliseconds + */ +struct cam_sync_wait { + __s32 sync_obj; + __u32 reserved; + uint64_t timeout_ms; +}; + +/** + * struct cam_generic_fence_config - Fence config + * Based on the operation, fields could be + * input/output + * + * @version: Struct version + * @name: Optional string representation [used for create/import] + * @fence_sel_mask: Fence select mask, if set for fence types other than the type + * this input is processed for, the corresponding types would be + * processed as well. For example if one wants to import a sync + * object for an existing dma fence, set bit with index + * CAM_GENERIC_FENCE_TYPE_SYNC_OBJ in mask, + * a new sync object would be returned in sync_obj linked to an + * existing dma_fence_fd. + * @sync_obj: Sync object + * @dma_fence_fd: DMA fence fd + * @synx_obj: Synx object + * @reason_code: Indicates if the operation was successful or not + * for this set of fences. 
It is the responsibility of + * the caller to clean up any partially processed batched + * fences + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + */ +struct cam_generic_fence_config { + __u32 version; + char name[64]; + __u32 fence_sel_mask; + __s32 sync_obj; + __s32 dma_fence_fd; + __s32 synx_obj; + __s32 reason_code; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[4]; +}; + +/** + * struct cam_generic_fence_signal_info - Fence signaling info + * + * @version: Struct version + * @num_fences_requested: Number of fences to process + * @num_fences_processed: Number of fences processed by the driver + * @fence_handle_type: Type of the fence signal handle [user handle is expected] + * @fence_data_size: Size of the data pointed to by fence_info_hdl, + * it should be the consolidated size of + * total fences to process * size of the corresponding signaling + * input structure + * @fence_info_hdl: Handle to the fence signal data (synx/dma) + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + */ +struct cam_generic_fence_signal_info { + __u32 version; + __u32 num_fences_requested; + __u32 num_fences_processed; + __u32 fence_handle_type; + __u32 fence_data_size; + __u64 fence_info_hdl; + __u32 num_valid_params; + __u32 valid_param_mask; + __s32 params[3]; +}; + +/** + * struct cam_dma_fence_signal - DMA fence signaling info + * + * @version: Struct version + * @dma_fence_fd: DMA fence to be signaled + * @status: Any status if applicable, 0 for success + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + */ +struct cam_dma_fence_signal { + __u32 version; + __s32 dma_fence_fd; + __s32 status; + __u32 num_valid_params; + __u32 valid_param_mask; + 
__s32 params[3]; +}; + +/** + * struct cam_synx_obj_signal - Synx object signaling info + * + * @version: Struct version + * @synx_obj: Synx object to be signaled + * @status: Fence status + * @reason_code: Indicates if the operation was successful or not + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + */ +struct cam_synx_obj_signal { + __u32 version; + __u32 synx_obj; + __u32 status; + __s32 reason_code; + __u32 num_valid_params; + __u32 valid_param_mask; + __s32 params[4]; +}; + +/** + * struct cam_generic_fence_input_info - Parent structure that + * provides info on fence batching + * + * @version: Struct version + * @num_fences_requested: Number of fences to process + * @num_fences_processed: Number of fences processed + * If userland requests 5 fences to be created + * and it fails on the 3rd, num processed will be + * 3. This can be used by userspace to clean + * partially batched fences + * @num_valid_params: Valid number of params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + * @fence_info: Variable length fence info input based on num_fences + */ +struct cam_generic_fence_input_info { + __u32 version; + __u32 num_fences_requested; + __u32 num_fences_processed; + __u32 num_valid_params; + __u32 valid_param_mask; + __s32 params[3]; + union { + struct cam_generic_fence_config fence_cfg[1]; + __DECLARE_FLEX_ARRAY(struct cam_generic_fence_config, fence_cfg_flex); + }; +}; + +/** + * struct cam_generic_fence_cmd_args - Generic fence cmd args + * + * @version: Struct version + * @fence_type: Type of fence the ioctl cmd is for [dma/sync/synx] + * @input_handle_type: Type of the fence input handle [user handle is expected] + * @input_data_size: Size of the data pointed to by input_handle + * @input_handle: Handle to the fence input data [create/signal/import...] 
+ * corresponding to the fence_type + * @num_valid_params: Valid number of reserved params being used + * @valid_param_mask: Mask to indicate the field types in params + * @params: Additional params + */ +struct cam_generic_fence_cmd_args { + __u32 version; + __u32 fence_type; + __u32 input_handle_type; + __u32 input_data_size; + __u64 input_handle; + __u32 num_valid_params; + __u32 valid_param_mask; + __u32 params[6]; +}; + +/** + * struct cam_private_ioctl_arg - Sync driver ioctl argument + * + * @id: IOCTL command id + * @size: Size of command payload + * @result: Result of command execution + * @reserved: Reserved + * @ioctl_ptr: Pointer to user data + */ +struct cam_private_ioctl_arg { + __u32 id; + __u32 size; + __u32 result; + __u32 reserved; + __u64 ioctl_ptr; +}; + +#define CAM_PRIVATE_IOCTL_CMD \ + _IOWR('V', BASE_VIDIOC_PRIVATE, struct cam_private_ioctl_arg) + +/* Exclusive sync object IOCTL cmds */ +#define CAM_SYNC_CREATE 0 +#define CAM_SYNC_DESTROY 1 +#define CAM_SYNC_SIGNAL 2 +#define CAM_SYNC_MERGE 3 +#define CAM_SYNC_REGISTER_PAYLOAD 4 +#define CAM_SYNC_DEREGISTER_PAYLOAD 5 +#define CAM_SYNC_WAIT 6 + +/* Generic fence [sync/dma/synx] IOCTL cmds */ +#define CAM_GENERIC_FENCE_CREATE 11 +#define CAM_GENERIC_FENCE_RELEASE 12 +#define CAM_GENERIC_FENCE_IMPORT 13 +#define CAM_GENERIC_FENCE_SIGNAL 14 + +#endif /* __UAPI_CAM_SYNC_H__ */ diff --git a/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_tfe.h b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_tfe.h new file mode 100644 index 0000000000..60e7c8573f --- /dev/null +++ b/qcom/opensource/camera-kernel/include/uapi/camera/media/cam_tfe.h @@ -0,0 +1,542 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UAPI_CAM_TFE_H__ +#define __UAPI_CAM_TFE_H__ + +#include +#include +#include + + +/* ISP TFE driver name */ +#define CAM_ISP_TFE_DEV_NAME "cam-isp" + +/* HW type */ +#define CAM_ISP_TFE_HW_BASE 0 +#define CAM_ISP_TFE_HW_CSID 1 +#define CAM_ISP_TFE_HW_TFE 2 +#define CAM_ISP_TFE_HW_MAX 3 + +/* Color Pattern */ +#define CAM_ISP_TFE_PATTERN_BAYER_RGRGRG 0 +#define CAM_ISP_TFE_PATTERN_BAYER_GRGRGR 1 +#define CAM_ISP_TFE_PATTERN_BAYER_BGBGBG 2 +#define CAM_ISP_TFE_PATTERN_BAYER_GBGBGB 3 +#define CAM_ISP_TFE_PATTERN_YUV_YCBYCR 4 +#define CAM_ISP_TFE_PATTERN_YUV_YCRYCB 5 +#define CAM_ISP_TFE_PATTERN_YUV_CBYCRY 6 +#define CAM_ISP_TFE_PATTERN_YUV_CRYCBY 7 +#define CAM_ISP_TFE_PATTERN_MAX 8 + +/* Usage Type */ +#define CAM_ISP_TFE_IN_RES_USAGE_SINGLE 0 +#define CAM_ISP_TFE_IN_RES_USAGE_DUAL 1 +#define CAM_ISP_TFE_IN_RES_USAGE_MAX 2 + +/* Resource ID */ +#define CAM_ISP_TFE_RES_ID_PORT 0 +#define CAM_ISP_TFE_RES_ID_MAX 1 + +/* Resource Type - Type of resource for the resource id + * defined in cam_isp_tfe.h + */ + +/* Lane Type in input resource for Port */ +#define CAM_ISP_TFE_IN_LANE_TYPE_DPHY 0 +#define CAM_ISP_TFE_IN_LANE_TYPE_CPHY 1 +#define CAM_ISP_TFE_IN_LANE_TYPE_MAX 2 + +/* ISP TFE packet opcode */ +#define CAM_ISP_TFE_PACKET_OP_BASE 0 +#define CAM_ISP_TFE_PACKET_INIT_DEV 1 +#define CAM_ISP_TFE_PACKET_CONFIG_DEV 2 +#define CAM_ISP_TFE_PACKET_OP_MAX 3 + +/* ISP TFE packet meta_data type for command buffer */ +#define CAM_ISP_TFE_PACKET_META_BASE 0 +#define CAM_ISP_TFE_PACKET_META_LEFT 1 +#define CAM_ISP_TFE_PACKET_META_RIGHT 2 +#define CAM_ISP_TFE_PACKET_META_COMMON 3 +#define CAM_ISP_TFE_PACKET_META_DUAL_CONFIG 4 +#define CAM_ISP_TFE_PACKET_META_GENERIC_BLOB_COMMON 5 +#define CAM_ISP_TFE_PACKET_META_REG_DUMP_PER_REQUEST 6 +#define CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_FLUSH 7 +#define CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR 8 + +/* ISP TFE Generic Cmd Buffer Blob types */ +#define CAM_ISP_TFE_GENERIC_BLOB_TYPE_HFR_CONFIG 0 +#define 
CAM_ISP_TFE_GENERIC_BLOB_TYPE_CLOCK_CONFIG 1 +#define CAM_ISP_TFE_GENERIC_BLOB_TYPE_BW_CONFIG_V2 2 +#define CAM_ISP_TFE_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG 3 +#define CAM_ISP_TFE_GENERIC_BLOB_TYPE_BW_LIMITER_CFG 16 + +/* DSP mode */ +#define CAM_ISP_TFE_DSP_MODE_NONE 0 +#define CAM_ISP_TFE_DSP_MODE_ONE_WAY 1 +#define CAM_ISP_TFE_DSP_MODE_ROUND 2 + +/* Per Path Usage Data */ +#define CAM_ISP_TFE_USAGE_INVALID 0 +#define CAM_ISP_TFE_USAGE_LEFT_PX 1 +#define CAM_ISP_TFE_USAGE_RIGHT_PX 2 +#define CAM_ISP_TFE_USAGE_RDI 3 + +/* Bus write master modes */ +#define CAM_ISP_TFE_WM_FRAME_BASED_MODE 0 +#define CAM_ISP_TFE_WM_LINE_BASED_MODE 1 +#define CAM_ISP_TFE_WM_INDEX_BASED_MODE 2 + +#define CAM_ISP_TFE_VC_DT_CFG 2 + +/* Feature Flag indicators */ +#define CAM_ISP_TFE_FLAG_QCFA_BIN BIT(0) +#define CAM_ISP_TFE_FLAG_BAYER_BIN BIT(1) + +/* Query devices */ +/** + * struct cam_isp_tfe_dev_cap_info - A cap info for particular hw type + * + * @hw_type: Hardware type for the cap info + * @reserved: reserved field for alignment + * @hw_version: Hardware version + * + */ +struct cam_isp_tfe_dev_cap_info { + __u32 hw_type; + __u32 reserved; + struct cam_hw_version hw_version; +}; + +/** + * struct cam_isp_tfe_query_cap_cmd - ISP TFE query device + * capability payload + * + * @device_iommu: returned iommu handles for device + * @cdm_iommu: returned iommu handles for cdm + * @num_dev: returned number of device capabilities + * @reserved: reserved field for alignment + * @dev_caps: returned device capability array + * + */ +struct cam_isp_tfe_query_cap_cmd { + struct cam_iommu_handle device_iommu; + struct cam_iommu_handle cdm_iommu; + __s32 num_dev; + __u32 reserved; + struct cam_isp_tfe_dev_cap_info dev_caps[CAM_ISP_TFE_HW_MAX]; +}; + +/* Acquire Device */ +/** + * struct cam_isp_tfe_out_port_info - An output port resource info + * + * @res_id: output resource id defined in file + * cam_isp_tfe.h + * @format: output format of the resource + * @width: output width in pixels + * 
@height: output height in lines + * @stride: output stride + * @comp_grp_id: composite group id for the resource. + * @secure_mode: flag to tell if output should be run in secure + * mode or not. See cam_defs.h for definition + * @wm_mode: wm mode + * @reserved: reserved field for alignment + * + */ +struct cam_isp_tfe_out_port_info { + __u32 res_id; + __u32 format; + __u32 width; + __u32 height; + __u32 stride; + __u32 comp_grp_id; + __u32 secure_mode; + __u32 wm_mode; + __u32 reserved; +}; + +/** + * struct cam_isp_tfe_in_port_info - An input port resource info + * + * @res_id: input resource id CAM_ISP_TFE_IN_RES_XXX + * @lane_type: lane type: c-phy or d-phy. + * @lane_num: active lane number + * @lane_cfg: lane configurations: 4 bits per lane + * @vc: input virtual channel number + * @dt: input data type number + * @format: input format + * @pix_pattern: pixel pattern + * @usage_type: whether dual tfe is required + * @left_start: left input start offset in pixels + * @left_end: left input stop offset in pixels + * @left_width: left input width in pixels + * @right_start: right input start offset in pixels. + * Only for Dual TFE + * @right_end: right input stop offset in + * pixels. Only for Dual TFE + * @right_width: right input width in pixels. + * Only for dual TFE + * @line_start: top of the line number + * @line_stop: bottome of the line number + * @height: input height in lines + * @batch_size: batch size for HFR mode + * @dsp_mode: DSP stream mode(Defines as + * CAM_ISP_TFE_DSP_MODE_*) + * @sensor_width: sensor width + * @sensor_height: sensor height + * @hbi_value: sensor HBI value + * @vbi_value: sensor VBI value + * @sensor_fps: sensor fps + * @init_frame_drop init frame drop value. 
+ * @num_out_res: number of the output resource associated + * @data: payload that contains the output resources, + * array of cam_isp_tfe_out_port_info data + * + */ +struct cam_isp_tfe_in_port_info { + __u32 res_id; + __u32 lane_type; + __u32 lane_num; + __u32 lane_cfg; + __u32 vc; + __u32 dt; + __u32 format; + __u32 pix_pattern; + __u32 usage_type; + __u32 left_start; + __u32 left_end; + __u32 left_width; + __u32 right_start; + __u32 right_end; + __u32 right_width; + __u32 line_start; + __u32 line_end; + __u32 height; + __u32 batch_size; + __u32 dsp_mode; + __u32 sensor_width; + __u32 sensor_height; + __u32 sensor_hbi; + __u32 sensor_vbi; + __u32 sensor_fps; + __u32 init_frame_drop; + __u32 num_out_res; + union { + struct cam_isp_tfe_out_port_info data[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_tfe_out_port_info, data_flex); + }; +}; + +/** + * struct cam_isp_tfe_in_port_info_v2 - An input port resource info + * + * @res_id: input resource id CAM_ISP_TFE_IN_RES_XXX + * @lane_type: lane type: c-phy or d-phy. + * @lane_num: active lane number + * @lane_cfg: lane configurations: 4 bits per lane + * @vc: input virtual channel number + * @dt: input data type number + * @format: input format + * @pix_pattern: pixel pattern + * @usage_type: whether dual tfe is required + * @left_start: left input start offset in pixels + * @left_end: left input stop offset in pixels + * @left_width: left input width in pixels + * @right_start: right input start offset in pixels. + * Only for Dual TFE + * @right_end: right input stop offset in + * pixels. Only for Dual TFE + * @right_width: right input width in pixels. 
+ * Only for dual TFE + * @line_start: top of the line number + * @line_stop: bottome of the line number + * @height: input height in lines + * @batch_size: batch size for HFR mode + * @dsp_mode: DSP stream mode(Defines as + * CAM_ISP_TFE_DSP_MODE_*) + * @sensor_width: sensor width + * @sensor_height: sensor height + * @sensor_hbi: sensor HBI value + * @sensor_vbi: sensor VBI value + * @sensor_fps: sensor fps + * @init_frame_drop init frame drop value. + * @num_out_res: number of the output resource associated + * @feature_flag: Feature flags for indicating QCFA, Bayer bin + * @core_cfg: Core configuration + * @reserve_field_1: Reserve field 1 + * @reserve_field_2: Reserve field 2 + * @reserve_field_3: Reserve field 3 + * @reserve_field_4: Reserve field 4 + * @reserve_field_5: Reserve field 5 + * @reserve_field_6: Reserve filed 6 + * @data: payload that contains the output resources, + * array of cam_isp_tfe_out_port_info data + * + */ +struct cam_isp_tfe_in_port_info_v2 { + __u32 res_id; + __u32 lane_type; + __u32 lane_num; + __u32 lane_cfg; + __u32 vc[CAM_ISP_TFE_VC_DT_CFG]; + __u32 dt[CAM_ISP_TFE_VC_DT_CFG]; + __u32 num_valid_vc_dt; + __u32 format; + __u32 pix_pattern; + __u32 usage_type; + __u32 left_start; + __u32 left_end; + __u32 left_width; + __u32 right_start; + __u32 right_end; + __u32 right_width; + __u32 line_start; + __u32 line_end; + __u32 height; + __u32 batch_size; + __u32 dsp_mode; + __u32 sensor_width; + __u32 sensor_height; + __u32 sensor_hbi; + __u32 sensor_vbi; + __u32 sensor_fps; + __u32 init_frame_drop; + __u32 num_out_res; + __u32 feature_flag; + __u32 core_cfg; + __u32 reserve_field_1; + __u32 reserve_field_2; + __u32 reserve_field_3; + __u32 reserve_field_4; + __u32 reserve_field_5; + __u32 reserve_field_6; + union { + struct cam_isp_tfe_out_port_info data[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_tfe_out_port_info, data_flex); + }; +}; + +/** + * struct cam_isp_tfe_resource - A resource bundle + * + * @resoruce_id: resource id for the 
resource bundle + * @length: length of the while resource blob + * @handle_type: type of the resource handle + * @reserved: reserved field for alignment + * @res_hdl: resource handle that points to the + * resource array + * + */ +struct cam_isp_tfe_resource { + __u32 resource_id; + __u32 length; + __u32 handle_type; + __u32 reserved; + __u64 res_hdl; +}; + +/** + * struct cam_isp_tfe_port_hfr_config - HFR configuration for + * this port + * + * @resource_type: Resource type + * @subsample_pattern: Subsample pattern. Used in HFR mode. It + * should be consistent with batchSize and + * CAMIF programming. + * @subsample_period: Subsample period. Used in HFR mode. It + * should be consistent with batchSize and + * CAMIF programming. + * @framedrop_pattern: Framedrop pattern + * @framedrop_period: Framedrop period + * @reserved: Reserved for alignment + */ +struct cam_isp_tfe_port_hfr_config { + __u32 resource_type; + __u32 subsample_pattern; + __u32 subsample_period; + __u32 framedrop_pattern; + __u32 framedrop_period; + __u32 reserved; +} __attribute__((packed)); + +/** + * struct cam_isp_tfe_resource_hfr_config - Resource HFR + * configuration + * + * @num_ports: Number of ports + * @reserved: Reserved for alignment + * @port_hfr_config: HFR configuration for each IO port + */ +struct cam_isp_tfe_resource_hfr_config { + __u32 num_ports; + __u32 reserved; + union { + struct cam_isp_tfe_port_hfr_config port_hfr_config[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_tfe_port_hfr_config, port_hfr_config_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_isp_tfe_dual_stripe_config - stripe config per bus + * client + * + * @offset: Start horizontal offset relative to + * output buffer + * @width: Width of the stripe in pixels + * @port_id: Port id of ISP TFE output + * @reserved: Reserved for alignment + * + */ +struct cam_isp_tfe_dual_stripe_config { + __u32 offset; + __u32 width; + __u32 port_id; + __u32 reserved; +}; + +/** + * struct cam_isp_tfe_dual_config - 
dual isp configuration + * + * @num_ports Number of isp output ports + * @reserved Reserved field for alignment + * @stripes: Stripe information + * + */ +struct cam_isp_tfe_dual_config { + __u32 num_ports; + __u32 reserved; + union { + struct cam_isp_tfe_dual_stripe_config stripes[1]; + __DECLARE_FLEX_ARRAY(struct cam_isp_tfe_dual_stripe_config, stripes_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_isp_tfe_clock_config - Clock configuration + * + * @usage_type: Usage type (Single/Dual) + * @num_rdi: Number of RDI votes + * @left_pix_hz: Pixel Clock for Left ISP + * @right_pix_hz: Pixel Clock for Right ISP + * valid only if Dual + * @rdi_hz: RDI Clock. ISP TFE clock will be + * max of RDI and PIX clocks. For a + * particular context which ISP TFE + * HW the RDI is allocated to is + * not known to UMD. Hence pass the + * clock and let KMD decide. + */ +struct cam_isp_tfe_clock_config { + __u32 usage_type; + __u32 num_rdi; + __u64 left_pix_hz; + __u64 right_pix_hz; + union { + __u64 rdi_hz[1]; + __DECLARE_FLEX_ARRAY(__u64, rdi_hz_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_isp_tfe_csid_clock_config - CSID clock + * configuration + * + * @csid_clock CSID clock + * @csi_phy_clock Phy clock valid if tpg is selected + */ +struct cam_isp_tfe_csid_clock_config { + __u64 csid_clock; + __u64 phy_clock; +} __attribute__((packed)); + +/** + * struct cam_isp_tfe_bw_config_v2 - Bandwidth configuration + * + * @usage_type: Usage type (Single/Dual) + * @num_paths: Number of axi data paths + * @axi_path Per path vote info + */ +struct cam_isp_tfe_bw_config_v2 { + __u32 usage_type; + __u32 num_paths; + union { + struct cam_axi_per_path_bw_vote axi_path[1]; + __DECLARE_FLEX_ARRAY(struct cam_axi_per_path_bw_vote, axi_path_flex); + }; +} __attribute__((packed)); + +/** + * struct cam_isp_acquire_hw_info - ISP TFE acquire HW params + * + * @common_info_version : Version of common info struct used + * @common_info_size : Size of common info struct used + 
* @common_info_offset : Offset of common info from start of data + * @num_inputs : Number of inputs + * @input_info_version : Version of input info struct used + * @input_info_size : Size of input info struct used + * @input_info_offset : Offset of input info from start of data + * @data : Data pointer to point the cam_isp_tfe_in_port_info + * structure + */ +struct cam_isp_tfe_acquire_hw_info { + __u16 common_info_version; + __u16 common_info_size; + __u32 common_info_offset; + __u32 num_inputs; + __u32 input_info_version; + __u32 input_info_size; + __u32 input_info_offset; + __u64 data; +}; + +/** + * struct cam_isp_tfe_wm_bw_limiter_config - ISP TFE write master + * BW limter config + * + * + * @res_type : output resource type defined in file cam_isp_tfe.h + * @enable_limiter : 0 for disable else enabled + * @counter_limit : Max counter value + */ +struct cam_isp_tfe_wm_bw_limiter_config { + __u32 res_type; + __u32 enable_limiter; + __u32 counter_limit[CAM_PACKET_MAX_PLANES]; +}; + +/** + * struct cam_isp_tfe_out_rsrc_bw_limiter_config - ISP TFE out rsrc BW limiter config + * + * Configure BW limiter for ISP TFE WMs + * + * @version : Version field + * @num_ports : Number of ports + * @bw_limit_config : WM BW limiter config + */ +struct cam_isp_tfe_out_rsrc_bw_limiter_config { + __u32 version; + __u32 num_ports; + union { + struct cam_isp_tfe_wm_bw_limiter_config bw_limiter_config[1]; + __DECLARE_FLEX_ARRAY( + struct cam_isp_tfe_wm_bw_limiter_config, bw_limiter_config_flex); + }; +}; + +#define CAM_TFE_ACQUIRE_COMMON_VER0 0x1000 + +#define CAM_TFE_ACQUIRE_COMMON_SIZE_VER0 0x0 + +#define CAM_TFE_ACQUIRE_INPUT_VER0 0x2000 + +#define CAM_TFE_ACQUIRE_INPUT_SIZE_VER0 sizeof(struct cam_isp_tfe_in_port_info) + +#define CAM_TFE_ACQUIRE_OUT_VER0 0x3000 + +#define CAM_TFE_ACQUIRE_OUT_SIZE_VER0 sizeof(struct cam_isp_tfe_out_port_info) + +#endif /* __UAPI_CAM_TFE_H__ */ diff --git a/qcom/opensource/camera-kernel/pineapple_defconfig 
b/qcom/opensource/camera-kernel/pineapple_defconfig new file mode 100644 index 0000000000..73174a4071 --- /dev/null +++ b/qcom/opensource/camera-kernel/pineapple_defconfig @@ -0,0 +1,14 @@ +CONFIG_SPECTRA_ISP=y +CONFIG_SPECTRA_ICP=y +CONFIG_SPECTRA_JPEG=y +CONFIG_SPECTRA_SENSOR=y +CONFIG_SPECTRA_USE_CLK_CRM_API=y +CONFIG_SPECTRA_USE_RPMH_DRV_API=y +CONFIG_SPECTRA_LLCC_STALING=y +CONFIG_TARGET_SYNX_ENABLE=y +CONFIG_MSM_MMRM=y +CONFIG_INTERCONNECT_QCOM=y +CONFIG_DOMAIN_ID_SECURE_CAMERA=y +CONFIG_DYNAMIC_FD_PORT_CONFIG=y +CONFIG_SECURE_CAMERA_25=y +CONFIG_SPECTRA_SECURE_CAMNOC_REG_UPDATE=y diff --git a/qcom/opensource/camera-kernel/product.mk b/qcom/opensource/camera-kernel/product.mk new file mode 100644 index 0000000000..f1702f03ed --- /dev/null +++ b/qcom/opensource/camera-kernel/product.mk @@ -0,0 +1,10 @@ +CAMERA_DLKM_ENABLED := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_CAMERA_OVERRIDE), false) + CAMERA_DLKM_ENABLED := false; + endif +endif + +ifeq ($(CAMERA_DLKM_ENABLED),true) +PRODUCT_PACKAGES += camera.ko +endif diff --git a/qcom/opensource/camera-kernel/project_defconfig.bzl b/qcom/opensource/camera-kernel/project_defconfig.bzl new file mode 100644 index 0000000000..add2129955 --- /dev/null +++ b/qcom/opensource/camera-kernel/project_defconfig.bzl @@ -0,0 +1,49 @@ +load("@bazel_skylib//rules:write_file.bzl", "write_file") + +common_configs = [ + "CONFIG_SPECTRA_ISP=y", + "CONFIG_SPECTRA_ICP=y", + "CONFIG_SPECTRA_JPEG=y", + "CONFIG_SPECTRA_SENSOR=y", + "CONFIG_SPECTRA_USE_CLK_CRM_API=y", + "CONFIG_SPECTRA_USE_RPMH_DRV_API=y", + "CONFIG_SPECTRA_LLCC_STALING=y", +] + +dependency_config = [ + "CONFIG_TARGET_SYNX_ENABLE=y", + "CONFIG_INTERCONNECT_QCOM=y", + "CONFIG_DOMAIN_ID_SECURE_CAMERA=y", + "CONFIG_DYNAMIC_FD_PORT_CONFIG=y", + "CONFIG_SECURE_CAMERA_25=y", + "CONFIG_MSM_MMRM=y", +] + +project_configs = select({ + # Project-specific configs + ":no_project": [], + ":pineapple": dependency_config + [ + 
"CONFIG_SPECTRA_SECURE_CAMNOC_REG_UPDATE=y", + ], + ":sun": dependency_config + [ + "CONFIG_SPECTRA_SECURE_DYN_PORT_CFG=y", + "CONFIG_SPECTRA_SECURE_CAMNOC_REG_UPDATE=y", + ], + ":canoe": [], +}) + +""" +Return a label which defines a project-specific defconfig snippet to be +applied on top of the platform defconfig. +""" + +def get_project_defconfig(target, variant): + rule_name = "{}_{}_project_defconfig".format(target, variant) + + write_file( + name = rule_name, + out = "{}.generated".format(rule_name), + content = common_configs + project_configs + [""], + ) + + return rule_name diff --git a/qcom/opensource/camera-kernel/sun_defconfig b/qcom/opensource/camera-kernel/sun_defconfig new file mode 100644 index 0000000000..95d76722b1 --- /dev/null +++ b/qcom/opensource/camera-kernel/sun_defconfig @@ -0,0 +1,15 @@ +CONFIG_SPECTRA_ISP=y +CONFIG_SPECTRA_ICP=y +CONFIG_SPECTRA_JPEG=y +CONFIG_SPECTRA_SENSOR=y +CONFIG_SPECTRA_USE_CLK_CRM_API=y +CONFIG_SPECTRA_USE_RPMH_DRV_API=y +CONFIG_SPECTRA_LLCC_STALING=y +CONFIG_TARGET_SYNX_ENABLE=y +CONFIG_MSM_MMRM=y +CONFIG_INTERCONNECT_QCOM=y +CONFIG_DOMAIN_ID_SECURE_CAMERA=y +CONFIG_DYNAMIC_FD_PORT_CONFIG=y +CONFIG_SECURE_CAMERA_25=y +CONFIG_SPECTRA_SECURE_DYN_PORT_CFG=y +CONFIG_SPECTRA_SECURE_CAMNOC_REG_UPDATE=y